path: root/models
author    Daniel Baumann <daniel@debian.org>  2024-10-18 20:33:49 +0200
committer Daniel Baumann <daniel@debian.org>  2024-12-12 23:57:56 +0100
commit    e68b9d00a6e05b3a941f63ffb696f91e554ac5ec (patch)
tree      97775d6c13b0f416af55314eb6a89ef792474615 /models
parent    Initial commit. (diff)
download  forgejo-e68b9d00a6e05b3a941f63ffb696f91e554ac5ec.tar.xz
          forgejo-e68b9d00a6e05b3a941f63ffb696f91e554ac5ec.zip
Adding upstream version 9.0.3.
Signed-off-by: Daniel Baumann <daniel@debian.org>
Diffstat (limited to '')
-rw-r--r--models/actions/artifact.go183
-rw-r--r--models/actions/forgejo.go84
-rw-r--r--models/actions/forgejo_test.go178
-rw-r--r--models/actions/main_test.go19
-rw-r--r--models/actions/run.go425
-rw-r--r--models/actions/run_job.go180
-rw-r--r--models/actions/run_job_list.go80
-rw-r--r--models/actions/run_list.go138
-rw-r--r--models/actions/runner.go362
-rw-r--r--models/actions/runner_list.go65
-rw-r--r--models/actions/runner_test.go75
-rw-r--r--models/actions/runner_token.go120
-rw-r--r--models/actions/runner_token_test.go41
-rw-r--r--models/actions/schedule.go140
-rw-r--r--models/actions/schedule_list.go83
-rw-r--r--models/actions/schedule_spec.go73
-rw-r--r--models/actions/schedule_spec_list.go105
-rw-r--r--models/actions/schedule_spec_test.go71
-rw-r--r--models/actions/status.go104
-rw-r--r--models/actions/task.go527
-rw-r--r--models/actions/task_list.go87
-rw-r--r--models/actions/task_output.go55
-rw-r--r--models/actions/task_step.go41
-rw-r--r--models/actions/tasks_version.go105
-rw-r--r--models/actions/utils.go84
-rw-r--r--models/actions/utils_test.go90
-rw-r--r--models/actions/variable.go139
-rw-r--r--models/activities/action.go777
-rw-r--r--models/activities/action_list.go203
-rw-r--r--models/activities/action_test.go320
-rw-r--r--models/activities/main_test.go17
-rw-r--r--models/activities/notification.go407
-rw-r--r--models/activities/notification_list.go476
-rw-r--r--models/activities/notification_test.go141
-rw-r--r--models/activities/repo_activity.go391
-rw-r--r--models/activities/repo_activity_test.go30
-rw-r--r--models/activities/statistic.go120
-rw-r--r--models/activities/user_heatmap.go78
-rw-r--r--models/activities/user_heatmap_test.go101
-rw-r--r--models/admin/task.go232
-rw-r--r--models/asymkey/error.go318
-rw-r--r--models/asymkey/gpg_key.go273
-rw-r--r--models/asymkey/gpg_key_add.go167
-rw-r--r--models/asymkey/gpg_key_commit_verification.go63
-rw-r--r--models/asymkey/gpg_key_common.go146
-rw-r--r--models/asymkey/gpg_key_import.go47
-rw-r--r--models/asymkey/gpg_key_list.go38
-rw-r--r--models/asymkey/gpg_key_object_verification.go520
-rw-r--r--models/asymkey/gpg_key_tag_verification.go15
-rw-r--r--models/asymkey/gpg_key_test.go466
-rw-r--r--models/asymkey/gpg_key_verify.go119
-rw-r--r--models/asymkey/main_test.go24
-rw-r--r--models/asymkey/ssh_key.go427
-rw-r--r--models/asymkey/ssh_key_authorized_keys.go220
-rw-r--r--models/asymkey/ssh_key_authorized_principals.go142
-rw-r--r--models/asymkey/ssh_key_deploy.go218
-rw-r--r--models/asymkey/ssh_key_fingerprint.go89
-rw-r--r--models/asymkey/ssh_key_object_verification.go85
-rw-r--r--models/asymkey/ssh_key_object_verification_test.go153
-rw-r--r--models/asymkey/ssh_key_parse.go312
-rw-r--r--models/asymkey/ssh_key_principals.go96
-rw-r--r--models/asymkey/ssh_key_test.go513
-rw-r--r--models/asymkey/ssh_key_verify.go55
-rw-r--r--models/auth/TestOrphanedOAuth2Applications/oauth2_application.yaml33
-rw-r--r--models/auth/access_token.go236
-rw-r--r--models/auth/access_token_scope.go350
-rw-r--r--models/auth/access_token_scope_test.go90
-rw-r--r--models/auth/access_token_test.go133
-rw-r--r--models/auth/auth_token.go116
-rw-r--r--models/auth/main_test.go20
-rw-r--r--models/auth/oauth2.go678
-rw-r--r--models/auth/oauth2_list.go32
-rw-r--r--models/auth/oauth2_test.go300
-rw-r--r--models/auth/session.go120
-rw-r--r--models/auth/session_test.go143
-rw-r--r--models/auth/source.go412
-rw-r--r--models/auth/source_test.go61
-rw-r--r--models/auth/twofactor.go166
-rw-r--r--models/auth/webauthn.go209
-rw-r--r--models/auth/webauthn_test.go78
-rw-r--r--models/avatars/avatar.go238
-rw-r--r--models/avatars/avatar_test.go59
-rw-r--r--models/avatars/main_test.go18
-rw-r--r--models/db/collation.go159
-rw-r--r--models/db/common.go53
-rw-r--r--models/db/consistency.go31
-rw-r--r--models/db/context.go331
-rw-r--r--models/db/context_committer_test.go102
-rw-r--r--models/db/context_test.go87
-rw-r--r--models/db/convert.go64
-rwxr-xr-xmodels/db/engine.go354
-rw-r--r--models/db/engine_test.go154
-rw-r--r--models/db/error.go74
-rw-r--r--models/db/index.go148
-rw-r--r--models/db/index_test.go127
-rw-r--r--models/db/install/db.go64
-rw-r--r--models/db/iterate.go43
-rw-r--r--models/db/iterate_test.go45
-rw-r--r--models/db/list.go215
-rw-r--r--models/db/list_test.go53
-rw-r--r--models/db/log.go107
-rw-r--r--models/db/main_test.go17
-rw-r--r--models/db/name.go106
-rw-r--r--models/db/paginator/main_test.go14
-rw-r--r--models/db/paginator/paginator.go7
-rw-r--r--models/db/paginator/paginator_test.go59
-rw-r--r--models/db/search.go33
-rw-r--r--models/db/sequence.go70
-rw-r--r--models/db/sql_postgres_with_schema.go74
-rw-r--r--models/dbfs/dbfile.go368
-rw-r--r--models/dbfs/dbfs.go131
-rw-r--r--models/dbfs/dbfs_test.go191
-rw-r--r--models/dbfs/main_test.go14
-rw-r--r--models/error.go552
-rw-r--r--models/fixture_generation.go50
-rw-r--r--models/fixture_test.go36
-rw-r--r--models/fixtures/TestGetUnmergedPullRequestsByHeadInfoMax/issue.yml12
-rw-r--r--models/fixtures/TestGetUnmergedPullRequestsByHeadInfoMax/pull_request.yml13
-rw-r--r--models/fixtures/TestParseCommitWithSSHSignature/public_key.yml13
-rw-r--r--models/fixtures/access.yml161
-rw-r--r--models/fixtures/access_token.yml33
-rw-r--r--models/fixtures/action.yml76
-rw-r--r--models/fixtures/action_run.yml435
-rw-r--r--models/fixtures/action_run_job.yml42
-rw-r--r--models/fixtures/action_runner.yml20
-rw-r--r--models/fixtures/action_runner_token.yml35
-rw-r--r--models/fixtures/action_task.yml59
-rw-r--r--models/fixtures/attachment.yml155
-rw-r--r--models/fixtures/branch.yml47
-rw-r--r--models/fixtures/collaboration.yml53
-rw-r--r--models/fixtures/comment.yml115
-rw-r--r--models/fixtures/commit_status.yml65
-rw-r--r--models/fixtures/commit_status_index.yml5
-rw-r--r--models/fixtures/deploy_key.yml1
-rw-r--r--models/fixtures/email_address.yml319
-rw-r--r--models/fixtures/external_login_user.yml1
-rw-r--r--models/fixtures/follow.yml19
-rw-r--r--models/fixtures/forgejo_blocked_user.yml5
-rw-r--r--models/fixtures/gpg_key.yml23
-rw-r--r--models/fixtures/gpg_key_import.yml1
-rw-r--r--models/fixtures/hook_task.yml43
-rw-r--r--models/fixtures/issue.yml374
-rw-r--r--models/fixtures/issue_assignees.yml16
-rw-r--r--models/fixtures/issue_index.yml27
-rw-r--r--models/fixtures/issue_label.yml19
-rw-r--r--models/fixtures/issue_user.yml20
-rw-r--r--models/fixtures/issue_watch.yml31
-rw-r--r--models/fixtures/label.yml98
-rw-r--r--models/fixtures/lfs_meta_object.yml32
-rw-r--r--models/fixtures/login_source.yml1
-rw-r--r--models/fixtures/milestone.yml54
-rw-r--r--models/fixtures/mirror.yml49
-rw-r--r--models/fixtures/notice.yml14
-rw-r--r--models/fixtures/notification.yml54
-rw-r--r--models/fixtures/oauth2_application.yml20
-rw-r--r--models/fixtures/oauth2_authorization_code.yml15
-rw-r--r--models/fixtures/oauth2_grant.yml31
-rw-r--r--models/fixtures/org_user.yml119
-rw-r--r--models/fixtures/project.yml71
-rw-r--r--models/fixtures/project_board.yml77
-rw-r--r--models/fixtures/project_issue.yml23
-rw-r--r--models/fixtures/protected_branch.yml1
-rw-r--r--models/fixtures/protected_tag.yml24
-rw-r--r--models/fixtures/public_key.yml11
-rw-r--r--models/fixtures/pull_request.yml119
-rw-r--r--models/fixtures/push_mirror.yml1
-rw-r--r--models/fixtures/reaction.yml39
-rw-r--r--models/fixtures/release.yml166
-rw-r--r--models/fixtures/renamed_branch.yml5
-rw-r--r--models/fixtures/repo_archiver.yml1
-rw-r--r--models/fixtures/repo_indexer_status.yml1
-rw-r--r--models/fixtures/repo_redirect.yml5
-rw-r--r--models/fixtures/repo_topic.yml27
-rw-r--r--models/fixtures/repo_transfer.yml7
-rw-r--r--models/fixtures/repo_unit.yml797
-rw-r--r--models/fixtures/repository.yml1826
-rw-r--r--models/fixtures/review.yml200
-rw-r--r--models/fixtures/star.yml9
-rw-r--r--models/fixtures/stopwatch.yml11
-rw-r--r--models/fixtures/system_setting.yml15
-rw-r--r--models/fixtures/team.yml241
-rw-r--r--models/fixtures/team_repo.yml77
-rw-r--r--models/fixtures/team_unit.yml324
-rw-r--r--models/fixtures/team_user.yml149
-rw-r--r--models/fixtures/topic.yml29
-rw-r--r--models/fixtures/tracked_time.yml71
-rw-r--r--models/fixtures/two_factor.yml9
-rw-r--r--models/fixtures/user.yml1520
-rw-r--r--models/fixtures/user_open_id.yml17
-rw-r--r--models/fixtures/user_redirect.yml4
-rw-r--r--models/fixtures/watch.yml35
-rw-r--r--models/fixtures/webauthn_credential.yml10
-rw-r--r--models/fixtures/webhook.yml37
-rw-r--r--models/forgefed/federationhost.go52
-rw-r--r--models/forgefed/federationhost_repository.go61
-rw-r--r--models/forgefed/federationhost_test.go78
-rw-r--r--models/forgefed/nodeinfo.go123
-rw-r--r--models/forgefed/nodeinfo_test.go92
-rw-r--r--models/forgejo/semver/main_test.go17
-rw-r--r--models/forgejo/semver/semver.go80
-rw-r--r--models/forgejo/semver/semver_test.go47
-rw-r--r--models/forgejo_migrations/main_test.go14
-rw-r--r--models/forgejo_migrations/migrate.go192
-rw-r--r--models/forgejo_migrations/migrate_test.go39
-rw-r--r--models/forgejo_migrations/v13.go15
-rw-r--r--models/forgejo_migrations/v14.go43
-rw-r--r--models/forgejo_migrations/v15.go33
-rw-r--r--models/forgejo_migrations/v16.go17
-rw-r--r--models/forgejo_migrations/v17.go14
-rw-r--r--models/forgejo_migrations/v18.go18
-rw-r--r--models/forgejo_migrations/v19.go14
-rw-r--r--models/forgejo_migrations/v1_20/v1.go21
-rw-r--r--models/forgejo_migrations/v1_20/v2.go15
-rw-r--r--models/forgejo_migrations/v1_20/v3.go26
-rw-r--r--models/forgejo_migrations/v1_22/main_test.go14
-rw-r--r--models/forgejo_migrations/v1_22/v10.go17
-rw-r--r--models/forgejo_migrations/v1_22/v11.go19
-rw-r--r--models/forgejo_migrations/v1_22/v12.go18
-rw-r--r--models/forgejo_migrations/v1_22/v4.go17
-rw-r--r--models/forgejo_migrations/v1_22/v5.go22
-rw-r--r--models/forgejo_migrations/v1_22/v6.go24
-rw-r--r--models/forgejo_migrations/v1_22/v7.go17
-rw-r--r--models/forgejo_migrations/v1_22/v8.go51
-rw-r--r--models/forgejo_migrations/v1_22/v8_test.go35
-rw-r--r--models/forgejo_migrations/v1_22/v9.go15
-rw-r--r--models/forgejo_migrations/v20.go52
-rw-r--r--models/forgejo_migrations/v21.go16
-rw-r--r--models/forgejo_migrations/v22.go17
-rw-r--r--models/git/TestIterateRepositoryIDsWithLFSMetaObjects/lfs_meta_object.yaml7
-rw-r--r--models/git/branch.go434
-rw-r--r--models/git/branch_list.go132
-rw-r--r--models/git/branch_test.go195
-rw-r--r--models/git/commit_status.go519
-rw-r--r--models/git/commit_status_summary.go88
-rw-r--r--models/git/commit_status_test.go267
-rw-r--r--models/git/lfs.go419
-rw-r--r--models/git/lfs_lock.go209
-rw-r--r--models/git/lfs_lock_list.go54
-rw-r--r--models/git/lfs_test.go102
-rw-r--r--models/git/main_test.go18
-rw-r--r--models/git/protected_banch_list_test.go77
-rw-r--r--models/git/protected_branch.go511
-rw-r--r--models/git/protected_branch_list.go95
-rw-r--r--models/git/protected_branch_test.go69
-rw-r--r--models/git/protected_tag.go150
-rw-r--r--models/git/protected_tag_test.go166
-rw-r--r--models/issues/TestGetUIDsAndStopwatch/stopwatch.yml11
-rw-r--r--models/issues/assignees.go177
-rw-r--r--models/issues/assignees_test.go95
-rw-r--r--models/issues/comment.go1333
-rw-r--r--models/issues/comment_code.go181
-rw-r--r--models/issues/comment_list.go488
-rw-r--r--models/issues/comment_list_test.go86
-rw-r--r--models/issues/comment_test.go127
-rw-r--r--models/issues/content_history.go242
-rw-r--r--models/issues/content_history_test.go94
-rw-r--r--models/issues/dependency.go222
-rw-r--r--models/issues/dependency_test.go63
-rw-r--r--models/issues/issue.go939
-rw-r--r--models/issues/issue_index.go39
-rw-r--r--models/issues/issue_index_test.go39
-rw-r--r--models/issues/issue_label.go507
-rw-r--r--models/issues/issue_label_test.go138
-rw-r--r--models/issues/issue_list.go622
-rw-r--r--models/issues/issue_list_test.go129
-rw-r--r--models/issues/issue_lock.go66
-rw-r--r--models/issues/issue_project.go162
-rw-r--r--models/issues/issue_search.go489
-rw-r--r--models/issues/issue_stats.go191
-rw-r--r--models/issues/issue_stats_test.go34
-rw-r--r--models/issues/issue_test.go498
-rw-r--r--models/issues/issue_update.go795
-rw-r--r--models/issues/issue_user.go96
-rw-r--r--models/issues/issue_user_test.go61
-rw-r--r--models/issues/issue_watch.go134
-rw-r--r--models/issues/issue_watch_test.go68
-rw-r--r--models/issues/issue_xref.go364
-rw-r--r--models/issues/issue_xref_test.go185
-rw-r--r--models/issues/label.go509
-rw-r--r--models/issues/label_test.go422
-rw-r--r--models/issues/main_test.go33
-rw-r--r--models/issues/milestone.go394
-rw-r--r--models/issues/milestone_list.go195
-rw-r--r--models/issues/milestone_test.go371
-rw-r--r--models/issues/pull.go1105
-rw-r--r--models/issues/pull_list.go264
-rw-r--r--models/issues/pull_test.go476
-rw-r--r--models/issues/reaction.go373
-rw-r--r--models/issues/reaction_test.go178
-rw-r--r--models/issues/review.go1056
-rw-r--r--models/issues/review_list.go200
-rw-r--r--models/issues/review_test.go321
-rw-r--r--models/issues/stopwatch.go281
-rw-r--r--models/issues/stopwatch_test.go119
-rw-r--r--models/issues/tracked_time.go386
-rw-r--r--models/issues/tracked_time_test.go135
-rw-r--r--models/main_test.go33
-rw-r--r--models/migrations/base/db.go436
-rw-r--r--models/migrations/base/db_test.go98
-rw-r--r--models/migrations/base/hash.go16
-rw-r--r--models/migrations/base/main_test.go14
-rw-r--r--models/migrations/fixtures/Test_AddCombinedIndexToIssueUser/issue_user.yml13
-rw-r--r--models/migrations/fixtures/Test_AddConfidentialClientColumnToOAuth2ApplicationTable/oauth2_application.yml2
-rw-r--r--models/migrations/fixtures/Test_AddHeaderAuthorizationEncryptedColWebhook/expected_webhook.yml9
-rw-r--r--models/migrations/fixtures/Test_AddHeaderAuthorizationEncryptedColWebhook/hook_task.yml8
-rw-r--r--models/migrations/fixtures/Test_AddHeaderAuthorizationEncryptedColWebhook/webhook.yml10
-rw-r--r--models/migrations/fixtures/Test_AddIssueResourceIndexTable/issue.yml4
-rw-r--r--models/migrations/fixtures/Test_AddPayloadVersionToHookTaskTable/hook_task.yml16
-rw-r--r--models/migrations/fixtures/Test_AddPayloadVersionToHookTaskTable/hook_task_migrated.yml18
-rw-r--r--models/migrations/fixtures/Test_AddRepoIDForAttachment/attachment.yml11
-rw-r--r--models/migrations/fixtures/Test_AddRepoIDForAttachment/issue.yml3
-rw-r--r--models/migrations/fixtures/Test_AddRepoIDForAttachment/release.yml3
-rw-r--r--models/migrations/fixtures/Test_AddUniqueIndexForProjectIssue/project_issue.yml9
-rw-r--r--models/migrations/fixtures/Test_CheckProjectColumnsConsistency/project.yml23
-rw-r--r--models/migrations/fixtures/Test_CheckProjectColumnsConsistency/project_board.yml26
-rw-r--r--models/migrations/fixtures/Test_DeleteOrphanedIssueLabels/issue_label.yml28
-rw-r--r--models/migrations/fixtures/Test_DeleteOrphanedIssueLabels/label.yml43
-rw-r--r--models/migrations/fixtures/Test_RemigrateU2FCredentials/expected_webauthn_credential.yml12
-rw-r--r--models/migrations/fixtures/Test_RemigrateU2FCredentials/u2f_registration.yml21
-rw-r--r--models/migrations/fixtures/Test_RemigrateU2FCredentials/webauthn_credential.yml30
-rw-r--r--models/migrations/fixtures/Test_RemoveInvalidLabels/comment.yml52
-rw-r--r--models/migrations/fixtures/Test_RemoveInvalidLabels/issue.yml21
-rw-r--r--models/migrations/fixtures/Test_RemoveInvalidLabels/issue_label.yml35
-rw-r--r--models/migrations/fixtures/Test_RemoveInvalidLabels/label.yml25
-rw-r--r--models/migrations/fixtures/Test_RemoveInvalidLabels/repository.yml17
-rw-r--r--models/migrations/fixtures/Test_RemoveSSHSignaturesFromReleaseNotes/release.yml22
-rw-r--r--models/migrations/fixtures/Test_RepositoryFormat/comment.yml3
-rw-r--r--models/migrations/fixtures/Test_RepositoryFormat/commit_status.yml3
-rw-r--r--models/migrations/fixtures/Test_RepositoryFormat/pull_request.yml5
-rw-r--r--models/migrations/fixtures/Test_RepositoryFormat/release.yml3
-rw-r--r--models/migrations/fixtures/Test_RepositoryFormat/repo_archiver.yml3
-rw-r--r--models/migrations/fixtures/Test_RepositoryFormat/repo_indexer_status.yml3
-rw-r--r--models/migrations/fixtures/Test_RepositoryFormat/repository.yml11
-rw-r--r--models/migrations/fixtures/Test_RepositoryFormat/review_state.yml5
-rw-r--r--models/migrations/fixtures/Test_StoreWebauthnCredentialIDAsBytes/expected_webauthn_credential.yml9
-rw-r--r--models/migrations/fixtures/Test_StoreWebauthnCredentialIDAsBytes/webauthn_credential.yml30
-rw-r--r--models/migrations/fixtures/Test_UnwrapLDAPSourceCfg/login_source.yml48
-rw-r--r--models/migrations/fixtures/Test_UpdateBadgeColName/badge.yml4
-rw-r--r--models/migrations/fixtures/Test_UpdateOpenMilestoneCounts/expected_milestone.yml19
-rw-r--r--models/migrations/fixtures/Test_UpdateOpenMilestoneCounts/issue.yml25
-rw-r--r--models/migrations/fixtures/Test_UpdateOpenMilestoneCounts/milestone.yml19
-rw-r--r--models/migrations/migrations.go721
-rw-r--r--models/migrations/test/tests.go274
-rw-r--r--models/migrations/v1_10/v100.go82
-rw-r--r--models/migrations/v1_10/v101.go18
-rw-r--r--models/migrations/v1_10/v88.go65
-rw-r--r--models/migrations/v1_10/v89.go35
-rw-r--r--models/migrations/v1_10/v90.go17
-rw-r--r--models/migrations/v1_10/v91.go25
-rw-r--r--models/migrations/v1_10/v92.go14
-rw-r--r--models/migrations/v1_10/v93.go15
-rw-r--r--models/migrations/v1_10/v94.go23
-rw-r--r--models/migrations/v1_10/v95.go19
-rw-r--r--models/migrations/v1_10/v96.go64
-rw-r--r--models/migrations/v1_10/v97.go14
-rw-r--r--models/migrations/v1_10/v98.go16
-rw-r--r--models/migrations/v1_10/v99.go38
-rw-r--r--models/migrations/v1_11/v102.go22
-rw-r--r--models/migrations/v1_11/v103.go17
-rw-r--r--models/migrations/v1_11/v104.go34
-rw-r--r--models/migrations/v1_11/v105.go23
-rw-r--r--models/migrations/v1_11/v106.go25
-rw-r--r--models/migrations/v1_11/v107.go17
-rw-r--r--models/migrations/v1_11/v108.go17
-rw-r--r--models/migrations/v1_11/v109.go16
-rw-r--r--models/migrations/v1_11/v110.go26
-rw-r--r--models/migrations/v1_11/v111.go437
-rw-r--r--models/migrations/v1_11/v112.go47
-rw-r--r--models/migrations/v1_11/v113.go22
-rw-r--r--models/migrations/v1_11/v114.go50
-rw-r--r--models/migrations/v1_11/v115.go159
-rw-r--r--models/migrations/v1_11/v116.go32
-rw-r--r--models/migrations/v1_12/v117.go16
-rw-r--r--models/migrations/v1_12/v118.go25
-rw-r--r--models/migrations/v1_12/v119.go15
-rw-r--r--models/migrations/v1_12/v120.go19
-rw-r--r--models/migrations/v1_12/v121.go16
-rw-r--r--models/migrations/v1_12/v122.go16
-rw-r--r--models/migrations/v1_12/v123.go17
-rw-r--r--models/migrations/v1_12/v124.go23
-rw-r--r--models/migrations/v1_12/v125.go22
-rw-r--r--models/migrations/v1_12/v126.go24
-rw-r--r--models/migrations/v1_12/v127.go44
-rw-r--r--models/migrations/v1_12/v128.go127
-rw-r--r--models/migrations/v1_12/v129.go16
-rw-r--r--models/migrations/v1_12/v130.go111
-rw-r--r--models/migrations/v1_12/v131.go21
-rw-r--r--models/migrations/v1_12/v132.go21
-rw-r--r--models/migrations/v1_12/v133.go15
-rw-r--r--models/migrations/v1_12/v134.go115
-rw-r--r--models/migrations/v1_12/v135.go21
-rw-r--r--models/migrations/v1_12/v136.go125
-rw-r--r--models/migrations/v1_12/v137.go15
-rw-r--r--models/migrations/v1_12/v138.go21
-rw-r--r--models/migrations/v1_12/v139.go23
-rw-r--r--models/migrations/v1_13/v140.go56
-rw-r--r--models/migrations/v1_13/v141.go21
-rw-r--r--models/migrations/v1_13/v142.go24
-rw-r--r--models/migrations/v1_13/v143.go51
-rw-r--r--models/migrations/v1_13/v144.go25
-rw-r--r--models/migrations/v1_13/v145.go55
-rw-r--r--models/migrations/v1_13/v146.go83
-rw-r--r--models/migrations/v1_13/v147.go153
-rw-r--r--models/migrations/v1_13/v148.go13
-rw-r--r--models/migrations/v1_13/v149.go24
-rw-r--r--models/migrations/v1_13/v150.go39
-rw-r--r--models/migrations/v1_13/v151.go166
-rw-r--r--models/migrations/v1_13/v152.go13
-rw-r--r--models/migrations/v1_13/v153.go24
-rw-r--r--models/migrations/v1_13/v154.go55
-rw-r--r--models/migrations/v1_14/main_test.go14
-rw-r--r--models/migrations/v1_14/v155.go21
-rw-r--r--models/migrations/v1_14/v156.go177
-rw-r--r--models/migrations/v1_14/v157.go66
-rw-r--r--models/migrations/v1_14/v158.go101
-rw-r--r--models/migrations/v1_14/v159.go38
-rw-r--r--models/migrations/v1_14/v160.go16
-rw-r--r--models/migrations/v1_14/v161.go73
-rw-r--r--models/migrations/v1_14/v162.go62
-rw-r--r--models/migrations/v1_14/v163.go35
-rw-r--r--models/migrations/v1_14/v164.go37
-rw-r--r--models/migrations/v1_14/v165.go57
-rw-r--r--models/migrations/v1_14/v166.go112
-rw-r--r--models/migrations/v1_14/v167.go23
-rw-r--r--models/migrations/v1_14/v168.go10
-rw-r--r--models/migrations/v1_14/v169.go13
-rw-r--r--models/migrations/v1_14/v170.go21
-rw-r--r--models/migrations/v1_14/v171.go21
-rw-r--r--models/migrations/v1_14/v172.go19
-rw-r--r--models/migrations/v1_14/v173.go21
-rw-r--r--models/migrations/v1_14/v174.go34
-rw-r--r--models/migrations/v1_14/v175.go53
-rw-r--r--models/migrations/v1_14/v176.go76
-rw-r--r--models/migrations/v1_14/v176_test.go128
-rw-r--r--models/migrations/v1_14/v177.go42
-rw-r--r--models/migrations/v1_14/v177_test.go89
-rw-r--r--models/migrations/v1_15/main_test.go14
-rw-r--r--models/migrations/v1_15/v178.go17
-rw-r--r--models/migrations/v1_15/v179.go28
-rw-r--r--models/migrations/v1_15/v180.go121
-rw-r--r--models/migrations/v1_15/v181.go91
-rw-r--r--models/migrations/v1_15/v181_test.go56
-rw-r--r--models/migrations/v1_15/v182.go41
-rw-r--r--models/migrations/v1_15/v182_test.go61
-rw-r--r--models/migrations/v1_15/v183.go38
-rw-r--r--models/migrations/v1_15/v184.go66
-rw-r--r--models/migrations/v1_15/v185.go21
-rw-r--r--models/migrations/v1_15/v186.go25
-rw-r--r--models/migrations/v1_15/v187.go47
-rw-r--r--models/migrations/v1_15/v188.go14
-rw-r--r--models/migrations/v1_16/main_test.go14
-rw-r--r--models/migrations/v1_16/v189.go111
-rw-r--r--models/migrations/v1_16/v189_test.go83
-rw-r--r--models/migrations/v1_16/v190.go23
-rw-r--r--models/migrations/v1_16/v191.go28
-rw-r--r--models/migrations/v1_16/v192.go19
-rw-r--r--models/migrations/v1_16/v193.go32
-rw-r--r--models/migrations/v1_16/v193_test.go81
-rw-r--r--models/migrations/v1_16/v194.go21
-rw-r--r--models/migrations/v1_16/v195.go46
-rw-r--r--models/migrations/v1_16/v195_test.go64
-rw-r--r--models/migrations/v1_16/v196.go21
-rw-r--r--models/migrations/v1_16/v197.go19
-rw-r--r--models/migrations/v1_16/v198.go32
-rw-r--r--models/migrations/v1_16/v199.go6
-rw-r--r--models/migrations/v1_16/v200.go22
-rw-r--r--models/migrations/v1_16/v201.go14
-rw-r--r--models/migrations/v1_16/v202.go23
-rw-r--r--models/migrations/v1_16/v203.go17
-rw-r--r--models/migrations/v1_16/v204.go14
-rw-r--r--models/migrations/v1_16/v205.go42
-rw-r--r--models/migrations/v1_16/v206.go28
-rw-r--r--models/migrations/v1_16/v207.go14
-rw-r--r--models/migrations/v1_16/v208.go13
-rw-r--r--models/migrations/v1_16/v209.go16
-rw-r--r--models/migrations/v1_16/v210.go177
-rw-r--r--models/migrations/v1_16/v210_test.go88
-rw-r--r--models/migrations/v1_17/main_test.go14
-rw-r--r--models/migrations/v1_17/v211.go12
-rw-r--r--models/migrations/v1_17/v212.go93
-rw-r--r--models/migrations/v1_17/v213.go17
-rw-r--r--models/migrations/v1_17/v214.go22
-rw-r--r--models/migrations/v1_17/v215.go24
-rw-r--r--models/migrations/v1_17/v216.go7
-rw-r--r--models/migrations/v1_17/v217.go25
-rw-r--r--models/migrations/v1_17/v218.go52
-rw-r--r--models/migrations/v1_17/v219.go30
-rw-r--r--models/migrations/v1_17/v220.go23
-rw-r--r--models/migrations/v1_17/v221.go74
-rw-r--r--models/migrations/v1_17/v221_test.go63
-rw-r--r--models/migrations/v1_17/v222.go64
-rw-r--r--models/migrations/v1_17/v223.go98
-rw-r--r--models/migrations/v1_18/main_test.go14
-rw-r--r--models/migrations/v1_18/v224.go27
-rw-r--r--models/migrations/v1_18/v225.go28
-rw-r--r--models/migrations/v1_18/v226.go14
-rw-r--r--models/migrations/v1_18/v227.go23
-rw-r--r--models/migrations/v1_18/v228.go25
-rw-r--r--models/migrations/v1_18/v229.go46
-rw-r--r--models/migrations/v1_18/v229_test.go45
-rw-r--r--models/migrations/v1_18/v230.go17
-rw-r--r--models/migrations/v1_18/v230_test.go47
-rw-r--r--models/migrations/v1_19/main_test.go14
-rw-r--r--models/migrations/v1_19/v231.go18
-rw-r--r--models/migrations/v1_19/v232.go25
-rw-r--r--models/migrations/v1_19/v233.go181
-rw-r--r--models/migrations/v1_19/v233_test.go86
-rw-r--r--models/migrations/v1_19/v234.go28
-rw-r--r--models/migrations/v1_19/v235.go16
-rw-r--r--models/migrations/v1_19/v236.go23
-rw-r--r--models/migrations/v1_19/v237.go15
-rw-r--r--models/migrations/v1_19/v238.go27
-rw-r--r--models/migrations/v1_19/v239.go22
-rw-r--r--models/migrations/v1_19/v240.go176
-rw-r--r--models/migrations/v1_19/v241.go17
-rw-r--r--models/migrations/v1_19/v242.go26
-rw-r--r--models/migrations/v1_19/v243.go16
-rw-r--r--models/migrations/v1_20/main_test.go14
-rw-r--r--models/migrations/v1_20/v244.go22
-rw-r--r--models/migrations/v1_20/v245.go69
-rw-r--r--models/migrations/v1_20/v246.go16
-rw-r--r--models/migrations/v1_20/v247.go50
-rw-r--r--models/migrations/v1_20/v248.go14
-rw-r--r--models/migrations/v1_20/v249.go45
-rw-r--r--models/migrations/v1_20/v250.go135
-rw-r--r--models/migrations/v1_20/v251.go47
-rw-r--r--models/migrations/v1_20/v252.go47
-rw-r--r--models/migrations/v1_20/v253.go49
-rw-r--r--models/migrations/v1_20/v254.go18
-rw-r--r--models/migrations/v1_20/v255.go23
-rw-r--r--models/migrations/v1_20/v256.go23
-rw-r--r--models/migrations/v1_20/v257.go33
-rw-r--r--models/migrations/v1_20/v258.go16
-rw-r--r--models/migrations/v1_20/v259.go360
-rw-r--r--models/migrations/v1_20/v259_test.go111
-rw-r--r--models/migrations/v1_21/main_test.go14
-rw-r--r--models/migrations/v1_21/v260.go26
-rw-r--r--models/migrations/v1_21/v261.go24
-rw-r--r--models/migrations/v1_21/v262.go16
-rw-r--r--models/migrations/v1_21/v263.go46
-rw-r--r--models/migrations/v1_21/v264.go93
-rw-r--r--models/migrations/v1_21/v265.go19
-rw-r--r--models/migrations/v1_21/v266.go23
-rw-r--r--models/migrations/v1_21/v267.go23
-rw-r--r--models/migrations/v1_21/v268.go16
-rw-r--r--models/migrations/v1_21/v269.go12
-rw-r--r--models/migrations/v1_21/v270.go26
-rw-r--r--models/migrations/v1_21/v271.go16
-rw-r--r--models/migrations/v1_21/v272.go14
-rw-r--r--models/migrations/v1_21/v273.go45
-rw-r--r--models/migrations/v1_21/v274.go36
-rw-r--r--models/migrations/v1_21/v275.go15
-rw-r--r--models/migrations/v1_21/v276.go156
-rw-r--r--models/migrations/v1_21/v277.go16
-rw-r--r--models/migrations/v1_21/v278.go16
-rw-r--r--models/migrations/v1_21/v279.go20
-rw-r--r--models/migrations/v1_22/main_test.go14
-rw-r--r--models/migrations/v1_22/v280.go29
-rw-r--r--models/migrations/v1_22/v281.go21
-rw-r--r--models/migrations/v1_22/v282.go16
-rw-r--r--models/migrations/v1_22/v283.go38
-rw-r--r--models/migrations/v1_22/v283_test.go28
-rw-r--r--models/migrations/v1_22/v284.go18
-rw-r--r--models/migrations/v1_22/v285.go22
-rw-r--r--models/migrations/v1_22/v286.go75
-rw-r--r--models/migrations/v1_22/v286_test.go119
-rw-r--r--models/migrations/v1_22/v287.go46
-rw-r--r--models/migrations/v1_22/v288.go26
-rw-r--r--models/migrations/v1_22/v289.go21
-rw-r--r--models/migrations/v1_22/v290.go46
-rw-r--r--models/migrations/v1_22/v290_test.go59
-rw-r--r--models/migrations/v1_22/v291.go18
-rw-r--r--models/migrations/v1_22/v292.go9
-rw-r--r--models/migrations/v1_22/v293.go108
-rw-r--r--models/migrations/v1_22/v293_test.go45
-rw-r--r--models/migrations/v1_22/v294.go44
-rw-r--r--models/migrations/v1_22/v294_test.go53
-rw-r--r--models/migrations/v1_22/v295.go18
-rw-r--r--models/migrations/v1_22/v296.go16
-rw-r--r--models/migrations/v1_22/v298.go10
-rw-r--r--models/migrations/v1_23/main_test.go14
-rw-r--r--models/migrations/v1_23/v299.go18
-rw-r--r--models/migrations/v1_23/v300.go17
-rw-r--r--models/migrations/v1_23/v301.go14
-rw-r--r--models/migrations/v1_23/v302.go18
-rw-r--r--models/migrations/v1_6/v70.go110
-rw-r--r--models/migrations/v1_6/v71.go79
-rw-r--r--models/migrations/v1_6/v72.go30
-rw-r--r--models/migrations/v1_7/v73.go18
-rw-r--r--models/migrations/v1_7/v74.go15
-rw-r--r--models/migrations/v1_7/v75.go32
-rw-r--r--models/migrations/v1_8/v76.go74
-rw-r--r--models/migrations/v1_8/v77.go16
-rw-r--r--models/migrations/v1_8/v78.go43
-rw-r--r--models/migrations/v1_8/v79.go25
-rw-r--r--models/migrations/v1_8/v80.go16
-rw-r--r--models/migrations/v1_8/v81.go28
-rw-r--r--models/migrations/v1_9/v82.go133
-rw-r--r--models/migrations/v1_9/v83.go27
-rw-r--r--models/migrations/v1_9/v84.go17
-rw-r--r--models/migrations/v1_9/v85.go118
-rw-r--r--models/migrations/v1_9/v86.go16
-rw-r--r--models/migrations/v1_9/v87.go17
-rw-r--r--models/org.go102
-rw-r--r--models/org_team.go551
-rw-r--r--models/org_team_test.go170
-rw-r--r--models/org_test.go62
-rw-r--r--models/organization/TestInconsistentOwnerTeam/team.yml10
-rw-r--r--models/organization/TestInconsistentOwnerTeam/team_unit.yml59
-rw-r--r--models/organization/main_test.go21
-rw-r--r--models/organization/mini_org.go78
-rw-r--r--models/organization/org.go833
-rw-r--r--models/organization/org_repo.go17
-rw-r--r--models/organization/org_test.go548
-rw-r--r--models/organization/org_user.go138
-rw-r--r--models/organization/org_user_test.go155
-rw-r--r--models/organization/team.go310
-rw-r--r--models/organization/team_invite.go161
-rw-r--r--models/organization/team_invite_test.go49
-rw-r--r--models/organization/team_list.go128
-rw-r--r--models/organization/team_repo.go85
-rw-r--r--models/organization/team_test.go250
-rw-r--r--models/organization/team_unit.go51
-rw-r--r--models/organization/team_user.go89
-rw-r--r--models/packages/alpine/search.go53
-rw-r--r--models/packages/conan/references.go170
-rw-r--r--models/packages/conan/search.go149
-rw-r--r--models/packages/conda/search.go63
-rw-r--r--models/packages/container/const.go9
-rw-r--r--models/packages/container/search.go285
-rw-r--r--models/packages/cran/search.go90
-rw-r--r--models/packages/debian/search.go157
-rw-r--r--models/packages/debian/search_test.go93
-rw-r--r--models/packages/descriptor.go260
-rw-r--r--models/packages/nuget/search.go70
-rw-r--r--models/packages/package.go351
-rw-r--r--models/packages/package_blob.go154
-rw-r--r--models/packages/package_blob_upload.go79
-rw-r--r--models/packages/package_cleanup_rule.go109
-rw-r--r--models/packages/package_file.go232
-rw-r--r--models/packages/package_property.go121
-rw-r--r--models/packages/package_test.go319
-rw-r--r--models/packages/package_version.go348
-rw-r--r--models/packages/rpm/search.go23
-rw-r--r--models/perm/access/access.go250
-rw-r--r--models/perm/access/access_test.go127
-rw-r--r--models/perm/access/main_test.go20
-rw-r--r--models/perm/access/repo_permission.go450
-rw-r--r--models/perm/access_mode.go57
-rw-r--r--models/project/column.go359
-rw-r--r--models/project/column_test.go128
-rw-r--r--models/project/issue.go143
-rw-r--r--models/project/main_test.go23
-rw-r--r--models/project/project.go451
-rw-r--r--models/project/project_test.go124
-rw-r--r--models/project/template.go45
-rw-r--r--models/pull/automerge.go97
-rw-r--r--models/pull/review_state.go139
-rw-r--r--models/quota/default.go25
-rw-r--r--models/quota/errors.go127
-rw-r--r--models/quota/group.go401
-rw-r--r--models/quota/limit_subject.go69
-rw-r--r--models/quota/quota.go36
-rw-r--r--models/quota/quota_group_test.go208
-rw-r--r--models/quota/quota_rule_test.go304
-rw-r--r--models/quota/rule.go127
-rw-r--r--models/quota/used.go252
-rw-r--r--models/repo.go362
-rw-r--r--models/repo/TestSearchRepositoryIDsByCondition/repository.yml30
-rw-r--r--models/repo/archive_download_count.go90
-rw-r--r--models/repo/archive_download_count_test.go65
-rw-r--r--models/repo/archiver.go139
-rw-r--r--models/repo/attachment.go287
-rw-r--r--models/repo/attachment_test.go105
-rw-r--r--models/repo/avatar.go96
-rw-r--r--models/repo/collaboration.go170
-rw-r--r--models/repo/collaboration_test.go186
-rw-r--r--models/repo/following_repo.go39
-rw-r--r--models/repo/following_repo_test.go31
-rw-r--r--models/repo/fork.go120
-rw-r--r--models/repo/fork_test.go34
-rw-r--r--models/repo/git.go36
-rw-r--r--models/repo/issue.go60
-rw-r--r--models/repo/language_stats.go242
-rw-r--r--models/repo/main_test.go21
-rw-r--r--models/repo/mirror.go123
-rw-r--r--models/repo/pushmirror.go188
-rw-r--r--models/repo/pushmirror_test.go79
-rw-r--r--models/repo/redirect.go86
-rw-r--r--models/repo/redirect_test.go78
-rw-r--r--models/repo/release.go566
-rw-r--r--models/repo/release_test.go27
-rw-r--r--models/repo/repo.go951
-rw-r--r--models/repo/repo_flags.go102
-rw-r--r--models/repo/repo_flags_test.go115
-rw-r--r--models/repo/repo_indexer.go114
-rw-r--r--models/repo/repo_list.go757
-rw-r--r--models/repo/repo_list_test.go450
-rw-r--r--models/repo/repo_repository.go60
-rw-r--r--models/repo/repo_test.go230
-rw-r--r--models/repo/repo_unit.go317
-rw-r--r--models/repo/repo_unit_test.go39
-rw-r--r--models/repo/search.go53
-rw-r--r--models/repo/star.go101
-rw-r--r--models/repo/star_test.go72
-rw-r--r--models/repo/topic.go389
-rw-r--r--models/repo/topic_test.go83
-rw-r--r--models/repo/update.go145
-rw-r--r--models/repo/upload.go175
-rw-r--r--models/repo/user_repo.go197
-rw-r--r--models/repo/user_repo_test.go96
-rw-r--r--models/repo/watch.go190
-rw-r--r--models/repo/watch_test.go153
-rw-r--r--models/repo/wiki.go96
-rw-r--r--models/repo/wiki_test.go46
-rw-r--r--models/repo_test.go24
-rw-r--r--models/repo_transfer.go185
-rw-r--r--models/repo_transfer_test.go28
-rw-r--r--models/secret/secret.go167
-rw-r--r--models/shared/types/ownertype.go29
-rw-r--r--models/system/appstate.go56
-rw-r--r--models/system/main_test.go19
-rw-r--r--models/system/notice.go128
-rw-r--r--models/system/notice_test.go110
-rw-r--r--models/system/setting.go152
-rw-r--r--models/system/setting_test.go52
-rw-r--r--models/unit/unit.go437
-rw-r--r--models/unit/unit_test.go96
-rw-r--r--models/unittest/consistency.go192
-rw-r--r--models/unittest/fixtures.go144
-rw-r--r--models/unittest/fscopy.go102
-rw-r--r--models/unittest/mock_http.go115
-rw-r--r--models/unittest/reflection.go40
-rw-r--r--models/unittest/testdb.go267
-rw-r--r--models/unittest/unit_tests.go164
-rw-r--r--models/user/avatar.go115
-rw-r--r--models/user/badge.go41
-rw-r--r--models/user/block.go91
-rw-r--r--models/user/block_test.go78
-rw-r--r--models/user/email_address.go483
-rw-r--r--models/user/email_address_test.go222
-rw-r--r--models/user/error.go109
-rw-r--r--models/user/external_login_user.go184
-rw-r--r--models/user/federated_user.go35
-rw-r--r--models/user/federated_user_test.go29
-rw-r--r--models/user/fixtures/user.yml36
-rw-r--r--models/user/follow.go85
-rw-r--r--models/user/follow_test.go24
-rw-r--r--models/user/list.go83
-rw-r--r--models/user/main_test.go19
-rw-r--r--models/user/must_change_password.go49
-rw-r--r--models/user/openid.go111
-rw-r--r--models/user/openid_test.go68
-rw-r--r--models/user/redirect.go87
-rw-r--r--models/user/redirect_test.go26
-rw-r--r--models/user/search.go178
-rw-r--r--models/user/setting.go212
-rw-r--r--models/user/setting_keys.go17
-rw-r--r--models/user/setting_test.go61
-rw-r--r--models/user/user.go1365
-rw-r--r--models/user/user_repository.go83
-rw-r--r--models/user/user_system.go97
-rw-r--r--models/user/user_test.go781
-rw-r--r--models/user/user_update.go15
-rw-r--r--models/webhook/hooktask.go262
-rw-r--r--models/webhook/main_test.go19
-rw-r--r--models/webhook/webhook.go516
-rw-r--r--models/webhook/webhook_system.go83
-rw-r--r--models/webhook/webhook_test.go350
769 files changed, 89109 insertions, 0 deletions
diff --git a/models/actions/artifact.go b/models/actions/artifact.go
new file mode 100644
index 0000000..0bc66ba
--- /dev/null
+++ b/models/actions/artifact.go
@@ -0,0 +1,183 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+// This artifact server is inspired by https://github.com/nektos/act/blob/master/pkg/artifacts/server.go.
+// It updates the URL settings and uses ObjectStore to handle artifact persistence.
+
+package actions
+
+import (
+ "context"
+ "errors"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ArtifactStatus is the status of an artifact: uploading, expired, or pending deletion
+type ArtifactStatus int64
+
+const (
+ ArtifactStatusUploadPending ArtifactStatus = iota + 1 // 1, ArtifactStatusUploadPending is the status of an artifact upload that is pending
+ ArtifactStatusUploadConfirmed // 2, ArtifactStatusUploadConfirmed is the status of an artifact upload that is confirmed
+ ArtifactStatusUploadError // 3, ArtifactStatusUploadError is the status of an artifact upload that is errored
+ ArtifactStatusExpired // 4, ArtifactStatusExpired is the status of an artifact that is expired
+ ArtifactStatusPendingDeletion // 5, ArtifactStatusPendingDeletion is the status of an artifact that is pending deletion
+ ArtifactStatusDeleted // 6, ArtifactStatusDeleted is the status of an artifact that is deleted
+)
+
+func init() {
+ db.RegisterModel(new(ActionArtifact))
+}
+
+// ActionArtifact is a file that is stored in the artifact storage.
+type ActionArtifact struct {
+ ID int64 `xorm:"pk autoincr"`
+ RunID int64 `xorm:"index unique(runid_name_path)"` // The run id of the artifact
+ RunnerID int64
+ RepoID int64 `xorm:"index"`
+ OwnerID int64
+ CommitSHA string
+ StoragePath string // The path to the artifact in the storage
+ FileSize int64 // The size of the artifact in bytes
+ FileCompressedSize int64 // The size of the artifact in bytes after gzip compression
+ ContentEncoding string // The content encoding of the artifact
+ ArtifactPath string `xorm:"index unique(runid_name_path)"` // The path to the artifact when runner uploads it
+ ArtifactName string `xorm:"index unique(runid_name_path)"` // The name of the artifact when runner uploads it
+ Status int64 `xorm:"index"` // The status of the artifact: uploading, expired, or pending deletion (see ArtifactStatus)
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"updated index"`
+ ExpiredUnix timeutil.TimeStamp `xorm:"index"` // The time when the artifact will expire
+}
+
+func CreateArtifact(ctx context.Context, t *ActionTask, artifactName, artifactPath string, expiredDays int64) (*ActionArtifact, error) {
+ if err := t.LoadJob(ctx); err != nil {
+ return nil, err
+ }
+ artifact, err := getArtifactByNameAndPath(ctx, t.Job.RunID, artifactName, artifactPath)
+ if errors.Is(err, util.ErrNotExist) {
+ artifact := &ActionArtifact{
+ ArtifactName: artifactName,
+ ArtifactPath: artifactPath,
+ RunID: t.Job.RunID,
+ RunnerID: t.RunnerID,
+ RepoID: t.RepoID,
+ OwnerID: t.OwnerID,
+ CommitSHA: t.CommitSHA,
+ Status: int64(ArtifactStatusUploadPending),
+ ExpiredUnix: timeutil.TimeStamp(time.Now().Unix() + timeutil.Day*expiredDays),
+ }
+ if _, err := db.GetEngine(ctx).Insert(artifact); err != nil {
+ return nil, err
+ }
+ return artifact, nil
+ } else if err != nil {
+ return nil, err
+ }
+
+ if _, err := db.GetEngine(ctx).ID(artifact.ID).Cols("expired_unix").Update(&ActionArtifact{
+ ExpiredUnix: timeutil.TimeStamp(time.Now().Unix() + timeutil.Day*expiredDays),
+ }); err != nil {
+ return nil, err
+ }
+
+ return artifact, nil
+}
+
+func getArtifactByNameAndPath(ctx context.Context, runID int64, name, fpath string) (*ActionArtifact, error) {
+ var art ActionArtifact
+ has, err := db.GetEngine(ctx).Where("run_id = ? AND artifact_name = ? AND artifact_path = ?", runID, name, fpath).Get(&art)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, util.ErrNotExist
+ }
+ return &art, nil
+}
+
+// UpdateArtifactByID updates an artifact by id
+func UpdateArtifactByID(ctx context.Context, id int64, art *ActionArtifact) error {
+ art.ID = id
+ _, err := db.GetEngine(ctx).ID(id).AllCols().Update(art)
+ return err
+}
+
+type FindArtifactsOptions struct {
+ db.ListOptions
+ RepoID int64
+ RunID int64
+ ArtifactName string
+ Status int
+}
+
+func (opts FindArtifactsOptions) ToConds() builder.Cond {
+ cond := builder.NewCond()
+ if opts.RepoID > 0 {
+ cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
+ }
+ if opts.RunID > 0 {
+ cond = cond.And(builder.Eq{"run_id": opts.RunID})
+ }
+ if opts.ArtifactName != "" {
+ cond = cond.And(builder.Eq{"artifact_name": opts.ArtifactName})
+ }
+ if opts.Status > 0 {
+ cond = cond.And(builder.Eq{"status": opts.Status})
+ }
+
+ return cond
+}
+
+// ActionArtifactMeta is the metadata of an artifact
+type ActionArtifactMeta struct {
+ ArtifactName string
+ FileSize int64
+ Status ArtifactStatus
+}
+
+// ListUploadedArtifactsMeta returns the metadata of all uploaded artifacts of a run
+func ListUploadedArtifactsMeta(ctx context.Context, runID int64) ([]*ActionArtifactMeta, error) {
+ arts := make([]*ActionArtifactMeta, 0, 10)
+ return arts, db.GetEngine(ctx).Table("action_artifact").
+ Where("run_id=? AND (status=? OR status=?)", runID, ArtifactStatusUploadConfirmed, ArtifactStatusExpired).
+ GroupBy("artifact_name").
+ Select("artifact_name, sum(file_size) as file_size, max(status) as status").
+ Find(&arts)
+}
+
+// ListNeedExpiredArtifacts returns all confirmed artifacts whose expiry time has passed but that are not yet marked expired or deleted
+func ListNeedExpiredArtifacts(ctx context.Context) ([]*ActionArtifact, error) {
+ arts := make([]*ActionArtifact, 0, 10)
+ return arts, db.GetEngine(ctx).
+ Where("expired_unix < ? AND status = ?", timeutil.TimeStamp(time.Now().Unix()), ArtifactStatusUploadConfirmed).Find(&arts)
+}
+
+// ListPendingDeleteArtifacts returns all artifacts in pending-delete status.
+// limit is the max number of artifacts to return.
+func ListPendingDeleteArtifacts(ctx context.Context, limit int) ([]*ActionArtifact, error) {
+ arts := make([]*ActionArtifact, 0, limit)
+ return arts, db.GetEngine(ctx).
+ Where("status = ?", ArtifactStatusPendingDeletion).Limit(limit).Find(&arts)
+}
+
+// SetArtifactExpired sets an artifact to expired
+func SetArtifactExpired(ctx context.Context, artifactID int64) error {
+ _, err := db.GetEngine(ctx).Where("id=? AND status = ?", artifactID, ArtifactStatusUploadConfirmed).Cols("status").Update(&ActionArtifact{Status: int64(ArtifactStatusExpired)})
+ return err
+}
+
+// SetArtifactNeedDelete marks an artifact as pending deletion; a cron job will delete it later
+func SetArtifactNeedDelete(ctx context.Context, runID int64, name string) error {
+ _, err := db.GetEngine(ctx).Where("run_id=? AND artifact_name=? AND status = ?", runID, name, ArtifactStatusUploadConfirmed).Cols("status").Update(&ActionArtifact{Status: int64(ArtifactStatusPendingDeletion)})
+ return err
+}
+
+// SetArtifactDeleted sets an artifact to deleted
+func SetArtifactDeleted(ctx context.Context, artifactID int64) error {
+ _, err := db.GetEngine(ctx).ID(artifactID).Cols("status").Update(&ActionArtifact{Status: int64(ArtifactStatusDeleted)})
+ return err
+}
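
Taken together, the status helpers above form a two-phase cleanup cycle: confirmed artifacts whose expiry time has passed are first flagged expired, and artifacts flagged pending-deletion are then removed in batches. A minimal sketch of such a caller, assuming it lives alongside these helpers in package actions; the name cleanupArtifacts, the batch size, and the storage-removal step are illustrative, not part of this patch:

    func cleanupArtifacts(ctx context.Context) error {
        // Phase 1: flag confirmed artifacts whose expired_unix has passed.
        expired, err := ListNeedExpiredArtifacts(ctx)
        if err != nil {
            return err
        }
        for _, art := range expired {
            if err := SetArtifactExpired(ctx, art.ID); err != nil {
                return err
            }
        }
        // Phase 2: remove artifacts already marked pending-deletion, at most
        // 100 per pass; deleting the blob from storage would happen here
        // before the status is flipped to deleted.
        pending, err := ListPendingDeleteArtifacts(ctx, 100)
        if err != nil {
            return err
        }
        for _, art := range pending {
            if err := SetArtifactDeleted(ctx, art.ID); err != nil {
                return err
            }
        }
        return nil
    }
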
diff --git a/models/actions/forgejo.go b/models/actions/forgejo.go
new file mode 100644
index 0000000..5ea77f4
--- /dev/null
+++ b/models/actions/forgejo.go
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "context"
+ "crypto/subtle"
+ "fmt"
+
+ auth_model "code.gitea.io/gitea/models/auth"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/util"
+
+ gouuid "github.com/google/uuid"
+)
+
+func RegisterRunner(ctx context.Context, ownerID, repoID int64, token string, labels *[]string, name, version string) (*ActionRunner, error) {
+ uuid, err := gouuid.FromBytes([]byte(token[:16]))
+ if err != nil {
+ return nil, fmt.Errorf("gouuid.FromBytes %v", err)
+ }
+ uuidString := uuid.String()
+
+ var runner ActionRunner
+
+ has, err := db.GetEngine(ctx).Where("uuid=?", uuidString).Get(&runner)
+ if err != nil {
+ return nil, fmt.Errorf("GetRunner %v", err)
+ }
+
+ var mustUpdateSecret bool
+ if has {
+ //
+ // The runner exists, check if the rest of the token has changed.
+ //
+ mustUpdateSecret = subtle.ConstantTimeCompare(
+ []byte(runner.TokenHash),
+ []byte(auth_model.HashToken(token, runner.TokenSalt)),
+ ) != 1
+ } else {
+ //
+ // The runner does not exist yet, create it
+ //
+ runner = ActionRunner{
+ UUID: uuidString,
+ AgentLabels: []string{},
+ }
+
+ if err := runner.UpdateSecret(token); err != nil {
+ return &runner, fmt.Errorf("can't set new runner's secret: %w", err)
+ }
+
+ if err := CreateRunner(ctx, &runner); err != nil {
+ return &runner, fmt.Errorf("can't create new runner: %w", err)
+ }
+ }
+
+ //
+ // Update the existing runner
+ //
+ name, _ = util.SplitStringAtByteN(name, 255)
+
+ cols := []string{"name", "owner_id", "repo_id", "version"}
+ runner.Name = name
+ runner.OwnerID = ownerID
+ runner.RepoID = repoID
+ runner.Version = version
+ if labels != nil {
+ runner.AgentLabels = *labels
+ cols = append(cols, "agent_labels")
+ }
+ if mustUpdateSecret {
+ if err := runner.UpdateSecret(token); err != nil {
+ return &runner, fmt.Errorf("can't change runner's secret: %w", err)
+ }
+ cols = append(cols, "token_hash", "token_salt")
+ }
+
+ if err := UpdateRunner(ctx, &runner, cols...); err != nil {
+ return &runner, fmt.Errorf("can't update the runner %+v %w", runner, err)
+ }
+
+ return &runner, nil
+}
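
RegisterRunner derives the runner's identity from the registration token itself: the first 16 bytes become the lookup UUID, while the full token is salted and hashed for verification, so re-registering with a token that shares the prefix updates the existing record. A standalone sketch of the UUID derivation (illustrative; gouuid.FromBytes and the 40-character token shape are taken from the code above):

    package main

    import (
        "fmt"

        gouuid "github.com/google/uuid"
    )

    func main() {
        token := "0123456789012345678901234567890123456789"
        // Reinterpret the first 16 bytes of the token as a UUID.
        uuid, err := gouuid.FromBytes([]byte(token[:16]))
        if err != nil {
            panic(err)
        }
        fmt.Println(uuid.String()) // 30313233-3435-3637-3839-303132333435
    }
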
diff --git a/models/actions/forgejo_test.go b/models/actions/forgejo_test.go
new file mode 100644
index 0000000..9295fc6
--- /dev/null
+++ b/models/actions/forgejo_test.go
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "crypto/subtle"
+ "testing"
+
+ auth_model "code.gitea.io/gitea/models/auth"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestActions_RegisterRunner_Token(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ ownerID := int64(0)
+ repoID := int64(0)
+ token := "0123456789012345678901234567890123456789"
+ labels := []string{}
+ name := "runner"
+ version := "v1.2.3"
+ runner, err := RegisterRunner(db.DefaultContext, ownerID, repoID, token, &labels, name, version)
+ require.NoError(t, err)
+ assert.EqualValues(t, name, runner.Name)
+
+ assert.EqualValues(t, 1, subtle.ConstantTimeCompare([]byte(runner.TokenHash), []byte(auth_model.HashToken(token, runner.TokenSalt))), "the token cannot be verified with the same method as routers/api/actions/runner/interceptor.go as of 8228751c55d6a4263f0fec2932ca16181c09c97d")
+}
+
+// TestActions_RegisterRunner_TokenUpdate tests that a token's secret is updated
+// when a runner already exists and RegisterRunner is called with a token
+// parameter whose first 16 bytes match that record but where the last 24 bytes
+// do not match.
+func TestActions_RegisterRunner_TokenUpdate(t *testing.T) {
+ const recordID = 12345678
+ oldToken := "7e577e577e577e57feedfacefeedfacefeedface"
+ newToken := "7e577e577e577e57deadbeefdeadbeefdeadbeef"
+ require.NoError(t, unittest.PrepareTestDatabase())
+ before := unittest.AssertExistsAndLoadBean(t, &ActionRunner{ID: recordID})
+ require.Equal(t,
+ before.TokenHash, auth_model.HashToken(oldToken, before.TokenSalt),
+ "the initial token should match the runner's secret",
+ )
+
+ _, err := RegisterRunner(db.DefaultContext, before.OwnerID, before.RepoID, newToken, nil, before.Name, before.Version)
+ require.NoError(t, err)
+
+ after := unittest.AssertExistsAndLoadBean(t, &ActionRunner{ID: recordID})
+
+ assert.Equal(t, before.UUID, after.UUID)
+ assert.NotEqual(t,
+ after.TokenHash, auth_model.HashToken(oldToken, after.TokenSalt),
+ "the old token can still be verified",
+ )
+ assert.Equal(t,
+ after.TokenHash, auth_model.HashToken(newToken, after.TokenSalt),
+ "the new token cannot be verified",
+ )
+}
+
+func TestActions_RegisterRunner_CreateWithLabels(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ ownerID := int64(0)
+ repoID := int64(0)
+ token := "0123456789012345678901234567890123456789"
+ name := "runner"
+ version := "v1.2.3"
+ labels := []string{"woop", "doop"}
+ labelsCopy := append([]string{}, labels...) // labels may be mutated by the tested function, so make a real copy
+
+ runner, err := RegisterRunner(db.DefaultContext, ownerID, repoID, token, &labels, name, version)
+ require.NoError(t, err)
+
+ // Check that the returned record has been updated, including the labels
+ assert.EqualValues(t, ownerID, runner.OwnerID)
+ assert.EqualValues(t, repoID, runner.RepoID)
+ assert.EqualValues(t, name, runner.Name)
+ assert.EqualValues(t, version, runner.Version)
+ assert.EqualValues(t, labelsCopy, runner.AgentLabels)
+
+ // Check that the DB record has been updated, including the labels
+ after := unittest.AssertExistsAndLoadBean(t, &ActionRunner{ID: runner.ID})
+ assert.EqualValues(t, ownerID, after.OwnerID)
+ assert.EqualValues(t, repoID, after.RepoID)
+ assert.EqualValues(t, name, after.Name)
+ assert.EqualValues(t, version, after.Version)
+ assert.EqualValues(t, labelsCopy, after.AgentLabels)
+}
+
+func TestActions_RegisterRunner_CreateWithoutLabels(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ ownerID := int64(0)
+ repoID := int64(0)
+ token := "0123456789012345678901234567890123456789"
+ name := "runner"
+ version := "v1.2.3"
+
+ runner, err := RegisterRunner(db.DefaultContext, ownerID, repoID, token, nil, name, version)
+ require.NoError(t, err)
+
+ // Check that the returned record has been updated, except for the labels
+ assert.EqualValues(t, ownerID, runner.OwnerID)
+ assert.EqualValues(t, repoID, runner.RepoID)
+ assert.EqualValues(t, name, runner.Name)
+ assert.EqualValues(t, version, runner.Version)
+ assert.EqualValues(t, []string{}, runner.AgentLabels)
+
+ // Check that the DB record has been updated, except for the labels
+ after := unittest.AssertExistsAndLoadBean(t, &ActionRunner{ID: runner.ID})
+ assert.EqualValues(t, ownerID, after.OwnerID)
+ assert.EqualValues(t, repoID, after.RepoID)
+ assert.EqualValues(t, name, after.Name)
+ assert.EqualValues(t, version, after.Version)
+ assert.EqualValues(t, []string{}, after.AgentLabels)
+}
+
+func TestActions_RegisterRunner_UpdateWithLabels(t *testing.T) {
+ const recordID = 12345678
+ token := "7e577e577e577e57feedfacefeedfacefeedface"
+ require.NoError(t, unittest.PrepareTestDatabase())
+ unittest.AssertExistsAndLoadBean(t, &ActionRunner{ID: recordID})
+
+ newOwnerID := int64(1)
+ newRepoID := int64(1)
+ newName := "rennur"
+ newVersion := "v4.5.6"
+ newLabels := []string{"warp", "darp"}
+ labelsCopy := append([]string{}, newLabels...) // labels may be mutated by the tested function, so make a real copy
+
+ runner, err := RegisterRunner(db.DefaultContext, newOwnerID, newRepoID, token, &newLabels, newName, newVersion)
+ require.NoError(t, err)
+
+ // Check that the returned record has been updated
+ assert.EqualValues(t, newOwnerID, runner.OwnerID)
+ assert.EqualValues(t, newRepoID, runner.RepoID)
+ assert.EqualValues(t, newName, runner.Name)
+ assert.EqualValues(t, newVersion, runner.Version)
+ assert.EqualValues(t, labelsCopy, runner.AgentLabels)
+
+ // Check that the DB record has been updated
+ after := unittest.AssertExistsAndLoadBean(t, &ActionRunner{ID: recordID})
+ assert.EqualValues(t, newOwnerID, after.OwnerID)
+ assert.EqualValues(t, newRepoID, after.RepoID)
+ assert.EqualValues(t, newName, after.Name)
+ assert.EqualValues(t, newVersion, after.Version)
+ assert.EqualValues(t, labelsCopy, after.AgentLabels)
+}
+
+func TestActions_RegisterRunner_UpdateWithoutLabels(t *testing.T) {
+ const recordID = 12345678
+ token := "7e577e577e577e57feedfacefeedfacefeedface"
+ require.NoError(t, unittest.PrepareTestDatabase())
+ before := unittest.AssertExistsAndLoadBean(t, &ActionRunner{ID: recordID})
+
+ newOwnerID := int64(1)
+ newRepoID := int64(1)
+ newName := "rennur"
+ newVersion := "v4.5.6"
+
+ runner, err := RegisterRunner(db.DefaultContext, newOwnerID, newRepoID, token, nil, newName, newVersion)
+ require.NoError(t, err)
+
+ // Check that the returned record has been updated, except for the labels
+ assert.EqualValues(t, newOwnerID, runner.OwnerID)
+ assert.EqualValues(t, newRepoID, runner.RepoID)
+ assert.EqualValues(t, newName, runner.Name)
+ assert.EqualValues(t, newVersion, runner.Version)
+ assert.EqualValues(t, before.AgentLabels, runner.AgentLabels)
+
+ // Check that the DB record has been updated, except for the labels
+ after := unittest.AssertExistsAndLoadBean(t, &ActionRunner{ID: recordID})
+ assert.EqualValues(t, newOwnerID, after.OwnerID)
+ assert.EqualValues(t, newRepoID, after.RepoID)
+ assert.EqualValues(t, newName, after.Name)
+ assert.EqualValues(t, newVersion, after.Version)
+ assert.EqualValues(t, before.AgentLabels, after.AgentLabels)
+}
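
The assertions above verify tokens the same way the runner interceptor does: hash the presented token with the runner's stored salt and compare it to the stored hash in constant time. A minimal sketch of that check (verifyRunnerToken is an assumed name; auth_model.HashToken and the ActionRunner fields are those used above):

    import (
        "crypto/subtle"

        auth_model "code.gitea.io/gitea/models/auth"
    )

    // verifyRunnerToken reports whether token matches the runner's stored secret.
    func verifyRunnerToken(runner *ActionRunner, token string) bool {
        hashed := auth_model.HashToken(token, runner.TokenSalt)
        return subtle.ConstantTimeCompare([]byte(runner.TokenHash), []byte(hashed)) == 1
    }
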
diff --git a/models/actions/main_test.go b/models/actions/main_test.go
new file mode 100644
index 0000000..3cfb395
--- /dev/null
+++ b/models/actions/main_test.go
@@ -0,0 +1,19 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/unittest"
+)
+
+func TestMain(m *testing.M) {
+ unittest.MainTest(m, &unittest.TestOptions{
+ FixtureFiles: []string{
+ "action_runner.yml",
+ "action_runner_token.yml",
+ },
+ })
+}
diff --git a/models/actions/run.go b/models/actions/run.go
new file mode 100644
index 0000000..f637634
--- /dev/null
+++ b/models/actions/run.go
@@ -0,0 +1,425 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "context"
+ "fmt"
+ "slices"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/json"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+ webhook_module "code.gitea.io/gitea/modules/webhook"
+
+ "github.com/nektos/act/pkg/jobparser"
+ "xorm.io/builder"
+)
+
+// ActionRun represents a run of a workflow file
+type ActionRun struct {
+ ID int64
+ Title string
+ RepoID int64 `xorm:"index unique(repo_index)"`
+ Repo *repo_model.Repository `xorm:"-"`
+ OwnerID int64 `xorm:"index"`
+ WorkflowID string `xorm:"index"` // the name of the workflow file
+ Index int64 `xorm:"index unique(repo_index)"` // a unique number for each run of a repository
+ TriggerUserID int64 `xorm:"index"`
+ TriggerUser *user_model.User `xorm:"-"`
+ ScheduleID int64
+ Ref string `xorm:"index"` // the commit/tag/… that caused the run
+ CommitSHA string
+ IsForkPullRequest bool // If this is triggered by a PR from a forked repository or an untrusted user, we need to check if it is approved and limit permissions when running the workflow.
+ NeedApproval bool // may need approval if it's a fork pull request
+ ApprovedBy int64 `xorm:"index"` // who approved
+ Event webhook_module.HookEventType // the webhook event that causes the workflow to run
+ EventPayload string `xorm:"LONGTEXT"`
+ TriggerEvent string // the trigger event defined in the `on` configuration of the triggered workflow
+ Status Status `xorm:"index"`
+ Version int `xorm:"version default 0"` // Status may be updated concurrently, so an optimistic lock is needed
+ // Started and Stopped are used to record the last run time; on a rerun they are reset to 0
+ Started timeutil.TimeStamp
+ Stopped timeutil.TimeStamp
+ // PreviousDuration records the accumulated duration of previous attempts (before a rerun)
+ PreviousDuration time.Duration
+ Created timeutil.TimeStamp `xorm:"created"`
+ Updated timeutil.TimeStamp `xorm:"updated"`
+}
+
+func init() {
+ db.RegisterModel(new(ActionRun))
+ db.RegisterModel(new(ActionRunIndex))
+}
+
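+// The `version` tag on the Version field above enables xorm's optimistic
+// locking: an update of an ActionRun whose Version no longer matches the
+// stored row affects zero rows instead of overwriting a concurrent status
+// change. Illustrative sketch of a retrying caller (assumed, not upstream):
+//
+//	affected, err := db.GetEngine(ctx).ID(run.ID).Update(run)
+//	if err == nil && affected == 0 {
+//		// stale Version: reload the run and retry the update
+//	}
+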
+func (run *ActionRun) HTMLURL() string {
+ if run.Repo == nil {
+ return ""
+ }
+ return fmt.Sprintf("%s/actions/runs/%d", run.Repo.HTMLURL(), run.Index)
+}
+
+func (run *ActionRun) Link() string {
+ if run.Repo == nil {
+ return ""
+ }
+ return fmt.Sprintf("%s/actions/runs/%d", run.Repo.Link(), run.Index)
+}
+
+// RefLink returns the URL of the run's ref
+func (run *ActionRun) RefLink() string {
+ refName := git.RefName(run.Ref)
+ if refName.IsPull() {
+ return run.Repo.Link() + "/pulls/" + refName.ShortName()
+ }
+ return git.RefURL(run.Repo.Link(), run.Ref)
+}
+
+// PrettyRef returns "#id" for a pull ref, or the ShortName for other refs
+func (run *ActionRun) PrettyRef() string {
+ refName := git.RefName(run.Ref)
+ if refName.IsPull() {
+ return "#" + strings.TrimSuffix(strings.TrimPrefix(run.Ref, git.PullPrefix), "/head")
+ }
+ return refName.ShortName()
+}
+
+// LoadAttributes loads Repo and TriggerUser if they are not already loaded
+func (run *ActionRun) LoadAttributes(ctx context.Context) error {
+ if run == nil {
+ return nil
+ }
+
+ if err := run.LoadRepo(ctx); err != nil {
+ return err
+ }
+
+ if err := run.Repo.LoadAttributes(ctx); err != nil {
+ return err
+ }
+
+ if run.TriggerUser == nil {
+ u, err := user_model.GetPossibleUserByID(ctx, run.TriggerUserID)
+ if err != nil {
+ return err
+ }
+ run.TriggerUser = u
+ }
+
+ return nil
+}
+
+func (run *ActionRun) LoadRepo(ctx context.Context) error {
+ if run == nil || run.Repo != nil {
+ return nil
+ }
+
+ repo, err := repo_model.GetRepositoryByID(ctx, run.RepoID)
+ if err != nil {
+ return err
+ }
+ run.Repo = repo
+ return nil
+}
+
+func (run *ActionRun) Duration() time.Duration {
+ return calculateDuration(run.Started, run.Stopped, run.Status) + run.PreviousDuration
+}
+
+func (run *ActionRun) GetPushEventPayload() (*api.PushPayload, error) {
+ if run.Event == webhook_module.HookEventPush {
+ var payload api.PushPayload
+ if err := json.Unmarshal([]byte(run.EventPayload), &payload); err != nil {
+ return nil, err
+ }
+ return &payload, nil
+ }
+ return nil, fmt.Errorf("event %s is not a push event", run.Event)
+}
+
+func (run *ActionRun) GetPullRequestEventPayload() (*api.PullRequestPayload, error) {
+ if run.Event == webhook_module.HookEventPullRequest ||
+ run.Event == webhook_module.HookEventPullRequestSync ||
+ run.Event == webhook_module.HookEventPullRequestAssign ||
+ run.Event == webhook_module.HookEventPullRequestMilestone ||
+ run.Event == webhook_module.HookEventPullRequestLabel {
+ var payload api.PullRequestPayload
+ if err := json.Unmarshal([]byte(run.EventPayload), &payload); err != nil {
+ return nil, err
+ }
+ return &payload, nil
+ }
+ return nil, fmt.Errorf("event %s is not a pull request event", run.Event)
+}
+
+func updateRepoRunsNumbers(ctx context.Context, repo *repo_model.Repository) error {
+ _, err := db.GetEngine(ctx).ID(repo.ID).
+ SetExpr("num_action_runs",
+ builder.Select("count(*)").From("action_run").
+ Where(builder.Eq{"repo_id": repo.ID}),
+ ).
+ SetExpr("num_closed_action_runs",
+ builder.Select("count(*)").From("action_run").
+ Where(builder.Eq{
+ "repo_id": repo.ID,
+ }.And(
+ builder.In("status",
+ StatusSuccess,
+ StatusFailure,
+ StatusCancelled,
+ StatusSkipped,
+ ),
+ ),
+ ),
+ ).
+ Update(repo)
+ return err
+}
+
+// CancelPreviousJobs cancels all previous jobs of the same repository, reference, workflow, and event.
+// It's useful when a new run is triggered, and the previous runs no longer need to continue.
+func CancelPreviousJobs(ctx context.Context, repoID int64, ref, workflowID string, event webhook_module.HookEventType) error {
+ // Find all runs in the specified repository, reference, and workflow with non-final status
+ runs, total, err := db.FindAndCount[ActionRun](ctx, FindRunOptions{
+ RepoID: repoID,
+ Ref: ref,
+ WorkflowID: workflowID,
+ TriggerEvent: event,
+ Status: []Status{StatusRunning, StatusWaiting, StatusBlocked},
+ })
+ if err != nil {
+ return err
+ }
+
+ // If there are no runs found, there's no need to proceed with cancellation, so return nil.
+ if total == 0 {
+ return nil
+ }
+
+ // Iterate over each found run and cancel its associated jobs.
+ for _, run := range runs {
+ // Find all jobs associated with the current run.
+ jobs, err := db.Find[ActionRunJob](ctx, FindRunJobOptions{
+ RunID: run.ID,
+ })
+ if err != nil {
+ return err
+ }
+
+ // Iterate over each job and attempt to cancel it.
+ for _, job := range jobs {
+ // Skip jobs that are already in a terminal state (completed, cancelled, etc.).
+ status := job.Status
+ if status.IsDone() {
+ continue
+ }
+
+ // If the job has no associated task (probably an error), set its status to 'Cancelled' and stop it.
+ if job.TaskID == 0 {
+ job.Status = StatusCancelled
+ job.Stopped = timeutil.TimeStampNow()
+
+ // Update the job's status and stopped time in the database.
+ n, err := UpdateRunJob(ctx, job, builder.Eq{"task_id": 0}, "status", "stopped")
+ if err != nil {
+ return err
+ }
+
+ // If the update affected 0 rows, it means the job has changed in the meantime, so we need to try again.
+ if n == 0 {
+ return fmt.Errorf("job has changed, try again")
+ }
+
+ // Continue with the next job.
+ continue
+ }
+
+ // If the job has an associated task, try to stop the task, effectively cancelling the job.
+ if err := StopTask(ctx, job.TaskID, StatusCancelled); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Return nil to indicate successful cancellation of all running and waiting jobs.
+ return nil
+}
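+
+// Usage sketch (illustrative only; the ref and workflow name are placeholder
+// values, not taken from this codebase): cancel the still-pending runs of the
+// previous push before a new push run is inserted.
+//
+//	if err := CancelPreviousJobs(ctx, repo.ID, "refs/heads/main", "build.yml", webhook_module.HookEventPush); err != nil {
+//		return err
+//	}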
+
+// InsertRun inserts a run
+func InsertRun(ctx context.Context, run *ActionRun, jobs []*jobparser.SingleWorkflow) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ index, err := db.GetNextResourceIndex(ctx, "action_run_index", run.RepoID)
+ if err != nil {
+ return err
+ }
+ run.Index = index
+
+ if err := db.Insert(ctx, run); err != nil {
+ return err
+ }
+
+ if run.Repo == nil {
+ repo, err := repo_model.GetRepositoryByID(ctx, run.RepoID)
+ if err != nil {
+ return err
+ }
+ run.Repo = repo
+ }
+
+ if err := updateRepoRunsNumbers(ctx, run.Repo); err != nil {
+ return err
+ }
+
+ runJobs := make([]*ActionRunJob, 0, len(jobs))
+ var hasWaiting bool
+ for _, v := range jobs {
+ id, job := v.Job()
+ needs := job.Needs()
+ if err := v.SetJob(id, job.EraseNeeds()); err != nil {
+ return err
+ }
+ payload, _ := v.Marshal()
+ status := StatusWaiting
+ if len(needs) > 0 || run.NeedApproval {
+ status = StatusBlocked
+ } else {
+ hasWaiting = true
+ }
+ job.Name, _ = util.SplitStringAtByteN(job.Name, 255)
+ runJobs = append(runJobs, &ActionRunJob{
+ RunID: run.ID,
+ RepoID: run.RepoID,
+ OwnerID: run.OwnerID,
+ CommitSHA: run.CommitSHA,
+ IsForkPullRequest: run.IsForkPullRequest,
+ Name: job.Name,
+ WorkflowPayload: payload,
+ JobID: id,
+ Needs: needs,
+ RunsOn: job.RunsOn(),
+ Status: status,
+ })
+ }
+ if err := db.Insert(ctx, runJobs); err != nil {
+ return err
+ }
+
+ // If there is a job in the waiting status, increase the tasks version.
+ if hasWaiting {
+ if err := IncreaseTaskVersion(ctx, run.OwnerID, run.RepoID); err != nil {
+ return err
+ }
+ }
+
+ return committer.Commit()
+}
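+
+// Usage sketch (illustrative; "content" is assumed to hold the raw workflow
+// YAML, and the run fields are placeholders, deliberately incomplete): parse
+// a workflow into single-job workflows with act's jobparser, then insert the
+// run together with its jobs in one transaction.
+//
+//	workflows, err := jobparser.Parse(content)
+//	if err != nil {
+//		return err
+//	}
+//	run := &ActionRun{RepoID: repo.ID, WorkflowID: "build.yml", Status: StatusWaiting}
+//	if err := InsertRun(ctx, run, workflows); err != nil {
+//		return err
+//	}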
+
+func GetLatestRun(ctx context.Context, repoID int64) (*ActionRun, error) {
+ var run ActionRun
+ has, err := db.GetEngine(ctx).Where("repo_id=?", repoID).OrderBy("id DESC").Limit(1).Get(&run)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, fmt.Errorf("latest run: %w", util.ErrNotExist)
+ }
+ return &run, nil
+}
+
+func GetLatestRunForBranchAndWorkflow(ctx context.Context, repoID int64, branch, workflowFile, event string) (*ActionRun, error) {
+ var run ActionRun
+ q := db.GetEngine(ctx).Where("repo_id=?", repoID).And("workflow_id=?", workflowFile)
+ if event != "" {
+ q = q.And("event=?", event)
+ }
+ if branch != "" {
+ q = q.And("ref=?", branch)
+ }
+ has, err := q.Desc("id").Get(&run)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, util.NewNotExistErrorf("run with repo_id %d, ref %s, event %s, workflow_id %s", repoID, branch, event, workflowFile)
+ }
+ return &run, nil
+}
+
+func GetRunByID(ctx context.Context, id int64) (*ActionRun, error) {
+ var run ActionRun
+ has, err := db.GetEngine(ctx).Where("id=?", id).Get(&run)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, fmt.Errorf("run with id %d: %w", id, util.ErrNotExist)
+ }
+
+ return &run, nil
+}
+
+func GetRunByIndex(ctx context.Context, repoID, index int64) (*ActionRun, error) {
+ run := &ActionRun{
+ RepoID: repoID,
+ Index: index,
+ }
+ has, err := db.GetEngine(ctx).Get(run)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, fmt.Errorf("run with index %d %d: %w", repoID, index, util.ErrNotExist)
+ }
+
+ return run, nil
+}
+
+// UpdateRun updates a run.
+// It requires the inputted run has Version set.
+// It will return error if the version is not matched (it means the run has been changed after loaded).
+func UpdateRun(ctx context.Context, run *ActionRun, cols ...string) error {
+ sess := db.GetEngine(ctx).ID(run.ID)
+ if len(cols) > 0 {
+ sess.Cols(cols...)
+ }
+ affected, err := sess.Update(run)
+ if err != nil {
+ return err
+ }
+ if affected == 0 {
+ // It's impossible that the run is not found, since Gitea never deletes runs.
+ return fmt.Errorf("run has changed")
+ }
+
+ if run.Status != 0 || slices.Contains(cols, "status") {
+ if run.RepoID == 0 {
+ run, err = GetRunByID(ctx, run.ID)
+ if err != nil {
+ return err
+ }
+ }
+ if run.Repo == nil {
+ repo, err := repo_model.GetRepositoryByID(ctx, run.RepoID)
+ if err != nil {
+ return err
+ }
+ run.Repo = repo
+ }
+ if err := updateRepoRunsNumbers(ctx, run.Repo); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
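+
+// Usage sketch for the optimistic lock (illustrative): load the run so that
+// Version is populated, mutate it, then retry if another writer bumped the
+// version in the meantime.
+//
+//	run, err := GetRunByID(ctx, runID)
+//	if err != nil {
+//		return err
+//	}
+//	run.Status = StatusCancelled
+//	if err := UpdateRun(ctx, run, "status"); err != nil {
+//		// "run has changed": reload the run and retry, or give up.
+//	}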
+
+type ActionRunIndex db.ResourceIndex
diff --git a/models/actions/run_job.go b/models/actions/run_job.go
new file mode 100644
index 0000000..4b86640
--- /dev/null
+++ b/models/actions/run_job.go
@@ -0,0 +1,180 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "context"
+ "fmt"
+ "slices"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ActionRunJob represents a job of a run
+type ActionRunJob struct {
+ ID int64
+ RunID int64 `xorm:"index"`
+ Run *ActionRun `xorm:"-"`
+ RepoID int64 `xorm:"index"`
+ OwnerID int64 `xorm:"index"`
+ CommitSHA string `xorm:"index"`
+ IsForkPullRequest bool
+ Name string `xorm:"VARCHAR(255)"`
+ Attempt int64
+ WorkflowPayload []byte
+ JobID string `xorm:"VARCHAR(255)"` // the job id defined in the workflow file, not the database ID of this job
+ Needs []string `xorm:"JSON TEXT"`
+ RunsOn []string `xorm:"JSON TEXT"`
+ TaskID int64 // the latest task of the job
+ Status Status `xorm:"index"`
+ Started timeutil.TimeStamp
+ Stopped timeutil.TimeStamp
+ Created timeutil.TimeStamp `xorm:"created"`
+ Updated timeutil.TimeStamp `xorm:"updated index"`
+}
+
+func init() {
+ db.RegisterModel(new(ActionRunJob))
+}
+
+func (job *ActionRunJob) Duration() time.Duration {
+ return calculateDuration(job.Started, job.Stopped, job.Status)
+}
+
+func (job *ActionRunJob) LoadRun(ctx context.Context) error {
+ if job.Run == nil {
+ run, err := GetRunByID(ctx, job.RunID)
+ if err != nil {
+ return err
+ }
+ job.Run = run
+ }
+ return nil
+}
+
+// LoadAttributes loads the Run if it is not loaded yet
+func (job *ActionRunJob) LoadAttributes(ctx context.Context) error {
+ if job == nil {
+ return nil
+ }
+
+ if err := job.LoadRun(ctx); err != nil {
+ return err
+ }
+
+ return job.Run.LoadAttributes(ctx)
+}
+
+func GetRunJobByID(ctx context.Context, id int64) (*ActionRunJob, error) {
+ var job ActionRunJob
+ has, err := db.GetEngine(ctx).Where("id=?", id).Get(&job)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, fmt.Errorf("run job with id %d: %w", id, util.ErrNotExist)
+ }
+
+ return &job, nil
+}
+
+func GetRunJobsByRunID(ctx context.Context, runID int64) ([]*ActionRunJob, error) {
+ var jobs []*ActionRunJob
+ if err := db.GetEngine(ctx).Where("run_id=?", runID).OrderBy("id").Find(&jobs); err != nil {
+ return nil, err
+ }
+ return jobs, nil
+}
+
+func UpdateRunJob(ctx context.Context, job *ActionRunJob, cond builder.Cond, cols ...string) (int64, error) {
+ e := db.GetEngine(ctx)
+
+ sess := e.ID(job.ID)
+ if len(cols) > 0 {
+ sess.Cols(cols...)
+ }
+
+ if cond != nil {
+ sess.Where(cond)
+ }
+
+ affected, err := sess.Update(job)
+ if err != nil {
+ return 0, err
+ }
+
+ if affected == 0 || (!slices.Contains(cols, "status") && job.Status == 0) {
+ return affected, nil
+ }
+
+ if affected != 0 && slices.Contains(cols, "status") && job.Status.IsWaiting() {
+ // if the status of job changes to waiting again, increase tasks version.
+ if err := IncreaseTaskVersion(ctx, job.OwnerID, job.RepoID); err != nil {
+ return 0, err
+ }
+ }
+
+ if job.RunID == 0 {
+ var err error
+ if job, err = GetRunJobByID(ctx, job.ID); err != nil {
+ return 0, err
+ }
+ }
+
+ {
+ // Other goroutines may aggregate the status of the run and update it too.
+ // So we need to load the run and its jobs before updating the run.
+ run, err := GetRunByID(ctx, job.RunID)
+ if err != nil {
+ return 0, err
+ }
+ jobs, err := GetRunJobsByRunID(ctx, job.RunID)
+ if err != nil {
+ return 0, err
+ }
+ run.Status = aggregateJobStatus(jobs)
+ if run.Started.IsZero() && run.Status.IsRunning() {
+ run.Started = timeutil.TimeStampNow()
+ }
+ if run.Stopped.IsZero() && run.Status.IsDone() {
+ run.Stopped = timeutil.TimeStampNow()
+ }
+ if err := UpdateRun(ctx, run, "status", "started", "stopped"); err != nil {
+ return 0, fmt.Errorf("update run %d: %w", run.ID, err)
+ }
+ }
+
+ return affected, nil
+}
+
+func aggregateJobStatus(jobs []*ActionRunJob) Status {
+ allDone := true
+ allWaiting := true
+ hasFailure := false
+ for _, job := range jobs {
+ if !job.Status.IsDone() {
+ allDone = false
+ }
+ if job.Status != StatusWaiting && !job.Status.IsDone() {
+ allWaiting = false
+ }
+ if job.Status == StatusFailure || job.Status == StatusCancelled {
+ hasFailure = true
+ }
+ }
+ if allDone {
+ if hasFailure {
+ return StatusFailure
+ }
+ return StatusSuccess
+ }
+ if allWaiting {
+ return StatusWaiting
+ }
+ return StatusRunning
+}
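+
+// Worked examples (illustrative):
+//
+//	aggregateJobStatus([]*ActionRunJob{{Status: StatusSuccess}, {Status: StatusFailure}}) // => StatusFailure
+//	aggregateJobStatus([]*ActionRunJob{{Status: StatusSuccess}, {Status: StatusRunning}}) // => StatusRunning
+//	aggregateJobStatus([]*ActionRunJob{{Status: StatusWaiting}, {Status: StatusWaiting}}) // => StatusWaiting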
diff --git a/models/actions/run_job_list.go b/models/actions/run_job_list.go
new file mode 100644
index 0000000..6c5d3b3
--- /dev/null
+++ b/models/actions/run_job_list.go
@@ -0,0 +1,80 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/builder"
+)
+
+type ActionJobList []*ActionRunJob
+
+func (jobs ActionJobList) GetRunIDs() []int64 {
+ return container.FilterSlice(jobs, func(j *ActionRunJob) (int64, bool) {
+ return j.RunID, j.RunID != 0
+ })
+}
+
+func (jobs ActionJobList) LoadRuns(ctx context.Context, withRepo bool) error {
+ runIDs := jobs.GetRunIDs()
+ runs := make(map[int64]*ActionRun, len(runIDs))
+ if err := db.GetEngine(ctx).In("id", runIDs).Find(&runs); err != nil {
+ return err
+ }
+ for _, j := range jobs {
+ if j.RunID > 0 && j.Run == nil {
+ j.Run = runs[j.RunID]
+ }
+ }
+ if withRepo {
+ var runsList RunList = make([]*ActionRun, 0, len(runs))
+ for _, r := range runs {
+ runsList = append(runsList, r)
+ }
+ return runsList.LoadRepos(ctx)
+ }
+ return nil
+}
+
+func (jobs ActionJobList) LoadAttributes(ctx context.Context, withRepo bool) error {
+ return jobs.LoadRuns(ctx, withRepo)
+}
+
+type FindRunJobOptions struct {
+ db.ListOptions
+ RunID int64
+ RepoID int64
+ OwnerID int64
+ CommitSHA string
+ Statuses []Status
+ UpdatedBefore timeutil.TimeStamp
+}
+
+func (opts FindRunJobOptions) ToConds() builder.Cond {
+ cond := builder.NewCond()
+ if opts.RunID > 0 {
+ cond = cond.And(builder.Eq{"run_id": opts.RunID})
+ }
+ if opts.RepoID > 0 {
+ cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
+ }
+ if opts.OwnerID > 0 {
+ cond = cond.And(builder.Eq{"owner_id": opts.OwnerID})
+ }
+ if opts.CommitSHA != "" {
+ cond = cond.And(builder.Eq{"commit_sha": opts.CommitSHA})
+ }
+ if len(opts.Statuses) > 0 {
+ cond = cond.And(builder.In("status", opts.Statuses))
+ }
+ if opts.UpdatedBefore > 0 {
+ cond = cond.And(builder.Lt{"updated": opts.UpdatedBefore})
+ }
+ return cond
+}
diff --git a/models/actions/run_list.go b/models/actions/run_list.go
new file mode 100644
index 0000000..4046c7d
--- /dev/null
+++ b/models/actions/run_list.go
@@ -0,0 +1,138 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/container"
+ webhook_module "code.gitea.io/gitea/modules/webhook"
+
+ "xorm.io/builder"
+)
+
+type RunList []*ActionRun
+
+// GetUserIDs returns a slice of trigger user IDs
+func (runs RunList) GetUserIDs() []int64 {
+ return container.FilterSlice(runs, func(run *ActionRun) (int64, bool) {
+ return run.TriggerUserID, true
+ })
+}
+
+func (runs RunList) GetRepoIDs() []int64 {
+ return container.FilterSlice(runs, func(run *ActionRun) (int64, bool) {
+ return run.RepoID, true
+ })
+}
+
+func (runs RunList) LoadTriggerUser(ctx context.Context) error {
+ userIDs := runs.GetUserIDs()
+ users := make(map[int64]*user_model.User, len(userIDs))
+ if err := db.GetEngine(ctx).In("id", userIDs).Find(&users); err != nil {
+ return err
+ }
+ for _, run := range runs {
+ if run.TriggerUserID == user_model.ActionsUserID {
+ run.TriggerUser = user_model.NewActionsUser()
+ } else {
+ run.TriggerUser = users[run.TriggerUserID]
+ if run.TriggerUser == nil {
+ run.TriggerUser = user_model.NewGhostUser()
+ }
+ }
+ }
+ return nil
+}
+
+func (runs RunList) LoadRepos(ctx context.Context) error {
+ repoIDs := runs.GetRepoIDs()
+ repos, err := repo_model.GetRepositoriesMapByIDs(ctx, repoIDs)
+ if err != nil {
+ return err
+ }
+ for _, run := range runs {
+ run.Repo = repos[run.RepoID]
+ }
+ return nil
+}
+
+type FindRunOptions struct {
+ db.ListOptions
+ RepoID int64
+ OwnerID int64
+ WorkflowID string
+ Ref string // the commit/tag/… that caused this workflow
+ TriggerUserID int64
+ TriggerEvent webhook_module.HookEventType
+ Approved bool // not util.OptionalBool; it only takes effect when true
+ Status []Status
+}
+
+func (opts FindRunOptions) ToConds() builder.Cond {
+ cond := builder.NewCond()
+ if opts.RepoID > 0 {
+ cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
+ }
+ if opts.OwnerID > 0 {
+ cond = cond.And(builder.Eq{"owner_id": opts.OwnerID})
+ }
+ if opts.WorkflowID != "" {
+ cond = cond.And(builder.Eq{"workflow_id": opts.WorkflowID})
+ }
+ if opts.TriggerUserID > 0 {
+ cond = cond.And(builder.Eq{"trigger_user_id": opts.TriggerUserID})
+ }
+ if opts.Approved {
+ cond = cond.And(builder.Gt{"approved_by": 0})
+ }
+ if len(opts.Status) > 0 {
+ cond = cond.And(builder.In("status", opts.Status))
+ }
+ if opts.Ref != "" {
+ cond = cond.And(builder.Eq{"ref": opts.Ref})
+ }
+ if opts.TriggerEvent != "" {
+ cond = cond.And(builder.Eq{"trigger_event": opts.TriggerEvent})
+ }
+ return cond
+}
+
+func (opts FindRunOptions) ToOrders() string {
+ return "`id` DESC"
+}
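+
+// Usage sketch (illustrative; the workflow name and ref are placeholders):
+// find the still-active runs of one workflow on one ref, newest first.
+//
+//	runs, total, err := db.FindAndCount[ActionRun](ctx, FindRunOptions{
+//		RepoID:     repo.ID,
+//		WorkflowID: "build.yml",
+//		Ref:        "refs/heads/main",
+//		Status:     []Status{StatusRunning, StatusWaiting},
+//	})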
+
+type StatusInfo struct {
+ Status int
+ DisplayedStatus string
+}
+
+// GetStatusInfoList returns a slice of StatusInfo
+func GetStatusInfoList(ctx context.Context) []StatusInfo {
+ // same as those in aggregateJobStatus
+ allStatus := []Status{StatusSuccess, StatusFailure, StatusWaiting, StatusRunning}
+ statusInfoList := make([]StatusInfo, 0, len(allStatus))
+ for _, s := range allStatus {
+ statusInfoList = append(statusInfoList, StatusInfo{
+ Status: int(s),
+ DisplayedStatus: s.String(),
+ })
+ }
+ return statusInfoList
+}
+
+// GetActors returns the users who triggered runs in the given repository
+func GetActors(ctx context.Context, repoID int64) ([]*user_model.User, error) {
+ actors := make([]*user_model.User, 0, 10)
+
+ return actors, db.GetEngine(ctx).Where(builder.In("id", builder.Select("`action_run`.trigger_user_id").From("`action_run`").
+ GroupBy("`action_run`.trigger_user_id").
+ Where(builder.Eq{"`action_run`.repo_id": repoID}))).
+ Cols("id", "name", "full_name", "avatar", "avatar_email", "use_custom_avatar").
+ OrderBy(user_model.GetOrderByName()).
+ Find(&actors)
+}
diff --git a/models/actions/runner.go b/models/actions/runner.go
new file mode 100644
index 0000000..175f211
--- /dev/null
+++ b/models/actions/runner.go
@@ -0,0 +1,362 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "context"
+ "encoding/binary"
+ "encoding/hex"
+ "fmt"
+ "strings"
+ "time"
+
+ auth_model "code.gitea.io/gitea/models/auth"
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/shared/types"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/optional"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/translation"
+ "code.gitea.io/gitea/modules/util"
+
+ runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
+ "xorm.io/builder"
+)
+
+// ActionRunner represents a runner machine
+//
+// It can be:
+// 1. global runner, OwnerID is 0 and RepoID is 0
+// 2. org/user level runner, OwnerID is org/user ID and RepoID is 0
+// 3. repo level runner, OwnerID is 0 and RepoID is repo ID
+//
+// Please note that it's not acceptable to have both OwnerID and RepoID be non-zero,
+// or it will be complicated to find runners belonging to a specific owner.
+// For example, conditions like `OwnerID = 1` will also return runner {OwnerID: 1, RepoID: 1},
+// but it's a repo level runner, not an org/user level runner.
+// To avoid this, make it clear with {OwnerID: 0, RepoID: 1} for repo level runners.
+type ActionRunner struct {
+ ID int64
+ UUID string `xorm:"CHAR(36) UNIQUE"`
+ Name string `xorm:"VARCHAR(255)"`
+ Version string `xorm:"VARCHAR(64)"`
+ OwnerID int64 `xorm:"index"`
+ Owner *user_model.User `xorm:"-"`
+ RepoID int64 `xorm:"index"`
+ Repo *repo_model.Repository `xorm:"-"`
+ Description string `xorm:"TEXT"`
+ Base int // 0: native, 1: docker, 2: virtual machine
+ RepoRange string // glob match which repositories could use this runner
+
+ Token string `xorm:"-"`
+ TokenHash string `xorm:"UNIQUE"` // sha256 of token
+ TokenSalt string
+ // TokenLastEight string `xorm:"token_last_eight"` // it's unnecessary because we don't find runners by token
+
+ LastOnline timeutil.TimeStamp `xorm:"index"`
+ LastActive timeutil.TimeStamp `xorm:"index"`
+
+ // AgentLabels stores the labels defined in the state file (default: .runner) of `act_runner`
+ AgentLabels []string `xorm:"TEXT"`
+
+ Created timeutil.TimeStamp `xorm:"created"`
+ Updated timeutil.TimeStamp `xorm:"updated"`
+ Deleted timeutil.TimeStamp `xorm:"deleted"`
+}
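+
+// Scope examples (illustrative IDs):
+//
+//	ActionRunner{OwnerID: 0, RepoID: 0} // global runner
+//	ActionRunner{OwnerID: 5, RepoID: 0} // org/user level runner of owner 5
+//	ActionRunner{OwnerID: 0, RepoID: 7} // repo level runner of repo 7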
+
+const (
+ RunnerOfflineTime = time.Minute
+ RunnerIdleTime = 10 * time.Second
+)
+
+// BelongsToOwnerName returns the name of the owner the runner belongs to.
+// Before calling, the caller should guarantee that all attributes are loaded.
+func (r *ActionRunner) BelongsToOwnerName() string {
+ if r.RepoID != 0 {
+ return r.Repo.FullName()
+ }
+ if r.OwnerID != 0 {
+ return r.Owner.Name
+ }
+ return ""
+}
+
+func (r *ActionRunner) BelongsToOwnerType() types.OwnerType {
+ if r.RepoID != 0 {
+ return types.OwnerTypeRepository
+ }
+ if r.OwnerID != 0 {
+ if r.Owner.Type == user_model.UserTypeOrganization {
+ return types.OwnerTypeOrganization
+ } else if r.Owner.Type == user_model.UserTypeIndividual {
+ return types.OwnerTypeIndividual
+ }
+ }
+ return types.OwnerTypeSystemGlobal
+}
+
+// If the logic here changes, FindRunnerOptions.ToConds should be modified as well.
+func (r *ActionRunner) Status() runnerv1.RunnerStatus {
+ if time.Since(r.LastOnline.AsTime()) > RunnerOfflineTime {
+ return runnerv1.RunnerStatus_RUNNER_STATUS_OFFLINE
+ }
+ if time.Since(r.LastActive.AsTime()) > RunnerIdleTime {
+ return runnerv1.RunnerStatus_RUNNER_STATUS_IDLE
+ }
+ return runnerv1.RunnerStatus_RUNNER_STATUS_ACTIVE
+}
+
+func (r *ActionRunner) StatusName() string {
+ return strings.ToLower(strings.TrimPrefix(r.Status().String(), "RUNNER_STATUS_"))
+}
+
+func (r *ActionRunner) StatusLocaleName(lang translation.Locale) string {
+ return lang.TrString("actions.runners.status." + r.StatusName())
+}
+
+func (r *ActionRunner) IsOnline() bool {
+ status := r.Status()
+ return status == runnerv1.RunnerStatus_RUNNER_STATUS_IDLE || status == runnerv1.RunnerStatus_RUNNER_STATUS_ACTIVE
+}
+
+// Editable checks if the runner is editable by the user
+func (r *ActionRunner) Editable(ownerID, repoID int64) bool {
+ if ownerID == 0 && repoID == 0 {
+ return true
+ }
+ if ownerID > 0 && r.OwnerID == ownerID {
+ return true
+ }
+ return repoID > 0 && r.RepoID == repoID
+}
+
+// LoadAttributes loads the attributes of the runner
+func (r *ActionRunner) LoadAttributes(ctx context.Context) error {
+ if r.OwnerID > 0 {
+ var user user_model.User
+ has, err := db.GetEngine(ctx).ID(r.OwnerID).Get(&user)
+ if err != nil {
+ return err
+ }
+ if has {
+ r.Owner = &user
+ }
+ }
+ if r.RepoID > 0 {
+ var repo repo_model.Repository
+ has, err := db.GetEngine(ctx).ID(r.RepoID).Get(&repo)
+ if err != nil {
+ return err
+ }
+ if has {
+ r.Repo = &repo
+ }
+ }
+ return nil
+}
+
+func (r *ActionRunner) GenerateToken() (err error) {
+ r.Token, r.TokenSalt, r.TokenHash, _, err = generateSaltedToken()
+ return err
+}
+
+// UpdateSecret updates the hash based on the specified token. It does not
+// ensure that the runner's UUID matches the first 16 bytes of the token.
+func (r *ActionRunner) UpdateSecret(token string) error {
+ saltBytes, err := util.CryptoRandomBytes(16)
+ if err != nil {
+ return fmt.Errorf("CryptoRandomBytes %v", err)
+ }
+
+ salt := hex.EncodeToString(saltBytes)
+
+ r.Token = token
+ r.TokenSalt = salt
+ r.TokenHash = auth_model.HashToken(token, salt)
+ return nil
+}
+
+func init() {
+ db.RegisterModel(&ActionRunner{})
+}
+
+type FindRunnerOptions struct {
+ db.ListOptions
+ RepoID int64
+ OwnerID int64 // it will be ignored if RepoID is set
+ Sort string
+ Filter string
+ IsOnline optional.Option[bool]
+ WithAvailable bool // include not only the runners that belong to the repo/owner, but also the runners it can use
+}
+
+func (opts FindRunnerOptions) ToConds() builder.Cond {
+ cond := builder.NewCond()
+
+ if opts.RepoID > 0 {
+ c := builder.NewCond().And(builder.Eq{"repo_id": opts.RepoID})
+ if opts.WithAvailable {
+ c = c.Or(builder.Eq{"owner_id": builder.Select("owner_id").From("repository").Where(builder.Eq{"id": opts.RepoID})})
+ c = c.Or(builder.Eq{"repo_id": 0, "owner_id": 0})
+ }
+ cond = cond.And(c)
+ } else if opts.OwnerID > 0 { // OwnerID is ignored if RepoID is set
+ c := builder.NewCond().And(builder.Eq{"owner_id": opts.OwnerID})
+ if opts.WithAvailable {
+ c = c.Or(builder.Eq{"repo_id": 0, "owner_id": 0})
+ }
+ cond = cond.And(c)
+ }
+
+ if opts.Filter != "" {
+ cond = cond.And(builder.Like{"name", opts.Filter})
+ }
+
+ if opts.IsOnline.Has() {
+ if opts.IsOnline.Value() {
+ cond = cond.And(builder.Gt{"last_online": time.Now().Add(-RunnerOfflineTime).Unix()})
+ } else {
+ cond = cond.And(builder.Lte{"last_online": time.Now().Add(-RunnerOfflineTime).Unix()})
+ }
+ }
+ return cond
+}
+
+func (opts FindRunnerOptions) ToOrders() string {
+ switch opts.Sort {
+ case "online":
+ return "last_online DESC"
+ case "offline":
+ return "last_online ASC"
+ case "alphabetically":
+ return "name ASC"
+ case "reversealphabetically":
+ return "name DESC"
+ case "newest":
+ return "id DESC"
+ case "oldest":
+ return "id ASC"
+ }
+ return "last_online DESC"
+}
+
+// GetRunnerByUUID returns a runner via uuid
+func GetRunnerByUUID(ctx context.Context, uuid string) (*ActionRunner, error) {
+ var runner ActionRunner
+ has, err := db.GetEngine(ctx).Where("uuid=?", uuid).Get(&runner)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, fmt.Errorf("runner with uuid %s: %w", uuid, util.ErrNotExist)
+ }
+ return &runner, nil
+}
+
+// GetRunnerByID returns a runner via id
+func GetRunnerByID(ctx context.Context, id int64) (*ActionRunner, error) {
+ var runner ActionRunner
+ has, err := db.GetEngine(ctx).Where("id=?", id).Get(&runner)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, fmt.Errorf("runner with id %d: %w", id, util.ErrNotExist)
+ }
+ return &runner, nil
+}
+
+// UpdateRunner updates runner's information.
+func UpdateRunner(ctx context.Context, r *ActionRunner, cols ...string) error {
+ e := db.GetEngine(ctx)
+ var err error
+ if len(cols) == 0 {
+ _, err = e.ID(r.ID).AllCols().Update(r)
+ } else {
+ _, err = e.ID(r.ID).Cols(cols...).Update(r)
+ }
+ return err
+}
+
+// DeleteRunner deletes a runner by given ID.
+func DeleteRunner(ctx context.Context, id int64) error {
+ runner, err := GetRunnerByID(ctx, id)
+ if err != nil {
+ return err
+ }
+
+ // Replace the UUID, which was either based on the secret's first 16 bytes or a UUIDv4,
+ // with a sequence of 8 0xff bytes followed by the little-endian version of the record's
+ // identifier. This will prevent the deleted record's identifier from colliding with any
+ // new record.
+ b := make([]byte, 8)
+ binary.LittleEndian.PutUint64(b, uint64(id))
+ runner.UUID = fmt.Sprintf("ffffffff-ffff-ffff-%.2x%.2x-%.2x%.2x%.2x%.2x%.2x%.2x",
+ b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7])
+
+ err = UpdateRunner(ctx, runner, "UUID")
+ if err != nil {
+ return err
+ }
+
+ _, err = db.DeleteByID[ActionRunner](ctx, id)
+ return err
+}
+
+// CreateRunner creates new runner.
+func CreateRunner(ctx context.Context, t *ActionRunner) error {
+ if t.OwnerID != 0 && t.RepoID != 0 {
+ // It's trying to create a runner that belongs to a repository, but OwnerID has been set accidentally.
+ // Remove OwnerID to avoid confusion; it's not worth returning an error here.
+ t.OwnerID = 0
+ }
+ return db.Insert(ctx, t)
+}
+
+func CountRunnersWithoutBelongingOwner(ctx context.Context) (int64, error) {
+ // Only affects action runners where an owner ID is set, as action runners
+ // can also be created on a repository.
+ return db.GetEngine(ctx).Table("action_runner").
+ Join("LEFT", "`user`", "`action_runner`.owner_id = `user`.id").
+ Where("`action_runner`.owner_id != ?", 0).
+ And(builder.IsNull{"`user`.id"}).
+ Count(new(ActionRunner))
+}
+
+func FixRunnersWithoutBelongingOwner(ctx context.Context) (int64, error) {
+ subQuery := builder.Select("`action_runner`.id").
+ From("`action_runner`").
+ Join("LEFT", "`user`", "`action_runner`.owner_id = `user`.id").
+ Where(builder.Neq{"`action_runner`.owner_id": 0}).
+ And(builder.IsNull{"`user`.id"})
+ b := builder.Delete(builder.In("id", subQuery)).From("`action_runner`")
+ res, err := db.GetEngine(ctx).Exec(b)
+ if err != nil {
+ return 0, err
+ }
+ return res.RowsAffected()
+}
+
+func CountRunnersWithoutBelongingRepo(ctx context.Context) (int64, error) {
+ return db.GetEngine(ctx).Table("action_runner").
+ Join("LEFT", "`repository`", "`action_runner`.repo_id = `repository`.id").
+ Where("`action_runner`.repo_id != ?", 0).
+ And(builder.IsNull{"`repository`.id"}).
+ Count(new(ActionRunner))
+}
+
+func FixRunnersWithoutBelongingRepo(ctx context.Context) (int64, error) {
+ subQuery := builder.Select("`action_runner`.id").
+ From("`action_runner`").
+ Join("LEFT", "`repository`", "`action_runner`.repo_id = `repository`.id").
+ Where(builder.Neq{"`action_runner`.repo_id": 0}).
+ And(builder.IsNull{"`repository`.id"})
+ b := builder.Delete(builder.In("id", subQuery)).From("`action_runner`")
+ res, err := db.GetEngine(ctx).Exec(b)
+ if err != nil {
+ return 0, err
+ }
+ return res.RowsAffected()
+}
diff --git a/models/actions/runner_list.go b/models/actions/runner_list.go
new file mode 100644
index 0000000..3ef8ebb
--- /dev/null
+++ b/models/actions/runner_list.go
@@ -0,0 +1,65 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/container"
+)
+
+type RunnerList []*ActionRunner
+
+// GetUserIDs returns a slice of the runners' owner IDs
+func (runners RunnerList) GetUserIDs() []int64 {
+ return container.FilterSlice(runners, func(runner *ActionRunner) (int64, bool) {
+ return runner.OwnerID, runner.OwnerID != 0
+ })
+}
+
+func (runners RunnerList) LoadOwners(ctx context.Context) error {
+ userIDs := runners.GetUserIDs()
+ users := make(map[int64]*user_model.User, len(userIDs))
+ if err := db.GetEngine(ctx).In("id", userIDs).Find(&users); err != nil {
+ return err
+ }
+ for _, runner := range runners {
+ if runner.OwnerID > 0 && runner.Owner == nil {
+ runner.Owner = users[runner.OwnerID]
+ }
+ }
+ return nil
+}
+
+func (runners RunnerList) getRepoIDs() []int64 {
+ return container.FilterSlice(runners, func(runner *ActionRunner) (int64, bool) {
+ return runner.RepoID, runner.RepoID > 0
+ })
+}
+
+func (runners RunnerList) LoadRepos(ctx context.Context) error {
+ repoIDs := runners.getRepoIDs()
+ repos := make(map[int64]*repo_model.Repository, len(repoIDs))
+ if err := db.GetEngine(ctx).In("id", repoIDs).Find(&repos); err != nil {
+ return err
+ }
+
+ for _, runner := range runners {
+ if runner.RepoID > 0 && runner.Repo == nil {
+ runner.Repo = repos[runner.RepoID]
+ }
+ }
+ return nil
+}
+
+func (runners RunnerList) LoadAttributes(ctx context.Context) error {
+ if err := runners.LoadOwners(ctx); err != nil {
+ return err
+ }
+
+ return runners.LoadRepos(ctx)
+}
diff --git a/models/actions/runner_test.go b/models/actions/runner_test.go
new file mode 100644
index 0000000..26ef4c4
--- /dev/null
+++ b/models/actions/runner_test.go
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "encoding/binary"
+ "fmt"
+ "testing"
+
+ auth_model "code.gitea.io/gitea/models/auth"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// TestUpdateSecret checks that ActionRunner.UpdateSecret() sets the Token,
+// TokenSalt and TokenHash fields based on the specified token.
+func TestUpdateSecret(t *testing.T) {
+ runner := ActionRunner{}
+ token := "0123456789012345678901234567890123456789"
+
+ err := runner.UpdateSecret(token)
+
+ require.NoError(t, err)
+ assert.Equal(t, token, runner.Token)
+ assert.Regexp(t, "^[0-9a-f]{32}$", runner.TokenSalt)
+ assert.Equal(t, runner.TokenHash, auth_model.HashToken(token, runner.TokenSalt))
+}
+
+func TestDeleteRunner(t *testing.T) {
+ const recordID = 12345678
+ require.NoError(t, unittest.PrepareTestDatabase())
+ before := unittest.AssertExistsAndLoadBean(t, &ActionRunner{ID: recordID})
+
+ err := DeleteRunner(db.DefaultContext, recordID)
+ require.NoError(t, err)
+
+ var after ActionRunner
+ found, err := db.GetEngine(db.DefaultContext).ID(recordID).Unscoped().Get(&after)
+ require.NoError(t, err)
+ assert.True(t, found)
+
+ // Most fields (namely Name, Version, OwnerID, RepoID, Description, Base, RepoRange,
+ // TokenHash, TokenSalt, LastOnline, LastActive, AgentLabels and Created) are unaffected
+ assert.Equal(t, before.Name, after.Name)
+ assert.Equal(t, before.Version, after.Version)
+ assert.Equal(t, before.OwnerID, after.OwnerID)
+ assert.Equal(t, before.RepoID, after.RepoID)
+ assert.Equal(t, before.Description, after.Description)
+ assert.Equal(t, before.Base, after.Base)
+ assert.Equal(t, before.RepoRange, after.RepoRange)
+ assert.Equal(t, before.TokenHash, after.TokenHash)
+ assert.Equal(t, before.TokenSalt, after.TokenSalt)
+ assert.Equal(t, before.LastOnline, after.LastOnline)
+ assert.Equal(t, before.LastActive, after.LastActive)
+ assert.Equal(t, before.AgentLabels, after.AgentLabels)
+ assert.Equal(t, before.Created, after.Created)
+
+ // Deleted contains a value
+ assert.NotNil(t, after.Deleted)
+
+ // UUID was modified
+ assert.NotEqual(t, before.UUID, after.UUID)
+ // UUID starts with ffffffff-ffff-ffff-
+ assert.Equal(t, "ffffffff-ffff-ffff-", after.UUID[:19])
+ // UUID ends with LE binary representation of record ID
+ idAsBinary := make([]byte, 8)
+ binary.LittleEndian.PutUint64(idAsBinary, uint64(recordID))
+ idAsHexadecimal := fmt.Sprintf("%.2x%.2x-%.2x%.2x%.2x%.2x%.2x%.2x", idAsBinary[0],
+ idAsBinary[1], idAsBinary[2], idAsBinary[3], idAsBinary[4], idAsBinary[5],
+ idAsBinary[6], idAsBinary[7])
+ assert.Equal(t, idAsHexadecimal, after.UUID[19:])
+}
diff --git a/models/actions/runner_token.go b/models/actions/runner_token.go
new file mode 100644
index 0000000..fd6ba7e
--- /dev/null
+++ b/models/actions/runner_token.go
@@ -0,0 +1,120 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+)
+
+// ActionRunnerToken represents a runner token
+//
+// It can be:
+// 1. global token, OwnerID is 0 and RepoID is 0
+// 2. org/user level token, OwnerID is org/user ID and RepoID is 0
+// 3. repo level token, OwnerID is 0 and RepoID is repo ID
+//
+// Please note that it's not acceptable to have both OwnerID and RepoID be non-zero,
+// or it will be complicated to find tokens belonging to a specific owner.
+// For example, conditions like `OwnerID = 1` will also return token {OwnerID: 1, RepoID: 1},
+// but it's a repo level token, not an org/user level token.
+// To avoid this, make it clear with {OwnerID: 0, RepoID: 1} for repo level tokens.
+type ActionRunnerToken struct {
+ ID int64
+ Token string `xorm:"UNIQUE"`
+ OwnerID int64 `xorm:"index"`
+ Owner *user_model.User `xorm:"-"`
+ RepoID int64 `xorm:"index"`
+ Repo *repo_model.Repository `xorm:"-"`
+ IsActive bool // true means it can be used
+
+ Created timeutil.TimeStamp `xorm:"created"`
+ Updated timeutil.TimeStamp `xorm:"updated"`
+ Deleted timeutil.TimeStamp `xorm:"deleted"`
+}
+
+func init() {
+ db.RegisterModel(new(ActionRunnerToken))
+}
+
+// GetRunnerToken returns an action runner token by the token string
+func GetRunnerToken(ctx context.Context, token string) (*ActionRunnerToken, error) {
+ var runnerToken ActionRunnerToken
+ has, err := db.GetEngine(ctx).Where("token=?", token).Get(&runnerToken)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, fmt.Errorf("runner token %q: %w", token, util.ErrNotExist)
+ }
+ return &runnerToken, nil
+}
+
+// UpdateRunnerToken updates runner token information.
+func UpdateRunnerToken(ctx context.Context, r *ActionRunnerToken, cols ...string) (err error) {
+ e := db.GetEngine(ctx)
+
+ if len(cols) == 0 {
+ _, err = e.ID(r.ID).AllCols().Update(r)
+ } else {
+ _, err = e.ID(r.ID).Cols(cols...).Update(r)
+ }
+ return err
+}
+
+// NewRunnerToken creates a new active runner token and invalidates all old tokens.
+// ownerID is ignored and treated as 0 if repoID is non-zero.
+func NewRunnerToken(ctx context.Context, ownerID, repoID int64) (*ActionRunnerToken, error) {
+ if ownerID != 0 && repoID != 0 {
+ // It's trying to create a runner token that belongs to a repository, but OwnerID has been set accidentally.
+ // Remove OwnerID to avoid confusion; it's not worth returning an error here.
+ ownerID = 0
+ }
+
+ token, err := util.CryptoRandomString(40)
+ if err != nil {
+ return nil, err
+ }
+ runnerToken := &ActionRunnerToken{
+ OwnerID: ownerID,
+ RepoID: repoID,
+ IsActive: true,
+ Token: token,
+ }
+
+ return runnerToken, db.WithTx(ctx, func(ctx context.Context) error {
+ if _, err := db.GetEngine(ctx).Where("owner_id = ? AND repo_id = ?", ownerID, repoID).Cols("is_active").Update(&ActionRunnerToken{
+ IsActive: false,
+ }); err != nil {
+ return err
+ }
+
+ _, err = db.GetEngine(ctx).Insert(runnerToken)
+ return err
+ })
+}
+
+// GetLatestRunnerToken returns the latest runner token
+func GetLatestRunnerToken(ctx context.Context, ownerID, repoID int64) (*ActionRunnerToken, error) {
+ if ownerID != 0 && repoID != 0 {
+ // It's trying to get a runner token that belongs to a repository, but OwnerID has been set accidentally.
+ // Remove OwnerID to avoid confusion; it's not worth returning an error here.
+ ownerID = 0
+ }
+
+ var runnerToken ActionRunnerToken
+ has, err := db.GetEngine(ctx).Where("owner_id=? AND repo_id=?", ownerID, repoID).
+ OrderBy("id DESC").Get(&runnerToken)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, fmt.Errorf("runner token: %w", util.ErrNotExist)
+ }
+ return &runnerToken, nil
+}
diff --git a/models/actions/runner_token_test.go b/models/actions/runner_token_test.go
new file mode 100644
index 0000000..35c9a9d
--- /dev/null
+++ b/models/actions/runner_token_test.go
@@ -0,0 +1,41 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGetLatestRunnerToken(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ token := unittest.AssertExistsAndLoadBean(t, &ActionRunnerToken{ID: 3})
+ expectedToken, err := GetLatestRunnerToken(db.DefaultContext, 1, 0)
+ require.NoError(t, err)
+ assert.EqualValues(t, expectedToken, token)
+}
+
+func TestNewRunnerToken(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ token, err := NewRunnerToken(db.DefaultContext, 1, 0)
+ require.NoError(t, err)
+ expectedToken, err := GetLatestRunnerToken(db.DefaultContext, 1, 0)
+ require.NoError(t, err)
+ assert.EqualValues(t, expectedToken, token)
+}
+
+func TestUpdateRunnerToken(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ token := unittest.AssertExistsAndLoadBean(t, &ActionRunnerToken{ID: 3})
+ token.IsActive = true
+ require.NoError(t, UpdateRunnerToken(db.DefaultContext, token))
+ expectedToken, err := GetLatestRunnerToken(db.DefaultContext, 1, 0)
+ require.NoError(t, err)
+ assert.EqualValues(t, expectedToken, token)
+}
diff --git a/models/actions/schedule.go b/models/actions/schedule.go
new file mode 100644
index 0000000..acb9961
--- /dev/null
+++ b/models/actions/schedule.go
@@ -0,0 +1,140 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/timeutil"
+ webhook_module "code.gitea.io/gitea/modules/webhook"
+)
+
+// ActionSchedule represents a schedule of a workflow file
+type ActionSchedule struct {
+ ID int64
+ Title string
+ Specs []string
+ RepoID int64 `xorm:"index"`
+ Repo *repo_model.Repository `xorm:"-"`
+ OwnerID int64 `xorm:"index"`
+ WorkflowID string
+ TriggerUserID int64
+ TriggerUser *user_model.User `xorm:"-"`
+ Ref string
+ CommitSHA string
+ Event webhook_module.HookEventType
+ EventPayload string `xorm:"LONGTEXT"`
+ Content []byte
+ Created timeutil.TimeStamp `xorm:"created"`
+ Updated timeutil.TimeStamp `xorm:"updated"`
+}
+
+func init() {
+ db.RegisterModel(new(ActionSchedule))
+}
+
+// GetSchedulesMapByIDs returns the schedules by the given IDs.
+func GetSchedulesMapByIDs(ctx context.Context, ids []int64) (map[int64]*ActionSchedule, error) {
+ schedules := make(map[int64]*ActionSchedule, len(ids))
+ return schedules, db.GetEngine(ctx).In("id", ids).Find(&schedules)
+}
+
+// GetReposMapByIDs returns the repositories by the given IDs.
+func GetReposMapByIDs(ctx context.Context, ids []int64) (map[int64]*repo_model.Repository, error) {
+ repos := make(map[int64]*repo_model.Repository, len(ids))
+ return repos, db.GetEngine(ctx).In("id", ids).Find(&repos)
+}
+
+// CreateScheduleTask creates new schedule tasks.
+func CreateScheduleTask(ctx context.Context, rows []*ActionSchedule) error {
+ // Return early if there are no rows to insert
+ if len(rows) == 0 {
+ return nil
+ }
+
+ // Begin transaction
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ // Loop through each schedule row
+ for _, row := range rows {
+ // Create new schedule row
+ if err = db.Insert(ctx, row); err != nil {
+ return err
+ }
+
+ // Loop through each schedule spec and create a new spec row
+ now := time.Now()
+
+ for _, spec := range row.Specs {
+ specRow := &ActionScheduleSpec{
+ RepoID: row.RepoID,
+ ScheduleID: row.ID,
+ Spec: spec,
+ }
+ // Parse the spec and check for errors
+ schedule, err := specRow.Parse()
+ if err != nil {
+ continue // skip to the next spec if there's an error
+ }
+
+ specRow.Next = timeutil.TimeStamp(schedule.Next(now).Unix())
+
+ // Insert the new schedule spec row
+ if err = db.Insert(ctx, specRow); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Commit transaction
+ return committer.Commit()
+}
+
+func DeleteScheduleTaskByRepo(ctx context.Context, id int64) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if _, err := db.GetEngine(ctx).Delete(&ActionSchedule{RepoID: id}); err != nil {
+ return err
+ }
+
+ if _, err := db.GetEngine(ctx).Delete(&ActionScheduleSpec{RepoID: id}); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
+
+func CleanRepoScheduleTasks(ctx context.Context, repo *repo_model.Repository, cancelPreviousJobs bool) error {
+ // If Actions is disabled while schedule tasks exist, this removes the outdated schedule tasks.
+ // There is no other place to do this, because app.ini is changed manually.
+ if err := DeleteScheduleTaskByRepo(ctx, repo.ID); err != nil {
+ return fmt.Errorf("DeleteCronTaskByRepo: %v", err)
+ }
+ if cancelPreviousJobs {
+ // cancel running cron jobs of this repository and delete old schedules
+ if err := CancelPreviousJobs(
+ ctx,
+ repo.ID,
+ repo.DefaultBranch,
+ "",
+ webhook_module.HookEventSchedule,
+ ); err != nil {
+ return fmt.Errorf("CancelPreviousJobs: %v", err)
+ }
+ }
+ return nil
+}
diff --git a/models/actions/schedule_list.go b/models/actions/schedule_list.go
new file mode 100644
index 0000000..5361b94
--- /dev/null
+++ b/models/actions/schedule_list.go
@@ -0,0 +1,83 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/container"
+
+ "xorm.io/builder"
+)
+
+type ScheduleList []*ActionSchedule
+
+// GetUserIDs returns a slice of trigger user IDs
+func (schedules ScheduleList) GetUserIDs() []int64 {
+ return container.FilterSlice(schedules, func(schedule *ActionSchedule) (int64, bool) {
+ return schedule.TriggerUserID, true
+ })
+}
+
+func (schedules ScheduleList) GetRepoIDs() []int64 {
+ return container.FilterSlice(schedules, func(schedule *ActionSchedule) (int64, bool) {
+ return schedule.RepoID, true
+ })
+}
+
+func (schedules ScheduleList) LoadTriggerUser(ctx context.Context) error {
+ userIDs := schedules.GetUserIDs()
+ users := make(map[int64]*user_model.User, len(userIDs))
+ if err := db.GetEngine(ctx).In("id", userIDs).Find(&users); err != nil {
+ return err
+ }
+ for _, schedule := range schedules {
+ if schedule.TriggerUserID == user_model.ActionsUserID {
+ schedule.TriggerUser = user_model.NewActionsUser()
+ } else {
+ schedule.TriggerUser = users[schedule.TriggerUserID]
+ if schedule.TriggerUser == nil {
+ schedule.TriggerUser = user_model.NewGhostUser()
+ }
+ }
+ }
+ return nil
+}
+
+func (schedules ScheduleList) LoadRepos(ctx context.Context) error {
+ repoIDs := schedules.GetRepoIDs()
+ repos, err := repo_model.GetRepositoriesMapByIDs(ctx, repoIDs)
+ if err != nil {
+ return err
+ }
+ for _, schedule := range schedules {
+ schedule.Repo = repos[schedule.RepoID]
+ }
+ return nil
+}
+
+type FindScheduleOptions struct {
+ db.ListOptions
+ RepoID int64
+ OwnerID int64
+}
+
+func (opts FindScheduleOptions) ToConds() builder.Cond {
+ cond := builder.NewCond()
+ if opts.RepoID > 0 {
+ cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
+ }
+ if opts.OwnerID > 0 {
+ cond = cond.And(builder.Eq{"owner_id": opts.OwnerID})
+ }
+
+ return cond
+}
+
+func (opts FindScheduleOptions) ToOrders() string {
+ return "`id` DESC"
+}
diff --git a/models/actions/schedule_spec.go b/models/actions/schedule_spec.go
new file mode 100644
index 0000000..923e5f7
--- /dev/null
+++ b/models/actions/schedule_spec.go
@@ -0,0 +1,73 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "context"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "github.com/robfig/cron/v3"
+)
+
+// ActionScheduleSpec represents a schedule spec of a workflow file
+type ActionScheduleSpec struct {
+ ID int64
+ RepoID int64 `xorm:"index"`
+ Repo *repo_model.Repository `xorm:"-"`
+ ScheduleID int64 `xorm:"index"`
+ Schedule *ActionSchedule `xorm:"-"`
+
+ // Next is the next time the job will run, or the zero time if Cron has not
+ // been started or this entry's schedule is unsatisfiable
+ Next timeutil.TimeStamp `xorm:"index"`
+ // Prev is the last time this job was run, or the zero time if never.
+ Prev timeutil.TimeStamp
+ Spec string
+
+ Created timeutil.TimeStamp `xorm:"created"`
+ Updated timeutil.TimeStamp `xorm:"updated"`
+}
+
+// Parse parses the spec and returns a cron.Schedule
+// Unlike the default cron parser, Parse uses UTC timezone as the default if none is specified.
+func (s *ActionScheduleSpec) Parse() (cron.Schedule, error) {
+ parser := cron.NewParser(cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor)
+ schedule, err := parser.Parse(s.Spec)
+ if err != nil {
+ return nil, err
+ }
+
+ // If the spec has specified a timezone, use it
+ if strings.HasPrefix(s.Spec, "TZ=") || strings.HasPrefix(s.Spec, "CRON_TZ=") {
+ return schedule, nil
+ }
+
+ specSchedule, ok := schedule.(*cron.SpecSchedule)
+ // If it's not a spec schedule, like "@every 5m", timezone is not relevant
+ if !ok {
+ return schedule, nil
+ }
+
+ // Set the timezone to UTC
+ specSchedule.Location = time.UTC
+ return specSchedule, nil
+}
+
+func init() {
+ db.RegisterModel(new(ActionScheduleSpec))
+}
+
+func UpdateScheduleSpec(ctx context.Context, spec *ActionScheduleSpec, cols ...string) error {
+ sess := db.GetEngine(ctx).ID(spec.ID)
+ if len(cols) > 0 {
+ sess.Cols(cols...)
+ }
+ _, err := sess.Update(spec)
+ return err
+}
diff --git a/models/actions/schedule_spec_list.go b/models/actions/schedule_spec_list.go
new file mode 100644
index 0000000..4dc43f9
--- /dev/null
+++ b/models/actions/schedule_spec_list.go
@@ -0,0 +1,105 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/modules/container"
+
+ "xorm.io/builder"
+)
+
+type SpecList []*ActionScheduleSpec
+
+func (specs SpecList) GetScheduleIDs() []int64 {
+ return container.FilterSlice(specs, func(spec *ActionScheduleSpec) (int64, bool) {
+ return spec.ScheduleID, true
+ })
+}
+
+func (specs SpecList) LoadSchedules(ctx context.Context) error {
+ if len(specs) == 0 {
+ return nil
+ }
+
+ scheduleIDs := specs.GetScheduleIDs()
+ schedules, err := GetSchedulesMapByIDs(ctx, scheduleIDs)
+ if err != nil {
+ return err
+ }
+ for _, spec := range specs {
+ spec.Schedule = schedules[spec.ScheduleID]
+ }
+
+ repoIDs := specs.GetRepoIDs()
+ repos, err := GetReposMapByIDs(ctx, repoIDs)
+ if err != nil {
+ return err
+ }
+ for _, spec := range specs {
+ spec.Repo = repos[spec.RepoID]
+ }
+
+ return nil
+}
+
+func (specs SpecList) GetRepoIDs() []int64 {
+ return container.FilterSlice(specs, func(spec *ActionScheduleSpec) (int64, bool) {
+ return spec.RepoID, true
+ })
+}
+
+func (specs SpecList) LoadRepos(ctx context.Context) error {
+ if len(specs) == 0 {
+ return nil
+ }
+
+ repoIDs := specs.GetRepoIDs()
+ repos, err := repo_model.GetRepositoriesMapByIDs(ctx, repoIDs)
+ if err != nil {
+ return err
+ }
+ for _, spec := range specs {
+ spec.Repo = repos[spec.RepoID]
+ }
+ return nil
+}
+
+type FindSpecOptions struct {
+ db.ListOptions
+ RepoID int64
+ Next int64
+}
+
+func (opts FindSpecOptions) ToConds() builder.Cond {
+ cond := builder.NewCond()
+ if opts.RepoID > 0 {
+ cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
+ }
+
+ if opts.Next > 0 {
+ cond = cond.And(builder.Lte{"next": opts.Next})
+ }
+
+ return cond
+}
+
+func (opts FindSpecOptions) ToOrders() string {
+ return "`id` DESC"
+}
+
+func FindSpecs(ctx context.Context, opts FindSpecOptions) (SpecList, int64, error) {
+ specs, total, err := db.FindAndCount[ActionScheduleSpec](ctx, opts)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ if err := SpecList(specs).LoadSchedules(ctx); err != nil {
+ return nil, 0, err
+ }
+ return specs, total, nil
+}
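+
+// Poll sketch (illustrative): a scheduler tick could fetch every spec that is
+// due and recompute its next occurrence via Parse.
+//
+//	specs, _, err := FindSpecs(ctx, FindSpecOptions{Next: time.Now().Unix()})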
diff --git a/models/actions/schedule_spec_test.go b/models/actions/schedule_spec_test.go
new file mode 100644
index 0000000..0c26fce
--- /dev/null
+++ b/models/actions/schedule_spec_test.go
@@ -0,0 +1,71 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestActionScheduleSpec_Parse(t *testing.T) {
+ // Mock the local timezone is not UTC
+ local := time.Local
+ tz, err := time.LoadLocation("Asia/Shanghai")
+ require.NoError(t, err)
+ defer func() {
+ time.Local = local
+ }()
+ time.Local = tz
+
+ now, err := time.Parse(time.RFC3339, "2024-07-31T15:47:55+08:00")
+ require.NoError(t, err)
+
+ tests := []struct {
+ name string
+ spec string
+ want string
+ wantErr assert.ErrorAssertionFunc
+ }{
+ {
+ name: "regular",
+ spec: "0 10 * * *",
+ want: "2024-07-31T10:00:00Z",
+ wantErr: assert.NoError,
+ },
+ {
+ name: "invalid",
+ spec: "0 10 * *",
+ want: "",
+ wantErr: assert.Error,
+ },
+ {
+ name: "with timezone",
+ spec: "TZ=America/New_York 0 10 * * *",
+ want: "2024-07-31T14:00:00Z",
+ wantErr: assert.NoError,
+ },
+ {
+ name: "timezone irrelevant",
+ spec: "@every 5m",
+ want: "2024-07-31T07:52:55Z",
+ wantErr: assert.NoError,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ s := &ActionScheduleSpec{
+ Spec: tt.spec,
+ }
+ got, err := s.Parse()
+ tt.wantErr(t, err)
+
+ if err == nil {
+ assert.Equal(t, tt.want, got.Next(now).UTC().Format(time.RFC3339))
+ }
+ })
+ }
+}
diff --git a/models/actions/status.go b/models/actions/status.go
new file mode 100644
index 0000000..eda2234
--- /dev/null
+++ b/models/actions/status.go
@@ -0,0 +1,104 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "code.gitea.io/gitea/modules/translation"
+
+ runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
+)
+
+// Status represents the status of ActionRun, ActionRunJob, ActionTask, or ActionTaskStep
+type Status int
+
+const (
+ StatusUnknown Status = iota // 0, consistent with runnerv1.Result_RESULT_UNSPECIFIED
+ StatusSuccess // 1, consistent with runnerv1.Result_RESULT_SUCCESS
+ StatusFailure // 2, consistent with runnerv1.Result_RESULT_FAILURE
+ StatusCancelled // 3, consistent with runnerv1.Result_RESULT_CANCELLED
+ StatusSkipped // 4, consistent with runnerv1.Result_RESULT_SKIPPED
+ StatusWaiting // 5, isn't a runnerv1.Result
+ StatusRunning // 6, isn't a runnerv1.Result
+ StatusBlocked // 7, isn't a runnerv1.Result
+)
+
+var statusNames = map[Status]string{
+ StatusUnknown: "unknown",
+ StatusWaiting: "waiting",
+ StatusRunning: "running",
+ StatusSuccess: "success",
+ StatusFailure: "failure",
+ StatusCancelled: "cancelled",
+ StatusSkipped: "skipped",
+ StatusBlocked: "blocked",
+}
+
+// String returns the string name of the Status
+func (s Status) String() string {
+ return statusNames[s]
+}
+
+// LocaleString returns the locale string name of the Status
+func (s Status) LocaleString(lang translation.Locale) string {
+ return lang.TrString("actions.status." + s.String())
+}
+
+// IsDone returns whether the Status is final
+func (s Status) IsDone() bool {
+ return s.In(StatusSuccess, StatusFailure, StatusCancelled, StatusSkipped)
+}
+
+// HasRun returns whether the Status is a result of running
+func (s Status) HasRun() bool {
+ return s.In(StatusSuccess, StatusFailure)
+}
+
+func (s Status) IsUnknown() bool {
+ return s == StatusUnknown
+}
+
+func (s Status) IsSuccess() bool {
+ return s == StatusSuccess
+}
+
+func (s Status) IsFailure() bool {
+ return s == StatusFailure
+}
+
+func (s Status) IsCancelled() bool {
+ return s == StatusCancelled
+}
+
+func (s Status) IsSkipped() bool {
+ return s == StatusSkipped
+}
+
+func (s Status) IsWaiting() bool {
+ return s == StatusWaiting
+}
+
+func (s Status) IsRunning() bool {
+ return s == StatusRunning
+}
+
+func (s Status) IsBlocked() bool {
+ return s == StatusBlocked
+}
+
+// In returns whether s is one of the given statuses
+func (s Status) In(statuses ...Status) bool {
+ for _, v := range statuses {
+ if s == v {
+ return true
+ }
+ }
+ return false
+}
+
+func (s Status) AsResult() runnerv1.Result {
+ if s.IsDone() {
+ return runnerv1.Result(s)
+ }
+ return runnerv1.Result_RESULT_UNSPECIFIED
+}
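+
+// Mapping sketch (illustrative): done statuses convert one-to-one because the
+// numeric values are kept consistent with runnerv1.Result; anything still in
+// flight collapses to unspecified.
+//
+//	StatusSuccess.AsResult() // => runnerv1.Result_RESULT_SUCCESS
+//	StatusRunning.AsResult() // => runnerv1.Result_RESULT_UNSPECIFIED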
diff --git a/models/actions/task.go b/models/actions/task.go
new file mode 100644
index 0000000..8d41a63
--- /dev/null
+++ b/models/actions/task.go
@@ -0,0 +1,527 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "context"
+ "crypto/subtle"
+ "fmt"
+ "time"
+
+ auth_model "code.gitea.io/gitea/models/auth"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unit"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
+ lru "github.com/hashicorp/golang-lru/v2"
+ "github.com/nektos/act/pkg/jobparser"
+ "google.golang.org/protobuf/types/known/timestamppb"
+ "xorm.io/builder"
+)
+
+// ActionTask represents one dispatch of a job to a runner
+type ActionTask struct {
+ ID int64
+ JobID int64
+ Job *ActionRunJob `xorm:"-"`
+ Steps []*ActionTaskStep `xorm:"-"`
+ Attempt int64
+ RunnerID int64 `xorm:"index"`
+ Status Status `xorm:"index"`
+ Started timeutil.TimeStamp `xorm:"index"`
+ Stopped timeutil.TimeStamp `xorm:"index(stopped_log_expired)"`
+
+ RepoID int64 `xorm:"index"`
+ OwnerID int64 `xorm:"index"`
+ CommitSHA string `xorm:"index"`
+ IsForkPullRequest bool
+
+ Token string `xorm:"-"`
+ TokenHash string `xorm:"UNIQUE"` // sha256 of token
+ TokenSalt string
+ TokenLastEight string `xorm:"index token_last_eight"`
+
+ LogFilename string // file name of the log
+ LogInStorage bool // whether to read the log from storage or from the database
+ LogLength int64 // line count
+ LogSize int64 // blob size
+ LogIndexes LogIndexes `xorm:"LONGBLOB"` // maps line number to buffer offset
+ LogExpired bool `xorm:"index(stopped_log_expired)"` // logs that are too old will be deleted
+
+ Created timeutil.TimeStamp `xorm:"created"`
+ Updated timeutil.TimeStamp `xorm:"updated index"`
+}
+
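+// successfulTokenTaskCache maps a verified runner token to its task ID so
+// that hot-path lookups in GetRunningTaskByToken can skip the salted-hash
+// comparison; it stays nil when setting.SuccessfulTokensCacheSize is zero.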
+var successfulTokenTaskCache *lru.Cache[string, any]
+
+func init() {
+ db.RegisterModel(new(ActionTask), func() error {
+ if setting.SuccessfulTokensCacheSize > 0 {
+ var err error
+ successfulTokenTaskCache, err = lru.New[string, any](setting.SuccessfulTokensCacheSize)
+ if err != nil {
+ return fmt.Errorf("unable to allocate Task cache: %v", err)
+ }
+ } else {
+ successfulTokenTaskCache = nil
+ }
+ return nil
+ })
+}
+
+func (task *ActionTask) Duration() time.Duration {
+ return calculateDuration(task.Started, task.Stopped, task.Status)
+}
+
+func (task *ActionTask) IsStopped() bool {
+ return task.Stopped > 0
+}
+
+func (task *ActionTask) GetRunLink() string {
+ if task.Job == nil || task.Job.Run == nil {
+ return ""
+ }
+ return task.Job.Run.Link()
+}
+
+func (task *ActionTask) GetCommitLink() string {
+ if task.Job == nil || task.Job.Run == nil || task.Job.Run.Repo == nil {
+ return ""
+ }
+ return task.Job.Run.Repo.CommitLink(task.CommitSHA)
+}
+
+func (task *ActionTask) GetRepoName() string {
+ if task.Job == nil || task.Job.Run == nil || task.Job.Run.Repo == nil {
+ return ""
+ }
+ return task.Job.Run.Repo.FullName()
+}
+
+func (task *ActionTask) GetRepoLink() string {
+ if task.Job == nil || task.Job.Run == nil || task.Job.Run.Repo == nil {
+ return ""
+ }
+ return task.Job.Run.Repo.Link()
+}
+
+func (task *ActionTask) LoadJob(ctx context.Context) error {
+ if task.Job == nil {
+ job, err := GetRunJobByID(ctx, task.JobID)
+ if err != nil {
+ return err
+ }
+ task.Job = job
+ }
+ return nil
+}
+
+// LoadAttributes loads the job and its steps if they are not already loaded
+func (task *ActionTask) LoadAttributes(ctx context.Context) error {
+ if task == nil {
+ return nil
+ }
+ if err := task.LoadJob(ctx); err != nil {
+ return err
+ }
+
+ if err := task.Job.LoadAttributes(ctx); err != nil {
+ return err
+ }
+
+ if task.Steps == nil { // be careful, an empty slice (not nil) also means loaded
+ steps, err := GetTaskStepsByTaskID(ctx, task.ID)
+ if err != nil {
+ return err
+ }
+ task.Steps = steps
+ }
+
+ return nil
+}
+
+func (task *ActionTask) GenerateToken() (err error) {
+ task.Token, task.TokenSalt, task.TokenHash, task.TokenLastEight, err = generateSaltedToken()
+ return err
+}
+
+func GetTaskByID(ctx context.Context, id int64) (*ActionTask, error) {
+ var task ActionTask
+ has, err := db.GetEngine(ctx).Where("id=?", id).Get(&task)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, fmt.Errorf("task with id %d: %w", id, util.ErrNotExist)
+ }
+
+ return &task, nil
+}
+
+func GetRunningTaskByToken(ctx context.Context, token string) (*ActionTask, error) {
+ errNotExist := fmt.Errorf("task with token %q: %w", token, util.ErrNotExist)
+ if token == "" {
+ return nil, errNotExist
+ }
+ // A token is 40 hexadecimal characters, the same length as a SHA1 sum
+ if len(token) != 40 {
+ return nil, errNotExist
+ }
+ for _, x := range []byte(token) {
+ if x < '0' || (x > '9' && x < 'a') || x > 'f' {
+ return nil, errNotExist
+ }
+ }
+
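+ // Look the task up via the indexed last eight characters first, then verify
+ // each candidate's full salted hash in constant time; a cached hit skips
+ // the hash comparison entirely.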
+ lastEight := token[len(token)-8:]
+
+ if id := getTaskIDFromCache(token); id > 0 {
+ task := &ActionTask{
+ TokenLastEight: lastEight,
+ }
+ // Re-get the task from the db in case it has been deleted in the intervening period
+ has, err := db.GetEngine(ctx).ID(id).Get(task)
+ if err != nil {
+ return nil, err
+ }
+ if has {
+ return task, nil
+ }
+ successfulTokenTaskCache.Remove(token)
+ }
+
+ var tasks []*ActionTask
+ err := db.GetEngine(ctx).Where("token_last_eight = ? AND status = ?", lastEight, StatusRunning).Find(&tasks)
+ if err != nil {
+ return nil, err
+ } else if len(tasks) == 0 {
+ return nil, errNotExist
+ }
+
+ for _, t := range tasks {
+ tempHash := auth_model.HashToken(token, t.TokenSalt)
+ if subtle.ConstantTimeCompare([]byte(t.TokenHash), []byte(tempHash)) == 1 {
+ if successfulTokenTaskCache != nil {
+ successfulTokenTaskCache.Add(token, t.ID)
+ }
+ return t, nil
+ }
+ }
+ return nil, errNotExist
+}
+
+func CreateTaskForRunner(ctx context.Context, runner *ActionRunner) (*ActionTask, bool, error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, false, err
+ }
+ defer committer.Close()
+
+ e := db.GetEngine(ctx)
+
+ jobCond := builder.NewCond()
+ if runner.RepoID != 0 {
+ jobCond = builder.Eq{"repo_id": runner.RepoID}
+ } else if runner.OwnerID != 0 {
+ jobCond = builder.In("repo_id", builder.Select("`repository`.id").From("repository").
+ Join("INNER", "repo_unit", "`repository`.id = `repo_unit`.repo_id").
+ Where(builder.Eq{"`repository`.owner_id": runner.OwnerID, "`repo_unit`.type": unit.TypeActions}))
+ }
+ if jobCond.IsValid() {
+ jobCond = builder.In("run_id", builder.Select("id").From("action_run").Where(jobCond))
+ }
+
+ var jobs []*ActionRunJob
+ if err := e.Where("task_id=? AND status=?", 0, StatusWaiting).And(jobCond).Asc("updated", "id").Find(&jobs); err != nil {
+ return nil, false, err
+ }
+
+ // TODO: a more efficient way to filter labels
+ var job *ActionRunJob
+ log.Trace("runner labels: %v", runner.AgentLabels)
+ for _, v := range jobs {
+ if isSubset(runner.AgentLabels, v.RunsOn) {
+ job = v
+ break
+ }
+ }
+ if job == nil {
+ return nil, false, nil
+ }
+ if err := job.LoadAttributes(ctx); err != nil {
+ return nil, false, err
+ }
+
+ now := timeutil.TimeStampNow()
+ job.Attempt++
+ job.Started = now
+ job.Status = StatusRunning
+
+ task := &ActionTask{
+ JobID: job.ID,
+ Attempt: job.Attempt,
+ RunnerID: runner.ID,
+ Started: now,
+ Status: StatusRunning,
+ RepoID: job.RepoID,
+ OwnerID: job.OwnerID,
+ CommitSHA: job.CommitSHA,
+ IsForkPullRequest: job.IsForkPullRequest,
+ }
+ if err := task.GenerateToken(); err != nil {
+ return nil, false, err
+ }
+
+ var workflowJob *jobparser.Job
+ if gots, err := jobparser.Parse(job.WorkflowPayload); err != nil {
+ return nil, false, fmt.Errorf("parse workflow of job %d: %w", job.ID, err)
+ } else if len(gots) != 1 {
+ return nil, false, fmt.Errorf("workflow of job %d: not single workflow", job.ID)
+ } else { //nolint:revive
+ _, workflowJob = gots[0].Job()
+ }
+
+ if _, err := e.Insert(task); err != nil {
+ return nil, false, err
+ }
+
+ task.LogFilename = logFileName(job.Run.Repo.FullName(), task.ID)
+ if err := UpdateTask(ctx, task, "log_filename"); err != nil {
+ return nil, false, err
+ }
+
+ if len(workflowJob.Steps) > 0 {
+ steps := make([]*ActionTaskStep, len(workflowJob.Steps))
+ for i, v := range workflowJob.Steps {
+ name, _ := util.SplitStringAtByteN(v.String(), 255)
+ steps[i] = &ActionTaskStep{
+ Name: name,
+ TaskID: task.ID,
+ Index: int64(i),
+ RepoID: task.RepoID,
+ Status: StatusWaiting,
+ }
+ }
+ if _, err := e.Insert(steps); err != nil {
+ return nil, false, err
+ }
+ task.Steps = steps
+ }
+
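+ // Claim the job with an optimistic update: the extra "task_id = 0"
+ // condition means only one concurrent runner can win it. If no row was
+ // updated, another runner got there first and the deferred Close rolls
+ // this transaction back.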
+ job.TaskID = task.ID
+ if n, err := UpdateRunJob(ctx, job, builder.Eq{"task_id": 0}); err != nil {
+ return nil, false, err
+ } else if n != 1 {
+ return nil, false, nil
+ }
+
+ task.Job = job
+
+ if err := committer.Commit(); err != nil {
+ return nil, false, err
+ }
+
+ return task, true, nil
+}
+
+func UpdateTask(ctx context.Context, task *ActionTask, cols ...string) error {
+ sess := db.GetEngine(ctx).ID(task.ID)
+ if len(cols) > 0 {
+ sess.Cols(cols...)
+ }
+ _, err := sess.Update(task)
+ return err
+}
+
+// UpdateTaskByState updates the task by the state.
+// It will always update the task if the state is not final, even if there is no change,
+// so that ActionTask.Updated advances and the task is not judged to be a zombie task.
+func UpdateTaskByState(ctx context.Context, state *runnerv1.TaskState) (*ActionTask, error) {
+ stepStates := map[int64]*runnerv1.StepState{}
+ for _, v := range state.Steps {
+ stepStates[v.Id] = v
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer committer.Close()
+
+ e := db.GetEngine(ctx)
+
+ task := &ActionTask{}
+ if has, err := e.ID(state.Id).Get(task); err != nil {
+ return nil, err
+ } else if !has {
+ return nil, util.ErrNotExist
+ }
+
+ if task.Status.IsDone() {
+ // the state is final, do nothing
+ return task, nil
+ }
+
+ // A state.Result other than unspecified means the task has finished
+ if state.Result != runnerv1.Result_RESULT_UNSPECIFIED {
+ task.Status = Status(state.Result)
+ task.Stopped = timeutil.TimeStamp(state.StoppedAt.AsTime().Unix())
+ if err := UpdateTask(ctx, task, "status", "stopped"); err != nil {
+ return nil, err
+ }
+ if _, err := UpdateRunJob(ctx, &ActionRunJob{
+ ID: task.JobID,
+ Status: task.Status,
+ Stopped: task.Stopped,
+ }, nil); err != nil {
+ return nil, err
+ }
+ } else {
+ // Force update ActionTask.Updated to avoid the task being judged as a zombie task
+ task.Updated = timeutil.TimeStampNow()
+ if err := UpdateTask(ctx, task, "updated"); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := task.LoadAttributes(ctx); err != nil {
+ return nil, err
+ }
+
+ for _, step := range task.Steps {
+ var result runnerv1.Result
+ if v, ok := stepStates[step.Index]; ok {
+ result = v.Result
+ step.LogIndex = v.LogIndex
+ step.LogLength = v.LogLength
+ step.Started = convertTimestamp(v.StartedAt)
+ step.Stopped = convertTimestamp(v.StoppedAt)
+ }
+ if result != runnerv1.Result_RESULT_UNSPECIFIED {
+ step.Status = Status(result)
+ } else if step.Started != 0 {
+ step.Status = StatusRunning
+ }
+ if _, err := e.ID(step.ID).Update(step); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := committer.Commit(); err != nil {
+ return nil, err
+ }
+
+ return task, nil
+}
+
+func StopTask(ctx context.Context, taskID int64, status Status) error {
+ if !status.IsDone() {
+ return fmt.Errorf("cannot stop task with status %v", status)
+ }
+ e := db.GetEngine(ctx)
+
+ task := &ActionTask{}
+ if has, err := e.ID(taskID).Get(task); err != nil {
+ return err
+ } else if !has {
+ return util.ErrNotExist
+ }
+ if task.Status.IsDone() {
+ return nil
+ }
+
+ now := timeutil.TimeStampNow()
+ task.Status = status
+ task.Stopped = now
+ if _, err := UpdateRunJob(ctx, &ActionRunJob{
+ ID: task.JobID,
+ Status: task.Status,
+ Stopped: task.Stopped,
+ }, nil); err != nil {
+ return err
+ }
+
+ if err := UpdateTask(ctx, task, "status", "stopped"); err != nil {
+ return err
+ }
+
+ if err := task.LoadAttributes(ctx); err != nil {
+ return err
+ }
+
+ for _, step := range task.Steps {
+ if !step.Status.IsDone() {
+ step.Status = status
+ if step.Started == 0 {
+ step.Started = now
+ }
+ step.Stopped = now
+ }
+ if _, err := e.ID(step.ID).Update(step); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func FindOldTasksToExpire(ctx context.Context, olderThan timeutil.TimeStamp, limit int) ([]*ActionTask, error) {
+ e := db.GetEngine(ctx)
+
+ tasks := make([]*ActionTask, 0, limit)
+ // Check "stopped > 0" to avoid deleting tasks that are still running
+ return tasks, e.Where("stopped > 0 AND stopped < ? AND log_expired = ?", olderThan, false).
+ Limit(limit).
+ Find(&tasks)
+}
+
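+// isSubset reports whether every element of subset occurs in set. It is used
+// above to match jobs to runners: a runner labelled ["linux", "amd64", "docker"]
+// may take a job whose RunsOn is ["linux", "amd64"], but not one that also
+// requires "gpu".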
+func isSubset(set, subset []string) bool {
+ m := make(container.Set[string], len(set))
+ for _, v := range set {
+ m.Add(v)
+ }
+
+ for _, v := range subset {
+ if !m.Contains(v) {
+ return false
+ }
+ }
+ return true
+}
+
+func convertTimestamp(timestamp *timestamppb.Timestamp) timeutil.TimeStamp {
+ if timestamp.GetSeconds() == 0 && timestamp.GetNanos() == 0 {
+ return timeutil.TimeStamp(0)
+ }
+ return timeutil.TimeStamp(timestamp.AsTime().Unix())
+}
+
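+// logFileName builds the per-task log path, fanning tasks out over 256
+// subdirectories via taskID%256; task 298 in "owner/repo" would get
+// "owner/repo/2a/298.log", plus ".zst" when zstd log compression is enabled.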
+func logFileName(repoFullName string, taskID int64) string {
+ ret := fmt.Sprintf("%s/%02x/%d.log", repoFullName, taskID%256, taskID)
+
+ if setting.Actions.LogCompression.IsZstd() {
+ ret += ".zst"
+ }
+
+ return ret
+}
+
+func getTaskIDFromCache(token string) int64 {
+ if successfulTokenTaskCache == nil {
+ return 0
+ }
+ tInterface, ok := successfulTokenTaskCache.Get(token)
+ if !ok {
+ return 0
+ }
+ t, ok := tInterface.(int64)
+ if !ok {
+ return 0
+ }
+ return t
+}
diff --git a/models/actions/task_list.go b/models/actions/task_list.go
new file mode 100644
index 0000000..df4b43c
--- /dev/null
+++ b/models/actions/task_list.go
@@ -0,0 +1,87 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/builder"
+)
+
+type TaskList []*ActionTask
+
+func (tasks TaskList) GetJobIDs() []int64 {
+ return container.FilterSlice(tasks, func(t *ActionTask) (int64, bool) {
+ return t.JobID, t.JobID != 0
+ })
+}
+
+func (tasks TaskList) LoadJobs(ctx context.Context) error {
+ jobIDs := tasks.GetJobIDs()
+ jobs := make(map[int64]*ActionRunJob, len(jobIDs))
+ if err := db.GetEngine(ctx).In("id", jobIDs).Find(&jobs); err != nil {
+ return err
+ }
+ for _, t := range tasks {
+ if t.JobID > 0 && t.Job == nil {
+ t.Job = jobs[t.JobID]
+ }
+ }
+
+ // TODO: Replace with "ActionJobList(maps.Values(jobs))" once available
+ var jobsList ActionJobList = make([]*ActionRunJob, 0, len(jobs))
+ for _, j := range jobs {
+ jobsList = append(jobsList, j)
+ }
+ return jobsList.LoadAttributes(ctx, true)
+}
+
+func (tasks TaskList) LoadAttributes(ctx context.Context) error {
+ return tasks.LoadJobs(ctx)
+}
+
+type FindTaskOptions struct {
+ db.ListOptions
+ RepoID int64
+ OwnerID int64
+ CommitSHA string
+ Status Status
+ UpdatedBefore timeutil.TimeStamp
+ StartedBefore timeutil.TimeStamp
+ RunnerID int64
+}
+
+func (opts FindTaskOptions) ToConds() builder.Cond {
+ cond := builder.NewCond()
+ if opts.RepoID > 0 {
+ cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
+ }
+ if opts.OwnerID > 0 {
+ cond = cond.And(builder.Eq{"owner_id": opts.OwnerID})
+ }
+ if opts.CommitSHA != "" {
+ cond = cond.And(builder.Eq{"commit_sha": opts.CommitSHA})
+ }
+ if opts.Status > StatusUnknown {
+ cond = cond.And(builder.Eq{"status": opts.Status})
+ }
+ if opts.UpdatedBefore > 0 {
+ cond = cond.And(builder.Lt{"updated": opts.UpdatedBefore})
+ }
+ if opts.StartedBefore > 0 {
+ cond = cond.And(builder.Lt{"started": opts.StartedBefore})
+ }
+ if opts.RunnerID > 0 {
+ cond = cond.And(builder.Eq{"runner_id": opts.RunnerID})
+ }
+ return cond
+}
+
+func (opts FindTaskOptions) ToOrders() string {
+ return "`id` DESC"
+}
diff --git a/models/actions/task_output.go b/models/actions/task_output.go
new file mode 100644
index 0000000..eab5b93
--- /dev/null
+++ b/models/actions/task_output.go
@@ -0,0 +1,55 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+)
+
+// ActionTaskOutput represents an output of ActionTask.
+// Outputs are bound to a task, which means that when a completed job is rerun,
+// the job's outputs are reset because the task is new.
+// This is by design, to avoid mixing the outputs of the old task with those of the new one.
+type ActionTaskOutput struct {
+ ID int64
+ TaskID int64 `xorm:"INDEX UNIQUE(task_id_output_key)"`
+ OutputKey string `xorm:"VARCHAR(255) UNIQUE(task_id_output_key)"`
+ OutputValue string `xorm:"MEDIUMTEXT"`
+}
+
+func init() {
+ db.RegisterModel(new(ActionTaskOutput))
+}
+
+// FindTaskOutputByTaskID returns the outputs of the task.
+func FindTaskOutputByTaskID(ctx context.Context, taskID int64) ([]*ActionTaskOutput, error) {
+ var outputs []*ActionTaskOutput
+ return outputs, db.GetEngine(ctx).Where("task_id=?", taskID).Find(&outputs)
+}
+
+// FindTaskOutputKeyByTaskID returns the keys of the outputs of the task.
+func FindTaskOutputKeyByTaskID(ctx context.Context, taskID int64) ([]string, error) {
+ var keys []string
+ return keys, db.GetEngine(ctx).Table(ActionTaskOutput{}).Where("task_id=?", taskID).Cols("output_key").Find(&keys)
+}
+
+// InsertTaskOutputIfNotExist inserts a new task output if it does not exist.
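+// Outputs are effectively write-once: the unique (task_id, output_key) index
+// backs this up, so a concurrent duplicate insert fails on the constraint
+// rather than silently overwriting an earlier value.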
+func InsertTaskOutputIfNotExist(ctx context.Context, taskID int64, key, value string) error {
+ return db.WithTx(ctx, func(ctx context.Context) error {
+ sess := db.GetEngine(ctx)
+ if exist, err := sess.Exist(&ActionTaskOutput{TaskID: taskID, OutputKey: key}); err != nil {
+ return err
+ } else if exist {
+ return nil
+ }
+ _, err := sess.Insert(&ActionTaskOutput{
+ TaskID: taskID,
+ OutputKey: key,
+ OutputValue: value,
+ })
+ return err
+ })
+}
diff --git a/models/actions/task_step.go b/models/actions/task_step.go
new file mode 100644
index 0000000..3af1fe3
--- /dev/null
+++ b/models/actions/task_step.go
@@ -0,0 +1,41 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "context"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/timeutil"
+)
+
+// ActionTaskStep represents a step of ActionTask
+type ActionTaskStep struct {
+ ID int64
+ Name string `xorm:"VARCHAR(255)"`
+ TaskID int64 `xorm:"index unique(task_index)"`
+ Index int64 `xorm:"index unique(task_index)"`
+ RepoID int64 `xorm:"index"`
+ Status Status `xorm:"index"`
+ LogIndex int64
+ LogLength int64
+ Started timeutil.TimeStamp
+ Stopped timeutil.TimeStamp
+ Created timeutil.TimeStamp `xorm:"created"`
+ Updated timeutil.TimeStamp `xorm:"updated"`
+}
+
+func (step *ActionTaskStep) Duration() time.Duration {
+ return calculateDuration(step.Started, step.Stopped, step.Status)
+}
+
+func init() {
+ db.RegisterModel(new(ActionTaskStep))
+}
+
+func GetTaskStepsByTaskID(ctx context.Context, taskID int64) ([]*ActionTaskStep, error) {
+ var steps []*ActionTaskStep
+ return steps, db.GetEngine(ctx).Where("task_id=?", taskID).OrderBy("`index` ASC").Find(&steps)
+}
diff --git a/models/actions/tasks_version.go b/models/actions/tasks_version.go
new file mode 100644
index 0000000..d8df353
--- /dev/null
+++ b/models/actions/tasks_version.go
@@ -0,0 +1,105 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/timeutil"
+)
+
+// ActionTasksVersion records a per-scope version number for action tasks.
+// If both ownerID and repoID are zero, the scope is global.
+// If ownerID is non-zero and repoID is zero, the scope is the org (there are no user-level runners currently).
+// If ownerID is zero and repoID is non-zero, the scope is the repo.
+type ActionTasksVersion struct {
+ ID int64 `xorm:"pk autoincr"`
+ OwnerID int64 `xorm:"UNIQUE(owner_repo)"`
+ RepoID int64 `xorm:"INDEX UNIQUE(owner_repo)"`
+ Version int64
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"updated"`
+}
+
+func init() {
+ db.RegisterModel(new(ActionTasksVersion))
+}
+
+func GetTasksVersionByScope(ctx context.Context, ownerID, repoID int64) (int64, error) {
+ var tasksVersion ActionTasksVersion
+ has, err := db.GetEngine(ctx).Where("owner_id = ? AND repo_id = ?", ownerID, repoID).Get(&tasksVersion)
+ if err != nil {
+ return 0, err
+ } else if !has {
+ return 0, nil
+ }
+ return tasksVersion.Version, err
+}
+
+func insertTasksVersion(ctx context.Context, ownerID, repoID int64) (*ActionTasksVersion, error) {
+ tasksVersion := &ActionTasksVersion{
+ OwnerID: ownerID,
+ RepoID: repoID,
+ Version: 1,
+ }
+ if _, err := db.GetEngine(ctx).Insert(tasksVersion); err != nil {
+ return nil, err
+ }
+ return tasksVersion, nil
+}
+
+func increaseTasksVersionByScope(ctx context.Context, ownerID, repoID int64) error {
+ result, err := db.GetEngine(ctx).Exec("UPDATE action_tasks_version SET version = version + 1 WHERE owner_id = ? AND repo_id = ?", ownerID, repoID)
+ if err != nil {
+ return err
+ }
+ affected, err := result.RowsAffected()
+ if err != nil {
+ return err
+ }
+
+ if affected == 0 {
+ // If the UPDATE affected no rows, the version row does not exist yet
+ // (or the database is in an inconsistent state), so insert it here.
+ if _, err := insertTasksVersion(ctx, ownerID, repoID); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
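+// IncreaseTaskVersion bumps the global version together with the owner- and
+// repo-scoped versions (when non-zero) in one transaction, so that a single
+// cheap comparison against GetTasksVersionByScope can tell a poller whether
+// anything might have changed (an assumption about the callers; this file
+// only maintains the counters).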
+func IncreaseTaskVersion(ctx context.Context, ownerID, repoID int64) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ // 1. increase global
+ if err := increaseTasksVersionByScope(ctx, 0, 0); err != nil {
+ log.Error("IncreaseTasksVersionByScope(Global): %v", err)
+ return err
+ }
+
+ // 2. increase owner
+ if ownerID > 0 {
+ if err := increaseTasksVersionByScope(ctx, ownerID, 0); err != nil {
+ log.Error("IncreaseTasksVersionByScope(Owner): %v", err)
+ return err
+ }
+ }
+
+ // 3. increase repo
+ if repoID > 0 {
+ if err := increaseTasksVersionByScope(ctx, 0, repoID); err != nil {
+ log.Error("IncreaseTasksVersionByScope(Repo): %v", err)
+ return err
+ }
+ }
+
+ return committer.Commit()
+}
diff --git a/models/actions/utils.go b/models/actions/utils.go
new file mode 100644
index 0000000..1265794
--- /dev/null
+++ b/models/actions/utils.go
@@ -0,0 +1,84 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "time"
+
+ auth_model "code.gitea.io/gitea/models/auth"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+)
+
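+// generateSaltedToken returns (token, salt, hash, lastEight): a 40-hex-char
+// token built from 20 random bytes, a 10-char salt, the salted hash that is
+// what actually gets stored, and the trailing eight characters used as an
+// indexed lookup key.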
+func generateSaltedToken() (string, string, string, string, error) {
+ salt, err := util.CryptoRandomString(10)
+ if err != nil {
+ return "", "", "", "", err
+ }
+ buf, err := util.CryptoRandomBytes(20)
+ if err != nil {
+ return "", "", "", "", err
+ }
+ token := hex.EncodeToString(buf)
+ hash := auth_model.HashToken(token, salt)
+ return token, salt, hash, token[len(token)-8:], nil
+}
+
+/*
+LogIndexes is the index for mapping log line number to buffer offset.
+Because it uses varint encoding, it is impossible to predict its size.
+But we can make a simple estimate, assuming each log line is about 200 bytes:
+| lines | file size | index size |
+|-----------|---------------------|--------------------|
+| 100 | 20 KiB(20000) | 258 B(258) |
+| 1000 | 195 KiB(200000) | 2.9 KiB(2958) |
+| 10000 | 1.9 MiB(2000000) | 34 KiB(34715) |
+| 100000 | 19 MiB(20000000) | 386 KiB(394715) |
+| 1000000 | 191 MiB(200000000) | 4.1 MiB(4323626) |
+| 10000000 | 1.9 GiB(2000000000) | 47 MiB(49323626) |
+| 100000000 | 19 GiB(20000000000) | 490 MiB(513424280) |
+*/
+type LogIndexes []int64
+
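+// FromDB rebuilds the slice by reading signed varints (binary.ReadVarint)
+// until EOF, and ToDB is the exact inverse; the offsets [0, 150, 310], for
+// example, are stored as the three varints of those values back to back.
+// The method pair presumably satisfies xorm's custom-conversion interface
+// for the LONGBLOB column.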
+func (indexes *LogIndexes) FromDB(b []byte) error {
+ reader := bytes.NewReader(b)
+ for {
+ v, err := binary.ReadVarint(reader)
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ return nil
+ }
+ return fmt.Errorf("binary ReadVarint: %w", err)
+ }
+ *indexes = append(*indexes, v)
+ }
+}
+
+func (indexes *LogIndexes) ToDB() ([]byte, error) {
+ buf, i := make([]byte, binary.MaxVarintLen64*len(*indexes)), 0
+ for _, v := range *indexes {
+ n := binary.PutVarint(buf[i:], v)
+ i += n
+ }
+ return buf[:i], nil
+}
+
+var timeSince = time.Since
+
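+// calculateDuration returns stopped-started for finished statuses, and the
+// time elapsed since started (truncated to seconds) otherwise; timeSince is
+// a package variable so tests can stub the clock, as Test_calculateDuration does.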
+func calculateDuration(started, stopped timeutil.TimeStamp, status Status) time.Duration {
+ if started == 0 {
+ return 0
+ }
+ s := started.AsTime()
+ if status.IsDone() {
+ return stopped.AsTime().Sub(s)
+ }
+ return timeSince(s).Truncate(time.Second)
+}
diff --git a/models/actions/utils_test.go b/models/actions/utils_test.go
new file mode 100644
index 0000000..98c048d
--- /dev/null
+++ b/models/actions/utils_test.go
@@ -0,0 +1,90 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "math"
+ "testing"
+ "time"
+
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestLogIndexes_ToDB(t *testing.T) {
+ tests := []struct {
+ indexes LogIndexes
+ }{
+ {
+ indexes: []int64{1, 2, 0, -1, -2, math.MaxInt64, math.MinInt64},
+ },
+ }
+ for _, tt := range tests {
+ t.Run("", func(t *testing.T) {
+ got, err := tt.indexes.ToDB()
+ require.NoError(t, err)
+
+ indexes := LogIndexes{}
+ require.NoError(t, indexes.FromDB(got))
+
+ assert.Equal(t, tt.indexes, indexes)
+ })
+ }
+}
+
+func Test_calculateDuration(t *testing.T) {
+ oldTimeSince := timeSince
+ defer func() {
+ timeSince = oldTimeSince
+ }()
+
+ timeSince = func(t time.Time) time.Duration {
+ return timeutil.TimeStamp(1000).AsTime().Sub(t)
+ }
+ type args struct {
+ started timeutil.TimeStamp
+ stopped timeutil.TimeStamp
+ status Status
+ }
+ tests := []struct {
+ name string
+ args args
+ want time.Duration
+ }{
+ {
+ name: "unknown",
+ args: args{
+ started: 0,
+ stopped: 0,
+ status: StatusUnknown,
+ },
+ want: 0,
+ },
+ {
+ name: "running",
+ args: args{
+ started: 500,
+ stopped: 0,
+ status: StatusRunning,
+ },
+ want: 500 * time.Second,
+ },
+ {
+ name: "done",
+ args: args{
+ started: 500,
+ stopped: 600,
+ status: StatusSuccess,
+ },
+ want: 100 * time.Second,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ assert.Equalf(t, tt.want, calculateDuration(tt.args.started, tt.args.stopped, tt.args.status), "calculateDuration(%v, %v, %v)", tt.args.started, tt.args.stopped, tt.args.status)
+ })
+ }
+}
diff --git a/models/actions/variable.go b/models/actions/variable.go
new file mode 100644
index 0000000..d0f917d
--- /dev/null
+++ b/models/actions/variable.go
@@ -0,0 +1,139 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "context"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/builder"
+)
+
+// ActionVariable represents a variable that can be used in actions
+//
+// It can be:
+// 1. global variable, OwnerID is 0 and RepoID is 0
+// 2. org/user level variable, OwnerID is org/user ID and RepoID is 0
+// 3. repo level variable, OwnerID is 0 and RepoID is repo ID
+//
+// Please note that it's not acceptable to have both OwnerID and RepoID non-zero,
+// or it would be complicated to find variables belonging to a specific owner.
+// For example, the condition `OwnerID = 1` would also return the variable {OwnerID: 1, RepoID: 1},
+// which is a repo-level variable, not an org/user-level one.
+// To avoid this, repo-level variables are stored as {OwnerID: 0, RepoID: 1}.
+type ActionVariable struct {
+ ID int64 `xorm:"pk autoincr"`
+ OwnerID int64 `xorm:"UNIQUE(owner_repo_name)"`
+ RepoID int64 `xorm:"INDEX UNIQUE(owner_repo_name)"`
+ Name string `xorm:"UNIQUE(owner_repo_name) NOT NULL"`
+ Data string `xorm:"LONGTEXT NOT NULL"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created NOT NULL"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"updated"`
+}
+
+func init() {
+ db.RegisterModel(new(ActionVariable))
+}
+
+func InsertVariable(ctx context.Context, ownerID, repoID int64, name, data string) (*ActionVariable, error) {
+ if ownerID != 0 && repoID != 0 {
+ // It's trying to create a variable that belongs to a repository, but OwnerID has been set accidentally.
+ // Remove OwnerID to avoid confusion; it's not worth returning an error here.
+ ownerID = 0
+ }
+
+ variable := &ActionVariable{
+ OwnerID: ownerID,
+ RepoID: repoID,
+ Name: strings.ToUpper(name),
+ Data: data,
+ }
+ return variable, db.Insert(ctx, variable)
+}
+
+type FindVariablesOpts struct {
+ db.ListOptions
+ RepoID int64
+ OwnerID int64 // it will be ignored if RepoID is set
+ Name string
+}
+
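+// ToConds builds the filter: a non-zero RepoID selects repo-level variables
+// (owner_id forced to 0), otherwise OwnerID selects org/user-level ones, and
+// leaving both zero selects global variables.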
+func (opts FindVariablesOpts) ToConds() builder.Cond {
+ cond := builder.NewCond()
+ // Since we now support instance-level variables,
+ // there is no need to check for null values for `owner_id` and `repo_id`
+ cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
+ if opts.RepoID != 0 { // if RepoID is set
+ // ignore OwnerID and treat it as 0
+ cond = cond.And(builder.Eq{"owner_id": 0})
+ } else {
+ cond = cond.And(builder.Eq{"owner_id": opts.OwnerID})
+ }
+
+ if opts.Name != "" {
+ cond = cond.And(builder.Eq{"name": strings.ToUpper(opts.Name)})
+ }
+ return cond
+}
+
+func FindVariables(ctx context.Context, opts FindVariablesOpts) ([]*ActionVariable, error) {
+ return db.Find[ActionVariable](ctx, opts)
+}
+
+func UpdateVariable(ctx context.Context, variable *ActionVariable) (bool, error) {
+ count, err := db.GetEngine(ctx).ID(variable.ID).Cols("name", "data").
+ Update(&ActionVariable{
+ Name: variable.Name,
+ Data: variable.Data,
+ })
+ return count != 0, err
+}
+
+func DeleteVariable(ctx context.Context, id int64) error {
+ if _, err := db.DeleteByID[ActionVariable](ctx, id); err != nil {
+ return err
+ }
+ return nil
+}
+
+func GetVariablesOfRun(ctx context.Context, run *ActionRun) (map[string]string, error) {
+ variables := map[string]string{}
+
+ if err := run.LoadRepo(ctx); err != nil {
+ log.Error("LoadRepo: %v", err)
+ return nil, err
+ }
+
+ // Global
+ globalVariables, err := db.Find[ActionVariable](ctx, FindVariablesOpts{})
+ if err != nil {
+ log.Error("find global variables: %v", err)
+ return nil, err
+ }
+
+ // Org / User level
+ ownerVariables, err := db.Find[ActionVariable](ctx, FindVariablesOpts{OwnerID: run.Repo.OwnerID})
+ if err != nil {
+ log.Error("find variables of org: %d, error: %v", run.Repo.OwnerID, err)
+ return nil, err
+ }
+
+ // Repo level
+ repoVariables, err := db.Find[ActionVariable](ctx, FindVariablesOpts{RepoID: run.RepoID})
+ if err != nil {
+ log.Error("find variables of repo: %d, error: %v", run.RepoID, err)
+ return nil, err
+ }
+
+ // Level precedence: Repo > Org / User > Global
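+ // (appending later levels last means their map writes win, e.g. a
+ // repo-level DEPLOY_ENV overrides an org-level one, which in turn
+ // overrides a global one)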
+ for _, v := range append(globalVariables, append(ownerVariables, repoVariables...)...) {
+ variables[v.Name] = v.Data
+ }
+
+ return variables, nil
+}
diff --git a/models/activities/action.go b/models/activities/action.go
new file mode 100644
index 0000000..dd67b98
--- /dev/null
+++ b/models/activities/action.go
@@ -0,0 +1,777 @@
+// Copyright 2014 The Gogs Authors. All rights reserved.
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package activities
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "path"
+ "slices"
+ "strconv"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ "code.gitea.io/gitea/models/organization"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/base"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/builder"
+ "xorm.io/xorm/schemas"
+)
+
+// ActionType represents the type of an action.
+type ActionType int
+
+// Possible action types.
+const (
+ ActionCreateRepo ActionType = iota + 1 // 1
+ ActionRenameRepo // 2
+ ActionStarRepo // 3
+ ActionWatchRepo // 4
+ ActionCommitRepo // 5
+ ActionCreateIssue // 6
+ ActionCreatePullRequest // 7
+ ActionTransferRepo // 8
+ ActionPushTag // 9
+ ActionCommentIssue // 10
+ ActionMergePullRequest // 11
+ ActionCloseIssue // 12
+ ActionReopenIssue // 13
+ ActionClosePullRequest // 14
+ ActionReopenPullRequest // 15
+ ActionDeleteTag // 16
+ ActionDeleteBranch // 17
+ ActionMirrorSyncPush // 18
+ ActionMirrorSyncCreate // 19
+ ActionMirrorSyncDelete // 20
+ ActionApprovePullRequest // 21
+ ActionRejectPullRequest // 22
+ ActionCommentPull // 23
+ ActionPublishRelease // 24
+ ActionPullReviewDismissed // 25
+ ActionPullRequestReadyForReview // 26
+ ActionAutoMergePullRequest // 27
+)
+
+func (at ActionType) String() string {
+ switch at {
+ case ActionCreateRepo:
+ return "create_repo"
+ case ActionRenameRepo:
+ return "rename_repo"
+ case ActionStarRepo:
+ return "star_repo"
+ case ActionWatchRepo:
+ return "watch_repo"
+ case ActionCommitRepo:
+ return "commit_repo"
+ case ActionCreateIssue:
+ return "create_issue"
+ case ActionCreatePullRequest:
+ return "create_pull_request"
+ case ActionTransferRepo:
+ return "transfer_repo"
+ case ActionPushTag:
+ return "push_tag"
+ case ActionCommentIssue:
+ return "comment_issue"
+ case ActionMergePullRequest:
+ return "merge_pull_request"
+ case ActionCloseIssue:
+ return "close_issue"
+ case ActionReopenIssue:
+ return "reopen_issue"
+ case ActionClosePullRequest:
+ return "close_pull_request"
+ case ActionReopenPullRequest:
+ return "reopen_pull_request"
+ case ActionDeleteTag:
+ return "delete_tag"
+ case ActionDeleteBranch:
+ return "delete_branch"
+ case ActionMirrorSyncPush:
+ return "mirror_sync_push"
+ case ActionMirrorSyncCreate:
+ return "mirror_sync_create"
+ case ActionMirrorSyncDelete:
+ return "mirror_sync_delete"
+ case ActionApprovePullRequest:
+ return "approve_pull_request"
+ case ActionRejectPullRequest:
+ return "reject_pull_request"
+ case ActionCommentPull:
+ return "comment_pull"
+ case ActionPublishRelease:
+ return "publish_release"
+ case ActionPullReviewDismissed:
+ return "pull_review_dismissed"
+ case ActionPullRequestReadyForReview:
+ return "pull_request_ready_for_review"
+ case ActionAutoMergePullRequest:
+ return "auto_merge_pull_request"
+ default:
+ return "action-" + strconv.Itoa(int(at))
+ }
+}
+
+func (at ActionType) InActions(actions ...string) bool {
+ for _, action := range actions {
+ if action == at.String() {
+ return true
+ }
+ }
+ return false
+}
+
+// Action represents user operation type and other information to
+// repository. It implemented interface base.Actioner so that can be
+// used in template render.
+type Action struct {
+ ID int64 `xorm:"pk autoincr"`
+ UserID int64 `xorm:"INDEX"` // Receiver user id.
+ OpType ActionType
+ ActUserID int64 // Action user id.
+ ActUser *user_model.User `xorm:"-"`
+ RepoID int64
+ Repo *repo_model.Repository `xorm:"-"`
+ CommentID int64 `xorm:"INDEX"`
+ Comment *issues_model.Comment `xorm:"-"`
+ Issue *issues_model.Issue `xorm:"-"` // get the issue id from content
+ IsDeleted bool `xorm:"NOT NULL DEFAULT false"`
+ RefName string
+ IsPrivate bool `xorm:"NOT NULL DEFAULT false"`
+ Content string `xorm:"TEXT"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+}
+
+func init() {
+ db.RegisterModel(new(Action))
+}
+
+// TableIndices implements xorm's TableIndices interface
+func (a *Action) TableIndices() []*schemas.Index {
+ repoIndex := schemas.NewIndex("r_u_d", schemas.IndexType)
+ repoIndex.AddColumn("repo_id", "user_id", "is_deleted")
+
+ actUserIndex := schemas.NewIndex("au_r_c_u_d", schemas.IndexType)
+ actUserIndex.AddColumn("act_user_id", "repo_id", "created_unix", "user_id", "is_deleted")
+
+ cudIndex := schemas.NewIndex("c_u_d", schemas.IndexType)
+ cudIndex.AddColumn("created_unix", "user_id", "is_deleted")
+
+ indices := []*schemas.Index{actUserIndex, repoIndex, cudIndex}
+
+ return indices
+}
+
+// GetOpType gets the ActionType of this action.
+func (a *Action) GetOpType() ActionType {
+ return a.OpType
+}
+
+// LoadActUser loads a.ActUser
+func (a *Action) LoadActUser(ctx context.Context) {
+ if a.ActUser != nil {
+ return
+ }
+ var err error
+ a.ActUser, err = user_model.GetUserByID(ctx, a.ActUserID)
+ if err == nil {
+ return
+ } else if user_model.IsErrUserNotExist(err) {
+ a.ActUser = user_model.NewGhostUser()
+ } else {
+ log.Error("GetUserByID(%d): %v", a.ActUserID, err)
+ }
+}
+
+func (a *Action) loadRepo(ctx context.Context) {
+ if a.Repo != nil {
+ return
+ }
+ var err error
+ a.Repo, err = repo_model.GetRepositoryByID(ctx, a.RepoID)
+ if err != nil {
+ log.Error("repo_model.GetRepositoryByID(%d): %v", a.RepoID, err)
+ }
+}
+
+// GetActFullName gets the action's user full name.
+func (a *Action) GetActFullName(ctx context.Context) string {
+ a.LoadActUser(ctx)
+ return a.ActUser.FullName
+}
+
+// GetActUserName gets the action's user name.
+func (a *Action) GetActUserName(ctx context.Context) string {
+ a.LoadActUser(ctx)
+ return a.ActUser.Name
+}
+
+// ShortActUserName gets the action's user name trimmed to max 20
+// chars.
+func (a *Action) ShortActUserName(ctx context.Context) string {
+ return base.EllipsisString(a.GetActUserName(ctx), 20)
+}
+
+// GetActDisplayName gets the action's display name based on DEFAULT_SHOW_FULL_NAME, or falls back to the username if it is blank.
+func (a *Action) GetActDisplayName(ctx context.Context) string {
+ if setting.UI.DefaultShowFullName {
+ trimmedFullName := strings.TrimSpace(a.GetActFullName(ctx))
+ if len(trimmedFullName) > 0 {
+ return trimmedFullName
+ }
+ }
+ return a.ShortActUserName(ctx)
+}
+
+// GetActDisplayNameTitle gets the action's display name used for the title (tooltip) based on DEFAULT_SHOW_FULL_NAME
+func (a *Action) GetActDisplayNameTitle(ctx context.Context) string {
+ if setting.UI.DefaultShowFullName {
+ return a.ShortActUserName(ctx)
+ }
+ return a.GetActFullName(ctx)
+}
+
+// GetRepoUserName returns the name of the action repository owner.
+func (a *Action) GetRepoUserName(ctx context.Context) string {
+ a.loadRepo(ctx)
+ if a.Repo == nil {
+ return "(non-existing-repo)"
+ }
+ return a.Repo.OwnerName
+}
+
+// ShortRepoUserName returns the name of the action repository owner
+// trimmed to max 20 chars.
+func (a *Action) ShortRepoUserName(ctx context.Context) string {
+ return base.EllipsisString(a.GetRepoUserName(ctx), 20)
+}
+
+// GetRepoName returns the name of the action repository.
+func (a *Action) GetRepoName(ctx context.Context) string {
+ a.loadRepo(ctx)
+ if a.Repo == nil {
+ return "(non-existing-repo)"
+ }
+ return a.Repo.Name
+}
+
+// ShortRepoName returns the name of the action repository
+// trimmed to max 33 chars.
+func (a *Action) ShortRepoName(ctx context.Context) string {
+ return base.EllipsisString(a.GetRepoName(ctx), 33)
+}
+
+// GetRepoPath returns the virtual path to the action repository.
+func (a *Action) GetRepoPath(ctx context.Context) string {
+ return path.Join(a.GetRepoUserName(ctx), a.GetRepoName(ctx))
+}
+
+// ShortRepoPath returns the virtual path to the action repository
+// trimmed to max 20 + 1 + 33 chars.
+func (a *Action) ShortRepoPath(ctx context.Context) string {
+ return path.Join(a.ShortRepoUserName(ctx), a.ShortRepoName(ctx))
+}
+
+// GetRepoLink returns relative link to action repository.
+func (a *Action) GetRepoLink(ctx context.Context) string {
+ // path.Join will skip empty strings
+ return path.Join(setting.AppSubURL, "/", url.PathEscape(a.GetRepoUserName(ctx)), url.PathEscape(a.GetRepoName(ctx)))
+}
+
+// GetRepoAbsoluteLink returns the absolute link to action repository.
+func (a *Action) GetRepoAbsoluteLink(ctx context.Context) string {
+ return setting.AppURL + url.PathEscape(a.GetRepoUserName(ctx)) + "/" + url.PathEscape(a.GetRepoName(ctx))
+}
+
+func (a *Action) loadComment(ctx context.Context) (err error) {
+ if a.CommentID == 0 || a.Comment != nil {
+ return nil
+ }
+ a.Comment, err = issues_model.GetCommentByID(ctx, a.CommentID)
+ return err
+}
+
+// GetCommentHTMLURL returns link to action comment.
+func (a *Action) GetCommentHTMLURL(ctx context.Context) string {
+ if a == nil {
+ return "#"
+ }
+ _ = a.loadComment(ctx)
+ if a.Comment != nil {
+ return a.Comment.HTMLURL(ctx)
+ }
+
+ if err := a.LoadIssue(ctx); err != nil || a.Issue == nil {
+ return "#"
+ }
+ if err := a.Issue.LoadRepo(ctx); err != nil {
+ return "#"
+ }
+
+ return a.Issue.HTMLURL()
+}
+
+// GetCommentLink returns link to action comment.
+func (a *Action) GetCommentLink(ctx context.Context) string {
+ if a == nil {
+ return "#"
+ }
+ _ = a.loadComment(ctx)
+ if a.Comment != nil {
+ return a.Comment.Link(ctx)
+ }
+
+ if err := a.LoadIssue(ctx); err != nil || a.Issue == nil {
+ return "#"
+ }
+ if err := a.Issue.LoadRepo(ctx); err != nil {
+ return "#"
+ }
+
+ return a.Issue.Link()
+}
+
+// GetBranch returns the action's repository branch.
+func (a *Action) GetBranch() string {
+ return strings.TrimPrefix(a.RefName, git.BranchPrefix)
+}
+
+// GetRefLink returns the action's ref link.
+func (a *Action) GetRefLink(ctx context.Context) string {
+ return git.RefURL(a.GetRepoLink(ctx), a.RefName)
+}
+
+// GetTag returns the action's repository tag.
+func (a *Action) GetTag() string {
+ return strings.TrimPrefix(a.RefName, git.TagPrefix)
+}
+
+// GetContent returns the action's content.
+func (a *Action) GetContent() string {
+ return a.Content
+}
+
+// GetCreate returns the action creation time.
+func (a *Action) GetCreate() time.Time {
+ return a.CreatedUnix.AsTime()
+}
+
+func (a *Action) IsIssueEvent() bool {
+ return a.OpType.InActions("comment_issue", "approve_pull_request", "reject_pull_request", "comment_pull", "merge_pull_request")
+}
+
+// GetIssueInfos returns a list of associated information with the action.
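+// The Content field packs up to three values separated by "|", starting with
+// the issue index (DeleteIssueActions below matches rows on the same
+// "index|" prefix).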
+func (a *Action) GetIssueInfos() []string {
+ // Make sure it always returns 3 elements, because some callers access ret[1] and ret[2] without checking the length.
+ ret := strings.SplitN(a.Content, "|", 3)
+ for len(ret) < 3 {
+ ret = append(ret, "")
+ }
+ return ret
+}
+
+func (a *Action) getIssueIndex() int64 {
+ infos := a.GetIssueInfos()
+ if len(infos) == 0 {
+ return 0
+ }
+ index, _ := strconv.ParseInt(infos[0], 10, 64)
+ return index
+}
+
+func (a *Action) LoadIssue(ctx context.Context) error {
+ if a.Issue != nil {
+ return nil
+ }
+ if index := a.getIssueIndex(); index > 0 {
+ issue, err := issues_model.GetIssueByIndex(ctx, a.RepoID, index)
+ if err != nil {
+ return err
+ }
+ a.Issue = issue
+ a.Issue.Repo = a.Repo
+ }
+ return nil
+}
+
+// GetIssueTitle returns the title of first issue associated with the action.
+func (a *Action) GetIssueTitle(ctx context.Context) string {
+ if err := a.LoadIssue(ctx); err != nil {
+ log.Error("LoadIssue: %v", err)
+ return "<500 when get issue>"
+ }
+ if a.Issue == nil {
+ return "<Issue not found>"
+ }
+ return a.Issue.Title
+}
+
+// GetIssueContent returns the content of first issue associated with this action.
+func (a *Action) GetIssueContent(ctx context.Context) string {
+ if err := a.LoadIssue(ctx); err != nil {
+ log.Error("LoadIssue: %v", err)
+ return "<500 when get issue>"
+ }
+ if a.Issue == nil {
+ return "<Content not found>"
+ }
+ return a.Issue.Content
+}
+
+// GetFeedsOptions options for retrieving feeds
+type GetFeedsOptions struct {
+ db.ListOptions
+ RequestedUser *user_model.User // the user we want activity for
+ RequestedTeam *organization.Team // the team we want activity for
+ RequestedRepo *repo_model.Repository // the repo we want activity for
+ Actor *user_model.User // the user viewing the activity
+ IncludePrivate bool // include private actions
+ OnlyPerformedBy bool // only actions performed by requested user
+ OnlyPerformedByActor bool // only actions performed by the original actor
+ IncludeDeleted bool // include deleted actions
+ Date string // the day we want activity for: YYYY-MM-DD
+}
+
+// GetFeeds returns actions according to the provided options
+func GetFeeds(ctx context.Context, opts GetFeedsOptions) (ActionList, int64, error) {
+ if opts.RequestedUser == nil && opts.RequestedTeam == nil && opts.RequestedRepo == nil {
+ return nil, 0, fmt.Errorf("need at least one of these filters: RequestedUser, RequestedTeam, RequestedRepo")
+ }
+
+ cond, err := activityQueryCondition(ctx, opts)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ sess := db.GetEngine(ctx).Where(cond).
+ Select("`action`.*"). // this line will avoid select other joined table's columns
+ Join("INNER", "repository", "`repository`.id = `action`.repo_id")
+
+ opts.SetDefaultValues()
+ sess = db.SetSessionPagination(sess, &opts)
+
+ actions := make([]*Action, 0, opts.PageSize)
+ count, err := sess.Desc("`action`.created_unix").FindAndCount(&actions)
+ if err != nil {
+ return nil, 0, fmt.Errorf("FindAndCount: %w", err)
+ }
+
+ if err := ActionList(actions).LoadAttributes(ctx); err != nil {
+ return nil, 0, fmt.Errorf("LoadAttributes: %w", err)
+ }
+
+ return actions, count, nil
+}
+
+// ActivityReadable returns whether the doer can read the user's activities
+func ActivityReadable(user, doer *user_model.User) bool {
+ return !user.KeepActivityPrivate ||
+ doer != nil && (doer.IsAdmin || user.ID == doer.ID)
+}
+
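+// activityQueryCondition assembles the WHERE clause for GetFeeds: actor
+// visibility, repositories readable by the actor, the requested
+// user/team/repo, privacy and deletion flags, and an optional single-day
+// date window.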
+func activityQueryCondition(ctx context.Context, opts GetFeedsOptions) (builder.Cond, error) {
+ cond := builder.NewCond()
+
+ if opts.OnlyPerformedByActor {
+ cond = cond.And(builder.Expr("`action`.user_id = `action`.act_user_id"))
+ }
+
+ if opts.RequestedTeam != nil && opts.RequestedUser == nil {
+ org, err := user_model.GetUserByID(ctx, opts.RequestedTeam.OrgID)
+ if err != nil {
+ return nil, err
+ }
+ opts.RequestedUser = org
+ }
+
+ // Check activity visibility for the actor (similar to ActivityReadable())
+ if opts.Actor == nil {
+ cond = cond.And(builder.In("act_user_id",
+ builder.Select("`user`.id").Where(
+ builder.Eq{"keep_activity_private": false, "visibility": structs.VisibleTypePublic},
+ ).From("`user`"),
+ ))
+ } else if !opts.Actor.IsAdmin {
+ uidCond := builder.Select("`user`.id").From("`user`").Where(
+ builder.Eq{"keep_activity_private": false}.
+ And(builder.In("visibility", structs.VisibleTypePublic, structs.VisibleTypeLimited))).
+ Or(builder.Eq{"id": opts.Actor.ID})
+
+ if opts.RequestedUser != nil {
+ if opts.RequestedUser.IsOrganization() {
+ // An organization can always see the activities whose `act_user_id` is the same as its id.
+ uidCond = uidCond.Or(builder.Eq{"id": opts.RequestedUser.ID})
+ } else {
+ // A user can always see the activities of the organizations to which the user belongs.
+ uidCond = uidCond.Or(
+ builder.Eq{"type": user_model.UserTypeOrganization}.
+ And(builder.In("`user`.id", builder.Select("org_id").
+ Where(builder.Eq{"uid": opts.RequestedUser.ID}).
+ From("team_user"))),
+ )
+ }
+ }
+
+ cond = cond.And(builder.In("act_user_id", uidCond))
+ }
+
+ // check readable repositories by doer/actor
+ if opts.Actor == nil || !opts.Actor.IsAdmin {
+ cond = cond.And(builder.In("repo_id", repo_model.AccessibleRepoIDsQuery(opts.Actor)))
+ }
+
+ if opts.RequestedRepo != nil {
+ cond = cond.And(builder.Eq{"repo_id": opts.RequestedRepo.ID})
+ }
+
+ if opts.RequestedTeam != nil {
+ env := organization.OrgFromUser(opts.RequestedUser).AccessibleTeamReposEnv(ctx, opts.RequestedTeam)
+ teamRepoIDs, err := env.RepoIDs(1, opts.RequestedUser.NumRepos)
+ if err != nil {
+ return nil, fmt.Errorf("GetTeamRepositories: %w", err)
+ }
+ cond = cond.And(builder.In("repo_id", teamRepoIDs))
+ }
+
+ if opts.RequestedUser != nil {
+ cond = cond.And(builder.Eq{"user_id": opts.RequestedUser.ID})
+
+ if opts.OnlyPerformedBy {
+ cond = cond.And(builder.Eq{"act_user_id": opts.RequestedUser.ID})
+ }
+ }
+
+ if !opts.IncludePrivate {
+ cond = cond.And(builder.Eq{"`action`.is_private": false})
+ }
+ if !opts.IncludeDeleted {
+ cond = cond.And(builder.Eq{"is_deleted": false})
+ }
+
+ if opts.Date != "" {
+ dateLow, err := time.ParseInLocation("2006-01-02", opts.Date, setting.DefaultUILocation)
+ if err != nil {
+ log.Warn("Unable to parse %s, filter not applied: %v", opts.Date, err)
+ } else {
+ dateHigh := dateLow.Add(24*time.Hour - time.Second) // 23h59m59s
+
+ cond = cond.And(builder.Gte{"`action`.created_unix": dateLow.Unix()})
+ cond = cond.And(builder.Lte{"`action`.created_unix": dateHigh.Unix()})
+ }
+ }
+
+ return cond, nil
+}
+
+// DeleteOldActions deletes all old actions from database.
+func DeleteOldActions(ctx context.Context, olderThan time.Duration) (err error) {
+ if olderThan <= 0 {
+ return nil
+ }
+
+ _, err = db.GetEngine(ctx).Where("created_unix < ?", time.Now().Add(-olderThan).Unix()).Delete(&Action{})
+ return err
+}
+
+// NotifyWatchers creates a batch of actions for every watcher.
+func NotifyWatchers(ctx context.Context, actions ...*Action) error {
+ var watchers []*repo_model.Watch
+ var repo *repo_model.Repository
+ var err error
+ var permCode []bool
+ var permIssue []bool
+ var permPR []bool
+
+ e := db.GetEngine(ctx)
+
+ for _, act := range actions {
+ repoChanged := repo == nil || repo.ID != act.RepoID
+
+ if repoChanged {
+ // Add feeds for user self and all watchers.
+ watchers, err = repo_model.GetWatchers(ctx, act.RepoID)
+ if err != nil {
+ return fmt.Errorf("get watchers: %w", err)
+ }
+
+ // Be aware that folding this filtering into the `GetWatchers` SQL
+ // query is, in most cases, less performant than doing it here.
+ blockedDoerUserIDs, err := user_model.ListBlockedByUsersID(ctx, act.ActUserID)
+ if err != nil {
+ return fmt.Errorf("user_model.ListBlockedByUsersID: %w", err)
+ }
+
+ if len(blockedDoerUserIDs) > 0 {
+ excludeWatcherIDs := make(container.Set[int64], len(blockedDoerUserIDs))
+ excludeWatcherIDs.AddMultiple(blockedDoerUserIDs...)
+ watchers = slices.DeleteFunc(watchers, func(v *repo_model.Watch) bool {
+ return excludeWatcherIDs.Contains(v.UserID)
+ })
+ }
+ }
+
+ // Add feed for actioner.
+ act.UserID = act.ActUserID
+ if _, err = e.Insert(act); err != nil {
+ return fmt.Errorf("insert new actioner: %w", err)
+ }
+
+ if repoChanged {
+ act.loadRepo(ctx)
+ repo = act.Repo
+
+ // Check that the repo owner exists.
+ if err := act.Repo.LoadOwner(ctx); err != nil {
+ return fmt.Errorf("can't get repo owner: %w", err)
+ }
+ } else if act.Repo == nil {
+ act.Repo = repo
+ }
+
+ // Add feed for organization
+ if act.Repo.Owner.IsOrganization() && act.ActUserID != act.Repo.Owner.ID {
+ act.ID = 0
+ act.UserID = act.Repo.Owner.ID
+ if err = db.Insert(ctx, act); err != nil {
+ return fmt.Errorf("insert new actioner: %w", err)
+ }
+ }
+
+ if repoChanged {
+ permCode = make([]bool, len(watchers))
+ permIssue = make([]bool, len(watchers))
+ permPR = make([]bool, len(watchers))
+ for i, watcher := range watchers {
+ user, err := user_model.GetUserByID(ctx, watcher.UserID)
+ if err != nil {
+ permCode[i] = false
+ permIssue[i] = false
+ permPR[i] = false
+ continue
+ }
+ perm, err := access_model.GetUserRepoPermission(ctx, repo, user)
+ if err != nil {
+ permCode[i] = false
+ permIssue[i] = false
+ permPR[i] = false
+ continue
+ }
+ permCode[i] = perm.CanRead(unit.TypeCode)
+ permIssue[i] = perm.CanRead(unit.TypeIssues)
+ permPR[i] = perm.CanRead(unit.TypePullRequests)
+ }
+ }
+
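+ // Fan the action out to each watcher who can read the unit the action
+ // concerns; resetting act.ID to 0 forces a fresh row per watcher.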
+ for i, watcher := range watchers {
+ if act.ActUserID == watcher.UserID {
+ continue
+ }
+ act.ID = 0
+ act.UserID = watcher.UserID
+ act.Repo.Units = nil
+
+ switch act.OpType {
+ case ActionCommitRepo, ActionPushTag, ActionDeleteTag, ActionPublishRelease, ActionDeleteBranch:
+ if !permCode[i] {
+ continue
+ }
+ case ActionCreateIssue, ActionCommentIssue, ActionCloseIssue, ActionReopenIssue:
+ if !permIssue[i] {
+ continue
+ }
+ case ActionCreatePullRequest, ActionCommentPull, ActionMergePullRequest, ActionClosePullRequest, ActionReopenPullRequest, ActionAutoMergePullRequest:
+ if !permPR[i] {
+ continue
+ }
+ }
+
+ if err = db.Insert(ctx, act); err != nil {
+ return fmt.Errorf("insert new action: %w", err)
+ }
+ }
+ }
+ return nil
+}
+
+// NotifyWatchersActions creates a batch of actions for every watcher.
+func NotifyWatchersActions(ctx context.Context, acts []*Action) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+ for _, act := range acts {
+ if err := NotifyWatchers(ctx, act); err != nil {
+ return err
+ }
+ }
+ return committer.Commit()
+}
+
+// DeleteIssueActions deletes all actions related to the given issue
+func DeleteIssueActions(ctx context.Context, repoID, issueID, issueIndex int64) error {
+ // delete actions assigned to this issue
+ e := db.GetEngine(ctx)
+
+ // MariaDB has a performance bug: https://jira.mariadb.org/browse/MDEV-16289
+ // so here it uses "DELETE ... WHERE IN" with pre-queried IDs.
+ var lastCommentID int64
+ commentIDs := make([]int64, 0, db.DefaultMaxInSize)
+ for {
+ commentIDs = commentIDs[:0]
+ err := e.Select("`id`").Table(&issues_model.Comment{}).
+ Where(builder.Eq{"issue_id": issueID}).And("`id` > ?", lastCommentID).
+ OrderBy("`id`").Limit(db.DefaultMaxInSize).
+ Find(&commentIDs)
+ if err != nil {
+ return err
+ } else if len(commentIDs) == 0 {
+ break
+ } else if _, err = db.GetEngine(ctx).In("comment_id", commentIDs).Delete(&Action{}); err != nil {
+ return err
+ }
+ lastCommentID = commentIDs[len(commentIDs)-1]
+ }
+
+ _, err := e.Where("repo_id = ?", repoID).
+ In("op_type", ActionCreateIssue, ActionCreatePullRequest).
+ Where("content LIKE ?", strconv.FormatInt(issueIndex, 10)+"|%"). // "IssueIndex|content..."
+ Delete(&Action{})
+ return err
+}
+
+// CountActionCreatedUnixString counts actions where created_unix is an empty string
+func CountActionCreatedUnixString(ctx context.Context) (int64, error) {
+ if setting.Database.Type.IsSQLite3() {
+ return db.GetEngine(ctx).Where(`created_unix = ""`).Count(new(Action))
+ }
+ return 0, nil
+}
+
+// FixActionCreatedUnixString sets created_unix to zero if it is an empty string
+func FixActionCreatedUnixString(ctx context.Context) (int64, error) {
+ if setting.Database.Type.IsSQLite3() {
+ res, err := db.GetEngine(ctx).Exec(`UPDATE action SET created_unix = 0 WHERE created_unix = ""`)
+ if err != nil {
+ return 0, err
+ }
+ return res.RowsAffected()
+ }
+ return 0, nil
+}
diff --git a/models/activities/action_list.go b/models/activities/action_list.go
new file mode 100644
index 0000000..aafb7f8
--- /dev/null
+++ b/models/activities/action_list.go
@@ -0,0 +1,203 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package activities
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ActionList defines a list of actions
+type ActionList []*Action
+
+func (actions ActionList) getUserIDs() []int64 {
+ return container.FilterSlice(actions, func(action *Action) (int64, bool) {
+ return action.ActUserID, true
+ })
+}
+
+func (actions ActionList) LoadActUsers(ctx context.Context) (map[int64]*user_model.User, error) {
+ if len(actions) == 0 {
+ return nil, nil
+ }
+
+ userIDs := actions.getUserIDs()
+ userMaps := make(map[int64]*user_model.User, len(userIDs))
+ err := db.GetEngine(ctx).
+ In("id", userIDs).
+ Find(&userMaps)
+ if err != nil {
+ return nil, fmt.Errorf("find user: %w", err)
+ }
+
+ for _, action := range actions {
+ action.ActUser = userMaps[action.ActUserID]
+ }
+ return userMaps, nil
+}
+
+func (actions ActionList) getRepoIDs() []int64 {
+ return container.FilterSlice(actions, func(action *Action) (int64, bool) {
+ return action.RepoID, true
+ })
+}
+
+func (actions ActionList) LoadRepositories(ctx context.Context) error {
+ if len(actions) == 0 {
+ return nil
+ }
+
+ repoIDs := actions.getRepoIDs()
+ repoMaps := make(map[int64]*repo_model.Repository, len(repoIDs))
+ err := db.GetEngine(ctx).In("id", repoIDs).Find(&repoMaps)
+ if err != nil {
+ return fmt.Errorf("find repository: %w", err)
+ }
+ for _, action := range actions {
+ action.Repo = repoMaps[action.RepoID]
+ }
+ repos := repo_model.RepositoryList(util.ValuesOfMap(repoMaps))
+ return repos.LoadUnits(ctx)
+}
+
+func (actions ActionList) loadRepoOwner(ctx context.Context, userMap map[int64]*user_model.User) (err error) {
+ if userMap == nil {
+ userMap = make(map[int64]*user_model.User)
+ }
+
+ missingUserIDs := container.FilterSlice(actions, func(action *Action) (int64, bool) {
+ if action.Repo == nil {
+ return 0, false
+ }
+ _, alreadyLoaded := userMap[action.Repo.OwnerID]
+ return action.Repo.OwnerID, !alreadyLoaded
+ })
+ if len(missingUserIDs) == 0 {
+ return nil
+ }
+
+ if err := db.GetEngine(ctx).
+ In("id", missingUserIDs).
+ Find(&userMap); err != nil {
+ return fmt.Errorf("find user: %w", err)
+ }
+
+ for _, action := range actions {
+ if action.Repo != nil {
+ action.Repo.Owner = userMap[action.Repo.OwnerID]
+ }
+ }
+
+ return nil
+}
+
+// LoadAttributes loads all attributes
+func (actions ActionList) LoadAttributes(ctx context.Context) error {
+ // the load sequence cannot be changed because of the dependencies
+ userMap, err := actions.LoadActUsers(ctx)
+ if err != nil {
+ return err
+ }
+ if err := actions.LoadRepositories(ctx); err != nil {
+ return err
+ }
+ if err := actions.loadRepoOwner(ctx, userMap); err != nil {
+ return err
+ }
+ if err := actions.LoadIssues(ctx); err != nil {
+ return err
+ }
+ return actions.LoadComments(ctx)
+}
+
+func (actions ActionList) LoadComments(ctx context.Context) error {
+ if len(actions) == 0 {
+ return nil
+ }
+
+ commentIDs := make([]int64, 0, len(actions))
+ for _, action := range actions {
+ if action.CommentID > 0 {
+ commentIDs = append(commentIDs, action.CommentID)
+ }
+ }
+ if len(commentIDs) == 0 {
+ return nil
+ }
+
+ commentsMap := make(map[int64]*issues_model.Comment, len(commentIDs))
+ if err := db.GetEngine(ctx).In("id", commentIDs).Find(&commentsMap); err != nil {
+ return fmt.Errorf("find comment: %w", err)
+ }
+
+ for _, action := range actions {
+ if action.CommentID > 0 {
+ action.Comment = commentsMap[action.CommentID]
+ if action.Comment != nil {
+ action.Comment.Issue = action.Issue
+ }
+ }
+ }
+ return nil
+}
+
+func (actions ActionList) LoadIssues(ctx context.Context) error {
+ if len(actions) == 0 {
+ return nil
+ }
+
+ conditions := builder.NewCond()
+ issueNum := 0
+ for _, action := range actions {
+ if action.IsIssueEvent() {
+ infos := action.GetIssueInfos()
+ if len(infos) == 0 {
+ continue
+ }
+ index, _ := strconv.ParseInt(infos[0], 10, 64)
+ if index > 0 {
+ conditions = conditions.Or(builder.Eq{
+ "repo_id": action.RepoID,
+ "`index`": index,
+ })
+ issueNum++
+ }
+ }
+ }
+ if !conditions.IsValid() {
+ return nil
+ }
+
+ issuesMap := make(map[string]*issues_model.Issue, issueNum)
+ issues := make([]*issues_model.Issue, 0, issueNum)
+ if err := db.GetEngine(ctx).Where(conditions).Find(&issues); err != nil {
+ return fmt.Errorf("find issue: %w", err)
+ }
+ for _, issue := range issues {
+ issuesMap[fmt.Sprintf("%d-%d", issue.RepoID, issue.Index)] = issue
+ }
+
+ for _, action := range actions {
+ if !action.IsIssueEvent() {
+ continue
+ }
+ if index := action.getIssueIndex(); index > 0 {
+ if issue, ok := issuesMap[fmt.Sprintf("%d-%d", action.RepoID, index)]; ok {
+ action.Issue = issue
+ action.Issue.Repo = action.Repo
+ }
+ }
+ }
+ return nil
+}
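
Note: callers are meant to batch-load attributes once per list rather than once per action. A hedged usage sketch — GetFeeds and GetFeedsOptions come from action.go earlier in this commit, while the surrounding helper is hypothetical:

    package feeds

    import (
    	"context"

    	"code.gitea.io/gitea/models/activities"
    	user_model "code.gitea.io/gitea/models/user"
    )

    // listUserFeed is a hypothetical helper showing the intended call order:
    // fetch the actions first, then hydrate users, repositories, issues and
    // comments with one batched query per entity type.
    func listUserFeed(ctx context.Context, doer *user_model.User) (activities.ActionList, error) {
    	actions, _, err := activities.GetFeeds(ctx, activities.GetFeedsOptions{
    		RequestedUser:  doer,
    		Actor:          doer,
    		IncludePrivate: true,
    	})
    	if err != nil {
    		return nil, err
    	}
    	if err := actions.LoadAttributes(ctx); err != nil {
    		return nil, err
    	}
    	return actions, nil
    }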
diff --git a/models/activities/action_test.go b/models/activities/action_test.go
new file mode 100644
index 0000000..4ce030d
--- /dev/null
+++ b/models/activities/action_test.go
@@ -0,0 +1,320 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package activities_test
+
+import (
+ "fmt"
+ "path"
+ "testing"
+
+ activities_model "code.gitea.io/gitea/models/activities"
+ "code.gitea.io/gitea/models/db"
+ issue_model "code.gitea.io/gitea/models/issues"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/setting"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestAction_GetRepoPath(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ owner := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: repo.OwnerID})
+ action := &activities_model.Action{RepoID: repo.ID}
+ assert.Equal(t, path.Join(owner.Name, repo.Name), action.GetRepoPath(db.DefaultContext))
+}
+
+func TestAction_GetRepoLink(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ owner := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: repo.OwnerID})
+ comment := unittest.AssertExistsAndLoadBean(t, &issue_model.Comment{ID: 2})
+ action := &activities_model.Action{RepoID: repo.ID, CommentID: comment.ID}
+ setting.AppSubURL = "/suburl"
+ expected := path.Join(setting.AppSubURL, owner.Name, repo.Name)
+ assert.Equal(t, expected, action.GetRepoLink(db.DefaultContext))
+ assert.Equal(t, repo.HTMLURL(), action.GetRepoAbsoluteLink(db.DefaultContext))
+ assert.Equal(t, comment.HTMLURL(db.DefaultContext), action.GetCommentHTMLURL(db.DefaultContext))
+}
+
+func TestGetFeeds(t *testing.T) {
+ // test with an individual user
+ require.NoError(t, unittest.PrepareTestDatabase())
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+
+ actions, count, err := activities_model.GetFeeds(db.DefaultContext, activities_model.GetFeedsOptions{
+ RequestedUser: user,
+ Actor: user,
+ IncludePrivate: true,
+ OnlyPerformedBy: false,
+ IncludeDeleted: true,
+ })
+ require.NoError(t, err)
+ if assert.Len(t, actions, 1) {
+ assert.EqualValues(t, 1, actions[0].ID)
+ assert.EqualValues(t, user.ID, actions[0].UserID)
+ }
+ assert.Equal(t, int64(1), count)
+
+ actions, count, err = activities_model.GetFeeds(db.DefaultContext, activities_model.GetFeedsOptions{
+ RequestedUser: user,
+ Actor: user,
+ IncludePrivate: false,
+ OnlyPerformedBy: false,
+ })
+ require.NoError(t, err)
+ assert.Empty(t, actions)
+ assert.Equal(t, int64(0), count)
+}
+
+func TestGetFeedsForRepos(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+ privRepo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 2})
+ pubRepo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 8})
+
+ // private repo & no login
+ actions, count, err := activities_model.GetFeeds(db.DefaultContext, activities_model.GetFeedsOptions{
+ RequestedRepo: privRepo,
+ IncludePrivate: true,
+ })
+ require.NoError(t, err)
+ assert.Empty(t, actions)
+ assert.Equal(t, int64(0), count)
+
+ // public repo & no login
+ actions, count, err = activities_model.GetFeeds(db.DefaultContext, activities_model.GetFeedsOptions{
+ RequestedRepo: pubRepo,
+ IncludePrivate: true,
+ })
+ require.NoError(t, err)
+ assert.Len(t, actions, 1)
+ assert.Equal(t, int64(1), count)
+
+ // private repo and login
+ actions, count, err = activities_model.GetFeeds(db.DefaultContext, activities_model.GetFeedsOptions{
+ RequestedRepo: privRepo,
+ IncludePrivate: true,
+ Actor: user,
+ })
+ require.NoError(t, err)
+ assert.Len(t, actions, 1)
+ assert.Equal(t, int64(1), count)
+
+ // public repo & login
+ actions, count, err = activities_model.GetFeeds(db.DefaultContext, activities_model.GetFeedsOptions{
+ RequestedRepo: pubRepo,
+ IncludePrivate: true,
+ Actor: user,
+ })
+ require.NoError(t, err)
+ assert.Len(t, actions, 1)
+ assert.Equal(t, int64(1), count)
+}
+
+func TestGetFeeds2(t *testing.T) {
+ // test with an organization user
+ require.NoError(t, unittest.PrepareTestDatabase())
+ org := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 3})
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+
+ actions, count, err := activities_model.GetFeeds(db.DefaultContext, activities_model.GetFeedsOptions{
+ RequestedUser: org,
+ Actor: user,
+ IncludePrivate: true,
+ OnlyPerformedBy: false,
+ IncludeDeleted: true,
+ })
+ require.NoError(t, err)
+ if assert.Len(t, actions, 1) {
+ assert.EqualValues(t, 2, actions[0].ID)
+ assert.EqualValues(t, org.ID, actions[0].UserID)
+ }
+ assert.Equal(t, int64(1), count)
+
+ actions, count, err = activities_model.GetFeeds(db.DefaultContext, activities_model.GetFeedsOptions{
+ RequestedUser: org,
+ Actor: user,
+ IncludePrivate: false,
+ OnlyPerformedBy: false,
+ IncludeDeleted: true,
+ })
+ require.NoError(t, err)
+ assert.Empty(t, actions)
+ assert.Equal(t, int64(0), count)
+}
+
+func TestActivityReadable(t *testing.T) {
+ tt := []struct {
+ desc string
+ user *user_model.User
+ doer *user_model.User
+ result bool
+ }{{
+ desc: "user should see own activity",
+ user: &user_model.User{ID: 1},
+ doer: &user_model.User{ID: 1},
+ result: true,
+ }, {
+ desc: "anon should see activity if public",
+ user: &user_model.User{ID: 1},
+ result: true,
+ }, {
+ desc: "anon should NOT see activity",
+ user: &user_model.User{ID: 1, KeepActivityPrivate: true},
+ result: false,
+ }, {
+ desc: "user should see own activity if private too",
+ user: &user_model.User{ID: 1, KeepActivityPrivate: true},
+ doer: &user_model.User{ID: 1},
+ result: true,
+ }, {
+ desc: "other user should NOT see activity",
+ user: &user_model.User{ID: 1, KeepActivityPrivate: true},
+ doer: &user_model.User{ID: 2},
+ result: false,
+ }, {
+ desc: "admin should see activity",
+ user: &user_model.User{ID: 1, KeepActivityPrivate: true},
+ doer: &user_model.User{ID: 2, IsAdmin: true},
+ result: true,
+ }}
+ for _, test := range tt {
+ assert.Equal(t, test.result, activities_model.ActivityReadable(test.user, test.doer), test.desc)
+ }
+}
+
+func TestNotifyWatchers(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ action := &activities_model.Action{
+ ActUserID: 8,
+ RepoID: 1,
+ OpType: activities_model.ActionStarRepo,
+ }
+ require.NoError(t, activities_model.NotifyWatchers(db.DefaultContext, action))
+
+	// One watcher is inactive, thus actions are only created for users 8, 1, 4 and 11
+ unittest.AssertExistsAndLoadBean(t, &activities_model.Action{
+ ActUserID: action.ActUserID,
+ UserID: 8,
+ RepoID: action.RepoID,
+ OpType: action.OpType,
+ })
+ unittest.AssertExistsAndLoadBean(t, &activities_model.Action{
+ ActUserID: action.ActUserID,
+ UserID: 1,
+ RepoID: action.RepoID,
+ OpType: action.OpType,
+ })
+ unittest.AssertExistsAndLoadBean(t, &activities_model.Action{
+ ActUserID: action.ActUserID,
+ UserID: 4,
+ RepoID: action.RepoID,
+ OpType: action.OpType,
+ })
+ unittest.AssertExistsAndLoadBean(t, &activities_model.Action{
+ ActUserID: action.ActUserID,
+ UserID: 11,
+ RepoID: action.RepoID,
+ OpType: action.OpType,
+ })
+}
+
+func TestGetFeedsCorrupted(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+ unittest.AssertExistsAndLoadBean(t, &activities_model.Action{
+ ID: 8,
+ RepoID: 1700,
+ })
+
+ actions, count, err := activities_model.GetFeeds(db.DefaultContext, activities_model.GetFeedsOptions{
+ RequestedUser: user,
+ Actor: user,
+ IncludePrivate: true,
+ })
+ require.NoError(t, err)
+ assert.Empty(t, actions)
+ assert.Equal(t, int64(0), count)
+}
+
+func TestConsistencyUpdateAction(t *testing.T) {
+ if !setting.Database.Type.IsSQLite3() {
+ t.Skip("Test is only for SQLite database.")
+ }
+ require.NoError(t, unittest.PrepareTestDatabase())
+ id := 8
+ unittest.AssertExistsAndLoadBean(t, &activities_model.Action{
+ ID: int64(id),
+ })
+ _, err := db.GetEngine(db.DefaultContext).Exec(`UPDATE action SET created_unix = "" WHERE id = ?`, id)
+ require.NoError(t, err)
+ actions := make([]*activities_model.Action, 0, 1)
+ //
+ // XORM returns an error when created_unix is a string
+ //
+ err = db.GetEngine(db.DefaultContext).Where("id = ?", id).Find(&actions)
+ require.ErrorContains(t, err, "type string to a int64: invalid syntax")
+
+ //
+ // Get rid of incorrectly set created_unix
+ //
+ count, err := activities_model.CountActionCreatedUnixString(db.DefaultContext)
+ require.NoError(t, err)
+ assert.EqualValues(t, 1, count)
+ count, err = activities_model.FixActionCreatedUnixString(db.DefaultContext)
+ require.NoError(t, err)
+ assert.EqualValues(t, 1, count)
+
+ count, err = activities_model.CountActionCreatedUnixString(db.DefaultContext)
+ require.NoError(t, err)
+ assert.EqualValues(t, 0, count)
+ count, err = activities_model.FixActionCreatedUnixString(db.DefaultContext)
+ require.NoError(t, err)
+ assert.EqualValues(t, 0, count)
+
+ //
+ // XORM must be happy now
+ //
+ require.NoError(t, db.GetEngine(db.DefaultContext).Where("id = ?", id).Find(&actions))
+ unittest.CheckConsistencyFor(t, &activities_model.Action{})
+}
+
+func TestDeleteIssueActions(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ // load an issue
+ issue := unittest.AssertExistsAndLoadBean(t, &issue_model.Issue{ID: 4})
+	assert.NotEqualValues(t, issue.ID, issue.Index) // ID and Index must differ so that deleting actions by IssueIndex is actually exercised
+
+ // insert a comment
+ err := db.Insert(db.DefaultContext, &issue_model.Comment{Type: issue_model.CommentTypeComment, IssueID: issue.ID})
+ require.NoError(t, err)
+ comment := unittest.AssertExistsAndLoadBean(t, &issue_model.Comment{Type: issue_model.CommentTypeComment, IssueID: issue.ID})
+
+ // truncate action table and insert some actions
+ err = db.TruncateBeans(db.DefaultContext, &activities_model.Action{})
+ require.NoError(t, err)
+ err = db.Insert(db.DefaultContext, &activities_model.Action{
+ OpType: activities_model.ActionCommentIssue,
+ CommentID: comment.ID,
+ })
+ require.NoError(t, err)
+ err = db.Insert(db.DefaultContext, &activities_model.Action{
+ OpType: activities_model.ActionCreateIssue,
+ RepoID: issue.RepoID,
+ Content: fmt.Sprintf("%d|content...", issue.Index),
+ })
+ require.NoError(t, err)
+
+ // assert that the actions exist, then delete them
+ unittest.AssertCount(t, &activities_model.Action{}, 2)
+ require.NoError(t, activities_model.DeleteIssueActions(db.DefaultContext, issue.RepoID, issue.ID, issue.Index))
+ unittest.AssertCount(t, &activities_model.Action{}, 0)
+}
diff --git a/models/activities/main_test.go b/models/activities/main_test.go
new file mode 100644
index 0000000..43afb84
--- /dev/null
+++ b/models/activities/main_test.go
@@ -0,0 +1,17 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package activities_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/unittest"
+
+ _ "code.gitea.io/gitea/models"
+ _ "code.gitea.io/gitea/models/actions"
+)
+
+func TestMain(m *testing.M) {
+ unittest.MainTest(m)
+}
diff --git a/models/activities/notification.go b/models/activities/notification.go
new file mode 100644
index 0000000..09cc640
--- /dev/null
+++ b/models/activities/notification.go
@@ -0,0 +1,407 @@
+// Copyright 2016 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package activities
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strconv"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ "code.gitea.io/gitea/models/organization"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+type (
+ // NotificationStatus is the status of the notification (read or unread)
+ NotificationStatus uint8
+ // NotificationSource is the source of the notification (issue, PR, commit, etc)
+ NotificationSource uint8
+)
+
+const (
+ // NotificationStatusUnread represents an unread notification
+ NotificationStatusUnread NotificationStatus = iota + 1
+ // NotificationStatusRead represents a read notification
+ NotificationStatusRead
+ // NotificationStatusPinned represents a pinned notification
+ NotificationStatusPinned
+)
+
+const (
+ // NotificationSourceIssue is a notification of an issue
+ NotificationSourceIssue NotificationSource = iota + 1
+ // NotificationSourcePullRequest is a notification of a pull request
+ NotificationSourcePullRequest
+ // NotificationSourceCommit is a notification of a commit
+ NotificationSourceCommit
+ // NotificationSourceRepository is a notification for a repository
+ NotificationSourceRepository
+)
+
+// Notification represents a notification
+type Notification struct {
+ ID int64 `xorm:"pk autoincr"`
+ UserID int64 `xorm:"INDEX NOT NULL"`
+ RepoID int64 `xorm:"INDEX NOT NULL"`
+
+ Status NotificationStatus `xorm:"SMALLINT INDEX NOT NULL"`
+ Source NotificationSource `xorm:"SMALLINT INDEX NOT NULL"`
+
+ IssueID int64 `xorm:"INDEX NOT NULL"`
+ CommitID string `xorm:"INDEX"`
+ CommentID int64
+
+ UpdatedBy int64 `xorm:"INDEX NOT NULL"`
+
+ Issue *issues_model.Issue `xorm:"-"`
+ Repository *repo_model.Repository `xorm:"-"`
+ Comment *issues_model.Comment `xorm:"-"`
+ User *user_model.User `xorm:"-"`
+
+ CreatedUnix timeutil.TimeStamp `xorm:"created INDEX NOT NULL"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"updated INDEX NOT NULL"`
+}
+
+func init() {
+ db.RegisterModel(new(Notification))
+}
+
+// CreateRepoTransferNotification creates a notification for the user a repository was transferred to
+func CreateRepoTransferNotification(ctx context.Context, doer, newOwner *user_model.User, repo *repo_model.Repository) error {
+ return db.WithTx(ctx, func(ctx context.Context) error {
+ var notify []*Notification
+
+ if newOwner.IsOrganization() {
+ users, err := organization.GetUsersWhoCanCreateOrgRepo(ctx, newOwner.ID)
+ if err != nil || len(users) == 0 {
+ return err
+ }
+ for i := range users {
+ notify = append(notify, &Notification{
+ UserID: i,
+ RepoID: repo.ID,
+ Status: NotificationStatusUnread,
+ UpdatedBy: doer.ID,
+ Source: NotificationSourceRepository,
+ })
+ }
+ } else {
+ notify = []*Notification{{
+ UserID: newOwner.ID,
+ RepoID: repo.ID,
+ Status: NotificationStatusUnread,
+ UpdatedBy: doer.ID,
+ Source: NotificationSourceRepository,
+ }}
+ }
+
+ return db.Insert(ctx, notify)
+ })
+}
+
+func createIssueNotification(ctx context.Context, userID int64, issue *issues_model.Issue, commentID, updatedByID int64) error {
+ notification := &Notification{
+ UserID: userID,
+ RepoID: issue.RepoID,
+ Status: NotificationStatusUnread,
+ IssueID: issue.ID,
+ CommentID: commentID,
+ UpdatedBy: updatedByID,
+ }
+
+ if issue.IsPull {
+ notification.Source = NotificationSourcePullRequest
+ } else {
+ notification.Source = NotificationSourceIssue
+ }
+
+ return db.Insert(ctx, notification)
+}
+
+func updateIssueNotification(ctx context.Context, userID, issueID, commentID, updatedByID int64) error {
+ notification, err := GetIssueNotification(ctx, userID, issueID)
+ if err != nil {
+ return err
+ }
+
+	// NOTICE: Only update the comment ID when the previous notification on this issue has been read,
+	// otherwise some old comments may be missed. But updated_by is refreshed in both cases so that
+	// the notification is reordered.
+	notification.UpdatedBy = updatedByID
+	var cols []string
+	if notification.Status == NotificationStatusRead {
+		notification.Status = NotificationStatusUnread
+		notification.CommentID = commentID
+		cols = []string{"status", "updated_by", "comment_id"}
+	} else {
+		cols = []string{"updated_by"}
+	}
+
+ _, err = db.GetEngine(ctx).ID(notification.ID).Cols(cols...).Update(notification)
+ return err
+}
+
+// GetIssueNotification returns the notification about an issue
+func GetIssueNotification(ctx context.Context, userID, issueID int64) (*Notification, error) {
+ notification := new(Notification)
+ _, err := db.GetEngine(ctx).
+ Where("user_id = ?", userID).
+ And("issue_id = ?", issueID).
+ Get(notification)
+ return notification, err
+}
+
+// LoadAttributes loads the repository, issue, user and comment if not already loaded
+func (n *Notification) LoadAttributes(ctx context.Context) (err error) {
+ if err = n.loadRepo(ctx); err != nil {
+ return err
+ }
+ if err = n.loadIssue(ctx); err != nil {
+ return err
+ }
+ if err = n.loadUser(ctx); err != nil {
+ return err
+ }
+ if err = n.loadComment(ctx); err != nil {
+ return err
+ }
+ return err
+}
+
+func (n *Notification) loadRepo(ctx context.Context) (err error) {
+ if n.Repository == nil {
+ n.Repository, err = repo_model.GetRepositoryByID(ctx, n.RepoID)
+ if err != nil {
+ return fmt.Errorf("getRepositoryByID [%d]: %w", n.RepoID, err)
+ }
+ }
+ return nil
+}
+
+func (n *Notification) loadIssue(ctx context.Context) (err error) {
+ if n.Issue == nil && n.IssueID != 0 {
+ n.Issue, err = issues_model.GetIssueByID(ctx, n.IssueID)
+ if err != nil {
+ return fmt.Errorf("getIssueByID [%d]: %w", n.IssueID, err)
+ }
+ return n.Issue.LoadAttributes(ctx)
+ }
+ return nil
+}
+
+func (n *Notification) loadComment(ctx context.Context) (err error) {
+ if n.Comment == nil && n.CommentID != 0 {
+ n.Comment, err = issues_model.GetCommentByID(ctx, n.CommentID)
+ if err != nil {
+ if issues_model.IsErrCommentNotExist(err) {
+ return issues_model.ErrCommentNotExist{
+ ID: n.CommentID,
+ IssueID: n.IssueID,
+ }
+ }
+ return err
+ }
+ }
+ return nil
+}
+
+func (n *Notification) loadUser(ctx context.Context) (err error) {
+ if n.User == nil {
+ n.User, err = user_model.GetUserByID(ctx, n.UserID)
+ if err != nil {
+ return fmt.Errorf("getUserByID [%d]: %w", n.UserID, err)
+ }
+ }
+ return nil
+}
+
+// GetRepo returns the repo of the notification
+func (n *Notification) GetRepo(ctx context.Context) (*repo_model.Repository, error) {
+ return n.Repository, n.loadRepo(ctx)
+}
+
+// GetIssue returns the issue of the notification
+func (n *Notification) GetIssue(ctx context.Context) (*issues_model.Issue, error) {
+ return n.Issue, n.loadIssue(ctx)
+}
+
+// HTMLURL returns the absolute URL of the notification's target
+func (n *Notification) HTMLURL(ctx context.Context) string {
+ switch n.Source {
+ case NotificationSourceIssue, NotificationSourcePullRequest:
+ if n.Comment != nil {
+ return n.Comment.HTMLURL(ctx)
+ }
+ return n.Issue.HTMLURL()
+ case NotificationSourceCommit:
+ return n.Repository.HTMLURL() + "/commit/" + url.PathEscape(n.CommitID)
+ case NotificationSourceRepository:
+ return n.Repository.HTMLURL()
+ }
+ return ""
+}
+
+// Link returns the relative URL of the notification's target
+func (n *Notification) Link(ctx context.Context) string {
+ switch n.Source {
+ case NotificationSourceIssue, NotificationSourcePullRequest:
+ if n.Comment != nil {
+ return n.Comment.Link(ctx)
+ }
+ return n.Issue.Link()
+ case NotificationSourceCommit:
+ return n.Repository.Link() + "/commit/" + url.PathEscape(n.CommitID)
+ case NotificationSourceRepository:
+ return n.Repository.Link()
+ }
+ return ""
+}
+
+// APIURL returns the API URL of the notification thread
+func (n *Notification) APIURL() string {
+ return setting.AppURL + "api/v1/notifications/threads/" + strconv.FormatInt(n.ID, 10)
+}
+
+func notificationExists(notifications []*Notification, issueID, userID int64) bool {
+ for _, notification := range notifications {
+ if notification.IssueID == issueID && notification.UserID == userID {
+ return true
+ }
+ }
+
+ return false
+}
+
+// UserIDCount pairs a user ID with a notification count
+type UserIDCount struct {
+ UserID int64
+ Count int64
+}
+
+// GetUIDsAndNotificationCounts returns the unread counts for every user between the two provided times.
+// It must return all user IDs which appear during the period, including count=0 for users who have read all.
+func GetUIDsAndNotificationCounts(ctx context.Context, since, until timeutil.TimeStamp) ([]UserIDCount, error) {
+ sql := `SELECT user_id, sum(case when status= ? then 1 else 0 end) AS count FROM notification ` +
+ `WHERE user_id IN (SELECT user_id FROM notification WHERE updated_unix >= ? AND ` +
+ `updated_unix < ?) GROUP BY user_id`
+ var res []UserIDCount
+ return res, db.GetEngine(ctx).SQL(sql, NotificationStatusUnread, since, until).Find(&res)
+}
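
Note: a digest-style job could build on this to decide which users to notify about a time window; the count=0 rows let it distinguish "read everything" from "no activity". A hedged sketch (the helper is hypothetical; the window arithmetic works directly on the int64-backed TimeStamp type):

    package digest

    import (
    	"context"

    	"code.gitea.io/gitea/models/activities"
    	"code.gitea.io/gitea/modules/timeutil"
    )

    // unreadCountsLastDay is a hypothetical helper: it maps each user who had
    // notification activity in the last 24 hours to their current unread count.
    func unreadCountsLastDay(ctx context.Context) (map[int64]int64, error) {
    	until := timeutil.TimeStampNow()
    	since := until - timeutil.TimeStamp(24*60*60) // 24-hour window
    	counts, err := activities.GetUIDsAndNotificationCounts(ctx, since, until)
    	if err != nil {
    		return nil, err
    	}
    	res := make(map[int64]int64, len(counts))
    	for _, c := range counts {
    		res[c.UserID] = c.Count // 0 means the user has read everything
    	}
    	return res, nil
    }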
+
+// SetIssueReadBy sets issue to be read by given user.
+func SetIssueReadBy(ctx context.Context, issueID, userID int64) error {
+ if err := issues_model.UpdateIssueUserByRead(ctx, userID, issueID); err != nil {
+ return err
+ }
+
+ return setIssueNotificationStatusReadIfUnread(ctx, userID, issueID)
+}
+
+func setIssueNotificationStatusReadIfUnread(ctx context.Context, userID, issueID int64) error {
+ notification, err := GetIssueNotification(ctx, userID, issueID)
+ // ignore if not exists
+ if err != nil {
+ return nil
+ }
+
+ if notification.Status != NotificationStatusUnread {
+ return nil
+ }
+
+ notification.Status = NotificationStatusRead
+
+ _, err = db.GetEngine(ctx).ID(notification.ID).Cols("status").Update(notification)
+ return err
+}
+
+// SetRepoReadBy sets repo to be visited by given user.
+func SetRepoReadBy(ctx context.Context, userID, repoID int64) error {
+ _, err := db.GetEngine(ctx).Where(builder.Eq{
+ "user_id": userID,
+ "status": NotificationStatusUnread,
+ "source": NotificationSourceRepository,
+ "repo_id": repoID,
+ }).Cols("status").Update(&Notification{Status: NotificationStatusRead})
+ return err
+}
+
+// SetNotificationStatus change the notification status
+func SetNotificationStatus(ctx context.Context, notificationID int64, user *user_model.User, status NotificationStatus) (*Notification, error) {
+ notification, err := GetNotificationByID(ctx, notificationID)
+ if err != nil {
+ return notification, err
+ }
+
+ if notification.UserID != user.ID {
+ return nil, fmt.Errorf("Can't change notification of another user: %d, %d", notification.UserID, user.ID)
+ }
+
+ notification.Status = status
+
+ _, err = db.GetEngine(ctx).ID(notificationID).Update(notification)
+ return notification, err
+}
+
+// GetNotificationByID return notification by ID
+func GetNotificationByID(ctx context.Context, notificationID int64) (*Notification, error) {
+ notification := new(Notification)
+ ok, err := db.GetEngine(ctx).
+ Where("id = ?", notificationID).
+ Get(notification)
+ if err != nil {
+ return nil, err
+ }
+
+ if !ok {
+ return nil, db.ErrNotExist{Resource: "notification", ID: notificationID}
+ }
+
+ return notification, nil
+}
+
+// UpdateNotificationStatuses updates the statuses of all of a user's notifications that are of the currentStatus type to the desiredStatus
+func UpdateNotificationStatuses(ctx context.Context, user *user_model.User, currentStatus, desiredStatus NotificationStatus) error {
+ n := &Notification{Status: desiredStatus, UpdatedBy: user.ID}
+ _, err := db.GetEngine(ctx).
+ Where("user_id = ? AND status = ?", user.ID, currentStatus).
+ Cols("status", "updated_by", "updated_unix").
+ Update(n)
+ return err
+}
+
+// LoadIssuePullRequests loads all issues' pull requests if possible
+func (nl NotificationList) LoadIssuePullRequests(ctx context.Context) error {
+ issues := make(map[int64]*issues_model.Issue, len(nl))
+ for _, notification := range nl {
+ if notification.Issue != nil && notification.Issue.IsPull && notification.Issue.PullRequest == nil {
+ issues[notification.Issue.ID] = notification.Issue
+ }
+ }
+
+ if len(issues) == 0 {
+ return nil
+ }
+
+ pulls, err := issues_model.GetPullRequestByIssueIDs(ctx, util.KeysOfMap(issues))
+ if err != nil {
+ return err
+ }
+
+ for _, pull := range pulls {
+ if issue := issues[pull.IssueID]; issue != nil {
+ issue.PullRequest = pull
+ issue.PullRequest.Issue = issue
+ }
+ }
+
+ return nil
+}
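
Note: the read-state helpers above are designed to compose; a "mark all read" endpoint would typically pair UpdateNotificationStatuses with SetRepoReadBy. A small sketch under that assumption — both handler functions are hypothetical:

    package notifhandlers

    import (
    	"context"

    	"code.gitea.io/gitea/models/activities"
    	user_model "code.gitea.io/gitea/models/user"
    )

    // markAllRead flips every unread notification of the user to read.
    func markAllRead(ctx context.Context, user *user_model.User) error {
    	return activities.UpdateNotificationStatuses(ctx, user,
    		activities.NotificationStatusUnread, activities.NotificationStatusRead)
    }

    // markRepoRead clears the unread repository-transfer notification, if any.
    func markRepoRead(ctx context.Context, user *user_model.User, repoID int64) error {
    	return activities.SetRepoReadBy(ctx, user.ID, repoID)
    }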
diff --git a/models/activities/notification_list.go b/models/activities/notification_list.go
new file mode 100644
index 0000000..32d2a5c
--- /dev/null
+++ b/models/activities/notification_list.go
@@ -0,0 +1,476 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package activities
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/log"
+
+ "xorm.io/builder"
+)
+
+// FindNotificationOptions represents the filters for notifications. If an ID is 0 it will be ignored.
+type FindNotificationOptions struct {
+ db.ListOptions
+ UserID int64
+ RepoID int64
+ IssueID int64
+ Status []NotificationStatus
+ Source []NotificationSource
+ UpdatedAfterUnix int64
+ UpdatedBeforeUnix int64
+}
+
+// ToConds converts the filter options into a xorm builder.Cond
+func (opts FindNotificationOptions) ToConds() builder.Cond {
+ cond := builder.NewCond()
+ if opts.UserID != 0 {
+ cond = cond.And(builder.Eq{"notification.user_id": opts.UserID})
+ }
+ if opts.RepoID != 0 {
+ cond = cond.And(builder.Eq{"notification.repo_id": opts.RepoID})
+ }
+ if opts.IssueID != 0 {
+ cond = cond.And(builder.Eq{"notification.issue_id": opts.IssueID})
+ }
+ if len(opts.Status) > 0 {
+ if len(opts.Status) == 1 {
+ cond = cond.And(builder.Eq{"notification.status": opts.Status[0]})
+ } else {
+ cond = cond.And(builder.In("notification.status", opts.Status))
+ }
+ }
+ if len(opts.Source) > 0 {
+ cond = cond.And(builder.In("notification.source", opts.Source))
+ }
+ if opts.UpdatedAfterUnix != 0 {
+ cond = cond.And(builder.Gte{"notification.updated_unix": opts.UpdatedAfterUnix})
+ }
+ if opts.UpdatedBeforeUnix != 0 {
+ cond = cond.And(builder.Lte{"notification.updated_unix": opts.UpdatedBeforeUnix})
+ }
+ return cond
+}
+
+func (opts FindNotificationOptions) ToOrders() string {
+ return "notification.updated_unix DESC"
+}
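
Note: because FindNotificationOptions implements ToConds and ToOrders, it plugs directly into the generic finder used elsewhere in the codebase. A hedged query sketch — the wrapper and its page size are illustrative:

    package notifquery

    import (
    	"context"

    	"code.gitea.io/gitea/models/activities"
    	"code.gitea.io/gitea/models/db"
    )

    // unreadForUser is an illustrative query: newest-first unread and pinned
    // notifications for a single user, paginated 20 per page.
    func unreadForUser(ctx context.Context, userID int64, page int) ([]*activities.Notification, error) {
    	return db.Find[activities.Notification](ctx, activities.FindNotificationOptions{
    		ListOptions: db.ListOptions{Page: page, PageSize: 20},
    		UserID:      userID,
    		Status: []activities.NotificationStatus{
    			activities.NotificationStatusUnread,
    			activities.NotificationStatusPinned,
    		},
    	})
    }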
+
+// CreateOrUpdateIssueNotifications creates an issue notification
+// for each watcher, or updates it if one already exists.
+// If receiverID > 0 only the receiver is notified, otherwise all watchers are.
+func CreateOrUpdateIssueNotifications(ctx context.Context, issueID, commentID, notificationAuthorID, receiverID int64) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if err := createOrUpdateIssueNotifications(ctx, issueID, commentID, notificationAuthorID, receiverID); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
+
+func createOrUpdateIssueNotifications(ctx context.Context, issueID, commentID, notificationAuthorID, receiverID int64) error {
+ // init
+ var toNotify container.Set[int64]
+ notifications, err := db.Find[Notification](ctx, FindNotificationOptions{
+ IssueID: issueID,
+ })
+ if err != nil {
+ return err
+ }
+
+ issue, err := issues_model.GetIssueByID(ctx, issueID)
+ if err != nil {
+ return err
+ }
+
+ if receiverID > 0 {
+ toNotify = make(container.Set[int64], 1)
+ toNotify.Add(receiverID)
+ } else {
+ toNotify = make(container.Set[int64], 32)
+ issueWatches, err := issues_model.GetIssueWatchersIDs(ctx, issueID, true)
+ if err != nil {
+ return err
+ }
+ toNotify.AddMultiple(issueWatches...)
+ if !(issue.IsPull && issues_model.HasWorkInProgressPrefix(issue.Title)) {
+ repoWatches, err := repo_model.GetRepoWatchersIDs(ctx, issue.RepoID)
+ if err != nil {
+ return err
+ }
+ toNotify.AddMultiple(repoWatches...)
+ }
+ issueParticipants, err := issue.GetParticipantIDsByIssue(ctx)
+ if err != nil {
+ return err
+ }
+ toNotify.AddMultiple(issueParticipants...)
+
+		// don't notify the user who caused the notification
+		delete(toNotify, notificationAuthorID)
+		// remove users who explicitly unwatched the issue
+ issueUnWatches, err := issues_model.GetIssueWatchersIDs(ctx, issueID, false)
+ if err != nil {
+ return err
+ }
+ for _, id := range issueUnWatches {
+ toNotify.Remove(id)
+ }
+ // Remove users who have the notification author blocked.
+ blockedAuthorIDs, err := user_model.ListBlockedByUsersID(ctx, notificationAuthorID)
+ if err != nil {
+ return err
+ }
+ for _, id := range blockedAuthorIDs {
+ toNotify.Remove(id)
+ }
+ }
+
+ err = issue.LoadRepo(ctx)
+ if err != nil {
+ return err
+ }
+
+ // notify
+ for userID := range toNotify {
+ issue.Repo.Units = nil
+ user, err := user_model.GetUserByID(ctx, userID)
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ continue
+ }
+
+ return err
+ }
+ if issue.IsPull && !access_model.CheckRepoUnitUser(ctx, issue.Repo, user, unit.TypePullRequests) {
+ continue
+ }
+ if !issue.IsPull && !access_model.CheckRepoUnitUser(ctx, issue.Repo, user, unit.TypeIssues) {
+ continue
+ }
+
+ if notificationExists(notifications, issue.ID, userID) {
+ if err = updateIssueNotification(ctx, userID, issue.ID, commentID, notificationAuthorID); err != nil {
+ return err
+ }
+ continue
+ }
+ if err = createIssueNotification(ctx, userID, issue, commentID, notificationAuthorID); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// NotificationList contains a list of notifications
+type NotificationList []*Notification
+
+// LoadAttributes loads the repositories, issues, users and comments if not already loaded
+func (nl NotificationList) LoadAttributes(ctx context.Context) error {
+ if _, _, err := nl.LoadRepos(ctx); err != nil {
+ return err
+ }
+ if _, err := nl.LoadIssues(ctx); err != nil {
+ return err
+ }
+ if _, err := nl.LoadUsers(ctx); err != nil {
+ return err
+ }
+ if _, err := nl.LoadComments(ctx); err != nil {
+ return err
+ }
+ return nil
+}
+
+// getPendingRepoIDs returns all the repository IDs which haven't been loaded
+func (nl NotificationList) getPendingRepoIDs() []int64 {
+ return container.FilterSlice(nl, func(n *Notification) (int64, bool) {
+ return n.RepoID, n.Repository == nil
+ })
+}
+
+// LoadRepos loads repositories from database
+func (nl NotificationList) LoadRepos(ctx context.Context) (repo_model.RepositoryList, []int, error) {
+ if len(nl) == 0 {
+ return repo_model.RepositoryList{}, []int{}, nil
+ }
+
+ repoIDs := nl.getPendingRepoIDs()
+ repos := make(map[int64]*repo_model.Repository, len(repoIDs))
+ left := len(repoIDs)
+ for left > 0 {
+ limit := db.DefaultMaxInSize
+ if left < limit {
+ limit = left
+ }
+ rows, err := db.GetEngine(ctx).
+ In("id", repoIDs[:limit]).
+ Rows(new(repo_model.Repository))
+ if err != nil {
+ return nil, nil, err
+ }
+
+ for rows.Next() {
+ var repo repo_model.Repository
+ err = rows.Scan(&repo)
+ if err != nil {
+ rows.Close()
+ return nil, nil, err
+ }
+
+ repos[repo.ID] = &repo
+ }
+ _ = rows.Close()
+
+ left -= limit
+ repoIDs = repoIDs[limit:]
+ }
+
+ failed := []int{}
+
+	reposList := make(repo_model.RepositoryList, 0, len(repos)) // repoIDs has been fully consumed by the chunked loop above
+ for i, notification := range nl {
+ if notification.Repository == nil {
+ notification.Repository = repos[notification.RepoID]
+ }
+ if notification.Repository == nil {
+ log.Error("Notification[%d]: RepoID: %d not found", notification.ID, notification.RepoID)
+ failed = append(failed, i)
+ continue
+ }
+ var found bool
+ for _, r := range reposList {
+ if r.ID == notification.RepoID {
+ found = true
+ break
+ }
+ }
+ if !found {
+ reposList = append(reposList, notification.Repository)
+ }
+ }
+ return reposList, failed, nil
+}
+
+func (nl NotificationList) getPendingIssueIDs() []int64 {
+ ids := make(container.Set[int64], len(nl))
+ for _, notification := range nl {
+ if notification.Issue != nil {
+ continue
+ }
+ ids.Add(notification.IssueID)
+ }
+ return ids.Values()
+}
+
+// LoadIssues loads issues from database
+func (nl NotificationList) LoadIssues(ctx context.Context) ([]int, error) {
+ if len(nl) == 0 {
+ return []int{}, nil
+ }
+
+ issueIDs := nl.getPendingIssueIDs()
+ issues := make(map[int64]*issues_model.Issue, len(issueIDs))
+ left := len(issueIDs)
+ for left > 0 {
+ limit := db.DefaultMaxInSize
+ if left < limit {
+ limit = left
+ }
+ rows, err := db.GetEngine(ctx).
+ In("id", issueIDs[:limit]).
+ Rows(new(issues_model.Issue))
+ if err != nil {
+ return nil, err
+ }
+
+ for rows.Next() {
+ var issue issues_model.Issue
+ err = rows.Scan(&issue)
+ if err != nil {
+ rows.Close()
+ return nil, err
+ }
+
+ issues[issue.ID] = &issue
+ }
+ _ = rows.Close()
+
+ left -= limit
+ issueIDs = issueIDs[limit:]
+ }
+
+ failures := []int{}
+
+ for i, notification := range nl {
+ if notification.Issue == nil {
+ notification.Issue = issues[notification.IssueID]
+ if notification.Issue == nil {
+ if notification.IssueID != 0 {
+ log.Error("Notification[%d]: IssueID: %d Not Found", notification.ID, notification.IssueID)
+ failures = append(failures, i)
+ }
+ continue
+ }
+ notification.Issue.Repo = notification.Repository
+ }
+ }
+ return failures, nil
+}
+
+// Without returns the notification list without the failures
+func (nl NotificationList) Without(failures []int) NotificationList {
+ if len(failures) == 0 {
+ return nl
+ }
+ remaining := make([]*Notification, 0, len(nl))
+ last := -1
+ var i int
+ for _, i = range failures {
+ remaining = append(remaining, nl[last+1:i]...)
+ last = i
+ }
+ if len(nl) > i {
+ remaining = append(remaining, nl[i+1:]...)
+ }
+ return remaining
+}
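
Note: Without exists so that callers can drop entries whose repository or issue rows are dangling instead of failing the whole request. The intended pattern, sketched with a hypothetical caller:

    package notifprune

    import (
    	"context"

    	"code.gitea.io/gitea/models/activities"
    )

    // pruneBroken loads repositories and issues for the list, discarding
    // notifications whose references could not be resolved.
    func pruneBroken(ctx context.Context, nl activities.NotificationList) (activities.NotificationList, error) {
    	_, failed, err := nl.LoadRepos(ctx)
    	if err != nil {
    		return nil, err
    	}
    	nl = nl.Without(failed)

    	failed, err = nl.LoadIssues(ctx) // indices now refer to the pruned list
    	if err != nil {
    		return nil, err
    	}
    	return nl.Without(failed), nil
    }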
+
+func (nl NotificationList) getPendingCommentIDs() []int64 {
+ ids := make(container.Set[int64], len(nl))
+ for _, notification := range nl {
+ if notification.CommentID == 0 || notification.Comment != nil {
+ continue
+ }
+ ids.Add(notification.CommentID)
+ }
+ return ids.Values()
+}
+
+func (nl NotificationList) getUserIDs() []int64 {
+ ids := make(container.Set[int64], len(nl))
+ for _, notification := range nl {
+ if notification.UserID == 0 || notification.User != nil {
+ continue
+ }
+ ids.Add(notification.UserID)
+ }
+ return ids.Values()
+}
+
+// LoadUsers loads users from database
+func (nl NotificationList) LoadUsers(ctx context.Context) ([]int, error) {
+ if len(nl) == 0 {
+ return []int{}, nil
+ }
+
+ userIDs := nl.getUserIDs()
+ users := make(map[int64]*user_model.User, len(userIDs))
+ left := len(userIDs)
+ for left > 0 {
+ limit := db.DefaultMaxInSize
+ if left < limit {
+ limit = left
+ }
+ rows, err := db.GetEngine(ctx).
+ In("id", userIDs[:limit]).
+ Rows(new(user_model.User))
+ if err != nil {
+ return nil, err
+ }
+
+ for rows.Next() {
+ var user user_model.User
+ err = rows.Scan(&user)
+ if err != nil {
+ rows.Close()
+ return nil, err
+ }
+
+ users[user.ID] = &user
+ }
+ _ = rows.Close()
+
+ left -= limit
+ userIDs = userIDs[limit:]
+ }
+
+ failures := []int{}
+ for i, notification := range nl {
+		if notification.UserID > 0 && notification.User == nil {
+ notification.User = users[notification.UserID]
+ if notification.User == nil {
+ log.Error("Notification[%d]: UserID[%d] failed to load", notification.ID, notification.UserID)
+ failures = append(failures, i)
+ continue
+ }
+ }
+ }
+ return failures, nil
+}
+
+// LoadComments loads comments from database
+func (nl NotificationList) LoadComments(ctx context.Context) ([]int, error) {
+ if len(nl) == 0 {
+ return []int{}, nil
+ }
+
+ commentIDs := nl.getPendingCommentIDs()
+ comments := make(map[int64]*issues_model.Comment, len(commentIDs))
+ left := len(commentIDs)
+ for left > 0 {
+ limit := db.DefaultMaxInSize
+ if left < limit {
+ limit = left
+ }
+ rows, err := db.GetEngine(ctx).
+ In("id", commentIDs[:limit]).
+ Rows(new(issues_model.Comment))
+ if err != nil {
+ return nil, err
+ }
+
+ for rows.Next() {
+ var comment issues_model.Comment
+ err = rows.Scan(&comment)
+ if err != nil {
+ rows.Close()
+ return nil, err
+ }
+
+ comments[comment.ID] = &comment
+ }
+ _ = rows.Close()
+
+ left -= limit
+ commentIDs = commentIDs[limit:]
+ }
+
+ failures := []int{}
+ for i, notification := range nl {
+		if notification.CommentID > 0 && notification.Comment == nil {
+ notification.Comment = comments[notification.CommentID]
+ if notification.Comment == nil {
+ log.Error("Notification[%d]: CommentID[%d] failed to load", notification.ID, notification.CommentID)
+ failures = append(failures, i)
+ continue
+ }
+ notification.Comment.Issue = notification.Issue
+ }
+ }
+ return failures, nil
+}
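
Note: all four loaders share one shape — consume the pending ID list in db.DefaultMaxInSize chunks so the SQL IN clause stays bounded. Reduced to a generic skeleton (a sketch of the pattern, not code from this commit):

    package chunked

    // loadInChunks captures the batching pattern used by LoadRepos, LoadIssues,
    // LoadUsers and LoadComments: query at most maxIn IDs per IN clause and
    // merge the per-batch results into one map.
    func loadInChunks[T any](ids []int64, maxIn int, query func([]int64) (map[int64]*T, error)) (map[int64]*T, error) {
    	result := make(map[int64]*T, len(ids))
    	for len(ids) > 0 {
    		limit := maxIn
    		if len(ids) < limit {
    			limit = len(ids)
    		}
    		batch, err := query(ids[:limit])
    		if err != nil {
    			return nil, err
    		}
    		for id, v := range batch {
    			result[id] = v
    		}
    		ids = ids[limit:]
    	}
    	return result, nil
    }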
diff --git a/models/activities/notification_test.go b/models/activities/notification_test.go
new file mode 100644
index 0000000..3ff223d
--- /dev/null
+++ b/models/activities/notification_test.go
@@ -0,0 +1,141 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package activities_test
+
+import (
+ "context"
+ "testing"
+
+ activities_model "code.gitea.io/gitea/models/activities"
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestCreateOrUpdateIssueNotifications(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 1})
+
+ require.NoError(t, activities_model.CreateOrUpdateIssueNotifications(db.DefaultContext, issue.ID, 0, 2, 0))
+
+	// User 9 is inactive, thus notifications are only created for users 1 and 4
+ notf := unittest.AssertExistsAndLoadBean(t, &activities_model.Notification{UserID: 1, IssueID: issue.ID})
+ assert.Equal(t, activities_model.NotificationStatusUnread, notf.Status)
+ unittest.CheckConsistencyFor(t, &issues_model.Issue{ID: issue.ID})
+
+ notf = unittest.AssertExistsAndLoadBean(t, &activities_model.Notification{UserID: 4, IssueID: issue.ID})
+ assert.Equal(t, activities_model.NotificationStatusUnread, notf.Status)
+}
+
+func TestNotificationsForUser(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+ notfs, err := db.Find[activities_model.Notification](db.DefaultContext, activities_model.FindNotificationOptions{
+ UserID: user.ID,
+ Status: []activities_model.NotificationStatus{
+ activities_model.NotificationStatusRead,
+ activities_model.NotificationStatusUnread,
+ },
+ })
+ require.NoError(t, err)
+ if assert.Len(t, notfs, 3) {
+ assert.EqualValues(t, 5, notfs[0].ID)
+ assert.EqualValues(t, user.ID, notfs[0].UserID)
+ assert.EqualValues(t, 4, notfs[1].ID)
+ assert.EqualValues(t, user.ID, notfs[1].UserID)
+ assert.EqualValues(t, 2, notfs[2].ID)
+ assert.EqualValues(t, user.ID, notfs[2].UserID)
+ }
+}
+
+func TestNotification_GetRepo(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ notf := unittest.AssertExistsAndLoadBean(t, &activities_model.Notification{RepoID: 1})
+ repo, err := notf.GetRepo(db.DefaultContext)
+ require.NoError(t, err)
+ assert.Equal(t, repo, notf.Repository)
+ assert.EqualValues(t, notf.RepoID, repo.ID)
+}
+
+func TestNotification_GetIssue(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ notf := unittest.AssertExistsAndLoadBean(t, &activities_model.Notification{RepoID: 1})
+ issue, err := notf.GetIssue(db.DefaultContext)
+ require.NoError(t, err)
+ assert.Equal(t, issue, notf.Issue)
+ assert.EqualValues(t, notf.IssueID, issue.ID)
+}
+
+func TestGetNotificationCount(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+ cnt, err := db.Count[activities_model.Notification](db.DefaultContext, activities_model.FindNotificationOptions{
+ UserID: user.ID,
+ Status: []activities_model.NotificationStatus{
+ activities_model.NotificationStatusRead,
+ },
+ })
+ require.NoError(t, err)
+ assert.EqualValues(t, 0, cnt)
+
+ cnt, err = db.Count[activities_model.Notification](db.DefaultContext, activities_model.FindNotificationOptions{
+ UserID: user.ID,
+ Status: []activities_model.NotificationStatus{
+ activities_model.NotificationStatusUnread,
+ },
+ })
+ require.NoError(t, err)
+ assert.EqualValues(t, 1, cnt)
+}
+
+func TestSetNotificationStatus(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+ notf := unittest.AssertExistsAndLoadBean(t,
+ &activities_model.Notification{UserID: user.ID, Status: activities_model.NotificationStatusRead})
+ _, err := activities_model.SetNotificationStatus(db.DefaultContext, notf.ID, user, activities_model.NotificationStatusPinned)
+ require.NoError(t, err)
+ unittest.AssertExistsAndLoadBean(t,
+ &activities_model.Notification{ID: notf.ID, Status: activities_model.NotificationStatusPinned})
+
+ _, err = activities_model.SetNotificationStatus(db.DefaultContext, 1, user, activities_model.NotificationStatusRead)
+ require.Error(t, err)
+ _, err = activities_model.SetNotificationStatus(db.DefaultContext, unittest.NonexistentID, user, activities_model.NotificationStatusRead)
+ require.Error(t, err)
+}
+
+func TestUpdateNotificationStatuses(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+ notfUnread := unittest.AssertExistsAndLoadBean(t,
+ &activities_model.Notification{UserID: user.ID, Status: activities_model.NotificationStatusUnread})
+ notfRead := unittest.AssertExistsAndLoadBean(t,
+ &activities_model.Notification{UserID: user.ID, Status: activities_model.NotificationStatusRead})
+ notfPinned := unittest.AssertExistsAndLoadBean(t,
+ &activities_model.Notification{UserID: user.ID, Status: activities_model.NotificationStatusPinned})
+ require.NoError(t, activities_model.UpdateNotificationStatuses(db.DefaultContext, user, activities_model.NotificationStatusUnread, activities_model.NotificationStatusRead))
+ unittest.AssertExistsAndLoadBean(t,
+ &activities_model.Notification{ID: notfUnread.ID, Status: activities_model.NotificationStatusRead})
+ unittest.AssertExistsAndLoadBean(t,
+ &activities_model.Notification{ID: notfRead.ID, Status: activities_model.NotificationStatusRead})
+ unittest.AssertExistsAndLoadBean(t,
+ &activities_model.Notification{ID: notfPinned.ID, Status: activities_model.NotificationStatusPinned})
+}
+
+func TestSetIssueReadBy(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 1})
+ require.NoError(t, db.WithTx(db.DefaultContext, func(ctx context.Context) error {
+ return activities_model.SetIssueReadBy(ctx, issue.ID, user.ID)
+ }))
+
+ nt, err := activities_model.GetIssueNotification(db.DefaultContext, user.ID, issue.ID)
+ require.NoError(t, err)
+ assert.EqualValues(t, activities_model.NotificationStatusRead, nt.Status)
+}
diff --git a/models/activities/repo_activity.go b/models/activities/repo_activity.go
new file mode 100644
index 0000000..ffa709a
--- /dev/null
+++ b/models/activities/repo_activity.go
@@ -0,0 +1,391 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package activities
+
+import (
+ "context"
+ "fmt"
+ "sort"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/gitrepo"
+
+ "xorm.io/xorm"
+)
+
+// ActivityAuthorData represents statistical git commit count data
+type ActivityAuthorData struct {
+ Name string `json:"name"`
+ Login string `json:"login"`
+ AvatarLink string `json:"avatar_link"`
+ HomeLink string `json:"home_link"`
+ Commits int64 `json:"commits"`
+}
+
+// ActivityStats represents issue and pull request information.
+type ActivityStats struct {
+ OpenedPRs issues_model.PullRequestList
+ OpenedPRAuthorCount int64
+ MergedPRs issues_model.PullRequestList
+ MergedPRAuthorCount int64
+ ActiveIssues issues_model.IssueList
+ OpenedIssues issues_model.IssueList
+ OpenedIssueAuthorCount int64
+ ClosedIssues issues_model.IssueList
+ ClosedIssueAuthorCount int64
+ UnresolvedIssues issues_model.IssueList
+ PublishedReleases []*repo_model.Release
+ PublishedReleaseAuthorCount int64
+ Code *git.CodeActivityStats
+}
+
+// GetActivityStats returns stats for the repository in the given time range
+func GetActivityStats(ctx context.Context, repo *repo_model.Repository, timeFrom time.Time, releases, issues, prs, code bool) (*ActivityStats, error) {
+ stats := &ActivityStats{Code: &git.CodeActivityStats{}}
+ if releases {
+ if err := stats.FillReleases(ctx, repo.ID, timeFrom); err != nil {
+ return nil, fmt.Errorf("FillReleases: %w", err)
+ }
+ }
+ if prs {
+ if err := stats.FillPullRequests(ctx, repo.ID, timeFrom); err != nil {
+ return nil, fmt.Errorf("FillPullRequests: %w", err)
+ }
+ }
+ if issues {
+ if err := stats.FillIssues(ctx, repo.ID, timeFrom); err != nil {
+ return nil, fmt.Errorf("FillIssues: %w", err)
+ }
+ }
+ if err := stats.FillUnresolvedIssues(ctx, repo.ID, timeFrom, issues, prs); err != nil {
+ return nil, fmt.Errorf("FillUnresolvedIssues: %w", err)
+ }
+ if code {
+ gitRepo, closer, err := gitrepo.RepositoryFromContextOrOpen(ctx, repo)
+ if err != nil {
+ return nil, fmt.Errorf("OpenRepository: %w", err)
+ }
+ defer closer.Close()
+
+ code, err := gitRepo.GetCodeActivityStats(timeFrom, repo.DefaultBranch)
+ if err != nil {
+ return nil, fmt.Errorf("FillFromGit: %w", err)
+ }
+ stats.Code = code
+ }
+ return stats, nil
+}
+
+// GetActivityStatsTopAuthors returns top author stats for git commits for all branches
+func GetActivityStatsTopAuthors(ctx context.Context, repo *repo_model.Repository, timeFrom time.Time, count int) ([]*ActivityAuthorData, error) {
+ gitRepo, closer, err := gitrepo.RepositoryFromContextOrOpen(ctx, repo)
+ if err != nil {
+ return nil, fmt.Errorf("OpenRepository: %w", err)
+ }
+ defer closer.Close()
+
+ code, err := gitRepo.GetCodeActivityStats(timeFrom, "")
+ if err != nil {
+ return nil, fmt.Errorf("FillFromGit: %w", err)
+ }
+ if code.Authors == nil {
+ return nil, nil
+ }
+ users := make(map[int64]*ActivityAuthorData)
+ var unknownUserID int64
+ unknownUserAvatarLink := user_model.NewGhostUser().AvatarLink(ctx)
+ for _, v := range code.Authors {
+ if len(v.Email) == 0 {
+ continue
+ }
+ u, err := user_model.GetUserByEmail(ctx, v.Email)
+ if u == nil || user_model.IsErrUserNotExist(err) {
+ unknownUserID--
+ users[unknownUserID] = &ActivityAuthorData{
+ Name: v.Name,
+ AvatarLink: unknownUserAvatarLink,
+ Commits: v.Commits,
+ }
+ continue
+ }
+ if err != nil {
+ return nil, err
+ }
+ if user, ok := users[u.ID]; !ok {
+ users[u.ID] = &ActivityAuthorData{
+ Name: u.DisplayName(),
+ Login: u.LowerName,
+ AvatarLink: u.AvatarLink(ctx),
+ HomeLink: u.HomeLink(),
+ Commits: v.Commits,
+ }
+ } else {
+ user.Commits += v.Commits
+ }
+ }
+ v := make([]*ActivityAuthorData, 0, len(users))
+ for _, u := range users {
+ v = append(v, u)
+ }
+
+ sort.Slice(v, func(i, j int) bool {
+ return v[i].Commits > v[j].Commits
+ })
+
+ cnt := count
+ if cnt > len(v) {
+ cnt = len(v)
+ }
+
+ return v[:cnt], nil
+}
+
+// ActivePRCount returns total active pull request count
+func (stats *ActivityStats) ActivePRCount() int {
+ return stats.OpenedPRCount() + stats.MergedPRCount()
+}
+
+// OpenedPRCount returns opened pull request count
+func (stats *ActivityStats) OpenedPRCount() int {
+ return len(stats.OpenedPRs)
+}
+
+// OpenedPRPerc returns the percentage of opened pull requests out of the total active
+func (stats *ActivityStats) OpenedPRPerc() int {
+ return int(float32(stats.OpenedPRCount()) / float32(stats.ActivePRCount()) * 100.0)
+}
+
+// MergedPRCount returns merged pull request count
+func (stats *ActivityStats) MergedPRCount() int {
+ return len(stats.MergedPRs)
+}
+
+// MergedPRPerc returns the percentage of merged pull requests out of the total active
+func (stats *ActivityStats) MergedPRPerc() int {
+ return int(float32(stats.MergedPRCount()) / float32(stats.ActivePRCount()) * 100.0)
+}
+
+// ActiveIssueCount returns total active issue count
+func (stats *ActivityStats) ActiveIssueCount() int {
+ return len(stats.ActiveIssues)
+}
+
+// OpenedIssueCount returns open issue count
+func (stats *ActivityStats) OpenedIssueCount() int {
+ return len(stats.OpenedIssues)
+}
+
+// OpenedIssuePerc returns the percentage of opened issues out of the total active
+func (stats *ActivityStats) OpenedIssuePerc() int {
+ return int(float32(stats.OpenedIssueCount()) / float32(stats.ActiveIssueCount()) * 100.0)
+}
+
+// ClosedIssueCount returns closed issue count
+func (stats *ActivityStats) ClosedIssueCount() int {
+ return len(stats.ClosedIssues)
+}
+
+// ClosedIssuePerc returns the percentage of closed issues out of the total active
+func (stats *ActivityStats) ClosedIssuePerc() int {
+ return int(float32(stats.ClosedIssueCount()) / float32(stats.ActiveIssueCount()) * 100.0)
+}
+
+// UnresolvedIssueCount returns unresolved issue and pull request count
+func (stats *ActivityStats) UnresolvedIssueCount() int {
+ return len(stats.UnresolvedIssues)
+}
+
+// PublishedReleaseCount returns published release count
+func (stats *ActivityStats) PublishedReleaseCount() int {
+ return len(stats.PublishedReleases)
+}
+
+// FillPullRequests loads pull request information for the activity page
+func (stats *ActivityStats) FillPullRequests(ctx context.Context, repoID int64, fromTime time.Time) error {
+ var err error
+ var count int64
+
+ // Merged pull requests
+ sess := pullRequestsForActivityStatement(ctx, repoID, fromTime, true)
+ sess.OrderBy("pull_request.merged_unix DESC")
+ stats.MergedPRs = make(issues_model.PullRequestList, 0)
+ if err = sess.Find(&stats.MergedPRs); err != nil {
+ return err
+ }
+ if err = stats.MergedPRs.LoadAttributes(ctx); err != nil {
+ return err
+ }
+
+ // Merged pull request authors
+ sess = pullRequestsForActivityStatement(ctx, repoID, fromTime, true)
+ if _, err = sess.Select("count(distinct issue.poster_id) as `count`").Table("pull_request").Get(&count); err != nil {
+ return err
+ }
+ stats.MergedPRAuthorCount = count
+
+ // Opened pull requests
+ sess = pullRequestsForActivityStatement(ctx, repoID, fromTime, false)
+ sess.OrderBy("issue.created_unix ASC")
+ stats.OpenedPRs = make(issues_model.PullRequestList, 0)
+ if err = sess.Find(&stats.OpenedPRs); err != nil {
+ return err
+ }
+ if err = stats.OpenedPRs.LoadAttributes(ctx); err != nil {
+ return err
+ }
+
+ // Opened pull request authors
+ sess = pullRequestsForActivityStatement(ctx, repoID, fromTime, false)
+ if _, err = sess.Select("count(distinct issue.poster_id) as `count`").Table("pull_request").Get(&count); err != nil {
+ return err
+ }
+ stats.OpenedPRAuthorCount = count
+
+ return nil
+}
+
+func pullRequestsForActivityStatement(ctx context.Context, repoID int64, fromTime time.Time, merged bool) *xorm.Session {
+ sess := db.GetEngine(ctx).Where("pull_request.base_repo_id=?", repoID).
+ Join("INNER", "issue", "pull_request.issue_id = issue.id")
+
+ if merged {
+ sess.And("pull_request.has_merged = ?", true)
+ sess.And("pull_request.merged_unix >= ?", fromTime.Unix())
+ } else {
+ sess.And("issue.is_closed = ?", false)
+ sess.And("issue.created_unix >= ?", fromTime.Unix())
+ }
+
+ return sess
+}
+
+// FillIssues loads issue information for the activity page
+func (stats *ActivityStats) FillIssues(ctx context.Context, repoID int64, fromTime time.Time) error {
+ var err error
+ var count int64
+
+ // Closed issues
+ sess := issuesForActivityStatement(ctx, repoID, fromTime, true, false)
+ sess.OrderBy("issue.closed_unix DESC")
+ stats.ClosedIssues = make(issues_model.IssueList, 0)
+ if err = sess.Find(&stats.ClosedIssues); err != nil {
+ return err
+ }
+
+ // Closed issue authors
+ sess = issuesForActivityStatement(ctx, repoID, fromTime, true, false)
+ if _, err = sess.Select("count(distinct issue.poster_id) as `count`").Table("issue").Get(&count); err != nil {
+ return err
+ }
+ stats.ClosedIssueAuthorCount = count
+
+ // New issues
+ sess = newlyCreatedIssues(ctx, repoID, fromTime)
+ sess.OrderBy("issue.created_unix ASC")
+ stats.OpenedIssues = make(issues_model.IssueList, 0)
+ if err = sess.Find(&stats.OpenedIssues); err != nil {
+ return err
+ }
+
+ // Active issues
+ sess = activeIssues(ctx, repoID, fromTime)
+ sess.OrderBy("issue.created_unix ASC")
+ stats.ActiveIssues = make(issues_model.IssueList, 0)
+ if err = sess.Find(&stats.ActiveIssues); err != nil {
+ return err
+ }
+
+ // Opened issue authors
+ sess = issuesForActivityStatement(ctx, repoID, fromTime, false, false)
+ if _, err = sess.Select("count(distinct issue.poster_id) as `count`").Table("issue").Get(&count); err != nil {
+ return err
+ }
+ stats.OpenedIssueAuthorCount = count
+
+ return nil
+}
+
+// FillUnresolvedIssues loads unresolved issue and pull request information for the activity page
+func (stats *ActivityStats) FillUnresolvedIssues(ctx context.Context, repoID int64, fromTime time.Time, issues, prs bool) error {
+ // Check if we need to select anything
+ if !issues && !prs {
+ return nil
+ }
+ sess := issuesForActivityStatement(ctx, repoID, fromTime, false, true)
+ if !issues || !prs {
+ sess.And("issue.is_pull = ?", prs)
+ }
+ sess.OrderBy("issue.updated_unix DESC")
+ stats.UnresolvedIssues = make(issues_model.IssueList, 0)
+ return sess.Find(&stats.UnresolvedIssues)
+}
+
+func newlyCreatedIssues(ctx context.Context, repoID int64, fromTime time.Time) *xorm.Session {
+ sess := db.GetEngine(ctx).Where("issue.repo_id = ?", repoID).
+ And("issue.is_pull = ?", false). // Retain the is_pull check to exclude pull requests
+ And("issue.created_unix >= ?", fromTime.Unix()) // Include all issues created after fromTime
+
+ return sess
+}
+
+func activeIssues(ctx context.Context, repoID int64, fromTime time.Time) *xorm.Session {
+ sess := db.GetEngine(ctx).Where("issue.repo_id = ?", repoID).
+ And("issue.is_pull = ?", false).
+ And("issue.created_unix >= ? OR issue.closed_unix >= ?", fromTime.Unix(), fromTime.Unix())
+
+ return sess
+}
+
+func issuesForActivityStatement(ctx context.Context, repoID int64, fromTime time.Time, closed, unresolved bool) *xorm.Session {
+ sess := db.GetEngine(ctx).Where("issue.repo_id = ?", repoID).
+ And("issue.is_closed = ?", closed)
+
+ if !unresolved {
+ sess.And("issue.is_pull = ?", false)
+ if closed {
+ sess.And("issue.closed_unix >= ?", fromTime.Unix())
+ } else {
+ sess.And("issue.created_unix >= ?", fromTime.Unix())
+ }
+ } else {
+ sess.And("issue.created_unix < ?", fromTime.Unix())
+ sess.And("issue.updated_unix >= ?", fromTime.Unix())
+ }
+
+ return sess
+}
+
+// FillReleases returns release information for activity page
+func (stats *ActivityStats) FillReleases(ctx context.Context, repoID int64, fromTime time.Time) error {
+ var err error
+ var count int64
+
+ // Published releases list
+ sess := releasesForActivityStatement(ctx, repoID, fromTime)
+ sess.OrderBy("`release`.created_unix DESC")
+ stats.PublishedReleases = make([]*repo_model.Release, 0)
+ if err = sess.Find(&stats.PublishedReleases); err != nil {
+ return err
+ }
+
+ // Published releases authors
+ sess = releasesForActivityStatement(ctx, repoID, fromTime)
+ if _, err = sess.Select("count(distinct `release`.publisher_id) as `count`").Table("release").Get(&count); err != nil {
+ return err
+ }
+ stats.PublishedReleaseAuthorCount = count
+
+ return nil
+}
+
+func releasesForActivityStatement(ctx context.Context, repoID int64, fromTime time.Time) *xorm.Session {
+ return db.GetEngine(ctx).Where("`release`.repo_id = ?", repoID).
+ And("`release`.is_draft = ?", false).
+ And("`release`.created_unix >= ?", fromTime.Unix())
+}
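
The Fill* helpers above each populate one facet of an ActivityStats value. As an illustrative sketch (not part of this patch), a caller could combine them like this, assuming an initialized models layer and the ActivityStats type defined earlier in this file:

// Sketch only: assumes the models layer is initialized (e.g. via unittest.PrepareTestDatabase).
package activities

import (
	"context"
	"time"
)

func fillMonthlyActivity(ctx context.Context, repoID int64) (*ActivityStats, error) {
	stats := &ActivityStats{}
	fromTime := time.Now().AddDate(0, -1, 0) // look back one month
	if err := stats.FillIssues(ctx, repoID, fromTime); err != nil {
		return nil, err
	}
	if err := stats.FillReleases(ctx, repoID, fromTime); err != nil {
		return nil, err
	}
	// true, true: include both unresolved issues and unresolved pull requests.
	if err := stats.FillUnresolvedIssues(ctx, repoID, fromTime, true, true); err != nil {
		return nil, err
	}
	return stats, nil
}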
diff --git a/models/activities/repo_activity_test.go b/models/activities/repo_activity_test.go
new file mode 100644
index 0000000..06cd0e1
--- /dev/null
+++ b/models/activities/repo_activity_test.go
@@ -0,0 +1,30 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package activities
+
+import (
+ "testing"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGetActivityStats(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+
+ stats, err := GetActivityStats(db.DefaultContext, repo, time.Unix(0, 0), true, true, true, true)
+ require.NoError(t, err)
+
+ assert.EqualValues(t, 2, stats.ActiveIssueCount())
+ assert.EqualValues(t, 2, stats.OpenedIssueCount())
+ assert.EqualValues(t, 0, stats.ClosedIssueCount())
+ assert.EqualValues(t, 3, stats.ActivePRCount())
+}
diff --git a/models/activities/statistic.go b/models/activities/statistic.go
new file mode 100644
index 0000000..ff81ad7
--- /dev/null
+++ b/models/activities/statistic.go
@@ -0,0 +1,120 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package activities
+
+import (
+ "context"
+
+ asymkey_model "code.gitea.io/gitea/models/asymkey"
+ "code.gitea.io/gitea/models/auth"
+ "code.gitea.io/gitea/models/db"
+ git_model "code.gitea.io/gitea/models/git"
+ issues_model "code.gitea.io/gitea/models/issues"
+ "code.gitea.io/gitea/models/organization"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ project_model "code.gitea.io/gitea/models/project"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/models/webhook"
+ "code.gitea.io/gitea/modules/setting"
+)
+
+// Statistic contains the database statistics
+type Statistic struct {
+ Counter struct {
+ User, Org, PublicKey,
+ Repo, Watch, Star, Access,
+ Issue, IssueClosed, IssueOpen,
+ Comment, Oauth, Follow,
+ Mirror, Release, AuthSource, Webhook,
+ Milestone, Label, HookTask,
+ Team, UpdateTask, Project,
+ ProjectColumn, Attachment,
+ Branches, Tags, CommitStatus int64
+ IssueByLabel []IssueByLabelCount
+ IssueByRepository []IssueByRepositoryCount
+ }
+}
+
+// IssueByLabelCount contains the number of issue group by label
+type IssueByLabelCount struct {
+ Count int64
+ Label string
+}
+
+// IssueByRepositoryCount contains the number of issue group by repository
+type IssueByRepositoryCount struct {
+ Count int64
+ OwnerName string
+ Repository string
+}
+
+// GetStatistic returns the database statistics
+func GetStatistic(ctx context.Context) (stats Statistic) {
+ e := db.GetEngine(ctx)
+ stats.Counter.User = user_model.CountUsers(ctx, nil)
+ stats.Counter.Org, _ = db.Count[organization.Organization](ctx, organization.FindOrgOptions{IncludePrivate: true})
+ stats.Counter.PublicKey, _ = e.Count(new(asymkey_model.PublicKey))
+ stats.Counter.Repo, _ = repo_model.CountRepositories(ctx, repo_model.CountRepositoryOptions{})
+ stats.Counter.Watch, _ = e.Count(new(repo_model.Watch))
+ stats.Counter.Star, _ = e.Count(new(repo_model.Star))
+ stats.Counter.Access, _ = e.Count(new(access_model.Access))
+ stats.Counter.Branches, _ = e.Count(new(git_model.Branch))
+ stats.Counter.Tags, _ = e.Where("is_draft=?", false).Count(new(repo_model.Release))
+ stats.Counter.CommitStatus, _ = e.Count(new(git_model.CommitStatus))
+
+ type IssueCount struct {
+ Count int64
+ IsClosed bool
+ }
+
+ if setting.Metrics.EnabledIssueByLabel {
+ stats.Counter.IssueByLabel = []IssueByLabelCount{}
+
+ _ = e.Select("COUNT(*) AS count, l.name AS label").
+ Join("LEFT", "label l", "l.id=il.label_id").
+ Table("issue_label il").
+ GroupBy("l.name").
+ Find(&stats.Counter.IssueByLabel)
+ }
+
+ if setting.Metrics.EnabledIssueByRepository {
+ stats.Counter.IssueByRepository = []IssueByRepositoryCount{}
+
+ _ = e.Select("COUNT(*) AS count, r.owner_name, r.name AS repository").
+ Join("LEFT", "repository r", "r.id=i.repo_id").
+ Table("issue i").
+ GroupBy("r.owner_name, r.name").
+ Find(&stats.Counter.IssueByRepository)
+ }
+
+ var issueCounts []IssueCount
+
+ _ = e.Select("COUNT(*) AS count, is_closed").Table("issue").GroupBy("is_closed").Find(&issueCounts)
+ for _, c := range issueCounts {
+ if c.IsClosed {
+ stats.Counter.IssueClosed = c.Count
+ } else {
+ stats.Counter.IssueOpen = c.Count
+ }
+ }
+
+ stats.Counter.Issue = stats.Counter.IssueClosed + stats.Counter.IssueOpen
+
+ stats.Counter.Comment, _ = e.Count(new(issues_model.Comment))
+ stats.Counter.Oauth = 0
+ stats.Counter.Follow, _ = e.Count(new(user_model.Follow))
+ stats.Counter.Mirror, _ = e.Count(new(repo_model.Mirror))
+ stats.Counter.Release, _ = e.Count(new(repo_model.Release))
+ stats.Counter.AuthSource, _ = db.Count[auth.Source](ctx, auth.FindSourcesOptions{})
+ stats.Counter.Webhook, _ = e.Count(new(webhook.Webhook))
+ stats.Counter.Milestone, _ = e.Count(new(issues_model.Milestone))
+ stats.Counter.Label, _ = e.Count(new(issues_model.Label))
+ stats.Counter.HookTask, _ = e.Count(new(webhook.HookTask))
+ stats.Counter.Team, _ = e.Count(new(organization.Team))
+ stats.Counter.Attachment, _ = e.Count(new(repo_model.Attachment))
+ stats.Counter.Project, _ = e.Count(new(project_model.Project))
+ stats.Counter.ProjectColumn, _ = e.Count(new(project_model.Column))
+ return stats
+}
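
GetStatistic deliberately discards individual count errors (each Count call drops its error), so it always returns a best-effort result. A minimal usage sketch, assuming an initialized database:

// Sketch only: prints a few counters from the Statistic struct above.
package activities

import (
	"context"
	"fmt"
)

func printIssueTotals(ctx context.Context) {
	stats := GetStatistic(ctx)
	fmt.Printf("issues: %d open / %d closed (total %d)\n",
		stats.Counter.IssueOpen, stats.Counter.IssueClosed, stats.Counter.Issue)
}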
diff --git a/models/activities/user_heatmap.go b/models/activities/user_heatmap.go
new file mode 100644
index 0000000..080075d
--- /dev/null
+++ b/models/activities/user_heatmap.go
@@ -0,0 +1,78 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package activities
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/organization"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/timeutil"
+)
+
+// UserHeatmapData represents the data needed to create a heatmap
+type UserHeatmapData struct {
+ Timestamp timeutil.TimeStamp `json:"timestamp"`
+ Contributions int64 `json:"contributions"`
+}
+
+// GetUserHeatmapDataByUser returns an array of UserHeatmapData
+func GetUserHeatmapDataByUser(ctx context.Context, user, doer *user_model.User) ([]*UserHeatmapData, error) {
+ return getUserHeatmapData(ctx, user, nil, doer)
+}
+
+// GetUserHeatmapDataByUserTeam returns an array of UserHeatmapData
+func GetUserHeatmapDataByUserTeam(ctx context.Context, user *user_model.User, team *organization.Team, doer *user_model.User) ([]*UserHeatmapData, error) {
+ return getUserHeatmapData(ctx, user, team, doer)
+}
+
+func getUserHeatmapData(ctx context.Context, user *user_model.User, team *organization.Team, doer *user_model.User) ([]*UserHeatmapData, error) {
+ hdata := make([]*UserHeatmapData, 0)
+
+ if !ActivityReadable(user, doer) {
+ return hdata, nil
+ }
+
+ // Group by 15 minute intervals which will allow the client to accurately shift the timestamp to their timezone.
+ // The interval is based on the fact that there are timezones such as UTC +5:30 and UTC +12:45.
+ groupBy := "created_unix / 900 * 900"
+ if setting.Database.Type.IsMySQL() {
+ groupBy = "created_unix DIV 900 * 900"
+ }
+
+ cond, err := activityQueryCondition(ctx, GetFeedsOptions{
+ RequestedUser: user,
+ RequestedTeam: team,
+ Actor: doer,
+ IncludePrivate: true, // don't filter by private, as we already filter by repo access
+ IncludeDeleted: true,
+ // * Heatmaps for individual users only include actions that the user themself did.
+ // * For organizations, actions by all users that were made in owned
+ // repositories are counted.
+ OnlyPerformedBy: !user.IsOrganization(),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return hdata, db.GetEngine(ctx).
+ Select(groupBy+" AS timestamp, count(user_id) as contributions").
+ Table("action").
+ Where(cond).
+ And("created_unix > ?", timeutil.TimeStampNow()-31536000).
+ GroupBy("timestamp").
+ OrderBy("timestamp").
+ Find(&hdata)
+}
+
+// GetTotalContributionsInHeatmap returns the total number of contributions in a heatmap
+func GetTotalContributionsInHeatmap(hdata []*UserHeatmapData) int64 {
+ var total int64
+ for _, v := range hdata {
+ total += v.Contributions
+ }
+ return total
+}
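
The heatmap buckets actions into 15-minute (900-second) groups so clients can shift timestamps into any timezone, including :30 and :45 offsets. A self-contained sketch of the bucket arithmetic mirrored by the SQL above (the value matches the fixtures in the test below):

// Sketch: the 15-minute bucketing used in the GROUP BY expression above.
package main

import "fmt"

// bucket snaps a unix timestamp to its 900-second bucket, mirroring
// "created_unix / 900 * 900" (or DIV on MySQL); integer division truncates.
func bucket(createdUnix int64) int64 {
	return createdUnix / 900 * 900
}

func main() {
	fmt.Println(bucket(1603010701)) // 1603010700
}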
diff --git a/models/activities/user_heatmap_test.go b/models/activities/user_heatmap_test.go
new file mode 100644
index 0000000..316ea7d
--- /dev/null
+++ b/models/activities/user_heatmap_test.go
@@ -0,0 +1,101 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package activities_test
+
+import (
+ "testing"
+ "time"
+
+ activities_model "code.gitea.io/gitea/models/activities"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGetUserHeatmapDataByUser(t *testing.T) {
+ testCases := []struct {
+ desc string
+ userID int64
+ doerID int64
+ CountResult int
+ JSONResult string
+ }{
+ {
+ "self looks at action in private repo",
+ 2, 2, 1, `[{"timestamp":1603227600,"contributions":1}]`,
+ },
+ {
+ "admin looks at action in private repo",
+ 2, 1, 1, `[{"timestamp":1603227600,"contributions":1}]`,
+ },
+ {
+ "other user looks at action in private repo",
+ 2, 3, 0, `[]`,
+ },
+ {
+ "nobody looks at action in private repo",
+ 2, 0, 0, `[]`,
+ },
+ {
+ "collaborator looks at action in private repo",
+ 16, 15, 1, `[{"timestamp":1603267200,"contributions":1}]`,
+ },
+ {
+ "no action action not performed by target user",
+ 3, 3, 0, `[]`,
+ },
+ {
+ "multiple actions performed with two grouped together",
+ 10, 10, 3, `[{"timestamp":1603009800,"contributions":1},{"timestamp":1603010700,"contributions":2}]`,
+ },
+ }
+ // Prepare
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ // Mock time
+ timeutil.MockSet(time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC))
+ defer timeutil.MockUnset()
+
+ for _, tc := range testCases {
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: tc.userID})
+
+ doer := &user_model.User{ID: tc.doerID}
+ _, err := unittest.LoadBeanIfExists(doer)
+ require.NoError(t, err)
+ if tc.doerID == 0 {
+ doer = nil
+ }
+
+ // get the action for comparison
+ actions, count, err := activities_model.GetFeeds(db.DefaultContext, activities_model.GetFeedsOptions{
+ RequestedUser: user,
+ Actor: doer,
+ IncludePrivate: true,
+ OnlyPerformedBy: true,
+ IncludeDeleted: true,
+ })
+ require.NoError(t, err)
+
+ // Get the heatmap and compare
+ heatmap, err := activities_model.GetUserHeatmapDataByUser(db.DefaultContext, user, doer)
+ var contributions int
+ for _, hm := range heatmap {
+ contributions += int(hm.Contributions)
+ }
+ require.NoError(t, err)
+ assert.Len(t, actions, contributions, "invalid action count: did the test data become too old?")
+ assert.Equal(t, count, int64(contributions))
+ assert.Equal(t, tc.CountResult, contributions, tc.desc)
+
+ // Test JSON rendering
+ jsonData, err := json.Marshal(heatmap)
+ require.NoError(t, err)
+ assert.Equal(t, tc.JSONResult, string(jsonData))
+ }
+}
diff --git a/models/admin/task.go b/models/admin/task.go
new file mode 100644
index 0000000..c8bc95f
--- /dev/null
+++ b/models/admin/task.go
@@ -0,0 +1,232 @@
+// Copyright 2019 Gitea. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package admin
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/migration"
+ "code.gitea.io/gitea/modules/secret"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+)
+
+// Task represents a task
+type Task struct {
+ ID int64
+ DoerID int64 `xorm:"index"` // operator
+ Doer *user_model.User `xorm:"-"`
+ OwnerID int64 `xorm:"index"` // repo owner id, when creating, the repoID maybe zero
+ Owner *user_model.User `xorm:"-"`
+ RepoID int64 `xorm:"index"`
+ Repo *repo_model.Repository `xorm:"-"`
+ Type structs.TaskType
+ Status structs.TaskStatus `xorm:"index"`
+ StartTime timeutil.TimeStamp
+ EndTime timeutil.TimeStamp
+ PayloadContent string `xorm:"TEXT"`
+ Message string `xorm:"TEXT"` // if task failed, saved the error reason, it could be a JSON string of TranslatableMessage or a plain message
+ Created timeutil.TimeStamp `xorm:"created"`
+}
+
+func init() {
+ db.RegisterModel(new(Task))
+}
+
+// TranslatableMessage represents JSON struct that can be translated with a Locale
+type TranslatableMessage struct {
+ Format string
+ Args []any `json:",omitempty"` // leading comma: omitempty is a tag option, not a field name
+}
+
+// LoadRepo loads repository of the task
+func (task *Task) LoadRepo(ctx context.Context) error {
+ if task.Repo != nil {
+ return nil
+ }
+ var repo repo_model.Repository
+ has, err := db.GetEngine(ctx).ID(task.RepoID).Get(&repo)
+ if err != nil {
+ return err
+ } else if !has {
+ return repo_model.ErrRepoNotExist{
+ ID: task.RepoID,
+ }
+ }
+ task.Repo = &repo
+ return nil
+}
+
+// LoadDoer loads the doer user
+func (task *Task) LoadDoer(ctx context.Context) error {
+ if task.Doer != nil {
+ return nil
+ }
+
+ var doer user_model.User
+ has, err := db.GetEngine(ctx).ID(task.DoerID).Get(&doer)
+ if err != nil {
+ return err
+ } else if !has {
+ return user_model.ErrUserNotExist{
+ UID: task.DoerID,
+ }
+ }
+ task.Doer = &doer
+
+ return nil
+}
+
+// LoadOwner loads owner user
+func (task *Task) LoadOwner(ctx context.Context) error {
+ if task.Owner != nil {
+ return nil
+ }
+
+ var owner user_model.User
+ has, err := db.GetEngine(ctx).ID(task.OwnerID).Get(&owner)
+ if err != nil {
+ return err
+ } else if !has {
+ return user_model.ErrUserNotExist{
+ UID: task.OwnerID,
+ }
+ }
+ task.Owner = &owner
+
+ return nil
+}
+
+// UpdateCols updates some columns
+func (task *Task) UpdateCols(ctx context.Context, cols ...string) error {
+ _, err := db.GetEngine(ctx).ID(task.ID).Cols(cols...).Update(task)
+ return err
+}
+
+// MigrateConfig returns task config when migrate repository
+func (task *Task) MigrateConfig() (*migration.MigrateOptions, error) {
+ if task.Type == structs.TaskTypeMigrateRepo {
+ var opts migration.MigrateOptions
+ err := json.Unmarshal([]byte(task.PayloadContent), &opts)
+ if err != nil {
+ return nil, err
+ }
+
+ // decrypt credentials
+ if opts.CloneAddrEncrypted != "" {
+ if opts.CloneAddr, err = secret.DecryptSecret(setting.SecretKey, opts.CloneAddrEncrypted); err != nil {
+ return nil, err
+ }
+ }
+ if opts.AuthPasswordEncrypted != "" {
+ if opts.AuthPassword, err = secret.DecryptSecret(setting.SecretKey, opts.AuthPasswordEncrypted); err != nil {
+ return nil, err
+ }
+ }
+ if opts.AuthTokenEncrypted != "" {
+ if opts.AuthToken, err = secret.DecryptSecret(setting.SecretKey, opts.AuthTokenEncrypted); err != nil {
+ return nil, err
+ }
+ }
+
+ return &opts, nil
+ }
+ return nil, fmt.Errorf("Task type is %s, not Migrate Repo", task.Type.Name())
+}
+
+// ErrTaskDoesNotExist represents a "TaskDoesNotExist" kind of error.
+type ErrTaskDoesNotExist struct {
+ ID int64
+ RepoID int64
+ Type structs.TaskType
+}
+
+// IsErrTaskDoesNotExist checks if an error is a ErrTaskDoesNotExist.
+func IsErrTaskDoesNotExist(err error) bool {
+ _, ok := err.(ErrTaskDoesNotExist)
+ return ok
+}
+
+func (err ErrTaskDoesNotExist) Error() string {
+ return fmt.Sprintf("task does not exist [id: %d, repo_id: %d, type: %d]",
+ err.ID, err.RepoID, err.Type)
+}
+
+func (err ErrTaskDoesNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// GetMigratingTask returns the migrating task by repo's id
+func GetMigratingTask(ctx context.Context, repoID int64) (*Task, error) {
+ task := Task{
+ RepoID: repoID,
+ Type: structs.TaskTypeMigrateRepo,
+ }
+ has, err := db.GetEngine(ctx).Get(&task)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrTaskDoesNotExist{0, repoID, task.Type}
+ }
+ return &task, nil
+}
+
+// GetMigratingTaskByID returns the migrating task by repo's id
+func GetMigratingTaskByID(ctx context.Context, id, doerID int64) (*Task, *migration.MigrateOptions, error) {
+ task := Task{
+ ID: id,
+ DoerID: doerID,
+ Type: structs.TaskTypeMigrateRepo,
+ }
+ has, err := db.GetEngine(ctx).Get(&task)
+ if err != nil {
+ return nil, nil, err
+ } else if !has {
+ return nil, nil, ErrTaskDoesNotExist{id, 0, task.Type}
+ }
+
+ var opts migration.MigrateOptions
+ if err := json.Unmarshal([]byte(task.PayloadContent), &opts); err != nil {
+ return nil, nil, err
+ }
+ return &task, &opts, nil
+}
+
+// CreateTask creates a task on database
+func CreateTask(ctx context.Context, task *Task) error {
+ return db.Insert(ctx, task)
+}
+
+// FinishMigrateTask updates database when migrate task finished
+func FinishMigrateTask(ctx context.Context, task *Task) error {
+ task.Status = structs.TaskStatusFinished
+ task.EndTime = timeutil.TimeStampNow()
+
+ // delete credentials when we're done, they're a liability.
+ conf, err := task.MigrateConfig()
+ if err != nil {
+ return err
+ }
+ conf.AuthPassword = ""
+ conf.AuthToken = ""
+ conf.CloneAddr = util.SanitizeCredentialURLs(conf.CloneAddr)
+ conf.AuthPasswordEncrypted = ""
+ conf.AuthTokenEncrypted = ""
+ conf.CloneAddrEncrypted = ""
+ confBytes, err := json.Marshal(conf)
+ if err != nil {
+ return err
+ }
+ task.PayloadContent = string(confBytes)
+
+ _, err = db.GetEngine(ctx).ID(task.ID).Cols("status", "end_time", "payload_content").Update(task)
+ return err
+}
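
Taken together, the functions above define a task lifecycle: insert with CreateTask, mark progress with UpdateCols, decrypt the payload with MigrateConfig, and scrub credentials with FinishMigrateTask. A hedged sketch of a driver, assuming a structs.TaskStatusRunning constant exists alongside the statuses used above:

// Sketch only: structs.TaskStatusRunning is assumed, not shown in this patch.
package admin

import (
	"context"

	"code.gitea.io/gitea/modules/structs"
	"code.gitea.io/gitea/modules/timeutil"
)

func runMigration(ctx context.Context, task *Task) error {
	task.StartTime = timeutil.TimeStampNow()
	task.Status = structs.TaskStatusRunning // assumed status constant
	if err := task.UpdateCols(ctx, "start_time", "status"); err != nil {
		return err
	}
	opts, err := task.MigrateConfig() // decrypts clone address and credentials
	if err != nil {
		return err
	}
	_ = opts // ... perform the migration using opts ...

	// FinishMigrateTask scrubs credentials from the payload before saving.
	return FinishMigrateTask(ctx, task)
}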
diff --git a/models/asymkey/error.go b/models/asymkey/error.go
new file mode 100644
index 0000000..03bc823
--- /dev/null
+++ b/models/asymkey/error.go
@@ -0,0 +1,318 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package asymkey
+
+import (
+ "fmt"
+
+ "code.gitea.io/gitea/modules/util"
+)
+
+// ErrKeyUnableVerify represents a "KeyUnableVerify" kind of error.
+type ErrKeyUnableVerify struct {
+ Result string
+}
+
+// IsErrKeyUnableVerify checks if an error is a ErrKeyUnableVerify.
+func IsErrKeyUnableVerify(err error) bool {
+ _, ok := err.(ErrKeyUnableVerify)
+ return ok
+}
+
+func (err ErrKeyUnableVerify) Error() string {
+ return fmt.Sprintf("Unable to verify key content [result: %s]", err.Result)
+}
+
+// ErrKeyIsPrivate is returned when the provided key is a private key not a public key
+var ErrKeyIsPrivate = util.NewSilentWrapErrorf(util.ErrInvalidArgument, "the provided key is a private key")
+
+// ErrKeyNotExist represents a "KeyNotExist" kind of error.
+type ErrKeyNotExist struct {
+ ID int64
+}
+
+// IsErrKeyNotExist checks if an error is a ErrKeyNotExist.
+func IsErrKeyNotExist(err error) bool {
+ _, ok := err.(ErrKeyNotExist)
+ return ok
+}
+
+func (err ErrKeyNotExist) Error() string {
+ return fmt.Sprintf("public key does not exist [id: %d]", err.ID)
+}
+
+func (err ErrKeyNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ErrKeyAlreadyExist represents a "KeyAlreadyExist" kind of error.
+type ErrKeyAlreadyExist struct {
+ OwnerID int64
+ Fingerprint string
+ Content string
+}
+
+// IsErrKeyAlreadyExist checks if an error is a ErrKeyAlreadyExist.
+func IsErrKeyAlreadyExist(err error) bool {
+ _, ok := err.(ErrKeyAlreadyExist)
+ return ok
+}
+
+func (err ErrKeyAlreadyExist) Error() string {
+ return fmt.Sprintf("public key already exists [owner_id: %d, finger_print: %s, content: %s]",
+ err.OwnerID, err.Fingerprint, err.Content)
+}
+
+func (err ErrKeyAlreadyExist) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+// ErrKeyNameAlreadyUsed represents a "KeyNameAlreadyUsed" kind of error.
+type ErrKeyNameAlreadyUsed struct {
+ OwnerID int64
+ Name string
+}
+
+// IsErrKeyNameAlreadyUsed checks if an error is a ErrKeyNameAlreadyUsed.
+func IsErrKeyNameAlreadyUsed(err error) bool {
+ _, ok := err.(ErrKeyNameAlreadyUsed)
+ return ok
+}
+
+func (err ErrKeyNameAlreadyUsed) Error() string {
+ return fmt.Sprintf("public key already exists [owner_id: %d, name: %s]", err.OwnerID, err.Name)
+}
+
+func (err ErrKeyNameAlreadyUsed) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+// ErrGPGNoEmailFound represents a "ErrGPGNoEmailFound" kind of error.
+type ErrGPGNoEmailFound struct {
+ FailedEmails []string
+ ID string
+}
+
+// IsErrGPGNoEmailFound checks if an error is a ErrGPGNoEmailFound.
+func IsErrGPGNoEmailFound(err error) bool {
+ _, ok := err.(ErrGPGNoEmailFound)
+ return ok
+}
+
+func (err ErrGPGNoEmailFound) Error() string {
+ return fmt.Sprintf("none of the emails attached to the GPG key could be found: %v", err.FailedEmails)
+}
+
+// ErrGPGInvalidTokenSignature represents a "ErrGPGInvalidTokenSignature" kind of error.
+type ErrGPGInvalidTokenSignature struct {
+ Wrapped error
+ ID string
+}
+
+// IsErrGPGInvalidTokenSignature checks if an error is a ErrGPGInvalidTokenSignature.
+func IsErrGPGInvalidTokenSignature(err error) bool {
+ _, ok := err.(ErrGPGInvalidTokenSignature)
+ return ok
+}
+
+func (err ErrGPGInvalidTokenSignature) Error() string {
+ return "the provided signature does not sign the token with the provided key"
+}
+
+// ErrGPGKeyParsing represents a "ErrGPGKeyParsing" kind of error.
+type ErrGPGKeyParsing struct {
+ ParseError error
+}
+
+// IsErrGPGKeyParsing checks if an error is a ErrGPGKeyParsing.
+func IsErrGPGKeyParsing(err error) bool {
+ _, ok := err.(ErrGPGKeyParsing)
+ return ok
+}
+
+func (err ErrGPGKeyParsing) Error() string {
+ return fmt.Sprintf("failed to parse gpg key %s", err.ParseError.Error())
+}
+
+// ErrGPGKeyNotExist represents a "GPGKeyNotExist" kind of error.
+type ErrGPGKeyNotExist struct {
+ ID int64
+}
+
+// IsErrGPGKeyNotExist checks if an error is a ErrGPGKeyNotExist.
+func IsErrGPGKeyNotExist(err error) bool {
+ _, ok := err.(ErrGPGKeyNotExist)
+ return ok
+}
+
+func (err ErrGPGKeyNotExist) Error() string {
+ return fmt.Sprintf("public gpg key does not exist [id: %d]", err.ID)
+}
+
+func (err ErrGPGKeyNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ErrGPGKeyImportNotExist represents a "GPGKeyImportNotExist" kind of error.
+type ErrGPGKeyImportNotExist struct {
+ ID string
+}
+
+// IsErrGPGKeyImportNotExist checks if an error is a ErrGPGKeyImportNotExist.
+func IsErrGPGKeyImportNotExist(err error) bool {
+ _, ok := err.(ErrGPGKeyImportNotExist)
+ return ok
+}
+
+func (err ErrGPGKeyImportNotExist) Error() string {
+ return fmt.Sprintf("public gpg key import does not exist [id: %s]", err.ID)
+}
+
+func (err ErrGPGKeyImportNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ErrGPGKeyIDAlreadyUsed represents a "GPGKeyIDAlreadyUsed" kind of error.
+type ErrGPGKeyIDAlreadyUsed struct {
+ KeyID string
+}
+
+// IsErrGPGKeyIDAlreadyUsed checks if an error is an ErrGPGKeyIDAlreadyUsed.
+func IsErrGPGKeyIDAlreadyUsed(err error) bool {
+ _, ok := err.(ErrGPGKeyIDAlreadyUsed)
+ return ok
+}
+
+func (err ErrGPGKeyIDAlreadyUsed) Error() string {
+ return fmt.Sprintf("public key already exists [key_id: %s]", err.KeyID)
+}
+
+func (err ErrGPGKeyIDAlreadyUsed) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+// ErrGPGKeyAccessDenied represents a "GPGKeyAccessDenied" kind of Error.
+type ErrGPGKeyAccessDenied struct {
+ UserID int64
+ KeyID int64
+}
+
+// IsErrGPGKeyAccessDenied checks if an error is a ErrGPGKeyAccessDenied.
+func IsErrGPGKeyAccessDenied(err error) bool {
+ _, ok := err.(ErrGPGKeyAccessDenied)
+ return ok
+}
+
+// Error pretty-prints an error of type ErrGPGKeyAccessDenied.
+func (err ErrGPGKeyAccessDenied) Error() string {
+ return fmt.Sprintf("user does not have access to the key [user_id: %d, key_id: %d]",
+ err.UserID, err.KeyID)
+}
+
+func (err ErrGPGKeyAccessDenied) Unwrap() error {
+ return util.ErrPermissionDenied
+}
+
+// ErrKeyAccessDenied represents a "KeyAccessDenied" kind of error.
+type ErrKeyAccessDenied struct {
+ UserID int64
+ KeyID int64
+ Note string
+}
+
+// IsErrKeyAccessDenied checks if an error is a ErrKeyAccessDenied.
+func IsErrKeyAccessDenied(err error) bool {
+ _, ok := err.(ErrKeyAccessDenied)
+ return ok
+}
+
+func (err ErrKeyAccessDenied) Error() string {
+ return fmt.Sprintf("user does not have access to the key [user_id: %d, key_id: %d, note: %s]",
+ err.UserID, err.KeyID, err.Note)
+}
+
+func (err ErrKeyAccessDenied) Unwrap() error {
+ return util.ErrPermissionDenied
+}
+
+// ErrDeployKeyNotExist represents a "DeployKeyNotExist" kind of error.
+type ErrDeployKeyNotExist struct {
+ ID int64
+ KeyID int64
+ RepoID int64
+}
+
+// IsErrDeployKeyNotExist checks if an error is a ErrDeployKeyNotExist.
+func IsErrDeployKeyNotExist(err error) bool {
+ _, ok := err.(ErrDeployKeyNotExist)
+ return ok
+}
+
+func (err ErrDeployKeyNotExist) Error() string {
+ return fmt.Sprintf("Deploy key does not exist [id: %d, key_id: %d, repo_id: %d]", err.ID, err.KeyID, err.RepoID)
+}
+
+func (err ErrDeployKeyNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ErrDeployKeyAlreadyExist represents a "DeployKeyAlreadyExist" kind of error.
+type ErrDeployKeyAlreadyExist struct {
+ KeyID int64
+ RepoID int64
+}
+
+// IsErrDeployKeyAlreadyExist checks if an error is a ErrDeployKeyAlreadyExist.
+func IsErrDeployKeyAlreadyExist(err error) bool {
+ _, ok := err.(ErrDeployKeyAlreadyExist)
+ return ok
+}
+
+func (err ErrDeployKeyAlreadyExist) Error() string {
+ return fmt.Sprintf("public key already exists [key_id: %d, repo_id: %d]", err.KeyID, err.RepoID)
+}
+
+func (err ErrDeployKeyAlreadyExist) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+// ErrDeployKeyNameAlreadyUsed represents a "DeployKeyNameAlreadyUsed" kind of error.
+type ErrDeployKeyNameAlreadyUsed struct {
+ RepoID int64
+ Name string
+}
+
+// IsErrDeployKeyNameAlreadyUsed checks if an error is a ErrDeployKeyNameAlreadyUsed.
+func IsErrDeployKeyNameAlreadyUsed(err error) bool {
+ _, ok := err.(ErrDeployKeyNameAlreadyUsed)
+ return ok
+}
+
+func (err ErrDeployKeyNameAlreadyUsed) Error() string {
+ return fmt.Sprintf("public key with name already exists [repo_id: %d, name: %s]", err.RepoID, err.Name)
+}
+
+func (err ErrDeployKeyNameAlreadyUsed) Unwrap() error {
+ return util.ErrAlreadyExist // a name collision unwraps to the already-exist sentinel, like the other *AlreadyUsed errors
+}
+
+// ErrSSHInvalidTokenSignature represents a "ErrSSHInvalidTokenSignature" kind of error.
+type ErrSSHInvalidTokenSignature struct {
+ Wrapped error
+ Fingerprint string
+}
+
+// IsErrSSHInvalidTokenSignature checks if an error is a ErrSSHInvalidTokenSignature.
+func IsErrSSHInvalidTokenSignature(err error) bool {
+ _, ok := err.(ErrSSHInvalidTokenSignature)
+ return ok
+}
+
+func (err ErrSSHInvalidTokenSignature) Error() string {
+ return "the provided signature does not sign the token with the provided key"
+}
+
+func (err ErrSSHInvalidTokenSignature) Unwrap() error {
+ return util.ErrInvalidArgument
+}
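
Because each error type here unwraps to a util sentinel, callers can match with errors.Is instead of the type-specific helpers. A small sketch using only the types defined above:

// Sketch: sentinel-based matching as an alternative to the IsErr* helpers.
package asymkey

import (
	"errors"
	"fmt"

	"code.gitea.io/gitea/modules/util"
)

func describeKeyError(err error) string {
	switch {
	case errors.Is(err, util.ErrNotExist): // e.g. ErrKeyNotExist, ErrGPGKeyNotExist
		return "key not found"
	case errors.Is(err, util.ErrAlreadyExist): // e.g. ErrKeyAlreadyExist
		return "key already registered"
	case errors.Is(err, util.ErrPermissionDenied): // e.g. ErrKeyAccessDenied
		return "access denied"
	default:
		return fmt.Sprintf("unexpected: %v", err)
	}
}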
diff --git a/models/asymkey/gpg_key.go b/models/asymkey/gpg_key.go
new file mode 100644
index 0000000..6e2914e
--- /dev/null
+++ b/models/asymkey/gpg_key.go
@@ -0,0 +1,273 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package asymkey
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "github.com/ProtonMail/go-crypto/openpgp"
+ "github.com/ProtonMail/go-crypto/openpgp/packet"
+ "xorm.io/builder"
+)
+
+// GPGKey represents a GPG key.
+type GPGKey struct {
+ ID int64 `xorm:"pk autoincr"`
+ OwnerID int64 `xorm:"INDEX NOT NULL"`
+ KeyID string `xorm:"INDEX CHAR(16) NOT NULL"`
+ PrimaryKeyID string `xorm:"CHAR(16)"`
+ Content string `xorm:"MEDIUMTEXT NOT NULL"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+ ExpiredUnix timeutil.TimeStamp
+ AddedUnix timeutil.TimeStamp
+ SubsKey []*GPGKey `xorm:"-"`
+ Emails []*user_model.EmailAddress
+ Verified bool `xorm:"NOT NULL DEFAULT false"`
+ CanSign bool
+ CanEncryptComms bool
+ CanEncryptStorage bool
+ CanCertify bool
+}
+
+func init() {
+ db.RegisterModel(new(GPGKey))
+}
+
+// BeforeInsert will be invoked by XORM before inserting a record
+func (key *GPGKey) BeforeInsert() {
+ key.AddedUnix = timeutil.TimeStampNow()
+}
+
+func (key *GPGKey) LoadSubKeys(ctx context.Context) error {
+ if err := db.GetEngine(ctx).Where("primary_key_id=?", key.KeyID).Find(&key.SubsKey); err != nil {
+ return fmt.Errorf("find Sub GPGkeys[%s]: %v", key.KeyID, err)
+ }
+ return nil
+}
+
+// PaddedKeyID returns the KeyID padded to 16 characters
+func (key *GPGKey) PaddedKeyID() string {
+ return PaddedKeyID(key.KeyID)
+}
+
+// PaddedKeyID pads a KeyID with leading zeros to 16 characters
+func PaddedKeyID(keyID string) string {
+ if len(keyID) > 15 {
+ return keyID
+ }
+ zeros := "0000000000000000"
+ return zeros[0:16-len(keyID)] + keyID
+}
+
+type FindGPGKeyOptions struct {
+ db.ListOptions
+ OwnerID int64
+ KeyID string
+ IncludeSubKeys bool
+}
+
+func (opts FindGPGKeyOptions) ToConds() builder.Cond {
+ cond := builder.NewCond()
+ if !opts.IncludeSubKeys {
+ cond = cond.And(builder.Eq{"primary_key_id": ""})
+ }
+
+ if opts.OwnerID > 0 {
+ cond = cond.And(builder.Eq{"owner_id": opts.OwnerID})
+ }
+ if opts.KeyID != "" {
+ cond = cond.And(builder.Eq{"key_id": opts.KeyID})
+ }
+ return cond
+}
+
+func GetGPGKeyForUserByID(ctx context.Context, ownerID, keyID int64) (*GPGKey, error) {
+ key := new(GPGKey)
+ has, err := db.GetEngine(ctx).Where("id=? AND owner_id=?", keyID, ownerID).Get(key)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrGPGKeyNotExist{keyID}
+ }
+ return key, nil
+}
+
+// GPGKeyToEntity retrieves the imported armored key and parses it back into an openpgp entity
+func GPGKeyToEntity(ctx context.Context, k *GPGKey) (*openpgp.Entity, error) {
+ impKey, err := GetGPGImportByKeyID(ctx, k.KeyID)
+ if err != nil {
+ return nil, err
+ }
+ keys, err := checkArmoredGPGKeyString(impKey.Content)
+ if err != nil {
+ return nil, err
+ }
+ return keys[0], err
+}
+
+// parseSubGPGKey parses a subkey
+func parseSubGPGKey(ownerID int64, primaryID string, pubkey *packet.PublicKey, expiry time.Time) (*GPGKey, error) {
+ content, err := base64EncPubKey(pubkey)
+ if err != nil {
+ return nil, err
+ }
+ return &GPGKey{
+ OwnerID: ownerID,
+ KeyID: pubkey.KeyIdString(),
+ PrimaryKeyID: primaryID,
+ Content: content,
+ CreatedUnix: timeutil.TimeStamp(pubkey.CreationTime.Unix()),
+ ExpiredUnix: timeutil.TimeStamp(expiry.Unix()),
+ CanSign: pubkey.CanSign(),
+ CanEncryptComms: pubkey.PubKeyAlgo.CanEncrypt(),
+ CanEncryptStorage: pubkey.PubKeyAlgo.CanEncrypt(),
+ CanCertify: pubkey.PubKeyAlgo.CanSign(),
+ }, nil
+}
+
+// parseGPGKey parses a primary key entity (primary key + subkeys + self-signature)
+func parseGPGKey(ctx context.Context, ownerID int64, e *openpgp.Entity, verified bool) (*GPGKey, error) {
+ pubkey := e.PrimaryKey
+ expiry := getExpiryTime(e)
+
+ // Parse Subkeys
+ subkeys := make([]*GPGKey, len(e.Subkeys))
+ for i, k := range e.Subkeys {
+ subKeyExpiry := expiry
+ if k.Sig.KeyLifetimeSecs != nil {
+ subKeyExpiry = k.PublicKey.CreationTime.Add(time.Duration(*k.Sig.KeyLifetimeSecs) * time.Second)
+ }
+
+ subs, err := parseSubGPGKey(ownerID, pubkey.KeyIdString(), k.PublicKey, subKeyExpiry)
+ if err != nil {
+ return nil, ErrGPGKeyParsing{ParseError: err}
+ }
+ subkeys[i] = subs
+ }
+
+ // Check emails
+ userEmails, err := user_model.GetEmailAddresses(ctx, ownerID)
+ if err != nil {
+ return nil, err
+ }
+
+ emails := make([]*user_model.EmailAddress, 0, len(e.Identities))
+ for _, ident := range e.Identities {
+ // Check if the identity is revoked.
+ if ident.Revoked(time.Now()) {
+ continue
+ }
+ email := strings.ToLower(strings.TrimSpace(ident.UserId.Email))
+ for _, e := range userEmails {
+ if e.IsActivated && e.LowerEmail == email {
+ emails = append(emails, e)
+ break
+ }
+ }
+ }
+
+ if !verified {
+ // In the case that no email has been found
+ if len(emails) == 0 {
+ failedEmails := make([]string, 0, len(e.Identities))
+ for _, ident := range e.Identities {
+ failedEmails = append(failedEmails, ident.UserId.Email)
+ }
+ return nil, ErrGPGNoEmailFound{failedEmails, e.PrimaryKey.KeyIdString()}
+ }
+ }
+
+ content, err := base64EncPubKey(pubkey)
+ if err != nil {
+ return nil, err
+ }
+ return &GPGKey{
+ OwnerID: ownerID,
+ KeyID: pubkey.KeyIdString(),
+ PrimaryKeyID: "",
+ Content: content,
+ CreatedUnix: timeutil.TimeStamp(pubkey.CreationTime.Unix()),
+ ExpiredUnix: timeutil.TimeStamp(expiry.Unix()),
+ Emails: emails,
+ SubsKey: subkeys,
+ Verified: verified,
+ CanSign: pubkey.CanSign(),
+ CanEncryptComms: pubkey.PubKeyAlgo.CanEncrypt(),
+ CanEncryptStorage: pubkey.PubKeyAlgo.CanEncrypt(),
+ CanCertify: pubkey.PubKeyAlgo.CanSign(),
+ }, nil
+}
+
+// deleteGPGKey does the actual key deletion
+func deleteGPGKey(ctx context.Context, keyID string) (int64, error) {
+ if keyID == "" {
+ return 0, fmt.Errorf("empty KeyId forbidden") // Should never happen but just to be sure
+ }
+ // Delete imported key
+ n, err := db.GetEngine(ctx).Where("key_id=?", keyID).Delete(new(GPGKeyImport))
+ if err != nil {
+ return n, err
+ }
+ return db.GetEngine(ctx).Where("key_id=?", keyID).Or("primary_key_id=?", keyID).Delete(new(GPGKey))
+}
+
+// DeleteGPGKey deletes GPG key information in database.
+func DeleteGPGKey(ctx context.Context, doer *user_model.User, id int64) (err error) {
+ key, err := GetGPGKeyForUserByID(ctx, doer.ID, id)
+ if err != nil {
+ if IsErrGPGKeyNotExist(err) {
+ return nil
+ }
+ return fmt.Errorf("GetPublicKeyByID: %w", err)
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if _, err = deleteGPGKey(ctx, key.KeyID); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
+
+func checkKeyEmails(ctx context.Context, email string, keys ...*GPGKey) (bool, string) {
+ uid := int64(0)
+ var userEmails []*user_model.EmailAddress
+ var user *user_model.User
+ for _, key := range keys {
+ for _, e := range key.Emails {
+ if e.IsActivated && (email == "" || strings.EqualFold(e.Email, email)) {
+ return true, e.Email
+ }
+ }
+ if key.Verified && key.OwnerID != 0 {
+ if uid != key.OwnerID {
+ userEmails, _ = user_model.GetEmailAddresses(ctx, key.OwnerID)
+ uid = key.OwnerID
+ user = &user_model.User{ID: uid}
+ _, _ = user_model.GetUser(ctx, user)
+ }
+ for _, e := range userEmails {
+ if e.IsActivated && (email == "" || strings.EqualFold(e.Email, email)) {
+ return true, e.Email
+ }
+ }
+ if user.KeepEmailPrivate && strings.EqualFold(email, user.GetEmail()) {
+ return true, user.GetEmail()
+ }
+ }
+ }
+ return false, email
+}
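
PaddedKeyID exists because short key IDs must be left-padded with zeros to the canonical 16 hex characters. A testable sketch of the expected behavior:

// Sketch: would live in a _test.go file in this package.
package asymkey

import "fmt"

func ExamplePaddedKeyID() {
	fmt.Println(PaddedKeyID("ABCDEF12"))
	// Output: 00000000ABCDEF12
}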
diff --git a/models/asymkey/gpg_key_add.go b/models/asymkey/gpg_key_add.go
new file mode 100644
index 0000000..6c0f6e0
--- /dev/null
+++ b/models/asymkey/gpg_key_add.go
@@ -0,0 +1,167 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package asymkey
+
+import (
+ "context"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/log"
+
+ "github.com/ProtonMail/go-crypto/openpgp"
+)
+
+// __________________ ________ ____ __.
+// / _____/\______ \/ _____/ | |/ _|____ ___.__.
+// / \ ___ | ___/ \ ___ | <_/ __ < | |
+// \ \_\ \| | \ \_\ \ | | \ ___/\___ |
+// \______ /|____| \______ / |____|__ \___ > ____|
+// \/ \/ \/ \/\/
+// _____ .___ .___
+// / _ \ __| _/__| _/
+// / /_\ \ / __ |/ __ |
+// / | \/ /_/ / /_/ |
+// \____|__ /\____ \____ |
+// \/ \/ \/
+
+// This file contains functions relating to adding GPG Keys
+
+// addGPGKey add key, import and subkeys to database
+func addGPGKey(ctx context.Context, key *GPGKey, content string) (err error) {
+ // Add GPGKeyImport
+ if err = db.Insert(ctx, &GPGKeyImport{
+ KeyID: key.KeyID,
+ Content: content,
+ }); err != nil {
+ return err
+ }
+ // Save GPG primary key.
+ if err = db.Insert(ctx, key); err != nil {
+ return err
+ }
+ // Save GPG subs key.
+ for _, subkey := range key.SubsKey {
+ if err := addGPGSubKey(ctx, subkey); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// addGPGSubKey add subkeys to database
+func addGPGSubKey(ctx context.Context, key *GPGKey) (err error) {
+ // Save GPG primary key.
+ if err = db.Insert(ctx, key); err != nil {
+ return err
+ }
+ // Save GPG subs key.
+ for _, subkey := range key.SubsKey {
+ if err := addGPGSubKey(ctx, subkey); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// AddGPGKey adds new public key to database.
+func AddGPGKey(ctx context.Context, ownerID int64, content, token, signature string) ([]*GPGKey, error) {
+ ekeys, err := checkArmoredGPGKeyString(content)
+ if err != nil {
+ return nil, err
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer committer.Close()
+
+ keys := make([]*GPGKey, 0, len(ekeys))
+
+ verified := false
+ // Handle provided signature
+ if signature != "" {
+ signer, err := openpgp.CheckArmoredDetachedSignature(ekeys, strings.NewReader(token), strings.NewReader(signature), nil)
+ if err != nil {
+ signer, err = openpgp.CheckArmoredDetachedSignature(ekeys, strings.NewReader(token+"\n"), strings.NewReader(signature), nil)
+ }
+ if err != nil {
+ signer, err = openpgp.CheckArmoredDetachedSignature(ekeys, strings.NewReader(token+"\r\n"), strings.NewReader(signature), nil)
+ }
+ if err != nil {
+ log.Error("Unable to validate token signature. Error: %v", err)
+ return nil, ErrGPGInvalidTokenSignature{
+ ID: ekeys[0].PrimaryKey.KeyIdString(),
+ Wrapped: err,
+ }
+ }
+ ekeys = []*openpgp.Entity{signer}
+ verified = true
+ }
+
+ if len(ekeys) > 1 {
+ id2key := map[string]*openpgp.Entity{}
+ newEKeys := make([]*openpgp.Entity, 0, len(ekeys))
+ for _, ekey := range ekeys {
+ id := ekey.PrimaryKey.KeyIdString()
+ if original, has := id2key[id]; has {
+ // Coalesce this with the other one
+ for _, subkey := range ekey.Subkeys {
+ if subkey.PublicKey == nil {
+ continue
+ }
+ found := false
+
+ for _, originalSubkey := range original.Subkeys {
+ if originalSubkey.PublicKey == nil {
+ continue
+ }
+ if originalSubkey.PublicKey.KeyId == subkey.PublicKey.KeyId {
+ found = true
+ break
+ }
+ }
+ if !found {
+ original.Subkeys = append(original.Subkeys, subkey)
+ }
+ }
+ for name, identity := range ekey.Identities {
+ if _, has := original.Identities[name]; has {
+ continue
+ }
+ original.Identities[name] = identity
+ }
+ continue
+ }
+ id2key[id] = ekey
+ newEKeys = append(newEKeys, ekey)
+ }
+ ekeys = newEKeys
+ }
+
+ for _, ekey := range ekeys {
+ // Key ID cannot be duplicated.
+ has, err := db.GetEngine(ctx).Where("key_id=?", ekey.PrimaryKey.KeyIdString()).
+ Get(new(GPGKey))
+ if err != nil {
+ return nil, err
+ } else if has {
+ return nil, ErrGPGKeyIDAlreadyUsed{ekey.PrimaryKey.KeyIdString()}
+ }
+
+ // Parse the entity into a GPGKey together with its subkeys and emails
+
+ key, err := parseGPGKey(ctx, ownerID, ekey, verified)
+ if err != nil {
+ return nil, err
+ }
+
+ if err = addGPGKey(ctx, key, content); err != nil {
+ return nil, err
+ }
+ keys = append(keys, key)
+ }
+ return keys, committer.Commit()
+}
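
AddGPGKey only marks keys Verified when a detached signature over the caller-supplied token checks out; with an empty signature, the key is stored unverified. A minimal sketch, assuming armored key content from the user:

// Sketch: an empty token/signature pair skips the verification branch above.
package asymkey

import "context"

func addUnverifiedKey(ctx context.Context, ownerID int64, armored string) error {
	_, err := AddGPGKey(ctx, ownerID, armored, "", "")
	return err
}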
diff --git a/models/asymkey/gpg_key_commit_verification.go b/models/asymkey/gpg_key_commit_verification.go
new file mode 100644
index 0000000..9aa6064
--- /dev/null
+++ b/models/asymkey/gpg_key_commit_verification.go
@@ -0,0 +1,63 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package asymkey
+
+import (
+ "context"
+
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/git"
+)
+
+// __________________ ________ ____ __.
+// / _____/\______ \/ _____/ | |/ _|____ ___.__.
+// / \ ___ | ___/ \ ___ | <_/ __ < | |
+// \ \_\ \| | \ \_\ \ | | \ ___/\___ |
+// \______ /|____| \______ / |____|__ \___ > ____|
+// \/ \/ \/ \/\/
+// _________ .__ __
+// \_ ___ \ ____ _____ _____ |__|/ |_
+// / \ \/ / _ \ / \ / \| \ __\
+// \ \___( <_> ) Y Y \ Y Y \ || |
+// \______ /\____/|__|_| /__|_| /__||__|
+// \/ \/ \/
+// ____ ____ .__ _____.__ __ .__
+// \ \ / /___________|__|/ ____\__| ____ _____ _/ |_|__| ____ ____
+// \ Y // __ \_ __ \ \ __\| |/ ___\\__ \\ __\ |/ _ \ / \
+// \ /\ ___/| | \/ || | | \ \___ / __ \| | | ( <_> ) | \
+// \___/ \___ >__| |__||__| |__|\___ >____ /__| |__|\____/|___| /
+// \/ \/ \/ \/
+
+// This file provides functions relating to commit verification
+
+// SignCommit represents a commit with validation of signature.
+type SignCommit struct {
+ Verification *ObjectVerification
+ *user_model.UserCommit
+}
+
+// ParseCommitsWithSignature checks whether the signatures of the commits correspond to the users' GPG keys.
+func ParseCommitsWithSignature(ctx context.Context, oldCommits []*user_model.UserCommit, repoTrustModel repo_model.TrustModelType, isOwnerMemberCollaborator func(*user_model.User) (bool, error)) []*SignCommit {
+ newCommits := make([]*SignCommit, 0, len(oldCommits))
+ keyMap := map[string]bool{}
+
+ for _, c := range oldCommits {
+ o := commitToGitObject(c.Commit)
+ signCommit := &SignCommit{
+ UserCommit: c,
+ Verification: ParseObjectWithSignature(ctx, &o),
+ }
+
+ _ = CalculateTrustStatus(signCommit.Verification, repoTrustModel, isOwnerMemberCollaborator, &keyMap)
+
+ newCommits = append(newCommits, signCommit)
+ }
+ return newCommits
+}
+
+func ParseCommitWithSignature(ctx context.Context, c *git.Commit) *ObjectVerification {
+ o := commitToGitObject(c)
+ return ParseObjectWithSignature(ctx, &o)
+}
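
ParseCommitWithSignature is the single-commit entry point. A minimal sketch of inspecting its result, assuming a loaded *git.Commit:

// Sketch: log the verification outcome for one commit.
package asymkey

import (
	"context"
	"fmt"

	"code.gitea.io/gitea/modules/git"
)

func logCommitVerification(ctx context.Context, c *git.Commit) {
	v := ParseCommitWithSignature(ctx, c)
	fmt.Printf("verified=%v warning=%v reason=%s\n", v.Verified, v.Warning, v.Reason)
}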
diff --git a/models/asymkey/gpg_key_common.go b/models/asymkey/gpg_key_common.go
new file mode 100644
index 0000000..db1912c
--- /dev/null
+++ b/models/asymkey/gpg_key_common.go
@@ -0,0 +1,146 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package asymkey
+
+import (
+ "bytes"
+ "crypto"
+ "encoding/base64"
+ "fmt"
+ "hash"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/ProtonMail/go-crypto/openpgp"
+ "github.com/ProtonMail/go-crypto/openpgp/armor"
+ "github.com/ProtonMail/go-crypto/openpgp/packet"
+)
+
+// __________________ ________ ____ __.
+// / _____/\______ \/ _____/ | |/ _|____ ___.__.
+// / \ ___ | ___/ \ ___ | <_/ __ < | |
+// \ \_\ \| | \ \_\ \ | | \ ___/\___ |
+// \______ /|____| \______ / |____|__ \___ > ____|
+// \/ \/ \/ \/\/
+// _________
+// \_ ___ \ ____ _____ _____ ____ ____
+// / \ \/ / _ \ / \ / \ / _ \ / \
+// \ \___( <_> ) Y Y \ Y Y ( <_> ) | \
+// \______ /\____/|__|_| /__|_| /\____/|___| /
+// \/ \/ \/ \/
+
+// This file provides common functions relating to GPG Keys
+
+// checkArmoredGPGKeyString checks if the given key string is a valid armored GPG key.
+// The function returns the parsed entity list on success.
+func checkArmoredGPGKeyString(content string) (openpgp.EntityList, error) {
+ list, err := openpgp.ReadArmoredKeyRing(strings.NewReader(content))
+ if err != nil {
+ return nil, ErrGPGKeyParsing{err}
+ }
+ return list, nil
+}
+
+// base64EncPubKey encodes public key content to base64
+func base64EncPubKey(pubkey *packet.PublicKey) (string, error) {
+ var w bytes.Buffer
+ err := pubkey.Serialize(&w)
+ if err != nil {
+ return "", err
+ }
+ return base64.StdEncoding.EncodeToString(w.Bytes()), nil
+}
+
+func readerFromBase64(s string) (io.Reader, error) {
+ bs, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ return nil, err
+ }
+ return bytes.NewBuffer(bs), nil
+}
+
+// base64DecPubKey decodes public key content from base64
+func base64DecPubKey(content string) (*packet.PublicKey, error) {
+ b, err := readerFromBase64(content)
+ if err != nil {
+ return nil, err
+ }
+ // Read key
+ p, err := packet.Read(b)
+ if err != nil {
+ return nil, err
+ }
+ // Check type
+ pkey, ok := p.(*packet.PublicKey)
+ if !ok {
+ return nil, fmt.Errorf("key is not a public key")
+ }
+ return pkey, nil
+}
+
+// getExpiryTime extracts the expiry time of the primary key based on its self-signature
+func getExpiryTime(e *openpgp.Entity) time.Time {
+ expiry := time.Time{}
+ // Extract self-sign for expire date based on : https://github.com/golang/crypto/blob/master/openpgp/keys.go#L165
+ var selfSig *packet.Signature
+ for _, ident := range e.Identities {
+ if selfSig == nil {
+ selfSig = ident.SelfSignature
+ } else if ident.SelfSignature != nil && ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId {
+ selfSig = ident.SelfSignature
+ break
+ }
+ }
+ if selfSig.KeyLifetimeSecs != nil {
+ expiry = e.PrimaryKey.CreationTime.Add(time.Duration(*selfSig.KeyLifetimeSecs) * time.Second)
+ }
+ return expiry
+}
+
+func populateHash(hashFunc crypto.Hash, msg []byte) (hash.Hash, error) {
+ h := hashFunc.New()
+ if _, err := h.Write(msg); err != nil {
+ return nil, err
+ }
+ return h, nil
+}
+
+// readArmoredSign reads an armored signature block of the expected type. https://sourcegraph.com/github.com/golang/crypto/-/blob/openpgp/read.go#L24:6-24:17
+func readArmoredSign(r io.Reader) (body io.Reader, err error) {
+ block, err := armor.Decode(r)
+ if err != nil {
+ return nil, err
+ }
+ if block.Type != openpgp.SignatureType {
+ return nil, fmt.Errorf("expected %q, got: %s", openpgp.SignatureType, block.Type)
+ }
+ return block.Body, nil
+}
+
+func extractSignature(s string) (*packet.Signature, error) {
+ r, err := readArmoredSign(strings.NewReader(s))
+ if err != nil {
+ return nil, fmt.Errorf("Failed to read signature armor")
+ }
+ p, err := packet.Read(r)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to read signature packet")
+ }
+ sig, ok := p.(*packet.Signature)
+ if !ok {
+ return nil, fmt.Errorf("Packet is not a signature")
+ }
+ return sig, nil
+}
+
+func tryGetKeyIDFromSignature(sig *packet.Signature) string {
+ if sig.IssuerKeyId != nil && (*sig.IssuerKeyId) != 0 {
+ return fmt.Sprintf("%016X", *sig.IssuerKeyId)
+ }
+ if len(sig.IssuerFingerprint) > 0 {
+ return fmt.Sprintf("%016X", sig.IssuerFingerprint[12:20])
+ }
+ return ""
+}
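
base64EncPubKey and base64DecPubKey are inverses over a serialized public key packet, which is how key content is stored in the GPGKey.Content column. A round-trip sketch:

// Sketch: encode a public key packet to base64 and decode it back.
package asymkey

import "github.com/ProtonMail/go-crypto/openpgp/packet"

func roundTrip(pub *packet.PublicKey) (*packet.PublicKey, error) {
	content, err := base64EncPubKey(pub)
	if err != nil {
		return nil, err
	}
	return base64DecPubKey(content)
}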
diff --git a/models/asymkey/gpg_key_import.go b/models/asymkey/gpg_key_import.go
new file mode 100644
index 0000000..c9d46d2
--- /dev/null
+++ b/models/asymkey/gpg_key_import.go
@@ -0,0 +1,47 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package asymkey
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+)
+
+// __________________ ________ ____ __.
+// / _____/\______ \/ _____/ | |/ _|____ ___.__.
+// / \ ___ | ___/ \ ___ | <_/ __ < | |
+// \ \_\ \| | \ \_\ \ | | \ ___/\___ |
+// \______ /|____| \______ / |____|__ \___ > ____|
+// \/ \/ \/ \/\/
+// .___ __
+// | | _____ ______ ____________/ |_
+// | |/ \\____ \ / _ \_ __ \ __\
+// | | Y Y \ |_> > <_> ) | \/| |
+// |___|__|_| / __/ \____/|__| |__|
+// \/|__|
+
+// This file contains functions related to the original import of a key
+
+// GPGKeyImport represents the original import of a key
+type GPGKeyImport struct {
+ KeyID string `xorm:"pk CHAR(16) NOT NULL"`
+ Content string `xorm:"MEDIUMTEXT NOT NULL"`
+}
+
+func init() {
+ db.RegisterModel(new(GPGKeyImport))
+}
+
+// GetGPGImportByKeyID returns the import public armored key by given KeyID.
+func GetGPGImportByKeyID(ctx context.Context, keyID string) (*GPGKeyImport, error) {
+ key := new(GPGKeyImport)
+ has, err := db.GetEngine(ctx).ID(keyID).Get(key)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrGPGKeyImportNotExist{keyID}
+ }
+ return key, nil
+}
diff --git a/models/asymkey/gpg_key_list.go b/models/asymkey/gpg_key_list.go
new file mode 100644
index 0000000..89548e4
--- /dev/null
+++ b/models/asymkey/gpg_key_list.go
@@ -0,0 +1,38 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package asymkey
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+)
+
+type GPGKeyList []*GPGKey
+
+func (keys GPGKeyList) keyIDs() []string {
+ ids := make([]string, len(keys))
+ for i, key := range keys {
+ ids[i] = key.KeyID
+ }
+ return ids
+}
+
+func (keys GPGKeyList) LoadSubKeys(ctx context.Context) error {
+ subKeys := make([]*GPGKey, 0, len(keys))
+ if err := db.GetEngine(ctx).In("primary_key_id", keys.keyIDs()).Find(&subKeys); err != nil {
+ return err
+ }
+ subKeysMap := make(map[string][]*GPGKey, len(subKeys))
+ for _, key := range subKeys {
+ subKeysMap[key.PrimaryKeyID] = append(subKeysMap[key.PrimaryKeyID], key)
+ }
+
+ for _, key := range keys {
+ if subKeys, ok := subKeysMap[key.KeyID]; ok {
+ key.SubsKey = subKeys
+ }
+ }
+ return nil
+}
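
GPGKeyList.LoadSubKeys fetches the subkeys for a whole list in one IN query rather than one query per primary key. A sketch of the intended call pattern, assuming an initialized database (FindGPGKeyOptions excludes subkeys by default, so the find returns primary keys only):

// Sketch: load a user's primary keys, then batch-load their subkeys.
package asymkey

import (
	"context"

	"code.gitea.io/gitea/models/db"
)

func loadKeysWithSubKeys(ctx context.Context, ownerID int64) (GPGKeyList, error) {
	keys, err := db.Find[GPGKey](ctx, FindGPGKeyOptions{OwnerID: ownerID})
	if err != nil {
		return nil, err
	}
	list := GPGKeyList(keys)
	return list, list.LoadSubKeys(ctx)
}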
diff --git a/models/asymkey/gpg_key_object_verification.go b/models/asymkey/gpg_key_object_verification.go
new file mode 100644
index 0000000..24d72a5
--- /dev/null
+++ b/models/asymkey/gpg_key_object_verification.go
@@ -0,0 +1,520 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Copyright 2024 The Forgejo Authors c/o Codeberg e.V.. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package asymkey
+
+import (
+ "context"
+ "fmt"
+ "hash"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+
+ "github.com/ProtonMail/go-crypto/openpgp/packet"
+)
+
+// This file provides functions related to object (commit, tag) verification
+
+// ObjectVerification represents a commit validation of signature
+type ObjectVerification struct {
+ Verified bool
+ Warning bool
+ Reason string
+ SigningUser *user_model.User
+ CommittingUser *user_model.User
+ SigningEmail string
+ SigningKey *GPGKey
+ SigningSSHKey *PublicKey
+ TrustStatus string
+}
+
+const (
+ // BadSignature is used as the reason when the signature has a KeyID that is in the db
+ // but no key that has that ID verifies the signature. This is a suspicious failure.
+ BadSignature = "gpg.error.probable_bad_signature"
+ // BadDefaultSignature is used as the reason when the signature has a KeyID that matches the
+ // default Key but is not verified by the default key. This is a suspicious failure.
+ BadDefaultSignature = "gpg.error.probable_bad_default_signature"
+ // NoKeyFound is used as the reason when no key can be found to verify the signature.
+ NoKeyFound = "gpg.error.no_gpg_keys_found"
+)
+
+type GitObject struct {
+ ID git.ObjectID
+ Committer *git.Signature
+ Signature *git.ObjectSignature
+ Commit *git.Commit
+}
+
+func commitToGitObject(c *git.Commit) GitObject {
+ return GitObject{
+ ID: c.ID,
+ Committer: c.Committer,
+ Signature: c.Signature,
+ Commit: c,
+ }
+}
+
+func tagToGitObject(t *git.Tag, gitRepo *git.Repository) GitObject {
+ commit, _ := t.Commit(gitRepo)
+ return GitObject{
+ ID: t.ID,
+ Committer: t.Tagger,
+ Signature: t.Signature,
+ Commit: commit,
+ }
+}
+
+// ParseObjectWithSignature checks whether the signature is valid against the keystore.
+func ParseObjectWithSignature(ctx context.Context, c *GitObject) *ObjectVerification {
+ var committer *user_model.User
+ if c.Committer != nil {
+ var err error
+ // Find Committer account
+ committer, err = user_model.GetUserByEmail(ctx, c.Committer.Email) // This finds the user by primary or activated email, so the commit will not be valid if the email is not activated
+ if err != nil { // No user found for the committer
+ committer = &user_model.User{
+ Name: c.Committer.Name,
+ Email: c.Committer.Email,
+ }
+ // We can expect this to often be an ErrUserNotExist. In the case
+ // it is not, however, it is important to log it.
+ if !user_model.IsErrUserNotExist(err) {
+ log.Error("GetUserByEmail: %v", err)
+ return &ObjectVerification{
+ CommittingUser: committer,
+ Verified: false,
+ Reason: "gpg.error.no_committer_account",
+ }
+ }
+ }
+ }
+
+ // If no signature just report the committer
+ if c.Signature == nil {
+ return &ObjectVerification{
+ CommittingUser: committer,
+ Verified: false, // Default value
+ Reason: "gpg.error.not_signed_commit", // Default value
+ }
+ }
+
+ // If this is an SSH signature, handle it differently
+ if strings.HasPrefix(c.Signature.Signature, "-----BEGIN SSH SIGNATURE-----") {
+ return ParseObjectWithSSHSignature(ctx, c, committer)
+ }
+
+ // Parsing signature
+ sig, err := extractSignature(c.Signature.Signature)
+ if err != nil { // Skip: failed to extract the signature
+ log.Error("SignatureRead err: %v", err)
+ return &ObjectVerification{
+ CommittingUser: committer,
+ Verified: false,
+ Reason: "gpg.error.extract_sign",
+ }
+ }
+
+ keyID := tryGetKeyIDFromSignature(sig)
+ defaultReason := NoKeyFound
+
+ // First check if the sig has a keyID and if so just look at that
+ if commitVerification := hashAndVerifyForKeyID(
+ ctx,
+ sig,
+ c.Signature.Payload,
+ committer,
+ keyID,
+ setting.AppName,
+ ""); commitVerification != nil {
+ if commitVerification.Reason == BadSignature {
+ defaultReason = BadSignature
+ } else {
+ return commitVerification
+ }
+ }
+
+ // Now try to associate the signature with the committer, if present
+ if committer.ID != 0 {
+ keys, err := db.Find[GPGKey](ctx, FindGPGKeyOptions{
+ OwnerID: committer.ID,
+ })
+ if err != nil { // Skip: failed to get the user's GPG keys
+ log.Error("ListGPGKeys: %v", err)
+ return &ObjectVerification{
+ CommittingUser: committer,
+ Verified: false,
+ Reason: "gpg.error.failed_retrieval_gpg_keys",
+ }
+ }
+
+ if err := GPGKeyList(keys).LoadSubKeys(ctx); err != nil {
+ log.Error("LoadSubKeys: %v", err)
+ return &ObjectVerification{
+ CommittingUser: committer,
+ Verified: false,
+ Reason: "gpg.error.failed_retrieval_gpg_keys",
+ }
+ }
+
+ committerEmailAddresses, _ := user_model.GetEmailAddresses(ctx, committer.ID)
+ activated := false
+ for _, e := range committerEmailAddresses {
+ if e.IsActivated && strings.EqualFold(e.Email, c.Committer.Email) {
+ activated = true
+ break
+ }
+ }
+
+ for _, k := range keys {
+ // Pre-check (& optimization) that emails attached to key can be attached to the committer email and can validate
+ canValidate := false
+ email := ""
+ if k.Verified && activated {
+ canValidate = true
+ email = c.Committer.Email
+ }
+ if !canValidate {
+ for _, e := range k.Emails {
+ if e.IsActivated && strings.EqualFold(e.Email, c.Committer.Email) {
+ canValidate = true
+ email = e.Email
+ break
+ }
+ }
+ }
+ if !canValidate {
+ continue // Skip this key
+ }
+
+ commitVerification := hashAndVerifyWithSubKeysObjectVerification(sig, c.Signature.Payload, k, committer, committer, email)
+ if commitVerification != nil {
+ return commitVerification
+ }
+ }
+ }
+
+ if setting.Repository.Signing.SigningKey != "" && setting.Repository.Signing.SigningKey != "default" && setting.Repository.Signing.SigningKey != "none" {
+ // OK we should try the default key
+ gpgSettings := git.GPGSettings{
+ Sign: true,
+ KeyID: setting.Repository.Signing.SigningKey,
+ Name: setting.Repository.Signing.SigningName,
+ Email: setting.Repository.Signing.SigningEmail,
+ }
+ if err := gpgSettings.LoadPublicKeyContent(); err != nil {
+ log.Error("Error getting default signing key: %s %v", gpgSettings.KeyID, err)
+ } else if commitVerification := verifyWithGPGSettings(ctx, &gpgSettings, sig, c.Signature.Payload, committer, keyID); commitVerification != nil {
+ if commitVerification.Reason == BadSignature {
+ defaultReason = BadSignature
+ } else {
+ return commitVerification
+ }
+ }
+ }
+
+ defaultGPGSettings, err := c.Commit.GetRepositoryDefaultPublicGPGKey(false)
+ if err != nil {
+ log.Error("Error getting default public gpg key: %v", err)
+ } else if defaultGPGSettings == nil {
+ log.Warn("Unable to get defaultGPGSettings for unattached commit: %s", c.Commit.ID.String())
+ } else if defaultGPGSettings.Sign {
+ if commitVerification := verifyWithGPGSettings(ctx, defaultGPGSettings, sig, c.Signature.Payload, committer, keyID); commitVerification != nil {
+ if commitVerification.Reason == BadSignature {
+ defaultReason = BadSignature
+ } else {
+ return commitVerification
+ }
+ }
+ }
+
+ return &ObjectVerification{ // Default at this stage
+ CommittingUser: committer,
+ Verified: false,
+ Warning: defaultReason != NoKeyFound,
+ Reason: defaultReason,
+ SigningKey: &GPGKey{
+ KeyID: keyID,
+ },
+ }
+}
+
+func verifyWithGPGSettings(ctx context.Context, gpgSettings *git.GPGSettings, sig *packet.Signature, payload string, committer *user_model.User, keyID string) *ObjectVerification {
+ // First try to find the key in the db
+ if commitVerification := hashAndVerifyForKeyID(ctx, sig, payload, committer, gpgSettings.KeyID, gpgSettings.Name, gpgSettings.Email); commitVerification != nil {
+ return commitVerification
+ }
+
+ // Otherwise we have to parse the key
+ ekeys, err := checkArmoredGPGKeyString(gpgSettings.PublicKeyContent)
+ if err != nil {
+ log.Error("Unable to get default signing key: %v", err)
+ return &ObjectVerification{
+ CommittingUser: committer,
+ Verified: false,
+ Reason: "gpg.error.generate_hash",
+ }
+ }
+ for _, ekey := range ekeys {
+ pubkey := ekey.PrimaryKey
+ content, err := base64EncPubKey(pubkey)
+ if err != nil {
+ return &ObjectVerification{
+ CommittingUser: committer,
+ Verified: false,
+ Reason: "gpg.error.generate_hash",
+ }
+ }
+ k := &GPGKey{
+ Content: content,
+ CanSign: pubkey.CanSign(),
+ KeyID: pubkey.KeyIdString(),
+ }
+ for _, subKey := range ekey.Subkeys {
+ content, err := base64EncPubKey(subKey.PublicKey)
+ if err != nil {
+ return &ObjectVerification{
+ CommittingUser: committer,
+ Verified: false,
+ Reason: "gpg.error.generate_hash",
+ }
+ }
+ k.SubsKey = append(k.SubsKey, &GPGKey{
+ Content: content,
+ CanSign: subKey.PublicKey.CanSign(),
+ KeyID: subKey.PublicKey.KeyIdString(),
+ })
+ }
+ if commitVerification := hashAndVerifyWithSubKeysObjectVerification(sig, payload, k, committer, &user_model.User{
+ Name: gpgSettings.Name,
+ Email: gpgSettings.Email,
+ }, gpgSettings.Email); commitVerification != nil {
+ return commitVerification
+ }
+ if keyID == k.KeyID {
+ // This is a bad situation ... We have a key ID that matches our default key but the signature doesn't match.
+ return &ObjectVerification{
+ CommittingUser: committer,
+ Verified: false,
+ Warning: true,
+ Reason: BadSignature,
+ }
+ }
+ }
+ return nil
+}
+
+func verifySign(s *packet.Signature, h hash.Hash, k *GPGKey) error {
+ // Check if key can sign
+ if !k.CanSign {
+ return fmt.Errorf("key cannot sign")
+ }
+ // Decode key
+ pkey, err := base64DecPubKey(k.Content)
+ if err != nil {
+ return err
+ }
+ return pkey.VerifySignature(h, s)
+}
+
+func hashAndVerify(sig *packet.Signature, payload string, k *GPGKey) (*GPGKey, error) {
+ // Generating hash of commit
+ hash, err := populateHash(sig.Hash, []byte(payload))
+ if err != nil { // Skip: failed to generate the hash
+ log.Error("PopulateHash: %v", err)
+ return nil, err
+ }
+ // We will ignore errors in verification as they don't need to be propagated up
+ err = verifySign(sig, hash, k)
+ if err != nil {
+ return nil, nil
+ }
+ return k, nil
+}
+
+func hashAndVerifyWithSubKeys(sig *packet.Signature, payload string, k *GPGKey) (*GPGKey, error) {
+ verified, err := hashAndVerify(sig, payload, k)
+ if err != nil || verified != nil {
+ return verified, err
+ }
+ for _, sk := range k.SubsKey {
+ verified, err := hashAndVerify(sig, payload, sk)
+ if err != nil || verified != nil {
+ return verified, err
+ }
+ }
+ return nil, nil
+}
+
+func hashAndVerifyWithSubKeysObjectVerification(sig *packet.Signature, payload string, k *GPGKey, committer, signer *user_model.User, email string) *ObjectVerification {
+ key, err := hashAndVerifyWithSubKeys(sig, payload, k)
+ if err != nil { // Skip: failed to generate the hash
+ return &ObjectVerification{
+ CommittingUser: committer,
+ Verified: false,
+ Reason: "gpg.error.generate_hash",
+ }
+ }
+
+ if key != nil {
+ return &ObjectVerification{ // Everything is ok
+ CommittingUser: committer,
+ Verified: true,
+ Reason: fmt.Sprintf("%s / %s", signer.Name, key.KeyID),
+ SigningUser: signer,
+ SigningKey: key,
+ SigningEmail: email,
+ }
+ }
+ return nil
+}
+
+func hashAndVerifyForKeyID(ctx context.Context, sig *packet.Signature, payload string, committer *user_model.User, keyID, name, email string) *ObjectVerification {
+ if keyID == "" {
+ return nil
+ }
+ keys, err := db.Find[GPGKey](ctx, FindGPGKeyOptions{
+ KeyID: keyID,
+ IncludeSubKeys: true,
+ })
+ if err != nil {
+ log.Error("GetGPGKeysByKeyID: %v", err)
+ return &ObjectVerification{
+ CommittingUser: committer,
+ Verified: false,
+ Reason: "gpg.error.failed_retrieval_gpg_keys",
+ }
+ }
+ if len(keys) == 0 {
+ return nil
+ }
+ for _, key := range keys {
+ var primaryKeys []*GPGKey
+ if key.PrimaryKeyID != "" {
+ primaryKeys, err = db.Find[GPGKey](ctx, FindGPGKeyOptions{
+ KeyID: key.PrimaryKeyID,
+ IncludeSubKeys: true,
+ })
+ if err != nil {
+ log.Error("GetGPGKeysByKeyID: %v", err)
+ return &ObjectVerification{
+ CommittingUser: committer,
+ Verified: false,
+ Reason: "gpg.error.failed_retrieval_gpg_keys",
+ }
+ }
+ }
+
+ activated, email := checkKeyEmails(ctx, email, append([]*GPGKey{key}, primaryKeys...)...)
+ if !activated {
+ continue
+ }
+
+ signer := &user_model.User{
+ Name: name,
+ Email: email,
+ }
+ if key.OwnerID != 0 {
+ owner, err := user_model.GetUserByID(ctx, key.OwnerID)
+ if err == nil {
+ signer = owner
+ } else if !user_model.IsErrUserNotExist(err) {
+ log.Error("Failed to user_model.GetUserByID: %d for key ID: %d (%s) %v", key.OwnerID, key.ID, key.KeyID, err)
+ return &ObjectVerification{
+ CommittingUser: committer,
+ Verified: false,
+ Reason: "gpg.error.no_committer_account",
+ }
+ }
+ }
+ commitVerification := hashAndVerifyWithSubKeysObjectVerification(sig, payload, key, committer, signer, email)
+ if commitVerification != nil {
+ return commitVerification
+ }
+ }
+ // This is a bad situation ... We have a key ID that is in our database but the signature doesn't match.
+ return &ObjectVerification{
+ CommittingUser: committer,
+ Verified: false,
+ Warning: true,
+ Reason: BadSignature,
+ }
+}
+
+// CalculateTrustStatus will calculate the TrustStatus for a commit verification within a repository.
+// There are several trust models in Gitea; keyMap, when non-nil, caches the
+// per-key collaborator lookups across repeated calls.
+func CalculateTrustStatus(verification *ObjectVerification, repoTrustModel repo_model.TrustModelType, isOwnerMemberCollaborator func(*user_model.User) (bool, error), keyMap *map[string]bool) error {
+ if !verification.Verified {
+ return nil
+ }
+
+ // In the Committer trust model a signature is trusted if it matches the committer
+// - it doesn't matter if they're a collaborator, the owner, Gitea or GitHub
+ // NB: This model is commit verification only
+ if repoTrustModel == repo_model.CommitterTrustModel {
+ // default to "unmatched"
+ verification.TrustStatus = "unmatched"
+
+ // We can only verify against users in our database; the default key, however,
+ // is matched by email when it is not in the db.
+ if (verification.SigningUser.ID != 0 &&
+ verification.CommittingUser.ID == verification.SigningUser.ID) ||
+ (verification.SigningUser.ID == 0 && verification.CommittingUser.ID == 0 &&
+ verification.SigningUser.Email == verification.CommittingUser.Email) {
+ verification.TrustStatus = "trusted"
+ }
+ return nil
+ }
+
+ // Now we drop to the more nuanced trust models...
+ verification.TrustStatus = "trusted"
+
+ if verification.SigningUser.ID == 0 {
+ // This commit is signed by the default key - but this key is not assigned to a user in the DB.
+
+ // However, in the repo_model.CollaboratorCommitterTrustModel we cannot mark this as
+ // trusted unless the committer is not a user in the DB and the default key's email
+ // matches the committer's email.
+ if repoTrustModel == repo_model.CollaboratorCommitterTrustModel && (verification.CommittingUser.ID != 0 ||
+ verification.SigningUser.Email != verification.CommittingUser.Email) {
+ verification.TrustStatus = "untrusted"
+ }
+ return nil
+ }
+
+ // Check we actually have a GPG SigningKey
+ var err error
+ if verification.SigningKey != nil {
+ var isMember bool
+ if keyMap != nil {
+ var has bool
+ isMember, has = (*keyMap)[verification.SigningKey.KeyID]
+ if !has {
+ isMember, err = isOwnerMemberCollaborator(verification.SigningUser)
+ (*keyMap)[verification.SigningKey.KeyID] = isMember
+ }
+ } else {
+ isMember, err = isOwnerMemberCollaborator(verification.SigningUser)
+ }
+
+ if !isMember {
+ verification.TrustStatus = "untrusted"
+ if verification.CommittingUser.ID != verification.SigningUser.ID {
+ // The committing user and the signing user are not the same
+ // This should be marked as questionable unless the signing user is a collaborator/team member etc.
+ verification.TrustStatus = "unmatched"
+ }
+ } else if repoTrustModel == repo_model.CollaboratorCommitterTrustModel && verification.CommittingUser.ID != verification.SigningUser.ID {
+ // The committing user and the signing user are not the same and our trust model states that they must match
+ verification.TrustStatus = "unmatched"
+ }
+ }
+
+ return err
+}
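+
+// Example (hypothetical caller, names made up): share one keyMap across a
+// batch of verifications so isOwnerMemberCollaborator runs at most once per
+// signing key:
+//
+//	keyMap := map[string]bool{}
+//	for _, v := range verifications {
+//		_ = CalculateTrustStatus(v, repo.GetTrustModel(), isOwnerMemberCollaborator, &keyMap)
+//	}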
diff --git a/models/asymkey/gpg_key_tag_verification.go b/models/asymkey/gpg_key_tag_verification.go
new file mode 100644
index 0000000..5fd3983
--- /dev/null
+++ b/models/asymkey/gpg_key_tag_verification.go
@@ -0,0 +1,15 @@
+// Copyright 2024 The Forgejo Authors c/o Codeberg e.V.. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package asymkey
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/modules/git"
+)
+
+func ParseTagWithSignature(ctx context.Context, gitRepo *git.Repository, t *git.Tag) *ObjectVerification {
+ o := tagToGitObject(t, gitRepo)
+ return ParseObjectWithSignature(ctx, &o)
+}
diff --git a/models/asymkey/gpg_key_test.go b/models/asymkey/gpg_key_test.go
new file mode 100644
index 0000000..e9aa9cf
--- /dev/null
+++ b/models/asymkey/gpg_key_test.go
@@ -0,0 +1,466 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package asymkey
+
+import (
+ "testing"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "github.com/ProtonMail/go-crypto/openpgp/packet"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestCheckArmoredGPGKeyString(t *testing.T) {
+ testGPGArmor := `-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQENBFh91QoBCADciaDd7aqegYkn4ZIG7J0p1CRwpqMGjxFroJEMg6M1ZiuEVTRv
+z49P4kcr1+98NvFmcNc+x5uJgvPCwr/N8ZW5nqBUs2yrklbFF4MeQomyZJJegP8m
+/dsRT3BwIT8YMUtJuCj0iqD9vuKYfjrztcMgC1sYwcE9E9OlA0pWBvUdU2i0TIB1
+vOq6slWGvHHa5l5gPfm09idlVxfH5+I+L1uIMx5ovbiVVU5x2f1AR1T18f0t2TVN
+0agFTyuoYE1ATmvJHmMcsfgM1Gpd9hIlr9vlupT2kKTPoNzVzsJsOU6Ku/Lf/bac
+mF+TfSbRCtmG7dkYZ4metLj7zG/WkW8IvJARABEBAAG0HUFudG9pbmUgR0lSQVJE
+IDxzYXBrQHNhcGsuZnI+iQFUBBMBCAA+FiEEEIOwJg/1vpF1itJ4roJVuKDYKOQF
+Alh91QoCGwMFCQPCZwAFCwkIBwIGFQgJCgsCBBYCAwECHgECF4AACgkQroJVuKDY
+KORreggAlIkC2QjHP5tb7b0+LksB2JMXdY+UzZBcJxtNmvA7gNQaGvWRrhrbePpa
+MKDP+3A4BPDBsWFbbB7N56vQ5tROpmWbNKuFOVER4S1bj0JZV0E+xkDLqt9QwQtQ
+ojd7oIZJwDUwdud1PvCza2mjgBqqiFE+twbc3i9xjciCGspMniUul1eQYLxRJ0w+
+sbvSOUnujnq5ByMSz9ij00O6aiPfNQS5oB5AALfpjYZDvWAAljLVrtmlQJWZ6dZo
+T/YNwsW2dECPuti8+Nmu5FxPGDTXxdbnRaeJTQ3T6q1oUVAv7yTXBx5NXfXkMa5i
+iEayQIH8Joq5Ev5ja/lRGQQhArMQ2bkBDQRYfdUKAQgAv7B3coLSrOQbuTZSlgWE
+QeT+7DWbmqE1LAQA1pQPcUPXLBUVd60amZJxF9nzUYcY83ylDi0gUNJS+DJGOXpT
+pzX2IOuOMGbtUSeKwg5s9O4SUO7f2yCc3RGaegER5zgESxelmOXG+b/hoNt7JbdU
+JtxcnLr91Jw2PBO/Xf0ZKJ01CQG2Yzdrrj6jnrHyx94seHy0i6xH1o0OuvfVMLfN
+/Vbb/ZHh6ym2wHNqRX62b0VAbchcJXX/MEehXGknKTkO6dDUd+mhRgWMf9ZGRFWx
+ag4qALimkf1FXtAyD0vxFYeyoWUQzrOvUsm2BxIN/986R08fhkBQnp5nz07mrU02
+cQARAQABiQE8BBgBCAAmFiEEEIOwJg/1vpF1itJ4roJVuKDYKOQFAlh91QoCGwwF
+CQPCZwAACgkQroJVuKDYKOT32wf/UZqMdPn5OhyhffFzjQx7wolrf92WkF2JkxtH
+6c3Htjlt/p5RhtKEeErSrNAxB4pqB7dznHaJXiOdWEZtRVXXjlNHjrokGTesqtKk
+lHWtK62/MuyLdr+FdCl68F3ewuT2iu/MDv+D4HPqA47zma9xVgZ9ZNwJOpv3fCOo
+RfY66UjGEnfgYifgtI5S84/mp2jaSc9UNvlZB6RSf8cfbJUL74kS2lq+xzSlf0yP
+Av844q/BfRuVsJsK1NDNG09LC30B0l3LKBqlrRmRTUMHtgchdX2dY+p7GPOoSzlR
+MkM/fdpyc2hY7Dl/+qFmN5MG5yGmMpQcX+RNNR222ibNC1D3wg==
+=i9b7
+-----END PGP PUBLIC KEY BLOCK-----`
+
+ key, err := checkArmoredGPGKeyString(testGPGArmor)
+ require.NoError(t, err, "Could not parse a valid GPG public armored rsa key", key)
+ // TODO verify value of key
+}
+
+func TestCheckArmoredbrainpoolP256r1GPGKeyString(t *testing.T) {
+ testGPGArmor := `-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v2
+
+mFMEV6HwkhMJKyQDAwIIAQEHAgMEUsvJO/j5dFMRRj67qeZC9fSKBsGZdOHRj2+6
+8wssmbUuLTfT/ZjIbExETyY8hFnURRGpD2Ifyz0cKjXcbXfJtrQTRm9vYmFyIDxm
+b29AYmFyLmRlPoh/BBMTCAAnBQJZOsDIAhsDBQkJZgGABQsJCAcCBhUICQoLAgQW
+AgMBAh4BAheAAAoJEGuJTd/DBMzmNVQA/2beUrv1yU4gyvCiPDEm3pK42cSfaL5D
+muCtPCUg9hlWAP4yq6M78NW8STfsXgn6oeziMYiHSTmV14nOamLuwwDWM7hXBFeh
+8JISCSskAwMCCAEBBwIDBG3A+XfINAZp1CTse2mRNgeUE5DbUtEpO8ALXKA1UQsQ
+DLKq27b7zTgawgXIGUGP6mWsJ5oH7MNAJ/uKTsYmX40DAQgHiGcEGBMIAA8FAleh
+8JICGwwFCQlmAYAACgkQa4lN38MEzOZwKAD/QKyerAgcvzzLaqvtap3XvpYcw9tc
+OyjLLnFQiVmq7kEA/0z0CQe3ZQiQIq5zrs7Nh1XRkFAo8GlU/SGC9XFFi722
+=ZiSe
+-----END PGP PUBLIC KEY BLOCK-----`
+
+ key, err := checkArmoredGPGKeyString(testGPGArmor)
+ require.NoError(t, err, "Could not parse a valid GPG public armored brainpoolP256r1 key", key)
+ // TODO verify value of key
+}
+
+func TestExtractSignature(t *testing.T) {
+ testGPGArmor := `-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQENBFh91QoBCADciaDd7aqegYkn4ZIG7J0p1CRwpqMGjxFroJEMg6M1ZiuEVTRv
+z49P4kcr1+98NvFmcNc+x5uJgvPCwr/N8ZW5nqBUs2yrklbFF4MeQomyZJJegP8m
+/dsRT3BwIT8YMUtJuCj0iqD9vuKYfjrztcMgC1sYwcE9E9OlA0pWBvUdU2i0TIB1
+vOq6slWGvHHa5l5gPfm09idlVxfH5+I+L1uIMx5ovbiVVU5x2f1AR1T18f0t2TVN
+0agFTyuoYE1ATmvJHmMcsfgM1Gpd9hIlr9vlupT2kKTPoNzVzsJsOU6Ku/Lf/bac
+mF+TfSbRCtmG7dkYZ4metLj7zG/WkW8IvJARABEBAAG0HUFudG9pbmUgR0lSQVJE
+IDxzYXBrQHNhcGsuZnI+iQFUBBMBCAA+FiEEEIOwJg/1vpF1itJ4roJVuKDYKOQF
+Alh91QoCGwMFCQPCZwAFCwkIBwIGFQgJCgsCBBYCAwECHgECF4AACgkQroJVuKDY
+KORreggAlIkC2QjHP5tb7b0+LksB2JMXdY+UzZBcJxtNmvA7gNQaGvWRrhrbePpa
+MKDP+3A4BPDBsWFbbB7N56vQ5tROpmWbNKuFOVER4S1bj0JZV0E+xkDLqt9QwQtQ
+ojd7oIZJwDUwdud1PvCza2mjgBqqiFE+twbc3i9xjciCGspMniUul1eQYLxRJ0w+
+sbvSOUnujnq5ByMSz9ij00O6aiPfNQS5oB5AALfpjYZDvWAAljLVrtmlQJWZ6dZo
+T/YNwsW2dECPuti8+Nmu5FxPGDTXxdbnRaeJTQ3T6q1oUVAv7yTXBx5NXfXkMa5i
+iEayQIH8Joq5Ev5ja/lRGQQhArMQ2bkBDQRYfdUKAQgAv7B3coLSrOQbuTZSlgWE
+QeT+7DWbmqE1LAQA1pQPcUPXLBUVd60amZJxF9nzUYcY83ylDi0gUNJS+DJGOXpT
+pzX2IOuOMGbtUSeKwg5s9O4SUO7f2yCc3RGaegER5zgESxelmOXG+b/hoNt7JbdU
+JtxcnLr91Jw2PBO/Xf0ZKJ01CQG2Yzdrrj6jnrHyx94seHy0i6xH1o0OuvfVMLfN
+/Vbb/ZHh6ym2wHNqRX62b0VAbchcJXX/MEehXGknKTkO6dDUd+mhRgWMf9ZGRFWx
+ag4qALimkf1FXtAyD0vxFYeyoWUQzrOvUsm2BxIN/986R08fhkBQnp5nz07mrU02
+cQARAQABiQE8BBgBCAAmFiEEEIOwJg/1vpF1itJ4roJVuKDYKOQFAlh91QoCGwwF
+CQPCZwAACgkQroJVuKDYKOT32wf/UZqMdPn5OhyhffFzjQx7wolrf92WkF2JkxtH
+6c3Htjlt/p5RhtKEeErSrNAxB4pqB7dznHaJXiOdWEZtRVXXjlNHjrokGTesqtKk
+lHWtK62/MuyLdr+FdCl68F3ewuT2iu/MDv+D4HPqA47zma9xVgZ9ZNwJOpv3fCOo
+RfY66UjGEnfgYifgtI5S84/mp2jaSc9UNvlZB6RSf8cfbJUL74kS2lq+xzSlf0yP
+Av844q/BfRuVsJsK1NDNG09LC30B0l3LKBqlrRmRTUMHtgchdX2dY+p7GPOoSzlR
+MkM/fdpyc2hY7Dl/+qFmN5MG5yGmMpQcX+RNNR222ibNC1D3wg==
+=i9b7
+-----END PGP PUBLIC KEY BLOCK-----`
+ keys, err := checkArmoredGPGKeyString(testGPGArmor)
+ if !assert.NotEmpty(t, keys) {
+ return
+ }
+ ekey := keys[0]
+ require.NoError(t, err, "Could not parse a valid GPG armored key", ekey)
+
+ pubkey := ekey.PrimaryKey
+ content, err := base64EncPubKey(pubkey)
+ require.NoError(t, err, "Could not base64 encode a valid PublicKey content", ekey)
+
+ key := &GPGKey{
+ KeyID: pubkey.KeyIdString(),
+ Content: content,
+ CreatedUnix: timeutil.TimeStamp(pubkey.CreationTime.Unix()),
+ CanSign: pubkey.CanSign(),
+ CanEncryptComms: pubkey.PubKeyAlgo.CanEncrypt(),
+ CanEncryptStorage: pubkey.PubKeyAlgo.CanEncrypt(),
+ CanCertify: pubkey.PubKeyAlgo.CanSign(),
+ }
+
+ cannotsignkey := &GPGKey{
+ KeyID: pubkey.KeyIdString(),
+ Content: content,
+ CreatedUnix: timeutil.TimeStamp(pubkey.CreationTime.Unix()),
+ CanSign: false,
+ CanEncryptComms: false,
+ CanEncryptStorage: false,
+ CanCertify: false,
+ }
+
+ testGoodSigArmor := `-----BEGIN PGP SIGNATURE-----
+
+iQEzBAABCAAdFiEEEIOwJg/1vpF1itJ4roJVuKDYKOQFAljAiQIACgkQroJVuKDY
+KORvCgf6A/Ehh0r7QbO2tFEghT+/Ab+bN7jRN3zP9ed6/q/ophYmkrU0NibtbJH9
+AwFVdHxCmj78SdiRjaTKyevklXw34nvMftmvnOI4lBNUdw6KWl25/n/7wN0l2oZW
+rW3UawYpZgodXiLTYarfEimkDQmT67ArScjRA6lLbkEYKO0VdwDu+Z6yBUH3GWtm
+45RkXpnsF6AXUfuD7YxnfyyDE1A7g7zj4vVYUAfWukJjqow/LsCUgETETJOqj9q3
+52/oQDs04fVkIEtCDulcY+K/fKlukBPJf9WceNDEqiENUzN/Z1y0E+tJ07cSy4bk
+yIJb+d0OAaG8bxloO7nJq4Res1Qa8Q==
+=puvG
+-----END PGP SIGNATURE-----`
+ testGoodPayload := `tree 56ae8d2799882b20381fc11659db06c16c68c61a
+parent c7870c39e4e6b247235ca005797703ec4254613f
+author Antoine GIRARD <sapk@sapk.fr> 1489012989 +0100
+committer Antoine GIRARD <sapk@sapk.fr> 1489012989 +0100
+
+Goog GPG
+`
+
+ testBadSigArmor := `-----BEGIN PGP SIGNATURE-----
+
+iQEzBAABCAAdFiEE5yr4rn9ulbdMxJFiPYI/ySNrtNkFAljAiYkACgkQPYI/ySNr
+tNmDdQf+NXhVRiOGt0GucpjJCGrOnK/qqVUmQyRUfrqzVUdb/1/Ws84V5/wE547I
+6z3oxeBKFsJa1CtIlxYaUyVhYnDzQtphJzub+Aw3UG0E2ywiE+N7RCa1Ufl7pPxJ
+U0SD6gvNaeTDQV/Wctu8v8DkCtEd3N8cMCDWhvy/FQEDztVtzm8hMe0Vdm0ozEH6
+P0W93sDNkLC5/qpWDN44sFlYDstW5VhMrnF0r/ohfaK2kpYHhkPk7WtOoHSUwQSg
+c4gfhjvXIQrWFnII1Kr5jFGlmgNSR02qpb31VGkMzSnBhWVf2OaHS/kI49QHJakq
+AhVDEnoYLCgoDGg9c3p1Ll2452/c6Q==
+=uoGV
+-----END PGP SIGNATURE-----`
+ testBadPayload := `tree 3074ff04951956a974e8b02d57733b0766f7cf6c
+parent fd3577542f7ad1554c7c7c0eb86bb57a1324ad91
+author Antoine GIRARD <sapk@sapk.fr> 1489013107 +0100
+committer Antoine GIRARD <sapk@sapk.fr> 1489013107 +0100
+
+Unknown GPG key with good email
+`
+ // Reading Sign
+ goodSig, err := extractSignature(testGoodSigArmor)
+ require.NoError(t, err, "Could not parse a valid GPG armored signature", testGoodSigArmor)
+ badSig, err := extractSignature(testBadSigArmor)
+ require.NoError(t, err, "Could not parse a valid GPG armored signature", testBadSigArmor)
+
+ // Generating hash of commit
+ goodHash, err := populateHash(goodSig.Hash, []byte(testGoodPayload))
+ require.NoError(t, err, "Could not generate a valid hash of payload", testGoodPayload)
+ badHash, err := populateHash(badSig.Hash, []byte(testBadPayload))
+ require.NoError(t, err, "Could not generate a valid hash of payload", testBadPayload)
+
+ // Verify
+ err = verifySign(goodSig, goodHash, key)
+ require.NoError(t, err, "Could not validate a good signature")
+ err = verifySign(badSig, badHash, key)
+ require.Error(t, err, "Validate a bad signature")
+ err = verifySign(goodSig, goodHash, cannotsignkey)
+ require.Error(t, err, "Validate a bad signature with a kay that can not sign")
+}
+
+func TestCheckGPGUserEmail(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ _ = unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+
+ testEmailWithUpperCaseLetters := `-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1
+
+mQENBFlEBvMBCADe+EQcfv/aKbMFy7YB8e/DE+hY39sfjvdvSgeXtNhfmYvIOUjT
+ORMCvce2Oxzb3HTI0rjYsJpzo9jEQ53dB3vdr0ne5Juby6N7QPjof3NR+ko50Ki2
+0ilOjYuA0v6VHLIn70UBa9NEf+XDuE7P+Lbtl2L9B9OMXtcTAZoA3cJySgtNFNIG
+AVefPi8LeOcekL39wxJEA8OzdCyO5oENEwAG1tzjy9DDNJf74/dBBh2NiXeSeMxZ
+RYeYzqEa2UTDP1fkUl7d2/hV36cKZWZr+l4SQ5bM7HeLj2SsfabLfqKoVWgkfAzQ
+VwtkbRpzMiDLMte2ZAyTJUc+77YbFoyAmOcjABEBAAG0HFVzZXIgT25lIDxVc2Vy
+MUBFeGFtcGxlLmNvbT6JATgEEwECACIFAllEBvMCGwMGCwkIBwMCBhUIAgkKCwQW
+AgMBAh4BAheAAAoJEFMOzOY274DFw5EIAKc4jiYaMb1HDKrSv0tphgNxPFEY83/J
+9CZggO7BINxlb7z/lH1i0U2h2Ha9E3VJTJQF80zBCaIvtU2UNrgVmSKoc0BdE/2S
+rS9MAl29sXxf1BfvXHu12Suvo8O/ZFP45Vm/3kkHuasHyOV1GwUWnynt1qo0zUEn
+WMIcB8USlmMT1TnSb10YKBd/BpGF3crFDJLfAHRumZUk4knDDWUOWy5RCOG8cedc
+VTAhfdoKRRO3PchOfz6Rls/hew12mRNayqxuLQl2+BX+BWu+25dR3qyiS+twLbk6
+Rjpb0S+RQTkYIUoI0SEZpxcTZso11xF5KNpKZ9aAoiLJqkNF5h4oPSe5AQ0EWUQG
+8wEIALiMMqh3NF3ON/z7hQfeU24bCl/WdfJwCR9CWU/jx4X4gZq2C2aGtytGN5g/
+qoYQ3poTOPzh/4Dvs+r6CtHqi0CvPiEOfSxzmaK+F+vA0GMn2i3Sx5gq/VB0mr+j
+RIYMCjf68Tifo2RAT0VDzn6t304l5+VPr4OgbobMRH+wDe7Hhd2pZXl7ty8DooBn
+vqaqoKgdiccUXGBKe4Oihl/oZ4qrYH6K4ACP1Sco1rs4mNeKDAW8k/Y7zLjg6d59
+g0YQ1YI+CX/bKB7/cpMHLupyMLqvCcqIpjBXRJNMdjuMHgKckjr89DwnqXqgXz7W
+u0B39MZQn9nn6vq8BdkoDFgrTQ8AEQEAAYkBHwQYAQIACQUCWUQG8wIbDAAKCRBT
+DszmNu+Axf4IB/0S9NTc6kpwW+ZPZQNTWR5oKDEaXVCRLccOlkt33txMvk/z2jNM
+trEke99ss5L1bRyWB5fRA+XVsPmW9kIk8pmGFmxqp2nSxr9m9rlL5oTYH8u6dfSm
+zwGhqkfITjPI7hyNN52PLANwoS0o4dLzIE65ewigx6cnRlrT2IENObxG/tlxaYg1
+NHahJX0uFlVk0W0bLBrs3fTDw1lS/N8HpyQb+5ryQmiIb2a48aygCS/h2qeRlX1d
+Q0KHb+QcycSgbDx0ZAvdIacuKvBBcbxrsmFUI4LR+oIup0G9gUc0roPvr014jYQL
+7f8r/8fpcN8t+I/41QHCs6L/BEIdTHW3rTQ6
+=zHo9
+-----END PGP PUBLIC KEY BLOCK-----`
+
+ keys, err := AddGPGKey(db.DefaultContext, 1, testEmailWithUpperCaseLetters, "", "")
+ require.NoError(t, err)
+ if assert.NotEmpty(t, keys) {
+ key := keys[0]
+ if assert.Len(t, key.Emails, 1) {
+ assert.Equal(t, "user1@example.com", key.Emails[0].Email)
+ }
+ }
+}
+
+func TestCheckGPGRevokedIdentity(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ require.NoError(t, db.Insert(db.DefaultContext, &user_model.EmailAddress{UID: 1, Email: "no-reply@golang.com", IsActivated: true}))
+ require.NoError(t, db.Insert(db.DefaultContext, &user_model.EmailAddress{UID: 1, Email: "revoked@golang.com", IsActivated: true}))
+ _ = unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+
+ revokedUserKey := `-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQENBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0qlX2e
+DZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN91KtLsz/
+uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xOXO3YtLdmJMBW
+ClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBbnaIYO6fXVXELUjkx
+nmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX8vY7vwC34pm22fAUVLCJ
+x1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEBAAG0I0dvbGFuZyBHb3BoZXIg
+PG5vLXJlcGx5QGdvbGFuZy5jb20+iQFUBBMBCgA+FiEE5Ik5JLcNx6l6rZfw1oFy
+9I6cUoMFAlsgO5ECGwMFCQPCZwAFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AACgkQ
+1oFy9I6cUoMIkwf8DNPeD23i4jRwd/pylbvxwZintZl1fSwTJW1xcOa1emXaEtX2
+depuqhP04fjlRQGfsYAQh7X9jOJxAHjTmhqFBi5sD7QvKU00cPFYbJ/JTx0B41bl
+aXnSbGhRPh63QtEZL7ACAs+shwvvojJqysx7kyVRu0EW2wqjXdHwR/SJO6nhNBa2
+DXzSiOU/SUA42mmG+5kjF8Aabq9wPwT9wjraHShEweNerNMmOqJExBOy3yFeyDpa
+XwEZFzBfOKoxFNkIaVf5GSdIUGhFECkGvBMB935khftmgR8APxdU4BE7XrXexFJU
+8RCuPXonm4WQOwTWR0vQg64pb2WKAzZ8HhwTGbQiR29sYW5nIEdvcGhlciA8cmV2
+b2tlZEBnb2xhbmcuY29tPokBNgQwAQoAIBYhBOSJOSS3Dcepeq2X8NaBcvSOnFKD
+BQJbIDv3Ah0AAAoJENaBcvSOnFKDfWMIAKhI/Tvu3h8fSUxp/gSAcduT6bC1JttG
+0lYQ5ilKB/58lBUA5CO3ZrKDKlzW3M8VEcvohVaqeTMKeoQd5rCZq8KxHn/KvN6N
+s85REfXfniCKfAbnGgVXX3kDmZ1g63pkxrFu0fDZjVDXC6vy+I0sGyI/Inro0Pzb
+tvn0QCsxjapKK15BtmSrpgHgzVqVg0cUp8vqZeKFxarYbYB2idtGRci4b9tObOK0
+BSTVFy26+I/mrFGaPrySYiy2Kz5NMEcRhjmTxJ8jSwEr2O2sUR0yjbgUAXbTxDVE
+/jg5fQZ1ACvBRQnB7LvMHcInbzjyeTM3FazkkSYQD6b97+dkWwb1iWG5AQ0EWyA7
+kQEIALkg04REDZo1JgdYV4x8HJKFS4xAYWbIva1ZPqvDNmZRUbQZR2+gpJGEwn7z
+VofGvnOYiGW56AS5j31SFf5kro1+1bZQ5iOONBng08OOo58/l1hRseIIVGB5TGSa
+PCdChKKHreJI6hS3mShxH6hdfFtiZuB45rwoaArMMsYcjaezLwKeLc396cpUwwcZ
+snLUNd1Xu5EWEF2OdFkZ2a1qYdxBvAYdQf4+1Nr+NRIx1u1NS9c8jp3PuMOkrQEi
+bNtc1v6v0Jy52mKLG4y7mC/erIkvkQBYJdxPaP7LZVaPYc3/xskcyijrJ/5ufoD8
+K71/ShtsZUXSQn9jlRaYR0EbojMAEQEAAYkBPAQYAQoAJhYhBOSJOSS3Dcepeq2X
+8NaBcvSOnFKDBQJbIDuRAhsMBQkDwmcAAAoJENaBcvSOnFKDkFMIAIt64bVZ8x7+
+TitH1bR4pgcNkaKmgKoZz6FXu80+SnbuEt2NnDyf1cLOSimSTILpwLIuv9Uft5Pb
+OraQbYt3xi9yrqdKqGLv80bxqK0NuryNkvh9yyx5WoG1iKqMj9/FjGghuPrRaT4l
+QinNAghGVkEy1+aXGFrG2DsOC1FFI51CC2WVTzZ5RwR2GpiNRfESsU1rZAUqf/2V
+yJl9bD5R4SUNy8oQmhOxi+gbhD4Ao34e4W0ilibslI/uawvCiOwlu5NGd8zv5n+U
+heiQvzkApQup5c+BhH5zFDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB
+7qTZOahrETw=
+=IKnw
+-----END PGP PUBLIC KEY BLOCK-----
+`
+
+ keys, err := AddGPGKey(db.DefaultContext, 1, revokedUserKey, "", "")
+ require.NoError(t, err)
+ assert.Len(t, keys, 1)
+ assert.Len(t, keys[0].Emails, 1)
+ assert.EqualValues(t, "no-reply@golang.com", keys[0].Emails[0].Email)
+
+ primaryKeyID := "D68172F48E9C5283"
+ // Assert primary key
+ unittest.AssertExistsAndLoadBean(t, &GPGKey{OwnerID: 1, KeyID: primaryKeyID, Content: "xsBNBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0qlX2eDZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN91KtLsz/uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xOXO3YtLdmJMBWClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBbnaIYO6fXVXELUjkxnmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX8vY7vwC34pm22fAUVLCJx1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEBAAE="})
+ // Assert subkey
+ unittest.AssertExistsAndLoadBean(t, &GPGKey{OwnerID: 1, KeyID: "2C56900BE5486AF8", PrimaryKeyID: primaryKeyID, Content: "zsBNBFsgO5EBCAC5INOERA2aNSYHWFeMfByShUuMQGFmyL2tWT6rwzZmUVG0GUdvoKSRhMJ+81aHxr5zmIhluegEuY99UhX+ZK6NftW2UOYjjjQZ4NPDjqOfP5dYUbHiCFRgeUxkmjwnQoSih63iSOoUt5kocR+oXXxbYmbgeOa8KGgKzDLGHI2nsy8Cni3N/enKVMMHGbJy1DXdV7uRFhBdjnRZGdmtamHcQbwGHUH+PtTa/jUSMdbtTUvXPI6dz7jDpK0BImzbXNb+r9CcudpiixuMu5gv3qyJL5EAWCXcT2j+y2VWj2HN/8bJHMoo6yf+bn6A/Cu9f0obbGVF0kJ/Y5UWmEdBG6IzABEBAAE="})
+}
+
+func TestCheckGParseGPGExpire(t *testing.T) {
+ testIssue6599 := `-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQINBFlFJRsBEAClNcRT5El+EaTtQEYs/eNAhr/bqiyt6fPMtabDq2x6a8wFWMX0
+yhRh4vZuLzhi95DU/pmhZARt0W15eiN0AhWdOKxry1KtZNiZBzMm1f0qZJMuBG8g
+YJ7aRkCqdWRxy1Q+U/yhr6z7ucD8/yn7u5wke/jsPdF/L8I/HKNHoawI1FcMC9v+
+QoG3pIX8NVGdzaUYygFG1Gxofc3pb3i4pcpOUxpOP12t6PfwTCoAWZtRLgxTdwWn
+DGvY6SCIIIxn4AC6u3+tHz9HDXx+4eiB7VxMsiIsEuHW9DVBzen9jFNNjRnNaFkL
+pTAFOyGsSzGRGhuJpb7j7hByoWkaItqaw+clnzVrDqhfbxS1B8dmgMANh9pzNsv7
+J/OnNdGsbgDX5RytSKMaXclK2ZGH6Txatgezo167z6EdthNR1daj1QfqWADiqKbR
+UXp7Xz9b+/CBedUNEXPbIExva9mPsFJo2IEntRGtdhhjuO4a6HLG7k1i0o0dHxqb
+a9HrOW7fO902L7JHIgnjpDWDGLGGnVGcGWdEEZggfpnvjxADeTgyMb2XkALTQ0GG
+yRywByxG8/zjXeEkqUng/mxNbBCcHcuIRVsqYwGQLiLubYxnRudqtNst8Tdu+0+q
+AL0bb8ueQC1M3WHsMUxvTjknFJdJzRicNyLf6AdfRv6yy6Ra+t4SFoSbsQARAQAB
+tB90YXN0eXRlYSA8dGFzdHl0ZWFAdGFzdHl0ZWEuZGU+iQJXBBMBCABBAhsDBQsJ
+CAcCBhUICQoLAgQWAgMBAh4BAheAAhkBFiEE1bTEO0ioefY1KTbmWTRuDqNcZ+UF
+Alyo2K0FCQVE5xIACgkQWTRuDqNcZ+UTFA/+IygU02oz19tRVNgVmKyXv1GhnkaY
+O/oGxp7cRGJ0gf0bjhbJpFf4+6OHaS0ei47Qp8XTuStfWry6V6rXLSV/ZOOhFaCq
+VpFvoG2JcPZbSTB+CR/lL5fWwx3w5PAOUwipGRFs7mYLgy8U/E3U7u+ioP4ZqCXS
+heclyXAGNlrjUwvwOWRLxvcEQr4ztQR0Lk2tv1QYYDzbaXUSdnsM1YK9YpYP7BE2
+luKtwwXaubdwcXPs96FEmGLGfsWC/dWnAxkYXPo9q7O6c5GKbGiP3xFhBaBCzzm0
+PAqAJ+NyIWL63yI1aNNz4xC1marU7UPLzBnv5fG1WdscYqAbj8XbZ96mPPM80y0A
+j5/7YecRXce4yedxRHhi3bD8MEzDMHWfkQPpWCZj/KwjDFiZwSMgpQUqeAllDKQx
+Ld0CLkLuUe20b+/5h6dGtGpoACkoOPxMl6zi9uihztvR5iYdkwnmcxKmnEtz+WV4
+1efhS3QRZro3QAHjhCqU1Xjl0hnwSCgP5nUhTq6dJqgeZ7c5D4Uhg55MXwQ68Oe4
+NrQfhdO8IOSVPDPDEeQ2kuP7/HEZsjKZBMKhKoUcdXM6y9T2tYw3wv5JDuDxT2Q1
+3IuFVr1uFm/spVyFCpPpPSQM1wfdtoPLRjiJ/KVh777AWUlywP2b7cWyKShYJb4P
+QzTQ/udx94916cSJAlQEEwEIAD4WIQTVtMQ7SKh59jUpNuZZNG4Oo1xn5QUCWUUl
+GwIbAwUJA8ORBQULCQgHAgYVCAkKCwIEFgIDAQIeAQIXgAAKCRBZNG4Oo1xn5Uoa
+D/9tdmXECDZS1th0xmdNIsecxhI9dBGJyaJwfhH7UVkL+e86EsmTSzyJhBAepDDe
+4wTEaW/NnjVX+ulO7rKFN4/qvSCOaeIdP0MEn7zfZVVKG8gMW4mb/piLvUnsZvsM
+eWfv9AL/b3H1MRkl9S6XsE0ove72pmbBSZEhh2rNHqf+tIGr/RTtn80efTv3w+75
+0UJtaFPsAKoAzNRy+ouhf9IHy9pEMJRA/hZ0Ho04QCDAC65mWz7iwI7v9VRDVfng
+UjJPJahoM4vTpB30vJiFYT2oFTgdxGckfEUezsk8Rx/o6x4u6igKypPbeqM/7SMw
+H61sCWR7nHJhCK55WeEIbzHEhwCZTf1pgvHj5oGUOjzksp2DmFV3ma3WCh8JyqyA
+zw2OvOXBlayIaGIoyD5tSHS40rTi9JmOUfhg6WPN3MIrvsSVEV7JNdiZs/Tb07eQ
+l71O7wv/LXZZCYP5NLV0PJbN2pHMf8cysWulfHN/mNgpEiLJpPBYVVyVbzWLg54X
+FcNQMrT70kRF4M2GBRahXchkWi6+1pd3jPtvCFfcNiYBnHcrKu2R/UdSpFYdclDi
+y6u7xMxXt0AVeLLtlXq7+ChOANMH5aPdUjCXeQDNJawLx41KL9fETsjScodmmpKi
+SNhkC03FNfbkPJzZthoTxCfUBQeHYWgDpN3Gjb/OdSWC34kCVwQTAQgAQQIbAwUJ
+A8ORBQULCQgHAgYVCAkKCwIEFgIDAQIeAQIXgBYhBNW0xDtIqHn2NSk25lk0bg6j
+XGflBQJcqNWQAhkBAAoJEFk0bg6jXGfldcEP/iz4UbJPd/kr8D008ky7vI7hnYs8
+VQIxL6ljQJ75XmVx/Lz1MVo4Vdsu6+qEta5gvqbGwjuEugaHcFVbHCZEBKI0QHSQ
+UNHfXT8eZP/BwwFWawUokLTbF//Dg5xd5ejo/TeltNleyq1r0AoxcoMv1srrY4yK
+GvWE5V8SVSi/E71y4VarS58ZH3NZ6sW5slnYvgAHTVgOjkVvMYk5JmrWsFsycYf8
+Rs5BvCuXQpUV9N8UFfW8pAxYhLvUTqhf34m24syyFn9j1udEO1c+IeX7h7hX2CFL
++P6wS9Ok2Z++IKvhIXLy/OoBULxKXjM04aLxDDlRW3qEyeLKvbFiEHGSnlaDz27L
+LBAGGRxzLLr0g1evV33AHUU2N8pATnzXHJaRiMjExjRi5IkHjbiEaxiqIwr8CSnS
+4RlZ+owxhJ/4MjnsqBL3ELhkSnN+HGkPBQkbFDhCm0ICm78EK2x4+bWo/YUUfoky
+Hq92XB6RNbO0RcdGyltFsJ02Ev20Hc4MClF7jT7xm7VJfbeYNmxZ6GNXZ7kEsl87
+7qzFtr2BcEfw/ieyyoOrwAC9FBJc/9CALex3p3TGWpM43C+IdqZIsr9QHAzvJfY7
+/n5/wJyCPhIZSSE3b8PZRIAdh6NA2IF877OCzIl2UFUNJE1zaEcTvjxZzCZ1SHGU
+YzQeSbODHUuPDbhytBJnZW50b29AdGFzdHl0ZWEuZGWJAlQEEwEIAD4CGwMFCwkI
+BwIGFQoJCAsCBBYCAwECHgECF4AWIQTVtMQ7SKh59jUpNuZZNG4Oo1xn5QUCXKjY
+rQUJBUTnEgAKCRBZNG4Oo1xn5VhkD/42pGYstRMvrO37wJDnnLDm+ZPb0RGy80Ru
+Nt3S6OmU3TFuU9mj/FBc8VNs6xr0CCMVVM/CXX1gXCHhADss1YDaOcRsl5wVJ6EF
+tbpEXT/USMw3dV4Y8OYUSNxyEitzKt25CnOdWGPYaJG3YOtAR0qwopMiAgLrgLy9
+mugXqnrykF7yN27i6iRi2Jk9K7tSb4owpw1kuToJrNGThAkz+3nvXG5oRiYFTlH3
+pATx34r+QOg1o3giomP49cP4ohxvQFP90w2/cURhLqEKdR6N1X0bTXRQvy8G+4Wl
+QMl8WYPzQUrKGMgj/f7Uhb3pFFLCcnCaYFdUj+fvshg5NMLGVztENz9x7Vr5n51o
+Hj9WuM3s65orKrGhMUk4NJCsQWJUHnSNsEXsuir9ocwCv4unIJuoOukNJigL4d5o
+i0fKPKuLpdIah1dmcrWLIoid0wPeA8unKQg3h6VL5KXpUudo8CiPw/kk1KTLtYQR
+7lezb1oldqfWgGHmqnOK+u6sOhxGj2fcrTi4139ULMph+LCIB3JEtgaaw4lTTt0t
+S8h6db6LalzsQyL2sIHgl/rmLmZ5sqZhmi/DsAjZWfpz+inUP6rgap+OgAmtLCit
+BwsDAy7ux44mUNtW1KExuY2W/bmSLlV28H+fHJ3fhpHDQMNAFYc5n4NgTe6eT/KY
+WA4KGfp7KYkCVAQTAQgAPhYhBNW0xDtIqHn2NSk25lk0bg6jXGflBQJcqNTKAhsD
+BQkDw5EFBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEFk0bg6jXGflazAP/iae
+7/PIaWhIyDw14NvyJG4D8FMSV9bC1cJ+ICo0qkx0dxcZMsxTp7fD8ODaSWzJEI4X
+mGDvJp5fJ7ZALFhp7IBIsj9CHRWyVBCzwhnAXgSmGF+qzBFE7WjQORdn5ytTiWAN
+PqyJV0sAw46jLJNvYv/LaFb2bzR/z6U1wQ2qvqXZj8vh2eLvY2XfQa1HnKaPi8h9
+OqtLM80/6uai2scdYAI6usB8wxTJY2b2B8flDB7c8DruCDRL1QmrK5o70yIIai2c
+4fXHHglulT9GnwD01a5DA2dgn5nxb81xgofgofXQjIOYARUKvcuZsF/tsR5S+C5k
+CJnq8V9xdABbWz/FvwXz7ejf2jPtAnD6gcvuPnLX/dsxFHio2n4HHzXboUrVMKid
+zcvuIrmlNtvKHYGxC9Dk3vNM+9rTlaY2BRt0zkgakDpMhqFu6A/TCEDZK0ukQLtc
+h0g806AWding6gr4vQDeX6dSCuJMFKTu/2q85R1w2vGuyWYSm6QR6sM+KumOX3vJ
+c/zvOodhRWXQBWYHTuSw6QGDCI115lWO8DAK4T6u7SVXfthHKm+38dpDH1tSfcHo
+KaG7XJKExEPgdcNLvJIN/xCx5lX6fy0ohj7oF1dEpeBpIgqTC0l5I8bLAjcLKZl9
+4YwJSSS8aTedptCmBTAHWd6y3W/hgFJrdKsqbHVGuQINBFlFJRsBEAC1EFjL9rvn
+O9UIJ2dfaPdfm2GjH/sKfOInfWp4KKEDWtS59Pssld4gnjcmDNgunYYhHYcok61K
+9J4x33KvkNAhEbw9y5AGW0tb7p2I6NxiOaWZjmZbg7AJMBFenipdUXBEjbu4LzEd
+yyIm3/lQiV4bW6GR14cKdQLZm/inVmbEaGSpq2g19WA+X7SwBxzZR9O80Iohm3RL
+X8Z8lXzUj/fUWCCstfXZwdy4vbZv8ms7kmq+3TUOwOiVavgWYhbal+nO0kLdVFbb
+i7YRvZh6afxfgMyJ3v1goXvsW1W8jno2ikUmkwZiiPY/cKOPmOwEzj3hl73i6qrx
+vm9SjEwEzI/gFXlJD8cOKMc6/g8kUeCepDfdKjgo1SYynLUk4NW9QeucJo6BSPEP
+llamHsTaUGzT4tj9qZqAQ0dwSnWYvyi19EMCGssLoy7bAoNueHOYZtHN5TskKShQ
+XzEG9IRZvXGmaWAT17sFesqXK0g47jQswmwobDsXyvXJfree36jQRj7SAVVK44Im
+bqBe6BT9QYIBkfThAWjwTibg0P1CPGk5TPpssAQgM3jxXVEyD6iKCS4LKWrtm+Sk
+MlGaPNyO8OcwHp6p5QaYAE6vlSfT8fsZ0iGd06ua5miZRbkM2i94/jVKvZLRvWv4
+S8SMZemAYnVMc0YFWEJCbaKdZp35rb5e4QARAQABiQI8BBgBCAAmAhsMFiEE1bTE
+O0ioefY1KTbmWTRuDqNcZ+UFAlyo2PAFCQVE51UACgkQWTRuDqNcZ+V+Hg/9HhVI
+No0ID4o8y0jlhyNg8n/Fy08uDALQ6JlbN6buLw+IYU75GTDIysGjx+9bgt+Mjvtp
+bbWkeT6okKkyB3H/x7w7v9GTYWlnzMA/KwHF7L7Wqy0afcVjg+fchWXPJQ3H5Jxh
+bcX3FKkIN9kpfdHN87C8//s4LzDOWeYCxFwkxkbx4tc1K4HhezpvYDKiLmFMVbaU
+qB0pzP8IM3hU1GJeAC2skfjstuaKJPuF895aFSF6++DYodXBFu3UlSJbJGfDEBYC
+9PgSrxX1qlNUFw+6Hr2uSdPnmcKgCDFGhxB1d/Z2Xa/QFhvuj7U38eyqla3dzXxu
+4+/9BOoJwdyRlUxd1Jcy3q7l8V4Hk1vMwKICdXBadAcAgSi0ImXt7UttpTYB7WNV
+nlFmFFi8eVnmMll08LWV6LygG8GBSzW5NUZnUhxHbFVFcEuHo6W1lIEgJooOnGwd
+H2rqKXpkcv86q7ODxdt9nb0txUPzgukusHes6Q0cnTMWcd0YT75frKjjK6TK8KZA
+XMH0zobogpnr/n2ji87cn9sSlL3/2NtxfAwqyDWomECKOtKYfx10OPjrPrScDFG0
+aF6w50Xg5DH/I38zzBVanEgwzWHosIVKNQHgoSYijErnShbRefA8+zCsyn0q/9Rg
+cToAM7X3ro+tQQHWDIhiayHvJMeGN/R/u1U4Kv25BK4EW0zMChEMALGnffpA/rz6
+oRXV++syFI6AaByfiatYgKh+d2LkhyeAAnp93VBV8c2YArsSp7XookhxlRA7XAGw
+x71VKouHjdcMpZM76OcEJgC2fKCbsLrMhkjKOjux6Lru1mY4bFmXBxex0pssvIoc
+zefV00qVvQ0e2JkvUmuKKIplyH0GAapDRnF3R8/doNNUXfVufHButKHlmK7yaFkK
+UBXLFUc3c8mCm/UQcMrFYrlyRNd6Axir2LpD8ya8gIwOM49nH+DDSla4d23zP+4M
+kTaWZ5QlX4FGN8kfPE4rzVxhCP0jtC5m2oqFp8dIKtxzX836YkHG7wlAPsaoPmhl
+kJMylGSwvjRvjxNLHWodMJfrQgajnW0UEd1XrfO48i/OD3f1Z22/sHRY2VejD4KJ
+49QBienKCUlNbZRfpaGOQn2HqbOX6/wUfS/83rhBVNrsU2kNb/+6OKsJV2YtokPK
+saS88q8225YEcsDLPS/3V5VrFW0CQwXJM4AbVweHhE7486VtSfkQswEAjTMJSbTO
+4IgjWYDaQ57m77bc4N9z0oCWaChlaAjdzSsL/0JQx5GJXUcxW1GvEGhP/Fx1IFd3
+oCR8OmY6oZHYmB1fNvFLSmJN0dJcQjm3hebrSQiWg/JvVAlF2S7f+j0pjeki09kM
+0RqAHOkDpLeY6ifU8+QW5DP5yh8d9ZDc4wjPdz53ycwJzaMqESOIr9eHYtOWN6Hi
+0rItsMN8FB5A70te1IcKG5UWh3cCRg7fEbKVofIYTSU2V98RLkp+iEHLKfa6wObx
+Mt60OVU/xbrO28w93cLpWUIH1Csow3k3wSbNmw3d9mWc7cVESct+IM5W4ZSYMcjG
+cvcMELWCwuT1mPSkR0hv2oz5xFOBlUV1KUViIcxpKzTrjj69JAaBbJ3f5OEfEbj/
+G+aa30EoddPBhwF7XnQUeC/DLRJQh2MH1ohMnkpBttDipHOuFS1CZh8xoxr/8moW
+nj5FRG+FAZeCmcqj5PE+du7KF2XRPBlxhc1Nu+kPejlr6qa5qdwo4MzfuzmxWmvc
+WQuNMtaPqQvYL1A09MH0uMH65MtJNsqbSvHa5AwAlletPw6Wr0qrBLBCmOpNf+Q7
+7nBQBrK5VPMcto9IkGB4/bwhx7gQ0O2dD4dD4DPpGY9p52KpOG2ECoCWMtbsPD2P
+bs+WNHN8V+3ZCxZukEj25wDhc5941P01BhKVFevGLHyYNWk34mQk7RdHj9OiEL8n
+GpQ9l/R58+mvVwarzs898/y5onQieWi0Zu3WfMvjTOG3D3NIKMuthzRytfV5C/tJ
++W5ZX/jLVR3bzvzx8Pnpvf602xCST9/7LbgFhljfXQq0bq0d9si9hvyaMOh1PQFU
+2+PzmWtHcsiVoyXfQp6ztJYFkoYaaD+Mc2jWG2Qy9kAyUGTXj/WfkPn7hr5hvuwk
+0kNDSan8NY2f1mtG253qr6fMOmCgrUfaumpafd9xIJ65x1G2BGAr8bzjLJufEUaG
+D2wBYWE6tlRqT4j7u6u9vRjShKH+A1UpLV2pEtaIQ3wfbt6GIwFJHWU506m3RCCn
+pL46fAOVKS1GSuf79koXsZeECJRSbipXz3TJs0TqiQKzBBgBCAAmAhsCFiEE1bTE
+O0ioefY1KTbmWTRuDqNcZ+UFAlyo2PAFCQM9QGYAgXYgBBkRCAAdFiEENVUmaGTK
+bX/0Wqbnz8OUl/GybgcFAltMzAoACgkQz8OUl/Gybgf0OwD/c4hwqsfZ79t7pM9d
+PPWYQ1jyq2g3ELMKyPp79GmL0qsA/2t2qkaOEX3y7egmhL/iKyqASb4y/JTABGMU
+hy5GjBhxCRBZNG4Oo1xn5WBvEACbCAQRC00FYoktuRzQQy2LCJe13AUS1/lCWv8B
+Qu7hTmM8TC/iNmYk71qeYInQMp/12b0HSWcv8IBmOlMy2GTjgnTgiwpqY5nhtb9O
+uB5H2g6fpu7FFG9ARhtH9PiTMwOUzfZFUz0tDdEEG5sayzWUcY3zjmJFmHSg5A9B
+/Q/yctqZ1eINtyEECINo/OVEfD7bmyZwK/vrxAg285iF6lB11wVl+5E7sNy9Hvu8
+4kCKPksqyjFWUd0XoEu9AH6+XVeEPF7CQKHpRfhc4uweT9O5nTb7aaPcqq0B4lUL
+unG6KSCm88zaZczp2SUCFwENegmBT/YKN5ZoHsPh1nwLxh194EP/qRjW9IvFKTlJ
+EsB4uCpfDeC233oH5nDkvvphcPYdUuOsVH1uPQ7PyWNTf1ufd9bDSDtK8epIcDPe
+abOuphxQbrMVP4JJsBXnVW5raZO7s5lmSA8Ovce//+xJSAq9u0GTsGu1hWDe60ro
+uOZwqjo/cU5G4y7WHRaC3oshH+DO8ajdXDogoDVs8DzYkTfWND2DDNEVhVrn7lGf
+a4739sFIDagtBq6RzJGL0X82eJZzXPFiYvmy0OVbNDUgH+Drva/wRv/tN8RvBiS6
+bsn8+GBGaU5RASu67UbqxHiytFnN4OnADA5ZHcwQbMgRHHiiMMIf+tJWH/pFMp00
+epiDVQ==
+=VSKJ
+-----END PGP PUBLIC KEY BLOCK-----
+`
+ keys, err := checkArmoredGPGKeyString(testIssue6599)
+ require.NoError(t, err)
+ if assert.NotEmpty(t, keys) {
+ ekey := keys[0]
+ expire := getExpiryTime(ekey)
+ assert.Equal(t, time.Unix(1586105389, 0), expire)
+ }
+}
+
+func TestTryGetKeyIDFromSignature(t *testing.T) {
+ assert.Empty(t, tryGetKeyIDFromSignature(&packet.Signature{}))
+ assert.Equal(t, "038D1A3EADDBEA9C", tryGetKeyIDFromSignature(&packet.Signature{
+ IssuerKeyId: util.ToPointer(uint64(0x38D1A3EADDBEA9C)),
+ }))
+ assert.Equal(t, "038D1A3EADDBEA9C", tryGetKeyIDFromSignature(&packet.Signature{
+ IssuerFingerprint: []uint8{0xb, 0x23, 0x24, 0xc7, 0xe6, 0xfe, 0x4f, 0x3a, 0x6, 0x26, 0xc1, 0x21, 0x3, 0x8d, 0x1a, 0x3e, 0xad, 0xdb, 0xea, 0x9c},
+ }))
+}
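+
+// Note: for OpenPGP v4 keys the 64-bit key ID is the low 8 bytes of the
+// 20-byte fingerprint (RFC 4880), which is what the fixture above shows:
+// ...0x03, 0x8d, 0x1a, 0x3e, 0xad, 0xdb, 0xea, 0x9c -> "038D1A3EADDBEA9C".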
diff --git a/models/asymkey/gpg_key_verify.go b/models/asymkey/gpg_key_verify.go
new file mode 100644
index 0000000..01812a2
--- /dev/null
+++ b/models/asymkey/gpg_key_verify.go
@@ -0,0 +1,119 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package asymkey
+
+import (
+ "context"
+ "strconv"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/base"
+ "code.gitea.io/gitea/modules/log"
+)
+
+// __________________ ________ ____ __.
+// / _____/\______ \/ _____/ | |/ _|____ ___.__.
+// / \ ___ | ___/ \ ___ | <_/ __ < | |
+// \ \_\ \| | \ \_\ \ | | \ ___/\___ |
+// \______ /|____| \______ / |____|__ \___ > ____|
+// \/ \/ \/ \/\/
+// ____ ____ .__ _____
+// \ \ / /___________|__|/ ____\__.__.
+// \ Y // __ \_ __ \ \ __< | |
+// \ /\ ___/| | \/ || | \___ |
+// \___/ \___ >__| |__||__| / ____|
+// \/ \/
+
+// This file provides functions relating to verifying GPG keys
+
+// VerifyGPGKey marks a GPG key as verified
+func VerifyGPGKey(ctx context.Context, ownerID int64, keyID, token, signature string) (string, error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return "", err
+ }
+ defer committer.Close()
+
+ key := new(GPGKey)
+
+ has, err := db.GetEngine(ctx).Where("owner_id = ? AND key_id = ?", ownerID, keyID).Get(key)
+ if err != nil {
+ return "", err
+ } else if !has {
+ return "", ErrGPGKeyNotExist{}
+ }
+
+ if err := key.LoadSubKeys(ctx); err != nil {
+ return "", err
+ }
+
+ sig, err := extractSignature(signature)
+ if err != nil {
+ return "", ErrGPGInvalidTokenSignature{
+ ID: key.KeyID,
+ Wrapped: err,
+ }
+ }
+
+ signer, err := hashAndVerifyWithSubKeys(sig, token, key)
+ if err != nil {
+ return "", ErrGPGInvalidTokenSignature{
+ ID: key.KeyID,
+ Wrapped: err,
+ }
+ }
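+ // Some signing setups implicitly append one or two trailing newlines to the
+ // payload before signing, so retry with those variants before giving up.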
+ if signer == nil {
+ signer, err = hashAndVerifyWithSubKeys(sig, token+"\n", key)
+ if err != nil {
+ return "", ErrGPGInvalidTokenSignature{
+ ID: key.KeyID,
+ Wrapped: err,
+ }
+ }
+ }
+ if signer == nil {
+ signer, err = hashAndVerifyWithSubKeys(sig, token+"\n\n", key)
+ if err != nil {
+ return "", ErrGPGInvalidTokenSignature{
+ ID: key.KeyID,
+ Wrapped: err,
+ }
+ }
+ }
+
+ if signer == nil {
+ log.Error("Unable to validate token signature. Error: %v", err)
+ return "", ErrGPGInvalidTokenSignature{
+ ID: key.KeyID,
+ }
+ }
+
+ if signer.PrimaryKeyID != key.KeyID && signer.KeyID != key.KeyID {
+ return "", ErrGPGKeyNotExist{}
+ }
+
+ key.Verified = true
+ if _, err := db.GetEngine(ctx).ID(key.ID).SetExpr("verified", true).Update(new(GPGKey)); err != nil {
+ return "", err
+ }
+
+ if err := committer.Commit(); err != nil {
+ return "", err
+ }
+
+ return key.KeyID, nil
+}
+
+// VerificationToken returns a token for the user that remains valid for the given number of minutes.
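+//
+// A user typically proves key ownership by producing an armored detached
+// signature over this token (a sketch; exact flags vary by GnuPG version):
+//
+//	printf '%s' "$TOKEN" | gpg -a --default-key <KEYID> --detach-sig
+//
+// The resulting armor is what VerifyGPGKey above expects as `signature`;
+// it tolerates up to two trailing newlines added by the signing tool.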
+func VerificationToken(user *user_model.User, minutes int) string {
+ return base.EncodeSha256(
+ time.Now().Truncate(1*time.Minute).Add(time.Duration(minutes)*time.Minute).Format(
+ time.RFC1123Z) + ":" +
+ user.CreatedUnix.Format(time.RFC1123Z) + ":" +
+ user.Name + ":" +
+ user.Email + ":" +
+ strconv.FormatInt(user.ID, 10))
+}
diff --git a/models/asymkey/main_test.go b/models/asymkey/main_test.go
new file mode 100644
index 0000000..87b5c22
--- /dev/null
+++ b/models/asymkey/main_test.go
@@ -0,0 +1,24 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package asymkey
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/unittest"
+)
+
+func TestMain(m *testing.M) {
+ unittest.MainTest(m, &unittest.TestOptions{
+ FixtureFiles: []string{
+ "gpg_key.yml",
+ "public_key.yml",
+ "TestParseCommitWithSSHSignature/public_key.yml",
+ "deploy_key.yml",
+ "gpg_key_import.yml",
+ "user.yml",
+ "email_address.yml",
+ },
+ })
+}
diff --git a/models/asymkey/ssh_key.go b/models/asymkey/ssh_key.go
new file mode 100644
index 0000000..7a18732
--- /dev/null
+++ b/models/asymkey/ssh_key.go
@@ -0,0 +1,427 @@
+// Copyright 2014 The Gogs Authors. All rights reserved.
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package asymkey
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/models/auth"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "golang.org/x/crypto/ssh"
+ "xorm.io/builder"
+)
+
+// KeyType specifies the key type
+type KeyType int
+
+const (
+ // KeyTypeUser specifies the user key
+ KeyTypeUser = iota + 1
+ // KeyTypeDeploy specifies the deploy key
+ KeyTypeDeploy
+ // KeyTypePrincipal specifies the authorized principal key
+ KeyTypePrincipal
+)
+
+// PublicKey represents a user or deploy SSH public key.
+type PublicKey struct {
+ ID int64 `xorm:"pk autoincr"`
+ OwnerID int64 `xorm:"INDEX NOT NULL"`
+ Name string `xorm:"NOT NULL"`
+ Fingerprint string `xorm:"INDEX NOT NULL"`
+ Content string `xorm:"MEDIUMTEXT NOT NULL"`
+ Mode perm.AccessMode `xorm:"NOT NULL DEFAULT 2"`
+ Type KeyType `xorm:"NOT NULL DEFAULT 1"`
+ LoginSourceID int64 `xorm:"NOT NULL DEFAULT 0"`
+
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"updated"`
+ HasRecentActivity bool `xorm:"-"`
+ HasUsed bool `xorm:"-"`
+ Verified bool `xorm:"NOT NULL DEFAULT false"`
+}
+
+func init() {
+ db.RegisterModel(new(PublicKey))
+}
+
+// AfterLoad is invoked from XORM after setting the values of all fields of this object.
+func (key *PublicKey) AfterLoad() {
+ key.HasUsed = key.UpdatedUnix > key.CreatedUnix
+ key.HasRecentActivity = key.UpdatedUnix.AddDuration(7*24*time.Hour) > timeutil.TimeStampNow()
+}
+
+// OmitEmail returns the content of the public key without the email address.
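+// For example (illustrative): "ssh-rsa AAAA... user@example.com" becomes
+// "ssh-rsa AAAA...".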
+func (key *PublicKey) OmitEmail() string {
+ return strings.Join(strings.Split(key.Content, " ")[:2], " ")
+}
+
+// AuthorizedString returns formatted public key string for authorized_keys file.
+//
+// TODO: Consider dropping this function
+func (key *PublicKey) AuthorizedString() string {
+ return AuthorizedStringForKey(key)
+}
+
+func addKey(ctx context.Context, key *PublicKey) (err error) {
+ if len(key.Fingerprint) == 0 {
+ key.Fingerprint, err = CalcFingerprint(key.Content)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Save SSH key.
+ if err = db.Insert(ctx, key); err != nil {
+ return err
+ }
+
+ return appendAuthorizedKeysToFile(key)
+}
+
+// AddPublicKey adds new public key to database and authorized_keys file.
+func AddPublicKey(ctx context.Context, ownerID int64, name, content string, authSourceID int64) (*PublicKey, error) {
+ log.Trace(content)
+
+ fingerprint, err := CalcFingerprint(content)
+ if err != nil {
+ return nil, err
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer committer.Close()
+
+ if err := checkKeyFingerprint(ctx, fingerprint); err != nil {
+ return nil, err
+ }
+
+ // Key names must be unique per user.
+ has, err := db.GetEngine(ctx).
+ Where("owner_id = ? AND name = ?", ownerID, name).
+ Get(new(PublicKey))
+ if err != nil {
+ return nil, err
+ } else if has {
+ return nil, ErrKeyNameAlreadyUsed{ownerID, name}
+ }
+
+ key := &PublicKey{
+ OwnerID: ownerID,
+ Name: name,
+ Fingerprint: fingerprint,
+ Content: content,
+ Mode: perm.AccessModeWrite,
+ Type: KeyTypeUser,
+ LoginSourceID: authSourceID,
+ }
+ if err = addKey(ctx, key); err != nil {
+ return nil, fmt.Errorf("addKey: %w", err)
+ }
+
+ return key, committer.Commit()
+}
+
+// GetPublicKeyByID returns public key by given ID.
+func GetPublicKeyByID(ctx context.Context, keyID int64) (*PublicKey, error) {
+ key := new(PublicKey)
+ has, err := db.GetEngine(ctx).
+ ID(keyID).
+ Get(key)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrKeyNotExist{keyID}
+ }
+ return key, nil
+}
+
+// SearchPublicKeyByContent searches by content prefix (so the trailing
+// e-mail comment may be omitted) and returns the public key found.
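+// For example (illustrative), a key stored as
+//
+//	ssh-ed25519 AAAAC3NzaC1lZDI1... user@example.com
+//
+// is still found when the query carries only the type and blob parts.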
+func SearchPublicKeyByContent(ctx context.Context, content string) (*PublicKey, error) {
+ key := new(PublicKey)
+ has, err := db.GetEngine(ctx).
+ Where("content like ?", content+"%").
+ Get(key)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrKeyNotExist{}
+ }
+ return key, nil
+}
+
+// SearchPublicKeyByContentExact searches for an exact content match
+// and returns the public key found.
+func SearchPublicKeyByContentExact(ctx context.Context, content string) (*PublicKey, error) {
+ key := new(PublicKey)
+ has, err := db.GetEngine(ctx).
+ Where("content = ?", content).
+ Get(key)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrKeyNotExist{}
+ }
+ return key, nil
+}
+
+type FindPublicKeyOptions struct {
+ db.ListOptions
+ OwnerID int64
+ Fingerprint string
+ KeyTypes []KeyType
+ NotKeytype KeyType
+ LoginSourceID int64
+}
+
+func (opts FindPublicKeyOptions) ToConds() builder.Cond {
+ cond := builder.NewCond()
+ if opts.OwnerID > 0 {
+ cond = cond.And(builder.Eq{"owner_id": opts.OwnerID})
+ }
+ if opts.Fingerprint != "" {
+ cond = cond.And(builder.Eq{"fingerprint": opts.Fingerprint})
+ }
+ if len(opts.KeyTypes) > 0 {
+ cond = cond.And(builder.In("`type`", opts.KeyTypes))
+ }
+ if opts.NotKeytype > 0 {
+ cond = cond.And(builder.Neq{"`type`": opts.NotKeytype})
+ }
+ if opts.LoginSourceID > 0 {
+ cond = cond.And(builder.Eq{"login_source_id": opts.LoginSourceID})
+ }
+ return cond
+}
+
+// UpdatePublicKeyUpdated updates the public key's last-used time.
+func UpdatePublicKeyUpdated(ctx context.Context, id int64) error {
+ // Check if key exists before update as affected rows count is unreliable
+ // and will return 0 affected rows if two updates are made at the same time
+ if cnt, err := db.GetEngine(ctx).ID(id).Count(&PublicKey{}); err != nil {
+ return err
+ } else if cnt != 1 {
+ return ErrKeyNotExist{id}
+ }
+
+ _, err := db.GetEngine(ctx).ID(id).Cols("updated_unix").Update(&PublicKey{
+ UpdatedUnix: timeutil.TimeStampNow(),
+ })
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// PublicKeysAreExternallyManaged returns, for each of the provided keys, whether it is externally managed
+func PublicKeysAreExternallyManaged(ctx context.Context, keys []*PublicKey) ([]bool, error) {
+ sourceCache := make(map[int64]*auth.Source, len(keys))
+ externals := make([]bool, len(keys))
+
+ for i, key := range keys {
+ if key.LoginSourceID == 0 {
+ externals[i] = false
+ continue
+ }
+
+ source, ok := sourceCache[key.LoginSourceID]
+ if !ok {
+ var err error
+ source, err = auth.GetSourceByID(ctx, key.LoginSourceID)
+ if err != nil {
+ if auth.IsErrSourceNotExist(err) {
+ externals[i] = false
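+ // Cache a placeholder source so later keys referencing the
+ // same missing source skip the DB lookup; its nil Cfg fails
+ // the SSHKeyProvider assertion below, leaving the key marked
+ // as not externally managed.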
+ sourceCache[key.LoginSourceID] = &auth.Source{
+ ID: key.LoginSourceID,
+ }
+ continue
+ }
+ return nil, err
+ }
+ }
+
+ if sshKeyProvider, ok := source.Cfg.(auth.SSHKeyProvider); ok && sshKeyProvider.ProvidesSSHKeys() {
+ // Disable setting SSH keys for this user
+ externals[i] = true
+ }
+ }
+
+ return externals, nil
+}
+
+// PublicKeyIsExternallyManaged returns whether the key with the provided ID is externally managed
+func PublicKeyIsExternallyManaged(ctx context.Context, id int64) (bool, error) {
+ key, err := GetPublicKeyByID(ctx, id)
+ if err != nil {
+ return false, err
+ }
+ if key.LoginSourceID == 0 {
+ return false, nil
+ }
+ source, err := auth.GetSourceByID(ctx, key.LoginSourceID)
+ if err != nil {
+ if auth.IsErrSourceNotExist(err) {
+ return false, nil
+ }
+ return false, err
+ }
+ if sshKeyProvider, ok := source.Cfg.(auth.SSHKeyProvider); ok && sshKeyProvider.ProvidesSSHKeys() {
+ // Disable setting SSH keys for this user
+ return true, nil
+ }
+ return false, nil
+}
+
+// deleteKeysMarkedForDeletion deletes the given keys and returns true if the SSH keys need updating
+func deleteKeysMarkedForDeletion(ctx context.Context, keys []string) (bool, error) {
+ // Start session
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return false, err
+ }
+ defer committer.Close()
+
+ // Delete keys marked for deletion
+ var sshKeysNeedUpdate bool
+ for _, KeyToDelete := range keys {
+ key, err := SearchPublicKeyByContent(ctx, KeyToDelete)
+ if err != nil {
+ log.Error("SearchPublicKeyByContent: %v", err)
+ continue
+ }
+ if _, err = db.DeleteByID[PublicKey](ctx, key.ID); err != nil {
+ log.Error("DeleteByID[PublicKey]: %v", err)
+ continue
+ }
+ sshKeysNeedUpdate = true
+ }
+
+ if err := committer.Commit(); err != nil {
+ return false, err
+ }
+
+ return sshKeysNeedUpdate, nil
+}
+
+// AddPublicKeysBySource adds a user's public keys. Returns true if there are changes.
+func AddPublicKeysBySource(ctx context.Context, usr *user_model.User, s *auth.Source, sshPublicKeys []string) bool {
+ var sshKeysNeedUpdate bool
+ for _, sshKey := range sshPublicKeys {
+ var err error
+ found := false
+ keys := []byte(sshKey)
+ loop:
+ for len(keys) > 0 && err == nil {
+ var out ssh.PublicKey
+ // We ignore options as they are not relevant to Gitea
+ out, _, _, keys, err = ssh.ParseAuthorizedKey(keys)
+ if err != nil {
+ break loop
+ }
+ found = true
+ marshalled := string(ssh.MarshalAuthorizedKey(out))
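+ // ssh.MarshalAuthorizedKey terminates the entry with a newline; strip it.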
+ marshalled = marshalled[:len(marshalled)-1]
+ sshKeyName := fmt.Sprintf("%s-%s", s.Name, ssh.FingerprintSHA256(out))
+
+ if _, err := AddPublicKey(ctx, usr.ID, sshKeyName, marshalled, s.ID); err != nil {
+ if IsErrKeyAlreadyExist(err) {
+ log.Trace("AddPublicKeysBySource[%s]: Public SSH Key %s already exists for user", sshKeyName, usr.Name)
+ } else {
+ log.Error("AddPublicKeysBySource[%s]: Error adding Public SSH Key for user %s: %v", sshKeyName, usr.Name, err)
+ }
+ } else {
+ log.Trace("AddPublicKeysBySource[%s]: Added Public SSH Key for user %s", sshKeyName, usr.Name)
+ sshKeysNeedUpdate = true
+ }
+ }
+ if !found && err != nil {
+ log.Warn("AddPublicKeysBySource[%s]: Skipping invalid Public SSH Key for user %s: %v", s.Name, usr.Name, sshKey)
+ }
+ }
+ return sshKeysNeedUpdate
+}
+
+// SynchronizePublicKeys updates a user's public keys. Returns true if there are changes.
+func SynchronizePublicKeys(ctx context.Context, usr *user_model.User, s *auth.Source, sshPublicKeys []string) bool {
+ var sshKeysNeedUpdate bool
+
+ log.Trace("synchronizePublicKeys[%s]: Handling Public SSH Key synchronization for user %s", s.Name, usr.Name)
+
+ // Get Public Keys from DB with current LDAP source
+ var giteaKeys []string
+ keys, err := db.Find[PublicKey](ctx, FindPublicKeyOptions{
+ OwnerID: usr.ID,
+ LoginSourceID: s.ID,
+ })
+ if err != nil {
+ log.Error("synchronizePublicKeys[%s]: Error listing Public SSH Keys for user %s: %v", s.Name, usr.Name, err)
+ }
+
+ for _, v := range keys {
+ giteaKeys = append(giteaKeys, v.OmitEmail())
+ }
+
+ // Process the provided keys to remove duplicates and the trailing name/comment part
+ var providedKeys []string
+ for _, v := range sshPublicKeys {
+ sshKeySplit := strings.Split(v, " ")
+ if len(sshKeySplit) > 1 {
+ key := strings.Join(sshKeySplit[:2], " ")
+ if !util.SliceContainsString(providedKeys, key) {
+ providedKeys = append(providedKeys, key)
+ }
+ }
+ }
+
+ // Check if Public Key sync is needed
+ if util.SliceSortedEqual(giteaKeys, providedKeys) {
+ log.Trace("synchronizePublicKeys[%s]: Public Keys are already in sync for %s (Source:%v/DB:%v)", s.Name, usr.Name, len(providedKeys), len(giteaKeys))
+ return false
+ }
+ log.Trace("synchronizePublicKeys[%s]: Public Key needs update for user %s (Source:%v/DB:%v)", s.Name, usr.Name, len(providedKeys), len(giteaKeys))
+
+ // Add new Public SSH Keys that don't already exist in the DB
+ var newKeys []string
+ for _, key := range providedKeys {
+ if !util.SliceContainsString(giteaKeys, key) {
+ newKeys = append(newKeys, key)
+ }
+ }
+ if AddPublicKeysBySource(ctx, usr, s, newKeys) {
+ sshKeysNeedUpdate = true
+ }
+
+ // Mark keys from DB that no longer exist in the source for deletion
+ var giteaKeysToDelete []string
+ for _, giteaKey := range giteaKeys {
+ if !util.SliceContainsString(providedKeys, giteaKey) {
+ log.Trace("synchronizePublicKeys[%s]: Marking Public SSH Key for deletion for user %s: %v", s.Name, usr.Name, giteaKey)
+ giteaKeysToDelete = append(giteaKeysToDelete, giteaKey)
+ }
+ }
+
+ // Delete keys from DB that no longer exist in the source
+ needUpd, err := deleteKeysMarkedForDeletion(ctx, giteaKeysToDelete)
+ if err != nil {
+ log.Error("synchronizePublicKeys[%s]: Error deleting Public Keys marked for deletion for user %s: %v", s.Name, usr.Name, err)
+ }
+ if needUpd {
+ sshKeysNeedUpdate = true
+ }
+
+ return sshKeysNeedUpdate
+}
diff --git a/models/asymkey/ssh_key_authorized_keys.go b/models/asymkey/ssh_key_authorized_keys.go
new file mode 100644
index 0000000..d3f9f3f
--- /dev/null
+++ b/models/asymkey/ssh_key_authorized_keys.go
@@ -0,0 +1,220 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package asymkey
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+)
+
+// _____ __ .__ .__ .___
+// / _ \ __ ___/ |_| |__ ___________|__|_______ ____ __| _/
+// / /_\ \| | \ __\ | \ / _ \_ __ \ \___ // __ \ / __ |
+// / | \ | /| | | Y ( <_> ) | \/ |/ /\ ___// /_/ |
+// \____|__ /____/ |__| |___| /\____/|__| |__/_____ \\___ >____ |
+// \/ \/ \/ \/ \/
+// ____ __.
+// | |/ _|____ ___.__. ______
+// | <_/ __ < | |/ ___/
+// | | \ ___/\___ |\___ \
+// |____|__ \___ > ____/____ >
+// \/ \/\/ \/
+//
+// This file contains functions for creating authorized_keys files
+//
+// There is a dependence on the database within RegeneratePublicKeys; however, most of these functions probably belong in a module
+
+const (
+ tplCommentPrefix = `# gitea public key`
+ tplPublicKey = tplCommentPrefix + "\n" + `command=%s,no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty,no-user-rc,restrict %s` + "\n"
+)
+
+var sshOpLocker sync.Mutex
+
+// AuthorizedStringForKey creates the authorized keys string appropriate for the provided key
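+//
+// With the default command template the produced entry looks roughly like
+// this (an illustrative sketch; paths and key ID are made up):
+//
+//	# gitea public key
+//	command="/usr/local/bin/gitea --config=/etc/gitea/app.ini serv key-1",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty,no-user-rc,restrict ssh-ed25519 AAAA...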
+func AuthorizedStringForKey(key *PublicKey) string {
+ sb := &strings.Builder{}
+ _ = setting.SSH.AuthorizedKeysCommandTemplateTemplate.Execute(sb, map[string]any{
+ "AppPath": util.ShellEscape(setting.AppPath),
+ "AppWorkPath": util.ShellEscape(setting.AppWorkPath),
+ "CustomConf": util.ShellEscape(setting.CustomConf),
+ "CustomPath": util.ShellEscape(setting.CustomPath),
+ "Key": key,
+ })
+
+ return fmt.Sprintf(tplPublicKey, util.ShellEscape(sb.String()), key.Content)
+}
+
+// appendAuthorizedKeysToFile appends new SSH keys' content to the authorized_keys file.
+func appendAuthorizedKeysToFile(keys ...*PublicKey) error {
+ // Don't need to rewrite this file if builtin SSH server is enabled.
+ if setting.SSH.StartBuiltinServer || !setting.SSH.CreateAuthorizedKeysFile {
+ return nil
+ }
+
+ sshOpLocker.Lock()
+ defer sshOpLocker.Unlock()
+
+ if setting.SSH.RootPath != "" {
+ // First, ensure that the RootPath is present, and if not, create it with 0700 permissions.
+ // This of course doesn't guarantee that it is the right directory for authorized_keys,
+ // but if it is supposed to be and doesn't exist, and we're running as the right user,
+ // it will at least be created properly.
+ err := os.MkdirAll(setting.SSH.RootPath, 0o700)
+ if err != nil {
+ log.Error("Unable to MkdirAll(%s): %v", setting.SSH.RootPath, err)
+ return err
+ }
+ }
+
+ fPath := filepath.Join(setting.SSH.RootPath, "authorized_keys")
+ f, err := os.OpenFile(fPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o600)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ // Note: the chmod command is not supported on Windows.
+ if !setting.IsWindows {
+ fi, err := f.Stat()
+ if err != nil {
+ return err
+ }
+
+ // .ssh directory should have mode 700, and authorized_keys file should have mode 600.
+ if fi.Mode().Perm() > 0o600 {
+ log.Error("authorized_keys file has unusual permission flags: %s - setting to -rw-------", fi.Mode().Perm().String())
+ if err = f.Chmod(0o600); err != nil {
+ return err
+ }
+ }
+ }
+
+ for _, key := range keys {
+ if key.Type == KeyTypePrincipal {
+ continue
+ }
+ if _, err = f.WriteString(key.AuthorizedString()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// RewriteAllPublicKeys removes any authorized key and rewrites all keys from the database again.
+// Note: db.GetEngine(ctx).Iterate does not get latest data after insert/delete, so we have to call this function
+// outside any session scope independently.
+func RewriteAllPublicKeys(ctx context.Context) error {
+ // Don't rewrite key if internal server
+ if setting.SSH.StartBuiltinServer || !setting.SSH.CreateAuthorizedKeysFile {
+ return nil
+ }
+
+ sshOpLocker.Lock()
+ defer sshOpLocker.Unlock()
+
+ if setting.SSH.RootPath != "" {
+ // First, ensure that the RootPath is present, and if not, create it with 0700 permissions.
+ // This of course doesn't guarantee that it is the right directory for authorized_keys,
+ // but if it is supposed to be and doesn't exist, and we're running as the right user,
+ // it will at least be created properly.
+ err := os.MkdirAll(setting.SSH.RootPath, 0o700)
+ if err != nil {
+ log.Error("Unable to MkdirAll(%s): %v", setting.SSH.RootPath, err)
+ return err
+ }
+ }
+
+ fPath := filepath.Join(setting.SSH.RootPath, "authorized_keys")
+ tmpPath := fPath + ".tmp"
+ t, err := os.OpenFile(tmpPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o600)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ t.Close()
+ if err := util.Remove(tmpPath); err != nil {
+ log.Warn("Unable to remove temporary authorized keys file: %s: Error: %v", tmpPath, err)
+ }
+ }()
+
+ if setting.SSH.AuthorizedKeysBackup {
+ isExist, err := util.IsExist(fPath)
+ if err != nil {
+ log.Error("Unable to check if %s exists. Error: %v", fPath, err)
+ return err
+ }
+ if isExist {
+ bakPath := fmt.Sprintf("%s_%d.gitea_bak", fPath, time.Now().Unix())
+ if err = util.CopyFile(fPath, bakPath); err != nil {
+ return err
+ }
+ }
+ }
+
+ if err := RegeneratePublicKeys(ctx, t); err != nil {
+ return err
+ }
+
+ if err := t.Sync(); err != nil {
+ return err
+ }
+ if err := t.Close(); err != nil {
+ return err
+ }
+ return util.Rename(tmpPath, fPath)
+}
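
RewriteAllPublicKeys follows the write-to-temp-then-rename pattern so that sshd never reads a half-written authorized_keys file. A stripped-down sketch of the same pattern, independent of the Forgejo helpers:

package sketch

import "os"

// atomicWrite replaces path without ever exposing a partial file: the data
// is written to a sibling temp file, flushed to disk, then renamed into
// place. The rename is atomic as long as both paths are on one filesystem,
// which is why the temp file is created next to the target.
func atomicWrite(path string, data []byte) error {
	tmp := path + ".tmp"
	f, err := os.OpenFile(tmp, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o600)
	if err != nil {
		return err
	}
	defer os.Remove(tmp) // cleanup for the error paths; a no-op after a successful rename
	if _, err := f.Write(data); err != nil {
		f.Close()
		return err
	}
	if err := f.Sync(); err != nil {
		f.Close()
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	return os.Rename(tmp, path)
}
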
+
+// RegeneratePublicKeys regenerates the authorized_keys file
+func RegeneratePublicKeys(ctx context.Context, t io.StringWriter) error {
+ if err := db.GetEngine(ctx).Where("type != ?", KeyTypePrincipal).Iterate(new(PublicKey), func(idx int, bean any) (err error) {
+ _, err = t.WriteString((bean.(*PublicKey)).AuthorizedString())
+ return err
+ }); err != nil {
+ return err
+ }
+
+ fPath := filepath.Join(setting.SSH.RootPath, "authorized_keys")
+ isExist, err := util.IsExist(fPath)
+ if err != nil {
+ log.Error("Unable to check if %s exists. Error: %v", fPath, err)
+ return err
+ }
+ if isExist {
+ f, err := os.Open(fPath)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ line := scanner.Text()
+ if strings.HasPrefix(line, tplCommentPrefix) {
+ scanner.Scan()
+ continue
+ }
+ _, err = t.WriteString(line + "\n")
+ if err != nil {
+ return err
+ }
+ }
+ if err = scanner.Err(); err != nil {
+ return fmt.Errorf("RegeneratePublicKeys scan: %w", err)
+ }
+ }
+ return nil
+}
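
The scanner above preserves hand-added entries while dropping the ones Forgejo manages: every line starting with the template comment prefix is skipped together with the key line that follows it. A sketch of that filtering in isolation; the marker text is an assumption standing in for the unexported tplCommentPrefix:

package sketch

import (
	"bufio"
	"strings"
)

const exampleMarker = "# gitea public key" // assumed stand-in for tplCommentPrefix

// keepForeignEntries returns only the lines that Forgejo did not write:
// a marker comment and the managed key line after it are dropped as a pair.
func keepForeignEntries(authorizedKeys string) []string {
	var kept []string
	scanner := bufio.NewScanner(strings.NewReader(authorizedKeys))
	for scanner.Scan() {
		line := scanner.Text()
		if strings.HasPrefix(line, exampleMarker) {
			scanner.Scan() // skip the managed key line that follows the marker
			continue
		}
		kept = append(kept, line)
	}
	return kept
}
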
diff --git a/models/asymkey/ssh_key_authorized_principals.go b/models/asymkey/ssh_key_authorized_principals.go
new file mode 100644
index 0000000..f85de12
--- /dev/null
+++ b/models/asymkey/ssh_key_authorized_principals.go
@@ -0,0 +1,142 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package asymkey
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+)
+
+// _____ __ .__ .__ .___
+// / _ \ __ ___/ |_| |__ ___________|__|_______ ____ __| _/
+// / /_\ \| | \ __\ | \ / _ \_ __ \ \___ // __ \ / __ |
+// / | \ | /| | | Y ( <_> ) | \/ |/ /\ ___// /_/ |
+// \____|__ /____/ |__| |___| /\____/|__| |__/_____ \\___ >____ |
+// \/ \/ \/ \/ \/
+// __________ .__ .__ .__
+// \______ _______|__| ____ ____ |_____________ | | ______
+// | ___\_ __ | |/ \_/ ___\| \____ \__ \ | | / ___/
+// | | | | \| | | \ \___| | |_> / __ \| |__\___ \
+// |____| |__| |__|___| /\___ |__| __(____ |____/____ >
+// \/ \/ |__| \/ \/
+//
+// This file contains functions for creating authorized_principals files
+//
+// There is a dependence on the database within RewriteAllPrincipalKeys & RegeneratePrincipalKeys
+// The sshOpLocker is used from ssh_key_authorized_keys.go
+
+const authorizedPrincipalsFile = "authorized_principals"
+
+// RewriteAllPrincipalKeys removes any authorized principal and rewrites all principals from the database again.
+// Note: db.GetEngine(ctx).Iterate does not see the latest data after insert/delete, so we have to
+// call this function independently, outside any session scope.
+func RewriteAllPrincipalKeys(ctx context.Context) error {
+ // No need to rewrite this file if the builtin SSH server is enabled.
+ if setting.SSH.StartBuiltinServer || !setting.SSH.CreateAuthorizedPrincipalsFile {
+ return nil
+ }
+
+ sshOpLocker.Lock()
+ defer sshOpLocker.Unlock()
+
+ if setting.SSH.RootPath != "" {
+ // First, ensure that the RootPath is present and, if not, create it with 0700 permissions.
+ // This of course doesn't guarantee that this is the right directory for authorized_principals,
+ // but at least, if it is supposed to be this directory and it doesn't exist, and we are
+ // the right user, it will be created properly.
+ err := os.MkdirAll(setting.SSH.RootPath, 0o700)
+ if err != nil {
+ log.Error("Unable to MkdirAll(%s): %v", setting.SSH.RootPath, err)
+ return err
+ }
+ }
+
+ fPath := filepath.Join(setting.SSH.RootPath, authorizedPrincipalsFile)
+ tmpPath := fPath + ".tmp"
+ t, err := os.OpenFile(tmpPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o600)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ t.Close()
+ if err := util.Remove(tmpPath); err != nil {
+ log.Warn("Unable to remove temporary authorized principals file: %s: Error: %v", tmpPath, err)
+ }
+ }()
+
+ if setting.SSH.AuthorizedPrincipalsBackup {
+ isExist, err := util.IsExist(fPath)
+ if err != nil {
+ log.Error("Unable to check if %s exists. Error: %v", fPath, err)
+ return err
+ }
+ if isExist {
+ bakPath := fmt.Sprintf("%s_%d.gitea_bak", fPath, time.Now().Unix())
+ if err = util.CopyFile(fPath, bakPath); err != nil {
+ return err
+ }
+ }
+ }
+
+ if err := regeneratePrincipalKeys(ctx, t); err != nil {
+ return err
+ }
+
+ if err := t.Sync(); err != nil {
+ return err
+ }
+ if err := t.Close(); err != nil {
+ return err
+ }
+ return util.Rename(tmpPath, fPath)
+}
+
+func regeneratePrincipalKeys(ctx context.Context, t io.StringWriter) error {
+ if err := db.GetEngine(ctx).Where("type = ?", KeyTypePrincipal).Iterate(new(PublicKey), func(idx int, bean any) (err error) {
+ _, err = t.WriteString((bean.(*PublicKey)).AuthorizedString())
+ return err
+ }); err != nil {
+ return err
+ }
+
+ fPath := filepath.Join(setting.SSH.RootPath, authorizedPrincipalsFile)
+ isExist, err := util.IsExist(fPath)
+ if err != nil {
+ log.Error("Unable to check if %s exists. Error: %v", fPath, err)
+ return err
+ }
+ if isExist {
+ f, err := os.Open(fPath)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ line := scanner.Text()
+ if strings.HasPrefix(line, tplCommentPrefix) {
+ scanner.Scan()
+ continue
+ }
+ _, err = t.WriteString(line + "\n")
+ if err != nil {
+ return err
+ }
+ }
+ if err = scanner.Err(); err != nil {
+ return fmt.Errorf("regeneratePrincipalKeys scan: %w", err)
+ }
+ }
+ return nil
+}
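
Since regeneratePrincipalKeys only needs an io.StringWriter, the same code path can be pointed at an in-memory buffer, which is handy for previewing or testing. A sketch that only compiles inside this package, because the helper is unexported:

package asymkey

import (
	"context"
	"strings"
)

// principalsPreview renders what the authorized_principals file would
// contain without touching the filesystem; strings.Builder implements
// io.StringWriter, which is all regeneratePrincipalKeys requires.
func principalsPreview(ctx context.Context) (string, error) {
	var sb strings.Builder
	if err := regeneratePrincipalKeys(ctx, &sb); err != nil {
		return "", err
	}
	return sb.String(), nil
}
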
diff --git a/models/asymkey/ssh_key_deploy.go b/models/asymkey/ssh_key_deploy.go
new file mode 100644
index 0000000..923c502
--- /dev/null
+++ b/models/asymkey/ssh_key_deploy.go
@@ -0,0 +1,218 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package asymkey
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/builder"
+)
+
+// ________ .__ ____ __.
+// \______ \ ____ ______ | | ____ ___.__.| |/ _|____ ___.__.
+// | | \_/ __ \\____ \| | / _ < | || <_/ __ < | |
+// | ` \ ___/| |_> > |_( <_> )___ || | \ ___/\___ |
+// /_______ /\___ > __/|____/\____// ____||____|__ \___ > ____|
+// \/ \/|__| \/ \/ \/\/
+//
+// This file contains functions specific to DeployKeys
+
+// DeployKey represents deploy key information and its relation with repository.
+type DeployKey struct {
+ ID int64 `xorm:"pk autoincr"`
+ KeyID int64 `xorm:"UNIQUE(s) INDEX"`
+ RepoID int64 `xorm:"UNIQUE(s) INDEX"`
+ Name string
+ Fingerprint string
+ Content string `xorm:"-"`
+
+ Mode perm.AccessMode `xorm:"NOT NULL DEFAULT 1"`
+
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"updated"`
+ HasRecentActivity bool `xorm:"-"`
+ HasUsed bool `xorm:"-"`
+}
+
+// AfterLoad is invoked from XORM after setting the values of all fields of this object.
+func (key *DeployKey) AfterLoad() {
+ key.HasUsed = key.UpdatedUnix > key.CreatedUnix
+ key.HasRecentActivity = key.UpdatedUnix.AddDuration(7*24*time.Hour) > timeutil.TimeStampNow()
+}
+
+// GetContent gets associated public key content.
+func (key *DeployKey) GetContent(ctx context.Context) error {
+ pkey, err := GetPublicKeyByID(ctx, key.KeyID)
+ if err != nil {
+ return err
+ }
+ key.Content = pkey.Content
+ return nil
+}
+
+// IsReadOnly checks if the key can only be used for read operations; used by templates.
+func (key *DeployKey) IsReadOnly() bool {
+ return key.Mode == perm.AccessModeRead
+}
+
+func init() {
+ db.RegisterModel(new(DeployKey))
+}
+
+func checkDeployKey(ctx context.Context, keyID, repoID int64, name string) error {
+ // Note: We want error detail, not just true or false here.
+ has, err := db.GetEngine(ctx).
+ Where("key_id = ? AND repo_id = ?", keyID, repoID).
+ Get(new(DeployKey))
+ if err != nil {
+ return err
+ } else if has {
+ return ErrDeployKeyAlreadyExist{keyID, repoID}
+ }
+
+ has, err = db.GetEngine(ctx).
+ Where("repo_id = ? AND name = ?", repoID, name).
+ Get(new(DeployKey))
+ if err != nil {
+ return err
+ } else if has {
+ return ErrDeployKeyNameAlreadyUsed{repoID, name}
+ }
+
+ return nil
+}
+
+// addDeployKey adds new key-repo relation.
+func addDeployKey(ctx context.Context, keyID, repoID int64, name, fingerprint string, mode perm.AccessMode) (*DeployKey, error) {
+ if err := checkDeployKey(ctx, keyID, repoID, name); err != nil {
+ return nil, err
+ }
+
+ key := &DeployKey{
+ KeyID: keyID,
+ RepoID: repoID,
+ Name: name,
+ Fingerprint: fingerprint,
+ Mode: mode,
+ }
+ return key, db.Insert(ctx, key)
+}
+
+// HasDeployKey returns true if public key is a deploy key of given repository.
+func HasDeployKey(ctx context.Context, keyID, repoID int64) bool {
+ has, _ := db.GetEngine(ctx).
+ Where("key_id = ? AND repo_id = ?", keyID, repoID).
+ Get(new(DeployKey))
+ return has
+}
+
+// AddDeployKey adds a new deploy key to the database and the authorized_keys file.
+func AddDeployKey(ctx context.Context, repoID int64, name, content string, readOnly bool) (*DeployKey, error) {
+ fingerprint, err := CalcFingerprint(content)
+ if err != nil {
+ return nil, err
+ }
+
+ accessMode := perm.AccessModeRead
+ if !readOnly {
+ accessMode = perm.AccessModeWrite
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer committer.Close()
+
+ pkey, exist, err := db.Get[PublicKey](ctx, builder.Eq{"fingerprint": fingerprint})
+ if err != nil {
+ return nil, err
+ } else if exist {
+ if pkey.Type != KeyTypeDeploy {
+ return nil, ErrKeyAlreadyExist{0, fingerprint, ""}
+ }
+ } else {
+ // This is the first time this deploy key is used.
+ pkey = &PublicKey{
+ Fingerprint: fingerprint,
+ Mode: accessMode,
+ Type: KeyTypeDeploy,
+ Content: content,
+ Name: name,
+ }
+ if err = addKey(ctx, pkey); err != nil {
+ return nil, fmt.Errorf("addKey: %w", err)
+ }
+ }
+
+ key, err := addDeployKey(ctx, pkey.ID, repoID, name, pkey.Fingerprint, accessMode)
+ if err != nil {
+ return nil, err
+ }
+
+ return key, committer.Commit()
+}
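
A minimal usage sketch for AddDeployKey, assuming the caller first validates the raw key material with CheckPublicKeyString; the key name and read-only choice are placeholders:

package asymkey

import "context"

// addReadOnlyDeployKey shows the expected call sequence: normalize and
// validate the pasted key first, then register it with read-only access.
func addReadOnlyDeployKey(ctx context.Context, repoID int64, rawKey string) (*DeployKey, error) {
	content, err := CheckPublicKeyString(rawKey)
	if err != nil {
		return nil, err
	}
	return AddDeployKey(ctx, repoID, "ci-deploy", content, true /* readOnly */)
}
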
+
+// GetDeployKeyByID returns deploy key by given ID.
+func GetDeployKeyByID(ctx context.Context, id int64) (*DeployKey, error) {
+ key, exist, err := db.GetByID[DeployKey](ctx, id)
+ if err != nil {
+ return nil, err
+ } else if !exist {
+ return nil, ErrDeployKeyNotExist{id, 0, 0}
+ }
+ return key, nil
+}
+
+// GetDeployKeyByRepo returns deploy key by given public key ID and repository ID.
+func GetDeployKeyByRepo(ctx context.Context, keyID, repoID int64) (*DeployKey, error) {
+ key, exist, err := db.Get[DeployKey](ctx, builder.Eq{"key_id": keyID, "repo_id": repoID})
+ if err != nil {
+ return nil, err
+ } else if !exist {
+ return nil, ErrDeployKeyNotExist{0, keyID, repoID}
+ }
+ return key, nil
+}
+
+// IsDeployKeyExistByKeyID returns true if there is at least one deploy key with the given key ID.
+func IsDeployKeyExistByKeyID(ctx context.Context, keyID int64) (bool, error) {
+ return db.GetEngine(ctx).
+ Where("key_id = ?", keyID).
+ Get(new(DeployKey))
+}
+
+// UpdateDeployKeyCols updates deploy key information in the specified columns.
+func UpdateDeployKeyCols(ctx context.Context, key *DeployKey, cols ...string) error {
+ _, err := db.GetEngine(ctx).ID(key.ID).Cols(cols...).Update(key)
+ return err
+}
+
+// ListDeployKeysOptions are options for ListDeployKeys
+type ListDeployKeysOptions struct {
+ db.ListOptions
+ RepoID int64
+ KeyID int64
+ Fingerprint string
+}
+
+func (opt ListDeployKeysOptions) ToConds() builder.Cond {
+ cond := builder.NewCond()
+ if opt.RepoID != 0 {
+ cond = cond.And(builder.Eq{"repo_id": opt.RepoID})
+ }
+ if opt.KeyID != 0 {
+ cond = cond.And(builder.Eq{"key_id": opt.KeyID})
+ }
+ if opt.Fingerprint != "" {
+ cond = cond.And(builder.Eq{"fingerprint": opt.Fingerprint})
+ }
+ return cond
+}
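
ToConds is what lets this options struct plug into the generic finder used throughout the models package; paging comes from the embedded db.ListOptions. A hedged usage sketch, assuming db.Find accepts such an options value as it does elsewhere in this tree:

package asymkey

import (
	"context"

	"code.gitea.io/gitea/models/db"
)

// listRepoDeployKeys fetches every deploy key of a single repository:
// ToConds supplies the WHERE clause, ListAll disables paging.
func listRepoDeployKeys(ctx context.Context, repoID int64) ([]*DeployKey, error) {
	return db.Find[DeployKey](ctx, ListDeployKeysOptions{
		ListOptions: db.ListOptions{ListAll: true},
		RepoID:      repoID,
	})
}
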
diff --git a/models/asymkey/ssh_key_fingerprint.go b/models/asymkey/ssh_key_fingerprint.go
new file mode 100644
index 0000000..1ed3b5d
--- /dev/null
+++ b/models/asymkey/ssh_key_fingerprint.go
@@ -0,0 +1,89 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package asymkey
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/process"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+
+ "golang.org/x/crypto/ssh"
+ "xorm.io/builder"
+)
+
+// ___________.__ .__ __
+// \_ _____/|__| ____ ____ ________________________|__| _____/ |_
+// | __) | |/ \ / ___\_/ __ \_ __ \____ \_ __ \ |/ \ __\
+// | \ | | | \/ /_/ > ___/| | \/ |_> > | \/ | | \ |
+// \___ / |__|___| /\___ / \___ >__| | __/|__| |__|___| /__|
+// \/ \//_____/ \/ |__| \/
+//
+// This file contains functions for fingerprinting SSH keys
+//
+// The database is used in checkKeyFingerprint; however, most of these functions probably belong in a module.
+
+// checkKeyFingerprint only checks whether the key fingerprint has already been used as a public key;
+// it is OK to use the same key as a deploy key for multiple repositories/users.
+func checkKeyFingerprint(ctx context.Context, fingerprint string) error {
+ has, err := db.Exist[PublicKey](ctx, builder.Eq{"fingerprint": fingerprint})
+ if err != nil {
+ return err
+ } else if has {
+ return ErrKeyAlreadyExist{0, fingerprint, ""}
+ }
+ return nil
+}
+
+func calcFingerprintSSHKeygen(publicKeyContent string) (string, error) {
+ // Calculate fingerprint.
+ tmpPath, err := writeTmpKeyFile(publicKeyContent)
+ if err != nil {
+ return "", err
+ }
+ defer func() {
+ if err := util.Remove(tmpPath); err != nil {
+ log.Warn("Unable to remove temporary key file: %s: Error: %v", tmpPath, err)
+ }
+ }()
+ stdout, stderr, err := process.GetManager().Exec("AddPublicKey", "ssh-keygen", "-lf", tmpPath)
+ if err != nil {
+ if strings.Contains(stderr, "is not a public key file") {
+ return "", ErrKeyUnableVerify{stderr}
+ }
+ return "", util.NewInvalidArgumentErrorf("'ssh-keygen -lf %s' failed with error '%s': %s", tmpPath, err, stderr)
+ } else if len(stdout) < 2 {
+ return "", util.NewInvalidArgumentErrorf("not enough output for calculating fingerprint: %s", stdout)
+ }
+ return strings.Split(stdout, " ")[1], nil
+}
+
+func calcFingerprintNative(publicKeyContent string) (string, error) {
+ // Calculate fingerprint.
+ pk, _, _, _, err := ssh.ParseAuthorizedKey([]byte(publicKeyContent))
+ if err != nil {
+ return "", err
+ }
+ return ssh.FingerprintSHA256(pk), nil
+}
+
+// CalcFingerprint calculate public key's fingerprint
+func CalcFingerprint(publicKeyContent string) (string, error) {
+ // Call the method based on configuration
+ useNative := setting.SSH.KeygenPath == ""
+ calcFn := util.Iif(useNative, calcFingerprintNative, calcFingerprintSSHKeygen)
+ fp, err := calcFn(publicKeyContent)
+ if err != nil {
+ if IsErrKeyUnableVerify(err) {
+ return "", err
+ }
+ return "", fmt.Errorf("CalcFingerprint(%s): %w", util.Iif(useNative, "native", "ssh-keygen"), err)
+ }
+ return fp, nil
+}
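
Both backends produce OpenSSH's SHA256 fingerprint notation, so callers never need to know which one ran. A usage sketch, reusing the ed25519 test key that appears later in this change:

package asymkey

import "fmt"

// fingerprintDemo prints a fingerprint of the form "SHA256:<base64 digest>"
// for a known-good key, whichever backend is configured.
func fingerprintDemo() {
	fp, err := CalcFingerprint("ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEHjnNEfE88W1pvBLdV3otv28x760gdmPao3lVD5uAt9 test@rekor.dev")
	if err != nil {
		fmt.Println("CalcFingerprint:", err)
		return
	}
	fmt.Println(fp)
}
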
diff --git a/models/asymkey/ssh_key_object_verification.go b/models/asymkey/ssh_key_object_verification.go
new file mode 100644
index 0000000..5ad6fdb
--- /dev/null
+++ b/models/asymkey/ssh_key_object_verification.go
@@ -0,0 +1,85 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package asymkey
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+
+ "github.com/42wim/sshsig"
+)
+
+// ParseObjectWithSSHSignature checks if the signature is valid against the keystore.
+func ParseObjectWithSSHSignature(ctx context.Context, c *GitObject, committer *user_model.User) *ObjectVerification {
+ // Now try to associate the signature with the committer, if present
+ if committer.ID != 0 {
+ keys, err := db.Find[PublicKey](ctx, FindPublicKeyOptions{
+ OwnerID: committer.ID,
+ NotKeytype: KeyTypePrincipal,
+ })
+ if err != nil { // Skip verification: failed to get the SSH keys of the user
+ log.Error("ListPublicKeys: %v", err)
+ return &ObjectVerification{
+ CommittingUser: committer,
+ Verified: false,
+ Reason: "gpg.error.failed_retrieval_gpg_keys",
+ }
+ }
+
+ committerEmailAddresses, err := user_model.GetEmailAddresses(ctx, committer.ID)
+ if err != nil {
+ log.Error("GetEmailAddresses: %v", err)
+ }
+
+ // Add the noreply email address as a verified address.
+ committerEmailAddresses = append(committerEmailAddresses, &user_model.EmailAddress{
+ IsActivated: true,
+ Email: committer.GetPlaceholderEmail(),
+ })
+
+ activated := false
+ for _, e := range committerEmailAddresses {
+ if e.IsActivated && strings.EqualFold(e.Email, c.Committer.Email) {
+ activated = true
+ break
+ }
+ }
+
+ for _, k := range keys {
+ if k.Verified && activated {
+ commitVerification := verifySSHObjectVerification(c.Signature.Signature, c.Signature.Payload, k, committer, committer, c.Committer.Email)
+ if commitVerification != nil {
+ return commitVerification
+ }
+ }
+ }
+ }
+
+ return &ObjectVerification{
+ CommittingUser: committer,
+ Verified: false,
+ Reason: NoKeyFound,
+ }
+}
+
+func verifySSHObjectVerification(sig, payload string, k *PublicKey, committer, signer *user_model.User, email string) *ObjectVerification {
+ if err := sshsig.Verify(bytes.NewBuffer([]byte(payload)), []byte(sig), []byte(k.Content), "git"); err != nil {
+ return nil
+ }
+
+ return &ObjectVerification{ // Everything is ok
+ CommittingUser: committer,
+ Verified: true,
+ Reason: fmt.Sprintf("%s / %s", signer.Name, k.Fingerprint),
+ SigningUser: signer,
+ SigningSSHKey: k,
+ SigningEmail: email,
+ }
+}
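
Note the hard-coded "git" namespace: git itself signs commits with `ssh-keygen -Y sign -n git`, so a signature made for any other namespace will not verify here. The equivalent standalone check looks like this:

package sketch

import (
	"bytes"

	"github.com/42wim/sshsig"
)

// verifyGitSignature checks an armored SSH signature over a commit payload.
// The namespace must be "git" to match what git passes to ssh-keygen when
// it creates the signature.
func verifyGitSignature(payload, armoredSig, publicKey []byte) error {
	return sshsig.Verify(bytes.NewReader(payload), armoredSig, publicKey, "git")
}
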
diff --git a/models/asymkey/ssh_key_object_verification_test.go b/models/asymkey/ssh_key_object_verification_test.go
new file mode 100644
index 0000000..0d5ebab
--- /dev/null
+++ b/models/asymkey/ssh_key_object_verification_test.go
@@ -0,0 +1,153 @@
+// Copyright 2023 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package asymkey
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/test"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestParseCommitWithSSHSignature(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+ sshKey := unittest.AssertExistsAndLoadBean(t, &PublicKey{ID: 1000, OwnerID: 2})
+
+ t.Run("No commiter", func(t *testing.T) {
+ o := commitToGitObject(&git.Commit{})
+ commitVerification := ParseObjectWithSSHSignature(db.DefaultContext, &o, &user_model.User{})
+ assert.False(t, commitVerification.Verified)
+ assert.Equal(t, NoKeyFound, commitVerification.Reason)
+ })
+
+ t.Run("Commiter without keys", func(t *testing.T) {
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+
+ o := commitToGitObject(&git.Commit{Committer: &git.Signature{Email: user.Email}})
+ commitVerification := ParseObjectWithSSHSignature(db.DefaultContext, &o, user)
+ assert.False(t, commitVerification.Verified)
+ assert.Equal(t, NoKeyFound, commitVerification.Reason)
+ })
+
+ t.Run("Correct signature with wrong email", func(t *testing.T) {
+ gitCommit := &git.Commit{
+ Committer: &git.Signature{
+ Email: "non-existent",
+ },
+ Signature: &git.ObjectSignature{
+ Payload: `tree 2d491b2985a7ff848d5c02748e7ea9f9f7619f9f
+parent 45b03601635a1f463b81963a4022c7f87ce96ef9
+author user2 <non-existent> 1699710556 +0100
+committer user2 <non-existent> 1699710556 +0100
+
+Using email that isn't known to Forgejo
+`,
+ Signature: `-----BEGIN SSH SIGNATURE-----
+U1NIU0lHAAAAAQAAADMAAAALc3NoLWVkMjU1MTkAAAAgoGSe9Zy7Ez9bSJcaTNjh/Y7p95
+f5DujjqkpzFRtw6CEAAAADZ2l0AAAAAAAAAAZzaGE1MTIAAABTAAAAC3NzaC1lZDI1NTE5
+AAAAQIMufOuSjZeDUujrkVK4sl7ICa0WwEftas8UAYxx0Thdkiw2qWjR1U1PKfTLm16/w8
+/bS1LX1lZNuzm2LR2qEgw=
+-----END SSH SIGNATURE-----
+`,
+ },
+ }
+ o := commitToGitObject(gitCommit)
+ commitVerification := ParseObjectWithSSHSignature(db.DefaultContext, &o, user2)
+ assert.False(t, commitVerification.Verified)
+ assert.Equal(t, NoKeyFound, commitVerification.Reason)
+ })
+
+ t.Run("Incorrect signature with correct email", func(t *testing.T) {
+ gitCommit := &git.Commit{
+ Committer: &git.Signature{
+ Email: "user2@example.com",
+ },
+ Signature: &git.ObjectSignature{
+ Payload: `tree 853694aae8816094a0d875fee7ea26278dbf5d0f
+parent c2780d5c313da2a947eae22efd7dacf4213f4e7f
+author user2 <user2@example.com> 1699707877 +0100
+committer user2 <user2@example.com> 1699707877 +0100
+
+Add content
+`,
+ Signature: `-----BEGIN SSH SIGNATURE-----`,
+ },
+ }
+
+ o := commitToGitObject(gitCommit)
+ commitVerification := ParseObjectWithSSHSignature(db.DefaultContext, &o, user2)
+ assert.False(t, commitVerification.Verified)
+ assert.Equal(t, NoKeyFound, commitVerification.Reason)
+ })
+
+ t.Run("Valid signature with correct email", func(t *testing.T) {
+ gitCommit := &git.Commit{
+ Committer: &git.Signature{
+ Email: "user2@example.com",
+ },
+ Signature: &git.ObjectSignature{
+ Payload: `tree 853694aae8816094a0d875fee7ea26278dbf5d0f
+parent c2780d5c313da2a947eae22efd7dacf4213f4e7f
+author user2 <user2@example.com> 1699707877 +0100
+committer user2 <user2@example.com> 1699707877 +0100
+
+Add content
+`,
+ Signature: `-----BEGIN SSH SIGNATURE-----
+U1NIU0lHAAAAAQAAADMAAAALc3NoLWVkMjU1MTkAAAAgoGSe9Zy7Ez9bSJcaTNjh/Y7p95
+f5DujjqkpzFRtw6CEAAAADZ2l0AAAAAAAAAAZzaGE1MTIAAABTAAAAC3NzaC1lZDI1NTE5
+AAAAQBe2Fwk/FKY3SBCnG6jSYcO6ucyahp2SpQ/0P+otslzIHpWNW8cQ0fGLdhhaFynJXQ
+fs9cMpZVM9BfIKNUSO8QY=
+-----END SSH SIGNATURE-----
+`,
+ },
+ }
+
+ o := commitToGitObject(gitCommit)
+ commitVerification := ParseObjectWithSSHSignature(db.DefaultContext, &o, user2)
+ assert.True(t, commitVerification.Verified)
+ assert.Equal(t, "user2 / SHA256:TKfwbZMR7e9OnlV2l1prfah1TXH8CmqR0PvFEXVCXA4", commitVerification.Reason)
+ assert.Equal(t, sshKey, commitVerification.SigningSSHKey)
+ })
+
+ t.Run("Valid signature with noreply email", func(t *testing.T) {
+ defer test.MockVariableValue(&setting.Service.NoReplyAddress, "noreply.example.com")()
+
+ gitCommit := &git.Commit{
+ Committer: &git.Signature{
+ Email: "user2@noreply.example.com",
+ },
+ Signature: &git.ObjectSignature{
+ Payload: `tree 4836c7f639f37388bab4050ef5c97bbbd54272fc
+parent 795be1b0117ea5c65456050bb9fd84744d4fd9c6
+author user2 <user2@noreply.example.com> 1699709594 +0100
+committer user2 <user2@noreply.example.com> 1699709594 +0100
+
+Commit with noreply
+`,
+ Signature: `-----BEGIN SSH SIGNATURE-----
+U1NIU0lHAAAAAQAAADMAAAALc3NoLWVkMjU1MTkAAAAgoGSe9Zy7Ez9bSJcaTNjh/Y7p95
+f5DujjqkpzFRtw6CEAAAADZ2l0AAAAAAAAAAZzaGE1MTIAAABTAAAAC3NzaC1lZDI1NTE5
+AAAAQJz83KKxD6Bz/ZvNpqkA3RPOSQ4LQ5FfEItbtoONkbwV9wAWMnmBqgggo/lnXCJ3oq
+muPLbvEduU+Ze/1Ol1pgk=
+-----END SSH SIGNATURE-----
+`,
+ },
+ }
+
+ o := commitToGitObject(gitCommit)
+ commitVerification := ParseObjectWithSSHSignature(db.DefaultContext, &o, user2)
+ assert.True(t, commitVerification.Verified)
+ assert.Equal(t, "user2 / SHA256:TKfwbZMR7e9OnlV2l1prfah1TXH8CmqR0PvFEXVCXA4", commitVerification.Reason)
+ assert.Equal(t, sshKey, commitVerification.SigningSSHKey)
+ })
+}
diff --git a/models/asymkey/ssh_key_parse.go b/models/asymkey/ssh_key_parse.go
new file mode 100644
index 0000000..94b1cf1
--- /dev/null
+++ b/models/asymkey/ssh_key_parse.go
@@ -0,0 +1,312 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package asymkey
+
+import (
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/asn1"
+ "encoding/base64"
+ "encoding/binary"
+ "encoding/pem"
+ "fmt"
+ "math/big"
+ "os"
+ "strconv"
+ "strings"
+
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/process"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+
+ "golang.org/x/crypto/ssh"
+)
+
+// ____ __. __________
+// | |/ _|____ ___.__. \______ \_____ _______ ______ ___________
+// | <_/ __ < | | | ___/\__ \\_ __ \/ ___// __ \_ __ \
+// | | \ ___/\___ | | | / __ \| | \/\___ \\ ___/| | \/
+// |____|__ \___ > ____| |____| (____ /__| /____ >\___ >__|
+// \/ \/\/ \/ \/ \/
+//
+// This file contains functions for parsing ssh-keys
+//
+// TODO: Consider if these functions belong in models - no other models functions call them or are called by them.
+// They may belong in a service or a module.
+
+const ssh2keyStart = "---- BEGIN SSH2 PUBLIC KEY ----"
+
+func extractTypeFromBase64Key(key string) (string, error) {
+ b, err := base64.StdEncoding.DecodeString(key)
+ if err != nil || len(b) < 4 {
+ return "", fmt.Errorf("invalid key format: %w", err)
+ }
+
+ keyLength := int(binary.BigEndian.Uint32(b))
+ if len(b) < 4+keyLength {
+ return "", fmt.Errorf("invalid key format: not enough length %d", keyLength)
+ }
+
+ return string(b[4 : 4+keyLength]), nil
+}
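
extractTypeFromBase64Key leans on the RFC 4253 wire encoding: the decoded blob of an OpenSSH public key starts with a big-endian uint32 length followed by the algorithm name, which is why every ed25519 key begins with the base64 prefix "AAAAC3NzaC1lZDI1NTE5". A worked example:

package sketch

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

// wireFormatDemo decodes the blob of an ed25519 key and recovers the
// algorithm name from the length-prefixed first field.
func wireFormatDemo() {
	b, _ := base64.StdEncoding.DecodeString(
		"AAAAC3NzaC1lZDI1NTE5AAAAIEHjnNEfE88W1pvBLdV3otv28x760gdmPao3lVD5uAt9")
	n := binary.BigEndian.Uint32(b)
	fmt.Println(n, string(b[4:4+n])) // prints: 11 ssh-ed25519
}
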
+
+// parseKeyString parses any key string in OpenSSH or SSH2 format to a clean OpenSSH string (RFC 4253).
+func parseKeyString(content string) (string, error) {
+ // remove whitespace at start and end
+ content = strings.TrimSpace(content)
+
+ var keyType, keyContent, keyComment string
+
+ if strings.HasPrefix(content, ssh2keyStart) {
+ // Parse SSH2 file format.
+
+ // Transform all legal line endings to a single "\n".
+ content = strings.NewReplacer("\r\n", "\n", "\r", "\n").Replace(content)
+
+ lines := strings.Split(content, "\n")
+ continuationLine := false
+
+ for _, line := range lines {
+ // Skip lines that:
+ // 1) are a continuation of the previous line,
+ // 2) contain ":" as those are comment lines,
+ // 3) contain "-" as those are begin and end tags
+ if continuationLine || strings.ContainsAny(line, ":-") {
+ continuationLine = strings.HasSuffix(line, "\\")
+ } else {
+ keyContent += line
+ }
+ }
+
+ t, err := extractTypeFromBase64Key(keyContent)
+ if err != nil {
+ return "", fmt.Errorf("extractTypeFromBase64Key: %w", err)
+ }
+ keyType = t
+ } else {
+ if strings.Contains(content, "-----BEGIN") {
+ // Convert PEM Keys to OpenSSH format
+ // Transform all legal line endings to a single "\n".
+ content = strings.NewReplacer("\r\n", "\n", "\r", "\n").Replace(content)
+
+ block, _ := pem.Decode([]byte(content))
+ if block == nil {
+ return "", fmt.Errorf("failed to parse PEM block containing the public key")
+ }
+ if strings.Contains(block.Type, "PRIVATE") {
+ return "", ErrKeyIsPrivate
+ }
+
+ pub, err := x509.ParsePKIXPublicKey(block.Bytes)
+ if err != nil {
+ var pk rsa.PublicKey
+ _, err2 := asn1.Unmarshal(block.Bytes, &pk)
+ if err2 != nil {
+ return "", fmt.Errorf("failed to parse DER encoded public key as either PKIX or PEM RSA Key: %v %w", err, err2)
+ }
+ pub = &pk
+ }
+
+ sshKey, err := ssh.NewPublicKey(pub)
+ if err != nil {
+ return "", fmt.Errorf("unable to convert to ssh public key: %w", err)
+ }
+ content = string(ssh.MarshalAuthorizedKey(sshKey))
+ }
+ // Parse OpenSSH format.
+
+ // Remove all newlines
+ content = strings.NewReplacer("\r\n", "", "\n", "").Replace(content)
+
+ parts := strings.SplitN(content, " ", 3)
+ switch len(parts) {
+ case 0:
+ return "", util.NewInvalidArgumentErrorf("empty key")
+ case 1:
+ keyContent = parts[0]
+ case 2:
+ keyType = parts[0]
+ keyContent = parts[1]
+ default:
+ keyType = parts[0]
+ keyContent = parts[1]
+ keyComment = parts[2]
+ }
+
+ // If keyType is not given, extract it from content. If given, validate it.
+ t, err := extractTypeFromBase64Key(keyContent)
+ if err != nil {
+ return "", fmt.Errorf("extractTypeFromBase64Key: %w", err)
+ }
+ if len(keyType) == 0 {
+ keyType = t
+ } else if keyType != t {
+ return "", fmt.Errorf("key type and content does not match: %s - %s", keyType, t)
+ }
+ }
+ // Finally we need to check whether we can actually read the proposed key:
+ _, _, _, _, err := ssh.ParseAuthorizedKey([]byte(keyType + " " + keyContent + " " + keyComment))
+ if err != nil {
+ return "", fmt.Errorf("invalid ssh public key: %w", err)
+ }
+ return keyType + " " + keyContent + " " + keyComment, nil
+}
+
+// CheckPublicKeyString checks if the given public key string is recognized by SSH.
+// It returns the actual public key line on success.
+func CheckPublicKeyString(content string) (_ string, err error) {
+ content, err = parseKeyString(content)
+ if err != nil {
+ return "", err
+ }
+
+ content = strings.TrimRight(content, "\n\r")
+ if strings.ContainsAny(content, "\n\r") {
+ return "", util.NewInvalidArgumentErrorf("only a single line with a single key please")
+ }
+
+ // remove any unnecessary whitespace now
+ content = strings.TrimSpace(content)
+
+ if !setting.SSH.MinimumKeySizeCheck {
+ return content, nil
+ }
+
+ var (
+ fnName string
+ keyType string
+ length int
+ )
+ if len(setting.SSH.KeygenPath) == 0 {
+ fnName = "SSHNativeParsePublicKey"
+ keyType, length, err = SSHNativeParsePublicKey(content)
+ } else {
+ fnName = "SSHKeyGenParsePublicKey"
+ keyType, length, err = SSHKeyGenParsePublicKey(content)
+ }
+ if err != nil {
+ return "", fmt.Errorf("%s: %w", fnName, err)
+ }
+ log.Trace("Key info [native: %v]: %s-%d", setting.SSH.StartBuiltinServer, keyType, length)
+
+ if minLen, found := setting.SSH.MinimumKeySizes[keyType]; found && length >= minLen {
+ return content, nil
+ } else if found && length < minLen {
+ return "", fmt.Errorf("key length is not enough: got %d, needs %d", length, minLen)
+ }
+ return "", fmt.Errorf("key type is not allowed: %s", keyType)
+}
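
The final check consults setting.SSH.MinimumKeySizes, a map from key type to minimum bit length; a type missing from the map is rejected outright. A sketch of that policy in isolation; the sizes below are illustrative, not Forgejo's shipped defaults:

package sketch

import "fmt"

// checkKeySize mirrors the policy at the end of CheckPublicKeyString:
// unknown key types are forbidden, known types must meet a minimum length.
// The map values here are placeholders for illustration.
func checkKeySize(keyType string, length int) error {
	minimumKeySizes := map[string]int{"rsa": 3072, "ed25519": 256}
	minLen, found := minimumKeySizes[keyType]
	switch {
	case !found:
		return fmt.Errorf("key type is not allowed: %s", keyType)
	case length < minLen:
		return fmt.Errorf("key length is not enough: got %d, needs %d", length, minLen)
	}
	return nil
}
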
+
+// SSHNativeParsePublicKey extracts the key type and length using the golang SSH library.
+func SSHNativeParsePublicKey(keyLine string) (string, int, error) {
+ fields := strings.Fields(keyLine)
+ if len(fields) < 2 {
+ return "", 0, fmt.Errorf("not enough fields in public key line: %s", keyLine)
+ }
+
+ raw, err := base64.StdEncoding.DecodeString(fields[1])
+ if err != nil {
+ return "", 0, err
+ }
+
+ pkey, err := ssh.ParsePublicKey(raw)
+ if err != nil {
+ if strings.Contains(err.Error(), "ssh: unknown key algorithm") {
+ return "", 0, ErrKeyUnableVerify{err.Error()}
+ }
+ return "", 0, fmt.Errorf("ParsePublicKey: %w", err)
+ }
+
+ // The ssh library can parse the key, so next we find out exactly what kind of key we have.
+ switch pkey.Type() {
+ case ssh.KeyAlgoDSA:
+ rawPub := struct {
+ Name string
+ P, Q, G, Y *big.Int
+ }{}
+ if err := ssh.Unmarshal(pkey.Marshal(), &rawPub); err != nil {
+ return "", 0, err
+ }
+ // as per https://bugzilla.mindrot.org/show_bug.cgi?id=1647 we should never
+ // see dsa keys != 1024 bit, but since it seems to work, we do not check that here
+ return "dsa", rawPub.P.BitLen(), nil // use P as per crypto/dsa/dsa.go (is L)
+ case ssh.KeyAlgoRSA:
+ rawPub := struct {
+ Name string
+ E *big.Int
+ N *big.Int
+ }{}
+ if err := ssh.Unmarshal(pkey.Marshal(), &rawPub); err != nil {
+ return "", 0, err
+ }
+ return "rsa", rawPub.N.BitLen(), nil // use N as per crypto/rsa/rsa.go (is bits)
+ case ssh.KeyAlgoECDSA256:
+ return "ecdsa", 256, nil
+ case ssh.KeyAlgoECDSA384:
+ return "ecdsa", 384, nil
+ case ssh.KeyAlgoECDSA521:
+ return "ecdsa", 521, nil
+ case ssh.KeyAlgoED25519:
+ return "ed25519", 256, nil
+ case ssh.KeyAlgoSKECDSA256:
+ return "ecdsa-sk", 256, nil
+ case ssh.KeyAlgoSKED25519:
+ return "ed25519-sk", 256, nil
+ }
+ return "", 0, fmt.Errorf("unsupported key length detection for type: %s", pkey.Type())
+}
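
A usage sketch for the native parser, again with the ed25519 test key from this change; ed25519 keys always report a fixed 256-bit length:

package asymkey

import "fmt"

// nativeParseDemo shows the (type, length) pair returned for an ed25519 key.
func nativeParseDemo() {
	keyType, bits, err := SSHNativeParsePublicKey(
		"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEHjnNEfE88W1pvBLdV3otv28x760gdmPao3lVD5uAt9")
	if err != nil {
		fmt.Println("SSHNativeParsePublicKey:", err)
		return
	}
	fmt.Println(keyType, bits) // prints: ed25519 256
}
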
+
+// writeTmpKeyFile writes key content to a temporary file
+// and returns the name of that file, along with any possible errors.
+func writeTmpKeyFile(content string) (string, error) {
+ tmpFile, err := os.CreateTemp(setting.SSH.KeyTestPath, "gitea_keytest")
+ if err != nil {
+ return "", fmt.Errorf("TempFile: %w", err)
+ }
+ defer tmpFile.Close()
+
+ if _, err = tmpFile.WriteString(content); err != nil {
+ return "", fmt.Errorf("WriteString: %w", err)
+ }
+ return tmpFile.Name(), nil
+}
+
+// SSHKeyGenParsePublicKey extracts key type and length using ssh-keygen.
+func SSHKeyGenParsePublicKey(key string) (string, int, error) {
+ tmpName, err := writeTmpKeyFile(key)
+ if err != nil {
+ return "", 0, fmt.Errorf("writeTmpKeyFile: %w", err)
+ }
+ defer func() {
+ if err := util.Remove(tmpName); err != nil {
+ log.Warn("Unable to remove temporary key file: %s: Error: %v", tmpName, err)
+ }
+ }()
+
+ keygenPath := setting.SSH.KeygenPath
+ if len(keygenPath) == 0 {
+ keygenPath = "ssh-keygen"
+ }
+
+ stdout, stderr, err := process.GetManager().Exec("SSHKeyGenParsePublicKey", keygenPath, "-lf", tmpName)
+ if err != nil {
+ return "", 0, fmt.Errorf("fail to parse public key: %s - %s", err, stderr)
+ }
+ if strings.Contains(stdout, "is not a public key file") {
+ return "", 0, ErrKeyUnableVerify{stdout}
+ }
+
+ fields := strings.Split(stdout, " ")
+ if len(fields) < 4 {
+ return "", 0, fmt.Errorf("invalid public key line: %s", stdout)
+ }
+
+ keyType := strings.Trim(fields[len(fields)-1], "()\r\n")
+ length, err := strconv.ParseInt(fields[0], 10, 32)
+ if err != nil {
+ return "", 0, err
+ }
+ return strings.ToLower(keyType), int(length), nil
+}
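
For reference, `ssh-keygen -lf` prints a line such as `256 SHA256:<digest> comment (ED25519)`: the bit length is the first field and the type is the parenthesized last field, which is exactly what the function above extracts. The same parsing in isolation:

package sketch

import (
	"fmt"
	"strconv"
	"strings"
)

// parseKeygenLine extracts (type, bits) from one line of `ssh-keygen -lf`
// output, e.g. "256 SHA256:abc... test@rekor.dev (ED25519)".
func parseKeygenLine(line string) (string, int, error) {
	fields := strings.Fields(line)
	if len(fields) < 4 {
		return "", 0, fmt.Errorf("invalid public key line: %s", line)
	}
	bits, err := strconv.Atoi(fields[0])
	if err != nil {
		return "", 0, err
	}
	keyType := strings.Trim(fields[len(fields)-1], "()")
	return strings.ToLower(keyType), bits, nil
}
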
diff --git a/models/asymkey/ssh_key_principals.go b/models/asymkey/ssh_key_principals.go
new file mode 100644
index 0000000..4e7dee2
--- /dev/null
+++ b/models/asymkey/ssh_key_principals.go
@@ -0,0 +1,96 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package asymkey
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+)
+
+// AddPrincipalKey adds a new principal to the database and the authorized_principals file.
+func AddPrincipalKey(ctx context.Context, ownerID int64, content string, authSourceID int64) (*PublicKey, error) {
+ dbCtx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer committer.Close()
+
+ // Principals cannot be duplicated.
+ has, err := db.GetEngine(dbCtx).
+ Where("content = ? AND type = ?", content, KeyTypePrincipal).
+ Get(new(PublicKey))
+ if err != nil {
+ return nil, err
+ } else if has {
+ return nil, ErrKeyAlreadyExist{0, "", content}
+ }
+
+ key := &PublicKey{
+ OwnerID: ownerID,
+ Name: content,
+ Content: content,
+ Mode: perm.AccessModeWrite,
+ Type: KeyTypePrincipal,
+ LoginSourceID: authSourceID,
+ }
+ if err = db.Insert(dbCtx, key); err != nil {
+ return nil, fmt.Errorf("addKey: %w", err)
+ }
+
+ if err = committer.Commit(); err != nil {
+ return nil, err
+ }
+
+ // Close the committer early: the transaction has already been committed, and the
+ // rewrite below must run outside any session scope (see RewriteAllPrincipalKeys).
+ committer.Close()
+
+ return key, RewriteAllPrincipalKeys(ctx)
+}
+
+// CheckPrincipalKeyString strips spaces and returns an error if the given principal contains newlines
+func CheckPrincipalKeyString(ctx context.Context, user *user_model.User, content string) (_ string, err error) {
+ if setting.SSH.Disabled {
+ return "", db.ErrSSHDisabled{}
+ }
+
+ content = strings.TrimSpace(content)
+ if strings.ContainsAny(content, "\r\n") {
+ return "", util.NewInvalidArgumentErrorf("only a single line with a single principal please")
+ }
+
+ // Check the content against all allowed principal sources (email, username or anything);
+ // if any of them matches, the principal is accepted.
+ for _, v := range setting.SSH.AuthorizedPrincipalsAllow {
+ switch v {
+ case "anything":
+ return content, nil
+ case "email":
+ emails, err := user_model.GetEmailAddresses(ctx, user.ID)
+ if err != nil {
+ return "", err
+ }
+ for _, email := range emails {
+ if !email.IsActivated {
+ continue
+ }
+ if content == email.Email {
+ return content, nil
+ }
+ }
+
+ case "username":
+ if content == user.Name {
+ return content, nil
+ }
+ }
+ }
+
+ return "", fmt.Errorf("didn't match allowed principals: %s", setting.SSH.AuthorizedPrincipalsAllow)
+}
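
The matching rules above reduce to a small predicate over the configured allow-list. A sketch with the database lookups replaced by plain parameters (the user data is hypothetical):

package sketch

// principalAllowed reproduces the switch in CheckPrincipalKeyString:
// "anything" accepts every principal, "email" matches any of the user's
// activated addresses, "username" matches the login name.
func principalAllowed(allow []string, principal, username string, activatedEmails []string) bool {
	for _, v := range allow {
		switch v {
		case "anything":
			return true
		case "email":
			for _, e := range activatedEmails {
				if principal == e {
					return true
				}
			}
		case "username":
			if principal == username {
				return true
			}
		}
	}
	return false
}
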
diff --git a/models/asymkey/ssh_key_test.go b/models/asymkey/ssh_key_test.go
new file mode 100644
index 0000000..2625d6a
--- /dev/null
+++ b/models/asymkey/ssh_key_test.go
@@ -0,0 +1,513 @@
+// Copyright 2016 The Gogs Authors. All rights reserved.
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package asymkey
+
+import (
+ "bytes"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/modules/setting"
+
+ "github.com/42wim/sshsig"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_SSHParsePublicKey(t *testing.T) {
+ testCases := []struct {
+ name string
+ skipSSHKeygen bool
+ keyType string
+ length int
+ content string
+ }{
+ {"rsa-1024", false, "rsa", 1024, "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDAu7tvIvX6ZHrRXuZNfkR3XLHSsuCK9Zn3X58lxBcQzuo5xZgB6vRwwm/QtJuF+zZPtY5hsQILBLmF+BZ5WpKZp1jBeSjH2G7lxet9kbcH+kIVj0tPFEoyKI9wvWqIwC4prx/WVk2wLTJjzBAhyNxfEq7C9CeiX9pQEbEqJfkKCQ== nocomment\n"},
+ {"rsa-2048", false, "rsa", 2048, "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDMZXh+1OBUwSH9D45wTaxErQIN9IoC9xl7MKJkqvTvv6O5RR9YW/IK9FbfjXgXsppYGhsCZo1hFOOsXHMnfOORqu/xMDx4yPuyvKpw4LePEcg4TDipaDFuxbWOqc/BUZRZcXu41QAWfDLrInwsltWZHSeG7hjhpacl4FrVv9V1pS6Oc5Q1NxxEzTzuNLS/8diZrTm/YAQQ/+B+mzWI3zEtF4miZjjAljWd1LTBPvU23d29DcBmmFahcZ441XZsTeAwGxG/Q6j8NgNXj9WxMeWwxXV2jeAX/EBSpZrCVlCQ1yJswT6xCp8TuBnTiGWYMBNTbOZvPC4e0WI2/yZW/s5F nocomment"},
+ {"ecdsa-256", false, "ecdsa", 256, "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFQacN3PrOll7PXmN5B/ZNVahiUIqI05nbBlZk1KXsO3d06ktAWqbNflv2vEmA38bTFTfJ2sbn2B5ksT52cDDbA= nocomment"},
+ {"ecdsa-384", false, "ecdsa", 384, "ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBINmioV+XRX1Fm9Qk2ehHXJ2tfVxW30ypUWZw670Zyq5GQfBAH6xjygRsJ5wWsHXBsGYgFUXIHvMKVAG1tpw7s6ax9oA+dJOJ7tj+vhn8joFqT+sg3LYHgZkHrfqryRasQ== nocomment"},
+ {"ecdsa-sk", true, "ecdsa-sk", 256, "sk-ecdsa-sha2-nistp256@openssh.com AAAAInNrLWVjZHNhLXNoYTItbmlzdHAyNTZAb3BlbnNzaC5jb20AAAAIbmlzdHAyNTYAAABBBGXEEzWmm1dxb+57RoK5KVCL0w2eNv9cqJX2AGGVlkFsVDhOXHzsadS3LTK4VlEbbrDMJdoti9yM8vclA8IeRacAAAAEc3NoOg== nocomment"},
+ {"ed25519-sk", true, "ed25519-sk", 256, "sk-ssh-ed25519@openssh.com AAAAGnNrLXNzaC1lZDI1NTE5QG9wZW5zc2guY29tAAAAIE7kM1R02+4ertDKGKEDcKG0s+2vyDDcIvceJ0Gqv5f1AAAABHNzaDo= nocomment"},
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ t.Run("Native", func(t *testing.T) {
+ keyTypeN, lengthN, err := SSHNativeParsePublicKey(tc.content)
+ require.NoError(t, err)
+ assert.Equal(t, tc.keyType, keyTypeN)
+ assert.EqualValues(t, tc.length, lengthN)
+ })
+ if tc.skipSSHKeygen {
+ return
+ }
+ t.Run("SSHKeygen", func(t *testing.T) {
+ keyTypeK, lengthK, err := SSHKeyGenParsePublicKey(tc.content)
+ if err != nil {
+ // Some ssh-keygen versions do not support the ecdsa format.
+ if !strings.Contains(err.Error(), "line 1 too long:") {
+ assert.FailNow(t, "%v", err)
+ }
+ }
+ assert.Equal(t, tc.keyType, keyTypeK)
+ assert.EqualValues(t, tc.length, lengthK)
+ })
+ t.Run("SSHParseKeyNative", func(t *testing.T) {
+ keyTypeK, lengthK, err := SSHNativeParsePublicKey(tc.content)
+ if err != nil {
+ assert.FailNow(t, "%v", err)
+ }
+ assert.Equal(t, tc.keyType, keyTypeK)
+ assert.EqualValues(t, tc.length, lengthK)
+ })
+ })
+ }
+}
+
+func Test_CheckPublicKeyString(t *testing.T) {
+ oldValue := setting.SSH.MinimumKeySizeCheck
+ setting.SSH.MinimumKeySizeCheck = false
+ for _, test := range []struct {
+ content string
+ }{
+ {"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDAu7tvIvX6ZHrRXuZNfkR3XLHSsuCK9Zn3X58lxBcQzuo5xZgB6vRwwm/QtJuF+zZPtY5hsQILBLmF+BZ5WpKZp1jBeSjH2G7lxet9kbcH+kIVj0tPFEoyKI9wvWqIwC4prx/WVk2wLTJjzBAhyNxfEq7C9CeiX9pQEbEqJfkKCQ== nocomment\n"},
+ {"ssh-rsa AAAAB3NzaC1yc2EA\r\nAAADAQABAAAAgQDAu7tvIvX6ZHrRXuZNfkR3XLHSsuCK9Zn3X58lxBcQzuo5xZgB6vRwwm/QtJuF+zZPtY5hsQILBLmF+\r\nBZ5WpKZp1jBeSjH2G7lxet9kbcH+kIVj0tPFEoyKI9wvWqIwC4prx/WVk2wLTJjzBAhyNx\r\nfEq7C9CeiX9pQEbEqJfkKCQ== nocomment\r\n\r\n"},
+ {"ssh-rsa AAAAB3NzaC1yc2EA\r\nAAADAQABAAAAgQDAu7tvI\nvX6ZHrRXuZNfkR3XLHSsuCK9Zn3X58lxBcQzuo5xZgB6vRwwm/QtJuF+zZPtY5hsQILBLmF+\r\nBZ5WpKZp1jBeSjH2G7lxet9kbcH+kIVj0tPFEoyKI9wvW\nqIwC4prx/WVk2wLTJjzBAhyNx\r\nfEq7C9CeiX9pQEbEqJfkKCQ== nocomment\r\n\r\n"},
+ {"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICV0MGX/W9IvLA4FXpIuUcdDcbj5KX4syHgsTy7soVgf"},
+ {"\r\nssh-ed25519 \r\nAAAAC3NzaC1lZDI1NTE5AAAAICV0MGX/W9IvLA4FXpIuUcdDcbj5KX4syHgsTy7soVgf\r\n\r\n"},
+ {"sk-ecdsa-sha2-nistp256@openssh.com AAAAInNrLWVjZHNhLXNoYTItbmlzdHAyNTZAb3BlbnNzaC5jb20AAAAIbmlzdHAyNTYAAABBBGXEEzWmm1dxb+57RoK5KVCL0w2eNv9cqJX2AGGVlkFsVDhOXHzsadS3LTK4VlEbbrDMJdoti9yM8vclA8IeRacAAAAEc3NoOg== nocomment"},
+ {"sk-ssh-ed25519@openssh.com AAAAGnNrLXNzaC1lZDI1NTE5QG9wZW5zc2guY29tAAAAIE7kM1R02+4ertDKGKEDcKG0s+2vyDDcIvceJ0Gqv5f1AAAABHNzaDo= nocomment"},
+ {`---- BEGIN SSH2 PUBLIC KEY ----
+Comment: "1024-bit DSA, converted by andrew@phaedra from OpenSSH"
+AAAAB3NzaC1kc3MAAACBAOChCC7lf6Uo9n7BmZ6M8St19PZf4Tn59NriyboW2x/DZuYAz3
+ibZ2OkQ3S0SqDIa0HXSEJ1zaExQdmbO+Ux/wsytWZmCczWOVsaszBZSl90q8UnWlSH6P+/
+YA+RWJm5SFtuV9PtGIhyZgoNuz5kBQ7K139wuQsecdKktISwTakzAAAAFQCzKsO2JhNKlL
++wwwLGOcLffoAmkwAAAIBpK7/3xvduajLBD/9vASqBQIHrgK2J+wiQnIb/Wzy0UsVmvfn8
+A+udRbBo+csM8xrSnlnlJnjkJS3qiM5g+eTwsLIV1IdKPEwmwB+VcP53Cw6lSyWyJcvhFb
+0N6s08NZysLzvj0N+ZC/FnhKTLzIyMtkHf/IrPCwlM+pV/M/96YgAAAIEAqQcGn9CKgzgP
+aguIZooTAOQdvBLMI5y0bQjOW6734XOpqQGf/Kra90wpoasLKZjSYKNPjE+FRUOrStLrxc
+Ns4BeVKhy2PYTRnybfYVk1/dmKgH6P1YSRONsGKvTsH6c5IyCRG0ncCgYeF8tXppyd6429
+82daopE7zQ/NPAnJfag=
+---- END SSH2 PUBLIC KEY ----
+`},
+ {`---- BEGIN SSH2 PUBLIC KEY ----
+Comment: "1024-bit RSA, converted by andrew@phaedra from OpenSSH"
+AAAAB3NzaC1yc2EAAAADAQABAAAAgQDAu7tvIvX6ZHrRXuZNfkR3XLHSsuCK9Zn3X58lxB
+cQzuo5xZgB6vRwwm/QtJuF+zZPtY5hsQILBLmF+BZ5WpKZp1jBeSjH2G7lxet9kbcH+kIV
+j0tPFEoyKI9wvWqIwC4prx/WVk2wLTJjzBAhyNxfEq7C9CeiX9pQEbEqJfkKCQ==
+---- END SSH2 PUBLIC KEY ----
+`},
+ {`-----BEGIN RSA PUBLIC KEY-----
+MIGJAoGBAMC7u28i9fpketFe5k1+RHdcsdKy4Ir1mfdfnyXEFxDO6jnFmAHq9HDC
+b9C0m4X7Nk+1jmGxAgsEuYX4FnlakpmnWMF5KMfYbuXF632Rtwf6QhWPS08USjIo
+j3C9aojALimvH9ZWTbAtMmPMECHI3F8SrsL0J6Jf2lARsSol+QoJAgMBAAE=
+-----END RSA PUBLIC KEY-----
+`},
+ {`-----BEGIN PUBLIC KEY-----
+MIIBtzCCASsGByqGSM44BAEwggEeAoGBAOChCC7lf6Uo9n7BmZ6M8St19PZf4Tn5
+9NriyboW2x/DZuYAz3ibZ2OkQ3S0SqDIa0HXSEJ1zaExQdmbO+Ux/wsytWZmCczW
+OVsaszBZSl90q8UnWlSH6P+/YA+RWJm5SFtuV9PtGIhyZgoNuz5kBQ7K139wuQse
+cdKktISwTakzAhUAsyrDtiYTSpS/sMMCxjnC336AJpMCgYBpK7/3xvduajLBD/9v
+ASqBQIHrgK2J+wiQnIb/Wzy0UsVmvfn8A+udRbBo+csM8xrSnlnlJnjkJS3qiM5g
++eTwsLIV1IdKPEwmwB+VcP53Cw6lSyWyJcvhFb0N6s08NZysLzvj0N+ZC/FnhKTL
+zIyMtkHf/IrPCwlM+pV/M/96YgOBhQACgYEAqQcGn9CKgzgPaguIZooTAOQdvBLM
+I5y0bQjOW6734XOpqQGf/Kra90wpoasLKZjSYKNPjE+FRUOrStLrxcNs4BeVKhy2
+PYTRnybfYVk1/dmKgH6P1YSRONsGKvTsH6c5IyCRG0ncCgYeF8tXppyd642982da
+opE7zQ/NPAnJfag=
+-----END PUBLIC KEY-----
+`},
+ {`-----BEGIN PUBLIC KEY-----
+MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDAu7tvIvX6ZHrRXuZNfkR3XLHS
+suCK9Zn3X58lxBcQzuo5xZgB6vRwwm/QtJuF+zZPtY5hsQILBLmF+BZ5WpKZp1jB
+eSjH2G7lxet9kbcH+kIVj0tPFEoyKI9wvWqIwC4prx/WVk2wLTJjzBAhyNxfEq7C
+9CeiX9pQEbEqJfkKCQIDAQAB
+-----END PUBLIC KEY-----
+`},
+ {`-----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzGV4ftTgVMEh/Q+OcE2s
+RK0CDfSKAvcZezCiZKr077+juUUfWFvyCvRW3414F7KaWBobAmaNYRTjrFxzJ3zj
+karv8TA8eMj7sryqcOC3jxHIOEw4qWgxbsW1jqnPwVGUWXF7uNUAFnwy6yJ8LJbV
+mR0nhu4Y4aWnJeBa1b/VdaUujnOUNTccRM087jS0v/HYma05v2AEEP/gfps1iN8x
+LReJomY4wJY1ndS0wT71Nt3dvQ3AZphWoXGeONV2bE3gMBsRv0Oo/DYDV4/VsTHl
+sMV1do3gF/xAUqWawlZQkNcibME+sQqfE7gZ04hlmDATU2zmbzwuHtFiNv8mVv7O
+RQIDAQAB
+-----END PUBLIC KEY-----
+`},
+ {`---- BEGIN SSH2 PUBLIC KEY ----
+Comment: "256-bit ED25519, converted by andrew@phaedra from OpenSSH"
+AAAAC3NzaC1lZDI1NTE5AAAAICV0MGX/W9IvLA4FXpIuUcdDcbj5KX4syHgsTy7soVgf
+---- END SSH2 PUBLIC KEY ----
+`},
+ } {
+ _, err := CheckPublicKeyString(test.content)
+ require.NoError(t, err)
+ }
+ setting.SSH.MinimumKeySizeCheck = oldValue
+ for _, invalidKeys := range []struct {
+ content string
+ }{
+ {"test"},
+ {"---- NOT A REAL KEY ----"},
+ {"bad\nkey"},
+ {"\t\t:)\t\r\n"},
+ {"\r\ntest \r\ngitea\r\n\r\n"},
+ } {
+ _, err := CheckPublicKeyString(invalidKeys.content)
+ require.Error(t, err)
+ }
+}
+
+func Test_calcFingerprint(t *testing.T) {
+ testCases := []struct {
+ name string
+ skipSSHKeygen bool
+ fp string
+ content string
+ }{
+ {"rsa-1024", false, "SHA256:vSnDkvRh/xM6kMxPidLgrUhq3mCN7CDaronCEm2joyQ", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDAu7tvIvX6ZHrRXuZNfkR3XLHSsuCK9Zn3X58lxBcQzuo5xZgB6vRwwm/QtJuF+zZPtY5hsQILBLmF+BZ5WpKZp1jBeSjH2G7lxet9kbcH+kIVj0tPFEoyKI9wvWqIwC4prx/WVk2wLTJjzBAhyNxfEq7C9CeiX9pQEbEqJfkKCQ== nocomment\n"},
+ {"rsa-2048", false, "SHA256:ZHD//a1b9VuTq9XSunAeYjKeU1xDa2tBFZYrFr2Okkg", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDMZXh+1OBUwSH9D45wTaxErQIN9IoC9xl7MKJkqvTvv6O5RR9YW/IK9FbfjXgXsppYGhsCZo1hFOOsXHMnfOORqu/xMDx4yPuyvKpw4LePEcg4TDipaDFuxbWOqc/BUZRZcXu41QAWfDLrInwsltWZHSeG7hjhpacl4FrVv9V1pS6Oc5Q1NxxEzTzuNLS/8diZrTm/YAQQ/+B+mzWI3zEtF4miZjjAljWd1LTBPvU23d29DcBmmFahcZ441XZsTeAwGxG/Q6j8NgNXj9WxMeWwxXV2jeAX/EBSpZrCVlCQ1yJswT6xCp8TuBnTiGWYMBNTbOZvPC4e0WI2/yZW/s5F nocomment"},
+ {"ecdsa-256", false, "SHA256:Bqx/xgWqRKLtkZ0Lr4iZpgb+5lYsFpSwXwVZbPwuTRw", "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFQacN3PrOll7PXmN5B/ZNVahiUIqI05nbBlZk1KXsO3d06ktAWqbNflv2vEmA38bTFTfJ2sbn2B5ksT52cDDbA= nocomment"},
+ {"ecdsa-384", false, "SHA256:4qfJOgJDtUd8BrEjyVNdI8IgjiZKouztVde43aDhe1E", "ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBINmioV+XRX1Fm9Qk2ehHXJ2tfVxW30ypUWZw670Zyq5GQfBAH6xjygRsJ5wWsHXBsGYgFUXIHvMKVAG1tpw7s6ax9oA+dJOJ7tj+vhn8joFqT+sg3LYHgZkHrfqryRasQ== nocomment"},
+ {"ecdsa-sk", true, "SHA256:4wcIu4z+53gHc+db85OPfy8IydyNzPLCr6kHIs625LQ", "sk-ecdsa-sha2-nistp256@openssh.com AAAAInNrLWVjZHNhLXNoYTItbmlzdHAyNTZAb3BlbnNzaC5jb20AAAAIbmlzdHAyNTYAAABBBGXEEzWmm1dxb+57RoK5KVCL0w2eNv9cqJX2AGGVlkFsVDhOXHzsadS3LTK4VlEbbrDMJdoti9yM8vclA8IeRacAAAAEc3NoOg== nocomment"},
+ {"ed25519-sk", true, "SHA256:RB4ku1OeWKN7fLMrjxz38DK0mp1BnOPBx4BItjTvJ0g", "sk-ssh-ed25519@openssh.com AAAAGnNrLXNzaC1lZDI1NTE5QG9wZW5zc2guY29tAAAAIE7kM1R02+4ertDKGKEDcKG0s+2vyDDcIvceJ0Gqv5f1AAAABHNzaDo= nocomment"},
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ t.Run("Native", func(t *testing.T) {
+ fpN, err := calcFingerprintNative(tc.content)
+ require.NoError(t, err)
+ assert.Equal(t, tc.fp, fpN)
+ })
+ if tc.skipSSHKeygen {
+ return
+ }
+ t.Run("SSHKeygen", func(t *testing.T) {
+ fpK, err := calcFingerprintSSHKeygen(tc.content)
+ require.NoError(t, err)
+ assert.Equal(t, tc.fp, fpK)
+ })
+ })
+ }
+}
+
+var (
+ // Generated with "ssh-keygen -C test@rekor.dev -f id_rsa"
+ sshPrivateKey = `-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn
+NhAAAAAwEAAQAAAYEA16H5ImoRO7mr41r8Z8JFBdu6jIM+6XU8M0r9F81RuhLYqzr9zw1n
+LeGCqFxPXNBKm8ZyH2BCsBHsbXbwe85IMHM3SUh8X/9fI0Lpi5/xbqAproFUpNR+UJYv6s
+8AaWk5zpN1rmpBrqGFJfGQKJCioDiiwNGmSdVkUNmQmYIANxJMDWYmNe8vUOh6nYEHB+lz
+fGgDAAzVSXTACW994UkSY47AD05swU4rIT/JWA6BkUrEhO//F0QQhFeROCPJiPRhJXGcFf
+9SicffJqR/ELzM1zNYnRXMD0bbdTUwDrIcIFFNBbtcfJVOUUCGumSlt+qjUC7y8cvwbHAu
+wf5nS6baA7P6LfTYplF2XIAkdWtkN6O1ouoyIHICXMlddDW2vNaJeEXTeKjx51WSM7qPnQ
+ZKsBtwjLQeEY/OPkIvu88lNNYSD63qMUA12msohjwVFCIgJVvYLIrkViczZ7t3L7lgy1X0
+CJI4e1roOfM/r9jTieyDHchEYpZYcw3L1R2qtePlAAAFiHdJQKl3SUCpAAAAB3NzaC1yc2
+EAAAGBANeh+SJqETu5q+Na/GfCRQXbuoyDPul1PDNK/RfNUboS2Ks6/c8NZy3hgqhcT1zQ
+SpvGch9gQrAR7G128HvOSDBzN0lIfF//XyNC6Yuf8W6gKa6BVKTUflCWL+rPAGlpOc6Tda
+5qQa6hhSXxkCiQoqA4osDRpknVZFDZkJmCADcSTA1mJjXvL1Doep2BBwfpc3xoAwAM1Ul0
+wAlvfeFJEmOOwA9ObMFOKyE/yVgOgZFKxITv/xdEEIRXkTgjyYj0YSVxnBX/UonH3yakfx
+C8zNczWJ0VzA9G23U1MA6yHCBRTQW7XHyVTlFAhrpkpbfqo1Au8vHL8GxwLsH+Z0um2gOz
++i302KZRdlyAJHVrZDejtaLqMiByAlzJXXQ1trzWiXhF03io8edVkjO6j50GSrAbcIy0Hh
+GPzj5CL7vPJTTWEg+t6jFANdprKIY8FRQiICVb2CyK5FYnM2e7dy+5YMtV9AiSOHta6Dnz
+P6/Y04nsgx3IRGKWWHMNy9UdqrXj5QAAAAMBAAEAAAGAJyaOcFQnuttUPRxY9ZHNLGofrc
+Fqm8KgYoO7/iVWMF2Zn0U/rec2E5t9OIpCEozy7uOR9uZoVUV70sgkk6X5b2qL4C9b/aYF
+JQbSFnq8wCQuTTPIJYE7SfBq1Mwuu/TR/RLC7B74u/cxkJkSXnscO9Dso+ussH0hEJjf6y
+8yUM1up4Qjbel2gs8i7BPwLdySDkVoPgsWcpbTAyOODGhTAWZ6soy/rD1AEXJeYTGJDtMv
+aR+WBihig1TO1g2RWt9bqqiG7PIlljd3ZsjSSU5y3t6ZN/8j5keKD032EtxbZB0WFD3Ar4
+FbFwlW+urb2MQ0JyNKOio3nhdjolXYkJa+C6LXdaaml/8BhMR1eLoMe8nS45w76o8mdJWX
+wsirB8tvjCLY0QBXgGv/1DTsKu/wEFCW2/Y0e50gF7pHAlYFNmKDcgI9OyORRYhFbV4D82
+fI8JLQ42ZJkS/0t6xQma8WC88pbHGEuVSB6CE/p25fyYRX+UPTQ79tWFvLV4kNQAaBAAAA
+wEvyd6H8ePyBXImg8JzGxthufB0eXSfZBrabjf6e6bR2ivpJsHmB64gbMkV6MFV7EWYX1B
+wYPQxf4gA2Ez7aJvDtfE7uV6pa0WJS3hW1+be8DHEftmLSbTy/TEvDujNb2gqoi7uWQXWJ
+yYWZlYO65r1a6HucryQ8+78fTuTRbZALO43vNGz0oXH1hPSddkcbNAhZTsD0rQKNwqVTe5
+wl+6Cduy/CQwjHLYrY73MyWy1Vh1LXhAdGMPnWZwGIu/dnkgAAAMEA9KuaoGnfnLQkrjeR
+tO4RCRS2quNRvm4L6i4vHgTDsYtoSlR1ujge7SGOOmIPS4XVjZN5zzCOA7+EDVnuz3WWmx
+hmkjpG1YxzmJGaWoYdeo3a6UgJtisfMp8eUKqjJT1mhsCliCWtaOQNRoQieDQmgwZzSX/v
+ZiGsOIKa6cR37eKvOJSjVrHsAUzdtYrmi8P2gvAUFWyzXobAtpzHcWrwWkOEIm04G0OGXb
+J46hfIX3f45E5EKXvFzexGgVOD2I7hAAAAwQDhniYAizfW9YfG7UJWekkl42xMP7Cb8b0W
+SindSIuE8bFTukV1yxbmNZp/f0pKvn/DWc2n0I0bwSGZpy8BCY46RKKB2DYQavY/tGcC1N
+AynKuvbtWs11A0mTXmq3WwHVXQDozMwJ2nnHpm0UHspPuHqkYpurlP+xoFsocaQ9QwITyp
+lL4qHtXBEzaT8okkcGZBHdSx3gk4TzCsEDOP7ZZPLq42lpKMK10zFPTMd0maXtJDYKU/b4
+gAATvvPoylyYUAAAAOdGVzdEByZWtvci5kZXYBAgMEBQ==
+-----END OPENSSH PRIVATE KEY-----
+`
+ sshPublicKey = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDXofkiahE7uavjWvxnwkUF27qMgz7pdTwzSv0XzVG6EtirOv3PDWct4YKoXE9c0EqbxnIfYEKwEextdvB7zkgwczdJSHxf/18jQumLn/FuoCmugVSk1H5Qli/qzwBpaTnOk3WuakGuoYUl8ZAokKKgOKLA0aZJ1WRQ2ZCZggA3EkwNZiY17y9Q6HqdgQcH6XN8aAMADNVJdMAJb33hSRJjjsAPTmzBTishP8lYDoGRSsSE7/8XRBCEV5E4I8mI9GElcZwV/1KJx98mpH8QvMzXM1idFcwPRtt1NTAOshwgUU0Fu1x8lU5RQIa6ZKW36qNQLvLxy/BscC7B/mdLptoDs/ot9NimUXZcgCR1a2Q3o7Wi6jIgcgJcyV10Nba81ol4RdN4qPHnVZIzuo+dBkqwG3CMtB4Rj84+Qi+7zyU01hIPreoxQDXaayiGPBUUIiAlW9gsiuRWJzNnu3cvuWDLVfQIkjh7Wug58z+v2NOJ7IMdyERillhzDcvVHaq14+U= test@rekor.dev
+`
+ // Generated with "ssh-keygen -C other-test@rekor.dev -f id_rsa"
+ otherSSHPrivateKey = `-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn
+NhAAAAAwEAAQAAAYEAw/WCSWC9TEvCQOwO+T68EvNa3OSIv1Y0+sT8uSvyjPyEO0+p0t8C
+g/zy67vOxiQpU5jN6MItjXAjMmeCm8GKMt6gk+cDoaAev/ZfjuzSL7RayExpmhBleh2X3G
+KLkkXF9ABFNchlTqSLOZiEjDoNpbFv16KT1sE6CqW8DjxXQkQk9JK65hLH+BxeWMNCEJVa
+Cma4X04aJmC7zJAi5yGeeT0SKVqMohavF90O6XiYFCQHuwXPPyHfocqgudmXnozz+6D6ax
+JKZMwQsNp3WKumOjlzWnxBCCB1l2jN6Rag8aJ2277iMFXRwjTL/8jaEsW4KkysDf0GjV2/
+iqbr0q5b0arDYbv7CrGBR+uH0wGz/Zog1x5iZANObhZULpDrLVJidEMc27HXBb7PMsNDy7
+BGYRB1yc0d0y83p8mUqvOlWSArxn1WnAZO04pAgTrclrhEh4ZXOkn2Sn82eu3DpQ8inkol
+Y4IfnhIfbOIeemoUNq1tOUquhow9GLRM6INieHLBAAAFkPPnA1jz5wNYAAAAB3NzaC1yc2
+EAAAGBAMP1gklgvUxLwkDsDvk+vBLzWtzkiL9WNPrE/Lkr8oz8hDtPqdLfAoP88uu7zsYk
+KVOYzejCLY1wIzJngpvBijLeoJPnA6GgHr/2X47s0i+0WshMaZoQZXodl9xii5JFxfQART
+XIZU6kizmYhIw6DaWxb9eik9bBOgqlvA48V0JEJPSSuuYSx/gcXljDQhCVWgpmuF9OGiZg
+u8yQIuchnnk9EilajKIWrxfdDul4mBQkB7sFzz8h36HKoLnZl56M8/ug+msSSmTMELDad1
+irpjo5c1p8QQggdZdozekWoPGidtu+4jBV0cI0y//I2hLFuCpMrA39Bo1dv4qm69KuW9Gq
+w2G7+wqxgUfrh9MBs/2aINceYmQDTm4WVC6Q6y1SYnRDHNux1wW+zzLDQ8uwRmEQdcnNHd
+MvN6fJlKrzpVkgK8Z9VpwGTtOKQIE63Ja4RIeGVzpJ9kp/Nnrtw6UPIp5KJWOCH54SH2zi
+HnpqFDatbTlKroaMPRi0TOiDYnhywQAAAAMBAAEAAAGAYycx4oEhp55Zz1HijblxnsEmQ8
+kbbH1pV04fdm7HTxFis0Qu8PVIp5JxNFiWWunnQ1Z5MgI23G9WT+XST4+RpwXBCLWGv9xu
+UsGOPpqUC/FdUiZf9MXBIxYgRjJS3xORA1KzsnAQ2sclb2I+B1pEl4d9yQWJesvQ25xa2H
+Utzej/LgWkrk/ogSGRl6ZNImj/421wc0DouGyP+gUgtATt0/jT3LrlmAqUVCXVqssLYH2O
+r9JTuGUibBJEW2W/c0lsM0jaHa5bGAdL3nhDuF1Q6KFB87mZoNw8c2znYoTzQ3FyWtIEZI
+V/9oWrkS7V6242SKSR9tJoEzK0jtrKC/FZwBiI4hPcwoqY6fZbT1701i/n50xWEfEUOLVm
+d6VqNKyAbIaZIPN0qfZuD+xdrHuM3V6k/rgFxGl4XTrp/N4AsruiQs0nRQKNTw3fHE0zPq
+UTxSeMvjywRCepxhBFCNh8NHydapclHtEPEGdTVHohL3krJehstPO/IuRyKLfSVtL1AAAA
+wQCmGA8k+uW6mway9J3jp8mlMhhp3DCX6DAcvalbA/S5OcqMyiTM3c/HD5OJ6OYFDldcqu
+MPEgLRL2HfxL29LsbQSzjyOIrfp5PLJlo70P5lXS8u2QPbo4/KQJmQmsIX18LDyU2zRtNA
+C2WfBiHSZV+guLhmHms9S5gQYKt2T5OnY/W0tmnInx9lmFCMC+XKS1iSQ2o433IrtCPQJp
+IXZd59OQpO9QjJABgJIDtXxFIXt45qpXduDPJuggrhg81stOwAAADBAPX73u/CY+QUPts+
+LV185Z4mZ2y+qu2ZMCAU3BnpHktGZZ1vFN1Xq9o8KdnuPZ+QJRdO8eKMWpySqrIdIbTYLm
+9nXmVH0uNECIEAvdU+wgKeR+BSHxCRVuTF4YSygmNadgH/z+oRWLgOblGo2ywFBoXsIAKQ
+paNu1MFGRUmhz67+dcpkkBUDRU9loAgBKexMo8D9vkR0YiHLOUjCrtmEZRNm0YRZt0gQhD
+ZSD1fOH0fZDcCVNpGP2zqAKos4EGLnkwAAAMEAy/AuLtPKA2u9oCA8e18ZnuQRAi27FBVU
+rU2D7bMg1eS0IakG8v0gE9K6WdYzyArY1RoKB3ZklK5VmJ1cOcWc2x3Ejc5jcJgc8cC6lZ
+wwjpE8HfWL1kIIYgPdcexqFc+l6MdgH6QMKU3nLg1LsM4v5FEldtk/2dmnw620xnFfstpF
+VxSZNdKrYfM/v9o6sRaDRqSfH1dG8BvkUxPznTAF+JDxBENcKXYECcq9f6dcl1w5IEnNTD
+Wry/EKQvgvOUjbAAAAFG90aGVyLXRlc3RAcmVrb3IuZGV2AQIDBAUG
+-----END OPENSSH PRIVATE KEY-----
+`
+ otherSSHPublicKey = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDD9YJJYL1MS8JA7A75PrwS81rc5Ii/VjT6xPy5K/KM/IQ7T6nS3wKD/PLru87GJClTmM3owi2NcCMyZ4KbwYoy3qCT5wOhoB6/9l+O7NIvtFrITGmaEGV6HZfcYouSRcX0AEU1yGVOpIs5mISMOg2lsW/XopPWwToKpbwOPFdCRCT0krrmEsf4HF5Yw0IQlVoKZrhfThomYLvMkCLnIZ55PRIpWoyiFq8X3Q7peJgUJAe7Bc8/Id+hyqC52ZeejPP7oPprEkpkzBCw2ndYq6Y6OXNafEEIIHWXaM3pFqDxonbbvuIwVdHCNMv/yNoSxbgqTKwN/QaNXb+KpuvSrlvRqsNhu/sKsYFH64fTAbP9miDXHmJkA05uFlQukOstUmJ0QxzbsdcFvs8yw0PLsEZhEHXJzR3TLzenyZSq86VZICvGfVacBk7TikCBOtyWuESHhlc6SfZKfzZ67cOlDyKeSiVjgh+eEh9s4h56ahQ2rW05Sq6GjD0YtEzog2J4csE= other-test@rekor.dev
+`
+
+ // Generated with ssh-keygen -C test@rekor.dev -t ed25519 -f id_ed25519
+ ed25519PrivateKey = `-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACBB45zRHxPPFtabwS3Vd6Lb9vMe+tIHZj2qN5VQ+bgLfQAAAJgyRa3cMkWt
+3AAAAAtzc2gtZWQyNTUxOQAAACBB45zRHxPPFtabwS3Vd6Lb9vMe+tIHZj2qN5VQ+bgLfQ
+AAAED7y4N/DsVnRQiBZNxEWdsJ9RmbranvtQ3X9jnb6gFed0HjnNEfE88W1pvBLdV3otv2
+8x760gdmPao3lVD5uAt9AAAADnRlc3RAcmVrb3IuZGV2AQIDBAUGBw==
+-----END OPENSSH PRIVATE KEY-----
+`
+ ed25519PublicKey = `ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEHjnNEfE88W1pvBLdV3otv28x760gdmPao3lVD5uAt9 test@rekor.dev
+`
+)
+
+func TestFromOpenSSH(t *testing.T) {
+ for _, tt := range []struct {
+ name string
+ pub string
+ priv string
+ }{
+ {
+ name: "rsa",
+ pub: sshPublicKey,
+ priv: sshPrivateKey,
+ },
+ {
+ name: "ed25519",
+ pub: ed25519PublicKey,
+ priv: ed25519PrivateKey,
+ },
+ } {
+ if _, err := exec.LookPath("ssh-keygen"); err != nil {
+ t.Skip("skip TestFromOpenSSH: missing ssh-keygen in PATH")
+ }
+ t.Run(tt.name, func(t *testing.T) {
+ tt := tt
+
+ // Test that a signature from the cli can validate here.
+ td := t.TempDir()
+
+ data := []byte("hello, ssh world")
+ dataPath := write(t, data, td, "data")
+
+ privPath := write(t, []byte(tt.priv), td, "id")
+ write(t, []byte(tt.pub), td, "id.pub")
+
+ sigPath := dataPath + ".sig"
+ run(t, nil, "ssh-keygen", "-Y", "sign", "-n", "file", "-f", privPath, dataPath)
+
+ sigBytes, err := os.ReadFile(sigPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := sshsig.Verify(bytes.NewReader(data), sigBytes, []byte(tt.pub), "file"); err != nil {
+ t.Error(err)
+ }
+
+ // It should not verify if we check against another public key
+ if err := sshsig.Verify(bytes.NewReader(data), sigBytes, []byte(otherSSHPublicKey), "file"); err == nil {
+ t.Error("expected error with incorrect key")
+ }
+
+ // It should not verify if the data is tampered
+ if err := sshsig.Verify(strings.NewReader("bad data"), sigBytes, []byte(sshPublicKey), "file"); err == nil {
+ t.Error("expected error with incorrect data")
+ }
+ })
+ }
+}
+
+func TestToOpenSSH(t *testing.T) {
+ for _, tt := range []struct {
+ name string
+ pub string
+ priv string
+ }{
+ {
+ name: "rsa",
+ pub: sshPublicKey,
+ priv: sshPrivateKey,
+ },
+ {
+ name: "ed25519",
+ pub: ed25519PublicKey,
+ priv: ed25519PrivateKey,
+ },
+ } {
+ if _, err := exec.LookPath("ssh-keygen"); err != nil {
+ t.Skip("skip TestToOpenSSH: missing ssh-keygen in PATH")
+ }
+ t.Run(tt.name, func(t *testing.T) {
+ tt := tt
+ // Test that a signature from here can validate in the CLI.
+ td := t.TempDir()
+
+ data := []byte("hello, ssh world")
+ write(t, data, td, "data")
+
+ armored, err := sshsig.Sign([]byte(tt.priv), bytes.NewReader(data), "file")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sigPath := write(t, armored, td, "oursig")
+
+ // Create an allowed_signers file with two keys to check against.
+ allowedSigner := "test@rekor.dev " + tt.pub + "\n"
+ allowedSigner += "othertest@rekor.dev " + otherSSHPublicKey + "\n"
+ allowedSigners := write(t, []byte(allowedSigner), td, "allowed_signer")
+
+ // We use the correct principal here so it should work.
+ run(t, data, "ssh-keygen", "-Y", "verify", "-f", allowedSigners,
+ "-I", "test@rekor.dev", "-n", "file", "-s", sigPath)
+
+ // Just to be sure, check against the other public key as well.
+ runErr(t, data, "ssh-keygen", "-Y", "verify", "-f", allowedSigners,
+ "-I", "othertest@rekor.dev", "-n", "file", "-s", sigPath)
+
+ // It should error if we run it against other data
+ data = []byte("other data!")
+ runErr(t, data, "ssh-keygen", "-Y", "check-novalidate", "-n", "file", "-s", sigPath)
+ })
+ }
+}
+
+func TestRoundTrip(t *testing.T) {
+ data := []byte("my good data to be signed!")
+
+ // Create one extra signature for all the tests.
+ otherSig, err := sshsig.Sign([]byte(otherSSHPrivateKey), bytes.NewReader(data), "file")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, tt := range []struct {
+ name string
+ pub string
+ priv string
+ }{
+ {
+ name: "rsa",
+ pub: sshPublicKey,
+ priv: sshPrivateKey,
+ },
+ {
+ name: "ed25519",
+ pub: ed25519PublicKey,
+ priv: ed25519PrivateKey,
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ tt := tt
+ sig, err := sshsig.Sign([]byte(tt.priv), bytes.NewReader(data), "file")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Check the signature against that data and public key
+ if err := sshsig.Verify(bytes.NewReader(data), sig, []byte(tt.pub), "file"); err != nil {
+ t.Error(err)
+ }
+
+ // Now check it against invalid data.
+ if err := sshsig.Verify(strings.NewReader("invalid data!"), sig, []byte(tt.pub), "file"); err == nil {
+ t.Error("expected error!")
+ }
+
+ // Now check it against the wrong key.
+ if err := sshsig.Verify(bytes.NewReader(data), sig, []byte(otherSSHPublicKey), "file"); err == nil {
+ t.Error("expected error!")
+ }
+
+ // Now check it against an invalid signature data.
+ if err := sshsig.Verify(bytes.NewReader(data), []byte("invalid signature!"), []byte(tt.pub), "file"); err == nil {
+ t.Error("expected error!")
+ }
+
+ // Once more, use the wrong signature and check it against the original (wrong public key)
+ if err := sshsig.Verify(bytes.NewReader(data), otherSig, []byte(tt.pub), "file"); err == nil {
+ t.Error("expected error!")
+ }
+ // It should work against the correct public key.
+ if err := sshsig.Verify(bytes.NewReader(data), otherSig, []byte(otherSSHPublicKey), "file"); err != nil {
+ t.Error(err)
+ }
+ })
+ }
+}
+
+func write(t *testing.T, d []byte, fp ...string) string {
+ p := filepath.Join(fp...)
+ if err := os.WriteFile(p, d, 0o600); err != nil {
+ t.Fatal(err)
+ }
+ return p
+}
+
+func run(t *testing.T, stdin []byte, args ...string) {
+ t.Helper()
+ /* #nosec */
+ cmd := exec.Command(args[0], args[1:]...)
+ cmd.Stdin = bytes.NewReader(stdin)
+ out, err := cmd.CombinedOutput()
+ t.Logf("cmd %v: %s", cmd, string(out))
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func runErr(t *testing.T, stdin []byte, args ...string) {
+ t.Helper()
+ /* #nosec */
+ cmd := exec.Command(args[0], args[1:]...)
+ cmd.Stdin = bytes.NewReader(stdin)
+ out, err := cmd.CombinedOutput()
+ t.Logf("cmd %v: %s", cmd, string(out))
+ if err == nil {
+ t.Fatal("expected error")
+ }
+}
+
+func Test_PublicKeysAreExternallyManaged(t *testing.T) {
+ key1 := unittest.AssertExistsAndLoadBean(t, &PublicKey{ID: 1})
+ externals, err := PublicKeysAreExternallyManaged(db.DefaultContext, []*PublicKey{key1})
+ require.NoError(t, err)
+ assert.Len(t, externals, 1)
+ assert.False(t, externals[0])
+}
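
For reference, the sign/verify round trip these tests exercise reduces to the following sketch, assuming the same 42wim/sshsig package; priv and pub are hypothetical stand-ins for the key material defined above:

    // Minimal illustrative sketch: sign data, then verify it back.
    func signAndVerify(priv, pub, data []byte) error {
        sig, err := sshsig.Sign(priv, bytes.NewReader(data), "file")
        if err != nil {
            return err
        }
        // Verification fails if the data, the public key, or the
        // namespace ("file") differ from what was used at signing time.
        return sshsig.Verify(bytes.NewReader(data), sig, pub, "file")
    }
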
diff --git a/models/asymkey/ssh_key_verify.go b/models/asymkey/ssh_key_verify.go
new file mode 100644
index 0000000..208288c
--- /dev/null
+++ b/models/asymkey/ssh_key_verify.go
@@ -0,0 +1,55 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package asymkey
+
+import (
+ "bytes"
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/log"
+
+ "github.com/42wim/sshsig"
+)
+
+// VerifySSHKey marks an SSH key as verified
+func VerifySSHKey(ctx context.Context, ownerID int64, fingerprint, token, signature string) (string, error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return "", err
+ }
+ defer committer.Close()
+
+ key := new(PublicKey)
+
+ has, err := db.GetEngine(ctx).Where("owner_id = ? AND fingerprint = ?", ownerID, fingerprint).Get(key)
+ if err != nil {
+ return "", err
+ } else if !has {
+ return "", ErrKeyNotExist{}
+ }
+
+ err = sshsig.Verify(bytes.NewBuffer([]byte(token)), []byte(signature), []byte(key.Content), "gitea")
+ if err != nil {
+		// edge case for Windows-based shells that add CR LF when the token is piped to the ssh-keygen command
+		// see https://github.com/PowerShell/PowerShell/issues/5974
+ if sshsig.Verify(bytes.NewBuffer([]byte(token+"\r\n")), []byte(signature), []byte(key.Content), "gitea") != nil {
+ log.Error("Unable to validate token signature. Error: %v", err)
+ return "", ErrSSHInvalidTokenSignature{
+ Fingerprint: key.Fingerprint,
+ }
+ }
+ }
+
+ key.Verified = true
+ if _, err := db.GetEngine(ctx).ID(key.ID).Cols("verified").Update(key); err != nil {
+ return "", err
+ }
+
+ if err := committer.Commit(); err != nil {
+ return "", err
+ }
+
+ return key.Fingerprint, nil
+}
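
The signature checked here is produced client-side over the server-issued token, typically via: echo -n "<token>" | ssh-keygen -Y sign -n gitea -f <keyfile>. A hedged Go equivalent using the same sshsig package is sketched below; the helper name and arguments are placeholders, not part of this file:

    // Hypothetical client-side counterpart of VerifySSHKey: sign the token
    // under the "gitea" namespace with the user's OpenSSH private key.
    func proveKeyOwnership(privKey []byte, token string) ([]byte, error) {
        return sshsig.Sign(privKey, bytes.NewReader([]byte(token)), "gitea")
    }
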
diff --git a/models/auth/TestOrphanedOAuth2Applications/oauth2_application.yaml b/models/auth/TestOrphanedOAuth2Applications/oauth2_application.yaml
new file mode 100644
index 0000000..cccb404
--- /dev/null
+++ b/models/auth/TestOrphanedOAuth2Applications/oauth2_application.yaml
@@ -0,0 +1,33 @@
+-
+ id: 1000
+ uid: 0
+ name: "Git Credential Manager"
+ client_id: "e90ee53c-94e2-48ac-9358-a874fb9e0662"
+ redirect_uris: '["http://127.0.0.1", "https://127.0.0.1"]'
+ created_unix: 1712358091
+ updated_unix: 1712358091
+-
+ id: 1001
+ uid: 0
+ name: "git-credential-oauth"
+ client_id: "a4792ccc-144e-407e-86c9-5e7d8d9c3269"
+ redirect_uris: '["http://127.0.0.1", "https://127.0.0.1"]'
+ created_unix: 1712358091
+ updated_unix: 1712358091
+
+-
+ id: 1002
+ uid: 1234567890
+ name: "Should be removed"
+ client_id: "deadc0de-badd-dd11-fee1-deaddecafbad"
+ redirect_uris: '["http://127.0.0.1", "https://127.0.0.1"]'
+ created_unix: 1712358091
+ updated_unix: 1712358091
+-
+ id: 1003
+ uid: 0
+ name: "Global Auth source that should be kept"
+ client_id: "2f3467c1-7b3b-463d-ab04-2ae2b2712826"
+ redirect_uris: '["http://example.com/globalapp", "https://example.com/globalapp"]'
+ created_unix: 1732387292
+ updated_unix: 1732387292
diff --git a/models/auth/access_token.go b/models/auth/access_token.go
new file mode 100644
index 0000000..63331b4
--- /dev/null
+++ b/models/auth/access_token.go
@@ -0,0 +1,236 @@
+// Copyright 2014 The Gogs Authors. All rights reserved.
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package auth
+
+import (
+ "context"
+ "crypto/subtle"
+ "encoding/hex"
+ "fmt"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ lru "github.com/hashicorp/golang-lru/v2"
+ "xorm.io/builder"
+)
+
+// ErrAccessTokenNotExist represents an "AccessTokenNotExist" kind of error.
+type ErrAccessTokenNotExist struct {
+ Token string
+}
+
+// IsErrAccessTokenNotExist checks if an error is an ErrAccessTokenNotExist.
+func IsErrAccessTokenNotExist(err error) bool {
+ _, ok := err.(ErrAccessTokenNotExist)
+ return ok
+}
+
+func (err ErrAccessTokenNotExist) Error() string {
+ return fmt.Sprintf("access token does not exist [sha: %s]", err.Token)
+}
+
+func (err ErrAccessTokenNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ErrAccessTokenEmpty represents an "AccessTokenEmpty" kind of error.
+type ErrAccessTokenEmpty struct{}
+
+// IsErrAccessTokenEmpty checks if an error is an ErrAccessTokenEmpty.
+func IsErrAccessTokenEmpty(err error) bool {
+ _, ok := err.(ErrAccessTokenEmpty)
+ return ok
+}
+
+func (err ErrAccessTokenEmpty) Error() string {
+ return "access token is empty"
+}
+
+func (err ErrAccessTokenEmpty) Unwrap() error {
+ return util.ErrInvalidArgument
+}
+
+var successfulAccessTokenCache *lru.Cache[string, any]
+
+// AccessToken represents a personal access token.
+type AccessToken struct {
+ ID int64 `xorm:"pk autoincr"`
+ UID int64 `xorm:"INDEX"`
+ Name string
+ Token string `xorm:"-"`
+ TokenHash string `xorm:"UNIQUE"` // sha256 of token
+ TokenSalt string
+ TokenLastEight string `xorm:"INDEX token_last_eight"`
+ Scope AccessTokenScope
+
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ HasRecentActivity bool `xorm:"-"`
+ HasUsed bool `xorm:"-"`
+}
+
+// AfterLoad is invoked from XORM after setting the values of all fields of this object.
+func (t *AccessToken) AfterLoad() {
+ t.HasUsed = t.UpdatedUnix > t.CreatedUnix
+ t.HasRecentActivity = t.UpdatedUnix.AddDuration(7*24*time.Hour) > timeutil.TimeStampNow()
+}
+
+func init() {
+ db.RegisterModel(new(AccessToken), func() error {
+ if setting.SuccessfulTokensCacheSize > 0 {
+ var err error
+ successfulAccessTokenCache, err = lru.New[string, any](setting.SuccessfulTokensCacheSize)
+ if err != nil {
+ return fmt.Errorf("unable to allocate AccessToken cache: %w", err)
+ }
+ } else {
+ successfulAccessTokenCache = nil
+ }
+ return nil
+ })
+}
+
+// NewAccessToken creates a new access token.
+func NewAccessToken(ctx context.Context, t *AccessToken) error {
+ salt, err := util.CryptoRandomString(10)
+ if err != nil {
+ return err
+ }
+ token, err := util.CryptoRandomBytes(20)
+ if err != nil {
+ return err
+ }
+ t.TokenSalt = salt
+ t.Token = hex.EncodeToString(token)
+ t.TokenHash = HashToken(t.Token, t.TokenSalt)
+ t.TokenLastEight = t.Token[len(t.Token)-8:]
+ _, err = db.GetEngine(ctx).Insert(t)
+ return err
+}
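
The plaintext token is thus 40 hex characters (20 random bytes) and only lives on the struct right after creation; the database keeps the salted hash plus the last eight characters as a lookup index. A sketch of the resulting fields:

    // Illustration only: the shape of the values NewAccessToken fills in.
    func exampleCreateToken(ctx context.Context) error {
        t := &AccessToken{UID: 1, Name: "example"}
        if err := NewAccessToken(ctx, t); err != nil {
            return err
        }
        _ = t.Token          // 40 hex characters, shown to the user exactly once
        _ = t.TokenLastEight // final 8 characters, stored as the lookup index
        _ = t.TokenHash      // HashToken(Token, TokenSalt); the plaintext is never stored
        return nil
    }
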
+
+// DisplayPublicOnly reports whether to display this as a public-only token.
+func (t *AccessToken) DisplayPublicOnly() bool {
+ publicOnly, err := t.Scope.PublicOnly()
+ if err != nil {
+ return false
+ }
+ return publicOnly
+}
+
+func getAccessTokenIDFromCache(token string) int64 {
+ if successfulAccessTokenCache == nil {
+ return 0
+ }
+ tInterface, ok := successfulAccessTokenCache.Get(token)
+ if !ok {
+ return 0
+ }
+ t, ok := tInterface.(int64)
+ if !ok {
+ return 0
+ }
+ return t
+}
+
+// GetAccessTokenBySHA returns the access token matching the given plaintext token value
+func GetAccessTokenBySHA(ctx context.Context, token string) (*AccessToken, error) {
+ if token == "" {
+ return nil, ErrAccessTokenEmpty{}
+ }
+	// A token is a SHA1 sum, i.e. 40 hexadecimal characters long
+ if len(token) != 40 {
+ return nil, ErrAccessTokenNotExist{token}
+ }
+ for _, x := range []byte(token) {
+ if x < '0' || (x > '9' && x < 'a') || x > 'f' {
+ return nil, ErrAccessTokenNotExist{token}
+ }
+ }
+
+ lastEight := token[len(token)-8:]
+
+ if id := getAccessTokenIDFromCache(token); id > 0 {
+ accessToken := &AccessToken{
+ TokenLastEight: lastEight,
+ }
+ // Re-get the token from the db in case it has been deleted in the intervening period
+ has, err := db.GetEngine(ctx).ID(id).Get(accessToken)
+ if err != nil {
+ return nil, err
+ }
+ if has {
+ return accessToken, nil
+ }
+ successfulAccessTokenCache.Remove(token)
+ }
+
+ var tokens []AccessToken
+ err := db.GetEngine(ctx).Table(&AccessToken{}).Where("token_last_eight = ?", lastEight).Find(&tokens)
+ if err != nil {
+ return nil, err
+ } else if len(tokens) == 0 {
+ return nil, ErrAccessTokenNotExist{token}
+ }
+
+ for _, t := range tokens {
+ tempHash := HashToken(token, t.TokenSalt)
+ if subtle.ConstantTimeCompare([]byte(t.TokenHash), []byte(tempHash)) == 1 {
+ if successfulAccessTokenCache != nil {
+ successfulAccessTokenCache.Add(token, t.ID)
+ }
+ return &t, nil
+ }
+ }
+ return nil, ErrAccessTokenNotExist{token}
+}
+
+// AccessTokenByNameExists checks if a token name has been used already by a user.
+func AccessTokenByNameExists(ctx context.Context, token *AccessToken) (bool, error) {
+ return db.GetEngine(ctx).Table("access_token").Where("name = ?", token.Name).And("uid = ?", token.UID).Exist()
+}
+
+// ListAccessTokensOptions contains filter options
+type ListAccessTokensOptions struct {
+ db.ListOptions
+ Name string
+ UserID int64
+}
+
+func (opts ListAccessTokensOptions) ToConds() builder.Cond {
+ cond := builder.NewCond()
+	// user id is required; otherwise all tokens would be returned, which would be a bug
+ cond = cond.And(builder.Eq{"uid": opts.UserID})
+ if len(opts.Name) > 0 {
+ cond = cond.And(builder.Eq{"name": opts.Name})
+ }
+ return cond
+}
+
+func (opts ListAccessTokensOptions) ToOrders() string {
+ return "created_unix DESC"
+}
+
+// UpdateAccessToken updates the information of an access token.
+func UpdateAccessToken(ctx context.Context, t *AccessToken) error {
+ _, err := db.GetEngine(ctx).ID(t.ID).AllCols().Update(t)
+ return err
+}
+
+// DeleteAccessTokenByID deletes access token by given ID.
+func DeleteAccessTokenByID(ctx context.Context, id, userID int64) error {
+ cnt, err := db.GetEngine(ctx).ID(id).Delete(&AccessToken{
+ UID: userID,
+ })
+ if err != nil {
+ return err
+ } else if cnt != 1 {
+ return ErrAccessTokenNotExist{}
+ }
+ return nil
+}
diff --git a/models/auth/access_token_scope.go b/models/auth/access_token_scope.go
new file mode 100644
index 0000000..003ca5c
--- /dev/null
+++ b/models/auth/access_token_scope.go
@@ -0,0 +1,350 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package auth
+
+import (
+ "fmt"
+ "strings"
+
+ "code.gitea.io/gitea/models/perm"
+)
+
+// AccessTokenScopeCategory represents the scope category for an access token
+type AccessTokenScopeCategory int
+
+const (
+ AccessTokenScopeCategoryActivityPub = iota
+ AccessTokenScopeCategoryAdmin
+	AccessTokenScopeCategoryMisc // WARN: this is now just a placeholder; don't remove it, as removing it would shift the following values
+ AccessTokenScopeCategoryNotification
+ AccessTokenScopeCategoryOrganization
+ AccessTokenScopeCategoryPackage
+ AccessTokenScopeCategoryIssue
+ AccessTokenScopeCategoryRepository
+ AccessTokenScopeCategoryUser
+)
+
+// AllAccessTokenScopeCategories contains all access token scope categories
+var AllAccessTokenScopeCategories = []AccessTokenScopeCategory{
+ AccessTokenScopeCategoryActivityPub,
+ AccessTokenScopeCategoryAdmin,
+ AccessTokenScopeCategoryMisc,
+ AccessTokenScopeCategoryNotification,
+ AccessTokenScopeCategoryOrganization,
+ AccessTokenScopeCategoryPackage,
+ AccessTokenScopeCategoryIssue,
+ AccessTokenScopeCategoryRepository,
+ AccessTokenScopeCategoryUser,
+}
+
+// AccessTokenScopeLevel represents the access levels within a given scope category
+type AccessTokenScopeLevel int
+
+const (
+ NoAccess AccessTokenScopeLevel = iota
+ Read
+ Write
+)
+
+// AccessTokenScope represents the scope for an access token.
+type AccessTokenScope string
+
+// for all categories, write implies read
+const (
+ AccessTokenScopeAll AccessTokenScope = "all"
+ AccessTokenScopePublicOnly AccessTokenScope = "public-only" // limited to public orgs/repos
+
+ AccessTokenScopeReadActivityPub AccessTokenScope = "read:activitypub"
+ AccessTokenScopeWriteActivityPub AccessTokenScope = "write:activitypub"
+
+ AccessTokenScopeReadAdmin AccessTokenScope = "read:admin"
+ AccessTokenScopeWriteAdmin AccessTokenScope = "write:admin"
+
+ AccessTokenScopeReadMisc AccessTokenScope = "read:misc"
+ AccessTokenScopeWriteMisc AccessTokenScope = "write:misc"
+
+ AccessTokenScopeReadNotification AccessTokenScope = "read:notification"
+ AccessTokenScopeWriteNotification AccessTokenScope = "write:notification"
+
+ AccessTokenScopeReadOrganization AccessTokenScope = "read:organization"
+ AccessTokenScopeWriteOrganization AccessTokenScope = "write:organization"
+
+ AccessTokenScopeReadPackage AccessTokenScope = "read:package"
+ AccessTokenScopeWritePackage AccessTokenScope = "write:package"
+
+ AccessTokenScopeReadIssue AccessTokenScope = "read:issue"
+ AccessTokenScopeWriteIssue AccessTokenScope = "write:issue"
+
+ AccessTokenScopeReadRepository AccessTokenScope = "read:repository"
+ AccessTokenScopeWriteRepository AccessTokenScope = "write:repository"
+
+ AccessTokenScopeReadUser AccessTokenScope = "read:user"
+ AccessTokenScopeWriteUser AccessTokenScope = "write:user"
+)
+
+// accessTokenScopeBitmap represents a bitmap of access token scopes.
+type accessTokenScopeBitmap uint64
+
+// Bitmap of each scope, including the child scopes.
+const (
+	// accessTokenScopeAllBits is the bitmap of all access token scopes
+ accessTokenScopeAllBits accessTokenScopeBitmap = accessTokenScopeWriteActivityPubBits |
+ accessTokenScopeWriteAdminBits | accessTokenScopeWriteMiscBits | accessTokenScopeWriteNotificationBits |
+ accessTokenScopeWriteOrganizationBits | accessTokenScopeWritePackageBits | accessTokenScopeWriteIssueBits |
+ accessTokenScopeWriteRepositoryBits | accessTokenScopeWriteUserBits
+
+ accessTokenScopePublicOnlyBits accessTokenScopeBitmap = 1 << iota
+
+ accessTokenScopeReadActivityPubBits accessTokenScopeBitmap = 1 << iota
+ accessTokenScopeWriteActivityPubBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadActivityPubBits
+
+ accessTokenScopeReadAdminBits accessTokenScopeBitmap = 1 << iota
+ accessTokenScopeWriteAdminBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadAdminBits
+
+ accessTokenScopeReadMiscBits accessTokenScopeBitmap = 1 << iota
+ accessTokenScopeWriteMiscBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadMiscBits
+
+ accessTokenScopeReadNotificationBits accessTokenScopeBitmap = 1 << iota
+ accessTokenScopeWriteNotificationBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadNotificationBits
+
+ accessTokenScopeReadOrganizationBits accessTokenScopeBitmap = 1 << iota
+ accessTokenScopeWriteOrganizationBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadOrganizationBits
+
+ accessTokenScopeReadPackageBits accessTokenScopeBitmap = 1 << iota
+ accessTokenScopeWritePackageBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadPackageBits
+
+ accessTokenScopeReadIssueBits accessTokenScopeBitmap = 1 << iota
+ accessTokenScopeWriteIssueBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadIssueBits
+
+ accessTokenScopeReadRepositoryBits accessTokenScopeBitmap = 1 << iota
+ accessTokenScopeWriteRepositoryBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadRepositoryBits
+
+ accessTokenScopeReadUserBits accessTokenScopeBitmap = 1 << iota
+ accessTokenScopeWriteUserBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadUserBits
+
+ // The current implementation only supports up to 64 token scopes.
+ // If we need to support > 64 scopes,
+ // refactoring the whole implementation in this file (and only this file) is needed.
+)
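
Because every write constant ORs in its read constant, a write bitmap always contains the corresponding read bit, so a single AND covers both access levels:

    // Always true by construction of the constants above: holding
    // write:repository implies passing a read:repository check.
    const writeImpliesRead = accessTokenScopeWriteRepositoryBits&accessTokenScopeReadRepositoryBits == accessTokenScopeReadRepositoryBits
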
+
+// allAccessTokenScopes contains all access token scopes.
+// The order is important: parent scope must precede child scopes.
+var allAccessTokenScopes = []AccessTokenScope{
+ AccessTokenScopePublicOnly,
+ AccessTokenScopeWriteActivityPub, AccessTokenScopeReadActivityPub,
+ AccessTokenScopeWriteAdmin, AccessTokenScopeReadAdmin,
+ AccessTokenScopeWriteMisc, AccessTokenScopeReadMisc,
+ AccessTokenScopeWriteNotification, AccessTokenScopeReadNotification,
+ AccessTokenScopeWriteOrganization, AccessTokenScopeReadOrganization,
+ AccessTokenScopeWritePackage, AccessTokenScopeReadPackage,
+ AccessTokenScopeWriteIssue, AccessTokenScopeReadIssue,
+ AccessTokenScopeWriteRepository, AccessTokenScopeReadRepository,
+ AccessTokenScopeWriteUser, AccessTokenScopeReadUser,
+}
+
+// allAccessTokenScopeBits maps each access token scope to its bitmap, including child scope bits.
+var allAccessTokenScopeBits = map[AccessTokenScope]accessTokenScopeBitmap{
+ AccessTokenScopeAll: accessTokenScopeAllBits,
+ AccessTokenScopePublicOnly: accessTokenScopePublicOnlyBits,
+ AccessTokenScopeReadActivityPub: accessTokenScopeReadActivityPubBits,
+ AccessTokenScopeWriteActivityPub: accessTokenScopeWriteActivityPubBits,
+ AccessTokenScopeReadAdmin: accessTokenScopeReadAdminBits,
+ AccessTokenScopeWriteAdmin: accessTokenScopeWriteAdminBits,
+ AccessTokenScopeReadMisc: accessTokenScopeReadMiscBits,
+ AccessTokenScopeWriteMisc: accessTokenScopeWriteMiscBits,
+ AccessTokenScopeReadNotification: accessTokenScopeReadNotificationBits,
+ AccessTokenScopeWriteNotification: accessTokenScopeWriteNotificationBits,
+ AccessTokenScopeReadOrganization: accessTokenScopeReadOrganizationBits,
+ AccessTokenScopeWriteOrganization: accessTokenScopeWriteOrganizationBits,
+ AccessTokenScopeReadPackage: accessTokenScopeReadPackageBits,
+ AccessTokenScopeWritePackage: accessTokenScopeWritePackageBits,
+ AccessTokenScopeReadIssue: accessTokenScopeReadIssueBits,
+ AccessTokenScopeWriteIssue: accessTokenScopeWriteIssueBits,
+ AccessTokenScopeReadRepository: accessTokenScopeReadRepositoryBits,
+ AccessTokenScopeWriteRepository: accessTokenScopeWriteRepositoryBits,
+ AccessTokenScopeReadUser: accessTokenScopeReadUserBits,
+ AccessTokenScopeWriteUser: accessTokenScopeWriteUserBits,
+}
+
+// accessTokenScopes maps a scope level and category to the corresponding scope
+var accessTokenScopes = map[AccessTokenScopeLevel]map[AccessTokenScopeCategory]AccessTokenScope{
+ Read: {
+ AccessTokenScopeCategoryActivityPub: AccessTokenScopeReadActivityPub,
+ AccessTokenScopeCategoryAdmin: AccessTokenScopeReadAdmin,
+ AccessTokenScopeCategoryMisc: AccessTokenScopeReadMisc,
+ AccessTokenScopeCategoryNotification: AccessTokenScopeReadNotification,
+ AccessTokenScopeCategoryOrganization: AccessTokenScopeReadOrganization,
+ AccessTokenScopeCategoryPackage: AccessTokenScopeReadPackage,
+ AccessTokenScopeCategoryIssue: AccessTokenScopeReadIssue,
+ AccessTokenScopeCategoryRepository: AccessTokenScopeReadRepository,
+ AccessTokenScopeCategoryUser: AccessTokenScopeReadUser,
+ },
+ Write: {
+ AccessTokenScopeCategoryActivityPub: AccessTokenScopeWriteActivityPub,
+ AccessTokenScopeCategoryAdmin: AccessTokenScopeWriteAdmin,
+ AccessTokenScopeCategoryMisc: AccessTokenScopeWriteMisc,
+ AccessTokenScopeCategoryNotification: AccessTokenScopeWriteNotification,
+ AccessTokenScopeCategoryOrganization: AccessTokenScopeWriteOrganization,
+ AccessTokenScopeCategoryPackage: AccessTokenScopeWritePackage,
+ AccessTokenScopeCategoryIssue: AccessTokenScopeWriteIssue,
+ AccessTokenScopeCategoryRepository: AccessTokenScopeWriteRepository,
+ AccessTokenScopeCategoryUser: AccessTokenScopeWriteUser,
+ },
+}
+
+// GetRequiredScopes gets the specific scopes for a given level and categories
+func GetRequiredScopes(level AccessTokenScopeLevel, scopeCategories ...AccessTokenScopeCategory) []AccessTokenScope {
+ scopes := make([]AccessTokenScope, 0, len(scopeCategories))
+ for _, cat := range scopeCategories {
+ scopes = append(scopes, accessTokenScopes[level][cat])
+ }
+ return scopes
+}
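
For instance, a handler that needs read access to repositories and issues would request the following (sketch):

    // Sketch: scopes required for read access to two categories.
    func demoRequiredScopes() []AccessTokenScope {
        // Returns []AccessTokenScope{"read:repository", "read:issue"}.
        return GetRequiredScopes(Read,
            AccessTokenScopeCategoryRepository, AccessTokenScopeCategoryIssue)
    }
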
+
+// ContainsCategory checks if a list of categories contains a specific category
+func ContainsCategory(categories []AccessTokenScopeCategory, category AccessTokenScopeCategory) bool {
+ for _, c := range categories {
+ if c == category {
+ return true
+ }
+ }
+ return false
+}
+
+// GetScopeLevelFromAccessMode converts permission access mode to scope level
+func GetScopeLevelFromAccessMode(mode perm.AccessMode) AccessTokenScopeLevel {
+ switch mode {
+ case perm.AccessModeNone:
+ return NoAccess
+ case perm.AccessModeRead:
+ return Read
+ case perm.AccessModeWrite:
+ return Write
+ case perm.AccessModeAdmin:
+ return Write
+ case perm.AccessModeOwner:
+ return Write
+ default:
+ return NoAccess
+ }
+}
+
+// parse the scope string into a bitmap, thus removing possible duplicates.
+func (s AccessTokenScope) parse() (accessTokenScopeBitmap, error) {
+ var bitmap accessTokenScopeBitmap
+
+	// The following is the more performant equivalent of 'for _, v := range strings.Split(remainingScopes, ",")' as this is hot code
+ remainingScopes := string(s)
+ for len(remainingScopes) > 0 {
+ i := strings.IndexByte(remainingScopes, ',')
+ var v string
+ if i < 0 {
+ v = remainingScopes
+ remainingScopes = ""
+ } else if i+1 >= len(remainingScopes) {
+ v = remainingScopes[:i]
+ remainingScopes = ""
+ } else {
+ v = remainingScopes[:i]
+ remainingScopes = remainingScopes[i+1:]
+ }
+ singleScope := AccessTokenScope(v)
+ if singleScope == "" || singleScope == "sudo" {
+ continue
+ }
+ if singleScope == AccessTokenScopeAll {
+ bitmap |= accessTokenScopeAllBits
+ continue
+ }
+
+ bits, ok := allAccessTokenScopeBits[singleScope]
+ if !ok {
+ return 0, fmt.Errorf("invalid access token scope: %s", singleScope)
+ }
+ bitmap |= bits
+ }
+
+ return bitmap, nil
+}
+
+// StringSlice returns the AccessTokenScope as a []string
+func (s AccessTokenScope) StringSlice() []string {
+ return strings.Split(string(s), ",")
+}
+
+// Normalize returns a normalized scope string without any duplicates.
+func (s AccessTokenScope) Normalize() (AccessTokenScope, error) {
+ bitmap, err := s.parse()
+ if err != nil {
+ return "", err
+ }
+
+ return bitmap.toScope(), nil
+}
+
+// PublicOnly checks if this token scope is limited to public resources
+func (s AccessTokenScope) PublicOnly() (bool, error) {
+ bitmap, err := s.parse()
+ if err != nil {
+ return false, err
+ }
+
+ return bitmap.hasScope(AccessTokenScopePublicOnly)
+}
+
+// HasScope returns true if the scope string contains all of the given scopes
+func (s AccessTokenScope) HasScope(scopes ...AccessTokenScope) (bool, error) {
+ bitmap, err := s.parse()
+ if err != nil {
+ return false, err
+ }
+
+ for _, s := range scopes {
+ if has, err := bitmap.hasScope(s); !has || err != nil {
+ return has, err
+ }
+ }
+
+ return true, nil
+}
+
+// hasScope returns true if the bitmap contains the given scope
+func (bitmap accessTokenScopeBitmap) hasScope(scope AccessTokenScope) (bool, error) {
+ expectedBits, ok := allAccessTokenScopeBits[scope]
+ if !ok {
+ return false, fmt.Errorf("invalid access token scope: %s", scope)
+ }
+
+ return bitmap&expectedBits == expectedBits, nil
+}
+
+// toScope returns a normalized scope string without any duplicates.
+func (bitmap accessTokenScopeBitmap) toScope() AccessTokenScope {
+ var scopes []string
+
+ // iterate over all scopes, and reconstruct the bitmap
+ // if the reconstructed bitmap doesn't change, then the scope is already included
+ var reconstruct accessTokenScopeBitmap
+
+ for _, singleScope := range allAccessTokenScopes {
+ // no need for error checking here, since we know the scope is valid
+ if ok, _ := bitmap.hasScope(singleScope); ok {
+ current := reconstruct | allAccessTokenScopeBits[singleScope]
+ if current == reconstruct {
+ continue
+ }
+
+ reconstruct = current
+ scopes = append(scopes, string(singleScope))
+ }
+ }
+
+ scope := AccessTokenScope(strings.Join(scopes, ","))
+ scope = AccessTokenScope(strings.ReplaceAll(
+ string(scope),
+ "write:activitypub,write:admin,write:misc,write:notification,write:organization,write:package,write:issue,write:repository,write:user",
+ "all",
+ ))
+ return scope
+}
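
Taken together, parse and toScope give Normalize its deduplicating behaviour, for example:

    // Sketch of the normalization implied by the code above.
    func demoNormalize() AccessTokenScope {
        s := AccessTokenScope("read:repository,write:repository,read:repository")
        normalized, _ := s.Normalize()
        return normalized // "write:repository": write subsumes read, duplicates collapse
    }
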
diff --git a/models/auth/access_token_scope_test.go b/models/auth/access_token_scope_test.go
new file mode 100644
index 0000000..d11c5e6
--- /dev/null
+++ b/models/auth/access_token_scope_test.go
@@ -0,0 +1,90 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package auth
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+type scopeTestNormalize struct {
+ in AccessTokenScope
+ out AccessTokenScope
+ err error
+}
+
+func TestAccessTokenScope_Normalize(t *testing.T) {
+ tests := []scopeTestNormalize{
+ {"", "", nil},
+ {"write:misc,write:notification,read:package,write:notification,public-only", "public-only,write:misc,write:notification,read:package", nil},
+ {"all,sudo", "all", nil},
+ {"write:activitypub,write:admin,write:misc,write:notification,write:organization,write:package,write:issue,write:repository,write:user", "all", nil},
+ {"write:activitypub,write:admin,write:misc,write:notification,write:organization,write:package,write:issue,write:repository,write:user,public-only", "public-only,all", nil},
+ }
+
+ for _, scope := range []string{"activitypub", "admin", "misc", "notification", "organization", "package", "issue", "repository", "user"} {
+ tests = append(tests,
+ scopeTestNormalize{AccessTokenScope(fmt.Sprintf("read:%s", scope)), AccessTokenScope(fmt.Sprintf("read:%s", scope)), nil},
+ scopeTestNormalize{AccessTokenScope(fmt.Sprintf("write:%s", scope)), AccessTokenScope(fmt.Sprintf("write:%s", scope)), nil},
+ scopeTestNormalize{AccessTokenScope(fmt.Sprintf("write:%[1]s,read:%[1]s", scope)), AccessTokenScope(fmt.Sprintf("write:%s", scope)), nil},
+ scopeTestNormalize{AccessTokenScope(fmt.Sprintf("read:%[1]s,write:%[1]s", scope)), AccessTokenScope(fmt.Sprintf("write:%s", scope)), nil},
+ scopeTestNormalize{AccessTokenScope(fmt.Sprintf("read:%[1]s,write:%[1]s,write:%[1]s", scope)), AccessTokenScope(fmt.Sprintf("write:%s", scope)), nil},
+ )
+ }
+
+ for _, test := range tests {
+ t.Run(string(test.in), func(t *testing.T) {
+ scope, err := test.in.Normalize()
+ assert.Equal(t, test.out, scope)
+ assert.Equal(t, test.err, err)
+ })
+ }
+}
+
+type scopeTestHasScope struct {
+ in AccessTokenScope
+ scope AccessTokenScope
+ out bool
+ err error
+}
+
+func TestAccessTokenScope_HasScope(t *testing.T) {
+ tests := []scopeTestHasScope{
+ {"read:admin", "write:package", false, nil},
+ {"all", "write:package", true, nil},
+ {"write:package", "all", false, nil},
+ {"public-only", "read:issue", false, nil},
+ }
+
+ for _, scope := range []string{"activitypub", "admin", "misc", "notification", "organization", "package", "issue", "repository", "user"} {
+ tests = append(tests,
+ scopeTestHasScope{
+ AccessTokenScope(fmt.Sprintf("read:%s", scope)),
+ AccessTokenScope(fmt.Sprintf("read:%s", scope)), true, nil,
+ },
+ scopeTestHasScope{
+ AccessTokenScope(fmt.Sprintf("write:%s", scope)),
+ AccessTokenScope(fmt.Sprintf("write:%s", scope)), true, nil,
+ },
+ scopeTestHasScope{
+ AccessTokenScope(fmt.Sprintf("write:%s", scope)),
+ AccessTokenScope(fmt.Sprintf("read:%s", scope)), true, nil,
+ },
+ scopeTestHasScope{
+ AccessTokenScope(fmt.Sprintf("read:%s", scope)),
+ AccessTokenScope(fmt.Sprintf("write:%s", scope)), false, nil,
+ },
+ )
+ }
+
+ for _, test := range tests {
+ t.Run(string(test.in), func(t *testing.T) {
+ hasScope, err := test.in.HasScope(test.scope)
+ assert.Equal(t, test.out, hasScope)
+ assert.Equal(t, test.err, err)
+ })
+ }
+}
diff --git a/models/auth/access_token_test.go b/models/auth/access_token_test.go
new file mode 100644
index 0000000..e6ea487
--- /dev/null
+++ b/models/auth/access_token_test.go
@@ -0,0 +1,133 @@
+// Copyright 2016 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package auth_test
+
+import (
+ "testing"
+
+ auth_model "code.gitea.io/gitea/models/auth"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNewAccessToken(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ token := &auth_model.AccessToken{
+ UID: 3,
+ Name: "Token C",
+ }
+ require.NoError(t, auth_model.NewAccessToken(db.DefaultContext, token))
+ unittest.AssertExistsAndLoadBean(t, token)
+
+ invalidToken := &auth_model.AccessToken{
+ ID: token.ID, // duplicate
+ UID: 2,
+ Name: "Token F",
+ }
+ require.Error(t, auth_model.NewAccessToken(db.DefaultContext, invalidToken))
+}
+
+func TestAccessTokenByNameExists(t *testing.T) {
+ name := "Token Gitea"
+
+ require.NoError(t, unittest.PrepareTestDatabase())
+ token := &auth_model.AccessToken{
+ UID: 3,
+ Name: name,
+ }
+
+	// Check to make sure it doesn't exist already
+ exist, err := auth_model.AccessTokenByNameExists(db.DefaultContext, token)
+ require.NoError(t, err)
+ assert.False(t, exist)
+
+ // Save it to the database
+ require.NoError(t, auth_model.NewAccessToken(db.DefaultContext, token))
+ unittest.AssertExistsAndLoadBean(t, token)
+
+ // This token must be found by name in the DB now
+ exist, err = auth_model.AccessTokenByNameExists(db.DefaultContext, token)
+ require.NoError(t, err)
+ assert.True(t, exist)
+
+ user4Token := &auth_model.AccessToken{
+ UID: 4,
+ Name: name,
+ }
+
+	// Name matches but a different user ID; this shouldn't exist in the
+	// database
+ exist, err = auth_model.AccessTokenByNameExists(db.DefaultContext, user4Token)
+ require.NoError(t, err)
+ assert.False(t, exist)
+}
+
+func TestGetAccessTokenBySHA(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ token, err := auth_model.GetAccessTokenBySHA(db.DefaultContext, "d2c6c1ba3890b309189a8e618c72a162e4efbf36")
+ require.NoError(t, err)
+ assert.Equal(t, int64(1), token.UID)
+ assert.Equal(t, "Token A", token.Name)
+ assert.Equal(t, "2b3668e11cb82d3af8c6e4524fc7841297668f5008d1626f0ad3417e9fa39af84c268248b78c481daa7e5dc437784003494f", token.TokenHash)
+ assert.Equal(t, "e4efbf36", token.TokenLastEight)
+
+ _, err = auth_model.GetAccessTokenBySHA(db.DefaultContext, "notahash")
+ require.Error(t, err)
+ assert.True(t, auth_model.IsErrAccessTokenNotExist(err))
+
+ _, err = auth_model.GetAccessTokenBySHA(db.DefaultContext, "")
+ require.Error(t, err)
+ assert.True(t, auth_model.IsErrAccessTokenEmpty(err))
+}
+
+func TestListAccessTokens(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ tokens, err := db.Find[auth_model.AccessToken](db.DefaultContext, auth_model.ListAccessTokensOptions{UserID: 1})
+ require.NoError(t, err)
+ if assert.Len(t, tokens, 2) {
+ assert.Equal(t, int64(1), tokens[0].UID)
+ assert.Equal(t, int64(1), tokens[1].UID)
+ assert.Contains(t, []string{tokens[0].Name, tokens[1].Name}, "Token A")
+ assert.Contains(t, []string{tokens[0].Name, tokens[1].Name}, "Token B")
+ }
+
+ tokens, err = db.Find[auth_model.AccessToken](db.DefaultContext, auth_model.ListAccessTokensOptions{UserID: 2})
+ require.NoError(t, err)
+ if assert.Len(t, tokens, 1) {
+ assert.Equal(t, int64(2), tokens[0].UID)
+ assert.Equal(t, "Token A", tokens[0].Name)
+ }
+
+ tokens, err = db.Find[auth_model.AccessToken](db.DefaultContext, auth_model.ListAccessTokensOptions{UserID: 100})
+ require.NoError(t, err)
+ assert.Empty(t, tokens)
+}
+
+func TestUpdateAccessToken(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ token, err := auth_model.GetAccessTokenBySHA(db.DefaultContext, "4c6f36e6cf498e2a448662f915d932c09c5a146c")
+ require.NoError(t, err)
+ token.Name = "Token Z"
+
+ require.NoError(t, auth_model.UpdateAccessToken(db.DefaultContext, token))
+ unittest.AssertExistsAndLoadBean(t, token)
+}
+
+func TestDeleteAccessTokenByID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ token, err := auth_model.GetAccessTokenBySHA(db.DefaultContext, "4c6f36e6cf498e2a448662f915d932c09c5a146c")
+ require.NoError(t, err)
+ assert.Equal(t, int64(1), token.UID)
+
+ require.NoError(t, auth_model.DeleteAccessTokenByID(db.DefaultContext, token.ID, 1))
+ unittest.AssertNotExistsBean(t, token)
+
+ err = auth_model.DeleteAccessTokenByID(db.DefaultContext, 100, 100)
+ require.Error(t, err)
+ assert.True(t, auth_model.IsErrAccessTokenNotExist(err))
+}
diff --git a/models/auth/auth_token.go b/models/auth/auth_token.go
new file mode 100644
index 0000000..c64af3e
--- /dev/null
+++ b/models/auth/auth_token.go
@@ -0,0 +1,116 @@
+// Copyright 2023 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package auth
+
+import (
+ "context"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+)
+
+type AuthorizationPurpose string
+
+var (
+ // Used to store long term authorization tokens.
+ LongTermAuthorization AuthorizationPurpose = "long_term_authorization"
+
+ // Used to activate a user account.
+ UserActivation AuthorizationPurpose = "user_activation"
+
+ // Used to reset the password.
+ PasswordReset AuthorizationPurpose = "password_reset"
+)
+
+// Used to activate the specified email address for a user.
+func EmailActivation(email string) AuthorizationPurpose {
+ return AuthorizationPurpose("email_activation:" + email)
+}
+
+// AuthorizationToken represents an authorization token belonging to a user.
+type AuthorizationToken struct {
+ ID int64 `xorm:"pk autoincr"`
+ UID int64 `xorm:"INDEX"`
+ LookupKey string `xorm:"INDEX UNIQUE"`
+ HashedValidator string
+ Purpose AuthorizationPurpose `xorm:"NOT NULL DEFAULT 'long_term_authorization'"`
+ Expiry timeutil.TimeStamp
+}
+
+// TableName provides the real table name.
+func (AuthorizationToken) TableName() string {
+ return "forgejo_auth_token"
+}
+
+func init() {
+ db.RegisterModel(new(AuthorizationToken))
+}
+
+// IsExpired returns whether the authorization token is expired.
+func (authToken *AuthorizationToken) IsExpired() bool {
+ return authToken.Expiry.AsLocalTime().Before(time.Now())
+}
+
+// GenerateAuthToken generates a new authentication token for the given user.
+// It returns the lookup key and validator values that should be passed to the
+// user via a long-term cookie.
+func GenerateAuthToken(ctx context.Context, userID int64, expiry timeutil.TimeStamp, purpose AuthorizationPurpose) (lookupKey, validator string, err error) {
+ // Request 64 random bytes. The first 32 bytes will be used for the lookupKey
+ // and the other 32 bytes will be used for the validator.
+ rBytes, err := util.CryptoRandomBytes(64)
+ if err != nil {
+ return "", "", err
+ }
+ hexEncoded := hex.EncodeToString(rBytes)
+ validator, lookupKey = hexEncoded[64:], hexEncoded[:64]
+
+ _, err = db.GetEngine(ctx).Insert(&AuthorizationToken{
+ UID: userID,
+ Expiry: expiry,
+ LookupKey: lookupKey,
+ HashedValidator: HashValidator(rBytes[32:]),
+ Purpose: purpose,
+ })
+ return lookupKey, validator, err
+}
+
+// FindAuthToken will find an authorization token via the lookup key.
+func FindAuthToken(ctx context.Context, lookupKey string, purpose AuthorizationPurpose) (*AuthorizationToken, error) {
+ var authToken AuthorizationToken
+ has, err := db.GetEngine(ctx).Where("lookup_key = ? AND purpose = ?", lookupKey, purpose).Get(&authToken)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, fmt.Errorf("lookup key %q: %w", lookupKey, util.ErrNotExist)
+ }
+ return &authToken, nil
+}
+
+// DeleteAuthToken will delete the authorization token.
+func DeleteAuthToken(ctx context.Context, authToken *AuthorizationToken) error {
+ _, err := db.DeleteByBean(ctx, authToken)
+ return err
+}
+
+// DeleteAuthTokenByUser will delete all authorization tokens for the user.
+func DeleteAuthTokenByUser(ctx context.Context, userID int64) error {
+ if userID == 0 {
+ return nil
+ }
+
+ _, err := db.DeleteByBean(ctx, &AuthorizationToken{UID: userID})
+ return err
+}
+
+// HashValidator returns the hex-encoded SHA-256 hash of the validator.
+func HashValidator(validator []byte) string {
+ h := sha256.New()
+ h.Write(validator)
+ return hex.EncodeToString(h.Sum(nil))
+}
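
The lookup-key/validator split lets the server authenticate a cookie without ever storing the validator in clear. A hedged sketch of the verifying side (the helper name is an assumption, not part of this file):

    // Hypothetical verification counterpart; assumes "crypto/subtle" is imported.
    func checkAuthToken(ctx context.Context, lookupKey, validator string) (*AuthorizationToken, error) {
        authToken, err := FindAuthToken(ctx, lookupKey, LongTermAuthorization)
        if err != nil {
            return nil, err
        }
        if authToken.IsExpired() {
            return nil, util.ErrNotExist
        }
        rawValidator, err := hex.DecodeString(validator)
        if err != nil {
            return nil, err
        }
        // Compare hashed validators in constant time to avoid timing leaks.
        if subtle.ConstantTimeCompare([]byte(authToken.HashedValidator), []byte(HashValidator(rawValidator))) != 1 {
            return nil, util.ErrNotExist
        }
        return authToken, nil
    }
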
diff --git a/models/auth/main_test.go b/models/auth/main_test.go
new file mode 100644
index 0000000..d772ea6
--- /dev/null
+++ b/models/auth/main_test.go
@@ -0,0 +1,20 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package auth_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/unittest"
+
+ _ "code.gitea.io/gitea/models"
+ _ "code.gitea.io/gitea/models/actions"
+ _ "code.gitea.io/gitea/models/activities"
+ _ "code.gitea.io/gitea/models/auth"
+ _ "code.gitea.io/gitea/models/perm/access"
+)
+
+func TestMain(m *testing.M) {
+ unittest.MainTest(m)
+}
diff --git a/models/auth/oauth2.go b/models/auth/oauth2.go
new file mode 100644
index 0000000..9a78544
--- /dev/null
+++ b/models/auth/oauth2.go
@@ -0,0 +1,678 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package auth
+
+import (
+ "context"
+ "crypto/sha256"
+ "encoding/base32"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ uuid "github.com/google/uuid"
+ "golang.org/x/crypto/bcrypt"
+ "xorm.io/builder"
+ "xorm.io/xorm"
+)
+
+// OAuth2Application represents an OAuth2 client (RFC 6749)
+type OAuth2Application struct {
+ ID int64 `xorm:"pk autoincr"`
+ UID int64 `xorm:"INDEX"`
+ Name string
+ ClientID string `xorm:"unique"`
+ ClientSecret string
+ // OAuth defines both Confidential and Public client types
+ // https://datatracker.ietf.org/doc/html/rfc6749#section-2.1
+ // "Authorization servers MUST record the client type in the client registration details"
+ // https://datatracker.ietf.org/doc/html/rfc8252#section-8.4
+ ConfidentialClient bool `xorm:"NOT NULL DEFAULT TRUE"`
+ RedirectURIs []string `xorm:"redirect_uris JSON TEXT"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+}
+
+func init() {
+ db.RegisterModel(new(OAuth2Application))
+ db.RegisterModel(new(OAuth2AuthorizationCode))
+ db.RegisterModel(new(OAuth2Grant))
+}
+
+type BuiltinOAuth2Application struct {
+ ConfigName string
+ DisplayName string
+ RedirectURIs []string
+}
+
+func BuiltinApplications() map[string]*BuiltinOAuth2Application {
+ m := make(map[string]*BuiltinOAuth2Application)
+ m["a4792ccc-144e-407e-86c9-5e7d8d9c3269"] = &BuiltinOAuth2Application{
+ ConfigName: "git-credential-oauth",
+ DisplayName: "git-credential-oauth",
+ RedirectURIs: []string{"http://127.0.0.1", "https://127.0.0.1"},
+ }
+ m["e90ee53c-94e2-48ac-9358-a874fb9e0662"] = &BuiltinOAuth2Application{
+ ConfigName: "git-credential-manager",
+ DisplayName: "Git Credential Manager",
+ RedirectURIs: []string{"http://127.0.0.1", "https://127.0.0.1"},
+ }
+ m["d57cb8c4-630c-4168-8324-ec79935e18d4"] = &BuiltinOAuth2Application{
+ ConfigName: "tea",
+ DisplayName: "tea",
+ RedirectURIs: []string{"http://127.0.0.1", "https://127.0.0.1"},
+ }
+ return m
+}
+
+func BuiltinApplicationsClientIDs() (clientIDs []string) {
+ for clientID := range BuiltinApplications() {
+ clientIDs = append(clientIDs, clientID)
+ }
+ return clientIDs
+}
+
+func Init(ctx context.Context) error {
+ builtinApps := BuiltinApplications()
+ var builtinAllClientIDs []string
+ for clientID := range builtinApps {
+ builtinAllClientIDs = append(builtinAllClientIDs, clientID)
+ }
+
+ var registeredApps []*OAuth2Application
+ if err := db.GetEngine(ctx).In("client_id", builtinAllClientIDs).Find(&registeredApps); err != nil {
+ return err
+ }
+
+ clientIDsToAdd := container.Set[string]{}
+ for _, configName := range setting.OAuth2.DefaultApplications {
+ found := false
+ for clientID, builtinApp := range builtinApps {
+ if builtinApp.ConfigName == configName {
+ clientIDsToAdd.Add(clientID) // add all user-configured apps to the "add" list
+ found = true
+ }
+ }
+ if !found {
+ return fmt.Errorf("unknown oauth2 application: %q", configName)
+ }
+ }
+ clientIDsToDelete := container.Set[string]{}
+ for _, app := range registeredApps {
+ if !clientIDsToAdd.Contains(app.ClientID) {
+ clientIDsToDelete.Add(app.ClientID) // if a registered app is not in the "add" list, it should be deleted
+ }
+ }
+ for _, app := range registeredApps {
+ clientIDsToAdd.Remove(app.ClientID) // no need to re-add existing (registered) apps, so remove them from the set
+ }
+
+ for _, app := range registeredApps {
+ if clientIDsToDelete.Contains(app.ClientID) {
+ if err := deleteOAuth2Application(ctx, app.ID, 0); err != nil {
+ return err
+ }
+ }
+ }
+ for clientID := range clientIDsToAdd {
+ builtinApp := builtinApps[clientID]
+ if err := db.Insert(ctx, &OAuth2Application{
+ Name: builtinApp.DisplayName,
+ ClientID: clientID,
+ RedirectURIs: builtinApp.RedirectURIs,
+ }); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// TableName sets the table name to `oauth2_application`
+func (app *OAuth2Application) TableName() string {
+ return "oauth2_application"
+}
+
+// ContainsRedirectURI checks if redirectURI is allowed for app
+func (app *OAuth2Application) ContainsRedirectURI(redirectURI string) bool {
+ // OAuth2 requires the redirect URI to be an exact match, no dynamic parts are allowed.
+ // https://stackoverflow.com/questions/55524480/should-dynamic-query-parameters-be-present-in-the-redirection-uri-for-an-oauth2
+ // https://www.rfc-editor.org/rfc/rfc6819#section-5.2.3.3
+ // https://openid.net/specs/openid-connect-core-1_0.html#AuthRequest
+ // https://datatracker.ietf.org/doc/html/draft-ietf-oauth-security-topics-12#section-3.1
+ contains := func(s string) bool {
+ s = strings.TrimSuffix(strings.ToLower(s), "/")
+ for _, u := range app.RedirectURIs {
+ if strings.TrimSuffix(strings.ToLower(u), "/") == s {
+ return true
+ }
+ }
+ return false
+ }
+ if !app.ConfidentialClient {
+ uri, err := url.Parse(redirectURI)
+ // ignore port for http loopback uris following https://datatracker.ietf.org/doc/html/rfc8252#section-7.3
+ if err == nil && uri.Scheme == "http" && uri.Port() != "" {
+ ip := net.ParseIP(uri.Hostname())
+ if ip != nil && ip.IsLoopback() {
+ // strip port
+ uri.Host = uri.Hostname()
+ if contains(uri.String()) {
+ return true
+ }
+ }
+ }
+ }
+ return contains(redirectURI)
+}
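
In practice a public client registered with http://127.0.0.1 matches any loopback port (RFC 8252, section 7.3), while confidential clients need an exact match:

    // Sketch of the loopback behaviour implemented above.
    func demoLoopbackRedirect() (publicOK, confidentialOK bool) {
        app := &OAuth2Application{
            ConfidentialClient: false,
            RedirectURIs:       []string{"http://127.0.0.1"},
        }
        publicOK = app.ContainsRedirectURI("http://127.0.0.1:49152") // true: port ignored for loopback
        app.ConfidentialClient = true
        confidentialOK = app.ContainsRedirectURI("http://127.0.0.1:49152") // false: exact match required
        return publicOK, confidentialOK
    }
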
+
+// Base32 characters, but lowercased.
+const lowerBase32Chars = "abcdefghijklmnopqrstuvwxyz234567"
+
+// base32 encoder that uses lowered characters without padding.
+var base32Lower = base32.NewEncoding(lowerBase32Chars).WithPadding(base32.NoPadding)
+
+// GenerateClientSecret generates a new client secret, saves its bcrypt hash to the database, and returns the plaintext secret
+func (app *OAuth2Application) GenerateClientSecret(ctx context.Context) (string, error) {
+ rBytes, err := util.CryptoRandomBytes(32)
+ if err != nil {
+ return "", err
+ }
+	// Add a "gto_" prefix to the base32 secret so that secret-scanning
+	// tools can more easily recognize leaked client secrets.
+ clientSecret := "gto_" + base32Lower.EncodeToString(rBytes)
+
+ hashedSecret, err := bcrypt.GenerateFromPassword([]byte(clientSecret), bcrypt.DefaultCost)
+ if err != nil {
+ return "", err
+ }
+ app.ClientSecret = string(hashedSecret)
+ if _, err := db.GetEngine(ctx).ID(app.ID).Cols("client_secret").Update(app); err != nil {
+ return "", err
+ }
+ return clientSecret, nil
+}
+
+// ValidateClientSecret validates the given secret against the hash saved in the database
+func (app *OAuth2Application) ValidateClientSecret(secret []byte) bool {
+ return bcrypt.CompareHashAndPassword([]byte(app.ClientSecret), secret) == nil
+}
+
+// GetGrantByUserID returns an OAuth2Grant by its user and application ID
+func (app *OAuth2Application) GetGrantByUserID(ctx context.Context, userID int64) (grant *OAuth2Grant, err error) {
+ grant = new(OAuth2Grant)
+ if has, err := db.GetEngine(ctx).Where("user_id = ? AND application_id = ?", userID, app.ID).Get(grant); err != nil {
+ return nil, err
+ } else if !has {
+ return nil, nil
+ }
+ return grant, nil
+}
+
+// CreateGrant generates a grant for a user
+func (app *OAuth2Application) CreateGrant(ctx context.Context, userID int64, scope string) (*OAuth2Grant, error) {
+ grant := &OAuth2Grant{
+ ApplicationID: app.ID,
+ UserID: userID,
+ Scope: scope,
+ }
+ err := db.Insert(ctx, grant)
+ if err != nil {
+ return nil, err
+ }
+ return grant, nil
+}
+
+// GetOAuth2ApplicationByClientID returns the oauth2 application with the given client_id. Returns an error if not found.
+func GetOAuth2ApplicationByClientID(ctx context.Context, clientID string) (app *OAuth2Application, err error) {
+ app = new(OAuth2Application)
+ has, err := db.GetEngine(ctx).Where("client_id = ?", clientID).Get(app)
+	if err != nil {
+		return nil, err
+	}
+	if !has {
+		return nil, ErrOAuthClientIDInvalid{ClientID: clientID}
+	}
+	return app, nil
+}
+
+// GetOAuth2ApplicationByID returns the oauth2 application with the given id. Returns an error if not found.
+func GetOAuth2ApplicationByID(ctx context.Context, id int64) (app *OAuth2Application, err error) {
+ app = new(OAuth2Application)
+ has, err := db.GetEngine(ctx).ID(id).Get(app)
+ if err != nil {
+ return nil, err
+ }
+ if !has {
+ return nil, ErrOAuthApplicationNotFound{ID: id}
+ }
+ return app, nil
+}
+
+// CreateOAuth2ApplicationOptions holds options to create an oauth2 application
+type CreateOAuth2ApplicationOptions struct {
+ Name string
+ UserID int64
+ ConfidentialClient bool
+ RedirectURIs []string
+}
+
+// CreateOAuth2Application inserts a new oauth2 application
+func CreateOAuth2Application(ctx context.Context, opts CreateOAuth2ApplicationOptions) (*OAuth2Application, error) {
+ clientID := uuid.New().String()
+ app := &OAuth2Application{
+ UID: opts.UserID,
+ Name: opts.Name,
+ ClientID: clientID,
+ RedirectURIs: opts.RedirectURIs,
+ ConfidentialClient: opts.ConfidentialClient,
+ }
+ if err := db.Insert(ctx, app); err != nil {
+ return nil, err
+ }
+ return app, nil
+}
+
+// UpdateOAuth2ApplicationOptions holds options to update an oauth2 application
+type UpdateOAuth2ApplicationOptions struct {
+ ID int64
+ Name string
+ UserID int64
+ ConfidentialClient bool
+ RedirectURIs []string
+}
+
+// UpdateOAuth2Application updates an oauth2 application
+func UpdateOAuth2Application(ctx context.Context, opts UpdateOAuth2ApplicationOptions) (*OAuth2Application, error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer committer.Close()
+
+ app, err := GetOAuth2ApplicationByID(ctx, opts.ID)
+ if err != nil {
+ return nil, err
+ }
+ if app.UID != opts.UserID {
+ return nil, errors.New("UID mismatch")
+ }
+ builtinApps := BuiltinApplications()
+ if _, builtin := builtinApps[app.ClientID]; builtin {
+ return nil, fmt.Errorf("failed to edit OAuth2 application: application is locked: %s", app.ClientID)
+ }
+
+ app.Name = opts.Name
+ app.RedirectURIs = opts.RedirectURIs
+ app.ConfidentialClient = opts.ConfidentialClient
+
+ if err = updateOAuth2Application(ctx, app); err != nil {
+ return nil, err
+ }
+ app.ClientSecret = ""
+
+ return app, committer.Commit()
+}
+
+func updateOAuth2Application(ctx context.Context, app *OAuth2Application) error {
+ if _, err := db.GetEngine(ctx).ID(app.ID).UseBool("confidential_client").Update(app); err != nil {
+ return err
+ }
+ return nil
+}
+
+func deleteOAuth2Application(ctx context.Context, id, userid int64) error {
+ sess := db.GetEngine(ctx)
+ // the userid could be 0 if the app is instance-wide
+ if deleted, err := sess.Where(builder.Eq{"id": id, "uid": userid}).Delete(&OAuth2Application{}); err != nil {
+ return err
+ } else if deleted == 0 {
+ return ErrOAuthApplicationNotFound{ID: id}
+ }
+ codes := make([]*OAuth2AuthorizationCode, 0)
+	// delete the authorization codes belonging to this application's grants
+ if err := sess.Join("INNER", "oauth2_grant",
+ "oauth2_authorization_code.grant_id = oauth2_grant.id AND oauth2_grant.application_id = ?", id).Find(&codes); err != nil {
+ return err
+ }
+ codeIDs := make([]int64, 0, len(codes))
+	for _, code := range codes {
+		codeIDs = append(codeIDs, code.ID)
+ }
+
+ if _, err := sess.In("id", codeIDs).Delete(new(OAuth2AuthorizationCode)); err != nil {
+ return err
+ }
+
+ if _, err := sess.Where("application_id = ?", id).Delete(new(OAuth2Grant)); err != nil {
+ return err
+ }
+ return nil
+}
+
+// DeleteOAuth2Application deletes the application with the given id and the grants and auth codes related to it. It checks if the userid was the creator of the app.
+func DeleteOAuth2Application(ctx context.Context, id, userid int64) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+ app, err := GetOAuth2ApplicationByID(ctx, id)
+ if err != nil {
+ return err
+ }
+ builtinApps := BuiltinApplications()
+ if _, builtin := builtinApps[app.ClientID]; builtin {
+ return fmt.Errorf("failed to delete OAuth2 application: application is locked: %s", app.ClientID)
+ }
+ if err := deleteOAuth2Application(ctx, id, userid); err != nil {
+ return err
+ }
+ return committer.Commit()
+}
+
+//////////////////////////////////////////////////////
+
+// OAuth2AuthorizationCode is a one-time code that, together with the client secret, can be exchanged for an access token. It has a limited lifetime.
+type OAuth2AuthorizationCode struct {
+ ID int64 `xorm:"pk autoincr"`
+ Grant *OAuth2Grant `xorm:"-"`
+ GrantID int64
+ Code string `xorm:"INDEX unique"`
+ CodeChallenge string
+ CodeChallengeMethod string
+ RedirectURI string
+ ValidUntil timeutil.TimeStamp `xorm:"index"`
+}
+
+// TableName sets the table name to `oauth2_authorization_code`
+func (code *OAuth2AuthorizationCode) TableName() string {
+ return "oauth2_authorization_code"
+}
+
+// GenerateRedirectURI generates a redirect URI for a successful authorization request. State will be used if not empty.
+func (code *OAuth2AuthorizationCode) GenerateRedirectURI(state string) (*url.URL, error) {
+ redirect, err := url.Parse(code.RedirectURI)
+ if err != nil {
+ return nil, err
+ }
+ q := redirect.Query()
+ if state != "" {
+ q.Set("state", state)
+ }
+ q.Set("code", code.Code)
+ redirect.RawQuery = q.Encode()
+ return redirect, err
+}
+
+// Invalidate deletes the auth code from the database to invalidate this code
+func (code *OAuth2AuthorizationCode) Invalidate(ctx context.Context) error {
+ _, err := db.GetEngine(ctx).ID(code.ID).NoAutoCondition().Delete(code)
+ return err
+}
+
+// ValidateCodeChallenge validates the given verifier against the saved code challenge. This is part of the PKCE implementation.
+func (code *OAuth2AuthorizationCode) ValidateCodeChallenge(verifier string) bool {
+ switch code.CodeChallengeMethod {
+ case "S256":
+ // base64url(SHA256(verifier)) see https://tools.ietf.org/html/rfc7636#section-4.6
+ h := sha256.Sum256([]byte(verifier))
+ hashedVerifier := base64.RawURLEncoding.EncodeToString(h[:])
+ return hashedVerifier == code.CodeChallenge
+ case "plain":
+ return verifier == code.CodeChallenge
+ case "":
+ return true
+ default:
+ // unsupported method -> return false
+ return false
+ }
+}
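
The client derives the S256 code_challenge from its verifier with exactly the computation checked above (RFC 7636); a sketch:

    // Sketch: client-side derivation of the S256 code_challenge.
    func demoCodeChallenge(verifier string) string {
        sum := sha256.Sum256([]byte(verifier))
        // This value accompanies the authorization request; the verifier
        // itself is only revealed later, at the token exchange.
        return base64.RawURLEncoding.EncodeToString(sum[:])
    }
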
+
+// GetOAuth2AuthorizationByCode returns an authorization by its code
+func GetOAuth2AuthorizationByCode(ctx context.Context, code string) (auth *OAuth2AuthorizationCode, err error) {
+ auth = new(OAuth2AuthorizationCode)
+ if has, err := db.GetEngine(ctx).Where("code = ?", code).Get(auth); err != nil {
+ return nil, err
+ } else if !has {
+ return nil, nil
+ }
+ auth.Grant = new(OAuth2Grant)
+ if has, err := db.GetEngine(ctx).ID(auth.GrantID).Get(auth.Grant); err != nil {
+ return nil, err
+ } else if !has {
+ return nil, nil
+ }
+ return auth, nil
+}
+
+//////////////////////////////////////////////////////
+
+// OAuth2Grant represents the permission of a user for a specific application to access resources
+type OAuth2Grant struct {
+ ID int64 `xorm:"pk autoincr"`
+ UserID int64 `xorm:"INDEX unique(user_application)"`
+ Application *OAuth2Application `xorm:"-"`
+ ApplicationID int64 `xorm:"INDEX unique(user_application)"`
+ Counter int64 `xorm:"NOT NULL DEFAULT 1"`
+ Scope string `xorm:"TEXT"`
+ Nonce string `xorm:"TEXT"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"updated"`
+}
+
+// TableName sets the table name to `oauth2_grant`
+func (grant *OAuth2Grant) TableName() string {
+ return "oauth2_grant"
+}
+
+// GenerateNewAuthorizationCode generates a new authorization code for a grant and saves it to the database
+func (grant *OAuth2Grant) GenerateNewAuthorizationCode(ctx context.Context, redirectURI, codeChallenge, codeChallengeMethod string) (code *OAuth2AuthorizationCode, err error) {
+ rBytes, err := util.CryptoRandomBytes(32)
+ if err != nil {
+ return &OAuth2AuthorizationCode{}, err
+ }
+	// Add a "gta_" prefix to the base32 code so that secret-scanning
+	// tools can more easily recognize leaked authorization codes.
+ codeSecret := "gta_" + base32Lower.EncodeToString(rBytes)
+
+ code = &OAuth2AuthorizationCode{
+ Grant: grant,
+ GrantID: grant.ID,
+ RedirectURI: redirectURI,
+ Code: codeSecret,
+ CodeChallenge: codeChallenge,
+ CodeChallengeMethod: codeChallengeMethod,
+ }
+ if err := db.Insert(ctx, code); err != nil {
+ return nil, err
+ }
+ return code, nil
+}
+
+// IncreaseCounter increases the counter and updates the grant
+func (grant *OAuth2Grant) IncreaseCounter(ctx context.Context) error {
+ _, err := db.GetEngine(ctx).ID(grant.ID).Incr("counter").Update(new(OAuth2Grant))
+ if err != nil {
+ return err
+ }
+ updatedGrant, err := GetOAuth2GrantByID(ctx, grant.ID)
+ if err != nil {
+ return err
+ }
+ grant.Counter = updatedGrant.Counter
+ return nil
+}
+
+// ScopeContains returns true if the grant scope contains the specified scope
+func (grant *OAuth2Grant) ScopeContains(scope string) bool {
+ for _, currentScope := range strings.Split(grant.Scope, " ") {
+ if scope == currentScope {
+ return true
+ }
+ }
+ return false
+}
+
+// SetNonce updates the current nonce value of a grant
+func (grant *OAuth2Grant) SetNonce(ctx context.Context, nonce string) error {
+ grant.Nonce = nonce
+ _, err := db.GetEngine(ctx).ID(grant.ID).Cols("nonce").Update(grant)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// GetOAuth2GrantByID returns the grant with the given ID
+func GetOAuth2GrantByID(ctx context.Context, id int64) (grant *OAuth2Grant, err error) {
+ grant = new(OAuth2Grant)
+ if has, err := db.GetEngine(ctx).ID(id).Get(grant); err != nil {
+ return nil, err
+ } else if !has {
+ return nil, nil
+ }
+ return grant, err
+}
+
+// GetOAuth2GrantsByUserID lists all grants of a certain user
+func GetOAuth2GrantsByUserID(ctx context.Context, uid int64) ([]*OAuth2Grant, error) {
+ type joinedOAuth2Grant struct {
+ Grant *OAuth2Grant `xorm:"extends"`
+ Application *OAuth2Application `xorm:"extends"`
+ }
+ var results *xorm.Rows
+ var err error
+ if results, err = db.GetEngine(ctx).
+ Table("oauth2_grant").
+ Where("user_id = ?", uid).
+ Join("INNER", "oauth2_application", "application_id = oauth2_application.id").
+ Rows(new(joinedOAuth2Grant)); err != nil {
+ return nil, err
+ }
+ defer results.Close()
+ grants := make([]*OAuth2Grant, 0)
+ for results.Next() {
+ joinedGrant := new(joinedOAuth2Grant)
+ if err := results.Scan(joinedGrant); err != nil {
+ return nil, err
+ }
+ joinedGrant.Grant.Application = joinedGrant.Application
+ grants = append(grants, joinedGrant.Grant)
+ }
+ return grants, nil
+}
+
+// RevokeOAuth2Grant deletes the grant with grantID and userID
+func RevokeOAuth2Grant(ctx context.Context, grantID, userID int64) error {
+ _, err := db.GetEngine(ctx).Where(builder.Eq{"id": grantID, "user_id": userID}).Delete(&OAuth2Grant{})
+ return err
+}
+
+// ErrOAuthClientIDInvalid is returned when the client ID cannot be found
+type ErrOAuthClientIDInvalid struct {
+ ClientID string
+}
+
+// IsErrOauthClientIDInvalid checks if an error is an ErrOAuthClientIDInvalid.
+func IsErrOauthClientIDInvalid(err error) bool {
+ _, ok := err.(ErrOAuthClientIDInvalid)
+ return ok
+}
+
+// Error returns the error message
+func (err ErrOAuthClientIDInvalid) Error() string {
+ return fmt.Sprintf("Client ID invalid [Client ID: %s]", err.ClientID)
+}
+
+// Unwrap unwraps this as a ErrNotExist err
+func (err ErrOAuthClientIDInvalid) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ErrOAuthApplicationNotFound is returned when the application ID cannot be found
+type ErrOAuthApplicationNotFound struct {
+ ID int64
+}
+
+// IsErrOAuthApplicationNotFound checks if an error is an ErrOAuthApplicationNotFound.
+func IsErrOAuthApplicationNotFound(err error) bool {
+ _, ok := err.(ErrOAuthApplicationNotFound)
+ return ok
+}
+
+// Error returns the error message
+func (err ErrOAuthApplicationNotFound) Error() string {
+ return fmt.Sprintf("OAuth application not found [ID: %d]", err.ID)
+}
+
+// Unwrap unwraps this as a ErrNotExist err
+func (err ErrOAuthApplicationNotFound) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// GetActiveOAuth2SourceByName returns an active OAuth2 AuthSource with the given name
+func GetActiveOAuth2SourceByName(ctx context.Context, name string) (*Source, error) {
+ authSource := new(Source)
+ has, err := db.GetEngine(ctx).Where("name = ? and type = ? and is_active = ?", name, OAuth2, true).Get(authSource)
+ if err != nil {
+ return nil, err
+ }
+
+ if !has {
+ return nil, fmt.Errorf("oauth2 source not found, name: %q", name)
+ }
+
+ return authSource, nil
+}
+
+func DeleteOAuth2RelictsByUserID(ctx context.Context, userID int64) error {
+ deleteCond := builder.Select("id").From("oauth2_grant").Where(builder.Eq{"oauth2_grant.user_id": userID})
+
+ if _, err := db.GetEngine(ctx).In("grant_id", deleteCond).
+ Delete(&OAuth2AuthorizationCode{}); err != nil {
+ return err
+ }
+
+ if err := db.DeleteBeans(ctx,
+ &OAuth2Application{UID: userID},
+ &OAuth2Grant{UserID: userID},
+ ); err != nil {
+ return fmt.Errorf("DeleteBeans: %w", err)
+ }
+
+ return nil
+}
+
+// CountOrphanedOAuth2Applications returns the amount of orphaned OAuth2 applications.
+func CountOrphanedOAuth2Applications(ctx context.Context) (int64, error) {
+ return db.GetEngine(ctx).
+ Table("`oauth2_application`").
+ Join("LEFT", "`user`", "`oauth2_application`.`uid` = `user`.`id`").
+ Where(builder.IsNull{"`user`.id"}).
+ Where(builder.Neq{"uid": 0}). // exclude instance-wide admin applications
+ Where(builder.NotIn("`oauth2_application`.`client_id`", BuiltinApplicationsClientIDs())).
+ Select("COUNT(`oauth2_application`.`id`)").
+ Count()
+}
+
+// DeleteOrphanedOAuth2Applications deletes orphaned OAuth2 applications.
+func DeleteOrphanedOAuth2Applications(ctx context.Context) (int64, error) {
+ subQuery := builder.Select("`oauth2_application`.id").
+ From("`oauth2_application`").
+ Join("LEFT", "`user`", "`oauth2_application`.`uid` = `user`.`id`").
+ Where(builder.IsNull{"`user`.id"}).
+ Where(builder.Neq{"uid": 0}). // exclude instance-wide admin applications
+ Where(builder.NotIn("`oauth2_application`.`client_id`", BuiltinApplicationsClientIDs()))
+
+ b := builder.Delete(builder.In("id", subQuery)).From("`oauth2_application`")
+ res, err := db.GetEngine(ctx).Exec(b)
+ if err != nil {
+ return 0, err
+ }
+ return res.RowsAffected()
+}
diff --git a/models/auth/oauth2_list.go b/models/auth/oauth2_list.go
new file mode 100644
index 0000000..c55f10b
--- /dev/null
+++ b/models/auth/oauth2_list.go
@@ -0,0 +1,32 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package auth
+
+import (
+ "code.gitea.io/gitea/models/db"
+
+ "xorm.io/builder"
+)
+
+type FindOAuth2ApplicationsOptions struct {
+ db.ListOptions
+ // OwnerID is the user id or org id of the owner of the application
+ OwnerID int64
+ // find global applications; if true, OwnerID will be ignored
+ IsGlobal bool
+}
+
+func (opts FindOAuth2ApplicationsOptions) ToConds() builder.Cond {
+ conds := builder.NewCond()
+ if opts.IsGlobal {
+ conds = conds.And(builder.Eq{"uid": 0})
+ } else if opts.OwnerID != 0 {
+ conds = conds.And(builder.Eq{"uid": opts.OwnerID})
+ }
+ return conds
+}
+
+func (opts FindOAuth2ApplicationsOptions) ToOrders() string {
+ return "id DESC"
+}
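+
+// Illustrative sketch (assuming the generic db.Find helper used elsewhere in
+// the models; not part of this file): listing a user's applications with
+// these options could look like:
+//
+//	apps, err := db.Find[OAuth2Application](ctx, FindOAuth2ApplicationsOptions{
+//		ListOptions: db.ListOptions{Page: 1, PageSize: 10},
+//		OwnerID:     user.ID,
+//	})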
diff --git a/models/auth/oauth2_test.go b/models/auth/oauth2_test.go
new file mode 100644
index 0000000..9b562c8
--- /dev/null
+++ b/models/auth/oauth2_test.go
@@ -0,0 +1,300 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package auth_test
+
+import (
+ "path/filepath"
+ "slices"
+ "testing"
+
+ auth_model "code.gitea.io/gitea/models/auth"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/modules/setting"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestOAuth2Application_GenerateClientSecret(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ app := unittest.AssertExistsAndLoadBean(t, &auth_model.OAuth2Application{ID: 1})
+ secret, err := app.GenerateClientSecret(db.DefaultContext)
+ require.NoError(t, err)
+ assert.NotEmpty(t, secret)
+ unittest.AssertExistsAndLoadBean(t, &auth_model.OAuth2Application{ID: 1, ClientSecret: app.ClientSecret})
+}
+
+func BenchmarkOAuth2Application_GenerateClientSecret(b *testing.B) {
+ require.NoError(b, unittest.PrepareTestDatabase())
+ app := unittest.AssertExistsAndLoadBean(b, &auth_model.OAuth2Application{ID: 1})
+ for i := 0; i < b.N; i++ {
+ _, _ = app.GenerateClientSecret(db.DefaultContext)
+ }
+}
+
+func TestOAuth2Application_ContainsRedirectURI(t *testing.T) {
+ app := &auth_model.OAuth2Application{
+ RedirectURIs: []string{"a", "b", "c"},
+ }
+ assert.True(t, app.ContainsRedirectURI("a"))
+ assert.True(t, app.ContainsRedirectURI("b"))
+ assert.True(t, app.ContainsRedirectURI("c"))
+ assert.False(t, app.ContainsRedirectURI("d"))
+}
+
+func TestOAuth2Application_ContainsRedirectURI_WithPort(t *testing.T) {
+ app := &auth_model.OAuth2Application{
+ RedirectURIs: []string{"http://127.0.0.1/", "http://::1/", "http://192.168.0.1/", "http://intranet/", "https://127.0.0.1/"},
+ ConfidentialClient: false,
+ }
+
+ // http loopback uris should ignore port
+ // https://datatracker.ietf.org/doc/html/rfc8252#section-7.3
+ assert.True(t, app.ContainsRedirectURI("http://127.0.0.1:3456/"))
+ assert.True(t, app.ContainsRedirectURI("http://127.0.0.1/"))
+ assert.True(t, app.ContainsRedirectURI("http://[::1]:3456/"))
+
+ // not http
+ assert.False(t, app.ContainsRedirectURI("https://127.0.0.1:3456/"))
+ // not loopback
+ assert.False(t, app.ContainsRedirectURI("http://192.168.0.1:9954/"))
+ assert.False(t, app.ContainsRedirectURI("http://intranet:3456/"))
+ // unparsable
+ assert.False(t, app.ContainsRedirectURI(":"))
+}
+
+func TestOAuth2Application_ContainsRedirect_Slash(t *testing.T) {
+ app := &auth_model.OAuth2Application{RedirectURIs: []string{"http://127.0.0.1"}}
+ assert.True(t, app.ContainsRedirectURI("http://127.0.0.1"))
+ assert.True(t, app.ContainsRedirectURI("http://127.0.0.1/"))
+ assert.False(t, app.ContainsRedirectURI("http://127.0.0.1/other"))
+
+ app = &auth_model.OAuth2Application{RedirectURIs: []string{"http://127.0.0.1/"}}
+ assert.True(t, app.ContainsRedirectURI("http://127.0.0.1"))
+ assert.True(t, app.ContainsRedirectURI("http://127.0.0.1/"))
+ assert.False(t, app.ContainsRedirectURI("http://127.0.0.1/other"))
+}
+
+func TestOAuth2Application_ValidateClientSecret(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ app := unittest.AssertExistsAndLoadBean(t, &auth_model.OAuth2Application{ID: 1})
+ secret, err := app.GenerateClientSecret(db.DefaultContext)
+ require.NoError(t, err)
+ assert.True(t, app.ValidateClientSecret([]byte(secret)))
+ assert.False(t, app.ValidateClientSecret([]byte("fewijfowejgfiowjeoifew")))
+}
+
+func TestGetOAuth2ApplicationByClientID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ app, err := auth_model.GetOAuth2ApplicationByClientID(db.DefaultContext, "da7da3ba-9a13-4167-856f-3899de0b0138")
+ require.NoError(t, err)
+ assert.Equal(t, "da7da3ba-9a13-4167-856f-3899de0b0138", app.ClientID)
+
+ app, err = auth_model.GetOAuth2ApplicationByClientID(db.DefaultContext, "invalid client id")
+ require.Error(t, err)
+ assert.Nil(t, app)
+}
+
+func TestCreateOAuth2Application(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ app, err := auth_model.CreateOAuth2Application(db.DefaultContext, auth_model.CreateOAuth2ApplicationOptions{Name: "newapp", UserID: 1})
+ require.NoError(t, err)
+ assert.Equal(t, "newapp", app.Name)
+ assert.Len(t, app.ClientID, 36)
+ unittest.AssertExistsAndLoadBean(t, &auth_model.OAuth2Application{Name: "newapp"})
+}
+
+func TestOAuth2Application_TableName(t *testing.T) {
+ assert.Equal(t, "oauth2_application", new(auth_model.OAuth2Application).TableName())
+}
+
+func TestOAuth2Application_GetGrantByUserID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ app := unittest.AssertExistsAndLoadBean(t, &auth_model.OAuth2Application{ID: 1})
+ grant, err := app.GetGrantByUserID(db.DefaultContext, 1)
+ require.NoError(t, err)
+ assert.Equal(t, int64(1), grant.UserID)
+
+ grant, err = app.GetGrantByUserID(db.DefaultContext, 34923458)
+ require.NoError(t, err)
+ assert.Nil(t, grant)
+}
+
+func TestOAuth2Application_CreateGrant(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ app := unittest.AssertExistsAndLoadBean(t, &auth_model.OAuth2Application{ID: 1})
+ grant, err := app.CreateGrant(db.DefaultContext, 2, "")
+ require.NoError(t, err)
+ assert.NotNil(t, grant)
+ assert.Equal(t, int64(2), grant.UserID)
+ assert.Equal(t, int64(1), grant.ApplicationID)
+ assert.Equal(t, "", grant.Scope)
+}
+
+//////////////////// Grant
+
+func TestGetOAuth2GrantByID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ grant, err := auth_model.GetOAuth2GrantByID(db.DefaultContext, 1)
+ require.NoError(t, err)
+ assert.Equal(t, int64(1), grant.ID)
+
+ grant, err = auth_model.GetOAuth2GrantByID(db.DefaultContext, 34923458)
+ require.NoError(t, err)
+ assert.Nil(t, grant)
+}
+
+func TestOAuth2Grant_IncreaseCounter(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ grant := unittest.AssertExistsAndLoadBean(t, &auth_model.OAuth2Grant{ID: 1, Counter: 1})
+ require.NoError(t, grant.IncreaseCounter(db.DefaultContext))
+ assert.Equal(t, int64(2), grant.Counter)
+ unittest.AssertExistsAndLoadBean(t, &auth_model.OAuth2Grant{ID: 1, Counter: 2})
+}
+
+func TestOAuth2Grant_ScopeContains(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ grant := unittest.AssertExistsAndLoadBean(t, &auth_model.OAuth2Grant{ID: 1, Scope: "openid profile"})
+ assert.True(t, grant.ScopeContains("openid"))
+ assert.True(t, grant.ScopeContains("profile"))
+ assert.False(t, grant.ScopeContains("profil"))
+ assert.False(t, grant.ScopeContains("profile2"))
+}
+
+func TestOAuth2Grant_GenerateNewAuthorizationCode(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ grant := unittest.AssertExistsAndLoadBean(t, &auth_model.OAuth2Grant{ID: 1})
+ code, err := grant.GenerateNewAuthorizationCode(db.DefaultContext, "https://example2.com/callback", "CjvyTLSdR47G5zYenDA-eDWW4lRrO8yvjcWwbD_deOg", "S256")
+ require.NoError(t, err)
+ assert.NotNil(t, code)
+ assert.Greater(t, len(code.Code), 32) // secret length > 32
+}
+
+func TestOAuth2Grant_TableName(t *testing.T) {
+ assert.Equal(t, "oauth2_grant", new(auth_model.OAuth2Grant).TableName())
+}
+
+func TestGetOAuth2GrantsByUserID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ result, err := auth_model.GetOAuth2GrantsByUserID(db.DefaultContext, 1)
+ require.NoError(t, err)
+ assert.Len(t, result, 1)
+ assert.Equal(t, int64(1), result[0].ID)
+ assert.Equal(t, result[0].ApplicationID, result[0].Application.ID)
+
+ result, err = auth_model.GetOAuth2GrantsByUserID(db.DefaultContext, 34134)
+ require.NoError(t, err)
+ assert.Empty(t, result)
+}
+
+func TestRevokeOAuth2Grant(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ require.NoError(t, auth_model.RevokeOAuth2Grant(db.DefaultContext, 1, 1))
+ unittest.AssertNotExistsBean(t, &auth_model.OAuth2Grant{ID: 1, UserID: 1})
+}
+
+//////////////////// Authorization Code
+
+func TestGetOAuth2AuthorizationByCode(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ code, err := auth_model.GetOAuth2AuthorizationByCode(db.DefaultContext, "authcode")
+ require.NoError(t, err)
+ assert.NotNil(t, code)
+ assert.Equal(t, "authcode", code.Code)
+ assert.Equal(t, int64(1), code.ID)
+
+ code, err = auth_model.GetOAuth2AuthorizationByCode(db.DefaultContext, "does not exist")
+ require.NoError(t, err)
+ assert.Nil(t, code)
+}
+
+func TestOAuth2AuthorizationCode_ValidateCodeChallenge(t *testing.T) {
+ // test plain
+ code := &auth_model.OAuth2AuthorizationCode{
+ CodeChallengeMethod: "plain",
+ CodeChallenge: "test123",
+ }
+ assert.True(t, code.ValidateCodeChallenge("test123"))
+ assert.False(t, code.ValidateCodeChallenge("ierwgjoergjio"))
+
+ // test S256
+ code = &auth_model.OAuth2AuthorizationCode{
+ CodeChallengeMethod: "S256",
+ CodeChallenge: "CjvyTLSdR47G5zYenDA-eDWW4lRrO8yvjcWwbD_deOg",
+ }
+ assert.True(t, code.ValidateCodeChallenge("N1Zo9-8Rfwhkt68r1r29ty8YwIraXR8eh_1Qwxg7yQXsonBt"))
+ assert.False(t, code.ValidateCodeChallenge("wiogjerogorewngoenrgoiuenorg"))
+
+ // test unknown
+ code = &auth_model.OAuth2AuthorizationCode{
+ CodeChallengeMethod: "monkey",
+ CodeChallenge: "foiwgjioriogeiogjerger",
+ }
+ assert.False(t, code.ValidateCodeChallenge("foiwgjioriogeiogjerger"))
+
+ // test no code challenge
+ code = &auth_model.OAuth2AuthorizationCode{
+ CodeChallengeMethod: "",
+ CodeChallenge: "foierjiogerogerg",
+ }
+ assert.True(t, code.ValidateCodeChallenge(""))
+}
+
+func TestOAuth2AuthorizationCode_GenerateRedirectURI(t *testing.T) {
+ code := &auth_model.OAuth2AuthorizationCode{
+ RedirectURI: "https://example.com/callback",
+ Code: "thecode",
+ }
+
+ redirect, err := code.GenerateRedirectURI("thestate")
+ require.NoError(t, err)
+ assert.Equal(t, "https://example.com/callback?code=thecode&state=thestate", redirect.String())
+
+ redirect, err = code.GenerateRedirectURI("")
+ require.NoError(t, err)
+ assert.Equal(t, "https://example.com/callback?code=thecode", redirect.String())
+}
+
+func TestOAuth2AuthorizationCode_Invalidate(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ code := unittest.AssertExistsAndLoadBean(t, &auth_model.OAuth2AuthorizationCode{Code: "authcode"})
+ require.NoError(t, code.Invalidate(db.DefaultContext))
+ unittest.AssertNotExistsBean(t, &auth_model.OAuth2AuthorizationCode{Code: "authcode"})
+}
+
+func TestOAuth2AuthorizationCode_TableName(t *testing.T) {
+ assert.Equal(t, "oauth2_authorization_code", new(auth_model.OAuth2AuthorizationCode).TableName())
+}
+
+func TestBuiltinApplicationsClientIDs(t *testing.T) {
+ clientIDs := auth_model.BuiltinApplicationsClientIDs()
+ slices.Sort(clientIDs)
+ assert.EqualValues(t, []string{"a4792ccc-144e-407e-86c9-5e7d8d9c3269", "d57cb8c4-630c-4168-8324-ec79935e18d4", "e90ee53c-94e2-48ac-9358-a874fb9e0662"}, clientIDs)
+}
+
+func TestOrphanedOAuth2Applications(t *testing.T) {
+ defer unittest.OverrideFixtures(
+ unittest.FixturesOptions{
+ Dir: filepath.Join(setting.AppWorkPath, "models/fixtures/"),
+ Base: setting.AppWorkPath,
+ Dirs: []string{"models/auth/TestOrphanedOAuth2Applications/"},
+ },
+ )()
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ count, err := auth_model.CountOrphanedOAuth2Applications(db.DefaultContext)
+ require.NoError(t, err)
+ assert.EqualValues(t, 1, count)
+ unittest.AssertExistsIf(t, true, &auth_model.OAuth2Application{ID: 1002})
+
+ _, err = auth_model.DeleteOrphanedOAuth2Applications(db.DefaultContext)
+ require.NoError(t, err)
+
+ count, err = auth_model.CountOrphanedOAuth2Applications(db.DefaultContext)
+ require.NoError(t, err)
+ assert.EqualValues(t, 0, count)
+ unittest.AssertExistsIf(t, false, &auth_model.OAuth2Application{ID: 1002})
+ unittest.AssertExistsIf(t, true, &auth_model.OAuth2Application{ID: 1003})
+}
diff --git a/models/auth/session.go b/models/auth/session.go
new file mode 100644
index 0000000..75a205f
--- /dev/null
+++ b/models/auth/session.go
@@ -0,0 +1,120 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package auth
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/builder"
+)
+
+// Session represents a session compatible with go-chi/session
+type Session struct {
+ Key string `xorm:"pk CHAR(16)"` // has to be Key to match with go-chi/session
+ Data []byte `xorm:"BLOB"` // on MySQL this has a maximum size of 64KB - this may need to be increased
+ Expiry timeutil.TimeStamp // has to be Expiry to match with go-chi/session
+}
+
+func init() {
+ db.RegisterModel(new(Session))
+}
+
+// UpdateSession updates the session with the provided key
+func UpdateSession(ctx context.Context, key string, data []byte) error {
+ _, err := db.GetEngine(ctx).ID(key).Update(&Session{
+ Data: data,
+ Expiry: timeutil.TimeStampNow(),
+ })
+ return err
+}
+
+// ReadSession reads the data for the provided session
+func ReadSession(ctx context.Context, key string) (*Session, error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer committer.Close()
+
+ session, exist, err := db.Get[Session](ctx, builder.Eq{"`key`": key})
+ if err != nil {
+ return nil, err
+ } else if !exist {
+ session = &Session{
+ Key: key,
+ Expiry: timeutil.TimeStampNow(),
+ }
+ if err := db.Insert(ctx, session); err != nil {
+ return nil, err
+ }
+ }
+
+ return session, committer.Commit()
+}
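+
+// Illustrative session lifecycle (a sketch, not part of this file):
+//
+//	sess, _ := ReadSession(ctx, key)  // creates the row if it is missing
+//	_ = UpdateSession(ctx, key, data) // stores data and refreshes expiry
+//	_ = DestroySession(ctx, key)      // removes the row again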
+
+// ExistSession checks if a session exists
+func ExistSession(ctx context.Context, key string) (bool, error) {
+ return db.Exist[Session](ctx, builder.Eq{"`key`": key})
+}
+
+// DestroySession destroys a session
+func DestroySession(ctx context.Context, key string) error {
+ _, err := db.GetEngine(ctx).Delete(&Session{
+ Key: key,
+ })
+ return err
+}
+
+// RegenerateSession moves a session from the old key to the new key
+func RegenerateSession(ctx context.Context, oldKey, newKey string) (*Session, error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer committer.Close()
+
+ if has, err := db.Exist[Session](ctx, builder.Eq{"`key`": newKey}); err != nil {
+ return nil, err
+ } else if has {
+ return nil, fmt.Errorf("session Key: %s already exists", newKey)
+ }
+
+ if has, err := db.Exist[Session](ctx, builder.Eq{"`key`": oldKey}); err != nil {
+ return nil, err
+ } else if !has {
+ if err := db.Insert(ctx, &Session{
+ Key: oldKey,
+ Expiry: timeutil.TimeStampNow(),
+ }); err != nil {
+ return nil, err
+ }
+ }
+
+ if _, err := db.Exec(ctx, "UPDATE "+db.TableName(&Session{})+" SET `key` = ? WHERE `key`=?", newKey, oldKey); err != nil {
+ return nil, err
+ }
+
+ s, _, err := db.Get[Session](ctx, builder.Eq{"`key`": newKey})
+ if err != nil {
+ // the session was just renamed to newKey, so it should be impossible for it to be missing
+ return nil, err
+ }
+
+ return s, committer.Commit()
+}
+
+// CountSessions returns the number of sessions
+func CountSessions(ctx context.Context) (int64, error) {
+ return db.GetEngine(ctx).Count(&Session{})
+}
+
+// CleanupSessions cleans up expired sessions
+func CleanupSessions(ctx context.Context, maxLifetime int64) error {
+ _, err := db.GetEngine(ctx).Where("expiry <= ?", timeutil.TimeStampNow().Add(-maxLifetime)).Delete(&Session{})
+ return err
+}
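+
+// For example, a periodic janitor job could remove sessions idle for more
+// than a day with (sketch only):
+//
+//	err := CleanupSessions(ctx, 24*60*60) // maxLifetime is in seconds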
diff --git a/models/auth/session_test.go b/models/auth/session_test.go
new file mode 100644
index 0000000..3b57239
--- /dev/null
+++ b/models/auth/session_test.go
@@ -0,0 +1,143 @@
+// Copyright 2023 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package auth_test
+
+import (
+ "testing"
+ "time"
+
+ "code.gitea.io/gitea/models/auth"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestAuthSession(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ defer timeutil.MockUnset()
+
+ key := "I-Like-Free-Software"
+
+ t.Run("Create Session", func(t *testing.T) {
+ // Ensure it doesn't exist.
+ ok, err := auth.ExistSession(db.DefaultContext, key)
+ require.NoError(t, err)
+ assert.False(t, ok)
+
+ preCount, err := auth.CountSessions(db.DefaultContext)
+ require.NoError(t, err)
+
+ now := time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC)
+ timeutil.MockSet(now)
+
+ // New session is created.
+ sess, err := auth.ReadSession(db.DefaultContext, key)
+ require.NoError(t, err)
+ assert.EqualValues(t, key, sess.Key)
+ assert.Empty(t, sess.Data)
+ assert.EqualValues(t, now.Unix(), sess.Expiry)
+
+ // Ensure it exists.
+ ok, err = auth.ExistSession(db.DefaultContext, key)
+ require.NoError(t, err)
+ assert.True(t, ok)
+
+ // Ensure the session is taken into account for the count.
+ postCount, err := auth.CountSessions(db.DefaultContext)
+ require.NoError(t, err)
+ assert.Greater(t, postCount, preCount)
+ })
+
+ t.Run("Update session", func(t *testing.T) {
+ data := []byte{0xba, 0xdd, 0xc0, 0xde}
+ now := time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC)
+ timeutil.MockSet(now)
+
+ // Update session.
+ err := auth.UpdateSession(db.DefaultContext, key, data)
+ require.NoError(t, err)
+
+ timeutil.MockSet(time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC))
+
+ // Read updated session.
+ // Ensure data is updated and expiry is set from the update session call.
+ sess, err := auth.ReadSession(db.DefaultContext, key)
+ require.NoError(t, err)
+ assert.EqualValues(t, key, sess.Key)
+ assert.EqualValues(t, data, sess.Data)
+ assert.EqualValues(t, now.Unix(), sess.Expiry)
+
+ timeutil.MockSet(now)
+ })
+
+ t.Run("Delete session", func(t *testing.T) {
+ // Ensure it exists.
+ ok, err := auth.ExistSession(db.DefaultContext, key)
+ require.NoError(t, err)
+ assert.True(t, ok)
+
+ preCount, err := auth.CountSessions(db.DefaultContext)
+ require.NoError(t, err)
+
+ err = auth.DestroySession(db.DefaultContext, key)
+ require.NoError(t, err)
+
+ // Ensure it doesn't exist.
+ ok, err = auth.ExistSession(db.DefaultContext, key)
+ require.NoError(t, err)
+ assert.False(t, ok)
+
+ // Ensure the session is taken into account for the count.
+ postCount, err := auth.CountSessions(db.DefaultContext)
+ require.NoError(t, err)
+ assert.Less(t, postCount, preCount)
+ })
+
+ t.Run("Cleanup sessions", func(t *testing.T) {
+ timeutil.MockSet(time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC))
+
+ _, err := auth.ReadSession(db.DefaultContext, "sess-1")
+ require.NoError(t, err)
+
+ // One minute later.
+ timeutil.MockSet(time.Date(2023, 1, 1, 0, 1, 0, 0, time.UTC))
+ _, err = auth.ReadSession(db.DefaultContext, "sess-2")
+ require.NoError(t, err)
+
+ // 5 minutes, shouldn't clean up anything.
+ err = auth.CleanupSessions(db.DefaultContext, 5*60)
+ require.NoError(t, err)
+
+ ok, err := auth.ExistSession(db.DefaultContext, "sess-1")
+ require.NoError(t, err)
+ assert.True(t, ok)
+
+ ok, err = auth.ExistSession(db.DefaultContext, "sess-2")
+ require.NoError(t, err)
+ assert.True(t, ok)
+
+ // 1 minute, should clean up sess-1.
+ err = auth.CleanupSessions(db.DefaultContext, 60)
+ require.NoError(t, err)
+
+ ok, err = auth.ExistSession(db.DefaultContext, "sess-1")
+ require.NoError(t, err)
+ assert.False(t, ok)
+
+ ok, err = auth.ExistSession(db.DefaultContext, "sess-2")
+ require.NoError(t, err)
+ assert.True(t, ok)
+
+ // Now, should clean up sess-2.
+ err = auth.CleanupSessions(db.DefaultContext, 0)
+ require.NoError(t, err)
+
+ ok, err = auth.ExistSession(db.DefaultContext, "sess-2")
+ require.NoError(t, err)
+ assert.False(t, ok)
+ })
+}
diff --git a/models/auth/source.go b/models/auth/source.go
new file mode 100644
index 0000000..d03d497
--- /dev/null
+++ b/models/auth/source.go
@@ -0,0 +1,412 @@
+// Copyright 2014 The Gogs Authors. All rights reserved.
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package auth
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/optional"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+ "xorm.io/xorm"
+ "xorm.io/xorm/convert"
+)
+
+// Type represents a login type.
+type Type int
+
+// Note: new type must append to the end of list to maintain compatibility.
+const (
+ NoType Type = iota
+ Plain // 1
+ LDAP // 2
+ SMTP // 3
+ PAM // 4
+ DLDAP // 5
+ OAuth2 // 6
+ SSPI // 7
+ Remote // 8
+)
+
+// String returns the string name of the LoginType
+func (typ Type) String() string {
+ return Names[typ]
+}
+
+// Int returns the int value of the LoginType
+func (typ Type) Int() int {
+ return int(typ)
+}
+
+// Names contains the name of LoginType values.
+var Names = map[Type]string{
+ LDAP: "LDAP (via BindDN)",
+ DLDAP: "LDAP (simple auth)", // Via direct bind
+ SMTP: "SMTP",
+ PAM: "PAM",
+ OAuth2: "OAuth2",
+ SSPI: "SPNEGO with SSPI",
+ Remote: "Remote",
+}
+
+// Config represents login config as far as the db is concerned
+type Config interface {
+ convert.Conversion
+}
+
+// SkipVerifiable configurations provide an IsSkipVerify to check if SkipVerify is set
+type SkipVerifiable interface {
+ IsSkipVerify() bool
+}
+
+// HasTLSer configurations provide a HasTLS to check if TLS can be enabled
+type HasTLSer interface {
+ HasTLS() bool
+}
+
+// UseTLSer configurations provide a UseTLS to check if TLS is enabled
+type UseTLSer interface {
+ UseTLS() bool
+}
+
+// SSHKeyProvider configurations provide ProvidesSSHKeys to check if they provide SSHKeys
+type SSHKeyProvider interface {
+ ProvidesSSHKeys() bool
+}
+
+// RegisterableSource configurations provide RegisterSource which needs to be run on creation
+type RegisterableSource interface {
+ RegisterSource() error
+ UnregisterSource() error
+}
+
+var registeredConfigs = map[Type]func() Config{}
+
+// RegisterTypeConfig register a config for a provided type
+func RegisterTypeConfig(typ Type, exemplar Config) {
+ if reflect.TypeOf(exemplar).Kind() == reflect.Ptr {
+ // Pointer:
+ registeredConfigs[typ] = func() Config {
+ return reflect.New(reflect.ValueOf(exemplar).Elem().Type()).Interface().(Config)
+ }
+ return
+ }
+
+ // Not a Pointer
+ registeredConfigs[typ] = func() Config {
+ return reflect.New(reflect.TypeOf(exemplar)).Elem().Interface().(Config)
+ }
+}
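+
+// For example, an auth backend would register its config type at init time
+// (sketch; OAuth2Config stands in for the real config struct name):
+//
+//	func init() {
+//		RegisterTypeConfig(OAuth2, &OAuth2Config{})
+//	}
+//
+// BeforeSet below uses this registry to instantiate the matching Cfg when a
+// Source row is loaded from the database.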
+
+// SourceSettable configurations can have their authSource set on them
+type SourceSettable interface {
+ SetAuthSource(*Source)
+}
+
+// Source represents an external way for authorizing users.
+type Source struct {
+ ID int64 `xorm:"pk autoincr"`
+ Type Type
+ Name string `xorm:"UNIQUE"`
+ IsActive bool `xorm:"INDEX NOT NULL DEFAULT false"`
+ IsSyncEnabled bool `xorm:"INDEX NOT NULL DEFAULT false"`
+ Cfg convert.Conversion `xorm:"TEXT"`
+
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+}
+
+// TableName xorm will read the table name from this method
+func (Source) TableName() string {
+ return "login_source"
+}
+
+func init() {
+ db.RegisterModel(new(Source))
+}
+
+// BeforeSet is invoked from XORM before setting the value of a field of this object.
+func (source *Source) BeforeSet(colName string, val xorm.Cell) {
+ if colName == "type" {
+ typ := Type(db.Cell2Int64(val))
+ constructor, ok := registeredConfigs[typ]
+ if !ok {
+ return
+ }
+ source.Cfg = constructor()
+ if settable, ok := source.Cfg.(SourceSettable); ok {
+ settable.SetAuthSource(source)
+ }
+ }
+}
+
+// TypeName return name of this login source type.
+func (source *Source) TypeName() string {
+ return Names[source.Type]
+}
+
+// IsLDAP returns true if this source is of the LDAP type.
+func (source *Source) IsLDAP() bool {
+ return source.Type == LDAP
+}
+
+// IsDLDAP returns true if this source is of the DLDAP type.
+func (source *Source) IsDLDAP() bool {
+ return source.Type == DLDAP
+}
+
+// IsSMTP returns true if this source is of the SMTP type.
+func (source *Source) IsSMTP() bool {
+ return source.Type == SMTP
+}
+
+// IsPAM returns true if this source is of the PAM type.
+func (source *Source) IsPAM() bool {
+ return source.Type == PAM
+}
+
+// IsOAuth2 returns true if this source is of the OAuth2 type.
+func (source *Source) IsOAuth2() bool {
+ return source.Type == OAuth2
+}
+
+// IsSSPI returns true if this source is of the SSPI type.
+func (source *Source) IsSSPI() bool {
+ return source.Type == SSPI
+}
+
+func (source *Source) IsRemote() bool {
+ return source.Type == Remote
+}
+
+// HasTLS returns true if this source supports TLS.
+func (source *Source) HasTLS() bool {
+ hasTLSer, ok := source.Cfg.(HasTLSer)
+ return ok && hasTLSer.HasTLS()
+}
+
+// UseTLS returns true if this source is configured to use TLS.
+func (source *Source) UseTLS() bool {
+ useTLSer, ok := source.Cfg.(UseTLSer)
+ return ok && useTLSer.UseTLS()
+}
+
+// SkipVerify returns true if this source is configured to skip SSL
+// verification.
+func (source *Source) SkipVerify() bool {
+ skipVerifiable, ok := source.Cfg.(SkipVerifiable)
+ return ok && skipVerifiable.IsSkipVerify()
+}
+
+// CreateSource inserts an AuthSource into the DB if no source
+// with the given name already exists.
+func CreateSource(ctx context.Context, source *Source) error {
+ has, err := db.GetEngine(ctx).Where("name=?", source.Name).Exist(new(Source))
+ if err != nil {
+ return err
+ } else if has {
+ return ErrSourceAlreadyExist{source.Name}
+ }
+ // Synchronization is only available with LDAP for now
+ if !source.IsLDAP() {
+ source.IsSyncEnabled = false
+ }
+
+ _, err = db.GetEngine(ctx).Insert(source)
+ if err != nil {
+ return err
+ }
+
+ if !source.IsActive {
+ return nil
+ }
+
+ if settable, ok := source.Cfg.(SourceSettable); ok {
+ settable.SetAuthSource(source)
+ }
+
+ registerableSource, ok := source.Cfg.(RegisterableSource)
+ if !ok {
+ return nil
+ }
+
+ err = registerableSource.RegisterSource()
+ if err != nil {
+ // remove the AuthSource in case of errors while registering configuration
+ if _, err := db.GetEngine(ctx).ID(source.ID).Delete(new(Source)); err != nil {
+ log.Error("CreateSource: Error while wrapOpenIDConnectInitializeError: %v", err)
+ }
+ }
+ return err
+}
+
+type FindSourcesOptions struct {
+ db.ListOptions
+ IsActive optional.Option[bool]
+ LoginType Type
+}
+
+func (opts FindSourcesOptions) ToConds() builder.Cond {
+ conds := builder.NewCond()
+ if opts.IsActive.Has() {
+ conds = conds.And(builder.Eq{"is_active": opts.IsActive.Value()})
+ }
+ if opts.LoginType != NoType {
+ conds = conds.And(builder.Eq{"`type`": opts.LoginType})
+ }
+ return conds
+}
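+
+// Example (sketch, assuming the generic db.Find helper): listing the active
+// OAuth2 sources would look like:
+//
+//	sources, err := db.Find[Source](ctx, FindSourcesOptions{
+//		IsActive:  optional.Some(true),
+//		LoginType: OAuth2,
+//	})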
+
+// IsSSPIEnabled returns true if there is at least one active login
+// source of type SSPI
+func IsSSPIEnabled(ctx context.Context) bool {
+ exist, err := db.Exist[Source](ctx, FindSourcesOptions{
+ IsActive: optional.Some(true),
+ LoginType: SSPI,
+ }.ToConds())
+ if err != nil {
+ log.Error("IsSSPIEnabled: failed to query active SSPI sources: %v", err)
+ return false
+ }
+ return exist
+}
+
+// GetSourceByID returns login source by given ID.
+func GetSourceByID(ctx context.Context, id int64) (*Source, error) {
+ source := new(Source)
+ if id == 0 {
+ source.Cfg = registeredConfigs[NoType]()
+ // Set this source to active
+ // FIXME: allow disabling of db based password authentication in future
+ source.IsActive = true
+ return source, nil
+ }
+
+ has, err := db.GetEngine(ctx).ID(id).Get(source)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrSourceNotExist{id}
+ }
+ return source, nil
+}
+
+func GetSourceByName(ctx context.Context, name string) (*Source, error) {
+ source := &Source{}
+ has, err := db.GetEngine(ctx).Where("name = ?", name).Get(source)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrSourceNotExist{}
+ }
+ return source, nil
+}
+
+// UpdateSource updates a Source record in DB.
+func UpdateSource(ctx context.Context, source *Source) error {
+ var originalSource *Source
+ if source.IsOAuth2() {
+ // keep track of the original values so we can restore in case of errors while registering OAuth2 providers
+ var err error
+ if originalSource, err = GetSourceByID(ctx, source.ID); err != nil {
+ return err
+ }
+ }
+
+ has, err := db.GetEngine(ctx).Where("name=? AND id!=?", source.Name, source.ID).Exist(new(Source))
+ if err != nil {
+ return err
+ } else if has {
+ return ErrSourceAlreadyExist{source.Name}
+ }
+
+ _, err = db.GetEngine(ctx).ID(source.ID).AllCols().Update(source)
+ if err != nil {
+ return err
+ }
+
+ if !source.IsActive {
+ return nil
+ }
+
+ if settable, ok := source.Cfg.(SourceSettable); ok {
+ settable.SetAuthSource(source)
+ }
+
+ registerableSource, ok := source.Cfg.(RegisterableSource)
+ if !ok {
+ return nil
+ }
+
+ err = registerableSource.RegisterSource()
+ if err != nil {
+ // restore the original values since we cannot update the provider itself
+ if _, err := db.GetEngine(ctx).ID(source.ID).AllCols().Update(originalSource); err != nil {
+ log.Error("UpdateSource: Error while wrapOpenIDConnectInitializeError: %v", err)
+ }
+ }
+ return err
+}
+
+// ErrSourceNotExist represents a "SourceNotExist" kind of error.
+type ErrSourceNotExist struct {
+ ID int64
+}
+
+// IsErrSourceNotExist checks if an error is a ErrSourceNotExist.
+func IsErrSourceNotExist(err error) bool {
+ _, ok := err.(ErrSourceNotExist)
+ return ok
+}
+
+func (err ErrSourceNotExist) Error() string {
+ return fmt.Sprintf("login source does not exist [id: %d]", err.ID)
+}
+
+// Unwrap unwraps this as a ErrNotExist err
+func (err ErrSourceNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ErrSourceAlreadyExist represents a "SourceAlreadyExist" kind of error.
+type ErrSourceAlreadyExist struct {
+ Name string
+}
+
+// IsErrSourceAlreadyExist checks if an error is a ErrSourceAlreadyExist.
+func IsErrSourceAlreadyExist(err error) bool {
+ _, ok := err.(ErrSourceAlreadyExist)
+ return ok
+}
+
+func (err ErrSourceAlreadyExist) Error() string {
+ return fmt.Sprintf("login source already exists [name: %s]", err.Name)
+}
+
+// Unwrap unwraps this as a ErrExist err
+func (err ErrSourceAlreadyExist) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+// ErrSourceInUse represents a "SourceInUse" kind of error.
+type ErrSourceInUse struct {
+ ID int64
+}
+
+// IsErrSourceInUse checks if an error is a ErrSourceInUse.
+func IsErrSourceInUse(err error) bool {
+ _, ok := err.(ErrSourceInUse)
+ return ok
+}
+
+func (err ErrSourceInUse) Error() string {
+ return fmt.Sprintf("login source is still used by some users [id: %d]", err.ID)
+}
diff --git a/models/auth/source_test.go b/models/auth/source_test.go
new file mode 100644
index 0000000..522fecc
--- /dev/null
+++ b/models/auth/source_test.go
@@ -0,0 +1,61 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package auth_test
+
+import (
+ "strings"
+ "testing"
+
+ auth_model "code.gitea.io/gitea/models/auth"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/modules/json"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "xorm.io/xorm/schemas"
+)
+
+type TestSource struct {
+ Provider string
+ ClientID string
+ ClientSecret string
+ OpenIDConnectAutoDiscoveryURL string
+ IconURL string
+}
+
+// FromDB fills up a TestSource from its serialized format.
+func (source *TestSource) FromDB(bs []byte) error {
+ return json.Unmarshal(bs, &source)
+}
+
+// ToDB exports a TestSource to a serialized format.
+func (source *TestSource) ToDB() ([]byte, error) {
+ return json.Marshal(source)
+}
+
+func TestDumpAuthSource(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ authSourceSchema, err := db.TableInfo(new(auth_model.Source))
+ require.NoError(t, err)
+
+ auth_model.RegisterTypeConfig(auth_model.OAuth2, new(TestSource))
+
+ auth_model.CreateSource(db.DefaultContext, &auth_model.Source{
+ Type: auth_model.OAuth2,
+ Name: "TestSource",
+ IsActive: false,
+ Cfg: &TestSource{
+ Provider: "ConvertibleSourceName",
+ ClientID: "42",
+ },
+ })
+
+ sb := new(strings.Builder)
+
+ db.DumpTables([]*schemas.Table{authSourceSchema}, sb)
+
+ assert.Contains(t, sb.String(), `"Provider":"ConvertibleSourceName"`)
+}
diff --git a/models/auth/twofactor.go b/models/auth/twofactor.go
new file mode 100644
index 0000000..d0c341a
--- /dev/null
+++ b/models/auth/twofactor.go
@@ -0,0 +1,166 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package auth
+
+import (
+ "context"
+ "crypto/md5"
+ "crypto/sha256"
+ "crypto/subtle"
+ "encoding/base32"
+ "encoding/base64"
+ "encoding/hex"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/secret"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "github.com/pquerna/otp/totp"
+ "golang.org/x/crypto/pbkdf2"
+)
+
+//
+// Two-factor authentication
+//
+
+// ErrTwoFactorNotEnrolled indicates that a user is not enrolled in two-factor authentication.
+type ErrTwoFactorNotEnrolled struct {
+ UID int64
+}
+
+// IsErrTwoFactorNotEnrolled checks if an error is a ErrTwoFactorNotEnrolled.
+func IsErrTwoFactorNotEnrolled(err error) bool {
+ _, ok := err.(ErrTwoFactorNotEnrolled)
+ return ok
+}
+
+func (err ErrTwoFactorNotEnrolled) Error() string {
+ return fmt.Sprintf("user not enrolled in 2FA [uid: %d]", err.UID)
+}
+
+// Unwrap unwraps this as a ErrNotExist err
+func (err ErrTwoFactorNotEnrolled) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// TwoFactor represents a two-factor authentication token.
+type TwoFactor struct {
+ ID int64 `xorm:"pk autoincr"`
+ UID int64 `xorm:"UNIQUE"`
+ Secret string
+ ScratchSalt string
+ ScratchHash string
+ LastUsedPasscode string `xorm:"VARCHAR(10)"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+}
+
+func init() {
+ db.RegisterModel(new(TwoFactor))
+}
+
+// GenerateScratchToken generates a new scratch token for the user, replacing any previous one.
+func (t *TwoFactor) GenerateScratchToken() (string, error) {
+ tokenBytes, err := util.CryptoRandomBytes(6)
+ if err != nil {
+ return "", err
+ }
+ // these chars are specially chosen to avoid ambiguous chars like `0`, `O`, `1`, `I`.
+ const base32Chars = "ABCDEFGHJKLMNPQRSTUVWXYZ23456789"
+ token := base32.NewEncoding(base32Chars).WithPadding(base32.NoPadding).EncodeToString(tokenBytes)
+ t.ScratchSalt, _ = util.CryptoRandomString(10)
+ t.ScratchHash = HashToken(token, t.ScratchSalt)
+ return token, nil
+}
+
+// HashToken returns the hex-encoded PBKDF2 hash of the token derived with the given salt
+func HashToken(token, salt string) string {
+ tempHash := pbkdf2.Key([]byte(token), []byte(salt), 10000, 50, sha256.New)
+ return hex.EncodeToString(tempHash)
+}
+
+// VerifyScratchToken verifies if the specified scratch token is valid.
+func (t *TwoFactor) VerifyScratchToken(token string) bool {
+ if len(token) == 0 {
+ return false
+ }
+ tempHash := HashToken(token, t.ScratchSalt)
+ return subtle.ConstantTimeCompare([]byte(t.ScratchHash), []byte(tempHash)) == 1
+}
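+
+// Scratch tokens are single-use recovery codes: only the salted PBKDF2 hash
+// is stored. A regeneration flow might look like (sketch only):
+//
+//	token, err := twofa.GenerateScratchToken() // show the token to the user once
+//	if err == nil {
+//		err = UpdateTwoFactor(ctx, twofa) // persist the new salt and hash
+//	}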
+
+func (t *TwoFactor) getEncryptionKey() []byte {
+ k := md5.Sum([]byte(setting.SecretKey))
+ return k[:]
+}
+
+// SetSecret sets the 2FA secret.
+func (t *TwoFactor) SetSecret(secretString string) error {
+ secretBytes, err := secret.AesEncrypt(t.getEncryptionKey(), []byte(secretString))
+ if err != nil {
+ return err
+ }
+ t.Secret = base64.StdEncoding.EncodeToString(secretBytes)
+ return nil
+}
+
+// ValidateTOTP validates the provided passcode.
+func (t *TwoFactor) ValidateTOTP(passcode string) (bool, error) {
+ decodedStoredSecret, err := base64.StdEncoding.DecodeString(t.Secret)
+ if err != nil {
+ return false, err
+ }
+ secretBytes, err := secret.AesDecrypt(t.getEncryptionKey(), decodedStoredSecret)
+ if err != nil {
+ return false, err
+ }
+ secretStr := string(secretBytes)
+ return totp.Validate(passcode, secretStr), nil
+}
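+
+// Enrollment sketch (illustrative; totpKey would come from totp.Generate in
+// the pquerna/otp package):
+//
+//	t := &TwoFactor{UID: user.ID}
+//	if err := t.SetSecret(totpKey.Secret()); err != nil {
+//		return err
+//	}
+//	if err := NewTwoFactor(ctx, t); err != nil {
+//		return err
+//	}
+//	ok, err := t.ValidateTOTP(passcode) // verify a user-entered passcode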
+
+// NewTwoFactor creates a new two-factor authentication token.
+func NewTwoFactor(ctx context.Context, t *TwoFactor) error {
+ _, err := db.GetEngine(ctx).Insert(t)
+ return err
+}
+
+// UpdateTwoFactor updates a two-factor authentication token.
+func UpdateTwoFactor(ctx context.Context, t *TwoFactor) error {
+ _, err := db.GetEngine(ctx).ID(t.ID).AllCols().Update(t)
+ return err
+}
+
+// GetTwoFactorByUID returns the two-factor authentication token associated with
+// the user, if any.
+func GetTwoFactorByUID(ctx context.Context, uid int64) (*TwoFactor, error) {
+ twofa := &TwoFactor{}
+ has, err := db.GetEngine(ctx).Where("uid=?", uid).Get(twofa)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrTwoFactorNotEnrolled{uid}
+ }
+ return twofa, nil
+}
+
+// HasTwoFactorByUID returns whether the user has a two-factor authentication token.
+func HasTwoFactorByUID(ctx context.Context, uid int64) (bool, error) {
+ return db.GetEngine(ctx).Where("uid=?", uid).Exist(&TwoFactor{})
+}
+
+// DeleteTwoFactorByID deletes two-factor authentication token by given ID.
+func DeleteTwoFactorByID(ctx context.Context, id, userID int64) error {
+ cnt, err := db.GetEngine(ctx).ID(id).Delete(&TwoFactor{
+ UID: userID,
+ })
+ if err != nil {
+ return err
+ } else if cnt != 1 {
+ return ErrTwoFactorNotEnrolled{userID}
+ }
+ return nil
+}
diff --git a/models/auth/webauthn.go b/models/auth/webauthn.go
new file mode 100644
index 0000000..aa13cf6
--- /dev/null
+++ b/models/auth/webauthn.go
@@ -0,0 +1,209 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package auth
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "github.com/go-webauthn/webauthn/webauthn"
+)
+
+// ErrWebAuthnCredentialNotExist represents a "WebAuthnCredentialNotExist" kind of error.
+type ErrWebAuthnCredentialNotExist struct {
+ ID int64
+ CredentialID []byte
+}
+
+func (err ErrWebAuthnCredentialNotExist) Error() string {
+ if len(err.CredentialID) == 0 {
+ return fmt.Sprintf("WebAuthn credential does not exist [id: %d]", err.ID)
+ }
+ return fmt.Sprintf("WebAuthn credential does not exist [credential_id: %x]", err.CredentialID)
+}
+
+// Unwrap unwraps this as a ErrNotExist err
+func (err ErrWebAuthnCredentialNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// IsErrWebAuthnCredentialNotExist checks if an error is a ErrWebAuthnCredentialNotExist.
+func IsErrWebAuthnCredentialNotExist(err error) bool {
+ _, ok := err.(ErrWebAuthnCredentialNotExist)
+ return ok
+}
+
+// WebAuthnCredential represents the WebAuthn credential data for a public-key
+// credential conformant to WebAuthn Level 3
+type WebAuthnCredential struct {
+ ID int64 `xorm:"pk autoincr"`
+ Name string
+ LowerName string `xorm:"unique(s)"`
+ UserID int64 `xorm:"INDEX unique(s)"`
+ CredentialID []byte `xorm:"INDEX VARBINARY(1024)"`
+ PublicKey []byte
+ AttestationType string
+ AAGUID []byte
+ SignCount uint32 `xorm:"BIGINT"`
+ CloneWarning bool
+ BackupEligible bool `xorm:"NOT NULL DEFAULT false"`
+ BackupState bool `xorm:"NOT NULL DEFAULT false"`
+ // If legacy is set to true, backup_eligible and backup_state aren't set.
+ Legacy bool `xorm:"NOT NULL DEFAULT true"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+}
+
+func init() {
+ db.RegisterModel(new(WebAuthnCredential))
+}
+
+// TableName returns a better table name for WebAuthnCredential
+func (cred WebAuthnCredential) TableName() string {
+ return "webauthn_credential"
+}
+
+// UpdateSignCount will update the database value of SignCount
+func (cred *WebAuthnCredential) UpdateSignCount(ctx context.Context) error {
+ _, err := db.GetEngine(ctx).ID(cred.ID).Cols("sign_count").Update(cred)
+ return err
+}
+
+// UpdateFromLegacy updates the values that aren't present on legacy credentials.
+func (cred *WebAuthnCredential) UpdateFromLegacy(ctx context.Context) error {
+ _, err := db.GetEngine(ctx).ID(cred.ID).Cols("legacy", "backup_eligible", "backup_state").Update(cred)
+ return err
+}
+
+// BeforeInsert will be invoked by XORM before inserting a record
+func (cred *WebAuthnCredential) BeforeInsert() {
+ cred.LowerName = strings.ToLower(cred.Name)
+}
+
+// BeforeUpdate will be invoked by XORM before updating a record
+func (cred *WebAuthnCredential) BeforeUpdate() {
+ cred.LowerName = strings.ToLower(cred.Name)
+}
+
+// AfterLoad is invoked from XORM after setting the values of all fields of this object.
+func (cred *WebAuthnCredential) AfterLoad() {
+ cred.LowerName = strings.ToLower(cred.Name)
+}
+
+// WebAuthnCredentialList is a list of *WebAuthnCredential
+type WebAuthnCredentialList []*WebAuthnCredential
+
+// ToCredentials will convert all WebAuthnCredentials to webauthn.Credentials
+func (list WebAuthnCredentialList) ToCredentials() []webauthn.Credential {
+ creds := make([]webauthn.Credential, 0, len(list))
+ for _, cred := range list {
+ creds = append(creds, webauthn.Credential{
+ ID: cred.CredentialID,
+ PublicKey: cred.PublicKey,
+ AttestationType: cred.AttestationType,
+ Flags: webauthn.CredentialFlags{
+ BackupEligible: cred.BackupEligible,
+ BackupState: cred.BackupState,
+ },
+ Authenticator: webauthn.Authenticator{
+ AAGUID: cred.AAGUID,
+ SignCount: cred.SignCount,
+ CloneWarning: cred.CloneWarning,
+ },
+ })
+ }
+ return creds
+}
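+
+// This conversion is what lets a user type satisfy the webauthn.User
+// interface, e.g. (sketch only, assuming a User model with an ID field and a
+// ctx obtained from the request):
+//
+//	func (u *User) WebAuthnCredentials() []webauthn.Credential {
+//		creds, _ := GetWebAuthnCredentialsByUID(ctx, u.ID)
+//		return creds.ToCredentials()
+//	}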
+
+// GetWebAuthnCredentialsByUID returns all WebAuthn credentials of the given user
+func GetWebAuthnCredentialsByUID(ctx context.Context, uid int64) (WebAuthnCredentialList, error) {
+ creds := make(WebAuthnCredentialList, 0)
+ return creds, db.GetEngine(ctx).Where("user_id = ?", uid).Find(&creds)
+}
+
+// ExistsWebAuthnCredentialsForUID returns if the given user has credentials
+func ExistsWebAuthnCredentialsForUID(ctx context.Context, uid int64) (bool, error) {
+ return db.GetEngine(ctx).Where("user_id = ?", uid).Exist(&WebAuthnCredential{})
+}
+
+// GetWebAuthnCredentialByName returns a WebAuthn credential by name
+func GetWebAuthnCredentialByName(ctx context.Context, uid int64, name string) (*WebAuthnCredential, error) {
+ cred := new(WebAuthnCredential)
+ if found, err := db.GetEngine(ctx).Where("user_id = ? AND lower_name = ?", uid, strings.ToLower(name)).Get(cred); err != nil {
+ return nil, err
+ } else if !found {
+ return nil, ErrWebAuthnCredentialNotExist{}
+ }
+ return cred, nil
+}
+
+// GetWebAuthnCredentialByID returns WebAuthn credential by id
+func GetWebAuthnCredentialByID(ctx context.Context, id int64) (*WebAuthnCredential, error) {
+ cred := new(WebAuthnCredential)
+ if found, err := db.GetEngine(ctx).ID(id).Get(cred); err != nil {
+ return nil, err
+ } else if !found {
+ return nil, ErrWebAuthnCredentialNotExist{ID: id}
+ }
+ return cred, nil
+}
+
+// HasWebAuthnRegistrationsByUID returns whether a given user has WebAuthn registrations
+func HasWebAuthnRegistrationsByUID(ctx context.Context, uid int64) (bool, error) {
+ return db.GetEngine(ctx).Where("user_id = ?", uid).Exist(&WebAuthnCredential{})
+}
+
+// GetWebAuthnCredentialByCredID returns WebAuthn credential by credential ID
+func GetWebAuthnCredentialByCredID(ctx context.Context, userID int64, credID []byte) (*WebAuthnCredential, error) {
+ cred := new(WebAuthnCredential)
+ if found, err := db.GetEngine(ctx).Where("user_id = ? AND credential_id = ?", userID, credID).Get(cred); err != nil {
+ return nil, err
+ } else if !found {
+ return nil, ErrWebAuthnCredentialNotExist{CredentialID: credID}
+ }
+ return cred, nil
+}
+
+// CreateCredential will create a new WebAuthnCredential from the given Credential
+func CreateCredential(ctx context.Context, userID int64, name string, cred *webauthn.Credential) (*WebAuthnCredential, error) {
+ c := &WebAuthnCredential{
+ UserID: userID,
+ Name: name,
+ CredentialID: cred.ID,
+ PublicKey: cred.PublicKey,
+ AttestationType: cred.AttestationType,
+ AAGUID: cred.Authenticator.AAGUID,
+ SignCount: cred.Authenticator.SignCount,
+ CloneWarning: false,
+ BackupEligible: cred.Flags.BackupEligible,
+ BackupState: cred.Flags.BackupState,
+ Legacy: false,
+ }
+
+ if err := db.Insert(ctx, c); err != nil {
+ return nil, err
+ }
+ return c, nil
+}
+
+// DeleteCredential will delete WebAuthnCredential
+func DeleteCredential(ctx context.Context, id, userID int64) (bool, error) {
+ had, err := db.GetEngine(ctx).ID(id).Where("user_id = ?", userID).Delete(&WebAuthnCredential{})
+ return had > 0, err
+}
+
+// WebAuthnCredentials returns the credentials of the user in the form required by the webauthn.User interface
+func WebAuthnCredentials(ctx context.Context, userID int64) ([]webauthn.Credential, error) {
+ dbCreds, err := GetWebAuthnCredentialsByUID(ctx, userID)
+ if err != nil {
+ return nil, err
+ }
+
+ return dbCreds.ToCredentials(), nil
+}
diff --git a/models/auth/webauthn_test.go b/models/auth/webauthn_test.go
new file mode 100644
index 0000000..e1cd652
--- /dev/null
+++ b/models/auth/webauthn_test.go
@@ -0,0 +1,78 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package auth_test
+
+import (
+ "testing"
+
+ auth_model "code.gitea.io/gitea/models/auth"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/go-webauthn/webauthn/webauthn"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGetWebAuthnCredentialByID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ res, err := auth_model.GetWebAuthnCredentialByID(db.DefaultContext, 1)
+ require.NoError(t, err)
+ assert.Equal(t, "WebAuthn credential", res.Name)
+
+ _, err = auth_model.GetWebAuthnCredentialByID(db.DefaultContext, 342432)
+ require.Error(t, err)
+ assert.True(t, auth_model.IsErrWebAuthnCredentialNotExist(err))
+}
+
+func TestGetWebAuthnCredentialsByUID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ res, err := auth_model.GetWebAuthnCredentialsByUID(db.DefaultContext, 32)
+ require.NoError(t, err)
+ assert.Len(t, res, 1)
+ assert.Equal(t, "WebAuthn credential", res[0].Name)
+}
+
+func TestWebAuthnCredential_TableName(t *testing.T) {
+ assert.Equal(t, "webauthn_credential", auth_model.WebAuthnCredential{}.TableName())
+}
+
+func TestWebAuthnCredential_UpdateSignCount(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ cred := unittest.AssertExistsAndLoadBean(t, &auth_model.WebAuthnCredential{ID: 1})
+ cred.SignCount = 1
+ require.NoError(t, cred.UpdateSignCount(db.DefaultContext))
+ unittest.AssertExistsIf(t, true, &auth_model.WebAuthnCredential{ID: 1, SignCount: 1})
+}
+
+func TestWebAuthnCredential_UpdateLargeCounter(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ cred := unittest.AssertExistsAndLoadBean(t, &auth_model.WebAuthnCredential{ID: 1})
+ cred.SignCount = 0xffffffff
+ require.NoError(t, cred.UpdateSignCount(db.DefaultContext))
+ unittest.AssertExistsIf(t, true, &auth_model.WebAuthnCredential{ID: 1, SignCount: 0xffffffff})
+}
+
+func TestWebAuthnCredential_UpdateFromLegacy(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ cred := unittest.AssertExistsAndLoadBean(t, &auth_model.WebAuthnCredential{ID: 1, Legacy: true})
+ cred.Legacy = false
+ cred.BackupEligible = true
+ cred.BackupState = true
+ require.NoError(t, cred.UpdateFromLegacy(db.DefaultContext))
+ unittest.AssertExistsIf(t, true, &auth_model.WebAuthnCredential{ID: 1, BackupEligible: true, BackupState: true}, "legacy = false")
+}
+
+func TestCreateCredential(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ res, err := auth_model.CreateCredential(db.DefaultContext, 1, "WebAuthn Created Credential", &webauthn.Credential{ID: []byte("Test"), Flags: webauthn.CredentialFlags{BackupEligible: true, BackupState: true}})
+ require.NoError(t, err)
+ assert.Equal(t, "WebAuthn Created Credential", res.Name)
+ assert.Equal(t, []byte("Test"), res.CredentialID)
+
+ unittest.AssertExistsIf(t, true, &auth_model.WebAuthnCredential{Name: "WebAuthn Created Credential", UserID: 1, BackupEligible: true, BackupState: true}, "legacy = false")
+}
diff --git a/models/avatars/avatar.go b/models/avatars/avatar.go
new file mode 100644
index 0000000..9eb34dc
--- /dev/null
+++ b/models/avatars/avatar.go
@@ -0,0 +1,238 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package avatars
+
+import (
+ "context"
+ "crypto/md5"
+ "encoding/hex"
+ "fmt"
+ "net/url"
+ "path"
+ "strconv"
+ "strings"
+ "sync/atomic"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/cache"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+
+ "code.forgejo.org/forgejo-contrib/go-libravatar"
+)
+
+const (
+ // DefaultAvatarClass is the default class of a rendered avatar
+ DefaultAvatarClass = "ui avatar tw-align-middle"
+ // DefaultAvatarPixelSize is the default size in pixels of a rendered avatar
+ DefaultAvatarPixelSize = 28
+)
+
+// EmailHash represents a pre-generated hash of an email address (mainly used by LibravatarURL, which queries the email domain's DNS records)
+type EmailHash struct {
+ Hash string `xorm:"pk varchar(32)"`
+ Email string `xorm:"UNIQUE NOT NULL"`
+}
+
+func init() {
+ db.RegisterModel(new(EmailHash))
+}
+
+type avatarSettingStruct struct {
+ defaultAvatarLink string
+ gravatarSource string
+ gravatarSourceURL *url.URL
+ libravatar *libravatar.Libravatar
+}
+
+var avatarSettingAtomic atomic.Pointer[avatarSettingStruct]
+
+func loadAvatarSetting() (*avatarSettingStruct, error) {
+ s := avatarSettingAtomic.Load()
+ if s == nil || s.gravatarSource != setting.GravatarSource {
+ s = &avatarSettingStruct{}
+ u, err := url.Parse(setting.AppSubURL)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse AppSubURL: %w", err)
+ }
+
+ u.Path = path.Join(u.Path, "/assets/img/avatar_default.png")
+ s.defaultAvatarLink = u.String()
+
+ s.gravatarSourceURL, err = url.Parse(setting.GravatarSource)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse GravatarSource %q: %w", setting.GravatarSource, err)
+ }
+
+ s.libravatar = libravatar.New()
+ if s.gravatarSourceURL.Scheme == "https" {
+ s.libravatar.SetUseHTTPS(true)
+ s.libravatar.SetSecureFallbackHost(s.gravatarSourceURL.Host)
+ } else {
+ s.libravatar.SetUseHTTPS(false)
+ s.libravatar.SetFallbackHost(s.gravatarSourceURL.Host)
+ }
+
+ avatarSettingAtomic.Store(s)
+ }
+ return s, nil
+}
+
+// DefaultAvatarLink the default avatar link
+func DefaultAvatarLink() string {
+ a, err := loadAvatarSetting()
+ if err != nil {
+ log.Error("Failed to loadAvatarSetting: %v", err)
+ return ""
+ }
+ return a.defaultAvatarLink
+}
+
+// HashEmail hashes an email address to an MD5 string. https://en.gravatar.com/site/implement/hash/
+func HashEmail(email string) string {
+ m := md5.New()
+ _, _ = m.Write([]byte(strings.ToLower(strings.TrimSpace(email))))
+ return hex.EncodeToString(m.Sum(nil))
+}
+
+// GetEmailForHash converts a provided md5sum to the email
+func GetEmailForHash(ctx context.Context, md5Sum string) (string, error) {
+ return cache.GetString("Avatar:"+md5Sum, func() (string, error) {
+ emailHash := EmailHash{
+ Hash: strings.ToLower(strings.TrimSpace(md5Sum)),
+ }
+
+ _, err := db.GetEngine(ctx).Get(&emailHash)
+ return emailHash.Email, err
+ })
+}
+
+// LibravatarURL returns the URL for the given email. Slow due to the DNS lookup.
+// This function should only be called if a federated avatar service is enabled.
+func LibravatarURL(email string) (*url.URL, error) {
+ a, err := loadAvatarSetting()
+ if err != nil {
+ return nil, err
+ }
+ urlStr, err := a.libravatar.FromEmail(email)
+ if err != nil {
+ log.Error("LibravatarService.FromEmail(email=%s): error %v", email, err)
+ return nil, err
+ }
+ u, err := url.Parse(urlStr)
+ if err != nil {
+ log.Error("Failed to parse libravatar url(%s): error %v", urlStr, err)
+ return nil, err
+ }
+ return u, nil
+}
+
+// saveEmailHash saves the email and its hash into the database (used later
+// by GetEmailForHash) and returns the hash
+func saveEmailHash(ctx context.Context, email string) string {
+ lowerEmail := strings.ToLower(strings.TrimSpace(email))
+ emailHash := HashEmail(lowerEmail)
+ _, _ = cache.GetString("Avatar:"+emailHash, func() (string, error) {
+ emailHash := &EmailHash{
+ Email: lowerEmail,
+ Hash: emailHash,
+ }
+ // open a transaction just to contain any errors that postgres might report
+ if err := db.WithTx(ctx, func(ctx context.Context) error {
+ has, err := db.GetEngine(ctx).Where("email = ? AND hash = ?", emailHash.Email, emailHash.Hash).Get(new(EmailHash))
+ if has || err != nil {
+ // the pair already exists (or the lookup failed) - skip the insert and keep going
+ return nil
+ }
+ _, _ = db.GetEngine(ctx).Insert(emailHash)
+ return nil
+ }); err != nil {
+ // deliberately ignore any DB problems and just return the lowerEmail - the transaction is expected to fail at times
+ return lowerEmail, nil
+ }
+ return lowerEmail, nil
+ })
+ return emailHash
+}
+
+// GenerateUserAvatarFastLink returns a fast link (302) to the user's avatar: "/user/avatar/${User.Name}/${size}"
+func GenerateUserAvatarFastLink(userName string, size int) string {
+ if size < 0 {
+ size = 0
+ }
+ return setting.AppSubURL + "/user/avatar/" + url.PathEscape(userName) + "/" + strconv.Itoa(size)
+}
+
+// GenerateUserAvatarImageLink returns a link for `User.Avatar` image file: "/avatars/${User.Avatar}"
+func GenerateUserAvatarImageLink(userAvatar string, size int) string {
+ if size > 0 {
+ return setting.AppSubURL + "/avatars/" + url.PathEscape(userAvatar) + "?size=" + strconv.Itoa(size)
+ }
+ return setting.AppSubURL + "/avatars/" + url.PathEscape(userAvatar)
+}
+
+// generateRecognizedAvatarURL generates a recognized avatar (Gravatar/Libravatar) URL; it modifies the URL, so the parameter is passed by value (a copy)
+func generateRecognizedAvatarURL(u url.URL, size int) string {
+ urlQuery := u.Query()
+ urlQuery.Set("d", "identicon")
+ if size > 0 {
+ urlQuery.Set("s", strconv.Itoa(size))
+ }
+ u.RawQuery = urlQuery.Encode()
+ return u.String()
+}
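+
+// For example (an illustration derived from the code above): given the source URL
+// "https://secure.gravatar.com/avatar/<hash>" and size 100, the result is
+// "https://secure.gravatar.com/avatar/<hash>?d=identicon&s=100".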
+
+// generateEmailAvatarLink returns an email avatar link.
+// if final is true, it may use a slow path (eg: query DNS).
+// if final is false, it always uses a fast path.
+func generateEmailAvatarLink(ctx context.Context, email string, size int, final bool) string {
+ email = strings.TrimSpace(email)
+ if email == "" {
+ return DefaultAvatarLink()
+ }
+
+ avatarSetting, err := loadAvatarSetting()
+ if err != nil {
+ return DefaultAvatarLink()
+ }
+
+ enableFederatedAvatar := setting.Config().Picture.EnableFederatedAvatar.Value(ctx)
+ if enableFederatedAvatar {
+ emailHash := saveEmailHash(ctx, email)
+ if final {
+ // for final link, we can spend more time on slow external query
+ var avatarURL *url.URL
+ if avatarURL, err = LibravatarURL(email); err != nil {
+ return DefaultAvatarLink()
+ }
+ return generateRecognizedAvatarURL(*avatarURL, size)
+ }
+ // for non-final link, we should return fast (use a 302 redirection link)
+ urlStr := setting.AppSubURL + "/avatar/" + url.PathEscape(emailHash)
+ if size > 0 {
+ urlStr += "?size=" + strconv.Itoa(size)
+ }
+ return urlStr
+ }
+
+ disableGravatar := setting.Config().Picture.DisableGravatar.Value(ctx)
+ if !disableGravatar {
+ // copy GravatarSourceURL, because we will modify its Path.
+ avatarURLCopy := *avatarSetting.gravatarSourceURL
+ avatarURLCopy.Path = path.Join(avatarURLCopy.Path, HashEmail(email))
+ return generateRecognizedAvatarURL(avatarURLCopy, size)
+ }
+
+ return DefaultAvatarLink()
+}
+
+// GenerateEmailAvatarFastLink returns an avatar link (fast; the link may be a delegated one: "/avatar/${hash}")
+func GenerateEmailAvatarFastLink(ctx context.Context, email string, size int) string {
+ return generateEmailAvatarLink(ctx, email, size, false)
+}
+
+// GenerateEmailAvatarFinalLink returns the final avatar link (may be slow)
+func GenerateEmailAvatarFinalLink(ctx context.Context, email string, size int) string {
+ return generateEmailAvatarLink(ctx, email, size, true)
+}
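+
+// Usage sketch (an illustration, not part of the API above): list pages typically use the
+// fast variant, while resolving a single redirect target uses the final variant, e.g.
+//
+//	fast := GenerateEmailAvatarFastLink(ctx, "gitea@example.com", 100)  // e.g. "/avatar/<md5>?size=100" when federated avatars are enabled
+//	final := GenerateEmailAvatarFinalLink(ctx, "gitea@example.com", 100) // resolved Gravatar/Libravatar URL (may query DNS)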
diff --git a/models/avatars/avatar_test.go b/models/avatars/avatar_test.go
new file mode 100644
index 0000000..85c40c3
--- /dev/null
+++ b/models/avatars/avatar_test.go
@@ -0,0 +1,59 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package avatars_test
+
+import (
+ "testing"
+
+ avatars_model "code.gitea.io/gitea/models/avatars"
+ "code.gitea.io/gitea/models/db"
+ system_model "code.gitea.io/gitea/models/system"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/setting/config"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+const gravatarSource = "https://secure.gravatar.com/avatar/"
+
+func disableGravatar(t *testing.T) {
+ err := system_model.SetSettings(db.DefaultContext, map[string]string{setting.Config().Picture.EnableFederatedAvatar.DynKey(): "false"})
+ require.NoError(t, err)
+ err = system_model.SetSettings(db.DefaultContext, map[string]string{setting.Config().Picture.DisableGravatar.DynKey(): "true"})
+ require.NoError(t, err)
+}
+
+func enableGravatar(t *testing.T) {
+ err := system_model.SetSettings(db.DefaultContext, map[string]string{setting.Config().Picture.DisableGravatar.DynKey(): "false"})
+ require.NoError(t, err)
+ setting.GravatarSource = gravatarSource
+}
+
+func TestHashEmail(t *testing.T) {
+ assert.Equal(t,
+ "d41d8cd98f00b204e9800998ecf8427e",
+ avatars_model.HashEmail(""),
+ )
+ assert.Equal(t,
+ "353cbad9b58e69c96154ad99f92bedc7",
+ avatars_model.HashEmail("gitea@example.com"),
+ )
+}
+
+func TestSizedAvatarLink(t *testing.T) {
+ setting.AppSubURL = "/testsuburl"
+
+ disableGravatar(t)
+ config.GetDynGetter().InvalidateCache()
+ assert.Equal(t, "/testsuburl/assets/img/avatar_default.png",
+ avatars_model.GenerateEmailAvatarFastLink(db.DefaultContext, "gitea@example.com", 100))
+
+ enableGravatar(t)
+ config.GetDynGetter().InvalidateCache()
+ assert.Equal(t,
+ "https://secure.gravatar.com/avatar/353cbad9b58e69c96154ad99f92bedc7?d=identicon&s=100",
+ avatars_model.GenerateEmailAvatarFastLink(db.DefaultContext, "gitea@example.com", 100),
+ )
+}
diff --git a/models/avatars/main_test.go b/models/avatars/main_test.go
new file mode 100644
index 0000000..c721a7d
--- /dev/null
+++ b/models/avatars/main_test.go
@@ -0,0 +1,18 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package avatars_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/unittest"
+
+ _ "code.gitea.io/gitea/models"
+ _ "code.gitea.io/gitea/models/activities"
+ _ "code.gitea.io/gitea/models/perm/access"
+)
+
+func TestMain(m *testing.M) {
+ unittest.MainTest(m)
+}
diff --git a/models/db/collation.go b/models/db/collation.go
new file mode 100644
index 0000000..39d28fa
--- /dev/null
+++ b/models/db/collation.go
@@ -0,0 +1,159 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package db
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+ "xorm.io/xorm/schemas"
+)
+
+type CheckCollationsResult struct {
+ ExpectedCollation string
+ AvailableCollation container.Set[string]
+ DatabaseCollation string
+ IsCollationCaseSensitive func(s string) bool
+ CollationEquals func(a, b string) bool
+ ExistingTableNumber int
+
+ InconsistentCollationColumns []string
+}
+
+func findAvailableCollationsMySQL(x *xorm.Engine) (ret container.Set[string], err error) {
+ var res []struct {
+ Collation string
+ }
+ if err = x.SQL("SHOW COLLATION WHERE (Collation = 'utf8mb4_bin') OR (Collation LIKE '%\\_as\\_cs%')").Find(&res); err != nil {
+ return nil, err
+ }
+ ret = make(container.Set[string], len(res))
+ for _, r := range res {
+ ret.Add(r.Collation)
+ }
+ return ret, nil
+}
+
+func CheckCollations(x *xorm.Engine) (*CheckCollationsResult, error) {
+ dbTables, err := x.DBMetas()
+ if err != nil {
+ return nil, err
+ }
+
+ res := &CheckCollationsResult{
+ ExistingTableNumber: len(dbTables),
+ CollationEquals: func(a, b string) bool { return a == b },
+ }
+
+ var candidateCollations []string
+ if x.Dialect().URI().DBType == schemas.MYSQL {
+ if _, err = x.SQL("SELECT @@collation_database").Get(&res.DatabaseCollation); err != nil {
+ return nil, err
+ }
+ res.IsCollationCaseSensitive = func(s string) bool {
+ return s == "utf8mb4_bin" || strings.HasSuffix(s, "_as_cs")
+ }
+ candidateCollations = []string{"utf8mb4_0900_as_cs", "uca1400_as_cs", "utf8mb4_bin"}
+ res.AvailableCollation, err = findAvailableCollationsMySQL(x)
+ if err != nil {
+ return nil, err
+ }
+ res.CollationEquals = func(a, b string) bool {
+ // MariaDB adds the "utf8mb4_" prefix to column collations (eg: "utf8mb4_uca1400_as_cs"), but "SHOW COLLATION" lists the bare name "uca1400_as_cs"
+ // At the moment it's safe to ignore this difference and just trim the prefix before comparing. It could be fixed easily if any problem shows up in the future.
+ return a == b || strings.TrimPrefix(a, "utf8mb4_") == strings.TrimPrefix(b, "utf8mb4_")
+ }
+ } else {
+ return nil, nil
+ }
+
+ if res.DatabaseCollation == "" {
+ return nil, errors.New("unable to get collation for current database")
+ }
+
+ res.ExpectedCollation = setting.Database.CharsetCollation
+ if res.ExpectedCollation == "" {
+ for _, collation := range candidateCollations {
+ if res.AvailableCollation.Contains(collation) {
+ res.ExpectedCollation = collation
+ break
+ }
+ }
+ }
+
+ if res.ExpectedCollation == "" {
+ return nil, errors.New("unable to find a suitable collation for current database")
+ }
+
+ allColumnsMatchExpected := true
+ allColumnsMatchDatabase := true
+ for _, table := range dbTables {
+ for _, col := range table.Columns() {
+ if col.Collation != "" {
+ allColumnsMatchExpected = allColumnsMatchExpected && res.CollationEquals(col.Collation, res.ExpectedCollation)
+ allColumnsMatchDatabase = allColumnsMatchDatabase && res.CollationEquals(col.Collation, res.DatabaseCollation)
+ if !res.IsCollationCaseSensitive(col.Collation) || !res.CollationEquals(col.Collation, res.DatabaseCollation) {
+ res.InconsistentCollationColumns = append(res.InconsistentCollationColumns, fmt.Sprintf("%s.%s", table.Name, col.Name))
+ }
+ }
+ }
+ }
+ // if all columns match expected collation or all match database collation, then it could also be considered as "consistent"
+ if allColumnsMatchExpected || allColumnsMatchDatabase {
+ res.InconsistentCollationColumns = nil
+ }
+ return res, nil
+}
+
+func CheckCollationsDefaultEngine() (*CheckCollationsResult, error) {
+ return CheckCollations(x)
+}
+
+func alterDatabaseCollation(x *xorm.Engine, collation string) error {
+ if x.Dialect().URI().DBType == schemas.MYSQL {
+ _, err := x.Exec("ALTER DATABASE CHARACTER SET utf8mb4 COLLATE " + collation)
+ return err
+ }
+ return errors.New("unsupported database type")
+}
+
+// preprocessDatabaseCollation checks the database & table column collation, and alters the database collation if needed
+func preprocessDatabaseCollation(x *xorm.Engine) {
+ r, err := CheckCollations(x)
+ if err != nil {
+ log.Error("Failed to check database collation: %v", err)
+ }
+ if r == nil {
+ return // no check result means this database type doesn't need such a check/process (at the moment)
+ }
+
+ // if the database is empty, try to alter its collation to the expected one; this might fail in some cases
+ // (and it isn't strictly necessary to succeed at the moment).
+ if !r.CollationEquals(r.DatabaseCollation, r.ExpectedCollation) && r.ExistingTableNumber == 0 {
+ if err = alterDatabaseCollation(x, r.ExpectedCollation); err != nil {
+ log.Error("Failed to change database collation to %q: %v", r.ExpectedCollation, err)
+ } else {
+ if r, err = CheckCollations(x); err != nil {
+ log.Error("Failed to check database collation again after altering: %v", err) // impossible case
+ return
+ }
+ log.Warn("Current database has been altered to use collation %q", r.DatabaseCollation)
+ }
+ }
+
+ // check column collation, and show warning/error to end users -- no need to fatal, do not block the startup
+ if !r.IsCollationCaseSensitive(r.DatabaseCollation) {
+ log.Warn("Current database is using a case-insensitive collation %q, although Forgejo could work with it, there might be some rare cases which don't work as expected.", r.DatabaseCollation)
+ }
+
+ if len(r.InconsistentCollationColumns) > 0 {
+ log.Error("There are %d table columns using inconsistent collation, they should use %q. Please go to admin panel Self Check page", len(r.InconsistentCollationColumns), r.DatabaseCollation)
+ }
+}
diff --git a/models/db/common.go b/models/db/common.go
new file mode 100644
index 0000000..f3fd3e7
--- /dev/null
+++ b/models/db/common.go
@@ -0,0 +1,53 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package db
+
+import (
+ "strings"
+
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// BuildCaseInsensitiveLike returns a condition to check if the given value is like the given key case-insensitively.
+// SQLite needs special handling, as its UPPER only transforms ASCII letters.
+func BuildCaseInsensitiveLike(key, value string) builder.Cond {
+ if setting.Database.Type.IsSQLite3() {
+ return builder.Like{"UPPER(" + key + ")", util.ToUpperASCII(value)}
+ }
+ return builder.Like{"UPPER(" + key + ")", strings.ToUpper(value)}
+}
+
+// BuildCaseInsensitiveIn returns a condition to check if the given value is in the given values case-insensitively.
+// SQLite needs special handling, as its UPPER only transforms ASCII letters.
+func BuildCaseInsensitiveIn(key string, values []string) builder.Cond {
+ uppers := make([]string, 0, len(values))
+ if setting.Database.Type.IsSQLite3() {
+ for _, value := range values {
+ uppers = append(uppers, util.ToUpperASCII(value))
+ }
+ } else {
+ for _, value := range values {
+ uppers = append(uppers, strings.ToUpper(value))
+ }
+ }
+
+ return builder.In("UPPER("+key+")", uppers)
+}
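+
+// A minimal usage sketch (the "user" table and "lower_name" column are assumptions for illustration):
+//
+//	cond := BuildCaseInsensitiveLike("lower_name", "Octo")
+//	var names []string
+//	err := GetEngine(ctx).Table("user").Cols("lower_name").Where(cond).Find(&names)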
+
+// BuilderDialect returns the xorm.Builder dialect of the engine
+func BuilderDialect() string {
+ switch {
+ case setting.Database.Type.IsMySQL():
+ return builder.MYSQL
+ case setting.Database.Type.IsSQLite3():
+ return builder.SQLITE
+ case setting.Database.Type.IsPostgreSQL():
+ return builder.POSTGRES
+ default:
+ return ""
+ }
+}
diff --git a/models/db/consistency.go b/models/db/consistency.go
new file mode 100644
index 0000000..d19732c
--- /dev/null
+++ b/models/db/consistency.go
@@ -0,0 +1,31 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package db
+
+import (
+ "context"
+
+ "xorm.io/builder"
+)
+
+// CountOrphanedObjects counts subjects which no longer have an existing refobject
+func CountOrphanedObjects(ctx context.Context, subject, refobject, joinCond string) (int64, error) {
+ return GetEngine(ctx).
+ Table("`"+subject+"`").
+ Join("LEFT", "`"+refobject+"`", joinCond).
+ Where(builder.IsNull{"`" + refobject + "`.id"}).
+ Select("COUNT(`" + subject + "`.`id`)").
+ Count()
+}
+
+// DeleteOrphanedObjects deletes subjects which no longer have an existing refobject
+func DeleteOrphanedObjects(ctx context.Context, subject, refobject, joinCond string) error {
+ subQuery := builder.Select("`"+subject+"`.id").
+ From("`"+subject+"`").
+ Join("LEFT", "`"+refobject+"`", joinCond).
+ Where(builder.IsNull{"`" + refobject + "`.id"})
+ b := builder.Delete(builder.In("id", subQuery)).From("`" + subject + "`")
+ _, err := GetEngine(ctx).Exec(b)
+ return err
+}
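+
+// A typical call, mirroring the engine tests: count, then delete, pull requests whose issue is gone:
+//
+//	orphaned, err := CountOrphanedObjects(ctx, "pull_request", "issue", "pull_request.issue_id=issue.id")
+//	if err == nil && orphaned > 0 {
+//		err = DeleteOrphanedObjects(ctx, "pull_request", "issue", "pull_request.issue_id=issue.id")
+//	}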
diff --git a/models/db/context.go b/models/db/context.go
new file mode 100644
index 0000000..43f6125
--- /dev/null
+++ b/models/db/context.go
@@ -0,0 +1,331 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package db
+
+import (
+ "context"
+ "database/sql"
+
+ "xorm.io/builder"
+ "xorm.io/xorm"
+)
+
+// DefaultContext is the default context to run xorm queries in
+// will be overwritten by Init with HammerContext
+var DefaultContext context.Context
+
+// contextKey is a value for use with context.WithValue.
+type contextKey struct {
+ name string
+}
+
+// enginedContextKey is a context key. It is used with context.Value() to get the current Engined for the context
+var (
+ enginedContextKey = &contextKey{"engined"}
+ _ Engined = &Context{}
+)
+
+// Context represents a db context
+type Context struct {
+ context.Context
+ e Engine
+ transaction bool
+}
+
+func newContext(ctx context.Context, e Engine, transaction bool) *Context {
+ return &Context{
+ Context: ctx,
+ e: e,
+ transaction: transaction,
+ }
+}
+
+// InTransaction reports whether the context is in a transaction
+func (ctx *Context) InTransaction() bool {
+ return ctx.transaction
+}
+
+// Engine returns db engine
+func (ctx *Context) Engine() Engine {
+ return ctx.e
+}
+
+// Value shadows Value for context.Context but allows us to get ourselves and an Engined object
+func (ctx *Context) Value(key any) any {
+ if key == enginedContextKey {
+ return ctx
+ }
+ return ctx.Context.Value(key)
+}
+
+// WithContext returns this engine tied to this context
+func (ctx *Context) WithContext(other context.Context) *Context {
+ return newContext(ctx, ctx.e.Context(other), ctx.transaction)
+}
+
+// Engined structs provide an Engine
+type Engined interface {
+ Engine() Engine
+}
+
+// GetEngine will get a db Engine from this context or return an Engine restricted to this context
+func GetEngine(ctx context.Context) Engine {
+ if e := getEngine(ctx); e != nil {
+ return e
+ }
+ return x.Context(ctx)
+}
+
+// getEngine will get a db Engine from this context or return nil
+func getEngine(ctx context.Context) Engine {
+ if engined, ok := ctx.(Engined); ok {
+ return engined.Engine()
+ }
+ enginedInterface := ctx.Value(enginedContextKey)
+ if enginedInterface != nil {
+ return enginedInterface.(Engined).Engine()
+ }
+ return nil
+}
+
+// Committer represents an interface to Commit or Close the Context
+type Committer interface {
+ Commit() error
+ Close() error
+}
+
+// halfCommitter is a wrapper of Committer.
+// It can be closed early, but can't be committed early; this makes it useful for reusing a transaction.
+type halfCommitter struct {
+ committer Committer
+ committed bool
+}
+
+func (c *halfCommitter) Commit() error {
+ c.committed = true
+ // should do nothing, and the parent committer will commit later
+ return nil
+}
+
+func (c *halfCommitter) Close() error {
+ if c.committed {
+ // it's "commit and close", should do nothing, and the parent committer will commit later
+ return nil
+ }
+
+ // it's "rollback and close", let the parent committer rollback right now
+ return c.committer.Close()
+}
+
+// TxContext represents a transaction Context,
+// it will reuse the existing transaction in the parent context or create a new one.
+// Some tips to use:
+//
+// 1. It's always recommended to use `WithTx` in new code instead of `TxContext`, since `WithTx` will handle the transaction automatically.
+// 2. To maintain the old code which uses `TxContext`:
+// a. Always call `Close()` before returning regardless of whether `Commit()` has been called.
+// b. Always call `Commit()` before returning if there are no errors, even if the code did not change any data.
+// c. Remember the `Committer` will be a halfCommitter when a transaction is being reused.
+// So calling `Commit()` will do nothing, but calling `Close()` without calling `Commit()` will roll back the transaction.
+// And all operations submitted by the caller stack will be rolled back as well, not only the operations in the current function.
+// d. It doesn't mean rollback is forbidden, but always do it only when there is an error, and you do want to rollback.
+func TxContext(parentCtx context.Context) (*Context, Committer, error) {
+ if sess, ok := inTransaction(parentCtx); ok {
+ return newContext(parentCtx, sess, true), &halfCommitter{committer: sess}, nil
+ }
+
+ sess := x.NewSession()
+ if err := sess.Begin(); err != nil {
+ sess.Close()
+ return nil, nil, err
+ }
+
+ return newContext(DefaultContext, sess, true), sess, nil
+}
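+
+// The tips above translate to this canonical pattern (a sketch; error handling reduced to the essentials):
+//
+//	ctx, committer, err := TxContext(parentCtx)
+//	if err != nil {
+//		return err
+//	}
+//	defer committer.Close()
+//	// ... do work with ctx ...
+//	return committer.Commit()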
+
+// WithTx executes database operations in a transaction; if a transaction already exists in the context,
+// this function reuses it, otherwise it creates a new one and closes it when finished.
+func WithTx(parentCtx context.Context, f func(ctx context.Context) error) error {
+ if sess, ok := inTransaction(parentCtx); ok {
+ err := f(newContext(parentCtx, sess, true))
+ if err != nil {
+ // rollback immediately, in case the caller ignores returned error and tries to commit the transaction.
+ _ = sess.Close()
+ }
+ return err
+ }
+ return txWithNoCheck(parentCtx, f)
+}
+
+func txWithNoCheck(parentCtx context.Context, f func(ctx context.Context) error) error {
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ if err := f(newContext(parentCtx, sess, true)); err != nil {
+ return err
+ }
+
+ return sess.Commit()
+}
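+
+// A minimal WithTx usage sketch (someBean is a hypothetical registered model):
+//
+//	err := WithTx(ctx, func(ctx context.Context) error {
+//		return Insert(ctx, &someBean)
+//	})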
+
+// Insert inserts records into database
+func Insert(ctx context.Context, beans ...any) error {
+ _, err := GetEngine(ctx).Insert(beans...)
+ return err
+}
+
+// Exec executes a sql with args
+func Exec(ctx context.Context, sqlAndArgs ...any) (sql.Result, error) {
+ return GetEngine(ctx).Exec(sqlAndArgs...)
+}
+
+func Get[T any](ctx context.Context, cond builder.Cond) (object *T, exist bool, err error) {
+ if !cond.IsValid() {
+ panic("cond is invalid in db.Get(ctx, cond). This should not be possible.")
+ }
+
+ var bean T
+ has, err := GetEngine(ctx).Where(cond).NoAutoCondition().Get(&bean)
+ if err != nil {
+ return nil, false, err
+ } else if !has {
+ return nil, false, nil
+ }
+ return &bean, true, nil
+}
+
+func GetByID[T any](ctx context.Context, id int64) (object *T, exist bool, err error) {
+ var bean T
+ has, err := GetEngine(ctx).ID(id).NoAutoCondition().Get(&bean)
+ if err != nil {
+ return nil, false, err
+ } else if !has {
+ return nil, false, nil
+ }
+ return &bean, true, nil
+}
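+
+// A usage sketch for the generic helpers (SomeBean is a hypothetical registered model):
+//
+//	bean, exist, err := Get[SomeBean](ctx, builder.Eq{"lower_name": "octocat"})
+//	byID, exist, err := GetByID[SomeBean](ctx, 1)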
+
+func Exist[T any](ctx context.Context, cond builder.Cond) (bool, error) {
+ if !cond.IsValid() {
+ panic("cond is invalid in db.Exist(ctx, cond). This should not be possible.")
+ }
+
+ var bean T
+ return GetEngine(ctx).Where(cond).NoAutoCondition().Exist(&bean)
+}
+
+func ExistByID[T any](ctx context.Context, id int64) (bool, error) {
+ var bean T
+ return GetEngine(ctx).ID(id).NoAutoCondition().Exist(&bean)
+}
+
+// DeleteByID deletes the given bean with the given ID
+func DeleteByID[T any](ctx context.Context, id int64) (int64, error) {
+ var bean T
+ return GetEngine(ctx).ID(id).NoAutoCondition().NoAutoTime().Delete(&bean)
+}
+
+func DeleteByIDs[T any](ctx context.Context, ids ...int64) error {
+ if len(ids) == 0 {
+ return nil
+ }
+
+ var bean T
+ _, err := GetEngine(ctx).In("id", ids).NoAutoCondition().NoAutoTime().Delete(&bean)
+ return err
+}
+
+func Delete[T any](ctx context.Context, opts FindOptions) (int64, error) {
+ if opts == nil || !opts.ToConds().IsValid() {
+ panic("opts are empty or invalid in db.Delete(ctx, opts). This should not be possible.")
+ }
+
+ var bean T
+ return GetEngine(ctx).Where(opts.ToConds()).NoAutoCondition().NoAutoTime().Delete(&bean)
+}
+
+// DeleteByBean deletes all records using the non-empty fields of the bean as conditions.
+func DeleteByBean(ctx context.Context, bean any) (int64, error) {
+ return GetEngine(ctx).Delete(bean)
+}
+
+// FindIDs finds the IDs for the given table name satisfying the given condition
+// By passing a value other than "id" for "idCol", you can query for foreign IDs, e.g. the repo IDs which satisfy the condition
+func FindIDs(ctx context.Context, tableName, idCol string, cond builder.Cond) ([]int64, error) {
+ ids := make([]int64, 0, 10)
+ if err := GetEngine(ctx).Table(tableName).
+ Cols(idCol).
+ Where(cond).
+ Find(&ids); err != nil {
+ return nil, err
+ }
+ return ids, nil
+}
+
+// DecrByIDs decreases the given column for entities of the "bean" type with one of the given ids by one
+// Timestamps of the entities won't be updated
+func DecrByIDs(ctx context.Context, ids []int64, decrCol string, bean any) error {
+ _, err := GetEngine(ctx).Decr(decrCol).In("id", ids).NoAutoCondition().NoAutoTime().Update(bean)
+ return err
+}
+
+// DeleteBeans deletes all given beans, beans must contain delete conditions.
+func DeleteBeans(ctx context.Context, beans ...any) (err error) {
+ e := GetEngine(ctx)
+ for i := range beans {
+ if _, err = e.Delete(beans[i]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// TruncateBeans deletes all given beans, beans may contain delete conditions.
+func TruncateBeans(ctx context.Context, beans ...any) (err error) {
+ e := GetEngine(ctx)
+ for i := range beans {
+ if _, err = e.Truncate(beans[i]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// CountByBean counts the number of database records using the non-empty fields of the bean as conditions.
+func CountByBean(ctx context.Context, bean any) (int64, error) {
+ return GetEngine(ctx).Count(bean)
+}
+
+// TableName returns the table name for a bean object
+func TableName(bean any) string {
+ return x.TableName(bean)
+}
+
+// InTransaction returns true if the engine is in a transaction, otherwise false
+func InTransaction(ctx context.Context) bool {
+ _, ok := inTransaction(ctx)
+ return ok
+}
+
+func inTransaction(ctx context.Context) (*xorm.Session, bool) {
+ e := getEngine(ctx)
+ if e == nil {
+ return nil, false
+ }
+
+ switch t := e.(type) {
+ case *xorm.Engine:
+ return nil, false
+ case *xorm.Session:
+ if t.IsInTx() {
+ return t, true
+ }
+ return nil, false
+ default:
+ return nil, false
+ }
+}
diff --git a/models/db/context_committer_test.go b/models/db/context_committer_test.go
new file mode 100644
index 0000000..38e91f2
--- /dev/null
+++ b/models/db/context_committer_test.go
@@ -0,0 +1,102 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package db // it's not db_test, because this file is for testing the private type halfCommitter
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+type MockCommitter struct {
+ wants []string
+ gots []string
+}
+
+func NewMockCommitter(wants ...string) *MockCommitter {
+ return &MockCommitter{
+ wants: wants,
+ }
+}
+
+func (c *MockCommitter) Commit() error {
+ c.gots = append(c.gots, "commit")
+ return nil
+}
+
+func (c *MockCommitter) Close() error {
+ c.gots = append(c.gots, "close")
+ return nil
+}
+
+func (c *MockCommitter) Assert(t *testing.T) {
+ assert.Equal(t, c.wants, c.gots, "want operations %v, but got %v", c.wants, c.gots)
+}
+
+func Test_halfCommitter(t *testing.T) {
+ /*
+ Do something like:
+
+ ctx, committer, err := db.TxContext(db.DefaultContext)
+ if err != nil {
+ return nil
+ }
+ defer committer.Close()
+
+ // ...
+
+ if err != nil {
+ return nil
+ }
+
+ // ...
+
+ return committer.Commit()
+ */
+
+ testWithCommitter := func(committer Committer, f func(committer Committer) error) {
+ if err := f(&halfCommitter{committer: committer}); err == nil {
+ committer.Commit()
+ }
+ committer.Close()
+ }
+
+ t.Run("commit and close", func(t *testing.T) {
+ mockCommitter := NewMockCommitter("commit", "close")
+
+ testWithCommitter(mockCommitter, func(committer Committer) error {
+ defer committer.Close()
+ return committer.Commit()
+ })
+
+ mockCommitter.Assert(t)
+ })
+
+ t.Run("rollback and close", func(t *testing.T) {
+ mockCommitter := NewMockCommitter("close", "close")
+
+ testWithCommitter(mockCommitter, func(committer Committer) error {
+ defer committer.Close()
+ if true {
+ return fmt.Errorf("error")
+ }
+ return committer.Commit()
+ })
+
+ mockCommitter.Assert(t)
+ })
+
+ t.Run("close and commit", func(t *testing.T) {
+ mockCommitter := NewMockCommitter("close", "close")
+
+ testWithCommitter(mockCommitter, func(committer Committer) error {
+ committer.Close()
+ committer.Commit()
+ return fmt.Errorf("error")
+ })
+
+ mockCommitter.Assert(t)
+ })
+}
diff --git a/models/db/context_test.go b/models/db/context_test.go
new file mode 100644
index 0000000..855f360
--- /dev/null
+++ b/models/db/context_test.go
@@ -0,0 +1,87 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package db_test
+
+import (
+ "context"
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestInTransaction(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ assert.False(t, db.InTransaction(db.DefaultContext))
+ require.NoError(t, db.WithTx(db.DefaultContext, func(ctx context.Context) error {
+ assert.True(t, db.InTransaction(ctx))
+ return nil
+ }))
+
+ ctx, committer, err := db.TxContext(db.DefaultContext)
+ require.NoError(t, err)
+ defer committer.Close()
+ assert.True(t, db.InTransaction(ctx))
+ require.NoError(t, db.WithTx(ctx, func(ctx context.Context) error {
+ assert.True(t, db.InTransaction(ctx))
+ return nil
+ }))
+}
+
+func TestTxContext(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ { // create new transaction
+ ctx, committer, err := db.TxContext(db.DefaultContext)
+ require.NoError(t, err)
+ assert.True(t, db.InTransaction(ctx))
+ require.NoError(t, committer.Commit())
+ }
+
+ { // reuse the transaction created by TxContext and commit it
+ ctx, committer, err := db.TxContext(db.DefaultContext)
+ engine := db.GetEngine(ctx)
+ require.NoError(t, err)
+ assert.True(t, db.InTransaction(ctx))
+ {
+ ctx, committer, err := db.TxContext(ctx)
+ require.NoError(t, err)
+ assert.True(t, db.InTransaction(ctx))
+ assert.Equal(t, engine, db.GetEngine(ctx))
+ require.NoError(t, committer.Commit())
+ }
+ require.NoError(t, committer.Commit())
+ }
+
+ { // reuse the transaction created by TxContext and close it
+ ctx, committer, err := db.TxContext(db.DefaultContext)
+ engine := db.GetEngine(ctx)
+ require.NoError(t, err)
+ assert.True(t, db.InTransaction(ctx))
+ {
+ ctx, committer, err := db.TxContext(ctx)
+ require.NoError(t, err)
+ assert.True(t, db.InTransaction(ctx))
+ assert.Equal(t, engine, db.GetEngine(ctx))
+ require.NoError(t, committer.Close())
+ }
+ require.NoError(t, committer.Close())
+ }
+
+ { // reuse the transaction created by WithTx
+ require.NoError(t, db.WithTx(db.DefaultContext, func(ctx context.Context) error {
+ assert.True(t, db.InTransaction(ctx))
+ {
+ ctx, committer, err := db.TxContext(ctx)
+ require.NoError(t, err)
+ assert.True(t, db.InTransaction(ctx))
+ require.NoError(t, committer.Commit())
+ }
+ return nil
+ }))
+ }
+}
diff --git a/models/db/convert.go b/models/db/convert.go
new file mode 100644
index 0000000..956e17d
--- /dev/null
+++ b/models/db/convert.go
@@ -0,0 +1,64 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package db
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+ "xorm.io/xorm/schemas"
+)
+
+// ConvertDatabaseTable converts the database and its tables from utf8 to utf8mb4 if it's MySQL, and sets ROW_FORMAT=dynamic
+func ConvertDatabaseTable() error {
+ if x.Dialect().URI().DBType != schemas.MYSQL {
+ return nil
+ }
+
+ r, err := CheckCollations(x)
+ if err != nil {
+ return err
+ }
+
+ databaseName := strings.SplitN(setting.Database.Name, "?", 2)[0]
+ _, err = x.Exec(fmt.Sprintf("ALTER DATABASE `%s` CHARACTER SET utf8mb4 COLLATE %s", databaseName, r.ExpectedCollation))
+ if err != nil {
+ return err
+ }
+
+ tables, err := x.DBMetas()
+ if err != nil {
+ return err
+ }
+ for _, table := range tables {
+ if _, err := x.Exec(fmt.Sprintf("ALTER TABLE `%s` ROW_FORMAT=dynamic", table.Name)); err != nil {
+ return err
+ }
+
+ if _, err := x.Exec(fmt.Sprintf("ALTER TABLE `%s` CONVERT TO CHARACTER SET utf8mb4 COLLATE %s", table.Name, r.ExpectedCollation)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Cell2Int64 converts a xorm.Cell type to int64,
+// and handles possible irregular cases.
+func Cell2Int64(val xorm.Cell) int64 {
+ switch (*val).(type) {
+ case []uint8:
+ log.Trace("Cell2Int64 ([]uint8): %v", *val)
+
+ v, _ := strconv.ParseInt(string((*val).([]uint8)), 10, 64)
+ return v
+ default:
+ return (*val).(int64)
+ }
+}
diff --git a/models/db/engine.go b/models/db/engine.go
new file mode 100755
index 0000000..6164959
--- /dev/null
+++ b/models/db/engine.go
@@ -0,0 +1,354 @@
+// Copyright 2014 The Gogs Authors. All rights reserved.
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package db
+
+import (
+ "context"
+ "database/sql"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+ "xorm.io/xorm/contexts"
+ "xorm.io/xorm/names"
+ "xorm.io/xorm/schemas"
+
+ _ "github.com/go-sql-driver/mysql" // Needed for the MySQL driver
+ _ "github.com/lib/pq" // Needed for the Postgresql driver
+)
+
+var (
+ x *xorm.Engine
+ tables []any
+ initFuncs []func() error
+)
+
+// Engine represents a xorm engine or session.
+type Engine interface {
+ Table(tableNameOrBean any) *xorm.Session
+ Count(...any) (int64, error)
+ Decr(column string, arg ...any) *xorm.Session
+ Delete(...any) (int64, error)
+ Truncate(...any) (int64, error)
+ Exec(...any) (sql.Result, error)
+ Find(any, ...any) error
+ Get(beans ...any) (bool, error)
+ ID(any) *xorm.Session
+ In(string, ...any) *xorm.Session
+ Incr(column string, arg ...any) *xorm.Session
+ Insert(...any) (int64, error)
+ Iterate(any, xorm.IterFunc) error
+ IsTableExist(any) (bool, error)
+ Join(joinOperator string, tablename, condition any, args ...any) *xorm.Session
+ SQL(any, ...any) *xorm.Session
+ Where(any, ...any) *xorm.Session
+ Asc(colNames ...string) *xorm.Session
+ Desc(colNames ...string) *xorm.Session
+ Limit(limit int, start ...int) *xorm.Session
+ NoAutoTime() *xorm.Session
+ SumInt(bean any, columnName string) (res int64, err error)
+ Sync(...any) error
+ Select(string) *xorm.Session
+ SetExpr(string, any) *xorm.Session
+ NotIn(string, ...any) *xorm.Session
+ OrderBy(any, ...any) *xorm.Session
+ Exist(...any) (bool, error)
+ Distinct(...string) *xorm.Session
+ Query(...any) ([]map[string][]byte, error)
+ Cols(...string) *xorm.Session
+ Context(ctx context.Context) *xorm.Session
+ Ping() error
+}
+
+// TableInfo returns table's information via an object
+func TableInfo(v any) (*schemas.Table, error) {
+ return x.TableInfo(v)
+}
+
+// DumpTables dumps the tables' information
+func DumpTables(tables []*schemas.Table, w io.Writer, tp ...schemas.DBType) error {
+ return x.DumpTables(tables, w, tp...)
+}
+
+// RegisterModel registers a model; if an initFunc is provided, it will be invoked after the data model sync
+func RegisterModel(bean any, initFunc ...func() error) {
+ tables = append(tables, bean)
+ // guard on the variadic parameter (not the global slice), otherwise initFunc[0] panics when no initFunc is given
+ if len(initFunc) > 0 && initFunc[0] != nil {
+ initFuncs = append(initFuncs, initFunc[0])
+ }
+}
+
+func init() {
+ gonicNames := []string{"SSL", "UID"}
+ for _, name := range gonicNames {
+ names.LintGonicMapper[name] = true
+ }
+}
+
+// newXORMEngine returns a new XORM engine from the configuration
+func newXORMEngine() (*xorm.Engine, error) {
+ connStr, err := setting.DBConnStr()
+ if err != nil {
+ return nil, err
+ }
+
+ var engine *xorm.Engine
+
+ if setting.Database.Type.IsPostgreSQL() && len(setting.Database.Schema) > 0 {
+ // OK whilst we sort out our schema issues - create a schema-aware postgres engine
+ registerPostgresSchemaDriver()
+ engine, err = xorm.NewEngine("postgresschema", connStr)
+ } else {
+ engine, err = xorm.NewEngine(setting.Database.Type.String(), connStr)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ if setting.Database.Type.IsMySQL() {
+ engine.Dialect().SetParams(map[string]string{"rowFormat": "DYNAMIC"})
+ }
+ engine.SetSchema(setting.Database.Schema)
+ return engine, nil
+}
+
+// SyncAllTables syncs the schemas of all tables; it is required by unit test code
+func SyncAllTables() error {
+ _, err := x.StoreEngine("InnoDB").SyncWithOptions(xorm.SyncOptions{
+ WarnIfDatabaseColumnMissed: true,
+ }, tables...)
+ return err
+}
+
+// InitEngine initializes the xorm.Engine and sets it as db.DefaultContext
+func InitEngine(ctx context.Context) error {
+ xormEngine, err := newXORMEngine()
+ if err != nil {
+ return fmt.Errorf("failed to connect to database: %w", err)
+ }
+
+ xormEngine.SetMapper(names.GonicMapper{})
+ // WARNING: for the serv command, output to os.Stdout MUST be avoided,
+ // so log to a file instead of printing to stdout.
+ xormEngine.SetLogger(NewXORMLogger(setting.Database.LogSQL))
+ xormEngine.ShowSQL(setting.Database.LogSQL)
+ xormEngine.SetMaxOpenConns(setting.Database.MaxOpenConns)
+ xormEngine.SetMaxIdleConns(setting.Database.MaxIdleConns)
+ xormEngine.SetConnMaxLifetime(setting.Database.ConnMaxLifetime)
+ xormEngine.SetConnMaxIdleTime(setting.Database.ConnMaxIdleTime)
+ xormEngine.SetDefaultContext(ctx)
+
+ if setting.Database.SlowQueryThreshold > 0 {
+ xormEngine.AddHook(&SlowQueryHook{
+ Threshold: setting.Database.SlowQueryThreshold,
+ Logger: log.GetLogger("xorm"),
+ })
+ }
+
+ errorLogger := log.GetLogger("xorm")
+ if setting.IsInTesting {
+ errorLogger = log.GetLogger(log.DEFAULT)
+ }
+
+ xormEngine.AddHook(&ErrorQueryHook{
+ Logger: errorLogger,
+ })
+
+ SetDefaultEngine(ctx, xormEngine)
+ return nil
+}
+
+// SetDefaultEngine sets the default engine for db
+func SetDefaultEngine(ctx context.Context, eng *xorm.Engine) {
+ x = eng
+ DefaultContext = &Context{
+ Context: ctx,
+ e: x,
+ }
+}
+
+// UnsetDefaultEngine closes and unsets the default engine
+// We hope that SetDefaultEngine and UnsetDefaultEngine can always be paired, but that's impossible at the moment:
+// there are many calls to InitEngine -> SetDefaultEngine that directly overwrite `x` and DefaultContext without closing them.
+// Global database engine related functions are all racy and there is no graceful close right now.
+func UnsetDefaultEngine() {
+ if x != nil {
+ _ = x.Close()
+ x = nil
+ }
+ DefaultContext = nil
+}
+
+// InitEngineWithMigration initializes a new xorm.Engine and sets it as the db.DefaultContext
+// This function must never call .Sync() if the provided migration function fails.
+// When called from the "doctor" command, the migration function is a version check
+// that prevents the doctor from fixing anything in the database if the migration level
+// is different from the expected value.
+func InitEngineWithMigration(ctx context.Context, migrateFunc func(*xorm.Engine) error) (err error) {
+ if err = InitEngine(ctx); err != nil {
+ return err
+ }
+
+ if err = x.Ping(); err != nil {
+ return err
+ }
+
+ preprocessDatabaseCollation(x)
+
+ // We have to run migrateFunc here in case the user is re-running installation on a previously created DB.
+ // If we do not then table schemas will be changed and there will be conflicts when the migrations run properly.
+ //
+ // Installation should only be re-run if users want to recover an old database.
+ // However, we should think carefully about whether we should support re-installing on an installed instance,
+ // as there may be other problems due to secret reinitialization.
+ if err = migrateFunc(x); err != nil {
+ return fmt.Errorf("migrate: %w", err)
+ }
+
+ if err = SyncAllTables(); err != nil {
+ return fmt.Errorf("sync database struct error: %w", err)
+ }
+
+ for _, initFunc := range initFuncs {
+ if err := initFunc(); err != nil {
+ return fmt.Errorf("initFunc failed: %w", err)
+ }
+ }
+
+ return nil
+}
+
+// NamesToBean returns a list of beans or an error
+func NamesToBean(names ...string) ([]any, error) {
+ beans := []any{}
+ if len(names) == 0 {
+ beans = append(beans, tables...)
+ return beans, nil
+ }
+ // Need to map provided names to beans...
+ beanMap := make(map[string]any)
+ for _, bean := range tables {
+ beanMap[strings.ToLower(reflect.Indirect(reflect.ValueOf(bean)).Type().Name())] = bean
+ beanMap[strings.ToLower(x.TableName(bean))] = bean
+ beanMap[strings.ToLower(x.TableName(bean, true))] = bean
+ }
+
+ gotBean := make(map[any]bool)
+ for _, name := range names {
+ bean, ok := beanMap[strings.ToLower(strings.TrimSpace(name))]
+ if !ok {
+ return nil, fmt.Errorf("no table found that matches: %s", name)
+ }
+ if !gotBean[bean] {
+ beans = append(beans, bean)
+ gotBean[bean] = true
+ }
+ }
+ return beans, nil
+}
+
+// DumpDatabase dumps all data from the database to the file system, using the SQL syntax of the given database type.
+func DumpDatabase(filePath, dbType string) error {
+ var tbs []*schemas.Table
+ for _, t := range tables {
+ t, err := x.TableInfo(t)
+ if err != nil {
+ return err
+ }
+ tbs = append(tbs, t)
+ }
+
+ type Version struct {
+ ID int64 `xorm:"pk autoincr"`
+ Version int64
+ }
+ t, err := x.TableInfo(&Version{})
+ if err != nil {
+ return err
+ }
+ tbs = append(tbs, t)
+
+ if len(dbType) > 0 {
+ return x.DumpTablesToFile(tbs, filePath, schemas.DBType(dbType))
+ }
+ return x.DumpTablesToFile(tbs, filePath)
+}
+
+// MaxBatchInsertSize returns the table's max batch insert size
+func MaxBatchInsertSize(bean any) int {
+ t, err := x.TableInfo(bean)
+ if err != nil {
+ return 50
+ }
+ return 999 / len(t.ColumnsSeq())
+}
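+
+// The 999 above matches the historical SQLite bound-parameter limit (an assumption about its origin);
+// e.g. a bean with 20 columns yields a batch size of 999/20 = 49 rows per insert.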
+
+// IsTableNotEmpty returns true if table has at least one record
+func IsTableNotEmpty(beanOrTableName any) (bool, error) {
+ return x.Table(beanOrTableName).Exist()
+}
+
+// DeleteAllRecords will delete all the records of this table
+func DeleteAllRecords(tableName string) error {
+ _, err := x.Exec(fmt.Sprintf("DELETE FROM %s", tableName))
+ return err
+}
+
+// GetMaxID will return max id of the table
+func GetMaxID(beanOrTableName any) (maxID int64, err error) {
+ _, err = x.Select("MAX(id)").Table(beanOrTableName).Get(&maxID)
+ return maxID, err
+}
+
+func SetLogSQL(ctx context.Context, on bool) {
+ e := GetEngine(ctx)
+ if x, ok := e.(*xorm.Engine); ok {
+ x.ShowSQL(on)
+ } else if sess, ok := e.(*xorm.Session); ok {
+ sess.Engine().ShowSQL(on)
+ }
+}
+
+type SlowQueryHook struct {
+ Threshold time.Duration
+ Logger log.Logger
+}
+
+var _ contexts.Hook = &SlowQueryHook{}
+
+func (SlowQueryHook) BeforeProcess(c *contexts.ContextHook) (context.Context, error) {
+ return c.Ctx, nil
+}
+
+func (h *SlowQueryHook) AfterProcess(c *contexts.ContextHook) error {
+ if c.ExecuteTime >= h.Threshold {
+ h.Logger.Log(8, log.WARN, "[Slow SQL Query] %s %v - %v", c.SQL, c.Args, c.ExecuteTime)
+ }
+ return nil
+}
+
+type ErrorQueryHook struct {
+ Logger log.Logger
+}
+
+var _ contexts.Hook = &ErrorQueryHook{}
+
+func (ErrorQueryHook) BeforeProcess(c *contexts.ContextHook) (context.Context, error) {
+ return c.Ctx, nil
+}
+
+func (h *ErrorQueryHook) AfterProcess(c *contexts.ContextHook) error {
+ if c.Err != nil && !errors.Is(c.Err, context.Canceled) {
+ h.Logger.Log(8, log.ERROR, "[Error SQL Query] %s %v - %v", c.SQL, c.Args, c.Err)
+ }
+ return nil
+}
diff --git a/models/db/engine_test.go b/models/db/engine_test.go
new file mode 100644
index 0000000..230ee3f
--- /dev/null
+++ b/models/db/engine_test.go
@@ -0,0 +1,154 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package db_test
+
+import (
+ "path/filepath"
+ "testing"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/test"
+
+ _ "code.gitea.io/gitea/cmd" // for TestPrimaryKeys
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "xorm.io/xorm"
+)
+
+func TestDumpDatabase(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ dir := t.TempDir()
+
+ type Version struct {
+ ID int64 `xorm:"pk autoincr"`
+ Version int64
+ }
+ require.NoError(t, db.GetEngine(db.DefaultContext).Sync(new(Version)))
+
+ for _, dbType := range setting.SupportedDatabaseTypes {
+ require.NoError(t, db.DumpDatabase(filepath.Join(dir, dbType+".sql"), dbType))
+ }
+}
+
+func TestDeleteOrphanedObjects(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ countBefore, err := db.GetEngine(db.DefaultContext).Count(&issues_model.PullRequest{})
+ require.NoError(t, err)
+
+ _, err = db.GetEngine(db.DefaultContext).Insert(&issues_model.PullRequest{IssueID: 1000}, &issues_model.PullRequest{IssueID: 1001}, &issues_model.PullRequest{IssueID: 1003})
+ require.NoError(t, err)
+
+ orphaned, err := db.CountOrphanedObjects(db.DefaultContext, "pull_request", "issue", "pull_request.issue_id=issue.id")
+ require.NoError(t, err)
+ assert.EqualValues(t, 3, orphaned)
+
+ err = db.DeleteOrphanedObjects(db.DefaultContext, "pull_request", "issue", "pull_request.issue_id=issue.id")
+ require.NoError(t, err)
+
+ countAfter, err := db.GetEngine(db.DefaultContext).Count(&issues_model.PullRequest{})
+ require.NoError(t, err)
+ assert.EqualValues(t, countBefore, countAfter)
+}
+
+func TestPrimaryKeys(t *testing.T) {
+ // Some dbs require that all tables have primary keys, see
+ // https://github.com/go-gitea/gitea/issues/21086
+ // https://github.com/go-gitea/gitea/issues/16802
+ // To avoid creating tables without primary key again, this test will check them.
+ // Import "code.gitea.io/gitea/cmd" to make sure each db.RegisterModel in init functions has been called.
+
+ beans, err := db.NamesToBean()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ whitelist := map[string]string{
+ "the_table_name_to_skip_checking": "Write a note here to explain why",
+ "forgejo_sem_ver": "seriously dude",
+ }
+
+ for _, bean := range beans {
+ table, err := db.TableInfo(bean)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if why, ok := whitelist[table.Name]; ok {
+ t.Logf("ignore %q because %q", table.Name, why)
+ continue
+ }
+ if len(table.PrimaryKeys) == 0 {
+ t.Errorf("table %q has no primary key", table.Name)
+ }
+ }
+}
+
+func TestSlowQuery(t *testing.T) {
+ lc, cleanup := test.NewLogChecker("slow-query", log.INFO)
+ lc.StopMark("[Slow SQL Query]")
+ defer cleanup()
+
+ e := db.GetEngine(db.DefaultContext)
+ engine, ok := e.(*xorm.Engine)
+ assert.True(t, ok)
+
+ // It's not possible to clean this up with XORM, but it's luckily not harmful
+ // to leave around.
+ engine.AddHook(&db.SlowQueryHook{
+ Threshold: time.Second * 10,
+ Logger: log.GetLogger("slow-query"),
+ })
+
+ // NOOP query.
+ e.Exec("SELECT 1 WHERE false;")
+
+ _, stopped := lc.Check(100 * time.Millisecond)
+ assert.False(t, stopped)
+
+ engine.AddHook(&db.SlowQueryHook{
+ Threshold: 0, // Every query should be logged.
+ Logger: log.GetLogger("slow-query"),
+ })
+
+ // NOOP query.
+ e.Exec("SELECT 1 WHERE false;")
+
+ _, stopped = lc.Check(100 * time.Millisecond)
+ assert.True(t, stopped)
+}
+
+func TestErrorQuery(t *testing.T) {
+ lc, cleanup := test.NewLogChecker("error-query", log.INFO)
+ lc.StopMark("[Error SQL Query]")
+ defer cleanup()
+
+ e := db.GetEngine(db.DefaultContext)
+ engine, ok := e.(*xorm.Engine)
+ assert.True(t, ok)
+
+ // It's not possible to clean this up with XORM, but it's luckily not harmful
+ // to leave around.
+ engine.AddHook(&db.ErrorQueryHook{
+ Logger: log.GetLogger("error-query"),
+ })
+
+ // Valid query.
+ e.Exec("SELECT 1 WHERE false;")
+
+ _, stopped := lc.Check(100 * time.Millisecond)
+ assert.False(t, stopped)
+
+ // Table doesn't exist.
+ e.Exec("SELECT column FROM table;")
+
+ _, stopped = lc.Check(100 * time.Millisecond)
+ assert.True(t, stopped)
+}
diff --git a/models/db/error.go b/models/db/error.go
new file mode 100644
index 0000000..665e970
--- /dev/null
+++ b/models/db/error.go
@@ -0,0 +1,74 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package db
+
+import (
+ "fmt"
+
+ "code.gitea.io/gitea/modules/util"
+)
+
+// ErrCancelled represents an error due to context cancellation
+type ErrCancelled struct {
+ Message string
+}
+
+// IsErrCancelled checks if an error is a ErrCancelled.
+func IsErrCancelled(err error) bool {
+ _, ok := err.(ErrCancelled)
+ return ok
+}
+
+func (err ErrCancelled) Error() string {
+ return "Cancelled: " + err.Message
+}
+
+// ErrCancelledf returns an ErrCancelled for the provided format and args
+func ErrCancelledf(format string, args ...any) error {
+ return ErrCancelled{
+ fmt.Sprintf(format, args...),
+ }
+}
+
+// ErrSSHDisabled represents an "SSH disabled" error.
+type ErrSSHDisabled struct{}
+
+// IsErrSSHDisabled checks if an error is a ErrSSHDisabled.
+func IsErrSSHDisabled(err error) bool {
+ _, ok := err.(ErrSSHDisabled)
+ return ok
+}
+
+func (err ErrSSHDisabled) Error() string {
+ return "SSH is disabled"
+}
+
+// ErrNotExist represents a non-exist error.
+type ErrNotExist struct {
+ Resource string
+ ID int64
+}
+
+// IsErrNotExist checks if an error is an ErrNotExist
+func IsErrNotExist(err error) bool {
+ _, ok := err.(ErrNotExist)
+ return ok
+}
+
+func (err ErrNotExist) Error() string {
+ name := "record"
+ if err.Resource != "" {
+ name = err.Resource
+ }
+
+ if err.ID != 0 {
+ return fmt.Sprintf("%s does not exist [id: %d]", name, err.ID)
+ }
+ return fmt.Sprintf("%s does not exist", name)
+}
+
+// Unwrap unwraps this as a ErrNotExist err
+func (err ErrNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
diff --git a/models/db/index.go b/models/db/index.go
new file mode 100644
index 0000000..259ddd6
--- /dev/null
+++ b/models/db/index.go
@@ -0,0 +1,148 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package db
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strconv"
+
+ "code.gitea.io/gitea/modules/setting"
+)
+
+// ResourceIndex represents a resource index which could be used for issues, releases and others
+// We can create different tables for it, e.g. issue_index, release_index, etc.
+type ResourceIndex struct {
+ GroupID int64 `xorm:"pk"`
+ MaxIndex int64 `xorm:"index"`
+}
+
+var (
+ // ErrResouceOutdated represents an error when the requested resource is outdated
+ ErrResouceOutdated = errors.New("resource outdated")
+ // ErrGetResourceIndexFailed represents an error when getting a resource index still fails after retrying 3 times
+ ErrGetResourceIndexFailed = errors.New("get resource index failed")
+)
+
+// SyncMaxResourceIndex syncs the max index with the resource
+func SyncMaxResourceIndex(ctx context.Context, tableName string, groupID, maxIndex int64) (err error) {
+ e := GetEngine(ctx)
+
+ // try to update the max_index and acquire the write-lock for the record
+ res, err := e.Exec(fmt.Sprintf("UPDATE %s SET max_index=? WHERE group_id=? AND max_index<?", tableName), maxIndex, groupID, maxIndex)
+ if err != nil {
+ return err
+ }
+ affected, err := res.RowsAffected()
+ if err != nil {
+ return err
+ }
+ if affected == 0 {
+ // if nothing was updated, the record either doesn't exist or already has a larger max_index; it's safe to try to insert it again and then check whether the record exists
+ _, errIns := e.Exec(fmt.Sprintf("INSERT INTO %s (group_id, max_index) VALUES (?, ?)", tableName), groupID, maxIndex)
+ var savedIdx int64
+ has, err := e.SQL(fmt.Sprintf("SELECT max_index FROM %s WHERE group_id=?", tableName), groupID).Get(&savedIdx)
+ if err != nil {
+ return err
+ }
+ // if the record still doesn't exist, there must be some errors (insert error)
+ if !has {
+ if errIns == nil {
+ return errors.New("impossible error when SyncMaxResourceIndex, insert succeeded but no record is saved")
+ }
+ return errIns
+ }
+ }
+ return nil
+}
+
+func postgresGetNextResourceIndex(ctx context.Context, tableName string, groupID int64) (int64, error) {
+ res, err := GetEngine(ctx).Query(fmt.Sprintf("INSERT INTO %s (group_id, max_index) "+
+ "VALUES (?,1) ON CONFLICT (group_id) DO UPDATE SET max_index = %s.max_index+1 RETURNING max_index",
+ tableName, tableName), groupID)
+ if err != nil {
+ return 0, err
+ }
+ if len(res) == 0 {
+ return 0, ErrGetResourceIndexFailed
+ }
+ return strconv.ParseInt(string(res[0]["max_index"]), 10, 64)
+}
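+
+// For tableName "issue_index" the statement above expands to (shown for illustration):
+//
+//	INSERT INTO issue_index (group_id, max_index) VALUES (?,1)
+//	ON CONFLICT (group_id) DO UPDATE SET max_index = issue_index.max_index+1 RETURNING max_index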
+
+func mysqlGetNextResourceIndex(ctx context.Context, tableName string, groupID int64) (int64, error) {
+ if _, err := GetEngine(ctx).Exec(fmt.Sprintf("INSERT INTO %s (group_id, max_index) "+
+ "VALUES (?,1) ON DUPLICATE KEY UPDATE max_index = max_index+1",
+ tableName), groupID); err != nil {
+ return 0, err
+ }
+
+ var idx int64
+ _, err := GetEngine(ctx).SQL(fmt.Sprintf("SELECT max_index FROM %s WHERE group_id = ?", tableName), groupID).Get(&idx)
+ if err != nil {
+ return 0, err
+ }
+ if idx == 0 {
+ return 0, errors.New("cannot get the correct index")
+ }
+ return idx, nil
+}
+
+// GetNextResourceIndex generates a resource index; it must run in the same transaction in which the resource is created
+func GetNextResourceIndex(ctx context.Context, tableName string, groupID int64) (int64, error) {
+ switch {
+ case setting.Database.Type.IsPostgreSQL():
+ return postgresGetNextResourceIndex(ctx, tableName, groupID)
+ case setting.Database.Type.IsMySQL():
+ return mysqlGetNextResourceIndex(ctx, tableName, groupID)
+ }
+
+ e := GetEngine(ctx)
+
+ // try to update the max_index to next value, and acquire the write-lock for the record
+ res, err := e.Exec(fmt.Sprintf("UPDATE %s SET max_index=max_index+1 WHERE group_id=?", tableName), groupID)
+ if err != nil {
+ return 0, err
+ }
+ affected, err := res.RowsAffected()
+ if err != nil {
+ return 0, err
+ }
+ if affected == 0 {
+ // this slow path is only for the first time of creating a resource index
+ _, errIns := e.Exec(fmt.Sprintf("INSERT INTO %s (group_id, max_index) VALUES (?, 0)", tableName), groupID)
+ res, err = e.Exec(fmt.Sprintf("UPDATE %s SET max_index=max_index+1 WHERE group_id=?", tableName), groupID)
+ if err != nil {
+ return 0, err
+ }
+ affected, err = res.RowsAffected()
+ if err != nil {
+ return 0, err
+ }
+ // if the update still didn't affect any records, the record must not exist and there must have been some error (insert error)
+ if affected == 0 {
+ if errIns == nil {
+ return 0, errors.New("impossible error when GetNextResourceIndex, insert and update both succeeded but no record is updated")
+ }
+ return 0, errIns
+ }
+ }
+
+ // now, the new index is in database (protected by the transaction and write-lock)
+ var newIdx int64
+ has, err := e.SQL(fmt.Sprintf("SELECT max_index FROM %s WHERE group_id=?", tableName), groupID).Get(&newIdx)
+ if err != nil {
+ return 0, err
+ }
+ if !has {
+ return 0, errors.New("impossible error when GetNextResourceIndex, upsert succeeded but no record can be selected")
+ }
+ return newIdx, nil
+}
+
+// DeleteResourceIndex deletes a resource index
+func DeleteResourceIndex(ctx context.Context, tableName string, groupID int64) error {
+ _, err := Exec(ctx, fmt.Sprintf("DELETE FROM %s WHERE group_id=?", tableName), groupID)
+ return err
+}
diff --git a/models/db/index_test.go b/models/db/index_test.go
new file mode 100644
index 0000000..11fbc70
--- /dev/null
+++ b/models/db/index_test.go
@@ -0,0 +1,127 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package db_test
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type TestIndex db.ResourceIndex
+
+func getCurrentResourceIndex(ctx context.Context, tableName string, groupID int64) (int64, error) {
+ e := db.GetEngine(ctx)
+ var idx int64
+ has, err := e.SQL(fmt.Sprintf("SELECT max_index FROM %s WHERE group_id=?", tableName), groupID).Get(&idx)
+ if err != nil {
+ return 0, err
+ }
+ if !has {
+ return 0, errors.New("no record")
+ }
+ return idx, nil
+}
+
+func TestSyncMaxResourceIndex(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ xe := unittest.GetXORMEngine()
+ require.NoError(t, xe.Sync(&TestIndex{}))
+
+ err := db.SyncMaxResourceIndex(db.DefaultContext, "test_index", 10, 51)
+ require.NoError(t, err)
+
+ // sync new max index
+ maxIndex, err := getCurrentResourceIndex(db.DefaultContext, "test_index", 10)
+ require.NoError(t, err)
+ assert.EqualValues(t, 51, maxIndex)
+
+ // smaller index doesn't change
+ err = db.SyncMaxResourceIndex(db.DefaultContext, "test_index", 10, 30)
+ require.NoError(t, err)
+ maxIndex, err = getCurrentResourceIndex(db.DefaultContext, "test_index", 10)
+ require.NoError(t, err)
+ assert.EqualValues(t, 51, maxIndex)
+
+ // larger index changes
+ err = db.SyncMaxResourceIndex(db.DefaultContext, "test_index", 10, 62)
+ require.NoError(t, err)
+ maxIndex, err = getCurrentResourceIndex(db.DefaultContext, "test_index", 10)
+ require.NoError(t, err)
+ assert.EqualValues(t, 62, maxIndex)
+
+ // commit transaction
+ err = db.WithTx(db.DefaultContext, func(ctx context.Context) error {
+ err = db.SyncMaxResourceIndex(ctx, "test_index", 10, 73)
+ require.NoError(t, err)
+ maxIndex, err = getCurrentResourceIndex(ctx, "test_index", 10)
+ require.NoError(t, err)
+ assert.EqualValues(t, 73, maxIndex)
+ return nil
+ })
+ require.NoError(t, err)
+ maxIndex, err = getCurrentResourceIndex(db.DefaultContext, "test_index", 10)
+ require.NoError(t, err)
+ assert.EqualValues(t, 73, maxIndex)
+
+ // rollback transaction
+ err = db.WithTx(db.DefaultContext, func(ctx context.Context) error {
+ err = db.SyncMaxResourceIndex(ctx, "test_index", 10, 84)
+ maxIndex, err = getCurrentResourceIndex(ctx, "test_index", 10)
+ require.NoError(t, err)
+ assert.EqualValues(t, 84, maxIndex)
+ return errors.New("test rollback")
+ })
+ require.Error(t, err)
+ maxIndex, err = getCurrentResourceIndex(db.DefaultContext, "test_index", 10)
+ require.NoError(t, err)
+ assert.EqualValues(t, 73, maxIndex) // the max index doesn't change because the transaction was rolled back
+}
+
+func TestGetNextResourceIndex(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ xe := unittest.GetXORMEngine()
+ require.NoError(t, xe.Sync(&TestIndex{}))
+
+ // create a new record
+ maxIndex, err := db.GetNextResourceIndex(db.DefaultContext, "test_index", 20)
+ require.NoError(t, err)
+ assert.EqualValues(t, 1, maxIndex)
+
+ // increase the existing record
+ maxIndex, err = db.GetNextResourceIndex(db.DefaultContext, "test_index", 20)
+ require.NoError(t, err)
+ assert.EqualValues(t, 2, maxIndex)
+
+ // commit transaction
+ err = db.WithTx(db.DefaultContext, func(ctx context.Context) error {
+ maxIndex, err = db.GetNextResourceIndex(ctx, "test_index", 20)
+ require.NoError(t, err)
+ assert.EqualValues(t, 3, maxIndex)
+ return nil
+ })
+ require.NoError(t, err)
+ maxIndex, err = getCurrentResourceIndex(db.DefaultContext, "test_index", 20)
+ require.NoError(t, err)
+ assert.EqualValues(t, 3, maxIndex)
+
+ // rollback transaction
+ err = db.WithTx(db.DefaultContext, func(ctx context.Context) error {
+ maxIndex, err = db.GetNextResourceIndex(ctx, "test_index", 20)
+ require.NoError(t, err)
+ assert.EqualValues(t, 4, maxIndex)
+ return errors.New("test rollback")
+ })
+ require.Error(t, err)
+ maxIndex, err = getCurrentResourceIndex(db.DefaultContext, "test_index", 20)
+ require.NoError(t, err)
+ assert.EqualValues(t, 3, maxIndex) // the max index doesn't change because the transaction was rolled back
+}
diff --git a/models/db/install/db.go b/models/db/install/db.go
new file mode 100644
index 0000000..d4c1139
--- /dev/null
+++ b/models/db/install/db.go
@@ -0,0 +1,64 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package install
+
+import (
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+)
+
+func getXORMEngine() *xorm.Engine {
+ return db.DefaultContext.(*db.Context).Engine().(*xorm.Engine)
+}
+
+// CheckDatabaseConnection checks the database connection
+func CheckDatabaseConnection() error {
+ e := db.GetEngine(db.DefaultContext)
+ _, err := e.Exec("SELECT 1")
+ return err
+}
+
+// GetMigrationVersion gets the database migration version
+func GetMigrationVersion() (int64, error) {
+ var installedDbVersion int64
+ x := getXORMEngine()
+ exist, err := x.IsTableExist("version")
+ if err != nil {
+ return 0, err
+ }
+ if !exist {
+ return 0, nil
+ }
+ _, err = x.Table("version").Cols("version").Get(&installedDbVersion)
+ if err != nil {
+ return 0, err
+ }
+ return installedDbVersion, nil
+}
+
+// HasPostInstallationUsers checks whether there are users after installation
+func HasPostInstallationUsers() (bool, error) {
+ x := getXORMEngine()
+ exist, err := x.IsTableExist("user")
+ if err != nil {
+ return false, err
+ }
+ if !exist {
+ return false, nil
+ }
+
+	// if there are 2 or more users in the database, we consider that users were created after installation
+ threshold := 2
+ if !setting.IsProd {
+		// to make debugging easier, with a non-prod RUN_MODE we only require a count of 1
+ threshold = 1
+ }
+ res, err := x.Table("user").Cols("id").Limit(threshold).Query()
+ if err != nil {
+ return false, err
+ }
+ return len(res) >= threshold, nil
+}
diff --git a/models/db/iterate.go b/models/db/iterate.go
new file mode 100644
index 0000000..e1caefa
--- /dev/null
+++ b/models/db/iterate.go
@@ -0,0 +1,43 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package db
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/builder"
+)
+
+// Iterate iterates over all Bean objects that match the condition, in batches
+func Iterate[Bean any](ctx context.Context, cond builder.Cond, f func(ctx context.Context, bean *Bean) error) error {
+ var start int
+ batchSize := setting.Database.IterateBufferSize
+ sess := GetEngine(ctx)
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ beans := make([]*Bean, 0, batchSize)
+ if cond != nil {
+ sess = sess.Where(cond)
+ }
+ if err := sess.Limit(batchSize, start).Find(&beans); err != nil {
+ return err
+ }
+ if len(beans) == 0 {
+ return nil
+ }
+ start += len(beans)
+
+ for _, bean := range beans {
+ if err := f(ctx, bean); err != nil {
+ return err
+ }
+ }
+ }
+ }
+}
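
A hedged usage sketch for Iterate; the condition filters on an illustrative repo_unit column, and the batch size comes from setting.Database.IterateBufferSize:

    func countUnitsOfType(ctx context.Context, unitType int) (int, error) {
        n := 0
        err := db.Iterate(ctx, builder.Eq{"type": unitType}, func(ctx context.Context, u *repo_model.RepoUnit) error {
            n++ // returning a non-nil error here aborts the iteration
            return nil
        })
        return n, err
    }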
diff --git a/models/db/iterate_test.go b/models/db/iterate_test.go
new file mode 100644
index 0000000..7535d01
--- /dev/null
+++ b/models/db/iterate_test.go
@@ -0,0 +1,45 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package db_test
+
+import (
+ "context"
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestIterate(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ xe := unittest.GetXORMEngine()
+ require.NoError(t, xe.Sync(&repo_model.RepoUnit{}))
+
+ cnt, err := db.GetEngine(db.DefaultContext).Count(&repo_model.RepoUnit{})
+ require.NoError(t, err)
+
+ var repoUnitCnt int
+ err = db.Iterate(db.DefaultContext, nil, func(ctx context.Context, repo *repo_model.RepoUnit) error {
+ repoUnitCnt++
+ return nil
+ })
+ require.NoError(t, err)
+ assert.EqualValues(t, cnt, repoUnitCnt)
+
+ err = db.Iterate(db.DefaultContext, nil, func(ctx context.Context, repoUnit *repo_model.RepoUnit) error {
+ has, err := db.ExistByID[repo_model.RepoUnit](ctx, repoUnit.ID)
+ if err != nil {
+ return err
+ }
+ if !has {
+ return db.ErrNotExist{Resource: "repo_unit", ID: repoUnit.ID}
+ }
+ return nil
+ })
+ require.NoError(t, err)
+}
diff --git a/models/db/list.go b/models/db/list.go
new file mode 100644
index 0000000..5c005a0
--- /dev/null
+++ b/models/db/list.go
@@ -0,0 +1,215 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package db
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/builder"
+ "xorm.io/xorm"
+)
+
+const (
+	// DefaultMaxInSize is the default maximum number of variables in an SQL IN () clause
+ DefaultMaxInSize = 50
+ defaultFindSliceSize = 10
+)
+
+// Paginator is the base for different ListOptions types
+type Paginator interface {
+ GetSkipTake() (skip, take int)
+ IsListAll() bool
+}
+
+// SetSessionPagination sets pagination for a database session
+func SetSessionPagination(sess Engine, p Paginator) *xorm.Session {
+ skip, take := p.GetSkipTake()
+
+ return sess.Limit(take, skip)
+}
+
+// ListOptions options to paginate results
+type ListOptions struct {
+ PageSize int
+ Page int // start from 1
+	ListAll  bool // if true, PageSize and Page are ignored
+}
+
+var ListOptionsAll = ListOptions{ListAll: true}
+
+var (
+ _ Paginator = &ListOptions{}
+ _ FindOptions = ListOptions{}
+)
+
+// GetSkipTake returns the skip and take values
+func (opts *ListOptions) GetSkipTake() (skip, take int) {
+ opts.SetDefaultValues()
+ return (opts.Page - 1) * opts.PageSize, opts.PageSize
+}
+
+func (opts ListOptions) GetPage() int {
+ return opts.Page
+}
+
+func (opts ListOptions) GetPageSize() int {
+ return opts.PageSize
+}
+
+// IsListAll indicates PageSize and Page will be ignored
+func (opts ListOptions) IsListAll() bool {
+ return opts.ListAll
+}
+
+// SetDefaultValues sets default values
+func (opts *ListOptions) SetDefaultValues() {
+ if opts.PageSize <= 0 {
+ opts.PageSize = setting.API.DefaultPagingNum
+ }
+ if opts.PageSize > setting.API.MaxResponseItems {
+ opts.PageSize = setting.API.MaxResponseItems
+ }
+ if opts.Page <= 0 {
+ opts.Page = 1
+ }
+}
+
+func (opts ListOptions) ToConds() builder.Cond {
+ return builder.NewCond()
+}
+
+// AbsoluteListOptions absolute options to paginate results
+type AbsoluteListOptions struct {
+ skip int
+ take int
+}
+
+var _ Paginator = &AbsoluteListOptions{}
+
+// NewAbsoluteListOptions creates a list option with applied limits
+func NewAbsoluteListOptions(skip, take int) *AbsoluteListOptions {
+ if skip < 0 {
+ skip = 0
+ }
+ if take <= 0 {
+ take = setting.API.DefaultPagingNum
+ }
+ if take > setting.API.MaxResponseItems {
+ take = setting.API.MaxResponseItems
+ }
+ return &AbsoluteListOptions{skip, take}
+}
+
+// IsListAll will always return false
+func (opts *AbsoluteListOptions) IsListAll() bool {
+ return false
+}
+
+// GetSkipTake returns the skip and take values
+func (opts *AbsoluteListOptions) GetSkipTake() (skip, take int) {
+ return opts.skip, opts.take
+}
+
+// FindOptions represents the options of a find call
+type FindOptions interface {
+ GetPage() int
+ GetPageSize() int
+ IsListAll() bool
+ ToConds() builder.Cond
+}
+
+type JoinFunc func(sess Engine) error
+
+type FindOptionsJoin interface {
+ ToJoins() []JoinFunc
+}
+
+type FindOptionsOrder interface {
+ ToOrders() string
+}
+
+// Find is a common find function which accepts an options interface
+func Find[T any](ctx context.Context, opts FindOptions) ([]*T, error) {
+ sess := GetEngine(ctx).Where(opts.ToConds())
+
+ if joinOpt, ok := opts.(FindOptionsJoin); ok {
+ for _, joinFunc := range joinOpt.ToJoins() {
+ if err := joinFunc(sess); err != nil {
+ return nil, err
+ }
+ }
+ }
+ if orderOpt, ok := opts.(FindOptionsOrder); ok {
+ if order := orderOpt.ToOrders(); order != "" {
+ sess.OrderBy(order)
+ }
+ }
+
+ page, pageSize := opts.GetPage(), opts.GetPageSize()
+ if !opts.IsListAll() && pageSize > 0 {
+ if page == 0 {
+ page = 1
+ }
+ sess.Limit(pageSize, (page-1)*pageSize)
+ }
+
+ findPageSize := defaultFindSliceSize
+ if pageSize > 0 {
+ findPageSize = pageSize
+ }
+ objects := make([]*T, 0, findPageSize)
+ if err := sess.Find(&objects); err != nil {
+ return nil, err
+ }
+ return objects, nil
+}
+
+// Count is a common count function which accepts an options interface
+func Count[T any](ctx context.Context, opts FindOptions) (int64, error) {
+ sess := GetEngine(ctx).Where(opts.ToConds())
+ if joinOpt, ok := opts.(FindOptionsJoin); ok {
+ for _, joinFunc := range joinOpt.ToJoins() {
+ if err := joinFunc(sess); err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ var object T
+ return sess.Count(&object)
+}
+
+// FindAndCount is a common find-and-count function which accepts an options interface
+func FindAndCount[T any](ctx context.Context, opts FindOptions) ([]*T, int64, error) {
+ sess := GetEngine(ctx).Where(opts.ToConds())
+ page, pageSize := opts.GetPage(), opts.GetPageSize()
+ if !opts.IsListAll() && pageSize > 0 && page >= 1 {
+ sess.Limit(pageSize, (page-1)*pageSize)
+ }
+ if joinOpt, ok := opts.(FindOptionsJoin); ok {
+ for _, joinFunc := range joinOpt.ToJoins() {
+ if err := joinFunc(sess); err != nil {
+ return nil, 0, err
+ }
+ }
+ }
+ if orderOpt, ok := opts.(FindOptionsOrder); ok {
+ if order := orderOpt.ToOrders(); order != "" {
+ sess.OrderBy(order)
+ }
+ }
+
+ findPageSize := defaultFindSliceSize
+ if pageSize > 0 {
+ findPageSize = pageSize
+ }
+ objects := make([]*T, 0, findPageSize)
+ cnt, err := sess.FindAndCount(&objects)
+ if err != nil {
+ return nil, 0, err
+ }
+ return objects, cnt, nil
+}
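
A sketch of the typical caller-side pattern for these generics: embed ListOptions for paging and implement ToConds for filtering. The option type and column names here are illustrative, not part of this diff:

    type repoUnitSearchOptions struct {
        db.ListOptions
        RepoID int64
    }

    func (opts repoUnitSearchOptions) ToConds() builder.Cond {
        cond := builder.NewCond()
        if opts.RepoID > 0 {
            cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
        }
        return cond
    }

    // fetch page 1 (20 items) plus the total count for a pagination UI:
    // units, total, err := db.FindAndCount[repo_model.RepoUnit](ctx, repoUnitSearchOptions{
    //     ListOptions: db.ListOptions{Page: 1, PageSize: 20},
    //     RepoID:      repoID,
    // })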
diff --git a/models/db/list_test.go b/models/db/list_test.go
new file mode 100644
index 0000000..82240d2
--- /dev/null
+++ b/models/db/list_test.go
@@ -0,0 +1,53 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package db_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "xorm.io/builder"
+)
+
+type mockListOptions struct {
+ db.ListOptions
+}
+
+func (opts mockListOptions) IsListAll() bool {
+ return true
+}
+
+func (opts mockListOptions) ToConds() builder.Cond {
+ return builder.NewCond()
+}
+
+func TestFind(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ xe := unittest.GetXORMEngine()
+ require.NoError(t, xe.Sync(&repo_model.RepoUnit{}))
+
+ var repoUnitCount int
+ _, err := db.GetEngine(db.DefaultContext).SQL("SELECT COUNT(*) FROM repo_unit").Get(&repoUnitCount)
+ require.NoError(t, err)
+ assert.NotEmpty(t, repoUnitCount)
+
+ opts := mockListOptions{}
+ repoUnits, err := db.Find[repo_model.RepoUnit](db.DefaultContext, opts)
+ require.NoError(t, err)
+ assert.Len(t, repoUnits, repoUnitCount)
+
+ cnt, err := db.Count[repo_model.RepoUnit](db.DefaultContext, opts)
+ require.NoError(t, err)
+ assert.EqualValues(t, repoUnitCount, cnt)
+
+ repoUnits, newCnt, err := db.FindAndCount[repo_model.RepoUnit](db.DefaultContext, opts)
+ require.NoError(t, err)
+ assert.EqualValues(t, cnt, newCnt)
+ assert.Len(t, repoUnits, repoUnitCount)
+}
diff --git a/models/db/log.go b/models/db/log.go
new file mode 100644
index 0000000..457ee80
--- /dev/null
+++ b/models/db/log.go
@@ -0,0 +1,107 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package db
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "code.gitea.io/gitea/modules/log"
+
+ xormlog "xorm.io/xorm/log"
+)
+
+// XORMLogBridge is a logger bridge from Gitea's Logger to xorm's
+type XORMLogBridge struct {
+ showSQL atomic.Bool
+ logger log.Logger
+}
+
+// NewXORMLogger inits a log bridge for xorm
+func NewXORMLogger(showSQL bool) xormlog.Logger {
+ l := &XORMLogBridge{logger: log.GetLogger("xorm")}
+ l.showSQL.Store(showSQL)
+ return l
+}
+
+const stackLevel = 8
+
+// Log logs a message with the given stack skip and logging level
+func (l *XORMLogBridge) Log(skip int, level log.Level, format string, v ...any) {
+ l.logger.Log(skip+1, level, format, v...)
+}
+
+// Debug logs a debug message
+func (l *XORMLogBridge) Debug(v ...any) {
+ l.Log(stackLevel, log.DEBUG, "%s", fmt.Sprint(v...))
+}
+
+// Debugf logs a formatted debug message
+func (l *XORMLogBridge) Debugf(format string, v ...any) {
+ l.Log(stackLevel, log.DEBUG, format, v...)
+}
+
+// Error logs an error message
+func (l *XORMLogBridge) Error(v ...any) {
+ l.Log(stackLevel, log.ERROR, "%s", fmt.Sprint(v...))
+}
+
+// Errorf logs a formatted error message
+func (l *XORMLogBridge) Errorf(format string, v ...any) {
+ l.Log(stackLevel, log.ERROR, format, v...)
+}
+
+// Info logs an info-level message
+func (l *XORMLogBridge) Info(v ...any) {
+ l.Log(stackLevel, log.INFO, "%s", fmt.Sprint(v...))
+}
+
+// Infof logs a formatted info-level message
+func (l *XORMLogBridge) Infof(format string, v ...any) {
+ l.Log(stackLevel, log.INFO, format, v...)
+}
+
+// Warn logs a warning message
+func (l *XORMLogBridge) Warn(v ...any) {
+ l.Log(stackLevel, log.WARN, "%s", fmt.Sprint(v...))
+}
+
+// Warnf logs a formatted warning message
+func (l *XORMLogBridge) Warnf(format string, v ...any) {
+ l.Log(stackLevel, log.WARN, format, v...)
+}
+
+// Level gets the logger level
+func (l *XORMLogBridge) Level() xormlog.LogLevel {
+ switch l.logger.GetLevel() {
+ case log.TRACE, log.DEBUG:
+ return xormlog.LOG_DEBUG
+ case log.INFO:
+ return xormlog.LOG_INFO
+ case log.WARN:
+ return xormlog.LOG_WARNING
+ case log.ERROR:
+ return xormlog.LOG_ERR
+ case log.NONE:
+ return xormlog.LOG_OFF
+ }
+ return xormlog.LOG_UNKNOWN
+}
+
+// SetLevel sets the logger level (a no-op: the level follows the underlying logger)
+func (l *XORMLogBridge) SetLevel(lvl xormlog.LogLevel) {
+}
+
+// ShowSQL sets whether executed SQL statements are logged
+func (l *XORMLogBridge) ShowSQL(show ...bool) {
+ if len(show) == 0 {
+ show = []bool{true}
+ }
+ l.showSQL.Store(show[0])
+}
+
+// IsShowSQL reports whether executed SQL statements are logged
+func (l *XORMLogBridge) IsShowSQL() bool {
+ return l.showSQL.Load()
+}
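
A wiring sketch for the bridge, assuming the usual xorm Engine.SetLogger/ShowSQL entry points (this is how the engine setup elsewhere in the codebase is expected to attach it):

    func configureEngineLogging(e *xorm.Engine, logSQL bool) {
        e.SetLogger(db.NewXORMLogger(logSQL)) // route xorm's logs through Gitea's "xorm" logger
        e.ShowSQL(logSQL)                     // toggle logging of executed SQL statements
    }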
diff --git a/models/db/main_test.go b/models/db/main_test.go
new file mode 100644
index 0000000..7d80b40
--- /dev/null
+++ b/models/db/main_test.go
@@ -0,0 +1,17 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package db_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/unittest"
+
+ _ "code.gitea.io/gitea/models"
+ _ "code.gitea.io/gitea/models/repo"
+)
+
+func TestMain(m *testing.M) {
+ unittest.MainTest(m)
+}
diff --git a/models/db/name.go b/models/db/name.go
new file mode 100644
index 0000000..51be33a
--- /dev/null
+++ b/models/db/name.go
@@ -0,0 +1,106 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package db
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+ "unicode/utf8"
+
+ "code.gitea.io/gitea/modules/util"
+)
+
+var (
+ // ErrNameEmpty name is empty error
+ ErrNameEmpty = util.SilentWrap{Message: "name is empty", Err: util.ErrInvalidArgument}
+
+	// AlphaDashDotPattern matches characters prohibited in a user name (anything except A-Za-z0-9_.-)
+ AlphaDashDotPattern = regexp.MustCompile(`[^\w-\.]`)
+)
+
+// ErrNameReserved represents a "reserved name" error.
+type ErrNameReserved struct {
+ Name string
+}
+
+// IsErrNameReserved checks if an error is a ErrNameReserved.
+func IsErrNameReserved(err error) bool {
+ _, ok := err.(ErrNameReserved)
+ return ok
+}
+
+func (err ErrNameReserved) Error() string {
+ return fmt.Sprintf("name is reserved [name: %s]", err.Name)
+}
+
+// Unwrap unwraps this as an ErrInvalidArgument error
+func (err ErrNameReserved) Unwrap() error {
+ return util.ErrInvalidArgument
+}
+
+// ErrNamePatternNotAllowed represents a "pattern not allowed" error.
+type ErrNamePatternNotAllowed struct {
+ Pattern string
+}
+
+// IsErrNamePatternNotAllowed checks if an error is an ErrNamePatternNotAllowed.
+func IsErrNamePatternNotAllowed(err error) bool {
+ _, ok := err.(ErrNamePatternNotAllowed)
+ return ok
+}
+
+func (err ErrNamePatternNotAllowed) Error() string {
+ return fmt.Sprintf("name pattern is not allowed [pattern: %s]", err.Pattern)
+}
+
+// Unwrap unwraps this as an ErrInvalidArgument error
+func (err ErrNamePatternNotAllowed) Unwrap() error {
+ return util.ErrInvalidArgument
+}
+
+// ErrNameCharsNotAllowed represents a "character not allowed in name" error.
+type ErrNameCharsNotAllowed struct {
+ Name string
+}
+
+// IsErrNameCharsNotAllowed checks if an error is an ErrNameCharsNotAllowed.
+func IsErrNameCharsNotAllowed(err error) bool {
+ _, ok := err.(ErrNameCharsNotAllowed)
+ return ok
+}
+
+func (err ErrNameCharsNotAllowed) Error() string {
+ return fmt.Sprintf("name is invalid [%s]: must be valid alpha or numeric or dash(-_) or dot characters", err.Name)
+}
+
+// Unwrap unwraps this as an ErrInvalidArgument error
+func (err ErrNameCharsNotAllowed) Unwrap() error {
+ return util.ErrInvalidArgument
+}
+
+// IsUsableName checks whether a name is reserved or matches a disallowed pattern,
+// based on the given reserved names and patterns.
+// Names are matched exactly; patterns match as a prefix or suffix via the placeholder '*'.
+func IsUsableName(names, patterns []string, name string) error {
+ name = strings.TrimSpace(strings.ToLower(name))
+ if utf8.RuneCountInString(name) == 0 {
+ return ErrNameEmpty
+ }
+
+ for i := range names {
+ if name == names[i] {
+ return ErrNameReserved{name}
+ }
+ }
+
+ for _, pat := range patterns {
+ if pat[0] == '*' && strings.HasSuffix(name, pat[1:]) ||
+ (pat[len(pat)-1] == '*' && strings.HasPrefix(name, pat[:len(pat)-1])) {
+ return ErrNamePatternNotAllowed{pat}
+ }
+ }
+
+ return nil
+}
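
A usage sketch for IsUsableName with illustrative reserved lists (real callers pass their own):

    var (
        reservedNames    = []string{"admin", "api"}
        reservedPatterns = []string{"*.keys", "new-*"}
    )

    func validateName(name string) error {
        // "new-project" is rejected by "new-*", "foo.keys" by "*.keys",
        // and "Admin" by the exact, case-insensitive name "admin"
        return db.IsUsableName(reservedNames, reservedPatterns, name)
    }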
diff --git a/models/db/paginator/main_test.go b/models/db/paginator/main_test.go
new file mode 100644
index 0000000..47993ae
--- /dev/null
+++ b/models/db/paginator/main_test.go
@@ -0,0 +1,14 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package paginator
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/unittest"
+)
+
+func TestMain(m *testing.M) {
+ unittest.MainTest(m)
+}
diff --git a/models/db/paginator/paginator.go b/models/db/paginator/paginator.go
new file mode 100644
index 0000000..bcda47d
--- /dev/null
+++ b/models/db/paginator/paginator.go
@@ -0,0 +1,7 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package paginator
+
+// Dummy only. In the future, models/db/list_options.go should be moved here to decouple it from the db package;
+// otherwise the unit tests would cause an import cycle.
diff --git a/models/db/paginator/paginator_test.go b/models/db/paginator/paginator_test.go
new file mode 100644
index 0000000..2060221
--- /dev/null
+++ b/models/db/paginator/paginator_test.go
@@ -0,0 +1,59 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package paginator
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/setting"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestPaginator(t *testing.T) {
+ cases := []struct {
+ db.Paginator
+ Skip int
+ Take int
+ Start int
+ End int
+ }{
+ {
+ Paginator: &db.ListOptions{Page: -1, PageSize: -1},
+ Skip: 0,
+ Take: setting.API.DefaultPagingNum,
+ Start: 0,
+ End: setting.API.DefaultPagingNum,
+ },
+ {
+ Paginator: &db.ListOptions{Page: 2, PageSize: 10},
+ Skip: 10,
+ Take: 10,
+ Start: 10,
+ End: 20,
+ },
+ {
+ Paginator: db.NewAbsoluteListOptions(-1, -1),
+ Skip: 0,
+ Take: setting.API.DefaultPagingNum,
+ Start: 0,
+ End: setting.API.DefaultPagingNum,
+ },
+ {
+ Paginator: db.NewAbsoluteListOptions(2, 10),
+ Skip: 2,
+ Take: 10,
+ Start: 2,
+ End: 12,
+ },
+ }
+
+ for _, c := range cases {
+ skip, take := c.Paginator.GetSkipTake()
+
+ assert.Equal(t, c.Skip, skip)
+ assert.Equal(t, c.Take, take)
+ }
+}
diff --git a/models/db/search.go b/models/db/search.go
new file mode 100644
index 0000000..37565f4
--- /dev/null
+++ b/models/db/search.go
@@ -0,0 +1,33 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package db
+
+// SearchOrderBy is used to sort the result
+type SearchOrderBy string
+
+func (s SearchOrderBy) String() string {
+ return string(s)
+}
+
+// Strings for sorting result
+const (
+ SearchOrderByAlphabetically SearchOrderBy = "name ASC"
+ SearchOrderByAlphabeticallyReverse SearchOrderBy = "name DESC"
+ SearchOrderByLeastUpdated SearchOrderBy = "updated_unix ASC"
+ SearchOrderByRecentUpdated SearchOrderBy = "updated_unix DESC"
+ SearchOrderByOldest SearchOrderBy = "created_unix ASC"
+ SearchOrderByNewest SearchOrderBy = "created_unix DESC"
+ SearchOrderByID SearchOrderBy = "id ASC"
+ SearchOrderByIDReverse SearchOrderBy = "id DESC"
+ SearchOrderByStars SearchOrderBy = "num_stars ASC"
+ SearchOrderByStarsReverse SearchOrderBy = "num_stars DESC"
+ SearchOrderByForks SearchOrderBy = "num_forks ASC"
+ SearchOrderByForksReverse SearchOrderBy = "num_forks DESC"
+)
+
+const (
+	// NoConditionID is a condition value to filter out records that don't match any id;
+	// it's different from zero, which means the condition can be ignored.
+ NoConditionID = -1
+)
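
A sketch of the intended use of NoConditionID (the column name is illustrative): when a caller may see no records at all, filtering on -1 guarantees an empty result set, while a zero value would instead mean "no condition":

    func emptyResultCond() builder.Cond {
        // no real row has id -1, so this condition matches nothing
        return builder.Eq{"repo_id": db.NoConditionID}
    }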
diff --git a/models/db/sequence.go b/models/db/sequence.go
new file mode 100644
index 0000000..f49ad93
--- /dev/null
+++ b/models/db/sequence.go
@@ -0,0 +1,70 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package db
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+
+ "code.gitea.io/gitea/modules/setting"
+)
+
+// CountBadSequences looks for broken sequences from recreate-table mistakes
+func CountBadSequences(_ context.Context) (int64, error) {
+ if !setting.Database.Type.IsPostgreSQL() {
+ return 0, nil
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+
+ var sequences []string
+ schema := x.Dialect().URI().Schema
+
+ sess.Engine().SetSchema("")
+ if err := sess.Table("information_schema.sequences").Cols("sequence_name").Where("sequence_name LIKE 'tmp_recreate__%_id_seq%' AND sequence_catalog = ?", setting.Database.Name).Find(&sequences); err != nil {
+ return 0, err
+ }
+ sess.Engine().SetSchema(schema)
+
+ return int64(len(sequences)), nil
+}
+
+// FixBadSequences fixes broken sequences left behind by recreate-table mistakes
+func FixBadSequences(_ context.Context) error {
+ if !setting.Database.Type.IsPostgreSQL() {
+ return nil
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ var sequences []string
+ schema := sess.Engine().Dialect().URI().Schema
+
+ sess.Engine().SetSchema("")
+ if err := sess.Table("information_schema.sequences").Cols("sequence_name").Where("sequence_name LIKE 'tmp_recreate__%_id_seq%' AND sequence_catalog = ?", setting.Database.Name).Find(&sequences); err != nil {
+ return err
+ }
+ sess.Engine().SetSchema(schema)
+
+ sequenceRegexp := regexp.MustCompile(`tmp_recreate__(\w+)_id_seq.*`)
+
+ for _, sequence := range sequences {
+ tableName := sequenceRegexp.FindStringSubmatch(sequence)[1]
+ newSequenceName := tableName + "_id_seq"
+ if _, err := sess.Exec(fmt.Sprintf("ALTER SEQUENCE `%s` RENAME TO `%s`", sequence, newSequenceName)); err != nil {
+ return err
+ }
+ if _, err := sess.Exec(fmt.Sprintf("SELECT setval('%s', COALESCE((SELECT MAX(id)+1 FROM `%s`), 1), false)", newSequenceName, tableName)); err != nil {
+ return err
+ }
+ }
+
+ return sess.Commit()
+}
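
A doctor-style sketch combining the two helpers above; both are no-ops on non-PostgreSQL databases:

    func checkAndFixSequences(ctx context.Context) error {
        bad, err := db.CountBadSequences(ctx)
        if err != nil || bad == 0 {
            return err
        }
        // rename each tmp_recreate__*_id_seq back and resync its value from MAX(id)+1
        return db.FixBadSequences(ctx)
    }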
diff --git a/models/db/sql_postgres_with_schema.go b/models/db/sql_postgres_with_schema.go
new file mode 100644
index 0000000..ec63447
--- /dev/null
+++ b/models/db/sql_postgres_with_schema.go
@@ -0,0 +1,74 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package db
+
+import (
+ "database/sql"
+ "database/sql/driver"
+ "sync"
+
+ "code.gitea.io/gitea/modules/setting"
+
+ "github.com/lib/pq"
+ "xorm.io/xorm/dialects"
+)
+
+var registerOnce sync.Once
+
+func registerPostgresSchemaDriver() {
+ registerOnce.Do(func() {
+ sql.Register("postgresschema", &postgresSchemaDriver{})
+ dialects.RegisterDriver("postgresschema", dialects.QueryDriver("postgres"))
+ })
+}
+
+type postgresSchemaDriver struct {
+ pq.Driver
+}
+
+// Open opens a new connection to the database. name is a connection string.
+// This function opens the postgres connection in the default manner but immediately
+// runs set_config to set the search_path appropriately.
+func (d *postgresSchemaDriver) Open(name string) (driver.Conn, error) {
+ conn, err := d.Driver.Open(name)
+ if err != nil {
+ return conn, err
+ }
+ schemaValue, _ := driver.String.ConvertValue(setting.Database.Schema)
+
+ // golangci lint is incorrect here - there is no benefit to using driver.ExecerContext here
+ // and in any case pq does not implement it
+ if execer, ok := conn.(driver.Execer); ok { //nolint
+ _, err := execer.Exec(`SELECT set_config(
+ 'search_path',
+ $1 || ',' || current_setting('search_path'),
+ false)`, []driver.Value{schemaValue})
+ if err != nil {
+ _ = conn.Close()
+ return nil, err
+ }
+ return conn, nil
+ }
+
+ stmt, err := conn.Prepare(`SELECT set_config(
+ 'search_path',
+ $1 || ',' || current_setting('search_path'),
+ false)`)
+ if err != nil {
+ _ = conn.Close()
+ return nil, err
+ }
+ defer stmt.Close()
+
+ // driver.String.ConvertValue will never return err for string
+
+ // golangci lint is incorrect here - there is no benefit to using stmt.ExecWithContext here
+ _, err = stmt.Exec([]driver.Value{schemaValue}) //nolint
+ if err != nil {
+ _ = conn.Close()
+ return nil, err
+ }
+
+ return conn, nil
+}
diff --git a/models/dbfs/dbfile.go b/models/dbfs/dbfile.go
new file mode 100644
index 0000000..dd27b5c
--- /dev/null
+++ b/models/dbfs/dbfile.go
@@ -0,0 +1,368 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package dbfs
+
+import (
+ "context"
+ "errors"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+)
+
+var defaultFileBlockSize int64 = 32 * 1024
+
+type File interface {
+ io.ReadWriteCloser
+ io.Seeker
+ fs.File
+}
+
+type file struct {
+ ctx context.Context
+ metaID int64
+ fullPath string
+ blockSize int64
+
+ allowRead bool
+ allowWrite bool
+ offset int64
+}
+
+var _ File = (*file)(nil)
+
+func (f *file) readAt(fileMeta *dbfsMeta, offset int64, p []byte) (n int, err error) {
+ if offset >= fileMeta.FileSize {
+ return 0, io.EOF
+ }
+
+ blobPos := int(offset % f.blockSize)
+ blobOffset := offset - int64(blobPos)
+ blobRemaining := int(f.blockSize) - blobPos
+ needRead := len(p)
+ if needRead > blobRemaining {
+ needRead = blobRemaining
+ }
+ if blobOffset+int64(blobPos)+int64(needRead) > fileMeta.FileSize {
+ needRead = int(fileMeta.FileSize - blobOffset - int64(blobPos))
+ }
+ if needRead <= 0 {
+ return 0, io.EOF
+ }
+ var fileData dbfsData
+ ok, err := db.GetEngine(f.ctx).Where("meta_id = ? AND blob_offset = ?", f.metaID, blobOffset).Get(&fileData)
+ if err != nil {
+ return 0, err
+ }
+ blobData := fileData.BlobData
+ if !ok {
+ blobData = nil
+ }
+
+ canCopy := len(blobData) - blobPos
+ if canCopy <= 0 {
+ canCopy = 0
+ }
+ realRead := needRead
+ if realRead > canCopy {
+ realRead = canCopy
+ }
+ if realRead > 0 {
+ copy(p[:realRead], fileData.BlobData[blobPos:blobPos+realRead])
+ }
+ for i := realRead; i < needRead; i++ {
+ p[i] = 0
+ }
+ return needRead, nil
+}
+
+func (f *file) Read(p []byte) (n int, err error) {
+ if f.metaID == 0 || !f.allowRead {
+ return 0, os.ErrInvalid
+ }
+
+ fileMeta, err := findFileMetaByID(f.ctx, f.metaID)
+ if err != nil {
+ return 0, err
+ }
+ n, err = f.readAt(fileMeta, f.offset, p)
+ f.offset += int64(n)
+ return n, err
+}
+
+func (f *file) Write(p []byte) (n int, err error) {
+ if f.metaID == 0 || !f.allowWrite {
+ return 0, os.ErrInvalid
+ }
+
+ fileMeta, err := findFileMetaByID(f.ctx, f.metaID)
+ if err != nil {
+ return 0, err
+ }
+
+ needUpdateSize := false
+ written := 0
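+	// write block by block: read back the current block, merge the new bytes into it,
+	// then UPDATE the block row, falling back to INSERT when the row doesn't exist yet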
+ for len(p) > 0 {
+ blobPos := int(f.offset % f.blockSize)
+ blobOffset := f.offset - int64(blobPos)
+ blobRemaining := int(f.blockSize) - blobPos
+ needWrite := len(p)
+ if needWrite > blobRemaining {
+ needWrite = blobRemaining
+ }
+ buf := make([]byte, f.blockSize)
+ readBytes, err := f.readAt(fileMeta, blobOffset, buf)
+ if err != nil && !errors.Is(err, io.EOF) {
+ return written, err
+ }
+ copy(buf[blobPos:blobPos+needWrite], p[:needWrite])
+ if blobPos+needWrite > readBytes {
+ buf = buf[:blobPos+needWrite]
+ } else {
+ buf = buf[:readBytes]
+ }
+
+ fileData := dbfsData{
+ MetaID: fileMeta.ID,
+ BlobOffset: blobOffset,
+ BlobData: buf,
+ }
+ if res, err := db.GetEngine(f.ctx).Exec("UPDATE dbfs_data SET revision=revision+1, blob_data=? WHERE meta_id=? AND blob_offset=?", buf, fileMeta.ID, blobOffset); err != nil {
+ return written, err
+ } else if updated, err := res.RowsAffected(); err != nil {
+ return written, err
+ } else if updated == 0 {
+ if _, err = db.GetEngine(f.ctx).Insert(&fileData); err != nil {
+ return written, err
+ }
+ }
+ written += needWrite
+ f.offset += int64(needWrite)
+ if f.offset > fileMeta.FileSize {
+ fileMeta.FileSize = f.offset
+ needUpdateSize = true
+ }
+ p = p[needWrite:]
+ }
+
+ fileMetaUpdate := dbfsMeta{
+ ModifyTimestamp: timeToFileTimestamp(time.Now()),
+ }
+ if needUpdateSize {
+ fileMetaUpdate.FileSize = f.offset
+ }
+ if _, err := db.GetEngine(f.ctx).ID(fileMeta.ID).Update(fileMetaUpdate); err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+func (f *file) Seek(n int64, whence int) (int64, error) {
+ if f.metaID == 0 {
+ return 0, os.ErrInvalid
+ }
+
+ newOffset := f.offset
+ switch whence {
+ case io.SeekStart:
+ newOffset = n
+ case io.SeekCurrent:
+ newOffset += n
+ case io.SeekEnd:
+ size, err := f.size()
+ if err != nil {
+ return f.offset, err
+ }
+ newOffset = size + n
+ default:
+ return f.offset, os.ErrInvalid
+ }
+ if newOffset < 0 {
+ return f.offset, os.ErrInvalid
+ }
+ f.offset = newOffset
+ return newOffset, nil
+}
+
+func (f *file) Close() error {
+ return nil
+}
+
+func (f *file) Stat() (os.FileInfo, error) {
+ if f.metaID == 0 {
+ return nil, os.ErrInvalid
+ }
+
+ fileMeta, err := findFileMetaByID(f.ctx, f.metaID)
+ if err != nil {
+ return nil, err
+ }
+ return fileMeta, nil
+}
+
+func timeToFileTimestamp(t time.Time) int64 {
+ return t.UnixMicro()
+}
+
+func fileTimestampToTime(timestamp int64) time.Time {
+ return time.UnixMicro(timestamp)
+}
+
+func (f *file) loadMetaByPath() error {
+ var fileMeta dbfsMeta
+ if ok, err := db.GetEngine(f.ctx).Where("full_path = ?", f.fullPath).Get(&fileMeta); err != nil {
+ return err
+ } else if ok {
+ f.metaID = fileMeta.ID
+ f.blockSize = fileMeta.BlockSize
+ }
+ return nil
+}
+
+func (f *file) open(flag int) (err error) {
+ // see os.OpenFile for flag values
+ if flag&os.O_WRONLY != 0 {
+ f.allowWrite = true
+ } else if flag&os.O_RDWR != 0 {
+ f.allowRead = true
+ f.allowWrite = true
+ } else /* O_RDONLY */ {
+ f.allowRead = true
+ }
+
+ if f.allowWrite {
+ if flag&os.O_CREATE != 0 {
+ if flag&os.O_EXCL != 0 {
+ // file must not exist.
+ if f.metaID != 0 {
+ return os.ErrExist
+ }
+ } else {
+ // create a new file if none exists.
+ if f.metaID == 0 {
+ if err = f.createEmpty(); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ if flag&os.O_TRUNC != 0 {
+ if err = f.truncate(); err != nil {
+ return err
+ }
+ }
+ if flag&os.O_APPEND != 0 {
+ if _, err = f.Seek(0, io.SeekEnd); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ // read only mode
+ if f.metaID == 0 {
+ return os.ErrNotExist
+ }
+ return nil
+}
+
+func (f *file) createEmpty() error {
+ if f.metaID != 0 {
+ return os.ErrExist
+ }
+ now := time.Now()
+ _, err := db.GetEngine(f.ctx).Insert(&dbfsMeta{
+ FullPath: f.fullPath,
+ BlockSize: f.blockSize,
+ CreateTimestamp: timeToFileTimestamp(now),
+ ModifyTimestamp: timeToFileTimestamp(now),
+ })
+ if err != nil {
+ return err
+ }
+ return f.loadMetaByPath()
+}
+
+func (f *file) truncate() error {
+ if f.metaID == 0 {
+ return os.ErrNotExist
+ }
+ return db.WithTx(f.ctx, func(ctx context.Context) error {
+ if _, err := db.GetEngine(ctx).Exec("UPDATE dbfs_meta SET file_size = 0 WHERE id = ?", f.metaID); err != nil {
+ return err
+ }
+ if _, err := db.GetEngine(ctx).Delete(&dbfsData{MetaID: f.metaID}); err != nil {
+ return err
+ }
+ return nil
+ })
+}
+
+func (f *file) renameTo(newPath string) error {
+ if f.metaID == 0 {
+ return os.ErrNotExist
+ }
+ newPath = buildPath(newPath)
+ return db.WithTx(f.ctx, func(ctx context.Context) error {
+ if _, err := db.GetEngine(ctx).Exec("UPDATE dbfs_meta SET full_path = ? WHERE id = ?", newPath, f.metaID); err != nil {
+ return err
+ }
+ return nil
+ })
+}
+
+func (f *file) delete() error {
+ if f.metaID == 0 {
+ return os.ErrNotExist
+ }
+ return db.WithTx(f.ctx, func(ctx context.Context) error {
+ if _, err := db.GetEngine(ctx).Delete(&dbfsMeta{ID: f.metaID}); err != nil {
+ return err
+ }
+ if _, err := db.GetEngine(ctx).Delete(&dbfsData{MetaID: f.metaID}); err != nil {
+ return err
+ }
+ return nil
+ })
+}
+
+func (f *file) size() (int64, error) {
+ if f.metaID == 0 {
+ return 0, os.ErrNotExist
+ }
+ fileMeta, err := findFileMetaByID(f.ctx, f.metaID)
+ if err != nil {
+ return 0, err
+ }
+ return fileMeta.FileSize, nil
+}
+
+func findFileMetaByID(ctx context.Context, metaID int64) (*dbfsMeta, error) {
+ var fileMeta dbfsMeta
+ if ok, err := db.GetEngine(ctx).Where("id = ?", metaID).Get(&fileMeta); err != nil {
+ return nil, err
+ } else if ok {
+ return &fileMeta, nil
+ }
+ return nil, nil
+}
+
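+// buildPath normalizes a path and prefixes it with its directory depth,
+// e.g. "test.txt" -> "0:test.txt" and "dir/test.txt" -> "1:dir/test.txt"
+// (the depth prefix presumably keeps per-directory prefix queries cheap)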
+func buildPath(path string) string {
+ path = filepath.Clean(path)
+ path = strings.ReplaceAll(path, "\\", "/")
+ path = strings.TrimPrefix(path, "/")
+ return strconv.Itoa(strings.Count(path, "/")) + ":" + path
+}
+
+func newDbFile(ctx context.Context, path string) (*file, error) {
+ path = buildPath(path)
+ f := &file{ctx: ctx, fullPath: path, blockSize: defaultFileBlockSize}
+ return f, f.loadMetaByPath()
+}
diff --git a/models/dbfs/dbfs.go b/models/dbfs/dbfs.go
new file mode 100644
index 0000000..f68b4a2
--- /dev/null
+++ b/models/dbfs/dbfs.go
@@ -0,0 +1,131 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package dbfs
+
+import (
+ "context"
+ "io/fs"
+ "os"
+ "path"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+)
+
+/*
+The reasons behind the DBFS (database-filesystem) package:
+When a Gitea action is running, the Gitea action server should collect and store all the logs.
+
+The requirements are:
+* The running logs must be stored across the cluster if the Gitea servers are deployed as a cluster.
+* The logs will be archived to Object Storage (S3/MinIO, etc.) after a period of time.
+* The Gitea action UI should be able to render the running logs and the archived logs.
+
+Some possible solutions for the running logs:
+* [Not ideal] Using a local temp file: it cannot be shared across the cluster.
+* [Not ideal] Using a shared file in the git repository filesystem: although at the moment a Gitea cluster's
+  git repositories must be stored in a shared filesystem, in the future Gitea may need a dedicated Git Service Server
+  to decouple from the shared filesystem, and the action logs would then become a blocker.
+* [Not ideal] Recording the logs in a database table line by line: it has a couple of problems:
+  - It's difficult to maintain multiple increasing sequences (log line numbers) across different databases.
+  - The table would have a huge number of rows and be affected by the big-table performance problem.
+  - It's difficult to load logs through the same interface as the other storages.
+  - It's difficult to calculate the size of the logs.
+
+The DBFS solution:
+* It can be used in a cluster.
+* It can share the same interface (Read/Write/Seek) as other storages.
+* It's very database-friendly because it stores far fewer rows than the log-line solution.
+* In the future, when Gitea actions need to limit the log size (as other CI/CD services do), it will be easier to calculate the log file size.
+* Even when the UI needs to render the tail lines, they can be found by counting the "\n" characters from the end of the file using seek.
+  Seeking and scanning is not the fastest way, but it's acceptable and won't affect performance too much.
+*/
+
+type dbfsMeta struct {
+ ID int64 `xorm:"pk autoincr"`
+ FullPath string `xorm:"VARCHAR(500) UNIQUE NOT NULL"`
+ BlockSize int64 `xorm:"BIGINT NOT NULL"`
+ FileSize int64 `xorm:"BIGINT NOT NULL"`
+ CreateTimestamp int64 `xorm:"BIGINT NOT NULL"`
+ ModifyTimestamp int64 `xorm:"BIGINT NOT NULL"`
+}
+
+type dbfsData struct {
+ ID int64 `xorm:"pk autoincr"`
+ Revision int64 `xorm:"BIGINT NOT NULL"`
+ MetaID int64 `xorm:"BIGINT index(meta_offset) NOT NULL"`
+ BlobOffset int64 `xorm:"BIGINT index(meta_offset) NOT NULL"`
+ BlobSize int64 `xorm:"BIGINT NOT NULL"`
+ BlobData []byte `xorm:"BLOB NOT NULL"`
+}
+
+func init() {
+ db.RegisterModel(new(dbfsMeta))
+ db.RegisterModel(new(dbfsData))
+}
+
+func OpenFile(ctx context.Context, name string, flag int) (File, error) {
+ f, err := newDbFile(ctx, name)
+ if err != nil {
+ return nil, err
+ }
+ err = f.open(flag)
+ if err != nil {
+ _ = f.Close()
+ return nil, err
+ }
+ return f, nil
+}
+
+func Open(ctx context.Context, name string) (File, error) {
+ return OpenFile(ctx, name, os.O_RDONLY)
+}
+
+func Create(ctx context.Context, name string) (File, error) {
+ return OpenFile(ctx, name, os.O_RDWR|os.O_CREATE|os.O_TRUNC)
+}
+
+func Rename(ctx context.Context, oldPath, newPath string) error {
+ f, err := newDbFile(ctx, oldPath)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ return f.renameTo(newPath)
+}
+
+func Remove(ctx context.Context, name string) error {
+ f, err := newDbFile(ctx, name)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ return f.delete()
+}
+
+var _ fs.FileInfo = (*dbfsMeta)(nil)
+
+func (m *dbfsMeta) Name() string {
+ return path.Base(m.FullPath)
+}
+
+func (m *dbfsMeta) Size() int64 {
+ return m.FileSize
+}
+
+func (m *dbfsMeta) Mode() fs.FileMode {
+ return os.ModePerm
+}
+
+func (m *dbfsMeta) ModTime() time.Time {
+ return fileTimestampToTime(m.ModifyTimestamp)
+}
+
+func (m *dbfsMeta) IsDir() bool {
+ return false
+}
+
+func (m *dbfsMeta) Sys() any {
+ return nil
+}
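
An end-to-end sketch of the dbfs API from a caller's perspective; the path is illustrative:

    func writeAndReadBack(ctx context.Context) (string, error) {
        f, err := dbfs.Create(ctx, "actions_log/run-1.log") // O_RDWR|O_CREATE|O_TRUNC
        if err != nil {
            return "", err
        }
        if _, err = f.Write([]byte("line 1\n")); err != nil {
            _ = f.Close()
            return "", err
        }
        _ = f.Close()

        r, err := dbfs.Open(ctx, "actions_log/run-1.log") // O_RDONLY
        if err != nil {
            return "", err
        }
        defer r.Close()
        buf, err := io.ReadAll(r)
        return string(buf), err
    }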
diff --git a/models/dbfs/dbfs_test.go b/models/dbfs/dbfs_test.go
new file mode 100644
index 0000000..3ad273a
--- /dev/null
+++ b/models/dbfs/dbfs_test.go
@@ -0,0 +1,191 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package dbfs
+
+import (
+ "bufio"
+ "io"
+ "os"
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func changeDefaultFileBlockSize(n int64) (restore func()) {
+ old := defaultFileBlockSize
+ defaultFileBlockSize = n
+ return func() {
+ defaultFileBlockSize = old
+ }
+}
+
+func TestDbfsBasic(t *testing.T) {
+ defer changeDefaultFileBlockSize(4)()
+
+ // test basic write/read
+ f, err := OpenFile(db.DefaultContext, "test.txt", os.O_RDWR|os.O_CREATE)
+ require.NoError(t, err)
+
+ n, err := f.Write([]byte("0123456789")) // blocks: 0123 4567 89
+ require.NoError(t, err)
+ assert.EqualValues(t, 10, n)
+
+ _, err = f.Seek(0, io.SeekStart)
+ require.NoError(t, err)
+
+ buf, err := io.ReadAll(f)
+ require.NoError(t, err)
+ assert.EqualValues(t, 10, n)
+ assert.EqualValues(t, "0123456789", string(buf))
+
+ // write some new data
+ _, err = f.Seek(1, io.SeekStart)
+ require.NoError(t, err)
+ _, err = f.Write([]byte("bcdefghi")) // blocks: 0bcd efgh i9
+ require.NoError(t, err)
+
+ // read from offset
+ buf, err = io.ReadAll(f)
+ require.NoError(t, err)
+ assert.EqualValues(t, "9", string(buf))
+
+ // read all
+ _, err = f.Seek(0, io.SeekStart)
+ require.NoError(t, err)
+ buf, err = io.ReadAll(f)
+ require.NoError(t, err)
+ assert.EqualValues(t, "0bcdefghi9", string(buf))
+
+ // write to new size
+ _, err = f.Seek(-1, io.SeekEnd)
+ require.NoError(t, err)
+ _, err = f.Write([]byte("JKLMNOP")) // blocks: 0bcd efgh iJKL MNOP
+ require.NoError(t, err)
+ _, err = f.Seek(0, io.SeekStart)
+ require.NoError(t, err)
+ buf, err = io.ReadAll(f)
+ require.NoError(t, err)
+ assert.EqualValues(t, "0bcdefghiJKLMNOP", string(buf))
+
+ // write beyond EOF and fill with zero
+ _, err = f.Seek(5, io.SeekCurrent)
+ require.NoError(t, err)
+ _, err = f.Write([]byte("xyzu")) // blocks: 0bcd efgh iJKL MNOP 0000 0xyz u
+ require.NoError(t, err)
+ _, err = f.Seek(0, io.SeekStart)
+ require.NoError(t, err)
+ buf, err = io.ReadAll(f)
+ require.NoError(t, err)
+ assert.EqualValues(t, "0bcdefghiJKLMNOP\x00\x00\x00\x00\x00xyzu", string(buf))
+
+ // write to the block with zeros
+ _, err = f.Seek(-6, io.SeekCurrent)
+ require.NoError(t, err)
+ _, err = f.Write([]byte("ABCD")) // blocks: 0bcd efgh iJKL MNOP 000A BCDz u
+ require.NoError(t, err)
+ _, err = f.Seek(0, io.SeekStart)
+ require.NoError(t, err)
+ buf, err = io.ReadAll(f)
+ require.NoError(t, err)
+ assert.EqualValues(t, "0bcdefghiJKLMNOP\x00\x00\x00ABCDzu", string(buf))
+
+ require.NoError(t, f.Close())
+
+ // test rename
+ err = Rename(db.DefaultContext, "test.txt", "test2.txt")
+ require.NoError(t, err)
+
+ _, err = OpenFile(db.DefaultContext, "test.txt", os.O_RDONLY)
+ require.Error(t, err)
+
+ f, err = OpenFile(db.DefaultContext, "test2.txt", os.O_RDONLY)
+ require.NoError(t, err)
+ require.NoError(t, f.Close())
+
+ // test remove
+ err = Remove(db.DefaultContext, "test2.txt")
+ require.NoError(t, err)
+
+ _, err = OpenFile(db.DefaultContext, "test2.txt", os.O_RDONLY)
+ require.Error(t, err)
+
+ // test stat
+ f, err = OpenFile(db.DefaultContext, "test/test.txt", os.O_RDWR|os.O_CREATE)
+ require.NoError(t, err)
+ stat, err := f.Stat()
+ require.NoError(t, err)
+ assert.EqualValues(t, "test.txt", stat.Name())
+ assert.EqualValues(t, 0, stat.Size())
+ _, err = f.Write([]byte("0123456789"))
+ require.NoError(t, err)
+ stat, err = f.Stat()
+ require.NoError(t, err)
+ assert.EqualValues(t, 10, stat.Size())
+}
+
+func TestDbfsReadWrite(t *testing.T) {
+ defer changeDefaultFileBlockSize(4)()
+
+ f1, err := OpenFile(db.DefaultContext, "test.log", os.O_RDWR|os.O_CREATE)
+ require.NoError(t, err)
+ defer f1.Close()
+
+ f2, err := OpenFile(db.DefaultContext, "test.log", os.O_RDONLY)
+ require.NoError(t, err)
+ defer f2.Close()
+
+ _, err = f1.Write([]byte("line 1\n"))
+ require.NoError(t, err)
+
+ f2r := bufio.NewReader(f2)
+
+ line, err := f2r.ReadString('\n')
+ require.NoError(t, err)
+ assert.EqualValues(t, "line 1\n", line)
+ _, err = f2r.ReadString('\n')
+ require.ErrorIs(t, err, io.EOF)
+
+ _, err = f1.Write([]byte("line 2\n"))
+ require.NoError(t, err)
+
+ line, err = f2r.ReadString('\n')
+ require.NoError(t, err)
+ assert.EqualValues(t, "line 2\n", line)
+ _, err = f2r.ReadString('\n')
+ require.ErrorIs(t, err, io.EOF)
+}
+
+func TestDbfsSeekWrite(t *testing.T) {
+ defer changeDefaultFileBlockSize(4)()
+
+ f, err := OpenFile(db.DefaultContext, "test2.log", os.O_RDWR|os.O_CREATE)
+ require.NoError(t, err)
+ defer f.Close()
+
+ n, err := f.Write([]byte("111"))
+ require.NoError(t, err)
+
+ _, err = f.Seek(int64(n), io.SeekStart)
+ require.NoError(t, err)
+
+ _, err = f.Write([]byte("222"))
+ require.NoError(t, err)
+
+ _, err = f.Seek(int64(n), io.SeekStart)
+ require.NoError(t, err)
+
+ _, err = f.Write([]byte("333"))
+ require.NoError(t, err)
+
+ fr, err := OpenFile(db.DefaultContext, "test2.log", os.O_RDONLY)
+ require.NoError(t, err)
+	defer fr.Close()
+
+ buf, err := io.ReadAll(fr)
+ require.NoError(t, err)
+ assert.EqualValues(t, "111333", string(buf))
+}
diff --git a/models/dbfs/main_test.go b/models/dbfs/main_test.go
new file mode 100644
index 0000000..537ba09
--- /dev/null
+++ b/models/dbfs/main_test.go
@@ -0,0 +1,14 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package dbfs
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/unittest"
+)
+
+func TestMain(m *testing.M) {
+ unittest.MainTest(m)
+}
diff --git a/models/error.go b/models/error.go
new file mode 100644
index 0000000..75c5324
--- /dev/null
+++ b/models/error.go
@@ -0,0 +1,552 @@
+// Copyright 2015 The Gogs Authors. All rights reserved.
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package models
+
+import (
+ "fmt"
+
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/util"
+)
+
+// ErrUserOwnRepos represents a "UserOwnRepos" kind of error.
+type ErrUserOwnRepos struct {
+ UID int64
+}
+
+// IsErrUserOwnRepos checks if an error is a ErrUserOwnRepos.
+func IsErrUserOwnRepos(err error) bool {
+ _, ok := err.(ErrUserOwnRepos)
+ return ok
+}
+
+func (err ErrUserOwnRepos) Error() string {
+ return fmt.Sprintf("user still has ownership of repositories [uid: %d]", err.UID)
+}
+
+// ErrUserHasOrgs represents a "UserHasOrgs" kind of error.
+type ErrUserHasOrgs struct {
+ UID int64
+}
+
+// IsErrUserHasOrgs checks if an error is a ErrUserHasOrgs.
+func IsErrUserHasOrgs(err error) bool {
+ _, ok := err.(ErrUserHasOrgs)
+ return ok
+}
+
+func (err ErrUserHasOrgs) Error() string {
+ return fmt.Sprintf("user still has membership of organizations [uid: %d]", err.UID)
+}
+
+// ErrUserOwnPackages notifies that the user (still) owns the packages.
+type ErrUserOwnPackages struct {
+ UID int64
+}
+
+// IsErrUserOwnPackages checks if an error is an ErrUserOwnPackages.
+func IsErrUserOwnPackages(err error) bool {
+ _, ok := err.(ErrUserOwnPackages)
+ return ok
+}
+
+func (err ErrUserOwnPackages) Error() string {
+ return fmt.Sprintf("user still has ownership of packages [uid: %d]", err.UID)
+}
+
+// ErrDeleteLastAdminUser represents a "DeleteLastAdminUser" kind of error.
+type ErrDeleteLastAdminUser struct {
+ UID int64
+}
+
+// IsErrDeleteLastAdminUser checks if an error is a ErrDeleteLastAdminUser.
+func IsErrDeleteLastAdminUser(err error) bool {
+ _, ok := err.(ErrDeleteLastAdminUser)
+ return ok
+}
+
+func (err ErrDeleteLastAdminUser) Error() string {
+ return fmt.Sprintf("can not delete the last admin user [uid: %d]", err.UID)
+}
+
+// ErrNoPendingRepoTransfer is an error type for repositories without a pending
+// transfer request
+type ErrNoPendingRepoTransfer struct {
+ RepoID int64
+}
+
+func (err ErrNoPendingRepoTransfer) Error() string {
+ return fmt.Sprintf("repository doesn't have a pending transfer [repo_id: %d]", err.RepoID)
+}
+
+// IsErrNoPendingTransfer checks if an error is an ErrNoPendingRepoTransfer.
+func IsErrNoPendingTransfer(err error) bool {
+ _, ok := err.(ErrNoPendingRepoTransfer)
+ return ok
+}
+
+func (err ErrNoPendingRepoTransfer) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ErrRepoTransferInProgress represents the state of a repository that has an
+// ongoing transfer
+type ErrRepoTransferInProgress struct {
+ Uname string
+ Name string
+}
+
+// IsErrRepoTransferInProgress checks if an error is a ErrRepoTransferInProgress.
+func IsErrRepoTransferInProgress(err error) bool {
+ _, ok := err.(ErrRepoTransferInProgress)
+ return ok
+}
+
+func (err ErrRepoTransferInProgress) Error() string {
+ return fmt.Sprintf("repository is already being transferred [uname: %s, name: %s]", err.Uname, err.Name)
+}
+
+func (err ErrRepoTransferInProgress) Unwrap() error {
+ return util.ErrAlreadyExist
+}
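+
+// Usage sketch: because these error types unwrap to util sentinel errors,
+// callers can match them with errors.Is as well as with the IsErr* helpers:
+//
+//	err := error(ErrRepoTransferInProgress{Uname: "org", Name: "repo"})
+//	IsErrRepoTransferInProgress(err)     // true: exact type check
+//	errors.Is(err, util.ErrAlreadyExist) // true: matched through Unwrap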
+
+// ErrInvalidCloneAddr represents a "InvalidCloneAddr" kind of error.
+type ErrInvalidCloneAddr struct {
+ Host string
+ IsURLError bool
+ IsInvalidPath bool
+ IsProtocolInvalid bool
+ IsPermissionDenied bool
+ LocalPath bool
+}
+
+// IsErrInvalidCloneAddr checks if an error is a ErrInvalidCloneAddr.
+func IsErrInvalidCloneAddr(err error) bool {
+ _, ok := err.(*ErrInvalidCloneAddr)
+ return ok
+}
+
+func (err *ErrInvalidCloneAddr) Error() string {
+ if err.IsInvalidPath {
+ return fmt.Sprintf("migration/cloning from '%s' is not allowed: the provided path is invalid", err.Host)
+ }
+ if err.IsProtocolInvalid {
+ return fmt.Sprintf("migration/cloning from '%s' is not allowed: the provided url protocol is not allowed", err.Host)
+ }
+ if err.IsPermissionDenied {
+		return fmt.Sprintf("migration/cloning from '%s' is not allowed", err.Host)
+ }
+ if err.IsURLError {
+ return fmt.Sprintf("migration/cloning from '%s' is not allowed: the provided url is invalid", err.Host)
+ }
+
+ return fmt.Sprintf("migration/cloning from '%s' is not allowed", err.Host)
+}
+
+func (err *ErrInvalidCloneAddr) Unwrap() error {
+ return util.ErrInvalidArgument
+}
+
+// ErrUpdateTaskNotExist represents a "UpdateTaskNotExist" kind of error.
+type ErrUpdateTaskNotExist struct {
+ UUID string
+}
+
+// IsErrUpdateTaskNotExist checks if an error is a ErrUpdateTaskNotExist.
+func IsErrUpdateTaskNotExist(err error) bool {
+ _, ok := err.(ErrUpdateTaskNotExist)
+ return ok
+}
+
+func (err ErrUpdateTaskNotExist) Error() string {
+ return fmt.Sprintf("update task does not exist [uuid: %s]", err.UUID)
+}
+
+func (err ErrUpdateTaskNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ErrInvalidTagName represents a "InvalidTagName" kind of error.
+type ErrInvalidTagName struct {
+ TagName string
+}
+
+// IsErrInvalidTagName checks if an error is a ErrInvalidTagName.
+func IsErrInvalidTagName(err error) bool {
+ _, ok := err.(ErrInvalidTagName)
+ return ok
+}
+
+func (err ErrInvalidTagName) Error() string {
+ return fmt.Sprintf("release tag name is not valid [tag_name: %s]", err.TagName)
+}
+
+func (err ErrInvalidTagName) Unwrap() error {
+ return util.ErrInvalidArgument
+}
+
+// ErrProtectedTagName represents a "ProtectedTagName" kind of error.
+type ErrProtectedTagName struct {
+ TagName string
+}
+
+// IsErrProtectedTagName checks if an error is a ErrProtectedTagName.
+func IsErrProtectedTagName(err error) bool {
+ _, ok := err.(ErrProtectedTagName)
+ return ok
+}
+
+func (err ErrProtectedTagName) Error() string {
+ return fmt.Sprintf("release tag name is protected [tag_name: %s]", err.TagName)
+}
+
+func (err ErrProtectedTagName) Unwrap() error {
+ return util.ErrPermissionDenied
+}
+
+// ErrRepoFileAlreadyExists represents a "RepoFileAlreadyExist" kind of error.
+type ErrRepoFileAlreadyExists struct {
+ Path string
+}
+
+// IsErrRepoFileAlreadyExists checks if an error is a ErrRepoFileAlreadyExists.
+func IsErrRepoFileAlreadyExists(err error) bool {
+ _, ok := err.(ErrRepoFileAlreadyExists)
+ return ok
+}
+
+func (err ErrRepoFileAlreadyExists) Error() string {
+ return fmt.Sprintf("repository file already exists [path: %s]", err.Path)
+}
+
+func (err ErrRepoFileAlreadyExists) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+// ErrRepoFileDoesNotExist represents a "RepoFileDoesNotExist" kind of error.
+type ErrRepoFileDoesNotExist struct {
+ Path string
+ Name string
+}
+
+// IsErrRepoFileDoesNotExist checks if an error is an ErrRepoFileDoesNotExist.
+func IsErrRepoFileDoesNotExist(err error) bool {
+ _, ok := err.(ErrRepoFileDoesNotExist)
+ return ok
+}
+
+func (err ErrRepoFileDoesNotExist) Error() string {
+ return fmt.Sprintf("repository file does not exist [path: %s]", err.Path)
+}
+
+func (err ErrRepoFileDoesNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ErrFilenameInvalid represents a "FilenameInvalid" kind of error.
+type ErrFilenameInvalid struct {
+ Path string
+}
+
+// IsErrFilenameInvalid checks if an error is an ErrFilenameInvalid.
+func IsErrFilenameInvalid(err error) bool {
+ _, ok := err.(ErrFilenameInvalid)
+ return ok
+}
+
+func (err ErrFilenameInvalid) Error() string {
+ return fmt.Sprintf("path contains a malformed path component [path: %s]", err.Path)
+}
+
+func (err ErrFilenameInvalid) Unwrap() error {
+ return util.ErrInvalidArgument
+}
+
+// ErrUserCannotCommit represents "UserCannotCommit" kind of error.
+type ErrUserCannotCommit struct {
+ UserName string
+}
+
+// IsErrUserCannotCommit checks if an error is an ErrUserCannotCommit.
+func IsErrUserCannotCommit(err error) bool {
+ _, ok := err.(ErrUserCannotCommit)
+ return ok
+}
+
+func (err ErrUserCannotCommit) Error() string {
+ return fmt.Sprintf("user cannot commit to repo [user: %s]", err.UserName)
+}
+
+func (err ErrUserCannotCommit) Unwrap() error {
+ return util.ErrPermissionDenied
+}
+
+// ErrFilePathInvalid represents a "FilePathInvalid" kind of error.
+type ErrFilePathInvalid struct {
+ Message string
+ Path string
+ Name string
+ Type git.EntryMode
+}
+
+// IsErrFilePathInvalid checks if an error is an ErrFilePathInvalid.
+func IsErrFilePathInvalid(err error) bool {
+ _, ok := err.(ErrFilePathInvalid)
+ return ok
+}
+
+func (err ErrFilePathInvalid) Error() string {
+ if err.Message != "" {
+ return err.Message
+ }
+ return fmt.Sprintf("path is invalid [path: %s]", err.Path)
+}
+
+func (err ErrFilePathInvalid) Unwrap() error {
+ return util.ErrInvalidArgument
+}
+
+// ErrFilePathProtected represents a "FilePathProtected" kind of error.
+type ErrFilePathProtected struct {
+ Message string
+ Path string
+}
+
+// IsErrFilePathProtected checks if an error is an ErrFilePathProtected.
+func IsErrFilePathProtected(err error) bool {
+ _, ok := err.(ErrFilePathProtected)
+ return ok
+}
+
+func (err ErrFilePathProtected) Error() string {
+ if err.Message != "" {
+ return err.Message
+ }
+ return fmt.Sprintf("path is protected and can not be changed [path: %s]", err.Path)
+}
+
+func (err ErrFilePathProtected) Unwrap() error {
+ return util.ErrPermissionDenied
+}
+
+// ErrDisallowedToMerge represents an error that a branch is protected and the current user is not allowed to modify it.
+type ErrDisallowedToMerge struct {
+ Reason string
+}
+
+// IsErrDisallowedToMerge checks if an error is an ErrDisallowedToMerge.
+func IsErrDisallowedToMerge(err error) bool {
+ _, ok := err.(ErrDisallowedToMerge)
+ return ok
+}
+
+func (err ErrDisallowedToMerge) Error() string {
+ return fmt.Sprintf("not allowed to merge [reason: %s]", err.Reason)
+}
+
+func (err ErrDisallowedToMerge) Unwrap() error {
+ return util.ErrPermissionDenied
+}
+
+// ErrTagAlreadyExists represents an error that tag with such name already exists.
+type ErrTagAlreadyExists struct {
+ TagName string
+}
+
+// IsErrTagAlreadyExists checks if an error is an ErrTagAlreadyExists.
+func IsErrTagAlreadyExists(err error) bool {
+ _, ok := err.(ErrTagAlreadyExists)
+ return ok
+}
+
+func (err ErrTagAlreadyExists) Error() string {
+ return fmt.Sprintf("tag already exists [name: %s]", err.TagName)
+}
+
+func (err ErrTagAlreadyExists) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+// ErrSHADoesNotMatch represents a "SHADoesNotMatch" kind of error.
+type ErrSHADoesNotMatch struct {
+ Path string
+ GivenSHA string
+ CurrentSHA string
+}
+
+// IsErrSHADoesNotMatch checks if an error is a ErrSHADoesNotMatch.
+func IsErrSHADoesNotMatch(err error) bool {
+ _, ok := err.(ErrSHADoesNotMatch)
+ return ok
+}
+
+func (err ErrSHADoesNotMatch) Error() string {
+ return fmt.Sprintf("sha does not match [given: %s, expected: %s]", err.GivenSHA, err.CurrentSHA)
+}
+
+// ErrSHANotFound represents a "SHANotFound" kind of error.
+type ErrSHANotFound struct {
+ SHA string
+}
+
+// IsErrSHANotFound checks if an error is an ErrSHANotFound.
+func IsErrSHANotFound(err error) bool {
+ _, ok := err.(ErrSHANotFound)
+ return ok
+}
+
+func (err ErrSHANotFound) Error() string {
+ return fmt.Sprintf("sha not found [%s]", err.SHA)
+}
+
+func (err ErrSHANotFound) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ErrCommitIDDoesNotMatch represents a "CommitIDDoesNotMatch" kind of error.
+type ErrCommitIDDoesNotMatch struct {
+ GivenCommitID string
+ CurrentCommitID string
+}
+
+// IsErrCommitIDDoesNotMatch checks if an error is an ErrCommitIDDoesNotMatch.
+func IsErrCommitIDDoesNotMatch(err error) bool {
+ _, ok := err.(ErrCommitIDDoesNotMatch)
+ return ok
+}
+
+func (err ErrCommitIDDoesNotMatch) Error() string {
+ return fmt.Sprintf("file CommitID does not match [given: %s, expected: %s]", err.GivenCommitID, err.CurrentCommitID)
+}
+
+// ErrSHAOrCommitIDNotProvided represents a "SHAOrCommitIDNotProvided" kind of error.
+type ErrSHAOrCommitIDNotProvided struct{}
+
+// IsErrSHAOrCommitIDNotProvided checks if an error is an ErrSHAOrCommitIDNotProvided.
+func IsErrSHAOrCommitIDNotProvided(err error) bool {
+ _, ok := err.(ErrSHAOrCommitIDNotProvided)
+ return ok
+}
+
+func (err ErrSHAOrCommitIDNotProvided) Error() string {
+ return "a SHA or commit ID must be proved when updating a file"
+}
+
+// ErrInvalidMergeStyle represents an error if merging with a disabled merge strategy
+type ErrInvalidMergeStyle struct {
+ ID int64
+ Style repo_model.MergeStyle
+}
+
+// IsErrInvalidMergeStyle checks if an error is an ErrInvalidMergeStyle.
+func IsErrInvalidMergeStyle(err error) bool {
+ _, ok := err.(ErrInvalidMergeStyle)
+ return ok
+}
+
+func (err ErrInvalidMergeStyle) Error() string {
+ return fmt.Sprintf("merge strategy is not allowed or is invalid [repo_id: %d, strategy: %s]",
+ err.ID, err.Style)
+}
+
+func (err ErrInvalidMergeStyle) Unwrap() error {
+ return util.ErrInvalidArgument
+}
+
+// ErrMergeConflicts represents an error if merging fails with a conflict
+type ErrMergeConflicts struct {
+ Style repo_model.MergeStyle
+ StdOut string
+ StdErr string
+ Err error
+}
+
+// IsErrMergeConflicts checks if an error is an ErrMergeConflicts.
+func IsErrMergeConflicts(err error) bool {
+ _, ok := err.(ErrMergeConflicts)
+ return ok
+}
+
+func (err ErrMergeConflicts) Error() string {
+ return fmt.Sprintf("Merge Conflict Error: %v: %s\n%s", err.Err, err.StdErr, err.StdOut)
+}
+
+// ErrMergeUnrelatedHistories represents an error if merging fails due to unrelated histories
+type ErrMergeUnrelatedHistories struct {
+ Style repo_model.MergeStyle
+ StdOut string
+ StdErr string
+ Err error
+}
+
+// IsErrMergeUnrelatedHistories checks if an error is an ErrMergeUnrelatedHistories.
+func IsErrMergeUnrelatedHistories(err error) bool {
+ _, ok := err.(ErrMergeUnrelatedHistories)
+ return ok
+}
+
+func (err ErrMergeUnrelatedHistories) Error() string {
+ return fmt.Sprintf("Merge UnrelatedHistories Error: %v: %s\n%s", err.Err, err.StdErr, err.StdOut)
+}
+
+// ErrMergeDivergingFastForwardOnly represents an error if a fast-forward-only merge fails because the branches diverge
+type ErrMergeDivergingFastForwardOnly struct {
+ StdOut string
+ StdErr string
+ Err error
+}
+
+// IsErrMergeDivergingFastForwardOnly checks if an error is an ErrMergeDivergingFastForwardOnly.
+func IsErrMergeDivergingFastForwardOnly(err error) bool {
+ _, ok := err.(ErrMergeDivergingFastForwardOnly)
+ return ok
+}
+
+func (err ErrMergeDivergingFastForwardOnly) Error() string {
+ return fmt.Sprintf("Merge DivergingFastForwardOnly Error: %v: %s\n%s", err.Err, err.StdErr, err.StdOut)
+}
+
+// ErrRebaseConflicts represents an error if rebase fails with a conflict
+type ErrRebaseConflicts struct {
+ Style repo_model.MergeStyle
+ CommitSHA string
+ StdOut string
+ StdErr string
+ Err error
+}
+
+// IsErrRebaseConflicts checks if an error is an ErrRebaseConflicts.
+func IsErrRebaseConflicts(err error) bool {
+ _, ok := err.(ErrRebaseConflicts)
+ return ok
+}
+
+func (err ErrRebaseConflicts) Error() string {
+ return fmt.Sprintf("Rebase Error: %v: Whilst Rebasing: %s\n%s\n%s", err.Err, err.CommitSHA, err.StdErr, err.StdOut)
+}
+
+// ErrPullRequestHasMerged represents a "PullRequestHasMerged"-error
+type ErrPullRequestHasMerged struct {
+ ID int64
+ IssueID int64
+ HeadRepoID int64
+ BaseRepoID int64
+ HeadBranch string
+ BaseBranch string
+}
+
+// IsErrPullRequestHasMerged checks if an error is an ErrPullRequestHasMerged.
+func IsErrPullRequestHasMerged(err error) bool {
+ _, ok := err.(ErrPullRequestHasMerged)
+ return ok
+}
+
+// Error returns a detailed, human-readable description of the merged pull request.
+func (err ErrPullRequestHasMerged) Error() string {
+ return fmt.Sprintf("pull request has merged [id: %d, issue_id: %d, head_repo_id: %d, base_repo_id: %d, head_branch: %s, base_branch: %s]",
+ err.ID, err.IssueID, err.HeadRepoID, err.BaseRepoID, err.HeadBranch, err.BaseBranch)
+}
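
Aside: every error type in this hunk follows the same pattern — a value type carrying context fields, an IsErrXxx type-assertion helper, and (usually) an Unwrap method that maps the error onto a generic sentinel in modules/util. That gives callers two working detection styles. A minimal sketch, assuming it lives alongside these types in the same package (classifyTagError is a hypothetical helper, not part of this patch):

    import (
        "errors"

        "code.gitea.io/gitea/modules/util"
    )

    // classifyTagError is illustrative only: both checks succeed for an
    // ErrTagAlreadyExists value, because its Unwrap method above returns
    // util.ErrAlreadyExist.
    func classifyTagError(err error) string {
        if IsErrTagAlreadyExists(err) { // typed check
            return "tag exists"
        }
        if errors.Is(err, util.ErrAlreadyExist) { // sentinel check via Unwrap
            return "already exists"
        }
        return err.Error()
    }

Note that a few types here (ErrSHADoesNotMatch, ErrCommitIDDoesNotMatch, the merge/rebase failure types) define no Unwrap, so for those only the typed helpers apply.
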
diff --git a/models/fixture_generation.go b/models/fixture_generation.go
new file mode 100644
index 0000000..6234cae
--- /dev/null
+++ b/models/fixture_generation.go
@@ -0,0 +1,50 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package models
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+)
+
+// GetYamlFixturesAccess returns a string containing the YAML fixture contents
+// for the access table, as recalculated using access_model.RecalculateAccesses()
+func GetYamlFixturesAccess(ctx context.Context) (string, error) {
+ repos := make([]*repo_model.Repository, 0, 50)
+ if err := db.GetEngine(ctx).Find(&repos); err != nil {
+ return "", err
+ }
+
+ for _, repo := range repos {
+ repo.MustOwner(ctx)
+ if err := access_model.RecalculateAccesses(ctx, repo); err != nil {
+ return "", err
+ }
+ }
+
+ var b strings.Builder
+
+ accesses := make([]*access_model.Access, 0, 200)
+ if err := db.GetEngine(ctx).OrderBy("user_id, repo_id").Find(&accesses); err != nil {
+ return "", err
+ }
+
+ for i, a := range accesses {
+ fmt.Fprintf(&b, "-\n")
+ fmt.Fprintf(&b, " id: %d\n", i+1)
+ fmt.Fprintf(&b, " user_id: %d\n", a.UserID)
+ fmt.Fprintf(&b, " repo_id: %d\n", a.RepoID)
+ fmt.Fprintf(&b, " mode: %d\n", a.Mode)
+ if i < len(accesses)-1 {
+ fmt.Fprintf(&b, "\n")
+ }
+ }
+
+ return b.String(), nil
+}
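
GetYamlFixturesAccess renumbers ids sequentially (i+1) after recalculating accesses, so the generated YAML is stable regardless of the ids currently in the database. A hedged sketch of how the output could be written back to disk, assuming os and path/filepath are also imported (regenerateAccessFixture is hypothetical, not part of this patch):

    // Hypothetical helper: persist the regenerated fixture to
    // models/fixtures/access.yml so it matches the recalculated table.
    func regenerateAccessFixture(ctx context.Context, fixturesDir string) error {
        content, err := GetYamlFixturesAccess(ctx)
        if err != nil {
            return err
        }
        return os.WriteFile(filepath.Join(fixturesDir, "access.yml"), []byte(content), 0o644)
    }
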
diff --git a/models/fixture_test.go b/models/fixture_test.go
new file mode 100644
index 0000000..33429c8
--- /dev/null
+++ b/models/fixture_test.go
@@ -0,0 +1,36 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package models
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/modules/util"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestFixtureGeneration(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ test := func(ctx context.Context, gen func(ctx context.Context) (string, error), name string) {
+ expected, err := gen(ctx)
+ require.NoError(t, err)
+
+ p := filepath.Join(unittest.FixturesDir(), name+".yml")
+ bytes, err := os.ReadFile(p)
+ require.NoError(t, err)
+
+ data := string(util.NormalizeEOL(bytes))
+ assert.EqualValues(t, expected, data, "Differences detected for %s", p)
+ }
+
+ test(db.DefaultContext, GetYamlFixturesAccess, "access")
+}
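
The test closure accepts any generator matching func(context.Context) (string, error), so verifying another regenerated fixture is a one-line addition (GetYamlFixturesTeam is a hypothetical generator, named here only for illustration):

    // Hypothetical extension inside TestFixtureGeneration:
    test(db.DefaultContext, GetYamlFixturesTeam, "team")
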
diff --git a/models/fixtures/TestGetUnmergedPullRequestsByHeadInfoMax/issue.yml b/models/fixtures/TestGetUnmergedPullRequestsByHeadInfoMax/issue.yml
new file mode 100644
index 0000000..7fe592e
--- /dev/null
+++ b/models/fixtures/TestGetUnmergedPullRequestsByHeadInfoMax/issue.yml
@@ -0,0 +1,12 @@
+-
+ id: 1001
+ repo_id: 1
+ index: 1001
+ poster_id: 1
+ name: issue1
+ content: content for the first issue
+ is_pull: true
+ created: 111111111
+ created_unix: 946684800
+ updated_unix: 978307200
+ is_closed: false
diff --git a/models/fixtures/TestGetUnmergedPullRequestsByHeadInfoMax/pull_request.yml b/models/fixtures/TestGetUnmergedPullRequestsByHeadInfoMax/pull_request.yml
new file mode 100644
index 0000000..7e7c0d1
--- /dev/null
+++ b/models/fixtures/TestGetUnmergedPullRequestsByHeadInfoMax/pull_request.yml
@@ -0,0 +1,13 @@
+-
+ id: 1001
+ type: 0 # pull request
+ status: 2 # mergeable
+ issue_id: 1001
+ index: 1001
+ head_repo_id: 1
+ base_repo_id: 1
+ head_branch: branchmax
+ base_branch: master
+ merge_base: 4a357436d925b5c974181ff12a994538ddc5a269
+ has_merged: false
+ flow: 0
diff --git a/models/fixtures/TestParseCommitWithSSHSignature/public_key.yml b/models/fixtures/TestParseCommitWithSSHSignature/public_key.yml
new file mode 100644
index 0000000..f76dabb
--- /dev/null
+++ b/models/fixtures/TestParseCommitWithSSHSignature/public_key.yml
@@ -0,0 +1,13 @@
+-
+ id: 1000
+ owner_id: 2
+ name: user2@localhost
+ fingerprint: "SHA256:TKfwbZMR7e9OnlV2l1prfah1TXH8CmqR0PvFEXVCXA4"
+ content: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKBknvWcuxM/W0iXGkzY4f2O6feX+Q7o46pKcxUbcOgh user2@localhost"
+ # private key (base64-ed) LS0tLS1CRUdJTiBPUEVOU1NIIFBSSVZBVEUgS0VZLS0tLS0KYjNCbGJuTnphQzFyWlhrdGRqRUFBQUFBQkc1dmJtVUFBQUFFYm05dVpRQUFBQUFBQUFBQkFBQUFNd0FBQUF0emMyZ3RaVwpReU5UVXhPUUFBQUNDZ1pKNzFuTHNUUDF0SWx4cE0yT0g5anVuM2wva082T09xU25NVkczRG9JUUFBQUpocG43YTZhWisyCnVnQUFBQXR6YzJndFpXUXlOVFV4T1FBQUFDQ2daSjcxbkxzVFAxdElseHBNMk9IOWp1bjNsL2tPNk9PcVNuTVZHM0RvSVEKQUFBRUFxVm12bmo1LzZ5TW12ck9Ub29xa3F5MmUrc21aK0tBcEtKR0crRnY5MlA2QmtudldjdXhNL1cwaVhHa3pZNGYyTwo2ZmVYK1E3bzQ2cEtjeFViY09naEFBQUFFMmQxYzNSbFpFQm5kWE4wWldRdFltVmhjM1FCQWc9PQotLS0tLUVORCBPUEVOU1NIIFBSSVZBVEUgS0VZLS0tLS0=
+ mode: 2
+ type: 1
+ verified: true
+ created_unix: 1559593109
+ updated_unix: 1565224552
+ login_source_id: 0
diff --git a/models/fixtures/access.yml b/models/fixtures/access.yml
new file mode 100644
index 0000000..641c453
--- /dev/null
+++ b/models/fixtures/access.yml
@@ -0,0 +1,161 @@
+-
+ id: 1
+ user_id: 2
+ repo_id: 3
+ mode: 4
+
+-
+ id: 2
+ user_id: 2
+ repo_id: 5
+ mode: 4
+
+-
+ id: 3
+ user_id: 2
+ repo_id: 24
+ mode: 2
+
+-
+ id: 4
+ user_id: 2
+ repo_id: 32
+ mode: 4
+
+-
+ id: 5
+ user_id: 4
+ repo_id: 3
+ mode: 2
+
+-
+ id: 6
+ user_id: 4
+ repo_id: 4
+ mode: 2
+
+-
+ id: 7
+ user_id: 4
+ repo_id: 40
+ mode: 2
+
+-
+ id: 8
+ user_id: 15
+ repo_id: 21
+ mode: 2
+
+-
+ id: 9
+ user_id: 15
+ repo_id: 22
+ mode: 2
+
+-
+ id: 10
+ user_id: 15
+ repo_id: 23
+ mode: 4
+
+-
+ id: 11
+ user_id: 15
+ repo_id: 24
+ mode: 4
+
+-
+ id: 12
+ user_id: 15
+ repo_id: 32
+ mode: 2
+
+-
+ id: 13
+ user_id: 18
+ repo_id: 21
+ mode: 2
+
+-
+ id: 14
+ user_id: 18
+ repo_id: 22
+ mode: 2
+
+-
+ id: 15
+ user_id: 18
+ repo_id: 23
+ mode: 4
+
+-
+ id: 16
+ user_id: 18
+ repo_id: 24
+ mode: 4
+
+-
+ id: 17
+ user_id: 20
+ repo_id: 24
+ mode: 1
+
+-
+ id: 18
+ user_id: 20
+ repo_id: 27
+ mode: 4
+
+-
+ id: 19
+ user_id: 20
+ repo_id: 28
+ mode: 4
+
+-
+ id: 20
+ user_id: 29
+ repo_id: 4
+ mode: 2
+
+-
+ id: 21
+ user_id: 29
+ repo_id: 24
+ mode: 1
+
+-
+ id: 22
+ user_id: 31
+ repo_id: 27
+ mode: 4
+
+-
+ id: 23
+ user_id: 31
+ repo_id: 28
+ mode: 4
+
+-
+ id: 24
+ user_id: 38
+ repo_id: 60
+ mode: 2
+
+-
+ id: 25
+ user_id: 38
+ repo_id: 61
+ mode: 1
+
+-
+ id: 26
+ user_id: 39
+ repo_id: 61
+ mode: 1
+
+-
+ id: 27
+ user_id: 40
+ repo_id: 61
+ mode: 4
diff --git a/models/fixtures/access_token.yml b/models/fixtures/access_token.yml
new file mode 100644
index 0000000..0744255
--- /dev/null
+++ b/models/fixtures/access_token.yml
@@ -0,0 +1,33 @@
+-
+ id: 1
+ uid: 1
+ name: Token A
+ # token: d2c6c1ba3890b309189a8e618c72a162e4efbf36
+ token_hash: 2b3668e11cb82d3af8c6e4524fc7841297668f5008d1626f0ad3417e9fa39af84c268248b78c481daa7e5dc437784003494f
+ token_salt: QuSiZr1byZ
+ token_last_eight: e4efbf36
+ created_unix: 946687980
+ updated_unix: 946687980
+
+-
+ id: 2
+ uid: 1
+ name: Token B
+ # token: 4c6f36e6cf498e2a448662f915d932c09c5a146c
+ token_hash: 1a0e32a231ebbd582dc626c1543a42d3c63d4fa76c07c72862721467c55e8f81c923d60700f0528b5f5f443f055559d3a279
+ token_salt: Lfwopukrq5
+ token_last_eight: 9c5a146c
+ created_unix: 946687980
+ updated_unix: 946687980
+
+-
+ id: 3
+ uid: 2
+ name: Token A
+ # token: 90a18faa671dc43924b795806ffe4fd169d28c91
+ token_hash: d6d404048048812d9e911d93aefbe94fc768d4876fdf75e3bef0bdc67828e0af422846d3056f2f25ec35c51dc92075685ec5
+ token_salt: 99ArgXKlQQ
+ token_last_eight: 69d28c91
+ created_unix: 946687980
+ updated_unix: 946687980
+ # the plaintext values of the tokens above are kept in comments for reference
diff --git a/models/fixtures/action.yml b/models/fixtures/action.yml
new file mode 100644
index 0000000..b2febb4
--- /dev/null
+++ b/models/fixtures/action.yml
@@ -0,0 +1,76 @@
+-
+ id: 1
+ user_id: 2
+ op_type: 12 # close issue
+ act_user_id: 2
+ repo_id: 2 # private
+ is_private: true
+ created_unix: 1603228283
+ content: '1|' # issueId 4
+
+-
+ id: 2
+ user_id: 3
+ op_type: 2 # rename repo
+ act_user_id: 2
+ repo_id: 3 # private
+ is_private: true
+ content: oldRepoName
+
+-
+ id: 3
+ user_id: 11
+ op_type: 1 # create repo
+ act_user_id: 11
+ repo_id: 9 # public
+ is_private: false
+
+-
+ id: 4
+ user_id: 16
+ op_type: 12 # close issue
+ act_user_id: 16
+ repo_id: 22 # private
+ is_private: true
+ created_unix: 1603267920
+
+- id: 5
+ user_id: 10
+ op_type: 1 # create repo
+ act_user_id: 10
+ repo_id: 6 # private
+ is_private: true
+ created_unix: 1603010100
+
+- id: 6
+ user_id: 10
+ op_type: 1 # create repo
+ act_user_id: 10
+ repo_id: 7 # private
+ is_private: true
+ created_unix: 1603011300
+
+- id: 7
+ user_id: 10
+ op_type: 1 # create repo
+ act_user_id: 10
+ repo_id: 8 # public
+ is_private: false
+ created_unix: 1603011540 # grouped with id:7
+
+- id: 8
+ user_id: 1
+ op_type: 12 # close issue
+ act_user_id: 1
+ repo_id: 1700 # intentionally dangling
+ is_private: false
+ created_unix: 1603011541
+
+- id: 9
+ user_id: 34
+ op_type: 12 # close issue
+ act_user_id: 34
+ repo_id: 1 # public
+ is_private: false
+ created_unix: 1680454039
+ content: '4|' # issueId 5
diff --git a/models/fixtures/action_run.yml b/models/fixtures/action_run.yml
new file mode 100644
index 0000000..9c60b35
--- /dev/null
+++ b/models/fixtures/action_run.yml
@@ -0,0 +1,435 @@
+-
+ id: 791
+ title: "update actions"
+ repo_id: 4
+ owner_id: 1
+ workflow_id: "artifact.yaml"
+ index: 187
+ trigger_user_id: 1
+ ref: "refs/heads/master"
+ commit_sha: "c2d72f548424103f01ee1dc02889c1e2bff816b0"
+ event: "push"
+ is_fork_pull_request: 0
+ status: 1
+ started: 1683636528
+ stopped: 1683636626
+ created: 1683636108
+ updated: 1683636626
+ need_approval: 0
+ approved_by: 0
+ event_payload: |
+ {
+ "after": "7a3858dc7f059543a8807a8b551304b7e362a7ef",
+ "before": "0000000000000000000000000000000000000000",
+ "commits": [
+ {
+ "added": [
+ ".forgejo/workflows/test.yml"
+ ],
+ "author": {
+ "email": "root@example.com",
+ "name": "username",
+ "username": "root"
+ },
+ "committer": {
+ "email": "root@example.com",
+ "name": "username",
+ "username": "root"
+ },
+ "id": "7a3858dc7f059543a8807a8b551304b7e362a7ef",
+ "message": "initial commit\n",
+ "modified": [],
+ "removed": [],
+ "timestamp": "2024-01-24T18:59:25Z",
+ "url": "http://10.201.14.40:3000/root/example-push/commit/7a3858dc7f059543a8807a8b551304b7e362a7ef",
+ "verification": null
+ }
+ ],
+ "compare_url": "http://10.201.14.40:3000/",
+ "head_commit": {
+ "added": [
+ ".forgejo/workflows/test.yml"
+ ],
+ "author": {
+ "email": "root@example.com",
+ "name": "username",
+ "username": "root"
+ },
+ "committer": {
+ "email": "root@example.com",
+ "name": "username",
+ "username": "root"
+ },
+ "id": "7a3858dc7f059543a8807a8b551304b7e362a7ef",
+ "message": "initial commit\n",
+ "modified": [],
+ "removed": [],
+ "timestamp": "2024-01-24T18:59:25Z",
+ "url": "http://10.201.14.40:3000/root/example-push/commit/7a3858dc7f059543a8807a8b551304b7e362a7ef",
+ "verification": null
+ },
+ "pusher": {
+ "active": false,
+ "avatar_url": "http://10.201.14.40:3000/avatars/04edfc0ef6c6cf6d6b88fbc69f9f9071",
+ "created": "2024-01-24T18:57:32Z",
+ "description": "",
+ "email": "root@noreply.10.201.14.40",
+ "followers_count": 0,
+ "following_count": 0,
+ "full_name": "",
+ "id": 1,
+ "is_admin": false,
+ "language": "",
+ "last_login": "0001-01-01T00:00:00Z",
+ "location": "",
+ "login": "root",
+ "login_name": "",
+ "prohibit_login": false,
+ "restricted": false,
+ "starred_repos_count": 0,
+ "username": "root",
+ "visibility": "public",
+ "website": ""
+ },
+ "ref": "refs/heads/main",
+ "repository": {
+ "allow_merge_commits": true,
+ "allow_rebase": true,
+ "allow_rebase_explicit": true,
+ "allow_rebase_update": true,
+ "allow_squash_merge": true,
+ "archived": false,
+ "archived_at": "1970-01-01T00:00:00Z",
+ "avatar_url": "",
+ "clone_url": "http://10.201.14.40:3000/root/example-push.git",
+ "created_at": "2024-01-24T18:59:25Z",
+ "default_allow_maintainer_edit": false,
+ "default_branch": "main",
+ "default_delete_branch_after_merge": false,
+ "default_merge_style": "merge",
+ "description": "",
+ "empty": false,
+ "fork": false,
+ "forks_count": 0,
+ "full_name": "root/example-push",
+ "has_actions": true,
+ "has_issues": true,
+ "has_packages": true,
+ "has_projects": true,
+ "has_pull_requests": true,
+ "has_releases": true,
+ "has_wiki": true,
+ "html_url": "http://10.201.14.40:3000/root/example-push",
+ "id": 2,
+ "ignore_whitespace_conflicts": false,
+ "internal": false,
+ "internal_tracker": {
+ "allow_only_contributors_to_track_time": true,
+ "enable_issue_dependencies": true,
+ "enable_time_tracker": true
+ },
+ "language": "",
+ "languages_url": "http://10.201.14.40:3000/api/v1/repos/root/example-push/languages",
+ "link": "",
+ "mirror": false,
+ "mirror_interval": "",
+ "mirror_updated": "0001-01-01T00:00:00Z",
+ "name": "example-push",
+ "object_format_name": "",
+ "open_issues_count": 0,
+ "open_pr_counter": 0,
+ "original_url": "",
+ "owner": {
+ "active": false,
+ "avatar_url": "http://10.201.14.40:3000/avatars/04edfc0ef6c6cf6d6b88fbc69f9f9071",
+ "created": "2024-01-24T18:57:32Z",
+ "description": "",
+ "email": "root@example.com",
+ "followers_count": 0,
+ "following_count": 0,
+ "full_name": "",
+ "id": 1,
+ "is_admin": false,
+ "language": "",
+ "last_login": "0001-01-01T00:00:00Z",
+ "location": "",
+ "login": "root",
+ "login_name": "",
+ "prohibit_login": false,
+ "restricted": false,
+ "starred_repos_count": 0,
+ "username": "root",
+ "visibility": "public",
+ "website": ""
+ },
+ "parent": null,
+ "permissions": {
+ "admin": true,
+ "pull": true,
+ "push": true
+ },
+ "private": false,
+ "release_counter": 0,
+ "repo_transfer": null,
+ "size": 25,
+ "ssh_url": "forgejo@10.201.14.40:root/example-push.git",
+ "stars_count": 0,
+ "template": false,
+ "updated_at": "2024-01-24T18:59:25Z",
+ "url": "http://10.201.14.40:3000/api/v1/repos/root/example-push",
+ "watchers_count": 1,
+ "website": ""
+ },
+ "sender": {
+ "active": false,
+ "avatar_url": "http://10.201.14.40:3000/avatars/04edfc0ef6c6cf6d6b88fbc69f9f9071",
+ "created": "2024-01-24T18:57:32Z",
+ "description": "",
+ "email": "root@noreply.10.201.14.40",
+ "followers_count": 0,
+ "following_count": 0,
+ "full_name": "",
+ "id": 1,
+ "is_admin": false,
+ "language": "",
+ "last_login": "0001-01-01T00:00:00Z",
+ "location": "",
+ "login": "root",
+ "login_name": "",
+ "prohibit_login": false,
+ "restricted": false,
+ "starred_repos_count": 0,
+ "username": "root",
+ "visibility": "public",
+ "website": ""
+ },
+ "total_commits": 0
+ }
+
+-
+ id: 792
+ title: "update actions"
+ repo_id: 4
+ owner_id: 1
+ workflow_id: "artifact.yaml"
+ index: 188
+ trigger_user_id: 1
+ ref: "refs/heads/master"
+ commit_sha: "c2d72f548424103f01ee1dc02889c1e2bff816b0"
+ event: "push"
+ is_fork_pull_request: 0
+ status: 1
+ started: 1683636528
+ stopped: 1683636626
+ created: 1683636108
+ updated: 1683636626
+ need_approval: 0
+ approved_by: 0
+ event_payload: |
+ {
+ "after": "7a3858dc7f059543a8807a8b551304b7e362a7ef",
+ "before": "0000000000000000000000000000000000000000",
+ "commits": [
+ {
+ "added": [
+ ".forgejo/workflows/test.yml"
+ ],
+ "author": {
+ "email": "root@example.com",
+ "name": "username",
+ "username": "root"
+ },
+ "committer": {
+ "email": "root@example.com",
+ "name": "username",
+ "username": "root"
+ },
+ "id": "7a3858dc7f059543a8807a8b551304b7e362a7ef",
+ "message": "initial commit\n",
+ "modified": [],
+ "removed": [],
+ "timestamp": "2024-01-24T18:59:25Z",
+ "url": "http://10.201.14.40:3000/root/example-push/commit/7a3858dc7f059543a8807a8b551304b7e362a7ef",
+ "verification": null
+ }
+ ],
+ "compare_url": "http://10.201.14.40:3000/",
+ "head_commit": {
+ "added": [
+ ".forgejo/workflows/test.yml"
+ ],
+ "author": {
+ "email": "root@example.com",
+ "name": "username",
+ "username": "root"
+ },
+ "committer": {
+ "email": "root@example.com",
+ "name": "username",
+ "username": "root"
+ },
+ "id": "7a3858dc7f059543a8807a8b551304b7e362a7ef",
+ "message": "initial commit\n",
+ "modified": [],
+ "removed": [],
+ "timestamp": "2024-01-24T18:59:25Z",
+ "url": "http://10.201.14.40:3000/root/example-push/commit/7a3858dc7f059543a8807a8b551304b7e362a7ef",
+ "verification": null
+ },
+ "pusher": {
+ "active": false,
+ "avatar_url": "http://10.201.14.40:3000/avatars/04edfc0ef6c6cf6d6b88fbc69f9f9071",
+ "created": "2024-01-24T18:57:32Z",
+ "description": "",
+ "email": "root@noreply.10.201.14.40",
+ "followers_count": 0,
+ "following_count": 0,
+ "full_name": "",
+ "id": 1,
+ "is_admin": false,
+ "language": "",
+ "last_login": "0001-01-01T00:00:00Z",
+ "location": "",
+ "login": "root",
+ "login_name": "",
+ "prohibit_login": false,
+ "restricted": false,
+ "starred_repos_count": 0,
+ "username": "root",
+ "visibility": "public",
+ "website": ""
+ },
+ "ref": "refs/heads/main",
+ "repository": {
+ "allow_merge_commits": true,
+ "allow_rebase": true,
+ "allow_rebase_explicit": true,
+ "allow_rebase_update": true,
+ "allow_squash_merge": true,
+ "archived": false,
+ "archived_at": "1970-01-01T00:00:00Z",
+ "avatar_url": "",
+ "clone_url": "http://10.201.14.40:3000/root/example-push.git",
+ "created_at": "2024-01-24T18:59:25Z",
+ "default_allow_maintainer_edit": false,
+ "default_branch": "main",
+ "default_delete_branch_after_merge": false,
+ "default_merge_style": "merge",
+ "description": "",
+ "empty": false,
+ "fork": false,
+ "forks_count": 0,
+ "full_name": "root/example-push",
+ "has_actions": true,
+ "has_issues": true,
+ "has_packages": true,
+ "has_projects": true,
+ "has_pull_requests": true,
+ "has_releases": true,
+ "has_wiki": true,
+ "html_url": "http://10.201.14.40:3000/root/example-push",
+ "id": 2,
+ "ignore_whitespace_conflicts": false,
+ "internal": false,
+ "internal_tracker": {
+ "allow_only_contributors_to_track_time": true,
+ "enable_issue_dependencies": true,
+ "enable_time_tracker": true
+ },
+ "language": "",
+ "languages_url": "http://10.201.14.40:3000/api/v1/repos/root/example-push/languages",
+ "link": "",
+ "mirror": false,
+ "mirror_interval": "",
+ "mirror_updated": "0001-01-01T00:00:00Z",
+ "name": "example-push",
+ "object_format_name": "",
+ "open_issues_count": 0,
+ "open_pr_counter": 0,
+ "original_url": "",
+ "owner": {
+ "active": false,
+ "avatar_url": "http://10.201.14.40:3000/avatars/04edfc0ef6c6cf6d6b88fbc69f9f9071",
+ "created": "2024-01-24T18:57:32Z",
+ "description": "",
+ "email": "root@example.com",
+ "followers_count": 0,
+ "following_count": 0,
+ "full_name": "",
+ "id": 1,
+ "is_admin": false,
+ "language": "",
+ "last_login": "0001-01-01T00:00:00Z",
+ "location": "",
+ "login": "root",
+ "login_name": "",
+ "prohibit_login": false,
+ "restricted": false,
+ "starred_repos_count": 0,
+ "username": "root",
+ "visibility": "public",
+ "website": ""
+ },
+ "parent": null,
+ "permissions": {
+ "admin": true,
+ "pull": true,
+ "push": true
+ },
+ "private": false,
+ "release_counter": 0,
+ "repo_transfer": null,
+ "size": 25,
+ "ssh_url": "forgejo@10.201.14.40:root/example-push.git",
+ "stars_count": 0,
+ "template": false,
+ "updated_at": "2024-01-24T18:59:25Z",
+ "url": "http://10.201.14.40:3000/api/v1/repos/root/example-push",
+ "watchers_count": 1,
+ "website": ""
+ },
+ "sender": {
+ "active": false,
+ "avatar_url": "http://10.201.14.40:3000/avatars/04edfc0ef6c6cf6d6b88fbc69f9f9071",
+ "created": "2024-01-24T18:57:32Z",
+ "description": "",
+ "email": "root@noreply.10.201.14.40",
+ "followers_count": 0,
+ "following_count": 0,
+ "full_name": "",
+ "id": 1,
+ "is_admin": false,
+ "language": "",
+ "last_login": "0001-01-01T00:00:00Z",
+ "location": "",
+ "login": "root",
+ "login_name": "",
+ "prohibit_login": false,
+ "restricted": false,
+ "starred_repos_count": 0,
+ "username": "root",
+ "visibility": "public",
+ "website": ""
+ },
+ "total_commits": 0
+ }
+-
+ id: 891
+ title: "update actions"
+ repo_id: 1
+ owner_id: 1
+ workflow_id: "artifact.yaml"
+ index: 187
+ trigger_user_id: 1
+ ref: "refs/heads/branch2"
+ commit_sha: "985f0301dba5e7b34be866819cd15ad3d8f508ee"
+ event: "push"
+ is_fork_pull_request: 0
+ status: 1 # success
+ started: 1683636528
+ stopped: 1683636626
+ created: 1683636108
+ updated: 1683636626
+ need_approval: 0
+ approved_by: 0
+ event_payload: '{"head_commit":{"id":"5f22f7d0d95d614d25a5b68592adb345a4b5c7fd"}}'
diff --git a/models/fixtures/action_run_job.yml b/models/fixtures/action_run_job.yml
new file mode 100644
index 0000000..0b02d0e
--- /dev/null
+++ b/models/fixtures/action_run_job.yml
@@ -0,0 +1,42 @@
+-
+ id: 192
+ run_id: 791
+ repo_id: 4
+ owner_id: 1
+ commit_sha: c2d72f548424103f01ee1dc02889c1e2bff816b0
+ is_fork_pull_request: 0
+ name: job_2
+ attempt: 1
+ job_id: job_2
+ task_id: 47
+ status: 1
+ started: 1683636528
+ stopped: 1683636626
+-
+ id: 193
+ run_id: 792
+ repo_id: 4
+ owner_id: 1
+ commit_sha: c2d72f548424103f01ee1dc02889c1e2bff816b0
+ is_fork_pull_request: 0
+ name: job_2
+ attempt: 1
+ job_id: job_2
+ task_id: 48
+ status: 1
+ started: 1683636528
+ stopped: 1683636626
+-
+ id: 292
+ run_id: 891
+ repo_id: 1
+ owner_id: 1
+ commit_sha: 985f0301dba5e7b34be866819cd15ad3d8f508ee
+ is_fork_pull_request: 0
+ name: job_2
+ attempt: 1
+ job_id: job_2
+ task_id: 47
+ status: 1
+ started: 1683636528
+ stopped: 1683636626
diff --git a/models/fixtures/action_runner.yml b/models/fixtures/action_runner.yml
new file mode 100644
index 0000000..94deac9
--- /dev/null
+++ b/models/fixtures/action_runner.yml
@@ -0,0 +1,20 @@
+-
+ # A global runner
+ # Secret is 7e577e577e577e57feedfacefeedfacefeedface
+ id: 12345678
+ uuid: "37653537-3765-3537-3765-353737653537"
+ name: "test"
+ version: ""
+ owner_id: 0
+ repo_id: 0
+ description: ""
+ base: 0
+ repo_range: ""
+ token_hash: "3af8a56b850dba8848044385fedcfa4d9432e17de9f9803e4d279991394ac2945066ceb9a5e7cbe60a087d90d4bad03a8f9b"
+ token_salt: "832f8529db6151a1c3c605dd7570b58f"
+ last_online: 0
+ last_active: 0
+ agent_labels: '["woop", "doop"]'
+ created: 1716104432
+ updated: 1716104432
+ deleted: ~
diff --git a/models/fixtures/action_runner_token.yml b/models/fixtures/action_runner_token.yml
new file mode 100644
index 0000000..6520b7f
--- /dev/null
+++ b/models/fixtures/action_runner_token.yml
@@ -0,0 +1,35 @@
+-
+ id: 1 # instance scope
+ token: xeiWBL5kuTYxGPynHCqQdoeYmJAeG3IzGXCYTrDX
+ owner_id: 0
+ repo_id: 0
+ is_active: 1
+ created: 1695617748
+ updated: 1695617748
+
+-
+ id: 2 # user scope and can't be used
+ token: vohJB9QcZuSv1gAXESTk2uqpSjHhsKT9j4zYF84x
+ owner_id: 1
+ repo_id: 0
+ is_active: 0
+ created: 1695617749
+ updated: 1695617749
+
+-
+ id: 3 # user scope and can be used
+ token: gjItAeJ3CA74hNPmPPo0Zco8I1eMaNcP1jVifjOE
+ owner_id: 1
+ repo_id: 0
+ is_active: 1
+ created: 1695617750
+ updated: 1695617750
+
+-
+ id: 4 # repo scope
+ token: NOjLubxzFxPGhPXflZknys0gjVvQNhomFbAYuhbH
+ owner_id: 0
+ repo_id: 1
+ is_active: 1
+ created: 1695617751
+ updated: 1695617751
diff --git a/models/fixtures/action_task.yml b/models/fixtures/action_task.yml
new file mode 100644
index 0000000..d88a8ed
--- /dev/null
+++ b/models/fixtures/action_task.yml
@@ -0,0 +1,59 @@
+-
+ id: 46
+ attempt: 3
+ runner_id: 1
+ status: 3 # 3 is the status code for "cancelled"
+ started: 1683636528
+ stopped: 1683636626
+ repo_id: 4
+ owner_id: 1
+ commit_sha: c2d72f548424103f01ee1dc02889c1e2bff816b0
+ is_fork_pull_request: 0
+ token_hash: 6d8ef48297195edcc8e22c70b3020eaa06c52976db67d39b4260c64a69a2cc1508825121b7b8394e48e00b1bf8718b2aaaaa
+ token_salt: eeeeeeee
+ token_last_eight: eeeeeeee
+ log_filename: artifact-test2/2f/47.log
+ log_in_storage: 1
+ log_length: 707
+ log_size: 90179
+ log_expired: 0
+-
+ id: 47
+ job_id: 192
+ attempt: 3
+ runner_id: 1
+ status: 6 # 6 is the status code for "running"; a running task can upload artifacts
+ started: 1683636528
+ stopped: 1683636626
+ repo_id: 4
+ owner_id: 1
+ commit_sha: c2d72f548424103f01ee1dc02889c1e2bff816b0
+ is_fork_pull_request: 0
+ token_hash: 6d8ef48297195edcc8e22c70b3020eaa06c52976db67d39b4260c64a69a2cc1508825121b7b8394e48e00b1bf8718b2a867e
+ token_salt: jVuKnSPGgy
+ token_last_eight: eeb1a71a
+ log_filename: artifact-test2/2f/47.log
+ log_in_storage: 1
+ log_length: 707
+ log_size: 90179
+ log_expired: 0
+-
+ id: 48
+ job_id: 193
+ attempt: 1
+ runner_id: 1
+ status: 6 # 6 is the status code for "running"; a running task can upload artifacts
+ started: 1683636528
+ stopped: 1683636626
+ repo_id: 4
+ owner_id: 1
+ commit_sha: c2d72f548424103f01ee1dc02889c1e2bff816b0
+ is_fork_pull_request: 0
+ token_hash: ffffcfffffffbffffffffffffffffefffffffafffffffffffffffffffffffffffffdffffffffffffffffffffffffffffffff
+ token_salt: ffffffffff
+ token_last_eight: ffffffff
+ log_filename: artifact-test2/2f/47.log
+ log_in_storage: 1
+ log_length: 707
+ log_size: 90179
+ log_expired: 0
diff --git a/models/fixtures/attachment.yml b/models/fixtures/attachment.yml
new file mode 100644
index 0000000..7882d8b
--- /dev/null
+++ b/models/fixtures/attachment.yml
@@ -0,0 +1,155 @@
+-
+ id: 1
+ uuid: a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11
+ repo_id: 1
+ issue_id: 1
+ release_id: 0
+ uploader_id: 0
+ comment_id: 0
+ name: attach1
+ download_count: 0
+ size: 0
+ created_unix: 946684800
+
+-
+ id: 2
+ uuid: a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a12
+ repo_id: 2
+ issue_id: 4
+ release_id: 0
+ uploader_id: 0
+ comment_id: 0
+ name: attach2
+ download_count: 1
+ size: 0
+ created_unix: 946684800
+
+-
+ id: 3
+ uuid: a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a13
+ repo_id: 1
+ issue_id: 2
+ release_id: 0
+ uploader_id: 0
+ comment_id: 1
+ name: attach1
+ download_count: 0
+ size: 0
+ created_unix: 946684800
+
+-
+ id: 4
+ uuid: a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a14
+ repo_id: 1
+ issue_id: 3
+ release_id: 0
+ uploader_id: 0
+ comment_id: 1
+ name: attach2
+ download_count: 1
+ size: 0
+ created_unix: 946684800
+
+-
+ id: 5
+ uuid: a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a15
+ repo_id: 2
+ issue_id: 4
+ release_id: 0
+ uploader_id: 0
+ comment_id: 0
+ name: attach1
+ download_count: 0
+ size: 0
+ created_unix: 946684800
+
+-
+ id: 6
+ uuid: a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a16
+ repo_id: 1
+ issue_id: 5
+ release_id: 0
+ uploader_id: 0
+ comment_id: 2
+ name: attach1
+ download_count: 0
+ size: 0
+ created_unix: 946684800
+
+-
+ id: 7
+ uuid: a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a17
+ repo_id: 1
+ issue_id: 5
+ release_id: 0
+ uploader_id: 0
+ comment_id: 2
+ name: attach1
+ download_count: 0
+ size: 0
+ created_unix: 946684800
+
+-
+ id: 8
+ uuid: a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a18
+ repo_id: 3
+ issue_id: 6
+ release_id: 0
+ uploader_id: 0
+ comment_id: 0
+ name: attach1
+ download_count: 0
+ size: 0
+ created_unix: 946684800
+
+-
+ id: 9
+ uuid: a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a19
+ repo_id: 1
+ issue_id: 0
+ release_id: 1
+ uploader_id: 0
+ comment_id: 0
+ name: attach1
+ download_count: 0
+ size: 0
+ created_unix: 946684800
+
+-
+ id: 10
+ uuid: a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a20
+ repo_id: 0 # TestGetAttachment/NotLinked
+ issue_id: 0
+ release_id: 0
+ uploader_id: 8
+ comment_id: 0
+ name: attach1
+ download_count: 0
+ size: 0
+ created_unix: 946684800
+
+-
+ id: 11
+ uuid: a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a21
+ repo_id: 40
+ issue_id: 0
+ release_id: 2
+ uploader_id: 0
+ comment_id: 0
+ name: attach1
+ download_count: 0
+ size: 0
+ created_unix: 946684800
+
+-
+ id: 12
+ uuid: a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a22
+ repo_id: 2
+ issue_id: 0
+ release_id: 11
+ uploader_id: 2
+ comment_id: 0
+ name: README.md
+ download_count: 0
+ size: 0
+ created_unix: 946684800
diff --git a/models/fixtures/branch.yml b/models/fixtures/branch.yml
new file mode 100644
index 0000000..9300304
--- /dev/null
+++ b/models/fixtures/branch.yml
@@ -0,0 +1,47 @@
+-
+ id: 1
+ repo_id: 1
+ name: 'foo'
+ commit_id: '65f1bf27bc3bf70f64657658635e66094edbcb4d'
+ commit_message: 'first commit'
+ commit_time: 978307100
+ pusher_id: 1
+ is_deleted: true
+ deleted_by_id: 1
+ deleted_unix: 978307200
+
+-
+ id: 2
+ repo_id: 1
+ name: 'bar'
+ commit_id: '62fb502a7172d4453f0322a2cc85bddffa57f07a'
+ commit_message: 'second commit'
+ commit_time: 978307100
+ pusher_id: 1
+ is_deleted: true
+ deleted_by_id: 99
+ deleted_unix: 978307200
+
+-
+ id: 3
+ repo_id: 1
+ name: 'branch2'
+ commit_id: '985f0301dba5e7b34be866819cd15ad3d8f508ee'
+ commit_message: 'make pull5 outdated'
+ commit_time: 1579166279
+ pusher_id: 1
+ is_deleted: false
+ deleted_by_id: 0
+ deleted_unix: 0
+
+-
+ id: 4
+ repo_id: 1
+ name: 'master'
+ commit_id: '65f1bf27bc3bf70f64657658635e66094edbcb4d'
+ commit_message: 'Initial commit'
+ commit_time: 1489927679
+ pusher_id: 1
+ is_deleted: false
+ deleted_by_id: 0
+ deleted_unix: 0
diff --git a/models/fixtures/collaboration.yml b/models/fixtures/collaboration.yml
new file mode 100644
index 0000000..7603bda
--- /dev/null
+++ b/models/fixtures/collaboration.yml
@@ -0,0 +1,53 @@
+-
+ id: 1
+ repo_id: 3
+ user_id: 2
+ mode: 2 # write
+
+-
+ id: 2
+ repo_id: 4
+ user_id: 4
+ mode: 2 # write
+
+-
+ id: 3
+ repo_id: 40
+ user_id: 4
+ mode: 2 # write
+
+-
+ id: 4
+ repo_id: 4
+ user_id: 29
+ mode: 2 # write
+
+-
+ id: 5
+ repo_id: 21
+ user_id: 15
+ mode: 2 # write
+
+-
+ id: 6
+ repo_id: 21
+ user_id: 18
+ mode: 2 # write
+
+-
+ id: 7
+ repo_id: 22
+ user_id: 15
+ mode: 2 # write
+
+-
+ id: 8
+ repo_id: 22
+ user_id: 18
+ mode: 2 # write
+
+-
+ id: 9
+ repo_id: 60
+ user_id: 38
+ mode: 2 # write
diff --git a/models/fixtures/comment.yml b/models/fixtures/comment.yml
new file mode 100644
index 0000000..f412128
--- /dev/null
+++ b/models/fixtures/comment.yml
@@ -0,0 +1,115 @@
+-
+ id: 1
+ type: 7 # label
+ poster_id: 2
+ issue_id: 1 # in repo_id 1
+ label_id: 1
+ content: "1"
+ created_unix: 946684810
+-
+ id: 2
+ type: 0 # comment
+ poster_id: 3 # user not watching (see watch.yml)
+ issue_id: 1 # in repo_id 1
+ content: "good work!"
+ created_unix: 946684811
+ updated_unix: 946684811
+ content_version: 1
+-
+ id: 3
+ type: 0 # comment
+ poster_id: 5 # user not watching (see watch.yml)
+ issue_id: 1 # in repo_id 1
+ content: "meh..."
+ created_unix: 946684812
+ updated_unix: 946684812
+-
+ id: 4
+ type: 21 # code comment
+ poster_id: 1
+ issue_id: 2
+ content: "meh..."
+ review_id: 4
+ line: 4
+ tree_path: "README.md"
+ created_unix: 946684812
+ invalidated: false
+ content_version: 1
+-
+ id: 5
+ type: 21 # code comment
+ poster_id: 1
+ issue_id: 2
+ content: "meh..."
+ line: -4
+ tree_path: "README.md"
+ created_unix: 946684812
+ invalidated: false
+
+-
+ id: 6
+ type: 21 # code comment
+ poster_id: 1
+ issue_id: 2
+ content: "it's already invalidated. boring..."
+ line: -4
+ tree_path: "README.md"
+ created_unix: 946684812
+ invalidated: true
+
+-
+ id: 7
+ type: 21 # code comment
+ poster_id: 100
+ issue_id: 3
+ content: "a review from a deleted user"
+ line: -4
+ review_id: 10
+ tree_path: "README.md"
+ created_unix: 946684812
+ invalidated: true
+
+-
+ id: 8
+ type: 0 # comment
+ poster_id: 2
+ issue_id: 4 # in repo_id 2
+ content: "comment in private pository"
+ created_unix: 946684811
+ updated_unix: 946684811
+
+-
+ id: 9
+ type: 22 # review
+ poster_id: 2
+ issue_id: 2 # in repo_id 1
+ review_id: 20
+ created_unix: 946684810
+
+-
+ id: 10
+ type: 0
+ poster_id: 1
+ issue_id: 1 # in repo_id 1
+ content: "test markup light/dark-mode-only ![GitHub-Mark-Light](https://user-images.githubusercontent.com/3369400/139447912-e0f43f33-6d9f-45f8-be46-2df5bbc91289.png#gh-dark-mode-only)![GitHub-Mark-Dark](https://user-images.githubusercontent.com/3369400/139448065-39a229ba-4b06-434b-bc67-616e2ed80c8f.png#gh-light-mode-only)"
+ created_unix: 946684813
+ updated_unix: 946684813
+
+-
+ id: 11
+ type: 22 # review
+ poster_id: 5
+ issue_id: 3 # in repo_id 1
+ content: "reviewed by user5"
+ review_id: 21
+ created_unix: 946684816
+
+-
+ id: 12
+ type: 27 # review request
+ poster_id: 2
+ issue_id: 3 # in repo_id 1
+ content: "review request for user5"
+ review_id: 22
+ assignee_id: 5
+ created_unix: 946684817
diff --git a/models/fixtures/commit_status.yml b/models/fixtures/commit_status.yml
new file mode 100644
index 0000000..0ba6caa
--- /dev/null
+++ b/models/fixtures/commit_status.yml
@@ -0,0 +1,65 @@
+-
+ id: 1
+ index: 1
+ repo_id: 1
+ state: "pending"
+ sha: "1234123412341234123412341234123412341234"
+ target_url: https://example.com/builds/
+ description: My awesome CI-service
+ context: ci/awesomeness
+ creator_id: 2
+
+-
+ id: 2
+ index: 2
+ repo_id: 1
+ state: "warning"
+ sha: "1234123412341234123412341234123412341234"
+ target_url: https://example.com/coverage/
+ description: My awesome Coverage service
+ context: cov/awesomeness
+ creator_id: 2
+
+-
+ id: 3
+ index: 3
+ repo_id: 1
+ state: "success"
+ sha: "1234123412341234123412341234123412341234"
+ target_url: https://example.com/coverage/
+ description: My awesome Coverage service
+ context: cov/awesomeness
+ creator_id: 2
+
+-
+ id: 4
+ index: 4
+ repo_id: 1
+ state: "failure"
+ sha: "1234123412341234123412341234123412341234"
+ target_url: https://example.com/builds/
+ description: My awesome CI-service
+ context: ci/awesomeness
+ creator_id: 2
+
+-
+ id: 5
+ index: 5
+ repo_id: 1
+ state: "error"
+ sha: "1234123412341234123412341234123412341234"
+ target_url: https://example.com/builds/
+ description: My awesome deploy service
+ context: deploy/awesomeness
+ creator_id: 2
+
+-
+ id: 6
+ index: 6
+ repo_id: 62
+ state: "failure"
+ sha: "774f93df12d14931ea93259ae93418da4482fcc1"
+ target_url: "/user2/test_workflows/actions"
+ description: My awesome deploy service
+ context: deploy/awesomeness
+ creator_id: 2
diff --git a/models/fixtures/commit_status_index.yml b/models/fixtures/commit_status_index.yml
new file mode 100644
index 0000000..f63343b
--- /dev/null
+++ b/models/fixtures/commit_status_index.yml
@@ -0,0 +1,5 @@
+-
+ id: 1
+ repo_id: 1
+ sha: "1234123412341234123412341234123412341234"
+ max_index: 5
diff --git a/models/fixtures/deploy_key.yml b/models/fixtures/deploy_key.yml
new file mode 100644
index 0000000..ca780a7
--- /dev/null
+++ b/models/fixtures/deploy_key.yml
@@ -0,0 +1 @@
+[] # empty
diff --git a/models/fixtures/email_address.yml b/models/fixtures/email_address.yml
new file mode 100644
index 0000000..b2a0432
--- /dev/null
+++ b/models/fixtures/email_address.yml
@@ -0,0 +1,319 @@
+-
+ id: 1
+ uid: 11
+ email: user11@example.com
+ lower_email: user11@example.com
+ is_activated: false
+ is_primary: true
+
+-
+ id: 2
+ uid: 12
+ email: user12@example.com
+ lower_email: user12@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 3
+ uid: 2
+ email: user2@example.com
+ lower_email: user2@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 4
+ uid: 21
+ email: user21@example.com
+ lower_email: user21@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 5
+ uid: 9999999
+ email: user9999999@example.com
+ lower_email: user9999999@example.com
+ is_activated: true
+ is_primary: false
+
+-
+ id: 6
+ uid: 10
+ email: user10@example.com
+ lower_email: user10@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 7
+ uid: 10
+ email: user101@example.com
+ lower_email: user101@example.com
+ is_activated: true
+ is_primary: false
+
+-
+ id: 8
+ uid: 9
+ email: user9@example.com
+ lower_email: user9@example.com
+ is_activated: false
+ is_primary: true
+
+-
+ id: 9
+ uid: 1
+ email: user1@example.com
+ lower_email: user1@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 10
+ uid: 3
+ email: org3@example.com
+ lower_email: org3@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 11
+ uid: 4
+ email: user4@example.com
+ lower_email: user4@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 12
+ uid: 5
+ email: user5@example.com
+ lower_email: user5@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 13
+ uid: 6
+ email: org6@example.com
+ lower_email: org6@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 14
+ uid: 7
+ email: org7@example.com
+ lower_email: org7@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 15
+ uid: 8
+ email: user8@example.com
+ lower_email: user8@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 16
+ uid: 13
+ email: user13@example.com
+ lower_email: user13@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 17
+ uid: 14
+ email: user14@example.com
+ lower_email: user14@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 18
+ uid: 15
+ email: user15@example.com
+ lower_email: user15@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 19
+ uid: 16
+ email: user16@example.com
+ lower_email: user16@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 20
+ uid: 17
+ email: org17@example.com
+ lower_email: org17@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 21
+ uid: 18
+ email: user18@example.com
+ lower_email: user18@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 22
+ uid: 19
+ email: org19@example.com
+ lower_email: org19@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 23
+ uid: 20
+ email: user20@example.com
+ lower_email: user20@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 24
+ uid: 22
+ email: limited_org@example.com
+ lower_email: limited_org@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 25
+ uid: 23
+ email: privated_org@example.com
+ lower_email: privated_org@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 26
+ uid: 24
+ email: user24@example.com
+ lower_email: user24@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 27
+ uid: 25
+ email: org25@example.com
+ lower_email: org25@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 28
+ uid: 26
+ email: org26@example.com
+ lower_email: org26@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 29
+ uid: 27
+ email: user27@example.com
+ lower_email: user27@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 30
+ uid: 28
+ email: user28@example.com
+ lower_email: user28@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 31
+ uid: 29
+ email: user29@example.com
+ lower_email: user29@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 32
+ uid: 30
+ email: user30@example.com
+ lower_email: user30@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 33
+ uid: 1
+ email: user1-2@example.com
+ lower_email: user1-2@example.com
+ is_activated: true
+ is_primary: false
+
+-
+ id: 34
+ uid: 1
+ email: user1-3@example.com
+ lower_email: user1-3@example.com
+ is_activated: true
+ is_primary: false
+
+-
+ id: 35
+ uid: 2
+ email: user2-2@example.com
+ lower_email: user2-2@example.com
+ is_activated: false
+ is_primary: false
+
+-
+ id: 36
+ uid: 36
+ email: abcde@gitea.com
+ lower_email: abcde@gitea.com
+ is_activated: true
+ is_primary: false
+
+-
+ id: 37
+ uid: 37
+ email: user37@example.com
+ lower_email: user37@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 38
+ uid: 38
+ email: user38@example.com
+ lower_email: user38@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 39
+ uid: 39
+ email: user39@example.com
+ lower_email: user39@example.com
+ is_activated: true
+ is_primary: true
+
+-
+ id: 40
+ uid: 40
+ email: user40@example.com
+ lower_email: user40@example.com
+ is_activated: true
+ is_primary: true
diff --git a/models/fixtures/external_login_user.yml b/models/fixtures/external_login_user.yml
new file mode 100644
index 0000000..ca780a7
--- /dev/null
+++ b/models/fixtures/external_login_user.yml
@@ -0,0 +1 @@
+[] # empty
diff --git a/models/fixtures/follow.yml b/models/fixtures/follow.yml
new file mode 100644
index 0000000..b8d3582
--- /dev/null
+++ b/models/fixtures/follow.yml
@@ -0,0 +1,19 @@
+-
+ id: 1
+ user_id: 4
+ follow_id: 2
+
+-
+ id: 2
+ user_id: 8
+ follow_id: 2
+
+-
+ id: 3
+ user_id: 2
+ follow_id: 8
+
+-
+ id: 4
+ user_id: 31
+ follow_id: 33
diff --git a/models/fixtures/forgejo_blocked_user.yml b/models/fixtures/forgejo_blocked_user.yml
new file mode 100644
index 0000000..88c378a
--- /dev/null
+++ b/models/fixtures/forgejo_blocked_user.yml
@@ -0,0 +1,5 @@
+-
+ id: 1
+ user_id: 4
+ block_id: 1
+ created_unix: 1671607299
diff --git a/models/fixtures/gpg_key.yml b/models/fixtures/gpg_key.yml
new file mode 100644
index 0000000..2d54313
--- /dev/null
+++ b/models/fixtures/gpg_key.yml
@@ -0,0 +1,23 @@
+-
+ id: 5
+ owner_id: 36
+ key_id: B15431642629B826
+ primary_key_id:
+ content: xsDNBGTrY3UBDAC2HLBqmMplAV15qSnC7g1c4dV406f5EHNhFr95Nup2My6b2eafTlvedv77s8PT/I7F3fy4apOZs5A7w2SsPlLMcQ3ev4uGOsxRtkq5RLy1Yb6SNueX0Da2UVKR5KTC5Q6BWaqxwS0IjKOLZ/xz0Pbe/ClV3bZSKBEY2omkVo3Z0HZ771vB2clPRvGJ/IdeKOsZ3ZytSFXfyiJBdARmeSPmydXLil8+Ibq5iLAeow5PK8hK1TCOnKHzLWNqcNq70tyjoHvcGi70iGjoVEEUgPCLLuU8WmzTJwlvA3BuDzjtaO7TLo/jdE6iqkHtMSS8x+43sAH6hcFRCWAVh/0Uq7n36uGDfNxGnX3YrmX3LR9x5IsBES1rGGWbpxio4o5GIf/Xd+JgDd9rzJCqRuZ3/sW/TxK38htWaVNZV0kMkHUCTc1ctzWpCm635hbFCHBhPYIp+/z206khkAKDbz/CNuU91Wazsh7KO07wrwDtxfDDbInJ8TfHE2TGjzjQzgChfmcAEQEAAQ==
+ verified: true
+ can_sign: true
+ can_encrypt_comms: true
+ can_encrypt_storage: true
+ can_certify: true
+
+-
+ id: 6
+ owner_id: 36
+ key_id: EE3AF48454AFD619
+ primary_key_id: B15431642629B826
+ content: zsDNBGTrY3UBDADsHrzuOicQaPdUQm0+0UNrs92cESm/j/4yBBUk+sfLZAo6J99c4eh4nAQzzZ7al080rYKB0G+7xoRz1eHcQH6zrVcqB8KYtf/sdY47WaMiMyxM+kTSvzp7tsv7QuSQZ0neUEXRyYMz5ttBfIjWUd+3NDItuHyB+MtNWlS3zXgaUbe5VifqKaNmzN0Ye4yXTKcpypE3AOqPVz+iIFv3c6TmsqLHJaR4VoicCleAqLyF/28WsJO7M9dDW+EM3MZVnsVpycTURyHAJGfSk10waQZAaRwmarCN/q0KEJ+aEAK/SRliUneBZoMO5hY5iBeG432tofwaQqAahPv9uXIb1n2JEMKwnMlMA9UGD1AcDbywfj1m/ZGBBw95i4Ekkfn43RvV3THr7uJU/dRqqP+iic4MwpUrOxqELW/kmeHXlBcNbZZhEEvwRoW7U2/9eeuog4nRleRJ0pi/xOP9wmxkKjaIPIK3phdBtEpVk4w/UTAWNdyIIrFggukeAnZFyGJwlm8AEQEAAQ==
+ verified: true
+ can_sign: true
+ can_encrypt_comms: true
+ can_encrypt_storage: true
+ can_certify: true
diff --git a/models/fixtures/gpg_key_import.yml b/models/fixtures/gpg_key_import.yml
new file mode 100644
index 0000000..ca780a7
--- /dev/null
+++ b/models/fixtures/gpg_key_import.yml
@@ -0,0 +1 @@
+[] # empty
diff --git a/models/fixtures/hook_task.yml b/models/fixtures/hook_task.yml
new file mode 100644
index 0000000..fc0e03b
--- /dev/null
+++ b/models/fixtures/hook_task.yml
@@ -0,0 +1,43 @@
+-
+ id: 1
+ hook_id: 1
+ uuid: uuid1
+ is_delivered: true
+ is_succeed: false
+ request_content: >
+ {
+ "url": "/matrix-delivered",
+ "http_method":"PUT",
+ "headers": {
+ "X-Head": "42"
+ },
+ "body": "{}"
+ }
+
+-
+ id: 2
+ hook_id: 1
+ uuid: uuid2
+ is_delivered: false
+
+-
+ id: 3
+ hook_id: 1
+ uuid: uuid3
+ is_delivered: true
+ is_succeed: true
+ payload_content: '{"key":"value"}' # legacy task, payload saved in payload_content (and not in request_content)
+ request_content: >
+ {
+ "url": "/matrix-success",
+ "http_method":"PUT",
+ "headers": {
+ "X-Head": "42"
+ }
+ }
+
+-
+ id: 4
+ hook_id: 3
+ uuid: uuid4
+ is_delivered: false
diff --git a/models/fixtures/issue.yml b/models/fixtures/issue.yml
new file mode 100644
index 0000000..adb407f
--- /dev/null
+++ b/models/fixtures/issue.yml
@@ -0,0 +1,374 @@
+-
+ id: 1
+ repo_id: 1
+ index: 1
+ poster_id: 1
+ original_author_id: 0
+ name: issue1
+ content: content for the first issue
+ milestone_id: 0
+ priority: 0
+ is_closed: false
+ is_pull: false
+ num_comments: 3
+ created_unix: 946684800
+ updated_unix: 978307200
+ is_locked: false
+
+-
+ id: 2
+ repo_id: 1
+ index: 2
+ poster_id: 1
+ original_author_id: 0
+ name: issue2
+ content: content for the second issue
+ milestone_id: 1
+ priority: 0
+ is_closed: false
+ is_pull: true
+ num_comments: 0
+ created_unix: 946684810
+ updated_unix: 978307190
+ is_locked: false
+
+-
+ id: 3
+ repo_id: 1
+ index: 3
+ poster_id: 1
+ original_author_id: 0
+ name: issue3
+ content: content for the third issue
+ milestone_id: 3
+ priority: 0
+ is_closed: false
+ is_pull: true
+ num_comments: 0
+ created_unix: 946684820
+ updated_unix: 978307180
+ is_locked: false
+
+-
+ id: 4
+ repo_id: 2
+ index: 1
+ poster_id: 2
+ original_author_id: 0
+ name: issue4
+ content: content for the fourth issue
+ milestone_id: 0
+ priority: 0
+ is_closed: true
+ is_pull: false
+ num_comments: 1
+ created_unix: 946684830
+ updated_unix: 978307200
+ is_locked: false
+
+-
+ id: 5
+ repo_id: 1
+ index: 4
+ poster_id: 2
+ original_author_id: 0
+ name: issue5
+ content: content for the fifth issue
+ milestone_id: 0
+ priority: 0
+ is_closed: true
+ is_pull: false
+ num_comments: 0
+ created_unix: 946684840
+ updated_unix: 978307200
+ is_locked: false
+
+-
+ id: 6
+ repo_id: 3
+ index: 1
+ poster_id: 1
+ original_author_id: 0
+ name: issue6
+ content: content6
+ milestone_id: 0
+ priority: 0
+ is_closed: false
+ is_pull: false
+ num_comments: 0
+ created_unix: 946684850
+ updated_unix: 978307200
+ is_locked: false
+
+-
+ id: 7
+ repo_id: 2
+ index: 2
+ poster_id: 2
+ original_author_id: 0
+ name: issue7
+ content: content for the seventh issue
+ milestone_id: 0
+ priority: 0
+ is_closed: false
+ is_pull: false
+ num_comments: 0
+ created_unix: 946684830
+ updated_unix: 978307200
+ is_locked: false
+
+-
+ id: 8
+ repo_id: 10
+ index: 1
+ poster_id: 11
+ original_author_id: 0
+ name: pr2
+ content: a pull request
+ milestone_id: 0
+ priority: 0
+ is_closed: false
+ is_pull: true
+ num_comments: 0
+ created_unix: 946684820
+ updated_unix: 978307180
+ is_locked: false
+
+-
+ id: 9
+ repo_id: 48
+ index: 1
+ poster_id: 11
+ original_author_id: 0
+ name: pr1
+ content: a pull request
+ milestone_id: 0
+ priority: 0
+ is_closed: false
+ is_pull: true
+ num_comments: 0
+ created_unix: 946684820
+ updated_unix: 978307180
+ is_locked: false
+
+-
+ id: 10
+ repo_id: 42
+ index: 1
+ poster_id: 500
+ original_author_id: 0
+ name: issue from deleted account
+ content: content from deleted account
+ milestone_id: 0
+ priority: 0
+ is_closed: false
+ is_pull: false
+ num_comments: 0
+ deadline_unix: 1019307200
+ created_unix: 946684830
+ updated_unix: 999307200
+ is_locked: false
+
+-
+ id: 11
+ repo_id: 1
+ index: 5
+ poster_id: 1
+ original_author_id: 0
+ name: pull5
+ content: content for a pull request
+ milestone_id: 0
+ priority: 0
+ is_closed: false
+ is_pull: true
+ num_comments: 0
+ created_unix: 1579194806
+ updated_unix: 1579194806
+ is_locked: false
+
+-
+ id: 12
+ repo_id: 3
+ index: 2
+ poster_id: 2
+ original_author_id: 0
+ name: pull6
+ content: content for a pull request
+ milestone_id: 0
+ priority: 0
+ is_closed: false
+ is_pull: true
+ num_comments: 0
+ created_unix: 1602935696
+ updated_unix: 1602935696
+ is_locked: false
+
+-
+ id: 13
+ repo_id: 50
+ index: 1
+ poster_id: 2
+ original_author_id: 0
+ name: issue in active repo
+ content: we'll be testing github issue 13171 with this.
+ milestone_id: 0
+ priority: 0
+ is_closed: false
+ is_pull: false
+ num_comments: 0
+ created_unix: 1602935696
+ updated_unix: 1602935696
+ is_locked: false
+
+-
+ id: 14
+ repo_id: 51
+ index: 1
+ poster_id: 2
+ original_author_id: 0
+ name: issue in archived repo
+ content: we'll be testing github issue 13171 with this.
+ milestone_id: 0
+ priority: 0
+ is_closed: false
+ is_pull: false
+ num_comments: 0
+ created_unix: 1602935696
+ updated_unix: 1602935696
+ is_locked: false
+
+-
+ id: 15
+ repo_id: 5
+ index: 1
+ poster_id: 2
+ original_author_id: 0
+ name: issue in repo not linked to team1
+ content: content
+ milestone_id: 0
+ priority: 0
+ is_closed: false
+ is_pull: false
+ num_comments: 0
+ created_unix: 1602935696
+ updated_unix: 1602935696
+ is_locked: false
+
+-
+ id: 16
+ repo_id: 32
+ index: 1
+ poster_id: 2
+ original_author_id: 0
+ name: just a normal issue
+ content: content
+ milestone_id: 0
+ priority: 0
+ is_closed: false
+ is_pull: false
+ num_comments: 0
+ created_unix: 1602935696
+ updated_unix: 1602935696
+ is_locked: false
+
+-
+ id: 17
+ repo_id: 32
+ index: 2
+ poster_id: 15
+ original_author_id: 0
+ name: an issue with an assignment
+ content: content
+ milestone_id: 0
+ priority: 0
+ is_closed: false
+ is_pull: false
+ num_comments: 0
+ created_unix: 1602935696
+ updated_unix: 1602935696
+ is_locked: false
+
+-
+ id: 18
+ repo_id: 55
+ index: 1
+ poster_id: 2
+ original_author_id: 0
+ name: issue for scoped labels
+ content: content
+ milestone_id: 0
+ priority: 0
+ is_closed: false
+ is_pull: false
+ num_comments: 0
+ created_unix: 946684830
+ updated_unix: 978307200
+ is_locked: false
+
+-
+ id: 19
+ repo_id: 58
+ index: 1
+ poster_id: 2
+ original_author_id: 0
+ name: issue for pr
+ content: content
+ milestone_id: 0
+ priority: 0
+ is_closed: false
+ is_pull: true
+ num_comments: 0
+ created_unix: 946684830
+ updated_unix: 978307200
+ is_locked: false
+
+-
+ id: 20
+ repo_id: 23
+ index: 1
+ poster_id: 2
+ original_author_id: 0
+ name: issue for pr
+ content: content
+ milestone_id: 0
+ priority: 0
+ is_closed: false
+ is_pull: true
+ num_comments: 0
+ created_unix: 978307210
+ updated_unix: 978307210
+ is_locked: false
+
+-
+ id: 21
+ repo_id: 60
+ index: 1
+ poster_id: 39
+ original_author_id: 0
+ name: repo60 pull1
+ content: content for the 1st issue
+ milestone_id: 0
+ priority: 0
+ is_closed: false
+ is_pull: true
+ num_comments: 0
+ created_unix: 1707270422
+ updated_unix: 1707270422
+ is_locked: false
+
+-
+ id: 22
+ repo_id: 61
+ index: 1
+ poster_id: 40
+ original_author_id: 0
+ name: repo61 pull1
+ content: content for the 1st issue
+ milestone_id: 0
+ priority: 0
+ is_closed: false
+ is_pull: true
+ num_comments: 0
+ created_unix: 1707270422
+ updated_unix: 1707270422
+ is_locked: false
diff --git a/models/fixtures/issue_assignees.yml b/models/fixtures/issue_assignees.yml
new file mode 100644
index 0000000..e5d36f9
--- /dev/null
+++ b/models/fixtures/issue_assignees.yml
@@ -0,0 +1,16 @@
+-
+ id: 1
+ assignee_id: 1
+ issue_id: 1
+-
+ id: 2
+ assignee_id: 1
+ issue_id: 6
+-
+ id: 3
+ assignee_id: 2
+ issue_id: 6
+-
+ id: 4
+ assignee_id: 2
+ issue_id: 17
diff --git a/models/fixtures/issue_index.yml b/models/fixtures/issue_index.yml
new file mode 100644
index 0000000..de6e955
--- /dev/null
+++ b/models/fixtures/issue_index.yml
@@ -0,0 +1,27 @@
+-
+ group_id: 1
+ max_index: 5
+-
+ group_id: 2
+ max_index: 2
+-
+ group_id: 3
+ max_index: 2
+-
+ group_id: 10
+ max_index: 1
+-
+ group_id: 32
+ max_index: 2
+-
+ group_id: 48
+ max_index: 1
+-
+ group_id: 42
+ max_index: 1
+-
+ group_id: 50
+ max_index: 1
+-
+ group_id: 51
+ max_index: 1
diff --git a/models/fixtures/issue_label.yml b/models/fixtures/issue_label.yml
new file mode 100644
index 0000000..f4ecb1f
--- /dev/null
+++ b/models/fixtures/issue_label.yml
@@ -0,0 +1,19 @@
+-
+ id: 1
+ issue_id: 1
+ label_id: 1
+
+-
+ id: 2
+ issue_id: 5
+ label_id: 2
+
+-
+ id: 3
+ issue_id: 2
+ label_id: 1
+
+-
+ id: 4
+ issue_id: 2
+ label_id: 4
diff --git a/models/fixtures/issue_user.yml b/models/fixtures/issue_user.yml
new file mode 100644
index 0000000..6482431
--- /dev/null
+++ b/models/fixtures/issue_user.yml
@@ -0,0 +1,20 @@
+-
+ id: 1
+ uid: 1
+ issue_id: 1
+ is_read: true
+ is_mentioned: false
+
+-
+ id: 2
+ uid: 2
+ issue_id: 1
+ is_read: true
+ is_mentioned: false
+
+-
+ id: 3
+ uid: 4
+ issue_id: 1
+ is_read: false
+ is_mentioned: true
diff --git a/models/fixtures/issue_watch.yml b/models/fixtures/issue_watch.yml
new file mode 100644
index 0000000..4bc3ff1
--- /dev/null
+++ b/models/fixtures/issue_watch.yml
@@ -0,0 +1,31 @@
+-
+ id: 1
+ user_id: 9
+ issue_id: 1
+ is_watching: true
+ created_unix: 946684800
+ updated_unix: 946684800
+
+-
+ id: 2
+ user_id: 2
+ issue_id: 2
+ is_watching: false
+ created_unix: 946684800
+ updated_unix: 946684800
+
+-
+ id: 3
+ user_id: 2
+ issue_id: 7
+ is_watching: true
+ created_unix: 946684800
+ updated_unix: 946684800
+
+-
+ id: 4
+ user_id: 1
+ issue_id: 7
+ is_watching: false
+ created_unix: 946684800
+ updated_unix: 946684800
diff --git a/models/fixtures/label.yml b/models/fixtures/label.yml
new file mode 100644
index 0000000..2242b90
--- /dev/null
+++ b/models/fixtures/label.yml
@@ -0,0 +1,98 @@
+-
+ id: 1
+ repo_id: 1
+ org_id: 0
+ name: label1
+ color: '#abcdef'
+ exclusive: false
+ num_issues: 2
+ num_closed_issues: 0
+ archived_unix: 0
+
+-
+ id: 2
+ repo_id: 1
+ org_id: 0
+ name: label2
+ color: '#000000'
+ exclusive: false
+ num_issues: 1
+ num_closed_issues: 1
+ archived_unix: 0
+
+-
+ id: 3
+ repo_id: 0
+ org_id: 3
+ name: orglabel3
+ color: '#abcdef'
+ exclusive: false
+ num_issues: 0
+ num_closed_issues: 0
+ archived_unix: 0
+
+-
+ id: 4
+ repo_id: 0
+ org_id: 3
+ name: orglabel4
+ color: '#000000'
+ exclusive: false
+ num_issues: 1
+ num_closed_issues: 0
+ archived_unix: 0
+
+-
+ id: 5
+ repo_id: 10
+ org_id: 0
+ name: pull-test-label
+ color: '#000000'
+ exclusive: false
+ num_issues: 0
+ num_closed_issues: 0
+ archived_unix: 0
+
+-
+ id: 6
+ repo_id: 55
+ org_id: 0
+ name: unscoped_label
+ color: '#000000'
+ exclusive: false
+ num_issues: 0
+ num_closed_issues: 0
+ archived_unix: 0
+
+-
+ id: 7
+ repo_id: 55
+ org_id: 0
+ name: scope/label1
+ color: '#000000'
+ exclusive: true
+ num_issues: 0
+ num_closed_issues: 0
+ archived_unix: 0
+
+-
+ id: 8
+ repo_id: 55
+ org_id: 0
+ name: scope/label2
+ color: '#000000'
+ exclusive: true
+ num_issues: 0
+ num_closed_issues: 0
+ archived_unix: 0
+
+-
+ id: 9
+ repo_id: 55
+ org_id: 0
+ name: scope/subscope/label2
+ color: '#000000'
+ exclusive: true
+ num_issues: 0
+ num_closed_issues: 0
+ archived_unix: 0
diff --git a/models/fixtures/lfs_meta_object.yml b/models/fixtures/lfs_meta_object.yml
new file mode 100644
index 0000000..cef4824
--- /dev/null
+++ b/models/fixtures/lfs_meta_object.yml
@@ -0,0 +1,32 @@
+# These are the LFS objects in user2/lfs.git
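+# (note: each oid below is assumed to be the SHA-256 digest of the object's
+# content, following the git-lfs pointer-file convention)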
+-
+
+ id: 1
+ oid: 0b8d8b5f15046343fd32f451df93acc2bdd9e6373be478b968e4cad6b6647351
+ size: 107
+ repository_id: 54
+ created_unix: 1671607299
+
+-
+
+ id: 2 # this is an LFS orphan object
+ oid: 2eccdb43825d2a49d99d542daa20075cff1d97d9d2349a8977efe9c03661737c
+ size: 107
+ repository_id: 54
+ created_unix: 1671607299
+
+-
+
+ id: 3
+ oid: 7b6b2c88dba9f760a1a58469b67fee2b698ef7e9399c4ca4f34a14ccbe39f623
+ size: 27
+ repository_id: 54
+ created_unix: 1671607299
+
+-
+
+ id: 4
+ oid: 9d172e5c64b4f0024b9901ec6afe9ea052f3c9b6ff9f4b07956d8c48c86fca82
+ size: 25
+ repository_id: 54
+ created_unix: 1671607299
diff --git a/models/fixtures/login_source.yml b/models/fixtures/login_source.yml
new file mode 100644
index 0000000..ca780a7
--- /dev/null
+++ b/models/fixtures/login_source.yml
@@ -0,0 +1 @@
+[] # empty
diff --git a/models/fixtures/milestone.yml b/models/fixtures/milestone.yml
new file mode 100644
index 0000000..87c30cc
--- /dev/null
+++ b/models/fixtures/milestone.yml
@@ -0,0 +1,54 @@
+-
+ id: 1
+ repo_id: 1
+ name: milestone1
+ content: content1
+ is_closed: false
+ num_issues: 1
+ num_closed_issues: 0
+ completeness: 0
+ deadline_unix: 253370764800
+
+-
+ id: 2
+ repo_id: 1
+ name: milestone2
+ content: content2
+ is_closed: false
+ num_issues: 0
+ num_closed_issues: 0
+ completeness: 0
+ deadline_unix: 253370764800
+
+-
+ id: 3
+ repo_id: 1
+ name: milestone3
+ content: content3
+ is_closed: true
+ num_issues: 1
+ num_closed_issues: 0
+ completeness: 0
+ deadline_unix: 253370764800
+
+-
+ id: 4
+ repo_id: 42
+ name: milestone of repo42
+ content: content random
+ is_closed: false
+ num_issues: 0
+ num_closed_issues: 0
+ completeness: 0
+ deadline_unix: 253370764800
+
+-
+ id: 5
+ repo_id: 10
+ name: milestone of repo 10
+ content: for testing with PRs
+ is_closed: false
+ num_issues: 0
+ num_closed_issues: 0
+ completeness: 0
+ deadline_unix: 253370764800
diff --git a/models/fixtures/mirror.yml b/models/fixtures/mirror.yml
new file mode 100644
index 0000000..97bc4ae
--- /dev/null
+++ b/models/fixtures/mirror.yml
@@ -0,0 +1,49 @@
+-
+ id: 1
+ repo_id: 5
+ interval: 3600
+ enable_prune: false
+ updated_unix: 0
+ next_update_unix: 0
+ lfs_enabled: false
+ lfs_endpoint: ""
+
+-
+ id: 2
+ repo_id: 25
+ interval: 3600
+ enable_prune: false
+ updated_unix: 0
+ next_update_unix: 0
+ lfs_enabled: false
+ lfs_endpoint: ""
+
+-
+ id: 3
+ repo_id: 26
+ interval: 3600
+ enable_prune: false
+ updated_unix: 0
+ next_update_unix: 0
+ lfs_enabled: false
+ lfs_endpoint: ""
+
+-
+ id: 4
+ repo_id: 27
+ interval: 3600
+ enable_prune: false
+ updated_unix: 0
+ next_update_unix: 0
+ lfs_enabled: false
+ lfs_endpoint: ""
+
+-
+ id: 5
+ repo_id: 28
+ interval: 3600
+ enable_prune: false
+ updated_unix: 0
+ next_update_unix: 0
+ lfs_enabled: false
+ lfs_endpoint: ""
diff --git a/models/fixtures/notice.yml b/models/fixtures/notice.yml
new file mode 100644
index 0000000..af08f07
--- /dev/null
+++ b/models/fixtures/notice.yml
@@ -0,0 +1,14 @@
+-
+ id: 1
+ type: 1 # NoticeRepository
+ description: description1
+
+-
+ id: 2
+ type: 1 # NoticeRepository
+ description: description2
+
+-
+ id: 3
+ type: 1 # NoticeRepository
+ description: description3
diff --git a/models/fixtures/notification.yml b/models/fixtures/notification.yml
new file mode 100644
index 0000000..bd279d4
--- /dev/null
+++ b/models/fixtures/notification.yml
@@ -0,0 +1,54 @@
+-
+ id: 1
+ user_id: 1
+ repo_id: 1
+ status: 1 # unread
+ source: 1 # issue
+ updated_by: 2
+ issue_id: 1
+ created_unix: 946684800
+ updated_unix: 946684820
+
+-
+ id: 2
+ user_id: 2
+ repo_id: 1
+ status: 2 # read
+ source: 1 # issue
+ updated_by: 1
+ issue_id: 2
+ created_unix: 946685800
+ updated_unix: 946685820
+
+-
+ id: 3
+ user_id: 2
+ repo_id: 1
+ status: 3 # pinned
+ source: 1 # issue
+ updated_by: 1
+ issue_id: 3
+ created_unix: 946686800
+ updated_unix: 946686800
+
+-
+ id: 4
+ user_id: 2
+ repo_id: 1
+ status: 1 # unread
+ source: 1 # issue
+ updated_by: 1
+ issue_id: 5
+ created_unix: 946687800
+ updated_unix: 946687800
+
+-
+ id: 5
+ user_id: 2
+ repo_id: 2
+ status: 1 # unread
+ source: 1 # issue
+ updated_by: 5
+ issue_id: 4
+ created_unix: 946688800
+ updated_unix: 946688820
diff --git a/models/fixtures/oauth2_application.yml b/models/fixtures/oauth2_application.yml
new file mode 100644
index 0000000..beae913
--- /dev/null
+++ b/models/fixtures/oauth2_application.yml
@@ -0,0 +1,20 @@
+-
+ id: 1
+ uid: 1
+ name: "Test"
+ client_id: "da7da3ba-9a13-4167-856f-3899de0b0138"
+ client_secret: "$2a$10$UYRgUSgekzBp6hYe8pAdc.cgB4Gn06QRKsORUnIYTYQADs.YR/uvi" # bcrypt of "4MK8Na6R55smdCY0WuCCumZ6hjRPnGY5saWVRHHjJiA=
+ redirect_uris: '["a", "https://example.com/xyzzy"]'
+ created_unix: 1546869730
+ updated_unix: 1546869730
+ confidential_client: true
+-
+ id: 2
+ uid: 2
+ name: "Test native app"
+ client_id: "ce5a1322-42a7-11ed-b878-0242ac120002"
+ client_secret: "$2a$10$UYRgUSgekzBp6hYe8pAdc.cgB4Gn06QRKsORUnIYTYQADs.YR/uvi" # bcrypt of "4MK8Na6R55smdCY0WuCCumZ6hjRPnGY5saWVRHHjJiA=
+ redirect_uris: '["b", "http://127.0.0.1"]'
+ created_unix: 1546869730
+ updated_unix: 1546869730
+ confidential_client: false
diff --git a/models/fixtures/oauth2_authorization_code.yml b/models/fixtures/oauth2_authorization_code.yml
new file mode 100644
index 0000000..d295021
--- /dev/null
+++ b/models/fixtures/oauth2_authorization_code.yml
@@ -0,0 +1,15 @@
+- id: 1
+ grant_id: 1
+ code: "authcode"
+ code_challenge: "CjvyTLSdR47G5zYenDA-eDWW4lRrO8yvjcWwbD_deOg" # Code Verifier: N1Zo9-8Rfwhkt68r1r29ty8YwIraXR8eh_1Qwxg7yQXsonBt
+ code_challenge_method: "S256"
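+ # for S256, code_challenge = BASE64URL(SHA256(code_verifier)) per RFC 7636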
+ redirect_uri: "a"
+ valid_until: 3546869730
+
+- id: 2
+ grant_id: 4
+ code: "authcodepublic"
+ code_challenge: "CjvyTLSdR47G5zYenDA-eDWW4lRrO8yvjcWwbD_deOg" # Code Verifier: N1Zo9-8Rfwhkt68r1r29ty8YwIraXR8eh_1Qwxg7yQXsonBt
+ code_challenge_method: "S256"
+ redirect_uri: "http://127.0.0.1/"
+ valid_until: 3546869730
diff --git a/models/fixtures/oauth2_grant.yml b/models/fixtures/oauth2_grant.yml
new file mode 100644
index 0000000..e632868
--- /dev/null
+++ b/models/fixtures/oauth2_grant.yml
@@ -0,0 +1,31 @@
+- id: 1
+ user_id: 1
+ application_id: 1
+ counter: 1
+ scope: "openid profile"
+ created_unix: 1546869730
+ updated_unix: 1546869730
+
+- id: 2
+ user_id: 3
+ application_id: 1
+ counter: 1
+ scope: "openid"
+ created_unix: 1546869730
+ updated_unix: 1546869730
+
+- id: 3
+ user_id: 5
+ application_id: 1
+ counter: 1
+ scope: "openid profile email"
+ created_unix: 1546869730
+ updated_unix: 1546869730
+
+- id: 4
+ user_id: 99
+ application_id: 2
+ counter: 1
+ scope: "whatever"
+ created_unix: 1546869730
+ updated_unix: 1546869730
diff --git a/models/fixtures/org_user.yml b/models/fixtures/org_user.yml
new file mode 100644
index 0000000..a7fbcb2
--- /dev/null
+++ b/models/fixtures/org_user.yml
@@ -0,0 +1,119 @@
+-
+ id: 1
+ uid: 2
+ org_id: 3
+ is_public: true
+
+-
+ id: 2
+ uid: 4
+ org_id: 3
+ is_public: false
+
+-
+ id: 3
+ uid: 5
+ org_id: 6
+ is_public: true
+
+-
+ id: 4
+ uid: 5
+ org_id: 7
+ is_public: false
+
+-
+ id: 5
+ uid: 15
+ org_id: 17
+ is_public: true
+
+-
+ id: 6
+ uid: 18
+ org_id: 17
+ is_public: false
+
+-
+ id: 7
+ uid: 20
+ org_id: 19
+ is_public: true
+
+-
+ id: 8
+ uid: 24
+ org_id: 25
+ is_public: true
+
+-
+ id: 9
+ uid: 28
+ org_id: 3
+ is_public: true
+
+-
+ id: 10
+ uid: 28
+ org_id: 6
+ is_public: true
+
+-
+ id: 11
+ uid: 29
+ org_id: 17
+ is_public: true
+
+-
+ id: 12
+ uid: 2
+ org_id: 17
+ is_public: true
+
+-
+ id: 13
+ uid: 31
+ org_id: 19
+ is_public: true
+
+-
+ id: 14
+ uid: 5
+ org_id: 23
+ is_public: false
+
+-
+ id: 15
+ uid: 1
+ org_id: 35
+ is_public: true
+
+-
+ id: 16
+ uid: 1
+ org_id: 36
+ is_public: true
+
+-
+ id: 17
+ uid: 5
+ org_id: 36
+ is_public: true
+
+-
+ id: 18
+ uid: 38
+ org_id: 41
+ is_public: true
+
+-
+ id: 19
+ uid: 39
+ org_id: 41
+ is_public: true
+
+-
+ id: 20
+ uid: 40
+ org_id: 41
+ is_public: true
diff --git a/models/fixtures/project.yml b/models/fixtures/project.yml
new file mode 100644
index 0000000..44d87bc
--- /dev/null
+++ b/models/fixtures/project.yml
@@ -0,0 +1,71 @@
+-
+ id: 1
+ title: First project
+ owner_id: 0
+ repo_id: 1
+ is_closed: false
+ creator_id: 2
+ board_type: 1
+ type: 2
+ created_unix: 1688973030
+ updated_unix: 1688973030
+
+-
+ id: 2
+ title: second project
+ owner_id: 0
+ repo_id: 3
+ is_closed: false
+ creator_id: 3
+ board_type: 1
+ type: 2
+ created_unix: 1688973010
+ updated_unix: 1688973010
+
+-
+ id: 3
+ title: project on repo with disabled project
+ owner_id: 0
+ repo_id: 4
+ is_closed: true
+ creator_id: 5
+ board_type: 1
+ type: 2
+ created_unix: 1688973020
+ updated_unix: 1688973020
+
+-
+ id: 4
+ title: project on user2
+ owner_id: 2
+ repo_id: 0
+ is_closed: false
+ creator_id: 2
+ board_type: 1
+ type: 2
+ created_unix: 1688973000
+ updated_unix: 1688973000
+
+-
+ id: 5
+ title: project without default column
+ owner_id: 2
+ repo_id: 0
+ is_closed: false
+ creator_id: 2
+ board_type: 1
+ type: 2
+ created_unix: 1688973000
+ updated_unix: 1688973000
+
+-
+ id: 6
+ title: project with multiple default columns
+ owner_id: 2
+ repo_id: 0
+ is_closed: false
+ creator_id: 2
+ board_type: 1
+ type: 2
+ created_unix: 1688973000
+ updated_unix: 1688973000
diff --git a/models/fixtures/project_board.yml b/models/fixtures/project_board.yml
new file mode 100644
index 0000000..3293dea
--- /dev/null
+++ b/models/fixtures/project_board.yml
@@ -0,0 +1,77 @@
+-
+ id: 1
+ project_id: 1
+ title: To Do
+ creator_id: 2
+ default: true
+ created_unix: 1588117528
+ updated_unix: 1588117528
+
+-
+ id: 2
+ project_id: 1
+ title: In Progress
+ creator_id: 2
+ created_unix: 1588117528
+ updated_unix: 1588117528
+
+-
+ id: 3
+ project_id: 1
+ title: Done
+ creator_id: 2
+ created_unix: 1588117528
+ updated_unix: 1588117528
+
+-
+ id: 4
+ project_id: 4
+ title: Done
+ creator_id: 2
+ created_unix: 1588117528
+ updated_unix: 1588117528
+
+-
+ id: 5
+ project_id: 2
+ title: Backlog
+ creator_id: 2
+ default: true
+ created_unix: 1588117528
+ updated_unix: 1588117528
+
+-
+ id: 6
+ project_id: 4
+ title: Backlog
+ creator_id: 2
+ default: true
+ created_unix: 1588117528
+ updated_unix: 1588117528
+
+-
+ id: 7
+ project_id: 5
+ title: Done
+ creator_id: 2
+ default: false
+ created_unix: 1588117528
+ updated_unix: 1588117528
+
+-
+ id: 8
+ project_id: 6
+ title: Backlog
+ creator_id: 2
+ default: true
+ created_unix: 1588117528
+ updated_unix: 1588117528
+
+-
+ id: 9
+ project_id: 6
+ title: Uncategorized
+ creator_id: 2
+ default: true
+ created_unix: 1588117528
+ updated_unix: 1588117528
diff --git a/models/fixtures/project_issue.yml b/models/fixtures/project_issue.yml
new file mode 100644
index 0000000..b1af059
--- /dev/null
+++ b/models/fixtures/project_issue.yml
@@ -0,0 +1,23 @@
+-
+ id: 1
+ issue_id: 1
+ project_id: 1
+ project_board_id: 1
+
+-
+ id: 2
+ issue_id: 2
+ project_id: 1
+ project_board_id: 0 # no board assigned
+
+-
+ id: 3
+ issue_id: 3
+ project_id: 1
+ project_board_id: 2
+
+-
+ id: 4
+ issue_id: 5
+ project_id: 1
+ project_board_id: 3
diff --git a/models/fixtures/protected_branch.yml b/models/fixtures/protected_branch.yml
new file mode 100644
index 0000000..ca780a7
--- /dev/null
+++ b/models/fixtures/protected_branch.yml
@@ -0,0 +1 @@
+[] # empty
diff --git a/models/fixtures/protected_tag.yml b/models/fixtures/protected_tag.yml
new file mode 100644
index 0000000..dbec52c
--- /dev/null
+++ b/models/fixtures/protected_tag.yml
@@ -0,0 +1,24 @@
+-
+ id: 1
+ repo_id: 4
+ name_pattern: /v.+/
+ allowlist_user_i_ds: []
+ allowlist_team_i_ds: []
+ created_unix: 1715596037
+ updated_unix: 1715596037
+-
+ id: 2
+ repo_id: 1
+ name_pattern: v-*
+ allowlist_user_i_ds: []
+ allowlist_team_i_ds: []
+ created_unix: 1715596037
+ updated_unix: 1715596037
+-
+ id: 3
+ repo_id: 1
+ name_pattern: v-1.1
+ allowlist_user_i_ds: [2]
+ allowlist_team_i_ds: []
+ created_unix: 1715596037
+ updated_unix: 1715596037
diff --git a/models/fixtures/public_key.yml b/models/fixtures/public_key.yml
new file mode 100644
index 0000000..ae620ee
--- /dev/null
+++ b/models/fixtures/public_key.yml
@@ -0,0 +1,11 @@
+-
+ id: 1
+ owner_id: 2
+ name: user2@localhost
+ fingerprint: "SHA256:M3iiFbqQKgLxi+WAoRa38ZVQ9ktdfau2sOu9xuPb9ew"
+ content: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDWVj0fQ5N8wNc0LVNA41wDLYJ89ZIbejrPfg/avyj3u/ZohAKsQclxG4Ju0VirduBFF9EOiuxoiFBRr3xRpqzpsZtnMPkWVWb+akZwBFAx8p+jKdy4QXR/SZqbVobrGwip2UjSrri1CtBxpJikojRIZfCnDaMOyd9Jp6KkujvniFzUWdLmCPxUE9zhTaPu0JsEP7MW0m6yx7ZUhHyfss+NtqmFTaDO+QlMR7L2QkDliN2Jl3Xa3PhuWnKJfWhdAq1Cw4oraKUOmIgXLkuiuxVQ6mD3AiFupkmfqdHq6h+uHHmyQqv3gU+/sD8GbGAhf6ftqhTsXjnv1Aj4R8NoDf9BS6KRkzkeun5UisSzgtfQzjOMEiJtmrep2ZQrMGahrXa+q4VKr0aKJfm+KlLfwm/JztfsBcqQWNcTURiCFqz+fgZw0Ey/de0eyMzldYTdXXNRYCKjs9bvBK+6SSXRM7AhftfQ0ZuoW5+gtinPrnmoOaSCEJbAiEiTO/BzOHgowiM= user2@localhost"
+ mode: 2
+ type: 1
+ created_unix: 1559593109
+ updated_unix: 1565224552
+ login_source_id: 0
diff --git a/models/fixtures/pull_request.yml b/models/fixtures/pull_request.yml
new file mode 100644
index 0000000..9a16316
--- /dev/null
+++ b/models/fixtures/pull_request.yml
@@ -0,0 +1,119 @@
+-
+ id: 1
+ type: 0 # gitea pull request
+ status: 2 # mergeable
+ issue_id: 2
+ index: 2
+ head_repo_id: 1
+ base_repo_id: 1
+ head_branch: branch1
+ base_branch: master
+ merge_base: 4a357436d925b5c974181ff12a994538ddc5a269
+ merged_commit_id: 1a8823cd1a9549fde083f992f6b9b87a7ab74fb3
+ has_merged: true
+ merger_id: 2
+
+-
+ id: 2
+ type: 0 # gitea pull request
+ status: 2 # mergeable
+ issue_id: 3
+ index: 3
+ head_repo_id: 1
+ base_repo_id: 1
+ head_branch: branch2
+ base_branch: master
+ merge_base: 4a357436d925b5c974181ff12a994538ddc5a269
+ has_merged: false
+
+-
+ id: 3
+ type: 0 # gitea pull request
+ status: 2 # mergeable
+ issue_id: 8
+ index: 1
+ head_repo_id: 11
+ base_repo_id: 10
+ head_branch: branch2
+ base_branch: master
+ merge_base: 0abcb056019adb83
+ has_merged: false
+
+-
+ id: 4
+ type: 0 # gitea pull request
+ status: 2 # mergeable
+ issue_id: 9
+ index: 1
+ head_repo_id: 48
+ base_repo_id: 48
+ head_branch: branch1
+ base_branch: master
+ merge_base: abcdef1234567890
+ has_merged: false
+
+-
+ id: 5 # this PR is outdated (one commit behind branch1)
+ type: 0 # gitea pull request
+ status: 2 # mergeable
+ issue_id: 11
+ index: 5
+ head_repo_id: 1
+ base_repo_id: 1
+ head_branch: pr-to-update
+ base_branch: branch2
+ merge_base: 985f0301dba5e7b34be866819cd15ad3d8f508ee
+ has_merged: false
+
+-
+ id: 6
+ type: 0 # gitea pull request
+ status: 2 # mergeable
+ issue_id: 12
+ index: 2
+ head_repo_id: 3
+ base_repo_id: 3
+ head_branch: test_branch
+ base_branch: master
+ merge_base: 2a47ca4b614a9f5a
+ has_merged: false
+
+-
+ id: 7
+ type: 0 # gitea pull request
+ status: 2 # mergeable
+ issue_id: 19
+ index: 1
+ head_repo_id: 58
+ base_repo_id: 58
+ head_branch: branch1
+ base_branch: main
+ merge_base: cbff181af4c9c7fee3cf6c106699e07d9a3f54e6
+ has_merged: false
+
+-
+ id: 8
+ type: 0 # gitea pull request
+ status: 2 # mergeable
+ issue_id: 20
+ index: 1
+ head_repo_id: 23
+ base_repo_id: 23
+
+-
+ id: 9
+ type: 0 # gitea pull request
+ status: 2 # mergeable
+ issue_id: 21
+ index: 1
+ head_repo_id: 60
+ base_repo_id: 60
+
+-
+ id: 10
+ type: 0 # gitea pull request
+ status: 2 # mergeable
+ issue_id: 22
+ index: 1
+ head_repo_id: 61
+ base_repo_id: 61
diff --git a/models/fixtures/push_mirror.yml b/models/fixtures/push_mirror.yml
new file mode 100644
index 0000000..ca780a7
--- /dev/null
+++ b/models/fixtures/push_mirror.yml
@@ -0,0 +1 @@
+[] # empty
diff --git a/models/fixtures/reaction.yml b/models/fixtures/reaction.yml
new file mode 100644
index 0000000..ee571a7
--- /dev/null
+++ b/models/fixtures/reaction.yml
@@ -0,0 +1,39 @@
+-
+ id: 1 # issue reaction
+ type: zzz # not an allowed reaction (added before the allowed reaction list was changed)
+ issue_id: 1
+ comment_id: 0
+ user_id: 2
+ created_unix: 1573248001
+
+-
+ id: 2 # issue reaction
+ type: zzz # not an allowed reaction (added before the allowed reaction list was changed)
+ issue_id: 1
+ comment_id: 0
+ user_id: 1
+ created_unix: 1573248002
+
+-
+ id: 3 # issue reaction
+ type: eyes # allowed reaction
+ issue_id: 1
+ comment_id: 0
+ user_id: 2
+ created_unix: 1573248003
+
+-
+ id: 4 # comment reaction
+ type: laugh # allowed reaction
+ issue_id: 1
+ comment_id: 2
+ user_id: 2
+ created_unix: 1573248004
+
+-
+ id: 5 # comment reaction
+ type: laugh # allowed reaction
+ issue_id: 1
+ comment_id: 2
+ user_id: 1
+ created_unix: 1573248005
diff --git a/models/fixtures/release.yml b/models/fixtures/release.yml
new file mode 100644
index 0000000..0163506
--- /dev/null
+++ b/models/fixtures/release.yml
@@ -0,0 +1,166 @@
+- id: 1
+ repo_id: 1
+ publisher_id: 2
+ tag_name: "v1.1"
+ lower_tag_name: "v1.1"
+ target: "master"
+ title: "testing-release"
+ sha1: "65f1bf27bc3bf70f64657658635e66094edbcb4d"
+ num_commits: 10
+ is_draft: false
+ is_prerelease: false
+ is_tag: false
+ created_unix: 946684800
+
+- id: 2
+ repo_id: 40
+ publisher_id: 2
+ tag_name: "v1.1"
+ lower_tag_name: "v1.1"
+ target: "master"
+ title: "testing-release"
+ sha1: "65f1bf27bc3bf70f64657658635e66094edbcb4d"
+ num_commits: 10
+ is_draft: false
+ is_prerelease: false
+ is_tag: false
+ created_unix: 946684800
+
+- id: 3
+ repo_id: 1
+ publisher_id: 2
+ tag_name: "delete-tag"
+ lower_tag_name: "delete-tag"
+ target: "master"
+ title: "delete-tag"
+ sha1: "65f1bf27bc3bf70f64657658635e66094edbcb4d"
+ num_commits: 10
+ is_draft: false
+ is_prerelease: false
+ is_tag: true
+ created_unix: 946684800
+
+- id: 4
+ repo_id: 1
+ publisher_id: 2
+ tag_name: "draft-release"
+ lower_tag_name: "draft-release"
+ target: "master"
+ title: "draft-release"
+ is_draft: true
+ is_prerelease: false
+ is_tag: false
+ created_unix: 1619524806
+
+- id: 5
+ repo_id: 1
+ publisher_id: 2
+ tag_name: "v1.0"
+ lower_tag_name: "v1.0"
+ target: "master"
+ title: "pre-release"
+ note: "some text for a pre release"
+ sha1: "65f1bf27bc3bf70f64657658635e66094edbcb4d"
+ num_commits: 1
+ is_draft: false
+ is_prerelease: true
+ is_tag: false
+ created_unix: 946684800
+
+- id: 6
+ repo_id: 57
+ publisher_id: 2
+ tag_name: "v1.0"
+ lower_tag_name: "v1.0"
+ target: "main"
+ title: "v1.0"
+ sha1: "a8a700e8c644c783ba2c6e742bb81bf91e244bff"
+ num_commits: 3
+ is_draft: false
+ is_prerelease: false
+ is_tag: false
+ created_unix: 946684801
+
+- id: 7
+ repo_id: 57
+ publisher_id: 2
+ tag_name: "v1.1"
+ lower_tag_name: "v1.1"
+ target: "main"
+ title: "v1.1"
+ sha1: "cef06e48f2642cd0dc9597b4bea09f4b3f74aad6"
+ num_commits: 5
+ is_draft: false
+ is_prerelease: false
+ is_tag: false
+ created_unix: 946684802
+
+- id: 8
+ repo_id: 57
+ publisher_id: 2
+ tag_name: "v2.0"
+ lower_tag_name: "v2.0"
+ target: "main"
+ title: "v2.0"
+ sha1: "7197b56fdc75b453f47c9110938cb46a303579fd"
+ num_commits: 6
+ is_draft: false
+ is_prerelease: false
+ is_tag: false
+ created_unix: 946684803
+
+- id: 9
+ repo_id: 57
+ publisher_id: 2
+ tag_name: "non-existing-target-branch"
+ lower_tag_name: "non-existing-target-branch"
+ target: "non-existing"
+ title: "non-existing-target-branch"
+ sha1: "cef06e48f2642cd0dc9597b4bea09f4b3f74aad6"
+ num_commits: 5
+ is_draft: false
+ is_prerelease: false
+ is_tag: false
+ created_unix: 946684803
+
+- id: 10
+ repo_id: 57
+ publisher_id: 2
+ tag_name: "empty-target-branch"
+ lower_tag_name: "empty-target-branch"
+ target: ""
+ title: "empty-target-branch"
+ sha1: "cef06e48f2642cd0dc9597b4bea09f4b3f74aad6"
+ num_commits: 5
+ is_draft: false
+ is_prerelease: false
+ is_tag: false
+ created_unix: 946684803
+
+- id: 11
+ repo_id: 2
+ publisher_id: 2
+ tag_name: "v1.1"
+ lower_tag_name: "v1.1"
+ target: ""
+ title: "v1.1"
+ sha1: "205ac761f3326a7ebe416e8673760016450b5cec"
+ num_commits: 2
+ is_draft: false
+ is_prerelease: false
+ is_tag: false
+ created_unix: 946684803
+
+- id: 12
+ repo_id: 1059
+ publisher_id: 2
+ tag_name: "v1.0"
+ lower_tag_name: "v1.0"
+ target: "main"
+ title: "v1.0"
+ sha1: "d8f53dfb33f6ccf4169c34970b5e747511c18beb"
+ num_commits: 1
+ is_draft: false
+ is_prerelease: false
+ is_tag: false
+ created_unix: 946684803
diff --git a/models/fixtures/renamed_branch.yml b/models/fixtures/renamed_branch.yml
new file mode 100644
index 0000000..efa5130
--- /dev/null
+++ b/models/fixtures/renamed_branch.yml
@@ -0,0 +1,5 @@
+-
+ id: 1
+ repo_id: 1
+ from: dev
+ to: master
diff --git a/models/fixtures/repo_archiver.yml b/models/fixtures/repo_archiver.yml
new file mode 100644
index 0000000..ca780a7
--- /dev/null
+++ b/models/fixtures/repo_archiver.yml
@@ -0,0 +1 @@
+[] # empty
diff --git a/models/fixtures/repo_indexer_status.yml b/models/fixtures/repo_indexer_status.yml
new file mode 100644
index 0000000..ca780a7
--- /dev/null
+++ b/models/fixtures/repo_indexer_status.yml
@@ -0,0 +1 @@
+[] # empty
diff --git a/models/fixtures/repo_redirect.yml b/models/fixtures/repo_redirect.yml
new file mode 100644
index 0000000..8850c8d
--- /dev/null
+++ b/models/fixtures/repo_redirect.yml
@@ -0,0 +1,5 @@
+-
+ id: 1
+ owner_id: 2
+ lower_name: oldrepo1
+ redirect_repo_id: 1
diff --git a/models/fixtures/repo_topic.yml b/models/fixtures/repo_topic.yml
new file mode 100644
index 0000000..f166fac
--- /dev/null
+++ b/models/fixtures/repo_topic.yml
@@ -0,0 +1,27 @@
+-
+ repo_id: 1
+ topic_id: 1
+
+-
+ repo_id: 1
+ topic_id: 2
+
+-
+ repo_id: 1
+ topic_id: 3
+
+-
+ repo_id: 33
+ topic_id: 1
+
+-
+ repo_id: 33
+ topic_id: 4
+
+-
+ repo_id: 2
+ topic_id: 5
+
+-
+ repo_id: 2
+ topic_id: 6
diff --git a/models/fixtures/repo_transfer.yml b/models/fixtures/repo_transfer.yml
new file mode 100644
index 0000000..b841b5e
--- /dev/null
+++ b/models/fixtures/repo_transfer.yml
@@ -0,0 +1,7 @@
+-
+ id: 1
+ doer_id: 3
+ recipient_id: 1
+ repo_id: 3
+ created_unix: 1553610671
+ updated_unix: 1553610671
diff --git a/models/fixtures/repo_unit.yml b/models/fixtures/repo_unit.yml
new file mode 100644
index 0000000..cd49a51
--- /dev/null
+++ b/models/fixtures/repo_unit.yml
@@ -0,0 +1,797 @@
+# See models/unit/unit.go for the meaning of the type
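+# Assumed mapping (check models/unit/unit.go): 1=Code, 2=Issues, 3=PullRequests,
+# 4=Releases, 5=Wiki, 6=ExternalWiki, 7=ExternalTracker, 8=Projects, 9=Packages,
+# 10=Actions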
+-
+ id: 1
+ repo_id: 1
+ type: 4
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 2
+ repo_id: 1
+ type: 5
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 3
+ repo_id: 1
+ type: 1
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 4
+ repo_id: 1
+ type: 2
+ config: "{\"EnableTimetracker\":true,\"AllowOnlyContributorsToTrackTime\":true}"
+ created_unix: 946684810
+
+-
+ id: 5
+ repo_id: 1
+ type: 3
+ config: "{\"IgnoreWhitespaceConflicts\":false,\"AllowMerge\":true,\"AllowRebase\":true,\"AllowRebaseMerge\":true,\"AllowSquash\":true}"
+ created_unix: 946684810
+
+-
+ id: 6
+ repo_id: 3
+ type: 1
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 7
+ repo_id: 3
+ type: 2
+ config: "{\"EnableTimetracker\":false,\"AllowOnlyContributorsToTrackTime\":false}"
+ created_unix: 946684810
+
+-
+ id: 8
+ repo_id: 3
+ type: 3
+ config: "{\"IgnoreWhitespaceConflicts\":true,\"AllowMerge\":true,\"AllowRebase\":false,\"AllowRebaseMerge\":true,\"AllowSquash\":false}"
+ created_unix: 946684810
+
+-
+ id: 9
+ repo_id: 3
+ type: 4
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 10
+ repo_id: 3
+ type: 5
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 11
+ repo_id: 31
+ type: 1
+ config: "{}"
+ created_unix: 1524304355
+
+-
+ id: 12
+ repo_id: 33
+ type: 1
+ config: "{}"
+ created_unix: 1535593231
+
+-
+ id: 13
+ repo_id: 33
+ type: 2
+ config: "{\"EnableTimetracker\":true,\"AllowOnlyContributorsToTrackTime\":true}"
+ created_unix: 1535593231
+
+-
+ id: 14
+ repo_id: 33
+ type: 3
+ config: "{\"IgnoreWhitespaceConflicts\":false,\"AllowMerge\":true,\"AllowRebase\":true,\"AllowSquash\":true}"
+ created_unix: 1535593231
+
+-
+ id: 15
+ repo_id: 33
+ type: 4
+ config: "{}"
+ created_unix: 1535593231
+
+-
+ id: 16
+ repo_id: 33
+ type: 5
+ config: "{}"
+ created_unix: 1535593231
+
+-
+ id: 17
+ repo_id: 4
+ type: 4
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 18
+ repo_id: 4
+ type: 5
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 19
+ repo_id: 4
+ type: 1
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 20
+ repo_id: 4
+ type: 2
+ config: "{\"EnableTimetracker\":true,\"AllowOnlyContributorsToTrackTime\":true}"
+ created_unix: 946684810
+
+-
+ id: 21
+ repo_id: 4
+ type: 3
+ config: "{\"IgnoreWhitespaceConflicts\":false,\"AllowMerge\":true,\"AllowRebase\":true,\"AllowSquash\":true}"
+ created_unix: 946684810
+
+-
+ id: 22
+ repo_id: 2
+ type: 4
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 23
+ repo_id: 2
+ type: 5
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 24
+ repo_id: 2
+ type: 1
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 25
+ repo_id: 32
+ type: 1
+ config: "{}"
+ created_unix: 1524304355
+
+-
+ id: 26
+ repo_id: 32
+ type: 2
+ config: "{}"
+ created_unix: 1524304355
+
+-
+ id: 27
+ repo_id: 24
+ type: 1
+ config: "{}"
+ created_unix: 1524304355
+
+-
+ id: 28
+ repo_id: 24
+ type: 2
+ config: "{}"
+ created_unix: 1524304355
+
+-
+ id: 29
+ repo_id: 16
+ type: 1
+ config: "{}"
+ created_unix: 1524304355
+
+-
+ id: 30
+ repo_id: 23
+ type: 1
+ config: "{}"
+ created_unix: 1524304355
+
+-
+ id: 31
+ repo_id: 27
+ type: 1
+ config: "{}"
+ created_unix: 1524304355
+
+-
+ id: 32
+ repo_id: 28
+ type: 1
+ config: "{}"
+ created_unix: 1524304355
+
+-
+ id: 33
+ repo_id: 36
+ type: 4
+ config: "{}"
+ created_unix: 1524304355
+
+-
+ id: 34
+ repo_id: 36
+ type: 5
+ config: "{}"
+ created_unix: 1524304355
+
+-
+ id: 35
+ repo_id: 36
+ type: 1
+ config: "{}"
+ created_unix: 1524304355
+
+-
+ id: 36
+ repo_id: 36
+ type: 2
+ config: "{\"EnableTimetracker\":true,\"AllowOnlyContributorsToTrackTime\":true}"
+ created_unix: 1524304355
+
+-
+ id: 37
+ repo_id: 36
+ type: 3
+ config: "{\"IgnoreWhitespaceConflicts\":false,\"AllowMerge\":true,\"AllowRebase\":true,\"AllowRebaseMerge\":true,\"AllowSquash\":true}"
+ created_unix: 1524304355
+
+-
+ id: 38
+ repo_id: 37
+ type: 4
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 39
+ repo_id: 37
+ type: 5
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 40
+ repo_id: 37
+ type: 1
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 41
+ repo_id: 37
+ type: 2
+ config: "{\"EnableTimetracker\":true,\"AllowOnlyContributorsToTrackTime\":true}"
+ created_unix: 946684810
+
+-
+ id: 42
+ repo_id: 37
+ type: 3
+ config: "{\"IgnoreWhitespaceConflicts\":false,\"AllowMerge\":true,\"AllowRebase\":true,\"AllowRebaseMerge\":true,\"AllowSquash\":true}"
+ created_unix: 946684810
+
+-
+ id: 43
+ repo_id: 38
+ type: 1
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 44
+ repo_id: 38
+ type: 2
+ config: "{\"EnableTimetracker\":true,\"AllowOnlyContributorsToTrackTime\":true}"
+ created_unix: 946684810
+
+-
+ id: 45
+ repo_id: 38
+ type: 3
+ config: "{\"IgnoreWhitespaceConflicts\":false,\"AllowMerge\":true,\"AllowRebase\":true,\"AllowRebaseMerge\":true,\"AllowSquash\":true}"
+ created_unix: 946684810
+
+-
+ id: 46
+ repo_id: 39
+ type: 1
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 47
+ repo_id: 39
+ type: 2
+ config: "{\"EnableTimetracker\":true,\"AllowOnlyContributorsToTrackTime\":true}"
+ created_unix: 946684810
+
+-
+ id: 48
+ repo_id: 39
+ type: 3
+ config: "{\"IgnoreWhitespaceConflicts\":false,\"AllowMerge\":true,\"AllowRebase\":true,\"AllowRebaseMerge\":true,\"AllowSquash\":true}"
+ created_unix: 946684810
+
+-
+ id: 49
+ repo_id: 40
+ type: 1
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 50
+ repo_id: 40
+ type: 2
+ config: "{\"EnableTimetracker\":true,\"AllowOnlyContributorsToTrackTime\":true}"
+ created_unix: 946684810
+
+-
+ id: 51
+ repo_id: 40
+ type: 3
+ config: "{\"IgnoreWhitespaceConflicts\":false,\"AllowMerge\":true,\"AllowRebase\":true,\"AllowRebaseMerge\":true,\"AllowSquash\":true}"
+ created_unix: 946684810
+
+-
+ id: 52
+ repo_id: 41
+ type: 1
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 53
+ repo_id: 41
+ type: 2
+ config: "{\"EnableTimetracker\":true,\"AllowOnlyContributorsToTrackTime\":true}"
+ created_unix: 946684810
+
+-
+ id: 54
+ repo_id: 41
+ type: 3
+ config: "{\"IgnoreWhitespaceConflicts\":false,\"AllowMerge\":true,\"AllowRebase\":true,\"AllowRebaseMerge\":true,\"AllowSquash\":true}"
+ created_unix: 946684810
+
+-
+ id: 55
+ repo_id: 10
+ type: 1
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 56
+ repo_id: 10
+ type: 2
+ config: "{\"EnableTimetracker\":true,\"AllowOnlyContributorsToTrackTime\":true}"
+ created_unix: 946684810
+
+-
+ id: 57
+ repo_id: 10
+ type: 3
+ config: "{\"IgnoreWhitespaceConflicts\":false,\"AllowMerge\":true,\"AllowRebase\":true,\"AllowRebaseMerge\":true,\"AllowSquash\":true}"
+ created_unix: 946684810
+
+-
+ id: 58
+ repo_id: 11
+ type: 1
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 59
+ repo_id: 42
+ type: 1
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 60
+ repo_id: 42
+ type: 4
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 61
+ repo_id: 42
+ type: 5
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 62
+ repo_id: 42
+ type: 2
+ config: "{\"EnableTimetracker\":true,\"AllowOnlyContributorsToTrackTime\":true}"
+ created_unix: 946684810
+
+-
+ id: 63
+ repo_id: 42
+ type: 3
+ config: "{\"IgnoreWhitespaceConflicts\":false,\"AllowMerge\":true,\"AllowRebase\":true,\"AllowRebaseMerge\":true,\"AllowSquash\":true}"
+ created_unix: 946684810
+
+-
+ id: 64
+ repo_id: 44
+ type: 1
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 65
+ repo_id: 45
+ type: 1
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 66
+ repo_id: 46
+ type: 7
+ config: "{\"ExternalTrackerURL\":\"https://tracker.com\",\"ExternalTrackerFormat\":\"https://tracker.com/{user}/{repo}/issues/{index}\",\"ExternalTrackerStyle\":\"\"}"
+ created_unix: 946684810
+
+-
+ id: 67
+ repo_id: 47
+ type: 7
+ config: "{\"ExternalTrackerURL\":\"https://tracker.com\",\"ExternalTrackerFormat\":\"https://tracker.com/{user}/{repo}/issues/{index}\",\"ExternalTrackerStyle\":\"numeric\"}"
+ created_unix: 946684810
+
+-
+ id: 68
+ repo_id: 48
+ type: 7
+ config: "{\"ExternalTrackerURL\":\"https://tracker.com\",\"ExternalTrackerFormat\":\"https://tracker.com/{user}/{repo}/issues/{index}\",\"ExternalTrackerStyle\":\"alphanumeric\"}"
+ created_unix: 946684810
+-
+ id: 69
+ repo_id: 2
+ type: 2
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 70
+ repo_id: 5
+ type: 4
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 71
+ repo_id: 5
+ type: 5
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 72
+ repo_id: 5
+ type: 1
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 73
+ repo_id: 5
+ type: 2
+ config: "{\"EnableTimetracker\":true,\"AllowOnlyContributorsToTrackTime\":true}"
+ created_unix: 946684810
+
+-
+ id: 74
+ repo_id: 5
+ type: 3
+ config: "{\"IgnoreWhitespaceConflicts\":false,\"AllowMerge\":true,\"AllowRebase\":true,\"AllowRebaseMerge\":true,\"AllowSquash\":true}"
+ created_unix: 946684810
+
+-
+ id: 75
+ repo_id: 1
+ type: 8
+ created_unix: 946684810
+
+-
+ id: 76
+ repo_id: 2
+ type: 8
+ created_unix: 946684810
+
+-
+ id: 77
+ repo_id: 3
+ type: 8
+ created_unix: 946684810
+
+-
+ id: 78
+ repo_id: 50
+ type: 2
+ created_unix: 946684810
+
+-
+ id: 79
+ repo_id: 51
+ type: 2
+ created_unix: 946684810
+
+-
+ id: 80
+ repo_id: 53
+ type: 1
+ created_unix: 946684810
+
+-
+ id: 81
+ repo_id: 54
+ type: 1
+ created_unix: 946684810
+
+-
+ id: 82
+ repo_id: 31
+ type: 1
+ created_unix: 946684810
+
+-
+ id: 83
+ repo_id: 31
+ type: 3
+ config: "{\"IgnoreWhitespaceConflicts\":false,\"AllowMerge\":true,\"AllowRebase\":true,\"AllowRebaseMerge\":true,\"AllowSquash\":true}"
+ created_unix: 946684810
+
+-
+ id: 84
+ repo_id: 56
+ type: 1
+ created_unix: 946684810
+-
+ id: 85
+ repo_id: 57
+ type: 1
+ created_unix: 946684810
+-
+ id: 86
+ repo_id: 57
+ type: 2
+ created_unix: 946684810
+-
+ id: 87
+ repo_id: 57
+ type: 3
+ created_unix: 946684810
+-
+ id: 88
+ repo_id: 57
+ type: 4
+ created_unix: 946684810
+-
+ id: 89
+ repo_id: 57
+ type: 5
+ created_unix: 946684810
+
+-
+ id: 90
+ repo_id: 52
+ type: 1
+ created_unix: 946684810
+
+# BEGIN Forgejo [GITEA] Improve HTML title on repositories
+-
+ id: 1093
+ repo_id: 1059
+ type: 1
+ created_unix: 946684810
+
+-
+ id: 1094
+ repo_id: 1059
+ type: 2
+ created_unix: 946684810
+
+-
+ id: 1095
+ repo_id: 1059
+ type: 3
+ created_unix: 946684810
+
+-
+ id: 1096
+ repo_id: 1059
+ type: 4
+ created_unix: 946684810
+
+-
+ id: 1097
+ repo_id: 1059
+ type: 5
+ created_unix: 946684810
+# END Forgejo [GITEA] Improve HTML title on repositories
+
+-
+ id: 91
+ repo_id: 58
+ type: 1
+ created_unix: 946684810
+
+-
+ id: 92
+ repo_id: 58
+ type: 2
+ created_unix: 946684810
+
+-
+ id: 93
+ repo_id: 58
+ type: 3
+ created_unix: 946684810
+
+-
+ id: 94
+ repo_id: 58
+ type: 4
+ created_unix: 946684810
+
+-
+ id: 95
+ repo_id: 58
+ type: 5
+ created_unix: 946684810
+
+-
+ id: 96
+ repo_id: 49
+ type: 1
+ created_unix: 946684810
+
+-
+ id: 97
+ repo_id: 49
+ type: 2
+ created_unix: 946684810
+
+-
+ id: 98
+ repo_id: 1
+ type: 8
+ created_unix: 946684810
+
+-
+ id: 99
+ repo_id: 1
+ type: 9
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 100
+ repo_id: 1
+ type: 10
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 101
+ repo_id: 59
+ type: 1
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 102
+ repo_id: 60
+ type: 1
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 103
+ repo_id: 60
+ type: 2
+ config: "{\"EnableTimetracker\":true,\"AllowOnlyContributorsToTrackTime\":true}"
+ created_unix: 946684810
+
+-
+ id: 104
+ repo_id: 60
+ type: 3
+ config: "{\"IgnoreWhitespaceConflicts\":false,\"AllowMerge\":true,\"AllowRebase\":true,\"AllowRebaseMerge\":true,\"AllowSquash\":true}"
+ created_unix: 946684810
+
+-
+ id: 105
+ repo_id: 61
+ type: 1
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 106
+ repo_id: 61
+ type: 2
+ config: "{\"EnableTimetracker\":true,\"AllowOnlyContributorsToTrackTime\":true}"
+ created_unix: 946684810
+
+-
+ id: 107
+ repo_id: 61
+ type: 3
+ config: "{\"IgnoreWhitespaceConflicts\":false,\"AllowMerge\":true,\"AllowRebase\":true,\"AllowRebaseMerge\":true,\"AllowSquash\":true}"
+ created_unix: 946684810
+
+-
+ id: 108
+ repo_id: 62
+ type: 1
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 109
+ repo_id: 62
+ type: 2
+ created_unix: 946684810
+
+-
+ id: 110
+ repo_id: 62
+ type: 3
+ created_unix: 946684810
+
+-
+ id: 111
+ repo_id: 62
+ type: 4
+ created_unix: 946684810
+
+-
+ id: 112
+ repo_id: 62
+ type: 5
+ created_unix: 946684810
+
+-
+ id: 113
+ repo_id: 62
+ type: 10
+ config: "{}"
+ created_unix: 946684810
+
+-
+ id: 114
+ repo_id: 4
+ type: 10
+ config: "{}"
+ created_unix: 946684810
diff --git a/models/fixtures/repository.yml b/models/fixtures/repository.yml
new file mode 100644
index 0000000..51f526f
--- /dev/null
+++ b/models/fixtures/repository.yml
@@ -0,0 +1,1826 @@
+# don't forget to add fixtures in repo_unit.yml
+-
+ id: 1
+ owner_id: 2
+ owner_name: user2
+ lower_name: repo1
+ name: repo1
+ default_branch: master
+ num_watches: 4
+ num_stars: 0
+ num_forks: 0
+ num_issues: 2
+ num_closed_issues: 1
+ num_pulls: 3
+ num_closed_pulls: 0
+ num_milestones: 3
+ num_closed_milestones: 1
+ num_projects: 1
+ num_closed_projects: 0
+ is_private: false
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 7597
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 2
+ owner_id: 2
+ owner_name: user2
+ lower_name: repo2
+ name: repo2
+ default_branch: master
+ num_watches: 1
+ num_stars: 1
+ num_forks: 0
+ num_issues: 2
+ num_closed_issues: 1
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: true
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: true
+
+-
+ id: 3
+ owner_id: 3
+ owner_name: org3
+ lower_name: repo3
+ name: repo3
+ default_branch: master
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 1
+ num_closed_issues: 0
+ num_pulls: 1
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 1
+ num_closed_projects: 0
+ is_private: true
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 2
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+ created_unix: 1700000001
+ updated_unix: 1700000001
+
+-
+ id: 4
+ owner_id: 5
+ owner_name: user5
+ lower_name: repo4
+ name: repo4
+ default_branch: master
+ num_watches: 0
+ num_stars: 1
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 1
+ is_private: false
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 5
+ owner_id: 3
+ owner_name: org3
+ lower_name: repo5
+ name: repo5
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 1
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: true
+ is_empty: false
+ is_archived: false
+ is_mirror: true
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+ created_unix: 1700000002
+ updated_unix: 1700000002
+
+-
+ id: 6
+ owner_id: 10
+ owner_name: user10
+ lower_name: repo6
+ name: repo6
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: true
+ is_empty: true
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+ created_unix: 1710000001
+ updated_unix: 1710000001
+
+-
+ id: 7
+ owner_id: 10
+ owner_name: user10
+ lower_name: repo7
+ name: repo7
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: true
+ is_empty: true
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+ created_unix: 1710000003
+ updated_unix: 1710000003
+
+-
+ id: 8
+ owner_id: 10
+ owner_name: user10
+ lower_name: repo8
+ name: repo8
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: true
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+ created_unix: 1710000002
+ updated_unix: 1710000002
+
+-
+ id: 9
+ owner_id: 11
+ owner_name: user11
+ lower_name: repo9
+ name: repo9
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: true
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 10
+ owner_id: 12
+ owner_name: user12
+ lower_name: repo10
+ name: repo10
+ default_branch: master
+ num_watches: 0
+ num_stars: 0
+ num_forks: 1
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 1
+ num_closed_pulls: 0
+ num_milestones: 1
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 11
+ owner_id: 13
+ owner_name: user13
+ lower_name: repo11
+ name: repo11
+ default_branch: master
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 10
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 12
+ owner_id: 14
+ owner_name: user14
+ lower_name: test_repo_12
+ name: test_repo_12
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: true
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 13
+ owner_id: 14
+ owner_name: user14
+ lower_name: test_repo_13
+ name: test_repo_13
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: true
+ is_empty: true
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 14
+ owner_id: 14
+ owner_name: user14
+ lower_name: test_repo_14
+ name: test_repo_14
+ description: test_description_14
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: true
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 15
+ owner_id: 2
+ owner_name: user2
+ lower_name: repo15
+ name: repo15
+ default_branch: master
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: true
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 16
+ owner_id: 2
+ owner_name: user2
+ lower_name: repo16
+ name: repo16
+ default_branch: master
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: true
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 17
+ owner_id: 15
+ owner_name: user15
+ lower_name: big_test_public_1
+ name: big_test_public_1
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: true
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 18
+ owner_id: 15
+ owner_name: user15
+ lower_name: big_test_public_2
+ name: big_test_public_2
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: true
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 19
+ owner_id: 15
+ owner_name: user15
+ lower_name: big_test_private_1
+ name: big_test_private_1
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: true
+ is_empty: true
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 20
+ owner_id: 15
+ owner_name: user15
+ lower_name: big_test_private_2
+ name: big_test_private_2
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: true
+ is_empty: true
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 21
+ owner_id: 16
+ owner_name: user16
+ lower_name: big_test_public_3
+ name: big_test_public_3
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: true
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 22
+ owner_id: 16
+ owner_name: user16
+ lower_name: big_test_private_3
+ name: big_test_private_3
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: true
+ is_empty: true
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 23
+ owner_id: 17
+ owner_name: org17
+ lower_name: big_test_public_4
+ name: big_test_public_4
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 1
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: true
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 24
+ owner_id: 17
+ owner_name: org17
+ lower_name: big_test_private_4
+ name: big_test_private_4
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: true
+ is_empty: true
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 25
+ owner_id: 20
+ owner_name: user20
+ lower_name: big_test_public_mirror_5
+ name: big_test_public_mirror_5
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: true
+ is_archived: false
+ is_mirror: true
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 26
+ owner_id: 20
+ owner_name: user20
+ lower_name: big_test_private_mirror_5
+ name: big_test_private_mirror_5
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: true
+ is_empty: true
+ is_archived: false
+ is_mirror: true
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 27
+ owner_id: 19
+ owner_name: org19
+ lower_name: big_test_public_mirror_6
+ name: big_test_public_mirror_6
+ num_watches: 0
+ num_stars: 0
+ num_forks: 1
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: true
+ is_archived: false
+ is_mirror: true
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 28
+ owner_id: 19
+ owner_name: org19
+ lower_name: big_test_private_mirror_6
+ name: big_test_private_mirror_6
+ num_watches: 0
+ num_stars: 0
+ num_forks: 1
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: true
+ is_empty: true
+ is_archived: false
+ is_mirror: true
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 29
+ owner_id: 20
+ owner_name: user20
+ lower_name: big_test_public_fork_7
+ name: big_test_public_fork_7
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: true
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: true
+ fork_id: 27
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 30
+ owner_id: 20
+ owner_name: user20
+ lower_name: big_test_private_fork_7
+ name: big_test_private_fork_7
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: true
+ is_empty: true
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: true
+ fork_id: 28
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 31
+ owner_id: 2
+ owner_name: user2
+ lower_name: repo20
+ name: repo20
+ default_branch: master
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: true
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 32 # org public repo
+ owner_id: 3
+ owner_name: org3
+ lower_name: repo21
+ name: repo21
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 2
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: true
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+ created_unix: 1700000003
+ updated_unix: 1700000003
+
+-
+ id: 33
+ owner_id: 2
+ owner_name: user2
+ lower_name: utf8
+ name: utf8
+ default_branch: master
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 34
+ owner_id: 21
+ owner_name: user21
+ lower_name: golang
+ name: golang
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: true
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 35
+ owner_id: 21
+ owner_name: user21
+ lower_name: graphql
+ name: graphql
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: true
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 36
+ owner_id: 2
+ owner_name: user2
+ lower_name: commits_search_test
+ name: commits_search_test
+ default_branch: master
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 37
+ owner_id: 2
+ owner_name: user2
+ lower_name: git_hooks_test
+ name: git_hooks_test
+ default_branch: master
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 38
+ owner_id: 22
+ owner_name: limited_org
+ lower_name: public_repo_on_limited_org
+ name: public_repo_on_limited_org
+ default_branch: master
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 39
+ owner_id: 22
+ owner_name: limited_org
+ lower_name: private_repo_on_limited_org
+ name: private_repo_on_limited_org
+ default_branch: master
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: true
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 40
+ owner_id: 23
+ owner_name: privated_org
+ lower_name: public_repo_on_private_org
+ name: public_repo_on_private_org
+ default_branch: master
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 41
+ owner_id: 23
+ owner_name: privated_org
+ lower_name: private_repo_on_private_org
+ name: private_repo_on_private_org
+ default_branch: master
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: true
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 42
+ owner_id: 2
+ owner_name: user2
+ lower_name: glob
+ name: glob
+ default_branch: master
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 1
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 1
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 43
+ owner_id: 26
+ owner_name: org26
+ lower_name: repo26
+ name: repo26
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: true
+ is_empty: true
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 44
+ owner_id: 27
+ owner_name: user27
+ lower_name: template1
+ name: template1
+ default_branch: master
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: true
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 45
+ owner_id: 27
+ owner_name: user27
+ lower_name: template2
+ name: template2
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: true
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: true
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 46
+ owner_id: 26
+ owner_name: org26
+ lower_name: repo_external_tracker
+ name: repo_external_tracker
+ default_branch: master
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 47
+ owner_id: 26
+ owner_name: org26
+ lower_name: repo_external_tracker_numeric
+ name: repo_external_tracker_numeric
+ default_branch: master
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 48
+ owner_id: 26
+ owner_name: org26
+ lower_name: repo_external_tracker_alpha
+ name: repo_external_tracker_alpha
+ default_branch: master
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 1
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 49
+ owner_id: 27
+ owner_name: user27
+ lower_name: repo49
+ name: repo49
+ description: A wonderful repository with more than just a README.md
+ default_branch: master
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 50
+ owner_id: 30
+ owner_name: user30
+ lower_name: repo50
+ name: repo50
+ default_branch: master
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 1
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 51
+ owner_id: 30
+ owner_name: user30
+ lower_name: repo51
+ name: repo51
+ default_branch: master
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 1
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: false
+ is_archived: true
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 52
+ owner_id: 30
+ owner_name: user30
+ lower_name: empty
+ name: empty
+ default_branch: master
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: true
+ is_empty: true
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 53
+ owner_id: 30
+ owner_name: user30
+ lower_name: renderer
+ name: renderer
+ default_branch: master
+ is_archived: false
+ is_empty: false
+ is_private: false
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_watches: 0
+ num_projects: 0
+ num_closed_projects: 0
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 54
+ owner_id: 2
+ owner_name: user2
+ lower_name: lfs
+ name: lfs
+ default_branch: master
+ is_empty: false
+ is_archived: false
+ is_private: true
+ status: 0
+
+-
+ id: 55
+ owner_id: 2
+ owner_name: user2
+ lower_name: scoped_label
+ name: scoped_label
+ is_empty: false
+ is_archived: false
+ is_private: true
+ num_issues: 1
+ status: 0
+
+-
+ id: 56
+ owner_id: 2
+ owner_name: user2
+ lower_name: readme-test
+ name: readme-test
+ default_branch: master
+ is_empty: false
+ is_archived: false
+ is_private: true
+ status: 0
+ num_issues: 0
+
+-
+ id: 57
+ owner_id: 2
+ owner_name: user2
+ lower_name: repo-release
+ name: repo-release
+ default_branch: main
+ is_empty: false
+ is_archived: false
+ is_private: false
+ status: 0
+ num_issues: 0
+
+-
+ id: 58 # user public repo
+ owner_id: 2
+ owner_name: user2
+ lower_name: commitsonpr
+ name: commitsonpr
+ default_branch: main
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 1
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 1059
+ owner_id: 2
+ owner_name: user2
+ lower_name: repo59
+ name: repo59
+ default_branch: master
+ is_empty: false
+ is_archived: false
+ is_private: false
+ status: 0
+ num_issues: 0
+
+-
+ id: 59
+ owner_id: 2
+ owner_name: user2
+ lower_name: test_commit_revert
+ name: test_commit_revert
+ default_branch: main
+ is_empty: false
+ is_archived: false
+ is_private: true
+ status: 0
+ num_issues: 0
+
+-
+ id: 60
+ owner_id: 40
+ owner_name: user40
+ lower_name: repo60
+ name: repo60
+ default_branch: main
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 1
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 61
+ owner_id: 41
+ owner_name: org41
+ lower_name: repo61
+ name: repo61
+ default_branch: main
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 1
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
+
+-
+ id: 62
+ owner_id: 2
+ owner_name: user2
+ lower_name: test_workflows
+ name: test_workflows
+ default_branch: main
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
diff --git a/models/fixtures/review.yml b/models/fixtures/review.yml
new file mode 100644
index 0000000..0438cea
--- /dev/null
+++ b/models/fixtures/review.yml
@@ -0,0 +1,200 @@
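+# review "type" values (assumed to follow the ReviewType constants in
+# models/issues, as the inline comments below suggest):
+# 0 = pending, 1 = approve, 2 = comment, 3 = reject, 4 = request review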
+-
+ id: 1
+ type: 1
+ reviewer_id: 1
+ issue_id: 2
+ content: "Demo Review"
+ updated_unix: 946684810
+ created_unix: 946684810
+-
+ id: 2
+ type: 1
+ reviewer_id: 534543
+ issue_id: 534543
+ content: "Invalid Review #1"
+ updated_unix: 946684810
+ created_unix: 946684810
+-
+ id: 3
+ type: 1
+ reviewer_id: 1
+ issue_id: 343545
+ content: "Invalid Review #2"
+ updated_unix: 946684810
+ created_unix: 946684810
+-
+ id: 4
+ type: 0 # Pending review
+ reviewer_id: 1
+ issue_id: 2
+ content: "Pending Review"
+ updated_unix: 946684810
+ created_unix: 946684810
+-
+ id: 5
+ type: 2
+ reviewer_id: 1
+ issue_id: 3
+ content: "New review 1"
+ updated_unix: 946684810
+ created_unix: 946684810
+-
+ id: 6
+ type: 0
+ reviewer_id: 2
+ issue_id: 3
+ content: "New review 3"
+ original_author_id: 0
+ updated_unix: 946684811
+ created_unix: 946684811
+-
+ id: 7
+ type: 3
+ reviewer_id: 3
+ issue_id: 3
+ content: "New review 4"
+ original_author_id: 0
+ updated_unix: 946684812
+ created_unix: 946684812
+-
+ id: 8
+ type: 1
+ reviewer_id: 4
+ issue_id: 3
+ original_author_id: 0
+ content: "New review 5"
+ commit_id: 8091a55037cd59e47293aca02981b5a67076b364
+ stale: true
+ updated_unix: 946684813
+ created_unix: 946684813
+-
+ id: 9
+ type: 3
+ reviewer_id: 2
+ issue_id: 3
+ content: "New review 3 rejected"
+ updated_unix: 946684814
+ created_unix: 946684814
+ original_author_id: 0
+
+-
+ id: 10
+ type: 3
+ reviewer_id: 100
+ issue_id: 3
+ content: "a deleted user's review"
+ official: true
+ updated_unix: 946684815
+ created_unix: 946684815
+
+-
+ id: 11
+ type: 4
+ reviewer_id: 0
+ reviewer_team_id: 7
+ issue_id: 12
+ official: true
+ updated_unix: 1602936509
+ created_unix: 1602936509
+
+-
+ id: 12
+ type: 4
+ reviewer_id: 1
+ issue_id: 12
+ official: true
+ updated_unix: 1603196749
+ created_unix: 1603196749
+
+-
+ id: 13
+ type: 1
+ reviewer_id: 5
+ issue_id: 11
+ content: "old review from user5"
+ updated_unix: 946684820
+ created_unix: 946684820
+
+-
+ id: 14
+ type: 1
+ reviewer_id: 5
+ issue_id: 11
+ content: "duplicate review from user5 (latest)"
+ updated_unix: 946684830
+ created_unix: 946684830
+
+-
+ id: 15
+ type: 1
+ reviewer_id: 6
+ issue_id: 11
+ content: "singular review from org6 and final review for this pr"
+ updated_unix: 946684831
+ created_unix: 946684831
+
+-
+ id: 16
+ type: 4
+ reviewer_id: 20
+ issue_id: 20
+ content: "review request for user20"
+ updated_unix: 946684832
+ created_unix: 946684832
+
+-
+ id: 17
+ type: 1
+ reviewer_id: 20
+ issue_id: 20
+ content: "review approved by user20"
+ updated_unix: 946684833
+ created_unix: 946684833
+
+-
+ id: 18
+ type: 4
+ reviewer_id: 0
+ reviewer_team_id: 5
+ issue_id: 20
+ content: "review request for team5"
+ updated_unix: 946684834
+ created_unix: 946684834
+
+-
+ id: 19
+ type: 4
+ reviewer_id: 15
+ reviewer_team_id: 0
+ issue_id: 20
+ content: "review request for user15"
+ updated_unix: 946684835
+ created_unix: 946684835
+
+-
+ id: 20
+ type: 22
+ reviewer_id: 1
+ issue_id: 2
+ content: "Review Comment"
+ updated_unix: 946684810
+ created_unix: 946684810
+
+-
+ id: 21
+ type: 2
+ reviewer_id: 5
+ issue_id: 3
+ content: "reviewed by user5"
+ commit_id: 4a357436d925b5c974181ff12a994538ddc5a269
+ updated_unix: 946684816
+ created_unix: 946684816
+
+-
+ id: 22
+ type: 4
+ reviewer_id: 5
+ issue_id: 3
+ content: "review request for user5"
+ updated_unix: 946684817
+ created_unix: 946684817
diff --git a/models/fixtures/star.yml b/models/fixtures/star.yml
new file mode 100644
index 0000000..860f26b
--- /dev/null
+++ b/models/fixtures/star.yml
@@ -0,0 +1,9 @@
+-
+ id: 1
+ uid: 2
+ repo_id: 2
+
+-
+ id: 2
+ uid: 2
+ repo_id: 4
diff --git a/models/fixtures/stopwatch.yml b/models/fixtures/stopwatch.yml
new file mode 100644
index 0000000..b7919d6
--- /dev/null
+++ b/models/fixtures/stopwatch.yml
@@ -0,0 +1,11 @@
+-
+ id: 1
+ user_id: 1
+ issue_id: 1
+ created_unix: 1500988001
+
+-
+ id: 2
+ user_id: 2
+ issue_id: 2
+ created_unix: 1500988002
diff --git a/models/fixtures/system_setting.yml b/models/fixtures/system_setting.yml
new file mode 100644
index 0000000..30542bc
--- /dev/null
+++ b/models/fixtures/system_setting.yml
@@ -0,0 +1,15 @@
+-
+ id: 1
+ setting_key: 'picture.disable_gravatar'
+ setting_value: 'false'
+ version: 1
+ created: 1653533198
+ updated: 1653533198
+
+-
+ id: 2
+ setting_key: 'picture.enable_federated_avatar'
+ setting_value: 'false'
+ version: 1
+ created: 1653533198
+ updated: 1653533198
diff --git a/models/fixtures/team.yml b/models/fixtures/team.yml
new file mode 100644
index 0000000..149fe90
--- /dev/null
+++ b/models/fixtures/team.yml
@@ -0,0 +1,241 @@
+-
+ id: 1
+ org_id: 3
+ lower_name: owners
+ name: Owners
+ authorize: 4 # owner
+ num_repos: 3
+ num_members: 1
+ includes_all_repositories: false
+ can_create_org_repo: true
+
+-
+ id: 2
+ org_id: 3
+ lower_name: team1
+ name: team1
+ authorize: 2 # write
+ num_repos: 1
+ num_members: 2
+ includes_all_repositories: false
+ can_create_org_repo: false
+
+-
+ id: 3
+ org_id: 6
+ lower_name: owners
+ name: Owners
+ authorize: 4 # owner
+ num_repos: 0
+ num_members: 1
+ includes_all_repositories: false
+ can_create_org_repo: true
+
+-
+ id: 4
+ org_id: 7
+ lower_name: owners
+ name: Owners
+ authorize: 4 # owner
+ num_repos: 0
+ num_members: 1
+ includes_all_repositories: false
+ can_create_org_repo: true
+
+-
+ id: 5
+ org_id: 17
+ lower_name: owners
+ name: Owners
+ authorize: 4 # owner
+ num_repos: 2
+ num_members: 2
+ includes_all_repositories: false
+ can_create_org_repo: true
+
+-
+ id: 6
+ org_id: 19
+ lower_name: owners
+ name: Owners
+ authorize: 4 # owner
+ num_repos: 2
+ num_members: 2
+ includes_all_repositories: false
+ can_create_org_repo: true
+
+-
+ id: 7
+ org_id: 3
+ lower_name: test_team
+ name: test_team
+ authorize: 2 # write
+ num_repos: 1
+ num_members: 1
+ includes_all_repositories: false
+ can_create_org_repo: false
+
+-
+ id: 8
+ org_id: 17
+ lower_name: test_team
+ name: test_team
+ authorize: 2 # write
+ num_repos: 1
+ num_members: 1
+ includes_all_repositories: false
+ can_create_org_repo: false
+
+-
+ id: 9
+ org_id: 17
+ lower_name: review_team
+ name: review_team
+ authorize: 1 # read
+ num_repos: 1
+ num_members: 3
+ includes_all_repositories: false
+ can_create_org_repo: false
+
+-
+ id: 10
+ org_id: 25
+ lower_name: notowners
+ name: NotOwners
+ authorize: 1 # read
+ num_repos: 0
+ num_members: 1
+ includes_all_repositories: false
+ can_create_org_repo: false
+
+-
+ id: 11
+ org_id: 26
+ lower_name: team11
+ name: team11
+ authorize: 1 # read
+ num_repos: 0
+ num_members: 0
+ includes_all_repositories: false
+ can_create_org_repo: false
+
+-
+ id: 12
+ org_id: 3
+ lower_name: team12creators
+ name: team12Creators
+ authorize: 3 # admin
+ num_repos: 0
+ num_members: 1
+ includes_all_repositories: false
+ can_create_org_repo: true
+
+-
+ id: 13
+ org_id: 6
+ lower_name: team13notcreators
+ name: team13NotCreators
+ authorize: 3 # admin
+ num_repos: 0
+ num_members: 1
+ includes_all_repositories: false
+ can_create_org_repo: false
+
+-
+ id: 14
+ org_id: 3
+ lower_name: teamcreaterepo
+ name: teamCreateRepo
+ authorize: 2 # write
+ num_repos: 0
+ num_members: 1
+ includes_all_repositories: false
+ can_create_org_repo: true
+
+-
+ id: 15
+ org_id: 22
+ lower_name: owners
+ name: Owners
+ authorize: 4 # owner
+ num_repos: 0
+ num_members: 0
+ includes_all_repositories: false
+ can_create_org_repo: true
+
+-
+ id: 16
+ org_id: 23
+ lower_name: owners
+ name: Owners
+ authorize: 4 # owner
+ num_repos: 0
+ num_members: 0
+ includes_all_repositories: false
+ can_create_org_repo: true
+
+-
+ id: 17
+ org_id: 23
+ lower_name: team14writeauth
+ name: team14WriteAuth
+ authorize: 2 # write
+ num_repos: 0
+ num_members: 1
+ includes_all_repositories: false
+ can_create_org_repo: true
+
+-
+ id: 18
+ org_id: 35
+ lower_name: owners
+ name: Owners
+ authorize: 4 # owner
+ num_repos: 0
+ num_members: 1
+ includes_all_repositories: false
+ can_create_org_repo: true
+
+-
+ id: 19
+ org_id: 36
+ lower_name: owners
+ name: Owners
+ authorize: 4 # owner
+ num_repos: 0
+ num_members: 1
+ includes_all_repositories: false
+ can_create_org_repo: true
+
+-
+ id: 20
+ org_id: 36
+ lower_name: team20writepackage
+ name: team20writepackage
+ authorize: 1 # read
+ num_repos: 0
+ num_members: 1
+ includes_all_repositories: false
+ can_create_org_repo: true
+
+-
+ id: 21
+ org_id: 41
+ lower_name: owners
+ name: Owners
+ authorize: 4 # owner
+ num_repos: 1
+ num_members: 1
+ includes_all_repositories: true
+ can_create_org_repo: true
+
+-
+ id: 22
+ org_id: 41
+ lower_name: team1
+ name: Team1
+ authorize: 1 # read
+ num_repos: 1
+ num_members: 2
+ includes_all_repositories: false
+ can_create_org_repo: false
diff --git a/models/fixtures/team_repo.yml b/models/fixtures/team_repo.yml
new file mode 100644
index 0000000..a290781
--- /dev/null
+++ b/models/fixtures/team_repo.yml
@@ -0,0 +1,77 @@
+-
+ id: 1
+ org_id: 3
+ team_id: 1
+ repo_id: 3
+
+-
+ id: 2
+ org_id: 3
+ team_id: 2
+ repo_id: 3
+
+-
+ id: 3
+ org_id: 3
+ team_id: 1
+ repo_id: 5
+
+-
+ id: 4
+ org_id: 17
+ team_id: 5
+ repo_id: 23
+
+-
+ id: 5
+ org_id: 17
+ team_id: 5
+ repo_id: 24
+
+-
+ id: 6
+ org_id: 19
+ team_id: 6
+ repo_id: 27
+
+-
+ id: 7
+ org_id: 19
+ team_id: 6
+ repo_id: 28
+
+-
+ id: 8
+ org_id: 3
+ team_id: 1
+ repo_id: 32
+
+-
+ id: 9
+ org_id: 3
+ team_id: 7
+ repo_id: 32
+
+-
+ id: 10
+ org_id: 17
+ team_id: 8
+ repo_id: 24
+
+-
+ id: 11
+ org_id: 17
+ team_id: 9
+ repo_id: 24
+
+-
+ id: 12
+ org_id: 41
+ team_id: 21
+ repo_id: 61
+
+-
+ id: 13
+ org_id: 41
+ team_id: 22
+ repo_id: 61
diff --git a/models/fixtures/team_unit.yml b/models/fixtures/team_unit.yml
new file mode 100644
index 0000000..de0e8d7
--- /dev/null
+++ b/models/fixtures/team_unit.yml
@@ -0,0 +1,324 @@
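+# "type" is the repository unit; assumed mapping per models/unit and the
+# inline comments below: 1 code, 2 issues, 3 pull requests, 4 releases,
+# 5 wiki, 6 external wiki, 7 external tracker, 8 projects, 9 packages.
+# "access_mode" uses the same scale as team authorize:
+# 1 read, 2 write, 3 admin, 4 owner.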
+-
+ id: 1
+ team_id: 1
+ type: 1
+ access_mode: 4
+
+-
+ id: 2
+ team_id: 1
+ type: 2
+ access_mode: 4
+
+-
+ id: 3
+ team_id: 1
+ type: 3
+ access_mode: 4
+
+-
+ id: 4
+ team_id: 1
+ type: 4
+ access_mode: 4
+
+-
+ id: 5
+ team_id: 1
+ type: 5
+ access_mode: 4
+
+-
+ id: 6
+ team_id: 1
+ type: 6
+ access_mode: 4
+
+-
+ id: 7
+ team_id: 1
+ type: 7
+ access_mode: 4
+
+-
+ id: 8
+ team_id: 2
+ type: 1
+ access_mode: 2
+
+-
+ id: 9
+ team_id: 2
+ type: 2
+ access_mode: 2
+
+-
+ id: 10
+ team_id: 2
+ type: 3
+ access_mode: 2
+
+-
+ id: 11
+ team_id: 2
+ type: 4
+ access_mode: 2
+
+-
+ id: 12
+ team_id: 2
+ type: 5
+ access_mode: 2
+
+-
+ id: 13
+ team_id: 2
+ type: 6
+ access_mode: 2
+
+-
+ id: 14
+ team_id: 2
+ type: 7
+ access_mode: 2
+
+-
+ id: 15
+ team_id: 3
+ type: 1
+ access_mode: 4
+
+-
+ id: 16
+ team_id: 3
+ type: 2
+ access_mode: 4
+
+-
+ id: 17
+ team_id: 3
+ type: 3
+ access_mode: 4
+
+-
+ id: 18
+ team_id: 3
+ type: 4
+ access_mode: 4
+
+-
+ id: 19
+ team_id: 3
+ type: 5
+ access_mode: 4
+
+-
+ id: 20
+ team_id: 3
+ type: 6
+ access_mode: 4
+
+-
+ id: 21
+ team_id: 3
+ type: 7
+ access_mode: 4
+
+-
+ id: 22
+ team_id: 4
+ type: 1
+ access_mode: 4
+
+-
+ id: 23
+ team_id: 4
+ type: 2
+ access_mode: 4
+
+-
+ id: 24
+ team_id: 4
+ type: 3
+ access_mode: 4
+
+-
+ id: 25
+ team_id: 4
+ type: 4
+ access_mode: 4
+
+-
+ id: 26
+ team_id: 4
+ type: 5
+ access_mode: 4
+
+-
+ id: 27
+ team_id: 4
+ type: 6
+ access_mode: 4
+
+-
+ id: 28
+ team_id: 4
+ type: 7
+ access_mode: 4
+
+-
+ id: 29
+ team_id: 5
+ type: 1
+ access_mode: 4
+
+-
+ id: 30
+ team_id: 5
+ type: 2
+ access_mode: 4
+
+-
+ id: 31
+ team_id: 5
+ type: 3
+ access_mode: 4
+
+-
+ id: 32
+ team_id: 5
+ type: 4
+ access_mode: 4
+
+-
+ id: 33
+ team_id: 5
+ type: 5
+ access_mode: 4
+
+-
+ id: 34
+ team_id: 5
+ type: 6
+ access_mode: 4
+
+-
+ id: 35
+ team_id: 5
+ type: 7
+ access_mode: 4
+
+-
+ id: 36
+ team_id: 6
+ type: 1
+ access_mode: 4
+
+-
+ id: 37
+ team_id: 6
+ type: 2
+ access_mode: 4
+
+-
+ id: 38
+ team_id: 6
+ type: 3
+ access_mode: 4
+
+-
+ id: 39
+ team_id: 6
+ type: 4
+ access_mode: 4
+
+-
+ id: 40
+ team_id: 6
+ type: 5
+ access_mode: 4
+
+-
+ id: 41
+ team_id: 6
+ type: 6
+ access_mode: 4
+
+-
+ id: 42
+ team_id: 6
+ type: 7
+ access_mode: 4
+
+-
+ id: 43
+ org_id: 3
+ team_id: 7
+ type: 2 # issues
+ access_mode: 2
+
+-
+ id: 44
+ team_id: 8
+ type: 2 # issues
+ access_mode: 2
+
+-
+ id: 45
+ team_id: 9
+ type: 1 # code
+ access_mode: 1
+
+-
+ id: 46
+ team_id: 17
+ type: 9 # package
+ access_mode: 2
+
+-
+ id: 47
+ team_id: 20
+ type: 9 # package
+ access_mode: 2
+
+-
+ id: 48
+ team_id: 2
+ type: 8
+ access_mode: 2
+
+-
+ id: 49
+ team_id: 21
+ type: 1
+ access_mode: 4
+
+-
+ id: 50
+ team_id: 21
+ type: 2
+ access_mode: 4
+
+-
+ id: 51
+ team_id: 21
+ type: 3
+ access_mode: 4
+
+-
+ id: 52
+ team_id: 22
+ type: 1
+ access_mode: 1
+
+-
+ id: 53
+ team_id: 22
+ type: 2
+ access_mode: 1
+
+-
+ id: 54
+ team_id: 22
+ type: 3
+ access_mode: 1
diff --git a/models/fixtures/team_user.yml b/models/fixtures/team_user.yml
new file mode 100644
index 0000000..02d57ae
--- /dev/null
+++ b/models/fixtures/team_user.yml
@@ -0,0 +1,149 @@
+-
+ id: 1
+ org_id: 3
+ team_id: 1
+ uid: 2
+
+-
+ id: 2
+ org_id: 3
+ team_id: 2
+ uid: 2
+
+-
+ id: 3
+ org_id: 3
+ team_id: 2
+ uid: 4
+
+-
+ id: 4
+ org_id: 6
+ team_id: 3
+ uid: 5
+
+-
+ id: 5
+ org_id: 7
+ team_id: 4
+ uid: 5
+
+-
+ id: 6
+ org_id: 17
+ team_id: 5
+ uid: 15
+
+-
+ id: 7
+ org_id: 17
+ team_id: 5
+ uid: 18
+
+-
+ id: 8
+ org_id: 19
+ team_id: 6
+ uid: 20
+
+-
+ id: 9
+ org_id: 3
+ team_id: 7
+ uid: 15
+
+-
+ id: 10
+ org_id: 17
+ team_id: 8
+ uid: 2
+
+-
+ id: 11
+ org_id: 17
+ team_id: 9
+ uid: 20
+
+-
+ id: 12
+ org_id: 25
+ team_id: 10
+ uid: 24
+
+-
+ id: 13
+ org_id: 3
+ team_id: 12
+ uid: 28
+
+-
+ id: 14
+ org_id: 6
+ team_id: 13
+ uid: 28
+
+-
+ id: 15
+ org_id: 17
+ team_id: 9
+ uid: 29
+
+-
+ id: 16
+ org_id: 19
+ team_id: 6
+ uid: 31
+
+-
+ id: 17
+ org_id: 3
+ team_id: 14
+ uid: 2
+
+-
+ id: 18
+ org_id: 23
+ team_id: 17
+ uid: 5
+
+-
+ id: 19
+ org_id: 35
+ team_id: 18
+ uid: 1
+
+-
+ id: 20
+ org_id: 36
+ team_id: 19
+ uid: 1
+
+-
+ id: 21
+ org_id: 36
+ team_id: 20
+ uid: 5
+
+-
+ id: 22
+ org_id: 17
+ team_id: 9
+ uid: 15
+
+-
+ id: 23
+ org_id: 41
+ team_id: 21
+ uid: 40
+
+-
+ id: 24
+ org_id: 41
+ team_id: 22
+ uid: 38
+
+-
+ id: 25
+ org_id: 41
+ team_id: 22
+ uid: 39
diff --git a/models/fixtures/topic.yml b/models/fixtures/topic.yml
new file mode 100644
index 0000000..055addf
--- /dev/null
+++ b/models/fixtures/topic.yml
@@ -0,0 +1,29 @@
+-
+ id: 1
+ name: golang
+ repo_count: 2
+
+-
+ id: 2
+ name: database
+ repo_count: 1
+
+-
+ id: 3
+ name: SQL
+ repo_count: 1
+
+-
+ id: 4
+ name: graphql
+ repo_count: 1
+
+-
+ id: 5
+ name: topicname1
+ repo_count: 1
+
+-
+ id: 6
+ name: topicname2
+ repo_count: 2
diff --git a/models/fixtures/tracked_time.yml b/models/fixtures/tracked_time.yml
new file mode 100644
index 0000000..768af38
--- /dev/null
+++ b/models/fixtures/tracked_time.yml
@@ -0,0 +1,71 @@
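+# "time" is the tracked duration in seconds (e.g. 3661 = 1h 1m 1s).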
+-
+ id: 1
+ user_id: 1
+ issue_id: 1
+ time: 400
+ created_unix: 946684800
+ deleted: false
+
+-
+ id: 2
+ user_id: 2
+ issue_id: 2
+ time: 3661
+ created_unix: 946684801
+ deleted: false
+
+-
+ id: 3
+ user_id: 2
+ issue_id: 2
+ time: 1
+ created_unix: 946684802
+ deleted: false
+
+-
+ id: 4
+ user_id: -1
+ issue_id: 4
+ time: 1
+ created_unix: 946684803
+ deleted: false
+
+-
+ id: 5
+ user_id: 2
+ issue_id: 5
+ time: 1
+ created_unix: 946684804
+ deleted: false
+
+-
+ id: 6
+ user_id: 1
+ issue_id: 2
+ time: 20
+ created_unix: 946684812
+ deleted: false
+
+-
+ id: 7
+ user_id: 2
+ issue_id: 4
+ time: 3
+ created_unix: 946684813
+ deleted: false
+
+-
+ id: 8
+ user_id: 1
+ issue_id: 4
+ time: 71
+ created_unix: 947688814
+ deleted: false
+
+-
+ id: 9
+ user_id: 2
+ issue_id: 2
+ time: 100000
+ created_unix: 947688815
+ deleted: true
diff --git a/models/fixtures/two_factor.yml b/models/fixtures/two_factor.yml
new file mode 100644
index 0000000..d8cb852
--- /dev/null
+++ b/models/fixtures/two_factor.yml
@@ -0,0 +1,9 @@
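+# "secret" holds the TOTP secret (stored encrypted); scratch_salt and
+# scratch_hash protect the one-time recovery scratch token.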
+-
+ id: 1
+ uid: 24
+ secret: KlDporn6Ile4vFcKI8z7Z6sqK1Scj2Qp0ovtUzCZO6jVbRW2lAoT7UDxDPtrab8d2B9zKOocBRdBJnS8orsrUNrsyETY+jJHb79M82uZRioKbRUz15sfOpmJmEzkFeSg6S4LicUBQos=
+ scratch_salt: Qb5bq2DyR2
+ scratch_hash: 068eb9b8746e0bcfe332fac4457693df1bda55800eb0f6894d14ebb736ae6a24e0fc8fc5333c19f57f81599788f0b8e51ec1
+ last_used_passcode:
+ created_unix: 1564253724
+ updated_unix: 1564253724
diff --git a/models/fixtures/user.yml b/models/fixtures/user.yml
new file mode 100644
index 0000000..73b9a97
--- /dev/null
+++ b/models/fixtures/user.yml
@@ -0,0 +1,1520 @@
+# NOTE: all users should have a password of "password"
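+# The fixtures use the "dummy" passwd_hash_algo, which simply stores
+# "<salt>:<cleartext>" instead of a real hash (e.g. salt ZogKvWdyEx and
+# password "password" yield passwd "ZogKvWdyEx:password"), so tests can
+# skip expensive hashing. One exception below: user32's value encodes
+# "notpassword".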
+
+- # NOTE: this user (id=1) is the admin
+ id: 1
+ lower_name: user1
+ name: user1
+ full_name: User One
+ email: user1@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user1
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: true
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar1
+ avatar_email: user1@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 0
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 2
+ lower_name: user2
+ name: user2
+ full_name: ' < U<se>r Tw<o > >< '
+ email: user2@example.com
+ keep_email_private: true
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user2
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar2
+ avatar_email: user2@example.com
+ use_custom_avatar: false
+ num_followers: 2
+ num_following: 1
+ num_stars: 2
+ num_repos: 17
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 3
+ lower_name: org3
+ name: org3
+ full_name: ' <<<< >> >> > >> > >>> >> '
+ email: org3@example.com
+ keep_email_private: false
+ email_notifications_preference: onmention
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: org3
+ type: 1
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: false
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar3
+ avatar_email: org3@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 3
+ num_teams: 5
+ num_members: 3
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 4
+ lower_name: user4
+ name: user4
+ full_name: ' '
+ email: user4@example.com
+ keep_email_private: false
+ email_notifications_preference: onmention
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user4
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar4
+ avatar_email: user4@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 1
+ num_stars: 0
+ num_repos: 0
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 5
+ lower_name: user5
+ name: user5
+ full_name: User Five
+ email: user5@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user5
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: false
+ prohibit_login: false
+ avatar: avatar5
+ avatar_email: user5@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 1
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 6
+ lower_name: org6
+ name: org6
+ full_name: Org Six
+ email: org6@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: org6
+ type: 1
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: false
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar6
+ avatar_email: org6@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 0
+ num_teams: 2
+ num_members: 2
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 7
+ lower_name: org7
+ name: org7
+ full_name: Org Seven
+ email: org7@example.com
+ keep_email_private: false
+ email_notifications_preference: disabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: org7
+ type: 1
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: false
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar7
+ avatar_email: org7@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 0
+ num_teams: 1
+ num_members: 1
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 8
+ lower_name: user8
+ name: user8
+ full_name: User Eight
+ email: user8@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user8
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar8
+ avatar_email: user8@example.com
+ use_custom_avatar: false
+ num_followers: 1
+ num_following: 1
+ num_stars: 0
+ num_repos: 0
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 9
+ lower_name: user9
+ name: user9
+ full_name: User Nine
+ email: user9@example.com
+ keep_email_private: false
+ email_notifications_preference: onmention
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user9
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: false
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar9
+ avatar_email: user9@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 0
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+ created_unix: 1730468968
+
+-
+ id: 10
+ lower_name: user10
+ name: user10
+ full_name: User Ten
+ email: user10@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user10
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar10
+ avatar_email: user10@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 3
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 11
+ lower_name: user11
+ name: user11
+ full_name: User Eleven
+ email: user11@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user11
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar11
+ avatar_email: user11@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 1
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 12
+ lower_name: user12
+ name: user12
+ full_name: User 12
+ email: user12@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user12
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar12
+ avatar_email: user12@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 1
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 13
+ lower_name: user13
+ name: user13
+ full_name: User 13
+ email: user13@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user13
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar13
+ avatar_email: user13@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 1
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 14
+ lower_name: user14
+ name: user14
+ full_name: User 14
+ email: user14@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user14
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar14
+ avatar_email: user13@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 3
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 15
+ lower_name: user15
+ name: user15
+ full_name: User 15
+ email: user15@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user15
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar15
+ avatar_email: user15@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 4
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 16
+ lower_name: user16
+ name: user16
+ full_name: User 16
+ email: user16@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user16
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar16
+ avatar_email: user16@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 2
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 17
+ lower_name: org17
+ name: org17
+ full_name: org 17
+ email: org17@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: org17
+ type: 1
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar17
+ avatar_email: org17@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 2
+ num_teams: 3
+ num_members: 4
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 18
+ lower_name: user18
+ name: user18
+ full_name: User 18
+ email: user18@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user18
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar18
+ avatar_email: user18@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 0
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 19
+ lower_name: org19
+ name: org19
+ full_name: Org 19
+ email: org19@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: org19
+ type: 1
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar19
+ avatar_email: org19@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 2
+ num_teams: 1
+ num_members: 2
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 20
+ lower_name: user20
+ name: user20
+ full_name: User 20
+ email: user20@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user20
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar20
+ avatar_email: user20@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 4
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 21
+ lower_name: user21
+ name: user21
+ full_name: User 21
+ email: user21@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user21
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar21
+ avatar_email: user21@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 2
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 22
+ lower_name: limited_org
+ name: limited_org
+ full_name: Limited Org
+ email: limited_org@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: limited_org
+ type: 1
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar22
+ avatar_email: limited_org@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 2
+ num_teams: 1
+ num_members: 0
+ visibility: 1
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 23
+ lower_name: privated_org
+ name: privated_org
+ full_name: Privated Org
+ email: privated_org@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: privated_org
+ type: 1
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar23
+ avatar_email: privated_org@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 2
+ num_teams: 2
+ num_members: 1
+ visibility: 2
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 24
+ lower_name: user24
+ name: user24
+ full_name: user24
+ email: user24@example.com
+ keep_email_private: true
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user24
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar24
+ avatar_email: user24@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 0
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 25
+ lower_name: org25
+ name: org25
+ full_name: org25
+ email: org25@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: org25
+ type: 1
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: false
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar25
+ avatar_email: org25@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 0
+ num_teams: 1
+ num_members: 1
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 26
+ lower_name: org26
+ name: org26
+ full_name: Org26
+ email: org26@example.com
+ keep_email_private: false
+ email_notifications_preference: onmention
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: org26
+ type: 1
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: false
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar26
+ avatar_email: org26@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 4
+ num_teams: 1
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: true
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 27
+ lower_name: user27
+ name: user27
+ full_name: User Twenty-Seven
+ email: user27@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user27
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar27
+ avatar_email: user27@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 3
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 28
+ lower_name: user28
+ name: user28
+ full_name: user27
+ email: user28@example.com
+ keep_email_private: true
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user28
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar28
+ avatar_email: user28@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 0
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 29
+ lower_name: user29
+ name: user29
+ full_name: User 29
+ email: user29@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user29
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: true
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar29
+ avatar_email: user29@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 0
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 30
+ lower_name: user30
+ name: user30
+ full_name: User Thirty
+ email: user30@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user30
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar29
+ avatar_email: user30@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 4
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 31
+ lower_name: user31
+ name: user31
+ full_name: user31
+ email: user31@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user31
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar31
+ avatar_email: user31@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 1
+ num_stars: 0
+ num_repos: 0
+ num_teams: 0
+ num_members: 0
+ visibility: 2
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 32
+ lower_name: user32
+ name: user32
+ full_name: User 32 (U2F test)
+ email: user32@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:notpassword
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user32
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar32
+ avatar_email: user30@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 0
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 33
+ lower_name: user33
+ name: user33
+ full_name: User 33 (Limited Visibility)
+ email: user33@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user33
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar33
+ avatar_email: user33@example.com
+ use_custom_avatar: false
+ num_followers: 1
+ num_following: 0
+ num_stars: 0
+ num_repos: 0
+ num_teams: 0
+ num_members: 0
+ visibility: 1
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 34
+ lower_name: the_34-user.with.all.allowedchars
+ name: the_34-user.with.all.allowedChars
+ full_name: the_1-user.with.all.allowedChars
+ description: 'some [commonmark](https://commonmark.org/)!'
+ email: user34@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: the_34-user.with.all.allowedchars
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: false
+ prohibit_login: false
+ avatar: avatar34
+ avatar_email: user34@example.com
+ use_custom_avatar: true
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 0
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 35
+ lower_name: private_org35
+ name: private_org35
+ full_name: Private Org 35
+ email: private_org35@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: private_org35
+ type: 1
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar35
+ avatar_email: private_org35@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 0
+ num_teams: 1
+ num_members: 1
+ visibility: 2
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 36
+ lower_name: limited_org36
+ name: limited_org36
+ full_name: Limited Org 36
+ email: abcde@gitea.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: limited_org36
+ type: 1
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar22
+ avatar_email: abcde@gitea.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 0
+ num_teams: 2
+ num_members: 2
+ visibility: 1
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 37
+ lower_name: user37
+ name: user37
+ full_name: User 37
+ email: user37@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user37
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: true
+ avatar: avatar29
+ avatar_email: user37@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 0
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 38
+ lower_name: user38
+ name: user38
+ full_name: User38
+ email: user38@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user38
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar38
+ avatar_email: user38@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 0
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 39
+ lower_name: user39
+ name: user39
+ full_name: User39
+ email: user39@example.com
+ keep_email_private: false
+ email_notifications_preference: enabled
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user39
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar39
+ avatar_email: user39@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 0
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 40
+ lower_name: user40
+ name: user40
+ full_name: User40
+ email: user40@example.com
+ keep_email_private: false
+ email_notifications_preference: onmention
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: user40
+ type: 0
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar40
+ avatar_email: user40@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 1
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
+
+-
+ id: 41
+ lower_name: org41
+ name: org41
+ full_name: Org41
+ email: org41@example.com
+ keep_email_private: false
+ email_notifications_preference: onmention
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 0
+ login_name: org41
+ type: 1
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: false
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: false
+ avatar: avatar41
+ avatar_email: org41@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 1
+ num_teams: 2
+ num_members: 3
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
diff --git a/models/fixtures/user_open_id.yml b/models/fixtures/user_open_id.yml
new file mode 100644
index 0000000..d3a367b
--- /dev/null
+++ b/models/fixtures/user_open_id.yml
@@ -0,0 +1,17 @@
+-
+ id: 1
+ uid: 1
+ uri: https://user1.domain1.tld/
+ show: false
+
+-
+ id: 2
+ uid: 1
+ uri: http://user1.domain2.tld/
+ show: true
+
+-
+ id: 3
+ uid: 2
+ uri: https://domain1.tld/user2/
+ show: true
diff --git a/models/fixtures/user_redirect.yml b/models/fixtures/user_redirect.yml
new file mode 100644
index 0000000..8ff7993
--- /dev/null
+++ b/models/fixtures/user_redirect.yml
@@ -0,0 +1,4 @@
+-
+ id: 1
+ lower_name: olduser1
+ redirect_user_id: 1
diff --git a/models/fixtures/watch.yml b/models/fixtures/watch.yml
new file mode 100644
index 0000000..c6c9726
--- /dev/null
+++ b/models/fixtures/watch.yml
@@ -0,0 +1,35 @@
+-
+ id: 1
+ user_id: 1
+ repo_id: 1
+ mode: 1 # normal
+
+-
+ id: 2
+ user_id: 4
+ repo_id: 1
+ mode: 1 # normal
+
+-
+ id: 3
+ user_id: 9
+ repo_id: 1
+ mode: 1 # normal
+
+-
+ id: 4
+ user_id: 8
+ repo_id: 1
+ mode: 2 # don't watch
+
+-
+ id: 5
+ user_id: 11
+ repo_id: 1
+ mode: 3 # auto
+
+-
+ id: 6
+ user_id: 4
+ repo_id: 2
+ mode: 1 # normal
diff --git a/models/fixtures/webauthn_credential.yml b/models/fixtures/webauthn_credential.yml
new file mode 100644
index 0000000..edf9935
--- /dev/null
+++ b/models/fixtures/webauthn_credential.yml
@@ -0,0 +1,10 @@
+-
+ id: 1
+ name: WebAuthn credential
+ user_id: 32
+ attestation_type: none
+ sign_count: 0
+ clone_warning: false
+ legacy: true
+ created_unix: 946684800
+ updated_unix: 946684800
diff --git a/models/fixtures/webhook.yml b/models/fixtures/webhook.yml
new file mode 100644
index 0000000..cab5c5a
--- /dev/null
+++ b/models/fixtures/webhook.yml
@@ -0,0 +1,37 @@
+-
+ id: 1
+ repo_id: 1
+ url: http://www.example.com/url1
+ http_method: POST
+ type: forgejo
+ content_type: 1 # json
+ events: '{"push_only":true,"send_everything":false,"choose_events":false,"events":{"create":false,"push":true,"pull_request":false}}'
+ is_active: false # disable to prevent sending hook task during unrelated tests
+
+-
+ id: 2
+ repo_id: 1
+ url: http://www.example.com/url2
+ http_method: POST
+ content_type: 1 # json
+ events: '{"push_only":false,"send_everything":false,"choose_events":false,"events":{"create":false,"push":true,"pull_request":true}}'
+ is_active: false
+
+-
+ id: 3
+ owner_id: 3
+ repo_id: 3
+ url: http://www.example.com/url3
+ http_method: POST
+ content_type: 1 # json
+ events: '{"push_only":false,"send_everything":false,"choose_events":false,"events":{"create":false,"push":true,"pull_request":true}}'
+ is_active: false
+-
+ id: 4
+ repo_id: 2
+ type: gitea
+ url: http://www.example.com/url4
+ http_method: POST
+ content_type: 1 # json
+ events: '{"send_everything":true,"branch_filter":"{master,feature*}"}'
+ is_active: false
diff --git a/models/forgefed/federationhost.go b/models/forgefed/federationhost.go
new file mode 100644
index 0000000..b60c0c3
--- /dev/null
+++ b/models/forgefed/federationhost.go
@@ -0,0 +1,52 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package forgefed
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/validation"
+)
+
+// FederationHost data type
+// swagger:model
+type FederationHost struct {
+ ID int64 `xorm:"pk autoincr"`
+ HostFqdn string `xorm:"host_fqdn UNIQUE INDEX VARCHAR(255) NOT NULL"`
+ NodeInfo NodeInfo `xorm:"extends NOT NULL"`
+ LatestActivity time.Time `xorm:"NOT NULL"`
+ Created timeutil.TimeStamp `xorm:"created"`
+ Updated timeutil.TimeStamp `xorm:"updated"`
+}
+
+// NewFederationHost is a factory function for FederationHost. The created struct is validated before it is returned.
+func NewFederationHost(nodeInfo NodeInfo, hostFqdn string) (FederationHost, error) {
+ result := FederationHost{
+ HostFqdn: strings.ToLower(hostFqdn),
+ NodeInfo: nodeInfo,
+ }
+ if valid, err := validation.IsValid(result); !valid {
+ return FederationHost{}, err
+ }
+ return result, nil
+}
+
+// Validate collects error strings in a slice and returns them
+func (host FederationHost) Validate() []string {
+ var result []string
+ result = append(result, validation.ValidateNotEmpty(host.HostFqdn, "HostFqdn")...)
+ result = append(result, validation.ValidateMaxLen(host.HostFqdn, 255, "HostFqdn")...)
+ result = append(result, host.NodeInfo.Validate()...)
+ if host.HostFqdn != strings.ToLower(host.HostFqdn) {
+ result = append(result, fmt.Sprintf("HostFqdn has to be lower case but was: %v", host.HostFqdn))
+ }
+ if !host.LatestActivity.IsZero() && host.LatestActivity.After(time.Now().Add(10*time.Minute)) {
+ result = append(result, fmt.Sprintf("Latest Activity cannot be in the far future: %v", host.LatestActivity))
+ }
+
+ return result
+}
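
To see how the factory and Validate cooperate, here is a minimal sketch (assuming the import paths above resolve inside a Forgejo checkout): NewFederationHost lower-cases the FQDN up front and delegates the rest to validation.IsValid, which surfaces the messages collected by Validate as an error.

	package main

	import (
		"fmt"

		"code.gitea.io/gitea/models/forgefed"
	)

	func main() {
		// Mixed-case input is normalized before validation, so this succeeds.
		host, err := forgefed.NewFederationHost(
			forgefed.NodeInfo{SoftwareName: forgefed.ForgejoSourceType},
			"Example.ORG",
		)
		if err != nil {
			fmt.Println("invalid host:", err)
			return
		}
		fmt.Println(host.HostFqdn) // "example.org"

		// Validate can also be called directly; it returns the raw messages.
		for _, msg := range (forgefed.FederationHost{}).Validate() {
			fmt.Println("problem:", msg)
		}
	}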
diff --git a/models/forgefed/federationhost_repository.go b/models/forgefed/federationhost_repository.go
new file mode 100644
index 0000000..03d8741
--- /dev/null
+++ b/models/forgefed/federationhost_repository.go
@@ -0,0 +1,61 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package forgefed
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/validation"
+)
+
+func init() {
+ db.RegisterModel(new(FederationHost))
+}
+
+func GetFederationHost(ctx context.Context, ID int64) (*FederationHost, error) {
+ host := new(FederationHost)
+ has, err := db.GetEngine(ctx).Where("id=?", ID).Get(host)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, fmt.Errorf("FederationHost record %v does not exist", ID)
+ }
+ if res, err := validation.IsValid(host); !res {
+ return nil, err
+ }
+ return host, nil
+}
+
+func FindFederationHostByFqdn(ctx context.Context, fqdn string) (*FederationHost, error) {
+ host := new(FederationHost)
+ has, err := db.GetEngine(ctx).Where("host_fqdn=?", strings.ToLower(fqdn)).Get(host)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, nil
+ }
+ if res, err := validation.IsValid(host); !res {
+ return nil, err
+ }
+ return host, nil
+}
+
+func CreateFederationHost(ctx context.Context, host *FederationHost) error {
+ if res, err := validation.IsValid(host); !res {
+ return err
+ }
+ _, err := db.GetEngine(ctx).Insert(host)
+ return err
+}
+
+func UpdateFederationHost(ctx context.Context, host *FederationHost) error {
+ if res, err := validation.IsValid(host); !res {
+ return err
+ }
+ _, err := db.GetEngine(ctx).ID(host.ID).Update(host)
+ return err
+}
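
FindFederationHostByFqdn deliberately returns (nil, nil) when no row matches, so callers branch on the nil pointer rather than on an error. A hedged find-or-create sketch built from the functions above (ensureFederationHost is a hypothetical helper; the context and forgefed imports are assumed):

	// ensureFederationHost returns the stored host for fqdn,
	// inserting a fresh, validated record on first contact.
	func ensureFederationHost(ctx context.Context, fqdn string) (*forgefed.FederationHost, error) {
		host, err := forgefed.FindFederationHostByFqdn(ctx, fqdn)
		if err != nil || host != nil {
			return host, err // found it, or hit a real database error
		}
		fresh, err := forgefed.NewFederationHost(
			forgefed.NodeInfo{SoftwareName: forgefed.ForgejoSourceType}, fqdn)
		if err != nil {
			return nil, err
		}
		if err := forgefed.CreateFederationHost(ctx, &fresh); err != nil {
			return nil, err
		}
		return &fresh, nil
	}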
diff --git a/models/forgefed/federationhost_test.go b/models/forgefed/federationhost_test.go
new file mode 100644
index 0000000..ea5494c
--- /dev/null
+++ b/models/forgefed/federationhost_test.go
@@ -0,0 +1,78 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package forgefed
+
+import (
+ "strings"
+ "testing"
+ "time"
+
+ "code.gitea.io/gitea/modules/validation"
+)
+
+func Test_FederationHostValidation(t *testing.T) {
+ sut := FederationHost{
+ HostFqdn: "host.do.main",
+ NodeInfo: NodeInfo{
+ SoftwareName: "forgejo",
+ },
+ LatestActivity: time.Now(),
+ }
+ if res, err := validation.IsValid(sut); !res {
+ t.Errorf("sut should be valid but was invalid: %q", err)
+ }
+
+ sut = FederationHost{
+ HostFqdn: "",
+ NodeInfo: NodeInfo{
+ SoftwareName: "forgejo",
+ },
+ LatestActivity: time.Now(),
+ }
+ if res, _ := validation.IsValid(sut); res {
+ t.Errorf("sut should be invalid: HostFqdn empty")
+ }
+
+ sut = FederationHost{
+ HostFqdn: strings.Repeat("fill", 64),
+ NodeInfo: NodeInfo{
+ SoftwareName: "forgejo",
+ },
+ LatestActivity: time.Now(),
+ }
+ if res, _ := validation.IsValid(sut); res {
+ t.Errorf("sut should be invalid: HostFqdn too long (len=256)")
+ }
+
+ sut = FederationHost{
+ HostFqdn: "host.do.main",
+ NodeInfo: NodeInfo{},
+ LatestActivity: time.Now(),
+ }
+ if res, _ := validation.IsValid(sut); res {
+ t.Errorf("sut should be invalid: NodeInfo invalid")
+ }
+
+ sut = FederationHost{
+ HostFqdn: "host.do.main",
+ NodeInfo: NodeInfo{
+ SoftwareName: "forgejo",
+ },
+ LatestActivity: time.Now().Add(1 * time.Hour),
+ }
+ if res, _ := validation.IsValid(sut); res {
+ t.Errorf("sut should be invalid: Future timestamp")
+ }
+
+ sut = FederationHost{
+ HostFqdn: "hOst.do.main",
+ NodeInfo: NodeInfo{
+ SoftwareName: "forgejo",
+ },
+ LatestActivity: time.Now(),
+ }
+ if res, _ := validation.IsValid(sut); res {
+ t.Errorf("sut should be invalid: HostFqdn not lower case")
+ }
+}
diff --git a/models/forgefed/nodeinfo.go b/models/forgefed/nodeinfo.go
new file mode 100644
index 0000000..66d2eca
--- /dev/null
+++ b/models/forgefed/nodeinfo.go
@@ -0,0 +1,123 @@
+// Copyright 2023 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package forgefed
+
+import (
+ "net/url"
+
+ "code.gitea.io/gitea/modules/validation"
+
+ "github.com/valyala/fastjson"
+)
+
+// ToDo: Search for full text SourceType and Source, also in .md files
+type (
+ SoftwareNameType string
+)
+
+const (
+ ForgejoSourceType SoftwareNameType = "forgejo"
+ GiteaSourceType SoftwareNameType = "gitea"
+)
+
+var KnownSourceTypes = []any{
+ ForgejoSourceType, GiteaSourceType,
+}
+
+// ------------------------------------------------ NodeInfoWellKnown ------------------------------------------------
+
+// NodeInfoWellKnown data type
+// swagger:model
+type NodeInfoWellKnown struct {
+ Href string
+}
+
+// NewNodeInfoWellKnown is a factory function for NodeInfoWellKnown. The created struct is validated before it is returned.
+func NewNodeInfoWellKnown(body []byte) (NodeInfoWellKnown, error) {
+ result, err := NodeInfoWellKnownUnmarshalJSON(body)
+ if err != nil {
+ return NodeInfoWellKnown{}, err
+ }
+
+ if valid, err := validation.IsValid(result); !valid {
+ return NodeInfoWellKnown{}, err
+ }
+
+ return result, nil
+}
+
+func NodeInfoWellKnownUnmarshalJSON(data []byte) (NodeInfoWellKnown, error) {
+ p := fastjson.Parser{}
+ val, err := p.ParseBytes(data)
+ if err != nil {
+ return NodeInfoWellKnown{}, err
+ }
+ href := string(val.GetStringBytes("links", "0", "href"))
+ return NodeInfoWellKnown{Href: href}, nil
+}
+
+// Validate collects error strings in a slice and returns them
+func (node NodeInfoWellKnown) Validate() []string {
+ var result []string
+ result = append(result, validation.ValidateNotEmpty(node.Href, "Href")...)
+
+ parsedURL, err := url.Parse(node.Href)
+ if err != nil {
+ result = append(result, err.Error())
+ return result
+ }
+
+ if parsedURL.Host == "" {
+ result = append(result, "Href has to be absolute")
+ }
+
+ result = append(result, validation.ValidateOneOf(parsedURL.Scheme, []any{"http", "https"}, "parsedURL.Scheme")...)
+
+ if parsedURL.RawQuery != "" {
+ result = append(result, "Href may not contain query")
+ }
+
+ return result
+}
+
+// ------------------------------------------------ NodeInfo ------------------------------------------------
+
+// NodeInfo data type
+// swagger:model
+type NodeInfo struct {
+ SoftwareName SoftwareNameType
+}
+
+func NodeInfoUnmarshalJSON(data []byte) (NodeInfo, error) {
+ p := fastjson.Parser{}
+ val, err := p.ParseBytes(data)
+ if err != nil {
+ return NodeInfo{}, err
+ }
+ source := string(val.GetStringBytes("software", "name"))
+ result := NodeInfo{}
+ result.SoftwareName = SoftwareNameType(source)
+ return result, nil
+}
+
+func NewNodeInfo(body []byte) (NodeInfo, error) {
+ result, err := NodeInfoUnmarshalJSON(body)
+ if err != nil {
+ return NodeInfo{}, err
+ }
+
+ if valid, err := validation.IsValid(result); !valid {
+ return NodeInfo{}, err
+ }
+ return result, nil
+}
+
+// Validate collects error strings in a slice and returns them
+func (node NodeInfo) Validate() []string {
+ var result []string
+ result = append(result, validation.ValidateNotEmpty(string(node.SoftwareName), "node.SoftwareName")...)
+ result = append(result, validation.ValidateOneOf(node.SoftwareName, KnownSourceTypes, "node.SoftwareName")...)
+
+ return result
+}
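
NodeInfoWellKnown and NodeInfo are the two halves of the NodeInfo discovery protocol: fetch /.well-known/nodeinfo, follow the advertised Href, then read software.name from the document it points at. A sketch of that chain using only the standard library plus the constructors above (fetch and discoverSoftwareName are hypothetical helpers, not part of the diff):

	// fetch downloads a URL body; error handling is minimal on purpose.
	func fetch(url string) ([]byte, error) {
		resp, err := http.Get(url) // net/http
		if err != nil {
			return nil, err
		}
		defer resp.Body.Close()
		return io.ReadAll(resp.Body) // io
	}

	// discoverSoftwareName resolves a host's software name ("forgejo", "gitea", ...).
	func discoverSoftwareName(base string) (forgefed.SoftwareNameType, error) {
		wkBody, err := fetch(base + "/.well-known/nodeinfo")
		if err != nil {
			return "", err
		}
		wk, err := forgefed.NewNodeInfoWellKnown(wkBody) // validates Href
		if err != nil {
			return "", err
		}
		niBody, err := fetch(wk.Href)
		if err != nil {
			return "", err
		}
		ni, err := forgefed.NewNodeInfo(niBody) // validates software.name
		if err != nil {
			return "", err
		}
		return ni.SoftwareName, nil
	}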
diff --git a/models/forgefed/nodeinfo_test.go b/models/forgefed/nodeinfo_test.go
new file mode 100644
index 0000000..4c73bb4
--- /dev/null
+++ b/models/forgefed/nodeinfo_test.go
@@ -0,0 +1,92 @@
+// Copyright 2023 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package forgefed
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+
+ "code.gitea.io/gitea/modules/validation"
+)
+
+func Test_NodeInfoWellKnownUnmarshalJSON(t *testing.T) {
+ type testPair struct {
+ item []byte
+ want NodeInfoWellKnown
+ wantErr error
+ }
+
+ tests := map[string]testPair{
+ "with href": {
+ item: []byte(`{"links":[{"href":"https://federated-repo.prod.meissa.de/api/v1/nodeinfo","rel":"http://nodeinfo.diaspora.software/ns/schema/2.1"}]}`),
+ want: NodeInfoWellKnown{
+ Href: "https://federated-repo.prod.meissa.de/api/v1/nodeinfo",
+ },
+ },
+ "empty": {
+ item: []byte(``),
+ wantErr: fmt.Errorf("cannot parse JSON: cannot parse empty string; unparsed tail: \"\""),
+ },
+ }
+
+ for name, tt := range tests {
+ t.Run(name, func(t *testing.T) {
+ got, err := NodeInfoWellKnownUnmarshalJSON(tt.item)
+ if (err != nil) != (tt.wantErr != nil) || (err != nil && err.Error() != tt.wantErr.Error()) {
+ t.Errorf("UnmarshalJSON() error = \"%v\", wantErr \"%v\"", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("UnmarshalJSON() got = %q, want %q", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_NodeInfoWellKnownValidate(t *testing.T) {
+ sut := NodeInfoWellKnown{Href: "https://federated-repo.prod.meissa.de/api/v1/nodeinfo"}
+ if b, err := validation.IsValid(sut); !b {
+ t.Errorf("sut should be valid, %v, %v", sut, err)
+ }
+
+ sut = NodeInfoWellKnown{Href: "./federated-repo.prod.meissa.de/api/v1/nodeinfo"}
+ _, err := validation.IsValid(sut)
+ if !validation.IsErrNotValid(err) || !strings.Contains(err.Error(), "Href has to be absolute\nValue is not contained in allowed values [http https]") {
+ t.Errorf("validation error expected but was: %v\n", err)
+ }
+
+ sut = NodeInfoWellKnown{Href: "https://federated-repo.prod.meissa.de/api/v1/nodeinfo?alert=1"}
+ _, err = validation.IsValid(sut)
+ if !validation.IsErrNotValid(err) || !strings.Contains(err.Error(), "Href may not contain query") {
+ t.Errorf("validation error expected but was: %v\n", err)
+ }
+}
+
+func Test_NewNodeInfoWellKnown(t *testing.T) {
+ sut, _ := NewNodeInfoWellKnown([]byte(`{"links":[{"href":"https://federated-repo.prod.meissa.de/api/v1/nodeinfo","rel":"http://nodeinfo.diaspora.software/ns/schema/2.1"}]}`))
+ expected := NodeInfoWellKnown{Href: "https://federated-repo.prod.meissa.de/api/v1/nodeinfo"}
+ if sut != expected {
+ t.Errorf("expected: %v but got: %v", expected, sut)
+ }
+
+ _, err := NewNodeInfoWellKnown([]byte(`invalid`))
+ if err == nil {
+ t.Errorf("error was expected here")
+ }
+}
+
+func Test_NewNodeInfo(t *testing.T) {
+ sut, _ := NewNodeInfo([]byte(`{"version":"2.1","software":{"name":"gitea","version":"1.20.0+dev-2539-g5840cc6d3","repository":"https://github.com/go-gitea/gitea.git","homepage":"https://gitea.io/"},"protocols":["activitypub"],"services":{"inbound":[],"outbound":["rss2.0"]},"openRegistrations":true,"usage":{"users":{"total":13,"activeHalfyear":1,"activeMonth":1}},"metadata":{}}`))
+ expected := NodeInfo{SoftwareName: "gitea"}
+ if sut != expected {
+ t.Errorf("expected: %v but got: %v", expected, sut)
+ }
+
+ _, err := NewNodeInfo([]byte(`invalid`))
+ if err == nil {
+ t.Errorf("error was expected here")
+ }
+}
diff --git a/models/forgejo/semver/main_test.go b/models/forgejo/semver/main_test.go
new file mode 100644
index 0000000..fa56182
--- /dev/null
+++ b/models/forgejo/semver/main_test.go
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: MIT
+
+package semver
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/unittest"
+
+ _ "code.gitea.io/gitea/models"
+ _ "code.gitea.io/gitea/models/actions"
+ _ "code.gitea.io/gitea/models/activities"
+)
+
+func TestMain(m *testing.M) {
+ unittest.MainTest(m)
+}
diff --git a/models/forgejo/semver/semver.go b/models/forgejo/semver/semver.go
new file mode 100644
index 0000000..7f122d2
--- /dev/null
+++ b/models/forgejo/semver/semver.go
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: MIT
+
+package semver
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+
+ "github.com/hashicorp/go-version"
+)
+
+func init() {
+ db.RegisterModel(new(ForgejoSemVer))
+}
+
+var DefaultVersionString = "1.0.0"
+
+type ForgejoSemVer struct {
+ Version string
+}
+
+func GetVersion(ctx context.Context) (*version.Version, error) {
+ return GetVersionWithEngine(db.GetEngine(ctx))
+}
+
+func GetVersionWithEngine(e db.Engine) (*version.Version, error) {
+ versionString := DefaultVersionString
+
+ exists, err := e.IsTableExist("forgejo_sem_ver")
+ if err != nil {
+ return nil, err
+ }
+ if exists {
+ var semver ForgejoSemVer
+ has, err := e.Get(&semver)
+ if err != nil {
+ return nil, err
+ } else if has {
+ versionString = semver.Version
+ }
+ }
+
+ v, err := version.NewVersion(versionString)
+ if err != nil {
+ return nil, err
+ }
+ return v, nil
+}
+
+func SetVersionString(ctx context.Context, versionString string) error {
+ return SetVersionStringWithEngine(db.GetEngine(ctx), versionString)
+}
+
+func SetVersionStringWithEngine(e db.Engine, versionString string) error {
+ v, err := version.NewVersion(versionString)
+ if err != nil {
+ return err
+ }
+ return SetVersionWithEngine(e, v)
+}
+
+func SetVersion(ctx context.Context, v *version.Version) error {
+ return SetVersionWithEngine(db.GetEngine(ctx), v)
+}
+
+func SetVersionWithEngine(e db.Engine, v *version.Version) error {
+ var semver ForgejoSemVer
+ has, err := e.Get(&semver)
+ if err != nil {
+ return err
+ }
+
+ if !has {
+ _, err = e.Exec("insert into forgejo_sem_ver values (?)", v.String())
+ } else {
+ _, err = e.Exec("update forgejo_sem_ver set version = ?", v.String())
+ }
+ return err
+}
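
Each operation above comes in a context-based and an engine-based variant because migrations (see migrate.go below) only have an *xorm.Engine at hand. A small round-trip sketch, assuming an initialized database and an import of code.gitea.io/gitea/models/forgejo/semver, mirroring what the test below exercises:

	ctx := db.DefaultContext

	// Invalid semver strings are rejected by hashicorp/go-version.
	if err := semver.SetVersionString(ctx, "1.2.3"); err != nil {
		panic(err)
	}
	v, err := semver.GetVersion(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // 1.2.3; GetVersion falls back to 1.0.0 when no row or table exists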
diff --git a/models/forgejo/semver/semver_test.go b/models/forgejo/semver/semver_test.go
new file mode 100644
index 0000000..a508c69
--- /dev/null
+++ b/models/forgejo/semver/semver_test.go
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: MIT
+
+package semver
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/hashicorp/go-version"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestForgejoSemVerSetGet(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ ctx := db.DefaultContext
+
+ newVersion, err := version.NewVersion("v1.2.3")
+ require.NoError(t, err)
+ require.NoError(t, SetVersionString(ctx, newVersion.String()))
+ databaseVersion, err := GetVersion(ctx)
+ require.NoError(t, err)
+ assert.EqualValues(t, newVersion.String(), databaseVersion.String())
+ assert.True(t, newVersion.Equal(databaseVersion))
+}
+
+func TestForgejoSemVerMissing(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ ctx := db.DefaultContext
+ e := db.GetEngine(ctx)
+
+ _, err := e.Exec("delete from forgejo_sem_ver")
+ require.NoError(t, err)
+
+ v, err := GetVersion(ctx)
+ require.NoError(t, err)
+ assert.EqualValues(t, "1.0.0", v.String())
+
+ _, err = e.Exec("drop table forgejo_sem_ver")
+ require.NoError(t, err)
+
+ v, err = GetVersion(ctx)
+ require.NoError(t, err)
+ assert.EqualValues(t, "1.0.0", v.String())
+}
diff --git a/models/forgejo_migrations/main_test.go b/models/forgejo_migrations/main_test.go
new file mode 100644
index 0000000..2297f74
--- /dev/null
+++ b/models/forgejo_migrations/main_test.go
@@ -0,0 +1,14 @@
+// Copyright 2023 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package forgejo_migrations //nolint:revive
+
+import (
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+)
+
+func TestMain(m *testing.M) {
+ migration_tests.MainTest(m)
+}
diff --git a/models/forgejo_migrations/migrate.go b/models/forgejo_migrations/migrate.go
new file mode 100644
index 0000000..cca83d6
--- /dev/null
+++ b/models/forgejo_migrations/migrate.go
@@ -0,0 +1,192 @@
+// Copyright 2023 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package forgejo_migrations //nolint:revive
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "code.gitea.io/gitea/models/forgejo/semver"
+ forgejo_v1_20 "code.gitea.io/gitea/models/forgejo_migrations/v1_20"
+ forgejo_v1_22 "code.gitea.io/gitea/models/forgejo_migrations/v1_22"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+ "xorm.io/xorm/names"
+)
+
+// ForgejoVersion describes the Forgejo version table. Should have only one row with id = 1.
+type ForgejoVersion struct {
+ ID int64 `xorm:"pk autoincr"`
+ Version int64
+}
+
+type Migration struct {
+ description string
+ migrate func(*xorm.Engine) error
+}
+
+// NewMigration creates a new migration.
+func NewMigration(desc string, fn func(*xorm.Engine) error) *Migration {
+ return &Migration{desc, fn}
+}
+
+// This is a sequence of additional Forgejo migrations.
+// Add new migrations to the bottom of the list.
+var migrations = []*Migration{
+ // v0 -> v1
+ NewMigration("Create the `forgejo_blocked_user` table", forgejo_v1_20.AddForgejoBlockedUser),
+ // v1 -> v2
+ NewMigration("Create the `forgejo_sem_ver` table", forgejo_v1_20.CreateSemVerTable),
+ // v2 -> v3
+ NewMigration("Create the `forgejo_auth_token` table", forgejo_v1_20.CreateAuthorizationTokenTable),
+ // v3 -> v4
+ NewMigration("Add the `default_permissions` column to the `repo_unit` table", forgejo_v1_22.AddDefaultPermissionsToRepoUnit),
+ // v4 -> v5
+ NewMigration("Create the `forgejo_repo_flag` table", forgejo_v1_22.CreateRepoFlagTable),
+ // v5 -> v6
+ NewMigration("Add the `wiki_branch` column to the `repository` table", forgejo_v1_22.AddWikiBranchToRepository),
+ // v6 -> v7
+ NewMigration("Add the `enable_repo_unit_hints` column to the `user` table", forgejo_v1_22.AddUserRepoUnitHintsSetting),
+ // v7 -> v8
+ NewMigration("Modify the `release`.`note` content to remove SSH signatures", forgejo_v1_22.RemoveSSHSignaturesFromReleaseNotes),
+ // v8 -> v9
+ NewMigration("Add the `apply_to_admins` column to the `protected_branch` table", forgejo_v1_22.AddApplyToAdminsSetting),
+ // v9 -> v10
+ NewMigration("Add pronouns to user", forgejo_v1_22.AddPronounsToUser),
+ // v10 -> v11
+ NewMigration("Add the `created` column to the `issue` table", forgejo_v1_22.AddCreatedToIssue),
+ // v11 -> v12
+ NewMigration("Add repo_archive_download_count table", forgejo_v1_22.AddRepoArchiveDownloadCount),
+ // v12 -> v13
+ NewMigration("Add `hide_archive_links` column to `release` table", AddHideArchiveLinksToRelease),
+ // v13 -> v14
+ NewMigration("Remove Gitea-specific columns from the repository and badge tables", RemoveGiteaSpecificColumnsFromRepositoryAndBadge),
+ // v14 -> v15
+ NewMigration("Create the `federation_host` table", CreateFederationHostTable),
+ // v15 -> v16
+ NewMigration("Create the `federated_user` table", CreateFederatedUserTable),
+ // v16 -> v17
+ NewMigration("Add `normalized_federated_uri` column to `user` table", AddNormalizedFederatedURIToUser),
+ // v17 -> v18
+ NewMigration("Create the `following_repo` table", CreateFollowingRepoTable),
+ // v18 -> v19
+ NewMigration("Add external_url to attachment table", AddExternalURLColumnToAttachmentTable),
+ // v19 -> v20
+ NewMigration("Creating Quota-related tables", CreateQuotaTables),
+ // v20 -> v21
+ NewMigration("Add SSH keypair to `push_mirror` table", AddSSHKeypairToPushMirror),
+ // v21 -> v22
+ NewMigration("Add `legacy` to `web_authn_credential` table", AddLegacyToWebAuthnCredential),
+}
+
+// GetCurrentDBVersion returns the current Forgejo database version.
+func GetCurrentDBVersion(x *xorm.Engine) (int64, error) {
+ if err := x.Sync(new(ForgejoVersion)); err != nil {
+ return -1, fmt.Errorf("sync: %w", err)
+ }
+
+ currentVersion := &ForgejoVersion{ID: 1}
+ has, err := x.Get(currentVersion)
+ if err != nil {
+ return -1, fmt.Errorf("get: %w", err)
+ }
+ if !has {
+ return -1, nil
+ }
+ return currentVersion.Version, nil
+}
+
+// ExpectedVersion returns the expected Forgejo database version.
+func ExpectedVersion() int64 {
+ return int64(len(migrations))
+}
+
+// EnsureUpToDate will check if the Forgejo database is at the correct version.
+func EnsureUpToDate(x *xorm.Engine) error {
+ currentDB, err := GetCurrentDBVersion(x)
+ if err != nil {
+ return err
+ }
+
+ if currentDB < 0 {
+ return fmt.Errorf("database has not been initialized")
+ }
+
+ expected := ExpectedVersion()
+
+ if currentDB != expected {
+ return fmt.Errorf(`current Forgejo database version %d is not equal to the expected version %d. Please run "forgejo [--config /path/to/app.ini] migrate" to update the database version`, currentDB, expected)
+ }
+
+ return nil
+}
+
+// Migrate Forgejo database to current version.
+func Migrate(x *xorm.Engine) error {
+ // Set a clean default mapper (GonicMapper), as that is the default the migrations expect.
+ x.SetMapper(names.GonicMapper{})
+ if err := x.Sync(new(ForgejoVersion)); err != nil {
+ return fmt.Errorf("sync: %w", err)
+ }
+
+ currentVersion := &ForgejoVersion{ID: 1}
+ has, err := x.Get(currentVersion)
+ if err != nil {
+ return fmt.Errorf("get: %w", err)
+ } else if !has {
+ // If the version record does not exist, we assume this is a
+ // fresh installation and we can skip all migrations.
+ currentVersion.ID = 0
+ currentVersion.Version = ExpectedVersion()
+
+ if _, err = x.InsertOne(currentVersion); err != nil {
+ return fmt.Errorf("insert: %w", err)
+ }
+ }
+
+ v := currentVersion.Version
+
+ // Downgrading Forgejo's database version is not supported
+ if v > ExpectedVersion() {
+ msg := fmt.Sprintf("Your Forgejo database (migration version: %d) is for a newer version of Forgejo, you cannot use the newer database for this old Forgejo release (%d).", v, ExpectedVersion())
+ msg += "\nForgejo will exit to keep your database safe and unchanged. Please use the correct Forgejo release, do not change the migration version manually (incorrect manual operation may cause data loss)."
+ if !setting.IsProd {
+ msg += fmt.Sprintf("\nIf you are in development and really know what you're doing, you can force changing the migration version by executing: UPDATE forgejo_version SET version=%d WHERE id=1;", ExpectedVersion())
+ }
+ _, _ = fmt.Fprintln(os.Stderr, msg)
+ log.Fatal(msg)
+ return nil
+ }
+
+ // Some migration tasks depend on the git command
+ if git.DefaultContext == nil {
+ if err = git.InitSimple(context.Background()); err != nil {
+ return err
+ }
+ }
+
+ // Migrate
+ for i, m := range migrations[v:] {
+ log.Info("Migration[%d]: %s", v+int64(i), m.description)
+ // Reset the mapper between each migration - migrations are not supposed to depend on each other
+ x.SetMapper(names.GonicMapper{})
+ if err = m.migrate(x); err != nil {
+ return fmt.Errorf("migration[%d]: %s failed: %w", v+int64(i), m.description, err)
+ }
+ currentVersion.Version = v + int64(i) + 1
+ if _, err = x.ID(1).Update(currentVersion); err != nil {
+ return err
+ }
+ }
+
+ if err := x.Sync(new(semver.ForgejoSemVer)); err != nil {
+ return fmt.Errorf("sync: %w", err)
+ }
+
+ return semver.SetVersionStringWithEngine(x, setting.ForgejoVersion)
+}
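
Appending a migration means adding one entry at the bottom of the migrations slice above; its 1-based position becomes the new ExpectedVersion(). A hypothetical sketch in the same x.Sync style as v13.go and later (the file, column, and function names are invented for illustration):

	// models/forgejo_migrations/v23.go (hypothetical)
	func AddExampleColumnToUser(x *xorm.Engine) error {
		type User struct {
			ID      int64  `xorm:"pk autoincr"`
			Example string // invented column, for illustration only
		}
		return x.Sync(&User{})
	}

	// registered at the bottom of the migrations slice in migrate.go:
	//   // v22 -> v23
	//   NewMigration("Add the `example` column to the `user` table", AddExampleColumnToUser),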
diff --git a/models/forgejo_migrations/migrate_test.go b/models/forgejo_migrations/migrate_test.go
new file mode 100644
index 0000000..48ee4f7
--- /dev/null
+++ b/models/forgejo_migrations/migrate_test.go
@@ -0,0 +1,39 @@
+// Copyright 2023 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package forgejo_migrations //nolint:revive
+
+import (
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+
+ "github.com/stretchr/testify/require"
+)
+
+// TestEnsureUpToDate tests the behavior of EnsureUpToDate.
+func TestEnsureUpToDate(t *testing.T) {
+ x, deferable := migration_tests.PrepareTestEnv(t, 0, new(ForgejoVersion))
+ defer deferable()
+ if x == nil || t.Failed() {
+ return
+ }
+
+ // Ensure error if there's no row in Forgejo Version.
+ err := EnsureUpToDate(x)
+ require.Error(t, err)
+
+ // Insert 'good' Forgejo Version row.
+ _, err = x.InsertOne(&ForgejoVersion{ID: 1, Version: ExpectedVersion()})
+ require.NoError(t, err)
+
+ err = EnsureUpToDate(x)
+ require.NoError(t, err)
+
+ // Modify forgejo version to have a lower version.
+ _, err = x.Exec("UPDATE `forgejo_version` SET version = ? WHERE id = 1", ExpectedVersion()-1)
+ require.NoError(t, err)
+
+ err = EnsureUpToDate(x)
+ require.Error(t, err)
+}
diff --git a/models/forgejo_migrations/v13.go b/models/forgejo_migrations/v13.go
new file mode 100644
index 0000000..614f682
--- /dev/null
+++ b/models/forgejo_migrations/v13.go
@@ -0,0 +1,15 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package forgejo_migrations //nolint:revive
+
+import "xorm.io/xorm"
+
+func AddHideArchiveLinksToRelease(x *xorm.Engine) error {
+ type Release struct {
+ ID int64 `xorm:"pk autoincr"`
+ HideArchiveLinks bool `xorm:"NOT NULL DEFAULT false"`
+ }
+
+ return x.Sync(&Release{})
+}
diff --git a/models/forgejo_migrations/v14.go b/models/forgejo_migrations/v14.go
new file mode 100644
index 0000000..f6dd35e
--- /dev/null
+++ b/models/forgejo_migrations/v14.go
@@ -0,0 +1,43 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package forgejo_migrations //nolint:revive
+
+import (
+ "code.gitea.io/gitea/models/migrations/base"
+
+ "xorm.io/xorm"
+)
+
+func RemoveGiteaSpecificColumnsFromRepositoryAndBadge(x *xorm.Engine) error {
+ // Make sure the columns exist before dropping them
+ type Repository struct {
+ ID int64
+ DefaultWikiBranch string
+ }
+ if err := x.Sync(&Repository{}); err != nil {
+ return err
+ }
+
+ type Badge struct {
+ ID int64 `xorm:"pk autoincr"`
+ Slug string
+ }
+ err := x.Sync(new(Badge))
+ if err != nil {
+ return err
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+ if err := base.DropTableColumns(sess, "repository", "default_wiki_branch"); err != nil {
+ return err
+ }
+ if err := base.DropTableColumns(sess, "badge", "slug"); err != nil {
+ return err
+ }
+ return sess.Commit()
+}
diff --git a/models/forgejo_migrations/v15.go b/models/forgejo_migrations/v15.go
new file mode 100644
index 0000000..d7ed19c
--- /dev/null
+++ b/models/forgejo_migrations/v15.go
@@ -0,0 +1,33 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package forgejo_migrations //nolint:revive
+
+import (
+ "time"
+
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+type (
+ SoftwareNameType string
+)
+
+type NodeInfo struct {
+ SoftwareName SoftwareNameType
+}
+
+type FederationHost struct {
+ ID int64 `xorm:"pk autoincr"`
+ HostFqdn string `xorm:"host_fqdn UNIQUE INDEX VARCHAR(255) NOT NULL"`
+ NodeInfo NodeInfo `xorm:"extends NOT NULL"`
+ LatestActivity time.Time `xorm:"NOT NULL"`
+ Created timeutil.TimeStamp `xorm:"created"`
+ Updated timeutil.TimeStamp `xorm:"updated"`
+}
+
+func CreateFederationHostTable(x *xorm.Engine) error {
+ return x.Sync(new(FederationHost))
+}
diff --git a/models/forgejo_migrations/v16.go b/models/forgejo_migrations/v16.go
new file mode 100644
index 0000000..f80bfc5
--- /dev/null
+++ b/models/forgejo_migrations/v16.go
@@ -0,0 +1,17 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package forgejo_migrations //nolint:revive
+
+import "xorm.io/xorm"
+
+type FederatedUser struct {
+ ID int64 `xorm:"pk autoincr"`
+ UserID int64 `xorm:"NOT NULL"`
+ ExternalID string `xorm:"UNIQUE(federation_user_mapping) NOT NULL"`
+ FederationHostID int64 `xorm:"UNIQUE(federation_user_mapping) NOT NULL"`
+}
+
+func CreateFederatedUserTable(x *xorm.Engine) error {
+ return x.Sync(new(FederatedUser))
+}
diff --git a/models/forgejo_migrations/v17.go b/models/forgejo_migrations/v17.go
new file mode 100644
index 0000000..d6e2983
--- /dev/null
+++ b/models/forgejo_migrations/v17.go
@@ -0,0 +1,14 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package forgejo_migrations //nolint:revive
+
+import "xorm.io/xorm"
+
+func AddNormalizedFederatedURIToUser(x *xorm.Engine) error {
+ type User struct {
+ ID int64 `xorm:"pk autoincr"`
+ NormalizedFederatedURI string
+ }
+ return x.Sync(&User{})
+}
diff --git a/models/forgejo_migrations/v18.go b/models/forgejo_migrations/v18.go
new file mode 100644
index 0000000..afccfbf
--- /dev/null
+++ b/models/forgejo_migrations/v18.go
@@ -0,0 +1,18 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package forgejo_migrations //nolint:revive
+
+import "xorm.io/xorm"
+
+type FollowingRepo struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"UNIQUE(federation_repo_mapping) NOT NULL"`
+ ExternalID string `xorm:"UNIQUE(federation_repo_mapping) NOT NULL"`
+ FederationHostID int64 `xorm:"UNIQUE(federation_repo_mapping) NOT NULL"`
+ URI string
+}
+
+func CreateFollowingRepoTable(x *xorm.Engine) error {
+ return x.Sync(new(FollowingRepo))
+}
diff --git a/models/forgejo_migrations/v19.go b/models/forgejo_migrations/v19.go
new file mode 100644
index 0000000..69b7746
--- /dev/null
+++ b/models/forgejo_migrations/v19.go
@@ -0,0 +1,14 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package forgejo_migrations //nolint:revive
+
+import "xorm.io/xorm"
+
+func AddExternalURLColumnToAttachmentTable(x *xorm.Engine) error {
+ type Attachment struct {
+ ID int64 `xorm:"pk autoincr"`
+ ExternalURL string
+ }
+ return x.Sync(new(Attachment))
+}
diff --git a/models/forgejo_migrations/v1_20/v1.go b/models/forgejo_migrations/v1_20/v1.go
new file mode 100644
index 0000000..1097613
--- /dev/null
+++ b/models/forgejo_migrations/v1_20/v1.go
@@ -0,0 +1,21 @@
+// Copyright 2023 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package forgejo_v1_20 //nolint:revive
+
+import (
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func AddForgejoBlockedUser(x *xorm.Engine) error {
+ type ForgejoBlockedUser struct {
+ ID int64 `xorm:"pk autoincr"`
+ BlockID int64 `xorm:"index"`
+ UserID int64 `xorm:"index"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+ }
+
+ return x.Sync(new(ForgejoBlockedUser))
+}
diff --git a/models/forgejo_migrations/v1_20/v2.go b/models/forgejo_migrations/v1_20/v2.go
new file mode 100644
index 0000000..39f3b58
--- /dev/null
+++ b/models/forgejo_migrations/v1_20/v2.go
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: MIT
+
+package forgejo_v1_20 //nolint:revive
+
+import (
+ "xorm.io/xorm"
+)
+
+func CreateSemVerTable(x *xorm.Engine) error {
+ type ForgejoSemVer struct {
+ Version string
+ }
+
+ return x.Sync(new(ForgejoSemVer))
+}
diff --git a/models/forgejo_migrations/v1_20/v3.go b/models/forgejo_migrations/v1_20/v3.go
new file mode 100644
index 0000000..caa4f1a
--- /dev/null
+++ b/models/forgejo_migrations/v1_20/v3.go
@@ -0,0 +1,26 @@
+// Copyright 2023 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package forgejo_v1_20 //nolint:revive
+
+import (
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+type AuthorizationToken struct {
+ ID int64 `xorm:"pk autoincr"`
+ UID int64 `xorm:"INDEX"`
+ LookupKey string `xorm:"INDEX UNIQUE"`
+ HashedValidator string
+ Expiry timeutil.TimeStamp
+}
+
+func (AuthorizationToken) TableName() string {
+ return "forgejo_auth_token"
+}
+
+func CreateAuthorizationTokenTable(x *xorm.Engine) error {
+ return x.Sync(new(AuthorizationToken))
+}
diff --git a/models/forgejo_migrations/v1_22/main_test.go b/models/forgejo_migrations/v1_22/main_test.go
new file mode 100644
index 0000000..0971108
--- /dev/null
+++ b/models/forgejo_migrations/v1_22/main_test.go
@@ -0,0 +1,14 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import (
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+)
+
+func TestMain(m *testing.M) {
+ migration_tests.MainTest(m)
+}
diff --git a/models/forgejo_migrations/v1_22/v10.go b/models/forgejo_migrations/v1_22/v10.go
new file mode 100644
index 0000000..819800a
--- /dev/null
+++ b/models/forgejo_migrations/v1_22/v10.go
@@ -0,0 +1,17 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddPronounsToUser(x *xorm.Engine) error {
+ type User struct {
+ ID int64 `xorm:"pk autoincr"`
+ Pronouns string
+ }
+
+ return x.Sync(&User{})
+}
diff --git a/models/forgejo_migrations/v1_22/v11.go b/models/forgejo_migrations/v1_22/v11.go
new file mode 100644
index 0000000..c693993
--- /dev/null
+++ b/models/forgejo_migrations/v1_22/v11.go
@@ -0,0 +1,19 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func AddCreatedToIssue(x *xorm.Engine) error {
+ type Issue struct {
+ ID int64 `xorm:"pk autoincr"`
+ Created timeutil.TimeStampNano
+ }
+
+ return x.Sync(&Issue{})
+}
diff --git a/models/forgejo_migrations/v1_22/v12.go b/models/forgejo_migrations/v1_22/v12.go
new file mode 100644
index 0000000..6822524
--- /dev/null
+++ b/models/forgejo_migrations/v1_22/v12.go
@@ -0,0 +1,18 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import "xorm.io/xorm"
+
+func AddRepoArchiveDownloadCount(x *xorm.Engine) error {
+ type RepoArchiveDownloadCount struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"index unique(s)"`
+ ReleaseID int64 `xorm:"index unique(s)"`
+ Type int `xorm:"unique(s)"`
+ Count int64
+ }
+
+ return x.Sync(&RepoArchiveDownloadCount{})
+}
diff --git a/models/forgejo_migrations/v1_22/v4.go b/models/forgejo_migrations/v1_22/v4.go
new file mode 100644
index 0000000..f1195f5
--- /dev/null
+++ b/models/forgejo_migrations/v1_22/v4.go
@@ -0,0 +1,17 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddDefaultPermissionsToRepoUnit(x *xorm.Engine) error {
+ type RepoUnit struct {
+ ID int64
+ DefaultPermissions int `xorm:"NOT NULL DEFAULT 0"`
+ }
+
+ return x.Sync(&RepoUnit{})
+}
diff --git a/models/forgejo_migrations/v1_22/v5.go b/models/forgejo_migrations/v1_22/v5.go
new file mode 100644
index 0000000..55f9fe1
--- /dev/null
+++ b/models/forgejo_migrations/v1_22/v5.go
@@ -0,0 +1,22 @@
+// Copyright 2024 The Forgejo Authors c/o Codeberg e.V.. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+type RepoFlag struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"UNIQUE(s) INDEX"`
+ Name string `xorm:"UNIQUE(s) INDEX"`
+}
+
+func (RepoFlag) TableName() string {
+ return "forgejo_repo_flag"
+}
+
+func CreateRepoFlagTable(x *xorm.Engine) error {
+ return x.Sync(new(RepoFlag))
+}
diff --git a/models/forgejo_migrations/v1_22/v6.go b/models/forgejo_migrations/v1_22/v6.go
new file mode 100644
index 0000000..1a48748
--- /dev/null
+++ b/models/forgejo_migrations/v1_22/v6.go
@@ -0,0 +1,24 @@
+// Copyright 2024 The Forgejo Authors c/o Codeberg e.V.. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddWikiBranchToRepository(x *xorm.Engine) error {
+ type Repository struct {
+ ID int64 `xorm:"pk autoincr"`
+ WikiBranch string
+ }
+
+ if err := x.Sync(&Repository{}); err != nil {
+ return err
+ }
+
+ // Update existing repositories to use `master` as the wiki branch, for
+ // compatibility's sake.
+ _, err := x.Exec("UPDATE repository SET wiki_branch = 'master' WHERE wiki_branch = '' OR wiki_branch IS NULL")
+ return err
+}
diff --git a/models/forgejo_migrations/v1_22/v7.go b/models/forgejo_migrations/v1_22/v7.go
new file mode 100644
index 0000000..b42dd1a
--- /dev/null
+++ b/models/forgejo_migrations/v1_22/v7.go
@@ -0,0 +1,17 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddUserRepoUnitHintsSetting(x *xorm.Engine) error {
+ type User struct {
+ ID int64 `xorm:"pk autoincr"`
+ EnableRepoUnitHints bool `xorm:"NOT NULL DEFAULT true"`
+ }
+
+ return x.Sync(&User{})
+}
diff --git a/models/forgejo_migrations/v1_22/v8.go b/models/forgejo_migrations/v1_22/v8.go
new file mode 100644
index 0000000..2d3c0c5
--- /dev/null
+++ b/models/forgejo_migrations/v1_22/v8.go
@@ -0,0 +1,51 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import (
+ "strings"
+
+ "xorm.io/xorm"
+)
+
+func RemoveSSHSignaturesFromReleaseNotes(x *xorm.Engine) error {
+ type Release struct {
+ ID int64 `xorm:"pk autoincr"`
+ Note string `xorm:"TEXT"`
+ }
+
+ if err := x.Sync(&Release{}); err != nil {
+ return err
+ }
+
+ var releaseNotes []struct {
+ ID int64
+ Note string
+ }
+
+ if err := x.Table("release").Where("note LIKE '%-----BEGIN SSH SIGNATURE-----%'").Find(&releaseNotes); err != nil {
+ return err
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ for _, release := range releaseNotes {
+ idx := strings.LastIndex(release.Note, "-----BEGIN SSH SIGNATURE-----")
+ if idx == -1 {
+ continue
+ }
+ release.Note = release.Note[:idx]
+ _, err := sess.Exec("UPDATE `release` SET note = ? WHERE id = ?", release.Note, release.ID)
+ if err != nil {
+ return err
+ }
+ }
+
+ return sess.Commit()
+}
diff --git a/models/forgejo_migrations/v1_22/v8_test.go b/models/forgejo_migrations/v1_22/v8_test.go
new file mode 100644
index 0000000..128fd08
--- /dev/null
+++ b/models/forgejo_migrations/v1_22/v8_test.go
@@ -0,0 +1,35 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import (
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_RemoveSSHSignaturesFromReleaseNotes(t *testing.T) {
+ // A reduced mock of the `repo_model.Release` struct.
+ type Release struct {
+ ID int64 `xorm:"pk autoincr"`
+ Note string `xorm:"TEXT"`
+ }
+
+ x, deferable := migration_tests.PrepareTestEnv(t, 0, new(Release))
+ defer deferable()
+
+ require.NoError(t, RemoveSSHSignaturesFromReleaseNotes(x))
+
+ var releases []Release
+ err := x.Table("release").OrderBy("id ASC").Find(&releases)
+ require.NoError(t, err)
+ assert.Len(t, releases, 3)
+
+ assert.Equal(t, "", releases[0].Note)
+ assert.Equal(t, "A message.\n", releases[1].Note)
+ assert.Equal(t, "no signature present here", releases[2].Note)
+}
diff --git a/models/forgejo_migrations/v1_22/v9.go b/models/forgejo_migrations/v1_22/v9.go
new file mode 100644
index 0000000..34c2844
--- /dev/null
+++ b/models/forgejo_migrations/v1_22/v9.go
@@ -0,0 +1,15 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import "xorm.io/xorm"
+
+func AddApplyToAdminsSetting(x *xorm.Engine) error {
+ type ProtectedBranch struct {
+ ID int64 `xorm:"pk autoincr"`
+ ApplyToAdmins bool `xorm:"NOT NULL DEFAULT false"`
+ }
+
+ return x.Sync(&ProtectedBranch{})
+}
diff --git a/models/forgejo_migrations/v20.go b/models/forgejo_migrations/v20.go
new file mode 100644
index 0000000..8ca9e91
--- /dev/null
+++ b/models/forgejo_migrations/v20.go
@@ -0,0 +1,52 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package forgejo_migrations //nolint:revive
+
+import "xorm.io/xorm"
+
+type (
+ QuotaLimitSubject int
+ QuotaLimitSubjects []QuotaLimitSubject
+
+ QuotaKind int
+)
+
+type QuotaRule struct {
+ Name string `xorm:"pk not null"`
+ Limit int64 `xorm:"NOT NULL"`
+ Subjects QuotaLimitSubjects
+}
+
+type QuotaGroup struct {
+ Name string `xorm:"pk NOT NULL"`
+}
+
+type QuotaGroupRuleMapping struct {
+ ID int64 `xorm:"pk autoincr"`
+ GroupName string `xorm:"index unique(qgrm_gr) not null"`
+ RuleName string `xorm:"unique(qgrm_gr) not null"`
+}
+
+type QuotaGroupMapping struct {
+ ID int64 `xorm:"pk autoincr"`
+ Kind QuotaKind `xorm:"unique(qgm_kmg) not null"`
+ MappedID int64 `xorm:"unique(qgm_kmg) not null"`
+ GroupName string `xorm:"index unique(qgm_kmg) not null"`
+}
+
+func CreateQuotaTables(x *xorm.Engine) error {
+ if err := x.Sync(new(QuotaRule)); err != nil {
+ return err
+ }
+
+ if err := x.Sync(new(QuotaGroup)); err != nil {
+ return err
+ }
+
+ if err := x.Sync(new(QuotaGroupRuleMapping)); err != nil {
+ return err
+ }
+
+ return x.Sync(new(QuotaGroupMapping))
+}
diff --git a/models/forgejo_migrations/v21.go b/models/forgejo_migrations/v21.go
new file mode 100644
index 0000000..53f141b
--- /dev/null
+++ b/models/forgejo_migrations/v21.go
@@ -0,0 +1,16 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package forgejo_migrations //nolint:revive
+
+import "xorm.io/xorm"
+
+func AddSSHKeypairToPushMirror(x *xorm.Engine) error {
+ type PushMirror struct {
+ ID int64 `xorm:"pk autoincr"`
+ PublicKey string `xorm:"VARCHAR(100)"`
+ PrivateKey []byte `xorm:"BLOB"`
+ }
+
+ return x.Sync(&PushMirror{})
+}
diff --git a/models/forgejo_migrations/v22.go b/models/forgejo_migrations/v22.go
new file mode 100644
index 0000000..eeb7387
--- /dev/null
+++ b/models/forgejo_migrations/v22.go
@@ -0,0 +1,17 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package forgejo_migrations //nolint:revive
+
+import "xorm.io/xorm"
+
+func AddLegacyToWebAuthnCredential(x *xorm.Engine) error {
+ type WebauthnCredential struct {
+ ID int64 `xorm:"pk autoincr"`
+ BackupEligible bool `xorm:"NOT NULL DEFAULT false"`
+ BackupState bool `xorm:"NOT NULL DEFAULT false"`
+ Legacy bool `xorm:"NOT NULL DEFAULT true"`
+ }
+
+ return x.Sync(&WebauthnCredential{})
+}
diff --git a/models/git/TestIterateRepositoryIDsWithLFSMetaObjects/lfs_meta_object.yaml b/models/git/TestIterateRepositoryIDsWithLFSMetaObjects/lfs_meta_object.yaml
new file mode 100644
index 0000000..fdfa66a
--- /dev/null
+++ b/models/git/TestIterateRepositoryIDsWithLFSMetaObjects/lfs_meta_object.yaml
@@ -0,0 +1,7 @@
+-
+
+ id: 1000
+ oid: 9d172e5c64b4f0024b9901ec6afe9ea052f3c9b6ff9f4b07956d8c48c86fca82
+ size: 25
+ repository_id: 1
+ created_unix: 1712309123
diff --git a/models/git/branch.go b/models/git/branch.go
new file mode 100644
index 0000000..f004d50
--- /dev/null
+++ b/models/git/branch.go
@@ -0,0 +1,434 @@
+// Copyright 2016 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ErrBranchNotExist represents an error that branch with such name does not exist.
+type ErrBranchNotExist struct {
+ RepoID int64
+ BranchName string
+}
+
+// IsErrBranchNotExist checks if an error is an ErrBranchNotExist.
+func IsErrBranchNotExist(err error) bool {
+ _, ok := err.(ErrBranchNotExist)
+ return ok
+}
+
+func (err ErrBranchNotExist) Error() string {
+ return fmt.Sprintf("branch does not exist [repo_id: %d name: %s]", err.RepoID, err.BranchName)
+}
+
+func (err ErrBranchNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ErrBranchAlreadyExists represents an error that branch with such name already exists.
+type ErrBranchAlreadyExists struct {
+ BranchName string
+}
+
+// IsErrBranchAlreadyExists checks if an error is an ErrBranchAlreadyExists.
+func IsErrBranchAlreadyExists(err error) bool {
+ _, ok := err.(ErrBranchAlreadyExists)
+ return ok
+}
+
+func (err ErrBranchAlreadyExists) Error() string {
+ return fmt.Sprintf("branch already exists [name: %s]", err.BranchName)
+}
+
+func (err ErrBranchAlreadyExists) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+// ErrBranchNameConflict represents an error that branch name conflicts with other branch.
+type ErrBranchNameConflict struct {
+ BranchName string
+}
+
+// IsErrBranchNameConflict checks if an error is an ErrBranchNameConflict.
+func IsErrBranchNameConflict(err error) bool {
+ _, ok := err.(ErrBranchNameConflict)
+ return ok
+}
+
+func (err ErrBranchNameConflict) Error() string {
+ return fmt.Sprintf("branch conflicts with existing branch [name: %s]", err.BranchName)
+}
+
+func (err ErrBranchNameConflict) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+// ErrBranchesEqual represents an error that base branch is equal to the head branch.
+type ErrBranchesEqual struct {
+ BaseBranchName string
+ HeadBranchName string
+}
+
+// IsErrBranchesEqual checks if an error is an ErrBranchesEqual.
+func IsErrBranchesEqual(err error) bool {
+ _, ok := err.(ErrBranchesEqual)
+ return ok
+}
+
+func (err ErrBranchesEqual) Error() string {
+ return fmt.Sprintf("branches are equal [head: %s, base: %s]", err.HeadBranchName, err.BaseBranchName)
+}
+
+func (err ErrBranchesEqual) Unwrap() error {
+ return util.ErrInvalidArgument
+}
+
+// Branch represents a branch of a repository
+// For repositories with many branches, storing them in the database is a good choice
+// for pagination, keyword search, and filtering
+type Branch struct {
+ ID int64
+ RepoID int64 `xorm:"UNIQUE(s)"`
+ Name string `xorm:"UNIQUE(s) NOT NULL"` // git's ref-name is case-sensitive internally, however, in some databases (mysql, by default), it's case-insensitive at the moment
+ CommitID string
+ CommitMessage string `xorm:"TEXT"` // it only stores the message summary (the first line)
+ PusherID int64
+ Pusher *user_model.User `xorm:"-"`
+ IsDeleted bool `xorm:"index"`
+ DeletedByID int64
+ DeletedBy *user_model.User `xorm:"-"`
+ DeletedUnix timeutil.TimeStamp `xorm:"index"`
+ CommitTime timeutil.TimeStamp // The time of the commit the branch currently points to
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"updated"`
+}
+
+func (b *Branch) LoadDeletedBy(ctx context.Context) (err error) {
+ if b.DeletedBy == nil {
+ b.DeletedBy, err = user_model.GetUserByID(ctx, b.DeletedByID)
+ if user_model.IsErrUserNotExist(err) {
+ b.DeletedBy = user_model.NewGhostUser()
+ err = nil
+ }
+ }
+ return err
+}
+
+func (b *Branch) GetRepo(ctx context.Context) (*repo_model.Repository, error) {
+ return repo_model.GetRepositoryByID(ctx, b.RepoID)
+}
+
+func (b *Branch) LoadPusher(ctx context.Context) (err error) {
+ if b.Pusher == nil && b.PusherID > 0 {
+ b.Pusher, err = user_model.GetUserByID(ctx, b.PusherID)
+ if user_model.IsErrUserNotExist(err) {
+ b.Pusher = user_model.NewGhostUser()
+ err = nil
+ }
+ }
+ return err
+}
+
+func init() {
+ db.RegisterModel(new(Branch))
+ db.RegisterModel(new(RenamedBranch))
+}
+
+func GetBranch(ctx context.Context, repoID int64, branchName string) (*Branch, error) {
+ var branch Branch
+ has, err := db.GetEngine(ctx).Where("repo_id=?", repoID).And("name=?", branchName).Get(&branch)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrBranchNotExist{
+ RepoID: repoID,
+ BranchName: branchName,
+ }
+ }
+ return &branch, nil
+}
+
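+// Usage sketch (illustrative): look up a branch and distinguish the not-exist
+// case; IsErrBranchNotExist is assumed to be the helper defined alongside
+// ErrBranchNotExist elsewhere in this package.
+//
+//  branch, err := GetBranch(ctx, repo.ID, "main")
+//  if IsErrBranchNotExist(err) {
+//      // consult FindRenamedBranch, or report a 404
+//  } else if err != nil {
+//      return err
+//  }
+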
+// GetBranches retrieves the branches with the given names from the database.
+func GetBranches(ctx context.Context, repoID int64, branchNames []string) ([]*Branch, error) {
+ branches := make([]*Branch, 0, len(branchNames))
+ return branches, db.GetEngine(ctx).Where("repo_id=?", repoID).In("name", branchNames).Find(&branches)
+}
+
+// AddBranches inserts the given branches into the database.
+func AddBranches(ctx context.Context, branches []*Branch) error {
+ for _, branch := range branches {
+ if _, err := db.GetEngine(ctx).Insert(branch); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetDeletedBranchByID returns the deleted branch with the given ID, after
+// verifying that it belongs to the repository and is actually deleted.
+func GetDeletedBranchByID(ctx context.Context, repoID, branchID int64) (*Branch, error) {
+ var branch Branch
+ has, err := db.GetEngine(ctx).ID(branchID).Get(&branch)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrBranchNotExist{
+ RepoID: repoID,
+ }
+ }
+ if branch.RepoID != repoID {
+ return nil, ErrBranchNotExist{
+ RepoID: repoID,
+ }
+ }
+ if !branch.IsDeleted {
+ return nil, ErrBranchNotExist{
+ RepoID: repoID,
+ }
+ }
+ return &branch, nil
+}
+
+// DeleteBranches marks the given branches as deleted on behalf of the doer.
+func DeleteBranches(ctx context.Context, repoID, doerID int64, branchIDs []int64) error {
+ return db.WithTx(ctx, func(ctx context.Context) error {
+ branches := make([]*Branch, 0, len(branchIDs))
+ if err := db.GetEngine(ctx).In("id", branchIDs).Find(&branches); err != nil {
+ return err
+ }
+ for _, branch := range branches {
+ if err := AddDeletedBranch(ctx, repoID, branch.Name, doerID); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+}
+
+// UpdateBranch updates the branch information in the database.
+func UpdateBranch(ctx context.Context, repoID, pusherID int64, branchName string, commit *git.Commit) (int64, error) {
+ return db.GetEngine(ctx).Where("repo_id=? AND name=?", repoID, branchName).
+ Cols("commit_id, commit_message, pusher_id, commit_time, is_deleted, updated_unix").
+ Update(&Branch{
+ CommitID: commit.ID.String(),
+ CommitMessage: commit.Summary(),
+ PusherID: pusherID,
+ CommitTime: timeutil.TimeStamp(commit.Committer.When.Unix()),
+ IsDeleted: false,
+ })
+}
+
+// AddDeletedBranch adds a deleted branch to the database
+func AddDeletedBranch(ctx context.Context, repoID int64, branchName string, deletedByID int64) error {
+ branch, err := GetBranch(ctx, repoID, branchName)
+ if err != nil {
+ return err
+ }
+ if branch.IsDeleted {
+ return nil
+ }
+
+ cnt, err := db.GetEngine(ctx).Where("repo_id=? AND name=? AND is_deleted=?", repoID, branchName, false).
+ Cols("is_deleted, deleted_by_id, deleted_unix").
+ Update(&Branch{
+ IsDeleted: true,
+ DeletedByID: deletedByID,
+ DeletedUnix: timeutil.TimeStampNow(),
+ })
+ if err != nil {
+ return err
+ }
+ if cnt == 0 {
+ return fmt.Errorf("branch %s not found or has been deleted", branchName)
+ }
+ return nil
+}
+
+// RemoveDeletedBranchByID permanently removes a deleted branch record from the database.
+func RemoveDeletedBranchByID(ctx context.Context, repoID, branchID int64) error {
+ _, err := db.GetEngine(ctx).Where("repo_id=? AND id=? AND is_deleted = ?", repoID, branchID, true).Delete(new(Branch))
+ return err
+}
+
+// RemoveOldDeletedBranches removes old deleted branches
+func RemoveOldDeletedBranches(ctx context.Context, olderThan time.Duration) {
+ // Nothing to do for shutdown or terminate
+ log.Trace("Doing: DeletedBranchesCleanup")
+
+ deleteBefore := time.Now().Add(-olderThan)
+ _, err := db.GetEngine(ctx).Where("is_deleted=? AND deleted_unix < ?", true, deleteBefore.Unix()).Delete(new(Branch))
+ if err != nil {
+ log.Error("DeletedBranchesCleanup: %v", err)
+ }
+}
+
+// RenamedBranch provides a log of renamed branches,
+// which is consulted when a branch can't be found.
+type RenamedBranch struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"INDEX NOT NULL"`
+ From string
+ To string
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+}
+
+// FindRenamedBranch checks whether a branch was renamed
+func FindRenamedBranch(ctx context.Context, repoID int64, from string) (branch *RenamedBranch, exist bool, err error) {
+ branch = &RenamedBranch{
+ RepoID: repoID,
+ From: from,
+ }
+ exist, err = db.GetEngine(ctx).Get(branch)
+
+ return branch, exist, err
+}
+
+// RenameBranch renames a branch
+func RenameBranch(ctx context.Context, repo *repo_model.Repository, from, to string, gitAction func(ctx context.Context, isDefault bool) error) (err error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ sess := db.GetEngine(ctx)
+
+ // check whether the "from" branch exists
+ var branch Branch
+ exist, err := db.GetEngine(ctx).Where("repo_id=? AND name=?", repo.ID, from).Get(&branch)
+ if err != nil {
+ return err
+ } else if !exist || branch.IsDeleted {
+ return ErrBranchNotExist{
+ RepoID: repo.ID,
+ BranchName: from,
+ }
+ }
+
+ // check whether the "to" branch exists and whether it is deleted
+ var dstBranch Branch
+ exist, err = db.GetEngine(ctx).Where("repo_id=? AND name=?", repo.ID, to).Get(&dstBranch)
+ if err != nil {
+ return err
+ }
+ if exist {
+ if !dstBranch.IsDeleted {
+ return ErrBranchAlreadyExists{
+ BranchName: to,
+ }
+ }
+
+ if _, err := db.GetEngine(ctx).ID(dstBranch.ID).NoAutoCondition().Delete(&dstBranch); err != nil {
+ return err
+ }
+ }
+
+ // 1. update branch in database
+ if n, err := sess.Where("repo_id=? AND name=?", repo.ID, from).Update(&Branch{
+ Name: to,
+ }); err != nil {
+ return err
+ } else if n <= 0 {
+ return ErrBranchNotExist{
+ RepoID: repo.ID,
+ BranchName: from,
+ }
+ }
+
+ // 2. update default branch if needed
+ isDefault := repo.DefaultBranch == from
+ if isDefault {
+ repo.DefaultBranch = to
+ _, err = sess.ID(repo.ID).Cols("default_branch").Update(repo)
+ if err != nil {
+ return err
+ }
+ }
+
+ // 3. Update protected branch if needed
+ protectedBranch, err := GetProtectedBranchRuleByName(ctx, repo.ID, from)
+ if err != nil {
+ return err
+ }
+
+ if protectedBranch != nil {
+ // there is a protection rule for this branch
+ protectedBranch.RuleName = to
+ _, err = sess.ID(protectedBranch.ID).Cols("branch_name").Update(protectedBranch)
+ if err != nil {
+ return err
+ }
+ } else {
+ // some glob protection rules may still match this branch
+ protected, err := IsBranchProtected(ctx, repo.ID, from)
+ if err != nil {
+ return err
+ }
+ if protected {
+ return ErrBranchIsProtected
+ }
+ }
+
+ // 4. Update the base branch name of all unmerged pull requests
+ _, err = sess.Table("pull_request").Where("base_repo_id=? AND base_branch=? AND has_merged=?",
+ repo.ID, from, false).
+ Update(map[string]any{"base_branch": to})
+ if err != nil {
+ return err
+ }
+
+ // 4.1 Update the head branch name of all unmerged pull requests
+ if _, err = sess.Table("pull_request").Where("head_repo_id=? AND head_branch=? AND has_merged=?",
+ repo.ID, from, false).
+ Update(map[string]any{"head_branch": to}); err != nil {
+ return err
+ }
+
+ // 5. insert renamed branch record
+ renamedBranch := &RenamedBranch{
+ RepoID: repo.ID,
+ From: from,
+ To: to,
+ }
+ err = db.Insert(ctx, renamedBranch)
+ if err != nil {
+ return err
+ }
+
+ // 6. do git action
+ if err = gitAction(ctx, isDefault); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
+
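+// Usage sketch (illustrative): the database updates and the git-side rename
+// are kept in one transaction via the gitAction callback; gitRenameBranch is
+// a hypothetical function performing the actual git update.
+//
+//  err := RenameBranch(ctx, repo, "master", "main", func(ctx context.Context, isDefault bool) error {
+//      return gitRenameBranch(ctx, repo, "master", "main", isDefault)
+//  })
+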
+// FindRecentlyPushedNewBranches returns at most two new branches pushed by the user
+// in the last 6 hours that have no open PRs created for them, excluding the indicated branch
+func FindRecentlyPushedNewBranches(ctx context.Context, repoID, userID int64, excludeBranchName string) (BranchList, error) {
+ branches := make(BranchList, 0, 2)
+ subQuery := builder.Select("head_branch").From("pull_request").
+ InnerJoin("issue", "issue.id = pull_request.issue_id").
+ Where(builder.Eq{
+ "pull_request.head_repo_id": repoID,
+ "issue.is_closed": false,
+ })
+ err := db.GetEngine(ctx).
+ Where("pusher_id=? AND is_deleted=?", userID, false).
+ And("name <> ?", excludeBranchName).
+ And("repo_id = ?", repoID).
+ And("commit_time >= ?", time.Now().Add(-time.Hour*6).Unix()).
+ NotIn("name", subQuery).
+ OrderBy("branch.commit_time DESC").
+ Limit(2).
+ Find(&branches)
+ return branches, err
+}
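+
+// Usage sketch (illustrative): a UI handler might use this to suggest opening
+// a pull request for branches the viewer pushed recently, excluding the
+// default branch.
+//
+//  branches, err := FindRecentlyPushedNewBranches(ctx, repo.ID, doer.ID, repo.DefaultBranch)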
diff --git a/models/git/branch_list.go b/models/git/branch_list.go
new file mode 100644
index 0000000..81a43ea
--- /dev/null
+++ b/models/git/branch_list.go
@@ -0,0 +1,132 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/optional"
+
+ "xorm.io/builder"
+)
+
+type BranchList []*Branch
+
+func (branches BranchList) LoadDeletedBy(ctx context.Context) error {
+ ids := container.FilterSlice(branches, func(branch *Branch) (int64, bool) {
+ return branch.DeletedByID, branch.IsDeleted
+ })
+
+ usersMap := make(map[int64]*user_model.User, len(ids))
+ if err := db.GetEngine(ctx).In("id", ids).Find(&usersMap); err != nil {
+ return err
+ }
+ for _, branch := range branches {
+ if !branch.IsDeleted {
+ continue
+ }
+ branch.DeletedBy = usersMap[branch.DeletedByID]
+ if branch.DeletedBy == nil {
+ branch.DeletedBy = user_model.NewGhostUser()
+ }
+ }
+ return nil
+}
+
+func (branches BranchList) LoadPusher(ctx context.Context) error {
+ ids := container.FilterSlice(branches, func(branch *Branch) (int64, bool) {
+ // pusher_id may be zero because some branches are synced by the backend with no pusher
+ return branch.PusherID, branch.PusherID > 0
+ })
+
+ usersMap := make(map[int64]*user_model.User, len(ids))
+ if err := db.GetEngine(ctx).In("id", ids).Find(&usersMap); err != nil {
+ return err
+ }
+ for _, branch := range branches {
+ if branch.PusherID <= 0 {
+ continue
+ }
+ branch.Pusher = usersMap[branch.PusherID]
+ if branch.Pusher == nil {
+ branch.Pusher = user_model.NewGhostUser()
+ }
+ }
+ return nil
+}
+
+type FindBranchOptions struct {
+ db.ListOptions
+ RepoID int64
+ ExcludeBranchNames []string
+ IsDeletedBranch optional.Option[bool]
+ OrderBy string
+ Keyword string
+}
+
+func (opts FindBranchOptions) ToConds() builder.Cond {
+ cond := builder.NewCond()
+ if opts.RepoID > 0 {
+ cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
+ }
+
+ if len(opts.ExcludeBranchNames) > 0 {
+ cond = cond.And(builder.NotIn("name", opts.ExcludeBranchNames))
+ }
+ if opts.IsDeletedBranch.Has() {
+ cond = cond.And(builder.Eq{"is_deleted": opts.IsDeletedBranch.Value()})
+ }
+ if opts.Keyword != "" {
+ cond = cond.And(builder.Like{"name", opts.Keyword})
+ }
+ return cond
+}
+
+func (opts FindBranchOptions) ToOrders() string {
+ orderBy := opts.OrderBy
+ if orderBy == "" {
+ // the commit_time might be the same, so add the "name" to make sure the order is stable
+ orderBy = "commit_time DESC, name ASC"
+ }
+ if opts.IsDeletedBranch.ValueOrDefault(true) { // if deleted branches are included, put them at the beginning
+ orderBy = "is_deleted ASC, " + orderBy
+ }
+ return orderBy
+}
+
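+// Usage sketch (illustrative): these options plug into the generic db helpers;
+// for example, listing the first page of live branches matching a keyword:
+//
+//  branches, err := db.Find[Branch](ctx, FindBranchOptions{
+//      ListOptions:     db.ListOptions{Page: 1, PageSize: 20},
+//      RepoID:          repo.ID,
+//      IsDeletedBranch: optional.Some(false),
+//      Keyword:         "feat",
+//  })
+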
+// FindBranchNames returns the branch names matching the given options.
+func FindBranchNames(ctx context.Context, opts FindBranchOptions) ([]string, error) {
+ sess := db.GetEngine(ctx).Select("name").Where(opts.ToConds())
+ if opts.PageSize > 0 && !opts.IsListAll() {
+ sess = db.SetSessionPagination(sess, &opts.ListOptions)
+ }
+
+ var branches []string
+ if err := sess.Table("branch").OrderBy(opts.ToOrders()).Find(&branches); err != nil {
+ return nil, err
+ }
+ return branches, nil
+}
+
+// FindBranchesByRepoAndBranchName takes a map of repo ID to branch name and
+// returns a map of repo ID to the commit ID of that branch; pairs without a
+// matching branch are omitted.
+func FindBranchesByRepoAndBranchName(ctx context.Context, repoBranches map[int64]string) (map[int64]string, error) {
+ if len(repoBranches) == 0 {
+ return nil, nil
+ }
+ cond := builder.NewCond()
+ for repoID, branchName := range repoBranches {
+ cond = cond.Or(builder.And(builder.Eq{"repo_id": repoID}, builder.Eq{"name": branchName}))
+ }
+ var branches []*Branch
+ if err := db.GetEngine(ctx).
+ Where(cond).Find(&branches); err != nil {
+ return nil, err
+ }
+ branchMap := make(map[int64]string, len(branches))
+ for _, branch := range branches {
+ branchMap[branch.RepoID] = branch.CommitID
+ }
+ return branchMap, nil
+}
diff --git a/models/git/branch_test.go b/models/git/branch_test.go
new file mode 100644
index 0000000..81839eb
--- /dev/null
+++ b/models/git/branch_test.go
@@ -0,0 +1,195 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git_test
+
+import (
+ "context"
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ git_model "code.gitea.io/gitea/models/git"
+ issues_model "code.gitea.io/gitea/models/issues"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/optional"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestAddDeletedBranch(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ assert.EqualValues(t, git.Sha1ObjectFormat.Name(), repo.ObjectFormatName)
+ firstBranch := unittest.AssertExistsAndLoadBean(t, &git_model.Branch{ID: 1})
+
+ assert.True(t, firstBranch.IsDeleted)
+ require.NoError(t, git_model.AddDeletedBranch(db.DefaultContext, repo.ID, firstBranch.Name, firstBranch.DeletedByID))
+ require.NoError(t, git_model.AddDeletedBranch(db.DefaultContext, repo.ID, "branch2", int64(1)))
+
+ secondBranch := unittest.AssertExistsAndLoadBean(t, &git_model.Branch{RepoID: repo.ID, Name: "branch2"})
+ assert.True(t, secondBranch.IsDeleted)
+
+ commit := &git.Commit{
+ ID: git.MustIDFromString(secondBranch.CommitID),
+ CommitMessage: secondBranch.CommitMessage,
+ Committer: &git.Signature{
+ When: secondBranch.CommitTime.AsLocalTime(),
+ },
+ }
+
+ _, err := git_model.UpdateBranch(db.DefaultContext, repo.ID, secondBranch.PusherID, secondBranch.Name, commit)
+ require.NoError(t, err)
+}
+
+func TestGetDeletedBranches(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+
+ branches, err := db.Find[git_model.Branch](db.DefaultContext, git_model.FindBranchOptions{
+ ListOptions: db.ListOptionsAll,
+ RepoID: repo.ID,
+ IsDeletedBranch: optional.Some(true),
+ })
+ require.NoError(t, err)
+ assert.Len(t, branches, 2)
+}
+
+func TestGetDeletedBranch(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ firstBranch := unittest.AssertExistsAndLoadBean(t, &git_model.Branch{ID: 1})
+
+ assert.NotNil(t, getDeletedBranch(t, firstBranch))
+}
+
+func TestDeletedBranchLoadUser(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ firstBranch := unittest.AssertExistsAndLoadBean(t, &git_model.Branch{ID: 1})
+ secondBranch := unittest.AssertExistsAndLoadBean(t, &git_model.Branch{ID: 2})
+
+ branch := getDeletedBranch(t, firstBranch)
+ assert.Nil(t, branch.DeletedBy)
+ require.NoError(t, branch.LoadDeletedBy(db.DefaultContext))
+ assert.NotNil(t, branch.DeletedBy)
+ assert.Equal(t, "user1", branch.DeletedBy.Name)
+
+ branch = getDeletedBranch(t, secondBranch)
+ assert.Nil(t, branch.DeletedBy)
+ require.NoError(t, branch.LoadDeletedBy(db.DefaultContext))
+ assert.NotNil(t, branch.DeletedBy)
+ assert.Equal(t, "Ghost", branch.DeletedBy.Name)
+}
+
+func TestRemoveDeletedBranch(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+
+ firstBranch := unittest.AssertExistsAndLoadBean(t, &git_model.Branch{ID: 1})
+
+ err := git_model.RemoveDeletedBranchByID(db.DefaultContext, repo.ID, 1)
+ require.NoError(t, err)
+ unittest.AssertNotExistsBean(t, firstBranch)
+ unittest.AssertExistsAndLoadBean(t, &git_model.Branch{ID: 2})
+}
+
+func getDeletedBranch(t *testing.T, branch *git_model.Branch) *git_model.Branch {
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+
+ deletedBranch, err := git_model.GetDeletedBranchByID(db.DefaultContext, repo.ID, branch.ID)
+ require.NoError(t, err)
+ assert.Equal(t, branch.ID, deletedBranch.ID)
+ assert.Equal(t, branch.Name, deletedBranch.Name)
+ assert.Equal(t, branch.CommitID, deletedBranch.CommitID)
+ assert.Equal(t, branch.DeletedByID, deletedBranch.DeletedByID)
+
+ return deletedBranch
+}
+
+func TestFindRenamedBranch(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ branch, exist, err := git_model.FindRenamedBranch(db.DefaultContext, 1, "dev")
+ require.NoError(t, err)
+ assert.True(t, exist)
+ assert.Equal(t, "master", branch.To)
+
+ _, exist, err = git_model.FindRenamedBranch(db.DefaultContext, 1, "unknow")
+ require.NoError(t, err)
+ assert.False(t, exist)
+}
+
+func TestRenameBranch(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ repo1 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ _isDefault := false
+
+ ctx, committer, err := db.TxContext(db.DefaultContext)
+ require.NoError(t, err)
+ defer committer.Close()
+ require.NoError(t, git_model.UpdateProtectBranch(ctx, repo1, &git_model.ProtectedBranch{
+ RepoID: repo1.ID,
+ RuleName: "master",
+ }, git_model.WhitelistOptions{}))
+ require.NoError(t, committer.Commit())
+
+ require.NoError(t, git_model.RenameBranch(db.DefaultContext, repo1, "master", "main", func(ctx context.Context, isDefault bool) error {
+ _isDefault = isDefault
+ return nil
+ }))
+
+ assert.True(t, _isDefault)
+ repo1 = unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ assert.Equal(t, "main", repo1.DefaultBranch)
+
+ pull := unittest.AssertExistsAndLoadBean(t, &issues_model.PullRequest{ID: 1}) // merged
+ assert.Equal(t, "master", pull.BaseBranch)
+
+ pull = unittest.AssertExistsAndLoadBean(t, &issues_model.PullRequest{ID: 2}) // open
+ assert.Equal(t, "main", pull.BaseBranch)
+
+ renamedBranch := unittest.AssertExistsAndLoadBean(t, &git_model.RenamedBranch{ID: 2})
+ assert.Equal(t, "master", renamedBranch.From)
+ assert.Equal(t, "main", renamedBranch.To)
+ assert.Equal(t, int64(1), renamedBranch.RepoID)
+
+ unittest.AssertExistsAndLoadBean(t, &git_model.ProtectedBranch{
+ RepoID: repo1.ID,
+ RuleName: "main",
+ })
+}
+
+func TestOnlyGetDeletedBranchOnCorrectRepo(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ // Get deletedBranch with ID of 1 on repo with ID 2.
+ // This should return a nil branch as this deleted branch
+ // is actually on repo with ID 1.
+ repo2 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 2})
+
+ deletedBranch, err := git_model.GetDeletedBranchByID(db.DefaultContext, repo2.ID, 1)
+
+ // Expect error, and the returned branch is nil.
+ require.Error(t, err)
+ assert.Nil(t, deletedBranch)
+
+ // Now get the deletedBranch with ID of 1 on repo with ID 1.
+ // This should return the deletedBranch.
+ repo1 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+
+ deletedBranch, err = git_model.GetDeletedBranchByID(db.DefaultContext, repo1.ID, 1)
+
+ // Expect no error, and the returned branch to be not nil.
+ require.NoError(t, err)
+ assert.NotNil(t, deletedBranch)
+}
+
+func TestFindBranchesByRepoAndBranchName(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ // With no repos or branches given, we find no branches.
+ branches, err := git_model.FindBranchesByRepoAndBranchName(db.DefaultContext, map[int64]string{})
+ require.NoError(t, err)
+ assert.Empty(t, branches)
+}
diff --git a/models/git/commit_status.go b/models/git/commit_status.go
new file mode 100644
index 0000000..53d1ddc
--- /dev/null
+++ b/models/git/commit_status.go
@@ -0,0 +1,519 @@
+// Copyright 2017 Gitea. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git
+
+import (
+ "context"
+ "crypto/sha1"
+ "errors"
+ "fmt"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ asymkey_model "code.gitea.io/gitea/models/asymkey"
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/translation"
+
+ "xorm.io/builder"
+ "xorm.io/xorm"
+)
+
+// CommitStatus holds a single Status of a single Commit
+type CommitStatus struct {
+ ID int64 `xorm:"pk autoincr"`
+ Index int64 `xorm:"INDEX UNIQUE(repo_sha_index)"`
+ RepoID int64 `xorm:"INDEX UNIQUE(repo_sha_index)"`
+ Repo *repo_model.Repository `xorm:"-"`
+ State api.CommitStatusState `xorm:"VARCHAR(7) NOT NULL"`
+ SHA string `xorm:"VARCHAR(64) NOT NULL INDEX UNIQUE(repo_sha_index)"`
+ TargetURL string `xorm:"TEXT"`
+ Description string `xorm:"TEXT"`
+ ContextHash string `xorm:"VARCHAR(64) index"`
+ Context string `xorm:"TEXT"`
+ Creator *user_model.User `xorm:"-"`
+ CreatorID int64
+
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+}
+
+func init() {
+ db.RegisterModel(new(CommitStatus))
+ db.RegisterModel(new(CommitStatusIndex))
+}
+
+func postgresGetCommitStatusIndex(ctx context.Context, repoID int64, sha string) (int64, error) {
+ res, err := db.GetEngine(ctx).Query("INSERT INTO `commit_status_index` (repo_id, sha, max_index) "+
+ "VALUES (?,?,1) ON CONFLICT (repo_id, sha) DO UPDATE SET max_index = `commit_status_index`.max_index+1 RETURNING max_index",
+ repoID, sha)
+ if err != nil {
+ return 0, err
+ }
+ if len(res) == 0 {
+ return 0, db.ErrGetResourceIndexFailed
+ }
+ return strconv.ParseInt(string(res[0]["max_index"]), 10, 64)
+}
+
+func mysqlGetCommitStatusIndex(ctx context.Context, repoID int64, sha string) (int64, error) {
+ if _, err := db.GetEngine(ctx).Exec("INSERT INTO `commit_status_index` (repo_id, sha, max_index) "+
+ "VALUES (?,?,1) ON DUPLICATE KEY UPDATE max_index = max_index+1",
+ repoID, sha); err != nil {
+ return 0, err
+ }
+
+ var idx int64
+ _, err := db.GetEngine(ctx).SQL("SELECT max_index FROM `commit_status_index` WHERE repo_id = ? AND sha = ?",
+ repoID, sha).Get(&idx)
+ if err != nil {
+ return 0, err
+ }
+ if idx == 0 {
+ return 0, errors.New("cannot get the correct index")
+ }
+ return idx, nil
+}
+
+// GetNextCommitStatusIndex generates and returns the next commit status index for the given repository and SHA
+func GetNextCommitStatusIndex(ctx context.Context, repoID int64, sha string) (int64, error) {
+ _, err := git.NewIDFromString(sha)
+ if err != nil {
+ return 0, git.ErrInvalidSHA{SHA: sha}
+ }
+
+ switch {
+ case setting.Database.Type.IsPostgreSQL():
+ return postgresGetCommitStatusIndex(ctx, repoID, sha)
+ case setting.Database.Type.IsMySQL():
+ return mysqlGetCommitStatusIndex(ctx, repoID, sha)
+ }
+
+ e := db.GetEngine(ctx)
+
+ // try to update the max_index to next value, and acquire the write-lock for the record
+ res, err := e.Exec("UPDATE `commit_status_index` SET max_index=max_index+1 WHERE repo_id=? AND sha=?", repoID, sha)
+ if err != nil {
+ return 0, fmt.Errorf("update failed: %w", err)
+ }
+ affected, err := res.RowsAffected()
+ if err != nil {
+ return 0, err
+ }
+ if affected == 0 {
+ // this slow path is only taken the first time an index is created for this repo/SHA
+ _, errIns := e.Exec("INSERT INTO `commit_status_index` (repo_id, sha, max_index) VALUES (?, ?, 0)", repoID, sha)
+ res, err = e.Exec("UPDATE `commit_status_index` SET max_index=max_index+1 WHERE repo_id=? AND sha=?", repoID, sha)
+ if err != nil {
+ return 0, fmt.Errorf("update2 failed: %w", err)
+ }
+ affected, err = res.RowsAffected()
+ if err != nil {
+ return 0, fmt.Errorf("RowsAffected failed: %w", err)
+ }
+ // if the update still cannot change any row, the record must not exist and the earlier insert must have failed
+ if affected == 0 {
+ if errIns == nil {
+ return 0, errors.New("impossible error when GetNextCommitStatusIndex, insert and update both succeeded but no record is updated")
+ }
+ return 0, fmt.Errorf("insert failed: %w", errIns)
+ }
+ }
+
+ // now, the new index is in database (protected by the transaction and write-lock)
+ var newIdx int64
+ has, err := e.SQL("SELECT max_index FROM `commit_status_index` WHERE repo_id=? AND sha=?", repoID, sha).Get(&newIdx)
+ if err != nil {
+ return 0, fmt.Errorf("select failed: %w", err)
+ }
+ if !has {
+ return 0, errors.New("impossible error when GetNextCommitStatusIndex, upsert succeeded but no record can be selected")
+ }
+ return newIdx, nil
+}
+
+func (status *CommitStatus) loadRepository(ctx context.Context) (err error) {
+ if status.Repo == nil {
+ status.Repo, err = repo_model.GetRepositoryByID(ctx, status.RepoID)
+ if err != nil {
+ return fmt.Errorf("getRepositoryByID [%d]: %w", status.RepoID, err)
+ }
+ }
+ return nil
+}
+
+func (status *CommitStatus) loadCreator(ctx context.Context) (err error) {
+ if status.Creator == nil && status.CreatorID > 0 {
+ status.Creator, err = user_model.GetUserByID(ctx, status.CreatorID)
+ if err != nil {
+ return fmt.Errorf("getUserByID [%d]: %w", status.CreatorID, err)
+ }
+ }
+ return nil
+}
+
+func (status *CommitStatus) loadAttributes(ctx context.Context) (err error) {
+ if err := status.loadRepository(ctx); err != nil {
+ return err
+ }
+ return status.loadCreator(ctx)
+}
+
+// APIURL returns the absolute API URL of this commit status.
+func (status *CommitStatus) APIURL(ctx context.Context) string {
+ _ = status.loadAttributes(ctx)
+ return status.Repo.APIURL() + "/statuses/" + url.PathEscape(status.SHA)
+}
+
+// LocaleString returns the locale string name of the Status
+func (status *CommitStatus) LocaleString(lang translation.Locale) string {
+ return lang.TrString("repo.commitstatus." + status.State.String())
+}
+
+// HideActionsURL sets `TargetURL` to an empty string if the status comes from Gitea Actions
+func (status *CommitStatus) HideActionsURL(ctx context.Context) {
+ if status.RepoID == 0 {
+ return
+ }
+
+ if status.Repo == nil {
+ if err := status.loadRepository(ctx); err != nil {
+ log.Error("loadRepository: %v", err)
+ return
+ }
+ }
+
+ prefix := fmt.Sprintf("%s/actions", status.Repo.Link())
+ if strings.HasPrefix(status.TargetURL, prefix) {
+ status.TargetURL = ""
+ }
+}
+
+// CalcCommitStatus returns the overall commit status derived from the given statuses; the statuses should be ordered by ID descending
+func CalcCommitStatus(statuses []*CommitStatus) *CommitStatus {
+ if len(statuses) == 0 {
+ return nil
+ }
+
+ latestWorstStatus := statuses[0]
+ for _, status := range statuses[1:] {
+ if status.State.NoBetterThan(latestWorstStatus.State) {
+ latestWorstStatus = status
+ }
+ }
+ return latestWorstStatus
+}
+
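+// Worked sketch (illustrative): with statuses ordered by ID descending, the
+// latest occurrence of the worst state wins. Given
+//
+//  statuses := []*CommitStatus{
+//      {ID: 3, State: api.CommitStatusSuccess},
+//      {ID: 2, State: api.CommitStatusFailure},
+//      {ID: 1, State: api.CommitStatusPending},
+//  }
+//
+// CalcCommitStatus(statuses) returns the ID 2 entry, because failure is no
+// better than success, while pending is better than failure.
+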
+// CommitStatusOptions holds the options for querying commit statuses
+type CommitStatusOptions struct {
+ db.ListOptions
+ RepoID int64
+ SHA string
+ State string
+ SortType string
+}
+
+func (opts *CommitStatusOptions) ToConds() builder.Cond {
+ var cond builder.Cond = builder.Eq{
+ "repo_id": opts.RepoID,
+ "sha": opts.SHA,
+ }
+
+ switch opts.State {
+ case "pending", "success", "error", "failure", "warning":
+ cond = cond.And(builder.Eq{
+ "state": opts.State,
+ })
+ }
+
+ return cond
+}
+
+func (opts *CommitStatusOptions) ToOrders() string {
+ switch opts.SortType {
+ case "oldest":
+ return "created_unix ASC"
+ case "recentupdate":
+ return "updated_unix DESC"
+ case "leastupdate":
+ return "updated_unix ASC"
+ case "leastindex":
+ return "`index` DESC"
+ case "highestindex":
+ return "`index` ASC"
+ default:
+ return "created_unix DESC"
+ }
+}
+
+// CommitStatusIndex represents a table for commit status index
+type CommitStatusIndex struct {
+ ID int64
+ RepoID int64 `xorm:"unique(repo_sha)"`
+ SHA string `xorm:"unique(repo_sha)"`
+ MaxIndex int64 `xorm:"index"`
+}
+
+// GetLatestCommitStatus returns all statuses with a unique context for a given commit.
+func GetLatestCommitStatus(ctx context.Context, repoID int64, sha string, listOptions db.ListOptions) ([]*CommitStatus, int64, error) {
+ getBase := func() *xorm.Session {
+ return db.GetEngine(ctx).Table(&CommitStatus{}).
+ Where("repo_id = ?", repoID).And("sha = ?", sha)
+ }
+ indices := make([]int64, 0, 10)
+ sess := getBase().Select("max( `index` ) as `index`").
+ GroupBy("context_hash").OrderBy("max( `index` ) desc")
+ if !listOptions.IsListAll() {
+ sess = db.SetSessionPagination(sess, &listOptions)
+ }
+ count, err := sess.FindAndCount(&indices)
+ if err != nil {
+ return nil, count, err
+ }
+ statuses := make([]*CommitStatus, 0, len(indices))
+ if len(indices) == 0 {
+ return statuses, count, nil
+ }
+ return statuses, count, getBase().And(builder.In("`index`", indices)).Find(&statuses)
+}
+
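+// Usage sketch (illustrative): fetch the latest status per context for one
+// commit without pagination, then reduce them to a single display state.
+//
+//  statuses, _, err := GetLatestCommitStatus(ctx, repo.ID, sha, db.ListOptionsAll)
+//  if err == nil {
+//      combined := CalcCommitStatus(statuses) // nil when there are no statuses
+//      _ = combined
+//  }
+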
+// GetLatestCommitStatusForPairs returns all statuses with a unique context for a given list of repo-sha pairs
+func GetLatestCommitStatusForPairs(ctx context.Context, repoSHAs []RepoSHA) (map[int64][]*CommitStatus, error) {
+ type result struct {
+ Index int64
+ RepoID int64
+ SHA string
+ }
+
+ results := make([]result, 0, len(repoSHAs))
+
+ getBase := func() *xorm.Session {
+ return db.GetEngine(ctx).Table(&CommitStatus{})
+ }
+
+ // Create a disjunction of conditions for each repoID and SHA pair
+ conds := make([]builder.Cond, 0, len(repoSHAs))
+ for _, repoSHA := range repoSHAs {
+ conds = append(conds, builder.Eq{"repo_id": repoSHA.RepoID, "sha": repoSHA.SHA})
+ }
+ sess := getBase().Where(builder.Or(conds...)).
+ Select("max( `index` ) as `index`, repo_id, sha").
+ GroupBy("context_hash, repo_id, sha").OrderBy("max( `index` ) desc")
+
+ err := sess.Find(&results)
+ if err != nil {
+ return nil, err
+ }
+
+ repoStatuses := make(map[int64][]*CommitStatus)
+
+ if len(results) > 0 {
+ statuses := make([]*CommitStatus, 0, len(results))
+
+ conds = make([]builder.Cond, 0, len(results))
+ for _, result := range results {
+ cond := builder.Eq{
+ "`index`": result.Index,
+ "repo_id": result.RepoID,
+ "sha": result.SHA,
+ }
+ conds = append(conds, cond)
+ }
+ err = getBase().Where(builder.Or(conds...)).Find(&statuses)
+ if err != nil {
+ return nil, err
+ }
+
+ // Group the statuses by repo ID
+ for _, status := range statuses {
+ repoStatuses[status.RepoID] = append(repoStatuses[status.RepoID], status)
+ }
+ }
+
+ return repoStatuses, nil
+}
+
+// GetLatestCommitStatusForRepoCommitIDs returns all statuses with a unique context for a repository and a list of commit IDs
+func GetLatestCommitStatusForRepoCommitIDs(ctx context.Context, repoID int64, commitIDs []string) (map[string][]*CommitStatus, error) {
+ type result struct {
+ Index int64
+ SHA string
+ }
+
+ getBase := func() *xorm.Session {
+ return db.GetEngine(ctx).Table(&CommitStatus{}).Where("repo_id = ?", repoID)
+ }
+ results := make([]result, 0, len(commitIDs))
+
+ conds := make([]builder.Cond, 0, len(commitIDs))
+ for _, sha := range commitIDs {
+ conds = append(conds, builder.Eq{"sha": sha})
+ }
+ sess := getBase().And(builder.Or(conds...)).
+ Select("max( `index` ) as `index`, sha").
+ GroupBy("context_hash, sha").OrderBy("max( `index` ) desc")
+
+ err := sess.Find(&results)
+ if err != nil {
+ return nil, err
+ }
+
+ repoStatuses := make(map[string][]*CommitStatus)
+
+ if len(results) > 0 {
+ statuses := make([]*CommitStatus, 0, len(results))
+
+ conds = make([]builder.Cond, 0, len(results))
+ for _, result := range results {
+ conds = append(conds, builder.Eq{"`index`": result.Index, "sha": result.SHA})
+ }
+ err = getBase().And(builder.Or(conds...)).Find(&statuses)
+ if err != nil {
+ return nil, err
+ }
+
+ // Group the statuses by commit
+ for _, status := range statuses {
+ repoStatuses[status.SHA] = append(repoStatuses[status.SHA], status)
+ }
+ }
+
+ return repoStatuses, nil
+}
+
+// FindRepoRecentCommitStatusContexts returns a repository's recent commit status contexts
+func FindRepoRecentCommitStatusContexts(ctx context.Context, repoID int64, before time.Duration) ([]string, error) {
+ start := timeutil.TimeStampNow().AddDuration(-before)
+
+ var contexts []string
+ if err := db.GetEngine(ctx).Table("commit_status").
+ Where("repo_id = ?", repoID).And("updated_unix >= ?", start).
+ Cols("context").Distinct().Find(&contexts); err != nil {
+ return nil, err
+ }
+
+ return contexts, nil
+}
+
+// NewCommitStatusOptions holds options for creating a CommitStatus
+type NewCommitStatusOptions struct {
+ Repo *repo_model.Repository
+ Creator *user_model.User
+ SHA git.ObjectID
+ CommitStatus *CommitStatus
+}
+
+// NewCommitStatus saves a commit status into the database
+func NewCommitStatus(ctx context.Context, opts NewCommitStatusOptions) error {
+ if opts.Repo == nil {
+ return fmt.Errorf("NewCommitStatus[nil, %s]: no repository specified", opts.SHA)
+ }
+
+ repoPath := opts.Repo.RepoPath()
+ if opts.Creator == nil {
+ return fmt.Errorf("NewCommitStatus[%s, %s]: no user specified", repoPath, opts.SHA)
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return fmt.Errorf("NewCommitStatus[repo_id: %d, user_id: %d, sha: %s]: %w", opts.Repo.ID, opts.Creator.ID, opts.SHA, err)
+ }
+ defer committer.Close()
+
+ // Get the next Status Index
+ idx, err := GetNextCommitStatusIndex(ctx, opts.Repo.ID, opts.SHA.String())
+ if err != nil {
+ return fmt.Errorf("generate commit status index failed: %w", err)
+ }
+
+ opts.CommitStatus.Description = strings.TrimSpace(opts.CommitStatus.Description)
+ opts.CommitStatus.Context = strings.TrimSpace(opts.CommitStatus.Context)
+ opts.CommitStatus.TargetURL = strings.TrimSpace(opts.CommitStatus.TargetURL)
+ opts.CommitStatus.SHA = opts.SHA.String()
+ opts.CommitStatus.CreatorID = opts.Creator.ID
+ opts.CommitStatus.RepoID = opts.Repo.ID
+ opts.CommitStatus.Index = idx
+ log.Debug("NewCommitStatus[%s, %s]: %d", repoPath, opts.SHA, opts.CommitStatus.Index)
+
+ opts.CommitStatus.ContextHash = hashCommitStatusContext(opts.CommitStatus.Context)
+
+ // Insert new CommitStatus
+ if _, err = db.GetEngine(ctx).Insert(opts.CommitStatus); err != nil {
+ return fmt.Errorf("insert CommitStatus[%s, %s]: %w", repoPath, opts.SHA, err)
+ }
+
+ return committer.Commit()
+}
+
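+// Usage sketch (illustrative): record a CI result for a commit; objectID is
+// assumed to be a git.ObjectID parsed from a validated SHA.
+//
+//  err := NewCommitStatus(ctx, NewCommitStatusOptions{
+//      Repo:    repo,
+//      Creator: doer,
+//      SHA:     objectID,
+//      CommitStatus: &CommitStatus{
+//          State:     api.CommitStatusSuccess,
+//          TargetURL: "https://ci.example.com/builds/1",
+//          Context:   "ci/build",
+//      },
+//  })
+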
+// SignCommitWithStatuses represents a commit with validation of signature and status state.
+type SignCommitWithStatuses struct {
+ Status *CommitStatus
+ Statuses []*CommitStatus
+ *asymkey_model.SignCommit
+}
+
+// ParseCommitsWithStatus loads the latest statuses of the given commits and calculates each commit's worst status state
+func ParseCommitsWithStatus(ctx context.Context, oldCommits []*asymkey_model.SignCommit, repo *repo_model.Repository) []*SignCommitWithStatuses {
+ newCommits := make([]*SignCommitWithStatuses, 0, len(oldCommits))
+
+ for _, c := range oldCommits {
+ commit := &SignCommitWithStatuses{
+ SignCommit: c,
+ }
+ statuses, _, err := GetLatestCommitStatus(ctx, repo.ID, commit.ID.String(), db.ListOptions{})
+ if err != nil {
+ log.Error("GetLatestCommitStatus: %v", err)
+ } else {
+ commit.Statuses = statuses
+ commit.Status = CalcCommitStatus(statuses)
+ }
+
+ newCommits = append(newCommits, commit)
+ }
+ return newCommits
+}
+
+// hashCommitStatusContext returns the SHA-1 hash of the context string
+func hashCommitStatusContext(context string) string {
+ return fmt.Sprintf("%x", sha1.Sum([]byte(context)))
+}
+
+// ConvertFromGitCommit converts git commits into SignCommitWithStatuses
+func ConvertFromGitCommit(ctx context.Context, commits []*git.Commit, repo *repo_model.Repository) []*SignCommitWithStatuses {
+ return ParseCommitsWithStatus(ctx,
+ asymkey_model.ParseCommitsWithSignature(
+ ctx,
+ user_model.ValidateCommitsWithEmails(ctx, commits),
+ repo.GetTrustModel(),
+ func(user *user_model.User) (bool, error) {
+ return repo_model.IsOwnerMemberCollaborator(ctx, repo, user.ID)
+ },
+ ),
+ repo,
+ )
+}
+
+// CommitStatusesHideActionsURL hides Gitea Actions URLs
+func CommitStatusesHideActionsURL(ctx context.Context, statuses []*CommitStatus) {
+ idToRepos := make(map[int64]*repo_model.Repository)
+ for _, status := range statuses {
+ if status == nil {
+ continue
+ }
+
+ if status.Repo == nil {
+ status.Repo = idToRepos[status.RepoID]
+ }
+ status.HideActionsURL(ctx)
+ idToRepos[status.RepoID] = status.Repo
+ }
+}
diff --git a/models/git/commit_status_summary.go b/models/git/commit_status_summary.go
new file mode 100644
index 0000000..7603e7a
--- /dev/null
+++ b/models/git/commit_status_summary.go
@@ -0,0 +1,88 @@
+// Copyright 2024 Gitea. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+
+ "xorm.io/builder"
+)
+
+// CommitStatusSummary holds the latest commit Status of a single Commit
+type CommitStatusSummary struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"INDEX UNIQUE(repo_id_sha)"`
+ SHA string `xorm:"VARCHAR(64) NOT NULL INDEX UNIQUE(repo_id_sha)"`
+ State api.CommitStatusState `xorm:"VARCHAR(7) NOT NULL"`
+ TargetURL string `xorm:"TEXT"`
+}
+
+func init() {
+ db.RegisterModel(new(CommitStatusSummary))
+}
+
+type RepoSHA struct {
+ RepoID int64
+ SHA string
+}
+
+// GetLatestCommitStatusForRepoAndSHAs returns the stored summary status for each given repo/SHA pair
+func GetLatestCommitStatusForRepoAndSHAs(ctx context.Context, repoSHAs []RepoSHA) ([]*CommitStatus, error) {
+ cond := builder.NewCond()
+ for _, rs := range repoSHAs {
+ cond = cond.Or(builder.Eq{"repo_id": rs.RepoID, "sha": rs.SHA})
+ }
+
+ var summaries []CommitStatusSummary
+ if err := db.GetEngine(ctx).Where(cond).Find(&summaries); err != nil {
+ return nil, err
+ }
+
+ commitStatuses := make([]*CommitStatus, 0, len(repoSHAs))
+ for _, summary := range summaries {
+ commitStatuses = append(commitStatuses, &CommitStatus{
+ RepoID: summary.RepoID,
+ SHA: summary.SHA,
+ State: summary.State,
+ TargetURL: summary.TargetURL,
+ })
+ }
+ return commitStatuses, nil
+}
+
+// UpdateCommitStatusSummary recalculates and stores the summary status for the given repository and SHA
+func UpdateCommitStatusSummary(ctx context.Context, repoID int64, sha string) error {
+ commitStatuses, _, err := GetLatestCommitStatus(ctx, repoID, sha, db.ListOptionsAll)
+ if err != nil {
+ return err
+ }
+ state := CalcCommitStatus(commitStatuses)
+ // MySQL reports 0 affected rows when an update leaves the state unchanged, which differs
+ // from other databases, so use INSERT ... ON DUPLICATE KEY UPDATE there instead
+ if setting.Database.Type.IsMySQL() {
+ _, err := db.GetEngine(ctx).Exec("INSERT INTO commit_status_summary (repo_id,sha,state,target_url) VALUES (?,?,?,?) ON DUPLICATE KEY UPDATE state=?",
+ repoID, sha, state.State, state.TargetURL, state.State)
+ return err
+ }
+
+ if cnt, err := db.GetEngine(ctx).Where("repo_id=? AND sha=?", repoID, sha).
+ Cols("state, target_url").
+ Update(&CommitStatusSummary{
+ State: state.State,
+ TargetURL: state.TargetURL,
+ }); err != nil {
+ return err
+ } else if cnt == 0 {
+ _, err = db.GetEngine(ctx).Insert(&CommitStatusSummary{
+ RepoID: repoID,
+ SHA: sha,
+ State: state.State,
+ TargetURL: state.TargetURL,
+ })
+ return err
+ }
+ return nil
+}
diff --git a/models/git/commit_status_test.go b/models/git/commit_status_test.go
new file mode 100644
index 0000000..1014ee1
--- /dev/null
+++ b/models/git/commit_status_test.go
@@ -0,0 +1,267 @@
+// Copyright 2017 Gitea. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git_test
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ actions_model "code.gitea.io/gitea/models/actions"
+ "code.gitea.io/gitea/models/db"
+ git_model "code.gitea.io/gitea/models/git"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/gitrepo"
+ "code.gitea.io/gitea/modules/structs"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGetCommitStatuses(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo1 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+
+ sha1 := "1234123412341234123412341234123412341234"
+
+ statuses, maxResults, err := db.FindAndCount[git_model.CommitStatus](db.DefaultContext, &git_model.CommitStatusOptions{
+ ListOptions: db.ListOptions{Page: 1, PageSize: 50},
+ RepoID: repo1.ID,
+ SHA: sha1,
+ })
+ require.NoError(t, err)
+ assert.Equal(t, 5, int(maxResults))
+ assert.Len(t, statuses, 5)
+
+ assert.Equal(t, "ci/awesomeness", statuses[0].Context)
+ assert.Equal(t, structs.CommitStatusPending, statuses[0].State)
+ assert.Equal(t, "https://try.gitea.io/api/v1/repos/user2/repo1/statuses/1234123412341234123412341234123412341234", statuses[0].APIURL(db.DefaultContext))
+
+ assert.Equal(t, "cov/awesomeness", statuses[1].Context)
+ assert.Equal(t, structs.CommitStatusWarning, statuses[1].State)
+ assert.Equal(t, "https://try.gitea.io/api/v1/repos/user2/repo1/statuses/1234123412341234123412341234123412341234", statuses[1].APIURL(db.DefaultContext))
+
+ assert.Equal(t, "cov/awesomeness", statuses[2].Context)
+ assert.Equal(t, structs.CommitStatusSuccess, statuses[2].State)
+ assert.Equal(t, "https://try.gitea.io/api/v1/repos/user2/repo1/statuses/1234123412341234123412341234123412341234", statuses[2].APIURL(db.DefaultContext))
+
+ assert.Equal(t, "ci/awesomeness", statuses[3].Context)
+ assert.Equal(t, structs.CommitStatusFailure, statuses[3].State)
+ assert.Equal(t, "https://try.gitea.io/api/v1/repos/user2/repo1/statuses/1234123412341234123412341234123412341234", statuses[3].APIURL(db.DefaultContext))
+
+ assert.Equal(t, "deploy/awesomeness", statuses[4].Context)
+ assert.Equal(t, structs.CommitStatusError, statuses[4].State)
+ assert.Equal(t, "https://try.gitea.io/api/v1/repos/user2/repo1/statuses/1234123412341234123412341234123412341234", statuses[4].APIURL(db.DefaultContext))
+
+ statuses, maxResults, err = db.FindAndCount[git_model.CommitStatus](db.DefaultContext, &git_model.CommitStatusOptions{
+ ListOptions: db.ListOptions{Page: 2, PageSize: 50},
+ RepoID: repo1.ID,
+ SHA: sha1,
+ })
+ require.NoError(t, err)
+ assert.Equal(t, 5, int(maxResults))
+ assert.Empty(t, statuses)
+}
+
+func Test_CalcCommitStatus(t *testing.T) {
+ kases := []struct {
+ statuses []*git_model.CommitStatus
+ expected *git_model.CommitStatus
+ }{
+ {
+ statuses: []*git_model.CommitStatus{
+ {
+ State: structs.CommitStatusPending,
+ },
+ },
+ expected: &git_model.CommitStatus{
+ State: structs.CommitStatusPending,
+ },
+ },
+ {
+ statuses: []*git_model.CommitStatus{
+ {
+ State: structs.CommitStatusSuccess,
+ },
+ {
+ State: structs.CommitStatusPending,
+ },
+ },
+ expected: &git_model.CommitStatus{
+ State: structs.CommitStatusPending,
+ },
+ },
+ {
+ statuses: []*git_model.CommitStatus{
+ {
+ State: structs.CommitStatusSuccess,
+ },
+ {
+ State: structs.CommitStatusPending,
+ },
+ {
+ State: structs.CommitStatusSuccess,
+ },
+ },
+ expected: &git_model.CommitStatus{
+ State: structs.CommitStatusPending,
+ },
+ },
+ {
+ statuses: []*git_model.CommitStatus{
+ {
+ State: structs.CommitStatusError,
+ },
+ {
+ State: structs.CommitStatusPending,
+ },
+ {
+ State: structs.CommitStatusSuccess,
+ },
+ },
+ expected: &git_model.CommitStatus{
+ State: structs.CommitStatusError,
+ },
+ },
+ {
+ statuses: []*git_model.CommitStatus{
+ {
+ State: structs.CommitStatusWarning,
+ },
+ {
+ State: structs.CommitStatusPending,
+ },
+ {
+ State: structs.CommitStatusSuccess,
+ },
+ },
+ expected: &git_model.CommitStatus{
+ State: structs.CommitStatusWarning,
+ },
+ },
+ {
+ statuses: []*git_model.CommitStatus{
+ {
+ State: structs.CommitStatusSuccess,
+ ID: 1,
+ },
+ {
+ State: structs.CommitStatusSuccess,
+ ID: 2,
+ },
+ {
+ State: structs.CommitStatusSuccess,
+ ID: 3,
+ },
+ },
+ expected: &git_model.CommitStatus{
+ State: structs.CommitStatusSuccess,
+ ID: 3,
+ },
+ },
+ {
+ statuses: []*git_model.CommitStatus{
+ {
+ State: structs.CommitStatusFailure,
+ },
+ {
+ State: structs.CommitStatusError,
+ },
+ {
+ State: structs.CommitStatusWarning,
+ },
+ },
+ expected: &git_model.CommitStatus{
+ State: structs.CommitStatusError,
+ },
+ },
+ {
+ statuses: []*git_model.CommitStatus{},
+ expected: nil,
+ },
+ }
+
+ for _, kase := range kases {
+ assert.Equal(t, kase.expected, git_model.CalcCommitStatus(kase.statuses))
+ }
+}
+
+func TestFindRepoRecentCommitStatusContexts(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo2 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 2})
+ user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+ gitRepo, err := gitrepo.OpenRepository(git.DefaultContext, repo2)
+ require.NoError(t, err)
+ defer gitRepo.Close()
+
+ commit, err := gitRepo.GetBranchCommit(repo2.DefaultBranch)
+ require.NoError(t, err)
+
+ defer func() {
+ _, err := db.DeleteByBean(db.DefaultContext, &git_model.CommitStatus{
+ RepoID: repo2.ID,
+ CreatorID: user2.ID,
+ SHA: commit.ID.String(),
+ })
+ require.NoError(t, err)
+ }()
+
+ err = git_model.NewCommitStatus(db.DefaultContext, git_model.NewCommitStatusOptions{
+ Repo: repo2,
+ Creator: user2,
+ SHA: commit.ID,
+ CommitStatus: &git_model.CommitStatus{
+ State: structs.CommitStatusFailure,
+ TargetURL: "https://example.com/tests/",
+ Context: "compliance/lint-backend",
+ },
+ })
+ require.NoError(t, err)
+
+ err = git_model.NewCommitStatus(db.DefaultContext, git_model.NewCommitStatusOptions{
+ Repo: repo2,
+ Creator: user2,
+ SHA: commit.ID,
+ CommitStatus: &git_model.CommitStatus{
+ State: structs.CommitStatusSuccess,
+ TargetURL: "https://example.com/tests/",
+ Context: "compliance/lint-backend",
+ },
+ })
+ require.NoError(t, err)
+
+ contexts, err := git_model.FindRepoRecentCommitStatusContexts(db.DefaultContext, repo2.ID, time.Hour)
+ require.NoError(t, err)
+ if assert.Len(t, contexts, 1) {
+ assert.Equal(t, "compliance/lint-backend", contexts[0])
+ }
+}
+
+func TestCommitStatusesHideActionsURL(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 4})
+ run := unittest.AssertExistsAndLoadBean(t, &actions_model.ActionRun{ID: 791, RepoID: repo.ID})
+ require.NoError(t, run.LoadAttributes(db.DefaultContext))
+
+ statuses := []*git_model.CommitStatus{
+ {
+ RepoID: repo.ID,
+ TargetURL: fmt.Sprintf("%s/jobs/%d", run.Link(), run.Index),
+ },
+ {
+ RepoID: repo.ID,
+ TargetURL: "https://mycicd.org/1",
+ },
+ }
+
+ git_model.CommitStatusesHideActionsURL(db.DefaultContext, statuses)
+ assert.Empty(t, statuses[0].TargetURL)
+ assert.Equal(t, "https://mycicd.org/1", statuses[1].TargetURL)
+}
diff --git a/models/git/lfs.go b/models/git/lfs.go
new file mode 100644
index 0000000..44b741c
--- /dev/null
+++ b/models/git/lfs.go
@@ -0,0 +1,419 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/lfs"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ErrLFSLockNotExist represents a "LFSLockNotExist" kind of error.
+type ErrLFSLockNotExist struct {
+ ID int64
+ RepoID int64
+ Path string
+}
+
+// IsErrLFSLockNotExist checks if an error is an ErrLFSLockNotExist.
+func IsErrLFSLockNotExist(err error) bool {
+ _, ok := err.(ErrLFSLockNotExist)
+ return ok
+}
+
+func (err ErrLFSLockNotExist) Error() string {
+ return fmt.Sprintf("lfs lock does not exist [id: %d, rid: %d, path: %s]", err.ID, err.RepoID, err.Path)
+}
+
+func (err ErrLFSLockNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ErrLFSUnauthorizedAction represents a "LFSUnauthorizedAction" kind of error.
+type ErrLFSUnauthorizedAction struct {
+ RepoID int64
+ UserName string
+ Mode perm.AccessMode
+}
+
+// IsErrLFSUnauthorizedAction checks if an error is an ErrLFSUnauthorizedAction.
+func IsErrLFSUnauthorizedAction(err error) bool {
+ _, ok := err.(ErrLFSUnauthorizedAction)
+ return ok
+}
+
+func (err ErrLFSUnauthorizedAction) Error() string {
+ if err.Mode == perm.AccessModeWrite {
+ return fmt.Sprintf("User %s doesn't have write access for lfs lock [rid: %d]", err.UserName, err.RepoID)
+ }
+ return fmt.Sprintf("User %s doesn't have read access for lfs lock [rid: %d]", err.UserName, err.RepoID)
+}
+
+func (err ErrLFSUnauthorizedAction) Unwrap() error {
+ return util.ErrPermissionDenied
+}
+
+// ErrLFSLockAlreadyExist represents a "LFSLockAlreadyExist" kind of error.
+type ErrLFSLockAlreadyExist struct {
+ RepoID int64
+ Path string
+}
+
+// IsErrLFSLockAlreadyExist checks if an error is an ErrLFSLockAlreadyExist.
+func IsErrLFSLockAlreadyExist(err error) bool {
+ _, ok := err.(ErrLFSLockAlreadyExist)
+ return ok
+}
+
+func (err ErrLFSLockAlreadyExist) Error() string {
+ return fmt.Sprintf("lfs lock already exists [rid: %d, path: %s]", err.RepoID, err.Path)
+}
+
+func (err ErrLFSLockAlreadyExist) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+// ErrLFSFileLocked represents a "LFSFileLocked" kind of error.
+type ErrLFSFileLocked struct {
+ RepoID int64
+ Path string
+ UserName string
+}
+
+// IsErrLFSFileLocked checks if an error is an ErrLFSFileLocked.
+func IsErrLFSFileLocked(err error) bool {
+ _, ok := err.(ErrLFSFileLocked)
+ return ok
+}
+
+func (err ErrLFSFileLocked) Error() string {
+ return fmt.Sprintf("File is lfs locked [repo: %d, locked by: %s, path: %s]", err.RepoID, err.UserName, err.Path)
+}
+
+func (err ErrLFSFileLocked) Unwrap() error {
+ return util.ErrPermissionDenied
+}
+
+// LFSMetaObject stores metadata for LFS tracked files.
+type LFSMetaObject struct {
+ ID int64 `xorm:"pk autoincr"`
+ lfs.Pointer `xorm:"extends"`
+ RepositoryID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ Existing bool `xorm:"-"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+}
+
+func init() {
+ db.RegisterModel(new(LFSMetaObject))
+}
+
+// LFSTokenResponse defines the JSON structure in which the JWT token is stored.
+// This structure is fetched via SSH and passed by the Git LFS client to the server
+// endpoint for authorization.
+type LFSTokenResponse struct {
+ Header map[string]string `json:"header"`
+ Href string `json:"href"`
+}
+
+// ErrLFSObjectNotExist is returned from LFS model functions in order
+// to differentiate between database errors and missing objects.
+var ErrLFSObjectNotExist = db.ErrNotExist{Resource: "LFS Meta object"}
+
+// NewLFSMetaObject stores a given populated LFSMetaObject structure in the database
+// if it is not already present.
+func NewLFSMetaObject(ctx context.Context, repoID int64, p lfs.Pointer) (*LFSMetaObject, error) {
+ var err error
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer committer.Close()
+
+ m, exist, err := db.Get[LFSMetaObject](ctx, builder.Eq{"repository_id": repoID, "oid": p.Oid})
+ if err != nil {
+ return nil, err
+ } else if exist {
+ m.Existing = true
+ return m, committer.Commit()
+ }
+
+ m = &LFSMetaObject{Pointer: p, RepositoryID: repoID}
+ if err = db.Insert(ctx, m); err != nil {
+ return nil, err
+ }
+
+ return m, committer.Commit()
+}
+
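+// Usage sketch (illustrative): record a pointer after an upload; Existing
+// reports whether the object was already tracked for this repository.
+//
+//  meta, err := NewLFSMetaObject(ctx, repo.ID, lfs.Pointer{Oid: oid, Size: size})
+//  if err == nil && meta.Existing {
+//      // the content is already present, no need to store it again
+//  }
+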
+// GetLFSMetaObjectByOid selects an LFSMetaObject entry from the database by its OID.
+// It may return ErrLFSObjectNotExist or a database error. If the error is nil,
+// the returned pointer is a valid LFSMetaObject.
+func GetLFSMetaObjectByOid(ctx context.Context, repoID int64, oid string) (*LFSMetaObject, error) {
+ if len(oid) == 0 {
+ return nil, ErrLFSObjectNotExist
+ }
+
+ m := &LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}, RepositoryID: repoID}
+ has, err := db.GetEngine(ctx).Get(m)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrLFSObjectNotExist
+ }
+ return m, nil
+}
+
+// RemoveLFSMetaObjectByOid removes an LFSMetaObject entry from the database by its OID.
+// It may return ErrLFSObjectNotExist or a database error.
+func RemoveLFSMetaObjectByOid(ctx context.Context, repoID int64, oid string) (int64, error) {
+ return RemoveLFSMetaObjectByOidFn(ctx, repoID, oid, nil)
+}
+
+// RemoveLFSMetaObjectByOidFn removes an LFSMetaObject entry from the database by its OID.
+// It may return ErrLFSObjectNotExist or a database error. Within the transaction, it runs fn with the count of remaining entries sharing the OID.
+func RemoveLFSMetaObjectByOidFn(ctx context.Context, repoID int64, oid string, fn func(count int64) error) (int64, error) {
+ if len(oid) == 0 {
+ return 0, ErrLFSObjectNotExist
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return 0, err
+ }
+ defer committer.Close()
+
+ m := &LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}, RepositoryID: repoID}
+ if _, err := db.DeleteByBean(ctx, m); err != nil {
+ return -1, err
+ }
+
+ count, err := db.CountByBean(ctx, &LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}})
+ if err != nil {
+ return count, err
+ }
+
+ if fn != nil {
+ if err := fn(count); err != nil {
+ return count, err
+ }
+ }
+
+ return count, committer.Commit()
+}
+
+// GetLFSMetaObjects returns the LFSMetaObjects associated with a repository, optionally paginated
+func GetLFSMetaObjects(ctx context.Context, repoID int64, page, pageSize int) ([]*LFSMetaObject, error) {
+ sess := db.GetEngine(ctx)
+
+ if page >= 0 && pageSize > 0 {
+ start := 0
+ if page > 0 {
+ start = (page - 1) * pageSize
+ }
+ sess.Limit(pageSize, start)
+ }
+ lfsObjects := make([]*LFSMetaObject, 0, pageSize)
+ return lfsObjects, sess.Find(&lfsObjects, &LFSMetaObject{RepositoryID: repoID})
+}
+
+// CountLFSMetaObjects returns a count of all LFSMetaObjects associated with a repository
+func CountLFSMetaObjects(ctx context.Context, repoID int64) (int64, error) {
+ return db.GetEngine(ctx).Count(&LFSMetaObject{RepositoryID: repoID})
+}
+
+// LFSObjectAccessible checks if a provided Oid is accessible to the user
+func LFSObjectAccessible(ctx context.Context, user *user_model.User, oid string) (bool, error) {
+ if user.IsAdmin {
+ count, err := db.GetEngine(ctx).Count(&LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}})
+ return count > 0, err
+ }
+ cond := repo_model.AccessibleRepositoryCondition(user, unit.TypeInvalid)
+ count, err := db.GetEngine(ctx).Where(cond).Join("INNER", "repository", "`lfs_meta_object`.repository_id = `repository`.id").Count(&LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}})
+ return count > 0, err
+}
+
+// ExistsLFSObject checks if a provided Oid exists within the DB
+func ExistsLFSObject(ctx context.Context, oid string) (bool, error) {
+ return db.GetEngine(ctx).Exist(&LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}})
+}
+
+// LFSAutoAssociate auto associates accessible LFSMetaObjects
+func LFSAutoAssociate(ctx context.Context, metas []*LFSMetaObject, user *user_model.User, repoID int64) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ sess := db.GetEngine(ctx)
+
+ oids := make([]any, len(metas))
+ oidMap := make(map[string]*LFSMetaObject, len(metas))
+ for i, meta := range metas {
+ oids[i] = meta.Oid
+ oidMap[meta.Oid] = meta
+ }
+
+ if !user.IsAdmin {
+ newMetas := make([]*LFSMetaObject, 0, len(metas))
+ cond := builder.In(
+ "`lfs_meta_object`.repository_id",
+ builder.Select("`repository`.id").From("repository").Where(repo_model.AccessibleRepositoryCondition(user, unit.TypeInvalid)),
+ )
+ err = sess.Cols("oid").Where(cond).In("oid", oids...).GroupBy("oid").Find(&newMetas)
+ if err != nil {
+ return err
+ }
+ if len(newMetas) != len(oidMap) {
+ return fmt.Errorf("unable collect all LFS objects from database, expected %d, actually %d", len(oidMap), len(newMetas))
+ }
+ for i := range newMetas {
+ newMetas[i].Size = oidMap[newMetas[i].Oid].Size
+ newMetas[i].RepositoryID = repoID
+ }
+ if err = db.Insert(ctx, newMetas); err != nil {
+ return err
+ }
+ } else {
+ // an admin can associate any LFS object with any repository, and we do not care about
+ // errors (e.g. a duplicated unique key); even if an error occurs, it won't hurt users
+ // or make things worse
+ for i := range metas {
+ p := lfs.Pointer{Oid: metas[i].Oid, Size: metas[i].Size}
+ _, err = sess.Insert(&LFSMetaObject{
+ Pointer: p,
+ RepositoryID: repoID,
+ })
+ if err != nil {
+ log.Warn("failed to insert LFS meta object %-v for repo_id: %d into database, err=%v", p, repoID, err)
+ }
+ }
+ }
+ return committer.Commit()
+}
+
+// CopyLFS copies LFS data from one repo to another
+func CopyLFS(ctx context.Context, newRepo, oldRepo *repo_model.Repository) error {
+ var lfsObjects []*LFSMetaObject
+ if err := db.GetEngine(ctx).Where("repository_id=?", oldRepo.ID).Find(&lfsObjects); err != nil {
+ return err
+ }
+
+ for _, v := range lfsObjects {
+ v.ID = 0
+ v.RepositoryID = newRepo.ID
+ if err := db.Insert(ctx, v); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// GetRepoLFSSize returns the total size of a repository's LFS files
+func GetRepoLFSSize(ctx context.Context, repoID int64) (int64, error) {
+ lfsSize, err := db.GetEngine(ctx).Where("repository_id = ?", repoID).SumInt(new(LFSMetaObject), "size")
+ if err != nil {
+ return 0, fmt.Errorf("updateSize: GetLFSMetaObjects: %w", err)
+ }
+ return lfsSize, nil
+}
+
+// IterateRepositoryIDsWithLFSMetaObjects iterates across the repositories that have LFSMetaObjects
+func IterateRepositoryIDsWithLFSMetaObjects(ctx context.Context, f func(ctx context.Context, repoID, count int64) error) error {
+ batchSize := setting.Database.IterateBufferSize
+ sess := db.GetEngine(ctx)
+ var start int
+ type RepositoryCount struct {
+ RepositoryID int64
+ Count int64
+ }
+ for {
+ counts := make([]*RepositoryCount, 0, batchSize)
+ if err := sess.Select("repository_id, COUNT(id) AS count").
+ Table("lfs_meta_object").
+ GroupBy("repository_id").
+ OrderBy("repository_id ASC").Limit(batchSize, start).Find(&counts); err != nil {
+ return err
+ }
+ if len(counts) == 0 {
+ return nil
+ }
+ start += len(counts)
+
+ for _, count := range counts {
+ if err := f(ctx, count.RepositoryID, count.Count); err != nil {
+ return err
+ }
+ }
+ }
+}
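
A sketch of consuming the iterator for a made-up reporting task; the callback receives one (repoID, count) pair per repository, fetched in batches of IterateBufferSize:

package example // hypothetical example package

import (
	"context"

	git_model "code.gitea.io/gitea/models/git"
	"code.gitea.io/gitea/modules/log"
)

// reportLargeLFSRepos logs repositories whose LFS meta object count exceeds
// a threshold; both the helper and the threshold are illustrative only.
func reportLargeLFSRepos(ctx context.Context, threshold int64) error {
	return git_model.IterateRepositoryIDsWithLFSMetaObjects(ctx, func(ctx context.Context, repoID, count int64) error {
		if count > threshold {
			log.Info("repository %d has %d LFS meta objects", repoID, count)
		}
		return nil
	})
}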
+
+// IterateLFSMetaObjectsForRepoOptions provides options for IterateLFSMetaObjectsForRepo
+type IterateLFSMetaObjectsForRepoOptions struct {
+ OlderThan timeutil.TimeStamp
+ UpdatedLessRecentlyThan timeutil.TimeStamp
+}
+
+// IterateLFSMetaObjectsForRepo provides an iterator for a repository's LFSMetaObjects
+func IterateLFSMetaObjectsForRepo(ctx context.Context, repoID int64, f func(context.Context, *LFSMetaObject) error, opts *IterateLFSMetaObjectsForRepoOptions) error {
+ batchSize := setting.Database.IterateBufferSize
+ engine := db.GetEngine(ctx)
+ id := int64(0)
+
+ for {
+ beans := make([]*LFSMetaObject, 0, batchSize)
+ sess := engine.Table("lfs_meta_object").Select("`lfs_meta_object`.*").
+ Join("INNER", "`lfs_meta_object` AS l1", "`lfs_meta_object`.oid = `l1`.oid").
+ Where("`lfs_meta_object`.repository_id = ?", repoID)
+ if !opts.OlderThan.IsZero() {
+ sess.And("`lfs_meta_object`.created_unix < ?", opts.OlderThan)
+ }
+ if !opts.UpdatedLessRecentlyThan.IsZero() {
+ sess.And("`lfs_meta_object`.updated_unix < ?", opts.UpdatedLessRecentlyThan)
+ }
+ sess.GroupBy("`lfs_meta_object`.id").
+ And("`lfs_meta_object`.id > ?", id).
+ OrderBy("`lfs_meta_object`.id ASC")
+
+ if err := sess.Limit(batchSize, 0).Find(&beans); err != nil {
+ return err
+ }
+ if len(beans) == 0 {
+ return nil
+ }
+
+ for _, bean := range beans {
+ if err := f(ctx, bean); err != nil {
+ return err
+ }
+ }
+ id = beans[len(beans)-1].ID
+ }
+}
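
The options act as timestamp filters; a hypothetical garbage-collection style consumer collecting the IDs of objects older than a cutoff might look like this:

package example // hypothetical example package

import (
	"context"
	"time"

	git_model "code.gitea.io/gitea/models/git"
	"code.gitea.io/gitea/modules/timeutil"
)

// staleLFSObjectIDs collects the IDs of a repository's LFS meta objects
// created before now-age; the helper is illustrative only.
func staleLFSObjectIDs(ctx context.Context, repoID int64, age time.Duration) ([]int64, error) {
	cutoff := timeutil.TimeStamp(time.Now().Add(-age).Unix())
	var ids []int64
	err := git_model.IterateLFSMetaObjectsForRepo(ctx, repoID, func(ctx context.Context, m *git_model.LFSMetaObject) error {
		ids = append(ids, m.ID)
		return nil
	}, &git_model.IterateLFSMetaObjectsForRepoOptions{OlderThan: cutoff})
	return ids, err
}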
+
+// MarkLFSMetaObject updates the updated time for the provided LFSMetaObject
+func MarkLFSMetaObject(ctx context.Context, id int64) error {
+ obj := &LFSMetaObject{
+ UpdatedUnix: timeutil.TimeStampNow(),
+ }
+ count, err := db.GetEngine(ctx).ID(id).Update(obj)
+ if count != 1 {
+ log.Error("Unexpectedly updated %d LFSMetaObjects with ID: %d", count, id)
+ }
+ return err
+}
diff --git a/models/git/lfs_lock.go b/models/git/lfs_lock.go
new file mode 100644
index 0000000..07ce7d4
--- /dev/null
+++ b/models/git/lfs_lock.go
@@ -0,0 +1,209 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+)
+
+// LFSLock represents a Git LFS lock of a repository.
+type LFSLock struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"INDEX NOT NULL"`
+ OwnerID int64 `xorm:"INDEX NOT NULL"`
+ Owner *user_model.User `xorm:"-"`
+ Path string `xorm:"TEXT"`
+ Created time.Time `xorm:"created"`
+}
+
+func init() {
+ db.RegisterModel(new(LFSLock))
+}
+
+// BeforeInsert is invoked from XORM before inserting an object of this type.
+func (l *LFSLock) BeforeInsert() {
+ l.Path = util.PathJoinRel(l.Path)
+}
+
+// LoadAttributes loads attributes of the lock.
+func (l *LFSLock) LoadAttributes(ctx context.Context) error {
+ // Load owner
+ if err := l.LoadOwner(ctx); err != nil {
+ return fmt.Errorf("load owner: %w", err)
+ }
+
+ return nil
+}
+
+// LoadOwner loads owner of the lock.
+func (l *LFSLock) LoadOwner(ctx context.Context) error {
+ if l.Owner != nil {
+ return nil
+ }
+
+ owner, err := user_model.GetUserByID(ctx, l.OwnerID)
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ l.Owner = user_model.NewGhostUser()
+ return nil
+ }
+ return err
+ }
+ l.Owner = owner
+
+ return nil
+}
+
+// CreateLFSLock creates a new lock.
+func CreateLFSLock(ctx context.Context, repo *repo_model.Repository, lock *LFSLock) (*LFSLock, error) {
+ dbCtx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer committer.Close()
+
+ if err := CheckLFSAccessForRepo(dbCtx, lock.OwnerID, repo, perm.AccessModeWrite); err != nil {
+ return nil, err
+ }
+
+ lock.Path = util.PathJoinRel(lock.Path)
+ lock.RepoID = repo.ID
+
+ l, err := GetLFSLock(dbCtx, repo, lock.Path)
+ if err == nil {
+ return l, ErrLFSLockAlreadyExist{lock.RepoID, lock.Path}
+ }
+ if !IsErrLFSLockNotExist(err) {
+ return nil, err
+ }
+
+ if err := db.Insert(dbCtx, lock); err != nil {
+ return nil, err
+ }
+
+ return lock, committer.Commit()
+}
+
+// GetLFSLock returns the lock of the given path.
+func GetLFSLock(ctx context.Context, repo *repo_model.Repository, path string) (*LFSLock, error) {
+ path = util.PathJoinRel(path)
+ rel := &LFSLock{RepoID: repo.ID}
+ has, err := db.GetEngine(ctx).Where("lower(path) = ?", strings.ToLower(path)).Get(rel)
+ if err != nil {
+ return nil, err
+ }
+ if !has {
+ return nil, ErrLFSLockNotExist{0, repo.ID, path}
+ }
+ return rel, nil
+}
+
+// GetLFSLockByID returns the lock with the given ID.
+func GetLFSLockByID(ctx context.Context, id int64) (*LFSLock, error) {
+ lock := new(LFSLock)
+ has, err := db.GetEngine(ctx).ID(id).Get(lock)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrLFSLockNotExist{id, 0, ""}
+ }
+ return lock, nil
+}
+
+// GetLFSLockByRepoID returns a list of locks for the repository.
+func GetLFSLockByRepoID(ctx context.Context, repoID int64, page, pageSize int) (LFSLockList, error) {
+ e := db.GetEngine(ctx)
+ if page >= 0 && pageSize > 0 {
+ start := 0
+ if page > 0 {
+ start = (page - 1) * pageSize
+ }
+ e.Limit(pageSize, start)
+ }
+ lfsLocks := make(LFSLockList, 0, pageSize)
+ return lfsLocks, e.Find(&lfsLocks, &LFSLock{RepoID: repoID})
+}
+
+// GetTreePathLock returns the LFS lock for the given treePath
+func GetTreePathLock(ctx context.Context, repoID int64, treePath string) (*LFSLock, error) {
+ if !setting.LFS.StartServer {
+ return nil, nil
+ }
+
+ locks, err := GetLFSLockByRepoID(ctx, repoID, 0, 0)
+ if err != nil {
+ return nil, err
+ }
+ for _, lock := range locks {
+ if lock.Path == treePath {
+ return lock, nil
+ }
+ }
+ return nil, nil
+}
+
+// CountLFSLockByRepoID returns a count of all LFSLocks associated with a repository.
+func CountLFSLockByRepoID(ctx context.Context, repoID int64) (int64, error) {
+ return db.GetEngine(ctx).Count(&LFSLock{RepoID: repoID})
+}
+
+// DeleteLFSLockByID deletes a lock by given ID.
+func DeleteLFSLockByID(ctx context.Context, id int64, repo *repo_model.Repository, u *user_model.User, force bool) (*LFSLock, error) {
+ dbCtx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer committer.Close()
+
+ lock, err := GetLFSLockByID(dbCtx, id)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := CheckLFSAccessForRepo(dbCtx, u.ID, repo, perm.AccessModeWrite); err != nil {
+ return nil, err
+ }
+
+ if !force && u.ID != lock.OwnerID {
+ return nil, errors.New("user doesn't own lock and force flag is not set")
+ }
+
+ if _, err := db.GetEngine(dbCtx).ID(id).Delete(new(LFSLock)); err != nil {
+ return nil, err
+ }
+
+ return lock, committer.Commit()
+}
+
+// CheckLFSAccessForRepo checks that the user has the needed access mode for the repository
+func CheckLFSAccessForRepo(ctx context.Context, ownerID int64, repo *repo_model.Repository, mode perm.AccessMode) error {
+ if ownerID == 0 {
+ return ErrLFSUnauthorizedAction{repo.ID, "undefined", mode}
+ }
+ u, err := user_model.GetUserByID(ctx, ownerID)
+ if err != nil {
+ return err
+ }
+ perm, err := access_model.GetUserRepoPermission(ctx, repo, u)
+ if err != nil {
+ return err
+ }
+ if !perm.CanAccess(mode, unit.TypeCode) {
+ return ErrLFSUnauthorizedAction{repo.ID, u.DisplayName(), mode}
+ }
+ return nil
+}
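
Tying the lock API together, a hypothetical round trip (create a lock as its owner, then delete it without force) could be sketched as:

package example // hypothetical example package

import (
	"context"

	git_model "code.gitea.io/gitea/models/git"
	repo_model "code.gitea.io/gitea/models/repo"
	user_model "code.gitea.io/gitea/models/user"
)

// lockThenUnlock creates a lock on path and removes it again; CreateLFSLock
// returns ErrLFSLockAlreadyExist when the path is already locked.
func lockThenUnlock(ctx context.Context, repo *repo_model.Repository, u *user_model.User, path string) error {
	lock, err := git_model.CreateLFSLock(ctx, repo, &git_model.LFSLock{OwnerID: u.ID, Path: path})
	if err != nil {
		return err
	}
	_, err = git_model.DeleteLFSLockByID(ctx, lock.ID, repo, u, false)
	return err
}
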
diff --git a/models/git/lfs_lock_list.go b/models/git/lfs_lock_list.go
new file mode 100644
index 0000000..cab1e61
--- /dev/null
+++ b/models/git/lfs_lock_list.go
@@ -0,0 +1,54 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/container"
+)
+
+// LFSLockList is a list of LFSLock
+type LFSLockList []*LFSLock
+
+// LoadAttributes loads the attributes for the given locks
+func (locks LFSLockList) LoadAttributes(ctx context.Context) error {
+ if len(locks) == 0 {
+ return nil
+ }
+
+ if err := locks.LoadOwner(ctx); err != nil {
+ return fmt.Errorf("load owner: %w", err)
+ }
+
+ return nil
+}
+
+// LoadOwner loads the owner of the locks
+func (locks LFSLockList) LoadOwner(ctx context.Context) error {
+ if len(locks) == 0 {
+ return nil
+ }
+
+ usersIDs := container.FilterSlice(locks, func(lock *LFSLock) (int64, bool) {
+ return lock.OwnerID, true
+ })
+ users := make(map[int64]*user_model.User, len(usersIDs))
+ if err := db.GetEngine(ctx).
+ In("id", usersIDs).
+ Find(&users); err != nil {
+ return fmt.Errorf("find users: %w", err)
+ }
+ for _, v := range locks {
+ v.Owner = users[v.OwnerID]
+ if v.Owner == nil { // not exist
+ v.Owner = user_model.NewGhostUser()
+ }
+ }
+
+ return nil
+}
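
A hypothetical listing sketch pairing pagination with the batch owner loading above; locks whose owner no longer exists resolve to the ghost user:

package example // hypothetical example package

import (
	"context"

	git_model "code.gitea.io/gitea/models/git"
)

// listLocksWithOwners fetches one page of locks and resolves all owners
// with a single query via LoadAttributes.
func listLocksWithOwners(ctx context.Context, repoID int64, page, pageSize int) (git_model.LFSLockList, error) {
	locks, err := git_model.GetLFSLockByRepoID(ctx, repoID, page, pageSize)
	if err != nil {
		return nil, err
	}
	if err := locks.LoadAttributes(ctx); err != nil {
		return nil, err
	}
	return locks, nil
}
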
diff --git a/models/git/lfs_test.go b/models/git/lfs_test.go
new file mode 100644
index 0000000..afb73ec
--- /dev/null
+++ b/models/git/lfs_test.go
@@ -0,0 +1,102 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git
+
+import (
+ "context"
+ "path/filepath"
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/test"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestIterateRepositoryIDsWithLFSMetaObjects(t *testing.T) {
+ defer unittest.OverrideFixtures(
+ unittest.FixturesOptions{
+ Dir: filepath.Join(setting.AppWorkPath, "models/fixtures/"),
+ Base: setting.AppWorkPath,
+ Dirs: []string{"models/git/TestIterateRepositoryIDsWithLFSMetaObjects/"},
+ },
+ )()
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ type repocount struct {
+ repoid int64
+ count int64
+ }
+ expected := []repocount{{1, 1}, {54, 4}}
+
+ t.Run("Normal batch size", func(t *testing.T) {
+ defer test.MockVariableValue(&setting.Database.IterateBufferSize, 20)()
+ cases := []repocount{}
+
+ err := IterateRepositoryIDsWithLFSMetaObjects(db.DefaultContext, func(ctx context.Context, repoID, count int64) error {
+ cases = append(cases, repocount{repoID, count})
+ return nil
+ })
+ require.NoError(t, err)
+ assert.EqualValues(t, expected, cases)
+ })
+
+ t.Run("Low batch size", func(t *testing.T) {
+ defer test.MockVariableValue(&setting.Database.IterateBufferSize, 1)()
+ cases := []repocount{}
+
+ err := IterateRepositoryIDsWithLFSMetaObjects(db.DefaultContext, func(ctx context.Context, repoID, count int64) error {
+ cases = append(cases, repocount{repoID, count})
+ return nil
+ })
+ require.NoError(t, err)
+ assert.EqualValues(t, expected, cases)
+ })
+}
+
+func TestIterateLFSMetaObjectsForRepo(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ expectedIDs := []int64{1, 2, 3, 4}
+
+ t.Run("Normal batch size", func(t *testing.T) {
+ defer test.MockVariableValue(&setting.Database.IterateBufferSize, 20)()
+ actualIDs := []int64{}
+
+ err := IterateLFSMetaObjectsForRepo(db.DefaultContext, 54, func(ctx context.Context, lo *LFSMetaObject) error {
+ actualIDs = append(actualIDs, lo.ID)
+ return nil
+ }, &IterateLFSMetaObjectsForRepoOptions{})
+ require.NoError(t, err)
+ assert.EqualValues(t, expectedIDs, actualIDs)
+ })
+
+ t.Run("Low batch size", func(t *testing.T) {
+ defer test.MockVariableValue(&setting.Database.IterateBufferSize, 1)()
+ actualIDs := []int64{}
+
+ err := IterateLFSMetaObjectsForRepo(db.DefaultContext, 54, func(ctx context.Context, lo *LFSMetaObject) error {
+ actualIDs = append(actualIDs, lo.ID)
+ return nil
+ }, &IterateLFSMetaObjectsForRepoOptions{})
+ require.NoError(t, err)
+ assert.EqualValues(t, expectedIDs, actualIDs)
+
+ t.Run("Batch handles updates", func(t *testing.T) {
+ actualIDs := []int64{}
+
+ err := IterateLFSMetaObjectsForRepo(db.DefaultContext, 54, func(ctx context.Context, lo *LFSMetaObject) error {
+ actualIDs = append(actualIDs, lo.ID)
+ _, err := db.DeleteByID[LFSMetaObject](ctx, lo.ID)
+ require.NoError(t, err)
+ return nil
+ }, &IterateLFSMetaObjectsForRepoOptions{})
+ require.NoError(t, err)
+ assert.EqualValues(t, expectedIDs, actualIDs)
+ })
+ })
+}
diff --git a/models/git/main_test.go b/models/git/main_test.go
new file mode 100644
index 0000000..aab1fa9
--- /dev/null
+++ b/models/git/main_test.go
@@ -0,0 +1,18 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/unittest"
+
+ _ "code.gitea.io/gitea/models"
+ _ "code.gitea.io/gitea/models/actions"
+ _ "code.gitea.io/gitea/models/activities"
+)
+
+func TestMain(m *testing.M) {
+ unittest.MainTest(m)
+}
diff --git a/models/git/protected_banch_list_test.go b/models/git/protected_banch_list_test.go
new file mode 100644
index 0000000..09319d2
--- /dev/null
+++ b/models/git/protected_banch_list_test.go
@@ -0,0 +1,77 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestBranchRuleMatchPriority(t *testing.T) {
+ kases := []struct {
+ Rules []string
+ BranchName string
+ ExpectedMatchIdx int
+ }{
+ {
+ Rules: []string{"release/*", "release/v1.17"},
+ BranchName: "release/v1.17",
+ ExpectedMatchIdx: 1,
+ },
+ {
+ Rules: []string{"release/v1.17", "release/*"},
+ BranchName: "release/v1.17",
+ ExpectedMatchIdx: 0,
+ },
+ {
+ Rules: []string{"release/**/v1.17", "release/test/v1.17"},
+ BranchName: "release/test/v1.17",
+ ExpectedMatchIdx: 1,
+ },
+ {
+ Rules: []string{"release/test/v1.17", "release/**/v1.17"},
+ BranchName: "release/test/v1.17",
+ ExpectedMatchIdx: 0,
+ },
+ {
+ Rules: []string{"release/**", "release/v1.0.0"},
+ BranchName: "release/v1.0.0",
+ ExpectedMatchIdx: 1,
+ },
+ {
+ Rules: []string{"release/v1.0.0", "release/**"},
+ BranchName: "release/v1.0.0",
+ ExpectedMatchIdx: 0,
+ },
+ {
+ Rules: []string{"release/**", "release/v1.0.0"},
+ BranchName: "release/v2.0.0",
+ ExpectedMatchIdx: 0,
+ },
+ {
+ Rules: []string{"release/*", "release/v1.0.0"},
+ BranchName: "release/1/v2.0.0",
+ ExpectedMatchIdx: -1,
+ },
+ }
+
+ for _, kase := range kases {
+ var pbs ProtectedBranchRules
+ for _, rule := range kase.Rules {
+ pbs = append(pbs, &ProtectedBranch{RuleName: rule})
+ }
+ pbs.sort()
+ matchedPB := pbs.GetFirstMatched(kase.BranchName)
+ if matchedPB == nil {
+ if kase.ExpectedMatchIdx >= 0 {
+ require.Error(t, fmt.Errorf("no matched rules but expected %s[%d]", kase.Rules[kase.ExpectedMatchIdx], kase.ExpectedMatchIdx))
+ }
+ } else {
+ assert.EqualValues(t, kase.Rules[kase.ExpectedMatchIdx], matchedPB.RuleName)
+ }
+ }
+}
diff --git a/models/git/protected_branch.go b/models/git/protected_branch.go
new file mode 100644
index 0000000..a8b8c81
--- /dev/null
+++ b/models/git/protected_branch.go
@@ -0,0 +1,511 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "slices"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/organization"
+ "code.gitea.io/gitea/models/perm"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "github.com/gobwas/glob"
+ "github.com/gobwas/glob/syntax"
+ "xorm.io/builder"
+)
+
+var ErrBranchIsProtected = errors.New("branch is protected")
+
+// ProtectedBranch struct
+type ProtectedBranch struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"UNIQUE(s)"`
+ Repo *repo_model.Repository `xorm:"-"`
+ RuleName string `xorm:"'branch_name' UNIQUE(s)"` // a branch name or a glob match to branch name
+ globRule glob.Glob `xorm:"-"`
+ isPlainName bool `xorm:"-"`
+ CanPush bool `xorm:"NOT NULL DEFAULT false"`
+ EnableWhitelist bool
+ WhitelistUserIDs []int64 `xorm:"JSON TEXT"`
+ WhitelistTeamIDs []int64 `xorm:"JSON TEXT"`
+ EnableMergeWhitelist bool `xorm:"NOT NULL DEFAULT false"`
+ WhitelistDeployKeys bool `xorm:"NOT NULL DEFAULT false"`
+ MergeWhitelistUserIDs []int64 `xorm:"JSON TEXT"`
+ MergeWhitelistTeamIDs []int64 `xorm:"JSON TEXT"`
+ EnableStatusCheck bool `xorm:"NOT NULL DEFAULT false"`
+ StatusCheckContexts []string `xorm:"JSON TEXT"`
+ EnableApprovalsWhitelist bool `xorm:"NOT NULL DEFAULT false"`
+ ApprovalsWhitelistUserIDs []int64 `xorm:"JSON TEXT"`
+ ApprovalsWhitelistTeamIDs []int64 `xorm:"JSON TEXT"`
+ RequiredApprovals int64 `xorm:"NOT NULL DEFAULT 0"`
+ BlockOnRejectedReviews bool `xorm:"NOT NULL DEFAULT false"`
+ BlockOnOfficialReviewRequests bool `xorm:"NOT NULL DEFAULT false"`
+ BlockOnOutdatedBranch bool `xorm:"NOT NULL DEFAULT false"`
+ DismissStaleApprovals bool `xorm:"NOT NULL DEFAULT false"`
+ IgnoreStaleApprovals bool `xorm:"NOT NULL DEFAULT false"`
+ RequireSignedCommits bool `xorm:"NOT NULL DEFAULT false"`
+ ProtectedFilePatterns string `xorm:"TEXT"`
+ UnprotectedFilePatterns string `xorm:"TEXT"`
+ ApplyToAdmins bool `xorm:"NOT NULL DEFAULT false"`
+
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"updated"`
+}
+
+func init() {
+ db.RegisterModel(new(ProtectedBranch))
+}
+
+// IsRuleNameSpecial returns true if the rule name contains a special glob character
+func IsRuleNameSpecial(ruleName string) bool {
+ for i := 0; i < len(ruleName); i++ {
+ if syntax.Special(ruleName[i]) {
+ return true
+ }
+ }
+ return false
+}
+
+func (protectBranch *ProtectedBranch) loadGlob() {
+ if protectBranch.globRule == nil {
+ var err error
+ protectBranch.globRule, err = glob.Compile(protectBranch.RuleName, '/')
+ if err != nil {
+ log.Warn("Invalid glob rule for ProtectedBranch[%d]: %s %v", protectBranch.ID, protectBranch.RuleName, err)
+ protectBranch.globRule = glob.MustCompile(glob.QuoteMeta(protectBranch.RuleName), '/')
+ }
+ protectBranch.isPlainName = !IsRuleNameSpecial(protectBranch.RuleName)
+ }
+}
+
+// Match tests if branchName matches the rule
+func (protectBranch *ProtectedBranch) Match(branchName string) bool {
+ protectBranch.loadGlob()
+ if protectBranch.isPlainName {
+ return strings.EqualFold(protectBranch.RuleName, branchName)
+ }
+
+ return protectBranch.globRule.Match(branchName)
+}
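
Because the glob is compiled with '/' as the separator, `*` stays within one path segment while `**` crosses segments; a small illustrative sketch (the expected outputs in the comments follow the test cases later in this diff):

package example // hypothetical example package

import (
	"fmt"

	git_model "code.gitea.io/gitea/models/git"
)

// demoBranchRuleMatch shows separator-aware matching: '*' stops at '/', '**' does not.
func demoBranchRuleMatch() {
	single := git_model.ProtectedBranch{RuleName: "release/*"}
	double := git_model.ProtectedBranch{RuleName: "release/**"}
	fmt.Println(single.Match("release/v1.17"))   // true
	fmt.Println(single.Match("release/1/v1.17")) // false: '*' does not cross '/'
	fmt.Println(double.Match("release/1/v1.17")) // true
}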
+
+func (protectBranch *ProtectedBranch) LoadRepo(ctx context.Context) (err error) {
+ if protectBranch.Repo != nil {
+ return nil
+ }
+ protectBranch.Repo, err = repo_model.GetRepositoryByID(ctx, protectBranch.RepoID)
+ return err
+}
+
+// CanUserPush returns whether the given user can push to this protected branch
+func (protectBranch *ProtectedBranch) CanUserPush(ctx context.Context, user *user_model.User) bool {
+ if !protectBranch.CanPush {
+ return false
+ }
+
+ if !protectBranch.EnableWhitelist {
+ if err := protectBranch.LoadRepo(ctx); err != nil {
+ log.Error("LoadRepo: %v", err)
+ return false
+ }
+
+ writeAccess, err := access_model.HasAccessUnit(ctx, user, protectBranch.Repo, unit.TypeCode, perm.AccessModeWrite)
+ if err != nil {
+ log.Error("HasAccessUnit: %v", err)
+ return false
+ }
+ return writeAccess
+ }
+
+ if slices.Contains(protectBranch.WhitelistUserIDs, user.ID) {
+ return true
+ }
+
+ if len(protectBranch.WhitelistTeamIDs) == 0 {
+ return false
+ }
+
+ in, err := organization.IsUserInTeams(ctx, user.ID, protectBranch.WhitelistTeamIDs)
+ if err != nil {
+ log.Error("IsUserInTeams: %v", err)
+ return false
+ }
+ return in
+}
+
+// IsUserMergeWhitelisted checks if a user is whitelisted to merge to this branch
+func IsUserMergeWhitelisted(ctx context.Context, protectBranch *ProtectedBranch, userID int64, permissionInRepo access_model.Permission) bool {
+ if !protectBranch.EnableMergeWhitelist {
+ // Then we need to fall back on whether the user has write permission
+ return permissionInRepo.CanWrite(unit.TypeCode)
+ }
+
+ if slices.Contains(protectBranch.MergeWhitelistUserIDs, userID) {
+ return true
+ }
+
+ if len(protectBranch.MergeWhitelistTeamIDs) == 0 {
+ return false
+ }
+
+ in, err := organization.IsUserInTeams(ctx, userID, protectBranch.MergeWhitelistTeamIDs)
+ if err != nil {
+ log.Error("IsUserInTeams: %v", err)
+ return false
+ }
+ return in
+}
+
+// IsUserOfficialReviewer checks if the user is an official reviewer for the branch (counting towards required approvals)
+func IsUserOfficialReviewer(ctx context.Context, protectBranch *ProtectedBranch, user *user_model.User) (bool, error) {
+ repo, err := repo_model.GetRepositoryByID(ctx, protectBranch.RepoID)
+ if err != nil {
+ return false, err
+ }
+
+ if !protectBranch.EnableApprovalsWhitelist {
+ // Anyone with write access is considered official reviewer
+ writeAccess, err := access_model.HasAccessUnit(ctx, user, repo, unit.TypeCode, perm.AccessModeWrite)
+ if err != nil {
+ return false, err
+ }
+ return writeAccess, nil
+ }
+
+ if slices.Contains(protectBranch.ApprovalsWhitelistUserIDs, user.ID) {
+ return true, nil
+ }
+
+ inTeam, err := organization.IsUserInTeams(ctx, user.ID, protectBranch.ApprovalsWhitelistTeamIDs)
+ if err != nil {
+ return false, err
+ }
+
+ return inTeam, nil
+}
+
+// GetProtectedFilePatterns parses a semicolon separated list of protected file patterns and returns a glob.Glob slice
+func (protectBranch *ProtectedBranch) GetProtectedFilePatterns() []glob.Glob {
+ return getFilePatterns(protectBranch.ProtectedFilePatterns)
+}
+
+// GetUnprotectedFilePatterns parses a semicolon separated list of unprotected file patterns and returns a glob.Glob slice
+func (protectBranch *ProtectedBranch) GetUnprotectedFilePatterns() []glob.Glob {
+ return getFilePatterns(protectBranch.UnprotectedFilePatterns)
+}
+
+func getFilePatterns(filePatterns string) []glob.Glob {
+ extarr := make([]glob.Glob, 0, 10)
+ for _, expr := range strings.Split(strings.ToLower(filePatterns), ";") {
+ expr = strings.TrimSpace(expr)
+ if expr != "" {
+ if g, err := glob.Compile(expr, '.', '/'); err != nil {
+ log.Info("Invalid glob expression '%s' (skipped): %v", expr, err)
+ } else {
+ extarr = append(extarr, g)
+ }
+ }
+ }
+ return extarr
+}
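
Patterns are lowercased and split on semicolons, then compiled with '.' and '/' as separators, so matching is case-insensitive; an illustrative sketch with made-up patterns:

package example // hypothetical example package

import (
	"fmt"

	git_model "code.gitea.io/gitea/models/git"
)

// demoProtectedFiles exercises the semicolon-separated pattern syntax.
func demoProtectedFiles() {
	pb := git_model.ProtectedBranch{ProtectedFilePatterns: "docs/**;*.lock"}
	patterns := pb.GetProtectedFilePatterns()
	fmt.Println(pb.IsProtectedFile(patterns, "docs/api/README.md")) // true
	fmt.Println(pb.IsProtectedFile(patterns, "Go.LOCK"))            // true: both sides are lowercased
	fmt.Println(pb.IsProtectedFile(patterns, "main.go"))            // false
}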
+
+// MergeBlockedByProtectedFiles returns true if the merge is blocked by changes to protected files
+func (protectBranch *ProtectedBranch) MergeBlockedByProtectedFiles(changedProtectedFiles []string) bool {
+ glob := protectBranch.GetProtectedFilePatterns()
+ if len(glob) == 0 {
+ return false
+ }
+
+ return len(changedProtectedFiles) > 0
+}
+
+// IsProtectedFile returns whether the path is protected
+func (protectBranch *ProtectedBranch) IsProtectedFile(patterns []glob.Glob, path string) bool {
+ if len(patterns) == 0 {
+ patterns = protectBranch.GetProtectedFilePatterns()
+ if len(patterns) == 0 {
+ return false
+ }
+ }
+
+ lpath := strings.ToLower(strings.TrimSpace(path))
+
+ r := false
+ for _, pat := range patterns {
+ if pat.Match(lpath) {
+ r = true
+ break
+ }
+ }
+
+ return r
+}
+
+// IsUnprotectedFile returns whether the path is unprotected
+func (protectBranch *ProtectedBranch) IsUnprotectedFile(patterns []glob.Glob, path string) bool {
+ if len(patterns) == 0 {
+ patterns = protectBranch.GetUnprotectedFilePatterns()
+ if len(patterns) == 0 {
+ return false
+ }
+ }
+
+ lpath := strings.ToLower(strings.TrimSpace(path))
+
+ r := false
+ for _, pat := range patterns {
+ if pat.Match(lpath) {
+ r = true
+ break
+ }
+ }
+
+ return r
+}
+
+// GetProtectedBranchRuleByName returns the protected branch rule with the given name
+func GetProtectedBranchRuleByName(ctx context.Context, repoID int64, ruleName string) (*ProtectedBranch, error) {
+ // branch_name is legacy name, it actually is rule name
+ rel, exist, err := db.Get[ProtectedBranch](ctx, builder.Eq{"repo_id": repoID, "branch_name": ruleName})
+ if err != nil {
+ return nil, err
+ } else if !exist {
+ return nil, nil
+ }
+ return rel, nil
+}
+
+// GetProtectedBranchRuleByID returns the protected branch rule with the given rule ID
+func GetProtectedBranchRuleByID(ctx context.Context, repoID, ruleID int64) (*ProtectedBranch, error) {
+ rel, exist, err := db.Get[ProtectedBranch](ctx, builder.Eq{"repo_id": repoID, "id": ruleID})
+ if err != nil {
+ return nil, err
+ } else if !exist {
+ return nil, nil
+ }
+ return rel, nil
+}
+
+// WhitelistOptions represent all sorts of whitelists used for protected branches
+type WhitelistOptions struct {
+ UserIDs []int64
+ TeamIDs []int64
+
+ MergeUserIDs []int64
+ MergeTeamIDs []int64
+
+ ApprovalsUserIDs []int64
+ ApprovalsTeamIDs []int64
+}
+
+// UpdateProtectBranch saves the branch protection options of a repository.
+// If ID is 0, it creates a new record; otherwise it updates the existing record.
+// It also checks whether the whitelisted user and team IDs have changed,
+// to avoid unnecessarily deleting and regenerating the whitelists.
+func UpdateProtectBranch(ctx context.Context, repo *repo_model.Repository, protectBranch *ProtectedBranch, opts WhitelistOptions) (err error) {
+ err = repo.MustNotBeArchived()
+ if err != nil {
+ return err
+ }
+
+ if err = repo.LoadOwner(ctx); err != nil {
+ return fmt.Errorf("LoadOwner: %v", err)
+ }
+
+ whitelist, err := updateUserWhitelist(ctx, repo, protectBranch.WhitelistUserIDs, opts.UserIDs)
+ if err != nil {
+ return err
+ }
+ protectBranch.WhitelistUserIDs = whitelist
+
+ whitelist, err = updateUserWhitelist(ctx, repo, protectBranch.MergeWhitelistUserIDs, opts.MergeUserIDs)
+ if err != nil {
+ return err
+ }
+ protectBranch.MergeWhitelistUserIDs = whitelist
+
+ whitelist, err = updateApprovalWhitelist(ctx, repo, protectBranch.ApprovalsWhitelistUserIDs, opts.ApprovalsUserIDs)
+ if err != nil {
+ return err
+ }
+ protectBranch.ApprovalsWhitelistUserIDs = whitelist
+
+ // Team whitelists only take effect if the repo belongs to an organization
+ whitelist, err = updateTeamWhitelist(ctx, repo, protectBranch.WhitelistTeamIDs, opts.TeamIDs)
+ if err != nil {
+ return err
+ }
+ protectBranch.WhitelistTeamIDs = whitelist
+
+ whitelist, err = updateTeamWhitelist(ctx, repo, protectBranch.MergeWhitelistTeamIDs, opts.MergeTeamIDs)
+ if err != nil {
+ return err
+ }
+ protectBranch.MergeWhitelistTeamIDs = whitelist
+
+ whitelist, err = updateTeamWhitelist(ctx, repo, protectBranch.ApprovalsWhitelistTeamIDs, opts.ApprovalsTeamIDs)
+ if err != nil {
+ return err
+ }
+ protectBranch.ApprovalsWhitelistTeamIDs = whitelist
+
+ // Insert a new record if the rule does not exist yet (ID == 0); otherwise update the existing one below
+ if protectBranch.ID == 0 {
+ if _, err = db.GetEngine(ctx).Insert(protectBranch); err != nil {
+ return fmt.Errorf("Insert: %v", err)
+ }
+ return nil
+ }
+
+ if _, err = db.GetEngine(ctx).ID(protectBranch.ID).AllCols().Update(protectBranch); err != nil {
+ return fmt.Errorf("Update: %v", err)
+ }
+
+ return nil
+}
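
A hypothetical caller creating a brand-new rule (ID == 0 takes the insert path); note that IDs without sufficient repository access are silently dropped by the whitelist helpers:

package example // hypothetical example package

import (
	"context"

	git_model "code.gitea.io/gitea/models/git"
	repo_model "code.gitea.io/gitea/models/repo"
)

// protectMain creates a plain-name rule for "main" that requires one
// approval from a whitelisted reviewer; the helper is illustrative only.
func protectMain(ctx context.Context, repo *repo_model.Repository, reviewerIDs []int64) error {
	rule := &git_model.ProtectedBranch{
		RepoID:                   repo.ID,
		RuleName:                 "main",
		RequiredApprovals:        1,
		EnableApprovalsWhitelist: true,
	}
	return git_model.UpdateProtectBranch(ctx, repo, rule, git_model.WhitelistOptions{
		ApprovalsUserIDs: reviewerIDs,
	})
}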
+
+// updateApprovalWhitelist checks whether the user whitelist changed and returns a whitelist with
+// the users from newWhitelist which have explicit read or write access to the repo.
+func updateApprovalWhitelist(ctx context.Context, repo *repo_model.Repository, currentWhitelist, newWhitelist []int64) (whitelist []int64, err error) {
+ hasUsersChanged := !util.SliceSortedEqual(currentWhitelist, newWhitelist)
+ if !hasUsersChanged {
+ return currentWhitelist, nil
+ }
+
+ whitelist = make([]int64, 0, len(newWhitelist))
+ for _, userID := range newWhitelist {
+ if reader, err := access_model.IsRepoReader(ctx, repo, userID); err != nil {
+ return nil, err
+ } else if !reader {
+ continue
+ }
+ whitelist = append(whitelist, userID)
+ }
+
+ return whitelist, err
+}
+
+// updateUserWhitelist checks whether the user whitelist changed and returns a whitelist with
+// the users from newWhitelist which have write access to the repo.
+func updateUserWhitelist(ctx context.Context, repo *repo_model.Repository, currentWhitelist, newWhitelist []int64) (whitelist []int64, err error) {
+ hasUsersChanged := !util.SliceSortedEqual(currentWhitelist, newWhitelist)
+ if !hasUsersChanged {
+ return currentWhitelist, nil
+ }
+
+ whitelist = make([]int64, 0, len(newWhitelist))
+ for _, userID := range newWhitelist {
+ user, err := user_model.GetUserByID(ctx, userID)
+ if err != nil {
+ return nil, fmt.Errorf("GetUserByID [user_id: %d, repo_id: %d]: %v", userID, repo.ID, err)
+ }
+ perm, err := access_model.GetUserRepoPermission(ctx, repo, user)
+ if err != nil {
+ return nil, fmt.Errorf("GetUserRepoPermission [user_id: %d, repo_id: %d]: %v", userID, repo.ID, err)
+ }
+
+ if !perm.CanWrite(unit.TypeCode) {
+ continue // Drop invalid user ID
+ }
+
+ whitelist = append(whitelist, userID)
+ }
+
+ return whitelist, err
+}
+
+// updateTeamWhitelist checks whether the team whitelist changed and returns a whitelist with
+// the teams from newWhitelist which have write access to the repo.
+func updateTeamWhitelist(ctx context.Context, repo *repo_model.Repository, currentWhitelist, newWhitelist []int64) (whitelist []int64, err error) {
+ hasTeamsChanged := !util.SliceSortedEqual(currentWhitelist, newWhitelist)
+ if !hasTeamsChanged {
+ return currentWhitelist, nil
+ }
+
+ teams, err := organization.GetTeamsWithAccessToRepo(ctx, repo.OwnerID, repo.ID, perm.AccessModeRead)
+ if err != nil {
+ return nil, fmt.Errorf("GetTeamsWithAccessToRepo [org_id: %d, repo_id: %d]: %v", repo.OwnerID, repo.ID, err)
+ }
+
+ whitelist = make([]int64, 0, len(teams))
+ for i := range teams {
+ if slices.Contains(newWhitelist, teams[i].ID) {
+ whitelist = append(whitelist, teams[i].ID)
+ }
+ }
+
+ return whitelist, err
+}
+
+// DeleteProtectedBranch removes a protected branch rule from the repository.
+func DeleteProtectedBranch(ctx context.Context, repo *repo_model.Repository, id int64) (err error) {
+ err = repo.MustNotBeArchived()
+ if err != nil {
+ return err
+ }
+
+ protectedBranch := &ProtectedBranch{
+ RepoID: repo.ID,
+ ID: id,
+ }
+
+ if affected, err := db.GetEngine(ctx).Delete(protectedBranch); err != nil {
+ return err
+ } else if affected != 1 {
+ return fmt.Errorf("delete protected branch ID(%v) failed", id)
+ }
+
+ return nil
+}
+
+// RemoveUserIDFromProtectedBranch removes the given user ID from all of the protected branch's whitelist options
+func RemoveUserIDFromProtectedBranch(ctx context.Context, p *ProtectedBranch, userID int64) error {
+ lenIDs, lenApprovalIDs, lenMergeIDs := len(p.WhitelistUserIDs), len(p.ApprovalsWhitelistUserIDs), len(p.MergeWhitelistUserIDs)
+ p.WhitelistUserIDs = util.SliceRemoveAll(p.WhitelistUserIDs, userID)
+ p.ApprovalsWhitelistUserIDs = util.SliceRemoveAll(p.ApprovalsWhitelistUserIDs, userID)
+ p.MergeWhitelistUserIDs = util.SliceRemoveAll(p.MergeWhitelistUserIDs, userID)
+
+ if lenIDs != len(p.WhitelistUserIDs) || lenApprovalIDs != len(p.ApprovalsWhitelistUserIDs) ||
+ lenMergeIDs != len(p.MergeWhitelistUserIDs) {
+ if _, err := db.GetEngine(ctx).ID(p.ID).Cols(
+ "whitelist_user_i_ds",
+ "merge_whitelist_user_i_ds",
+ "approvals_whitelist_user_i_ds",
+ ).Update(p); err != nil {
+ return fmt.Errorf("updateProtectedBranches: %v", err)
+ }
+ }
+ return nil
+}
+
+// RemoveTeamIDFromProtectedBranch removes the given team ID from all of the protected branch's whitelist options
+func RemoveTeamIDFromProtectedBranch(ctx context.Context, p *ProtectedBranch, teamID int64) error {
+ lenIDs, lenApprovalIDs, lenMergeIDs := len(p.WhitelistTeamIDs), len(p.ApprovalsWhitelistTeamIDs), len(p.MergeWhitelistTeamIDs)
+ p.WhitelistTeamIDs = util.SliceRemoveAll(p.WhitelistTeamIDs, teamID)
+ p.ApprovalsWhitelistTeamIDs = util.SliceRemoveAll(p.ApprovalsWhitelistTeamIDs, teamID)
+ p.MergeWhitelistTeamIDs = util.SliceRemoveAll(p.MergeWhitelistTeamIDs, teamID)
+
+ if lenIDs != len(p.WhitelistTeamIDs) ||
+ lenApprovalIDs != len(p.ApprovalsWhitelistTeamIDs) ||
+ lenMergeIDs != len(p.MergeWhitelistTeamIDs) {
+ if _, err := db.GetEngine(ctx).ID(p.ID).Cols(
+ "whitelist_team_i_ds",
+ "merge_whitelist_team_i_ds",
+ "approvals_whitelist_team_i_ds",
+ ).Update(p); err != nil {
+ return fmt.Errorf("updateProtectedBranches: %v", err)
+ }
+ }
+ return nil
+}
diff --git a/models/git/protected_branch_list.go b/models/git/protected_branch_list.go
new file mode 100644
index 0000000..613333a
--- /dev/null
+++ b/models/git/protected_branch_list.go
@@ -0,0 +1,95 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git
+
+import (
+ "context"
+ "sort"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/optional"
+
+ "github.com/gobwas/glob"
+)
+
+type ProtectedBranchRules []*ProtectedBranch
+
+func (rules ProtectedBranchRules) GetFirstMatched(branchName string) *ProtectedBranch {
+ for _, rule := range rules {
+ if rule.Match(branchName) {
+ return rule
+ }
+ }
+ return nil
+}
+
+func (rules ProtectedBranchRules) sort() {
+ sort.Slice(rules, func(i, j int) bool {
+ rules[i].loadGlob()
+ rules[j].loadGlob()
+ if rules[i].isPlainName != rules[j].isPlainName {
+ return rules[i].isPlainName // plain name comes first, so plain name means "less"
+ }
+ return rules[i].CreatedUnix < rules[j].CreatedUnix
+ })
+}
+
+// FindRepoProtectedBranchRules loads all of the repository's protected branch rules
+func FindRepoProtectedBranchRules(ctx context.Context, repoID int64) (ProtectedBranchRules, error) {
+ var rules ProtectedBranchRules
+ err := db.GetEngine(ctx).Where("repo_id = ?", repoID).Asc("created_unix").Find(&rules)
+ if err != nil {
+ return nil, err
+ }
+ rules.sort() // to make non-glob rules have higher priority, and for same glob/non-glob rules, first created rules have higher priority
+ return rules, nil
+}
+
+// FindAllMatchedBranches finds all branch names matching the given rule
+func FindAllMatchedBranches(ctx context.Context, repoID int64, ruleName string) ([]string, error) {
+ rule := glob.MustCompile(ruleName)
+ results := make([]string, 0, 10)
+ for page := 1; ; page++ {
+ branchNames, err := FindBranchNames(ctx, FindBranchOptions{
+ ListOptions: db.ListOptions{
+ PageSize: 100,
+ Page: page,
+ },
+ RepoID: repoID,
+ IsDeletedBranch: optional.Some(false),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ for _, branch := range branchNames {
+ if rule.Match(branch) {
+ results = append(results, branch)
+ }
+ }
+ if len(branchNames) < 100 {
+ break
+ }
+ }
+
+ return results, nil
+}
+
+// GetFirstMatchProtectedBranchRule returns the first rule that matches the branch name
+func GetFirstMatchProtectedBranchRule(ctx context.Context, repoID int64, branchName string) (*ProtectedBranch, error) {
+ rules, err := FindRepoProtectedBranchRules(ctx, repoID)
+ if err != nil {
+ return nil, err
+ }
+ return rules.GetFirstMatched(branchName), nil
+}
+
+// IsBranchProtected checks if branch is protected
+func IsBranchProtected(ctx context.Context, repoID int64, branchName string) (bool, error) {
+ rule, err := GetFirstMatchProtectedBranchRule(ctx, repoID, branchName)
+ if err != nil {
+ return false, err
+ }
+ return rule != nil, nil
+}
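
A minimal guard sketch built on the first-match semantics above, reusing the ErrBranchIsProtected sentinel declared earlier in this diff (the guard helper itself is hypothetical):

package example // hypothetical example package

import (
	"context"

	git_model "code.gitea.io/gitea/models/git"
)

// requireUnprotected refuses an operation when any protection rule matches
// the branch name.
func requireUnprotected(ctx context.Context, repoID int64, branch string) error {
	protected, err := git_model.IsBranchProtected(ctx, repoID, branch)
	if err != nil {
		return err
	}
	if protected {
		return git_model.ErrBranchIsProtected
	}
	return nil
}
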
diff --git a/models/git/protected_branch_test.go b/models/git/protected_branch_test.go
new file mode 100644
index 0000000..278fa9f
--- /dev/null
+++ b/models/git/protected_branch_test.go
@@ -0,0 +1,69 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestBranchRuleMatch(t *testing.T) {
+ kases := []struct {
+ Rule string
+ BranchName string
+ ExpectedMatch bool
+ }{
+ {
+ Rule: "release/*",
+ BranchName: "release/v1.17",
+ ExpectedMatch: true,
+ },
+ {
+ Rule: "release/**/v1.17",
+ BranchName: "release/test/v1.17",
+ ExpectedMatch: true,
+ },
+ {
+ Rule: "release/**/v1.17",
+ BranchName: "release/test/1/v1.17",
+ ExpectedMatch: true,
+ },
+ {
+ Rule: "release/*/v1.17",
+ BranchName: "release/test/1/v1.17",
+ ExpectedMatch: false,
+ },
+ {
+ Rule: "release/v*",
+ BranchName: "release/v1.16",
+ ExpectedMatch: true,
+ },
+ {
+ Rule: "*",
+ BranchName: "release/v1.16",
+ ExpectedMatch: false,
+ },
+ {
+ Rule: "**",
+ BranchName: "release/v1.16",
+ ExpectedMatch: true,
+ },
+ {
+ Rule: "main",
+ BranchName: "main",
+ ExpectedMatch: true,
+ },
+ {
+ Rule: "master",
+ BranchName: "main",
+ ExpectedMatch: false,
+ },
+ }
+
+ for _, kase := range kases {
+ pb := ProtectedBranch{RuleName: kase.Rule}
+ assert.EqualValues(t, kase.ExpectedMatch, pb.Match(kase.BranchName), "%s - %s", kase.BranchName, kase.Rule)
+ }
+}
diff --git a/models/git/protected_tag.go b/models/git/protected_tag.go
new file mode 100644
index 0000000..9a6646c
--- /dev/null
+++ b/models/git/protected_tag.go
@@ -0,0 +1,150 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git
+
+import (
+ "context"
+ "regexp"
+ "slices"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/organization"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "github.com/gobwas/glob"
+)
+
+// ProtectedTag struct
+type ProtectedTag struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64
+ NamePattern string
+ RegexPattern *regexp.Regexp `xorm:"-"`
+ GlobPattern glob.Glob `xorm:"-"`
+ AllowlistUserIDs []int64 `xorm:"JSON TEXT"`
+ AllowlistTeamIDs []int64 `xorm:"JSON TEXT"`
+
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"updated"`
+}
+
+func init() {
+ db.RegisterModel(new(ProtectedTag))
+}
+
+// EnsureCompiledPattern ensures the name pattern is compiled, as a regex if wrapped in slashes, otherwise as a glob
+func (pt *ProtectedTag) EnsureCompiledPattern() error {
+ if pt.RegexPattern != nil || pt.GlobPattern != nil {
+ return nil
+ }
+
+ var err error
+ if len(pt.NamePattern) >= 2 && strings.HasPrefix(pt.NamePattern, "/") && strings.HasSuffix(pt.NamePattern, "/") {
+ pt.RegexPattern, err = regexp.Compile(pt.NamePattern[1 : len(pt.NamePattern)-1])
+ } else {
+ pt.GlobPattern, err = glob.Compile(pt.NamePattern)
+ }
+ return err
+}
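
The dispatch is purely syntactic: a pattern wrapped in slashes compiles as a regular expression, anything else as a glob. An illustrative sketch:

package example // hypothetical example package

import (
	"fmt"

	git_model "code.gitea.io/gitea/models/git"
)

// demoTagPatterns shows which compiled form each pattern style produces.
func demoTagPatterns() {
	re := &git_model.ProtectedTag{NamePattern: `/\Av-/`}
	gl := &git_model.ProtectedTag{NamePattern: `v-*`}
	_ = re.EnsureCompiledPattern()
	_ = gl.EnsureCompiledPattern()
	fmt.Println(re.RegexPattern != nil, re.GlobPattern != nil) // true false
	fmt.Println(gl.RegexPattern != nil, gl.GlobPattern != nil) // false true
}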
+
+func (pt *ProtectedTag) matchString(name string) bool {
+ if pt.RegexPattern != nil {
+ return pt.RegexPattern.MatchString(name)
+ }
+ return pt.GlobPattern.Match(name)
+}
+
+// InsertProtectedTag inserts a protected tag into the database
+func InsertProtectedTag(ctx context.Context, pt *ProtectedTag) error {
+ _, err := db.GetEngine(ctx).Insert(pt)
+ return err
+}
+
+// UpdateProtectedTag updates the protected tag
+func UpdateProtectedTag(ctx context.Context, pt *ProtectedTag) error {
+ _, err := db.GetEngine(ctx).ID(pt.ID).AllCols().Update(pt)
+ return err
+}
+
+// DeleteProtectedTag deletes a protected tag by ID
+func DeleteProtectedTag(ctx context.Context, pt *ProtectedTag) error {
+ _, err := db.GetEngine(ctx).ID(pt.ID).Delete(&ProtectedTag{})
+ return err
+}
+
+// IsUserAllowedModifyTag returns true if the user is allowed to modify the tag
+func IsUserAllowedModifyTag(ctx context.Context, pt *ProtectedTag, userID int64) (bool, error) {
+ if slices.Contains(pt.AllowlistUserIDs, userID) {
+ return true, nil
+ }
+
+ if len(pt.AllowlistTeamIDs) == 0 {
+ return false, nil
+ }
+
+ in, err := organization.IsUserInTeams(ctx, userID, pt.AllowlistTeamIDs)
+ if err != nil {
+ return false, err
+ }
+ return in, nil
+}
+
+// GetProtectedTags gets all protected tags of the repository
+func GetProtectedTags(ctx context.Context, repoID int64) ([]*ProtectedTag, error) {
+ tags := make([]*ProtectedTag, 0)
+ return tags, db.GetEngine(ctx).Find(&tags, &ProtectedTag{RepoID: repoID})
+}
+
+// GetProtectedTagByID gets the protected tag with the specific id
+func GetProtectedTagByID(ctx context.Context, id int64) (*ProtectedTag, error) {
+ tag := new(ProtectedTag)
+ has, err := db.GetEngine(ctx).ID(id).Get(tag)
+ if err != nil {
+ return nil, err
+ }
+ if !has {
+ return nil, nil
+ }
+ return tag, nil
+}
+
+// GetProtectedTagByNamePattern gets a protected tag by its name_pattern
+func GetProtectedTagByNamePattern(ctx context.Context, repoID int64, pattern string) (*ProtectedTag, error) {
+ tag := &ProtectedTag{NamePattern: pattern, RepoID: repoID}
+ has, err := db.GetEngine(ctx).Get(tag)
+ if err != nil {
+ return nil, err
+ }
+ if !has {
+ return nil, nil
+ }
+ return tag, nil
+}
+
+// IsUserAllowedToControlTag checks if a user can control the specific tag.
+// It returns true if the tag name is not protected or the user is allowed to control it.
+func IsUserAllowedToControlTag(ctx context.Context, tags []*ProtectedTag, tagName string, userID int64) (bool, error) {
+ isAllowed := true
+ for _, tag := range tags {
+ err := tag.EnsureCompiledPattern()
+ if err != nil {
+ return false, err
+ }
+
+ if !tag.matchString(tagName) {
+ continue
+ }
+
+ isAllowed, err = IsUserAllowedModifyTag(ctx, tag, userID)
+ if err != nil {
+ return false, err
+ }
+ if isAllowed {
+ break
+ }
+ }
+
+ return isAllowed, nil
+}
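
A hypothetical push-time guard combining the helpers above: load the repository's protected tags once, then test the pushed tag name against them:

package example // hypothetical example package

import (
	"context"
	"fmt"

	git_model "code.gitea.io/gitea/models/git"
)

// checkTagPush rejects a tag push when a matching protection rule does not
// allow the pusher; the helper is illustrative only.
func checkTagPush(ctx context.Context, repoID, pusherID int64, tagName string) error {
	tags, err := git_model.GetProtectedTags(ctx, repoID)
	if err != nil {
		return err
	}
	ok, err := git_model.IsUserAllowedToControlTag(ctx, tags, tagName, pusherID)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("tag %s is protected", tagName)
	}
	return nil
}
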
diff --git a/models/git/protected_tag_test.go b/models/git/protected_tag_test.go
new file mode 100644
index 0000000..796e159
--- /dev/null
+++ b/models/git/protected_tag_test.go
@@ -0,0 +1,166 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ git_model "code.gitea.io/gitea/models/git"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestIsUserAllowed(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ pt := &git_model.ProtectedTag{}
+ allowed, err := git_model.IsUserAllowedModifyTag(db.DefaultContext, pt, 1)
+ require.NoError(t, err)
+ assert.False(t, allowed)
+
+ pt = &git_model.ProtectedTag{
+ AllowlistUserIDs: []int64{1},
+ }
+ allowed, err = git_model.IsUserAllowedModifyTag(db.DefaultContext, pt, 1)
+ require.NoError(t, err)
+ assert.True(t, allowed)
+
+ allowed, err = git_model.IsUserAllowedModifyTag(db.DefaultContext, pt, 2)
+ require.NoError(t, err)
+ assert.False(t, allowed)
+
+ pt = &git_model.ProtectedTag{
+ AllowlistTeamIDs: []int64{1},
+ }
+ allowed, err = git_model.IsUserAllowedModifyTag(db.DefaultContext, pt, 1)
+ require.NoError(t, err)
+ assert.False(t, allowed)
+
+ allowed, err = git_model.IsUserAllowedModifyTag(db.DefaultContext, pt, 2)
+ require.NoError(t, err)
+ assert.True(t, allowed)
+
+ pt = &git_model.ProtectedTag{
+ AllowlistUserIDs: []int64{1},
+ AllowlistTeamIDs: []int64{1},
+ }
+ allowed, err = git_model.IsUserAllowedModifyTag(db.DefaultContext, pt, 1)
+ require.NoError(t, err)
+ assert.True(t, allowed)
+
+ allowed, err = git_model.IsUserAllowedModifyTag(db.DefaultContext, pt, 2)
+ require.NoError(t, err)
+ assert.True(t, allowed)
+}
+
+func TestIsUserAllowedToControlTag(t *testing.T) {
+ cases := []struct {
+ name string
+ userid int64
+ allowed bool
+ }{
+ {
+ name: "test",
+ userid: 1,
+ allowed: true,
+ },
+ {
+ name: "test",
+ userid: 3,
+ allowed: true,
+ },
+ {
+ name: "gitea",
+ userid: 1,
+ allowed: true,
+ },
+ {
+ name: "gitea",
+ userid: 3,
+ allowed: false,
+ },
+ {
+ name: "test-gitea",
+ userid: 1,
+ allowed: true,
+ },
+ {
+ name: "test-gitea",
+ userid: 3,
+ allowed: false,
+ },
+ {
+ name: "gitea-test",
+ userid: 1,
+ allowed: true,
+ },
+ {
+ name: "gitea-test",
+ userid: 3,
+ allowed: true,
+ },
+ {
+ name: "v-1",
+ userid: 1,
+ allowed: false,
+ },
+ {
+ name: "v-1",
+ userid: 2,
+ allowed: true,
+ },
+ {
+ name: "release",
+ userid: 1,
+ allowed: false,
+ },
+ }
+
+ t.Run("Glob", func(t *testing.T) {
+ protectedTags := []*git_model.ProtectedTag{
+ {
+ NamePattern: `*gitea`,
+ AllowlistUserIDs: []int64{1},
+ },
+ {
+ NamePattern: `v-*`,
+ AllowlistUserIDs: []int64{2},
+ },
+ {
+ NamePattern: "release",
+ },
+ }
+
+ for n, c := range cases {
+ isAllowed, err := git_model.IsUserAllowedToControlTag(db.DefaultContext, protectedTags, c.name, c.userid)
+ require.NoError(t, err)
+ assert.Equal(t, c.allowed, isAllowed, "case %d: allowed should match", n)
+ }
+ })
+
+ t.Run("Regex", func(t *testing.T) {
+ protectedTags := []*git_model.ProtectedTag{
+ {
+ NamePattern: `/gitea\z/`,
+ AllowlistUserIDs: []int64{1},
+ },
+ {
+ NamePattern: `/\Av-/`,
+ AllowlistUserIDs: []int64{2},
+ },
+ {
+ NamePattern: "/release/",
+ },
+ }
+
+ for n, c := range cases {
+ isAllowed, err := git_model.IsUserAllowedToControlTag(db.DefaultContext, protectedTags, c.name, c.userid)
+ require.NoError(t, err)
+ assert.Equal(t, c.allowed, isAllowed, "case %d: allowed should match", n)
+ }
+ })
+}
diff --git a/models/issues/TestGetUIDsAndStopwatch/stopwatch.yml b/models/issues/TestGetUIDsAndStopwatch/stopwatch.yml
new file mode 100644
index 0000000..f564e4b
--- /dev/null
+++ b/models/issues/TestGetUIDsAndStopwatch/stopwatch.yml
@@ -0,0 +1,11 @@
+-
+ id: 3
+ user_id: 1
+ issue_id: 2
+ created_unix: 1500988004
+
+-
+ id: 4
+ user_id: 3
+ issue_id: 0
+ created_unix: 1500988003
diff --git a/models/issues/assignees.go b/models/issues/assignees.go
new file mode 100644
index 0000000..a83cb25
--- /dev/null
+++ b/models/issues/assignees.go
@@ -0,0 +1,177 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// IssueAssignees saves all issue assignees
+type IssueAssignees struct {
+ ID int64 `xorm:"pk autoincr"`
+ AssigneeID int64 `xorm:"INDEX"`
+ IssueID int64 `xorm:"INDEX"`
+}
+
+func init() {
+ db.RegisterModel(new(IssueAssignees))
+}
+
+// LoadAssignees loads the assignees of this issue.
+func (issue *Issue) LoadAssignees(ctx context.Context) (err error) {
+ if issue.isAssigneeLoaded || len(issue.Assignees) > 0 {
+ return nil
+ }
+
+ // Reset any preexisting assignees
+ issue.Assignees = []*user_model.User{}
+ issue.Assignee = nil
+
+ if err = db.GetEngine(ctx).Table("`user`").
+ Join("INNER", "issue_assignees", "assignee_id = `user`.id").
+ Where("issue_assignees.issue_id = ?", issue.ID).
+ Find(&issue.Assignees); err != nil {
+ return err
+ }
+
+ issue.isAssigneeLoaded = true
+ // If there is at least one assignee, expose the first one as `Assignee`
+ if len(issue.Assignees) > 0 {
+ issue.Assignee = issue.Assignees[0]
+ }
+ return nil
+}
+
+// GetAssigneeIDsByIssue returns the IDs of users assigned to an issue
+// but skips joining with `user` for performance reasons.
+// User permissions must be verified elsewhere if required.
+func GetAssigneeIDsByIssue(ctx context.Context, issueID int64) ([]int64, error) {
+ userIDs := make([]int64, 0, 5)
+ return userIDs, db.GetEngine(ctx).
+ Table("issue_assignees").
+ Cols("assignee_id").
+ Where("issue_id = ?", issueID).
+ Distinct("assignee_id").
+ Find(&userIDs)
+}
+
+// IsUserAssignedToIssue returns true when the user is assigned to the issue
+func IsUserAssignedToIssue(ctx context.Context, issue *Issue, user *user_model.User) (isAssigned bool, err error) {
+ return db.Exist[IssueAssignees](ctx, builder.Eq{"assignee_id": user.ID, "issue_id": issue.ID})
+}
+
+// ToggleIssueAssignee toggles a user's assignment to this issue and creates an issue comment for the change.
+func ToggleIssueAssignee(ctx context.Context, issue *Issue, doer *user_model.User, assigneeID int64) (removed bool, comment *Comment, err error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return false, nil, err
+ }
+ defer committer.Close()
+
+ removed, comment, err = toggleIssueAssignee(ctx, issue, doer, assigneeID, false)
+ if err != nil {
+ return false, nil, err
+ }
+
+ if err := committer.Commit(); err != nil {
+ return false, nil, err
+ }
+
+ return removed, comment, nil
+}
+
+func toggleIssueAssignee(ctx context.Context, issue *Issue, doer *user_model.User, assigneeID int64, isCreate bool) (removed bool, comment *Comment, err error) {
+ removed, err = toggleUserAssignee(ctx, issue, assigneeID)
+ if err != nil {
+ return false, nil, fmt.Errorf("UpdateIssueUserByAssignee: %w", err)
+ }
+
+ // Repo infos
+ if err = issue.LoadRepo(ctx); err != nil {
+ return false, nil, fmt.Errorf("loadRepo: %w", err)
+ }
+
+ opts := &CreateCommentOptions{
+ Type: CommentTypeAssignees,
+ Doer: doer,
+ Repo: issue.Repo,
+ Issue: issue,
+ RemovedAssignee: removed,
+ AssigneeID: assigneeID,
+ }
+ // Comment
+ comment, err = CreateComment(ctx, opts)
+ if err != nil {
+ return false, nil, fmt.Errorf("createComment: %w", err)
+ }
+
+ // if the pull request is in the middle of creation, don't call the webhook
+ if isCreate {
+ return removed, comment, err
+ }
+
+ return removed, comment, nil
+}
+
+// toggleUserAssignee toggles the user's assignee state in the database
+func toggleUserAssignee(ctx context.Context, issue *Issue, assigneeID int64) (removed bool, err error) {
+ // Check if the user exists
+ assignee, err := user_model.GetUserByID(ctx, assigneeID)
+ if err != nil {
+ return false, err
+ }
+
+ // Check if the submitted user is already assigned; if so, remove the assignment, otherwise add it
+ found := false
+ i := 0
+ for ; i < len(issue.Assignees); i++ {
+ if issue.Assignees[i].ID == assigneeID {
+ found = true
+ break
+ }
+ }
+
+ assigneeIn := IssueAssignees{AssigneeID: assigneeID, IssueID: issue.ID}
+ if found {
+ issue.Assignees = append(issue.Assignees[:i], issue.Assignees[i+1:]...)
+ _, err = db.DeleteByBean(ctx, &assigneeIn)
+ if err != nil {
+ return found, err
+ }
+ } else {
+ issue.Assignees = append(issue.Assignees, assignee)
+ if err = db.Insert(ctx, &assigneeIn); err != nil {
+ return found, err
+ }
+ }
+
+ return found, nil
+}
+
+// MakeIDsFromAPIAssigneesToAdd returns a slice with all assignee IDs
+func MakeIDsFromAPIAssigneesToAdd(ctx context.Context, oneAssignee string, multipleAssignees []string) (assigneeIDs []int64, err error) {
+ var requestAssignees []string
+
+ // Keeping the old assigning method for compatibility reasons
+ if oneAssignee != "" && !util.SliceContainsString(multipleAssignees, oneAssignee) {
+ requestAssignees = append(requestAssignees, oneAssignee)
+ }
+
+ // Prevent empty assignees
+ if len(multipleAssignees) > 0 && multipleAssignees[0] != "" {
+ requestAssignees = append(requestAssignees, multipleAssignees...)
+ }
+
+ // Get the IDs of all assignees
+ assigneeIDs, err = user_model.GetUserIDsByNames(ctx, requestAssignees, false)
+
+ return assigneeIDs, err
+}
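
A hypothetical API-handler sketch combining the helpers in this file: resolve assignee names to IDs, then toggle each assignment (each toggle also records a comment):

package example // hypothetical example package

import (
	"context"

	issues_model "code.gitea.io/gitea/models/issues"
	user_model "code.gitea.io/gitea/models/user"
)

// applyAPIAssignees resolves names to user IDs and toggles each one on the
// issue; the helper is illustrative only.
func applyAPIAssignees(ctx context.Context, issue *issues_model.Issue, doer *user_model.User, one string, many []string) error {
	ids, err := issues_model.MakeIDsFromAPIAssigneesToAdd(ctx, one, many)
	if err != nil {
		return err
	}
	for _, id := range ids {
		if _, _, err := issues_model.ToggleIssueAssignee(ctx, issue, doer, id); err != nil {
			return err
		}
	}
	return nil
}
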
diff --git a/models/issues/assignees_test.go b/models/issues/assignees_test.go
new file mode 100644
index 0000000..47fb81a
--- /dev/null
+++ b/models/issues/assignees_test.go
@@ -0,0 +1,95 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestUpdateAssignee(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ // Fake issue with assignees
+ issue, err := issues_model.GetIssueByID(db.DefaultContext, 1)
+ require.NoError(t, err)
+
+ err = issue.LoadAttributes(db.DefaultContext)
+ require.NoError(t, err)
+
+ // Assign multiple users
+ user2, err := user_model.GetUserByID(db.DefaultContext, 2)
+ require.NoError(t, err)
+ _, _, err = issues_model.ToggleIssueAssignee(db.DefaultContext, issue, &user_model.User{ID: 1}, user2.ID)
+ require.NoError(t, err)
+
+ org3, err := user_model.GetUserByID(db.DefaultContext, 3)
+ require.NoError(t, err)
+ _, _, err = issues_model.ToggleIssueAssignee(db.DefaultContext, issue, &user_model.User{ID: 1}, org3.ID)
+ require.NoError(t, err)
+
+ user1, err := user_model.GetUserByID(db.DefaultContext, 1) // This user is already assigned (see the definition in fixtures), so running UpdateAssignee should unassign them
+ require.NoError(t, err)
+ _, _, err = issues_model.ToggleIssueAssignee(db.DefaultContext, issue, &user_model.User{ID: 1}, user1.ID)
+ require.NoError(t, err)
+
+ // Check if he got removed
+ isAssigned, err := issues_model.IsUserAssignedToIssue(db.DefaultContext, issue, user1)
+ require.NoError(t, err)
+ assert.False(t, isAssigned)
+
+ // Check if they're all there
+ err = issue.LoadAssignees(db.DefaultContext)
+ require.NoError(t, err)
+
+ var expectedAssignees []*user_model.User
+ expectedAssignees = append(expectedAssignees, user2, org3)
+
+ for in, assignee := range issue.Assignees {
+ assert.Equal(t, expectedAssignees[in].ID, assignee.ID)
+ }
+
+ // Check if the user is assigned
+ isAssigned, err = issues_model.IsUserAssignedToIssue(db.DefaultContext, issue, user2)
+ require.NoError(t, err)
+ assert.True(t, isAssigned)
+
+ // This user should not be assigned
+ isAssigned, err = issues_model.IsUserAssignedToIssue(db.DefaultContext, issue, &user_model.User{ID: 4})
+ require.NoError(t, err)
+ assert.False(t, isAssigned)
+}
+
+func TestMakeIDsFromAPIAssigneesToAdd(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ _ = unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+ _ = unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+
+ IDs, err := issues_model.MakeIDsFromAPIAssigneesToAdd(db.DefaultContext, "", []string{""})
+ require.NoError(t, err)
+ assert.Equal(t, []int64{}, IDs)
+
+ _, err = issues_model.MakeIDsFromAPIAssigneesToAdd(db.DefaultContext, "", []string{"none_existing_user"})
+ require.Error(t, err)
+
+ IDs, err = issues_model.MakeIDsFromAPIAssigneesToAdd(db.DefaultContext, "user1", []string{"user1"})
+ require.NoError(t, err)
+ assert.Equal(t, []int64{1}, IDs)
+
+ IDs, err = issues_model.MakeIDsFromAPIAssigneesToAdd(db.DefaultContext, "user2", []string{""})
+ require.NoError(t, err)
+ assert.Equal(t, []int64{2}, IDs)
+
+ IDs, err = issues_model.MakeIDsFromAPIAssigneesToAdd(db.DefaultContext, "", []string{"user1", "user2"})
+ require.NoError(t, err)
+ assert.Equal(t, []int64{1, 2}, IDs)
+}
diff --git a/models/issues/comment.go b/models/issues/comment.go
new file mode 100644
index 0000000..d53e5f5
--- /dev/null
+++ b/models/issues/comment.go
@@ -0,0 +1,1333 @@
+// Copyright 2018 The Gitea Authors.
+// Copyright 2016 The Gogs Authors.
+// All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "context"
+ "fmt"
+ "html/template"
+ "strconv"
+ "unicode/utf8"
+
+ "code.gitea.io/gitea/models/db"
+ git_model "code.gitea.io/gitea/models/git"
+ "code.gitea.io/gitea/models/organization"
+ project_model "code.gitea.io/gitea/models/project"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/gitrepo"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/optional"
+ "code.gitea.io/gitea/modules/references"
+ "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/translation"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ErrCommentNotExist represents a "CommentNotExist" kind of error.
+type ErrCommentNotExist struct {
+ ID int64
+ IssueID int64
+}
+
+// IsErrCommentNotExist checks if an error is a ErrCommentNotExist.
+func IsErrCommentNotExist(err error) bool {
+ _, ok := err.(ErrCommentNotExist)
+ return ok
+}
+
+func (err ErrCommentNotExist) Error() string {
+ return fmt.Sprintf("comment does not exist [id: %d, issue_id: %d]", err.ID, err.IssueID)
+}
+
+func (err ErrCommentNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+var ErrCommentAlreadyChanged = util.NewInvalidArgumentErrorf("the comment is already changed")
+
+// CommentType defines whether a comment is just a simple comment, an action (like close) or a reference.
+type CommentType int
+
+// CommentTypeUndefined is used to search for comments of any type
+const CommentTypeUndefined CommentType = -1
+
+const (
+ CommentTypeComment CommentType = iota // 0 Plain comment, can be associated with a commit (CommitID > 0) and a line (LineNum > 0)
+
+ CommentTypeReopen // 1
+ CommentTypeClose // 2
+
+ CommentTypeIssueRef // 3 References.
+ CommentTypeCommitRef // 4 Reference from a commit (not part of a pull request)
+ CommentTypeCommentRef // 5 Reference from a comment
+ CommentTypePullRef // 6 Reference from a pull request
+
+ CommentTypeLabel // 7 Labels changed
+ CommentTypeMilestone // 8 Milestone changed
+ CommentTypeAssignees // 9 Assignees changed
+ CommentTypeChangeTitle // 10 Change Title
+ CommentTypeDeleteBranch // 11 Delete Branch
+
+ CommentTypeStartTracking // 12 Start a stopwatch for time tracking
+ CommentTypeStopTracking // 13 Stop a stopwatch for time tracking
+ CommentTypeAddTimeManual // 14 Add time manual for time tracking
+ CommentTypeCancelTracking // 15 Cancel a stopwatch for time tracking
+ CommentTypeAddedDeadline // 16 Added a due date
+ CommentTypeModifiedDeadline // 17 Modified the due date
+ CommentTypeRemovedDeadline // 18 Removed a due date
+
+ CommentTypeAddDependency // 19 Dependency added
+ CommentTypeRemoveDependency // 20 Dependency removed
+
+ CommentTypeCode // 21 Comment a line of code
+ CommentTypeReview // 22 Reviews a pull request by giving general feedback
+
+ CommentTypeLock // 23 Lock an issue, giving only collaborators access
+ CommentTypeUnlock // 24 Unlocks a previously locked issue
+
+ CommentTypeChangeTargetBranch // 25 Change pull request's target branch
+
+ CommentTypeDeleteTimeManual // 26 Manually delete time for time tracking
+
+ CommentTypeReviewRequest // 27 Add or remove a review request
+ CommentTypeMergePull // 28 merge pull request
+ CommentTypePullRequestPush // 29 push to PR head branch
+
+ CommentTypeProject // 30 Project changed
+ CommentTypeProjectColumn // 31 Project column changed
+
+ CommentTypeDismissReview // 32 Dismiss Review
+
+ CommentTypeChangeIssueRef // 33 Change issue ref
+
+ CommentTypePRScheduledToAutoMerge // 34 PR was scheduled to auto merge when checks succeed
+ CommentTypePRUnScheduledToAutoMerge // 35 PR was unscheduled from auto merging when checks succeed
+
+ CommentTypePin // 36 pin Issue
+ CommentTypeUnpin // 37 unpin Issue
+)
+
+var commentStrings = []string{
+ "comment",
+ "reopen",
+ "close",
+ "issue_ref",
+ "commit_ref",
+ "comment_ref",
+ "pull_ref",
+ "label",
+ "milestone",
+ "assignees",
+ "change_title",
+ "delete_branch",
+ "start_tracking",
+ "stop_tracking",
+ "add_time_manual",
+ "cancel_tracking",
+ "added_deadline",
+ "modified_deadline",
+ "removed_deadline",
+ "add_dependency",
+ "remove_dependency",
+ "code",
+ "review",
+ "lock",
+ "unlock",
+ "change_target_branch",
+ "delete_time_manual",
+ "review_request",
+ "merge_pull",
+ "pull_push",
+ "project",
+ "project_board", // FIXME: the name should be project_column
+ "dismiss_review",
+ "change_issue_ref",
+ "pull_scheduled_merge",
+ "pull_cancel_scheduled_merge",
+ "pin",
+ "unpin",
+}
+
+func (t CommentType) String() string {
+ if t < 0 || int(t) >= len(commentStrings) {
+ return "undefined"
+ }
+ return commentStrings[t]
+}
+
+func AsCommentType(typeName string) CommentType {
+ for index, name := range commentStrings {
+ if typeName == name {
+ return CommentType(index)
+ }
+ }
+ return CommentTypeUndefined
+}
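+
+// Example (illustrative, not exhaustive): String and AsCommentType
+// round-trip between the numeric and string forms:
+//
+//	AsCommentType("merge_pull") == CommentTypeMergePull   // 28
+//	CommentTypeMergePull.String() == "merge_pull"
+//	AsCommentType("no_such_type") == CommentTypeUndefined // -1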
+
+func (t CommentType) HasContentSupport() bool {
+ switch t {
+ case CommentTypeComment, CommentTypeCode, CommentTypeReview, CommentTypeDismissReview:
+ return true
+ }
+ return false
+}
+
+func (t CommentType) HasAttachmentSupport() bool {
+ switch t {
+ case CommentTypeComment, CommentTypeCode, CommentTypeReview:
+ return true
+ }
+ return false
+}
+
+func (t CommentType) HasMailReplySupport() bool {
+ switch t {
+ case CommentTypeComment, CommentTypeCode, CommentTypeReview, CommentTypeDismissReview, CommentTypeReopen, CommentTypeClose, CommentTypeMergePull, CommentTypeAssignees:
+ return true
+ }
+ return false
+}
+
+// RoleInRepo represents the user's participation in the repo
+type RoleInRepo string
+
+// RoleDescriptor defines comment "role" tags
+type RoleDescriptor struct {
+ IsPoster bool
+ RoleInRepo RoleInRepo
+}
+
+// Enumerate all the role tags.
+const (
+ RoleRepoOwner RoleInRepo = "owner"
+ RoleRepoMember RoleInRepo = "member"
+ RoleRepoCollaborator RoleInRepo = "collaborator"
+ RoleRepoFirstTimeContributor RoleInRepo = "first_time_contributor"
+ RoleRepoContributor RoleInRepo = "contributor"
+)
+
+// LocaleString returns the locale string name of the role
+func (r RoleInRepo) LocaleString(lang translation.Locale) string {
+ return lang.TrString("repo.issues.role." + string(r))
+}
+
+// LocaleHelper returns the locale tooltip of the role
+func (r RoleInRepo) LocaleHelper(lang translation.Locale) string {
+ return lang.TrString("repo.issues.role." + string(r) + "_helper")
+}
+
+// Comment represents a comment in commit and issue page.
+type Comment struct {
+ ID int64 `xorm:"pk autoincr"`
+ Type CommentType `xorm:"INDEX"`
+ PosterID int64 `xorm:"INDEX"`
+ Poster *user_model.User `xorm:"-"`
+ OriginalAuthor string
+ OriginalAuthorID int64
+ IssueID int64 `xorm:"INDEX"`
+ Issue *Issue `xorm:"-"`
+ LabelID int64
+ Label *Label `xorm:"-"`
+ AddedLabels []*Label `xorm:"-"`
+ RemovedLabels []*Label `xorm:"-"`
+ OldProjectID int64
+ ProjectID int64
+ OldProject *project_model.Project `xorm:"-"`
+ Project *project_model.Project `xorm:"-"`
+ OldMilestoneID int64
+ MilestoneID int64
+ OldMilestone *Milestone `xorm:"-"`
+ Milestone *Milestone `xorm:"-"`
+ TimeID int64
+ Time *TrackedTime `xorm:"-"`
+ AssigneeID int64
+ RemovedAssignee bool
+ Assignee *user_model.User `xorm:"-"`
+ AssigneeTeamID int64 `xorm:"NOT NULL DEFAULT 0"`
+ AssigneeTeam *organization.Team `xorm:"-"`
+ ResolveDoerID int64
+ ResolveDoer *user_model.User `xorm:"-"`
+ OldTitle string
+ NewTitle string
+ OldRef string
+ NewRef string
+ DependentIssueID int64 `xorm:"index"` // This is used by issue_service.deleteIssue
+ DependentIssue *Issue `xorm:"-"`
+
+ CommitID int64
+ Line int64 // - previous line / + proposed line
+ TreePath string
+ Content string `xorm:"LONGTEXT"`
+ ContentVersion int `xorm:"NOT NULL DEFAULT 0"`
+ RenderedContent template.HTML `xorm:"-"`
+
+ // Patch represents the 4 lines of code that this comment was made on
+ Patch string `xorm:"-"`
+ PatchQuoted string `xorm:"LONGTEXT patch"`
+
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+
+ // Reference issue in commit message
+ CommitSHA string `xorm:"VARCHAR(64)"`
+
+ Attachments []*repo_model.Attachment `xorm:"-"`
+ Reactions ReactionList `xorm:"-"`
+
+ // For view issue page.
+ ShowRole RoleDescriptor `xorm:"-"`
+
+ Review *Review `xorm:"-"`
+ ReviewID int64 `xorm:"index"`
+ Invalidated bool
+
+ // Reference an issue or pull from another comment, issue or PR
+ // All information is about the origin of the reference
+ RefRepoID int64 `xorm:"index"` // Repo where the referencing issue, comment or PR lives
+ RefIssueID int64 `xorm:"index"`
+ RefCommentID int64 `xorm:"index"` // 0 if origin is Issue title or content (or PR's)
+ RefAction references.XRefAction `xorm:"SMALLINT"` // What happens if RefIssueID resolves
+ RefIsPull bool
+
+ RefRepo *repo_model.Repository `xorm:"-"`
+ RefIssue *Issue `xorm:"-"`
+ RefComment *Comment `xorm:"-"`
+
+ Commits []*git_model.SignCommitWithStatuses `xorm:"-"`
+ OldCommit string `xorm:"-"`
+ NewCommit string `xorm:"-"`
+ CommitsNum int64 `xorm:"-"`
+ IsForcePush bool `xorm:"-"`
+}
+
+func init() {
+ db.RegisterModel(new(Comment))
+}
+
+// PushActionContent is the content of a pull request push comment
+type PushActionContent struct {
+ IsForcePush bool `json:"is_force_push"`
+ CommitIDs []string `json:"commit_ids"`
+}
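+
+// Serialized into Comment.Content as JSON, for example (illustrative
+// commit IDs):
+//
+//	{"is_force_push":true,"commit_ids":["0123abc","4567def"]}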
+
+// LoadIssue loads the issue reference for the comment
+func (c *Comment) LoadIssue(ctx context.Context) (err error) {
+ if c.Issue != nil {
+ return nil
+ }
+ c.Issue, err = GetIssueByID(ctx, c.IssueID)
+ return err
+}
+
+// BeforeInsert will be invoked by XORM before inserting a record
+func (c *Comment) BeforeInsert() {
+ c.PatchQuoted = c.Patch
+ if !utf8.ValidString(c.Patch) {
+ c.PatchQuoted = strconv.Quote(c.Patch)
+ }
+}
+
+// BeforeUpdate will be invoked by XORM before updating a record
+func (c *Comment) BeforeUpdate() {
+ c.PatchQuoted = c.Patch
+ if !utf8.ValidString(c.Patch) {
+ c.PatchQuoted = strconv.Quote(c.Patch)
+ }
+}
+
+// AfterLoad is invoked from XORM after setting the values of all fields of this object.
+func (c *Comment) AfterLoad() {
+ c.Patch = c.PatchQuoted
+ if len(c.PatchQuoted) > 0 && c.PatchQuoted[0] == '"' {
+ unquoted, err := strconv.Unquote(c.PatchQuoted)
+ if err == nil {
+ c.Patch = unquoted
+ }
+ }
+}
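+
+// Illustrative round-trip of the quoting hooks above (BeforeInsert /
+// BeforeUpdate / AfterLoad): a patch that is not valid UTF-8 is stored in
+// quoted form and restored transparently:
+//
+//	c := &Comment{Patch: "binary \xff diff"}
+//	c.BeforeInsert() // c.PatchQuoted holds strconv.Quote(c.Patch)
+//	c.AfterLoad()    // c.Patch is restored via strconv.Unquote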
+
+// LoadPoster loads comment poster
+func (c *Comment) LoadPoster(ctx context.Context) (err error) {
+ if c.Poster != nil {
+ return nil
+ }
+
+ c.Poster, err = user_model.GetPossibleUserByID(ctx, c.PosterID)
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ c.PosterID = user_model.GhostUserID
+ c.Poster = user_model.NewGhostUser()
+ } else {
+ log.Error("getUserByID[%d]: %v", c.ID, err)
+ }
+ }
+ return err
+}
+
+// AfterDelete is invoked from XORM after the object is deleted.
+func (c *Comment) AfterDelete(ctx context.Context) {
+ if c.ID <= 0 {
+ return
+ }
+
+ _, err := repo_model.DeleteAttachmentsByComment(ctx, c.ID, true)
+ if err != nil {
+ log.Info("Could not delete files for comment %d on issue #%d: %s", c.ID, c.IssueID, err)
+ }
+}
+
+// HTMLURL formats a URL-string to the issue-comment
+func (c *Comment) HTMLURL(ctx context.Context) string {
+ err := c.LoadIssue(ctx)
+ if err != nil { // Silently dropping errors :unamused:
+ log.Error("LoadIssue(%d): %v", c.IssueID, err)
+ return ""
+ }
+ err = c.Issue.LoadRepo(ctx)
+ if err != nil { // Silently dropping errors :unamused:
+ log.Error("loadRepo(%d): %v", c.Issue.RepoID, err)
+ return ""
+ }
+ return c.Issue.HTMLURL() + c.hashLink(ctx)
+}
+
+// Link formats a relative URL-string to the issue-comment
+func (c *Comment) Link(ctx context.Context) string {
+ err := c.LoadIssue(ctx)
+ if err != nil { // Silently dropping errors :unamused:
+ log.Error("LoadIssue(%d): %v", c.IssueID, err)
+ return ""
+ }
+ err = c.Issue.LoadRepo(ctx)
+ if err != nil { // Silently dropping errors :unamused:
+ log.Error("loadRepo(%d): %v", c.Issue.RepoID, err)
+ return ""
+ }
+ return c.Issue.Link() + c.hashLink(ctx)
+}
+
+func (c *Comment) hashLink(ctx context.Context) string {
+ if c.Type == CommentTypeCode {
+ if c.ReviewID == 0 {
+ return "/files#" + c.HashTag()
+ }
+ if c.Review == nil {
+ if err := c.LoadReview(ctx); err != nil {
+ log.Warn("LoadReview(%d): %v", c.ReviewID, err)
+ return "/files#" + c.HashTag()
+ }
+ }
+ if c.Review.Type <= ReviewTypePending {
+ return "/files#" + c.HashTag()
+ }
+ }
+ return "#" + c.HashTag()
+}
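+
+// The resulting fragments are, for example (illustrative comment ID 3):
+//
+//	"#issuecomment-3"       // regular comments and submitted review comments
+//	"/files#issuecomment-3" // pending or unattached code comments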
+
+// APIURL formats an API URL-string to the issue-comment
+func (c *Comment) APIURL(ctx context.Context) string {
+ err := c.LoadIssue(ctx)
+ if err != nil { // Silently dropping errors :unamused:
+ log.Error("LoadIssue(%d): %v", c.IssueID, err)
+ return ""
+ }
+ err = c.Issue.LoadRepo(ctx)
+ if err != nil { // Silently dropping errors :unamused:
+ log.Error("loadRepo(%d): %v", c.Issue.RepoID, err)
+ return ""
+ }
+
+ return fmt.Sprintf("%s/issues/comments/%d", c.Issue.Repo.APIURL(), c.ID)
+}
+
+// IssueURL formats a URL-string to the issue
+func (c *Comment) IssueURL(ctx context.Context) string {
+ err := c.LoadIssue(ctx)
+ if err != nil { // Silently dropping errors :unamused:
+ log.Error("LoadIssue(%d): %v", c.IssueID, err)
+ return ""
+ }
+
+ if c.Issue.IsPull {
+ return ""
+ }
+
+ err = c.Issue.LoadRepo(ctx)
+ if err != nil { // Silently dropping errors :unamused:
+ log.Error("loadRepo(%d): %v", c.Issue.RepoID, err)
+ return ""
+ }
+ return c.Issue.HTMLURL()
+}
+
+// PRURL formats a URL-string to the pull-request
+func (c *Comment) PRURL(ctx context.Context) string {
+ err := c.LoadIssue(ctx)
+ if err != nil { // Silently dropping errors :unamused:
+ log.Error("LoadIssue(%d): %v", c.IssueID, err)
+ return ""
+ }
+
+ err = c.Issue.LoadRepo(ctx)
+ if err != nil { // Silently dropping errors :unamused:
+ log.Error("loadRepo(%d): %v", c.Issue.RepoID, err)
+ return ""
+ }
+
+ if !c.Issue.IsPull {
+ return ""
+ }
+ return c.Issue.HTMLURL()
+}
+
+// CommentHashTag returns unique hash tag for comment id.
+func CommentHashTag(id int64) string {
+ return fmt.Sprintf("issuecomment-%d", id)
+}
+
+// HashTag returns unique hash tag for comment.
+func (c *Comment) HashTag() string {
+ return CommentHashTag(c.ID)
+}
+
+// EventTag returns unique event hash tag for comment.
+func (c *Comment) EventTag() string {
+ return fmt.Sprintf("event-%d", c.ID)
+}
+
+// LoadLabel if comment.Type is CommentTypeLabel, then load Label
+func (c *Comment) LoadLabel(ctx context.Context) error {
+ var label Label
+ has, err := db.GetEngine(ctx).ID(c.LabelID).Get(&label)
+ if err != nil {
+ return err
+ } else if has {
+ c.Label = &label
+ } else {
+ // The label may have been deleted without this comment being removed; ignore it
+ log.Warn("Comment %d cannot load label %d", c.ID, c.LabelID)
+ }
+
+ return nil
+}
+
+// LoadProject if comment.Type is CommentTypeProject, then load project.
+func (c *Comment) LoadProject(ctx context.Context) error {
+ if c.OldProjectID > 0 {
+ var oldProject project_model.Project
+ has, err := db.GetEngine(ctx).ID(c.OldProjectID).Get(&oldProject)
+ if err != nil {
+ return err
+ } else if has {
+ c.OldProject = &oldProject
+ }
+ }
+
+ if c.ProjectID > 0 {
+ var project project_model.Project
+ has, err := db.GetEngine(ctx).ID(c.ProjectID).Get(&project)
+ if err != nil {
+ return err
+ } else if has {
+ c.Project = &project
+ }
+ }
+
+ return nil
+}
+
+// LoadMilestone if comment.Type is CommentTypeMilestone, then load milestone
+func (c *Comment) LoadMilestone(ctx context.Context) error {
+ if c.OldMilestoneID > 0 {
+ var oldMilestone Milestone
+ has, err := db.GetEngine(ctx).ID(c.OldMilestoneID).Get(&oldMilestone)
+ if err != nil {
+ return err
+ } else if has {
+ c.OldMilestone = &oldMilestone
+ }
+ }
+
+ if c.MilestoneID > 0 {
+ var milestone Milestone
+ has, err := db.GetEngine(ctx).ID(c.MilestoneID).Get(&milestone)
+ if err != nil {
+ return err
+ } else if has {
+ c.Milestone = &milestone
+ }
+ }
+ return nil
+}
+
+// LoadAttachments loads attachments (it never returns an error; any error from `GetAttachmentsByCommentID` is logged and ignored)
+func (c *Comment) LoadAttachments(ctx context.Context) error {
+ if len(c.Attachments) > 0 {
+ return nil
+ }
+
+ var err error
+ c.Attachments, err = repo_model.GetAttachmentsByCommentID(ctx, c.ID)
+ if err != nil {
+ log.Error("getAttachmentsByCommentID[%d]: %v", c.ID, err)
+ }
+ return nil
+}
+
+// UpdateAttachments updates the attachments with the given UUIDs for the comment
+func (c *Comment) UpdateAttachments(ctx context.Context, uuids []string) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ attachments, err := repo_model.GetAttachmentsByUUIDs(ctx, uuids)
+ if err != nil {
+ return fmt.Errorf("getAttachmentsByUUIDs [uuids: %v]: %w", uuids, err)
+ }
+ for i := 0; i < len(attachments); i++ {
+ attachments[i].IssueID = c.IssueID
+ attachments[i].CommentID = c.ID
+ if err := repo_model.UpdateAttachment(ctx, attachments[i]); err != nil {
+ return fmt.Errorf("update attachment [id: %d]: %w", attachments[i].ID, err)
+ }
+ }
+ return committer.Commit()
+}
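+
+// Usage sketch (hypothetical UUIDs produced by a prior upload step):
+//
+//	err := comment.UpdateAttachments(ctx, []string{"a1b2c3", "d4e5f6"})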
+
+// LoadAssigneeUserAndTeam if comment.Type is CommentTypeAssignees, then load assignees
+func (c *Comment) LoadAssigneeUserAndTeam(ctx context.Context) error {
+ var err error
+
+ if c.AssigneeID > 0 && c.Assignee == nil {
+ c.Assignee, err = user_model.GetUserByID(ctx, c.AssigneeID)
+ if err != nil {
+ if !user_model.IsErrUserNotExist(err) {
+ return err
+ }
+ c.Assignee = user_model.NewGhostUser()
+ }
+ } else if c.AssigneeTeamID > 0 && c.AssigneeTeam == nil {
+ if err = c.LoadIssue(ctx); err != nil {
+ return err
+ }
+
+ if err = c.Issue.LoadRepo(ctx); err != nil {
+ return err
+ }
+
+ if err = c.Issue.Repo.LoadOwner(ctx); err != nil {
+ return err
+ }
+
+ if c.Issue.Repo.Owner.IsOrganization() {
+ c.AssigneeTeam, err = organization.GetTeamByID(ctx, c.AssigneeTeamID)
+ if err != nil && !organization.IsErrTeamNotExist(err) {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// LoadResolveDoer if comment.Type is CommentTypeCode and ResolveDoerID not zero, then load resolveDoer
+func (c *Comment) LoadResolveDoer(ctx context.Context) (err error) {
+ if c.ResolveDoerID == 0 || c.Type != CommentTypeCode {
+ return nil
+ }
+ c.ResolveDoer, err = user_model.GetUserByID(ctx, c.ResolveDoerID)
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ c.ResolveDoer = user_model.NewGhostUser()
+ err = nil
+ }
+ }
+ return err
+}
+
+// IsResolved checks if a code comment is resolved
+func (c *Comment) IsResolved() bool {
+ return c.ResolveDoerID != 0 && c.Type == CommentTypeCode
+}
+
+// LoadDepIssueDetails loads Dependent Issue Details
+func (c *Comment) LoadDepIssueDetails(ctx context.Context) (err error) {
+ if c.DependentIssueID <= 0 || c.DependentIssue != nil {
+ return nil
+ }
+ c.DependentIssue, err = GetIssueByID(ctx, c.DependentIssueID)
+ return err
+}
+
+// LoadTime loads the associated time for a CommentTypeAddTimeManual
+func (c *Comment) LoadTime(ctx context.Context) error {
+ if c.Time != nil || c.TimeID == 0 {
+ return nil
+ }
+ var err error
+ c.Time, err = GetTrackedTimeByID(ctx, c.TimeID)
+ return err
+}
+
+// LoadReactions loads comment reactions
+func (c *Comment) LoadReactions(ctx context.Context, repo *repo_model.Repository) (err error) {
+ if c.Reactions != nil {
+ return nil
+ }
+ c.Reactions, _, err = FindReactions(ctx, FindReactionsOptions{
+ IssueID: c.IssueID,
+ CommentID: c.ID,
+ })
+ if err != nil {
+ return err
+ }
+ // Load reaction user data
+ if _, err := c.Reactions.LoadUsers(ctx, repo); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (c *Comment) loadReview(ctx context.Context) (err error) {
+ if c.ReviewID == 0 {
+ return nil
+ }
+ if c.Review == nil {
+ if c.Review, err = GetReviewByID(ctx, c.ReviewID); err != nil {
+ // review requests that have been replaced by actual reviews no longer exist in the database, so ignore them.
+ if c.Type == CommentTypeReviewRequest {
+ return nil
+ }
+ return err
+ }
+ }
+ c.Review.Issue = c.Issue
+ return nil
+}
+
+// LoadReview loads the associated review
+func (c *Comment) LoadReview(ctx context.Context) error {
+ return c.loadReview(ctx)
+}
+
+// DiffSide returns "previous" if Comment.Line is a LOC of the previous changes and "proposed" if it is a LOC of the proposed changes.
+func (c *Comment) DiffSide() string {
+ if c.Line < 0 {
+ return "previous"
+ }
+ return "proposed"
+}
+
+// UnsignedLine returns the LOC of the code comment without + or -
+func (c *Comment) UnsignedLine() uint64 {
+ if c.Line < 0 {
+ return uint64(c.Line * -1)
+ }
+ return uint64(c.Line)
+}
+
+// CodeCommentLink returns the url to a comment in code
+func (c *Comment) CodeCommentLink(ctx context.Context) string {
+ err := c.LoadIssue(ctx)
+ if err != nil { // Silently dropping errors :unamused:
+ log.Error("LoadIssue(%d): %v", c.IssueID, err)
+ return ""
+ }
+ err = c.Issue.LoadRepo(ctx)
+ if err != nil { // Silently dropping errors :unamused:
+ log.Error("loadRepo(%d): %v", c.Issue.RepoID, err)
+ return ""
+ }
+ return fmt.Sprintf("%s/files#%s", c.Issue.Link(), c.HashTag())
+}
+
+// LoadPushCommits loads the commits of a pull request push comment
+func (c *Comment) LoadPushCommits(ctx context.Context) (err error) {
+ if c.Content == "" || c.Commits != nil || c.Type != CommentTypePullRequestPush {
+ return nil
+ }
+
+ var data PushActionContent
+
+ err = json.Unmarshal([]byte(c.Content), &data)
+ if err != nil {
+ return err
+ }
+
+ c.IsForcePush = data.IsForcePush
+
+ if c.IsForcePush {
+ if len(data.CommitIDs) != 2 {
+ return nil
+ }
+ c.OldCommit = data.CommitIDs[0]
+ c.NewCommit = data.CommitIDs[1]
+ } else {
+ gitRepo, closer, err := gitrepo.RepositoryFromContextOrOpen(ctx, c.Issue.Repo)
+ if err != nil {
+ return err
+ }
+ defer closer.Close()
+
+ c.Commits = git_model.ConvertFromGitCommit(ctx, gitRepo.GetCommitsFromIDs(data.CommitIDs), c.Issue.Repo)
+ c.CommitsNum = int64(len(c.Commits))
+ }
+
+ return err
+}
+
+// CreateComment creates comment with context
+func CreateComment(ctx context.Context, opts *CreateCommentOptions) (_ *Comment, err error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer committer.Close()
+
+ e := db.GetEngine(ctx)
+ var LabelID int64
+ if opts.Label != nil {
+ LabelID = opts.Label.ID
+ }
+
+ comment := &Comment{
+ Type: opts.Type,
+ PosterID: opts.Doer.ID,
+ Poster: opts.Doer,
+ IssueID: opts.Issue.ID,
+ LabelID: LabelID,
+ OldMilestoneID: opts.OldMilestoneID,
+ MilestoneID: opts.MilestoneID,
+ OldProjectID: opts.OldProjectID,
+ ProjectID: opts.ProjectID,
+ TimeID: opts.TimeID,
+ RemovedAssignee: opts.RemovedAssignee,
+ AssigneeID: opts.AssigneeID,
+ AssigneeTeamID: opts.AssigneeTeamID,
+ CommitID: opts.CommitID,
+ CommitSHA: opts.CommitSHA,
+ Line: opts.LineNum,
+ Content: opts.Content,
+ OldTitle: opts.OldTitle,
+ NewTitle: opts.NewTitle,
+ OldRef: opts.OldRef,
+ NewRef: opts.NewRef,
+ DependentIssueID: opts.DependentIssueID,
+ TreePath: opts.TreePath,
+ ReviewID: opts.ReviewID,
+ Patch: opts.Patch,
+ RefRepoID: opts.RefRepoID,
+ RefIssueID: opts.RefIssueID,
+ RefCommentID: opts.RefCommentID,
+ RefAction: opts.RefAction,
+ RefIsPull: opts.RefIsPull,
+ IsForcePush: opts.IsForcePush,
+ Invalidated: opts.Invalidated,
+ }
+ if opts.Issue.NoAutoTime {
+ // Preload the comment with the Issue containing the forced update
+ // date. This is needed to propagate those data in AddCrossReferences()
+ comment.Issue = opts.Issue
+ comment.CreatedUnix = opts.Issue.UpdatedUnix
+ comment.UpdatedUnix = opts.Issue.UpdatedUnix
+ e.NoAutoTime()
+ }
+ if _, err = e.Insert(comment); err != nil {
+ return nil, err
+ }
+
+ if err = opts.Repo.LoadOwner(ctx); err != nil {
+ return nil, err
+ }
+
+ if err = updateCommentInfos(ctx, opts, comment); err != nil {
+ return nil, err
+ }
+
+ if err = comment.AddCrossReferences(ctx, opts.Doer, false); err != nil {
+ return nil, err
+ }
+ if err = committer.Commit(); err != nil {
+ return nil, err
+ }
+ return comment, nil
+}
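+
+// Minimal usage sketch, assuming doer, repo and issue are already loaded:
+//
+//	comment, err := CreateComment(ctx, &CreateCommentOptions{
+//		Type:    CommentTypeComment,
+//		Doer:    doer,
+//		Repo:    repo,
+//		Issue:   issue,
+//		Content: "Looks good to me",
+//	})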
+
+func updateCommentInfos(ctx context.Context, opts *CreateCommentOptions, comment *Comment) (err error) {
+ // Check comment type.
+ switch opts.Type {
+ case CommentTypeCode:
+ if err = updateAttachments(ctx, opts, comment); err != nil {
+ return err
+ }
+ if comment.ReviewID != 0 {
+ if comment.Review == nil {
+ if err := comment.loadReview(ctx); err != nil {
+ return err
+ }
+ }
+ if comment.Review.Type <= ReviewTypePending {
+ return nil
+ }
+ }
+ fallthrough
+ case CommentTypeComment:
+ if _, err = db.Exec(ctx, "UPDATE `issue` SET num_comments=num_comments+1 WHERE id=?", opts.Issue.ID); err != nil {
+ return err
+ }
+ fallthrough
+ case CommentTypeReview:
+ if err = updateAttachments(ctx, opts, comment); err != nil {
+ return err
+ }
+ case CommentTypeReopen, CommentTypeClose:
+ if err = repo_model.UpdateRepoIssueNumbers(ctx, opts.Issue.RepoID, opts.Issue.IsPull, true); err != nil {
+ return err
+ }
+ }
+ // update the issue's updated_unix column
+ return UpdateIssueCols(ctx, opts.Issue, "updated_unix")
+}
+
+func updateAttachments(ctx context.Context, opts *CreateCommentOptions, comment *Comment) error {
+ attachments, err := repo_model.GetAttachmentsByUUIDs(ctx, opts.Attachments)
+ if err != nil {
+ return fmt.Errorf("getAttachmentsByUUIDs [uuids: %v]: %w", opts.Attachments, err)
+ }
+ for i := range attachments {
+ attachments[i].IssueID = opts.Issue.ID
+ attachments[i].CommentID = comment.ID
+ // The assigned values (IssueID, CommentID) are never 0 here, so AllCols() is not needed.
+ if _, err = db.GetEngine(ctx).ID(attachments[i].ID).Update(attachments[i]); err != nil {
+ return fmt.Errorf("update attachment [%d]: %w", attachments[i].ID, err)
+ }
+ }
+ comment.Attachments = attachments
+ return nil
+}
+
+func createDeadlineComment(ctx context.Context, doer *user_model.User, issue *Issue, newDeadlineUnix timeutil.TimeStamp) (*Comment, error) {
+ var content string
+ var commentType CommentType
+
+ // newDeadlineUnix == 0 means the deadline is being removed
+ if newDeadlineUnix == 0 {
+ commentType = CommentTypeRemovedDeadline
+ content = issue.DeadlineUnix.FormatDate()
+ } else if issue.DeadlineUnix == 0 {
+ // the issue had no deadline yet, so the deadline was added
+ commentType = CommentTypeAddedDeadline
+ content = newDeadlineUnix.FormatDate()
+ } else { // Otherwise modified
+ commentType = CommentTypeModifiedDeadline
+ content = newDeadlineUnix.FormatDate() + "|" + issue.DeadlineUnix.FormatDate()
+ }
+
+ if err := issue.LoadRepo(ctx); err != nil {
+ return nil, err
+ }
+
+ opts := &CreateCommentOptions{
+ Type: commentType,
+ Doer: doer,
+ Repo: issue.Repo,
+ Issue: issue,
+ Content: content,
+ }
+ comment, err := CreateComment(ctx, opts)
+ if err != nil {
+ return nil, err
+ }
+ return comment, nil
+}
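+
+// The Content encoding used above, with illustrative dates:
+//
+//	added/removed: "2024-05-01"
+//	modified:      "2024-05-08|2024-05-01" (new|old)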
+
+// createIssueDependencyComment creates an issue dependency comment in both issues
+func createIssueDependencyComment(ctx context.Context, doer *user_model.User, issue, dependentIssue *Issue, add bool) (err error) {
+ cType := CommentTypeAddDependency
+ if !add {
+ cType = CommentTypeRemoveDependency
+ }
+ if err = issue.LoadRepo(ctx); err != nil {
+ return err
+ }
+
+ // Make two comments, one in each issue
+ opts := &CreateCommentOptions{
+ Type: cType,
+ Doer: doer,
+ Repo: issue.Repo,
+ Issue: issue,
+ DependentIssueID: dependentIssue.ID,
+ }
+ if _, err = CreateComment(ctx, opts); err != nil {
+ return err
+ }
+
+ opts = &CreateCommentOptions{
+ Type: cType,
+ Doer: doer,
+ Repo: issue.Repo,
+ Issue: dependentIssue,
+ DependentIssueID: issue.ID,
+ }
+ _, err = CreateComment(ctx, opts)
+ return err
+}
+
+// CreateCommentOptions defines options for creating comment
+type CreateCommentOptions struct {
+ Type CommentType
+ Doer *user_model.User
+ Repo *repo_model.Repository
+ Issue *Issue
+ Label *Label
+
+ DependentIssueID int64
+ OldMilestoneID int64
+ MilestoneID int64
+ OldProjectID int64
+ ProjectID int64
+ TimeID int64
+ AssigneeID int64
+ AssigneeTeamID int64
+ RemovedAssignee bool
+ OldTitle string
+ NewTitle string
+ OldRef string
+ NewRef string
+ CommitID int64
+ CommitSHA string
+ Patch string
+ LineNum int64
+ TreePath string
+ ReviewID int64
+ Content string
+ Attachments []string // UUIDs of attachments
+ RefRepoID int64
+ RefIssueID int64
+ RefCommentID int64
+ RefAction references.XRefAction
+ RefIsPull bool
+ IsForcePush bool
+ Invalidated bool
+}
+
+// GetCommentByID returns the comment by given ID.
+func GetCommentByID(ctx context.Context, id int64) (*Comment, error) {
+ c := new(Comment)
+ has, err := db.GetEngine(ctx).ID(id).Get(c)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrCommentNotExist{id, 0}
+ }
+ return c, nil
+}
+
+// FindCommentsOptions describes the conditions to Find comments
+type FindCommentsOptions struct {
+ db.ListOptions
+ RepoID int64
+ IssueID int64
+ ReviewID int64
+ Since int64
+ Before int64
+ Line int64
+ TreePath string
+ Type CommentType
+ IssueIDs []int64
+ Invalidated optional.Option[bool]
+ IsPull optional.Option[bool]
+}
+
+// ToConds implements FindOptions interface
+func (opts FindCommentsOptions) ToConds() builder.Cond {
+ cond := builder.NewCond()
+ if opts.RepoID > 0 {
+ cond = cond.And(builder.Eq{"issue.repo_id": opts.RepoID})
+ }
+ if opts.IssueID > 0 {
+ cond = cond.And(builder.Eq{"comment.issue_id": opts.IssueID})
+ } else if len(opts.IssueIDs) > 0 {
+ cond = cond.And(builder.In("comment.issue_id", opts.IssueIDs))
+ }
+ if opts.ReviewID > 0 {
+ cond = cond.And(builder.Eq{"comment.review_id": opts.ReviewID})
+ }
+ if opts.Since > 0 {
+ cond = cond.And(builder.Gte{"comment.updated_unix": opts.Since})
+ }
+ if opts.Before > 0 {
+ cond = cond.And(builder.Lte{"comment.updated_unix": opts.Before})
+ }
+ if opts.Type != CommentTypeUndefined {
+ cond = cond.And(builder.Eq{"comment.type": opts.Type})
+ }
+ if opts.Line != 0 {
+ cond = cond.And(builder.Eq{"comment.line": opts.Line})
+ }
+ if len(opts.TreePath) > 0 {
+ cond = cond.And(builder.Eq{"comment.tree_path": opts.TreePath})
+ }
+ if opts.Invalidated.Has() {
+ cond = cond.And(builder.Eq{"comment.invalidated": opts.Invalidated.Value()})
+ }
+ if opts.IsPull.Has() {
+ cond = cond.And(builder.Eq{"issue.is_pull": opts.IsPull.Value()})
+ }
+ return cond
+}
+
+// FindComments returns all comments according options
+func FindComments(ctx context.Context, opts *FindCommentsOptions) (CommentList, error) {
+ comments := make([]*Comment, 0, 10)
+ sess := db.GetEngine(ctx).Where(opts.ToConds())
+ if opts.RepoID > 0 || opts.IsPull.Has() {
+ sess.Join("INNER", "issue", "issue.id = comment.issue_id")
+ }
+
+ if opts.Page != 0 {
+ sess = db.SetSessionPagination(sess, opts)
+ }
+
+ // WARNING: If you change this order you will need to fix createCodeComment
+
+ return comments, sess.
+ Asc("comment.created_unix").
+ Asc("comment.id").
+ Find(&comments)
+}
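+
+// Usage sketch (illustrative IDs): fetch the code comments on one line of a
+// file in a pull request:
+//
+//	comments, err := FindComments(ctx, &FindCommentsOptions{
+//		Type:     CommentTypeCode,
+//		IssueID:  2,
+//		TreePath: "README.md",
+//		Line:     4,
+//	})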
+
+// CountComments count all comments according options by ignoring pagination
+func CountComments(ctx context.Context, opts *FindCommentsOptions) (int64, error) {
+ sess := db.GetEngine(ctx).Where(opts.ToConds())
+ if opts.RepoID > 0 {
+ sess.Join("INNER", "issue", "issue.id = comment.issue_id")
+ }
+ return sess.Count(&Comment{})
+}
+
+// UpdateCommentInvalidate updates comment invalidated column
+func UpdateCommentInvalidate(ctx context.Context, c *Comment) error {
+ _, err := db.GetEngine(ctx).ID(c.ID).Cols("invalidated").Update(c)
+ return err
+}
+
+// UpdateComment updates information of comment.
+func UpdateComment(ctx context.Context, c *Comment, contentVersion int, doer *user_model.User) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if err := c.LoadIssue(ctx); err != nil {
+ return err
+ }
+
+ sess := db.GetEngine(ctx).ID(c.ID).AllCols()
+ if c.Issue.NoAutoTime {
+ // update the database
+ sess = sess.NoAutoTime().SetExpr("updated_unix", c.Issue.UpdatedUnix)
+ // the UpdatedUnix value of the Comment also has to be set,
+ // so that the adequate value is returned to the caller
+ // see https://codeberg.org/forgejo/forgejo/pulls/764#issuecomment-1023801
+ c.UpdatedUnix = c.Issue.UpdatedUnix
+ }
+ c.ContentVersion = contentVersion + 1
+
+ affected, err := sess.Where("content_version = ?", contentVersion).Update(c)
+ if err != nil {
+ return err
+ }
+ if affected == 0 {
+ return ErrCommentAlreadyChanged
+ }
+ if err := c.AddCrossReferences(ctx, doer, true); err != nil {
+ return err
+ }
+ if err := committer.Commit(); err != nil {
+ return fmt.Errorf("Commit: %w", err)
+ }
+
+ return nil
+}
+
+// DeleteComment deletes the comment
+func DeleteComment(ctx context.Context, comment *Comment) error {
+ e := db.GetEngine(ctx)
+ if _, err := e.ID(comment.ID).NoAutoCondition().Delete(comment); err != nil {
+ return err
+ }
+
+ if _, err := db.DeleteByBean(ctx, &ContentHistory{
+ CommentID: comment.ID,
+ }); err != nil {
+ return err
+ }
+
+ if comment.Type == CommentTypeComment {
+ if _, err := e.ID(comment.IssueID).Decr("num_comments").Update(new(Issue)); err != nil {
+ return err
+ }
+ }
+ if _, err := e.Table("action").
+ Where("comment_id = ?", comment.ID).
+ Update(map[string]any{
+ "is_deleted": true,
+ }); err != nil {
+ return err
+ }
+
+ if err := comment.neuterCrossReferences(ctx); err != nil {
+ return err
+ }
+
+ return DeleteReaction(ctx, &ReactionOptions{CommentID: comment.ID})
+}
+
+// UpdateCommentsMigrationsByType updates the migration information of comments for the given git service type, original author ID and poster ID
+func UpdateCommentsMigrationsByType(ctx context.Context, tp structs.GitServiceType, originalAuthorID string, posterID int64) error {
+ _, err := db.GetEngine(ctx).Table("comment").
+ Join("INNER", "issue", "issue.id = comment.issue_id").
+ Join("INNER", "repository", "issue.repo_id = repository.id").
+ Where("repository.original_service_type = ?", tp).
+ And("comment.original_author_id = ?", originalAuthorID).
+ Update(map[string]any{
+ "poster_id": posterID,
+ "original_author": "",
+ "original_author_id": 0,
+ })
+ return err
+}
+
+// CreateAutoMergeComment is an internal function; only use it for the CommentTypePRScheduledToAutoMerge and CommentTypePRUnScheduledToAutoMerge comment types
+func CreateAutoMergeComment(ctx context.Context, typ CommentType, pr *PullRequest, doer *user_model.User) (comment *Comment, err error) {
+ if typ != CommentTypePRScheduledToAutoMerge && typ != CommentTypePRUnScheduledToAutoMerge {
+ return nil, fmt.Errorf("comment type %d cannot be used to create an auto merge comment", typ)
+ }
+ if err = pr.LoadIssue(ctx); err != nil {
+ return nil, err
+ }
+
+ if err = pr.LoadBaseRepo(ctx); err != nil {
+ return nil, err
+ }
+
+ comment, err = CreateComment(ctx, &CreateCommentOptions{
+ Type: typ,
+ Doer: doer,
+ Repo: pr.BaseRepo,
+ Issue: pr.Issue,
+ })
+ return comment, err
+}
+
+// RemapExternalUser ExternalUserRemappable interface
+func (c *Comment) RemapExternalUser(externalName string, externalID, userID int64) error {
+ c.OriginalAuthor = externalName
+ c.OriginalAuthorID = externalID
+ c.PosterID = userID
+ return nil
+}
+
+// GetUserID ExternalUserRemappable interface
+func (c *Comment) GetUserID() int64 { return c.PosterID }
+
+// GetExternalName ExternalUserRemappable interface
+func (c *Comment) GetExternalName() string { return c.OriginalAuthor }
+
+// GetExternalID ExternalUserRemappable interface
+func (c *Comment) GetExternalID() int64 { return c.OriginalAuthorID }
+
+// CountCommentTypeLabelWithEmptyLabel counts label comments with an empty label
+func CountCommentTypeLabelWithEmptyLabel(ctx context.Context) (int64, error) {
+ return db.GetEngine(ctx).Where(builder.Eq{"type": CommentTypeLabel, "label_id": 0}).Count(new(Comment))
+}
+
+// FixCommentTypeLabelWithEmptyLabel deletes label comments with an empty label
+func FixCommentTypeLabelWithEmptyLabel(ctx context.Context) (int64, error) {
+ return db.GetEngine(ctx).Where(builder.Eq{"type": CommentTypeLabel, "label_id": 0}).Delete(new(Comment))
+}
+
+// CountCommentTypeLabelWithOutsideLabels counts label comments that reference labels from outside the repository or organization
+func CountCommentTypeLabelWithOutsideLabels(ctx context.Context) (int64, error) {
+ return db.GetEngine(ctx).Where("comment.type = ? AND ((label.org_id = 0 AND issue.repo_id != label.repo_id) OR (label.repo_id = 0 AND label.org_id != repository.owner_id))", CommentTypeLabel).
+ Table("comment").
+ Join("inner", "label", "label.id = comment.label_id").
+ Join("inner", "issue", "issue.id = comment.issue_id ").
+ Join("inner", "repository", "issue.repo_id = repository.id").
+ Count()
+}
+
+// FixCommentTypeLabelWithOutsideLabels deletes label comments that reference labels from outside the repository or organization
+func FixCommentTypeLabelWithOutsideLabels(ctx context.Context) (int64, error) {
+ res, err := db.GetEngine(ctx).Exec(`DELETE FROM comment WHERE comment.id IN (
+ SELECT il_too.id FROM (
+ SELECT com.id
+ FROM comment AS com
+ INNER JOIN label ON com.label_id = label.id
+ INNER JOIN issue on issue.id = com.issue_id
+ INNER JOIN repository ON issue.repo_id = repository.id
+ WHERE
+ com.type = ? AND ((label.org_id = 0 AND issue.repo_id != label.repo_id) OR (label.repo_id = 0 AND label.org_id != repository.owner_id))
+ ) AS il_too)`, CommentTypeLabel)
+ if err != nil {
+ return 0, err
+ }
+
+ return res.RowsAffected()
+}
+
+// HasOriginalAuthor returns whether a comment was migrated and has an original author.
+func (c *Comment) HasOriginalAuthor() bool {
+ return c.OriginalAuthor != "" && c.OriginalAuthorID != 0
+}
+
+// InsertIssueComments inserts many comments of issues.
+func InsertIssueComments(ctx context.Context, comments []*Comment) error {
+ if len(comments) == 0 {
+ return nil
+ }
+
+ issueIDs := container.FilterSlice(comments, func(comment *Comment) (int64, bool) {
+ return comment.IssueID, true
+ })
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+ for _, comment := range comments {
+ if _, err := db.GetEngine(ctx).NoAutoTime().Insert(comment); err != nil {
+ return err
+ }
+
+ for _, reaction := range comment.Reactions {
+ reaction.IssueID = comment.IssueID
+ reaction.CommentID = comment.ID
+ }
+ if len(comment.Reactions) > 0 {
+ if err := db.Insert(ctx, comment.Reactions); err != nil {
+ return err
+ }
+ }
+ }
+
+ for _, issueID := range issueIDs {
+ if _, err := db.Exec(ctx, "UPDATE issue set num_comments = (SELECT count(*) FROM comment WHERE issue_id = ? AND `type`=?) WHERE id = ?",
+ issueID, CommentTypeComment, issueID); err != nil {
+ return err
+ }
+ }
+ return committer.Commit()
+}
diff --git a/models/issues/comment_code.go b/models/issues/comment_code.go
new file mode 100644
index 0000000..2f6f57e
--- /dev/null
+++ b/models/issues/comment_code.go
@@ -0,0 +1,181 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/markup"
+ "code.gitea.io/gitea/modules/markup/markdown"
+
+ "xorm.io/builder"
+)
+
+// CodeConversation contains the comments of a given review thread (same review, tree path and line)
+type CodeConversation []*Comment
+
+// CodeConversationsAtLine contains the conversations for a given line
+type CodeConversationsAtLine map[int64][]CodeConversation
+
+// CodeConversationsAtLineAndTreePath contains the conversations for a given TreePath and line
+type CodeConversationsAtLineAndTreePath map[string]CodeConversationsAtLine
+
+func newCodeConversationsAtLineAndTreePath(comments []*Comment) CodeConversationsAtLineAndTreePath {
+ tree := make(CodeConversationsAtLineAndTreePath)
+ for _, comment := range comments {
+ tree.insertComment(comment)
+ }
+ return tree
+}
+
+func (tree CodeConversationsAtLineAndTreePath) insertComment(comment *Comment) {
+ // attempt to append comment to existing conversations (i.e. list of comments belonging to the same review)
+ for i, conversation := range tree[comment.TreePath][comment.Line] {
+ if conversation[0].ReviewID == comment.ReviewID {
+ tree[comment.TreePath][comment.Line][i] = append(conversation, comment)
+ return
+ }
+ }
+
+ // no previous conversation was found at this line, create it
+ if tree[comment.TreePath] == nil {
+ tree[comment.TreePath] = make(map[int64][]CodeConversation)
+ }
+
+ tree[comment.TreePath][comment.Line] = append(tree[comment.TreePath][comment.Line], CodeConversation{comment})
+}
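+
+// Resulting shape (illustrative): comments on README.md line 4 from two
+// different reviews end up in two separate conversations:
+//
+//	tree["README.md"][4] == []CodeConversation{
+//		{reviewAComment1, reviewAComment2},
+//		{reviewBComment},
+//	}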
+
+// FetchCodeConversations will return a 2d-map: ["Path"]["Line"] = List of CodeConversation (one per review) for this line
+func FetchCodeConversations(ctx context.Context, issue *Issue, doer *user_model.User, showOutdatedComments bool) (CodeConversationsAtLineAndTreePath, error) {
+ opts := FindCommentsOptions{
+ Type: CommentTypeCode,
+ IssueID: issue.ID,
+ }
+ comments, err := findCodeComments(ctx, opts, issue, doer, nil, showOutdatedComments)
+ if err != nil {
+ return nil, err
+ }
+
+ return newCodeConversationsAtLineAndTreePath(comments), nil
+}
+
+// CodeComments represents comments on code by using this structure: FILENAME -> LINE (+ == proposed; - == previous) -> COMMENTS
+type CodeComments map[string]map[int64][]*Comment
+
+func fetchCodeCommentsByReview(ctx context.Context, issue *Issue, doer *user_model.User, review *Review, showOutdatedComments bool) (CodeComments, error) {
+ pathToLineToComment := make(CodeComments)
+ if review == nil {
+ review = &Review{ID: 0}
+ }
+ opts := FindCommentsOptions{
+ Type: CommentTypeCode,
+ IssueID: issue.ID,
+ ReviewID: review.ID,
+ }
+
+ comments, err := findCodeComments(ctx, opts, issue, doer, review, showOutdatedComments)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, comment := range comments {
+ if pathToLineToComment[comment.TreePath] == nil {
+ pathToLineToComment[comment.TreePath] = make(map[int64][]*Comment)
+ }
+ pathToLineToComment[comment.TreePath][comment.Line] = append(pathToLineToComment[comment.TreePath][comment.Line], comment)
+ }
+ return pathToLineToComment, nil
+}
+
+func findCodeComments(ctx context.Context, opts FindCommentsOptions, issue *Issue, doer *user_model.User, review *Review, showOutdatedComments bool) (CommentList, error) {
+ var comments CommentList
+ if review == nil {
+ review = &Review{ID: 0}
+ }
+ conds := opts.ToConds()
+
+ if !showOutdatedComments && review.ID == 0 {
+ conds = conds.And(builder.Eq{"invalidated": false})
+ }
+
+ e := db.GetEngine(ctx)
+ if err := e.Where(conds).
+ Asc("comment.created_unix").
+ Asc("comment.id").
+ Find(&comments); err != nil {
+ return nil, err
+ }
+
+ if err := issue.LoadRepo(ctx); err != nil {
+ return nil, err
+ }
+
+ if err := comments.LoadPosters(ctx); err != nil {
+ return nil, err
+ }
+
+ if err := comments.LoadAttachments(ctx); err != nil {
+ return nil, err
+ }
+
+ // Find all reviews by ReviewID
+ reviews := make(map[int64]*Review)
+ ids := make([]int64, 0, len(comments))
+ for _, comment := range comments {
+ if comment.ReviewID != 0 {
+ ids = append(ids, comment.ReviewID)
+ }
+ }
+ if err := e.In("id", ids).Find(&reviews); err != nil {
+ return nil, err
+ }
+
+ n := 0
+ for _, comment := range comments {
+ if re, ok := reviews[comment.ReviewID]; ok && re != nil {
+ // If the review is pending only the author can see the comments (except if the review is set)
+ if review.ID == 0 && re.Type == ReviewTypePending &&
+ (doer == nil || doer.ID != re.ReviewerID) {
+ continue
+ }
+ comment.Review = re
+ }
+ comments[n] = comment
+ n++
+
+ if err := comment.LoadResolveDoer(ctx); err != nil {
+ return nil, err
+ }
+
+ if err := comment.LoadReactions(ctx, issue.Repo); err != nil {
+ return nil, err
+ }
+
+ var err error
+ if comment.RenderedContent, err = markdown.RenderString(&markup.RenderContext{
+ Ctx: ctx,
+ Links: markup.Links{
+ Base: issue.Repo.Link(),
+ },
+ Metas: issue.Repo.ComposeMetas(ctx),
+ }, comment.Content); err != nil {
+ return nil, err
+ }
+ }
+ return comments[:n], nil
+}
+
+// FetchCodeConversation fetches the code conversation of a given comment (same review, treePath and line number)
+func FetchCodeConversation(ctx context.Context, comment *Comment, doer *user_model.User) (CommentList, error) {
+ opts := FindCommentsOptions{
+ Type: CommentTypeCode,
+ IssueID: comment.IssueID,
+ ReviewID: comment.ReviewID,
+ TreePath: comment.TreePath,
+ Line: comment.Line,
+ }
+ return findCodeComments(ctx, opts, comment.Issue, doer, nil, true)
+}
diff --git a/models/issues/comment_list.go b/models/issues/comment_list.go
new file mode 100644
index 0000000..7a133d1
--- /dev/null
+++ b/models/issues/comment_list.go
@@ -0,0 +1,488 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/log"
+)
+
+// CommentList defines a list of comments
+type CommentList []*Comment
+
+// LoadPosters loads posters
+func (comments CommentList) LoadPosters(ctx context.Context) error {
+ if len(comments) == 0 {
+ return nil
+ }
+
+ posterIDs := container.FilterSlice(comments, func(c *Comment) (int64, bool) {
+ return c.PosterID, c.Poster == nil && user_model.IsValidUserID(c.PosterID)
+ })
+
+ posterMaps, err := getPostersByIDs(ctx, posterIDs)
+ if err != nil {
+ return err
+ }
+
+ for _, comment := range comments {
+ if comment.Poster == nil {
+ comment.PosterID, comment.Poster = user_model.GetUserFromMap(comment.PosterID, posterMaps)
+ }
+ }
+ return nil
+}
+
+func (comments CommentList) getLabelIDs() []int64 {
+ return container.FilterSlice(comments, func(comment *Comment) (int64, bool) {
+ return comment.LabelID, comment.LabelID > 0
+ })
+}
+
+func (comments CommentList) loadLabels(ctx context.Context) error {
+ if len(comments) == 0 {
+ return nil
+ }
+
+ labelIDs := comments.getLabelIDs()
+ commentLabels := make(map[int64]*Label, len(labelIDs))
+ left := len(labelIDs)
+ for left > 0 {
+ limit := db.DefaultMaxInSize
+ if left < limit {
+ limit = left
+ }
+ rows, err := db.GetEngine(ctx).
+ In("id", labelIDs[:limit]).
+ Rows(new(Label))
+ if err != nil {
+ return err
+ }
+
+ for rows.Next() {
+ var label Label
+ err = rows.Scan(&label)
+ if err != nil {
+ _ = rows.Close()
+ return err
+ }
+ commentLabels[label.ID] = &label
+ }
+ _ = rows.Close()
+ left -= limit
+ labelIDs = labelIDs[limit:]
+ }
+
+ for _, comment := range comments {
+ comment.Label = commentLabels[comment.LabelID]
+ }
+ return nil
+}
+
+func (comments CommentList) getMilestoneIDs() []int64 {
+ return container.FilterSlice(comments, func(comment *Comment) (int64, bool) {
+ return comment.MilestoneID, comment.MilestoneID > 0
+ })
+}
+
+func (comments CommentList) loadMilestones(ctx context.Context) error {
+ if len(comments) == 0 {
+ return nil
+ }
+
+ milestoneIDs := comments.getMilestoneIDs()
+ if len(milestoneIDs) == 0 {
+ return nil
+ }
+
+ milestoneMaps := make(map[int64]*Milestone, len(milestoneIDs))
+ left := len(milestoneIDs)
+ for left > 0 {
+ limit := db.DefaultMaxInSize
+ if left < limit {
+ limit = left
+ }
+ err := db.GetEngine(ctx).
+ In("id", milestoneIDs[:limit]).
+ Find(&milestoneMaps)
+ if err != nil {
+ return err
+ }
+ left -= limit
+ milestoneIDs = milestoneIDs[limit:]
+ }
+
+ for _, comment := range comments {
+ comment.Milestone = milestoneMaps[comment.MilestoneID]
+ }
+ return nil
+}
+
+func (comments CommentList) getOldMilestoneIDs() []int64 {
+ return container.FilterSlice(comments, func(comment *Comment) (int64, bool) {
+ return comment.OldMilestoneID, comment.OldMilestoneID > 0
+ })
+}
+
+func (comments CommentList) loadOldMilestones(ctx context.Context) error {
+ if len(comments) == 0 {
+ return nil
+ }
+
+ milestoneIDs := comments.getOldMilestoneIDs()
+ if len(milestoneIDs) == 0 {
+ return nil
+ }
+
+ milestoneMaps := make(map[int64]*Milestone, len(milestoneIDs))
+ left := len(milestoneIDs)
+ for left > 0 {
+ limit := db.DefaultMaxInSize
+ if left < limit {
+ limit = left
+ }
+ err := db.GetEngine(ctx).
+ In("id", milestoneIDs[:limit]).
+ Find(&milestoneMaps)
+ if err != nil {
+ return err
+ }
+ left -= limit
+ milestoneIDs = milestoneIDs[limit:]
+ }
+
+ for _, comment := range comments {
+ comment.OldMilestone = milestoneMaps[comment.OldMilestoneID]
+ }
+ return nil
+}
+
+func (comments CommentList) getAssigneeIDs() []int64 {
+ return container.FilterSlice(comments, func(comment *Comment) (int64, bool) {
+ return comment.AssigneeID, user_model.IsValidUserID(comment.AssigneeID)
+ })
+}
+
+func (comments CommentList) loadAssignees(ctx context.Context) error {
+ if len(comments) == 0 {
+ return nil
+ }
+
+ assigneeIDs := comments.getAssigneeIDs()
+ assignees := make(map[int64]*user_model.User, len(assigneeIDs))
+ left := len(assigneeIDs)
+ for left > 0 {
+ limit := db.DefaultMaxInSize
+ if left < limit {
+ limit = left
+ }
+ rows, err := db.GetEngine(ctx).
+ In("id", assigneeIDs[:limit]).
+ Rows(new(user_model.User))
+ if err != nil {
+ return err
+ }
+
+ for rows.Next() {
+ var user user_model.User
+ err = rows.Scan(&user)
+ if err != nil {
+ rows.Close()
+ return err
+ }
+
+ assignees[user.ID] = &user
+ }
+ _ = rows.Close()
+
+ left -= limit
+ assigneeIDs = assigneeIDs[limit:]
+ }
+
+ for _, comment := range comments {
+ comment.AssigneeID, comment.Assignee = user_model.GetUserFromMap(comment.AssigneeID, assignees)
+ }
+ return nil
+}
+
+// getIssueIDs returns the IDs of all issues in this comment list whose Issue field has not been loaded yet
+func (comments CommentList) getIssueIDs() []int64 {
+ return container.FilterSlice(comments, func(comment *Comment) (int64, bool) {
+ return comment.IssueID, comment.Issue == nil
+ })
+}
+
+// Issues returns all the issues of comments
+func (comments CommentList) Issues() IssueList {
+ issues := make(map[int64]*Issue, len(comments))
+ for _, comment := range comments {
+ if comment.Issue != nil {
+ if _, ok := issues[comment.Issue.ID]; !ok {
+ issues[comment.Issue.ID] = comment.Issue
+ }
+ }
+ }
+
+ issueList := make([]*Issue, 0, len(issues))
+ for _, issue := range issues {
+ issueList = append(issueList, issue)
+ }
+ return issueList
+}
+
+// LoadIssues loads issues of comments
+func (comments CommentList) LoadIssues(ctx context.Context) error {
+ if len(comments) == 0 {
+ return nil
+ }
+
+ issueIDs := comments.getIssueIDs()
+ issues := make(map[int64]*Issue, len(issueIDs))
+ left := len(issueIDs)
+ for left > 0 {
+ limit := db.DefaultMaxInSize
+ if left < limit {
+ limit = left
+ }
+ rows, err := db.GetEngine(ctx).
+ In("id", issueIDs[:limit]).
+ Rows(new(Issue))
+ if err != nil {
+ return err
+ }
+
+ for rows.Next() {
+ var issue Issue
+ err = rows.Scan(&issue)
+ if err != nil {
+ rows.Close()
+ return err
+ }
+
+ issues[issue.ID] = &issue
+ }
+ _ = rows.Close()
+
+ left -= limit
+ issueIDs = issueIDs[limit:]
+ }
+
+ for _, comment := range comments {
+ if comment.Issue == nil {
+ comment.Issue = issues[comment.IssueID]
+ }
+ }
+ return nil
+}
+
+func (comments CommentList) getDependentIssueIDs() []int64 {
+ return container.FilterSlice(comments, func(comment *Comment) (int64, bool) {
+ if comment.DependentIssue != nil {
+ return 0, false
+ }
+ return comment.DependentIssueID, comment.DependentIssueID > 0
+ })
+}
+
+func (comments CommentList) loadDependentIssues(ctx context.Context) error {
+ if len(comments) == 0 {
+ return nil
+ }
+
+ e := db.GetEngine(ctx)
+ issueIDs := comments.getDependentIssueIDs()
+ issues := make(map[int64]*Issue, len(issueIDs))
+ left := len(issueIDs)
+ for left > 0 {
+ limit := db.DefaultMaxInSize
+ if left < limit {
+ limit = left
+ }
+ rows, err := e.
+ In("id", issueIDs[:limit]).
+ Rows(new(Issue))
+ if err != nil {
+ return err
+ }
+
+ for rows.Next() {
+ var issue Issue
+ err = rows.Scan(&issue)
+ if err != nil {
+ _ = rows.Close()
+ return err
+ }
+
+ issues[issue.ID] = &issue
+ }
+ _ = rows.Close()
+
+ left -= limit
+ issueIDs = issueIDs[limit:]
+ }
+
+ for _, comment := range comments {
+ if comment.DependentIssue == nil {
+ comment.DependentIssue = issues[comment.DependentIssueID]
+ if comment.DependentIssue != nil {
+ if err := comment.DependentIssue.LoadRepo(ctx); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// getAttachmentCommentIDs returns only the IDs of comments whose type supports attachments
+func (comments CommentList) getAttachmentCommentIDs() []int64 {
+ return container.FilterSlice(comments, func(comment *Comment) (int64, bool) {
+ return comment.ID, comment.Type.HasAttachmentSupport()
+ })
+}
+
+// LoadAttachmentsByIssue loads attachments by issue id
+func (comments CommentList) LoadAttachmentsByIssue(ctx context.Context) error {
+ if len(comments) == 0 {
+ return nil
+ }
+
+ attachments := make([]*repo_model.Attachment, 0, len(comments)/2)
+ if err := db.GetEngine(ctx).Where("issue_id=? AND comment_id>0", comments[0].IssueID).Find(&attachments); err != nil {
+ return err
+ }
+
+ commentAttachmentsMap := make(map[int64][]*repo_model.Attachment, len(comments))
+ for _, attach := range attachments {
+ commentAttachmentsMap[attach.CommentID] = append(commentAttachmentsMap[attach.CommentID], attach)
+ }
+
+ for _, comment := range comments {
+ comment.Attachments = commentAttachmentsMap[comment.ID]
+ }
+ return nil
+}
+
+// LoadAttachments loads attachments
+func (comments CommentList) LoadAttachments(ctx context.Context) (err error) {
+ if len(comments) == 0 {
+ return nil
+ }
+
+ attachments := make(map[int64][]*repo_model.Attachment, len(comments))
+ commentsIDs := comments.getAttachmentCommentIDs()
+ left := len(commentsIDs)
+ for left > 0 {
+ limit := db.DefaultMaxInSize
+ if left < limit {
+ limit = left
+ }
+ rows, err := db.GetEngine(ctx).
+ In("comment_id", commentsIDs[:limit]).
+ Rows(new(repo_model.Attachment))
+ if err != nil {
+ return err
+ }
+
+ for rows.Next() {
+ var attachment repo_model.Attachment
+ err = rows.Scan(&attachment)
+ if err != nil {
+ _ = rows.Close()
+ return err
+ }
+ attachments[attachment.CommentID] = append(attachments[attachment.CommentID], &attachment)
+ }
+
+ _ = rows.Close()
+ left -= limit
+ commentsIDs = commentsIDs[limit:]
+ }
+
+ for _, comment := range comments {
+ comment.Attachments = attachments[comment.ID]
+ }
+ return nil
+}
+
+func (comments CommentList) getReviewIDs() []int64 {
+ return container.FilterSlice(comments, func(comment *Comment) (int64, bool) {
+ return comment.ReviewID, comment.ReviewID > 0
+ })
+}
+
+func (comments CommentList) loadReviews(ctx context.Context) error {
+ if len(comments) == 0 {
+ return nil
+ }
+
+ reviewIDs := comments.getReviewIDs()
+ reviews := make(map[int64]*Review, len(reviewIDs))
+ if err := db.GetEngine(ctx).In("id", reviewIDs).Find(&reviews); err != nil {
+ return err
+ }
+
+ for _, comment := range comments {
+ comment.Review = reviews[comment.ReviewID]
+ if comment.Review == nil {
+ // review requests that have been replaced by actual reviews no longer exist in the database, so don't log errors for them.
+ if comment.ReviewID > 0 && comment.Type != CommentTypeReviewRequest {
+ log.Error("comment with review id [%d] but has no review record", comment.ReviewID)
+ }
+ continue
+ }
+
+ // If the comment dismisses a review, we need to load the reviewer to show whose review has been dismissed.
+ // Otherwise, the reviewer is the poster of the comment, so we don't need to load it.
+ if comment.Type == CommentTypeDismissReview {
+ if err := comment.Review.LoadReviewer(ctx); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// LoadAttributes loads the posters, labels, milestones, assignees, attachments,
+// reviews, issues and dependent issues of the comments
+func (comments CommentList) LoadAttributes(ctx context.Context) (err error) {
+ if err = comments.LoadPosters(ctx); err != nil {
+ return err
+ }
+
+ if err = comments.loadLabels(ctx); err != nil {
+ return err
+ }
+
+ if err = comments.loadMilestones(ctx); err != nil {
+ return err
+ }
+
+ if err = comments.loadOldMilestones(ctx); err != nil {
+ return err
+ }
+
+ if err = comments.loadAssignees(ctx); err != nil {
+ return err
+ }
+
+ if err = comments.LoadAttachments(ctx); err != nil {
+ return err
+ }
+
+ if err = comments.loadReviews(ctx); err != nil {
+ return err
+ }
+
+ if err = comments.LoadIssues(ctx); err != nil {
+ return err
+ }
+
+ return comments.loadDependentIssues(ctx)
+}
diff --git a/models/issues/comment_list_test.go b/models/issues/comment_list_test.go
new file mode 100644
index 0000000..5ad1cd1
--- /dev/null
+++ b/models/issues/comment_list_test.go
@@ -0,0 +1,86 @@
+// Copyright 2024 The Forgejo Authors
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestCommentListLoadUser(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ issue := unittest.AssertExistsAndLoadBean(t, &Issue{})
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: issue.RepoID})
+ doer := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: repo.OwnerID})
+
+ for _, testCase := range []struct {
+ poster int64
+ assignee int64
+ user *user_model.User
+ }{
+ {
+ poster: user_model.ActionsUserID,
+ assignee: user_model.ActionsUserID,
+ user: user_model.NewActionsUser(),
+ },
+ {
+ poster: user_model.GhostUserID,
+ assignee: user_model.GhostUserID,
+ user: user_model.NewGhostUser(),
+ },
+ {
+ poster: doer.ID,
+ assignee: doer.ID,
+ user: doer,
+ },
+ {
+ poster: 0,
+ assignee: 0,
+ user: user_model.NewGhostUser(),
+ },
+ {
+ poster: -200,
+ assignee: -200,
+ user: user_model.NewGhostUser(),
+ },
+ {
+ poster: 200,
+ assignee: 200,
+ user: user_model.NewGhostUser(),
+ },
+ } {
+ t.Run(testCase.user.Name, func(t *testing.T) {
+ comment, err := CreateComment(db.DefaultContext, &CreateCommentOptions{
+ Type: CommentTypeComment,
+ Doer: testCase.user,
+ Repo: repo,
+ Issue: issue,
+ Content: "Hello",
+ })
+ assert.NoError(t, err)
+
+ list := CommentList{comment}
+
+ comment.PosterID = testCase.poster
+ comment.Poster = nil
+ assert.NoError(t, list.LoadPosters(db.DefaultContext))
+ require.NotNil(t, comment.Poster)
+ assert.Equal(t, testCase.user.ID, comment.Poster.ID)
+
+ comment.AssigneeID = testCase.assignee
+ comment.Assignee = nil
+ require.NoError(t, list.loadAssignees(db.DefaultContext))
+ require.NotNil(t, comment.Assignee)
+ assert.Equal(t, testCase.user.ID, comment.Assignee.ID)
+ })
+ }
+}
diff --git a/models/issues/comment_test.go b/models/issues/comment_test.go
new file mode 100644
index 0000000..f7088cc
--- /dev/null
+++ b/models/issues/comment_test.go
@@ -0,0 +1,127 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues_test
+
+import (
+ "testing"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/structs"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestCreateComment(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{})
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: issue.RepoID})
+ doer := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: repo.OwnerID})
+
+ now := time.Now().Unix()
+ comment, err := issues_model.CreateComment(db.DefaultContext, &issues_model.CreateCommentOptions{
+ Type: issues_model.CommentTypeComment,
+ Doer: doer,
+ Repo: repo,
+ Issue: issue,
+ Content: "Hello",
+ })
+ require.NoError(t, err)
+ then := time.Now().Unix()
+
+ assert.EqualValues(t, issues_model.CommentTypeComment, comment.Type)
+ assert.EqualValues(t, "Hello", comment.Content)
+ assert.EqualValues(t, issue.ID, comment.IssueID)
+ assert.EqualValues(t, doer.ID, comment.PosterID)
+ unittest.AssertInt64InRange(t, now, then, int64(comment.CreatedUnix))
+ unittest.AssertExistsAndLoadBean(t, comment) // assert actually added to DB
+
+ updatedIssue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: issue.ID})
+ unittest.AssertInt64InRange(t, now, then, int64(updatedIssue.UpdatedUnix))
+}
+
+func TestFetchCodeConversations(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 2})
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+ res, err := issues_model.FetchCodeConversations(db.DefaultContext, issue, user, false)
+ require.NoError(t, err)
+ assert.Contains(t, res, "README.md")
+ assert.Contains(t, res["README.md"], int64(4))
+ assert.Len(t, res["README.md"][4], 1)
+ assert.Equal(t, int64(4), res["README.md"][4][0][0].ID)
+
+ user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+ res, err = issues_model.FetchCodeConversations(db.DefaultContext, issue, user2, false)
+ require.NoError(t, err)
+ assert.Len(t, res, 1)
+}
+
+func TestAsCommentType(t *testing.T) {
+ assert.Equal(t, issues_model.CommentTypeComment, issues_model.CommentType(0))
+ assert.Equal(t, issues_model.CommentTypeUndefined, issues_model.AsCommentType(""))
+ assert.Equal(t, issues_model.CommentTypeUndefined, issues_model.AsCommentType("nonsense"))
+ assert.Equal(t, issues_model.CommentTypeComment, issues_model.AsCommentType("comment"))
+ assert.Equal(t, issues_model.CommentTypePRUnScheduledToAutoMerge, issues_model.AsCommentType("pull_cancel_scheduled_merge"))
+}
+
+func TestMigrate_InsertIssueComments(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 1})
+ _ = issue.LoadRepo(db.DefaultContext)
+ owner := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: issue.Repo.OwnerID})
+ reaction := &issues_model.Reaction{
+ Type: "heart",
+ UserID: owner.ID,
+ }
+
+ comment := &issues_model.Comment{
+ PosterID: owner.ID,
+ Poster: owner,
+ IssueID: issue.ID,
+ Issue: issue,
+ Reactions: []*issues_model.Reaction{reaction},
+ }
+
+ err := issues_model.InsertIssueComments(db.DefaultContext, []*issues_model.Comment{comment})
+ require.NoError(t, err)
+
+ issueModified := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 1})
+ assert.EqualValues(t, issue.NumComments+1, issueModified.NumComments)
+
+ unittest.CheckConsistencyFor(t, &issues_model.Issue{})
+}
+
+func TestUpdateCommentsMigrationsByType(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 1})
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: issue.RepoID})
+ comment := unittest.AssertExistsAndLoadBean(t, &issues_model.Comment{ID: 1, IssueID: issue.ID})
+
+	// Mark the repository as migrated from Gitea.
+ repo.OriginalServiceType = structs.GiteaService
+ repo_model.UpdateRepositoryCols(db.DefaultContext, repo, "original_service_type")
+
+ // Set comment to have an original author.
+ comment.OriginalAuthor = "Example User"
+ comment.OriginalAuthorID = 1
+ comment.PosterID = 0
+ _, err := db.GetEngine(db.DefaultContext).ID(comment.ID).Cols("original_author", "original_author_id", "poster_id").Update(comment)
+ require.NoError(t, err)
+
+ require.NoError(t, issues_model.UpdateCommentsMigrationsByType(db.DefaultContext, structs.GiteaService, "1", 513))
+
+ comment = unittest.AssertExistsAndLoadBean(t, &issues_model.Comment{ID: 1, IssueID: issue.ID})
+ assert.Empty(t, comment.OriginalAuthor)
+ assert.Empty(t, comment.OriginalAuthorID)
+ assert.EqualValues(t, 513, comment.PosterID)
+}
diff --git a/models/issues/content_history.go b/models/issues/content_history.go
new file mode 100644
index 0000000..cd3e217
--- /dev/null
+++ b/models/issues/content_history.go
@@ -0,0 +1,242 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/avatars"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ContentHistory represents a content history revision of an issue or comment.
+type ContentHistory struct {
+ ID int64 `xorm:"pk autoincr"`
+ PosterID int64
+ IssueID int64 `xorm:"INDEX"`
+ CommentID int64 `xorm:"INDEX"`
+ EditedUnix timeutil.TimeStamp `xorm:"INDEX"`
+ ContentText string `xorm:"LONGTEXT"`
+ IsFirstCreated bool
+ IsDeleted bool
+}
+
+// TableName provides the real table name
+func (m *ContentHistory) TableName() string {
+ return "issue_content_history"
+}
+
+func init() {
+ db.RegisterModel(new(ContentHistory))
+}
+
+// SaveIssueContentHistory saves a new history revision and prunes old ones.
+func SaveIssueContentHistory(ctx context.Context, posterID, issueID, commentID int64, editTime timeutil.TimeStamp, contentText string, isFirstCreated bool) error {
+ ch := &ContentHistory{
+ PosterID: posterID,
+ IssueID: issueID,
+ CommentID: commentID,
+ ContentText: contentText,
+ EditedUnix: editTime,
+ IsFirstCreated: isFirstCreated,
+ }
+ if err := db.Insert(ctx, ch); err != nil {
+ log.Error("can not save issue content history. err=%v", err)
+ return err
+ }
+ // We only keep at most 20 history revisions now. It is enough in most cases.
+ // If there is a special requirement to keep more, we can consider introducing a new setting option then, but not now.
+ KeepLimitedContentHistory(ctx, issueID, commentID, 20)
+ return nil
+}
+
+// KeepLimitedContentHistory keeps at most `limit` history revisions; it hard-deletes outdated revisions,
+// picking the revision with the smallest edit interval each round. Errors are not fatal here, so they are only logged.
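+// For example, with limit=3 and revisions at offsets [0, 5, 20, 50, 51], the revisions at
+// offsets 5 and 20 are deleted first (smallest gaps); the first and the last are never deleted.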
+func KeepLimitedContentHistory(ctx context.Context, issueID, commentID int64, limit int) {
+ type IDEditTime struct {
+ ID int64
+ EditedUnix timeutil.TimeStamp
+ }
+
+ var res []*IDEditTime
+ err := db.GetEngine(ctx).Select("id, edited_unix").Table("issue_content_history").
+ Where(builder.Eq{"issue_id": issueID, "comment_id": commentID}).
+ OrderBy("edited_unix ASC").
+ Find(&res)
+ if err != nil {
+ log.Error("can not query content history for deletion, err=%v", err)
+ return
+ }
+ if len(res) <= 2 {
+ return
+ }
+
+ outDatedCount := len(res) - limit
+ for outDatedCount > 0 {
+ var indexToDelete int
+ minEditedInterval := -1
+ // find a history revision with minimal edited interval to delete, the first and the last should never be deleted
+ for i := 1; i < len(res)-1; i++ {
+ editedInterval := int(res[i].EditedUnix - res[i-1].EditedUnix)
+ if minEditedInterval == -1 || editedInterval < minEditedInterval {
+ minEditedInterval = editedInterval
+ indexToDelete = i
+ }
+ }
+ if indexToDelete == 0 {
+ break
+ }
+
+ // hard delete the found one
+ _, err = db.GetEngine(ctx).Delete(&ContentHistory{ID: res[indexToDelete].ID})
+ if err != nil {
+ log.Error("can not delete out-dated content history, err=%v", err)
+ break
+ }
+ res = append(res[:indexToDelete], res[indexToDelete+1:]...)
+ outDatedCount--
+ }
+}
+
+// QueryIssueContentHistoryEditedCountMap queries the history revision count of each comment (comment_id = 0 means the issue body),
+// only returning the count map for "edited" (history revision count > 1) issues or comments.
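+// For example, a result of {0: 3, 100: 5} means the issue body has 3 revisions and comment 100 has 5.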
+func QueryIssueContentHistoryEditedCountMap(dbCtx context.Context, issueID int64) (map[int64]int, error) {
+ type HistoryCountRecord struct {
+ CommentID int64
+ HistoryCount int
+ }
+ records := make([]*HistoryCountRecord, 0)
+
+ err := db.GetEngine(dbCtx).Select("comment_id, COUNT(1) as history_count").
+ Table("issue_content_history").
+ Where(builder.Eq{"issue_id": issueID}).
+ GroupBy("comment_id").
+ Having("count(1) > 1").
+ Find(&records)
+ if err != nil {
+ log.Error("can not query issue content history count map. err=%v", err)
+ return nil, err
+ }
+
+ res := map[int64]int{}
+ for _, r := range records {
+ res[r.CommentID] = r.HistoryCount
+ }
+ return res, nil
+}
+
+// IssueContentListItem is a list item for the web UI
+type IssueContentListItem struct {
+ UserID int64
+ UserName string
+ UserFullName string
+ UserAvatarLink string
+
+ HistoryID int64
+ EditedUnix timeutil.TimeStamp
+ IsFirstCreated bool
+ IsDeleted bool
+}
+
+// FetchIssueContentHistoryList fetches the history list of an issue or comment
+func FetchIssueContentHistoryList(dbCtx context.Context, issueID, commentID int64) ([]*IssueContentListItem, error) {
+ res := make([]*IssueContentListItem, 0)
+ err := db.GetEngine(dbCtx).Select("u.id as user_id, u.name as user_name, u.full_name as user_full_name,"+
+ "h.id as history_id, h.edited_unix, h.is_first_created, h.is_deleted").
+ Table([]string{"issue_content_history", "h"}).
+ Join("LEFT", []string{"user", "u"}, "h.poster_id = u.id").
+ Where(builder.Eq{"issue_id": issueID, "comment_id": commentID}).
+ OrderBy("edited_unix DESC").
+ Find(&res)
+ if err != nil {
+ log.Error("can not fetch issue content history list. err=%v", err)
+ return nil, err
+ }
+
+ for _, item := range res {
+ if item.UserID > 0 {
+ item.UserAvatarLink = avatars.GenerateUserAvatarFastLink(item.UserName, 0)
+ } else {
+ item.UserAvatarLink = avatars.DefaultAvatarLink()
+ }
+ }
+ return res, nil
+}
+
+// HasIssueContentHistory checks whether a ContentHistory entry exists
+func HasIssueContentHistory(dbCtx context.Context, issueID, commentID int64) (bool, error) {
+ return db.GetEngine(dbCtx).Where("issue_id = ? AND comment_id = ?", issueID, commentID).Exist(new(ContentHistory))
+}
+
+// SoftDeleteIssueContentHistory soft-deletes a history revision and clears its content
+func SoftDeleteIssueContentHistory(dbCtx context.Context, historyID int64) error {
+ if _, err := db.GetEngine(dbCtx).ID(historyID).Cols("is_deleted", "content_text").Update(&ContentHistory{
+ IsDeleted: true,
+ ContentText: "",
+ }); err != nil {
+ log.Error("failed to soft delete issue content history. err=%v", err)
+ return err
+ }
+ return nil
+}
+
+// ErrIssueContentHistoryNotExist represents a "content history does not exist" error
+type ErrIssueContentHistoryNotExist struct {
+ ID int64
+}
+
+// Error returns the error message
+func (err ErrIssueContentHistoryNotExist) Error() string {
+ return fmt.Sprintf("issue content history does not exist [id: %d]", err.ID)
+}
+
+func (err ErrIssueContentHistoryNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// GetIssueContentHistoryByID returns the issue content history with the given ID
+func GetIssueContentHistoryByID(dbCtx context.Context, id int64) (*ContentHistory, error) {
+ h := &ContentHistory{}
+ has, err := db.GetEngine(dbCtx).ID(id).Get(h)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrIssueContentHistoryNotExist{id}
+ }
+ return h, nil
+}
+
+// GetIssueContentHistoryAndPrev get a history and the previous non-deleted history (to compare)
+func GetIssueContentHistoryAndPrev(dbCtx context.Context, issueID, id int64) (history, prevHistory *ContentHistory, err error) {
+ history = &ContentHistory{}
+ has, err := db.GetEngine(dbCtx).Where("id=? AND issue_id=?", id, issueID).Get(history)
+ if err != nil {
+ log.Error("failed to get issue content history %v. err=%v", id, err)
+ return nil, nil, err
+ } else if !has {
+ log.Error("issue content history does not exist. id=%v. err=%v", id, err)
+ return nil, nil, &ErrIssueContentHistoryNotExist{id}
+ }
+
+ prevHistory = &ContentHistory{}
+ has, err = db.GetEngine(dbCtx).Where(builder.Eq{"issue_id": history.IssueID, "comment_id": history.CommentID, "is_deleted": false}).
+ And(builder.Lt{"edited_unix": history.EditedUnix}).
+ OrderBy("edited_unix DESC").Limit(1).
+ Get(prevHistory)
+
+ if err != nil {
+ log.Error("failed to get issue content history %v. err=%v", id, err)
+ return nil, nil, err
+ } else if !has {
+ return history, nil, nil
+ }
+
+ return history, prevHistory, nil
+}
diff --git a/models/issues/content_history_test.go b/models/issues/content_history_test.go
new file mode 100644
index 0000000..dde6f19
--- /dev/null
+++ b/models/issues/content_history_test.go
@@ -0,0 +1,94 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestContentHistory(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ dbCtx := db.DefaultContext
+ timeStampNow := timeutil.TimeStampNow()
+
+ _ = issues_model.SaveIssueContentHistory(dbCtx, 1, 10, 0, timeStampNow, "i-a", true)
+ _ = issues_model.SaveIssueContentHistory(dbCtx, 1, 10, 0, timeStampNow.Add(2), "i-b", false)
+ _ = issues_model.SaveIssueContentHistory(dbCtx, 1, 10, 0, timeStampNow.Add(7), "i-c", false)
+
+ _ = issues_model.SaveIssueContentHistory(dbCtx, 1, 10, 100, timeStampNow, "c-a", true)
+ _ = issues_model.SaveIssueContentHistory(dbCtx, 1, 10, 100, timeStampNow.Add(5), "c-b", false)
+ _ = issues_model.SaveIssueContentHistory(dbCtx, 1, 10, 100, timeStampNow.Add(20), "c-c", false)
+ _ = issues_model.SaveIssueContentHistory(dbCtx, 1, 10, 100, timeStampNow.Add(50), "c-d", false)
+ _ = issues_model.SaveIssueContentHistory(dbCtx, 1, 10, 100, timeStampNow.Add(51), "c-e", false)
+
+ h1, _ := issues_model.GetIssueContentHistoryByID(dbCtx, 1)
+ assert.EqualValues(t, 1, h1.ID)
+
+ m, _ := issues_model.QueryIssueContentHistoryEditedCountMap(dbCtx, 10)
+ assert.Equal(t, 3, m[0])
+ assert.Equal(t, 5, m[100])
+
+	/*
+		we cannot run this test with a real `User` yet, because depending on the `User` model would be a circular import, so there is no `user` table;
+		when the refactor of the models is done, it will be possible to run this test with a real `User` model.
+	*/
+ type User struct {
+ ID int64
+ Name string
+ FullName string
+ }
+ _ = db.GetEngine(dbCtx).Sync(&User{})
+
+ list1, _ := issues_model.FetchIssueContentHistoryList(dbCtx, 10, 0)
+ assert.Len(t, list1, 3)
+ list2, _ := issues_model.FetchIssueContentHistoryList(dbCtx, 10, 100)
+ assert.Len(t, list2, 5)
+
+ hasHistory1, _ := issues_model.HasIssueContentHistory(dbCtx, 10, 0)
+ assert.True(t, hasHistory1)
+ hasHistory2, _ := issues_model.HasIssueContentHistory(dbCtx, 10, 1)
+ assert.False(t, hasHistory2)
+
+ h6, h6Prev, _ := issues_model.GetIssueContentHistoryAndPrev(dbCtx, 10, 6)
+ assert.EqualValues(t, 6, h6.ID)
+ assert.EqualValues(t, 5, h6Prev.ID)
+
+ // soft-delete
+ _ = issues_model.SoftDeleteIssueContentHistory(dbCtx, 5)
+ h6, h6Prev, _ = issues_model.GetIssueContentHistoryAndPrev(dbCtx, 10, 6)
+ assert.EqualValues(t, 6, h6.ID)
+ assert.EqualValues(t, 4, h6Prev.ID)
+
+ // only keep 3 history revisions for comment_id=100, the first and the last should never be deleted
+ issues_model.KeepLimitedContentHistory(dbCtx, 10, 100, 3)
+ list1, _ = issues_model.FetchIssueContentHistoryList(dbCtx, 10, 0)
+ assert.Len(t, list1, 3)
+ list2, _ = issues_model.FetchIssueContentHistoryList(dbCtx, 10, 100)
+ assert.Len(t, list2, 3)
+ assert.EqualValues(t, 8, list2[0].HistoryID)
+ assert.EqualValues(t, 7, list2[1].HistoryID)
+ assert.EqualValues(t, 4, list2[2].HistoryID)
+}
+
+func TestHasIssueContentHistory(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+	// Ensures that comment_id is taken into account even if it is zero.
+ _ = issues_model.SaveIssueContentHistory(db.DefaultContext, 1, 11, 100, timeutil.TimeStampNow(), "c-a", true)
+ _ = issues_model.SaveIssueContentHistory(db.DefaultContext, 1, 11, 100, timeutil.TimeStampNow().Add(5), "c-b", false)
+
+ hasHistory1, _ := issues_model.HasIssueContentHistory(db.DefaultContext, 11, 0)
+ assert.False(t, hasHistory1)
+ hasHistory2, _ := issues_model.HasIssueContentHistory(db.DefaultContext, 11, 100)
+ assert.True(t, hasHistory2)
+}
diff --git a/models/issues/dependency.go b/models/issues/dependency.go
new file mode 100644
index 0000000..146dd18
--- /dev/null
+++ b/models/issues/dependency.go
@@ -0,0 +1,222 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+)
+
+// ErrDependencyExists represents a "DependencyAlreadyExists" kind of error.
+type ErrDependencyExists struct {
+ IssueID int64
+ DependencyID int64
+}
+
+// IsErrDependencyExists checks if an error is an ErrDependencyExists.
+func IsErrDependencyExists(err error) bool {
+ _, ok := err.(ErrDependencyExists)
+ return ok
+}
+
+func (err ErrDependencyExists) Error() string {
+ return fmt.Sprintf("issue dependency does already exist [issue id: %d, dependency id: %d]", err.IssueID, err.DependencyID)
+}
+
+func (err ErrDependencyExists) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+// ErrDependencyNotExists represents a "DependencyNotExists" kind of error.
+type ErrDependencyNotExists struct {
+ IssueID int64
+ DependencyID int64
+}
+
+// IsErrDependencyNotExists checks if an error is an ErrDependencyNotExists.
+func IsErrDependencyNotExists(err error) bool {
+ _, ok := err.(ErrDependencyNotExists)
+ return ok
+}
+
+func (err ErrDependencyNotExists) Error() string {
+ return fmt.Sprintf("issue dependency does not exist [issue id: %d, dependency id: %d]", err.IssueID, err.DependencyID)
+}
+
+func (err ErrDependencyNotExists) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ErrCircularDependency represents a "DependencyCircular" kind of error.
+type ErrCircularDependency struct {
+ IssueID int64
+ DependencyID int64
+}
+
+// IsErrCircularDependency checks if an error is an ErrCircularDependency.
+func IsErrCircularDependency(err error) bool {
+ _, ok := err.(ErrCircularDependency)
+ return ok
+}
+
+func (err ErrCircularDependency) Error() string {
+ return fmt.Sprintf("circular dependencies exists (two issues blocking each other) [issue id: %d, dependency id: %d]", err.IssueID, err.DependencyID)
+}
+
+// ErrDependenciesLeft represents an error where the issue you're trying to close still has dependencies left.
+type ErrDependenciesLeft struct {
+ IssueID int64
+}
+
+// IsErrDependenciesLeft checks if an error is an ErrDependenciesLeft.
+func IsErrDependenciesLeft(err error) bool {
+ _, ok := err.(ErrDependenciesLeft)
+ return ok
+}
+
+func (err ErrDependenciesLeft) Error() string {
+ return fmt.Sprintf("issue has open dependencies [issue id: %d]", err.IssueID)
+}
+
+// ErrUnknownDependencyType represents an error where an unknown dependency type was passed
+type ErrUnknownDependencyType struct {
+ Type DependencyType
+}
+
+// IsErrUnknownDependencyType checks if an error is ErrUnknownDependencyType
+func IsErrUnknownDependencyType(err error) bool {
+ _, ok := err.(ErrUnknownDependencyType)
+ return ok
+}
+
+func (err ErrUnknownDependencyType) Error() string {
+ return fmt.Sprintf("unknown dependency type [type: %d]", err.Type)
+}
+
+func (err ErrUnknownDependencyType) Unwrap() error {
+ return util.ErrInvalidArgument
+}
+
+// IssueDependency represents an issue dependency
+type IssueDependency struct {
+ ID int64 `xorm:"pk autoincr"`
+ UserID int64 `xorm:"NOT NULL"`
+ IssueID int64 `xorm:"UNIQUE(issue_dependency) NOT NULL"`
+ DependencyID int64 `xorm:"UNIQUE(issue_dependency) NOT NULL"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"updated"`
+}
+
+func init() {
+ db.RegisterModel(new(IssueDependency))
+}
+
+// DependencyType defines the dependency type constants
+type DependencyType int
+
+// Define Dependency Types
+const (
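+	// DependencyTypeBlockedBy: the issue is blocked by its dependency;
+	// DependencyTypeBlocking: the issue blocks another issue.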
+ DependencyTypeBlockedBy DependencyType = iota
+ DependencyTypeBlocking
+)
+
+// CreateIssueDependency creates a new dependency for an issue
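+// For example (with hypothetical issues), CreateIssueDependency(ctx, doer, issueA, issueB) records
+// that issueA is blocked by issueB; calling it again with the issues swapped returns ErrCircularDependency.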
+func CreateIssueDependency(ctx context.Context, user *user_model.User, issue, dep *Issue) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ // Check if it already exists
+ exists, err := issueDepExists(ctx, issue.ID, dep.ID)
+ if err != nil {
+ return err
+ }
+ if exists {
+ return ErrDependencyExists{issue.ID, dep.ID}
+ }
+ // And if it would be circular
+ circular, err := issueDepExists(ctx, dep.ID, issue.ID)
+ if err != nil {
+ return err
+ }
+ if circular {
+ return ErrCircularDependency{issue.ID, dep.ID}
+ }
+
+ if err := db.Insert(ctx, &IssueDependency{
+ UserID: user.ID,
+ IssueID: issue.ID,
+ DependencyID: dep.ID,
+ }); err != nil {
+ return err
+ }
+
+ // Add comment referencing the new dependency
+ if err = createIssueDependencyComment(ctx, user, issue, dep, true); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
+
+// RemoveIssueDependency removes a dependency from an issue
+func RemoveIssueDependency(ctx context.Context, user *user_model.User, issue, dep *Issue, depType DependencyType) (err error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ var issueDepToDelete IssueDependency
+
+ switch depType {
+ case DependencyTypeBlockedBy:
+ issueDepToDelete = IssueDependency{IssueID: issue.ID, DependencyID: dep.ID}
+ case DependencyTypeBlocking:
+ issueDepToDelete = IssueDependency{IssueID: dep.ID, DependencyID: issue.ID}
+ default:
+ return ErrUnknownDependencyType{depType}
+ }
+
+ affected, err := db.GetEngine(ctx).Delete(&issueDepToDelete)
+ if err != nil {
+ return err
+ }
+
+ // If we deleted nothing, the dependency did not exist
+ if affected <= 0 {
+ return ErrDependencyNotExists{issue.ID, dep.ID}
+ }
+
+ // Add comment referencing the removed dependency
+ if err = createIssueDependencyComment(ctx, user, issue, dep, false); err != nil {
+ return err
+ }
+ return committer.Commit()
+}
+
+// Check if the dependency already exists
+func issueDepExists(ctx context.Context, issueID, depID int64) (bool, error) {
+ return db.GetEngine(ctx).Where("(issue_id = ? AND dependency_id = ?)", issueID, depID).Exist(&IssueDependency{})
+}
+
+// IssueNoDependenciesLeft checks if issue can be closed
+func IssueNoDependenciesLeft(ctx context.Context, issue *Issue) (bool, error) {
+ exists, err := db.GetEngine(ctx).
+ Table("issue_dependency").
+ Select("issue.*").
+ Join("INNER", "issue", "issue.id = issue_dependency.dependency_id").
+ Where("issue_dependency.issue_id = ?", issue.ID).
+ And("issue.is_closed = ?", "0").
+ Exist(&Issue{})
+
+ return !exists, err
+}
diff --git a/models/issues/dependency_test.go b/models/issues/dependency_test.go
new file mode 100644
index 0000000..1e73c58
--- /dev/null
+++ b/models/issues/dependency_test.go
@@ -0,0 +1,63 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestCreateIssueDependency(t *testing.T) {
+ // Prepare
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ user1, err := user_model.GetUserByID(db.DefaultContext, 1)
+ require.NoError(t, err)
+
+ issue1, err := issues_model.GetIssueByID(db.DefaultContext, 1)
+ require.NoError(t, err)
+
+ issue2, err := issues_model.GetIssueByID(db.DefaultContext, 2)
+ require.NoError(t, err)
+
+ // Create a dependency and check if it was successful
+ err = issues_model.CreateIssueDependency(db.DefaultContext, user1, issue1, issue2)
+ require.NoError(t, err)
+
+	// Do it again to verify that the existing dependency is detected
+ err = issues_model.CreateIssueDependency(db.DefaultContext, user1, issue1, issue2)
+ require.Error(t, err)
+ assert.True(t, issues_model.IsErrDependencyExists(err))
+
+ // Check for circular dependencies
+ err = issues_model.CreateIssueDependency(db.DefaultContext, user1, issue2, issue1)
+ require.Error(t, err)
+ assert.True(t, issues_model.IsErrCircularDependency(err))
+
+ _ = unittest.AssertExistsAndLoadBean(t, &issues_model.Comment{Type: issues_model.CommentTypeAddDependency, PosterID: user1.ID, IssueID: issue1.ID})
+
+ // Check if dependencies left is correct
+ left, err := issues_model.IssueNoDependenciesLeft(db.DefaultContext, issue1)
+ require.NoError(t, err)
+ assert.False(t, left)
+
+ // Close #2 and check again
+ _, err = issues_model.ChangeIssueStatus(db.DefaultContext, issue2, user1, true)
+ require.NoError(t, err)
+
+ left, err = issues_model.IssueNoDependenciesLeft(db.DefaultContext, issue1)
+ require.NoError(t, err)
+ assert.True(t, left)
+
+ // Test removing the dependency
+ err = issues_model.RemoveIssueDependency(db.DefaultContext, user1, issue1, issue2, issues_model.DependencyTypeBlockedBy)
+ require.NoError(t, err)
+}
diff --git a/models/issues/issue.go b/models/issues/issue.go
new file mode 100644
index 0000000..f7379b7
--- /dev/null
+++ b/models/issues/issue.go
@@ -0,0 +1,939 @@
+// Copyright 2014 The Gogs Authors. All rights reserved.
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "context"
+ "fmt"
+ "html/template"
+ "regexp"
+ "slices"
+
+ "code.gitea.io/gitea/models/db"
+ project_model "code.gitea.io/gitea/models/project"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ErrIssueNotExist represents a "IssueNotExist" kind of error.
+type ErrIssueNotExist struct {
+ ID int64
+ RepoID int64
+ Index int64
+}
+
+// IsErrIssueNotExist checks if an error is an ErrIssueNotExist.
+func IsErrIssueNotExist(err error) bool {
+ _, ok := err.(ErrIssueNotExist)
+ return ok
+}
+
+func (err ErrIssueNotExist) Error() string {
+ return fmt.Sprintf("issue does not exist [id: %d, repo_id: %d, index: %d]", err.ID, err.RepoID, err.Index)
+}
+
+func (err ErrIssueNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ErrIssueIsClosed represents a "IssueIsClosed" kind of error.
+type ErrIssueIsClosed struct {
+ ID int64
+ RepoID int64
+ Index int64
+}
+
+// IsErrIssueIsClosed checks if an error is an ErrIssueIsClosed.
+func IsErrIssueIsClosed(err error) bool {
+ _, ok := err.(ErrIssueIsClosed)
+ return ok
+}
+
+func (err ErrIssueIsClosed) Error() string {
+ return fmt.Sprintf("issue is closed [id: %d, repo_id: %d, index: %d]", err.ID, err.RepoID, err.Index)
+}
+
+// ErrNewIssueInsert is used when the INSERT statement in newIssue fails
+type ErrNewIssueInsert struct {
+ OriginalError error
+}
+
+// IsErrNewIssueInsert checks if an error is an ErrNewIssueInsert.
+func IsErrNewIssueInsert(err error) bool {
+ _, ok := err.(ErrNewIssueInsert)
+ return ok
+}
+
+func (err ErrNewIssueInsert) Error() string {
+ return err.OriginalError.Error()
+}
+
+// ErrIssueWasClosed is used when closing an already closed issue
+type ErrIssueWasClosed struct {
+ ID int64
+ Index int64
+}
+
+// IsErrIssueWasClosed checks if an error is an ErrIssueWasClosed.
+func IsErrIssueWasClosed(err error) bool {
+ _, ok := err.(ErrIssueWasClosed)
+ return ok
+}
+
+func (err ErrIssueWasClosed) Error() string {
+ return fmt.Sprintf("Issue [%d] %d was already closed", err.ID, err.Index)
+}
+
+var ErrIssueAlreadyChanged = util.NewInvalidArgumentErrorf("the issue is already changed")
+
+// Issue represents an issue or pull request of repository.
+type Issue struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"INDEX UNIQUE(repo_index)"`
+ Repo *repo_model.Repository `xorm:"-"`
+ Index int64 `xorm:"UNIQUE(repo_index)"` // Index in one repository.
+ PosterID int64 `xorm:"INDEX"`
+ Poster *user_model.User `xorm:"-"`
+ OriginalAuthor string
+ OriginalAuthorID int64 `xorm:"index"`
+ Title string `xorm:"name"`
+ Content string `xorm:"LONGTEXT"`
+ RenderedContent template.HTML `xorm:"-"`
+ ContentVersion int `xorm:"NOT NULL DEFAULT 0"`
+ Labels []*Label `xorm:"-"`
+ isLabelsLoaded bool `xorm:"-"`
+ MilestoneID int64 `xorm:"INDEX"`
+ Milestone *Milestone `xorm:"-"`
+ isMilestoneLoaded bool `xorm:"-"`
+ Project *project_model.Project `xorm:"-"`
+ Priority int
+ AssigneeID int64 `xorm:"-"`
+ Assignee *user_model.User `xorm:"-"`
+ isAssigneeLoaded bool `xorm:"-"`
+ IsClosed bool `xorm:"INDEX"`
+ IsRead bool `xorm:"-"`
+	IsPull bool `xorm:"INDEX"` // Indicates whether it is a pull request.
+ PullRequest *PullRequest `xorm:"-"`
+ NumComments int
+ Ref string
+ PinOrder int `xorm:"DEFAULT 0"`
+
+ DeadlineUnix timeutil.TimeStamp `xorm:"INDEX"`
+
+ Created timeutil.TimeStampNano
+
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ ClosedUnix timeutil.TimeStamp `xorm:"INDEX"`
+ NoAutoTime bool `xorm:"-"`
+
+ Attachments []*repo_model.Attachment `xorm:"-"`
+ isAttachmentsLoaded bool `xorm:"-"`
+ Comments CommentList `xorm:"-"`
+ Reactions ReactionList `xorm:"-"`
+ TotalTrackedTime int64 `xorm:"-"`
+ Assignees []*user_model.User `xorm:"-"`
+
+ // IsLocked limits commenting abilities to users on an issue
+ // with write access
+ IsLocked bool `xorm:"NOT NULL DEFAULT false"`
+
+ // For view issue page.
+ ShowRole RoleDescriptor `xorm:"-"`
+}
+
+var (
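+	// These patterns match GFM task list items such as "- [ ] todo" or "* [x] done";
+	// issueTasksDonePat matches only the checked ones.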
+ issueTasksPat = regexp.MustCompile(`(^|\n)\s*[-*]\s*\[[\sxX]\]`)
+ issueTasksDonePat = regexp.MustCompile(`(^|\n)\s*[-*]\s*\[[xX]\]`)
+)
+
+// IssueIndex represents the issue index table
+type IssueIndex db.ResourceIndex
+
+func init() {
+ db.RegisterModel(new(Issue))
+ db.RegisterModel(new(IssueIndex))
+}
+
+// LoadTotalTimes loads the total tracked time
+func (issue *Issue) LoadTotalTimes(ctx context.Context) (err error) {
+ opts := FindTrackedTimesOptions{IssueID: issue.ID}
+ issue.TotalTrackedTime, err = opts.toSession(db.GetEngine(ctx)).SumInt(&TrackedTime{}, "time")
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// IsOverdue checks if the issue is overdue
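+// A closed issue counts as overdue when it was closed at or after its deadline.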
+func (issue *Issue) IsOverdue() bool {
+ if issue.IsClosed {
+ return issue.ClosedUnix >= issue.DeadlineUnix
+ }
+ return timeutil.TimeStampNow() >= issue.DeadlineUnix
+}
+
+// LoadRepo loads issue's repository
+func (issue *Issue) LoadRepo(ctx context.Context) (err error) {
+ if issue.Repo == nil && issue.RepoID != 0 {
+ issue.Repo, err = repo_model.GetRepositoryByID(ctx, issue.RepoID)
+ if err != nil {
+ return fmt.Errorf("getRepositoryByID [%d]: %w", issue.RepoID, err)
+ }
+ }
+ return nil
+}
+
+func (issue *Issue) LoadAttachments(ctx context.Context) (err error) {
+ if issue.isAttachmentsLoaded || issue.Attachments != nil {
+ return nil
+ }
+
+ issue.Attachments, err = repo_model.GetAttachmentsByIssueID(ctx, issue.ID)
+ if err != nil {
+ return fmt.Errorf("getAttachmentsByIssueID [%d]: %w", issue.ID, err)
+ }
+ issue.isAttachmentsLoaded = true
+ return nil
+}
+
+// IsTimetrackerEnabled returns true if the repo enables timetracking
+func (issue *Issue) IsTimetrackerEnabled(ctx context.Context) bool {
+ if err := issue.LoadRepo(ctx); err != nil {
+ log.Error(fmt.Sprintf("loadRepo: %v", err))
+ return false
+ }
+ return issue.Repo.IsTimetrackerEnabled(ctx)
+}
+
+// LoadPoster loads poster
+func (issue *Issue) LoadPoster(ctx context.Context) (err error) {
+ if issue.Poster == nil && issue.PosterID != 0 {
+ issue.Poster, err = user_model.GetPossibleUserByID(ctx, issue.PosterID)
+ if err != nil {
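+			// fall back to the Ghost user; only unexpected errors are propagated below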
+ issue.PosterID = user_model.GhostUserID
+ issue.Poster = user_model.NewGhostUser()
+ if !user_model.IsErrUserNotExist(err) {
+ return fmt.Errorf("getUserByID.(poster) [%d]: %w", issue.PosterID, err)
+ }
+ return nil
+ }
+ }
+ return err
+}
+
+// LoadPullRequest loads pull request info
+func (issue *Issue) LoadPullRequest(ctx context.Context) (err error) {
+ if issue.IsPull {
+ if issue.PullRequest == nil && issue.ID != 0 {
+ issue.PullRequest, err = GetPullRequestByIssueID(ctx, issue.ID)
+ if err != nil {
+ if IsErrPullRequestNotExist(err) {
+ return err
+ }
+ return fmt.Errorf("getPullRequestByIssueID [%d]: %w", issue.ID, err)
+ }
+ }
+ if issue.PullRequest != nil {
+ issue.PullRequest.Issue = issue
+ }
+ }
+ return nil
+}
+
+func (issue *Issue) loadComments(ctx context.Context) (err error) {
+ return issue.loadCommentsByType(ctx, CommentTypeUndefined)
+}
+
+// LoadDiscussComments loads discuss comments
+func (issue *Issue) LoadDiscussComments(ctx context.Context) error {
+ return issue.loadCommentsByType(ctx, CommentTypeComment)
+}
+
+func (issue *Issue) loadCommentsByType(ctx context.Context, tp CommentType) (err error) {
+ if issue.Comments != nil {
+ return nil
+ }
+ issue.Comments, err = FindComments(ctx, &FindCommentsOptions{
+ IssueID: issue.ID,
+ Type: tp,
+ })
+ return err
+}
+
+func (issue *Issue) loadReactions(ctx context.Context) (err error) {
+ if issue.Reactions != nil {
+ return nil
+ }
+ reactions, _, err := FindReactions(ctx, FindReactionsOptions{
+ IssueID: issue.ID,
+ })
+ if err != nil {
+ return err
+ }
+ if err = issue.LoadRepo(ctx); err != nil {
+ return err
+ }
+ // Load reaction user data
+ if _, err := reactions.LoadUsers(ctx, issue.Repo); err != nil {
+ return err
+ }
+
+ // Cache comments to map
+ comments := make(map[int64]*Comment)
+ for _, comment := range issue.Comments {
+ comments[comment.ID] = comment
+ }
+ // Add reactions either to issue or comment
+ for _, react := range reactions {
+ if react.CommentID == 0 {
+ issue.Reactions = append(issue.Reactions, react)
+ } else if comment, ok := comments[react.CommentID]; ok {
+ comment.Reactions = append(comment.Reactions, react)
+ }
+ }
+ return nil
+}
+
+// LoadMilestone loads the milestone of this issue.
+func (issue *Issue) LoadMilestone(ctx context.Context) (err error) {
+ if !issue.isMilestoneLoaded && (issue.Milestone == nil || issue.Milestone.ID != issue.MilestoneID) && issue.MilestoneID > 0 {
+ issue.Milestone, err = GetMilestoneByRepoID(ctx, issue.RepoID, issue.MilestoneID)
+ if err != nil && !IsErrMilestoneNotExist(err) {
+ return fmt.Errorf("getMilestoneByRepoID [repo_id: %d, milestone_id: %d]: %w", issue.RepoID, issue.MilestoneID, err)
+ }
+ issue.isMilestoneLoaded = true
+ }
+ return nil
+}
+
+// LoadAttributes loads the attribute of this issue.
+func (issue *Issue) LoadAttributes(ctx context.Context) (err error) {
+ if err = issue.LoadRepo(ctx); err != nil {
+ return err
+ }
+
+ if err = issue.LoadPoster(ctx); err != nil {
+ return err
+ }
+
+ if err = issue.LoadLabels(ctx); err != nil {
+ return err
+ }
+
+ if err = issue.LoadMilestone(ctx); err != nil {
+ return err
+ }
+
+ if err = issue.LoadProject(ctx); err != nil {
+ return err
+ }
+
+ if err = issue.LoadAssignees(ctx); err != nil {
+ return err
+ }
+
+ if err = issue.LoadPullRequest(ctx); err != nil && !IsErrPullRequestNotExist(err) {
+ // It is possible pull request is not yet created.
+ return err
+ }
+
+ if err = issue.LoadAttachments(ctx); err != nil {
+ return err
+ }
+
+ if err = issue.loadComments(ctx); err != nil {
+ return err
+ }
+
+ if err = issue.Comments.LoadAttributes(ctx); err != nil {
+ return err
+ }
+ if issue.IsTimetrackerEnabled(ctx) {
+ if err = issue.LoadTotalTimes(ctx); err != nil {
+ return err
+ }
+ }
+
+ return issue.loadReactions(ctx)
+}
+
+func (issue *Issue) ResetAttributesLoaded() {
+ issue.isLabelsLoaded = false
+ issue.isMilestoneLoaded = false
+ issue.isAttachmentsLoaded = false
+ issue.isAssigneeLoaded = false
+}
+
+// GetIsRead loads the `IsRead` field of the issue
+func (issue *Issue) GetIsRead(ctx context.Context, userID int64) error {
+ issueUser := &IssueUser{IssueID: issue.ID, UID: userID}
+ if has, err := db.GetEngine(ctx).Get(issueUser); err != nil {
+ return err
+ } else if !has {
+ issue.IsRead = false
+ return nil
+ }
+ issue.IsRead = issueUser.IsRead
+ return nil
+}
+
+// APIURL returns the absolute API URL of this issue.
+func (issue *Issue) APIURL(ctx context.Context) string {
+ if issue.Repo == nil {
+ err := issue.LoadRepo(ctx)
+ if err != nil {
+ log.Error("Issue[%d].APIURL(): %v", issue.ID, err)
+ return ""
+ }
+ }
+ return fmt.Sprintf("%s/issues/%d", issue.Repo.APIURL(), issue.Index)
+}
+
+// HTMLURL returns the absolute URL to this issue.
+func (issue *Issue) HTMLURL() string {
+ var path string
+ if issue.IsPull {
+ path = "pulls"
+ } else {
+ path = "issues"
+ }
+ return fmt.Sprintf("%s/%s/%d", issue.Repo.HTMLURL(), path, issue.Index)
+}
+
+// Link returns the issue's relative URL.
+func (issue *Issue) Link() string {
+ var path string
+ if issue.IsPull {
+ path = "pulls"
+ } else {
+ path = "issues"
+ }
+ return fmt.Sprintf("%s/%s/%d", issue.Repo.Link(), path, issue.Index)
+}
+
+// DiffURL returns the absolute URL to this diff
+func (issue *Issue) DiffURL() string {
+ if issue.IsPull {
+ return fmt.Sprintf("%s/pulls/%d.diff", issue.Repo.HTMLURL(), issue.Index)
+ }
+ return ""
+}
+
+// PatchURL returns the absolute URL to this patch
+func (issue *Issue) PatchURL() string {
+ if issue.IsPull {
+ return fmt.Sprintf("%s/pulls/%d.patch", issue.Repo.HTMLURL(), issue.Index)
+ }
+ return ""
+}
+
+// State returns string representation of issue status.
+func (issue *Issue) State() api.StateType {
+ if issue.IsClosed {
+ return api.StateClosed
+ }
+ return api.StateOpen
+}
+
+// HashTag returns unique hash tag for issue.
+func (issue *Issue) HashTag() string {
+ return fmt.Sprintf("issue-%d", issue.ID)
+}
+
+// IsPoster returns true if the given user (by ID) is the poster.
+func (issue *Issue) IsPoster(uid int64) bool {
+ return issue.OriginalAuthorID == 0 && issue.PosterID == uid
+}
+
+// GetTasks returns the number of tasks in the issue's content
+func (issue *Issue) GetTasks() int {
+ return len(issueTasksPat.FindAllStringIndex(issue.Content, -1))
+}
+
+// GetTasksDone returns the number of completed tasks in the issue's content
+func (issue *Issue) GetTasksDone() int {
+ return len(issueTasksDonePat.FindAllStringIndex(issue.Content, -1))
+}
+
+// GetLastEventTimestamp returns the last user-visible event timestamp, either the creation of this issue or the close.
+func (issue *Issue) GetLastEventTimestamp() timeutil.TimeStamp {
+ if issue.IsClosed {
+ return issue.ClosedUnix
+ }
+ return issue.CreatedUnix
+}
+
+// GetLastEventLabel returns the localization label for the current issue.
+func (issue *Issue) GetLastEventLabel() string {
+ if issue.IsClosed {
+ if issue.IsPull && issue.PullRequest.HasMerged {
+ return "repo.pulls.merged_by"
+ }
+ return "repo.issues.closed_by"
+ }
+ return "repo.issues.opened_by"
+}
+
+// GetLastComment returns the last comment of the current issue.
+func (issue *Issue) GetLastComment(ctx context.Context) (*Comment, error) {
+ var c Comment
+ exist, err := db.GetEngine(ctx).Where("type = ?", CommentTypeComment).
+ And("issue_id = ?", issue.ID).Desc("created_unix").Get(&c)
+ if err != nil {
+ return nil, err
+ }
+ if !exist {
+ return nil, nil
+ }
+ return &c, nil
+}
+
+// GetLastEventLabelFake returns the localization label for the current issue without providing a link in the username.
+func (issue *Issue) GetLastEventLabelFake() string {
+ if issue.IsClosed {
+ if issue.IsPull && issue.PullRequest.HasMerged {
+ return "repo.pulls.merged_by_fake"
+ }
+ return "repo.issues.closed_by_fake"
+ }
+ return "repo.issues.opened_by_fake"
+}
+
+// GetIssueByIndex returns the raw issue (without loading attributes) by index in a repository.
+func GetIssueByIndex(ctx context.Context, repoID, index int64) (*Issue, error) {
+ if index < 1 {
+ return nil, ErrIssueNotExist{0, repoID, index}
+ }
+ issue := &Issue{
+ RepoID: repoID,
+ Index: index,
+ }
+ has, err := db.GetEngine(ctx).Get(issue)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrIssueNotExist{0, repoID, index}
+ }
+ return issue, nil
+}
+
+// GetIssueWithAttrsByIndex returns an issue with its attributes loaded by index in a repository.
+func GetIssueWithAttrsByIndex(ctx context.Context, repoID, index int64) (*Issue, error) {
+ issue, err := GetIssueByIndex(ctx, repoID, index)
+ if err != nil {
+ return nil, err
+ }
+ return issue, issue.LoadAttributes(ctx)
+}
+
+// GetIssueByID returns an issue by given ID.
+func GetIssueByID(ctx context.Context, id int64) (*Issue, error) {
+ issue := new(Issue)
+ has, err := db.GetEngine(ctx).ID(id).Get(issue)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrIssueNotExist{id, 0, 0}
+ }
+ return issue, nil
+}
+
+// GetIssuesByIDs return issues with the given IDs.
+// If keepOrder is true, the order of the returned issues will be the same as the given IDs.
+func GetIssuesByIDs(ctx context.Context, issueIDs []int64, keepOrder ...bool) (IssueList, error) {
+ issues := make([]*Issue, 0, len(issueIDs))
+
+ if err := db.GetEngine(ctx).In("id", issueIDs).Find(&issues); err != nil {
+ return nil, err
+ }
+
+ if len(keepOrder) > 0 && keepOrder[0] {
+ m := make(map[int64]*Issue, len(issues))
+ appended := container.Set[int64]{}
+ for _, issue := range issues {
+ m[issue.ID] = issue
+ }
+ issues = issues[:0]
+ for _, id := range issueIDs {
+ if issue, ok := m[id]; ok && !appended.Contains(id) { // make sure the id is existed and not appended
+ appended.Add(id)
+ issues = append(issues, issue)
+ }
+ }
+ }
+
+ return issues, nil
+}
+
+// GetIssueIDsByRepoID returns all issue ids by repo id
+func GetIssueIDsByRepoID(ctx context.Context, repoID int64) ([]int64, error) {
+ ids := make([]int64, 0, 10)
+ err := db.GetEngine(ctx).Table("issue").Cols("id").Where("repo_id = ?", repoID).Find(&ids)
+ return ids, err
+}
+
+// GetParticipantsIDsByIssueID returns the IDs of all users who participated in comments of an issue,
+// but skips joining with `user` for performance reasons.
+// User permissions must be verified elsewhere if required.
+func GetParticipantsIDsByIssueID(ctx context.Context, issueID int64) ([]int64, error) {
+ userIDs := make([]int64, 0, 5)
+ return userIDs, db.GetEngine(ctx).
+ Table("comment").
+ Cols("poster_id").
+ Where("issue_id = ?", issueID).
+ And("type in (?,?,?)", CommentTypeComment, CommentTypeCode, CommentTypeReview).
+ Distinct("poster_id").
+ Find(&userIDs)
+}
+
+// IsUserParticipantsOfIssue returns true if the user is a participant of the issue
+func IsUserParticipantsOfIssue(ctx context.Context, user *user_model.User, issue *Issue) bool {
+ userIDs, err := issue.GetParticipantIDsByIssue(ctx)
+ if err != nil {
+ log.Error(err.Error())
+ return false
+ }
+ return slices.Contains(userIDs, user.ID)
+}
+
+// DependencyInfo represents high level information about an issue which is a dependency of another issue.
+type DependencyInfo struct {
+ Issue `xorm:"extends"`
+ repo_model.Repository `xorm:"extends"`
+}
+
+// GetParticipantIDsByIssue returns the IDs of all users who participated in comments of an issue, plus the issue author
+func (issue *Issue) GetParticipantIDsByIssue(ctx context.Context) ([]int64, error) {
+ if issue == nil {
+ return nil, nil
+ }
+ userIDs := make([]int64, 0, 5)
+ if err := db.GetEngine(ctx).Table("comment").Cols("poster_id").
+ Where("`comment`.issue_id = ?", issue.ID).
+ And("`comment`.type in (?,?,?)", CommentTypeComment, CommentTypeCode, CommentTypeReview).
+ And("`user`.is_active = ?", true).
+ And("`user`.prohibit_login = ?", false).
+ Join("INNER", "`user`", "`user`.id = `comment`.poster_id").
+ Distinct("poster_id").
+ Find(&userIDs); err != nil {
+ return nil, fmt.Errorf("get poster IDs: %w", err)
+ }
+ if !slices.Contains(userIDs, issue.PosterID) {
+ return append(userIDs, issue.PosterID), nil
+ }
+ return userIDs, nil
+}
+
+// BlockedByDependencies finds all Dependencies an issue is blocked by
+func (issue *Issue) BlockedByDependencies(ctx context.Context, opts db.ListOptions) (issueDeps []*DependencyInfo, err error) {
+ sess := db.GetEngine(ctx).
+ Table("issue").
+ Join("INNER", "repository", "repository.id = issue.repo_id").
+ Join("INNER", "issue_dependency", "issue_dependency.dependency_id = issue.id").
+ Where("issue_id = ?", issue.ID).
+ // sort by repo id then created date, with the issues of the same repo at the beginning of the list
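+		// (the CASE expression maps the current repo to 0 so its issues sort first)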
+ OrderBy("CASE WHEN issue.repo_id = ? THEN 0 ELSE issue.repo_id END, issue.created_unix DESC", issue.RepoID)
+ if opts.Page != 0 {
+ sess = db.SetSessionPagination(sess, &opts)
+ }
+ err = sess.Find(&issueDeps)
+
+ for _, depInfo := range issueDeps {
+ depInfo.Issue.Repo = &depInfo.Repository
+ }
+
+ return issueDeps, err
+}
+
+// BlockingDependencies returns all blocking dependencies, aka all other issues a given issue blocks
+func (issue *Issue) BlockingDependencies(ctx context.Context) (issueDeps []*DependencyInfo, err error) {
+ err = db.GetEngine(ctx).
+ Table("issue").
+ Join("INNER", "repository", "repository.id = issue.repo_id").
+ Join("INNER", "issue_dependency", "issue_dependency.issue_id = issue.id").
+ Where("dependency_id = ?", issue.ID).
+ // sort by repo id then created date, with the issues of the same repo at the beginning of the list
+ OrderBy("CASE WHEN issue.repo_id = ? THEN 0 ELSE issue.repo_id END, issue.created_unix DESC", issue.RepoID).
+ Find(&issueDeps)
+
+ for _, depInfo := range issueDeps {
+ depInfo.Issue.Repo = &depInfo.Repository
+ }
+
+ return issueDeps, err
+}
+
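+// migratedIssueCond matches rows whose issue belongs to a repository that was migrated
+// from the given service type.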
+func migratedIssueCond(tp api.GitServiceType) builder.Cond {
+ return builder.In("issue_id",
+ builder.Select("issue.id").
+ From("issue").
+ InnerJoin("repository", "issue.repo_id = repository.id").
+ Where(builder.Eq{
+ "repository.original_service_type": tp,
+ }),
+ )
+}
+
+// RemapExternalUser ExternalUserRemappable interface
+func (issue *Issue) RemapExternalUser(externalName string, externalID, userID int64) error {
+ issue.OriginalAuthor = externalName
+ issue.OriginalAuthorID = externalID
+ issue.PosterID = userID
+ return nil
+}
+
+// GetUserID ExternalUserRemappable interface
+func (issue *Issue) GetUserID() int64 { return issue.PosterID }
+
+// GetExternalName ExternalUserRemappable interface
+func (issue *Issue) GetExternalName() string { return issue.OriginalAuthor }
+
+// GetExternalID ExternalUserRemappable interface
+func (issue *Issue) GetExternalID() int64 { return issue.OriginalAuthorID }
+
+// HasOriginalAuthor returns if an issue was migrated and has an original author.
+func (issue *Issue) HasOriginalAuthor() bool {
+ return issue.OriginalAuthor != "" && issue.OriginalAuthorID != 0
+}
+
+var ErrIssueMaxPinReached = util.NewInvalidArgumentErrorf("the max number of pinned issues has been reached")
+
+// IsPinned returns if an Issue is pinned
+func (issue *Issue) IsPinned() bool {
+ return issue.PinOrder != 0
+}
+
+// Pin pins an Issue
+func (issue *Issue) Pin(ctx context.Context, user *user_model.User) error {
+ // If the Issue is already pinned, we don't need to pin it twice
+ if issue.IsPinned() {
+ return nil
+ }
+
+ var maxPin int
+ _, err := db.GetEngine(ctx).SQL("SELECT MAX(pin_order) FROM issue WHERE repo_id = ? AND is_pull = ?", issue.RepoID, issue.IsPull).Get(&maxPin)
+ if err != nil {
+ return err
+ }
+
+ // Check if the maximum allowed Pins reached
+ if maxPin >= setting.Repository.Issue.MaxPinned {
+ return ErrIssueMaxPinReached
+ }
+
+ _, err = db.GetEngine(ctx).Table("issue").
+ Where("id = ?", issue.ID).
+ Update(map[string]any{
+ "pin_order": maxPin + 1,
+ })
+ if err != nil {
+ return err
+ }
+
+ // Add the pin event to the history
+ opts := &CreateCommentOptions{
+ Type: CommentTypePin,
+ Doer: user,
+ Repo: issue.Repo,
+ Issue: issue,
+ }
+ if _, err = CreateComment(ctx, opts); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Unpin unpins an Issue
+func (issue *Issue) Unpin(ctx context.Context, user *user_model.User) error {
+ // If the Issue is not pinned, we don't need to unpin it
+ if !issue.IsPinned() {
+ return nil
+ }
+
+	// This sets the Pin for all Issues that come after the unpinned Issue to the correct value
+ _, err := db.GetEngine(ctx).Exec("UPDATE issue SET pin_order = pin_order - 1 WHERE repo_id = ? AND is_pull = ? AND pin_order > ?", issue.RepoID, issue.IsPull, issue.PinOrder)
+ if err != nil {
+ return err
+ }
+
+ _, err = db.GetEngine(ctx).Table("issue").
+ Where("id = ?", issue.ID).
+ Update(map[string]any{
+ "pin_order": 0,
+ })
+ if err != nil {
+ return err
+ }
+
+ // Add the unpin event to the history
+ opts := &CreateCommentOptions{
+ Type: CommentTypeUnpin,
+ Doer: user,
+ Repo: issue.Repo,
+ Issue: issue,
+ }
+ if _, err = CreateComment(ctx, opts); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// PinOrUnpin pins or unpins an Issue
+func (issue *Issue) PinOrUnpin(ctx context.Context, user *user_model.User) error {
+ if !issue.IsPinned() {
+ return issue.Pin(ctx, user)
+ }
+
+ return issue.Unpin(ctx, user)
+}
+
+// MovePin moves a Pinned Issue to a new Position
+func (issue *Issue) MovePin(ctx context.Context, newPosition int) error {
+	// If the Issue is not pinned, we can't move it
+ if !issue.IsPinned() {
+ return nil
+ }
+
+ if newPosition < 1 {
+ return fmt.Errorf("The Position can't be lower than 1")
+ }
+
+ dbctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ var maxPin int
+ _, err = db.GetEngine(dbctx).SQL("SELECT MAX(pin_order) FROM issue WHERE repo_id = ? AND is_pull = ?", issue.RepoID, issue.IsPull).Get(&maxPin)
+ if err != nil {
+ return err
+ }
+
+	// If the new Position is bigger than the current Maximum, set it to the Maximum
+ if newPosition > maxPin+1 {
+ newPosition = maxPin + 1
+ }
+
+ // Lower the Position of all Pinned Issue that came after the current Position
+ _, err = db.GetEngine(dbctx).Exec("UPDATE issue SET pin_order = pin_order - 1 WHERE repo_id = ? AND is_pull = ? AND pin_order > ?", issue.RepoID, issue.IsPull, issue.PinOrder)
+ if err != nil {
+ return err
+ }
+
+	// Raise the Position of all Pinned Issues that come after the new Position
+ _, err = db.GetEngine(dbctx).Exec("UPDATE issue SET pin_order = pin_order + 1 WHERE repo_id = ? AND is_pull = ? AND pin_order >= ?", issue.RepoID, issue.IsPull, newPosition)
+ if err != nil {
+ return err
+ }
+
+ _, err = db.GetEngine(dbctx).Table("issue").
+ Where("id = ?", issue.ID).
+ Update(map[string]any{
+ "pin_order": newPosition,
+ })
+ if err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
+
+// GetPinnedIssues returns the pinned Issues for the given Repo and type
+func GetPinnedIssues(ctx context.Context, repoID int64, isPull bool) (IssueList, error) {
+ issues := make(IssueList, 0)
+
+ err := db.GetEngine(ctx).
+ Table("issue").
+ Where("repo_id = ?", repoID).
+ And("is_pull = ?", isPull).
+ And("pin_order > 0").
+ OrderBy("pin_order").
+ Find(&issues)
+ if err != nil {
+ return nil, err
+ }
+
+ err = issues.LoadAttributes(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ return issues, nil
+}
+
+// IsNewPinAllowed returns if a new Issue or Pull request can be pinned
+func IsNewPinAllowed(ctx context.Context, repoID int64, isPull bool) (bool, error) {
+	var pinnedCount int
+	_, err := db.GetEngine(ctx).SQL("SELECT COUNT(pin_order) FROM issue WHERE repo_id = ? AND is_pull = ? AND pin_order > 0", repoID, isPull).Get(&pinnedCount)
+ if err != nil {
+ return false, err
+ }
+
+ return maxPin < setting.Repository.Issue.MaxPinned, nil
+}
+
+// IsErrIssueMaxPinReached returns if the error means the user can't pin more Issues
+func IsErrIssueMaxPinReached(err error) bool {
+ return err == ErrIssueMaxPinReached
+}
+
+// InsertIssues inserts issues into the database
+func InsertIssues(ctx context.Context, issues ...*Issue) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ for _, issue := range issues {
+ if err := insertIssue(ctx, issue); err != nil {
+ return err
+ }
+ }
+ return committer.Commit()
+}
+
+func insertIssue(ctx context.Context, issue *Issue) error {
+ sess := db.GetEngine(ctx)
+ if _, err := sess.NoAutoTime().Insert(issue); err != nil {
+ return err
+ }
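+	// batch-insert the issue's labels, if any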
+ issueLabels := make([]IssueLabel, 0, len(issue.Labels))
+ for _, label := range issue.Labels {
+ issueLabels = append(issueLabels, IssueLabel{
+ IssueID: issue.ID,
+ LabelID: label.ID,
+ })
+ }
+ if len(issueLabels) > 0 {
+ if _, err := sess.Insert(issueLabels); err != nil {
+ return err
+ }
+ }
+
+ for _, reaction := range issue.Reactions {
+ reaction.IssueID = issue.ID
+ }
+
+ if len(issue.Reactions) > 0 {
+ if _, err := sess.Insert(issue.Reactions); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/models/issues/issue_index.go b/models/issues/issue_index.go
new file mode 100644
index 0000000..9386027
--- /dev/null
+++ b/models/issues/issue_index.go
@@ -0,0 +1,39 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+)
+
+func GetMaxIssueIndexForRepo(ctx context.Context, repoID int64) (int64, error) {
+ var max int64
+ if _, err := db.GetEngine(ctx).Select("MAX(`index`)").Table("issue").Where("repo_id=?", repoID).Get(&max); err != nil {
+ return 0, err
+ }
+ return max, nil
+}
+
+// RecalculateIssueIndexForRepo creates the issue_index for a repo if it does not exist and
+// updates it based on the highest index of the existing issues assigned to the repo
+func RecalculateIssueIndexForRepo(ctx context.Context, repoID int64) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ max, err := GetMaxIssueIndexForRepo(ctx, repoID)
+ if err != nil {
+ return err
+ }
+
+ if err = db.SyncMaxResourceIndex(ctx, "issue_index", repoID, max); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
diff --git a/models/issues/issue_index_test.go b/models/issues/issue_index_test.go
new file mode 100644
index 0000000..eb79a08
--- /dev/null
+++ b/models/issues/issue_index_test.go
@@ -0,0 +1,39 @@
+// Copyright 2024 The Forgejo Authors
+// SPDX-License-Identifier: MIT
+
+package issues_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGetMaxIssueIndexForRepo(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+
+ maxPR, err := issues_model.GetMaxIssueIndexForRepo(db.DefaultContext, repo.ID)
+ require.NoError(t, err)
+
+ issue := testCreateIssue(t, repo.ID, repo.OwnerID, "title1", "content1", false)
+ assert.Greater(t, issue.Index, maxPR)
+
+ maxPR, err = issues_model.GetMaxIssueIndexForRepo(db.DefaultContext, repo.ID)
+ require.NoError(t, err)
+
+ pull := testCreateIssue(t, repo.ID, repo.OwnerID, "title2", "content2", true)
+ assert.Greater(t, pull.Index, maxPR)
+
+ maxPR, err = issues_model.GetMaxIssueIndexForRepo(db.DefaultContext, repo.ID)
+ require.NoError(t, err)
+
+ assert.Equal(t, maxPR, pull.Index)
+}
diff --git a/models/issues/issue_label.go b/models/issues/issue_label.go
new file mode 100644
index 0000000..04e1fa3
--- /dev/null
+++ b/models/issues/issue_label.go
@@ -0,0 +1,507 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "context"
+ "fmt"
+ "sort"
+
+ "code.gitea.io/gitea/models/db"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ user_model "code.gitea.io/gitea/models/user"
+
+ "xorm.io/builder"
+)
+
+// IssueLabel represents an issue-label relation.
+type IssueLabel struct {
+ ID int64 `xorm:"pk autoincr"`
+ IssueID int64 `xorm:"UNIQUE(s)"`
+ LabelID int64 `xorm:"UNIQUE(s)"`
+}
+
+// HasIssueLabel returns true if the issue has been labeled.
+func HasIssueLabel(ctx context.Context, issueID, labelID int64) bool {
+ has, _ := db.GetEngine(ctx).Where("issue_id = ? AND label_id = ?", issueID, labelID).Get(new(IssueLabel))
+ return has
+}
+
+// newIssueLabel creates a new issue-label relation. It does NOT check whether
+// the label is valid for the issue; the caller MUST verify this beforehand.
+func newIssueLabel(ctx context.Context, issue *Issue, label *Label, doer *user_model.User) (err error) {
+ if err = db.Insert(ctx, &IssueLabel{
+ IssueID: issue.ID,
+ LabelID: label.ID,
+ }); err != nil {
+ return err
+ }
+
+ if err = issue.LoadRepo(ctx); err != nil {
+ return err
+ }
+
+ opts := &CreateCommentOptions{
+ Type: CommentTypeLabel,
+ Doer: doer,
+ Repo: issue.Repo,
+ Issue: issue,
+ Label: label,
+ Content: "1",
+ }
+ if _, err = CreateComment(ctx, opts); err != nil {
+ return err
+ }
+
+ issue.Labels = append(issue.Labels, label)
+
+ return updateLabelCols(ctx, label, "num_issues", "num_closed_issue")
+}
+
+// RemoveDuplicateExclusiveIssueLabels removes all labels on the issue that are in the given label's exclusive scope
+func RemoveDuplicateExclusiveIssueLabels(ctx context.Context, issue *Issue, label *Label, doer *user_model.User) (err error) {
+ scope := label.ExclusiveScope()
+ if scope == "" {
+ return nil
+ }
+
+ var toRemove []*Label
+ for _, issueLabel := range issue.Labels {
+ if label.ID != issueLabel.ID && issueLabel.ExclusiveScope() == scope {
+ toRemove = append(toRemove, issueLabel)
+ }
+ }
+
+ for _, issueLabel := range toRemove {
+ if err = deleteIssueLabel(ctx, issue, issueLabel, doer); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
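+
+// Sketch of the intended semantics (assuming two exclusive labels sharing the
+// scope "priority", e.g. "priority/low" and "priority/high"; the names are
+// illustrative only):
+//
+//	// the issue currently carries priorityLow
+//	if err := RemoveDuplicateExclusiveIssueLabels(ctx, issue, priorityHigh, doer); err != nil {
+//		return err
+//	}
+//	// priorityLow has been removed, so priorityHigh can now be added without
+//	// violating the scope's exclusivity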
+
+// NewIssueLabel creates a new issue-label relation.
+func NewIssueLabel(ctx context.Context, issue *Issue, label *Label, doer *user_model.User) (err error) {
+ if HasIssueLabel(ctx, issue.ID, label.ID) {
+ return nil
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if err = issue.LoadRepo(ctx); err != nil {
+ return err
+ }
+
+ // Do NOT add invalid labels
+ if issue.RepoID != label.RepoID && issue.Repo.OwnerID != label.OrgID {
+ return nil
+ }
+
+ if err = RemoveDuplicateExclusiveIssueLabels(ctx, issue, label, doer); err != nil {
+ return err
+ }
+
+ if err = newIssueLabel(ctx, issue, label, doer); err != nil {
+ return err
+ }
+
+ if err = issue.ReloadLabels(ctx); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
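+
+// Minimal caller sketch (assuming issue, label and doer are already loaded):
+//
+//	if err := NewIssueLabel(ctx, issue, label, doer); err != nil {
+//		return err
+//	}
+//	// issue.Labels now reflects the database state, including the removal of
+//	// any label that shared the new label's exclusive scope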
+
+// newIssueLabels adds labels to an issue, checking that each label is valid for it and skipping any that are already present.
+func newIssueLabels(ctx context.Context, issue *Issue, labels []*Label, doer *user_model.User) (err error) {
+ if err = issue.LoadRepo(ctx); err != nil {
+ return err
+ }
+
+ if err = issue.LoadLabels(ctx); err != nil {
+ return err
+ }
+
+ for _, l := range labels {
+ // Don't add already present labels and invalid labels
+ if HasIssueLabel(ctx, issue.ID, l.ID) ||
+ (l.RepoID != issue.RepoID && l.OrgID != issue.Repo.OwnerID) {
+ continue
+ }
+
+ if err = RemoveDuplicateExclusiveIssueLabels(ctx, issue, l, doer); err != nil {
+ return err
+ }
+
+ if err = newIssueLabel(ctx, issue, l, doer); err != nil {
+ return fmt.Errorf("newIssueLabel: %w", err)
+ }
+ }
+
+ return nil
+}
+
+// NewIssueLabels creates a list of issue-label relations.
+func NewIssueLabels(ctx context.Context, issue *Issue, labels []*Label, doer *user_model.User) (err error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if err = newIssueLabels(ctx, issue, labels, doer); err != nil {
+ return err
+ }
+
+ if err = issue.ReloadLabels(ctx); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
+
+func deleteIssueLabel(ctx context.Context, issue *Issue, label *Label, doer *user_model.User) (err error) {
+ if count, err := db.DeleteByBean(ctx, &IssueLabel{
+ IssueID: issue.ID,
+ LabelID: label.ID,
+ }); err != nil {
+ return err
+ } else if count == 0 {
+ return nil
+ }
+
+ if err = issue.LoadRepo(ctx); err != nil {
+ return err
+ }
+
+ opts := &CreateCommentOptions{
+ Type: CommentTypeLabel,
+ Doer: doer,
+ Repo: issue.Repo,
+ Issue: issue,
+ Label: label,
+ }
+ if _, err = CreateComment(ctx, opts); err != nil {
+ return err
+ }
+
+ return updateLabelCols(ctx, label, "num_issues", "num_closed_issue")
+}
+
+// DeleteIssueLabel deletes the issue-label relation and reloads the issue's labels.
+func DeleteIssueLabel(ctx context.Context, issue *Issue, label *Label, doer *user_model.User) error {
+ if err := deleteIssueLabel(ctx, issue, label, doer); err != nil {
+ return err
+ }
+
+ return issue.ReloadLabels(ctx)
+}
+
+// DeleteLabelsByRepoID deletes all labels of the given repository.
+func DeleteLabelsByRepoID(ctx context.Context, repoID int64) error {
+ deleteCond := builder.Select("id").From("label").Where(builder.Eq{"label.repo_id": repoID})
+
+ if _, err := db.GetEngine(ctx).In("label_id", deleteCond).
+ Delete(&IssueLabel{}); err != nil {
+ return err
+ }
+
+ _, err := db.DeleteByBean(ctx, &Label{RepoID: repoID})
+ return err
+}
+
+// CountOrphanedLabels returns the number of labels which are broken and no longer accessible via the UI.
+func CountOrphanedLabels(ctx context.Context) (int64, error) {
+ noref, err := db.GetEngine(ctx).Table("label").Where("repo_id=? AND org_id=?", 0, 0).Count()
+ if err != nil {
+ return 0, err
+ }
+
+ norepo, err := db.GetEngine(ctx).Table("label").
+ Where(builder.And(
+ builder.Gt{"repo_id": 0},
+ builder.NotIn("repo_id", builder.Select("id").From("`repository`")),
+ )).
+ Count()
+ if err != nil {
+ return 0, err
+ }
+
+ noorg, err := db.GetEngine(ctx).Table("label").
+ Where(builder.And(
+ builder.Gt{"org_id": 0},
+ builder.NotIn("org_id", builder.Select("id").From("`user`")),
+ )).
+ Count()
+ if err != nil {
+ return 0, err
+ }
+
+ return noref + norepo + noorg, nil
+}
+
+// DeleteOrphanedLabels deletes labels which are broken and no longer accessible via the UI.
+func DeleteOrphanedLabels(ctx context.Context) error {
+ // delete labels with no reference
+ if _, err := db.GetEngine(ctx).Table("label").Where("repo_id=? AND org_id=?", 0, 0).Delete(new(Label)); err != nil {
+ return err
+ }
+
+ // delete labels whose repository no longer exists
+ if _, err := db.GetEngine(ctx).
+ Where(builder.And(
+ builder.Gt{"repo_id": 0},
+ builder.NotIn("repo_id", builder.Select("id").From("`repository`")),
+ )).
+ Delete(Label{}); err != nil {
+ return err
+ }
+
+ // delete labels whose organization no longer exists
+ if _, err := db.GetEngine(ctx).
+ Where(builder.And(
+ builder.Gt{"org_id": 0},
+ builder.NotIn("org_id", builder.Select("id").From("`user`")),
+ )).
+ Delete(Label{}); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// CountOrphanedIssueLabels returns the number of IssueLabels whose label no longer exists.
+func CountOrphanedIssueLabels(ctx context.Context) (int64, error) {
+ return db.GetEngine(ctx).Table("issue_label").
+ NotIn("label_id", builder.Select("id").From("label")).
+ Count()
+}
+
+// DeleteOrphanedIssueLabels deletes IssueLabels whose label no longer exists.
+func DeleteOrphanedIssueLabels(ctx context.Context) error {
+ _, err := db.GetEngine(ctx).
+ NotIn("label_id", builder.Select("id").From("label")).
+ Delete(IssueLabel{})
+ return err
+}
+
+// CountIssueLabelWithOutsideLabels counts issue-label relations whose label belongs to neither the issue's repository nor its owner organization.
+func CountIssueLabelWithOutsideLabels(ctx context.Context) (int64, error) {
+ return db.GetEngine(ctx).Where(builder.Expr("(label.org_id = 0 AND issue.repo_id != label.repo_id) OR (label.repo_id = 0 AND label.org_id != repository.owner_id)")).
+ Table("issue_label").
+ Join("inner", "label", "issue_label.label_id = label.id ").
+ Join("inner", "issue", "issue.id = issue_label.issue_id ").
+ Join("inner", "repository", "issue.repo_id = repository.id").
+ Count(new(IssueLabel))
+}
+
+// FixIssueLabelWithOutsideLabels removes issue-label relations whose label belongs to neither the issue's repository nor its owner organization.
+func FixIssueLabelWithOutsideLabels(ctx context.Context) (int64, error) {
+ res, err := db.GetEngine(ctx).Exec(`DELETE FROM issue_label WHERE issue_label.id IN (
+ SELECT il_too.id FROM (
+ SELECT il_too_too.id
+ FROM issue_label AS il_too_too
+ INNER JOIN label ON il_too_too.label_id = label.id
+ INNER JOIN issue on issue.id = il_too_too.issue_id
+ INNER JOIN repository on repository.id = issue.repo_id
+ WHERE
+ (label.org_id = 0 AND issue.repo_id != label.repo_id) OR (label.repo_id = 0 AND label.org_id != repository.owner_id)
+ ) AS il_too )`)
+ if err != nil {
+ return 0, err
+ }
+
+ return res.RowsAffected()
+}
+
+// LoadLabels loads the issue's labels only if they are not already set.
+func (issue *Issue) LoadLabels(ctx context.Context) (err error) {
+ if !issue.isLabelsLoaded && issue.Labels == nil {
+ if err := issue.ReloadLabels(ctx); err != nil {
+ return err
+ }
+ issue.isLabelsLoaded = true
+ }
+ return nil
+}
+
+func (issue *Issue) ReloadLabels(ctx context.Context) (err error) {
+ if issue.ID != 0 {
+ issue.Labels, err = GetLabelsByIssueID(ctx, issue.ID)
+ if err != nil {
+ return fmt.Errorf("getLabelsByIssueID [%d]: %w", issue.ID, err)
+ }
+ }
+ return nil
+}
+
+// GetLabelsByIssueID returns all labels that belong to given issue by ID.
+func GetLabelsByIssueID(ctx context.Context, issueID int64) ([]*Label, error) {
+ var labels []*Label
+ return labels, db.GetEngine(ctx).Where("issue_label.issue_id = ?", issueID).
+ Join("LEFT", "issue_label", "issue_label.label_id = label.id").
+ Asc("label.name").
+ Find(&labels)
+}
+
+func clearIssueLabels(ctx context.Context, issue *Issue, doer *user_model.User) (err error) {
+ if err = issue.LoadLabels(ctx); err != nil {
+ return fmt.Errorf("getLabels: %w", err)
+ }
+
+ for i := range issue.Labels {
+ if err = deleteIssueLabel(ctx, issue, issue.Labels[i], doer); err != nil {
+ return fmt.Errorf("removeLabel: %w", err)
+ }
+ }
+
+ return nil
+}
+
+// ClearIssueLabels removes all issue labels as the given user.
+// Triggers appropriate WebHooks, if any.
+func ClearIssueLabels(ctx context.Context, issue *Issue, doer *user_model.User) (err error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if err := issue.LoadRepo(ctx); err != nil {
+ return err
+ } else if err = issue.LoadPullRequest(ctx); err != nil {
+ return err
+ }
+
+ perm, err := access_model.GetUserRepoPermission(ctx, issue.Repo, doer)
+ if err != nil {
+ return err
+ }
+ if !perm.CanWriteIssuesOrPulls(issue.IsPull) {
+ return ErrRepoLabelNotExist{}
+ }
+
+ if err = clearIssueLabels(ctx, issue, doer); err != nil {
+ return err
+ }
+
+ if err = committer.Commit(); err != nil {
+ return fmt.Errorf("Commit: %w", err)
+ }
+
+ return nil
+}
+
+type labelSorter []*Label
+
+func (ts labelSorter) Len() int {
+ return len([]*Label(ts))
+}
+
+func (ts labelSorter) Less(i, j int) bool {
+ return []*Label(ts)[i].ID < []*Label(ts)[j].ID
+}
+
+func (ts labelSorter) Swap(i, j int) {
+ []*Label(ts)[i], []*Label(ts)[j] = []*Label(ts)[j], []*Label(ts)[i]
+}
+
+// RemoveDuplicateExclusiveLabels ensures only one label of a given exclusive
+// scope remains, with labels later in the slice taking precedence over earlier ones.
+func RemoveDuplicateExclusiveLabels(labels []*Label) []*Label {
+ validLabels := make([]*Label, 0, len(labels))
+
+ for i, label := range labels {
+ scope := label.ExclusiveScope()
+ if scope != "" {
+ foundOther := false
+ for _, otherLabel := range labels[i+1:] {
+ if otherLabel.ExclusiveScope() == scope {
+ foundOther = true
+ break
+ }
+ }
+ if foundOther {
+ continue
+ }
+ }
+ validLabels = append(validLabels, label)
+ }
+
+ return validLabels
+}
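+
+// Worked example with hypothetical labels: given the input
+//
+//	[]*Label{kindBug, priorityLow, priorityHigh} // the priority labels share an exclusive scope
+//
+// the function returns []*Label{kindBug, priorityHigh}; priorityLow is dropped
+// because priorityHigh occupies the same exclusive scope later in the slice.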
+
+// ReplaceIssueLabels removes all current labels and add new labels to the issue.
+// Triggers appropriate WebHooks, if any.
+func ReplaceIssueLabels(ctx context.Context, issue *Issue, labels []*Label, doer *user_model.User) (err error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if err = issue.LoadRepo(ctx); err != nil {
+ return err
+ }
+
+ if err = issue.LoadLabels(ctx); err != nil {
+ return err
+ }
+
+ labels = RemoveDuplicateExclusiveLabels(labels)
+
+ sort.Sort(labelSorter(labels))
+ sort.Sort(labelSorter(issue.Labels))
+
+ var toAdd, toRemove []*Label
+
+ addIndex, removeIndex := 0, 0
+ for addIndex < len(labels) && removeIndex < len(issue.Labels) {
+ addLabel := labels[addIndex]
+ removeLabel := issue.Labels[removeIndex]
+ if addLabel.ID == removeLabel.ID {
+ // Silently drop invalid labels
+ if removeLabel.RepoID != issue.RepoID && removeLabel.OrgID != issue.Repo.OwnerID {
+ toRemove = append(toRemove, removeLabel)
+ }
+
+ addIndex++
+ removeIndex++
+ } else if addLabel.ID < removeLabel.ID {
+ // Only add if the label is valid
+ if addLabel.RepoID == issue.RepoID || addLabel.OrgID == issue.Repo.OwnerID {
+ toAdd = append(toAdd, addLabel)
+ }
+ addIndex++
+ } else {
+ toRemove = append(toRemove, removeLabel)
+ removeIndex++
+ }
+ }
+ toAdd = append(toAdd, labels[addIndex:]...)
+ toRemove = append(toRemove, issue.Labels[removeIndex:]...)
+
+ if len(toAdd) > 0 {
+ if err = newIssueLabels(ctx, issue, toAdd, doer); err != nil {
+ return fmt.Errorf("addLabels: %w", err)
+ }
+ }
+
+ for _, l := range toRemove {
+ if err = deleteIssueLabel(ctx, issue, l, doer); err != nil {
+ return fmt.Errorf("removeLabel: %w", err)
+ }
+ }
+
+ if err = issue.ReloadLabels(ctx); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
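+
+// The loop above is a sorted merge: both slices are ordered by label ID, so a
+// single pass computes toAdd and toRemove. Sketch with hypothetical IDs:
+// current labels {1, 4} and requested labels {1, 3} yield toAdd = {3} and
+// toRemove = {4}, while label 1 is left untouched.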
diff --git a/models/issues/issue_label_test.go b/models/issues/issue_label_test.go
new file mode 100644
index 0000000..67f4874
--- /dev/null
+++ b/models/issues/issue_label_test.go
@@ -0,0 +1,138 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestIssueNewIssueLabels(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 2})
+ label1 := unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 1})
+ label2 := unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 4})
+ doer := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+
+ label3 := &issues_model.Label{RepoID: 1, Name: "label3", Color: "#123"}
+ require.NoError(t, issues_model.NewLabel(db.DefaultContext, label3))
+
+ // label1 is already set, do nothing
+ // label3 is new, add it
+ require.NoError(t, issues_model.NewIssueLabels(db.DefaultContext, issue, []*issues_model.Label{label1, label3}, doer))
+
+ assert.Len(t, issue.Labels, 3)
+ // check that the pre-existing label1 is still present
+ assert.Equal(t, label1.ID, issue.Labels[0].ID)
+ // check that new label3 was added
+ assert.Equal(t, label3.ID, issue.Labels[1].ID)
+ // check that pre-existing label2 was not removed
+ assert.Equal(t, label2.ID, issue.Labels[2].ID)
+}
+
+func TestIssueNewIssueLabel(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 3})
+ doer := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+
+ label := &issues_model.Label{RepoID: 1, Name: "label3", Color: "#123"}
+ require.NoError(t, issues_model.NewLabel(db.DefaultContext, label))
+
+ require.NoError(t, issues_model.NewIssueLabel(db.DefaultContext, issue, label, doer))
+
+ assert.Len(t, issue.Labels, 1)
+ assert.Equal(t, label.ID, issue.Labels[0].ID)
+}
+
+func TestIssueReplaceIssueLabels(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 2})
+ label1 := unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 1})
+ label2 := unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 4})
+ doer := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+
+ label3 := &issues_model.Label{RepoID: 1, Name: "label3", Color: "#123"}
+ require.NoError(t, issues_model.NewLabel(db.DefaultContext, label3))
+
+ issue.LoadLabels(db.DefaultContext)
+ assert.Len(t, issue.Labels, 2)
+ assert.Equal(t, label1.ID, issue.Labels[0].ID)
+ assert.Equal(t, label2.ID, issue.Labels[1].ID)
+
+ // label1 is already set, do nothing
+ // label3 is new, add it
+ // label2 is not in the list but already set, remove it
+ require.NoError(t, issues_model.ReplaceIssueLabels(db.DefaultContext, issue, []*issues_model.Label{label1, label3}, doer))
+
+ assert.Len(t, issue.Labels, 2)
+ assert.Equal(t, label1.ID, issue.Labels[0].ID)
+ assert.Equal(t, label3.ID, issue.Labels[1].ID)
+}
+
+func TestIssueDeleteIssueLabel(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 2})
+ label1 := unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 1})
+ label2 := unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 4})
+ doer := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+
+ issue.LoadLabels(db.DefaultContext)
+ assert.Len(t, issue.Labels, 2)
+ assert.Equal(t, label1.ID, issue.Labels[0].ID)
+ assert.Equal(t, label2.ID, issue.Labels[1].ID)
+
+ require.NoError(t, issues_model.DeleteIssueLabel(db.DefaultContext, issue, label2, doer))
+
+ assert.Len(t, issue.Labels, 1)
+ assert.Equal(t, label1.ID, issue.Labels[0].ID)
+}
+
+func TestIssueLoadLabels(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 2})
+ label1 := unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 1})
+ label2 := unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 4})
+
+ assert.Empty(t, issue.Labels)
+ issue.LoadLabels(db.DefaultContext)
+ assert.Len(t, issue.Labels, 2)
+ assert.Equal(t, label1.ID, issue.Labels[0].ID)
+ assert.Equal(t, label2.ID, issue.Labels[1].ID)
+
+ unittest.AssertSuccessfulDelete(t, &issues_model.IssueLabel{IssueID: issue.ID, LabelID: label2.ID})
+
+ // the database change is not noticed because the labels are cached
+ issue.LoadLabels(db.DefaultContext)
+ assert.Len(t, issue.Labels, 2)
+
+ issue.ReloadLabels(db.DefaultContext)
+ assert.Len(t, issue.Labels, 1)
+ assert.Equal(t, label1.ID, issue.Labels[0].ID)
+}
+
+func TestNewIssueLabelsScope(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 18})
+ label1 := unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 7})
+ label2 := unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 8})
+ doer := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+
+ require.NoError(t, issues_model.NewIssueLabels(db.DefaultContext, issue, []*issues_model.Label{label1, label2}, doer))
+
+ assert.Len(t, issue.Labels, 1)
+ assert.Equal(t, label2.ID, issue.Labels[0].ID)
+}
diff --git a/models/issues/issue_list.go b/models/issues/issue_list.go
new file mode 100644
index 0000000..fe6c630
--- /dev/null
+++ b/models/issues/issue_list.go
@@ -0,0 +1,622 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ project_model "code.gitea.io/gitea/models/project"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/container"
+
+ "xorm.io/builder"
+)
+
+// IssueList defines a list of issues
+type IssueList []*Issue
+
+// getRepoIDs collects the repo IDs to load later; they cover both issue.Repo and issue.PullRequest.HeadRepo
+func (issues IssueList) getRepoIDs() []int64 {
+ repoIDs := make(container.Set[int64], len(issues))
+ for _, issue := range issues {
+ if issue.Repo == nil {
+ repoIDs.Add(issue.RepoID)
+ }
+ if issue.PullRequest != nil && issue.PullRequest.HeadRepo == nil {
+ repoIDs.Add(issue.PullRequest.HeadRepoID)
+ }
+ }
+ return repoIDs.Values()
+}
+
+// LoadRepositories loads issues' all repositories
+func (issues IssueList) LoadRepositories(ctx context.Context) (repo_model.RepositoryList, error) {
+ if len(issues) == 0 {
+ return nil, nil
+ }
+
+ repoIDs := issues.getRepoIDs()
+ repoMaps := make(map[int64]*repo_model.Repository, len(repoIDs))
+ left := len(repoIDs)
+ for left > 0 {
+ limit := db.DefaultMaxInSize
+ if left < limit {
+ limit = left
+ }
+ err := db.GetEngine(ctx).
+ In("id", repoIDs[:limit]).
+ Find(&repoMaps)
+ if err != nil {
+ return nil, fmt.Errorf("find repository: %w", err)
+ }
+ left -= limit
+ repoIDs = repoIDs[limit:]
+ }
+
+ for _, issue := range issues {
+ if issue.Repo == nil {
+ issue.Repo = repoMaps[issue.RepoID]
+ } else {
+ repoMaps[issue.RepoID] = issue.Repo
+ }
+ if issue.PullRequest != nil {
+ issue.PullRequest.BaseRepo = issue.Repo
+ if issue.PullRequest.HeadRepo == nil {
+ issue.PullRequest.HeadRepo = repoMaps[issue.PullRequest.HeadRepoID]
+ }
+ }
+ }
+ return repo_model.ValuesRepository(repoMaps), nil
+}
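+
+// Note: the paging loop above is this package's standard idiom for keeping
+// IN() clauses below db.DefaultMaxInSize. Reduced to its core (a sketch; with
+// Go 1.21's built-in min it reads):
+//
+//	for left > 0 {
+//		limit := min(left, db.DefaultMaxInSize)
+//		// ... query ids[:limit] and merge the results ...
+//		left -= limit
+//		ids = ids[limit:]
+//	}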
+
+func (issues IssueList) LoadPosters(ctx context.Context) error {
+ if len(issues) == 0 {
+ return nil
+ }
+
+ posterIDs := container.FilterSlice(issues, func(issue *Issue) (int64, bool) {
+ return issue.PosterID, issue.Poster == nil && user_model.IsValidUserID(issue.PosterID)
+ })
+
+ posterMaps, err := getPostersByIDs(ctx, posterIDs)
+ if err != nil {
+ return err
+ }
+
+ for _, issue := range issues {
+ if issue.Poster == nil {
+ issue.PosterID, issue.Poster = user_model.GetUserFromMap(issue.PosterID, posterMaps)
+ }
+ }
+ return nil
+}
+
+func getPostersByIDs(ctx context.Context, posterIDs []int64) (map[int64]*user_model.User, error) {
+ posterMaps := make(map[int64]*user_model.User, len(posterIDs))
+ left := len(posterIDs)
+ for left > 0 {
+ limit := db.DefaultMaxInSize
+ if left < limit {
+ limit = left
+ }
+ err := db.GetEngine(ctx).
+ In("id", posterIDs[:limit]).
+ Find(&posterMaps)
+ if err != nil {
+ return nil, err
+ }
+ left -= limit
+ posterIDs = posterIDs[limit:]
+ }
+ return posterMaps, nil
+}
+
+func (issues IssueList) getIssueIDs() []int64 {
+ ids := make([]int64, 0, len(issues))
+ for _, issue := range issues {
+ ids = append(ids, issue.ID)
+ }
+ return ids
+}
+
+func (issues IssueList) LoadLabels(ctx context.Context) error {
+ if len(issues) == 0 {
+ return nil
+ }
+
+ type LabelIssue struct {
+ Label *Label `xorm:"extends"`
+ IssueLabel *IssueLabel `xorm:"extends"`
+ }
+
+ issueLabels := make(map[int64][]*Label, len(issues)*3)
+ issueIDs := issues.getIssueIDs()
+ left := len(issueIDs)
+ for left > 0 {
+ limit := db.DefaultMaxInSize
+ if left < limit {
+ limit = left
+ }
+ rows, err := db.GetEngine(ctx).Table("label").
+ Join("LEFT", "issue_label", "issue_label.label_id = label.id").
+ In("issue_label.issue_id", issueIDs[:limit]).
+ Asc("label.name").
+ Rows(new(LabelIssue))
+ if err != nil {
+ return err
+ }
+
+ for rows.Next() {
+ var labelIssue LabelIssue
+ err = rows.Scan(&labelIssue)
+ if err != nil {
+ if err1 := rows.Close(); err1 != nil {
+ return fmt.Errorf("IssueList.LoadLabels: Close: %w", err1)
+ }
+ return err
+ }
+ issueLabels[labelIssue.IssueLabel.IssueID] = append(issueLabels[labelIssue.IssueLabel.IssueID], labelIssue.Label)
+ }
+ // All rows have been consumed; close the cursor before moving to the next batch.
+ if err1 := rows.Close(); err1 != nil {
+ return fmt.Errorf("IssueList.LoadLabels: Close: %w", err1)
+ }
+ left -= limit
+ issueIDs = issueIDs[limit:]
+ }
+
+ for _, issue := range issues {
+ issue.Labels = issueLabels[issue.ID]
+ issue.isLabelsLoaded = true
+ }
+ return nil
+}
+
+func (issues IssueList) getMilestoneIDs() []int64 {
+ return container.FilterSlice(issues, func(issue *Issue) (int64, bool) {
+ return issue.MilestoneID, true
+ })
+}
+
+func (issues IssueList) LoadMilestones(ctx context.Context) error {
+ milestoneIDs := issues.getMilestoneIDs()
+ if len(milestoneIDs) == 0 {
+ return nil
+ }
+
+ milestoneMaps := make(map[int64]*Milestone, len(milestoneIDs))
+ left := len(milestoneIDs)
+ for left > 0 {
+ limit := db.DefaultMaxInSize
+ if left < limit {
+ limit = left
+ }
+ err := db.GetEngine(ctx).
+ In("id", milestoneIDs[:limit]).
+ Find(&milestoneMaps)
+ if err != nil {
+ return err
+ }
+ left -= limit
+ milestoneIDs = milestoneIDs[limit:]
+ }
+
+ for _, issue := range issues {
+ issue.Milestone = milestoneMaps[issue.MilestoneID]
+ issue.isMilestoneLoaded = true
+ }
+ return nil
+}
+
+func (issues IssueList) LoadProjects(ctx context.Context) error {
+ issueIDs := issues.getIssueIDs()
+ projectMaps := make(map[int64]*project_model.Project, len(issues))
+ left := len(issueIDs)
+
+ type projectWithIssueID struct {
+ *project_model.Project `xorm:"extends"`
+ IssueID int64
+ }
+
+ for left > 0 {
+ limit := db.DefaultMaxInSize
+ if left < limit {
+ limit = left
+ }
+
+ projects := make([]*projectWithIssueID, 0, limit)
+ err := db.GetEngine(ctx).
+ Table("project").
+ Select("project.*, project_issue.issue_id").
+ Join("INNER", "project_issue", "project.id = project_issue.project_id").
+ In("project_issue.issue_id", issueIDs[:limit]).
+ Find(&projects)
+ if err != nil {
+ return err
+ }
+ for _, project := range projects {
+ projectMaps[project.IssueID] = project.Project
+ }
+ left -= limit
+ issueIDs = issueIDs[limit:]
+ }
+
+ for _, issue := range issues {
+ issue.Project = projectMaps[issue.ID]
+ }
+ return nil
+}
+
+func (issues IssueList) LoadAssignees(ctx context.Context) error {
+ if len(issues) == 0 {
+ return nil
+ }
+
+ type AssigneeIssue struct {
+ IssueAssignee *IssueAssignees `xorm:"extends"`
+ Assignee *user_model.User `xorm:"extends"`
+ }
+
+ assignees := make(map[int64][]*user_model.User, len(issues))
+ issueIDs := issues.getIssueIDs()
+ left := len(issueIDs)
+ for left > 0 {
+ limit := db.DefaultMaxInSize
+ if left < limit {
+ limit = left
+ }
+ rows, err := db.GetEngine(ctx).Table("issue_assignees").
+ Join("INNER", "`user`", "`user`.id = `issue_assignees`.assignee_id").
+ In("`issue_assignees`.issue_id", issueIDs[:limit]).OrderBy(user_model.GetOrderByName()).
+ Rows(new(AssigneeIssue))
+ if err != nil {
+ return err
+ }
+
+ for rows.Next() {
+ var assigneeIssue AssigneeIssue
+ err = rows.Scan(&assigneeIssue)
+ if err != nil {
+ if err1 := rows.Close(); err1 != nil {
+ return fmt.Errorf("IssueList.loadAssignees: Close: %w", err1)
+ }
+ return err
+ }
+
+ assignees[assigneeIssue.IssueAssignee.IssueID] = append(assignees[assigneeIssue.IssueAssignee.IssueID], assigneeIssue.Assignee)
+ }
+ if err1 := rows.Close(); err1 != nil {
+ return fmt.Errorf("IssueList.loadAssignees: Close: %w", err1)
+ }
+ left -= limit
+ issueIDs = issueIDs[limit:]
+ }
+
+ for _, issue := range issues {
+ issue.Assignees = assignees[issue.ID]
+ if len(issue.Assignees) > 0 {
+ issue.Assignee = issue.Assignees[0]
+ }
+ issue.isAssigneeLoaded = true
+ }
+ return nil
+}
+
+func (issues IssueList) getPullIssueIDs() []int64 {
+ ids := make([]int64, 0, len(issues))
+ for _, issue := range issues {
+ if issue.IsPull && issue.PullRequest == nil {
+ ids = append(ids, issue.ID)
+ }
+ }
+ return ids
+}
+
+// LoadPullRequests loads pull requests
+func (issues IssueList) LoadPullRequests(ctx context.Context) error {
+ issuesIDs := issues.getPullIssueIDs()
+ if len(issuesIDs) == 0 {
+ return nil
+ }
+
+ pullRequestMaps := make(map[int64]*PullRequest, len(issuesIDs))
+ left := len(issuesIDs)
+ for left > 0 {
+ limit := db.DefaultMaxInSize
+ if left < limit {
+ limit = left
+ }
+ rows, err := db.GetEngine(ctx).
+ In("issue_id", issuesIDs[:limit]).
+ Rows(new(PullRequest))
+ if err != nil {
+ return err
+ }
+
+ for rows.Next() {
+ var pr PullRequest
+ err = rows.Scan(&pr)
+ if err != nil {
+ if err1 := rows.Close(); err1 != nil {
+ return fmt.Errorf("IssueList.loadPullRequests: Close: %w", err1)
+ }
+ return err
+ }
+ pullRequestMaps[pr.IssueID] = &pr
+ }
+ if err1 := rows.Close(); err1 != nil {
+ return fmt.Errorf("IssueList.loadPullRequests: Close: %w", err1)
+ }
+ left -= limit
+ issuesIDs = issuesIDs[limit:]
+ }
+
+ for _, issue := range issues {
+ issue.PullRequest = pullRequestMaps[issue.ID]
+ if issue.PullRequest != nil {
+ issue.PullRequest.Issue = issue
+ }
+ }
+ return nil
+}
+
+// LoadAttachments loads attachments
+func (issues IssueList) LoadAttachments(ctx context.Context) (err error) {
+ if len(issues) == 0 {
+ return nil
+ }
+
+ attachments := make(map[int64][]*repo_model.Attachment, len(issues))
+ issuesIDs := issues.getIssueIDs()
+ left := len(issuesIDs)
+ for left > 0 {
+ limit := db.DefaultMaxInSize
+ if left < limit {
+ limit = left
+ }
+ rows, err := db.GetEngine(ctx).
+ In("issue_id", issuesIDs[:limit]).
+ Rows(new(repo_model.Attachment))
+ if err != nil {
+ return err
+ }
+
+ for rows.Next() {
+ var attachment repo_model.Attachment
+ err = rows.Scan(&attachment)
+ if err != nil {
+ if err1 := rows.Close(); err1 != nil {
+ return fmt.Errorf("IssueList.loadAttachments: Close: %w", err1)
+ }
+ return err
+ }
+ attachments[attachment.IssueID] = append(attachments[attachment.IssueID], &attachment)
+ }
+ if err1 := rows.Close(); err1 != nil {
+ return fmt.Errorf("IssueList.loadAttachments: Close: %w", err1)
+ }
+ left -= limit
+ issuesIDs = issuesIDs[limit:]
+ }
+
+ for _, issue := range issues {
+ issue.Attachments = attachments[issue.ID]
+ issue.isAttachmentsLoaded = true
+ }
+ return nil
+}
+
+func (issues IssueList) loadComments(ctx context.Context, cond builder.Cond) (err error) {
+ if len(issues) == 0 {
+ return nil
+ }
+
+ comments := make(map[int64][]*Comment, len(issues))
+ issuesIDs := issues.getIssueIDs()
+ left := len(issuesIDs)
+ for left > 0 {
+ limit := db.DefaultMaxInSize
+ if left < limit {
+ limit = left
+ }
+ rows, err := db.GetEngine(ctx).Table("comment").
+ Join("INNER", "issue", "issue.id = comment.issue_id").
+ In("issue.id", issuesIDs[:limit]).
+ Where(cond).
+ Rows(new(Comment))
+ if err != nil {
+ return err
+ }
+
+ for rows.Next() {
+ var comment Comment
+ err = rows.Scan(&comment)
+ if err != nil {
+ if err1 := rows.Close(); err1 != nil {
+ return fmt.Errorf("IssueList.loadComments: Close: %w", err1)
+ }
+ return err
+ }
+ comments[comment.IssueID] = append(comments[comment.IssueID], &comment)
+ }
+ if err1 := rows.Close(); err1 != nil {
+ return fmt.Errorf("IssueList.loadComments: Close: %w", err1)
+ }
+ left -= limit
+ issuesIDs = issuesIDs[limit:]
+ }
+
+ for _, issue := range issues {
+ issue.Comments = comments[issue.ID]
+ }
+ return nil
+}
+
+func (issues IssueList) loadTotalTrackedTimes(ctx context.Context) (err error) {
+ type totalTimesByIssue struct {
+ IssueID int64
+ Time int64
+ }
+ if len(issues) == 0 {
+ return nil
+ }
+ trackedTimes := make(map[int64]int64, len(issues))
+
+ reposMap := make(map[int64]*repo_model.Repository, len(issues))
+ for _, issue := range issues {
+ reposMap[issue.RepoID] = issue.Repo
+ }
+ repos := repo_model.RepositoryListOfMap(reposMap)
+
+ if err := repos.LoadUnits(ctx); err != nil {
+ return err
+ }
+
+ ids := make([]int64, 0, len(issues))
+ for _, issue := range issues {
+ if issue.Repo.IsTimetrackerEnabled(ctx) {
+ ids = append(ids, issue.ID)
+ }
+ }
+
+ left := len(ids)
+ for left > 0 {
+ limit := db.DefaultMaxInSize
+ if left < limit {
+ limit = left
+ }
+
+ // select issue_id, sum(time) from tracked_time where issue_id in (<issue ids in current page>) group by issue_id
+ rows, err := db.GetEngine(ctx).Table("tracked_time").
+ Where("deleted = ?", false).
+ Select("issue_id, sum(time) as time").
+ In("issue_id", ids[:limit]).
+ GroupBy("issue_id").
+ Rows(new(totalTimesByIssue))
+ if err != nil {
+ return err
+ }
+
+ for rows.Next() {
+ var totalTime totalTimesByIssue
+ err = rows.Scan(&totalTime)
+ if err != nil {
+ if err1 := rows.Close(); err1 != nil {
+ return fmt.Errorf("IssueList.loadTotalTrackedTimes: Close: %w", err1)
+ }
+ return err
+ }
+ trackedTimes[totalTime.IssueID] = totalTime.Time
+ }
+ if err1 := rows.Close(); err1 != nil {
+ return fmt.Errorf("IssueList.loadTotalTrackedTimes: Close: %w", err1)
+ }
+ left -= limit
+ ids = ids[limit:]
+ }
+
+ for _, issue := range issues {
+ issue.TotalTrackedTime = trackedTimes[issue.ID]
+ }
+ return nil
+}
+
+// LoadAttributes loads all attributes except attachments and comments.
+func (issues IssueList) LoadAttributes(ctx context.Context) error {
+ if _, err := issues.LoadRepositories(ctx); err != nil {
+ return fmt.Errorf("issue.loadAttributes: LoadRepositories: %w", err)
+ }
+
+ if err := issues.LoadPosters(ctx); err != nil {
+ return fmt.Errorf("issue.loadAttributes: LoadPosters: %w", err)
+ }
+
+ if err := issues.LoadLabels(ctx); err != nil {
+ return fmt.Errorf("issue.loadAttributes: LoadLabels: %w", err)
+ }
+
+ if err := issues.LoadMilestones(ctx); err != nil {
+ return fmt.Errorf("issue.loadAttributes: LoadMilestones: %w", err)
+ }
+
+ if err := issues.LoadProjects(ctx); err != nil {
+ return fmt.Errorf("issue.loadAttributes: loadProjects: %w", err)
+ }
+
+ if err := issues.LoadAssignees(ctx); err != nil {
+ return fmt.Errorf("issue.loadAttributes: loadAssignees: %w", err)
+ }
+
+ if err := issues.LoadPullRequests(ctx); err != nil {
+ return fmt.Errorf("issue.loadAttributes: loadPullRequests: %w", err)
+ }
+
+ if err := issues.loadTotalTrackedTimes(ctx); err != nil {
+ return fmt.Errorf("issue.loadAttributes: loadTotalTrackedTimes: %w", err)
+ }
+
+ return nil
+}
+
+// LoadComments loads comments
+func (issues IssueList) LoadComments(ctx context.Context) error {
+ return issues.loadComments(ctx, builder.NewCond())
+}
+
+// LoadDiscussComments loads discuss comments
+func (issues IssueList) LoadDiscussComments(ctx context.Context) error {
+ return issues.loadComments(ctx, builder.Eq{"comment.type": CommentTypeComment})
+}
+
+// GetApprovalCounts returns a map of issue ID to slice of approval counts
+// FIXME: only returns official counts due to double counting of non-official approvals
+func (issues IssueList) GetApprovalCounts(ctx context.Context) (map[int64][]*ReviewCount, error) {
+ rCounts := make([]*ReviewCount, 0, 2*len(issues))
+ ids := make([]int64, len(issues))
+ for i, issue := range issues {
+ ids[i] = issue.ID
+ }
+ sess := db.GetEngine(ctx).In("issue_id", ids)
+ err := sess.Select("issue_id, type, count(id) as `count`").
+ Where("official = ? AND dismissed = ?", true, false).
+ GroupBy("issue_id, type").
+ OrderBy("issue_id").
+ Table("review").
+ Find(&rCounts)
+ if err != nil {
+ return nil, err
+ }
+
+ approvalCountMap := make(map[int64][]*ReviewCount, len(issues))
+
+ for _, c := range rCounts {
+ approvalCountMap[c.IssueID] = append(approvalCountMap[c.IssueID], c)
+ }
+
+ return approvalCountMap, nil
+}
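+
+// Usage sketch (assuming ReviewCount mirrors the selected issue_id, type and
+// count columns):
+//
+//	counts, err := prs.GetApprovalCounts(ctx) // prs is an IssueList of pull issues
+//	if err == nil {
+//		for _, rc := range counts[pr.IssueID] {
+//			if rc.Type == ReviewTypeApprove {
+//				approvals = rc.Count // official approvals for this PR
+//			}
+//		}
+//	}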
+
+func (issues IssueList) LoadIsRead(ctx context.Context, userID int64) error {
+ issueIDs := issues.getIssueIDs()
+ issueUsers := make([]*IssueUser, 0, len(issueIDs))
+ if err := db.GetEngine(ctx).Where("uid =?", userID).
+ In("issue_id", issueIDs).
+ Find(&issueUsers); err != nil {
+ return err
+ }
+
+ for _, issueUser := range issueUsers {
+ for _, issue := range issues {
+ if issue.ID == issueUser.IssueID {
+ issue.IsRead = issueUser.IsRead
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/models/issues/issue_list_test.go b/models/issues/issue_list_test.go
new file mode 100644
index 0000000..32cc0fe
--- /dev/null
+++ b/models/issues/issue_list_test.go
@@ -0,0 +1,129 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/setting"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestIssueList_LoadRepositories(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ issueList := issues_model.IssueList{
+ unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 1}),
+ unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 2}),
+ unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 4}),
+ }
+
+ repos, err := issueList.LoadRepositories(db.DefaultContext)
+ require.NoError(t, err)
+ assert.Len(t, repos, 2)
+ for _, issue := range issueList {
+ assert.EqualValues(t, issue.RepoID, issue.Repo.ID)
+ }
+}
+
+func TestIssueList_LoadAttributes(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ setting.Service.EnableTimetracking = true
+ issueList := issues_model.IssueList{
+ unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 1}),
+ unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 4}),
+ }
+
+ require.NoError(t, issueList.LoadAttributes(db.DefaultContext))
+ for _, issue := range issueList {
+ assert.EqualValues(t, issue.RepoID, issue.Repo.ID)
+ for _, label := range issue.Labels {
+ assert.EqualValues(t, issue.RepoID, label.RepoID)
+ unittest.AssertExistsAndLoadBean(t, &issues_model.IssueLabel{IssueID: issue.ID, LabelID: label.ID})
+ }
+ if issue.PosterID > 0 {
+ assert.EqualValues(t, issue.PosterID, issue.Poster.ID)
+ }
+ if issue.AssigneeID > 0 {
+ assert.EqualValues(t, issue.AssigneeID, issue.Assignee.ID)
+ }
+ if issue.MilestoneID > 0 {
+ assert.EqualValues(t, issue.MilestoneID, issue.Milestone.ID)
+ }
+ if issue.IsPull {
+ assert.EqualValues(t, issue.ID, issue.PullRequest.IssueID)
+ }
+ for _, attachment := range issue.Attachments {
+ assert.EqualValues(t, issue.ID, attachment.IssueID)
+ }
+ for _, comment := range issue.Comments {
+ assert.EqualValues(t, issue.ID, comment.IssueID)
+ }
+ if issue.ID == int64(1) {
+ assert.Equal(t, int64(400), issue.TotalTrackedTime)
+ assert.NotNil(t, issue.Project)
+ assert.Equal(t, int64(1), issue.Project.ID)
+ } else {
+ assert.Nil(t, issue.Project)
+ }
+ }
+
+ require.NoError(t, issueList.LoadIsRead(db.DefaultContext, 1))
+ for _, issue := range issueList {
+ assert.Equal(t, issue.ID == 1, issue.IsRead, "unexpected is_read value for issue[%d]", issue.ID)
+ }
+}
+
+func TestIssueListLoadUser(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{})
+ doer := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+
+ for _, testCase := range []struct {
+ poster int64
+ user *user_model.User
+ }{
+ {
+ poster: user_model.ActionsUserID,
+ user: user_model.NewActionsUser(),
+ },
+ {
+ poster: user_model.GhostUserID,
+ user: user_model.NewGhostUser(),
+ },
+ {
+ poster: doer.ID,
+ user: doer,
+ },
+ {
+ poster: 0,
+ user: user_model.NewGhostUser(),
+ },
+ {
+ poster: -200,
+ user: user_model.NewGhostUser(),
+ },
+ {
+ poster: 200,
+ user: user_model.NewGhostUser(),
+ },
+ } {
+ t.Run(testCase.user.Name, func(t *testing.T) {
+ list := issues_model.IssueList{issue}
+
+ issue.PosterID = testCase.poster
+ issue.Poster = nil
+ require.NoError(t, list.LoadPosters(db.DefaultContext))
+ require.NotNil(t, issue.Poster)
+ assert.Equal(t, testCase.user.ID, issue.Poster.ID)
+ })
+ }
+}
diff --git a/models/issues/issue_lock.go b/models/issues/issue_lock.go
new file mode 100644
index 0000000..b21629b
--- /dev/null
+++ b/models/issues/issue_lock.go
@@ -0,0 +1,66 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ user_model "code.gitea.io/gitea/models/user"
+)
+
+// IssueLockOptions defines options for locking and/or unlocking an issue/PR
+type IssueLockOptions struct {
+ Doer *user_model.User
+ Issue *Issue
+ Reason string
+}
+
+// LockIssue locks an issue, limiting commenting to users with write access
+// to the repository.
+func LockIssue(ctx context.Context, opts *IssueLockOptions) error {
+ return updateIssueLock(ctx, opts, true)
+}
+
+// UnlockIssue unlocks a previously locked issue.
+func UnlockIssue(ctx context.Context, opts *IssueLockOptions) error {
+ return updateIssueLock(ctx, opts, false)
+}
+
+func updateIssueLock(ctx context.Context, opts *IssueLockOptions, lock bool) error {
+ if opts.Issue.IsLocked == lock {
+ return nil
+ }
+
+ opts.Issue.IsLocked = lock
+ var commentType CommentType
+ if opts.Issue.IsLocked {
+ commentType = CommentTypeLock
+ } else {
+ commentType = CommentTypeUnlock
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if err := UpdateIssueCols(ctx, opts.Issue, "is_locked"); err != nil {
+ return err
+ }
+
+ opt := &CreateCommentOptions{
+ Doer: opts.Doer,
+ Issue: opts.Issue,
+ Repo: opts.Issue.Repo,
+ Type: commentType,
+ Content: opts.Reason,
+ }
+ if _, err := CreateComment(ctx, opt); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
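+
+// Caller-side sketch (assuming permission checks happen in the service layer;
+// issues_model is the import alias used by this codebase's tests):
+//
+//	err := issues_model.LockIssue(ctx, &issues_model.IssueLockOptions{
+//		Doer:   doer,
+//		Issue:  issue,
+//		Reason: "resolved",
+//	})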
diff --git a/models/issues/issue_project.go b/models/issues/issue_project.go
new file mode 100644
index 0000000..835ea1d
--- /dev/null
+++ b/models/issues/issue_project.go
@@ -0,0 +1,162 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ project_model "code.gitea.io/gitea/models/project"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/util"
+)
+
+// LoadProject loads the project the issue is assigned to, if any.
+func (issue *Issue) LoadProject(ctx context.Context) (err error) {
+ if issue.Project == nil {
+ var p project_model.Project
+ has, err := db.GetEngine(ctx).Table("project").
+ Join("INNER", "project_issue", "project.id=project_issue.project_id").
+ Where("project_issue.issue_id = ?", issue.ID).Get(&p)
+ if err != nil {
+ return err
+ } else if has {
+ issue.Project = &p
+ }
+ }
+ return nil
+}
+
+func (issue *Issue) projectID(ctx context.Context) int64 {
+ var ip project_model.ProjectIssue
+ has, err := db.GetEngine(ctx).Where("issue_id=?", issue.ID).Get(&ip)
+ if err != nil || !has {
+ return 0
+ }
+ return ip.ProjectID
+}
+
+// ProjectColumnID returns the ID of the project column the issue is assigned to, or 0 if none.
+func (issue *Issue) ProjectColumnID(ctx context.Context) int64 {
+ var ip project_model.ProjectIssue
+ has, err := db.GetEngine(ctx).Where("issue_id=?", issue.ID).Get(&ip)
+ if err != nil || !has {
+ return 0
+ }
+ return ip.ProjectColumnID
+}
+
+// LoadIssuesFromColumn loads the issues assigned to the given column.
+func LoadIssuesFromColumn(ctx context.Context, b *project_model.Column) (IssueList, error) {
+ issueList, err := Issues(ctx, &IssuesOptions{
+ ProjectColumnID: b.ID,
+ ProjectID: b.ProjectID,
+ SortType: "project-column-sorting",
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if b.Default {
+ issues, err := Issues(ctx, &IssuesOptions{
+ ProjectColumnID: db.NoConditionID,
+ ProjectID: b.ProjectID,
+ SortType: "project-column-sorting",
+ })
+ if err != nil {
+ return nil, err
+ }
+ issueList = append(issueList, issues...)
+ }
+
+ if err := issueList.LoadComments(ctx); err != nil {
+ return nil, err
+ }
+
+ return issueList, nil
+}
+
+// LoadIssuesFromColumnList loads the issues assigned to each of the given columns.
+func LoadIssuesFromColumnList(ctx context.Context, bs project_model.ColumnList) (map[int64]IssueList, error) {
+ issuesMap := make(map[int64]IssueList, len(bs))
+ for i := range bs {
+ il, err := LoadIssuesFromColumn(ctx, bs[i])
+ if err != nil {
+ return nil, err
+ }
+ issuesMap[bs[i].ID] = il
+ }
+ return issuesMap, nil
+}
+
+// IssueAssignOrRemoveProject changes the project associated with an issue.
+// If newProjectID is 0, the issue is removed from the project.
+func IssueAssignOrRemoveProject(ctx context.Context, issue *Issue, doer *user_model.User, newProjectID, newColumnID int64) error {
+ return db.WithTx(ctx, func(ctx context.Context) error {
+ oldProjectID := issue.projectID(ctx)
+
+ if err := issue.LoadRepo(ctx); err != nil {
+ return err
+ }
+
+ // Only check access when assigning a new project, not when removing one.
+ if newProjectID > 0 {
+ newProject, err := project_model.GetProjectByID(ctx, newProjectID)
+ if err != nil {
+ return err
+ }
+ if !newProject.CanBeAccessedByOwnerRepo(issue.Repo.OwnerID, issue.Repo) {
+ return util.NewPermissionDeniedErrorf("issue %d can't be accessed by project %d", issue.ID, newProject.ID)
+ }
+ if newColumnID == 0 {
+ newDefaultColumn, err := newProject.GetDefaultColumn(ctx)
+ if err != nil {
+ return err
+ }
+ newColumnID = newDefaultColumn.ID
+ }
+ }
+
+ if _, err := db.GetEngine(ctx).Where("project_issue.issue_id=?", issue.ID).Delete(&project_model.ProjectIssue{}); err != nil {
+ return err
+ }
+
+ if oldProjectID > 0 || newProjectID > 0 {
+ if _, err := CreateComment(ctx, &CreateCommentOptions{
+ Type: CommentTypeProject,
+ Doer: doer,
+ Repo: issue.Repo,
+ Issue: issue,
+ OldProjectID: oldProjectID,
+ ProjectID: newProjectID,
+ }); err != nil {
+ return err
+ }
+ }
+ if newProjectID == 0 {
+ return nil
+ }
+ if newColumnID == 0 {
+ panic("newColumnID must not be zero") // shouldn't happen
+ }
+
+ res := struct {
+ MaxSorting int64
+ IssueCount int64
+ }{}
+ if _, err := db.GetEngine(ctx).Select("max(sorting) as max_sorting, count(*) as issue_count").Table("project_issue").
+ Where("project_id=?", newProjectID).
+ And("project_board_id=?", newColumnID).
+ Get(&res); err != nil {
+ return err
+ }
+ newSorting := util.Iif(res.IssueCount > 0, res.MaxSorting+1, 0)
+ return db.Insert(ctx, &project_model.ProjectIssue{
+ IssueID: issue.ID,
+ ProjectID: newProjectID,
+ ProjectColumnID: newColumnID,
+ Sorting: newSorting,
+ })
+ })
+}
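+
+// Sketch with hypothetical IDs: assigning an issue to project 5 (column 0
+// falls back to the project's default column), then detaching it again:
+//
+//	err := issues_model.IssueAssignOrRemoveProject(ctx, issue, doer, 5, 0)
+//	...
+//	err = issues_model.IssueAssignOrRemoveProject(ctx, issue, doer, 0, 0)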
diff --git a/models/issues/issue_search.go b/models/issues/issue_search.go
new file mode 100644
index 0000000..e9f116b
--- /dev/null
+++ b/models/issues/issue_search.go
@@ -0,0 +1,489 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/organization"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/optional"
+
+ "xorm.io/builder"
+ "xorm.io/xorm"
+)
+
+// IssuesOptions represents options for querying issues.
+type IssuesOptions struct { //nolint
+ Paginator *db.ListOptions
+ RepoIDs []int64 // overwrites RepoCond if the length is not 0
+ AllPublic bool // include also all public repositories
+ RepoCond builder.Cond
+ AssigneeID int64
+ PosterID int64
+ MentionedID int64
+ ReviewRequestedID int64
+ ReviewedID int64
+ SubscriberID int64
+ MilestoneIDs []int64
+ ProjectID int64
+ ProjectColumnID int64
+ IsClosed optional.Option[bool]
+ IsPull optional.Option[bool]
+ LabelIDs []int64
+ IncludedLabelNames []string
+ ExcludedLabelNames []string
+ IncludeMilestones []string
+ SortType string
+ IssueIDs []int64
+ UpdatedAfterUnix int64
+ UpdatedBeforeUnix int64
+ // prioritize issues from this repo
+ PriorityRepoID int64
+ IsArchived optional.Option[bool]
+ Org *organization.Organization // issues permission scope
+ Team *organization.Team // issues permission scope
+ User *user_model.User // issues permission scope
+}
+
+// applySorts sorts an issues-related session based on the provided
+// sortType string.
+func applySorts(sess *xorm.Session, sortType string, priorityRepoID int64) {
+ switch sortType {
+ case "oldest":
+ sess.Asc("issue.created_unix").Asc("issue.id")
+ case "recentupdate":
+ sess.Desc("issue.updated_unix").Desc("issue.created_unix").Desc("issue.id")
+ case "leastupdate":
+ sess.Asc("issue.updated_unix").Asc("issue.created_unix").Asc("issue.id")
+ case "mostcomment":
+ sess.Desc("issue.num_comments").Desc("issue.created_unix").Desc("issue.id")
+ case "leastcomment":
+ sess.Asc("issue.num_comments").Desc("issue.created_unix").Desc("issue.id")
+ case "priority":
+ sess.Desc("issue.priority").Desc("issue.created_unix").Desc("issue.id")
+ case "nearduedate":
+ // 253370764800 is 01/01/9999 @ 12:00am (UTC)
+ sess.Join("LEFT", "milestone", "issue.milestone_id = milestone.id").
+ OrderBy("CASE " +
+ "WHEN issue.deadline_unix = 0 AND (milestone.deadline_unix = 0 OR milestone.deadline_unix IS NULL) THEN 253370764800 " +
+ "WHEN milestone.deadline_unix = 0 OR milestone.deadline_unix IS NULL THEN issue.deadline_unix " +
+ "WHEN milestone.deadline_unix < issue.deadline_unix OR issue.deadline_unix = 0 THEN milestone.deadline_unix " +
+ "ELSE issue.deadline_unix END ASC").
+ Desc("issue.created_unix").
+ Desc("issue.id")
+ case "farduedate":
+ sess.Join("LEFT", "milestone", "issue.milestone_id = milestone.id").
+ OrderBy("CASE " +
+ "WHEN milestone.deadline_unix IS NULL THEN issue.deadline_unix " +
+ "WHEN milestone.deadline_unix < issue.deadline_unix OR issue.deadline_unix = 0 THEN milestone.deadline_unix " +
+ "ELSE issue.deadline_unix END DESC").
+ Desc("issue.created_unix").
+ Desc("issue.id")
+ case "priorityrepo":
+ sess.OrderBy("CASE "+
+ "WHEN issue.repo_id = ? THEN 1 "+
+ "ELSE 2 END ASC", priorityRepoID).
+ Desc("issue.created_unix").
+ Desc("issue.id")
+ case "project-column-sorting":
+ sess.Asc("project_issue.sorting").Desc("issue.created_unix").Desc("issue.id")
+ default:
+ sess.Desc("issue.created_unix").Desc("issue.id")
+ }
+}
+
+func applyLimit(sess *xorm.Session, opts *IssuesOptions) {
+ if opts.Paginator == nil || opts.Paginator.IsListAll() {
+ return
+ }
+
+ start := 0
+ if opts.Paginator.Page > 1 {
+ start = (opts.Paginator.Page - 1) * opts.Paginator.PageSize
+ }
+ sess.Limit(opts.Paginator.PageSize, start)
+}
+
+func applyLabelsCondition(sess *xorm.Session, opts *IssuesOptions) {
+ if len(opts.LabelIDs) > 0 {
+ if opts.LabelIDs[0] == 0 {
+ sess.Where("issue.id NOT IN (SELECT issue_id FROM issue_label)")
+ } else {
+ // deduplicate the label IDs for inclusion and exclusion
+ includedLabelIDs := make(container.Set[int64])
+ excludedLabelIDs := make(container.Set[int64])
+ for _, labelID := range opts.LabelIDs {
+ if labelID > 0 {
+ includedLabelIDs.Add(labelID)
+ } else if labelID < 0 { // 0 is not supported here, so just ignore it
+ excludedLabelIDs.Add(-labelID)
+ }
+ }
+ // ... and use them in a subquery of the form:
+ // where (select count(*) from issue_label where issue_id=issue.id and label_id in (2, 4, 6)) = 3
+ // This equality is guaranteed thanks to unique index (issue_id,label_id) on table issue_label.
+ if len(includedLabelIDs) > 0 {
+ subQuery := builder.Select("count(*)").From("issue_label").Where(builder.Expr("issue_id = issue.id")).
+ And(builder.In("label_id", includedLabelIDs.Values()))
+ sess.Where(builder.Eq{strconv.Itoa(len(includedLabelIDs)): subQuery})
+ }
+ // or (select count(*)...) = 0 for excluded labels
+ if len(excludedLabelIDs) > 0 {
+ subQuery := builder.Select("count(*)").From("issue_label").Where(builder.Expr("issue_id = issue.id")).
+ And(builder.In("label_id", excludedLabelIDs.Values()))
+ sess.Where(builder.Eq{"0": subQuery})
+ }
+ }
+ }
+
+ if len(opts.IncludedLabelNames) > 0 {
+ sess.In("issue.id", BuildLabelNamesIssueIDsCondition(opts.IncludedLabelNames))
+ }
+
+ if len(opts.ExcludedLabelNames) > 0 {
+ sess.And(builder.NotIn("issue.id", BuildLabelNamesIssueIDsCondition(opts.ExcludedLabelNames)))
+ }
+}
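+
+// For example, with included label IDs {2, 4} and excluded label ID {9}, the
+// generated conditions take roughly the form:
+//
+//	2 = (SELECT count(*) FROM issue_label WHERE issue_id = issue.id AND label_id IN (2, 4))
+//	AND 0 = (SELECT count(*) FROM issue_label WHERE issue_id = issue.id AND label_id IN (9))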
+
+func applyMilestoneCondition(sess *xorm.Session, opts *IssuesOptions) {
+ if len(opts.MilestoneIDs) == 1 && opts.MilestoneIDs[0] == db.NoConditionID {
+ sess.And("issue.milestone_id = 0")
+ } else if len(opts.MilestoneIDs) > 0 {
+ sess.In("issue.milestone_id", opts.MilestoneIDs)
+ }
+
+ if len(opts.IncludeMilestones) > 0 {
+ sess.In("issue.milestone_id",
+ builder.Select("id").
+ From("milestone").
+ Where(builder.In("name", opts.IncludeMilestones)))
+ }
+}
+
+func applyProjectCondition(sess *xorm.Session, opts *IssuesOptions) {
+ if opts.ProjectID > 0 { // specific project
+ sess.Join("INNER", "project_issue", "issue.id = project_issue.issue_id").
+ And("project_issue.project_id=?", opts.ProjectID)
+ } else if opts.ProjectID == db.NoConditionID { // show those that are in no project
+ sess.And(builder.NotIn("issue.id", builder.Select("issue_id").From("project_issue").And(builder.Neq{"project_id": 0})))
+ }
+ // opts.ProjectID == 0 means all projects,
+ // do not need to apply any condition
+}
+
+func applyProjectColumnCondition(sess *xorm.Session, opts *IssuesOptions) {
+ // opts.ProjectColumnID == 0 means all project columns,
+ // do not need to apply any condition
+ if opts.ProjectColumnID > 0 {
+ sess.In("issue.id", builder.Select("issue_id").From("project_issue").Where(builder.Eq{"project_board_id": opts.ProjectColumnID}))
+ } else if opts.ProjectColumnID == db.NoConditionID {
+ sess.In("issue.id", builder.Select("issue_id").From("project_issue").Where(builder.Eq{"project_board_id": 0}))
+ }
+}
+
+func applyRepoConditions(sess *xorm.Session, opts *IssuesOptions) {
+ if len(opts.RepoIDs) == 1 {
+ opts.RepoCond = builder.Eq{"issue.repo_id": opts.RepoIDs[0]}
+ } else if len(opts.RepoIDs) > 1 {
+ opts.RepoCond = builder.In("issue.repo_id", opts.RepoIDs)
+ }
+ if opts.AllPublic {
+ if opts.RepoCond == nil {
+ opts.RepoCond = builder.NewCond()
+ }
+ opts.RepoCond = opts.RepoCond.Or(builder.In("issue.repo_id", builder.Select("id").From("repository").Where(builder.Eq{"is_private": false})))
+ }
+ if opts.RepoCond != nil {
+ sess.And(opts.RepoCond)
+ }
+}
+
+func applyConditions(sess *xorm.Session, opts *IssuesOptions) {
+ if len(opts.IssueIDs) > 0 {
+ sess.In("issue.id", opts.IssueIDs)
+ }
+
+ applyRepoConditions(sess, opts)
+
+ if opts.IsClosed.Has() {
+ sess.And("issue.is_closed=?", opts.IsClosed.Value())
+ }
+
+ if opts.AssigneeID > 0 {
+ applyAssigneeCondition(sess, opts.AssigneeID)
+ } else if opts.AssigneeID == db.NoConditionID {
+ sess.Where("issue.id NOT IN (SELECT issue_id FROM issue_assignees)")
+ }
+
+ if opts.PosterID > 0 {
+ applyPosterCondition(sess, opts.PosterID)
+ }
+
+ if opts.MentionedID > 0 {
+ applyMentionedCondition(sess, opts.MentionedID)
+ }
+
+ if opts.ReviewRequestedID > 0 {
+ applyReviewRequestedCondition(sess, opts.ReviewRequestedID)
+ }
+
+ if opts.ReviewedID > 0 {
+ applyReviewedCondition(sess, opts.ReviewedID)
+ }
+
+ if opts.SubscriberID > 0 {
+ applySubscribedCondition(sess, opts.SubscriberID)
+ }
+
+ applyMilestoneCondition(sess, opts)
+
+ if opts.UpdatedAfterUnix != 0 {
+ sess.And(builder.Gte{"issue.updated_unix": opts.UpdatedAfterUnix})
+ }
+ if opts.UpdatedBeforeUnix != 0 {
+ sess.And(builder.Lte{"issue.updated_unix": opts.UpdatedBeforeUnix})
+ }
+
+ applyProjectCondition(sess, opts)
+
+ applyProjectColumnCondition(sess, opts)
+
+ if opts.IsPull.Has() {
+ sess.And("issue.is_pull=?", opts.IsPull.Value())
+ }
+
+ if opts.IsArchived.Has() {
+ sess.And(builder.Eq{"repository.is_archived": opts.IsArchived.Value()})
+ }
+
+ applyLabelsCondition(sess, opts)
+
+ if opts.User != nil {
+ sess.And(issuePullAccessibleRepoCond("issue.repo_id", opts.User.ID, opts.Org, opts.Team, opts.IsPull.Value()))
+ }
+}
+
+// teamUnitsRepoCond returns a query condition matching repos that the given org team can access with the given unit types.
+func teamUnitsRepoCond(id string, userID, orgID, teamID int64, units ...unit.Type) builder.Cond {
+ return builder.In(id,
+ builder.Select("repo_id").From("team_repo").Where(
+ builder.Eq{
+ "team_id": teamID,
+ }.And(
+ builder.Or(
+ // Check if the user is member of the team.
+ builder.In(
+ "team_id", builder.Select("team_id").From("team_user").Where(
+ builder.Eq{
+ "uid": userID,
+ },
+ ),
+ ),
+ // Check if the user is in the owner team of the organisation.
+ builder.Exists(builder.Select("team_id").From("team_user").
+ Where(builder.Eq{
+ "org_id": orgID,
+ "team_id": builder.Select("id").From("team").Where(
+ builder.Eq{
+ "org_id": orgID,
+ "lower_name": strings.ToLower(organization.OwnerTeamName),
+ }),
+ "uid": userID,
+ }),
+ ),
+ )).And(
+ builder.In(
+ "team_id", builder.Select("team_id").From("team_unit").Where(
+ builder.Eq{
+ "`team_unit`.org_id": orgID,
+ }.And(
+ builder.In("`team_unit`.type", units),
+ ),
+ ),
+ ),
+ ),
+ ))
+}
+
+// issuePullAccessibleRepoCond builds a condition for repos accessible to userID; userID must not be zero, and the condition requires the repository table to be joined.
+func issuePullAccessibleRepoCond(repoIDstr string, userID int64, org *organization.Organization, team *organization.Team, isPull bool) builder.Cond {
+ cond := builder.NewCond()
+ unitType := unit.TypeIssues
+ if isPull {
+ unitType = unit.TypePullRequests
+ }
+ if org != nil {
+ if team != nil {
+ cond = cond.And(teamUnitsRepoCond(repoIDstr, userID, org.ID, team.ID, unitType)) // special team member repos
+ } else {
+ cond = cond.And(
+ builder.Or(
+ repo_model.UserOrgUnitRepoCond(repoIDstr, userID, org.ID, unitType), // team member repos
+ repo_model.UserOrgPublicUnitRepoCond(userID, org.ID), // user org public non-member repos, TODO: check repo has issues
+ ),
+ )
+ }
+ } else {
+ cond = cond.And(
+ builder.Or(
+ repo_model.UserOwnedRepoCond(userID), // owned repos
+ repo_model.UserAccessRepoCond(repoIDstr, userID), // user can access repo in a unit independent way
+ repo_model.UserAssignedRepoCond(repoIDstr, userID), // user has been assigned accessible public repos
+ repo_model.UserMentionedRepoCond(repoIDstr, userID), // user has been mentioned accessible public repos
+ repo_model.UserCreateIssueRepoCond(repoIDstr, userID, isPull), // user has created issue/pr accessible public repos
+ ),
+ )
+ }
+ return cond
+}
+
+func applyAssigneeCondition(sess *xorm.Session, assigneeID int64) {
+ sess.Join("INNER", "issue_assignees", "issue.id = issue_assignees.issue_id").
+ And("issue_assignees.assignee_id = ?", assigneeID)
+}
+
+func applyPosterCondition(sess *xorm.Session, posterID int64) {
+ sess.And("issue.poster_id=?", posterID)
+}
+
+func applyMentionedCondition(sess *xorm.Session, mentionedID int64) {
+ sess.Join("INNER", "issue_user", "issue.id = issue_user.issue_id").
+ And("issue_user.is_mentioned = ?", true).
+ And("issue_user.uid = ?", mentionedID)
+}
+
+func applyReviewRequestedCondition(sess *xorm.Session, reviewRequestedID int64) {
+ existInTeamQuery := builder.Select("team_user.team_id").
+ From("team_user").
+ Where(builder.Eq{"team_user.uid": reviewRequestedID})
+
+ // if the review is approved or rejected, it should not be shown in the review requested list
+ maxReview := builder.Select("MAX(r.id)").
+ From("review as r").
+ Where(builder.In("r.type", []ReviewType{ReviewTypeApprove, ReviewTypeReject, ReviewTypeRequest})).
+ GroupBy("r.issue_id, r.reviewer_id, r.reviewer_team_id")
+
+ subQuery := builder.Select("review.issue_id").
+ From("review").
+ Where(builder.And(
+ builder.Eq{"review.type": ReviewTypeRequest},
+ builder.Or(
+ builder.Eq{"review.reviewer_id": reviewRequestedID},
+ builder.In("review.reviewer_team_id", existInTeamQuery),
+ ),
+ builder.In("review.id", maxReview),
+ ))
+ sess.Where("issue.poster_id <> ?", reviewRequestedID).
+ And(builder.In("issue.id", subQuery))
+}
+
+func applyReviewedCondition(sess *xorm.Session, reviewedID int64) {
+ // Query for pull requests where you are a reviewer or commenter, excluding
+ // any pull requests already returned by the review requested filter.
+ notPoster := builder.Neq{"issue.poster_id": reviewedID}
+ reviewed := builder.In("issue.id", builder.
+ Select("issue_id").
+ From("review").
+ Where(builder.And(
+ builder.Neq{"type": ReviewTypeRequest},
+ builder.Or(
+ builder.Eq{"reviewer_id": reviewedID},
+ builder.In("reviewer_team_id", builder.
+ Select("team_id").
+ From("team_user").
+ Where(builder.Eq{"uid": reviewedID}),
+ ),
+ ),
+ )),
+ )
+ commented := builder.In("issue.id", builder.
+ Select("issue_id").
+ From("comment").
+ Where(builder.And(
+ builder.Eq{"poster_id": reviewedID},
+ builder.In("type", CommentTypeComment, CommentTypeCode, CommentTypeReview),
+ )),
+ )
+ sess.And(notPoster, builder.Or(reviewed, commented))
+}
+
+func applySubscribedCondition(sess *xorm.Session, subscriberID int64) {
+ sess.And(
+ builder.
+ NotIn("issue.id",
+ builder.Select("issue_id").
+ From("issue_watch").
+ Where(builder.Eq{"is_watching": false, "user_id": subscriberID}),
+ ),
+ ).And(
+ builder.Or(
+ builder.In("issue.id", builder.
+ Select("issue_id").
+ From("issue_watch").
+ Where(builder.Eq{"is_watching": true, "user_id": subscriberID}),
+ ),
+ builder.In("issue.id", builder.
+ Select("issue_id").
+ From("comment").
+ Where(builder.Eq{"poster_id": subscriberID}),
+ ),
+ builder.Eq{"issue.poster_id": subscriberID},
+ builder.In("issue.repo_id", builder.
+ Select("id").
+ From("watch").
+ Where(builder.And(builder.Eq{"user_id": subscriberID},
+ builder.In("mode", repo_model.WatchModeNormal, repo_model.WatchModeAuto))),
+ ),
+ ),
+ )
+}
+
+// Issues returns a list of issues by given conditions.
+func Issues(ctx context.Context, opts *IssuesOptions) (IssueList, error) {
+ sess := db.GetEngine(ctx).
+ Join("INNER", "repository", "`issue`.repo_id = `repository`.id")
+ applyLimit(sess, opts)
+ applyConditions(sess, opts)
+ applySorts(sess, opts.SortType, opts.PriorityRepoID)
+
+ issues := IssueList{}
+ if err := sess.Find(&issues); err != nil {
+ return nil, fmt.Errorf("unable to query Issues: %w", err)
+ }
+
+ if err := issues.LoadAttributes(ctx); err != nil {
+ return nil, fmt.Errorf("unable to LoadAttributes for Issues: %w", err)
+ }
+
+ return issues, nil
+}
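+
+// listOldestIssuesSketch is a hypothetical caller (not part of this patch)
+// showing typical IssuesOptions usage: the oldest issues of one repository,
+// fetched one page at a time. The page size is a placeholder value.
+func listOldestIssuesSketch(ctx context.Context, repoID int64) (IssueList, error) {
+	return Issues(ctx, &IssuesOptions{
+		RepoIDs:   []int64{repoID},
+		SortType:  "oldest",
+		Paginator: &db.ListOptions{Page: 1, PageSize: 20},
+	})
+}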
+
+// IssueIDs returns a list of issue ids by given conditions.
+func IssueIDs(ctx context.Context, opts *IssuesOptions, otherConds ...builder.Cond) ([]int64, int64, error) {
+ sess := db.GetEngine(ctx).
+ Join("INNER", "repository", "`issue`.repo_id = `repository`.id")
+ applyConditions(sess, opts)
+ for _, cond := range otherConds {
+ sess.And(cond)
+ }
+
+ applyLimit(sess, opts)
+ applySorts(sess, opts.SortType, opts.PriorityRepoID)
+
+ var res []int64
+ total, err := sess.Select("`issue`.id").Table(&Issue{}).FindAndCount(&res)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ return res, total, nil
+}
diff --git a/models/issues/issue_stats.go b/models/issues/issue_stats.go
new file mode 100644
index 0000000..dc634cf
--- /dev/null
+++ b/models/issues/issue_stats.go
@@ -0,0 +1,191 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+
+ "xorm.io/builder"
+ "xorm.io/xorm"
+)
+
+// IssueStats represents issue statistic information.
+type IssueStats struct {
+ OpenCount, ClosedCount int64
+ YourRepositoriesCount int64
+ AssignCount int64
+ CreateCount int64
+ MentionCount int64
+ ReviewRequestedCount int64
+ ReviewedCount int64
+}
+
+// Filter modes.
+const (
+ FilterModeAll = iota
+ FilterModeAssign
+ FilterModeCreate
+ FilterModeMention
+ FilterModeReviewRequested
+ FilterModeReviewed
+ FilterModeYourRepositories
+)
+
+const (
+	// MaxQueryParameters represents the max number of query parameters.
+	// When queries are broken down into parts because of the number
+	// of parameters, attempt to break by this amount
+ MaxQueryParameters = 300
+)
+
+// CountIssuesByRepo returns a map from repoID to the number of issues matching the options
+func CountIssuesByRepo(ctx context.Context, opts *IssuesOptions) (map[int64]int64, error) {
+ sess := db.GetEngine(ctx).
+ Join("INNER", "repository", "`issue`.repo_id = `repository`.id")
+
+ applyConditions(sess, opts)
+
+ countsSlice := make([]*struct {
+ RepoID int64
+ Count int64
+ }, 0, 10)
+ if err := sess.GroupBy("issue.repo_id").
+ Select("issue.repo_id AS repo_id, COUNT(*) AS count").
+ Table("issue").
+ Find(&countsSlice); err != nil {
+ return nil, fmt.Errorf("unable to CountIssuesByRepo: %w", err)
+ }
+
+ countMap := make(map[int64]int64, len(countsSlice))
+ for _, c := range countsSlice {
+ countMap[c.RepoID] = c.Count
+ }
+ return countMap, nil
+}
+
+// CountIssues returns the number of issues matching the given conditions.
+func CountIssues(ctx context.Context, opts *IssuesOptions, otherConds ...builder.Cond) (int64, error) {
+ sess := db.GetEngine(ctx).
+ Select("COUNT(issue.id) AS count").
+ Table("issue").
+ Join("INNER", "repository", "`issue`.repo_id = `repository`.id")
+ applyConditions(sess, opts)
+
+ for _, cond := range otherConds {
+ sess.And(cond)
+ }
+
+ return sess.Count()
+}
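+
+// countOpenMilestoneIssuesSketch is an illustrative sketch (hypothetical
+// helper): otherConds lets callers narrow CountIssues with ad-hoc builder
+// conditions, here the open issues of a single milestone.
+func countOpenMilestoneIssuesSketch(ctx context.Context, milestoneID int64) (int64, error) {
+	return CountIssues(ctx, &IssuesOptions{},
+		builder.Eq{"issue.milestone_id": milestoneID},
+		builder.Eq{"issue.is_closed": false})
+}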
+
+// GetIssueStats returns issue statistic information by given conditions.
+func GetIssueStats(ctx context.Context, opts *IssuesOptions) (*IssueStats, error) {
+ if len(opts.IssueIDs) <= MaxQueryParameters {
+ return getIssueStatsChunk(ctx, opts, opts.IssueIDs)
+ }
+
+	// If too long a list of IDs is provided, we get the statistics in
+	// smaller chunks and accumulate them. Note: this could potentially
+	// yield invalid results. The alternative is to insert the list of
+	// ids into a temporary table and join from it.
+ accum := &IssueStats{}
+ for i := 0; i < len(opts.IssueIDs); {
+ chunk := i + MaxQueryParameters
+ if chunk > len(opts.IssueIDs) {
+ chunk = len(opts.IssueIDs)
+ }
+ stats, err := getIssueStatsChunk(ctx, opts, opts.IssueIDs[i:chunk])
+ if err != nil {
+ return nil, err
+ }
+ accum.OpenCount += stats.OpenCount
+ accum.ClosedCount += stats.ClosedCount
+ accum.YourRepositoriesCount += stats.YourRepositoriesCount
+ accum.AssignCount += stats.AssignCount
+ accum.CreateCount += stats.CreateCount
+ accum.MentionCount += stats.MentionCount
+ accum.ReviewRequestedCount += stats.ReviewRequestedCount
+ accum.ReviewedCount += stats.ReviewedCount
+ i = chunk
+ }
+ return accum, nil
+}
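+
+// chunkIDs is an illustrative helper (not part of this patch) expressing the
+// same chunking pattern generically: split ids into slices of at most size
+// elements so each query stays under the parameter limit.
+func chunkIDs(ids []int64, size int) [][]int64 {
+	chunks := make([][]int64, 0, (len(ids)+size-1)/size)
+	for i := 0; i < len(ids); i += size {
+		end := i + size
+		if end > len(ids) {
+			end = len(ids)
+		}
+		chunks = append(chunks, ids[i:end])
+	}
+	return chunks
+}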
+
+func getIssueStatsChunk(ctx context.Context, opts *IssuesOptions, issueIDs []int64) (*IssueStats, error) {
+ stats := &IssueStats{}
+
+ sess := db.GetEngine(ctx).
+ Join("INNER", "repository", "`issue`.repo_id = `repository`.id")
+
+ var err error
+ stats.OpenCount, err = applyIssuesOptions(sess, opts, issueIDs).
+ And("issue.is_closed = ?", false).
+ Count(new(Issue))
+ if err != nil {
+ return stats, err
+ }
+ stats.ClosedCount, err = applyIssuesOptions(sess, opts, issueIDs).
+ And("issue.is_closed = ?", true).
+ Count(new(Issue))
+ return stats, err
+}
+
+func applyIssuesOptions(sess *xorm.Session, opts *IssuesOptions, issueIDs []int64) *xorm.Session {
+ if len(opts.RepoIDs) > 1 {
+ sess.In("issue.repo_id", opts.RepoIDs)
+ } else if len(opts.RepoIDs) == 1 {
+ sess.And("issue.repo_id = ?", opts.RepoIDs[0])
+ }
+
+ if len(issueIDs) > 0 {
+ sess.In("issue.id", issueIDs)
+ }
+
+ applyLabelsCondition(sess, opts)
+
+ applyMilestoneCondition(sess, opts)
+
+ applyProjectCondition(sess, opts)
+
+ if opts.AssigneeID > 0 {
+ applyAssigneeCondition(sess, opts.AssigneeID)
+ } else if opts.AssigneeID == db.NoConditionID {
+ sess.Where("issue.id NOT IN (SELECT issue_id FROM issue_assignees)")
+ }
+
+ if opts.PosterID > 0 {
+ applyPosterCondition(sess, opts.PosterID)
+ }
+
+ if opts.MentionedID > 0 {
+ applyMentionedCondition(sess, opts.MentionedID)
+ }
+
+ if opts.ReviewRequestedID > 0 {
+ applyReviewRequestedCondition(sess, opts.ReviewRequestedID)
+ }
+
+ if opts.ReviewedID > 0 {
+ applyReviewedCondition(sess, opts.ReviewedID)
+ }
+
+ if opts.IsPull.Has() {
+ sess.And("issue.is_pull=?", opts.IsPull.Value())
+ }
+
+ return sess
+}
+
+// CountOrphanedIssues count issues without a repo
+func CountOrphanedIssues(ctx context.Context) (int64, error) {
+ return db.GetEngine(ctx).
+ Table("issue").
+ Join("LEFT", "repository", "issue.repo_id=repository.id").
+ Where(builder.IsNull{"repository.id"}).
+ Select("COUNT(`issue`.`id`)").
+ Count()
+}
diff --git a/models/issues/issue_stats_test.go b/models/issues/issue_stats_test.go
new file mode 100644
index 0000000..fda75a6
--- /dev/null
+++ b/models/issues/issue_stats_test.go
@@ -0,0 +1,34 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGetIssueStats(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ ids, err := issues_model.GetIssueIDsByRepoID(db.DefaultContext, 1)
+ require.NoError(t, err)
+
+ stats, err := issues_model.GetIssueStats(db.DefaultContext, &issues_model.IssuesOptions{IssueIDs: ids})
+ require.NoError(t, err)
+
+ assert.Equal(t, int64(4), stats.OpenCount)
+ assert.Equal(t, int64(1), stats.ClosedCount)
+ assert.Equal(t, int64(0), stats.YourRepositoriesCount)
+ assert.Equal(t, int64(0), stats.AssignCount)
+ assert.Equal(t, int64(0), stats.CreateCount)
+ assert.Equal(t, int64(0), stats.MentionCount)
+ assert.Equal(t, int64(0), stats.ReviewRequestedCount)
+ assert.Equal(t, int64(0), stats.ReviewedCount)
+}
diff --git a/models/issues/issue_test.go b/models/issues/issue_test.go
new file mode 100644
index 0000000..580be96
--- /dev/null
+++ b/models/issues/issue_test.go
@@ -0,0 +1,498 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues_test
+
+import (
+ "context"
+ "fmt"
+ "sort"
+ "sync"
+ "testing"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/setting"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "xorm.io/builder"
+)
+
+func TestIssue_ReplaceLabels(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ testSuccess := func(issueID int64, labelIDs, expectedLabelIDs []int64) {
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: issueID})
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: issue.RepoID})
+ doer := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: repo.OwnerID})
+
+ labels := make([]*issues_model.Label, len(labelIDs))
+ for i, labelID := range labelIDs {
+ labels[i] = unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: labelID, RepoID: repo.ID})
+ }
+ require.NoError(t, issues_model.ReplaceIssueLabels(db.DefaultContext, issue, labels, doer))
+ unittest.AssertCount(t, &issues_model.IssueLabel{IssueID: issueID}, len(expectedLabelIDs))
+ for _, labelID := range expectedLabelIDs {
+ unittest.AssertExistsAndLoadBean(t, &issues_model.IssueLabel{IssueID: issueID, LabelID: labelID})
+ }
+ }
+
+ testSuccess(1, []int64{2}, []int64{2})
+ testSuccess(1, []int64{1, 2}, []int64{1, 2})
+ testSuccess(1, []int64{}, []int64{})
+
+ // mutually exclusive scoped labels 7 and 8
+ testSuccess(18, []int64{6, 7}, []int64{6, 7})
+ testSuccess(18, []int64{7, 8}, []int64{8})
+ testSuccess(18, []int64{6, 8, 7}, []int64{6, 7})
+}
+
+func Test_GetIssueIDsByRepoID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ ids, err := issues_model.GetIssueIDsByRepoID(db.DefaultContext, 1)
+ require.NoError(t, err)
+ assert.Len(t, ids, 5)
+}
+
+func TestIssueAPIURL(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 1})
+ err := issue.LoadAttributes(db.DefaultContext)
+
+ require.NoError(t, err)
+ assert.Equal(t, "https://try.gitea.io/api/v1/repos/user2/repo1/issues/1", issue.APIURL(db.DefaultContext))
+}
+
+func TestGetIssuesByIDs(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ testSuccess := func(expectedIssueIDs, nonExistentIssueIDs []int64) {
+ issues, err := issues_model.GetIssuesByIDs(db.DefaultContext, append(expectedIssueIDs, nonExistentIssueIDs...), true)
+ require.NoError(t, err)
+ actualIssueIDs := make([]int64, len(issues))
+ for i, issue := range issues {
+ actualIssueIDs[i] = issue.ID
+ }
+ assert.Equal(t, expectedIssueIDs, actualIssueIDs)
+ }
+ testSuccess([]int64{1, 2, 3}, []int64{})
+ testSuccess([]int64{1, 2, 3}, []int64{unittest.NonexistentID})
+ testSuccess([]int64{3, 2, 1}, []int64{})
+}
+
+func TestGetParticipantIDsByIssue(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ checkParticipants := func(issueID int64, userIDs []int) {
+ issue, err := issues_model.GetIssueByID(db.DefaultContext, issueID)
+ require.NoError(t, err)
+
+ participants, err := issue.GetParticipantIDsByIssue(db.DefaultContext)
+ require.NoError(t, err)
+
+ participantsIDs := make([]int, len(participants))
+ for i, uid := range participants {
+ participantsIDs[i] = int(uid)
+ }
+ sort.Ints(participantsIDs)
+ sort.Ints(userIDs)
+ assert.Equal(t, userIDs, participantsIDs)
+ }
+
+ // User 1 is issue1 poster (see fixtures/issue.yml)
+ // User 2 only labeled issue1 (see fixtures/comment.yml)
+ // Users 3 and 5 made actual comments (see fixtures/comment.yml)
+	// User 3 is inactive, thus not an active participant
+ checkParticipants(1, []int{1, 5})
+}
+
+func TestIssue_ClearLabels(t *testing.T) {
+ tests := []struct {
+ issueID int64
+ doerID int64
+ }{
+ {1, 2}, // non-pull-request, has labels
+ {2, 2}, // pull-request, has labels
+ {3, 2}, // pull-request, has no labels
+ }
+ for _, test := range tests {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: test.issueID})
+ doer := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: test.doerID})
+ require.NoError(t, issues_model.ClearIssueLabels(db.DefaultContext, issue, doer))
+ unittest.AssertNotExistsBean(t, &issues_model.IssueLabel{IssueID: test.issueID})
+ }
+}
+
+func TestUpdateIssueCols(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{})
+
+ const newTitle = "New Title for unit test"
+ issue.Title = newTitle
+
+ prevContent := issue.Content
+ issue.Content = "This should have no effect"
+
+ now := time.Now().Unix()
+ require.NoError(t, issues_model.UpdateIssueCols(db.DefaultContext, issue, "name"))
+ then := time.Now().Unix()
+
+ updatedIssue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: issue.ID})
+ assert.EqualValues(t, newTitle, updatedIssue.Title)
+ assert.EqualValues(t, prevContent, updatedIssue.Content)
+ unittest.AssertInt64InRange(t, now, then, int64(updatedIssue.UpdatedUnix))
+}
+
+func TestIssues(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ for _, test := range []struct {
+ Opts issues_model.IssuesOptions
+ ExpectedIssueIDs []int64
+ }{
+ {
+ issues_model.IssuesOptions{
+ AssigneeID: 1,
+ SortType: "oldest",
+ },
+ []int64{1, 6},
+ },
+ {
+ issues_model.IssuesOptions{
+ RepoCond: builder.In("repo_id", 1, 3),
+ SortType: "oldest",
+ Paginator: &db.ListOptions{
+ Page: 1,
+ PageSize: 4,
+ },
+ },
+ []int64{1, 2, 3, 5},
+ },
+ {
+ issues_model.IssuesOptions{
+ LabelIDs: []int64{1},
+ Paginator: &db.ListOptions{
+ Page: 1,
+ PageSize: 4,
+ },
+ },
+ []int64{2, 1},
+ },
+ {
+ issues_model.IssuesOptions{
+ LabelIDs: []int64{1, 2},
+ Paginator: &db.ListOptions{
+ Page: 1,
+ PageSize: 4,
+ },
+ },
+ []int64{}, // issues with **both** label 1 and 2, none of these issues matches, TODO: add more tests
+ },
+ {
+ issues_model.IssuesOptions{
+ LabelIDs: []int64{-1, 2},
+ },
+ []int64{5}, // issue without label 1 but with label 2.
+ },
+ {
+ issues_model.IssuesOptions{
+ RepoCond: builder.In("repo_id", 1),
+ LabelIDs: []int64{0},
+ },
+ []int64{11, 3}, // issues without any label (ordered by creation date desc.)(note: 11 is a pull request)
+ },
+ {
+ issues_model.IssuesOptions{
+ MilestoneIDs: []int64{1},
+ },
+ []int64{2},
+ },
+ } {
+ issues, err := issues_model.Issues(db.DefaultContext, &test.Opts)
+ require.NoError(t, err)
+ if assert.Len(t, issues, len(test.ExpectedIssueIDs)) {
+ for i, issue := range issues {
+ assert.EqualValues(t, test.ExpectedIssueIDs[i], issue.ID)
+ }
+ }
+ }
+}
+
+func TestIssue_loadTotalTimes(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ ms, err := issues_model.GetIssueByID(db.DefaultContext, 2)
+ require.NoError(t, err)
+ require.NoError(t, ms.LoadTotalTimes(db.DefaultContext))
+ assert.Equal(t, int64(3682), ms.TotalTrackedTime)
+}
+
+func testInsertIssue(t *testing.T, title, content string, expectIndex int64) *issues_model.Issue {
+ var newIssue issues_model.Issue
+ t.Run(title, func(t *testing.T) {
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+
+ issue := issues_model.Issue{
+ RepoID: repo.ID,
+ PosterID: user.ID,
+ Poster: user,
+ Title: title,
+ Content: content,
+ }
+ err := issues_model.NewIssue(db.DefaultContext, repo, &issue, nil, nil)
+ require.NoError(t, err)
+
+ has, err := db.GetEngine(db.DefaultContext).ID(issue.ID).Get(&newIssue)
+ require.NoError(t, err)
+ assert.True(t, has)
+ assert.EqualValues(t, issue.Title, newIssue.Title)
+ assert.EqualValues(t, issue.Content, newIssue.Content)
+ if expectIndex > 0 {
+ assert.EqualValues(t, expectIndex, newIssue.Index)
+ }
+ })
+ return &newIssue
+}
+
+func TestIssue_InsertIssue(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+	// there are 5 issues and the max index is 5 on repository 1, so this one should be 6
+ issue := testInsertIssue(t, "my issue1", "special issue's comments?", 6)
+ _, err := db.DeleteByID[issues_model.Issue](db.DefaultContext, issue.ID)
+ require.NoError(t, err)
+
+ issue = testInsertIssue(t, `my issue2, this is my son's love \n \r \ `, "special issue's '' comments?", 7)
+ _, err = db.DeleteByID[issues_model.Issue](db.DefaultContext, issue.ID)
+ require.NoError(t, err)
+}
+
+func TestIssue_ResolveMentions(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ testSuccess := func(owner, repo, doer string, mentions []string, expected []int64) {
+ o := unittest.AssertExistsAndLoadBean(t, &user_model.User{LowerName: owner})
+ r := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{OwnerID: o.ID, LowerName: repo})
+ issue := &issues_model.Issue{RepoID: r.ID}
+ d := unittest.AssertExistsAndLoadBean(t, &user_model.User{LowerName: doer})
+ resolved, err := issues_model.ResolveIssueMentionsByVisibility(db.DefaultContext, issue, d, mentions)
+ require.NoError(t, err)
+ ids := make([]int64, len(resolved))
+ for i, user := range resolved {
+ ids[i] = user.ID
+ }
+ sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
+ assert.EqualValues(t, expected, ids)
+ }
+
+ // Public repo, existing user
+ testSuccess("user2", "repo1", "user1", []string{"user5"}, []int64{5})
+ // Public repo, non-existing user
+ testSuccess("user2", "repo1", "user1", []string{"nonexisting"}, []int64{})
+ // Public repo, doer
+ testSuccess("user2", "repo1", "user1", []string{"user1"}, []int64{})
+ // Public repo, blocked user
+ testSuccess("user2", "repo1", "user1", []string{"user4"}, []int64{})
+ // Private repo, team member
+ testSuccess("org17", "big_test_private_4", "user20", []string{"user2"}, []int64{2})
+ // Private repo, not a team member
+ testSuccess("org17", "big_test_private_4", "user20", []string{"user5"}, []int64{})
+ // Private repo, whole team
+ testSuccess("org17", "big_test_private_4", "user15", []string{"org17/owners"}, []int64{18})
+}
+
+func TestResourceIndex(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ beforeCount, err := issues_model.CountIssues(context.Background(), &issues_model.IssuesOptions{})
+ require.NoError(t, err)
+
+ var wg sync.WaitGroup
+ for i := 0; i < 100; i++ {
+ wg.Add(1)
+ t.Run(fmt.Sprintf("issue %d", i+1), func(t *testing.T) {
+ t.Parallel()
+ testInsertIssue(t, fmt.Sprintf("issue %d", i+1), "my issue", 0)
+ wg.Done()
+ })
+ }
+
+ t.Run("Check the count", func(t *testing.T) {
+ t.Parallel()
+
+ wg.Wait()
+ afterCount, err := issues_model.CountIssues(context.Background(), &issues_model.IssuesOptions{})
+ require.NoError(t, err)
+ assert.EqualValues(t, 100, afterCount-beforeCount)
+ })
+}
+
+func TestCorrectIssueStats(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+	// To exercise the chunked database look-ups, we need more issues than
+	// `MaxQueryParameters`, so we insert MaxQueryParameters + 10 issues into
+	// the test database. Each new issue has the constant description
+	// "Bugs are nasty", which is used later on.
+
+ issueAmount := issues_model.MaxQueryParameters + 10
+
+ var wg sync.WaitGroup
+ for i := 0; i < issueAmount; i++ {
+ wg.Add(1)
+ go func(i int) {
+ testInsertIssue(t, fmt.Sprintf("Issue %d", i+1), "Bugs are nasty", 0)
+ wg.Done()
+ }(i)
+ }
+ wg.Wait()
+
+	// Now we will get all issue IDs that match the "Bugs are nasty" description.
+ issues, err := issues_model.Issues(context.TODO(), &issues_model.IssuesOptions{
+ Paginator: &db.ListOptions{
+ PageSize: issueAmount,
+ },
+ RepoIDs: []int64{1},
+ })
+ total := int64(len(issues))
+ var ids []int64
+ for _, issue := range issues {
+ if issue.Content == "Bugs are nasty" {
+ ids = append(ids, issue.ID)
+ }
+ }
+
+ // Just to be sure.
+ require.NoError(t, err)
+ assert.EqualValues(t, issueAmount, total)
+
+	// Now we call GetIssueStats with these IDs and, if everything works,
+	// get the correct stats back.
+ issueStats, err := issues_model.GetIssueStats(db.DefaultContext, &issues_model.IssuesOptions{
+ RepoIDs: []int64{1},
+ IssueIDs: ids,
+ })
+
+ // Now check the values.
+ require.NoError(t, err)
+	assert.EqualValues(t, issueAmount, issueStats.OpenCount)
+}
+
+func TestMilestoneList_LoadTotalTrackedTimes(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ miles := issues_model.MilestoneList{
+ unittest.AssertExistsAndLoadBean(t, &issues_model.Milestone{ID: 1}),
+ }
+
+ require.NoError(t, miles.LoadTotalTrackedTimes(db.DefaultContext))
+
+ assert.Equal(t, int64(3682), miles[0].TotalTrackedTime)
+}
+
+func TestLoadTotalTrackedTime(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ milestone := unittest.AssertExistsAndLoadBean(t, &issues_model.Milestone{ID: 1})
+
+ require.NoError(t, milestone.LoadTotalTrackedTime(db.DefaultContext))
+
+ assert.Equal(t, int64(3682), milestone.TotalTrackedTime)
+}
+
+func TestCountIssues(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ count, err := issues_model.CountIssues(db.DefaultContext, &issues_model.IssuesOptions{})
+ require.NoError(t, err)
+ assert.EqualValues(t, 22, count)
+}
+
+func TestIssueLoadAttributes(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ setting.Service.EnableTimetracking = true
+
+ issueList := issues_model.IssueList{
+ unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 1}),
+ unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 4}),
+ }
+
+ for _, issue := range issueList {
+ require.NoError(t, issue.LoadAttributes(db.DefaultContext))
+ assert.EqualValues(t, issue.RepoID, issue.Repo.ID)
+ for _, label := range issue.Labels {
+ assert.EqualValues(t, issue.RepoID, label.RepoID)
+ unittest.AssertExistsAndLoadBean(t, &issues_model.IssueLabel{IssueID: issue.ID, LabelID: label.ID})
+ }
+ if issue.PosterID > 0 {
+ assert.EqualValues(t, issue.PosterID, issue.Poster.ID)
+ }
+ if issue.AssigneeID > 0 {
+ assert.EqualValues(t, issue.AssigneeID, issue.Assignee.ID)
+ }
+ if issue.MilestoneID > 0 {
+ assert.EqualValues(t, issue.MilestoneID, issue.Milestone.ID)
+ }
+ if issue.IsPull {
+ assert.EqualValues(t, issue.ID, issue.PullRequest.IssueID)
+ }
+ for _, attachment := range issue.Attachments {
+ assert.EqualValues(t, issue.ID, attachment.IssueID)
+ }
+ for _, comment := range issue.Comments {
+ assert.EqualValues(t, issue.ID, comment.IssueID)
+ }
+ if issue.ID == int64(1) {
+ assert.Equal(t, int64(400), issue.TotalTrackedTime)
+ assert.NotNil(t, issue.Project)
+ assert.Equal(t, int64(1), issue.Project.ID)
+ } else {
+ assert.Nil(t, issue.Project)
+ }
+ }
+}
+
+func assertCreateIssues(t *testing.T, isPull bool) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ reponame := "repo1"
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{Name: reponame})
+ owner := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: repo.OwnerID})
+ label := unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 1})
+ milestone := unittest.AssertExistsAndLoadBean(t, &issues_model.Milestone{ID: 1})
+ assert.EqualValues(t, 1, milestone.ID)
+ reaction := &issues_model.Reaction{
+ Type: "heart",
+ UserID: owner.ID,
+ }
+
+ title := "issuetitle1"
+ is := &issues_model.Issue{
+ RepoID: repo.ID,
+ MilestoneID: milestone.ID,
+ Repo: repo,
+ Title: title,
+ Content: "issuecontent1",
+ IsPull: isPull,
+ PosterID: owner.ID,
+ Poster: owner,
+ IsClosed: true,
+ Labels: []*issues_model.Label{label},
+ Reactions: []*issues_model.Reaction{reaction},
+ }
+ err := issues_model.InsertIssues(db.DefaultContext, is)
+ require.NoError(t, err)
+
+ i := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{Title: title})
+ unittest.AssertExistsAndLoadBean(t, &issues_model.Reaction{Type: "heart", UserID: owner.ID, IssueID: i.ID})
+}
+
+func TestMigrate_CreateIssuesIsPullFalse(t *testing.T) {
+ assertCreateIssues(t, false)
+}
+
+func TestMigrate_CreateIssuesIsPullTrue(t *testing.T) {
+ assertCreateIssues(t, true)
+}
diff --git a/models/issues/issue_update.go b/models/issues/issue_update.go
new file mode 100644
index 0000000..dbfd2fc
--- /dev/null
+++ b/models/issues/issue_update.go
@@ -0,0 +1,795 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/organization"
+ "code.gitea.io/gitea/models/perm"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ project_model "code.gitea.io/gitea/models/project"
+ repo_model "code.gitea.io/gitea/models/repo"
+ system_model "code.gitea.io/gitea/models/system"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/references"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/builder"
+)
+
+func UpdateIssueCols(ctx context.Context, issue *Issue, cols ...string) error {
+ _, err := UpdateIssueColsWithCond(ctx, issue, builder.NewCond(), cols...)
+ return err
+}
+
+func UpdateIssueColsWithCond(ctx context.Context, issue *Issue, cond builder.Cond, cols ...string) (int64, error) {
+ sess := db.GetEngine(ctx).ID(issue.ID)
+ if issue.NoAutoTime {
+		cols = append(cols, "updated_unix")
+ sess.NoAutoTime()
+ }
+ return sess.Cols(cols...).Where(cond).Update(issue)
+}
+
+func changeIssueStatus(ctx context.Context, issue *Issue, doer *user_model.User, isClosed, isMergePull bool) (*Comment, error) {
+ // Reload the issue
+ currentIssue, err := GetIssueByID(ctx, issue.ID)
+ if err != nil {
+ return nil, err
+ }
+
+	// Nothing should be performed if the current status is the same as the target status
+ if currentIssue.IsClosed == isClosed {
+ if !issue.IsPull {
+ return nil, ErrIssueWasClosed{
+ ID: issue.ID,
+ }
+ }
+ return nil, ErrPullWasClosed{
+ ID: issue.ID,
+ }
+ }
+
+ issue.IsClosed = isClosed
+ return doChangeIssueStatus(ctx, issue, doer, isMergePull)
+}
+
+func doChangeIssueStatus(ctx context.Context, issue *Issue, doer *user_model.User, isMergePull bool) (*Comment, error) {
+ // Check for open dependencies
+ if issue.IsClosed && issue.Repo.IsDependenciesEnabled(ctx) {
+ // only check if dependencies are enabled and we're about to close an issue, otherwise reopening an issue would fail when there are unsatisfied dependencies
+ noDeps, err := IssueNoDependenciesLeft(ctx, issue)
+ if err != nil {
+ return nil, err
+ }
+
+ if !noDeps {
+ return nil, ErrDependenciesLeft{issue.ID}
+ }
+ }
+
+ if issue.IsClosed {
+ if issue.NoAutoTime {
+ issue.ClosedUnix = issue.UpdatedUnix
+ } else {
+ issue.ClosedUnix = timeutil.TimeStampNow()
+ }
+ } else {
+ issue.ClosedUnix = 0
+ }
+
+ if err := UpdateIssueCols(ctx, issue, "is_closed", "closed_unix"); err != nil {
+ return nil, err
+ }
+
+ // Update issue count of labels
+ if err := issue.LoadLabels(ctx); err != nil {
+ return nil, err
+ }
+ for idx := range issue.Labels {
+ if err := updateLabelCols(ctx, issue.Labels[idx], "num_issues", "num_closed_issue"); err != nil {
+ return nil, err
+ }
+ }
+
+ // Update issue count of milestone
+ if issue.MilestoneID > 0 {
+ if issue.NoAutoTime {
+ if err := UpdateMilestoneCountersWithDate(ctx, issue.MilestoneID, issue.UpdatedUnix); err != nil {
+ return nil, err
+ }
+ } else {
+ if err := UpdateMilestoneCounters(ctx, issue.MilestoneID); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ // update repository's issue closed number
+ if err := repo_model.UpdateRepoIssueNumbers(ctx, issue.RepoID, issue.IsPull, true); err != nil {
+ return nil, err
+ }
+
+ // New action comment
+ cmtType := CommentTypeClose
+ if !issue.IsClosed {
+ cmtType = CommentTypeReopen
+ } else if isMergePull {
+ cmtType = CommentTypeMergePull
+ }
+
+ return CreateComment(ctx, &CreateCommentOptions{
+ Type: cmtType,
+ Doer: doer,
+ Repo: issue.Repo,
+ Issue: issue,
+ })
+}
+
+// ChangeIssueStatus changes issue status to open or closed.
+func ChangeIssueStatus(ctx context.Context, issue *Issue, doer *user_model.User, isClosed bool) (*Comment, error) {
+ if err := issue.LoadRepo(ctx); err != nil {
+ return nil, err
+ }
+ if err := issue.LoadPoster(ctx); err != nil {
+ return nil, err
+ }
+
+ return changeIssueStatus(ctx, issue, doer, isClosed, false)
+}
+
+// ChangeIssueTitle changes the title of this issue, as the given user.
+func ChangeIssueTitle(ctx context.Context, issue *Issue, doer *user_model.User, oldTitle string) (err error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if err = UpdateIssueCols(ctx, issue, "name"); err != nil {
+ return fmt.Errorf("updateIssueCols: %w", err)
+ }
+
+ if err = issue.LoadRepo(ctx); err != nil {
+ return fmt.Errorf("loadRepo: %w", err)
+ }
+
+ opts := &CreateCommentOptions{
+ Type: CommentTypeChangeTitle,
+ Doer: doer,
+ Repo: issue.Repo,
+ Issue: issue,
+ OldTitle: oldTitle,
+ NewTitle: issue.Title,
+ }
+ if _, err = CreateComment(ctx, opts); err != nil {
+ return fmt.Errorf("createComment: %w", err)
+ }
+ if err = issue.AddCrossReferences(ctx, doer, true); err != nil {
+ return fmt.Errorf("addCrossReferences: %w", err)
+ }
+
+ return committer.Commit()
+}
+
+// ChangeIssueRef changes the branch of this issue, as the given user.
+func ChangeIssueRef(ctx context.Context, issue *Issue, doer *user_model.User, oldRef string) (err error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if err = UpdateIssueCols(ctx, issue, "ref"); err != nil {
+ return fmt.Errorf("updateIssueCols: %w", err)
+ }
+
+ if err = issue.LoadRepo(ctx); err != nil {
+ return fmt.Errorf("loadRepo: %w", err)
+ }
+ oldRefFriendly := strings.TrimPrefix(oldRef, git.BranchPrefix)
+ newRefFriendly := strings.TrimPrefix(issue.Ref, git.BranchPrefix)
+
+ opts := &CreateCommentOptions{
+ Type: CommentTypeChangeIssueRef,
+ Doer: doer,
+ Repo: issue.Repo,
+ Issue: issue,
+ OldRef: oldRefFriendly,
+ NewRef: newRefFriendly,
+ }
+ if _, err = CreateComment(ctx, opts); err != nil {
+ return fmt.Errorf("createComment: %w", err)
+ }
+
+ return committer.Commit()
+}
+
+// AddDeletePRBranchComment adds a delete-branch comment to a pull request issue
+func AddDeletePRBranchComment(ctx context.Context, doer *user_model.User, repo *repo_model.Repository, issueID int64, branchName string) error {
+ issue, err := GetIssueByID(ctx, issueID)
+ if err != nil {
+ return err
+ }
+ opts := &CreateCommentOptions{
+ Type: CommentTypeDeleteBranch,
+ Doer: doer,
+ Repo: repo,
+ Issue: issue,
+ OldRef: branchName,
+ }
+ _, err = CreateComment(ctx, opts)
+ return err
+}
+
+// UpdateIssueAttachments updates attachments by UUIDs for the issue
+func UpdateIssueAttachments(ctx context.Context, issueID int64, uuids []string) (err error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+ attachments, err := repo_model.GetAttachmentsByUUIDs(ctx, uuids)
+ if err != nil {
+ return fmt.Errorf("getAttachmentsByUUIDs [uuids: %v]: %w", uuids, err)
+ }
+ for i := 0; i < len(attachments); i++ {
+ attachments[i].IssueID = issueID
+ if err := repo_model.UpdateAttachment(ctx, attachments[i]); err != nil {
+ return fmt.Errorf("update attachment [id: %d]: %w", attachments[i].ID, err)
+ }
+ }
+ return committer.Commit()
+}
+
+// ChangeIssueContent changes issue content, as the given user.
+func ChangeIssueContent(ctx context.Context, issue *Issue, doer *user_model.User, content string, contentVersion int) (err error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ hasContentHistory, err := HasIssueContentHistory(ctx, issue.ID, 0)
+ if err != nil {
+ return fmt.Errorf("HasIssueContentHistory: %w", err)
+ }
+ if !hasContentHistory {
+ if err = SaveIssueContentHistory(ctx, issue.PosterID, issue.ID, 0,
+ issue.CreatedUnix, issue.Content, true); err != nil {
+ return fmt.Errorf("SaveIssueContentHistory: %w", err)
+ }
+ }
+
+ issue.Content = content
+ issue.ContentVersion = contentVersion + 1
+
+ expectedContentVersion := builder.NewCond().And(builder.Eq{"content_version": contentVersion})
+ affected, err := UpdateIssueColsWithCond(ctx, issue, expectedContentVersion, "content", "content_version")
+ if err != nil {
+ return fmt.Errorf("UpdateIssueCols: %w", err)
+ }
+ if affected == 0 {
+ return ErrIssueAlreadyChanged
+ }
+
+ historyDate := timeutil.TimeStampNow()
+ if issue.NoAutoTime {
+ historyDate = issue.UpdatedUnix
+ }
+ if err = SaveIssueContentHistory(ctx, doer.ID, issue.ID, 0,
+ historyDate, issue.Content, false); err != nil {
+ return fmt.Errorf("SaveIssueContentHistory: %w", err)
+ }
+
+ if err = issue.AddCrossReferences(ctx, doer, true); err != nil {
+ return fmt.Errorf("addCrossReferences: %w", err)
+ }
+
+ return committer.Commit()
+}
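+
+// changeContentWithRetrySketch is a hypothetical caller (not part of this
+// patch) showing the optimistic-locking pattern that the content_version
+// condition enables: on a conflict, reload the issue and retry once.
+func changeContentWithRetrySketch(ctx context.Context, issue *Issue, doer *user_model.User, content string) error {
+	err := ChangeIssueContent(ctx, issue, doer, content, issue.ContentVersion)
+	if err != ErrIssueAlreadyChanged {
+		return err
+	}
+	// Another edit won the race; reload and retry against the fresh version.
+	fresh, err := GetIssueByID(ctx, issue.ID)
+	if err != nil {
+		return err
+	}
+	return ChangeIssueContent(ctx, fresh, doer, content, fresh.ContentVersion)
+}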
+
+// NewIssueOptions represents the options of a new issue.
+type NewIssueOptions struct {
+ Repo *repo_model.Repository
+ Issue *Issue
+ LabelIDs []int64
+ Attachments []string // In UUID format.
+ IsPull bool
+}
+
+// NewIssueWithIndex creates issue with given index
+func NewIssueWithIndex(ctx context.Context, doer *user_model.User, opts NewIssueOptions) (err error) {
+ e := db.GetEngine(ctx)
+ opts.Issue.Title = strings.TrimSpace(opts.Issue.Title)
+
+ if opts.Issue.MilestoneID > 0 {
+ milestone, err := GetMilestoneByRepoID(ctx, opts.Issue.RepoID, opts.Issue.MilestoneID)
+ if err != nil && !IsErrMilestoneNotExist(err) {
+ return fmt.Errorf("getMilestoneByID: %w", err)
+ }
+
+		// Assume the milestone is invalid and drop it silently; it is restored just below if it exists.
+ opts.Issue.MilestoneID = 0
+ if milestone != nil {
+ opts.Issue.MilestoneID = milestone.ID
+ opts.Issue.Milestone = milestone
+ }
+ }
+
+ if opts.Issue.Index <= 0 {
+ return fmt.Errorf("no issue index provided")
+ }
+ if opts.Issue.ID > 0 {
+		return fmt.Errorf("issue already exists")
+ }
+
+ opts.Issue.Created = timeutil.TimeStampNanoNow()
+
+ if _, err := e.Insert(opts.Issue); err != nil {
+ return err
+ }
+
+ if opts.Issue.MilestoneID > 0 {
+ if err := UpdateMilestoneCounters(ctx, opts.Issue.MilestoneID); err != nil {
+ return err
+ }
+
+ opts := &CreateCommentOptions{
+ Type: CommentTypeMilestone,
+ Doer: doer,
+ Repo: opts.Repo,
+ Issue: opts.Issue,
+ OldMilestoneID: 0,
+ MilestoneID: opts.Issue.MilestoneID,
+ }
+ if _, err = CreateComment(ctx, opts); err != nil {
+ return err
+ }
+ }
+
+ if err := repo_model.UpdateRepoIssueNumbers(ctx, opts.Issue.RepoID, opts.IsPull, false); err != nil {
+ return err
+ }
+
+ if len(opts.LabelIDs) > 0 {
+		// Within the session, the SQLite3 driver cannot retrieve objects after updating something,
+		// so we have to get all needed labels first.
+ labels := make([]*Label, 0, len(opts.LabelIDs))
+ if err = e.In("id", opts.LabelIDs).Find(&labels); err != nil {
+ return fmt.Errorf("find all labels [label_ids: %v]: %w", opts.LabelIDs, err)
+ }
+
+ if err = opts.Issue.LoadPoster(ctx); err != nil {
+ return err
+ }
+
+ for _, label := range labels {
+ // Silently drop invalid labels.
+ if label.RepoID != opts.Repo.ID && label.OrgID != opts.Repo.OwnerID {
+ continue
+ }
+
+ if err = newIssueLabel(ctx, opts.Issue, label, opts.Issue.Poster); err != nil {
+ return fmt.Errorf("addLabel [id: %d]: %w", label.ID, err)
+ }
+ }
+ }
+
+ if err = NewIssueUsers(ctx, opts.Repo, opts.Issue); err != nil {
+ return err
+ }
+
+ if len(opts.Attachments) > 0 {
+ attachments, err := repo_model.GetAttachmentsByUUIDs(ctx, opts.Attachments)
+ if err != nil {
+ return fmt.Errorf("getAttachmentsByUUIDs [uuids: %v]: %w", opts.Attachments, err)
+ }
+
+ for i := 0; i < len(attachments); i++ {
+ attachments[i].IssueID = opts.Issue.ID
+ if _, err = e.ID(attachments[i].ID).Update(attachments[i]); err != nil {
+ return fmt.Errorf("update attachment [id: %d]: %w", attachments[i].ID, err)
+ }
+ }
+ }
+ if err = opts.Issue.LoadAttributes(ctx); err != nil {
+ return err
+ }
+
+ return opts.Issue.AddCrossReferences(ctx, doer, false)
+}
+
+// NewIssue creates new issue with labels for repository.
+func NewIssue(ctx context.Context, repo *repo_model.Repository, issue *Issue, labelIDs []int64, uuids []string) (err error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ idx, err := db.GetNextResourceIndex(ctx, "issue_index", repo.ID)
+ if err != nil {
+ return fmt.Errorf("generate issue index failed: %w", err)
+ }
+
+ issue.Index = idx
+
+ if err = NewIssueWithIndex(ctx, issue.Poster, NewIssueOptions{
+ Repo: repo,
+ Issue: issue,
+ LabelIDs: labelIDs,
+ Attachments: uuids,
+ }); err != nil {
+ if repo_model.IsErrUserDoesNotHaveAccessToRepo(err) || IsErrNewIssueInsert(err) {
+ return err
+ }
+ return fmt.Errorf("newIssue: %w", err)
+ }
+
+ if err = committer.Commit(); err != nil {
+ return fmt.Errorf("Commit: %w", err)
+ }
+
+ return nil
+}
+
+// UpdateIssueMentions updates issue-user relations for mentioned users.
+func UpdateIssueMentions(ctx context.Context, issueID int64, mentions []*user_model.User) error {
+ if len(mentions) == 0 {
+ return nil
+ }
+ ids := make([]int64, len(mentions))
+ for i, u := range mentions {
+ ids[i] = u.ID
+ }
+ if err := UpdateIssueUsersByMentions(ctx, issueID, ids); err != nil {
+ return fmt.Errorf("UpdateIssueUsersByMentions: %w", err)
+ }
+ return nil
+}
+
+// UpdateIssueDeadline updates an issue deadline and adds comments. Setting a deadline to 0 means deleting it.
+func UpdateIssueDeadline(ctx context.Context, issue *Issue, deadlineUnix timeutil.TimeStamp, doer *user_model.User) (err error) {
+ // if the deadline hasn't changed do nothing
+ if issue.DeadlineUnix == deadlineUnix {
+ return nil
+ }
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ // Update the deadline
+ if err = UpdateIssueCols(ctx, &Issue{ID: issue.ID, DeadlineUnix: deadlineUnix, NoAutoTime: issue.NoAutoTime, UpdatedUnix: issue.UpdatedUnix}, "deadline_unix"); err != nil {
+ return err
+ }
+
+ // Make the comment
+ if _, err = createDeadlineComment(ctx, doer, issue, deadlineUnix); err != nil {
+		return fmt.Errorf("createDeadlineComment: %w", err)
+ }
+
+ return committer.Commit()
+}
+
+// FindAndUpdateIssueMentions finds users mentioned in the given content string, and saves them in the database.
+func FindAndUpdateIssueMentions(ctx context.Context, issue *Issue, doer *user_model.User, content string) (mentions []*user_model.User, err error) {
+ rawMentions := references.FindAllMentionsMarkdown(content)
+ mentions, err = ResolveIssueMentionsByVisibility(ctx, issue, doer, rawMentions)
+ if err != nil {
+		return nil, fmt.Errorf("ResolveIssueMentionsByVisibility [%d]: %w", issue.ID, err)
+ }
+ if err = UpdateIssueMentions(ctx, issue.ID, mentions); err != nil {
+ return nil, fmt.Errorf("UpdateIssueMentions [%d]: %w", issue.ID, err)
+ }
+ return mentions, err
+}
+
+// ResolveIssueMentionsByVisibility returns the users mentioned in an issue, removing those that
+// don't have access to reading it. Teams are expanded into their users, but organizations are ignored.
+func ResolveIssueMentionsByVisibility(ctx context.Context, issue *Issue, doer *user_model.User, mentions []string) (users []*user_model.User, err error) {
+ if len(mentions) == 0 {
+ return nil, nil
+ }
+ if err = issue.LoadRepo(ctx); err != nil {
+ return nil, err
+ }
+
+ resolved := make(map[string]bool, 10)
+ var mentionTeams []string
+
+ if err := issue.Repo.LoadOwner(ctx); err != nil {
+ return nil, err
+ }
+
+ repoOwnerIsOrg := issue.Repo.Owner.IsOrganization()
+ if repoOwnerIsOrg {
+ mentionTeams = make([]string, 0, 5)
+ }
+
+ resolved[doer.LowerName] = true
+ for _, name := range mentions {
+ name := strings.ToLower(name)
+ if _, ok := resolved[name]; ok {
+ continue
+ }
+ if repoOwnerIsOrg && strings.Contains(name, "/") {
+ names := strings.Split(name, "/")
+ if len(names) < 2 || names[0] != issue.Repo.Owner.LowerName {
+ continue
+ }
+ mentionTeams = append(mentionTeams, names[1])
+ resolved[name] = true
+ } else {
+ resolved[name] = false
+ }
+ }
+
+ if issue.Repo.Owner.IsOrganization() && len(mentionTeams) > 0 {
+ teams := make([]*organization.Team, 0, len(mentionTeams))
+ if err := db.GetEngine(ctx).
+ Join("INNER", "team_repo", "team_repo.team_id = team.id").
+ Where("team_repo.repo_id=?", issue.Repo.ID).
+ In("team.lower_name", mentionTeams).
+ Find(&teams); err != nil {
+ return nil, fmt.Errorf("find mentioned teams: %w", err)
+ }
+ if len(teams) != 0 {
+ checked := make([]int64, 0, len(teams))
+ unittype := unit.TypeIssues
+ if issue.IsPull {
+ unittype = unit.TypePullRequests
+ }
+ for _, team := range teams {
+ if team.AccessMode >= perm.AccessModeAdmin {
+ checked = append(checked, team.ID)
+ resolved[issue.Repo.Owner.LowerName+"/"+team.LowerName] = true
+ continue
+ }
+ has, err := db.GetEngine(ctx).Get(&organization.TeamUnit{OrgID: issue.Repo.Owner.ID, TeamID: team.ID, Type: unittype})
+ if err != nil {
+ return nil, fmt.Errorf("get team units (%d): %w", team.ID, err)
+ }
+ if has {
+ checked = append(checked, team.ID)
+ resolved[issue.Repo.Owner.LowerName+"/"+team.LowerName] = true
+ }
+ }
+ if len(checked) != 0 {
+ teamusers := make([]*user_model.User, 0, 20)
+ if err := db.GetEngine(ctx).
+ Join("INNER", "team_user", "team_user.uid = `user`.id").
+ Join("LEFT", "forgejo_blocked_user", "forgejo_blocked_user.user_id = `user`.id").
+ In("`team_user`.team_id", checked).
+ And("`user`.is_active = ?", true).
+ And("`user`.prohibit_login = ?", false).
+ And(builder.Or(builder.IsNull{"`forgejo_blocked_user`.block_id"}, builder.Neq{"`forgejo_blocked_user`.block_id": doer.ID})).
+ Find(&teamusers); err != nil {
+ return nil, fmt.Errorf("get teams users: %w", err)
+ }
+ if len(teamusers) > 0 {
+ users = make([]*user_model.User, 0, len(teamusers))
+ for _, user := range teamusers {
+ if already, ok := resolved[user.LowerName]; !ok || !already {
+ users = append(users, user)
+ resolved[user.LowerName] = true
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Remove names already in the list to avoid querying the database if pending names remain
+ mentionUsers := make([]string, 0, len(resolved))
+ for name, already := range resolved {
+ if !already {
+ mentionUsers = append(mentionUsers, name)
+ }
+ }
+ if len(mentionUsers) == 0 {
+ return users, err
+ }
+
+ if users == nil {
+ users = make([]*user_model.User, 0, len(mentionUsers))
+ }
+
+ unchecked := make([]*user_model.User, 0, len(mentionUsers))
+ if err := db.GetEngine(ctx).
+ Join("LEFT", "forgejo_blocked_user", "forgejo_blocked_user.user_id = `user`.id").
+ Where("`user`.is_active = ?", true).
+ And("`user`.prohibit_login = ?", false).
+ And(builder.Or(builder.IsNull{"`forgejo_blocked_user`.block_id"}, builder.Neq{"`forgejo_blocked_user`.block_id": doer.ID})).
+ In("`user`.lower_name", mentionUsers).
+ Find(&unchecked); err != nil {
+ return nil, fmt.Errorf("find mentioned users: %w", err)
+ }
+ for _, user := range unchecked {
+ if already := resolved[user.LowerName]; already || user.IsOrganization() {
+ continue
+ }
+ // Normal users must have read access to the referencing issue
+ perm, err := access_model.GetUserRepoPermission(ctx, issue.Repo, user)
+ if err != nil {
+ return nil, fmt.Errorf("GetUserRepoPermission [%d]: %w", user.ID, err)
+ }
+ if !perm.CanReadIssuesOrPulls(issue.IsPull) {
+ continue
+ }
+ users = append(users, user)
+ }
+
+ return users, err
+}
+
+// UpdateIssuesMigrationsByType updates all migrated repositories' issues from gitServiceType to replace originalAuthorID with posterID
+func UpdateIssuesMigrationsByType(ctx context.Context, gitServiceType api.GitServiceType, originalAuthorID string, posterID int64) error {
+ _, err := db.GetEngine(ctx).Table("issue").
+ Where("repo_id IN (SELECT id FROM repository WHERE original_service_type = ?)", gitServiceType).
+ And("original_author_id = ?", originalAuthorID).
+ Update(map[string]any{
+ "poster_id": posterID,
+ "original_author": "",
+ "original_author_id": 0,
+ })
+ return err
+}
+
+// UpdateReactionsMigrationsByType updates all migrated repositories' reactions from gitServiceType to replace originalAuthorID with userID
+func UpdateReactionsMigrationsByType(ctx context.Context, gitServiceType api.GitServiceType, originalAuthorID string, userID int64) error {
+ _, err := db.GetEngine(ctx).Table("reaction").
+ Where("original_author_id = ?", originalAuthorID).
+ And(migratedIssueCond(gitServiceType)).
+ Update(map[string]any{
+ "user_id": userID,
+ "original_author": "",
+ "original_author_id": 0,
+ })
+ return err
+}
+
+// DeleteIssuesByRepoID deletes issues by repository ID
+func DeleteIssuesByRepoID(ctx context.Context, repoID int64) (attachmentPaths []string, err error) {
+ // MariaDB has a performance bug: https://jira.mariadb.org/browse/MDEV-16289
+ // so here it uses "DELETE ... WHERE IN" with pre-queried IDs.
+ sess := db.GetEngine(ctx)
+
+ for {
+ issueIDs := make([]int64, 0, db.DefaultMaxInSize)
+
+ err := sess.Table(&Issue{}).Where("repo_id = ?", repoID).OrderBy("id").Limit(db.DefaultMaxInSize).Cols("id").Find(&issueIDs)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(issueIDs) == 0 {
+ break
+ }
+
+ // Delete content histories
+ _, err = sess.In("issue_id", issueIDs).Delete(&ContentHistory{})
+ if err != nil {
+ return nil, err
+ }
+
+ // Delete comments and attachments
+ _, err = sess.In("issue_id", issueIDs).Delete(&Comment{})
+ if err != nil {
+ return nil, err
+ }
+
+ // Dependencies for issues in this repository
+ _, err = sess.In("issue_id", issueIDs).Delete(&IssueDependency{})
+ if err != nil {
+ return nil, err
+ }
+
+ // Delete dependencies for issues in other repositories
+ _, err = sess.In("dependency_id", issueIDs).Delete(&IssueDependency{})
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = sess.In("issue_id", issueIDs).Delete(&IssueUser{})
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = sess.In("issue_id", issueIDs).Delete(&Reaction{})
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = sess.In("issue_id", issueIDs).Delete(&IssueWatch{})
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = sess.In("issue_id", issueIDs).Delete(&Stopwatch{})
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = sess.In("issue_id", issueIDs).Delete(&TrackedTime{})
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = sess.In("issue_id", issueIDs).Delete(&project_model.ProjectIssue{})
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = sess.In("dependent_issue_id", issueIDs).Delete(&Comment{})
+ if err != nil {
+ return nil, err
+ }
+
+ var attachments []*repo_model.Attachment
+ err = sess.In("issue_id", issueIDs).Find(&attachments)
+ if err != nil {
+ return nil, err
+ }
+
+ for j := range attachments {
+ attachmentPaths = append(attachmentPaths, attachments[j].RelativePath())
+ }
+
+ _, err = sess.In("issue_id", issueIDs).Delete(&repo_model.Attachment{})
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = sess.In("id", issueIDs).Delete(&Issue{})
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return attachmentPaths, err
+}
+
+// DeleteOrphanedIssues deletes issues without a repo
+func DeleteOrphanedIssues(ctx context.Context) error {
+ var attachmentPaths []string
+ err := db.WithTx(ctx, func(ctx context.Context) error {
+ var ids []int64
+
+ if err := db.GetEngine(ctx).Table("issue").Distinct("issue.repo_id").
+ Join("LEFT", "repository", "issue.repo_id=repository.id").
+ Where(builder.IsNull{"repository.id"}).GroupBy("issue.repo_id").
+ Find(&ids); err != nil {
+ return err
+ }
+
+ for i := range ids {
+ paths, err := DeleteIssuesByRepoID(ctx, ids[i])
+ if err != nil {
+ return err
+ }
+ attachmentPaths = append(attachmentPaths, paths...)
+ }
+
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ // Remove issue attachment files.
+ for i := range attachmentPaths {
+ system_model.RemoveAllWithNotice(ctx, "Delete issue attachment", attachmentPaths[i])
+ }
+ return nil
+}
diff --git a/models/issues/issue_user.go b/models/issues/issue_user.go
new file mode 100644
index 0000000..6b59e07
--- /dev/null
+++ b/models/issues/issue_user.go
@@ -0,0 +1,96 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+)
+
+// IssueUser represents an issue-user relation.
+type IssueUser struct {
+ ID int64 `xorm:"pk autoincr"`
+ UID int64 `xorm:"INDEX unique(uid_to_issue)"` // User ID.
+ IssueID int64 `xorm:"INDEX unique(uid_to_issue)"`
+ IsRead bool
+ IsMentioned bool
+}
+
+func init() {
+ db.RegisterModel(new(IssueUser))
+}
+
+// NewIssueUsers inserts the issue-user relations for a new issue
+func NewIssueUsers(ctx context.Context, repo *repo_model.Repository, issue *Issue) error {
+ assignees, err := repo_model.GetRepoAssignees(ctx, repo)
+ if err != nil {
+ return fmt.Errorf("getAssignees: %w", err)
+ }
+
+	// Poster can be anyone; appended later if not one of the assignees.
+ isPosterAssignee := false
+
+	// Leave a seat for the poster to append later; if the poster is one of the assignees,
+	// wasting one slot is cheaper than re-allocating the slice.
+ issueUsers := make([]*IssueUser, 0, len(assignees)+1)
+ for _, assignee := range assignees {
+ issueUsers = append(issueUsers, &IssueUser{
+ IssueID: issue.ID,
+ UID: assignee.ID,
+ })
+ isPosterAssignee = isPosterAssignee || assignee.ID == issue.PosterID
+ }
+ if !isPosterAssignee {
+ issueUsers = append(issueUsers, &IssueUser{
+ IssueID: issue.ID,
+ UID: issue.PosterID,
+ })
+ }
+
+ return db.Insert(ctx, issueUsers)
+}
+
+// UpdateIssueUserByRead updates issue-user relation for reading.
+func UpdateIssueUserByRead(ctx context.Context, uid, issueID int64) error {
+ _, err := db.GetEngine(ctx).Exec("UPDATE `issue_user` SET is_read=? WHERE uid=? AND issue_id=?", true, uid, issueID)
+ return err
+}
+
+// UpdateIssueUsersByMentions updates issue-user relations for mentioned users.
+func UpdateIssueUsersByMentions(ctx context.Context, issueID int64, uids []int64) error {
+ for _, uid := range uids {
+ iu := &IssueUser{
+ UID: uid,
+ IssueID: issueID,
+ }
+ has, err := db.GetEngine(ctx).Get(iu)
+ if err != nil {
+ return err
+ }
+
+ iu.IsMentioned = true
+ if has {
+ _, err = db.GetEngine(ctx).ID(iu.ID).Cols("is_mentioned").Update(iu)
+ } else {
+ _, err = db.GetEngine(ctx).Insert(iu)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetIssueMentionIDs returns all mentioned user IDs of an issue.
+func GetIssueMentionIDs(ctx context.Context, issueID int64) ([]int64, error) {
+ var ids []int64
+ return ids, db.GetEngine(ctx).Table(IssueUser{}).
+ Where("issue_id=?", issueID).
+ And("is_mentioned=?", true).
+ Select("uid").
+ Find(&ids)
+}
diff --git a/models/issues/issue_user_test.go b/models/issues/issue_user_test.go
new file mode 100644
index 0000000..e059e43
--- /dev/null
+++ b/models/issues/issue_user_test.go
@@ -0,0 +1,61 @@
+// Copyright 2017 The Gogs Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/require"
+)
+
+func Test_NewIssueUsers(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ newIssue := &issues_model.Issue{
+ RepoID: repo.ID,
+ PosterID: 4,
+ Index: 6,
+ Title: "newTestIssueTitle",
+ Content: "newTestIssueContent",
+ }
+
+ // artificially insert new issue
+ unittest.AssertSuccessfulInsert(t, newIssue)
+
+ require.NoError(t, issues_model.NewIssueUsers(db.DefaultContext, repo, newIssue))
+
+ // issue_user table should now have entries for new issue
+ unittest.AssertExistsAndLoadBean(t, &issues_model.IssueUser{IssueID: newIssue.ID, UID: newIssue.PosterID})
+ unittest.AssertExistsAndLoadBean(t, &issues_model.IssueUser{IssueID: newIssue.ID, UID: repo.OwnerID})
+}
+
+func TestUpdateIssueUserByRead(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 1})
+
+ require.NoError(t, issues_model.UpdateIssueUserByRead(db.DefaultContext, 4, issue.ID))
+ unittest.AssertExistsAndLoadBean(t, &issues_model.IssueUser{IssueID: issue.ID, UID: 4}, "is_read=1")
+
+ require.NoError(t, issues_model.UpdateIssueUserByRead(db.DefaultContext, 4, issue.ID))
+ unittest.AssertExistsAndLoadBean(t, &issues_model.IssueUser{IssueID: issue.ID, UID: 4}, "is_read=1")
+
+ require.NoError(t, issues_model.UpdateIssueUserByRead(db.DefaultContext, unittest.NonexistentID, unittest.NonexistentID))
+}
+
+func TestUpdateIssueUsersByMentions(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 1})
+
+ uids := []int64{2, 5}
+ require.NoError(t, issues_model.UpdateIssueUsersByMentions(db.DefaultContext, issue.ID, uids))
+ for _, uid := range uids {
+ unittest.AssertExistsAndLoadBean(t, &issues_model.IssueUser{IssueID: issue.ID, UID: uid}, "is_mentioned=1")
+ }
+}
diff --git a/models/issues/issue_watch.go b/models/issues/issue_watch.go
new file mode 100644
index 0000000..9e616a0
--- /dev/null
+++ b/models/issues/issue_watch.go
@@ -0,0 +1,134 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/timeutil"
+)
+
+// IssueWatch represents a user's subscription to notifications for an issue.
+type IssueWatch struct {
+ ID int64 `xorm:"pk autoincr"`
+ UserID int64 `xorm:"UNIQUE(watch) NOT NULL"`
+ IssueID int64 `xorm:"UNIQUE(watch) NOT NULL"`
+ IsWatching bool `xorm:"NOT NULL"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created NOT NULL"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"updated NOT NULL"`
+}
+
+func init() {
+ db.RegisterModel(new(IssueWatch))
+}
+
+// IssueWatchList contains IssueWatch
+type IssueWatchList []*IssueWatch
+
+// CreateOrUpdateIssueWatch sets whether a user is watching an issue
+func CreateOrUpdateIssueWatch(ctx context.Context, userID, issueID int64, isWatching bool) error {
+ iw, exists, err := GetIssueWatch(ctx, userID, issueID)
+ if err != nil {
+ return err
+ }
+
+ if !exists {
+ iw = &IssueWatch{
+ UserID: userID,
+ IssueID: issueID,
+ IsWatching: isWatching,
+ }
+
+ if _, err := db.GetEngine(ctx).Insert(iw); err != nil {
+ return err
+ }
+ } else {
+ iw.IsWatching = isWatching
+
+ if _, err := db.GetEngine(ctx).ID(iw.ID).Cols("is_watching", "updated_unix").Update(iw); err != nil {
+ return err
+ }
+ }
+ return nil
+}
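+
+// watchIssueOnce is a minimal illustrative sketch (a hypothetical helper, not
+// used by the codebase): it subscribes a user to an issue unless an explicit
+// watch row already records a decision, demonstrating the exists/IsWatching
+// contract of GetIssueWatch below.
+func watchIssueOnce(ctx context.Context, userID, issueID int64) error {
+ _, exists, err := GetIssueWatch(ctx, userID, issueID)
+ if err != nil {
+ return err
+ }
+ if exists {
+ return nil // the user already made an explicit choice; keep it
+ }
+ return CreateOrUpdateIssueWatch(ctx, userID, issueID, true)
+}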
+
+// GetIssueWatch returns the IssueWatch object for the given user and issue, if any;
+// the current web UI needs the object for watchers AND explicit non-watchers
+func GetIssueWatch(ctx context.Context, userID, issueID int64) (iw *IssueWatch, exists bool, err error) {
+ iw = new(IssueWatch)
+ exists, err = db.GetEngine(ctx).
+ Where("user_id = ?", userID).
+ And("issue_id = ?", issueID).
+ Get(iw)
+ return iw, exists, err
+}
+
+// CheckIssueWatch checks if a user is watching an issue;
+// it takes participation and repository watches into account
+func CheckIssueWatch(ctx context.Context, user *user_model.User, issue *Issue) (bool, error) {
+ iw, exist, err := GetIssueWatch(ctx, user.ID, issue.ID)
+ if err != nil {
+ return false, err
+ }
+ if exist {
+ return iw.IsWatching, nil
+ }
+ w, err := repo_model.GetWatch(ctx, user.ID, issue.RepoID)
+ if err != nil {
+ return false, err
+ }
+ return repo_model.IsWatchMode(w.Mode) || IsUserParticipantsOfIssue(ctx, user, issue), nil
+}
+
+// GetIssueWatchersIDs returns the IDs of subscribers (watching == true) or explicit
+// unsubscribers (watching == false) of a given issue, avoiding a join with `user`
+// for performance reasons. User permissions must be verified elsewhere if required.
+func GetIssueWatchersIDs(ctx context.Context, issueID int64, watching bool) ([]int64, error) {
+ ids := make([]int64, 0, 64)
+ return ids, db.GetEngine(ctx).Table("issue_watch").
+ Where("issue_id=?", issueID).
+ And("is_watching = ?", watching).
+ Select("user_id").
+ Find(&ids)
+}
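+
+// issueWatchSplit is a hypothetical helper shown only to illustrate the
+// boolean parameter of GetIssueWatchersIDs: true selects subscribers, false
+// selects explicit unsubscribers.
+func issueWatchSplit(ctx context.Context, issueID int64) (watching, muted []int64, err error) {
+ if watching, err = GetIssueWatchersIDs(ctx, issueID, true); err != nil {
+ return nil, nil, err
+ }
+ if muted, err = GetIssueWatchersIDs(ctx, issueID, false); err != nil {
+ return nil, nil, err
+ }
+ return watching, muted, nil
+}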
+
+// GetIssueWatchers returns the active watchers of a given issue (inactive and login-prohibited users are excluded)
+func GetIssueWatchers(ctx context.Context, issueID int64, listOptions db.ListOptions) (IssueWatchList, error) {
+ sess := db.GetEngine(ctx).
+ Where("`issue_watch`.issue_id = ?", issueID).
+ And("`issue_watch`.is_watching = ?", true).
+ And("`user`.is_active = ?", true).
+ And("`user`.prohibit_login = ?", false).
+ Join("INNER", "`user`", "`user`.id = `issue_watch`.user_id")
+
+ if listOptions.Page != 0 {
+ sess = db.SetSessionPagination(sess, &listOptions)
+ watches := make([]*IssueWatch, 0, listOptions.PageSize)
+ return watches, sess.Find(&watches)
+ }
+ watches := make([]*IssueWatch, 0, 8)
+ return watches, sess.Find(&watches)
+}
+
+// CountIssueWatchers counts the active watchers of a given issue
+func CountIssueWatchers(ctx context.Context, issueID int64) (int64, error) {
+ return db.GetEngine(ctx).
+ Where("`issue_watch`.issue_id = ?", issueID).
+ And("`issue_watch`.is_watching = ?", true).
+ And("`user`.is_active = ?", true).
+ And("`user`.prohibit_login = ?", false).
+ Join("INNER", "`user`", "`user`.id = `issue_watch`.user_id").Count(new(IssueWatch))
+}
+
+// RemoveIssueWatchersByRepoID removes all issue watches a user holds within the given repository
+func RemoveIssueWatchersByRepoID(ctx context.Context, userID, repoID int64) error {
+ _, err := db.GetEngine(ctx).
+ Join("INNER", "issue", "`issue`.id = `issue_watch`.issue_id AND `issue`.repo_id = ?", repoID).
+ Where("`issue_watch`.user_id = ?", userID).
+ Delete(new(IssueWatch))
+ return err
+}
diff --git a/models/issues/issue_watch_test.go b/models/issues/issue_watch_test.go
new file mode 100644
index 0000000..573215d
--- /dev/null
+++ b/models/issues/issue_watch_test.go
@@ -0,0 +1,68 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestCreateOrUpdateIssueWatch(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ require.NoError(t, issues_model.CreateOrUpdateIssueWatch(db.DefaultContext, 3, 1, true))
+ iw := unittest.AssertExistsAndLoadBean(t, &issues_model.IssueWatch{UserID: 3, IssueID: 1})
+ assert.True(t, iw.IsWatching)
+
+ require.NoError(t, issues_model.CreateOrUpdateIssueWatch(db.DefaultContext, 1, 1, false))
+ iw = unittest.AssertExistsAndLoadBean(t, &issues_model.IssueWatch{UserID: 1, IssueID: 1})
+ assert.False(t, iw.IsWatching)
+}
+
+func TestGetIssueWatch(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ _, exists, err := issues_model.GetIssueWatch(db.DefaultContext, 9, 1)
+ assert.True(t, exists)
+ require.NoError(t, err)
+
+ iw, exists, err := issues_model.GetIssueWatch(db.DefaultContext, 2, 2)
+ assert.True(t, exists)
+ require.NoError(t, err)
+ assert.False(t, iw.IsWatching)
+
+ _, exists, err = issues_model.GetIssueWatch(db.DefaultContext, 3, 1)
+ assert.False(t, exists)
+ require.NoError(t, err)
+}
+
+func TestGetIssueWatchers(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ iws, err := issues_model.GetIssueWatchers(db.DefaultContext, 1, db.ListOptions{})
+ require.NoError(t, err)
+ // Watcher is inactive, thus 0
+ assert.Empty(t, iws)
+
+ iws, err = issues_model.GetIssueWatchers(db.DefaultContext, 2, db.ListOptions{})
+ require.NoError(t, err)
+ // Watcher is explicit not watching
+ assert.Empty(t, iws)
+
+ iws, err = issues_model.GetIssueWatchers(db.DefaultContext, 5, db.ListOptions{})
+ require.NoError(t, err)
+ // Issue has no Watchers
+ assert.Empty(t, iws)
+
+ iws, err = issues_model.GetIssueWatchers(db.DefaultContext, 7, db.ListOptions{})
+ require.NoError(t, err)
+ // Issue has one watcher
+ assert.Len(t, iws, 1)
+}
diff --git a/models/issues/issue_xref.go b/models/issues/issue_xref.go
new file mode 100644
index 0000000..9c9d5d6
--- /dev/null
+++ b/models/issues/issue_xref.go
@@ -0,0 +1,364 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/references"
+)
+
+type crossReference struct {
+ Issue *Issue
+ Action references.XRefAction
+}
+
+// crossReferencesContext is the context passed along to the cross-reference helper functions
+type crossReferencesContext struct {
+ Type CommentType
+ Doer *user_model.User
+ OrigIssue *Issue
+ OrigComment *Comment
+ RemoveOld bool
+}
+
+func findOldCrossReferences(ctx context.Context, issueID, commentID int64) ([]*Comment, error) {
+ active := make([]*Comment, 0, 10)
+ return active, db.GetEngine(ctx).Where("`ref_action` IN (?, ?, ?)", references.XRefActionNone, references.XRefActionCloses, references.XRefActionReopens).
+ And("`ref_issue_id` = ?", issueID).
+ And("`ref_comment_id` = ?", commentID).
+ Find(&active)
+}
+
+func neuterCrossReferences(ctx context.Context, issueID, commentID int64) error {
+ active, err := findOldCrossReferences(ctx, issueID, commentID)
+ if err != nil {
+ return err
+ }
+ ids := make([]int64, len(active))
+ for i, c := range active {
+ ids[i] = c.ID
+ }
+ return neuterCrossReferencesIDs(ctx, nil, ids)
+}
+
+func neuterCrossReferencesIDs(stdCtx context.Context, ctx *crossReferencesContext, ids []int64) error {
+ sess := db.GetEngine(stdCtx).In("id", ids).Cols("`ref_action`")
+ if ctx != nil && ctx.OrigIssue.NoAutoTime {
+ sess.SetExpr("updated_unix", ctx.OrigIssue.UpdatedUnix).NoAutoTime()
+ }
+ _, err := sess.Update(&Comment{RefAction: references.XRefActionNeutered})
+ return err
+}
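+
+// Note: when the originating issue carries NoAutoTime (timestamps preserved,
+// e.g. during migrations), the branch above pins updated_unix to the issue's
+// stored value instead of letting xorm bump it automatically.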
+
+// AddCrossReferences adds the cross-references found in the issue's title and content, which may point to other repositories.
+func (issue *Issue) AddCrossReferences(stdCtx context.Context, doer *user_model.User, removeOld bool) error {
+ var commentType CommentType
+ if issue.IsPull {
+ commentType = CommentTypePullRef
+ } else {
+ commentType = CommentTypeIssueRef
+ }
+ ctx := &crossReferencesContext{
+ Type: commentType,
+ Doer: doer,
+ OrigIssue: issue,
+ RemoveOld: removeOld,
+ }
+ return issue.createCrossReferences(stdCtx, ctx, issue.Title, issue.Content)
+}
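+
+// Typical call-site sketch (illustrative only, assuming ctx and doer are in scope):
+//
+//   if err := issue.AddCrossReferences(ctx, doer, true); err != nil { ... }
+//
+// Passing removeOld == true on edits lets stale references be neutered by
+// createCrossReferences below.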
+
+func (issue *Issue) createCrossReferences(stdCtx context.Context, ctx *crossReferencesContext, plaincontent, mdcontent string) error {
+ xreflist, err := ctx.OrigIssue.getCrossReferences(stdCtx, ctx, plaincontent, mdcontent)
+ if err != nil {
+ return err
+ }
+ if ctx.RemoveOld {
+ var commentID int64
+ if ctx.OrigComment != nil {
+ commentID = ctx.OrigComment.ID
+ }
+ active, err := findOldCrossReferences(stdCtx, ctx.OrigIssue.ID, commentID)
+ if err != nil {
+ return err
+ }
+ ids := make([]int64, 0, len(active))
+ for _, c := range active {
+ found := false
+ for i, x := range xreflist {
+ if x.Issue.ID == c.IssueID && x.Action == c.RefAction {
+ found = true
+ xreflist = append(xreflist[:i], xreflist[i+1:]...)
+ break
+ }
+ }
+ if !found {
+ ids = append(ids, c.ID)
+ }
+ }
+ if len(ids) > 0 {
+ if err = neuterCrossReferencesIDs(stdCtx, ctx, ids); err != nil {
+ return err
+ }
+ }
+ }
+ for _, xref := range xreflist {
+ var refCommentID int64
+ if ctx.OrigComment != nil {
+ refCommentID = ctx.OrigComment.ID
+ }
+ if ctx.OrigIssue.NoAutoTime {
+ xref.Issue.NoAutoTime = true
+ xref.Issue.UpdatedUnix = ctx.OrigIssue.UpdatedUnix
+ }
+ opts := &CreateCommentOptions{
+ Type: ctx.Type,
+ Doer: ctx.Doer,
+ Repo: xref.Issue.Repo,
+ Issue: xref.Issue,
+ RefRepoID: ctx.OrigIssue.RepoID,
+ RefIssueID: ctx.OrigIssue.ID,
+ RefCommentID: refCommentID,
+ RefAction: xref.Action,
+ RefIsPull: ctx.OrigIssue.IsPull,
+ }
+ _, err := CreateComment(stdCtx, opts)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (issue *Issue) getCrossReferences(stdCtx context.Context, ctx *crossReferencesContext, plaincontent, mdcontent string) ([]*crossReference, error) {
+ xreflist := make([]*crossReference, 0, 5)
+ var (
+ refRepo *repo_model.Repository
+ refIssue *Issue
+ refAction references.XRefAction
+ err error
+ )
+
+ allrefs := append(references.FindAllIssueReferences(plaincontent), references.FindAllIssueReferencesMarkdown(mdcontent)...)
+ for _, ref := range allrefs {
+ if ref.Owner == "" && ref.Name == "" {
+ // Issues in the same repository
+ if err := ctx.OrigIssue.LoadRepo(stdCtx); err != nil {
+ return nil, err
+ }
+ refRepo = ctx.OrigIssue.Repo
+ } else {
+ // Issues in other repositories
+ refRepo, err = repo_model.GetRepositoryByOwnerAndName(stdCtx, ref.Owner, ref.Name)
+ if err != nil {
+ if repo_model.IsErrRepoNotExist(err) {
+ continue
+ }
+ return nil, err
+ }
+ }
+ if refIssue, refAction, err = ctx.OrigIssue.verifyReferencedIssue(stdCtx, ctx, refRepo, ref); err != nil {
+ return nil, err
+ }
+ if refIssue != nil {
+ xreflist = ctx.OrigIssue.updateCrossReferenceList(xreflist, &crossReference{
+ Issue: refIssue,
+ Action: refAction,
+ })
+ }
+ }
+
+ return xreflist, nil
+}
+
+func (issue *Issue) updateCrossReferenceList(list []*crossReference, xref *crossReference) []*crossReference {
+ if xref.Issue.ID == issue.ID {
+ return list
+ }
+ for i, r := range list {
+ if r.Issue.ID == xref.Issue.ID {
+ if xref.Action != references.XRefActionNone {
+ list[i].Action = xref.Action
+ }
+ return list
+ }
+ }
+ return append(list, xref)
+}
+
+// verifyReferencedIssue checks whether the referenced issue exists and whether the doer has permission to perform the referenced action
+func (issue *Issue) verifyReferencedIssue(stdCtx context.Context, ctx *crossReferencesContext, repo *repo_model.Repository,
+ ref references.IssueReference,
+) (*Issue, references.XRefAction, error) {
+ refIssue := &Issue{RepoID: repo.ID, Index: ref.Index}
+ refAction := ref.Action
+ e := db.GetEngine(stdCtx)
+
+ if has, _ := e.Get(refIssue); !has {
+ return nil, references.XRefActionNone, nil
+ }
+ if err := refIssue.LoadRepo(stdCtx); err != nil {
+ return nil, references.XRefActionNone, err
+ }
+
+ // Close/reopen actions can only be set from pull requests to issues
+ if refIssue.IsPull || !issue.IsPull {
+ refAction = references.XRefActionNone
+ }
+
+ // Check doer permissions; set action to None if the doer can't change the destination
+ if refIssue.RepoID != ctx.OrigIssue.RepoID || ref.Action != references.XRefActionNone {
+ perm, err := access_model.GetUserRepoPermission(stdCtx, refIssue.Repo, ctx.Doer)
+ if err != nil {
+ return nil, references.XRefActionNone, err
+ }
+ if !perm.CanReadIssuesOrPulls(refIssue.IsPull) {
+ return nil, references.XRefActionNone, nil
+ }
+ // Accept close/reopening actions only if the poster is able to close the
+ // referenced issue manually at this moment. The only exception is
+ // the poster of a new PR referencing an issue on the same repo: then the merger
+ // should be responsible for checking whether the reference should resolve.
+ if ref.Action != references.XRefActionNone &&
+ ctx.Doer.ID != refIssue.PosterID &&
+ !perm.CanWriteIssuesOrPulls(refIssue.IsPull) &&
+ (refIssue.RepoID != ctx.OrigIssue.RepoID || ctx.OrigComment != nil) {
+ refAction = references.XRefActionNone
+ }
+ }
+
+ return refIssue, refAction, nil
+}
+
+// AddCrossReferences adds the cross-references found in the comment's content
+func (c *Comment) AddCrossReferences(stdCtx context.Context, doer *user_model.User, removeOld bool) error {
+ if c.Type != CommentTypeCode && c.Type != CommentTypeComment {
+ return nil
+ }
+ if err := c.LoadIssue(stdCtx); err != nil {
+ return err
+ }
+ ctx := &crossReferencesContext{
+ Type: CommentTypeCommentRef,
+ Doer: doer,
+ OrigIssue: c.Issue,
+ OrigComment: c,
+ RemoveOld: removeOld,
+ }
+ return c.Issue.createCrossReferences(stdCtx, ctx, "", c.Content)
+}
+
+func (c *Comment) neuterCrossReferences(ctx context.Context) error {
+ return neuterCrossReferences(ctx, c.IssueID, c.ID)
+}
+
+// LoadRefComment loads the comment that created this reference from the database
+func (c *Comment) LoadRefComment(ctx context.Context) (err error) {
+ if c.RefComment != nil {
+ return nil
+ }
+ c.RefComment, err = GetCommentByID(ctx, c.RefCommentID)
+ return err
+}
+
+// LoadRefIssue loads the issue where this reference was created from the database
+func (c *Comment) LoadRefIssue(ctx context.Context) (err error) {
+ if c.RefIssue != nil {
+ return nil
+ }
+ c.RefIssue, err = GetIssueByID(ctx, c.RefIssueID)
+ if err == nil {
+ err = c.RefIssue.LoadRepo(ctx)
+ }
+ return err
+}
+
+// CommentTypeIsRef returns true if CommentType is a reference from another issue
+func CommentTypeIsRef(t CommentType) bool {
+ return t == CommentTypeCommentRef || t == CommentTypePullRef || t == CommentTypeIssueRef
+}
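+
+// filterRefComments is a hypothetical helper (illustration only): it keeps
+// just the comments that represent cross-references, using CommentTypeIsRef.
+func filterRefComments(comments []*Comment) []*Comment {
+ refs := make([]*Comment, 0, len(comments))
+ for _, c := range comments {
+ if CommentTypeIsRef(c.Type) {
+ refs = append(refs, c)
+ }
+ }
+ return refs
+}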
+
+// RefCommentLink returns the relative URL for the comment that created this reference
+func (c *Comment) RefCommentLink(ctx context.Context) string {
+ // Edge case for when the reference is inside the title or the description of the referring issue
+ if c.RefCommentID == 0 {
+ return c.RefIssueLink(ctx)
+ }
+ if err := c.LoadRefComment(ctx); err != nil { // Silently dropping errors :unamused:
+ log.Error("LoadRefComment(%d): %v", c.RefCommentID, err)
+ return ""
+ }
+ return c.RefComment.Link(ctx)
+}
+
+// RefIssueLink returns the relative URL of the issue where this reference was created
+func (c *Comment) RefIssueLink(ctx context.Context) string {
+ if err := c.LoadRefIssue(ctx); err != nil { // Silently dropping errors :unamused:
+ log.Error("LoadRefIssue(%d): %v", c.RefCommentID, err)
+ return ""
+ }
+ return c.RefIssue.Link()
+}
+
+// RefIssueTitle returns the title of the issue where this reference was created
+func (c *Comment) RefIssueTitle(ctx context.Context) string {
+ if err := c.LoadRefIssue(ctx); err != nil { // Silently dropping errors :unamused:
+ log.Error("LoadRefIssue(%d): %v", c.RefCommentID, err)
+ return ""
+ }
+ return c.RefIssue.Title
+}
+
+// RefIssueIdent returns the user-friendly identifier (e.g. "#1234") of the issue where this reference was created
+func (c *Comment) RefIssueIdent(ctx context.Context) string {
+ if err := c.LoadRefIssue(ctx); err != nil { // Silently dropping errors :unamused:
+ log.Error("LoadRefIssue(%d): %v", c.RefCommentID, err)
+ return ""
+ }
+ // FIXME: check this name for cross-repository references (#7901 if it gets merged)
+ return fmt.Sprintf("#%d", c.RefIssue.Index)
+}
+
+// __________ .__ .__ __________ __
+// \______ \__ __| | | |\______ \ ____ ________ __ ____ _______/ |_
+// | ___/ | \ | | | | _// __ \/ ____/ | \_/ __ \ / ___/\ __\
+// | | | | / |_| |_| | \ ___< <_| | | /\ ___/ \___ \ | |
+// |____| |____/|____/____/____|_ /\___ >__ |____/ \___ >____ > |__|
+// \/ \/ |__| \/ \/
+
+// ResolveCrossReferences returns the list of references this PR would close or reopen, keeping only the latest action per issue
+func (pr *PullRequest) ResolveCrossReferences(ctx context.Context) ([]*Comment, error) {
+ unfiltered := make([]*Comment, 0, 5)
+ if err := db.GetEngine(ctx).
+ Where("ref_repo_id = ? AND ref_issue_id = ?", pr.Issue.RepoID, pr.Issue.ID).
+ In("ref_action", []references.XRefAction{references.XRefActionCloses, references.XRefActionReopens}).
+ OrderBy("id").
+ Find(&unfiltered); err != nil {
+ return nil, fmt.Errorf("get reference: %w", err)
+ }
+
+ refs := make([]*Comment, 0, len(unfiltered))
+ for _, ref := range unfiltered {
+ found := false
+ for i, r := range refs {
+ if r.IssueID == ref.IssueID {
+ // Keep only the latest
+ refs[i] = ref
+ found = true
+ break
+ }
+ }
+ if !found {
+ refs = append(refs, ref)
+ }
+ }
+
+ return refs, nil
+}
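+
+// closableIssueIDs is an illustrative sketch (a hypothetical helper, not part
+// of the model API): it lists the issues this PR would close on merge,
+// skipping references whose latest action is a reopen.
+func closableIssueIDs(ctx context.Context, pr *PullRequest) ([]int64, error) {
+ refs, err := pr.ResolveCrossReferences(ctx)
+ if err != nil {
+ return nil, err
+ }
+ ids := make([]int64, 0, len(refs))
+ for _, ref := range refs {
+ if ref.RefAction == references.XRefActionCloses {
+ ids = append(ids, ref.IssueID)
+ }
+ }
+ return ids, nil
+}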
diff --git a/models/issues/issue_xref_test.go b/models/issues/issue_xref_test.go
new file mode 100644
index 0000000..a24d1b0
--- /dev/null
+++ b/models/issues/issue_xref_test.go
@@ -0,0 +1,185 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues_test
+
+import (
+ "fmt"
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/references"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestXRef_AddCrossReferences(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ // Issue #1 to test against
+ itarget := testCreateIssue(t, 1, 2, "title1", "content1", false)
+
+ // PR to close issue #1
+ content := fmt.Sprintf("content2, closes #%d", itarget.Index)
+ pr := testCreateIssue(t, 1, 2, "title2", content, true)
+ ref := unittest.AssertExistsAndLoadBean(t, &issues_model.Comment{IssueID: itarget.ID, RefIssueID: pr.ID, RefCommentID: 0})
+ assert.Equal(t, issues_model.CommentTypePullRef, ref.Type)
+ assert.Equal(t, pr.RepoID, ref.RefRepoID)
+ assert.True(t, ref.RefIsPull)
+ assert.Equal(t, references.XRefActionCloses, ref.RefAction)
+
+ // Comment on PR to reopen issue #1
+ content = fmt.Sprintf("content2, reopens #%d", itarget.Index)
+ c := testCreateComment(t, 2, pr.ID, content)
+ ref = unittest.AssertExistsAndLoadBean(t, &issues_model.Comment{IssueID: itarget.ID, RefIssueID: pr.ID, RefCommentID: c.ID})
+ assert.Equal(t, issues_model.CommentTypeCommentRef, ref.Type)
+ assert.Equal(t, pr.RepoID, ref.RefRepoID)
+ assert.True(t, ref.RefIsPull)
+ assert.Equal(t, references.XRefActionReopens, ref.RefAction)
+
+ // Issue mentioning issue #1
+ content = fmt.Sprintf("content3, mentions #%d", itarget.Index)
+ i := testCreateIssue(t, 1, 2, "title3", content, false)
+ ref = unittest.AssertExistsAndLoadBean(t, &issues_model.Comment{IssueID: itarget.ID, RefIssueID: i.ID, RefCommentID: 0})
+ assert.Equal(t, issues_model.CommentTypeIssueRef, ref.Type)
+ assert.Equal(t, pr.RepoID, ref.RefRepoID)
+ assert.False(t, ref.RefIsPull)
+ assert.Equal(t, references.XRefActionNone, ref.RefAction)
+
+ // Issue #4 to test against
+ itarget = testCreateIssue(t, 3, 3, "title4", "content4", false)
+
+ // Cross-reference to issue #4 by admin
+ content = fmt.Sprintf("content5, mentions org3/repo3#%d", itarget.Index)
+ i = testCreateIssue(t, 2, 1, "title5", content, false)
+ ref = unittest.AssertExistsAndLoadBean(t, &issues_model.Comment{IssueID: itarget.ID, RefIssueID: i.ID, RefCommentID: 0})
+ assert.Equal(t, issues_model.CommentTypeIssueRef, ref.Type)
+ assert.Equal(t, i.RepoID, ref.RefRepoID)
+ assert.False(t, ref.RefIsPull)
+ assert.Equal(t, references.XRefActionNone, ref.RefAction)
+
+ // Cross-reference to issue #4 with no permission
+ content = fmt.Sprintf("content6, mentions org3/repo3#%d", itarget.Index)
+ i = testCreateIssue(t, 4, 5, "title6", content, false)
+ unittest.AssertNotExistsBean(t, &issues_model.Comment{IssueID: itarget.ID, RefIssueID: i.ID, RefCommentID: 0})
+}
+
+func TestXRef_NeuterCrossReferences(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ // Issue #1 to test against
+ itarget := testCreateIssue(t, 1, 2, "title1", "content1", false)
+
+ // Issue mentioning issue #1
+ title := fmt.Sprintf("title2, mentions #%d", itarget.Index)
+ i := testCreateIssue(t, 1, 2, title, "content2", false)
+ ref := unittest.AssertExistsAndLoadBean(t, &issues_model.Comment{IssueID: itarget.ID, RefIssueID: i.ID, RefCommentID: 0})
+ assert.Equal(t, issues_model.CommentTypeIssueRef, ref.Type)
+ assert.Equal(t, references.XRefActionNone, ref.RefAction)
+
+ d := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+ i.Title = "title2, no mentions"
+ require.NoError(t, issues_model.ChangeIssueTitle(db.DefaultContext, i, d, title))
+
+ ref = unittest.AssertExistsAndLoadBean(t, &issues_model.Comment{IssueID: itarget.ID, RefIssueID: i.ID, RefCommentID: 0})
+ assert.Equal(t, issues_model.CommentTypeIssueRef, ref.Type)
+ assert.Equal(t, references.XRefActionNeutered, ref.RefAction)
+}
+
+func TestXRef_ResolveCrossReferences(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ d := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+
+ i1 := testCreateIssue(t, 1, 2, "title1", "content1", false)
+ i2 := testCreateIssue(t, 1, 2, "title2", "content2", false)
+ i3 := testCreateIssue(t, 1, 2, "title3", "content3", false)
+ _, err := issues_model.ChangeIssueStatus(db.DefaultContext, i3, d, true)
+ require.NoError(t, err)
+
+ pr := testCreatePR(t, 1, 2, "titlepr", fmt.Sprintf("closes #%d", i1.Index))
+ rp := unittest.AssertExistsAndLoadBean(t, &issues_model.Comment{IssueID: i1.ID, RefIssueID: pr.Issue.ID, RefCommentID: 0})
+
+ c1 := testCreateComment(t, 2, pr.Issue.ID, fmt.Sprintf("closes #%d", i2.Index))
+ r1 := unittest.AssertExistsAndLoadBean(t, &issues_model.Comment{IssueID: i2.ID, RefIssueID: pr.Issue.ID, RefCommentID: c1.ID})
+
+ // Must be ignored
+ c2 := testCreateComment(t, 2, pr.Issue.ID, fmt.Sprintf("mentions #%d", i2.Index))
+ unittest.AssertExistsAndLoadBean(t, &issues_model.Comment{IssueID: i2.ID, RefIssueID: pr.Issue.ID, RefCommentID: c2.ID})
+
+ // Must be superseded by c4/r4
+ c3 := testCreateComment(t, 2, pr.Issue.ID, fmt.Sprintf("reopens #%d", i3.Index))
+ unittest.AssertExistsAndLoadBean(t, &issues_model.Comment{IssueID: i3.ID, RefIssueID: pr.Issue.ID, RefCommentID: c3.ID})
+
+ c4 := testCreateComment(t, 2, pr.Issue.ID, fmt.Sprintf("closes #%d", i3.Index))
+ r4 := unittest.AssertExistsAndLoadBean(t, &issues_model.Comment{IssueID: i3.ID, RefIssueID: pr.Issue.ID, RefCommentID: c4.ID})
+
+ refs, err := pr.ResolveCrossReferences(db.DefaultContext)
+ require.NoError(t, err)
+ assert.Len(t, refs, 3)
+ assert.Equal(t, rp.ID, refs[0].ID, "bad ref rp: %+v", refs[0])
+ assert.Equal(t, r1.ID, refs[1].ID, "bad ref r1: %+v", refs[1])
+ assert.Equal(t, r4.ID, refs[2].ID, "bad ref r4: %+v", refs[2])
+}
+
+func testCreateIssue(t *testing.T, repo, doer int64, title, content string, ispull bool) *issues_model.Issue {
+ r := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: repo})
+ d := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: doer})
+
+ ctx, committer, err := db.TxContext(db.DefaultContext)
+ require.NoError(t, err)
+ defer committer.Close()
+
+ idx, err := db.GetNextResourceIndex(ctx, "issue_index", r.ID)
+ require.NoError(t, err)
+ i := &issues_model.Issue{
+ RepoID: r.ID,
+ PosterID: d.ID,
+ Poster: d,
+ Title: title,
+ Content: content,
+ IsPull: ispull,
+ Index: idx,
+ }
+
+ err = issues_model.NewIssueWithIndex(ctx, d, issues_model.NewIssueOptions{
+ Repo: r,
+ Issue: i,
+ })
+ require.NoError(t, err)
+ i, err = issues_model.GetIssueByID(ctx, i.ID)
+ require.NoError(t, err)
+ require.NoError(t, i.AddCrossReferences(ctx, d, false))
+ require.NoError(t, committer.Commit())
+ return i
+}
+
+func testCreatePR(t *testing.T, repo, doer int64, title, content string) *issues_model.PullRequest {
+ r := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: repo})
+ d := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: doer})
+ i := &issues_model.Issue{RepoID: r.ID, PosterID: d.ID, Poster: d, Title: title, Content: content, IsPull: true}
+ pr := &issues_model.PullRequest{HeadRepoID: repo, BaseRepoID: repo, HeadBranch: "head", BaseBranch: "base", Status: issues_model.PullRequestStatusMergeable}
+ require.NoError(t, issues_model.NewPullRequest(db.DefaultContext, r, i, nil, nil, pr))
+ pr.Issue = i
+ return pr
+}
+
+func testCreateComment(t *testing.T, doer, issue int64, content string) *issues_model.Comment {
+ d := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: doer})
+ i := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: issue})
+ c := &issues_model.Comment{Type: issues_model.CommentTypeComment, PosterID: doer, Poster: d, IssueID: issue, Issue: i, Content: content}
+
+ ctx, committer, err := db.TxContext(db.DefaultContext)
+ require.NoError(t, err)
+ defer committer.Close()
+ err = db.Insert(ctx, c)
+ require.NoError(t, err)
+ require.NoError(t, c.AddCrossReferences(ctx, d, false))
+ require.NoError(t, committer.Commit())
+ return c
+}
diff --git a/models/issues/label.go b/models/issues/label.go
new file mode 100644
index 0000000..61478e1
--- /dev/null
+++ b/models/issues/label.go
@@ -0,0 +1,509 @@
+// Copyright 2016 The Gogs Authors. All rights reserved.
+// Copyright 2020 The Gitea Authors.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "context"
+ "fmt"
+ "slices"
+ "strconv"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/label"
+ "code.gitea.io/gitea/modules/optional"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ErrRepoLabelNotExist represents a "RepoLabelNotExist" kind of error.
+type ErrRepoLabelNotExist struct {
+ LabelID int64
+ RepoID int64
+}
+
+// IsErrRepoLabelNotExist checks if an error is an ErrRepoLabelNotExist.
+func IsErrRepoLabelNotExist(err error) bool {
+ _, ok := err.(ErrRepoLabelNotExist)
+ return ok
+}
+
+func (err ErrRepoLabelNotExist) Error() string {
+ return fmt.Sprintf("label does not exist [label_id: %d, repo_id: %d]", err.LabelID, err.RepoID)
+}
+
+func (err ErrRepoLabelNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ErrOrgLabelNotExist represents a "OrgLabelNotExist" kind of error.
+type ErrOrgLabelNotExist struct {
+ LabelID int64
+ OrgID int64
+}
+
+// IsErrOrgLabelNotExist checks if an error is an ErrOrgLabelNotExist.
+func IsErrOrgLabelNotExist(err error) bool {
+ _, ok := err.(ErrOrgLabelNotExist)
+ return ok
+}
+
+func (err ErrOrgLabelNotExist) Error() string {
+ return fmt.Sprintf("label does not exist [label_id: %d, org_id: %d]", err.LabelID, err.OrgID)
+}
+
+func (err ErrOrgLabelNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ErrLabelNotExist represents a "LabelNotExist" kind of error.
+type ErrLabelNotExist struct {
+ LabelID int64
+}
+
+// IsErrLabelNotExist checks if an error is an ErrLabelNotExist.
+func IsErrLabelNotExist(err error) bool {
+ _, ok := err.(ErrLabelNotExist)
+ return ok
+}
+
+func (err ErrLabelNotExist) Error() string {
+ return fmt.Sprintf("label does not exist [label_id: %d]", err.LabelID)
+}
+
+func (err ErrLabelNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// Label represents an issue label, owned either by a repository or by an organization.
+type Label struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"INDEX"`
+ OrgID int64 `xorm:"INDEX"`
+ Name string
+ Exclusive bool
+ Description string
+ Color string `xorm:"VARCHAR(7)"`
+ NumIssues int
+ NumClosedIssues int
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+
+ NumOpenIssues int `xorm:"-"`
+ NumOpenRepoIssues int64 `xorm:"-"`
+ IsChecked bool `xorm:"-"`
+ QueryString string `xorm:"-"`
+ IsSelected bool `xorm:"-"`
+ IsExcluded bool `xorm:"-"`
+
+ ArchivedUnix timeutil.TimeStamp `xorm:"DEFAULT NULL"`
+}
+
+func init() {
+ db.RegisterModel(new(Label))
+ db.RegisterModel(new(IssueLabel))
+}
+
+// CalOpenIssues sets the number of open issues of a label based on the already stored number of closed issues.
+func (l *Label) CalOpenIssues() {
+ l.NumOpenIssues = l.NumIssues - l.NumClosedIssues
+}
+
+// SetArchived sets the archived state of the label
+func (l *Label) SetArchived(isArchived bool) {
+ if !isArchived {
+ l.ArchivedUnix = timeutil.TimeStamp(0)
+ } else if !l.IsArchived() {
+ // Only change the date when it is newly archived.
+ l.ArchivedUnix = timeutil.TimeStampNow()
+ }
+}
+
+// IsArchived returns true if the label is archived
+func (l *Label) IsArchived() bool {
+ return !l.ArchivedUnix.IsZero()
+}
+
+// CalOpenOrgIssues calculates the number of open issues of a label within a specific repository
+func (l *Label) CalOpenOrgIssues(ctx context.Context, repoID, labelID int64) {
+ counts, _ := CountIssuesByRepo(ctx, &IssuesOptions{
+ RepoIDs: []int64{repoID},
+ LabelIDs: []int64{labelID},
+ IsClosed: optional.Some(false),
+ })
+
+ for _, count := range counts {
+ l.NumOpenRepoIssues += count
+ }
+}
+
+// LoadSelectedLabelsAfterClick calculates the set of selected labels when a label is clicked
+func (l *Label) LoadSelectedLabelsAfterClick(currentSelectedLabels []int64, currentSelectedExclusiveScopes []string) {
+ labelQuerySlice := []int64{}
+ labelSelected := false
+ exclusiveScope := l.ExclusiveScope()
+ for i, curSel := range currentSelectedLabels {
+ if curSel == l.ID {
+ labelSelected = true
+ } else if -curSel == l.ID {
+ labelSelected = true
+ l.IsExcluded = true
+ } else if curSel != 0 {
+ // Exclude other labels in the same scope from selection
+ if curSel < 0 || exclusiveScope == "" || exclusiveScope != currentSelectedExclusiveScopes[i] {
+ labelQuerySlice = append(labelQuerySlice, curSel)
+ }
+ }
+ }
+
+ if !labelSelected {
+ labelQuerySlice = append(labelQuerySlice, l.ID)
+ }
+ l.IsSelected = labelSelected
+
+ // Sort and deduplicate the ids to avoid crawlers requesting the
+ // same thing with merely a different order of parameters
+ slices.Sort(labelQuerySlice)
+ labelQuerySlice = slices.Compact(labelQuerySlice)
+ // Quick conversion (strings.Join() doesn't accept slices of Int64)
+ labelQuerySliceStrings := make([]string, len(labelQuerySlice))
+ for i, x := range labelQuerySlice {
+ labelQuerySliceStrings[i] = strconv.FormatInt(x, 10)
+ }
+ l.QueryString = strings.Join(labelQuerySliceStrings, ",")
+}
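+
+// Worked example (mirrors the unit test; illustrative only): for the
+// exclusive label {ID: 8, Name: "scope/label2"} with currently selected IDs
+// [1, -8] and scopes ["", "scope"], the call yields QueryString == "1" and
+// IsSelected == true, because clicking an already-selected label removes it
+// from the filter.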
+
+// BelongsToOrg returns true if label is an organization label
+func (l *Label) BelongsToOrg() bool {
+ return l.OrgID > 0
+}
+
+// BelongsToRepo returns true if label is a repository label
+func (l *Label) BelongsToRepo() bool {
+ return l.RepoID > 0
+}
+
+// ExclusiveScope returns the scope substring of the label name (e.g. "scope" for "scope/item"), or an empty string if none exists
+func (l *Label) ExclusiveScope() string {
+ if !l.Exclusive {
+ return ""
+ }
+ lastIndex := strings.LastIndex(l.Name, "/")
+ if lastIndex == -1 || lastIndex == 0 || lastIndex == len(l.Name)-1 {
+ return ""
+ }
+ return l.Name[:lastIndex]
+}
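+
+// sameExclusiveScope is a hypothetical helper showing the intended use of
+// ExclusiveScope: two labels conflict when both are exclusive and share the
+// same scope prefix (e.g. "priority/high" vs "priority/low").
+func sameExclusiveScope(a, b *Label) bool {
+ scope := a.ExclusiveScope()
+ return scope != "" && scope == b.ExclusiveScope()
+}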
+
+// NewLabel creates a new label
+func NewLabel(ctx context.Context, l *Label) error {
+ color, err := label.NormalizeColor(l.Color)
+ if err != nil {
+ return err
+ }
+ l.Color = color
+
+ return db.Insert(ctx, l)
+}
+
+// NewLabels creates new labels
+func NewLabels(ctx context.Context, labels ...*Label) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ for _, l := range labels {
+ color, err := label.NormalizeColor(l.Color)
+ if err != nil {
+ return err
+ }
+ l.Color = color
+
+ if err := db.Insert(ctx, l); err != nil {
+ return err
+ }
+ }
+ return committer.Commit()
+}
+
+// UpdateLabel updates label information.
+func UpdateLabel(ctx context.Context, l *Label) error {
+ color, err := label.NormalizeColor(l.Color)
+ if err != nil {
+ return err
+ }
+ l.Color = color
+
+ return updateLabelCols(ctx, l, "name", "description", "color", "exclusive", "archived_unix")
+}
+
+// DeleteLabel deletes a label
+func DeleteLabel(ctx context.Context, id, labelID int64) error {
+ l, err := GetLabelByID(ctx, labelID)
+ if err != nil {
+ if IsErrLabelNotExist(err) {
+ return nil
+ }
+ return err
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ sess := db.GetEngine(ctx)
+
+ if l.BelongsToOrg() && l.OrgID != id {
+ return nil
+ }
+ if l.BelongsToRepo() && l.RepoID != id {
+ return nil
+ }
+
+ if _, err = db.DeleteByID[Label](ctx, labelID); err != nil {
+ return err
+ } else if _, err = sess.
+ Where("label_id = ?", labelID).
+ Delete(new(IssueLabel)); err != nil {
+ return err
+ }
+
+ // delete comments that reference the now-deleted label
+ if _, err = sess.Where("label_id = ?", labelID).Cols("label_id").Delete(&Comment{}); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
+
+// GetLabelByID returns a label by given ID.
+func GetLabelByID(ctx context.Context, labelID int64) (*Label, error) {
+ if labelID <= 0 {
+ return nil, ErrLabelNotExist{labelID}
+ }
+
+ l := &Label{}
+ has, err := db.GetEngine(ctx).ID(labelID).Get(l)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrLabelNotExist{l.ID}
+ }
+ return l, nil
+}
+
+// GetLabelsByIDs returns a list of labels by IDs
+func GetLabelsByIDs(ctx context.Context, labelIDs []int64, cols ...string) ([]*Label, error) {
+ labels := make([]*Label, 0, len(labelIDs))
+ return labels, db.GetEngine(ctx).Table("label").
+ In("id", labelIDs).
+ Asc("name").
+ Cols(cols...).
+ Find(&labels)
+}
+
+// GetLabelInRepoByName returns a label by name in the given repository.
+func GetLabelInRepoByName(ctx context.Context, repoID int64, labelName string) (*Label, error) {
+ if len(labelName) == 0 || repoID <= 0 {
+ return nil, ErrRepoLabelNotExist{0, repoID}
+ }
+
+ l, exist, err := db.Get[Label](ctx, builder.Eq{"name": labelName, "repo_id": repoID})
+ if err != nil {
+ return nil, err
+ } else if !exist {
+ return nil, ErrRepoLabelNotExist{0, repoID}
+ }
+ return l, nil
+}
+
+// GetLabelInRepoByID returns a label by ID in the given repository.
+func GetLabelInRepoByID(ctx context.Context, repoID, labelID int64) (*Label, error) {
+ if labelID <= 0 || repoID <= 0 {
+ return nil, ErrRepoLabelNotExist{labelID, repoID}
+ }
+
+ l, exist, err := db.Get[Label](ctx, builder.Eq{"id": labelID, "repo_id": repoID})
+ if err != nil {
+ return nil, err
+ } else if !exist {
+ return nil, ErrRepoLabelNotExist{labelID, repoID}
+ }
+ return l, nil
+}
+
+// GetLabelIDsInRepoByNames returns a list of label IDs by names in a given
+// repository; it silently ignores label names that do not belong to the
+// repository.
+func GetLabelIDsInRepoByNames(ctx context.Context, repoID int64, labelNames []string) ([]int64, error) {
+ labelIDs := make([]int64, 0, len(labelNames))
+ return labelIDs, db.GetEngine(ctx).Table("label").
+ Where("repo_id = ?", repoID).
+ In("name", labelNames).
+ Asc("name").
+ Cols("id").
+ Find(&labelIDs)
+}
+
+// BuildLabelNamesIssueIDsCondition returns a sub-query builder that selects the IDs of issues carrying any of the given label names
+func BuildLabelNamesIssueIDsCondition(labelNames []string) *builder.Builder {
+ return builder.Select("issue_label.issue_id").
+ From("issue_label").
+ InnerJoin("label", "label.id = issue_label.label_id").
+ Where(
+ builder.In("label.name", labelNames),
+ ).
+ GroupBy("issue_label.issue_id")
+}
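+
+// findIssueIDsWithLabelNames is a hypothetical wrapper (a sketch assuming
+// callers embed the condition in a larger query) showing how the builder
+// above is meant to be used:
+func findIssueIDsWithLabelNames(ctx context.Context, labelNames []string) ([]int64, error) {
+ ids := make([]int64, 0, 10)
+ return ids, db.GetEngine(ctx).Table("issue").
+ Where(builder.In("id", BuildLabelNamesIssueIDsCondition(labelNames))).
+ Cols("id").
+ Find(&ids)
+}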
+
+// GetLabelsInRepoByIDs returns a list of labels by IDs in the given repository;
+// it silently ignores label IDs that do not belong to the repository.
+func GetLabelsInRepoByIDs(ctx context.Context, repoID int64, labelIDs []int64) ([]*Label, error) {
+ labels := make([]*Label, 0, len(labelIDs))
+ return labels, db.GetEngine(ctx).
+ Where("repo_id = ?", repoID).
+ In("id", labelIDs).
+ Asc("name").
+ Find(&labels)
+}
+
+// GetLabelsByRepoID returns all labels that belong to the given repository.
+func GetLabelsByRepoID(ctx context.Context, repoID int64, sortType string, listOptions db.ListOptions) ([]*Label, error) {
+ if repoID <= 0 {
+ return nil, ErrRepoLabelNotExist{0, repoID}
+ }
+ labels := make([]*Label, 0, 10)
+ sess := db.GetEngine(ctx).Where("repo_id = ?", repoID)
+
+ switch sortType {
+ case "reversealphabetically":
+ sess.Desc("name")
+ case "leastissues":
+ sess.Asc("num_issues")
+ case "mostissues":
+ sess.Desc("num_issues")
+ default:
+ sess.Asc("name")
+ }
+
+ if listOptions.Page != 0 {
+ sess = db.SetSessionPagination(sess, &listOptions)
+ }
+
+ return labels, sess.Find(&labels)
+}
+
+// CountLabelsByRepoID counts all labels that belong to the given repository.
+func CountLabelsByRepoID(ctx context.Context, repoID int64) (int64, error) {
+ return db.GetEngine(ctx).Where("repo_id = ?", repoID).Count(&Label{})
+}
+
+// GetLabelInOrgByName returns a label by name in the given organization.
+func GetLabelInOrgByName(ctx context.Context, orgID int64, labelName string) (*Label, error) {
+ if len(labelName) == 0 || orgID <= 0 {
+ return nil, ErrOrgLabelNotExist{0, orgID}
+ }
+
+ l, exist, err := db.Get[Label](ctx, builder.Eq{"name": labelName, "org_id": orgID})
+ if err != nil {
+ return nil, err
+ } else if !exist {
+ return nil, ErrOrgLabelNotExist{0, orgID}
+ }
+ return l, nil
+}
+
+// GetLabelInOrgByID returns a label by ID in the given organization.
+func GetLabelInOrgByID(ctx context.Context, orgID, labelID int64) (*Label, error) {
+ if labelID <= 0 || orgID <= 0 {
+ return nil, ErrOrgLabelNotExist{labelID, orgID}
+ }
+
+ l, exist, err := db.Get[Label](ctx, builder.Eq{"id": labelID, "org_id": orgID})
+ if err != nil {
+ return nil, err
+ } else if !exist {
+ return nil, ErrOrgLabelNotExist{labelID, orgID}
+ }
+ return l, nil
+}
+
+// GetLabelsInOrgByIDs returns a list of labels by IDs in the given organization;
+// it silently ignores label IDs that do not belong to the organization.
+func GetLabelsInOrgByIDs(ctx context.Context, orgID int64, labelIDs []int64) ([]*Label, error) {
+ labels := make([]*Label, 0, len(labelIDs))
+ return labels, db.GetEngine(ctx).
+ Where("org_id = ?", orgID).
+ In("id", labelIDs).
+ Asc("name").
+ Find(&labels)
+}
+
+// GetLabelsByOrgID returns all labels that belong to the given organization.
+func GetLabelsByOrgID(ctx context.Context, orgID int64, sortType string, listOptions db.ListOptions) ([]*Label, error) {
+ if orgID <= 0 {
+ return nil, ErrOrgLabelNotExist{0, orgID}
+ }
+ labels := make([]*Label, 0, 10)
+ sess := db.GetEngine(ctx).Where("org_id = ?", orgID)
+
+ switch sortType {
+ case "reversealphabetically":
+ sess.Desc("name")
+ case "leastissues":
+ sess.Asc("num_issues")
+ case "mostissues":
+ sess.Desc("num_issues")
+ default:
+ sess.Asc("name")
+ }
+
+ if listOptions.Page != 0 {
+ sess = db.SetSessionPagination(sess, &listOptions)
+ }
+
+ return labels, sess.Find(&labels)
+}
+
+// GetLabelIDsByNames returns a list of label IDs by names.
+// It doesn't filter by repository or organization, so it can return labels belonging to different repos/orgs.
+// It's only useful for filtering issues via the indexer.
+// Because several labels can share the same name, the returned slice can be longer than the list of names.
+func GetLabelIDsByNames(ctx context.Context, labelNames []string) ([]int64, error) {
+ labelIDs := make([]int64, 0, len(labelNames))
+ return labelIDs, db.GetEngine(ctx).Table("label").
+ In("name", labelNames).
+ Cols("id").
+ Find(&labelIDs)
+}
+
+// CountLabelsByOrgID counts all labels that belong to the given organization.
+func CountLabelsByOrgID(ctx context.Context, orgID int64) (int64, error) {
+ return db.GetEngine(ctx).Where("org_id = ?", orgID).Count(&Label{})
+}
+
+func updateLabelCols(ctx context.Context, l *Label, cols ...string) error {
+ _, err := db.GetEngine(ctx).ID(l.ID).
+ SetExpr("num_issues",
+ builder.Select("count(*)").From("issue_label").
+ Where(builder.Eq{"label_id": l.ID}),
+ ).
+ SetExpr("num_closed_issues",
+ builder.Select("count(*)").From("issue_label").
+ InnerJoin("issue", "issue_label.issue_id = issue.id").
+ Where(builder.Eq{
+ "issue_label.label_id": l.ID,
+ "issue.is_closed": true,
+ }),
+ ).
+ Cols(cols...).Update(l)
+ return err
+}
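+
+// For illustration, the statement generated by updateLabelCols looks roughly
+// like the following (the actual SQL depends on the database dialect):
+//
+//   UPDATE label
+//   SET num_issues = (SELECT count(*) FROM issue_label WHERE label_id = ?),
+//       num_closed_issues = (SELECT count(*) FROM issue_label
+//           INNER JOIN issue ON issue_label.issue_id = issue.id
+//           WHERE issue_label.label_id = ? AND issue.is_closed = true),
+//       <cols...> = ?
+//   WHERE id = ?
+//
+// so the denormalized counters are recomputed on every label update and
+// callers never adjust NumIssues/NumClosedIssues by hand.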
diff --git a/models/issues/label_test.go b/models/issues/label_test.go
new file mode 100644
index 0000000..b03fc1c
--- /dev/null
+++ b/models/issues/label_test.go
@@ -0,0 +1,422 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestLabel_CalOpenIssues(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ label := unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 1})
+ label.CalOpenIssues()
+ assert.EqualValues(t, 2, label.NumOpenIssues)
+}
+
+func TestLabel_LoadSelectedLabelsAfterClick(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ // Load the label id:8 (scope/label2), which has a scope and is
+ // mutually exclusive with id:7 (scope/label1)
+ label := unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 8})
+
+ // First test: with a negated label (-8) in the same scope
+ label.LoadSelectedLabelsAfterClick([]int64{1, -8}, []string{"", "scope"})
+ assert.Equal(t, "1", label.QueryString)
+ assert.True(t, label.IsSelected)
+
+ // Second test: with duplicates
+ label.LoadSelectedLabelsAfterClick([]int64{1, 7, 1, 7, 7}, []string{"", "scope", "", "scope", "scope"})
+ assert.Equal(t, "1,8", label.QueryString)
+ assert.False(t, label.IsSelected)
+
+ // Third test: empty set
+ label.LoadSelectedLabelsAfterClick([]int64{}, []string{})
+ assert.False(t, label.IsSelected)
+ assert.Equal(t, "8", label.QueryString)
+}
+
+func TestLabel_ExclusiveScope(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ label := unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 7})
+ assert.Equal(t, "scope", label.ExclusiveScope())
+
+ label = unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 9})
+ assert.Equal(t, "scope/subscope", label.ExclusiveScope())
+}
+
+func TestNewLabels(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ labels := []*issues_model.Label{
+ {RepoID: 2, Name: "labelName2", Color: "#123456"},
+ {RepoID: 3, Name: "labelName3", Color: "#123"},
+ {RepoID: 4, Name: "labelName4", Color: "ABCDEF"},
+ {RepoID: 5, Name: "labelName5", Color: "DEF"},
+ }
+ require.Error(t, issues_model.NewLabel(db.DefaultContext, &issues_model.Label{RepoID: 3, Name: "invalid Color", Color: ""}))
+ require.Error(t, issues_model.NewLabel(db.DefaultContext, &issues_model.Label{RepoID: 3, Name: "invalid Color", Color: "#45G"}))
+ require.Error(t, issues_model.NewLabel(db.DefaultContext, &issues_model.Label{RepoID: 3, Name: "invalid Color", Color: "#12345G"}))
+ require.Error(t, issues_model.NewLabel(db.DefaultContext, &issues_model.Label{RepoID: 3, Name: "invalid Color", Color: "45G"}))
+ require.Error(t, issues_model.NewLabel(db.DefaultContext, &issues_model.Label{RepoID: 3, Name: "invalid Color", Color: "12345G"}))
+ for _, label := range labels {
+ unittest.AssertNotExistsBean(t, label)
+ }
+ require.NoError(t, issues_model.NewLabels(db.DefaultContext, labels...))
+ for _, label := range labels {
+ unittest.AssertExistsAndLoadBean(t, label, unittest.Cond("id = ?", label.ID))
+ }
+ unittest.CheckConsistencyFor(t, &issues_model.Label{}, &repo_model.Repository{})
+}
+
+func TestGetLabelByID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ label, err := issues_model.GetLabelByID(db.DefaultContext, 1)
+ require.NoError(t, err)
+ assert.EqualValues(t, 1, label.ID)
+
+ _, err = issues_model.GetLabelByID(db.DefaultContext, unittest.NonexistentID)
+ assert.True(t, issues_model.IsErrLabelNotExist(err))
+}
+
+func TestGetLabelInRepoByName(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ label, err := issues_model.GetLabelInRepoByName(db.DefaultContext, 1, "label1")
+ require.NoError(t, err)
+ assert.EqualValues(t, 1, label.ID)
+ assert.Equal(t, "label1", label.Name)
+
+ _, err = issues_model.GetLabelInRepoByName(db.DefaultContext, 1, "")
+ assert.True(t, issues_model.IsErrRepoLabelNotExist(err))
+
+ _, err = issues_model.GetLabelInRepoByName(db.DefaultContext, unittest.NonexistentID, "nonexistent")
+ assert.True(t, issues_model.IsErrRepoLabelNotExist(err))
+}
+
+func TestGetLabelInRepoByNames(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ labelIDs, err := issues_model.GetLabelIDsInRepoByNames(db.DefaultContext, 1, []string{"label1", "label2"})
+ require.NoError(t, err)
+
+ assert.Len(t, labelIDs, 2)
+
+ assert.Equal(t, int64(1), labelIDs[0])
+ assert.Equal(t, int64(2), labelIDs[1])
+}
+
+func TestGetLabelInRepoByNamesDiscardsNonExistentLabels(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ // label3 doesn't exist; see labels.yml
+ labelIDs, err := issues_model.GetLabelIDsInRepoByNames(db.DefaultContext, 1, []string{"label1", "label2", "label3"})
+ require.NoError(t, err)
+
+ assert.Len(t, labelIDs, 2)
+
+ assert.Equal(t, int64(1), labelIDs[0])
+ assert.Equal(t, int64(2), labelIDs[1])
+}
+
+func TestGetLabelInRepoByID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ label, err := issues_model.GetLabelInRepoByID(db.DefaultContext, 1, 1)
+ require.NoError(t, err)
+ assert.EqualValues(t, 1, label.ID)
+
+ _, err = issues_model.GetLabelInRepoByID(db.DefaultContext, 1, -1)
+ assert.True(t, issues_model.IsErrRepoLabelNotExist(err))
+
+ _, err = issues_model.GetLabelInRepoByID(db.DefaultContext, unittest.NonexistentID, unittest.NonexistentID)
+ assert.True(t, issues_model.IsErrRepoLabelNotExist(err))
+}
+
+func TestGetLabelsInRepoByIDs(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ labels, err := issues_model.GetLabelsInRepoByIDs(db.DefaultContext, 1, []int64{1, 2, unittest.NonexistentID})
+ require.NoError(t, err)
+ if assert.Len(t, labels, 2) {
+ assert.EqualValues(t, 1, labels[0].ID)
+ assert.EqualValues(t, 2, labels[1].ID)
+ }
+}
+
+func TestGetLabelsByRepoID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ testSuccess := func(repoID int64, sortType string, expectedIssueIDs []int64) {
+ labels, err := issues_model.GetLabelsByRepoID(db.DefaultContext, repoID, sortType, db.ListOptions{})
+ require.NoError(t, err)
+ assert.Len(t, labels, len(expectedIssueIDs))
+ for i, label := range labels {
+ assert.EqualValues(t, expectedIssueIDs[i], label.ID)
+ }
+ }
+ testSuccess(1, "leastissues", []int64{2, 1})
+ testSuccess(1, "mostissues", []int64{1, 2})
+ testSuccess(1, "reversealphabetically", []int64{2, 1})
+ testSuccess(1, "default", []int64{1, 2})
+}
+
+// Org versions
+
+func TestGetLabelInOrgByName(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ label, err := issues_model.GetLabelInOrgByName(db.DefaultContext, 3, "orglabel3")
+ require.NoError(t, err)
+ assert.EqualValues(t, 3, label.ID)
+ assert.Equal(t, "orglabel3", label.Name)
+
+ _, err = issues_model.GetLabelInOrgByName(db.DefaultContext, 3, "")
+ assert.True(t, issues_model.IsErrOrgLabelNotExist(err))
+
+ _, err = issues_model.GetLabelInOrgByName(db.DefaultContext, 0, "orglabel3")
+ assert.True(t, issues_model.IsErrOrgLabelNotExist(err))
+
+ _, err = issues_model.GetLabelInOrgByName(db.DefaultContext, -1, "orglabel3")
+ assert.True(t, issues_model.IsErrOrgLabelNotExist(err))
+
+ _, err = issues_model.GetLabelInOrgByName(db.DefaultContext, unittest.NonexistentID, "nonexistent")
+ assert.True(t, issues_model.IsErrOrgLabelNotExist(err))
+}
+
+func TestGetLabelInOrgByID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ label, err := issues_model.GetLabelInOrgByID(db.DefaultContext, 3, 3)
+ require.NoError(t, err)
+ assert.EqualValues(t, 3, label.ID)
+
+ _, err = issues_model.GetLabelInOrgByID(db.DefaultContext, 3, -1)
+ assert.True(t, issues_model.IsErrOrgLabelNotExist(err))
+
+ _, err = issues_model.GetLabelInOrgByID(db.DefaultContext, 0, 3)
+ assert.True(t, issues_model.IsErrOrgLabelNotExist(err))
+
+ _, err = issues_model.GetLabelInOrgByID(db.DefaultContext, -1, 3)
+ assert.True(t, issues_model.IsErrOrgLabelNotExist(err))
+
+ _, err = issues_model.GetLabelInOrgByID(db.DefaultContext, unittest.NonexistentID, unittest.NonexistentID)
+ assert.True(t, issues_model.IsErrOrgLabelNotExist(err))
+}
+
+func TestGetLabelsInOrgByIDs(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ labels, err := issues_model.GetLabelsInOrgByIDs(db.DefaultContext, 3, []int64{3, 4, unittest.NonexistentID})
+ require.NoError(t, err)
+ if assert.Len(t, labels, 2) {
+ assert.EqualValues(t, 3, labels[0].ID)
+ assert.EqualValues(t, 4, labels[1].ID)
+ }
+}
+
+func TestGetLabelsByOrgID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ testSuccess := func(orgID int64, sortType string, expectedIssueIDs []int64) {
+ labels, err := issues_model.GetLabelsByOrgID(db.DefaultContext, orgID, sortType, db.ListOptions{})
+ require.NoError(t, err)
+ assert.Len(t, labels, len(expectedIssueIDs))
+ for i, label := range labels {
+ assert.EqualValues(t, expectedIssueIDs[i], label.ID)
+ }
+ }
+ testSuccess(3, "leastissues", []int64{3, 4})
+ testSuccess(3, "mostissues", []int64{4, 3})
+ testSuccess(3, "reversealphabetically", []int64{4, 3})
+ testSuccess(3, "default", []int64{3, 4})
+
+ var err error
+ _, err = issues_model.GetLabelsByOrgID(db.DefaultContext, 0, "leastissues", db.ListOptions{})
+ assert.True(t, issues_model.IsErrOrgLabelNotExist(err))
+
+ _, err = issues_model.GetLabelsByOrgID(db.DefaultContext, -1, "leastissues", db.ListOptions{})
+ assert.True(t, issues_model.IsErrOrgLabelNotExist(err))
+}
+
+
+func TestGetLabelsByIssueID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ labels, err := issues_model.GetLabelsByIssueID(db.DefaultContext, 1)
+ require.NoError(t, err)
+ if assert.Len(t, labels, 1) {
+ assert.EqualValues(t, 1, labels[0].ID)
+ }
+
+ labels, err = issues_model.GetLabelsByIssueID(db.DefaultContext, unittest.NonexistentID)
+ require.NoError(t, err)
+ assert.Empty(t, labels)
+}
+
+func TestUpdateLabel(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ label := unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 1})
+ // build an update that keeps the description, to verify UpdateLabel doesn't overwrite it
+ update := &issues_model.Label{
+ ID: label.ID,
+ Color: "#ffff00",
+ Name: "newLabelName",
+ Description: label.Description,
+ Exclusive: false,
+ ArchivedUnix: timeutil.TimeStamp(0),
+ }
+ label.Color = update.Color
+ label.Name = update.Name
+ require.NoError(t, issues_model.UpdateLabel(db.DefaultContext, update))
+ newLabel := unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 1})
+ assert.EqualValues(t, label.ID, newLabel.ID)
+ assert.EqualValues(t, label.Color, newLabel.Color)
+ assert.EqualValues(t, label.Name, newLabel.Name)
+ assert.EqualValues(t, label.Description, newLabel.Description)
+ assert.EqualValues(t, 0, newLabel.ArchivedUnix)
+ unittest.CheckConsistencyFor(t, &issues_model.Label{}, &repo_model.Repository{})
+}
+
+func TestDeleteLabel(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ label := unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 1})
+ require.NoError(t, issues_model.DeleteLabel(db.DefaultContext, label.RepoID, label.ID))
+ unittest.AssertNotExistsBean(t, &issues_model.Label{ID: label.ID, RepoID: label.RepoID})
+
+ require.NoError(t, issues_model.DeleteLabel(db.DefaultContext, label.RepoID, label.ID))
+ unittest.AssertNotExistsBean(t, &issues_model.Label{ID: label.ID})
+
+ require.NoError(t, issues_model.DeleteLabel(db.DefaultContext, unittest.NonexistentID, unittest.NonexistentID))
+ unittest.CheckConsistencyFor(t, &issues_model.Label{}, &repo_model.Repository{})
+}
+
+func TestHasIssueLabel(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ assert.True(t, issues_model.HasIssueLabel(db.DefaultContext, 1, 1))
+ assert.False(t, issues_model.HasIssueLabel(db.DefaultContext, 1, 2))
+ assert.False(t, issues_model.HasIssueLabel(db.DefaultContext, unittest.NonexistentID, unittest.NonexistentID))
+}
+
+func TestNewIssueLabel(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ label := unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 2})
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 1})
+ doer := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+
+ // add new IssueLabel
+ prevNumIssues := label.NumIssues
+ require.NoError(t, issues_model.NewIssueLabel(db.DefaultContext, issue, label, doer))
+ unittest.AssertExistsAndLoadBean(t, &issues_model.IssueLabel{IssueID: issue.ID, LabelID: label.ID})
+ unittest.AssertExistsAndLoadBean(t, &issues_model.Comment{
+ Type: issues_model.CommentTypeLabel,
+ PosterID: doer.ID,
+ IssueID: issue.ID,
+ LabelID: label.ID,
+ Content: "1",
+ })
+ label = unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 2})
+ assert.EqualValues(t, prevNumIssues+1, label.NumIssues)
+
+ // re-add existing IssueLabel
+ require.NoError(t, issues_model.NewIssueLabel(db.DefaultContext, issue, label, doer))
+ unittest.CheckConsistencyFor(t, &issues_model.Issue{}, &issues_model.Label{})
+}
+
+func TestNewIssueExclusiveLabel(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 18})
+ doer := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+
+ otherLabel := unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 6})
+ exclusiveLabelA := unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 7})
+ exclusiveLabelB := unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 8})
+
+ // coexisting regular and exclusive label
+ require.NoError(t, issues_model.NewIssueLabel(db.DefaultContext, issue, otherLabel, doer))
+ require.NoError(t, issues_model.NewIssueLabel(db.DefaultContext, issue, exclusiveLabelA, doer))
+ unittest.AssertExistsAndLoadBean(t, &issues_model.IssueLabel{IssueID: issue.ID, LabelID: otherLabel.ID})
+ unittest.AssertExistsAndLoadBean(t, &issues_model.IssueLabel{IssueID: issue.ID, LabelID: exclusiveLabelA.ID})
+
+ // exclusive label replaces existing one
+ require.NoError(t, issues_model.NewIssueLabel(db.DefaultContext, issue, exclusiveLabelB, doer))
+ unittest.AssertExistsAndLoadBean(t, &issues_model.IssueLabel{IssueID: issue.ID, LabelID: otherLabel.ID})
+ unittest.AssertExistsAndLoadBean(t, &issues_model.IssueLabel{IssueID: issue.ID, LabelID: exclusiveLabelB.ID})
+ unittest.AssertNotExistsBean(t, &issues_model.IssueLabel{IssueID: issue.ID, LabelID: exclusiveLabelA.ID})
+
+ // exclusive label replaces existing one again
+ require.NoError(t, issues_model.NewIssueLabel(db.DefaultContext, issue, exclusiveLabelA, doer))
+ unittest.AssertExistsAndLoadBean(t, &issues_model.IssueLabel{IssueID: issue.ID, LabelID: otherLabel.ID})
+ unittest.AssertExistsAndLoadBean(t, &issues_model.IssueLabel{IssueID: issue.ID, LabelID: exclusiveLabelA.ID})
+ unittest.AssertNotExistsBean(t, &issues_model.IssueLabel{IssueID: issue.ID, LabelID: exclusiveLabelB.ID})
+}
+
+func TestNewIssueLabels(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ label1 := unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 1})
+ label2 := unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 2})
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 5})
+ doer := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+
+ require.NoError(t, issues_model.NewIssueLabels(db.DefaultContext, issue, []*issues_model.Label{label1, label2}, doer))
+ unittest.AssertExistsAndLoadBean(t, &issues_model.IssueLabel{IssueID: issue.ID, LabelID: label1.ID})
+ unittest.AssertExistsAndLoadBean(t, &issues_model.Comment{
+ Type: issues_model.CommentTypeLabel,
+ PosterID: doer.ID,
+ IssueID: issue.ID,
+ LabelID: label1.ID,
+ Content: "1",
+ })
+ unittest.AssertExistsAndLoadBean(t, &issues_model.IssueLabel{IssueID: issue.ID, LabelID: label1.ID})
+ label1 = unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 1})
+ assert.EqualValues(t, 3, label1.NumIssues)
+ assert.EqualValues(t, 1, label1.NumClosedIssues)
+ label2 = unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: 2})
+ assert.EqualValues(t, 1, label2.NumIssues)
+ assert.EqualValues(t, 1, label2.NumClosedIssues)
+
+ // corner case: test empty slice
+ require.NoError(t, issues_model.NewIssueLabels(db.DefaultContext, issue, []*issues_model.Label{}, doer))
+
+ unittest.CheckConsistencyFor(t, &issues_model.Issue{}, &issues_model.Label{})
+}
+
+func TestDeleteIssueLabel(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ testSuccess := func(labelID, issueID, doerID int64) {
+ label := unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: labelID})
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: issueID})
+ doer := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: doerID})
+
+ expectedNumIssues := label.NumIssues
+ expectedNumClosedIssues := label.NumClosedIssues
+ if unittest.BeanExists(t, &issues_model.IssueLabel{IssueID: issueID, LabelID: labelID}) {
+ expectedNumIssues--
+ if issue.IsClosed {
+ expectedNumClosedIssues--
+ }
+ }
+
+		ctx, committer, err := db.TxContext(db.DefaultContext)
+		require.NoError(t, err)
+		defer committer.Close()
+ require.NoError(t, issues_model.DeleteIssueLabel(ctx, issue, label, doer))
+ require.NoError(t, committer.Commit())
+
+ unittest.AssertNotExistsBean(t, &issues_model.IssueLabel{IssueID: issueID, LabelID: labelID})
+ unittest.AssertExistsAndLoadBean(t, &issues_model.Comment{
+ Type: issues_model.CommentTypeLabel,
+ PosterID: doerID,
+ IssueID: issueID,
+ LabelID: labelID,
+ }, `content=""`)
+ label = unittest.AssertExistsAndLoadBean(t, &issues_model.Label{ID: labelID})
+ assert.EqualValues(t, expectedNumIssues, label.NumIssues)
+ assert.EqualValues(t, expectedNumClosedIssues, label.NumClosedIssues)
+ }
+ testSuccess(1, 1, 2)
+ testSuccess(2, 5, 2)
+ testSuccess(1, 1, 2) // delete non-existent IssueLabel
+
+ unittest.CheckConsistencyFor(t, &issues_model.Issue{}, &issues_model.Label{})
+}
diff --git a/models/issues/main_test.go b/models/issues/main_test.go
new file mode 100644
index 0000000..baabd66
--- /dev/null
+++ b/models/issues/main_test.go
@@ -0,0 +1,33 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues_test
+
+import (
+ "testing"
+
+ issues_model "code.gitea.io/gitea/models/issues"
+ "code.gitea.io/gitea/models/unittest"
+
+ _ "code.gitea.io/gitea/models"
+ _ "code.gitea.io/gitea/models/actions"
+ _ "code.gitea.io/gitea/models/activities"
+ _ "code.gitea.io/gitea/models/repo"
+ _ "code.gitea.io/gitea/models/user"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestFixturesAreConsistent(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ unittest.CheckConsistencyFor(t,
+ &issues_model.Issue{},
+ &issues_model.PullRequest{},
+ &issues_model.Milestone{},
+ &issues_model.Label{},
+ )
+}
+
+func TestMain(m *testing.M) {
+ unittest.MainTest(m)
+}
diff --git a/models/issues/milestone.go b/models/issues/milestone.go
new file mode 100644
index 0000000..4b3cb0e
--- /dev/null
+++ b/models/issues/milestone.go
@@ -0,0 +1,394 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "context"
+ "fmt"
+ "html/template"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/modules/optional"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ErrMilestoneNotExist represents a "MilestoneNotExist" kind of error.
+type ErrMilestoneNotExist struct {
+ ID int64
+ RepoID int64
+ Name string
+}
+
+// IsErrMilestoneNotExist checks if an error is a ErrMilestoneNotExist.
+func IsErrMilestoneNotExist(err error) bool {
+ _, ok := err.(ErrMilestoneNotExist)
+ return ok
+}
+
+func (err ErrMilestoneNotExist) Error() string {
+ if len(err.Name) > 0 {
+ return fmt.Sprintf("milestone does not exist [name: %s, repo_id: %d]", err.Name, err.RepoID)
+ }
+ return fmt.Sprintf("milestone does not exist [id: %d, repo_id: %d]", err.ID, err.RepoID)
+}
+
+func (err ErrMilestoneNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
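+
+// Because Unwrap returns util.ErrNotExist, callers may also match this error
+// generically with errors.Is(err, util.ErrNotExist) instead of the typed
+// IsErrMilestoneNotExist check.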
+
+// Milestone represents a milestone of a repository.
+type Milestone struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"INDEX"`
+ Repo *repo_model.Repository `xorm:"-"`
+ Name string
+ Content string `xorm:"TEXT"`
+ RenderedContent template.HTML `xorm:"-"`
+ IsClosed bool
+ NumIssues int
+ NumClosedIssues int
+ NumOpenIssues int `xorm:"-"`
+	Completeness    int // Percentage (0-100).
+ IsOverdue bool `xorm:"-"`
+
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ DeadlineUnix timeutil.TimeStamp
+ ClosedDateUnix timeutil.TimeStamp
+ DeadlineString string `xorm:"-"`
+
+ TotalTrackedTime int64 `xorm:"-"`
+}
+
+func init() {
+ db.RegisterModel(new(Milestone))
+}
+
+// BeforeUpdate is invoked from XORM before updating this object.
+func (m *Milestone) BeforeUpdate() {
+ if m.NumIssues > 0 {
+ m.Completeness = m.NumClosedIssues * 100 / m.NumIssues
+ } else {
+ m.Completeness = 0
+ }
+}
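+
+// For illustration: with NumIssues = 4 and NumClosedIssues = 3, Completeness
+// becomes 3*100/4 = 75; integer division truncates, so 1 closed of 3 gives 33.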
+
+// AfterLoad is invoked from XORM after setting the value of a field of
+// this object.
+func (m *Milestone) AfterLoad() {
+ m.NumOpenIssues = m.NumIssues - m.NumClosedIssues
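+	// A deadline in the year 9999 serves as a "no due date" sentinel: skip the
+	// deadline string and the overdue calculation in that case.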
+ if m.DeadlineUnix.Year() == 9999 {
+ return
+ }
+
+ m.DeadlineString = m.DeadlineUnix.FormatDate()
+ if m.IsClosed {
+ m.IsOverdue = m.ClosedDateUnix >= m.DeadlineUnix
+ } else {
+ m.IsOverdue = timeutil.TimeStampNow() >= m.DeadlineUnix
+ }
+}
+
+// State returns string representation of milestone status.
+func (m *Milestone) State() api.StateType {
+ if m.IsClosed {
+ return api.StateClosed
+ }
+ return api.StateOpen
+}
+
+// NewMilestone creates a new milestone for a repository.
+func NewMilestone(ctx context.Context, m *Milestone) (err error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ m.Name = strings.TrimSpace(m.Name)
+
+ if err = db.Insert(ctx, m); err != nil {
+ return err
+ }
+
+ if _, err = db.Exec(ctx, "UPDATE `repository` SET num_milestones = num_milestones + 1 WHERE id = ?", m.RepoID); err != nil {
+ return err
+ }
+ return committer.Commit()
+}
+
+// HasMilestoneByRepoID reports whether the milestone exists in the repository.
+func HasMilestoneByRepoID(ctx context.Context, repoID, id int64) (bool, error) {
+ return db.GetEngine(ctx).ID(id).Where("repo_id=?", repoID).Exist(new(Milestone))
+}
+
+// GetMilestoneByRepoID returns the milestone in a repository.
+func GetMilestoneByRepoID(ctx context.Context, repoID, id int64) (*Milestone, error) {
+ m := new(Milestone)
+ has, err := db.GetEngine(ctx).ID(id).Where("repo_id=?", repoID).Get(m)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrMilestoneNotExist{ID: id, RepoID: repoID}
+ }
+ return m, nil
+}
+
+// GetMilestoneByRepoIDANDName returns a milestone if one exists with the given name in the repository
+func GetMilestoneByRepoIDANDName(ctx context.Context, repoID int64, name string) (*Milestone, error) {
+ var mile Milestone
+ has, err := db.GetEngine(ctx).Where("repo_id=? AND name=?", repoID, name).Get(&mile)
+ if err != nil {
+ return nil, err
+ }
+ if !has {
+ return nil, ErrMilestoneNotExist{Name: name, RepoID: repoID}
+ }
+ return &mile, nil
+}
+
+// UpdateMilestone updates information of given milestone.
+func UpdateMilestone(ctx context.Context, m *Milestone, oldIsClosed bool) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if m.IsClosed && !oldIsClosed {
+ m.ClosedDateUnix = timeutil.TimeStampNow()
+ }
+
+ if err := updateMilestone(ctx, m); err != nil {
+ return err
+ }
+
+ // if IsClosed changed, update milestone numbers of repository
+ if oldIsClosed != m.IsClosed {
+ if err := updateRepoMilestoneNum(ctx, m.RepoID); err != nil {
+ return err
+ }
+ }
+
+ return committer.Commit()
+}
+
+func updateMilestone(ctx context.Context, m *Milestone) error {
+ m.Name = strings.TrimSpace(m.Name)
+ _, err := db.GetEngine(ctx).ID(m.ID).AllCols().Update(m)
+ if err != nil {
+ return err
+ }
+ return UpdateMilestoneCounters(ctx, m.ID)
+}
+
+func updateMilestoneCounters(ctx context.Context, id int64, noAutoTime bool, updatedUnix timeutil.TimeStamp) error {
+ e := db.GetEngine(ctx)
+ sess := e.ID(id).
+ SetExpr("num_issues", builder.Select("count(*)").From("issue").Where(
+ builder.Eq{"milestone_id": id},
+ )).
+ SetExpr("num_closed_issues", builder.Select("count(*)").From("issue").Where(
+ builder.Eq{
+ "milestone_id": id,
+ "is_closed": true,
+ },
+ ))
+ if noAutoTime {
+ sess.SetExpr("updated_unix", updatedUnix).NoAutoTime()
+ }
+ _, err := sess.Update(&Milestone{})
+ if err != nil {
+ return err
+ }
+ _, err = e.Exec("UPDATE `milestone` SET completeness=100*num_closed_issues/(CASE WHEN num_issues > 0 THEN num_issues ELSE 1 END) WHERE id=?",
+ id,
+ )
+ return err
+}
+
+// UpdateMilestoneCounters calculates NumIssues, NumClosedIssues and Completeness
+func UpdateMilestoneCounters(ctx context.Context, id int64) error {
+ return updateMilestoneCounters(ctx, id, false, 0)
+}
+
+// UpdateMilestoneCountersWithDate calculates NumIssues, NumClosedIssues and Completeness, and sets the UpdatedUnix date
+func UpdateMilestoneCountersWithDate(ctx context.Context, id int64, updatedUnix timeutil.TimeStamp) error {
+ return updateMilestoneCounters(ctx, id, true, updatedUnix)
+}
+
+// ChangeMilestoneStatusByRepoIDAndID changes a milestone open/closed status if the milestone ID is in the repo.
+func ChangeMilestoneStatusByRepoIDAndID(ctx context.Context, repoID, milestoneID int64, isClosed bool) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ m := &Milestone{
+ ID: milestoneID,
+ RepoID: repoID,
+ }
+
+ has, err := db.GetEngine(ctx).ID(milestoneID).Where("repo_id = ?", repoID).Get(m)
+ if err != nil {
+ return err
+ } else if !has {
+ return ErrMilestoneNotExist{ID: milestoneID, RepoID: repoID}
+ }
+
+ if err := changeMilestoneStatus(ctx, m, isClosed); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
+
+// ChangeMilestoneStatus changes the milestone open/closed status.
+func ChangeMilestoneStatus(ctx context.Context, m *Milestone, isClosed bool) (err error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if err := changeMilestoneStatus(ctx, m, isClosed); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
+
+func changeMilestoneStatus(ctx context.Context, m *Milestone, isClosed bool) error {
+ m.IsClosed = isClosed
+ if isClosed {
+ m.ClosedDateUnix = timeutil.TimeStampNow()
+ }
+
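+	// Matching on the previous is_closed value makes this call idempotent: a
+	// no-op status change updates zero rows (caught by count < 1 below) and
+	// leaves the repository milestone counters untouched.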
+ count, err := db.GetEngine(ctx).ID(m.ID).Where("repo_id = ? AND is_closed = ?", m.RepoID, !isClosed).Cols("is_closed", "closed_date_unix").Update(m)
+ if err != nil {
+ return err
+ }
+ if count < 1 {
+ return nil
+ }
+ return updateRepoMilestoneNum(ctx, m.RepoID)
+}
+
+// DeleteMilestoneByRepoID deletes a milestone from a repository.
+func DeleteMilestoneByRepoID(ctx context.Context, repoID, id int64) error {
+ m, err := GetMilestoneByRepoID(ctx, repoID, id)
+ if err != nil {
+ if IsErrMilestoneNotExist(err) {
+ return nil
+ }
+ return err
+ }
+
+ repo, err := repo_model.GetRepositoryByID(ctx, m.RepoID)
+ if err != nil {
+ return err
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if _, err = db.DeleteByID[Milestone](ctx, m.ID); err != nil {
+ return err
+ }
+
+ numMilestones, err := db.Count[Milestone](ctx, FindMilestoneOptions{
+ RepoID: repo.ID,
+ })
+ if err != nil {
+ return err
+ }
+ numClosedMilestones, err := db.Count[Milestone](ctx, FindMilestoneOptions{
+ RepoID: repo.ID,
+ IsClosed: optional.Some(true),
+ })
+ if err != nil {
+ return err
+ }
+ repo.NumMilestones = int(numMilestones)
+ repo.NumClosedMilestones = int(numClosedMilestones)
+
+ if _, err = db.GetEngine(ctx).ID(repo.ID).Cols("num_milestones, num_closed_milestones").Update(repo); err != nil {
+ return err
+ }
+
+ if _, err = db.Exec(ctx, "UPDATE `issue` SET milestone_id = 0 WHERE milestone_id = ?", m.ID); err != nil {
+ return err
+ }
+ return committer.Commit()
+}
+
+func updateRepoMilestoneNum(ctx context.Context, repoID int64) error {
+ _, err := db.GetEngine(ctx).Exec("UPDATE `repository` SET num_milestones=(SELECT count(*) FROM milestone WHERE repo_id=?),num_closed_milestones=(SELECT count(*) FROM milestone WHERE repo_id=? AND is_closed=?) WHERE id=?",
+ repoID,
+ repoID,
+ true,
+ repoID,
+ )
+ return err
+}
+
+// LoadTotalTrackedTime loads the tracked time for the milestone
+func (m *Milestone) LoadTotalTrackedTime(ctx context.Context) error {
+ type totalTimesByMilestone struct {
+ MilestoneID int64
+ Time int64
+ }
+ totalTime := &totalTimesByMilestone{MilestoneID: m.ID}
+ has, err := db.GetEngine(ctx).Table("issue").
+ Join("INNER", "milestone", "issue.milestone_id = milestone.id").
+ Join("LEFT", "tracked_time", "tracked_time.issue_id = issue.id").
+ Where("tracked_time.deleted = ?", false).
+ Select("milestone_id, sum(time) as time").
+ Where("milestone_id = ?", m.ID).
+ GroupBy("milestone_id").
+ Get(totalTime)
+ if err != nil {
+ return err
+ } else if !has {
+ return nil
+ }
+ m.TotalTrackedTime = totalTime.Time
+ return nil
+}
+
+// InsertMilestones creates milestones for a repository.
+func InsertMilestones(ctx context.Context, ms ...*Milestone) (err error) {
+ if len(ms) == 0 {
+ return nil
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+ sess := db.GetEngine(ctx)
+
+	// Insert one by one so each milestone gets its id back; a batch insert would not populate the ids.
+ for _, m := range ms {
+ if _, err = sess.NoAutoTime().Insert(m); err != nil {
+ return err
+ }
+ }
+
+ if _, err = db.Exec(ctx, "UPDATE `repository` SET num_milestones = num_milestones + ? WHERE id = ?", len(ms), ms[0].RepoID); err != nil {
+ return err
+ }
+ return committer.Commit()
+}
diff --git a/models/issues/milestone_list.go b/models/issues/milestone_list.go
new file mode 100644
index 0000000..955ab23
--- /dev/null
+++ b/models/issues/milestone_list.go
@@ -0,0 +1,195 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "context"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/optional"
+
+ "xorm.io/builder"
+)
+
+// MilestoneList is a list of milestones offering additional functionality
+type MilestoneList []*Milestone
+
+func (milestones MilestoneList) getMilestoneIDs() []int64 {
+ ids := make([]int64, 0, len(milestones))
+ for _, ms := range milestones {
+ ids = append(ids, ms.ID)
+ }
+ return ids
+}
+
+// FindMilestoneOptions contain options to get milestones
+type FindMilestoneOptions struct {
+ db.ListOptions
+ RepoID int64
+ IsClosed optional.Option[bool]
+ Name string
+ SortType string
+ RepoCond builder.Cond
+ RepoIDs []int64
+}
+
+func (opts FindMilestoneOptions) ToConds() builder.Cond {
+ cond := builder.NewCond()
+ if opts.RepoID != 0 {
+ cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
+ }
+ if opts.IsClosed.Has() {
+ cond = cond.And(builder.Eq{"is_closed": opts.IsClosed.Value()})
+ }
+ if opts.RepoCond != nil && opts.RepoCond.IsValid() {
+ cond = cond.And(builder.In("repo_id", builder.Select("id").From("repository").Where(opts.RepoCond)))
+ }
+ if len(opts.RepoIDs) > 0 {
+ cond = cond.And(builder.In("repo_id", opts.RepoIDs))
+ }
+ if len(opts.Name) != 0 {
+ cond = cond.And(db.BuildCaseInsensitiveLike("name", opts.Name))
+ }
+
+ return cond
+}
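+
+// A minimal usage sketch (values are illustrative): find the open milestones
+// of one repository, furthest deadline first:
+//
+//	milestones, err := db.Find[Milestone](ctx, FindMilestoneOptions{
+//		RepoID:   1,
+//		IsClosed: optional.Some(false),
+//		SortType: "furthestduedate",
+//	})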
+
+func (opts FindMilestoneOptions) ToOrders() string {
+ switch opts.SortType {
+ case "furthestduedate":
+ return "deadline_unix DESC"
+ case "leastcomplete":
+ return "completeness ASC"
+ case "mostcomplete":
+ return "completeness DESC"
+ case "leastissues":
+ return "num_issues ASC"
+ case "mostissues":
+ return "num_issues DESC"
+ case "id":
+ return "id ASC"
+ case "name":
+ return "name DESC"
+ default:
+ return "deadline_unix ASC, name ASC"
+ }
+}
+
+// GetMilestoneIDsByNames returns a list of milestone ids by given names.
+// It doesn't filter them by repo, so it could return milestones belonging to different repos.
+// It's only useful for filtering issues via the indexer; it has little use otherwise.
+// Since several milestones can share a name, the returned ids may outnumber the given names.
+func GetMilestoneIDsByNames(ctx context.Context, names []string) ([]int64, error) {
+ var ids []int64
+ return ids, db.GetEngine(ctx).Table("milestone").
+ Where(db.BuildCaseInsensitiveIn("name", names)).
+ Cols("id").
+ Find(&ids)
+}
+
+// LoadTotalTrackedTimes loads for every milestone in the list the TotalTrackedTime by a batch request
+func (milestones MilestoneList) LoadTotalTrackedTimes(ctx context.Context) error {
+ type totalTimesByMilestone struct {
+ MilestoneID int64
+ Time int64
+ }
+ if len(milestones) == 0 {
+ return nil
+ }
+ trackedTimes := make(map[int64]int64, len(milestones))
+
+ // Get total tracked time by milestone_id
+ rows, err := db.GetEngine(ctx).Table("issue").
+ Join("INNER", "milestone", "issue.milestone_id = milestone.id").
+ Join("LEFT", "tracked_time", "tracked_time.issue_id = issue.id").
+ Where("tracked_time.deleted = ?", false).
+ Select("milestone_id, sum(time) as time").
+ In("milestone_id", milestones.getMilestoneIDs()).
+ GroupBy("milestone_id").
+ Rows(new(totalTimesByMilestone))
+ if err != nil {
+ return err
+ }
+
+ defer rows.Close()
+
+ for rows.Next() {
+ var totalTime totalTimesByMilestone
+ err = rows.Scan(&totalTime)
+ if err != nil {
+ return err
+ }
+ trackedTimes[totalTime.MilestoneID] = totalTime.Time
+ }
+
+ for _, milestone := range milestones {
+ milestone.TotalTrackedTime = trackedTimes[milestone.ID]
+ }
+ return nil
+}
+
+// CountMilestonesMap returns a map from repo id to the number of milestones matching the given options.
+func CountMilestonesMap(ctx context.Context, opts FindMilestoneOptions) (map[int64]int64, error) {
+ sess := db.GetEngine(ctx).Where(opts.ToConds())
+
+ countsSlice := make([]*struct {
+ RepoID int64
+ Count int64
+ }, 0, 10)
+ if err := sess.GroupBy("repo_id").
+ Select("repo_id AS repo_id, COUNT(*) AS count").
+ Table("milestone").
+ Find(&countsSlice); err != nil {
+ return nil, err
+ }
+
+ countMap := make(map[int64]int64, len(countsSlice))
+ for _, c := range countsSlice {
+ countMap[c.RepoID] = c.Count
+ }
+ return countMap, nil
+}
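+
+// For example (hypothetical fixture data): with RepoIDs: []int64{1, 2} and
+// IsClosed: optional.Some(false), the result maps each repo id to its number
+// of open milestones, e.g. map[int64]int64{1: 1, 2: 2}.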
+
+// MilestonesStats represents milestone statistic information.
+type MilestonesStats struct {
+ OpenCount, ClosedCount int64
+}
+
+// Total returns the total counts of milestones
+func (m MilestonesStats) Total() int64 {
+ return m.OpenCount + m.ClosedCount
+}
+
+// GetMilestonesStatsByRepoCondAndKw returns milestone statistic information for dashboard by given repo conditions and name keyword.
+func GetMilestonesStatsByRepoCondAndKw(ctx context.Context, repoCond builder.Cond, keyword string) (*MilestonesStats, error) {
+ var err error
+ stats := &MilestonesStats{}
+
+ sess := db.GetEngine(ctx).Where("is_closed = ?", false)
+ if len(keyword) > 0 {
+ sess = sess.And(builder.Like{"UPPER(name)", strings.ToUpper(keyword)})
+ }
+ if repoCond.IsValid() {
+ sess.And(builder.In("repo_id", builder.Select("id").From("repository").Where(repoCond)))
+ }
+ stats.OpenCount, err = sess.Count(new(Milestone))
+ if err != nil {
+ return nil, err
+ }
+
+ sess = db.GetEngine(ctx).Where("is_closed = ?", true)
+ if len(keyword) > 0 {
+ sess = sess.And(builder.Like{"UPPER(name)", strings.ToUpper(keyword)})
+ }
+ if repoCond.IsValid() {
+ sess.And(builder.In("repo_id", builder.Select("id").From("repository").Where(repoCond)))
+ }
+ stats.ClosedCount, err = sess.Count(new(Milestone))
+ if err != nil {
+ return nil, err
+ }
+
+ return stats, nil
+}
diff --git a/models/issues/milestone_test.go b/models/issues/milestone_test.go
new file mode 100644
index 0000000..314cba3
--- /dev/null
+++ b/models/issues/milestone_test.go
@@ -0,0 +1,371 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues_test
+
+import (
+ "sort"
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/modules/optional"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestMilestone_State(t *testing.T) {
+ assert.Equal(t, api.StateOpen, (&issues_model.Milestone{IsClosed: false}).State())
+ assert.Equal(t, api.StateClosed, (&issues_model.Milestone{IsClosed: true}).State())
+}
+
+func TestGetMilestoneByRepoID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ milestone, err := issues_model.GetMilestoneByRepoID(db.DefaultContext, 1, 1)
+ require.NoError(t, err)
+ assert.EqualValues(t, 1, milestone.ID)
+ assert.EqualValues(t, 1, milestone.RepoID)
+
+ _, err = issues_model.GetMilestoneByRepoID(db.DefaultContext, unittest.NonexistentID, unittest.NonexistentID)
+ assert.True(t, issues_model.IsErrMilestoneNotExist(err))
+}
+
+func TestGetMilestonesByRepoID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ test := func(repoID int64, state api.StateType) {
+ var isClosed optional.Option[bool]
+ switch state {
+ case api.StateClosed, api.StateOpen:
+ isClosed = optional.Some(state == api.StateClosed)
+ }
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: repoID})
+ milestones, err := db.Find[issues_model.Milestone](db.DefaultContext, issues_model.FindMilestoneOptions{
+ RepoID: repo.ID,
+ IsClosed: isClosed,
+ })
+ require.NoError(t, err)
+
+ var n int
+
+ switch state {
+ case api.StateClosed:
+ n = repo.NumClosedMilestones
+
+ case api.StateAll:
+ n = repo.NumMilestones
+
+ case api.StateOpen:
+ fallthrough
+
+ default:
+ n = repo.NumOpenMilestones
+ }
+
+ assert.Len(t, milestones, n)
+ for _, milestone := range milestones {
+ assert.EqualValues(t, repoID, milestone.RepoID)
+ }
+ }
+ test(1, api.StateOpen)
+ test(1, api.StateAll)
+ test(1, api.StateClosed)
+ test(2, api.StateOpen)
+ test(2, api.StateAll)
+ test(2, api.StateClosed)
+ test(3, api.StateOpen)
+ test(3, api.StateClosed)
+ test(3, api.StateAll)
+
+ milestones, err := db.Find[issues_model.Milestone](db.DefaultContext, issues_model.FindMilestoneOptions{
+ RepoID: unittest.NonexistentID,
+ IsClosed: optional.Some(false),
+ })
+ require.NoError(t, err)
+ assert.Empty(t, milestones)
+}
+
+func TestGetMilestones(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ test := func(sortType string, sortCond func(*issues_model.Milestone) int) {
+ for _, page := range []int{0, 1} {
+ milestones, err := db.Find[issues_model.Milestone](db.DefaultContext, issues_model.FindMilestoneOptions{
+ ListOptions: db.ListOptions{
+ Page: page,
+ PageSize: setting.UI.IssuePagingNum,
+ },
+ RepoID: repo.ID,
+ IsClosed: optional.Some(false),
+ SortType: sortType,
+ })
+ require.NoError(t, err)
+ assert.Len(t, milestones, repo.NumMilestones-repo.NumClosedMilestones)
+ values := make([]int, len(milestones))
+ for i, milestone := range milestones {
+ values[i] = sortCond(milestone)
+ }
+ assert.True(t, sort.IntsAreSorted(values))
+
+ milestones, err = db.Find[issues_model.Milestone](db.DefaultContext, issues_model.FindMilestoneOptions{
+ ListOptions: db.ListOptions{
+ Page: page,
+ PageSize: setting.UI.IssuePagingNum,
+ },
+ RepoID: repo.ID,
+ IsClosed: optional.Some(true),
+ Name: "",
+ SortType: sortType,
+ })
+ require.NoError(t, err)
+ assert.Len(t, milestones, repo.NumClosedMilestones)
+ values = make([]int, len(milestones))
+ for i, milestone := range milestones {
+ values[i] = sortCond(milestone)
+ }
+ assert.True(t, sort.IntsAreSorted(values))
+ }
+ }
+ test("furthestduedate", func(milestone *issues_model.Milestone) int {
+ return -int(milestone.DeadlineUnix)
+ })
+ test("leastcomplete", func(milestone *issues_model.Milestone) int {
+ return milestone.Completeness
+ })
+ test("mostcomplete", func(milestone *issues_model.Milestone) int {
+ return -milestone.Completeness
+ })
+ test("leastissues", func(milestone *issues_model.Milestone) int {
+ return milestone.NumIssues
+ })
+ test("mostissues", func(milestone *issues_model.Milestone) int {
+ return -milestone.NumIssues
+ })
+ test("soonestduedate", func(milestone *issues_model.Milestone) int {
+ return int(milestone.DeadlineUnix)
+ })
+}
+
+func TestCountRepoMilestones(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ test := func(repoID int64) {
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: repoID})
+ count, err := db.Count[issues_model.Milestone](db.DefaultContext, issues_model.FindMilestoneOptions{
+ RepoID: repoID,
+ })
+ require.NoError(t, err)
+ assert.EqualValues(t, repo.NumMilestones, count)
+ }
+ test(1)
+ test(2)
+ test(3)
+
+ count, err := db.Count[issues_model.Milestone](db.DefaultContext, issues_model.FindMilestoneOptions{
+ RepoID: unittest.NonexistentID,
+ })
+ require.NoError(t, err)
+ assert.EqualValues(t, 0, count)
+}
+
+func TestCountRepoClosedMilestones(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ test := func(repoID int64) {
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: repoID})
+ count, err := db.Count[issues_model.Milestone](db.DefaultContext, issues_model.FindMilestoneOptions{
+ RepoID: repoID,
+ IsClosed: optional.Some(true),
+ })
+ require.NoError(t, err)
+ assert.EqualValues(t, repo.NumClosedMilestones, count)
+ }
+ test(1)
+ test(2)
+ test(3)
+
+ count, err := db.Count[issues_model.Milestone](db.DefaultContext, issues_model.FindMilestoneOptions{
+ RepoID: unittest.NonexistentID,
+ IsClosed: optional.Some(true),
+ })
+ require.NoError(t, err)
+ assert.EqualValues(t, 0, count)
+}
+
+func TestCountMilestonesByRepoIDs(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ milestonesCount := func(repoID int64) (int, int) {
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: repoID})
+ return repo.NumOpenMilestones, repo.NumClosedMilestones
+ }
+ repo1OpenCount, repo1ClosedCount := milestonesCount(1)
+ repo2OpenCount, repo2ClosedCount := milestonesCount(2)
+
+ openCounts, err := issues_model.CountMilestonesMap(db.DefaultContext, issues_model.FindMilestoneOptions{
+ RepoIDs: []int64{1, 2},
+ IsClosed: optional.Some(false),
+ })
+ require.NoError(t, err)
+ assert.EqualValues(t, repo1OpenCount, openCounts[1])
+ assert.EqualValues(t, repo2OpenCount, openCounts[2])
+
+ closedCounts, err := issues_model.CountMilestonesMap(db.DefaultContext,
+ issues_model.FindMilestoneOptions{
+ RepoIDs: []int64{1, 2},
+ IsClosed: optional.Some(true),
+ })
+ require.NoError(t, err)
+ assert.EqualValues(t, repo1ClosedCount, closedCounts[1])
+ assert.EqualValues(t, repo2ClosedCount, closedCounts[2])
+}
+
+func TestGetMilestonesByRepoIDs(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ repo1 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ repo2 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 2})
+ test := func(sortType string, sortCond func(*issues_model.Milestone) int) {
+ for _, page := range []int{0, 1} {
+ openMilestones, err := db.Find[issues_model.Milestone](db.DefaultContext, issues_model.FindMilestoneOptions{
+ ListOptions: db.ListOptions{
+ Page: page,
+ PageSize: setting.UI.IssuePagingNum,
+ },
+ RepoIDs: []int64{repo1.ID, repo2.ID},
+ IsClosed: optional.Some(false),
+ SortType: sortType,
+ })
+ require.NoError(t, err)
+ assert.Len(t, openMilestones, repo1.NumOpenMilestones+repo2.NumOpenMilestones)
+ values := make([]int, len(openMilestones))
+ for i, milestone := range openMilestones {
+ values[i] = sortCond(milestone)
+ }
+ assert.True(t, sort.IntsAreSorted(values))
+
+ closedMilestones, err := db.Find[issues_model.Milestone](db.DefaultContext,
+ issues_model.FindMilestoneOptions{
+ ListOptions: db.ListOptions{
+ Page: page,
+ PageSize: setting.UI.IssuePagingNum,
+ },
+ RepoIDs: []int64{repo1.ID, repo2.ID},
+ IsClosed: optional.Some(true),
+ SortType: sortType,
+ })
+ require.NoError(t, err)
+ assert.Len(t, closedMilestones, repo1.NumClosedMilestones+repo2.NumClosedMilestones)
+ values = make([]int, len(closedMilestones))
+ for i, milestone := range closedMilestones {
+ values[i] = sortCond(milestone)
+ }
+ assert.True(t, sort.IntsAreSorted(values))
+ }
+ }
+ test("furthestduedate", func(milestone *issues_model.Milestone) int {
+ return -int(milestone.DeadlineUnix)
+ })
+ test("leastcomplete", func(milestone *issues_model.Milestone) int {
+ return milestone.Completeness
+ })
+ test("mostcomplete", func(milestone *issues_model.Milestone) int {
+ return -milestone.Completeness
+ })
+ test("leastissues", func(milestone *issues_model.Milestone) int {
+ return milestone.NumIssues
+ })
+ test("mostissues", func(milestone *issues_model.Milestone) int {
+ return -milestone.NumIssues
+ })
+ test("soonestduedate", func(milestone *issues_model.Milestone) int {
+ return int(milestone.DeadlineUnix)
+ })
+}
+
+func TestNewMilestone(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ milestone := &issues_model.Milestone{
+ RepoID: 1,
+ Name: "milestoneName",
+ Content: "milestoneContent",
+ }
+
+ require.NoError(t, issues_model.NewMilestone(db.DefaultContext, milestone))
+ unittest.AssertExistsAndLoadBean(t, milestone)
+ unittest.CheckConsistencyFor(t, &repo_model.Repository{ID: milestone.RepoID}, &issues_model.Milestone{})
+}
+
+func TestChangeMilestoneStatus(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ milestone := unittest.AssertExistsAndLoadBean(t, &issues_model.Milestone{ID: 1})
+
+ require.NoError(t, issues_model.ChangeMilestoneStatus(db.DefaultContext, milestone, true))
+ unittest.AssertExistsAndLoadBean(t, &issues_model.Milestone{ID: 1}, "is_closed=1")
+ unittest.CheckConsistencyFor(t, &repo_model.Repository{ID: milestone.RepoID}, &issues_model.Milestone{})
+
+ require.NoError(t, issues_model.ChangeMilestoneStatus(db.DefaultContext, milestone, false))
+ unittest.AssertExistsAndLoadBean(t, &issues_model.Milestone{ID: 1}, "is_closed=0")
+ unittest.CheckConsistencyFor(t, &repo_model.Repository{ID: milestone.RepoID}, &issues_model.Milestone{})
+}
+
+func TestDeleteMilestoneByRepoID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ require.NoError(t, issues_model.DeleteMilestoneByRepoID(db.DefaultContext, 1, 1))
+ unittest.AssertNotExistsBean(t, &issues_model.Milestone{ID: 1})
+ unittest.CheckConsistencyFor(t, &repo_model.Repository{ID: 1})
+
+ require.NoError(t, issues_model.DeleteMilestoneByRepoID(db.DefaultContext, unittest.NonexistentID, unittest.NonexistentID))
+}
+
+func TestUpdateMilestone(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ milestone := unittest.AssertExistsAndLoadBean(t, &issues_model.Milestone{ID: 1})
+ milestone.Name = " newMilestoneName "
+ milestone.Content = "newMilestoneContent"
+ require.NoError(t, issues_model.UpdateMilestone(db.DefaultContext, milestone, milestone.IsClosed))
+ milestone = unittest.AssertExistsAndLoadBean(t, &issues_model.Milestone{ID: 1})
+ assert.EqualValues(t, "newMilestoneName", milestone.Name)
+ unittest.CheckConsistencyFor(t, &issues_model.Milestone{})
+}
+
+func TestUpdateMilestoneCounters(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{MilestoneID: 1},
+ "is_closed=0")
+
+ issue.IsClosed = true
+ issue.ClosedUnix = timeutil.TimeStampNow()
+ _, err := db.GetEngine(db.DefaultContext).ID(issue.ID).Cols("is_closed", "closed_unix").Update(issue)
+ require.NoError(t, err)
+ require.NoError(t, issues_model.UpdateMilestoneCounters(db.DefaultContext, issue.MilestoneID))
+ unittest.CheckConsistencyFor(t, &issues_model.Milestone{})
+
+ issue.IsClosed = false
+ issue.ClosedUnix = 0
+ _, err = db.GetEngine(db.DefaultContext).ID(issue.ID).Cols("is_closed", "closed_unix").Update(issue)
+ require.NoError(t, err)
+ require.NoError(t, issues_model.UpdateMilestoneCounters(db.DefaultContext, issue.MilestoneID))
+ unittest.CheckConsistencyFor(t, &issues_model.Milestone{})
+}
+
+func TestMigrate_InsertMilestones(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ reponame := "repo1"
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{Name: reponame})
+ name := "milestonetest1"
+ ms := &issues_model.Milestone{
+ RepoID: repo.ID,
+ Name: name,
+ }
+ err := issues_model.InsertMilestones(db.DefaultContext, ms)
+ require.NoError(t, err)
+ unittest.AssertExistsAndLoadBean(t, ms)
+ repoModified := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: repo.ID})
+ assert.EqualValues(t, repo.NumMilestones+1, repoModified.NumMilestones)
+
+ unittest.CheckConsistencyFor(t, &issues_model.Milestone{})
+}
diff --git a/models/issues/pull.go b/models/issues/pull.go
new file mode 100644
index 0000000..45e2e19
--- /dev/null
+++ b/models/issues/pull.go
@@ -0,0 +1,1105 @@
+// Copyright 2015 The Gogs Authors. All rights reserved.
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ git_model "code.gitea.io/gitea/models/git"
+ org_model "code.gitea.io/gitea/models/organization"
+ pull_model "code.gitea.io/gitea/models/pull"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ErrPullRequestNotExist represents a "PullRequestNotExist" kind of error.
+type ErrPullRequestNotExist struct {
+ ID int64
+ IssueID int64
+ HeadRepoID int64
+ BaseRepoID int64
+ HeadBranch string
+ BaseBranch string
+}
+
+// IsErrPullRequestNotExist checks if an error is a ErrPullRequestNotExist.
+func IsErrPullRequestNotExist(err error) bool {
+ _, ok := err.(ErrPullRequestNotExist)
+ return ok
+}
+
+func (err ErrPullRequestNotExist) Error() string {
+ return fmt.Sprintf("pull request does not exist [id: %d, issue_id: %d, head_repo_id: %d, base_repo_id: %d, head_branch: %s, base_branch: %s]",
+ err.ID, err.IssueID, err.HeadRepoID, err.BaseRepoID, err.HeadBranch, err.BaseBranch)
+}
+
+func (err ErrPullRequestNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ErrPullRequestAlreadyExists represents a "PullRequestAlreadyExists"-error
+type ErrPullRequestAlreadyExists struct {
+ ID int64
+ IssueID int64
+ HeadRepoID int64
+ BaseRepoID int64
+ HeadBranch string
+ BaseBranch string
+}
+
+// IsErrPullRequestAlreadyExists checks if an error is a ErrPullRequestAlreadyExists.
+func IsErrPullRequestAlreadyExists(err error) bool {
+ _, ok := err.(ErrPullRequestAlreadyExists)
+ return ok
+}
+
+// Error does pretty-printing :D
+func (err ErrPullRequestAlreadyExists) Error() string {
+ return fmt.Sprintf("pull request already exists for these targets [id: %d, issue_id: %d, head_repo_id: %d, base_repo_id: %d, head_branch: %s, base_branch: %s]",
+ err.ID, err.IssueID, err.HeadRepoID, err.BaseRepoID, err.HeadBranch, err.BaseBranch)
+}
+
+func (err ErrPullRequestAlreadyExists) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+// ErrPullWasClosed is used when an attempt is made to close an already closed pull request
+type ErrPullWasClosed struct {
+ ID int64
+ Index int64
+}
+
+// IsErrPullWasClosed checks if an error is an ErrPullWasClosed.
+func IsErrPullWasClosed(err error) bool {
+ _, ok := err.(ErrPullWasClosed)
+ return ok
+}
+
+func (err ErrPullWasClosed) Error() string {
+ return fmt.Sprintf("Pull request [%d] %d was already closed", err.ID, err.Index)
+}
+
+// PullRequestType defines pull request type
+type PullRequestType int
+
+// Enumerate all the pull request types
+const (
+ PullRequestGitea PullRequestType = iota
+ PullRequestGit
+)
+
+// PullRequestStatus defines pull request status
+type PullRequestStatus int
+
+// Enumerate all the pull request statuses
+const (
+ PullRequestStatusConflict PullRequestStatus = iota
+ PullRequestStatusChecking
+ PullRequestStatusMergeable
+ PullRequestStatusManuallyMerged
+ PullRequestStatusError
+ PullRequestStatusEmpty
+ PullRequestStatusAncestor
+)
+
+func (status PullRequestStatus) String() string {
+ switch status {
+ case PullRequestStatusConflict:
+ return "CONFLICT"
+ case PullRequestStatusChecking:
+ return "CHECKING"
+ case PullRequestStatusMergeable:
+ return "MERGEABLE"
+ case PullRequestStatusManuallyMerged:
+ return "MANUALLY_MERGED"
+ case PullRequestStatusError:
+ return "ERROR"
+ case PullRequestStatusEmpty:
+ return "EMPTY"
+ case PullRequestStatusAncestor:
+ return "ANCESTOR"
+ default:
+ return strconv.Itoa(int(status))
+ }
+}
+
+// PullRequestFlow the flow of pull request
+type PullRequestFlow int
+
+const (
+	// PullRequestFlowGithub GitHub flow: from a head branch to a base branch
+	PullRequestFlowGithub PullRequestFlow = iota
+	// PullRequestFlowAGit AGit flow pull request; the head branch does not exist as a real branch
+ PullRequestFlowAGit
+)
+
+// PullRequest represents relation between pull request and repositories.
+type PullRequest struct {
+ ID int64 `xorm:"pk autoincr"`
+ Type PullRequestType
+ Status PullRequestStatus
+ ConflictedFiles []string `xorm:"TEXT JSON"`
+ CommitsAhead int
+ CommitsBehind int
+
+ ChangedProtectedFiles []string `xorm:"TEXT JSON"`
+
+ IssueID int64 `xorm:"INDEX"`
+ Issue *Issue `xorm:"-"`
+ Index int64
+ RequestedReviewers []*user_model.User `xorm:"-"`
+ RequestedReviewersTeams []*org_model.Team `xorm:"-"`
+ isRequestedReviewersLoaded bool `xorm:"-"`
+
+ HeadRepoID int64 `xorm:"INDEX"`
+ HeadRepo *repo_model.Repository `xorm:"-"`
+ BaseRepoID int64 `xorm:"INDEX"`
+ BaseRepo *repo_model.Repository `xorm:"-"`
+ HeadBranch string
+ HeadCommitID string `xorm:"-"`
+ BaseBranch string
+ MergeBase string `xorm:"VARCHAR(64)"`
+ AllowMaintainerEdit bool `xorm:"NOT NULL DEFAULT false"`
+
+ HasMerged bool `xorm:"INDEX"`
+ MergedCommitID string `xorm:"VARCHAR(64)"`
+ MergerID int64 `xorm:"INDEX"`
+ Merger *user_model.User `xorm:"-"`
+ MergedUnix timeutil.TimeStamp `xorm:"updated INDEX"`
+
+ isHeadRepoLoaded bool `xorm:"-"`
+
+ Flow PullRequestFlow `xorm:"NOT NULL DEFAULT 0"`
+}
+
+func init() {
+ db.RegisterModel(new(PullRequest))
+}
+
+// DeletePullsByBaseRepoID deletes all pull requests by the base repository ID
+func DeletePullsByBaseRepoID(ctx context.Context, repoID int64) error {
+ deleteCond := builder.Select("id").From("pull_request").Where(builder.Eq{"pull_request.base_repo_id": repoID})
+
+ // Delete scheduled auto merges
+ if _, err := db.GetEngine(ctx).In("pull_id", deleteCond).
+ Delete(&pull_model.AutoMerge{}); err != nil {
+ return err
+ }
+
+ // Delete review states
+ if _, err := db.GetEngine(ctx).In("pull_id", deleteCond).
+ Delete(&pull_model.ReviewState{}); err != nil {
+ return err
+ }
+
+ _, err := db.DeleteByBean(ctx, &PullRequest{BaseRepoID: repoID})
+ return err
+}
+
+func (pr *PullRequest) String() string {
+ if pr == nil {
+ return "<PullRequest nil>"
+ }
+
+ s := new(strings.Builder)
+ fmt.Fprintf(s, "<PullRequest [%d]", pr.ID)
+ if pr.BaseRepo != nil {
+ fmt.Fprintf(s, "%s#%d[%s...", pr.BaseRepo.FullName(), pr.Index, pr.BaseBranch)
+ } else {
+ fmt.Fprintf(s, "Repo[%d]#%d[%s...", pr.BaseRepoID, pr.Index, pr.BaseBranch)
+ }
+ if pr.HeadRepoID == pr.BaseRepoID {
+ fmt.Fprintf(s, "%s]", pr.HeadBranch)
+ } else if pr.HeadRepo != nil {
+ fmt.Fprintf(s, "%s:%s]", pr.HeadRepo.FullName(), pr.HeadBranch)
+ } else {
+ fmt.Fprintf(s, "Repo[%d]:%s]", pr.HeadRepoID, pr.HeadBranch)
+ }
+ s.WriteByte('>')
+ return s.String()
+}
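+
+// For a same-repo pull request the result looks like (illustrative values):
+//
+//	<PullRequest [1]owner/repo#3[main...feature]>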
+
+// MustHeadUserName returns the HeadRepo's owner name, or an empty string if loading fails
+func (pr *PullRequest) MustHeadUserName(ctx context.Context) string {
+ if err := pr.LoadHeadRepo(ctx); err != nil {
+ if !repo_model.IsErrRepoNotExist(err) {
+ log.Error("LoadHeadRepo: %v", err)
+ } else {
+ log.Warn("LoadHeadRepo %d but repository does not exist: %v", pr.HeadRepoID, err)
+ }
+ return ""
+ }
+ if pr.HeadRepo == nil {
+ return ""
+ }
+ return pr.HeadRepo.OwnerName
+}
+
+// LoadAttributes loads pull request attributes from database
+// Note: don't try to get Issue here, because it would end up in recursive querying.
+func (pr *PullRequest) LoadAttributes(ctx context.Context) (err error) {
+ if pr.HasMerged && pr.Merger == nil {
+ pr.Merger, err = user_model.GetUserByID(ctx, pr.MergerID)
+ if user_model.IsErrUserNotExist(err) {
+ pr.MergerID = user_model.GhostUserID
+ pr.Merger = user_model.NewGhostUser()
+ } else if err != nil {
+ return fmt.Errorf("getUserByID [%d]: %w", pr.MergerID, err)
+ }
+ }
+
+ return nil
+}
+
+// LoadHeadRepo loads the head repository; pr.HeadRepo remains nil if it no longer exists,
+// and thus ErrRepoNotExist is never returned
+func (pr *PullRequest) LoadHeadRepo(ctx context.Context) (err error) {
+ if !pr.isHeadRepoLoaded && pr.HeadRepo == nil && pr.HeadRepoID > 0 {
+ if pr.HeadRepoID == pr.BaseRepoID {
+ if pr.BaseRepo != nil {
+ pr.HeadRepo = pr.BaseRepo
+ return nil
+ } else if pr.Issue != nil && pr.Issue.Repo != nil {
+ pr.HeadRepo = pr.Issue.Repo
+ return nil
+ }
+ }
+
+ pr.HeadRepo, err = repo_model.GetRepositoryByID(ctx, pr.HeadRepoID)
+		if err != nil && !repo_model.IsErrRepoNotExist(err) { // the head repo may have been deleted, but it should still work
+ return fmt.Errorf("pr[%d].LoadHeadRepo[%d]: %w", pr.ID, pr.HeadRepoID, err)
+ }
+ pr.isHeadRepoLoaded = true
+ }
+ return nil
+}
+
+// LoadRequestedReviewers loads the requested reviewers.
+func (pr *PullRequest) LoadRequestedReviewers(ctx context.Context) error {
+ if pr.isRequestedReviewersLoaded || len(pr.RequestedReviewers) > 0 {
+ return nil
+ }
+
+ reviews, err := GetReviewsByIssueID(ctx, pr.Issue.ID)
+ if err != nil {
+ return err
+ }
+ if err = reviews.LoadReviewers(ctx); err != nil {
+ return err
+ }
+ pr.isRequestedReviewersLoaded = true
+ for _, review := range reviews {
+ if review.ReviewerID != 0 {
+ pr.RequestedReviewers = append(pr.RequestedReviewers, review.Reviewer)
+ }
+ }
+
+ return nil
+}
+
+// LoadRequestedReviewersTeams loads the requested reviewers teams.
+func (pr *PullRequest) LoadRequestedReviewersTeams(ctx context.Context) error {
+ reviews, err := GetReviewsByIssueID(ctx, pr.Issue.ID)
+ if err != nil {
+ return err
+ }
+ if err = reviews.LoadReviewersTeams(ctx); err != nil {
+ return err
+ }
+
+ for _, review := range reviews {
+ if review.ReviewerTeamID != 0 {
+ pr.RequestedReviewersTeams = append(pr.RequestedReviewersTeams, review.ReviewerTeam)
+ }
+ }
+
+ return nil
+}
+
+// LoadBaseRepo loads the target repository. ErrRepoNotExist may be returned.
+func (pr *PullRequest) LoadBaseRepo(ctx context.Context) (err error) {
+ if pr.BaseRepo != nil {
+ return nil
+ }
+
+ if pr.HeadRepoID == pr.BaseRepoID && pr.HeadRepo != nil {
+ pr.BaseRepo = pr.HeadRepo
+ return nil
+ }
+
+ if pr.Issue != nil && pr.Issue.Repo != nil {
+ pr.BaseRepo = pr.Issue.Repo
+ return nil
+ }
+
+ pr.BaseRepo, err = repo_model.GetRepositoryByID(ctx, pr.BaseRepoID)
+ if err != nil {
+ return fmt.Errorf("pr[%d].LoadBaseRepo[%d]: %w", pr.ID, pr.BaseRepoID, err)
+ }
+ return nil
+}
+
+// LoadIssue loads issue information from database
+func (pr *PullRequest) LoadIssue(ctx context.Context) (err error) {
+ if pr.Issue != nil {
+ return nil
+ }
+
+ pr.Issue, err = GetIssueByID(ctx, pr.IssueID)
+ if err == nil {
+ pr.Issue.PullRequest = pr
+ }
+ return err
+}
+
+// ReviewCount represents a count of Reviews
+type ReviewCount struct {
+ IssueID int64
+ Type ReviewType
+ Count int64
+}
+
+// GetApprovalCounts returns the approval counts by type
+// FIXME: Only returns official counts due to double counting of non-official counts
+func (pr *PullRequest) GetApprovalCounts(ctx context.Context) ([]*ReviewCount, error) {
+ rCounts := make([]*ReviewCount, 0, 6)
+ sess := db.GetEngine(ctx).Where("issue_id = ?", pr.IssueID)
+ return rCounts, sess.Select("issue_id, type, count(id) as `count`").Where("official = ? AND dismissed = ?", true, false).GroupBy("issue_id, type").Table("review").Find(&rCounts)
+}
+
+// GetApprovers returns the approvers of the pull request
+func (pr *PullRequest) GetApprovers(ctx context.Context) string {
+ stringBuilder := strings.Builder{}
+ if err := pr.getReviewedByLines(ctx, &stringBuilder); err != nil {
+ log.Error("Unable to getReviewedByLines: Error: %v", err)
+ return ""
+ }
+
+ return stringBuilder.String()
+}
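+
+// The output is one git trailer line per approver, e.g. (illustrative):
+//
+//	Reviewed-by: Jane Doe <jane@example.com>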
+
+func (pr *PullRequest) getReviewedByLines(ctx context.Context, writer io.Writer) error {
+ maxReviewers := setting.Repository.PullRequest.DefaultMergeMessageMaxApprovers
+
+ if maxReviewers == 0 {
+ return nil
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ // Note: This doesn't page as we only expect a very limited number of reviews
+ reviews, err := FindLatestReviews(ctx, FindReviewOptions{
+ Types: []ReviewType{ReviewTypeApprove},
+ IssueID: pr.IssueID,
+ OfficialOnly: setting.Repository.PullRequest.DefaultMergeMessageOfficialApproversOnly,
+ })
+ if err != nil {
+ log.Error("Unable to FindReviews for PR ID %d: %v", pr.ID, err)
+ return err
+ }
+
+ reviewersWritten := 0
+
+ for _, review := range reviews {
+ if maxReviewers > 0 && reviewersWritten > maxReviewers {
+ break
+ }
+
+ if err := review.LoadReviewer(ctx); err != nil && !user_model.IsErrUserNotExist(err) {
+ log.Error("Unable to LoadReviewer[%d] for PR ID %d : %v", review.ReviewerID, pr.ID, err)
+ return err
+ } else if review.Reviewer == nil {
+ continue
+ }
+ if _, err := writer.Write([]byte("Reviewed-by: ")); err != nil {
+ return err
+ }
+ if _, err := writer.Write([]byte(review.Reviewer.NewGitSig().String())); err != nil {
+ return err
+ }
+ if _, err := writer.Write([]byte{'\n'}); err != nil {
+ return err
+ }
+ reviewersWritten++
+ }
+ return committer.Commit()
+}
+
+// GetGitRefName returns git ref for hidden pull request branch
+func (pr *PullRequest) GetGitRefName() string {
+ return fmt.Sprintf("%s%d/head", git.PullPrefix, pr.Index)
+}
+
+func (pr *PullRequest) GetGitHeadBranchRefName() string {
+ return fmt.Sprintf("%s%s", git.BranchPrefix, pr.HeadBranch)
+}
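+
+// For a PR with Index 12 and HeadBranch "feature", these return
+// "refs/pull/12/head" and "refs/heads/feature" respectively (assuming the
+// usual values of git.PullPrefix and git.BranchPrefix).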
+
+// GetReviewCommentsCount returns the number of review comments made on the diff of a PR review (not including comments on commits or issues in a PR)
+func (pr *PullRequest) GetReviewCommentsCount(ctx context.Context) int {
+ opts := FindCommentsOptions{
+ Type: CommentTypeReview,
+ IssueID: pr.IssueID,
+ }
+ conds := opts.ToConds()
+
+ count, err := db.GetEngine(ctx).Where(conds).Count(new(Comment))
+ if err != nil {
+ return 0
+ }
+ return int(count)
+}
+
+// IsChecking returns true if this pull request is still checking conflict.
+func (pr *PullRequest) IsChecking() bool {
+ return pr.Status == PullRequestStatusChecking
+}
+
+// CanAutoMerge returns true if this pull request can be merged automatically.
+func (pr *PullRequest) CanAutoMerge() bool {
+ return pr.Status == PullRequestStatusMergeable
+}
+
+// IsEmpty returns true if this pull request is empty.
+func (pr *PullRequest) IsEmpty() bool {
+ return pr.Status == PullRequestStatusEmpty
+}
+
+// IsAncestor returns true if the Head Commit of this PR is an ancestor of the Base Commit
+func (pr *PullRequest) IsAncestor() bool {
+ return pr.Status == PullRequestStatusAncestor
+}
+
+// IsFromFork returns true if this PR is from a fork.
+func (pr *PullRequest) IsFromFork() bool {
+ return pr.HeadRepoID != pr.BaseRepoID
+}
+
+// SetMerged sets a pull request to merged and closes the corresponding issue
+func (pr *PullRequest) SetMerged(ctx context.Context) (bool, error) {
+ if pr.HasMerged {
+ return false, fmt.Errorf("PullRequest[%d] already merged", pr.Index)
+ }
+ if pr.MergedCommitID == "" || pr.MergedUnix == 0 || pr.Merger == nil {
+ return false, fmt.Errorf("Unable to merge PullRequest[%d], some required fields are empty", pr.Index)
+ }
+
+ pr.HasMerged = true
+ sess := db.GetEngine(ctx)
+
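+	// The two no-op UPDATEs below appear to exist to take row locks on the
+	// issue and pull_request rows for the rest of the surrounding transaction.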
+ if _, err := sess.Exec("UPDATE `issue` SET `repo_id` = `repo_id` WHERE `id` = ?", pr.IssueID); err != nil {
+ return false, err
+ }
+
+ if _, err := sess.Exec("UPDATE `pull_request` SET `issue_id` = `issue_id` WHERE `id` = ?", pr.ID); err != nil {
+ return false, err
+ }
+
+ pr.Issue = nil
+ if err := pr.LoadIssue(ctx); err != nil {
+ return false, err
+ }
+
+ if tmpPr, err := GetPullRequestByID(ctx, pr.ID); err != nil {
+ return false, err
+ } else if tmpPr.HasMerged {
+ if pr.Issue.IsClosed {
+ return false, nil
+ }
+ return false, fmt.Errorf("PullRequest[%d] already merged but it's associated issue [%d] is not closed", pr.Index, pr.IssueID)
+ } else if pr.Issue.IsClosed {
+ return false, fmt.Errorf("PullRequest[%d] already closed", pr.Index)
+ }
+
+ if err := pr.Issue.LoadRepo(ctx); err != nil {
+ return false, err
+ }
+
+ if err := pr.Issue.Repo.LoadOwner(ctx); err != nil {
+ return false, err
+ }
+
+ if _, err := changeIssueStatus(ctx, pr.Issue, pr.Merger, true, true); err != nil {
+ return false, fmt.Errorf("Issue.changeStatus: %w", err)
+ }
+
+ // reset the conflicted files as there cannot be any if we're merged
+ pr.ConflictedFiles = []string{}
+
+ // We need to save all of the data used to compute this merge as it may have already been changed by TestPatch. FIXME: need to set some state to prevent TestPatch from running whilst we are merging.
+ if _, err := sess.Where("id = ?", pr.ID).Cols("has_merged, status, merge_base, merged_commit_id, merger_id, merged_unix, conflicted_files").Update(pr); err != nil {
+ return false, fmt.Errorf("Failed to update pr[%d]: %w", pr.ID, err)
+ }
+
+ return true, nil
+}
+
+// NewPullRequest creates a new pull request with labels for a repository.
+func NewPullRequest(ctx context.Context, repo *repo_model.Repository, issue *Issue, labelIDs []int64, uuids []string, pr *PullRequest) (err error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ idx, err := db.GetNextResourceIndex(ctx, "issue_index", repo.ID)
+ if err != nil {
+ return fmt.Errorf("generate pull request index failed: %w", err)
+ }
+
+ issue.Index = idx
+
+ if err = NewIssueWithIndex(ctx, issue.Poster, NewIssueOptions{
+ Repo: repo,
+ Issue: issue,
+ LabelIDs: labelIDs,
+ Attachments: uuids,
+ IsPull: true,
+ }); err != nil {
+ if repo_model.IsErrUserDoesNotHaveAccessToRepo(err) || IsErrNewIssueInsert(err) {
+ return err
+ }
+ return fmt.Errorf("newIssue: %w", err)
+ }
+
+ pr.Index = issue.Index
+ pr.BaseRepo = repo
+ pr.IssueID = issue.ID
+ if err = db.Insert(ctx, pr); err != nil {
+ return fmt.Errorf("insert pull repo: %w", err)
+ }
+
+ if err = committer.Commit(); err != nil {
+ return fmt.Errorf("Commit: %w", err)
+ }
+
+ return nil
+}
+
+// GetUnmergedPullRequest returns a pull request that is open and has not been merged
+// for the given head/base repos and branches.
+func GetUnmergedPullRequest(ctx context.Context, headRepoID, baseRepoID int64, headBranch, baseBranch string, flow PullRequestFlow) (*PullRequest, error) {
+ pr := new(PullRequest)
+ has, err := db.GetEngine(ctx).
+ Where("head_repo_id=? AND head_branch=? AND base_repo_id=? AND base_branch=? AND has_merged=? AND flow = ? AND issue.is_closed=?",
+ headRepoID, headBranch, baseRepoID, baseBranch, false, flow, false).
+ Join("INNER", "issue", "issue.id=pull_request.issue_id").
+ Get(pr)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrPullRequestNotExist{0, 0, headRepoID, baseRepoID, headBranch, baseBranch}
+ }
+
+ return pr, nil
+}
+
+// GetLatestPullRequestByHeadInfo returns the latest pull request (regardless of its status)
+// by given head information (repo and branch).
+func GetLatestPullRequestByHeadInfo(ctx context.Context, repoID int64, branch string) (*PullRequest, error) {
+ pr := new(PullRequest)
+ has, err := db.GetEngine(ctx).
+ Where("head_repo_id = ? AND head_branch = ? AND flow = ?", repoID, branch, PullRequestFlowGithub).
+ OrderBy("id DESC").
+ Get(pr)
+ if !has {
+ return nil, err
+ }
+ return pr, err
+}
+
+// GetPullRequestByIndex returns a pull request by the given index
+func GetPullRequestByIndex(ctx context.Context, repoID, index int64) (*PullRequest, error) {
+ if index < 1 {
+ return nil, ErrPullRequestNotExist{}
+ }
+ pr := &PullRequest{
+ BaseRepoID: repoID,
+ Index: index,
+ }
+
+ has, err := db.GetEngine(ctx).Get(pr)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrPullRequestNotExist{0, 0, 0, repoID, "", ""}
+ }
+
+ if err = pr.LoadAttributes(ctx); err != nil {
+ return nil, err
+ }
+ if err = pr.LoadIssue(ctx); err != nil {
+ return nil, err
+ }
+
+ return pr, nil
+}
+
+// GetPullRequestByID returns a pull request by given ID.
+func GetPullRequestByID(ctx context.Context, id int64) (*PullRequest, error) {
+ pr := new(PullRequest)
+ has, err := db.GetEngine(ctx).ID(id).Get(pr)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrPullRequestNotExist{id, 0, 0, 0, "", ""}
+ }
+ return pr, pr.LoadAttributes(ctx)
+}
+
+// GetPullRequestByIssueIDWithNoAttributes returns pull request with no attributes loaded by given issue ID.
+func GetPullRequestByIssueIDWithNoAttributes(ctx context.Context, issueID int64) (*PullRequest, error) {
+ var pr PullRequest
+ has, err := db.GetEngine(ctx).Where("issue_id = ?", issueID).Get(&pr)
+ if err != nil {
+ return nil, err
+ }
+ if !has {
+ return nil, ErrPullRequestNotExist{0, issueID, 0, 0, "", ""}
+ }
+ return &pr, nil
+}
+
+// GetPullRequestByIssueID returns pull request by given issue ID.
+func GetPullRequestByIssueID(ctx context.Context, issueID int64) (*PullRequest, error) {
+ pr, exist, err := db.Get[PullRequest](ctx, builder.Eq{"issue_id": issueID})
+ if err != nil {
+ return nil, err
+ } else if !exist {
+ return nil, ErrPullRequestNotExist{0, issueID, 0, 0, "", ""}
+ }
+ return pr, pr.LoadAttributes(ctx)
+}
+
+// GetPullRequestByBaseHeadInfo returns the pull request by the given base and head info
+func GetPullRequestByBaseHeadInfo(ctx context.Context, baseID, headID int64, base, head string) (*PullRequest, error) {
+ pr := &PullRequest{}
+ sess := db.GetEngine(ctx).
+ Join("INNER", "issue", "issue.id = pull_request.issue_id").
+ Where("base_repo_id = ? AND base_branch = ? AND head_repo_id = ? AND head_branch = ?", baseID, base, headID, head)
+ has, err := sess.Get(pr)
+ if err != nil {
+ return nil, err
+ }
+ if !has {
+ return nil, ErrPullRequestNotExist{
+ HeadRepoID: headID,
+ BaseRepoID: baseID,
+ HeadBranch: head,
+ BaseBranch: base,
+ }
+ }
+
+ if err = pr.LoadAttributes(ctx); err != nil {
+ return nil, err
+ }
+ if err = pr.LoadIssue(ctx); err != nil {
+ return nil, err
+ }
+
+ return pr, nil
+}
+
+// GetAllUnmergedAgitPullRequestByPoster gets all unmerged AGit flow pull requests
+// by the given poster id.
+func GetAllUnmergedAgitPullRequestByPoster(ctx context.Context, uid int64) ([]*PullRequest, error) {
+ pulls := make([]*PullRequest, 0, 10)
+
+ err := db.GetEngine(ctx).
+ Where("has_merged=? AND flow = ? AND issue.is_closed=? AND issue.poster_id=?",
+ false, PullRequestFlowAGit, false, uid).
+ Join("INNER", "issue", "issue.id=pull_request.issue_id").
+ Find(&pulls)
+
+ return pulls, err
+}
+
+// Update updates all fields of pull request.
+func (pr *PullRequest) Update(ctx context.Context) error {
+ _, err := db.GetEngine(ctx).ID(pr.ID).AllCols().Update(pr)
+ return err
+}
+
+// UpdateCols updates specific fields of pull request.
+func (pr *PullRequest) UpdateCols(ctx context.Context, cols ...string) error {
+ _, err := db.GetEngine(ctx).ID(pr.ID).Cols(cols...).Update(pr)
+ return err
+}
+
+// UpdateColsIfNotMerged updates specific fields of a pull request if it has not been merged
+func (pr *PullRequest) UpdateColsIfNotMerged(ctx context.Context, cols ...string) error {
+ _, err := db.GetEngine(ctx).Where("id = ? AND has_merged = ?", pr.ID, false).Cols(cols...).Update(pr)
+ return err
+}
+
+// IsWorkInProgress determines whether the Pull Request is a Work In Progress by its title.
+// The issue is loaded if it has not been loaded already.
+func (pr *PullRequest) IsWorkInProgress(ctx context.Context) bool {
+ if err := pr.LoadIssue(ctx); err != nil {
+ log.Error("LoadIssue: %v", err)
+ return false
+ }
+ return HasWorkInProgressPrefix(pr.Issue.Title)
+}
+
+// HasWorkInProgressPrefix determines if the given PR title has a Work In Progress prefix
+func HasWorkInProgressPrefix(title string) bool {
+ for _, prefix := range setting.Repository.PullRequest.WorkInProgressPrefixes {
+ if strings.HasPrefix(strings.ToUpper(title), strings.ToUpper(prefix)) {
+ return true
+ }
+ }
+ return false
+}
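+
+// With the usual default prefixes ("WIP:" and "[WIP]"), titles such as
+// "WIP: add feature" or "[wip] fix bug" are therefore treated as work in
+// progress; the comparison above is case-insensitive.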
+
+// IsFilesConflicted determines if the Pull Request has changes conflicting with the target branch.
+func (pr *PullRequest) IsFilesConflicted() bool {
+ return len(pr.ConflictedFiles) > 0
+}
+
+// GetWorkInProgressPrefix returns the prefix used to mark the pull request as a work in progress.
+// It returns an empty string when no prefix is found.
+func (pr *PullRequest) GetWorkInProgressPrefix(ctx context.Context) string {
+ if err := pr.LoadIssue(ctx); err != nil {
+ log.Error("LoadIssue: %v", err)
+ return ""
+ }
+
+ for _, prefix := range setting.Repository.PullRequest.WorkInProgressPrefixes {
+ if strings.HasPrefix(strings.ToUpper(pr.Issue.Title), strings.ToUpper(prefix)) {
+ return pr.Issue.Title[0:len(prefix)]
+ }
+ }
+ return ""
+}
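+
+// Note that the prefix is returned in the title's original casing: a title of
+// "[wip] fix bug" yields "[wip]" even when the configured prefix is "[WIP]".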
+
+// UpdateCommitDivergence updates the commit divergence (ahead/behind counts) of a pull request
+func (pr *PullRequest) UpdateCommitDivergence(ctx context.Context, ahead, behind int) error {
+ if pr.ID == 0 {
+ return fmt.Errorf("pull ID is 0")
+ }
+ pr.CommitsAhead = ahead
+ pr.CommitsBehind = behind
+ _, err := db.GetEngine(ctx).ID(pr.ID).Cols("commits_ahead", "commits_behind").Update(pr)
+ return err
+}
+
+// IsSameRepo returns true if the base repo and the head repo are the same
+func (pr *PullRequest) IsSameRepo() bool {
+ return pr.BaseRepoID == pr.HeadRepoID
+}
+
+// GetBaseBranchLink returns the relative URL of the base branch
+func (pr *PullRequest) GetBaseBranchLink(ctx context.Context) string {
+ if err := pr.LoadBaseRepo(ctx); err != nil {
+ log.Error("LoadBaseRepo: %v", err)
+ return ""
+ }
+ if pr.BaseRepo == nil {
+ return ""
+ }
+ return pr.BaseRepo.Link() + "/src/branch/" + util.PathEscapeSegments(pr.BaseBranch)
+}
+
+// GetHeadBranchLink returns the relative URL of the head branch
+func (pr *PullRequest) GetHeadBranchLink(ctx context.Context) string {
+ if pr.Flow == PullRequestFlowAGit {
+ return ""
+ }
+
+ if err := pr.LoadHeadRepo(ctx); err != nil {
+ log.Error("LoadHeadRepo: %v", err)
+ return ""
+ }
+ if pr.HeadRepo == nil {
+ return ""
+ }
+ return pr.HeadRepo.Link() + "/src/branch/" + util.PathEscapeSegments(pr.HeadBranch)
+}
+
+// UpdateAllowEdits updates whether the pull request can be edited by maintainers
+func UpdateAllowEdits(ctx context.Context, pr *PullRequest) error {
+ if _, err := db.GetEngine(ctx).ID(pr.ID).Cols("allow_maintainer_edit").Update(pr); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Mergeable returns whether the pull request is mergeable.
+func (pr *PullRequest) Mergeable(ctx context.Context) bool {
+ // A pull request is not mergeable if it:
+ // - is being conflict checked,
+ // - has a conflict,
+ // - received an error while being conflict checked, or
+ // - is a work-in-progress pull request.
+ return pr.Status != PullRequestStatusChecking && pr.Status != PullRequestStatusConflict &&
+ pr.Status != PullRequestStatusError && !pr.IsWorkInProgress(ctx)
+}
+
+// HasEnoughApprovals returns true if pr has enough granted approvals.
+func HasEnoughApprovals(ctx context.Context, protectBranch *git_model.ProtectedBranch, pr *PullRequest) bool {
+ if protectBranch.RequiredApprovals == 0 {
+ return true
+ }
+ return GetGrantedApprovalsCount(ctx, protectBranch, pr) >= protectBranch.RequiredApprovals
+}
+
+// GetGrantedApprovalsCount returns the number of granted approvals for pr. A granted approval must be authored by a user in an approval whitelist.
+func GetGrantedApprovalsCount(ctx context.Context, protectBranch *git_model.ProtectedBranch, pr *PullRequest) int64 {
+ sess := db.GetEngine(ctx).Where("issue_id = ?", pr.IssueID).
+ And("type = ?", ReviewTypeApprove).
+ And("official = ?", true).
+ And("dismissed = ?", false)
+ if protectBranch.IgnoreStaleApprovals {
+ sess = sess.And("stale = ?", false)
+ }
+ approvals, err := sess.Count(new(Review))
+ if err != nil {
+ log.Error("GetGrantedApprovalsCount: %v", err)
+ return 0
+ }
+
+ return approvals
+}
+
+// MergeBlockedByRejectedReview returns true if merge is blocked by rejected reviews
+func MergeBlockedByRejectedReview(ctx context.Context, protectBranch *git_model.ProtectedBranch, pr *PullRequest) bool {
+ if !protectBranch.BlockOnRejectedReviews {
+ return false
+ }
+ rejectExist, err := db.GetEngine(ctx).Where("issue_id = ?", pr.IssueID).
+ And("type = ?", ReviewTypeReject).
+ And("official = ?", true).
+ And("dismissed = ?", false).
+ Exist(new(Review))
+ if err != nil {
+ log.Error("MergeBlockedByRejectedReview: %v", err)
+ return true
+ }
+
+ return rejectExist
+}
+
+// MergeBlockedByOfficialReviewRequests returns true if merge is blocked by
+// pending review requests to official reviewers
+func MergeBlockedByOfficialReviewRequests(ctx context.Context, protectBranch *git_model.ProtectedBranch, pr *PullRequest) bool {
+ if !protectBranch.BlockOnOfficialReviewRequests {
+ return false
+ }
+ has, err := db.GetEngine(ctx).Where("issue_id = ?", pr.IssueID).
+ And("type = ?", ReviewTypeRequest).
+ And("official = ?", true).
+ Exist(new(Review))
+ if err != nil {
+ log.Error("MergeBlockedByOfficialReviewRequests: %v", err)
+ return true
+ }
+
+ return has
+}
+
+// MergeBlockedByOutdatedBranch returns true if merge is blocked by an outdated head branch
+func MergeBlockedByOutdatedBranch(protectBranch *git_model.ProtectedBranch, pr *PullRequest) bool {
+ return protectBranch.BlockOnOutdatedBranch && pr.CommitsBehind > 0
+}
+
+// GetCodeOwnersFromContent returns the code owners configuration.
+// It returns an empty slice if the file is missing, along with warning
+// messages for any parsing errors. Parsing is best-effort: invalid lines are
+// skipped, as are non-existent users and teams.
+func GetCodeOwnersFromContent(ctx context.Context, data string) ([]*CodeOwnerRule, []string) {
+ if len(data) == 0 {
+ return nil, nil
+ }
+
+ rules := make([]*CodeOwnerRule, 0)
+ lines := strings.Split(data, "\n")
+ warnings := make([]string, 0)
+
+ for i, line := range lines {
+ tokens := TokenizeCodeOwnersLine(line)
+ if len(tokens) == 0 {
+ continue
+ } else if len(tokens) < 2 {
+ warnings = append(warnings, fmt.Sprintf("Line: %d: incorrect format", i+1))
+ continue
+ }
+ rule, wr := ParseCodeOwnersLine(ctx, tokens)
+ for _, w := range wr {
+ warnings = append(warnings, fmt.Sprintf("Line: %d: %s", i+1, w))
+ }
+ if rule == nil {
+ continue
+ }
+
+ rules = append(rules, rule)
+ }
+
+ return rules, warnings
+}
+
+type CodeOwnerRule struct {
+ Rule *regexp.Regexp
+ Negative bool
+ Users []*user_model.User
+ Teams []*org_model.Team
+}
+
+func ParseCodeOwnersLine(ctx context.Context, tokens []string) (*CodeOwnerRule, []string) {
+ var err error
+ rule := &CodeOwnerRule{
+ Users: make([]*user_model.User, 0),
+ Teams: make([]*org_model.Team, 0),
+ Negative: strings.HasPrefix(tokens[0], "!"),
+ }
+
+ warnings := make([]string, 0)
+
+ rule.Rule, err = regexp.Compile(fmt.Sprintf("^%s$", strings.TrimPrefix(tokens[0], "!")))
+ if err != nil {
+ warnings = append(warnings, fmt.Sprintf("incorrect codeowner regexp: %s", err))
+ return nil, warnings
+ }
+
+ for _, user := range tokens[1:] {
+ user = strings.TrimPrefix(user, "@")
+
+ // Only @org/team can contain slashes
+ if strings.Contains(user, "/") {
+ s := strings.Split(user, "/")
+ if len(s) != 2 {
+ warnings = append(warnings, fmt.Sprintf("incorrect codeowner group: %s", user))
+ continue
+ }
+ orgName := s[0]
+ teamName := s[1]
+
+ org, err := org_model.GetOrgByName(ctx, orgName)
+ if err != nil {
+ warnings = append(warnings, fmt.Sprintf("incorrect codeowner organization: %s", user))
+ continue
+ }
+ teams, err := org.LoadTeams(ctx)
+ if err != nil {
+ warnings = append(warnings, fmt.Sprintf("incorrect codeowner team: %s", user))
+ continue
+ }
+
+ for _, team := range teams {
+ if team.Name == teamName {
+ rule.Teams = append(rule.Teams, team)
+ }
+ }
+ } else {
+ u, err := user_model.GetUserByName(ctx, user)
+ if err != nil {
+ warnings = append(warnings, fmt.Sprintf("incorrect codeowner user: %s", user))
+ continue
+ }
+ rule.Users = append(rule.Users, u)
+ }
+ }
+
+ if (len(rule.Users) == 0) && (len(rule.Teams) == 0) {
+ warnings = append(warnings, "no users/groups matched")
+ return nil, warnings
+ }
+
+ return rule, warnings
+}
+
+func TokenizeCodeOwnersLine(line string) []string {
+ if len(line) == 0 {
+ return nil
+ }
+
+ line = strings.TrimSpace(line)
+ line = strings.ReplaceAll(line, "\t", " ")
+
+ tokens := make([]string, 0)
+
+ escape := false
+ token := ""
+ for _, char := range line {
+ if escape {
+ token += string(char)
+ escape = false
+ } else if string(char) == "\\" {
+ escape = true
+ } else if string(char) == "#" {
+ break
+ } else if string(char) == " " {
+ if len(token) > 0 {
+ tokens = append(tokens, token)
+ token = ""
+ }
+ } else {
+ token += string(char)
+ }
+ }
+
+ if len(token) > 0 {
+ tokens = append(tokens, token)
+ }
+
+ return tokens
+}
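+
+// For example, the CODEOWNERS line
+//
+//	docs/.* @user1 @org1/team1 # docs owners
+//
+// tokenizes to ["docs/.*", "@user1", "@org1/team1"]; ParseCodeOwnersLine then
+// compiles the anchored regexp ^docs/.*$ and, assuming both exist, resolves
+// one user and one team.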
+
+// InsertPullRequests inserts the given pull requests, creating their issues in the same transaction
+func InsertPullRequests(ctx context.Context, prs ...*PullRequest) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+ sess := db.GetEngine(ctx)
+ for _, pr := range prs {
+ if err := insertIssue(ctx, pr.Issue); err != nil {
+ return err
+ }
+ pr.IssueID = pr.Issue.ID
+ if _, err := sess.NoAutoTime().Insert(pr); err != nil {
+ return err
+ }
+ }
+ return committer.Commit()
+}
+
+// GetPullRequestByMergedCommit returns a merged pull request by the given commit
+func GetPullRequestByMergedCommit(ctx context.Context, repoID int64, sha string) (*PullRequest, error) {
+ pr := new(PullRequest)
+ has, err := db.GetEngine(ctx).Where("base_repo_id = ? AND merged_commit_id = ?", repoID, sha).Get(pr)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrPullRequestNotExist{0, 0, 0, repoID, "", ""}
+ }
+
+ if err = pr.LoadAttributes(ctx); err != nil {
+ return nil, err
+ }
+ if err = pr.LoadIssue(ctx); err != nil {
+ return nil, err
+ }
+
+ return pr, nil
+}
diff --git a/models/issues/pull_list.go b/models/issues/pull_list.go
new file mode 100644
index 0000000..f3970fa
--- /dev/null
+++ b/models/issues/pull_list.go
@@ -0,0 +1,264 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/xorm"
+)
+
+// PullRequestsOptions holds the options for listing pull requests
+type PullRequestsOptions struct {
+ db.ListOptions
+ State string
+ SortType string
+ Labels []int64
+ MilestoneID int64
+}
+
+func listPullRequestStatement(ctx context.Context, baseRepoID int64, opts *PullRequestsOptions) *xorm.Session {
+ sess := db.GetEngine(ctx).Where("pull_request.base_repo_id=?", baseRepoID)
+
+ sess.Join("INNER", "issue", "pull_request.issue_id = issue.id")
+ switch opts.State {
+ case "closed", "open":
+ sess.And("issue.is_closed=?", opts.State == "closed")
+ }
+
+ if len(opts.Labels) > 0 {
+ sess.Join("INNER", "issue_label", "issue.id = issue_label.issue_id").
+ In("issue_label.label_id", opts.Labels)
+ }
+
+ if opts.MilestoneID > 0 {
+ sess.And("issue.milestone_id=?", opts.MilestoneID)
+ }
+
+ return sess
+}
+
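+// GetUnmergedPullRequestsByHeadInfoMax returns open, unmerged GitHub-flow pull
+// requests for the given head repo and branch whose issue was created at or
+// before olderThan; rows whose issue has a NULL created value are always included.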
+func GetUnmergedPullRequestsByHeadInfoMax(ctx context.Context, repoID, olderThan int64, branch string) ([]*PullRequest, error) {
+ prs := make([]*PullRequest, 0, 2)
+ sess := db.GetEngine(ctx).
+ Join("INNER", "issue", "issue.id = `pull_request`.issue_id").
+ Where("`pull_request`.head_repo_id = ? AND `pull_request`.head_branch = ? AND `pull_request`.has_merged = ? AND `issue`.is_closed = ? AND `pull_request`.flow = ? AND (`issue`.`created` IS NULL OR `issue`.`created` <= ?)", repoID, branch, false, false, PullRequestFlowGithub, olderThan)
+ return prs, sess.Find(&prs)
+}
+
+// GetUnmergedPullRequestsByHeadInfo returns all pull requests that are open and have not been merged
+func GetUnmergedPullRequestsByHeadInfo(ctx context.Context, repoID int64, branch string) ([]*PullRequest, error) {
+ prs := make([]*PullRequest, 0, 2)
+ sess := db.GetEngine(ctx).
+ Join("INNER", "issue", "issue.id = pull_request.issue_id").
+ Where("head_repo_id = ? AND head_branch = ? AND has_merged = ? AND issue.is_closed = ? AND flow = ?", repoID, branch, false, false, PullRequestFlowGithub)
+ return prs, sess.Find(&prs)
+}
+
+// CanMaintainerWriteToBranch checks whether the user is a maintainer and can write to the branch
+func CanMaintainerWriteToBranch(ctx context.Context, p access_model.Permission, branch string, user *user_model.User) bool {
+ if p.CanWrite(unit.TypeCode) {
+ return true
+ }
+
+ if len(p.Units) < 1 {
+ return false
+ }
+
+ prs, err := GetUnmergedPullRequestsByHeadInfo(ctx, p.Units[0].RepoID, branch)
+ if err != nil {
+ return false
+ }
+
+ for _, pr := range prs {
+ if pr.AllowMaintainerEdit {
+ err = pr.LoadBaseRepo(ctx)
+ if err != nil {
+ continue
+ }
+ prPerm, err := access_model.GetUserRepoPermission(ctx, pr.BaseRepo, user)
+ if err != nil {
+ continue
+ }
+ if prPerm.CanWrite(unit.TypeCode) {
+ return true
+ }
+ }
+ }
+ return false
+}
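+
+// In other words, a user without direct write access may still push to the
+// branch when it is the head of an open pull request that allows maintainer
+// edits and the user can write code in that pull request's base repository.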
+
+// HasUnmergedPullRequestsByHeadInfo checks if there are open and not merged pull requests
+// by the given head information (repo and branch)
+func HasUnmergedPullRequestsByHeadInfo(ctx context.Context, repoID int64, branch string) (bool, error) {
+ return db.GetEngine(ctx).
+ Where("head_repo_id = ? AND head_branch = ? AND has_merged = ? AND issue.is_closed = ? AND flow = ?",
+ repoID, branch, false, false, PullRequestFlowGithub).
+ Join("INNER", "issue", "issue.id = pull_request.issue_id").
+ Exist(&PullRequest{})
+}
+
+// GetUnmergedPullRequestsByBaseInfo returns all pull requests that are open and have not been merged
+// by the given base information (repo and branch).
+func GetUnmergedPullRequestsByBaseInfo(ctx context.Context, repoID int64, branch string) ([]*PullRequest, error) {
+ prs := make([]*PullRequest, 0, 2)
+ return prs, db.GetEngine(ctx).
+ Where("base_repo_id=? AND base_branch=? AND has_merged=? AND issue.is_closed=?",
+ repoID, branch, false, false).
+ OrderBy("issue.updated_unix DESC").
+ Join("INNER", "issue", "issue.id=pull_request.issue_id").
+ Find(&prs)
+}
+
+// GetPullRequestIDsByCheckStatus returns the IDs of all pull requests with the given check status.
+func GetPullRequestIDsByCheckStatus(ctx context.Context, status PullRequestStatus) ([]int64, error) {
+ prs := make([]int64, 0, 10)
+ return prs, db.GetEngine(ctx).Table("pull_request").
+ Where("status=?", status).
+ Cols("pull_request.id").
+ Find(&prs)
+}
+
+// PullRequests returns all pull requests of a base repository matching the given conditions
+func PullRequests(ctx context.Context, baseRepoID int64, opts *PullRequestsOptions) (PullRequestList, int64, error) {
+ if opts.Page <= 0 {
+ opts.Page = 1
+ }
+
+ countSession := listPullRequestStatement(ctx, baseRepoID, opts)
+ maxResults, err := countSession.Count(new(PullRequest))
+ if err != nil {
+ log.Error("Count PRs: %v", err)
+ return nil, maxResults, err
+ }
+
+ findSession := listPullRequestStatement(ctx, baseRepoID, opts)
+ applySorts(findSession, opts.SortType, 0)
+ findSession = db.SetSessionPagination(findSession, opts)
+ prs := make([]*PullRequest, 0, opts.PageSize)
+ return prs, maxResults, findSession.Find(&prs)
+}
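+
+// A typical call site, mirroring the tests, pages through open pull requests:
+//
+//	prs, count, err := PullRequests(ctx, baseRepoID, &PullRequestsOptions{
+//		ListOptions: db.ListOptions{Page: 1},
+//		State:       "open",
+//		SortType:    "newest",
+//	})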
+
+// PullRequestList defines a list of pull requests
+type PullRequestList []*PullRequest
+
+func (prs PullRequestList) getRepositoryIDs() []int64 {
+ repoIDs := make(container.Set[int64])
+ for _, pr := range prs {
+ if pr.BaseRepo == nil && pr.BaseRepoID > 0 {
+ repoIDs.Add(pr.BaseRepoID)
+ }
+ if pr.HeadRepo == nil && pr.HeadRepoID > 0 {
+ repoIDs.Add(pr.HeadRepoID)
+ }
+ }
+ return repoIDs.Values()
+}
+
+func (prs PullRequestList) LoadRepositories(ctx context.Context) error {
+ repoIDs := prs.getRepositoryIDs()
+ reposMap := make(map[int64]*repo_model.Repository, len(repoIDs))
+ if err := db.GetEngine(ctx).
+ In("id", repoIDs).
+ Find(&reposMap); err != nil {
+ return fmt.Errorf("find repos: %w", err)
+ }
+ for _, pr := range prs {
+ if pr.BaseRepo == nil {
+ pr.BaseRepo = reposMap[pr.BaseRepoID]
+ }
+ if pr.HeadRepo == nil {
+ pr.HeadRepo = reposMap[pr.HeadRepoID]
+ pr.isHeadRepoLoaded = true
+ }
+ }
+ return nil
+}
+
+func (prs PullRequestList) LoadAttributes(ctx context.Context) error {
+ if _, err := prs.LoadIssues(ctx); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (prs PullRequestList) LoadIssues(ctx context.Context) (IssueList, error) {
+ if len(prs) == 0 {
+ return nil, nil
+ }
+
+ // Load issues which are not loaded
+ issueIDs := container.FilterSlice(prs, func(pr *PullRequest) (int64, bool) {
+ return pr.IssueID, pr.Issue == nil && pr.IssueID > 0
+ })
+ issues := make(map[int64]*Issue, len(issueIDs))
+ if err := db.GetEngine(ctx).
+ In("id", issueIDs).
+ Find(&issues); err != nil {
+ return nil, fmt.Errorf("find issues: %w", err)
+ }
+
+ issueList := make(IssueList, 0, len(prs))
+ for _, pr := range prs {
+ if pr.Issue == nil {
+ pr.Issue = issues[pr.IssueID]
+ /*
+ Old code:
+ pr.Issue.PullRequest = pr // panic here means issueIDs and prs are not in sync
+
+ A panic was defensible because the mismatch is almost impossible under normal use.
+ But in integration testing, an asynchronous task could read a database that has
+ been reset, so returning an error makes more sense and lets the caller choose
+ to ignore it.
+ */
+ if pr.Issue == nil {
+ return nil, fmt.Errorf("issues and prs may be not in sync: cannot find issue %v for pr %v: %w", pr.IssueID, pr.ID, util.ErrNotExist)
+ }
+ }
+ pr.Issue.PullRequest = pr
+ if pr.Issue.Repo == nil {
+ pr.Issue.Repo = pr.BaseRepo
+ }
+ issueList = append(issueList, pr.Issue)
+ }
+ return issueList, nil
+}
+
+// GetIssueIDs returns all issue IDs
+func (prs PullRequestList) GetIssueIDs() []int64 {
+ return container.FilterSlice(prs, func(pr *PullRequest) (int64, bool) {
+ return pr.IssueID, pr.IssueID > 0
+ })
+}
+
+// HasMergedPullRequestInRepo returns whether the user (poster) has a merged pull request in the repo
+func HasMergedPullRequestInRepo(ctx context.Context, repoID, posterID int64) (bool, error) {
+ return db.GetEngine(ctx).
+ Join("INNER", "pull_request", "pull_request.issue_id = issue.id").
+ Where("repo_id=?", repoID).
+ And("poster_id=?", posterID).
+ And("is_pull=?", true).
+ And("pull_request.has_merged=?", true).
+ Select("issue.id").
+ Limit(1).
+ Get(new(Issue))
+}
+
+// GetPullRequestByIssueIDs returns all pull requests for the given issue IDs
+func GetPullRequestByIssueIDs(ctx context.Context, issueIDs []int64) (PullRequestList, error) {
+ prs := make([]*PullRequest, 0, len(issueIDs))
+ return prs, db.GetEngine(ctx).
+ Where("issue_id > 0").
+ In("issue_id", issueIDs).
+ Find(&prs)
+}
diff --git a/models/issues/pull_test.go b/models/issues/pull_test.go
new file mode 100644
index 0000000..8e0c020
--- /dev/null
+++ b/models/issues/pull_test.go
@@ -0,0 +1,476 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues_test
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/tests"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestPullRequest_LoadAttributes(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ pr := unittest.AssertExistsAndLoadBean(t, &issues_model.PullRequest{ID: 1})
+ require.NoError(t, pr.LoadAttributes(db.DefaultContext))
+ assert.NotNil(t, pr.Merger)
+ assert.Equal(t, pr.MergerID, pr.Merger.ID)
+}
+
+func TestPullRequest_LoadIssue(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ pr := unittest.AssertExistsAndLoadBean(t, &issues_model.PullRequest{ID: 1})
+ require.NoError(t, pr.LoadIssue(db.DefaultContext))
+ assert.NotNil(t, pr.Issue)
+ assert.Equal(t, int64(2), pr.Issue.ID)
+ require.NoError(t, pr.LoadIssue(db.DefaultContext))
+ assert.NotNil(t, pr.Issue)
+ assert.Equal(t, int64(2), pr.Issue.ID)
+}
+
+func TestPullRequest_LoadBaseRepo(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ pr := unittest.AssertExistsAndLoadBean(t, &issues_model.PullRequest{ID: 1})
+ require.NoError(t, pr.LoadBaseRepo(db.DefaultContext))
+ assert.NotNil(t, pr.BaseRepo)
+ assert.Equal(t, pr.BaseRepoID, pr.BaseRepo.ID)
+ require.NoError(t, pr.LoadBaseRepo(db.DefaultContext))
+ assert.NotNil(t, pr.BaseRepo)
+ assert.Equal(t, pr.BaseRepoID, pr.BaseRepo.ID)
+}
+
+func TestPullRequest_LoadHeadRepo(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ pr := unittest.AssertExistsAndLoadBean(t, &issues_model.PullRequest{ID: 1})
+ require.NoError(t, pr.LoadHeadRepo(db.DefaultContext))
+ assert.NotNil(t, pr.HeadRepo)
+ assert.Equal(t, pr.HeadRepoID, pr.HeadRepo.ID)
+}
+
+// TODO TestMerge
+
+// TODO TestNewPullRequest
+
+func TestPullRequestsNewest(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ prs, count, err := issues_model.PullRequests(db.DefaultContext, 1, &issues_model.PullRequestsOptions{
+ ListOptions: db.ListOptions{
+ Page: 1,
+ },
+ State: "open",
+ SortType: "newest",
+ })
+ require.NoError(t, err)
+ assert.EqualValues(t, 3, count)
+ if assert.Len(t, prs, 3) {
+ assert.EqualValues(t, 5, prs[0].ID)
+ assert.EqualValues(t, 2, prs[1].ID)
+ assert.EqualValues(t, 1, prs[2].ID)
+ }
+}
+
+func TestLoadRequestedReviewers(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ pull := unittest.AssertExistsAndLoadBean(t, &issues_model.PullRequest{ID: 1})
+ require.NoError(t, pull.LoadIssue(db.DefaultContext))
+ issue := pull.Issue
+ require.NoError(t, issue.LoadRepo(db.DefaultContext))
+ assert.Empty(t, pull.RequestedReviewers)
+
+ user1, err := user_model.GetUserByID(db.DefaultContext, 1)
+ require.NoError(t, err)
+
+ comment, err := issues_model.AddReviewRequest(db.DefaultContext, issue, user1, &user_model.User{})
+ require.NoError(t, err)
+ assert.NotNil(t, comment)
+
+ require.NoError(t, pull.LoadRequestedReviewers(db.DefaultContext))
+ assert.Len(t, pull.RequestedReviewers, 1)
+
+ comment, err = issues_model.RemoveReviewRequest(db.DefaultContext, issue, user1, &user_model.User{})
+ require.NoError(t, err)
+ assert.NotNil(t, comment)
+
+ pull.RequestedReviewers = nil
+ require.NoError(t, pull.LoadRequestedReviewers(db.DefaultContext))
+ assert.Empty(t, pull.RequestedReviewers)
+}
+
+func TestPullRequestsOldest(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ prs, count, err := issues_model.PullRequests(db.DefaultContext, 1, &issues_model.PullRequestsOptions{
+ ListOptions: db.ListOptions{
+ Page: 1,
+ },
+ State: "open",
+ SortType: "oldest",
+ })
+ require.NoError(t, err)
+ assert.EqualValues(t, 3, count)
+ if assert.Len(t, prs, 3) {
+ assert.EqualValues(t, 1, prs[0].ID)
+ assert.EqualValues(t, 2, prs[1].ID)
+ assert.EqualValues(t, 5, prs[2].ID)
+ }
+}
+
+func TestGetUnmergedPullRequest(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ pr, err := issues_model.GetUnmergedPullRequest(db.DefaultContext, 1, 1, "branch2", "master", issues_model.PullRequestFlowGithub)
+ require.NoError(t, err)
+ assert.Equal(t, int64(2), pr.ID)
+
+ _, err = issues_model.GetUnmergedPullRequest(db.DefaultContext, 1, 9223372036854775807, "branch1", "master", issues_model.PullRequestFlowGithub)
+ require.Error(t, err)
+ assert.True(t, issues_model.IsErrPullRequestNotExist(err))
+}
+
+func TestHasUnmergedPullRequestsByHeadInfo(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ exist, err := issues_model.HasUnmergedPullRequestsByHeadInfo(db.DefaultContext, 1, "branch2")
+ require.NoError(t, err)
+ assert.True(t, exist)
+
+ exist, err = issues_model.HasUnmergedPullRequestsByHeadInfo(db.DefaultContext, 1, "not_exist_branch")
+ require.NoError(t, err)
+ assert.False(t, exist)
+}
+
+func TestGetUnmergedPullRequestsByHeadInfo(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ prs, err := issues_model.GetUnmergedPullRequestsByHeadInfo(db.DefaultContext, 1, "branch2")
+ require.NoError(t, err)
+ assert.Len(t, prs, 1)
+ for _, pr := range prs {
+ assert.Equal(t, int64(1), pr.HeadRepoID)
+ assert.Equal(t, "branch2", pr.HeadBranch)
+ }
+}
+
+func TestGetUnmergedPullRequestsByHeadInfoMax(t *testing.T) {
+ defer tests.AddFixtures("models/fixtures/TestGetUnmergedPullRequestsByHeadInfoMax/")()
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repoID := int64(1)
+ olderThan := int64(0)
+
+ // for a NULL created field, the olderThan condition is ignored
+ prs, err := issues_model.GetUnmergedPullRequestsByHeadInfoMax(db.DefaultContext, repoID, olderThan, "branch2")
+ require.NoError(t, err)
+ assert.Equal(t, int64(1), prs[0].HeadRepoID)
+
+ // test for when the created field is set
+ branch := "branchmax"
+ prs, err = issues_model.GetUnmergedPullRequestsByHeadInfoMax(db.DefaultContext, repoID, olderThan, branch)
+ require.NoError(t, err)
+ assert.Empty(t, prs)
+ olderThan = time.Now().UnixNano()
+ require.NoError(t, err)
+ prs, err = issues_model.GetUnmergedPullRequestsByHeadInfoMax(db.DefaultContext, repoID, olderThan, branch)
+ require.NoError(t, err)
+ assert.Len(t, prs, 1)
+ for _, pr := range prs {
+ assert.Equal(t, int64(1), pr.HeadRepoID)
+ assert.Equal(t, branch, pr.HeadBranch)
+ }
+ pr := prs[0]
+
+ for _, testCase := range []struct {
+ table string
+ field string
+ id int64
+ match any
+ nomatch any
+ }{
+ {
+ table: "issue",
+ field: "is_closed",
+ id: pr.IssueID,
+ match: false,
+ nomatch: true,
+ },
+ {
+ table: "pull_request",
+ field: "flow",
+ id: pr.ID,
+ match: issues_model.PullRequestFlowGithub,
+ nomatch: issues_model.PullRequestFlowAGit,
+ },
+ {
+ table: "pull_request",
+ field: "head_repo_id",
+ id: pr.ID,
+ match: pr.HeadRepoID,
+ nomatch: 0,
+ },
+ {
+ table: "pull_request",
+ field: "head_branch",
+ id: pr.ID,
+ match: pr.HeadBranch,
+ nomatch: "something else",
+ },
+ {
+ table: "pull_request",
+ field: "has_merged",
+ id: pr.ID,
+ match: false,
+ nomatch: true,
+ },
+ } {
+ t.Run(testCase.field, func(t *testing.T) {
+ update := fmt.Sprintf("UPDATE `%s` SET `%s` = ? WHERE `id` = ?", testCase.table, testCase.field)
+
+ // expect no match
+ _, err = db.GetEngine(db.DefaultContext).Exec(update, testCase.nomatch, testCase.id)
+ require.NoError(t, err)
+ prs, err = issues_model.GetUnmergedPullRequestsByHeadInfoMax(db.DefaultContext, repoID, olderThan, branch)
+ require.NoError(t, err)
+ assert.Empty(t, prs)
+
+ // expect one match
+ _, err = db.GetEngine(db.DefaultContext).Exec(update, testCase.match, testCase.id)
+ require.NoError(t, err)
+ prs, err = issues_model.GetUnmergedPullRequestsByHeadInfoMax(db.DefaultContext, repoID, olderThan, branch)
+ require.NoError(t, err)
+ assert.Len(t, prs, 1)
+
+ // identical to the known PR
+ assert.Equal(t, pr.ID, prs[0].ID)
+ })
+ }
+}
+
+func TestGetUnmergedPullRequestsByBaseInfo(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ prs, err := issues_model.GetUnmergedPullRequestsByBaseInfo(db.DefaultContext, 1, "master")
+ require.NoError(t, err)
+ assert.Len(t, prs, 1)
+ pr := prs[0]
+ assert.Equal(t, int64(2), pr.ID)
+ assert.Equal(t, int64(1), pr.BaseRepoID)
+ assert.Equal(t, "master", pr.BaseBranch)
+}
+
+func TestGetPullRequestByIndex(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ pr, err := issues_model.GetPullRequestByIndex(db.DefaultContext, 1, 2)
+ require.NoError(t, err)
+ assert.Equal(t, int64(1), pr.BaseRepoID)
+ assert.Equal(t, int64(2), pr.Index)
+
+ _, err = issues_model.GetPullRequestByIndex(db.DefaultContext, 9223372036854775807, 9223372036854775807)
+ require.Error(t, err)
+ assert.True(t, issues_model.IsErrPullRequestNotExist(err))
+
+ _, err = issues_model.GetPullRequestByIndex(db.DefaultContext, 1, 0)
+ require.Error(t, err)
+ assert.True(t, issues_model.IsErrPullRequestNotExist(err))
+}
+
+func TestGetPullRequestByID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ pr, err := issues_model.GetPullRequestByID(db.DefaultContext, 1)
+ require.NoError(t, err)
+ assert.Equal(t, int64(1), pr.ID)
+ assert.Equal(t, int64(2), pr.IssueID)
+
+ _, err = issues_model.GetPullRequestByID(db.DefaultContext, 9223372036854775807)
+ require.Error(t, err)
+ assert.True(t, issues_model.IsErrPullRequestNotExist(err))
+}
+
+func TestGetPullRequestByIssueID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ pr, err := issues_model.GetPullRequestByIssueID(db.DefaultContext, 2)
+ require.NoError(t, err)
+ assert.Equal(t, int64(2), pr.IssueID)
+
+ _, err = issues_model.GetPullRequestByIssueID(db.DefaultContext, 9223372036854775807)
+ require.Error(t, err)
+ assert.True(t, issues_model.IsErrPullRequestNotExist(err))
+}
+
+func TestPullRequest_Update(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ pr := unittest.AssertExistsAndLoadBean(t, &issues_model.PullRequest{ID: 1})
+ pr.BaseBranch = "baseBranch"
+ pr.HeadBranch = "headBranch"
+ pr.Update(db.DefaultContext)
+
+ pr = unittest.AssertExistsAndLoadBean(t, &issues_model.PullRequest{ID: pr.ID})
+ assert.Equal(t, "baseBranch", pr.BaseBranch)
+ assert.Equal(t, "headBranch", pr.HeadBranch)
+ unittest.CheckConsistencyFor(t, pr)
+}
+
+func TestPullRequest_UpdateCols(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ pr := &issues_model.PullRequest{
+ ID: 1,
+ BaseBranch: "baseBranch",
+ HeadBranch: "headBranch",
+ }
+ require.NoError(t, pr.UpdateCols(db.DefaultContext, "head_branch"))
+
+ pr = unittest.AssertExistsAndLoadBean(t, &issues_model.PullRequest{ID: 1})
+ assert.Equal(t, "master", pr.BaseBranch)
+ assert.Equal(t, "headBranch", pr.HeadBranch)
+ unittest.CheckConsistencyFor(t, pr)
+}
+
+func TestPullRequestList_LoadAttributes(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ prs := []*issues_model.PullRequest{
+ unittest.AssertExistsAndLoadBean(t, &issues_model.PullRequest{ID: 1}),
+ unittest.AssertExistsAndLoadBean(t, &issues_model.PullRequest{ID: 2}),
+ }
+ require.NoError(t, issues_model.PullRequestList(prs).LoadAttributes(db.DefaultContext))
+ for _, pr := range prs {
+ assert.NotNil(t, pr.Issue)
+ assert.Equal(t, pr.IssueID, pr.Issue.ID)
+ }
+
+ require.NoError(t, issues_model.PullRequestList([]*issues_model.PullRequest{}).LoadAttributes(db.DefaultContext))
+}
+
+// TODO TestAddTestPullRequestTask
+
+func TestPullRequest_IsWorkInProgress(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ pr := unittest.AssertExistsAndLoadBean(t, &issues_model.PullRequest{ID: 2})
+ pr.LoadIssue(db.DefaultContext)
+
+ assert.False(t, pr.IsWorkInProgress(db.DefaultContext))
+
+ pr.Issue.Title = "WIP: " + pr.Issue.Title
+ assert.True(t, pr.IsWorkInProgress(db.DefaultContext))
+
+ pr.Issue.Title = "[wip]: " + pr.Issue.Title
+ assert.True(t, pr.IsWorkInProgress(db.DefaultContext))
+}
+
+func TestPullRequest_GetWorkInProgressPrefixWorkInProgress(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ pr := unittest.AssertExistsAndLoadBean(t, &issues_model.PullRequest{ID: 2})
+ pr.LoadIssue(db.DefaultContext)
+
+ assert.Empty(t, pr.GetWorkInProgressPrefix(db.DefaultContext))
+
+ original := pr.Issue.Title
+ pr.Issue.Title = "WIP: " + original
+ assert.Equal(t, "WIP:", pr.GetWorkInProgressPrefix(db.DefaultContext))
+
+ pr.Issue.Title = "[wip] " + original
+ assert.Equal(t, "[wip]", pr.GetWorkInProgressPrefix(db.DefaultContext))
+}
+
+func TestDeleteOrphanedObjects(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ countBefore, err := db.GetEngine(db.DefaultContext).Count(&issues_model.PullRequest{})
+ require.NoError(t, err)
+
+ _, err = db.GetEngine(db.DefaultContext).Insert(&issues_model.PullRequest{IssueID: 1000}, &issues_model.PullRequest{IssueID: 1001}, &issues_model.PullRequest{IssueID: 1003})
+ require.NoError(t, err)
+
+ orphaned, err := db.CountOrphanedObjects(db.DefaultContext, "pull_request", "issue", "pull_request.issue_id=issue.id")
+ require.NoError(t, err)
+ assert.EqualValues(t, 3, orphaned)
+
+ err = db.DeleteOrphanedObjects(db.DefaultContext, "pull_request", "issue", "pull_request.issue_id=issue.id")
+ require.NoError(t, err)
+
+ countAfter, err := db.GetEngine(db.DefaultContext).Count(&issues_model.PullRequest{})
+ require.NoError(t, err)
+ assert.EqualValues(t, countBefore, countAfter)
+}
+
+func TestParseCodeOwnersLine(t *testing.T) {
+ type CodeOwnerTest struct {
+ Line string
+ Tokens []string
+ }
+
+ given := []CodeOwnerTest{
+ {Line: "", Tokens: nil},
+ {Line: "# comment", Tokens: []string{}},
+ {Line: "!.* @user1 @org1/team1", Tokens: []string{"!.*", "@user1", "@org1/team1"}},
+ {Line: `.*\\.js @user2 #comment`, Tokens: []string{`.*\.js`, "@user2"}},
+ {Line: `docs/(aws|google|azure)/[^/]*\\.(md|txt) @org3 @org2/team2`, Tokens: []string{`docs/(aws|google|azure)/[^/]*\.(md|txt)`, "@org3", "@org2/team2"}},
+ {Line: `\#path @org3`, Tokens: []string{`#path`, "@org3"}},
+ {Line: `path\ with\ spaces/ @org3`, Tokens: []string{`path with spaces/`, "@org3"}},
+ }
+
+ for _, g := range given {
+ tokens := issues_model.TokenizeCodeOwnersLine(g.Line)
+ assert.Equal(t, g.Tokens, tokens, "Codeowners tokenizer failed")
+ }
+}
+
+func TestGetApprovers(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ pr := unittest.AssertExistsAndLoadBean(t, &issues_model.PullRequest{ID: 5})
+ // Official reviews are already deduplicated. Allow unofficial reviews
+ // to assert that there are no duplicated approvers.
+ setting.Repository.PullRequest.DefaultMergeMessageOfficialApproversOnly = false
+ approvers := pr.GetApprovers(db.DefaultContext)
+ expected := "Reviewed-by: User Five <user5@example.com>\nReviewed-by: Org Six <org6@example.com>\n"
+ assert.EqualValues(t, expected, approvers)
+}
+
+func TestGetPullRequestByMergedCommit(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ pr, err := issues_model.GetPullRequestByMergedCommit(db.DefaultContext, 1, "1a8823cd1a9549fde083f992f6b9b87a7ab74fb3")
+ require.NoError(t, err)
+ assert.EqualValues(t, 1, pr.ID)
+
+ _, err = issues_model.GetPullRequestByMergedCommit(db.DefaultContext, 0, "1a8823cd1a9549fde083f992f6b9b87a7ab74fb3")
+ require.ErrorAs(t, err, &issues_model.ErrPullRequestNotExist{})
+ _, err = issues_model.GetPullRequestByMergedCommit(db.DefaultContext, 1, "")
+ require.ErrorAs(t, err, &issues_model.ErrPullRequestNotExist{})
+}
+
+func TestMigrate_InsertPullRequests(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ reponame := "repo1"
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{Name: reponame})
+ owner := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: repo.OwnerID})
+
+ i := &issues_model.Issue{
+ RepoID: repo.ID,
+ Repo: repo,
+ Title: "title1",
+ Content: "issuecontent1",
+ IsPull: true,
+ PosterID: owner.ID,
+ Poster: owner,
+ }
+
+ p := &issues_model.PullRequest{
+ Issue: i,
+ }
+
+ err := issues_model.InsertPullRequests(db.DefaultContext, p)
+ require.NoError(t, err)
+
+ _ = unittest.AssertExistsAndLoadBean(t, &issues_model.PullRequest{IssueID: i.ID})
+
+ unittest.CheckConsistencyFor(t, &issues_model.Issue{}, &issues_model.PullRequest{})
+}
diff --git a/models/issues/reaction.go b/models/issues/reaction.go
new file mode 100644
index 0000000..eb7faef
--- /dev/null
+++ b/models/issues/reaction.go
@@ -0,0 +1,373 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ErrForbiddenIssueReaction is used when an attempt is made to create a forbidden reaction
+type ErrForbiddenIssueReaction struct {
+ Reaction string
+}
+
+// IsErrForbiddenIssueReaction checks if an error is a ErrForbiddenIssueReaction.
+func IsErrForbiddenIssueReaction(err error) bool {
+ _, ok := err.(ErrForbiddenIssueReaction)
+ return ok
+}
+
+func (err ErrForbiddenIssueReaction) Error() string {
+ return fmt.Sprintf("'%s' is not an allowed reaction", err.Reaction)
+}
+
+func (err ErrForbiddenIssueReaction) Unwrap() error {
+ return util.ErrPermissionDenied
+}
+
+// ErrReactionAlreadyExist is used when an attempt is made to create a reaction that already exists
+type ErrReactionAlreadyExist struct {
+ Reaction string
+}
+
+// IsErrReactionAlreadyExist checks if an error is a ErrReactionAlreadyExist.
+func IsErrReactionAlreadyExist(err error) bool {
+ _, ok := err.(ErrReactionAlreadyExist)
+ return ok
+}
+
+func (err ErrReactionAlreadyExist) Error() string {
+ return fmt.Sprintf("reaction '%s' already exists", err.Reaction)
+}
+
+func (err ErrReactionAlreadyExist) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+// Reaction represents a reaction on an issue or comment.
+type Reaction struct {
+ ID int64 `xorm:"pk autoincr"`
+ Type string `xorm:"INDEX UNIQUE(s) NOT NULL"`
+ IssueID int64 `xorm:"INDEX UNIQUE(s) NOT NULL"`
+ CommentID int64 `xorm:"INDEX UNIQUE(s)"`
+ UserID int64 `xorm:"INDEX UNIQUE(s) NOT NULL"`
+ OriginalAuthorID int64 `xorm:"INDEX UNIQUE(s) NOT NULL DEFAULT(0)"`
+ OriginalAuthor string `xorm:"INDEX UNIQUE(s)"`
+ User *user_model.User `xorm:"-"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+}
+
+// LoadUser loads the user of the reaction
+func (r *Reaction) LoadUser(ctx context.Context) (*user_model.User, error) {
+ if r.User != nil {
+ return r.User, nil
+ }
+ user, err := user_model.GetUserByID(ctx, r.UserID)
+ if err != nil {
+ return nil, err
+ }
+ r.User = user
+ return user, nil
+}
+
+// RemapExternalUser ExternalUserRemappable interface
+func (r *Reaction) RemapExternalUser(externalName string, externalID, userID int64) error {
+ r.OriginalAuthor = externalName
+ r.OriginalAuthorID = externalID
+ r.UserID = userID
+ return nil
+}
+
+// GetUserID ExternalUserRemappable interface
+func (r *Reaction) GetUserID() int64 { return r.UserID }
+
+// GetExternalName ExternalUserRemappable interface
+func (r *Reaction) GetExternalName() string { return r.OriginalAuthor }
+
+// GetExternalID ExternalUserRemappable interface
+func (r *Reaction) GetExternalID() int64 { return r.OriginalAuthorID }
+
+func init() {
+ db.RegisterModel(new(Reaction))
+}
+
+// FindReactionsOptions describes the conditions to Find reactions
+type FindReactionsOptions struct {
+ db.ListOptions
+ IssueID int64
+ CommentID int64
+ UserID int64
+ Reaction string
+}
+
+func (opts *FindReactionsOptions) toConds() builder.Cond {
+ // If IssueID is set, add it to the query
+ cond := builder.NewCond()
+ if opts.IssueID > 0 {
+ cond = cond.And(builder.Eq{"reaction.issue_id": opts.IssueID})
+ }
+ // If CommentID is > 0, filter by that comment.
+ // If it is 0, CommentID is ignored entirely.
+ // If it is -1, explicitly search for issue reactions, i.e. rows where CommentID = 0.
+ if opts.CommentID > 0 {
+ cond = cond.And(builder.Eq{"reaction.comment_id": opts.CommentID})
+ } else if opts.CommentID == -1 {
+ cond = cond.And(builder.Eq{"reaction.comment_id": 0})
+ }
+ if opts.UserID > 0 {
+ cond = cond.And(builder.Eq{
+ "reaction.user_id": opts.UserID,
+ "reaction.original_author_id": 0,
+ })
+ }
+ if opts.Reaction != "" {
+ cond = cond.And(builder.Eq{"reaction.type": opts.Reaction})
+ }
+
+ return cond
+}
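+
+// For example, FindReactionsOptions{IssueID: 2, CommentID: -1} matches only
+// reactions made directly on issue 2, while the same options with CommentID: 0
+// match reactions on the issue and on all of its comments.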
+
+// FindCommentReactions returns a ReactionList of all reactions from a comment
+func FindCommentReactions(ctx context.Context, issueID, commentID int64) (ReactionList, int64, error) {
+ return FindReactions(ctx, FindReactionsOptions{
+ IssueID: issueID,
+ CommentID: commentID,
+ })
+}
+
+// FindIssueReactions returns a ReactionList of all reactions from an issue
+func FindIssueReactions(ctx context.Context, issueID int64, listOptions db.ListOptions) (ReactionList, int64, error) {
+ return FindReactions(ctx, FindReactionsOptions{
+ ListOptions: listOptions,
+ IssueID: issueID,
+ CommentID: -1,
+ })
+}
+
+// FindReactions returns a ReactionList of all reactions from an issue or a comment
+func FindReactions(ctx context.Context, opts FindReactionsOptions) (ReactionList, int64, error) {
+ sess := db.GetEngine(ctx).
+ Where(opts.toConds()).
+ In("reaction.`type`", setting.UI.Reactions).
+ Asc("reaction.issue_id", "reaction.comment_id", "reaction.created_unix", "reaction.id")
+ if opts.Page != 0 {
+ sess = db.SetSessionPagination(sess, &opts)
+
+ reactions := make([]*Reaction, 0, opts.PageSize)
+ count, err := sess.FindAndCount(&reactions)
+ return reactions, count, err
+ }
+
+ reactions := make([]*Reaction, 0, 10)
+ count, err := sess.FindAndCount(&reactions)
+ return reactions, count, err
+}
+
+func createReaction(ctx context.Context, opts *ReactionOptions) (*Reaction, error) {
+ reaction := &Reaction{
+ Type: opts.Type,
+ UserID: opts.DoerID,
+ IssueID: opts.IssueID,
+ CommentID: opts.CommentID,
+ }
+ findOpts := FindReactionsOptions{
+ IssueID: opts.IssueID,
+ CommentID: opts.CommentID,
+ Reaction: opts.Type,
+ UserID: opts.DoerID,
+ }
+ if findOpts.CommentID == 0 {
+ // explicitly search for issue reactions, where CommentID = 0
+ findOpts.CommentID = -1
+ }
+
+ existingR, _, err := FindReactions(ctx, findOpts)
+ if err != nil {
+ return nil, err
+ }
+ if len(existingR) > 0 {
+ return existingR[0], ErrReactionAlreadyExist{Reaction: opts.Type}
+ }
+
+ if err := db.Insert(ctx, reaction); err != nil {
+ return nil, err
+ }
+
+ return reaction, nil
+}
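+
+// Note that on a duplicate, createReaction returns the existing reaction
+// together with ErrReactionAlreadyExist, so callers still receive a usable value.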
+
+// ReactionOptions defines options for creating or deleting reactions
+type ReactionOptions struct {
+ Type string
+ DoerID int64
+ IssueID int64
+ CommentID int64
+}
+
+// CreateReaction creates a reaction for an issue or comment.
+func CreateReaction(ctx context.Context, opts *ReactionOptions) (*Reaction, error) {
+ if !setting.UI.ReactionsLookup.Contains(opts.Type) {
+ return nil, ErrForbiddenIssueReaction{opts.Type}
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer committer.Close()
+
+ reaction, err := createReaction(ctx, opts)
+ if err != nil {
+ return reaction, err
+ }
+
+ if err := committer.Commit(); err != nil {
+ return nil, err
+ }
+ return reaction, nil
+}
+
+// DeleteReaction deletes a reaction for an issue or comment.
+func DeleteReaction(ctx context.Context, opts *ReactionOptions) error {
+ reaction := &Reaction{
+ Type: opts.Type,
+ UserID: opts.DoerID,
+ IssueID: opts.IssueID,
+ CommentID: opts.CommentID,
+ }
+
+ sess := db.GetEngine(ctx).Where("original_author_id = 0")
+ if opts.CommentID == -1 {
+ reaction.CommentID = 0
+ sess.MustCols("comment_id")
+ }
+
+ _, err := sess.Delete(reaction)
+ return err
+}
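+
+// When CommentID is -1, the zero comment ID is forced into the delete
+// condition via MustCols, so only reactions made directly on the issue are removed.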
+
+// DeleteIssueReaction deletes a reaction on issue.
+func DeleteIssueReaction(ctx context.Context, doerID, issueID int64, content string) error {
+ return DeleteReaction(ctx, &ReactionOptions{
+ Type: content,
+ DoerID: doerID,
+ IssueID: issueID,
+ CommentID: -1,
+ })
+}
+
+// DeleteCommentReaction deletes a reaction on comment.
+func DeleteCommentReaction(ctx context.Context, doerID, issueID, commentID int64, content string) error {
+ return DeleteReaction(ctx, &ReactionOptions{
+ Type: content,
+ DoerID: doerID,
+ IssueID: issueID,
+ CommentID: commentID,
+ })
+}
+
+// ReactionList represents list of reactions
+type ReactionList []*Reaction
+
+// HasUser checks if the given user has reacted
+func (list ReactionList) HasUser(userID int64) bool {
+ if userID == 0 {
+ return false
+ }
+ for _, reaction := range list {
+ if reaction.OriginalAuthor == "" && reaction.UserID == userID {
+ return true
+ }
+ }
+ return false
+}
+
+// GroupByType returns reactions grouped by type
+func (list ReactionList) GroupByType() map[string]ReactionList {
+ reactions := make(map[string]ReactionList)
+ for _, reaction := range list {
+ reactions[reaction.Type] = append(reactions[reaction.Type], reaction)
+ }
+ return reactions
+}
+
+func (list ReactionList) getUserIDs() []int64 {
+ return container.FilterSlice(list, func(reaction *Reaction) (int64, bool) {
+ if reaction.OriginalAuthor != "" {
+ return 0, false
+ }
+ return reaction.UserID, true
+ })
+}
+
+func valuesUser(m map[int64]*user_model.User) []*user_model.User {
+ values := make([]*user_model.User, 0, len(m))
+ for _, v := range m {
+ values = append(values, v)
+ }
+ return values
+}
+
+// LoadUsers loads all users of the reactions
+func (list ReactionList) LoadUsers(ctx context.Context, repo *repo_model.Repository) ([]*user_model.User, error) {
+ if len(list) == 0 {
+ return nil, nil
+ }
+
+ userIDs := list.getUserIDs()
+ userMaps := make(map[int64]*user_model.User, len(userIDs))
+ err := db.GetEngine(ctx).
+ In("id", userIDs).
+ Find(&userMaps)
+ if err != nil {
+ return nil, fmt.Errorf("find user: %w", err)
+ }
+
+ for _, reaction := range list {
+ if reaction.OriginalAuthor != "" {
+ reaction.User = user_model.NewReplaceUser(fmt.Sprintf("%s(%s)", reaction.OriginalAuthor, repo.OriginalServiceType.Name()))
+ } else if user, ok := userMaps[reaction.UserID]; ok {
+ reaction.User = user
+ } else {
+ reaction.User = user_model.NewGhostUser()
+ }
+ }
+ return valuesUser(userMaps), nil
+}
+
+// GetFirstUsers returns the display names of the first users who reacted, separated by commas
+func (list ReactionList) GetFirstUsers() string {
+ var buffer bytes.Buffer
+ rem := setting.UI.ReactionMaxUserNum
+ for _, reaction := range list {
+ if buffer.Len() > 0 {
+ buffer.WriteString(", ")
+ }
+ buffer.WriteString(reaction.User.Name)
+ if rem--; rem == 0 {
+ break
+ }
+ }
+ return buffer.String()
+}
+
+// GetMoreUserCount returns the count of users not shown in the reaction tooltip
+func (list ReactionList) GetMoreUserCount() int {
+ if len(list) <= setting.UI.ReactionMaxUserNum {
+ return 0
+ }
+ return len(list) - setting.UI.ReactionMaxUserNum
+}
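+
+// For example, with setting.UI.ReactionMaxUserNum set to 2 and four "heart"
+// reactions, GetFirstUsers lists the first two display names and
+// GetMoreUserCount returns 2.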
diff --git a/models/issues/reaction_test.go b/models/issues/reaction_test.go
new file mode 100644
index 0000000..e02e6d7
--- /dev/null
+++ b/models/issues/reaction_test.go
@@ -0,0 +1,178 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/setting"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func addReaction(t *testing.T, doerID, issueID, commentID int64, content string) {
+ var reaction *issues_model.Reaction
+ var err error
+ // NOTE: This does not check user blocking.
+ reaction, err = issues_model.CreateReaction(db.DefaultContext, &issues_model.ReactionOptions{
+ DoerID: doerID,
+ IssueID: issueID,
+ CommentID: commentID,
+ Type: content,
+ })
+
+ require.NoError(t, err)
+ assert.NotNil(t, reaction)
+}
+
+func TestIssueAddReaction(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ user1 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+
+ var issue1ID int64 = 1
+
+ addReaction(t, user1.ID, issue1ID, 0, "heart")
+
+ unittest.AssertExistsAndLoadBean(t, &issues_model.Reaction{Type: "heart", UserID: user1.ID, IssueID: issue1ID})
+}
+
+func TestIssueAddDuplicateReaction(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ user1 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+
+ var issue1ID int64 = 1
+
+ addReaction(t, user1.ID, issue1ID, 0, "heart")
+
+ reaction, err := issues_model.CreateReaction(db.DefaultContext, &issues_model.ReactionOptions{
+ DoerID: user1.ID,
+ IssueID: issue1ID,
+ Type: "heart",
+ })
+ require.Error(t, err)
+ assert.Equal(t, issues_model.ErrReactionAlreadyExist{Reaction: "heart"}, err)
+
+ existingR := unittest.AssertExistsAndLoadBean(t, &issues_model.Reaction{Type: "heart", UserID: user1.ID, IssueID: issue1ID})
+ assert.Equal(t, existingR.ID, reaction.ID)
+}
+
+func TestIssueDeleteReaction(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ user1 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+
+ var issue1ID int64 = 1
+
+ addReaction(t, user1.ID, issue1ID, 0, "heart")
+
+ err := issues_model.DeleteIssueReaction(db.DefaultContext, user1.ID, issue1ID, "heart")
+ require.NoError(t, err)
+
+ unittest.AssertNotExistsBean(t, &issues_model.Reaction{Type: "heart", UserID: user1.ID, IssueID: issue1ID})
+}
+
+func TestIssueReactionCount(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ setting.UI.ReactionMaxUserNum = 2
+
+ user1 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+ user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+ org3 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 3})
+ user4 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 4})
+ ghost := user_model.NewGhostUser()
+
+ var issueID int64 = 2
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+
+ addReaction(t, user1.ID, issueID, 0, "heart")
+ addReaction(t, user2.ID, issueID, 0, "heart")
+ addReaction(t, org3.ID, issueID, 0, "heart")
+ addReaction(t, org3.ID, issueID, 0, "+1")
+ addReaction(t, user4.ID, issueID, 0, "+1")
+ addReaction(t, user4.ID, issueID, 0, "heart")
+ addReaction(t, ghost.ID, issueID, 0, "-1")
+
+ reactionsList, _, err := issues_model.FindReactions(db.DefaultContext, issues_model.FindReactionsOptions{
+ IssueID: issueID,
+ })
+ require.NoError(t, err)
+ assert.Len(t, reactionsList, 7)
+ _, err = reactionsList.LoadUsers(db.DefaultContext, repo)
+ require.NoError(t, err)
+
+ reactions := reactionsList.GroupByType()
+ assert.Len(t, reactions["heart"], 4)
+ assert.Equal(t, 2, reactions["heart"].GetMoreUserCount())
+ assert.Equal(t, user1.Name+", "+user2.Name, reactions["heart"].GetFirstUsers())
+ assert.True(t, reactions["heart"].HasUser(1))
+ assert.False(t, reactions["heart"].HasUser(5))
+ assert.False(t, reactions["heart"].HasUser(0))
+ assert.Len(t, reactions["+1"], 2)
+ assert.Equal(t, 0, reactions["+1"].GetMoreUserCount())
+ assert.Len(t, reactions["-1"], 1)
+}
+
+func TestIssueCommentAddReaction(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ user1 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+
+ var issue1ID int64 = 1
+ var comment1ID int64 = 1
+
+ addReaction(t, user1.ID, issue1ID, comment1ID, "heart")
+
+ unittest.AssertExistsAndLoadBean(t, &issues_model.Reaction{Type: "heart", UserID: user1.ID, IssueID: issue1ID, CommentID: comment1ID})
+}
+
+func TestIssueCommentDeleteReaction(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ user1 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+ user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+ org3 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 3})
+ user4 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 4})
+
+ var issue1ID int64 = 1
+ var comment1ID int64 = 1
+
+ addReaction(t, user1.ID, issue1ID, comment1ID, "heart")
+ addReaction(t, user2.ID, issue1ID, comment1ID, "heart")
+ addReaction(t, org3.ID, issue1ID, comment1ID, "heart")
+ addReaction(t, user4.ID, issue1ID, comment1ID, "+1")
+
+ reactionsList, _, err := issues_model.FindReactions(db.DefaultContext, issues_model.FindReactionsOptions{
+ IssueID: issue1ID,
+ CommentID: comment1ID,
+ })
+ require.NoError(t, err)
+ assert.Len(t, reactionsList, 4)
+
+ reactions := reactionsList.GroupByType()
+ assert.Len(t, reactions["heart"], 3)
+ assert.Len(t, reactions["+1"], 1)
+}
+
+func TestIssueCommentReactionCount(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ user1 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+
+ var issue1ID int64 = 1
+ var comment1ID int64 = 1
+
+ addReaction(t, user1.ID, issue1ID, comment1ID, "heart")
+ require.NoError(t, issues_model.DeleteCommentReaction(db.DefaultContext, user1.ID, issue1ID, comment1ID, "heart"))
+
+ unittest.AssertNotExistsBean(t, &issues_model.Reaction{Type: "heart", UserID: user1.ID, IssueID: issue1ID, CommentID: comment1ID})
+}
diff --git a/models/issues/review.go b/models/issues/review.go
new file mode 100644
index 0000000..a39c120
--- /dev/null
+++ b/models/issues/review.go
@@ -0,0 +1,1056 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "context"
+ "fmt"
+ "slices"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ git_model "code.gitea.io/gitea/models/git"
+ "code.gitea.io/gitea/models/organization"
+ "code.gitea.io/gitea/models/perm"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ErrReviewNotExist represents a "ReviewNotExist" kind of error.
+type ErrReviewNotExist struct {
+ ID int64
+}
+
+// IsErrReviewNotExist checks if an error is a ErrReviewNotExist.
+func IsErrReviewNotExist(err error) bool {
+ _, ok := err.(ErrReviewNotExist)
+ return ok
+}
+
+func (err ErrReviewNotExist) Error() string {
+ return fmt.Sprintf("review does not exist [id: %d]", err.ID)
+}
+
+func (err ErrReviewNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ErrNotValidReviewRequest represents an invalid review request modification
+type ErrNotValidReviewRequest struct {
+ Reason string
+ UserID int64
+ RepoID int64
+}
+
+// IsErrNotValidReviewRequest checks if an error is a ErrNotValidReviewRequest.
+func IsErrNotValidReviewRequest(err error) bool {
+ _, ok := err.(ErrNotValidReviewRequest)
+ return ok
+}
+
+func (err ErrNotValidReviewRequest) Error() string {
+ return fmt.Sprintf("%s [user_id: %d, repo_id: %d]",
+ err.Reason,
+ err.UserID,
+ err.RepoID)
+}
+
+func (err ErrNotValidReviewRequest) Unwrap() error {
+ return util.ErrInvalidArgument
+}
+
+// ErrReviewRequestOnClosedPR represents an error when a user tries to request a re-review on a closed or merged PR.
+type ErrReviewRequestOnClosedPR struct{}
+
+// IsErrReviewRequestOnClosedPR checks if an error is an ErrReviewRequestOnClosedPR.
+func IsErrReviewRequestOnClosedPR(err error) bool {
+ _, ok := err.(ErrReviewRequestOnClosedPR)
+ return ok
+}
+
+func (err ErrReviewRequestOnClosedPR) Error() string {
+ return "cannot request a re-review on a closed or merged PR"
+}
+
+func (err ErrReviewRequestOnClosedPR) Unwrap() error {
+ return util.ErrPermissionDenied
+}
+
+// ReviewType defines the sort of feedback a review gives
+type ReviewType int
+
+// ReviewTypeUnknown unknown review type
+const ReviewTypeUnknown ReviewType = -1
+
+const (
+ // ReviewTypePending is a review which is not published yet
+ ReviewTypePending ReviewType = iota
+ // ReviewTypeApprove approves changes
+ ReviewTypeApprove
+ // ReviewTypeComment gives general feedback
+ ReviewTypeComment
+ // ReviewTypeReject gives feedback blocking merge
+ ReviewTypeReject
+ // ReviewTypeRequest request review from others
+ ReviewTypeRequest
+)
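+
+// These values are persisted as integers in the review table, so the order of
+// the constants must not change; new review types should be appended.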
+
+// Icon returns the corresponding icon for the review type
+func (rt ReviewType) Icon() string {
+ switch rt {
+ case ReviewTypeApprove:
+ return "check"
+ case ReviewTypeReject:
+ return "diff"
+ case ReviewTypeComment:
+ return "comment"
+ case ReviewTypeRequest:
+ return "dot-fill"
+ default:
+ return "comment"
+ }
+}
+
+// Review represents a collection of code comments giving feedback for a PR
+type Review struct {
+ ID int64 `xorm:"pk autoincr"`
+ Type ReviewType
+ Reviewer *user_model.User `xorm:"-"`
+ ReviewerID int64 `xorm:"index"`
+ ReviewerTeamID int64 `xorm:"NOT NULL DEFAULT 0"`
+ ReviewerTeam *organization.Team `xorm:"-"`
+ OriginalAuthor string
+ OriginalAuthorID int64
+ Issue *Issue `xorm:"-"`
+ IssueID int64 `xorm:"index"`
+ Content string `xorm:"TEXT"`
+ // Official is a review made by an assigned approver (counts towards approval)
+ Official bool `xorm:"NOT NULL DEFAULT false"`
+ CommitID string `xorm:"VARCHAR(64)"`
+ Stale bool `xorm:"NOT NULL DEFAULT false"`
+ Dismissed bool `xorm:"NOT NULL DEFAULT false"`
+
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+
+ // CodeComments are the initial code comments of the review
+ CodeComments CodeComments `xorm:"-"`
+
+ Comments []*Comment `xorm:"-"`
+}
+
+func init() {
+ db.RegisterModel(new(Review))
+}
+
+// LoadCodeComments loads CodeComments
+func (r *Review) LoadCodeComments(ctx context.Context) (err error) {
+ if r.CodeComments != nil {
+ return err
+ }
+ if err = r.LoadIssue(ctx); err != nil {
+ return err
+ }
+ r.CodeComments, err = fetchCodeCommentsByReview(ctx, r.Issue, nil, r, false)
+ return err
+}
+
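+// LoadIssue loads the issue the review belongs to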
+func (r *Review) LoadIssue(ctx context.Context) (err error) {
+ if r.Issue != nil {
+ return err
+ }
+ r.Issue, err = GetIssueByID(ctx, r.IssueID)
+ return err
+}
+
+// LoadReviewer loads reviewer
+func (r *Review) LoadReviewer(ctx context.Context) (err error) {
+ if r.ReviewerID == 0 || r.Reviewer != nil {
+ return err
+ }
+ r.Reviewer, err = user_model.GetPossibleUserByID(ctx, r.ReviewerID)
+ if err != nil {
+ if !user_model.IsErrUserNotExist(err) {
+ return fmt.Errorf("GetPossibleUserByID [%d]: %w", r.ReviewerID, err)
+ }
+ r.ReviewerID = user_model.GhostUserID
+ r.Reviewer = user_model.NewGhostUser()
+ return nil
+ }
+ return err
+}
+
+// LoadReviewerTeam loads reviewer team
+func (r *Review) LoadReviewerTeam(ctx context.Context) (err error) {
+ if r.ReviewerTeamID == 0 || r.ReviewerTeam != nil {
+ return nil
+ }
+
+ r.ReviewerTeam, err = organization.GetTeamByID(ctx, r.ReviewerTeamID)
+ return err
+}
+
+// LoadAttributes loads all attributes except CodeComments
+func (r *Review) LoadAttributes(ctx context.Context) (err error) {
+ if err = r.LoadIssue(ctx); err != nil {
+ return err
+ }
+ if err = r.LoadCodeComments(ctx); err != nil {
+ return err
+ }
+ if err = r.LoadReviewer(ctx); err != nil {
+ return err
+ }
+ if err = r.LoadReviewerTeam(ctx); err != nil {
+ return err
+ }
+ return err
+}
+
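+// HTMLTypeColorName returns a color name describing the state of the review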
+func (r *Review) HTMLTypeColorName() string {
+ switch r.Type {
+ case ReviewTypeApprove:
+ if r.Stale {
+ return "yellow"
+ }
+ return "green"
+ case ReviewTypeComment:
+ return "grey"
+ case ReviewTypeReject:
+ return "red"
+ case ReviewTypeRequest:
+ return "yellow"
+ }
+ return "grey"
+}
+
+// GetReviewByID returns the review by the given ID
+func GetReviewByID(ctx context.Context, id int64) (*Review, error) {
+ review := new(Review)
+ if has, err := db.GetEngine(ctx).ID(id).Get(review); err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrReviewNotExist{ID: id}
+ }
+ return review, nil
+}
+
+// CreateReviewOptions represents the options to create a review. Type, Issue and Reviewer (or ReviewerTeam) are required.
+type CreateReviewOptions struct {
+ Content string
+ Type ReviewType
+ Issue *Issue
+ Reviewer *user_model.User
+ ReviewerTeam *organization.Team
+ Official bool
+ CommitID string
+ Stale bool
+}
+
+// IsOfficialReviewer checks whether the given reviewer can make official reviews on the issue (counts towards required approvals)
+func IsOfficialReviewer(ctx context.Context, issue *Issue, reviewer *user_model.User) (bool, error) {
+ if err := issue.LoadPullRequest(ctx); err != nil {
+ return false, err
+ }
+
+ pr := issue.PullRequest
+ rule, err := git_model.GetFirstMatchProtectedBranchRule(ctx, pr.BaseRepoID, pr.BaseBranch)
+ if err != nil {
+ return false, err
+ }
+ if rule == nil {
+ // if no rule is found, then user with write access can make official reviews
+ err := pr.LoadBaseRepo(ctx)
+ if err != nil {
+ return false, err
+ }
+ writeAccess, err := access_model.HasAccessUnit(ctx, reviewer, pr.BaseRepo, unit.TypeCode, perm.AccessModeWrite)
+ if err != nil {
+ return false, err
+ }
+ return writeAccess, nil
+ }
+
+ official, err := git_model.IsUserOfficialReviewer(ctx, rule, reviewer)
+ if official || err != nil {
+ return official, err
+ }
+
+ return false, nil
+}
+
+// IsOfficialReviewerTeam checks whether reviewers in this team can make official reviews on the issue (counts towards required approvals)
+func IsOfficialReviewerTeam(ctx context.Context, issue *Issue, team *organization.Team) (bool, error) {
+ if err := issue.LoadPullRequest(ctx); err != nil {
+ return false, err
+ }
+ pb, err := git_model.GetFirstMatchProtectedBranchRule(ctx, issue.PullRequest.BaseRepoID, issue.PullRequest.BaseBranch)
+ if err != nil {
+ return false, err
+ }
+ if pb == nil {
+ return false, nil
+ }
+
+ if !pb.EnableApprovalsWhitelist {
+ return team.UnitAccessMode(ctx, unit.TypeCode) >= perm.AccessModeWrite, nil
+ }
+
+ return slices.Contains(pb.ApprovalsWhitelistTeamIDs, team.ID), nil
+}
+
+// CreateReview creates a new review based on opts
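+//
+// A minimal usage sketch (illustrative only; issue and reviewer stand in for
+// already-loaded beans):
+//
+//	review, err := CreateReview(ctx, CreateReviewOptions{
+//		Type:     ReviewTypeApprove,
+//		Issue:    issue,
+//		Reviewer: reviewer,
+//	})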
+func CreateReview(ctx context.Context, opts CreateReviewOptions) (*Review, error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer committer.Close()
+ sess := db.GetEngine(ctx)
+
+ review := &Review{
+ Issue: opts.Issue,
+ IssueID: opts.Issue.ID,
+ Reviewer: opts.Reviewer,
+ ReviewerTeam: opts.ReviewerTeam,
+ Content: opts.Content,
+ Official: opts.Official,
+ CommitID: opts.CommitID,
+ Stale: opts.Stale,
+ }
+
+ if opts.Reviewer != nil {
+ review.Type = opts.Type
+ review.ReviewerID = opts.Reviewer.ID
+
+ reviewCond := builder.Eq{"reviewer_id": opts.Reviewer.ID, "issue_id": opts.Issue.ID}
+ // make sure user review requests are cleared
+ if opts.Type != ReviewTypePending {
+ if _, err := sess.Where(reviewCond.And(builder.Eq{"type": ReviewTypeRequest})).Delete(new(Review)); err != nil {
+ return nil, err
+ }
+ }
+ // make sure that no old review surfaces if the created review gets dismissed
+ // other types can be ignored, as they don't affect branch protection
+ if opts.Type == ReviewTypeApprove || opts.Type == ReviewTypeReject {
+ if _, err := sess.Where(reviewCond.And(builder.In("type", ReviewTypeApprove, ReviewTypeReject))).
+ Cols("dismissed").Update(&Review{Dismissed: true}); err != nil {
+ return nil, err
+ }
+ }
+ } else if opts.ReviewerTeam != nil {
+ review.Type = ReviewTypeRequest
+ review.ReviewerTeamID = opts.ReviewerTeam.ID
+ } else {
+ return nil, fmt.Errorf("provide either reviewer or reviewer team")
+ }
+
+ if _, err := sess.Insert(review); err != nil {
+ return nil, err
+ }
+ return review, committer.Commit()
+}
+
+// GetCurrentReview returns the current pending review by the reviewer for the given issue
+func GetCurrentReview(ctx context.Context, reviewer *user_model.User, issue *Issue) (*Review, error) {
+ if reviewer == nil {
+ return nil, nil
+ }
+ reviews, err := FindReviews(ctx, FindReviewOptions{
+ Types: []ReviewType{ReviewTypePending},
+ IssueID: issue.ID,
+ ReviewerID: reviewer.ID,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if len(reviews) == 0 {
+ return nil, ErrReviewNotExist{}
+ }
+ reviews[0].Reviewer = reviewer
+ reviews[0].Issue = issue
+ return reviews[0], nil
+}
+
+// ReviewExists returns whether a review exists for a particular line of code in the PR
+func ReviewExists(ctx context.Context, issue *Issue, treePath string, line int64) (bool, error) {
+ return db.GetEngine(ctx).Cols("id").Exist(&Comment{IssueID: issue.ID, TreePath: treePath, Line: line, Type: CommentTypeCode})
+}
+
+// ContentEmptyErr represents an empty content error
+type ContentEmptyErr struct{}
+
+func (ContentEmptyErr) Error() string {
+ return "Review content is empty"
+}
+
+// IsContentEmptyErr returns true if err is a ContentEmptyErr
+func IsContentEmptyErr(err error) bool {
+ _, ok := err.(ContentEmptyErr)
+ return ok
+}
+
+// SubmitReview creates a review out of the existing pending review or creates a new one if no pending review exists
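+//
+// A usage sketch (illustrative only; doer, issue and commitID are placeholders):
+//
+//	review, comment, err := SubmitReview(ctx, doer, issue, ReviewTypeApprove, "LGTM", commitID, false, nil)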
+func SubmitReview(ctx context.Context, doer *user_model.User, issue *Issue, reviewType ReviewType, content, commitID string, stale bool, attachmentUUIDs []string) (*Review, *Comment, error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer committer.Close()
+ sess := db.GetEngine(ctx)
+
+ official := false
+
+ review, err := GetCurrentReview(ctx, doer, issue)
+ if err != nil {
+ if !IsErrReviewNotExist(err) {
+ return nil, nil, err
+ }
+
+ if reviewType != ReviewTypeApprove && len(strings.TrimSpace(content)) == 0 {
+ return nil, nil, ContentEmptyErr{}
+ }
+
+ if reviewType == ReviewTypeApprove || reviewType == ReviewTypeReject {
+ // Only the reviewer's latest review of type approve or reject counts as "official", so existing reviews need to be cleared
+ if _, err := db.Exec(ctx, "UPDATE `review` SET official=? WHERE issue_id=? AND reviewer_id=?", false, issue.ID, doer.ID); err != nil {
+ return nil, nil, err
+ }
+ if official, err = IsOfficialReviewer(ctx, issue, doer); err != nil {
+ return nil, nil, err
+ }
+ }
+
+ // No current review. Create a new one!
+ if review, err = CreateReview(ctx, CreateReviewOptions{
+ Type: reviewType,
+ Issue: issue,
+ Reviewer: doer,
+ Content: content,
+ Official: official,
+ CommitID: commitID,
+ Stale: stale,
+ }); err != nil {
+ return nil, nil, err
+ }
+ } else {
+ if err := review.LoadCodeComments(ctx); err != nil {
+ return nil, nil, err
+ }
+ if reviewType != ReviewTypeApprove && len(review.CodeComments) == 0 && len(strings.TrimSpace(content)) == 0 {
+ return nil, nil, ContentEmptyErr{}
+ }
+
+ if reviewType == ReviewTypeApprove || reviewType == ReviewTypeReject {
+ // Only the reviewer's latest review of type approve or reject counts as "official", so existing reviews need to be cleared
+ if _, err := db.Exec(ctx, "UPDATE `review` SET official=? WHERE issue_id=? AND reviewer_id=?", false, issue.ID, doer.ID); err != nil {
+ return nil, nil, err
+ }
+ if official, err = IsOfficialReviewer(ctx, issue, doer); err != nil {
+ return nil, nil, err
+ }
+ }
+
+ review.Official = official
+ review.Issue = issue
+ review.Content = content
+ review.Type = reviewType
+ review.CommitID = commitID
+ review.Stale = stale
+
+ if _, err := sess.ID(review.ID).Cols("content, type, official, commit_id, stale").Update(review); err != nil {
+ return nil, nil, err
+ }
+ }
+
+ comm, err := CreateComment(ctx, &CreateCommentOptions{
+ Type: CommentTypeReview,
+ Doer: doer,
+ Content: review.Content,
+ Issue: issue,
+ Repo: issue.Repo,
+ ReviewID: review.ID,
+ Attachments: attachmentUUIDs,
+ })
+ if err != nil || comm == nil {
+ return nil, nil, err
+ }
+
+ // try to remove the team review request if needed
+ if issue.Repo.Owner.IsOrganization() && (reviewType == ReviewTypeApprove || reviewType == ReviewTypeReject) {
+ teamReviewRequests := make([]*Review, 0, 10)
+ if err := sess.SQL("SELECT * FROM review WHERE issue_id = ? AND reviewer_team_id > 0 AND type = ?", issue.ID, ReviewTypeRequest).Find(&teamReviewRequests); err != nil {
+ return nil, nil, err
+ }
+
+ for _, teamReviewRequest := range teamReviewRequests {
+ ok, err := organization.IsTeamMember(ctx, issue.Repo.OwnerID, teamReviewRequest.ReviewerTeamID, doer.ID)
+ if err != nil {
+ return nil, nil, err
+ } else if !ok {
+ continue
+ }
+
+ if _, err := db.DeleteByID[Review](ctx, teamReviewRequest.ID); err != nil {
+ return nil, nil, err
+ }
+ }
+ }
+
+ comm.Review = review
+ return review, comm, committer.Commit()
+}
+
+// GetReviewByIssueIDAndUserID gets the latest review by the reviewer for a pull request
+func GetReviewByIssueIDAndUserID(ctx context.Context, issueID, userID int64) (*Review, error) {
+ review := new(Review)
+
+ has, err := db.GetEngine(ctx).Where(
+ builder.In("type", ReviewTypeApprove, ReviewTypeReject, ReviewTypeRequest).
+ And(builder.Eq{"issue_id": issueID, "reviewer_id": userID, "original_author_id": 0})).
+ Desc("id").
+ Get(review)
+ if err != nil {
+ return nil, err
+ }
+
+ if !has {
+ return nil, ErrReviewNotExist{}
+ }
+
+ return review, nil
+}
+
+// GetTeamReviewerByIssueIDAndTeamID gets the latest review request by the reviewer team for a pull request
+func GetTeamReviewerByIssueIDAndTeamID(ctx context.Context, issueID, teamID int64) (*Review, error) {
+ review := new(Review)
+
+ has, err := db.GetEngine(ctx).Where(builder.Eq{"issue_id": issueID, "reviewer_team_id": teamID}).
+ Desc("id").
+ Get(review)
+ if err != nil {
+ return nil, err
+ }
+
+ if !has {
+ return nil, ErrReviewNotExist{0}
+ }
+
+ return review, err
+}
+
+// MarkReviewsAsStale marks existing reviews as stale
+func MarkReviewsAsStale(ctx context.Context, issueID int64) (err error) {
+ _, err = db.GetEngine(ctx).Exec("UPDATE `review` SET stale=? WHERE issue_id=?", true, issueID)
+
+ return err
+}
+
+// MarkReviewsAsNotStale marks existing reviews as not stale for a given commit SHA
+func MarkReviewsAsNotStale(ctx context.Context, issueID int64, commitID string) (err error) {
+ _, err = db.GetEngine(ctx).Exec("UPDATE `review` SET stale=? WHERE issue_id=? AND commit_id=?", false, issueID, commitID)
+
+ return err
+}
+
+// DismissReview changes the dismissal status of a review
+func DismissReview(ctx context.Context, review *Review, isDismiss bool) (err error) {
+ if review.Dismissed == isDismiss || (review.Type != ReviewTypeApprove && review.Type != ReviewTypeReject) {
+ return nil
+ }
+
+ review.Dismissed = isDismiss
+
+ if review.ID == 0 {
+ return ErrReviewNotExist{}
+ }
+
+ _, err = db.GetEngine(ctx).ID(review.ID).Cols("dismissed").Update(review)
+
+ return err
+}
+
+// InsertReviews inserts reviews and review comments
+func InsertReviews(ctx context.Context, reviews []*Review) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+ sess := db.GetEngine(ctx)
+
+ for _, review := range reviews {
+ if _, err := sess.NoAutoTime().Insert(review); err != nil {
+ return err
+ }
+
+ if _, err := sess.NoAutoTime().Insert(&Comment{
+ Type: CommentTypeReview,
+ Content: review.Content,
+ PosterID: review.ReviewerID,
+ OriginalAuthor: review.OriginalAuthor,
+ OriginalAuthorID: review.OriginalAuthorID,
+ IssueID: review.IssueID,
+ ReviewID: review.ID,
+ CreatedUnix: review.CreatedUnix,
+ UpdatedUnix: review.UpdatedUnix,
+ }); err != nil {
+ return err
+ }
+
+ for _, c := range review.Comments {
+ c.ReviewID = review.ID
+ }
+
+ if len(review.Comments) > 0 {
+ if _, err := sess.NoAutoTime().Insert(review.Comments); err != nil {
+ return err
+ }
+ }
+ }
+
+ return committer.Commit()
+}
+
+// AddReviewRequest adds a review request for the given reviewer
+func AddReviewRequest(ctx context.Context, issue *Issue, reviewer, doer *user_model.User) (*Comment, error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer committer.Close()
+ sess := db.GetEngine(ctx)
+
+ review, err := GetReviewByIssueIDAndUserID(ctx, issue.ID, reviewer.ID)
+ if err != nil && !IsErrReviewNotExist(err) {
+ return nil, err
+ }
+
+ if review != nil {
+ // skip it when the reviewer has already been requested to review
+ if review.Type == ReviewTypeRequest {
+ return nil, committer.Commit() // still commit the transaction, or committer.Close() will rollback it, even if it's a reused transaction.
+ }
+
+ if issue.IsClosed {
+ return nil, ErrReviewRequestOnClosedPR{}
+ }
+
+ if issue.IsPull {
+ if err := issue.LoadPullRequest(ctx); err != nil {
+ return nil, err
+ }
+ if issue.PullRequest.HasMerged {
+ return nil, ErrReviewRequestOnClosedPR{}
+ }
+ }
+ }
+
+ // if the reviewer is an official reviewer,
+ // remove the official flag from all previous reviews
+ official, err := IsOfficialReviewer(ctx, issue, reviewer)
+ if err != nil {
+ return nil, err
+ } else if official {
+ if _, err := sess.Exec("UPDATE `review` SET official=? WHERE issue_id=? AND reviewer_id=?", false, issue.ID, reviewer.ID); err != nil {
+ return nil, err
+ }
+ }
+
+ review, err = CreateReview(ctx, CreateReviewOptions{
+ Type: ReviewTypeRequest,
+ Issue: issue,
+ Reviewer: reviewer,
+ Official: official,
+ Stale: false,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ comment, err := CreateComment(ctx, &CreateCommentOptions{
+ Type: CommentTypeReviewRequest,
+ Doer: doer,
+ Repo: issue.Repo,
+ Issue: issue,
+ RemovedAssignee: false, // Use RemovedAssignee as !isRequest
+ AssigneeID: reviewer.ID, // Use AssigneeID as reviewer ID
+ ReviewID: review.ID,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // the caller can use the created comment to retrieve the created review too.
+ comment.Review = review
+
+ return comment, committer.Commit()
+}
+
+// RemoveReviewRequest removes a review request for the given reviewer
+func RemoveReviewRequest(ctx context.Context, issue *Issue, reviewer, doer *user_model.User) (*Comment, error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer committer.Close()
+
+ review, err := GetReviewByIssueIDAndUserID(ctx, issue.ID, reviewer.ID)
+ if err != nil && !IsErrReviewNotExist(err) {
+ return nil, err
+ }
+
+ if review == nil || review.Type != ReviewTypeRequest {
+ return nil, nil
+ }
+
+ if _, err = db.DeleteByBean(ctx, review); err != nil {
+ return nil, err
+ }
+
+ official, err := IsOfficialReviewer(ctx, issue, reviewer)
+ if err != nil {
+ return nil, err
+ } else if official {
+ if err := restoreLatestOfficialReview(ctx, issue.ID, reviewer.ID); err != nil {
+ return nil, err
+ }
+ }
+
+ comment, err := CreateComment(ctx, &CreateCommentOptions{
+ Type: CommentTypeReviewRequest,
+ Doer: doer,
+ Repo: issue.Repo,
+ Issue: issue,
+ RemovedAssignee: true, // Use RemovedAssignee as !isRequest
+ AssigneeID: reviewer.ID, // Use AssigneeID as reviewer ID
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return comment, committer.Commit()
+}
+
+// restoreLatestOfficialReview marks the latest remaining review of the reviewer as official again
+func restoreLatestOfficialReview(ctx context.Context, issueID, reviewerID int64) error {
+ review, err := GetReviewByIssueIDAndUserID(ctx, issueID, reviewerID)
+ if err != nil && !IsErrReviewNotExist(err) {
+ return err
+ }
+
+ if review != nil {
+ if _, err := db.Exec(ctx, "UPDATE `review` SET official=? WHERE id=?", true, review.ID); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// AddTeamReviewRequest adds a review request for the given team
+func AddTeamReviewRequest(ctx context.Context, issue *Issue, reviewer *organization.Team, doer *user_model.User) (*Comment, error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer committer.Close()
+
+ review, err := GetTeamReviewerByIssueIDAndTeamID(ctx, issue.ID, reviewer.ID)
+ if err != nil && !IsErrReviewNotExist(err) {
+ return nil, err
+ }
+
+ // This team has already been requested to review - therefore skip this.
+ if review != nil {
+ return nil, nil
+ }
+
+ official, err := IsOfficialReviewerTeam(ctx, issue, reviewer)
+ if err != nil {
+ return nil, fmt.Errorf("isOfficialReviewerTeam(): %w", err)
+ } else if !official {
+ if official, err = IsOfficialReviewer(ctx, issue, doer); err != nil {
+ return nil, fmt.Errorf("isOfficialReviewer(): %w", err)
+ }
+ }
+
+ if review, err = CreateReview(ctx, CreateReviewOptions{
+ Type: ReviewTypeRequest,
+ Issue: issue,
+ ReviewerTeam: reviewer,
+ Official: official,
+ Stale: false,
+ }); err != nil {
+ return nil, err
+ }
+
+ if official {
+ if _, err := db.Exec(ctx, "UPDATE `review` SET official=? WHERE issue_id=? AND reviewer_team_id=?", false, issue.ID, reviewer.ID); err != nil {
+ return nil, err
+ }
+ }
+
+ comment, err := CreateComment(ctx, &CreateCommentOptions{
+ Type: CommentTypeReviewRequest,
+ Doer: doer,
+ Repo: issue.Repo,
+ Issue: issue,
+ RemovedAssignee: false, // Use RemovedAssignee as !isRequest
+ AssigneeTeamID: reviewer.ID, // Use AssigneeTeamID as reviewer team ID
+ ReviewID: review.ID,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("CreateComment(): %w", err)
+ }
+
+ return comment, committer.Commit()
+}
+
+// RemoveTeamReviewRequest removes a review request for the given team
+func RemoveTeamReviewRequest(ctx context.Context, issue *Issue, reviewer *organization.Team, doer *user_model.User) (*Comment, error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer committer.Close()
+
+ review, err := GetTeamReviewerByIssueIDAndTeamID(ctx, issue.ID, reviewer.ID)
+ if err != nil && !IsErrReviewNotExist(err) {
+ return nil, err
+ }
+
+ if review == nil {
+ return nil, nil
+ }
+
+ if _, err = db.DeleteByBean(ctx, review); err != nil {
+ return nil, err
+ }
+
+ official, err := IsOfficialReviewerTeam(ctx, issue, reviewer)
+ if err != nil {
+ return nil, fmt.Errorf("isOfficialReviewerTeam(): %w", err)
+ }
+
+ if official {
+ // recalculate which is the latest official review from that team
+ review, err := GetReviewByIssueIDAndUserID(ctx, issue.ID, -reviewer.ID)
+ if err != nil && !IsErrReviewNotExist(err) {
+ return nil, err
+ }
+
+ if review != nil {
+ if _, err := db.Exec(ctx, "UPDATE `review` SET official=? WHERE id=?", true, review.ID); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if doer == nil {
+ return nil, committer.Commit()
+ }
+
+ comment, err := CreateComment(ctx, &CreateCommentOptions{
+ Type: CommentTypeReviewRequest,
+ Doer: doer,
+ Repo: issue.Repo,
+ Issue: issue,
+ RemovedAssignee: true, // Use RemovedAssignee as !isRequest
+ AssigneeTeamID: reviewer.ID, // Use AssigneeTeamID as reviewer team ID
+ })
+ if err != nil {
+ return nil, fmt.Errorf("CreateComment(): %w", err)
+ }
+
+ return comment, committer.Commit()
+}
+
+// MarkConversation adds or removes the conversation mark for a code comment
+func MarkConversation(ctx context.Context, comment *Comment, doer *user_model.User, isResolve bool) (err error) {
+ if comment.Type != CommentTypeCode {
+ return nil
+ }
+
+ if isResolve {
+ if comment.ResolveDoerID != 0 {
+ return nil
+ }
+
+ if _, err = db.GetEngine(ctx).Exec("UPDATE `comment` SET resolve_doer_id=? WHERE id=?", doer.ID, comment.ID); err != nil {
+ return err
+ }
+ } else {
+ if comment.ResolveDoerID == 0 {
+ return nil
+ }
+
+ if _, err = db.GetEngine(ctx).Exec("UPDATE `comment` SET resolve_doer_id=? WHERE id=?", 0, comment.ID); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// CanMarkConversation checks whether the doer may add or remove the conversation mark on a code comment;
+// the PR writer, official reviewers and the poster can do it
+func CanMarkConversation(ctx context.Context, issue *Issue, doer *user_model.User) (permResult bool, err error) {
+ if doer == nil || issue == nil {
+ return false, fmt.Errorf("issue or doer is nil")
+ }
+
+ if doer.ID != issue.PosterID {
+ if err = issue.LoadRepo(ctx); err != nil {
+ return false, err
+ }
+
+ p, err := access_model.GetUserRepoPermission(ctx, issue.Repo, doer)
+ if err != nil {
+ return false, err
+ }
+
+ permResult = p.CanAccess(perm.AccessModeWrite, unit.TypePullRequests)
+ if !permResult {
+ if permResult, err = IsOfficialReviewer(ctx, issue, doer); err != nil {
+ return false, err
+ }
+ }
+
+ if !permResult {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+// DeleteReview deletes a review and its code comments
+func DeleteReview(ctx context.Context, r *Review) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if r.ID == 0 {
+ return fmt.Errorf("review is not allowed to be 0")
+ }
+
+ if r.Type == ReviewTypeRequest {
+ return fmt.Errorf("review request can not be deleted using this method")
+ }
+
+ opts := FindCommentsOptions{
+ Type: CommentTypeCode,
+ IssueID: r.IssueID,
+ ReviewID: r.ID,
+ }
+
+ if _, err := db.Delete[Comment](ctx, opts); err != nil {
+ return err
+ }
+
+ opts = FindCommentsOptions{
+ Type: CommentTypeReview,
+ IssueID: r.IssueID,
+ ReviewID: r.ID,
+ }
+
+ if _, err := db.Delete[Comment](ctx, opts); err != nil {
+ return err
+ }
+
+ opts = FindCommentsOptions{
+ Type: CommentTypeDismissReview,
+ IssueID: r.IssueID,
+ ReviewID: r.ID,
+ }
+
+ if _, err := db.Delete[Comment](ctx, opts); err != nil {
+ return err
+ }
+
+ if _, err := db.DeleteByID[Review](ctx, r.ID); err != nil {
+ return err
+ }
+
+ if r.Official {
+ if err := restoreLatestOfficialReview(ctx, r.IssueID, r.ReviewerID); err != nil {
+ return err
+ }
+ }
+
+ return committer.Commit()
+}
+
+// GetCodeCommentsCount returns the number of CodeComments a review has
+func (r *Review) GetCodeCommentsCount(ctx context.Context) int {
+ opts := FindCommentsOptions{
+ Type: CommentTypeCode,
+ IssueID: r.IssueID,
+ ReviewID: r.ID,
+ }
+ conds := opts.ToConds()
+ if r.ID == 0 {
+ conds = conds.And(builder.Eq{"invalidated": false})
+ }
+
+ count, err := db.GetEngine(ctx).Where(conds).Count(new(Comment))
+ if err != nil {
+ return 0
+ }
+ return int(count)
+}
+
+// HTMLURL returns the URL of the issue comment related to this review
+func (r *Review) HTMLURL(ctx context.Context) string {
+ opts := FindCommentsOptions{
+ Type: CommentTypeReview,
+ IssueID: r.IssueID,
+ ReviewID: r.ID,
+ }
+ comment := new(Comment)
+ has, err := db.GetEngine(ctx).Where(opts.ToConds()).Get(comment)
+ if err != nil || !has {
+ return ""
+ }
+ return comment.HTMLURL(ctx)
+}
+
+// RemapExternalUser ExternalUserRemappable interface
+func (r *Review) RemapExternalUser(externalName string, externalID, userID int64) error {
+ r.OriginalAuthor = externalName
+ r.OriginalAuthorID = externalID
+ r.ReviewerID = userID
+ return nil
+}
+
+// GetUserID ExternalUserRemappable interface
+func (r *Review) GetUserID() int64 { return r.ReviewerID }
+
+// GetExternalName ExternalUserRemappable interface
+func (r *Review) GetExternalName() string { return r.OriginalAuthor }
+
+// GetExternalID ExternalUserRemappable interface
+func (r *Review) GetExternalID() int64 { return r.OriginalAuthorID }
+
+// UpdateReviewsMigrationsByType updates the migration information of reviews for the given git service type, original author ID and poster ID
+func UpdateReviewsMigrationsByType(ctx context.Context, tp structs.GitServiceType, originalAuthorID string, posterID int64) error {
+ _, err := db.GetEngine(ctx).Table("review").
+ Where("original_author_id = ?", originalAuthorID).
+ And(migratedIssueCond(tp)).
+ Update(map[string]any{
+ "reviewer_id": posterID,
+ "original_author": "",
+ "original_author_id": 0,
+ })
+ return err
+}
diff --git a/models/issues/review_list.go b/models/issues/review_list.go
new file mode 100644
index 0000000..a5ceb21
--- /dev/null
+++ b/models/issues/review_list.go
@@ -0,0 +1,200 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ organization_model "code.gitea.io/gitea/models/organization"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/optional"
+
+ "xorm.io/builder"
+)
+
+type ReviewList []*Review
+
+// LoadReviewers loads reviewers
+func (reviews ReviewList) LoadReviewers(ctx context.Context) error {
+ reviewerIDs := make([]int64, len(reviews))
+ for i := 0; i < len(reviews); i++ {
+ reviewerIDs[i] = reviews[i].ReviewerID
+ }
+ reviewers, err := user_model.GetPossibleUserByIDs(ctx, reviewerIDs)
+ if err != nil {
+ return err
+ }
+
+ userMap := make(map[int64]*user_model.User, len(reviewers))
+ for _, reviewer := range reviewers {
+ userMap[reviewer.ID] = reviewer
+ }
+ for _, review := range reviews {
+ review.Reviewer = userMap[review.ReviewerID]
+ }
+ return nil
+}
+
+// LoadReviewersTeams loads the reviewers' teams
+func (reviews ReviewList) LoadReviewersTeams(ctx context.Context) error {
+ reviewersTeamsIDs := make([]int64, 0)
+ for _, review := range reviews {
+ if review.ReviewerTeamID != 0 {
+ reviewersTeamsIDs = append(reviewersTeamsIDs, review.ReviewerTeamID)
+ }
+ }
+
+ teamsMap := make(map[int64]*organization_model.Team, 0)
+ for _, teamID := range reviewersTeamsIDs {
+ team, err := organization_model.GetTeamByID(ctx, teamID)
+ if err != nil {
+ return err
+ }
+
+ teamsMap[teamID] = team
+ }
+
+ for _, review := range reviews {
+ if review.ReviewerTeamID != 0 {
+ review.ReviewerTeam = teamsMap[review.ReviewerTeamID]
+ }
+ }
+
+ return nil
+}
+
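+// LoadIssues loads the issues the reviews belong to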
+func (reviews ReviewList) LoadIssues(ctx context.Context) error {
+ issueIDs := container.FilterSlice(reviews, func(review *Review) (int64, bool) {
+ return review.IssueID, true
+ })
+
+ issues, err := GetIssuesByIDs(ctx, issueIDs)
+ if err != nil {
+ return err
+ }
+ if _, err := issues.LoadRepositories(ctx); err != nil {
+ return err
+ }
+ issueMap := make(map[int64]*Issue, len(issues))
+ for _, issue := range issues {
+ issueMap[issue.ID] = issue
+ }
+
+ for _, review := range reviews {
+ review.Issue = issueMap[review.IssueID]
+ }
+ return nil
+}
+
+// FindReviewOptions represents possible filters to find reviews
+type FindReviewOptions struct {
+ db.ListOptions
+ Types []ReviewType
+ IssueID int64
+ ReviewerID int64
+ OfficialOnly bool
+ Dismissed optional.Option[bool]
+}
+
+func (opts *FindReviewOptions) toCond() builder.Cond {
+ cond := builder.NewCond()
+ if opts.IssueID > 0 {
+ cond = cond.And(builder.Eq{"issue_id": opts.IssueID})
+ }
+ if opts.ReviewerID > 0 {
+ cond = cond.And(builder.Eq{"reviewer_id": opts.ReviewerID})
+ }
+ if len(opts.Types) > 0 {
+ cond = cond.And(builder.In("type", opts.Types))
+ }
+ if opts.OfficialOnly {
+ cond = cond.And(builder.Eq{"official": true})
+ }
+ if opts.Dismissed.Has() {
+ cond = cond.And(builder.Eq{"dismissed": opts.Dismissed.Value()})
+ }
+ return cond
+}
+
+// FindReviews returns reviews passing FindReviewOptions
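+//
+// A usage sketch (illustrative only; issueID and reviewerID are placeholders):
+//
+//	reviews, err := FindReviews(ctx, FindReviewOptions{
+//		Types:      []ReviewType{ReviewTypeApprove},
+//		IssueID:    issueID,
+//		ReviewerID: reviewerID,
+//	})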
+func FindReviews(ctx context.Context, opts FindReviewOptions) (ReviewList, error) {
+ reviews := make([]*Review, 0, 10)
+ sess := db.GetEngine(ctx).Where(opts.toCond())
+ if opts.Page > 0 && !opts.IsListAll() {
+ sess = db.SetSessionPagination(sess, &opts)
+ }
+ return reviews, sess.
+ Asc("created_unix").
+ Asc("id").
+ Find(&reviews)
+}
+
+// FindLatestReviews returns only latest reviews per user, passing FindReviewOptions
+func FindLatestReviews(ctx context.Context, opts FindReviewOptions) (ReviewList, error) {
+ reviews := make([]*Review, 0, 10)
+ cond := opts.toCond()
+ sess := db.GetEngine(ctx).Where(cond)
+ if opts.Page > 0 {
+ sess = db.SetSessionPagination(sess, &opts)
+ }
+
+ sess.In("id", builder.
+ Select("max(id)").
+ From("review").
+ Where(cond).
+ GroupBy("reviewer_id"))
+
+ return reviews, sess.
+ Asc("created_unix").
+ Asc("id").
+ Find(&reviews)
+}
+
+// CountReviews returns count of reviews passing FindReviewOptions
+func CountReviews(ctx context.Context, opts FindReviewOptions) (int64, error) {
+ return db.GetEngine(ctx).Where(opts.toCond()).Count(&Review{})
+}
+
+// GetReviewersFromOriginalAuthorsByIssueID gets the latest review of each original author for a pull request
+func GetReviewersFromOriginalAuthorsByIssueID(ctx context.Context, issueID int64) (ReviewList, error) {
+ reviews := make([]*Review, 0, 10)
+
+ // Get latest review of each reviewer, sorted in order they were made
+ if err := db.GetEngine(ctx).SQL("SELECT * FROM review WHERE id IN (SELECT max(id) as id FROM review WHERE issue_id = ? AND reviewer_team_id = 0 AND type in (?, ?, ?) AND original_author_id <> 0 GROUP BY issue_id, original_author_id) ORDER BY review.updated_unix ASC",
+ issueID, ReviewTypeApprove, ReviewTypeReject, ReviewTypeRequest).
+ Find(&reviews); err != nil {
+ return nil, err
+ }
+
+ return reviews, nil
+}
+
+// GetReviewsByIssueID gets the latest review of each reviewer for a pull request
+func GetReviewsByIssueID(ctx context.Context, issueID int64) (ReviewList, error) {
+ reviews := make([]*Review, 0, 10)
+
+ sess := db.GetEngine(ctx)
+
+ // Get latest review of each reviewer, sorted in order they were made
+ if err := sess.SQL("SELECT * FROM review WHERE id IN (SELECT max(id) as id FROM review WHERE issue_id = ? AND reviewer_team_id = 0 AND type in (?, ?, ?) AND dismissed = ? AND original_author_id = 0 GROUP BY issue_id, reviewer_id) ORDER BY review.updated_unix ASC",
+ issueID, ReviewTypeApprove, ReviewTypeReject, ReviewTypeRequest, false).
+ Find(&reviews); err != nil {
+ return nil, err
+ }
+
+ teamReviewRequests := make([]*Review, 0, 5)
+ if err := sess.SQL("SELECT * FROM review WHERE id IN (SELECT max(id) as id FROM review WHERE issue_id = ? AND reviewer_team_id <> 0 AND original_author_id = 0 GROUP BY issue_id, reviewer_team_id) ORDER BY review.updated_unix ASC",
+ issueID).
+ Find(&teamReviewRequests); err != nil {
+ return nil, err
+ }
+
+ if len(teamReviewRequests) > 0 {
+ reviews = append(reviews, teamReviewRequests...)
+ }
+
+ return reviews, nil
+}
diff --git a/models/issues/review_test.go b/models/issues/review_test.go
new file mode 100644
index 0000000..51cb940
--- /dev/null
+++ b/models/issues/review_test.go
@@ -0,0 +1,321 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGetReviewByID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ review, err := issues_model.GetReviewByID(db.DefaultContext, 1)
+ require.NoError(t, err)
+ assert.Equal(t, "Demo Review", review.Content)
+ assert.Equal(t, issues_model.ReviewTypeApprove, review.Type)
+
+ _, err = issues_model.GetReviewByID(db.DefaultContext, 23892)
+ require.Error(t, err)
+ assert.True(t, issues_model.IsErrReviewNotExist(err), "IsErrReviewNotExist")
+}
+
+func TestReview_LoadAttributes(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ review := unittest.AssertExistsAndLoadBean(t, &issues_model.Review{ID: 1})
+ require.NoError(t, review.LoadAttributes(db.DefaultContext))
+ assert.NotNil(t, review.Issue)
+ assert.NotNil(t, review.Reviewer)
+
+ invalidReview1 := unittest.AssertExistsAndLoadBean(t, &issues_model.Review{ID: 2})
+ require.Error(t, invalidReview1.LoadAttributes(db.DefaultContext))
+
+ invalidReview2 := unittest.AssertExistsAndLoadBean(t, &issues_model.Review{ID: 3})
+ require.Error(t, invalidReview2.LoadAttributes(db.DefaultContext))
+}
+
+func TestReview_LoadCodeComments(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ review := unittest.AssertExistsAndLoadBean(t, &issues_model.Review{ID: 4})
+ require.NoError(t, review.LoadAttributes(db.DefaultContext))
+ require.NoError(t, review.LoadCodeComments(db.DefaultContext))
+ assert.Len(t, review.CodeComments, 1)
+ assert.Equal(t, int64(4), review.CodeComments["README.md"][int64(4)][0].Line)
+}
+
+func TestReviewType_Icon(t *testing.T) {
+ assert.Equal(t, "check", issues_model.ReviewTypeApprove.Icon())
+ assert.Equal(t, "diff", issues_model.ReviewTypeReject.Icon())
+ assert.Equal(t, "comment", issues_model.ReviewTypeComment.Icon())
+ assert.Equal(t, "comment", issues_model.ReviewTypeUnknown.Icon())
+ assert.Equal(t, "dot-fill", issues_model.ReviewTypeRequest.Icon())
+ assert.Equal(t, "comment", issues_model.ReviewType(6).Icon())
+}
+
+func TestFindReviews(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ reviews, err := issues_model.FindReviews(db.DefaultContext, issues_model.FindReviewOptions{
+ Types: []issues_model.ReviewType{issues_model.ReviewTypeApprove},
+ IssueID: 2,
+ ReviewerID: 1,
+ })
+ require.NoError(t, err)
+ assert.Len(t, reviews, 1)
+ assert.Equal(t, "Demo Review", reviews[0].Content)
+}
+
+func TestFindLatestReviews(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ reviews, err := issues_model.FindLatestReviews(db.DefaultContext, issues_model.FindReviewOptions{
+ Types: []issues_model.ReviewType{issues_model.ReviewTypeApprove},
+ IssueID: 11,
+ })
+ require.NoError(t, err)
+ assert.Len(t, reviews, 2)
+ assert.Equal(t, "duplicate review from user5 (latest)", reviews[0].Content)
+ assert.Equal(t, "singular review from org6 and final review for this pr", reviews[1].Content)
+}
+
+func TestGetCurrentReview(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 2})
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+
+ review, err := issues_model.GetCurrentReview(db.DefaultContext, user, issue)
+ require.NoError(t, err)
+ assert.NotNil(t, review)
+ assert.Equal(t, issues_model.ReviewTypePending, review.Type)
+ assert.Equal(t, "Pending Review", review.Content)
+
+ user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 7})
+ review2, err := issues_model.GetCurrentReview(db.DefaultContext, user2, issue)
+ require.Error(t, err)
+ assert.True(t, issues_model.IsErrReviewNotExist(err))
+ assert.Nil(t, review2)
+}
+
+func TestCreateReview(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 2})
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+
+ review, err := issues_model.CreateReview(db.DefaultContext, issues_model.CreateReviewOptions{
+ Content: "New Review",
+ Type: issues_model.ReviewTypePending,
+ Issue: issue,
+ Reviewer: user,
+ })
+ require.NoError(t, err)
+ assert.Equal(t, "New Review", review.Content)
+ unittest.AssertExistsAndLoadBean(t, &issues_model.Review{Content: "New Review"})
+}
+
+func TestGetReviewersByIssueID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 3})
+ user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+ org3 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 3})
+ user4 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 4})
+
+ expectedReviews := []*issues_model.Review{}
+ expectedReviews = append(expectedReviews,
+ &issues_model.Review{
+ Reviewer: org3,
+ Type: issues_model.ReviewTypeReject,
+ UpdatedUnix: 946684812,
+ },
+ &issues_model.Review{
+ Reviewer: user4,
+ Type: issues_model.ReviewTypeApprove,
+ UpdatedUnix: 946684813,
+ },
+ &issues_model.Review{
+ Reviewer: user2,
+ Type: issues_model.ReviewTypeReject,
+ UpdatedUnix: 946684814,
+ })
+
+ allReviews, err := issues_model.GetReviewsByIssueID(db.DefaultContext, issue.ID)
+ require.NoError(t, err)
+ for _, review := range allReviews {
+ require.NoError(t, review.LoadReviewer(db.DefaultContext))
+ }
+ if assert.Len(t, allReviews, 3) {
+ for i, review := range allReviews {
+ assert.Equal(t, expectedReviews[i].Reviewer, review.Reviewer)
+ assert.Equal(t, expectedReviews[i].Type, review.Type)
+ assert.Equal(t, expectedReviews[i].UpdatedUnix, review.UpdatedUnix)
+ }
+ }
+
+ allReviews, err = issues_model.GetReviewsByIssueID(db.DefaultContext, issue.ID)
+ require.NoError(t, err)
+ require.NoError(t, allReviews.LoadReviewers(db.DefaultContext))
+ if assert.Len(t, allReviews, 3) {
+ for i, review := range allReviews {
+ assert.Equal(t, expectedReviews[i].Reviewer, review.Reviewer)
+ assert.Equal(t, expectedReviews[i].Type, review.Type)
+ assert.Equal(t, expectedReviews[i].UpdatedUnix, review.UpdatedUnix)
+ }
+ }
+}
+
+func TestDismissReview(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ rejectReviewExample := unittest.AssertExistsAndLoadBean(t, &issues_model.Review{ID: 9})
+ requestReviewExample := unittest.AssertExistsAndLoadBean(t, &issues_model.Review{ID: 11})
+ approveReviewExample := unittest.AssertExistsAndLoadBean(t, &issues_model.Review{ID: 8})
+ assert.False(t, rejectReviewExample.Dismissed)
+ assert.False(t, requestReviewExample.Dismissed)
+ assert.False(t, approveReviewExample.Dismissed)
+
+ require.NoError(t, issues_model.DismissReview(db.DefaultContext, rejectReviewExample, true))
+ rejectReviewExample = unittest.AssertExistsAndLoadBean(t, &issues_model.Review{ID: 9})
+ requestReviewExample = unittest.AssertExistsAndLoadBean(t, &issues_model.Review{ID: 11})
+ assert.True(t, rejectReviewExample.Dismissed)
+ assert.False(t, requestReviewExample.Dismissed)
+
+ require.NoError(t, issues_model.DismissReview(db.DefaultContext, requestReviewExample, true))
+ rejectReviewExample = unittest.AssertExistsAndLoadBean(t, &issues_model.Review{ID: 9})
+ requestReviewExample = unittest.AssertExistsAndLoadBean(t, &issues_model.Review{ID: 11})
+ assert.True(t, rejectReviewExample.Dismissed)
+ assert.False(t, requestReviewExample.Dismissed)
+ assert.False(t, approveReviewExample.Dismissed)
+
+ require.NoError(t, issues_model.DismissReview(db.DefaultContext, requestReviewExample, true))
+ rejectReviewExample = unittest.AssertExistsAndLoadBean(t, &issues_model.Review{ID: 9})
+ requestReviewExample = unittest.AssertExistsAndLoadBean(t, &issues_model.Review{ID: 11})
+ assert.True(t, rejectReviewExample.Dismissed)
+ assert.False(t, requestReviewExample.Dismissed)
+ assert.False(t, approveReviewExample.Dismissed)
+
+ require.NoError(t, issues_model.DismissReview(db.DefaultContext, requestReviewExample, false))
+ rejectReviewExample = unittest.AssertExistsAndLoadBean(t, &issues_model.Review{ID: 9})
+ requestReviewExample = unittest.AssertExistsAndLoadBean(t, &issues_model.Review{ID: 11})
+ assert.True(t, rejectReviewExample.Dismissed)
+ assert.False(t, requestReviewExample.Dismissed)
+ assert.False(t, approveReviewExample.Dismissed)
+
+ require.NoError(t, issues_model.DismissReview(db.DefaultContext, requestReviewExample, false))
+ rejectReviewExample = unittest.AssertExistsAndLoadBean(t, &issues_model.Review{ID: 9})
+ requestReviewExample = unittest.AssertExistsAndLoadBean(t, &issues_model.Review{ID: 11})
+ assert.True(t, rejectReviewExample.Dismissed)
+ assert.False(t, requestReviewExample.Dismissed)
+ assert.False(t, approveReviewExample.Dismissed)
+
+ require.NoError(t, issues_model.DismissReview(db.DefaultContext, rejectReviewExample, false))
+ assert.False(t, rejectReviewExample.Dismissed)
+ assert.False(t, requestReviewExample.Dismissed)
+ assert.False(t, approveReviewExample.Dismissed)
+
+ require.NoError(t, issues_model.DismissReview(db.DefaultContext, approveReviewExample, true))
+ assert.False(t, rejectReviewExample.Dismissed)
+ assert.False(t, requestReviewExample.Dismissed)
+ assert.True(t, approveReviewExample.Dismissed)
+}
+
+func TestDeleteReview(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 2})
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+
+ review1, err := issues_model.CreateReview(db.DefaultContext, issues_model.CreateReviewOptions{
+ Content: "Official rejection",
+ Type: issues_model.ReviewTypeReject,
+ Official: false,
+ Issue: issue,
+ Reviewer: user,
+ })
+ require.NoError(t, err)
+
+ review2, err := issues_model.CreateReview(db.DefaultContext, issues_model.CreateReviewOptions{
+ Content: "Official approval",
+ Type: issues_model.ReviewTypeApprove,
+ Official: true,
+ Issue: issue,
+ Reviewer: user,
+ })
+ require.NoError(t, err)
+
+ require.NoError(t, issues_model.DeleteReview(db.DefaultContext, review2))
+
+ _, err = issues_model.GetReviewByID(db.DefaultContext, review2.ID)
+ require.Error(t, err)
+ assert.True(t, issues_model.IsErrReviewNotExist(err), "IsErrReviewNotExist")
+
+ review1, err = issues_model.GetReviewByID(db.DefaultContext, review1.ID)
+ require.NoError(t, err)
+ assert.True(t, review1.Official)
+}
+
+func TestDeleteDismissedReview(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 2})
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: issue.RepoID})
+ review, err := issues_model.CreateReview(db.DefaultContext, issues_model.CreateReviewOptions{
+ Content: "reject",
+ Type: issues_model.ReviewTypeReject,
+ Official: false,
+ Issue: issue,
+ Reviewer: user,
+ })
+ require.NoError(t, err)
+ require.NoError(t, issues_model.DismissReview(db.DefaultContext, review, true))
+ comment, err := issues_model.CreateComment(db.DefaultContext, &issues_model.CreateCommentOptions{
+ Type: issues_model.CommentTypeDismissReview,
+ Doer: user,
+ Repo: repo,
+ Issue: issue,
+ ReviewID: review.ID,
+ Content: "dismiss",
+ })
+ require.NoError(t, err)
+ unittest.AssertExistsAndLoadBean(t, &issues_model.Comment{ID: comment.ID})
+ require.NoError(t, issues_model.DeleteReview(db.DefaultContext, review))
+ unittest.AssertNotExistsBean(t, &issues_model.Comment{ID: comment.ID})
+}
+
+func TestAddReviewRequest(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ pull := unittest.AssertExistsAndLoadBean(t, &issues_model.PullRequest{ID: 1})
+ require.NoError(t, pull.LoadIssue(db.DefaultContext))
+ issue := pull.Issue
+ require.NoError(t, issue.LoadRepo(db.DefaultContext))
+ reviewer := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+ _, err := issues_model.CreateReview(db.DefaultContext, issues_model.CreateReviewOptions{
+ Issue: issue,
+ Reviewer: reviewer,
+ Type: issues_model.ReviewTypeReject,
+ })
+
+ require.NoError(t, err)
+ pull.HasMerged = false
+ require.NoError(t, pull.UpdateCols(db.DefaultContext, "has_merged"))
+ issue.IsClosed = true
+ _, err = issues_model.AddReviewRequest(db.DefaultContext, issue, reviewer, &user_model.User{})
+ require.Error(t, err)
+ assert.True(t, issues_model.IsErrReviewRequestOnClosedPR(err))
+
+ pull.HasMerged = true
+ require.NoError(t, pull.UpdateCols(db.DefaultContext, "has_merged"))
+ issue.IsClosed = false
+ _, err = issues_model.AddReviewRequest(db.DefaultContext, issue, reviewer, &user_model.User{})
+ require.Error(t, err)
+ assert.True(t, issues_model.IsErrReviewRequestOnClosedPR(err))
+}
diff --git a/models/issues/stopwatch.go b/models/issues/stopwatch.go
new file mode 100644
index 0000000..93eaf88
--- /dev/null
+++ b/models/issues/stopwatch.go
@@ -0,0 +1,281 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+)
+
+// ErrIssueStopwatchNotExist represents an error where the stopwatch does not exist
+type ErrIssueStopwatchNotExist struct {
+ UserID int64
+ IssueID int64
+}
+
+func (err ErrIssueStopwatchNotExist) Error() string {
+ return fmt.Sprintf("issue stopwatch doesn't exist[uid: %d, issue_id: %d", err.UserID, err.IssueID)
+}
+
+func (err ErrIssueStopwatchNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// Stopwatch represents a stopwatch for time tracking.
+type Stopwatch struct {
+ ID int64 `xorm:"pk autoincr"`
+ IssueID int64 `xorm:"INDEX"`
+ UserID int64 `xorm:"INDEX"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+}
+
+func init() {
+ db.RegisterModel(new(Stopwatch))
+}
+
+// Seconds returns the amount of time passed since creation, based on local server time
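+// (for example, a stopwatch created 90 seconds ago yields 90)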
+func (s Stopwatch) Seconds() int64 {
+ return int64(timeutil.TimeStampNow() - s.CreatedUnix)
+}
+
+// Duration returns a human-readable duration string based on local server time
+func (s Stopwatch) Duration() string {
+ return util.SecToTime(s.Seconds())
+}
+
+func getStopwatch(ctx context.Context, userID, issueID int64) (sw *Stopwatch, exists bool, err error) {
+ sw = new(Stopwatch)
+ exists, err = db.GetEngine(ctx).
+ Where("user_id = ?", userID).
+ And("issue_id = ?", issueID).
+ Get(sw)
+ return sw, exists, err
+}
+
+// GetUIDsAndStopwatch returns a map of user IDs to all of their running stopwatches
+func GetUIDsAndStopwatch(ctx context.Context) (map[int64][]*Stopwatch, error) {
+ sws := []*Stopwatch{}
+ if err := db.GetEngine(ctx).Where("issue_id != 0").Find(&sws); err != nil {
+ return nil, err
+ }
+ res := map[int64][]*Stopwatch{}
+ if len(sws) == 0 {
+ return res, nil
+ }
+
+ for _, sw := range sws {
+ res[sw.UserID] = append(res[sw.UserID], sw)
+ }
+ return res, nil
+}
+
+// GetUserStopwatches returns a list of all stopwatches of a user
+func GetUserStopwatches(ctx context.Context, userID int64, listOptions db.ListOptions) ([]*Stopwatch, error) {
+ sws := make([]*Stopwatch, 0, 8)
+ sess := db.GetEngine(ctx).Where("stopwatch.user_id = ?", userID)
+ if listOptions.Page != 0 {
+ sess = db.SetSessionPagination(sess, &listOptions)
+ }
+
+ err := sess.Find(&sws)
+ if err != nil {
+ return nil, err
+ }
+ return sws, nil
+}
+
+// CountUserStopwatches returns the count of all stopwatches of a user
+func CountUserStopwatches(ctx context.Context, userID int64) (int64, error) {
+ return db.GetEngine(ctx).Where("user_id = ?", userID).Count(&Stopwatch{})
+}
+
+// StopwatchExists returns true if the stopwatch exists
+func StopwatchExists(ctx context.Context, userID, issueID int64) bool {
+ _, exists, _ := getStopwatch(ctx, userID, issueID)
+ return exists
+}
+
+// HasUserStopwatch returns true if the user has a stopwatch
+func HasUserStopwatch(ctx context.Context, userID int64) (exists bool, sw *Stopwatch, issue *Issue, err error) {
+ type stopwatchIssueRepo struct {
+ Stopwatch `xorm:"extends"`
+ Issue `xorm:"extends"`
+ repo.Repository `xorm:"extends"`
+ }
+
+ swIR := new(stopwatchIssueRepo)
+ exists, err = db.GetEngine(ctx).
+ Table("stopwatch").
+ Where("user_id = ?", userID).
+ Join("INNER", "issue", "issue.id = stopwatch.issue_id").
+ Join("INNER", "repository", "repository.id = issue.repo_id").
+ Get(swIR)
+ if exists {
+ sw = &swIR.Stopwatch
+ issue = &swIR.Issue
+ issue.Repo = &swIR.Repository
+ }
+ return exists, sw, issue, err
+}
+
+// FinishIssueStopwatchIfPossible finishes the stopwatch if it exists; otherwise it does nothing
+func FinishIssueStopwatchIfPossible(ctx context.Context, user *user_model.User, issue *Issue) error {
+ _, exists, err := getStopwatch(ctx, user.ID, issue.ID)
+ if err != nil {
+ return err
+ }
+ if !exists {
+ return nil
+ }
+ return FinishIssueStopwatch(ctx, user, issue)
+}
+
+// CreateOrStopIssueStopwatch creates an issue stopwatch if none exists, otherwise finishes the existing one
+func CreateOrStopIssueStopwatch(ctx context.Context, user *user_model.User, issue *Issue) error {
+ _, exists, err := getStopwatch(ctx, user.ID, issue.ID)
+ if err != nil {
+ return err
+ }
+ if exists {
+ return FinishIssueStopwatch(ctx, user, issue)
+ }
+ return CreateIssueStopwatch(ctx, user, issue)
+}
+
+// FinishIssueStopwatch finishes the stopwatch if it exists, otherwise it returns an error
+func FinishIssueStopwatch(ctx context.Context, user *user_model.User, issue *Issue) error {
+ sw, exists, err := getStopwatch(ctx, user.ID, issue.ID)
+ if err != nil {
+ return err
+ }
+ if !exists {
+ return ErrIssueStopwatchNotExist{
+ UserID: user.ID,
+ IssueID: issue.ID,
+ }
+ }
+
+ // Create tracked time out of the time difference between the start date and now
+ timediff := time.Now().Unix() - int64(sw.CreatedUnix)
+
+ // Create TrackedTime
+ tt := &TrackedTime{
+ Created: time.Now(),
+ IssueID: issue.ID,
+ UserID: user.ID,
+ Time: timediff,
+ }
+
+ if err := db.Insert(ctx, tt); err != nil {
+ return err
+ }
+
+ if err := issue.LoadRepo(ctx); err != nil {
+ return err
+ }
+
+ if _, err := CreateComment(ctx, &CreateCommentOptions{
+ Doer: user,
+ Issue: issue,
+ Repo: issue.Repo,
+ Content: util.SecToTime(timediff),
+ Type: CommentTypeStopTracking,
+ TimeID: tt.ID,
+ }); err != nil {
+ return err
+ }
+ _, err = db.DeleteByBean(ctx, sw)
+ return err
+}
+
+// CreateIssueStopwatch creates a stopwatch for the given user and issue, finishing any other running stopwatch of the user first
+func CreateIssueStopwatch(ctx context.Context, user *user_model.User, issue *Issue) error {
+ if err := issue.LoadRepo(ctx); err != nil {
+ return err
+ }
+
+ // if another stopwatch is running: stop it
+ exists, _, otherIssue, err := HasUserStopwatch(ctx, user.ID)
+ if err != nil {
+ return err
+ }
+ if exists {
+ if err := FinishIssueStopwatch(ctx, user, otherIssue); err != nil {
+ return err
+ }
+ }
+
+ // Create stopwatch
+ sw := &Stopwatch{
+ UserID: user.ID,
+ IssueID: issue.ID,
+ }
+
+ if err := db.Insert(ctx, sw); err != nil {
+ return err
+ }
+
+ if err := issue.LoadRepo(ctx); err != nil {
+ return err
+ }
+
+ if _, err := CreateComment(ctx, &CreateCommentOptions{
+ Doer: user,
+ Issue: issue,
+ Repo: issue.Repo,
+ Type: CommentTypeStartTracking,
+ }); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// CancelStopwatch removes the given stopwatch and logs it in the issue's timeline.
+func CancelStopwatch(ctx context.Context, user *user_model.User, issue *Issue) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+ if err := cancelStopwatch(ctx, user, issue); err != nil {
+ return err
+ }
+ return committer.Commit()
+}
+
+func cancelStopwatch(ctx context.Context, user *user_model.User, issue *Issue) error {
+ e := db.GetEngine(ctx)
+ sw, exists, err := getStopwatch(ctx, user.ID, issue.ID)
+ if err != nil {
+ return err
+ }
+
+ if exists {
+ if _, err := e.Delete(sw); err != nil {
+ return err
+ }
+
+ if err := issue.LoadRepo(ctx); err != nil {
+ return err
+ }
+
+ if _, err := CreateComment(ctx, &CreateCommentOptions{
+ Doer: user,
+ Issue: issue,
+ Repo: issue.Repo,
+ Type: CommentTypeCancelTracking,
+ }); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/models/issues/stopwatch_test.go b/models/issues/stopwatch_test.go
new file mode 100644
index 0000000..af86e8b
--- /dev/null
+++ b/models/issues/stopwatch_test.go
@@ -0,0 +1,119 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues_test
+
+import (
+ "path/filepath"
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestCancelStopwatch(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ user1, err := user_model.GetUserByID(db.DefaultContext, 1)
+ require.NoError(t, err)
+
+ issue1, err := issues_model.GetIssueByID(db.DefaultContext, 1)
+ require.NoError(t, err)
+ issue2, err := issues_model.GetIssueByID(db.DefaultContext, 2)
+ require.NoError(t, err)
+
+ err = issues_model.CancelStopwatch(db.DefaultContext, user1, issue1)
+ require.NoError(t, err)
+ unittest.AssertNotExistsBean(t, &issues_model.Stopwatch{UserID: user1.ID, IssueID: issue1.ID})
+
+ _ = unittest.AssertExistsAndLoadBean(t, &issues_model.Comment{Type: issues_model.CommentTypeCancelTracking, PosterID: user1.ID, IssueID: issue1.ID})
+
+ require.NoError(t, issues_model.CancelStopwatch(db.DefaultContext, user1, issue2))
+}
+
+func TestStopwatchExists(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ assert.True(t, issues_model.StopwatchExists(db.DefaultContext, 1, 1))
+ assert.False(t, issues_model.StopwatchExists(db.DefaultContext, 1, 2))
+}
+
+func TestHasUserStopwatch(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ exists, sw, _, err := issues_model.HasUserStopwatch(db.DefaultContext, 1)
+ require.NoError(t, err)
+ assert.True(t, exists)
+ assert.Equal(t, int64(1), sw.ID)
+
+ exists, _, _, err = issues_model.HasUserStopwatch(db.DefaultContext, 3)
+ require.NoError(t, err)
+ assert.False(t, exists)
+}
+
+func TestCreateOrStopIssueStopwatch(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ user2, err := user_model.GetUserByID(db.DefaultContext, 2)
+ require.NoError(t, err)
+ org3, err := user_model.GetUserByID(db.DefaultContext, 3)
+ require.NoError(t, err)
+
+ issue1, err := issues_model.GetIssueByID(db.DefaultContext, 1)
+ require.NoError(t, err)
+ issue2, err := issues_model.GetIssueByID(db.DefaultContext, 2)
+ require.NoError(t, err)
+
+ require.NoError(t, issues_model.CreateOrStopIssueStopwatch(db.DefaultContext, org3, issue1))
+ sw := unittest.AssertExistsAndLoadBean(t, &issues_model.Stopwatch{UserID: 3, IssueID: 1})
+ assert.LessOrEqual(t, sw.CreatedUnix, timeutil.TimeStampNow())
+
+ require.NoError(t, issues_model.CreateOrStopIssueStopwatch(db.DefaultContext, user2, issue2))
+ unittest.AssertNotExistsBean(t, &issues_model.Stopwatch{UserID: 2, IssueID: 2})
+ unittest.AssertExistsAndLoadBean(t, &issues_model.TrackedTime{UserID: 2, IssueID: 2})
+}
+
+func TestGetUIDsAndStopwatch(t *testing.T) {
+ defer unittest.OverrideFixtures(
+ unittest.FixturesOptions{
+ Dir: filepath.Join(setting.AppWorkPath, "models/fixtures/"),
+ Base: setting.AppWorkPath,
+ Dirs: []string{"models/issues/TestGetUIDsAndStopwatch/"},
+ },
+ )()
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ uidStopwatches, err := issues_model.GetUIDsAndStopwatch(db.DefaultContext)
+ require.NoError(t, err)
+ assert.EqualValues(t, map[int64][]*issues_model.Stopwatch{
+ 1: {
+ {
+ ID: 1,
+ UserID: 1,
+ IssueID: 1,
+ CreatedUnix: timeutil.TimeStamp(1500988001),
+ },
+ {
+ ID: 3,
+ UserID: 1,
+ IssueID: 2,
+ CreatedUnix: timeutil.TimeStamp(1500988004),
+ },
+ },
+ 2: {
+ {
+ ID: 2,
+ UserID: 2,
+ IssueID: 2,
+ CreatedUnix: timeutil.TimeStamp(1500988002),
+ },
+ },
+ }, uidStopwatches)
+}
diff --git a/models/issues/tracked_time.go b/models/issues/tracked_time.go
new file mode 100644
index 0000000..caa582a
--- /dev/null
+++ b/models/issues/tracked_time.go
@@ -0,0 +1,386 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/optional"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+ "xorm.io/xorm"
+)
+
+// TrackedTime represents a time that was spent for a specific issue.
+type TrackedTime struct {
+ ID int64 `xorm:"pk autoincr"`
+ IssueID int64 `xorm:"INDEX"`
+ Issue *Issue `xorm:"-"`
+ UserID int64 `xorm:"INDEX"`
+ User *user_model.User `xorm:"-"`
+ Created time.Time `xorm:"-"`
+ CreatedUnix int64 `xorm:"created"`
+	Time        int64            `xorm:"NOT NULL"` // tracked time in seconds
+ Deleted bool `xorm:"NOT NULL DEFAULT false"`
+}
+
+func init() {
+ db.RegisterModel(new(TrackedTime))
+}
+
+// TrackedTimeList is a list of TrackedTime entries
+type TrackedTimeList []*TrackedTime
+
+// AfterLoad is invoked from XORM after setting the values of all fields of this object.
+func (t *TrackedTime) AfterLoad() {
+ t.Created = time.Unix(t.CreatedUnix, 0).In(setting.DefaultUILocation)
+}
+
+// LoadAttributes loads the Issue and User attributes
+func (t *TrackedTime) LoadAttributes(ctx context.Context) (err error) {
+ // Load the issue
+ if t.Issue == nil {
+ t.Issue, err = GetIssueByID(ctx, t.IssueID)
+ if err != nil && !errors.Is(err, util.ErrNotExist) {
+ return err
+ }
+ }
+ // Now load the repo for the issue (which we may have just loaded)
+ if t.Issue != nil {
+ err = t.Issue.LoadRepo(ctx)
+ if err != nil && !errors.Is(err, util.ErrNotExist) {
+ return err
+ }
+ }
+ // Load the user
+ if t.User == nil {
+ t.User, err = user_model.GetUserByID(ctx, t.UserID)
+ if err != nil {
+ if !errors.Is(err, util.ErrNotExist) {
+ return err
+ }
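+		// The user was deleted; attribute the tracked time to the ghost user instead.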
+ t.User = user_model.NewGhostUser()
+ }
+ }
+ return nil
+}
+
+// LoadAttributes loads the Issue and User for every entry in the list
+func (tl TrackedTimeList) LoadAttributes(ctx context.Context) error {
+ for _, t := range tl {
+ if err := t.LoadAttributes(ctx); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// FindTrackedTimesOptions represents the filters for tracked times. If an ID is 0 it will be ignored.
+type FindTrackedTimesOptions struct {
+ db.ListOptions
+ IssueID int64
+ UserID int64
+ RepositoryID int64
+ MilestoneID int64
+ CreatedAfterUnix int64
+ CreatedBeforeUnix int64
+}
+
+// ToConds converts the options into a xorm builder.Cond
+func (opts *FindTrackedTimesOptions) ToConds() builder.Cond {
+ cond := builder.NewCond().And(builder.Eq{"tracked_time.deleted": false})
+ if opts.IssueID != 0 {
+ cond = cond.And(builder.Eq{"issue_id": opts.IssueID})
+ }
+ if opts.UserID != 0 {
+ cond = cond.And(builder.Eq{"user_id": opts.UserID})
+ }
+ if opts.RepositoryID != 0 {
+ cond = cond.And(builder.Eq{"issue.repo_id": opts.RepositoryID})
+ }
+ if opts.MilestoneID != 0 {
+ cond = cond.And(builder.Eq{"issue.milestone_id": opts.MilestoneID})
+ }
+ if opts.CreatedAfterUnix != 0 {
+ cond = cond.And(builder.Gte{"tracked_time.created_unix": opts.CreatedAfterUnix})
+ }
+ if opts.CreatedBeforeUnix != 0 {
+ cond = cond.And(builder.Lte{"tracked_time.created_unix": opts.CreatedBeforeUnix})
+ }
+ return cond
+}
+
+func (opts *FindTrackedTimesOptions) ToJoins() []db.JoinFunc {
+ if opts.RepositoryID > 0 || opts.MilestoneID > 0 {
+ return []db.JoinFunc{
+ func(e db.Engine) error {
+ e.Join("INNER", "issue", "issue.id = tracked_time.issue_id")
+ return nil
+ },
+ }
+ }
+ return nil
+}
+
+// toSession converts the given options to a xorm session using the conditions from ToConds, joining with the issue table if required
+func (opts *FindTrackedTimesOptions) toSession(e db.Engine) db.Engine {
+ sess := e
+ if opts.RepositoryID > 0 || opts.MilestoneID > 0 {
+ sess = e.Join("INNER", "issue", "issue.id = tracked_time.issue_id")
+ }
+
+ sess = sess.Where(opts.ToConds())
+
+ if opts.Page != 0 {
+ sess = db.SetSessionPagination(sess, opts)
+ }
+
+ return sess
+}
+
+// GetTrackedTimes returns all tracked times that match the given options.
+func GetTrackedTimes(ctx context.Context, options *FindTrackedTimesOptions) (trackedTimes TrackedTimeList, err error) {
+ err = options.toSession(db.GetEngine(ctx)).Find(&trackedTimes)
+ return trackedTimes, err
+}
+
+// CountTrackedTimes returns the number of tracked times that match the given options.
+func CountTrackedTimes(ctx context.Context, opts *FindTrackedTimesOptions) (int64, error) {
+ sess := db.GetEngine(ctx).Where(opts.ToConds())
+ if opts.RepositoryID > 0 || opts.MilestoneID > 0 {
+ sess = sess.Join("INNER", "issue", "issue.id = tracked_time.issue_id")
+ }
+ return sess.Count(&TrackedTime{})
+}
+
+// GetTrackedSeconds returns the sum of tracked seconds matching the given options
+func GetTrackedSeconds(ctx context.Context, opts FindTrackedTimesOptions) (trackedSeconds int64, err error) {
+ return opts.toSession(db.GetEngine(ctx)).SumInt(&TrackedTime{}, "time")
+}
+
+// AddTime will add the given time (in seconds) to the issue
+func AddTime(ctx context.Context, user *user_model.User, issue *Issue, amount int64, created time.Time) (*TrackedTime, error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer committer.Close()
+
+ t, err := addTime(ctx, user, issue, amount, created)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := issue.LoadRepo(ctx); err != nil {
+ return nil, err
+ }
+
+ if _, err := CreateComment(ctx, &CreateCommentOptions{
+ Issue: issue,
+ Repo: issue.Repo,
+ Doer: user,
+ // Content before v1.21 did store the formatted string instead of seconds,
+ // so use "|" as delimiter to mark the new format
+ Content: fmt.Sprintf("|%d", amount),
+ Type: CommentTypeAddTimeManual,
+ TimeID: t.ID,
+ }); err != nil {
+ return nil, err
+ }
+
+ return t, committer.Commit()
+}
+
+func addTime(ctx context.Context, user *user_model.User, issue *Issue, amount int64, created time.Time) (*TrackedTime, error) {
+ if created.IsZero() {
+ created = time.Now()
+ }
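+	// Note: the xorm "created" tag on CreatedUnix sets it to the insert time
+	// automatically; the Created field carries xorm:"-" and is not persisted.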
+ tt := &TrackedTime{
+ IssueID: issue.ID,
+ UserID: user.ID,
+ Time: amount,
+ Created: created,
+ }
+ return tt, db.Insert(ctx, tt)
+}
+
+// TotalTimesForEachUser returns the total time spent (in seconds) on an issue, grouped by user
+func TotalTimesForEachUser(ctx context.Context, options *FindTrackedTimesOptions) (map[*user_model.User]int64, error) {
+ trackedTimes, err := GetTrackedTimes(ctx, options)
+ if err != nil {
+ return nil, err
+ }
+	// Sum the total time per user ID
+ totalTimesByUser := make(map[int64]int64)
+ for _, t := range trackedTimes {
+ totalTimesByUser[t.UserID] += t.Time
+ }
+
+ totalTimes := make(map[*user_model.User]int64)
+	// Resolve each user ID to its user; skip users that no longer exist
+ for userID, total := range totalTimesByUser {
+ user, err := user_model.GetUserByID(ctx, userID)
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ continue
+ }
+ return nil, err
+ }
+ totalTimes[user] = total
+ }
+ return totalTimes, nil
+}
+
+// DeleteIssueUserTimes deletes all tracked times a user has logged on an issue
+func DeleteIssueUserTimes(ctx context.Context, issue *Issue, user *user_model.User) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ opts := FindTrackedTimesOptions{
+ IssueID: issue.ID,
+ UserID: user.ID,
+ }
+
+ removedTime, err := deleteTimes(ctx, opts)
+ if err != nil {
+ return err
+ }
+ if removedTime == 0 {
+ return db.ErrNotExist{Resource: "tracked_time"}
+ }
+
+ if err := issue.LoadRepo(ctx); err != nil {
+ return err
+ }
+ if _, err := CreateComment(ctx, &CreateCommentOptions{
+ Issue: issue,
+ Repo: issue.Repo,
+ Doer: user,
+ // Content before v1.21 did store the formatted string instead of seconds,
+ // so use "|" as delimiter to mark the new format
+ Content: fmt.Sprintf("|%d", removedTime),
+ Type: CommentTypeDeleteTimeManual,
+ }); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
+
+// DeleteTime deletes a specific tracked time entry
+func DeleteTime(ctx context.Context, t *TrackedTime) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if err := t.LoadAttributes(ctx); err != nil {
+ return err
+ }
+
+ if err := deleteTime(ctx, t); err != nil {
+ return err
+ }
+
+ if _, err := CreateComment(ctx, &CreateCommentOptions{
+ Issue: t.Issue,
+ Repo: t.Issue.Repo,
+ Doer: t.User,
+ // Content before v1.21 did store the formatted string instead of seconds,
+ // so use "|" as delimiter to mark the new format
+ Content: fmt.Sprintf("|%d", t.Time),
+ Type: CommentTypeDeleteTimeManual,
+ }); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
+
+func deleteTimes(ctx context.Context, opts FindTrackedTimesOptions) (removedTime int64, err error) {
+ removedTime, err = GetTrackedSeconds(ctx, opts)
+ if err != nil || removedTime == 0 {
+ return removedTime, err
+ }
+
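+	// Soft-delete: mark the matching rows as deleted instead of removing them.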
+ _, err = opts.toSession(db.GetEngine(ctx)).Table("tracked_time").Cols("deleted").Update(&TrackedTime{Deleted: true})
+ return removedTime, err
+}
+
+func deleteTime(ctx context.Context, t *TrackedTime) error {
+ if t.Deleted {
+ return db.ErrNotExist{Resource: "tracked_time", ID: t.ID}
+ }
+ t.Deleted = true
+ _, err := db.GetEngine(ctx).ID(t.ID).Cols("deleted").Update(t)
+ return err
+}
+
+// GetTrackedTimeByID returns the raw TrackedTime with the given ID, without loading attributes
+func GetTrackedTimeByID(ctx context.Context, id int64) (*TrackedTime, error) {
+ time := new(TrackedTime)
+ has, err := db.GetEngine(ctx).ID(id).Get(time)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, db.ErrNotExist{Resource: "tracked_time", ID: id}
+ }
+ return time, nil
+}
+
+// GetIssueTotalTrackedTime returns the total tracked time for issues matching the given conditions.
+func GetIssueTotalTrackedTime(ctx context.Context, opts *IssuesOptions, isClosed optional.Option[bool]) (int64, error) {
+ if len(opts.IssueIDs) <= MaxQueryParameters {
+ return getIssueTotalTrackedTimeChunk(ctx, opts, isClosed, opts.IssueIDs)
+ }
+
+	// If too long a list of IDs is provided,
+	// compute the statistics in smaller chunks and accumulate the results
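+	// (e.g. 750 IDs with a chunk size of 300 are processed as [0:300), [300:600), [600:750))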
+ var accum int64
+ for i := 0; i < len(opts.IssueIDs); {
+ chunk := i + MaxQueryParameters
+ if chunk > len(opts.IssueIDs) {
+ chunk = len(opts.IssueIDs)
+ }
+ time, err := getIssueTotalTrackedTimeChunk(ctx, opts, isClosed, opts.IssueIDs[i:chunk])
+ if err != nil {
+ return 0, err
+ }
+ accum += time
+ i = chunk
+ }
+ return accum, nil
+}
+
+func getIssueTotalTrackedTimeChunk(ctx context.Context, opts *IssuesOptions, isClosed optional.Option[bool], issueIDs []int64) (int64, error) {
+ sumSession := func(opts *IssuesOptions, issueIDs []int64) *xorm.Session {
+ sess := db.GetEngine(ctx).
+ Table("tracked_time").
+ Where("tracked_time.deleted = ?", false).
+ Join("INNER", "issue", "tracked_time.issue_id = issue.id")
+
+ return applyIssuesOptions(sess, opts, issueIDs)
+ }
+
+ type trackedTime struct {
+ Time int64
+ }
+
+ session := sumSession(opts, issueIDs)
+ if isClosed.Has() {
+ session = session.And("issue.is_closed = ?", isClosed.Value())
+ }
+ return session.SumInt(new(trackedTime), "tracked_time.time")
+}
diff --git a/models/issues/tracked_time_test.go b/models/issues/tracked_time_test.go
new file mode 100644
index 0000000..4d4e232
--- /dev/null
+++ b/models/issues/tracked_time_test.go
@@ -0,0 +1,135 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package issues_test
+
+import (
+ "testing"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/optional"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestAddTime(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ org3, err := user_model.GetUserByID(db.DefaultContext, 3)
+ require.NoError(t, err)
+
+ issue1, err := issues_model.GetIssueByID(db.DefaultContext, 1)
+ require.NoError(t, err)
+
+ // 3661 = 1h 1min 1s
+ trackedTime, err := issues_model.AddTime(db.DefaultContext, org3, issue1, 3661, time.Now())
+ require.NoError(t, err)
+ assert.Equal(t, int64(3), trackedTime.UserID)
+ assert.Equal(t, int64(1), trackedTime.IssueID)
+ assert.Equal(t, int64(3661), trackedTime.Time)
+
+ tt := unittest.AssertExistsAndLoadBean(t, &issues_model.TrackedTime{UserID: 3, IssueID: 1})
+ assert.Equal(t, int64(3661), tt.Time)
+
+ comment := unittest.AssertExistsAndLoadBean(t, &issues_model.Comment{Type: issues_model.CommentTypeAddTimeManual, PosterID: 3, IssueID: 1})
+ assert.Equal(t, "|3661", comment.Content)
+}
+
+func TestGetTrackedTimes(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ // by Issue
+ times, err := issues_model.GetTrackedTimes(db.DefaultContext, &issues_model.FindTrackedTimesOptions{IssueID: 1})
+ require.NoError(t, err)
+ assert.Len(t, times, 1)
+ assert.Equal(t, int64(400), times[0].Time)
+
+ times, err = issues_model.GetTrackedTimes(db.DefaultContext, &issues_model.FindTrackedTimesOptions{IssueID: -1})
+ require.NoError(t, err)
+ assert.Empty(t, times)
+
+ // by User
+ times, err = issues_model.GetTrackedTimes(db.DefaultContext, &issues_model.FindTrackedTimesOptions{UserID: 1})
+ require.NoError(t, err)
+ assert.Len(t, times, 3)
+ assert.Equal(t, int64(400), times[0].Time)
+
+ times, err = issues_model.GetTrackedTimes(db.DefaultContext, &issues_model.FindTrackedTimesOptions{UserID: 3})
+ require.NoError(t, err)
+ assert.Empty(t, times)
+
+ // by Repo
+ times, err = issues_model.GetTrackedTimes(db.DefaultContext, &issues_model.FindTrackedTimesOptions{RepositoryID: 2})
+ require.NoError(t, err)
+ assert.Len(t, times, 3)
+ assert.Equal(t, int64(1), times[0].Time)
+ issue, err := issues_model.GetIssueByID(db.DefaultContext, times[0].IssueID)
+ require.NoError(t, err)
+ assert.Equal(t, int64(2), issue.RepoID)
+
+ times, err = issues_model.GetTrackedTimes(db.DefaultContext, &issues_model.FindTrackedTimesOptions{RepositoryID: 1})
+ require.NoError(t, err)
+ assert.Len(t, times, 5)
+
+ times, err = issues_model.GetTrackedTimes(db.DefaultContext, &issues_model.FindTrackedTimesOptions{RepositoryID: 10})
+ require.NoError(t, err)
+ assert.Empty(t, times)
+}
+
+func TestTotalTimesForEachUser(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ total, err := issues_model.TotalTimesForEachUser(db.DefaultContext, &issues_model.FindTrackedTimesOptions{IssueID: 1})
+ require.NoError(t, err)
+ assert.Len(t, total, 1)
+ for user, time := range total {
+ assert.EqualValues(t, 1, user.ID)
+ assert.EqualValues(t, 400, time)
+ }
+
+ total, err = issues_model.TotalTimesForEachUser(db.DefaultContext, &issues_model.FindTrackedTimesOptions{IssueID: 2})
+ require.NoError(t, err)
+ assert.Len(t, total, 2)
+ for user, time := range total {
+ if user.ID == 2 {
+ assert.EqualValues(t, 3662, time)
+ } else if user.ID == 1 {
+ assert.EqualValues(t, 20, time)
+ } else {
+			t.Errorf("unexpected user ID: %d", user.ID)
+ }
+ }
+
+ total, err = issues_model.TotalTimesForEachUser(db.DefaultContext, &issues_model.FindTrackedTimesOptions{IssueID: 5})
+ require.NoError(t, err)
+ assert.Len(t, total, 1)
+ for user, time := range total {
+ assert.EqualValues(t, 2, user.ID)
+ assert.EqualValues(t, 1, time)
+ }
+
+ total, err = issues_model.TotalTimesForEachUser(db.DefaultContext, &issues_model.FindTrackedTimesOptions{IssueID: 4})
+ require.NoError(t, err)
+ assert.Len(t, total, 2)
+}
+
+func TestGetIssueTotalTrackedTime(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ ttt, err := issues_model.GetIssueTotalTrackedTime(db.DefaultContext, &issues_model.IssuesOptions{MilestoneIDs: []int64{1}}, optional.Some(false))
+ require.NoError(t, err)
+ assert.EqualValues(t, 3682, ttt)
+
+ ttt, err = issues_model.GetIssueTotalTrackedTime(db.DefaultContext, &issues_model.IssuesOptions{MilestoneIDs: []int64{1}}, optional.Some(true))
+ require.NoError(t, err)
+ assert.EqualValues(t, 0, ttt)
+
+ ttt, err = issues_model.GetIssueTotalTrackedTime(db.DefaultContext, &issues_model.IssuesOptions{MilestoneIDs: []int64{1}}, optional.None[bool]())
+ require.NoError(t, err)
+ assert.EqualValues(t, 3682, ttt)
+}
diff --git a/models/main_test.go b/models/main_test.go
new file mode 100644
index 0000000..a694130
--- /dev/null
+++ b/models/main_test.go
@@ -0,0 +1,33 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package models
+
+import (
+ "testing"
+
+ activities_model "code.gitea.io/gitea/models/activities"
+ "code.gitea.io/gitea/models/organization"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+
+ _ "code.gitea.io/gitea/models/actions"
+ _ "code.gitea.io/gitea/models/system"
+
+ "github.com/stretchr/testify/require"
+)
+
+// TestFixturesAreConsistent asserts that test fixtures are consistent
+func TestFixturesAreConsistent(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ unittest.CheckConsistencyFor(t,
+ &user_model.User{},
+ &repo_model.Repository{},
+ &organization.Team{},
+ &activities_model.Action{})
+}
+
+func TestMain(m *testing.M) {
+ unittest.MainTest(m)
+}
diff --git a/models/migrations/base/db.go b/models/migrations/base/db.go
new file mode 100644
index 0000000..333fa31
--- /dev/null
+++ b/models/migrations/base/db.go
@@ -0,0 +1,436 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package base
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "regexp"
+ "strings"
+
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+ "xorm.io/xorm/schemas"
+)
+
+// RecreateTables will recreate the tables for the provided beans using the newly provided bean definitions and move all data to the new tables
+// WARNING: YOU MUST PROVIDE THE FULL BEAN DEFINITION
+func RecreateTables(beans ...any) func(*xorm.Engine) error {
+ return func(x *xorm.Engine) error {
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+ sess = sess.StoreEngine("InnoDB")
+ for _, bean := range beans {
+ log.Info("Recreating Table: %s for Bean: %s", x.TableName(bean), reflect.Indirect(reflect.ValueOf(bean)).Type().Name())
+ if err := RecreateTable(sess, bean); err != nil {
+ return err
+ }
+ }
+ return sess.Commit()
+ }
+}
+
+// RecreateTable will recreate the table using the newly provided bean definition and move all data to that new table
+// WARNING: YOU MUST PROVIDE THE FULL BEAN DEFINITION
+// WARNING: YOU MUST COMMIT THE SESSION AT THE END
+func RecreateTable(sess *xorm.Session, bean any) error {
+ // TODO: This will not work if there are foreign keys
+
+ tableName := sess.Engine().TableName(bean)
+ tempTableName := fmt.Sprintf("tmp_recreate__%s", tableName)
+
+ // We need to move the old table away and create a new one with the correct columns
+ // We will need to do this in stages to prevent data loss
+ //
+ // First create the temporary table
+ if err := sess.Table(tempTableName).CreateTable(bean); err != nil {
+ log.Error("Unable to create table %s. Error: %v", tempTableName, err)
+ return err
+ }
+
+ if err := sess.Table(tempTableName).CreateUniques(bean); err != nil {
+ log.Error("Unable to create uniques for table %s. Error: %v", tempTableName, err)
+ return err
+ }
+
+ if err := sess.Table(tempTableName).CreateIndexes(bean); err != nil {
+ log.Error("Unable to create indexes for table %s. Error: %v", tempTableName, err)
+ return err
+ }
+
+	// Work out the column names from the bean - these are the columns to select from the old table and insert into the new table
+ table, err := sess.Engine().TableInfo(bean)
+ if err != nil {
+ log.Error("Unable to get table info. Error: %v", err)
+ return err
+ }
+ newTableColumns := table.Columns()
+ if len(newTableColumns) == 0 {
+ return fmt.Errorf("no columns in new table")
+ }
+ hasID := false
+ for _, column := range newTableColumns {
+ hasID = hasID || (column.IsPrimaryKey && column.IsAutoIncrement)
+ }
+
+ sqlStringBuilder := &strings.Builder{}
+ _, _ = sqlStringBuilder.WriteString("INSERT INTO `")
+ _, _ = sqlStringBuilder.WriteString(tempTableName)
+ _, _ = sqlStringBuilder.WriteString("` (`")
+ _, _ = sqlStringBuilder.WriteString(newTableColumns[0].Name)
+ _, _ = sqlStringBuilder.WriteString("`")
+ for _, column := range newTableColumns[1:] {
+ _, _ = sqlStringBuilder.WriteString(", `")
+ _, _ = sqlStringBuilder.WriteString(column.Name)
+ _, _ = sqlStringBuilder.WriteString("`")
+ }
+ _, _ = sqlStringBuilder.WriteString(")")
+ _, _ = sqlStringBuilder.WriteString(" SELECT ")
+ if newTableColumns[0].Default != "" {
+ _, _ = sqlStringBuilder.WriteString("COALESCE(`")
+ _, _ = sqlStringBuilder.WriteString(newTableColumns[0].Name)
+ _, _ = sqlStringBuilder.WriteString("`, ")
+ _, _ = sqlStringBuilder.WriteString(newTableColumns[0].Default)
+ _, _ = sqlStringBuilder.WriteString(")")
+ } else {
+ _, _ = sqlStringBuilder.WriteString("`")
+ _, _ = sqlStringBuilder.WriteString(newTableColumns[0].Name)
+ _, _ = sqlStringBuilder.WriteString("`")
+ }
+
+ for _, column := range newTableColumns[1:] {
+ if column.Default != "" {
+ _, _ = sqlStringBuilder.WriteString(", COALESCE(`")
+ _, _ = sqlStringBuilder.WriteString(column.Name)
+ _, _ = sqlStringBuilder.WriteString("`, ")
+ _, _ = sqlStringBuilder.WriteString(column.Default)
+ _, _ = sqlStringBuilder.WriteString(")")
+ } else {
+ _, _ = sqlStringBuilder.WriteString(", `")
+ _, _ = sqlStringBuilder.WriteString(column.Name)
+ _, _ = sqlStringBuilder.WriteString("`")
+ }
+ }
+ _, _ = sqlStringBuilder.WriteString(" FROM `")
+ _, _ = sqlStringBuilder.WriteString(tableName)
+ _, _ = sqlStringBuilder.WriteString("`")
+
+ if _, err := sess.Exec(sqlStringBuilder.String()); err != nil {
+ log.Error("Unable to set copy data in to temp table %s. Error: %v", tempTableName, err)
+ return err
+ }
+
+ switch {
+ case setting.Database.Type.IsSQLite3():
+ // SQLite will drop all the constraints on the old table
+ if _, err := sess.Exec(fmt.Sprintf("DROP TABLE `%s`", tableName)); err != nil {
+ log.Error("Unable to drop old table %s. Error: %v", tableName, err)
+ return err
+ }
+
+ if err := sess.Table(tempTableName).DropIndexes(bean); err != nil {
+ log.Error("Unable to drop indexes on temporary table %s. Error: %v", tempTableName, err)
+ return err
+ }
+
+ if _, err := sess.Exec(fmt.Sprintf("ALTER TABLE `%s` RENAME TO `%s`", tempTableName, tableName)); err != nil {
+ log.Error("Unable to rename %s to %s. Error: %v", tempTableName, tableName, err)
+ return err
+ }
+
+ if err := sess.Table(tableName).CreateIndexes(bean); err != nil {
+ log.Error("Unable to recreate indexes on table %s. Error: %v", tableName, err)
+ return err
+ }
+
+ if err := sess.Table(tableName).CreateUniques(bean); err != nil {
+ log.Error("Unable to recreate uniques on table %s. Error: %v", tableName, err)
+ return err
+ }
+
+ case setting.Database.Type.IsMySQL():
+ // MySQL will drop all the constraints on the old table
+ if _, err := sess.Exec(fmt.Sprintf("DROP TABLE `%s`", tableName)); err != nil {
+ log.Error("Unable to drop old table %s. Error: %v", tableName, err)
+ return err
+ }
+
+ if err := sess.Table(tempTableName).DropIndexes(bean); err != nil {
+ log.Error("Unable to drop indexes on temporary table %s. Error: %v", tempTableName, err)
+ return err
+ }
+
+		// MySQL will move all the constraints from the temporary table to the new table on rename
+ if _, err := sess.Exec(fmt.Sprintf("ALTER TABLE `%s` RENAME TO `%s`", tempTableName, tableName)); err != nil {
+ log.Error("Unable to rename %s to %s. Error: %v", tempTableName, tableName, err)
+ return err
+ }
+
+ if err := sess.Table(tableName).CreateIndexes(bean); err != nil {
+ log.Error("Unable to recreate indexes on table %s. Error: %v", tableName, err)
+ return err
+ }
+
+ if err := sess.Table(tableName).CreateUniques(bean); err != nil {
+ log.Error("Unable to recreate uniques on table %s. Error: %v", tableName, err)
+ return err
+ }
+ case setting.Database.Type.IsPostgreSQL():
+ var originalSequences []string
+ type sequenceData struct {
+ LastValue int `xorm:"'last_value'"`
+ IsCalled bool `xorm:"'is_called'"`
+ }
+ sequenceMap := map[string]sequenceData{}
+
+ schema := sess.Engine().Dialect().URI().Schema
+ sess.Engine().SetSchema("")
+ if err := sess.Table("information_schema.sequences").Cols("sequence_name").Where("sequence_name LIKE ? || '_%' AND sequence_catalog = ?", tableName, setting.Database.Name).Find(&originalSequences); err != nil {
+ log.Error("Unable to rename %s to %s. Error: %v", tempTableName, tableName, err)
+ return err
+ }
+ sess.Engine().SetSchema(schema)
+
+ for _, sequence := range originalSequences {
+ sequenceData := sequenceData{}
+ if _, err := sess.Table(sequence).Cols("last_value", "is_called").Get(&sequenceData); err != nil {
+ log.Error("Unable to get last_value and is_called from %s. Error: %v", sequence, err)
+ return err
+ }
+ sequenceMap[sequence] = sequenceData
+ }
+
+ // CASCADE causes postgres to drop all the constraints on the old table
+ if _, err := sess.Exec(fmt.Sprintf("DROP TABLE `%s` CASCADE", tableName)); err != nil {
+ log.Error("Unable to drop old table %s. Error: %v", tableName, err)
+ return err
+ }
+
+ // CASCADE causes postgres to move all the constraints from the temporary table to the new table
+ if _, err := sess.Exec(fmt.Sprintf("ALTER TABLE `%s` RENAME TO `%s`", tempTableName, tableName)); err != nil {
+ log.Error("Unable to rename %s to %s. Error: %v", tempTableName, tableName, err)
+ return err
+ }
+
+ var indices []string
+ sess.Engine().SetSchema("")
+ if err := sess.Table("pg_indexes").Cols("indexname").Where("tablename = ? ", tableName).Find(&indices); err != nil {
+ log.Error("Unable to rename %s to %s. Error: %v", tempTableName, tableName, err)
+ return err
+ }
+ sess.Engine().SetSchema(schema)
+
+ for _, index := range indices {
+ newIndexName := strings.Replace(index, "tmp_recreate__", "", 1)
+ if _, err := sess.Exec(fmt.Sprintf("ALTER INDEX `%s` RENAME TO `%s`", index, newIndexName)); err != nil {
+ log.Error("Unable to rename %s to %s. Error: %v", index, newIndexName, err)
+ return err
+ }
+ }
+
+ var sequences []string
+ sess.Engine().SetSchema("")
+ if err := sess.Table("information_schema.sequences").Cols("sequence_name").Where("sequence_name LIKE 'tmp_recreate__' || ? || '_%' AND sequence_catalog = ?", tableName, setting.Database.Name).Find(&sequences); err != nil {
+ log.Error("Unable to rename %s to %s. Error: %v", tempTableName, tableName, err)
+ return err
+ }
+ sess.Engine().SetSchema(schema)
+
+ for _, sequence := range sequences {
+ newSequenceName := strings.Replace(sequence, "tmp_recreate__", "", 1)
+ if _, err := sess.Exec(fmt.Sprintf("ALTER SEQUENCE `%s` RENAME TO `%s`", sequence, newSequenceName)); err != nil {
+ log.Error("Unable to rename %s sequence to %s. Error: %v", sequence, newSequenceName, err)
+ return err
+ }
+ val, ok := sequenceMap[newSequenceName]
+ if newSequenceName == tableName+"_id_seq" {
+ if ok && val.LastValue != 0 {
+ if _, err := sess.Exec(fmt.Sprintf("SELECT setval('%s', %d, %t)", newSequenceName, val.LastValue, val.IsCalled)); err != nil {
+ log.Error("Unable to reset %s to %d. Error: %v", newSequenceName, val, err)
+ return err
+ }
+ } else {
+ // We're going to try to guess this
+ if _, err := sess.Exec(fmt.Sprintf("SELECT setval('%s', COALESCE((SELECT MAX(id)+1 FROM `%s`), 1), false)", newSequenceName, tableName)); err != nil {
+ log.Error("Unable to reset %s. Error: %v", newSequenceName, err)
+ return err
+ }
+ }
+ } else if ok {
+ if _, err := sess.Exec(fmt.Sprintf("SELECT setval('%s', %d, %t)", newSequenceName, val.LastValue, val.IsCalled)); err != nil {
+ log.Error("Unable to reset %s to %d. Error: %v", newSequenceName, val, err)
+ return err
+ }
+ }
+ }
+
+ default:
+ log.Fatal("Unrecognized DB")
+ }
+ return nil
+}
+
+// WARNING: YOU MUST COMMIT THE SESSION AT THE END
+func DropTableColumns(sess *xorm.Session, tableName string, columnNames ...string) (err error) {
+ if tableName == "" || len(columnNames) == 0 {
+ return nil
+ }
+ // TODO: This will not work if there are foreign keys
+
+ switch {
+ case setting.Database.Type.IsSQLite3():
+ // First drop the indexes on the columns
+ res, errIndex := sess.Query(fmt.Sprintf("PRAGMA index_list(`%s`)", tableName))
+ if errIndex != nil {
+ return errIndex
+ }
+ for _, row := range res {
+ indexName := row["name"]
+ indexRes, err := sess.Query(fmt.Sprintf("PRAGMA index_info(`%s`)", indexName))
+ if err != nil {
+ return err
+ }
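+			// Only single-column indexes are considered here; multi-column indexes are skipped.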
+ if len(indexRes) != 1 {
+ continue
+ }
+ indexColumn := string(indexRes[0]["name"])
+ for _, name := range columnNames {
+ if name == indexColumn {
+ _, err := sess.Exec(fmt.Sprintf("DROP INDEX `%s`", indexName))
+ if err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ // Here we need to get the columns from the original table
+ sql := fmt.Sprintf("SELECT sql FROM sqlite_master WHERE tbl_name='%s' and type='table'", tableName)
+ res, err := sess.Query(sql)
+ if err != nil {
+ return err
+ }
+ tableSQL := string(res[0]["sql"])
+
+ // Get the string offset for column definitions: `CREATE TABLE ( column-definitions... )`
+ columnDefinitionsIndex := strings.Index(tableSQL, "(")
+ if columnDefinitionsIndex < 0 {
+ return errors.New("couldn't find column definitions")
+ }
+
+ // Separate out the column definitions
+ tableSQL = tableSQL[columnDefinitionsIndex:]
+
+ // Remove the required columnNames
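+		// Each match also consumes the trailing "," (or the closing ")", which is restored below)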
+ for _, name := range columnNames {
+ tableSQL = regexp.MustCompile(regexp.QuoteMeta("`"+name+"`")+"[^`,)]*?[,)]").ReplaceAllString(tableSQL, "")
+ }
+
+ // Ensure the query is ended properly
+ tableSQL = strings.TrimSpace(tableSQL)
+ if tableSQL[len(tableSQL)-1] != ')' {
+ if tableSQL[len(tableSQL)-1] == ',' {
+ tableSQL = tableSQL[:len(tableSQL)-1]
+ }
+ tableSQL += ")"
+ }
+
+ // Find all the columns in the table
+ columns := regexp.MustCompile("`([^`]*)`").FindAllString(tableSQL, -1)
+
+ tableSQL = fmt.Sprintf("CREATE TABLE `new_%s_new` ", tableName) + tableSQL
+ if _, err := sess.Exec(tableSQL); err != nil {
+ return err
+ }
+
+ // Now restore the data
+ columnsSeparated := strings.Join(columns, ",")
+ insertSQL := fmt.Sprintf("INSERT INTO `new_%s_new` (%s) SELECT %s FROM %s", tableName, columnsSeparated, columnsSeparated, tableName)
+ if _, err := sess.Exec(insertSQL); err != nil {
+ return err
+ }
+
+ // Now drop the old table
+ if _, err := sess.Exec(fmt.Sprintf("DROP TABLE `%s`", tableName)); err != nil {
+ return err
+ }
+
+ // Rename the table
+ if _, err := sess.Exec(fmt.Sprintf("ALTER TABLE `new_%s_new` RENAME TO `%s`", tableName, tableName)); err != nil {
+ return err
+ }
+
+ case setting.Database.Type.IsPostgreSQL():
+ cols := ""
+ for _, col := range columnNames {
+ if cols != "" {
+ cols += ", "
+ }
+ cols += "DROP COLUMN `" + col + "` CASCADE"
+ }
+ if _, err := sess.Exec(fmt.Sprintf("ALTER TABLE `%s` %s", tableName, cols)); err != nil {
+ return fmt.Errorf("Drop table `%s` columns %v: %v", tableName, columnNames, err)
+ }
+ case setting.Database.Type.IsMySQL():
+ // Drop indexes on columns first
+ sql := fmt.Sprintf("SHOW INDEX FROM %s WHERE column_name IN ('%s')", tableName, strings.Join(columnNames, "','"))
+ res, err := sess.Query(sql)
+ if err != nil {
+ return err
+ }
+ for _, index := range res {
+ indexName := index["column_name"]
+ if len(indexName) > 0 {
+ _, err := sess.Exec(fmt.Sprintf("DROP INDEX `%s` ON `%s`", indexName, tableName))
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ // Now drop the columns
+ cols := ""
+ for _, col := range columnNames {
+ if cols != "" {
+ cols += ", "
+ }
+ cols += "DROP COLUMN `" + col + "`"
+ }
+ if _, err := sess.Exec(fmt.Sprintf("ALTER TABLE `%s` %s", tableName, cols)); err != nil {
+ return fmt.Errorf("Drop table `%s` columns %v: %v", tableName, columnNames, err)
+ }
+ default:
+ log.Fatal("Unrecognized DB")
+ }
+
+ return nil
+}
+
+// ModifyColumn will modify a column's type or other properties. SQLite is not supported
+func ModifyColumn(x *xorm.Engine, tableName string, col *schemas.Column) error {
+ var indexes map[string]*schemas.Index
+ var err error
+
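+	// Note: nothing in this function populates `indexes`, so the deferred
+	// index recreation below is currently a no-op safety net.
+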
+ defer func() {
+ for _, index := range indexes {
+ _, err = x.Exec(x.Dialect().CreateIndexSQL(tableName, index))
+ if err != nil {
+ log.Error("Create index %s on table %s failed: %v", index.Name, tableName, err)
+ }
+ }
+ }()
+
+ alterSQL := x.Dialect().ModifyColumnSQL(tableName, col)
+ if _, err := x.Exec(alterSQL); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/models/migrations/base/db_test.go b/models/migrations/base/db_test.go
new file mode 100644
index 0000000..4010a14
--- /dev/null
+++ b/models/migrations/base/db_test.go
@@ -0,0 +1,98 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package base
+
+import (
+ "testing"
+
+ migrations_tests "code.gitea.io/gitea/models/migrations/test"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm/names"
+)
+
+func Test_DropTableColumns(t *testing.T) {
+ x, deferable := migrations_tests.PrepareTestEnv(t, 0)
+ if x == nil || t.Failed() {
+ defer deferable()
+ return
+ }
+ defer deferable()
+
+ type DropTest struct {
+ ID int64 `xorm:"pk autoincr"`
+ FirstColumn string
+ ToDropColumn string `xorm:"unique"`
+ AnotherColumn int64
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ }
+
+ columns := []string{
+ "first_column",
+ "to_drop_column",
+ "another_column",
+ "created_unix",
+ "updated_unix",
+ }
+
+ x.SetMapper(names.GonicMapper{})
+
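+	// Drop every suffix columns[i:], then every pairing of columns[i] with a
+	// later suffix, recreating the table between runs.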
+ for i := range columns {
+ if err := x.Sync(new(DropTest)); err != nil {
+ t.Errorf("unable to create DropTest table: %v", err)
+ return
+ }
+
+ sess := x.NewSession()
+ if err := sess.Begin(); err != nil {
+ sess.Close()
+ t.Errorf("unable to begin transaction: %v", err)
+ return
+ }
+ if err := DropTableColumns(sess, "drop_test", columns[i:]...); err != nil {
+ sess.Close()
+ t.Errorf("Unable to drop columns[%d:]: %s from drop_test: %v", i, columns[i:], err)
+ return
+ }
+ if err := sess.Commit(); err != nil {
+ sess.Close()
+ t.Errorf("unable to commit transaction: %v", err)
+ return
+ }
+ sess.Close()
+ if err := x.DropTables(new(DropTest)); err != nil {
+ t.Errorf("unable to drop table: %v", err)
+ return
+ }
+ for j := range columns[i+1:] {
+ if err := x.Sync(new(DropTest)); err != nil {
+ t.Errorf("unable to create DropTest table: %v", err)
+ return
+ }
+ dropcols := append([]string{columns[i]}, columns[j+i+1:]...)
+ sess := x.NewSession()
+ if err := sess.Begin(); err != nil {
+ sess.Close()
+ t.Errorf("unable to begin transaction: %v", err)
+ return
+ }
+ if err := DropTableColumns(sess, "drop_test", dropcols...); err != nil {
+ sess.Close()
+ t.Errorf("Unable to drop columns: %s from drop_test: %v", dropcols, err)
+ return
+ }
+ if err := sess.Commit(); err != nil {
+ sess.Close()
+ t.Errorf("unable to commit transaction: %v", err)
+ return
+ }
+ sess.Close()
+ if err := x.DropTables(new(DropTest)); err != nil {
+ t.Errorf("unable to drop table: %v", err)
+ return
+ }
+ }
+ }
+}
diff --git a/models/migrations/base/hash.go b/models/migrations/base/hash.go
new file mode 100644
index 0000000..00fd1ef
--- /dev/null
+++ b/models/migrations/base/hash.go
@@ -0,0 +1,16 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package base
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+
+ "golang.org/x/crypto/pbkdf2"
+)
+
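+// HashToken derives a hex-encoded hash of the token using PBKDF2 with the
+// given salt (10,000 iterations of SHA-256, 50-byte derived key).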
+func HashToken(token, salt string) string {
+ tempHash := pbkdf2.Key([]byte(token), []byte(salt), 10000, 50, sha256.New)
+ return hex.EncodeToString(tempHash)
+}
diff --git a/models/migrations/base/main_test.go b/models/migrations/base/main_test.go
new file mode 100644
index 0000000..c625ef0
--- /dev/null
+++ b/models/migrations/base/main_test.go
@@ -0,0 +1,14 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package base
+
+import (
+ "testing"
+
+ migrations_tests "code.gitea.io/gitea/models/migrations/test"
+)
+
+func TestMain(m *testing.M) {
+ migrations_tests.MainTest(m)
+}
diff --git a/models/migrations/fixtures/Test_AddCombinedIndexToIssueUser/issue_user.yml b/models/migrations/fixtures/Test_AddCombinedIndexToIssueUser/issue_user.yml
new file mode 100644
index 0000000..b9995ac
--- /dev/null
+++ b/models/migrations/fixtures/Test_AddCombinedIndexToIssueUser/issue_user.yml
@@ -0,0 +1,13 @@
+-
+ id: 1
+ uid: 1
+ issue_id: 1
+ is_read: true
+ is_mentioned: false
+
+-
+ id: 2
+ uid: 2
+ issue_id: 1
+ is_read: true
+ is_mentioned: false
diff --git a/models/migrations/fixtures/Test_AddConfidentialClientColumnToOAuth2ApplicationTable/oauth2_application.yml b/models/migrations/fixtures/Test_AddConfidentialClientColumnToOAuth2ApplicationTable/oauth2_application.yml
new file mode 100644
index 0000000..a88c2ef
--- /dev/null
+++ b/models/migrations/fixtures/Test_AddConfidentialClientColumnToOAuth2ApplicationTable/oauth2_application.yml
@@ -0,0 +1,2 @@
+-
+ id: 1
diff --git a/models/migrations/fixtures/Test_AddHeaderAuthorizationEncryptedColWebhook/expected_webhook.yml b/models/migrations/fixtures/Test_AddHeaderAuthorizationEncryptedColWebhook/expected_webhook.yml
new file mode 100644
index 0000000..f623999
--- /dev/null
+++ b/models/migrations/fixtures/Test_AddHeaderAuthorizationEncryptedColWebhook/expected_webhook.yml
@@ -0,0 +1,9 @@
+# for matrix, the access_token has been moved to "header_authorization"
+-
+ id: 1
+ meta: '{"homeserver_url":"https://matrix.example.com","room_id":"roomID","message_type":1}'
+ header_authorization: "Bearer s3cr3t"
+-
+ id: 2
+ meta: ''
+ header_authorization: ""
diff --git a/models/migrations/fixtures/Test_AddHeaderAuthorizationEncryptedColWebhook/hook_task.yml b/models/migrations/fixtures/Test_AddHeaderAuthorizationEncryptedColWebhook/hook_task.yml
new file mode 100644
index 0000000..8f61d6e
--- /dev/null
+++ b/models/migrations/fixtures/Test_AddHeaderAuthorizationEncryptedColWebhook/hook_task.yml
@@ -0,0 +1,8 @@
+# unsafe payload
+- id: 1
+ hook_id: 1
+ payload_content: '{"homeserver_url":"https://matrix.example.com","room_id":"roomID","access_token":"s3cr3t","message_type":1}'
+# safe payload
+- id: 2
+ hook_id: 2
+ payload_content: '{"homeserver_url":"https://matrix.example.com","room_id":"roomID","message_type":1}'
diff --git a/models/migrations/fixtures/Test_AddHeaderAuthorizationEncryptedColWebhook/webhook.yml b/models/migrations/fixtures/Test_AddHeaderAuthorizationEncryptedColWebhook/webhook.yml
new file mode 100644
index 0000000..ec6f9bf
--- /dev/null
+++ b/models/migrations/fixtures/Test_AddHeaderAuthorizationEncryptedColWebhook/webhook.yml
@@ -0,0 +1,10 @@
+# matrix webhook
+- id: 1
+ type: matrix
+ meta: '{"homeserver_url":"https://matrix.example.com","room_id":"roomID","access_token":"s3cr3t","message_type":1}'
+ header_authorization_encrypted: ''
+# gitea webhook
+- id: 2
+ type: gitea
+ meta: ''
+ header_authorization_encrypted: ''
diff --git a/models/migrations/fixtures/Test_AddIssueResourceIndexTable/issue.yml b/models/migrations/fixtures/Test_AddIssueResourceIndexTable/issue.yml
new file mode 100644
index 0000000..f95d479
--- /dev/null
+++ b/models/migrations/fixtures/Test_AddIssueResourceIndexTable/issue.yml
@@ -0,0 +1,4 @@
+-
+ id: 1
+ repo_id: 1
+ index: 1
diff --git a/models/migrations/fixtures/Test_AddPayloadVersionToHookTaskTable/hook_task.yml b/models/migrations/fixtures/Test_AddPayloadVersionToHookTaskTable/hook_task.yml
new file mode 100644
index 0000000..716a2a0
--- /dev/null
+++ b/models/migrations/fixtures/Test_AddPayloadVersionToHookTaskTable/hook_task.yml
@@ -0,0 +1,16 @@
+- id: 11
+ uuid: uuid11
+ hook_id: 1
+ payload_content: >
+ {"data":"payload"}
+ event_type: create
+ delivered: 1706106005
+
+- id: 101
+ uuid: uuid101
+ hook_id: 1
+ payload_content: >
+ {"data":"payload"}
+ event_type: create
+ delivered: 1706106006
+ is_delivered: true
diff --git a/models/migrations/fixtures/Test_AddPayloadVersionToHookTaskTable/hook_task_migrated.yml b/models/migrations/fixtures/Test_AddPayloadVersionToHookTaskTable/hook_task_migrated.yml
new file mode 100644
index 0000000..913d927
--- /dev/null
+++ b/models/migrations/fixtures/Test_AddPayloadVersionToHookTaskTable/hook_task_migrated.yml
@@ -0,0 +1,18 @@
+- id: 11
+ uuid: uuid11
+ hook_id: 1
+ payload_content: >
+ {"data":"payload"}
+ event_type: create
+ delivered: 1706106005
+ payload_version: 1
+
+- id: 101
+ uuid: uuid101
+ hook_id: 1
+ payload_content: >
+ {"data":"payload"}
+ event_type: create
+ delivered: 1706106006
+ is_delivered: true
+ payload_version: 1
diff --git a/models/migrations/fixtures/Test_AddRepoIDForAttachment/attachment.yml b/models/migrations/fixtures/Test_AddRepoIDForAttachment/attachment.yml
new file mode 100644
index 0000000..056236b
--- /dev/null
+++ b/models/migrations/fixtures/Test_AddRepoIDForAttachment/attachment.yml
@@ -0,0 +1,11 @@
+-
+ id: 1
+ uuid: a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11
+ issue_id: 1
+ release_id: 0
+
+-
+ id: 2
+ uuid: a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a12
+ issue_id: 0
+ release_id: 1
diff --git a/models/migrations/fixtures/Test_AddRepoIDForAttachment/issue.yml b/models/migrations/fixtures/Test_AddRepoIDForAttachment/issue.yml
new file mode 100644
index 0000000..7f32550
--- /dev/null
+++ b/models/migrations/fixtures/Test_AddRepoIDForAttachment/issue.yml
@@ -0,0 +1,3 @@
+-
+ id: 1
+ repo_id: 1
diff --git a/models/migrations/fixtures/Test_AddRepoIDForAttachment/release.yml b/models/migrations/fixtures/Test_AddRepoIDForAttachment/release.yml
new file mode 100644
index 0000000..7f32550
--- /dev/null
+++ b/models/migrations/fixtures/Test_AddRepoIDForAttachment/release.yml
@@ -0,0 +1,3 @@
+-
+ id: 1
+ repo_id: 1
diff --git a/models/migrations/fixtures/Test_AddUniqueIndexForProjectIssue/project_issue.yml b/models/migrations/fixtures/Test_AddUniqueIndexForProjectIssue/project_issue.yml
new file mode 100644
index 0000000..6feaeb3
--- /dev/null
+++ b/models/migrations/fixtures/Test_AddUniqueIndexForProjectIssue/project_issue.yml
@@ -0,0 +1,9 @@
+-
+ id: 1
+ project_id: 1
+ issue_id: 1
+
+-
+ id: 2
+ project_id: 1
+ issue_id: 1
diff --git a/models/migrations/fixtures/Test_CheckProjectColumnsConsistency/project.yml b/models/migrations/fixtures/Test_CheckProjectColumnsConsistency/project.yml
new file mode 100644
index 0000000..2450d20
--- /dev/null
+++ b/models/migrations/fixtures/Test_CheckProjectColumnsConsistency/project.yml
@@ -0,0 +1,23 @@
+-
+ id: 1
+ title: project without default column
+ owner_id: 2
+ repo_id: 0
+ is_closed: false
+ creator_id: 2
+ board_type: 1
+ type: 2
+ created_unix: 1688973000
+ updated_unix: 1688973000
+
+-
+ id: 2
+ title: project with multiple default columns
+ owner_id: 2
+ repo_id: 0
+ is_closed: false
+ creator_id: 2
+ board_type: 1
+ type: 2
+ created_unix: 1688973000
+ updated_unix: 1688973000
diff --git a/models/migrations/fixtures/Test_CheckProjectColumnsConsistency/project_board.yml b/models/migrations/fixtures/Test_CheckProjectColumnsConsistency/project_board.yml
new file mode 100644
index 0000000..2e1b1c7
--- /dev/null
+++ b/models/migrations/fixtures/Test_CheckProjectColumnsConsistency/project_board.yml
@@ -0,0 +1,26 @@
+-
+ id: 1
+ project_id: 1
+ title: Done
+ creator_id: 2
+ default: false
+ created_unix: 1588117528
+ updated_unix: 1588117528
+
+-
+ id: 2
+ project_id: 2
+ title: Backlog
+ creator_id: 2
+ default: true
+ created_unix: 1588117528
+ updated_unix: 1588117528
+
+-
+ id: 3
+ project_id: 2
+ title: Uncategorized
+ creator_id: 2
+ default: true
+ created_unix: 1588117528
+ updated_unix: 1588117528
diff --git a/models/migrations/fixtures/Test_DeleteOrphanedIssueLabels/issue_label.yml b/models/migrations/fixtures/Test_DeleteOrphanedIssueLabels/issue_label.yml
new file mode 100644
index 0000000..b02cb57
--- /dev/null
+++ b/models/migrations/fixtures/Test_DeleteOrphanedIssueLabels/issue_label.yml
@@ -0,0 +1,28 @@
+# Issue_Label 1 should not be deleted
+-
+ id: 1
+ issue_id: 1
+ label_id: 1
+
+# Issue_Label 2 should be deleted
+-
+ id: 2
+ issue_id: 5
+ label_id: 99
+
+# Issue_Label 3 should not be deleted
+-
+ id: 3
+ issue_id: 2
+ label_id: 1
+
+# Issue_Label 4 should not be deleted
+-
+ id: 4
+ issue_id: 2
+ label_id: 4
+
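+# Issue_Label 5 should be deleted (label 87 does not exist)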
+-
+ id: 5
+ issue_id: 2
+ label_id: 87
diff --git a/models/migrations/fixtures/Test_DeleteOrphanedIssueLabels/label.yml b/models/migrations/fixtures/Test_DeleteOrphanedIssueLabels/label.yml
new file mode 100644
index 0000000..fa9658a
--- /dev/null
+++ b/models/migrations/fixtures/Test_DeleteOrphanedIssueLabels/label.yml
@@ -0,0 +1,43 @@
+-
+ id: 1
+ repo_id: 1
+ org_id: 0
+ name: label1
+ color: '#abcdef'
+ num_issues: 2
+ num_closed_issues: 0
+
+-
+ id: 2
+ repo_id: 1
+ org_id: 0
+ name: label2
+ color: '#000000'
+ num_issues: 1
+ num_closed_issues: 1
+-
+ id: 3
+ repo_id: 0
+ org_id: 3
+ name: orglabel3
+ color: '#abcdef'
+ num_issues: 0
+ num_closed_issues: 0
+
+-
+ id: 4
+ repo_id: 0
+ org_id: 3
+ name: orglabel4
+ color: '#000000'
+ num_issues: 1
+ num_closed_issues: 0
+
+-
+ id: 5
+ repo_id: 10
+ org_id: 0
+ name: pull-test-label
+ color: '#000000'
+ num_issues: 0
+ num_closed_issues: 0
diff --git a/models/migrations/fixtures/Test_RemigrateU2FCredentials/expected_webauthn_credential.yml b/models/migrations/fixtures/Test_RemigrateU2FCredentials/expected_webauthn_credential.yml
new file mode 100644
index 0000000..0e68a5d
--- /dev/null
+++ b/models/migrations/fixtures/Test_RemigrateU2FCredentials/expected_webauthn_credential.yml
@@ -0,0 +1,12 @@
+-
+ id: 1
+ credential_id: "TVHE44TOH7DF7V48SEAIT3EMMJ7TGBOQ289E5AQB34S98LFCUFJ7U2NAVI8RJG6K2F4TC8AQ8KBNO7AGEOQOL9NE43GR63HTEHJSLOG="
+-
+ id: 2
+ credential_id: "TVHE44TOH7DF7V48SEAIT3EMMJ7TGBOQ289E5AQB34S98LFCUFJ7U2NAVI8RJG6K2F4TC8AQ8KBNO7AGEOQOL9NE43GR63HTEHJSLOG="
+-
+ id: 3
+ credential_id: "TVHE44TOH7DF7V48SEAIT3EMMJ7TGBOQ289E5AQB34S98LFCUFJ7U2NAVI8RJG6K2F4TC8AQ8KBNO7AGEOQOL9NE43GR63HTEHJSLOG="
+-
+ id: 4
+ credential_id: "TVHE44TOH7DF7V48SEAIT3EMMJ7TGBOQ289E5AQB34S98LFCUFJ7U2NAVI8RJG6K2F4TC8AQ8KBNO7AGEOQOL9NE43GR63HTEHJSLOG="
diff --git a/models/migrations/fixtures/Test_RemigrateU2FCredentials/u2f_registration.yml b/models/migrations/fixtures/Test_RemigrateU2FCredentials/u2f_registration.yml
new file mode 100644
index 0000000..5a7b70f
--- /dev/null
+++ b/models/migrations/fixtures/Test_RemigrateU2FCredentials/u2f_registration.yml
@@ -0,0 +1,21 @@
+-
+ id: 1
+ name: "u2fkey-correctly-migrated"
+ user_id: 1
+ raw: 0x05040d0967a2cad045011631187576492a0beb5b377954b4f694c5afc8bdf25270f87f09a9ab6ce9c282f447ba71b2f2bae2105b32b847e0704f310f48644e3eddf240efe2e213b889daf3fc88e3952e8dd6b4cfd82f1a1212e2ab4b19389455ecf3e67f0aeafc91b9c0d413c9d6215a45177c1d5076358aa6ee20e1b30e3d7467cae2308202bd308201a5a00302010202041e8f8734300d06092a864886f70d01010b0500302e312c302a0603550403132359756269636f2055324620526f6f742043412053657269616c203435373230303633313020170d3134303830313030303030305a180f32303530303930343030303030305a306e310b300906035504061302534531123010060355040a0c0959756269636f20414231223020060355040b0c1941757468656e74696361746f72204174746573746174696f6e3127302506035504030c1e59756269636f205532462045452053657269616c203531323732323734303059301306072a8648ce3d020106082a8648ce3d03010703420004a879f82338ed1494bac0704bcc7fc663d1b271715976243101c7605115d7c1529e281c1c67322d384b5cd55dd3e9818d5fd85c22af326e0c64fc20afe33f2366a36c306a302206092b0601040182c40a020415312e332e362e312e342e312e34313438322e312e373013060b2b0601040182e51c0201010404030204303021060b2b0601040182e51c010104041204102fc0579f811347eab116bb5a8db9202a300c0603551d130101ff04023000300d06092a864886f70d01010b050003820101008693ff62df0d5779d4748d7fc8d10227318a8e580e6a3a57c108e94e03c38568b366894fce5624be4a3efd7f34118b3d993743f792a1989160c8fc9ae0b04e3df9ee15e3e88c04fc82a8dcbf5818e108dcc2968577ae79ff662b94734e3dec4597305d73e6e55ee2beb9cd9678ca0935e533eb638f8e26fabb817cda441fbe9831832ae5f6e2ad992f9ebbdb4c62238b8f8d7ab481d6d3263bcdbf9e4a57550370988ad5813440fa032cadb6723cadd8f8d7ba809f75b43cffa0a5b9add14232ef9d9e14812638233c4ca4a873b9f8ac98e32ba19167606e15909fcddb4a2dffbdae4620249f9a6646ac81e4832d1119febfaa731a882da25a77827d46d190173046022100b579338a44c236d3f214b2e150011a08cf251193ecfae2244edb0a5794e9b301022100fab468862c47d98204d437cf2be8c54a5a4ecd1ebb1c61a6c23da7b9c75f6841
+ counter: 0
+- id: 2
+ name: "u2fkey-incorrectly-migrated"
+ user_id: 1
+ raw: 0x05040d0967a2cad045011631187576492a0beb5b377954b4f694c5afc8bdf25270f87f09a9ab6ce9c282f447ba71b2f2bae2105b32b847e0704f310f48644e3eddf240efe2e213b889daf3fc88e3952e8dd6b4cfd82f1a1212e2ab4b19389455ecf3e67f0aeafc91b9c0d413c9d6215a45177c1d5076358aa6ee20e1b30e3d7467cae2308202bd308201a5a00302010202041e8f8734300d06092a864886f70d01010b0500302e312c302a0603550403132359756269636f2055324620526f6f742043412053657269616c203435373230303633313020170d3134303830313030303030305a180f32303530303930343030303030305a306e310b300906035504061302534531123010060355040a0c0959756269636f20414231223020060355040b0c1941757468656e74696361746f72204174746573746174696f6e3127302506035504030c1e59756269636f205532462045452053657269616c203531323732323734303059301306072a8648ce3d020106082a8648ce3d03010703420004a879f82338ed1494bac0704bcc7fc663d1b271715976243101c7605115d7c1529e281c1c67322d384b5cd55dd3e9818d5fd85c22af326e0c64fc20afe33f2366a36c306a302206092b0601040182c40a020415312e332e362e312e342e312e34313438322e312e373013060b2b0601040182e51c0201010404030204303021060b2b0601040182e51c010104041204102fc0579f811347eab116bb5a8db9202a300c0603551d130101ff04023000300d06092a864886f70d01010b050003820101008693ff62df0d5779d4748d7fc8d10227318a8e580e6a3a57c108e94e03c38568b366894fce5624be4a3efd7f34118b3d993743f792a1989160c8fc9ae0b04e3df9ee15e3e88c04fc82a8dcbf5818e108dcc2968577ae79ff662b94734e3dec4597305d73e6e55ee2beb9cd9678ca0935e533eb638f8e26fabb817cda441fbe9831832ae5f6e2ad992f9ebbdb4c62238b8f8d7ab481d6d3263bcdbf9e4a57550370988ad5813440fa032cadb6723cadd8f8d7ba809f75b43cffa0a5b9add14232ef9d9e14812638233c4ca4a873b9f8ac98e32ba19167606e15909fcddb4a2dffbdae4620249f9a6646ac81e4832d1119febfaa731a882da25a77827d46d190173046022100b579338a44c236d3f214b2e150011a08cf251193ecfae2244edb0a5794e9b301022100fab468862c47d98204d437cf2be8c54a5a4ecd1ebb1c61a6c23da7b9c75f6841
+ counter: 0
+- id: 3
+ name: "u2fkey-deleted"
+ user_id: 1
+ raw: 0x05040d0967a2cad045011631187576492a0beb5b377954b4f694c5afc8bdf25270f87f09a9ab6ce9c282f447ba71b2f2bae2105b32b847e0704f310f48644e3eddf240efe2e213b889daf3fc88e3952e8dd6b4cfd82f1a1212e2ab4b19389455ecf3e67f0aeafc91b9c0d413c9d6215a45177c1d5076358aa6ee20e1b30e3d7467cae2308202bd308201a5a00302010202041e8f8734300d06092a864886f70d01010b0500302e312c302a0603550403132359756269636f2055324620526f6f742043412053657269616c203435373230303633313020170d3134303830313030303030305a180f32303530303930343030303030305a306e310b300906035504061302534531123010060355040a0c0959756269636f20414231223020060355040b0c1941757468656e74696361746f72204174746573746174696f6e3127302506035504030c1e59756269636f205532462045452053657269616c203531323732323734303059301306072a8648ce3d020106082a8648ce3d03010703420004a879f82338ed1494bac0704bcc7fc663d1b271715976243101c7605115d7c1529e281c1c67322d384b5cd55dd3e9818d5fd85c22af326e0c64fc20afe33f2366a36c306a302206092b0601040182c40a020415312e332e362e312e342e312e34313438322e312e373013060b2b0601040182e51c0201010404030204303021060b2b0601040182e51c010104041204102fc0579f811347eab116bb5a8db9202a300c0603551d130101ff04023000300d06092a864886f70d01010b050003820101008693ff62df0d5779d4748d7fc8d10227318a8e580e6a3a57c108e94e03c38568b366894fce5624be4a3efd7f34118b3d993743f792a1989160c8fc9ae0b04e3df9ee15e3e88c04fc82a8dcbf5818e108dcc2968577ae79ff662b94734e3dec4597305d73e6e55ee2beb9cd9678ca0935e533eb638f8e26fabb817cda441fbe9831832ae5f6e2ad992f9ebbdb4c62238b8f8d7ab481d6d3263bcdbf9e4a57550370988ad5813440fa032cadb6723cadd8f8d7ba809f75b43cffa0a5b9add14232ef9d9e14812638233c4ca4a873b9f8ac98e32ba19167606e15909fcddb4a2dffbdae4620249f9a6646ac81e4832d1119febfaa731a882da25a77827d46d190173046022100b579338a44c236d3f214b2e150011a08cf251193ecfae2244edb0a5794e9b301022100fab468862c47d98204d437cf2be8c54a5a4ecd1ebb1c61a6c23da7b9c75f6841
+ counter: 0
+- id: 4
+ name: "u2fkey-wrong-user-id"
+ user_id: 2
+ raw: 0x05040d0967a2cad045011631187576492a0beb5b377954b4f694c5afc8bdf25270f87f09a9ab6ce9c282f447ba71b2f2bae2105b32b847e0704f310f48644e3eddf240efe2e213b889daf3fc88e3952e8dd6b4cfd82f1a1212e2ab4b19389455ecf3e67f0aeafc91b9c0d413c9d6215a45177c1d5076358aa6ee20e1b30e3d7467cae2308202bd308201a5a00302010202041e8f8734300d06092a864886f70d01010b0500302e312c302a0603550403132359756269636f2055324620526f6f742043412053657269616c203435373230303633313020170d3134303830313030303030305a180f32303530303930343030303030305a306e310b300906035504061302534531123010060355040a0c0959756269636f20414231223020060355040b0c1941757468656e74696361746f72204174746573746174696f6e3127302506035504030c1e59756269636f205532462045452053657269616c203531323732323734303059301306072a8648ce3d020106082a8648ce3d03010703420004a879f82338ed1494bac0704bcc7fc663d1b271715976243101c7605115d7c1529e281c1c67322d384b5cd55dd3e9818d5fd85c22af326e0c64fc20afe33f2366a36c306a302206092b0601040182c40a020415312e332e362e312e342e312e34313438322e312e373013060b2b0601040182e51c0201010404030204303021060b2b0601040182e51c010104041204102fc0579f811347eab116bb5a8db9202a300c0603551d130101ff04023000300d06092a864886f70d01010b050003820101008693ff62df0d5779d4748d7fc8d10227318a8e580e6a3a57c108e94e03c38568b366894fce5624be4a3efd7f34118b3d993743f792a1989160c8fc9ae0b04e3df9ee15e3e88c04fc82a8dcbf5818e108dcc2968577ae79ff662b94734e3dec4597305d73e6e55ee2beb9cd9678ca0935e533eb638f8e26fabb817cda441fbe9831832ae5f6e2ad992f9ebbdb4c62238b8f8d7ab481d6d3263bcdbf9e4a57550370988ad5813440fa032cadb6723cadd8f8d7ba809f75b43cffa0a5b9add14232ef9d9e14812638233c4ca4a873b9f8ac98e32ba19167606e15909fcddb4a2dffbdae4620249f9a6646ac81e4832d1119febfaa731a882da25a77827d46d190173046022100b579338a44c236d3f214b2e150011a08cf251193ecfae2244edb0a5794e9b301022100fab468862c47d98204d437cf2be8c54a5a4ecd1ebb1c61a6c23da7b9c75f6841
+ counter: 0
diff --git a/models/migrations/fixtures/Test_RemigrateU2FCredentials/webauthn_credential.yml b/models/migrations/fixtures/Test_RemigrateU2FCredentials/webauthn_credential.yml
new file mode 100644
index 0000000..7f9f10f
--- /dev/null
+++ b/models/migrations/fixtures/Test_RemigrateU2FCredentials/webauthn_credential.yml
@@ -0,0 +1,30 @@
+-
+ id: 1
+ lower_name: "u2fkey-correctly-migrated"
+ name: "u2fkey-correctly-migrated"
+ user_id: 1
+ credential_id: "TVHE44TOH7DF7V48SEAIT3EMMJ7TGBOQ289E5AQB34S98LFCUFJ7U2NAVI8RJG6K2F4TC8AQ8KBNO7AGEOQOL9NE43GR63HTEHJSLOG="
+ public_key: 0x040d0967a2cad045011631187576492a0beb5b377954b4f694c5afc8bdf25270f87f09a9ab6ce9c282f447ba71b2f2bae2105b32b847e0704f310f48644e3eddf2
+ attestation_type: 'fido-u2f'
+ sign_count: 1
+ clone_warning: false
+-
+ id: 2
+ lower_name: "u2fkey-incorrectly-migrated"
+ name: "u2fkey-incorrectly-migrated"
+ user_id: 1
+ credential_id: "TVHE44TOH7DF7V48SEAIT3EMMJ7TGBOQ289E5AQB34S98LFCUFJ7U2NAVI8RJG6K2F4TC8A"
+ public_key: 0x040d0967a2cad045011631187576492a0beb5b377954b4f694c5afc8bdf25270f87f09a9ab6ce9c282f447ba71b2f2bae2105b32b847e0704f310f48644e3eddf2
+ attestation_type: 'fido-u2f'
+ sign_count: 1
+ clone_warning: false
+-
+ id: 4
+ lower_name: "u2fkey-wrong-user-id"
+ name: "u2fkey-wrong-user-id"
+ user_id: 1
+ credential_id: "THIS SHOULD CHANGE"
+ public_key: 0x040d0967a2cad045011631187576492a0beb5b377954b4f694c5afc8bdf25270f87f09a9ab6ce9c282f447ba71b2f2bae2105b32b847e0704f310f48644e3eddf2
+ attestation_type: 'fido-u2f'
+ sign_count: 1
+ clone_warning: false
diff --git a/models/migrations/fixtures/Test_RemoveInvalidLabels/comment.yml b/models/migrations/fixtures/Test_RemoveInvalidLabels/comment.yml
new file mode 100644
index 0000000..4f44e29
--- /dev/null
+++ b/models/migrations/fixtures/Test_RemoveInvalidLabels/comment.yml
@@ -0,0 +1,52 @@
+# type Comment struct {
+# ID int64 `xorm:"pk autoincr"`
+# Type int `xorm:"INDEX"`
+# IssueID int64 `xorm:"INDEX"`
+# LabelID int64
+# }
+#
+# we are only interested in type 7
+#
+
+-
+ id: 1 # Should remain
+ type: 6
+ issue_id: 1
+ label_id: 0
+ should_remain: true
+-
+ id: 2 # Should remain
+ type: 7
+ issue_id: 1 # repo_id: 1
+ label_id: 1 # repo_id: 1
+ should_remain: true
+-
+ id: 3 # Should remain
+ type: 7
+ issue_id: 2 # repo_id: 2 owner_id: 1
+ label_id: 2 # org_id: 1
+ should_remain: true
+-
+ id: 4 # Should be DELETED
+ type: 7
+ issue_id: 1 # repo_id: 1
+ label_id: 3 # repo_id: 2
+ should_remain: false
+-
+ id: 5 # Should remain
+ type: 7
+ issue_id: 3 # repo_id: 1
+ label_id: 1 # repo_id: 1
+ should_remain: true
+-
+ id: 6 # Should be DELETED
+ type: 7
+ issue_id: 3 # repo_id: 1 owner_id: 2
+ label_id: 2 # org_id: 1
+ should_remain: false
+-
+ id: 7 # Should be DELETED
+ type: 7
+ issue_id: 3 # repo_id: 1 owner_id: 2
+ label_id: 5 # repo_id: 3
+ should_remain: false
diff --git a/models/migrations/fixtures/Test_RemoveInvalidLabels/issue.yml b/models/migrations/fixtures/Test_RemoveInvalidLabels/issue.yml
new file mode 100644
index 0000000..46ad46c
--- /dev/null
+++ b/models/migrations/fixtures/Test_RemoveInvalidLabels/issue.yml
@@ -0,0 +1,21 @@
+# type Issue struct {
+# ID int64 `xorm:"pk autoincr"`
+# RepoID int64 `xorm:"INDEX UNIQUE(repo_index)"`
+# Index int64 `xorm:"UNIQUE(repo_index)"` // Index in one repository.
+# }
+-
+ id: 1
+ repo_id: 1
+ index: 1
+-
+ id: 2
+ repo_id: 2
+ index: 1
+-
+ id: 3
+ repo_id: 1
+ index: 2
+-
+ id: 4
+ repo_id: 3
+ index: 1
diff --git a/models/migrations/fixtures/Test_RemoveInvalidLabels/issue_label.yml b/models/migrations/fixtures/Test_RemoveInvalidLabels/issue_label.yml
new file mode 100644
index 0000000..5f5b8cb
--- /dev/null
+++ b/models/migrations/fixtures/Test_RemoveInvalidLabels/issue_label.yml
@@ -0,0 +1,35 @@
+# type IssueLabel struct {
+# ID int64 `xorm:"pk autoincr"`
+# IssueID int64 `xorm:"UNIQUE(s)"`
+# LabelID int64 `xorm:"UNIQUE(s)"`
+# }
+-
+ id: 1 # Should remain - matches comment 2
+ issue_id: 1
+ label_id: 1
+ should_remain: true
+-
+ id: 2 # Should remain
+ issue_id: 2
+ label_id: 2
+ should_remain: true
+-
+ id: 3 # Should be deleted
+ issue_id: 1
+ label_id: 3
+ should_remain: false
+-
+ id: 4 # Should remain
+ issue_id: 3
+ label_id: 1
+ should_remain: true
+-
+ id: 5 # Should be deleted
+ issue_id: 3
+ label_id: 2
+ should_remain: false
+-
+ id: 6 # Should be deleted
+ issue_id: 3
+ label_id: 5
+ should_remain: false
diff --git a/models/migrations/fixtures/Test_RemoveInvalidLabels/label.yml b/models/migrations/fixtures/Test_RemoveInvalidLabels/label.yml
new file mode 100644
index 0000000..0f5a3eb
--- /dev/null
+++ b/models/migrations/fixtures/Test_RemoveInvalidLabels/label.yml
@@ -0,0 +1,25 @@
+# type Label struct {
+# ID int64 `xorm:"pk autoincr"`
+# RepoID int64 `xorm:"INDEX"`
+# OrgID int64 `xorm:"INDEX"`
+# }
+-
+ id: 1
+ repo_id: 1
+ org_id: 0
+-
+ id: 2
+ repo_id: 0
+ org_id: 1
+-
+ id: 3
+ repo_id: 2
+ org_id: 0
+-
+ id: 4
+ repo_id: 1
+ org_id: 0
+-
+ id: 5
+ repo_id: 3
+ org_id: 0
diff --git a/models/migrations/fixtures/Test_RemoveInvalidLabels/repository.yml b/models/migrations/fixtures/Test_RemoveInvalidLabels/repository.yml
new file mode 100644
index 0000000..180f11b
--- /dev/null
+++ b/models/migrations/fixtures/Test_RemoveInvalidLabels/repository.yml
@@ -0,0 +1,17 @@
+# type Repository struct {
+# ID int64 `xorm:"pk autoincr"`
+# OwnerID int64 `xorm:"UNIQUE(s) index"`
+# LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"`
+# }
+-
+ id: 1
+ owner_id: 2
+ lower_name: "repo1"
+-
+ id: 2
+ owner_id: 1
+ lower_name: "repo2"
+-
+ id: 3
+ owner_id: 2
+ lower_name: "repo3"
diff --git a/models/migrations/fixtures/Test_RemoveSSHSignaturesFromReleaseNotes/release.yml b/models/migrations/fixtures/Test_RemoveSSHSignaturesFromReleaseNotes/release.yml
new file mode 100644
index 0000000..caa0b40
--- /dev/null
+++ b/models/migrations/fixtures/Test_RemoveSSHSignaturesFromReleaseNotes/release.yml
@@ -0,0 +1,22 @@
+# type Release struct {
+# ID int64 `xorm:"pk autoincr"`
+# Note string `xorm:"TEXT"`
+# }
+-
+ id: 1
+ note: |
+ -----BEGIN SSH SIGNATURE-----
+ some signature
+ -----END SSH SIGNATURE-----
+
+-
+ id: 2
+ note: |
+ A message.
+ -----BEGIN SSH SIGNATURE-----
+ some signature
+ -----END SSH SIGNATURE-----
+
+-
+ id: 3
+ note: "no signature present here"
diff --git a/models/migrations/fixtures/Test_RepositoryFormat/comment.yml b/models/migrations/fixtures/Test_RepositoryFormat/comment.yml
new file mode 100644
index 0000000..1197b08
--- /dev/null
+++ b/models/migrations/fixtures/Test_RepositoryFormat/comment.yml
@@ -0,0 +1,3 @@
+-
+ id: 1
+ commit_sha: 19fe5caf872476db265596eaac1dc35ad1c6422d
diff --git a/models/migrations/fixtures/Test_RepositoryFormat/commit_status.yml b/models/migrations/fixtures/Test_RepositoryFormat/commit_status.yml
new file mode 100644
index 0000000..ca0aaec
--- /dev/null
+++ b/models/migrations/fixtures/Test_RepositoryFormat/commit_status.yml
@@ -0,0 +1,3 @@
+-
+ id: 1
+ context_hash: 19fe5caf872476db265596eaac1dc35ad1c6422d
diff --git a/models/migrations/fixtures/Test_RepositoryFormat/pull_request.yml b/models/migrations/fixtures/Test_RepositoryFormat/pull_request.yml
new file mode 100644
index 0000000..380cc07
--- /dev/null
+++ b/models/migrations/fixtures/Test_RepositoryFormat/pull_request.yml
@@ -0,0 +1,5 @@
+-
+ id: 1
+ commit_sha: 19fe5caf872476db265596eaac1dc35ad1c6422d
+ merge_base: 19fe5caf872476db265596eaac1dc35ad1c6422d
+ merged_commit_id: 19fe5caf872476db265596eaac1dc35ad1c6422d
diff --git a/models/migrations/fixtures/Test_RepositoryFormat/release.yml b/models/migrations/fixtures/Test_RepositoryFormat/release.yml
new file mode 100644
index 0000000..ffabe4a
--- /dev/null
+++ b/models/migrations/fixtures/Test_RepositoryFormat/release.yml
@@ -0,0 +1,3 @@
+-
+ id: 1
+ sha1: 19fe5caf872476db265596eaac1dc35ad1c6422d
diff --git a/models/migrations/fixtures/Test_RepositoryFormat/repo_archiver.yml b/models/migrations/fixtures/Test_RepositoryFormat/repo_archiver.yml
new file mode 100644
index 0000000..f04cb3b
--- /dev/null
+++ b/models/migrations/fixtures/Test_RepositoryFormat/repo_archiver.yml
@@ -0,0 +1,3 @@
+-
+ id: 1
+ commit_id: 19fe5caf872476db265596eaac1dc35ad1c6422d
diff --git a/models/migrations/fixtures/Test_RepositoryFormat/repo_indexer_status.yml b/models/migrations/fixtures/Test_RepositoryFormat/repo_indexer_status.yml
new file mode 100644
index 0000000..1197b08
--- /dev/null
+++ b/models/migrations/fixtures/Test_RepositoryFormat/repo_indexer_status.yml
@@ -0,0 +1,3 @@
+-
+ id: 1
+ commit_sha: 19fe5caf872476db265596eaac1dc35ad1c6422d
diff --git a/models/migrations/fixtures/Test_RepositoryFormat/repository.yml b/models/migrations/fixtures/Test_RepositoryFormat/repository.yml
new file mode 100644
index 0000000..5a36759
--- /dev/null
+++ b/models/migrations/fixtures/Test_RepositoryFormat/repository.yml
@@ -0,0 +1,11 @@
+# type Repository struct {
+# ID int64 `xorm:"pk autoincr"`
+# }
+-
+ id: 1
+-
+ id: 2
+-
+ id: 3
+-
+ id: 10
diff --git a/models/migrations/fixtures/Test_RepositoryFormat/review_state.yml b/models/migrations/fixtures/Test_RepositoryFormat/review_state.yml
new file mode 100644
index 0000000..dd64980
--- /dev/null
+++ b/models/migrations/fixtures/Test_RepositoryFormat/review_state.yml
@@ -0,0 +1,5 @@
+-
+ id: 1
+ user_id: 1
+ pull_id: 1
+ commit_sha: 19fe5caf872476db265596eaac1dc35ad1c6422d
diff --git a/models/migrations/fixtures/Test_StoreWebauthnCredentialIDAsBytes/expected_webauthn_credential.yml b/models/migrations/fixtures/Test_StoreWebauthnCredentialIDAsBytes/expected_webauthn_credential.yml
new file mode 100644
index 0000000..55a237a
--- /dev/null
+++ b/models/migrations/fixtures/Test_StoreWebauthnCredentialIDAsBytes/expected_webauthn_credential.yml
@@ -0,0 +1,9 @@
+-
+ id: 1
+ credential_id: "TVHE44TOH7DF7V48SEAIT3EMMJ7TGBOQ289E5AQB34S98LFCUFJ7U2NAVI8RJG6K2F4TC8AQ8KBNO7AGEOQOL9NE43GR63HTEHJSLOG="
+-
+ id: 2
+ credential_id: "051CLMMKB62S6M9M2A4H54K7MMCQALFJ36G4TGB2S9A47APLTILU6C6744CEBG4EKCGV357N21BSLH8JD33GQMFAR6DQ70S76P34J6FR="
+-
+ id: 4
+ credential_id: "APU4B1NDTEVTEM60V4T0FRL7SRJMO9KIE2AKFQ8JDGTQ7VHFI41FDEFTDLBVQEAE4ER49QV2GTGVFDNBO31BPOA3OQN6879OT6MTU3G="
diff --git a/models/migrations/fixtures/Test_StoreWebauthnCredentialIDAsBytes/webauthn_credential.yml b/models/migrations/fixtures/Test_StoreWebauthnCredentialIDAsBytes/webauthn_credential.yml
new file mode 100644
index 0000000..ebb73f4
--- /dev/null
+++ b/models/migrations/fixtures/Test_StoreWebauthnCredentialIDAsBytes/webauthn_credential.yml
@@ -0,0 +1,30 @@
+-
+ id: 1
+ lower_name: "u2fkey-correctly-migrated"
+ name: "u2fkey-correctly-migrated"
+ user_id: 1
+ credential_id: "TVHE44TOH7DF7V48SEAIT3EMMJ7TGBOQ289E5AQB34S98LFCUFJ7U2NAVI8RJG6K2F4TC8AQ8KBNO7AGEOQOL9NE43GR63HTEHJSLOG="
+ public_key: 0x040d0967a2cad045011631187576492a0beb5b377954b4f694c5afc8bdf25270f87f09a9ab6ce9c282f447ba71b2f2bae2105b32b847e0704f310f48644e3eddf2
+ attestation_type: 'fido-u2f'
+ sign_count: 1
+ clone_warning: false
+-
+ id: 2
+ lower_name: "non-u2f-key"
+ name: "non-u2f-key"
+ user_id: 1
+ credential_id: "051CLMMKB62S6M9M2A4H54K7MMCQALFJ36G4TGB2S9A47APLTILU6C6744CEBG4EKCGV357N21BSLH8JD33GQMFAR6DQ70S76P34J6FR"
+ public_key: 0x040d0967a2cad045011631187576492a0beb5b377954b4f694c5afc8bdf25270f87f09a9ab6ce9c282f447ba71b2f2bae2105b32b847e0704f310f48644e3eddf2
+ attestation_type: 'none'
+ sign_count: 1
+ clone_warning: false
+-
+ id: 4
+ lower_name: "packed-key"
+ name: "packed-key"
+ user_id: 1
+ credential_id: "APU4B1NDTEVTEM60V4T0FRL7SRJMO9KIE2AKFQ8JDGTQ7VHFI41FDEFTDLBVQEAE4ER49QV2GTGVFDNBO31BPOA3OQN6879OT6MTU3G="
+ public_key: 0x040d0967a2cad045011631187576492a0beb5b377954b4f694c5afc8bdf25270f87f09a9ab6ce9c282f447ba71b2f2bae2105b32b847e0704f310f48644e3eddf2
+ attestation_type: 'fido-u2f'
+ sign_count: 1
+ clone_warning: false
diff --git a/models/migrations/fixtures/Test_UnwrapLDAPSourceCfg/login_source.yml b/models/migrations/fixtures/Test_UnwrapLDAPSourceCfg/login_source.yml
new file mode 100644
index 0000000..4b72ba1
--- /dev/null
+++ b/models/migrations/fixtures/Test_UnwrapLDAPSourceCfg/login_source.yml
@@ -0,0 +1,48 @@
+# type LoginSource struct {
+# ID int64 `xorm:"pk autoincr"`
+# Type int
+# Cfg []byte `xorm:"TEXT"`
+# Expected []byte `xorm:"TEXT"`
+# }
+-
+ id: 1
+ type: 1
+ is_actived: false
+ cfg: "{\"Source\":{\"A\":\"string\",\"B\":1}}"
+ expected: "{\"Source\":{\"A\":\"string\",\"B\":1}}"
+-
+ id: 2
+ type: 2
+ is_actived: true
+ cfg: "{\"Source\":{\"A\":\"string2\",\"B\":2}}"
+ expected: "{\"A\":\"string2\",\"B\":2}"
+-
+ id: 3
+ type: 3
+ is_actived: false
+ cfg: "{\"Source\":{\"A\":\"string3\",\"B\":3}}"
+ expected: "{\"Source\":{\"A\":\"string3\",\"B\":3}}"
+-
+ id: 4
+ type: 4
+ is_actived: true
+ cfg: "{\"Source\":{\"A\":\"string4\",\"B\":4}}"
+ expected: "{\"Source\":{\"A\":\"string4\",\"B\":4}}"
+-
+ id: 5
+ type: 5
+ is_actived: false
+ cfg: "{\"Source\":{\"A\":\"string5\",\"B\":5}}"
+ expected: "{\"A\":\"string5\",\"B\":5}"
+-
+ id: 6
+ type: 2
+ is_actived: true
+ cfg: "{\"A\":\"string6\",\"B\":6}"
+ expected: "{\"A\":\"string6\",\"B\":6}"
+-
+ id: 7
+ type: 5
+ is_actived: false
+ cfg: "{\"A\":\"string7\",\"B\":7}"
+ expected: "{\"A\":\"string7\",\"B\":7}"
diff --git a/models/migrations/fixtures/Test_UpdateBadgeColName/badge.yml b/models/migrations/fixtures/Test_UpdateBadgeColName/badge.yml
new file mode 100644
index 0000000..7025144
--- /dev/null
+++ b/models/migrations/fixtures/Test_UpdateBadgeColName/badge.yml
@@ -0,0 +1,4 @@
+-
+ id: 1
+ description: the badge
+ image_url: https://gitea.com/myimage.png
diff --git a/models/migrations/fixtures/Test_UpdateOpenMilestoneCounts/expected_milestone.yml b/models/migrations/fixtures/Test_UpdateOpenMilestoneCounts/expected_milestone.yml
new file mode 100644
index 0000000..9326fa5
--- /dev/null
+++ b/models/migrations/fixtures/Test_UpdateOpenMilestoneCounts/expected_milestone.yml
@@ -0,0 +1,19 @@
+# type Milestone struct {
+# ID int64 `xorm:"pk autoincr"`
+# IsClosed bool
+# NumIssues int
+# NumClosedIssues int
+# Completeness int // Percentage(1-100).
+# }
+-
+ id: 1
+ is_closed: false
+ num_issues: 3
+ num_closed_issues: 1
+ completeness: 33
+-
+ id: 2
+ is_closed: true
+ num_issues: 5
+ num_closed_issues: 5
+ completeness: 100
diff --git a/models/migrations/fixtures/Test_UpdateOpenMilestoneCounts/issue.yml b/models/migrations/fixtures/Test_UpdateOpenMilestoneCounts/issue.yml
new file mode 100644
index 0000000..fdaacd9
--- /dev/null
+++ b/models/migrations/fixtures/Test_UpdateOpenMilestoneCounts/issue.yml
@@ -0,0 +1,25 @@
+# type Issue struct {
+# ID int64 `xorm:"pk autoincr"`
+# RepoID int64 `xorm:"INDEX UNIQUE(repo_index)"`
+# Index int64 `xorm:"UNIQUE(repo_index)"` // Index in one repository.
+# MilestoneID int64 `xorm:"INDEX"`
+# IsClosed bool `xorm:"INDEX"`
+# }
+-
+ id: 1
+ repo_id: 1
+ index: 1
+ milestone_id: 1
+ is_closed: false
+-
+ id: 2
+ repo_id: 1
+ index: 2
+ milestone_id: 1
+ is_closed: true
+-
+ id: 4
+ repo_id: 1
+ index: 3
+ milestone_id: 1
+ is_closed: false
diff --git a/models/migrations/fixtures/Test_UpdateOpenMilestoneCounts/milestone.yml b/models/migrations/fixtures/Test_UpdateOpenMilestoneCounts/milestone.yml
new file mode 100644
index 0000000..0bcf4cf
--- /dev/null
+++ b/models/migrations/fixtures/Test_UpdateOpenMilestoneCounts/milestone.yml
@@ -0,0 +1,19 @@
+# type Milestone struct {
+# ID int64 `xorm:"pk autoincr"`
+# IsClosed bool
+# NumIssues int
+# NumClosedIssues int
+# Completeness int // Percentage(1-100).
+# }
+-
+ id: 1
+ is_closed: false
+ num_issues: 4
+ num_closed_issues: 2
+ completeness: 50
+-
+ id: 2
+ is_closed: true
+ num_issues: 5
+ num_closed_issues: 5
+ completeness: 100
diff --git a/models/migrations/migrations.go b/models/migrations/migrations.go
new file mode 100644
index 0000000..d7e951f
--- /dev/null
+++ b/models/migrations/migrations.go
@@ -0,0 +1,721 @@
+// Copyright 2015 The Gogs Authors. All rights reserved.
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package migrations
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/forgejo_migrations"
+ "code.gitea.io/gitea/models/migrations/v1_10"
+ "code.gitea.io/gitea/models/migrations/v1_11"
+ "code.gitea.io/gitea/models/migrations/v1_12"
+ "code.gitea.io/gitea/models/migrations/v1_13"
+ "code.gitea.io/gitea/models/migrations/v1_14"
+ "code.gitea.io/gitea/models/migrations/v1_15"
+ "code.gitea.io/gitea/models/migrations/v1_16"
+ "code.gitea.io/gitea/models/migrations/v1_17"
+ "code.gitea.io/gitea/models/migrations/v1_18"
+ "code.gitea.io/gitea/models/migrations/v1_19"
+ "code.gitea.io/gitea/models/migrations/v1_20"
+ "code.gitea.io/gitea/models/migrations/v1_21"
+ "code.gitea.io/gitea/models/migrations/v1_22"
+ "code.gitea.io/gitea/models/migrations/v1_23"
+ "code.gitea.io/gitea/models/migrations/v1_6"
+ "code.gitea.io/gitea/models/migrations/v1_7"
+ "code.gitea.io/gitea/models/migrations/v1_8"
+ "code.gitea.io/gitea/models/migrations/v1_9"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ forgejo_services "code.gitea.io/gitea/services/forgejo"
+
+ "xorm.io/xorm"
+ "xorm.io/xorm/names"
+)
+
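+// minDBVersion is the oldest schema version that auto-migration still
+// supports; older databases must first be upgraded via Gitea 1.6.4.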
+const minDBVersion = 70 // Gitea 1.5.3
+
+// Migration describes one migration from a lower version to a higher version
+type Migration interface {
+ Description() string
+ Migrate(*xorm.Engine) error
+}
+
+type migration struct {
+ description string
+ migrate func(*xorm.Engine) error
+}
+
+// NewMigration creates a new migration
+func NewMigration(desc string, fn func(*xorm.Engine) error) Migration {
+ return &migration{desc, fn}
+}
+
+// Description returns the migration's description
+func (m *migration) Description() string {
+ return m.description
+}
+
+// Migrate executes the migration
+func (m *migration) Migrate(x *xorm.Engine) error {
+ return m.migrate(x)
+}
+
+// Version describes the version table. Should have only one row with id==1
+type Version struct {
+ ID int64 `xorm:"pk autoincr"`
+ Version int64
+}
+
+// Use noopMigration when there is a migration that has been no-oped
+var noopMigration = func(_ *xorm.Engine) error { return nil }
+
+// This is a sequence of migrations. Add new migrations to the bottom of the list.
+// If you want to "retire" a migration, remove it from the top of the list and
+// update minDBVersion accordingly
+var migrations = []Migration{
+ // Gitea 1.5.0 ends at v69
+
+ // v70 -> v71
+ NewMigration("add issue_dependencies", v1_6.AddIssueDependencies),
+ // v71 -> v72
+ NewMigration("protect each scratch token", v1_6.AddScratchHash),
+ // v72 -> v73
+ NewMigration("add review", v1_6.AddReview),
+
+ // Gitea 1.6.0 ends at v73
+
+ // v73 -> v74
+ NewMigration("add must_change_password column for users table", v1_7.AddMustChangePassword),
+ // v74 -> v75
+ NewMigration("add approval whitelists to protected branches", v1_7.AddApprovalWhitelistsToProtectedBranches),
+ // v75 -> v76
+ NewMigration("clear nonused data which not deleted when user was deleted", v1_7.ClearNonusedData),
+
+ // Gitea 1.7.0 ends at v76
+
+ // v76 -> v77
+ NewMigration("add pull request rebase with merge commit", v1_8.AddPullRequestRebaseWithMerge),
+ // v77 -> v78
+ NewMigration("add theme to users", v1_8.AddUserDefaultTheme),
+ // v78 -> v79
+ NewMigration("rename repo is_bare to repo is_empty", v1_8.RenameRepoIsBareToIsEmpty),
+ // v79 -> v80
+ NewMigration("add can close issues via commit in any branch", v1_8.AddCanCloseIssuesViaCommitInAnyBranch),
+ // v80 -> v81
+ NewMigration("add is locked to issues", v1_8.AddIsLockedToIssues),
+ // v81 -> v82
+ NewMigration("update U2F counter type", v1_8.ChangeU2FCounterType),
+
+ // Gitea 1.8.0 ends at v82
+
+ // v82 -> v83
+ NewMigration("hot fix for wrong release sha1 on release table", v1_9.FixReleaseSha1OnReleaseTable),
+ // v83 -> v84
+ NewMigration("add uploader id for table attachment", v1_9.AddUploaderIDForAttachment),
+ // v84 -> v85
+ NewMigration("add table to store original imported gpg keys", v1_9.AddGPGKeyImport),
+ // v85 -> v86
+ NewMigration("hash application token", v1_9.HashAppToken),
+ // v86 -> v87
+ NewMigration("add http method to webhook", v1_9.AddHTTPMethodToWebhook),
+ // v87 -> v88
+ NewMigration("add avatar field to repository", v1_9.AddAvatarFieldToRepository),
+
+ // Gitea 1.9.0 ends at v88
+
+ // v88 -> v89
+ NewMigration("add commit status context field to commit_status", v1_10.AddCommitStatusContext),
+ // v89 -> v90
+ NewMigration("add original author/url migration info to issues, comments, and repo ", v1_10.AddOriginalMigrationInfo),
+ // v90 -> v91
+ NewMigration("change length of some repository columns", v1_10.ChangeSomeColumnsLengthOfRepo),
+ // v91 -> v92
+ NewMigration("add index on owner_id of repository and type, review_id of comment", v1_10.AddIndexOnRepositoryAndComment),
+ // v92 -> v93
+ NewMigration("remove orphaned repository index statuses", v1_10.RemoveLingeringIndexStatus),
+ // v93 -> v94
+ NewMigration("add email notification enabled preference to user", v1_10.AddEmailNotificationEnabledToUser),
+ // v94 -> v95
+ NewMigration("add enable_status_check, status_check_contexts to protected_branch", v1_10.AddStatusCheckColumnsForProtectedBranches),
+ // v95 -> v96
+ NewMigration("add table columns for cross referencing issues", v1_10.AddCrossReferenceColumns),
+ // v96 -> v97
+ NewMigration("delete orphaned attachments", v1_10.DeleteOrphanedAttachments),
+ // v97 -> v98
+ NewMigration("add repo_admin_change_team_access to user", v1_10.AddRepoAdminChangeTeamAccessColumnForUser),
+ // v98 -> v99
+ NewMigration("add original author name and id on migrated release", v1_10.AddOriginalAuthorOnMigratedReleases),
+ // v99 -> v100
+ NewMigration("add task table and status column for repository table", v1_10.AddTaskTable),
+ // v100 -> v101
+ NewMigration("update migration repositories' service type", v1_10.UpdateMigrationServiceTypes),
+ // v101 -> v102
+ NewMigration("change length of some external login users columns", v1_10.ChangeSomeColumnsLengthOfExternalLoginUser),
+
+ // Gitea 1.10.0 ends at v102
+
+ // v102 -> v103
+ NewMigration("update migration repositories' service type", v1_11.DropColumnHeadUserNameOnPullRequest),
+ // v103 -> v104
+ NewMigration("Add WhitelistDeployKeys to protected branch", v1_11.AddWhitelistDeployKeysToBranches),
+ // v104 -> v105
+ NewMigration("remove unnecessary columns from label", v1_11.RemoveLabelUneededCols),
+ // v105 -> v106
+ NewMigration("add includes_all_repositories to teams", v1_11.AddTeamIncludesAllRepositories),
+ // v106 -> v107
+ NewMigration("add column `mode` to table watch", v1_11.AddModeColumnToWatch),
+ // v107 -> v108
+ NewMigration("Add template options to repository", v1_11.AddTemplateToRepo),
+ // v108 -> v109
+ NewMigration("Add comment_id on table notification", v1_11.AddCommentIDOnNotification),
+ // v109 -> v110
+ NewMigration("add can_create_org_repo to team", v1_11.AddCanCreateOrgRepoColumnForTeam),
+ // v110 -> v111
+ NewMigration("change review content type to text", v1_11.ChangeReviewContentToText),
+ // v111 -> v112
+ NewMigration("update branch protection for can push and whitelist enable", v1_11.AddBranchProtectionCanPushAndEnableWhitelist),
+ // v112 -> v113
+ NewMigration("remove release attachments which repository deleted", v1_11.RemoveAttachmentMissedRepo),
+ // v113 -> v114
+ NewMigration("new feature: change target branch of pull requests", v1_11.FeatureChangeTargetBranch),
+ // v114 -> v115
+ NewMigration("Remove authentication credentials from stored URL", v1_11.SanitizeOriginalURL),
+ // v115 -> v116
+ NewMigration("add user_id prefix to existing user avatar name", v1_11.RenameExistingUserAvatarName),
+ // v116 -> v117
+ NewMigration("Extend TrackedTimes", v1_11.ExtendTrackedTimes),
+
+ // Gitea 1.11.0 ends at v117
+
+ // v117 -> v118
+ NewMigration("Add block on rejected reviews branch protection", v1_12.AddBlockOnRejectedReviews),
+ // v118 -> v119
+ NewMigration("Add commit id and stale to reviews", v1_12.AddReviewCommitAndStale),
+ // v119 -> v120
+ NewMigration("Fix migrated repositories' git service type", v1_12.FixMigratedRepositoryServiceType),
+ // v120 -> v121
+ NewMigration("Add owner_name on table repository", v1_12.AddOwnerNameOnRepository),
+ // v121 -> v122
+ NewMigration("add is_restricted column for users table", v1_12.AddIsRestricted),
+ // v122 -> v123
+ NewMigration("Add Require Signed Commits to ProtectedBranch", v1_12.AddRequireSignedCommits),
+ // v123 -> v124
+ NewMigration("Add original information for reactions", v1_12.AddReactionOriginals),
+ // v124 -> v125
+ NewMigration("Add columns to user and repository", v1_12.AddUserRepoMissingColumns),
+ // v125 -> v126
+ NewMigration("Add some columns on review for migration", v1_12.AddReviewMigrateInfo),
+ // v126 -> v127
+ NewMigration("Fix topic repository count", v1_12.FixTopicRepositoryCount),
+ // v127 -> v128
+ NewMigration("add repository code language statistics", v1_12.AddLanguageStats),
+ // v128 -> v129
+ NewMigration("fix merge base for pull requests", v1_12.FixMergeBase),
+ // v129 -> v130
+ NewMigration("remove dependencies from deleted repositories", v1_12.PurgeUnusedDependencies),
+ // v130 -> v131
+ NewMigration("Expand webhooks for more granularity", v1_12.ExpandWebhooks),
+ // v131 -> v132
+ NewMigration("Add IsSystemWebhook column to webhooks table", v1_12.AddSystemWebhookColumn),
+ // v132 -> v133
+ NewMigration("Add Branch Protection Protected Files Column", v1_12.AddBranchProtectionProtectedFilesColumn),
+ // v133 -> v134
+ NewMigration("Add EmailHash Table", v1_12.AddEmailHashTable),
+ // v134 -> v135
+ NewMigration("Refix merge base for merged pull requests", v1_12.RefixMergeBase),
+ // v135 -> v136
+ NewMigration("Add OrgID column to Labels table", v1_12.AddOrgIDLabelColumn),
+ // v136 -> v137
+ NewMigration("Add CommitsAhead and CommitsBehind Column to PullRequest Table", v1_12.AddCommitDivergenceToPulls),
+ // v137 -> v138
+ NewMigration("Add Branch Protection Block Outdated Branch", v1_12.AddBlockOnOutdatedBranch),
+ // v138 -> v139
+ NewMigration("Add ResolveDoerID to Comment table", v1_12.AddResolveDoerIDCommentColumn),
+ // v139 -> v140
+ NewMigration("prepend refs/heads/ to issue refs", v1_12.PrependRefsHeadsToIssueRefs),
+
+ // Gitea 1.12.0 ends at v140
+
+ // v140 -> v141
+ NewMigration("Save detected language file size to database instead of percent", v1_13.FixLanguageStatsToSaveSize),
+ // v141 -> v142
+ NewMigration("Add KeepActivityPrivate to User table", v1_13.AddKeepActivityPrivateUserColumn),
+ // v142 -> v143
+ NewMigration("Ensure Repository.IsArchived is not null", v1_13.SetIsArchivedToFalse),
+ // v143 -> v144
+ NewMigration("recalculate Stars number for all user", v1_13.RecalculateStars),
+ // v144 -> v145
+ NewMigration("update Matrix Webhook http method to 'PUT'", v1_13.UpdateMatrixWebhookHTTPMethod),
+ // v145 -> v146
+ NewMigration("Increase Language field to 50 in LanguageStats", v1_13.IncreaseLanguageField),
+ // v146 -> v147
+ NewMigration("Add projects info to repository table", v1_13.AddProjectsInfo),
+ // v147 -> v148
+ NewMigration("create review for 0 review id code comments", v1_13.CreateReviewsForCodeComments),
+ // v148 -> v149
+ NewMigration("remove issue dependency comments who refer to non existing issues", v1_13.PurgeInvalidDependenciesComments),
+ // v149 -> v150
+ NewMigration("Add Created and Updated to Milestone table", v1_13.AddCreatedAndUpdatedToMilestones),
+ // v150 -> v151
+ NewMigration("add primary key to repo_topic", v1_13.AddPrimaryKeyToRepoTopic),
+ // v151 -> v152
+ NewMigration("set default password algorithm to Argon2", v1_13.SetDefaultPasswordToArgon2),
+ // v152 -> v153
+ NewMigration("add TrustModel field to Repository", v1_13.AddTrustModelToRepository),
+ // v153 -> v154
+ NewMigration("add Team review request support", v1_13.AddTeamReviewRequestSupport),
+ // v154 -> v155
+ NewMigration("add timestamps to Star, Label, Follow, Watch and Collaboration", v1_13.AddTimeStamps),
+
+ // Gitea 1.13.0 ends at v155
+
+ // v155 -> v156
+ NewMigration("add changed_protected_files column for pull_request table", v1_14.AddChangedProtectedFilesPullRequestColumn),
+ // v156 -> v157
+ NewMigration("fix publisher ID for tag releases", v1_14.FixPublisherIDforTagReleases),
+ // v157 -> v158
+ NewMigration("ensure repo topics are up-to-date", v1_14.FixRepoTopics),
+ // v158 -> v159
+ NewMigration("code comment replies should have the commitID of the review they are replying to", v1_14.UpdateCodeCommentReplies),
+ // v159 -> v160
+ NewMigration("update reactions constraint", v1_14.UpdateReactionConstraint),
+ // v160 -> v161
+ NewMigration("Add block on official review requests branch protection", v1_14.AddBlockOnOfficialReviewRequests),
+ // v161 -> v162
+ NewMigration("Convert task type from int to string", v1_14.ConvertTaskTypeToString),
+ // v162 -> v163
+ NewMigration("Convert webhook task type from int to string", v1_14.ConvertWebhookTaskTypeToString),
+ // v163 -> v164
+ NewMigration("Convert topic name from 25 to 50", v1_14.ConvertTopicNameFrom25To50),
+ // v164 -> v165
+ NewMigration("Add scope and nonce columns to oauth2_grant table", v1_14.AddScopeAndNonceColumnsToOAuth2Grant),
+ // v165 -> v166
+ NewMigration("Convert hook task type from char(16) to varchar(16) and trim the column", v1_14.ConvertHookTaskTypeToVarcharAndTrim),
+ // v166 -> v167
+ NewMigration("Where Password is Valid with Empty String delete it", v1_14.RecalculateUserEmptyPWD),
+ // v167 -> v168
+ NewMigration("Add user redirect", v1_14.AddUserRedirect),
+ // v168 -> v169
+ NewMigration("Recreate user table to fix default values", v1_14.RecreateUserTableToFixDefaultValues),
+ // v169 -> v170
+ NewMigration("Update DeleteBranch comments to set the old_ref to the commit_sha", v1_14.CommentTypeDeleteBranchUseOldRef),
+ // v170 -> v171
+ NewMigration("Add Dismissed to Review table", v1_14.AddDismissedReviewColumn),
+ // v171 -> v172
+ NewMigration("Add Sorting to ProjectBoard table", v1_14.AddSortingColToProjectBoard),
+ // v172 -> v173
+ NewMigration("Add sessions table for go-chi/session", v1_14.AddSessionTable),
+ // v173 -> v174
+ NewMigration("Add time_id column to Comment", v1_14.AddTimeIDCommentColumn),
+ // v174 -> v175
+ NewMigration("Create repo transfer table", v1_14.AddRepoTransfer),
+ // v175 -> v176
+ NewMigration("Fix Postgres ID Sequences broken by recreate-table", v1_14.FixPostgresIDSequences),
+ // v176 -> v177
+ NewMigration("Remove invalid labels from comments", v1_14.RemoveInvalidLabels),
+ // v177 -> v178
+ NewMigration("Delete orphaned IssueLabels", v1_14.DeleteOrphanedIssueLabels),
+
+ // Gitea 1.14.0 ends at v178
+
+ // v178 -> v179
+ NewMigration("Add LFS columns to Mirror", v1_15.AddLFSMirrorColumns),
+ // v179 -> v180
+ NewMigration("Convert avatar url to text", v1_15.ConvertAvatarURLToText),
+ // v180 -> v181
+ NewMigration("Delete credentials from past migrations", v1_15.DeleteMigrationCredentials),
+ // v181 -> v182
+ NewMigration("Always save primary email on email address table", v1_15.AddPrimaryEmail2EmailAddress),
+ // v182 -> v183
+ NewMigration("Add issue resource index table", v1_15.AddIssueResourceIndexTable),
+ // v183 -> v184
+ NewMigration("Create PushMirror table", v1_15.CreatePushMirrorTable),
+ // v184 -> v185
+ NewMigration("Rename Task errors to message", v1_15.RenameTaskErrorsToMessage),
+ // v185 -> v186
+ NewMigration("Add new table repo_archiver", v1_15.AddRepoArchiver),
+ // v186 -> v187
+ NewMigration("Create protected tag table", v1_15.CreateProtectedTagTable),
+ // v187 -> v188
+ NewMigration("Drop unneeded webhook related columns", v1_15.DropWebhookColumns),
+ // v188 -> v189
+ NewMigration("Add key is verified to gpg key", v1_15.AddKeyIsVerified),
+
+ // Gitea 1.15.0 ends at v189
+
+ // v189 -> v190
+ NewMigration("Unwrap ldap.Sources", v1_16.UnwrapLDAPSourceCfg),
+ // v190 -> v191
+ NewMigration("Add agit flow pull request support", v1_16.AddAgitFlowPullRequest),
+ // v191 -> v192
+ NewMigration("Alter issue/comment table TEXT fields to LONGTEXT", v1_16.AlterIssueAndCommentTextFieldsToLongText),
+ // v192 -> v193
+ NewMigration("RecreateIssueResourceIndexTable to have a primary key instead of an unique index", v1_16.RecreateIssueResourceIndexTable),
+ // v193 -> v194
+ NewMigration("Add repo id column for attachment table", v1_16.AddRepoIDForAttachment),
+ // v194 -> v195
+ NewMigration("Add Branch Protection Unprotected Files Column", v1_16.AddBranchProtectionUnprotectedFilesColumn),
+ // v195 -> v196
+ NewMigration("Add table commit_status_index", v1_16.AddTableCommitStatusIndex),
+ // v196 -> v197
+ NewMigration("Add Color to ProjectBoard table", v1_16.AddColorColToProjectBoard),
+ // v197 -> v198
+ NewMigration("Add renamed_branch table", v1_16.AddRenamedBranchTable),
+ // v198 -> v199
+ NewMigration("Add issue content history table", v1_16.AddTableIssueContentHistory),
+ // v199 -> v200
+ NewMigration("No-op (remote version is using AppState now)", noopMigration),
+ // v200 -> v201
+ NewMigration("Add table app_state", v1_16.AddTableAppState),
+ // v201 -> v202
+ NewMigration("Drop table remote_version (if exists)", v1_16.DropTableRemoteVersion),
+ // v202 -> v203
+ NewMigration("Create key/value table for user settings", v1_16.CreateUserSettingsTable),
+ // v203 -> v204
+ NewMigration("Add Sorting to ProjectIssue table", v1_16.AddProjectIssueSorting),
+ // v204 -> v205
+ NewMigration("Add key is verified to ssh key", v1_16.AddSSHKeyIsVerified),
+ // v205 -> v206
+ NewMigration("Migrate to higher varchar on user struct", v1_16.MigrateUserPasswordSalt),
+ // v206 -> v207
+ NewMigration("Add authorize column to team_unit table", v1_16.AddAuthorizeColForTeamUnit),
+ // v207 -> v208
+ NewMigration("Add webauthn table and migrate u2f data to webauthn - NO-OPED", v1_16.AddWebAuthnCred),
+ // v208 -> v209
+ NewMigration("Use base32.HexEncoding instead of base64 encoding for cred ID as it is case insensitive - NO-OPED", v1_16.UseBase32HexForCredIDInWebAuthnCredential),
+ // v209 -> v210
+ NewMigration("Increase WebAuthentication CredentialID size to 410 - NO-OPED", v1_16.IncreaseCredentialIDTo410),
+ // v210 -> v211
+ NewMigration("v208 was completely broken - remigrate", v1_16.RemigrateU2FCredentials),
+
+ // Gitea 1.16.2 ends at v211
+
+ // v211 -> v212
+ NewMigration("Create ForeignReference table", v1_17.CreateForeignReferenceTable),
+ // v212 -> v213
+ NewMigration("Add package tables", v1_17.AddPackageTables),
+ // v213 -> v214
+ NewMigration("Add allow edits from maintainers to PullRequest table", v1_17.AddAllowMaintainerEdit),
+ // v214 -> v215
+ NewMigration("Add auto merge table", v1_17.AddAutoMergeTable),
+ // v215 -> v216
+ NewMigration("allow to view files in PRs", v1_17.AddReviewViewedFiles),
+ // v216 -> v217
+ NewMigration("No-op (Improve Action table indices v1)", noopMigration),
+ // v217 -> v218
+ NewMigration("Alter hook_task table TEXT fields to LONGTEXT", v1_17.AlterHookTaskTextFieldsToLongText),
+ // v218 -> v219
+ NewMigration("Improve Action table indices v2", v1_17.ImproveActionTableIndices),
+ // v219 -> v220
+ NewMigration("Add sync_on_commit column to push_mirror table", v1_17.AddSyncOnCommitColForPushMirror),
+ // v220 -> v221
+ NewMigration("Add container repository property", v1_17.AddContainerRepositoryProperty),
+ // v221 -> v222
+ NewMigration("Store WebAuthentication CredentialID as bytes and increase size to at least 1024", v1_17.StoreWebauthnCredentialIDAsBytes),
+ // v222 -> v223
+ NewMigration("Drop old CredentialID column", v1_17.DropOldCredentialIDColumn),
+ // v223 -> v224
+ NewMigration("Rename CredentialIDBytes column to CredentialID", v1_17.RenameCredentialIDBytes),
+
+ // Gitea 1.17.0 ends at v224
+
+ // v224 -> v225
+ NewMigration("Add badges to users", v1_18.CreateUserBadgesTable),
+ // v225 -> v226
+ NewMigration("Alter gpg_key/public_key content TEXT fields to MEDIUMTEXT", v1_18.AlterPublicGPGKeyContentFieldsToMediumText),
+ // v226 -> v227
+ NewMigration("Conan and generic packages do not need to be semantically versioned", v1_18.FixPackageSemverField),
+ // v227 -> v228
+ NewMigration("Create key/value table for system settings", v1_18.CreateSystemSettingsTable),
+ // v228 -> v229
+ NewMigration("Add TeamInvite table", v1_18.AddTeamInviteTable),
+ // v229 -> v230
+ NewMigration("Update counts of all open milestones", v1_18.UpdateOpenMilestoneCounts),
+ // v230 -> v231
+ NewMigration("Add ConfidentialClient column (default true) to OAuth2Application table", v1_18.AddConfidentialClientColumnToOAuth2ApplicationTable),
+
+ // Gitea 1.18.0 ends at v231
+
+ // v231 -> v232
+ NewMigration("Add index for hook_task", v1_19.AddIndexForHookTask),
+ // v232 -> v233
+ NewMigration("Alter package_version.metadata_json to LONGTEXT", v1_19.AlterPackageVersionMetadataToLongText),
+ // v233 -> v234
+ NewMigration("Add header_authorization_encrypted column to webhook table", v1_19.AddHeaderAuthorizationEncryptedColWebhook),
+ // v234 -> v235
+ NewMigration("Add package cleanup rule table", v1_19.CreatePackageCleanupRuleTable),
+ // v235 -> v236
+ NewMigration("Add index for access_token", v1_19.AddIndexForAccessToken),
+ // v236 -> v237
+ NewMigration("Create secrets table", v1_19.CreateSecretsTable),
+ // v237 -> v238
+ NewMigration("Drop ForeignReference table", v1_19.DropForeignReferenceTable),
+ // v238 -> v239
+ NewMigration("Add updated unix to LFSMetaObject", v1_19.AddUpdatedUnixToLFSMetaObject),
+ // v239 -> v240
+ NewMigration("Add scope for access_token", v1_19.AddScopeForAccessTokens),
+ // v240 -> v241
+ NewMigration("Add actions tables", v1_19.AddActionsTables),
+ // v241 -> v242
+ NewMigration("Add card_type column to project table", v1_19.AddCardTypeToProjectTable),
+ // v242 -> v243
+ NewMigration("Alter gpg_key_import content TEXT field to MEDIUMTEXT", v1_19.AlterPublicGPGKeyImportContentFieldToMediumText),
+ // v243 -> v244
+ NewMigration("Add exclusive label", v1_19.AddExclusiveLabel),
+
+ // Gitea 1.19.0 ends at v244
+
+ // v244 -> v245
+ NewMigration("Add NeedApproval to actions tables", v1_20.AddNeedApprovalToActionRun),
+ // v245 -> v246
+ NewMigration("Rename Webhook org_id to owner_id", v1_20.RenameWebhookOrgToOwner),
+ // v246 -> v247
+ NewMigration("Add missed column owner_id for project table", v1_20.AddNewColumnForProject),
+ // v247 -> v248
+ NewMigration("Fix incorrect project type", v1_20.FixIncorrectProjectType),
+ // v248 -> v249
+ NewMigration("Add version column to action_runner table", v1_20.AddVersionToActionRunner),
+ // v249 -> v250
+ NewMigration("Improve Action table indices v3", v1_20.ImproveActionTableIndices),
+ // v250 -> v251
+ NewMigration("Change Container Metadata", v1_20.ChangeContainerMetadataMultiArch),
+ // v251 -> v252
+ NewMigration("Fix incorrect owner team unit access mode", v1_20.FixIncorrectOwnerTeamUnitAccessMode),
+ // v252 -> v253
+ NewMigration("Fix incorrect admin team unit access mode", v1_20.FixIncorrectAdminTeamUnitAccessMode),
+ // v253 -> v254
+ NewMigration("Fix ExternalTracker and ExternalWiki accessMode in owner and admin team", v1_20.FixExternalTrackerAndExternalWikiAccessModeInOwnerAndAdminTeam),
+ // v254 -> v255
+ NewMigration("Add ActionTaskOutput table", v1_20.AddActionTaskOutputTable),
+ // v255 -> v256
+ NewMigration("Add ArchivedUnix Column", v1_20.AddArchivedUnixToRepository),
+ // v256 -> v257
+ NewMigration("Add is_internal column to package", v1_20.AddIsInternalColumnToPackage),
+ // v257 -> v258
+ NewMigration("Add Actions Artifact table", v1_20.CreateActionArtifactTable),
+ // v258 -> v259
+ NewMigration("Add PinOrder Column", v1_20.AddPinOrderToIssue),
+ // v259 -> v260
+ NewMigration("Convert scoped access tokens", v1_20.ConvertScopedAccessTokens),
+
+ // Gitea 1.20.0 ends at v260
+
+ // v260 -> v261
+ NewMigration("Drop custom_labels column of action_runner table", v1_21.DropCustomLabelsColumnOfActionRunner),
+ // v261 -> v262
+ NewMigration("Add variable table", v1_21.CreateVariableTable),
+ // v262 -> v263
+ NewMigration("Add TriggerEvent to action_run table", v1_21.AddTriggerEventToActionRun),
+ // v263 -> v264
+ NewMigration("Add git_size and lfs_size columns to repository table", v1_21.AddGitSizeAndLFSSizeToRepositoryTable),
+ // v264 -> v265
+ NewMigration("Add branch table", v1_21.AddBranchTable),
+ // v265 -> v266
+ NewMigration("Alter Actions Artifact table", v1_21.AlterActionArtifactTable),
+ // v266 -> v267
+ NewMigration("Reduce commit status", v1_21.ReduceCommitStatus),
+ // v267 -> v268
+ NewMigration("Add action_tasks_version table", v1_21.CreateActionTasksVersionTable),
+ // v268 -> v269
+ NewMigration("Update Action Ref", v1_21.UpdateActionsRefIndex),
+ // v269 -> v270
+ NewMigration("Drop deleted branch table", v1_21.DropDeletedBranchTable),
+ // v270 -> v271
+ NewMigration("Fix PackageProperty typo", v1_21.FixPackagePropertyTypo),
+ // v271 -> v272
+ NewMigration("Allow archiving labels", v1_21.AddArchivedUnixColumInLabelTable),
+ // v272 -> v273
+ NewMigration("Add Version to ActionRun table", v1_21.AddVersionToActionRunTable),
+ // v273 -> v274
+ NewMigration("Add Action Schedule Table", v1_21.AddActionScheduleTable),
+ // v274 -> v275
+ NewMigration("Add Actions artifacts expiration date", v1_21.AddExpiredUnixColumnInActionArtifactTable),
+ // v275 -> v276
+ NewMigration("Add ScheduleID for ActionRun", v1_21.AddScheduleIDForActionRun),
+ // v276 -> v277
+ NewMigration("Add RemoteAddress to mirrors", v1_21.AddRemoteAddressToMirrors),
+ // v277 -> v278
+ NewMigration("Add Index to issue_user.issue_id", v1_21.AddIndexToIssueUserIssueID),
+ // v278 -> v279
+ NewMigration("Add Index to comment.dependent_issue_id", v1_21.AddIndexToCommentDependentIssueID),
+ // v279 -> v280
+ NewMigration("Add Index to action.user_id", v1_21.AddIndexToActionUserID),
+
+ // Gitea 1.21.0 ends at v280
+
+ // v280 -> v281
+ NewMigration("Rename user themes", v1_22.RenameUserThemes),
+ // v281 -> v282
+ NewMigration("Add auth_token table", v1_22.CreateAuthTokenTable),
+ // v282 -> v283
+ NewMigration("Add Index to pull_auto_merge.doer_id", v1_22.AddIndexToPullAutoMergeDoerID),
+ // v283 -> v284
+ NewMigration("Add combined Index to issue_user.uid and issue_id", v1_22.AddCombinedIndexToIssueUser),
+ // v284 -> v285
+ NewMigration("Add ignore stale approval column on branch table", v1_22.AddIgnoreStaleApprovalsColumnToProtectedBranchTable),
+ // v285 -> v286
+ NewMigration("Add PreviousDuration to ActionRun", v1_22.AddPreviousDurationToActionRun),
+ // v286 -> v287
+ NewMigration("Add support for SHA256 git repositories", v1_22.AdjustDBForSha256),
+ // v287 -> v288
+ NewMigration("Use Slug instead of ID for Badges", v1_22.UseSlugInsteadOfIDForBadges),
+ // v288 -> v289
+ NewMigration("Add user_blocking table", v1_22.AddUserBlockingTable),
+ // v289 -> v290
+ NewMigration("Add default_wiki_branch to repository table", v1_22.AddDefaultWikiBranch),
+ // v290 -> v291
+ NewMigration("Add PayloadVersion to HookTask", v1_22.AddPayloadVersionToHookTaskTable),
+ // v291 -> v292
+ NewMigration("Add Index to attachment.comment_id", v1_22.AddCommentIDIndexofAttachment),
+ // v292 -> v293
+ NewMigration("Ensure every project has exactly one default column - No Op", noopMigration),
+ // v293 -> v294
+ NewMigration("Ensure every project has exactly one default column", v1_22.CheckProjectColumnsConsistency),
+
+ // Gitea 1.22.0-rc0 ends at v294
+
+ // v294 -> v295
+ NewMigration("Add unique index for project issue table", v1_22.AddUniqueIndexForProjectIssue),
+ // v295 -> v296
+ NewMigration("Add commit status summary table", v1_22.AddCommitStatusSummary),
+ // v296 -> v297
+ NewMigration("Add missing field of commit status summary table", v1_22.AddCommitStatusSummary2),
+ // v297 -> v298
+ NewMigration("Add everyone_access_mode for repo_unit", noopMigration),
+ // v298 -> v299
+ NewMigration("Drop wrongly created table o_auth2_application", v1_22.DropWronglyCreatedTable),
+
+ // Gitea 1.22.0-rc1 ends at v299
+
+ // v299 -> v300
+ NewMigration("Add content version to issue and comment table", v1_23.AddContentVersionToIssueAndComment),
+ // v300 -> v301
+ NewMigration("Add force-push branch protection support", v1_23.AddForcePushBranchProtection),
+ // v301 -> v302
+ NewMigration("Add skip_secondary_authorization option to oauth2 application table", v1_23.AddSkipSecondaryAuthColumnToOAuth2ApplicationTable),
+ // v302 -> v303
+ NewMigration("Add index to action_task stopped log_expired", v1_23.AddIndexToActionTaskStoppedLogExpired),
+}
+
+// GetCurrentDBVersion returns the current db version, or -1 if the database has not been initialized
+func GetCurrentDBVersion(x *xorm.Engine) (int64, error) {
+ if err := x.Sync(new(Version)); err != nil {
+ return -1, fmt.Errorf("sync: %w", err)
+ }
+
+ currentVersion := &Version{ID: 1}
+ has, err := x.Get(currentVersion)
+ if err != nil {
+ return -1, fmt.Errorf("get: %w", err)
+ }
+ if !has {
+ return -1, nil
+ }
+ return currentVersion.Version, nil
+}
+
+// ExpectedVersion returns the expected db version
+func ExpectedVersion() int64 {
+ return int64(minDBVersion + len(migrations))
+}
+
+// EnsureUpToDate will check if the db is at the correct version
+func EnsureUpToDate(x *xorm.Engine) error {
+ currentDB, err := GetCurrentDBVersion(x)
+ if err != nil {
+ return err
+ }
+
+ if currentDB < 0 {
+ return fmt.Errorf("Database has not been initialized")
+ }
+
+ if minDBVersion > currentDB {
+ return fmt.Errorf("DB version %d (<= %d) is too old for auto-migration. Upgrade to Gitea 1.6.4 first then upgrade to this version", currentDB, minDBVersion)
+ }
+
+ expected := ExpectedVersion()
+
+ if currentDB != expected {
+ return fmt.Errorf(`Current database version %d is not equal to the expected version %d. Please run "forgejo [--config /path/to/app.ini] migrate" to update the database version`, currentDB, expected)
+ }
+
+ return forgejo_migrations.EnsureUpToDate(x)
+}
+
+// Migrate database to current version
+func Migrate(x *xorm.Engine) error {
+ // Reset the default mapper to GonicMapper, as that is the default for Gitea.
+ x.SetMapper(names.GonicMapper{})
+ if err := x.Sync(new(Version)); err != nil {
+ return fmt.Errorf("sync: %w", err)
+ }
+
+ var previousVersion int64
+ currentVersion := &Version{ID: 1}
+ has, err := x.Get(currentVersion)
+ if err != nil {
+ return fmt.Errorf("get: %w", err)
+ } else if !has {
+ // If the version record does not exist, treat this as a fresh
+ // installation whose migrations can all be skipped.
+ currentVersion.ID = 0
+ currentVersion.Version = int64(minDBVersion + len(migrations))
+
+ if _, err = x.InsertOne(currentVersion); err != nil {
+ return fmt.Errorf("insert: %w", err)
+ }
+ } else {
+ previousVersion = currentVersion.Version
+ }
+
+ v := currentVersion.Version
+ if minDBVersion > v {
+ log.Fatal(`Forgejo no longer supports auto-migration from your previously installed version.
+Please upgrade to an intermediate version first (suggested v1.6.4), then upgrade to this version.`)
+ return nil
+ }
+
+ // Downgrading Forgejo database version is not supported
+ if int(v-minDBVersion) > len(migrations) {
+ msg := fmt.Sprintf("Your database (migration version: %d) is for a newer Forgejo, you can not use the newer database for this old Forgejo release (%d).", v, minDBVersion+len(migrations))
+ msg += "\nForgejo will exit to keep your database safe and unchanged. Please use the correct Forgejo release, do not change the migration version manually (incorrect manual operation may lose data)."
+ if !setting.IsProd {
+ msg += fmt.Sprintf("\nIf you are in development and really know what you're doing, you can force changing the migration version by executing: UPDATE version SET version=%d WHERE id=1;", minDBVersion+len(migrations))
+ }
+ log.Fatal("Migration Error: %s", msg)
+ return nil
+ }
+
+ // Some migration tasks depend on the git command
+ if git.DefaultContext == nil {
+ if err = git.InitSimple(context.Background()); err != nil {
+ return err
+ }
+ }
+
+ if err := forgejo_services.PreMigrationSanityChecks(x, previousVersion, setting.CfgProvider); err != nil {
+ return err
+ }
+
+ // Migrate
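+ // migrations[0] upgrades from version minDBVersion, so slicing at
+ // v-minDBVersion selects exactly the migrations that are still pending.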
+ for i, m := range migrations[v-minDBVersion:] {
+ log.Info("Migration[%d]: %s", v+int64(i), m.Description())
+ // Reset the mapper between each migration - migrations are not supposed to depend on each other
+ x.SetMapper(names.GonicMapper{})
+ if err = m.Migrate(x); err != nil {
+ return fmt.Errorf("migration[%d]: %s failed: %w", v+int64(i), m.Description(), err)
+ }
+ currentVersion.Version = v + int64(i) + 1
+ if _, err = x.ID(1).Update(currentVersion); err != nil {
+ return err
+ }
+ }
+
+ // Execute Forgejo specific migrations.
+ return forgejo_migrations.Migrate(x)
+}
diff --git a/models/migrations/test/tests.go b/models/migrations/test/tests.go
new file mode 100644
index 0000000..0e37233
--- /dev/null
+++ b/models/migrations/test/tests.go
@@ -0,0 +1,274 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+//nolint:forbidigo
+package test
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "os"
+ "path"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/modules/base"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/testlogger"
+ "code.gitea.io/gitea/modules/util"
+
+ "github.com/stretchr/testify/require"
+ "xorm.io/xorm"
+)
+
+// PrepareTestEnv prepares the test environment and resets the database. The skip parameter should usually be 0.
+// Provide models to be synced with the database - in particular any models you expect fixtures to be loaded into.
+//
+// fixtures in `models/migrations/fixtures/<TestName>` will be loaded automatically
+func PrepareTestEnv(t *testing.T, skip int, syncModels ...any) (*xorm.Engine, func()) {
+ t.Helper()
+ ourSkip := 2
+ ourSkip += skip
+ deferFn := testlogger.PrintCurrentTest(t, ourSkip)
+ require.NoError(t, os.RemoveAll(setting.RepoRootPath))
+ require.NoError(t, unittest.CopyDir(path.Join(filepath.Dir(setting.AppPath), "tests/gitea-repositories-meta"), setting.RepoRootPath))
+ ownerDirs, err := os.ReadDir(setting.RepoRootPath)
+ if err != nil {
+ require.NoError(t, err, "unable to read the new repo root: %v\n", err)
+ }
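+ // Git does not track empty directories, so recreate the skeleton
+ // directories that each bare repository in the fixtures is expected to have.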
+ for _, ownerDir := range ownerDirs {
+ if !ownerDir.Type().IsDir() {
+ continue
+ }
+ repoDirs, err := os.ReadDir(filepath.Join(setting.RepoRootPath, ownerDir.Name()))
+ if err != nil {
+ require.NoError(t, err, "unable to read the new repo root: %v\n", err)
+ }
+ for _, repoDir := range repoDirs {
+ _ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "pack"), 0o755)
+ _ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "info"), 0o755)
+ _ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "heads"), 0o755)
+ _ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "tag"), 0o755)
+ }
+ }
+
+ if err := deleteDB(); err != nil {
+ t.Errorf("unable to reset database: %v", err)
+ return nil, deferFn
+ }
+
+ x, err := newXORMEngine()
+ require.NoError(t, err)
+ if x != nil {
+ oldDefer := deferFn
+ deferFn = func() {
+ oldDefer()
+ if err := x.Close(); err != nil {
+ t.Errorf("error during close: %v", err)
+ }
+ if err := deleteDB(); err != nil {
+ t.Errorf("unable to reset database: %v", err)
+ }
+ }
+ }
+ if err != nil {
+ return x, deferFn
+ }
+
+ if len(syncModels) > 0 {
+ if err := x.Sync(syncModels...); err != nil {
+ t.Errorf("error during sync: %v", err)
+ return x, deferFn
+ }
+ }
+
+ fixturesDir := filepath.Join(filepath.Dir(setting.AppPath), "models", "migrations", "fixtures", t.Name())
+
+ if _, err := os.Stat(fixturesDir); err == nil {
+ t.Logf("initializing fixtures from: %s", fixturesDir)
+ if err := unittest.InitFixtures(
+ unittest.FixturesOptions{
+ Dir: fixturesDir,
+ }, x); err != nil {
+ t.Errorf("error whilst initializing fixtures from %s: %v", fixturesDir, err)
+ return x, deferFn
+ }
+ if err := unittest.LoadFixtures(x); err != nil {
+ t.Errorf("error whilst loading fixtures from %s: %v", fixturesDir, err)
+ return x, deferFn
+ }
+ } else if !os.IsNotExist(err) {
+ t.Errorf("unexpected error whilst checking for existence of fixtures: %v", err)
+ } else {
+ t.Logf("no fixtures found in: %s", fixturesDir)
+ }
+
+ return x, deferFn
+}
+
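+// MainTest locates the gitea binary and configuration, initializes settings
+// and git, runs the migration tests, and cleans up the temporary paths.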
+func MainTest(m *testing.M) {
+ log.RegisterEventWriter("test", testlogger.NewTestLoggerWriter)
+
+ giteaRoot := base.SetupGiteaRoot()
+ if giteaRoot == "" {
+ fmt.Println("Environment variable $GITEA_ROOT not set")
+ os.Exit(1)
+ }
+ giteaBinary := "gitea"
+ if runtime.GOOS == "windows" {
+ giteaBinary += ".exe"
+ }
+ setting.AppPath = path.Join(giteaRoot, giteaBinary)
+ if _, err := os.Stat(setting.AppPath); err != nil {
+ fmt.Printf("Could not find gitea binary at %s\n", setting.AppPath)
+ os.Exit(1)
+ }
+
+ giteaConf := os.Getenv("GITEA_CONF")
+ if giteaConf == "" {
+ giteaConf = path.Join(filepath.Dir(setting.AppPath), "tests/sqlite.ini")
+ fmt.Printf("Environment variable $GITEA_CONF not set - defaulting to %s\n", giteaConf)
+ }
+
+ if !path.IsAbs(giteaConf) {
+ setting.CustomConf = path.Join(giteaRoot, giteaConf)
+ } else {
+ setting.CustomConf = giteaConf
+ }
+
+ tmpDataPath, err := os.MkdirTemp("", "data")
+ if err != nil {
+ fmt.Printf("Unable to create temporary data path %v\n", err)
+ os.Exit(1)
+ }
+
+ setting.CustomPath = filepath.Join(setting.AppWorkPath, "custom")
+ setting.AppDataPath = tmpDataPath
+
+ unittest.InitSettings()
+ if err = git.InitFull(context.Background()); err != nil {
+ fmt.Printf("Unable to InitFull: %v\n", err)
+ os.Exit(1)
+ }
+ setting.LoadDBSetting()
+ setting.InitLoggersForTest()
+
+ exitStatus := m.Run()
+
+ if err := testlogger.WriterCloser.Reset(); err != nil && exitStatus == 0 {
+ fmt.Printf("testlogger.WriterCloser.Reset: error ignored: %v\n", err)
+ }
+ if err := removeAllWithRetry(setting.RepoRootPath); err != nil {
+ fmt.Fprintf(os.Stderr, "os.RemoveAll: %v\n", err)
+ }
+ if err := removeAllWithRetry(tmpDataPath); err != nil {
+ fmt.Fprintf(os.Stderr, "os.RemoveAll: %v\n", err)
+ }
+ os.Exit(exitStatus)
+}
+
+func newXORMEngine() (*xorm.Engine, error) {
+ if err := db.InitEngine(context.Background()); err != nil {
+ return nil, err
+ }
+ x := unittest.GetXORMEngine()
+ return x, nil
+}
+
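+// deleteDB resets the configured test database: it removes the SQLite file,
+// or drops and recreates the MySQL/PostgreSQL database (and schema).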
+func deleteDB() error {
+ switch {
+ case setting.Database.Type.IsSQLite3():
+ if err := util.Remove(setting.Database.Path); err != nil {
+ return err
+ }
+ return os.MkdirAll(path.Dir(setting.Database.Path), os.ModePerm)
+
+ case setting.Database.Type.IsMySQL():
+ db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@tcp(%s)/",
+ setting.Database.User, setting.Database.Passwd, setting.Database.Host))
+ if err != nil {
+ return err
+ }
+ defer db.Close()
+
+ databaseName := strings.SplitN(setting.Database.Name, "?", 2)[0]
+
+ if _, err = db.Exec(fmt.Sprintf("DROP DATABASE IF EXISTS %s", databaseName)); err != nil {
+ return err
+ }
+
+ if _, err = db.Exec(fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s", databaseName)); err != nil {
+ return err
+ }
+ return nil
+ case setting.Database.Type.IsPostgreSQL():
+ db, err := sql.Open("postgres", fmt.Sprintf("postgres://%s:%s@%s/?sslmode=%s",
+ setting.Database.User, setting.Database.Passwd, setting.Database.Host, setting.Database.SSLMode))
+ if err != nil {
+ return err
+ }
+ defer db.Close()
+
+ if _, err = db.Exec(fmt.Sprintf("DROP DATABASE IF EXISTS %s", setting.Database.Name)); err != nil {
+ return err
+ }
+
+ if _, err = db.Exec(fmt.Sprintf("CREATE DATABASE %s", setting.Database.Name)); err != nil {
+ return err
+ }
+ db.Close()
+
+ // Check if we need to setup a specific schema
+ if len(setting.Database.Schema) != 0 {
+ db, err = sql.Open("postgres", fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=%s",
+ setting.Database.User, setting.Database.Passwd, setting.Database.Host, setting.Database.Name, setting.Database.SSLMode))
+ if err != nil {
+ return err
+ }
+ defer db.Close()
+
+ schrows, err := db.Query(fmt.Sprintf("SELECT 1 FROM information_schema.schemata WHERE schema_name = '%s'", setting.Database.Schema))
+ if err != nil {
+ return err
+ }
+ defer schrows.Close()
+
+ if !schrows.Next() {
+ // Create and setup a DB schema
+ _, err = db.Exec(fmt.Sprintf("CREATE SCHEMA %s", setting.Database.Schema))
+ if err != nil {
+ return err
+ }
+ }
+
+ // Make the user's default search path the created schema; this will affect new connections
+ _, err = db.Exec(fmt.Sprintf(`ALTER USER "%s" SET search_path = %s`, setting.Database.User, setting.Database.Schema))
+ if err != nil {
+ return err
+ }
+ return nil
+ }
+ }
+
+ return nil
+}
+
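+// removeAllWithRetry retries os.RemoveAll for up to two seconds to tolerate
+// transient file locks, e.g. on Windows.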
+func removeAllWithRetry(dir string) error {
+ var err error
+ for i := 0; i < 20; i++ {
+ err = os.RemoveAll(dir)
+ if err == nil {
+ break
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+ return err
+}
diff --git a/models/migrations/v1_10/v100.go b/models/migrations/v1_10/v100.go
new file mode 100644
index 0000000..5d2fd8e
--- /dev/null
+++ b/models/migrations/v1_10/v100.go
@@ -0,0 +1,82 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_10 //nolint
+
+import (
+ "net/url"
+ "strings"
+ "time"
+
+ "xorm.io/xorm"
+)
+
+func UpdateMigrationServiceTypes(x *xorm.Engine) error {
+ type Repository struct {
+ ID int64
+ OriginalServiceType int `xorm:"index default(0)"`
+ OriginalURL string `xorm:"VARCHAR(2048)"`
+ }
+
+ if err := x.Sync(new(Repository)); err != nil {
+ return err
+ }
+
+ var last int
+ const batchSize = 50
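+ // Walk the repository table in id-ordered batches, classifying each
+ // original_url as github.com or a plain git service.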
+ for {
+ results := make([]Repository, 0, batchSize)
+ err := x.Where("original_url <> '' AND original_url IS NOT NULL").
+ And("original_service_type = 0 OR original_service_type IS NULL").
+ OrderBy("id").
+ Limit(batchSize, last).
+ Find(&results)
+ if err != nil {
+ return err
+ }
+ if len(results) == 0 {
+ break
+ }
+ last += len(results)
+
+ const PlainGitService = 1 // 1 plain git service
+ const GithubService = 2 // 2 github.com
+
+ for _, res := range results {
+ u, err := url.Parse(res.OriginalURL)
+ if err != nil {
+ return err
+ }
+ serviceType := PlainGitService
+ if strings.EqualFold(u.Host, "github.com") {
+ serviceType = GithubService
+ }
+ _, err = x.Exec("UPDATE repository SET original_service_type = ? WHERE id = ?", serviceType, res.ID)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ type ExternalLoginUser struct {
+ ExternalID string `xorm:"pk NOT NULL"`
+ UserID int64 `xorm:"INDEX NOT NULL"`
+ LoginSourceID int64 `xorm:"pk NOT NULL"`
+ RawData map[string]any `xorm:"TEXT JSON"`
+ Provider string `xorm:"index VARCHAR(25)"`
+ Email string
+ Name string
+ FirstName string
+ LastName string
+ NickName string
+ Description string
+ AvatarURL string
+ Location string
+ AccessToken string
+ AccessTokenSecret string
+ RefreshToken string
+ ExpiresAt time.Time
+ }
+
+ return x.Sync(new(ExternalLoginUser))
+}
diff --git a/models/migrations/v1_10/v101.go b/models/migrations/v1_10/v101.go
new file mode 100644
index 0000000..f023a2a
--- /dev/null
+++ b/models/migrations/v1_10/v101.go
@@ -0,0 +1,18 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_10 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func ChangeSomeColumnsLengthOfExternalLoginUser(x *xorm.Engine) error {
+ type ExternalLoginUser struct {
+ AccessToken string `xorm:"TEXT"`
+ AccessTokenSecret string `xorm:"TEXT"`
+ RefreshToken string `xorm:"TEXT"`
+ }
+
+ return x.Sync(new(ExternalLoginUser))
+}
diff --git a/models/migrations/v1_10/v88.go b/models/migrations/v1_10/v88.go
new file mode 100644
index 0000000..7e86ac3
--- /dev/null
+++ b/models/migrations/v1_10/v88.go
@@ -0,0 +1,65 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_10 //nolint
+
+import (
+ "crypto/sha1"
+ "fmt"
+
+ "xorm.io/xorm"
+)
+
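+// hashContext returns the 40-character hex SHA-1 of a commit status context,
+// sized to fit the indexed char(40) context_hash column below.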
+func hashContext(context string) string {
+ return fmt.Sprintf("%x", sha1.Sum([]byte(context)))
+}
+
+func AddCommitStatusContext(x *xorm.Engine) error {
+ type CommitStatus struct {
+ ID int64 `xorm:"pk autoincr"`
+ ContextHash string `xorm:"char(40) index"`
+ Context string `xorm:"TEXT"`
+ }
+
+ if err := x.Sync(new(CommitStatus)); err != nil {
+ return err
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+
+ start := 0
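+	// Walk all commit statuses in batches of 100 and backfill context_hash,
+	// committing each batch in its own transaction.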
+ for {
+ statuses := make([]*CommitStatus, 0, 100)
+ err := sess.OrderBy("id").Limit(100, start).Find(&statuses)
+ if err != nil {
+ return err
+ }
+ if len(statuses) == 0 {
+ break
+ }
+
+ if err = sess.Begin(); err != nil {
+ return err
+ }
+
+ for _, status := range statuses {
+ status.ContextHash = hashContext(status.Context)
+ if _, err := sess.ID(status.ID).Cols("context_hash").Update(status); err != nil {
+ return err
+ }
+ }
+
+ if err := sess.Commit(); err != nil {
+ return err
+ }
+
+ if len(statuses) < 100 {
+ break
+ }
+
+ start += len(statuses)
+ }
+
+ return nil
+}
diff --git a/models/migrations/v1_10/v89.go b/models/migrations/v1_10/v89.go
new file mode 100644
index 0000000..d5f27ff
--- /dev/null
+++ b/models/migrations/v1_10/v89.go
@@ -0,0 +1,35 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_10 //nolint
+
+import "xorm.io/xorm"
+
+func AddOriginalMigrationInfo(x *xorm.Engine) error {
+ // Issue see models/issue.go
+ type Issue struct {
+ OriginalAuthor string
+ OriginalAuthorID int64
+ }
+
+ if err := x.Sync(new(Issue)); err != nil {
+ return err
+ }
+
+	// Comment see models/issue_comment.go
+ type Comment struct {
+ OriginalAuthor string
+ OriginalAuthorID int64
+ }
+
+ if err := x.Sync(new(Comment)); err != nil {
+ return err
+ }
+
+	// Repository see models/repo.go
+ type Repository struct {
+ OriginalURL string
+ }
+
+ return x.Sync(new(Repository))
+}
diff --git a/models/migrations/v1_10/v90.go b/models/migrations/v1_10/v90.go
new file mode 100644
index 0000000..295d4b1
--- /dev/null
+++ b/models/migrations/v1_10/v90.go
@@ -0,0 +1,17 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_10 //nolint
+
+import "xorm.io/xorm"
+
+func ChangeSomeColumnsLengthOfRepo(x *xorm.Engine) error {
+ type Repository struct {
+ ID int64 `xorm:"pk autoincr"`
+ Description string `xorm:"TEXT"`
+ Website string `xorm:"VARCHAR(2048)"`
+ OriginalURL string `xorm:"VARCHAR(2048)"`
+ }
+
+ return x.Sync(new(Repository))
+}
diff --git a/models/migrations/v1_10/v91.go b/models/migrations/v1_10/v91.go
new file mode 100644
index 0000000..48cac2d
--- /dev/null
+++ b/models/migrations/v1_10/v91.go
@@ -0,0 +1,25 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_10 //nolint
+
+import "xorm.io/xorm"
+
+func AddIndexOnRepositoryAndComment(x *xorm.Engine) error {
+ type Repository struct {
+ ID int64 `xorm:"pk autoincr"`
+ OwnerID int64 `xorm:"index"`
+ }
+
+ if err := x.Sync(new(Repository)); err != nil {
+ return err
+ }
+
+ type Comment struct {
+ ID int64 `xorm:"pk autoincr"`
+ Type int `xorm:"index"`
+ ReviewID int64 `xorm:"index"`
+ }
+
+ return x.Sync(new(Comment))
+}
diff --git a/models/migrations/v1_10/v92.go b/models/migrations/v1_10/v92.go
new file mode 100644
index 0000000..9080108
--- /dev/null
+++ b/models/migrations/v1_10/v92.go
@@ -0,0 +1,14 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_10 //nolint
+
+import (
+ "xorm.io/builder"
+ "xorm.io/xorm"
+)
+
+func RemoveLingeringIndexStatus(x *xorm.Engine) error {
+ _, err := x.Exec(builder.Delete(builder.NotIn("`repo_id`", builder.Select("`id`").From("`repository`"))).From("`repo_indexer_status`"))
+ return err
+}
diff --git a/models/migrations/v1_10/v93.go b/models/migrations/v1_10/v93.go
new file mode 100644
index 0000000..ee59a8d
--- /dev/null
+++ b/models/migrations/v1_10/v93.go
@@ -0,0 +1,15 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_10 //nolint
+
+import "xorm.io/xorm"
+
+func AddEmailNotificationEnabledToUser(x *xorm.Engine) error {
+ // User see models/user.go
+ type User struct {
+ EmailNotificationsPreference string `xorm:"VARCHAR(20) NOT NULL DEFAULT 'enabled'"`
+ }
+
+ return x.Sync(new(User))
+}
diff --git a/models/migrations/v1_10/v94.go b/models/migrations/v1_10/v94.go
new file mode 100644
index 0000000..c131af1
--- /dev/null
+++ b/models/migrations/v1_10/v94.go
@@ -0,0 +1,23 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_10 //nolint
+
+import "xorm.io/xorm"
+
+func AddStatusCheckColumnsForProtectedBranches(x *xorm.Engine) error {
+ type ProtectedBranch struct {
+ EnableStatusCheck bool `xorm:"NOT NULL DEFAULT false"`
+ StatusCheckContexts []string `xorm:"JSON TEXT"`
+ }
+
+ if err := x.Sync(new(ProtectedBranch)); err != nil {
+ return err
+ }
+
+ _, err := x.Cols("enable_status_check", "status_check_contexts").Update(&ProtectedBranch{
+ EnableStatusCheck: false,
+ StatusCheckContexts: []string{},
+ })
+ return err
+}
diff --git a/models/migrations/v1_10/v95.go b/models/migrations/v1_10/v95.go
new file mode 100644
index 0000000..3b1f67f
--- /dev/null
+++ b/models/migrations/v1_10/v95.go
@@ -0,0 +1,19 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_10 //nolint
+
+import "xorm.io/xorm"
+
+func AddCrossReferenceColumns(x *xorm.Engine) error {
+ // Comment see models/comment.go
+ type Comment struct {
+ RefRepoID int64 `xorm:"index"`
+ RefIssueID int64 `xorm:"index"`
+ RefCommentID int64 `xorm:"index"`
+ RefAction int64 `xorm:"SMALLINT"`
+ RefIsPull bool
+ }
+
+ return x.Sync(new(Comment))
+}
diff --git a/models/migrations/v1_10/v96.go b/models/migrations/v1_10/v96.go
new file mode 100644
index 0000000..34c8240
--- /dev/null
+++ b/models/migrations/v1_10/v96.go
@@ -0,0 +1,66 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_10 //nolint
+
+import (
+ "path/filepath"
+
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/xorm"
+)
+
+func DeleteOrphanedAttachments(x *xorm.Engine) error {
+ type Attachment struct {
+ ID int64 `xorm:"pk autoincr"`
+ UUID string `xorm:"uuid UNIQUE"`
+ IssueID int64 `xorm:"INDEX"`
+ ReleaseID int64 `xorm:"INDEX"`
+ CommentID int64
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+
+ limit := setting.Database.IterateBufferSize
+ if limit <= 0 {
+ limit = 50
+ }
+
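+	// Repeatedly pick up orphaned attachments (no issue and no surviving
+	// release), delete their rows, then remove their files from storage.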
+ for {
+ attachments := make([]Attachment, 0, limit)
+ if err := sess.Where("`issue_id` = 0 and (`release_id` = 0 or `release_id` not in (select `id` from `release`))").
+ Cols("id, uuid").Limit(limit).
+ Asc("id").
+ Find(&attachments); err != nil {
+ return err
+ }
+ if len(attachments) == 0 {
+ return nil
+ }
+
+ ids := make([]int64, 0, limit)
+ for _, attachment := range attachments {
+ ids = append(ids, attachment.ID)
+ }
+ if len(ids) > 0 {
+ if _, err := sess.In("id", ids).Delete(new(Attachment)); err != nil {
+ return err
+ }
+ }
+
+ for _, attachment := range attachments {
+ uuid := attachment.UUID
+ if err := util.RemoveAll(filepath.Join(setting.Attachment.Storage.Path, uuid[0:1], uuid[1:2], uuid)); err != nil {
+ return err
+ }
+ }
+ if len(attachments) < limit {
+ return nil
+ }
+ }
+}
diff --git a/models/migrations/v1_10/v97.go b/models/migrations/v1_10/v97.go
new file mode 100644
index 0000000..dee45b3
--- /dev/null
+++ b/models/migrations/v1_10/v97.go
@@ -0,0 +1,14 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_10 //nolint
+
+import "xorm.io/xorm"
+
+func AddRepoAdminChangeTeamAccessColumnForUser(x *xorm.Engine) error {
+ type User struct {
+ RepoAdminChangeTeamAccess bool `xorm:"NOT NULL DEFAULT false"`
+ }
+
+ return x.Sync(new(User))
+}
diff --git a/models/migrations/v1_10/v98.go b/models/migrations/v1_10/v98.go
new file mode 100644
index 0000000..bdd9aed
--- /dev/null
+++ b/models/migrations/v1_10/v98.go
@@ -0,0 +1,16 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_10 //nolint
+
+import "xorm.io/xorm"
+
+func AddOriginalAuthorOnMigratedReleases(x *xorm.Engine) error {
+ type Release struct {
+ ID int64
+ OriginalAuthor string
+ OriginalAuthorID int64 `xorm:"index"`
+ }
+
+ return x.Sync(new(Release))
+}
diff --git a/models/migrations/v1_10/v99.go b/models/migrations/v1_10/v99.go
new file mode 100644
index 0000000..ebe6597
--- /dev/null
+++ b/models/migrations/v1_10/v99.go
@@ -0,0 +1,38 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_10 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func AddTaskTable(x *xorm.Engine) error {
+ // TaskType defines task type
+ type TaskType int
+
+ // TaskStatus defines task status
+ type TaskStatus int
+
+ type Task struct {
+ ID int64
+ DoerID int64 `xorm:"index"` // operator
+		OwnerID        int64              `xorm:"index"` // repo owner id; when creating, the repo ID may be zero
+ RepoID int64 `xorm:"index"`
+ Type TaskType
+ Status TaskStatus `xorm:"index"`
+ StartTime timeutil.TimeStamp
+ EndTime timeutil.TimeStamp
+ PayloadContent string `xorm:"TEXT"`
+		Errors         string             `xorm:"TEXT"` // if the task failed, this stores the error reason
+ Created timeutil.TimeStamp `xorm:"created"`
+ }
+
+ type Repository struct {
+ Status int `xorm:"NOT NULL DEFAULT 0"`
+ }
+
+ return x.Sync(new(Task), new(Repository))
+}
diff --git a/models/migrations/v1_11/v102.go b/models/migrations/v1_11/v102.go
new file mode 100644
index 0000000..9358e4c
--- /dev/null
+++ b/models/migrations/v1_11/v102.go
@@ -0,0 +1,22 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_11 //nolint
+
+import (
+ "code.gitea.io/gitea/models/migrations/base"
+
+ "xorm.io/xorm"
+)
+
+func DropColumnHeadUserNameOnPullRequest(x *xorm.Engine) error {
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+ if err := base.DropTableColumns(sess, "pull_request", "head_user_name"); err != nil {
+ return err
+ }
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_11/v103.go b/models/migrations/v1_11/v103.go
new file mode 100644
index 0000000..53527da
--- /dev/null
+++ b/models/migrations/v1_11/v103.go
@@ -0,0 +1,17 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_11 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddWhitelistDeployKeysToBranches(x *xorm.Engine) error {
+ type ProtectedBranch struct {
+ ID int64
+ WhitelistDeployKeys bool `xorm:"NOT NULL DEFAULT false"`
+ }
+
+ return x.Sync(new(ProtectedBranch))
+}
diff --git a/models/migrations/v1_11/v104.go b/models/migrations/v1_11/v104.go
new file mode 100644
index 0000000..3e8ee64
--- /dev/null
+++ b/models/migrations/v1_11/v104.go
@@ -0,0 +1,34 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_11 //nolint
+
+import (
+ "code.gitea.io/gitea/models/migrations/base"
+
+ "xorm.io/xorm"
+)
+
+func RemoveLabelUneededCols(x *xorm.Engine) error {
+ // Make sure the columns exist before dropping them
+ type Label struct {
+ QueryString string
+ IsSelected bool
+ }
+ if err := x.Sync(new(Label)); err != nil {
+ return err
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+ if err := base.DropTableColumns(sess, "label", "query_string"); err != nil {
+ return err
+ }
+ if err := base.DropTableColumns(sess, "label", "is_selected"); err != nil {
+ return err
+ }
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_11/v105.go b/models/migrations/v1_11/v105.go
new file mode 100644
index 0000000..b91340c
--- /dev/null
+++ b/models/migrations/v1_11/v105.go
@@ -0,0 +1,23 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_11 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddTeamIncludesAllRepositories(x *xorm.Engine) error {
+ type Team struct {
+ ID int64 `xorm:"pk autoincr"`
+ IncludesAllRepositories bool `xorm:"NOT NULL DEFAULT false"`
+ }
+
+ if err := x.Sync(new(Team)); err != nil {
+ return err
+ }
+
+ _, err := x.Exec("UPDATE `team` SET `includes_all_repositories` = ? WHERE `name`=?",
+ true, "Owners")
+ return err
+}
diff --git a/models/migrations/v1_11/v106.go b/models/migrations/v1_11/v106.go
new file mode 100644
index 0000000..ecb11cd
--- /dev/null
+++ b/models/migrations/v1_11/v106.go
@@ -0,0 +1,25 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_11 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+// RepoWatchMode specifies what kind of watch the user has on a repository
+type RepoWatchMode int8
+
+// Watch represents a connection request for receiving repository notifications.
+type Watch struct {
+ ID int64 `xorm:"pk autoincr"`
+ Mode RepoWatchMode `xorm:"SMALLINT NOT NULL DEFAULT 1"`
+}
+
+func AddModeColumnToWatch(x *xorm.Engine) error {
+ if err := x.Sync(new(Watch)); err != nil {
+ return err
+ }
+ _, err := x.Exec("UPDATE `watch` SET `mode` = 1")
+ return err
+}
diff --git a/models/migrations/v1_11/v107.go b/models/migrations/v1_11/v107.go
new file mode 100644
index 0000000..f0bfe58
--- /dev/null
+++ b/models/migrations/v1_11/v107.go
@@ -0,0 +1,17 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_11 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddTemplateToRepo(x *xorm.Engine) error {
+ type Repository struct {
+ IsTemplate bool `xorm:"INDEX NOT NULL DEFAULT false"`
+ TemplateID int64 `xorm:"INDEX"`
+ }
+
+ return x.Sync(new(Repository))
+}
diff --git a/models/migrations/v1_11/v108.go b/models/migrations/v1_11/v108.go
new file mode 100644
index 0000000..a850962
--- /dev/null
+++ b/models/migrations/v1_11/v108.go
@@ -0,0 +1,17 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_11 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddCommentIDOnNotification(x *xorm.Engine) error {
+ type Notification struct {
+ ID int64 `xorm:"pk autoincr"`
+ CommentID int64
+ }
+
+ return x.Sync(new(Notification))
+}
diff --git a/models/migrations/v1_11/v109.go b/models/migrations/v1_11/v109.go
new file mode 100644
index 0000000..ea565cc
--- /dev/null
+++ b/models/migrations/v1_11/v109.go
@@ -0,0 +1,16 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_11 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddCanCreateOrgRepoColumnForTeam(x *xorm.Engine) error {
+ type Team struct {
+ CanCreateOrgRepo bool `xorm:"NOT NULL DEFAULT false"`
+ }
+
+ return x.Sync(new(Team))
+}
diff --git a/models/migrations/v1_11/v110.go b/models/migrations/v1_11/v110.go
new file mode 100644
index 0000000..fce9be8
--- /dev/null
+++ b/models/migrations/v1_11/v110.go
@@ -0,0 +1,26 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_11 //nolint
+
+import (
+ "xorm.io/xorm"
+ "xorm.io/xorm/schemas"
+)
+
+func ChangeReviewContentToText(x *xorm.Engine) error {
+ switch x.Dialect().URI().DBType {
+ case schemas.MYSQL:
+ _, err := x.Exec("ALTER TABLE review MODIFY COLUMN content TEXT")
+ return err
+ case schemas.ORACLE:
+ _, err := x.Exec("ALTER TABLE review MODIFY content TEXT")
+ return err
+ case schemas.POSTGRES:
+ _, err := x.Exec("ALTER TABLE review ALTER COLUMN content TYPE TEXT")
+ return err
+ default:
+		// SQLite doesn't support ALTER COLUMN, and it already maps String to TEXT by default, so no migration is needed
+ return nil
+ }
+}
diff --git a/models/migrations/v1_11/v111.go b/models/migrations/v1_11/v111.go
new file mode 100644
index 0000000..cc3dc0d
--- /dev/null
+++ b/models/migrations/v1_11/v111.go
@@ -0,0 +1,442 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_11 //nolint
+
+import (
+ "fmt"
+
+ "xorm.io/xorm"
+)
+
+func AddBranchProtectionCanPushAndEnableWhitelist(x *xorm.Engine) error {
+ type ProtectedBranch struct {
+ CanPush bool `xorm:"NOT NULL DEFAULT false"`
+ EnableApprovalsWhitelist bool `xorm:"NOT NULL DEFAULT false"`
+ ApprovalsWhitelistUserIDs []int64 `xorm:"JSON TEXT"`
+ ApprovalsWhitelistTeamIDs []int64 `xorm:"JSON TEXT"`
+ RequiredApprovals int64 `xorm:"NOT NULL DEFAULT 0"`
+ }
+
+ type User struct {
+ ID int64 `xorm:"pk autoincr"`
+ Type int
+
+ // Permissions
+ IsAdmin bool
+ // IsRestricted bool `xorm:"NOT NULL DEFAULT false"` glitch: this column was added in v1_12/v121.go
+ // Visibility int `xorm:"NOT NULL DEFAULT 0"` glitch: this column was added in v1_12/v124.go
+ }
+
+ type Review struct {
+ ID int64 `xorm:"pk autoincr"`
+ Official bool `xorm:"NOT NULL DEFAULT false"`
+
+ ReviewerID int64 `xorm:"index"`
+ IssueID int64 `xorm:"index"`
+ }
+
+ if err := x.Sync(new(ProtectedBranch)); err != nil {
+ return err
+ }
+
+ if err := x.Sync(new(Review)); err != nil {
+ return err
+ }
+
+ const (
+ // ReviewTypeApprove approves changes
+ ReviewTypeApprove int = 1
+ // ReviewTypeReject gives feedback blocking merge
+ ReviewTypeReject int = 3
+
+ // VisibleTypePublic Visible for everyone
+ // VisibleTypePublic int = 0
+ // VisibleTypePrivate Visible only for organization's members
+ // VisibleTypePrivate int = 2
+
+ // unit.UnitTypeCode is unit type code
+ UnitTypeCode int = 1
+
+ // AccessModeNone no access
+ AccessModeNone int = 0
+ // AccessModeRead read access
+ AccessModeRead int = 1
+ // AccessModeWrite write access
+ AccessModeWrite int = 2
+ // AccessModeOwner owner access
+ AccessModeOwner int = 4
+ )
+
+ // Repository represents a git repository.
+ type Repository struct {
+ ID int64 `xorm:"pk autoincr"`
+ OwnerID int64 `xorm:"UNIQUE(s) index"`
+
+ IsPrivate bool `xorm:"INDEX"`
+ }
+
+ type PullRequest struct {
+ ID int64 `xorm:"pk autoincr"`
+
+ BaseRepoID int64 `xorm:"INDEX"`
+ BaseBranch string
+ }
+
+ // RepoUnit describes all units of a repository
+ type RepoUnit struct {
+ ID int64
+ RepoID int64 `xorm:"INDEX(s)"`
+ Type int `xorm:"INDEX(s)"`
+ }
+
+ type Permission struct {
+ AccessMode int
+ Units []*RepoUnit
+ UnitsMode map[int]int
+ }
+
+ type TeamUser struct {
+ ID int64 `xorm:"pk autoincr"`
+ TeamID int64 `xorm:"UNIQUE(s)"`
+ UID int64 `xorm:"UNIQUE(s)"`
+ }
+
+ type Collaboration struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ UserID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ Mode int `xorm:"DEFAULT 2 NOT NULL"`
+ }
+
+ type Access struct {
+ ID int64 `xorm:"pk autoincr"`
+ UserID int64 `xorm:"UNIQUE(s)"`
+ RepoID int64 `xorm:"UNIQUE(s)"`
+ Mode int
+ }
+
+ type TeamUnit struct {
+ ID int64 `xorm:"pk autoincr"`
+ OrgID int64 `xorm:"INDEX"`
+ TeamID int64 `xorm:"UNIQUE(s)"`
+ Type int `xorm:"UNIQUE(s)"`
+ }
+
+	// Team represents an organization team.
+ type Team struct {
+ ID int64 `xorm:"pk autoincr"`
+ OrgID int64 `xorm:"INDEX"`
+ Authorize int
+ }
+
+ // getUserRepoPermission static function based on issues_model.IsOfficialReviewer at 5d78792385
+ getUserRepoPermission := func(sess *xorm.Session, repo *Repository, user *User) (Permission, error) {
+ var perm Permission
+
+ repoOwner := new(User)
+ has, err := sess.ID(repo.OwnerID).Get(repoOwner)
+ if err != nil || !has {
+ return perm, err
+ }
+
+		// Prevent strangers from checking out a public repo of a private organization.
+		// Allow a user who is a collaborator on a repo within a private organization, even if they are not a member of the organization itself.
+ hasOrgVisible := true
+ // Not SignedUser
+ if user == nil {
+ // hasOrgVisible = repoOwner.Visibility == VisibleTypePublic // VisibleTypePublic is the default
+ } else if !user.IsAdmin {
+ _, err := sess.
+ Where("uid=?", user.ID).
+ And("org_id=?", repoOwner.ID).
+ Table("org_user").
+ Exist()
+ if err != nil {
+ hasOrgVisible = false
+ }
+ // VisibleTypePublic is the default so the condition below is always false
+ // if (repoOwner.Visibility == VisibleTypePrivate) && !hasMemberWithUserID {
+ // hasOrgVisible = false
+ // }
+ }
+
+ isCollaborator, err := sess.Get(&Collaboration{RepoID: repo.ID, UserID: user.ID})
+ if err != nil {
+ return perm, err
+ }
+
+ if repoOwner.Type == 1 && !hasOrgVisible && !isCollaborator {
+ perm.AccessMode = AccessModeNone
+ return perm, err
+ }
+
+ var units []*RepoUnit
+ if err := sess.Where("repo_id = ?", repo.ID).Find(&units); err != nil {
+ return perm, err
+ }
+ perm.Units = units
+
+ // anonymous visit public repo
+ if user == nil {
+ perm.AccessMode = AccessModeRead
+ return perm, err
+ }
+
+ // Admin or the owner has super access to the repository
+ if user.IsAdmin || user.ID == repo.OwnerID {
+ perm.AccessMode = AccessModeOwner
+ return perm, err
+ }
+
+ accessLevel := func(user *User, repo *Repository) (int, error) {
+ mode := AccessModeNone
+ var userID int64
+ restricted := false
+
+ if user != nil {
+ userID = user.ID
+ restricted = false
+ }
+
+ if !restricted && !repo.IsPrivate {
+ mode = AccessModeRead
+ }
+
+ if userID == 0 {
+ return mode, nil
+ }
+
+ if userID == repo.OwnerID {
+ return AccessModeOwner, nil
+ }
+
+ a := &Access{UserID: userID, RepoID: repo.ID}
+ if has, err := sess.Get(a); !has || err != nil {
+ return mode, err
+ }
+ return a.Mode, nil
+ }
+
+ // plain user
+ perm.AccessMode, err = accessLevel(user, repo)
+ if err != nil {
+ return perm, err
+ }
+
+		// If the owner is not an organization, we are done.
+ if repoOwner.Type != 1 {
+ return perm, err
+ }
+
+ perm.UnitsMode = make(map[int]int)
+
+ // Collaborators on organization
+ if isCollaborator {
+ for _, u := range units {
+ perm.UnitsMode[u.Type] = perm.AccessMode
+ }
+ }
+
+ // get units mode from teams
+ var teams []*Team
+ err = sess.
+ Join("INNER", "team_user", "team_user.team_id = team.id").
+ Join("INNER", "team_repo", "team_repo.team_id = team.id").
+ Where("team.org_id = ?", repo.OwnerID).
+ And("team_user.uid=?", user.ID).
+ And("team_repo.repo_id=?", repo.ID).
+ Find(&teams)
+ if err != nil {
+ return perm, err
+ }
+
+ // if user in an owner team
+ for _, team := range teams {
+ if team.Authorize >= AccessModeOwner {
+ perm.AccessMode = AccessModeOwner
+ perm.UnitsMode = nil
+ return perm, err
+ }
+ }
+
+ for _, u := range units {
+ var found bool
+ for _, team := range teams {
+ var teamU []*TeamUnit
+ var unitEnabled bool
+				err = sess.Where("team_id = ?", team.ID).Find(&teamU)
+				if err != nil {
+					return perm, err
+				}
+
+ for _, tu := range teamU {
+ if tu.Type == u.Type {
+ unitEnabled = true
+ break
+ }
+ }
+
+ if unitEnabled {
+ m := perm.UnitsMode[u.Type]
+ if m < team.Authorize {
+ perm.UnitsMode[u.Type] = team.Authorize
+ }
+ found = true
+ }
+ }
+
+ // for a public repo on an organization, a non-restricted user has read permission on non-team defined units.
+ if !found && !repo.IsPrivate {
+ if _, ok := perm.UnitsMode[u.Type]; !ok {
+ perm.UnitsMode[u.Type] = AccessModeRead
+ }
+ }
+ }
+
+ // remove no permission units
+ perm.Units = make([]*RepoUnit, 0, len(units))
+ for t := range perm.UnitsMode {
+ for _, u := range units {
+ if u.Type == t {
+ perm.Units = append(perm.Units, u)
+ }
+ }
+ }
+
+ return perm, err
+ }
+
+ // isOfficialReviewer static function based on 5d78792385
+ isOfficialReviewer := func(sess *xorm.Session, issueID int64, reviewer *User) (bool, error) {
+ pr := new(PullRequest)
+ has, err := sess.ID(issueID).Get(pr)
+ if err != nil {
+ return false, err
+ } else if !has {
+			return false, fmt.Errorf("PullRequest for issueID %d does not exist", issueID)
+ }
+
+ baseRepo := new(Repository)
+ has, err = sess.ID(pr.BaseRepoID).Get(baseRepo)
+ if err != nil {
+ return false, err
+ } else if !has {
+			return false, fmt.Errorf("baseRepo with id %d does not exist", pr.BaseRepoID)
+ }
+ protectedBranch := new(ProtectedBranch)
+ has, err = sess.Where("repo_id=? AND branch_name=?", baseRepo.ID, pr.BaseBranch).Get(protectedBranch)
+ if err != nil {
+ return false, err
+ }
+ if !has {
+ return false, nil
+ }
+
+ if !protectedBranch.EnableApprovalsWhitelist {
+ perm, err := getUserRepoPermission(sess, baseRepo, reviewer)
+ if err != nil {
+ return false, err
+ }
+ if perm.UnitsMode == nil {
+ for _, u := range perm.Units {
+ if u.Type == UnitTypeCode {
+ return AccessModeWrite <= perm.AccessMode, nil
+ }
+ }
+ return false, nil
+ }
+ return AccessModeWrite <= perm.UnitsMode[UnitTypeCode], nil
+ }
+ for _, id := range protectedBranch.ApprovalsWhitelistUserIDs {
+ if id == reviewer.ID {
+ return true, nil
+ }
+ }
+
+ // isUserInTeams
+ return sess.Where("uid=?", reviewer.ID).In("team_id", protectedBranch.ApprovalsWhitelistTeamIDs).Exist(new(TeamUser))
+ }
+
+ if _, err := x.Exec("UPDATE `protected_branch` SET `enable_whitelist` = ? WHERE enable_whitelist IS NULL", false); err != nil {
+ return err
+ }
+ if _, err := x.Exec("UPDATE `protected_branch` SET `can_push` = `enable_whitelist`"); err != nil {
+ return err
+ }
+ if _, err := x.Exec("UPDATE `protected_branch` SET `enable_approvals_whitelist` = ? WHERE `required_approvals` > ?", true, 0); err != nil {
+ return err
+ }
+
+ var pageSize int64 = 20
+ qresult, err := x.QueryInterface("SELECT max(id) as max_id FROM issue")
+ if err != nil {
+ return err
+ }
+ var totalIssues int64
+ totalIssues, ok := qresult[0]["max_id"].(int64)
+ if !ok {
+		// If there are no issues at all, max(id) is NULL and the assertion fails; nothing to migrate
+ return nil
+ }
+ totalPages := totalIssues / pageSize
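+	// Reviews are scanned in issue-ID ranges of pageSize, so paging by
+	// max(id) covers every issue even when IDs are sparse.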
+
+ executeBody := func(page, pageSize int64) error {
+ // Find latest review of each user in each pull request, and set official field if appropriate
+ reviews := []*Review{}
+
+ if err := x.SQL("SELECT * FROM review WHERE id IN (SELECT max(id) as id FROM review WHERE issue_id > ? AND issue_id <= ? AND type in (?, ?) GROUP BY issue_id, reviewer_id)",
+ page*pageSize, (page+1)*pageSize, ReviewTypeApprove, ReviewTypeReject).
+ Find(&reviews); err != nil {
+ return err
+ }
+
+ if len(reviews) == 0 {
+ return nil
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ var updated int
+ for _, review := range reviews {
+ reviewer := new(User)
+ has, err := sess.ID(review.ReviewerID).Get(reviewer)
+ if err != nil || !has {
+				// An error might occur if the user doesn't exist; ignore it.
+ continue
+ }
+
+ official, err := isOfficialReviewer(sess, review.IssueID, reviewer)
+ if err != nil {
+				// The branch might not be protected, or another error occurred; ignore it.
+ continue
+ }
+ review.Official = official
+ updated++
+ if _, err := sess.ID(review.ID).Cols("official").Update(review); err != nil {
+ return err
+ }
+ }
+
+ if updated > 0 {
+ return sess.Commit()
+ }
+ return nil
+ }
+
+ var page int64
+ for page = 0; page <= totalPages; page++ {
+ if err := executeBody(page, pageSize); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/models/migrations/v1_11/v112.go b/models/migrations/v1_11/v112.go
new file mode 100644
index 0000000..0857663
--- /dev/null
+++ b/models/migrations/v1_11/v112.go
@@ -0,0 +1,47 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_11 //nolint
+
+import (
+ "fmt"
+ "path/filepath"
+
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+ "xorm.io/xorm"
+)
+
+func RemoveAttachmentMissedRepo(x *xorm.Engine) error {
+ type Attachment struct {
+ UUID string `xorm:"uuid"`
+ }
+ var start int
+ attachments := make([]*Attachment, 0, 50)
+ for {
+ err := x.Select("uuid").Where(builder.NotIn("release_id", builder.Select("id").From("`release`"))).
+ And("release_id > 0").
+ OrderBy("id").Limit(50, start).Find(&attachments)
+ if err != nil {
+ return err
+ }
+
+ for i := 0; i < len(attachments); i++ {
+ uuid := attachments[i].UUID
+ if err = util.RemoveAll(filepath.Join(setting.Attachment.Storage.Path, uuid[0:1], uuid[1:2], uuid)); err != nil {
+			fmt.Printf("Error: %v\n", err) //nolint:forbidigo
+ }
+ }
+
+ if len(attachments) < 50 {
+ break
+ }
+ start += 50
+ attachments = attachments[:0]
+ }
+
+ _, err := x.Exec("DELETE FROM attachment WHERE release_id > 0 AND release_id NOT IN (SELECT id FROM `release`)")
+ return err
+}
diff --git a/models/migrations/v1_11/v113.go b/models/migrations/v1_11/v113.go
new file mode 100644
index 0000000..dea344a
--- /dev/null
+++ b/models/migrations/v1_11/v113.go
@@ -0,0 +1,22 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_11 //nolint
+
+import (
+ "fmt"
+
+ "xorm.io/xorm"
+)
+
+func FeatureChangeTargetBranch(x *xorm.Engine) error {
+ type Comment struct {
+ OldRef string
+ NewRef string
+ }
+
+ if err := x.Sync(new(Comment)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+ return nil
+}
diff --git a/models/migrations/v1_11/v114.go b/models/migrations/v1_11/v114.go
new file mode 100644
index 0000000..95adcee
--- /dev/null
+++ b/models/migrations/v1_11/v114.go
@@ -0,0 +1,50 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_11 //nolint
+
+import (
+ "net/url"
+
+ "xorm.io/xorm"
+)
+
+func SanitizeOriginalURL(x *xorm.Engine) error {
+ type Repository struct {
+ ID int64
+ OriginalURL string `xorm:"VARCHAR(2048)"`
+ }
+
+ var last int
+ const batchSize = 50
+ for {
+ results := make([]Repository, 0, batchSize)
+ err := x.Where("original_url <> '' AND original_url IS NOT NULL").
+ And("original_service_type = 0 OR original_service_type IS NULL").
+ OrderBy("id").
+ Limit(batchSize, last).
+ Find(&results)
+ if err != nil {
+ return err
+ }
+ if len(results) == 0 {
+ break
+ }
+ last += len(results)
+
+ for _, res := range results {
+ u, err := url.Parse(res.OriginalURL)
+ if err != nil {
+				// it is ok to continue here; we only care about fixing URLs that we can parse
+ continue
+ }
+ u.User = nil
+ originalURL := u.String()
+ _, err = x.Exec("UPDATE repository SET original_url = ? WHERE id = ?", originalURL, res.ID)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
diff --git a/models/migrations/v1_11/v115.go b/models/migrations/v1_11/v115.go
new file mode 100644
index 0000000..8c631cf
--- /dev/null
+++ b/models/migrations/v1_11/v115.go
@@ -0,0 +1,161 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_11 //nolint
+
+import (
+ "crypto/md5"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "path/filepath"
+ "time"
+
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/xorm"
+)
+
+func RenameExistingUserAvatarName(x *xorm.Engine) error {
+ sess := x.NewSession()
+ defer sess.Close()
+
+ type User struct {
+ ID int64 `xorm:"pk autoincr"`
+ LowerName string `xorm:"UNIQUE NOT NULL"`
+ Avatar string
+ }
+
+ ticker := time.NewTicker(5 * time.Second)
+ defer ticker.Stop()
+
+ count, err := x.Count(new(User))
+ if err != nil {
+ return err
+ }
+ log.Info("%d User Avatar(s) to migrate ...", count)
+
+ deleteList := make(container.Set[string])
+ start := 0
+ migrated := 0
+ for {
+ if err := sess.Begin(); err != nil {
+ return fmt.Errorf("session.Begin: %w", err)
+ }
+ users := make([]*User, 0, 50)
+ if err := sess.Table("user").Asc("id").Limit(50, start).Find(&users); err != nil {
+ return fmt.Errorf("select users from id [%d]: %w", start, err)
+ }
+ if len(users) == 0 {
+ _ = sess.Rollback()
+ break
+ }
+
+ log.Info("select users [%d - %d]", start, start+len(users))
+ start += 50
+
+ for _, user := range users {
+ oldAvatar := user.Avatar
+
+ if stat, err := os.Stat(filepath.Join(setting.Avatar.Storage.Path, oldAvatar)); err != nil || !stat.Mode().IsRegular() {
+ if err == nil {
+ err = fmt.Errorf("Error: \"%s\" is not a regular file", oldAvatar)
+ }
+ log.Warn("[user: %s] os.Stat: %v", user.LowerName, err)
+ // avatar doesn't exist in the storage
+ // no need to move avatar and update database
+ // we can just skip this
+ continue
+ }
+
+ newAvatar, err := copyOldAvatarToNewLocation(user.ID, oldAvatar)
+ if err != nil {
+ _ = sess.Rollback()
+ return fmt.Errorf("[user: %s] %w", user.LowerName, err)
+ } else if newAvatar == oldAvatar {
+ continue
+ }
+
+ user.Avatar = newAvatar
+ if _, err := sess.ID(user.ID).Cols("avatar").Update(user); err != nil {
+ _ = sess.Rollback()
+ return fmt.Errorf("[user: %s] user table update: %w", user.LowerName, err)
+ }
+
+ deleteList.Add(filepath.Join(setting.Avatar.Storage.Path, oldAvatar))
+ migrated++
+ select {
+ case <-ticker.C:
+ log.Info(
+ "%d/%d (%2.0f%%) User Avatar(s) migrated (%d old avatars to be deleted) in %d batches. %d Remaining ...",
+ migrated,
+ count,
+ float64(migrated)/float64(count)*100,
+ len(deleteList),
+ int(math.Ceil(float64(migrated)/float64(50))),
+ count-int64(migrated))
+ default:
+ }
+ }
+ if err := sess.Commit(); err != nil {
+ _ = sess.Rollback()
+ return fmt.Errorf("commit session: %w", err)
+ }
+ }
+
+ deleteCount := len(deleteList)
+ log.Info("Deleting %d old avatars ...", deleteCount)
+ i := 0
+ for file := range deleteList {
+ if err := util.Remove(file); err != nil {
+ log.Warn("util.Remove: %v", err)
+ }
+ i++
+ select {
+ case <-ticker.C:
+ log.Info(
+ "%d/%d (%2.0f%%) Old User Avatar(s) deleted. %d Remaining ...",
+ i,
+ deleteCount,
+ float64(i)/float64(deleteCount)*100,
+ deleteCount-i)
+ default:
+ }
+ }
+
+ log.Info("Completed migrating %d User Avatar(s) and deleting %d Old Avatars", count, deleteCount)
+
+ return nil
+}
+
+// copyOldAvatarToNewLocation copies the old avatar file to its new,
+// content-derived location and returns the new avatar name.
+func copyOldAvatarToNewLocation(userID int64, oldAvatar string) (string, error) {
+ fr, err := os.Open(filepath.Join(setting.Avatar.Storage.Path, oldAvatar))
+ if err != nil {
+ return "", fmt.Errorf("os.Open: %w", err)
+ }
+ defer fr.Close()
+
+ data, err := io.ReadAll(fr)
+ if err != nil {
+ return "", fmt.Errorf("io.ReadAll: %w", err)
+ }
+
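+	// The new avatar name is md5(userID + "-" + md5(content)), which keeps
+	// names unique per user even when two users upload identical images.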
+ newAvatar := fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%d-%x", userID, md5.Sum(data)))))
+ if newAvatar == oldAvatar {
+ return newAvatar, nil
+ }
+
+ if err := os.WriteFile(filepath.Join(setting.Avatar.Storage.Path, newAvatar), data, 0o666); err != nil {
+ return "", fmt.Errorf("os.WriteFile: %w", err)
+ }
+
+ return newAvatar, nil
+}
diff --git a/models/migrations/v1_11/v116.go b/models/migrations/v1_11/v116.go
new file mode 100644
index 0000000..85aa76c
--- /dev/null
+++ b/models/migrations/v1_11/v116.go
@@ -0,0 +1,32 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_11 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func ExtendTrackedTimes(x *xorm.Engine) error {
+ type TrackedTime struct {
+ Time int64 `xorm:"NOT NULL"`
+ Deleted bool `xorm:"NOT NULL DEFAULT false"`
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ if _, err := sess.Exec("DELETE FROM tracked_time WHERE time IS NULL"); err != nil {
+ return err
+ }
+
+ if err := sess.Sync(new(TrackedTime)); err != nil {
+ return err
+ }
+
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_12/v117.go b/models/migrations/v1_12/v117.go
new file mode 100644
index 0000000..8eadcde
--- /dev/null
+++ b/models/migrations/v1_12/v117.go
@@ -0,0 +1,16 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_12 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddBlockOnRejectedReviews(x *xorm.Engine) error {
+ type ProtectedBranch struct {
+ BlockOnRejectedReviews bool `xorm:"NOT NULL DEFAULT false"`
+ }
+
+ return x.Sync(new(ProtectedBranch))
+}
diff --git a/models/migrations/v1_12/v118.go b/models/migrations/v1_12/v118.go
new file mode 100644
index 0000000..eb022dc
--- /dev/null
+++ b/models/migrations/v1_12/v118.go
@@ -0,0 +1,25 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_12 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddReviewCommitAndStale(x *xorm.Engine) error {
+ type Review struct {
+ CommitID string `xorm:"VARCHAR(40)"`
+ Stale bool `xorm:"NOT NULL DEFAULT false"`
+ }
+
+ type ProtectedBranch struct {
+ DismissStaleApprovals bool `xorm:"NOT NULL DEFAULT false"`
+ }
+
+	// Old reviews will have the commit ID set to "" and will not be marked stale
+ if err := x.Sync(new(Review)); err != nil {
+ return err
+ }
+ return x.Sync(new(ProtectedBranch))
+}
diff --git a/models/migrations/v1_12/v119.go b/models/migrations/v1_12/v119.go
new file mode 100644
index 0000000..60bfe6a
--- /dev/null
+++ b/models/migrations/v1_12/v119.go
@@ -0,0 +1,15 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_12 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func FixMigratedRepositoryServiceType(x *xorm.Engine) error {
+ // structs.GithubService:
+ // GithubService = 2
+ _, err := x.Exec("UPDATE repository SET original_service_type = ? WHERE original_url LIKE 'https://github.com/%'", 2)
+ return err
+}
diff --git a/models/migrations/v1_12/v120.go b/models/migrations/v1_12/v120.go
new file mode 100644
index 0000000..3f7ed8d
--- /dev/null
+++ b/models/migrations/v1_12/v120.go
@@ -0,0 +1,19 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_12 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddOwnerNameOnRepository(x *xorm.Engine) error {
+ type Repository struct {
+ OwnerName string
+ }
+ if err := x.Sync(new(Repository)); err != nil {
+ return err
+ }
+ _, err := x.Exec("UPDATE repository SET owner_name = (SELECT name FROM `user` WHERE `user`.id = repository.owner_id)")
+ return err
+}
diff --git a/models/migrations/v1_12/v121.go b/models/migrations/v1_12/v121.go
new file mode 100644
index 0000000..175ec91
--- /dev/null
+++ b/models/migrations/v1_12/v121.go
@@ -0,0 +1,16 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_12 //nolint
+
+import "xorm.io/xorm"
+
+func AddIsRestricted(x *xorm.Engine) error {
+ // User see models/user.go
+ type User struct {
+ ID int64 `xorm:"pk autoincr"`
+ IsRestricted bool `xorm:"NOT NULL DEFAULT false"`
+ }
+
+ return x.Sync(new(User))
+}
diff --git a/models/migrations/v1_12/v122.go b/models/migrations/v1_12/v122.go
new file mode 100644
index 0000000..6e31d86
--- /dev/null
+++ b/models/migrations/v1_12/v122.go
@@ -0,0 +1,16 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_12 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddRequireSignedCommits(x *xorm.Engine) error {
+ type ProtectedBranch struct {
+ RequireSignedCommits bool `xorm:"NOT NULL DEFAULT false"`
+ }
+
+ return x.Sync(new(ProtectedBranch))
+}
diff --git a/models/migrations/v1_12/v123.go b/models/migrations/v1_12/v123.go
new file mode 100644
index 0000000..b0c3af0
--- /dev/null
+++ b/models/migrations/v1_12/v123.go
@@ -0,0 +1,17 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_12 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddReactionOriginals(x *xorm.Engine) error {
+ type Reaction struct {
+ OriginalAuthorID int64 `xorm:"INDEX NOT NULL DEFAULT(0)"`
+ OriginalAuthor string
+ }
+
+ return x.Sync(new(Reaction))
+}
diff --git a/models/migrations/v1_12/v124.go b/models/migrations/v1_12/v124.go
new file mode 100644
index 0000000..d2ba03f
--- /dev/null
+++ b/models/migrations/v1_12/v124.go
@@ -0,0 +1,23 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_12 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddUserRepoMissingColumns(x *xorm.Engine) error {
+ type VisibleType int
+ type User struct {
+ PasswdHashAlgo string `xorm:"NOT NULL DEFAULT 'pbkdf2'"`
+ Visibility VisibleType `xorm:"NOT NULL DEFAULT 0"`
+ }
+
+ type Repository struct {
+ IsArchived bool `xorm:"INDEX"`
+ Topics []string `xorm:"TEXT JSON"`
+ }
+
+ return x.Sync(new(User), new(Repository))
+}
diff --git a/models/migrations/v1_12/v125.go b/models/migrations/v1_12/v125.go
new file mode 100644
index 0000000..ec4ffaa
--- /dev/null
+++ b/models/migrations/v1_12/v125.go
@@ -0,0 +1,22 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_12 //nolint
+
+import (
+ "fmt"
+
+ "xorm.io/xorm"
+)
+
+func AddReviewMigrateInfo(x *xorm.Engine) error {
+ type Review struct {
+ OriginalAuthor string
+ OriginalAuthorID int64
+ }
+
+ if err := x.Sync(new(Review)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+ return nil
+}
diff --git a/models/migrations/v1_12/v126.go b/models/migrations/v1_12/v126.go
new file mode 100644
index 0000000..ca9ec3a
--- /dev/null
+++ b/models/migrations/v1_12/v126.go
@@ -0,0 +1,24 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_12 //nolint
+
+import (
+ "xorm.io/builder"
+ "xorm.io/xorm"
+)
+
+func FixTopicRepositoryCount(x *xorm.Engine) error {
+ _, err := x.Exec(builder.Delete(builder.NotIn("`repo_id`", builder.Select("`id`").From("`repository`"))).From("`repo_topic`"))
+ if err != nil {
+ return err
+ }
+
+ _, err = x.Exec(builder.Update(
+ builder.Eq{
+ "`repo_count`": builder.Select("count(*)").From("`repo_topic`").Where(builder.Eq{
+ "`repo_topic`.`topic_id`": builder.Expr("`topic`.`id`"),
+ }),
+ }).From("`topic`").Where(builder.Eq{"'1'": "1"}))
+ return err
+}
diff --git a/models/migrations/v1_12/v127.go b/models/migrations/v1_12/v127.go
new file mode 100644
index 0000000..00e391d
--- /dev/null
+++ b/models/migrations/v1_12/v127.go
@@ -0,0 +1,44 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_12 //nolint
+
+import (
+ "fmt"
+
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func AddLanguageStats(x *xorm.Engine) error {
+ // LanguageStat see models/repo_language_stats.go
+ type LanguageStat struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ CommitID string
+ IsPrimary bool
+ Language string `xorm:"VARCHAR(30) UNIQUE(s) INDEX NOT NULL"`
+ Percentage float32 `xorm:"NUMERIC(5,2) NOT NULL DEFAULT 0"`
+ Color string `xorm:"-"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX CREATED"`
+ }
+
+ type RepoIndexerType int
+
+ // RepoIndexerStatus see models/repo_stats_indexer.go
+ type RepoIndexerStatus struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"INDEX(s)"`
+ CommitSha string `xorm:"VARCHAR(40)"`
+ IndexerType RepoIndexerType `xorm:"INDEX(s) NOT NULL DEFAULT 0"`
+ }
+
+ if err := x.Sync(new(LanguageStat)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+ if err := x.Sync(new(RepoIndexerStatus)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+ return nil
+}
diff --git a/models/migrations/v1_12/v128.go b/models/migrations/v1_12/v128.go
new file mode 100644
index 0000000..6eea133
--- /dev/null
+++ b/models/migrations/v1_12/v128.go
@@ -0,0 +1,129 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_12 //nolint
+
+import (
+ "fmt"
+ "math"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+)
+
+func FixMergeBase(x *xorm.Engine) error {
+ type Repository struct {
+ ID int64 `xorm:"pk autoincr"`
+ OwnerID int64 `xorm:"UNIQUE(s) index"`
+ OwnerName string
+ LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ Name string `xorm:"INDEX NOT NULL"`
+ }
+
+ type PullRequest struct {
+ ID int64 `xorm:"pk autoincr"`
+ Index int64
+ HeadRepoID int64 `xorm:"INDEX"`
+ BaseRepoID int64 `xorm:"INDEX"`
+ HeadBranch string
+ BaseBranch string
+ MergeBase string `xorm:"VARCHAR(40)"`
+
+ HasMerged bool `xorm:"INDEX"`
+ MergedCommitID string `xorm:"VARCHAR(40)"`
+ }
+
+ limit := setting.Database.IterateBufferSize
+ if limit <= 0 {
+ limit = 50
+ }
+
+ ticker := time.NewTicker(5 * time.Second)
+ defer ticker.Stop()
+
+ count, err := x.Count(new(PullRequest))
+ if err != nil {
+ return err
+ }
+ log.Info("%d Pull Request(s) to migrate ...", count)
+
+ i := 0
+ start := 0
+ for {
+		prs := make([]PullRequest, 0, limit)
+		if err := x.Limit(limit, start).Asc("id").Find(&prs); err != nil {
+			return fmt.Errorf("Find: %w", err)
+		}
+		if len(prs) == 0 {
+			break
+		}
+
+		start += len(prs)
+ for _, pr := range prs {
+ baseRepo := &Repository{ID: pr.BaseRepoID}
+ has, err := x.Table("repository").Get(baseRepo)
+ if err != nil {
+ return fmt.Errorf("Unable to get base repo %d %w", pr.BaseRepoID, err)
+ }
+ if !has {
+ log.Error("Missing base repo with id %d for PR ID %d", pr.BaseRepoID, pr.ID)
+ continue
+ }
+ userPath := filepath.Join(setting.RepoRootPath, strings.ToLower(baseRepo.OwnerName))
+ repoPath := filepath.Join(userPath, strings.ToLower(baseRepo.Name)+".git")
+
+ gitRefName := fmt.Sprintf("refs/pull/%d/head", pr.Index)
+
+ if !pr.HasMerged {
+ var err error
+ pr.MergeBase, _, err = git.NewCommand(git.DefaultContext, "merge-base").AddDashesAndList(pr.BaseBranch, gitRefName).RunStdString(&git.RunOpts{Dir: repoPath})
+ if err != nil {
+ var err2 error
+ pr.MergeBase, _, err2 = git.NewCommand(git.DefaultContext, "rev-parse").AddDynamicArguments(git.BranchPrefix + pr.BaseBranch).RunStdString(&git.RunOpts{Dir: repoPath})
+ if err2 != nil {
+ log.Error("Unable to get merge base for PR ID %d, Index %d in %s/%s. Error: %v & %v", pr.ID, pr.Index, baseRepo.OwnerName, baseRepo.Name, err, err2)
+ continue
+ }
+ }
+ } else {
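+				// Once merged, the head branch may no longer exist; recompute the
+				// merge base from the merge commit's parents plus the PR head ref.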
+ parentsString, _, err := git.NewCommand(git.DefaultContext, "rev-list", "--parents", "-n", "1").AddDynamicArguments(pr.MergedCommitID).RunStdString(&git.RunOpts{Dir: repoPath})
+ if err != nil {
+ log.Warn("Unable to get parents for merged PR ID %d, Index %d in %s/%s. Error: %v", pr.ID, pr.Index, baseRepo.OwnerName, baseRepo.Name, err)
+ continue
+ }
+ parents := strings.Split(strings.TrimSpace(parentsString), " ")
+ if len(parents) < 2 {
+ continue
+ }
+
+ refs := append([]string{}, parents[1:]...)
+ refs = append(refs, gitRefName)
+ cmd := git.NewCommand(git.DefaultContext, "merge-base").AddDashesAndList(refs...)
+
+ pr.MergeBase, _, err = cmd.RunStdString(&git.RunOpts{Dir: repoPath})
+ if err != nil {
+ log.Error("Unable to get merge base for merged PR ID %d, Index %d in %s/%s. Error: %v", pr.ID, pr.Index, baseRepo.OwnerName, baseRepo.Name, err)
+ continue
+ }
+ }
+ pr.MergeBase = strings.TrimSpace(pr.MergeBase)
+ x.ID(pr.ID).Cols("merge_base").Update(pr)
+ i++
+ select {
+ case <-ticker.C:
+ log.Info("%d/%d (%2.0f%%) Pull Request(s) migrated in %d batches. %d PRs Remaining ...", i, count, float64(i)/float64(count)*100, int(math.Ceil(float64(i)/float64(limit))), count-int64(i))
+ default:
+ }
+ }
+ }
+ log.Info("Completed migrating %d Pull Request(s) in: %d batches", count, int(math.Ceil(float64(i)/float64(limit))))
+ return nil
+}
diff --git a/models/migrations/v1_12/v129.go b/models/migrations/v1_12/v129.go
new file mode 100644
index 0000000..cf22824
--- /dev/null
+++ b/models/migrations/v1_12/v129.go
@@ -0,0 +1,16 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_12 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func PurgeUnusedDependencies(x *xorm.Engine) error {
+ if _, err := x.Exec("DELETE FROM issue_dependency WHERE issue_id NOT IN (SELECT id FROM issue)"); err != nil {
+ return err
+ }
+ _, err := x.Exec("DELETE FROM issue_dependency WHERE dependency_id NOT IN (SELECT id FROM issue)")
+ return err
+}
diff --git a/models/migrations/v1_12/v130.go b/models/migrations/v1_12/v130.go
new file mode 100644
index 0000000..391810c
--- /dev/null
+++ b/models/migrations/v1_12/v130.go
@@ -0,0 +1,113 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_12 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+)
+
+func ExpandWebhooks(x *xorm.Engine) error {
+ type HookEvents struct {
+ Create bool `json:"create"`
+ Delete bool `json:"delete"`
+ Fork bool `json:"fork"`
+ Issues bool `json:"issues"`
+ IssueAssign bool `json:"issue_assign"`
+ IssueLabel bool `json:"issue_label"`
+ IssueMilestone bool `json:"issue_milestone"`
+ IssueComment bool `json:"issue_comment"`
+ Push bool `json:"push"`
+ PullRequest bool `json:"pull_request"`
+ PullRequestAssign bool `json:"pull_request_assign"`
+ PullRequestLabel bool `json:"pull_request_label"`
+ PullRequestMilestone bool `json:"pull_request_milestone"`
+ PullRequestComment bool `json:"pull_request_comment"`
+ PullRequestReview bool `json:"pull_request_review"`
+ PullRequestSync bool `json:"pull_request_sync"`
+ Repository bool `json:"repository"`
+ Release bool `json:"release"`
+ }
+
+ type HookEvent struct {
+ PushOnly bool `json:"push_only"`
+ SendEverything bool `json:"send_everything"`
+ ChooseEvents bool `json:"choose_events"`
+ BranchFilter string `json:"branch_filter"`
+
+ HookEvents `json:"events"`
+ }
+
+ type Webhook struct {
+ ID int64
+ Events string
+ }
+
+ var bytes []byte
+ var last int
+ batchSize := setting.Database.IterateBufferSize
+ sess := x.NewSession()
+ defer sess.Close()
+ for {
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+ results := make([]Webhook, 0, batchSize)
+ err := x.OrderBy("id").
+ Limit(batchSize, last).
+ Find(&results)
+ if err != nil {
+ return err
+ }
+ if len(results) == 0 {
+ break
+ }
+ last += len(results)
+
+ for _, res := range results {
+ var events HookEvent
+ if err = json.Unmarshal([]byte(res.Events), &events); err != nil {
+ return err
+ }
+
+ if !events.ChooseEvents {
+ continue
+ }
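+			// Expand the coarse "issues" and "pull_request" flags into the new
+			// fine-grained event fields so existing hooks keep their behaviour.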
+
+ if events.Issues {
+ events.IssueAssign = true
+ events.IssueLabel = true
+ events.IssueMilestone = true
+ events.IssueComment = true
+ }
+
+ if events.PullRequest {
+ events.PullRequestAssign = true
+ events.PullRequestLabel = true
+ events.PullRequestMilestone = true
+ events.PullRequestComment = true
+ events.PullRequestReview = true
+ events.PullRequestSync = true
+ }
+
+ if bytes, err = json.Marshal(&events); err != nil {
+ return err
+ }
+
+ _, err = sess.Exec("UPDATE webhook SET events = ? WHERE id = ?", string(bytes), res.ID)
+ if err != nil {
+ return err
+ }
+ }
+
+ if err := sess.Commit(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/models/migrations/v1_12/v131.go b/models/migrations/v1_12/v131.go
new file mode 100644
index 0000000..5184bc3
--- /dev/null
+++ b/models/migrations/v1_12/v131.go
@@ -0,0 +1,21 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_12 //nolint
+
+import (
+ "fmt"
+
+ "xorm.io/xorm"
+)
+
+func AddSystemWebhookColumn(x *xorm.Engine) error {
+ type Webhook struct {
+ IsSystemWebhook bool `xorm:"NOT NULL DEFAULT false"`
+ }
+
+ if err := x.Sync(new(Webhook)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+ return nil
+}
diff --git a/models/migrations/v1_12/v132.go b/models/migrations/v1_12/v132.go
new file mode 100644
index 0000000..3b2b28f
--- /dev/null
+++ b/models/migrations/v1_12/v132.go
@@ -0,0 +1,21 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_12 //nolint
+
+import (
+ "fmt"
+
+ "xorm.io/xorm"
+)
+
+func AddBranchProtectionProtectedFilesColumn(x *xorm.Engine) error {
+ type ProtectedBranch struct {
+ ProtectedFilePatterns string `xorm:"TEXT"`
+ }
+
+ if err := x.Sync(new(ProtectedBranch)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+ return nil
+}
diff --git a/models/migrations/v1_12/v133.go b/models/migrations/v1_12/v133.go
new file mode 100644
index 0000000..c9087fc
--- /dev/null
+++ b/models/migrations/v1_12/v133.go
@@ -0,0 +1,15 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_12 //nolint
+
+import "xorm.io/xorm"
+
+func AddEmailHashTable(x *xorm.Engine) error {
+ // EmailHash represents a pre-generated hash map
+ type EmailHash struct {
+ Hash string `xorm:"pk varchar(32)"`
+ Email string `xorm:"UNIQUE NOT NULL"`
+ }
+ return x.Sync(new(EmailHash))
+}
diff --git a/models/migrations/v1_12/v134.go b/models/migrations/v1_12/v134.go
new file mode 100644
index 0000000..23c2916
--- /dev/null
+++ b/models/migrations/v1_12/v134.go
@@ -0,0 +1,117 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_12 //nolint
+
+import (
+ "fmt"
+ "math"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+)
+
+func RefixMergeBase(x *xorm.Engine) error {
+ type Repository struct {
+ ID int64 `xorm:"pk autoincr"`
+ OwnerID int64 `xorm:"UNIQUE(s) index"`
+ OwnerName string
+ LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ Name string `xorm:"INDEX NOT NULL"`
+ }
+
+ type PullRequest struct {
+ ID int64 `xorm:"pk autoincr"`
+ Index int64
+ HeadRepoID int64 `xorm:"INDEX"`
+ BaseRepoID int64 `xorm:"INDEX"`
+ HeadBranch string
+ BaseBranch string
+ MergeBase string `xorm:"VARCHAR(40)"`
+
+ HasMerged bool `xorm:"INDEX"`
+ MergedCommitID string `xorm:"VARCHAR(40)"`
+ }
+
+ limit := setting.Database.IterateBufferSize
+ if limit <= 0 {
+ limit = 50
+ }
+
+ ticker := time.NewTicker(5 * time.Second)
+ defer ticker.Stop()
+ count, err := x.Where("has_merged = ?", true).Count(new(PullRequest))
+ if err != nil {
+ return err
+ }
+ log.Info("%d Merged Pull Request(s) to migrate ...", count)
+
+ i := 0
+ start := 0
+ for {
+		prs := make([]PullRequest, 0, limit)
+		if err := x.Limit(limit, start).Asc("id").Where("has_merged = ?", true).Find(&prs); err != nil {
+			return fmt.Errorf("Find: %w", err)
+		}
+		if len(prs) == 0 {
+			break
+		}
+
+		start += len(prs)
+ for _, pr := range prs {
+ baseRepo := &Repository{ID: pr.BaseRepoID}
+ has, err := x.Table("repository").Get(baseRepo)
+ if err != nil {
+ return fmt.Errorf("Unable to get base repo %d %w", pr.BaseRepoID, err)
+ }
+ if !has {
+ log.Error("Missing base repo with id %d for PR ID %d", pr.BaseRepoID, pr.ID)
+ continue
+ }
+ userPath := filepath.Join(setting.RepoRootPath, strings.ToLower(baseRepo.OwnerName))
+ repoPath := filepath.Join(userPath, strings.ToLower(baseRepo.Name)+".git")
+
+ gitRefName := fmt.Sprintf("refs/pull/%d/head", pr.Index)
+
+ parentsString, _, err := git.NewCommand(git.DefaultContext, "rev-list", "--parents", "-n", "1").AddDynamicArguments(pr.MergedCommitID).RunStdString(&git.RunOpts{Dir: repoPath})
+ if err != nil {
+ log.Warn("Unable to get parents for merged PR ID %d, Index %d in %s/%s. Error: %v", pr.ID, pr.Index, baseRepo.OwnerName, baseRepo.Name, err)
+ continue
+ }
+ parents := strings.Split(strings.TrimSpace(parentsString), " ")
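+			// rev-list --parents prints the merged commit followed by its
+			// parents; fewer than three fields means it was not a merge commit.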
+ if len(parents) < 3 {
+ continue
+ }
+
+			// A real merge commit: recalculate the merge base from its parents.
+ refs := append([]string{}, parents[1:]...)
+ refs = append(refs, gitRefName)
+ cmd := git.NewCommand(git.DefaultContext, "merge-base").AddDashesAndList(refs...)
+
+ pr.MergeBase, _, err = cmd.RunStdString(&git.RunOpts{Dir: repoPath})
+ if err != nil {
+ log.Error("Unable to get merge base for merged PR ID %d, Index %d in %s/%s. Error: %v", pr.ID, pr.Index, baseRepo.OwnerName, baseRepo.Name, err)
+ continue
+ }
+ pr.MergeBase = strings.TrimSpace(pr.MergeBase)
+ x.ID(pr.ID).Cols("merge_base").Update(pr)
+ i++
+ select {
+ case <-ticker.C:
+ log.Info("%d/%d (%2.0f%%) Pull Request(s) migrated in %d batches. %d PRs Remaining ...", i, count, float64(i)/float64(count)*100, int(math.Ceil(float64(i)/float64(limit))), count-int64(i))
+ default:
+ }
+ }
+ }
+
+ log.Info("Completed migrating %d Pull Request(s) in: %d batches", count, int(math.Ceil(float64(i)/float64(limit))))
+ return nil
+}
diff --git a/models/migrations/v1_12/v135.go b/models/migrations/v1_12/v135.go
new file mode 100644
index 0000000..8898011
--- /dev/null
+++ b/models/migrations/v1_12/v135.go
@@ -0,0 +1,21 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_12 //nolint
+
+import (
+ "fmt"
+
+ "xorm.io/xorm"
+)
+
+func AddOrgIDLabelColumn(x *xorm.Engine) error {
+ type Label struct {
+ OrgID int64 `xorm:"INDEX"`
+ }
+
+ if err := x.Sync(new(Label)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+ return nil
+}
diff --git a/models/migrations/v1_12/v136.go b/models/migrations/v1_12/v136.go
new file mode 100644
index 0000000..d91ff92
--- /dev/null
+++ b/models/migrations/v1_12/v136.go
@@ -0,0 +1,128 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_12 //nolint
+
+import (
+ "fmt"
+ "math"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/graceful"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+)
+
+func AddCommitDivergenceToPulls(x *xorm.Engine) error {
+ type Repository struct {
+ ID int64 `xorm:"pk autoincr"`
+ OwnerID int64 `xorm:"UNIQUE(s) index"`
+ OwnerName string
+ LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ Name string `xorm:"INDEX NOT NULL"`
+ }
+
+ type PullRequest struct {
+ ID int64 `xorm:"pk autoincr"`
+ IssueID int64 `xorm:"INDEX"`
+ Index int64
+
+ CommitsAhead int
+ CommitsBehind int
+
+ BaseRepoID int64 `xorm:"INDEX"`
+ BaseBranch string
+
+ HasMerged bool `xorm:"INDEX"`
+ MergedCommitID string `xorm:"VARCHAR(40)"`
+ }
+
+ if err := x.Sync(new(PullRequest)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+
+ last := 0
+ migrated := 0
+
+ batchSize := setting.Database.IterateBufferSize
+ sess := x.NewSession()
+ defer sess.Close()
+
+ ticker := time.NewTicker(5 * time.Second)
+ defer ticker.Stop()
+ count, err := sess.Where("has_merged = ?", false).Count(new(PullRequest))
+ if err != nil {
+ return err
+ }
+ log.Info("%d Unmerged Pull Request(s) to migrate ...", count)
+
+ for {
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+ results := make([]*PullRequest, 0, batchSize)
+ err := sess.Where("has_merged = ?", false).OrderBy("id").Limit(batchSize, last).Find(&results)
+ if err != nil {
+ return err
+ }
+ if len(results) == 0 {
+ break
+ }
+ last += batchSize
+
+ for _, pr := range results {
+ baseRepo := &Repository{ID: pr.BaseRepoID}
+ has, err := x.Table("repository").Get(baseRepo)
+ if err != nil {
+ return fmt.Errorf("Unable to get base repo %d %w", pr.BaseRepoID, err)
+ }
+ if !has {
+ log.Error("Missing base repo with id %d for PR ID %d", pr.BaseRepoID, pr.ID)
+ continue
+ }
+ userPath := filepath.Join(setting.RepoRootPath, strings.ToLower(baseRepo.OwnerName))
+ repoPath := filepath.Join(userPath, strings.ToLower(baseRepo.Name)+".git")
+
+ gitRefName := fmt.Sprintf("refs/pull/%d/head", pr.Index)
+
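+ // count how many commits the PR head ref (refs/pull/<index>/head) is ahead of and behind the base branch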
+ divergence, err := git.GetDivergingCommits(graceful.GetManager().HammerContext(), repoPath, pr.BaseBranch, gitRefName)
+ if err != nil {
+ log.Warn("Could not recalculate divergence for pull: %d", pr.ID)
+ pr.CommitsAhead = 0
+ pr.CommitsBehind = 0
+ } else {
+ pr.CommitsAhead = divergence.Ahead
+ pr.CommitsBehind = divergence.Behind
+ }
+
+ if _, err = sess.ID(pr.ID).Cols("commits_ahead", "commits_behind").Update(pr); err != nil {
+ return fmt.Errorf("Update Cols: %w", err)
+ }
+ migrated++
+ }
+
+ if err := sess.Commit(); err != nil {
+ return err
+ }
+ select {
+ case <-ticker.C:
+ log.Info(
+ "%d/%d (%2.0f%%) Pull Request(s) migrated in %d batches. %d PRs Remaining ...",
+ migrated,
+ count,
+ float64(migrated)/float64(count)*100,
+ int(math.Ceil(float64(migrated)/float64(batchSize))),
+ count-int64(migrated))
+ default:
+ }
+ }
+ log.Info("Completed migrating %d Pull Request(s) in: %d batches", count, int(math.Ceil(float64(migrated)/float64(batchSize))))
+ return nil
+}
diff --git a/models/migrations/v1_12/v137.go b/models/migrations/v1_12/v137.go
new file mode 100644
index 0000000..0d86b72
--- /dev/null
+++ b/models/migrations/v1_12/v137.go
@@ -0,0 +1,15 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_12 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddBlockOnOutdatedBranch(x *xorm.Engine) error {
+ type ProtectedBranch struct {
+ BlockOnOutdatedBranch bool `xorm:"NOT NULL DEFAULT false"`
+ }
+ return x.Sync(new(ProtectedBranch))
+}
diff --git a/models/migrations/v1_12/v138.go b/models/migrations/v1_12/v138.go
new file mode 100644
index 0000000..8c8d353
--- /dev/null
+++ b/models/migrations/v1_12/v138.go
@@ -0,0 +1,21 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_12 //nolint
+
+import (
+ "fmt"
+
+ "xorm.io/xorm"
+)
+
+func AddResolveDoerIDCommentColumn(x *xorm.Engine) error {
+ type Comment struct {
+ ResolveDoerID int64
+ }
+
+ if err := x.Sync(new(Comment)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+ return nil
+}
diff --git a/models/migrations/v1_12/v139.go b/models/migrations/v1_12/v139.go
new file mode 100644
index 0000000..5b65769
--- /dev/null
+++ b/models/migrations/v1_12/v139.go
@@ -0,0 +1,23 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_12 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+)
+
+func PrependRefsHeadsToIssueRefs(x *xorm.Engine) error {
+ var query string
+
+ if setting.Database.Type.IsMySQL() {
+ query = "UPDATE `issue` SET `ref` = CONCAT('refs/heads/', `ref`) WHERE `ref` IS NOT NULL AND `ref` <> '' AND `ref` NOT LIKE 'refs/%';"
+ } else {
+ query = "UPDATE `issue` SET `ref` = 'refs/heads/' || `ref` WHERE `ref` IS NOT NULL AND `ref` <> '' AND `ref` NOT LIKE 'refs/%'"
+ }
+
+ _, err := x.Exec(query)
+ return err
+}
diff --git a/models/migrations/v1_13/v140.go b/models/migrations/v1_13/v140.go
new file mode 100644
index 0000000..2d33370
--- /dev/null
+++ b/models/migrations/v1_13/v140.go
@@ -0,0 +1,56 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_13 //nolint
+
+import (
+ "fmt"
+
+ "code.gitea.io/gitea/models/migrations/base"
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+)
+
+func FixLanguageStatsToSaveSize(x *xorm.Engine) error {
+ // LanguageStat see models/repo_language_stats.go
+ type LanguageStat struct {
+ Size int64 `xorm:"NOT NULL DEFAULT 0"`
+ }
+
+ // RepoIndexerType specifies the repository indexer type
+ type RepoIndexerType int
+
+ const (
+ // RepoIndexerTypeCode code indexer - 0
+ RepoIndexerTypeCode RepoIndexerType = iota //nolint:unused
+ // RepoIndexerTypeStats repository stats indexer - 1
+ RepoIndexerTypeStats
+ )
+
+ // RepoIndexerStatus see models/repo_indexer.go
+ type RepoIndexerStatus struct {
+ IndexerType RepoIndexerType `xorm:"INDEX(s) NOT NULL DEFAULT 0"`
+ }
+
+ if err := x.Sync(new(LanguageStat)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+
+ // Delete language stat indexer statuses so the stats indexer runs again
+ if _, err := x.Delete(&RepoIndexerStatus{IndexerType: RepoIndexerTypeStats}); err != nil {
+ return err
+ }
+
+ // SQLite has no TRUNCATE TABLE, fall back to DELETE FROM
+ truncExpr := "TRUNCATE TABLE"
+ if setting.Database.Type.IsSQLite3() {
+ truncExpr = "DELETE FROM"
+ }
+
+ // Delete language stats
+ if _, err := x.Exec(fmt.Sprintf("%s language_stat", truncExpr)); err != nil {
+ return err
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+ return base.DropTableColumns(sess, "language_stat", "percentage")
+}
diff --git a/models/migrations/v1_13/v141.go b/models/migrations/v1_13/v141.go
new file mode 100644
index 0000000..ae211e0
--- /dev/null
+++ b/models/migrations/v1_13/v141.go
@@ -0,0 +1,21 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_13 //nolint
+
+import (
+ "fmt"
+
+ "xorm.io/xorm"
+)
+
+func AddKeepActivityPrivateUserColumn(x *xorm.Engine) error {
+ type User struct {
+ KeepActivityPrivate bool `xorm:"NOT NULL DEFAULT false"`
+ }
+
+ if err := x.Sync(new(User)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+ return nil
+}
diff --git a/models/migrations/v1_13/v142.go b/models/migrations/v1_13/v142.go
new file mode 100644
index 0000000..7c7c01a
--- /dev/null
+++ b/models/migrations/v1_13/v142.go
@@ -0,0 +1,24 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_13 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/log"
+
+ "xorm.io/builder"
+ "xorm.io/xorm"
+)
+
+func SetIsArchivedToFalse(x *xorm.Engine) error {
+ type Repository struct {
+ IsArchived bool `xorm:"INDEX"`
+ }
+ count, err := x.Where(builder.IsNull{"is_archived"}).Cols("is_archived").Update(&Repository{
+ IsArchived: false,
+ })
+ if err == nil {
+ log.Debug("Updated %d repositories with is_archived IS NULL", count)
+ }
+ return err
+}
diff --git a/models/migrations/v1_13/v143.go b/models/migrations/v1_13/v143.go
new file mode 100644
index 0000000..885768d
--- /dev/null
+++ b/models/migrations/v1_13/v143.go
@@ -0,0 +1,51 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_13 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/log"
+
+ "xorm.io/xorm"
+)
+
+func RecalculateStars(x *xorm.Engine) (err error) {
+ // because of issue https://github.com/go-gitea/gitea/issues/11949,
+ // recalculate Stars number for all users to fully fix it.
+
+ type User struct {
+ ID int64 `xorm:"pk autoincr"`
+ }
+
+ const batchSize = 100
+ sess := x.NewSession()
+ defer sess.Close()
+
+ for start := 0; ; start += batchSize {
+ users := make([]User, 0, batchSize)
+ if err := sess.Limit(batchSize, start).Where("type = ?", 0).Cols("id").Find(&users); err != nil {
+ return err
+ }
+ if len(users) == 0 {
+ break
+ }
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ for _, user := range users {
+ if _, err := sess.Exec("UPDATE `user` SET num_stars=(SELECT COUNT(*) FROM `star` WHERE uid=?) WHERE id=?", user.ID, user.ID); err != nil {
+ return err
+ }
+ }
+
+ if err := sess.Commit(); err != nil {
+ return err
+ }
+ }
+
+ log.Debug("recalculate Stars number for all user finished")
+
+ return err
+}
diff --git a/models/migrations/v1_13/v144.go b/models/migrations/v1_13/v144.go
new file mode 100644
index 0000000..f5a0bc5
--- /dev/null
+++ b/models/migrations/v1_13/v144.go
@@ -0,0 +1,25 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_13 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/log"
+
+ "xorm.io/builder"
+ "xorm.io/xorm"
+)
+
+func UpdateMatrixWebhookHTTPMethod(x *xorm.Engine) error {
+ matrixHookTaskType := 9 // value comes from the models package
+ type Webhook struct {
+ HTTPMethod string
+ }
+
+ cond := builder.Eq{"hook_task_type": matrixHookTaskType}.And(builder.Neq{"http_method": "PUT"})
+ count, err := x.Where(cond).Cols("http_method").Update(&Webhook{HTTPMethod: "PUT"})
+ if err == nil {
+ log.Debug("Updated %d Matrix webhooks with http_method 'PUT'", count)
+ }
+ return err
+}
diff --git a/models/migrations/v1_13/v145.go b/models/migrations/v1_13/v145.go
new file mode 100644
index 0000000..5b38f1c
--- /dev/null
+++ b/models/migrations/v1_13/v145.go
@@ -0,0 +1,55 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_13 //nolint
+
+import (
+ "fmt"
+
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+)
+
+func IncreaseLanguageField(x *xorm.Engine) error {
+ type LanguageStat struct {
+ RepoID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ Language string `xorm:"VARCHAR(50) UNIQUE(s) INDEX NOT NULL"`
+ }
+
+ if err := x.Sync(new(LanguageStat)); err != nil {
+ return err
+ }
+
+ if setting.Database.Type.IsSQLite3() {
+ // SQLite maps VARCHAR to TEXT without size so we're done
+ return nil
+ }
+
+ // need to get the correct type for the new column
+ inferredTable, err := x.TableInfo(new(LanguageStat))
+ if err != nil {
+ return err
+ }
+ column := inferredTable.GetColumn("language")
+ sqlType := x.Dialect().SQLType(column)
+
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ switch {
+ case setting.Database.Type.IsMySQL():
+ if _, err := sess.Exec(fmt.Sprintf("ALTER TABLE language_stat MODIFY COLUMN language %s", sqlType)); err != nil {
+ return err
+ }
+ case setting.Database.Type.IsPostgreSQL():
+ if _, err := sess.Exec(fmt.Sprintf("ALTER TABLE language_stat ALTER COLUMN language TYPE %s", sqlType)); err != nil {
+ return err
+ }
+ }
+
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_13/v146.go b/models/migrations/v1_13/v146.go
new file mode 100644
index 0000000..7d9a878
--- /dev/null
+++ b/models/migrations/v1_13/v146.go
@@ -0,0 +1,83 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_13 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func AddProjectsInfo(x *xorm.Engine) error {
+ // Create new tables
+ type (
+ ProjectType uint8
+ ProjectBoardType uint8
+ )
+
+ type Project struct {
+ ID int64 `xorm:"pk autoincr"`
+ Title string `xorm:"INDEX NOT NULL"`
+ Description string `xorm:"TEXT"`
+ RepoID int64 `xorm:"INDEX"`
+ CreatorID int64 `xorm:"NOT NULL"`
+ IsClosed bool `xorm:"INDEX"`
+
+ BoardType ProjectBoardType
+ Type ProjectType
+
+ ClosedDateUnix timeutil.TimeStamp
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ }
+
+ if err := x.Sync(new(Project)); err != nil {
+ return err
+ }
+
+ type Comment struct {
+ OldProjectID int64
+ ProjectID int64
+ }
+
+ if err := x.Sync(new(Comment)); err != nil {
+ return err
+ }
+
+ type Repository struct {
+ ID int64
+ NumProjects int `xorm:"NOT NULL DEFAULT 0"`
+ NumClosedProjects int `xorm:"NOT NULL DEFAULT 0"`
+ }
+
+ if err := x.Sync(new(Repository)); err != nil {
+ return err
+ }
+
+ // ProjectIssue saves relation from issue to a project
+ type ProjectIssue struct {
+ ID int64 `xorm:"pk autoincr"`
+ IssueID int64 `xorm:"INDEX"`
+ ProjectID int64 `xorm:"INDEX"`
+ ProjectBoardID int64 `xorm:"INDEX"`
+ }
+
+ if err := x.Sync(new(ProjectIssue)); err != nil {
+ return err
+ }
+
+ type ProjectBoard struct {
+ ID int64 `xorm:"pk autoincr"`
+ Title string
+ Default bool `xorm:"NOT NULL DEFAULT false"`
+
+ ProjectID int64 `xorm:"INDEX NOT NULL"`
+ CreatorID int64 `xorm:"NOT NULL"`
+
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ }
+
+ return x.Sync(new(ProjectBoard))
+}
diff --git a/models/migrations/v1_13/v147.go b/models/migrations/v1_13/v147.go
new file mode 100644
index 0000000..510ef39
--- /dev/null
+++ b/models/migrations/v1_13/v147.go
@@ -0,0 +1,153 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_13 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func CreateReviewsForCodeComments(x *xorm.Engine) error {
+ // Review
+ type Review struct {
+ ID int64 `xorm:"pk autoincr"`
+ Type int
+ ReviewerID int64 `xorm:"index"`
+ OriginalAuthor string
+ OriginalAuthorID int64
+ IssueID int64 `xorm:"index"`
+ Content string `xorm:"TEXT"`
+ // Official is a review made by an assigned approver (counts towards approval)
+ Official bool `xorm:"NOT NULL DEFAULT false"`
+ CommitID string `xorm:"VARCHAR(40)"`
+ Stale bool `xorm:"NOT NULL DEFAULT false"`
+
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ }
+
+ const ReviewTypeComment = 2
+
+ // Comment represents a comment in commit and issue page.
+ type Comment struct {
+ ID int64 `xorm:"pk autoincr"`
+ Type int `xorm:"INDEX"`
+ PosterID int64 `xorm:"INDEX"`
+ OriginalAuthor string
+ OriginalAuthorID int64
+ IssueID int64 `xorm:"INDEX"`
+ LabelID int64
+ OldProjectID int64
+ ProjectID int64
+ OldMilestoneID int64
+ MilestoneID int64
+ AssigneeID int64
+ RemovedAssignee bool
+ ResolveDoerID int64
+ OldTitle string
+ NewTitle string
+ OldRef string
+ NewRef string
+ DependentIssueID int64
+
+ CommitID int64
+ Line int64 // - previous line / + proposed line
+ TreePath string
+ Content string `xorm:"TEXT"`
+
+ // Path represents the 4 lines of code cemented by this comment
+ // PatchQuoted holds the few lines of code (the diff hunk) this comment refers to, stored in the patch column
+ PatchQuoted string `xorm:"TEXT patch"`
+
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+
+ // Reference issue in commit message
+ CommitSHA string `xorm:"VARCHAR(40)"`
+
+ ReviewID int64 `xorm:"index"`
+ Invalidated bool
+
+ // Reference an issue or pull from another comment, issue or PR
+ // All information is about the origin of the reference
+ RefRepoID int64 `xorm:"index"` // Repo where the referencing issue or comment lives
+ RefIssueID int64 `xorm:"index"`
+ RefCommentID int64 `xorm:"index"` // 0 if origin is Issue title or content (or PR's)
+ RefAction int `xorm:"SMALLINT"` // What happens if RefIssueID resolves
+ RefIsPull bool
+ }
+
+ if err := x.Sync(new(Review), new(Comment)); err != nil {
+ return err
+ }
+
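+ // updateComment wraps each legacy standalone code comment (type 21 with no review)
+ // in a new review: it inserts the review, a review-header comment, and then links
+ // the code comment to that review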
+ updateComment := func(comments []*Comment) error {
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ for _, comment := range comments {
+ review := &Review{
+ Type: ReviewTypeComment,
+ ReviewerID: comment.PosterID,
+ IssueID: comment.IssueID,
+ Official: false,
+ CommitID: comment.CommitSHA,
+ Stale: comment.Invalidated,
+ OriginalAuthor: comment.OriginalAuthor,
+ OriginalAuthorID: comment.OriginalAuthorID,
+ CreatedUnix: comment.CreatedUnix,
+ UpdatedUnix: comment.CreatedUnix,
+ }
+ if _, err := sess.NoAutoTime().Insert(review); err != nil {
+ return err
+ }
+
+ reviewComment := &Comment{
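+ // type 22 is CommentTypeReview: the header comment that represents the review itself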
+ Type: 22,
+ PosterID: comment.PosterID,
+ Content: "",
+ IssueID: comment.IssueID,
+ ReviewID: review.ID,
+ OriginalAuthor: comment.OriginalAuthor,
+ OriginalAuthorID: comment.OriginalAuthorID,
+ CreatedUnix: comment.CreatedUnix,
+ UpdatedUnix: comment.CreatedUnix,
+ }
+ if _, err := sess.NoAutoTime().Insert(reviewComment); err != nil {
+ return err
+ }
+
+ comment.ReviewID = review.ID
+ if _, err := sess.ID(comment.ID).Cols("review_id").NoAutoTime().Update(comment); err != nil {
+ return err
+ }
+ }
+
+ return sess.Commit()
+ }
+
+ batchSize := 100
+ for {
+ comments := make([]*Comment, 0, batchSize)
+ // comments processed below get a non-zero review_id and drop out of this
+ // filter, so always read from offset 0; advancing an offset here would
+ // skip every other batch
+ if err := x.Where("review_id = 0 and type = 21").Limit(batchSize).Find(&comments); err != nil {
+ return err
+ }
+
+ if err := updateComment(comments); err != nil {
+ return err
+ }
+
+ if len(comments) < batchSize {
+ break
+ }
+ }
+
+ return nil
+}
diff --git a/models/migrations/v1_13/v148.go b/models/migrations/v1_13/v148.go
new file mode 100644
index 0000000..7bb8ab7
--- /dev/null
+++ b/models/migrations/v1_13/v148.go
@@ -0,0 +1,13 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_13 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func PurgeInvalidDependenciesComments(x *xorm.Engine) error {
+ _, err := x.Exec("DELETE FROM comment WHERE dependent_issue_id != 0 AND dependent_issue_id NOT IN (SELECT id FROM issue)")
+ return err
+}
diff --git a/models/migrations/v1_13/v149.go b/models/migrations/v1_13/v149.go
new file mode 100644
index 0000000..2a1db04
--- /dev/null
+++ b/models/migrations/v1_13/v149.go
@@ -0,0 +1,24 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_13 //nolint
+
+import (
+ "fmt"
+
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func AddCreatedAndUpdatedToMilestones(x *xorm.Engine) error {
+ type Milestone struct {
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ }
+
+ if err := x.Sync(new(Milestone)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+ return nil
+}
diff --git a/models/migrations/v1_13/v150.go b/models/migrations/v1_13/v150.go
new file mode 100644
index 0000000..d5ba489
--- /dev/null
+++ b/models/migrations/v1_13/v150.go
@@ -0,0 +1,39 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_13 //nolint
+
+import (
+ "code.gitea.io/gitea/models/migrations/base"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func AddPrimaryKeyToRepoTopic(x *xorm.Engine) error {
+ // Topic represents a topic of repositories
+ type Topic struct {
+ ID int64 `xorm:"pk autoincr"`
+ Name string `xorm:"UNIQUE VARCHAR(25)"`
+ RepoCount int
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ }
+
+ // RepoTopic represents associated repositories and topics
+ type RepoTopic struct {
+ RepoID int64 `xorm:"pk"`
+ TopicID int64 `xorm:"pk"`
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ base.RecreateTable(sess, &Topic{})
+ base.RecreateTable(sess, &RepoTopic{})
+
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_13/v151.go b/models/migrations/v1_13/v151.go
new file mode 100644
index 0000000..ea4a8ea
--- /dev/null
+++ b/models/migrations/v1_13/v151.go
@@ -0,0 +1,166 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_13 //nolint
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+ "xorm.io/xorm/schemas"
+)
+
+func SetDefaultPasswordToArgon2(x *xorm.Engine) error {
+ switch {
+ case setting.Database.Type.IsMySQL():
+ _, err := x.Exec("ALTER TABLE `user` ALTER passwd_hash_algo SET DEFAULT 'argon2';")
+ return err
+ case setting.Database.Type.IsPostgreSQL():
+ _, err := x.Exec("ALTER TABLE `user` ALTER COLUMN passwd_hash_algo SET DEFAULT 'argon2';")
+ return err
+ case setting.Database.Type.IsSQLite3():
+ // drop through
+ default:
+ log.Fatal("Unrecognized DB")
+ }
+
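+ // SQLite cannot ALTER a column default in place, so the table has to be recreated below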
+ tables, err := x.DBMetas()
+ if err != nil {
+ return err
+ }
+
+ // Now for SQLite we have to recreate the table
+ var table *schemas.Table
+ tableName := "user"
+
+ for _, table = range tables {
+ if table.Name == tableName {
+ break
+ }
+ }
+ if table == nil || table.Name != tableName {
+ type User struct {
+ PasswdHashAlgo string `xorm:"NOT NULL DEFAULT 'argon2'"`
+ }
+ return x.Sync(new(User))
+ }
+ column := table.GetColumn("passwd_hash_algo")
+ if column == nil {
+ type User struct {
+ PasswdHashAlgo string `xorm:"NOT NULL DEFAULT 'argon2'"`
+ }
+ return x.Sync(new(User))
+ }
+
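+ // rebuild the user table under a temporary name with the new default, copy the
+ // rows across (applying column defaults), then swap it into place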
+ tempTableName := "tmp_recreate__user"
+ column.Default = "'argon2'"
+
+ createTableSQL, _, err := x.Dialect().CreateTableSQL(context.Background(), x.DB(), table, tempTableName)
+ if err != nil {
+ return err
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+ if _, err := sess.Exec(createTableSQL); err != nil {
+ log.Error("Unable to create table %s. Error: %v\n", tempTableName, err, createTableSQL)
+ return err
+ }
+ for _, index := range table.Indexes {
+ if _, err := sess.Exec(x.Dialect().CreateIndexSQL(tempTableName, index)); err != nil {
+ log.Error("Unable to create indexes on temporary table %s. Error: %v", tempTableName, err)
+ return err
+ }
+ }
+
+ newTableColumns := table.Columns()
+ if len(newTableColumns) == 0 {
+ return fmt.Errorf("no columns in new table")
+ }
+ hasID := false
+ for _, column := range newTableColumns {
+ hasID = hasID || (column.IsPrimaryKey && column.IsAutoIncrement)
+ }
+
+ sqlStringBuilder := &strings.Builder{}
+ _, _ = sqlStringBuilder.WriteString("INSERT INTO `")
+ _, _ = sqlStringBuilder.WriteString(tempTableName)
+ _, _ = sqlStringBuilder.WriteString("` (`")
+ _, _ = sqlStringBuilder.WriteString(newTableColumns[0].Name)
+ _, _ = sqlStringBuilder.WriteString("`")
+ for _, column := range newTableColumns[1:] {
+ _, _ = sqlStringBuilder.WriteString(", `")
+ _, _ = sqlStringBuilder.WriteString(column.Name)
+ _, _ = sqlStringBuilder.WriteString("`")
+ }
+ _, _ = sqlStringBuilder.WriteString(")")
+ _, _ = sqlStringBuilder.WriteString(" SELECT ")
+ if newTableColumns[0].Default != "" {
+ _, _ = sqlStringBuilder.WriteString("COALESCE(`")
+ _, _ = sqlStringBuilder.WriteString(newTableColumns[0].Name)
+ _, _ = sqlStringBuilder.WriteString("`, ")
+ _, _ = sqlStringBuilder.WriteString(newTableColumns[0].Default)
+ _, _ = sqlStringBuilder.WriteString(")")
+ } else {
+ _, _ = sqlStringBuilder.WriteString("`")
+ _, _ = sqlStringBuilder.WriteString(newTableColumns[0].Name)
+ _, _ = sqlStringBuilder.WriteString("`")
+ }
+
+ for _, column := range newTableColumns[1:] {
+ if column.Default != "" {
+ _, _ = sqlStringBuilder.WriteString(", COALESCE(`")
+ _, _ = sqlStringBuilder.WriteString(column.Name)
+ _, _ = sqlStringBuilder.WriteString("`, ")
+ _, _ = sqlStringBuilder.WriteString(column.Default)
+ _, _ = sqlStringBuilder.WriteString(")")
+ } else {
+ _, _ = sqlStringBuilder.WriteString(", `")
+ _, _ = sqlStringBuilder.WriteString(column.Name)
+ _, _ = sqlStringBuilder.WriteString("`")
+ }
+ }
+ _, _ = sqlStringBuilder.WriteString(" FROM `")
+ _, _ = sqlStringBuilder.WriteString(tableName)
+ _, _ = sqlStringBuilder.WriteString("`")
+
+ if _, err := sess.Exec(sqlStringBuilder.String()); err != nil {
+ log.Error("Unable to set copy data in to temp table %s. Error: %v", tempTableName, err)
+ return err
+ }
+
+ // SQLite will drop all the constraints on the old table
+ if _, err := sess.Exec(fmt.Sprintf("DROP TABLE `%s`", tableName)); err != nil {
+ log.Error("Unable to drop old table %s. Error: %v", tableName, err)
+ return err
+ }
+
+ for _, index := range table.Indexes {
+ if _, err := sess.Exec(x.Dialect().DropIndexSQL(tempTableName, index)); err != nil {
+ log.Error("Unable to drop indexes on temporary table %s. Error: %v", tempTableName, err)
+ return err
+ }
+ }
+
+ if _, err := sess.Exec(fmt.Sprintf("ALTER TABLE `%s` RENAME TO `%s`", tempTableName, tableName)); err != nil {
+ log.Error("Unable to rename %s to %s. Error: %v", tempTableName, tableName, err)
+ return err
+ }
+
+ for _, index := range table.Indexes {
+ if _, err := sess.Exec(x.Dialect().CreateIndexSQL(tableName, index)); err != nil {
+ log.Error("Unable to recreate indexes on table %s. Error: %v", tableName, err)
+ return err
+ }
+ }
+
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_13/v152.go b/models/migrations/v1_13/v152.go
new file mode 100644
index 0000000..502c82a
--- /dev/null
+++ b/models/migrations/v1_13/v152.go
@@ -0,0 +1,13 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_13 //nolint
+
+import "xorm.io/xorm"
+
+func AddTrustModelToRepository(x *xorm.Engine) error {
+ type Repository struct {
+ TrustModel int
+ }
+ return x.Sync(new(Repository))
+}
diff --git a/models/migrations/v1_13/v153.go b/models/migrations/v1_13/v153.go
new file mode 100644
index 0000000..0b2dd3e
--- /dev/null
+++ b/models/migrations/v1_13/v153.go
@@ -0,0 +1,24 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_13 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddTeamReviewRequestSupport(x *xorm.Engine) error {
+ type Review struct {
+ ReviewerTeamID int64 `xorm:"NOT NULL DEFAULT 0"`
+ }
+
+ type Comment struct {
+ AssigneeTeamID int64 `xorm:"NOT NULL DEFAULT 0"`
+ }
+
+ if err := x.Sync(new(Review)); err != nil {
+ return err
+ }
+
+ return x.Sync(new(Comment))
+}
diff --git a/models/migrations/v1_13/v154.go b/models/migrations/v1_13/v154.go
new file mode 100644
index 0000000..60cc567
--- /dev/null
+++ b/models/migrations/v1_13/v154.go
@@ -0,0 +1,55 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_13 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func AddTimeStamps(x *xorm.Engine) error {
+ // add created/updated timestamps to the relations where they are useful
+
+ // Star represents a repo starred by a user.
+ type Star struct {
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ }
+ if err := x.Sync(new(Star)); err != nil {
+ return err
+ }
+
+ // Label represents a label of repository for issues.
+ type Label struct {
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ }
+ if err := x.Sync(new(Label)); err != nil {
+ return err
+ }
+
+ // Follow represents the relation between a user and their followers.
+ type Follow struct {
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ }
+ if err := x.Sync(new(Follow)); err != nil {
+ return err
+ }
+
+ // Watch represents a user's subscription to a repository's notifications.
+ type Watch struct {
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ }
+ if err := x.Sync(new(Watch)); err != nil {
+ return err
+ }
+
+ // Collaboration represents the relation between an individual and a repository.
+ type Collaboration struct {
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ }
+ return x.Sync(new(Collaboration))
+}
diff --git a/models/migrations/v1_14/main_test.go b/models/migrations/v1_14/main_test.go
new file mode 100644
index 0000000..cf7fcb5
--- /dev/null
+++ b/models/migrations/v1_14/main_test.go
@@ -0,0 +1,14 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_14 //nolint
+
+import (
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+)
+
+func TestMain(m *testing.M) {
+ migration_tests.MainTest(m)
+}
diff --git a/models/migrations/v1_14/v155.go b/models/migrations/v1_14/v155.go
new file mode 100644
index 0000000..e814f59
--- /dev/null
+++ b/models/migrations/v1_14/v155.go
@@ -0,0 +1,21 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_14 //nolint
+
+import (
+ "fmt"
+
+ "xorm.io/xorm"
+)
+
+func AddChangedProtectedFilesPullRequestColumn(x *xorm.Engine) error {
+ type PullRequest struct {
+ ChangedProtectedFiles []string `xorm:"TEXT JSON"`
+ }
+
+ if err := x.Sync(new(PullRequest)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+ return nil
+}
diff --git a/models/migrations/v1_14/v156.go b/models/migrations/v1_14/v156.go
new file mode 100644
index 0000000..2cf4954
--- /dev/null
+++ b/models/migrations/v1_14/v156.go
@@ -0,0 +1,177 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_14 //nolint
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+)
+
+// Copied from models/repo.go because the models package cannot be imported here
+func repoPath(userName, repoName string) string {
+ return filepath.Join(userPath(userName), strings.ToLower(repoName)+".git")
+}
+
+func userPath(userName string) string {
+ return filepath.Join(setting.RepoRootPath, strings.ToLower(userName))
+}
+
+func FixPublisherIDforTagReleases(x *xorm.Engine) error {
+ type Release struct {
+ ID int64
+ RepoID int64
+ Sha1 string
+ TagName string
+ PublisherID int64
+ }
+
+ type Repository struct {
+ ID int64
+ OwnerID int64
+ OwnerName string
+ Name string
+ }
+
+ type User struct {
+ ID int64
+ Name string
+ Email string
+ }
+
+ const batchSize = 100
+ sess := x.NewSession()
+ defer sess.Close()
+
+ var (
+ repo *Repository
+ gitRepo *git.Repository
+ user *User
+ )
+ defer func() {
+ if gitRepo != nil {
+ gitRepo.Close()
+ }
+ }()
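+ // releases imported from plain git tags have no publisher; resolve one by matching
+ // the tag's tagger (or the underlying commit author) email against the user table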
+ // paginate by ID rather than by offset: releases that receive a publisher
+ // below drop out of the filter while skipped ones keep matching, so a
+ // moving offset would silently skip rows
+ var lastID int64
+ for {
+ releases := make([]*Release, 0, batchSize)
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ if err := sess.Limit(batchSize).
+ Where("(publisher_id = 0 OR publisher_id is null) AND id > ?", lastID).
+ Asc("id").Where("is_tag=?", true).
+ Find(&releases); err != nil {
+ return err
+ }
+
+ if len(releases) == 0 {
+ break
+ }
+ lastID = releases[len(releases)-1].ID
+
+ for _, release := range releases {
+ if repo == nil || repo.ID != release.RepoID {
+ if gitRepo != nil {
+ gitRepo.Close()
+ gitRepo = nil
+ }
+ repo = new(Repository)
+ has, err := sess.ID(release.RepoID).Get(repo)
+ if err != nil {
+ log.Error("Error whilst loading repository[%d] for release[%d] with tag name %s. Error: %v", release.RepoID, release.ID, release.TagName, err)
+ return err
+ } else if !has {
+ log.Warn("Release[%d] is orphaned and refers to non-existing repository %d", release.ID, release.RepoID)
+ log.Warn("This release should be deleted")
+ continue
+ }
+
+ if repo.OwnerName == "" {
+ // v120.go migration may not have been run correctly - we'll just replicate it here
+ // because this appears to be a common-ish problem.
+ if _, err := sess.Exec("UPDATE repository SET owner_name = (SELECT name FROM `user` WHERE `user`.id = repository.owner_id)"); err != nil {
+ log.Error("Error whilst updating repository[%d] owner name", repo.ID)
+ return err
+ }
+
+ if _, err := sess.ID(release.RepoID).Get(repo); err != nil {
+ log.Error("Error whilst loading repository[%d] for release[%d] with tag name %s. Error: %v", release.RepoID, release.ID, release.TagName, err)
+ return err
+ }
+ }
+ gitRepo, err = git.OpenRepository(git.DefaultContext, repoPath(repo.OwnerName, repo.Name))
+ if err != nil {
+ log.Error("Error whilst opening git repo for [%d]%s/%s. Error: %v", repo.ID, repo.OwnerName, repo.Name, err)
+ return err
+ }
+ }
+
+ commit, err := gitRepo.GetTagCommit(release.TagName)
+ if err != nil {
+ if git.IsErrNotExist(err) {
+ log.Warn("Unable to find commit %s for Tag: %s in [%d]%s/%s. Cannot update publisher ID.", err.(git.ErrNotExist).ID, release.TagName, repo.ID, repo.OwnerName, repo.Name)
+ continue
+ }
+ log.Error("Error whilst getting commit for Tag: %s in [%d]%s/%s. Error: %v", release.TagName, repo.ID, repo.OwnerName, repo.Name, err)
+ return fmt.Errorf("GetTagCommit: %w", err)
+ }
+
+ if commit.Author.Email == "" {
+ log.Warn("Tag: %s in Repo[%d]%s/%s does not have a tagger.", release.TagName, repo.ID, repo.OwnerName, repo.Name)
+ commit, err = gitRepo.GetCommit(commit.ID.String())
+ if err != nil {
+ if git.IsErrNotExist(err) {
+ log.Warn("Unable to find commit %s for Tag: %s in [%d]%s/%s. Cannot update publisher ID.", err.(git.ErrNotExist).ID, release.TagName, repo.ID, repo.OwnerName, repo.Name)
+ continue
+ }
+ log.Error("Error whilst getting commit for Tag: %s in [%d]%s/%s. Error: %v", release.TagName, repo.ID, repo.OwnerName, repo.Name, err)
+ return fmt.Errorf("GetCommit: %w", err)
+ }
+ }
+
+ if commit.Author.Email == "" {
+ log.Warn("Tag: %s in Repo[%d]%s/%s does not have a Tagger and its underlying commit does not have an Author either!", release.TagName, repo.ID, repo.OwnerName, repo.Name)
+ continue
+ }
+
+ if user == nil || !strings.EqualFold(user.Email, commit.Author.Email) {
+ user = new(User)
+ _, err = sess.Where("email=?", commit.Author.Email).Get(user)
+ if err != nil {
+ log.Error("Error whilst getting commit author by email: %s for Tag: %s in [%d]%s/%s. Error: %v", commit.Author.Email, release.TagName, repo.ID, repo.OwnerName, repo.Name, err)
+ return err
+ }
+
+ user.Email = commit.Author.Email
+ }
+
+ if user.ID <= 0 {
+ continue
+ }
+
+ release.PublisherID = user.ID
+ if _, err := sess.ID(release.ID).Cols("publisher_id").Update(release); err != nil {
+ log.Error("Error whilst updating publisher[%d] for release[%d] with tag name %s. Error: %v", release.PublisherID, release.ID, release.TagName, err)
+ return err
+ }
+ }
+ // close and forget the cached repo at the end of the batch so the next
+ // batch reopens it instead of reusing a closed handle
+ if gitRepo != nil {
+ gitRepo.Close()
+ gitRepo = nil
+ }
+ repo = nil
+
+ if err := sess.Commit(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/models/migrations/v1_14/v157.go b/models/migrations/v1_14/v157.go
new file mode 100644
index 0000000..7187278
--- /dev/null
+++ b/models/migrations/v1_14/v157.go
@@ -0,0 +1,66 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_14 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func FixRepoTopics(x *xorm.Engine) error {
+ type Topic struct { //nolint:unused
+ ID int64 `xorm:"pk autoincr"`
+ Name string `xorm:"UNIQUE VARCHAR(25)"`
+ RepoCount int
+ }
+
+ type RepoTopic struct { //nolint:unused
+ RepoID int64 `xorm:"pk"`
+ TopicID int64 `xorm:"pk"`
+ }
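+ // type = 0 restricts the recount to individual users (organizations use another type value)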
+
+ type Repository struct {
+ ID int64 `xorm:"pk autoincr"`
+ Topics []string `xorm:"TEXT JSON"`
+ }
+
+ const batchSize = 100
+ sess := x.NewSession()
+ defer sess.Close()
+ repos := make([]*Repository, 0, batchSize)
+ topics := make([]string, 0, batchSize)
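+ // rebuild each repository's denormalized topics column from the topic and repo_topic tables, most-used topics first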
+ for start := 0; ; start += batchSize {
+ repos = repos[:0]
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ if err := sess.Limit(batchSize, start).Find(&repos); err != nil {
+ return err
+ }
+
+ if len(repos) == 0 {
+ break
+ }
+
+ for _, repo := range repos {
+ topics = topics[:0]
+ if err := sess.Select("name").Table("topic").
+ Join("INNER", "repo_topic", "repo_topic.topic_id = topic.id").
+ Where("repo_topic.repo_id = ?", repo.ID).Desc("topic.repo_count").Find(&topics); err != nil {
+ return err
+ }
+ repo.Topics = topics
+ if _, err := sess.ID(repo.ID).Cols("topics").Update(repo); err != nil {
+ return err
+ }
+ }
+
+ if err := sess.Commit(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/models/migrations/v1_14/v158.go b/models/migrations/v1_14/v158.go
new file mode 100644
index 0000000..2d688b1
--- /dev/null
+++ b/models/migrations/v1_14/v158.go
@@ -0,0 +1,101 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_14 //nolint
+
+import (
+ "fmt"
+ "strconv"
+
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+)
+
+func UpdateCodeCommentReplies(x *xorm.Engine) error {
+ type Comment struct {
+ ID int64 `xorm:"pk autoincr"`
+ CommitSHA string `xorm:"VARCHAR(40)"`
+ Patch string `xorm:"TEXT patch"`
+ Invalidated bool
+
+ // Not extracted but used in the below query
+ Type int `xorm:"INDEX"`
+ Line int64 // - previous line / + proposed line
+ TreePath string
+ ReviewID int64 `xorm:"index"`
+ }
+
+ if err := x.Sync(new(Comment)); err != nil {
+ return err
+ }
+
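+ // replies in a code-comment thread must share the first comment's commit_sha,
+ // patch and invalidated state; the query below finds replies (same review,
+ // tree_path and line) whose values diverge from the thread's first comment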
+ sqlSelect := `SELECT comment.id as id, first.commit_sha as commit_sha, first.patch as patch, first.invalidated as invalidated`
+ sqlTail := ` FROM comment INNER JOIN (
+ SELECT C.id, C.review_id, C.line, C.tree_path, C.patch, C.commit_sha, C.invalidated
+ FROM comment AS C
+ WHERE C.type = 21
+ AND C.created_unix =
+ (SELECT MIN(comment.created_unix)
+ FROM comment
+ WHERE comment.review_id = C.review_id
+ AND comment.type = 21
+ AND comment.line = C.line
+ AND comment.tree_path = C.tree_path)
+ ) AS first
+ ON comment.review_id = first.review_id
+ AND comment.tree_path = first.tree_path AND comment.line = first.line
+ WHERE comment.type = 21
+ AND comment.id != first.id
+ AND comment.commit_sha != first.commit_sha`
+
+ var (
+ sqlCmd string
+ start = 0
+ batchSize = 100
+ sess = x.NewSession()
+ )
+ defer sess.Close()
+ for {
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ comments := make([]*Comment, 0, batchSize)
+
+ switch {
+ case setting.Database.Type.IsMySQL():
+ sqlCmd = sqlSelect + sqlTail + " LIMIT " + strconv.Itoa(start) + ", " + strconv.Itoa(batchSize)
+ case setting.Database.Type.IsPostgreSQL():
+ fallthrough
+ case setting.Database.Type.IsSQLite3():
+ sqlCmd = sqlSelect + sqlTail + " LIMIT " + strconv.Itoa(batchSize) + " OFFSET " + strconv.Itoa(start)
+ default:
+ return fmt.Errorf("Unsupported database type")
+ }
+
+ if err := sess.SQL(sqlCmd).Find(&comments); err != nil {
+ log.Error("failed to select: %v", err)
+ return err
+ }
+
+ for _, comment := range comments {
+ if _, err := sess.Table("comment").ID(comment.ID).Cols("commit_sha", "patch", "invalidated").Update(comment); err != nil {
+ log.Error("failed to update comment[%d]: %v %v", comment.ID, comment, err)
+ return err
+ }
+ }
+
+ // rows just updated stop matching the WHERE clause above, so do not
+ // advance the offset or every other batch would be skipped
+
+ if err := sess.Commit(); err != nil {
+ return err
+ }
+ if len(comments) < batchSize {
+ break
+ }
+ }
+
+ return nil
+}
diff --git a/models/migrations/v1_14/v159.go b/models/migrations/v1_14/v159.go
new file mode 100644
index 0000000..149ae0f
--- /dev/null
+++ b/models/migrations/v1_14/v159.go
@@ -0,0 +1,38 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_14 //nolint
+
+import (
+ "code.gitea.io/gitea/models/migrations/base"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func UpdateReactionConstraint(x *xorm.Engine) error {
+ // Reaction represents a reactions on issues and comments.
+ type Reaction struct {
+ ID int64 `xorm:"pk autoincr"`
+ Type string `xorm:"INDEX UNIQUE(s) NOT NULL"`
+ IssueID int64 `xorm:"INDEX UNIQUE(s) NOT NULL"`
+ CommentID int64 `xorm:"INDEX UNIQUE(s)"`
+ UserID int64 `xorm:"INDEX UNIQUE(s) NOT NULL"`
+ OriginalAuthorID int64 `xorm:"INDEX UNIQUE(s) NOT NULL DEFAULT(0)"`
+ OriginalAuthor string `xorm:"INDEX UNIQUE(s)"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ }
+
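+ // recreating the table rebuilds its UNIQUE(s) index so it matches the
+ // constraint above, which now spans the original author columns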
+ sess := x.NewSession()
+ defer sess.Close()
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ if err := base.RecreateTable(sess, &Reaction{}); err != nil {
+ return err
+ }
+
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_14/v160.go b/models/migrations/v1_14/v160.go
new file mode 100644
index 0000000..4dea91b
--- /dev/null
+++ b/models/migrations/v1_14/v160.go
@@ -0,0 +1,16 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_14 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddBlockOnOfficialReviewRequests(x *xorm.Engine) error {
+ type ProtectedBranch struct {
+ BlockOnOfficialReviewRequests bool `xorm:"NOT NULL DEFAULT false"`
+ }
+
+ return x.Sync(new(ProtectedBranch))
+}
diff --git a/models/migrations/v1_14/v161.go b/models/migrations/v1_14/v161.go
new file mode 100644
index 0000000..ac7e821
--- /dev/null
+++ b/models/migrations/v1_14/v161.go
@@ -0,0 +1,73 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_14 //nolint
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/migrations/base"
+
+ "xorm.io/xorm"
+)
+
+func ConvertTaskTypeToString(x *xorm.Engine) error {
+ const (
+ GOGS int = iota + 1
+ SLACK
+ GITEA
+ DISCORD
+ DINGTALK
+ TELEGRAM
+ MSTEAMS
+ FEISHU
+ MATRIX
+ WECHATWORK
+ )
+
+ hookTaskTypes := map[int]string{
+ GITEA: "gitea",
+ GOGS: "gogs",
+ SLACK: "slack",
+ DISCORD: "discord",
+ DINGTALK: "dingtalk",
+ TELEGRAM: "telegram",
+ MSTEAMS: "msteams",
+ FEISHU: "feishu",
+ MATRIX: "matrix",
+ WECHATWORK: "wechatwork",
+ }
+
+ type HookTask struct {
+ Typ string `xorm:"VARCHAR(16) index"`
+ }
+ if err := x.Sync(new(HookTask)); err != nil {
+ return err
+ }
+
+ // check that the old column still exists, so the migration can safely be rerun
+ exist, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "hook_task", "type")
+ if err != nil {
+ return err
+ }
+ if !exist {
+ return nil
+ }
+
+ for i, s := range hookTaskTypes {
+ if _, err := x.Exec("UPDATE hook_task set typ = ? where `type`=?", s, i); err != nil {
+ return err
+ }
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+ if err := base.DropTableColumns(sess, "hook_task", "type"); err != nil {
+ return err
+ }
+
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_14/v162.go b/models/migrations/v1_14/v162.go
new file mode 100644
index 0000000..2e4e0b8
--- /dev/null
+++ b/models/migrations/v1_14/v162.go
@@ -0,0 +1,62 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_14 //nolint
+
+import (
+ "code.gitea.io/gitea/models/migrations/base"
+
+ "xorm.io/xorm"
+)
+
+func ConvertWebhookTaskTypeToString(x *xorm.Engine) error {
+ const (
+ GOGS int = iota + 1
+ SLACK
+ GITEA
+ DISCORD
+ DINGTALK
+ TELEGRAM
+ MSTEAMS
+ FEISHU
+ MATRIX
+ WECHATWORK
+ )
+
+ hookTaskTypes := map[int]string{
+ GITEA: "gitea",
+ GOGS: "gogs",
+ SLACK: "slack",
+ DISCORD: "discord",
+ DINGTALK: "dingtalk",
+ TELEGRAM: "telegram",
+ MSTEAMS: "msteams",
+ FEISHU: "feishu",
+ MATRIX: "matrix",
+ WECHATWORK: "wechatwork",
+ }
+
+ type Webhook struct {
+ Type string `xorm:"char(16) index"`
+ }
+ if err := x.Sync(new(Webhook)); err != nil {
+ return err
+ }
+
+ for i, s := range hookTaskTypes {
+ if _, err := x.Exec("UPDATE webhook set type = ? where hook_task_type=?", s, i); err != nil {
+ return err
+ }
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+ if err := base.DropTableColumns(sess, "webhook", "hook_task_type"); err != nil {
+ return err
+ }
+
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_14/v163.go b/models/migrations/v1_14/v163.go
new file mode 100644
index 0000000..0cd8ba6
--- /dev/null
+++ b/models/migrations/v1_14/v163.go
@@ -0,0 +1,35 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_14 //nolint
+
+import (
+ "code.gitea.io/gitea/models/migrations/base"
+
+ "xorm.io/xorm"
+)
+
+func ConvertTopicNameFrom25To50(x *xorm.Engine) error {
+ type Topic struct {
+ ID int64 `xorm:"pk autoincr"`
+ Name string `xorm:"UNIQUE VARCHAR(50)"`
+ RepoCount int
+ CreatedUnix int64 `xorm:"INDEX created"`
+ UpdatedUnix int64 `xorm:"INDEX updated"`
+ }
+
+ if err := x.Sync(new(Topic)); err != nil {
+ return err
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+ if err := base.RecreateTable(sess, new(Topic)); err != nil {
+ return err
+ }
+
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_14/v164.go b/models/migrations/v1_14/v164.go
new file mode 100644
index 0000000..54f6951
--- /dev/null
+++ b/models/migrations/v1_14/v164.go
@@ -0,0 +1,37 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_14 //nolint
+
+import (
+ "fmt"
+
+ "xorm.io/xorm"
+)
+
+// OAuth2Grant here is a snapshot of models.OAuth2Grant for this version
+// of the database, as it does not appear to have been added as a part
+// of a previous migration.
+type OAuth2Grant struct {
+ ID int64 `xorm:"pk autoincr"`
+ UserID int64 `xorm:"INDEX unique(user_application)"`
+ ApplicationID int64 `xorm:"INDEX unique(user_application)"`
+ Counter int64 `xorm:"NOT NULL DEFAULT 1"`
+ Scope string `xorm:"TEXT"`
+ Nonce string `xorm:"TEXT"`
+ CreatedUnix int64 `xorm:"created"`
+ UpdatedUnix int64 `xorm:"updated"`
+}
+
+// TableName sets the database table name to be the correct one, as the
+// autogenerated table name for this struct is "o_auth2_grant".
+func (grant *OAuth2Grant) TableName() string {
+ return "oauth2_grant"
+}
+
+func AddScopeAndNonceColumnsToOAuth2Grant(x *xorm.Engine) error {
+ if err := x.Sync(new(OAuth2Grant)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+ return nil
+}
diff --git a/models/migrations/v1_14/v165.go b/models/migrations/v1_14/v165.go
new file mode 100644
index 0000000..5b1a779
--- /dev/null
+++ b/models/migrations/v1_14/v165.go
@@ -0,0 +1,57 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_14 //nolint
+
+import (
+ "code.gitea.io/gitea/models/migrations/base"
+
+ "xorm.io/xorm"
+ "xorm.io/xorm/schemas"
+)
+
+func ConvertHookTaskTypeToVarcharAndTrim(x *xorm.Engine) error {
+ dbType := x.Dialect().URI().DBType
+ if dbType == schemas.SQLITE { // For SQLITE, varchar or char will always be represented as TEXT
+ return nil
+ }
+
+ type HookTask struct { //nolint:unused
+ Typ string `xorm:"VARCHAR(16) index"`
+ }
+
+ if err := base.ModifyColumn(x, "hook_task", &schemas.Column{
+ Name: "typ",
+ SQLType: schemas.SQLType{
+ Name: "VARCHAR",
+ },
+ Length: 16,
+ Nullable: true, // keep the column nullable for backward compatibility
+ DefaultIsEmpty: true,
+ }); err != nil {
+ return err
+ }
+
+ if _, err := x.Exec("UPDATE hook_task SET typ = TRIM(typ)"); err != nil {
+ return err
+ }
+
+ type Webhook struct { //nolint:unused
+ Type string `xorm:"VARCHAR(16) index"`
+ }
+
+ if err := base.ModifyColumn(x, "webhook", &schemas.Column{
+ Name: "type",
+ SQLType: schemas.SQLType{
+ Name: "VARCHAR",
+ },
+ Length: 16,
+ Nullable: true, // keep the column nullable for backward compatibility
+ DefaultIsEmpty: true,
+ }); err != nil {
+ return err
+ }
+
+ _, err := x.Exec("UPDATE webhook SET type = TRIM(type)")
+ return err
+}
diff --git a/models/migrations/v1_14/v166.go b/models/migrations/v1_14/v166.go
new file mode 100644
index 0000000..e573158
--- /dev/null
+++ b/models/migrations/v1_14/v166.go
@@ -0,0 +1,112 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_14 //nolint
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+
+ "golang.org/x/crypto/argon2"
+ "golang.org/x/crypto/bcrypt"
+ "golang.org/x/crypto/pbkdf2"
+ "golang.org/x/crypto/scrypt"
+ "xorm.io/builder"
+ "xorm.io/xorm"
+)
+
+func RecalculateUserEmptyPWD(x *xorm.Engine) (err error) {
+ const (
+ algoBcrypt = "bcrypt"
+ algoScrypt = "scrypt"
+ algoArgon2 = "argon2"
+ algoPbkdf2 = "pbkdf2"
+ )
+
+ type User struct {
+ ID int64 `xorm:"pk autoincr"`
+ Passwd string `xorm:"NOT NULL"`
+ PasswdHashAlgo string `xorm:"NOT NULL DEFAULT 'argon2'"`
+ MustChangePassword bool `xorm:"NOT NULL DEFAULT false"`
+ LoginType int
+ LoginName string
+ Type int
+ Salt string `xorm:"VARCHAR(10)"`
+ }
+
+ // hashPassword hash password based on algo and salt
+ // state 461406070c
+ hashPassword := func(passwd, salt, algo string) string {
+ var tempPasswd []byte
+
+ switch algo {
+ case algoBcrypt:
+ tempPasswd, _ = bcrypt.GenerateFromPassword([]byte(passwd), bcrypt.DefaultCost)
+ return string(tempPasswd)
+ case algoScrypt:
+ tempPasswd, _ = scrypt.Key([]byte(passwd), []byte(salt), 65536, 16, 2, 50)
+ case algoArgon2:
+ tempPasswd = argon2.IDKey([]byte(passwd), []byte(salt), 2, 65536, 8, 50)
+ case algoPbkdf2:
+ fallthrough
+ default:
+ tempPasswd = pbkdf2.Key([]byte(passwd), []byte(salt), 10000, 50, sha256.New)
+ }
+
+ return hex.EncodeToString(tempPasswd)
+ }
+
+ // ValidatePassword checks if given password matches the one belongs to the user.
+ // state 461406070c, changed since it's not necessary to be time constant
+ ValidatePassword := func(u *User, passwd string) bool {
+ tempHash := hashPassword(passwd, u.Salt, u.PasswdHashAlgo)
+
+ if u.PasswdHashAlgo != algoBcrypt && u.Passwd == tempHash {
+ return true
+ }
+ if u.PasswdHashAlgo == algoBcrypt && bcrypt.CompareHashAndPassword([]byte(u.Passwd), []byte(passwd)) == nil {
+ return true
+ }
+ return false
+ }
+
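+ // a user effectively has no password when the stored hash verifies the empty
+ // string; such rows get passwd, salt and algo cleared below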
+ sess := x.NewSession()
+ defer sess.Close()
+
+ const batchSize = 100
+
+ // paginate by ID rather than by offset: users whose password is cleared
+ // below drop out of the filter, so a moving offset would skip rows
+ var lastID int64
+ for {
+ users := make([]*User, 0, batchSize)
+ if err = sess.Where(builder.Neq{"passwd": ""}.And(builder.Gt{"id": lastID})).OrderBy("id").Limit(batchSize).Find(&users); err != nil {
+ return err
+ }
+ if len(users) == 0 {
+ break
+ }
+ lastID = users[len(users)-1].ID
+
+ if err = sess.Begin(); err != nil {
+ return err
+ }
+
+ for _, user := range users {
+ if ValidatePassword(user, "") {
+ user.Passwd = ""
+ user.Salt = ""
+ user.PasswdHashAlgo = ""
+ if _, err = sess.ID(user.ID).Cols("passwd", "salt", "passwd_hash_algo").Update(user); err != nil {
+ return err
+ }
+ }
+ }
+
+ if err = sess.Commit(); err != nil {
+ return err
+ }
+ }
+
+ // delete salt and algo where password is empty
+ _, err = sess.Where(builder.Eq{"passwd": ""}.And(builder.Neq{"salt": ""}.Or(builder.Neq{"passwd_hash_algo": ""}))).
+ Cols("salt", "passwd_hash_algo").Update(&User{})
+
+ return err
+}
diff --git a/models/migrations/v1_14/v167.go b/models/migrations/v1_14/v167.go
new file mode 100644
index 0000000..9d416f6
--- /dev/null
+++ b/models/migrations/v1_14/v167.go
@@ -0,0 +1,23 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_14 //nolint
+
+import (
+ "fmt"
+
+ "xorm.io/xorm"
+)
+
+func AddUserRedirect(x *xorm.Engine) (err error) {
+ type UserRedirect struct {
+ ID int64 `xorm:"pk autoincr"`
+ LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ RedirectUserID int64
+ }
+
+ if err := x.Sync(new(UserRedirect)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+ return nil
+}
diff --git a/models/migrations/v1_14/v168.go b/models/migrations/v1_14/v168.go
new file mode 100644
index 0000000..a30a885
--- /dev/null
+++ b/models/migrations/v1_14/v168.go
@@ -0,0 +1,10 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_14 //nolint
+
+import "xorm.io/xorm"
+
+func RecreateUserTableToFixDefaultValues(_ *xorm.Engine) error {
+ return nil
+}
diff --git a/models/migrations/v1_14/v169.go b/models/migrations/v1_14/v169.go
new file mode 100644
index 0000000..5b81bb5
--- /dev/null
+++ b/models/migrations/v1_14/v169.go
@@ -0,0 +1,13 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_14 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func CommentTypeDeleteBranchUseOldRef(x *xorm.Engine) error {
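+ // comment type 11 (delete branch) used to store the branch name in commit_sha;
+ // move it into old_ref where later code expects it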
+ _, err := x.Exec("UPDATE comment SET old_ref = commit_sha, commit_sha = '' WHERE type = 11")
+ return err
+}
diff --git a/models/migrations/v1_14/v170.go b/models/migrations/v1_14/v170.go
new file mode 100644
index 0000000..7b6498a
--- /dev/null
+++ b/models/migrations/v1_14/v170.go
@@ -0,0 +1,21 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_14 //nolint
+
+import (
+ "fmt"
+
+ "xorm.io/xorm"
+)
+
+func AddDismissedReviewColumn(x *xorm.Engine) error {
+ type Review struct {
+ Dismissed bool `xorm:"NOT NULL DEFAULT false"`
+ }
+
+ if err := x.Sync(new(Review)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+ return nil
+}
diff --git a/models/migrations/v1_14/v171.go b/models/migrations/v1_14/v171.go
new file mode 100644
index 0000000..51a35a0
--- /dev/null
+++ b/models/migrations/v1_14/v171.go
@@ -0,0 +1,21 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_14 //nolint
+
+import (
+ "fmt"
+
+ "xorm.io/xorm"
+)
+
+func AddSortingColToProjectBoard(x *xorm.Engine) error {
+ type ProjectBoard struct {
+ Sorting int8 `xorm:"NOT NULL DEFAULT 0"`
+ }
+
+ if err := x.Sync(new(ProjectBoard)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+ return nil
+}
diff --git a/models/migrations/v1_14/v172.go b/models/migrations/v1_14/v172.go
new file mode 100644
index 0000000..0f9bef9
--- /dev/null
+++ b/models/migrations/v1_14/v172.go
@@ -0,0 +1,19 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_14 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func AddSessionTable(x *xorm.Engine) error {
+ type Session struct {
+ Key string `xorm:"pk CHAR(16)"`
+ Data []byte `xorm:"BLOB"`
+ Expiry timeutil.TimeStamp
+ }
+ return x.Sync(new(Session))
+}
diff --git a/models/migrations/v1_14/v173.go b/models/migrations/v1_14/v173.go
new file mode 100644
index 0000000..2d9eee9
--- /dev/null
+++ b/models/migrations/v1_14/v173.go
@@ -0,0 +1,21 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_14 //nolint
+
+import (
+ "fmt"
+
+ "xorm.io/xorm"
+)
+
+func AddTimeIDCommentColumn(x *xorm.Engine) error {
+ type Comment struct {
+ TimeID int64
+ }
+
+ if err := x.Sync(new(Comment)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+ return nil
+}
diff --git a/models/migrations/v1_14/v174.go b/models/migrations/v1_14/v174.go
new file mode 100644
index 0000000..c839e15
--- /dev/null
+++ b/models/migrations/v1_14/v174.go
@@ -0,0 +1,34 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_14 //nolint
+
+import (
+ "fmt"
+
+ "xorm.io/xorm"
+)
+
+func AddRepoTransfer(x *xorm.Engine) error {
+ type RepoTransfer struct {
+ ID int64 `xorm:"pk autoincr"`
+ DoerID int64
+ RecipientID int64
+ RepoID int64
+ TeamIDs []int64
+ CreatedUnix int64 `xorm:"INDEX NOT NULL created"`
+ UpdatedUnix int64 `xorm:"INDEX NOT NULL updated"`
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ if err := sess.Sync(new(RepoTransfer)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_14/v175.go b/models/migrations/v1_14/v175.go
new file mode 100644
index 0000000..70d72b2
--- /dev/null
+++ b/models/migrations/v1_14/v175.go
@@ -0,0 +1,53 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_14 //nolint
+
+import (
+ "fmt"
+ "regexp"
+
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+)
+
+func FixPostgresIDSequences(x *xorm.Engine) error {
+ if !setting.Database.Type.IsPostgreSQL() {
+ return nil
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ var sequences []string
+ schema := sess.Engine().Dialect().URI().Schema
+
+ sess.Engine().SetSchema("")
+ if err := sess.Table("information_schema.sequences").Cols("sequence_name").Where("sequence_name LIKE 'tmp_recreate__%_id_seq%' AND sequence_catalog = ?", setting.Database.Name).Find(&sequences); err != nil {
+ log.Error("Unable to find sequences: %v", err)
+ return err
+ }
+ sess.Engine().SetSchema(schema)
+
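+ // sequences named tmp_recreate__<table>_id_seq are leftovers from interrupted
+ // table-recreation migrations; rename them back and resync their values from
+ // the table's max id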
+ sequenceRegexp := regexp.MustCompile(`tmp_recreate__(\w+)_id_seq.*`)
+
+ for _, sequence := range sequences {
+ match := sequenceRegexp.FindStringSubmatch(sequence)
+ if match == nil {
+ continue
+ }
+ tableName := match[1]
+ newSequenceName := tableName + "_id_seq"
+ if _, err := sess.Exec(fmt.Sprintf("ALTER SEQUENCE `%s` RENAME TO `%s`", sequence, newSequenceName)); err != nil {
+ log.Error("Unable to rename %s to %s. Error: %v", sequence, newSequenceName, err)
+ return err
+ }
+ if _, err := sess.Exec(fmt.Sprintf("SELECT setval('%s', COALESCE((SELECT MAX(id)+1 FROM `%s`), 1), false)", newSequenceName, tableName)); err != nil {
+ log.Error("Unable to reset sequence %s for %s. Error: %v", newSequenceName, tableName, err)
+ return err
+ }
+ }
+
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_14/v176.go b/models/migrations/v1_14/v176.go
new file mode 100644
index 0000000..1ed49f7
--- /dev/null
+++ b/models/migrations/v1_14/v176.go
@@ -0,0 +1,76 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_14 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+// RemoveInvalidLabels looks through the database for comments and issue_labels
+// that refer to labels which belong neither to the repository the issue is in
+// nor to that repository's owning organization
+func RemoveInvalidLabels(x *xorm.Engine) error {
+ type Comment struct {
+ ID int64 `xorm:"pk autoincr"`
+ Type int `xorm:"INDEX"`
+ IssueID int64 `xorm:"INDEX"`
+ LabelID int64
+ }
+
+ type Issue struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"INDEX UNIQUE(repo_index)"`
+ Index int64 `xorm:"UNIQUE(repo_index)"` // Index in one repository.
+ }
+
+ type Repository struct {
+ ID int64 `xorm:"pk autoincr"`
+ OwnerID int64 `xorm:"UNIQUE(s) index"`
+ LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ }
+
+ type Label struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"INDEX"`
+ OrgID int64 `xorm:"INDEX"`
+ }
+
+ type IssueLabel struct {
+ ID int64 `xorm:"pk autoincr"`
+ IssueID int64 `xorm:"UNIQUE(s)"`
+ LabelID int64 `xorm:"UNIQUE(s)"`
+ }
+
+ if err := x.Sync(new(Comment), new(Issue), new(Repository), new(Label), new(IssueLabel)); err != nil {
+ return err
+ }
+
+ if _, err := x.Exec(`DELETE FROM issue_label WHERE issue_label.id IN (
+ SELECT il_too.id FROM (
+ SELECT il_too_too.id
+ FROM issue_label AS il_too_too
+ INNER JOIN label ON il_too_too.label_id = label.id
+ INNER JOIN issue on issue.id = il_too_too.issue_id
+ INNER JOIN repository on repository.id = issue.repo_id
+ WHERE
+ (label.org_id = 0 AND issue.repo_id != label.repo_id) OR (label.repo_id = 0 AND label.org_id != repository.owner_id)
+ ) AS il_too )`); err != nil {
+ return err
+ }
+
+ if _, err := x.Exec(`DELETE FROM comment WHERE comment.id IN (
+ SELECT il_too.id FROM (
+ SELECT com.id
+ FROM comment AS com
+ INNER JOIN label ON com.label_id = label.id
+ INNER JOIN issue on issue.id = com.issue_id
+ INNER JOIN repository on repository.id = issue.repo_id
+ WHERE
+ com.type = ? AND ((label.org_id = 0 AND issue.repo_id != label.repo_id) OR (label.repo_id = 0 AND label.org_id != repository.owner_id))
+ ) AS il_too)`, 7); err != nil {
+ return err
+ }
+
+ return nil
+}
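
The bare `7` bound to `com.type = ?` in the second DELETE matches, as far as can be told from context, the numeric value of Gitea's label comment type, so only label comments pointing at foreign labels are removed. Named as a constant, the binding would read:

    // commentTypeLabel names the enum value the query binds as the literal 7;
    // the constant name is illustrative, the value is what the query uses.
    const commentTypeLabel = 7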
diff --git a/models/migrations/v1_14/v176_test.go b/models/migrations/v1_14/v176_test.go
new file mode 100644
index 0000000..f5e644e
--- /dev/null
+++ b/models/migrations/v1_14/v176_test.go
@@ -0,0 +1,128 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_14 //nolint
+
+import (
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_RemoveInvalidLabels(t *testing.T) {
+ // Models used by the migration
+ type Comment struct {
+ ID int64 `xorm:"pk autoincr"`
+ Type int `xorm:"INDEX"`
+ IssueID int64 `xorm:"INDEX"`
+ LabelID int64
+ ShouldRemain bool // <- Flag for testing the migration
+ }
+
+ type Issue struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"INDEX UNIQUE(repo_index)"`
+ Index int64 `xorm:"UNIQUE(repo_index)"` // Index in one repository.
+ }
+
+ type Repository struct {
+ ID int64 `xorm:"pk autoincr"`
+ OwnerID int64 `xorm:"UNIQUE(s) index"`
+ LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ }
+
+ type Label struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"INDEX"`
+ OrgID int64 `xorm:"INDEX"`
+ }
+
+ type IssueLabel struct {
+ ID int64 `xorm:"pk autoincr"`
+ IssueID int64 `xorm:"UNIQUE(s)"`
+ LabelID int64 `xorm:"UNIQUE(s)"`
+ ShouldRemain bool // <- Flag for testing the migration
+ }
+
+ // load and prepare the test database
+ x, deferable := migration_tests.PrepareTestEnv(t, 0, new(Comment), new(Issue), new(Repository), new(IssueLabel), new(Label))
+ if x == nil || t.Failed() {
+ defer deferable()
+ return
+ }
+ defer deferable()
+
+ var issueLabels []*IssueLabel
+ ilPreMigration := map[int64]*IssueLabel{}
+ ilPostMigration := map[int64]*IssueLabel{}
+
+ var comments []*Comment
+ comPreMigration := map[int64]*Comment{}
+ comPostMigration := map[int64]*Comment{}
+
+ // Get pre migration values
+ if err := x.Find(&issueLabels); err != nil {
+ t.Errorf("Unable to find issueLabels: %v", err)
+ return
+ }
+ for _, issueLabel := range issueLabels {
+ ilPreMigration[issueLabel.ID] = issueLabel
+ }
+ if err := x.Find(&comments); err != nil {
+ t.Errorf("Unable to find comments: %v", err)
+ return
+ }
+ for _, comment := range comments {
+ comPreMigration[comment.ID] = comment
+ }
+
+ // Run the migration
+ if err := RemoveInvalidLabels(x); err != nil {
+ t.Errorf("unable to RemoveInvalidLabels: %v", err)
+ }
+
+ // Get the post migration values
+ issueLabels = issueLabels[:0]
+ if err := x.Find(&issueLabels); err != nil {
+ t.Errorf("Unable to find issueLabels: %v", err)
+ return
+ }
+ for _, issueLabel := range issueLabels {
+ ilPostMigration[issueLabel.ID] = issueLabel
+ }
+ comments = comments[:0]
+ if err := x.Find(&comments); err != nil {
+ t.Errorf("Unable to find comments: %v", err)
+ return
+ }
+ for _, comment := range comments {
+ comPostMigration[comment.ID] = comment
+ }
+
+ // Finally test results of the migration
+ for id, comment := range comPreMigration {
+ post, ok := comPostMigration[id]
+ if ok {
+ if !comment.ShouldRemain {
+ t.Errorf("Comment[%d] remained but should have been deleted", id)
+ }
+ assert.Equal(t, comment, post)
+ } else if comment.ShouldRemain {
+ t.Errorf("Comment[%d] was deleted but should have remained", id)
+ }
+ }
+
+ for id, il := range ilPreMigration {
+ post, ok := ilPostMigration[id]
+ if ok {
+ if !il.ShouldRemain {
+ t.Errorf("IssueLabel[%d] remained but should have been deleted", id)
+ }
+ assert.Equal(t, il, post)
+ } else if il.ShouldRemain {
+ t.Errorf("IssueLabel[%d] was deleted but should have remained", id)
+ }
+ }
+}
diff --git a/models/migrations/v1_14/v177.go b/models/migrations/v1_14/v177.go
new file mode 100644
index 0000000..6e1838f
--- /dev/null
+++ b/models/migrations/v1_14/v177.go
@@ -0,0 +1,42 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_14 //nolint
+
+import (
+ "fmt"
+
+ "xorm.io/xorm"
+)
+
+// DeleteOrphanedIssueLabels looks through the database for issue_labels where the label no longer exists and deletes them.
+func DeleteOrphanedIssueLabels(x *xorm.Engine) error {
+ type IssueLabel struct {
+ ID int64 `xorm:"pk autoincr"`
+ IssueID int64 `xorm:"UNIQUE(s)"`
+ LabelID int64 `xorm:"UNIQUE(s)"`
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ if err := sess.Sync(new(IssueLabel)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+
+ if _, err := sess.Exec(`DELETE FROM issue_label WHERE issue_label.id IN (
+ SELECT ill.id FROM (
+ SELECT il.id
+ FROM issue_label AS il
+ LEFT JOIN label ON il.label_id = label.id
+ WHERE
+ label.id IS NULL
+ ) AS ill)`); err != nil {
+ return err
+ }
+
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_14/v177_test.go b/models/migrations/v1_14/v177_test.go
new file mode 100644
index 0000000..cf5e745
--- /dev/null
+++ b/models/migrations/v1_14/v177_test.go
@@ -0,0 +1,89 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_14 //nolint
+
+import (
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_DeleteOrphanedIssueLabels(t *testing.T) {
+ // Create the models used in the migration
+ type IssueLabel struct {
+ ID int64 `xorm:"pk autoincr"`
+ IssueID int64 `xorm:"UNIQUE(s)"`
+ LabelID int64 `xorm:"UNIQUE(s)"`
+ }
+
+ type Label struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"INDEX"`
+ OrgID int64 `xorm:"INDEX"`
+ Name string
+ Description string
+ Color string `xorm:"VARCHAR(7)"`
+ NumIssues int
+ NumClosedIssues int
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ }
+
+ // Prepare and load the testing database
+ x, deferable := migration_tests.PrepareTestEnv(t, 0, new(IssueLabel), new(Label))
+ if x == nil || t.Failed() {
+ defer deferable()
+ return
+ }
+ defer deferable()
+
+ var issueLabels []*IssueLabel
+ preMigration := map[int64]*IssueLabel{}
+ postMigration := map[int64]*IssueLabel{}
+
+ // Load issue labels that exist in the database pre-migration
+ if err := x.Find(&issueLabels); err != nil {
+ require.NoError(t, err)
+ return
+ }
+ for _, issueLabel := range issueLabels {
+ preMigration[issueLabel.ID] = issueLabel
+ }
+
+ // Run the migration
+ if err := DeleteOrphanedIssueLabels(x); err != nil {
+ require.NoError(t, err)
+ return
+ }
+
+ // Load the remaining issue-labels
+ issueLabels = issueLabels[:0]
+ if err := x.Find(&issueLabels); err != nil {
+ require.NoError(t, err)
+ return
+ }
+ for _, issueLabel := range issueLabels {
+ postMigration[issueLabel.ID] = issueLabel
+ }
+
+ // Now test what is left
+ if _, ok := postMigration[2]; ok {
+ t.Errorf("Orphaned Label[2] survived the migration")
+ return
+ }
+
+ if _, ok := postMigration[5]; ok {
+ t.Errorf("Orphaned Label[5] survived the migration")
+ return
+ }
+
+ for id, post := range postMigration {
+ pre := preMigration[id]
+ assert.Equal(t, pre, post, "migration changed issueLabel %d", id)
+ }
+}
diff --git a/models/migrations/v1_15/main_test.go b/models/migrations/v1_15/main_test.go
new file mode 100644
index 0000000..e496065
--- /dev/null
+++ b/models/migrations/v1_15/main_test.go
@@ -0,0 +1,14 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_15 //nolint
+
+import (
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+)
+
+func TestMain(m *testing.M) {
+ migration_tests.MainTest(m)
+}
diff --git a/models/migrations/v1_15/v178.go b/models/migrations/v1_15/v178.go
new file mode 100644
index 0000000..6d236eb
--- /dev/null
+++ b/models/migrations/v1_15/v178.go
@@ -0,0 +1,17 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_15 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddLFSMirrorColumns(x *xorm.Engine) error {
+ type Mirror struct {
+ LFS bool `xorm:"lfs_enabled NOT NULL DEFAULT false"`
+ LFSEndpoint string `xorm:"lfs_endpoint TEXT"`
+ }
+
+ return x.Sync(new(Mirror))
+}
diff --git a/models/migrations/v1_15/v179.go b/models/migrations/v1_15/v179.go
new file mode 100644
index 0000000..f6b142e
--- /dev/null
+++ b/models/migrations/v1_15/v179.go
@@ -0,0 +1,28 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_15 //nolint
+
+import (
+ "code.gitea.io/gitea/models/migrations/base"
+
+ "xorm.io/xorm"
+ "xorm.io/xorm/schemas"
+)
+
+func ConvertAvatarURLToText(x *xorm.Engine) error {
+ dbType := x.Dialect().URI().DBType
+ if dbType == schemas.SQLITE { // For SQLITE, varchar or char will always be represented as TEXT
+ return nil
+ }
+
+ // Some oauth2 providers may give very long avatar urls (e.g. Google)
+ return base.ModifyColumn(x, "external_login_user", &schemas.Column{
+ Name: "avatar_url",
+ SQLType: schemas.SQLType{
+ Name: schemas.Text,
+ },
+ Nullable: true,
+ DefaultIsEmpty: true,
+ })
+}
diff --git a/models/migrations/v1_15/v180.go b/models/migrations/v1_15/v180.go
new file mode 100644
index 0000000..c71e771
--- /dev/null
+++ b/models/migrations/v1_15/v180.go
@@ -0,0 +1,121 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_15 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+ "xorm.io/xorm"
+)
+
+func DeleteMigrationCredentials(x *xorm.Engine) (err error) {
+ // Task represents a task
+ type Task struct {
+ ID int64
+ DoerID int64 `xorm:"index"` // operator
+ OwnerID int64 `xorm:"index"` // repo owner id; while the task is being created, repoID may be zero
+ RepoID int64 `xorm:"index"`
+ Type int
+ Status int `xorm:"index"`
+ StartTime int64
+ EndTime int64
+ PayloadContent string `xorm:"TEXT"`
+ Errors string `xorm:"TEXT"` // if the task failed, the error reason is saved here
+ Created int64 `xorm:"created"`
+ }
+
+ const TaskTypeMigrateRepo = 0
+ const TaskStatusStopped = 2
+
+ const batchSize = 100
+
+ // only match migration tasks that are not pending or running
+ cond := builder.Eq{
+ "type": TaskTypeMigrateRepo,
+ }.And(builder.Gte{
+ "status": TaskStatusStopped,
+ })
+
+ sess := x.NewSession()
+ defer sess.Close()
+
+ for start := 0; ; start += batchSize {
+ tasks := make([]*Task, 0, batchSize)
+ if err := sess.Limit(batchSize, start).Where(cond, 0).Find(&tasks); err != nil {
+ return err
+ }
+ if len(tasks) == 0 {
+ break
+ }
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+ for _, t := range tasks {
+ if t.PayloadContent, err = removeCredentials(t.PayloadContent); err != nil {
+ return err
+ }
+ if _, err := sess.ID(t.ID).Cols("payload_content").Update(t); err != nil {
+ return err
+ }
+ }
+ if err := sess.Commit(); err != nil {
+ return err
+ }
+ }
+ return err
+}
+
+func removeCredentials(payload string) (string, error) {
+ // MigrateOptions defines the way a repository gets migrated
+ // this is for internal use by the migrations module and the functions that interact with it
+ type MigrateOptions struct {
+ // required: true
+ CloneAddr string `json:"clone_addr" binding:"Required"`
+ CloneAddrEncrypted string `json:"clone_addr_encrypted,omitempty"`
+ AuthUsername string `json:"auth_username"`
+ AuthPassword string `json:"-"`
+ AuthPasswordEncrypted string `json:"auth_password_encrypted,omitempty"`
+ AuthToken string `json:"-"`
+ AuthTokenEncrypted string `json:"auth_token_encrypted,omitempty"`
+ // required: true
+ UID int `json:"uid" binding:"Required"`
+ // required: true
+ RepoName string `json:"repo_name" binding:"Required"`
+ Mirror bool `json:"mirror"`
+ LFS bool `json:"lfs"`
+ LFSEndpoint string `json:"lfs_endpoint"`
+ Private bool `json:"private"`
+ Description string `json:"description"`
+ OriginalURL string
+ GitServiceType int
+ Wiki bool
+ Issues bool
+ Milestones bool
+ Labels bool
+ Releases bool
+ Comments bool
+ PullRequests bool
+ ReleaseAssets bool
+ MigrateToRepoID int64
+ MirrorInterval string `json:"mirror_interval"`
+ }
+
+ var opts MigrateOptions
+ err := json.Unmarshal([]byte(payload), &opts)
+ if err != nil {
+ return "", err
+ }
+
+ opts.AuthPassword = ""
+ opts.AuthToken = ""
+ opts.CloneAddr = util.SanitizeCredentialURLs(opts.CloneAddr)
+
+ confBytes, err := json.Marshal(opts)
+ if err != nil {
+ return "", err
+ }
+ return string(confBytes), nil
+}
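
A hedged usage sketch for removeCredentials, assuming it is called from within this package; the payload is invented, and the exact masked form of clone_addr depends on util.SanitizeCredentialURLs:

    // Invented payload: a stopped migration task with embedded credentials.
    payload := `{"clone_addr":"https://user:secret@example.com/r.git","uid":1,"repo_name":"r"}`
    cleaned, err := removeCredentials(payload)
    if err != nil {
        return err
    }
    // cleaned keeps clone_addr, uid and repo_name; the userinfo in clone_addr
    // is replaced by a sanitized placeholder, and auth_password/auth_token can
    // never reappear since those fields are tagged json:"-".
    _ = cleaned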
diff --git a/models/migrations/v1_15/v181.go b/models/migrations/v1_15/v181.go
new file mode 100644
index 0000000..2185ed0
--- /dev/null
+++ b/models/migrations/v1_15/v181.go
@@ -0,0 +1,91 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_15 //nolint
+
+import (
+ "strings"
+
+ "xorm.io/xorm"
+)
+
+func AddPrimaryEmail2EmailAddress(x *xorm.Engine) error {
+ type User struct {
+ ID int64 `xorm:"pk autoincr"`
+ Email string `xorm:"NOT NULL"`
+ IsActive bool `xorm:"INDEX"` // Activate primary email
+ }
+
+ type EmailAddress1 struct {
+ ID int64 `xorm:"pk autoincr"`
+ UID int64 `xorm:"INDEX NOT NULL"`
+ Email string `xorm:"UNIQUE NOT NULL"`
+ LowerEmail string
+ IsActivated bool
+ IsPrimary bool `xorm:"DEFAULT(false) NOT NULL"`
+ }
+
+ // Add lower_email and is_primary columns
+ if err := x.Table("email_address").Sync(new(EmailAddress1)); err != nil {
+ return err
+ }
+
+ if _, err := x.Exec("UPDATE email_address SET lower_email=LOWER(email), is_primary=?", false); err != nil {
+ return err
+ }
+
+ type EmailAddress struct {
+ ID int64 `xorm:"pk autoincr"`
+ UID int64 `xorm:"INDEX NOT NULL"`
+ Email string `xorm:"UNIQUE NOT NULL"`
+ LowerEmail string `xorm:"UNIQUE NOT NULL"`
+ IsActivated bool
+ IsPrimary bool `xorm:"DEFAULT(false) NOT NULL"`
+ }
+
+ // make lower_email unique
+ if err := x.Sync(new(EmailAddress)); err != nil {
+ return err
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+
+ const batchSize = 100
+
+ for start := 0; ; start += batchSize {
+ users := make([]*User, 0, batchSize)
+ if err := sess.Limit(batchSize, start).Find(&users); err != nil {
+ return err
+ }
+ if len(users) == 0 {
+ break
+ }
+
+ for _, user := range users {
+ exist, err := sess.Where("email=?", user.Email).Table("email_address").Exist()
+ if err != nil {
+ return err
+ }
+ if !exist {
+ if _, err := sess.Insert(&EmailAddress{
+ UID: user.ID,
+ Email: user.Email,
+ LowerEmail: strings.ToLower(user.Email),
+ IsActivated: user.IsActive,
+ IsPrimary: true,
+ }); err != nil {
+ return err
+ }
+ } else {
+ if _, err := sess.Where("email=?", user.Email).Cols("is_primary").Update(&EmailAddress{
+ IsPrimary: true,
+ }); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ return nil
+}
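
DeleteMigrationCredentials above and AddPrimaryEmail2EmailAddress here share the same offset-paging shape. A generic sketch of that loop, where Row and sess are placeholders; the pattern assumes the matched rows are not deleted mid-loop, otherwise fixed offsets would skip records:

    const batchSize = 100
    for start := 0; ; start += batchSize {
        rows := make([]*Row, 0, batchSize) // Row: placeholder bean
        if err := sess.Limit(batchSize, start).Find(&rows); err != nil {
            return err
        }
        if len(rows) == 0 {
            break // past the last page
        }
        for _, r := range rows {
            _ = r // process the row here
        }
    }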
diff --git a/models/migrations/v1_15/v181_test.go b/models/migrations/v1_15/v181_test.go
new file mode 100644
index 0000000..ead26f5
--- /dev/null
+++ b/models/migrations/v1_15/v181_test.go
@@ -0,0 +1,56 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_15 //nolint
+
+import (
+ "strings"
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_AddPrimaryEmail2EmailAddress(t *testing.T) {
+ type User struct {
+ ID int64
+ Email string
+ IsActive bool
+ }
+
+ // Prepare and load the testing database
+ x, deferable := migration_tests.PrepareTestEnv(t, 0, new(User))
+ if x == nil || t.Failed() {
+ defer deferable()
+ return
+ }
+ defer deferable()
+
+ err := AddPrimaryEmail2EmailAddress(x)
+ require.NoError(t, err)
+
+ type EmailAddress struct {
+ ID int64 `xorm:"pk autoincr"`
+ UID int64 `xorm:"INDEX NOT NULL"`
+ Email string `xorm:"UNIQUE NOT NULL"`
+ LowerEmail string `xorm:"UNIQUE NOT NULL"`
+ IsActivated bool
+ IsPrimary bool `xorm:"DEFAULT(false) NOT NULL"`
+ }
+
+ users := make([]User, 0, 20)
+ err = x.Find(&users)
+ require.NoError(t, err)
+
+ for _, user := range users {
+ var emailAddress EmailAddress
+ has, err := x.Where("lower_email=?", strings.ToLower(user.Email)).Get(&emailAddress)
+ require.NoError(t, err)
+ assert.True(t, has)
+ assert.True(t, emailAddress.IsPrimary)
+ assert.EqualValues(t, user.IsActive, emailAddress.IsActivated)
+ assert.EqualValues(t, user.ID, emailAddress.UID)
+ }
+}
diff --git a/models/migrations/v1_15/v182.go b/models/migrations/v1_15/v182.go
new file mode 100644
index 0000000..9ca500c
--- /dev/null
+++ b/models/migrations/v1_15/v182.go
@@ -0,0 +1,41 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_15 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddIssueResourceIndexTable(x *xorm.Engine) error {
+ type ResourceIndex struct {
+ GroupID int64 `xorm:"pk"`
+ MaxIndex int64 `xorm:"index"`
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ if err := sess.Table("issue_index").Sync(new(ResourceIndex)); err != nil {
+ return err
+ }
+
+ // Remove data we're going to rebuild
+ if _, err := sess.Table("issue_index").Where("1=1").Delete(&ResourceIndex{}); err != nil {
+ return err
+ }
+
+ // Create current data for all repositories with issues and PRs
+ if _, err := sess.Exec("INSERT INTO issue_index (group_id, max_index) " +
+ "SELECT max_data.repo_id, max_data.max_index " +
+ "FROM ( SELECT issue.repo_id AS repo_id, max(issue.`index`) AS max_index " +
+ "FROM issue GROUP BY issue.repo_id) AS max_data"); err != nil {
+ return err
+ }
+
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_15/v182_test.go b/models/migrations/v1_15/v182_test.go
new file mode 100644
index 0000000..eb21311
--- /dev/null
+++ b/models/migrations/v1_15/v182_test.go
@@ -0,0 +1,61 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_15 //nolint
+
+import (
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_AddIssueResourceIndexTable(t *testing.T) {
+ // Create the models used in the migration
+ type Issue struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"UNIQUE(s)"`
+ Index int64 `xorm:"UNIQUE(s)"`
+ }
+
+ // Prepare and load the testing database
+ x, deferable := migration_tests.PrepareTestEnv(t, 0, new(Issue))
+ if x == nil || t.Failed() {
+ defer deferable()
+ return
+ }
+ defer deferable()
+
+ // Run the migration
+ if err := AddIssueResourceIndexTable(x); err != nil {
+ require.NoError(t, err)
+ return
+ }
+
+ type ResourceIndex struct {
+ GroupID int64 `xorm:"pk"`
+ MaxIndex int64 `xorm:"index"`
+ }
+
+ start := 0
+ const batchSize = 1000
+ for {
+ indexes := make([]ResourceIndex, 0, batchSize)
+ err := x.Table("issue_index").Limit(batchSize, start).Find(&indexes)
+ require.NoError(t, err)
+
+ for _, idx := range indexes {
+ var maxIndex int
+ has, err := x.SQL("SELECT max(`index`) FROM issue WHERE repo_id = ?", idx.GroupID).Get(&maxIndex)
+ require.NoError(t, err)
+ assert.True(t, has)
+ assert.EqualValues(t, maxIndex, idx.MaxIndex)
+ }
+ if len(indexes) < batchSize {
+ break
+ }
+ start += len(indexes)
+ }
+}
diff --git a/models/migrations/v1_15/v183.go b/models/migrations/v1_15/v183.go
new file mode 100644
index 0000000..effad1b
--- /dev/null
+++ b/models/migrations/v1_15/v183.go
@@ -0,0 +1,38 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_15 //nolint
+
+import (
+ "fmt"
+ "time"
+
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func CreatePushMirrorTable(x *xorm.Engine) error {
+ type PushMirror struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"INDEX"`
+ RemoteName string
+
+ Interval time.Duration
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+ LastUpdateUnix timeutil.TimeStamp `xorm:"INDEX last_update"`
+ LastError string `xorm:"text"`
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ if err := sess.Sync(new(PushMirror)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_15/v184.go b/models/migrations/v1_15/v184.go
new file mode 100644
index 0000000..871c9db
--- /dev/null
+++ b/models/migrations/v1_15/v184.go
@@ -0,0 +1,66 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_15 //nolint
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/migrations/base"
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+)
+
+func RenameTaskErrorsToMessage(x *xorm.Engine) error {
+ type Task struct {
+ Errors string `xorm:"TEXT"` // if the task failed, the error reason is saved here
+ Type int
+ Status int `xorm:"index"`
+ }
+
+ // This migration may be rerun, so check whether it has already been run
+ messageExist, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "task", "message")
+ if err != nil {
+ return err
+ }
+
+ if messageExist {
+ errorsExist, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "task", "errors")
+ if err != nil {
+ return err
+ }
+ if !errorsExist {
+ return nil
+ }
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ if err := sess.Sync(new(Task)); err != nil {
+ return fmt.Errorf("error on Sync: %w", err)
+ }
+
+ if messageExist {
+ // if both errors and message exist, drop message at first
+ if err := base.DropTableColumns(sess, "task", "message"); err != nil {
+ return err
+ }
+ }
+
+ if setting.Database.Type.IsMySQL() {
+ if _, err := sess.Exec("ALTER TABLE `task` CHANGE errors message text"); err != nil {
+ return err
+ }
+ } else {
+ if _, err := sess.Exec("ALTER TABLE `task` RENAME COLUMN errors TO message"); err != nil {
+ return err
+ }
+ }
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_15/v185.go b/models/migrations/v1_15/v185.go
new file mode 100644
index 0000000..e5878ec
--- /dev/null
+++ b/models/migrations/v1_15/v185.go
@@ -0,0 +1,21 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_15 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddRepoArchiver(x *xorm.Engine) error {
+ // RepoArchiver represents a request to archive a repository at a given commit
+ type RepoArchiver struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"index unique(s)"`
+ Type int `xorm:"unique(s)"`
+ Status int
+ CommitID string `xorm:"VARCHAR(40) unique(s)"`
+ CreatedUnix int64 `xorm:"INDEX NOT NULL created"`
+ }
+ return x.Sync(new(RepoArchiver))
+}
diff --git a/models/migrations/v1_15/v186.go b/models/migrations/v1_15/v186.go
new file mode 100644
index 0000000..01aab3a
--- /dev/null
+++ b/models/migrations/v1_15/v186.go
@@ -0,0 +1,25 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_15 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func CreateProtectedTagTable(x *xorm.Engine) error {
+ type ProtectedTag struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64
+ NamePattern string
+ AllowlistUserIDs []int64 `xorm:"JSON TEXT"`
+ AllowlistTeamIDs []int64 `xorm:"JSON TEXT"`
+
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"updated"`
+ }
+
+ return x.Sync(new(ProtectedTag))
+}
diff --git a/models/migrations/v1_15/v187.go b/models/migrations/v1_15/v187.go
new file mode 100644
index 0000000..21cd677
--- /dev/null
+++ b/models/migrations/v1_15/v187.go
@@ -0,0 +1,47 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_15 //nolint
+
+import (
+ "code.gitea.io/gitea/models/migrations/base"
+
+ "xorm.io/xorm"
+)
+
+func DropWebhookColumns(x *xorm.Engine) error {
+ // Make sure the columns exist before dropping them
+ type Webhook struct {
+ Signature string `xorm:"TEXT"`
+ IsSSL bool `xorm:"is_ssl"`
+ }
+ if err := x.Sync(new(Webhook)); err != nil {
+ return err
+ }
+
+ type HookTask struct {
+ Typ string `xorm:"VARCHAR(16) index"`
+ URL string `xorm:"TEXT"`
+ Signature string `xorm:"TEXT"`
+ HTTPMethod string `xorm:"http_method"`
+ ContentType int
+ IsSSL bool
+ }
+ if err := x.Sync(new(HookTask)); err != nil {
+ return err
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+ if err := base.DropTableColumns(sess, "webhook", "signature", "is_ssl"); err != nil {
+ return err
+ }
+ if err := base.DropTableColumns(sess, "hook_task", "typ", "url", "signature", "http_method", "content_type", "is_ssl"); err != nil {
+ return err
+ }
+
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_15/v188.go b/models/migrations/v1_15/v188.go
new file mode 100644
index 0000000..71e45ca
--- /dev/null
+++ b/models/migrations/v1_15/v188.go
@@ -0,0 +1,14 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_15 //nolint
+
+import "xorm.io/xorm"
+
+func AddKeyIsVerified(x *xorm.Engine) error {
+ type GPGKey struct {
+ Verified bool `xorm:"NOT NULL DEFAULT false"`
+ }
+
+ return x.Sync(new(GPGKey))
+}
diff --git a/models/migrations/v1_16/main_test.go b/models/migrations/v1_16/main_test.go
new file mode 100644
index 0000000..4961177
--- /dev/null
+++ b/models/migrations/v1_16/main_test.go
@@ -0,0 +1,14 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_16 //nolint
+
+import (
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+)
+
+func TestMain(m *testing.M) {
+ migration_tests.MainTest(m)
+}
diff --git a/models/migrations/v1_16/v189.go b/models/migrations/v1_16/v189.go
new file mode 100644
index 0000000..5649645
--- /dev/null
+++ b/models/migrations/v1_16/v189.go
@@ -0,0 +1,111 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_16 //nolint
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "code.gitea.io/gitea/models/migrations/base"
+ "code.gitea.io/gitea/modules/json"
+
+ "xorm.io/xorm"
+)
+
+func UnwrapLDAPSourceCfg(x *xorm.Engine) error {
+ jsonUnmarshalHandleDoubleEncode := func(bs []byte, v any) error {
+ err := json.Unmarshal(bs, v)
+ if err != nil {
+ ok := true
+ rs := []byte{}
+ temp := make([]byte, 2)
+ for _, rn := range string(bs) {
+ if rn > 0xffff {
+ ok = false
+ break
+ }
+ binary.LittleEndian.PutUint16(temp, uint16(rn))
+ rs = append(rs, temp...)
+ }
+ if ok {
+ if rs[0] == 0xff && rs[1] == 0xfe {
+ rs = rs[2:]
+ }
+ err = json.Unmarshal(rs, v)
+ }
+ }
+ if err != nil && len(bs) > 2 && bs[0] == 0xff && bs[1] == 0xfe {
+ err = json.Unmarshal(bs[2:], v)
+ }
+ return err
+ }
+
+ // LoginSource represents an external way for authorizing users.
+ type LoginSource struct {
+ ID int64 `xorm:"pk autoincr"`
+ Type int
+ IsActived bool `xorm:"INDEX NOT NULL DEFAULT false"`
+ IsActive bool `xorm:"INDEX NOT NULL DEFAULT false"`
+ Cfg string `xorm:"TEXT"`
+ }
+
+ const ldapType = 2
+ const dldapType = 5
+
+ type WrappedSource struct {
+ Source map[string]any
+ }
+
+ // ensure the new is_active column exists before it is filled below
+ if err := x.Sync(new(LoginSource)); err != nil {
+ return err
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+
+ const batchSize = 100
+ for start := 0; ; start += batchSize {
+ sources := make([]*LoginSource, 0, batchSize)
+ if err := sess.Limit(batchSize, start).Where("`type` = ? OR `type` = ?", ldapType, dldapType).Find(&sources); err != nil {
+ return err
+ }
+ if len(sources) == 0 {
+ break
+ }
+
+ for _, source := range sources {
+ wrapped := &WrappedSource{
+ Source: map[string]any{},
+ }
+ err := jsonUnmarshalHandleDoubleEncode([]byte(source.Cfg), &wrapped)
+ if err != nil {
+ return fmt.Errorf("failed to unmarshal %s: %w", source.Cfg, err)
+ }
+ if len(wrapped.Source) > 0 {
+ bs, err := json.Marshal(wrapped.Source)
+ if err != nil {
+ return err
+ }
+ source.Cfg = string(bs)
+ if _, err := sess.ID(source.ID).Cols("cfg").Update(source); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ if _, err := x.SetExpr("is_active", "is_actived").Update(&LoginSource{}); err != nil {
+ return fmt.Errorf("SetExpr Update failed: %w", err)
+ }
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+ if err := base.DropTableColumns(sess, "login_source", "is_actived"); err != nil {
+ return err
+ }
+
+ return sess.Commit()
+}
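
The jsonUnmarshalHandleDoubleEncode closure above recovers configs that were accidentally persisted as UTF-16: it re-packs each rune as a little-endian byte pair and strips a leading byte-order mark before retrying. A minimal sketch of the marker both fallback paths key on; the function name is an invented illustration:

    // hasUTF16LEBOM reports whether a blob starts with the UTF-16LE
    // byte-order mark 0xFF 0xFE.
    func hasUTF16LEBOM(bs []byte) bool {
        return len(bs) >= 2 && bs[0] == 0xff && bs[1] == 0xfe
    }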
diff --git a/models/migrations/v1_16/v189_test.go b/models/migrations/v1_16/v189_test.go
new file mode 100644
index 0000000..88c6ebd
--- /dev/null
+++ b/models/migrations/v1_16/v189_test.go
@@ -0,0 +1,83 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_16 //nolint
+
+import (
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+ "code.gitea.io/gitea/modules/json"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// LoginSource represents an external way for authorizing users.
+type LoginSourceOriginalV189 struct {
+ ID int64 `xorm:"pk autoincr"`
+ Type int
+ IsActived bool `xorm:"INDEX NOT NULL DEFAULT false"`
+ Cfg string `xorm:"TEXT"`
+ Expected string `xorm:"TEXT"`
+}
+
+func (ls *LoginSourceOriginalV189) TableName() string {
+ return "login_source"
+}
+
+func Test_UnwrapLDAPSourceCfg(t *testing.T) {
+ // Prepare and load the testing database
+ x, deferable := migration_tests.PrepareTestEnv(t, 0, new(LoginSourceOriginalV189))
+ if x == nil || t.Failed() {
+ defer deferable()
+ return
+ }
+ defer deferable()
+
+ // LoginSource represents an external way for authorizing users.
+ type LoginSource struct {
+ ID int64 `xorm:"pk autoincr"`
+ Type int
+ IsActive bool `xorm:"INDEX NOT NULL DEFAULT false"`
+ Cfg string `xorm:"TEXT"`
+ Expected string `xorm:"TEXT"`
+ }
+
+ // Run the migration
+ if err := UnwrapLDAPSourceCfg(x); err != nil {
+ require.NoError(t, err)
+ return
+ }
+
+ const batchSize = 100
+ for start := 0; ; start += batchSize {
+ sources := make([]*LoginSource, 0, batchSize)
+ if err := x.Table("login_source").Limit(batchSize, start).Find(&sources); err != nil {
+ require.NoError(t, err)
+ return
+ }
+
+ if len(sources) == 0 {
+ break
+ }
+
+ for _, source := range sources {
+ converted := map[string]any{}
+ expected := map[string]any{}
+
+ if err := json.Unmarshal([]byte(source.Cfg), &converted); err != nil {
+ require.NoError(t, err)
+ return
+ }
+
+ if err := json.Unmarshal([]byte(source.Expected), &expected); err != nil {
+ require.NoError(t, err)
+ return
+ }
+
+ assert.EqualValues(t, expected, converted, "UnwrapLDAPSourceCfg failed for %d", source.ID)
+ assert.EqualValues(t, source.ID%2 == 0, source.IsActive, "UnwrapLDAPSourceCfg failed for %d", source.ID)
+ }
+ }
+}
diff --git a/models/migrations/v1_16/v190.go b/models/migrations/v1_16/v190.go
new file mode 100644
index 0000000..5953802
--- /dev/null
+++ b/models/migrations/v1_16/v190.go
@@ -0,0 +1,23 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_16 //nolint
+
+import (
+ "fmt"
+
+ "xorm.io/xorm"
+)
+
+func AddAgitFlowPullRequest(x *xorm.Engine) error {
+ type PullRequestFlow int
+
+ type PullRequest struct {
+ Flow PullRequestFlow `xorm:"NOT NULL DEFAULT 0"`
+ }
+
+ if err := x.Sync(new(PullRequest)); err != nil {
+ return fmt.Errorf("sync2: %w", err)
+ }
+ return nil
+}
diff --git a/models/migrations/v1_16/v191.go b/models/migrations/v1_16/v191.go
new file mode 100644
index 0000000..c618783
--- /dev/null
+++ b/models/migrations/v1_16/v191.go
@@ -0,0 +1,28 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_16 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+)
+
+func AlterIssueAndCommentTextFieldsToLongText(x *xorm.Engine) error {
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ if setting.Database.Type.IsMySQL() {
+ if _, err := sess.Exec("ALTER TABLE `issue` CHANGE `content` `content` LONGTEXT"); err != nil {
+ return err
+ }
+ if _, err := sess.Exec("ALTER TABLE `comment` CHANGE `content` `content` LONGTEXT, CHANGE `patch` `patch` LONGTEXT"); err != nil {
+ return err
+ }
+ }
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_16/v192.go b/models/migrations/v1_16/v192.go
new file mode 100644
index 0000000..2d5d158
--- /dev/null
+++ b/models/migrations/v1_16/v192.go
@@ -0,0 +1,19 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_16 //nolint
+
+import (
+ "code.gitea.io/gitea/models/migrations/base"
+
+ "xorm.io/xorm"
+)
+
+func RecreateIssueResourceIndexTable(x *xorm.Engine) error {
+ type IssueIndex struct {
+ GroupID int64 `xorm:"pk"`
+ MaxIndex int64 `xorm:"index"`
+ }
+
+ return base.RecreateTables(new(IssueIndex))(x)
+}
diff --git a/models/migrations/v1_16/v193.go b/models/migrations/v1_16/v193.go
new file mode 100644
index 0000000..8d3ce7a
--- /dev/null
+++ b/models/migrations/v1_16/v193.go
@@ -0,0 +1,32 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_16 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddRepoIDForAttachment(x *xorm.Engine) error {
+ type Attachment struct {
+ ID int64 `xorm:"pk autoincr"`
+ UUID string `xorm:"uuid UNIQUE"`
+ RepoID int64 `xorm:"INDEX"` // this should not be zero
+ IssueID int64 `xorm:"INDEX"` // maybe zero when creating
+ ReleaseID int64 `xorm:"INDEX"` // maybe zero when creating
+ UploaderID int64 `xorm:"INDEX DEFAULT 0"`
+ }
+ if err := x.Sync(new(Attachment)); err != nil {
+ return err
+ }
+
+ if _, err := x.Exec("UPDATE `attachment` set repo_id = (SELECT repo_id FROM `issue` WHERE `issue`.id = `attachment`.issue_id) WHERE `attachment`.issue_id > 0"); err != nil {
+ return err
+ }
+
+ if _, err := x.Exec("UPDATE `attachment` set repo_id = (SELECT repo_id FROM `release` WHERE `release`.id = `attachment`.release_id) WHERE `attachment`.release_id > 0"); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/models/migrations/v1_16/v193_test.go b/models/migrations/v1_16/v193_test.go
new file mode 100644
index 0000000..0da6708
--- /dev/null
+++ b/models/migrations/v1_16/v193_test.go
@@ -0,0 +1,81 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_16 //nolint
+
+import (
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_AddRepoIDForAttachment(t *testing.T) {
+ type Attachment struct {
+ ID int64 `xorm:"pk autoincr"`
+ UUID string `xorm:"uuid UNIQUE"`
+ IssueID int64 `xorm:"INDEX"` // maybe zero when creating
+ ReleaseID int64 `xorm:"INDEX"` // maybe zero when creating
+ UploaderID int64 `xorm:"INDEX DEFAULT 0"`
+ }
+
+ type Issue struct {
+ ID int64
+ RepoID int64
+ }
+
+ type Release struct {
+ ID int64
+ RepoID int64
+ }
+
+ // Prepare and load the testing database
+ x, deferrable := migration_tests.PrepareTestEnv(t, 0, new(Attachment), new(Issue), new(Release))
+ defer deferrable()
+ if x == nil || t.Failed() {
+ return
+ }
+
+ // Run the migration
+ if err := AddRepoIDForAttachment(x); err != nil {
+ require.NoError(t, err)
+ return
+ }
+
+ type NewAttachment struct {
+ ID int64 `xorm:"pk autoincr"`
+ UUID string `xorm:"uuid UNIQUE"`
+ RepoID int64 `xorm:"INDEX"` // this should not be zero
+ IssueID int64 `xorm:"INDEX"` // maybe zero when creating
+ ReleaseID int64 `xorm:"INDEX"` // maybe zero when creating
+ UploaderID int64 `xorm:"INDEX DEFAULT 0"`
+ }
+
+ var issueAttachments []*NewAttachment
+ err := x.Table("attachment").Where("issue_id > 0").Find(&issueAttachments)
+ require.NoError(t, err)
+ for _, attach := range issueAttachments {
+ assert.Positive(t, attach.RepoID)
+ assert.Positive(t, attach.IssueID)
+ var issue Issue
+ has, err := x.ID(attach.IssueID).Get(&issue)
+ require.NoError(t, err)
+ assert.True(t, has)
+ assert.EqualValues(t, attach.RepoID, issue.RepoID)
+ }
+
+ var releaseAttachments []*NewAttachment
+ err = x.Table("attachment").Where("release_id > 0").Find(&releaseAttachments)
+ require.NoError(t, err)
+ for _, attach := range releaseAttachments {
+ assert.Positive(t, attach.RepoID)
+ assert.Positive(t, attach.ReleaseID)
+ var release Release
+ has, err := x.ID(attach.ReleaseID).Get(&release)
+ require.NoError(t, err)
+ assert.True(t, has)
+ assert.EqualValues(t, attach.RepoID, release.RepoID)
+ }
+}
diff --git a/models/migrations/v1_16/v194.go b/models/migrations/v1_16/v194.go
new file mode 100644
index 0000000..6aa13c5
--- /dev/null
+++ b/models/migrations/v1_16/v194.go
@@ -0,0 +1,21 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_16 //nolint
+
+import (
+ "fmt"
+
+ "xorm.io/xorm"
+)
+
+func AddBranchProtectionUnprotectedFilesColumn(x *xorm.Engine) error {
+ type ProtectedBranch struct {
+ UnprotectedFilePatterns string `xorm:"TEXT"`
+ }
+
+ if err := x.Sync(new(ProtectedBranch)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+ return nil
+}
diff --git a/models/migrations/v1_16/v195.go b/models/migrations/v1_16/v195.go
new file mode 100644
index 0000000..6d7e941
--- /dev/null
+++ b/models/migrations/v1_16/v195.go
@@ -0,0 +1,46 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_16 //nolint
+
+import (
+ "fmt"
+
+ "xorm.io/xorm"
+)
+
+func AddTableCommitStatusIndex(x *xorm.Engine) error {
+ // CommitStatusIndex tracks the highest commit status index per (repo_id, sha)
+ type CommitStatusIndex struct {
+ ID int64
+ RepoID int64 `xorm:"unique(repo_sha)"`
+ SHA string `xorm:"unique(repo_sha)"`
+ MaxIndex int64 `xorm:"index"`
+ }
+
+ if err := x.Sync(new(CommitStatusIndex)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ // Remove data we're going to rebuild
+ if _, err := sess.Table("commit_status_index").Where("1=1").Delete(&CommitStatusIndex{}); err != nil {
+ return err
+ }
+
+ // Create current data for all repositories with issues and PRs
+ if _, err := sess.Exec("INSERT INTO commit_status_index (repo_id, sha, max_index) " +
+ "SELECT max_data.repo_id, max_data.sha, max_data.max_index " +
+ "FROM ( SELECT commit_status.repo_id AS repo_id, commit_status.sha AS sha, max(commit_status.`index`) AS max_index " +
+ "FROM commit_status GROUP BY commit_status.repo_id, commit_status.sha) AS max_data"); err != nil {
+ return err
+ }
+
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_16/v195_test.go b/models/migrations/v1_16/v195_test.go
new file mode 100644
index 0000000..9a62fc9
--- /dev/null
+++ b/models/migrations/v1_16/v195_test.go
@@ -0,0 +1,64 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_16 //nolint
+
+import (
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_AddTableCommitStatusIndex(t *testing.T) {
+ // Create the models used in the migration
+ type CommitStatus struct {
+ ID int64 `xorm:"pk autoincr"`
+ Index int64 `xorm:"INDEX UNIQUE(repo_sha_index)"`
+ RepoID int64 `xorm:"INDEX UNIQUE(repo_sha_index)"`
+ SHA string `xorm:"VARCHAR(64) NOT NULL INDEX UNIQUE(repo_sha_index)"`
+ }
+
+ // Prepare and load the testing database
+ x, deferable := migration_tests.PrepareTestEnv(t, 0, new(CommitStatus))
+ if x == nil || t.Failed() {
+ defer deferable()
+ return
+ }
+ defer deferable()
+
+ // Run the migration
+ if err := AddTableCommitStatusIndex(x); err != nil {
+ require.NoError(t, err)
+ return
+ }
+
+ type CommitStatusIndex struct {
+ ID int64
+ RepoID int64 `xorm:"unique(repo_sha)"`
+ SHA string `xorm:"unique(repo_sha)"`
+ MaxIndex int64 `xorm:"index"`
+ }
+
+ start := 0
+ const batchSize = 1000
+ for {
+ indexes := make([]CommitStatusIndex, 0, batchSize)
+ err := x.Table("commit_status_index").Limit(batchSize, start).Find(&indexes)
+ require.NoError(t, err)
+
+ for _, idx := range indexes {
+ var maxIndex int
+ has, err := x.SQL("SELECT max(`index`) FROM commit_status WHERE repo_id = ? AND sha = ?", idx.RepoID, idx.SHA).Get(&maxIndex)
+ require.NoError(t, err)
+ assert.True(t, has)
+ assert.EqualValues(t, maxIndex, idx.MaxIndex)
+ }
+ if len(indexes) < batchSize {
+ break
+ }
+ start += len(indexes)
+ }
+}
diff --git a/models/migrations/v1_16/v196.go b/models/migrations/v1_16/v196.go
new file mode 100644
index 0000000..7cbafc6
--- /dev/null
+++ b/models/migrations/v1_16/v196.go
@@ -0,0 +1,21 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_16 //nolint
+
+import (
+ "fmt"
+
+ "xorm.io/xorm"
+)
+
+func AddColorColToProjectBoard(x *xorm.Engine) error {
+ type ProjectBoard struct {
+ Color string `xorm:"VARCHAR(7)"`
+ }
+
+ if err := x.Sync(new(ProjectBoard)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+ return nil
+}
diff --git a/models/migrations/v1_16/v197.go b/models/migrations/v1_16/v197.go
new file mode 100644
index 0000000..97888b2
--- /dev/null
+++ b/models/migrations/v1_16/v197.go
@@ -0,0 +1,19 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_16 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddRenamedBranchTable(x *xorm.Engine) error {
+ type RenamedBranch struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"INDEX NOT NULL"`
+ From string
+ To string
+ CreatedUnix int64 `xorm:"created"`
+ }
+ return x.Sync(new(RenamedBranch))
+}
diff --git a/models/migrations/v1_16/v198.go b/models/migrations/v1_16/v198.go
new file mode 100644
index 0000000..115bb31
--- /dev/null
+++ b/models/migrations/v1_16/v198.go
@@ -0,0 +1,32 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_16 //nolint
+
+import (
+ "fmt"
+
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func AddTableIssueContentHistory(x *xorm.Engine) error {
+ type IssueContentHistory struct {
+ ID int64 `xorm:"pk autoincr"`
+ PosterID int64
+ IssueID int64 `xorm:"INDEX"`
+ CommentID int64 `xorm:"INDEX"`
+ EditedUnix timeutil.TimeStamp `xorm:"INDEX"`
+ ContentText string `xorm:"LONGTEXT"`
+ IsFirstCreated bool
+ IsDeleted bool
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Sync(new(IssueContentHistory)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_16/v199.go b/models/migrations/v1_16/v199.go
new file mode 100644
index 0000000..6adcf89
--- /dev/null
+++ b/models/migrations/v1_16/v199.go
@@ -0,0 +1,6 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_16 //nolint
+
+// We used to use the `remote_version` table to store information for the updater; now we use `AppState`, so this migration is a no-op.
diff --git a/models/migrations/v1_16/v200.go b/models/migrations/v1_16/v200.go
new file mode 100644
index 0000000..c08c20e
--- /dev/null
+++ b/models/migrations/v1_16/v200.go
@@ -0,0 +1,22 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_16 //nolint
+
+import (
+ "fmt"
+
+ "xorm.io/xorm"
+)
+
+func AddTableAppState(x *xorm.Engine) error {
+ type AppState struct {
+ ID string `xorm:"pk varchar(200)"`
+ Revision int64
+ Content string `xorm:"LONGTEXT"`
+ }
+ if err := x.Sync(new(AppState)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+ return nil
+}
diff --git a/models/migrations/v1_16/v201.go b/models/migrations/v1_16/v201.go
new file mode 100644
index 0000000..35e0c9f
--- /dev/null
+++ b/models/migrations/v1_16/v201.go
@@ -0,0 +1,14 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_16 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func DropTableRemoteVersion(x *xorm.Engine) error {
+ // drop the orphaned table introduced in `v199`; the update checker now uses AppState, so this table is no longer needed
+ _ = x.DropTables("remote_version")
+ return nil
+}
diff --git a/models/migrations/v1_16/v202.go b/models/migrations/v1_16/v202.go
new file mode 100644
index 0000000..6ba3615
--- /dev/null
+++ b/models/migrations/v1_16/v202.go
@@ -0,0 +1,23 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_16 //nolint
+
+import (
+ "fmt"
+
+ "xorm.io/xorm"
+)
+
+func CreateUserSettingsTable(x *xorm.Engine) error {
+ type UserSetting struct {
+ ID int64 `xorm:"pk autoincr"`
+ UserID int64 `xorm:"index unique(key_userid)"` // to load all of someone's settings
+ SettingKey string `xorm:"varchar(255) index unique(key_userid)"` // ensure key is always lowercase
+ SettingValue string `xorm:"text"`
+ }
+ if err := x.Sync(new(UserSetting)); err != nil {
+ return fmt.Errorf("sync2: %w", err)
+ }
+ return nil
+}
diff --git a/models/migrations/v1_16/v203.go b/models/migrations/v1_16/v203.go
new file mode 100644
index 0000000..e8e6b52
--- /dev/null
+++ b/models/migrations/v1_16/v203.go
@@ -0,0 +1,17 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_16 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddProjectIssueSorting(x *xorm.Engine) error {
+ // ProjectIssue saves relation from issue to a project
+ type ProjectIssue struct {
+ Sorting int64 `xorm:"NOT NULL DEFAULT 0"`
+ }
+
+ return x.Sync(new(ProjectIssue))
+}
diff --git a/models/migrations/v1_16/v204.go b/models/migrations/v1_16/v204.go
new file mode 100644
index 0000000..ece03e1
--- /dev/null
+++ b/models/migrations/v1_16/v204.go
@@ -0,0 +1,14 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_16 //nolint
+
+import "xorm.io/xorm"
+
+func AddSSHKeyIsVerified(x *xorm.Engine) error {
+ type PublicKey struct {
+ Verified bool `xorm:"NOT NULL DEFAULT false"`
+ }
+
+ return x.Sync(new(PublicKey))
+}
diff --git a/models/migrations/v1_16/v205.go b/models/migrations/v1_16/v205.go
new file mode 100644
index 0000000..d6c5770
--- /dev/null
+++ b/models/migrations/v1_16/v205.go
@@ -0,0 +1,42 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_16 //nolint
+
+import (
+ "code.gitea.io/gitea/models/migrations/base"
+
+ "xorm.io/xorm"
+ "xorm.io/xorm/schemas"
+)
+
+func MigrateUserPasswordSalt(x *xorm.Engine) error {
+ dbType := x.Dialect().URI().DBType
+ // For SQLITE, the max length doesn't matter.
+ if dbType == schemas.SQLITE {
+ return nil
+ }
+
+ if err := base.ModifyColumn(x, "user", &schemas.Column{
+ Name: "rands",
+ SQLType: schemas.SQLType{
+ Name: "VARCHAR",
+ },
+ Length: 32,
+ // MySQL will like us again.
+ Nullable: true,
+ DefaultIsEmpty: true,
+ }); err != nil {
+ return err
+ }
+
+ return base.ModifyColumn(x, "user", &schemas.Column{
+ Name: "salt",
+ SQLType: schemas.SQLType{
+ Name: "VARCHAR",
+ },
+ Length: 32,
+ Nullable: true,
+ DefaultIsEmpty: true,
+ })
+}
diff --git a/models/migrations/v1_16/v206.go b/models/migrations/v1_16/v206.go
new file mode 100644
index 0000000..581a7d7
--- /dev/null
+++ b/models/migrations/v1_16/v206.go
@@ -0,0 +1,28 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_16 //nolint
+
+import (
+ "fmt"
+
+ "xorm.io/xorm"
+)
+
+func AddAuthorizeColForTeamUnit(x *xorm.Engine) error {
+ type TeamUnit struct {
+ ID int64 `xorm:"pk autoincr"`
+ OrgID int64 `xorm:"INDEX"`
+ TeamID int64 `xorm:"UNIQUE(s)"`
+ Type int `xorm:"UNIQUE(s)"`
+ AccessMode int
+ }
+
+ if err := x.Sync(new(TeamUnit)); err != nil {
+ return fmt.Errorf("sync2: %w", err)
+ }
+
+ // migrate old permission
+ _, err := x.Exec("UPDATE team_unit SET access_mode = (SELECT authorize FROM team WHERE team.id = team_unit.team_id)")
+ return err
+}
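
The closing UPDATE in AddAuthorizeColForTeamUnit copies each team's authorize level onto its units with a correlated subquery. For engines where correlated UPDATEs are slow, an equivalent row-by-row backfill might look like this sketch; this is a hypothetical alternative, not what the migration does:

    // team mirrors just the two columns the backfill needs.
    type team struct {
        ID        int64
        Authorize int
    }
    var teams []team
    if err := x.Table("team").Cols("id", "authorize").Find(&teams); err != nil {
        return err
    }
    for _, t := range teams {
        if _, err := x.Exec(
            "UPDATE team_unit SET access_mode = ? WHERE team_id = ?",
            t.Authorize, t.ID,
        ); err != nil {
            return err
        }
    }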
diff --git a/models/migrations/v1_16/v207.go b/models/migrations/v1_16/v207.go
new file mode 100644
index 0000000..91208f0
--- /dev/null
+++ b/models/migrations/v1_16/v207.go
@@ -0,0 +1,14 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_16 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddWebAuthnCred(x *xorm.Engine) error {
+ // No-op: don't migrate here - v210 handles this instead.
+
+ return nil
+}
diff --git a/models/migrations/v1_16/v208.go b/models/migrations/v1_16/v208.go
new file mode 100644
index 0000000..1a11ef0
--- /dev/null
+++ b/models/migrations/v1_16/v208.go
@@ -0,0 +1,13 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_16 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func UseBase32HexForCredIDInWebAuthnCredential(x *xorm.Engine) error {
+ // noop
+ return nil
+}
diff --git a/models/migrations/v1_16/v209.go b/models/migrations/v1_16/v209.go
new file mode 100644
index 0000000..be3100e
--- /dev/null
+++ b/models/migrations/v1_16/v209.go
@@ -0,0 +1,16 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_16 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func IncreaseCredentialIDTo410(x *xorm.Engine) error {
+ // no-op
+ // v208 was completely wrong,
+ // so this migration has to be a no-op as well.
+
+ return nil
+}
diff --git a/models/migrations/v1_16/v210.go b/models/migrations/v1_16/v210.go
new file mode 100644
index 0000000..db45b11
--- /dev/null
+++ b/models/migrations/v1_16/v210.go
@@ -0,0 +1,177 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_16 //nolint
+
+import (
+ "crypto/ecdh"
+ "encoding/base32"
+ "errors"
+ "fmt"
+ "strings"
+
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+ "xorm.io/xorm/schemas"
+)
+
+func parseU2FRegistration(raw []byte) (pubKey *ecdh.PublicKey, keyHandle []byte, err error) {
+ if len(raw) < 69 {
+ return nil, nil, errors.New("data is too short")
+ }
+ if raw[0] != 0x05 {
+ return nil, nil, errors.New("invalid reserved byte")
+ }
+ raw = raw[1:]
+
+ pubKey, err = ecdh.P256().NewPublicKey(raw[:65])
+ if err != nil {
+ return nil, nil, err
+ }
+ raw = raw[65:]
+
+ khLen := int(raw[0])
+ raw = raw[1:]
+ if len(raw) < khLen {
+ return nil, nil, errors.New("invalid key handle")
+ }
+ keyHandle = raw[:khLen]
+
+ return pubKey, keyHandle, nil
+}
+
+// v208 migration was completely broken
+func RemigrateU2FCredentials(x *xorm.Engine) error {
+ // Create webauthnCredential table
+ type webauthnCredential struct {
+ ID int64 `xorm:"pk autoincr"`
+ Name string
+ LowerName string `xorm:"unique(s)"`
+ UserID int64 `xorm:"INDEX unique(s)"`
+ CredentialID string `xorm:"INDEX VARCHAR(410)"` // CredentialID in U2F is at most 255 bytes / 5 * 8 = 408 - add a few extra characters for safety
+ PublicKey []byte
+ AttestationType string
+ AAGUID []byte
+ SignCount uint32 `xorm:"BIGINT"`
+ CloneWarning bool
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ }
+ if err := x.Sync(&webauthnCredential{}); err != nil {
+ return err
+ }
+
+ switch x.Dialect().URI().DBType {
+ case schemas.MYSQL:
+ _, err := x.Exec("ALTER TABLE webauthn_credential MODIFY COLUMN credential_id VARCHAR(410)")
+ if err != nil {
+ return err
+ }
+ case schemas.POSTGRES:
+ _, err := x.Exec("ALTER TABLE webauthn_credential ALTER COLUMN credential_id TYPE VARCHAR(410)")
+ if err != nil {
+ return err
+ }
+ default:
+ // SQLite doesn't support ALTER COLUMN, and it already stores strings as TEXT
+ // by default, so no migration is needed and there is nothing to re-migrate
+ }
+
+ exist, err := x.IsTableExist("u2f_registration")
+ if err != nil {
+ return err
+ }
+ if !exist {
+ return nil
+ }
+
+ // Now migrate the old u2f registrations to the new format
+ type u2fRegistration struct {
+ ID int64 `xorm:"pk autoincr"`
+ Name string
+ UserID int64 `xorm:"INDEX"`
+ Raw []byte
+ Counter uint32 `xorm:"BIGINT"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ }
+
+ var start int
+ regs := make([]*u2fRegistration, 0, 50)
+ for {
+ err := x.OrderBy("id").Limit(50, start).Find(&regs)
+ if err != nil {
+ return err
+ }
+
+ err = func() error {
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return fmt.Errorf("unable to start session. Error: %w", err)
+ }
+ for _, reg := range regs {
+ pubKey, keyHandle, err := parseU2FRegistration(reg.Raw)
+ if err != nil {
+ continue
+ }
+ remigrated := &webauthnCredential{
+ ID: reg.ID,
+ Name: reg.Name,
+ LowerName: strings.ToLower(reg.Name),
+ UserID: reg.UserID,
+ CredentialID: base32.HexEncoding.EncodeToString(keyHandle),
+ PublicKey: pubKey.Bytes(),
+ AttestationType: "fido-u2f",
+ AAGUID: []byte{},
+ SignCount: reg.Counter,
+ UpdatedUnix: reg.UpdatedUnix,
+ CreatedUnix: reg.CreatedUnix,
+ }
+
+ has, err := sess.ID(reg.ID).Get(new(webauthnCredential))
+ if err != nil {
+ return fmt.Errorf("unable to get webauthn_credential[%d]. Error: %w", reg.ID, err)
+ }
+ if !has {
+ has, err := sess.Where("`lower_name`=?", remigrated.LowerName).And("`user_id`=?", remigrated.UserID).Exist(new(webauthnCredential))
+ if err != nil {
+ return fmt.Errorf("unable to check webauthn_credential[lower_name: %s, user_id: %d]. Error: %w", remigrated.LowerName, remigrated.UserID, err)
+ }
+ if !has {
+ _, err = sess.Insert(remigrated)
+ if err != nil {
+ return fmt.Errorf("unable to (re)insert webauthn_credential[%d]. Error: %w", reg.ID, err)
+ }
+
+ continue
+ }
+ }
+
+ _, err = sess.ID(remigrated.ID).AllCols().Update(remigrated)
+ if err != nil {
+ return fmt.Errorf("unable to update webauthn_credential[%d]. Error: %w", reg.ID, err)
+ }
+ }
+ return sess.Commit()
+ }()
+ if err != nil {
+ return err
+ }
+
+ if len(regs) < 50 {
+ break
+ }
+ start += 50
+ regs = regs[:0]
+ }
+
+ if x.Dialect().URI().DBType == schemas.POSTGRES {
+ if _, err := x.Exec("SELECT setval('webauthn_credential_id_seq', COALESCE((SELECT MAX(id)+1 FROM `webauthn_credential`), 1), false)"); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
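
For context on parseU2FRegistration above: the raw U2F registration response it walks has a fixed prefix, per the FIDO U2F raw message format. The constants below are illustrative names for the offsets the parser hard-codes:

    // Raw U2F registration response layout:
    //   byte 0       reserved, always 0x05
    //   bytes 1-65   uncompressed P-256 public key (0x04 || X || Y)
    //   byte 66      key-handle length L
    //   bytes 67...  key handle (L bytes), then attestation cert and signature
    const (
        u2fReservedByte   = 0x05
        u2fPubKeyLen      = 65
        u2fMinResponseLen = 69 // mirrors the parser's minimum-length guard
    )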
diff --git a/models/migrations/v1_16/v210_test.go b/models/migrations/v1_16/v210_test.go
new file mode 100644
index 0000000..7321350
--- /dev/null
+++ b/models/migrations/v1_16/v210_test.go
@@ -0,0 +1,88 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_16 //nolint
+
+import (
+ "encoding/hex"
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "xorm.io/xorm/schemas"
+)
+
+func TestParseU2FRegistration(t *testing.T) {
+ // test vectors from https://github.com/tstranex/u2f/blob/d21a03e0b1d9fc1df59ff54e7a513655c1748b0c/register_test.go#L15
+
+ const testRegRespHex = "0504b174bc49c7ca254b70d2e5c207cee9cf174820ebd77ea3c65508c26da51b657c1cc6b952f8621697936482da0a6d3d3826a59095daf6cd7c03e2e60385d2f6d9402a552dfdb7477ed65fd84133f86196010b2215b57da75d315b7b9e8fe2e3925a6019551bab61d16591659cbaf00b4950f7abfe6660e2e006f76868b772d70c253082013c3081e4a003020102020a47901280001155957352300a06082a8648ce3d0403023017311530130603550403130c476e756262792050696c6f74301e170d3132303831343138323933325a170d3133303831343138323933325a3031312f302d0603550403132650696c6f74476e756262792d302e342e312d34373930313238303030313135353935373335323059301306072a8648ce3d020106082a8648ce3d030107034200048d617e65c9508e64bcc5673ac82a6799da3c1446682c258c463fffdf58dfd2fa3e6c378b53d795c4a4dffb4199edd7862f23abaf0203b4b8911ba0569994e101300a06082a8648ce3d0403020347003044022060cdb6061e9c22262d1aac1d96d8c70829b2366531dda268832cb836bcd30dfa0220631b1459f09e6330055722c8d89b7f48883b9089b88d60d1d9795902b30410df304502201471899bcc3987e62e8202c9b39c33c19033f7340352dba80fcab017db9230e402210082677d673d891933ade6f617e5dbde2e247e70423fd5ad7804a6d3d3961ef871"
+
+ regResp, err := hex.DecodeString(testRegRespHex)
+ require.NoError(t, err)
+ pubKey, keyHandle, err := parseU2FRegistration(regResp)
+ require.NoError(t, err)
+ assert.Equal(t, "04b174bc49c7ca254b70d2e5c207cee9cf174820ebd77ea3c65508c26da51b657c1cc6b952f8621697936482da0a6d3d3826a59095daf6cd7c03e2e60385d2f6d9", hex.EncodeToString(pubKey.Bytes()))
+ assert.Equal(t, "2a552dfdb7477ed65fd84133f86196010b2215b57da75d315b7b9e8fe2e3925a6019551bab61d16591659cbaf00b4950f7abfe6660e2e006f76868b772d70c25", hex.EncodeToString(keyHandle))
+}
+
+func Test_RemigrateU2FCredentials(t *testing.T) {
+ // Create webauthnCredential table
+ type WebauthnCredential struct {
+ ID int64 `xorm:"pk autoincr"`
+ Name string
+ LowerName string `xorm:"unique(s)"`
+ UserID int64 `xorm:"INDEX unique(s)"`
+		CredentialID    string `xorm:"INDEX VARCHAR(410)"` // a U2F credential ID is at most 255 bytes, which base32-encodes to 255 * 8 / 5 = 408 characters - add a few extra characters for safety
+ PublicKey []byte
+ AttestationType string
+ SignCount uint32 `xorm:"BIGINT"`
+ CloneWarning bool
+ }
+
+ // Now migrate the old u2f registrations to the new format
+ type U2fRegistration struct {
+ ID int64 `xorm:"pk autoincr"`
+ Name string
+ UserID int64 `xorm:"INDEX"`
+ Raw []byte
+ Counter uint32 `xorm:"BIGINT"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ }
+
+ type ExpectedWebauthnCredential struct {
+ ID int64 `xorm:"pk autoincr"`
+		CredentialID string `xorm:"INDEX VARCHAR(410)"` // a U2F credential ID is at most 255 bytes, which base32-encodes to 255 * 8 / 5 = 408 characters - add a few extra characters for safety
+ }
+
+ // Prepare and load the testing database
+ x, deferable := migration_tests.PrepareTestEnv(t, 0, new(WebauthnCredential), new(U2fRegistration), new(ExpectedWebauthnCredential))
+	defer deferable()
+	if x == nil || t.Failed() {
+		return
+	}
+
+ if x.Dialect().URI().DBType == schemas.SQLITE {
+ return
+ }
+
+ // Run the migration
+ if err := RemigrateU2FCredentials(x); err != nil {
+ require.NoError(t, err)
+ return
+ }
+
+ expected := []ExpectedWebauthnCredential{}
+ err := x.Table("expected_webauthn_credential").Asc("id").Find(&expected)
+ require.NoError(t, err)
+
+ got := []ExpectedWebauthnCredential{}
+ err = x.Table("webauthn_credential").Select("id, credential_id").Asc("id").Find(&got)
+ require.NoError(t, err)
+
+ assert.EqualValues(t, expected, got)
+}
diff --git a/models/migrations/v1_17/main_test.go b/models/migrations/v1_17/main_test.go
new file mode 100644
index 0000000..8a787f6
--- /dev/null
+++ b/models/migrations/v1_17/main_test.go
@@ -0,0 +1,14 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_17 //nolint
+
+import (
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+)
+
+func TestMain(m *testing.M) {
+ migration_tests.MainTest(m)
+}
diff --git a/models/migrations/v1_17/v211.go b/models/migrations/v1_17/v211.go
new file mode 100644
index 0000000..9b72c86
--- /dev/null
+++ b/models/migrations/v1_17/v211.go
@@ -0,0 +1,12 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_17 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func CreateForeignReferenceTable(_ *xorm.Engine) error {
+ return nil // This table was dropped in v1_19/v237.go
+}
diff --git a/models/migrations/v1_17/v212.go b/models/migrations/v1_17/v212.go
new file mode 100644
index 0000000..e3f9437
--- /dev/null
+++ b/models/migrations/v1_17/v212.go
@@ -0,0 +1,93 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_17 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
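+// AddPackageTables creates the tables that back the package registry:
+// Package, PackageVersion, PackageProperty, PackageFile, PackageBlob and PackageBlobUpload.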
+func AddPackageTables(x *xorm.Engine) error {
+ type Package struct {
+ ID int64 `xorm:"pk autoincr"`
+ OwnerID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ RepoID int64 `xorm:"INDEX"`
+ Type string `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ Name string `xorm:"NOT NULL"`
+ LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ SemverCompatible bool `xorm:"NOT NULL DEFAULT false"`
+ }
+
+ if err := x.Sync(new(Package)); err != nil {
+ return err
+ }
+
+ type PackageVersion struct {
+ ID int64 `xorm:"pk autoincr"`
+ PackageID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ CreatorID int64 `xorm:"NOT NULL DEFAULT 0"`
+ Version string `xorm:"NOT NULL"`
+ LowerVersion string `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created INDEX NOT NULL"`
+ IsInternal bool `xorm:"INDEX NOT NULL DEFAULT false"`
+ MetadataJSON string `xorm:"metadata_json TEXT"`
+ DownloadCount int64 `xorm:"NOT NULL DEFAULT 0"`
+ }
+
+ if err := x.Sync(new(PackageVersion)); err != nil {
+ return err
+ }
+
+ type PackageProperty struct {
+ ID int64 `xorm:"pk autoincr"`
+ RefType int64 `xorm:"INDEX NOT NULL"`
+ RefID int64 `xorm:"INDEX NOT NULL"`
+ Name string `xorm:"INDEX NOT NULL"`
+ Value string `xorm:"TEXT NOT NULL"`
+ }
+
+ if err := x.Sync(new(PackageProperty)); err != nil {
+ return err
+ }
+
+ type PackageFile struct {
+ ID int64 `xorm:"pk autoincr"`
+ VersionID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ BlobID int64 `xorm:"INDEX NOT NULL"`
+ Name string `xorm:"NOT NULL"`
+ LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ CompositeKey string `xorm:"UNIQUE(s) INDEX"`
+ IsLead bool `xorm:"NOT NULL DEFAULT false"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created INDEX NOT NULL"`
+ }
+
+ if err := x.Sync(new(PackageFile)); err != nil {
+ return err
+ }
+
+ type PackageBlob struct {
+ ID int64 `xorm:"pk autoincr"`
+ Size int64 `xorm:"NOT NULL DEFAULT 0"`
+ HashMD5 string `xorm:"hash_md5 char(32) UNIQUE(md5) INDEX NOT NULL"`
+ HashSHA1 string `xorm:"hash_sha1 char(40) UNIQUE(sha1) INDEX NOT NULL"`
+ HashSHA256 string `xorm:"hash_sha256 char(64) UNIQUE(sha256) INDEX NOT NULL"`
+ HashSHA512 string `xorm:"hash_sha512 char(128) UNIQUE(sha512) INDEX NOT NULL"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created INDEX NOT NULL"`
+ }
+
+ if err := x.Sync(new(PackageBlob)); err != nil {
+ return err
+ }
+
+ type PackageBlobUpload struct {
+ ID string `xorm:"pk"`
+ BytesReceived int64 `xorm:"NOT NULL DEFAULT 0"`
+ HashStateBytes []byte `xorm:"BLOB"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created NOT NULL"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"updated INDEX NOT NULL"`
+ }
+
+ return x.Sync(new(PackageBlobUpload))
+}
diff --git a/models/migrations/v1_17/v213.go b/models/migrations/v1_17/v213.go
new file mode 100644
index 0000000..bb3f466
--- /dev/null
+++ b/models/migrations/v1_17/v213.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_17 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddAllowMaintainerEdit(x *xorm.Engine) error {
+ // PullRequest represents relation between pull request and repositories.
+ type PullRequest struct {
+ AllowMaintainerEdit bool `xorm:"NOT NULL DEFAULT false"`
+ }
+
+ return x.Sync(new(PullRequest))
+}
diff --git a/models/migrations/v1_17/v214.go b/models/migrations/v1_17/v214.go
new file mode 100644
index 0000000..2268164
--- /dev/null
+++ b/models/migrations/v1_17/v214.go
@@ -0,0 +1,22 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_17 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
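+// AddAutoMergeTable creates the pull_auto_merge table, which records pull requests scheduled to be merged automatically.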
+func AddAutoMergeTable(x *xorm.Engine) error {
+ type MergeStyle string
+ type PullAutoMerge struct {
+ ID int64 `xorm:"pk autoincr"`
+ PullID int64 `xorm:"UNIQUE"`
+ DoerID int64 `xorm:"NOT NULL"`
+ MergeStyle MergeStyle `xorm:"varchar(30)"`
+ Message string `xorm:"LONGTEXT"`
+ CreatedUnix int64 `xorm:"created"`
+ }
+
+ return x.Sync(&PullAutoMerge{})
+}
diff --git a/models/migrations/v1_17/v215.go b/models/migrations/v1_17/v215.go
new file mode 100644
index 0000000..b338f85
--- /dev/null
+++ b/models/migrations/v1_17/v215.go
@@ -0,0 +1,24 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_17 //nolint
+
+import (
+ "code.gitea.io/gitea/models/pull"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
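+// AddReviewViewedFiles creates the review_state table, which tracks the viewed state of files for a given user, pull request and commit.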
+func AddReviewViewedFiles(x *xorm.Engine) error {
+ type ReviewState struct {
+ ID int64 `xorm:"pk autoincr"`
+ UserID int64 `xorm:"NOT NULL UNIQUE(pull_commit_user)"`
+ PullID int64 `xorm:"NOT NULL INDEX UNIQUE(pull_commit_user) DEFAULT 0"`
+ CommitSHA string `xorm:"NOT NULL VARCHAR(40) UNIQUE(pull_commit_user)"`
+ UpdatedFiles map[string]pull.ViewedState `xorm:"NOT NULL LONGTEXT JSON"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"updated"`
+ }
+
+ return x.Sync(new(ReviewState))
+}
diff --git a/models/migrations/v1_17/v216.go b/models/migrations/v1_17/v216.go
new file mode 100644
index 0000000..268f472
--- /dev/null
+++ b/models/migrations/v1_17/v216.go
@@ -0,0 +1,7 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_17 //nolint
+
+// This migration added non-ideal indices to the action table which slowed things down on larger datasets.
+// It has been superseded by v218.go.
diff --git a/models/migrations/v1_17/v217.go b/models/migrations/v1_17/v217.go
new file mode 100644
index 0000000..3f970b6
--- /dev/null
+++ b/models/migrations/v1_17/v217.go
@@ -0,0 +1,25 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_17 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+)
+
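+// AlterHookTaskTextFieldsToLongText widens the hook_task payload, request and response content columns to LONGTEXT; only MySQL needs the explicit ALTER.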
+func AlterHookTaskTextFieldsToLongText(x *xorm.Engine) error {
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ if setting.Database.Type.IsMySQL() {
+ if _, err := sess.Exec("ALTER TABLE `hook_task` CHANGE `payload_content` `payload_content` LONGTEXT, CHANGE `request_content` `request_content` LONGTEXT, change `response_content` `response_content` LONGTEXT"); err != nil {
+ return err
+ }
+ }
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_17/v218.go b/models/migrations/v1_17/v218.go
new file mode 100644
index 0000000..4c05a9b
--- /dev/null
+++ b/models/migrations/v1_17/v218.go
@@ -0,0 +1,52 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_17 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+ "xorm.io/xorm/schemas"
+)
+
+type improveActionTableIndicesAction struct {
+ ID int64 `xorm:"pk autoincr"`
+ UserID int64 // Receiver user id.
+ OpType int
+ ActUserID int64 // Action user id.
+ RepoID int64
+ CommentID int64 `xorm:"INDEX"`
+ IsDeleted bool `xorm:"NOT NULL DEFAULT false"`
+ RefName string
+ IsPrivate bool `xorm:"NOT NULL DEFAULT false"`
+ Content string `xorm:"TEXT"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+}
+
+// TableName sets the name of this table
+func (*improveActionTableIndicesAction) TableName() string {
+ return "action"
+}
+
+// TableIndices implements xorm's TableIndices interface
+func (*improveActionTableIndicesAction) TableIndices() []*schemas.Index {
+ repoIndex := schemas.NewIndex("r_u_d", schemas.IndexType)
+ repoIndex.AddColumn("repo_id", "user_id", "is_deleted")
+
+ actUserIndex := schemas.NewIndex("au_r_c_u_d", schemas.IndexType)
+ actUserIndex.AddColumn("act_user_id", "repo_id", "created_unix", "user_id", "is_deleted")
+ indices := []*schemas.Index{actUserIndex, repoIndex}
+ if setting.Database.Type.IsPostgreSQL() {
+ cudIndex := schemas.NewIndex("c_u_d", schemas.IndexType)
+ cudIndex.AddColumn("created_unix", "user_id", "is_deleted")
+ indices = append(indices, cudIndex)
+ }
+
+ return indices
+}
+
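+// ImproveActionTableIndices syncs the action table so it picks up the composite indices defined in TableIndices above.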
+func ImproveActionTableIndices(x *xorm.Engine) error {
+ return x.Sync(&improveActionTableIndicesAction{})
+}
diff --git a/models/migrations/v1_17/v219.go b/models/migrations/v1_17/v219.go
new file mode 100644
index 0000000..d266029
--- /dev/null
+++ b/models/migrations/v1_17/v219.go
@@ -0,0 +1,30 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_17 //nolint
+
+import (
+ "time"
+
+ "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
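+// AddSyncOnCommitColForPushMirror adds the sync_on_commit column to the push_mirror table, defaulting to true.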
+func AddSyncOnCommitColForPushMirror(x *xorm.Engine) error {
+ type PushMirror struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"INDEX"`
+ Repo *repo.Repository `xorm:"-"`
+ RemoteName string
+
+ SyncOnCommit bool `xorm:"NOT NULL DEFAULT true"`
+ Interval time.Duration
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+ LastUpdateUnix timeutil.TimeStamp `xorm:"INDEX last_update"`
+ LastError string `xorm:"text"`
+ }
+
+ return x.Sync(new(PushMirror))
+}
diff --git a/models/migrations/v1_17/v220.go b/models/migrations/v1_17/v220.go
new file mode 100644
index 0000000..d400716
--- /dev/null
+++ b/models/migrations/v1_17/v220.go
@@ -0,0 +1,23 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_17 //nolint
+
+import (
+ packages_model "code.gitea.io/gitea/models/packages"
+ container_module "code.gitea.io/gitea/modules/packages/container"
+
+ "xorm.io/xorm"
+ "xorm.io/xorm/schemas"
+)
+
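+// AddContainerRepositoryProperty backfills an "owner/name" repository property for every container package.
+// SQLite lacks CONCAT, so it uses the || concatenation operator instead.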
+func AddContainerRepositoryProperty(x *xorm.Engine) (err error) {
+ if x.Dialect().URI().DBType == schemas.SQLITE {
+ _, err = x.Exec("INSERT INTO package_property (ref_type, ref_id, name, value) SELECT ?, p.id, ?, u.lower_name || '/' || p.lower_name FROM package p JOIN `user` u ON p.owner_id = u.id WHERE p.type = ?",
+ packages_model.PropertyTypePackage, container_module.PropertyRepository, packages_model.TypeContainer)
+ } else {
+ _, err = x.Exec("INSERT INTO package_property (ref_type, ref_id, name, value) SELECT ?, p.id, ?, CONCAT(u.lower_name, '/', p.lower_name) FROM package p JOIN `user` u ON p.owner_id = u.id WHERE p.type = ?",
+ packages_model.PropertyTypePackage, container_module.PropertyRepository, packages_model.TypeContainer)
+ }
+ return err
+}
diff --git a/models/migrations/v1_17/v221.go b/models/migrations/v1_17/v221.go
new file mode 100644
index 0000000..9e15938
--- /dev/null
+++ b/models/migrations/v1_17/v221.go
@@ -0,0 +1,74 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_17 //nolint
+
+import (
+ "encoding/base32"
+ "fmt"
+
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
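+// StoreWebauthnCredentialIDAsBytes adds a binary credential_id_bytes column and fills it
+// by base32-decoding the existing credential_id strings, 50 rows at a time.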
+func StoreWebauthnCredentialIDAsBytes(x *xorm.Engine) error {
+ // Create webauthnCredential table
+ type webauthnCredential struct {
+ ID int64 `xorm:"pk autoincr"`
+ Name string
+ LowerName string `xorm:"unique(s)"`
+ UserID int64 `xorm:"INDEX unique(s)"`
+ CredentialID string `xorm:"INDEX VARCHAR(410)"`
+		// Note the lack of INDEX here - the index will be created once the column is renamed in v223.go
+ CredentialIDBytes []byte `xorm:"VARBINARY(1024)"` // CredentialID is at most 1023 bytes as per spec released 20 July 2022
+ PublicKey []byte
+ AttestationType string
+ AAGUID []byte
+ SignCount uint32 `xorm:"BIGINT"`
+ CloneWarning bool
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ }
+ if err := x.Sync(&webauthnCredential{}); err != nil {
+ return err
+ }
+
+ var start int
+ creds := make([]*webauthnCredential, 0, 50)
+ for {
+ err := x.Select("id, credential_id").OrderBy("id").Limit(50, start).Find(&creds)
+ if err != nil {
+ return err
+ }
+
+ err = func() error {
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return fmt.Errorf("unable to allow start session. Error: %w", err)
+ }
+ for _, cred := range creds {
+ cred.CredentialIDBytes, err = base32.HexEncoding.DecodeString(cred.CredentialID)
+ if err != nil {
+ return fmt.Errorf("unable to parse credential id %s for credential[%d]: %w", cred.CredentialID, cred.ID, err)
+ }
+ count, err := sess.ID(cred.ID).Cols("credential_id_bytes").Update(cred)
+ if count != 1 || err != nil {
+ return fmt.Errorf("unable to update credential id bytes for credential[%d]: %d,%w", cred.ID, count, err)
+ }
+ }
+ return sess.Commit()
+ }()
+ if err != nil {
+ return err
+ }
+
+ if len(creds) < 50 {
+ break
+ }
+ start += 50
+ creds = creds[:0]
+ }
+ return nil
+}
diff --git a/models/migrations/v1_17/v221_test.go b/models/migrations/v1_17/v221_test.go
new file mode 100644
index 0000000..0f6db2a
--- /dev/null
+++ b/models/migrations/v1_17/v221_test.go
@@ -0,0 +1,63 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_17 //nolint
+
+import (
+ "encoding/base32"
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_StoreWebauthnCredentialIDAsBytes(t *testing.T) {
+ // Create webauthnCredential table
+ type WebauthnCredential struct {
+ ID int64 `xorm:"pk autoincr"`
+ Name string
+ LowerName string `xorm:"unique(s)"`
+ UserID int64 `xorm:"INDEX unique(s)"`
+ CredentialID string `xorm:"INDEX VARCHAR(410)"`
+ PublicKey []byte
+ AttestationType string
+ AAGUID []byte
+ SignCount uint32 `xorm:"BIGINT"`
+ CloneWarning bool
+ }
+
+ type ExpectedWebauthnCredential struct {
+ ID int64 `xorm:"pk autoincr"`
+ CredentialID string // CredentialID is at most 1023 bytes as per spec released 20 July 2022
+ }
+
+ type ConvertedWebauthnCredential struct {
+ ID int64 `xorm:"pk autoincr"`
+ CredentialIDBytes []byte `xorm:"VARBINARY(1024)"` // CredentialID is at most 1023 bytes as per spec released 20 July 2022
+ }
+
+ // Prepare and load the testing database
+ x, deferable := migration_tests.PrepareTestEnv(t, 0, new(WebauthnCredential), new(ExpectedWebauthnCredential))
+ defer deferable()
+ if x == nil || t.Failed() {
+ return
+ }
+
+ err := StoreWebauthnCredentialIDAsBytes(x)
+ require.NoError(t, err)
+
+ expected := []ExpectedWebauthnCredential{}
+ err = x.Table("expected_webauthn_credential").Asc("id").Find(&expected)
+ require.NoError(t, err)
+
+ got := []ConvertedWebauthnCredential{}
+ err = x.Table("webauthn_credential").Select("id, credential_id_bytes").Asc("id").Find(&got)
+ require.NoError(t, err)
+
+ for i, e := range expected {
+ credIDBytes, _ := base32.HexEncoding.DecodeString(e.CredentialID)
+ assert.Equal(t, credIDBytes, got[i].CredentialIDBytes)
+ }
+}
diff --git a/models/migrations/v1_17/v222.go b/models/migrations/v1_17/v222.go
new file mode 100644
index 0000000..2ffb94e
--- /dev/null
+++ b/models/migrations/v1_17/v222.go
@@ -0,0 +1,64 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_17 //nolint
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/migrations/base"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
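+// DropOldCredentialIDColumn drops the legacy string credential_id column now that credential_id_bytes exists (see v221).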
+func DropOldCredentialIDColumn(x *xorm.Engine) error {
+	// This migration may be rerun, so check whether it has already been applied
+ credentialIDExist, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "webauthn_credential", "credential_id")
+ if err != nil {
+ return err
+ }
+ if !credentialIDExist {
+		// The column has already been dropped
+ return nil
+ }
+ credentialIDBytesExists, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "webauthn_credential", "credential_id_bytes")
+ if err != nil {
+ return err
+ }
+ if !credentialIDBytesExists {
+		// it looks like v221 has not been run properly
+ return fmt.Errorf("webauthn_credential does not have a credential_id_bytes column... it is not safe to run this migration")
+ }
+
+ // Create webauthnCredential table
+ type webauthnCredential struct {
+ ID int64 `xorm:"pk autoincr"`
+ Name string
+ LowerName string `xorm:"unique(s)"`
+ UserID int64 `xorm:"INDEX unique(s)"`
+ CredentialID string `xorm:"INDEX VARCHAR(410)"`
+ // Note the lack of the INDEX on CredentialIDBytes - we will add this in v223.go
+ CredentialIDBytes []byte `xorm:"VARBINARY(1024)"` // CredentialID is at most 1023 bytes as per spec released 20 July 2022
+ PublicKey []byte
+ AttestationType string
+ AAGUID []byte
+ SignCount uint32 `xorm:"BIGINT"`
+ CloneWarning bool
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ }
+ if err := x.Sync(&webauthnCredential{}); err != nil {
+ return err
+ }
+
+ // Drop the old credential ID
+ sess := x.NewSession()
+ defer sess.Close()
+
+ if err := base.DropTableColumns(sess, "webauthn_credential", "credential_id"); err != nil {
+ return fmt.Errorf("unable to drop old credentialID column: %w", err)
+ }
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_17/v223.go b/models/migrations/v1_17/v223.go
new file mode 100644
index 0000000..3592eb1
--- /dev/null
+++ b/models/migrations/v1_17/v223.go
@@ -0,0 +1,98 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_17 //nolint
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/migrations/base"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
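+// RenameCredentialIDBytes renames credential_id_bytes to credential_id, then re-syncs the table to add the INDEX on the renamed column.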
+func RenameCredentialIDBytes(x *xorm.Engine) error {
+	// This migration may be rerun, so check whether it has already been applied
+ credentialIDExist, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "webauthn_credential", "credential_id")
+ if err != nil {
+ return err
+ }
+ if credentialIDExist {
+ credentialIDBytesExists, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "webauthn_credential", "credential_id_bytes")
+ if err != nil {
+ return err
+ }
+ if !credentialIDBytesExists {
+ return nil
+ }
+ }
+
+ err = func() error {
+ // webauthnCredential table
+ type webauthnCredential struct {
+ ID int64 `xorm:"pk autoincr"`
+ Name string
+ LowerName string `xorm:"unique(s)"`
+ UserID int64 `xorm:"INDEX unique(s)"`
+ // Note the lack of INDEX here
+ CredentialIDBytes []byte `xorm:"VARBINARY(1024)"` // CredentialID is at most 1023 bytes as per spec released 20 July 2022
+ PublicKey []byte
+ AttestationType string
+ AAGUID []byte
+ SignCount uint32 `xorm:"BIGINT"`
+ CloneWarning bool
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ }
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ if err := sess.Sync(new(webauthnCredential)); err != nil {
+ return fmt.Errorf("error on Sync: %w", err)
+ }
+
+ if credentialIDExist {
+		// if the old credential_id column still exists, drop it before renaming credential_id_bytes
+ if err := base.DropTableColumns(sess, "webauthn_credential", "credential_id"); err != nil {
+ return err
+ }
+ }
+
+ if setting.Database.Type.IsMySQL() {
+ if _, err := sess.Exec("ALTER TABLE `webauthn_credential` CHANGE credential_id_bytes credential_id VARBINARY(1024)"); err != nil {
+ return err
+ }
+ } else {
+ if _, err := sess.Exec("ALTER TABLE `webauthn_credential` RENAME COLUMN credential_id_bytes TO credential_id"); err != nil {
+ return err
+ }
+ }
+ return sess.Commit()
+ }()
+ if err != nil {
+ return err
+ }
+
+ // Create webauthnCredential table
+ type webauthnCredential struct {
+ ID int64 `xorm:"pk autoincr"`
+ Name string
+ LowerName string `xorm:"unique(s)"`
+ UserID int64 `xorm:"INDEX unique(s)"`
+ CredentialID []byte `xorm:"INDEX VARBINARY(1024)"` // CredentialID is at most 1023 bytes as per spec released 20 July 2022
+ PublicKey []byte
+ AttestationType string
+ AAGUID []byte
+ SignCount uint32 `xorm:"BIGINT"`
+ CloneWarning bool
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ }
+ return x.Sync(&webauthnCredential{})
+}
diff --git a/models/migrations/v1_18/main_test.go b/models/migrations/v1_18/main_test.go
new file mode 100644
index 0000000..329aa20
--- /dev/null
+++ b/models/migrations/v1_18/main_test.go
@@ -0,0 +1,14 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_18 //nolint
+
+import (
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+)
+
+func TestMain(m *testing.M) {
+ migration_tests.MainTest(m)
+}
diff --git a/models/migrations/v1_18/v224.go b/models/migrations/v1_18/v224.go
new file mode 100644
index 0000000..f3d522b
--- /dev/null
+++ b/models/migrations/v1_18/v224.go
@@ -0,0 +1,27 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_18 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func CreateUserBadgesTable(x *xorm.Engine) error {
+ type Badge struct {
+ ID int64 `xorm:"pk autoincr"`
+ Description string
+ ImageURL string
+ }
+
+ type userBadge struct {
+ ID int64 `xorm:"pk autoincr"`
+ BadgeID int64
+ UserID int64 `xorm:"INDEX"`
+ }
+
+ if err := x.Sync(new(Badge)); err != nil {
+ return err
+ }
+ return x.Sync(new(userBadge))
+}
diff --git a/models/migrations/v1_18/v225.go b/models/migrations/v1_18/v225.go
new file mode 100644
index 0000000..b0ac377
--- /dev/null
+++ b/models/migrations/v1_18/v225.go
@@ -0,0 +1,28 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_18 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+)
+
+func AlterPublicGPGKeyContentFieldsToMediumText(x *xorm.Engine) error {
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ if setting.Database.Type.IsMySQL() {
+ if _, err := sess.Exec("ALTER TABLE `gpg_key` CHANGE `content` `content` MEDIUMTEXT"); err != nil {
+ return err
+ }
+ if _, err := sess.Exec("ALTER TABLE `public_key` CHANGE `content` `content` MEDIUMTEXT"); err != nil {
+ return err
+ }
+ }
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_18/v226.go b/models/migrations/v1_18/v226.go
new file mode 100644
index 0000000..f87e24b
--- /dev/null
+++ b/models/migrations/v1_18/v226.go
@@ -0,0 +1,14 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_18 //nolint
+
+import (
+ "xorm.io/builder"
+ "xorm.io/xorm"
+)
+
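+// FixPackageSemverField clears the semver_compatible flag for conan and generic packages, whose versions are not semantic versions.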
+func FixPackageSemverField(x *xorm.Engine) error {
+ _, err := x.Exec(builder.Update(builder.Eq{"semver_compatible": false}).From("`package`").Where(builder.In("`type`", "conan", "generic")))
+ return err
+}
diff --git a/models/migrations/v1_18/v227.go b/models/migrations/v1_18/v227.go
new file mode 100644
index 0000000..5fe5dcd
--- /dev/null
+++ b/models/migrations/v1_18/v227.go
@@ -0,0 +1,23 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_18 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+type SystemSetting struct {
+ ID int64 `xorm:"pk autoincr"`
+ SettingKey string `xorm:"varchar(255) unique"` // ensure key is always lowercase
+ SettingValue string `xorm:"text"`
+	Version      int                `xorm:"version"` // optimistic locking, to prevent overriding concurrent updates
+ Created timeutil.TimeStamp `xorm:"created"`
+ Updated timeutil.TimeStamp `xorm:"updated"`
+}
+
+func CreateSystemSettingsTable(x *xorm.Engine) error {
+ return x.Sync(new(SystemSetting))
+}
diff --git a/models/migrations/v1_18/v228.go b/models/migrations/v1_18/v228.go
new file mode 100644
index 0000000..3e7a36d
--- /dev/null
+++ b/models/migrations/v1_18/v228.go
@@ -0,0 +1,25 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_18 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func AddTeamInviteTable(x *xorm.Engine) error {
+ type TeamInvite struct {
+ ID int64 `xorm:"pk autoincr"`
+ Token string `xorm:"UNIQUE(token) INDEX NOT NULL DEFAULT ''"`
+ InviterID int64 `xorm:"NOT NULL DEFAULT 0"`
+ OrgID int64 `xorm:"INDEX NOT NULL DEFAULT 0"`
+ TeamID int64 `xorm:"UNIQUE(team_mail) INDEX NOT NULL DEFAULT 0"`
+ Email string `xorm:"UNIQUE(team_mail) NOT NULL DEFAULT ''"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ }
+
+ return x.Sync(new(TeamInvite))
+}
diff --git a/models/migrations/v1_18/v229.go b/models/migrations/v1_18/v229.go
new file mode 100644
index 0000000..10d9f35
--- /dev/null
+++ b/models/migrations/v1_18/v229.go
@@ -0,0 +1,46 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_18 //nolint
+
+import (
+ "fmt"
+
+ "code.gitea.io/gitea/models/issues"
+
+ "xorm.io/builder"
+ "xorm.io/xorm"
+)
+
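+// UpdateOpenMilestoneCounts recalculates num_issues, num_closed_issues and completeness for every open milestone.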
+func UpdateOpenMilestoneCounts(x *xorm.Engine) error {
+ var openMilestoneIDs []int64
+ err := x.Table("milestone").Select("id").Where(builder.Neq{"is_closed": 1}).Find(&openMilestoneIDs)
+ if err != nil {
+ return fmt.Errorf("error selecting open milestone IDs: %w", err)
+ }
+
+ for _, id := range openMilestoneIDs {
+ _, err := x.ID(id).
+ SetExpr("num_issues", builder.Select("count(*)").From("issue").Where(
+ builder.Eq{"milestone_id": id},
+ )).
+ SetExpr("num_closed_issues", builder.Select("count(*)").From("issue").Where(
+ builder.Eq{
+ "milestone_id": id,
+ "is_closed": true,
+ },
+ )).
+ Update(&issues.Milestone{})
+ if err != nil {
+ return fmt.Errorf("error updating issue counts in milestone %d: %w", id, err)
+ }
+ _, err = x.Exec("UPDATE `milestone` SET completeness=100*num_closed_issues/(CASE WHEN num_issues > 0 THEN num_issues ELSE 1 END) WHERE id=?",
+ id,
+ )
+ if err != nil {
+ return fmt.Errorf("error setting completeness on milestone %d: %w", id, err)
+ }
+ }
+
+ return nil
+}
diff --git a/models/migrations/v1_18/v229_test.go b/models/migrations/v1_18/v229_test.go
new file mode 100644
index 0000000..b20d0ff
--- /dev/null
+++ b/models/migrations/v1_18/v229_test.go
@@ -0,0 +1,45 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_18 //nolint
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/issues"
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_UpdateOpenMilestoneCounts(t *testing.T) {
+ type ExpectedMilestone issues.Milestone
+
+ // Prepare and load the testing database
+ x, deferable := migration_tests.PrepareTestEnv(t, 0, new(issues.Milestone), new(ExpectedMilestone), new(issues.Issue))
+ defer deferable()
+ if x == nil || t.Failed() {
+ return
+ }
+
+ if err := UpdateOpenMilestoneCounts(x); err != nil {
+ require.NoError(t, err)
+ return
+ }
+
+ expected := []ExpectedMilestone{}
+ err := x.Table("expected_milestone").Asc("id").Find(&expected)
+ require.NoError(t, err)
+
+ got := []issues.Milestone{}
+ err = x.Table("milestone").Asc("id").Find(&got)
+ require.NoError(t, err)
+
+ for i, e := range expected {
+ got := got[i]
+ assert.Equal(t, e.ID, got.ID)
+ assert.Equal(t, e.NumIssues, got.NumIssues)
+ assert.Equal(t, e.NumClosedIssues, got.NumClosedIssues)
+ }
+}
diff --git a/models/migrations/v1_18/v230.go b/models/migrations/v1_18/v230.go
new file mode 100644
index 0000000..ea5b4d0
--- /dev/null
+++ b/models/migrations/v1_18/v230.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_18 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+// AddConfidentialClientColumnToOAuth2ApplicationTable adds the ConfidentialClient column, setting existing rows to true
+func AddConfidentialClientColumnToOAuth2ApplicationTable(x *xorm.Engine) error {
+ type oauth2Application struct {
+ ID int64
+ ConfidentialClient bool `xorm:"NOT NULL DEFAULT TRUE"`
+ }
+ return x.Sync(new(oauth2Application))
+}
diff --git a/models/migrations/v1_18/v230_test.go b/models/migrations/v1_18/v230_test.go
new file mode 100644
index 0000000..82b3b8f
--- /dev/null
+++ b/models/migrations/v1_18/v230_test.go
@@ -0,0 +1,47 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_18 //nolint
+
+import (
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_AddConfidentialClientColumnToOAuth2ApplicationTable(t *testing.T) {
+ // premigration
+ type oauth2Application struct {
+ ID int64
+ }
+
+ // Prepare and load the testing database
+ x, deferable := migration_tests.PrepareTestEnv(t, 0, new(oauth2Application))
+ defer deferable()
+ if x == nil || t.Failed() {
+ return
+ }
+
+ if err := AddConfidentialClientColumnToOAuth2ApplicationTable(x); err != nil {
+ require.NoError(t, err)
+ return
+ }
+
+ // postmigration
+ type ExpectedOAuth2Application struct {
+ ID int64
+ ConfidentialClient bool
+ }
+
+ got := []ExpectedOAuth2Application{}
+ err := x.Table("oauth2_application").Select("id, confidential_client").Find(&got)
+ require.NoError(t, err)
+
+ assert.NotEmpty(t, got)
+ for _, e := range got {
+ assert.True(t, e.ConfidentialClient)
+ }
+}
diff --git a/models/migrations/v1_19/main_test.go b/models/migrations/v1_19/main_test.go
new file mode 100644
index 0000000..18696a7
--- /dev/null
+++ b/models/migrations/v1_19/main_test.go
@@ -0,0 +1,14 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_19 //nolint
+
+import (
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+)
+
+func TestMain(m *testing.M) {
+ migration_tests.MainTest(m)
+}
diff --git a/models/migrations/v1_19/v231.go b/models/migrations/v1_19/v231.go
new file mode 100644
index 0000000..79e4613
--- /dev/null
+++ b/models/migrations/v1_19/v231.go
@@ -0,0 +1,18 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_19 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddIndexForHookTask(x *xorm.Engine) error {
+ type HookTask struct {
+ ID int64 `xorm:"pk autoincr"`
+ HookID int64 `xorm:"index"`
+ UUID string `xorm:"unique"`
+ }
+
+ return x.Sync(new(HookTask))
+}
diff --git a/models/migrations/v1_19/v232.go b/models/migrations/v1_19/v232.go
new file mode 100644
index 0000000..9caf587
--- /dev/null
+++ b/models/migrations/v1_19/v232.go
@@ -0,0 +1,25 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_19 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+)
+
+func AlterPackageVersionMetadataToLongText(x *xorm.Engine) error {
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ if setting.Database.Type.IsMySQL() {
+ if _, err := sess.Exec("ALTER TABLE `package_version` MODIFY COLUMN `metadata_json` LONGTEXT"); err != nil {
+ return err
+ }
+ }
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_19/v233.go b/models/migrations/v1_19/v233.go
new file mode 100644
index 0000000..ba4cd8e
--- /dev/null
+++ b/models/migrations/v1_19/v233.go
@@ -0,0 +1,181 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_19 //nolint
+
+import (
+ "fmt"
+
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/secret"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+
+ "xorm.io/builder"
+ "xorm.io/xorm"
+)
+
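+// batchProcess pages through the rows matched by query in chunks of cap(buf),
+// applying process to each row inside a per-chunk transaction.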
+func batchProcess[T any](x *xorm.Engine, buf []T, query func(limit, start int) *xorm.Session, process func(*xorm.Session, T) error) error {
+ size := cap(buf)
+ start := 0
+ for {
+ err := query(size, start).Find(&buf)
+ if err != nil {
+ return err
+ }
+ if len(buf) == 0 {
+ return nil
+ }
+
+ err = func() error {
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return fmt.Errorf("unable to allow start session. Error: %w", err)
+ }
+ for _, record := range buf {
+ if err := process(sess, record); err != nil {
+ return err
+ }
+ }
+ return sess.Commit()
+ }()
+ if err != nil {
+ return err
+ }
+
+ if len(buf) < size {
+ return nil
+ }
+ start += size
+ buf = buf[:0]
+ }
+}
+
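+// AddHeaderAuthorizationEncryptedColWebhook moves the matrix access token out of the webhook meta
+// and hook_task payloads into the new encrypted header_authorization_encrypted column.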
+func AddHeaderAuthorizationEncryptedColWebhook(x *xorm.Engine) error {
+ // Add the column to the table
+ type Webhook struct {
+ ID int64 `xorm:"pk autoincr"`
+ Type string `xorm:"VARCHAR(16) 'type'"`
+ Meta string `xorm:"TEXT"` // store hook-specific attributes
+
+ // HeaderAuthorizationEncrypted should be accessed using HeaderAuthorization() and SetHeaderAuthorization()
+ HeaderAuthorizationEncrypted string `xorm:"TEXT"`
+ }
+ err := x.Sync(new(Webhook))
+ if err != nil {
+ return err
+ }
+
+ // Migrate the matrix webhooks
+
+ type MatrixMeta struct {
+ HomeserverURL string `json:"homeserver_url"`
+ Room string `json:"room_id"`
+ MessageType int `json:"message_type"`
+ }
+ type MatrixMetaWithAccessToken struct {
+ MatrixMeta
+ AccessToken string `json:"access_token"`
+ }
+
+ err = batchProcess(x,
+ make([]*Webhook, 0, 50),
+ func(limit, start int) *xorm.Session {
+ return x.Where("type=?", "matrix").OrderBy("id").Limit(limit, start)
+ },
+ func(sess *xorm.Session, hook *Webhook) error {
+ // retrieve token from meta
+ var withToken MatrixMetaWithAccessToken
+ err := json.Unmarshal([]byte(hook.Meta), &withToken)
+ if err != nil {
+ return fmt.Errorf("unable to unmarshal matrix meta for webhook[id=%d]: %w", hook.ID, err)
+ }
+ if withToken.AccessToken == "" {
+ return nil
+ }
+
+ // encrypt token
+ authorization := "Bearer " + withToken.AccessToken
+ hook.HeaderAuthorizationEncrypted, err = secret.EncryptSecret(setting.SecretKey, authorization)
+ if err != nil {
+ return fmt.Errorf("unable to encrypt access token for webhook[id=%d]: %w", hook.ID, err)
+ }
+
+ // remove token from meta
+ withoutToken, err := json.Marshal(withToken.MatrixMeta)
+ if err != nil {
+ return fmt.Errorf("unable to marshal matrix meta for webhook[id=%d]: %w", hook.ID, err)
+ }
+ hook.Meta = string(withoutToken)
+
+ // save in database
+ count, err := sess.ID(hook.ID).Cols("meta", "header_authorization_encrypted").Update(hook)
+ if count != 1 || err != nil {
+ return fmt.Errorf("unable to update header_authorization_encrypted for webhook[id=%d]: %d,%w", hook.ID, count, err)
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ // Remove access_token from HookTask
+
+ type HookTask struct {
+ ID int64 `xorm:"pk autoincr"`
+ HookID int64
+ PayloadContent string `xorm:"LONGTEXT"`
+ }
+
+ type MatrixPayloadSafe struct {
+ Body string `json:"body"`
+ MsgType string `json:"msgtype"`
+ Format string `json:"format"`
+ FormattedBody string `json:"formatted_body"`
+ Commits []*api.PayloadCommit `json:"io.gitea.commits,omitempty"`
+ }
+ type MatrixPayloadUnsafe struct {
+ MatrixPayloadSafe
+ AccessToken string `json:"access_token"`
+ }
+
+ err = batchProcess(x,
+ make([]*HookTask, 0, 50),
+ func(limit, start int) *xorm.Session {
+ return x.Where(builder.And(
+ builder.In("hook_id", builder.Select("id").From("webhook").Where(builder.Eq{"type": "matrix"})),
+ builder.Like{"payload_content", "access_token"},
+ )).OrderBy("id").Limit(limit, 0) // ignore the provided "start", since other payload were already converted and don't contain 'payload_content' anymore
+ },
+ func(sess *xorm.Session, hookTask *HookTask) error {
+ // retrieve token from payload_content
+ var withToken MatrixPayloadUnsafe
+ err := json.Unmarshal([]byte(hookTask.PayloadContent), &withToken)
+ if err != nil {
+ return fmt.Errorf("unable to unmarshal payload_content for hook_task[id=%d]: %w", hookTask.ID, err)
+ }
+ if withToken.AccessToken == "" {
+ return nil
+ }
+
+ // remove token from payload_content
+ withoutToken, err := json.Marshal(withToken.MatrixPayloadSafe)
+ if err != nil {
+ return fmt.Errorf("unable to marshal payload_content for hook_task[id=%d]: %w", hookTask.ID, err)
+ }
+ hookTask.PayloadContent = string(withoutToken)
+
+ // save in database
+ count, err := sess.ID(hookTask.ID).Cols("payload_content").Update(hookTask)
+ if count != 1 || err != nil {
+ return fmt.Errorf("unable to update payload_content for hook_task[id=%d]: %d,%w", hookTask.ID, count, err)
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/models/migrations/v1_19/v233_test.go b/models/migrations/v1_19/v233_test.go
new file mode 100644
index 0000000..94e9bc3
--- /dev/null
+++ b/models/migrations/v1_19/v233_test.go
@@ -0,0 +1,86 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_19 //nolint
+
+import (
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/secret"
+ "code.gitea.io/gitea/modules/setting"
+ webhook_module "code.gitea.io/gitea/modules/webhook"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_AddHeaderAuthorizationEncryptedColWebhook(t *testing.T) {
+ // Create Webhook table
+ type Webhook struct {
+ ID int64 `xorm:"pk autoincr"`
+ Type webhook_module.HookType `xorm:"VARCHAR(16) 'type'"`
+ Meta string `xorm:"TEXT"` // store hook-specific attributes
+
+ // HeaderAuthorizationEncrypted should be accessed using HeaderAuthorization() and SetHeaderAuthorization()
+ HeaderAuthorizationEncrypted string `xorm:"TEXT"`
+ }
+
+ type ExpectedWebhook struct {
+ ID int64 `xorm:"pk autoincr"`
+ Meta string
+ HeaderAuthorization string
+ }
+
+ type HookTask struct {
+ ID int64 `xorm:"pk autoincr"`
+ HookID int64
+ PayloadContent string `xorm:"LONGTEXT"`
+ }
+
+ // Prepare and load the testing database
+ x, deferable := migration_tests.PrepareTestEnv(t, 0, new(Webhook), new(ExpectedWebhook), new(HookTask))
+ defer deferable()
+ if x == nil || t.Failed() {
+ return
+ }
+
+ if err := AddHeaderAuthorizationEncryptedColWebhook(x); err != nil {
+ require.NoError(t, err)
+ return
+ }
+
+ expected := []ExpectedWebhook{}
+ err := x.Table("expected_webhook").Asc("id").Find(&expected)
+ require.NoError(t, err)
+
+ got := []Webhook{}
+ err = x.Table("webhook").Select("id, meta, header_authorization_encrypted").Asc("id").Find(&got)
+ require.NoError(t, err)
+
+ for i, e := range expected {
+ assert.Equal(t, e.Meta, got[i].Meta)
+
+ if e.HeaderAuthorization == "" {
+ assert.Equal(t, "", got[i].HeaderAuthorizationEncrypted)
+ } else {
+ cipherhex := got[i].HeaderAuthorizationEncrypted
+ cleartext, err := secret.DecryptSecret(setting.SecretKey, cipherhex)
+ require.NoError(t, err)
+ assert.Equal(t, e.HeaderAuthorization, cleartext)
+ }
+ }
+
+ // ensure that no hook_task has some remaining "access_token"
+ hookTasks := []HookTask{}
+ err = x.Table("hook_task").Select("id, payload_content").Asc("id").Find(&hookTasks)
+ require.NoError(t, err)
+
+ for _, h := range hookTasks {
+ var m map[string]any
+ err := json.Unmarshal([]byte(h.PayloadContent), &m)
+ require.NoError(t, err)
+ assert.Nil(t, m["access_token"])
+ }
+}
diff --git a/models/migrations/v1_19/v234.go b/models/migrations/v1_19/v234.go
new file mode 100644
index 0000000..728a580
--- /dev/null
+++ b/models/migrations/v1_19/v234.go
@@ -0,0 +1,28 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_19 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func CreatePackageCleanupRuleTable(x *xorm.Engine) error {
+ type PackageCleanupRule struct {
+ ID int64 `xorm:"pk autoincr"`
+ Enabled bool `xorm:"INDEX NOT NULL DEFAULT false"`
+ OwnerID int64 `xorm:"UNIQUE(s) INDEX NOT NULL DEFAULT 0"`
+ Type string `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ KeepCount int `xorm:"NOT NULL DEFAULT 0"`
+ KeepPattern string `xorm:"NOT NULL DEFAULT ''"`
+ RemoveDays int `xorm:"NOT NULL DEFAULT 0"`
+ RemovePattern string `xorm:"NOT NULL DEFAULT ''"`
+ MatchFullName bool `xorm:"NOT NULL DEFAULT false"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created NOT NULL DEFAULT 0"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"updated NOT NULL DEFAULT 0"`
+ }
+
+ return x.Sync(new(PackageCleanupRule))
+}
diff --git a/models/migrations/v1_19/v235.go b/models/migrations/v1_19/v235.go
new file mode 100644
index 0000000..3715de3
--- /dev/null
+++ b/models/migrations/v1_19/v235.go
@@ -0,0 +1,16 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_19 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddIndexForAccessToken(x *xorm.Engine) error {
+ type AccessToken struct {
+ TokenLastEight string `xorm:"INDEX token_last_eight"`
+ }
+
+ return x.Sync(new(AccessToken))
+}
diff --git a/models/migrations/v1_19/v236.go b/models/migrations/v1_19/v236.go
new file mode 100644
index 0000000..f172a85
--- /dev/null
+++ b/models/migrations/v1_19/v236.go
@@ -0,0 +1,23 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_19 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func CreateSecretsTable(x *xorm.Engine) error {
+ type Secret struct {
+ ID int64
+ OwnerID int64 `xorm:"INDEX UNIQUE(owner_repo_name) NOT NULL"`
+ RepoID int64 `xorm:"INDEX UNIQUE(owner_repo_name) NOT NULL DEFAULT 0"`
+ Name string `xorm:"UNIQUE(owner_repo_name) NOT NULL"`
+ Data string `xorm:"LONGTEXT"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created NOT NULL"`
+ }
+
+ return x.Sync(new(Secret))
+}
diff --git a/models/migrations/v1_19/v237.go b/models/migrations/v1_19/v237.go
new file mode 100644
index 0000000..b23c765
--- /dev/null
+++ b/models/migrations/v1_19/v237.go
@@ -0,0 +1,15 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_19 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func DropForeignReferenceTable(x *xorm.Engine) error {
+	// Drop the table introduced in `v211`; it's considered badly designed and does not appear to be used.
+ // See: https://github.com/go-gitea/gitea/issues/21086#issuecomment-1318217453
+ type ForeignReference struct{}
+ return x.DropTables(new(ForeignReference))
+}
diff --git a/models/migrations/v1_19/v238.go b/models/migrations/v1_19/v238.go
new file mode 100644
index 0000000..266e6ce
--- /dev/null
+++ b/models/migrations/v1_19/v238.go
@@ -0,0 +1,27 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_19 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+// AddUpdatedUnixToLFSMetaObject adds an updated column to the LFSMetaObject to allow for garbage collection
+func AddUpdatedUnixToLFSMetaObject(x *xorm.Engine) error {
+ // LFSMetaObject stores metadata for LFS tracked files.
+ type LFSMetaObject struct {
+ ID int64 `xorm:"pk autoincr"`
+ Oid string `json:"oid" xorm:"UNIQUE(s) INDEX NOT NULL"`
+ Size int64 `json:"size" xorm:"NOT NULL"`
+ RepositoryID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ }
+
+ return x.Sync(new(LFSMetaObject))
+}
diff --git a/models/migrations/v1_19/v239.go b/models/migrations/v1_19/v239.go
new file mode 100644
index 0000000..10076f2
--- /dev/null
+++ b/models/migrations/v1_19/v239.go
@@ -0,0 +1,22 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_19 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
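+// AddScopeForAccessTokens adds the scope column to access_token and backfills existing tokens with "all,sudo".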
+func AddScopeForAccessTokens(x *xorm.Engine) error {
+ type AccessToken struct {
+ Scope string
+ }
+
+ if err := x.Sync(new(AccessToken)); err != nil {
+ return err
+ }
+
+ // all previous tokens have `all` and `sudo` scopes
+ _, err := x.Exec("UPDATE access_token SET scope = ? WHERE scope IS NULL OR scope = ''", "all,sudo")
+ return err
+}
diff --git a/models/migrations/v1_19/v240.go b/models/migrations/v1_19/v240.go
new file mode 100644
index 0000000..4505f86
--- /dev/null
+++ b/models/migrations/v1_19/v240.go
@@ -0,0 +1,176 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_19 //nolint
+
+import (
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
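+// AddActionsTables creates the tables used by Actions: runners, runner tokens, runs, jobs,
+// tasks, task steps, and the dbfs tables that back log storage.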
+func AddActionsTables(x *xorm.Engine) error {
+ type ActionRunner struct {
+ ID int64
+ UUID string `xorm:"CHAR(36) UNIQUE"`
+ Name string `xorm:"VARCHAR(255)"`
+ OwnerID int64 `xorm:"index"` // org level runner, 0 means system
+		RepoID      int64  `xorm:"index"` // repo level runner; if owner_id is also zero, it's a global runner
+ Description string `xorm:"TEXT"`
+ Base int // 0 native 1 docker 2 virtual machine
+ RepoRange string // glob match which repositories could use this runner
+
+ Token string `xorm:"-"`
+ TokenHash string `xorm:"UNIQUE"` // sha256 of token
+ TokenSalt string
+ // TokenLastEight string `xorm:"token_last_eight"` // it's unnecessary because we don't find runners by token
+
+ LastOnline timeutil.TimeStamp `xorm:"index"`
+ LastActive timeutil.TimeStamp `xorm:"index"`
+
+		// Store OS and architecture.
+ AgentLabels []string
+		// Store custom labels defined by the user.
+ CustomLabels []string
+
+ Created timeutil.TimeStamp `xorm:"created"`
+ Updated timeutil.TimeStamp `xorm:"updated"`
+ Deleted timeutil.TimeStamp `xorm:"deleted"`
+ }
+
+ type ActionRunnerToken struct {
+ ID int64
+ Token string `xorm:"UNIQUE"`
+ OwnerID int64 `xorm:"index"` // org level runner, 0 means system
+		RepoID  int64  `xorm:"index"` // repo level runner; if owner_id is also zero, it's a global runner
+ IsActive bool
+
+ Created timeutil.TimeStamp `xorm:"created"`
+ Updated timeutil.TimeStamp `xorm:"updated"`
+ Deleted timeutil.TimeStamp `xorm:"deleted"`
+ }
+
+ type ActionRun struct {
+ ID int64
+ Title string
+ RepoID int64 `xorm:"index unique(repo_index)"`
+ OwnerID int64 `xorm:"index"`
+ WorkflowID string `xorm:"index"` // the name of workflow file
+ Index int64 `xorm:"index unique(repo_index)"` // a unique number for each run of a repository
+ TriggerUserID int64
+ Ref string
+ CommitSHA string
+ Event string
+ IsForkPullRequest bool
+ EventPayload string `xorm:"LONGTEXT"`
+ Status int `xorm:"index"`
+ Started timeutil.TimeStamp
+ Stopped timeutil.TimeStamp
+ Created timeutil.TimeStamp `xorm:"created"`
+ Updated timeutil.TimeStamp `xorm:"updated"`
+ }
+
+ type ActionRunJob struct {
+ ID int64
+ RunID int64 `xorm:"index"`
+ RepoID int64 `xorm:"index"`
+ OwnerID int64 `xorm:"index"`
+ CommitSHA string `xorm:"index"`
+ IsForkPullRequest bool
+ Name string `xorm:"VARCHAR(255)"`
+ Attempt int64
+ WorkflowPayload []byte
+ JobID string `xorm:"VARCHAR(255)"` // job id in workflow, not job's id
+ Needs []string `xorm:"JSON TEXT"`
+ RunsOn []string `xorm:"JSON TEXT"`
+ TaskID int64 // the latest task of the job
+ Status int `xorm:"index"`
+ Started timeutil.TimeStamp
+ Stopped timeutil.TimeStamp
+ Created timeutil.TimeStamp `xorm:"created"`
+ Updated timeutil.TimeStamp `xorm:"updated index"`
+ }
+
+ type Repository struct {
+ NumActionRuns int `xorm:"NOT NULL DEFAULT 0"`
+ NumClosedActionRuns int `xorm:"NOT NULL DEFAULT 0"`
+ }
+
+ type ActionRunIndex db.ResourceIndex
+
+ type ActionTask struct {
+ ID int64
+ JobID int64
+ Attempt int64
+ RunnerID int64 `xorm:"index"`
+ Status int `xorm:"index"`
+ Started timeutil.TimeStamp `xorm:"index"`
+ Stopped timeutil.TimeStamp
+
+ RepoID int64 `xorm:"index"`
+ OwnerID int64 `xorm:"index"`
+ CommitSHA string `xorm:"index"`
+ IsForkPullRequest bool
+
+ TokenHash string `xorm:"UNIQUE"` // sha256 of token
+ TokenSalt string
+ TokenLastEight string `xorm:"index token_last_eight"`
+
+ LogFilename string // file name of log
+ LogInStorage bool // read log from database or from storage
+ LogLength int64 // lines count
+ LogSize int64 // blob size
+ LogIndexes []int64 `xorm:"LONGBLOB"` // line number to offset
+ LogExpired bool // files that are too old will be deleted
+
+ Created timeutil.TimeStamp `xorm:"created"`
+ Updated timeutil.TimeStamp `xorm:"updated index"`
+ }
+
+ type ActionTaskStep struct {
+ ID int64
+ Name string `xorm:"VARCHAR(255)"`
+ TaskID int64 `xorm:"index unique(task_index)"`
+ Index int64 `xorm:"index unique(task_index)"`
+ RepoID int64 `xorm:"index"`
+ Status int `xorm:"index"`
+ LogIndex int64
+ LogLength int64
+ Started timeutil.TimeStamp
+ Stopped timeutil.TimeStamp
+ Created timeutil.TimeStamp `xorm:"created"`
+ Updated timeutil.TimeStamp `xorm:"updated"`
+ }
+
+ type dbfsMeta struct {
+ ID int64 `xorm:"pk autoincr"`
+ FullPath string `xorm:"VARCHAR(500) UNIQUE NOT NULL"`
+ BlockSize int64 `xorm:"BIGINT NOT NULL"`
+ FileSize int64 `xorm:"BIGINT NOT NULL"`
+ CreateTimestamp int64 `xorm:"BIGINT NOT NULL"`
+ ModifyTimestamp int64 `xorm:"BIGINT NOT NULL"`
+ }
+
+ type dbfsData struct {
+ ID int64 `xorm:"pk autoincr"`
+ Revision int64 `xorm:"BIGINT NOT NULL"`
+ MetaID int64 `xorm:"BIGINT index(meta_offset) NOT NULL"`
+ BlobOffset int64 `xorm:"BIGINT index(meta_offset) NOT NULL"`
+ BlobSize int64 `xorm:"BIGINT NOT NULL"`
+ BlobData []byte `xorm:"BLOB NOT NULL"`
+ }
+
+ return x.Sync(
+ new(ActionRunner),
+ new(ActionRunnerToken),
+ new(ActionRun),
+ new(ActionRunJob),
+ new(Repository),
+ new(ActionRunIndex),
+ new(ActionTask),
+ new(ActionTaskStep),
+ new(dbfsMeta),
+ new(dbfsData),
+ )
+}
diff --git a/models/migrations/v1_19/v241.go b/models/migrations/v1_19/v241.go
new file mode 100644
index 0000000..a617d6f
--- /dev/null
+++ b/models/migrations/v1_19/v241.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_19 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+// AddCardTypeToProjectTable: add CardType column, setting existing rows to CardTypeTextOnly
+func AddCardTypeToProjectTable(x *xorm.Engine) error {
+ type Project struct {
+ CardType int `xorm:"NOT NULL DEFAULT 0"`
+ }
+
+ return x.Sync(new(Project))
+}
diff --git a/models/migrations/v1_19/v242.go b/models/migrations/v1_19/v242.go
new file mode 100644
index 0000000..4470835
--- /dev/null
+++ b/models/migrations/v1_19/v242.go
@@ -0,0 +1,26 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_19 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+)
+
+// AlterPublicGPGKeyImportContentFieldToMediumText: set GPGKeyImport Content field to MEDIUMTEXT
+func AlterPublicGPGKeyImportContentFieldToMediumText(x *xorm.Engine) error {
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ if setting.Database.Type.IsMySQL() {
+ if _, err := sess.Exec("ALTER TABLE `gpg_key_import` CHANGE `content` `content` MEDIUMTEXT"); err != nil {
+ return err
+ }
+ }
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_19/v243.go b/models/migrations/v1_19/v243.go
new file mode 100644
index 0000000..55bbfaf
--- /dev/null
+++ b/models/migrations/v1_19/v243.go
@@ -0,0 +1,16 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_19 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddExclusiveLabel(x *xorm.Engine) error {
+ type Label struct {
+ Exclusive bool
+ }
+
+ return x.Sync(new(Label))
+}
diff --git a/models/migrations/v1_20/main_test.go b/models/migrations/v1_20/main_test.go
new file mode 100644
index 0000000..e8d95b0
--- /dev/null
+++ b/models/migrations/v1_20/main_test.go
@@ -0,0 +1,14 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_20 //nolint
+
+import (
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+)
+
+func TestMain(m *testing.M) {
+ migration_tests.MainTest(m)
+}
diff --git a/models/migrations/v1_20/v244.go b/models/migrations/v1_20/v244.go
new file mode 100644
index 0000000..977566a
--- /dev/null
+++ b/models/migrations/v1_20/v244.go
@@ -0,0 +1,22 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_20 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddNeedApprovalToActionRun(x *xorm.Engine) error {
+ /*
+ New index: TriggerUserID
+ New fields: NeedApproval, ApprovedBy
+ */
+ type ActionRun struct {
+ TriggerUserID int64 `xorm:"index"`
+ NeedApproval bool // may need approval if it's a fork pull request
+ ApprovedBy int64 `xorm:"index"` // who approved
+ }
+
+ return x.Sync(new(ActionRun))
+}
diff --git a/models/migrations/v1_20/v245.go b/models/migrations/v1_20/v245.go
new file mode 100644
index 0000000..b0d4c21
--- /dev/null
+++ b/models/migrations/v1_20/v245.go
@@ -0,0 +1,69 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_20 //nolint
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/migrations/base"
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+)
+
+func RenameWebhookOrgToOwner(x *xorm.Engine) error {
+ type Webhook struct {
+ OrgID int64 `xorm:"INDEX"`
+ }
+
+ // This migration may be re-run, so check whether it has already been applied
+ ownerExist, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "webhook", "owner_id")
+ if err != nil {
+ return err
+ }
+
+ if ownerExist {
+ orgExist, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "webhook", "org_id")
+ if err != nil {
+ return err
+ }
+ if !orgExist {
+ return nil
+ }
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ if err := sess.Sync(new(Webhook)); err != nil {
+ return err
+ }
+
+ if ownerExist {
+ if err := base.DropTableColumns(sess, "webhook", "owner_id"); err != nil {
+ return err
+ }
+ }
+
+ if setting.Database.Type.IsMySQL() {
+ inferredTable, err := x.TableInfo(new(Webhook))
+ if err != nil {
+ return err
+ }
+ sqlType := x.Dialect().SQLType(inferredTable.GetColumn("org_id"))
+ if _, err := sess.Exec(fmt.Sprintf("ALTER TABLE `webhook` CHANGE org_id owner_id %s", sqlType)); err != nil {
+ return err
+ }
+ } else {
+ if _, err := sess.Exec("ALTER TABLE `webhook` RENAME COLUMN org_id TO owner_id"); err != nil {
+ return err
+ }
+ }
+
+ return sess.Commit()
+}
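
The MySQL branch above exists because MySQL only gained RENAME COLUMN in 8.0, so the migration falls back to CHANGE, which requires restating the full column type. A simplified standalone sketch of that branching (renameColumnSQL is a hypothetical helper; the real code derives sqlType from xorm's table info):

    package main

    import "fmt"

    // renameColumnSQL picks the rename statement per dialect.
    func renameColumnSQL(dialect, table, from, to, sqlType string) string {
    	if dialect == "mysql" {
    		// Older MySQL has no RENAME COLUMN; CHANGE needs the column type.
    		return fmt.Sprintf("ALTER TABLE `%s` CHANGE %s %s %s", table, from, to, sqlType)
    	}
    	return fmt.Sprintf("ALTER TABLE `%s` RENAME COLUMN %s TO %s", table, from, to)
    }

    func main() {
    	fmt.Println(renameColumnSQL("mysql", "webhook", "org_id", "owner_id", "BIGINT"))
    	fmt.Println(renameColumnSQL("postgres", "webhook", "org_id", "owner_id", "BIGINT"))
    }
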
diff --git a/models/migrations/v1_20/v246.go b/models/migrations/v1_20/v246.go
new file mode 100644
index 0000000..e6340ef
--- /dev/null
+++ b/models/migrations/v1_20/v246.go
@@ -0,0 +1,16 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_20 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddNewColumnForProject(x *xorm.Engine) error {
+ type Project struct {
+ OwnerID int64 `xorm:"INDEX"`
+ }
+
+ return x.Sync(new(Project))
+}
diff --git a/models/migrations/v1_20/v247.go b/models/migrations/v1_20/v247.go
new file mode 100644
index 0000000..59fc5c4
--- /dev/null
+++ b/models/migrations/v1_20/v247.go
@@ -0,0 +1,50 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_20 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/log"
+
+ "xorm.io/xorm"
+)
+
+// FixIncorrectProjectType: set individual projects' type from 3 (TypeOrganization) to 1 (TypeIndividual)
+func FixIncorrectProjectType(x *xorm.Engine) error {
+ type User struct {
+ ID int64 `xorm:"pk autoincr"`
+ Type int
+ }
+
+ const (
+ UserTypeIndividual int = 0
+
+ TypeIndividual uint8 = 1
+ TypeOrganization uint8 = 3
+ )
+
+ type Project struct {
+ OwnerID int64 `xorm:"INDEX"`
+ Type uint8
+ Owner *User `xorm:"extends"`
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ count, err := sess.Table("project").
+ Where("type = ? AND owner_id IN (SELECT id FROM `user` WHERE type = ?)", TypeOrganization, UserTypeIndividual).
+ Update(&Project{
+ Type: TypeIndividual,
+ })
+ if err != nil {
+ return err
+ }
+ log.Debug("Updated %d projects to belong to a user instead of an organization", count)
+
+ return sess.Commit()
+}
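
xorm's struct-based Update only writes non-zero fields, which is why passing &Project{Type: TypeIndividual} updates just the type column. A pure-Go, in-memory rendition of the same WHERE-subquery semantics, using the constants above (illustrative only, not the migration's code path):

    package main

    import "fmt"

    const (
    	userTypeIndividual = 0 // user rows selected by the subquery
    	typeIndividual     = 1
    	typeOrganization   = 3
    )

    type project struct {
    	ownerID int64
    	typ     uint8
    }

    func main() {
    	// owner 1 is an individual user; owner 2 is an organization
    	individualOwners := map[int64]bool{1: true}
    	projects := []project{{1, typeOrganization}, {2, typeOrganization}}

    	count := 0
    	for i := range projects {
    		// WHERE type = 3 AND owner_id IN (SELECT id FROM user WHERE type = 0)
    		if projects[i].typ == typeOrganization && individualOwners[projects[i].ownerID] {
    			projects[i].typ = typeIndividual // SET type = 1
    			count++
    		}
    	}
    	fmt.Printf("updated %d project(s): %v\n", count, projects)
    }
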
diff --git a/models/migrations/v1_20/v248.go b/models/migrations/v1_20/v248.go
new file mode 100644
index 0000000..4055521
--- /dev/null
+++ b/models/migrations/v1_20/v248.go
@@ -0,0 +1,14 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_20 //nolint
+
+import "xorm.io/xorm"
+
+func AddVersionToActionRunner(x *xorm.Engine) error {
+ type ActionRunner struct {
+ Version string `xorm:"VARCHAR(64)"` // the version of act_runner
+ }
+
+ return x.Sync(new(ActionRunner))
+}
diff --git a/models/migrations/v1_20/v249.go b/models/migrations/v1_20/v249.go
new file mode 100644
index 0000000..02951a7
--- /dev/null
+++ b/models/migrations/v1_20/v249.go
@@ -0,0 +1,45 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_20 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+ "xorm.io/xorm/schemas"
+)
+
+type Action struct {
+ UserID int64 // Receiver user id.
+ ActUserID int64 // Action user id.
+ RepoID int64
+ IsDeleted bool `xorm:"NOT NULL DEFAULT false"`
+ IsPrivate bool `xorm:"NOT NULL DEFAULT false"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+}
+
+// TableName sets the name of this table
+func (a *Action) TableName() string {
+ return "action"
+}
+
+// TableIndices implements xorm's TableIndices interface
+func (a *Action) TableIndices() []*schemas.Index {
+ repoIndex := schemas.NewIndex("r_u_d", schemas.IndexType)
+ repoIndex.AddColumn("repo_id", "user_id", "is_deleted")
+
+ actUserIndex := schemas.NewIndex("au_r_c_u_d", schemas.IndexType)
+ actUserIndex.AddColumn("act_user_id", "repo_id", "created_unix", "user_id", "is_deleted")
+
+ cudIndex := schemas.NewIndex("c_u_d", schemas.IndexType)
+ cudIndex.AddColumn("created_unix", "user_id", "is_deleted")
+
+ indices := []*schemas.Index{actUserIndex, repoIndex, cudIndex}
+
+ return indices
+}
+
+func ImproveActionTableIndices(x *xorm.Engine) error {
+ return x.Sync(new(Action))
+}
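
Sync picks up these composite indices through the TableIndices hook rather than per-column tags, which is what lets one column participate in several indices. A minimal sketch of what the hook builds, runnable without a database (field names follow xorm.io/xorm/schemas):

    package main

    import (
    	"fmt"

    	"xorm.io/xorm/schemas"
    )

    func main() {
    	// Same shape as the r_u_d index above.
    	idx := schemas.NewIndex("r_u_d", schemas.IndexType)
    	idx.AddColumn("repo_id", "user_id", "is_deleted")
    	fmt.Println(idx.Name, idx.Cols) // r_u_d [repo_id user_id is_deleted]
    }
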
diff --git a/models/migrations/v1_20/v250.go b/models/migrations/v1_20/v250.go
new file mode 100644
index 0000000..86388ef
--- /dev/null
+++ b/models/migrations/v1_20/v250.go
@@ -0,0 +1,135 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_20 //nolint
+
+import (
+ "strings"
+
+ "code.gitea.io/gitea/modules/json"
+
+ "xorm.io/xorm"
+)
+
+func ChangeContainerMetadataMultiArch(x *xorm.Engine) error {
+ sess := x.NewSession()
+ defer sess.Close()
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ type PackageVersion struct {
+ ID int64 `xorm:"pk autoincr"`
+ MetadataJSON string `xorm:"metadata_json"`
+ }
+
+ type PackageBlob struct{}
+
+ // Get all relevant packages (manifest list images have a container.manifest.reference property)
+
+ var pvs []*PackageVersion
+ err := sess.
+ Table("package_version").
+ Select("id, metadata_json").
+ Where("id IN (SELECT DISTINCT ref_id FROM package_property WHERE ref_type = 0 AND name = 'container.manifest.reference')").
+ Find(&pvs)
+ if err != nil {
+ return err
+ }
+
+ type MetadataOld struct {
+ Type string `json:"type"`
+ IsTagged bool `json:"is_tagged"`
+ Platform string `json:"platform,omitempty"`
+ Description string `json:"description,omitempty"`
+ Authors []string `json:"authors,omitempty"`
+ Licenses string `json:"license,omitempty"`
+ ProjectURL string `json:"project_url,omitempty"`
+ RepositoryURL string `json:"repository_url,omitempty"`
+ DocumentationURL string `json:"documentation_url,omitempty"`
+ Labels map[string]string `json:"labels,omitempty"`
+ ImageLayers []string `json:"layer_creation,omitempty"`
+ MultiArch map[string]string `json:"multiarch,omitempty"`
+ }
+
+ type Manifest struct {
+ Platform string `json:"platform"`
+ Digest string `json:"digest"`
+ Size int64 `json:"size"`
+ }
+
+ type MetadataNew struct {
+ Type string `json:"type"`
+ IsTagged bool `json:"is_tagged"`
+ Platform string `json:"platform,omitempty"`
+ Description string `json:"description,omitempty"`
+ Authors []string `json:"authors,omitempty"`
+ Licenses string `json:"license,omitempty"`
+ ProjectURL string `json:"project_url,omitempty"`
+ RepositoryURL string `json:"repository_url,omitempty"`
+ DocumentationURL string `json:"documentation_url,omitempty"`
+ Labels map[string]string `json:"labels,omitempty"`
+ ImageLayers []string `json:"layer_creation,omitempty"`
+ Manifests []*Manifest `json:"manifests,omitempty"`
+ }
+
+ for _, pv := range pvs {
+ var old *MetadataOld
+ if err := json.Unmarshal([]byte(pv.MetadataJSON), &old); err != nil {
+ return err
+ }
+
+ // Calculate the size of every contained manifest
+
+ manifests := make([]*Manifest, 0, len(old.MultiArch))
+ for platform, digest := range old.MultiArch {
+ size, err := sess.
+ Table("package_blob").
+ Join("INNER", "package_file", "package_blob.id = package_file.blob_id").
+ Join("INNER", "package_version pv", "pv.id = package_file.version_id").
+ Join("INNER", "package_version pv2", "pv2.package_id = pv.package_id").
+ Where("pv.lower_version = ? AND pv2.id = ?", strings.ToLower(digest), pv.ID).
+ SumInt(new(PackageBlob), "size")
+ if err != nil {
+ return err
+ }
+
+ manifests = append(manifests, &Manifest{
+ Platform: platform,
+ Digest: digest,
+ Size: size,
+ })
+ }
+
+ // Convert to new metadata format
+
+ newMetadata := &MetadataNew{
+ Type: old.Type,
+ IsTagged: old.IsTagged,
+ Platform: old.Platform,
+ Description: old.Description,
+ Authors: old.Authors,
+ Licenses: old.Licenses,
+ ProjectURL: old.ProjectURL,
+ RepositoryURL: old.RepositoryURL,
+ DocumentationURL: old.DocumentationURL,
+ Labels: old.Labels,
+ ImageLayers: old.ImageLayers,
+ Manifests: manifests,
+ }
+
+ metadataJSON, err := json.Marshal(newMetadata)
+ if err != nil {
+ return err
+ }
+
+ pv.MetadataJSON = string(metadataJSON)
+
+ if _, err := sess.ID(pv.ID).Update(pv); err != nil {
+ return err
+ }
+ }
+
+ return sess.Commit()
+}
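
The conversion itself is a plain JSON rewrite: the old multiarch digest map becomes a manifests list carrying per-platform sizes. A self-contained sketch with a hard-coded size, since the real migration sums the sizes from package_blob:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    type manifest struct {
    	Platform string `json:"platform"`
    	Digest   string `json:"digest"`
    	Size     int64  `json:"size"`
    }

    func main() {
    	oldJSON := `{"type":"oci","is_tagged":true,"multiarch":{"linux/amd64":"sha256:abc"}}`
    	var old struct {
    		Type      string            `json:"type"`
    		IsTagged  bool              `json:"is_tagged"`
    		MultiArch map[string]string `json:"multiarch,omitempty"`
    	}
    	if err := json.Unmarshal([]byte(oldJSON), &old); err != nil {
    		panic(err)
    	}
    	// Each multiarch entry becomes one manifest entry.
    	manifests := make([]*manifest, 0, len(old.MultiArch))
    	for platform, digest := range old.MultiArch {
    		manifests = append(manifests, &manifest{Platform: platform, Digest: digest, Size: 1234})
    	}
    	out, err := json.Marshal(map[string]any{"type": old.Type, "is_tagged": old.IsTagged, "manifests": manifests})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(out))
    }
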
diff --git a/models/migrations/v1_20/v251.go b/models/migrations/v1_20/v251.go
new file mode 100644
index 0000000..7743248
--- /dev/null
+++ b/models/migrations/v1_20/v251.go
@@ -0,0 +1,47 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_20 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/log"
+
+ "xorm.io/xorm"
+)
+
+func FixIncorrectOwnerTeamUnitAccessMode(x *xorm.Engine) error {
+ type UnitType int
+ type AccessMode int
+
+ type TeamUnit struct {
+ ID int64 `xorm:"pk autoincr"`
+ OrgID int64 `xorm:"INDEX"`
+ TeamID int64 `xorm:"UNIQUE(s)"`
+ Type UnitType `xorm:"UNIQUE(s)"`
+ AccessMode AccessMode
+ }
+
+ const (
+ // AccessModeOwner owner access
+ AccessModeOwner = 4
+ )
+
+ sess := x.NewSession()
+ defer sess.Close()
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ count, err := sess.Table("team_unit").
+ Where("team_id IN (SELECT id FROM team WHERE authorize = ?)", AccessModeOwner).
+ Update(&TeamUnit{
+ AccessMode: AccessModeOwner,
+ })
+ if err != nil {
+ return err
+ }
+ log.Debug("Updated %d owner team unit access mode to belong to owner instead of none", count)
+
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_20/v252.go b/models/migrations/v1_20/v252.go
new file mode 100644
index 0000000..ab61cd9
--- /dev/null
+++ b/models/migrations/v1_20/v252.go
@@ -0,0 +1,47 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_20 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/log"
+
+ "xorm.io/xorm"
+)
+
+func FixIncorrectAdminTeamUnitAccessMode(x *xorm.Engine) error {
+ type UnitType int
+ type AccessMode int
+
+ type TeamUnit struct {
+ ID int64 `xorm:"pk autoincr"`
+ OrgID int64 `xorm:"INDEX"`
+ TeamID int64 `xorm:"UNIQUE(s)"`
+ Type UnitType `xorm:"UNIQUE(s)"`
+ AccessMode AccessMode
+ }
+
+ const (
+ // AccessModeAdmin admin access
+ AccessModeAdmin = 3
+ )
+
+ sess := x.NewSession()
+ defer sess.Close()
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ count, err := sess.Table("team_unit").
+ Where("team_id IN (SELECT id FROM team WHERE authorize = ?)", AccessModeAdmin).
+ Update(&TeamUnit{
+ AccessMode: AccessModeAdmin,
+ })
+ if err != nil {
+ return err
+ }
+ log.Debug("Updated %d admin team unit access mode to belong to admin instead of none", count)
+
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_20/v253.go b/models/migrations/v1_20/v253.go
new file mode 100644
index 0000000..96c494b
--- /dev/null
+++ b/models/migrations/v1_20/v253.go
@@ -0,0 +1,49 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_20 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/log"
+
+ "xorm.io/xorm"
+)
+
+func FixExternalTrackerAndExternalWikiAccessModeInOwnerAndAdminTeam(x *xorm.Engine) error {
+ type UnitType int
+ type AccessMode int
+
+ type TeamUnit struct {
+ ID int64 `xorm:"pk autoincr"`
+ Type UnitType `xorm:"UNIQUE(s)"`
+ AccessMode AccessMode
+ }
+
+ const (
+ // AccessModeRead read access
+ AccessModeRead = 1
+
+ // Unit Type
+ TypeExternalWiki = 6
+ TypeExternalTracker = 7
+ )
+
+ sess := x.NewSession()
+ defer sess.Close()
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ count, err := sess.Table("team_unit").
+ Where("type IN (?, ?) AND access_mode > ?", TypeExternalWiki, TypeExternalTracker, AccessModeRead).
+ Update(&TeamUnit{
+ AccessMode: AccessModeRead,
+ })
+ if err != nil {
+ return err
+ }
+ log.Debug("Updated %d ExternalTracker and ExternalWiki access mode to belong to owner and admin", count)
+
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_20/v254.go b/models/migrations/v1_20/v254.go
new file mode 100644
index 0000000..1e26979
--- /dev/null
+++ b/models/migrations/v1_20/v254.go
@@ -0,0 +1,18 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_20 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddActionTaskOutputTable(x *xorm.Engine) error {
+ type ActionTaskOutput struct {
+ ID int64
+ TaskID int64 `xorm:"INDEX UNIQUE(task_id_output_key)"`
+ OutputKey string `xorm:"VARCHAR(255) UNIQUE(task_id_output_key)"`
+ OutputValue string `xorm:"MEDIUMTEXT"`
+ }
+ return x.Sync(new(ActionTaskOutput))
+}
diff --git a/models/migrations/v1_20/v255.go b/models/migrations/v1_20/v255.go
new file mode 100644
index 0000000..14b70f8
--- /dev/null
+++ b/models/migrations/v1_20/v255.go
@@ -0,0 +1,23 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_20 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func AddArchivedUnixToRepository(x *xorm.Engine) error {
+ type Repository struct {
+ ArchivedUnix timeutil.TimeStamp `xorm:"DEFAULT 0"`
+ }
+
+ if err := x.Sync(new(Repository)); err != nil {
+ return err
+ }
+
+ _, err := x.Exec("UPDATE repository SET archived_unix = updated_unix WHERE is_archived = ? AND archived_unix = 0", true)
+ return err
+}
diff --git a/models/migrations/v1_20/v256.go b/models/migrations/v1_20/v256.go
new file mode 100644
index 0000000..822153b
--- /dev/null
+++ b/models/migrations/v1_20/v256.go
@@ -0,0 +1,23 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_20 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddIsInternalColumnToPackage(x *xorm.Engine) error {
+ type Package struct {
+ ID int64 `xorm:"pk autoincr"`
+ OwnerID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ RepoID int64 `xorm:"INDEX"`
+ Type string `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ Name string `xorm:"NOT NULL"`
+ LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ SemverCompatible bool `xorm:"NOT NULL DEFAULT false"`
+ IsInternal bool `xorm:"NOT NULL DEFAULT false"`
+ }
+
+ return x.Sync(new(Package))
+}
diff --git a/models/migrations/v1_20/v257.go b/models/migrations/v1_20/v257.go
new file mode 100644
index 0000000..6c6ca4c
--- /dev/null
+++ b/models/migrations/v1_20/v257.go
@@ -0,0 +1,33 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_20 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func CreateActionArtifactTable(x *xorm.Engine) error {
+ // ActionArtifact is a file that is stored in the artifact storage.
+ type ActionArtifact struct {
+ ID int64 `xorm:"pk autoincr"`
+ RunID int64 `xorm:"index UNIQUE(runid_name)"` // The run id of the artifact
+ RunnerID int64
+ RepoID int64 `xorm:"index"`
+ OwnerID int64
+ CommitSHA string
+ StoragePath string // The path to the artifact in the storage
+ FileSize int64 // The size of the artifact in bytes
+ FileCompressedSize int64 // The size of the artifact in bytes after gzip compression
+ ContentEncoding string // The content encoding of the artifact
+ ArtifactPath string // The path to the artifact when runner uploads it
+ ArtifactName string `xorm:"UNIQUE(runid_name)"` // The name of the artifact when runner uploads it
+ Status int64 `xorm:"index"` // The status of the artifact
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"updated index"`
+ }
+
+ return x.Sync(new(ActionArtifact))
+}
diff --git a/models/migrations/v1_20/v258.go b/models/migrations/v1_20/v258.go
new file mode 100644
index 0000000..47174ce
--- /dev/null
+++ b/models/migrations/v1_20/v258.go
@@ -0,0 +1,16 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_20 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddPinOrderToIssue(x *xorm.Engine) error {
+ type Issue struct {
+ PinOrder int `xorm:"DEFAULT 0"`
+ }
+
+ return x.Sync(new(Issue))
+}
diff --git a/models/migrations/v1_20/v259.go b/models/migrations/v1_20/v259.go
new file mode 100644
index 0000000..5b8ced4
--- /dev/null
+++ b/models/migrations/v1_20/v259.go
@@ -0,0 +1,360 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_20 //nolint
+
+import (
+ "fmt"
+ "strings"
+
+ "code.gitea.io/gitea/modules/log"
+
+ "xorm.io/xorm"
+)
+
+// unknownAccessTokenScope represents the scope of an access token that isn't
+// known to be either an old-style or a new-style token scope.
+type unknownAccessTokenScope string
+
+// AccessTokenScope represents the scope for an access token.
+type AccessTokenScope string
+
+// for all categories, write implies read
+const (
+ AccessTokenScopeAll AccessTokenScope = "all"
+ AccessTokenScopePublicOnly AccessTokenScope = "public-only" // limited to public orgs/repos
+
+ AccessTokenScopeReadActivityPub AccessTokenScope = "read:activitypub"
+ AccessTokenScopeWriteActivityPub AccessTokenScope = "write:activitypub"
+
+ AccessTokenScopeReadAdmin AccessTokenScope = "read:admin"
+ AccessTokenScopeWriteAdmin AccessTokenScope = "write:admin"
+
+ AccessTokenScopeReadMisc AccessTokenScope = "read:misc"
+ AccessTokenScopeWriteMisc AccessTokenScope = "write:misc"
+
+ AccessTokenScopeReadNotification AccessTokenScope = "read:notification"
+ AccessTokenScopeWriteNotification AccessTokenScope = "write:notification"
+
+ AccessTokenScopeReadOrganization AccessTokenScope = "read:organization"
+ AccessTokenScopeWriteOrganization AccessTokenScope = "write:organization"
+
+ AccessTokenScopeReadPackage AccessTokenScope = "read:package"
+ AccessTokenScopeWritePackage AccessTokenScope = "write:package"
+
+ AccessTokenScopeReadIssue AccessTokenScope = "read:issue"
+ AccessTokenScopeWriteIssue AccessTokenScope = "write:issue"
+
+ AccessTokenScopeReadRepository AccessTokenScope = "read:repository"
+ AccessTokenScopeWriteRepository AccessTokenScope = "write:repository"
+
+ AccessTokenScopeReadUser AccessTokenScope = "read:user"
+ AccessTokenScopeWriteUser AccessTokenScope = "write:user"
+)
+
+// accessTokenScopeBitmap represents a bitmap of access token scopes.
+type accessTokenScopeBitmap uint64
+
+// Bitmap of each scope, including the child scopes.
+const (
+ // accessTokenScopeAllBits is the bitmap of all access token scopes
+ accessTokenScopeAllBits accessTokenScopeBitmap = accessTokenScopeWriteActivityPubBits |
+ accessTokenScopeWriteAdminBits | accessTokenScopeWriteMiscBits | accessTokenScopeWriteNotificationBits |
+ accessTokenScopeWriteOrganizationBits | accessTokenScopeWritePackageBits | accessTokenScopeWriteIssueBits |
+ accessTokenScopeWriteRepositoryBits | accessTokenScopeWriteUserBits
+
+ accessTokenScopePublicOnlyBits accessTokenScopeBitmap = 1 << iota
+
+ accessTokenScopeReadActivityPubBits accessTokenScopeBitmap = 1 << iota
+ accessTokenScopeWriteActivityPubBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadActivityPubBits
+
+ accessTokenScopeReadAdminBits accessTokenScopeBitmap = 1 << iota
+ accessTokenScopeWriteAdminBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadAdminBits
+
+ accessTokenScopeReadMiscBits accessTokenScopeBitmap = 1 << iota
+ accessTokenScopeWriteMiscBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadMiscBits
+
+ accessTokenScopeReadNotificationBits accessTokenScopeBitmap = 1 << iota
+ accessTokenScopeWriteNotificationBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadNotificationBits
+
+ accessTokenScopeReadOrganizationBits accessTokenScopeBitmap = 1 << iota
+ accessTokenScopeWriteOrganizationBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadOrganizationBits
+
+ accessTokenScopeReadPackageBits accessTokenScopeBitmap = 1 << iota
+ accessTokenScopeWritePackageBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadPackageBits
+
+ accessTokenScopeReadIssueBits accessTokenScopeBitmap = 1 << iota
+ accessTokenScopeWriteIssueBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadIssueBits
+
+ accessTokenScopeReadRepositoryBits accessTokenScopeBitmap = 1 << iota
+ accessTokenScopeWriteRepositoryBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadRepositoryBits
+
+ accessTokenScopeReadUserBits accessTokenScopeBitmap = 1 << iota
+ accessTokenScopeWriteUserBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadUserBits
+
+ // The current implementation only supports up to 64 token scopes.
+ // If we need to support > 64 scopes,
+ // refactoring the whole implementation in this file (and only this file) is needed.
+)
+
+// allAccessTokenScopes contains all access token scopes.
+// The order is important: parent scope must precede child scopes.
+var allAccessTokenScopes = []AccessTokenScope{
+ AccessTokenScopePublicOnly,
+ AccessTokenScopeWriteActivityPub, AccessTokenScopeReadActivityPub,
+ AccessTokenScopeWriteAdmin, AccessTokenScopeReadAdmin,
+ AccessTokenScopeWriteMisc, AccessTokenScopeReadMisc,
+ AccessTokenScopeWriteNotification, AccessTokenScopeReadNotification,
+ AccessTokenScopeWriteOrganization, AccessTokenScopeReadOrganization,
+ AccessTokenScopeWritePackage, AccessTokenScopeReadPackage,
+ AccessTokenScopeWriteIssue, AccessTokenScopeReadIssue,
+ AccessTokenScopeWriteRepository, AccessTokenScopeReadRepository,
+ AccessTokenScopeWriteUser, AccessTokenScopeReadUser,
+}
+
+// allAccessTokenScopeBits maps each access token scope to its bitmap.
+var allAccessTokenScopeBits = map[AccessTokenScope]accessTokenScopeBitmap{
+ AccessTokenScopeAll: accessTokenScopeAllBits,
+ AccessTokenScopePublicOnly: accessTokenScopePublicOnlyBits,
+ AccessTokenScopeReadActivityPub: accessTokenScopeReadActivityPubBits,
+ AccessTokenScopeWriteActivityPub: accessTokenScopeWriteActivityPubBits,
+ AccessTokenScopeReadAdmin: accessTokenScopeReadAdminBits,
+ AccessTokenScopeWriteAdmin: accessTokenScopeWriteAdminBits,
+ AccessTokenScopeReadMisc: accessTokenScopeReadMiscBits,
+ AccessTokenScopeWriteMisc: accessTokenScopeWriteMiscBits,
+ AccessTokenScopeReadNotification: accessTokenScopeReadNotificationBits,
+ AccessTokenScopeWriteNotification: accessTokenScopeWriteNotificationBits,
+ AccessTokenScopeReadOrganization: accessTokenScopeReadOrganizationBits,
+ AccessTokenScopeWriteOrganization: accessTokenScopeWriteOrganizationBits,
+ AccessTokenScopeReadPackage: accessTokenScopeReadPackageBits,
+ AccessTokenScopeWritePackage: accessTokenScopeWritePackageBits,
+ AccessTokenScopeReadIssue: accessTokenScopeReadIssueBits,
+ AccessTokenScopeWriteIssue: accessTokenScopeWriteIssueBits,
+ AccessTokenScopeReadRepository: accessTokenScopeReadRepositoryBits,
+ AccessTokenScopeWriteRepository: accessTokenScopeWriteRepositoryBits,
+ AccessTokenScopeReadUser: accessTokenScopeReadUserBits,
+ AccessTokenScopeWriteUser: accessTokenScopeWriteUserBits,
+}
+
+// hasScope returns true if the string has the given scope
+func (bitmap accessTokenScopeBitmap) hasScope(scope AccessTokenScope) (bool, error) {
+ expectedBits, ok := allAccessTokenScopeBits[scope]
+ if !ok {
+ return false, fmt.Errorf("invalid access token scope: %s", scope)
+ }
+
+ return bitmap&expectedBits == expectedBits, nil
+}
+
+// toScope returns a normalized scope string without any duplicates.
+func (bitmap accessTokenScopeBitmap) toScope(unknownScopes *[]unknownAccessTokenScope) AccessTokenScope {
+ var scopes []string
+
+ // Preserve unknown scopes, and put them at the beginning so that it's clear
+ // when debugging.
+ if unknownScopes != nil {
+ for _, unknownScope := range *unknownScopes {
+ scopes = append(scopes, string(unknownScope))
+ }
+ }
+
+ // iterate over all scopes, and reconstruct the bitmap
+ // if the reconstructed bitmap doesn't change, then the scope is already included
+ var reconstruct accessTokenScopeBitmap
+
+ for _, singleScope := range allAccessTokenScopes {
+ // no need for error checking here, since we know the scope is valid
+ if ok, _ := bitmap.hasScope(singleScope); ok {
+ current := reconstruct | allAccessTokenScopeBits[singleScope]
+ if current == reconstruct {
+ continue
+ }
+
+ reconstruct = current
+ scopes = append(scopes, string(singleScope))
+ }
+ }
+
+ scope := AccessTokenScope(strings.Join(scopes, ","))
+ scope = AccessTokenScope(strings.ReplaceAll(
+ string(scope),
+ "write:activitypub,write:admin,write:misc,write:notification,write:organization,write:package,write:issue,write:repository,write:user",
+ "all",
+ ))
+ return scope
+}
+
+// parse the scope string into a bitmap, thus removing possible duplicates.
+func (s AccessTokenScope) parse() (accessTokenScopeBitmap, *[]unknownAccessTokenScope) {
+ var bitmap accessTokenScopeBitmap
+ var unknownScopes []unknownAccessTokenScope
+
+ // The following is the more performant equivalent of 'for _, v := range strings.Split(remainingScopes, ",")' as this is hot code
+ remainingScopes := string(s)
+ for len(remainingScopes) > 0 {
+ i := strings.IndexByte(remainingScopes, ',')
+ var v string
+ if i < 0 {
+ v = remainingScopes
+ remainingScopes = ""
+ } else if i+1 >= len(remainingScopes) {
+ v = remainingScopes[:i]
+ remainingScopes = ""
+ } else {
+ v = remainingScopes[:i]
+ remainingScopes = remainingScopes[i+1:]
+ }
+ singleScope := AccessTokenScope(v)
+ if singleScope == "" {
+ continue
+ }
+ if singleScope == AccessTokenScopeAll {
+ bitmap |= accessTokenScopeAllBits
+ continue
+ }
+
+ bits, ok := allAccessTokenScopeBits[singleScope]
+ if !ok {
+ unknownScopes = append(unknownScopes, unknownAccessTokenScope(string(singleScope)))
+ }
+ bitmap |= bits
+ }
+
+ return bitmap, &unknownScopes
+}
+
+// NormalizePreservingUnknown returns a normalized scope string without any
+// duplicates. Unknown scopes are included.
+func (s AccessTokenScope) NormalizePreservingUnknown() AccessTokenScope {
+ bitmap, unknownScopes := s.parse()
+
+ return bitmap.toScope(unknownScopes)
+}
+
+// OldAccessTokenScope represents the scope for an access token.
+type OldAccessTokenScope string
+
+const (
+ OldAccessTokenScopeAll OldAccessTokenScope = "all"
+
+ OldAccessTokenScopeRepo OldAccessTokenScope = "repo"
+ OldAccessTokenScopeRepoStatus OldAccessTokenScope = "repo:status"
+ OldAccessTokenScopePublicRepo OldAccessTokenScope = "public_repo"
+
+ OldAccessTokenScopeAdminOrg OldAccessTokenScope = "admin:org"
+ OldAccessTokenScopeWriteOrg OldAccessTokenScope = "write:org"
+ OldAccessTokenScopeReadOrg OldAccessTokenScope = "read:org"
+
+ OldAccessTokenScopeAdminPublicKey OldAccessTokenScope = "admin:public_key"
+ OldAccessTokenScopeWritePublicKey OldAccessTokenScope = "write:public_key"
+ OldAccessTokenScopeReadPublicKey OldAccessTokenScope = "read:public_key"
+
+ OldAccessTokenScopeAdminRepoHook OldAccessTokenScope = "admin:repo_hook"
+ OldAccessTokenScopeWriteRepoHook OldAccessTokenScope = "write:repo_hook"
+ OldAccessTokenScopeReadRepoHook OldAccessTokenScope = "read:repo_hook"
+
+ OldAccessTokenScopeAdminOrgHook OldAccessTokenScope = "admin:org_hook"
+
+ OldAccessTokenScopeNotification OldAccessTokenScope = "notification"
+
+ OldAccessTokenScopeUser OldAccessTokenScope = "user"
+ OldAccessTokenScopeReadUser OldAccessTokenScope = "read:user"
+ OldAccessTokenScopeUserEmail OldAccessTokenScope = "user:email"
+ OldAccessTokenScopeUserFollow OldAccessTokenScope = "user:follow"
+
+ OldAccessTokenScopeDeleteRepo OldAccessTokenScope = "delete_repo"
+
+ OldAccessTokenScopePackage OldAccessTokenScope = "package"
+ OldAccessTokenScopeWritePackage OldAccessTokenScope = "write:package"
+ OldAccessTokenScopeReadPackage OldAccessTokenScope = "read:package"
+ OldAccessTokenScopeDeletePackage OldAccessTokenScope = "delete:package"
+
+ OldAccessTokenScopeAdminGPGKey OldAccessTokenScope = "admin:gpg_key"
+ OldAccessTokenScopeWriteGPGKey OldAccessTokenScope = "write:gpg_key"
+ OldAccessTokenScopeReadGPGKey OldAccessTokenScope = "read:gpg_key"
+
+ OldAccessTokenScopeAdminApplication OldAccessTokenScope = "admin:application"
+ OldAccessTokenScopeWriteApplication OldAccessTokenScope = "write:application"
+ OldAccessTokenScopeReadApplication OldAccessTokenScope = "read:application"
+
+ OldAccessTokenScopeSudo OldAccessTokenScope = "sudo"
+)
+
+var accessTokenScopeMap = map[OldAccessTokenScope][]AccessTokenScope{
+ OldAccessTokenScopeAll: {AccessTokenScopeAll},
+ OldAccessTokenScopeRepo: {AccessTokenScopeWriteRepository},
+ OldAccessTokenScopeRepoStatus: {AccessTokenScopeWriteRepository},
+ OldAccessTokenScopePublicRepo: {AccessTokenScopePublicOnly, AccessTokenScopeWriteRepository},
+ OldAccessTokenScopeAdminOrg: {AccessTokenScopeWriteOrganization},
+ OldAccessTokenScopeWriteOrg: {AccessTokenScopeWriteOrganization},
+ OldAccessTokenScopeReadOrg: {AccessTokenScopeReadOrganization},
+ OldAccessTokenScopeAdminPublicKey: {AccessTokenScopeWriteUser},
+ OldAccessTokenScopeWritePublicKey: {AccessTokenScopeWriteUser},
+ OldAccessTokenScopeReadPublicKey: {AccessTokenScopeReadUser},
+ OldAccessTokenScopeAdminRepoHook: {AccessTokenScopeWriteRepository},
+ OldAccessTokenScopeWriteRepoHook: {AccessTokenScopeWriteRepository},
+ OldAccessTokenScopeReadRepoHook: {AccessTokenScopeReadRepository},
+ OldAccessTokenScopeAdminOrgHook: {AccessTokenScopeWriteOrganization},
+ OldAccessTokenScopeNotification: {AccessTokenScopeWriteNotification},
+ OldAccessTokenScopeUser: {AccessTokenScopeWriteUser},
+ OldAccessTokenScopeReadUser: {AccessTokenScopeReadUser},
+ OldAccessTokenScopeUserEmail: {AccessTokenScopeWriteUser},
+ OldAccessTokenScopeUserFollow: {AccessTokenScopeWriteUser},
+ OldAccessTokenScopeDeleteRepo: {AccessTokenScopeWriteRepository},
+ OldAccessTokenScopePackage: {AccessTokenScopeWritePackage},
+ OldAccessTokenScopeWritePackage: {AccessTokenScopeWritePackage},
+ OldAccessTokenScopeReadPackage: {AccessTokenScopeReadPackage},
+ OldAccessTokenScopeDeletePackage: {AccessTokenScopeWritePackage},
+ OldAccessTokenScopeAdminGPGKey: {AccessTokenScopeWriteUser},
+ OldAccessTokenScopeWriteGPGKey: {AccessTokenScopeWriteUser},
+ OldAccessTokenScopeReadGPGKey: {AccessTokenScopeReadUser},
+ OldAccessTokenScopeAdminApplication: {AccessTokenScopeWriteUser},
+ OldAccessTokenScopeWriteApplication: {AccessTokenScopeWriteUser},
+ OldAccessTokenScopeReadApplication: {AccessTokenScopeReadUser},
+ OldAccessTokenScopeSudo: {AccessTokenScopeWriteAdmin},
+}
+
+type AccessToken struct {
+ ID int64 `xorm:"pk autoincr"`
+ Scope string
+}
+
+func ConvertScopedAccessTokens(x *xorm.Engine) error {
+ var tokens []*AccessToken
+
+ if err := x.Find(&tokens); err != nil {
+ return err
+ }
+
+ for _, token := range tokens {
+ var scopes []string
+ allNewScopesMap := make(map[AccessTokenScope]bool)
+ for _, oldScope := range strings.Split(token.Scope, ",") {
+ if newScopes, exists := accessTokenScopeMap[OldAccessTokenScope(oldScope)]; exists {
+ for _, newScope := range newScopes {
+ allNewScopesMap[newScope] = true
+ }
+ } else {
+ log.Debug("access token scope not recognized as old token scope %s; preserving it", oldScope)
+ scopes = append(scopes, oldScope)
+ }
+ }
+
+ for s := range allNewScopesMap {
+ scopes = append(scopes, string(s))
+ }
+ scope := AccessTokenScope(strings.Join(scopes, ","))
+
+ // normalize the scope
+ normScope := scope.NormalizePreservingUnknown()
+
+ token.Scope = string(normScope)
+
+ // update the db entry with the new scope
+ if _, err := x.Cols("scope").ID(token.ID).Update(token); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
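
ConvertScopedAccessTokens boils down to: map each old scope through accessTokenScopeMap, collect the resulting new scopes in a set, preserve anything unrecognized verbatim, then normalize. A compact, pure-string rendition of that mapping step (a tiny subset of the real table; no bitmap normalization):

    package main

    import (
    	"fmt"
    	"sort"
    	"strings"
    )

    func main() {
    	oldToNew := map[string][]string{
    		"repo":        {"write:repository"},
    		"user:follow": {"write:user"},
    		"sudo":        {"write:admin"},
    	}
    	token := "repo,user:follow,mystery-scope"

    	seen := map[string]bool{}
    	var out []string
    	for _, s := range strings.Split(token, ",") {
    		if news, ok := oldToNew[s]; ok {
    			for _, n := range news {
    				seen[n] = true // set semantics deduplicate overlapping mappings
    			}
    		} else {
    			out = append(out, s) // unknown scopes are preserved verbatim
    		}
    	}
    	var mapped []string
    	for s := range seen {
    		mapped = append(mapped, s)
    	}
    	sort.Strings(mapped) // deterministic order for the demo
    	fmt.Println(strings.Join(append(out, mapped...), ","))
    	// Output: mystery-scope,write:admin,write:repository,write:user
    }
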
diff --git a/models/migrations/v1_20/v259_test.go b/models/migrations/v1_20/v259_test.go
new file mode 100644
index 0000000..ae219ea
--- /dev/null
+++ b/models/migrations/v1_20/v259_test.go
@@ -0,0 +1,111 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_20 //nolint
+
+import (
+ "sort"
+ "strings"
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type testCase struct {
+ Old OldAccessTokenScope
+ New AccessTokenScope
+}
+
+func createOldTokenScope(scopes ...OldAccessTokenScope) OldAccessTokenScope {
+ s := make([]string, 0, len(scopes))
+ for _, os := range scopes {
+ s = append(s, string(os))
+ }
+ return OldAccessTokenScope(strings.Join(s, ","))
+}
+
+func createNewTokenScope(scopes ...AccessTokenScope) AccessTokenScope {
+ s := make([]string, 0, len(scopes))
+ for _, os := range scopes {
+ s = append(s, string(os))
+ }
+ return AccessTokenScope(strings.Join(s, ","))
+}
+
+func Test_ConvertScopedAccessTokens(t *testing.T) {
+ tests := []testCase{
+ {
+ createOldTokenScope(OldAccessTokenScopeRepo, OldAccessTokenScopeUserFollow),
+ createNewTokenScope(AccessTokenScopeWriteRepository, AccessTokenScopeWriteUser),
+ },
+ {
+ createOldTokenScope(OldAccessTokenScopeUser, OldAccessTokenScopeWritePackage, OldAccessTokenScopeSudo),
+ createNewTokenScope(AccessTokenScopeWriteAdmin, AccessTokenScopeWritePackage, AccessTokenScopeWriteUser),
+ },
+ {
+ createOldTokenScope(),
+ createNewTokenScope(),
+ },
+ {
+ createOldTokenScope(OldAccessTokenScopeReadGPGKey, OldAccessTokenScopeReadOrg, OldAccessTokenScopeAll),
+ createNewTokenScope(AccessTokenScopeAll),
+ },
+ {
+ createOldTokenScope(OldAccessTokenScopeReadGPGKey, "invalid"),
+ createNewTokenScope("invalid", AccessTokenScopeReadUser),
+ },
+ }
+
+ // add a test for each individual mapping
+ for oldScope, newScope := range accessTokenScopeMap {
+ tests = append(tests, testCase{
+ oldScope,
+ createNewTokenScope(newScope...),
+ })
+ }
+
+ x, deferable := migration_tests.PrepareTestEnv(t, 0, new(AccessToken))
+ defer deferable()
+ if x == nil || t.Failed() {
+ t.Skip()
+ return
+ }
+
+ // verify that no fixtures were loaded
+ count, err := x.Count(&AccessToken{})
+ require.NoError(t, err)
+ assert.Equal(t, int64(0), count)
+
+ for _, tc := range tests {
+ _, err = x.Insert(&AccessToken{
+ Scope: string(tc.Old),
+ })
+ require.NoError(t, err)
+ }
+
+ // migrate the scopes
+ err = ConvertScopedAccessTokens(x)
+ require.NoError(t, err)
+
+ // migrate the scopes again (migration should be idempotent)
+ err = ConvertScopedAccessTokens(x)
+ require.NoError(t, err)
+
+ tokens := make([]AccessToken, 0)
+ err = x.Find(&tokens)
+ require.NoError(t, err)
+ assert.Equal(t, len(tests), len(tokens))
+
+ // sort the tokens (insertion order by auto-incrementing primary key)
+ sort.Slice(tokens, func(i, j int) bool {
+ return tokens[i].ID < tokens[j].ID
+ })
+
+ // verify that the converted scopes are equal to the expected test result
+ for idx, newToken := range tokens {
+ assert.Equal(t, string(tests[idx].New), newToken.Scope)
+ }
+}
diff --git a/models/migrations/v1_21/main_test.go b/models/migrations/v1_21/main_test.go
new file mode 100644
index 0000000..0148170
--- /dev/null
+++ b/models/migrations/v1_21/main_test.go
@@ -0,0 +1,14 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_21 //nolint
+
+import (
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+)
+
+func TestMain(m *testing.M) {
+ migration_tests.MainTest(m)
+}
diff --git a/models/migrations/v1_21/v260.go b/models/migrations/v1_21/v260.go
new file mode 100644
index 0000000..6ca52c5
--- /dev/null
+++ b/models/migrations/v1_21/v260.go
@@ -0,0 +1,26 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_21 //nolint
+
+import (
+ "code.gitea.io/gitea/models/migrations/base"
+
+ "xorm.io/xorm"
+)
+
+func DropCustomLabelsColumnOfActionRunner(x *xorm.Engine) error {
+ sess := x.NewSession()
+ defer sess.Close()
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ // drop "custom_labels" cols
+ if err := base.DropTableColumns(sess, "action_runner", "custom_labels"); err != nil {
+ return err
+ }
+
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_21/v261.go b/models/migrations/v1_21/v261.go
new file mode 100644
index 0000000..4ec1160
--- /dev/null
+++ b/models/migrations/v1_21/v261.go
@@ -0,0 +1,24 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_21 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func CreateVariableTable(x *xorm.Engine) error {
+ type ActionVariable struct {
+ ID int64 `xorm:"pk autoincr"`
+ OwnerID int64 `xorm:"UNIQUE(owner_repo_name)"`
+ RepoID int64 `xorm:"INDEX UNIQUE(owner_repo_name)"`
+ Name string `xorm:"UNIQUE(owner_repo_name) NOT NULL"`
+ Data string `xorm:"LONGTEXT NOT NULL"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created NOT NULL"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"updated"`
+ }
+
+ return x.Sync(new(ActionVariable))
+}
diff --git a/models/migrations/v1_21/v262.go b/models/migrations/v1_21/v262.go
new file mode 100644
index 0000000..23e9005
--- /dev/null
+++ b/models/migrations/v1_21/v262.go
@@ -0,0 +1,16 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_21 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddTriggerEventToActionRun(x *xorm.Engine) error {
+ type ActionRun struct {
+ TriggerEvent string
+ }
+
+ return x.Sync(new(ActionRun))
+}
diff --git a/models/migrations/v1_21/v263.go b/models/migrations/v1_21/v263.go
new file mode 100644
index 0000000..2c7cbad
--- /dev/null
+++ b/models/migrations/v1_21/v263.go
@@ -0,0 +1,46 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_21 //nolint
+
+import (
+ "fmt"
+
+ "xorm.io/xorm"
+)
+
+// AddGitSizeAndLFSSizeToRepositoryTable: add GitSize and LFSSize columns to Repository
+func AddGitSizeAndLFSSizeToRepositoryTable(x *xorm.Engine) error {
+ type Repository struct {
+ GitSize int64 `xorm:"NOT NULL DEFAULT 0"`
+ LFSSize int64 `xorm:"NOT NULL DEFAULT 0"`
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ if err := sess.Sync(new(Repository)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+
+ _, err := sess.Exec(`UPDATE repository SET lfs_size=(SELECT SUM(size) FROM lfs_meta_object WHERE lfs_meta_object.repository_id=repository.ID) WHERE EXISTS (SELECT 1 FROM lfs_meta_object WHERE lfs_meta_object.repository_id=repository.ID)`)
+ if err != nil {
+ return err
+ }
+
+ _, err = sess.Exec(`UPDATE repository SET size = 0 WHERE size IS NULL`)
+ if err != nil {
+ return err
+ }
+
+ _, err = sess.Exec(`UPDATE repository SET git_size = size - lfs_size WHERE size > lfs_size`)
+ if err != nil {
+ return err
+ }
+
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_21/v264.go b/models/migrations/v1_21/v264.go
new file mode 100644
index 0000000..e81a17a
--- /dev/null
+++ b/models/migrations/v1_21/v264.go
@@ -0,0 +1,93 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_21 //nolint
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func AddBranchTable(x *xorm.Engine) error {
+ type Branch struct {
+ ID int64
+ RepoID int64 `xorm:"UNIQUE(s)"`
+ Name string `xorm:"UNIQUE(s) NOT NULL"`
+ CommitID string
+ CommitMessage string `xorm:"TEXT"`
+ PusherID int64
+ IsDeleted bool `xorm:"index"`
+ DeletedByID int64
+ DeletedUnix timeutil.TimeStamp `xorm:"index"`
+ CommitTime timeutil.TimeStamp // The commit time
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"updated"`
+ }
+
+ if err := x.Sync(new(Branch)); err != nil {
+ return err
+ }
+
+ if exist, err := x.IsTableExist("deleted_branches"); err != nil {
+ return err
+ } else if !exist {
+ return nil
+ }
+
+ type DeletedBranch struct {
+ ID int64
+ RepoID int64 `xorm:"index UNIQUE(s)"`
+ Name string `xorm:"UNIQUE(s) NOT NULL"`
+ Commit string
+ DeletedByID int64
+ DeletedUnix timeutil.TimeStamp
+ }
+
+ var adminUserID int64
+ has, err := x.Table("user").
+ Select("id").
+ Where("is_admin=?", true).
+ Asc("id"). // Reliably get the admin with the lowest ID.
+ Get(&adminUserID)
+ if err != nil {
+ return err
+ } else if !has {
+ return fmt.Errorf("no admin user found")
+ }
+
+ branches := make([]Branch, 0, 100)
+ if err := db.Iterate(context.Background(), nil, func(ctx context.Context, deletedBranch *DeletedBranch) error {
+ branches = append(branches, Branch{
+ RepoID: deletedBranch.RepoID,
+ Name: deletedBranch.Name,
+ CommitID: deletedBranch.Commit,
+ PusherID: adminUserID,
+ IsDeleted: true,
+ DeletedByID: deletedBranch.DeletedByID,
+ DeletedUnix: deletedBranch.DeletedUnix,
+ })
+ if len(branches) >= 100 {
+ _, err := x.Insert(&branches)
+ if err != nil {
+ return err
+ }
+ branches = branches[:0]
+ }
+ return nil
+ }); err != nil {
+ return err
+ }
+
+ if len(branches) > 0 {
+ if _, err := x.Insert(&branches); err != nil {
+ return err
+ }
+ }
+
+ return x.DropTables(new(DeletedBranch))
+}
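
The deleted-branch copy above uses a flush-every-100 batching pattern so no single Insert grows unbounded. Isolated below, with a hypothetical insertBatch standing in for x.Insert:

    package main

    import "fmt"

    func insertBatch(rows []int) { fmt.Printf("inserting %d rows\n", len(rows)) }

    func main() {
    	batch := make([]int, 0, 100)
    	for i := 0; i < 250; i++ {
    		batch = append(batch, i)
    		if len(batch) >= 100 {
    			insertBatch(batch)
    			batch = batch[:0] // reuse the backing array, like branches[:0]
    		}
    	}
    	if len(batch) > 0 {
    		insertBatch(batch) // final partial batch
    	}
    }
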
diff --git a/models/migrations/v1_21/v265.go b/models/migrations/v1_21/v265.go
new file mode 100644
index 0000000..800eb95
--- /dev/null
+++ b/models/migrations/v1_21/v265.go
@@ -0,0 +1,19 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_21 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AlterActionArtifactTable(x *xorm.Engine) error {
+ // ActionArtifact is a file that is stored in the artifact storage.
+ type ActionArtifact struct {
+ RunID int64 `xorm:"index unique(runid_name_path)"` // The run id of the artifact
+ ArtifactPath string `xorm:"index unique(runid_name_path)"` // The path to the artifact when runner uploads it
+ ArtifactName string `xorm:"index unique(runid_name_path)"` // The name of the artifact when runner uploads it
+ }
+
+ return x.Sync(new(ActionArtifact))
+}
diff --git a/models/migrations/v1_21/v266.go b/models/migrations/v1_21/v266.go
new file mode 100644
index 0000000..79a5f5e
--- /dev/null
+++ b/models/migrations/v1_21/v266.go
@@ -0,0 +1,23 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_21 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func ReduceCommitStatus(x *xorm.Engine) error {
+ sess := x.NewSession()
+ defer sess.Close()
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ if _, err := sess.Exec(`UPDATE commit_status SET state='pending' WHERE state='running'`); err != nil {
+ return err
+ }
+
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_21/v267.go b/models/migrations/v1_21/v267.go
new file mode 100644
index 0000000..bc0e954
--- /dev/null
+++ b/models/migrations/v1_21/v267.go
@@ -0,0 +1,23 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_21 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func CreateActionTasksVersionTable(x *xorm.Engine) error {
+ type ActionTasksVersion struct {
+ ID int64 `xorm:"pk autoincr"`
+ OwnerID int64 `xorm:"UNIQUE(owner_repo)"`
+ RepoID int64 `xorm:"INDEX UNIQUE(owner_repo)"`
+ Version int64
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"updated"`
+ }
+
+ return x.Sync(new(ActionTasksVersion))
+}
diff --git a/models/migrations/v1_21/v268.go b/models/migrations/v1_21/v268.go
new file mode 100644
index 0000000..332793f
--- /dev/null
+++ b/models/migrations/v1_21/v268.go
@@ -0,0 +1,16 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_21 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+// UpdateActionsRefIndex adds an index to the ref field of the action run table
+func UpdateActionsRefIndex(x *xorm.Engine) error {
+ type ActionRun struct {
+ Ref string `xorm:"index"` // the commit/tag/… causing the run
+ }
+ return x.Sync(new(ActionRun))
+}
diff --git a/models/migrations/v1_21/v269.go b/models/migrations/v1_21/v269.go
new file mode 100644
index 0000000..475ec02
--- /dev/null
+++ b/models/migrations/v1_21/v269.go
@@ -0,0 +1,12 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_21 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func DropDeletedBranchTable(x *xorm.Engine) error {
+ return x.DropTables("deleted_branch")
+}
diff --git a/models/migrations/v1_21/v270.go b/models/migrations/v1_21/v270.go
new file mode 100644
index 0000000..b9cc84d
--- /dev/null
+++ b/models/migrations/v1_21/v270.go
@@ -0,0 +1,26 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_21 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func FixPackagePropertyTypo(x *xorm.Engine) error {
+ sess := x.NewSession()
+ defer sess.Close()
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ if _, err := sess.Exec(`UPDATE package_property SET name = 'rpm.metadata' WHERE name = 'rpm.metdata'`); err != nil {
+ return err
+ }
+ if _, err := sess.Exec(`UPDATE package_property SET name = 'conda.metadata' WHERE name = 'conda.metdata'`); err != nil {
+ return err
+ }
+
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_21/v271.go b/models/migrations/v1_21/v271.go
new file mode 100644
index 0000000..098f649
--- /dev/null
+++ b/models/migrations/v1_21/v271.go
@@ -0,0 +1,16 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_21 //nolint
+import (
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func AddArchivedUnixColumInLabelTable(x *xorm.Engine) error {
+ type Label struct {
+ ArchivedUnix timeutil.TimeStamp `xorm:"DEFAULT NULL"`
+ }
+ return x.Sync(new(Label))
+}
diff --git a/models/migrations/v1_21/v272.go b/models/migrations/v1_21/v272.go
new file mode 100644
index 0000000..a729c49
--- /dev/null
+++ b/models/migrations/v1_21/v272.go
@@ -0,0 +1,14 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_21 //nolint
+import (
+ "xorm.io/xorm"
+)
+
+func AddVersionToActionRunTable(x *xorm.Engine) error {
+ type ActionRun struct {
+ Version int `xorm:"version default 0"`
+ }
+ return x.Sync(new(ActionRun))
+}
diff --git a/models/migrations/v1_21/v273.go b/models/migrations/v1_21/v273.go
new file mode 100644
index 0000000..61c79f4
--- /dev/null
+++ b/models/migrations/v1_21/v273.go
@@ -0,0 +1,45 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_21 //nolint
+import (
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func AddActionScheduleTable(x *xorm.Engine) error {
+ type ActionSchedule struct {
+ ID int64
+ Title string
+ Specs []string
+ RepoID int64 `xorm:"index"`
+ OwnerID int64 `xorm:"index"`
+ WorkflowID string
+ TriggerUserID int64
+ Ref string
+ CommitSHA string
+ Event string
+ EventPayload string `xorm:"LONGTEXT"`
+ Content []byte
+ Created timeutil.TimeStamp `xorm:"created"`
+ Updated timeutil.TimeStamp `xorm:"updated"`
+ }
+
+ type ActionScheduleSpec struct {
+ ID int64
+ RepoID int64 `xorm:"index"`
+ ScheduleID int64 `xorm:"index"`
+ Spec string
+ Next timeutil.TimeStamp `xorm:"index"`
+ Prev timeutil.TimeStamp
+
+ Created timeutil.TimeStamp `xorm:"created"`
+ Updated timeutil.TimeStamp `xorm:"updated"`
+ }
+
+ return x.Sync(
+ new(ActionSchedule),
+ new(ActionScheduleSpec),
+ )
+}
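
The Next and Prev columns on ActionScheduleSpec hold timestamps computed from the cron Spec. A sketch of how they would be populated, assuming the robfig/cron/v3 parser (the migration itself only creates the tables; the parser choice is an assumption here):

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/robfig/cron/v3"
    )

    func main() {
    	sched, err := cron.ParseStandard("30 2 * * *") // every day at 02:30
    	if err != nil {
    		panic(err)
    	}
    	now := time.Now()
    	fmt.Println("prev run recorded as:", now.Unix()) // would go into Prev
    	fmt.Println("next run at:", sched.Next(now))     // would go into Next
    }
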
diff --git a/models/migrations/v1_21/v274.go b/models/migrations/v1_21/v274.go
new file mode 100644
index 0000000..df5994f
--- /dev/null
+++ b/models/migrations/v1_21/v274.go
@@ -0,0 +1,36 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_21 //nolint
+import (
+ "time"
+
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func AddExpiredUnixColumnInActionArtifactTable(x *xorm.Engine) error {
+ type ActionArtifact struct {
+ ExpiredUnix timeutil.TimeStamp `xorm:"index"` // time when the artifact will be expired
+ }
+ if err := x.Sync(new(ActionArtifact)); err != nil {
+ return err
+ }
+ return updateArtifactsExpiredUnixTo90Days(x)
+}
+
+func updateArtifactsExpiredUnixTo90Days(x *xorm.Engine) error {
+ sess := x.NewSession()
+ defer sess.Close()
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+ expiredTime := time.Now().AddDate(0, 0, 90).Unix()
+ if _, err := sess.Exec(`UPDATE action_artifact SET expired_unix=? WHERE status='2' AND expired_unix is NULL`, expiredTime); err != nil {
+ return err
+ }
+
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_21/v275.go b/models/migrations/v1_21/v275.go
new file mode 100644
index 0000000..78804a5
--- /dev/null
+++ b/models/migrations/v1_21/v275.go
@@ -0,0 +1,15 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_21 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddScheduleIDForActionRun(x *xorm.Engine) error {
+ type ActionRun struct {
+ ScheduleID int64
+ }
+ return x.Sync(new(ActionRun))
+}
diff --git a/models/migrations/v1_21/v276.go b/models/migrations/v1_21/v276.go
new file mode 100644
index 0000000..67e9501
--- /dev/null
+++ b/models/migrations/v1_21/v276.go
@@ -0,0 +1,156 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_21 //nolint
+
+import (
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+)
+
+func AddRemoteAddressToMirrors(x *xorm.Engine) error {
+ type Mirror struct {
+ RemoteAddress string `xorm:"VARCHAR(2048)"`
+ }
+
+ type PushMirror struct {
+ RemoteAddress string `xorm:"VARCHAR(2048)"`
+ }
+
+ if err := x.Sync(new(Mirror), new(PushMirror)); err != nil {
+ return err
+ }
+
+ if err := migratePullMirrors(x); err != nil {
+ return err
+ }
+
+ return migratePushMirrors(x)
+}
+
+func migratePullMirrors(x *xorm.Engine) error {
+ type Mirror struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"INDEX"`
+ RemoteAddress string `xorm:"VARCHAR(2048)"`
+ RepoOwner string
+ RepoName string
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ limit := setting.Database.IterateBufferSize
+ if limit <= 0 {
+ limit = 50
+ }
+
+ start := 0
+
+ for {
+ var mirrors []Mirror
+ if err := sess.Select("mirror.id, mirror.repo_id, mirror.remote_address, repository.owner_name as repo_owner, repository.name as repo_name").
+ Join("INNER", "repository", "repository.id = mirror.repo_id").
+ Limit(limit, start).Find(&mirrors); err != nil {
+ return err
+ }
+
+ if len(mirrors) == 0 {
+ break
+ }
+ start += len(mirrors)
+
+ for _, m := range mirrors {
+ remoteAddress, err := repo_model.GetPushMirrorRemoteAddress(m.RepoOwner, m.RepoName, "origin")
+ if err != nil {
+ return err
+ }
+
+ m.RemoteAddress = remoteAddress
+
+ if _, err = sess.ID(m.ID).Cols("remote_address").Update(m); err != nil {
+ return err
+ }
+ }
+
+ if start%1000 == 0 { // avoid a too big transaction
+ if err := sess.Commit(); err != nil {
+ return err
+ }
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+ }
+ }
+
+ return sess.Commit()
+}
+
+func migratePushMirrors(x *xorm.Engine) error {
+ type PushMirror struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"INDEX"`
+ RemoteName string
+ RemoteAddress string `xorm:"VARCHAR(2048)"`
+ RepoOwner string
+ RepoName string
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ limit := setting.Database.IterateBufferSize
+ if limit <= 0 {
+ limit = 50
+ }
+
+ start := 0
+
+ for {
+ var mirrors []PushMirror
+ if err := sess.Select("push_mirror.id, push_mirror.repo_id, push_mirror.remote_name, push_mirror.remote_address, repository.owner_name as repo_owner, repository.name as repo_name").
+ Join("INNER", "repository", "repository.id = push_mirror.repo_id").
+ Limit(limit, start).Find(&mirrors); err != nil {
+ return err
+ }
+
+ if len(mirrors) == 0 {
+ break
+ }
+ start += len(mirrors)
+
+ for _, m := range mirrors {
+ remoteAddress, err := repo_model.GetPushMirrorRemoteAddress(m.RepoOwner, m.RepoName, m.RemoteName)
+ if err != nil {
+ return err
+ }
+
+ m.RemoteAddress = remoteAddress
+
+ if _, err = sess.ID(m.ID).Cols("remote_address").Update(m); err != nil {
+ return err
+ }
+ }
+
+ if start%1000 == 0 { // avoid a too big transaction
+ if err := sess.Commit(); err != nil {
+ return err
+ }
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+ }
+ }
+
+ return sess.Commit()
+}
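
Both helpers share a paging-plus-periodic-commit shape: read a page, update its rows, and commit/reopen the transaction every 1000 rows so it never grows too large. The control flow in isolation (fetchPage is a hypothetical stand-in for the joined SELECT with Limit):

    package main

    import "fmt"

    // fetchPage simulates the joined SELECT ... LIMIT limit OFFSET start.
    func fetchPage(start, limit int) []int {
    	const total = 2050
    	var page []int
    	for i := start; i < start+limit && i < total; i++ {
    		page = append(page, i)
    	}
    	return page
    }

    func main() {
    	const limit = 50
    	start := 0
    	for {
    		page := fetchPage(start, limit)
    		if len(page) == 0 {
    			break
    		}
    		start += len(page)
    		// ... update each row in the page here ...
    		if start%1000 == 0 {
    			fmt.Println("commit + begin at", start, "to keep the transaction small")
    		}
    	}
    	fmt.Println("final commit; processed", start, "rows")
    }
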
diff --git a/models/migrations/v1_21/v277.go b/models/migrations/v1_21/v277.go
new file mode 100644
index 0000000..1252916
--- /dev/null
+++ b/models/migrations/v1_21/v277.go
@@ -0,0 +1,16 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_21 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddIndexToIssueUserIssueID(x *xorm.Engine) error {
+ type IssueUser struct {
+ IssueID int64 `xorm:"INDEX"`
+ }
+
+ return x.Sync(new(IssueUser))
+}
diff --git a/models/migrations/v1_21/v278.go b/models/migrations/v1_21/v278.go
new file mode 100644
index 0000000..d6a462d
--- /dev/null
+++ b/models/migrations/v1_21/v278.go
@@ -0,0 +1,16 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_21 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddIndexToCommentDependentIssueID(x *xorm.Engine) error {
+ type Comment struct {
+ DependentIssueID int64 `xorm:"index"`
+ }
+
+ return x.Sync(new(Comment))
+}
diff --git a/models/migrations/v1_21/v279.go b/models/migrations/v1_21/v279.go
new file mode 100644
index 0000000..2abd1bb
--- /dev/null
+++ b/models/migrations/v1_21/v279.go
@@ -0,0 +1,20 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_21 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddIndexToActionUserID(x *xorm.Engine) error {
+ type Action struct {
+ UserID int64 `xorm:"INDEX"`
+ }
+
+ _, err := x.SyncWithOptions(xorm.SyncOptions{
+ IgnoreDropIndices: true,
+ IgnoreConstrains: true,
+ }, new(Action))
+ return err
+}
diff --git a/models/migrations/v1_22/main_test.go b/models/migrations/v1_22/main_test.go
new file mode 100644
index 0000000..2005789
--- /dev/null
+++ b/models/migrations/v1_22/main_test.go
@@ -0,0 +1,14 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import (
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+)
+
+func TestMain(m *testing.M) {
+ migration_tests.MainTest(m)
+}
diff --git a/models/migrations/v1_22/v280.go b/models/migrations/v1_22/v280.go
new file mode 100644
index 0000000..a8ee4a3
--- /dev/null
+++ b/models/migrations/v1_22/v280.go
@@ -0,0 +1,29 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func RenameUserThemes(x *xorm.Engine) error {
+ sess := x.NewSession()
+ defer sess.Close()
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ if _, err := sess.Exec("UPDATE `user` SET `theme` = 'gitea-light' WHERE `theme` = 'gitea'"); err != nil {
+ return err
+ }
+ if _, err := sess.Exec("UPDATE `user` SET `theme` = 'gitea-dark' WHERE `theme` = 'arc-green'"); err != nil {
+ return err
+ }
+ if _, err := sess.Exec("UPDATE `user` SET `theme` = 'gitea-auto' WHERE `theme` = 'auto'"); err != nil {
+ return err
+ }
+
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_22/v281.go b/models/migrations/v1_22/v281.go
new file mode 100644
index 0000000..fc1866a
--- /dev/null
+++ b/models/migrations/v1_22/v281.go
@@ -0,0 +1,21 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func CreateAuthTokenTable(x *xorm.Engine) error {
+ type AuthToken struct {
+ ID string `xorm:"pk"`
+ TokenHash string
+ UserID int64 `xorm:"INDEX"`
+ ExpiresUnix timeutil.TimeStamp `xorm:"INDEX"`
+ }
+
+ return x.Sync(new(AuthToken))
+}
diff --git a/models/migrations/v1_22/v282.go b/models/migrations/v1_22/v282.go
new file mode 100644
index 0000000..baad9e0
--- /dev/null
+++ b/models/migrations/v1_22/v282.go
@@ -0,0 +1,16 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddIndexToPullAutoMergeDoerID(x *xorm.Engine) error {
+ type PullAutoMerge struct {
+ DoerID int64 `xorm:"INDEX NOT NULL"`
+ }
+
+ return x.Sync(&PullAutoMerge{})
+}
diff --git a/models/migrations/v1_22/v283.go b/models/migrations/v1_22/v283.go
new file mode 100644
index 0000000..86946d1
--- /dev/null
+++ b/models/migrations/v1_22/v283.go
@@ -0,0 +1,38 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddCombinedIndexToIssueUser(x *xorm.Engine) error {
+ type OldIssueUser struct {
+ IssueID int64
+ UID int64
+ Cnt int64
+ }
+
+ var duplicatedIssueUsers []OldIssueUser
+ if err := x.SQL("select * from (select issue_id, uid, count(1) as cnt from issue_user group by issue_id, uid) a where a.cnt > 1").
+ Find(&duplicatedIssueUsers); err != nil {
+ return err
+ }
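+	// Keep exactly one row per (issue_id, uid) pair: delete cnt-1 duplicates so the unique index below can be created.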
+ for _, issueUser := range duplicatedIssueUsers {
+ var ids []int64
+ if err := x.SQL("SELECT id FROM issue_user WHERE issue_id = ? and uid = ? limit ?", issueUser.IssueID, issueUser.UID, issueUser.Cnt-1).Find(&ids); err != nil {
+ return err
+ }
+ if _, err := x.Table("issue_user").In("id", ids).Delete(); err != nil {
+ return err
+ }
+ }
+
+ type IssueUser struct {
+ UID int64 `xorm:"INDEX unique(uid_to_issue)"` // User ID.
+ IssueID int64 `xorm:"INDEX unique(uid_to_issue)"`
+ }
+
+ return x.Sync(&IssueUser{})
+}
diff --git a/models/migrations/v1_22/v283_test.go b/models/migrations/v1_22/v283_test.go
new file mode 100644
index 0000000..5f6c04a
--- /dev/null
+++ b/models/migrations/v1_22/v283_test.go
@@ -0,0 +1,28 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import (
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+
+ "github.com/stretchr/testify/require"
+)
+
+func Test_AddCombinedIndexToIssueUser(t *testing.T) {
+ type IssueUser struct { // old struct
+ ID int64 `xorm:"pk autoincr"`
+ UID int64 `xorm:"INDEX"` // User ID.
+ IssueID int64 `xorm:"INDEX"`
+ IsRead bool
+ IsMentioned bool
+ }
+
+ // Prepare and load the testing database
+ x, deferable := migration_tests.PrepareTestEnv(t, 0, new(IssueUser))
+ defer deferable()
+
+ require.NoError(t, AddCombinedIndexToIssueUser(x))
+}
diff --git a/models/migrations/v1_22/v284.go b/models/migrations/v1_22/v284.go
new file mode 100644
index 0000000..2b95078
--- /dev/null
+++ b/models/migrations/v1_22/v284.go
@@ -0,0 +1,18 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddIgnoreStaleApprovalsColumnToProtectedBranchTable(x *xorm.Engine) error {
+ type ProtectedBranch struct {
+ IgnoreStaleApprovals bool `xorm:"NOT NULL DEFAULT false"`
+ }
+ _, err := x.SyncWithOptions(xorm.SyncOptions{
+ IgnoreIndices: true,
+ IgnoreConstrains: true,
+ }, new(ProtectedBranch))
+ return err
+}
diff --git a/models/migrations/v1_22/v285.go b/models/migrations/v1_22/v285.go
new file mode 100644
index 0000000..a55cc17
--- /dev/null
+++ b/models/migrations/v1_22/v285.go
@@ -0,0 +1,22 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import (
+ "time"
+
+ "xorm.io/xorm"
+)
+
+func AddPreviousDurationToActionRun(x *xorm.Engine) error {
+ type ActionRun struct {
+ PreviousDuration time.Duration
+ }
+
+ _, err := x.SyncWithOptions(xorm.SyncOptions{
+ IgnoreIndices: true,
+ IgnoreConstrains: true,
+ }, &ActionRun{})
+ return err
+}
diff --git a/models/migrations/v1_22/v286.go b/models/migrations/v1_22/v286.go
new file mode 100644
index 0000000..97ff649
--- /dev/null
+++ b/models/migrations/v1_22/v286.go
@@ -0,0 +1,75 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import (
+ "fmt"
+
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+)
+
+func expandHashReferencesToSha256(x *xorm.Engine) error {
+ alteredTables := [][2]string{
+ {"commit_status", "context_hash"},
+ {"comment", "commit_sha"},
+ {"pull_request", "merge_base"},
+ {"pull_request", "merged_commit_id"},
+ {"review", "commit_id"},
+ {"review_state", "commit_sha"},
+ {"repo_archiver", "commit_id"},
+ {"release", "sha1"},
+ {"repo_indexer_status", "commit_sha"},
+ }
+
+ db := x.NewSession()
+ defer db.Close()
+
+ if err := db.Begin(); err != nil {
+ return err
+ }
+
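+	// SQLite is skipped because it does not enforce VARCHAR length limits, so its existing columns can already hold 64-character SHA-256 hashes.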
+ if !setting.Database.Type.IsSQLite3() {
+ for _, alts := range alteredTables {
+ var err error
+ if setting.Database.Type.IsMySQL() {
+ _, err = db.Exec(fmt.Sprintf("ALTER TABLE `%s` MODIFY COLUMN `%s` VARCHAR(64)", alts[0], alts[1]))
+ } else {
+ _, err = db.Exec(fmt.Sprintf("ALTER TABLE `%s` ALTER COLUMN `%s` TYPE VARCHAR(64)", alts[0], alts[1]))
+ }
+ if err != nil {
+ return fmt.Errorf("alter column '%s' of table '%s' failed: %w", alts[1], alts[0], err)
+ }
+ }
+ }
+ log.Debug("Updated database tables to hold SHA256 git hash references")
+
+ return db.Commit()
+}
+
+func addObjectFormatNameToRepository(x *xorm.Engine) error {
+ type Repository struct {
+ ObjectFormatName string `xorm:"VARCHAR(6) NOT NULL DEFAULT 'sha1'"`
+ }
+
+ if _, err := x.SyncWithOptions(xorm.SyncOptions{
+ IgnoreIndices: true,
+ IgnoreConstrains: true,
+ }, new(Repository)); err != nil {
+ return err
+ }
+
+ // Here to catch weird edge-cases where column constraints above are
+ // not applied by the DB backend
+ _, err := x.Exec("UPDATE `repository` set `object_format_name` = 'sha1' WHERE `object_format_name` = '' or `object_format_name` IS NULL")
+ return err
+}
+
+func AdjustDBForSha256(x *xorm.Engine) error {
+ if err := expandHashReferencesToSha256(x); err != nil {
+ return err
+ }
+ return addObjectFormatNameToRepository(x)
+}
diff --git a/models/migrations/v1_22/v286_test.go b/models/migrations/v1_22/v286_test.go
new file mode 100644
index 0000000..76b00e5
--- /dev/null
+++ b/models/migrations/v1_22/v286_test.go
@@ -0,0 +1,119 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import (
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "xorm.io/xorm"
+)
+
+func PrepareOldRepository(t *testing.T) (*xorm.Engine, func()) {
+ type Repository struct { // old struct
+ ID int64 `xorm:"pk autoincr"`
+ }
+
+ type CommitStatus struct {
+ ID int64
+ ContextHash string `xorm:"char(40) index"`
+ }
+
+ type RepoArchiver struct {
+ ID int64
+ RepoID int64 `xorm:"index unique(s)"`
+ Type int `xorm:"unique(s)"`
+ CommitID string `xorm:"VARCHAR(40) unique(s)"`
+ }
+
+ type ReviewState struct {
+ ID int64
+ UserID int64 `xorm:"NOT NULL UNIQUE(pull_commit_user)"`
+ PullID int64 `xorm:"NOT NULL INDEX UNIQUE(pull_commit_user) DEFAULT 0"`
+ CommitSHA string `xorm:"NOT NULL VARCHAR(40) UNIQUE(pull_commit_user)"`
+ }
+
+ type Comment struct {
+ ID int64
+ CommitSHA string
+ }
+
+ type PullRequest struct {
+ ID int64
+ CommitSHA string
+ MergeBase string
+ MergedCommitID string
+ }
+
+ type Release struct {
+ ID int64
+ Sha1 string
+ }
+
+ type RepoIndexerStatus struct {
+ ID int64
+ CommitSHA string
+ }
+
+ type Review struct {
+ ID int64
+ CommitID string
+ }
+
+ // Prepare and load the testing database
+ return migration_tests.PrepareTestEnv(t, 0,
+ new(Repository),
+ new(CommitStatus),
+ new(RepoArchiver),
+ new(ReviewState),
+ new(Review),
+ new(Comment),
+ new(PullRequest),
+ new(Release),
+ new(RepoIndexerStatus),
+ )
+}
+
+func Test_RepositoryFormat(t *testing.T) {
+ x, deferable := PrepareOldRepository(t)
+ defer deferable()
+
+ require.NoError(t, AdjustDBForSha256(x))
+
+ type Repository struct {
+ ID int64 `xorm:"pk autoincr"`
+		ObjectFormatName string `xorm:"NOT NULL DEFAULT 'sha1'"`
+ }
+
+ repo := new(Repository)
+
+ // check we have some records to migrate
+ count, err := x.Count(new(Repository))
+ require.NoError(t, err)
+ assert.EqualValues(t, 4, count)
+
+ repo.ObjectFormatName = "sha256"
+ _, err = x.Insert(repo)
+ require.NoError(t, err)
+ id := repo.ID
+
+ count, err = x.Count(new(Repository))
+ require.NoError(t, err)
+ assert.EqualValues(t, 5, count)
+
+ repo = new(Repository)
+ ok, err := x.ID(2).Get(repo)
+ require.NoError(t, err)
+ assert.True(t, ok)
+ assert.EqualValues(t, "sha1", repo.ObjectFormatName)
+
+ repo = new(Repository)
+ ok, err = x.ID(id).Get(repo)
+ require.NoError(t, err)
+ assert.True(t, ok)
+ assert.EqualValues(t, "sha256", repo.ObjectFormatName)
+}
diff --git a/models/migrations/v1_22/v287.go b/models/migrations/v1_22/v287.go
new file mode 100644
index 0000000..c8b1593
--- /dev/null
+++ b/models/migrations/v1_22/v287.go
@@ -0,0 +1,46 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+type BadgeUnique struct {
+ ID int64 `xorm:"pk autoincr"`
+ Slug string `xorm:"UNIQUE"`
+}
+
+func (BadgeUnique) TableName() string {
+ return "badge"
+}
+
+func UseSlugInsteadOfIDForBadges(x *xorm.Engine) error {
+ type Badge struct {
+ Slug string
+ }
+
+ err := x.Sync(new(Badge))
+ if err != nil {
+ return err
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
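+	// Backfill: use the numeric ID as the slug for every badge that has none, so the UNIQUE constraint below can be applied.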
+	_, err = sess.Exec("UPDATE `badge` SET `slug` = `id` WHERE `slug` IS NULL")
+ if err != nil {
+ return err
+ }
+
+ err = sess.Sync(new(BadgeUnique))
+ if err != nil {
+ return err
+ }
+
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_22/v288.go b/models/migrations/v1_22/v288.go
new file mode 100644
index 0000000..7c93bfc
--- /dev/null
+++ b/models/migrations/v1_22/v288.go
@@ -0,0 +1,26 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+type Blocking struct {
+ ID int64 `xorm:"pk autoincr"`
+ BlockerID int64 `xorm:"UNIQUE(block)"`
+ BlockeeID int64 `xorm:"UNIQUE(block)"`
+ Note string
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+}
+
+func (*Blocking) TableName() string {
+ return "user_blocking"
+}
+
+func AddUserBlockingTable(x *xorm.Engine) error {
+ return x.Sync(&Blocking{})
+}
diff --git a/models/migrations/v1_22/v289.go b/models/migrations/v1_22/v289.go
new file mode 100644
index 0000000..b9941aa
--- /dev/null
+++ b/models/migrations/v1_22/v289.go
@@ -0,0 +1,21 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import "xorm.io/xorm"
+
+func AddDefaultWikiBranch(x *xorm.Engine) error {
+ type Repository struct {
+ ID int64
+ DefaultWikiBranch string
+ }
+ if _, err := x.SyncWithOptions(xorm.SyncOptions{
+ IgnoreIndices: true,
+ IgnoreConstrains: true,
+ }, &Repository{}); err != nil {
+ return err
+ }
+ _, err := x.Exec("UPDATE `repository` SET default_wiki_branch = 'master' WHERE (default_wiki_branch IS NULL) OR (default_wiki_branch = '')")
+ return err
+}
diff --git a/models/migrations/v1_22/v290.go b/models/migrations/v1_22/v290.go
new file mode 100644
index 0000000..e3c58b0
--- /dev/null
+++ b/models/migrations/v1_22/v290.go
@@ -0,0 +1,46 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/timeutil"
+ webhook_module "code.gitea.io/gitea/modules/webhook"
+
+ "xorm.io/xorm"
+)
+
+// HookTask represents a hook task.
+// exact copy of models/webhook/hooktask.go as of when this migration was created
+// - xorm:"-" fields deleted
+type HookTask struct {
+ ID int64 `xorm:"pk autoincr"`
+ HookID int64 `xorm:"index"`
+ UUID string `xorm:"unique"`
+ PayloadContent string `xorm:"LONGTEXT"`
+ EventType webhook_module.HookEventType
+ IsDelivered bool
+ Delivered timeutil.TimeStampNano
+
+ // History info.
+ IsSucceed bool
+ RequestContent string `xorm:"LONGTEXT"`
+ ResponseContent string `xorm:"LONGTEXT"`
+
+ // Version number to allow for smooth version upgrades:
+	// - Version 1: PayloadContent contains the JSON as sent to the URL
+ // - Version 2: PayloadContent contains the original event
+ PayloadVersion int `xorm:"DEFAULT 1"`
+}
+
+func AddPayloadVersionToHookTaskTable(x *xorm.Engine) error {
+ // create missing column
+ if _, err := x.SyncWithOptions(xorm.SyncOptions{
+ IgnoreIndices: true,
+ IgnoreConstrains: true,
+ }, new(HookTask)); err != nil {
+ return err
+ }
+ _, err := x.Exec("UPDATE hook_task SET payload_version = 1 WHERE payload_version IS NULL")
+ return err
+}
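+
+// A minimal sketch (hypothetical, for illustration only) of how delivery code
+// might branch on the version after this migration:
+//
+//	switch task.PayloadVersion {
+//	case 1:
+//		// PayloadContent is the JSON exactly as it was sent to the URL.
+//	case 2:
+//		// PayloadContent is the original event; the payload is built at delivery time.
+//	}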
diff --git a/models/migrations/v1_22/v290_test.go b/models/migrations/v1_22/v290_test.go
new file mode 100644
index 0000000..ced200f
--- /dev/null
+++ b/models/migrations/v1_22/v290_test.go
@@ -0,0 +1,59 @@
+// Copyright 2024 The Forgejo Authors c/o Codeberg e.V.. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import (
+ "strconv"
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+ "code.gitea.io/gitea/modules/timeutil"
+ webhook_module "code.gitea.io/gitea/modules/webhook"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_AddPayloadVersionToHookTaskTable(t *testing.T) {
+ type HookTaskMigrated HookTask
+
+ // HookTask represents a hook task, as of before the migration
+ type HookTask struct {
+ ID int64 `xorm:"pk autoincr"`
+ HookID int64 `xorm:"index"`
+ UUID string `xorm:"unique"`
+ PayloadContent string `xorm:"LONGTEXT"`
+ EventType webhook_module.HookEventType
+ IsDelivered bool
+ Delivered timeutil.TimeStampNano
+
+ // History info.
+ IsSucceed bool
+ RequestContent string `xorm:"LONGTEXT"`
+ ResponseContent string `xorm:"LONGTEXT"`
+ }
+
+ // Prepare and load the testing database
+ x, deferable := migration_tests.PrepareTestEnv(t, 0, new(HookTask), new(HookTaskMigrated))
+ defer deferable()
+ if x == nil || t.Failed() {
+ return
+ }
+
+ require.NoError(t, AddPayloadVersionToHookTaskTable(x))
+
+ expected := []HookTaskMigrated{}
+ require.NoError(t, x.Table("hook_task_migrated").Asc("id").Find(&expected))
+ assert.Len(t, expected, 2)
+
+ got := []HookTaskMigrated{}
+ require.NoError(t, x.Table("hook_task").Asc("id").Find(&got))
+
+ for i, expected := range expected {
+ expected, got := expected, got[i]
+ t.Run(strconv.FormatInt(expected.ID, 10), func(t *testing.T) {
+ assert.Equal(t, expected.PayloadVersion, got.PayloadVersion)
+ })
+ }
+}
diff --git a/models/migrations/v1_22/v291.go b/models/migrations/v1_22/v291.go
new file mode 100644
index 0000000..74726fa
--- /dev/null
+++ b/models/migrations/v1_22/v291.go
@@ -0,0 +1,18 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import "xorm.io/xorm"
+
+func AddCommentIDIndexofAttachment(x *xorm.Engine) error {
+ type Attachment struct {
+ CommentID int64 `xorm:"INDEX"`
+ }
+
+ _, err := x.SyncWithOptions(xorm.SyncOptions{
+ IgnoreDropIndices: true,
+ IgnoreConstrains: true,
+ }, &Attachment{})
+ return err
+}
diff --git a/models/migrations/v1_22/v292.go b/models/migrations/v1_22/v292.go
new file mode 100644
index 0000000..beca556
--- /dev/null
+++ b/models/migrations/v1_22/v292.go
@@ -0,0 +1,9 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+// NOTE: noop. The original migration had a bug that caused some projects to be
+// skipped, leaving them without a default board.
+// That migration is therefore skipped here, and the fixed version runs as v293.go.
+// This file is kept as a placeholder so that readers can see what happened.
diff --git a/models/migrations/v1_22/v293.go b/models/migrations/v1_22/v293.go
new file mode 100644
index 0000000..53cc719
--- /dev/null
+++ b/models/migrations/v1_22/v293.go
@@ -0,0 +1,108 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+// CheckProjectColumnsConsistency ensures that exactly one default board exists per project
+func CheckProjectColumnsConsistency(x *xorm.Engine) error {
+ sess := x.NewSession()
+ defer sess.Close()
+
+ limit := setting.Database.IterateBufferSize
+ if limit <= 0 {
+ limit = 50
+ }
+
+ type Project struct {
+ ID int64
+ CreatorID int64
+ BoardID int64
+ }
+
+ type ProjectBoard struct {
+ ID int64 `xorm:"pk autoincr"`
+ Title string
+ Default bool `xorm:"NOT NULL DEFAULT false"` // issues not assigned to a specific board will be assigned to this board
+ Sorting int8 `xorm:"NOT NULL DEFAULT 0"`
+ Color string `xorm:"VARCHAR(7)"`
+
+ ProjectID int64 `xorm:"INDEX NOT NULL"`
+ CreatorID int64 `xorm:"NOT NULL"`
+
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ }
+
+ for {
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+		// every project found without a default board is fixed within this loop,
+		// so we can keep fetching projects without defaults until none remain
+ var projects []*Project
+ if err := sess.Select("project.id as id, project.creator_id, project_board.id as board_id").
+ Join("LEFT", "project_board", "project_board.project_id = project.id AND project_board.`default`=?", true).
+ Where("project_board.id is NULL OR project_board.id = 0").
+ Limit(limit).
+ Find(&projects); err != nil {
+ return err
+ }
+
+ for _, p := range projects {
+ if _, err := sess.Insert(ProjectBoard{
+ ProjectID: p.ID,
+ Default: true,
+ Title: "Uncategorized",
+ CreatorID: p.CreatorID,
+ }); err != nil {
+ return err
+ }
+ }
+ if err := sess.Commit(); err != nil {
+ return err
+ }
+
+ if len(projects) == 0 {
+ break
+ }
+ }
+ sess.Close()
+
+ return removeDuplicatedBoardDefault(x)
+}
+
+func removeDuplicatedBoardDefault(x *xorm.Engine) error {
+ type ProjectInfo struct {
+ ProjectID int64
+ DefaultNum int
+ }
+ var projects []ProjectInfo
+ if err := x.Select("project_id, count(*) AS default_num").
+ Table("project_board").
+ Where("`default` = ?", true).
+ GroupBy("project_id").
+ Having("count(*) > 1").
+ Find(&projects); err != nil {
+ return err
+ }
+
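+	// For each project with more than one default column, unset all but one (DefaultNum-1 rows) so a single default remains.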
+ for _, project := range projects {
+ if _, err := x.Where("project_id=?", project.ProjectID).
+ Table("project_board").
+ Limit(project.DefaultNum - 1).
+ Update(map[string]bool{
+ "`default`": false,
+ }); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/models/migrations/v1_22/v293_test.go b/models/migrations/v1_22/v293_test.go
new file mode 100644
index 0000000..85bb464
--- /dev/null
+++ b/models/migrations/v1_22/v293_test.go
@@ -0,0 +1,45 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+ "code.gitea.io/gitea/models/project"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_CheckProjectColumnsConsistency(t *testing.T) {
+ // Prepare and load the testing database
+ x, deferable := migration_tests.PrepareTestEnv(t, 0, new(project.Project), new(project.Column))
+ defer deferable()
+ if x == nil || t.Failed() {
+ return
+ }
+
+ require.NoError(t, CheckProjectColumnsConsistency(x))
+
+ // check if default column was added
+ var defaultColumn project.Column
+ has, err := x.Where("project_id=? AND `default` = ?", 1, true).Get(&defaultColumn)
+ require.NoError(t, err)
+ assert.True(t, has)
+ assert.Equal(t, int64(1), defaultColumn.ProjectID)
+ assert.True(t, defaultColumn.Default)
+
+	// when there were multiple defaults, the earlier ones are unset and only the last one is kept
+	expectNonDefaultColumn, err := project.GetColumn(db.DefaultContext, 2)
+	require.NoError(t, err)
+	assert.Equal(t, int64(2), expectNonDefaultColumn.ProjectID)
+	assert.False(t, expectNonDefaultColumn.Default)
+
+	expectDefaultColumn, err := project.GetColumn(db.DefaultContext, 3)
+	require.NoError(t, err)
+	assert.Equal(t, int64(2), expectDefaultColumn.ProjectID)
+	assert.True(t, expectDefaultColumn.Default)
+}
diff --git a/models/migrations/v1_22/v294.go b/models/migrations/v1_22/v294.go
new file mode 100644
index 0000000..314b451
--- /dev/null
+++ b/models/migrations/v1_22/v294.go
@@ -0,0 +1,44 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+// AddUniqueIndexForProjectIssue adds a unique index to the project_issue table
+func AddUniqueIndexForProjectIssue(x *xorm.Engine) error {
+	// remove possibly duplicated records from the project_issue table
+ type result struct {
+ IssueID int64
+ ProjectID int64
+ Cnt int
+ }
+ var results []result
+ if err := x.Select("issue_id, project_id, count(*) as cnt").
+ Table("project_issue").
+ GroupBy("issue_id, project_id").
+ Having("count(*) > 1").
+ Find(&results); err != nil {
+ return err
+ }
+ for _, r := range results {
+ var ids []int64
+ if err := x.SQL("SELECT id FROM project_issue WHERE issue_id = ? and project_id = ? limit ?", r.IssueID, r.ProjectID, r.Cnt-1).Find(&ids); err != nil {
+ return err
+ }
+ if _, err := x.Table("project_issue").In("id", ids).Delete(); err != nil {
+ return err
+ }
+ }
+
+ // add unique index for project_issue table
+ type ProjectIssue struct { //revive:disable-line:exported
+ ID int64 `xorm:"pk autoincr"`
+ IssueID int64 `xorm:"INDEX unique(s)"`
+ ProjectID int64 `xorm:"INDEX unique(s)"`
+ }
+
+ return x.Sync(new(ProjectIssue))
+}
diff --git a/models/migrations/v1_22/v294_test.go b/models/migrations/v1_22/v294_test.go
new file mode 100644
index 0000000..c465d53
--- /dev/null
+++ b/models/migrations/v1_22/v294_test.go
@@ -0,0 +1,53 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import (
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "xorm.io/xorm/schemas"
+)
+
+func Test_AddUniqueIndexForProjectIssue(t *testing.T) {
+ type ProjectIssue struct { //revive:disable-line:exported
+ ID int64 `xorm:"pk autoincr"`
+ IssueID int64 `xorm:"INDEX"`
+ ProjectID int64 `xorm:"INDEX"`
+ }
+
+ // Prepare and load the testing database
+ x, deferable := migration_tests.PrepareTestEnv(t, 0, new(ProjectIssue))
+ defer deferable()
+ if x == nil || t.Failed() {
+ return
+ }
+
+ cnt, err := x.Table("project_issue").Where("project_id=1 AND issue_id=1").Count()
+ require.NoError(t, err)
+ assert.EqualValues(t, 2, cnt)
+
+ require.NoError(t, AddUniqueIndexForProjectIssue(x))
+
+ cnt, err = x.Table("project_issue").Where("project_id=1 AND issue_id=1").Count()
+ require.NoError(t, err)
+ assert.EqualValues(t, 1, cnt)
+
+ tables, err := x.DBMetas()
+ require.NoError(t, err)
+ assert.Len(t, tables, 1)
+ found := false
+ for _, index := range tables[0].Indexes {
+ if index.Type == schemas.UniqueType {
+ found = true
+			assert.ElementsMatch(t, []string{"project_id", "issue_id"}, index.Cols)
+ break
+ }
+ }
+ assert.True(t, found)
+}
diff --git a/models/migrations/v1_22/v295.go b/models/migrations/v1_22/v295.go
new file mode 100644
index 0000000..17bdadb
--- /dev/null
+++ b/models/migrations/v1_22/v295.go
@@ -0,0 +1,18 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import "xorm.io/xorm"
+
+func AddCommitStatusSummary(x *xorm.Engine) error {
+ type CommitStatusSummary struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"INDEX UNIQUE(repo_id_sha)"`
+ SHA string `xorm:"VARCHAR(64) NOT NULL INDEX UNIQUE(repo_id_sha)"`
+ State string `xorm:"VARCHAR(7) NOT NULL"`
+ }
+	// no data migration is needed: if this table has no rows, the code falls
+	// back to reading from the commit_status table
+	return x.Sync(new(CommitStatusSummary))
+}
diff --git a/models/migrations/v1_22/v296.go b/models/migrations/v1_22/v296.go
new file mode 100644
index 0000000..1ecacab
--- /dev/null
+++ b/models/migrations/v1_22/v296.go
@@ -0,0 +1,16 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import "xorm.io/xorm"
+
+func AddCommitStatusSummary2(x *xorm.Engine) error {
+ type CommitStatusSummary struct {
+ ID int64 `xorm:"pk autoincr"`
+ TargetURL string `xorm:"TEXT"`
+ }
+	// no data migration is needed: if this table has no rows, the code falls
+	// back to reading from the commit_status table
+ return x.Sync(new(CommitStatusSummary))
+}
diff --git a/models/migrations/v1_22/v298.go b/models/migrations/v1_22/v298.go
new file mode 100644
index 0000000..b9f3b95
--- /dev/null
+++ b/models/migrations/v1_22/v298.go
@@ -0,0 +1,10 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_22 //nolint
+
+import "xorm.io/xorm"
+
+func DropWronglyCreatedTable(x *xorm.Engine) error {
+ return x.DropTables("o_auth2_application")
+}
diff --git a/models/migrations/v1_23/main_test.go b/models/migrations/v1_23/main_test.go
new file mode 100644
index 0000000..e3425e4
--- /dev/null
+++ b/models/migrations/v1_23/main_test.go
@@ -0,0 +1,14 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_23 //nolint
+
+import (
+ "testing"
+
+ migration_tests "code.gitea.io/gitea/models/migrations/test"
+)
+
+func TestMain(m *testing.M) {
+ migration_tests.MainTest(m)
+}
diff --git a/models/migrations/v1_23/v299.go b/models/migrations/v1_23/v299.go
new file mode 100644
index 0000000..f6db960
--- /dev/null
+++ b/models/migrations/v1_23/v299.go
@@ -0,0 +1,18 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_23 //nolint
+
+import "xorm.io/xorm"
+
+func AddContentVersionToIssueAndComment(x *xorm.Engine) error {
+ type Issue struct {
+ ContentVersion int `xorm:"NOT NULL DEFAULT 0"`
+ }
+
+ type Comment struct {
+ ContentVersion int `xorm:"NOT NULL DEFAULT 0"`
+ }
+
+ return x.Sync(new(Comment), new(Issue))
+}
diff --git a/models/migrations/v1_23/v300.go b/models/migrations/v1_23/v300.go
new file mode 100644
index 0000000..f1f1ccc
--- /dev/null
+++ b/models/migrations/v1_23/v300.go
@@ -0,0 +1,17 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_23 //nolint
+
+import "xorm.io/xorm"
+
+func AddForcePushBranchProtection(x *xorm.Engine) error {
+ type ProtectedBranch struct {
+ CanForcePush bool `xorm:"NOT NULL DEFAULT false"`
+ EnableForcePushAllowlist bool `xorm:"NOT NULL DEFAULT false"`
+ ForcePushAllowlistUserIDs []int64 `xorm:"JSON TEXT"`
+ ForcePushAllowlistTeamIDs []int64 `xorm:"JSON TEXT"`
+ ForcePushAllowlistDeployKeys bool `xorm:"NOT NULL DEFAULT false"`
+ }
+ return x.Sync(new(ProtectedBranch))
+}
diff --git a/models/migrations/v1_23/v301.go b/models/migrations/v1_23/v301.go
new file mode 100644
index 0000000..b7797f6
--- /dev/null
+++ b/models/migrations/v1_23/v301.go
@@ -0,0 +1,14 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_23 //nolint
+
+import "xorm.io/xorm"
+
+// AddSkipSecondaryAuthColumnToOAuth2ApplicationTable adds the SkipSecondaryAuthorization column, setting existing rows to false
+func AddSkipSecondaryAuthColumnToOAuth2ApplicationTable(x *xorm.Engine) error {
+ type oauth2Application struct {
+ SkipSecondaryAuthorization bool `xorm:"NOT NULL DEFAULT FALSE"`
+ }
+ return x.Sync(new(oauth2Application))
+}
diff --git a/models/migrations/v1_23/v302.go b/models/migrations/v1_23/v302.go
new file mode 100644
index 0000000..d7ea03e
--- /dev/null
+++ b/models/migrations/v1_23/v302.go
@@ -0,0 +1,18 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_23 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func AddIndexToActionTaskStoppedLogExpired(x *xorm.Engine) error {
+ type ActionTask struct {
+ Stopped timeutil.TimeStamp `xorm:"index(stopped_log_expired)"`
+ LogExpired bool `xorm:"index(stopped_log_expired)"`
+ }
+ return x.Sync(new(ActionTask))
+}
diff --git a/models/migrations/v1_6/v70.go b/models/migrations/v1_6/v70.go
new file mode 100644
index 0000000..74434a8
--- /dev/null
+++ b/models/migrations/v1_6/v70.go
@@ -0,0 +1,110 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_6 //nolint
+
+import (
+ "fmt"
+ "time"
+
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+)
+
+func AddIssueDependencies(x *xorm.Engine) (err error) {
+ type IssueDependency struct {
+ ID int64 `xorm:"pk autoincr"`
+ UserID int64 `xorm:"NOT NULL"`
+ IssueID int64 `xorm:"NOT NULL"`
+ DependencyID int64 `xorm:"NOT NULL"`
+ Created time.Time `xorm:"-"`
+ CreatedUnix int64 `xorm:"created"`
+ Updated time.Time `xorm:"-"`
+ UpdatedUnix int64 `xorm:"updated"`
+ }
+
+ const (
+ v16UnitTypeCode = iota + 1 // 1 code
+ v16UnitTypeIssues // 2 issues
+ v16UnitTypePRs // 3 PRs
+ v16UnitTypeCommits // 4 Commits
+ v16UnitTypeReleases // 5 Releases
+ v16UnitTypeWiki // 6 Wiki
+ v16UnitTypeSettings // 7 Settings
+ v16UnitTypeExternalWiki // 8 ExternalWiki
+ v16UnitTypeExternalTracker // 9 ExternalTracker
+ )
+
+ if err = x.Sync(new(IssueDependency)); err != nil {
+		return fmt.Errorf("Error creating issue_dependency table: %w", err)
+ }
+
+	// Update the Comment definition.
+	// This copied struct contains only the fields used by xorm, since its sole purpose here is to update the database schema.
+
+ // CommentType defines the comment type
+ type CommentType int
+
+ // TimeStamp defines a timestamp
+ type TimeStamp int64
+
+ type Comment struct {
+ ID int64 `xorm:"pk autoincr"`
+ Type CommentType
+ PosterID int64 `xorm:"INDEX"`
+ IssueID int64 `xorm:"INDEX"`
+ LabelID int64
+ OldMilestoneID int64
+ MilestoneID int64
+ OldAssigneeID int64
+ AssigneeID int64
+ OldTitle string
+ NewTitle string
+ DependentIssueID int64
+
+ CommitID int64
+ Line int64
+ Content string `xorm:"TEXT"`
+
+ CreatedUnix TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix TimeStamp `xorm:"INDEX updated"`
+
+ // Reference issue in commit message
+ CommitSHA string `xorm:"VARCHAR(40)"`
+ }
+
+ if err = x.Sync(new(Comment)); err != nil {
+ return fmt.Errorf("Error updating issue_comment table column definition: %w", err)
+ }
+
+ // RepoUnit describes all units of a repository
+ type RepoUnit struct {
+ ID int64
+ RepoID int64 `xorm:"INDEX(s)"`
+ Type int `xorm:"INDEX(s)"`
+ Config map[string]any `xorm:"JSON"`
+ CreatedUnix int64 `xorm:"INDEX CREATED"`
+ Created time.Time `xorm:"-"`
+ }
+
+ // Updating existing issue units
+ units := make([]*RepoUnit, 0, 100)
+ err = x.Where("`type` = ?", v16UnitTypeIssues).Find(&units)
+ if err != nil {
+ return fmt.Errorf("Query repo units: %w", err)
+ }
+ for _, unit := range units {
+ if unit.Config == nil {
+ unit.Config = make(map[string]any)
+ }
+ if _, ok := unit.Config["EnableDependencies"]; !ok {
+ unit.Config["EnableDependencies"] = setting.Service.DefaultEnableDependencies
+ }
+ if _, err := x.ID(unit.ID).Cols("config").Update(unit); err != nil {
+ return err
+ }
+ }
+
+ return err
+}
diff --git a/models/migrations/v1_6/v71.go b/models/migrations/v1_6/v71.go
new file mode 100644
index 0000000..5861872
--- /dev/null
+++ b/models/migrations/v1_6/v71.go
@@ -0,0 +1,79 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_6 //nolint
+
+import (
+ "fmt"
+
+ "code.gitea.io/gitea/models/migrations/base"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/xorm"
+)
+
+func AddScratchHash(x *xorm.Engine) error {
+ // TwoFactor see models/twofactor.go
+ type TwoFactor struct {
+ ID int64 `xorm:"pk autoincr"`
+ UID int64 `xorm:"UNIQUE"`
+ Secret string
+ ScratchToken string
+ ScratchSalt string
+ ScratchHash string
+ LastUsedPasscode string `xorm:"VARCHAR(10)"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ }
+
+ if err := x.Sync(new(TwoFactor)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ // transform all tokens to hashes
+ const batchSize = 100
+ for start := 0; ; start += batchSize {
+ tfas := make([]*TwoFactor, 0, batchSize)
+ if err := sess.Limit(batchSize, start).Find(&tfas); err != nil {
+ return err
+ }
+ if len(tfas) == 0 {
+ break
+ }
+
+ for _, tfa := range tfas {
+ // generate salt
+ salt, err := util.CryptoRandomString(10)
+ if err != nil {
+ return err
+ }
+ tfa.ScratchSalt = salt
+ tfa.ScratchHash = base.HashToken(tfa.ScratchToken, salt)
+
+ if _, err := sess.ID(tfa.ID).Cols("scratch_salt, scratch_hash").Update(tfa); err != nil {
+ return fmt.Errorf("couldn't add in scratch_hash and scratch_salt: %w", err)
+ }
+ }
+ }
+
+ // Commit and begin new transaction for dropping columns
+ if err := sess.Commit(); err != nil {
+ return err
+ }
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ if err := base.DropTableColumns(sess, "two_factor", "scratch_token"); err != nil {
+ return err
+ }
+ return sess.Commit()
+}
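+
+// A minimal verification sketch (assumption: base.HashToken(token, salt) is the
+// same helper used above and returns the encoding stored in ScratchHash):
+//
+//	func verifyScratchToken(tfa *TwoFactor, candidate string) bool {
+//		return base.HashToken(candidate, tfa.ScratchSalt) == tfa.ScratchHash
+//	}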
diff --git a/models/migrations/v1_6/v72.go b/models/migrations/v1_6/v72.go
new file mode 100644
index 0000000..04cef9a
--- /dev/null
+++ b/models/migrations/v1_6/v72.go
@@ -0,0 +1,30 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_6 //nolint
+
+import (
+ "fmt"
+
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func AddReview(x *xorm.Engine) error {
+ // Review see models/review.go
+ type Review struct {
+ ID int64 `xorm:"pk autoincr"`
+ Type string
+ ReviewerID int64 `xorm:"index"`
+ IssueID int64 `xorm:"index"`
+ Content string
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ }
+
+ if err := x.Sync(new(Review)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+ return nil
+}
diff --git a/models/migrations/v1_7/v73.go b/models/migrations/v1_7/v73.go
new file mode 100644
index 0000000..b5a748a
--- /dev/null
+++ b/models/migrations/v1_7/v73.go
@@ -0,0 +1,18 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_7 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddMustChangePassword(x *xorm.Engine) error {
+ // User see models/user.go
+ type User struct {
+ ID int64 `xorm:"pk autoincr"`
+ MustChangePassword bool `xorm:"NOT NULL DEFAULT false"`
+ }
+
+ return x.Sync(new(User))
+}
diff --git a/models/migrations/v1_7/v74.go b/models/migrations/v1_7/v74.go
new file mode 100644
index 0000000..f0567e3
--- /dev/null
+++ b/models/migrations/v1_7/v74.go
@@ -0,0 +1,15 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_7 //nolint
+
+import "xorm.io/xorm"
+
+func AddApprovalWhitelistsToProtectedBranches(x *xorm.Engine) error {
+ type ProtectedBranch struct {
+ ApprovalsWhitelistUserIDs []int64 `xorm:"JSON TEXT"`
+ ApprovalsWhitelistTeamIDs []int64 `xorm:"JSON TEXT"`
+ RequiredApprovals int64 `xorm:"NOT NULL DEFAULT 0"`
+ }
+ return x.Sync(new(ProtectedBranch))
+}
diff --git a/models/migrations/v1_7/v75.go b/models/migrations/v1_7/v75.go
new file mode 100644
index 0000000..fa74309
--- /dev/null
+++ b/models/migrations/v1_7/v75.go
@@ -0,0 +1,32 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_7 //nolint
+
+import (
+ "xorm.io/builder"
+ "xorm.io/xorm"
+)
+
+func ClearNonusedData(x *xorm.Engine) error {
+ condDelete := func(colName string) builder.Cond {
+ return builder.NotIn(colName, builder.Select("id").From("`user`"))
+ }
+
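+	// Each DELETE below removes rows whose user reference no longer points at an existing row in the user table.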
+ if _, err := x.Exec(builder.Delete(condDelete("uid")).From("team_user")); err != nil {
+ return err
+ }
+
+ if _, err := x.Exec(builder.Delete(condDelete("user_id")).From("collaboration")); err != nil {
+ return err
+ }
+
+ if _, err := x.Exec(builder.Delete(condDelete("user_id")).From("stopwatch")); err != nil {
+ return err
+ }
+
+ if _, err := x.Exec(builder.Delete(condDelete("owner_id")).From("gpg_key")); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/models/migrations/v1_8/v76.go b/models/migrations/v1_8/v76.go
new file mode 100644
index 0000000..d3fbd94
--- /dev/null
+++ b/models/migrations/v1_8/v76.go
@@ -0,0 +1,74 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_8 //nolint
+
+import (
+ "fmt"
+
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func AddPullRequestRebaseWithMerge(x *xorm.Engine) error {
+ // RepoUnit describes all units of a repository
+ type RepoUnit struct {
+ ID int64
+ RepoID int64 `xorm:"INDEX(s)"`
+ Type int `xorm:"INDEX(s)"`
+ Config map[string]any `xorm:"JSON"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX CREATED"`
+ }
+
+ const (
+ v16UnitTypeCode = iota + 1 // 1 code
+ v16UnitTypeIssues // 2 issues
+ v16UnitTypePRs // 3 PRs
+ v16UnitTypeCommits // 4 Commits
+ v16UnitTypeReleases // 5 Releases
+ v16UnitTypeWiki // 6 Wiki
+ v16UnitTypeSettings // 7 Settings
+ v16UnitTypeExternalWiki // 8 ExternalWiki
+ v16UnitTypeExternalTracker // 9 ExternalTracker
+ )
+
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ // Updating existing issue units
+ units := make([]*RepoUnit, 0, 100)
+ if err := sess.Where("`type` = ?", v16UnitTypePRs).Find(&units); err != nil {
+ return fmt.Errorf("Query repo units: %w", err)
+ }
+ for _, unit := range units {
+ if unit.Config == nil {
+ unit.Config = make(map[string]any)
+ }
+ // Allow the new merge style if all other merge styles are allowed
+ allowMergeRebase := true
+
+ if allowMerge, ok := unit.Config["AllowMerge"]; ok {
+ allowMergeRebase = allowMergeRebase && allowMerge.(bool)
+ }
+
+ if allowRebase, ok := unit.Config["AllowRebase"]; ok {
+ allowMergeRebase = allowMergeRebase && allowRebase.(bool)
+ }
+
+ if allowSquash, ok := unit.Config["AllowSquash"]; ok {
+ allowMergeRebase = allowMergeRebase && allowSquash.(bool)
+ }
+
+ if _, ok := unit.Config["AllowRebaseMerge"]; !ok {
+ unit.Config["AllowRebaseMerge"] = allowMergeRebase
+ }
+ if _, err := sess.ID(unit.ID).Cols("config").Update(unit); err != nil {
+ return err
+ }
+ }
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_8/v77.go b/models/migrations/v1_8/v77.go
new file mode 100644
index 0000000..8b19993
--- /dev/null
+++ b/models/migrations/v1_8/v77.go
@@ -0,0 +1,16 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_8 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddUserDefaultTheme(x *xorm.Engine) error {
+ type User struct {
+ Theme string `xorm:"VARCHAR(30) NOT NULL DEFAULT ''"`
+ }
+
+ return x.Sync(new(User))
+}
diff --git a/models/migrations/v1_8/v78.go b/models/migrations/v1_8/v78.go
new file mode 100644
index 0000000..8f041c1
--- /dev/null
+++ b/models/migrations/v1_8/v78.go
@@ -0,0 +1,43 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_8 //nolint
+
+import (
+ "code.gitea.io/gitea/models/migrations/base"
+
+ "xorm.io/xorm"
+)
+
+func RenameRepoIsBareToIsEmpty(x *xorm.Engine) error {
+ type Repository struct {
+ ID int64 `xorm:"pk autoincr"`
+ IsBare bool
+ IsEmpty bool `xorm:"INDEX"`
+ }
+
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ if err := sess.Sync(new(Repository)); err != nil {
+ return err
+ }
+ if _, err := sess.Exec("UPDATE repository SET is_empty = is_bare;"); err != nil {
+ return err
+ }
+ if err := sess.Commit(); err != nil {
+ return err
+ }
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+ if err := base.DropTableColumns(sess, "repository", "is_bare"); err != nil {
+ return err
+ }
+
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_8/v79.go b/models/migrations/v1_8/v79.go
new file mode 100644
index 0000000..eb3a9ed
--- /dev/null
+++ b/models/migrations/v1_8/v79.go
@@ -0,0 +1,25 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_8 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+)
+
+func AddCanCloseIssuesViaCommitInAnyBranch(x *xorm.Engine) error {
+ type Repository struct {
+ ID int64 `xorm:"pk autoincr"`
+ CloseIssuesViaCommitInAnyBranch bool `xorm:"NOT NULL DEFAULT false"`
+ }
+
+ if err := x.Sync(new(Repository)); err != nil {
+ return err
+ }
+
+ _, err := x.Exec("UPDATE repository SET close_issues_via_commit_in_any_branch = ?",
+ setting.Repository.DefaultCloseIssuesViaCommitsInAnyBranch)
+ return err
+}
diff --git a/models/migrations/v1_8/v80.go b/models/migrations/v1_8/v80.go
new file mode 100644
index 0000000..cebbbea
--- /dev/null
+++ b/models/migrations/v1_8/v80.go
@@ -0,0 +1,16 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_8 //nolint
+
+import "xorm.io/xorm"
+
+func AddIsLockedToIssues(x *xorm.Engine) error {
+ // Issue see models/issue.go
+ type Issue struct {
+ ID int64 `xorm:"pk autoincr"`
+ IsLocked bool `xorm:"NOT NULL DEFAULT false"`
+ }
+
+ return x.Sync(new(Issue))
+}
diff --git a/models/migrations/v1_8/v81.go b/models/migrations/v1_8/v81.go
new file mode 100644
index 0000000..734fc24
--- /dev/null
+++ b/models/migrations/v1_8/v81.go
@@ -0,0 +1,28 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_8 //nolint
+
+import (
+ "fmt"
+
+ "xorm.io/xorm"
+ "xorm.io/xorm/schemas"
+)
+
+func ChangeU2FCounterType(x *xorm.Engine) error {
+ var err error
+
+ switch x.Dialect().URI().DBType {
+ case schemas.MYSQL:
+ _, err = x.Exec("ALTER TABLE `u2f_registration` MODIFY `counter` BIGINT")
+ case schemas.POSTGRES:
+ _, err = x.Exec("ALTER TABLE `u2f_registration` ALTER COLUMN `counter` SET DATA TYPE bigint")
+ }
+
+ if err != nil {
+ return fmt.Errorf("Error changing u2f_registration counter column type: %w", err)
+ }
+
+ return nil
+}
diff --git a/models/migrations/v1_9/v82.go b/models/migrations/v1_9/v82.go
new file mode 100644
index 0000000..26806dd
--- /dev/null
+++ b/models/migrations/v1_9/v82.go
@@ -0,0 +1,133 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_9 //nolint
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/xorm"
+)
+
+func FixReleaseSha1OnReleaseTable(x *xorm.Engine) error {
+ type Release struct {
+ ID int64
+ RepoID int64
+ Sha1 string
+ TagName string
+ }
+
+ type Repository struct {
+ ID int64
+ OwnerID int64
+ Name string
+ }
+
+ type User struct {
+ ID int64
+ Name string
+ }
+
+	// UserPath returns the absolute path of the user's repositories.
+ UserPath := func(userName string) string {
+ return filepath.Join(setting.RepoRootPath, strings.ToLower(userName))
+ }
+
+ // RepoPath returns repository path by given user and repository name.
+ RepoPath := func(userName, repoName string) string {
+ return filepath.Join(UserPath(userName), strings.ToLower(repoName)+".git")
+ }
+
+ // Update release sha1
+ const batchSize = 100
+ sess := x.NewSession()
+ defer sess.Close()
+
+ var (
+ err error
+ count int
+ gitRepoCache = make(map[int64]*git.Repository)
+ repoCache = make(map[int64]*Repository)
+ userCache = make(map[int64]*User)
+ )
+
+ if err = sess.Begin(); err != nil {
+ return err
+ }
+
+ for start := 0; ; start += batchSize {
+ releases := make([]*Release, 0, batchSize)
+ if err = sess.Limit(batchSize, start).Asc("id").Where("is_tag=?", false).Find(&releases); err != nil {
+ return err
+ }
+ if len(releases) == 0 {
+ break
+ }
+
+ for _, release := range releases {
+ gitRepo, ok := gitRepoCache[release.RepoID]
+ if !ok {
+ repo, ok := repoCache[release.RepoID]
+ if !ok {
+ repo = new(Repository)
+ has, err := sess.ID(release.RepoID).Get(repo)
+ if err != nil {
+ return err
+ } else if !has {
+					return fmt.Errorf("Repository %d does not exist", release.RepoID)
+ }
+
+ repoCache[release.RepoID] = repo
+ }
+
+ user, ok := userCache[repo.OwnerID]
+ if !ok {
+ user = new(User)
+ has, err := sess.ID(repo.OwnerID).Get(user)
+ if err != nil {
+ return err
+ } else if !has {
+					return fmt.Errorf("User %d does not exist", repo.OwnerID)
+ }
+
+ userCache[repo.OwnerID] = user
+ }
+
+ gitRepo, err = git.OpenRepository(git.DefaultContext, RepoPath(user.Name, repo.Name))
+ if err != nil {
+ return err
+ }
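+			// Closing is deferred until the migration returns; each repository is opened at most once thanks to gitRepoCache.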
+ defer gitRepo.Close()
+ gitRepoCache[release.RepoID] = gitRepo
+ }
+
+ release.Sha1, err = gitRepo.GetTagCommitID(release.TagName)
+ if err != nil && !git.IsErrNotExist(err) {
+ return err
+ }
+
+ if err == nil {
+ if _, err = sess.ID(release.ID).Cols("sha1").Update(release); err != nil {
+ return err
+ }
+ }
+
+ count++
+ if count >= 1000 {
+ if err = sess.Commit(); err != nil {
+ return err
+ }
+ if err = sess.Begin(); err != nil {
+ return err
+ }
+ count = 0
+ }
+ }
+ }
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_9/v83.go b/models/migrations/v1_9/v83.go
new file mode 100644
index 0000000..10e6c45
--- /dev/null
+++ b/models/migrations/v1_9/v83.go
@@ -0,0 +1,27 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_9 //nolint
+
+import (
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/xorm"
+)
+
+func AddUploaderIDForAttachment(x *xorm.Engine) error {
+ type Attachment struct {
+ ID int64 `xorm:"pk autoincr"`
+ UUID string `xorm:"uuid UNIQUE"`
+ IssueID int64 `xorm:"INDEX"`
+ ReleaseID int64 `xorm:"INDEX"`
+ UploaderID int64 `xorm:"INDEX DEFAULT 0"`
+ CommentID int64
+ Name string
+ DownloadCount int64 `xorm:"DEFAULT 0"`
+ Size int64 `xorm:"DEFAULT 0"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+ }
+
+ return x.Sync(new(Attachment))
+}
diff --git a/models/migrations/v1_9/v84.go b/models/migrations/v1_9/v84.go
new file mode 100644
index 0000000..c7155fe
--- /dev/null
+++ b/models/migrations/v1_9/v84.go
@@ -0,0 +1,17 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_9 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddGPGKeyImport(x *xorm.Engine) error {
+ type GPGKeyImport struct {
+ KeyID string `xorm:"pk CHAR(16) NOT NULL"`
+ Content string `xorm:"TEXT NOT NULL"`
+ }
+
+ return x.Sync(new(GPGKeyImport))
+}
diff --git a/models/migrations/v1_9/v85.go b/models/migrations/v1_9/v85.go
new file mode 100644
index 0000000..a23d7c5
--- /dev/null
+++ b/models/migrations/v1_9/v85.go
@@ -0,0 +1,118 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_9 //nolint
+
+import (
+ "fmt"
+
+ "code.gitea.io/gitea/models/migrations/base"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/xorm"
+)
+
+func HashAppToken(x *xorm.Engine) error {
+ // AccessToken see models/token.go
+ type AccessToken struct {
+ ID int64 `xorm:"pk autoincr"`
+ UID int64 `xorm:"INDEX"`
+ Name string
+ Sha1 string
+ Token string `xorm:"-"`
+ TokenHash string // sha256 of token - we will ensure UNIQUE later
+ TokenSalt string
+ TokenLastEight string `xorm:"token_last_eight"`
+
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ HasRecentActivity bool `xorm:"-"`
+ HasUsed bool `xorm:"-"`
+ }
+
+ // First remove the index
+ sess := x.NewSession()
+ defer sess.Close()
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ if err := sess.Sync(new(AccessToken)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+
+ if err := sess.Commit(); err != nil {
+ return err
+ }
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ // transform all tokens to hashes
+ const batchSize = 100
+ for start := 0; ; start += batchSize {
+ tokens := make([]*AccessToken, 0, batchSize)
+ if err := sess.Limit(batchSize, start).Find(&tokens); err != nil {
+ return err
+ }
+ if len(tokens) == 0 {
+ break
+ }
+
+ for _, token := range tokens {
+ // generate salt
+ salt, err := util.CryptoRandomString(10)
+ if err != nil {
+ return err
+ }
+ token.TokenSalt = salt
+ token.TokenHash = base.HashToken(token.Sha1, salt)
+ if len(token.Sha1) < 8 {
+ log.Warn("Unable to transform token %s with name %s belonging to user ID %d, skipping transformation", token.Sha1, token.Name, token.UID)
+ continue
+ }
+ token.TokenLastEight = token.Sha1[len(token.Sha1)-8:]
+ token.Sha1 = "" // ensure to blank out column in case drop column doesn't work
+
+ if _, err := sess.ID(token.ID).Cols("token_hash, token_salt, token_last_eight, sha1").Update(token); err != nil {
+ return fmt.Errorf("couldn't add in sha1, token_hash, token_salt and token_last_eight: %w", err)
+ }
+ }
+ }
+
+ // Commit and begin new transaction for dropping columns
+ if err := sess.Commit(); err != nil {
+ return err
+ }
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ if err := base.DropTableColumns(sess, "access_token", "sha1"); err != nil {
+ return err
+ }
+ if err := sess.Commit(); err != nil {
+ return err
+ }
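+	// Only now that every row carries a distinct token_hash can the UNIQUE constraint be added safely.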
+ return resyncHashAppTokenWithUniqueHash(x)
+}
+
+func resyncHashAppTokenWithUniqueHash(x *xorm.Engine) error {
+ // AccessToken see models/token.go
+ type AccessToken struct {
+ TokenHash string `xorm:"UNIQUE"` // sha256 of token - we will ensure UNIQUE later
+ }
+ sess := x.NewSession()
+ defer sess.Close()
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+ if err := sess.Sync(new(AccessToken)); err != nil {
+ return fmt.Errorf("Sync: %w", err)
+ }
+ return sess.Commit()
+}
diff --git a/models/migrations/v1_9/v86.go b/models/migrations/v1_9/v86.go
new file mode 100644
index 0000000..cf2725d
--- /dev/null
+++ b/models/migrations/v1_9/v86.go
@@ -0,0 +1,16 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_9 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddHTTPMethodToWebhook(x *xorm.Engine) error {
+ type Webhook struct {
+ HTTPMethod string `xorm:"http_method DEFAULT 'POST'"`
+ }
+
+ return x.Sync(new(Webhook))
+}
diff --git a/models/migrations/v1_9/v87.go b/models/migrations/v1_9/v87.go
new file mode 100644
index 0000000..fa01b6e
--- /dev/null
+++ b/models/migrations/v1_9/v87.go
@@ -0,0 +1,17 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_9 //nolint
+
+import (
+ "xorm.io/xorm"
+)
+
+func AddAvatarFieldToRepository(x *xorm.Engine) error {
+ type Repository struct {
+		// ID(10-20)-md5(32) - must fit into 64 characters
+ Avatar string `xorm:"VARCHAR(64)"`
+ }
+
+ return x.Sync(new(Repository))
+}
diff --git a/models/org.go b/models/org.go
new file mode 100644
index 0000000..5f61f05
--- /dev/null
+++ b/models/org.go
@@ -0,0 +1,102 @@
+// Copyright 2014 The Gogs Authors. All rights reserved.
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package models
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/organization"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+)
+
+// RemoveOrgUser removes user from given organization.
+func RemoveOrgUser(ctx context.Context, orgID, userID int64) error {
+ ou := new(organization.OrgUser)
+
+ has, err := db.GetEngine(ctx).
+ Where("uid=?", userID).
+ And("org_id=?", orgID).
+ Get(ou)
+ if err != nil {
+ return fmt.Errorf("get org-user: %w", err)
+ } else if !has {
+ return nil
+ }
+
+ org, err := organization.GetOrgByID(ctx, orgID)
+ if err != nil {
+		return fmt.Errorf("GetOrgByID [%d]: %w", orgID, err)
+ }
+
+ // Check if the user to delete is the last member in owner team.
+	// Check if the user to delete is the last member of the owner team.
+ return err
+ } else if isOwner {
+ t, err := organization.GetOwnerTeam(ctx, org.ID)
+ if err != nil {
+ return err
+ }
+ if t.NumMembers == 1 {
+ if err := t.LoadMembers(ctx); err != nil {
+ return err
+ }
+ if t.Members[0].ID == userID {
+ return organization.ErrLastOrgOwner{UID: userID}
+ }
+ }
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if _, err := db.DeleteByID[organization.OrgUser](ctx, ou.ID); err != nil {
+ return err
+ } else if _, err = db.Exec(ctx, "UPDATE `user` SET num_members=num_members-1 WHERE id=?", orgID); err != nil {
+ return err
+ }
+
+ // Delete all repository accesses and unwatch them.
+ env, err := organization.AccessibleReposEnv(ctx, org, userID)
+ if err != nil {
+ return fmt.Errorf("AccessibleReposEnv: %w", err)
+ }
+ repoIDs, err := env.RepoIDs(1, org.NumRepos)
+ if err != nil {
+ return fmt.Errorf("GetUserRepositories [%d]: %w", userID, err)
+ }
+ for _, repoID := range repoIDs {
+ if err = repo_model.WatchRepo(ctx, userID, repoID, false); err != nil {
+ return err
+ }
+ }
+
+ if len(repoIDs) > 0 {
+ if _, err = db.GetEngine(ctx).
+ Where("user_id = ?", userID).
+ In("repo_id", repoIDs).
+ Delete(new(access_model.Access)); err != nil {
+ return err
+ }
+ }
+
+ // Delete member in their teams.
+ teams, err := organization.GetUserOrgTeams(ctx, org.ID, userID)
+ if err != nil {
+ return err
+ }
+ for _, t := range teams {
+ if err = removeTeamMember(ctx, t, userID); err != nil {
+ return err
+ }
+ }
+
+ return committer.Commit()
+}
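+
+// A minimal usage sketch (hypothetical caller):
+//
+//	if err := RemoveOrgUser(ctx, org.ID, member.ID); err != nil {
+//		return fmt.Errorf("remove org user: %w", err)
+//	}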
diff --git a/models/org_team.go b/models/org_team.go
new file mode 100644
index 0000000..1a45243
--- /dev/null
+++ b/models/org_team.go
@@ -0,0 +1,551 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// Copyright 2016 The Gogs Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package models
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ git_model "code.gitea.io/gitea/models/git"
+ issues_model "code.gitea.io/gitea/models/issues"
+ "code.gitea.io/gitea/models/organization"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+func AddRepository(ctx context.Context, t *organization.Team, repo *repo_model.Repository) (err error) {
+ if err = organization.AddTeamRepo(ctx, t.OrgID, t.ID, repo.ID); err != nil {
+ return err
+ }
+
+ if err = organization.IncrTeamRepoNum(ctx, t.ID); err != nil {
+ return fmt.Errorf("update team: %w", err)
+ }
+
+ t.NumRepos++
+
+ if err = access_model.RecalculateTeamAccesses(ctx, repo, 0); err != nil {
+ return fmt.Errorf("recalculateAccesses: %w", err)
+ }
+
+ // Make all team members watch this repo if enabled in global settings
+ if setting.Service.AutoWatchNewRepos {
+ if err = t.LoadMembers(ctx); err != nil {
+ return fmt.Errorf("getMembers: %w", err)
+ }
+ for _, u := range t.Members {
+ if err = repo_model.WatchRepo(ctx, u.ID, repo.ID, true); err != nil {
+ return fmt.Errorf("watchRepo: %w", err)
+ }
+ }
+ }
+
+ return nil
+}
+
+// addAllRepositories adds all repositories to the team.
+// If the team already has some repositories they will be left unchanged.
+func addAllRepositories(ctx context.Context, t *organization.Team) error {
+ orgRepos, err := organization.GetOrgRepositories(ctx, t.OrgID)
+ if err != nil {
+ return fmt.Errorf("get org repos: %w", err)
+ }
+
+ for _, repo := range orgRepos {
+ if !organization.HasTeamRepo(ctx, t.OrgID, t.ID, repo.ID) {
+ if err := AddRepository(ctx, t, repo); err != nil {
+ return fmt.Errorf("AddRepository: %w", err)
+ }
+ }
+ }
+
+ return nil
+}
+
+// AddAllRepositories adds all of the organization's repositories to the team within a transaction.
+func AddAllRepositories(ctx context.Context, t *organization.Team) (err error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if err = addAllRepositories(ctx, t); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
+
+// RemoveAllRepositories removes all repositories from the team and recalculates access
+func RemoveAllRepositories(ctx context.Context, t *organization.Team) (err error) {
+ if t.IncludesAllRepositories {
+ return nil
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if err = removeAllRepositories(ctx, t); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
+
+// removeAllRepositories removes all repositories from the team and recalculates access.
+// Note: it must not be called if the team includes all repositories.
+func removeAllRepositories(ctx context.Context, t *organization.Team) (err error) {
+ e := db.GetEngine(ctx)
+ // Delete all accesses.
+ for _, repo := range t.Repos {
+ if err := access_model.RecalculateTeamAccesses(ctx, repo, t.ID); err != nil {
+ return err
+ }
+
+ // Remove team members' watches on repos that are now inaccessible to them
+ for _, user := range t.Members {
+ has, err := access_model.HasAccess(ctx, user.ID, repo)
+ if err != nil {
+ return err
+ } else if has {
+ continue
+ }
+
+ if err = repo_model.WatchRepo(ctx, user.ID, repo.ID, false); err != nil {
+ return err
+ }
+
+ // Remove all IssueWatches a user has subscribed to in the repositories
+ if err = issues_model.RemoveIssueWatchersByRepoID(ctx, user.ID, repo.ID); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Delete team-repo
+ if _, err := e.
+ Where("team_id=?", t.ID).
+ Delete(new(organization.TeamRepo)); err != nil {
+ return err
+ }
+
+ t.NumRepos = 0
+ if _, err = e.ID(t.ID).Cols("num_repos").Update(t); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// NewTeam creates a record of a new team.
+// It is the caller's responsibility to assign the organization ID.
+func NewTeam(ctx context.Context, t *organization.Team) (err error) {
+ if len(t.Name) == 0 {
+ return util.NewInvalidArgumentErrorf("empty team name")
+ }
+
+ if err = organization.IsUsableTeamName(t.Name); err != nil {
+ return err
+ }
+
+ has, err := db.ExistByID[user_model.User](ctx, t.OrgID)
+ if err != nil {
+ return err
+ }
+ if !has {
+ return organization.ErrOrgNotExist{ID: t.OrgID}
+ }
+
+ t.LowerName = strings.ToLower(t.Name)
+ has, err = db.Exist[organization.Team](ctx, builder.Eq{
+ "org_id": t.OrgID,
+ "lower_name": t.LowerName,
+ })
+ if err != nil {
+ return err
+ }
+ if has {
+ return organization.ErrTeamAlreadyExist{OrgID: t.OrgID, Name: t.LowerName}
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if err = db.Insert(ctx, t); err != nil {
+ return err
+ }
+
+ // insert units for team
+ if len(t.Units) > 0 {
+ for _, unit := range t.Units {
+ unit.TeamID = t.ID
+ }
+ if err = db.Insert(ctx, &t.Units); err != nil {
+ return err
+ }
+ }
+
+ // Add all repositories to the team if it has access to all of them.
+ if t.IncludesAllRepositories {
+ err = addAllRepositories(ctx, t)
+ if err != nil {
+ return fmt.Errorf("addAllRepositories: %w", err)
+ }
+ }
+
+ // Update organization number of teams.
+ if _, err = db.Exec(ctx, "UPDATE `user` SET num_teams=num_teams+1 WHERE id = ?", t.OrgID); err != nil {
+ return err
+ }
+ return committer.Commit()
+}
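+
+// A minimal sketch of creating a team (assumes ctx, org, and the perm package
+// are in scope):
+//
+//	team := &organization.Team{
+//		OrgID:      org.ID,
+//		Name:       "Reviewers",
+//		AccessMode: perm.AccessModeRead,
+//	}
+//	if err := NewTeam(ctx, team); err != nil {
+//		// organization.IsErrTeamAlreadyExist(err) reports duplicate names
+//	}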
+
+// UpdateTeam updates the information of a team.
+func UpdateTeam(ctx context.Context, t *organization.Team, authChanged, includeAllChanged bool) (err error) {
+ if len(t.Name) == 0 {
+ return util.NewInvalidArgumentErrorf("empty team name")
+ }
+
+ if len(t.Description) > 255 {
+ t.Description = t.Description[:255]
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ t.LowerName = strings.ToLower(t.Name)
+ has, err := db.Exist[organization.Team](ctx, builder.Eq{
+ "org_id": t.OrgID,
+ "lower_name": t.LowerName,
+ }.And(builder.Neq{"id": t.ID}),
+ )
+ if err != nil {
+ return err
+ } else if has {
+ return organization.ErrTeamAlreadyExist{OrgID: t.OrgID, Name: t.LowerName}
+ }
+
+ sess := db.GetEngine(ctx)
+ if _, err = sess.ID(t.ID).Cols("name", "lower_name", "description",
+ "can_create_org_repo", "authorize", "includes_all_repositories").Update(t); err != nil {
+ return fmt.Errorf("update: %w", err)
+ }
+
+ // update units for team
+ if len(t.Units) > 0 {
+ for _, unit := range t.Units {
+ unit.TeamID = t.ID
+ }
+ // Delete team-unit.
+ if _, err := sess.
+ Where("team_id=?", t.ID).
+ Delete(new(organization.TeamUnit)); err != nil {
+ return err
+ }
+ if _, err = sess.Cols("org_id", "team_id", "type", "access_mode").Insert(&t.Units); err != nil {
+ return err
+ }
+ }
+
+ // Update access for team members if needed.
+ if authChanged {
+ if err = t.LoadRepositories(ctx); err != nil {
+ return fmt.Errorf("LoadRepositories: %w", err)
+ }
+
+ for _, repo := range t.Repos {
+ if err = access_model.RecalculateTeamAccesses(ctx, repo, 0); err != nil {
+ return fmt.Errorf("recalculateTeamAccesses: %w", err)
+ }
+ }
+ }
+
+ // Add all repositories to the team if it has access to all of them.
+ if includeAllChanged && t.IncludesAllRepositories {
+ err = addAllRepositories(ctx, t)
+ if err != nil {
+ return fmt.Errorf("addAllRepositories: %w", err)
+ }
+ }
+
+ return committer.Commit()
+}
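+
+// A minimal sketch (assumes ctx and a loaded team): pass authChanged=true when
+// t.AccessMode was modified so team accesses are recalculated, and
+// includeAllChanged=true when IncludesAllRepositories was just switched on:
+//
+//	team.Description = "Handles code review"
+//	if err := UpdateTeam(ctx, team, false, false); err != nil {
+//		log.Error("UpdateTeam: %v", err)
+//	}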
+
+// DeleteTeam deletes the given team.
+// It is the caller's responsibility to ensure the team's organization ID is set.
+func DeleteTeam(ctx context.Context, t *organization.Team) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if err := t.LoadRepositories(ctx); err != nil {
+ return err
+ }
+
+ if err := t.LoadMembers(ctx); err != nil {
+ return err
+ }
+
+ // update branch protections
+ {
+ protections := make([]*git_model.ProtectedBranch, 0, 10)
+ err := db.GetEngine(ctx).In("repo_id",
+ builder.Select("id").From("repository").Where(builder.Eq{"owner_id": t.OrgID})).
+ Find(&protections)
+ if err != nil {
+ return fmt.Errorf("findProtectedBranches: %w", err)
+ }
+ for _, p := range protections {
+ if err := git_model.RemoveTeamIDFromProtectedBranch(ctx, p, t.ID); err != nil {
+ return err
+ }
+ }
+ }
+
+ if !t.IncludesAllRepositories {
+ if err := removeAllRepositories(ctx, t); err != nil {
+ return err
+ }
+ }
+
+ if err := db.DeleteBeans(ctx,
+ &organization.Team{ID: t.ID},
+ &organization.TeamUser{OrgID: t.OrgID, TeamID: t.ID},
+ &organization.TeamUnit{TeamID: t.ID},
+ &organization.TeamInvite{TeamID: t.ID},
+ &issues_model.Review{Type: issues_model.ReviewTypeRequest, ReviewerTeamID: t.ID}, // batch-delete the bindings between the team and PRs (review requests from the team)
+ ); err != nil {
+ return err
+ }
+
+ for _, tm := range t.Members {
+ if err := removeInvalidOrgUser(ctx, tm.ID, t.OrgID); err != nil {
+ return err
+ }
+ }
+
+ // Update organization number of teams.
+ if _, err := db.Exec(ctx, "UPDATE `user` SET num_teams=num_teams-1 WHERE id=?", t.OrgID); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
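+
+// A minimal sketch (assumes ctx and org are in scope; GetTeam is the loader
+// defined in the organization package):
+//
+//	team, err := org.GetTeam(ctx, "reviewers")
+//	if err != nil {
+//		return err
+//	}
+//	return DeleteTeam(ctx, team)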
+
+// AddTeamMember adds a new member to the given team of the given organization;
+// the user gains membership of the organization automatically when needed.
+func AddTeamMember(ctx context.Context, team *organization.Team, userID int64) error {
+ isAlreadyMember, err := organization.IsTeamMember(ctx, team.OrgID, team.ID, userID)
+ if err != nil || isAlreadyMember {
+ return err
+ }
+
+ if err := organization.AddOrgUser(ctx, team.OrgID, userID); err != nil {
+ return err
+ }
+
+ err = db.WithTx(ctx, func(ctx context.Context) error {
+ // re-check membership inside the transaction to avoid a race
+ isAlreadyMember, err = organization.IsTeamMember(ctx, team.OrgID, team.ID, userID)
+ if err != nil || isAlreadyMember {
+ return err
+ }
+
+ sess := db.GetEngine(ctx)
+
+ if err := db.Insert(ctx, &organization.TeamUser{
+ UID: userID,
+ OrgID: team.OrgID,
+ TeamID: team.ID,
+ }); err != nil {
+ return err
+ } else if _, err := sess.Incr("num_members").ID(team.ID).Update(new(organization.Team)); err != nil {
+ return err
+ }
+
+ team.NumMembers++
+
+ // Give access to team repositories.
+ // update existing access entries if the team's mode is higher
+ subQuery := builder.Select("repo_id").From("team_repo").
+ Where(builder.Eq{"team_id": team.ID})
+
+ if _, err := sess.Where("user_id=?", userID).
+ In("repo_id", subQuery).
+ And("mode < ?", team.AccessMode).
+ SetExpr("mode", team.AccessMode).
+ Update(new(access_model.Access)); err != nil {
+ return fmt.Errorf("update user accesses: %w", err)
+ }
+
+ // insert access entries that do not exist yet
+ var repoIDs []int64
+ accessSubQuery := builder.Select("repo_id").From("access").Where(builder.Eq{"user_id": userID})
+ if err := sess.SQL(subQuery.And(builder.NotIn("repo_id", accessSubQuery))).Find(&repoIDs); err != nil {
+ return fmt.Errorf("select id accesses: %w", err)
+ }
+
+ accesses := make([]*access_model.Access, 0, 100)
+ for i, repoID := range repoIDs {
+ accesses = append(accesses, &access_model.Access{RepoID: repoID, UserID: userID, Mode: team.AccessMode})
+ if (i%100 == 0 || i == len(repoIDs)-1) && len(accesses) > 0 {
+ if err = db.Insert(ctx, accesses); err != nil {
+ return fmt.Errorf("insert new user accesses: %w", err)
+ }
+ accesses = accesses[:0]
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ // this operation may take a long time, so run it in a goroutine
+ // FIXME: update watched repos in batches
+ if setting.Service.AutoWatchNewRepos {
+ // Get team and its repositories.
+ if err := team.LoadRepositories(ctx); err != nil {
+ log.Error("team.LoadRepositories failed: %v", err)
+ }
+ // FIXME: the goroutine cannot use the request-scoped "ctx"; it can only use db.DefaultContext at the moment
+ go func(repos []*repo_model.Repository) {
+ for _, repo := range repos {
+ if err = repo_model.WatchRepo(db.DefaultContext, userID, repo.ID, true); err != nil {
+ log.Error("watch repo failed: %v", err)
+ }
+ }
+ }(team.Repos)
+ }
+
+ return nil
+}
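+
+// A minimal usage sketch (assumes ctx, a loaded team, and a user ID). The call
+// is idempotent: membership is checked both before and inside the transaction:
+//
+//	if err := AddTeamMember(ctx, team, userID); err != nil {
+//		log.Error("AddTeamMember: %v", err)
+//	}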
+
+func removeTeamMember(ctx context.Context, team *organization.Team, userID int64) error {
+ e := db.GetEngine(ctx)
+ isMember, err := organization.IsTeamMember(ctx, team.OrgID, team.ID, userID)
+ if err != nil || !isMember {
+ return err
+ }
+
+ // Check if the user to remove is the last member of the owner team.
+ if team.IsOwnerTeam() && team.NumMembers == 1 {
+ return organization.ErrLastOrgOwner{UID: userID}
+ }
+
+ team.NumMembers--
+
+ if err := team.LoadRepositories(ctx); err != nil {
+ return err
+ }
+
+ if _, err := e.Delete(&organization.TeamUser{
+ UID: userID,
+ OrgID: team.OrgID,
+ TeamID: team.ID,
+ }); err != nil {
+ return err
+ } else if _, err = e.
+ ID(team.ID).
+ Cols("num_members").
+ Update(team); err != nil {
+ return err
+ }
+
+ // Delete access to team repositories.
+ for _, repo := range team.Repos {
+ if err := access_model.RecalculateUserAccess(ctx, repo, userID); err != nil {
+ return err
+ }
+
+ // Remove watches on repos that are now inaccessible
+ if err := ReconsiderWatches(ctx, repo, userID); err != nil {
+ return err
+ }
+
+ // Remove issue assignments in repos that are now inaccessible
+ if err := ReconsiderRepoIssuesAssignee(ctx, repo, userID); err != nil {
+ return err
+ }
+ }
+
+ return removeInvalidOrgUser(ctx, userID, team.OrgID)
+}
+
+func removeInvalidOrgUser(ctx context.Context, userID, orgID int64) error {
+ // Check if the user is a member of any team in the organization.
+ if count, err := db.GetEngine(ctx).Count(&organization.TeamUser{
+ UID: userID,
+ OrgID: orgID,
+ }); err != nil {
+ return err
+ } else if count == 0 {
+ return RemoveOrgUser(ctx, orgID, userID)
+ }
+ return nil
+}
+
+// RemoveTeamMember removes member from given team of given organization.
+func RemoveTeamMember(ctx context.Context, team *organization.Team, userID int64) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+ if err := removeTeamMember(ctx, team, userID); err != nil {
+ return err
+ }
+ return committer.Commit()
+}
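+
+// A minimal usage sketch (assumes ctx, a loaded team, and a user ID); removing
+// the last member of the owner team is rejected:
+//
+//	if err := RemoveTeamMember(ctx, team, userID); err != nil {
+//		if organization.IsErrLastOrgOwner(err) {
+//			// the organization must keep at least one owner
+//		}
+//	}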
+
+func ReconsiderRepoIssuesAssignee(ctx context.Context, repo *repo_model.Repository, uid int64) error {
+ user, err := user_model.GetUserByID(ctx, uid)
+ if err != nil {
+ return err
+ }
+
+ if canAssigned, err := access_model.CanBeAssigned(ctx, user, repo, true); err != nil || canAssigned {
+ return err
+ }
+
+ if _, err := db.GetEngine(ctx).Where(builder.Eq{"assignee_id": uid}).
+ In("issue_id", builder.Select("id").From("issue").Where(builder.Eq{"repo_id": repo.ID})).
+ Delete(&issues_model.IssueAssignees{}); err != nil {
+ return fmt.Errorf("Could not delete assignee[%d] %w", uid, err)
+ }
+ return nil
+}
+
+func ReconsiderWatches(ctx context.Context, repo *repo_model.Repository, uid int64) error {
+ if has, err := access_model.HasAccess(ctx, uid, repo); err != nil || has {
+ return err
+ }
+ if err := repo_model.WatchRepo(ctx, uid, repo.ID, false); err != nil {
+ return err
+ }
+
+ // Remove all IssueWatches a user has subscribed to in the repository
+ return issues_model.RemoveIssueWatchersByRepoID(ctx, uid, repo.ID)
+}
diff --git a/models/org_team_test.go b/models/org_team_test.go
new file mode 100644
index 0000000..2819607
--- /dev/null
+++ b/models/org_team_test.go
@@ -0,0 +1,170 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package models
+
+import (
+ "strings"
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/organization"
+ "code.gitea.io/gitea/models/perm"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestTeam_AddMember(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ test := func(teamID, userID int64) {
+ team := unittest.AssertExistsAndLoadBean(t, &organization.Team{ID: teamID})
+ require.NoError(t, AddTeamMember(db.DefaultContext, team, userID))
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamUser{UID: userID, TeamID: teamID})
+ unittest.CheckConsistencyFor(t, &organization.Team{ID: teamID}, &user_model.User{ID: team.OrgID})
+ }
+ test(1, 2)
+ test(1, 4)
+ test(3, 2)
+}
+
+func TestTeam_RemoveMember(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ testSuccess := func(teamID, userID int64) {
+ team := unittest.AssertExistsAndLoadBean(t, &organization.Team{ID: teamID})
+ require.NoError(t, RemoveTeamMember(db.DefaultContext, team, userID))
+ unittest.AssertNotExistsBean(t, &organization.TeamUser{UID: userID, TeamID: teamID})
+ unittest.CheckConsistencyFor(t, &organization.Team{ID: teamID})
+ }
+ testSuccess(1, 4)
+ testSuccess(2, 2)
+ testSuccess(3, 2)
+ testSuccess(3, unittest.NonexistentID)
+
+ team := unittest.AssertExistsAndLoadBean(t, &organization.Team{ID: 1})
+ err := RemoveTeamMember(db.DefaultContext, team, 2)
+ assert.True(t, organization.IsErrLastOrgOwner(err))
+}
+
+func TestIsUsableTeamName(t *testing.T) {
+ require.NoError(t, organization.IsUsableTeamName("usable"))
+ assert.True(t, db.IsErrNameReserved(organization.IsUsableTeamName("new")))
+}
+
+func TestNewTeam(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ const teamName = "newTeamName"
+ team := &organization.Team{Name: teamName, OrgID: 3}
+ require.NoError(t, NewTeam(db.DefaultContext, team))
+ unittest.AssertExistsAndLoadBean(t, &organization.Team{Name: teamName})
+ unittest.CheckConsistencyFor(t, &organization.Team{}, &user_model.User{ID: team.OrgID})
+}
+
+func TestUpdateTeam(t *testing.T) {
+ // successful update
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ team := unittest.AssertExistsAndLoadBean(t, &organization.Team{ID: 2})
+ team.LowerName = "newname"
+ team.Name = "newName"
+ team.Description = strings.Repeat("A long description!", 100)
+ team.AccessMode = perm.AccessModeAdmin
+ require.NoError(t, UpdateTeam(db.DefaultContext, team, true, false))
+
+ team = unittest.AssertExistsAndLoadBean(t, &organization.Team{Name: "newName"})
+ assert.True(t, strings.HasPrefix(team.Description, "A long description!"))
+
+ access := unittest.AssertExistsAndLoadBean(t, &access_model.Access{UserID: 4, RepoID: 3})
+ assert.EqualValues(t, perm.AccessModeAdmin, access.Mode)
+
+ unittest.CheckConsistencyFor(t, &organization.Team{ID: team.ID})
+}
+
+func TestUpdateTeam2(t *testing.T) {
+ // update to an already-existing team name
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ team := unittest.AssertExistsAndLoadBean(t, &organization.Team{ID: 2})
+ team.LowerName = "owners"
+ team.Name = "Owners"
+ team.Description = strings.Repeat("A long description!", 100)
+ err := UpdateTeam(db.DefaultContext, team, true, false)
+ assert.True(t, organization.IsErrTeamAlreadyExist(err))
+
+ unittest.CheckConsistencyFor(t, &organization.Team{ID: team.ID})
+}
+
+func TestDeleteTeam(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ team := unittest.AssertExistsAndLoadBean(t, &organization.Team{ID: 2})
+ require.NoError(t, DeleteTeam(db.DefaultContext, team))
+ unittest.AssertNotExistsBean(t, &organization.Team{ID: team.ID})
+ unittest.AssertNotExistsBean(t, &organization.TeamRepo{TeamID: team.ID})
+ unittest.AssertNotExistsBean(t, &organization.TeamUser{TeamID: team.ID})
+
+ // check that team members don't have "leftover" access to repos
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 4})
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 3})
+ accessMode, err := access_model.AccessLevel(db.DefaultContext, user, repo)
+ require.NoError(t, err)
+ assert.Less(t, accessMode, perm.AccessModeWrite)
+}
+
+func TestAddTeamMember(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ test := func(teamID, userID int64) {
+ team := unittest.AssertExistsAndLoadBean(t, &organization.Team{ID: teamID})
+ require.NoError(t, AddTeamMember(db.DefaultContext, team, userID))
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamUser{UID: userID, TeamID: teamID})
+ unittest.CheckConsistencyFor(t, &organization.Team{ID: teamID}, &user_model.User{ID: team.OrgID})
+ }
+ test(1, 2)
+ test(1, 4)
+ test(3, 2)
+}
+
+func TestRemoveTeamMember(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ testSuccess := func(teamID, userID int64) {
+ team := unittest.AssertExistsAndLoadBean(t, &organization.Team{ID: teamID})
+ require.NoError(t, RemoveTeamMember(db.DefaultContext, team, userID))
+ unittest.AssertNotExistsBean(t, &organization.TeamUser{UID: userID, TeamID: teamID})
+ unittest.CheckConsistencyFor(t, &organization.Team{ID: teamID})
+ }
+ testSuccess(1, 4)
+ testSuccess(2, 2)
+ testSuccess(3, 2)
+ testSuccess(3, unittest.NonexistentID)
+
+ team := unittest.AssertExistsAndLoadBean(t, &organization.Team{ID: 1})
+ err := RemoveTeamMember(db.DefaultContext, team, 2)
+ assert.True(t, organization.IsErrLastOrgOwner(err))
+}
+
+func TestRepository_RecalculateAccesses3(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ team5 := unittest.AssertExistsAndLoadBean(t, &organization.Team{ID: 5})
+ user29 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 29})
+
+ has, err := db.GetEngine(db.DefaultContext).Get(&access_model.Access{UserID: 29, RepoID: 23})
+ require.NoError(t, err)
+ assert.False(t, has)
+
+ // adding user29 to team5 should add an explicit access row for repo 23
+ // even though repo 23 is public
+ require.NoError(t, AddTeamMember(db.DefaultContext, team5, user29.ID))
+
+ has, err = db.GetEngine(db.DefaultContext).Get(&access_model.Access{UserID: 29, RepoID: 23})
+ require.NoError(t, err)
+ assert.True(t, has)
+}
diff --git a/models/org_test.go b/models/org_test.go
new file mode 100644
index 0000000..bb5e524
--- /dev/null
+++ b/models/org_test.go
@@ -0,0 +1,62 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package models
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/organization"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestUser_RemoveMember(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ org := unittest.AssertExistsAndLoadBean(t, &organization.Organization{ID: 3})
+
+ // remove a user that is a member
+ unittest.AssertExistsAndLoadBean(t, &organization.OrgUser{UID: 4, OrgID: 3})
+ prevNumMembers := org.NumMembers
+ require.NoError(t, RemoveOrgUser(db.DefaultContext, org.ID, 4))
+ unittest.AssertNotExistsBean(t, &organization.OrgUser{UID: 4, OrgID: 3})
+ org = unittest.AssertExistsAndLoadBean(t, &organization.Organization{ID: 3})
+ assert.Equal(t, prevNumMembers-1, org.NumMembers)
+
+ // remove a user that is not a member
+ unittest.AssertNotExistsBean(t, &organization.OrgUser{UID: 5, OrgID: 3})
+ prevNumMembers = org.NumMembers
+ require.NoError(t, RemoveOrgUser(db.DefaultContext, org.ID, 5))
+ unittest.AssertNotExistsBean(t, &organization.OrgUser{UID: 5, OrgID: 3})
+ org = unittest.AssertExistsAndLoadBean(t, &organization.Organization{ID: 3})
+ assert.Equal(t, prevNumMembers, org.NumMembers)
+
+ unittest.CheckConsistencyFor(t, &user_model.User{}, &organization.Team{})
+}
+
+func TestRemoveOrgUser(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ testSuccess := func(orgID, userID int64) {
+ org := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: orgID})
+ expectedNumMembers := org.NumMembers
+ if unittest.BeanExists(t, &organization.OrgUser{OrgID: orgID, UID: userID}) {
+ expectedNumMembers--
+ }
+ require.NoError(t, RemoveOrgUser(db.DefaultContext, orgID, userID))
+ unittest.AssertNotExistsBean(t, &organization.OrgUser{OrgID: orgID, UID: userID})
+ org = unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: orgID})
+ assert.EqualValues(t, expectedNumMembers, org.NumMembers)
+ }
+ testSuccess(3, 4)
+ testSuccess(3, 4)
+
+ err := RemoveOrgUser(db.DefaultContext, 7, 5)
+ require.Error(t, err)
+ assert.True(t, organization.IsErrLastOrgOwner(err))
+ unittest.AssertExistsAndLoadBean(t, &organization.OrgUser{OrgID: 7, UID: 5})
+ unittest.CheckConsistencyFor(t, &user_model.User{}, &organization.Team{})
+}
diff --git a/models/organization/TestInconsistentOwnerTeam/team.yml b/models/organization/TestInconsistentOwnerTeam/team.yml
new file mode 100644
index 0000000..90e3ad4
--- /dev/null
+++ b/models/organization/TestInconsistentOwnerTeam/team.yml
@@ -0,0 +1,10 @@
+-
+ id: 1000
+ org_id: 1000
+ lower_name: owners
+ name: Owners
+ authorize: 4 # owner
+ num_repos: 0
+ num_members: 0
+ includes_all_repositories: true
+ can_create_org_repo: true
diff --git a/models/organization/TestInconsistentOwnerTeam/team_unit.yml b/models/organization/TestInconsistentOwnerTeam/team_unit.yml
new file mode 100644
index 0000000..91e03d6
--- /dev/null
+++ b/models/organization/TestInconsistentOwnerTeam/team_unit.yml
@@ -0,0 +1,59 @@
+-
+ id: 1000
+ team_id: 1000
+ type: 1
+ access_mode: 0 # None
+
+-
+ id: 1001
+ team_id: 1000
+ type: 2
+ access_mode: 0
+
+-
+ id: 1002
+ team_id: 1000
+ type: 3
+ access_mode: 0
+
+-
+ id: 1003
+ team_id: 1000
+ type: 4
+ access_mode: 0
+
+-
+ id: 1004
+ team_id: 1000
+ type: 5
+ access_mode: 0
+
+-
+ id: 1005
+ team_id: 1000
+ type: 6
+ access_mode: 0
+
+-
+ id: 1006
+ team_id: 1000
+ type: 7
+ access_mode: 0
+
+-
+ id: 1007
+ team_id: 1000
+ type: 8
+ access_mode: 0
+
+-
+ id: 1008
+ team_id: 1000
+ type: 9
+ access_mode: 0
+
+-
+ id: 1009
+ team_id: 1000
+ type: 10
+ access_mode: 0
diff --git a/models/organization/main_test.go b/models/organization/main_test.go
new file mode 100644
index 0000000..c35898a
--- /dev/null
+++ b/models/organization/main_test.go
@@ -0,0 +1,21 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package organization_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/unittest"
+
+ _ "code.gitea.io/gitea/models"
+ _ "code.gitea.io/gitea/models/actions"
+ _ "code.gitea.io/gitea/models/activities"
+ _ "code.gitea.io/gitea/models/organization"
+ _ "code.gitea.io/gitea/models/repo"
+ _ "code.gitea.io/gitea/models/user"
+)
+
+func TestMain(m *testing.M) {
+ unittest.MainTest(m)
+}
diff --git a/models/organization/mini_org.go b/models/organization/mini_org.go
new file mode 100644
index 0000000..b1b2462
--- /dev/null
+++ b/models/organization/mini_org.go
@@ -0,0 +1,78 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package organization
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+
+ "xorm.io/builder"
+)
+
+// MinimalOrg represents a simple organization with only the needed columns
+type MinimalOrg = Organization
+
+// GetUserOrgsList returns all organizations the given user has access to
+func GetUserOrgsList(ctx context.Context, user *user_model.User) ([]*MinimalOrg, error) {
+ schema, err := db.TableInfo(new(user_model.User))
+ if err != nil {
+ return nil, err
+ }
+
+ outputCols := []string{
+ "id",
+ "name",
+ "full_name",
+ "visibility",
+ "avatar",
+ "avatar_email",
+ "use_custom_avatar",
+ }
+
+ groupByCols := &strings.Builder{}
+ for _, col := range outputCols {
+ fmt.Fprintf(groupByCols, "`%s`.%s,", schema.Name, col)
+ }
+ groupByStr := groupByCols.String()
+ groupByStr = groupByStr[0 : len(groupByStr)-1]
+
+ sess := db.GetEngine(ctx)
+ sess = sess.Select(groupByStr+", count(distinct repo_id) as org_count").
+ Table("user").
+ Join("INNER", "team", "`team`.org_id = `user`.id").
+ Join("INNER", "team_user", "`team`.id = `team_user`.team_id").
+ Join("LEFT", builder.
+ Select("id as repo_id, owner_id as repo_owner_id").
+ From("repository").
+ Where(repo_model.AccessibleRepositoryCondition(user, unit.TypeInvalid)), "`repository`.repo_owner_id = `team`.org_id").
+ Where("`team_user`.uid = ?", user.ID).
+ GroupBy(groupByStr)
+
+ type OrgCount struct {
+ Organization `xorm:"extends"`
+ OrgCount int
+ }
+
+ orgCounts := make([]*OrgCount, 0, 10)
+
+ if err := sess.
+ Asc("`user`.name").
+ Find(&orgCounts); err != nil {
+ return nil, err
+ }
+
+ orgs := make([]*MinimalOrg, len(orgCounts))
+ for i, orgCount := range orgCounts {
+ orgCount.Organization.NumRepos = orgCount.OrgCount
+ orgs[i] = &orgCount.Organization
+ }
+
+ return orgs, nil
+}
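+
+// A minimal sketch of consuming the result (assumes ctx and doer are in
+// scope); NumRepos is repurposed here to carry the per-organization count of
+// repositories visible to the user:
+//
+//	orgs, err := GetUserOrgsList(ctx, doer)
+//	if err != nil {
+//		return err
+//	}
+//	for _, org := range orgs {
+//		fmt.Printf("%s: %d visible repos\n", org.Name, org.NumRepos)
+//	}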
diff --git a/models/organization/org.go b/models/organization/org.go
new file mode 100644
index 0000000..379c30b
--- /dev/null
+++ b/models/organization/org.go
@@ -0,0 +1,833 @@
+// Copyright 2014 The Gogs Authors. All rights reserved.
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package organization
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ actions_model "code.gitea.io/gitea/models/actions"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ repo_model "code.gitea.io/gitea/models/repo"
+ secret_model "code.gitea.io/gitea/models/secret"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ________ .__ __ .__
+// \_____ \_______ _________ ____ |__|____________ _/ |_|__| ____ ____
+// / | \_ __ \/ ___\__ \ / \| \___ /\__ \\ __\ |/ _ \ / \
+// / | \ | \/ /_/ > __ \| | \ |/ / / __ \| | | ( <_> ) | \
+// \_______ /__| \___ (____ /___| /__/_____ \(____ /__| |__|\____/|___| /
+// \/ /_____/ \/ \/ \/ \/ \/
+
+// ErrOrgNotExist represents an "OrgNotExist" kind of error.
+type ErrOrgNotExist struct {
+ ID int64
+ Name string
+}
+
+// IsErrOrgNotExist checks if an error is an ErrOrgNotExist.
+func IsErrOrgNotExist(err error) bool {
+ _, ok := err.(ErrOrgNotExist)
+ return ok
+}
+
+func (err ErrOrgNotExist) Error() string {
+ return fmt.Sprintf("org does not exist [id: %d, name: %s]", err.ID, err.Name)
+}
+
+func (err ErrOrgNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ErrLastOrgOwner represents a "LastOrgOwner" kind of error.
+type ErrLastOrgOwner struct {
+ UID int64
+}
+
+// IsErrLastOrgOwner checks if an error is an ErrLastOrgOwner.
+func IsErrLastOrgOwner(err error) bool {
+ _, ok := err.(ErrLastOrgOwner)
+ return ok
+}
+
+func (err ErrLastOrgOwner) Error() string {
+ return fmt.Sprintf("user is the last member of owner team [uid: %d]", err.UID)
+}
+
+// ErrUserNotAllowedCreateOrg represents a "UserNotAllowedCreateOrg" kind of error.
+type ErrUserNotAllowedCreateOrg struct{}
+
+// IsErrUserNotAllowedCreateOrg checks if an error is an ErrUserNotAllowedCreateOrg.
+func IsErrUserNotAllowedCreateOrg(err error) bool {
+ _, ok := err.(ErrUserNotAllowedCreateOrg)
+ return ok
+}
+
+func (err ErrUserNotAllowedCreateOrg) Error() string {
+ return "user is not allowed to create organizations"
+}
+
+func (err ErrUserNotAllowedCreateOrg) Unwrap() error {
+ return util.ErrPermissionDenied
+}
+
+// Organization represents an organization
+type Organization user_model.User
+
+// OrgFromUser converts user to organization
+func OrgFromUser(user *user_model.User) *Organization {
+ return (*Organization)(user)
+}
+
+// TableName represents the real table name of Organization
+func (Organization) TableName() string {
+ return "user"
+}
+
+// IsOwnedBy returns true if given user is in the owner team.
+func (org *Organization) IsOwnedBy(ctx context.Context, uid int64) (bool, error) {
+ return IsOrganizationOwner(ctx, org.ID, uid)
+}
+
+// IsOrgAdmin returns true if given user is in the owner team or an admin team.
+func (org *Organization) IsOrgAdmin(ctx context.Context, uid int64) (bool, error) {
+ return IsOrganizationAdmin(ctx, org.ID, uid)
+}
+
+// IsOrgMember returns true if given user is member of organization.
+func (org *Organization) IsOrgMember(ctx context.Context, uid int64) (bool, error) {
+ return IsOrganizationMember(ctx, org.ID, uid)
+}
+
+// CanCreateOrgRepo returns true if given user can create repo in organization
+func (org *Organization) CanCreateOrgRepo(ctx context.Context, uid int64) (bool, error) {
+ return CanCreateOrgRepo(ctx, org.ID, uid)
+}
+
+// GetTeam returns named team of organization.
+func (org *Organization) GetTeam(ctx context.Context, name string) (*Team, error) {
+ return GetTeam(ctx, org.ID, name)
+}
+
+// GetOwnerTeam returns owner team of organization.
+func (org *Organization) GetOwnerTeam(ctx context.Context) (*Team, error) {
+ return org.GetTeam(ctx, OwnerTeamName)
+}
+
+// FindOrgTeams returns all teams of a given organization
+func FindOrgTeams(ctx context.Context, orgID int64) ([]*Team, error) {
+ var teams []*Team
+ return teams, db.GetEngine(ctx).
+ Where("org_id=?", orgID).
+ OrderBy("CASE WHEN name LIKE '" + OwnerTeamName + "' THEN '' ELSE name END").
+ Find(&teams)
+}
+
+// LoadTeams load teams if not loaded.
+func (org *Organization) LoadTeams(ctx context.Context) ([]*Team, error) {
+ return FindOrgTeams(ctx, org.ID)
+}
+
+// GetMembers returns all members of organization.
+func (org *Organization) GetMembers(ctx context.Context) (user_model.UserList, map[int64]bool, error) {
+ return FindOrgMembers(ctx, &FindOrgMembersOpts{
+ OrgID: org.ID,
+ })
+}
+
+// HasMemberWithUserID returns true if the user with the given userID is part of the organization.
+func (org *Organization) HasMemberWithUserID(ctx context.Context, userID int64) bool {
+ return org.hasMemberWithUserID(ctx, userID)
+}
+
+func (org *Organization) hasMemberWithUserID(ctx context.Context, userID int64) bool {
+ isMember, err := IsOrganizationMember(ctx, org.ID, userID)
+ if err != nil {
+ log.Error("IsOrganizationMember: %v", err)
+ return false
+ }
+ return isMember
+}
+
+// AvatarLink returns the full avatar link with http host
+func (org *Organization) AvatarLink(ctx context.Context) string {
+ return org.AsUser().AvatarLink(ctx)
+}
+
+// HTMLURL returns the organization's full link.
+func (org *Organization) HTMLURL() string {
+ return org.AsUser().HTMLURL()
+}
+
+// OrganisationLink returns the organization sub page link.
+func (org *Organization) OrganisationLink() string {
+ return org.AsUser().OrganisationLink()
+}
+
+// ShortName truncates the username to the given length, with an ellipsis when it is shortened
+func (org *Organization) ShortName(length int) string {
+ return org.AsUser().ShortName(length)
+}
+
+// HomeLink returns the user or organization home page link.
+func (org *Organization) HomeLink() string {
+ return org.AsUser().HomeLink()
+}
+
+// CanCreateRepo returns whether the user can create a repository
+// NOTE: functions calling this assume a failure due to repository count limit; if new checks are added, those functions should be revised
+func (org *Organization) CanCreateRepo() bool {
+ return org.AsUser().CanCreateRepo()
+}
+
+// FindOrgMembersOpts represents the conditions for finding org members
+type FindOrgMembersOpts struct {
+ db.ListOptions
+ OrgID int64
+ PublicOnly bool
+}
+
+// CountOrgMembers counts the organization's members
+func CountOrgMembers(ctx context.Context, opts *FindOrgMembersOpts) (int64, error) {
+ sess := db.GetEngine(ctx).Where("org_id=?", opts.OrgID)
+ if opts.PublicOnly {
+ sess.And("is_public = ?", true)
+ }
+ return sess.Count(new(OrgUser))
+}
+
+// FindOrgMembers loads organization members according to the given conditions
+func FindOrgMembers(ctx context.Context, opts *FindOrgMembersOpts) (user_model.UserList, map[int64]bool, error) {
+ ous, err := GetOrgUsersByOrgID(ctx, opts)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ ids := make([]int64, len(ous))
+ idsIsPublic := make(map[int64]bool, len(ous))
+ for i, ou := range ous {
+ ids[i] = ou.UID
+ idsIsPublic[ou.UID] = ou.IsPublic
+ }
+
+ users, err := user_model.GetUsersByIDs(ctx, ids)
+ if err != nil {
+ return nil, nil, err
+ }
+ return users, idsIsPublic, nil
+}
+
+// AsUser returns the org as user object
+func (org *Organization) AsUser() *user_model.User {
+ return (*user_model.User)(org)
+}
+
+// DisplayName returns the full name if it is not empty,
+// and the username otherwise.
+func (org *Organization) DisplayName() string {
+ return org.AsUser().DisplayName()
+}
+
+// CustomAvatarRelativePath returns user custom avatar relative path.
+func (org *Organization) CustomAvatarRelativePath() string {
+ return org.Avatar
+}
+
+// UnitPermission returns unit permission
+func (org *Organization) UnitPermission(ctx context.Context, doer *user_model.User, unitType unit.Type) perm.AccessMode {
+ if doer != nil {
+ teams, err := GetUserOrgTeams(ctx, org.ID, doer.ID)
+ if err != nil {
+ log.Error("GetUserOrgTeams: %v", err)
+ return perm.AccessModeNone
+ }
+
+ if err := teams.LoadUnits(ctx); err != nil {
+ log.Error("LoadUnits: %v", err)
+ return perm.AccessModeNone
+ }
+
+ if len(teams) > 0 {
+ return teams.UnitMaxAccess(unitType)
+ }
+ }
+
+ if org.Visibility.IsPublic() || (org.Visibility.IsLimited() && doer != nil) {
+ return perm.AccessModeRead
+ }
+
+ return perm.AccessModeNone
+}
+
+// CreateOrganization creates record of a new organization.
+func CreateOrganization(ctx context.Context, org *Organization, owner *user_model.User) (err error) {
+ if !owner.CanCreateOrganization() {
+ return ErrUserNotAllowedCreateOrg{}
+ }
+
+ if err = user_model.IsUsableUsername(org.Name); err != nil {
+ return err
+ }
+
+ isExist, err := user_model.IsUserExist(ctx, 0, org.Name)
+ if err != nil {
+ return err
+ } else if isExist {
+ return user_model.ErrUserAlreadyExist{Name: org.Name}
+ }
+
+ org.LowerName = strings.ToLower(org.Name)
+ if org.Rands, err = user_model.GetUserSalt(); err != nil {
+ return err
+ }
+ if org.Salt, err = user_model.GetUserSalt(); err != nil {
+ return err
+ }
+ org.UseCustomAvatar = true
+ org.MaxRepoCreation = -1
+ org.NumTeams = 1
+ org.NumMembers = 1
+ org.Type = user_model.UserTypeOrganization
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if err = user_model.DeleteUserRedirect(ctx, org.Name); err != nil {
+ return err
+ }
+
+ if err = db.Insert(ctx, org); err != nil {
+ return fmt.Errorf("insert organization: %w", err)
+ }
+ if err = user_model.GenerateRandomAvatar(ctx, org.AsUser()); err != nil {
+ return fmt.Errorf("generate random avatar: %w", err)
+ }
+
+ // Add initial creator to organization and owner team.
+ if err = db.Insert(ctx, &OrgUser{
+ UID: owner.ID,
+ OrgID: org.ID,
+ IsPublic: setting.Service.DefaultOrgMemberVisible,
+ }); err != nil {
+ return fmt.Errorf("insert org-user relation: %w", err)
+ }
+
+ // Create default owner team.
+ t := &Team{
+ OrgID: org.ID,
+ LowerName: strings.ToLower(OwnerTeamName),
+ Name: OwnerTeamName,
+ AccessMode: perm.AccessModeOwner,
+ NumMembers: 1,
+ IncludesAllRepositories: true,
+ CanCreateOrgRepo: true,
+ }
+ if err = db.Insert(ctx, t); err != nil {
+ return fmt.Errorf("insert owner team: %w", err)
+ }
+
+ // insert units for team
+ units := make([]TeamUnit, 0, len(unit.AllRepoUnitTypes))
+ for _, tp := range unit.AllRepoUnitTypes {
+ up := perm.AccessModeOwner
+ if tp == unit.TypeExternalTracker || tp == unit.TypeExternalWiki {
+ up = perm.AccessModeRead
+ }
+ units = append(units, TeamUnit{
+ OrgID: org.ID,
+ TeamID: t.ID,
+ Type: tp,
+ AccessMode: up,
+ })
+ }
+
+ if err = db.Insert(ctx, &units); err != nil {
+ return err
+ }
+
+ if err = db.Insert(ctx, &TeamUser{
+ UID: owner.ID,
+ OrgID: org.ID,
+ TeamID: t.ID,
+ }); err != nil {
+ return fmt.Errorf("insert team-user relation: %w", err)
+ }
+
+ return committer.Commit()
+}
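+
+// A minimal sketch (assumes ctx and a doer allowed to create organizations;
+// CreateOrganization also creates the owner team and its units):
+//
+//	org := &Organization{Name: "acme"}
+//	if err := CreateOrganization(ctx, org, doer); err != nil {
+//		// expected failures include ErrUserNotAllowedCreateOrg and
+//		// user_model.ErrUserAlreadyExist
+//	}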
+
+// GetOrgByName returns organization by given name.
+func GetOrgByName(ctx context.Context, name string) (*Organization, error) {
+ if len(name) == 0 {
+ return nil, ErrOrgNotExist{0, name}
+ }
+ u := &Organization{
+ LowerName: strings.ToLower(name),
+ Type: user_model.UserTypeOrganization,
+ }
+ has, err := db.GetEngine(ctx).Get(u)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrOrgNotExist{0, name}
+ }
+ return u, nil
+}
+
+// DeleteOrganization deletes models associated with an organization.
+func DeleteOrganization(ctx context.Context, org *Organization) error {
+ if org.Type != user_model.UserTypeOrganization {
+ return fmt.Errorf("%s is a user not an organization", org.Name)
+ }
+
+ if err := db.DeleteBeans(ctx,
+ &Team{OrgID: org.ID},
+ &OrgUser{OrgID: org.ID},
+ &TeamUser{OrgID: org.ID},
+ &TeamUnit{OrgID: org.ID},
+ &TeamInvite{OrgID: org.ID},
+ &secret_model.Secret{OwnerID: org.ID},
+ &actions_model.ActionRunner{OwnerID: org.ID},
+ &actions_model.ActionRunnerToken{OwnerID: org.ID},
+ ); err != nil {
+ return fmt.Errorf("DeleteBeans: %w", err)
+ }
+
+ if _, err := db.GetEngine(ctx).ID(org.ID).Delete(new(user_model.User)); err != nil {
+ return fmt.Errorf("Delete: %w", err)
+ }
+
+ return nil
+}
+
+// GetOrgUserMaxAuthorizeLevel returns the highest authorization level of a user in an organization
+func (org *Organization) GetOrgUserMaxAuthorizeLevel(ctx context.Context, uid int64) (perm.AccessMode, error) {
+ var authorize perm.AccessMode
+ _, err := db.GetEngine(ctx).
+ Select("max(team.authorize)").
+ Table("team").
+ Join("INNER", "team_user", "team_user.team_id = team.id").
+ Where("team_user.uid = ?", uid).
+ And("team_user.org_id = ?", org.ID).
+ Get(&authorize)
+ return authorize, err
+}
+
+// GetUsersWhoCanCreateOrgRepo returns the users who are able to create repos in the organization
+func GetUsersWhoCanCreateOrgRepo(ctx context.Context, orgID int64) (map[int64]*user_model.User, error) {
+ // Use a map in order to de-duplicate users.
+ users := make(map[int64]*user_model.User)
+ return users, db.GetEngine(ctx).
+ Join("INNER", "`team_user`", "`team_user`.uid=`user`.id").
+ Join("INNER", "`team`", "`team`.id=`team_user`.team_id").
+ Where(builder.Eq{"team.can_create_org_repo": true}.Or(builder.Eq{"team.authorize": perm.AccessModeOwner})).
+ And("team_user.org_id = ?", orgID).Find(&users)
+}
+
+// SearchOrganizationsOptions holds the options used to filter organizations
+type SearchOrganizationsOptions struct {
+ db.ListOptions
+ All bool
+}
+
+// FindOrgOptions holds the options for finding organizations
+type FindOrgOptions struct {
+ db.ListOptions
+ UserID int64
+ IncludePrivate bool
+}
+
+func queryUserOrgIDs(userID int64, includePrivate bool) *builder.Builder {
+ cond := builder.Eq{"uid": userID}
+ if !includePrivate {
+ cond["is_public"] = true
+ }
+ return builder.Select("org_id").From("org_user").Where(cond)
+}
+
+func (opts FindOrgOptions) ToConds() builder.Cond {
+ var cond builder.Cond = builder.Eq{"`user`.`type`": user_model.UserTypeOrganization}
+ if opts.UserID > 0 {
+ cond = cond.And(builder.In("`user`.`id`", queryUserOrgIDs(opts.UserID, opts.IncludePrivate)))
+ }
+ if !opts.IncludePrivate {
+ cond = cond.And(builder.Eq{"`user`.visibility": structs.VisibleTypePublic})
+ }
+ return cond
+}
+
+func (opts FindOrgOptions) ToOrders() string {
+ return "`user`.name ASC"
+}
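+
+// A minimal sketch of feeding these options to the generic finder, mirroring
+// how the tests call it (assumes ctx and a user ID):
+//
+//	orgs, err := db.Find[Organization](ctx, FindOrgOptions{
+//		UserID:         userID,
+//		IncludePrivate: true,
+//	})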
+
+// HasOrgOrUserVisible tells if the given user can see the given org or user
+func HasOrgOrUserVisible(ctx context.Context, orgOrUser, user *user_model.User) bool {
+ // If user is nil, it's an anonymous user/request.
+ // The Ghost user is handled like an anonymous user.
+ if user == nil || user.IsGhost() {
+ return orgOrUser.Visibility == structs.VisibleTypePublic
+ }
+
+ if user.IsAdmin || orgOrUser.ID == user.ID {
+ return true
+ }
+
+ if (orgOrUser.Visibility == structs.VisibleTypePrivate || user.IsRestricted) && !OrgFromUser(orgOrUser).hasMemberWithUserID(ctx, user.ID) {
+ return false
+ }
+ return true
+}
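+
+// In short, the branches above amount to:
+//   - anonymous and Ghost viewers see public orgs/users only
+//   - admins and the subject itself always have visibility
+//   - a private subject, or any subject viewed by a restricted user,
+//     additionally requires organization membership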
+
+// HasOrgsVisible tells if the given user can see at least one of the orgs provided
+func HasOrgsVisible(ctx context.Context, orgs []*Organization, user *user_model.User) bool {
+ if len(orgs) == 0 {
+ return false
+ }
+
+ for _, org := range orgs {
+ if HasOrgOrUserVisible(ctx, org.AsUser(), user) {
+ return true
+ }
+ }
+ return false
+}
+
+// GetOrgsCanCreateRepoByUserID returns a list of organizations in which the given
+// user is allowed to create repos.
+func GetOrgsCanCreateRepoByUserID(ctx context.Context, userID int64) ([]*Organization, error) {
+ orgs := make([]*Organization, 0, 10)
+
+ return orgs, db.GetEngine(ctx).Where(builder.In("id", builder.Select("`user`.id").From("`user`").
+ Join("INNER", "`team_user`", "`team_user`.org_id = `user`.id").
+ Join("INNER", "`team`", "`team`.id = `team_user`.team_id").
+ Where(builder.Eq{"`team_user`.uid": userID}).
+ And(builder.Eq{"`team`.authorize": perm.AccessModeOwner}.Or(builder.Eq{"`team`.can_create_org_repo": true})))).
+ Asc("`user`.name").
+ Find(&orgs)
+}
+
+// GetOrgUsersByOrgID returns all organization-user relations by organization ID.
+func GetOrgUsersByOrgID(ctx context.Context, opts *FindOrgMembersOpts) ([]*OrgUser, error) {
+ sess := db.GetEngine(ctx).Where("org_id=?", opts.OrgID)
+ if opts.PublicOnly {
+ sess.And("is_public = ?", true)
+ }
+ if opts.ListOptions.PageSize > 0 {
+ sess = db.SetSessionPagination(sess, opts)
+
+ ous := make([]*OrgUser, 0, opts.PageSize)
+ return ous, sess.Find(&ous)
+ }
+
+ var ous []*OrgUser
+ return ous, sess.Find(&ous)
+}
+
+// ChangeOrgUserStatus changes public or private membership status.
+func ChangeOrgUserStatus(ctx context.Context, orgID, uid int64, public bool) error {
+ ou := new(OrgUser)
+ has, err := db.GetEngine(ctx).
+ Where("uid=?", uid).
+ And("org_id=?", orgID).
+ Get(ou)
+ if err != nil {
+ return err
+ } else if !has {
+ return nil
+ }
+
+ ou.IsPublic = public
+ _, err = db.GetEngine(ctx).ID(ou.ID).Cols("is_public").Update(ou)
+ return err
+}
+
+// AddOrgUser adds a new user to the given organization.
+func AddOrgUser(ctx context.Context, orgID, uid int64) error {
+ isAlreadyMember, err := IsOrganizationMember(ctx, orgID, uid)
+ if err != nil || isAlreadyMember {
+ return err
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ // re-check membership inside the transaction to avoid a race
+ isAlreadyMember, err = IsOrganizationMember(ctx, orgID, uid)
+ if err != nil || isAlreadyMember {
+ return err
+ }
+
+ ou := &OrgUser{
+ UID: uid,
+ OrgID: orgID,
+ IsPublic: setting.Service.DefaultOrgMemberVisible,
+ }
+
+ if err := db.Insert(ctx, ou); err != nil {
+ return err
+ } else if _, err = db.Exec(ctx, "UPDATE `user` SET num_members = num_members + 1 WHERE id = ?", orgID); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
+
+// GetOrgByID returns the organization object for the given ID if it exists.
+func GetOrgByID(ctx context.Context, id int64) (*Organization, error) {
+ u := new(Organization)
+ has, err := db.GetEngine(ctx).ID(id).Get(u)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, user_model.ErrUserNotExist{
+ UID: id,
+ }
+ }
+ return u, nil
+}
+
+// RemoveOrgRepo removes all team-repository relations for the given repository in the organization.
+func RemoveOrgRepo(ctx context.Context, orgID, repoID int64) error {
+ teamRepos := make([]*TeamRepo, 0, 10)
+ e := db.GetEngine(ctx)
+ if err := e.Find(&teamRepos, &TeamRepo{OrgID: orgID, RepoID: repoID}); err != nil {
+ return err
+ }
+
+ if len(teamRepos) == 0 {
+ return nil
+ }
+
+ if _, err := e.Delete(&TeamRepo{
+ OrgID: orgID,
+ RepoID: repoID,
+ }); err != nil {
+ return err
+ }
+
+ teamIDs := make([]int64, len(teamRepos))
+ for i, teamRepo := range teamRepos {
+ teamIDs[i] = teamRepo.TeamID
+ }
+
+ _, err := e.Decr("num_repos").In("id", teamIDs).Update(new(Team))
+ return err
+}
+
+func (org *Organization) getUserTeams(ctx context.Context, userID int64, cols ...string) ([]*Team, error) {
+ teams := make([]*Team, 0, org.NumTeams)
+ return teams, db.GetEngine(ctx).
+ Where("`team_user`.org_id = ?", org.ID).
+ Join("INNER", "team_user", "`team_user`.team_id = team.id").
+ Join("INNER", "`user`", "`user`.id=team_user.uid").
+ And("`team_user`.uid = ?", userID).
+ Asc("`user`.name").
+ Cols(cols...).
+ Find(&teams)
+}
+
+func (org *Organization) getUserTeamIDs(ctx context.Context, userID int64) ([]int64, error) {
+ teamIDs := make([]int64, 0, org.NumTeams)
+ return teamIDs, db.GetEngine(ctx).
+ Table("team").
+ Cols("team.id").
+ Where("`team_user`.org_id = ?", org.ID).
+ Join("INNER", "team_user", "`team_user`.team_id = team.id").
+ And("`team_user`.uid = ?", userID).
+ Find(&teamIDs)
+}
+
+// TeamsWithAccessToRepo returns all teams that have the given access level to the repository.
+func (org *Organization) TeamsWithAccessToRepo(ctx context.Context, repoID int64, mode perm.AccessMode) ([]*Team, error) {
+ return GetTeamsWithAccessToRepo(ctx, org.ID, repoID, mode)
+}
+
+// GetUserTeamIDs returns all team IDs of the organization that the user is a member of.
+func (org *Organization) GetUserTeamIDs(ctx context.Context, userID int64) ([]int64, error) {
+ return org.getUserTeamIDs(ctx, userID)
+}
+
+// GetUserTeams returns all teams of the organization that the user has joined.
+func (org *Organization) GetUserTeams(ctx context.Context, userID int64) ([]*Team, error) {
+ return org.getUserTeams(ctx, userID)
+}
+
+// AccessibleReposEnvironment provides operations involving the repositories that are
+// accessible to a particular user
+type AccessibleReposEnvironment interface {
+ CountRepos() (int64, error)
+ RepoIDs(page, pageSize int) ([]int64, error)
+ Repos(page, pageSize int) (repo_model.RepositoryList, error)
+ MirrorRepos() (repo_model.RepositoryList, error)
+ AddKeyword(keyword string)
+ SetSort(db.SearchOrderBy)
+}
+
+type accessibleReposEnv struct {
+ org *Organization
+ user *user_model.User
+ team *Team
+ teamIDs []int64
+ ctx context.Context
+ keyword string
+ orderBy db.SearchOrderBy
+}
+
+// AccessibleReposEnv builds an AccessibleReposEnvironment for the repositories in `org`
+// that are accessible to the specified user.
+func AccessibleReposEnv(ctx context.Context, org *Organization, userID int64) (AccessibleReposEnvironment, error) {
+ var user *user_model.User
+
+ if userID > 0 {
+ u, err := user_model.GetUserByID(ctx, userID)
+ if err != nil {
+ return nil, err
+ }
+ user = u
+ }
+
+ teamIDs, err := org.getUserTeamIDs(ctx, userID)
+ if err != nil {
+ return nil, err
+ }
+ return &accessibleReposEnv{
+ org: org,
+ user: user,
+ teamIDs: teamIDs,
+ ctx: ctx,
+ orderBy: db.SearchOrderByRecentUpdated,
+ }, nil
+}
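+
+// A minimal usage sketch (assumes ctx, org, and a user ID; this mirrors the
+// call RemoveOrgUser makes when cleaning up repository access):
+//
+//	env, err := AccessibleReposEnv(ctx, org, userID)
+//	if err != nil {
+//		return err
+//	}
+//	repoIDs, err := env.RepoIDs(1, org.NumRepos)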
+
+// AccessibleTeamReposEnv builds an AccessibleReposEnvironment for the repositories in `org`
+// that are accessible to the specified team.
+func (org *Organization) AccessibleTeamReposEnv(ctx context.Context, team *Team) AccessibleReposEnvironment {
+ return &accessibleReposEnv{
+ org: org,
+ team: team,
+ ctx: ctx,
+ orderBy: db.SearchOrderByRecentUpdated,
+ }
+}
+
+func (env *accessibleReposEnv) cond() builder.Cond {
+ cond := builder.NewCond()
+ if env.team != nil {
+ cond = cond.And(builder.Eq{"team_repo.team_id": env.team.ID})
+ } else {
+ if env.user == nil || !env.user.IsRestricted {
+ cond = cond.Or(builder.Eq{
+ "`repository`.owner_id": env.org.ID,
+ "`repository`.is_private": false,
+ })
+ }
+ if len(env.teamIDs) > 0 {
+ cond = cond.Or(builder.In("team_repo.team_id", env.teamIDs))
+ }
+ }
+ if env.keyword != "" {
+ cond = cond.And(builder.Like{"`repository`.lower_name", strings.ToLower(env.keyword)})
+ }
+ return cond
+}
+
+func (env *accessibleReposEnv) CountRepos() (int64, error) {
+ repoCount, err := db.GetEngine(env.ctx).
+ Join("INNER", "team_repo", "`team_repo`.repo_id=`repository`.id").
+ Where(env.cond()).
+ Distinct("`repository`.id").
+ Count(&repo_model.Repository{})
+ if err != nil {
+ return 0, fmt.Errorf("count user repositories in organization: %w", err)
+ }
+ return repoCount, nil
+}
+
+func (env *accessibleReposEnv) RepoIDs(page, pageSize int) ([]int64, error) {
+ if page <= 0 {
+ page = 1
+ }
+
+ repoIDs := make([]int64, 0, pageSize)
+ return repoIDs, db.GetEngine(env.ctx).
+ Table("repository").
+ Join("INNER", "team_repo", "`team_repo`.repo_id=`repository`.id").
+ Where(env.cond()).
+ GroupBy("`repository`.id,`repository`."+strings.Fields(string(env.orderBy))[0]).
+ OrderBy(string(env.orderBy)).
+ Limit(pageSize, (page-1)*pageSize).
+ Cols("`repository`.id").
+ Find(&repoIDs)
+}
+
+func (env *accessibleReposEnv) Repos(page, pageSize int) (repo_model.RepositoryList, error) {
+ repoIDs, err := env.RepoIDs(page, pageSize)
+ if err != nil {
+ return nil, fmt.Errorf("GetUserRepositoryIDs: %w", err)
+ }
+
+ repos := make([]*repo_model.Repository, 0, len(repoIDs))
+ if len(repoIDs) == 0 {
+ return repos, nil
+ }
+
+ return repos, db.GetEngine(env.ctx).
+ In("`repository`.id", repoIDs).
+ OrderBy(string(env.orderBy)).
+ Find(&repos)
+}
+
+func (env *accessibleReposEnv) MirrorRepoIDs() ([]int64, error) {
+ repoIDs := make([]int64, 0, 10)
+ return repoIDs, db.GetEngine(env.ctx).
+ Table("repository").
+ Join("INNER", "team_repo", "`team_repo`.repo_id=`repository`.id AND `repository`.is_mirror=?", true).
+ Where(env.cond()).
+ GroupBy("`repository`.id, `repository`.updated_unix").
+ OrderBy(string(env.orderBy)).
+ Cols("`repository`.id").
+ Find(&repoIDs)
+}
+
+func (env *accessibleReposEnv) MirrorRepos() (repo_model.RepositoryList, error) {
+ repoIDs, err := env.MirrorRepoIDs()
+ if err != nil {
+ return nil, fmt.Errorf("MirrorRepoIDs: %w", err)
+ }
+
+ repos := make([]*repo_model.Repository, 0, len(repoIDs))
+ if len(repoIDs) == 0 {
+ return repos, nil
+ }
+
+ return repos, db.GetEngine(env.ctx).
+ In("`repository`.id", repoIDs).
+ Find(&repos)
+}
+
+func (env *accessibleReposEnv) AddKeyword(keyword string) {
+ env.keyword = keyword
+}
+
+func (env *accessibleReposEnv) SetSort(orderBy db.SearchOrderBy) {
+ env.orderBy = orderBy
+}
diff --git a/models/organization/org_repo.go b/models/organization/org_repo.go
new file mode 100644
index 0000000..f7e5992
--- /dev/null
+++ b/models/organization/org_repo.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package organization
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+)
+
+// GetOrgRepositories get repos belonging to the given organization
+func GetOrgRepositories(ctx context.Context, orgID int64) (repo_model.RepositoryList, error) {
+ var orgRepos []*repo_model.Repository
+ return orgRepos, db.GetEngine(ctx).Where("owner_id = ?", orgID).Find(&orgRepos)
+}
diff --git a/models/organization/org_test.go b/models/organization/org_test.go
new file mode 100644
index 0000000..21d954b
--- /dev/null
+++ b/models/organization/org_test.go
@@ -0,0 +1,548 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package organization_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/organization"
+ "code.gitea.io/gitea/models/perm"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/structs"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestUser_IsOwnedBy(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ for _, testCase := range []struct {
+ OrgID int64
+ UserID int64
+ ExpectedOwner bool
+ }{
+ {3, 2, true},
+ {3, 1, false},
+ {3, 3, false},
+ {3, 4, false},
+ {2, 2, false}, // user2 is not an organization
+ {2, 3, false},
+ } {
+ org := unittest.AssertExistsAndLoadBean(t, &organization.Organization{ID: testCase.OrgID})
+ isOwner, err := org.IsOwnedBy(db.DefaultContext, testCase.UserID)
+ require.NoError(t, err)
+ assert.Equal(t, testCase.ExpectedOwner, isOwner)
+ }
+}
+
+func TestUser_IsOrgMember(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ for _, testCase := range []struct {
+ OrgID int64
+ UserID int64
+ ExpectedMember bool
+ }{
+ {3, 2, true},
+ {3, 4, true},
+ {3, 1, false},
+ {3, 3, false},
+ {2, 2, false}, // user2 is not an organization
+ {2, 3, false},
+ } {
+ org := unittest.AssertExistsAndLoadBean(t, &organization.Organization{ID: testCase.OrgID})
+ isMember, err := org.IsOrgMember(db.DefaultContext, testCase.UserID)
+ require.NoError(t, err)
+ assert.Equal(t, testCase.ExpectedMember, isMember)
+ }
+}
+
+func TestUser_GetTeam(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ org := unittest.AssertExistsAndLoadBean(t, &organization.Organization{ID: 3})
+ team, err := org.GetTeam(db.DefaultContext, "team1")
+ require.NoError(t, err)
+ assert.Equal(t, org.ID, team.OrgID)
+ assert.Equal(t, "team1", team.LowerName)
+
+ _, err = org.GetTeam(db.DefaultContext, "does not exist")
+ assert.True(t, organization.IsErrTeamNotExist(err))
+
+ nonOrg := unittest.AssertExistsAndLoadBean(t, &organization.Organization{ID: 2})
+ _, err = nonOrg.GetTeam(db.DefaultContext, "team")
+ assert.True(t, organization.IsErrTeamNotExist(err))
+}
+
+func TestUser_GetOwnerTeam(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ org := unittest.AssertExistsAndLoadBean(t, &organization.Organization{ID: 3})
+ team, err := org.GetOwnerTeam(db.DefaultContext)
+ require.NoError(t, err)
+ assert.Equal(t, org.ID, team.OrgID)
+
+ nonOrg := unittest.AssertExistsAndLoadBean(t, &organization.Organization{ID: 2})
+ _, err = nonOrg.GetOwnerTeam(db.DefaultContext)
+ assert.True(t, organization.IsErrTeamNotExist(err))
+}
+
+func TestUser_GetTeams(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ org := unittest.AssertExistsAndLoadBean(t, &organization.Organization{ID: 3})
+ teams, err := org.LoadTeams(db.DefaultContext)
+ require.NoError(t, err)
+ if assert.Len(t, teams, 5) {
+ assert.Equal(t, int64(1), teams[0].ID)
+ assert.Equal(t, int64(2), teams[1].ID)
+ assert.Equal(t, int64(12), teams[2].ID)
+ assert.Equal(t, int64(14), teams[3].ID)
+ assert.Equal(t, int64(7), teams[4].ID)
+ }
+}
+
+func TestUser_GetMembers(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ org := unittest.AssertExistsAndLoadBean(t, &organization.Organization{ID: 3})
+ members, _, err := org.GetMembers(db.DefaultContext)
+ require.NoError(t, err)
+ if assert.Len(t, members, 3) {
+ assert.Equal(t, int64(2), members[0].ID)
+ assert.Equal(t, int64(28), members[1].ID)
+ assert.Equal(t, int64(4), members[2].ID)
+ }
+}
+
+func TestGetOrgByName(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ org, err := organization.GetOrgByName(db.DefaultContext, "org3")
+ require.NoError(t, err)
+ assert.EqualValues(t, 3, org.ID)
+ assert.Equal(t, "org3", org.Name)
+
+ _, err = organization.GetOrgByName(db.DefaultContext, "user2") // user2 is an individual
+ assert.True(t, organization.IsErrOrgNotExist(err))
+
+ _, err = organization.GetOrgByName(db.DefaultContext, "") // corner case
+ assert.True(t, organization.IsErrOrgNotExist(err))
+}
+
+func TestCountOrganizations(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ expected, err := db.GetEngine(db.DefaultContext).Where("type=?", user_model.UserTypeOrganization).Count(&organization.Organization{})
+ require.NoError(t, err)
+ cnt, err := db.Count[organization.Organization](db.DefaultContext, organization.FindOrgOptions{IncludePrivate: true})
+ require.NoError(t, err)
+ assert.Equal(t, expected, cnt)
+}
+
+func TestIsOrganizationOwner(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ test := func(orgID, userID int64, expected bool) {
+ isOwner, err := organization.IsOrganizationOwner(db.DefaultContext, orgID, userID)
+ require.NoError(t, err)
+ assert.EqualValues(t, expected, isOwner)
+ }
+ test(3, 2, true)
+ test(3, 3, false)
+ test(6, 5, true)
+ test(6, 4, false)
+ test(unittest.NonexistentID, unittest.NonexistentID, false)
+}
+
+func TestIsOrganizationMember(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ test := func(orgID, userID int64, expected bool) {
+ isMember, err := organization.IsOrganizationMember(db.DefaultContext, orgID, userID)
+ require.NoError(t, err)
+ assert.EqualValues(t, expected, isMember)
+ }
+ test(3, 2, true)
+ test(3, 3, false)
+ test(3, 4, true)
+ test(6, 5, true)
+ test(6, 4, false)
+ test(unittest.NonexistentID, unittest.NonexistentID, false)
+}
+
+func TestIsPublicMembership(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ test := func(orgID, userID int64, expected bool) {
+ isMember, err := organization.IsPublicMembership(db.DefaultContext, orgID, userID)
+ require.NoError(t, err)
+ assert.EqualValues(t, expected, isMember)
+ }
+ test(3, 2, true)
+ test(3, 3, false)
+ test(3, 4, false)
+ test(6, 5, true)
+ test(6, 4, false)
+ test(unittest.NonexistentID, unittest.NonexistentID, false)
+}
+
+func TestFindOrgs(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ orgs, err := db.Find[organization.Organization](db.DefaultContext, organization.FindOrgOptions{
+ UserID: 4,
+ IncludePrivate: true,
+ })
+ require.NoError(t, err)
+ if assert.Len(t, orgs, 1) {
+ assert.EqualValues(t, 3, orgs[0].ID)
+ }
+
+ orgs, err = db.Find[organization.Organization](db.DefaultContext, organization.FindOrgOptions{
+ UserID: 4,
+ IncludePrivate: false,
+ })
+ require.NoError(t, err)
+ assert.Empty(t, orgs)
+
+ total, err := db.Count[organization.Organization](db.DefaultContext, organization.FindOrgOptions{
+ UserID: 4,
+ IncludePrivate: true,
+ })
+ require.NoError(t, err)
+ assert.EqualValues(t, 1, total)
+}
+
+func TestGetOrgUsersByOrgID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ orgUsers, err := organization.GetOrgUsersByOrgID(db.DefaultContext, &organization.FindOrgMembersOpts{
+ ListOptions: db.ListOptions{},
+ OrgID: 3,
+ PublicOnly: false,
+ })
+ require.NoError(t, err)
+ if assert.Len(t, orgUsers, 3) {
+ assert.Equal(t, organization.OrgUser{
+ ID: orgUsers[0].ID,
+ OrgID: 3,
+ UID: 2,
+ IsPublic: true,
+ }, *orgUsers[0])
+ assert.Equal(t, organization.OrgUser{
+ ID: orgUsers[1].ID,
+ OrgID: 3,
+ UID: 4,
+ IsPublic: false,
+ }, *orgUsers[1])
+ assert.Equal(t, organization.OrgUser{
+ ID: orgUsers[2].ID,
+ OrgID: 3,
+ UID: 28,
+ IsPublic: true,
+ }, *orgUsers[2])
+ }
+
+ orgUsers, err = organization.GetOrgUsersByOrgID(db.DefaultContext, &organization.FindOrgMembersOpts{
+ ListOptions: db.ListOptions{},
+ OrgID: unittest.NonexistentID,
+ PublicOnly: false,
+ })
+ require.NoError(t, err)
+ assert.Empty(t, orgUsers)
+}
+
+func TestChangeOrgUserStatus(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ testSuccess := func(orgID, userID int64, public bool) {
+ require.NoError(t, organization.ChangeOrgUserStatus(db.DefaultContext, orgID, userID, public))
+ orgUser := unittest.AssertExistsAndLoadBean(t, &organization.OrgUser{OrgID: orgID, UID: userID})
+ assert.Equal(t, public, orgUser.IsPublic)
+ }
+
+ testSuccess(3, 2, false)
+ testSuccess(3, 2, false)
+ testSuccess(3, 4, true)
+ require.NoError(t, organization.ChangeOrgUserStatus(db.DefaultContext, unittest.NonexistentID, unittest.NonexistentID, true))
+}
+
+func TestUser_GetUserTeamIDs(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ org := unittest.AssertExistsAndLoadBean(t, &organization.Organization{ID: 3})
+ testSuccess := func(userID int64, expected []int64) {
+ teamIDs, err := org.GetUserTeamIDs(db.DefaultContext, userID)
+ require.NoError(t, err)
+ assert.Equal(t, expected, teamIDs)
+ }
+ testSuccess(2, []int64{1, 2, 14})
+ testSuccess(4, []int64{2})
+ testSuccess(unittest.NonexistentID, []int64{})
+}
+
+func TestAccessibleReposEnv_CountRepos(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ org := unittest.AssertExistsAndLoadBean(t, &organization.Organization{ID: 3})
+ testSuccess := func(userID, expectedCount int64) {
+ env, err := organization.AccessibleReposEnv(db.DefaultContext, org, userID)
+ require.NoError(t, err)
+ count, err := env.CountRepos()
+ require.NoError(t, err)
+ assert.EqualValues(t, expectedCount, count)
+ }
+ testSuccess(2, 3)
+ testSuccess(4, 2)
+}
+
+func TestAccessibleReposEnv_RepoIDs(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ org := unittest.AssertExistsAndLoadBean(t, &organization.Organization{ID: 3})
+ testSuccess := func(userID int64, expectedRepoIDs []int64) {
+ env, err := organization.AccessibleReposEnv(db.DefaultContext, org, userID)
+ require.NoError(t, err)
+ repoIDs, err := env.RepoIDs(1, 100)
+ require.NoError(t, err)
+ assert.Equal(t, expectedRepoIDs, repoIDs)
+ }
+ testSuccess(2, []int64{32, 5, 3})
+ testSuccess(4, []int64{32, 3})
+}
+
+func TestAccessibleReposEnv_Repos(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ org := unittest.AssertExistsAndLoadBean(t, &organization.Organization{ID: 3})
+ testSuccess := func(userID int64, expectedRepoIDs []int64) {
+ env, err := organization.AccessibleReposEnv(db.DefaultContext, org, userID)
+ require.NoError(t, err)
+ repos, err := env.Repos(1, 100)
+ require.NoError(t, err)
+ expectedRepos := make(repo_model.RepositoryList, len(expectedRepoIDs))
+ for i, repoID := range expectedRepoIDs {
+ expectedRepos[i] = unittest.AssertExistsAndLoadBean(t,
+ &repo_model.Repository{ID: repoID})
+ }
+ assert.Equal(t, expectedRepos, repos)
+ }
+ testSuccess(2, []int64{32, 5, 3})
+ testSuccess(4, []int64{32, 3})
+}
+
+func TestAccessibleReposEnv_MirrorRepos(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ org := unittest.AssertExistsAndLoadBean(t, &organization.Organization{ID: 3})
+ testSuccess := func(userID int64, expectedRepoIDs []int64) {
+ env, err := organization.AccessibleReposEnv(db.DefaultContext, org, userID)
+ require.NoError(t, err)
+ repos, err := env.MirrorRepos()
+ require.NoError(t, err)
+ expectedRepos := make(repo_model.RepositoryList, len(expectedRepoIDs))
+ for i, repoID := range expectedRepoIDs {
+ expectedRepos[i] = unittest.AssertExistsAndLoadBean(t,
+ &repo_model.Repository{ID: repoID})
+ }
+ assert.Equal(t, expectedRepos, repos)
+ }
+ testSuccess(2, []int64{5})
+ testSuccess(4, []int64{})
+}
+
+func TestHasOrgVisibleTypePublic(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ owner := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+ org3 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 3})
+
+ const newOrgName = "test-org-public"
+ org := &organization.Organization{
+ Name: newOrgName,
+ Visibility: structs.VisibleTypePublic,
+ }
+
+ unittest.AssertNotExistsBean(t, &user_model.User{Name: org.Name, Type: user_model.UserTypeOrganization})
+ require.NoError(t, organization.CreateOrganization(db.DefaultContext, org, owner))
+ org = unittest.AssertExistsAndLoadBean(t,
+ &organization.Organization{Name: org.Name, Type: user_model.UserTypeOrganization})
+ test1 := organization.HasOrgOrUserVisible(db.DefaultContext, org.AsUser(), owner)
+ test2 := organization.HasOrgOrUserVisible(db.DefaultContext, org.AsUser(), org3)
+ test3 := organization.HasOrgOrUserVisible(db.DefaultContext, org.AsUser(), nil)
+ assert.True(t, test1) // owner of org
+ assert.True(t, test2) // user not a part of org
+ assert.True(t, test3) // logged out user
+}
+
+func TestHasOrgVisibleTypeLimited(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ owner := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+ org3 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 3})
+
+ const newOrgName = "test-org-limited"
+ org := &organization.Organization{
+ Name: newOrgName,
+ Visibility: structs.VisibleTypeLimited,
+ }
+
+ unittest.AssertNotExistsBean(t, &user_model.User{Name: org.Name, Type: user_model.UserTypeOrganization})
+ require.NoError(t, organization.CreateOrganization(db.DefaultContext, org, owner))
+ org = unittest.AssertExistsAndLoadBean(t,
+ &organization.Organization{Name: org.Name, Type: user_model.UserTypeOrganization})
+ test1 := organization.HasOrgOrUserVisible(db.DefaultContext, org.AsUser(), owner)
+ test2 := organization.HasOrgOrUserVisible(db.DefaultContext, org.AsUser(), org3)
+ test3 := organization.HasOrgOrUserVisible(db.DefaultContext, org.AsUser(), nil)
+ assert.True(t, test1) // owner of org
+ assert.True(t, test2) // user not a part of org
+ assert.False(t, test3) // logged out user
+}
+
+func TestHasOrgVisibleTypePrivate(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ owner := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+ org3 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 3})
+
+ const newOrgName = "test-org-private"
+ org := &organization.Organization{
+ Name: newOrgName,
+ Visibility: structs.VisibleTypePrivate,
+ }
+
+ unittest.AssertNotExistsBean(t, &user_model.User{Name: org.Name, Type: user_model.UserTypeOrganization})
+ require.NoError(t, organization.CreateOrganization(db.DefaultContext, org, owner))
+ org = unittest.AssertExistsAndLoadBean(t,
+ &organization.Organization{Name: org.Name, Type: user_model.UserTypeOrganization})
+ test1 := organization.HasOrgOrUserVisible(db.DefaultContext, org.AsUser(), owner)
+ test2 := organization.HasOrgOrUserVisible(db.DefaultContext, org.AsUser(), org3)
+ test3 := organization.HasOrgOrUserVisible(db.DefaultContext, org.AsUser(), nil)
+ assert.True(t, test1) // owner of org
+ assert.False(t, test2) // user not a part of org
+ assert.False(t, test3) // logged out user
+}
+
+func TestGetUsersWhoCanCreateOrgRepo(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ users, err := organization.GetUsersWhoCanCreateOrgRepo(db.DefaultContext, 3)
+ require.NoError(t, err)
+ assert.Len(t, users, 2)
+ var ids []int64
+ for i := range users {
+ ids = append(ids, users[i].ID)
+ }
+ assert.ElementsMatch(t, ids, []int64{2, 28})
+
+ users, err = organization.GetUsersWhoCanCreateOrgRepo(db.DefaultContext, 7)
+ require.NoError(t, err)
+ assert.Len(t, users, 1)
+ assert.NotNil(t, users[5])
+}
+
+func TestUser_RemoveOrgRepo(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ org := unittest.AssertExistsAndLoadBean(t, &organization.Organization{ID: 3})
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{OwnerID: org.ID})
+
+ // remove a repo that does belong to org
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamRepo{RepoID: repo.ID, OrgID: org.ID})
+ require.NoError(t, organization.RemoveOrgRepo(db.DefaultContext, org.ID, repo.ID))
+ unittest.AssertNotExistsBean(t, &organization.TeamRepo{RepoID: repo.ID, OrgID: org.ID})
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: repo.ID}) // repo should still exist
+
+ // remove a repo that does not belong to org
+ require.NoError(t, organization.RemoveOrgRepo(db.DefaultContext, org.ID, repo.ID))
+ unittest.AssertNotExistsBean(t, &organization.TeamRepo{RepoID: repo.ID, OrgID: org.ID})
+
+ require.NoError(t, organization.RemoveOrgRepo(db.DefaultContext, org.ID, unittest.NonexistentID))
+
+ unittest.CheckConsistencyFor(t,
+ &user_model.User{ID: org.ID},
+ &organization.Team{OrgID: org.ID},
+ &repo_model.Repository{ID: repo.ID})
+}
+
+func TestCreateOrganization(t *testing.T) {
+ // successful creation of org
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ owner := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+ const newOrgName = "neworg"
+ org := &organization.Organization{
+ Name: newOrgName,
+ }
+
+ unittest.AssertNotExistsBean(t, &user_model.User{Name: newOrgName, Type: user_model.UserTypeOrganization})
+ require.NoError(t, organization.CreateOrganization(db.DefaultContext, org, owner))
+ org = unittest.AssertExistsAndLoadBean(t,
+ &organization.Organization{Name: newOrgName, Type: user_model.UserTypeOrganization})
+ ownerTeam := unittest.AssertExistsAndLoadBean(t,
+ &organization.Team{Name: organization.OwnerTeamName, OrgID: org.ID})
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamUser{UID: owner.ID, TeamID: ownerTeam.ID})
+ unittest.CheckConsistencyFor(t, &user_model.User{}, &organization.Team{})
+}
+
+func TestCreateOrganization2(t *testing.T) {
+ // unauthorized creation of org
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ owner := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 5})
+ const newOrgName = "neworg"
+ org := &organization.Organization{
+ Name: newOrgName,
+ }
+
+ unittest.AssertNotExistsBean(t, &organization.Organization{Name: newOrgName, Type: user_model.UserTypeOrganization})
+ err := organization.CreateOrganization(db.DefaultContext, org, owner)
+ require.Error(t, err)
+ assert.True(t, organization.IsErrUserNotAllowedCreateOrg(err))
+ unittest.AssertNotExistsBean(t, &organization.Organization{Name: newOrgName, Type: user_model.UserTypeOrganization})
+ unittest.CheckConsistencyFor(t, &organization.Organization{}, &organization.Team{})
+}
+
+func TestCreateOrganization3(t *testing.T) {
+	// create org with the same name as an existing org
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ owner := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+ org := &organization.Organization{Name: "org3"} // should already exist
+ unittest.AssertExistsAndLoadBean(t, &user_model.User{Name: org.Name}) // sanity check
+ err := organization.CreateOrganization(db.DefaultContext, org, owner)
+ require.Error(t, err)
+ assert.True(t, user_model.IsErrUserAlreadyExist(err))
+ unittest.CheckConsistencyFor(t, &user_model.User{}, &organization.Team{})
+}
+
+func TestCreateOrganization4(t *testing.T) {
+ // create org with unusable name
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ owner := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+ err := organization.CreateOrganization(db.DefaultContext, &organization.Organization{Name: "assets"}, owner)
+ require.Error(t, err)
+ assert.True(t, db.IsErrNameReserved(err))
+ unittest.CheckConsistencyFor(t, &organization.Organization{}, &organization.Team{})
+}
+
+func TestUnitPermission(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ publicOrg := &organization.Organization{ID: 1001, Visibility: structs.VisibleTypePublic}
+ limitedOrg := &organization.Organization{ID: 1001, Visibility: structs.VisibleTypeLimited}
+ privateOrg := &organization.Organization{ID: 1001, Visibility: structs.VisibleTypePrivate}
+ user := &user_model.User{ID: 1001}
+ t.Run("Anonymous", func(t *testing.T) {
+ t.Run("Public", func(t *testing.T) {
+ assert.EqualValues(t, perm.AccessModeRead, publicOrg.UnitPermission(db.DefaultContext, nil, unit.TypeCode))
+ })
+ t.Run("Limited", func(t *testing.T) {
+ assert.EqualValues(t, perm.AccessModeNone, limitedOrg.UnitPermission(db.DefaultContext, nil, unit.TypeCode))
+ })
+ t.Run("Private", func(t *testing.T) {
+ assert.EqualValues(t, perm.AccessModeNone, privateOrg.UnitPermission(db.DefaultContext, nil, unit.TypeCode))
+ })
+ })
+
+ t.Run("Logged in", func(t *testing.T) {
+ t.Run("Public", func(t *testing.T) {
+ assert.EqualValues(t, perm.AccessModeRead, publicOrg.UnitPermission(db.DefaultContext, user, unit.TypeCode))
+ })
+ t.Run("Limited", func(t *testing.T) {
+ assert.EqualValues(t, perm.AccessModeRead, limitedOrg.UnitPermission(db.DefaultContext, user, unit.TypeCode))
+ })
+ t.Run("Private", func(t *testing.T) {
+ assert.EqualValues(t, perm.AccessModeNone, privateOrg.UnitPermission(db.DefaultContext, user, unit.TypeCode))
+ })
+ })
+}
diff --git a/models/organization/org_user.go b/models/organization/org_user.go
new file mode 100644
index 0000000..5fe3a17
--- /dev/null
+++ b/models/organization/org_user.go
@@ -0,0 +1,138 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package organization
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+
+ "xorm.io/builder"
+)
+
+// ________ ____ ___
+// \_____ \_______ ____ | | \______ ___________
+// / | \_ __ \/ ___\| | / ___// __ \_ __ \
+// / | \ | \/ /_/ > | /\___ \\ ___/| | \/
+// \_______ /__| \___ /|______//____ >\___ >__|
+// \/ /_____/ \/ \/
+
+// OrgUser represents an organization-user relation.
+type OrgUser struct {
+ ID int64 `xorm:"pk autoincr"`
+ UID int64 `xorm:"INDEX UNIQUE(s)"`
+ OrgID int64 `xorm:"INDEX UNIQUE(s)"`
+ IsPublic bool `xorm:"INDEX"`
+}
+
+func init() {
+ db.RegisterModel(new(OrgUser))
+}
+
+// GetOrganizationCount returns the number of organizations the user is a member of.
+func GetOrganizationCount(ctx context.Context, u *user_model.User) (int64, error) {
+ return db.GetEngine(ctx).
+ Where("uid=?", u.ID).
+ Count(new(OrgUser))
+}
+
+// IsOrganizationOwner returns true if the given user is in the owner team.
+func IsOrganizationOwner(ctx context.Context, orgID, uid int64) (bool, error) {
+ ownerTeam, err := GetOwnerTeam(ctx, orgID)
+ if err != nil {
+ if IsErrTeamNotExist(err) {
+ log.Error("Organization does not have owner team: %d", orgID)
+ return false, nil
+ }
+ return false, err
+ }
+ return IsTeamMember(ctx, orgID, ownerTeam.ID, uid)
+}
+
+// IsOrganizationAdmin returns true if the given user is in the owner team or an admin team.
+func IsOrganizationAdmin(ctx context.Context, orgID, uid int64) (bool, error) {
+ teams, err := GetUserOrgTeams(ctx, orgID, uid)
+ if err != nil {
+ return false, err
+ }
+ for _, t := range teams {
+ if t.AccessMode >= perm.AccessModeAdmin {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// IsOrganizationMember returns true if the given user is a member of the organization.
+func IsOrganizationMember(ctx context.Context, orgID, uid int64) (bool, error) {
+ return db.GetEngine(ctx).
+ Where("uid=?", uid).
+ And("org_id=?", orgID).
+ Table("org_user").
+ Exist()
+}
+
+// IsPublicMembership returns true if the given user's membership in the given org is public.
+func IsPublicMembership(ctx context.Context, orgID, uid int64) (bool, error) {
+ return db.GetEngine(ctx).
+ Where("uid=?", uid).
+ And("org_id=?", orgID).
+ And("is_public=?", true).
+ Table("org_user").
+ Exist()
+}
+
+// CanCreateOrgRepo returns true if the user can create repositories in the organization.
+func CanCreateOrgRepo(ctx context.Context, orgID, uid int64) (bool, error) {
+ return db.GetEngine(ctx).
+ Where(builder.Eq{"team.can_create_org_repo": true}).
+ Join("INNER", "team_user", "team_user.team_id = team.id").
+ And("team_user.uid = ?", uid).
+ And("team_user.org_id = ?", orgID).
+ Exist(new(Team))
+}
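+
+// Example (illustrative sketch, not part of the original sources): a caller
+// could gate a "new repository" action on this check; org and doer are assumed
+// to come from the request context:
+//
+//	ok, err := CanCreateOrgRepo(ctx, org.ID, doer.ID)
+//	if err != nil {
+//		return err
+//	}
+//	if !ok {
+//		// deny the request
+//	}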
+
+// IsUserOrgOwner maps each user in the given list to whether they are in the owner team of the given organization.
+func IsUserOrgOwner(ctx context.Context, users user_model.UserList, orgID int64) map[int64]bool {
+ results := make(map[int64]bool, len(users))
+ for _, user := range users {
+ results[user.ID] = false // Set default to false
+ }
+ ownerMaps, err := loadOrganizationOwners(ctx, users, orgID)
+ if err == nil {
+ for _, owner := range ownerMaps {
+ results[owner.UID] = true
+ }
+ }
+ return results
+}
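+
+// Example (sketch): consuming the returned map for a loaded user list; the
+// badge rendering is a hypothetical caller concern:
+//
+//	ownerMap := IsUserOrgOwner(ctx, users, org.ID)
+//	for _, u := range users {
+//		if ownerMap[u.ID] {
+//			// render an "owner" badge next to u.Name
+//		}
+//	}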
+
+func loadOrganizationOwners(ctx context.Context, users user_model.UserList, orgID int64) (map[int64]*TeamUser, error) {
+ if len(users) == 0 {
+ return nil, nil
+ }
+ ownerTeam, err := GetOwnerTeam(ctx, orgID)
+ if err != nil {
+ if IsErrTeamNotExist(err) {
+ log.Error("Organization does not have owner team: %d", orgID)
+ return nil, nil
+ }
+ return nil, err
+ }
+
+ userIDs := users.GetUserIDs()
+ ownerMaps := make(map[int64]*TeamUser)
+ err = db.GetEngine(ctx).In("uid", userIDs).
+ And("org_id=?", orgID).
+ And("team_id=?", ownerTeam.ID).
+ Find(&ownerMaps)
+ if err != nil {
+ return nil, fmt.Errorf("find team users: %w", err)
+ }
+ return ownerMaps, nil
+}
diff --git a/models/organization/org_user_test.go b/models/organization/org_user_test.go
new file mode 100644
index 0000000..07d07ce
--- /dev/null
+++ b/models/organization/org_user_test.go
@@ -0,0 +1,155 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package organization_test
+
+import (
+ "fmt"
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/organization"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/setting"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestUserIsPublicMember(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ tt := []struct {
+ uid int64
+ orgid int64
+ expected bool
+ }{
+ {2, 3, true},
+ {4, 3, false},
+ {5, 6, true},
+ {5, 7, false},
+ }
+ for _, v := range tt {
+ t.Run(fmt.Sprintf("UserId%dIsPublicMemberOf%d", v.uid, v.orgid), func(t *testing.T) {
+ testUserIsPublicMember(t, v.uid, v.orgid, v.expected)
+ })
+ }
+}
+
+func testUserIsPublicMember(t *testing.T, uid, orgID int64, expected bool) {
+ user, err := user_model.GetUserByID(db.DefaultContext, uid)
+ require.NoError(t, err)
+ is, err := organization.IsPublicMembership(db.DefaultContext, orgID, user.ID)
+ require.NoError(t, err)
+ assert.Equal(t, expected, is)
+}
+
+func TestIsUserOrgOwner(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ tt := []struct {
+ uid int64
+ orgid int64
+ expected bool
+ }{
+ {2, 3, true},
+ {4, 3, false},
+ {5, 6, true},
+ {5, 7, true},
+ }
+ for _, v := range tt {
+ t.Run(fmt.Sprintf("UserId%dIsOrgOwnerOf%d", v.uid, v.orgid), func(t *testing.T) {
+ testIsUserOrgOwner(t, v.uid, v.orgid, v.expected)
+ })
+ }
+}
+
+func testIsUserOrgOwner(t *testing.T, uid, orgID int64, expected bool) {
+ user, err := user_model.GetUserByID(db.DefaultContext, uid)
+ require.NoError(t, err)
+ is, err := organization.IsOrganizationOwner(db.DefaultContext, orgID, user.ID)
+ require.NoError(t, err)
+ assert.Equal(t, expected, is)
+}
+
+func TestUserListIsPublicMember(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ tt := []struct {
+ orgid int64
+ expected map[int64]bool
+ }{
+ {3, map[int64]bool{2: true, 4: false, 28: true}},
+ {6, map[int64]bool{5: true, 28: true}},
+ {7, map[int64]bool{5: false}},
+ {25, map[int64]bool{24: true}},
+ {22, map[int64]bool{}},
+ }
+ for _, v := range tt {
+ t.Run(fmt.Sprintf("IsPublicMemberOfOrgId%d", v.orgid), func(t *testing.T) {
+ testUserListIsPublicMember(t, v.orgid, v.expected)
+ })
+ }
+}
+
+func testUserListIsPublicMember(t *testing.T, orgID int64, expected map[int64]bool) {
+ org, err := organization.GetOrgByID(db.DefaultContext, orgID)
+ require.NoError(t, err)
+ _, membersIsPublic, err := org.GetMembers(db.DefaultContext)
+ require.NoError(t, err)
+ assert.Equal(t, expected, membersIsPublic)
+}
+
+func TestUserListIsUserOrgOwner(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ tt := []struct {
+ orgid int64
+ expected map[int64]bool
+ }{
+ {3, map[int64]bool{2: true, 4: false, 28: false}},
+ {6, map[int64]bool{5: true, 28: false}},
+ {7, map[int64]bool{5: true}},
+ {25, map[int64]bool{24: false}}, // ErrTeamNotExist
+ {22, map[int64]bool{}}, // No member
+ }
+ for _, v := range tt {
+ t.Run(fmt.Sprintf("IsUserOrgOwnerOfOrgId%d", v.orgid), func(t *testing.T) {
+ testUserListIsUserOrgOwner(t, v.orgid, v.expected)
+ })
+ }
+}
+
+func testUserListIsUserOrgOwner(t *testing.T, orgID int64, expected map[int64]bool) {
+ org, err := organization.GetOrgByID(db.DefaultContext, orgID)
+ require.NoError(t, err)
+ members, _, err := org.GetMembers(db.DefaultContext)
+ require.NoError(t, err)
+ assert.Equal(t, expected, organization.IsUserOrgOwner(db.DefaultContext, members, orgID))
+}
+
+func TestAddOrgUser(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ testSuccess := func(orgID, userID int64, isPublic bool) {
+ org := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: orgID})
+ expectedNumMembers := org.NumMembers
+ if !unittest.BeanExists(t, &organization.OrgUser{OrgID: orgID, UID: userID}) {
+ expectedNumMembers++
+ }
+ require.NoError(t, organization.AddOrgUser(db.DefaultContext, orgID, userID))
+ ou := &organization.OrgUser{OrgID: orgID, UID: userID}
+ unittest.AssertExistsAndLoadBean(t, ou)
+ assert.Equal(t, isPublic, ou.IsPublic)
+ org = unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: orgID})
+ assert.EqualValues(t, expectedNumMembers, org.NumMembers)
+ }
+
+ setting.Service.DefaultOrgMemberVisible = false
+ testSuccess(3, 5, false)
+ testSuccess(3, 5, false)
+ testSuccess(6, 2, false)
+
+ setting.Service.DefaultOrgMemberVisible = true
+ testSuccess(6, 3, true)
+
+ unittest.CheckConsistencyFor(t, &user_model.User{}, &organization.Team{})
+}
diff --git a/models/organization/team.go b/models/organization/team.go
new file mode 100644
index 0000000..ddff32c
--- /dev/null
+++ b/models/organization/team.go
@@ -0,0 +1,310 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// Copyright 2016 The Gogs Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package organization
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ___________
+// \__ ___/___ _____ _____
+// | |_/ __ \\__ \ / \
+// | |\ ___/ / __ \| Y Y \
+// |____| \___ >____ /__|_| /
+// \/ \/ \/
+
+// ErrTeamAlreadyExist represents a "TeamAlreadyExist" kind of error.
+type ErrTeamAlreadyExist struct {
+ OrgID int64
+ Name string
+}
+
+// IsErrTeamAlreadyExist checks if an error is a ErrTeamAlreadyExist.
+func IsErrTeamAlreadyExist(err error) bool {
+ _, ok := err.(ErrTeamAlreadyExist)
+ return ok
+}
+
+func (err ErrTeamAlreadyExist) Error() string {
+ return fmt.Sprintf("team already exists [org_id: %d, name: %s]", err.OrgID, err.Name)
+}
+
+func (err ErrTeamAlreadyExist) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+// ErrTeamNotExist represents a "TeamNotExist" error
+type ErrTeamNotExist struct {
+ OrgID int64
+ TeamID int64
+ Name string
+}
+
+// IsErrTeamNotExist checks if an error is a ErrTeamNotExist.
+func IsErrTeamNotExist(err error) bool {
+ _, ok := err.(ErrTeamNotExist)
+ return ok
+}
+
+func (err ErrTeamNotExist) Error() string {
+ return fmt.Sprintf("team does not exist [org_id %d, team_id %d, name: %s]", err.OrgID, err.TeamID, err.Name)
+}
+
+func (err ErrTeamNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// OwnerTeamName is the name of the owner team.
+const OwnerTeamName = "Owners"
+
+// Team represents an organization team.
+type Team struct {
+ ID int64 `xorm:"pk autoincr"`
+ OrgID int64 `xorm:"INDEX"`
+ LowerName string
+ Name string
+ Description string
+ AccessMode perm.AccessMode `xorm:"'authorize'"`
+ Repos []*repo_model.Repository `xorm:"-"`
+ Members []*user_model.User `xorm:"-"`
+ NumRepos int
+ NumMembers int
+ Units []*TeamUnit `xorm:"-"`
+ IncludesAllRepositories bool `xorm:"NOT NULL DEFAULT false"`
+ CanCreateOrgRepo bool `xorm:"NOT NULL DEFAULT false"`
+}
+
+func init() {
+ db.RegisterModel(new(Team))
+ db.RegisterModel(new(TeamUser))
+ db.RegisterModel(new(TeamRepo))
+ db.RegisterModel(new(TeamUnit))
+ db.RegisterModel(new(TeamInvite))
+}
+
+func (t *Team) LogString() string {
+ if t == nil {
+ return "<Team nil>"
+ }
+ return fmt.Sprintf("<Team %d:%s OrgID=%d AccessMode=%s>", t.ID, t.Name, t.OrgID, t.AccessMode.LogString())
+}
+
+// LoadUnits loads the list of units available to the team, if not already loaded.
+func (t *Team) LoadUnits(ctx context.Context) (err error) {
+ if t.Units != nil {
+ return nil
+ }
+
+ t.Units, err = getUnitsByTeamID(ctx, t.ID)
+ return err
+}
+
+// GetUnitNames returns the names of the team's units.
+func (t *Team) GetUnitNames() (res []string) {
+ if t.AccessMode >= perm.AccessModeAdmin {
+ return unit.AllUnitKeyNames()
+ }
+
+ for _, u := range t.Units {
+ res = append(res, unit.Units[u.Type].NameKey)
+ }
+ return res
+}
+
+// GetUnitsMap returns the access mode of each of the team's units.
+func (t *Team) GetUnitsMap() map[string]string {
+ m := make(map[string]string)
+ if t.AccessMode >= perm.AccessModeAdmin {
+ for _, u := range unit.Units {
+ m[u.NameKey] = t.AccessMode.String()
+ }
+ } else {
+ for _, u := range t.Units {
+ m[u.Unit().NameKey] = u.AccessMode.String()
+ }
+ }
+ return m
+}
+
+// IsOwnerTeam returns true if the team is the owner team.
+func (t *Team) IsOwnerTeam() bool {
+ return t.Name == OwnerTeamName
+}
+
+// IsMember returns true if the given user is a member of the team.
+func (t *Team) IsMember(ctx context.Context, userID int64) bool {
+ isMember, err := IsTeamMember(ctx, t.OrgID, t.ID, userID)
+ if err != nil {
+ log.Error("IsMember: %v", err)
+ return false
+ }
+ return isMember
+}
+
+// LoadRepositories loads the team's repositories, if not already loaded.
+func (t *Team) LoadRepositories(ctx context.Context) (err error) {
+ if t.Repos != nil {
+ return nil
+ }
+ t.Repos, err = GetTeamRepositories(ctx, &SearchTeamRepoOptions{
+ TeamID: t.ID,
+ })
+ return err
+}
+
+// LoadMembers loads the team's members.
+func (t *Team) LoadMembers(ctx context.Context) (err error) {
+ t.Members, err = GetTeamMembers(ctx, &SearchMembersOptions{
+ TeamID: t.ID,
+ })
+ return err
+}
+
+// UnitEnabled returns true if the team has the given unit type enabled.
+func (t *Team) UnitEnabled(ctx context.Context, tp unit.Type) bool {
+ return t.UnitAccessMode(ctx, tp) > perm.AccessModeNone
+}
+
+// UnitAccessMode returns the access mode the team has for the given unit type.
+func (t *Team) UnitAccessMode(ctx context.Context, tp unit.Type) perm.AccessMode {
+ if err := t.LoadUnits(ctx); err != nil {
+ log.Warn("Error loading team (ID: %d) units: %s", t.ID, err.Error())
+ }
+
+ for _, unit := range t.Units {
+ if unit.Type == tp {
+ return unit.AccessMode
+ }
+ }
+ return perm.AccessModeNone
+}
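+
+// Example (sketch): a caller deciding whether to show a team's code tab could
+// combine the two helpers above:
+//
+//	if team.UnitEnabled(ctx, unit.TypeCode) {
+//		mode := team.UnitAccessMode(ctx, unit.TypeCode) // exact access level
+//		_ = mode
+//	}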
+
+// IsUsableTeamName tests whether a name can be used as a team name.
+func IsUsableTeamName(name string) error {
+ switch name {
+ case "new":
+ return db.ErrNameReserved{Name: name}
+ default:
+ return nil
+ }
+}
+
+// GetTeam returns the team with the given name in the given organization.
+func GetTeam(ctx context.Context, orgID int64, name string) (*Team, error) {
+ t, exist, err := db.Get[Team](ctx, builder.Eq{"org_id": orgID, "lower_name": strings.ToLower(name)})
+ if err != nil {
+ return nil, err
+ } else if !exist {
+ return nil, ErrTeamNotExist{orgID, 0, name}
+ }
+ return t, nil
+}
+
+// GetTeamIDsByNames returns a slice of team IDs corresponding to the given names.
+func GetTeamIDsByNames(ctx context.Context, orgID int64, names []string, ignoreNonExistent bool) ([]int64, error) {
+ ids := make([]int64, 0, len(names))
+ for _, name := range names {
+ u, err := GetTeam(ctx, orgID, name)
+ if err != nil {
+ if ignoreNonExistent {
+ continue
+ }
+ return nil, err
+ }
+ ids = append(ids, u.ID)
+ }
+ return ids, nil
+}
+
+// GetOwnerTeam returns the owner team of the given organization.
+func GetOwnerTeam(ctx context.Context, orgID int64) (*Team, error) {
+ return GetTeam(ctx, orgID, OwnerTeamName)
+}
+
+// GetTeamByID returns the team with the given ID.
+func GetTeamByID(ctx context.Context, teamID int64) (*Team, error) {
+ t := new(Team)
+ has, err := db.GetEngine(ctx).ID(teamID).Get(t)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrTeamNotExist{0, teamID, ""}
+ }
+ return t, nil
+}
+
+// GetTeamNamesByID returns the lower names of the teams with the given IDs.
+func GetTeamNamesByID(ctx context.Context, teamIDs []int64) ([]string, error) {
+ if len(teamIDs) == 0 {
+ return []string{}, nil
+ }
+
+ var teamNames []string
+ err := db.GetEngine(ctx).Table("team").
+ Select("lower_name").
+ In("id", teamIDs).
+ Asc("name").
+ Find(&teamNames)
+
+ return teamNames, err
+}
+
+// IncrTeamRepoNum increases the number of repos for the given team by 1
+func IncrTeamRepoNum(ctx context.Context, teamID int64) error {
+ _, err := db.GetEngine(ctx).Incr("num_repos").ID(teamID).Update(new(Team))
+ return err
+}
+
+// CountInconsistentOwnerTeams returns the number of owner teams that have all of
+// their access modes set to "None".
+func CountInconsistentOwnerTeams(ctx context.Context) (int64, error) {
+ return db.GetEngine(ctx).Table("team").
+ Join("INNER", "team_unit", "`team`.id = `team_unit`.team_id").
+ Where("`team`.lower_name = ?", strings.ToLower(OwnerTeamName)).
+ GroupBy("`team_unit`.team_id").
+ Having("SUM(`team_unit`.access_mode) = 0").
+ Count()
+}
+
+// FixInconsistentOwnerTeams fixes inconsistent owner teams whose access modes are
+// all set to "None" by setting them back to "Owner" ("Read" for external units).
+func FixInconsistentOwnerTeams(ctx context.Context) (int64, error) {
+ teamIDs := []int64{}
+ if err := db.GetEngine(ctx).Table("team").
+ Select("`team`.id").
+ Join("INNER", "team_unit", "`team`.id = `team_unit`.team_id").
+ Where("`team`.lower_name = ?", strings.ToLower(OwnerTeamName)).
+ GroupBy("`team_unit`.team_id").
+ Having("SUM(`team_unit`.access_mode) = 0").
+ Find(&teamIDs); err != nil {
+ return 0, err
+ }
+
+ if err := db.Iterate(ctx, builder.In("team_id", teamIDs), func(ctx context.Context, bean *TeamUnit) error {
+ if bean.Type == unit.TypeExternalTracker || bean.Type == unit.TypeExternalWiki {
+ bean.AccessMode = perm.AccessModeRead
+ } else {
+ bean.AccessMode = perm.AccessModeOwner
+ }
+ _, err := db.GetEngine(ctx).ID(bean.ID).Table("team_unit").Cols("access_mode").Update(bean)
+ return err
+ }); err != nil {
+ return 0, err
+ }
+
+ return int64(len(teamIDs)), nil
+}
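+
+// Example (sketch): a consistency-check job could pair the two helpers; the
+// logging below is illustrative only:
+//
+//	if n, err := CountInconsistentOwnerTeams(ctx); err == nil && n > 0 {
+//		fixed, err := FixInconsistentOwnerTeams(ctx)
+//		if err == nil {
+//			log.Info("fixed %d inconsistent owner teams", fixed)
+//		}
+//	}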
diff --git a/models/organization/team_invite.go b/models/organization/team_invite.go
new file mode 100644
index 0000000..17f6c59
--- /dev/null
+++ b/models/organization/team_invite.go
@@ -0,0 +1,161 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package organization
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+type ErrTeamInviteAlreadyExist struct {
+ TeamID int64
+ Email string
+}
+
+func IsErrTeamInviteAlreadyExist(err error) bool {
+ _, ok := err.(ErrTeamInviteAlreadyExist)
+ return ok
+}
+
+func (err ErrTeamInviteAlreadyExist) Error() string {
+ return fmt.Sprintf("team invite already exists [team_id: %d, email: %s]", err.TeamID, err.Email)
+}
+
+func (err ErrTeamInviteAlreadyExist) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+type ErrTeamInviteNotFound struct {
+ Token string
+}
+
+func IsErrTeamInviteNotFound(err error) bool {
+ _, ok := err.(ErrTeamInviteNotFound)
+ return ok
+}
+
+func (err ErrTeamInviteNotFound) Error() string {
+ return fmt.Sprintf("team invite was not found [token: %s]", err.Token)
+}
+
+func (err ErrTeamInviteNotFound) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ErrUserEmailAlreadyAdded represents a "user by email already added to team" error.
+type ErrUserEmailAlreadyAdded struct {
+ Email string
+}
+
+// IsErrUserEmailAlreadyAdded checks if an error is a ErrUserEmailAlreadyAdded.
+func IsErrUserEmailAlreadyAdded(err error) bool {
+ _, ok := err.(ErrUserEmailAlreadyAdded)
+ return ok
+}
+
+func (err ErrUserEmailAlreadyAdded) Error() string {
+ return fmt.Sprintf("user with email already added [email: %s]", err.Email)
+}
+
+func (err ErrUserEmailAlreadyAdded) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+// TeamInvite represents an invite to a team
+type TeamInvite struct {
+ ID int64 `xorm:"pk autoincr"`
+ Token string `xorm:"UNIQUE(token) INDEX NOT NULL DEFAULT ''"`
+ InviterID int64 `xorm:"NOT NULL DEFAULT 0"`
+ OrgID int64 `xorm:"INDEX NOT NULL DEFAULT 0"`
+ TeamID int64 `xorm:"UNIQUE(team_mail) INDEX NOT NULL DEFAULT 0"`
+ Email string `xorm:"UNIQUE(team_mail) NOT NULL DEFAULT ''"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+}
+
+func CreateTeamInvite(ctx context.Context, doer *user_model.User, team *Team, email string) (*TeamInvite, error) {
+ has, err := db.GetEngine(ctx).Exist(&TeamInvite{
+ TeamID: team.ID,
+ Email: email,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if has {
+ return nil, ErrTeamInviteAlreadyExist{
+ TeamID: team.ID,
+ Email: email,
+ }
+ }
+
+ // check if the user is already a team member by email
+ exist, err := db.GetEngine(ctx).
+ Where(builder.Eq{
+ "team_user.org_id": team.OrgID,
+ "team_user.team_id": team.ID,
+ "`user`.email": email,
+ }).
+ Join("INNER", "`user`", "`user`.id = team_user.uid").
+ Table("team_user").
+ Exist()
+ if err != nil {
+ return nil, err
+ }
+
+ if exist {
+ return nil, ErrUserEmailAlreadyAdded{
+ Email: email,
+ }
+ }
+
+ token, err := util.CryptoRandomString(25)
+ if err != nil {
+ return nil, err
+ }
+
+ invite := &TeamInvite{
+ Token: token,
+ InviterID: doer.ID,
+ OrgID: team.OrgID,
+ TeamID: team.ID,
+ Email: email,
+ }
+
+ return invite, db.Insert(ctx, invite)
+}
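+
+// Example (sketch): inviting someone by mail address; delivering the token is
+// left to a hypothetical notification layer:
+//
+//	invite, err := CreateTeamInvite(ctx, doer, team, "someone@example.com")
+//	if err != nil {
+//		return err // e.g. ErrTeamInviteAlreadyExist or ErrUserEmailAlreadyAdded
+//	}
+//	// mail invite.Token to the address as part of an acceptance link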
+
+func RemoveInviteByID(ctx context.Context, inviteID, teamID int64) error {
+ _, err := db.DeleteByBean(ctx, &TeamInvite{
+ ID: inviteID,
+ TeamID: teamID,
+ })
+ return err
+}
+
+func GetInvitesByTeamID(ctx context.Context, teamID int64) ([]*TeamInvite, error) {
+ invites := make([]*TeamInvite, 0, 10)
+ return invites, db.GetEngine(ctx).
+ Where("team_id=?", teamID).
+ Find(&invites)
+}
+
+func GetInviteByToken(ctx context.Context, token string) (*TeamInvite, error) {
+ invite := &TeamInvite{}
+
+ has, err := db.GetEngine(ctx).Where("token=?", token).Get(invite)
+ if err != nil {
+ return nil, err
+ }
+ if !has {
+ return nil, ErrTeamInviteNotFound{Token: token}
+ }
+ return invite, nil
+}
diff --git a/models/organization/team_invite_test.go b/models/organization/team_invite_test.go
new file mode 100644
index 0000000..cbabf79
--- /dev/null
+++ b/models/organization/team_invite_test.go
@@ -0,0 +1,49 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package organization_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/organization"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestTeamInvite(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ team := unittest.AssertExistsAndLoadBean(t, &organization.Team{ID: 2})
+
+ t.Run("MailExistsInTeam", func(t *testing.T) {
+ user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+
+ // user 2 already added to team 2, should result in error
+ _, err := organization.CreateTeamInvite(db.DefaultContext, user2, team, user2.Email)
+ require.Error(t, err)
+ })
+
+ t.Run("CreateAndRemove", func(t *testing.T) {
+ user1 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+
+ invite, err := organization.CreateTeamInvite(db.DefaultContext, user1, team, "org3@example.com")
+ assert.NotNil(t, invite)
+ require.NoError(t, err)
+
+ // Shouldn't allow duplicate invite
+ _, err = organization.CreateTeamInvite(db.DefaultContext, user1, team, "org3@example.com")
+ require.Error(t, err)
+
+ // should remove invite
+ require.NoError(t, organization.RemoveInviteByID(db.DefaultContext, invite.ID, invite.TeamID))
+
+ // invite should not exist
+ _, err = organization.GetInviteByToken(db.DefaultContext, invite.Token)
+ require.Error(t, err)
+ })
+}
diff --git a/models/organization/team_list.go b/models/organization/team_list.go
new file mode 100644
index 0000000..5b45429
--- /dev/null
+++ b/models/organization/team_list.go
@@ -0,0 +1,128 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package organization
+
+import (
+ "context"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+
+ "xorm.io/builder"
+)
+
+type TeamList []*Team
+
+func (t TeamList) LoadUnits(ctx context.Context) error {
+ for _, team := range t {
+ if err := team.LoadUnits(ctx); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (t TeamList) UnitMaxAccess(tp unit.Type) perm.AccessMode {
+ maxAccess := perm.AccessModeNone
+ for _, team := range t {
+ if team.IsOwnerTeam() {
+ return perm.AccessModeOwner
+ }
+ for _, teamUnit := range team.Units {
+ if teamUnit.Type != tp {
+ continue
+ }
+ if teamUnit.AccessMode > maxAccess {
+ maxAccess = teamUnit.AccessMode
+ }
+ }
+ }
+ return maxAccess
+}
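+
+// Example (sketch): computing a user's effective access to the wiki unit of an
+// organization; note that units must be loaded before UnitMaxAccess can see
+// them:
+//
+//	teams, err := GetUserOrgTeams(ctx, org.ID, user.ID)
+//	if err == nil {
+//		if err := teams.LoadUnits(ctx); err == nil {
+//			mode := teams.UnitMaxAccess(unit.TypeWiki)
+//			_ = mode
+//		}
+//	}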
+
+// SearchTeamOptions holds the search options
+type SearchTeamOptions struct {
+ db.ListOptions
+ UserID int64
+ Keyword string
+ OrgID int64
+ IncludeDesc bool
+}
+
+func (opts *SearchTeamOptions) toCond() builder.Cond {
+ cond := builder.NewCond()
+
+ if len(opts.Keyword) > 0 {
+ lowerKeyword := strings.ToLower(opts.Keyword)
+ var keywordCond builder.Cond = builder.Like{"lower_name", lowerKeyword}
+ if opts.IncludeDesc {
+ keywordCond = keywordCond.Or(builder.Like{"LOWER(description)", lowerKeyword})
+ }
+ cond = cond.And(keywordCond)
+ }
+
+ if opts.OrgID > 0 {
+ cond = cond.And(builder.Eq{"`team`.org_id": opts.OrgID})
+ }
+
+ if opts.UserID > 0 {
+ cond = cond.And(builder.Eq{"team_user.uid": opts.UserID})
+ }
+
+ return cond
+}
+
+// SearchTeam searches for teams. The caller is responsible for checking permissions.
+func SearchTeam(ctx context.Context, opts *SearchTeamOptions) (TeamList, int64, error) {
+ sess := db.GetEngine(ctx)
+
+ opts.SetDefaultValues()
+ cond := opts.toCond()
+
+ if opts.UserID > 0 {
+ sess = sess.Join("INNER", "team_user", "team_user.team_id = team.id")
+ }
+ sess = db.SetSessionPagination(sess, opts)
+
+ teams := make([]*Team, 0, opts.PageSize)
+ count, err := sess.Where(cond).OrderBy("lower_name").FindAndCount(&teams)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ return teams, count, nil
+}
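+
+// Example (sketch): listing the first page of teams matching "dev" in an
+// organization:
+//
+//	teams, total, err := SearchTeam(ctx, &SearchTeamOptions{
+//		ListOptions: db.ListOptions{Page: 1, PageSize: 20},
+//		OrgID:       org.ID,
+//		Keyword:     "dev",
+//		IncludeDesc: true,
+//	})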
+
+// GetRepoTeams gets the list of teams that have access to the repository.
+func GetRepoTeams(ctx context.Context, repo *repo_model.Repository) (teams TeamList, err error) {
+ return teams, db.GetEngine(ctx).
+ Join("INNER", "team_repo", "team_repo.team_id = team.id").
+ Where("team.org_id = ?", repo.OwnerID).
+ And("team_repo.repo_id=?", repo.ID).
+ OrderBy("CASE WHEN name LIKE '" + OwnerTeamName + "' THEN '' ELSE name END").
+ Find(&teams)
+}
+
+// GetUserOrgTeams returns all teams that the user belongs to in the given organization.
+func GetUserOrgTeams(ctx context.Context, orgID, userID int64) (teams TeamList, err error) {
+ return teams, db.GetEngine(ctx).
+ Join("INNER", "team_user", "team_user.team_id = team.id").
+ Where("team.org_id = ?", orgID).
+ And("team_user.uid=?", userID).
+ Find(&teams)
+}
+
+// GetUserRepoTeams returns the user's teams that have access to the given repository in the organization.
+func GetUserRepoTeams(ctx context.Context, orgID, userID, repoID int64) (teams TeamList, err error) {
+ return teams, db.GetEngine(ctx).
+ Join("INNER", "team_user", "team_user.team_id = team.id").
+ Join("INNER", "team_repo", "team_repo.team_id = team.id").
+ Where("team.org_id = ?", orgID).
+ And("team_user.uid=?", userID).
+ And("team_repo.repo_id=?", repoID).
+ Find(&teams)
+}
diff --git a/models/organization/team_repo.go b/models/organization/team_repo.go
new file mode 100644
index 0000000..1184e39
--- /dev/null
+++ b/models/organization/team_repo.go
@@ -0,0 +1,85 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package organization
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ repo_model "code.gitea.io/gitea/models/repo"
+
+ "xorm.io/builder"
+)
+
+// TeamRepo represents a team-repository relation.
+type TeamRepo struct {
+ ID int64 `xorm:"pk autoincr"`
+ OrgID int64 `xorm:"INDEX"`
+ TeamID int64 `xorm:"UNIQUE(s)"`
+ RepoID int64 `xorm:"UNIQUE(s)"`
+}
+
+// HasTeamRepo returns true if the given repository belongs to the team.
+func HasTeamRepo(ctx context.Context, orgID, teamID, repoID int64) bool {
+ has, _ := db.GetEngine(ctx).
+ Where("org_id=?", orgID).
+ And("team_id=?", teamID).
+ And("repo_id=?", repoID).
+ Get(new(TeamRepo))
+ return has
+}
+
+type SearchTeamRepoOptions struct {
+ db.ListOptions
+ TeamID int64
+}
+
+// GetTeamRepositories returns the repositories of a team, optionally paginated.
+func GetTeamRepositories(ctx context.Context, opts *SearchTeamRepoOptions) (repo_model.RepositoryList, error) {
+ sess := db.GetEngine(ctx)
+ if opts.TeamID > 0 {
+ sess = sess.In("id",
+ builder.Select("repo_id").
+ From("team_repo").
+ Where(builder.Eq{"team_id": opts.TeamID}),
+ )
+ }
+ if opts.PageSize > 0 {
+ sess.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize)
+ }
+ var repos []*repo_model.Repository
+ return repos, sess.OrderBy("repository.name").
+ Find(&repos)
+}
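+
+// Example (sketch): fetching the second page of a team's repositories:
+//
+//	repos, err := GetTeamRepositories(ctx, &SearchTeamRepoOptions{
+//		ListOptions: db.ListOptions{Page: 2, PageSize: 10},
+//		TeamID:      team.ID,
+//	})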
+
+// AddTeamRepo adds a repo for an organization's team
+func AddTeamRepo(ctx context.Context, orgID, teamID, repoID int64) error {
+ _, err := db.GetEngine(ctx).Insert(&TeamRepo{
+ OrgID: orgID,
+ TeamID: teamID,
+ RepoID: repoID,
+ })
+ return err
+}
+
+// RemoveTeamRepo removes a repository from a team.
+func RemoveTeamRepo(ctx context.Context, teamID, repoID int64) error {
+ _, err := db.DeleteByBean(ctx, &TeamRepo{
+ TeamID: teamID,
+ RepoID: repoID,
+ })
+ return err
+}
+
+// GetTeamsWithAccessToRepo returns all teams in an organization that have at least the given access level to the repository.
+func GetTeamsWithAccessToRepo(ctx context.Context, orgID, repoID int64, mode perm.AccessMode) ([]*Team, error) {
+ teams := make([]*Team, 0, 5)
+ return teams, db.GetEngine(ctx).Where("team.authorize >= ?", mode).
+ Join("INNER", "team_repo", "team_repo.team_id = team.id").
+ And("team_repo.org_id = ?", orgID).
+ And("team_repo.repo_id = ?", repoID).
+ OrderBy("name").
+ Find(&teams)
+}
diff --git a/models/organization/team_test.go b/models/organization/team_test.go
new file mode 100644
index 0000000..601d136
--- /dev/null
+++ b/models/organization/team_test.go
@@ -0,0 +1,250 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package organization_test
+
+import (
+ "path/filepath"
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/organization"
+ "code.gitea.io/gitea/models/perm"
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/modules/setting"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestTeam_IsOwnerTeam(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ team := unittest.AssertExistsAndLoadBean(t, &organization.Team{ID: 1})
+ assert.True(t, team.IsOwnerTeam())
+
+ team = unittest.AssertExistsAndLoadBean(t, &organization.Team{ID: 2})
+ assert.False(t, team.IsOwnerTeam())
+}
+
+func TestTeam_IsMember(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ team := unittest.AssertExistsAndLoadBean(t, &organization.Team{ID: 1})
+ assert.True(t, team.IsMember(db.DefaultContext, 2))
+ assert.False(t, team.IsMember(db.DefaultContext, 4))
+ assert.False(t, team.IsMember(db.DefaultContext, unittest.NonexistentID))
+
+ team = unittest.AssertExistsAndLoadBean(t, &organization.Team{ID: 2})
+ assert.True(t, team.IsMember(db.DefaultContext, 2))
+ assert.True(t, team.IsMember(db.DefaultContext, 4))
+ assert.False(t, team.IsMember(db.DefaultContext, unittest.NonexistentID))
+}
+
+func TestTeam_GetRepositories(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ test := func(teamID int64) {
+ team := unittest.AssertExistsAndLoadBean(t, &organization.Team{ID: teamID})
+ require.NoError(t, team.LoadRepositories(db.DefaultContext))
+ assert.Len(t, team.Repos, team.NumRepos)
+ for _, repo := range team.Repos {
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamRepo{TeamID: teamID, RepoID: repo.ID})
+ }
+ }
+ test(1)
+ test(3)
+}
+
+func TestTeam_GetMembers(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ test := func(teamID int64) {
+ team := unittest.AssertExistsAndLoadBean(t, &organization.Team{ID: teamID})
+ require.NoError(t, team.LoadMembers(db.DefaultContext))
+ assert.Len(t, team.Members, team.NumMembers)
+ for _, member := range team.Members {
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamUser{UID: member.ID, TeamID: teamID})
+ }
+ }
+ test(1)
+ test(3)
+}
+
+func TestGetTeam(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ testSuccess := func(orgID int64, name string) {
+ team, err := organization.GetTeam(db.DefaultContext, orgID, name)
+ require.NoError(t, err)
+ assert.EqualValues(t, orgID, team.OrgID)
+ assert.Equal(t, name, team.Name)
+ }
+ testSuccess(3, "Owners")
+ testSuccess(3, "team1")
+
+ _, err := organization.GetTeam(db.DefaultContext, 3, "nonexistent")
+ require.Error(t, err)
+ _, err = organization.GetTeam(db.DefaultContext, unittest.NonexistentID, "Owners")
+ require.Error(t, err)
+}
+
+func TestGetTeamByID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ testSuccess := func(teamID int64) {
+ team, err := organization.GetTeamByID(db.DefaultContext, teamID)
+ require.NoError(t, err)
+ assert.EqualValues(t, teamID, team.ID)
+ }
+ testSuccess(1)
+ testSuccess(2)
+ testSuccess(3)
+ testSuccess(4)
+
+ _, err := organization.GetTeamByID(db.DefaultContext, unittest.NonexistentID)
+ require.Error(t, err)
+}
+
+func TestIsTeamMember(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ test := func(orgID, teamID, userID int64, expected bool) {
+ isMember, err := organization.IsTeamMember(db.DefaultContext, orgID, teamID, userID)
+ require.NoError(t, err)
+ assert.Equal(t, expected, isMember)
+ }
+
+ test(3, 1, 2, true)
+ test(3, 1, 4, false)
+ test(3, 1, unittest.NonexistentID, false)
+
+ test(3, 2, 2, true)
+ test(3, 2, 4, true)
+
+ test(3, unittest.NonexistentID, unittest.NonexistentID, false)
+ test(unittest.NonexistentID, unittest.NonexistentID, unittest.NonexistentID, false)
+}
+
+func TestGetTeamMembers(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ test := func(teamID int64) {
+ team := unittest.AssertExistsAndLoadBean(t, &organization.Team{ID: teamID})
+ members, err := organization.GetTeamMembers(db.DefaultContext, &organization.SearchMembersOptions{
+ TeamID: teamID,
+ })
+ require.NoError(t, err)
+ assert.Len(t, members, team.NumMembers)
+ for _, member := range members {
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamUser{UID: member.ID, TeamID: teamID})
+ }
+ }
+ test(1)
+ test(3)
+}
+
+func TestGetUserTeams(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ test := func(userID int64) {
+ teams, _, err := organization.SearchTeam(db.DefaultContext, &organization.SearchTeamOptions{UserID: userID})
+ require.NoError(t, err)
+ for _, team := range teams {
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamUser{TeamID: team.ID, UID: userID})
+ }
+ }
+ test(2)
+ test(5)
+ test(unittest.NonexistentID)
+}
+
+func TestGetUserOrgTeams(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ test := func(orgID, userID int64) {
+ teams, err := organization.GetUserOrgTeams(db.DefaultContext, orgID, userID)
+ require.NoError(t, err)
+ for _, team := range teams {
+ assert.EqualValues(t, orgID, team.OrgID)
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamUser{TeamID: team.ID, UID: userID})
+ }
+ }
+ test(3, 2)
+ test(3, 4)
+ test(3, unittest.NonexistentID)
+}
+
+func TestHasTeamRepo(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ test := func(teamID, repoID int64, expected bool) {
+ team := unittest.AssertExistsAndLoadBean(t, &organization.Team{ID: teamID})
+ assert.Equal(t, expected, organization.HasTeamRepo(db.DefaultContext, team.OrgID, teamID, repoID))
+ }
+ test(1, 1, false)
+ test(1, 3, true)
+ test(1, 5, true)
+ test(1, unittest.NonexistentID, false)
+
+ test(2, 3, true)
+ test(2, 5, false)
+}
+
+func TestUsersInTeamsCount(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ test := func(teamIDs, userIDs []int64, expected int64) {
+ count, err := organization.UsersInTeamsCount(db.DefaultContext, teamIDs, userIDs)
+ require.NoError(t, err)
+ assert.Equal(t, expected, count)
+ }
+
+ test([]int64{2}, []int64{1, 2, 3, 4}, 1) // only userid 2
+	test([]int64{1, 2, 3, 4, 5}, []int64{2, 5}, 2)    // userid 2,5
+	test([]int64{1, 2, 3, 4, 5}, []int64{2, 3, 5}, 3) // userid 2,3,5
+}
+
+func TestInconsistentOwnerTeam(t *testing.T) {
+ defer unittest.OverrideFixtures(
+ unittest.FixturesOptions{
+ Dir: filepath.Join(setting.AppWorkPath, "models/fixtures/"),
+ Base: setting.AppWorkPath,
+ Dirs: []string{"models/organization/TestInconsistentOwnerTeam/"},
+ },
+ )()
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamUnit{ID: 1000, TeamID: 1000, AccessMode: perm.AccessModeNone})
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamUnit{ID: 1001, TeamID: 1000, AccessMode: perm.AccessModeNone})
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamUnit{ID: 1002, TeamID: 1000, AccessMode: perm.AccessModeNone})
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamUnit{ID: 1003, TeamID: 1000, AccessMode: perm.AccessModeNone})
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamUnit{ID: 1004, TeamID: 1000, AccessMode: perm.AccessModeNone})
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamUnit{ID: 1005, TeamID: 1000, AccessMode: perm.AccessModeNone})
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamUnit{ID: 1006, TeamID: 1000, AccessMode: perm.AccessModeNone})
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamUnit{ID: 1007, TeamID: 1000, AccessMode: perm.AccessModeNone})
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamUnit{ID: 1008, TeamID: 1000, AccessMode: perm.AccessModeNone})
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamUnit{ID: 1009, TeamID: 1000, AccessMode: perm.AccessModeNone})
+
+ count, err := organization.CountInconsistentOwnerTeams(db.DefaultContext)
+ require.NoError(t, err)
+ require.EqualValues(t, 1, count)
+
+ count, err = organization.FixInconsistentOwnerTeams(db.DefaultContext)
+ require.NoError(t, err)
+ require.EqualValues(t, 1, count)
+
+ count, err = organization.CountInconsistentOwnerTeams(db.DefaultContext)
+ require.NoError(t, err)
+ require.EqualValues(t, 0, count)
+
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamUnit{ID: 1000, AccessMode: perm.AccessModeOwner})
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamUnit{ID: 1001, AccessMode: perm.AccessModeOwner})
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamUnit{ID: 1002, AccessMode: perm.AccessModeOwner})
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamUnit{ID: 1003, AccessMode: perm.AccessModeOwner})
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamUnit{ID: 1004, AccessMode: perm.AccessModeOwner})
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamUnit{ID: 1007, AccessMode: perm.AccessModeOwner})
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamUnit{ID: 1008, AccessMode: perm.AccessModeOwner})
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamUnit{ID: 1009, AccessMode: perm.AccessModeOwner})
+
+ // External wiki and issue
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamUnit{ID: 1005, AccessMode: perm.AccessModeRead})
+ unittest.AssertExistsAndLoadBean(t, &organization.TeamUnit{ID: 1006, AccessMode: perm.AccessModeRead})
+}
diff --git a/models/organization/team_unit.go b/models/organization/team_unit.go
new file mode 100644
index 0000000..3087b70
--- /dev/null
+++ b/models/organization/team_unit.go
@@ -0,0 +1,51 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package organization
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ "code.gitea.io/gitea/models/unit"
+)
+
+// TeamUnit represents a team's access mode for a single unit type.
+type TeamUnit struct {
+ ID int64 `xorm:"pk autoincr"`
+ OrgID int64 `xorm:"INDEX"`
+ TeamID int64 `xorm:"UNIQUE(s)"`
+ Type unit.Type `xorm:"UNIQUE(s)"`
+ AccessMode perm.AccessMode
+}
+
+// Unit returns the unit definition for the team unit's type.
+func (t *TeamUnit) Unit() unit.Unit {
+ return unit.Units[t.Type]
+}
+
+func getUnitsByTeamID(ctx context.Context, teamID int64) (units []*TeamUnit, err error) {
+ return units, db.GetEngine(ctx).Where("team_id = ?", teamID).Find(&units)
+}
+
+// UpdateTeamUnits updates a team's units
+func UpdateTeamUnits(ctx context.Context, team *Team, units []TeamUnit) (err error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if _, err = db.GetEngine(ctx).Where("team_id = ?", team.ID).Delete(new(TeamUnit)); err != nil {
+ return err
+ }
+
+ if len(units) > 0 {
+ if err = db.Insert(ctx, units); err != nil {
+ return err
+ }
+ }
+
+ return committer.Commit()
+}
diff --git a/models/organization/team_user.go b/models/organization/team_user.go
new file mode 100644
index 0000000..ab767db
--- /dev/null
+++ b/models/organization/team_user.go
@@ -0,0 +1,89 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package organization
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ user_model "code.gitea.io/gitea/models/user"
+
+ "xorm.io/builder"
+)
+
+// TeamUser represents a team-user relation.
+type TeamUser struct {
+ ID int64 `xorm:"pk autoincr"`
+ OrgID int64 `xorm:"INDEX"`
+ TeamID int64 `xorm:"UNIQUE(s)"`
+ UID int64 `xorm:"UNIQUE(s)"`
+}
+
+// IsTeamMember returns true if the given user is a member of the team.
+func IsTeamMember(ctx context.Context, orgID, teamID, userID int64) (bool, error) {
+ return db.GetEngine(ctx).
+ Where("org_id=?", orgID).
+ And("team_id=?", teamID).
+ And("uid=?", userID).
+ Table("team_user").
+ Exist()
+}
+
+// GetTeamUsersByTeamID returns team users for a team
+func GetTeamUsersByTeamID(ctx context.Context, teamID int64) ([]*TeamUser, error) {
+ teamUsers := make([]*TeamUser, 0, 10)
+ return teamUsers, db.GetEngine(ctx).
+ Where("team_id=?", teamID).
+ Find(&teamUsers)
+}
+
+// SearchMembersOptions holds the search options
+type SearchMembersOptions struct {
+ db.ListOptions
+ TeamID int64
+}
+
+func (opts SearchMembersOptions) ToConds() builder.Cond {
+ cond := builder.NewCond()
+ if opts.TeamID > 0 {
+		cond = cond.And(builder.Eq{"team_id": opts.TeamID})
+ }
+ return cond
+}
+
+// GetTeamMembers returns all members in given team of organization.
+func GetTeamMembers(ctx context.Context, opts *SearchMembersOptions) ([]*user_model.User, error) {
+ var members []*user_model.User
+ sess := db.GetEngine(ctx)
+ if opts.TeamID > 0 {
+ sess = sess.In("id",
+ builder.Select("uid").
+ From("team_user").
+ Where(builder.Eq{"team_id": opts.TeamID}),
+ )
+ }
+ if opts.PageSize > 0 && opts.Page > 0 {
+ sess = sess.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize)
+ }
+ if err := sess.OrderBy("full_name, name").Find(&members); err != nil {
+ return nil, err
+ }
+ return members, nil
+}
+
+// IsUserInTeams returns whether the user is a member of any of the given teams
+func IsUserInTeams(ctx context.Context, userID int64, teamIDs []int64) (bool, error) {
+ return db.GetEngine(ctx).Where("uid=?", userID).In("team_id", teamIDs).Exist(new(TeamUser))
+}
+
+// UsersInTeamsCount counts the distinct users from userIDs that are members of any of the teams in teamIDs
+func UsersInTeamsCount(ctx context.Context, userIDs, teamIDs []int64) (int64, error) {
+ var ids []int64
+ if err := db.GetEngine(ctx).In("uid", userIDs).In("team_id", teamIDs).
+ Table("team_user").
+ Cols("uid").GroupBy("uid").Find(&ids); err != nil {
+ return 0, err
+ }
+ return int64(len(ids)), nil
+}
diff --git a/models/packages/alpine/search.go b/models/packages/alpine/search.go
new file mode 100644
index 0000000..77eccb9
--- /dev/null
+++ b/models/packages/alpine/search.go
@@ -0,0 +1,53 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package alpine
+
+import (
+ "context"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ alpine_module "code.gitea.io/gitea/modules/packages/alpine"
+)
+
+// GetBranches gets all available branches
+func GetBranches(ctx context.Context, ownerID int64) ([]string, error) {
+ return packages_model.GetDistinctPropertyValues(
+ ctx,
+ packages_model.TypeAlpine,
+ ownerID,
+ packages_model.PropertyTypeFile,
+ alpine_module.PropertyBranch,
+ nil,
+ )
+}
+
+// GetRepositories gets all available repositories for the given branch
+func GetRepositories(ctx context.Context, ownerID int64, branch string) ([]string, error) {
+ return packages_model.GetDistinctPropertyValues(
+ ctx,
+ packages_model.TypeAlpine,
+ ownerID,
+ packages_model.PropertyTypeFile,
+ alpine_module.PropertyRepository,
+ &packages_model.DistinctPropertyDependency{
+ Name: alpine_module.PropertyBranch,
+ Value: branch,
+ },
+ )
+}
+
+// GetArchitectures gets all available architectures for the given repository
+func GetArchitectures(ctx context.Context, ownerID int64, repository string) ([]string, error) {
+ return packages_model.GetDistinctPropertyValues(
+ ctx,
+ packages_model.TypeAlpine,
+ ownerID,
+ packages_model.PropertyTypeFile,
+ alpine_module.PropertyArchitecture,
+ &packages_model.DistinctPropertyDependency{
+ Name: alpine_module.PropertyRepository,
+ Value: repository,
+ },
+ )
+}
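+
+// Example (illustrative sketch, assuming ctx and ownerID are in scope): the
+// three helpers above form a branch -> repository -> architecture hierarchy
+// and can be chained:
+//
+//	branches, _ := GetBranches(ctx, ownerID)
+//	for _, branch := range branches {
+//		repositories, _ := GetRepositories(ctx, ownerID, branch)
+//		for _, repository := range repositories {
+//			architectures, _ := GetArchitectures(ctx, ownerID, repository)
+//			_ = architectures
+//		}
+//	}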
diff --git a/models/packages/conan/references.go b/models/packages/conan/references.go
new file mode 100644
index 0000000..0d888a1
--- /dev/null
+++ b/models/packages/conan/references.go
@@ -0,0 +1,170 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package conan
+
+import (
+ "context"
+ "strconv"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/packages"
+ conan_module "code.gitea.io/gitea/modules/packages/conan"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+var (
+ ErrRecipeReferenceNotExist = util.NewNotExistErrorf("recipe reference does not exist")
+ ErrPackageReferenceNotExist = util.NewNotExistErrorf("package reference does not exist")
+)
+
+// RecipeExists checks if a recipe exists
+func RecipeExists(ctx context.Context, ownerID int64, ref *conan_module.RecipeReference) (bool, error) {
+ revisions, err := GetRecipeRevisions(ctx, ownerID, ref)
+ if err != nil {
+ return false, err
+ }
+
+ return len(revisions) != 0, nil
+}
+
+type PropertyValue struct {
+ Value string
+ CreatedUnix timeutil.TimeStamp
+}
+
+func findPropertyValues(ctx context.Context, propertyName string, ownerID int64, name, version string, propertyFilter map[string]string) ([]*PropertyValue, error) {
+ var propsCond builder.Cond = builder.Eq{
+ "package_property.ref_type": packages.PropertyTypeFile,
+ }
+ propsCond = propsCond.And(builder.Expr("package_property.ref_id = package_file.id"))
+
+ propsCondBlock := builder.NewCond()
+ for name, value := range propertyFilter {
+ propsCondBlock = propsCondBlock.Or(builder.Eq{
+ "package_property.name": name,
+ "package_property.value": value,
+ })
+ }
+ propsCond = propsCond.And(propsCondBlock)
+
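+	// The strconv.Itoa key below builds the predicate `N = (SELECT COUNT(*) ...)`:
+	// the number of filter entries must equal the number of matching property
+	// rows, so every entry of propertyFilter has to match.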
+ var cond builder.Cond = builder.Eq{
+ "package.type": packages.TypeConan,
+ "package.owner_id": ownerID,
+ "package.lower_name": strings.ToLower(name),
+ "package_version.lower_version": strings.ToLower(version),
+ "package_version.is_internal": false,
+ strconv.Itoa(len(propertyFilter)): builder.Select("COUNT(*)").Where(propsCond).From("package_property"),
+ }
+
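+	// in2 selects the ids of all package files that belong to the requested
+	// package name/version and satisfy every property filter.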
+ in2 := builder.
+ Select("package_file.id").
+ From("package_file").
+ InnerJoin("package_version", "package_version.id = package_file.version_id").
+ InnerJoin("package", "package.id = package_version.package_id").
+ Where(cond)
+
+ query := builder.
+ Select("package_property.value, MAX(package_file.created_unix) AS created_unix").
+ From("package_property").
+ InnerJoin("package_file", "package_file.id = package_property.ref_id").
+ Where(builder.Eq{"package_property.name": propertyName}.And(builder.In("package_property.ref_id", in2))).
+ GroupBy("package_property.value").
+ OrderBy("created_unix DESC")
+
+ var values []*PropertyValue
+ return values, db.GetEngine(ctx).SQL(query).Find(&values)
+}
+
+// GetRecipeRevisions gets all revisions of a recipe
+func GetRecipeRevisions(ctx context.Context, ownerID int64, ref *conan_module.RecipeReference) ([]*PropertyValue, error) {
+ values, err := findPropertyValues(
+ ctx,
+ conan_module.PropertyRecipeRevision,
+ ownerID,
+ ref.Name,
+ ref.Version,
+ map[string]string{
+ conan_module.PropertyRecipeUser: ref.User,
+ conan_module.PropertyRecipeChannel: ref.Channel,
+ },
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ return values, nil
+}
+
+// GetLastRecipeRevision gets the latest recipe revision
+func GetLastRecipeRevision(ctx context.Context, ownerID int64, ref *conan_module.RecipeReference) (*PropertyValue, error) {
+ revisions, err := GetRecipeRevisions(ctx, ownerID, ref)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(revisions) == 0 {
+ return nil, ErrRecipeReferenceNotExist
+ }
+ return revisions[0], nil
+}
+
+// GetPackageReferences gets all package references of a recipe
+func GetPackageReferences(ctx context.Context, ownerID int64, ref *conan_module.RecipeReference) ([]*PropertyValue, error) {
+ values, err := findPropertyValues(
+ ctx,
+ conan_module.PropertyPackageReference,
+ ownerID,
+ ref.Name,
+ ref.Version,
+ map[string]string{
+ conan_module.PropertyRecipeUser: ref.User,
+ conan_module.PropertyRecipeChannel: ref.Channel,
+ conan_module.PropertyRecipeRevision: ref.Revision,
+ },
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ return values, nil
+}
+
+// GetPackageRevisions gets all revisions of a package
+func GetPackageRevisions(ctx context.Context, ownerID int64, ref *conan_module.PackageReference) ([]*PropertyValue, error) {
+ values, err := findPropertyValues(
+ ctx,
+ conan_module.PropertyPackageRevision,
+ ownerID,
+ ref.Recipe.Name,
+ ref.Recipe.Version,
+ map[string]string{
+ conan_module.PropertyRecipeUser: ref.Recipe.User,
+ conan_module.PropertyRecipeChannel: ref.Recipe.Channel,
+ conan_module.PropertyRecipeRevision: ref.Recipe.Revision,
+ conan_module.PropertyPackageReference: ref.Reference,
+ },
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ return values, nil
+}
+
+// GetLastPackageRevision gets the latest package revision
+func GetLastPackageRevision(ctx context.Context, ownerID int64, ref *conan_module.PackageReference) (*PropertyValue, error) {
+ revisions, err := GetPackageRevisions(ctx, ownerID, ref)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(revisions) == 0 {
+ return nil, ErrPackageReferenceNotExist
+ }
+ return revisions[0], nil
+}
diff --git a/models/packages/conan/search.go b/models/packages/conan/search.go
new file mode 100644
index 0000000..ab0bff5
--- /dev/null
+++ b/models/packages/conan/search.go
@@ -0,0 +1,149 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package conan
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/packages"
+ "code.gitea.io/gitea/modules/container"
+ conan_module "code.gitea.io/gitea/modules/packages/conan"
+
+ "xorm.io/builder"
+)
+
+// buildCondition creates a LIKE condition if the value contains a wildcard (*),
+// escaping literal underscores. Otherwise an Eq condition is used.
+func buildCondition(name, value string) builder.Cond {
+ if strings.Contains(value, "*") {
+ return builder.Like{name, strings.ReplaceAll(strings.ReplaceAll(value, "_", "\\_"), "*", "%")}
+ }
+ return builder.Eq{name: value}
+}
+
+type RecipeSearchOptions struct {
+ OwnerID int64
+ Name string
+ Version string
+ User string
+ Channel string
+}
+
+// SearchRecipes gets all recipes matching the search options
+func SearchRecipes(ctx context.Context, opts *RecipeSearchOptions) ([]string, error) {
+ var cond builder.Cond = builder.Eq{
+ "package_file.is_lead": true,
+ "package.type": packages.TypeConan,
+ "package.owner_id": opts.OwnerID,
+ "package_version.is_internal": false,
+ }
+
+ if opts.Name != "" {
+ cond = cond.And(buildCondition("package.lower_name", strings.ToLower(opts.Name)))
+ }
+ if opts.Version != "" {
+ cond = cond.And(buildCondition("package_version.lower_version", strings.ToLower(opts.Version)))
+ }
+ if opts.User != "" || opts.Channel != "" {
+ var propsCond builder.Cond = builder.Eq{
+ "package_property.ref_type": packages.PropertyTypeFile,
+ }
+ propsCond = propsCond.And(builder.Expr("package_property.ref_id = package_file.id"))
+
+ count := 0
+ propsCondBlock := builder.NewCond()
+ if opts.User != "" {
+ count++
+ propsCondBlock = propsCondBlock.Or(builder.Eq{"package_property.name": conan_module.PropertyRecipeUser}.And(buildCondition("package_property.value", opts.User)))
+ }
+ if opts.Channel != "" {
+ count++
+ propsCondBlock = propsCondBlock.Or(builder.Eq{"package_property.name": conan_module.PropertyRecipeChannel}.And(buildCondition("package_property.value", opts.Channel)))
+ }
+ propsCond = propsCond.And(propsCondBlock)
+
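+		// As in findPropertyValues, `count = (SELECT COUNT(*) ...)` requires every
+		// requested user/channel property to match.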
+ cond = cond.And(builder.Eq{
+ strconv.Itoa(count): builder.Select("COUNT(*)").Where(propsCond).From("package_property"),
+ })
+ }
+
+ query := builder.
+ Select("package.name, package_version.version, package_file.id").
+ From("package_file").
+ InnerJoin("package_version", "package_version.id = package_file.version_id").
+ InnerJoin("package", "package.id = package_version.package_id").
+ Where(cond)
+
+ results := make([]struct {
+ Name string
+ Version string
+ ID int64
+ }, 0, 5)
+ err := db.GetEngine(ctx).SQL(query).Find(&results)
+ if err != nil {
+ return nil, err
+ }
+
+ unique := make(container.Set[string])
+ for _, info := range results {
+ recipe := fmt.Sprintf("%s/%s", info.Name, info.Version)
+
+ props, _ := packages.GetProperties(ctx, packages.PropertyTypeFile, info.ID)
+ if len(props) > 0 {
+ var (
+ user = ""
+ channel = ""
+ )
+ for _, prop := range props {
+ if prop.Name == conan_module.PropertyRecipeUser {
+ user = prop.Value
+ }
+ if prop.Name == conan_module.PropertyRecipeChannel {
+ channel = prop.Value
+ }
+ }
+ if user != "" && channel != "" {
+ recipe = fmt.Sprintf("%s@%s/%s", recipe, user, channel)
+ }
+ }
+
+ unique.Add(recipe)
+ }
+
+ recipes := make([]string, 0, len(unique))
+ for recipe := range unique {
+ recipes = append(recipes, recipe)
+ }
+ return recipes, nil
+}
+
+// GetPackageInfo gets the Conaninfo for a package
+func GetPackageInfo(ctx context.Context, ownerID int64, ref *conan_module.PackageReference) (string, error) {
+ values, err := findPropertyValues(
+ ctx,
+ conan_module.PropertyPackageInfo,
+ ownerID,
+ ref.Recipe.Name,
+ ref.Recipe.Version,
+ map[string]string{
+ conan_module.PropertyRecipeUser: ref.Recipe.User,
+ conan_module.PropertyRecipeChannel: ref.Recipe.Channel,
+ conan_module.PropertyRecipeRevision: ref.Recipe.Revision,
+ conan_module.PropertyPackageReference: ref.Reference,
+ conan_module.PropertyPackageRevision: ref.Revision,
+ },
+ )
+ if err != nil {
+ return "", err
+ }
+
+ if len(values) == 0 {
+ return "", ErrPackageReferenceNotExist
+ }
+
+ return values[0].Value, nil
+}
diff --git a/models/packages/conda/search.go b/models/packages/conda/search.go
new file mode 100644
index 0000000..887441e
--- /dev/null
+++ b/models/packages/conda/search.go
@@ -0,0 +1,63 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package conda
+
+import (
+ "context"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/packages"
+ conda_module "code.gitea.io/gitea/modules/packages/conda"
+
+ "xorm.io/builder"
+)
+
+type FileSearchOptions struct {
+ OwnerID int64
+ Channel string
+ Subdir string
+ Filename string
+}
+
+// SearchFiles gets all files matching the search options
+func SearchFiles(ctx context.Context, opts *FileSearchOptions) ([]*packages.PackageFile, error) {
+ var cond builder.Cond = builder.Eq{
+ "package.type": packages.TypeConda,
+ "package.owner_id": opts.OwnerID,
+ "package_version.is_internal": false,
+ }
+
+ if opts.Filename != "" {
+ cond = cond.And(builder.Eq{
+ "package_file.lower_name": strings.ToLower(opts.Filename),
+ })
+ }
+
+ var versionPropsCond builder.Cond = builder.Eq{
+ "package_property.ref_type": packages.PropertyTypePackage,
+ "package_property.name": conda_module.PropertyChannel,
+ "package_property.value": opts.Channel,
+ }
+
+ cond = cond.And(builder.In("package.id", builder.Select("package_property.ref_id").Where(versionPropsCond).From("package_property")))
+
+ var filePropsCond builder.Cond = builder.Eq{
+ "package_property.ref_type": packages.PropertyTypeFile,
+ "package_property.name": conda_module.PropertySubdir,
+ "package_property.value": opts.Subdir,
+ }
+
+ cond = cond.And(builder.In("package_file.id", builder.Select("package_property.ref_id").Where(filePropsCond).From("package_property")))
+
+ sess := db.GetEngine(ctx).
+ Select("package_file.*").
+ Table("package_file").
+ Join("INNER", "package_version", "package_version.id = package_file.version_id").
+ Join("INNER", "package", "package.id = package_version.package_id").
+ Where(cond)
+
+ pfs := make([]*packages.PackageFile, 0, 10)
+ return pfs, sess.Find(&pfs)
+}
diff --git a/models/packages/container/const.go b/models/packages/container/const.go
new file mode 100644
index 0000000..0dfbda0
--- /dev/null
+++ b/models/packages/container/const.go
@@ -0,0 +1,9 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package container
+
+const (
+ ManifestFilename = "manifest.json"
+ UploadVersion = "_upload"
+)
diff --git a/models/packages/container/search.go b/models/packages/container/search.go
new file mode 100644
index 0000000..5df3511
--- /dev/null
+++ b/models/packages/container/search.go
@@ -0,0 +1,285 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package container
+
+import (
+ "context"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/packages"
+ user_model "code.gitea.io/gitea/models/user"
+ container_module "code.gitea.io/gitea/modules/packages/container"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+var ErrContainerBlobNotExist = util.NewNotExistErrorf("container blob does not exist")
+
+type BlobSearchOptions struct {
+ OwnerID int64
+ Image string
+ Digest string
+ Tag string
+ IsManifest bool
+ Repository string
+}
+
+func (opts *BlobSearchOptions) toConds() builder.Cond {
+ var cond builder.Cond = builder.Eq{
+ "package.type": packages.TypeContainer,
+ }
+
+ if opts.OwnerID != 0 {
+ cond = cond.And(builder.Eq{"package.owner_id": opts.OwnerID})
+ }
+ if opts.Image != "" {
+ cond = cond.And(builder.Eq{"package.lower_name": strings.ToLower(opts.Image)})
+ }
+ if opts.Tag != "" {
+ cond = cond.And(builder.Eq{"package_version.lower_version": strings.ToLower(opts.Tag)})
+ }
+ if opts.IsManifest {
+ cond = cond.And(builder.Eq{"package_file.lower_name": ManifestFilename})
+ }
+ if opts.Digest != "" {
+ var propsCond builder.Cond = builder.Eq{
+ "package_property.ref_type": packages.PropertyTypeFile,
+ "package_property.name": container_module.PropertyDigest,
+ "package_property.value": opts.Digest,
+ }
+
+ cond = cond.And(builder.In("package_file.id", builder.Select("package_property.ref_id").Where(propsCond).From("package_property")))
+ }
+ if opts.Repository != "" {
+ var propsCond builder.Cond = builder.Eq{
+ "package_property.ref_type": packages.PropertyTypePackage,
+ "package_property.name": container_module.PropertyRepository,
+ "package_property.value": opts.Repository,
+ }
+
+ cond = cond.And(builder.In("package.id", builder.Select("package_property.ref_id").Where(propsCond).From("package_property")))
+ }
+
+ return cond
+}
+
+// GetContainerBlob gets the container blob matching the blob search options
+// If multiple matching blobs are found (manifests with the same digest) the first (according to the database) is selected.
+func GetContainerBlob(ctx context.Context, opts *BlobSearchOptions) (*packages.PackageFileDescriptor, error) {
+ pfds, err := getContainerBlobsLimit(ctx, opts, 1)
+ if err != nil {
+ return nil, err
+ }
+ if len(pfds) != 1 {
+ return nil, ErrContainerBlobNotExist
+ }
+
+ return pfds[0], nil
+}
+
+// GetContainerBlobs gets the container blobs matching the blob search options
+func GetContainerBlobs(ctx context.Context, opts *BlobSearchOptions) ([]*packages.PackageFileDescriptor, error) {
+ return getContainerBlobsLimit(ctx, opts, 0)
+}
+
+func getContainerBlobsLimit(ctx context.Context, opts *BlobSearchOptions, limit int) ([]*packages.PackageFileDescriptor, error) {
+ pfs := make([]*packages.PackageFile, 0, limit)
+ sess := db.GetEngine(ctx).
+ Join("INNER", "package_version", "package_version.id = package_file.version_id").
+ Join("INNER", "package", "package.id = package_version.package_id").
+ Where(opts.toConds())
+
+ if limit > 0 {
+ sess = sess.Limit(limit)
+ }
+
+ if err := sess.Find(&pfs); err != nil {
+ return nil, err
+ }
+
+ return packages.GetPackageFileDescriptors(ctx, pfs)
+}
+
+// GetManifestVersions gets all package versions representing the matching manifest
+func GetManifestVersions(ctx context.Context, opts *BlobSearchOptions) ([]*packages.PackageVersion, error) {
+ cond := opts.toConds().And(builder.Eq{"package_version.is_internal": false})
+
+ pvs := make([]*packages.PackageVersion, 0, 10)
+ return pvs, db.GetEngine(ctx).
+ Join("INNER", "package", "package.id = package_version.package_id").
+ Join("INNER", "package_file", "package_file.version_id = package_version.id").
+ Where(cond).
+ Find(&pvs)
+}
+
+// GetImageTags gets a sorted list of the tags of an image.
+// The result is suitable for the tag list API call: n limits the number of
+// results and last is the tag to continue after.
+func GetImageTags(ctx context.Context, ownerID int64, image string, n int, last string) ([]string, error) {
+ // Short circuit: n == 0 should return an empty list
+ if n == 0 {
+ return []string{}, nil
+ }
+
+ var cond builder.Cond = builder.Eq{
+ "package.type": packages.TypeContainer,
+ "package.owner_id": ownerID,
+ "package.lower_name": strings.ToLower(image),
+ "package_version.is_internal": false,
+ }
+
+ var propsCond builder.Cond = builder.Eq{
+ "package_property.ref_type": packages.PropertyTypeVersion,
+ "package_property.name": container_module.PropertyManifestTagged,
+ }
+
+ cond = cond.And(builder.In("package_version.id", builder.Select("package_property.ref_id").Where(propsCond).From("package_property")))
+
+ if last != "" {
+ cond = cond.And(builder.Gt{"package_version.lower_version": strings.ToLower(last)})
+ }
+
+ sess := db.GetEngine(ctx).
+ Table("package_version").
+ Select("package_version.lower_version").
+ Join("INNER", "package", "package.id = package_version.package_id").
+ Where(cond).
+ Asc("package_version.lower_version")
+
+ var tags []string
+ if n > 0 {
+ sess = sess.Limit(n)
+
+ tags = make([]string, 0, n)
+ } else {
+ tags = make([]string, 0, 10)
+ }
+
+ return tags, sess.Find(&tags)
+}
+
+type ImageTagsSearchOptions struct {
+ PackageID int64
+ Query string
+ IsTagged bool
+ Sort packages.VersionSort
+ db.Paginator
+}
+
+func (opts *ImageTagsSearchOptions) toConds() builder.Cond {
+ var cond builder.Cond = builder.Eq{
+ "package.type": packages.TypeContainer,
+ "package.id": opts.PackageID,
+ "package_version.is_internal": false,
+ }
+
+ if opts.Query != "" {
+ cond = cond.And(builder.Like{"package_version.lower_version", strings.ToLower(opts.Query)})
+ }
+
+ var propsCond builder.Cond = builder.Eq{
+ "package_property.ref_type": packages.PropertyTypeVersion,
+ "package_property.name": container_module.PropertyManifestTagged,
+ }
+
+ in := builder.In("package_version.id", builder.Select("package_property.ref_id").Where(propsCond).From("package_property"))
+
+ if opts.IsTagged {
+ cond = cond.And(in)
+ } else {
+ cond = cond.And(builder.Not{in})
+ }
+
+ return cond
+}
+
+func (opts *ImageTagsSearchOptions) configureOrderBy(e db.Engine) {
+ switch opts.Sort {
+ case packages.SortVersionDesc:
+ e.Desc("package_version.version")
+ case packages.SortVersionAsc:
+ e.Asc("package_version.version")
+ case packages.SortCreatedAsc:
+ e.Asc("package_version.created_unix")
+ default:
+ e.Desc("package_version.created_unix")
+ }
+
+	// Additionally sort by id for a stable order when the sort field has duplicates
+ e.Asc("package_version.id")
+}
+
+// SearchImageTags gets a sorted list of the tags of an image
+func SearchImageTags(ctx context.Context, opts *ImageTagsSearchOptions) ([]*packages.PackageVersion, int64, error) {
+ sess := db.GetEngine(ctx).
+ Join("INNER", "package", "package.id = package_version.package_id").
+ Where(opts.toConds())
+
+ opts.configureOrderBy(sess)
+
+ if opts.Paginator != nil {
+ sess = db.SetSessionPagination(sess, opts)
+ }
+
+ pvs := make([]*packages.PackageVersion, 0, 10)
+ count, err := sess.FindAndCount(&pvs)
+ return pvs, count, err
+}
+
+// SearchExpiredUploadedBlobs gets all uploaded blobs that are older than the specified duration
+func SearchExpiredUploadedBlobs(ctx context.Context, olderThan time.Duration) ([]*packages.PackageFile, error) {
+ var cond builder.Cond = builder.Eq{
+ "package_version.is_internal": true,
+ "package_version.lower_version": UploadVersion,
+ "package.type": packages.TypeContainer,
+ }
+ cond = cond.And(builder.Lt{"package_file.created_unix": time.Now().Add(-olderThan).Unix()})
+
+ var pfs []*packages.PackageFile
+ return pfs, db.GetEngine(ctx).
+ Join("INNER", "package_version", "package_version.id = package_file.version_id").
+ Join("INNER", "package", "package.id = package_version.package_id").
+ Where(cond).
+ Find(&pfs)
+}
+
+// GetRepositories gets a sorted list of all repositories visible to the actor, paginated with n and last
+func GetRepositories(ctx context.Context, actor *user_model.User, n int, last string) ([]string, error) {
+ var cond builder.Cond = builder.Eq{
+ "package.type": packages.TypeContainer,
+ "package_property.ref_type": packages.PropertyTypePackage,
+ "package_property.name": container_module.PropertyRepository,
+ }
+
+ cond = cond.And(builder.Exists(
+ builder.
+ Select("package_version.id").
+ Where(builder.Eq{"package_version.is_internal": false}.And(builder.Expr("package.id = package_version.package_id"))).
+ From("package_version"),
+ ))
+
+ if last != "" {
+ cond = cond.And(builder.Gt{"package_property.value": strings.ToLower(last)})
+ }
+
+ if actor.IsGhost() {
+ actor = nil
+ }
+
+ cond = cond.And(user_model.BuildCanSeeUserCondition(actor))
+
+ sess := db.GetEngine(ctx).
+ Table("package").
+ Select("package_property.value").
+ Join("INNER", "user", "`user`.id = package.owner_id").
+ Join("INNER", "package_property", "package_property.ref_id = package.id").
+ Where(cond).
+ Asc("package_property.value").
+ Limit(n)
+
+ repositories := make([]string, 0, n)
+ return repositories, sess.Find(&repositories)
+}
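+
+// Example (illustrative sketch, assuming ctx, ownerID and a digest string are
+// in scope): resolving a manifest file by digest:
+//
+//	pfd, err := GetContainerBlob(ctx, &BlobSearchOptions{
+//		OwnerID:    ownerID,
+//		Image:      "library/alpine",
+//		Digest:     digest,
+//		IsManifest: true,
+//	})
+//	// err is ErrContainerBlobNotExist when nothing matches
+//	_, _ = pfd, err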
diff --git a/models/packages/cran/search.go b/models/packages/cran/search.go
new file mode 100644
index 0000000..8a8b52a
--- /dev/null
+++ b/models/packages/cran/search.go
@@ -0,0 +1,90 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package cran
+
+import (
+ "context"
+ "strconv"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/packages"
+ cran_module "code.gitea.io/gitea/modules/packages/cran"
+
+ "xorm.io/builder"
+)
+
+type SearchOptions struct {
+ OwnerID int64
+ FileType string
+ Platform string
+ RVersion string
+ Filename string
+}
+
+func (opts *SearchOptions) toConds() builder.Cond {
+ var cond builder.Cond = builder.Eq{
+ "package.type": packages.TypeCran,
+ "package.owner_id": opts.OwnerID,
+ "package_version.is_internal": false,
+ }
+
+ if opts.Filename != "" {
+ cond = cond.And(builder.Eq{"package_file.lower_name": strings.ToLower(opts.Filename)})
+ }
+
+ var propsCond builder.Cond = builder.Eq{
+ "package_property.ref_type": packages.PropertyTypeFile,
+ }
+ propsCond = propsCond.And(builder.Expr("package_property.ref_id = package_file.id"))
+
+ count := 1
+ propsCondBlock := builder.Eq{"package_property.name": cran_module.PropertyType}.And(builder.Eq{"package_property.value": opts.FileType})
+
+ if opts.Platform != "" {
+ count += 2
+ propsCondBlock = propsCondBlock.
+ Or(builder.Eq{"package_property.name": cran_module.PropertyPlatform}.And(builder.Eq{"package_property.value": opts.Platform})).
+ Or(builder.Eq{"package_property.name": cran_module.PropertyRVersion}.And(builder.Eq{"package_property.value": opts.RVersion}))
+ }
+
+ propsCond = propsCond.And(propsCondBlock)
+
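+	// `count = (SELECT COUNT(*) ...)` requires the file type and, if given, the
+	// platform and R version properties to all match.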
+ cond = cond.And(builder.Eq{
+ strconv.Itoa(count): builder.Select("COUNT(*)").Where(propsCond).From("package_property"),
+ })
+
+ return cond
+}
+
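+// SearchLatestVersions gets the latest version of every package matching the
+// search options. The LEFT JOIN pairs each version with any newer non-internal
+// version (pv2) of the same package; `pv2.id IS NULL` keeps only the newest.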
+func SearchLatestVersions(ctx context.Context, opts *SearchOptions) ([]*packages.PackageVersion, error) {
+ sess := db.GetEngine(ctx).
+ Table("package_version").
+ Select("package_version.*").
+ Join("LEFT", "package_version pv2", builder.Expr("package_version.package_id = pv2.package_id AND pv2.is_internal = ? AND (package_version.created_unix < pv2.created_unix OR (package_version.created_unix = pv2.created_unix AND package_version.id < pv2.id))", false)).
+ Join("INNER", "package", "package.id = package_version.package_id").
+ Join("INNER", "package_file", "package_file.version_id = package_version.id").
+ Where(opts.toConds().And(builder.Expr("pv2.id IS NULL"))).
+ Asc("package.name")
+
+ pvs := make([]*packages.PackageVersion, 0, 10)
+ return pvs, sess.Find(&pvs)
+}
+
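+// SearchFile gets the first package file matching the search options.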
+func SearchFile(ctx context.Context, opts *SearchOptions) (*packages.PackageFile, error) {
+ sess := db.GetEngine(ctx).
+ Table("package_version").
+ Select("package_file.*").
+ Join("INNER", "package", "package.id = package_version.package_id").
+ Join("INNER", "package_file", "package_file.version_id = package_version.id").
+ Where(opts.toConds())
+
+ pf := &packages.PackageFile{}
+ if has, err := sess.Get(pf); err != nil {
+ return nil, err
+ } else if !has {
+ return nil, packages.ErrPackageFileNotExist
+ }
+ return pf, nil
+}
diff --git a/models/packages/debian/search.go b/models/packages/debian/search.go
new file mode 100644
index 0000000..abf23e4
--- /dev/null
+++ b/models/packages/debian/search.go
@@ -0,0 +1,157 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package debian
+
+import (
+ "context"
+ "strconv"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/packages"
+ debian_module "code.gitea.io/gitea/modules/packages/debian"
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/builder"
+)
+
+type PackageSearchOptions struct {
+ OwnerID int64
+ Distribution string
+ Component string
+ Architecture string
+}
+
+func (opts *PackageSearchOptions) toCond() builder.Cond {
+ var cond builder.Cond = builder.Eq{
+ "package_file.is_lead": true,
+ "package.type": packages.TypeDebian,
+ "package.owner_id": opts.OwnerID,
+ "package.is_internal": false,
+ "package_version.is_internal": false,
+ }
+
+ props := make(map[string]string)
+ if opts.Distribution != "" {
+ props[debian_module.PropertyDistribution] = opts.Distribution
+ }
+ if opts.Component != "" {
+ props[debian_module.PropertyComponent] = opts.Component
+ }
+ if opts.Architecture != "" {
+ props[debian_module.PropertyArchitecture] = opts.Architecture
+ }
+
+ if len(props) > 0 {
+ var propsCond builder.Cond = builder.Eq{
+ "package_property.ref_type": packages.PropertyTypeFile,
+ }
+ propsCond = propsCond.And(builder.Expr("package_property.ref_id = package_file.id"))
+
+ propsCondBlock := builder.NewCond()
+ for name, value := range props {
+ propsCondBlock = propsCondBlock.Or(builder.Eq{
+ "package_property.name": name,
+ "package_property.value": value,
+ })
+ }
+ propsCond = propsCond.And(propsCondBlock)
+
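+		// `len(props) = (SELECT COUNT(*) ...)` requires every requested property
+		// (distribution, component, architecture) to match.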
+ cond = cond.And(builder.Eq{
+ strconv.Itoa(len(props)): builder.Select("COUNT(*)").Where(propsCond).From("package_property"),
+ })
+ }
+
+ return cond
+}
+
+// ExistPackages tests if there are packages matching the search options
+func ExistPackages(ctx context.Context, opts *PackageSearchOptions) (bool, error) {
+ return db.GetEngine(ctx).
+ Table("package_file").
+ Join("INNER", "package_version", "package_version.id = package_file.version_id").
+ Join("INNER", "package", "package.id = package_version.package_id").
+ Where(opts.toCond()).
+ Exist(new(packages.PackageFile))
+}
+
+// SearchPackages gets the packages matching the search options
+func SearchPackages(ctx context.Context, opts *PackageSearchOptions, iter func(*packages.PackageFileDescriptor)) error {
+ var start int
+ batchSize := setting.Database.IterateBufferSize
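+	// Page through the matching files in batches of IterateBufferSize, checking
+	// for context cancellation between batches.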
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ beans := make([]*packages.PackageFile, 0, batchSize)
+
+ if err := db.GetEngine(ctx).
+ Table("package_file").
+ Select("package_file.*").
+ Join("INNER", "package_version", "package_version.id = package_file.version_id").
+ Join("INNER", "package", "package.id = package_version.package_id").
+ Where(opts.toCond()).
+ Asc("package.lower_name", "package_version.created_unix").
+ Limit(batchSize, start).
+ Find(&beans); err != nil {
+ return err
+ }
+ if len(beans) == 0 {
+ return nil
+ }
+ start += len(beans)
+
+ for _, bean := range beans {
+ pfd, err := packages.GetPackageFileDescriptor(ctx, bean)
+ if err != nil {
+ return err
+ }
+
+ iter(pfd)
+ }
+ }
+ }
+}
+
+// GetDistributions gets all available distributions
+func GetDistributions(ctx context.Context, ownerID int64) ([]string, error) {
+ return packages.GetDistinctPropertyValues(
+ ctx,
+ packages.TypeDebian,
+ ownerID,
+ packages.PropertyTypeFile,
+ debian_module.PropertyDistribution,
+ nil,
+ )
+}
+
+// GetComponents gets all available components for the given distribution
+func GetComponents(ctx context.Context, ownerID int64, distribution string) ([]string, error) {
+ return packages.GetDistinctPropertyValues(
+ ctx,
+ packages.TypeDebian,
+ ownerID,
+ packages.PropertyTypeFile,
+ debian_module.PropertyComponent,
+ &packages.DistinctPropertyDependency{
+ Name: debian_module.PropertyDistribution,
+ Value: distribution,
+ },
+ )
+}
+
+// GetArchitectures gets all available architectures for the given distribution
+func GetArchitectures(ctx context.Context, ownerID int64, distribution string) ([]string, error) {
+ return packages.GetDistinctPropertyValues(
+ ctx,
+ packages.TypeDebian,
+ ownerID,
+ packages.PropertyTypeFile,
+ debian_module.PropertyArchitecture,
+ &packages.DistinctPropertyDependency{
+ Name: debian_module.PropertyDistribution,
+ Value: distribution,
+ },
+ )
+}
diff --git a/models/packages/debian/search_test.go b/models/packages/debian/search_test.go
new file mode 100644
index 0000000..104a014
--- /dev/null
+++ b/models/packages/debian/search_test.go
@@ -0,0 +1,93 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package debian
+
+import (
+ "strings"
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ packages_model "code.gitea.io/gitea/models/packages"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/packages"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/test"
+ packages_service "code.gitea.io/gitea/services/packages"
+
+ _ "code.gitea.io/gitea/models"
+ _ "code.gitea.io/gitea/models/actions"
+ _ "code.gitea.io/gitea/models/activities"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestMain(m *testing.M) {
+ unittest.MainTest(m)
+}
+
+func preparePackage(t *testing.T, owner *user_model.User, name string) {
+ t.Helper()
+
+ data, err := packages.CreateHashedBufferFromReader(strings.NewReader("data"))
+ require.NoError(t, err)
+
+ _, _, err = packages_service.CreatePackageOrAddFileToExisting(
+ db.DefaultContext,
+ &packages_service.PackageCreationInfo{
+ PackageInfo: packages_service.PackageInfo{
+ Owner: owner,
+ PackageType: packages_model.TypeDebian,
+ Name: name,
+ },
+ Creator: owner,
+ },
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: name,
+ },
+ Data: data,
+ Creator: owner,
+ IsLead: true,
+ },
+ )
+
+ require.NoError(t, err)
+}
+
+func TestSearchPackages(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ defer test.MockVariableValue(&setting.Database.IterateBufferSize, 1)()
+
+ user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+ user3 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 3})
+
+ preparePackage(t, user2, "debian-1")
+ preparePackage(t, user2, "debian-2")
+ preparePackage(t, user3, "debian-1")
+
+ packageFiles := []string{}
+ require.NoError(t, SearchPackages(db.DefaultContext, &PackageSearchOptions{
+ OwnerID: user2.ID,
+ }, func(pfd *packages_model.PackageFileDescriptor) {
+ assert.NotNil(t, pfd)
+ packageFiles = append(packageFiles, pfd.File.Name)
+ }))
+
+ assert.Len(t, packageFiles, 2)
+ assert.Contains(t, packageFiles, "debian-1")
+ assert.Contains(t, packageFiles, "debian-2")
+
+ packageFiles = []string{}
+ require.NoError(t, SearchPackages(db.DefaultContext, &PackageSearchOptions{
+ OwnerID: user3.ID,
+ }, func(pfd *packages_model.PackageFileDescriptor) {
+ assert.NotNil(t, pfd)
+ packageFiles = append(packageFiles, pfd.File.Name)
+ }))
+
+ assert.Len(t, packageFiles, 1)
+ assert.Contains(t, packageFiles, "debian-1")
+}
diff --git a/models/packages/descriptor.go b/models/packages/descriptor.go
new file mode 100644
index 0000000..803b73c
--- /dev/null
+++ b/models/packages/descriptor.go
@@ -0,0 +1,260 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package packages
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/url"
+
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/packages/alpine"
+ "code.gitea.io/gitea/modules/packages/arch"
+ "code.gitea.io/gitea/modules/packages/cargo"
+ "code.gitea.io/gitea/modules/packages/chef"
+ "code.gitea.io/gitea/modules/packages/composer"
+ "code.gitea.io/gitea/modules/packages/conan"
+ "code.gitea.io/gitea/modules/packages/conda"
+ "code.gitea.io/gitea/modules/packages/container"
+ "code.gitea.io/gitea/modules/packages/cran"
+ "code.gitea.io/gitea/modules/packages/debian"
+ "code.gitea.io/gitea/modules/packages/helm"
+ "code.gitea.io/gitea/modules/packages/maven"
+ "code.gitea.io/gitea/modules/packages/npm"
+ "code.gitea.io/gitea/modules/packages/nuget"
+ "code.gitea.io/gitea/modules/packages/pub"
+ "code.gitea.io/gitea/modules/packages/pypi"
+ "code.gitea.io/gitea/modules/packages/rpm"
+ "code.gitea.io/gitea/modules/packages/rubygems"
+ "code.gitea.io/gitea/modules/packages/swift"
+ "code.gitea.io/gitea/modules/packages/vagrant"
+ "code.gitea.io/gitea/modules/util"
+
+ "github.com/hashicorp/go-version"
+)
+
+// PackagePropertyList is a list of package properties
+type PackagePropertyList []*PackageProperty
+
+// GetByName gets the first property value with the specific name
+func (l PackagePropertyList) GetByName(name string) string {
+ for _, pp := range l {
+ if pp.Name == name {
+ return pp.Value
+ }
+ }
+ return ""
+}
+
+// PackageDescriptor describes a package
+type PackageDescriptor struct {
+ Package *Package
+ Owner *user_model.User
+ Repository *repo_model.Repository
+ Version *PackageVersion
+ SemVer *version.Version
+ Creator *user_model.User
+ PackageProperties PackagePropertyList
+ VersionProperties PackagePropertyList
+ Metadata any
+ Files []*PackageFileDescriptor
+}
+
+// PackageFileDescriptor describes a package file
+type PackageFileDescriptor struct {
+ File *PackageFile
+ Blob *PackageBlob
+ Properties PackagePropertyList
+}
+
+// PackageWebLink returns the relative package web link
+func (pd *PackageDescriptor) PackageWebLink() string {
+ return fmt.Sprintf("%s/-/packages/%s/%s", pd.Owner.HomeLink(), string(pd.Package.Type), url.PathEscape(pd.Package.LowerName))
+}
+
+// VersionWebLink returns the relative package version web link
+func (pd *PackageDescriptor) VersionWebLink() string {
+ return fmt.Sprintf("%s/%s", pd.PackageWebLink(), url.PathEscape(pd.Version.LowerVersion))
+}
+
+// PackageHTMLURL returns the absolute package HTML URL
+func (pd *PackageDescriptor) PackageHTMLURL() string {
+ return fmt.Sprintf("%s/-/packages/%s/%s", pd.Owner.HTMLURL(), string(pd.Package.Type), url.PathEscape(pd.Package.LowerName))
+}
+
+// VersionHTMLURL returns the absolute package version HTML URL
+func (pd *PackageDescriptor) VersionHTMLURL() string {
+ return fmt.Sprintf("%s/%s", pd.PackageHTMLURL(), url.PathEscape(pd.Version.LowerVersion))
+}
+
+// CalculateBlobSize returns the total blobs size in bytes
+func (pd *PackageDescriptor) CalculateBlobSize() int64 {
+ size := int64(0)
+ for _, f := range pd.Files {
+ size += f.Blob.Size
+ }
+ return size
+}
+
+// GetPackageDescriptor gets the package descriptor for a version
+func GetPackageDescriptor(ctx context.Context, pv *PackageVersion) (*PackageDescriptor, error) {
+ p, err := GetPackageByID(ctx, pv.PackageID)
+ if err != nil {
+ return nil, err
+ }
+ o, err := user_model.GetUserByID(ctx, p.OwnerID)
+ if err != nil {
+ return nil, err
+ }
+ repository, err := repo_model.GetRepositoryByID(ctx, p.RepoID)
+ if err != nil && !repo_model.IsErrRepoNotExist(err) {
+ return nil, err
+ }
+ creator, err := user_model.GetUserByID(ctx, pv.CreatorID)
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ creator = user_model.NewGhostUser()
+ } else {
+ return nil, err
+ }
+ }
+ var semVer *version.Version
+ if p.SemverCompatible {
+ semVer, err = version.NewVersion(pv.Version)
+ if err != nil {
+ return nil, err
+ }
+ }
+ pps, err := GetProperties(ctx, PropertyTypePackage, p.ID)
+ if err != nil {
+ return nil, err
+ }
+ pvps, err := GetProperties(ctx, PropertyTypeVersion, pv.ID)
+ if err != nil {
+ return nil, err
+ }
+ pfs, err := GetFilesByVersionID(ctx, pv.ID)
+ if err != nil {
+ return nil, err
+ }
+
+ pfds, err := GetPackageFileDescriptors(ctx, pfs)
+ if err != nil {
+ return nil, err
+ }
+
+ var metadata any
+ switch p.Type {
+ case TypeAlpine:
+ metadata = &alpine.VersionMetadata{}
+ case TypeArch:
+ metadata = &arch.VersionMetadata{}
+ case TypeCargo:
+ metadata = &cargo.Metadata{}
+ case TypeChef:
+ metadata = &chef.Metadata{}
+ case TypeComposer:
+ metadata = &composer.Metadata{}
+ case TypeConan:
+ metadata = &conan.Metadata{}
+ case TypeConda:
+ metadata = &conda.VersionMetadata{}
+ case TypeContainer:
+ metadata = &container.Metadata{}
+ case TypeCran:
+ metadata = &cran.Metadata{}
+ case TypeDebian:
+ metadata = &debian.Metadata{}
+ case TypeGeneric:
+ // generic packages have no metadata
+ case TypeGo:
+ // go packages have no metadata
+ case TypeHelm:
+ metadata = &helm.Metadata{}
+ case TypeNuGet:
+ metadata = &nuget.Metadata{}
+ case TypeNpm:
+ metadata = &npm.Metadata{}
+ case TypeMaven:
+ metadata = &maven.Metadata{}
+ case TypePub:
+ metadata = &pub.Metadata{}
+ case TypePyPI:
+ metadata = &pypi.Metadata{}
+ case TypeRpm:
+ metadata = &rpm.VersionMetadata{}
+ case TypeRubyGems:
+ metadata = &rubygems.Metadata{}
+ case TypeSwift:
+ metadata = &swift.Metadata{}
+ case TypeVagrant:
+ metadata = &vagrant.Metadata{}
+ default:
+ panic(fmt.Sprintf("unknown package type: %s", string(p.Type)))
+ }
+ if metadata != nil {
+ if err := json.Unmarshal([]byte(pv.MetadataJSON), &metadata); err != nil {
+ return nil, err
+ }
+ }
+
+ return &PackageDescriptor{
+ Package: p,
+ Owner: o,
+ Repository: repository,
+ Version: pv,
+ SemVer: semVer,
+ Creator: creator,
+ PackageProperties: PackagePropertyList(pps),
+ VersionProperties: PackagePropertyList(pvps),
+ Metadata: metadata,
+ Files: pfds,
+ }, nil
+}
+
+// GetPackageFileDescriptor gets a package file descriptor for a package file
+func GetPackageFileDescriptor(ctx context.Context, pf *PackageFile) (*PackageFileDescriptor, error) {
+ pb, err := GetBlobByID(ctx, pf.BlobID)
+ if err != nil {
+ return nil, err
+ }
+ pfps, err := GetProperties(ctx, PropertyTypeFile, pf.ID)
+ if err != nil {
+ return nil, err
+ }
+ return &PackageFileDescriptor{
+ pf,
+ pb,
+ PackagePropertyList(pfps),
+ }, nil
+}
+
+// GetPackageFileDescriptors gets the package file descriptors for the package files
+func GetPackageFileDescriptors(ctx context.Context, pfs []*PackageFile) ([]*PackageFileDescriptor, error) {
+ pfds := make([]*PackageFileDescriptor, 0, len(pfs))
+ for _, pf := range pfs {
+ pfd, err := GetPackageFileDescriptor(ctx, pf)
+ if err != nil {
+ return nil, err
+ }
+ pfds = append(pfds, pfd)
+ }
+ return pfds, nil
+}
+
+// GetPackageDescriptors gets the package descriptors for the versions
+func GetPackageDescriptors(ctx context.Context, pvs []*PackageVersion) ([]*PackageDescriptor, error) {
+ pds := make([]*PackageDescriptor, 0, len(pvs))
+ for _, pv := range pvs {
+ pd, err := GetPackageDescriptor(ctx, pv)
+ if err != nil {
+ return nil, err
+ }
+ pds = append(pds, pd)
+ }
+ return pds, nil
+}
diff --git a/models/packages/nuget/search.go b/models/packages/nuget/search.go
new file mode 100644
index 0000000..7a505ff
--- /dev/null
+++ b/models/packages/nuget/search.go
@@ -0,0 +1,70 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package nuget
+
+import (
+ "context"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ packages_model "code.gitea.io/gitea/models/packages"
+
+ "xorm.io/builder"
+)
+
+// SearchVersions gets all versions of packages matching the search options
+func SearchVersions(ctx context.Context, opts *packages_model.PackageSearchOptions) ([]*packages_model.PackageVersion, int64, error) {
+ cond := toConds(opts)
+
+ e := db.GetEngine(ctx)
+
+ total, err := e.
+ Where(cond).
+ Count(&packages_model.Package{})
+ if err != nil {
+ return nil, 0, err
+ }
+
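+	// Paginate over packages (not versions) in the inner query, then join all
+	// versions of the selected packages; paginating the joined result would
+	// split a package's versions across pages.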
+ inner := builder.
+ Dialect(db.BuilderDialect()). // builder needs the sql dialect to build the Limit() below
+ Select("*").
+ From("package").
+ Where(cond).
+ OrderBy("package.name ASC")
+ if opts.Paginator != nil {
+ skip, take := opts.GetSkipTake()
+ inner = inner.Limit(take, skip)
+ }
+
+ sess := e.
+ Where(opts.ToConds()).
+ Table("package_version").
+ Join("INNER", inner, "package.id = package_version.package_id")
+
+ pvs := make([]*packages_model.PackageVersion, 0, 10)
+ return pvs, total, sess.Find(&pvs)
+}
+
+// CountPackages counts all packages matching the search options
+func CountPackages(ctx context.Context, opts *packages_model.PackageSearchOptions) (int64, error) {
+ return db.GetEngine(ctx).
+ Where(toConds(opts)).
+ Count(&packages_model.Package{})
+}
+
+func toConds(opts *packages_model.PackageSearchOptions) builder.Cond {
+ var cond builder.Cond = builder.Eq{
+ "package.is_internal": opts.IsInternal.Value(),
+ "package.owner_id": opts.OwnerID,
+ "package.type": packages_model.TypeNuGet,
+ }
+ if opts.Name.Value != "" {
+ if opts.Name.ExactMatch {
+ cond = cond.And(builder.Eq{"package.lower_name": strings.ToLower(opts.Name.Value)})
+ } else {
+ cond = cond.And(builder.Like{"package.lower_name", strings.ToLower(opts.Name.Value)})
+ }
+ }
+ return cond
+}
diff --git a/models/packages/package.go b/models/packages/package.go
new file mode 100644
index 0000000..364cc2e
--- /dev/null
+++ b/models/packages/package.go
@@ -0,0 +1,351 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package packages
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+ "xorm.io/xorm"
+)
+
+func init() {
+ db.RegisterModel(new(Package))
+}
+
+var (
+ // ErrDuplicatePackage indicates a duplicated package error
+ ErrDuplicatePackage = util.NewAlreadyExistErrorf("package already exists")
+ // ErrPackageNotExist indicates a package not exist error
+ ErrPackageNotExist = util.NewNotExistErrorf("package does not exist")
+)
+
+// Type of a package
+type Type string
+
+// List of supported packages
+const (
+ TypeAlpine Type = "alpine"
+ TypeArch Type = "arch"
+ TypeCargo Type = "cargo"
+ TypeChef Type = "chef"
+ TypeComposer Type = "composer"
+ TypeConan Type = "conan"
+ TypeConda Type = "conda"
+ TypeContainer Type = "container"
+ TypeCran Type = "cran"
+ TypeDebian Type = "debian"
+ TypeGeneric Type = "generic"
+ TypeGo Type = "go"
+ TypeHelm Type = "helm"
+ TypeMaven Type = "maven"
+ TypeNpm Type = "npm"
+ TypeNuGet Type = "nuget"
+ TypePub Type = "pub"
+ TypePyPI Type = "pypi"
+ TypeRpm Type = "rpm"
+ TypeRubyGems Type = "rubygems"
+ TypeSwift Type = "swift"
+ TypeVagrant Type = "vagrant"
+)
+
+var TypeList = []Type{
+ TypeAlpine,
+ TypeArch,
+ TypeCargo,
+ TypeChef,
+ TypeComposer,
+ TypeConan,
+ TypeConda,
+ TypeContainer,
+ TypeCran,
+ TypeDebian,
+ TypeGeneric,
+ TypeGo,
+ TypeHelm,
+ TypeMaven,
+ TypeNpm,
+ TypeNuGet,
+ TypePub,
+ TypePyPI,
+ TypeRpm,
+ TypeRubyGems,
+ TypeSwift,
+ TypeVagrant,
+}
+
+// Name gets the name of the package type
+func (pt Type) Name() string {
+ switch pt {
+ case TypeAlpine:
+ return "Alpine"
+ case TypeArch:
+ return "Arch"
+ case TypeCargo:
+ return "Cargo"
+ case TypeChef:
+ return "Chef"
+ case TypeComposer:
+ return "Composer"
+ case TypeConan:
+ return "Conan"
+ case TypeConda:
+ return "Conda"
+ case TypeContainer:
+ return "Container"
+ case TypeCran:
+ return "CRAN"
+ case TypeDebian:
+ return "Debian"
+ case TypeGeneric:
+ return "Generic"
+ case TypeGo:
+ return "Go"
+ case TypeHelm:
+ return "Helm"
+ case TypeMaven:
+ return "Maven"
+ case TypeNpm:
+ return "npm"
+ case TypeNuGet:
+ return "NuGet"
+ case TypePub:
+ return "Pub"
+ case TypePyPI:
+ return "PyPI"
+ case TypeRpm:
+ return "RPM"
+ case TypeRubyGems:
+ return "RubyGems"
+ case TypeSwift:
+ return "Swift"
+ case TypeVagrant:
+ return "Vagrant"
+ }
+ panic(fmt.Sprintf("unknown package type: %s", string(pt)))
+}
+
+// SVGName gets the name of the package type's SVG image
+func (pt Type) SVGName() string {
+ switch pt {
+ case TypeAlpine:
+ return "gitea-alpine"
+ case TypeArch:
+ return "gitea-arch"
+ case TypeCargo:
+ return "gitea-cargo"
+ case TypeChef:
+ return "gitea-chef"
+ case TypeComposer:
+ return "gitea-composer"
+ case TypeConan:
+ return "gitea-conan"
+ case TypeConda:
+ return "gitea-conda"
+ case TypeContainer:
+ return "octicon-container"
+ case TypeCran:
+ return "gitea-cran"
+ case TypeDebian:
+ return "gitea-debian"
+ case TypeGeneric:
+ return "octicon-package"
+ case TypeGo:
+ return "gitea-go"
+ case TypeHelm:
+ return "gitea-helm"
+ case TypeMaven:
+ return "gitea-maven"
+ case TypeNpm:
+ return "gitea-npm"
+ case TypeNuGet:
+ return "gitea-nuget"
+ case TypePub:
+ return "gitea-pub"
+ case TypePyPI:
+ return "gitea-python"
+ case TypeRpm:
+ return "gitea-rpm"
+ case TypeRubyGems:
+ return "gitea-rubygems"
+ case TypeSwift:
+ return "gitea-swift"
+ case TypeVagrant:
+ return "gitea-vagrant"
+ }
+ panic(fmt.Sprintf("unknown package type: %s", string(pt)))
+}
+
+// Package represents a package
+type Package struct {
+ ID int64 `xorm:"pk autoincr"`
+ OwnerID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ RepoID int64 `xorm:"INDEX"`
+ Type Type `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ Name string `xorm:"NOT NULL"`
+ LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ SemverCompatible bool `xorm:"NOT NULL DEFAULT false"`
+ IsInternal bool `xorm:"NOT NULL DEFAULT false"`
+}
+
+// TryInsertPackage inserts a package. If the package already exists, the existing package is returned together with ErrDuplicatePackage
+func TryInsertPackage(ctx context.Context, p *Package) (*Package, error) {
+ e := db.GetEngine(ctx)
+
+ existing := &Package{}
+
+ has, err := e.Where(builder.Eq{
+ "owner_id": p.OwnerID,
+ "type": p.Type,
+ "lower_name": p.LowerName,
+ }).Get(existing)
+ if err != nil {
+ return nil, err
+ }
+ if has {
+ return existing, ErrDuplicatePackage
+ }
+ if _, err = e.Insert(p); err != nil {
+ return nil, err
+ }
+ return p, nil
+}
+
+// DeletePackageByID deletes a package by id
+func DeletePackageByID(ctx context.Context, packageID int64) error {
+ n, err := db.GetEngine(ctx).ID(packageID).Delete(&Package{})
+ if n == 0 && err == nil {
+ return ErrPackageNotExist
+ }
+ return err
+}
+
+// SetRepositoryLink sets the linked repository
+func SetRepositoryLink(ctx context.Context, packageID, repoID int64) error {
+ n, err := db.GetEngine(ctx).ID(packageID).Cols("repo_id").Update(&Package{RepoID: repoID})
+ if n == 0 && err == nil {
+ return ErrPackageNotExist
+ }
+ return err
+}
+
+// UnlinkRepositoryFromAllPackages unlinks every package from the repository
+func UnlinkRepositoryFromAllPackages(ctx context.Context, repoID int64) error {
+ _, err := db.GetEngine(ctx).Where("repo_id = ?", repoID).Cols("repo_id").Update(&Package{})
+ return err
+}
+
+// GetPackageByID gets a package by id
+func GetPackageByID(ctx context.Context, packageID int64) (*Package, error) {
+ p := &Package{}
+
+ has, err := db.GetEngine(ctx).ID(packageID).Get(p)
+ if err != nil {
+ return nil, err
+ }
+ if !has {
+ return nil, ErrPackageNotExist
+ }
+ return p, nil
+}
+
+// GetPackageByName gets a package by name
+func GetPackageByName(ctx context.Context, ownerID int64, packageType Type, name string) (*Package, error) {
+ var cond builder.Cond = builder.Eq{
+ "package.owner_id": ownerID,
+ "package.type": packageType,
+ "package.lower_name": strings.ToLower(name),
+ "package.is_internal": false,
+ }
+
+ p := &Package{}
+
+ has, err := db.GetEngine(ctx).
+ Where(cond).
+ Get(p)
+ if err != nil {
+ return nil, err
+ }
+ if !has {
+ return nil, ErrPackageNotExist
+ }
+ return p, nil
+}
+
+// GetPackagesByType gets all packages of a specific type
+func GetPackagesByType(ctx context.Context, ownerID int64, packageType Type) ([]*Package, error) {
+ var cond builder.Cond = builder.Eq{
+ "package.owner_id": ownerID,
+ "package.type": packageType,
+ "package.is_internal": false,
+ }
+
+ ps := make([]*Package, 0, 10)
+ return ps, db.GetEngine(ctx).
+ Where(cond).
+ Find(&ps)
+}
+
+// FindUnreferencedPackages gets all packages without associated versions
+func FindUnreferencedPackages(ctx context.Context) ([]int64, error) {
+ var pIDs []int64
+ if err := db.GetEngine(ctx).
+ Select("package.id").
+ Table("package").
+ Join("LEFT", "package_version", "package_version.package_id = package.id").
+ Where("package_version.id IS NULL").
+ Find(&pIDs); err != nil {
+ return nil, err
+ }
+ return pIDs, nil
+}
+
+func getPackages(ctx context.Context) *xorm.Session {
+ return db.GetEngine(ctx).
+ Table("package_version").
+ Join("INNER", "package", "package.id = package_version.package_id").
+ Where("package_version.is_internal = ?", false)
+}
+
+func getOwnerPackages(ctx context.Context, ownerID int64) *xorm.Session {
+ return getPackages(ctx).
+ Where("package.owner_id = ?", ownerID)
+}
+
+// HasOwnerPackages tests if a user/org has accessible packages
+func HasOwnerPackages(ctx context.Context, ownerID int64) (bool, error) {
+ return getOwnerPackages(ctx, ownerID).
+ Exist(&Package{})
+}
+
+// CountOwnerPackages counts user/org accessible packages
+func CountOwnerPackages(ctx context.Context, ownerID int64) (int64, error) {
+ return getOwnerPackages(ctx, ownerID).
+ Distinct("package.id").
+ Count(&Package{})
+}
+
+func getRepositoryPackages(ctx context.Context, repositoryID int64) *xorm.Session {
+ return getPackages(ctx).
+ Where("package.repo_id = ?", repositoryID)
+}
+
+// HasRepositoryPackages tests if a repository has packages
+func HasRepositoryPackages(ctx context.Context, repositoryID int64) (bool, error) {
+ return getRepositoryPackages(ctx, repositoryID).
+ Exist(&PackageVersion{})
+}
+
+// CountRepositoryPackages counts packages of a repository
+func CountRepositoryPackages(ctx context.Context, repositoryID int64) (int64, error) {
+ return getRepositoryPackages(ctx, repositoryID).
+ Distinct("package.id").
+ Count(&Package{})
+}
diff --git a/models/packages/package_blob.go b/models/packages/package_blob.go
new file mode 100644
index 0000000..d9c30b6
--- /dev/null
+++ b/models/packages/package_blob.go
@@ -0,0 +1,154 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package packages
+
+import (
+ "context"
+ "strconv"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ErrPackageBlobNotExist indicates a package blob not exist error
+var ErrPackageBlobNotExist = util.NewNotExistErrorf("package blob does not exist")
+
+func init() {
+ db.RegisterModel(new(PackageBlob))
+}
+
+// PackageBlob represents a package blob
+type PackageBlob struct {
+ ID int64 `xorm:"pk autoincr"`
+ Size int64 `xorm:"NOT NULL DEFAULT 0"`
+ HashMD5 string `xorm:"hash_md5 char(32) UNIQUE(md5) INDEX NOT NULL"`
+ HashSHA1 string `xorm:"hash_sha1 char(40) UNIQUE(sha1) INDEX NOT NULL"`
+ HashSHA256 string `xorm:"hash_sha256 char(64) UNIQUE(sha256) INDEX NOT NULL"`
+ HashSHA512 string `xorm:"hash_sha512 char(128) UNIQUE(sha512) INDEX NOT NULL"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created INDEX NOT NULL"`
+}
+
+// GetOrInsertBlob inserts a blob. If the blob already exists, the existing blob is returned.
+func GetOrInsertBlob(ctx context.Context, pb *PackageBlob) (*PackageBlob, bool, error) {
+ e := db.GetEngine(ctx)
+
+ existing := &PackageBlob{}
+
+ has, err := e.Where(builder.Eq{
+ "size": pb.Size,
+ "hash_md5": pb.HashMD5,
+ "hash_sha1": pb.HashSHA1,
+ "hash_sha256": pb.HashSHA256,
+ "hash_sha512": pb.HashSHA512,
+ }).Get(existing)
+ if err != nil {
+ return nil, false, err
+ }
+ if has {
+ return existing, true, nil
+ }
+ if _, err = e.Insert(pb); err != nil {
+ return nil, false, err
+ }
+ return pb, false, nil
+}
+
+// GetBlobByID gets a blob by id
+func GetBlobByID(ctx context.Context, blobID int64) (*PackageBlob, error) {
+ pb := &PackageBlob{}
+
+ has, err := db.GetEngine(ctx).ID(blobID).Get(pb)
+ if err != nil {
+ return nil, err
+ }
+ if !has {
+ return nil, ErrPackageBlobNotExist
+ }
+ return pb, nil
+}
+
+// ExistPackageBlobWithSHA returns whether a package blob with the provided SHA256 hash exists
+func ExistPackageBlobWithSHA(ctx context.Context, blobSha256 string) (bool, error) {
+ return db.GetEngine(ctx).Exist(&PackageBlob{
+ HashSHA256: blobSha256,
+ })
+}
+
+// FindExpiredUnreferencedBlobs gets all blobs without associated files that are older than the specified duration
+func FindExpiredUnreferencedBlobs(ctx context.Context, olderThan time.Duration) ([]*PackageBlob, error) {
+ pbs := make([]*PackageBlob, 0, 10)
+ return pbs, db.GetEngine(ctx).
+ Table("package_blob").
+ Join("LEFT", "package_file", "package_file.blob_id = package_blob.id").
+ Where("package_file.id IS NULL AND package_blob.created_unix < ?", time.Now().Add(-olderThan).Unix()).
+ Find(&pbs)
+}
+
+// DeleteBlobByID deletes a blob by id
+func DeleteBlobByID(ctx context.Context, blobID int64) error {
+ _, err := db.GetEngine(ctx).ID(blobID).Delete(&PackageBlob{})
+ return err
+}
+
+// GetTotalBlobSize returns the total blobs size in bytes
+func GetTotalBlobSize(ctx context.Context) (int64, error) {
+ return db.GetEngine(ctx).
+ SumInt(&PackageBlob{}, "size")
+}
+
+// GetTotalUnreferencedBlobSize returns the total size of all unreferenced blobs in bytes
+func GetTotalUnreferencedBlobSize(ctx context.Context) (int64, error) {
+ return db.GetEngine(ctx).
+ Table("package_blob").
+ Join("LEFT", "package_file", "package_file.blob_id = package_blob.id").
+ Where("package_file.id IS NULL").
+ SumInt(&PackageBlob{}, "size")
+}
+
+// IsBlobAccessibleForUser tests if the user has access to the blob
+func IsBlobAccessibleForUser(ctx context.Context, blobID int64, user *user_model.User) (bool, error) {
+ if user.IsAdmin {
+ return true, nil
+ }
+
+ maxTeamAuthorize := builder.
+ Select("max(team.authorize)").
+ From("team").
+ InnerJoin("team_user", "team_user.team_id = team.id").
+ Where(builder.Eq{"team_user.uid": user.ID}.And(builder.Expr("team_user.org_id = `user`.id")))
+
+ maxTeamUnitAccessMode := builder.
+ Select("max(team_unit.access_mode)").
+ From("team").
+ InnerJoin("team_user", "team_user.team_id = team.id").
+ InnerJoin("team_unit", "team_unit.team_id = team.id").
+ Where(builder.Eq{"team_user.uid": user.ID, "team_unit.type": unit.TypePackages}.And(builder.Expr("team_user.org_id = `user`.id")))
+
+ cond := builder.Eq{"package_blob.id": blobID}.And(
+ // owner = user
+ builder.Eq{"`user`.id": user.ID}.
+ // user can see owner
+ Or(builder.Eq{"`user`.visibility": structs.VisibleTypePublic}.Or(builder.Eq{"`user`.visibility": structs.VisibleTypeLimited})).
+ // owner is an organization and user has access to it
+ Or(builder.Eq{"`user`.type": user_model.UserTypeOrganization}.
+ And(builder.Lte{strconv.Itoa(int(perm.AccessModeRead)): maxTeamAuthorize}.Or(builder.Lte{strconv.Itoa(int(perm.AccessModeRead)): maxTeamUnitAccessMode}))),
+ )
+
+ return db.GetEngine(ctx).
+ Table("package_blob").
+ Join("INNER", "package_file", "package_file.blob_id = package_blob.id").
+ Join("INNER", "package_version", "package_version.id = package_file.version_id").
+ Join("INNER", "package", "package.id = package_version.package_id").
+ Join("INNER", "user", "`user`.id = package.owner_id").
+ Where(cond).
+ Exist(&PackageBlob{})
+}
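
GetOrInsertBlob is the deduplication point of the blob table: two uploads with identical content share one row, and only the first needs to write to the content store. A sketch of a caller under that assumption; saveToContentStore is a hypothetical stand-in for the real store, which lives outside the model layer:

    package example

    import (
        "context"
        "crypto/md5"
        "crypto/sha1"
        "crypto/sha256"
        "crypto/sha512"
        "encoding/hex"

        packages_model "code.gitea.io/gitea/models/packages"
    )

    // saveToContentStore is a hypothetical stand-in for the real content store.
    func saveToContentStore(pb *packages_model.PackageBlob, data []byte) error { return nil }

    // storeBlob records the four hashes and writes the bytes only when
    // the blob did not exist before.
    func storeBlob(ctx context.Context, data []byte) (*packages_model.PackageBlob, error) {
        md5Sum := md5.Sum(data)
        sha1Sum := sha1.Sum(data)
        sha256Sum := sha256.Sum256(data)
        sha512Sum := sha512.Sum512(data)

        pb, existed, err := packages_model.GetOrInsertBlob(ctx, &packages_model.PackageBlob{
            Size:       int64(len(data)),
            HashMD5:    hex.EncodeToString(md5Sum[:]),
            HashSHA1:   hex.EncodeToString(sha1Sum[:]),
            HashSHA256: hex.EncodeToString(sha256Sum[:]),
            HashSHA512: hex.EncodeToString(sha512Sum[:]),
        })
        if err != nil || existed {
            // Existing blobs already have their bytes in the store.
            return pb, err
        }
        return pb, saveToContentStore(pb, data)
    }
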
diff --git a/models/packages/package_blob_upload.go b/models/packages/package_blob_upload.go
new file mode 100644
index 0000000..4b0e789
--- /dev/null
+++ b/models/packages/package_blob_upload.go
@@ -0,0 +1,79 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package packages
+
+import (
+ "context"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+)
+
+// ErrPackageBlobUploadNotExist indicates a package blob upload not exist error
+var ErrPackageBlobUploadNotExist = util.NewNotExistErrorf("package blob upload does not exist")
+
+func init() {
+ db.RegisterModel(new(PackageBlobUpload))
+}
+
+// PackageBlobUpload represents a package blob upload
+type PackageBlobUpload struct {
+ ID string `xorm:"pk"`
+ BytesReceived int64 `xorm:"NOT NULL DEFAULT 0"`
+ HashStateBytes []byte `xorm:"BLOB"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created NOT NULL"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"updated INDEX NOT NULL"`
+}
+
+// CreateBlobUpload inserts a blob upload
+func CreateBlobUpload(ctx context.Context) (*PackageBlobUpload, error) {
+ id, err := util.CryptoRandomString(25)
+ if err != nil {
+ return nil, err
+ }
+
+ pbu := &PackageBlobUpload{
+ ID: strings.ToLower(id),
+ }
+
+ _, err = db.GetEngine(ctx).Insert(pbu)
+ return pbu, err
+}
+
+// GetBlobUploadByID gets a blob upload by id
+func GetBlobUploadByID(ctx context.Context, id string) (*PackageBlobUpload, error) {
+ pbu := &PackageBlobUpload{}
+
+ has, err := db.GetEngine(ctx).ID(id).Get(pbu)
+ if err != nil {
+ return nil, err
+ }
+ if !has {
+ return nil, ErrPackageBlobUploadNotExist
+ }
+ return pbu, nil
+}
+
+// UpdateBlobUpload updates the blob upload
+func UpdateBlobUpload(ctx context.Context, pbu *PackageBlobUpload) error {
+ _, err := db.GetEngine(ctx).ID(pbu.ID).Update(pbu)
+ return err
+}
+
+// DeleteBlobUploadByID deletes the blob upload
+func DeleteBlobUploadByID(ctx context.Context, id string) error {
+ _, err := db.GetEngine(ctx).ID(id).Delete(&PackageBlobUpload{})
+ return err
+}
+
+// FindExpiredBlobUploads gets all expired blob uploads
+func FindExpiredBlobUploads(ctx context.Context, olderThan time.Duration) ([]*PackageBlobUpload, error) {
+ pbus := make([]*PackageBlobUpload, 0, 10)
+ return pbus, db.GetEngine(ctx).
+ Where("updated_unix < ?", time.Now().Add(-olderThan).Unix()).
+ Find(&pbus)
+}
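
The upload table only tracks progress and hash state; the chunk bytes themselves belong to the content store. A sketch of how a handler might advance an upload (the store write is elided):

    package example

    import (
        "context"

        packages_model "code.gitea.io/gitea/models/packages"
    )

    // receiveChunk looks up an in-progress upload, accounts for one more
    // chunk and persists the new progress so the upload can be resumed,
    // or expired later via FindExpiredBlobUploads.
    func receiveChunk(ctx context.Context, uploadID string, chunk []byte) error {
        pbu, err := packages_model.GetBlobUploadByID(ctx, uploadID)
        if err != nil {
            return err // ErrPackageBlobUploadNotExist for unknown IDs
        }
        pbu.BytesReceived += int64(len(chunk))
        return packages_model.UpdateBlobUpload(ctx, pbu)
    }
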
diff --git a/models/packages/package_cleanup_rule.go b/models/packages/package_cleanup_rule.go
new file mode 100644
index 0000000..fa12dec
--- /dev/null
+++ b/models/packages/package_cleanup_rule.go
@@ -0,0 +1,109 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package packages
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ErrPackageCleanupRuleNotExist indicates a package cleanup rule not exist error
+var ErrPackageCleanupRuleNotExist = util.NewNotExistErrorf("package cleanup rule does not exist")
+
+func init() {
+ db.RegisterModel(new(PackageCleanupRule))
+}
+
+// PackageCleanupRule represents a rule which describes when to clean up package versions
+type PackageCleanupRule struct {
+ ID int64 `xorm:"pk autoincr"`
+ Enabled bool `xorm:"INDEX NOT NULL DEFAULT false"`
+ OwnerID int64 `xorm:"UNIQUE(s) INDEX NOT NULL DEFAULT 0"`
+ Type Type `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ KeepCount int `xorm:"NOT NULL DEFAULT 0"`
+ KeepPattern string `xorm:"NOT NULL DEFAULT ''"`
+ KeepPatternMatcher *regexp.Regexp `xorm:"-"`
+ RemoveDays int `xorm:"NOT NULL DEFAULT 0"`
+ RemovePattern string `xorm:"NOT NULL DEFAULT ''"`
+ RemovePatternMatcher *regexp.Regexp `xorm:"-"`
+ MatchFullName bool `xorm:"NOT NULL DEFAULT false"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created NOT NULL DEFAULT 0"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"updated NOT NULL DEFAULT 0"`
+}
+
+// CompiledPattern compiles the keep and remove patterns into cached matchers.
+// Calling it again after the matchers were built is a no-op.
+func (pcr *PackageCleanupRule) CompiledPattern() error {
+ if pcr.KeepPatternMatcher != nil || pcr.RemovePatternMatcher != nil {
+ return nil
+ }
+
+ if pcr.KeepPattern != "" {
+ var err error
+ pcr.KeepPatternMatcher, err = regexp.Compile(fmt.Sprintf(`(?i)\A%s\z`, pcr.KeepPattern))
+ if err != nil {
+ return err
+ }
+ }
+
+ if pcr.RemovePattern != "" {
+ var err error
+ pcr.RemovePatternMatcher, err = regexp.Compile(fmt.Sprintf(`(?i)\A%s\z`, pcr.RemovePattern))
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// InsertCleanupRule creates a cleanup rule
+func InsertCleanupRule(ctx context.Context, pcr *PackageCleanupRule) (*PackageCleanupRule, error) {
+ return pcr, db.Insert(ctx, pcr)
+}
+
+// GetCleanupRuleByID gets a cleanup rule by id
+func GetCleanupRuleByID(ctx context.Context, id int64) (*PackageCleanupRule, error) {
+ pcr := &PackageCleanupRule{}
+
+ has, err := db.GetEngine(ctx).ID(id).Get(pcr)
+ if err != nil {
+ return nil, err
+ }
+ if !has {
+ return nil, ErrPackageCleanupRuleNotExist
+ }
+ return pcr, nil
+}
+
+// UpdateCleanupRule updates a cleanup rule
+func UpdateCleanupRule(ctx context.Context, pcr *PackageCleanupRule) error {
+ _, err := db.GetEngine(ctx).ID(pcr.ID).AllCols().Update(pcr)
+ return err
+}
+
+// GetCleanupRulesByOwner gets all cleanup rules of an owner
+func GetCleanupRulesByOwner(ctx context.Context, ownerID int64) ([]*PackageCleanupRule, error) {
+ pcrs := make([]*PackageCleanupRule, 0, 10)
+ return pcrs, db.GetEngine(ctx).Where("owner_id = ?", ownerID).Find(&pcrs)
+}
+
+// DeleteCleanupRuleByID deletes a cleanup rule by id
+func DeleteCleanupRuleByID(ctx context.Context, ruleID int64) error {
+ _, err := db.GetEngine(ctx).ID(ruleID).Delete(&PackageCleanupRule{})
+ return err
+}
+
+// HasOwnerCleanupRuleForPackageType tests if an owner has a cleanup rule for a package type
+func HasOwnerCleanupRuleForPackageType(ctx context.Context, ownerID int64, packageType Type) (bool, error) {
+ return db.GetEngine(ctx).
+ Where("owner_id = ? AND type = ?", ownerID, packageType).
+ Exist(&PackageCleanupRule{})
+}
+
+// IterateEnabledCleanupRules iterates over all enabled cleanup rules and invokes the callback for each
+func IterateEnabledCleanupRules(ctx context.Context, callback func(context.Context, *PackageCleanupRule) error) error {
+ return db.Iterate(
+ ctx,
+ builder.Eq{"enabled": true},
+ callback,
+ )
+}
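
A sketch of how a cleanup job might evaluate one rule against a single version name. The keep pattern wins over the remove pattern; the KeepCount and RemoveDays checks are elided, so this illustrates only the pattern matching, not the actual cleanup service:

    package example

    import (
        packages_model "code.gitea.io/gitea/models/packages"
    )

    // shouldRemove applies the rule's patterns to a single version string.
    func shouldRemove(pcr *packages_model.PackageCleanupRule, version string) (bool, error) {
        if err := pcr.CompiledPattern(); err != nil {
            return false, err
        }
        if pcr.KeepPatternMatcher != nil && pcr.KeepPatternMatcher.MatchString(version) {
            return false, nil // the keep pattern always wins
        }
        return pcr.RemovePatternMatcher != nil && pcr.RemovePatternMatcher.MatchString(version), nil
    }
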
diff --git a/models/packages/package_file.go b/models/packages/package_file.go
new file mode 100644
index 0000000..1bb6b57
--- /dev/null
+++ b/models/packages/package_file.go
@@ -0,0 +1,232 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package packages
+
+import (
+ "context"
+ "strconv"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+func init() {
+ db.RegisterModel(new(PackageFile))
+}
+
+var (
+ // ErrDuplicatePackageFile indicates a duplicated package file error
+ ErrDuplicatePackageFile = util.NewAlreadyExistErrorf("package file already exists")
+ // ErrPackageFileNotExist indicates a package file not exist error
+ ErrPackageFileNotExist = util.NewNotExistErrorf("package file does not exist")
+)
+
+// EmptyFileKey is a named constant for an empty file key
+const EmptyFileKey = ""
+
+// PackageFile represents a package file
+type PackageFile struct {
+ ID int64 `xorm:"pk autoincr"`
+ VersionID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ BlobID int64 `xorm:"INDEX NOT NULL"`
+ Name string `xorm:"NOT NULL"`
+ LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ CompositeKey string `xorm:"UNIQUE(s) INDEX"`
+ IsLead bool `xorm:"NOT NULL DEFAULT false"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created INDEX NOT NULL"`
+}
+
+// TryInsertFile inserts a file. If the file already exists, ErrDuplicatePackageFile is returned
+func TryInsertFile(ctx context.Context, pf *PackageFile) (*PackageFile, error) {
+ e := db.GetEngine(ctx)
+
+ existing := &PackageFile{}
+
+ has, err := e.Where(builder.Eq{
+ "version_id": pf.VersionID,
+ "lower_name": pf.LowerName,
+ "composite_key": pf.CompositeKey,
+ }).Get(existing)
+ if err != nil {
+ return nil, err
+ }
+ if has {
+ return existing, ErrDuplicatePackageFile
+ }
+ if _, err = e.Insert(pf); err != nil {
+ return nil, err
+ }
+ return pf, nil
+}
+
+// GetFilesByVersionID gets all files of a version
+func GetFilesByVersionID(ctx context.Context, versionID int64) ([]*PackageFile, error) {
+ pfs := make([]*PackageFile, 0, 10)
+ return pfs, db.GetEngine(ctx).Where("version_id = ?", versionID).Find(&pfs)
+}
+
+// GetFileForVersionByID gets a file of a version by id
+func GetFileForVersionByID(ctx context.Context, versionID, fileID int64) (*PackageFile, error) {
+ pf := &PackageFile{
+ VersionID: versionID,
+ }
+
+ has, err := db.GetEngine(ctx).ID(fileID).Get(pf)
+ if err != nil {
+ return nil, err
+ }
+ if !has {
+ return nil, ErrPackageFileNotExist
+ }
+ return pf, nil
+}
+
+// GetFileForVersionByName gets a file of a version by name
+func GetFileForVersionByName(ctx context.Context, versionID int64, name, key string) (*PackageFile, error) {
+ if name == "" {
+ return nil, ErrPackageFileNotExist
+ }
+
+ pf := &PackageFile{}
+
+ has, err := db.GetEngine(ctx).Where(builder.Eq{
+ "version_id": versionID,
+ "lower_name": strings.ToLower(name),
+ "composite_key": key,
+ }).Get(pf)
+ if err != nil {
+ return nil, err
+ }
+ if !has {
+ return nil, ErrPackageFileNotExist
+ }
+ return pf, nil
+}
+
+// DeleteFileByID deletes a file
+func DeleteFileByID(ctx context.Context, fileID int64) error {
+ _, err := db.GetEngine(ctx).ID(fileID).Delete(&PackageFile{})
+ return err
+}
+
+// PackageFileSearchOptions are options for SearchXXX methods
+type PackageFileSearchOptions struct {
+ OwnerID int64
+ PackageType Type
+ VersionID int64
+ Query string
+ CompositeKey string
+ Properties map[string]string
+ OlderThan time.Duration
+ HashAlgorithm string
+ Hash string
+ db.Paginator
+}
+
+func (opts *PackageFileSearchOptions) toConds() builder.Cond {
+ cond := builder.NewCond()
+
+ if opts.VersionID != 0 {
+ cond = cond.And(builder.Eq{"package_file.version_id": opts.VersionID})
+ } else if opts.OwnerID != 0 || (opts.PackageType != "" && opts.PackageType != "all") {
+ var versionCond builder.Cond = builder.Eq{
+ "package_version.is_internal": false,
+ }
+ if opts.OwnerID != 0 {
+ versionCond = versionCond.And(builder.Eq{"package.owner_id": opts.OwnerID})
+ }
+ if opts.PackageType != "" && opts.PackageType != "all" {
+ versionCond = versionCond.And(builder.Eq{"package.type": opts.PackageType})
+ }
+
+ in := builder.
+ Select("package_version.id").
+ From("package_version").
+ InnerJoin("package", "package.id = package_version.package_id").
+ Where(versionCond)
+
+ cond = cond.And(builder.In("package_file.version_id", in))
+ }
+ if opts.CompositeKey != "" {
+ cond = cond.And(builder.Eq{"package_file.composite_key": opts.CompositeKey})
+ }
+ if opts.Query != "" {
+ cond = cond.And(builder.Like{"package_file.lower_name", strings.ToLower(opts.Query)})
+ }
+
+ if len(opts.Properties) != 0 {
+ var propsCond builder.Cond = builder.Eq{
+ "package_property.ref_type": PropertyTypeFile,
+ }
+ propsCond = propsCond.And(builder.Expr("package_property.ref_id = package_file.id"))
+
+ propsCondBlock := builder.NewCond()
+ for name, value := range opts.Properties {
+ propsCondBlock = propsCondBlock.Or(builder.Eq{
+ "package_property.name": name,
+ "package_property.value": value,
+ })
+ }
+ propsCond = propsCond.And(propsCondBlock)
+
+ // All requested properties must be present, so the number of matching
+ // property rows must equal the number of searched properties.
+ cond = cond.And(builder.Eq{
+ strconv.Itoa(len(opts.Properties)): builder.Select("COUNT(*)").Where(propsCond).From("package_property"),
+ })
+ }
+
+ if opts.OlderThan != 0 {
+ cond = cond.And(builder.Lt{"package_file.created_unix": time.Now().Add(-opts.OlderThan).Unix()})
+ }
+
+ if opts.Hash != "" {
+ var field string
+ switch strings.ToLower(opts.HashAlgorithm) {
+ case "md5":
+ field = "package_blob.hash_md5"
+ case "sha1":
+ field = "package_blob.hash_sha1"
+ case "sha256":
+ field = "package_blob.hash_sha256"
+ case "sha512":
+ fallthrough
+ default: // default to SHA512 if not specified or unknown
+ field = "package_blob.hash_sha512"
+ }
+ innerCond := builder.
+ Expr("package_blob.id = package_file.blob_id").
+ And(builder.Eq{field: opts.Hash})
+ cond = cond.And(builder.Exists(builder.Select("package_blob.id").From("package_blob").Where(innerCond)))
+ }
+
+ return cond
+}
+
+// SearchFiles gets all files of packages matching the search options
+func SearchFiles(ctx context.Context, opts *PackageFileSearchOptions) ([]*PackageFile, int64, error) {
+ sess := db.GetEngine(ctx).
+ Where(opts.toConds())
+
+ if opts.Paginator != nil {
+ sess = db.SetSessionPagination(sess, opts)
+ }
+
+ pfs := make([]*PackageFile, 0, 10)
+ count, err := sess.FindAndCount(&pfs)
+ return pfs, count, err
+}
+
+// CalculateFileSize sums up all blob sizes matching the search options.
+// It does NOT respect the deduplication of blobs.
+func CalculateFileSize(ctx context.Context, opts *PackageFileSearchOptions) (int64, error) {
+ return db.GetEngine(ctx).
+ Table("package_file").
+ Where(opts.toConds()).
+ Join("INNER", "package_blob", "package_blob.id = package_file.blob_id").
+ SumInt(new(PackageBlob), "size")
+}
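
The hash branch of toConds enables content-addressed lookups. A sketch of finding all files of one owner whose blob has a given SHA-256 (per the switch above, unknown algorithm names fall back to SHA-512):

    package example

    import (
        "context"

        packages_model "code.gitea.io/gitea/models/packages"
    )

    // findFilesBySHA256 returns all package files of one owner whose blob
    // carries the given SHA-256 hash. Paginator is left nil, so all
    // matches are returned at once.
    func findFilesBySHA256(ctx context.Context, ownerID int64, hash string) ([]*packages_model.PackageFile, error) {
        pfs, _, err := packages_model.SearchFiles(ctx, &packages_model.PackageFileSearchOptions{
            OwnerID:       ownerID,
            HashAlgorithm: "sha256",
            Hash:          hash,
        })
        return pfs, err
    }
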
diff --git a/models/packages/package_property.go b/models/packages/package_property.go
new file mode 100644
index 0000000..e017001
--- /dev/null
+++ b/models/packages/package_property.go
@@ -0,0 +1,121 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package packages
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+
+ "xorm.io/builder"
+)
+
+func init() {
+ db.RegisterModel(new(PackageProperty))
+}
+
+type PropertyType int64
+
+const (
+ // PropertyTypeVersion means the reference is a package version
+ PropertyTypeVersion PropertyType = iota // 0
+ // PropertyTypeFile means the reference is a package file
+ PropertyTypeFile // 1
+ // PropertyTypePackage means the reference is a package
+ PropertyTypePackage // 2
+)
+
+// PackageProperty represents a property of a package, version or file
+type PackageProperty struct {
+ ID int64 `xorm:"pk autoincr"`
+ RefType PropertyType `xorm:"INDEX NOT NULL"`
+ RefID int64 `xorm:"INDEX NOT NULL"`
+ Name string `xorm:"INDEX NOT NULL"`
+ Value string `xorm:"TEXT NOT NULL"`
+}
+
+// InsertProperty creates a property
+func InsertProperty(ctx context.Context, refType PropertyType, refID int64, name, value string) (*PackageProperty, error) {
+ pp := &PackageProperty{
+ RefType: refType,
+ RefID: refID,
+ Name: name,
+ Value: value,
+ }
+
+ _, err := db.GetEngine(ctx).Insert(pp)
+ return pp, err
+}
+
+// GetProperties gets all properties
+func GetProperties(ctx context.Context, refType PropertyType, refID int64) ([]*PackageProperty, error) {
+ pps := make([]*PackageProperty, 0, 10)
+ return pps, db.GetEngine(ctx).Where("ref_type = ? AND ref_id = ?", refType, refID).Find(&pps)
+}
+
+// GetPropertiesByName gets all properties with a specific name
+func GetPropertiesByName(ctx context.Context, refType PropertyType, refID int64, name string) ([]*PackageProperty, error) {
+ pps := make([]*PackageProperty, 0, 10)
+ return pps, db.GetEngine(ctx).Where("ref_type = ? AND ref_id = ? AND name = ?", refType, refID, name).Find(&pps)
+}
+
+// UpdateProperty updates a property
+func UpdateProperty(ctx context.Context, pp *PackageProperty) error {
+ _, err := db.GetEngine(ctx).ID(pp.ID).Update(pp)
+ return err
+}
+
+// DeleteAllProperties deletes all properties of a ref
+func DeleteAllProperties(ctx context.Context, refType PropertyType, refID int64) error {
+ _, err := db.GetEngine(ctx).Where("ref_type = ? AND ref_id = ?", refType, refID).Delete(&PackageProperty{})
+ return err
+}
+
+// DeletePropertyByID deletes a property
+func DeletePropertyByID(ctx context.Context, propertyID int64) error {
+ _, err := db.GetEngine(ctx).ID(propertyID).Delete(&PackageProperty{})
+ return err
+}
+
+// DeletePropertyByName deletes properties by name
+func DeletePropertyByName(ctx context.Context, refType PropertyType, refID int64, name string) error {
+ _, err := db.GetEngine(ctx).Where("ref_type = ? AND ref_id = ? AND name = ?", refType, refID, name).Delete(&PackageProperty{})
+ return err
+}
+
+type DistinctPropertyDependency struct {
+ Name string
+ Value string
+}
+
+// GetDistinctPropertyValues returns all distinct property values for a given type.
+// Optionally, the search can be restricted to refs that also carry another given property.
+func GetDistinctPropertyValues(ctx context.Context, packageType Type, ownerID int64, refType PropertyType, propertyName string, dep *DistinctPropertyDependency) ([]string, error) {
+ var cond builder.Cond = builder.Eq{
+ "package_property.ref_type": refType,
+ "package_property.name": propertyName,
+ "package.type": packageType,
+ "package.owner_id": ownerID,
+ }
+ if dep != nil {
+ innerCond := builder.
+ Expr("pp.ref_id = package_property.ref_id").
+ And(builder.Eq{
+ "pp.ref_type": refType,
+ "pp.name": dep.Name,
+ "pp.value": dep.Value,
+ })
+ cond = cond.And(builder.Exists(builder.Select("pp.ref_id").From("package_property pp").Where(innerCond)))
+ }
+
+ values := make([]string, 0, 5)
+ return values, db.GetEngine(ctx).
+ Table("package_property").
+ Distinct("package_property.value").
+ Join("INNER", "package_file", "package_file.id = package_property.ref_id").
+ Join("INNER", "package_version", "package_version.id = package_file.version_id").
+ Join("INNER", "package", "package.id = package_version.package_id").
+ Where(cond).
+ Find(&values)
+}
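
Properties are free-form key/value rows hung off a package, version or file. A sketch of writing a file property and enumerating its distinct values owner-wide; the property name used here is made up for illustration (models/packages/rpm/search.go further below shows a real caller):

    package example

    import (
        "context"

        packages_model "code.gitea.io/gitea/models/packages"
    )

    const propertyGroup = "example.group" // hypothetical property name

    // tagFileGroup attaches a group property to one package file.
    func tagFileGroup(ctx context.Context, fileID int64, group string) error {
        _, err := packages_model.InsertProperty(ctx, packages_model.PropertyTypeFile, fileID, propertyGroup, group)
        return err
    }

    // listGroups enumerates every distinct group value used by the
    // owner's packages of the given type.
    func listGroups(ctx context.Context, ownerID int64, packageType packages_model.Type) ([]string, error) {
        return packages_model.GetDistinctPropertyValues(ctx, packageType, ownerID, packages_model.PropertyTypeFile, propertyGroup, nil)
    }
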
diff --git a/models/packages/package_test.go b/models/packages/package_test.go
new file mode 100644
index 0000000..1c96e08
--- /dev/null
+++ b/models/packages/package_test.go
@@ -0,0 +1,319 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package packages_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ packages_model "code.gitea.io/gitea/models/packages"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+
+ _ "code.gitea.io/gitea/models"
+ _ "code.gitea.io/gitea/models/actions"
+ _ "code.gitea.io/gitea/models/activities"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestMain(m *testing.M) {
+ unittest.MainTest(m)
+}
+
+func prepareExamplePackage(t *testing.T) *packages_model.Package {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ owner := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 3})
+
+ p0 := &packages_model.Package{
+ OwnerID: owner.ID,
+ RepoID: repo.ID,
+ LowerName: "package",
+ Type: packages_model.TypeGeneric,
+ }
+
+ p, err := packages_model.TryInsertPackage(db.DefaultContext, p0)
+ require.NotNil(t, p)
+ require.NoError(t, err)
+ require.Equal(t, *p0, *p)
+ return p
+}
+
+func deletePackage(t *testing.T, p *packages_model.Package) {
+ err := packages_model.DeletePackageByID(db.DefaultContext, p.ID)
+ require.NoError(t, err)
+}
+
+func TestTryInsertPackage(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ owner := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+
+ p0 := &packages_model.Package{
+ OwnerID: owner.ID,
+ LowerName: "package",
+ }
+
+ // Insert package should return the package and yield no error
+ p, err := packages_model.TryInsertPackage(db.DefaultContext, p0)
+ require.NotNil(t, p)
+ require.NoError(t, err)
+ require.Equal(t, *p0, *p)
+
+ // Insert same package again should return the same package and yield ErrDuplicatePackage
+ p, err = packages_model.TryInsertPackage(db.DefaultContext, p0)
+ require.NotNil(t, p)
+ require.IsType(t, packages_model.ErrDuplicatePackage, err)
+ require.Equal(t, *p0, *p)
+
+ err = packages_model.DeletePackageByID(db.DefaultContext, p0.ID)
+ require.NoError(t, err)
+}
+
+func TestGetPackageByID(t *testing.T) {
+ p0 := prepareExamplePackage(t)
+
+ // Get package should return package and yield no error
+ p, err := packages_model.GetPackageByID(db.DefaultContext, p0.ID)
+ require.NotNil(t, p)
+ require.Equal(t, *p0, *p)
+ require.NoError(t, err)
+
+ // Get package with non-existing ID should yield ErrPackageNotExist
+ p, err = packages_model.GetPackageByID(db.DefaultContext, 999)
+ require.Nil(t, p)
+ require.Error(t, err)
+ require.IsType(t, packages_model.ErrPackageNotExist, err)
+
+ deletePackage(t, p0)
+}
+
+func TestDeletePackageByID(t *testing.T) {
+ p0 := prepareExamplePackage(t)
+
+ // Delete existing package should yield no error
+ err := packages_model.DeletePackageByID(db.DefaultContext, p0.ID)
+ require.NoError(t, err)
+
+ // Delete (now) non-existing package should yield ErrPackageNotExist
+ err = packages_model.DeletePackageByID(db.DefaultContext, p0.ID)
+ require.Error(t, err)
+ require.IsType(t, packages_model.ErrPackageNotExist, err)
+}
+
+func TestSetRepositoryLink(t *testing.T) {
+ p0 := prepareExamplePackage(t)
+
+ // Set repository link to package should yield no error and package RepoID should be updated
+ err := packages_model.SetRepositoryLink(db.DefaultContext, p0.ID, 5)
+ require.NoError(t, err)
+
+ p, err := packages_model.GetPackageByID(db.DefaultContext, p0.ID)
+ require.NoError(t, err)
+ require.EqualValues(t, 5, p.RepoID)
+
+ // Set repository link to non-existing package should yield ErrPackageNotExist
+ err = packages_model.SetRepositoryLink(db.DefaultContext, 999, 5)
+ require.Error(t, err)
+ require.IsType(t, packages_model.ErrPackageNotExist, err)
+
+ deletePackage(t, p0)
+}
+
+func TestUnlinkRepositoryFromAllPackages(t *testing.T) {
+ p0 := prepareExamplePackage(t)
+
+ // Unlink repository from all packages should yield no error and package with p0.ID should have RepoID 0
+ err := packages_model.UnlinkRepositoryFromAllPackages(db.DefaultContext, p0.RepoID)
+ require.NoError(t, err)
+
+ p, err := packages_model.GetPackageByID(db.DefaultContext, p0.ID)
+ require.NoError(t, err)
+ require.EqualValues(t, 0, p.RepoID)
+
+ // Unlink repository again from all packages should also yield no error
+ err = packages_model.UnlinkRepositoryFromAllPackages(db.DefaultContext, p0.RepoID)
+ require.NoError(t, err)
+
+ deletePackage(t, p0)
+}
+
+func TestGetPackageByName(t *testing.T) {
+ p0 := prepareExamplePackage(t)
+
+ // Get package should return package and yield no error
+ p, err := packages_model.GetPackageByName(db.DefaultContext, p0.OwnerID, p0.Type, p0.LowerName)
+ require.NotNil(t, p)
+ require.Equal(t, *p0, *p)
+ require.NoError(t, err)
+
+ // Get package with uppercase name should return package and yield no error
+ p, err = packages_model.GetPackageByName(db.DefaultContext, p0.OwnerID, p0.Type, "Package")
+ require.NotNil(t, p)
+ require.Equal(t, *p0, *p)
+ require.NoError(t, err)
+
+ // Get package with wrong owner ID, type or name should return no package and yield ErrPackageNotExist
+ p, err = packages_model.GetPackageByName(db.DefaultContext, 999, p0.Type, p0.LowerName)
+ require.Nil(t, p)
+ require.Error(t, err)
+ require.IsType(t, packages_model.ErrPackageNotExist, err)
+ p, err = packages_model.GetPackageByName(db.DefaultContext, p0.OwnerID, packages_model.TypeDebian, p0.LowerName)
+ require.Nil(t, p)
+ require.Error(t, err)
+ require.IsType(t, packages_model.ErrPackageNotExist, err)
+ p, err = packages_model.GetPackageByName(db.DefaultContext, p0.OwnerID, p0.Type, "package1")
+ require.Nil(t, p)
+ require.Error(t, err)
+ require.IsType(t, packages_model.ErrPackageNotExist, err)
+
+ deletePackage(t, p0)
+}
+
+func TestHasCountPackages(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ owner := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 3})
+
+ p, err := packages_model.TryInsertPackage(db.DefaultContext, &packages_model.Package{
+ OwnerID: owner.ID,
+ RepoID: repo.ID,
+ LowerName: "package",
+ })
+ require.NotNil(t, p)
+ require.NoError(t, err)
+
+ // A package without package versions gets automatically cleaned up and should return false for the owner
+ has, err := packages_model.HasOwnerPackages(db.DefaultContext, owner.ID)
+ require.False(t, has)
+ require.NoError(t, err)
+ count, err := packages_model.CountOwnerPackages(db.DefaultContext, owner.ID)
+ require.EqualValues(t, 0, count)
+ require.NoError(t, err)
+
+ // A package without package versions gets automatically cleaned up and should return false for the repository
+ has, err = packages_model.HasRepositoryPackages(db.DefaultContext, repo.ID)
+ require.False(t, has)
+ require.NoError(t, err)
+ count, err = packages_model.CountRepositoryPackages(db.DefaultContext, repo.ID)
+ require.EqualValues(t, 0, count)
+ require.NoError(t, err)
+
+ pv, err := packages_model.GetOrInsertVersion(db.DefaultContext, &packages_model.PackageVersion{
+ PackageID: p.ID,
+ LowerVersion: "internal",
+ IsInternal: true,
+ })
+ require.NotNil(t, pv)
+ require.NoError(t, err)
+
+ // A package with an internal package version gets automatically cleaned up and should return false
+ has, err = packages_model.HasOwnerPackages(db.DefaultContext, owner.ID)
+ require.False(t, has)
+ require.NoError(t, err)
+ count, err = packages_model.CountOwnerPackages(db.DefaultContext, owner.ID)
+ require.EqualValues(t, 0, count)
+ require.NoError(t, err)
+ has, err = packages_model.HasRepositoryPackages(db.DefaultContext, repo.ID)
+ require.False(t, has)
+ require.NoError(t, err)
+ count, err = packages_model.CountRepositoryPackages(db.DefaultContext, repo.ID)
+ require.EqualValues(t, 0, count)
+ require.NoError(t, err)
+
+ pv, err = packages_model.GetOrInsertVersion(db.DefaultContext, &packages_model.PackageVersion{
+ PackageID: p.ID,
+ LowerVersion: "normal",
+ IsInternal: false,
+ })
+ require.NotNil(t, pv)
+ require.NoError(t, err)
+
+ // A package with a normal package version should return true
+ has, err = packages_model.HasOwnerPackages(db.DefaultContext, owner.ID)
+ require.True(t, has)
+ require.NoError(t, err)
+ count, err = packages_model.CountOwnerPackages(db.DefaultContext, owner.ID)
+ require.EqualValues(t, 1, count)
+ require.NoError(t, err)
+ has, err = packages_model.HasRepositoryPackages(db.DefaultContext, repo.ID)
+ require.True(t, has)
+ require.NoError(t, err)
+ count, err = packages_model.CountRepositoryPackages(db.DefaultContext, repo.ID)
+ require.EqualValues(t, 1, count)
+ require.NoError(t, err)
+
+ pv2, err := packages_model.GetOrInsertVersion(db.DefaultContext, &packages_model.PackageVersion{
+ PackageID: p.ID,
+ LowerVersion: "normal2",
+ IsInternal: false,
+ })
+ require.NotNil(t, pv2)
+ require.NoError(t, err)
+
+ // A package with multiple package versions should be counted only once
+ has, err = packages_model.HasOwnerPackages(db.DefaultContext, owner.ID)
+ require.True(t, has)
+ require.NoError(t, err)
+ count, err = packages_model.CountOwnerPackages(db.DefaultContext, owner.ID)
+ require.EqualValues(t, 1, count)
+ require.NoError(t, err)
+ has, err = packages_model.HasRepositoryPackages(db.DefaultContext, repo.ID)
+ require.True(t, has)
+ require.NoError(t, err)
+ count, err = packages_model.CountRepositoryPackages(db.DefaultContext, repo.ID)
+ require.EqualValues(t, 1, count)
+ require.NoError(t, err)
+
+ // For owner ID 0 there should be no packages
+ has, err = packages_model.HasOwnerPackages(db.DefaultContext, 0)
+ require.False(t, has)
+ require.NoError(t, err)
+ count, err = packages_model.CountOwnerPackages(db.DefaultContext, 0)
+ require.EqualValues(t, 0, count)
+ require.NoError(t, err)
+
+ // For repo ID 0 there should be no packages
+ has, err = packages_model.HasRepositoryPackages(db.DefaultContext, 0)
+ require.False(t, has)
+ require.NoError(t, err)
+ count, err = packages_model.CountRepositoryPackages(db.DefaultContext, 0)
+ require.EqualValues(t, 0, count)
+ require.NoError(t, err)
+
+ p1, err := packages_model.TryInsertPackage(db.DefaultContext, &packages_model.Package{
+ OwnerID: owner.ID,
+ LowerName: "package0",
+ })
+ require.NotNil(t, p1)
+ require.NoError(t, err)
+ p1v, err := packages_model.GetOrInsertVersion(db.DefaultContext, &packages_model.PackageVersion{
+ PackageID: p1.ID,
+ LowerVersion: "normal",
+ IsInternal: false,
+ })
+ require.NotNil(t, p1v)
+ require.NoError(t, err)
+
+ // Owner owner.ID should have two packages now
+ has, err = packages_model.HasOwnerPackages(db.DefaultContext, owner.ID)
+ require.True(t, has)
+ require.NoError(t, err)
+ count, err = packages_model.CountOwnerPackages(db.DefaultContext, owner.ID)
+ require.EqualValues(t, 2, count)
+ require.NoError(t, err)
+
+ // For repo ID 0 there should now be one package, because p1 is not assigned to a repo
+ has, err = packages_model.HasRepositoryPackages(db.DefaultContext, 0)
+ require.True(t, has)
+ require.NoError(t, err)
+ count, err = packages_model.CountRepositoryPackages(db.DefaultContext, 0)
+ require.EqualValues(t, 1, count)
+ require.NoError(t, err)
+}
diff --git a/models/packages/package_version.go b/models/packages/package_version.go
new file mode 100644
index 0000000..278e8e3
--- /dev/null
+++ b/models/packages/package_version.go
@@ -0,0 +1,348 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package packages
+
+import (
+ "context"
+ "strconv"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/optional"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ErrDuplicatePackageVersion indicates a duplicated package version error
+var ErrDuplicatePackageVersion = util.NewAlreadyExistErrorf("package version already exists")
+
+func init() {
+ db.RegisterModel(new(PackageVersion))
+}
+
+// PackageVersion represents a package version
+type PackageVersion struct {
+ ID int64 `xorm:"pk autoincr"`
+ PackageID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ CreatorID int64 `xorm:"NOT NULL DEFAULT 0"`
+ Version string `xorm:"NOT NULL"`
+ LowerVersion string `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created INDEX NOT NULL"`
+ IsInternal bool `xorm:"INDEX NOT NULL DEFAULT false"`
+ MetadataJSON string `xorm:"metadata_json LONGTEXT"`
+ DownloadCount int64 `xorm:"NOT NULL DEFAULT 0"`
+}
+
+// GetOrInsertVersion inserts a version. If the same version already exists, ErrDuplicatePackageVersion is returned
+func GetOrInsertVersion(ctx context.Context, pv *PackageVersion) (*PackageVersion, error) {
+ e := db.GetEngine(ctx)
+
+ existing := &PackageVersion{}
+
+ has, err := e.Where(builder.Eq{
+ "package_id": pv.PackageID,
+ "lower_version": pv.LowerVersion,
+ }).Get(existing)
+ if err != nil {
+ return nil, err
+ }
+ if has {
+ return existing, ErrDuplicatePackageVersion
+ }
+ if _, err = e.Insert(pv); err != nil {
+ return nil, err
+ }
+ return pv, nil
+}
+
+// UpdateVersion updates a version
+func UpdateVersion(ctx context.Context, pv *PackageVersion) error {
+ _, err := db.GetEngine(ctx).ID(pv.ID).Update(pv)
+ return err
+}
+
+// IncrementDownloadCounter increments the download counter of a version
+func IncrementDownloadCounter(ctx context.Context, versionID int64) error {
+ _, err := db.GetEngine(ctx).Exec("UPDATE `package_version` SET `download_count` = `download_count` + 1 WHERE `id` = ?", versionID)
+ return err
+}
+
+// GetVersionByID gets a version by id
+func GetVersionByID(ctx context.Context, versionID int64) (*PackageVersion, error) {
+ pv := &PackageVersion{}
+
+ has, err := db.GetEngine(ctx).ID(versionID).Get(pv)
+ if err != nil {
+ return nil, err
+ }
+ if !has {
+ return nil, ErrPackageNotExist
+ }
+ return pv, nil
+}
+
+// GetVersionByNameAndVersion gets a version by name and version number
+func GetVersionByNameAndVersion(ctx context.Context, ownerID int64, packageType Type, name, version string) (*PackageVersion, error) {
+ return getVersionByNameAndVersion(ctx, ownerID, packageType, name, version, false)
+}
+
+// GetInternalVersionByNameAndVersion gets an internal version by name and version number
+func GetInternalVersionByNameAndVersion(ctx context.Context, ownerID int64, packageType Type, name, version string) (*PackageVersion, error) {
+ return getVersionByNameAndVersion(ctx, ownerID, packageType, name, version, true)
+}
+
+func getVersionByNameAndVersion(ctx context.Context, ownerID int64, packageType Type, name, version string, isInternal bool) (*PackageVersion, error) {
+ pvs, _, err := SearchVersions(ctx, &PackageSearchOptions{
+ OwnerID: ownerID,
+ Type: packageType,
+ Name: SearchValue{
+ ExactMatch: true,
+ Value: name,
+ },
+ Version: SearchValue{
+ ExactMatch: true,
+ Value: version,
+ },
+ IsInternal: optional.Some(isInternal),
+ Paginator: db.NewAbsoluteListOptions(0, 1),
+ })
+ if err != nil {
+ return nil, err
+ }
+ if len(pvs) == 0 {
+ return nil, ErrPackageNotExist
+ }
+ return pvs[0], nil
+}
+
+// GetVersionsByPackageType gets all versions of a specific type
+func GetVersionsByPackageType(ctx context.Context, ownerID int64, packageType Type) ([]*PackageVersion, error) {
+ pvs, _, err := SearchVersions(ctx, &PackageSearchOptions{
+ OwnerID: ownerID,
+ Type: packageType,
+ IsInternal: optional.Some(false),
+ })
+ return pvs, err
+}
+
+// GetVersionsByPackageName gets all versions of a specific package
+func GetVersionsByPackageName(ctx context.Context, ownerID int64, packageType Type, name string) ([]*PackageVersion, error) {
+ pvs, _, err := SearchVersions(ctx, &PackageSearchOptions{
+ OwnerID: ownerID,
+ Type: packageType,
+ Name: SearchValue{
+ ExactMatch: true,
+ Value: name,
+ },
+ IsInternal: optional.Some(false),
+ })
+ return pvs, err
+}
+
+// DeleteVersionByID deletes a version by id
+func DeleteVersionByID(ctx context.Context, versionID int64) error {
+ _, err := db.GetEngine(ctx).ID(versionID).Delete(&PackageVersion{})
+ return err
+}
+
+// HasVersionFileReferences checks if there are associated files
+func HasVersionFileReferences(ctx context.Context, versionID int64) (bool, error) {
+ return db.GetEngine(ctx).Get(&PackageFile{
+ VersionID: versionID,
+ })
+}
+
+// SearchValue describes a value to search for.
+// If ExactMatch is true, the field must match the value exactly; otherwise a LIKE search is performed.
+type SearchValue struct {
+ Value string
+ ExactMatch bool
+}
+
+type VersionSort = string
+
+const (
+ SortNameAsc VersionSort = "name_asc"
+ SortNameDesc VersionSort = "name_desc"
+ SortVersionAsc VersionSort = "version_asc"
+ SortVersionDesc VersionSort = "version_desc"
+ SortCreatedAsc VersionSort = "created_asc"
+ SortCreatedDesc VersionSort = "created_desc"
+)
+
+// PackageSearchOptions are options for SearchXXX methods
+// All fields are optional and are ignored if they have their default value (nil, "", 0)
+type PackageSearchOptions struct {
+ OwnerID int64
+ RepoID int64
+ Type Type
+ PackageID int64
+ Name SearchValue // only results with the specified name are found
+ Version SearchValue // only results with the specified version are found
+ Properties map[string]string // only results that contain all listed version properties with the specified values are found
+ IsInternal optional.Option[bool]
+ HasFileWithName string // only results associated with a file of the specified name are found
+ HasFiles optional.Option[bool] // only results that have associated files are found
+ Sort VersionSort
+ db.Paginator
+}
+
+func (opts *PackageSearchOptions) ToConds() builder.Cond {
+ cond := builder.NewCond()
+ if opts.IsInternal.Has() {
+ cond = builder.Eq{
+ "package_version.is_internal": opts.IsInternal.Value(),
+ }
+ }
+
+ if opts.OwnerID != 0 {
+ cond = cond.And(builder.Eq{"package.owner_id": opts.OwnerID})
+ }
+ if opts.RepoID != 0 {
+ cond = cond.And(builder.Eq{"package.repo_id": opts.RepoID})
+ }
+ if opts.Type != "" && opts.Type != "all" {
+ cond = cond.And(builder.Eq{"package.type": opts.Type})
+ }
+ if opts.PackageID != 0 {
+ cond = cond.And(builder.Eq{"package.id": opts.PackageID})
+ }
+ if opts.Name.Value != "" {
+ if opts.Name.ExactMatch {
+ cond = cond.And(builder.Eq{"package.lower_name": strings.ToLower(opts.Name.Value)})
+ } else {
+ cond = cond.And(builder.Like{"package.lower_name", strings.ToLower(opts.Name.Value)})
+ }
+ }
+ if opts.Version.Value != "" {
+ if opts.Version.ExactMatch {
+ cond = cond.And(builder.Eq{"package_version.lower_version": strings.ToLower(opts.Version.Value)})
+ } else {
+ cond = cond.And(builder.Like{"package_version.lower_version", strings.ToLower(opts.Version.Value)})
+ }
+ }
+
+ if len(opts.Properties) != 0 {
+ var propsCond builder.Cond = builder.Eq{
+ "package_property.ref_type": PropertyTypeVersion,
+ }
+ propsCond = propsCond.And(builder.Expr("package_property.ref_id = package_version.id"))
+
+ propsCondBlock := builder.NewCond()
+ for name, value := range opts.Properties {
+ propsCondBlock = propsCondBlock.Or(builder.Eq{
+ "package_property.name": name,
+ "package_property.value": value,
+ })
+ }
+ propsCond = propsCond.And(propsCondBlock)
+
+ // All requested properties must be present, so the number of matching
+ // property rows must equal the number of searched properties.
+ cond = cond.And(builder.Eq{
+ strconv.Itoa(len(opts.Properties)): builder.Select("COUNT(*)").Where(propsCond).From("package_property"),
+ })
+ }
+
+ if opts.HasFileWithName != "" {
+ fileCond := builder.Expr("package_file.version_id = package_version.id").And(builder.Eq{"package_file.lower_name": strings.ToLower(opts.HasFileWithName)})
+
+ cond = cond.And(builder.Exists(builder.Select("package_file.id").From("package_file").Where(fileCond)))
+ }
+
+ if opts.HasFiles.Has() {
+ filesCond := builder.Exists(builder.Select("package_file.id").From("package_file").Where(builder.Expr("package_file.version_id = package_version.id")))
+
+ if !opts.HasFiles.Value() {
+ filesCond = builder.Not{filesCond}
+ }
+
+ cond = cond.And(filesCond)
+ }
+
+ return cond
+}
+
+func (opts *PackageSearchOptions) configureOrderBy(e db.Engine) {
+ switch opts.Sort {
+ case SortNameAsc:
+ e.Asc("package.name")
+ case SortNameDesc:
+ e.Desc("package.name")
+ case SortVersionDesc:
+ e.Desc("package_version.version")
+ case SortVersionAsc:
+ e.Asc("package_version.version")
+ case SortCreatedAsc:
+ e.Asc("package_version.created_unix")
+ default:
+ e.Desc("package_version.created_unix")
+ }
+
+ // Sort by id for stable order with duplicates in the other field
+ e.Asc("package_version.id")
+}
+
+// SearchVersions gets all versions of packages matching the search options
+func SearchVersions(ctx context.Context, opts *PackageSearchOptions) ([]*PackageVersion, int64, error) {
+ sess := db.GetEngine(ctx).
+ Select("package_version.*").
+ Table("package_version").
+ Join("INNER", "package", "package.id = package_version.package_id").
+ Where(opts.ToConds())
+
+ opts.configureOrderBy(sess)
+
+ if opts.Paginator != nil {
+ sess = db.SetSessionPagination(sess, opts)
+ }
+
+ pvs := make([]*PackageVersion, 0, 10)
+ count, err := sess.FindAndCount(&pvs)
+ return pvs, count, err
+}
+
+// SearchLatestVersions gets the latest version of every package matching the search options
+func SearchLatestVersions(ctx context.Context, opts *PackageSearchOptions) ([]*PackageVersion, int64, error) {
+ in := builder.
+ Select("MAX(package_version.id)").
+ From("package_version").
+ InnerJoin("package", "package.id = package_version.package_id").
+ Where(opts.ToConds()).
+ GroupBy("package_version.package_id")
+
+ sess := db.GetEngine(ctx).
+ Select("package_version.*").
+ Table("package_version").
+ Join("INNER", "package", "package.id = package_version.package_id").
+ Where(builder.In("package_version.id", in))
+
+ opts.configureOrderBy(sess)
+
+ if opts.Paginator != nil {
+ sess = db.SetSessionPagination(sess, opts)
+ }
+
+ pvs := make([]*PackageVersion, 0, 10)
+ count, err := sess.FindAndCount(&pvs)
+ return pvs, count, err
+}
+
+// ExistVersion checks if a version matching the search options exists
+func ExistVersion(ctx context.Context, opts *PackageSearchOptions) (bool, error) {
+ return db.GetEngine(ctx).
+ Where(opts.ToConds()).
+ Table("package_version").
+ Join("INNER", "package", "package.id = package_version.package_id").
+ Exist(new(PackageVersion))
+}
+
+// CountVersions counts all versions of packages matching the search options
+func CountVersions(ctx context.Context, opts *PackageSearchOptions) (int64, error) {
+ return db.GetEngine(ctx).
+ Where(opts.ToConds()).
+ Table("package_version").
+ Join("INNER", "package", "package.id = package_version.package_id").
+ Count(new(PackageVersion))
+}
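
SearchLatestVersions is the query behind a typical package listing: the newest version of every matching package. A sketch of a paged, non-internal listing for one owner, assuming a page size of 20:

    package example

    import (
        "context"

        "code.gitea.io/gitea/models/db"
        packages_model "code.gitea.io/gitea/models/packages"
        "code.gitea.io/gitea/modules/optional"
    )

    // listLatestVersions returns one page (1-based) of the newest version
    // of every non-internal package the owner has, newest first.
    func listLatestVersions(ctx context.Context, ownerID int64, page int) ([]*packages_model.PackageVersion, int64, error) {
        return packages_model.SearchLatestVersions(ctx, &packages_model.PackageSearchOptions{
            OwnerID:    ownerID,
            IsInternal: optional.Some(false),
            Sort:       packages_model.SortCreatedDesc,
            Paginator:  db.NewAbsoluteListOptions((page-1)*20, 20),
        })
    }
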
diff --git a/models/packages/rpm/search.go b/models/packages/rpm/search.go
new file mode 100644
index 0000000..e697421
--- /dev/null
+++ b/models/packages/rpm/search.go
@@ -0,0 +1,23 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package rpm
+
+import (
+ "context"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ rpm_module "code.gitea.io/gitea/modules/packages/rpm"
+)
+
+// GetGroups gets all available groups
+func GetGroups(ctx context.Context, ownerID int64) ([]string, error) {
+ return packages_model.GetDistinctPropertyValues(
+ ctx,
+ packages_model.TypeRpm,
+ ownerID,
+ packages_model.PropertyTypeFile,
+ rpm_module.PropertyGroup,
+ nil,
+ )
+}
diff --git a/models/perm/access/access.go b/models/perm/access/access.go
new file mode 100644
index 0000000..3e2568b
--- /dev/null
+++ b/models/perm/access/access.go
@@ -0,0 +1,250 @@
+// Copyright 2014 The Gogs Authors. All rights reserved.
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package access
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/organization"
+ "code.gitea.io/gitea/models/perm"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+
+ "xorm.io/builder"
+)
+
+// Access represents the highest access level of a user to the repository. The only access type
+// that is not in this table is the real owner of a repository. In case of an organization
+// repository, the members of the owners team are in this table.
+type Access struct {
+ ID int64 `xorm:"pk autoincr"`
+ UserID int64 `xorm:"UNIQUE(s)"`
+ RepoID int64 `xorm:"UNIQUE(s)"`
+ Mode perm.AccessMode
+}
+
+func init() {
+ db.RegisterModel(new(Access))
+}
+
+func accessLevel(ctx context.Context, user *user_model.User, repo *repo_model.Repository) (perm.AccessMode, error) {
+ mode := perm.AccessModeNone
+ var userID int64
+ restricted := false
+
+ if user != nil {
+ userID = user.ID
+ restricted = user.IsRestricted
+ }
+
+ if !restricted && !repo.IsPrivate {
+ mode = perm.AccessModeRead
+ }
+
+ if userID == 0 {
+ return mode, nil
+ }
+
+ if userID == repo.OwnerID {
+ return perm.AccessModeOwner, nil
+ }
+
+ a, exist, err := db.Get[Access](ctx, builder.Eq{"user_id": userID, "repo_id": repo.ID})
+ if err != nil {
+ return mode, err
+ } else if !exist {
+ return mode, nil
+ }
+ return a.Mode, nil
+}
+
+func maxAccessMode(modes ...perm.AccessMode) perm.AccessMode {
+ max := perm.AccessModeNone
+ for _, mode := range modes {
+ if mode > max {
+ max = mode
+ }
+ }
+ return max
+}
+
+type userAccess struct {
+ User *user_model.User
+ Mode perm.AccessMode
+}
+
+// updateUserAccess updates an access map so that the user has at least the given mode
+func updateUserAccess(accessMap map[int64]*userAccess, user *user_model.User, mode perm.AccessMode) {
+ if ua, ok := accessMap[user.ID]; ok {
+ ua.Mode = maxAccessMode(ua.Mode, mode)
+ } else {
+ accessMap[user.ID] = &userAccess{User: user, Mode: mode}
+ }
+}
+
+// FIXME: do cross-comparison to reduce deletions and additions to the minimum?
+func refreshAccesses(ctx context.Context, repo *repo_model.Repository, accessMap map[int64]*userAccess) (err error) {
+ minMode := perm.AccessModeRead
+ if err := repo.LoadOwner(ctx); err != nil {
+ return fmt.Errorf("LoadOwner: %w", err)
+ }
+
+ // If the repo isn't private and isn't owned by an organization,
+ // increase the minMode to Write.
+ if !repo.IsPrivate && !repo.Owner.IsOrganization() {
+ minMode = perm.AccessModeWrite
+ }
+
+ newAccesses := make([]Access, 0, len(accessMap))
+ for userID, ua := range accessMap {
+ if ua.Mode < minMode && !ua.User.IsRestricted {
+ continue
+ }
+
+ newAccesses = append(newAccesses, Access{
+ UserID: userID,
+ RepoID: repo.ID,
+ Mode: ua.Mode,
+ })
+ }
+
+ // Delete old accesses and insert new ones for repository.
+ if _, err = db.DeleteByBean(ctx, &Access{RepoID: repo.ID}); err != nil {
+ return fmt.Errorf("delete old accesses: %w", err)
+ }
+ if len(newAccesses) == 0 {
+ return nil
+ }
+
+ if err = db.Insert(ctx, newAccesses); err != nil {
+ return fmt.Errorf("insert new accesses: %w", err)
+ }
+ return nil
+}
+
+// refreshCollaboratorAccesses retrieves repository collaborations with their access modes.
+func refreshCollaboratorAccesses(ctx context.Context, repoID int64, accessMap map[int64]*userAccess) error {
+ collaborators, err := repo_model.GetCollaborators(ctx, repoID, db.ListOptions{})
+ if err != nil {
+ return fmt.Errorf("getCollaborations: %w", err)
+ }
+ for _, c := range collaborators {
+ if c.User.IsGhost() {
+ continue
+ }
+ updateUserAccess(accessMap, c.User, c.Collaboration.Mode)
+ }
+ return nil
+}
+
+// RecalculateTeamAccesses recalculates new accesses for teams of an organization,
+// except the team whose ID is given. It is used when a repository is removed
+// from a team.
+func RecalculateTeamAccesses(ctx context.Context, repo *repo_model.Repository, ignTeamID int64) (err error) {
+ accessMap := make(map[int64]*userAccess, 20)
+
+ if err = repo.LoadOwner(ctx); err != nil {
+ return err
+ } else if !repo.Owner.IsOrganization() {
+ return fmt.Errorf("owner is not an organization: %d", repo.OwnerID)
+ }
+
+ if err = refreshCollaboratorAccesses(ctx, repo.ID, accessMap); err != nil {
+ return fmt.Errorf("refreshCollaboratorAccesses: %w", err)
+ }
+
+ teams, err := organization.FindOrgTeams(ctx, repo.Owner.ID)
+ if err != nil {
+ return err
+ }
+
+ for _, t := range teams {
+ if t.ID == ignTeamID {
+ continue
+ }
+
+ // Owner team gets owner access, and skip for teams that do not
+ // have relations with repository.
+ if t.IsOwnerTeam() {
+ t.AccessMode = perm.AccessModeOwner
+ } else if !organization.HasTeamRepo(ctx, t.OrgID, t.ID, repo.ID) {
+ continue
+ }
+
+ if err = t.LoadMembers(ctx); err != nil {
+ return fmt.Errorf("getMembers '%d': %w", t.ID, err)
+ }
+ for _, m := range t.Members {
+ updateUserAccess(accessMap, m, t.AccessMode)
+ }
+ }
+
+ return refreshAccesses(ctx, repo, accessMap)
+}
+
+// RecalculateUserAccess recalculates new access for a single user.
+// Usable if we know the access change only affected one user.
+func RecalculateUserAccess(ctx context.Context, repo *repo_model.Repository, uid int64) (err error) {
+ minMode := perm.AccessModeRead
+ if !repo.IsPrivate {
+ minMode = perm.AccessModeWrite
+ }
+
+ accessMode := perm.AccessModeNone
+ e := db.GetEngine(ctx)
+ collaborator, err := repo_model.GetCollaboration(ctx, repo.ID, uid)
+ if err != nil {
+ return err
+ } else if collaborator != nil {
+ accessMode = collaborator.Mode
+ }
+
+ if err = repo.LoadOwner(ctx); err != nil {
+ return err
+ } else if repo.Owner.IsOrganization() {
+ var teams []organization.Team
+ if err := e.Join("INNER", "team_repo", "team_repo.team_id = team.id").
+ Join("INNER", "team_user", "team_user.team_id = team.id").
+ Where("team.org_id = ?", repo.OwnerID).
+ And("team_repo.repo_id=?", repo.ID).
+ And("team_user.uid=?", uid).
+ Find(&teams); err != nil {
+ return err
+ }
+
+ for _, t := range teams {
+ if t.IsOwnerTeam() {
+ t.AccessMode = perm.AccessModeOwner
+ }
+
+ accessMode = maxAccessMode(accessMode, t.AccessMode)
+ }
+ }
+
+ // Delete old user accesses and insert new one for repository.
+ if _, err = e.Delete(&Access{RepoID: repo.ID, UserID: uid}); err != nil {
+ return fmt.Errorf("delete old user accesses: %w", err)
+ } else if accessMode >= minMode {
+ if err = db.Insert(ctx, &Access{RepoID: repo.ID, UserID: uid, Mode: accessMode}); err != nil {
+ return fmt.Errorf("insert new user accesses: %w", err)
+ }
+ }
+ return nil
+}
+
+// RecalculateAccesses recalculates all accesses for repository.
+func RecalculateAccesses(ctx context.Context, repo *repo_model.Repository) error {
+ if repo.Owner.IsOrganization() {
+ return RecalculateTeamAccesses(ctx, repo, 0)
+ }
+
+ accessMap := make(map[int64]*userAccess, 20)
+ if err := refreshCollaboratorAccesses(ctx, repo.ID, accessMap); err != nil {
+ return fmt.Errorf("refreshCollaboratorAccesses: %w", err)
+ }
+ return refreshAccesses(ctx, repo, accessMap)
+}
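
The two recalculation entry points differ in scope: RecalculateUserAccess is the cheap path when only one user's collaboration changed, while RecalculateAccesses rebuilds everything. A sketch of a caller, assuming repo.Owner is already loaded (RecalculateAccesses dereferences it directly):

    package example

    import (
        "context"

        access_model "code.gitea.io/gitea/models/perm/access"
        repo_model "code.gitea.io/gitea/models/repo"
    )

    // refreshAfterChange picks the cheap single-user path when only one
    // collaborator changed, and the full rebuild otherwise.
    func refreshAfterChange(ctx context.Context, repo *repo_model.Repository, changedUID int64, teamsChanged bool) error {
        if !teamsChanged && changedUID != 0 {
            return access_model.RecalculateUserAccess(ctx, repo, changedUID)
        }
        return access_model.RecalculateAccesses(ctx, repo)
    }
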
diff --git a/models/perm/access/access_test.go b/models/perm/access/access_test.go
new file mode 100644
index 0000000..556f513
--- /dev/null
+++ b/models/perm/access/access_test.go
@@ -0,0 +1,127 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package access_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ perm_model "code.gitea.io/gitea/models/perm"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestAccessLevel(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+ user5 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 5})
+ user29 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 29})
+ // A public repository owned by User 2
+ repo1 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ assert.False(t, repo1.IsPrivate)
+ // A private repository owned by Org 3
+ repo3 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 3})
+ assert.True(t, repo3.IsPrivate)
+
+ // Another public repository
+ repo4 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 4})
+ assert.False(t, repo4.IsPrivate)
+ // org. owned private repo
+ repo24 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 24})
+
+ level, err := access_model.AccessLevel(db.DefaultContext, user2, repo1)
+ require.NoError(t, err)
+ assert.Equal(t, perm_model.AccessModeOwner, level)
+
+ level, err = access_model.AccessLevel(db.DefaultContext, user2, repo3)
+ require.NoError(t, err)
+ assert.Equal(t, perm_model.AccessModeOwner, level)
+
+ level, err = access_model.AccessLevel(db.DefaultContext, user5, repo1)
+ require.NoError(t, err)
+ assert.Equal(t, perm_model.AccessModeRead, level)
+
+ level, err = access_model.AccessLevel(db.DefaultContext, user5, repo3)
+ require.NoError(t, err)
+ assert.Equal(t, perm_model.AccessModeNone, level)
+
+ // restricted user has no access to a public repo
+ level, err = access_model.AccessLevel(db.DefaultContext, user29, repo1)
+ require.NoError(t, err)
+ assert.Equal(t, perm_model.AccessModeNone, level)
+
+ // ... unless he's a collaborator
+ level, err = access_model.AccessLevel(db.DefaultContext, user29, repo4)
+ require.NoError(t, err)
+ assert.Equal(t, perm_model.AccessModeWrite, level)
+
+ // ... or a team member
+ level, err = access_model.AccessLevel(db.DefaultContext, user29, repo24)
+ require.NoError(t, err)
+ assert.Equal(t, perm_model.AccessModeRead, level)
+}
+
+func TestHasAccess(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ user1 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+ user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 5})
+ // A public repository owned by User 2
+ repo1 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ assert.False(t, repo1.IsPrivate)
+ // A private repository owned by Org 3
+ repo2 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 3})
+ assert.True(t, repo2.IsPrivate)
+
+ has, err := access_model.HasAccess(db.DefaultContext, user1.ID, repo1)
+ require.NoError(t, err)
+ assert.True(t, has)
+
+ _, err = access_model.HasAccess(db.DefaultContext, user1.ID, repo2)
+ require.NoError(t, err)
+
+ _, err = access_model.HasAccess(db.DefaultContext, user2.ID, repo1)
+ require.NoError(t, err)
+
+ _, err = access_model.HasAccess(db.DefaultContext, user2.ID, repo2)
+ require.NoError(t, err)
+}
+
+func TestRepository_RecalculateAccesses(t *testing.T) {
+ // test with organization repo
+ require.NoError(t, unittest.PrepareTestDatabase())
+ repo1 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 3})
+ require.NoError(t, repo1.LoadOwner(db.DefaultContext))
+
+ _, err := db.GetEngine(db.DefaultContext).Delete(&repo_model.Collaboration{UserID: 2, RepoID: 3})
+ require.NoError(t, err)
+ require.NoError(t, access_model.RecalculateAccesses(db.DefaultContext, repo1))
+
+ access := &access_model.Access{UserID: 2, RepoID: 3}
+ has, err := db.GetEngine(db.DefaultContext).Get(access)
+ require.NoError(t, err)
+ assert.True(t, has)
+ assert.Equal(t, perm_model.AccessModeOwner, access.Mode)
+}
+
+func TestRepository_RecalculateAccesses2(t *testing.T) {
+ // test with non-organization repo
+ require.NoError(t, unittest.PrepareTestDatabase())
+ repo1 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 4})
+ require.NoError(t, repo1.LoadOwner(db.DefaultContext))
+
+ _, err := db.GetEngine(db.DefaultContext).Delete(&repo_model.Collaboration{UserID: 4, RepoID: 4})
+ require.NoError(t, err)
+ require.NoError(t, access_model.RecalculateAccesses(db.DefaultContext, repo1))
+
+ has, err := db.GetEngine(db.DefaultContext).Get(&access_model.Access{UserID: 4, RepoID: 4})
+ require.NoError(t, err)
+ assert.False(t, has)
+}
diff --git a/models/perm/access/main_test.go b/models/perm/access/main_test.go
new file mode 100644
index 0000000..0a350dc
--- /dev/null
+++ b/models/perm/access/main_test.go
@@ -0,0 +1,20 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package access_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/unittest"
+
+ _ "code.gitea.io/gitea/models"
+ _ "code.gitea.io/gitea/models/actions"
+ _ "code.gitea.io/gitea/models/activities"
+ _ "code.gitea.io/gitea/models/repo"
+ _ "code.gitea.io/gitea/models/user"
+)
+
+func TestMain(m *testing.M) {
+ unittest.MainTest(m)
+}
diff --git a/models/perm/access/repo_permission.go b/models/perm/access/repo_permission.go
new file mode 100644
index 0000000..7e39627
--- /dev/null
+++ b/models/perm/access/repo_permission.go
@@ -0,0 +1,450 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package access
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/organization"
+ perm_model "code.gitea.io/gitea/models/perm"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+)
+
+// Permission contains all the permission-related variables for a user's access to a repository
+type Permission struct {
+ AccessMode perm_model.AccessMode
+ Units []*repo_model.RepoUnit
+ UnitsMode map[unit.Type]perm_model.AccessMode
+}
+
+// IsOwner returns true if the current user is the owner of the repository.
+func (p *Permission) IsOwner() bool {
+ return p.AccessMode >= perm_model.AccessModeOwner
+}
+
+// IsAdmin returns true if the current user has admin or higher access to the repository.
+func (p *Permission) IsAdmin() bool {
+ return p.AccessMode >= perm_model.AccessModeAdmin
+}
+
+// IsGloballyWriteable returns true if the unit is writeable by all users of the instance.
+func (p *Permission) IsGloballyWriteable(unitType unit.Type) bool {
+ for _, u := range p.Units {
+ if u.Type == unitType {
+ return u.DefaultPermissions == repo_model.UnitAccessModeWrite
+ }
+ }
+ return false
+}
+
+// HasAccess returns true if the current user has at least read access to any unit of this repository
+func (p *Permission) HasAccess() bool {
+ if p.UnitsMode == nil {
+ return p.AccessMode >= perm_model.AccessModeRead
+ }
+ return len(p.UnitsMode) > 0
+}
+
+// UnitAccessMode returns the current user's access mode to the specified unit of the repository
+func (p *Permission) UnitAccessMode(unitType unit.Type) perm_model.AccessMode {
+ if p.UnitsMode == nil {
+ for _, u := range p.Units {
+ if u.Type == unitType {
+ return p.AccessMode
+ }
+ }
+ return perm_model.AccessModeNone
+ }
+ return p.UnitsMode[unitType]
+}
+
+// CanAccess returns true if user has mode access to the unit of the repository
+func (p *Permission) CanAccess(mode perm_model.AccessMode, unitType unit.Type) bool {
+ return p.UnitAccessMode(unitType) >= mode
+}
+
+// CanAccessAny returns true if user has mode access to any of the units of the repository
+func (p *Permission) CanAccessAny(mode perm_model.AccessMode, unitTypes ...unit.Type) bool {
+ for _, u := range unitTypes {
+ if p.CanAccess(mode, u) {
+ return true
+ }
+ }
+ return false
+}
+
+// CanRead returns true if the user can read this unit
+func (p *Permission) CanRead(unitType unit.Type) bool {
+ return p.CanAccess(perm_model.AccessModeRead, unitType)
+}
+
+// CanReadAny returns true if user has read access to any of the units of the repository
+func (p *Permission) CanReadAny(unitTypes ...unit.Type) bool {
+ return p.CanAccessAny(perm_model.AccessModeRead, unitTypes...)
+}
+
+// CanReadIssuesOrPulls returns true if the user can read pull requests when isPull is true,
+// or read issues when isPull is false
+func (p *Permission) CanReadIssuesOrPulls(isPull bool) bool {
+ if isPull {
+ return p.CanRead(unit.TypePullRequests)
+ }
+ return p.CanRead(unit.TypeIssues)
+}
+
+// CanWrite returns true if user could write to this unit
+func (p *Permission) CanWrite(unitType unit.Type) bool {
+ return p.CanAccess(perm_model.AccessModeWrite, unitType)
+}
+
+// CanWriteIssuesOrPulls returns true if the user can write to pull requests when isPull is true,
+// or write to issues when isPull is false
+func (p *Permission) CanWriteIssuesOrPulls(isPull bool) bool {
+ if isPull {
+ return p.CanWrite(unit.TypePullRequests)
+ }
+ return p.CanWrite(unit.TypeIssues)
+}
+
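+// Illustrative usage (editor's sketch, not part of the upstream source; "issue"
+// stands for a hypothetical caller-side value): callers resolve a Permission
+// once via GetUserRepoPermission and then query per-unit capabilities from it:
+//
+//	perm, err := GetUserRepoPermission(ctx, repo, user)
+//	if err != nil {
+//		return err
+//	}
+//	if perm.CanWriteIssuesOrPulls(issue.IsPull) {
+//		// the user may edit this issue or pull request
+//	}
+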
+func (p *Permission) LogString() string {
+ format := "<Permission AccessMode=%s, %d Units, %d UnitsMode(s): [ "
+ args := []any{p.AccessMode.String(), len(p.Units), len(p.UnitsMode)}
+
+ for i, unit := range p.Units {
+ config := ""
+ if unit.Config != nil {
+ configBytes, err := unit.Config.ToDB()
+ if err != nil {
+ config = err.Error()
+ } else {
+ config = string(configBytes)
+ }
+ }
+ format += "\nUnits[%d]: ID: %d RepoID: %d Type: %s Config: %s"
+ args = append(args, i, unit.ID, unit.RepoID, unit.Type.LogString(), config)
+ }
+ for key, value := range p.UnitsMode {
+ format += "\nUnitMode[%-v]: %-v"
+ args = append(args, key.LogString(), value.LogString())
+ }
+ format += " ]>"
+ return fmt.Sprintf(format, args...)
+}
+
+// GetUserRepoPermission returns the user's permissions for the repository
+func GetUserRepoPermission(ctx context.Context, repo *repo_model.Repository, user *user_model.User) (Permission, error) {
+ var perm Permission
+ if log.IsTrace() {
+ defer func() {
+ if user == nil {
+ log.Trace("Permission Loaded for anonymous user in %-v:\nPermissions: %-+v",
+ repo,
+ perm)
+ return
+ }
+ log.Trace("Permission Loaded for %-v in %-v:\nPermissions: %-+v",
+ user,
+ repo,
+ perm)
+ }()
+ }
+
+ // An anonymous user visits a private repo.
+ // TODO: anonymous user visit public unit of private repo???
+ if user == nil && repo.IsPrivate {
+ perm.AccessMode = perm_model.AccessModeNone
+ return perm, nil
+ }
+
+ var isCollaborator bool
+ var err error
+ if user != nil {
+ isCollaborator, err = repo_model.IsCollaborator(ctx, repo.ID, user.ID)
+ if err != nil {
+ return perm, err
+ }
+ }
+
+ if err := repo.LoadOwner(ctx); err != nil {
+ return perm, err
+ }
+
+ // Prevent strangers from checking out public repos of private organizations/users.
+ // Still allow users who are collaborators on a repo within a private user or organization, even if they are not members of the organization itself.
+ if !organization.HasOrgOrUserVisible(ctx, repo.Owner, user) && !isCollaborator {
+ perm.AccessMode = perm_model.AccessModeNone
+ return perm, nil
+ }
+
+ if err := repo.LoadUnits(ctx); err != nil {
+ return perm, err
+ }
+
+ perm.Units = repo.Units
+
+ // An anonymous user visits a public repo.
+ if user == nil {
+ perm.AccessMode = perm_model.AccessModeRead
+ return perm, nil
+ }
+
+ // An admin or the owner has super access to the repository
+ if user.IsAdmin || user.ID == repo.OwnerID {
+ perm.AccessMode = perm_model.AccessModeOwner
+ return perm, nil
+ }
+
+ // plain user
+ perm.AccessMode, err = accessLevel(ctx, user, repo)
+ if err != nil {
+ return perm, err
+ }
+
+ if err := repo.LoadOwner(ctx); err != nil {
+ return perm, err
+ }
+
+ if !repo.Owner.IsOrganization() {
+ // for a public repo, different repo units may have different default
+ // permissions for non-restricted users.
+ if !repo.IsPrivate && !user.IsRestricted && len(repo.Units) > 0 {
+ perm.UnitsMode = make(map[unit.Type]perm_model.AccessMode)
+ for _, u := range repo.Units {
+ if _, ok := perm.UnitsMode[u.Type]; !ok {
+ perm.UnitsMode[u.Type] = u.DefaultPermissions.ToAccessMode(perm.AccessMode)
+ }
+ }
+ }
+
+ return perm, nil
+ }
+
+ perm.UnitsMode = make(map[unit.Type]perm_model.AccessMode)
+
+ // Collaborators on an organization repository
+ if isCollaborator {
+ for _, u := range repo.Units {
+ perm.UnitsMode[u.Type] = perm.AccessMode
+ }
+ }
+
+ // get units mode from teams
+ teams, err := organization.GetUserRepoTeams(ctx, repo.OwnerID, user.ID, repo.ID)
+ if err != nil {
+ return perm, err
+ }
+
+ // if the user is in an owner team
+ for _, team := range teams {
+ if team.AccessMode >= perm_model.AccessModeAdmin {
+ perm.AccessMode = perm_model.AccessModeOwner
+ perm.UnitsMode = nil
+ return perm, nil
+ }
+ }
+
+ for _, u := range repo.Units {
+ var found bool
+ for _, team := range teams {
+ teamMode := team.UnitAccessMode(ctx, u.Type)
+ if teamMode > perm_model.AccessModeNone {
+ m := perm.UnitsMode[u.Type]
+ if m < teamMode {
+ perm.UnitsMode[u.Type] = teamMode
+ }
+ found = true
+ }
+ }
+
+ // for a public repo on an organization, a non-restricted user should
+ // have the same permission on non-team defined units as the default
+ // permissions for the repo unit.
+ if !found && !repo.IsPrivate && !user.IsRestricted {
+ if _, ok := perm.UnitsMode[u.Type]; !ok {
+ perm.UnitsMode[u.Type] = u.DefaultPermissions.ToAccessMode(perm_model.AccessModeRead)
+ }
+ }
+ }
+
+ // remove units with no permission
+ perm.Units = make([]*repo_model.RepoUnit, 0, len(repo.Units))
+ for t := range perm.UnitsMode {
+ for _, u := range repo.Units {
+ if u.Type == t {
+ perm.Units = append(perm.Units, u)
+ }
+ }
+ }
+
+ return perm, nil
+}
+
+// IsUserRealRepoAdmin checks if this user is a real repo admin
+func IsUserRealRepoAdmin(ctx context.Context, repo *repo_model.Repository, user *user_model.User) (bool, error) {
+ if repo.OwnerID == user.ID {
+ return true, nil
+ }
+
+ if err := repo.LoadOwner(ctx); err != nil {
+ return false, err
+ }
+
+ accessMode, err := accessLevel(ctx, user, repo)
+ if err != nil {
+ return false, err
+ }
+
+ return accessMode >= perm_model.AccessModeAdmin, nil
+}
+
+// IsUserRepoAdmin returns true if the user has admin rights on a repo
+func IsUserRepoAdmin(ctx context.Context, repo *repo_model.Repository, user *user_model.User) (bool, error) {
+ if user == nil || repo == nil {
+ return false, nil
+ }
+ if user.IsAdmin {
+ return true, nil
+ }
+
+ mode, err := accessLevel(ctx, user, repo)
+ if err != nil {
+ return false, err
+ }
+ if mode >= perm_model.AccessModeAdmin {
+ return true, nil
+ }
+
+ teams, err := organization.GetUserRepoTeams(ctx, repo.OwnerID, user.ID, repo.ID)
+ if err != nil {
+ return false, err
+ }
+
+ for _, team := range teams {
+ if team.AccessMode >= perm_model.AccessModeAdmin {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// AccessLevel returns the access a user has to a repository. Returns AccessModeNone if the
+// user does not have access.
+func AccessLevel(ctx context.Context, user *user_model.User, repo *repo_model.Repository) (perm_model.AccessMode, error) { //nolint
+ return AccessLevelUnit(ctx, user, repo, unit.TypeCode)
+}
+
+// AccessLevelUnit returns the access a user has to a repository's unit. Returns AccessModeNone if the
+// user does not have access.
+func AccessLevelUnit(ctx context.Context, user *user_model.User, repo *repo_model.Repository, unitType unit.Type) (perm_model.AccessMode, error) { //nolint
+ perm, err := GetUserRepoPermission(ctx, repo, user)
+ if err != nil {
+ return perm_model.AccessModeNone, err
+ }
+ return perm.UnitAccessMode(unitType), nil
+}
+
+// HasAccessUnit returns true if the user has at least testMode access to the given unit of the repository
+func HasAccessUnit(ctx context.Context, user *user_model.User, repo *repo_model.Repository, unitType unit.Type, testMode perm_model.AccessMode) (bool, error) {
+ mode, err := AccessLevelUnit(ctx, user, repo, unitType)
+ return testMode <= mode, err
+}
+
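+// Illustrative usage (editor's sketch, not part of the upstream source):
+// HasAccessUnit wraps AccessLevelUnit for simple threshold checks, e.g. to
+// verify read access to the wiki:
+//
+//	ok, err := HasAccessUnit(ctx, user, repo, unit.TypeWiki, perm_model.AccessModeRead)
+//	if err != nil {
+//		return err
+//	}
+//	if !ok {
+//		// deny the request
+//	}
+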
+// CanBeAssigned returns true if the user can be assigned to issues or pull requests in the repo.
+// Currently any write access (code, issues or PRs) is assignable, to match the assignee list in the user interface.
+func CanBeAssigned(ctx context.Context, user *user_model.User, repo *repo_model.Repository, _ bool) (bool, error) {
+ if user.IsOrganization() {
+ return false, fmt.Errorf("Organization can't be added as assignee [user_id: %d, repo_id: %d]", user.ID, repo.ID)
+ }
+ perm, err := GetUserRepoPermission(ctx, repo, user)
+ if err != nil {
+ return false, err
+ }
+ return perm.CanAccessAny(perm_model.AccessModeWrite, unit.AllRepoUnitTypes...) ||
+ perm.CanAccessAny(perm_model.AccessModeRead, unit.TypePullRequests), nil
+}
+
+// HasAccess returns true if the user has access to the repo
+func HasAccess(ctx context.Context, userID int64, repo *repo_model.Repository) (bool, error) {
+ var user *user_model.User
+ var err error
+ if userID > 0 {
+ user, err = user_model.GetUserByID(ctx, userID)
+ if err != nil {
+ return false, err
+ }
+ }
+ perm, err := GetUserRepoPermission(ctx, repo, user)
+ if err != nil {
+ return false, err
+ }
+ return perm.HasAccess(), nil
+}
+
+// getUsersWithAccessMode returns users that have at least the given access mode to the repository.
+func getUsersWithAccessMode(ctx context.Context, repo *repo_model.Repository, mode perm_model.AccessMode) (_ []*user_model.User, err error) {
+ if err = repo.LoadOwner(ctx); err != nil {
+ return nil, err
+ }
+
+ e := db.GetEngine(ctx)
+ accesses := make([]*Access, 0, 10)
+ if err = e.Where("repo_id = ? AND mode >= ?", repo.ID, mode).Find(&accesses); err != nil {
+ return nil, err
+ }
+
+ // Leave a seat for the owner itself to append later; if the owner turns out to be
+ // an organization, wasting one slot is cheaper than reallocating the slice.
+ users := make([]*user_model.User, 0, len(accesses)+1)
+ if len(accesses) > 0 {
+ userIDs := make([]int64, len(accesses))
+ for i, a := range accesses {
+ userIDs[i] = a.UserID
+ }
+
+ if err = e.In("id", userIDs).Find(&users); err != nil {
+ return nil, err
+ }
+ }
+ if !repo.Owner.IsOrganization() {
+ users = append(users, repo.Owner)
+ }
+
+ return users, nil
+}
+
+// GetRepoReaders returns all users that have explicit read access or higher to the repository.
+func GetRepoReaders(ctx context.Context, repo *repo_model.Repository) (_ []*user_model.User, err error) {
+ return getUsersWithAccessMode(ctx, repo, perm_model.AccessModeRead)
+}
+
+// GetRepoWriters returns all users that have write access to the repository.
+func GetRepoWriters(ctx context.Context, repo *repo_model.Repository) (_ []*user_model.User, err error) {
+ return getUsersWithAccessMode(ctx, repo, perm_model.AccessModeWrite)
+}
+
+// IsRepoReader returns true if user has explicit read access or higher to the repository.
+func IsRepoReader(ctx context.Context, repo *repo_model.Repository, userID int64) (bool, error) {
+ if repo.OwnerID == userID {
+ return true, nil
+ }
+ return db.GetEngine(ctx).Where("repo_id = ? AND user_id = ? AND mode >= ?", repo.ID, userID, perm_model.AccessModeRead).Get(&Access{})
+}
+
+// CheckRepoUnitUser checks whether the user can access the given unit of this repository
+func CheckRepoUnitUser(ctx context.Context, repo *repo_model.Repository, user *user_model.User, unitType unit.Type) bool {
+ if user != nil && user.IsAdmin {
+ return true
+ }
+ perm, err := GetUserRepoPermission(ctx, repo, user)
+ if err != nil {
+ log.Error("GetUserRepoPermission: %w", err)
+ return false
+ }
+
+ return perm.CanRead(unitType)
+}
diff --git a/models/perm/access_mode.go b/models/perm/access_mode.go
new file mode 100644
index 0000000..a37bc1f
--- /dev/null
+++ b/models/perm/access_mode.go
@@ -0,0 +1,57 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package perm
+
+import (
+ "fmt"
+)
+
+// AccessMode specifies the users access mode
+type AccessMode int
+
+const (
+ // AccessModeNone no access
+ AccessModeNone AccessMode = iota // 0
+ // AccessModeRead read access
+ AccessModeRead // 1
+ // AccessModeWrite write access
+ AccessModeWrite // 2
+ // AccessModeAdmin admin access
+ AccessModeAdmin // 3
+ // AccessModeOwner owner access
+ AccessModeOwner // 4
+)
+
+func (mode AccessMode) String() string {
+ switch mode {
+ case AccessModeRead:
+ return "read"
+ case AccessModeWrite:
+ return "write"
+ case AccessModeAdmin:
+ return "admin"
+ case AccessModeOwner:
+ return "owner"
+ default:
+ return "none"
+ }
+}
+
+func (mode AccessMode) LogString() string {
+ return fmt.Sprintf("<AccessMode:%d:%s>", mode, mode.String())
+}
+
+// ParseAccessMode returns corresponding access mode to given permission string.
+func ParseAccessMode(permission string) AccessMode {
+ switch permission {
+ case "read":
+ return AccessModeRead
+ case "write":
+ return AccessModeWrite
+ case "admin":
+ return AccessModeAdmin
+ default:
+ return AccessModeNone
+ }
+}
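+
+// Editor's note: ParseAccessMode and String are inverse for "read", "write"
+// and "admin", while anything else falls back to AccessModeNone; notably
+// "owner" is not parseable here:
+//
+//	ParseAccessMode("write").String() // "write"
+//	ParseAccessMode("owner")          // AccessModeNone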
diff --git a/models/project/column.go b/models/project/column.go
new file mode 100644
index 0000000..222f448
--- /dev/null
+++ b/models/project/column.go
@@ -0,0 +1,359 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package project
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "regexp"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+type (
+
+ // CardType is used to represent a project column card type
+ CardType uint8
+
+ // ColumnList is a list of all project columns in a repository
+ ColumnList []*Column
+)
+
+const (
+ // CardTypeTextOnly is a project column card type that is text only
+ CardTypeTextOnly CardType = iota
+
+ // CardTypeImagesAndText is a project column card type that has images and text
+ CardTypeImagesAndText
+)
+
+// ColumnColorPattern is a regexp which can validate ColumnColor
+var ColumnColorPattern = regexp.MustCompile("^#[0-9a-fA-F]{6}$")
+
+// Column is used to represent column on a project
+type Column struct {
+ ID int64 `xorm:"pk autoincr"`
+ Title string
+ Default bool `xorm:"NOT NULL DEFAULT false"` // issues not assigned to a specific column will be assigned to this column
+ Sorting int8 `xorm:"NOT NULL DEFAULT 0"`
+ Color string `xorm:"VARCHAR(7)"`
+
+ ProjectID int64 `xorm:"INDEX NOT NULL"`
+ CreatorID int64 `xorm:"NOT NULL"`
+
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+}
+
+// TableName returns the real table name
+func (Column) TableName() string {
+ return "project_board" // TODO: the legacy table name should be project_column
+}
+
+// NumIssues returns the number of issues assigned to the column
+func (c *Column) NumIssues(ctx context.Context) int {
+ total, err := db.GetEngine(ctx).Table("project_issue").
+ Where("project_id=?", c.ProjectID).
+ And("project_board_id=?", c.ID).
+ GroupBy("issue_id").
+ Cols("issue_id").
+ Count()
+ if err != nil {
+ return 0
+ }
+ return int(total)
+}
+
+func (c *Column) GetIssues(ctx context.Context) ([]*ProjectIssue, error) {
+ issues := make([]*ProjectIssue, 0, 5)
+ if err := db.GetEngine(ctx).Where("project_id=?", c.ProjectID).
+ And("project_board_id=?", c.ID).
+ OrderBy("sorting, id").
+ Find(&issues); err != nil {
+ return nil, err
+ }
+ return issues, nil
+}
+
+func init() {
+ db.RegisterModel(new(Column))
+}
+
+// IsCardTypeValid checks if the project column card type is valid
+func IsCardTypeValid(p CardType) bool {
+ switch p {
+ case CardTypeTextOnly, CardTypeImagesAndText:
+ return true
+ default:
+ return false
+ }
+}
+
+func createDefaultColumnsForProject(ctx context.Context, project *Project) error {
+ var items []string
+
+ switch project.TemplateType {
+ case TemplateTypeBugTriage:
+ items = setting.Project.ProjectBoardBugTriageType
+ case TemplateTypeBasicKanban:
+ items = setting.Project.ProjectBoardBasicKanbanType
+ case TemplateTypeNone:
+ fallthrough
+ default:
+ return nil
+ }
+
+ return db.WithTx(ctx, func(ctx context.Context) error {
+ column := Column{
+ CreatedUnix: timeutil.TimeStampNow(),
+ CreatorID: project.CreatorID,
+ Title: "Backlog",
+ ProjectID: project.ID,
+ Default: true,
+ }
+ if err := db.Insert(ctx, column); err != nil {
+ return err
+ }
+
+ if len(items) == 0 {
+ return nil
+ }
+
+ columns := make([]Column, 0, len(items))
+ for _, v := range items {
+ columns = append(columns, Column{
+ CreatedUnix: timeutil.TimeStampNow(),
+ CreatorID: project.CreatorID,
+ Title: v,
+ ProjectID: project.ID,
+ })
+ }
+
+ return db.Insert(ctx, columns)
+ })
+}
+
+// maxProjectColumns is the maximum number of columns allowed in a project; it must not be bigger than 127
+// because sorting is an int8 in the database
+const maxProjectColumns = 20
+
+// NewColumn adds a new project column to a given project
+func NewColumn(ctx context.Context, column *Column) error {
+ if len(column.Color) != 0 && !ColumnColorPattern.MatchString(column.Color) {
+ return fmt.Errorf("bad color code: %s", column.Color)
+ }
+
+ res := struct {
+ MaxSorting int64
+ ColumnCount int64
+ }{}
+ if _, err := db.GetEngine(ctx).Select("max(sorting) as max_sorting, count(*) as column_count").Table("project_board").
+ Where("project_id=?", column.ProjectID).Get(&res); err != nil {
+ return err
+ }
+ if res.ColumnCount >= maxProjectColumns {
+ return fmt.Errorf("NewBoard: maximum number of columns reached")
+ }
+ column.Sorting = int8(util.Iif(res.ColumnCount > 0, res.MaxSorting+1, 0))
+ _, err := db.GetEngine(ctx).Insert(column)
+ return err
+}
+
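+// Illustrative usage (editor's sketch; project and doer are hypothetical
+// caller-side values): Sorting is assigned automatically after the current
+// maximum, so callers only supply the descriptive fields:
+//
+//	err := NewColumn(ctx, &Column{
+//		Title:     "In Review",
+//		ProjectID: project.ID,
+//		CreatorID: doer.ID,
+//	})
+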
+// DeleteColumnByID deletes a project column, moving any issues it contains to the project's default column.
+func DeleteColumnByID(ctx context.Context, columnID int64) error {
+ return db.WithTx(ctx, func(ctx context.Context) error {
+ return deleteColumnByID(ctx, columnID)
+ })
+}
+
+func deleteColumnByID(ctx context.Context, columnID int64) error {
+ column, err := GetColumn(ctx, columnID)
+ if err != nil {
+ if IsErrProjectColumnNotExist(err) {
+ return nil
+ }
+
+ return err
+ }
+
+ if column.Default {
+ return fmt.Errorf("deleteColumnByID: cannot delete default column")
+ }
+
+ // move all issues to the default column
+ project, err := GetProjectByID(ctx, column.ProjectID)
+ if err != nil {
+ return err
+ }
+ defaultColumn, err := project.GetDefaultColumn(ctx)
+ if err != nil {
+ return err
+ }
+
+ if err = column.moveIssuesToAnotherColumn(ctx, defaultColumn); err != nil {
+ return err
+ }
+
+ if _, err := db.GetEngine(ctx).ID(column.ID).NoAutoCondition().Delete(column); err != nil {
+ return err
+ }
+ return nil
+}
+
+func deleteColumnByProjectID(ctx context.Context, projectID int64) error {
+ _, err := db.GetEngine(ctx).Where("project_id=?", projectID).Delete(&Column{})
+ return err
+}
+
+// GetColumn fetches a project column by its ID
+func GetColumn(ctx context.Context, columnID int64) (*Column, error) {
+ column := new(Column)
+ has, err := db.GetEngine(ctx).ID(columnID).Get(column)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrProjectColumnNotExist{ColumnID: columnID}
+ }
+
+ return column, nil
+}
+
+// UpdateColumn updates a project column
+func UpdateColumn(ctx context.Context, column *Column) error {
+ var fieldToUpdate []string
+
+ if column.Sorting != 0 {
+ fieldToUpdate = append(fieldToUpdate, "sorting")
+ }
+
+ if column.Title != "" {
+ fieldToUpdate = append(fieldToUpdate, "title")
+ }
+
+ if len(column.Color) != 0 && !ColumnColorPattern.MatchString(column.Color) {
+ return fmt.Errorf("bad color code: %s", column.Color)
+ }
+ fieldToUpdate = append(fieldToUpdate, "color")
+
+ _, err := db.GetEngine(ctx).ID(column.ID).Cols(fieldToUpdate...).Update(column)
+
+ return err
+}
+
+// GetColumns fetches all columns related to a project
+func (p *Project) GetColumns(ctx context.Context) (ColumnList, error) {
+ columns := make([]*Column, 0, 5)
+ if err := db.GetEngine(ctx).Where("project_id=?", p.ID).OrderBy("sorting, id").Find(&columns); err != nil {
+ return nil, err
+ }
+
+ return columns, nil
+}
+
+// GetDefaultColumn returns the default column, creating one if none exists
+func (p *Project) GetDefaultColumn(ctx context.Context) (*Column, error) {
+ var column Column
+ has, err := db.GetEngine(ctx).
+ Where("project_id=? AND `default` = ?", p.ID, true).
+ Desc("id").Get(&column)
+ if err != nil {
+ return nil, err
+ }
+
+ if has {
+ return &column, nil
+ }
+
+ // create a default column if none is found
+ column = Column{
+ ProjectID: p.ID,
+ Default: true,
+ Title: "Uncategorized",
+ CreatorID: p.CreatorID,
+ }
+ if _, err := db.GetEngine(ctx).Insert(&column); err != nil {
+ return nil, err
+ }
+ return &column, nil
+}
+
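+// Editor's note: if several columns are flagged as default, the Desc("id")
+// query above makes the newest one win; combined with the lazy creation path,
+// callers can rely on always getting exactly one default column back.
+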
+// SetDefaultColumn makes the given column the default for issues not assigned to a column, unsetting any previous default
+func SetDefaultColumn(ctx context.Context, projectID, columnID int64) error {
+ return db.WithTx(ctx, func(ctx context.Context) error {
+ if _, err := GetColumn(ctx, columnID); err != nil {
+ return err
+ }
+
+ if _, err := db.GetEngine(ctx).Where(builder.Eq{
+ "project_id": projectID,
+ "`default`": true,
+ }).Cols("`default`").Update(&Column{Default: false}); err != nil {
+ return err
+ }
+
+ _, err := db.GetEngine(ctx).ID(columnID).
+ Where(builder.Eq{"project_id": projectID}).
+ Cols("`default`").Update(&Column{Default: true})
+ return err
+ })
+}
+
+// UpdateColumnSorting updates the sorting of project columns
+func UpdateColumnSorting(ctx context.Context, cl ColumnList) error {
+ return db.WithTx(ctx, func(ctx context.Context) error {
+ for i := range cl {
+ if _, err := db.GetEngine(ctx).ID(cl[i].ID).Cols(
+ "sorting",
+ ).Update(cl[i]); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+}
+
+func GetColumnsByIDs(ctx context.Context, projectID int64, columnsIDs []int64) (ColumnList, error) {
+ columns := make([]*Column, 0, 5)
+ if err := db.GetEngine(ctx).
+ Where("project_id =?", projectID).
+ In("id", columnsIDs).
+ OrderBy("sorting").Find(&columns); err != nil {
+ return nil, err
+ }
+ return columns, nil
+}
+
+// MoveColumnsOnProject updates the sorting of a project's columns according to the given map of sorting position to column ID
+func MoveColumnsOnProject(ctx context.Context, project *Project, sortedColumnIDs map[int64]int64) error {
+ return db.WithTx(ctx, func(ctx context.Context) error {
+ sess := db.GetEngine(ctx)
+ columnIDs := util.ValuesOfMap(sortedColumnIDs)
+ movedColumns, err := GetColumnsByIDs(ctx, project.ID, columnIDs)
+ if err != nil {
+ return err
+ }
+ if len(movedColumns) != len(sortedColumnIDs) {
+ return errors.New("some columns do not exist")
+ }
+
+ for _, column := range movedColumns {
+ if column.ProjectID != project.ID {
+ return fmt.Errorf("column[%d]'s projectID is not equal to project's ID [%d]", column.ProjectID, project.ID)
+ }
+ }
+
+ for sorting, columnID := range sortedColumnIDs {
+ if _, err := sess.Exec("UPDATE `project_board` SET sorting=? WHERE id=?", sorting, columnID); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+}
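+
+// Illustrative usage of MoveColumnsOnProject (editor's sketch; colA..colC are
+// hypothetical columns): the map key is the new sorting position and the
+// value is the column ID, so reversing three columns looks like:
+//
+//	err := MoveColumnsOnProject(ctx, project, map[int64]int64{
+//		0: colC.ID,
+//		1: colB.ID,
+//		2: colA.ID,
+//	})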
diff --git a/models/project/column_test.go b/models/project/column_test.go
new file mode 100644
index 0000000..b02a5b5
--- /dev/null
+++ b/models/project/column_test.go
@@ -0,0 +1,128 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package project
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGetDefaultColumn(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ projectWithoutDefault, err := GetProjectByID(db.DefaultContext, 5)
+ require.NoError(t, err)
+
+ // check that a default column is created when none exists
+ column, err := projectWithoutDefault.GetDefaultColumn(db.DefaultContext)
+ require.NoError(t, err)
+ assert.Equal(t, int64(5), column.ProjectID)
+ assert.Equal(t, "Uncategorized", column.Title)
+
+ projectWithMultipleDefaults, err := GetProjectByID(db.DefaultContext, 6)
+ require.NoError(t, err)
+
+ // check that the newest default column wins when multiple defaults exist
+ column, err = projectWithMultipleDefaults.GetDefaultColumn(db.DefaultContext)
+ require.NoError(t, err)
+ assert.Equal(t, int64(6), column.ProjectID)
+ assert.Equal(t, int64(9), column.ID)
+
+ // set 8 as default column
+ require.NoError(t, SetDefaultColumn(db.DefaultContext, column.ProjectID, 8))
+
+ // then 9 will become a non-default column
+ column, err = GetColumn(db.DefaultContext, 9)
+ require.NoError(t, err)
+ assert.Equal(t, int64(6), column.ProjectID)
+ assert.False(t, column.Default)
+}
+
+func Test_moveIssuesToAnotherColumn(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ column1 := unittest.AssertExistsAndLoadBean(t, &Column{ID: 1, ProjectID: 1})
+
+ issues, err := column1.GetIssues(db.DefaultContext)
+ require.NoError(t, err)
+ assert.Len(t, issues, 1)
+ assert.EqualValues(t, 1, issues[0].ID)
+
+ column2 := unittest.AssertExistsAndLoadBean(t, &Column{ID: 2, ProjectID: 1})
+ issues, err = column2.GetIssues(db.DefaultContext)
+ require.NoError(t, err)
+ assert.Len(t, issues, 1)
+ assert.EqualValues(t, 3, issues[0].ID)
+
+ err = column1.moveIssuesToAnotherColumn(db.DefaultContext, column2)
+ require.NoError(t, err)
+
+ issues, err = column1.GetIssues(db.DefaultContext)
+ require.NoError(t, err)
+ assert.Empty(t, issues)
+
+ issues, err = column2.GetIssues(db.DefaultContext)
+ require.NoError(t, err)
+ assert.Len(t, issues, 2)
+ assert.EqualValues(t, 3, issues[0].ID)
+ assert.EqualValues(t, 0, issues[0].Sorting)
+ assert.EqualValues(t, 1, issues[1].ID)
+ assert.EqualValues(t, 1, issues[1].Sorting)
+}
+
+func Test_MoveColumnsOnProject(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ project1 := unittest.AssertExistsAndLoadBean(t, &Project{ID: 1})
+ columns, err := project1.GetColumns(db.DefaultContext)
+ require.NoError(t, err)
+ assert.Len(t, columns, 3)
+ assert.EqualValues(t, 0, columns[0].Sorting) // the code should work even when no explicit sorting has been set
+ assert.EqualValues(t, 0, columns[1].Sorting)
+ assert.EqualValues(t, 0, columns[2].Sorting)
+
+ err = MoveColumnsOnProject(db.DefaultContext, project1, map[int64]int64{
+ 0: columns[1].ID,
+ 1: columns[2].ID,
+ 2: columns[0].ID,
+ })
+ require.NoError(t, err)
+
+ columnsAfter, err := project1.GetColumns(db.DefaultContext)
+ require.NoError(t, err)
+ assert.Len(t, columnsAfter, 3)
+ assert.EqualValues(t, columns[1].ID, columnsAfter[0].ID)
+ assert.EqualValues(t, columns[2].ID, columnsAfter[1].ID)
+ assert.EqualValues(t, columns[0].ID, columnsAfter[2].ID)
+}
+
+func Test_NewColumn(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ project1 := unittest.AssertExistsAndLoadBean(t, &Project{ID: 1})
+ columns, err := project1.GetColumns(db.DefaultContext)
+ require.NoError(t, err)
+ assert.Len(t, columns, 3)
+
+ for i := 0; i < maxProjectColumns-3; i++ {
+ err := NewColumn(db.DefaultContext, &Column{
+ Title: fmt.Sprintf("column-%d", i+4),
+ ProjectID: project1.ID,
+ })
+ require.NoError(t, err)
+ }
+ err = NewColumn(db.DefaultContext, &Column{
+ Title: "column-21",
+ ProjectID: project1.ID,
+ })
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "maximum number of columns reached")
+}
diff --git a/models/project/issue.go b/models/project/issue.go
new file mode 100644
index 0000000..3361b53
--- /dev/null
+++ b/models/project/issue.go
@@ -0,0 +1,143 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package project
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/util"
+)
+
+// ProjectIssue stores the relation between an issue and a project
+type ProjectIssue struct { //revive:disable-line:exported
+ ID int64 `xorm:"pk autoincr"`
+ IssueID int64 `xorm:"INDEX"`
+ ProjectID int64 `xorm:"INDEX"`
+
+ // ProjectColumnID should not be zero since 1.22. If it's zero, the issue will not be displayed in the UI and it might result in errors.
+ ProjectColumnID int64 `xorm:"'project_board_id' INDEX"`
+
+ // the sorting order on the column
+ Sorting int64 `xorm:"NOT NULL DEFAULT 0"`
+}
+
+func init() {
+ db.RegisterModel(new(ProjectIssue))
+}
+
+func deleteProjectIssuesByProjectID(ctx context.Context, projectID int64) error {
+ _, err := db.GetEngine(ctx).Where("project_id=?", projectID).Delete(&ProjectIssue{})
+ return err
+}
+
+// NumIssues returns the number of issues assigned to the project
+func (p *Project) NumIssues(ctx context.Context) int {
+ c, err := db.GetEngine(ctx).Table("project_issue").
+ Where("project_id=?", p.ID).
+ GroupBy("issue_id").
+ Cols("issue_id").
+ Count()
+ if err != nil {
+ log.Error("NumIssues: %v", err)
+ return 0
+ }
+ return int(c)
+}
+
+// NumClosedIssues returns the number of closed issues assigned to the project
+func (p *Project) NumClosedIssues(ctx context.Context) int {
+ c, err := db.GetEngine(ctx).Table("project_issue").
+ Join("INNER", "issue", "project_issue.issue_id=issue.id").
+ Where("project_issue.project_id=? AND issue.is_closed=?", p.ID, true).
+ Cols("issue_id").
+ Count()
+ if err != nil {
+ log.Error("NumClosedIssues: %v", err)
+ return 0
+ }
+ return int(c)
+}
+
+// NumOpenIssues returns the number of open issues assigned to the project
+func (p *Project) NumOpenIssues(ctx context.Context) int {
+ c, err := db.GetEngine(ctx).Table("project_issue").
+ Join("INNER", "issue", "project_issue.issue_id=issue.id").
+ Where("project_issue.project_id=? AND issue.is_closed=?", p.ID, false).
+ Cols("issue_id").
+ Count()
+ if err != nil {
+ log.Error("NumOpenIssues: %v", err)
+ return 0
+ }
+ return int(c)
+}
+
+// MoveIssuesOnProjectColumn moves or keeps issues in a column and sorts them inside that column
+func MoveIssuesOnProjectColumn(ctx context.Context, column *Column, sortedIssueIDs map[int64]int64) error {
+ return db.WithTx(ctx, func(ctx context.Context) error {
+ sess := db.GetEngine(ctx)
+ issueIDs := util.ValuesOfMap(sortedIssueIDs)
+
+ count, err := sess.Table(new(ProjectIssue)).Where("project_id=?", column.ProjectID).In("issue_id", issueIDs).Count()
+ if err != nil {
+ return err
+ }
+ if int(count) != len(sortedIssueIDs) {
+ return fmt.Errorf("all issues have to be added to a project first")
+ }
+
+ for sorting, issueID := range sortedIssueIDs {
+ _, err = sess.Exec("UPDATE `project_issue` SET project_board_id=?, sorting=? WHERE issue_id=?", column.ID, sorting, issueID)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+}
+
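+// Editor's note: as with MoveColumnsOnProject in column.go, the map passed to
+// MoveIssuesOnProjectColumn is keyed by the new sorting position with the
+// issue ID as the value; if any referenced issue is not already attached to
+// the project, the transaction fails before any row is updated.
+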
+func (c *Column) moveIssuesToAnotherColumn(ctx context.Context, newColumn *Column) error {
+ if c.ProjectID != newColumn.ProjectID {
+ return fmt.Errorf("columns have to be in the same project")
+ }
+
+ if c.ID == newColumn.ID {
+ return nil
+ }
+
+ res := struct {
+ MaxSorting int64
+ IssueCount int64
+ }{}
+ if _, err := db.GetEngine(ctx).Select("max(sorting) as max_sorting, count(*) as issue_count").
+ Table("project_issue").
+ Where("project_id=?", newColumn.ProjectID).
+ And("project_board_id=?", newColumn.ID).
+ Get(&res); err != nil {
+ return err
+ }
+
+ issues, err := c.GetIssues(ctx)
+ if err != nil {
+ return err
+ }
+ if len(issues) == 0 {
+ return nil
+ }
+
+ nextSorting := util.Iif(res.IssueCount > 0, res.MaxSorting+1, 0)
+ return db.WithTx(ctx, func(ctx context.Context) error {
+ for i, issue := range issues {
+ issue.ProjectColumnID = newColumn.ID
+ issue.Sorting = nextSorting + int64(i)
+ if _, err := db.GetEngine(ctx).ID(issue.ID).Cols("project_board_id", "sorting").Update(issue); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+}
diff --git a/models/project/main_test.go b/models/project/main_test.go
new file mode 100644
index 0000000..f4b2d6f
--- /dev/null
+++ b/models/project/main_test.go
@@ -0,0 +1,23 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package project
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/unittest"
+
+ _ "code.gitea.io/gitea/models/repo"
+)
+
+func TestMain(m *testing.M) {
+ unittest.MainTest(m, &unittest.TestOptions{
+ FixtureFiles: []string{
+ "project.yml",
+ "project_board.yml",
+ "project_issue.yml",
+ "repository.yml",
+ },
+ })
+}
diff --git a/models/project/project.go b/models/project/project.go
new file mode 100644
index 0000000..8cebf34
--- /dev/null
+++ b/models/project/project.go
@@ -0,0 +1,451 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package project
+
+import (
+ "context"
+ "fmt"
+ "html/template"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/optional"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+type (
+ // CardConfig is used to identify the type of column card that is being used
+ CardConfig struct {
+ CardType CardType
+ Translation string
+ }
+
+ // Type is used to identify the type of project in question and ownership
+ Type uint8
+)
+
+const (
+ // TypeIndividual is a type of project column that is owned by an individual
+ TypeIndividual Type = iota + 1
+
+ // TypeRepository is a project that is tied to a repository
+ TypeRepository
+
+ // TypeOrganization is a project that is tied to an organisation
+ TypeOrganization
+)
+
+// ErrProjectNotExist represents a "ProjectNotExist" kind of error.
+type ErrProjectNotExist struct {
+ ID int64
+ RepoID int64
+}
+
+// IsErrProjectNotExist checks if an error is a ErrProjectNotExist
+func IsErrProjectNotExist(err error) bool {
+ _, ok := err.(ErrProjectNotExist)
+ return ok
+}
+
+func (err ErrProjectNotExist) Error() string {
+ return fmt.Sprintf("projects does not exist [id: %d]", err.ID)
+}
+
+func (err ErrProjectNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ErrProjectColumnNotExist represents a "ErrProjectColumnNotExist" kind of error.
+type ErrProjectColumnNotExist struct {
+ ColumnID int64
+}
+
+// IsErrProjectColumnNotExist checks if an error is a ErrProjectColumnNotExist
+func IsErrProjectColumnNotExist(err error) bool {
+ _, ok := err.(ErrProjectColumnNotExist)
+ return ok
+}
+
+func (err ErrProjectColumnNotExist) Error() string {
+ return fmt.Sprintf("project column does not exist [id: %d]", err.ColumnID)
+}
+
+func (err ErrProjectColumnNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// Project represents a project
+type Project struct {
+ ID int64 `xorm:"pk autoincr"`
+ Title string `xorm:"INDEX NOT NULL"`
+ Description string `xorm:"TEXT"`
+ OwnerID int64 `xorm:"INDEX"`
+ Owner *user_model.User `xorm:"-"`
+ RepoID int64 `xorm:"INDEX"`
+ Repo *repo_model.Repository `xorm:"-"`
+ CreatorID int64 `xorm:"NOT NULL"`
+ IsClosed bool `xorm:"INDEX"`
+ TemplateType TemplateType `xorm:"'board_type'"` // TODO: rename the column to template_type
+ CardType CardType
+ Type Type
+
+ RenderedContent template.HTML `xorm:"-"`
+
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ ClosedDateUnix timeutil.TimeStamp
+}
+
+// GhostProjectID is the ID of a ghost project, i.e. a project which has been deleted
+const GhostProjectID = -1
+
+func (p *Project) IsGhost() bool {
+ return p.ID == GhostProjectID
+}
+
+func (p *Project) LoadOwner(ctx context.Context) (err error) {
+ if p.Owner != nil {
+ return nil
+ }
+ p.Owner, err = user_model.GetUserByID(ctx, p.OwnerID)
+ return err
+}
+
+func (p *Project) LoadRepo(ctx context.Context) (err error) {
+ if p.RepoID == 0 || p.Repo != nil {
+ return nil
+ }
+ p.Repo, err = repo_model.GetRepositoryByID(ctx, p.RepoID)
+ return err
+}
+
+// Link returns the project's relative URL.
+func (p *Project) Link(ctx context.Context) string {
+ if p.OwnerID > 0 {
+ err := p.LoadOwner(ctx)
+ if err != nil {
+ log.Error("LoadOwner: %v", err)
+ return ""
+ }
+ return fmt.Sprintf("%s/-/projects/%d", p.Owner.HomeLink(), p.ID)
+ }
+ if p.RepoID > 0 {
+ err := p.LoadRepo(ctx)
+ if err != nil {
+ log.Error("LoadRepo: %v", err)
+ return ""
+ }
+ return fmt.Sprintf("%s/projects/%d", p.Repo.Link(), p.ID)
+ }
+ return ""
+}
+
+func (p *Project) IconName() string {
+ if p.IsRepositoryProject() {
+ return "octicon-project"
+ }
+ return "octicon-project-symlink"
+}
+
+func (p *Project) IsOrganizationProject() bool {
+ return p.Type == TypeOrganization
+}
+
+func (p *Project) IsRepositoryProject() bool {
+ return p.Type == TypeRepository
+}
+
+func (p *Project) CanBeAccessedByOwnerRepo(ownerID int64, repo *repo_model.Repository) bool {
+ if p.Type == TypeRepository {
+ return repo != nil && p.RepoID == repo.ID // if a project belongs to a repository, then its OwnerID is 0 and can be ignored
+ }
+ return p.OwnerID == ownerID && p.RepoID == 0
+}
+
+func init() {
+ db.RegisterModel(new(Project))
+}
+
+// GetCardConfig returns the available card type configurations for project columns
+func GetCardConfig() []CardConfig {
+ return []CardConfig{
+ {CardTypeTextOnly, "repo.projects.card_type.text_only"},
+ {CardTypeImagesAndText, "repo.projects.card_type.images_and_text"},
+ }
+}
+
+// IsTypeValid checks if a project type is valid
+func IsTypeValid(p Type) bool {
+ switch p {
+ case TypeIndividual, TypeRepository, TypeOrganization:
+ return true
+ default:
+ return false
+ }
+}
+
+// SearchOptions are options for GetProjects
+type SearchOptions struct {
+ db.ListOptions
+ OwnerID int64
+ RepoID int64
+ IsClosed optional.Option[bool]
+ OrderBy db.SearchOrderBy
+ Type Type
+ Title string
+}
+
+func (opts SearchOptions) ToConds() builder.Cond {
+ cond := builder.NewCond()
+ if opts.RepoID > 0 {
+ cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
+ }
+ if opts.IsClosed.Has() {
+ cond = cond.And(builder.Eq{"is_closed": opts.IsClosed.Value()})
+ }
+
+ if opts.Type > 0 {
+ cond = cond.And(builder.Eq{"type": opts.Type})
+ }
+ if opts.OwnerID > 0 {
+ cond = cond.And(builder.Eq{"owner_id": opts.OwnerID})
+ }
+
+ if len(opts.Title) != 0 {
+ cond = cond.And(db.BuildCaseInsensitiveLike("title", opts.Title))
+ }
+ return cond
+}
+
+func (opts SearchOptions) ToOrders() string {
+ return opts.OrderBy.String()
+}
+
+func GetSearchOrderByBySortType(sortType string) db.SearchOrderBy {
+ switch sortType {
+ case "oldest":
+ return db.SearchOrderByOldest
+ case "recentupdate":
+ return db.SearchOrderByRecentUpdated
+ case "leastupdate":
+ return db.SearchOrderByLeastUpdated
+ default:
+ return db.SearchOrderByNewest
+ }
+}
+
+// NewProject creates a new Project
+func NewProject(ctx context.Context, p *Project) error {
+ if !IsTemplateTypeValid(p.TemplateType) {
+ p.TemplateType = TemplateTypeNone
+ }
+
+ if !IsCardTypeValid(p.CardType) {
+ p.CardType = CardTypeTextOnly
+ }
+
+ if !IsTypeValid(p.Type) {
+ return util.NewInvalidArgumentErrorf("project type is not valid")
+ }
+
+ return db.WithTx(ctx, func(ctx context.Context) error {
+ if err := db.Insert(ctx, p); err != nil {
+ return err
+ }
+
+ if p.RepoID > 0 {
+ if _, err := db.Exec(ctx, "UPDATE `repository` SET num_projects = num_projects + 1 WHERE id = ?", p.RepoID); err != nil {
+ return err
+ }
+ }
+
+ return createDefaultColumnsForProject(ctx, p)
+ })
+}
+
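+// Illustrative usage (editor's sketch; repo and doer are hypothetical
+// caller-side values): a minimal repository project. Only Type is strictly
+// validated above; invalid template and card types silently fall back:
+//
+//	err := NewProject(ctx, &Project{
+//		Title:        "Release 1.0",
+//		RepoID:       repo.ID,
+//		CreatorID:    doer.ID,
+//		Type:         TypeRepository,
+//		TemplateType: TemplateTypeBasicKanban,
+//		CardType:     CardTypeTextOnly,
+//	})
+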
+// GetProjectByID returns a project by its ID
+func GetProjectByID(ctx context.Context, id int64) (*Project, error) {
+ p := new(Project)
+
+ has, err := db.GetEngine(ctx).ID(id).Get(p)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrProjectNotExist{ID: id}
+ }
+
+ return p, nil
+}
+
+// GetProjectForRepoByID returns a project by its ID, scoped to the given repository
+func GetProjectForRepoByID(ctx context.Context, repoID, id int64) (*Project, error) {
+ p := new(Project)
+ has, err := db.GetEngine(ctx).Where("id=? AND repo_id=?", id, repoID).Get(p)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrProjectNotExist{ID: id}
+ }
+ return p, nil
+}
+
+// UpdateProject updates project properties
+func UpdateProject(ctx context.Context, p *Project) error {
+ if !IsCardTypeValid(p.CardType) {
+ p.CardType = CardTypeTextOnly
+ }
+
+ _, err := db.GetEngine(ctx).ID(p.ID).Cols(
+ "title",
+ "description",
+ "card_type",
+ ).Update(p)
+ return err
+}
+
+func updateRepositoryProjectCount(ctx context.Context, repoID int64) error {
+ if _, err := db.GetEngine(ctx).Exec(builder.Update(
+ builder.Eq{
+ "`num_projects`": builder.Select("count(*)").From("`project`").
+ Where(builder.Eq{"`project`.`repo_id`": repoID}.
+ And(builder.Eq{"`project`.`type`": TypeRepository})),
+ }).From("`repository`").Where(builder.Eq{"id": repoID})); err != nil {
+ return err
+ }
+
+ if _, err := db.GetEngine(ctx).Exec(builder.Update(
+ builder.Eq{
+ "`num_closed_projects`": builder.Select("count(*)").From("`project`").
+ Where(builder.Eq{"`project`.`repo_id`": repoID}.
+ And(builder.Eq{"`project`.`type`": TypeRepository}).
+ And(builder.Eq{"`project`.`is_closed`": true})),
+ }).From("`repository`").Where(builder.Eq{"id": repoID})); err != nil {
+ return err
+ }
+ return nil
+}
+
+// ChangeProjectStatusByRepoIDAndID sets a project, looked up by repository ID and project ID, to open or closed
+func ChangeProjectStatusByRepoIDAndID(ctx context.Context, repoID, projectID int64, isClosed bool) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ p := new(Project)
+
+ has, err := db.GetEngine(ctx).ID(projectID).Where("repo_id = ?", repoID).Get(p)
+ if err != nil {
+ return err
+ } else if !has {
+ return ErrProjectNotExist{ID: projectID, RepoID: repoID}
+ }
+
+ if err := changeProjectStatus(ctx, p, isClosed); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
+
+// ChangeProjectStatus sets a project to open or closed
+func ChangeProjectStatus(ctx context.Context, p *Project, isClosed bool) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if err := changeProjectStatus(ctx, p, isClosed); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
+
+func changeProjectStatus(ctx context.Context, p *Project, isClosed bool) error {
+ p.IsClosed = isClosed
+ p.ClosedDateUnix = timeutil.TimeStampNow()
+ count, err := db.GetEngine(ctx).ID(p.ID).Where("repo_id = ? AND is_closed = ?", p.RepoID, !isClosed).Cols("is_closed", "closed_date_unix").Update(p)
+ if err != nil {
+ return err
+ }
+ if count < 1 {
+ return nil
+ }
+
+ return updateRepositoryProjectCount(ctx, p.RepoID)
+}
+
+// DeleteProjectByID deletes a project from a repository. If it's not in a database
+// transaction, it will start a new database transaction
+func DeleteProjectByID(ctx context.Context, id int64) error {
+ return db.WithTx(ctx, func(ctx context.Context) error {
+ p, err := GetProjectByID(ctx, id)
+ if err != nil {
+ if IsErrProjectNotExist(err) {
+ return nil
+ }
+ return err
+ }
+
+ if err := deleteProjectIssuesByProjectID(ctx, id); err != nil {
+ return err
+ }
+
+ if err := deleteColumnByProjectID(ctx, id); err != nil {
+ return err
+ }
+
+ if _, err = db.GetEngine(ctx).ID(p.ID).Delete(new(Project)); err != nil {
+ return err
+ }
+
+ return updateRepositoryProjectCount(ctx, p.RepoID)
+ })
+}
+
+func DeleteProjectByRepoID(ctx context.Context, repoID int64) error {
+ switch {
+ case setting.Database.Type.IsSQLite3():
+ if _, err := db.GetEngine(ctx).Exec("DELETE FROM project_issue WHERE project_issue.id IN (SELECT project_issue.id FROM project_issue INNER JOIN project WHERE project.id = project_issue.project_id AND project.repo_id = ?)", repoID); err != nil {
+ return err
+ }
+ if _, err := db.GetEngine(ctx).Exec("DELETE FROM project_board WHERE project_board.id IN (SELECT project_board.id FROM project_board INNER JOIN project WHERE project.id = project_board.project_id AND project.repo_id = ?)", repoID); err != nil {
+ return err
+ }
+ if _, err := db.GetEngine(ctx).Table("project").Where("repo_id = ? ", repoID).Delete(&Project{}); err != nil {
+ return err
+ }
+ case setting.Database.Type.IsPostgreSQL():
+ if _, err := db.GetEngine(ctx).Exec("DELETE FROM project_issue USING project WHERE project.id = project_issue.project_id AND project.repo_id = ? ", repoID); err != nil {
+ return err
+ }
+ if _, err := db.GetEngine(ctx).Exec("DELETE FROM project_board USING project WHERE project.id = project_board.project_id AND project.repo_id = ? ", repoID); err != nil {
+ return err
+ }
+ if _, err := db.GetEngine(ctx).Table("project").Where("repo_id = ? ", repoID).Delete(&Project{}); err != nil {
+ return err
+ }
+ default:
+ if _, err := db.GetEngine(ctx).Exec("DELETE project_issue FROM project_issue INNER JOIN project ON project.id = project_issue.project_id WHERE project.repo_id = ? ", repoID); err != nil {
+ return err
+ }
+ if _, err := db.GetEngine(ctx).Exec("DELETE project_board FROM project_board INNER JOIN project ON project.id = project_board.project_id WHERE project.repo_id = ? ", repoID); err != nil {
+ return err
+ }
+ if _, err := db.GetEngine(ctx).Table("project").Where("repo_id = ? ", repoID).Delete(&Project{}); err != nil {
+ return err
+ }
+ }
+
+ return updateRepositoryProjectCount(ctx, repoID)
+}
diff --git a/models/project/project_test.go b/models/project/project_test.go
new file mode 100644
index 0000000..8c660b9
--- /dev/null
+++ b/models/project/project_test.go
@@ -0,0 +1,124 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package project
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestIsProjectTypeValid(t *testing.T) {
+ const UnknownType Type = 15
+
+ cases := []struct {
+ typ Type
+ valid bool
+ }{
+ {TypeIndividual, true},
+ {TypeRepository, true},
+ {TypeOrganization, true},
+ {UnknownType, false},
+ }
+
+ for _, v := range cases {
+ assert.Equal(t, v.valid, IsTypeValid(v.typ))
+ }
+}
+
+func TestGetProjects(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ projects, err := db.Find[Project](db.DefaultContext, SearchOptions{RepoID: 1})
+ require.NoError(t, err)
+
+ // 1 value for this repo exists in the fixtures
+ assert.Len(t, projects, 1)
+
+ projects, err = db.Find[Project](db.DefaultContext, SearchOptions{RepoID: 3})
+ require.NoError(t, err)
+
+ // 1 value for this repo exists in the fixtures
+ assert.Len(t, projects, 1)
+}
+
+func TestProject(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ project := &Project{
+ Type: TypeRepository,
+ TemplateType: TemplateTypeBasicKanban,
+ CardType: CardTypeTextOnly,
+ Title: "New Project",
+ RepoID: 1,
+ CreatedUnix: timeutil.TimeStampNow(),
+ CreatorID: 2,
+ }
+
+ require.NoError(t, NewProject(db.DefaultContext, project))
+
+ _, err := GetProjectByID(db.DefaultContext, project.ID)
+ require.NoError(t, err)
+
+ // Update project
+ project.Title = "Updated title"
+ require.NoError(t, UpdateProject(db.DefaultContext, project))
+
+ projectFromDB, err := GetProjectByID(db.DefaultContext, project.ID)
+ require.NoError(t, err)
+
+ assert.Equal(t, project.Title, projectFromDB.Title)
+
+ require.NoError(t, ChangeProjectStatus(db.DefaultContext, project, true))
+
+ // Retrieve from DB afresh to check if it is truly closed
+ projectFromDB, err = GetProjectByID(db.DefaultContext, project.ID)
+ require.NoError(t, err)
+
+ assert.True(t, projectFromDB.IsClosed)
+}
+
+func TestProjectsSort(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ tests := []struct {
+ sortType string
+ wants []int64
+ }{
+ {
+ sortType: "default",
+ wants: []int64{1, 3, 2, 6, 5, 4},
+ },
+ {
+ sortType: "oldest",
+ wants: []int64{4, 5, 6, 2, 3, 1},
+ },
+ {
+ sortType: "recentupdate",
+ wants: []int64{1, 3, 2, 6, 5, 4},
+ },
+ {
+ sortType: "leastupdate",
+ wants: []int64{4, 5, 6, 2, 3, 1},
+ },
+ }
+
+ for _, tt := range tests {
+ projects, count, err := db.FindAndCount[Project](db.DefaultContext, SearchOptions{
+ OrderBy: GetSearchOrderByBySortType(tt.sortType),
+ })
+ require.NoError(t, err)
+ assert.EqualValues(t, int64(6), count)
+ if assert.Len(t, projects, 6) {
+ for i := range projects {
+ assert.EqualValues(t, tt.wants[i], projects[i].ID)
+ }
+ }
+ }
+}
diff --git a/models/project/template.go b/models/project/template.go
new file mode 100644
index 0000000..06d5d2a
--- /dev/null
+++ b/models/project/template.go
@@ -0,0 +1,45 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package project
+
+type (
+ // TemplateType is used to represent a project template type
+ TemplateType uint8
+
+ // TemplateConfig is used to identify the template type of project that is being created
+ TemplateConfig struct {
+ TemplateType TemplateType
+ Translation string
+ }
+)
+
+const (
+ // TemplateTypeNone is a project template type that has no predefined columns
+ TemplateTypeNone TemplateType = iota
+
+ // TemplateTypeBasicKanban is a project template type that has basic predefined columns
+ TemplateTypeBasicKanban
+
+ // TemplateTypeBugTriage is a project template type that has predefined columns suited to hunting down bugs
+ TemplateTypeBugTriage
+)
+
+// GetTemplateConfigs returns the available template configurations for projects
+func GetTemplateConfigs() []TemplateConfig {
+ return []TemplateConfig{
+ {TemplateTypeNone, "repo.projects.type.none"},
+ {TemplateTypeBasicKanban, "repo.projects.type.basic_kanban"},
+ {TemplateTypeBugTriage, "repo.projects.type.bug_triage"},
+ }
+}
+
+// IsTemplateTypeValid checks if the project template type is valid
+func IsTemplateTypeValid(p TemplateType) bool {
+ switch p {
+ case TemplateTypeNone, TemplateTypeBasicKanban, TemplateTypeBugTriage:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/models/pull/automerge.go b/models/pull/automerge.go
new file mode 100644
index 0000000..f31159a
--- /dev/null
+++ b/models/pull/automerge.go
@@ -0,0 +1,97 @@
+// Copyright 2022 Gitea. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package pull
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/timeutil"
+)
+
+// AutoMerge represents a pull request scheduled for merging when checks succeed
+type AutoMerge struct {
+ ID int64 `xorm:"pk autoincr"`
+ PullID int64 `xorm:"UNIQUE"`
+ DoerID int64 `xorm:"INDEX NOT NULL"`
+ Doer *user_model.User `xorm:"-"`
+ MergeStyle repo_model.MergeStyle `xorm:"varchar(30)"`
+ Message string `xorm:"LONGTEXT"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+}
+
+// TableName return database table name for xorm
+func (AutoMerge) TableName() string {
+ return "pull_auto_merge"
+}
+
+func init() {
+ db.RegisterModel(new(AutoMerge))
+}
+
+// ErrAlreadyScheduledToAutoMerge represents an error where a pull request is already scheduled for auto merge
+type ErrAlreadyScheduledToAutoMerge struct {
+ PullID int64
+}
+
+func (err ErrAlreadyScheduledToAutoMerge) Error() string {
+ return fmt.Sprintf("pull request is already scheduled to auto merge when checks succeed [pull_id: %d]", err.PullID)
+}
+
+// IsErrAlreadyScheduledToAutoMerge checks if an error is a ErrAlreadyScheduledToAutoMerge.
+func IsErrAlreadyScheduledToAutoMerge(err error) bool {
+ _, ok := err.(ErrAlreadyScheduledToAutoMerge)
+ return ok
+}
+
+// ScheduleAutoMerge schedules a pull request to be merged when all checks succeed
+func ScheduleAutoMerge(ctx context.Context, doer *user_model.User, pullID int64, style repo_model.MergeStyle, message string) error {
+ // Check if we already have a merge scheduled for that pull request
+ if exists, _, err := GetScheduledMergeByPullID(ctx, pullID); err != nil {
+ return err
+ } else if exists {
+ return ErrAlreadyScheduledToAutoMerge{PullID: pullID}
+ }
+
+ _, err := db.GetEngine(ctx).Insert(&AutoMerge{
+ DoerID: doer.ID,
+ PullID: pullID,
+ MergeStyle: style,
+ Message: message,
+ })
+ return err
+}
+
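+// Illustrative usage (editor's sketch; pr is a hypothetical caller-side
+// value): callers should treat ErrAlreadyScheduledToAutoMerge as a
+// user-facing conflict rather than an internal failure:
+//
+//	err := ScheduleAutoMerge(ctx, doer, pr.ID, repo_model.MergeStyleMerge, "merge when checks succeed")
+//	if IsErrAlreadyScheduledToAutoMerge(err) {
+//		// inform the user that a schedule already exists
+//	}
+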
+// GetScheduledMergeByPullID gets a scheduled pull request merge by pull request id
+func GetScheduledMergeByPullID(ctx context.Context, pullID int64) (bool, *AutoMerge, error) {
+ scheduledPRM := &AutoMerge{}
+ exists, err := db.GetEngine(ctx).Where("pull_id = ?", pullID).Get(scheduledPRM)
+ if err != nil || !exists {
+ return false, nil, err
+ }
+
+ doer, err := user_model.GetPossibleUserByID(ctx, scheduledPRM.DoerID)
+ if err != nil {
+ return false, nil, err
+ }
+
+ scheduledPRM.Doer = doer
+ return true, scheduledPRM, nil
+}
+
+// DeleteScheduledAutoMerge deletes a scheduled pull request auto merge
+func DeleteScheduledAutoMerge(ctx context.Context, pullID int64) error {
+ exist, scheduledPRM, err := GetScheduledMergeByPullID(ctx, pullID)
+ if err != nil {
+ return err
+ } else if !exist {
+ return db.ErrNotExist{Resource: "auto_merge", ID: pullID}
+ }
+
+ _, err = db.GetEngine(ctx).ID(scheduledPRM.ID).Delete(&AutoMerge{})
+ return err
+}
diff --git a/models/pull/review_state.go b/models/pull/review_state.go
new file mode 100644
index 0000000..e46a22a
--- /dev/null
+++ b/models/pull/review_state.go
@@ -0,0 +1,139 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package pull
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/timeutil"
+)
+
+// ViewedState records the state in which a file is currently viewed
+type ViewedState uint8
+
+const (
+ Unviewed ViewedState = iota
+ HasChanged // cannot be set from the UI/API, only internally
+ Viewed
+)
+
+func (viewedState ViewedState) String() string {
+ switch viewedState {
+ case Unviewed:
+ return "unviewed"
+ case HasChanged:
+ return "has-changed"
+ case Viewed:
+ return "viewed"
+ default:
+ return fmt.Sprintf("unknown(value=%d)", viewedState)
+ }
+}
+
+// ReviewState stores for a user-PR-commit combination which files the user has already viewed
+type ReviewState struct {
+ ID int64 `xorm:"pk autoincr"`
+ UserID int64 `xorm:"NOT NULL UNIQUE(pull_commit_user)"`
+ PullID int64 `xorm:"NOT NULL INDEX UNIQUE(pull_commit_user) DEFAULT 0"` // Which PR was the review on?
+ CommitSHA string `xorm:"NOT NULL VARCHAR(64) UNIQUE(pull_commit_user)"` // Which commit was the head commit for the review?
+ UpdatedFiles map[string]ViewedState `xorm:"NOT NULL LONGTEXT JSON"` // Stores for each of the changed files of a PR whether they have been viewed, changed since last viewed, or not viewed
+ UpdatedUnix timeutil.TimeStamp `xorm:"updated"` // An accurate indicator of the order of reviews, as we do not expect it to be possible to review previous commits
+}
+
+func init() {
+ db.RegisterModel(new(ReviewState))
+}
+
+// GetReviewState returns the ReviewState with all given values prefilled, whether or not it exists in the database.
+// Looking a review up does not create it: if it didn't exist in the database before, it won't afterwards either.
+// The returned boolean indicates whether the review exists in the database.
+func GetReviewState(ctx context.Context, userID, pullID int64, commitSHA string) (*ReviewState, bool, error) {
+ review := &ReviewState{UserID: userID, PullID: pullID, CommitSHA: commitSHA}
+ has, err := db.GetEngine(ctx).Get(review)
+ return review, has, err
+}
+
+// UpdateReviewState updates the given review inside the database, regardless of whether it existed before or not
+// The given map of files with their viewed state will be merged with the previous review, if present
+func UpdateReviewState(ctx context.Context, userID, pullID int64, commitSHA string, updatedFiles map[string]ViewedState) error {
+ log.Trace("Updating review for user %d, repo %d, commit %s with the updated files %v.", userID, pullID, commitSHA, updatedFiles)
+
+ review, exists, err := GetReviewState(ctx, userID, pullID, commitSHA)
+ if err != nil {
+ return err
+ }
+
+ if exists {
+ review.UpdatedFiles = mergeFiles(review.UpdatedFiles, updatedFiles)
+ } else if previousReview, err := getNewestReviewStateApartFrom(ctx, userID, pullID, commitSHA); err != nil {
+ return err
+
+ // Overwrite the viewed files of the previous review if present
+ } else if previousReview != nil {
+ review.UpdatedFiles = mergeFiles(previousReview.UpdatedFiles, updatedFiles)
+ } else {
+ review.UpdatedFiles = updatedFiles
+ }
+
+ // Insert or Update review
+ engine := db.GetEngine(ctx)
+ if !exists {
+ log.Trace("Inserting new review for user %d, repo %d, commit %s with the updated files %v.", userID, pullID, commitSHA, review.UpdatedFiles)
+ _, err := engine.Insert(review)
+ return err
+ }
+ log.Trace("Updating already existing review with ID %d (user %d, repo %d, commit %s) with the updated files %v.", review.ID, userID, pullID, commitSHA, review.UpdatedFiles)
+ _, err = engine.ID(review.ID).Update(&ReviewState{UpdatedFiles: review.UpdatedFiles})
+ return err
+}
+
+// mergeFiles merges the given maps of files with their viewing state into one map.
+// Values from oldFiles will be overridden with values from newFiles
+func mergeFiles(oldFiles, newFiles map[string]ViewedState) map[string]ViewedState {
+ if oldFiles == nil {
+ return newFiles
+ } else if newFiles == nil {
+ return oldFiles
+ }
+
+ for file, viewed := range newFiles {
+ oldFiles[file] = viewed
+ }
+ return oldFiles
+}
+
+// GetNewestReviewState gets the newest review of the current user in the current PR.
+// The returned PR Review will be nil if the user has not yet reviewed this PR.
+func GetNewestReviewState(ctx context.Context, userID, pullID int64) (*ReviewState, error) {
+ var review ReviewState
+ has, err := db.GetEngine(ctx).Where("user_id = ?", userID).And("pull_id = ?", pullID).OrderBy("updated_unix DESC").Get(&review)
+ if err != nil || !has {
+ return nil, err
+ }
+ return &review, err
+}
+
+// getNewestReviewStateApartFrom is like GetNewestReviewState, except that the second newest review will be returned if the newest review points at the given commit.
+// The returned PR Review will be nil if the user has not yet reviewed this PR.
+func getNewestReviewStateApartFrom(ctx context.Context, userID, pullID int64, commitSHA string) (*ReviewState, error) {
+ var reviews []ReviewState
+ err := db.GetEngine(ctx).Where("user_id = ?", userID).And("pull_id = ?", pullID).OrderBy("updated_unix DESC").Limit(2).Find(&reviews)
+ // It would also be possible to use ".And("commit_sha != ?", commitSHA)" instead of the error handling below
+ // However, benchmarks show drastically improved performance by not doing that
+
+ // Error cases in which no review should be returned
+ if err != nil || len(reviews) == 0 || (len(reviews) == 1 && reviews[0].CommitSHA == commitSHA) {
+ return nil, err
+
+ // The first review points at the commit to exclude, hence skip to the second review
+ } else if len(reviews) >= 2 && reviews[0].CommitSHA == commitSHA {
+ return &reviews[1], nil
+ }
+
+ // As we have no error cases left, the result must be the first element in the list
+ return &reviews[0], nil
+}
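A hedged sketch of the intended call pattern, assuming the caller has resolved userID, pullID and the current head SHA; the file names are placeholders. Carry-over from the previous head commit happens inside UpdateReviewState via getNewestReviewStateApartFrom:

import (
	"context"

	pull_model "code.gitea.io/gitea/models/pull"
)

// markViewed records two files as viewed at the current head commit; any
// state recorded at the previous head is merged in by UpdateReviewState.
func markViewed(ctx context.Context, userID, pullID int64, headSHA string) error {
	return pull_model.UpdateReviewState(ctx, userID, pullID, headSHA, map[string]pull_model.ViewedState{
		"main.go":   pull_model.Viewed,
		"README.md": pull_model.Viewed,
	})
}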
diff --git a/models/quota/default.go b/models/quota/default.go
new file mode 100644
index 0000000..6b553d6
--- /dev/null
+++ b/models/quota/default.go
@@ -0,0 +1,25 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package quota
+
+import (
+ "code.gitea.io/gitea/modules/setting"
+)
+
+func EvaluateDefault(used Used, forSubject LimitSubject) bool {
+ groups := GroupList{
+ &Group{
+ Name: "builtin-default-group",
+ Rules: []Rule{
+ {
+ Name: "builtin-default-rule",
+ Limit: setting.Quota.Default.Total,
+ Subjects: LimitSubjects{LimitSubjectSizeAll},
+ },
+ },
+ },
+ }
+
+ return groups.Evaluate(used, forSubject)
+}
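EvaluateDefault is the instance-wide fallback used when a user belongs to no quota group. A sketch of its effect, assuming setting.Quota.Default.Total is configured to 1 GiB (the figure is illustrative):

import quota_model "code.gitea.io/gitea/models/quota"

// deniedByDefault reports whether the built-in default group would reject a
// user holding 2 GiB of public repositories under a 1 GiB default total.
func deniedByDefault() bool {
	used := quota_model.Used{}
	used.Size.Repos.Public = 2 << 30
	return !quota_model.EvaluateDefault(used, quota_model.LimitSubjectSizeAll)
}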
diff --git a/models/quota/errors.go b/models/quota/errors.go
new file mode 100644
index 0000000..962c8b1
--- /dev/null
+++ b/models/quota/errors.go
@@ -0,0 +1,127 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package quota
+
+import "fmt"
+
+type ErrRuleAlreadyExists struct {
+ Name string
+}
+
+func IsErrRuleAlreadyExists(err error) bool {
+ _, ok := err.(ErrRuleAlreadyExists)
+ return ok
+}
+
+func (err ErrRuleAlreadyExists) Error() string {
+ return fmt.Sprintf("rule already exists: [name: %s]", err.Name)
+}
+
+type ErrRuleNotFound struct {
+ Name string
+}
+
+func IsErrRuleNotFound(err error) bool {
+ _, ok := err.(ErrRuleNotFound)
+ return ok
+}
+
+func (err ErrRuleNotFound) Error() string {
+ return fmt.Sprintf("rule not found: [name: %s]", err.Name)
+}
+
+type ErrGroupAlreadyExists struct {
+ Name string
+}
+
+func IsErrGroupAlreadyExists(err error) bool {
+ _, ok := err.(ErrGroupAlreadyExists)
+ return ok
+}
+
+func (err ErrGroupAlreadyExists) Error() string {
+ return fmt.Sprintf("group already exists: [name: %s]", err.Name)
+}
+
+type ErrGroupNotFound struct {
+ Name string
+}
+
+func IsErrGroupNotFound(err error) bool {
+ _, ok := err.(ErrGroupNotFound)
+ return ok
+}
+
+func (err ErrGroupNotFound) Error() string {
+ return fmt.Sprintf("group not found: [group: %s]", err.Name)
+}
+
+type ErrUserAlreadyInGroup struct {
+ GroupName string
+ UserID int64
+}
+
+func IsErrUserAlreadyInGroup(err error) bool {
+ _, ok := err.(ErrUserAlreadyInGroup)
+ return ok
+}
+
+func (err ErrUserAlreadyInGroup) Error() string {
+ return fmt.Sprintf("user already in group: [group: %s, userID: %d]", err.GroupName, err.UserID)
+}
+
+type ErrUserNotInGroup struct {
+ GroupName string
+ UserID int64
+}
+
+func IsErrUserNotInGroup(err error) bool {
+ _, ok := err.(ErrUserNotInGroup)
+ return ok
+}
+
+func (err ErrUserNotInGroup) Error() string {
+ return fmt.Sprintf("user not in group: [group: %s, userID: %d]", err.GroupName, err.UserID)
+}
+
+type ErrRuleAlreadyInGroup struct {
+ GroupName string
+ RuleName string
+}
+
+func IsErrRuleAlreadyInGroup(err error) bool {
+ _, ok := err.(ErrRuleAlreadyInGroup)
+ return ok
+}
+
+func (err ErrRuleAlreadyInGroup) Error() string {
+ return fmt.Sprintf("rule already in group: [group: %s, rule: %s]", err.GroupName, err.RuleName)
+}
+
+type ErrRuleNotInGroup struct {
+ GroupName string
+ RuleName string
+}
+
+func IsErrRuleNotInGroup(err error) bool {
+ _, ok := err.(ErrRuleNotInGroup)
+ return ok
+}
+
+func (err ErrRuleNotInGroup) Error() string {
+ return fmt.Sprintf("rule not in group: [group: %s, rule: %s]", err.GroupName, err.RuleName)
+}
+
+type ErrParseLimitSubjectUnrecognized struct {
+ Subject string
+}
+
+func IsErrParseLimitSubjectUnrecognized(err error) bool {
+ _, ok := err.(ErrParseLimitSubjectUnrecognized)
+ return ok
+}
+
+func (err ErrParseLimitSubjectUnrecognized) Error() string {
+ return fmt.Sprintf("unrecognized quota limit subject: [subject: %s]", err.Subject)
+}
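Each error type comes with an Is* predicate that callers can use for branching without errors.As. A small sketch against CreateRule from rule.go further down; the rule name and limit are placeholders:

import (
	"context"

	quota_model "code.gitea.io/gitea/models/quota"
)

// ensureRule creates a rule if it is missing and treats "already exists" as success.
func ensureRule(ctx context.Context, name string, limit int64) error {
	_, err := quota_model.CreateRule(ctx, name, limit,
		quota_model.LimitSubjects{quota_model.LimitSubjectSizeAll})
	if quota_model.IsErrRuleAlreadyExists(err) {
		return nil
	}
	return err
}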
diff --git a/models/quota/group.go b/models/quota/group.go
new file mode 100644
index 0000000..0acb5b2
--- /dev/null
+++ b/models/quota/group.go
@@ -0,0 +1,401 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package quota
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/setting"
+
+ "xorm.io/builder"
+)
+
+type (
+ GroupList []*Group
+ Group struct {
+ // Name of the quota group
+ Name string `json:"name" xorm:"pk NOT NULL" binding:"Required"`
+ Rules []Rule `json:"rules" xorm:"-"`
+ }
+)
+
+type GroupRuleMapping struct {
+ ID int64 `xorm:"pk autoincr" json:"-"`
+ GroupName string `xorm:"index unique(qgrm_gr) not null" json:"group_name"`
+ RuleName string `xorm:"unique(qgrm_gr) not null" json:"rule_name"`
+}
+
+type Kind int
+
+const (
+ KindUser Kind = iota
+)
+
+type GroupMapping struct {
+ ID int64 `xorm:"pk autoincr"`
+ Kind Kind `xorm:"unique(qgm_kmg) not null"`
+ MappedID int64 `xorm:"unique(qgm_kmg) not null"`
+ GroupName string `xorm:"index unique(qgm_kmg) not null"`
+}
+
+func (g *Group) TableName() string {
+ return "quota_group"
+}
+
+func (grm *GroupRuleMapping) TableName() string {
+ return "quota_group_rule_mapping"
+}
+
+func (ugm *GroupMapping) TableName() string {
+ return "quota_group_mapping"
+}
+
+func (g *Group) LoadRules(ctx context.Context) error {
+ return db.GetEngine(ctx).Select("`quota_rule`.*").
+ Table("quota_rule").
+ Join("INNER", "`quota_group_rule_mapping`", "`quota_group_rule_mapping`.rule_name = `quota_rule`.name").
+ Where("`quota_group_rule_mapping`.group_name = ?", g.Name).
+ Find(&g.Rules)
+}
+
+func (g *Group) isUserInGroup(ctx context.Context, userID int64) (bool, error) {
+ return db.GetEngine(ctx).
+ Where("kind = ? AND mapped_id = ? AND group_name = ?", KindUser, userID, g.Name).
+ Get(&GroupMapping{})
+}
+
+func (g *Group) AddUserByID(ctx context.Context, userID int64) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ exists, err := g.isUserInGroup(ctx, userID)
+ if err != nil {
+ return err
+ } else if exists {
+ return ErrUserAlreadyInGroup{GroupName: g.Name, UserID: userID}
+ }
+
+ _, err = db.GetEngine(ctx).Insert(&GroupMapping{
+ Kind: KindUser,
+ MappedID: userID,
+ GroupName: g.Name,
+ })
+ if err != nil {
+ return err
+ }
+ return committer.Commit()
+}
+
+func (g *Group) RemoveUserByID(ctx context.Context, userID int64) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ exists, err := g.isUserInGroup(ctx, userID)
+ if err != nil {
+ return err
+ } else if !exists {
+ return ErrUserNotInGroup{GroupName: g.Name, UserID: userID}
+ }
+
+ _, err = db.GetEngine(ctx).Delete(&GroupMapping{
+ Kind: KindUser,
+ MappedID: userID,
+ GroupName: g.Name,
+ })
+ if err != nil {
+ return err
+ }
+ return committer.Commit()
+}
+
+func (g *Group) isRuleInGroup(ctx context.Context, ruleName string) (bool, error) {
+ return db.GetEngine(ctx).
+ Where("group_name = ? AND rule_name = ?", g.Name, ruleName).
+ Get(&GroupRuleMapping{})
+}
+
+func (g *Group) AddRuleByName(ctx context.Context, ruleName string) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ exists, err := DoesRuleExist(ctx, ruleName)
+ if err != nil {
+ return err
+ } else if !exists {
+ return ErrRuleNotFound{Name: ruleName}
+ }
+
+ has, err := g.isRuleInGroup(ctx, ruleName)
+ if err != nil {
+ return err
+ } else if has {
+ return ErrRuleAlreadyInGroup{GroupName: g.Name, RuleName: ruleName}
+ }
+
+ _, err = db.GetEngine(ctx).Insert(&GroupRuleMapping{
+ GroupName: g.Name,
+ RuleName: ruleName,
+ })
+ if err != nil {
+ return err
+ }
+ return committer.Commit()
+}
+
+func (g *Group) RemoveRuleByName(ctx context.Context, ruleName string) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ exists, err := g.isRuleInGroup(ctx, ruleName)
+ if err != nil {
+ return err
+ } else if !exists {
+ return ErrRuleNotInGroup{GroupName: g.Name, RuleName: ruleName}
+ }
+
+ _, err = db.GetEngine(ctx).Delete(&GroupRuleMapping{
+ GroupName: g.Name,
+ RuleName: ruleName,
+ })
+ if err != nil {
+ return err
+ }
+ return committer.Commit()
+}
+
+var affectsMap = map[LimitSubject]LimitSubjects{
+ LimitSubjectSizeAll: {
+ LimitSubjectSizeReposAll,
+ LimitSubjectSizeGitLFS,
+ LimitSubjectSizeAssetsAll,
+ },
+ LimitSubjectSizeReposAll: {
+ LimitSubjectSizeReposPublic,
+ LimitSubjectSizeReposPrivate,
+ },
+ LimitSubjectSizeAssetsAll: {
+ LimitSubjectSizeAssetsAttachmentsAll,
+ LimitSubjectSizeAssetsArtifacts,
+ LimitSubjectSizeAssetsPackagesAll,
+ },
+ LimitSubjectSizeAssetsAttachmentsAll: {
+ LimitSubjectSizeAssetsAttachmentsIssues,
+ LimitSubjectSizeAssetsAttachmentsReleases,
+ },
+}
+
+func (g *Group) Evaluate(used Used, forSubject LimitSubject) (bool, bool) {
+ var found bool
+ for _, rule := range g.Rules {
+ ok, has := rule.Evaluate(used, forSubject)
+ if has {
+ found = true
+ if !ok {
+ return false, true
+ }
+ }
+ }
+
+ if !found {
+ // No rule in this group covers forSubject directly, so try evaluating
+ // against the subjects it affects instead
+
+ for _, subject := range affectsMap[forSubject] {
+ ok, has := g.Evaluate(used, subject)
+ if has {
+ found = true
+ if !ok {
+ return false, true
+ }
+ }
+ }
+ }
+
+ return true, found
+}
+
+func (gl *GroupList) Evaluate(used Used, forSubject LimitSubject) bool {
+ // If there are no groups, use the configured defaults:
+ if gl == nil || len(*gl) == 0 {
+ return EvaluateDefault(used, forSubject)
+ }
+
+ for _, group := range *gl {
+ ok, has := group.Evaluate(used, forSubject)
+ if has && ok {
+ return true
+ }
+ }
+ return false
+}
+
+func GetGroupByName(ctx context.Context, name string) (*Group, error) {
+ var group Group
+ has, err := db.GetEngine(ctx).Where("name = ?", name).Get(&group)
+ if has {
+ if err = group.LoadRules(ctx); err != nil {
+ return nil, err
+ }
+ return &group, nil
+ }
+ return nil, err
+}
+
+func ListGroups(ctx context.Context) (GroupList, error) {
+ var groups GroupList
+ err := db.GetEngine(ctx).Find(&groups)
+ return groups, err
+}
+
+func doesGroupExist(ctx context.Context, name string) (bool, error) {
+ return db.GetEngine(ctx).Where("name = ?", name).Get(&Group{})
+}
+
+func CreateGroup(ctx context.Context, name string) (*Group, error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer committer.Close()
+
+ exists, err := doesGroupExist(ctx, name)
+ if err != nil {
+ return nil, err
+ } else if exists {
+ return nil, ErrGroupAlreadyExists{Name: name}
+ }
+
+ group := Group{Name: name}
+ _, err = db.GetEngine(ctx).Insert(group)
+ if err != nil {
+ return nil, err
+ }
+ return &group, committer.Commit()
+}
+
+func ListUsersInGroup(ctx context.Context, name string) ([]*user_model.User, error) {
+ group, err := GetGroupByName(ctx, name)
+ if err != nil {
+ return nil, err
+ }
+
+ var users []*user_model.User
+ err = db.GetEngine(ctx).Select("`user`.*").
+ Table("user").
+ Join("INNER", "`quota_group_mapping`", "`quota_group_mapping`.mapped_id = `user`.id").
+ Where("`quota_group_mapping`.kind = ? AND `quota_group_mapping`.group_name = ?", KindUser, group.Name).
+ Find(&users)
+ return users, err
+}
+
+func DeleteGroupByName(ctx context.Context, name string) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ _, err = db.GetEngine(ctx).Delete(GroupMapping{
+ GroupName: name,
+ })
+ if err != nil {
+ return err
+ }
+ _, err = db.GetEngine(ctx).Delete(GroupRuleMapping{
+ GroupName: name,
+ })
+ if err != nil {
+ return err
+ }
+
+ _, err = db.GetEngine(ctx).Delete(Group{Name: name})
+ if err != nil {
+ return err
+ }
+ return committer.Commit()
+}
+
+func SetUserGroups(ctx context.Context, userID int64, groups *[]string) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ // First: remove the user from any groups
+ _, err = db.GetEngine(ctx).Where("kind = ? AND mapped_id = ?", KindUser, userID).Delete(GroupMapping{})
+ if err != nil {
+ return err
+ }
+
+ if groups == nil {
+ return nil
+ }
+
+ // Then add the user to each group listed
+ for _, groupName := range *groups {
+ group, err := GetGroupByName(ctx, groupName)
+ if err != nil {
+ return err
+ }
+ if group == nil {
+ return ErrGroupNotFound{Name: groupName}
+ }
+ err = group.AddUserByID(ctx, userID)
+ if err != nil {
+ return err
+ }
+ }
+
+ return committer.Commit()
+}
+
+func GetGroupsForUser(ctx context.Context, userID int64) (GroupList, error) {
+ var groups GroupList
+ err := db.GetEngine(ctx).
+ Where(builder.In("name",
+ builder.Select("group_name").
+ From("quota_group_mapping").
+ Where(builder.And(
+ builder.Eq{"kind": KindUser},
+ builder.Eq{"mapped_id": userID}),
+ ))).
+ Find(&groups)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(groups) == 0 {
+ err = db.GetEngine(ctx).Where(builder.In("name", setting.Quota.DefaultGroups)).Find(&groups)
+ if err != nil {
+ return nil, err
+ }
+ if len(groups) == 0 {
+ return nil, nil
+ }
+ }
+
+ for _, group := range groups {
+ err = group.LoadRules(ctx)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return groups, nil
+}
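Two evaluation rules fall out of the code above: within a single group every rule must pass, while across a group list one passing group is enough (see also the tests in quota_group_test.go further down). A minimal sketch with illustrative group names:

import quota_model "code.gitea.io/gitea/models/quota"

// groupListSemantics returns true: the deny group fails, but the allow
// group passes, and one passing group satisfies the whole list.
func groupListSemantics() bool {
	deny := &quota_model.Group{Name: "deny", Rules: []quota_model.Rule{
		{Limit: 0, Subjects: quota_model.LimitSubjects{quota_model.LimitSubjectSizeAll}},
	}}
	allow := &quota_model.Group{Name: "allow", Rules: []quota_model.Rule{
		{Limit: -1, Subjects: quota_model.LimitSubjects{quota_model.LimitSubjectSizeAll}},
	}}

	used := quota_model.Used{}
	used.Size.Repos.Public = 1024

	groups := quota_model.GroupList{deny, allow}
	return groups.Evaluate(used, quota_model.LimitSubjectSizeAll)
}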
diff --git a/models/quota/limit_subject.go b/models/quota/limit_subject.go
new file mode 100644
index 0000000..4a49d33
--- /dev/null
+++ b/models/quota/limit_subject.go
@@ -0,0 +1,69 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package quota
+
+import "fmt"
+
+type (
+ LimitSubject int
+ LimitSubjects []LimitSubject
+)
+
+const (
+ LimitSubjectNone LimitSubject = iota
+ LimitSubjectSizeAll
+ LimitSubjectSizeReposAll
+ LimitSubjectSizeReposPublic
+ LimitSubjectSizeReposPrivate
+ LimitSubjectSizeGitAll
+ LimitSubjectSizeGitLFS
+ LimitSubjectSizeAssetsAll
+ LimitSubjectSizeAssetsAttachmentsAll
+ LimitSubjectSizeAssetsAttachmentsIssues
+ LimitSubjectSizeAssetsAttachmentsReleases
+ LimitSubjectSizeAssetsArtifacts
+ LimitSubjectSizeAssetsPackagesAll
+ LimitSubjectSizeWiki
+
+ LimitSubjectFirst = LimitSubjectSizeAll
+ LimitSubjectLast = LimitSubjectSizeWiki
+)
+
+var limitSubjectRepr = map[string]LimitSubject{
+ "none": LimitSubjectNone,
+ "size:all": LimitSubjectSizeAll,
+ "size:repos:all": LimitSubjectSizeReposAll,
+ "size:repos:public": LimitSubjectSizeReposPublic,
+ "size:repos:private": LimitSubjectSizeReposPrivate,
+ "size:git:all": LimitSubjectSizeGitAll,
+ "size:git:lfs": LimitSubjectSizeGitLFS,
+ "size:assets:all": LimitSubjectSizeAssetsAll,
+ "size:assets:attachments:all": LimitSubjectSizeAssetsAttachmentsAll,
+ "size:assets:attachments:issues": LimitSubjectSizeAssetsAttachmentsIssues,
+ "size:assets:attachments:releases": LimitSubjectSizeAssetsAttachmentsReleases,
+ "size:assets:artifacts": LimitSubjectSizeAssetsArtifacts,
+ "size:assets:packages:all": LimitSubjectSizeAssetsPackagesAll,
+ "size:assets:wiki": LimitSubjectSizeWiki,
+}
+
+func (subject LimitSubject) String() string {
+ for repr, limit := range limitSubjectRepr {
+ if limit == subject {
+ return repr
+ }
+ }
+ return "<unknown>"
+}
+
+func (subjects LimitSubjects) GoString() string {
+ return fmt.Sprintf("%T{%+v}", subjects, subjects)
+}
+
+func ParseLimitSubject(repr string) (LimitSubject, error) {
+ result, has := limitSubjectRepr[repr]
+ if !has {
+ return LimitSubjectNone, ErrParseLimitSubjectUnrecognized{Subject: repr}
+ }
+ return result, nil
+}
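A short round-trip sketch for the parser; the input is one of the spellings from limitSubjectRepr above:

import quota_model "code.gitea.io/gitea/models/quota"

// parseSubject parses a configured subject spelling back into its constant.
func parseSubject() (quota_model.LimitSubject, error) {
	subject, err := quota_model.ParseLimitSubject("size:repos:public")
	if err != nil {
		// unknown spellings yield ErrParseLimitSubjectUnrecognized
		return quota_model.LimitSubjectNone, err
	}
	return subject, nil // subject.String() == "size:repos:public"
}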
diff --git a/models/quota/quota.go b/models/quota/quota.go
new file mode 100644
index 0000000..d38bfab
--- /dev/null
+++ b/models/quota/quota.go
@@ -0,0 +1,36 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package quota
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/setting"
+)
+
+func init() {
+ db.RegisterModel(new(Rule))
+ db.RegisterModel(new(Group))
+ db.RegisterModel(new(GroupRuleMapping))
+ db.RegisterModel(new(GroupMapping))
+}
+
+func EvaluateForUser(ctx context.Context, userID int64, subject LimitSubject) (bool, error) {
+ if !setting.Quota.Enabled {
+ return true, nil
+ }
+
+ groups, err := GetGroupsForUser(ctx, userID)
+ if err != nil {
+ return false, err
+ }
+
+ used, err := GetUsedForUser(ctx, userID)
+ if err != nil {
+ return false, err
+ }
+
+ return groups.Evaluate(*used, subject), nil
+}
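EvaluateForUser is the single entry point services are expected to call before accepting new data. A hedged sketch of such a gate; the function name and error text are assumptions, not part of this file:

import (
	"context"
	"fmt"

	quota_model "code.gitea.io/gitea/models/quota"
)

// checkReleaseQuota gates a release-attachment upload on the owner's quota.
func checkReleaseQuota(ctx context.Context, ownerID int64) error {
	ok, err := quota_model.EvaluateForUser(ctx, ownerID, quota_model.LimitSubjectSizeAssetsAttachmentsReleases)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("quota exceeded for user %d", ownerID)
	}
	return nil
}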
diff --git a/models/quota/quota_group_test.go b/models/quota/quota_group_test.go
new file mode 100644
index 0000000..bc25858
--- /dev/null
+++ b/models/quota/quota_group_test.go
@@ -0,0 +1,208 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package quota_test
+
+import (
+ "testing"
+
+ quota_model "code.gitea.io/gitea/models/quota"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestQuotaGroupAllRulesMustPass(t *testing.T) {
+ unlimitedRule := quota_model.Rule{
+ Limit: -1,
+ Subjects: quota_model.LimitSubjects{
+ quota_model.LimitSubjectSizeAll,
+ },
+ }
+ denyRule := quota_model.Rule{
+ Limit: 0,
+ Subjects: quota_model.LimitSubjects{
+ quota_model.LimitSubjectSizeAll,
+ },
+ }
+ group := quota_model.Group{
+ Rules: []quota_model.Rule{
+ unlimitedRule,
+ denyRule,
+ },
+ }
+
+ used := quota_model.Used{}
+ used.Size.Repos.Public = 1024
+
+ // Within a group, *all* rules must pass. Thus, if we have a deny-all rule,
+ // and an unlimited rule, that will always fail.
+ ok, has := group.Evaluate(used, quota_model.LimitSubjectSizeAll)
+ assert.True(t, has)
+ assert.False(t, ok)
+}
+
+func TestQuotaGroupRuleScenario1(t *testing.T) {
+ group := quota_model.Group{
+ Rules: []quota_model.Rule{
+ {
+ Limit: 1024,
+ Subjects: quota_model.LimitSubjects{
+ quota_model.LimitSubjectSizeAssetsAttachmentsReleases,
+ quota_model.LimitSubjectSizeGitLFS,
+ quota_model.LimitSubjectSizeAssetsPackagesAll,
+ },
+ },
+ {
+ Limit: 0,
+ Subjects: quota_model.LimitSubjects{
+ quota_model.LimitSubjectSizeGitLFS,
+ },
+ },
+ },
+ }
+
+ used := quota_model.Used{}
+ used.Size.Assets.Attachments.Releases = 512
+ used.Size.Assets.Packages.All = 256
+ used.Size.Git.LFS = 16
+
+ ok, has := group.Evaluate(used, quota_model.LimitSubjectSizeAssetsAttachmentsReleases)
+ assert.True(t, has, "size:assets:attachments:releases is covered")
+ assert.True(t, ok, "size:assets:attachments:releases passes")
+
+ ok, has = group.Evaluate(used, quota_model.LimitSubjectSizeAssetsPackagesAll)
+ assert.True(t, has, "size:assets:packages:all is covered")
+ assert.True(t, ok, "size:assets:packages:all passes")
+
+ ok, has = group.Evaluate(used, quota_model.LimitSubjectSizeGitLFS)
+ assert.True(t, has, "size:git:lfs is covered")
+ assert.False(t, ok, "size:git:lfs fails")
+
+ ok, has = group.Evaluate(used, quota_model.LimitSubjectSizeAll)
+ assert.True(t, has, "size:all is covered")
+ assert.False(t, ok, "size:all fails")
+}
+
+func TestQuotaGroupRuleCombination(t *testing.T) {
+ repoRule := quota_model.Rule{
+ Limit: 4096,
+ Subjects: quota_model.LimitSubjects{
+ quota_model.LimitSubjectSizeReposAll,
+ },
+ }
+ packagesRule := quota_model.Rule{
+ Limit: 0,
+ Subjects: quota_model.LimitSubjects{
+ quota_model.LimitSubjectSizeAssetsPackagesAll,
+ },
+ }
+
+ used := quota_model.Used{}
+ used.Size.Repos.Public = 1024
+ used.Size.Assets.Packages.All = 1024
+
+ group := quota_model.Group{
+ Rules: []quota_model.Rule{
+ repoRule,
+ packagesRule,
+ },
+ }
+
+ // Git LFS isn't covered by any rule
+ _, has := group.Evaluate(used, quota_model.LimitSubjectSizeGitLFS)
+ assert.False(t, has)
+
+ // repos:all is covered, and is passing
+ ok, has := group.Evaluate(used, quota_model.LimitSubjectSizeReposAll)
+ assert.True(t, has)
+ assert.True(t, ok)
+
+ // packages:all is covered, and is failing
+ ok, has = group.Evaluate(used, quota_model.LimitSubjectSizeAssetsPackagesAll)
+ assert.True(t, has)
+ assert.False(t, ok)
+
+ // size:all is covered, and is failing (due to packages:all being over quota)
+ ok, has = group.Evaluate(used, quota_model.LimitSubjectSizeAll)
+ assert.True(t, has, "size:all should be covered")
+ assert.False(t, ok, "size:all should fail")
+}
+
+func TestQuotaGroupListsRequireOnlyOnePassing(t *testing.T) {
+ unlimitedRule := quota_model.Rule{
+ Limit: -1,
+ Subjects: quota_model.LimitSubjects{
+ quota_model.LimitSubjectSizeAll,
+ },
+ }
+ denyRule := quota_model.Rule{
+ Limit: 0,
+ Subjects: quota_model.LimitSubjects{
+ quota_model.LimitSubjectSizeAll,
+ },
+ }
+
+ denyGroup := quota_model.Group{
+ Rules: []quota_model.Rule{
+ denyRule,
+ },
+ }
+ unlimitedGroup := quota_model.Group{
+ Rules: []quota_model.Rule{
+ unlimitedRule,
+ },
+ }
+
+ groups := quota_model.GroupList{&denyGroup, &unlimitedGroup}
+
+ used := quota_model.Used{}
+ used.Size.Repos.Public = 1024
+
+ // In a group list, if any group passes, the entire evaluation passes.
+ ok := groups.Evaluate(used, quota_model.LimitSubjectSizeAll)
+ assert.True(t, ok)
+}
+
+func TestQuotaGroupListAllFailing(t *testing.T) {
+ denyRule := quota_model.Rule{
+ Limit: 0,
+ Subjects: quota_model.LimitSubjects{
+ quota_model.LimitSubjectSizeAll,
+ },
+ }
+ limitedRule := quota_model.Rule{
+ Limit: 1024,
+ Subjects: quota_model.LimitSubjects{
+ quota_model.LimitSubjectSizeAll,
+ },
+ }
+
+ denyGroup := quota_model.Group{
+ Rules: []quota_model.Rule{
+ denyRule,
+ },
+ }
+ limitedGroup := quota_model.Group{
+ Rules: []quota_model.Rule{
+ limitedRule,
+ },
+ }
+
+ groups := quota_model.GroupList{&denyGroup, &limitedGroup}
+
+ used := quota_model.Used{}
+ used.Size.Repos.Public = 2048
+
+ ok := groups.Evaluate(used, quota_model.LimitSubjectSizeAll)
+ assert.False(t, ok)
+}
+
+func TestQuotaGroupListEmpty(t *testing.T) {
+ groups := quota_model.GroupList{}
+
+ used := quota_model.Used{}
+ used.Size.Repos.Public = 2048
+
+ ok := groups.Evaluate(used, quota_model.LimitSubjectSizeAll)
+ assert.True(t, ok)
+}
diff --git a/models/quota/quota_rule_test.go b/models/quota/quota_rule_test.go
new file mode 100644
index 0000000..1e1daf4
--- /dev/null
+++ b/models/quota/quota_rule_test.go
@@ -0,0 +1,304 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package quota_test
+
+import (
+ "testing"
+
+ quota_model "code.gitea.io/gitea/models/quota"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func makeFullyUsed() quota_model.Used {
+ return quota_model.Used{
+ Size: quota_model.UsedSize{
+ Repos: quota_model.UsedSizeRepos{
+ Public: 1024,
+ Private: 1024,
+ },
+ Git: quota_model.UsedSizeGit{
+ LFS: 1024,
+ },
+ Assets: quota_model.UsedSizeAssets{
+ Attachments: quota_model.UsedSizeAssetsAttachments{
+ Issues: 1024,
+ Releases: 1024,
+ },
+ Artifacts: 1024,
+ Packages: quota_model.UsedSizeAssetsPackages{
+ All: 1024,
+ },
+ },
+ },
+ }
+}
+
+func makePartiallyUsed() quota_model.Used {
+ return quota_model.Used{
+ Size: quota_model.UsedSize{
+ Repos: quota_model.UsedSizeRepos{
+ Public: 1024,
+ },
+ Assets: quota_model.UsedSizeAssets{
+ Attachments: quota_model.UsedSizeAssetsAttachments{
+ Releases: 1024,
+ },
+ },
+ },
+ }
+}
+
+func setUsed(used quota_model.Used, subject quota_model.LimitSubject, value int64) *quota_model.Used {
+ switch subject {
+ case quota_model.LimitSubjectSizeReposPublic:
+ used.Size.Repos.Public = value
+ return &used
+ case quota_model.LimitSubjectSizeReposPrivate:
+ used.Size.Repos.Private = value
+ return &used
+ case quota_model.LimitSubjectSizeGitLFS:
+ used.Size.Git.LFS = value
+ return &used
+ case quota_model.LimitSubjectSizeAssetsAttachmentsIssues:
+ used.Size.Assets.Attachments.Issues = value
+ return &used
+ case quota_model.LimitSubjectSizeAssetsAttachmentsReleases:
+ used.Size.Assets.Attachments.Releases = value
+ return &used
+ case quota_model.LimitSubjectSizeAssetsArtifacts:
+ used.Size.Assets.Artifacts = value
+ return &used
+ case quota_model.LimitSubjectSizeAssetsPackagesAll:
+ used.Size.Assets.Packages.All = value
+ return &used
+ case quota_model.LimitSubjectSizeWiki:
+ }
+
+ return nil
+}
+
+func assertEvaluation(t *testing.T, rule quota_model.Rule, used quota_model.Used, subject quota_model.LimitSubject, expected bool) {
+ t.Helper()
+
+ t.Run(subject.String(), func(t *testing.T) {
+ ok, has := rule.Evaluate(used, subject)
+ assert.True(t, has)
+ assert.Equal(t, expected, ok)
+ })
+}
+
+func TestQuotaRuleNoEvaluation(t *testing.T) {
+ rule := quota_model.Rule{
+ Limit: 1024,
+ Subjects: quota_model.LimitSubjects{
+ quota_model.LimitSubjectSizeAssetsAttachmentsAll,
+ },
+ }
+ used := quota_model.Used{}
+ used.Size.Repos.Public = 4096
+
+ _, has := rule.Evaluate(used, quota_model.LimitSubjectSizeReposAll)
+
+ // We have a rule for "size:assets:attachments:all", and query for
+ // "size:repos:all". We don't cover that subject, so the evaluation returns
+ // with no rules found.
+ assert.False(t, has)
+}
+
+func TestQuotaRuleDirectEvaluation(t *testing.T) {
+ // This function is meant to test direct rule evaluation: cases where we set
+ // a rule for a subject, and we evaluate against the same subject.
+
+ runTest := func(t *testing.T, subject quota_model.LimitSubject, limit, used int64, expected bool) {
+ t.Helper()
+
+ rule := quota_model.Rule{
+ Limit: limit,
+ Subjects: quota_model.LimitSubjects{
+ subject,
+ },
+ }
+ usedObj := setUsed(quota_model.Used{}, subject, used)
+ if usedObj == nil {
+ return
+ }
+
+ assertEvaluation(t, rule, *usedObj, subject, expected)
+ }
+
+ t.Run("limit:0", func(t *testing.T) {
+ // With limit:0, nothing used is fine.
+ t.Run("used:0", func(t *testing.T) {
+ for subject := quota_model.LimitSubjectFirst; subject <= quota_model.LimitSubjectLast; subject++ {
+ runTest(t, subject, 0, 0, true)
+ }
+ })
+ // With limit:0, any usage will fail evaluation
+ t.Run("used:512", func(t *testing.T) {
+ for subject := quota_model.LimitSubjectFirst; subject <= quota_model.LimitSubjectLast; subject++ {
+ runTest(t, subject, 0, 512, false)
+ }
+ })
+ })
+
+ t.Run("limit:unlimited", func(t *testing.T) {
+ // With no limits, any usage will succeed evaluation
+ t.Run("used:512", func(t *testing.T) {
+ for subject := quota_model.LimitSubjectFirst; subject <= quota_model.LimitSubjectLast; subject++ {
+ runTest(t, subject, -1, 512, true)
+ }
+ })
+ })
+
+ t.Run("limit:1024", func(t *testing.T) {
+ // With a set limit, usage below the limit succeeds
+ t.Run("used:512", func(t *testing.T) {
+ for subject := quota_model.LimitSubjectFirst; subject <= quota_model.LimitSubjectLast; subject++ {
+ runTest(t, subject, 1024, 512, true)
+ }
+ })
+
+ // With a set limit, usage above the limit fails
+ t.Run("used:2048", func(t *testing.T) {
+ for subject := quota_model.LimitSubjectFirst; subject <= quota_model.LimitSubjectLast; subject++ {
+ runTest(t, subject, 1024, 2048, false)
+ }
+ })
+ })
+}
+
+func TestQuotaRuleCombined(t *testing.T) {
+ rule := quota_model.Rule{
+ Limit: 1024,
+ Subjects: quota_model.LimitSubjects{
+ quota_model.LimitSubjectSizeGitLFS,
+ quota_model.LimitSubjectSizeAssetsAttachmentsReleases,
+ quota_model.LimitSubjectSizeAssetsPackagesAll,
+ },
+ }
+ used := quota_model.Used{
+ Size: quota_model.UsedSize{
+ Repos: quota_model.UsedSizeRepos{
+ Public: 4096,
+ },
+ Git: quota_model.UsedSizeGit{
+ LFS: 256,
+ },
+ Assets: quota_model.UsedSizeAssets{
+ Attachments: quota_model.UsedSizeAssetsAttachments{
+ Issues: 2048,
+ Releases: 256,
+ },
+ Packages: quota_model.UsedSizeAssetsPackages{
+ All: 2560,
+ },
+ },
+ },
+ }
+
+ expectationMap := map[quota_model.LimitSubject]bool{
+ quota_model.LimitSubjectSizeGitLFS: false,
+ quota_model.LimitSubjectSizeAssetsAttachmentsReleases: false,
+ quota_model.LimitSubjectSizeAssetsPackagesAll: false,
+ }
+
+ for subject := quota_model.LimitSubjectFirst; subject <= quota_model.LimitSubjectLast; subject++ {
+ t.Run(subject.String(), func(t *testing.T) {
+ evalOk, evalHas := rule.Evaluate(used, subject)
+ expected, expectedHas := expectationMap[subject]
+
+ assert.Equal(t, expectedHas, evalHas)
+ if expectedHas {
+ assert.Equal(t, expected, evalOk)
+ }
+ })
+ }
+}
+
+func TestQuotaRuleSizeAll(t *testing.T) {
+ runTests := func(t *testing.T, rule quota_model.Rule, expected bool) {
+ t.Helper()
+
+ subject := quota_model.LimitSubjectSizeAll
+
+ t.Run("used:0", func(t *testing.T) {
+ used := quota_model.Used{}
+
+ assertEvaluation(t, rule, used, subject, true)
+ })
+
+ t.Run("used:some-each", func(t *testing.T) {
+ used := makeFullyUsed()
+
+ assertEvaluation(t, rule, used, subject, expected)
+ })
+
+ t.Run("used:some", func(t *testing.T) {
+ used := makePartiallyUsed()
+
+ assertEvaluation(t, rule, used, subject, expected)
+ })
+ }
+
+ // With all limits set to 0, evaluation always fails if usage > 0
+ t.Run("rule:0", func(t *testing.T) {
+ rule := quota_model.Rule{
+ Limit: 0,
+ Subjects: quota_model.LimitSubjects{
+ quota_model.LimitSubjectSizeAll,
+ },
+ }
+
+ runTests(t, rule, false)
+ })
+
+ // With no limits, evaluation always succeeds
+ t.Run("rule:unlimited", func(t *testing.T) {
+ rule := quota_model.Rule{
+ Limit: -1,
+ Subjects: quota_model.LimitSubjects{
+ quota_model.LimitSubjectSizeAll,
+ },
+ }
+
+ runTests(t, rule, true)
+ })
+
+ // With a specific, very generous limit, evaluation succeeds if the limit isn't exhausted
+ t.Run("rule:generous", func(t *testing.T) {
+ rule := quota_model.Rule{
+ Limit: 102400,
+ Subjects: quota_model.LimitSubjects{
+ quota_model.LimitSubjectSizeAll,
+ },
+ }
+
+ runTests(t, rule, true)
+
+ t.Run("limit exhaustion", func(t *testing.T) {
+ used := quota_model.Used{
+ Size: quota_model.UsedSize{
+ Repos: quota_model.UsedSizeRepos{
+ Public: 204800,
+ },
+ },
+ }
+
+ assertEvaluation(t, rule, used, quota_model.LimitSubjectSizeAll, false)
+ })
+ })
+
+ // With a specific, small limit, evaluation fails
+ t.Run("rule:limited", func(t *testing.T) {
+ rule := quota_model.Rule{
+ Limit: 512,
+ Subjects: quota_model.LimitSubjects{
+ quota_model.LimitSubjectSizeAll,
+ },
+ }
+
+ runTests(t, rule, false)
+ })
+}
diff --git a/models/quota/rule.go b/models/quota/rule.go
new file mode 100644
index 0000000..b0c6c0f
--- /dev/null
+++ b/models/quota/rule.go
@@ -0,0 +1,127 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package quota
+
+import (
+ "context"
+ "slices"
+
+ "code.gitea.io/gitea/models/db"
+)
+
+type Rule struct {
+ Name string `xorm:"pk not null" json:"name,omitempty"`
+ Limit int64 `xorm:"NOT NULL" binding:"Required" json:"limit"`
+ Subjects LimitSubjects `json:"subjects,omitempty"`
+}
+
+func (r *Rule) TableName() string {
+ return "quota_rule"
+}
+
+func (r Rule) Evaluate(used Used, forSubject LimitSubject) (bool, bool) {
+ // If there's no limit, short circuit out
+ if r.Limit == -1 {
+ return true, true
+ }
+
+ // If the rule does not cover forSubject, bail out early
+ if !slices.Contains(r.Subjects, forSubject) {
+ return false, false
+ }
+
+ var sum int64
+ for _, subject := range r.Subjects {
+ sum += used.CalculateFor(subject)
+ }
+ return sum <= r.Limit, true
+}
+
+func (r *Rule) Edit(ctx context.Context, limit *int64, subjects *LimitSubjects) (*Rule, error) {
+ cols := []string{}
+
+ if limit != nil {
+ r.Limit = *limit
+ cols = append(cols, "limit")
+ }
+ if subjects != nil {
+ r.Subjects = *subjects
+ cols = append(cols, "subjects")
+ }
+
+ _, err := db.GetEngine(ctx).Where("name = ?", r.Name).Cols(cols...).Update(r)
+ return r, err
+}
+
+func GetRuleByName(ctx context.Context, name string) (*Rule, error) {
+ var rule Rule
+ has, err := db.GetEngine(ctx).Where("name = ?", name).Get(&rule)
+ if err != nil {
+ return nil, err
+ }
+ if !has {
+ return nil, nil
+ }
+ return &rule, err
+}
+
+func ListRules(ctx context.Context) ([]Rule, error) {
+ var rules []Rule
+ err := db.GetEngine(ctx).Find(&rules)
+ return rules, err
+}
+
+func DoesRuleExist(ctx context.Context, name string) (bool, error) {
+ return db.GetEngine(ctx).
+ Where("name = ?", name).
+ Get(&Rule{})
+}
+
+func CreateRule(ctx context.Context, name string, limit int64, subjects LimitSubjects) (*Rule, error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer committer.Close()
+
+ exists, err := DoesRuleExist(ctx, name)
+ if err != nil {
+ return nil, err
+ } else if exists {
+ return nil, ErrRuleAlreadyExists{Name: name}
+ }
+
+ rule := Rule{
+ Name: name,
+ Limit: limit,
+ Subjects: subjects,
+ }
+ _, err = db.GetEngine(ctx).Insert(rule)
+ if err != nil {
+ return nil, err
+ }
+
+ return &rule, committer.Commit()
+}
+
+func DeleteRuleByName(ctx context.Context, name string) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ _, err = db.GetEngine(ctx).Delete(GroupRuleMapping{
+ RuleName: name,
+ })
+ if err != nil {
+ return err
+ }
+
+ _, err = db.GetEngine(ctx).Delete(Rule{Name: name})
+ if err != nil {
+ return err
+ }
+ return committer.Commit()
+}
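Note that Rule.Evaluate applies the limit to the sum over all of the rule's subjects, not to forSubject alone. A sketch of that behaviour with illustrative sizes:

import quota_model "code.gitea.io/gitea/models/quota"

// combinedSubjects returns (ok=false, has=true): 600+600 exceeds the 1024
// limit even though each subject is individually under it.
func combinedSubjects() (ok, has bool) {
	rule := quota_model.Rule{
		Limit: 1024,
		Subjects: quota_model.LimitSubjects{
			quota_model.LimitSubjectSizeGitLFS,
			quota_model.LimitSubjectSizeAssetsAttachmentsReleases,
		},
	}
	used := quota_model.Used{}
	used.Size.Git.LFS = 600
	used.Size.Assets.Attachments.Releases = 600
	return rule.Evaluate(used, quota_model.LimitSubjectSizeGitLFS)
}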
diff --git a/models/quota/used.go b/models/quota/used.go
new file mode 100644
index 0000000..ff84ac2
--- /dev/null
+++ b/models/quota/used.go
@@ -0,0 +1,252 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package quota
+
+import (
+ "context"
+
+ action_model "code.gitea.io/gitea/models/actions"
+ "code.gitea.io/gitea/models/db"
+ package_model "code.gitea.io/gitea/models/packages"
+ repo_model "code.gitea.io/gitea/models/repo"
+
+ "xorm.io/builder"
+)
+
+type Used struct {
+ Size UsedSize
+}
+
+type UsedSize struct {
+ Repos UsedSizeRepos
+ Git UsedSizeGit
+ Assets UsedSizeAssets
+}
+
+func (u UsedSize) All() int64 {
+ return u.Repos.All() + u.Git.All(u.Repos) + u.Assets.All()
+}
+
+type UsedSizeRepos struct {
+ Public int64
+ Private int64
+}
+
+func (u UsedSizeRepos) All() int64 {
+ return u.Public + u.Private
+}
+
+type UsedSizeGit struct {
+ LFS int64
+}
+
+func (u UsedSizeGit) All(r UsedSizeRepos) int64 {
+ return u.LFS + r.All()
+}
+
+type UsedSizeAssets struct {
+ Attachments UsedSizeAssetsAttachments
+ Artifacts int64
+ Packages UsedSizeAssetsPackages
+}
+
+func (u UsedSizeAssets) All() int64 {
+ return u.Attachments.All() + u.Artifacts + u.Packages.All
+}
+
+type UsedSizeAssetsAttachments struct {
+ Issues int64
+ Releases int64
+}
+
+func (u UsedSizeAssetsAttachments) All() int64 {
+ return u.Issues + u.Releases
+}
+
+type UsedSizeAssetsPackages struct {
+ All int64
+}
+
+func (u Used) CalculateFor(subject LimitSubject) int64 {
+ switch subject {
+ case LimitSubjectNone:
+ return 0
+ case LimitSubjectSizeAll:
+ return u.Size.All()
+ case LimitSubjectSizeReposAll:
+ return u.Size.Repos.All()
+ case LimitSubjectSizeReposPublic:
+ return u.Size.Repos.Public
+ case LimitSubjectSizeReposPrivate:
+ return u.Size.Repos.Private
+ case LimitSubjectSizeGitAll:
+ return u.Size.Git.All(u.Size.Repos)
+ case LimitSubjectSizeGitLFS:
+ return u.Size.Git.LFS
+ case LimitSubjectSizeAssetsAll:
+ return u.Size.Assets.All()
+ case LimitSubjectSizeAssetsAttachmentsAll:
+ return u.Size.Assets.Attachments.All()
+ case LimitSubjectSizeAssetsAttachmentsIssues:
+ return u.Size.Assets.Attachments.Issues
+ case LimitSubjectSizeAssetsAttachmentsReleases:
+ return u.Size.Assets.Attachments.Releases
+ case LimitSubjectSizeAssetsArtifacts:
+ return u.Size.Assets.Artifacts
+ case LimitSubjectSizeAssetsPackagesAll:
+ return u.Size.Assets.Packages.All
+ case LimitSubjectSizeWiki:
+ return 0
+ }
+ return 0
+}
+
+func makeUserOwnedCondition(q string, userID int64) builder.Cond {
+ switch q {
+ case "repositories", "attachments", "artifacts":
+ return builder.Eq{"`repository`.owner_id": userID}
+ case "packages":
+ return builder.Or(
+ builder.Eq{"`repository`.owner_id": userID},
+ builder.And(
+ builder.Eq{"`package`.repo_id": 0},
+ builder.Eq{"`package`.owner_id": userID},
+ ),
+ )
+ }
+ return builder.NewCond()
+}
+
+func createQueryFor(ctx context.Context, userID int64, q string) db.Engine {
+ session := db.GetEngine(ctx)
+
+ switch q {
+ case "repositories":
+ session = session.Table("repository")
+ case "attachments":
+ session = session.
+ Table("attachment").
+ Join("INNER", "`repository`", "`attachment`.repo_id = `repository`.id")
+ case "artifacts":
+ session = session.
+ Table("action_artifact").
+ Join("INNER", "`repository`", "`action_artifact`.repo_id = `repository`.id")
+ case "packages":
+ session = session.
+ Table("package_version").
+ Join("INNER", "`package_file`", "`package_file`.version_id = `package_version`.id").
+ Join("INNER", "`package_blob`", "`package_file`.blob_id = `package_blob`.id").
+ Join("INNER", "`package`", "`package_version`.package_id = `package`.id").
+ Join("LEFT OUTER", "`repository`", "`package`.repo_id = `repository`.id")
+ }
+
+ return session.Where(makeUserOwnedCondition(q, userID))
+}
+
+func GetQuotaAttachmentsForUser(ctx context.Context, userID int64, opts db.ListOptions) (int64, *[]*repo_model.Attachment, error) {
+ var attachments []*repo_model.Attachment
+
+ sess := createQueryFor(ctx, userID, "attachments").
+ OrderBy("`attachment`.size DESC")
+ if opts.PageSize > 0 {
+ sess = sess.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize)
+ }
+ count, err := sess.FindAndCount(&attachments)
+ if err != nil {
+ return 0, nil, err
+ }
+
+ return count, &attachments, nil
+}
+
+func GetQuotaPackagesForUser(ctx context.Context, userID int64, opts db.ListOptions) (int64, *[]*package_model.PackageVersion, error) {
+ var pkgs []*package_model.PackageVersion
+
+ sess := createQueryFor(ctx, userID, "packages").
+ OrderBy("`package_blob`.size DESC")
+ if opts.PageSize > 0 {
+ sess = sess.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize)
+ }
+ count, err := sess.FindAndCount(&pkgs)
+ if err != nil {
+ return 0, nil, err
+ }
+
+ return count, &pkgs, nil
+}
+
+func GetQuotaArtifactsForUser(ctx context.Context, userID int64, opts db.ListOptions) (int64, *[]*action_model.ActionArtifact, error) {
+ var artifacts []*action_model.ActionArtifact
+
+ sess := createQueryFor(ctx, userID, "artifacts").
+ OrderBy("`action_artifact`.file_compressed_size DESC")
+ if opts.PageSize > 0 {
+ sess = sess.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize)
+ }
+ count, err := sess.FindAndCount(&artifacts)
+ if err != nil {
+ return 0, nil, err
+ }
+
+ return count, &artifacts, nil
+}
+
+func GetUsedForUser(ctx context.Context, userID int64) (*Used, error) {
+ var used Used
+
+ _, err := createQueryFor(ctx, userID, "repositories").
+ Where("`repository`.is_private = ?", true).
+ Select("SUM(git_size) AS code").
+ Get(&used.Size.Repos.Private)
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = createQueryFor(ctx, userID, "repositories").
+ Where("`repository`.is_private = ?", false).
+ Select("SUM(git_size) AS code").
+ Get(&used.Size.Repos.Public)
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = createQueryFor(ctx, userID, "repositories").
+ Select("SUM(lfs_size) AS lfs").
+ Get(&used.Size.Git.LFS)
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = createQueryFor(ctx, userID, "attachments").
+ Select("SUM(`attachment`.size) AS size").
+ Where("`attachment`.release_id != 0").
+ Get(&used.Size.Assets.Attachments.Releases)
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = createQueryFor(ctx, userID, "attachments").
+ Select("SUM(`attachment`.size) AS size").
+ Where("`attachment`.release_id = 0").
+ Get(&used.Size.Assets.Attachments.Issues)
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = createQueryFor(ctx, userID, "artifacts").
+ Select("SUM(file_compressed_size) AS size").
+ Get(&used.Size.Assets.Artifacts)
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = createQueryFor(ctx, userID, "packages").
+ Select("SUM(package_blob.size) AS size").
+ Get(&used.Size.Assets.Packages.All)
+ if err != nil {
+ return nil, err
+ }
+
+ return &used, nil
+}
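A small sketch of how the aggregate subjects roll up in CalculateFor; the sizes are illustrative:

import quota_model "code.gitea.io/gitea/models/quota"

// gitAllUsage returns 150: size:git:all counts the repository sizes (100)
// on top of LFS (50), per UsedSizeGit.All above.
func gitAllUsage() int64 {
	used := quota_model.Used{}
	used.Size.Repos.Public = 100
	used.Size.Git.LFS = 50
	return used.CalculateFor(quota_model.LimitSubjectSizeGitAll)
}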
diff --git a/models/repo.go b/models/repo.go
new file mode 100644
index 0000000..0dc8ee5
--- /dev/null
+++ b/models/repo.go
@@ -0,0 +1,362 @@
+// Copyright 2014 The Gogs Authors. All rights reserved.
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package models
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+
+ _ "image/jpeg" // Needed for jpeg support
+
+ asymkey_model "code.gitea.io/gitea/models/asymkey"
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+)
+
+// Init initializes the models
+func Init(ctx context.Context) error {
+ return unit.LoadUnitConfig()
+}
+
+type repoChecker struct {
+ querySQL func(ctx context.Context) ([]map[string][]byte, error)
+ correctSQL func(ctx context.Context, id int64) error
+ desc string
+}
+
+func repoStatsCheck(ctx context.Context, checker *repoChecker) {
+ results, err := checker.querySQL(ctx)
+ if err != nil {
+ log.Error("Select %s: %v", checker.desc, err)
+ return
+ }
+ for _, result := range results {
+ id, _ := strconv.ParseInt(string(result["id"]), 10, 64)
+ select {
+ case <-ctx.Done():
+ log.Warn("CheckRepoStats: Cancelled before checking %s for with id=%d", checker.desc, id)
+ return
+ default:
+ }
+ log.Trace("Updating %s: %d", checker.desc, id)
+ err = checker.correctSQL(ctx, id)
+ if err != nil {
+ log.Error("Update %s[%d]: %v", checker.desc, id, err)
+ }
+ }
+}
+
+func StatsCorrectSQL(ctx context.Context, sql string, id int64) error {
+ _, err := db.GetEngine(ctx).Exec(sql, id, id)
+ return err
+}
+
+func repoStatsCorrectNumWatches(ctx context.Context, id int64) error {
+ return StatsCorrectSQL(ctx, "UPDATE `repository` SET num_watches=(SELECT COUNT(*) FROM `watch` WHERE repo_id=? AND mode<>2) WHERE id=?", id)
+}
+
+func repoStatsCorrectNumStars(ctx context.Context, id int64) error {
+ return StatsCorrectSQL(ctx, "UPDATE `repository` SET num_stars=(SELECT COUNT(*) FROM `star` WHERE repo_id=?) WHERE id=?", id)
+}
+
+func labelStatsCorrectNumIssues(ctx context.Context, id int64) error {
+ return StatsCorrectSQL(ctx, "UPDATE `label` SET num_issues=(SELECT COUNT(*) FROM `issue_label` WHERE label_id=?) WHERE id=?", id)
+}
+
+func labelStatsCorrectNumIssuesRepo(ctx context.Context, id int64) error {
+ _, err := db.GetEngine(ctx).Exec("UPDATE `label` SET num_issues=(SELECT COUNT(*) FROM `issue_label` WHERE label_id=id) WHERE repo_id=?", id)
+ return err
+}
+
+func labelStatsCorrectNumClosedIssues(ctx context.Context, id int64) error {
+ _, err := db.GetEngine(ctx).Exec("UPDATE `label` SET num_closed_issues=(SELECT COUNT(*) FROM `issue_label`,`issue` WHERE `issue_label`.label_id=`label`.id AND `issue_label`.issue_id=`issue`.id AND `issue`.is_closed=?) WHERE `label`.id=?", true, id)
+ return err
+}
+
+func labelStatsCorrectNumClosedIssuesRepo(ctx context.Context, id int64) error {
+ _, err := db.GetEngine(ctx).Exec("UPDATE `label` SET num_closed_issues=(SELECT COUNT(*) FROM `issue_label`,`issue` WHERE `issue_label`.label_id=`label`.id AND `issue_label`.issue_id=`issue`.id AND `issue`.is_closed=?) WHERE `label`.repo_id=?", true, id)
+ return err
+}
+
+var milestoneStatsQueryNumIssues = "SELECT `milestone`.id FROM `milestone` WHERE `milestone`.num_closed_issues!=(SELECT COUNT(*) FROM `issue` WHERE `issue`.milestone_id=`milestone`.id AND `issue`.is_closed=?) OR `milestone`.num_issues!=(SELECT COUNT(*) FROM `issue` WHERE `issue`.milestone_id=`milestone`.id)"
+
+func milestoneStatsCorrectNumIssuesRepo(ctx context.Context, id int64) error {
+ e := db.GetEngine(ctx)
+ results, err := e.Query(milestoneStatsQueryNumIssues+" AND `milestone`.repo_id = ?", true, id)
+ if err != nil {
+ return err
+ }
+ for _, result := range results {
+ id, _ := strconv.ParseInt(string(result["id"]), 10, 64)
+ err = issues_model.UpdateMilestoneCounters(ctx, id)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func userStatsCorrectNumRepos(ctx context.Context, id int64) error {
+ return StatsCorrectSQL(ctx, "UPDATE `user` SET num_repos=(SELECT COUNT(*) FROM `repository` WHERE owner_id=?) WHERE id=?", id)
+}
+
+func repoStatsCorrectIssueNumComments(ctx context.Context, id int64) error {
+ return StatsCorrectSQL(ctx, "UPDATE `issue` SET num_comments=(SELECT COUNT(*) FROM `comment` WHERE issue_id=? AND type=0) WHERE id=?", id)
+}
+
+func repoStatsCorrectNumIssues(ctx context.Context, id int64) error {
+ return repo_model.UpdateRepoIssueNumbers(ctx, id, false, false)
+}
+
+func repoStatsCorrectNumPulls(ctx context.Context, id int64) error {
+ return repo_model.UpdateRepoIssueNumbers(ctx, id, true, false)
+}
+
+func repoStatsCorrectNumClosedIssues(ctx context.Context, id int64) error {
+ return repo_model.UpdateRepoIssueNumbers(ctx, id, false, true)
+}
+
+func repoStatsCorrectNumClosedPulls(ctx context.Context, id int64) error {
+ return repo_model.UpdateRepoIssueNumbers(ctx, id, true, true)
+}
+
+func statsQuery(args ...any) func(context.Context) ([]map[string][]byte, error) {
+ return func(ctx context.Context) ([]map[string][]byte, error) {
+ return db.GetEngine(ctx).Query(args...)
+ }
+}
+
+// CheckRepoStats checks the repository stats
+func CheckRepoStats(ctx context.Context) error {
+ log.Trace("Doing: CheckRepoStats")
+
+ checkers := []*repoChecker{
+ // Repository.NumWatches
+ {
+ statsQuery("SELECT repo.id FROM `repository` repo WHERE repo.num_watches!=(SELECT COUNT(*) FROM `watch` WHERE repo_id=repo.id AND mode<>2)"),
+ repoStatsCorrectNumWatches,
+ "repository count 'num_watches'",
+ },
+ // Repository.NumStars
+ {
+ statsQuery("SELECT repo.id FROM `repository` repo WHERE repo.num_stars!=(SELECT COUNT(*) FROM `star` WHERE repo_id=repo.id)"),
+ repoStatsCorrectNumStars,
+ "repository count 'num_stars'",
+ },
+ // Repository.NumIssues
+ {
+ statsQuery("SELECT repo.id FROM `repository` repo WHERE repo.num_issues!=(SELECT COUNT(*) FROM `issue` WHERE repo_id=repo.id AND is_pull=?)", false),
+ repoStatsCorrectNumIssues,
+ "repository count 'num_issues'",
+ },
+ // Repository.NumClosedIssues
+ {
+ statsQuery("SELECT repo.id FROM `repository` repo WHERE repo.num_closed_issues!=(SELECT COUNT(*) FROM `issue` WHERE repo_id=repo.id AND is_closed=? AND is_pull=?)", true, false),
+ repoStatsCorrectNumClosedIssues,
+ "repository count 'num_closed_issues'",
+ },
+ // Repository.NumPulls
+ {
+ statsQuery("SELECT repo.id FROM `repository` repo WHERE repo.num_pulls!=(SELECT COUNT(*) FROM `issue` WHERE repo_id=repo.id AND is_pull=?)", true),
+ repoStatsCorrectNumPulls,
+ "repository count 'num_pulls'",
+ },
+ // Repository.NumClosedPulls
+ {
+ statsQuery("SELECT repo.id FROM `repository` repo WHERE repo.num_closed_pulls!=(SELECT COUNT(*) FROM `issue` WHERE repo_id=repo.id AND is_closed=? AND is_pull=?)", true, true),
+ repoStatsCorrectNumClosedPulls,
+ "repository count 'num_closed_pulls'",
+ },
+ // Label.NumIssues
+ {
+ statsQuery("SELECT label.id FROM `label` WHERE label.num_issues!=(SELECT COUNT(*) FROM `issue_label` WHERE label_id=label.id)"),
+ labelStatsCorrectNumIssues,
+ "label count 'num_issues'",
+ },
+ // Label.NumClosedIssues
+ {
+ statsQuery("SELECT `label`.id FROM `label` WHERE `label`.num_closed_issues!=(SELECT COUNT(*) FROM `issue_label`,`issue` WHERE `issue_label`.label_id=`label`.id AND `issue_label`.issue_id=`issue`.id AND `issue`.is_closed=?)", true),
+ labelStatsCorrectNumClosedIssues,
+ "label count 'num_closed_issues'",
+ },
+ // Milestone.Num{,Closed}Issues
+ {
+ statsQuery(milestoneStatsQueryNumIssues, true),
+ issues_model.UpdateMilestoneCounters,
+ "milestone count 'num_closed_issues' and 'num_issues'",
+ },
+ // User.NumRepos
+ {
+ statsQuery("SELECT `user`.id FROM `user` WHERE `user`.num_repos!=(SELECT COUNT(*) FROM `repository` WHERE owner_id=`user`.id)"),
+ userStatsCorrectNumRepos,
+ "user count 'num_repos'",
+ },
+ // Issue.NumComments
+ {
+ statsQuery("SELECT `issue`.id FROM `issue` WHERE `issue`.num_comments!=(SELECT COUNT(*) FROM `comment` WHERE issue_id=`issue`.id AND type=0)"),
+ repoStatsCorrectIssueNumComments,
+ "issue count 'num_comments'",
+ },
+ }
+ for _, checker := range checkers {
+ select {
+ case <-ctx.Done():
+ log.Warn("CheckRepoStats: Cancelled before %s", checker.desc)
+ return db.ErrCancelledf("before checking %s", checker.desc)
+ default:
+ repoStatsCheck(ctx, checker)
+ }
+ }
+
+ // FIXME: use checker when we stop supporting the old fork repo format.
+ // ***** START: Repository.NumForks *****
+ e := db.GetEngine(ctx)
+ results, err := e.Query("SELECT repo.id FROM `repository` repo WHERE repo.num_forks!=(SELECT COUNT(*) FROM `repository` WHERE fork_id=repo.id)")
+ if err != nil {
+ log.Error("Select repository count 'num_forks': %v", err)
+ } else {
+ for _, result := range results {
+ id, _ := strconv.ParseInt(string(result["id"]), 10, 64)
+ select {
+ case <-ctx.Done():
+ log.Warn("CheckRepoStats: Cancelled")
+ return db.ErrCancelledf("during repository count 'num_fork' for repo ID %d", id)
+ default:
+ }
+ log.Trace("Updating repository count 'num_forks': %d", id)
+
+ repo, err := repo_model.GetRepositoryByID(ctx, id)
+ if err != nil {
+ log.Error("repo_model.GetRepositoryByID[%d]: %v", id, err)
+ continue
+ }
+
+ _, err = e.SQL("SELECT COUNT(*) FROM `repository` WHERE fork_id=?", repo.ID).Get(&repo.NumForks)
+ if err != nil {
+ log.Error("Select count of forks[%d]: %v", repo.ID, err)
+ continue
+ }
+
+ if _, err = e.ID(repo.ID).Cols("num_forks").Update(repo); err != nil {
+ log.Error("UpdateRepository[%d]: %v", id, err)
+ continue
+ }
+ }
+ }
+ // ***** END: Repository.NumForks *****
+ return nil
+}
+
+func UpdateRepoStats(ctx context.Context, id int64) error {
+ var err error
+
+ for _, f := range []func(ctx context.Context, id int64) error{
+ repoStatsCorrectNumWatches,
+ repoStatsCorrectNumStars,
+ repoStatsCorrectNumIssues,
+ repoStatsCorrectNumPulls,
+ repoStatsCorrectNumClosedIssues,
+ repoStatsCorrectNumClosedPulls,
+ labelStatsCorrectNumIssuesRepo,
+ labelStatsCorrectNumClosedIssuesRepo,
+ milestoneStatsCorrectNumIssuesRepo,
+ } {
+ err = f(ctx, id)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func updateUserStarNumbers(ctx context.Context, users []user_model.User) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ for _, user := range users {
+ if _, err = db.Exec(ctx, "UPDATE `user` SET num_stars=(SELECT COUNT(*) FROM `star` WHERE uid=?) WHERE id=?", user.ID, user.ID); err != nil {
+ return err
+ }
+ }
+
+ return committer.Commit()
+}
+
+// DoctorUserStarNum recalculates the star numbers for all users
+func DoctorUserStarNum(ctx context.Context) (err error) {
+ const batchSize = 100
+
+ for start := 0; ; start += batchSize {
+ users := make([]user_model.User, 0, batchSize)
+ if err = db.GetEngine(ctx).Limit(batchSize, start).Where("type = ?", 0).Cols("id").Find(&users); err != nil {
+ return err
+ }
+ if len(users) == 0 {
+ break
+ }
+
+ if err = updateUserStarNumbers(ctx, users); err != nil {
+ return err
+ }
+ }
+
+ log.Debug("Recalculating star numbers for all users finished")
+
+ return err
+}
+
+// DeleteDeployKey deletes a deploy key
+func DeleteDeployKey(ctx context.Context, doer *user_model.User, id int64) error {
+ key, err := asymkey_model.GetDeployKeyByID(ctx, id)
+ if err != nil {
+ if asymkey_model.IsErrDeployKeyNotExist(err) {
+ return nil
+ }
+ return fmt.Errorf("GetDeployKeyByID: %w", err)
+ }
+
+ // Check if user has access to delete this key.
+ if !doer.IsAdmin {
+ repo, err := repo_model.GetRepositoryByID(ctx, key.RepoID)
+ if err != nil {
+ return fmt.Errorf("GetRepositoryByID: %w", err)
+ }
+ has, err := access_model.IsUserRepoAdmin(ctx, repo, doer)
+ if err != nil {
+ return fmt.Errorf("GetUserRepoPermission: %w", err)
+ } else if !has {
+ return asymkey_model.ErrKeyAccessDenied{
+ UserID: doer.ID,
+ KeyID: key.ID,
+ Note: "deploy",
+ }
+ }
+ }
+
+ if _, err := db.DeleteByID[asymkey_model.DeployKey](ctx, key.ID); err != nil {
+ return fmt.Errorf("delete deploy key [%d]: %w", key.ID, err)
+ }
+
+ // Check if this is the last reference to same key content.
+ has, err := asymkey_model.IsDeployKeyExistByKeyID(ctx, key.KeyID)
+ if err != nil {
+ return err
+ } else if !has {
+ if _, err = db.DeleteByID[asymkey_model.PublicKey](ctx, key.KeyID); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/models/repo/TestSearchRepositoryIDsByCondition/repository.yml b/models/repo/TestSearchRepositoryIDsByCondition/repository.yml
new file mode 100644
index 0000000..9ce8307
--- /dev/null
+++ b/models/repo/TestSearchRepositoryIDsByCondition/repository.yml
@@ -0,0 +1,30 @@
+-
+ id: 1001
+ owner_id: 33
+ owner_name: user33
+ lower_name: repo1001
+ name: repo1001
+ default_branch: main
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
diff --git a/models/repo/archive_download_count.go b/models/repo/archive_download_count.go
new file mode 100644
index 0000000..31f0399
--- /dev/null
+++ b/models/repo/archive_download_count.go
@@ -0,0 +1,90 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/git"
+ api "code.gitea.io/gitea/modules/structs"
+)
+
+// RepoArchiveDownloadCount counts all archive downloads for a tag
+type RepoArchiveDownloadCount struct { //nolint:revive
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"index unique(s)"`
+ ReleaseID int64 `xorm:"index unique(s)"`
+ Type git.ArchiveType `xorm:"unique(s)"`
+ Count int64
+}
+
+func init() {
+ db.RegisterModel(new(RepoArchiveDownloadCount))
+}
+
+// CountArchiveDownload adds one download to the given archive
+func CountArchiveDownload(ctx context.Context, repoID, releaseID int64, tp git.ArchiveType) error {
+ updateCount, err := db.GetEngine(ctx).Where("repo_id = ?", repoID).And("release_id = ?", releaseID).And("`type` = ?", tp).Incr("count").Update(new(RepoArchiveDownloadCount))
+ if err != nil {
+ return err
+ }
+
+ if updateCount != 0 {
+ // The count was updated, so we can exit
+ return nil
+ }
+
+ // The archive does not exist in the database yet, so add it
+ newCounter := &RepoArchiveDownloadCount{
+ RepoID: repoID,
+ ReleaseID: releaseID,
+ Type: tp,
+ Count: 1,
+ }
+
+ _, err = db.GetEngine(ctx).Insert(newCounter)
+ return err
+}
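+
+// The update-then-insert sequence above acts as a portable upsert: the Incr
+// update succeeds when a counter row already exists, otherwise a fresh row is
+// inserted with Count set to 1. A hypothetical call site:
+//
+//	// record one tar.gz download of release rel in repo
+//	if err := CountArchiveDownload(ctx, repo.ID, rel.ID, git.TARGZ); err != nil {
+//		return err
+//	}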
+
+// GetArchiveDownloadCount returns the download count of a tag
+func GetArchiveDownloadCount(ctx context.Context, repoID, releaseID int64) (*api.TagArchiveDownloadCount, error) {
+ downloadCountList := make([]RepoArchiveDownloadCount, 0)
+ err := db.GetEngine(ctx).Where("repo_id = ?", repoID).And("release_id = ?", releaseID).Find(&downloadCountList)
+ if err != nil {
+ return nil, err
+ }
+
+ tagCounter := new(api.TagArchiveDownloadCount)
+
+ for _, singleCount := range downloadCountList {
+ switch singleCount.Type {
+ case git.ZIP:
+ tagCounter.Zip = singleCount.Count
+ case git.TARGZ:
+ tagCounter.TarGz = singleCount.Count
+ }
+ }
+
+ return tagCounter, nil
+}
+
+// GetArchiveDownloadCountForTagName returns the download count of the tag with the given name
+func GetArchiveDownloadCountForTagName(ctx context.Context, repoID int64, tagName string) (*api.TagArchiveDownloadCount, error) {
+ release, err := GetRelease(ctx, repoID, tagName)
+ if err != nil {
+ if IsErrReleaseNotExist(err) {
+ return new(api.TagArchiveDownloadCount), nil
+ }
+ return nil, err
+ }
+
+ return GetArchiveDownloadCount(ctx, repoID, release.ID)
+}
+
+// DeleteArchiveDownloadCountForRelease deletes the release from the repo_archive_download_count table
+func DeleteArchiveDownloadCountForRelease(ctx context.Context, releaseID int64) error {
+ _, err := db.GetEngine(ctx).Delete(&RepoArchiveDownloadCount{ReleaseID: releaseID})
+ return err
+}
diff --git a/models/repo/archive_download_count_test.go b/models/repo/archive_download_count_test.go
new file mode 100644
index 0000000..ffc6cdf
--- /dev/null
+++ b/models/repo/archive_download_count_test.go
@@ -0,0 +1,65 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/modules/git"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestRepoArchiveDownloadCount(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ release, err := repo_model.GetReleaseByID(db.DefaultContext, 1)
+ require.NoError(t, err)
+
+ // We have no count, so it should return 0
+ downloadCount, err := repo_model.GetArchiveDownloadCount(db.DefaultContext, release.RepoID, release.ID)
+ require.NoError(t, err)
+ assert.Equal(t, int64(0), downloadCount.Zip)
+ assert.Equal(t, int64(0), downloadCount.TarGz)
+
+ // Set the TarGz counter to 1
+ err = repo_model.CountArchiveDownload(db.DefaultContext, release.RepoID, release.ID, git.TARGZ)
+ require.NoError(t, err)
+
+ downloadCount, err = repo_model.GetArchiveDownloadCountForTagName(db.DefaultContext, release.RepoID, release.TagName)
+ require.NoError(t, err)
+ assert.Equal(t, int64(0), downloadCount.Zip)
+ assert.Equal(t, int64(1), downloadCount.TarGz)
+
+ // Set the TarGz counter to 2
+ err = repo_model.CountArchiveDownload(db.DefaultContext, release.RepoID, release.ID, git.TARGZ)
+ require.NoError(t, err)
+
+ downloadCount, err = repo_model.GetArchiveDownloadCountForTagName(db.DefaultContext, release.RepoID, release.TagName)
+ require.NoError(t, err)
+ assert.Equal(t, int64(0), downloadCount.Zip)
+ assert.Equal(t, int64(2), downloadCount.TarGz)
+
+ // Set the Zip counter to 1
+ err = repo_model.CountArchiveDownload(db.DefaultContext, release.RepoID, release.ID, git.ZIP)
+ require.NoError(t, err)
+
+ downloadCount, err = repo_model.GetArchiveDownloadCountForTagName(db.DefaultContext, release.RepoID, release.TagName)
+ require.NoError(t, err)
+ assert.Equal(t, int64(1), downloadCount.Zip)
+ assert.Equal(t, int64(2), downloadCount.TarGz)
+
+ // Delete the count
+ err = repo_model.DeleteArchiveDownloadCountForRelease(db.DefaultContext, release.ID)
+ require.NoError(t, err)
+
+ downloadCount, err = repo_model.GetArchiveDownloadCountForTagName(db.DefaultContext, release.RepoID, release.TagName)
+ require.NoError(t, err)
+ assert.Equal(t, int64(0), downloadCount.Zip)
+ assert.Equal(t, int64(0), downloadCount.TarGz)
+}
diff --git a/models/repo/archiver.go b/models/repo/archiver.go
new file mode 100644
index 0000000..3f05fcf
--- /dev/null
+++ b/models/repo/archiver.go
@@ -0,0 +1,139 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ArchiverStatus represents repo archive status
+type ArchiverStatus int
+
+// enumerate all repo archive statuses
+const (
+ ArchiverGenerating ArchiverStatus = iota // the archiver is generating
+ ArchiverReady // it's ready
+)
+
+// RepoArchiver represents a repository archive at a specific commit
+type RepoArchiver struct { //revive:disable-line:exported
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"index unique(s)"`
+ Type git.ArchiveType `xorm:"unique(s)"`
+ Status ArchiverStatus
+ CommitID string `xorm:"VARCHAR(64) unique(s)"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX NOT NULL created"`
+ ReleaseID int64 `xorm:"-"`
+}
+
+func init() {
+ db.RegisterModel(new(RepoArchiver))
+}
+
+// RelativePath returns the archive path relative to the archive storage root.
+func (archiver *RepoArchiver) RelativePath() string {
+ return fmt.Sprintf("%d/%s/%s.%s", archiver.RepoID, archiver.CommitID[:2], archiver.CommitID, archiver.Type.String())
+}
+
+// repoArchiverForRelativePath inverts (*RepoArchiver).RelativePath(): it parses a relative storage path back into a shell RepoArchiver struct
+func repoArchiverForRelativePath(relativePath string) (*RepoArchiver, error) {
+ parts := strings.SplitN(relativePath, "/", 3)
+ if len(parts) != 3 {
+ return nil, util.SilentWrap{Message: fmt.Sprintf("invalid storage path: %s", relativePath), Err: util.ErrInvalidArgument}
+ }
+ repoID, err := strconv.ParseInt(parts[0], 10, 64)
+ if err != nil {
+ return nil, util.SilentWrap{Message: fmt.Sprintf("invalid storage path: %s", relativePath), Err: util.ErrInvalidArgument}
+ }
+ nameExts := strings.SplitN(parts[2], ".", 2)
+ if len(nameExts) != 2 {
+ return nil, util.SilentWrap{Message: fmt.Sprintf("invalid storage path: %s", relativePath), Err: util.ErrInvalidArgument}
+ }
+
+ return &RepoArchiver{
+ RepoID: repoID,
+ CommitID: parts[1] + nameExts[0],
+ Type: git.ToArchiveType(nameExts[1]),
+ }, nil
+}
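+
+// Round-trip sketch (values illustrative): a RepoArchiver with RepoID=1,
+// CommitID "0a1b2c3d" and Type git.ZIP yields RelativePath() "1/0a/0a1b2c3d.zip";
+// parsing that path back recovers the same RepoID, CommitID, and archive type.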
+
+// GetRepoArchiver gets an archiver; it returns nil if none exists
+func GetRepoArchiver(ctx context.Context, repoID int64, tp git.ArchiveType, commitID string) (*RepoArchiver, error) {
+ var archiver RepoArchiver
+ has, err := db.GetEngine(ctx).Where("repo_id=?", repoID).And("`type`=?", tp).And("commit_id=?", commitID).Get(&archiver)
+ if err != nil {
+ return nil, err
+ }
+ if has {
+ return &archiver, nil
+ }
+ return nil, nil
+}
+
+// ExistsRepoArchiverWithStoragePath checks if there is a RepoArchiver for a given storage path
+func ExistsRepoArchiverWithStoragePath(ctx context.Context, storagePath string) (bool, error) {
+ // We need to invert the path provided func (archiver *RepoArchiver) RelativePath() above
+ archiver, err := repoArchiverForRelativePath(storagePath)
+ if err != nil {
+ return false, err
+ }
+
+ return db.GetEngine(ctx).Exist(archiver)
+}
+
+// UpdateRepoArchiverStatus updates the archiver's status
+func UpdateRepoArchiverStatus(ctx context.Context, archiver *RepoArchiver) error {
+ _, err := db.GetEngine(ctx).ID(archiver.ID).Cols("status").Update(archiver)
+ return err
+}
+
+// DeleteAllRepoArchives deletes all repo archive records
+func DeleteAllRepoArchives(ctx context.Context) error {
+ // xorm refuses an unconditional delete, so use 1=1 to delete all rows
+ _, err := db.GetEngine(ctx).Where("1=1").Delete(new(RepoArchiver))
+ return err
+}
+
+// FindRepoArchiversOption represents the options to find repo archivers
+type FindRepoArchiversOption struct {
+ db.ListOptions
+ OlderThan time.Duration
+}
+
+func (opts FindRepoArchiversOption) ToConds() builder.Cond {
+ cond := builder.NewCond()
+ if opts.OlderThan > 0 {
+ cond = cond.And(builder.Lt{"created_unix": time.Now().Add(-opts.OlderThan).Unix()})
+ }
+ return cond
+}
+
+func (opts FindRepoArchiversOption) ToOrders() string {
+ return "created_unix ASC"
+}
+
+// SetArchiveRepoState sets whether a repo is archived
+func SetArchiveRepoState(ctx context.Context, repo *Repository, isArchived bool) (err error) {
+ repo.IsArchived = isArchived
+
+ if isArchived {
+ repo.ArchivedUnix = timeutil.TimeStampNow()
+ } else {
+ repo.ArchivedUnix = timeutil.TimeStamp(0)
+ }
+
+ _, err = db.GetEngine(ctx).ID(repo.ID).Cols("is_archived", "archived_unix").NoAutoTime().Update(repo)
+ return err
+}
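+
+// NoAutoTime above keeps updated_unix untouched, so archiving or unarchiving a
+// repository does not bump its "recently updated" ordering; only is_archived
+// and archived_unix change.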
diff --git a/models/repo/attachment.go b/models/repo/attachment.go
new file mode 100644
index 0000000..128bceb
--- /dev/null
+++ b/models/repo/attachment.go
@@ -0,0 +1,287 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "path"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/storage"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/modules/validation"
+)
+
+// Attachment represents an attachment of an issue, comment, or release.
+type Attachment struct {
+ ID int64 `xorm:"pk autoincr"`
+ UUID string `xorm:"uuid UNIQUE"`
+ RepoID int64 `xorm:"INDEX"` // this should not be zero
+ IssueID int64 `xorm:"INDEX"` // may be zero when creating
+ ReleaseID int64 `xorm:"INDEX"` // may be zero when creating
+ UploaderID int64 `xorm:"INDEX DEFAULT 0"` // Notice: will be zero for attachments created before this column was added
+ CommentID int64 `xorm:"INDEX"`
+ Name string
+ DownloadCount int64 `xorm:"DEFAULT 0"`
+ Size int64 `xorm:"DEFAULT 0"`
+ NoAutoTime bool `xorm:"-"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+ CustomDownloadURL string `xorm:"-"`
+ ExternalURL string
+}
+
+func init() {
+ db.RegisterModel(new(Attachment))
+}
+
+// IncreaseDownloadCount increments the attachment's download count by one
+func (a *Attachment) IncreaseDownloadCount(ctx context.Context) error {
+ // Update download count.
+ if _, err := db.GetEngine(ctx).Exec("UPDATE `attachment` SET download_count=download_count+1 WHERE id=?", a.ID); err != nil {
+ return fmt.Errorf("increase attachment count: %w", err)
+ }
+
+ return nil
+}
+
+// AttachmentRelativePath returns the relative path
+func AttachmentRelativePath(uuid string) string {
+ return path.Join(uuid[0:1], uuid[1:2], uuid)
+}
+
+// RelativePath returns the relative path of the attachment
+func (a *Attachment) RelativePath() string {
+ return AttachmentRelativePath(a.UUID)
+}
+
+// DownloadURL returns the download url of the attached file
+func (a *Attachment) DownloadURL() string {
+ if a.ExternalURL != "" {
+ return a.ExternalURL
+ }
+
+ if a.CustomDownloadURL != "" {
+ return a.CustomDownloadURL
+ }
+
+ return setting.AppURL + "attachments/" + url.PathEscape(a.UUID)
+}
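+
+// Resolution order above: an ExternalURL (attachment hosted elsewhere) wins,
+// then a per-request CustomDownloadURL, then the default form. Illustrative
+// result, assuming AppURL is "https://example.com/":
+//
+//	a := &Attachment{UUID: "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11"}
+//	_ = a.DownloadURL() // "https://example.com/attachments/a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11"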
+
+// ErrAttachmentNotExist represents a "AttachmentNotExist" kind of error.
+type ErrAttachmentNotExist struct {
+ ID int64
+ UUID string
+}
+
+// IsErrAttachmentNotExist checks if an error is a ErrAttachmentNotExist.
+func IsErrAttachmentNotExist(err error) bool {
+ _, ok := err.(ErrAttachmentNotExist)
+ return ok
+}
+
+func (err ErrAttachmentNotExist) Error() string {
+ return fmt.Sprintf("attachment does not exist [id: %d, uuid: %s]", err.ID, err.UUID)
+}
+
+func (err ErrAttachmentNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+type ErrInvalidExternalURL struct {
+ ExternalURL string
+}
+
+func IsErrInvalidExternalURL(err error) bool {
+ _, ok := err.(ErrInvalidExternalURL)
+ return ok
+}
+
+func (err ErrInvalidExternalURL) Error() string {
+ return fmt.Sprintf("invalid external URL: '%s'", err.ExternalURL)
+}
+
+func (err ErrInvalidExternalURL) Unwrap() error {
+ return util.ErrPermissionDenied
+}
+
+// GetAttachmentByID returns attachment by given id
+func GetAttachmentByID(ctx context.Context, id int64) (*Attachment, error) {
+ attach := &Attachment{}
+ if has, err := db.GetEngine(ctx).ID(id).Get(attach); err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrAttachmentNotExist{ID: id, UUID: ""}
+ }
+ return attach, nil
+}
+
+// GetAttachmentByUUID returns attachment by given UUID.
+func GetAttachmentByUUID(ctx context.Context, uuid string) (*Attachment, error) {
+ attach := &Attachment{}
+ has, err := db.GetEngine(ctx).Where("uuid=?", uuid).Get(attach)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrAttachmentNotExist{0, uuid}
+ }
+ return attach, nil
+}
+
+// GetAttachmentsByUUIDs returns attachments by the given UUID list.
+func GetAttachmentsByUUIDs(ctx context.Context, uuids []string) ([]*Attachment, error) {
+ if len(uuids) == 0 {
+ return []*Attachment{}, nil
+ }
+
+ // Silently drop invalid uuids.
+ attachments := make([]*Attachment, 0, len(uuids))
+ return attachments, db.GetEngine(ctx).In("uuid", uuids).Find(&attachments)
+}
+
+// ExistAttachmentsByUUID returns true if attachment exists with the given UUID
+func ExistAttachmentsByUUID(ctx context.Context, uuid string) (bool, error) {
+ return db.GetEngine(ctx).Where("`uuid`=?", uuid).Exist(new(Attachment))
+}
+
+// GetAttachmentsByIssueID returns all attachments of an issue.
+func GetAttachmentsByIssueID(ctx context.Context, issueID int64) ([]*Attachment, error) {
+ attachments := make([]*Attachment, 0, 10)
+ return attachments, db.GetEngine(ctx).Where("issue_id = ? AND comment_id = 0", issueID).Find(&attachments)
+}
+
+// GetAttachmentsByIssueIDImagesLatest returns the latest image attachments of an issue.
+func GetAttachmentsByIssueIDImagesLatest(ctx context.Context, issueID int64) ([]*Attachment, error) {
+ attachments := make([]*Attachment, 0, 5)
+ return attachments, db.GetEngine(ctx).Where(`issue_id = ? AND (name like '%.apng'
+ OR name like '%.avif'
+ OR name like '%.bmp'
+ OR name like '%.gif'
+ OR name like '%.jpg'
+ OR name like '%.jpeg'
+ OR name like '%.jxl'
+ OR name like '%.png'
+ OR name like '%.svg'
+ OR name like '%.webp')`, issueID).Desc("comment_id").Limit(5).Find(&attachments)
+}
+
+// GetAttachmentsByCommentID returns all attachments of the comment with the given ID.
+func GetAttachmentsByCommentID(ctx context.Context, commentID int64) ([]*Attachment, error) {
+ attachments := make([]*Attachment, 0, 10)
+ return attachments, db.GetEngine(ctx).Where("comment_id=?", commentID).Find(&attachments)
+}
+
+// GetAttachmentByReleaseIDFileName returns the attachment with the given releaseID and fileName; both the attachment and the error are nil if it does not exist.
+func GetAttachmentByReleaseIDFileName(ctx context.Context, releaseID int64, fileName string) (*Attachment, error) {
+ attach := &Attachment{ReleaseID: releaseID, Name: fileName}
+ has, err := db.GetEngine(ctx).Get(attach)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ // not found: return a nil attachment and a nil error
+ return nil, nil
+ }
+ return attach, nil
+}
+
+// DeleteAttachment deletes the given attachment and optionally the associated file.
+func DeleteAttachment(ctx context.Context, a *Attachment, remove bool) error {
+ _, err := DeleteAttachments(ctx, []*Attachment{a}, remove)
+ return err
+}
+
+// DeleteAttachments deletes the given attachments and optionally the associated files.
+func DeleteAttachments(ctx context.Context, attachments []*Attachment, remove bool) (int, error) {
+ if len(attachments) == 0 {
+ return 0, nil
+ }
+
+ ids := make([]int64, 0, len(attachments))
+ for _, a := range attachments {
+ ids = append(ids, a.ID)
+ }
+
+ cnt, err := db.GetEngine(ctx).In("id", ids).NoAutoCondition().Delete(attachments[0])
+ if err != nil {
+ return 0, err
+ }
+
+ if remove {
+ for i, a := range attachments {
+ if err := storage.Attachments.Delete(a.RelativePath()); err != nil {
+ return i, err
+ }
+ }
+ }
+ return int(cnt), nil
+}
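+
+// Note the return contract above: on success the number of deleted rows is
+// returned, while a storage failure returns the index of the attachment whose
+// file could not be removed, so callers can tell how far removal progressed.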
+
+// DeleteAttachmentsByIssue deletes all attachments associated with the given issue.
+func DeleteAttachmentsByIssue(ctx context.Context, issueID int64, remove bool) (int, error) {
+ attachments, err := GetAttachmentsByIssueID(ctx, issueID)
+ if err != nil {
+ return 0, err
+ }
+
+ return DeleteAttachments(ctx, attachments, remove)
+}
+
+// DeleteAttachmentsByComment deletes all attachments associated with the given comment.
+func DeleteAttachmentsByComment(ctx context.Context, commentID int64, remove bool) (int, error) {
+ attachments, err := GetAttachmentsByCommentID(ctx, commentID)
+ if err != nil {
+ return 0, err
+ }
+
+ return DeleteAttachments(ctx, attachments, remove)
+}
+
+// UpdateAttachmentByUUID updates the attachment identified by its UUID
+func UpdateAttachmentByUUID(ctx context.Context, attach *Attachment, cols ...string) error {
+ if attach.UUID == "" {
+ return fmt.Errorf("attachment uuid should be not blank")
+ }
+ if attach.ExternalURL != "" && !validation.IsValidExternalURL(attach.ExternalURL) {
+ return ErrInvalidExternalURL{ExternalURL: attach.ExternalURL}
+ }
+ _, err := db.GetEngine(ctx).Where("uuid=?", attach.UUID).Cols(cols...).Update(attach)
+ return err
+}
+
+// UpdateAttachment updates the given attachment in database
+func UpdateAttachment(ctx context.Context, atta *Attachment) error {
+ if atta.ExternalURL != "" && !validation.IsValidExternalURL(atta.ExternalURL) {
+ return ErrInvalidExternalURL{ExternalURL: atta.ExternalURL}
+ }
+ sess := db.GetEngine(ctx).Cols("name", "issue_id", "release_id", "comment_id", "download_count")
+ if atta.ID != 0 && atta.UUID == "" {
+ sess = sess.ID(atta.ID)
+ } else {
+ // otherwise identify the attachment by its UUID
+ sess = sess.Where("uuid = ?", atta.UUID)
+ }
+ _, err := sess.Update(atta)
+ return err
+}
+
+// DeleteAttachmentsByRelease deletes all attachments associated with the given release.
+func DeleteAttachmentsByRelease(ctx context.Context, releaseID int64) error {
+ _, err := db.GetEngine(ctx).Where("release_id = ?", releaseID).Delete(&Attachment{})
+ return err
+}
+
+// CountOrphanedAttachments counts attachments whose issue or release no longer exists
+func CountOrphanedAttachments(ctx context.Context) (int64, error) {
+ return db.GetEngine(ctx).Where("(issue_id > 0 and issue_id not in (select id from issue)) or (release_id > 0 and release_id not in (select id from `release`))").
+ Count(new(Attachment))
+}
+
+// DeleteOrphanedAttachments deletes all attachments whose issue or release no longer exists
+func DeleteOrphanedAttachments(ctx context.Context) error {
+ _, err := db.GetEngine(ctx).Where("(issue_id > 0 and issue_id not in (select id from issue)) or (release_id > 0 and release_id not in (select id from `release`))").
+ Delete(new(Attachment))
+ return err
+}
diff --git a/models/repo/attachment_test.go b/models/repo/attachment_test.go
new file mode 100644
index 0000000..23945ba
--- /dev/null
+++ b/models/repo/attachment_test.go
@@ -0,0 +1,105 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestIncreaseDownloadCount(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ attachment, err := repo_model.GetAttachmentByUUID(db.DefaultContext, "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11")
+ require.NoError(t, err)
+ assert.Equal(t, int64(0), attachment.DownloadCount)
+
+ // increase download count
+ err = attachment.IncreaseDownloadCount(db.DefaultContext)
+ require.NoError(t, err)
+
+ attachment, err = repo_model.GetAttachmentByUUID(db.DefaultContext, "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11")
+ require.NoError(t, err)
+ assert.Equal(t, int64(1), attachment.DownloadCount)
+}
+
+func TestGetByCommentOrIssueID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ // count of attachments from issue ID
+ attachments, err := repo_model.GetAttachmentsByIssueID(db.DefaultContext, 1)
+ require.NoError(t, err)
+ assert.Len(t, attachments, 1)
+
+ attachments, err = repo_model.GetAttachmentsByCommentID(db.DefaultContext, 1)
+ require.NoError(t, err)
+ assert.Len(t, attachments, 2)
+}
+
+func TestDeleteAttachments(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ count, err := repo_model.DeleteAttachmentsByIssue(db.DefaultContext, 4, false)
+ require.NoError(t, err)
+ assert.Equal(t, 2, count)
+
+ count, err = repo_model.DeleteAttachmentsByComment(db.DefaultContext, 2, false)
+ require.NoError(t, err)
+ assert.Equal(t, 2, count)
+
+ err = repo_model.DeleteAttachment(db.DefaultContext, &repo_model.Attachment{ID: 8}, false)
+ require.NoError(t, err)
+
+ attachment, err := repo_model.GetAttachmentByUUID(db.DefaultContext, "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a18")
+ require.Error(t, err)
+ assert.True(t, repo_model.IsErrAttachmentNotExist(err))
+ assert.Nil(t, attachment)
+}
+
+func TestGetAttachmentByID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ attach, err := repo_model.GetAttachmentByID(db.DefaultContext, 1)
+ require.NoError(t, err)
+ assert.Equal(t, "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11", attach.UUID)
+}
+
+func TestAttachment_DownloadURL(t *testing.T) {
+ attach := &repo_model.Attachment{
+ UUID: "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11",
+ ID: 1,
+ }
+ assert.Equal(t, "https://try.gitea.io/attachments/a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11", attach.DownloadURL())
+}
+
+func TestUpdateAttachment(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ attach, err := repo_model.GetAttachmentByID(db.DefaultContext, 1)
+ require.NoError(t, err)
+ assert.Equal(t, "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11", attach.UUID)
+
+ attach.Name = "new_name"
+ require.NoError(t, repo_model.UpdateAttachment(db.DefaultContext, attach))
+
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Attachment{Name: "new_name"})
+}
+
+func TestGetAttachmentsByUUIDs(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ attachList, err := repo_model.GetAttachmentsByUUIDs(db.DefaultContext, []string{"a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11", "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a17", "not-existing-uuid"})
+ require.NoError(t, err)
+ assert.Len(t, attachList, 2)
+ assert.Equal(t, "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11", attachList[0].UUID)
+ assert.Equal(t, "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a17", attachList[1].UUID)
+ assert.Equal(t, int64(1), attachList[0].IssueID)
+ assert.Equal(t, int64(5), attachList[1].IssueID)
+}
diff --git a/models/repo/avatar.go b/models/repo/avatar.go
new file mode 100644
index 0000000..72ee938
--- /dev/null
+++ b/models/repo/avatar.go
@@ -0,0 +1,96 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+ "image/png"
+ "io"
+ "net/url"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/avatar"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/storage"
+)
+
+// CustomAvatarRelativePath returns the repository's custom avatar file path.
+func (repo *Repository) CustomAvatarRelativePath() string {
+ return repo.Avatar
+}
+
+// ExistsWithAvatarAtStoragePath returns true if there is a repository with this avatar at the given storage path
+func ExistsWithAvatarAtStoragePath(ctx context.Context, storagePath string) (bool, error) {
+ // See func (repo *Repository) CustomAvatarRelativePath()
+ // repo.Avatar is used directly as the storage path - therefore we can check for existence directly using the path
+ return db.GetEngine(ctx).Where("`avatar`=?", storagePath).Exist(new(Repository))
+}
+
+// RelAvatarLink returns a relative link to the repository's avatar.
+func (repo *Repository) RelAvatarLink(ctx context.Context) string {
+ return repo.relAvatarLink(ctx)
+}
+
+// generateRandomAvatar generates a random avatar for the repository.
+func generateRandomAvatar(ctx context.Context, repo *Repository) error {
+ idToString := fmt.Sprintf("%d", repo.ID)
+
+ seed := idToString
+ img, err := avatar.RandomImage([]byte(seed))
+ if err != nil {
+ return fmt.Errorf("RandomImage: %w", err)
+ }
+
+ repo.Avatar = idToString
+
+ if err := storage.SaveFrom(storage.RepoAvatars, repo.CustomAvatarRelativePath(), func(w io.Writer) error {
+ // return the encode error itself; the outer err is already nil here,
+ // so a bare `return err` would silently swallow encoding failures
+ if err := png.Encode(w, img); err != nil {
+ log.Error("Encode: %v", err)
+ return err
+ }
+ return nil
+ }); err != nil {
+ return fmt.Errorf("failed to save avatar %s: %w", repo.CustomAvatarRelativePath(), err)
+ }
+
+ log.Info("New random avatar created for repository: %d", repo.ID)
+
+ if _, err := db.GetEngine(ctx).ID(repo.ID).Cols("avatar").NoAutoTime().Update(repo); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (repo *Repository) relAvatarLink(ctx context.Context) string {
+ // If no avatar - path is empty
+ avatarPath := repo.CustomAvatarRelativePath()
+ if len(avatarPath) == 0 {
+ switch mode := setting.RepoAvatar.Fallback; mode {
+ case "image":
+ return setting.RepoAvatar.FallbackImage
+ case "random":
+ if err := generateRandomAvatar(ctx, repo); err != nil {
+ log.Error("generateRandomAvatar: %v", err)
+ }
+ default:
+ // default behaviour: do not display avatar
+ return ""
+ }
+ }
+ return setting.AppSubURL + "/repo-avatars/" + url.PathEscape(repo.Avatar)
+}
+
+// AvatarLink returns a link to the repository's avatar.
+func (repo *Repository) AvatarLink(ctx context.Context) string {
+ link := repo.relAvatarLink(ctx)
+ // we only prepend our AppURL to our known (relative, internal) avatar link to get an absolute URL
+ if strings.HasPrefix(link, "/") && !strings.HasPrefix(link, "//") {
+ return setting.AppURL + strings.TrimPrefix(link, setting.AppSubURL)[1:]
+ }
+ // otherwise, return the link as it is
+ return link
+}
diff --git a/models/repo/collaboration.go b/models/repo/collaboration.go
new file mode 100644
index 0000000..cb66cb5
--- /dev/null
+++ b/models/repo/collaboration.go
@@ -0,0 +1,170 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/builder"
+)
+
+// Collaboration represent the relation between an individual and a repository.
+type Collaboration struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ UserID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ Mode perm.AccessMode `xorm:"DEFAULT 2 NOT NULL"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+}
+
+func init() {
+ db.RegisterModel(new(Collaboration))
+}
+
+// Collaborator represents a user with collaboration details.
+type Collaborator struct {
+ *user_model.User
+ Collaboration *Collaboration
+}
+
+// GetCollaborators returns the collaborators for a repository
+func GetCollaborators(ctx context.Context, repoID int64, listOptions db.ListOptions) ([]*Collaborator, error) {
+ collaborations, err := db.Find[Collaboration](ctx, FindCollaborationOptions{
+ ListOptions: listOptions,
+ RepoID: repoID,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("db.Find[Collaboration]: %w", err)
+ }
+
+ collaborators := make([]*Collaborator, 0, len(collaborations))
+ userIDs := make([]int64, 0, len(collaborations))
+ for _, c := range collaborations {
+ userIDs = append(userIDs, c.UserID)
+ }
+
+ usersMap := make(map[int64]*user_model.User)
+ if err := db.GetEngine(ctx).In("id", userIDs).Find(&usersMap); err != nil {
+ return nil, fmt.Errorf("Find users map by user ids: %w", err)
+ }
+
+ for _, c := range collaborations {
+ u := usersMap[c.UserID]
+ if u == nil {
+ u = user_model.NewGhostUser()
+ }
+ collaborators = append(collaborators, &Collaborator{
+ User: u,
+ Collaboration: c,
+ })
+ }
+ return collaborators, nil
+}
+
+// GetCollaboration gets the collaboration between a repository and a user; it returns nil if none exists
+func GetCollaboration(ctx context.Context, repoID, uid int64) (*Collaboration, error) {
+ collaboration := &Collaboration{
+ RepoID: repoID,
+ UserID: uid,
+ }
+ has, err := db.GetEngine(ctx).Get(collaboration)
+ if !has {
+ collaboration = nil
+ }
+ return collaboration, err
+}
+
+// IsCollaborator checks if a user is a collaborator of a repository
+func IsCollaborator(ctx context.Context, repoID, userID int64) (bool, error) {
+ return db.GetEngine(ctx).Get(&Collaboration{RepoID: repoID, UserID: userID})
+}
+
+type FindCollaborationOptions struct {
+ db.ListOptions
+ RepoID int64
+}
+
+func (opts FindCollaborationOptions) ToConds() builder.Cond {
+ return builder.And(builder.Eq{"repo_id": opts.RepoID})
+}
+
+// ChangeCollaborationAccessMode sets new access mode for the collaboration.
+func ChangeCollaborationAccessMode(ctx context.Context, repo *Repository, uid int64, mode perm.AccessMode) error {
+ // Discard invalid input
+ if mode <= perm.AccessModeNone || mode > perm.AccessModeOwner {
+ return nil
+ }
+
+ return db.WithTx(ctx, func(ctx context.Context) error {
+ e := db.GetEngine(ctx)
+
+ collaboration := &Collaboration{
+ RepoID: repo.ID,
+ UserID: uid,
+ }
+ has, err := e.Get(collaboration)
+ if err != nil {
+ return fmt.Errorf("get collaboration: %w", err)
+ } else if !has {
+ return nil
+ }
+
+ if collaboration.Mode == mode {
+ return nil
+ }
+ collaboration.Mode = mode
+
+ if _, err = e.
+ ID(collaboration.ID).
+ Cols("mode").
+ Update(collaboration); err != nil {
+ return fmt.Errorf("update collaboration: %w", err)
+ } else if _, err = e.Exec("UPDATE access SET mode = ? WHERE user_id = ? AND repo_id = ?", mode, uid, repo.ID); err != nil {
+ return fmt.Errorf("update access table: %w", err)
+ }
+
+ return nil
+ })
+}
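+
+// Hypothetical call site (assumes repo is loaded and user 4 collaborates):
+//
+//	// grant write access
+//	if err := ChangeCollaborationAccessMode(ctx, repo, 4, perm.AccessModeWrite); err != nil {
+//		return err
+//	}
+//
+// Out-of-range modes and unknown collaborators are silently ignored, which is
+// why the function returns nil in those cases.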
+
+// GetCollaboratorWithUser returns the collaboration IDs of collabUserID on
+// repositories owned by ownerID.
+func GetCollaboratorWithUser(ctx context.Context, ownerID, collabUserID int64) ([]int64, error) {
+ collabsID := make([]int64, 0, 8)
+ err := db.GetEngine(ctx).Table("collaboration").Select("collaboration.`id`").
+ Join("INNER", "repository", "repository.id = collaboration.repo_id").
+ Where("repository.`owner_id` = ?", ownerID).
+ And("collaboration.`user_id` = ?", collabUserID).
+ Find(&collabsID)
+
+ return collabsID, err
+}
+
+// IsOwnerMemberCollaborator checks if a provided user is the owner, a collaborator or a member of a team in a repository
+func IsOwnerMemberCollaborator(ctx context.Context, repo *Repository, userID int64) (bool, error) {
+ if repo.OwnerID == userID {
+ return true, nil
+ }
+ teamMember, err := db.GetEngine(ctx).Join("INNER", "team_repo", "team_repo.team_id = team_user.team_id").
+ Join("INNER", "team_unit", "team_unit.team_id = team_user.team_id").
+ Where("team_repo.repo_id = ?", repo.ID).
+ And("team_unit.`type` = ?", unit.TypeCode).
+ And("team_user.uid = ?", userID).Table("team_user").Exist()
+ if err != nil {
+ return false, err
+ }
+ if teamMember {
+ return true, nil
+ }
+
+ return db.GetEngine(ctx).Get(&Collaboration{RepoID: repo.ID, UserID: userID})
+}
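+
+// The check proceeds from cheapest to most specific: direct ownership first,
+// then membership in a team with code access, then an explicit collaboration
+// row.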
diff --git a/models/repo/collaboration_test.go b/models/repo/collaboration_test.go
new file mode 100644
index 0000000..5adedfe
--- /dev/null
+++ b/models/repo/collaboration_test.go
@@ -0,0 +1,186 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestRepository_GetCollaborators(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ test := func(repoID int64) {
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: repoID})
+ collaborators, err := repo_model.GetCollaborators(db.DefaultContext, repo.ID, db.ListOptions{})
+ require.NoError(t, err)
+ expectedLen, err := db.GetEngine(db.DefaultContext).Count(&repo_model.Collaboration{RepoID: repoID})
+ require.NoError(t, err)
+ assert.Len(t, collaborators, int(expectedLen))
+ for _, collaborator := range collaborators {
+ assert.EqualValues(t, collaborator.User.ID, collaborator.Collaboration.UserID)
+ assert.EqualValues(t, repoID, collaborator.Collaboration.RepoID)
+ }
+ }
+ test(1)
+ test(2)
+ test(3)
+ test(4)
+
+ // Test db.ListOptions
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 22})
+
+ collaborators1, err := repo_model.GetCollaborators(db.DefaultContext, repo.ID, db.ListOptions{PageSize: 1, Page: 1})
+ require.NoError(t, err)
+ assert.Len(t, collaborators1, 1)
+
+ collaborators2, err := repo_model.GetCollaborators(db.DefaultContext, repo.ID, db.ListOptions{PageSize: 1, Page: 2})
+ require.NoError(t, err)
+ assert.Len(t, collaborators2, 1)
+
+ assert.NotEqualValues(t, collaborators1[0].ID, collaborators2[0].ID)
+}
+
+func TestRepository_IsCollaborator(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ test := func(repoID, userID int64, expected bool) {
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: repoID})
+ actual, err := repo_model.IsCollaborator(db.DefaultContext, repo.ID, userID)
+ require.NoError(t, err)
+ assert.Equal(t, expected, actual)
+ }
+ test(3, 2, true)
+ test(3, unittest.NonexistentID, false)
+ test(4, 2, false)
+ test(4, 4, true)
+}
+
+func TestRepository_ChangeCollaborationAccessMode(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 4})
+ require.NoError(t, repo_model.ChangeCollaborationAccessMode(db.DefaultContext, repo, 4, perm.AccessModeAdmin))
+
+ collaboration := unittest.AssertExistsAndLoadBean(t, &repo_model.Collaboration{RepoID: repo.ID, UserID: 4})
+ assert.EqualValues(t, perm.AccessModeAdmin, collaboration.Mode)
+
+ access := unittest.AssertExistsAndLoadBean(t, &access_model.Access{UserID: 4, RepoID: repo.ID})
+ assert.EqualValues(t, perm.AccessModeAdmin, access.Mode)
+
+ require.NoError(t, repo_model.ChangeCollaborationAccessMode(db.DefaultContext, repo, 4, perm.AccessModeAdmin))
+
+ require.NoError(t, repo_model.ChangeCollaborationAccessMode(db.DefaultContext, repo, unittest.NonexistentID, perm.AccessModeAdmin))
+
+ // Discard invalid input.
+ require.NoError(t, repo_model.ChangeCollaborationAccessMode(db.DefaultContext, repo, 4, perm.AccessMode(unittest.NonexistentID)))
+
+ unittest.CheckConsistencyFor(t, &repo_model.Repository{ID: repo.ID})
+}
+
+func TestRepository_CountCollaborators(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo1 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 4})
+ count, err := db.Count[repo_model.Collaboration](db.DefaultContext, repo_model.FindCollaborationOptions{
+ RepoID: repo1.ID,
+ })
+ require.NoError(t, err)
+ assert.EqualValues(t, 2, count)
+
+ repo2 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 22})
+ count, err = db.Count[repo_model.Collaboration](db.DefaultContext, repo_model.FindCollaborationOptions{
+ RepoID: repo2.ID,
+ })
+ require.NoError(t, err)
+ assert.EqualValues(t, 2, count)
+
+ // Non-existent repository.
+ count, err = db.Count[repo_model.Collaboration](db.DefaultContext, repo_model.FindCollaborationOptions{
+ RepoID: unittest.NonexistentID,
+ })
+ require.NoError(t, err)
+ assert.EqualValues(t, 0, count)
+}
+
+func TestRepository_IsOwnerMemberCollaborator(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo1 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 3})
+
+ // Organisation owner.
+ actual, err := repo_model.IsOwnerMemberCollaborator(db.DefaultContext, repo1, 2)
+ require.NoError(t, err)
+ assert.True(t, actual)
+
+ // Team member.
+ actual, err = repo_model.IsOwnerMemberCollaborator(db.DefaultContext, repo1, 4)
+ require.NoError(t, err)
+ assert.True(t, actual)
+
+ // Normal user.
+ actual, err = repo_model.IsOwnerMemberCollaborator(db.DefaultContext, repo1, 1)
+ require.NoError(t, err)
+ assert.False(t, actual)
+
+ repo2 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 4})
+
+ // Collaborator.
+ actual, err = repo_model.IsOwnerMemberCollaborator(db.DefaultContext, repo2, 4)
+ require.NoError(t, err)
+ assert.True(t, actual)
+
+ repo3 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 15})
+
+ // Repository owner.
+ actual, err = repo_model.IsOwnerMemberCollaborator(db.DefaultContext, repo3, 2)
+ require.NoError(t, err)
+ assert.True(t, actual)
+}
+
+func TestRepo_GetCollaboration(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 4})
+
+ // Existing collaboration.
+ collab, err := repo_model.GetCollaboration(db.DefaultContext, repo.ID, 4)
+ require.NoError(t, err)
+ assert.NotNil(t, collab)
+ assert.EqualValues(t, 4, collab.UserID)
+ assert.EqualValues(t, 4, collab.RepoID)
+
+ // Non-existing collaboration.
+ collab, err = repo_model.GetCollaboration(db.DefaultContext, repo.ID, 1)
+ require.NoError(t, err)
+ assert.Nil(t, collab)
+}
+
+func TestGetCollaboratorWithUser(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ user16 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 16})
+ user15 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 15})
+ user18 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 18})
+
+ collabs, err := repo_model.GetCollaboratorWithUser(db.DefaultContext, user16.ID, user15.ID)
+ require.NoError(t, err)
+ assert.Len(t, collabs, 2)
+ assert.EqualValues(t, 5, collabs[0])
+ assert.EqualValues(t, 7, collabs[1])
+
+ collabs, err = repo_model.GetCollaboratorWithUser(db.DefaultContext, user16.ID, user18.ID)
+ require.NoError(t, err)
+ assert.Len(t, collabs, 2)
+ assert.EqualValues(t, 6, collabs[0])
+ assert.EqualValues(t, 8, collabs[1])
+}
diff --git a/models/repo/following_repo.go b/models/repo/following_repo.go
new file mode 100644
index 0000000..85b96aa
--- /dev/null
+++ b/models/repo/following_repo.go
@@ -0,0 +1,39 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "code.gitea.io/gitea/modules/validation"
+)
+
+// FollowingRepo represents a federated Repository Actor connected with a local Repo
+type FollowingRepo struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"UNIQUE(federation_repo_mapping) NOT NULL"`
+ ExternalID string `xorm:"UNIQUE(federation_repo_mapping) NOT NULL"`
+ FederationHostID int64 `xorm:"UNIQUE(federation_repo_mapping) NOT NULL"`
+ URI string
+}
+
+func NewFollowingRepo(repoID int64, externalID string, federationHostID int64, uri string) (FollowingRepo, error) {
+ result := FollowingRepo{
+ RepoID: repoID,
+ ExternalID: externalID,
+ FederationHostID: federationHostID,
+ URI: uri,
+ }
+ if valid, err := validation.IsValid(result); !valid {
+ return FollowingRepo{}, err
+ }
+ return result, nil
+}
+
+func (followingRepo FollowingRepo) Validate() []string {
+ var result []string
+ result = append(result, validation.ValidateNotEmpty(followingRepo.RepoID, "RepoID")...)
+ result = append(result, validation.ValidateNotEmpty(followingRepo.ExternalID, "ExternalID")...)
+ result = append(result, validation.ValidateNotEmpty(followingRepo.FederationHostID, "FederationHostID")...)
+ result = append(result, validation.ValidateNotEmpty(followingRepo.URI, "URI")...)
+ return result
+}
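+
+// Construction always passes through validation, so a hypothetical caller can
+// rely on the returned value being well-formed:
+//
+//	following, err := NewFollowingRepo(repo.ID, "12", host.ID, uri)
+//	if err != nil {
+//		return err
+//	}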
diff --git a/models/repo/following_repo_test.go b/models/repo/following_repo_test.go
new file mode 100644
index 0000000..d0dd0a3
--- /dev/null
+++ b/models/repo/following_repo_test.go
@@ -0,0 +1,31 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/modules/validation"
+)
+
+func Test_FollowingRepoValidation(t *testing.T) {
+ sut := FollowingRepo{
+ RepoID: 12,
+ ExternalID: "12",
+ FederationHostID: 1,
+ URI: "http://localhost:3000/api/v1/activitypub/repo-id/1",
+ }
+ if res, err := validation.IsValid(sut); !res {
+ t.Errorf("sut should be valid but was %q", err)
+ }
+
+ sut = FollowingRepo{
+ ExternalID: "12",
+ FederationHostID: 1,
+ URI: "http://localhost:3000/api/v1/activitypub/repo-id/1",
+ }
+ if res, _ := validation.IsValid(sut); res {
+ t.Errorf("sut should be invalid")
+ }
+}
diff --git a/models/repo/fork.go b/models/repo/fork.go
new file mode 100644
index 0000000..632e91c
--- /dev/null
+++ b/models/repo/fork.go
@@ -0,0 +1,120 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+
+ "xorm.io/builder"
+)
+
+// GetRepositoriesByForkID returns all repositories with given fork ID.
+func GetRepositoriesByForkID(ctx context.Context, forkID int64) ([]*Repository, error) {
+ repos := make([]*Repository, 0, 10)
+ return repos, db.GetEngine(ctx).
+ Where("fork_id=?", forkID).
+ Find(&repos)
+}
+
+// GetForkedRepo returns the fork of repoID owned by ownerID, or nil if no such fork exists.
+func GetForkedRepo(ctx context.Context, ownerID, repoID int64) *Repository {
+ repo := new(Repository)
+ has, _ := db.GetEngine(ctx).
+ Where("owner_id=? AND fork_id=?", ownerID, repoID).
+ Get(repo)
+ if has {
+ return repo
+ }
+ return nil
+}
+
+// HasForkedRepo checks if the given user has already forked the repository with the given ID.
+func HasForkedRepo(ctx context.Context, ownerID, repoID int64) bool {
+ has, _ := db.GetEngine(ctx).
+ Table("repository").
+ Where("owner_id=? AND fork_id=?", ownerID, repoID).
+ Exist()
+ return has
+}
+
+// GetUserFork returns the user's fork of the given repository; it returns nil if the user has not forked it
+func GetUserFork(ctx context.Context, repoID, userID int64) (*Repository, error) {
+ var forkedRepo Repository
+ has, err := db.GetEngine(ctx).Where("fork_id = ?", repoID).And("owner_id = ?", userID).Get(&forkedRepo)
+ if err != nil {
+ return nil, err
+ }
+ if !has {
+ return nil, nil
+ }
+ return &forkedRepo, nil
+}
+
+// GetForks returns all the forks of the repository that are visible to the user.
+func GetForks(ctx context.Context, repo *Repository, user *user_model.User, listOptions db.ListOptions) ([]*Repository, int64, error) {
+ sess := db.GetEngine(ctx).Where(AccessibleRepositoryCondition(user, unit.TypeInvalid))
+
+ var forks []*Repository
+ if listOptions.Page == 0 {
+ forks = make([]*Repository, 0, repo.NumForks)
+ } else {
+ forks = make([]*Repository, 0, listOptions.PageSize)
+ sess = db.SetSessionPagination(sess, &listOptions)
+ }
+
+ count, err := sess.FindAndCount(&forks, &Repository{ForkID: repo.ID})
+ return forks, count, err
+}
+
+// IncrementRepoForkNum increments the repository's fork counter
+func IncrementRepoForkNum(ctx context.Context, repoID int64) error {
+ _, err := db.GetEngine(ctx).Exec("UPDATE `repository` SET num_forks=num_forks+1 WHERE id=?", repoID)
+ return err
+}
+
+// DecrementRepoForkNum decrements the repository's fork counter
+func DecrementRepoForkNum(ctx context.Context, repoID int64) error {
+ _, err := db.GetEngine(ctx).Exec("UPDATE `repository` SET num_forks=num_forks-1 WHERE id=?", repoID)
+ return err
+}
+
+// FindUserOrgForks returns the forks of a repository that are owned by organizations the given user belongs to
+func FindUserOrgForks(ctx context.Context, repoID, userID int64) ([]*Repository, error) {
+ cond := builder.And(
+ builder.Eq{"fork_id": repoID},
+ builder.In("owner_id",
+ builder.Select("org_id").
+ From("org_user").
+ Where(builder.Eq{"uid": userID}),
+ ),
+ )
+
+ var repos []*Repository
+ return repos, db.GetEngine(ctx).Table("repository").Where(cond).Find(&repos)
+}
+
+// GetForksByUserAndOrgs returns the forks of the repository owned by the user and by organizations the user belongs to
+func GetForksByUserAndOrgs(ctx context.Context, user *user_model.User, repo *Repository) ([]*Repository, error) {
+ var repoList []*Repository
+ if user == nil {
+ return repoList, nil
+ }
+ forkedRepo, err := GetUserFork(ctx, repo.ID, user.ID)
+ if err != nil {
+ return repoList, err
+ }
+ if forkedRepo != nil {
+ repoList = append(repoList, forkedRepo)
+ }
+ orgForks, err := FindUserOrgForks(ctx, repo.ID, user.ID)
+ if err != nil {
+ return nil, err
+ }
+ repoList = append(repoList, orgForks...)
+ return repoList, nil
+}
diff --git a/models/repo/fork_test.go b/models/repo/fork_test.go
new file mode 100644
index 0000000..dd12429
--- /dev/null
+++ b/models/repo/fork_test.go
@@ -0,0 +1,34 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGetUserFork(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ // User13 has repo 11 forked from repo10
+ repo, err := repo_model.GetRepositoryByID(db.DefaultContext, 10)
+ require.NoError(t, err)
+ assert.NotNil(t, repo)
+ repo, err = repo_model.GetUserFork(db.DefaultContext, repo.ID, 13)
+ require.NoError(t, err)
+ assert.NotNil(t, repo)
+
+ repo, err = repo_model.GetRepositoryByID(db.DefaultContext, 9)
+ require.NoError(t, err)
+ assert.NotNil(t, repo)
+ repo, err = repo_model.GetUserFork(db.DefaultContext, repo.ID, 13)
+ require.NoError(t, err)
+ assert.Nil(t, repo)
+}
diff --git a/models/repo/git.go b/models/repo/git.go
new file mode 100644
index 0000000..388bf86
--- /dev/null
+++ b/models/repo/git.go
@@ -0,0 +1,36 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+)
+
+// MergeStyle represents the approach to merge commits into base branch.
+type MergeStyle string
+
+const (
+ // MergeStyleMerge create merge commit
+ MergeStyleMerge MergeStyle = "merge"
+ // MergeStyleRebase rebase before merging, and fast-forward
+ MergeStyleRebase MergeStyle = "rebase"
+ // MergeStyleRebaseMerge rebase before merging with merge commit (--no-ff)
+ MergeStyleRebaseMerge MergeStyle = "rebase-merge"
+ // MergeStyleSquash squash commits into single commit before merging
+ MergeStyleSquash MergeStyle = "squash"
+ // MergeStyleFastForwardOnly fast-forward merge if possible, otherwise fail
+ MergeStyleFastForwardOnly MergeStyle = "fast-forward-only"
+ // MergeStyleManuallyMerged the PR has been merged manually; just mark it as merged directly
+ MergeStyleManuallyMerged MergeStyle = "manually-merged"
+ // MergeStyleRebaseUpdate not a merge style, used to update pull head by rebase
+ MergeStyleRebaseUpdate MergeStyle = "rebase-update-only"
+)
+
+// UpdateDefaultBranch updates the default branch
+func UpdateDefaultBranch(ctx context.Context, repo *Repository) error {
+ _, err := db.GetEngine(ctx).ID(repo.ID).Cols("default_branch").Update(repo)
+ return err
+}
diff --git a/models/repo/issue.go b/models/repo/issue.go
new file mode 100644
index 0000000..0dd4fd5
--- /dev/null
+++ b/models/repo/issue.go
@@ -0,0 +1,60 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/unit"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+)
+
+// ___________.__ ___________ __
+// \__ ___/|__| _____ ___\__ ___/___________ ____ | | __ ___________
+// | | | |/ \_/ __ \| | \_ __ \__ \ _/ ___\| |/ // __ \_ __ \
+// | | | | Y Y \ ___/| | | | \// __ \\ \___| <\ ___/| | \/
+// |____| |__|__|_| /\___ >____| |__| (____ /\___ >__|_ \\___ >__|
+// \/ \/ \/ \/ \/ \/
+
+// CanEnableTimetracker returns true when the server admin has enabled time tracking.
+// This overrules IsTimetrackerEnabled.
+func (repo *Repository) CanEnableTimetracker() bool {
+ return setting.Service.EnableTimetracking
+}
+
+// IsTimetrackerEnabled returns whether or not the timetracker is enabled. It returns the default value from config if an error occurs.
+func (repo *Repository) IsTimetrackerEnabled(ctx context.Context) bool {
+ if !setting.Service.EnableTimetracking {
+ return false
+ }
+
+ var u *RepoUnit
+ var err error
+ if u, err = repo.GetUnit(ctx, unit.TypeIssues); err != nil {
+ return setting.Service.DefaultEnableTimetracking
+ }
+ return u.IssuesConfig().EnableTimetracker
+}
+
+// AllowOnlyContributorsToTrackTime returns the IssuesConfig value or the default value
+func (repo *Repository) AllowOnlyContributorsToTrackTime(ctx context.Context) bool {
+ var u *RepoUnit
+ var err error
+ if u, err = repo.GetUnit(ctx, unit.TypeIssues); err != nil {
+ return setting.Service.DefaultAllowOnlyContributorsToTrackTime
+ }
+ return u.IssuesConfig().AllowOnlyContributorsToTrackTime
+}
+
+// IsDependenciesEnabled returns whether dependencies are enabled, falling back to the default setting on error.
+func (repo *Repository) IsDependenciesEnabled(ctx context.Context) bool {
+ var u *RepoUnit
+ var err error
+ if u, err = repo.GetUnit(ctx, unit.TypeIssues); err != nil {
+ log.Trace("IsDependenciesEnabled: %v", err)
+ return setting.Service.DefaultEnableDependencies
+ }
+ return u.IssuesConfig().EnableDependencies
+}
diff --git a/models/repo/language_stats.go b/models/repo/language_stats.go
new file mode 100644
index 0000000..0bc0f1f
--- /dev/null
+++ b/models/repo/language_stats.go
@@ -0,0 +1,242 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "math"
+ "sort"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "github.com/go-enry/go-enry/v2"
+)
+
+// LanguageStat describes language statistics of a repository
+type LanguageStat struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ CommitID string
+ IsPrimary bool
+ Language string `xorm:"VARCHAR(50) UNIQUE(s) INDEX NOT NULL"`
+ Percentage float32 `xorm:"-"`
+ Size int64 `xorm:"NOT NULL DEFAULT 0"`
+ Color string `xorm:"-"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX CREATED"`
+}
+
+func init() {
+ db.RegisterModel(new(LanguageStat))
+}
+
+// LanguageStatList defines a list of language statistics
+type LanguageStatList []*LanguageStat
+
+// LoadAttributes loads attributes
+func (stats LanguageStatList) LoadAttributes() {
+ for i := range stats {
+ stats[i].Color = enry.GetColor(stats[i].Language)
+ }
+}
+
+func (stats LanguageStatList) getLanguagePercentages() map[string]float32 {
+ langPerc := make(map[string]float32)
+ var otherPerc float32
+ var total int64
+
+ for _, stat := range stats {
+ total += stat.Size
+ }
+ if total > 0 {
+ for _, stat := range stats {
+ perc := float32(float64(stat.Size) / float64(total) * 100)
+ if perc <= 0.1 {
+ otherPerc += perc
+ continue
+ }
+ langPerc[stat.Language] = perc
+ }
+ }
+ if otherPerc > 0 {
+ langPerc["other"] = otherPerc
+ }
+ roundByLargestRemainder(langPerc, 100)
+ return langPerc
+}
+
+// roundByLargestRemainder rounds percs to one decimal place; target should be the expected sum of percs
+func roundByLargestRemainder(percs map[string]float32, target float32) {
+ leftToDistribute := int(target * 10)
+
+ keys := make([]string, 0, len(percs))
+
+ for k, v := range percs {
+ percs[k] = v * 10
+ floored := math.Floor(float64(percs[k]))
+ leftToDistribute -= int(floored)
+ keys = append(keys, k)
+ }
+
+ // Sort the keys by the largest remainder
+ sort.SliceStable(keys, func(i, j int) bool {
+ _, remainderI := math.Modf(float64(percs[keys[i]]))
+ _, remainderJ := math.Modf(float64(percs[keys[j]]))
+ return remainderI > remainderJ
+ })
+
+ // Increment the values in order of largest remainder
+ for _, k := range keys {
+ percs[k] = float32(math.Floor(float64(percs[k])))
+ if leftToDistribute > 0 {
+ percs[k]++
+ leftToDistribute--
+ }
+ percs[k] /= 10
+ }
+}
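+
+// Worked example (illustrative): percs = {"Go": 66.66, "Other": 33.32} and
+// target = 100. Scaling by 10 and flooring gives 666 + 333 = 999, leaving one
+// tenth to distribute; "Go" has the larger remainder (0.6 vs 0.2), so it
+// receives it, and the result is {"Go": 66.7, "Other": 33.3}, summing to 100.0.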
+
+// GetLanguageStats returns the language statistics for a repository
+func GetLanguageStats(ctx context.Context, repo *Repository) (LanguageStatList, error) {
+ stats := make(LanguageStatList, 0, 6)
+ if err := db.GetEngine(ctx).Where("`repo_id` = ?", repo.ID).Desc("`size`").Find(&stats); err != nil {
+ return nil, err
+ }
+ return stats, nil
+}
+
+// GetTopLanguageStats returns the top language statistics for a repository
+func GetTopLanguageStats(ctx context.Context, repo *Repository, limit int) (LanguageStatList, error) {
+ stats, err := GetLanguageStats(ctx, repo)
+ if err != nil {
+ return nil, err
+ }
+ perc := stats.getLanguagePercentages()
+ topstats := make(LanguageStatList, 0, limit)
+ var other float32
+ for i := range stats {
+ if _, ok := perc[stats[i].Language]; !ok {
+ continue
+ }
+ if stats[i].Language == "other" || len(topstats) >= limit {
+ other += perc[stats[i].Language]
+ continue
+ }
+ stats[i].Percentage = perc[stats[i].Language]
+ topstats = append(topstats, stats[i])
+ }
+ if other > 0 {
+ topstats = append(topstats, &LanguageStat{
+ RepoID: repo.ID,
+ Language: "other",
+ Color: "#cccccc",
+ Percentage: float32(math.Round(float64(other)*10) / 10),
+ })
+ }
+ topstats.LoadAttributes()
+ return topstats, nil
+}
+
+// UpdateLanguageStats updates the language statistics for a repository
+func UpdateLanguageStats(ctx context.Context, repo *Repository, commitID string, stats map[string]int64) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+ sess := db.GetEngine(ctx)
+
+ oldstats, err := GetLanguageStats(ctx, repo)
+ if err != nil {
+ return err
+ }
+ var topLang string
+ var s int64
+ for lang, size := range stats {
+ if size > s {
+ s = size
+ topLang = strings.ToLower(lang)
+ }
+ }
+
+ for lang, size := range stats {
+ upd := false
+ llang := strings.ToLower(lang)
+ for _, s := range oldstats {
+ // Update already existing language
+ if strings.ToLower(s.Language) == llang {
+ s.CommitID = commitID
+ s.IsPrimary = llang == topLang
+ s.Size = size
+ if _, err := sess.ID(s.ID).Cols("`commit_id`", "`size`", "`is_primary`").Update(s); err != nil {
+ return err
+ }
+ upd = true
+ break
+ }
+ }
+ // Insert new language
+ if !upd {
+ if err := db.Insert(ctx, &LanguageStat{
+ RepoID: repo.ID,
+ CommitID: commitID,
+ IsPrimary: llang == topLang,
+ Language: lang,
+ Size: size,
+ }); err != nil {
+ return err
+ }
+ }
+ }
+ // Delete old languages
+ statsToDelete := make([]int64, 0, len(oldstats))
+ for _, s := range oldstats {
+ if s.CommitID != commitID {
+ statsToDelete = append(statsToDelete, s.ID)
+ }
+ }
+ if len(statsToDelete) > 0 {
+ if _, err := sess.In("`id`", statsToDelete).Delete(&LanguageStat{}); err != nil {
+ return err
+ }
+ }
+
+ // Update indexer status
+ if err = UpdateIndexerStatus(ctx, repo, RepoIndexerTypeStats, commitID); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
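+
+// Example call (illustrative values): after analyzing commit `commitID`,
+//
+//	err := UpdateLanguageStats(ctx, repo, commitID, map[string]int64{
+//		"Go":   102400,
+//		"HTML": 2048,
+//	})
+//
+// marks Go as the primary language, upserts both rows, and deletes stats
+// left over from earlier commits.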
+
+// CopyLanguageStat copies the language stats of originalRepo to destRepo (used for forked repositories)
+func CopyLanguageStat(ctx context.Context, originalRepo, destRepo *Repository) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+	repoLang := make(LanguageStatList, 0, 6)
+	if err := db.GetEngine(ctx).Where("`repo_id` = ?", originalRepo.ID).Desc("`size`").Find(&repoLang); err != nil {
+		return err
+	}
+	if len(repoLang) > 0 {
+		for i := range repoLang {
+			repoLang[i].ID = 0
+			repoLang[i].RepoID = destRepo.ID
+			repoLang[i].CreatedUnix = timeutil.TimeStampNow()
+		}
+		// Update destRepo's indexer status
+		tmpCommitID := repoLang[0].CommitID
+		if err := UpdateIndexerStatus(ctx, destRepo, RepoIndexerTypeStats, tmpCommitID); err != nil {
+			return err
+		}
+		if err := db.Insert(ctx, &repoLang); err != nil {
+			return err
+		}
+	}
+ return committer.Commit()
+}
diff --git a/models/repo/main_test.go b/models/repo/main_test.go
new file mode 100644
index 0000000..b49855f
--- /dev/null
+++ b/models/repo/main_test.go
@@ -0,0 +1,21 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/unittest"
+
+ _ "code.gitea.io/gitea/models" // register table model
+ _ "code.gitea.io/gitea/models/actions"
+ _ "code.gitea.io/gitea/models/activities"
+ _ "code.gitea.io/gitea/models/perm/access" // register table model
+ _ "code.gitea.io/gitea/models/repo" // register table model
+ _ "code.gitea.io/gitea/models/user" // register table model
+)
+
+func TestMain(m *testing.M) {
+ unittest.MainTest(m)
+}
diff --git a/models/repo/mirror.go b/models/repo/mirror.go
new file mode 100644
index 0000000..be7b785
--- /dev/null
+++ b/models/repo/mirror.go
@@ -0,0 +1,123 @@
+// Copyright 2016 The Gogs Authors. All rights reserved.
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+)
+
+// ErrMirrorNotExist is returned when a mirror does not exist
+var ErrMirrorNotExist = util.NewNotExistErrorf("Mirror does not exist")
+
+// Mirror represents mirror information of a repository.
+type Mirror struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"INDEX"`
+ Repo *Repository `xorm:"-"`
+ Interval time.Duration
+ EnablePrune bool `xorm:"NOT NULL DEFAULT true"`
+
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX"`
+ NextUpdateUnix timeutil.TimeStamp `xorm:"INDEX"`
+
+ LFS bool `xorm:"lfs_enabled NOT NULL DEFAULT false"`
+ LFSEndpoint string `xorm:"lfs_endpoint TEXT"`
+
+ RemoteAddress string `xorm:"VARCHAR(2048)"`
+}
+
+func init() {
+ db.RegisterModel(new(Mirror))
+}
+
+// BeforeInsert will be invoked by XORM before inserting a record
+func (m *Mirror) BeforeInsert() {
+ if m != nil {
+ m.UpdatedUnix = timeutil.TimeStampNow()
+ m.NextUpdateUnix = timeutil.TimeStampNow()
+ }
+}
+
+// GetRepository returns the repository.
+func (m *Mirror) GetRepository(ctx context.Context) *Repository {
+ if m.Repo != nil {
+ return m.Repo
+ }
+ var err error
+ m.Repo, err = GetRepositoryByID(ctx, m.RepoID)
+ if err != nil {
+		log.Error("GetRepositoryByID[%d]: %v", m.RepoID, err)
+ }
+ return m.Repo
+}
+
+// GetRemoteName returns the name of the remote.
+func (m *Mirror) GetRemoteName() string {
+ return "origin"
+}
+
+// ScheduleNextUpdate calculates and sets the next update time.
+func (m *Mirror) ScheduleNextUpdate() {
+ if m.Interval != 0 {
+ m.NextUpdateUnix = timeutil.TimeStampNow().AddDuration(m.Interval)
+ } else {
+ m.NextUpdateUnix = 0
+ }
+}
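+
+// For example (illustrative), with Interval = 8 * time.Hour the next sync is
+// scheduled eight hours from now, while an Interval of 0 sets NextUpdateUnix
+// to 0, which MirrorsIterate below skips via "next_update_unix!=0".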
+
+// GetMirrorByRepoID returns mirror information of a repository.
+func GetMirrorByRepoID(ctx context.Context, repoID int64) (*Mirror, error) {
+ m := &Mirror{RepoID: repoID}
+ has, err := db.GetEngine(ctx).Get(m)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrMirrorNotExist
+ }
+ return m, nil
+}
+
+// UpdateMirror updates the mirror
+func UpdateMirror(ctx context.Context, m *Mirror) error {
+ _, err := db.GetEngine(ctx).ID(m.ID).AllCols().Update(m)
+ return err
+}
+
+// TouchMirror updates the mirror's UpdatedUnix timestamp
+func TouchMirror(ctx context.Context, m *Mirror) error {
+ m.UpdatedUnix = timeutil.TimeStampNow()
+ _, err := db.GetEngine(ctx).ID(m.ID).Cols("updated_unix").Update(m)
+ return err
+}
+
+// DeleteMirrorByRepoID deletes a mirror by repoID
+func DeleteMirrorByRepoID(ctx context.Context, repoID int64) error {
+ _, err := db.GetEngine(ctx).Delete(&Mirror{RepoID: repoID})
+ return err
+}
+
+// MirrorsIterate iterates over all mirrors that are due for an update.
+func MirrorsIterate(ctx context.Context, limit int, f func(idx int, bean any) error) error {
+ sess := db.GetEngine(ctx).
+ Where("next_update_unix<=?", time.Now().Unix()).
+ And("next_update_unix!=0").
+ OrderBy("updated_unix ASC")
+ if limit > 0 {
+ sess = sess.Limit(limit)
+ }
+ return sess.Iterate(new(Mirror), f)
+}
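+
+// Usage sketch (illustrative): process up to ten mirrors that are due for
+// synchronization.
+//
+//	err := MirrorsIterate(ctx, 10, func(idx int, bean any) error {
+//		m := bean.(*Mirror)
+//		// enqueue m for synchronization here
+//		return nil
+//	})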
+
+// InsertMirror inserts a mirror into the database
+func InsertMirror(ctx context.Context, mirror *Mirror) error {
+ _, err := db.GetEngine(ctx).Insert(mirror)
+ return err
+}
diff --git a/models/repo/pushmirror.go b/models/repo/pushmirror.go
new file mode 100644
index 0000000..68fb504
--- /dev/null
+++ b/models/repo/pushmirror.go
@@ -0,0 +1,188 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/git"
+ giturl "code.gitea.io/gitea/modules/git/url"
+ "code.gitea.io/gitea/modules/keying"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ErrPushMirrorNotExist is returned when a push mirror does not exist
+var ErrPushMirrorNotExist = util.NewNotExistErrorf("PushMirror does not exist")
+
+// PushMirror represents mirror information of a repository.
+type PushMirror struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"INDEX"`
+ Repo *Repository `xorm:"-"`
+ RemoteName string
+ RemoteAddress string `xorm:"VARCHAR(2048)"`
+
+	// The SSH keypair used for mirroring, in OpenSSH format; the private key is stored encrypted.
+ PublicKey string `xorm:"VARCHAR(100)"`
+ PrivateKey []byte `xorm:"BLOB"`
+
+ SyncOnCommit bool `xorm:"NOT NULL DEFAULT true"`
+ Interval time.Duration
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+ LastUpdateUnix timeutil.TimeStamp `xorm:"INDEX last_update"`
+ LastError string `xorm:"text"`
+}
+
+type PushMirrorOptions struct {
+ db.ListOptions
+ ID int64
+ RepoID int64
+ RemoteName string
+}
+
+func (opts PushMirrorOptions) ToConds() builder.Cond {
+ cond := builder.NewCond()
+ if opts.RepoID > 0 {
+ cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
+ }
+ if opts.RemoteName != "" {
+ cond = cond.And(builder.Eq{"remote_name": opts.RemoteName})
+ }
+ if opts.ID > 0 {
+ cond = cond.And(builder.Eq{"id": opts.ID})
+ }
+ return cond
+}
+
+func init() {
+ db.RegisterModel(new(PushMirror))
+}
+
+// GetRepository returns the repository.
+func (m *PushMirror) GetRepository(ctx context.Context) *Repository {
+ if m.Repo != nil {
+ return m.Repo
+ }
+ var err error
+ m.Repo, err = GetRepositoryByID(ctx, m.RepoID)
+ if err != nil {
+		log.Error("GetRepositoryByID[%d]: %v", m.RepoID, err)
+ }
+ return m.Repo
+}
+
+// GetRemoteName returns the name of the remote.
+func (m *PushMirror) GetRemoteName() string {
+ return m.RemoteName
+}
+
+// GetPublicKey returns a sanitized version of the public key.
+// This should only be used when displaying the public key to the user, not for cryptographic use.
+func (m *PushMirror) GetPublicKey() string {
+ return strings.TrimSuffix(m.PublicKey, "\n")
+}
+
+// SetPrivatekey encrypts the given private key and stores it in the database.
+// The ID of the push mirror must be known, so this should be done after the
+// push mirror is inserted.
+func (m *PushMirror) SetPrivatekey(ctx context.Context, privateKey []byte) error {
+ key := keying.DeriveKey(keying.ContextPushMirror)
+ m.PrivateKey = key.Encrypt(privateKey, keying.ColumnAndID("private_key", m.ID))
+
+ _, err := db.GetEngine(ctx).ID(m.ID).Cols("private_key").Update(m)
+ return err
+}
+
+// Privatekey retrieves the encrypted private key and decrypts it.
+func (m *PushMirror) Privatekey() ([]byte, error) {
+ key := keying.DeriveKey(keying.ContextPushMirror)
+ return key.Decrypt(m.PrivateKey, keying.ColumnAndID("private_key", m.ID))
+}
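+
+// Flow sketch (illustrative): the encryption is bound to the mirror's ID, so
+// the mirror must be inserted before the private key can be stored.
+//
+//	m := &PushMirror{RepoID: repo.ID, RemoteName: "mirror-remote"}
+//	if err := db.Insert(ctx, m); err != nil {
+//		return err
+//	}
+//	if err := m.SetPrivatekey(ctx, privateKeyBytes); err != nil {
+//		return err
+//	}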
+
+// UpdatePushMirror updates the push-mirror
+func UpdatePushMirror(ctx context.Context, m *PushMirror) error {
+ _, err := db.GetEngine(ctx).ID(m.ID).AllCols().Update(m)
+ return err
+}
+
+// UpdatePushMirrorInterval updates only the interval of the push-mirror
+func UpdatePushMirrorInterval(ctx context.Context, m *PushMirror) error {
+ _, err := db.GetEngine(ctx).ID(m.ID).Cols("interval").Update(m)
+ return err
+}
+
+var DeletePushMirrors = deletePushMirrors
+
+func deletePushMirrors(ctx context.Context, opts PushMirrorOptions) error {
+ if opts.RepoID > 0 {
+ _, err := db.Delete[PushMirror](ctx, opts)
+ return err
+ }
+	return util.NewInvalidArgumentErrorf("repoID is required")
+}
+
+// GetPushMirrorsByRepoID returns push-mirror information of a repository.
+func GetPushMirrorsByRepoID(ctx context.Context, repoID int64, listOptions db.ListOptions) ([]*PushMirror, int64, error) {
+ sess := db.GetEngine(ctx).Where("repo_id = ?", repoID)
+ if listOptions.Page != 0 {
+ sess = db.SetSessionPagination(sess, &listOptions)
+ mirrors := make([]*PushMirror, 0, listOptions.PageSize)
+ count, err := sess.FindAndCount(&mirrors)
+ return mirrors, count, err
+ }
+ mirrors := make([]*PushMirror, 0, 10)
+ count, err := sess.FindAndCount(&mirrors)
+ return mirrors, count, err
+}
+
+// GetPushMirrorsSyncedOnCommit returns push-mirrors for this repo that should be updated by new commits
+func GetPushMirrorsSyncedOnCommit(ctx context.Context, repoID int64) ([]*PushMirror, error) {
+ mirrors := make([]*PushMirror, 0, 10)
+ return mirrors, db.GetEngine(ctx).
+ Where("repo_id = ? AND sync_on_commit = ?", repoID, true).
+ Find(&mirrors)
+}
+
+// PushMirrorsIterate iterates over all push-mirrors of non-archived repositories that are due for synchronization.
+func PushMirrorsIterate(ctx context.Context, limit int, f func(idx int, bean any) error) error {
+ sess := db.GetEngine(ctx).
+ Table("push_mirror").
+ Join("INNER", "`repository`", "`repository`.id = `push_mirror`.repo_id").
+ Where("`push_mirror`.last_update + (`push_mirror`.`interval` / ?) <= ?", time.Second, time.Now().Unix()).
+ And("`push_mirror`.`interval` != 0").
+ And("`repository`.is_archived = ?", false).
+ OrderBy("last_update ASC")
+ if limit > 0 {
+ sess = sess.Limit(limit)
+ }
+ return sess.Iterate(new(PushMirror), f)
+}
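+
+// Note: Interval is a time.Duration stored in nanoseconds, so the query above
+// divides it by time.Second to obtain seconds before comparing
+// last_update + interval against the current Unix timestamp.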
+
+// GetPushMirrorRemoteAddress returns the sanitized address associated with the given remote of a repository.
+func GetPushMirrorRemoteAddress(ownerName, repoName, remoteName string) (string, error) {
+ repoPath := filepath.Join(setting.RepoRootPath, strings.ToLower(ownerName), strings.ToLower(repoName)+".git")
+
+ remoteURL, err := git.GetRemoteAddress(context.Background(), repoPath, remoteName)
+ if err != nil {
+		return "", fmt.Errorf("failed to get the address of remote %s for %s/%s: %w", remoteName, ownerName, repoName, err)
+ }
+
+ u, err := giturl.Parse(remoteURL)
+ if err != nil {
+ return "", err
+ }
+ u.User = nil
+
+ return u.String(), nil
+}
diff --git a/models/repo/pushmirror_test.go b/models/repo/pushmirror_test.go
new file mode 100644
index 0000000..c3368cc
--- /dev/null
+++ b/models/repo/pushmirror_test.go
@@ -0,0 +1,79 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "testing"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestPushMirrorsIterate(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ now := timeutil.TimeStampNow()
+
+ db.Insert(db.DefaultContext, &repo_model.PushMirror{
+ RemoteName: "test-1",
+ LastUpdateUnix: now,
+ Interval: 1,
+ })
+
+ long, _ := time.ParseDuration("24h")
+ db.Insert(db.DefaultContext, &repo_model.PushMirror{
+ RemoteName: "test-2",
+ LastUpdateUnix: now,
+ Interval: long,
+ })
+
+ db.Insert(db.DefaultContext, &repo_model.PushMirror{
+ RemoteName: "test-3",
+ LastUpdateUnix: now,
+ Interval: 0,
+ })
+
+ time.Sleep(1 * time.Millisecond)
+
+ repo_model.PushMirrorsIterate(db.DefaultContext, 1, func(idx int, bean any) error {
+ m, ok := bean.(*repo_model.PushMirror)
+ assert.True(t, ok)
+ assert.Equal(t, "test-1", m.RemoteName)
+ assert.Equal(t, m.RemoteName, m.GetRemoteName())
+ return nil
+ })
+}
+
+func TestPushMirrorPrivatekey(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ m := &repo_model.PushMirror{
+ RemoteName: "test-privatekey",
+ }
+ require.NoError(t, db.Insert(db.DefaultContext, m))
+
+ privateKey := []byte{0x00, 0x01, 0x02, 0x04, 0x08, 0x10}
+ t.Run("Set privatekey", func(t *testing.T) {
+ require.NoError(t, m.SetPrivatekey(db.DefaultContext, privateKey))
+ })
+
+ t.Run("Normal retrieval", func(t *testing.T) {
+ actualPrivateKey, err := m.Privatekey()
+ require.NoError(t, err)
+ assert.EqualValues(t, privateKey, actualPrivateKey)
+ })
+
+ t.Run("Incorrect retrieval", func(t *testing.T) {
+ m.ID++
+ actualPrivateKey, err := m.Privatekey()
+ require.Error(t, err)
+ assert.Empty(t, actualPrivateKey)
+ })
+}
diff --git a/models/repo/redirect.go b/models/repo/redirect.go
new file mode 100644
index 0000000..61789eb
--- /dev/null
+++ b/models/repo/redirect.go
@@ -0,0 +1,86 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/util"
+)
+
+// ErrRedirectNotExist represents a "RedirectNotExist" kind of error.
+type ErrRedirectNotExist struct {
+ OwnerID int64
+ RepoName string
+}
+
+// IsErrRedirectNotExist checks if an error is an ErrRedirectNotExist.
+func IsErrRedirectNotExist(err error) bool {
+ _, ok := err.(ErrRedirectNotExist)
+ return ok
+}
+
+func (err ErrRedirectNotExist) Error() string {
+ return fmt.Sprintf("repository redirect does not exist [uid: %d, name: %s]", err.OwnerID, err.RepoName)
+}
+
+func (err ErrRedirectNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// Redirect represents that a repo name should be redirected to another
+type Redirect struct {
+ ID int64 `xorm:"pk autoincr"`
+ OwnerID int64 `xorm:"UNIQUE(s)"`
+ LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ RedirectRepoID int64 // repoID to redirect to
+}
+
+// TableName represents real table name in database
+func (Redirect) TableName() string {
+ return "repo_redirect"
+}
+
+func init() {
+ db.RegisterModel(new(Redirect))
+}
+
+// LookupRedirect looks up whether a repository name has a redirect, and returns the target repository ID
+func LookupRedirect(ctx context.Context, ownerID int64, repoName string) (int64, error) {
+ repoName = strings.ToLower(repoName)
+ redirect := &Redirect{OwnerID: ownerID, LowerName: repoName}
+ if has, err := db.GetEngine(ctx).Get(redirect); err != nil {
+ return 0, err
+ } else if !has {
+ return 0, ErrRedirectNotExist{OwnerID: ownerID, RepoName: repoName}
+ }
+ return redirect.RedirectRepoID, nil
+}
+
+// NewRedirect creates a new repository redirect; any redirect occupying the new name is deleted first
+func NewRedirect(ctx context.Context, ownerID, repoID int64, oldRepoName, newRepoName string) error {
+ oldRepoName = strings.ToLower(oldRepoName)
+ newRepoName = strings.ToLower(newRepoName)
+
+ if err := DeleteRedirect(ctx, ownerID, newRepoName); err != nil {
+ return err
+ }
+
+ return db.Insert(ctx, &Redirect{
+ OwnerID: ownerID,
+ LowerName: oldRepoName,
+ RedirectRepoID: repoID,
+ })
+}
+
+// DeleteRedirect deletes any redirect from the specified repository name
+// to anything else
+func DeleteRedirect(ctx context.Context, ownerID int64, repoName string) error {
+ repoName = strings.ToLower(repoName)
+ _, err := db.GetEngine(ctx).Delete(&Redirect{OwnerID: ownerID, LowerName: repoName})
+ return err
+}
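+
+// Usage sketch (illustrative): when a repository is renamed, record a
+// redirect from the old name so existing links keep resolving.
+//
+//	if err := NewRedirect(ctx, repo.OwnerID, repo.ID, oldName, newName); err != nil {
+//		return err
+//	}
+//	repoID, err := LookupRedirect(ctx, repo.OwnerID, oldName) // -> repo.ID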
diff --git a/models/repo/redirect_test.go b/models/repo/redirect_test.go
new file mode 100644
index 0000000..2016784
--- /dev/null
+++ b/models/repo/redirect_test.go
@@ -0,0 +1,78 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestLookupRedirect(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repoID, err := repo_model.LookupRedirect(db.DefaultContext, 2, "oldrepo1")
+ require.NoError(t, err)
+ assert.EqualValues(t, 1, repoID)
+
+ _, err = repo_model.LookupRedirect(db.DefaultContext, unittest.NonexistentID, "doesnotexist")
+ assert.True(t, repo_model.IsErrRedirectNotExist(err))
+}
+
+func TestNewRedirect(t *testing.T) {
+ // redirect to a completely new name
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ require.NoError(t, repo_model.NewRedirect(db.DefaultContext, repo.OwnerID, repo.ID, repo.Name, "newreponame"))
+
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Redirect{
+ OwnerID: repo.OwnerID,
+ LowerName: repo.LowerName,
+ RedirectRepoID: repo.ID,
+ })
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Redirect{
+ OwnerID: repo.OwnerID,
+ LowerName: "oldrepo1",
+ RedirectRepoID: repo.ID,
+ })
+}
+
+func TestNewRedirect2(t *testing.T) {
+ // redirect to previously used name
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ require.NoError(t, repo_model.NewRedirect(db.DefaultContext, repo.OwnerID, repo.ID, repo.Name, "oldrepo1"))
+
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Redirect{
+ OwnerID: repo.OwnerID,
+ LowerName: repo.LowerName,
+ RedirectRepoID: repo.ID,
+ })
+ unittest.AssertNotExistsBean(t, &repo_model.Redirect{
+ OwnerID: repo.OwnerID,
+ LowerName: "oldrepo1",
+ RedirectRepoID: repo.ID,
+ })
+}
+
+func TestNewRedirect3(t *testing.T) {
+ // redirect for a previously-unredirected repo
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 2})
+ require.NoError(t, repo_model.NewRedirect(db.DefaultContext, repo.OwnerID, repo.ID, repo.Name, "newreponame"))
+
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Redirect{
+ OwnerID: repo.OwnerID,
+ LowerName: repo.LowerName,
+ RedirectRepoID: repo.ID,
+ })
+}
diff --git a/models/repo/release.go b/models/repo/release.go
new file mode 100644
index 0000000..e2cd7d7
--- /dev/null
+++ b/models/repo/release.go
@@ -0,0 +1,566 @@
+// Copyright 2014 The Gogs Authors. All rights reserved.
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+ "html/template"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/optional"
+ "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ErrReleaseAlreadyExist represents a "ReleaseAlreadyExist" kind of error.
+type ErrReleaseAlreadyExist struct {
+ TagName string
+}
+
+// IsErrReleaseAlreadyExist checks if an error is an ErrReleaseAlreadyExist.
+func IsErrReleaseAlreadyExist(err error) bool {
+ _, ok := err.(ErrReleaseAlreadyExist)
+ return ok
+}
+
+func (err ErrReleaseAlreadyExist) Error() string {
+	return fmt.Sprintf("release tag already exists [tag_name: %s]", err.TagName)
+}
+
+func (err ErrReleaseAlreadyExist) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+// ErrReleaseNotExist represents a "ReleaseNotExist" kind of error.
+type ErrReleaseNotExist struct {
+ ID int64
+ TagName string
+}
+
+// IsErrReleaseNotExist checks if an error is a ErrReleaseNotExist.
+func IsErrReleaseNotExist(err error) bool {
+ _, ok := err.(ErrReleaseNotExist)
+ return ok
+}
+
+func (err ErrReleaseNotExist) Error() string {
+ return fmt.Sprintf("release tag does not exist [id: %d, tag_name: %s]", err.ID, err.TagName)
+}
+
+func (err ErrReleaseNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// Release represents a release of repository.
+type Release struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"INDEX UNIQUE(n)"`
+ Repo *Repository `xorm:"-"`
+ PublisherID int64 `xorm:"INDEX"`
+ Publisher *user_model.User `xorm:"-"`
+ TagName string `xorm:"INDEX UNIQUE(n)"`
+ OriginalAuthor string
+ OriginalAuthorID int64 `xorm:"index"`
+ LowerTagName string
+ Target string
+ TargetBehind string `xorm:"-"` // to handle non-existing or empty target
+ Title string
+ Sha1 string `xorm:"VARCHAR(64)"`
+ HideArchiveLinks bool `xorm:"NOT NULL DEFAULT false"`
+ NumCommits int64
+ NumCommitsBehind int64 `xorm:"-"`
+ Note string `xorm:"TEXT"`
+ RenderedNote template.HTML `xorm:"-"`
+ IsDraft bool `xorm:"NOT NULL DEFAULT false"`
+ IsPrerelease bool `xorm:"NOT NULL DEFAULT false"`
+ IsTag bool `xorm:"NOT NULL DEFAULT false"` // will be true only if the record is a tag and has no related releases
+ Attachments []*Attachment `xorm:"-"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX"`
+ ArchiveDownloadCount *structs.TagArchiveDownloadCount `xorm:"-"`
+}
+
+func init() {
+ db.RegisterModel(new(Release))
+}
+
+// LoadAttributes loads repo and publisher attributes for a release
+func (r *Release) LoadAttributes(ctx context.Context) error {
+ var err error
+ if r.Repo == nil {
+ r.Repo, err = GetRepositoryByID(ctx, r.RepoID)
+ if err != nil {
+ return err
+ }
+ }
+ if r.Publisher == nil {
+ r.Publisher, err = user_model.GetUserByID(ctx, r.PublisherID)
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ r.Publisher = user_model.NewGhostUser()
+ } else {
+ return err
+ }
+ }
+ }
+
+ err = r.LoadArchiveDownloadCount(ctx)
+ if err != nil {
+ return err
+ }
+
+ return GetReleaseAttachments(ctx, r)
+}
+
+// LoadArchiveDownloadCount loads the download count for the source archives
+func (r *Release) LoadArchiveDownloadCount(ctx context.Context) error {
+ var err error
+ r.ArchiveDownloadCount, err = GetArchiveDownloadCount(ctx, r.RepoID, r.ID)
+ return err
+}
+
+// APIURL the api url for a release. release must have attributes loaded
+func (r *Release) APIURL() string {
+ return r.Repo.APIURL() + "/releases/" + strconv.FormatInt(r.ID, 10)
+}
+
+// ZipURL the zip url for a release. release must have attributes loaded
+func (r *Release) ZipURL() string {
+ return r.Repo.HTMLURL() + "/archive/" + util.PathEscapeSegments(r.TagName) + ".zip"
+}
+
+// TarURL the tar.gz url for a release. release must have attributes loaded
+func (r *Release) TarURL() string {
+ return r.Repo.HTMLURL() + "/archive/" + util.PathEscapeSegments(r.TagName) + ".tar.gz"
+}
+
+// HTMLURL the url for a release on the web UI. release must have attributes loaded
+func (r *Release) HTMLURL() string {
+ return r.Repo.HTMLURL() + "/releases/tag/" + util.PathEscapeSegments(r.TagName)
+}
+
+// APIUploadURL the api url to upload assets to a release. release must have attributes loaded
+func (r *Release) APIUploadURL() string {
+ return r.APIURL() + "/assets"
+}
+
+// Link the relative url for a release on the web UI. release must have attributes loaded
+func (r *Release) Link() string {
+ return r.Repo.Link() + "/releases/tag/" + util.PathEscapeSegments(r.TagName)
+}
+
+// IsReleaseExist returns true if a release with the given tag name already exists.
+func IsReleaseExist(ctx context.Context, repoID int64, tagName string) (bool, error) {
+ if len(tagName) == 0 {
+ return false, nil
+ }
+
+ return db.GetEngine(ctx).Exist(&Release{RepoID: repoID, LowerTagName: strings.ToLower(tagName)})
+}
+
+// UpdateRelease updates all columns of a release
+func UpdateRelease(ctx context.Context, rel *Release) error {
+ _, err := db.GetEngine(ctx).ID(rel.ID).AllCols().Update(rel)
+ return err
+}
+
+// AddReleaseAttachments adds attachments to a release
+func AddReleaseAttachments(ctx context.Context, releaseID int64, attachmentUUIDs []string) (err error) {
+ // Check attachments
+ attachments, err := GetAttachmentsByUUIDs(ctx, attachmentUUIDs)
+ if err != nil {
+ return fmt.Errorf("GetAttachmentsByUUIDs [uuids: %v]: %w", attachmentUUIDs, err)
+ }
+
+ for i := range attachments {
+ if attachments[i].ReleaseID != 0 {
+ return util.NewPermissionDeniedErrorf("release permission denied")
+ }
+ attachments[i].ReleaseID = releaseID
+		// None of the assigned values can be zero, so the default Update
+		// (which skips zero-value fields) is sufficient; AllCols() is not needed.
+ if _, err = db.GetEngine(ctx).ID(attachments[i].ID).Update(attachments[i]); err != nil {
+ return fmt.Errorf("update attachment [%d]: %w", attachments[i].ID, err)
+ }
+ }
+
+ return err
+}
+
+// GetRelease returns the release with the given tag name in a repository.
+func GetRelease(ctx context.Context, repoID int64, tagName string) (*Release, error) {
+ rel := &Release{RepoID: repoID, LowerTagName: strings.ToLower(tagName)}
+ has, err := db.GetEngine(ctx).Get(rel)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrReleaseNotExist{0, tagName}
+ }
+ return rel, nil
+}
+
+// GetReleaseByID returns the release with the given ID.
+func GetReleaseByID(ctx context.Context, id int64) (*Release, error) {
+ rel := new(Release)
+ has, err := db.GetEngine(ctx).
+ ID(id).
+ Get(rel)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrReleaseNotExist{id, ""}
+ }
+
+ return rel, nil
+}
+
+// GetReleaseForRepoByID returns the release with the given ID in a repository.
+func GetReleaseForRepoByID(ctx context.Context, repoID, id int64) (*Release, error) {
+ rel := new(Release)
+ has, err := db.GetEngine(ctx).
+ Where("id=? AND repo_id=?", id, repoID).
+ Get(rel)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrReleaseNotExist{id, ""}
+ }
+
+ return rel, nil
+}
+
+// FindReleasesOptions describes the conditions to Find releases
+type FindReleasesOptions struct {
+ db.ListOptions
+ RepoID int64
+ IncludeDrafts bool
+ IncludeTags bool
+ IsPreRelease optional.Option[bool]
+ IsDraft optional.Option[bool]
+ TagNames []string
+ HasSha1 optional.Option[bool] // useful to find draft releases which are created with existing tags
+}
+
+func (opts FindReleasesOptions) ToConds() builder.Cond {
+ var cond builder.Cond = builder.Eq{"repo_id": opts.RepoID}
+
+ if !opts.IncludeDrafts {
+ cond = cond.And(builder.Eq{"is_draft": false})
+ }
+ if !opts.IncludeTags {
+ cond = cond.And(builder.Eq{"is_tag": false})
+ }
+ if len(opts.TagNames) > 0 {
+ cond = cond.And(builder.In("tag_name", opts.TagNames))
+ }
+ if opts.IsPreRelease.Has() {
+ cond = cond.And(builder.Eq{"is_prerelease": opts.IsPreRelease.Value()})
+ }
+ if opts.IsDraft.Has() {
+ cond = cond.And(builder.Eq{"is_draft": opts.IsDraft.Value()})
+ }
+ if opts.HasSha1.Has() {
+ if opts.HasSha1.Value() {
+ cond = cond.And(builder.Neq{"sha1": ""})
+ } else {
+ cond = cond.And(builder.Eq{"sha1": ""})
+ }
+ }
+ return cond
+}
+
+func (opts FindReleasesOptions) ToOrders() string {
+ return "created_unix DESC, id DESC"
+}
+
+// GetTagNamesByRepoID returns a list of release tag names of a repository.
+func GetTagNamesByRepoID(ctx context.Context, repoID int64) ([]string, error) {
+ listOptions := db.ListOptions{
+ ListAll: true,
+ }
+ opts := FindReleasesOptions{
+ ListOptions: listOptions,
+ IncludeDrafts: true,
+ IncludeTags: true,
+ HasSha1: optional.Some(true),
+ RepoID: repoID,
+ }
+
+ tags := make([]string, 0)
+ sess := db.GetEngine(ctx).
+ Table("release").
+ Desc("created_unix", "id").
+ Where(opts.ToConds()).
+ Cols("tag_name")
+
+ return tags, sess.Find(&tags)
+}
+
+// GetLatestReleaseByRepoID returns the latest release for a repository
+func GetLatestReleaseByRepoID(ctx context.Context, repoID int64) (*Release, error) {
+ cond := builder.NewCond().
+ And(builder.Eq{"repo_id": repoID}).
+ And(builder.Eq{"is_draft": false}).
+ And(builder.Eq{"is_prerelease": false}).
+ And(builder.Eq{"is_tag": false})
+
+ rel := new(Release)
+ has, err := db.GetEngine(ctx).
+ Desc("created_unix", "id").
+ Where(cond).
+ Get(rel)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrReleaseNotExist{0, "latest"}
+ }
+
+ return rel, nil
+}
+
+type releaseMetaSearch struct {
+ ID []int64
+ Rel []*Release
+}
+
+func (s releaseMetaSearch) Len() int {
+ return len(s.ID)
+}
+
+func (s releaseMetaSearch) Swap(i, j int) {
+ s.ID[i], s.ID[j] = s.ID[j], s.ID[i]
+ s.Rel[i], s.Rel[j] = s.Rel[j], s.Rel[i]
+}
+
+func (s releaseMetaSearch) Less(i, j int) bool {
+ return s.ID[i] < s.ID[j]
+}
+
+func hasDuplicateName(attaches []*Attachment) bool {
+ attachSet := container.Set[string]{}
+ for _, attachment := range attaches {
+ if attachSet.Contains(attachment.Name) {
+ return true
+ }
+ attachSet.Add(attachment.Name)
+ }
+ return false
+}
+
+// GetReleaseAttachments retrieves the attachments for releases
+func GetReleaseAttachments(ctx context.Context, rels ...*Release) (err error) {
+ if len(rels) == 0 {
+ return nil
+ }
+
+	// To keep this as efficient as possible, sort all releases by ID,
+	// select the attachments by release ID,
+	// then merge-join them.
+
+ // Sort
+ sortedRels := releaseMetaSearch{ID: make([]int64, len(rels)), Rel: make([]*Release, len(rels))}
+ var attachments []*Attachment
+ for index, element := range rels {
+ element.Attachments = []*Attachment{}
+ sortedRels.ID[index] = element.ID
+ sortedRels.Rel[index] = element
+ }
+ sort.Sort(sortedRels)
+
+ // Select attachments
+ err = db.GetEngine(ctx).
+ Asc("release_id", "name").
+ In("release_id", sortedRels.ID).
+ Find(&attachments)
+ if err != nil {
+ return err
+ }
+
+ // merge join
+ currentIndex := 0
+ for _, attachment := range attachments {
+ for sortedRels.ID[currentIndex] < attachment.ReleaseID {
+ currentIndex++
+ }
+ sortedRels.Rel[currentIndex].Attachments = append(sortedRels.Rel[currentIndex].Attachments, attachment)
+ }
+
+	// Make the attachment URLs predictable
+ for _, release := range rels {
+ // If we have no Repo, we don't need to execute this loop
+ if release.Repo == nil {
+ continue
+ }
+
+		// If the names are unique, use the URL with the name instead of the UUID
+ if !hasDuplicateName(release.Attachments) {
+ for _, attachment := range release.Attachments {
+ attachment.CustomDownloadURL = release.Repo.HTMLURL() + "/releases/download/" + url.PathEscape(release.TagName) + "/" + url.PathEscape(attachment.Name)
+ }
+ }
+ }
+
+ return err
+}
+
+// UpdateReleasesMigrationsByType updates the releases of all repositories migrated from gitServiceType, replacing the given original author ID with posterID
+func UpdateReleasesMigrationsByType(ctx context.Context, gitServiceType structs.GitServiceType, originalAuthorID string, posterID int64) error {
+ _, err := db.GetEngine(ctx).Table("release").
+ Where("repo_id IN (SELECT id FROM repository WHERE original_service_type = ?)", gitServiceType).
+ And("original_author_id = ?", originalAuthorID).
+ Update(map[string]any{
+ "publisher_id": posterID,
+ "original_author": "",
+ "original_author_id": 0,
+ })
+ return err
+}
+
+// PushUpdateDeleteTagsContext updates the releases affected by a batch of tags deleted in a push
+func PushUpdateDeleteTagsContext(ctx context.Context, repo *Repository, tags []string) error {
+ if len(tags) == 0 {
+ return nil
+ }
+ lowerTags := make([]string, 0, len(tags))
+ for _, tag := range tags {
+ lowerTags = append(lowerTags, strings.ToLower(tag))
+ }
+
+ for _, tag := range tags {
+ release, err := GetRelease(ctx, repo.ID, tag)
+ if err != nil {
+ return fmt.Errorf("GetRelease: %w", err)
+ }
+
+ err = DeleteArchiveDownloadCountForRelease(ctx, release.ID)
+ if err != nil {
+ return fmt.Errorf("DeleteTagArchiveDownloadCount: %w", err)
+ }
+ }
+
+ if _, err := db.GetEngine(ctx).
+ Where("repo_id = ? AND is_tag = ?", repo.ID, true).
+ In("lower_tag_name", lowerTags).
+ Delete(new(Release)); err != nil {
+ return fmt.Errorf("Delete: %w", err)
+ }
+
+ if _, err := db.GetEngine(ctx).
+ Where("repo_id = ? AND is_tag = ?", repo.ID, false).
+ In("lower_tag_name", lowerTags).
+ Cols("is_draft", "num_commits", "sha1").
+ Update(&Release{
+ IsDraft: true,
+ }); err != nil {
+ return fmt.Errorf("Update: %w", err)
+ }
+
+ return nil
+}
+
+// PushUpdateDeleteTag must be called for any push actions that delete a tag
+func PushUpdateDeleteTag(ctx context.Context, repo *Repository, tagName string) error {
+ rel, err := GetRelease(ctx, repo.ID, tagName)
+ if err != nil {
+ if IsErrReleaseNotExist(err) {
+ return nil
+ }
+ return fmt.Errorf("GetRelease: %w", err)
+ }
+ if rel.IsTag {
+ if _, err = db.DeleteByID[Release](ctx, rel.ID); err != nil {
+ return fmt.Errorf("Delete: %w", err)
+ }
+ } else {
+ rel.IsDraft = true
+ rel.NumCommits = 0
+ rel.Sha1 = ""
+ if _, err = db.GetEngine(ctx).ID(rel.ID).AllCols().Update(rel); err != nil {
+ return fmt.Errorf("Update: %w", err)
+ }
+ }
+
+ return nil
+}
+
+// SaveOrUpdateTag must be called for any push actions that add a tag
+func SaveOrUpdateTag(ctx context.Context, repo *Repository, newRel *Release) error {
+ rel, err := GetRelease(ctx, repo.ID, newRel.TagName)
+ if err != nil && !IsErrReleaseNotExist(err) {
+ return fmt.Errorf("GetRelease: %w", err)
+ }
+
+ if rel == nil {
+ rel = newRel
+ if _, err = db.GetEngine(ctx).Insert(rel); err != nil {
+ return fmt.Errorf("InsertOne: %w", err)
+ }
+ } else {
+ rel.Sha1 = newRel.Sha1
+ rel.CreatedUnix = newRel.CreatedUnix
+ rel.NumCommits = newRel.NumCommits
+ rel.IsDraft = false
+ if rel.IsTag && newRel.PublisherID > 0 {
+ rel.PublisherID = newRel.PublisherID
+ }
+ if _, err = db.GetEngine(ctx).ID(rel.ID).AllCols().Update(rel); err != nil {
+ return fmt.Errorf("Update: %w", err)
+ }
+ }
+ return nil
+}
+
+// RemapExternalUser ExternalUserRemappable interface
+func (r *Release) RemapExternalUser(externalName string, externalID, userID int64) error {
+ r.OriginalAuthor = externalName
+ r.OriginalAuthorID = externalID
+ r.PublisherID = userID
+ return nil
+}
+
+// UserID ExternalUserRemappable interface
+func (r *Release) GetUserID() int64 { return r.PublisherID }
+
+// ExternalName ExternalUserRemappable interface
+func (r *Release) GetExternalName() string { return r.OriginalAuthor }
+
+// ExternalID ExternalUserRemappable interface
+func (r *Release) GetExternalID() int64 { return r.OriginalAuthorID }
+
+// InsertReleases inserts releases and their attachments in a single transaction (used by migrations)
+func InsertReleases(ctx context.Context, rels ...*Release) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+ sess := db.GetEngine(ctx)
+
+ for _, rel := range rels {
+ if _, err := sess.NoAutoTime().Insert(rel); err != nil {
+ return err
+ }
+
+ if len(rel.Attachments) > 0 {
+ for i := range rel.Attachments {
+ rel.Attachments[i].ReleaseID = rel.ID
+ }
+
+ if _, err := sess.NoAutoTime().Insert(rel.Attachments); err != nil {
+ return err
+ }
+ }
+ }
+
+ return committer.Commit()
+}
diff --git a/models/repo/release_test.go b/models/repo/release_test.go
new file mode 100644
index 0000000..4e61a28
--- /dev/null
+++ b/models/repo/release_test.go
@@ -0,0 +1,27 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestMigrate_InsertReleases(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ a := &Attachment{
+ UUID: "a0eebc91-9c0c-4ef7-bb6e-6bb9bd380a12",
+ }
+ r := &Release{
+ Attachments: []*Attachment{a},
+ }
+
+ err := InsertReleases(db.DefaultContext, r)
+ require.NoError(t, err)
+}
diff --git a/models/repo/repo.go b/models/repo/repo.go
new file mode 100644
index 0000000..cd6be48
--- /dev/null
+++ b/models/repo/repo.go
@@ -0,0 +1,951 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+ "html/template"
+ "net"
+ "net/url"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/markup"
+ "code.gitea.io/gitea/modules/optional"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/translation"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ErrUserDoesNotHaveAccessToRepo represents an error where the user doesn't have access to a given repo.
+type ErrUserDoesNotHaveAccessToRepo struct {
+ UserID int64
+ RepoName string
+}
+
+// IsErrUserDoesNotHaveAccessToRepo checks if an error is an ErrUserDoesNotHaveAccessToRepo.
+func IsErrUserDoesNotHaveAccessToRepo(err error) bool {
+ _, ok := err.(ErrUserDoesNotHaveAccessToRepo)
+ return ok
+}
+
+func (err ErrUserDoesNotHaveAccessToRepo) Error() string {
+ return fmt.Sprintf("user doesn't have access to repo [user_id: %d, repo_name: %s]", err.UserID, err.RepoName)
+}
+
+func (err ErrUserDoesNotHaveAccessToRepo) Unwrap() error {
+ return util.ErrPermissionDenied
+}
+
+type ErrRepoIsArchived struct {
+ Repo *Repository
+}
+
+func (err ErrRepoIsArchived) Error() string {
+ return fmt.Sprintf("%s is archived", err.Repo.LogString())
+}
+
+var (
+ reservedRepoNames = []string{".", "..", "-"}
+ reservedRepoPatterns = []string{"*.git", "*.wiki", "*.rss", "*.atom"}
+)
+
+// IsUsableRepoName returns nil when the repository name is usable, or an error explaining why it is not
+func IsUsableRepoName(name string) error {
+ if db.AlphaDashDotPattern.MatchString(name) {
+		// Note: this error is normally caught earlier in the UI
+ return db.ErrNameCharsNotAllowed{Name: name}
+ }
+ return db.IsUsableName(reservedRepoNames, reservedRepoPatterns, name)
+}
+
+// TrustModelType defines the types of trust model for this repository
+type TrustModelType int
+
+// kinds of TrustModel
+const (
+ DefaultTrustModel TrustModelType = iota // default trust model
+ CommitterTrustModel
+ CollaboratorTrustModel
+ CollaboratorCommitterTrustModel
+)
+
+// String converts a TrustModelType to a string
+func (t TrustModelType) String() string {
+ switch t {
+ case DefaultTrustModel:
+ return "default"
+ case CommitterTrustModel:
+ return "committer"
+ case CollaboratorTrustModel:
+ return "collaborator"
+ case CollaboratorCommitterTrustModel:
+ return "collaboratorcommitter"
+ }
+ return "default"
+}
+
+// ToTrustModel converts a string to a TrustModelType
+func ToTrustModel(model string) TrustModelType {
+ switch strings.ToLower(strings.TrimSpace(model)) {
+ case "default":
+ return DefaultTrustModel
+ case "collaborator":
+ return CollaboratorTrustModel
+ case "committer":
+ return CommitterTrustModel
+ case "collaboratorcommitter":
+ return CollaboratorCommitterTrustModel
+ }
+ return DefaultTrustModel
+}
+
+// RepositoryStatus defines the status of repository
+type RepositoryStatus int
+
+// all kinds of RepositoryStatus
+const (
+ RepositoryReady RepositoryStatus = iota // a normal repository
+ RepositoryBeingMigrated // repository is migrating
+ RepositoryPendingTransfer // repository pending in ownership transfer state
+ RepositoryBroken // repository is in a permanently broken state
+)
+
+// Repository represents a git repository.
+type Repository struct {
+ ID int64 `xorm:"pk autoincr"`
+ OwnerID int64 `xorm:"UNIQUE(s) index"`
+ OwnerName string
+ Owner *user_model.User `xorm:"-"`
+ LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ Name string `xorm:"INDEX NOT NULL"`
+ Description string `xorm:"TEXT"`
+ Website string `xorm:"VARCHAR(2048)"`
+ OriginalServiceType api.GitServiceType `xorm:"index"`
+ OriginalURL string `xorm:"VARCHAR(2048)"`
+ DefaultBranch string
+ WikiBranch string
+
+ NumWatches int
+ NumStars int
+ NumForks int
+ NumIssues int
+ NumClosedIssues int
+ NumOpenIssues int `xorm:"-"`
+ NumPulls int
+ NumClosedPulls int
+ NumOpenPulls int `xorm:"-"`
+ NumMilestones int `xorm:"NOT NULL DEFAULT 0"`
+ NumClosedMilestones int `xorm:"NOT NULL DEFAULT 0"`
+ NumOpenMilestones int `xorm:"-"`
+ NumProjects int `xorm:"NOT NULL DEFAULT 0"`
+ NumClosedProjects int `xorm:"NOT NULL DEFAULT 0"`
+ NumOpenProjects int `xorm:"-"`
+ NumActionRuns int `xorm:"NOT NULL DEFAULT 0"`
+ NumClosedActionRuns int `xorm:"NOT NULL DEFAULT 0"`
+ NumOpenActionRuns int `xorm:"-"`
+
+ IsPrivate bool `xorm:"INDEX"`
+ IsEmpty bool `xorm:"INDEX"`
+ IsArchived bool `xorm:"INDEX"`
+ IsMirror bool `xorm:"INDEX"`
+
+ Status RepositoryStatus `xorm:"NOT NULL DEFAULT 0"`
+
+ RenderingMetas map[string]string `xorm:"-"`
+ DocumentRenderingMetas map[string]string `xorm:"-"`
+ Units []*RepoUnit `xorm:"-"`
+ PrimaryLanguage *LanguageStat `xorm:"-"`
+
+ IsFork bool `xorm:"INDEX NOT NULL DEFAULT false"`
+ ForkID int64 `xorm:"INDEX"`
+ BaseRepo *Repository `xorm:"-"`
+ IsTemplate bool `xorm:"INDEX NOT NULL DEFAULT false"`
+ TemplateID int64 `xorm:"INDEX"`
+ Size int64 `xorm:"NOT NULL DEFAULT 0"`
+ GitSize int64 `xorm:"NOT NULL DEFAULT 0"`
+ LFSSize int64 `xorm:"NOT NULL DEFAULT 0"`
+ CodeIndexerStatus *RepoIndexerStatus `xorm:"-"`
+ StatsIndexerStatus *RepoIndexerStatus `xorm:"-"`
+ IsFsckEnabled bool `xorm:"NOT NULL DEFAULT true"`
+ CloseIssuesViaCommitInAnyBranch bool `xorm:"NOT NULL DEFAULT false"`
+ Topics []string `xorm:"TEXT JSON"`
+ ObjectFormatName string `xorm:"VARCHAR(6) NOT NULL DEFAULT 'sha1'"`
+
+ TrustModel TrustModelType
+
+ // Avatar: ID(10-20)-md5(32) - must fit into 64 symbols
+ Avatar string `xorm:"VARCHAR(64)"`
+
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ ArchivedUnix timeutil.TimeStamp `xorm:"DEFAULT 0"`
+}
+
+func init() {
+ db.RegisterModel(new(Repository))
+}
+
+func (repo *Repository) GetName() string {
+ return repo.Name
+}
+
+func (repo *Repository) GetOwnerName() string {
+ return repo.OwnerName
+}
+
+func (repo *Repository) GetWikiBranchName() string {
+ if repo.WikiBranch == "" {
+ return setting.Repository.DefaultBranch
+ }
+ return repo.WikiBranch
+}
+
+// SanitizedOriginalURL returns a sanitized OriginalURL
+func (repo *Repository) SanitizedOriginalURL() string {
+ if repo.OriginalURL == "" {
+ return ""
+ }
+ u, _ := util.SanitizeURL(repo.OriginalURL)
+ return u
+}
+
+// text representations to be returned in SizeDetail.Name
+const (
+ SizeDetailNameGit = "git"
+ SizeDetailNameLFS = "lfs"
+)
+
+type SizeDetail struct {
+ Name string
+ Size int64
+}
+
+// SizeDetails returns a list with the size details of the repository
+// Note: SizeDetailsString below expects it to have 2 entries
+func (repo *Repository) SizeDetails() []SizeDetail {
+ sizeDetails := []SizeDetail{
+ {
+ Name: SizeDetailNameGit,
+ Size: repo.GitSize,
+ },
+ {
+ Name: SizeDetailNameLFS,
+ Size: repo.LFSSize,
+ },
+ }
+ return sizeDetails
+}
+
+// SizeDetailsString returns a concatenation of all repository size details as a string
+func (repo *Repository) SizeDetailsString(locale translation.Locale) string {
+ sizeDetails := repo.SizeDetails()
+ return locale.TrString("repo.size_format", sizeDetails[0].Name, locale.TrSize(sizeDetails[0].Size), sizeDetails[1].Name, locale.TrSize(sizeDetails[1].Size))
+}
+
+func (repo *Repository) LogString() string {
+ if repo == nil {
+ return "<Repository nil>"
+ }
+ return fmt.Sprintf("<Repository %d:%s/%s>", repo.ID, repo.OwnerName, repo.Name)
+}
+
+// IsBeingMigrated indicates that the repository is being migrated
+func (repo *Repository) IsBeingMigrated() bool {
+ return repo.Status == RepositoryBeingMigrated
+}
+
+// IsBeingCreated indicates that the repository is being migrated or forked
+func (repo *Repository) IsBeingCreated() bool {
+ return repo.IsBeingMigrated()
+}
+
+// IsBroken indicates that the repository is broken
+func (repo *Repository) IsBroken() bool {
+ return repo.Status == RepositoryBroken
+}
+
+// MarkAsBrokenEmpty marks the repo as broken and empty
+func (repo *Repository) MarkAsBrokenEmpty() {
+ repo.Status = RepositoryBroken
+ repo.IsEmpty = true
+}
+
+// AfterLoad is invoked from XORM after setting the values of all fields of this object.
+func (repo *Repository) AfterLoad() {
+ repo.NumOpenIssues = repo.NumIssues - repo.NumClosedIssues
+ repo.NumOpenPulls = repo.NumPulls - repo.NumClosedPulls
+ repo.NumOpenMilestones = repo.NumMilestones - repo.NumClosedMilestones
+ repo.NumOpenProjects = repo.NumProjects - repo.NumClosedProjects
+ repo.NumOpenActionRuns = repo.NumActionRuns - repo.NumClosedActionRuns
+}
+
+// LoadAttributes loads attributes of the repository.
+func (repo *Repository) LoadAttributes(ctx context.Context) error {
+ // Load owner
+ if err := repo.LoadOwner(ctx); err != nil {
+ return fmt.Errorf("load owner: %w", err)
+ }
+
+ // Load primary language
+ stats := make(LanguageStatList, 0, 1)
+ if err := db.GetEngine(ctx).
+ Where("`repo_id` = ? AND `is_primary` = ? AND `language` != ?", repo.ID, true, "other").
+ Find(&stats); err != nil {
+ return fmt.Errorf("find primary languages: %w", err)
+ }
+ stats.LoadAttributes()
+ for _, st := range stats {
+ if st.RepoID == repo.ID {
+ repo.PrimaryLanguage = st
+ break
+ }
+ }
+ return nil
+}
+
+// FullName returns the repository full name
+func (repo *Repository) FullName() string {
+ return repo.OwnerName + "/" + repo.Name
+}
+
+// HTMLURL returns the repository HTML URL
+func (repo *Repository) HTMLURL() string {
+ return setting.AppURL + url.PathEscape(repo.OwnerName) + "/" + url.PathEscape(repo.Name)
+}
+
+// CommitLink makes a link to the given full commit ID
+// Note: it does not check whether the ID is a valid commit ID
+func (repo *Repository) CommitLink(commitID string) (result string) {
+ if git.IsEmptyCommitID(commitID, nil) {
+ result = ""
+ } else {
+ result = repo.Link() + "/commit/" + url.PathEscape(commitID)
+ }
+ return result
+}
+
+// APIURL returns the repository API URL
+func (repo *Repository) APIURL() string {
+ return setting.AppURL + "api/v1/repos/" + url.PathEscape(repo.OwnerName) + "/" + url.PathEscape(repo.Name)
+}
+
+// APActorID returns the activitypub repository API URL
+func (repo *Repository) APActorID() string {
+ return fmt.Sprintf("%vapi/v1/activitypub/repository-id/%v", setting.AppURL, url.PathEscape(fmt.Sprint(repo.ID)))
+}
+
+// GetCommitsCountCacheKey returns cache key used for commits count caching.
+func (repo *Repository) GetCommitsCountCacheKey(contextName string, isRef bool) string {
+ var prefix string
+ if isRef {
+ prefix = "ref"
+ } else {
+ prefix = "commit"
+ }
+ return fmt.Sprintf("commits-count-%d-%s-%s", repo.ID, prefix, contextName)
+}
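+
+// For example (illustrative), a repository with ID 7 yields the key
+// "commits-count-7-ref-main" for GetCommitsCountCacheKey("main", true).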
+
+// LoadUnits loads repo units into repo.Units
+func (repo *Repository) LoadUnits(ctx context.Context) (err error) {
+ if repo.Units != nil {
+ return nil
+ }
+
+ repo.Units, err = getUnitsByRepoID(ctx, repo.ID)
+ if log.IsTrace() {
+ unitTypeStrings := make([]string, len(repo.Units))
+ for i, unit := range repo.Units {
+ unitTypeStrings[i] = unit.Type.String()
+ }
+ log.Trace("repo.Units, ID=%d, Types: [%s]", repo.ID, strings.Join(unitTypeStrings, ", "))
+ }
+
+ return err
+}
+
+// UnitEnabled returns true if this repository has the given unit enabled
+func (repo *Repository) UnitEnabled(ctx context.Context, tp unit.Type) bool {
+ if err := repo.LoadUnits(ctx); err != nil {
+ log.Warn("Error loading repository (ID: %d) units: %s", repo.ID, err.Error())
+ }
+ for _, unit := range repo.Units {
+ if unit.Type == tp {
+ return true
+ }
+ }
+ return false
+}
+
+// MustGetUnit always returns a RepoUnit object
+func (repo *Repository) MustGetUnit(ctx context.Context, tp unit.Type) *RepoUnit {
+ ru, err := repo.GetUnit(ctx, tp)
+ if err == nil {
+ return ru
+ }
+
+ if tp == unit.TypeExternalWiki {
+ return &RepoUnit{
+ Type: tp,
+ Config: new(ExternalWikiConfig),
+ }
+ } else if tp == unit.TypeExternalTracker {
+ return &RepoUnit{
+ Type: tp,
+ Config: new(ExternalTrackerConfig),
+ }
+ } else if tp == unit.TypePullRequests {
+ return &RepoUnit{
+ Type: tp,
+ Config: new(PullRequestsConfig),
+ }
+ } else if tp == unit.TypeIssues {
+ return &RepoUnit{
+ Type: tp,
+ Config: new(IssuesConfig),
+ }
+ } else if tp == unit.TypeActions {
+ return &RepoUnit{
+ Type: tp,
+ Config: new(ActionsConfig),
+ }
+ }
+
+ return &RepoUnit{
+ Type: tp,
+ Config: new(UnitConfig),
+ }
+}
+
+// GetUnit returns a RepoUnit object
+func (repo *Repository) GetUnit(ctx context.Context, tp unit.Type) (*RepoUnit, error) {
+ if err := repo.LoadUnits(ctx); err != nil {
+ return nil, err
+ }
+ for _, unit := range repo.Units {
+ if unit.Type == tp {
+ return unit, nil
+ }
+ }
+ return nil, ErrUnitTypeNotExist{tp}
+}
+
+// AllUnitsEnabled returns true if all units are enabled for the repo.
+func (repo *Repository) AllUnitsEnabled(ctx context.Context) bool {
+ hasAnyUnitEnabled := func(unitGroup []unit.Type) bool {
+ // Loop over the group of units
+ for _, unit := range unitGroup {
+ // If *any* of them is enabled, return true.
+ if repo.UnitEnabled(ctx, unit) {
+ return true
+ }
+ }
+
+ // If none are enabled, return false.
+ return false
+ }
+
+ for _, unitGroup := range unit.AllowedRepoUnitGroups {
+ // If any disabled unit is found, return false immediately.
+ if !hasAnyUnitEnabled(unitGroup) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// LoadOwner loads owner user
+func (repo *Repository) LoadOwner(ctx context.Context) (err error) {
+ if repo.Owner != nil {
+ return nil
+ }
+
+ repo.Owner, err = user_model.GetUserByID(ctx, repo.OwnerID)
+ return err
+}
+
+// MustOwner always returns a valid *user_model.User object to avoid
+// conceptually impossible error handling.
+// It creates a fake object that contains the error details
+// when an error occurs.
+func (repo *Repository) MustOwner(ctx context.Context) *user_model.User {
+ if err := repo.LoadOwner(ctx); err != nil {
+ return &user_model.User{
+ Name: "error",
+ FullName: err.Error(),
+ }
+ }
+
+ return repo.Owner
+}
+
+// ComposeMetas composes a map of metas for properly rendering issue links and external issue trackers.
+func (repo *Repository) ComposeMetas(ctx context.Context) map[string]string {
+ if len(repo.RenderingMetas) == 0 {
+ metas := map[string]string{
+ "user": repo.OwnerName,
+ "repo": repo.Name,
+ "repoPath": repo.RepoPath(),
+ "mode": "comment",
+ }
+
+ unit, err := repo.GetUnit(ctx, unit.TypeExternalTracker)
+ if err == nil {
+ metas["format"] = unit.ExternalTrackerConfig().ExternalTrackerFormat
+ switch unit.ExternalTrackerConfig().ExternalTrackerStyle {
+ case markup.IssueNameStyleAlphanumeric:
+ metas["style"] = markup.IssueNameStyleAlphanumeric
+ case markup.IssueNameStyleRegexp:
+ metas["style"] = markup.IssueNameStyleRegexp
+ metas["regexp"] = unit.ExternalTrackerConfig().ExternalTrackerRegexpPattern
+ default:
+ metas["style"] = markup.IssueNameStyleNumeric
+ }
+ }
+
+ repo.MustOwner(ctx)
+ if repo.Owner.IsOrganization() {
+ teams := make([]string, 0, 5)
+ _ = db.GetEngine(ctx).Table("team_repo").
+ Join("INNER", "team", "team.id = team_repo.team_id").
+ Where("team_repo.repo_id = ?", repo.ID).
+ Select("team.lower_name").
+ OrderBy("team.lower_name").
+ Find(&teams)
+ metas["teams"] = "," + strings.Join(teams, ",") + ","
+ metas["org"] = strings.ToLower(repo.OwnerName)
+ }
+
+ repo.RenderingMetas = metas
+ }
+ return repo.RenderingMetas
+}
+
+// ComposeDocumentMetas composes a map of metas for properly rendering documents
+func (repo *Repository) ComposeDocumentMetas(ctx context.Context) map[string]string {
+ if len(repo.DocumentRenderingMetas) == 0 {
+ metas := map[string]string{}
+ for k, v := range repo.ComposeMetas(ctx) {
+ metas[k] = v
+ }
+ metas["mode"] = "document"
+ repo.DocumentRenderingMetas = metas
+ }
+ return repo.DocumentRenderingMetas
+}
+
+// GetBaseRepo populates repo.BaseRepo for a fork repository and
+// returns an error on failure (NOTE: no error is returned for
+// non-fork repositories, and BaseRepo will be left untouched)
+func (repo *Repository) GetBaseRepo(ctx context.Context) (err error) {
+ if !repo.IsFork {
+ return nil
+ }
+
+ if repo.BaseRepo != nil {
+ return nil
+ }
+ repo.BaseRepo, err = GetRepositoryByID(ctx, repo.ForkID)
+ return err
+}
+
+// IsGenerated returns whether _this_ repository was generated from a template
+func (repo *Repository) IsGenerated() bool {
+ return repo.TemplateID != 0
+}
+
+// RepoPath returns the repository path for the given user and repository name.
+func RepoPath(userName, repoName string) string { //revive:disable-line:exported
+ return filepath.Join(user_model.UserPath(userName), strings.ToLower(repoName)+".git")
+}
+
+// RepoPath returns the repository path
+func (repo *Repository) RepoPath() string {
+ return RepoPath(repo.OwnerName, repo.Name)
+}
+
+// Link returns the repository relative url
+func (repo *Repository) Link() string {
+ return setting.AppSubURL + "/" + url.PathEscape(repo.OwnerName) + "/" + url.PathEscape(repo.Name)
+}
+
+// ComposeCompareURL returns the repository comparison URL
+func (repo *Repository) ComposeCompareURL(oldCommitID, newCommitID string) string {
+ return fmt.Sprintf("%s/%s/compare/%s...%s", url.PathEscape(repo.OwnerName), url.PathEscape(repo.Name), util.PathEscapeSegments(oldCommitID), util.PathEscapeSegments(newCommitID))
+}
+
+func (repo *Repository) ComposeBranchCompareURL(baseRepo *Repository, branchName string) string {
+ if baseRepo == nil {
+ baseRepo = repo
+ }
+ var cmpBranchEscaped string
+ if repo.ID != baseRepo.ID {
+ cmpBranchEscaped = fmt.Sprintf("%s/%s:", url.PathEscape(repo.OwnerName), url.PathEscape(repo.Name))
+ }
+ cmpBranchEscaped = fmt.Sprintf("%s%s", cmpBranchEscaped, util.PathEscapeSegments(branchName))
+ return fmt.Sprintf("%s/compare/%s...%s", baseRepo.Link(), util.PathEscapeSegments(baseRepo.DefaultBranch), cmpBranchEscaped)
+}
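+
+// For example (illustrative), comparing branch "feature" of fork "bob/proj"
+// against base repository "alice/proj" with default branch "main" produces
+// "<base link>/compare/main...bob/proj:feature".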
+
+// IsOwnedBy returns true when user owns this repository
+func (repo *Repository) IsOwnedBy(userID int64) bool {
+ return repo.OwnerID == userID
+}
+
+// CanCreateBranch returns true if repository meets the requirements for creating new branches.
+func (repo *Repository) CanCreateBranch() bool {
+ return !repo.IsMirror
+}
+
+// CanEnablePulls returns true if repository meets the requirements of accepting pulls.
+func (repo *Repository) CanEnablePulls() bool {
+ return !repo.IsMirror && !repo.IsEmpty
+}
+
+// AllowsPulls returns true if repository meets the requirements of accepting pulls and has them enabled.
+func (repo *Repository) AllowsPulls(ctx context.Context) bool {
+ return repo.CanEnablePulls() && repo.UnitEnabled(ctx, unit.TypePullRequests)
+}
+
+// CanEnableEditor returns true if repository meets the requirements of web editor.
+func (repo *Repository) CanEnableEditor() bool {
+ return !repo.IsMirror
+}
+
+// DescriptionHTML renders the repository description and returns it as an HTML string.
+func (repo *Repository) DescriptionHTML(ctx context.Context) template.HTML {
+ desc, err := markup.RenderDescriptionHTML(&markup.RenderContext{
+ Ctx: ctx,
+	// Don't use Metas, to speed up requests
+ }, repo.Description)
+ if err != nil {
+ log.Error("Failed to render description for %s (ID: %d): %v", repo.Name, repo.ID, err)
+ return template.HTML(markup.SanitizeDescription(repo.Description))
+ }
+ return template.HTML(markup.SanitizeDescription(desc))
+}
+
+// CloneLink represents different types of clone URLs of repository.
+type CloneLink struct {
+ SSH string
+ HTTPS string
+}
+
+// ComposeHTTPSCloneURL returns HTTPS clone URL based on given owner and repository name.
+func ComposeHTTPSCloneURL(owner, repo string) string {
+ return fmt.Sprintf("%s%s/%s.git", setting.AppURL, url.PathEscape(owner), url.PathEscape(repo))
+}
+
+func ComposeSSHCloneURL(ownerName, repoName string) string {
+ sshUser := setting.SSH.User
+ sshDomain := setting.SSH.Domain
+
+ // for a non-standard port, the full ssh:// URI must be used
+ if setting.SSH.Port != 22 {
+ sshHost := net.JoinHostPort(sshDomain, strconv.Itoa(setting.SSH.Port))
+ return fmt.Sprintf("ssh://%s@%s/%s/%s.git", sshUser, sshHost, url.PathEscape(ownerName), url.PathEscape(repoName))
+ }
+
+ // for standard port, it can use a shorter URI (without the port)
+ sshHost := sshDomain
+ if ip := net.ParseIP(sshHost); ip != nil && ip.To4() == nil {
+ sshHost = "[" + sshHost + "]" // for IPv6 address, wrap it with brackets
+ }
+ if setting.Repository.UseCompatSSHURI {
+ return fmt.Sprintf("ssh://%s@%s/%s/%s.git", sshUser, sshHost, url.PathEscape(ownerName), url.PathEscape(repoName))
+ }
+ return fmt.Sprintf("%s@%s:%s/%s.git", sshUser, sshHost, url.PathEscape(ownerName), url.PathEscape(repoName))
+}
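+
+// For illustration (hypothetical settings: SSH user "git", SSH domain
+// "example.com"), the three branches above produce:
+//
+//    port 2222:                    "ssh://git@example.com:2222/owner/repo.git"
+//    port 22 with UseCompatSSHURI: "ssh://git@example.com/owner/repo.git"
+//    port 22 (scp-like form):      "git@example.com:owner/repo.git"
+//
+// An IPv6 domain such as "::1" is bracketed in the scp-like form:
+// "git@[::1]:owner/repo.git".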
+
+func (repo *Repository) cloneLink(isWiki bool) *CloneLink {
+ repoName := repo.Name
+ if isWiki {
+ repoName += ".wiki"
+ }
+
+ cl := new(CloneLink)
+ cl.SSH = ComposeSSHCloneURL(repo.OwnerName, repoName)
+ cl.HTTPS = ComposeHTTPSCloneURL(repo.OwnerName, repoName)
+ return cl
+}
+
+// CloneLink returns the clone URLs of the repository.
+func (repo *Repository) CloneLink() (cl *CloneLink) {
+ return repo.cloneLink(false)
+}
+
+// GetOriginalURLHostname returns the hostname of the original URL, or the raw URL if it cannot be parsed
+func (repo *Repository) GetOriginalURLHostname() string {
+ u, err := url.Parse(repo.OriginalURL)
+ if err != nil {
+ return repo.OriginalURL
+ }
+
+ return u.Host
+}
+
+// GetTrustModel will get the TrustModel for the repo or the default trust model
+func (repo *Repository) GetTrustModel() TrustModelType {
+ trustModel := repo.TrustModel
+ if trustModel == DefaultTrustModel {
+ trustModel = ToTrustModel(setting.Repository.Signing.DefaultTrustModel)
+ if trustModel == DefaultTrustModel {
+ return CollaboratorTrustModel
+ }
+ }
+ return trustModel
+}
+
+// MustNotBeArchived returns ErrRepoIsArchived if the repo is archived
+func (repo *Repository) MustNotBeArchived() error {
+ if repo.IsArchived {
+ return ErrRepoIsArchived{Repo: repo}
+ }
+ return nil
+}
+
+// __________ .__ __
+// \______ \ ____ ______ ____ _____|__|/ |_ ___________ ___.__.
+// | _// __ \\____ \ / _ \/ ___/ \ __\/ _ \_ __ < | |
+// | | \ ___/| |_> > <_> )___ \| || | ( <_> ) | \/\___ |
+// |____|_ /\___ > __/ \____/____ >__||__| \____/|__| / ____|
+// \/ \/|__| \/ \/
+
+// ErrRepoNotExist represents a "RepoNotExist" kind of error.
+type ErrRepoNotExist struct {
+ ID int64
+ UID int64
+ OwnerName string
+ Name string
+}
+
+// IsErrRepoNotExist checks if an error is a ErrRepoNotExist.
+func IsErrRepoNotExist(err error) bool {
+ _, ok := err.(ErrRepoNotExist)
+ return ok
+}
+
+func (err ErrRepoNotExist) Error() string {
+ return fmt.Sprintf("repository does not exist [id: %d, uid: %d, owner_name: %s, name: %s]",
+ err.ID, err.UID, err.OwnerName, err.Name)
+}
+
+// Unwrap unwraps this error as a ErrNotExist error
+func (err ErrRepoNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// GetRepositoryByOwnerAndName returns the repository by given owner name and repo name
+func GetRepositoryByOwnerAndName(ctx context.Context, ownerName, repoName string) (*Repository, error) {
+ var repo Repository
+ has, err := db.GetEngine(ctx).Table("repository").Select("repository.*").
+ Join("INNER", "`user`", "`user`.id = repository.owner_id").
+ Where("repository.lower_name = ?", strings.ToLower(repoName)).
+ And("`user`.lower_name = ?", strings.ToLower(ownerName)).
+ Get(&repo)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrRepoNotExist{0, 0, ownerName, repoName}
+ }
+ return &repo, nil
+}
+
+// GetRepositoryByName returns the repository with the given name under the owner, if it exists.
+func GetRepositoryByName(ctx context.Context, ownerID int64, name string) (*Repository, error) {
+ var repo Repository
+ has, err := db.GetEngine(ctx).
+ Where("`owner_id`=?", ownerID).
+ And("`lower_name`=?", strings.ToLower(name)).
+ NoAutoCondition().
+ Get(&repo)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrRepoNotExist{0, ownerID, "", name}
+ }
+ return &repo, err
+}
+
+// getRepositoryURLPathSegments returns the segments (owner, reponame) extracted from a URL
+func getRepositoryURLPathSegments(repoURL string) []string {
+ if strings.HasPrefix(repoURL, setting.AppURL) {
+ return strings.Split(strings.TrimPrefix(repoURL, setting.AppURL), "/")
+ }
+
+ sshURLVariants := [4]string{
+ setting.SSH.Domain + ":",
+ setting.SSH.User + "@" + setting.SSH.Domain + ":",
+ "git+ssh://" + setting.SSH.Domain + "/",
+ "git+ssh://" + setting.SSH.User + "@" + setting.SSH.Domain + "/",
+ }
+
+ for _, sshURL := range sshURLVariants {
+ if strings.HasPrefix(repoURL, sshURL) {
+ return strings.Split(strings.TrimPrefix(repoURL, sshURL), "/")
+ }
+ }
+
+ return nil
+}
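+
+// For illustration, with hypothetical settings (AppURL "https://my.domain/",
+// SSH user "git", SSH domain "my.domain"), each of the following inputs
+// reduces to the segments ["owner", "repo.git"]:
+//
+//    https://my.domain/owner/repo.git
+//    git@my.domain:owner/repo.git
+//    git+ssh://git@my.domain/owner/repo.git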
+
+// GetRepositoryByURL returns the repository for the given URL
+func GetRepositoryByURL(ctx context.Context, repoURL string) (*Repository, error) {
+ // possible URLs for a git repository:
+ // https://my.domain/sub-path/<owner>/<repo>.git
+ // https://my.domain/sub-path/<owner>/<repo>
+ // git+ssh://user@my.domain/<owner>/<repo>.git
+ // git+ssh://user@my.domain/<owner>/<repo>
+ // user@my.domain:<owner>/<repo>.git
+ // user@my.domain:<owner>/<repo>
+
+ pathSegments := getRepositoryURLPathSegments(repoURL)
+
+ if len(pathSegments) != 2 {
+ return nil, fmt.Errorf("unknown or malformed repository URL")
+ }
+
+ ownerName := pathSegments[0]
+ repoName := strings.TrimSuffix(pathSegments[1], ".git")
+ return GetRepositoryByOwnerAndName(ctx, ownerName, repoName)
+}
+
+// GetRepositoryByID returns the repository with the given ID, if it exists.
+func GetRepositoryByID(ctx context.Context, id int64) (*Repository, error) {
+ repo := new(Repository)
+ has, err := db.GetEngine(ctx).ID(id).Get(repo)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrRepoNotExist{id, 0, "", ""}
+ }
+ return repo, nil
+}
+
+// GetRepositoriesMapByIDs returns a map of repositories, keyed by ID, for the given ID slice.
+func GetRepositoriesMapByIDs(ctx context.Context, ids []int64) (map[int64]*Repository, error) {
+ repos := make(map[int64]*Repository, len(ids))
+ return repos, db.GetEngine(ctx).In("id", ids).Find(&repos)
+}
+
+// IsRepositoryModelOrDirExist returns true if a repository with the given name under the user already exists, either in the database or on disk.
+func IsRepositoryModelOrDirExist(ctx context.Context, u *user_model.User, repoName string) (bool, error) {
+ has, err := IsRepositoryModelExist(ctx, u, repoName)
+ if err != nil {
+ return false, err
+ }
+ isDir, err := util.IsDir(RepoPath(u.Name, repoName))
+ return has || isDir, err
+}
+
+// IsRepositoryModelExist returns true if a repository row with the given name under the user already exists in the database
+func IsRepositoryModelExist(ctx context.Context, u *user_model.User, repoName string) (bool, error) {
+ return db.GetEngine(ctx).Get(&Repository{
+ OwnerID: u.ID,
+ LowerName: strings.ToLower(repoName),
+ })
+}
+
+// GetTemplateRepo returns the template repository a generated repository was
+// created from, or an error on failure (NOTE: for non-generated
+// repositories it returns nil without an error)
+func GetTemplateRepo(ctx context.Context, repo *Repository) (*Repository, error) {
+ if !repo.IsGenerated() {
+ return nil, nil
+ }
+
+ return GetRepositoryByID(ctx, repo.TemplateID)
+}
+
+// TemplateRepo returns the repository this one was generated from (its template), or nil on error or when there is none
+func (repo *Repository) TemplateRepo(ctx context.Context) *Repository {
+ repo, err := GetTemplateRepo(ctx, repo)
+ if err != nil {
+ log.Error("TemplateRepo: %v", err)
+ return nil
+ }
+ return repo
+}
+
+type CountRepositoryOptions struct {
+ OwnerID int64
+ Private optional.Option[bool]
+}
+
+// CountRepositories returns the number of repositories matching the given
+// options; when opts.Private is unset, both private and public
+// repositories are counted.
+func CountRepositories(ctx context.Context, opts CountRepositoryOptions) (int64, error) {
+ sess := db.GetEngine(ctx).Where("id > 0")
+
+ if opts.OwnerID > 0 {
+ sess.And("owner_id = ?", opts.OwnerID)
+ }
+ if opts.Private.Has() {
+ sess.And("is_private=?", opts.Private.Value())
+ }
+
+ count, err := sess.Count(new(Repository))
+ if err != nil {
+ return 0, fmt.Errorf("countRepositories: %w", err)
+ }
+ return count, nil
+}
+
+// UpdateRepoIssueNumbers updates one of a repository's (open|closed) (issue|PR) counters with the current count
+func UpdateRepoIssueNumbers(ctx context.Context, repoID int64, isPull, isClosed bool) error {
+ field := "num_"
+ if isClosed {
+ field += "closed_"
+ }
+ if isPull {
+ field += "pulls"
+ } else {
+ field += "issues"
+ }
+
+ subQuery := builder.Select("count(*)").
+ From("issue").Where(builder.Eq{
+ "repo_id": repoID,
+ "is_pull": isPull,
+ }.And(builder.If(isClosed, builder.Eq{"is_closed": isClosed})))
+
+ // builder.Update(cond) will generate SQL like UPDATE ... SET cond
+ query := builder.Update(builder.Eq{field: subQuery}).
+ From("repository").
+ Where(builder.Eq{"id": repoID})
+ _, err := db.Exec(ctx, query)
+ return err
+}
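+
+// As a rough sketch of the generated SQL (parameters shown as ?), a call with
+// isPull = true and isClosed = true produces approximately:
+//
+//    UPDATE repository SET num_closed_pulls = (
+//        SELECT count(*) FROM issue
+//        WHERE repo_id = ? AND is_pull = ? AND is_closed = ?
+//    ) WHERE id = ?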
+
+// CountNullArchivedRepository counts the repositories whose is_archived column is null
+func CountNullArchivedRepository(ctx context.Context) (int64, error) {
+ return db.GetEngine(ctx).Where(builder.IsNull{"is_archived"}).Count(new(Repository))
+}
+
+// FixNullArchivedRepository sets is_archived to false where it is null
+func FixNullArchivedRepository(ctx context.Context) (int64, error) {
+ return db.GetEngine(ctx).Where(builder.IsNull{"is_archived"}).Cols("is_archived").NoAutoTime().Update(&Repository{
+ IsArchived: false,
+ })
+}
+
+// UpdateRepositoryOwnerName updates the owner name of all repositories owned by the user
+func UpdateRepositoryOwnerName(ctx context.Context, oldUserName, newUserName string) error {
+ if _, err := db.GetEngine(ctx).Exec("UPDATE `repository` SET owner_name=? WHERE owner_name=?", newUserName, oldUserName); err != nil {
+ return fmt.Errorf("change repo owner name: %w", err)
+ }
+ return nil
+}
diff --git a/models/repo/repo_flags.go b/models/repo/repo_flags.go
new file mode 100644
index 0000000..de76ed2
--- /dev/null
+++ b/models/repo/repo_flags.go
@@ -0,0 +1,102 @@
+// Copyright 2024 The Forgejo Authors c/o Codeberg e.V.. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+
+ "xorm.io/builder"
+)
+
+// RepoFlag represents a single flag against a repository
+type RepoFlag struct { //revive:disable-line:exported
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"UNIQUE(s) INDEX"`
+ Name string `xorm:"UNIQUE(s) INDEX"`
+}
+
+func init() {
+ db.RegisterModel(new(RepoFlag))
+}
+
+// TableName provides the real table name
+func (RepoFlag) TableName() string {
+ return "forgejo_repo_flag"
+}
+
+// ListFlags returns the array of flags on the repo.
+func (repo *Repository) ListFlags(ctx context.Context) ([]RepoFlag, error) {
+ var flags []RepoFlag
+ err := db.GetEngine(ctx).Table(&RepoFlag{}).Where("repo_id = ?", repo.ID).Find(&flags)
+ if err != nil {
+ return nil, err
+ }
+ return flags, nil
+}
+
+// IsFlagged returns whether a repo has any flags or not
+func (repo *Repository) IsFlagged(ctx context.Context) bool {
+ has, _ := db.Exist[RepoFlag](ctx, builder.Eq{"repo_id": repo.ID})
+ return has
+}
+
+// GetFlag returns whether the repo has the named flag, and the RepoFlag itself if it does
+func (repo *Repository) GetFlag(ctx context.Context, flagName string) (bool, *RepoFlag, error) {
+ flag, has, err := db.Get[RepoFlag](ctx, builder.Eq{"repo_id": repo.ID, "name": flagName})
+ if err != nil {
+ return false, nil, err
+ }
+ return has, flag, nil
+}
+
+// HasFlag returns true if a repo has a given flag, false otherwise
+func (repo *Repository) HasFlag(ctx context.Context, flagName string) bool {
+ has, _ := db.Exist[RepoFlag](ctx, builder.Eq{"repo_id": repo.ID, "name": flagName})
+ return has
+}
+
+// AddFlag adds a new flag to the repo
+func (repo *Repository) AddFlag(ctx context.Context, flagName string) error {
+ return db.Insert(ctx, RepoFlag{
+ RepoID: repo.ID,
+ Name: flagName,
+ })
+}
+
+// DeleteFlag removes a flag from the repo
+func (repo *Repository) DeleteFlag(ctx context.Context, flagName string) (int64, error) {
+ return db.DeleteByBean(ctx, &RepoFlag{RepoID: repo.ID, Name: flagName})
+}
+
+// ReplaceAllFlags replaces all flags of a repo with a new set
+func (repo *Repository) ReplaceAllFlags(ctx context.Context, flagNames []string) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if err := db.DeleteBeans(ctx, &RepoFlag{RepoID: repo.ID}); err != nil {
+ return err
+ }
+
+ if len(flagNames) == 0 {
+ return committer.Commit()
+ }
+
+ var flags []RepoFlag
+ for _, name := range flagNames {
+ flags = append(flags, RepoFlag{
+ RepoID: repo.ID,
+ Name: name,
+ })
+ }
+ if err := db.Insert(ctx, &flags); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
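+
+// exampleReplaceAllFlags is an illustrative sketch (not part of the original
+// source; the flag names are hypothetical). Because the delete and the insert
+// above share one transaction, readers never observe a partially replaced set.
+func exampleReplaceAllFlags(ctx context.Context, repo *Repository) error {
+ return repo.ReplaceAllFlags(ctx, []string{"no-license", "dmca"})
+}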
diff --git a/models/repo/repo_flags_test.go b/models/repo/repo_flags_test.go
new file mode 100644
index 0000000..bccefcf
--- /dev/null
+++ b/models/repo/repo_flags_test.go
@@ -0,0 +1,115 @@
+// Copyright 2024 The Forgejo Authors c/o Codeberg e.V.. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestRepositoryFlags(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 10})
+
+ // ********************
+ // ** NEGATIVE TESTS **
+ // ********************
+
+ // Unless we add flags, the repo has none
+ flags, err := repo.ListFlags(db.DefaultContext)
+ require.NoError(t, err)
+ assert.Empty(t, flags)
+
+ // If the repo has no flags, it is not flagged
+ flagged := repo.IsFlagged(db.DefaultContext)
+ assert.False(t, flagged)
+
+ // Trying to find a flag when there is none
+ has := repo.HasFlag(db.DefaultContext, "foo")
+ assert.False(t, has)
+
+ // Trying to retrieve a non-existent flag indicates not found
+ has, _, err = repo.GetFlag(db.DefaultContext, "foo")
+ require.NoError(t, err)
+ assert.False(t, has)
+
+ // Deleting a non-existent flag is a no-op: no error, nothing deleted
+ deleted, err := repo.DeleteFlag(db.DefaultContext, "no-such-flag")
+ require.NoError(t, err)
+ assert.Equal(t, int64(0), deleted)
+
+ // ********************
+ // ** POSITIVE TESTS **
+ // ********************
+
+ // Adding a flag works
+ err = repo.AddFlag(db.DefaultContext, "foo")
+ require.NoError(t, err)
+
+ // Adding it again fails
+ err = repo.AddFlag(db.DefaultContext, "foo")
+ require.Error(t, err)
+
+ // Listing flags includes the one we added
+ flags, err = repo.ListFlags(db.DefaultContext)
+ require.NoError(t, err)
+ assert.Len(t, flags, 1)
+ assert.Equal(t, "foo", flags[0].Name)
+
+ // With a flag added, the repo is flagged
+ flagged = repo.IsFlagged(db.DefaultContext)
+ assert.True(t, flagged)
+
+ // The flag can be found
+ has = repo.HasFlag(db.DefaultContext, "foo")
+ assert.True(t, has)
+
+ // Added flag can be retrieved
+ _, flag, err := repo.GetFlag(db.DefaultContext, "foo")
+ require.NoError(t, err)
+ assert.Equal(t, "foo", flag.Name)
+
+ // Deleting a flag works
+ deleted, err = repo.DeleteFlag(db.DefaultContext, "foo")
+ require.NoError(t, err)
+ assert.Equal(t, int64(1), deleted)
+
+ // The list is now empty
+ flags, err = repo.ListFlags(db.DefaultContext)
+ require.NoError(t, err)
+ assert.Empty(t, flags)
+
+ // Replacing the (currently empty) flag set works
+ err = repo.ReplaceAllFlags(db.DefaultContext, []string{"bar"})
+ require.NoError(t, err)
+
+ // The repo is now flagged with "bar"
+ has = repo.HasFlag(db.DefaultContext, "bar")
+ assert.True(t, has)
+
+ // Replacing one flag set with another works
+ err = repo.ReplaceAllFlags(db.DefaultContext, []string{"baz", "quux"})
+ require.NoError(t, err)
+
+ // The repo now has two flags
+ flags, err = repo.ListFlags(db.DefaultContext)
+ require.NoError(t, err)
+ assert.Len(t, flags, 2)
+ assert.Equal(t, "baz", flags[0].Name)
+ assert.Equal(t, "quux", flags[1].Name)
+
+ // Replacing flags with an empty set deletes all flags
+ err = repo.ReplaceAllFlags(db.DefaultContext, []string{})
+ require.NoError(t, err)
+
+ // The repo is now unflagged
+ flagged = repo.IsFlagged(db.DefaultContext)
+ assert.False(t, flagged)
+}
diff --git a/models/repo/repo_indexer.go b/models/repo/repo_indexer.go
new file mode 100644
index 0000000..6e19d8f
--- /dev/null
+++ b/models/repo/repo_indexer.go
@@ -0,0 +1,114 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+
+ "xorm.io/builder"
+)
+
+// RepoIndexerType specifies the repository indexer type
+type RepoIndexerType int //revive:disable-line:exported
+
+const (
+ // RepoIndexerTypeCode code indexer
+ RepoIndexerTypeCode RepoIndexerType = iota // 0
+ // RepoIndexerTypeStats repository stats indexer
+ RepoIndexerTypeStats // 1
+)
+
+// RepoIndexerStatus status of a repo's entry in the repo indexer
+// For now, implicitly refers to default branch
+type RepoIndexerStatus struct { //revive:disable-line:exported
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"INDEX(s)"`
+ CommitSha string `xorm:"VARCHAR(64)"`
+ IndexerType RepoIndexerType `xorm:"INDEX(s) NOT NULL DEFAULT 0"`
+}
+
+func init() {
+ db.RegisterModel(new(RepoIndexerStatus))
+}
+
+// GetUnindexedRepos returns the IDs of repositories which do not yet have an indexer status
+func GetUnindexedRepos(ctx context.Context, indexerType RepoIndexerType, maxRepoID int64, page, pageSize int) ([]int64, error) {
+ ids := make([]int64, 0, 50)
+ cond := builder.Cond(builder.IsNull{
+ "repo_indexer_status.id",
+ }).And(builder.Eq{
+ "repository.is_empty": false,
+ })
+ sess := db.GetEngine(ctx).Table("repository").Join("LEFT OUTER", "repo_indexer_status", "repository.id = repo_indexer_status.repo_id AND repo_indexer_status.indexer_type = ?", indexerType)
+ if maxRepoID > 0 {
+ cond = builder.And(cond, builder.Lte{
+ "repository.id": maxRepoID,
+ })
+ }
+ if page >= 0 && pageSize > 0 {
+ start := 0
+ if page > 0 {
+ start = (page - 1) * pageSize
+ }
+ sess.Limit(pageSize, start)
+ }
+
+ sess.Where(cond).Cols("repository.id").Desc("repository.id")
+ err := sess.Find(&ids)
+ return ids, err
+}
+
+// GetIndexerStatus loads the repo's indexer status for the given indexer type
+func GetIndexerStatus(ctx context.Context, repo *Repository, indexerType RepoIndexerType) (*RepoIndexerStatus, error) {
+ switch indexerType {
+ case RepoIndexerTypeCode:
+ if repo.CodeIndexerStatus != nil {
+ return repo.CodeIndexerStatus, nil
+ }
+ case RepoIndexerTypeStats:
+ if repo.StatsIndexerStatus != nil {
+ return repo.StatsIndexerStatus, nil
+ }
+ }
+ status := &RepoIndexerStatus{RepoID: repo.ID}
+ if has, err := db.GetEngine(ctx).Where("`indexer_type` = ?", indexerType).Get(status); err != nil {
+ return nil, err
+ } else if !has {
+ status.IndexerType = indexerType
+ status.CommitSha = ""
+ }
+ switch indexerType {
+ case RepoIndexerTypeCode:
+ repo.CodeIndexerStatus = status
+ case RepoIndexerTypeStats:
+ repo.StatsIndexerStatus = status
+ }
+ return status, nil
+}
+
+// UpdateIndexerStatus updates indexer status
+func UpdateIndexerStatus(ctx context.Context, repo *Repository, indexerType RepoIndexerType, sha string) error {
+ status, err := GetIndexerStatus(ctx, repo, indexerType)
+ if err != nil {
+ return fmt.Errorf("UpdateIndexerStatus: Unable to getIndexerStatus for repo: %s Error: %w", repo.FullName(), err)
+ }
+
+ if len(status.CommitSha) == 0 {
+ status.CommitSha = sha
+ if err := db.Insert(ctx, status); err != nil {
+ return fmt.Errorf("UpdateIndexerStatus: Unable to insert repoIndexerStatus for repo: %s Sha: %s Error: %w", repo.FullName(), sha, err)
+ }
+ return nil
+ }
+ status.CommitSha = sha
+ _, err = db.GetEngine(ctx).ID(status.ID).Cols("commit_sha").
+ Update(status)
+ if err != nil {
+ return fmt.Errorf("UpdateIndexerStatus: Unable to update repoIndexerStatus for repo: %s Sha: %s Error: %w", repo.FullName(), sha, err)
+ }
+ return nil
+}
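+
+// exampleUpdateIndexerStatus is an illustrative sketch (not part of the
+// original source; the SHA is hypothetical). The first call for a repository
+// inserts a status row; later calls only update its commit_sha column.
+func exampleUpdateIndexerStatus(ctx context.Context, repo *Repository) error {
+ return UpdateIndexerStatus(ctx, repo, RepoIndexerTypeCode, "0123456789abcdef0123456789abcdef01234567")
+}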
diff --git a/models/repo/repo_list.go b/models/repo/repo_list.go
new file mode 100644
index 0000000..fc51f64
--- /dev/null
+++ b/models/repo/repo_list.go
@@ -0,0 +1,757 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/optional"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// FindReposMapByIDs finds the repositories with the given IDs and fills the provided map
+func FindReposMapByIDs(ctx context.Context, repoIDs []int64, res map[int64]*Repository) error {
+ return db.GetEngine(ctx).In("id", repoIDs).Find(&res)
+}
+
+// RepositoryListDefaultPageSize is the default number of repositories
+// to load in memory when running administrative tasks on all (or almost
+// all) of them.
+// The number should be low enough to avoid filling up all RAM with
+// repository data...
+const RepositoryListDefaultPageSize = 64
+
+// RepositoryList contains a list of repositories
+type RepositoryList []*Repository
+
+func (repos RepositoryList) Len() int {
+ return len(repos)
+}
+
+func (repos RepositoryList) Less(i, j int) bool {
+ return repos[i].FullName() < repos[j].FullName()
+}
+
+func (repos RepositoryList) Swap(i, j int) {
+ repos[i], repos[j] = repos[j], repos[i]
+}
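+
+// Len, Less and Swap make RepositoryList satisfy sort.Interface, so a list
+// can be ordered by full name with the standard library (illustrative;
+// requires importing "sort"):
+//
+//    sort.Sort(repos)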
+
+// ValuesRepository converts a repository map to a list
+// FIXME: Remove in favor of maps.Values when MIN_GO_VERSION >= 1.18
+func ValuesRepository(m map[int64]*Repository) []*Repository {
+ values := make([]*Repository, 0, len(m))
+ for _, v := range m {
+ values = append(values, v)
+ }
+ return values
+}
+
+// RepositoryListOfMap makes a list from the values of the map
+func RepositoryListOfMap(repoMap map[int64]*Repository) RepositoryList {
+ return RepositoryList(ValuesRepository(repoMap))
+}
+
+// LoadUnits loads the units of the repositories in the list, skipping globally disabled unit types
+func (repos RepositoryList) LoadUnits(ctx context.Context) error {
+ if len(repos) == 0 {
+ return nil
+ }
+
+ // Load units.
+ units := make([]*RepoUnit, 0, len(repos)*6)
+ if err := db.GetEngine(ctx).
+ In("repo_id", repos.IDs()).
+ Find(&units); err != nil {
+ return fmt.Errorf("find units: %w", err)
+ }
+
+ unitsMap := make(map[int64][]*RepoUnit, len(repos))
+ for _, unit := range units {
+ if !unit.Type.UnitGlobalDisabled() {
+ unitsMap[unit.RepoID] = append(unitsMap[unit.RepoID], unit)
+ }
+ }
+
+ for _, repo := range repos {
+ repo.Units = unitsMap[repo.ID]
+ }
+
+ return nil
+}
+
+// IDs returns the IDs of all repositories in the list
+func (repos RepositoryList) IDs() []int64 {
+ repoIDs := make([]int64, len(repos))
+ for i := range repos {
+ repoIDs[i] = repos[i].ID
+ }
+ return repoIDs
+}
+
+// LoadAttributes loads the attributes for the given RepositoryList
+func (repos RepositoryList) LoadAttributes(ctx context.Context) error {
+ if len(repos) == 0 {
+ return nil
+ }
+
+ userIDs := container.FilterSlice(repos, func(repo *Repository) (int64, bool) {
+ return repo.OwnerID, true
+ })
+ repoIDs := make([]int64, len(repos))
+ for i := range repos {
+ repoIDs[i] = repos[i].ID
+ }
+
+ // Load owners.
+ users := make(map[int64]*user_model.User, len(userIDs))
+ if err := db.GetEngine(ctx).
+ Where("id > 0").
+ In("id", userIDs).
+ Find(&users); err != nil {
+ return fmt.Errorf("find users: %w", err)
+ }
+ for i := range repos {
+ repos[i].Owner = users[repos[i].OwnerID]
+ }
+
+ // Load primary language.
+ stats := make(LanguageStatList, 0, len(repos))
+ if err := db.GetEngine(ctx).
+ Where("`is_primary` = ? AND `language` != ?", true, "other").
+ In("`repo_id`", repoIDs).
+ Find(&stats); err != nil {
+ return fmt.Errorf("find primary languages: %w", err)
+ }
+ stats.LoadAttributes()
+ for i := range repos {
+ for _, st := range stats {
+ if st.RepoID == repos[i].ID {
+ repos[i].PrimaryLanguage = st
+ break
+ }
+ }
+ }
+
+ return nil
+}
+
+// SearchRepoOptions holds the search options
+type SearchRepoOptions struct {
+ db.ListOptions
+ Actor *user_model.User
+ Keyword string
+ OwnerID int64
+ PriorityOwnerID int64
+ TeamID int64
+ OrderBy db.SearchOrderBy
+ Private bool // Include private repositories in results
+ StarredByID int64
+ WatchedByID int64
+ AllPublic bool // Include also all public repositories of users and public organisations
+ AllLimited bool // Include also all public repositories of limited organisations
+ // None -> include public and private
+ // True -> include just private
+ // False -> include just public
+ IsPrivate optional.Option[bool]
+ // None -> include collaborative AND non-collaborative
+ // True -> include just collaborative
+ // False -> include just non-collaborative
+ Collaborate optional.Option[bool]
+ // What type of unit the user can be collaborative in,
+ // it is ignored if Collaborate is False.
+ // TypeInvalid means any unit type.
+ UnitType unit.Type
+ // None -> include forks AND non-forks
+ // True -> include just forks
+ // False -> include just non-forks
+ Fork optional.Option[bool]
+ // None -> include templates AND non-templates
+ // True -> include just templates
+ // False -> include just non-templates
+ Template optional.Option[bool]
+ // None -> include mirrors AND non-mirrors
+ // True -> include just mirrors
+ // False -> include just non-mirrors
+ Mirror optional.Option[bool]
+ // None -> include archived AND non-archived
+ // True -> include just archived
+ // False -> include just non-archived
+ Archived optional.Option[bool]
+ // only search topic name
+ TopicOnly bool
+ // only search repositories with specified primary language
+ Language string
+ // include description in keyword search
+ IncludeDescription bool
+ // None -> include has milestones AND has no milestone
+ // True -> include just has milestones
+ // False -> include just has no milestone
+ HasMilestones optional.Option[bool]
+ // LowerNames represents valid lower names to restrict to
+ LowerNames []string
+ // When set to true, apply additional filters:
+ // - Hide forks when opts.Fork is unset.
+ // - Hide repositories that have neither a description, an avatar, nor any topics.
+ OnlyShowRelevant bool
+}
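+
+// exampleSearchRepoOptions is an illustrative sketch (not part of the original
+// source; ownerID is hypothetical). It searches the owner's public, non-fork
+// repositories, twenty per page.
+func exampleSearchRepoOptions(ctx context.Context, ownerID int64) (RepositoryList, int64, error) {
+ return SearchRepository(ctx, &SearchRepoOptions{
+ ListOptions: db.ListOptions{Page: 1, PageSize: 20},
+ OwnerID: ownerID,
+ Fork: optional.Some(false),
+ })
+}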
+
+// UserOwnedRepoCond returns a condition matching repositories owned by the given user
+func UserOwnedRepoCond(userID int64) builder.Cond {
+ return builder.Eq{
+ "repository.owner_id": userID,
+ }
+}
+
+// UserAssignedRepoCond returns a condition matching public repositories that have issues assigned to the user
+func UserAssignedRepoCond(id string, userID int64) builder.Cond {
+ return builder.And(
+ builder.Eq{
+ "repository.is_private": false,
+ },
+ builder.In(id,
+ builder.Select("issue.repo_id").From("issue_assignees").
+ InnerJoin("issue", "issue.id = issue_assignees.issue_id").
+ Where(builder.Eq{
+ "issue_assignees.assignee_id": userID,
+ }),
+ ),
+ )
+}
+
+// UserCreateIssueRepoCond returns a condition matching public repositories that have issues (or pulls) created by the user
+func UserCreateIssueRepoCond(id string, userID int64, isPull bool) builder.Cond {
+ return builder.And(
+ builder.Eq{
+ "repository.is_private": false,
+ },
+ builder.In(id,
+ builder.Select("issue.repo_id").From("issue").
+ Where(builder.Eq{
+ "issue.poster_id": userID,
+ "issue.is_pull": isPull,
+ }),
+ ),
+ )
+}
+
+// UserMentionedRepoCond returns a condition matching public repositories that have issues mentioning the user
+func UserMentionedRepoCond(id string, userID int64) builder.Cond {
+ return builder.And(
+ builder.Eq{
+ "repository.is_private": false,
+ },
+ builder.In(id,
+ builder.Select("issue.repo_id").From("issue_user").
+ InnerJoin("issue", "issue.id = issue_user.issue_id").
+ Where(builder.Eq{
+ "issue_user.is_mentioned": true,
+ "issue_user.uid": userID,
+ }),
+ ),
+ )
+}
+
+// UserAccessRepoCond returns a condition for selecting all repositories a user has unit independent access to
+func UserAccessRepoCond(idStr string, userID int64) builder.Cond {
+ return builder.In(idStr, builder.Select("repo_id").
+ From("`access`").
+ Where(builder.And(
+ builder.Eq{"`access`.user_id": userID},
+ builder.Gt{"`access`.mode": int(perm.AccessModeNone)},
+ )),
+ )
+}
+
+// UserCollaborationRepoCond returns a condition for selecting all repositories the user is a collaborator in
+func UserCollaborationRepoCond(idStr string, userID int64) builder.Cond {
+ return builder.In(idStr, builder.Select("repo_id").
+ From("`collaboration`").
+ Where(builder.And(
+ builder.Eq{"`collaboration`.user_id": userID},
+ )),
+ )
+}
+
+// UserOrgTeamRepoCond selects repos that the given user has access to through team membership
+func UserOrgTeamRepoCond(idStr string, userID int64) builder.Cond {
+ return builder.In(idStr, userOrgTeamRepoBuilder(userID))
+}
+
+// userOrgTeamRepoBuilder returns repo ids where user's teams can access.
+func userOrgTeamRepoBuilder(userID int64) *builder.Builder {
+ return builder.Select("`team_repo`.repo_id").
+ From("team_repo").
+ Join("INNER", "team_user", "`team_user`.team_id = `team_repo`.team_id").
+ Where(builder.Eq{"`team_user`.uid": userID})
+}
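+
+// Roughly the subquery this builds (sketch):
+//
+//    SELECT `team_repo`.repo_id FROM team_repo
+//    INNER JOIN team_user ON `team_user`.team_id = `team_repo`.team_id
+//    WHERE `team_user`.uid = ?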
+
+// userOrgTeamUnitRepoBuilder returns repo ids where user's teams can access the special unit.
+func userOrgTeamUnitRepoBuilder(userID int64, unitType unit.Type) *builder.Builder {
+ return userOrgTeamRepoBuilder(userID).
+ Join("INNER", "team_unit", "`team_unit`.team_id = `team_repo`.team_id").
+ Where(builder.Eq{"`team_unit`.`type`": unitType}).
+ And(builder.Gt{"`team_unit`.`access_mode`": int(perm.AccessModeNone)})
+}
+
+// userOrgTeamUnitRepoCond returns a condition to select repo ids where user's teams can access the special unit.
+func userOrgTeamUnitRepoCond(idStr string, userID int64, unitType unit.Type) builder.Cond {
+ return builder.In(idStr, userOrgTeamUnitRepoBuilder(userID, unitType))
+}
+
+// UserOrgUnitRepoCond selects repos that the given user has access to through org and the special unit
+func UserOrgUnitRepoCond(idStr string, userID, orgID int64, unitType unit.Type) builder.Cond {
+ return builder.In(idStr,
+ userOrgTeamUnitRepoBuilder(userID, unitType).
+ And(builder.Eq{"`team_unit`.org_id": orgID}),
+ )
+}
+
+// userOrgPublicRepoCond returns the condition that one user could access all public repositories in organizations
+func userOrgPublicRepoCond(userID int64) builder.Cond {
+ return builder.And(
+ builder.Eq{"`repository`.is_private": false},
+ builder.In("`repository`.owner_id",
+ builder.Select("`org_user`.org_id").
+ From("org_user").
+ Where(builder.Eq{"`org_user`.uid": userID}),
+ ),
+ )
+}
+
+// userOrgPublicRepoCondPrivate returns the condition that one user could access all public repositories in private organizations
+func userOrgPublicRepoCondPrivate(userID int64) builder.Cond {
+ return builder.And(
+ builder.Eq{"`repository`.is_private": false},
+ builder.In("`repository`.owner_id",
+ builder.Select("`org_user`.org_id").
+ From("org_user").
+ Join("INNER", "`user`", "`user`.id = `org_user`.org_id").
+ Where(builder.Eq{
+ "`org_user`.uid": userID,
+ "`user`.`type`": user_model.UserTypeOrganization,
+ "`user`.visibility": structs.VisibleTypePrivate,
+ }),
+ ),
+ )
+}
+
+// UserOrgPublicUnitRepoCond returns the condition that one user could access all public repositories in the special organization
+func UserOrgPublicUnitRepoCond(userID, orgID int64) builder.Cond {
+ return userOrgPublicRepoCond(userID).
+ And(builder.Eq{"`repository`.owner_id": orgID})
+}
+
+// SearchRepositoryCondition creates a query condition according search repository options
+func SearchRepositoryCondition(opts *SearchRepoOptions) builder.Cond {
+ cond := builder.NewCond()
+
+ if opts.Private {
+ if opts.Actor != nil && !opts.Actor.IsAdmin && opts.Actor.ID != opts.OwnerID {
+ // OK we're in the context of a User
+ cond = cond.And(AccessibleRepositoryCondition(opts.Actor, unit.TypeInvalid))
+ }
+ } else {
+ // Not looking at private organisations and users:
+ // we should be able to see all non-private repositories that
+ // aren't in a private or limited organisation.
+ cond = cond.And(
+ builder.Eq{"is_private": false},
+ builder.NotIn("owner_id", builder.Select("id").From("`user`").Where(
+ builder.Or(builder.Eq{"visibility": structs.VisibleTypeLimited}, builder.Eq{"visibility": structs.VisibleTypePrivate}),
+ )))
+ }
+
+ if opts.IsPrivate.Has() {
+ cond = cond.And(builder.Eq{"is_private": opts.IsPrivate.Value()})
+ }
+
+ if opts.Template.Has() {
+ cond = cond.And(builder.Eq{"is_template": opts.Template.Value()})
+ }
+
+ // Restrict to starred repositories
+ if opts.StarredByID > 0 {
+ cond = cond.And(builder.In("id", builder.Select("repo_id").From("star").Where(builder.Eq{"uid": opts.StarredByID})))
+ }
+
+ // Restrict to watched repositories
+ if opts.WatchedByID > 0 {
+ cond = cond.And(builder.In("id", builder.Select("repo_id").From("watch").Where(builder.Eq{"user_id": opts.WatchedByID})))
+ }
+
+ // Restrict repositories to those the OwnerID owns or contributes to as per opts.Collaborate
+ if opts.OwnerID > 0 {
+ accessCond := builder.NewCond()
+ if !opts.Collaborate.Value() {
+ accessCond = builder.Eq{"owner_id": opts.OwnerID}
+ }
+
+ if opts.Collaborate.ValueOrDefault(true) {
+ // A Collaboration is:
+
+ collaborateCond := builder.NewCond()
+ // 1. Repository we don't own
+ collaborateCond = collaborateCond.And(builder.Neq{"owner_id": opts.OwnerID})
+ // 2. But we can see because of:
+ {
+ userAccessCond := builder.NewCond()
+ // A. We have unit independent access
+ userAccessCond = userAccessCond.Or(UserAccessRepoCond("`repository`.id", opts.OwnerID))
+ // B. We are in a team with access to it
+ if opts.UnitType == unit.TypeInvalid {
+ userAccessCond = userAccessCond.Or(UserOrgTeamRepoCond("`repository`.id", opts.OwnerID))
+ } else {
+ userAccessCond = userAccessCond.Or(userOrgTeamUnitRepoCond("`repository`.id", opts.OwnerID, opts.UnitType))
+ }
+ // C. Public repositories in organizations that we are member of
+ userAccessCond = userAccessCond.Or(userOrgPublicRepoCondPrivate(opts.OwnerID))
+ collaborateCond = collaborateCond.And(userAccessCond)
+ }
+ if !opts.Private {
+ collaborateCond = collaborateCond.And(builder.Expr("owner_id NOT IN (SELECT org_id FROM org_user WHERE org_user.uid = ? AND org_user.is_public = ?)", opts.OwnerID, false))
+ }
+
+ accessCond = accessCond.Or(collaborateCond)
+ }
+
+ if opts.AllPublic {
+ accessCond = accessCond.Or(builder.Eq{"is_private": false}.And(builder.In("owner_id", builder.Select("`user`.id").From("`user`").Where(builder.Eq{"`user`.visibility": structs.VisibleTypePublic}))))
+ }
+
+ if opts.AllLimited {
+ accessCond = accessCond.Or(builder.Eq{"is_private": false}.And(builder.In("owner_id", builder.Select("`user`.id").From("`user`").Where(builder.Eq{"`user`.visibility": structs.VisibleTypeLimited}))))
+ }
+
+ cond = cond.And(accessCond)
+ }
+
+ if opts.TeamID > 0 {
+ cond = cond.And(builder.In("`repository`.id", builder.Select("`team_repo`.repo_id").From("team_repo").Where(builder.Eq{"`team_repo`.team_id": opts.TeamID})))
+ }
+
+ if opts.Keyword != "" {
+ // separate keyword
+ subQueryCond := builder.NewCond()
+ for _, v := range strings.Split(opts.Keyword, ",") {
+ if opts.TopicOnly {
+ subQueryCond = subQueryCond.Or(builder.Eq{"topic.name": strings.ToLower(v)})
+ } else {
+ subQueryCond = subQueryCond.Or(builder.Like{"topic.name", strings.ToLower(v)})
+ }
+ }
+ subQuery := builder.Select("repo_topic.repo_id").From("repo_topic").
+ Join("INNER", "topic", "topic.id = repo_topic.topic_id").
+ Where(subQueryCond).
+ GroupBy("repo_topic.repo_id")
+
+ keywordCond := builder.In("id", subQuery)
+ if !opts.TopicOnly {
+ likes := builder.NewCond()
+ for _, v := range strings.Split(opts.Keyword, ",") {
+ likes = likes.Or(builder.Like{"lower_name", strings.ToLower(v)})
+
+ // If the string looks like "org/repo", match against that pattern too
+ if opts.TeamID == 0 && strings.Count(opts.Keyword, "/") == 1 {
+ pieces := strings.Split(opts.Keyword, "/")
+ ownerName := pieces[0]
+ repoName := pieces[1]
+ likes = likes.Or(builder.And(builder.Like{"owner_name", strings.ToLower(ownerName)}, builder.Like{"lower_name", strings.ToLower(repoName)}))
+ }
+
+ if opts.IncludeDescription {
+ likes = likes.Or(builder.Like{"LOWER(description)", strings.ToLower(v)})
+ }
+ }
+ keywordCond = keywordCond.Or(likes)
+ }
+ cond = cond.And(keywordCond)
+ }
+
+ if opts.Language != "" {
+ cond = cond.And(builder.In("id", builder.
+ Select("repo_id").
+ From("language_stat").
+ Where(builder.Eq{"language": opts.Language}).And(builder.Eq{"is_primary": true})))
+ }
+
+ if opts.Fork.Has() || opts.OnlyShowRelevant {
+ if opts.OnlyShowRelevant && !opts.Fork.Has() {
+ cond = cond.And(builder.Eq{"is_fork": false})
+ } else {
+ cond = cond.And(builder.Eq{"is_fork": opts.Fork.Value()})
+ }
+ }
+
+ if opts.Mirror.Has() {
+ cond = cond.And(builder.Eq{"is_mirror": opts.Mirror.Value()})
+ }
+
+ if opts.Actor != nil && opts.Actor.IsRestricted {
+ cond = cond.And(AccessibleRepositoryCondition(opts.Actor, unit.TypeInvalid))
+ }
+
+ if opts.Archived.Has() {
+ cond = cond.And(builder.Eq{"is_archived": opts.Archived.Value()})
+ }
+
+ if opts.HasMilestones.Has() {
+ if opts.HasMilestones.Value() {
+ cond = cond.And(builder.Gt{"num_milestones": 0})
+ } else {
+ cond = cond.And(builder.Eq{"num_milestones": 0}.Or(builder.IsNull{"num_milestones"}))
+ }
+ }
+
+ if opts.OnlyShowRelevant {
+ // Only show a repo that has at least a topic, an icon, or a description
+ subQueryCond := builder.NewCond()
+
+ // Topic checking. Topics are present.
+ if setting.Database.Type.IsPostgreSQL() { // postgres stores the topics as json and not as text
+ subQueryCond = subQueryCond.Or(builder.And(builder.NotNull{"topics"}, builder.Neq{"(topics)::text": "[]"}))
+ } else {
+ subQueryCond = subQueryCond.Or(builder.And(builder.Neq{"topics": "null"}, builder.Neq{"topics": "[]"}))
+ }
+
+ // Description checking. Description not empty
+ subQueryCond = subQueryCond.Or(builder.Neq{"description": ""})
+
+ // Repo has an avatar
+ subQueryCond = subQueryCond.Or(builder.Neq{"avatar": ""})
+
+ // Always hide repos that are empty
+ subQueryCond = subQueryCond.And(builder.Eq{"is_empty": false})
+
+ cond = cond.And(subQueryCond)
+ }
+
+ return cond
+}
+
+// SearchRepository returns repositories based on search options;
+// it returns results in the given range and the total number of results.
+func SearchRepository(ctx context.Context, opts *SearchRepoOptions) (RepositoryList, int64, error) {
+ cond := SearchRepositoryCondition(opts)
+ return SearchRepositoryByCondition(ctx, opts, cond, true)
+}
+
+// CountRepository counts repositories based on search options.
+func CountRepository(ctx context.Context, opts *SearchRepoOptions) (int64, error) {
+ return db.GetEngine(ctx).Where(SearchRepositoryCondition(opts)).Count(new(Repository))
+}
+
+// SearchRepositoryByCondition search repositories by condition
+func SearchRepositoryByCondition(ctx context.Context, opts *SearchRepoOptions, cond builder.Cond, loadAttributes bool) (RepositoryList, int64, error) {
+ sess, count, err := searchRepositoryByCondition(ctx, opts, cond)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ defaultSize := 50
+ if opts.PageSize > 0 {
+ defaultSize = opts.PageSize
+ }
+ repos := make(RepositoryList, 0, defaultSize)
+ if err := sess.Find(&repos); err != nil {
+ return nil, 0, fmt.Errorf("Repo: %w", err)
+ }
+
+ if opts.PageSize <= 0 {
+ count = int64(len(repos))
+ }
+
+ if loadAttributes {
+ if err := repos.LoadAttributes(ctx); err != nil {
+ return nil, 0, fmt.Errorf("LoadAttributes: %w", err)
+ }
+ }
+
+ return repos, count, nil
+}
+
+func searchRepositoryByCondition(ctx context.Context, opts *SearchRepoOptions, cond builder.Cond) (db.Engine, int64, error) {
+ if opts.Page <= 0 {
+ opts.Page = 1
+ }
+
+ if len(opts.OrderBy) == 0 {
+ opts.OrderBy = db.SearchOrderByAlphabetically
+ }
+
+ args := make([]any, 0)
+ if opts.PriorityOwnerID > 0 {
+ opts.OrderBy = db.SearchOrderBy(fmt.Sprintf("CASE WHEN owner_id = ? THEN 0 ELSE owner_id END, %s", opts.OrderBy))
+ args = append(args, opts.PriorityOwnerID)
+ } else if strings.Count(opts.Keyword, "/") == 1 {
+ // With "owner/repo" search terms, prioritise results which match the owner field
+ orgName := strings.Split(opts.Keyword, "/")[0]
+ opts.OrderBy = db.SearchOrderBy(fmt.Sprintf("CASE WHEN owner_name LIKE ? THEN 0 ELSE 1 END, %s", opts.OrderBy))
+ args = append(args, orgName)
+ }
+
+ sess := db.GetEngine(ctx)
+
+ var count int64
+ if opts.PageSize > 0 {
+ var err error
+ count, err = sess.
+ Where(cond).
+ Count(new(Repository))
+ if err != nil {
+ return nil, 0, fmt.Errorf("Count: %w", err)
+ }
+ }
+
+ sess = sess.Where(cond).OrderBy(opts.OrderBy.String(), args...)
+ if opts.PageSize > 0 {
+ sess = sess.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize)
+ }
+ return sess, count, nil
+}
+
+// SearchRepositoryIDsByCondition search repository IDs by given condition.
+func SearchRepositoryIDsByCondition(ctx context.Context, cond builder.Cond) ([]int64, error) {
+ repoIDs := make([]int64, 0, 10)
+ return repoIDs, db.GetEngine(ctx).
+ Table("repository").
+ Cols("id").
+ Where(cond).
+ Find(&repoIDs)
+}
+
+// AccessibleRepositoryCondition takes a user and returns a condition for checking if a repository is accessible
+func AccessibleRepositoryCondition(user *user_model.User, unitType unit.Type) builder.Cond {
+ cond := builder.NewCond()
+
+ if user == nil || !user.IsRestricted || user.ID <= 0 {
+ orgVisibilityLimit := []structs.VisibleType{structs.VisibleTypePrivate}
+ if user == nil || user.ID <= 0 {
+ orgVisibilityLimit = append(orgVisibilityLimit, structs.VisibleTypeLimited)
+ }
+ // 1. Be able to see all non-private repositories that:
+ cond = cond.Or(builder.And(
+ builder.Eq{"`repository`.is_private": false},
+ // - aren't in a private organisation/user, or in a limited organisation/user when the doer is not logged in.
+ builder.NotIn("`repository`.owner_id", builder.Select("id").From("`user`").Where(
+ builder.In("visibility", orgVisibilityLimit)))))
+ }
+
+ if user != nil {
+ // 2. Be able to see all repositories that we have unit independent access to
+ // 3. Be able to see all repositories through team membership(s)
+ if unitType == unit.TypeInvalid {
+ // Regardless of UnitType
+ cond = cond.Or(
+ UserAccessRepoCond("`repository`.id", user.ID),
+ UserOrgTeamRepoCond("`repository`.id", user.ID),
+ )
+ } else {
+ // For a specific UnitType
+ cond = cond.Or(
+ UserCollaborationRepoCond("`repository`.id", user.ID),
+ userOrgTeamUnitRepoCond("`repository`.id", user.ID, unitType),
+ )
+ }
+ // 4. Repositories that we directly own
+ cond = cond.Or(builder.Eq{"`repository`.owner_id": user.ID})
+ if !user.IsRestricted {
+ // 5. Be able to see all public repos in private organizations that we are an org_user of
+ cond = cond.Or(userOrgPublicRepoCond(user.ID))
+ }
+ }
+
+ return cond
+}
+
+// SearchRepositoryByName takes a keyword and part of a repository name to search;
+// it returns results in the given range and the total number of results.
+func SearchRepositoryByName(ctx context.Context, opts *SearchRepoOptions) (RepositoryList, int64, error) {
+ opts.IncludeDescription = false
+ return SearchRepository(ctx, opts)
+}
+
+// SearchRepositoryIDs takes a keyword and part of a repository name to search;
+// it returns results in the given range and the total number of results.
+func SearchRepositoryIDs(ctx context.Context, opts *SearchRepoOptions) ([]int64, int64, error) {
+ opts.IncludeDescription = false
+
+ cond := SearchRepositoryCondition(opts)
+
+ sess, count, err := searchRepositoryByCondition(ctx, opts, cond)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ defaultSize := 50
+ if opts.PageSize > 0 {
+ defaultSize = opts.PageSize
+ }
+
+ ids := make([]int64, 0, defaultSize)
+ err = sess.Select("id").Table("repository").Find(&ids)
+ if opts.PageSize <= 0 {
+ count = int64(len(ids))
+ }
+
+ return ids, count, err
+}
+
+// AccessibleRepoIDsQuery queries accessible repository ids. Usable as a subquery wherever repo ids need to be filtered.
+func AccessibleRepoIDsQuery(user *user_model.User) *builder.Builder {
+ // NB: this code must still work if user is nil
+ return builder.Select("id").From("repository").Where(AccessibleRepositoryCondition(user, unit.TypeInvalid))
+}
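+
+// Usage sketch (illustrative): since it returns a *builder.Builder, it can be
+// embedded as a subquery inside another condition, e.g.
+//
+//    builder.In("issue.repo_id", AccessibleRepoIDsQuery(doer))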
+
+// FindUserCodeAccessibleRepoIDs finds the IDs of all repositories whose code is accessible to the given user
+func FindUserCodeAccessibleRepoIDs(ctx context.Context, user *user_model.User) ([]int64, error) {
+ return SearchRepositoryIDsByCondition(ctx, AccessibleRepositoryCondition(user, unit.TypeCode))
+}
+
+// FindUserCodeAccessibleOwnerRepoIDs finds all repository IDs for the given owner whose code the user can see.
+func FindUserCodeAccessibleOwnerRepoIDs(ctx context.Context, ownerID int64, user *user_model.User) ([]int64, error) {
+ return SearchRepositoryIDsByCondition(ctx, builder.NewCond().And(
+ builder.Eq{"owner_id": ownerID},
+ AccessibleRepositoryCondition(user, unit.TypeCode),
+ ))
+}
+
+// GetUserRepositories returns a list of repositories of given user.
+func GetUserRepositories(ctx context.Context, opts *SearchRepoOptions) (RepositoryList, int64, error) {
+ if len(opts.OrderBy) == 0 {
+ opts.OrderBy = "updated_unix DESC"
+ }
+
+ cond := builder.NewCond()
+ if opts.Actor == nil {
+ return nil, 0, util.NewInvalidArgumentErrorf("GetUserRepositories: Actor is needed but not given")
+ }
+ cond = cond.And(builder.Eq{"owner_id": opts.Actor.ID})
+ if !opts.Private {
+ cond = cond.And(builder.Eq{"is_private": false})
+ }
+
+ if len(opts.LowerNames) > 0 {
+ cond = cond.And(builder.In("lower_name", opts.LowerNames))
+ }
+
+ sess := db.GetEngine(ctx)
+
+ count, err := sess.Where(cond).Count(new(Repository))
+ if err != nil {
+ return nil, 0, fmt.Errorf("Count: %w", err)
+ }
+
+ sess = sess.Where(cond).OrderBy(opts.OrderBy.String())
+ repos := make(RepositoryList, 0, opts.PageSize)
+ return repos, count, db.SetSessionPagination(sess, opts).Find(&repos)
+}
diff --git a/models/repo/repo_list_test.go b/models/repo/repo_list_test.go
new file mode 100644
index 0000000..8c13f38
--- /dev/null
+++ b/models/repo/repo_list_test.go
@@ -0,0 +1,450 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "path/filepath"
+ "slices"
+ "strings"
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/optional"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/structs"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func getTestCases() []struct {
+ name string
+ opts *repo_model.SearchRepoOptions
+ count int
+} {
+ testCases := []struct {
+ name string
+ opts *repo_model.SearchRepoOptions
+ count int
+ }{
+ {
+ name: "PublicRepositoriesByName",
+ opts: &repo_model.SearchRepoOptions{Keyword: "big_test_", ListOptions: db.ListOptions{PageSize: 10}, Collaborate: optional.Some(false)},
+ count: 7,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesByName",
+ opts: &repo_model.SearchRepoOptions{Keyword: "big_test_", ListOptions: db.ListOptions{Page: 1, PageSize: 10}, Private: true, Collaborate: optional.Some(false)},
+ count: 14,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesByNameWithPagesizeLimitFirstPage",
+ opts: &repo_model.SearchRepoOptions{Keyword: "big_test_", ListOptions: db.ListOptions{Page: 1, PageSize: 5}, Private: true, Collaborate: optional.Some(false)},
+ count: 14,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesByNameWithPagesizeLimitSecondPage",
+ opts: &repo_model.SearchRepoOptions{Keyword: "big_test_", ListOptions: db.ListOptions{Page: 2, PageSize: 5}, Private: true, Collaborate: optional.Some(false)},
+ count: 14,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesByNameWithPagesizeLimitThirdPage",
+ opts: &repo_model.SearchRepoOptions{Keyword: "big_test_", ListOptions: db.ListOptions{Page: 3, PageSize: 5}, Private: true, Collaborate: optional.Some(false)},
+ count: 14,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesByNameWithPagesizeLimitFourthPage",
+ opts: &repo_model.SearchRepoOptions{Keyword: "big_test_", ListOptions: db.ListOptions{Page: 4, PageSize: 5}, Private: true, Collaborate: optional.Some(false)},
+ count: 14,
+ },
+ {
+ name: "PublicRepositoriesOfUser",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 15, Collaborate: optional.Some(false)},
+ count: 2,
+ },
+ {
+ name: "PublicRepositoriesOfUser2",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 18, Collaborate: optional.Some(false)},
+ count: 0,
+ },
+ {
+ name: "PublicRepositoriesOfOrg3",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 20, Collaborate: optional.Some(false)},
+ count: 2,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesOfUser",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 15, Private: true, Collaborate: optional.Some(false)},
+ count: 4,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesOfUser2",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 18, Private: true, Collaborate: optional.Some(false)},
+ count: 0,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesOfOrg3",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 20, Private: true, Collaborate: optional.Some(false)},
+ count: 4,
+ },
+ {
+ name: "PublicRepositoriesOfUserIncludingCollaborative",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 15},
+ count: 5,
+ },
+ {
+ name: "PublicRepositoriesOfUser2IncludingCollaborative",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 18},
+ count: 1,
+ },
+ {
+ name: "PublicRepositoriesOfOrg3IncludingCollaborative",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 20},
+ count: 3,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesOfUserIncludingCollaborative",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 15, Private: true},
+ count: 9,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesOfUser2IncludingCollaborative",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 18, Private: true},
+ count: 4,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesOfOrg3IncludingCollaborative",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 20, Private: true},
+ count: 7,
+ },
+ {
+ name: "PublicRepositoriesOfOrganization",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 17, Collaborate: optional.Some(false)},
+ count: 1,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesOfOrganization",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 17, Private: true, Collaborate: optional.Some(false)},
+ count: 2,
+ },
+ {
+ name: "AllPublic/PublicRepositoriesByName",
+ opts: &repo_model.SearchRepoOptions{Keyword: "big_test_", ListOptions: db.ListOptions{PageSize: 10}, AllPublic: true, Collaborate: optional.Some(false)},
+ count: 7,
+ },
+ {
+ name: "AllPublic/PublicAndPrivateRepositoriesByName",
+ opts: &repo_model.SearchRepoOptions{Keyword: "big_test_", ListOptions: db.ListOptions{Page: 1, PageSize: 10}, Private: true, AllPublic: true, Collaborate: optional.Some(false)},
+ count: 14,
+ },
+ {
+ name: "AllPublic/PublicRepositoriesOfUserIncludingCollaborative",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 15, AllPublic: true, Template: optional.Some(false)},
+ count: 35,
+ },
+ {
+ name: "AllPublic/PublicAndPrivateRepositoriesOfUserIncludingCollaborative",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 15, Private: true, AllPublic: true, AllLimited: true, Template: optional.Some(false)},
+ count: 40,
+ },
+ {
+ name: "AllPublic/PublicAndPrivateRepositoriesOfUserIncludingCollaborativeByName",
+ opts: &repo_model.SearchRepoOptions{Keyword: "test", ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 15, Private: true, AllPublic: true},
+ count: 16,
+ },
+ {
+ name: "AllPublic/PublicAndPrivateRepositoriesOfUser2IncludingCollaborativeByName",
+ opts: &repo_model.SearchRepoOptions{Keyword: "test", ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 18, Private: true, AllPublic: true},
+ count: 14,
+ },
+ {
+ name: "AllPublic/PublicRepositoriesOfOrganization",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 17, AllPublic: true, Collaborate: optional.Some(false), Template: optional.Some(false)},
+ count: 35,
+ },
+ {
+ name: "AllTemplates",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, Template: optional.Some(true)},
+ count: 2,
+ },
+ {
+ name: "OwnerSlashRepoSearch",
+ opts: &repo_model.SearchRepoOptions{Keyword: "user/repo2", ListOptions: db.ListOptions{Page: 1, PageSize: 10}, Private: true, OwnerID: 0},
+ count: 2,
+ },
+ {
+ name: "OwnerSlashSearch",
+ opts: &repo_model.SearchRepoOptions{Keyword: "user20/", ListOptions: db.ListOptions{Page: 1, PageSize: 10}, Private: true, OwnerID: 0},
+ count: 4,
+ },
+ }
+
+ return testCases
+}
+
+func TestSearchRepository(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ // test search public repository on explore page
+ repos, count, err := repo_model.SearchRepositoryByName(db.DefaultContext, &repo_model.SearchRepoOptions{
+ ListOptions: db.ListOptions{
+ Page: 1,
+ PageSize: 10,
+ },
+ Keyword: "repo_12",
+ Collaborate: optional.Some(false),
+ })
+
+ require.NoError(t, err)
+ if assert.Len(t, repos, 1) {
+ assert.Equal(t, "test_repo_12", repos[0].Name)
+ }
+ assert.Equal(t, int64(1), count)
+
+ repos, count, err = repo_model.SearchRepositoryByName(db.DefaultContext, &repo_model.SearchRepoOptions{
+ ListOptions: db.ListOptions{
+ Page: 1,
+ PageSize: 10,
+ },
+ Keyword: "test_repo",
+ Collaborate: optional.Some(false),
+ })
+
+ require.NoError(t, err)
+ assert.Equal(t, int64(2), count)
+ assert.Len(t, repos, 2)
+
+ // test search private repository on explore page
+ repos, count, err = repo_model.SearchRepositoryByName(db.DefaultContext, &repo_model.SearchRepoOptions{
+ ListOptions: db.ListOptions{
+ Page: 1,
+ PageSize: 10,
+ },
+ Keyword: "repo_13",
+ Private: true,
+ Collaborate: optional.Some(false),
+ })
+
+ require.NoError(t, err)
+ if assert.Len(t, repos, 1) {
+ assert.Equal(t, "test_repo_13", repos[0].Name)
+ }
+ assert.Equal(t, int64(1), count)
+
+ repos, count, err = repo_model.SearchRepositoryByName(db.DefaultContext, &repo_model.SearchRepoOptions{
+ ListOptions: db.ListOptions{
+ Page: 1,
+ PageSize: 10,
+ },
+ Keyword: "test_repo",
+ Private: true,
+ Collaborate: optional.Some(false),
+ })
+
+ require.NoError(t, err)
+ assert.Equal(t, int64(3), count)
+ assert.Len(t, repos, 3)
+
+ // Test non-existent owner
+ repos, count, err = repo_model.SearchRepositoryByName(db.DefaultContext, &repo_model.SearchRepoOptions{OwnerID: unittest.NonexistentID})
+
+ require.NoError(t, err)
+ assert.Empty(t, repos)
+ assert.Equal(t, int64(0), count)
+
+ // Test search within description
+ repos, count, err = repo_model.SearchRepository(db.DefaultContext, &repo_model.SearchRepoOptions{
+ ListOptions: db.ListOptions{
+ Page: 1,
+ PageSize: 10,
+ },
+ Keyword: "description_14",
+ Collaborate: optional.Some(false),
+ IncludeDescription: true,
+ })
+
+ require.NoError(t, err)
+ if assert.Len(t, repos, 1) {
+ assert.Equal(t, "test_repo_14", repos[0].Name)
+ }
+ assert.Equal(t, int64(1), count)
+
+ // Test NOT search within description
+ repos, count, err = repo_model.SearchRepository(db.DefaultContext, &repo_model.SearchRepoOptions{
+ ListOptions: db.ListOptions{
+ Page: 1,
+ PageSize: 10,
+ },
+ Keyword: "description_14",
+ Collaborate: optional.Some(false),
+ IncludeDescription: false,
+ })
+
+ require.NoError(t, err)
+ assert.Empty(t, repos)
+ assert.Equal(t, int64(0), count)
+
+ testCases := getTestCases()
+
+ for _, testCase := range testCases {
+ t.Run(testCase.name, func(t *testing.T) {
+ repos, count, err := repo_model.SearchRepositoryByName(db.DefaultContext, testCase.opts)
+
+ require.NoError(t, err)
+ assert.Equal(t, int64(testCase.count), count)
+
+ page := testCase.opts.Page
+ if page <= 0 {
+ page = 1
+ }
+ expectedLen := testCase.opts.PageSize
+ if testCase.opts.PageSize*page > testCase.count+testCase.opts.PageSize {
+ expectedLen = 0
+ } else if testCase.opts.PageSize*page > testCase.count {
+ expectedLen = testCase.count % testCase.opts.PageSize
+ }
+ if assert.Len(t, repos, expectedLen) {
+ for _, repo := range repos {
+ assert.NotEmpty(t, repo.Name)
+
+ if len(testCase.opts.Keyword) > 0 {
+ // Keyword match condition is different for search terms of form "owner/repo"
+ if strings.Count(testCase.opts.Keyword, "/") == 1 {
+ // May still match as a whole...
+ wholeMatch := strings.Contains(repo.Name, testCase.opts.Keyword)
+
+ pieces := strings.Split(testCase.opts.Keyword, "/")
+ ownerName := pieces[0]
+ repoName := pieces[1]
+ // ... or match in parts
+ splitMatch := strings.Contains(repo.OwnerName, ownerName) && strings.Contains(repo.Name, repoName)
+
+ assert.True(t, wholeMatch || splitMatch, "Keyword '%s' does not match repo '%s/%s'", testCase.opts.Keyword, repo.Owner.Name, repo.Name)
+ } else {
+ assert.Contains(t, repo.Name, testCase.opts.Keyword)
+ }
+ }
+
+ if !testCase.opts.Private {
+ assert.False(t, repo.IsPrivate)
+ }
+
+ if testCase.opts.Fork.Value() && testCase.opts.Mirror.Value() {
+ assert.True(t, repo.IsFork && repo.IsMirror)
+ } else {
+ if testCase.opts.Fork.Has() {
+ assert.Equal(t, testCase.opts.Fork.Value(), repo.IsFork)
+ }
+
+ if testCase.opts.Mirror.Has() {
+ assert.Equal(t, testCase.opts.Mirror.Value(), repo.IsMirror)
+ }
+ }
+
+ if testCase.opts.OwnerID > 0 && !testCase.opts.AllPublic {
+ if testCase.opts.Collaborate.Has() {
+ if testCase.opts.Collaborate.Value() {
+ assert.NotEqual(t, testCase.opts.OwnerID, repo.Owner.ID)
+ } else {
+ assert.Equal(t, testCase.opts.OwnerID, repo.Owner.ID)
+ }
+ }
+ }
+ }
+ }
+ })
+ }
+}
+
+func TestCountRepository(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ testCases := getTestCases()
+
+ for _, testCase := range testCases {
+ t.Run(testCase.name, func(t *testing.T) {
+ count, err := repo_model.CountRepository(db.DefaultContext, testCase.opts)
+
+ require.NoError(t, err)
+ assert.Equal(t, int64(testCase.count), count)
+ })
+ }
+}
+
+func TestSearchRepositoryByTopicName(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ testCases := []struct {
+ name string
+ opts *repo_model.SearchRepoOptions
+ count int
+ }{
+ {
+ name: "AllPublic/SearchPublicRepositoriesFromTopicAndName",
+ opts: &repo_model.SearchRepoOptions{OwnerID: 21, AllPublic: true, Keyword: "graphql"},
+ count: 2,
+ },
+ {
+ name: "AllPublic/OnlySearchPublicRepositoriesFromTopic",
+ opts: &repo_model.SearchRepoOptions{OwnerID: 21, AllPublic: true, Keyword: "graphql", TopicOnly: true},
+ count: 1,
+ },
+ {
+ name: "AllPublic/OnlySearchMultipleKeywordPublicRepositoriesFromTopic",
+ opts: &repo_model.SearchRepoOptions{OwnerID: 21, AllPublic: true, Keyword: "graphql,golang", TopicOnly: true},
+ count: 2,
+ },
+ }
+
+ for _, testCase := range testCases {
+ t.Run(testCase.name, func(t *testing.T) {
+ _, count, err := repo_model.SearchRepositoryByName(db.DefaultContext, testCase.opts)
+ require.NoError(t, err)
+ assert.Equal(t, int64(testCase.count), count)
+ })
+ }
+}
+
+func TestSearchRepositoryIDsByCondition(t *testing.T) {
+ defer unittest.OverrideFixtures(
+ unittest.FixturesOptions{
+ Dir: filepath.Join(setting.AppWorkPath, "models/fixtures/"),
+ Base: setting.AppWorkPath,
+ Dirs: []string{"models/repo/TestSearchRepositoryIDsByCondition/"},
+ },
+ )()
+ require.NoError(t, unittest.PrepareTestDatabase())
+ // Sanity check of the database
+ limitedUser := unittest.AssertExistsAndLoadBean(t, &user.User{ID: 33, Visibility: structs.VisibleTypeLimited})
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1001, OwnerID: limitedUser.ID})
+
+ testCases := []struct {
+ user *user.User
+ repoIDs []int64
+ }{
+ {
+ user: nil,
+ repoIDs: []int64{1, 4, 8, 9, 10, 11, 12, 14, 17, 18, 21, 23, 25, 27, 29, 32, 33, 34, 35, 36, 37, 42, 44, 45, 46, 47, 48, 49, 50, 51, 53, 57, 58, 60, 61, 62, 1059},
+ },
+ {
+ user: unittest.AssertExistsAndLoadBean(t, &user.User{ID: 4}),
+ repoIDs: []int64{1, 3, 4, 8, 9, 10, 11, 12, 14, 17, 18, 21, 23, 25, 27, 29, 32, 33, 34, 35, 36, 37, 38, 40, 42, 44, 45, 46, 47, 48, 49, 50, 51, 53, 57, 58, 60, 61, 62, 1001, 1059},
+ },
+ {
+ user: unittest.AssertExistsAndLoadBean(t, &user.User{ID: 5}),
+ repoIDs: []int64{1, 4, 8, 9, 10, 11, 12, 14, 17, 18, 21, 23, 25, 27, 29, 32, 33, 34, 35, 36, 37, 38, 40, 42, 44, 45, 46, 47, 48, 49, 50, 51, 53, 57, 58, 60, 61, 62, 1001, 1059},
+ },
+ }
+
+ for _, testCase := range testCases {
+ repoIDs, err := repo_model.FindUserCodeAccessibleRepoIDs(db.DefaultContext, testCase.user)
+ require.NoError(t, err)
+
+ slices.Sort(repoIDs)
+ assert.EqualValues(t, testCase.repoIDs, repoIDs)
+ }
+}
diff --git a/models/repo/repo_repository.go b/models/repo/repo_repository.go
new file mode 100644
index 0000000..6780165
--- /dev/null
+++ b/models/repo/repo_repository.go
@@ -0,0 +1,60 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/validation"
+)
+
+func init() {
+ db.RegisterModel(new(FollowingRepo))
+}
+
+// FindFollowingReposByRepoID returns the FollowingRepo entries for the given
+// repository (up to an internal limit), validating each entry before returning.
+func FindFollowingReposByRepoID(ctx context.Context, repoID int64) ([]*FollowingRepo, error) {
+ maxFollowingRepos := 10
+ sess := db.GetEngine(ctx).Where("repo_id=?", repoID)
+ sess = sess.Limit(maxFollowingRepos, 0)
+ followingRepoList := make([]*FollowingRepo, 0, maxFollowingRepos)
+ err := sess.Find(&followingRepoList)
+ if err != nil {
+ return make([]*FollowingRepo, 0, maxFollowingRepos), err
+ }
+ for _, followingRepo := range followingRepoList {
+ if res, err := validation.IsValid(*followingRepo); !res {
+ return make([]*FollowingRepo, 0, maxFollowingRepos), err
+ }
+ }
+ return followingRepoList, nil
+}
+
+// StoreFollowingRepos validates the given entries and replaces the stored
+// FollowingRepo rows for the local repository within a single transaction.
+func StoreFollowingRepos(ctx context.Context, localRepoID int64, followingRepoList []*FollowingRepo) error {
+ for _, followingRepo := range followingRepoList {
+ if res, err := validation.IsValid(*followingRepo); !res {
+ return err
+ }
+ }
+
+ // Begin transaction
+	ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ _, err = db.GetEngine(ctx).Where("repo_id=?", localRepoID).Delete(FollowingRepo{})
+ if err != nil {
+ return err
+ }
+ for _, followingRepo := range followingRepoList {
+ _, err = db.GetEngine(ctx).Insert(followingRepo)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Commit transaction
+ return committer.Commit()
+}
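+
+// Usage sketch (illustrative, not part of the upstream code): callers replace
+// the full set of following repos in one call; StoreFollowingRepos deletes the
+// old rows and inserts the new ones inside a single transaction, so partial
+// updates are never visible. The FollowingRepo fields are elided here and any
+// values used must pass validation.IsValid.
+//
+//	func replaceFollowing(ctx context.Context, localRepoID int64, list []*FollowingRepo) error {
+//		return StoreFollowingRepos(ctx, localRepoID, list)
+//	}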
diff --git a/models/repo/repo_test.go b/models/repo/repo_test.go
new file mode 100644
index 0000000..56b8479
--- /dev/null
+++ b/models/repo/repo_test.go
@@ -0,0 +1,230 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/markup"
+ "code.gitea.io/gitea/modules/optional"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/test"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ countRepospts = repo_model.CountRepositoryOptions{OwnerID: 10}
+ countReposptsPublic = repo_model.CountRepositoryOptions{OwnerID: 10, Private: optional.Some(false)}
+ countReposptsPrivate = repo_model.CountRepositoryOptions{OwnerID: 10, Private: optional.Some(true)}
+)
+
+func TestGetRepositoryCount(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ ctx := db.DefaultContext
+ count, err1 := repo_model.CountRepositories(ctx, countRepospts)
+ privateCount, err2 := repo_model.CountRepositories(ctx, countReposptsPrivate)
+ publicCount, err3 := repo_model.CountRepositories(ctx, countReposptsPublic)
+ require.NoError(t, err1)
+ require.NoError(t, err2)
+ require.NoError(t, err3)
+ assert.Equal(t, int64(3), count)
+ assert.Equal(t, privateCount+publicCount, count)
+}
+
+func TestGetPublicRepositoryCount(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ count, err := repo_model.CountRepositories(db.DefaultContext, countReposptsPublic)
+ require.NoError(t, err)
+ assert.Equal(t, int64(1), count)
+}
+
+func TestGetPrivateRepositoryCount(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ count, err := repo_model.CountRepositories(db.DefaultContext, countReposptsPrivate)
+ require.NoError(t, err)
+ assert.Equal(t, int64(2), count)
+}
+
+func TestRepoAPIURL(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 10})
+
+ assert.Equal(t, "https://try.gitea.io/api/v1/repos/user12/repo10", repo.APIURL())
+}
+
+func TestWatchRepo(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ const repoID = 3
+ const userID = 2
+
+ require.NoError(t, repo_model.WatchRepo(db.DefaultContext, userID, repoID, true))
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Watch{RepoID: repoID, UserID: userID})
+ unittest.CheckConsistencyFor(t, &repo_model.Repository{ID: repoID})
+
+ require.NoError(t, repo_model.WatchRepo(db.DefaultContext, userID, repoID, false))
+ unittest.AssertNotExistsBean(t, &repo_model.Watch{RepoID: repoID, UserID: userID})
+ unittest.CheckConsistencyFor(t, &repo_model.Repository{ID: repoID})
+}
+
+func TestMetas(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo := &repo_model.Repository{Name: "testRepo"}
+ repo.Owner = &user_model.User{Name: "testOwner"}
+ repo.OwnerName = repo.Owner.Name
+
+ repo.Units = nil
+
+ metas := repo.ComposeMetas(db.DefaultContext)
+ assert.Equal(t, "testRepo", metas["repo"])
+ assert.Equal(t, "testOwner", metas["user"])
+
+ externalTracker := repo_model.RepoUnit{
+ Type: unit.TypeExternalTracker,
+ Config: &repo_model.ExternalTrackerConfig{
+ ExternalTrackerFormat: "https://someurl.com/{user}/{repo}/{issue}",
+ },
+ }
+
+ testSuccess := func(expectedStyle string) {
+ repo.Units = []*repo_model.RepoUnit{&externalTracker}
+ repo.RenderingMetas = nil
+ metas := repo.ComposeMetas(db.DefaultContext)
+ assert.Equal(t, expectedStyle, metas["style"])
+ assert.Equal(t, "testRepo", metas["repo"])
+ assert.Equal(t, "testOwner", metas["user"])
+ assert.Equal(t, "https://someurl.com/{user}/{repo}/{issue}", metas["format"])
+ }
+
+ testSuccess(markup.IssueNameStyleNumeric)
+
+ externalTracker.ExternalTrackerConfig().ExternalTrackerStyle = markup.IssueNameStyleAlphanumeric
+ testSuccess(markup.IssueNameStyleAlphanumeric)
+
+ externalTracker.ExternalTrackerConfig().ExternalTrackerStyle = markup.IssueNameStyleNumeric
+ testSuccess(markup.IssueNameStyleNumeric)
+
+ externalTracker.ExternalTrackerConfig().ExternalTrackerStyle = markup.IssueNameStyleRegexp
+ testSuccess(markup.IssueNameStyleRegexp)
+
+ repo, err := repo_model.GetRepositoryByID(db.DefaultContext, 3)
+ require.NoError(t, err)
+
+ metas = repo.ComposeMetas(db.DefaultContext)
+ assert.Contains(t, metas, "org")
+ assert.Contains(t, metas, "teams")
+ assert.Equal(t, "org3", metas["org"])
+ assert.Equal(t, ",owners,team1,", metas["teams"])
+}
+
+func TestGetRepositoryByURL(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ t.Run("InvalidPath", func(t *testing.T) {
+ repo, err := repo_model.GetRepositoryByURL(db.DefaultContext, "something")
+
+ assert.Nil(t, repo)
+ require.Error(t, err)
+ })
+
+ t.Run("ValidHttpURL", func(t *testing.T) {
+ test := func(t *testing.T, url string) {
+ repo, err := repo_model.GetRepositoryByURL(db.DefaultContext, url)
+
+ assert.NotNil(t, repo)
+ require.NoError(t, err)
+
+ assert.Equal(t, int64(2), repo.ID)
+ assert.Equal(t, int64(2), repo.OwnerID)
+ }
+
+ test(t, "https://try.gitea.io/user2/repo2")
+ test(t, "https://try.gitea.io/user2/repo2.git")
+ })
+
+ t.Run("ValidGitSshURL", func(t *testing.T) {
+ test := func(t *testing.T, url string) {
+ repo, err := repo_model.GetRepositoryByURL(db.DefaultContext, url)
+
+ assert.NotNil(t, repo)
+ require.NoError(t, err)
+
+ assert.Equal(t, int64(2), repo.ID)
+ assert.Equal(t, int64(2), repo.OwnerID)
+ }
+
+ test(t, "git+ssh://sshuser@try.gitea.io/user2/repo2")
+ test(t, "git+ssh://sshuser@try.gitea.io/user2/repo2.git")
+
+ test(t, "git+ssh://try.gitea.io/user2/repo2")
+ test(t, "git+ssh://try.gitea.io/user2/repo2.git")
+ })
+
+ t.Run("ValidImplicitSshURL", func(t *testing.T) {
+ test := func(t *testing.T, url string) {
+ repo, err := repo_model.GetRepositoryByURL(db.DefaultContext, url)
+
+ assert.NotNil(t, repo)
+ require.NoError(t, err)
+
+ assert.Equal(t, int64(2), repo.ID)
+ assert.Equal(t, int64(2), repo.OwnerID)
+ }
+
+ test(t, "sshuser@try.gitea.io:user2/repo2")
+ test(t, "sshuser@try.gitea.io:user2/repo2.git")
+
+ test(t, "try.gitea.io:user2/repo2")
+ test(t, "try.gitea.io:user2/repo2.git")
+ })
+}
+
+func TestComposeSSHCloneURL(t *testing.T) {
+ defer test.MockVariableValue(&setting.SSH, setting.SSH)()
+ defer test.MockVariableValue(&setting.Repository, setting.Repository)()
+
+ setting.SSH.User = "git"
+
+ // test SSH_DOMAIN
+ setting.SSH.Domain = "domain"
+ setting.SSH.Port = 22
+ setting.Repository.UseCompatSSHURI = false
+ assert.Equal(t, "git@domain:user/repo.git", repo_model.ComposeSSHCloneURL("user", "repo"))
+ setting.Repository.UseCompatSSHURI = true
+ assert.Equal(t, "ssh://git@domain/user/repo.git", repo_model.ComposeSSHCloneURL("user", "repo"))
+ // test SSH_DOMAIN while use non-standard SSH port
+ setting.SSH.Port = 123
+ setting.Repository.UseCompatSSHURI = false
+ assert.Equal(t, "ssh://git@domain:123/user/repo.git", repo_model.ComposeSSHCloneURL("user", "repo"))
+ setting.Repository.UseCompatSSHURI = true
+ assert.Equal(t, "ssh://git@domain:123/user/repo.git", repo_model.ComposeSSHCloneURL("user", "repo"))
+
+ // test IPv6 SSH_DOMAIN
+ setting.Repository.UseCompatSSHURI = false
+ setting.SSH.Domain = "::1"
+ setting.SSH.Port = 22
+ assert.Equal(t, "git@[::1]:user/repo.git", repo_model.ComposeSSHCloneURL("user", "repo"))
+ setting.SSH.Port = 123
+ assert.Equal(t, "ssh://git@[::1]:123/user/repo.git", repo_model.ComposeSSHCloneURL("user", "repo"))
+}
+
+func TestAPActorID(t *testing.T) {
+ repo := repo_model.Repository{ID: 1}
+ url := repo.APActorID()
+ expected := "https://try.gitea.io/api/v1/activitypub/repository-id/1"
+ if url != expected {
+ t.Errorf("unexpected APActorID, expected: %q, actual: %q", expected, url)
+ }
+}
diff --git a/models/repo/repo_unit.go b/models/repo/repo_unit.go
new file mode 100644
index 0000000..ed55384
--- /dev/null
+++ b/models/repo/repo_unit.go
@@ -0,0 +1,317 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+ "slices"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ "code.gitea.io/gitea/models/unit"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/xorm"
+ "xorm.io/xorm/convert"
+)
+
+// ErrUnitTypeNotExist represents a "UnitTypeNotExist" kind of error.
+type ErrUnitTypeNotExist struct {
+ UT unit.Type
+}
+
+// IsErrUnitTypeNotExist checks if an error is an ErrUnitTypeNotExist.
+func IsErrUnitTypeNotExist(err error) bool {
+ _, ok := err.(ErrUnitTypeNotExist)
+ return ok
+}
+
+func (err ErrUnitTypeNotExist) Error() string {
+ return fmt.Sprintf("Unit type does not exist: %s", err.UT.String())
+}
+
+func (err ErrUnitTypeNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// UnitAccessMode specifies the user's access mode to a repo unit
+type UnitAccessMode int
+
+const (
+ // UnitAccessModeUnset - no unit mode set
+ UnitAccessModeUnset UnitAccessMode = iota // 0
+ // UnitAccessModeNone no access
+ UnitAccessModeNone // 1
+ // UnitAccessModeRead read access
+ UnitAccessModeRead // 2
+ // UnitAccessModeWrite write access
+ UnitAccessModeWrite // 3
+)
+
+func (mode UnitAccessMode) ToAccessMode(modeIfUnset perm.AccessMode) perm.AccessMode {
+ switch mode {
+ case UnitAccessModeUnset:
+ return modeIfUnset
+ case UnitAccessModeNone:
+ return perm.AccessModeNone
+ case UnitAccessModeRead:
+ return perm.AccessModeRead
+ case UnitAccessModeWrite:
+ return perm.AccessModeWrite
+ default:
+ return perm.AccessModeNone
+ }
+}
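+
+// Behavior sketch (illustrative): an unset unit mode falls back to the given
+// default, while an explicit mode overrides it.
+//
+//	UnitAccessModeUnset.ToAccessMode(perm.AccessModeRead) // => perm.AccessModeRead
+//	UnitAccessModeWrite.ToAccessMode(perm.AccessModeRead) // => perm.AccessModeWrite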
+
+// RepoUnit describes a unit of a repository
+type RepoUnit struct { //revive:disable-line:exported
+ ID int64
+ RepoID int64 `xorm:"INDEX(s)"`
+ Type unit.Type `xorm:"INDEX(s)"`
+ Config convert.Conversion `xorm:"TEXT"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX CREATED"`
+ DefaultPermissions UnitAccessMode `xorm:"NOT NULL DEFAULT 0"`
+}
+
+func init() {
+ db.RegisterModel(new(RepoUnit))
+}
+
+// UnitConfig describes common unit config
+type UnitConfig struct{}
+
+// FromDB fills up a UnitConfig from serialized format.
+func (cfg *UnitConfig) FromDB(bs []byte) error {
+ return json.UnmarshalHandleDoubleEncode(bs, &cfg)
+}
+
+// ToDB exports a UnitConfig to a serialized format.
+func (cfg *UnitConfig) ToDB() ([]byte, error) {
+ return json.Marshal(cfg)
+}
+
+// ExternalWikiConfig describes external wiki config
+type ExternalWikiConfig struct {
+ ExternalWikiURL string
+}
+
+// FromDB fills up an ExternalWikiConfig from serialized format.
+func (cfg *ExternalWikiConfig) FromDB(bs []byte) error {
+ return json.UnmarshalHandleDoubleEncode(bs, &cfg)
+}
+
+// ToDB exports an ExternalWikiConfig to a serialized format.
+func (cfg *ExternalWikiConfig) ToDB() ([]byte, error) {
+ return json.Marshal(cfg)
+}
+
+// ExternalTrackerConfig describes external tracker config
+type ExternalTrackerConfig struct {
+ ExternalTrackerURL string
+ ExternalTrackerFormat string
+ ExternalTrackerStyle string
+ ExternalTrackerRegexpPattern string
+}
+
+// FromDB fills up an ExternalTrackerConfig from serialized format.
+func (cfg *ExternalTrackerConfig) FromDB(bs []byte) error {
+ return json.UnmarshalHandleDoubleEncode(bs, &cfg)
+}
+
+// ToDB exports an ExternalTrackerConfig to a serialized format.
+func (cfg *ExternalTrackerConfig) ToDB() ([]byte, error) {
+ return json.Marshal(cfg)
+}
+
+// IssuesConfig describes issues config
+type IssuesConfig struct {
+ EnableTimetracker bool
+ AllowOnlyContributorsToTrackTime bool
+ EnableDependencies bool
+}
+
+// FromDB fills up an IssuesConfig from serialized format.
+func (cfg *IssuesConfig) FromDB(bs []byte) error {
+ return json.UnmarshalHandleDoubleEncode(bs, &cfg)
+}
+
+// ToDB exports an IssuesConfig to a serialized format.
+func (cfg *IssuesConfig) ToDB() ([]byte, error) {
+ return json.Marshal(cfg)
+}
+
+// PullRequestsConfig describes pull requests config
+type PullRequestsConfig struct {
+ IgnoreWhitespaceConflicts bool
+ AllowMerge bool
+ AllowRebase bool
+ AllowRebaseMerge bool
+ AllowSquash bool
+ AllowFastForwardOnly bool
+ AllowManualMerge bool
+ AutodetectManualMerge bool
+ AllowRebaseUpdate bool
+ DefaultDeleteBranchAfterMerge bool
+ DefaultMergeStyle MergeStyle
+ DefaultAllowMaintainerEdit bool
+}
+
+// FromDB fills up a PullRequestsConfig from serialized format.
+func (cfg *PullRequestsConfig) FromDB(bs []byte) error {
+	// Default AllowRebaseUpdate to true for PullRequestsConfig records already in the DB
+ cfg.AllowRebaseUpdate = true
+ return json.UnmarshalHandleDoubleEncode(bs, &cfg)
+}
+
+// ToDB exports a PullRequestsConfig to a serialized format.
+func (cfg *PullRequestsConfig) ToDB() ([]byte, error) {
+ return json.Marshal(cfg)
+}
+
+// IsMergeStyleAllowed reports whether the given merge style is allowed
+func (cfg *PullRequestsConfig) IsMergeStyleAllowed(mergeStyle MergeStyle) bool {
+ return mergeStyle == MergeStyleMerge && cfg.AllowMerge ||
+ mergeStyle == MergeStyleRebase && cfg.AllowRebase ||
+ mergeStyle == MergeStyleRebaseMerge && cfg.AllowRebaseMerge ||
+ mergeStyle == MergeStyleSquash && cfg.AllowSquash ||
+ mergeStyle == MergeStyleFastForwardOnly && cfg.AllowFastForwardOnly ||
+ mergeStyle == MergeStyleManuallyMerged && cfg.AllowManualMerge
+}
+
+// GetDefaultMergeStyle returns the default merge style for pull requests in this repository
+func (cfg *PullRequestsConfig) GetDefaultMergeStyle() MergeStyle {
+ if len(cfg.DefaultMergeStyle) != 0 {
+ return cfg.DefaultMergeStyle
+ }
+
+ if setting.Repository.PullRequest.DefaultMergeStyle != "" {
+ return MergeStyle(setting.Repository.PullRequest.DefaultMergeStyle)
+ }
+
+ return MergeStyleMerge
+}
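+
+// Resolution sketch (illustrative): the per-repo config wins, then the
+// instance-wide setting, then the hard-coded fallback.
+//
+//	cfg := &PullRequestsConfig{}             // DefaultMergeStyle unset
+//	_ = cfg.GetDefaultMergeStyle()           // instance default, else MergeStyleMerge
+//	cfg.DefaultMergeStyle = MergeStyleSquash
+//	_ = cfg.GetDefaultMergeStyle()           // => MergeStyleSquash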
+
+type ActionsConfig struct {
+ DisabledWorkflows []string
+}
+
+func (cfg *ActionsConfig) EnableWorkflow(file string) {
+ cfg.DisabledWorkflows = util.SliceRemoveAll(cfg.DisabledWorkflows, file)
+}
+
+func (cfg *ActionsConfig) ToString() string {
+ return strings.Join(cfg.DisabledWorkflows, ",")
+}
+
+func (cfg *ActionsConfig) IsWorkflowDisabled(file string) bool {
+ return slices.Contains(cfg.DisabledWorkflows, file)
+}
+
+func (cfg *ActionsConfig) DisableWorkflow(file string) {
+ for _, workflow := range cfg.DisabledWorkflows {
+ if file == workflow {
+ return
+ }
+ }
+
+ cfg.DisabledWorkflows = append(cfg.DisabledWorkflows, file)
+}
+
+// FromDB fills up an ActionsConfig from serialized format.
+func (cfg *ActionsConfig) FromDB(bs []byte) error {
+ return json.UnmarshalHandleDoubleEncode(bs, &cfg)
+}
+
+// ToDB exports an ActionsConfig to a serialized format.
+func (cfg *ActionsConfig) ToDB() ([]byte, error) {
+ return json.Marshal(cfg)
+}
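+
+// Round-trip sketch (illustrative): the disabled-workflow list survives the
+// JSON serialization used for the TEXT column.
+//
+//	cfg := &ActionsConfig{}
+//	cfg.DisableWorkflow("release.yaml")
+//	bs, _ := cfg.ToDB()
+//	restored := &ActionsConfig{}
+//	_ = restored.FromDB(bs)
+//	_ = restored.IsWorkflowDisabled("release.yaml") // => true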
+
+// BeforeSet is invoked from XORM before setting the value of a field of this object.
+func (r *RepoUnit) BeforeSet(colName string, val xorm.Cell) {
+ if colName == "type" {
+ switch unit.Type(db.Cell2Int64(val)) {
+ case unit.TypeExternalWiki:
+ r.Config = new(ExternalWikiConfig)
+ case unit.TypeExternalTracker:
+ r.Config = new(ExternalTrackerConfig)
+ case unit.TypePullRequests:
+ r.Config = new(PullRequestsConfig)
+ case unit.TypeIssues:
+ r.Config = new(IssuesConfig)
+ case unit.TypeActions:
+ r.Config = new(ActionsConfig)
+ case unit.TypeCode, unit.TypeReleases, unit.TypeWiki, unit.TypeProjects, unit.TypePackages:
+ fallthrough
+ default:
+ r.Config = new(UnitConfig)
+ }
+ }
+}
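+
+// Typed-access sketch (illustrative): BeforeSet has already allocated the
+// concrete Config type for the row's unit type, so the accessors below may
+// safely type-assert.
+//
+//	var u RepoUnit // loaded via xorm with Type == unit.TypeExternalTracker
+//	format := u.ExternalTrackerConfig().ExternalTrackerFormat
+//	_ = format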
+
+// Unit returns the unit definition for this repo unit's type
+func (r *RepoUnit) Unit() unit.Unit {
+ return unit.Units[r.Type]
+}
+
+// CodeConfig returns config for unit.TypeCode
+func (r *RepoUnit) CodeConfig() *UnitConfig {
+ return r.Config.(*UnitConfig)
+}
+
+// PullRequestsConfig returns config for unit.TypePullRequests
+func (r *RepoUnit) PullRequestsConfig() *PullRequestsConfig {
+ return r.Config.(*PullRequestsConfig)
+}
+
+// ReleasesConfig returns config for unit.TypeReleases
+func (r *RepoUnit) ReleasesConfig() *UnitConfig {
+ return r.Config.(*UnitConfig)
+}
+
+// ExternalWikiConfig returns config for unit.TypeExternalWiki
+func (r *RepoUnit) ExternalWikiConfig() *ExternalWikiConfig {
+ return r.Config.(*ExternalWikiConfig)
+}
+
+// IssuesConfig returns config for unit.TypeIssues
+func (r *RepoUnit) IssuesConfig() *IssuesConfig {
+ return r.Config.(*IssuesConfig)
+}
+
+// ExternalTrackerConfig returns config for unit.TypeExternalTracker
+func (r *RepoUnit) ExternalTrackerConfig() *ExternalTrackerConfig {
+ return r.Config.(*ExternalTrackerConfig)
+}
+
+// ActionsConfig returns config for unit.TypeActions
+func (r *RepoUnit) ActionsConfig() *ActionsConfig {
+ return r.Config.(*ActionsConfig)
+}
+
+func getUnitsByRepoID(ctx context.Context, repoID int64) (units []*RepoUnit, err error) {
+ var tmpUnits []*RepoUnit
+ if err := db.GetEngine(ctx).Where("repo_id = ?", repoID).Find(&tmpUnits); err != nil {
+ return nil, err
+ }
+
+ for _, u := range tmpUnits {
+ if !u.Type.UnitGlobalDisabled() {
+ units = append(units, u)
+ }
+ }
+
+ return units, nil
+}
+
+// UpdateRepoUnit updates the provided repo unit
+func UpdateRepoUnit(ctx context.Context, unit *RepoUnit) error {
+ _, err := db.GetEngine(ctx).ID(unit.ID).Update(unit)
+ return err
+}
diff --git a/models/repo/repo_unit_test.go b/models/repo/repo_unit_test.go
new file mode 100644
index 0000000..deee1a7
--- /dev/null
+++ b/models/repo/repo_unit_test.go
@@ -0,0 +1,39 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/perm"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestActionsConfig(t *testing.T) {
+ cfg := &ActionsConfig{}
+ cfg.DisableWorkflow("test1.yaml")
+ assert.EqualValues(t, []string{"test1.yaml"}, cfg.DisabledWorkflows)
+
+ cfg.DisableWorkflow("test1.yaml")
+ assert.EqualValues(t, []string{"test1.yaml"}, cfg.DisabledWorkflows)
+
+ cfg.EnableWorkflow("test1.yaml")
+ assert.EqualValues(t, []string{}, cfg.DisabledWorkflows)
+
+ cfg.EnableWorkflow("test1.yaml")
+ assert.EqualValues(t, []string{}, cfg.DisabledWorkflows)
+
+ cfg.DisableWorkflow("test1.yaml")
+ cfg.DisableWorkflow("test2.yaml")
+ cfg.DisableWorkflow("test3.yaml")
+ assert.EqualValues(t, "test1.yaml,test2.yaml,test3.yaml", cfg.ToString())
+}
+
+func TestRepoUnitAccessMode(t *testing.T) {
+ assert.Equal(t, perm.AccessModeNone, UnitAccessModeNone.ToAccessMode(perm.AccessModeAdmin))
+ assert.Equal(t, perm.AccessModeRead, UnitAccessModeRead.ToAccessMode(perm.AccessModeAdmin))
+ assert.Equal(t, perm.AccessModeWrite, UnitAccessModeWrite.ToAccessMode(perm.AccessModeAdmin))
+ assert.Equal(t, perm.AccessModeRead, UnitAccessModeUnset.ToAccessMode(perm.AccessModeRead))
+}
diff --git a/models/repo/search.go b/models/repo/search.go
new file mode 100644
index 0000000..ffb8e26
--- /dev/null
+++ b/models/repo/search.go
@@ -0,0 +1,53 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import "code.gitea.io/gitea/models/db"
+
+// OrderByMap represents all possible search order
+var OrderByMap = map[string]map[string]db.SearchOrderBy{
+ "asc": {
+ "alpha": "owner_name ASC, name ASC",
+ "created": db.SearchOrderByOldest,
+ "updated": db.SearchOrderByLeastUpdated,
+ "size": "size ASC",
+ "git_size": "git_size ASC",
+ "lfs_size": "lfs_size ASC",
+ "id": db.SearchOrderByID,
+ "stars": db.SearchOrderByStars,
+ "forks": db.SearchOrderByForks,
+ },
+ "desc": {
+ "alpha": "owner_name DESC, name DESC",
+ "created": db.SearchOrderByNewest,
+ "updated": db.SearchOrderByRecentUpdated,
+ "size": "size DESC",
+ "git_size": "git_size DESC",
+ "lfs_size": "lfs_size DESC",
+ "id": db.SearchOrderByIDReverse,
+ "stars": db.SearchOrderByStarsReverse,
+ "forks": db.SearchOrderByForksReverse,
+ },
+}
+
+// OrderByFlatMap is similar to OrderByMap but use human language keywords
+// to decide between asc and desc
+var OrderByFlatMap = map[string]db.SearchOrderBy{
+ "newest": OrderByMap["desc"]["created"],
+ "oldest": OrderByMap["asc"]["created"],
+ "recentupdate": OrderByMap["desc"]["updated"],
+ "leastupdate": OrderByMap["asc"]["updated"],
+ "reversealphabetically": OrderByMap["desc"]["alpha"],
+ "alphabetically": OrderByMap["asc"]["alpha"],
+ "reversesize": OrderByMap["desc"]["size"],
+ "size": OrderByMap["asc"]["size"],
+ "reversegitsize": OrderByMap["desc"]["git_size"],
+ "gitsize": OrderByMap["asc"]["git_size"],
+ "reverselfssize": OrderByMap["desc"]["lfs_size"],
+ "lfssize": OrderByMap["asc"]["lfs_size"],
+ "moststars": OrderByMap["desc"]["stars"],
+ "feweststars": OrderByMap["asc"]["stars"],
+ "mostforks": OrderByMap["desc"]["forks"],
+ "fewestforks": OrderByMap["asc"]["forks"],
+}
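+
+// Lookup sketch (illustrative): translating a user-facing sort keyword into an
+// ORDER BY clause, with a fallback for unknown keywords.
+//
+//	orderBy, ok := OrderByFlatMap["recentupdate"]
+//	if !ok {
+//		orderBy = OrderByMap["desc"]["updated"]
+//	}
+//	_ = orderBy // pass to the search options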
diff --git a/models/repo/star.go b/models/repo/star.go
new file mode 100644
index 0000000..6073714
--- /dev/null
+++ b/models/repo/star.go
@@ -0,0 +1,101 @@
+// Copyright 2016 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/timeutil"
+)
+
+// Star represents a repo starred by a user.
+type Star struct {
+ ID int64 `xorm:"pk autoincr"`
+ UID int64 `xorm:"UNIQUE(s)"`
+ RepoID int64 `xorm:"UNIQUE(s)"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+}
+
+func init() {
+ db.RegisterModel(new(Star))
+}
+
+// StarRepo stars or unstars a repository.
+func StarRepo(ctx context.Context, userID, repoID int64, star bool) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+ staring := IsStaring(ctx, userID, repoID)
+
+ if star {
+ if staring {
+ return nil
+ }
+
+ if err := db.Insert(ctx, &Star{UID: userID, RepoID: repoID}); err != nil {
+ return err
+ }
+ if _, err := db.Exec(ctx, "UPDATE `repository` SET num_stars = num_stars + 1 WHERE id = ?", repoID); err != nil {
+ return err
+ }
+ if _, err := db.Exec(ctx, "UPDATE `user` SET num_stars = num_stars + 1 WHERE id = ?", userID); err != nil {
+ return err
+ }
+ } else {
+ if !staring {
+ return nil
+ }
+
+ if _, err := db.DeleteByBean(ctx, &Star{UID: userID, RepoID: repoID}); err != nil {
+ return err
+ }
+ if _, err := db.Exec(ctx, "UPDATE `repository` SET num_stars = num_stars - 1 WHERE id = ?", repoID); err != nil {
+ return err
+ }
+ if _, err := db.Exec(ctx, "UPDATE `user` SET num_stars = num_stars - 1 WHERE id = ?", userID); err != nil {
+ return err
+ }
+ }
+
+ return committer.Commit()
+}
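+
+// Usage sketch (illustrative): StarRepo takes the desired end state and is
+// idempotent, so callers need not check IsStaring first. The doer and repo
+// values here are hypothetical.
+//
+//	if err := StarRepo(ctx, doer.ID, repo.ID, true); err != nil {
+//		// handle error
+//	}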
+
+// IsStaring checks if a user has starred a given repository.
+func IsStaring(ctx context.Context, userID, repoID int64) bool {
+ has, _ := db.GetEngine(ctx).Get(&Star{UID: userID, RepoID: repoID})
+ return has
+}
+
+// GetStargazers returns the users that starred the repo.
+func GetStargazers(ctx context.Context, repo *Repository, opts db.ListOptions) ([]*user_model.User, error) {
+ sess := db.GetEngine(ctx).Where("star.repo_id = ?", repo.ID).
+ Join("LEFT", "star", "`user`.id = star.uid")
+ if opts.Page > 0 {
+ sess = db.SetSessionPagination(sess, &opts)
+
+ users := make([]*user_model.User, 0, opts.PageSize)
+ return users, sess.Find(&users)
+ }
+
+ users := make([]*user_model.User, 0, 8)
+ return users, sess.Find(&users)
+}
+
+// ClearRepoStars removes all stars from a repository and decrements the star counts of the users that starred it.
+// Used when a repository is set to private.
+func ClearRepoStars(ctx context.Context, repoID int64) error {
+ if _, err := db.Exec(ctx, "UPDATE `user` SET num_stars=num_stars-1 WHERE id IN (SELECT `uid` FROM `star` WHERE repo_id = ?)", repoID); err != nil {
+ return err
+ }
+
+ if _, err := db.Exec(ctx, "UPDATE `repository` SET num_stars = 0 WHERE id = ?", repoID); err != nil {
+ return err
+ }
+
+ return db.DeleteBeans(ctx, Star{RepoID: repoID})
+}
diff --git a/models/repo/star_test.go b/models/repo/star_test.go
new file mode 100644
index 0000000..73b362c
--- /dev/null
+++ b/models/repo/star_test.go
@@ -0,0 +1,72 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestStarRepo(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ const userID = 2
+ const repoID = 1
+ unittest.AssertNotExistsBean(t, &repo_model.Star{UID: userID, RepoID: repoID})
+ require.NoError(t, repo_model.StarRepo(db.DefaultContext, userID, repoID, true))
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Star{UID: userID, RepoID: repoID})
+ require.NoError(t, repo_model.StarRepo(db.DefaultContext, userID, repoID, true))
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Star{UID: userID, RepoID: repoID})
+ require.NoError(t, repo_model.StarRepo(db.DefaultContext, userID, repoID, false))
+ unittest.AssertNotExistsBean(t, &repo_model.Star{UID: userID, RepoID: repoID})
+}
+
+func TestIsStaring(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ assert.True(t, repo_model.IsStaring(db.DefaultContext, 2, 4))
+ assert.False(t, repo_model.IsStaring(db.DefaultContext, 3, 4))
+}
+
+func TestRepository_GetStargazers(t *testing.T) {
+ // repo with stargazers
+ require.NoError(t, unittest.PrepareTestDatabase())
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 4})
+ gazers, err := repo_model.GetStargazers(db.DefaultContext, repo, db.ListOptions{Page: 0})
+ require.NoError(t, err)
+ if assert.Len(t, gazers, 1) {
+ assert.Equal(t, int64(2), gazers[0].ID)
+ }
+}
+
+func TestRepository_GetStargazers2(t *testing.T) {
+ // repo with stargazers
+ require.NoError(t, unittest.PrepareTestDatabase())
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 3})
+ gazers, err := repo_model.GetStargazers(db.DefaultContext, repo, db.ListOptions{Page: 0})
+ require.NoError(t, err)
+ assert.Empty(t, gazers)
+}
+
+func TestClearRepoStars(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ const userID = 2
+ const repoID = 1
+ unittest.AssertNotExistsBean(t, &repo_model.Star{UID: userID, RepoID: repoID})
+ require.NoError(t, repo_model.StarRepo(db.DefaultContext, userID, repoID, true))
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Star{UID: userID, RepoID: repoID})
+ require.NoError(t, repo_model.StarRepo(db.DefaultContext, userID, repoID, false))
+ unittest.AssertNotExistsBean(t, &repo_model.Star{UID: userID, RepoID: repoID})
+ require.NoError(t, repo_model.ClearRepoStars(db.DefaultContext, repoID))
+ unittest.AssertNotExistsBean(t, &repo_model.Star{UID: userID, RepoID: repoID})
+
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ gazers, err := repo_model.GetStargazers(db.DefaultContext, repo, db.ListOptions{Page: 0})
+ require.NoError(t, err)
+ assert.Empty(t, gazers)
+}
diff --git a/models/repo/topic.go b/models/repo/topic.go
new file mode 100644
index 0000000..6db6c8a
--- /dev/null
+++ b/models/repo/topic.go
@@ -0,0 +1,389 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+func init() {
+ db.RegisterModel(new(Topic))
+ db.RegisterModel(new(RepoTopic))
+}
+
+var topicPattern = regexp.MustCompile(`^[a-z0-9][-.a-z0-9]*$`)
+
+// Topic represents a topic of repositories
+type Topic struct {
+ ID int64 `xorm:"pk autoincr"`
+ Name string `xorm:"UNIQUE VARCHAR(50)"`
+ RepoCount int
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+}
+
+// RepoTopic represents associated repositories and topics
+type RepoTopic struct { //revive:disable-line:exported
+ RepoID int64 `xorm:"pk"`
+ TopicID int64 `xorm:"pk"`
+}
+
+// ErrTopicNotExist represents an error when a topic does not exist
+type ErrTopicNotExist struct {
+ Name string
+}
+
+// IsErrTopicNotExist checks if an error is an ErrTopicNotExist.
+func IsErrTopicNotExist(err error) bool {
+ _, ok := err.(ErrTopicNotExist)
+ return ok
+}
+
+// Error implements error interface
+func (err ErrTopicNotExist) Error() string {
+	return fmt.Sprintf("topic does not exist [name: %s]", err.Name)
+}
+
+func (err ErrTopicNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ValidateTopic checks a topic against length and pattern rules
+func ValidateTopic(topic string) bool {
+ return len(topic) <= 35 && topicPattern.MatchString(topic)
+}
+
+// SanitizeAndValidateTopics sanitizes and checks an array of topics
+func SanitizeAndValidateTopics(topics []string) (validTopics, invalidTopics []string) {
+ validTopics = make([]string, 0)
+ mValidTopics := make(container.Set[string])
+ invalidTopics = make([]string, 0)
+
+ for _, topic := range topics {
+ topic = strings.TrimSpace(strings.ToLower(topic))
+ // ignore empty string
+ if len(topic) == 0 {
+ continue
+ }
+ // ignore same topic twice
+ if mValidTopics.Contains(topic) {
+ continue
+ }
+ if ValidateTopic(topic) {
+ validTopics = append(validTopics, topic)
+ mValidTopics.Add(topic)
+ } else {
+ invalidTopics = append(invalidTopics, topic)
+ }
+ }
+
+ return validTopics, invalidTopics
+}
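+
+// Behavior sketch (illustrative): lower-casing, de-duplication and pattern
+// checks happen in a single pass.
+//
+//	valid, invalid := SanitizeAndValidateTopics([]string{"Go", "go", "-bad", ""})
+//	// valid   => []string{"go"}
+//	// invalid => []string{"-bad"}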
+
+// GetTopicByName retrieves topic by name
+func GetTopicByName(ctx context.Context, name string) (*Topic, error) {
+ var topic Topic
+ if has, err := db.GetEngine(ctx).Where("name = ?", name).Get(&topic); err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrTopicNotExist{name}
+ }
+ return &topic, nil
+}
+
+// addTopicByNameToRepo adds a topic name to a repo and increments the topic's
+// repo count. It returns the topic after the addition.
+func addTopicByNameToRepo(ctx context.Context, repoID int64, topicName string) (*Topic, error) {
+ var topic Topic
+ e := db.GetEngine(ctx)
+ has, err := e.Where("name = ?", topicName).Get(&topic)
+ if err != nil {
+ return nil, err
+ }
+ if !has {
+ topic.Name = topicName
+ topic.RepoCount = 1
+ if err := db.Insert(ctx, &topic); err != nil {
+ return nil, err
+ }
+ } else {
+ topic.RepoCount++
+ if _, err := e.ID(topic.ID).Cols("repo_count").Update(&topic); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := db.Insert(ctx, &RepoTopic{
+ RepoID: repoID,
+ TopicID: topic.ID,
+ }); err != nil {
+ return nil, err
+ }
+
+ return &topic, nil
+}
+
+// removeTopicFromRepo removes a topic from a repo and decrements the topic's repo count
+func removeTopicFromRepo(ctx context.Context, repoID int64, topic *Topic) error {
+ topic.RepoCount--
+ e := db.GetEngine(ctx)
+ if _, err := e.ID(topic.ID).Cols("repo_count").Update(topic); err != nil {
+ return err
+ }
+
+ if _, err := e.Delete(&RepoTopic{
+ RepoID: repoID,
+ TopicID: topic.ID,
+ }); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// RemoveTopicsFromRepo removes all topics from the repo and decrements the respective topics' repo counts
+func RemoveTopicsFromRepo(ctx context.Context, repoID int64) error {
+ e := db.GetEngine(ctx)
+ _, err := e.Where(
+ builder.In("id",
+ builder.Select("topic_id").From("repo_topic").Where(builder.Eq{"repo_id": repoID}),
+ ),
+ ).Cols("repo_count").SetExpr("repo_count", "repo_count-1").Update(&Topic{})
+ if err != nil {
+ return err
+ }
+
+ if _, err = e.Delete(&RepoTopic{RepoID: repoID}); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// FindTopicOptions represents the options when finding topics
+type FindTopicOptions struct {
+ db.ListOptions
+ RepoID int64
+ Keyword string
+}
+
+func (opts *FindTopicOptions) toConds() builder.Cond {
+ cond := builder.NewCond()
+ if opts.RepoID > 0 {
+ cond = cond.And(builder.Eq{"repo_topic.repo_id": opts.RepoID})
+ }
+
+ if opts.Keyword != "" {
+ cond = cond.And(builder.Like{"topic.name", opts.Keyword})
+ }
+
+ return cond
+}
+
+// FindTopics retrieves the topics via FindTopicOptions
+func FindTopics(ctx context.Context, opts *FindTopicOptions) ([]*Topic, int64, error) {
+ sess := db.GetEngine(ctx).Select("topic.*").Where(opts.toConds())
+ orderBy := "topic.repo_count DESC"
+ if opts.RepoID > 0 {
+ sess.Join("INNER", "repo_topic", "repo_topic.topic_id = topic.id")
+		orderBy = "topic.name" // when rendering topics for a repo, sort by name for consistent results
+ }
+ if opts.PageSize > 0 {
+ sess = db.SetSessionPagination(sess, opts)
+ }
+ topics := make([]*Topic, 0, 10)
+ total, err := sess.OrderBy(orderBy).FindAndCount(&topics)
+ return topics, total, err
+}
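+
+// Query sketch (illustrative): listing the first page of topics matching a
+// keyword, ordered by popularity because no RepoID is set.
+//
+//	topics, total, err := FindTopics(ctx, &FindTopicOptions{
+//		Keyword:     "go",
+//		ListOptions: db.ListOptions{Page: 1, PageSize: 10},
+//	})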
+
+// CountTopics counts the number of topics matching the FindTopicOptions
+func CountTopics(ctx context.Context, opts *FindTopicOptions) (int64, error) {
+ sess := db.GetEngine(ctx).Where(opts.toConds())
+ if opts.RepoID > 0 {
+ sess.Join("INNER", "repo_topic", "repo_topic.topic_id = topic.id")
+ }
+ return sess.Count(new(Topic))
+}
+
+// GetRepoTopicByName retrieves a topic by name for a repo, if it exists
+func GetRepoTopicByName(ctx context.Context, repoID int64, topicName string) (*Topic, error) {
+ cond := builder.NewCond()
+ var topic Topic
+ cond = cond.And(builder.Eq{"repo_topic.repo_id": repoID}).And(builder.Eq{"topic.name": topicName})
+ sess := db.GetEngine(ctx).Table("topic").Where(cond)
+ sess.Join("INNER", "repo_topic", "repo_topic.topic_id = topic.id")
+ has, err := sess.Select("topic.*").Get(&topic)
+ if has {
+ return &topic, err
+ }
+ return nil, err
+}
+
+// AddTopic adds a topic name to a repository (if it does not already have it)
+func AddTopic(ctx context.Context, repoID int64, topicName string) (*Topic, error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer committer.Close()
+ sess := db.GetEngine(ctx)
+
+ topic, err := GetRepoTopicByName(ctx, repoID, topicName)
+ if err != nil {
+ return nil, err
+ }
+ if topic != nil {
+		// Repo already has the topic
+ return topic, nil
+ }
+
+ topic, err = addTopicByNameToRepo(ctx, repoID, topicName)
+ if err != nil {
+ return nil, err
+ }
+
+ if err = syncTopicsInRepository(sess, repoID); err != nil {
+ return nil, err
+ }
+
+ return topic, committer.Commit()
+}
+
+// DeleteTopic removes a topic name from a repository (if it has it)
+func DeleteTopic(ctx context.Context, repoID int64, topicName string) (*Topic, error) {
+ topic, err := GetRepoTopicByName(ctx, repoID, topicName)
+ if err != nil {
+ return nil, err
+ }
+ if topic == nil {
+		// Repo doesn't have the topic; nothing to remove
+ return nil, nil
+ }
+
+ err = removeTopicFromRepo(ctx, repoID, topic)
+ if err != nil {
+ return nil, err
+ }
+
+ err = syncTopicsInRepository(db.GetEngine(ctx), repoID)
+
+ return topic, err
+}
+
+// SaveTopics saves topics to a repository
+func SaveTopics(ctx context.Context, repoID int64, topicNames ...string) error {
+ topics, _, err := FindTopics(ctx, &FindTopicOptions{
+ RepoID: repoID,
+ })
+ if err != nil {
+ return err
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+ sess := db.GetEngine(ctx)
+
+ var addedTopicNames []string
+ for _, topicName := range topicNames {
+ if strings.TrimSpace(topicName) == "" {
+ continue
+ }
+
+ var found bool
+ for _, t := range topics {
+ if strings.EqualFold(topicName, t.Name) {
+ found = true
+ break
+ }
+ }
+ if !found {
+ addedTopicNames = append(addedTopicNames, topicName)
+ }
+ }
+
+ var removeTopics []*Topic
+ for _, t := range topics {
+ var found bool
+ for _, topicName := range topicNames {
+ if strings.EqualFold(topicName, t.Name) {
+ found = true
+ break
+ }
+ }
+ if !found {
+ removeTopics = append(removeTopics, t)
+ }
+ }
+
+ for _, topicName := range addedTopicNames {
+ _, err := addTopicByNameToRepo(ctx, repoID, topicName)
+ if err != nil {
+ return err
+ }
+ }
+
+ for _, topic := range removeTopics {
+ err := removeTopicFromRepo(ctx, repoID, topic)
+ if err != nil {
+ return err
+ }
+ }
+
+ if err := syncTopicsInRepository(sess, repoID); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
+
+// GenerateTopics generates topics from a template repository
+func GenerateTopics(ctx context.Context, templateRepo, generateRepo *Repository) error {
+ for _, topic := range templateRepo.Topics {
+ if _, err := addTopicByNameToRepo(ctx, generateRepo.ID, topic); err != nil {
+ return err
+ }
+ }
+
+ return syncTopicsInRepository(db.GetEngine(ctx), generateRepo.ID)
+}
+
+// syncTopicsInRepository copies the repo's topic names from the topic table into the repository's topics column
+func syncTopicsInRepository(sess db.Engine, repoID int64) error {
+ topicNames := make([]string, 0, 25)
+ if err := sess.Table("topic").Cols("name").
+ Join("INNER", "repo_topic", "repo_topic.topic_id = topic.id").
+ Where("repo_topic.repo_id = ?", repoID).Asc("topic.name").Find(&topicNames); err != nil {
+ return err
+ }
+
+ if _, err := sess.ID(repoID).Cols("topics").Update(&Repository{
+ Topics: topicNames,
+ }); err != nil {
+ return err
+ }
+ return nil
+}
+
+// CountOrphanedTopics returns the number of topics that don't belong to any repository.
+func CountOrphanedTopics(ctx context.Context) (int64, error) {
+ return db.GetEngine(ctx).Where("repo_count = 0").Count(new(Topic))
+}
+
+// DeleteOrphanedTopics deletes all topics that don't belong to any repository.
+func DeleteOrphanedTopics(ctx context.Context) (int64, error) {
+ return db.GetEngine(ctx).Where("repo_count = 0").Delete(new(Topic))
+}
diff --git a/models/repo/topic_test.go b/models/repo/topic_test.go
new file mode 100644
index 0000000..45cee52
--- /dev/null
+++ b/models/repo/topic_test.go
@@ -0,0 +1,83 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestAddTopic(t *testing.T) {
+ totalNrOfTopics := 6
+ repo1NrOfTopics := 3
+
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ topics, _, err := repo_model.FindTopics(db.DefaultContext, &repo_model.FindTopicOptions{})
+ require.NoError(t, err)
+ assert.Len(t, topics, totalNrOfTopics)
+
+ topics, total, err := repo_model.FindTopics(db.DefaultContext, &repo_model.FindTopicOptions{
+ ListOptions: db.ListOptions{Page: 1, PageSize: 2},
+ })
+ require.NoError(t, err)
+ assert.Len(t, topics, 2)
+ assert.EqualValues(t, 6, total)
+
+ topics, _, err = repo_model.FindTopics(db.DefaultContext, &repo_model.FindTopicOptions{
+ RepoID: 1,
+ })
+ require.NoError(t, err)
+ assert.Len(t, topics, repo1NrOfTopics)
+
+ require.NoError(t, repo_model.SaveTopics(db.DefaultContext, 2, "golang"))
+ repo2NrOfTopics := 1
+ topics, _, err = repo_model.FindTopics(db.DefaultContext, &repo_model.FindTopicOptions{})
+ require.NoError(t, err)
+ assert.Len(t, topics, totalNrOfTopics)
+
+ topics, _, err = repo_model.FindTopics(db.DefaultContext, &repo_model.FindTopicOptions{
+ RepoID: 2,
+ })
+ require.NoError(t, err)
+ assert.Len(t, topics, repo2NrOfTopics)
+
+ require.NoError(t, repo_model.SaveTopics(db.DefaultContext, 2, "golang", "gitea"))
+ repo2NrOfTopics = 2
+ totalNrOfTopics++
+ topic, err := repo_model.GetTopicByName(db.DefaultContext, "gitea")
+ require.NoError(t, err)
+ assert.EqualValues(t, 1, topic.RepoCount)
+
+ topics, _, err = repo_model.FindTopics(db.DefaultContext, &repo_model.FindTopicOptions{})
+ require.NoError(t, err)
+ assert.Len(t, topics, totalNrOfTopics)
+
+ topics, _, err = repo_model.FindTopics(db.DefaultContext, &repo_model.FindTopicOptions{
+ RepoID: 2,
+ })
+ require.NoError(t, err)
+ assert.Len(t, topics, repo2NrOfTopics)
+}
+
+func TestTopicValidator(t *testing.T) {
+ assert.True(t, repo_model.ValidateTopic("12345"))
+ assert.True(t, repo_model.ValidateTopic("2-test"))
+ assert.True(t, repo_model.ValidateTopic("foo.bar"))
+ assert.True(t, repo_model.ValidateTopic("test-3"))
+ assert.True(t, repo_model.ValidateTopic("first"))
+ assert.True(t, repo_model.ValidateTopic("second-test-topic"))
+ assert.True(t, repo_model.ValidateTopic("third-project-topic-with-max-length"))
+
+ assert.False(t, repo_model.ValidateTopic("$fourth-test,topic"))
+ assert.False(t, repo_model.ValidateTopic("-fifth-test-topic"))
+ assert.False(t, repo_model.ValidateTopic("sixth-go-project-topic-with-excess-length"))
+ assert.False(t, repo_model.ValidateTopic(".foo"))
+}
diff --git a/models/repo/update.go b/models/repo/update.go
new file mode 100644
index 0000000..e7ca224
--- /dev/null
+++ b/models/repo/update.go
@@ -0,0 +1,145 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/util"
+)
+
+// UpdateRepositoryOwnerNames updates repository owner_names (this should only be used when the ownerName has changed case)
+func UpdateRepositoryOwnerNames(ctx context.Context, ownerID int64, ownerName string) error {
+ if ownerID == 0 {
+ return nil
+ }
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if _, err := db.GetEngine(ctx).Where("owner_id = ?", ownerID).Cols("owner_name").Update(&Repository{
+ OwnerName: ownerName,
+ }); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
+
+// UpdateRepositoryUpdatedTime updates a repository's updated time
+func UpdateRepositoryUpdatedTime(ctx context.Context, repoID int64, updateTime time.Time) error {
+ _, err := db.GetEngine(ctx).Exec("UPDATE repository SET updated_unix = ? WHERE id = ?", updateTime.Unix(), repoID)
+ return err
+}
+
+// UpdateRepositoryCols updates repository's columns
+func UpdateRepositoryCols(ctx context.Context, repo *Repository, cols ...string) error {
+ _, err := db.GetEngine(ctx).ID(repo.ID).Cols(cols...).Update(repo)
+ return err
+}
+
+// ErrReachLimitOfRepo represents a "ReachLimitOfRepo" kind of error.
+type ErrReachLimitOfRepo struct {
+ Limit int
+}
+
+// IsErrReachLimitOfRepo checks if an error is an ErrReachLimitOfRepo.
+func IsErrReachLimitOfRepo(err error) bool {
+ _, ok := err.(ErrReachLimitOfRepo)
+ return ok
+}
+
+func (err ErrReachLimitOfRepo) Error() string {
+ return fmt.Sprintf("user has reached maximum limit of repositories [limit: %d]", err.Limit)
+}
+
+func (err ErrReachLimitOfRepo) Unwrap() error {
+ return util.ErrPermissionDenied
+}
+
+// ErrRepoAlreadyExist represents a "RepoAlreadyExist" kind of error.
+type ErrRepoAlreadyExist struct {
+ Uname string
+ Name string
+}
+
+// IsErrRepoAlreadyExist checks if an error is an ErrRepoAlreadyExist.
+func IsErrRepoAlreadyExist(err error) bool {
+ _, ok := err.(ErrRepoAlreadyExist)
+ return ok
+}
+
+func (err ErrRepoAlreadyExist) Error() string {
+ return fmt.Sprintf("repository already exists [uname: %s, name: %s]", err.Uname, err.Name)
+}
+
+func (err ErrRepoAlreadyExist) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+// ErrRepoFilesAlreadyExist represents a "RepoFilesAlreadyExist" kind of error.
+type ErrRepoFilesAlreadyExist struct {
+ Uname string
+ Name string
+}
+
+// IsErrRepoFilesAlreadyExist checks if an error is an ErrRepoFilesAlreadyExist.
+func IsErrRepoFilesAlreadyExist(err error) bool {
+ _, ok := err.(ErrRepoFilesAlreadyExist)
+ return ok
+}
+
+func (err ErrRepoFilesAlreadyExist) Error() string {
+ return fmt.Sprintf("repository files already exist [uname: %s, name: %s]", err.Uname, err.Name)
+}
+
+func (err ErrRepoFilesAlreadyExist) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+// CheckCreateRepository checks whether the doer can create a repository with the given name for user u
+func CheckCreateRepository(ctx context.Context, doer, u *user_model.User, name string, overwriteOrAdopt bool) error {
+ if !doer.CanCreateRepo() {
+ return ErrReachLimitOfRepo{u.MaxRepoCreation}
+ }
+
+ if err := IsUsableRepoName(name); err != nil {
+ return err
+ }
+
+ has, err := IsRepositoryModelOrDirExist(ctx, u, name)
+ if err != nil {
+ return fmt.Errorf("IsRepositoryExist: %w", err)
+ } else if has {
+ return ErrRepoAlreadyExist{u.Name, name}
+ }
+
+ repoPath := RepoPath(u.Name, name)
+ isExist, err := util.IsExist(repoPath)
+ if err != nil {
+ log.Error("Unable to check if %s exists. Error: %v", repoPath, err)
+ return err
+ }
+ if !overwriteOrAdopt && isExist {
+ return ErrRepoFilesAlreadyExist{u.Name, name}
+ }
+ return nil
+}
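+
+// Error-handling sketch (illustrative, hypothetical call site): callers can
+// distinguish the failure modes via the IsErr* helpers defined above.
+//
+//	if err := CheckCreateRepository(ctx, doer, owner, name, false); err != nil {
+//		switch {
+//		case IsErrReachLimitOfRepo(err):
+//			// quota exceeded
+//		case IsErrRepoAlreadyExist(err), IsErrRepoFilesAlreadyExist(err):
+//			// name collision
+//		}
+//	}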
+
+// UpdateRepoSize updates the repository's size columns; size is the sum of the git and LFS sizes
+func UpdateRepoSize(ctx context.Context, repoID, gitSize, lfsSize int64) error {
+ _, err := db.GetEngine(ctx).ID(repoID).Cols("size", "git_size", "lfs_size").NoAutoTime().Update(&Repository{
+ Size: gitSize + lfsSize,
+ GitSize: gitSize,
+ LFSSize: lfsSize,
+ })
+ return err
+}
diff --git a/models/repo/upload.go b/models/repo/upload.go
new file mode 100644
index 0000000..18834f6
--- /dev/null
+++ b/models/repo/upload.go
@@ -0,0 +1,175 @@
+// Copyright 2016 The Gogs Authors. All rights reserved.
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "mime/multipart"
+ "os"
+ "path"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+
+ gouuid "github.com/google/uuid"
+)
+
+// ErrUploadNotExist represents a "UploadNotExist" kind of error.
+type ErrUploadNotExist struct {
+ ID int64
+ UUID string
+}
+
+// IsErrUploadNotExist checks if an error is an ErrUploadNotExist.
+func IsErrUploadNotExist(err error) bool {
+ _, ok := err.(ErrUploadNotExist)
+ return ok
+}
+
+func (err ErrUploadNotExist) Error() string {
+	return fmt.Sprintf("upload does not exist [id: %d, uuid: %s]", err.ID, err.UUID)
+}
+
+func (err ErrUploadNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// Upload represents an uploaded file to a repo, to be deleted when moved
+type Upload struct {
+ ID int64 `xorm:"pk autoincr"`
+ UUID string `xorm:"uuid UNIQUE"`
+ Name string
+}
+
+func init() {
+ db.RegisterModel(new(Upload))
+}
+
+// UploadLocalPath returns where an upload is stored in the local file system, based on the given UUID.
+func UploadLocalPath(uuid string) string {
+ return path.Join(setting.Repository.Upload.TempPath, uuid[0:1], uuid[1:2], uuid)
+}
+
+// LocalPath returns where the upload is temporarily stored in the local file system.
+func (upload *Upload) LocalPath() string {
+ return UploadLocalPath(upload.UUID)
+}
+
+// NewUpload creates a new upload object.
+func NewUpload(ctx context.Context, name string, buf []byte, file multipart.File) (_ *Upload, err error) {
+ upload := &Upload{
+ UUID: gouuid.New().String(),
+ Name: name,
+ }
+
+ localPath := upload.LocalPath()
+ if err = os.MkdirAll(path.Dir(localPath), os.ModePerm); err != nil {
+ return nil, fmt.Errorf("MkdirAll: %w", err)
+ }
+
+ fw, err := os.Create(localPath)
+ if err != nil {
+ return nil, fmt.Errorf("Create: %w", err)
+ }
+ defer fw.Close()
+
+ if _, err = fw.Write(buf); err != nil {
+ return nil, fmt.Errorf("Write: %w", err)
+ } else if _, err = io.Copy(fw, file); err != nil {
+ return nil, fmt.Errorf("Copy: %w", err)
+ }
+
+ if _, err := db.GetEngine(ctx).Insert(upload); err != nil {
+ return nil, err
+ }
+
+ return upload, nil
+}
+
+// GetUploadByUUID returns the Upload by UUID
+func GetUploadByUUID(ctx context.Context, uuid string) (*Upload, error) {
+ upload := &Upload{}
+ has, err := db.GetEngine(ctx).Where("uuid=?", uuid).Get(upload)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrUploadNotExist{0, uuid}
+ }
+ return upload, nil
+}
+
+// GetUploadsByUUIDs returns multiple uploads by UUIDs
+func GetUploadsByUUIDs(ctx context.Context, uuids []string) ([]*Upload, error) {
+ if len(uuids) == 0 {
+ return []*Upload{}, nil
+ }
+
+ // Silently drop invalid uuids.
+ uploads := make([]*Upload, 0, len(uuids))
+ return uploads, db.GetEngine(ctx).In("uuid", uuids).Find(&uploads)
+}
+
+// DeleteUploads deletes multiple uploads
+func DeleteUploads(ctx context.Context, uploads ...*Upload) (err error) {
+ if len(uploads) == 0 {
+ return nil
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ ids := make([]int64, len(uploads))
+ for i := 0; i < len(uploads); i++ {
+ ids[i] = uploads[i].ID
+ }
+ if err = db.DeleteByIDs[Upload](ctx, ids...); err != nil {
+ return fmt.Errorf("delete uploads: %w", err)
+ }
+
+ if err = committer.Commit(); err != nil {
+ return err
+ }
+
+ for _, upload := range uploads {
+ localPath := upload.LocalPath()
+ isFile, err := util.IsFile(localPath)
+ if err != nil {
+ log.Error("Unable to check if %s is a file. Error: %v", localPath, err)
+ }
+ if !isFile {
+ continue
+ }
+
+ if err := util.Remove(localPath); err != nil {
+ return fmt.Errorf("remove upload: %w", err)
+ }
+ }
+
+ return nil
+}
+
+// DeleteUploadByUUID deletes an upload by UUID
+func DeleteUploadByUUID(ctx context.Context, uuid string) error {
+ upload, err := GetUploadByUUID(ctx, uuid)
+ if err != nil {
+ if IsErrUploadNotExist(err) {
+ return nil
+ }
+ return fmt.Errorf("GetUploadByUUID: %w", err)
+ }
+
+ if err := DeleteUploads(ctx, upload); err != nil {
+ return fmt.Errorf("DeleteUpload: %w", err)
+ }
+
+ return nil
+}
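+
+// Lifecycle sketch (illustrative): an upload is written to the temp path on
+// creation and removed again once consumed; hdr, firstBytes and file stand in
+// for hypothetical multipart form values.
+//
+//	up, err := NewUpload(ctx, hdr.Filename, firstBytes, file)
+//	// ... attach up.UUID to the pending change ...
+//	_ = DeleteUploadByUUID(ctx, up.UUID)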
diff --git a/models/repo/user_repo.go b/models/repo/user_repo.go
new file mode 100644
index 0000000..6790ee1
--- /dev/null
+++ b/models/repo/user_repo.go
@@ -0,0 +1,197 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/container"
+ api "code.gitea.io/gitea/modules/structs"
+
+ "xorm.io/builder"
+)
+
+// GetStarredRepos returns the repos starred by a particular user
+func GetStarredRepos(ctx context.Context, userID int64, private bool, listOptions db.ListOptions) ([]*Repository, error) {
+ sess := db.GetEngine(ctx).
+ Where("star.uid=?", userID).
+ Join("LEFT", "star", "`repository`.id=`star`.repo_id")
+ if !private {
+ sess = sess.And("is_private=?", false)
+ }
+
+ if listOptions.Page != 0 {
+ sess = db.SetSessionPagination(sess, &listOptions)
+
+ repos := make([]*Repository, 0, listOptions.PageSize)
+ return repos, sess.Find(&repos)
+ }
+
+ repos := make([]*Repository, 0, 10)
+ return repos, sess.Find(&repos)
+}
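+
+// An illustrative sketch of how the two branches above are selected: a zero
+// Page disables pagination and returns every starred repository.
+//
+//	page1, _ := GetStarredRepos(ctx, userID, false, db.ListOptions{Page: 1, PageSize: 20})
+//	all, _ := GetStarredRepos(ctx, userID, false, db.ListOptions{}) // Page == 0: no pagination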
+
+// GetWatchedRepos returns the repos watched by a particular user
+func GetWatchedRepos(ctx context.Context, userID int64, private bool, listOptions db.ListOptions) ([]*Repository, int64, error) {
+ sess := db.GetEngine(ctx).
+ Where("watch.user_id=?", userID).
+ And("`watch`.mode<>?", WatchModeDont).
+ Join("LEFT", "watch", "`repository`.id=`watch`.repo_id")
+ if !private {
+ sess = sess.And("is_private=?", false)
+ }
+
+ if listOptions.Page != 0 {
+ sess = db.SetSessionPagination(sess, &listOptions)
+
+ repos := make([]*Repository, 0, listOptions.PageSize)
+ total, err := sess.FindAndCount(&repos)
+ return repos, total, err
+ }
+
+ repos := make([]*Repository, 0, 10)
+ total, err := sess.FindAndCount(&repos)
+ return repos, total, err
+}
+
+// GetRepoAssignees returns all users that have write access to the repository
+// and can be assigned to its issues.
+func GetRepoAssignees(ctx context.Context, repo *Repository) (_ []*user_model.User, err error) {
+ if err = repo.LoadOwner(ctx); err != nil {
+ return nil, err
+ }
+
+ e := db.GetEngine(ctx)
+ userIDs := make([]int64, 0, 10)
+ if err = e.Table("access").
+ Where("repo_id = ? AND mode >= ?", repo.ID, perm.AccessModeWrite).
+ Select("user_id").
+ Find(&userIDs); err != nil {
+ return nil, err
+ }
+
+ additionalUserIDs := make([]int64, 0, 10)
+ if err = e.Table("team_user").
+ Join("INNER", "team_repo", "`team_repo`.team_id = `team_user`.team_id").
+ Join("INNER", "team_unit", "`team_unit`.team_id = `team_user`.team_id").
+ Where("`team_repo`.repo_id = ? AND (`team_unit`.access_mode >= ? OR (`team_unit`.access_mode = ? AND `team_unit`.`type` = ?))",
+ repo.ID, perm.AccessModeWrite, perm.AccessModeRead, unit.TypePullRequests).
+ Distinct("`team_user`.uid").
+ Select("`team_user`.uid").
+ Find(&additionalUserIDs); err != nil {
+ return nil, err
+ }
+
+ uniqueUserIDs := make(container.Set[int64])
+ uniqueUserIDs.AddMultiple(userIDs...)
+ uniqueUserIDs.AddMultiple(additionalUserIDs...)
+
+	// Leave a seat for the owner itself to append later; if the owner is an
+	// organization, wasting one slot is cheaper than reallocating the slice.
+ users := make([]*user_model.User, 0, len(uniqueUserIDs)+1)
+ if len(userIDs) > 0 {
+ if err = e.In("id", uniqueUserIDs.Values()).
+ Where(builder.Eq{"`user`.is_active": true}).
+ OrderBy(user_model.GetOrderByName()).
+ Find(&users); err != nil {
+ return nil, err
+ }
+ }
+ if !repo.Owner.IsOrganization() && !uniqueUserIDs.Contains(repo.OwnerID) {
+ users = append(users, repo.Owner)
+ }
+
+ return users, nil
+}
+
+// GetReviewers returns all users that can be requested to review:
+// * for private repositories this returns all users that have read access or higher to the repository.
+// * for public repositories this returns all users that have read access or higher to the repository,
+// all repo watchers and all organization members.
+// TODO: maybe we should add an option for users to block review requests to them.
+func GetReviewers(ctx context.Context, repo *Repository, doerID, posterID int64) ([]*user_model.User, error) {
+	// Get the owner of the repository - this is often already pre-cached and, if so, saves complexity for the following queries
+ if err := repo.LoadOwner(ctx); err != nil {
+ return nil, err
+ }
+
+ cond := builder.And(builder.Neq{"`user`.id": posterID}).
+ And(builder.Eq{"`user`.is_active": true})
+
+ if repo.IsPrivate || repo.Owner.Visibility == api.VisibleTypePrivate {
+		// This is a private repository:
+ // Anyone who can read the repository is a requestable reviewer
+
+ cond = cond.And(builder.In("`user`.id",
+ builder.Select("user_id").From("access").Where(
+ builder.Eq{"repo_id": repo.ID}.
+ And(builder.Gte{"mode": perm.AccessModeRead}),
+ ),
+ ))
+
+ if repo.Owner.Type == user_model.UserTypeIndividual && repo.Owner.ID != posterID {
+ // as private *user* repos don't generate an entry in the `access` table,
+ // the owner of a private repo needs to be explicitly added.
+ cond = cond.Or(builder.Eq{"`user`.id": repo.Owner.ID})
+ }
+ } else {
+ // This is a "public" repository:
+		// Any user that has read access, is a watcher, or is an organization member can be requested to review
+ cond = cond.And(builder.And(builder.In("`user`.id",
+ builder.Select("user_id").From("access").
+ Where(builder.Eq{"repo_id": repo.ID}.
+ And(builder.Gte{"mode": perm.AccessModeRead})),
+ ).Or(builder.In("`user`.id",
+ builder.Select("user_id").From("watch").
+ Where(builder.Eq{"repo_id": repo.ID}.
+ And(builder.In("mode", WatchModeNormal, WatchModeAuto))),
+ ).Or(builder.In("`user`.id",
+ builder.Select("uid").From("org_user").
+ Where(builder.Eq{"org_id": repo.OwnerID}),
+ )))))
+ }
+
+ users := make([]*user_model.User, 0, 8)
+ return users, db.GetEngine(ctx).Where(cond).OrderBy(user_model.GetOrderByName()).Find(&users)
+}
+
+// GetIssuePostersWithSearch returns up to 30 users whose username starts with the given prefix and who have authored an issue/pull request in the given repository.
+// If isShowFullName is set to true, the search also matches against the full name.
+func GetIssuePostersWithSearch(ctx context.Context, repo *Repository, isPull bool, search string, isShowFullName bool) ([]*user_model.User, error) {
+ users := make([]*user_model.User, 0, 30)
+ var prefixCond builder.Cond = builder.Like{"name", search + "%"}
+ if isShowFullName {
+ prefixCond = prefixCond.Or(builder.Like{"full_name", "%" + search + "%"})
+ }
+
+ cond := builder.In("`user`.id",
+ builder.Select("poster_id").From("issue").Where(
+ builder.Eq{"repo_id": repo.ID}.
+ And(builder.Eq{"is_pull": isPull}),
+ ).GroupBy("poster_id")).And(prefixCond)
+
+ return users, db.GetEngine(ctx).
+ Where(cond).
+ Cols("id", "name", "full_name", "avatar", "avatar_email", "use_custom_avatar").
+ OrderBy("name").
+ Limit(30).
+ Find(&users)
+}
+
+// GetWatchedRepoIDsOwnedBy returns the repos owned by a particular user watched by a particular user
+func GetWatchedRepoIDsOwnedBy(ctx context.Context, userID, ownedByUserID int64) ([]int64, error) {
+ repoIDs := make([]int64, 0, 10)
+ err := db.GetEngine(ctx).
+ Table("repository").
+ Select("`repository`.id").
+ Join("LEFT", "watch", "`repository`.id=`watch`.repo_id").
+ Where("`watch`.user_id=?", userID).
+ And("`watch`.mode<>?", WatchModeDont).
+ And("`repository`.owner_id=?", ownedByUserID).Find(&repoIDs)
+ return repoIDs, err
+}
diff --git a/models/repo/user_repo_test.go b/models/repo/user_repo_test.go
new file mode 100644
index 0000000..c784a55
--- /dev/null
+++ b/models/repo/user_repo_test.go
@@ -0,0 +1,96 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestRepoAssignees(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo2 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 2})
+ users, err := repo_model.GetRepoAssignees(db.DefaultContext, repo2)
+ require.NoError(t, err)
+ assert.Len(t, users, 1)
+ assert.Equal(t, int64(2), users[0].ID)
+
+ repo21 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 21})
+ users, err = repo_model.GetRepoAssignees(db.DefaultContext, repo21)
+ require.NoError(t, err)
+ if assert.Len(t, users, 3) {
+ assert.ElementsMatch(t, []int64{15, 16, 18}, []int64{users[0].ID, users[1].ID, users[2].ID})
+ }
+
+ // do not return deactivated users
+ require.NoError(t, user_model.UpdateUserCols(db.DefaultContext, &user_model.User{ID: 15, IsActive: false}, "is_active"))
+ users, err = repo_model.GetRepoAssignees(db.DefaultContext, repo21)
+ require.NoError(t, err)
+ if assert.Len(t, users, 2) {
+ assert.NotContains(t, []int64{users[0].ID, users[1].ID}, 15)
+ }
+}
+
+func TestRepoGetReviewers(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ // test public repo
+ repo1 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+
+ ctx := db.DefaultContext
+ reviewers, err := repo_model.GetReviewers(ctx, repo1, 2, 2)
+ require.NoError(t, err)
+ if assert.Len(t, reviewers, 3) {
+ assert.ElementsMatch(t, []int64{1, 4, 11}, []int64{reviewers[0].ID, reviewers[1].ID, reviewers[2].ID})
+ }
+
+ // should include doer if doer is not PR poster.
+ reviewers, err = repo_model.GetReviewers(ctx, repo1, 11, 2)
+ require.NoError(t, err)
+ assert.Len(t, reviewers, 3)
+
+ // should not include PR poster, if PR poster would be otherwise eligible
+ reviewers, err = repo_model.GetReviewers(ctx, repo1, 11, 4)
+ require.NoError(t, err)
+ assert.Len(t, reviewers, 2)
+
+ // test private user repo
+ repo2 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 2})
+
+ reviewers, err = repo_model.GetReviewers(ctx, repo2, 2, 4)
+ require.NoError(t, err)
+ assert.Len(t, reviewers, 1)
+ assert.EqualValues(t, 2, reviewers[0].ID)
+
+ // test private org repo
+ repo3 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 3})
+
+ reviewers, err = repo_model.GetReviewers(ctx, repo3, 2, 1)
+ require.NoError(t, err)
+ assert.Len(t, reviewers, 2)
+
+ reviewers, err = repo_model.GetReviewers(ctx, repo3, 2, 2)
+ require.NoError(t, err)
+ assert.Len(t, reviewers, 1)
+}
+
+func TestGetWatchedRepoIDsOwnedBy(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ user1 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 9})
+ user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+
+ repoIDs, err := repo_model.GetWatchedRepoIDsOwnedBy(db.DefaultContext, user1.ID, user2.ID)
+ require.NoError(t, err)
+ assert.Len(t, repoIDs, 1)
+ assert.EqualValues(t, 1, repoIDs[0])
+}
diff --git a/models/repo/watch.go b/models/repo/watch.go
new file mode 100644
index 0000000..6974d89
--- /dev/null
+++ b/models/repo/watch.go
@@ -0,0 +1,190 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/timeutil"
+)
+
+// WatchMode specifies what kind of watch the user has on a repository
+type WatchMode int8
+
+const (
+	// WatchModeNone means the repository is not watched
+	WatchModeNone WatchMode = iota // 0
+	// WatchModeNormal means the repository is watched normally (set manually or from other sources)
+	WatchModeNormal // 1
+	// WatchModeDont means the user has explicitly chosen not to (auto-)watch
+	WatchModeDont // 2
+	// WatchModeAuto means the repository is watched automatically (from AutoWatchOnChanges)
+	WatchModeAuto // 3
+)
+
+// Watch represents a subscription of a user to receive notifications for a repository.
+type Watch struct {
+ ID int64 `xorm:"pk autoincr"`
+ UserID int64 `xorm:"UNIQUE(watch)"`
+ RepoID int64 `xorm:"UNIQUE(watch)"`
+ Mode WatchMode `xorm:"SMALLINT NOT NULL DEFAULT 1"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+}
+
+func init() {
+ db.RegisterModel(new(Watch))
+}
+
+// GetWatch gets what kind of subscription a user has on a given repository; it returns a dummy record with WatchModeNone if none is found
+func GetWatch(ctx context.Context, userID, repoID int64) (Watch, error) {
+ watch := Watch{UserID: userID, RepoID: repoID}
+ has, err := db.GetEngine(ctx).Get(&watch)
+ if err != nil {
+ return watch, err
+ }
+ if !has {
+ watch.Mode = WatchModeNone
+ }
+ return watch, nil
+}
+
+// IsWatchMode reports whether the given WatchMode counts as watching
+func IsWatchMode(mode WatchMode) bool {
+ return mode != WatchModeNone && mode != WatchModeDont
+}
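+
+// For illustration, the mapping implied by the check above:
+//
+//	IsWatchMode(WatchModeNormal) // true
+//	IsWatchMode(WatchModeAuto)   // true
+//	IsWatchMode(WatchModeNone)   // false
+//	IsWatchMode(WatchModeDont)   // false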
+
+// IsWatching checks if user has watched given repository.
+func IsWatching(ctx context.Context, userID, repoID int64) bool {
+ watch, err := GetWatch(ctx, userID, repoID)
+ return err == nil && IsWatchMode(watch.Mode)
+}
+
+func watchRepoMode(ctx context.Context, watch Watch, mode WatchMode) (err error) {
+ if watch.Mode == mode {
+ return nil
+ }
+ if mode == WatchModeAuto && (watch.Mode == WatchModeDont || IsWatchMode(watch.Mode)) {
+ // Don't auto watch if already watching or deliberately not watching
+ return nil
+ }
+
+ hadrec := watch.Mode != WatchModeNone
+ needsrec := mode != WatchModeNone
+ repodiff := 0
+
+ if IsWatchMode(mode) && !IsWatchMode(watch.Mode) {
+ repodiff = 1
+ } else if !IsWatchMode(mode) && IsWatchMode(watch.Mode) {
+ repodiff = -1
+ }
+
+	watch.Mode = mode
+
+	if !hadrec && needsrec {
+		if err = db.Insert(ctx, watch); err != nil {
+			return err
+		}
+	} else if needsrec {
+		if _, err := db.GetEngine(ctx).ID(watch.ID).AllCols().Update(watch); err != nil {
+			return err
+		}
+ } else if _, err = db.DeleteByID[Watch](ctx, watch.ID); err != nil {
+ return err
+ }
+ if repodiff != 0 {
+ _, err = db.GetEngine(ctx).Exec("UPDATE `repository` SET num_watches = num_watches + ? WHERE id = ?", repodiff, watch.RepoID)
+ }
+ return err
+}
+
+// WatchRepoMode watch repository in specific mode.
+func WatchRepoMode(ctx context.Context, userID, repoID int64, mode WatchMode) (err error) {
+ var watch Watch
+ if watch, err = GetWatch(ctx, userID, repoID); err != nil {
+ return err
+ }
+ return watchRepoMode(ctx, watch, mode)
+}
+
+// WatchRepo watch or unwatch repository.
+func WatchRepo(ctx context.Context, userID, repoID int64, doWatch bool) (err error) {
+ var watch Watch
+ if watch, err = GetWatch(ctx, userID, repoID); err != nil {
+ return err
+ }
+ if !doWatch && watch.Mode == WatchModeAuto {
+ err = watchRepoMode(ctx, watch, WatchModeDont)
+ } else if !doWatch {
+ err = watchRepoMode(ctx, watch, WatchModeNone)
+ } else {
+ err = watchRepoMode(ctx, watch, WatchModeNormal)
+ }
+ return err
+}
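+
+// A sketch of the resulting modes, assuming no prior watch record exists:
+//
+//	_ = WatchRepo(ctx, userID, repoID, true)  // creates a record with WatchModeNormal
+//	_ = WatchRepo(ctx, userID, repoID, false) // deletes the record again (WatchModeNone)
+//
+// If the existing mode is WatchModeAuto, unwatching stores WatchModeDont
+// instead, so later auto-watching cannot silently re-subscribe the user.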
+
+// GetWatchers returns all watchers of the given repository.
+func GetWatchers(ctx context.Context, repoID int64) ([]*Watch, error) {
+ watches := make([]*Watch, 0, 10)
+ return watches, db.GetEngine(ctx).Where("`watch`.repo_id=?", repoID).
+ And("`watch`.mode<>?", WatchModeDont).
+ And("`user`.is_active=?", true).
+ And("`user`.prohibit_login=?", false).
+ Join("INNER", "`user`", "`user`.id = `watch`.user_id").
+ Find(&watches)
+}
+
+// GetRepoWatchersIDs returns the IDs of the watchers for a given repo ID,
+// but avoids joining with `user` for performance reasons.
+// User permissions must be verified elsewhere if required.
+func GetRepoWatchersIDs(ctx context.Context, repoID int64) ([]int64, error) {
+ ids := make([]int64, 0, 64)
+ return ids, db.GetEngine(ctx).Table("watch").
+ Where("watch.repo_id=?", repoID).
+ And("watch.mode<>?", WatchModeDont).
+ Select("user_id").
+ Find(&ids)
+}
+
+// GetRepoWatchers returns a page of users watching the given repository.
+func GetRepoWatchers(ctx context.Context, repoID int64, opts db.ListOptions) ([]*user_model.User, error) {
+ sess := db.GetEngine(ctx).Where("watch.repo_id=?", repoID).
+ Join("LEFT", "watch", "`user`.id=`watch`.user_id").
+ And("`watch`.mode<>?", WatchModeDont)
+ if opts.Page > 0 {
+ sess = db.SetSessionPagination(sess, &opts)
+ users := make([]*user_model.User, 0, opts.PageSize)
+
+ return users, sess.Find(&users)
+ }
+
+ users := make([]*user_model.User, 0, 8)
+ return users, sess.Find(&users)
+}
+
+// WatchIfAuto subscribes the user to the repository if AutoWatchOnChanges is enabled and the change is a write
+func WatchIfAuto(ctx context.Context, userID, repoID int64, isWrite bool) error {
+ if !isWrite || !setting.Service.AutoWatchOnChanges {
+ return nil
+ }
+ watch, err := GetWatch(ctx, userID, repoID)
+ if err != nil {
+ return err
+ }
+ if watch.Mode != WatchModeNone {
+ return nil
+ }
+ return watchRepoMode(ctx, watch, WatchModeAuto)
+}
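+
+// A hypothetical call-site sketch (pusherID is a placeholder): a push handler
+// would report a write so the pusher gets auto-subscribed when the
+// AutoWatchOnChanges setting is enabled.
+//
+//	if err := WatchIfAuto(ctx, pusherID, repoID, true); err != nil {
+//		log.Error("WatchIfAuto: %v", err)
+//	}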
+
+// UnwatchRepos will unwatch the user from all given repositories.
+func UnwatchRepos(ctx context.Context, userID int64, repoIDs []int64) error {
+ _, err := db.GetEngine(ctx).Where("user_id=?", userID).In("repo_id", repoIDs).Delete(&Watch{})
+ return err
+}
diff --git a/models/repo/watch_test.go b/models/repo/watch_test.go
new file mode 100644
index 0000000..dbf1505
--- /dev/null
+++ b/models/repo/watch_test.go
@@ -0,0 +1,153 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/modules/setting"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestIsWatching(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ assert.True(t, repo_model.IsWatching(db.DefaultContext, 1, 1))
+ assert.True(t, repo_model.IsWatching(db.DefaultContext, 4, 1))
+ assert.True(t, repo_model.IsWatching(db.DefaultContext, 11, 1))
+
+ assert.False(t, repo_model.IsWatching(db.DefaultContext, 1, 5))
+ assert.False(t, repo_model.IsWatching(db.DefaultContext, 8, 1))
+ assert.False(t, repo_model.IsWatching(db.DefaultContext, unittest.NonexistentID, unittest.NonexistentID))
+}
+
+func TestGetWatchers(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ watches, err := repo_model.GetWatchers(db.DefaultContext, repo.ID)
+ require.NoError(t, err)
+	// One watcher is inactive, hence the minus 1
+ assert.Len(t, watches, repo.NumWatches-1)
+ for _, watch := range watches {
+ assert.EqualValues(t, repo.ID, watch.RepoID)
+ }
+
+ watches, err = repo_model.GetWatchers(db.DefaultContext, unittest.NonexistentID)
+ require.NoError(t, err)
+ assert.Empty(t, watches)
+}
+
+func TestRepository_GetWatchers(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ watchers, err := repo_model.GetRepoWatchers(db.DefaultContext, repo.ID, db.ListOptions{Page: 1})
+ require.NoError(t, err)
+ assert.Len(t, watchers, repo.NumWatches)
+ for _, watcher := range watchers {
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Watch{UserID: watcher.ID, RepoID: repo.ID})
+ }
+
+ repo = unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 9})
+ watchers, err = repo_model.GetRepoWatchers(db.DefaultContext, repo.ID, db.ListOptions{Page: 1})
+ require.NoError(t, err)
+ assert.Empty(t, watchers)
+}
+
+func TestWatchIfAuto(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ watchers, err := repo_model.GetRepoWatchers(db.DefaultContext, repo.ID, db.ListOptions{Page: 1})
+ require.NoError(t, err)
+ assert.Len(t, watchers, repo.NumWatches)
+
+ setting.Service.AutoWatchOnChanges = false
+
+ prevCount := repo.NumWatches
+
+	// Must not add a watch: AutoWatchOnChanges is disabled
+ require.NoError(t, repo_model.WatchIfAuto(db.DefaultContext, 8, 1, true))
+ watchers, err = repo_model.GetRepoWatchers(db.DefaultContext, repo.ID, db.ListOptions{Page: 1})
+ require.NoError(t, err)
+ assert.Len(t, watchers, prevCount)
+
+	// Must not add a watch: AutoWatchOnChanges is disabled
+ require.NoError(t, repo_model.WatchIfAuto(db.DefaultContext, 10, 1, true))
+ watchers, err = repo_model.GetRepoWatchers(db.DefaultContext, repo.ID, db.ListOptions{Page: 1})
+ require.NoError(t, err)
+ assert.Len(t, watchers, prevCount)
+
+ setting.Service.AutoWatchOnChanges = true
+
+	// Must not add a watch: user 8 has explicitly unwatched (WatchModeDont)
+ require.NoError(t, repo_model.WatchIfAuto(db.DefaultContext, 8, 1, true))
+ watchers, err = repo_model.GetRepoWatchers(db.DefaultContext, repo.ID, db.ListOptions{Page: 1})
+ require.NoError(t, err)
+ assert.Len(t, watchers, prevCount)
+
+	// Must not add a watch: not a write operation
+ require.NoError(t, repo_model.WatchIfAuto(db.DefaultContext, 12, 1, false))
+ watchers, err = repo_model.GetRepoWatchers(db.DefaultContext, repo.ID, db.ListOptions{Page: 1})
+ require.NoError(t, err)
+ assert.Len(t, watchers, prevCount)
+
+ // Should add watch
+ require.NoError(t, repo_model.WatchIfAuto(db.DefaultContext, 12, 1, true))
+ watchers, err = repo_model.GetRepoWatchers(db.DefaultContext, repo.ID, db.ListOptions{Page: 1})
+ require.NoError(t, err)
+ assert.Len(t, watchers, prevCount+1)
+
+	// Should remove the watch and inhibit future auto-watching
+ require.NoError(t, repo_model.WatchRepo(db.DefaultContext, 12, 1, false))
+ watchers, err = repo_model.GetRepoWatchers(db.DefaultContext, repo.ID, db.ListOptions{Page: 1})
+ require.NoError(t, err)
+ assert.Len(t, watchers, prevCount)
+
+	// Must not add a watch: the user explicitly unwatched above (WatchModeDont)
+ require.NoError(t, repo_model.WatchIfAuto(db.DefaultContext, 12, 1, true))
+ watchers, err = repo_model.GetRepoWatchers(db.DefaultContext, repo.ID, db.ListOptions{Page: 1})
+ require.NoError(t, err)
+ assert.Len(t, watchers, prevCount)
+}
+
+func TestWatchRepoMode(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ unittest.AssertCount(t, &repo_model.Watch{UserID: 12, RepoID: 1}, 0)
+
+ require.NoError(t, repo_model.WatchRepoMode(db.DefaultContext, 12, 1, repo_model.WatchModeAuto))
+ unittest.AssertCount(t, &repo_model.Watch{UserID: 12, RepoID: 1}, 1)
+ unittest.AssertCount(t, &repo_model.Watch{UserID: 12, RepoID: 1, Mode: repo_model.WatchModeAuto}, 1)
+
+ require.NoError(t, repo_model.WatchRepoMode(db.DefaultContext, 12, 1, repo_model.WatchModeNormal))
+ unittest.AssertCount(t, &repo_model.Watch{UserID: 12, RepoID: 1}, 1)
+ unittest.AssertCount(t, &repo_model.Watch{UserID: 12, RepoID: 1, Mode: repo_model.WatchModeNormal}, 1)
+
+ require.NoError(t, repo_model.WatchRepoMode(db.DefaultContext, 12, 1, repo_model.WatchModeDont))
+ unittest.AssertCount(t, &repo_model.Watch{UserID: 12, RepoID: 1}, 1)
+ unittest.AssertCount(t, &repo_model.Watch{UserID: 12, RepoID: 1, Mode: repo_model.WatchModeDont}, 1)
+
+ require.NoError(t, repo_model.WatchRepoMode(db.DefaultContext, 12, 1, repo_model.WatchModeNone))
+ unittest.AssertCount(t, &repo_model.Watch{UserID: 12, RepoID: 1}, 0)
+}
+
+func TestUnwatchRepos(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Watch{UserID: 4, RepoID: 1})
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Watch{UserID: 4, RepoID: 2})
+
+ err := repo_model.UnwatchRepos(db.DefaultContext, 4, []int64{1, 2})
+ require.NoError(t, err)
+
+ unittest.AssertNotExistsBean(t, &repo_model.Watch{UserID: 4, RepoID: 1})
+ unittest.AssertNotExistsBean(t, &repo_model.Watch{UserID: 4, RepoID: 2})
+}
diff --git a/models/repo/wiki.go b/models/repo/wiki.go
new file mode 100644
index 0000000..b378666
--- /dev/null
+++ b/models/repo/wiki.go
@@ -0,0 +1,96 @@
+// Copyright 2015 The Gogs Authors. All rights reserved.
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/util"
+)
+
+// ErrWikiAlreadyExist represents a "WikiAlreadyExist" kind of error.
+type ErrWikiAlreadyExist struct {
+ Title string
+}
+
+// IsErrWikiAlreadyExist checks if an error is an ErrWikiAlreadyExist.
+func IsErrWikiAlreadyExist(err error) bool {
+ _, ok := err.(ErrWikiAlreadyExist)
+ return ok
+}
+
+func (err ErrWikiAlreadyExist) Error() string {
+ return fmt.Sprintf("wiki page already exists [title: %s]", err.Title)
+}
+
+func (err ErrWikiAlreadyExist) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+// ErrWikiReservedName represents a reserved name error.
+type ErrWikiReservedName struct {
+ Title string
+}
+
+// IsErrWikiReservedName checks if an error is an ErrWikiReservedName.
+func IsErrWikiReservedName(err error) bool {
+ _, ok := err.(ErrWikiReservedName)
+ return ok
+}
+
+func (err ErrWikiReservedName) Error() string {
+ return fmt.Sprintf("wiki title is reserved: %s", err.Title)
+}
+
+func (err ErrWikiReservedName) Unwrap() error {
+ return util.ErrInvalidArgument
+}
+
+// ErrWikiInvalidFileName represents an invalid wiki file name.
+type ErrWikiInvalidFileName struct {
+ FileName string
+}
+
+// IsErrWikiInvalidFileName checks if an error is an ErrWikiInvalidFileName.
+func IsErrWikiInvalidFileName(err error) bool {
+ _, ok := err.(ErrWikiInvalidFileName)
+ return ok
+}
+
+func (err ErrWikiInvalidFileName) Error() string {
+ return fmt.Sprintf("Invalid wiki filename: %s", err.FileName)
+}
+
+func (err ErrWikiInvalidFileName) Unwrap() error {
+ return util.ErrInvalidArgument
+}
+
+// WikiCloneLink returns the clone URLs of the repository wiki.
+func (repo *Repository) WikiCloneLink() *CloneLink {
+ return repo.cloneLink(true)
+}
+
+// WikiPath returns the wiki data path for the given user and repository name.
+func WikiPath(userName, repoName string) string {
+ return filepath.Join(user_model.UserPath(userName), strings.ToLower(repoName)+".wiki.git")
+}
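+
+// For example (an illustrative sketch; note the repository name is lowercased):
+//
+//	WikiPath("user2", "Repo1") // => <RepoRootPath>/user2/repo1.wiki.git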
+
+// WikiPath returns the wiki data path of the repository.
+func (repo *Repository) WikiPath() string {
+ return WikiPath(repo.OwnerName, repo.Name)
+}
+
+// HasWiki returns true if the repository has a wiki.
+func (repo *Repository) HasWiki() bool {
+ isDir, err := util.IsDir(repo.WikiPath())
+ if err != nil {
+ log.Error("Unable to check if %s is a directory: %v", repo.WikiPath(), err)
+ }
+ return isDir
+}
diff --git a/models/repo/wiki_test.go b/models/repo/wiki_test.go
new file mode 100644
index 0000000..28495a4
--- /dev/null
+++ b/models/repo/wiki_test.go
@@ -0,0 +1,46 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "path/filepath"
+ "testing"
+
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/modules/setting"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestRepository_WikiCloneLink(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ cloneLink := repo.WikiCloneLink()
+ assert.Equal(t, "ssh://sshuser@try.gitea.io:3000/user2/repo1.wiki.git", cloneLink.SSH)
+ assert.Equal(t, "https://try.gitea.io/user2/repo1.wiki.git", cloneLink.HTTPS)
+}
+
+func TestWikiPath(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ expected := filepath.Join(setting.RepoRootPath, "user2/repo1.wiki.git")
+ assert.Equal(t, expected, repo_model.WikiPath("user2", "repo1"))
+}
+
+func TestRepository_WikiPath(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ expected := filepath.Join(setting.RepoRootPath, "user2/repo1.wiki.git")
+ assert.Equal(t, expected, repo.WikiPath())
+}
+
+func TestRepository_HasWiki(t *testing.T) {
+ unittest.PrepareTestEnv(t)
+ repo1 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ assert.True(t, repo1.HasWiki())
+ repo2 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 2})
+ assert.False(t, repo2.HasWiki())
+}
diff --git a/models/repo_test.go b/models/repo_test.go
new file mode 100644
index 0000000..958725f
--- /dev/null
+++ b/models/repo_test.go
@@ -0,0 +1,24 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package models
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestCheckRepoStats(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ require.NoError(t, CheckRepoStats(db.DefaultContext))
+}
+
+func TestDoctorUserStarNum(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ require.NoError(t, DoctorUserStarNum(db.DefaultContext))
+}
diff --git a/models/repo_transfer.go b/models/repo_transfer.go
new file mode 100644
index 0000000..0c23d75
--- /dev/null
+++ b/models/repo_transfer.go
@@ -0,0 +1,185 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package models
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/organization"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/timeutil"
+)
+
+// RepoTransfer is used to manage repository transfers
+type RepoTransfer struct {
+ ID int64 `xorm:"pk autoincr"`
+ DoerID int64
+ Doer *user_model.User `xorm:"-"`
+ RecipientID int64
+ Recipient *user_model.User `xorm:"-"`
+ RepoID int64
+ TeamIDs []int64
+ Teams []*organization.Team `xorm:"-"`
+
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX NOT NULL created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX NOT NULL updated"`
+}
+
+func init() {
+ db.RegisterModel(new(RepoTransfer))
+}
+
+// LoadAttributes fetches the transfer recipient, teams and doer from the database
+func (r *RepoTransfer) LoadAttributes(ctx context.Context) error {
+ if r.Recipient == nil {
+ u, err := user_model.GetUserByID(ctx, r.RecipientID)
+ if err != nil {
+ return err
+ }
+
+ r.Recipient = u
+ }
+
+ if r.Recipient.IsOrganization() && len(r.TeamIDs) != len(r.Teams) {
+ for _, v := range r.TeamIDs {
+ team, err := organization.GetTeamByID(ctx, v)
+ if err != nil {
+ return err
+ }
+
+ if team.OrgID != r.Recipient.ID {
+				return fmt.Errorf("team %d does not belong to org %d", v, r.Recipient.ID)
+ }
+
+ r.Teams = append(r.Teams, team)
+ }
+ }
+
+ if r.Doer == nil {
+ u, err := user_model.GetUserByID(ctx, r.DoerID)
+ if err != nil {
+ return err
+ }
+
+ r.Doer = u
+ }
+
+ return nil
+}
+
+// CanUserAcceptTransfer checks if the user has the rights to accept/decline a repo transfer.
+// For users, it checks that the recipient is the user themself.
+// For organizations, it checks that the user is allowed to create repos in the organization.
+func (r *RepoTransfer) CanUserAcceptTransfer(ctx context.Context, u *user_model.User) bool {
+ if err := r.LoadAttributes(ctx); err != nil {
+ log.Error("LoadAttributes: %v", err)
+ return false
+ }
+
+ if !r.Recipient.IsOrganization() {
+ return r.RecipientID == u.ID
+ }
+
+ allowed, err := organization.CanCreateOrgRepo(ctx, r.RecipientID, u.ID)
+ if err != nil {
+ log.Error("CanCreateOrgRepo: %v", err)
+ return false
+ }
+
+ return allowed
+}
+
+// GetPendingRepositoryTransfer fetches the most recent and ongoing transfer
+// process for the repository
+func GetPendingRepositoryTransfer(ctx context.Context, repo *repo_model.Repository) (*RepoTransfer, error) {
+ transfer := new(RepoTransfer)
+
+ has, err := db.GetEngine(ctx).Where("repo_id = ? ", repo.ID).Get(transfer)
+ if err != nil {
+ return nil, err
+ }
+
+ if !has {
+ return nil, ErrNoPendingRepoTransfer{RepoID: repo.ID}
+ }
+
+ return transfer, nil
+}
+
+func DeleteRepositoryTransfer(ctx context.Context, repoID int64) error {
+ _, err := db.GetEngine(ctx).Where("repo_id = ?", repoID).Delete(&RepoTransfer{})
+ return err
+}
+
+// TestRepositoryReadyForTransfer makes sure the repository is in a transferable state
+func TestRepositoryReadyForTransfer(status repo_model.RepositoryStatus) error {
+ switch status {
+ case repo_model.RepositoryBeingMigrated:
+ return errors.New("repo is not ready, currently migrating")
+ case repo_model.RepositoryPendingTransfer:
+ return ErrRepoTransferInProgress{}
+ }
+ return nil
+}
+
+// CreatePendingRepositoryTransfer transfers a repo from one owner to a new one.
+// It marks the repository transfer as "pending".
+func CreatePendingRepositoryTransfer(ctx context.Context, doer, newOwner *user_model.User, repoID int64, teams []*organization.Team) error {
+ return db.WithTx(ctx, func(ctx context.Context) error {
+ repo, err := repo_model.GetRepositoryByID(ctx, repoID)
+ if err != nil {
+ return err
+ }
+
+ // Make sure repo is ready to transfer
+ if err := TestRepositoryReadyForTransfer(repo.Status); err != nil {
+ return err
+ }
+
+ repo.Status = repo_model.RepositoryPendingTransfer
+ if err := repo_model.UpdateRepositoryCols(ctx, repo, "status"); err != nil {
+ return err
+ }
+
+ // Check if new owner has repository with same name.
+ if has, err := repo_model.IsRepositoryModelExist(ctx, newOwner, repo.Name); err != nil {
+ return fmt.Errorf("IsRepositoryExist: %w", err)
+ } else if has {
+ return repo_model.ErrRepoAlreadyExist{
+ Uname: newOwner.LowerName,
+ Name: repo.Name,
+ }
+ }
+
+ transfer := &RepoTransfer{
+ RepoID: repo.ID,
+ RecipientID: newOwner.ID,
+ CreatedUnix: timeutil.TimeStampNow(),
+ UpdatedUnix: timeutil.TimeStampNow(),
+ DoerID: doer.ID,
+ TeamIDs: make([]int64, 0, len(teams)),
+ }
+
+ for k := range teams {
+ transfer.TeamIDs = append(transfer.TeamIDs, teams[k].ID)
+ }
+
+ return db.Insert(ctx, transfer)
+ })
+}
+
+// GetPendingTransferIDs returns the IDs of the pending transfers to the recipient which were initiated by the doer.
+func GetPendingTransferIDs(ctx context.Context, recipientID, doerID int64) ([]int64, error) {
+	pendingTransferIDs := make([]int64, 0, 8)
+	return pendingTransferIDs, db.GetEngine(ctx).Table("repo_transfer").
+		Where("doer_id = ?", doerID).
+		And("recipient_id = ?", recipientID).
+		Cols("id").
+		Find(&pendingTransferIDs)
+}
diff --git a/models/repo_transfer_test.go b/models/repo_transfer_test.go
new file mode 100644
index 0000000..6b6d5a8
--- /dev/null
+++ b/models/repo_transfer_test.go
@@ -0,0 +1,28 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package models
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGetPendingTransferIDs(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ doer := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 3})
+	recipient := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+	pendingTransfer := unittest.AssertExistsAndLoadBean(t, &RepoTransfer{RecipientID: recipient.ID, DoerID: doer.ID})
+
+	pendingTransferIDs, err := GetPendingTransferIDs(db.DefaultContext, recipient.ID, doer.ID)
+ require.NoError(t, err)
+ if assert.Len(t, pendingTransferIDs, 1) {
+ assert.EqualValues(t, pendingTransfer.ID, pendingTransferIDs[0])
+ }
+}
diff --git a/models/secret/secret.go b/models/secret/secret.go
new file mode 100644
index 0000000..ce0ad65
--- /dev/null
+++ b/models/secret/secret.go
@@ -0,0 +1,167 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package secret
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ actions_model "code.gitea.io/gitea/models/actions"
+ "code.gitea.io/gitea/models/db"
+ actions_module "code.gitea.io/gitea/modules/actions"
+ "code.gitea.io/gitea/modules/log"
+ secret_module "code.gitea.io/gitea/modules/secret"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// Secret represents a secret
+//
+// It can be:
+// 1. org/user level secret, OwnerID is org/user ID and RepoID is 0
+// 2. repo level secret, OwnerID is 0 and RepoID is repo ID
+//
+// Please note that it's not acceptable for both OwnerID and RepoID to be non-zero,
+// or it will be complicated to find secrets belonging to a specific owner.
+// For example, conditions like `OwnerID = 1` will also return secret {OwnerID: 1, RepoID: 1},
+// but it's a repo level secret, not an org/user level secret.
+// To avoid this, make it clear with {OwnerID: 0, RepoID: 1} for repo level secrets.
+//
+// Please note that it's also not acceptable for both OwnerID and RepoID to be zero: global secrets are not supported.
+// This is for security reasons: an admin might not be aware that secrets set globally could be read by any user.
+type Secret struct {
+ ID int64
+ OwnerID int64 `xorm:"INDEX UNIQUE(owner_repo_name) NOT NULL"`
+ RepoID int64 `xorm:"INDEX UNIQUE(owner_repo_name) NOT NULL DEFAULT 0"`
+ Name string `xorm:"UNIQUE(owner_repo_name) NOT NULL"`
+ Data string `xorm:"LONGTEXT"` // encrypted data
+ CreatedUnix timeutil.TimeStamp `xorm:"created NOT NULL"`
+}
+
+// ErrSecretNotFound represents a "secret not found" error.
+type ErrSecretNotFound struct {
+ Name string
+}
+
+func (err ErrSecretNotFound) Error() string {
+ return fmt.Sprintf("secret was not found [name: %s]", err.Name)
+}
+
+func (err ErrSecretNotFound) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// InsertEncryptedSecret validates the owner/repo scope, encrypts the still-unencrypted data, and inserts the new secret into the database
+func InsertEncryptedSecret(ctx context.Context, ownerID, repoID int64, name, data string) (*Secret, error) {
+ if ownerID != 0 && repoID != 0 {
+ // It's trying to create a secret that belongs to a repository, but OwnerID has been set accidentally.
+ // Remove OwnerID to avoid confusion; it's not worth returning an error here.
+ ownerID = 0
+ }
+ if ownerID == 0 && repoID == 0 {
+ return nil, fmt.Errorf("%w: ownerID and repoID cannot be both zero, global secrets are not supported", util.ErrInvalidArgument)
+ }
+
+ encrypted, err := secret_module.EncryptSecret(setting.SecretKey, data)
+ if err != nil {
+ return nil, err
+ }
+ secret := &Secret{
+ OwnerID: ownerID,
+ RepoID: repoID,
+ Name: strings.ToUpper(name),
+ Data: encrypted,
+ }
+ return secret, db.Insert(ctx, secret)
+}
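+
+// A minimal usage sketch (names are placeholders): repo-level secrets pass a
+// zero ownerID, org/user-level secrets pass a zero repoID; the name is
+// uppercased before storage.
+//
+//	s, err := InsertEncryptedSecret(ctx, 0, repoID, "deploy_token", plaintext)
+//	// s.Name == "DEPLOY_TOKEN", s.Data holds the encrypted payload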
+
+func init() {
+ db.RegisterModel(new(Secret))
+}
+
+type FindSecretsOptions struct {
+ db.ListOptions
+ RepoID int64
+ OwnerID int64 // it will be ignored if RepoID is set
+ SecretID int64
+ Name string
+}
+
+func (opts FindSecretsOptions) ToConds() builder.Cond {
+ cond := builder.NewCond()
+
+ cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
+ if opts.RepoID != 0 { // if RepoID is set
+ // ignore OwnerID and treat it as 0
+ cond = cond.And(builder.Eq{"owner_id": 0})
+ } else {
+ cond = cond.And(builder.Eq{"owner_id": opts.OwnerID})
+ }
+
+ if opts.SecretID != 0 {
+ cond = cond.And(builder.Eq{"id": opts.SecretID})
+ }
+ if opts.Name != "" {
+ cond = cond.And(builder.Eq{"name": strings.ToUpper(opts.Name)})
+ }
+
+ return cond
+}
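+
+// Illustrative examples of the conditions built above:
+//
+//	FindSecretsOptions{RepoID: 1}  // => repo_id = 1 AND owner_id = 0
+//	FindSecretsOptions{OwnerID: 2} // => repo_id = 0 AND owner_id = 2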
+
+// UpdateSecret updates the data of an org, user or repo secret by its ID.
+func UpdateSecret(ctx context.Context, secretID int64, data string) error {
+ encrypted, err := secret_module.EncryptSecret(setting.SecretKey, data)
+ if err != nil {
+ return err
+ }
+
+ s := &Secret{
+ Data: encrypted,
+ }
+	affected, err := db.GetEngine(ctx).ID(secretID).Cols("data").Update(s)
+	if err != nil {
+		return err
+	}
+	if affected != 1 {
+		return ErrSecretNotFound{}
+	}
+	return nil
+}
+
+func GetSecretsOfTask(ctx context.Context, task *actions_model.ActionTask) (map[string]string, error) {
+ secrets := map[string]string{}
+
+ secrets["GITHUB_TOKEN"] = task.Token
+ secrets["GITEA_TOKEN"] = task.Token
+
+ if task.Job.Run.IsForkPullRequest && task.Job.Run.TriggerEvent != actions_module.GithubEventPullRequestTarget {
+		// ignore secrets for fork pull requests, except GITHUB_TOKEN and GITEA_TOKEN which are automatically generated.
+		// tasks triggered by the pull_request_target event can access the secrets because they run in the context of the base branch
+ // see the documentation: https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request_target
+ return secrets, nil
+ }
+
+ ownerSecrets, err := db.Find[Secret](ctx, FindSecretsOptions{OwnerID: task.Job.Run.Repo.OwnerID})
+ if err != nil {
+ log.Error("find secrets of owner %v: %v", task.Job.Run.Repo.OwnerID, err)
+ return nil, err
+ }
+ repoSecrets, err := db.Find[Secret](ctx, FindSecretsOptions{RepoID: task.Job.Run.RepoID})
+ if err != nil {
+ log.Error("find secrets of repo %v: %v", task.Job.Run.RepoID, err)
+ return nil, err
+ }
+
+ for _, secret := range append(ownerSecrets, repoSecrets...) {
+ v, err := secret_module.DecryptSecret(setting.SecretKey, secret.Data)
+ if err != nil {
+ log.Error("decrypt secret %v %q: %v", secret.ID, secret.Name, err)
+ return nil, err
+ }
+ secrets[secret.Name] = v
+ }
+
+ return secrets, nil
+}
diff --git a/models/shared/types/ownertype.go b/models/shared/types/ownertype.go
new file mode 100644
index 0000000..a1d46c9
--- /dev/null
+++ b/models/shared/types/ownertype.go
@@ -0,0 +1,29 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package types
+
+import "code.gitea.io/gitea/modules/translation"
+
+type OwnerType string
+
+const (
+ OwnerTypeSystemGlobal = "system-global"
+ OwnerTypeIndividual = "individual"
+ OwnerTypeRepository = "repository"
+ OwnerTypeOrganization = "organization"
+)
+
+func (o OwnerType) LocaleString(locale translation.Locale) string {
+ switch o {
+ case OwnerTypeSystemGlobal:
+ return locale.TrString("concept_system_global")
+ case OwnerTypeIndividual:
+ return locale.TrString("concept_user_individual")
+ case OwnerTypeRepository:
+ return locale.TrString("concept_code_repository")
+ case OwnerTypeOrganization:
+ return locale.TrString("concept_user_organization")
+ }
+ return locale.TrString("unknown")
+}
diff --git a/models/system/appstate.go b/models/system/appstate.go
new file mode 100644
index 0000000..01faa1a
--- /dev/null
+++ b/models/system/appstate.go
@@ -0,0 +1,56 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package system
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+)
+
+// AppState represents a state record in the database.
+// If one day we make Gitea run as a cluster,
+// we can introduce a new field `Scope` here to store different states for different nodes.
+type AppState struct {
+ ID string `xorm:"pk varchar(200)"`
+ Revision int64
+ Content string `xorm:"LONGTEXT"`
+}
+
+func init() {
+ db.RegisterModel(new(AppState))
+}
+
+// SaveAppStateContent saves the app state item to the database
+func SaveAppStateContent(ctx context.Context, key, content string) error {
+ return db.WithTx(ctx, func(ctx context.Context) error {
+ eng := db.GetEngine(ctx)
+ // try to update existing row
+ res, err := eng.Exec("UPDATE app_state SET revision=revision+1, content=? WHERE id=?", content, key)
+ if err != nil {
+ return err
+ }
+ rows, _ := res.RowsAffected()
+ if rows != 0 {
+ // the existing row is updated, so we can return
+ return nil
+ }
+ // if no existing row, insert a new row
+ _, err = eng.Insert(&AppState{ID: key, Content: content})
+ return err
+ })
+}
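+
+// A usage sketch with a placeholder key; the update-then-insert above acts as
+// a portable upsert, avoiding database-specific UPSERT syntax.
+//
+//	if err := SaveAppStateContent(ctx, "some-state-key", jsonContent); err != nil {
+//		return err
+//	}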
+
+// GetAppStateContent gets an app state from the database
+func GetAppStateContent(ctx context.Context, key string) (content string, err error) {
+ e := db.GetEngine(ctx)
+ appState := &AppState{ID: key}
+ has, err := e.Get(appState)
+ if err != nil {
+ return "", err
+ } else if !has {
+ return "", nil
+ }
+ return appState.Content, nil
+}
diff --git a/models/system/main_test.go b/models/system/main_test.go
new file mode 100644
index 0000000..6bc27a7
--- /dev/null
+++ b/models/system/main_test.go
@@ -0,0 +1,19 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package system_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/unittest"
+
+ _ "code.gitea.io/gitea/models" // register models
+ _ "code.gitea.io/gitea/models/actions"
+ _ "code.gitea.io/gitea/models/activities"
+ _ "code.gitea.io/gitea/models/system" // register models of system
+)
+
+func TestMain(m *testing.M) {
+ unittest.MainTest(m)
+}
diff --git a/models/system/notice.go b/models/system/notice.go
new file mode 100644
index 0000000..e7ec6a9
--- /dev/null
+++ b/models/system/notice.go
@@ -0,0 +1,128 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package system
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/storage"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+)
+
+// NoticeType describes the notice type
+type NoticeType int
+
+const (
+ // NoticeRepository type
+ NoticeRepository NoticeType = iota + 1
+ // NoticeTask type
+ NoticeTask
+)
+
+// Notice represents a system notice for admin.
+type Notice struct {
+ ID int64 `xorm:"pk autoincr"`
+ Type NoticeType
+ Description string `xorm:"TEXT"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+}
+
+func init() {
+ db.RegisterModel(new(Notice))
+}
+
+// TrStr returns a translation format string.
+func (n *Notice) TrStr() string {
+ return fmt.Sprintf("admin.notices.type_%d", n.Type)
+}
+
+// CreateNotice creates new system notice.
+func CreateNotice(ctx context.Context, tp NoticeType, desc string, args ...any) error {
+ if len(args) > 0 {
+ desc = fmt.Sprintf(desc, args...)
+ }
+ n := &Notice{
+ Type: tp,
+ Description: desc,
+ }
+ return db.Insert(ctx, n)
+}
+
+// CreateRepositoryNotice creates new system notice with type NoticeRepository.
+func CreateRepositoryNotice(desc string, args ...any) error {
+ // Note we use the db.DefaultContext here rather than passing in a context as the context may be cancelled
+ return CreateNotice(db.DefaultContext, NoticeRepository, desc, args...)
+}
+
+// RemoveAllWithNotice removes everything at the given path and
+// creates a system notice if an error occurs.
+func RemoveAllWithNotice(ctx context.Context, title, path string) {
+ if err := util.RemoveAll(path); err != nil {
+ desc := fmt.Sprintf("%s [%s]: %v", title, path, err)
+ log.Warn(title+" [%s]: %v", path, err)
+ // Note we use the db.DefaultContext here rather than passing in a context as the context may be cancelled
+ if err = CreateNotice(db.DefaultContext, NoticeRepository, desc); err != nil {
+ log.Error("CreateRepositoryNotice: %v", err)
+ }
+ }
+}
+
+// RemoveStorageWithNotice removes a file from the storage and
+// creates a system notice if an error occurs.
+func RemoveStorageWithNotice(ctx context.Context, bucket storage.ObjectStorage, title, path string) {
+ if err := bucket.Delete(path); err != nil {
+ desc := fmt.Sprintf("%s [%s]: %v", title, path, err)
+ log.Warn(title+" [%s]: %v", path, err)
+
+ // Note we use the db.DefaultContext here rather than passing in a context as the context may be cancelled
+ if err = CreateNotice(db.DefaultContext, NoticeRepository, desc); err != nil {
+ log.Error("CreateRepositoryNotice: %v", err)
+ }
+ }
+}
+
+// CountNotices returns number of notices.
+func CountNotices(ctx context.Context) int64 {
+ count, _ := db.GetEngine(ctx).Count(new(Notice))
+ return count
+}
+
+// Notices returns notices in given page.
+func Notices(ctx context.Context, page, pageSize int) ([]*Notice, error) {
+ notices := make([]*Notice, 0, pageSize)
+ return notices, db.GetEngine(ctx).
+ Limit(pageSize, (page-1)*pageSize).
+ Desc("created_unix").
+ Find(&notices)
+}
+
+// DeleteNotices deletes all notices with IDs from start to end (inclusive); if both are zero, all notices are deleted.
+func DeleteNotices(ctx context.Context, start, end int64) error {
+ if start == 0 && end == 0 {
+ _, err := db.GetEngine(ctx).Exec("DELETE FROM notice")
+ return err
+ }
+
+ sess := db.GetEngine(ctx).Where("id >= ?", start)
+ if end > 0 {
+ sess.And("id <= ?", end)
+ }
+ _, err := sess.Delete(new(Notice))
+ return err
+}
+
+// DeleteOldSystemNotices deletes all system notices older than the given duration from the database.
+func DeleteOldSystemNotices(ctx context.Context, olderThan time.Duration) (err error) {
+ if olderThan <= 0 {
+ return nil
+ }
+
+ _, err = db.GetEngine(ctx).Where("created_unix < ?", time.Now().Add(-olderThan).Unix()).Delete(&Notice{})
+ return err
+}
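+
+// For example (illustrative), pruning notices older than 90 days:
+//
+//	if err := DeleteOldSystemNotices(ctx, 90*24*time.Hour); err != nil {
+//		log.Error("DeleteOldSystemNotices: %v", err)
+//	}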
diff --git a/models/system/notice_test.go b/models/system/notice_test.go
new file mode 100644
index 0000000..bfb7862
--- /dev/null
+++ b/models/system/notice_test.go
@@ -0,0 +1,110 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package system_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/system"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNotice_TrStr(t *testing.T) {
+ notice := &system.Notice{
+ Type: system.NoticeRepository,
+ Description: "test description",
+ }
+ assert.Equal(t, "admin.notices.type_1", notice.TrStr())
+}
+
+func TestCreateNotice(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ noticeBean := &system.Notice{
+ Type: system.NoticeRepository,
+ Description: "test description",
+ }
+ unittest.AssertNotExistsBean(t, noticeBean)
+ require.NoError(t, system.CreateNotice(db.DefaultContext, noticeBean.Type, noticeBean.Description))
+ unittest.AssertExistsAndLoadBean(t, noticeBean)
+}
+
+func TestCreateRepositoryNotice(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ noticeBean := &system.Notice{
+ Type: system.NoticeRepository,
+ Description: "test description",
+ }
+ unittest.AssertNotExistsBean(t, noticeBean)
+ require.NoError(t, system.CreateRepositoryNotice(noticeBean.Description))
+ unittest.AssertExistsAndLoadBean(t, noticeBean)
+}
+
+// TODO TestRemoveAllWithNotice
+
+func TestCountNotices(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ assert.Equal(t, int64(3), system.CountNotices(db.DefaultContext))
+}
+
+func TestNotices(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ notices, err := system.Notices(db.DefaultContext, 1, 2)
+ require.NoError(t, err)
+ if assert.Len(t, notices, 2) {
+ assert.Equal(t, int64(3), notices[0].ID)
+ assert.Equal(t, int64(2), notices[1].ID)
+ }
+
+ notices, err = system.Notices(db.DefaultContext, 2, 2)
+ require.NoError(t, err)
+ if assert.Len(t, notices, 1) {
+ assert.Equal(t, int64(1), notices[0].ID)
+ }
+}
+
+func TestDeleteNotices(t *testing.T) {
+ // delete a non-empty range
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ unittest.AssertExistsAndLoadBean(t, &system.Notice{ID: 1})
+ unittest.AssertExistsAndLoadBean(t, &system.Notice{ID: 2})
+ unittest.AssertExistsAndLoadBean(t, &system.Notice{ID: 3})
+ require.NoError(t, system.DeleteNotices(db.DefaultContext, 1, 2))
+ unittest.AssertNotExistsBean(t, &system.Notice{ID: 1})
+ unittest.AssertNotExistsBean(t, &system.Notice{ID: 2})
+ unittest.AssertExistsAndLoadBean(t, &system.Notice{ID: 3})
+}
+
+func TestDeleteNotices2(t *testing.T) {
+ // delete an empty range
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ unittest.AssertExistsAndLoadBean(t, &system.Notice{ID: 1})
+ unittest.AssertExistsAndLoadBean(t, &system.Notice{ID: 2})
+ unittest.AssertExistsAndLoadBean(t, &system.Notice{ID: 3})
+ require.NoError(t, system.DeleteNotices(db.DefaultContext, 3, 2))
+ unittest.AssertExistsAndLoadBean(t, &system.Notice{ID: 1})
+ unittest.AssertExistsAndLoadBean(t, &system.Notice{ID: 2})
+ unittest.AssertExistsAndLoadBean(t, &system.Notice{ID: 3})
+}
+
+func TestDeleteNoticesByIDs(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ unittest.AssertExistsAndLoadBean(t, &system.Notice{ID: 1})
+ unittest.AssertExistsAndLoadBean(t, &system.Notice{ID: 2})
+ unittest.AssertExistsAndLoadBean(t, &system.Notice{ID: 3})
+ err := db.DeleteByIDs[system.Notice](db.DefaultContext, 1, 3)
+ require.NoError(t, err)
+ unittest.AssertNotExistsBean(t, &system.Notice{ID: 1})
+ unittest.AssertExistsAndLoadBean(t, &system.Notice{ID: 2})
+ unittest.AssertNotExistsBean(t, &system.Notice{ID: 3})
+}
diff --git a/models/system/setting.go b/models/system/setting.go
new file mode 100644
index 0000000..4472b4c
--- /dev/null
+++ b/models/system/setting.go
@@ -0,0 +1,152 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package system
+
+import (
+ "context"
+ "math"
+ "sync"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting/config"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/builder"
+)
+
+type Setting struct {
+ ID int64 `xorm:"pk autoincr"`
+ SettingKey string `xorm:"varchar(255) unique"` // key should be lowercase
+ SettingValue string `xorm:"text"`
+ Version int `xorm:"version"`
+ Created timeutil.TimeStamp `xorm:"created"`
+ Updated timeutil.TimeStamp `xorm:"updated"`
+}
+
+// TableName sets the table name for the settings struct
+func (s *Setting) TableName() string {
+ return "system_setting"
+}
+
+func init() {
+ db.RegisterModel(new(Setting))
+}
+
+const keyRevision = "revision"
+
+func GetRevision(ctx context.Context) int {
+ revision, exist, err := db.Get[Setting](ctx, builder.Eq{"setting_key": keyRevision})
+ if err != nil {
+ return 0
+ } else if !exist {
+ err = db.Insert(ctx, &Setting{SettingKey: keyRevision, Version: 1})
+ if err != nil {
+ return 0
+ }
+ return 1
+ }
+ if revision.Version <= 0 || revision.Version >= math.MaxInt-1 {
+ _, err = db.Exec(ctx, "UPDATE system_setting SET version=1 WHERE setting_key=?", keyRevision)
+ if err != nil {
+ return 0
+ }
+ return 1
+ }
+ return revision.Version
+}
+
+func GetAllSettings(ctx context.Context) (revision int, res map[string]string, err error) {
+ _ = GetRevision(ctx) // prepare the "revision" key ahead
+ var settings []*Setting
+ if err := db.GetEngine(ctx).
+ Find(&settings); err != nil {
+ return 0, nil, err
+ }
+ res = make(map[string]string)
+ for _, s := range settings {
+ if s.SettingKey == keyRevision {
+ revision = s.Version
+ }
+ res[s.SettingKey] = s.SettingValue
+ }
+ return revision, res, nil
+}
+
+func SetSettings(ctx context.Context, settings map[string]string) error {
+ _ = GetRevision(ctx) // prepare the "revision" key ahead
+ return db.WithTx(ctx, func(ctx context.Context) error {
+ e := db.GetEngine(ctx)
+ _, err := db.Exec(ctx, "UPDATE system_setting SET version=version+1 WHERE setting_key=?", keyRevision)
+ if err != nil {
+ return err
+ }
+ for k, v := range settings {
+ res, err := e.Exec("UPDATE system_setting SET version=version+1, setting_value=? WHERE setting_key=?", v, k)
+ if err != nil {
+ return err
+ }
+ rows, _ := res.RowsAffected()
+ if rows == 0 { // if no existing row, insert a new row
+ if _, err = e.Insert(&Setting{SettingKey: k, SettingValue: v}); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+ })
+}
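+
+// An illustrative sketch (the key is a placeholder): every SetSettings call
+// bumps the global revision row, which cached getters compare to detect staleness.
+//
+//	_ = SetSettings(ctx, map[string]string{"my.key": "true"})
+//	rev, all, _ := GetAllSettings(ctx) // rev is now one higher than before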
+
+type dbConfigCachedGetter struct {
+ mu sync.RWMutex
+
+ cacheTime time.Time
+ revision int
+ settings map[string]string
+}
+
+var _ config.DynKeyGetter = (*dbConfigCachedGetter)(nil)
+
+func (d *dbConfigCachedGetter) GetValue(ctx context.Context, key string) (v string, has bool) {
+ d.mu.RLock()
+ defer d.mu.RUnlock()
+ v, has = d.settings[key]
+ return v, has
+}
+
+func (d *dbConfigCachedGetter) GetRevision(ctx context.Context) int {
+ d.mu.RLock()
+ cachedDuration := time.Since(d.cacheTime)
+ cachedRevision := d.revision
+ d.mu.RUnlock()
+
+ if cachedDuration < time.Second {
+ return cachedRevision
+ }
+
+ d.mu.Lock()
+ defer d.mu.Unlock()
+ if GetRevision(ctx) != d.revision {
+ rev, set, err := GetAllSettings(ctx)
+ if err != nil {
+ log.Error("Unable to get all settings: %v", err)
+ } else {
+ d.revision = rev
+ d.settings = set
+ }
+ }
+ d.cacheTime = time.Now()
+ return d.revision
+}
+
+func (d *dbConfigCachedGetter) InvalidateCache() {
+ d.mu.Lock()
+ d.cacheTime = time.Time{}
+ d.mu.Unlock()
+}
+
+func NewDatabaseDynKeyGetter() config.DynKeyGetter {
+ return &dbConfigCachedGetter{}
+}
diff --git a/models/system/setting_test.go b/models/system/setting_test.go
new file mode 100644
index 0000000..7a7fa02
--- /dev/null
+++ b/models/system/setting_test.go
@@ -0,0 +1,52 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package system_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/system"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestSettings(t *testing.T) {
+ keyName := "test.key"
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ require.NoError(t, db.TruncateBeans(db.DefaultContext, &system.Setting{}))
+
+ rev, settings, err := system.GetAllSettings(db.DefaultContext)
+ require.NoError(t, err)
+ assert.EqualValues(t, 1, rev)
+ assert.Len(t, settings, 1) // there is only one "revision" key
+
+ err = system.SetSettings(db.DefaultContext, map[string]string{keyName: "true"})
+ require.NoError(t, err)
+ rev, settings, err = system.GetAllSettings(db.DefaultContext)
+ require.NoError(t, err)
+ assert.EqualValues(t, 2, rev)
+ assert.Len(t, settings, 2)
+ assert.EqualValues(t, "true", settings[keyName])
+
+ err = system.SetSettings(db.DefaultContext, map[string]string{keyName: "false"})
+ require.NoError(t, err)
+ rev, settings, err = system.GetAllSettings(db.DefaultContext)
+ require.NoError(t, err)
+ assert.EqualValues(t, 3, rev)
+ assert.Len(t, settings, 2)
+ assert.EqualValues(t, "false", settings[keyName])
+
+ // setting the same value should not trigger DuplicateKey error, and the "version" should be increased
+ err = system.SetSettings(db.DefaultContext, map[string]string{keyName: "false"})
+ require.NoError(t, err)
+
+ rev, settings, err = system.GetAllSettings(db.DefaultContext)
+ require.NoError(t, err)
+ assert.Len(t, settings, 2)
+ assert.EqualValues(t, 4, rev)
+}
diff --git a/models/unit/unit.go b/models/unit/unit.go
new file mode 100644
index 0000000..5a8b911
--- /dev/null
+++ b/models/unit/unit.go
@@ -0,0 +1,437 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package unit
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "sync/atomic"
+
+ "code.gitea.io/gitea/models/perm"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+)
+
+// Type is Unit's Type
+type Type int
+
+// Enumerate all the unit types
+const (
+ TypeInvalid Type = iota // 0 invalid
+ TypeCode // 1 code
+ TypeIssues // 2 issues
+ TypePullRequests // 3 PRs
+ TypeReleases // 4 Releases
+ TypeWiki // 5 Wiki
+ TypeExternalWiki // 6 ExternalWiki
+ TypeExternalTracker // 7 ExternalTracker
+ TypeProjects // 8 Projects
+ TypePackages // 9 Packages
+ TypeActions // 10 Actions
+)
+
+// Value returns the integer value for the unit type
+func (u Type) Value() int {
+ return int(u)
+}
+
+func (u Type) String() string {
+ switch u {
+ case TypeCode:
+ return "TypeCode"
+ case TypeIssues:
+ return "TypeIssues"
+ case TypePullRequests:
+ return "TypePullRequests"
+ case TypeReleases:
+ return "TypeReleases"
+ case TypeWiki:
+ return "TypeWiki"
+ case TypeExternalWiki:
+ return "TypeExternalWiki"
+ case TypeExternalTracker:
+ return "TypeExternalTracker"
+ case TypeProjects:
+ return "TypeProjects"
+ case TypePackages:
+ return "TypePackages"
+ case TypeActions:
+ return "TypeActions"
+ }
+ return fmt.Sprintf("Unknown Type %d", u)
+}
+
+func (u Type) LogString() string {
+ return fmt.Sprintf("<UnitType:%d:%s>", u, u.String())
+}
+
+var (
+ // AllRepoUnitTypes contains all the unit types
+ AllRepoUnitTypes = []Type{
+ TypeCode,
+ TypeIssues,
+ TypePullRequests,
+ TypeReleases,
+ TypeWiki,
+ TypeExternalWiki,
+ TypeExternalTracker,
+ TypeProjects,
+ TypePackages,
+ TypeActions,
+ }
+
+ // DefaultRepoUnits contains the default unit types
+ DefaultRepoUnits = []Type{
+ TypeCode,
+ TypeIssues,
+ TypePullRequests,
+ TypeReleases,
+ TypeWiki,
+ TypeProjects,
+ TypePackages,
+ TypeActions,
+ }
+
+	// DefaultForkRepoUnits contains the default unit types for forks
+ DefaultForkRepoUnits = []Type{
+ TypeCode,
+ TypePullRequests,
+ }
+
+ // NotAllowedDefaultRepoUnits contains units that can't be default
+ NotAllowedDefaultRepoUnits = []Type{
+ TypeExternalWiki,
+ TypeExternalTracker,
+ }
+
+ disabledRepoUnitsAtomic atomic.Pointer[[]Type] // the units that have been globally disabled
+
+ // AllowedRepoUnitGroups contains the units that have been globally enabled,
+ // with mutually exclusive units grouped together.
+ AllowedRepoUnitGroups = [][]Type{}
+)
+
+// DisabledRepoUnitsGet returns the globally disabled units. It is a quick patch
+// to fix a data race during testing, because the queue worker might read the value
+// while a test is mocking it. FIXME: refactor to a clear solution later.
+func DisabledRepoUnitsGet() []Type {
+ v := disabledRepoUnitsAtomic.Load()
+ if v == nil {
+ return nil
+ }
+ return *v
+}
+
+func DisabledRepoUnitsSet(v []Type) {
+ disabledRepoUnitsAtomic.Store(&v)
+}
+
+// validateDefaultRepoUnits returns the valid set of default repository units derived from settings
+func validateDefaultRepoUnits(defaultUnits, settingDefaultUnits []Type) []Type {
+ units := defaultUnits
+
+ // Use setting if not empty
+ if len(settingDefaultUnits) > 0 {
+ units = make([]Type, 0, len(settingDefaultUnits))
+ for _, settingUnit := range settingDefaultUnits {
+ if !settingUnit.CanBeDefault() {
+ log.Warn("Not allowed as default unit: %s", settingUnit.String())
+ continue
+ }
+ units = append(units, settingUnit)
+ }
+ }
+
+ // Remove disabled units
+ for _, disabledUnit := range DisabledRepoUnitsGet() {
+ for i, unit := range units {
+ if unit == disabledUnit {
+ units = append(units[:i], units[i+1:]...)
+ }
+ }
+ }
+
+ return units
+}
+
+// LoadUnitConfig loads the unit config from settings
+func LoadUnitConfig() error {
+ disabledRepoUnits, invalidKeys := FindUnitTypes(setting.Repository.DisabledRepoUnits...)
+ if len(invalidKeys) > 0 {
+ log.Warn("Invalid keys in disabled repo units: %s", strings.Join(invalidKeys, ", "))
+ }
+ DisabledRepoUnitsSet(disabledRepoUnits)
+
+ setDefaultRepoUnits, invalidKeys := FindUnitTypes(setting.Repository.DefaultRepoUnits...)
+ if len(invalidKeys) > 0 {
+ log.Warn("Invalid keys in default repo units: %s", strings.Join(invalidKeys, ", "))
+ }
+ DefaultRepoUnits = validateDefaultRepoUnits(DefaultRepoUnits, setDefaultRepoUnits)
+ if len(DefaultRepoUnits) == 0 {
+ return errors.New("no default repository units found")
+ }
+ setDefaultForkRepoUnits, invalidKeys := FindUnitTypes(setting.Repository.DefaultForkRepoUnits...)
+ if len(invalidKeys) > 0 {
+ log.Warn("Invalid keys in default fork repo units: %s", strings.Join(invalidKeys, ", "))
+ }
+ DefaultForkRepoUnits = validateDefaultRepoUnits(DefaultForkRepoUnits, setDefaultForkRepoUnits)
+ if len(DefaultForkRepoUnits) == 0 {
+ return errors.New("no default fork repository units found")
+ }
+
+ // Collect the allowed repo unit groups. Mutually exclusive units are
+ // grouped together.
+ AllowedRepoUnitGroups = [][]Type{}
+ for _, unit := range []Type{
+ TypeCode,
+ TypePullRequests,
+ TypeProjects,
+ TypePackages,
+ TypeActions,
+ } {
+ // If unit is globally disabled, ignore it.
+ if unit.UnitGlobalDisabled() {
+ continue
+ }
+
+ // If it is allowed, add it to the group list.
+ AllowedRepoUnitGroups = append(AllowedRepoUnitGroups, []Type{unit})
+ }
+
+ addMutuallyExclusiveGroup := func(unit1, unit2 Type) {
+ var list []Type
+
+ if !unit1.UnitGlobalDisabled() {
+ list = append(list, unit1)
+ }
+
+ if !unit2.UnitGlobalDisabled() {
+ list = append(list, unit2)
+ }
+
+ if len(list) > 0 {
+ AllowedRepoUnitGroups = append(AllowedRepoUnitGroups, list)
+ }
+ }
+
+ addMutuallyExclusiveGroup(TypeIssues, TypeExternalTracker)
+ addMutuallyExclusiveGroup(TypeWiki, TypeExternalWiki)
+
+ return nil
+}
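+
+// For illustration only: the settings consumed above usually originate from
+// the [repository] section of app.ini; the exact key names are assumptions
+// in this sketch:
+//
+//	[repository]
+//	DISABLED_REPO_UNITS = repo.issues,repo.ext_issues
+//	DEFAULT_REPO_UNITS = repo.code,repo.releases,repo.pulls
+//	DEFAULT_FORK_REPO_UNITS = repo.code,repo.pulls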
+
+// UnitGlobalDisabled checks whether the unit type is globally disabled
+func (u Type) UnitGlobalDisabled() bool {
+ for _, ud := range DisabledRepoUnitsGet() {
+ if u == ud {
+ return true
+ }
+ }
+ return false
+}
+
+// CanBeDefault checks if the unit type can be a default repo unit
+func (u *Type) CanBeDefault() bool {
+ for _, nadU := range NotAllowedDefaultRepoUnits {
+ if *u == nadU {
+ return false
+ }
+ }
+ return true
+}
+
+// Unit is a section of one repository
+type Unit struct {
+ Type Type
+ Name string
+ NameKey string
+ URI string
+ DescKey string
+ Idx int
+ MaxAccessMode perm.AccessMode // The max access mode of the unit. i.e. Read means this unit can only be read.
+}
+
+// IsLessThan compares order of two units
+func (u Unit) IsLessThan(unit Unit) bool {
+ if (u.Type == TypeExternalTracker || u.Type == TypeExternalWiki) && unit.Type != TypeExternalTracker && unit.Type != TypeExternalWiki {
+ return false
+ }
+ return u.Idx < unit.Idx
+}
+
+// MaxPerm returns the max perms of this unit
+func (u Unit) MaxPerm() perm.AccessMode {
+ if u.Type == TypeExternalTracker || u.Type == TypeExternalWiki {
+ return perm.AccessModeRead
+ }
+ return perm.AccessModeAdmin
+}
+
+// Enumerate all the units
+var (
+ UnitCode = Unit{
+ TypeCode,
+ "code",
+ "repo.code",
+ "/",
+ "repo.code.desc",
+ 0,
+ perm.AccessModeOwner,
+ }
+
+ UnitIssues = Unit{
+ TypeIssues,
+ "issues",
+ "repo.issues",
+ "/issues",
+ "repo.issues.desc",
+ 1,
+ perm.AccessModeOwner,
+ }
+
+ UnitExternalTracker = Unit{
+ TypeExternalTracker,
+ "ext_issues",
+ "repo.ext_issues",
+ "/issues",
+ "repo.ext_issues.desc",
+ 1,
+ perm.AccessModeRead,
+ }
+
+ UnitPullRequests = Unit{
+ TypePullRequests,
+ "pulls",
+ "repo.pulls",
+ "/pulls",
+ "repo.pulls.desc",
+ 2,
+ perm.AccessModeOwner,
+ }
+
+ UnitReleases = Unit{
+ TypeReleases,
+ "releases",
+ "repo.releases",
+ "/releases",
+ "repo.releases.desc",
+ 3,
+ perm.AccessModeOwner,
+ }
+
+ UnitWiki = Unit{
+ TypeWiki,
+ "wiki",
+ "repo.wiki",
+ "/wiki",
+ "repo.wiki.desc",
+ 4,
+ perm.AccessModeOwner,
+ }
+
+ UnitExternalWiki = Unit{
+ TypeExternalWiki,
+ "ext_wiki",
+ "repo.ext_wiki",
+ "/wiki",
+ "repo.ext_wiki.desc",
+ 4,
+ perm.AccessModeRead,
+ }
+
+ UnitProjects = Unit{
+ TypeProjects,
+ "projects",
+ "repo.projects",
+ "/projects",
+ "repo.projects.desc",
+ 5,
+ perm.AccessModeOwner,
+ }
+
+ UnitPackages = Unit{
+ TypePackages,
+ "packages",
+ "repo.packages",
+ "/packages",
+ "packages.desc",
+ 6,
+ perm.AccessModeRead,
+ }
+
+ UnitActions = Unit{
+ TypeActions,
+ "actions",
+ "repo.actions",
+ "/actions",
+ "actions.unit.desc",
+ 7,
+ perm.AccessModeOwner,
+ }
+
+ // Units contains all the units
+ Units = map[Type]Unit{
+ TypeCode: UnitCode,
+ TypeIssues: UnitIssues,
+ TypeExternalTracker: UnitExternalTracker,
+ TypePullRequests: UnitPullRequests,
+ TypeReleases: UnitReleases,
+ TypeWiki: UnitWiki,
+ TypeExternalWiki: UnitExternalWiki,
+ TypeProjects: UnitProjects,
+ TypePackages: UnitPackages,
+ TypeActions: UnitActions,
+ }
+)
+
+// FindUnitTypes takes unit key names and returns the valid unique unit types and any invalid keys
+func FindUnitTypes(nameKeys ...string) (res []Type, invalidKeys []string) {
+ m := make(container.Set[Type])
+ for _, key := range nameKeys {
+ t := TypeFromKey(key)
+ if t == TypeInvalid {
+ invalidKeys = append(invalidKeys, key)
+ } else if m.Add(t) {
+ res = append(res, t)
+ }
+ }
+ return res, invalidKeys
+}
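+
+// A minimal sketch of the behaviour (key names from the Units table above):
+//
+//	types, invalid := FindUnitTypes("repo.code", "repo.code", "no.such")
+//	// types == []Type{TypeCode} (duplicates dropped), invalid == []string{"no.such"}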
+
+// TypeFromKey takes a unit key name and returns the matching unit type, or TypeInvalid if none matches
+func TypeFromKey(nameKey string) Type {
+ for t, u := range Units {
+ if strings.EqualFold(nameKey, u.NameKey) {
+ return t
+ }
+ }
+ return TypeInvalid
+}
+
+// AllUnitKeyNames returns all unit key names
+func AllUnitKeyNames() []string {
+ res := make([]string, 0, len(Units))
+ for _, u := range Units {
+ res = append(res, u.NameKey)
+ }
+ return res
+}
+
+// MinUnitAccessMode returns the minimal permission of the permission map
+func MinUnitAccessMode(unitsMap map[Type]perm.AccessMode) perm.AccessMode {
+ res := perm.AccessModeNone
+ for t, mode := range unitsMap {
+ // Don't allow `TypeExternal{Tracker,Wiki}` to influence this as they can only be set to READ perms.
+ if t == TypeExternalTracker || t == TypeExternalWiki {
+ continue
+ }
+
+		// pick the minimal permission greater than AccessModeNone, unless all modes are AccessModeNone
+ if mode > perm.AccessModeNone && (res == perm.AccessModeNone || mode < res) {
+ res = mode
+ }
+ }
+ return res
+}
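+
+// Worked example (illustrative): for {TypeCode: Write, TypeIssues: Read,
+// TypeExternalWiki: Read} the external wiki entry is skipped, and the result
+// is Read, the smallest mode greater than AccessModeNone.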
diff --git a/models/unit/unit_test.go b/models/unit/unit_test.go
new file mode 100644
index 0000000..a739677
--- /dev/null
+++ b/models/unit/unit_test.go
@@ -0,0 +1,96 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package unit
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/modules/setting"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestLoadUnitConfig(t *testing.T) {
+ t.Run("regular", func(t *testing.T) {
+ defer func(disabledRepoUnits, defaultRepoUnits, defaultForkRepoUnits []Type) {
+ DisabledRepoUnitsSet(disabledRepoUnits)
+ DefaultRepoUnits = defaultRepoUnits
+ DefaultForkRepoUnits = defaultForkRepoUnits
+ }(DisabledRepoUnitsGet(), DefaultRepoUnits, DefaultForkRepoUnits)
+ defer func(disabledRepoUnits, defaultRepoUnits, defaultForkRepoUnits []string) {
+ setting.Repository.DisabledRepoUnits = disabledRepoUnits
+ setting.Repository.DefaultRepoUnits = defaultRepoUnits
+ setting.Repository.DefaultForkRepoUnits = defaultForkRepoUnits
+ }(setting.Repository.DisabledRepoUnits, setting.Repository.DefaultRepoUnits, setting.Repository.DefaultForkRepoUnits)
+
+ setting.Repository.DisabledRepoUnits = []string{"repo.issues"}
+ setting.Repository.DefaultRepoUnits = []string{"repo.code", "repo.releases", "repo.issues", "repo.pulls"}
+ setting.Repository.DefaultForkRepoUnits = []string{"repo.releases"}
+ require.NoError(t, LoadUnitConfig())
+ assert.Equal(t, []Type{TypeIssues}, DisabledRepoUnitsGet())
+ assert.Equal(t, []Type{TypeCode, TypeReleases, TypePullRequests}, DefaultRepoUnits)
+ assert.Equal(t, []Type{TypeReleases}, DefaultForkRepoUnits)
+ })
+ t.Run("invalid", func(t *testing.T) {
+ defer func(disabledRepoUnits, defaultRepoUnits, defaultForkRepoUnits []Type) {
+ DisabledRepoUnitsSet(disabledRepoUnits)
+ DefaultRepoUnits = defaultRepoUnits
+ DefaultForkRepoUnits = defaultForkRepoUnits
+ }(DisabledRepoUnitsGet(), DefaultRepoUnits, DefaultForkRepoUnits)
+ defer func(disabledRepoUnits, defaultRepoUnits, defaultForkRepoUnits []string) {
+ setting.Repository.DisabledRepoUnits = disabledRepoUnits
+ setting.Repository.DefaultRepoUnits = defaultRepoUnits
+ setting.Repository.DefaultForkRepoUnits = defaultForkRepoUnits
+ }(setting.Repository.DisabledRepoUnits, setting.Repository.DefaultRepoUnits, setting.Repository.DefaultForkRepoUnits)
+
+ setting.Repository.DisabledRepoUnits = []string{"repo.issues", "invalid.1"}
+ setting.Repository.DefaultRepoUnits = []string{"repo.code", "invalid.2", "repo.releases", "repo.issues", "repo.pulls"}
+ setting.Repository.DefaultForkRepoUnits = []string{"invalid.3", "repo.releases"}
+ require.NoError(t, LoadUnitConfig())
+ assert.Equal(t, []Type{TypeIssues}, DisabledRepoUnitsGet())
+ assert.Equal(t, []Type{TypeCode, TypeReleases, TypePullRequests}, DefaultRepoUnits)
+ assert.Equal(t, []Type{TypeReleases}, DefaultForkRepoUnits)
+ })
+ t.Run("duplicate", func(t *testing.T) {
+ defer func(disabledRepoUnits, defaultRepoUnits, defaultForkRepoUnits []Type) {
+ DisabledRepoUnitsSet(disabledRepoUnits)
+ DefaultRepoUnits = defaultRepoUnits
+ DefaultForkRepoUnits = defaultForkRepoUnits
+ }(DisabledRepoUnitsGet(), DefaultRepoUnits, DefaultForkRepoUnits)
+ defer func(disabledRepoUnits, defaultRepoUnits, defaultForkRepoUnits []string) {
+ setting.Repository.DisabledRepoUnits = disabledRepoUnits
+ setting.Repository.DefaultRepoUnits = defaultRepoUnits
+ setting.Repository.DefaultForkRepoUnits = defaultForkRepoUnits
+ }(setting.Repository.DisabledRepoUnits, setting.Repository.DefaultRepoUnits, setting.Repository.DefaultForkRepoUnits)
+
+ setting.Repository.DisabledRepoUnits = []string{"repo.issues", "repo.issues"}
+ setting.Repository.DefaultRepoUnits = []string{"repo.code", "repo.releases", "repo.issues", "repo.pulls", "repo.code"}
+ setting.Repository.DefaultForkRepoUnits = []string{"repo.releases", "repo.releases"}
+ require.NoError(t, LoadUnitConfig())
+ assert.Equal(t, []Type{TypeIssues}, DisabledRepoUnitsGet())
+ assert.Equal(t, []Type{TypeCode, TypeReleases, TypePullRequests}, DefaultRepoUnits)
+ assert.Equal(t, []Type{TypeReleases}, DefaultForkRepoUnits)
+ })
+ t.Run("empty_default", func(t *testing.T) {
+ defer func(disabledRepoUnits, defaultRepoUnits, defaultForkRepoUnits []Type) {
+ DisabledRepoUnitsSet(disabledRepoUnits)
+ DefaultRepoUnits = defaultRepoUnits
+ DefaultForkRepoUnits = defaultForkRepoUnits
+ }(DisabledRepoUnitsGet(), DefaultRepoUnits, DefaultForkRepoUnits)
+ defer func(disabledRepoUnits, defaultRepoUnits, defaultForkRepoUnits []string) {
+ setting.Repository.DisabledRepoUnits = disabledRepoUnits
+ setting.Repository.DefaultRepoUnits = defaultRepoUnits
+ setting.Repository.DefaultForkRepoUnits = defaultForkRepoUnits
+ }(setting.Repository.DisabledRepoUnits, setting.Repository.DefaultRepoUnits, setting.Repository.DefaultForkRepoUnits)
+
+ setting.Repository.DisabledRepoUnits = []string{"repo.issues", "repo.issues"}
+ setting.Repository.DefaultRepoUnits = []string{}
+ setting.Repository.DefaultForkRepoUnits = []string{"repo.releases", "repo.releases"}
+ require.NoError(t, LoadUnitConfig())
+ assert.Equal(t, []Type{TypeIssues}, DisabledRepoUnitsGet())
+ assert.ElementsMatch(t, []Type{TypeCode, TypePullRequests, TypeReleases, TypeWiki, TypePackages, TypeProjects, TypeActions}, DefaultRepoUnits)
+ assert.Equal(t, []Type{TypeReleases}, DefaultForkRepoUnits)
+ })
+}
diff --git a/models/unittest/consistency.go b/models/unittest/consistency.go
new file mode 100644
index 0000000..4e26de7
--- /dev/null
+++ b/models/unittest/consistency.go
@@ -0,0 +1,192 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package unittest
+
+import (
+ "reflect"
+ "strconv"
+ "strings"
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "xorm.io/builder"
+)
+
+const (
+	// these const values are copied from the `models` package to avoid an import cycle
+ modelsUserTypeOrganization = 1
+ modelsRepoWatchModeDont = 2
+ modelsCommentTypeComment = 0
+)
+
+var consistencyCheckMap = make(map[string]func(t *testing.T, bean any))
+
+// CheckConsistencyFor tests that all matching database entries are consistent
+func CheckConsistencyFor(t *testing.T, beansToCheck ...any) {
+ for _, bean := range beansToCheck {
+ sliceType := reflect.SliceOf(reflect.TypeOf(bean))
+ sliceValue := reflect.MakeSlice(sliceType, 0, 10)
+
+ ptrToSliceValue := reflect.New(sliceType)
+ ptrToSliceValue.Elem().Set(sliceValue)
+
+ require.NoError(t, db.GetEngine(db.DefaultContext).Table(bean).Find(ptrToSliceValue.Interface()))
+ sliceValue = ptrToSliceValue.Elem()
+
+ for i := 0; i < sliceValue.Len(); i++ {
+ entity := sliceValue.Index(i).Interface()
+ checkForConsistency(t, entity)
+ }
+ }
+}
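+
+// Illustrative call from a model test (the bean type is an example):
+//
+//	unittest.CheckConsistencyFor(t, &user_model.User{})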
+
+func checkForConsistency(t *testing.T, bean any) {
+ tb, err := db.TableInfo(bean)
+ require.NoError(t, err)
+ f := consistencyCheckMap[tb.Name]
+ if f == nil {
+ assert.FailNow(t, "unknown bean type: %#v", bean)
+ }
+ f(t, bean)
+}
+
+func init() {
+ parseBool := func(v string) bool {
+ b, _ := strconv.ParseBool(v)
+ return b
+ }
+ parseInt := func(v string) int {
+ i, _ := strconv.Atoi(v)
+ return i
+ }
+
+ checkForUserConsistency := func(t *testing.T, bean any) {
+ user := reflectionWrap(bean)
+ AssertCountByCond(t, "repository", builder.Eq{"owner_id": user.int("ID")}, user.int("NumRepos"))
+ AssertCountByCond(t, "star", builder.Eq{"uid": user.int("ID")}, user.int("NumStars"))
+ AssertCountByCond(t, "org_user", builder.Eq{"org_id": user.int("ID")}, user.int("NumMembers"))
+ AssertCountByCond(t, "team", builder.Eq{"org_id": user.int("ID")}, user.int("NumTeams"))
+ AssertCountByCond(t, "follow", builder.Eq{"user_id": user.int("ID")}, user.int("NumFollowing"))
+ AssertCountByCond(t, "follow", builder.Eq{"follow_id": user.int("ID")}, user.int("NumFollowers"))
+ if user.int("Type") != modelsUserTypeOrganization {
+ assert.EqualValues(t, 0, user.int("NumMembers"), "Unexpected number of members for user id: %d", user.int("ID"))
+ assert.EqualValues(t, 0, user.int("NumTeams"), "Unexpected number of teams for user id: %d", user.int("ID"))
+ }
+ }
+
+ checkForRepoConsistency := func(t *testing.T, bean any) {
+ repo := reflectionWrap(bean)
+ assert.Equal(t, repo.str("LowerName"), strings.ToLower(repo.str("Name")), "repo: %+v", repo)
+ AssertCountByCond(t, "star", builder.Eq{"repo_id": repo.int("ID")}, repo.int("NumStars"))
+ AssertCountByCond(t, "milestone", builder.Eq{"repo_id": repo.int("ID")}, repo.int("NumMilestones"))
+ AssertCountByCond(t, "repository", builder.Eq{"fork_id": repo.int("ID")}, repo.int("NumForks"))
+ if repo.bool("IsFork") {
+ AssertExistsAndLoadMap(t, "repository", builder.Eq{"id": repo.int("ForkID")})
+ }
+
+ actual := GetCountByCond(t, "watch", builder.Eq{"repo_id": repo.int("ID")}.
+ And(builder.Neq{"mode": modelsRepoWatchModeDont}))
+ assert.EqualValues(t, repo.int("NumWatches"), actual,
+ "Unexpected number of watches for repo id: %d", repo.int("ID"))
+
+ actual = GetCountByCond(t, "issue", builder.Eq{"is_pull": false, "repo_id": repo.int("ID")})
+ assert.EqualValues(t, repo.int("NumIssues"), actual,
+ "Unexpected number of issues for repo id: %d", repo.int("ID"))
+
+ actual = GetCountByCond(t, "issue", builder.Eq{"is_pull": false, "is_closed": true, "repo_id": repo.int("ID")})
+ assert.EqualValues(t, repo.int("NumClosedIssues"), actual,
+ "Unexpected number of closed issues for repo id: %d", repo.int("ID"))
+
+ actual = GetCountByCond(t, "issue", builder.Eq{"is_pull": true, "repo_id": repo.int("ID")})
+ assert.EqualValues(t, repo.int("NumPulls"), actual,
+ "Unexpected number of pulls for repo id: %d", repo.int("ID"))
+
+ actual = GetCountByCond(t, "issue", builder.Eq{"is_pull": true, "is_closed": true, "repo_id": repo.int("ID")})
+ assert.EqualValues(t, repo.int("NumClosedPulls"), actual,
+ "Unexpected number of closed pulls for repo id: %d", repo.int("ID"))
+
+ actual = GetCountByCond(t, "milestone", builder.Eq{"is_closed": true, "repo_id": repo.int("ID")})
+ assert.EqualValues(t, repo.int("NumClosedMilestones"), actual,
+ "Unexpected number of closed milestones for repo id: %d", repo.int("ID"))
+ }
+
+ checkForIssueConsistency := func(t *testing.T, bean any) {
+ issue := reflectionWrap(bean)
+ typeComment := modelsCommentTypeComment
+ actual := GetCountByCond(t, "comment", builder.Eq{"`type`": typeComment, "issue_id": issue.int("ID")})
+ assert.EqualValues(t, issue.int("NumComments"), actual, "Unexpected number of comments for issue id: %d", issue.int("ID"))
+ if issue.bool("IsPull") {
+ prRow := AssertExistsAndLoadMap(t, "pull_request", builder.Eq{"issue_id": issue.int("ID")})
+ assert.EqualValues(t, parseInt(prRow["index"]), issue.int("Index"), "Unexpected index for issue id: %d", issue.int("ID"))
+ }
+ }
+
+ checkForPullRequestConsistency := func(t *testing.T, bean any) {
+ pr := reflectionWrap(bean)
+ issueRow := AssertExistsAndLoadMap(t, "issue", builder.Eq{"id": pr.int("IssueID")})
+ assert.True(t, parseBool(issueRow["is_pull"]))
+ assert.EqualValues(t, parseInt(issueRow["index"]), pr.int("Index"), "Unexpected index for pull request id: %d", pr.int("ID"))
+ }
+
+ checkForMilestoneConsistency := func(t *testing.T, bean any) {
+ milestone := reflectionWrap(bean)
+ AssertCountByCond(t, "issue", builder.Eq{"milestone_id": milestone.int("ID")}, milestone.int("NumIssues"))
+
+ actual := GetCountByCond(t, "issue", builder.Eq{"is_closed": true, "milestone_id": milestone.int("ID")})
+ assert.EqualValues(t, milestone.int("NumClosedIssues"), actual, "Unexpected number of closed issues for milestone id: %d", milestone.int("ID"))
+
+ completeness := 0
+ if milestone.int("NumIssues") > 0 {
+ completeness = milestone.int("NumClosedIssues") * 100 / milestone.int("NumIssues")
+ }
+ assert.Equal(t, completeness, milestone.int("Completeness"))
+ }
+
+ checkForLabelConsistency := func(t *testing.T, bean any) {
+ label := reflectionWrap(bean)
+ issueLabels, err := db.GetEngine(db.DefaultContext).Table("issue_label").
+ Where(builder.Eq{"label_id": label.int("ID")}).
+ Query()
+ require.NoError(t, err)
+
+		assert.Len(t, issueLabels, label.int("NumIssues"), "Unexpected number of issues for label id: %d", label.int("ID"))
+
+ issueIDs := make([]int, len(issueLabels))
+ for i, issueLabel := range issueLabels {
+ issueIDs[i], _ = strconv.Atoi(string(issueLabel["issue_id"]))
+ }
+
+ expected := int64(0)
+ if len(issueIDs) > 0 {
+ expected = GetCountByCond(t, "issue", builder.In("id", issueIDs).And(builder.Eq{"is_closed": true}))
+ }
+ assert.EqualValues(t, expected, label.int("NumClosedIssues"), "Unexpected number of closed issues for label id: %d", label.int("ID"))
+ }
+
+ checkForTeamConsistency := func(t *testing.T, bean any) {
+ team := reflectionWrap(bean)
+ AssertCountByCond(t, "team_user", builder.Eq{"team_id": team.int("ID")}, team.int("NumMembers"))
+ AssertCountByCond(t, "team_repo", builder.Eq{"team_id": team.int("ID")}, team.int("NumRepos"))
+ }
+
+ checkForActionConsistency := func(t *testing.T, bean any) {
+ action := reflectionWrap(bean)
+		if action.int("RepoID") != 1700 { // repo 1700 is intentionally dangling in the fixtures
+ repoRow := AssertExistsAndLoadMap(t, "repository", builder.Eq{"id": action.int("RepoID")})
+ assert.Equal(t, parseBool(repoRow["is_private"]), action.bool("IsPrivate"), "Unexpected is_private field for action id: %d", action.int("ID"))
+ }
+ }
+
+ consistencyCheckMap["user"] = checkForUserConsistency
+ consistencyCheckMap["repository"] = checkForRepoConsistency
+ consistencyCheckMap["issue"] = checkForIssueConsistency
+ consistencyCheckMap["pull_request"] = checkForPullRequestConsistency
+ consistencyCheckMap["milestone"] = checkForMilestoneConsistency
+ consistencyCheckMap["label"] = checkForLabelConsistency
+ consistencyCheckMap["team"] = checkForTeamConsistency
+ consistencyCheckMap["action"] = checkForActionConsistency
+}
diff --git a/models/unittest/fixtures.go b/models/unittest/fixtures.go
new file mode 100644
index 0000000..63b26a0
--- /dev/null
+++ b/models/unittest/fixtures.go
@@ -0,0 +1,144 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+//nolint:forbidigo
+package unittest
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/auth/password/hash"
+ "code.gitea.io/gitea/modules/setting"
+
+ "github.com/go-testfixtures/testfixtures/v3"
+ "xorm.io/xorm"
+ "xorm.io/xorm/schemas"
+)
+
+var fixturesLoader *testfixtures.Loader
+
+// GetXORMEngine gets the XORM engine
+func GetXORMEngine(engine ...*xorm.Engine) (x *xorm.Engine) {
+ if len(engine) == 1 {
+ return engine[0]
+ }
+ return db.DefaultContext.(*db.Context).Engine().(*xorm.Engine)
+}
+
+func OverrideFixtures(opts FixturesOptions, engine ...*xorm.Engine) func() {
+ old := fixturesLoader
+ if err := InitFixtures(opts, engine...); err != nil {
+ panic(err)
+ }
+ return func() {
+ fixturesLoader = old
+ }
+}
+
+// InitFixtures initializes test fixtures for a test database
+func InitFixtures(opts FixturesOptions, engine ...*xorm.Engine) (err error) {
+ e := GetXORMEngine(engine...)
+ var fixtureOptionFiles func(*testfixtures.Loader) error
+ if opts.Dir != "" {
+ fixtureOptionFiles = testfixtures.Directory(opts.Dir)
+ } else {
+ fixtureOptionFiles = testfixtures.Files(opts.Files...)
+ }
+ var fixtureOptionDirs []func(*testfixtures.Loader) error
+ if opts.Dirs != nil {
+ for _, dir := range opts.Dirs {
+ fixtureOptionDirs = append(fixtureOptionDirs, testfixtures.Directory(filepath.Join(opts.Base, dir)))
+ }
+ }
+ dialect := "unknown"
+ switch e.Dialect().URI().DBType {
+ case schemas.POSTGRES:
+ dialect = "postgres"
+ case schemas.MYSQL:
+ dialect = "mysql"
+ case schemas.SQLITE:
+ dialect = "sqlite3"
+ default:
+ fmt.Println("Unsupported RDBMS for integration tests")
+ os.Exit(1)
+ }
+ loaderOptions := []func(loader *testfixtures.Loader) error{
+ testfixtures.Database(e.DB().DB),
+ testfixtures.Dialect(dialect),
+ testfixtures.DangerousSkipTestDatabaseCheck(),
+ fixtureOptionFiles,
+ }
+ loaderOptions = append(loaderOptions, fixtureOptionDirs...)
+
+ if e.Dialect().URI().DBType == schemas.POSTGRES {
+ loaderOptions = append(loaderOptions, testfixtures.SkipResetSequences())
+ }
+
+ fixturesLoader, err = testfixtures.New(loaderOptions...)
+ if err != nil {
+ return err
+ }
+
+ // register the dummy hash algorithm function used in the test fixtures
+ _ = hash.Register("dummy", hash.NewDummyHasher)
+
+ setting.PasswordHashAlgo, _ = hash.SetDefaultPasswordHashAlgorithm("dummy")
+
+ return err
+}
+
+// LoadFixtures loads fixtures for a test database
+func LoadFixtures(engine ...*xorm.Engine) error {
+ e := GetXORMEngine(engine...)
+ var err error
+	// Database transaction conflicts could occur and result in a ROLLBACK, so retry a few times.
+ for i := 0; i < 5; i++ {
+ if err = fixturesLoader.Load(); err == nil {
+ break
+ }
+ time.Sleep(200 * time.Millisecond)
+ }
+ if err != nil {
+ fmt.Printf("LoadFixtures failed after retries: %v\n", err)
+ }
+ // Now if we're running postgres we need to tell it to update the sequences
+ if e.Dialect().URI().DBType == schemas.POSTGRES {
+ results, err := e.QueryString(`SELECT 'SELECT SETVAL(' ||
+ quote_literal(quote_ident(PGT.schemaname) || '.' || quote_ident(S.relname)) ||
+ ', COALESCE(MAX(' ||quote_ident(C.attname)|| '), 1) ) FROM ' ||
+ quote_ident(PGT.schemaname)|| '.'||quote_ident(T.relname)|| ';'
+ FROM pg_class AS S,
+ pg_depend AS D,
+ pg_class AS T,
+ pg_attribute AS C,
+ pg_tables AS PGT
+ WHERE S.relkind = 'S'
+ AND S.oid = D.objid
+ AND D.refobjid = T.oid
+ AND D.refobjid = C.attrelid
+ AND D.refobjsubid = C.attnum
+ AND T.relname = PGT.tablename
+ ORDER BY S.relname;`)
+ if err != nil {
+ fmt.Printf("Failed to generate sequence update: %v\n", err)
+ return err
+ }
+ for _, r := range results {
+ for _, value := range r {
+ _, err = e.Exec(value)
+ if err != nil {
+ fmt.Printf("Failed to update sequence: %s Error: %v\n", value, err)
+ return err
+ }
+ }
+ }
+ }
+ _ = hash.Register("dummy", hash.NewDummyHasher)
+ setting.PasswordHashAlgo, _ = hash.SetDefaultPasswordHashAlgorithm("dummy")
+
+ return err
+}
diff --git a/models/unittest/fscopy.go b/models/unittest/fscopy.go
new file mode 100644
index 0000000..74b12d5
--- /dev/null
+++ b/models/unittest/fscopy.go
@@ -0,0 +1,102 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package unittest
+
+import (
+ "errors"
+ "io"
+ "os"
+ "path"
+ "strings"
+
+ "code.gitea.io/gitea/modules/util"
+)
+
+// Copy copies a file from the source to the target path.
+func Copy(src, dest string) error {
+ // Gather file information to set back later.
+ si, err := os.Lstat(src)
+ if err != nil {
+ return err
+ }
+
+ // Handle symbolic link.
+ if si.Mode()&os.ModeSymlink != 0 {
+ target, err := os.Readlink(src)
+ if err != nil {
+ return err
+ }
+	// NOTE: os.Chmod and os.Chtimes don't follow symbolic links,
+	// which would lead to a "no such file or directory" error.
+ return os.Symlink(target, dest)
+ }
+
+ sr, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer sr.Close()
+
+ dw, err := os.Create(dest)
+ if err != nil {
+ return err
+ }
+ defer dw.Close()
+
+ if _, err = io.Copy(dw, sr); err != nil {
+ return err
+ }
+
+ // Set back file information.
+ if err = os.Chtimes(dest, si.ModTime(), si.ModTime()); err != nil {
+ return err
+ }
+ return os.Chmod(dest, si.Mode())
+}
+
+// CopyDir copies files recursively from the source to the target directory.
+//
+// The optional filter is a function that receives the file path and should
+// return true if the path is to be skipped.
+//
+// It returns an error when one occurs in the underlying functions.
+func CopyDir(srcPath, destPath string, filters ...func(filePath string) bool) error {
+ // Check if target directory exists.
+ if _, err := os.Stat(destPath); !errors.Is(err, os.ErrNotExist) {
+ return util.NewAlreadyExistErrorf("file or directory already exists: %s", destPath)
+ }
+
+ err := os.MkdirAll(destPath, os.ModePerm)
+ if err != nil {
+ return err
+ }
+
+ // Gather directory info.
+ infos, err := util.StatDir(srcPath, true)
+ if err != nil {
+ return err
+ }
+
+ var filter func(filePath string) bool
+ if len(filters) > 0 {
+ filter = filters[0]
+ }
+
+ for _, info := range infos {
+ if filter != nil && filter(info) {
+ continue
+ }
+
+ curPath := path.Join(destPath, info)
+ if strings.HasSuffix(info, "/") {
+ err = os.MkdirAll(curPath, os.ModePerm)
+ } else {
+ err = Copy(path.Join(srcPath, info), curPath)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
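+
+// Usage sketch (paths and filter are hypothetical): copy a fixture tree while
+// skipping lock files:
+//
+//	err := CopyDir("/tmp/src-repos", "/tmp/dst-repos", func(filePath string) bool {
+//		return strings.HasSuffix(filePath, ".lock")
+//	})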
diff --git a/models/unittest/mock_http.go b/models/unittest/mock_http.go
new file mode 100644
index 0000000..aea2489
--- /dev/null
+++ b/models/unittest/mock_http.go
@@ -0,0 +1,115 @@
+// Copyright 2017 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package unittest
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "slices"
+ "strings"
+ "testing"
+
+ "code.gitea.io/gitea/modules/log"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// NewMockWebServer mocks HTTP responses of a third-party service (such as GitHub, GitLab…).
+// It has two modes:
+// - live mode: the requests made to the mock HTTP server are transmitted to the live
+//   service, and responses are saved as test data files
+// - test mode: the responses to requests to the mock HTTP server are read from the
+//   test data files
+func NewMockWebServer(t *testing.T, liveServerBaseURL, testDataDir string, liveMode bool) *httptest.Server {
+ mockServerBaseURL := ""
+ ignoredHeaders := []string{"cf-ray", "server", "date", "report-to", "nel", "x-request-id"}
+
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ path := NormalizedFullPath(r.URL)
+ log.Info("Mock HTTP Server: got request for path %s", r.URL.Path)
+ // TODO check request method (support POST?)
+ fixturePath := fmt.Sprintf("%s/%s_%s", testDataDir, r.Method, url.PathEscape(path))
+ if liveMode {
+ liveURL := fmt.Sprintf("%s%s", liveServerBaseURL, path)
+
+ request, err := http.NewRequest(r.Method, liveURL, nil)
+ require.NoError(t, err, "constructing an HTTP request to %s failed", liveURL)
+ for headerName, headerValues := range r.Header {
+ // do not pass on the encoding: let the Transport of the HTTP client handle that for us
+ if strings.ToLower(headerName) != "accept-encoding" {
+ for _, headerValue := range headerValues {
+ request.Header.Add(headerName, headerValue)
+ }
+ }
+ }
+
+ response, err := http.DefaultClient.Do(request)
+			require.NoError(t, err, "HTTP request to %s failed", liveURL)
+ assert.Less(t, response.StatusCode, 400, "unexpected status code for %s", liveURL)
+
+ fixture, err := os.Create(fixturePath)
+ require.NoError(t, err, "failed to open the fixture file %s for writing", fixturePath)
+ defer fixture.Close()
+ fixtureWriter := bufio.NewWriter(fixture)
+
+ for headerName, headerValues := range response.Header {
+ for _, headerValue := range headerValues {
+ if !slices.Contains(ignoredHeaders, strings.ToLower(headerName)) {
+ _, err := fixtureWriter.WriteString(fmt.Sprintf("%s: %s\n", headerName, headerValue))
+ require.NoError(t, err, "writing the header of the HTTP response to the fixture file failed")
+ }
+ }
+ }
+ _, err = fixtureWriter.WriteString("\n")
+ require.NoError(t, err, "writing the header of the HTTP response to the fixture file failed")
+ fixtureWriter.Flush()
+
+ log.Info("Mock HTTP Server: writing response to %s", fixturePath)
+ _, err = io.Copy(fixture, response.Body)
+			require.NoError(t, err, "writing the body of the HTTP response to %s failed", fixturePath)
+
+ err = fixture.Sync()
+ require.NoError(t, err, "writing the body of the HTTP response to the fixture file failed")
+ }
+
+ fixture, err := os.ReadFile(fixturePath)
+ require.NoError(t, err, "missing mock HTTP response: "+fixturePath)
+
+		// The status is implicitly 200 on the first Write. WriteHeader must not be
+		// called before the header loop below, otherwise the headers would be dropped.
+
+		// replace any mention of the live HTTP service with the mocked host
+ stringFixture := strings.ReplaceAll(string(fixture), liveServerBaseURL, mockServerBaseURL)
+ // parse back the fixture file into a series of HTTP headers followed by response body
+ lines := strings.Split(stringFixture, "\n")
+ for idx, line := range lines {
+ colonIndex := strings.Index(line, ": ")
+ if colonIndex != -1 {
+ w.Header().Set(line[0:colonIndex], line[colonIndex+2:])
+ } else {
+ // we reached the end of the headers (empty line), so what follows is the body
+ responseBody := strings.Join(lines[idx+1:], "\n")
+ _, err := w.Write([]byte(responseBody))
+ require.NoError(t, err, "writing the body of the HTTP response failed")
+ break
+ }
+ }
+ }))
+ mockServerBaseURL = server.URL
+ return server
+}
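+
+// Illustrative use from a migration test; the URLs, directory, and environment
+// switch are assumptions for this sketch:
+//
+//	liveMode := os.Getenv("LIVE_MIGRATION_TESTS") == "1"
+//	server := NewMockWebServer(t, "https://api.github.com", "testdata/github", liveMode)
+//	defer server.Close()
+//	resp, err := http.Get(server.URL + "/repos/go-gitea/gitea")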
+
+func NormalizedFullPath(url *url.URL) string {
+ // TODO normalize path (remove trailing slash?)
+ // TODO normalize RawQuery (order query parameters?)
+ if len(url.Query()) == 0 {
+ return url.EscapedPath()
+ }
+ return fmt.Sprintf("%s?%s", url.EscapedPath(), url.RawQuery)
+}
diff --git a/models/unittest/reflection.go b/models/unittest/reflection.go
new file mode 100644
index 0000000..141fc66
--- /dev/null
+++ b/models/unittest/reflection.go
@@ -0,0 +1,40 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package unittest
+
+import (
+ "log"
+ "reflect"
+)
+
+func fieldByName(v reflect.Value, field string) reflect.Value {
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ f := v.FieldByName(field)
+ if !f.IsValid() {
+ log.Panicf("can not read %s for %v", field, v)
+ }
+ return f
+}
+
+type reflectionValue struct {
+ v reflect.Value
+}
+
+func reflectionWrap(v any) *reflectionValue {
+ return &reflectionValue{v: reflect.ValueOf(v)}
+}
+
+func (rv *reflectionValue) int(field string) int {
+ return int(fieldByName(rv.v, field).Int())
+}
+
+func (rv *reflectionValue) str(field string) string {
+ return fieldByName(rv.v, field).String()
+}
+
+func (rv *reflectionValue) bool(field string) bool {
+ return fieldByName(rv.v, field).Bool()
+}
diff --git a/models/unittest/testdb.go b/models/unittest/testdb.go
new file mode 100644
index 0000000..94a3253
--- /dev/null
+++ b/models/unittest/testdb.go
@@ -0,0 +1,267 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package unittest
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/system"
+ "code.gitea.io/gitea/modules/auth/password/hash"
+ "code.gitea.io/gitea/modules/base"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/setting/config"
+ "code.gitea.io/gitea/modules/storage"
+ "code.gitea.io/gitea/modules/util"
+
+ "github.com/stretchr/testify/require"
+ "xorm.io/xorm"
+ "xorm.io/xorm/names"
+)
+
+// giteaRoot is the path to the Gitea repository root
+var (
+ giteaRoot string
+ fixturesDir string
+)
+
+// FixturesDir returns the fixture directory
+func FixturesDir() string {
+ return fixturesDir
+}
+
+func fatalTestError(fmtStr string, args ...any) {
+ _, _ = fmt.Fprintf(os.Stderr, fmtStr, args...)
+ os.Exit(1)
+}
+
+// InitSettings initializes the config provider and loads common settings for tests
+func InitSettings() {
+ if setting.CustomConf == "" {
+ setting.CustomConf = filepath.Join(setting.CustomPath, "conf/app-unittest-tmp.ini")
+ _ = os.Remove(setting.CustomConf)
+ }
+ setting.InitCfgProvider(setting.CustomConf)
+ setting.LoadCommonSettings()
+
+ if err := setting.PrepareAppDataPath(); err != nil {
+ log.Fatalf("Can not prepare APP_DATA_PATH: %v", err)
+ }
+ // register the dummy hash algorithm function used in the test fixtures
+ _ = hash.Register("dummy", hash.NewDummyHasher)
+
+ setting.PasswordHashAlgo, _ = hash.SetDefaultPasswordHashAlgorithm("dummy")
+}
+
+// TestOptions represents test options
+type TestOptions struct {
+ FixtureFiles []string
+ SetUp func() error // SetUp will be executed before all tests in this package
+ TearDown func() error // TearDown will be executed after all tests in this package
+}
+
+// MainTest is a reusable TestMain(..) function for unit tests that need to use a
+// test database. It creates the test database and sets necessary settings.
+func MainTest(m *testing.M, testOpts ...*TestOptions) {
+ searchDir, _ := os.Getwd()
+ for searchDir != "" {
+ if _, err := os.Stat(filepath.Join(searchDir, "go.mod")); err == nil {
+ break // The "go.mod" should be the one for Gitea repository
+ }
+ if dir := filepath.Dir(searchDir); dir == searchDir {
+ searchDir = "" // reaches the root of filesystem
+ } else {
+ searchDir = dir
+ }
+ }
+ if searchDir == "" {
+ panic("The tests should run in a Gitea repository, there should be a 'go.mod' in the root")
+ }
+
+ giteaRoot = searchDir
+ setting.CustomPath = filepath.Join(giteaRoot, "custom")
+ InitSettings()
+
+ fixturesDir = filepath.Join(giteaRoot, "models", "fixtures")
+ var opts FixturesOptions
+ if len(testOpts) == 0 || len(testOpts[0].FixtureFiles) == 0 {
+ opts.Dir = fixturesDir
+ } else {
+ for _, f := range testOpts[0].FixtureFiles {
+ if len(f) != 0 {
+ opts.Files = append(opts.Files, filepath.Join(fixturesDir, f))
+ }
+ }
+ }
+
+ if err := CreateTestEngine(opts); err != nil {
+ fatalTestError("Error creating test engine: %v\n", err)
+ }
+
+ setting.AppURL = "https://try.gitea.io/"
+ setting.RunUser = "runuser"
+ setting.SSH.User = "sshuser"
+ setting.SSH.BuiltinServerUser = "builtinuser"
+ setting.SSH.Port = 3000
+ setting.SSH.Domain = "try.gitea.io"
+ setting.Database.Type = "sqlite3"
+ setting.Repository.DefaultBranch = "master" // many test code still assume that default branch is called "master"
+ repoRootPath, err := os.MkdirTemp(os.TempDir(), "repos")
+ if err != nil {
+ fatalTestError("TempDir: %v\n", err)
+ }
+ setting.RepoRootPath = repoRootPath
+ appDataPath, err := os.MkdirTemp(os.TempDir(), "appdata")
+ if err != nil {
+ fatalTestError("TempDir: %v\n", err)
+ }
+ setting.AppDataPath = appDataPath
+ setting.AppWorkPath = giteaRoot
+ setting.StaticRootPath = giteaRoot
+ setting.GravatarSource = "https://secure.gravatar.com/avatar/"
+
+ setting.Attachment.Storage.Path = filepath.Join(setting.AppDataPath, "attachments")
+
+ setting.LFS.Storage.Path = filepath.Join(setting.AppDataPath, "lfs")
+
+ setting.Avatar.Storage.Path = filepath.Join(setting.AppDataPath, "avatars")
+
+ setting.RepoAvatar.Storage.Path = filepath.Join(setting.AppDataPath, "repo-avatars")
+
+ setting.RepoArchive.Storage.Path = filepath.Join(setting.AppDataPath, "repo-archive")
+
+ setting.Packages.Storage.Path = filepath.Join(setting.AppDataPath, "packages")
+
+ setting.Actions.LogStorage.Path = filepath.Join(setting.AppDataPath, "actions_log")
+
+ setting.Git.HomePath = filepath.Join(setting.AppDataPath, "home")
+
+ setting.IncomingEmail.ReplyToAddress = "incoming+%{token}@localhost"
+
+ config.SetDynGetter(system.NewDatabaseDynKeyGetter())
+
+ if err = storage.Init(); err != nil {
+ fatalTestError("storage.Init: %v\n", err)
+ }
+ if err = util.RemoveAll(repoRootPath); err != nil {
+ fatalTestError("util.RemoveAll: %v\n", err)
+ }
+ if err = CopyDir(filepath.Join(giteaRoot, "tests", "gitea-repositories-meta"), setting.RepoRootPath); err != nil {
+ fatalTestError("util.CopyDir: %v\n", err)
+ }
+
+ if err = git.InitFull(context.Background()); err != nil {
+ fatalTestError("git.Init: %v\n", err)
+ }
+ ownerDirs, err := os.ReadDir(setting.RepoRootPath)
+ if err != nil {
+ fatalTestError("unable to read the new repo root: %v\n", err)
+ }
+ for _, ownerDir := range ownerDirs {
+ if !ownerDir.Type().IsDir() {
+ continue
+ }
+ repoDirs, err := os.ReadDir(filepath.Join(setting.RepoRootPath, ownerDir.Name()))
+ if err != nil {
+ fatalTestError("unable to read the new repo root: %v\n", err)
+ }
+ for _, repoDir := range repoDirs {
+ _ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "pack"), 0o755)
+ _ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "info"), 0o755)
+ _ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "heads"), 0o755)
+ _ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "tag"), 0o755)
+ }
+ }
+
+ if len(testOpts) > 0 && testOpts[0].SetUp != nil {
+ if err := testOpts[0].SetUp(); err != nil {
+ fatalTestError("set up failed: %v\n", err)
+ }
+ }
+
+ exitStatus := m.Run()
+
+ if len(testOpts) > 0 && testOpts[0].TearDown != nil {
+ if err := testOpts[0].TearDown(); err != nil {
+ fatalTestError("tear down failed: %v\n", err)
+ }
+ }
+
+ if err = util.RemoveAll(repoRootPath); err != nil {
+ fatalTestError("util.RemoveAll: %v\n", err)
+ }
+ if err = util.RemoveAll(appDataPath); err != nil {
+ fatalTestError("util.RemoveAll: %v\n", err)
+ }
+ os.Exit(exitStatus)
+}
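+
+// A package's main_test.go typically delegates to this helper; the fixture
+// file name below is illustrative:
+//
+//	func TestMain(m *testing.M) {
+//		unittest.MainTest(m, &unittest.TestOptions{FixtureFiles: []string{"user.yml"}})
+//	}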
+
+// FixturesOptions describes the options used to load fixtures
+type FixturesOptions struct {
+ Dir string
+ Files []string
+ Dirs []string
+ Base string
+}
+
+// CreateTestEngine creates a memory database and loads the fixture data from fixturesDir
+func CreateTestEngine(opts FixturesOptions) error {
+ x, err := xorm.NewEngine("sqlite3", "file::memory:?cache=shared&_txlock=immediate")
+ if err != nil {
+ if strings.Contains(err.Error(), "unknown driver") {
+ return fmt.Errorf(`sqlite3 requires: import _ "github.com/mattn/go-sqlite3" or -tags sqlite,sqlite_unlock_notify%s%w`, "\n", err)
+ }
+ return err
+ }
+ x.SetMapper(names.GonicMapper{})
+ db.SetDefaultEngine(context.Background(), x)
+
+ if err = db.SyncAllTables(); err != nil {
+ return err
+ }
+ switch os.Getenv("GITEA_UNIT_TESTS_LOG_SQL") {
+ case "true", "1":
+ x.ShowSQL(true)
+ }
+
+ return InitFixtures(opts)
+}
+
+// PrepareTestDatabase loads test fixtures into the test database
+func PrepareTestDatabase() error {
+ return LoadFixtures()
+}
+
+// PrepareTestEnv prepares the environment for unit tests. Can only be called
+// by tests that use the above MainTest(..) function.
+func PrepareTestEnv(t testing.TB) {
+ require.NoError(t, PrepareTestDatabase())
+ require.NoError(t, util.RemoveAll(setting.RepoRootPath))
+ metaPath := filepath.Join(giteaRoot, "tests", "gitea-repositories-meta")
+ require.NoError(t, CopyDir(metaPath, setting.RepoRootPath))
+ ownerDirs, err := os.ReadDir(setting.RepoRootPath)
+ require.NoError(t, err)
+ for _, ownerDir := range ownerDirs {
+ if !ownerDir.Type().IsDir() {
+ continue
+ }
+ repoDirs, err := os.ReadDir(filepath.Join(setting.RepoRootPath, ownerDir.Name()))
+ require.NoError(t, err)
+ for _, repoDir := range repoDirs {
+ _ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "pack"), 0o755)
+ _ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "info"), 0o755)
+ _ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "heads"), 0o755)
+ _ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "tag"), 0o755)
+ }
+ }
+
+ base.SetupGiteaRoot() // Makes sure GITEA_ROOT is set
+}
diff --git a/models/unittest/unit_tests.go b/models/unittest/unit_tests.go
new file mode 100644
index 0000000..157c676
--- /dev/null
+++ b/models/unittest/unit_tests.go
@@ -0,0 +1,164 @@
+// Copyright 2016 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package unittest
+
+import (
+ "math"
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "xorm.io/builder"
+)
+
+// Code in this file is mainly used by unittest.CheckConsistencyFor, which is not in the unit tests for various reasons.
+// In the future, if we can decouple CheckConsistencyFor into separate unit test code, then this file can be moved into the unittest package too.
+
+// NonexistentID is an ID that will never exist
+const NonexistentID = int64(math.MaxInt64)
+
+type testCond struct {
+ query any
+ args []any
+}
+
+type testOrderBy string
+
+// Cond creates a condition with arguments for a test query
+func Cond(query any, args ...any) any {
+ return &testCond{query: query, args: args}
+}
+
+// OrderBy creates an "ORDER BY" clause for a test query
+func OrderBy(orderBy string) any {
+ return testOrderBy(orderBy)
+}
+
+func whereOrderConditions(e db.Engine, conditions []any) db.Engine {
+	orderBy := "id" // the query must have an "ORDER BY", otherwise the result is not deterministic
+ for _, condition := range conditions {
+ switch cond := condition.(type) {
+ case *testCond:
+ e = e.Where(cond.query, cond.args...)
+ case testOrderBy:
+ orderBy = string(cond)
+ default:
+ e = e.Where(cond)
+ }
+ }
+ return e.OrderBy(orderBy)
+}
+
+// LoadBeanIfExists loads a bean from the fixture database if it exists
+func LoadBeanIfExists(bean any, conditions ...any) (bool, error) {
+ e := db.GetEngine(db.DefaultContext)
+ return whereOrderConditions(e, conditions).Get(bean)
+}
+
+// BeanExists checks if a bean exists in the test database
+func BeanExists(t testing.TB, bean any, conditions ...any) bool {
+ exists, err := LoadBeanIfExists(bean, conditions...)
+ require.NoError(t, err)
+ return exists
+}
+
+// AssertExistsAndLoadBean asserts that a bean exists and loads it from the test database
+func AssertExistsAndLoadBean[T any](t testing.TB, bean T, conditions ...any) T {
+ exists, err := LoadBeanIfExists(bean, conditions...)
+ require.NoError(t, err)
+ assert.True(t, exists,
+ "Expected to find %+v (of type %T, with conditions %+v), but did not",
+ bean, bean, conditions)
+ return bean
+}
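+
+// Sketch of a typical assertion combining the helpers above; the bean and
+// condition are examples, not guaranteed fixtures:
+//
+//	repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{}, unittest.Cond("owner_id = ?", 2))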
+
+// AssertExistsAndLoadMap asserts that a row exists and loads it from the test database
+func AssertExistsAndLoadMap(t testing.TB, table string, conditions ...any) map[string]string {
+ e := db.GetEngine(db.DefaultContext).Table(table)
+ res, err := whereOrderConditions(e, conditions).Query()
+ require.NoError(t, err)
+ assert.Len(t, res, 1,
+ "Expected to find one row in %s (with conditions %+v), but found %d",
+ table, conditions, len(res),
+ )
+
+ if len(res) == 1 {
+ rec := map[string]string{}
+ for k, v := range res[0] {
+ rec[k] = string(v)
+ }
+ return rec
+ }
+ return nil
+}
+
+// GetCount gets the count of a bean
+func GetCount(t testing.TB, bean any, conditions ...any) int {
+ e := db.GetEngine(db.DefaultContext)
+ for _, condition := range conditions {
+ switch cond := condition.(type) {
+ case *testCond:
+ e = e.Where(cond.query, cond.args...)
+ default:
+ e = e.Where(cond)
+ }
+ }
+ count, err := e.Count(bean)
+ require.NoError(t, err)
+ return int(count)
+}
+
+// AssertNotExistsBean asserts that a bean does not exist in the test database
+func AssertNotExistsBean(t testing.TB, bean any, conditions ...any) {
+ exists, err := LoadBeanIfExists(bean, conditions...)
+ require.NoError(t, err)
+ assert.False(t, exists)
+}
+
+// AssertExistsIf asserts that a bean exists or does not exist, depending on
+// what is expected.
+func AssertExistsIf(t testing.TB, expected bool, bean any, conditions ...any) {
+ exists, err := LoadBeanIfExists(bean, conditions...)
+ require.NoError(t, err)
+ assert.Equal(t, expected, exists)
+}
+
+// AssertSuccessfulInsert asserts that the beans are successfully inserted
+func AssertSuccessfulInsert(t testing.TB, beans ...any) {
+ err := db.Insert(db.DefaultContext, beans...)
+ require.NoError(t, err)
+}
+
+// AssertSuccessfulDelete asserts that the beans are successfully deleted
+func AssertSuccessfulDelete(t require.TestingT, beans ...any) {
+ err := db.DeleteBeans(db.DefaultContext, beans...)
+ require.NoError(t, err)
+}
+
+// AssertCount asserts the count of a bean
+func AssertCount(t testing.TB, bean, expected any) bool {
+ return assert.EqualValues(t, expected, GetCount(t, bean))
+}
+
+// AssertInt64InRange asserts that the value is in the range [low, high]
+func AssertInt64InRange(t testing.TB, low, high, value int64) {
+ assert.True(t, value >= low && value <= high,
+ "Expected value in range [%d, %d], found %d", low, high, value)
+}
+
+// GetCountByCond gets the count of database entries matching the condition
+func GetCountByCond(t testing.TB, tableName string, cond builder.Cond) int64 {
+ e := db.GetEngine(db.DefaultContext)
+ count, err := e.Table(tableName).Where(cond).Count()
+ require.NoError(t, err)
+ return count
+}
+
+// AssertCountByCond asserts the count of database entries matching the condition
+func AssertCountByCond(t testing.TB, tableName string, cond builder.Cond, expected int) bool {
+ return assert.EqualValues(t, expected, GetCountByCond(t, tableName, cond),
+ "Failed consistency test, the counted bean (of table %s) was %+v", tableName, cond)
+}
diff --git a/models/user/avatar.go b/models/user/avatar.go
new file mode 100644
index 0000000..c6937d7
--- /dev/null
+++ b/models/user/avatar.go
@@ -0,0 +1,115 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "context"
+ "crypto/md5"
+ "fmt"
+ "image/png"
+ "io"
+ "strings"
+
+ "code.gitea.io/gitea/models/avatars"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/avatar"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/storage"
+)
+
+// CustomAvatarRelativePath returns user custom avatar relative path.
+func (u *User) CustomAvatarRelativePath() string {
+ return u.Avatar
+}
+
+// GenerateRandomAvatar generates a random avatar for user.
+func GenerateRandomAvatar(ctx context.Context, u *User) error {
+ seed := u.Email
+ if len(seed) == 0 {
+ seed = u.Name
+ }
+
+ img, err := avatar.RandomImage([]byte(seed))
+ if err != nil {
+ return fmt.Errorf("RandomImage: %w", err)
+ }
+
+ u.Avatar = avatars.HashEmail(seed)
+
+ // Don't share the images so that we can delete them easily
+ if err := storage.SaveFrom(storage.Avatars, u.CustomAvatarRelativePath(), func(w io.Writer) error {
+		if err := png.Encode(w, img); err != nil {
+			log.Error("Encode: %v", err)
+			return err // return the encode error; the outer err is always nil here
+		}
+		return nil
+ }); err != nil {
+		return fmt.Errorf("Failed to save avatar %s: %w", u.CustomAvatarRelativePath(), err)
+ }
+
+ if _, err := db.GetEngine(ctx).ID(u.ID).Cols("avatar").Update(u); err != nil {
+ return err
+ }
+
+ log.Info("New random avatar created: %d", u.ID)
+ return nil
+}
+
+// AvatarLinkWithSize returns a link to the user's avatar of the given size; size <= 0 means the default size
+func (u *User) AvatarLinkWithSize(ctx context.Context, size int) string {
+ if u.IsGhost() {
+ return avatars.DefaultAvatarLink()
+ }
+
+ useLocalAvatar := false
+ autoGenerateAvatar := false
+
+ disableGravatar := setting.Config().Picture.DisableGravatar.Value(ctx)
+
+ switch {
+ case u.UseCustomAvatar:
+ useLocalAvatar = true
+ case disableGravatar, setting.OfflineMode:
+ useLocalAvatar = true
+ autoGenerateAvatar = true
+ }
+
+ if useLocalAvatar {
+ if u.Avatar == "" && autoGenerateAvatar {
+ if err := GenerateRandomAvatar(ctx, u); err != nil {
+ log.Error("GenerateRandomAvatar: %v", err)
+ }
+ }
+ if u.Avatar == "" {
+ return avatars.DefaultAvatarLink()
+ }
+ return avatars.GenerateUserAvatarImageLink(u.Avatar, size)
+ }
+ return avatars.GenerateEmailAvatarFastLink(ctx, u.AvatarEmail, size)
+}
+
+// AvatarLink returns the full avatar link with http host
+func (u *User) AvatarLink(ctx context.Context) string {
+ link := u.AvatarLinkWithSize(ctx, 0)
+ if !strings.HasPrefix(link, "//") && !strings.Contains(link, "://") {
+ return setting.AppURL + strings.TrimPrefix(link, setting.AppSubURL+"/")
+ }
+ return link
+}
+
+// IsUploadAvatarChanged returns true if the current user's avatar would be changed with the provided data
+func (u *User) IsUploadAvatarChanged(data []byte) bool {
+ if !u.UseCustomAvatar || len(u.Avatar) == 0 {
+ return true
+ }
+ avatarID := fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%d-%x", u.ID, md5.Sum(data)))))
+ return u.Avatar != avatarID
+}
+
+// ExistsWithAvatarAtStoragePath returns true if there is a user with this Avatar
+func ExistsWithAvatarAtStoragePath(ctx context.Context, storagePath string) (bool, error) {
+ // See func (u *User) CustomAvatarRelativePath()
+ // u.Avatar is used directly as the storage path - therefore we can check for existence directly using the path
+ return db.GetEngine(ctx).Where("`avatar`=?", storagePath).Exist(new(User))
+}
diff --git a/models/user/badge.go b/models/user/badge.go
new file mode 100644
index 0000000..ee52b44
--- /dev/null
+++ b/models/user/badge.go
@@ -0,0 +1,41 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+)
+
+// Badge represents a user badge
+type Badge struct {
+ ID int64 `xorm:"pk autoincr"`
+ Description string
+ ImageURL string
+}
+
+// UserBadge represents the assignment of a badge to a user
+type UserBadge struct { //nolint:revive
+ ID int64 `xorm:"pk autoincr"`
+ BadgeID int64
+ UserID int64 `xorm:"INDEX"`
+}
+
+func init() {
+ db.RegisterModel(new(Badge))
+ db.RegisterModel(new(UserBadge))
+}
+
+// GetUserBadges returns the user's badges.
+func GetUserBadges(ctx context.Context, u *User) ([]*Badge, int64, error) {
+ sess := db.GetEngine(ctx).
+ Select("`badge`.*").
+ Join("INNER", "user_badge", "`user_badge`.badge_id=badge.id").
+ Where("user_badge.user_id=?", u.ID)
+
+ badges := make([]*Badge, 0, 8)
+ count, err := sess.FindAndCount(&badges)
+ return badges, count, err
+}
diff --git a/models/user/block.go b/models/user/block.go
new file mode 100644
index 0000000..189cacc
--- /dev/null
+++ b/models/user/block.go
@@ -0,0 +1,91 @@
+// Copyright 2023 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "context"
+ "errors"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/timeutil"
+)
+
+// ErrBlockedByUser defines an error stating that the user is not allowed to perform the action because they are blocked.
+var ErrBlockedByUser = errors.New("user is blocked by the poster or repository owner")
+
+// BlockedUser represents a blocked user entry.
+type BlockedUser struct {
+ ID int64 `xorm:"pk autoincr"`
+ // UID of the one who got blocked.
+ BlockID int64 `xorm:"index"`
+ // UID of the one who did the block action.
+ UserID int64 `xorm:"index"`
+
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+}
+
+// TableName provides the real table name
+func (*BlockedUser) TableName() string {
+ return "forgejo_blocked_user"
+}
+
+func init() {
+ db.RegisterModel(new(BlockedUser))
+}
+
+// IsBlocked returns whether userID has blocked blockID.
+func IsBlocked(ctx context.Context, userID, blockID int64) bool {
+ has, _ := db.GetEngine(ctx).Exist(&BlockedUser{UserID: userID, BlockID: blockID})
+ return has
+}
+
+// IsBlockedMultiple returns whether any of the userIDs has blocked blockID.
+func IsBlockedMultiple(ctx context.Context, userIDs []int64, blockID int64) bool {
+ has, _ := db.GetEngine(ctx).In("user_id", userIDs).Exist(&BlockedUser{BlockID: blockID})
+ return has
+}
+
+// UnblockUser removes the blocked user entry.
+func UnblockUser(ctx context.Context, userID, blockID int64) error {
+ _, err := db.GetEngine(ctx).Delete(&BlockedUser{UserID: userID, BlockID: blockID})
+ return err
+}
+
+// CountBlockedUsers returns the number of users the user has blocked.
+func CountBlockedUsers(ctx context.Context, userID int64) (int64, error) {
+ return db.GetEngine(ctx).Where("user_id=?", userID).Count(&BlockedUser{})
+}
+
+// ListBlockedUsers returns the users that the user has blocked.
+// The created_unix field of the user struct is overridden by the created_unix
+// field of the blocked-user entry.
+func ListBlockedUsers(ctx context.Context, userID int64, opts db.ListOptions) ([]*User, error) {
+ sess := db.GetEngine(ctx).
+ Select("`forgejo_blocked_user`.created_unix, `user`.*").
+ Join("INNER", "forgejo_blocked_user", "`user`.id=`forgejo_blocked_user`.block_id").
+ Where("`forgejo_blocked_user`.user_id=?", userID)
+
+ if opts.Page > 0 {
+ sess = db.SetSessionPagination(sess, &opts)
+ users := make([]*User, 0, opts.PageSize)
+
+ return users, sess.Find(&users)
+ }
+
+ users := make([]*User, 0, 8)
+ return users, sess.Find(&users)
+}
+
+// ListBlockedByUsersID returns the IDs of the users that blocked the given user.
+func ListBlockedByUsersID(ctx context.Context, userID int64) ([]int64, error) {
+ users := make([]int64, 0, 8)
+ err := db.GetEngine(ctx).
+ Table("user").
+ Select("`user`.id").
+ Join("INNER", "forgejo_blocked_user", "`user`.id=`forgejo_blocked_user`.user_id").
+ Where("`forgejo_blocked_user`.block_id=?", userID).
+ Find(&users)
+
+ return users, err
+}
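A sketch of the intended call pattern, assuming a database context; the IDs mirror the fixtures used by the tests below (user 4 has blocked user 1):

package main

import (
	"context"
	"fmt"

	"code.gitea.io/gitea/models/db"
	user_model "code.gitea.io/gitea/models/user"
)

func rejectIfBlocked(ctx context.Context) error {
	if user_model.IsBlocked(ctx, 4, 1) {
		// an interaction initiated by user 1 toward user 4 must be rejected
		return user_model.ErrBlockedByUser
	}
	blocked, err := user_model.ListBlockedUsers(ctx, 4, db.ListOptions{Page: 1, PageSize: 20})
	if err != nil {
		return err
	}
	for _, u := range blocked {
		fmt.Println(u.Name, u.CreatedUnix) // CreatedUnix holds the block time here
	}
	return nil
}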
diff --git a/models/user/block_test.go b/models/user/block_test.go
new file mode 100644
index 0000000..a795ef3
--- /dev/null
+++ b/models/user/block_test.go
@@ -0,0 +1,78 @@
+// Copyright 2023 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestIsBlocked(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ assert.True(t, user_model.IsBlocked(db.DefaultContext, 4, 1))
+
+ // Simple test cases to ensure the function can also respond with false.
+ assert.False(t, user_model.IsBlocked(db.DefaultContext, 1, 1))
+ assert.False(t, user_model.IsBlocked(db.DefaultContext, 3, 2))
+}
+
+func TestIsBlockedMultiple(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ assert.True(t, user_model.IsBlockedMultiple(db.DefaultContext, []int64{4}, 1))
+ assert.True(t, user_model.IsBlockedMultiple(db.DefaultContext, []int64{4, 3, 4, 5}, 1))
+
+ // Simple test cases to ensure the function can also respond with false.
+ assert.False(t, user_model.IsBlockedMultiple(db.DefaultContext, []int64{1}, 1))
+ assert.False(t, user_model.IsBlockedMultiple(db.DefaultContext, []int64{3, 4, 1}, 2))
+}
+
+func TestUnblockUser(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ assert.True(t, user_model.IsBlocked(db.DefaultContext, 4, 1))
+
+ require.NoError(t, user_model.UnblockUser(db.DefaultContext, 4, 1))
+
+	// After unblocking, the pair must no longer be reported as blocked.
+ assert.False(t, user_model.IsBlocked(db.DefaultContext, 4, 1))
+}
+
+func TestListBlockedUsers(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ blockedUsers, err := user_model.ListBlockedUsers(db.DefaultContext, 4, db.ListOptions{})
+ require.NoError(t, err)
+ if assert.Len(t, blockedUsers, 1) {
+ assert.EqualValues(t, 1, blockedUsers[0].ID)
+ // The function returns the created Unix of the block, not that of the user.
+ assert.EqualValues(t, 1671607299, blockedUsers[0].CreatedUnix)
+ }
+}
+
+func TestListBlockedByUsersID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ blockedByUserIDs, err := user_model.ListBlockedByUsersID(db.DefaultContext, 1)
+ require.NoError(t, err)
+ if assert.Len(t, blockedByUserIDs, 1) {
+ assert.EqualValues(t, 4, blockedByUserIDs[0])
+ }
+}
+
+func TestCountBlockedUsers(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ count, err := user_model.CountBlockedUsers(db.DefaultContext, 4)
+ require.NoError(t, err)
+ assert.EqualValues(t, 1, count)
+
+ count, err = user_model.CountBlockedUsers(db.DefaultContext, 1)
+ require.NoError(t, err)
+ assert.EqualValues(t, 0, count)
+}
diff --git a/models/user/email_address.go b/models/user/email_address.go
new file mode 100644
index 0000000..011c3ed
--- /dev/null
+++ b/models/user/email_address.go
@@ -0,0 +1,483 @@
+// Copyright 2016 The Gogs Authors. All rights reserved.
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "context"
+ "fmt"
+ "net/mail"
+ "regexp"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/optional"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/modules/validation"
+
+ "xorm.io/builder"
+)
+
+// ErrEmailNotActivated e-mail address has not been activated error
+var ErrEmailNotActivated = util.NewInvalidArgumentErrorf("e-mail address has not been activated")
+
+// ErrEmailCharIsNotSupported e-mail address contains unsupported character
+type ErrEmailCharIsNotSupported struct {
+ Email string
+}
+
+// IsErrEmailCharIsNotSupported checks if an error is an ErrEmailCharIsNotSupported
+func IsErrEmailCharIsNotSupported(err error) bool {
+ _, ok := err.(ErrEmailCharIsNotSupported)
+ return ok
+}
+
+func (err ErrEmailCharIsNotSupported) Error() string {
+ return fmt.Sprintf("e-mail address contains unsupported character [email: %s]", err.Email)
+}
+
+func (err ErrEmailCharIsNotSupported) Unwrap() error {
+ return util.ErrInvalidArgument
+}
+
+// ErrEmailInvalid represents an error where the email address does not comply with RFC 5322
+// or has a leading '-' character
+type ErrEmailInvalid struct {
+ Email string
+}
+
+// IsErrEmailInvalid checks if an error is an ErrEmailInvalid
+func IsErrEmailInvalid(err error) bool {
+ _, ok := err.(ErrEmailInvalid)
+ return ok
+}
+
+func (err ErrEmailInvalid) Error() string {
+ return fmt.Sprintf("e-mail invalid [email: %s]", err.Email)
+}
+
+func (err ErrEmailInvalid) Unwrap() error {
+ return util.ErrInvalidArgument
+}
+
+// ErrEmailAlreadyUsed represents a "EmailAlreadyUsed" kind of error.
+type ErrEmailAlreadyUsed struct {
+ Email string
+}
+
+// IsErrEmailAlreadyUsed checks if an error is an ErrEmailAlreadyUsed.
+func IsErrEmailAlreadyUsed(err error) bool {
+ _, ok := err.(ErrEmailAlreadyUsed)
+ return ok
+}
+
+func (err ErrEmailAlreadyUsed) Error() string {
+ return fmt.Sprintf("e-mail already in use [email: %s]", err.Email)
+}
+
+func (err ErrEmailAlreadyUsed) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+// ErrEmailAddressNotExist email address does not exist
+type ErrEmailAddressNotExist struct {
+ Email string
+}
+
+// IsErrEmailAddressNotExist checks if an error is an ErrEmailAddressNotExist
+func IsErrEmailAddressNotExist(err error) bool {
+ _, ok := err.(ErrEmailAddressNotExist)
+ return ok
+}
+
+func (err ErrEmailAddressNotExist) Error() string {
+ return fmt.Sprintf("Email address does not exist [email: %s]", err.Email)
+}
+
+func (err ErrEmailAddressNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ErrPrimaryEmailCannotDelete primary email address cannot be deleted
+type ErrPrimaryEmailCannotDelete struct {
+ Email string
+}
+
+// IsErrPrimaryEmailCannotDelete checks if an error is an ErrPrimaryEmailCannotDelete
+func IsErrPrimaryEmailCannotDelete(err error) bool {
+ _, ok := err.(ErrPrimaryEmailCannotDelete)
+ return ok
+}
+
+func (err ErrPrimaryEmailCannotDelete) Error() string {
+ return fmt.Sprintf("Primary email address cannot be deleted [email: %s]", err.Email)
+}
+
+func (err ErrPrimaryEmailCannotDelete) Unwrap() error {
+ return util.ErrInvalidArgument
+}
+
+// EmailAddress represents an email address of a user. The table also contains
+// the primary email address, which is additionally saved in the user table.
+type EmailAddress struct {
+ ID int64 `xorm:"pk autoincr"`
+ UID int64 `xorm:"INDEX NOT NULL"`
+ Email string `xorm:"UNIQUE NOT NULL"`
+ LowerEmail string `xorm:"UNIQUE NOT NULL"`
+ IsActivated bool
+ IsPrimary bool `xorm:"DEFAULT(false) NOT NULL"`
+}
+
+func init() {
+ db.RegisterModel(new(EmailAddress))
+}
+
+// BeforeInsert will be invoked by XORM before inserting a record
+func (email *EmailAddress) BeforeInsert() {
+ if email.LowerEmail == "" {
+ email.LowerEmail = strings.ToLower(email.Email)
+ }
+}
+
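+// InsertEmailAddress inserts a new email address record.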
+func InsertEmailAddress(ctx context.Context, email *EmailAddress) (*EmailAddress, error) {
+ if err := db.Insert(ctx, email); err != nil {
+ return nil, err
+ }
+ return email, nil
+}
+
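+// UpdateEmailAddress updates all columns of the given email address record.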
+func UpdateEmailAddress(ctx context.Context, email *EmailAddress) error {
+ _, err := db.GetEngine(ctx).ID(email.ID).AllCols().Update(email)
+ return err
+}
+
+var emailRegexp = regexp.MustCompile("^[a-zA-Z0-9.!#$%&'*+-/=?^_`{|}~]*@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$")
+
+// ValidateEmail checks whether the email is a valid and allowed address
+func ValidateEmail(email string) error {
+ if err := validateEmailBasic(email); err != nil {
+ return err
+ }
+ return validateEmailDomain(email)
+}
+
+// ValidateEmailForAdmin checks whether the email is a valid address when admins manually add or edit users
+func ValidateEmailForAdmin(email string) error {
+	// In this case we do not need to check the email domain
+	return validateEmailBasic(email)
+}
+
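+// GetEmailAddressByEmail returns the email address record matching the given address, case-insensitively.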
+func GetEmailAddressByEmail(ctx context.Context, email string) (*EmailAddress, error) {
+ ea := &EmailAddress{}
+ if has, err := db.GetEngine(ctx).Where("lower_email=?", strings.ToLower(email)).Get(ea); err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrEmailAddressNotExist{email}
+ }
+ return ea, nil
+}
+
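+// GetEmailAddressOfUser returns the email address record of the given user matching the given address.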
+func GetEmailAddressOfUser(ctx context.Context, email string, uid int64) (*EmailAddress, error) {
+ ea := &EmailAddress{}
+ if has, err := db.GetEngine(ctx).Where("lower_email=? AND uid=?", strings.ToLower(email), uid).Get(ea); err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrEmailAddressNotExist{email}
+ }
+ return ea, nil
+}
+
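+// GetPrimaryEmailAddressOfUser returns the primary email address record of the given user.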
+func GetPrimaryEmailAddressOfUser(ctx context.Context, uid int64) (*EmailAddress, error) {
+ ea := &EmailAddress{}
+ if has, err := db.GetEngine(ctx).Where("uid=? AND is_primary=?", uid, true).Get(ea); err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrEmailAddressNotExist{}
+ }
+ return ea, nil
+}
+
+// GetEmailAddresses returns all email addresses belonging to the given user.
+func GetEmailAddresses(ctx context.Context, uid int64) ([]*EmailAddress, error) {
+ emails := make([]*EmailAddress, 0, 5)
+ if err := db.GetEngine(ctx).
+ Where("uid=?", uid).
+ Asc("id").
+ Find(&emails); err != nil {
+ return nil, err
+ }
+ return emails, nil
+}
+
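+// ActivatedEmailAddress is a reduced view of EmailAddress used to list activated addresses.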
+type ActivatedEmailAddress struct {
+ ID int64
+ Email string
+}
+
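+// GetActivatedEmailAddresses returns the activated email addresses of the given user.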
+func GetActivatedEmailAddresses(ctx context.Context, uid int64) ([]*ActivatedEmailAddress, error) {
+ emails := make([]*ActivatedEmailAddress, 0, 8)
+ if err := db.GetEngine(ctx).
+ Table("email_address").
+ Select("id, email").
+ Where("uid=?", uid).
+ And("is_activated=?", true).
+ Asc("id").
+ Find(&emails); err != nil {
+ return nil, err
+ }
+ return emails, nil
+}
+
+// GetEmailAddressByID gets a user's email address by ID
+func GetEmailAddressByID(ctx context.Context, uid, id int64) (*EmailAddress, error) {
+ // User ID is required for security reasons
+ email := &EmailAddress{UID: uid}
+ if has, err := db.GetEngine(ctx).ID(id).Get(email); err != nil {
+ return nil, err
+ } else if !has {
+ return nil, nil
+ }
+ return email, nil
+}
+
+// IsEmailActive checks whether the email is activated under a different email ID
+func IsEmailActive(ctx context.Context, email string, excludeEmailID int64) (bool, error) {
+ if len(email) == 0 {
+ return true, nil
+ }
+
+ // Can't filter by boolean field unless it's explicit
+ cond := builder.NewCond()
+ cond = cond.And(builder.Eq{"lower_email": strings.ToLower(email)}, builder.Neq{"id": excludeEmailID})
+ if setting.Service.RegisterEmailConfirm {
+ // Inactive (unvalidated) addresses don't count as active if email validation is required
+ cond = cond.And(builder.Eq{"is_activated": true})
+ }
+
+ var em EmailAddress
+ if has, err := db.GetEngine(ctx).Where(cond).Get(&em); has || err != nil {
+ if has {
+ log.Info("isEmailActive(%q, %d) found duplicate in email ID %d", email, excludeEmailID, em.ID)
+ }
+ return has, err
+ }
+
+ return false, nil
+}
+
+// IsEmailUsed returns true if the email has been used.
+func IsEmailUsed(ctx context.Context, email string) (bool, error) {
+ if len(email) == 0 {
+ return true, nil
+ }
+
+ return db.GetEngine(ctx).Where("lower_email=?", strings.ToLower(email)).Get(&EmailAddress{})
+}
+
+// ActivateEmail activates the email address of the given user.
+func ActivateEmail(ctx context.Context, email *EmailAddress) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+ if err := updateActivation(ctx, email, true); err != nil {
+ return err
+ }
+ return committer.Commit()
+}
+
+func updateActivation(ctx context.Context, email *EmailAddress, activate bool) error {
+ user, err := GetUserByID(ctx, email.UID)
+ if err != nil {
+ return err
+ }
+ if user.Rands, err = GetUserSalt(); err != nil {
+ return err
+ }
+ email.IsActivated = activate
+ if _, err := db.GetEngine(ctx).ID(email.ID).Cols("is_activated").Update(email); err != nil {
+ return err
+ }
+ return UpdateUserCols(ctx, user, "rands")
+}
+
+// SearchEmailOrderBy is used to sort the results from SearchEmails()
+type SearchEmailOrderBy string
+
+func (s SearchEmailOrderBy) String() string {
+ return string(s)
+}
+
+// Strings for sorting result
+const (
+ SearchEmailOrderByEmail SearchEmailOrderBy = "email_address.lower_email ASC, email_address.is_primary DESC, email_address.id ASC"
+ SearchEmailOrderByEmailReverse SearchEmailOrderBy = "email_address.lower_email DESC, email_address.is_primary ASC, email_address.id DESC"
+ SearchEmailOrderByName SearchEmailOrderBy = "`user`.lower_name ASC, email_address.is_primary DESC, email_address.id ASC"
+ SearchEmailOrderByNameReverse SearchEmailOrderBy = "`user`.lower_name DESC, email_address.is_primary ASC, email_address.id DESC"
+)
+
+// SearchEmailOptions are options to search e-mail addresses for the admin panel
+type SearchEmailOptions struct {
+ db.ListOptions
+ Keyword string
+ SortType SearchEmailOrderBy
+ IsPrimary optional.Option[bool]
+ IsActivated optional.Option[bool]
+}
+
+// SearchEmailResult is an e-mail address found in the user or email_address table
+type SearchEmailResult struct {
+ ID int64
+ UID int64
+ Email string
+ IsActivated bool
+ IsPrimary bool
+ // From User
+ Name string
+ FullName string
+}
+
+// SearchEmails takes options such as a keyword to search for,
+// and returns the results in the given range plus the total number of results.
+func SearchEmails(ctx context.Context, opts *SearchEmailOptions) ([]*SearchEmailResult, int64, error) {
+ var cond builder.Cond = builder.Eq{"`user`.`type`": UserTypeIndividual}
+ if len(opts.Keyword) > 0 {
+ likeStr := "%" + strings.ToLower(opts.Keyword) + "%"
+ cond = cond.And(builder.Or(
+ builder.Like{"lower(`user`.full_name)", likeStr},
+ builder.Like{"`user`.lower_name", likeStr},
+ builder.Like{"email_address.lower_email", likeStr},
+ ))
+ }
+
+ if opts.IsPrimary.Has() {
+ cond = cond.And(builder.Eq{"email_address.is_primary": opts.IsPrimary.Value()})
+ }
+
+ if opts.IsActivated.Has() {
+ cond = cond.And(builder.Eq{"email_address.is_activated": opts.IsActivated.Value()})
+ }
+
+ count, err := db.GetEngine(ctx).Join("INNER", "`user`", "`user`.id = email_address.uid").
+ Where(cond).Count(new(EmailAddress))
+ if err != nil {
+ return nil, 0, fmt.Errorf("Count: %w", err)
+ }
+
+ orderby := opts.SortType.String()
+ if orderby == "" {
+ orderby = SearchEmailOrderByEmail.String()
+ }
+
+ opts.SetDefaultValues()
+
+ emails := make([]*SearchEmailResult, 0, opts.PageSize)
+ err = db.GetEngine(ctx).Table("email_address").
+ Select("email_address.*, `user`.name, `user`.full_name").
+ Join("INNER", "`user`", "`user`.id = email_address.uid").
+ Where(cond).
+ OrderBy(orderby).
+ Limit(opts.PageSize, (opts.Page-1)*opts.PageSize).
+ Find(&emails)
+
+ return emails, count, err
+}
+
+// ActivateUserEmail will change the activated state of an email address,
+// either primary or secondary (all in the email_address table)
+func ActivateUserEmail(ctx context.Context, userID int64, email string, activate bool) (err error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ // Activate/deactivate a user's secondary email address
+ // First check if there's another user active with the same address
+ addr, exist, err := db.Get[EmailAddress](ctx, builder.Eq{"uid": userID, "lower_email": strings.ToLower(email)})
+ if err != nil {
+ return err
+ } else if !exist {
+ return fmt.Errorf("no such email: %d (%s)", userID, email)
+ }
+
+ if addr.IsActivated == activate {
+ // Already in the desired state; no action
+ return nil
+ }
+ if activate {
+ if used, err := IsEmailActive(ctx, email, addr.ID); err != nil {
+ return fmt.Errorf("unable to check isEmailActive() for %s: %w", email, err)
+ } else if used {
+ return ErrEmailAlreadyUsed{Email: email}
+ }
+ }
+ if err = updateActivation(ctx, addr, activate); err != nil {
+ return fmt.Errorf("unable to updateActivation() for %d:%s: %w", addr.ID, addr.Email, err)
+ }
+
+ // Activate/deactivate a user's primary email address and account
+ if addr.IsPrimary {
+ user, exist, err := db.Get[User](ctx, builder.Eq{"id": userID, "email": email})
+ if err != nil {
+ return err
+ } else if !exist {
+ return fmt.Errorf("no user with ID: %d and Email: %s", userID, email)
+ }
+
+ // The user's activation state should be synchronized with the primary email
+ if user.IsActive != activate {
+ user.IsActive = activate
+ if user.Rands, err = GetUserSalt(); err != nil {
+ return fmt.Errorf("unable to generate salt: %w", err)
+ }
+ if err = UpdateUserCols(ctx, user, "is_active", "rands"); err != nil {
+ return fmt.Errorf("unable to updateUserCols() for user ID: %d: %w", userID, err)
+ }
+ }
+ }
+
+ return committer.Commit()
+}
+
+// validateEmailBasic checks whether the email complies with the rules
+func validateEmailBasic(email string) error {
+ if len(email) == 0 {
+ return ErrEmailInvalid{email}
+ }
+
+ if !emailRegexp.MatchString(email) {
+ return ErrEmailCharIsNotSupported{email}
+ }
+
+ if email[0] == '-' {
+ return ErrEmailInvalid{email}
+ }
+
+ if _, err := mail.ParseAddress(email); err != nil {
+ return ErrEmailInvalid{email}
+ }
+
+ return nil
+}
+
+// validateEmailDomain checks whether the email domain is allowed or blocked
+func validateEmailDomain(email string) error {
+ if !IsEmailDomainAllowed(email) {
+ return ErrEmailInvalid{email}
+ }
+
+ return nil
+}
+
+func IsEmailDomainAllowed(email string) bool {
+ if len(setting.Service.EmailDomainAllowList) == 0 {
+ return !validation.IsEmailDomainListed(setting.Service.EmailDomainBlockList, email)
+ }
+
+ return validation.IsEmailDomainListed(setting.Service.EmailDomainAllowList, email)
+}
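A sketch of the validation behavior, assuming the service settings are initialized (ValidateEmail also consults the domain allow/block lists); the addresses are taken from the test table below:

package main

import (
	"fmt"

	user_model "code.gitea.io/gitea/models/user"
)

func classifyAddresses() {
	for _, addr := range []string{"first.last@iana.org", "-233@qq.com", ";233@qq.com"} {
		switch err := user_model.ValidateEmail(addr); {
		case err == nil:
			fmt.Println(addr, "accepted")
		case user_model.IsErrEmailCharIsNotSupported(err):
			fmt.Println(addr, "rejected: unsupported character")
		case user_model.IsErrEmailInvalid(err):
			fmt.Println(addr, "rejected: invalid form")
		}
	}
}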
diff --git a/models/user/email_address_test.go b/models/user/email_address_test.go
new file mode 100644
index 0000000..b918f21
--- /dev/null
+++ b/models/user/email_address_test.go
@@ -0,0 +1,222 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user_test
+
+import (
+ "fmt"
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/optional"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGetEmailAddresses(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ emails, _ := user_model.GetEmailAddresses(db.DefaultContext, int64(1))
+ if assert.Len(t, emails, 3) {
+ assert.True(t, emails[0].IsPrimary)
+ assert.True(t, emails[2].IsActivated)
+ assert.False(t, emails[2].IsPrimary)
+ }
+
+ emails, _ = user_model.GetEmailAddresses(db.DefaultContext, int64(2))
+ if assert.Len(t, emails, 2) {
+ assert.True(t, emails[0].IsPrimary)
+ assert.True(t, emails[0].IsActivated)
+ }
+}
+
+func TestIsEmailUsed(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ isExist, _ := user_model.IsEmailUsed(db.DefaultContext, "")
+ assert.True(t, isExist)
+ isExist, _ = user_model.IsEmailUsed(db.DefaultContext, "user11@example.com")
+ assert.True(t, isExist)
+ isExist, _ = user_model.IsEmailUsed(db.DefaultContext, "user1234567890@example.com")
+ assert.False(t, isExist)
+}
+
+func TestActivate(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ email := &user_model.EmailAddress{
+ ID: int64(1),
+ UID: int64(1),
+ Email: "user11@example.com",
+ }
+ require.NoError(t, user_model.ActivateEmail(db.DefaultContext, email))
+
+ emails, _ := user_model.GetEmailAddresses(db.DefaultContext, int64(1))
+ assert.Len(t, emails, 3)
+ assert.True(t, emails[0].IsActivated)
+ assert.True(t, emails[0].IsPrimary)
+ assert.False(t, emails[1].IsPrimary)
+ assert.True(t, emails[2].IsActivated)
+ assert.False(t, emails[2].IsPrimary)
+}
+
+func TestListEmails(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ // Must find all users and their emails
+ opts := &user_model.SearchEmailOptions{
+ ListOptions: db.ListOptions{
+ PageSize: 10000,
+ },
+ }
+ emails, count, err := user_model.SearchEmails(db.DefaultContext, opts)
+ require.NoError(t, err)
+ assert.Greater(t, count, int64(5))
+
+ contains := func(match func(s *user_model.SearchEmailResult) bool) bool {
+ for _, v := range emails {
+ if match(v) {
+ return true
+ }
+ }
+ return false
+ }
+
+ assert.True(t, contains(func(s *user_model.SearchEmailResult) bool { return s.UID == 18 }))
+ // 'org3' is an organization
+ assert.False(t, contains(func(s *user_model.SearchEmailResult) bool { return s.UID == 3 }))
+
+ // Must find no records
+ opts = &user_model.SearchEmailOptions{Keyword: "NOTFOUND"}
+ emails, count, err = user_model.SearchEmails(db.DefaultContext, opts)
+ require.NoError(t, err)
+ assert.Equal(t, int64(0), count)
+
+ // Must find users 'user2', 'user28', etc.
+ opts = &user_model.SearchEmailOptions{Keyword: "user2"}
+ emails, count, err = user_model.SearchEmails(db.DefaultContext, opts)
+ require.NoError(t, err)
+ assert.NotEqual(t, int64(0), count)
+ assert.True(t, contains(func(s *user_model.SearchEmailResult) bool { return s.UID == 2 }))
+ assert.True(t, contains(func(s *user_model.SearchEmailResult) bool { return s.UID == 27 }))
+
+ // Must find only primary addresses (i.e. from the `user` table)
+ opts = &user_model.SearchEmailOptions{IsPrimary: optional.Some(true)}
+ emails, _, err = user_model.SearchEmails(db.DefaultContext, opts)
+ require.NoError(t, err)
+ assert.True(t, contains(func(s *user_model.SearchEmailResult) bool { return s.IsPrimary }))
+ assert.False(t, contains(func(s *user_model.SearchEmailResult) bool { return !s.IsPrimary }))
+
+ // Must find only inactive addresses (i.e. not validated)
+ opts = &user_model.SearchEmailOptions{IsActivated: optional.Some(false)}
+ emails, _, err = user_model.SearchEmails(db.DefaultContext, opts)
+ require.NoError(t, err)
+ assert.True(t, contains(func(s *user_model.SearchEmailResult) bool { return !s.IsActivated }))
+ assert.False(t, contains(func(s *user_model.SearchEmailResult) bool { return s.IsActivated }))
+
+ // Must find more than one page, but retrieve only one
+ opts = &user_model.SearchEmailOptions{
+ ListOptions: db.ListOptions{
+ PageSize: 5,
+ Page: 1,
+ },
+ }
+ emails, count, err = user_model.SearchEmails(db.DefaultContext, opts)
+ require.NoError(t, err)
+ assert.Len(t, emails, 5)
+ assert.Greater(t, count, int64(len(emails)))
+}
+
+func TestEmailAddressValidate(t *testing.T) {
+ kases := map[string]error{
+ "abc@gmail.com": nil,
+ "132@hotmail.com": nil,
+ "1-3-2@test.org": nil,
+ "1.3.2@test.org": nil,
+ "a_123@test.org.cn": nil,
+ `first.last@iana.org`: nil,
+ `first!last@iana.org`: nil,
+ `first#last@iana.org`: nil,
+ `first$last@iana.org`: nil,
+ `first%last@iana.org`: nil,
+ `first&last@iana.org`: nil,
+ `first'last@iana.org`: nil,
+ `first*last@iana.org`: nil,
+ `first+last@iana.org`: nil,
+ `first/last@iana.org`: nil,
+ `first=last@iana.org`: nil,
+ `first?last@iana.org`: nil,
+ `first^last@iana.org`: nil,
+ "first`last@iana.org": nil,
+ `first{last@iana.org`: nil,
+ `first|last@iana.org`: nil,
+ `first}last@iana.org`: nil,
+ `first~last@iana.org`: nil,
+ `first;last@iana.org`: user_model.ErrEmailCharIsNotSupported{`first;last@iana.org`},
+ ".233@qq.com": user_model.ErrEmailInvalid{".233@qq.com"},
+ "!233@qq.com": nil,
+ "#233@qq.com": nil,
+ "$233@qq.com": nil,
+ "%233@qq.com": nil,
+ "&233@qq.com": nil,
+ "'233@qq.com": nil,
+ "*233@qq.com": nil,
+ "+233@qq.com": nil,
+ "-233@qq.com": user_model.ErrEmailInvalid{"-233@qq.com"},
+ "/233@qq.com": nil,
+ "=233@qq.com": nil,
+ "?233@qq.com": nil,
+ "^233@qq.com": nil,
+ "_233@qq.com": nil,
+ "`233@qq.com": nil,
+ "{233@qq.com": nil,
+ "|233@qq.com": nil,
+ "}233@qq.com": nil,
+ "~233@qq.com": nil,
+ ";233@qq.com": user_model.ErrEmailCharIsNotSupported{";233@qq.com"},
+ "Foo <foo@bar.com>": user_model.ErrEmailCharIsNotSupported{"Foo <foo@bar.com>"},
+ string([]byte{0xE2, 0x84, 0xAA}): user_model.ErrEmailCharIsNotSupported{string([]byte{0xE2, 0x84, 0xAA})},
+ }
+ for kase, err := range kases {
+ t.Run(kase, func(t *testing.T) {
+ assert.EqualValues(t, err, user_model.ValidateEmail(kase))
+ })
+ }
+}
+
+func TestGetActivatedEmailAddresses(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ testCases := []struct {
+ UID int64
+ expected []*user_model.ActivatedEmailAddress
+ }{
+ {
+ UID: 1,
+ expected: []*user_model.ActivatedEmailAddress{{ID: 9, Email: "user1@example.com"}, {ID: 33, Email: "user1-2@example.com"}, {ID: 34, Email: "user1-3@example.com"}},
+ },
+ {
+ UID: 2,
+ expected: []*user_model.ActivatedEmailAddress{{ID: 3, Email: "user2@example.com"}},
+ },
+ {
+ UID: 4,
+ expected: []*user_model.ActivatedEmailAddress{{ID: 11, Email: "user4@example.com"}},
+ },
+ {
+ UID: 11,
+ expected: []*user_model.ActivatedEmailAddress{},
+ },
+ }
+
+ for _, testCase := range testCases {
+ t.Run(fmt.Sprintf("User %d", testCase.UID), func(t *testing.T) {
+ emails, err := user_model.GetActivatedEmailAddresses(db.DefaultContext, testCase.UID)
+ require.NoError(t, err)
+ assert.Equal(t, testCase.expected, emails)
+ })
+ }
+}
diff --git a/models/user/error.go b/models/user/error.go
new file mode 100644
index 0000000..cbf1999
--- /dev/null
+++ b/models/user/error.go
@@ -0,0 +1,109 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "fmt"
+
+ "code.gitea.io/gitea/modules/util"
+)
+
+// ErrUserAlreadyExist represents a "user already exists" error.
+type ErrUserAlreadyExist struct {
+ Name string
+}
+
+// IsErrUserAlreadyExist checks if an error is an ErrUserAlreadyExist.
+func IsErrUserAlreadyExist(err error) bool {
+ _, ok := err.(ErrUserAlreadyExist)
+ return ok
+}
+
+func (err ErrUserAlreadyExist) Error() string {
+ return fmt.Sprintf("user already exists [name: %s]", err.Name)
+}
+
+// Unwrap unwraps this error as an ErrAlreadyExist error
+func (err ErrUserAlreadyExist) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+// ErrUserNotExist represents a "UserNotExist" kind of error.
+type ErrUserNotExist struct {
+ UID int64
+ Name string
+}
+
+// IsErrUserNotExist checks if an error is an ErrUserNotExist.
+func IsErrUserNotExist(err error) bool {
+ _, ok := err.(ErrUserNotExist)
+ return ok
+}
+
+func (err ErrUserNotExist) Error() string {
+ return fmt.Sprintf("user does not exist [uid: %d, name: %s]", err.UID, err.Name)
+}
+
+// Unwrap unwraps this error as an ErrNotExist error
+func (err ErrUserNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ErrUserProhibitLogin represents a "ErrUserProhibitLogin" kind of error.
+type ErrUserProhibitLogin struct {
+ UID int64
+ Name string
+}
+
+// IsErrUserProhibitLogin checks if an error is an ErrUserProhibitLogin
+func IsErrUserProhibitLogin(err error) bool {
+ _, ok := err.(ErrUserProhibitLogin)
+ return ok
+}
+
+func (err ErrUserProhibitLogin) Error() string {
+	return fmt.Sprintf("user is not allowed to log in [uid: %d, name: %s]", err.UID, err.Name)
+}
+
+// Unwrap unwraps this error as a ErrPermission error
+func (err ErrUserProhibitLogin) Unwrap() error {
+ return util.ErrPermissionDenied
+}
+
+// ErrUserInactive represents a "ErrUserInactive" kind of error.
+type ErrUserInactive struct {
+ UID int64
+ Name string
+}
+
+// IsErrUserInactive checks if an error is an ErrUserInactive
+func IsErrUserInactive(err error) bool {
+ _, ok := err.(ErrUserInactive)
+ return ok
+}
+
+func (err ErrUserInactive) Error() string {
+ return fmt.Sprintf("user is inactive [uid: %d, name: %s]", err.UID, err.Name)
+}
+
+// Unwrap unwraps this error as an ErrPermissionDenied error
+func (err ErrUserInactive) Unwrap() error {
+ return util.ErrPermissionDenied
+}
+
+// ErrUserIsNotLocal represents a "ErrUserIsNotLocal" kind of error.
+type ErrUserIsNotLocal struct {
+ UID int64
+ Name string
+}
+
+func (err ErrUserIsNotLocal) Error() string {
+ return fmt.Sprintf("user is not local type [uid: %d, name: %s]", err.UID, err.Name)
+}
+
+// IsErrUserIsNotLocal checks if an error is an ErrUserIsNotLocal
+func IsErrUserIsNotLocal(err error) bool {
+ _, ok := err.(ErrUserIsNotLocal)
+ return ok
+}
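Because every error type here unwraps to a sentinel from modules/util, callers may use either the typed check or errors.Is; a sketch, assuming GetUserByID reports a missing row as ErrUserNotExist (the ID is illustrative):

package main

import (
	"context"
	"errors"

	user_model "code.gitea.io/gitea/models/user"
	"code.gitea.io/gitea/modules/util"
)

func lookupUser(ctx context.Context) {
	_, err := user_model.GetUserByID(ctx, 424242)
	if user_model.IsErrUserNotExist(err) {
		// typed check
	}
	if errors.Is(err, util.ErrNotExist) {
		// equivalent sentinel check, enabled by the Unwrap methods above
	}
}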
diff --git a/models/user/external_login_user.go b/models/user/external_login_user.go
new file mode 100644
index 0000000..965b7a5
--- /dev/null
+++ b/models/user/external_login_user.go
@@ -0,0 +1,184 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ErrExternalLoginUserAlreadyExist represents a "ExternalLoginUserAlreadyExist" kind of error.
+type ErrExternalLoginUserAlreadyExist struct {
+ ExternalID string
+ UserID int64
+ LoginSourceID int64
+}
+
+// IsErrExternalLoginUserAlreadyExist checks if an error is an ErrExternalLoginUserAlreadyExist.
+func IsErrExternalLoginUserAlreadyExist(err error) bool {
+ _, ok := err.(ErrExternalLoginUserAlreadyExist)
+ return ok
+}
+
+func (err ErrExternalLoginUserAlreadyExist) Error() string {
+ return fmt.Sprintf("external login user already exists [externalID: %s, userID: %d, loginSourceID: %d]", err.ExternalID, err.UserID, err.LoginSourceID)
+}
+
+func (err ErrExternalLoginUserAlreadyExist) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+// ErrExternalLoginUserNotExist represents a "ExternalLoginUserNotExist" kind of error.
+type ErrExternalLoginUserNotExist struct {
+ UserID int64
+ LoginSourceID int64
+}
+
+// IsErrExternalLoginUserNotExist checks if an error is an ErrExternalLoginUserNotExist.
+func IsErrExternalLoginUserNotExist(err error) bool {
+ _, ok := err.(ErrExternalLoginUserNotExist)
+ return ok
+}
+
+func (err ErrExternalLoginUserNotExist) Error() string {
+	return fmt.Sprintf("external login user link does not exist [userID: %d, loginSourceID: %d]", err.UserID, err.LoginSourceID)
+}
+
+func (err ErrExternalLoginUserNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ExternalLoginUser makes the connection between an existing user and additional external login sources
+type ExternalLoginUser struct {
+ ExternalID string `xorm:"pk NOT NULL"`
+ UserID int64 `xorm:"INDEX NOT NULL"`
+ LoginSourceID int64 `xorm:"pk NOT NULL"`
+ RawData map[string]any `xorm:"TEXT JSON"`
+ Provider string `xorm:"index VARCHAR(25)"`
+ Email string
+ Name string
+ FirstName string
+ LastName string
+ NickName string
+ Description string
+ AvatarURL string `xorm:"TEXT"`
+ Location string
+ AccessToken string `xorm:"TEXT"`
+ AccessTokenSecret string `xorm:"TEXT"`
+ RefreshToken string `xorm:"TEXT"`
+ ExpiresAt time.Time
+}
+
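+// ExternalUserMigrated is implemented by migrated content that carries its original external name and ID.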
+type ExternalUserMigrated interface {
+ GetExternalName() string
+ GetExternalID() int64
+}
+
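+// ExternalUserRemappable is implemented by migrated content whose external user can be remapped to a local user.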
+type ExternalUserRemappable interface {
+ GetUserID() int64
+ RemapExternalUser(externalName string, externalID, userID int64) error
+ ExternalUserMigrated
+}
+
+func init() {
+ db.RegisterModel(new(ExternalLoginUser))
+}
+
+// GetExternalLogin checks if an externalID already exists within the scope of a loginSourceID
+func GetExternalLogin(ctx context.Context, externalLoginUser *ExternalLoginUser) (bool, error) {
+ return db.GetEngine(ctx).Get(externalLoginUser)
+}
+
+// LinkExternalToUser links the external user to the user
+func LinkExternalToUser(ctx context.Context, user *User, externalLoginUser *ExternalLoginUser) error {
+ has, err := db.Exist[ExternalLoginUser](ctx, builder.Eq{
+ "external_id": externalLoginUser.ExternalID,
+ "login_source_id": externalLoginUser.LoginSourceID,
+ })
+ if err != nil {
+ return err
+ } else if has {
+ return ErrExternalLoginUserAlreadyExist{externalLoginUser.ExternalID, user.ID, externalLoginUser.LoginSourceID}
+ }
+
+ _, err = db.GetEngine(ctx).Insert(externalLoginUser)
+ return err
+}
+
+// RemoveAccountLink will remove the link to the given external login source for the given user
+func RemoveAccountLink(ctx context.Context, user *User, loginSourceID int64) (int64, error) {
+ deleted, err := db.GetEngine(ctx).Delete(&ExternalLoginUser{UserID: user.ID, LoginSourceID: loginSourceID})
+ if err != nil {
+ return deleted, err
+ }
+ if deleted < 1 {
+ return deleted, ErrExternalLoginUserNotExist{user.ID, loginSourceID}
+ }
+ return deleted, err
+}
+
+// RemoveAllAccountLinks will remove all external login sources for the given user
+func RemoveAllAccountLinks(ctx context.Context, user *User) error {
+ _, err := db.GetEngine(ctx).Delete(&ExternalLoginUser{UserID: user.ID})
+ return err
+}
+
+// GetUserIDByExternalUserID gets the user ID for the given provider and external user ID
+func GetUserIDByExternalUserID(ctx context.Context, provider, userID string) (int64, error) {
+ var id int64
+ _, err := db.GetEngine(ctx).Table("external_login_user").
+ Select("user_id").
+ Where("provider=?", provider).
+ And("external_id=?", userID).
+ Get(&id)
+ if err != nil {
+ return 0, err
+ }
+ return id, nil
+}
+
+// UpdateExternalUserByExternalID updates an external user's information
+func UpdateExternalUserByExternalID(ctx context.Context, external *ExternalLoginUser) error {
+ has, err := db.Exist[ExternalLoginUser](ctx, builder.Eq{
+ "external_id": external.ExternalID,
+ "login_source_id": external.LoginSourceID,
+ })
+ if err != nil {
+ return err
+ } else if !has {
+ return ErrExternalLoginUserNotExist{external.UserID, external.LoginSourceID}
+ }
+
+ _, err = db.GetEngine(ctx).Where("external_id=? AND login_source_id=?", external.ExternalID, external.LoginSourceID).AllCols().Update(external)
+ return err
+}
+
+// FindExternalUserOptions represents the options used to find external users
+type FindExternalUserOptions struct {
+ db.ListOptions
+ Provider string
+ UserID int64
+ OrderBy string
+}
+
+func (opts FindExternalUserOptions) ToConds() builder.Cond {
+ cond := builder.NewCond()
+ if len(opts.Provider) > 0 {
+ cond = cond.And(builder.Eq{"provider": opts.Provider})
+ }
+ if opts.UserID > 0 {
+ cond = cond.And(builder.Eq{"user_id": opts.UserID})
+ }
+ return cond
+}
+
+func (opts FindExternalUserOptions) ToOrders() string {
+ return opts.OrderBy
+}
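Since FindExternalUserOptions implements ToConds and ToOrders, it can be handed to the generic finder in models/db; a sketch, assuming the generic db.Find helper of this codebase and an illustrative provider name:

package main

import (
	"context"
	"fmt"

	"code.gitea.io/gitea/models/db"
	user_model "code.gitea.io/gitea/models/user"
)

func listGithubLinks(ctx context.Context) error {
	links, err := db.Find[user_model.ExternalLoginUser](ctx, user_model.FindExternalUserOptions{
		ListOptions: db.ListOptions{Page: 1, PageSize: 10},
		Provider:    "github", // illustrative
	})
	if err != nil {
		return err
	}
	for _, l := range links {
		fmt.Println(l.UserID, l.ExternalID)
	}
	return nil
}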
diff --git a/models/user/federated_user.go b/models/user/federated_user.go
new file mode 100644
index 0000000..1fc42c3
--- /dev/null
+++ b/models/user/federated_user.go
@@ -0,0 +1,35 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "code.gitea.io/gitea/modules/validation"
+)
+
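+// FederatedUser maps a local user to its counterpart on a federation host.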
+type FederatedUser struct {
+ ID int64 `xorm:"pk autoincr"`
+ UserID int64 `xorm:"NOT NULL"`
+ ExternalID string `xorm:"UNIQUE(federation_user_mapping) NOT NULL"`
+ FederationHostID int64 `xorm:"UNIQUE(federation_user_mapping) NOT NULL"`
+}
+
+func NewFederatedUser(userID int64, externalID string, federationHostID int64) (FederatedUser, error) {
+ result := FederatedUser{
+ UserID: userID,
+ ExternalID: externalID,
+ FederationHostID: federationHostID,
+ }
+ if valid, err := validation.IsValid(result); !valid {
+ return FederatedUser{}, err
+ }
+ return result, nil
+}
+
+func (user FederatedUser) Validate() []string {
+ var result []string
+ result = append(result, validation.ValidateNotEmpty(user.UserID, "UserID")...)
+ result = append(result, validation.ValidateNotEmpty(user.ExternalID, "ExternalID")...)
+ result = append(result, validation.ValidateNotEmpty(user.FederationHostID, "FederationHostID")...)
+ return result
+}
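The constructor rejects zero-valued fields through the Validate hook; a minimal sketch with illustrative values:

package main

import (
	"fmt"

	user_model "code.gitea.io/gitea/models/user"
)

func buildFederatedUser() {
	fu, err := user_model.NewFederatedUser(12, "12", 1)
	fmt.Println(fu.ExternalID, err) // "12" <nil>

	_, err = user_model.NewFederatedUser(0, "12", 1) // zero UserID
	fmt.Println(err != nil)                          // true: validation fails
}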
diff --git a/models/user/federated_user_test.go b/models/user/federated_user_test.go
new file mode 100644
index 0000000..6a21126
--- /dev/null
+++ b/models/user/federated_user_test.go
@@ -0,0 +1,29 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/modules/validation"
+)
+
+func Test_FederatedUserValidation(t *testing.T) {
+ sut := FederatedUser{
+ UserID: 12,
+ ExternalID: "12",
+ FederationHostID: 1,
+ }
+ if res, err := validation.IsValid(sut); !res {
+ t.Errorf("sut should be valid but was %q", err)
+ }
+
+ sut = FederatedUser{
+ ExternalID: "12",
+ FederationHostID: 1,
+ }
+ if res, _ := validation.IsValid(sut); res {
+ t.Errorf("sut should be invalid")
+ }
+}
diff --git a/models/user/fixtures/user.yml b/models/user/fixtures/user.yml
new file mode 100644
index 0000000..b1892f3
--- /dev/null
+++ b/models/user/fixtures/user.yml
@@ -0,0 +1,36 @@
+-
+ id: 1041
+ lower_name: remote01
+ name: remote01
+ full_name: Remote01
+ email: remote01@example.com
+ keep_email_private: false
+ email_notifications_preference: onmention
+ passwd: ZogKvWdyEx:password
+ passwd_hash_algo: dummy
+ must_change_password: false
+ login_source: 1001
+ login_name: 123
+ type: 5
+ salt: ZogKvWdyEx
+ max_repo_creation: -1
+ is_active: true
+ is_admin: false
+ is_restricted: false
+ allow_git_hook: false
+ allow_import_local: false
+ allow_create_organization: true
+ prohibit_login: true
+ avatar: avatarremote01
+ avatar_email: avatarremote01@example.com
+ use_custom_avatar: false
+ num_followers: 0
+ num_following: 0
+ num_stars: 0
+ num_repos: 0
+ num_teams: 0
+ num_members: 0
+ visibility: 0
+ repo_admin_change_team_access: false
+ theme: ""
+ keep_activity_private: false
diff --git a/models/user/follow.go b/models/user/follow.go
new file mode 100644
index 0000000..9c3283b
--- /dev/null
+++ b/models/user/follow.go
@@ -0,0 +1,85 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/timeutil"
+)
+
+// Follow represents the relation between a user and one of their followers.
+type Follow struct {
+ ID int64 `xorm:"pk autoincr"`
+ UserID int64 `xorm:"UNIQUE(follow)"`
+ FollowID int64 `xorm:"UNIQUE(follow)"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+}
+
+func init() {
+ db.RegisterModel(new(Follow))
+}
+
+// IsFollowing returns true if userID is following followID.
+func IsFollowing(ctx context.Context, userID, followID int64) bool {
+ has, _ := db.GetEngine(ctx).Get(&Follow{UserID: userID, FollowID: followID})
+ return has
+}
+
+// FollowUser marks someone as another's follower.
+func FollowUser(ctx context.Context, userID, followID int64) (err error) {
+ if userID == followID || IsFollowing(ctx, userID, followID) {
+ return nil
+ }
+
+ if IsBlocked(ctx, userID, followID) || IsBlocked(ctx, followID, userID) {
+ return ErrBlockedByUser
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if err = db.Insert(ctx, &Follow{UserID: userID, FollowID: followID}); err != nil {
+ return err
+ }
+
+ if _, err = db.Exec(ctx, "UPDATE `user` SET num_followers = num_followers + 1 WHERE id = ?", followID); err != nil {
+ return err
+ }
+
+ if _, err = db.Exec(ctx, "UPDATE `user` SET num_following = num_following + 1 WHERE id = ?", userID); err != nil {
+ return err
+ }
+ return committer.Commit()
+}
+
+// UnfollowUser unmarks someone as another's follower.
+func UnfollowUser(ctx context.Context, userID, followID int64) (err error) {
+ if userID == followID || !IsFollowing(ctx, userID, followID) {
+ return nil
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if _, err = db.DeleteByBean(ctx, &Follow{UserID: userID, FollowID: followID}); err != nil {
+ return err
+ }
+
+ if _, err = db.Exec(ctx, "UPDATE `user` SET num_followers = num_followers - 1 WHERE id = ?", followID); err != nil {
+ return err
+ }
+
+ if _, err = db.Exec(ctx, "UPDATE `user` SET num_following = num_following - 1 WHERE id = ?", userID); err != nil {
+ return err
+ }
+ return committer.Commit()
+}
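A sketch of the follow flow; FollowUser is a no-op for self-follows and duplicates, and refuses blocked pairs (the IDs are illustrative):

package main

import (
	"context"
	"errors"
	"fmt"

	user_model "code.gitea.io/gitea/models/user"
)

func follow(ctx context.Context) error {
	if err := user_model.FollowUser(ctx, 2, 4); err != nil {
		if errors.Is(err, user_model.ErrBlockedByUser) {
			// one side has blocked the other
		}
		return err
	}
	fmt.Println(user_model.IsFollowing(ctx, 2, 4)) // true after a successful follow
	return nil
}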
diff --git a/models/user/follow_test.go b/models/user/follow_test.go
new file mode 100644
index 0000000..8c56164
--- /dev/null
+++ b/models/user/follow_test.go
@@ -0,0 +1,24 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestIsFollowing(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ assert.True(t, user_model.IsFollowing(db.DefaultContext, 4, 2))
+ assert.False(t, user_model.IsFollowing(db.DefaultContext, 2, 4))
+ assert.False(t, user_model.IsFollowing(db.DefaultContext, 5, unittest.NonexistentID))
+ assert.False(t, user_model.IsFollowing(db.DefaultContext, unittest.NonexistentID, 5))
+ assert.False(t, user_model.IsFollowing(db.DefaultContext, unittest.NonexistentID, unittest.NonexistentID))
+}
diff --git a/models/user/list.go b/models/user/list.go
new file mode 100644
index 0000000..ca589d1
--- /dev/null
+++ b/models/user/list.go
@@ -0,0 +1,83 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/auth"
+ "code.gitea.io/gitea/models/db"
+)
+
+// UserList is a list of users.
+// This type provides methods to retrieve information for a group of users efficiently.
+type UserList []*User //revive:disable-line:exported
+
+// GetUserIDs returns a slice of user IDs
+func (users UserList) GetUserIDs() []int64 {
+ userIDs := make([]int64, 0, len(users))
+ for _, user := range users {
+		userIDs = append(userIDs, user.ID) // user IDs are assumed to be unique in the list
+ }
+ return userIDs
+}
+
+// GetTwoFaStatus returns the state of 2FA enrollment
+func (users UserList) GetTwoFaStatus(ctx context.Context) map[int64]bool {
+ results := make(map[int64]bool, len(users))
+ for _, user := range users {
+ results[user.ID] = false // Set default to false
+ }
+
+ if tokenMaps, err := users.loadTwoFactorStatus(ctx); err == nil {
+ for _, token := range tokenMaps {
+ results[token.UID] = true
+ }
+ }
+
+ if ids, err := users.userIDsWithWebAuthn(ctx); err == nil {
+ for _, id := range ids {
+ results[id] = true
+ }
+ }
+
+ return results
+}
+
+func (users UserList) loadTwoFactorStatus(ctx context.Context) (map[int64]*auth.TwoFactor, error) {
+ if len(users) == 0 {
+ return nil, nil
+ }
+
+ userIDs := users.GetUserIDs()
+ tokenMaps := make(map[int64]*auth.TwoFactor, len(userIDs))
+ if err := db.GetEngine(ctx).In("uid", userIDs).Find(&tokenMaps); err != nil {
+ return nil, fmt.Errorf("find two factor: %w", err)
+ }
+ return tokenMaps, nil
+}
+
+func (users UserList) userIDsWithWebAuthn(ctx context.Context) ([]int64, error) {
+ if len(users) == 0 {
+ return nil, nil
+ }
+ ids := make([]int64, 0, len(users))
+ if err := db.GetEngine(ctx).Table(new(auth.WebAuthnCredential)).In("user_id", users.GetUserIDs()).Select("user_id").Distinct("user_id").Find(&ids); err != nil {
+		return nil, fmt.Errorf("find WebAuthn credentials: %w", err)
+ }
+ return ids, nil
+}
+
+// GetUsersByIDs returns all resolved users from a list of IDs.
+func GetUsersByIDs(ctx context.Context, ids []int64) (UserList, error) {
+ ous := make([]*User, 0, len(ids))
+ if len(ids) == 0 {
+ return ous, nil
+ }
+ err := db.GetEngine(ctx).In("id", ids).
+ Asc("name").
+ Find(&ous)
+ return ous, err
+}
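A sketch combining the two helpers, assuming a database context; the IDs are illustrative:

package main

import (
	"context"
	"fmt"

	user_model "code.gitea.io/gitea/models/user"
)

func report2FA(ctx context.Context) error {
	users, err := user_model.GetUsersByIDs(ctx, []int64{1, 2, 4})
	if err != nil {
		return err
	}
	for id, enabled := range users.GetTwoFaStatus(ctx) {
		fmt.Printf("user %d has 2FA (TOTP or WebAuthn): %v\n", id, enabled)
	}
	return nil
}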
diff --git a/models/user/main_test.go b/models/user/main_test.go
new file mode 100644
index 0000000..a626d32
--- /dev/null
+++ b/models/user/main_test.go
@@ -0,0 +1,19 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/unittest"
+
+ _ "code.gitea.io/gitea/models"
+ _ "code.gitea.io/gitea/models/actions"
+ _ "code.gitea.io/gitea/models/activities"
+ _ "code.gitea.io/gitea/models/user"
+)
+
+func TestMain(m *testing.M) {
+ unittest.MainTest(m)
+}
diff --git a/models/user/must_change_password.go b/models/user/must_change_password.go
new file mode 100644
index 0000000..7eab08d
--- /dev/null
+++ b/models/user/must_change_password.go
@@ -0,0 +1,49 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "context"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
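+// SetMustChangePassword sets the must_change_password flag for the selected
+// users and returns the number of users whose flag actually changed.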
+func SetMustChangePassword(ctx context.Context, all, mustChangePassword bool, include, exclude []string) (int64, error) {
+ sliceTrimSpaceDropEmpty := func(input []string) []string {
+ output := make([]string, 0, len(input))
+ for _, in := range input {
+ in = strings.ToLower(strings.TrimSpace(in))
+ if in == "" {
+ continue
+ }
+ output = append(output, in)
+ }
+ return output
+ }
+
+ var cond builder.Cond
+
+ // Only include the users where something changes to get an accurate count
+ cond = builder.Neq{"must_change_password": mustChangePassword}
+
+ if !all {
+ include = sliceTrimSpaceDropEmpty(include)
+ if len(include) == 0 {
+ return 0, util.NewSilentWrapErrorf(util.ErrInvalidArgument, "no users to include provided")
+ }
+
+ cond = cond.And(builder.In("lower_name", include))
+ }
+
+ exclude = sliceTrimSpaceDropEmpty(exclude)
+ if len(exclude) > 0 {
+ cond = cond.And(builder.NotIn("lower_name", exclude))
+ }
+
+ return db.GetEngine(ctx).Where(cond).MustCols("must_change_password").Update(&User{MustChangePassword: mustChangePassword})
+}
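A sketch of the admin call: flag the listed accounts except an excluded one; the account names are illustrative:

package main

import (
	"context"
	"fmt"

	user_model "code.gitea.io/gitea/models/user"
)

func forcePasswordChange(ctx context.Context) error {
	// all=false, so only the included names are touched; names are normalized internally
	n, err := user_model.SetMustChangePassword(ctx, false, true, []string{"user2", "user4"}, []string{"user4"})
	if err != nil {
		return err
	}
	fmt.Printf("%d account(s) now must change their password\n", n) // only rows that actually change are counted
	return nil
}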
diff --git a/models/user/openid.go b/models/user/openid.go
new file mode 100644
index 0000000..ee4ecab
--- /dev/null
+++ b/models/user/openid.go
@@ -0,0 +1,111 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/util"
+)
+
+// ErrOpenIDNotExist openid is not known
+var ErrOpenIDNotExist = util.NewNotExistErrorf("OpenID is unknown")
+
+// UserOpenID represents an OpenID identity of a user.
+// Since this is a join table, naming it OpenID is not suitable, so we ignore the lint here
+type UserOpenID struct { //revive:disable-line:exported
+ ID int64 `xorm:"pk autoincr"`
+ UID int64 `xorm:"INDEX NOT NULL"`
+ URI string `xorm:"UNIQUE NOT NULL"`
+ Show bool `xorm:"DEFAULT false"`
+}
+
+func init() {
+ db.RegisterModel(new(UserOpenID))
+}
+
+// GetUserOpenIDs returns all openid addresses that belong to the given user.
+func GetUserOpenIDs(ctx context.Context, uid int64) ([]*UserOpenID, error) {
+ openids := make([]*UserOpenID, 0, 5)
+ if err := db.GetEngine(ctx).
+ Where("uid=?", uid).
+ Asc("id").
+ Find(&openids); err != nil {
+ return nil, err
+ }
+
+ return openids, nil
+}
+
+// isOpenIDUsed returns true if the openid has been used.
+func isOpenIDUsed(ctx context.Context, uri string) (bool, error) {
+ if len(uri) == 0 {
+ return true, nil
+ }
+
+ return db.GetEngine(ctx).Get(&UserOpenID{URI: uri})
+}
+
+// ErrOpenIDAlreadyUsed represents a "OpenIDAlreadyUsed" kind of error.
+type ErrOpenIDAlreadyUsed struct {
+ OpenID string
+}
+
+// IsErrOpenIDAlreadyUsed checks if an error is an ErrOpenIDAlreadyUsed.
+func IsErrOpenIDAlreadyUsed(err error) bool {
+ _, ok := err.(ErrOpenIDAlreadyUsed)
+ return ok
+}
+
+func (err ErrOpenIDAlreadyUsed) Error() string {
+ return fmt.Sprintf("OpenID already in use [oid: %s]", err.OpenID)
+}
+
+func (err ErrOpenIDAlreadyUsed) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+// AddUserOpenID adds a pre-verified/normalized OpenID URI to the given user.
+// NOTE: make sure openid.URI is normalized already
+func AddUserOpenID(ctx context.Context, openid *UserOpenID) error {
+ used, err := isOpenIDUsed(ctx, openid.URI)
+ if err != nil {
+ return err
+ } else if used {
+ return ErrOpenIDAlreadyUsed{openid.URI}
+ }
+
+ return db.Insert(ctx, openid)
+}
+
+// DeleteUserOpenID deletes an openid address of the given user.
+func DeleteUserOpenID(ctx context.Context, openid *UserOpenID) (err error) {
+ var deleted int64
+ // ask to check UID
+ address := UserOpenID{
+ UID: openid.UID,
+ }
+ if openid.ID > 0 {
+ deleted, err = db.GetEngine(ctx).ID(openid.ID).Delete(&address)
+ } else {
+ deleted, err = db.GetEngine(ctx).
+ Where("openid=?", openid.URI).
+ Delete(&address)
+ }
+
+ if err != nil {
+ return err
+ } else if deleted != 1 {
+ return ErrOpenIDNotExist
+ }
+ return nil
+}
+
+// ToggleUserOpenIDVisibility toggles the visibility of an openid address of the given user.
+func ToggleUserOpenIDVisibility(ctx context.Context, id int64) (err error) {
+ _, err = db.GetEngine(ctx).Exec("update `user_open_id` set `show` = not `show` where `id` = ?", id)
+ return err
+}
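A sketch of adding and removing an OpenID URI, assuming the caller normalized the URI beforehand (the values are illustrative):

package main

import (
	"context"

	user_model "code.gitea.io/gitea/models/user"
)

func manageOpenID(ctx context.Context) error {
	oid := &user_model.UserOpenID{UID: 2, URI: "https://example.tld/user2/"}
	if err := user_model.AddUserOpenID(ctx, oid); err != nil {
		if user_model.IsErrOpenIDAlreadyUsed(err) {
			// the URI is already claimed by some user
		}
		return err
	}
	// deletion also matches on UID, so one user cannot remove another's URI
	return user_model.DeleteUserOpenID(ctx, oid)
}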
diff --git a/models/user/openid_test.go b/models/user/openid_test.go
new file mode 100644
index 0000000..c2857aa
--- /dev/null
+++ b/models/user/openid_test.go
@@ -0,0 +1,68 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGetUserOpenIDs(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ oids, err := user_model.GetUserOpenIDs(db.DefaultContext, int64(1))
+ require.NoError(t, err)
+
+ if assert.Len(t, oids, 2) {
+ assert.Equal(t, "https://user1.domain1.tld/", oids[0].URI)
+ assert.False(t, oids[0].Show)
+ assert.Equal(t, "http://user1.domain2.tld/", oids[1].URI)
+ assert.True(t, oids[1].Show)
+ }
+
+ oids, err = user_model.GetUserOpenIDs(db.DefaultContext, int64(2))
+ require.NoError(t, err)
+
+ if assert.Len(t, oids, 1) {
+ assert.Equal(t, "https://domain1.tld/user2/", oids[0].URI)
+ assert.True(t, oids[0].Show)
+ }
+}
+
+func TestToggleUserOpenIDVisibility(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ oids, err := user_model.GetUserOpenIDs(db.DefaultContext, int64(2))
+ require.NoError(t, err)
+
+ if !assert.Len(t, oids, 1) {
+ return
+ }
+ assert.True(t, oids[0].Show)
+
+ err = user_model.ToggleUserOpenIDVisibility(db.DefaultContext, oids[0].ID)
+ require.NoError(t, err)
+
+ oids, err = user_model.GetUserOpenIDs(db.DefaultContext, int64(2))
+ require.NoError(t, err)
+
+ if !assert.Len(t, oids, 1) {
+ return
+ }
+ assert.False(t, oids[0].Show)
+ err = user_model.ToggleUserOpenIDVisibility(db.DefaultContext, oids[0].ID)
+ require.NoError(t, err)
+
+ oids, err = user_model.GetUserOpenIDs(db.DefaultContext, int64(2))
+ require.NoError(t, err)
+
+ if assert.Len(t, oids, 1) {
+ assert.True(t, oids[0].Show)
+ }
+}
diff --git a/models/user/redirect.go b/models/user/redirect.go
new file mode 100644
index 0000000..5a40d4d
--- /dev/null
+++ b/models/user/redirect.go
@@ -0,0 +1,87 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/util"
+)
+
+// ErrUserRedirectNotExist represents a "UserRedirectNotExist" kind of error.
+type ErrUserRedirectNotExist struct {
+ Name string
+}
+
+// IsErrUserRedirectNotExist checks if an error is an ErrUserRedirectNotExist.
+func IsErrUserRedirectNotExist(err error) bool {
+ _, ok := err.(ErrUserRedirectNotExist)
+ return ok
+}
+
+func (err ErrUserRedirectNotExist) Error() string {
+ return fmt.Sprintf("user redirect does not exist [name: %s]", err.Name)
+}
+
+func (err ErrUserRedirectNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// Redirect represents that a user name should be redirected to another
+type Redirect struct {
+ ID int64 `xorm:"pk autoincr"`
+ LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ RedirectUserID int64 // userID to redirect to
+}
+
+// TableName provides the real table name
+func (Redirect) TableName() string {
+ return "user_redirect"
+}
+
+func init() {
+ db.RegisterModel(new(Redirect))
+}
+
+// LookupUserRedirect looks up the user ID behind a redirected user name
+func LookupUserRedirect(ctx context.Context, userName string) (int64, error) {
+ userName = strings.ToLower(userName)
+ redirect := &Redirect{LowerName: userName}
+ if has, err := db.GetEngine(ctx).Get(redirect); err != nil {
+ return 0, err
+ } else if !has {
+ return 0, ErrUserRedirectNotExist{Name: userName}
+ }
+ return redirect.RedirectUserID, nil
+}
+
+// NewUserRedirect creates a new user redirect
+func NewUserRedirect(ctx context.Context, ID int64, oldUserName, newUserName string) error {
+ oldUserName = strings.ToLower(oldUserName)
+ newUserName = strings.ToLower(newUserName)
+
+ if err := DeleteUserRedirect(ctx, oldUserName); err != nil {
+ return err
+ }
+
+ if err := DeleteUserRedirect(ctx, newUserName); err != nil {
+ return err
+ }
+
+ return db.Insert(ctx, &Redirect{
+ LowerName: oldUserName,
+ RedirectUserID: ID,
+ })
+}
+
+// DeleteUserRedirect deletes any redirect from the specified user name to
+// anything else
+func DeleteUserRedirect(ctx context.Context, userName string) error {
+ userName = strings.ToLower(userName)
+ _, err := db.GetEngine(ctx).Delete(&Redirect{LowerName: userName})
+ return err
+}
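A sketch of the rename flow: record a redirect from the old name, then resolve it later; the ID and names are illustrative:

package main

import (
	"context"
	"fmt"

	user_model "code.gitea.io/gitea/models/user"
)

func renameRedirect(ctx context.Context) error {
	if err := user_model.NewUserRedirect(ctx, 1, "olduser1", "newuser1"); err != nil {
		return err
	}
	uid, err := user_model.LookupUserRedirect(ctx, "olduser1")
	if err != nil {
		return err
	}
	fmt.Println(uid) // 1
	return nil
}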
diff --git a/models/user/redirect_test.go b/models/user/redirect_test.go
new file mode 100644
index 0000000..35fd29a
--- /dev/null
+++ b/models/user/redirect_test.go
@@ -0,0 +1,26 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestLookupUserRedirect(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ userID, err := user_model.LookupUserRedirect(db.DefaultContext, "olduser1")
+ require.NoError(t, err)
+ assert.EqualValues(t, 1, userID)
+
+ _, err = user_model.LookupUserRedirect(db.DefaultContext, "doesnotexist")
+ assert.True(t, user_model.IsErrUserRedirectNotExist(err))
+}
diff --git a/models/user/search.go b/models/user/search.go
new file mode 100644
index 0000000..04c434e
--- /dev/null
+++ b/models/user/search.go
@@ -0,0 +1,178 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/optional"
+ "code.gitea.io/gitea/modules/structs"
+
+ "xorm.io/builder"
+ "xorm.io/xorm"
+)
+
+// SearchUserOptions contains the options for searching
+type SearchUserOptions struct {
+ db.ListOptions
+
+ Keyword string
+ Type UserType
+ UID int64
+	LoginName string // this option should be used only by admin users
+	SourceID  int64  // this option should be used only by admin users
+ OrderBy db.SearchOrderBy
+ Visible []structs.VisibleType
+ Actor *User // The user doing the search
+ SearchByEmail bool // Search by email as well as username/full name
+
+ SupportedSortOrders container.Set[string] // if not nil, only allow to use the sort orders in this set
+
+ IsActive optional.Option[bool]
+ IsAdmin optional.Option[bool]
+ IsRestricted optional.Option[bool]
+ IsTwoFactorEnabled optional.Option[bool]
+ IsProhibitLogin optional.Option[bool]
+ IncludeReserved bool
+
+ ExtraParamStrings map[string]string
+}
+
+func (opts *SearchUserOptions) toSearchQueryBase(ctx context.Context) *xorm.Session {
+ var cond builder.Cond
+ if opts.Type == UserTypeIndividual {
+ cond = builder.In("type", UserTypeIndividual, UserTypeRemoteUser)
+ } else {
+ cond = builder.Eq{"type": opts.Type}
+ }
+ if opts.IncludeReserved {
+ if opts.Type == UserTypeIndividual {
+ cond = cond.Or(builder.Eq{"type": UserTypeUserReserved}).Or(
+ builder.Eq{"type": UserTypeBot},
+ ).Or(
+ builder.Eq{"type": UserTypeRemoteUser},
+ )
+ } else if opts.Type == UserTypeOrganization {
+ cond = cond.Or(builder.Eq{"type": UserTypeOrganizationReserved})
+ }
+ }
+
+ if len(opts.Keyword) > 0 {
+ lowerKeyword := strings.ToLower(opts.Keyword)
+ keywordCond := builder.Or(
+ builder.Like{"lower_name", lowerKeyword},
+ builder.Like{"LOWER(full_name)", lowerKeyword},
+ )
+ if opts.SearchByEmail {
+ keywordCond = keywordCond.Or(builder.Like{"LOWER(email)", lowerKeyword})
+ }
+
+ cond = cond.And(keywordCond)
+ }
+
+ // Filter by visibility when requested
+ if len(opts.Visible) > 0 {
+ cond = cond.And(builder.In("visibility", opts.Visible))
+ }
+
+ cond = cond.And(BuildCanSeeUserCondition(opts.Actor))
+
+ if opts.UID > 0 {
+ cond = cond.And(builder.Eq{"id": opts.UID})
+ }
+
+ if opts.SourceID > 0 {
+ cond = cond.And(builder.Eq{"login_source": opts.SourceID})
+ }
+ if opts.LoginName != "" {
+ cond = cond.And(builder.Eq{"login_name": opts.LoginName})
+ }
+
+ if opts.IsActive.Has() {
+ cond = cond.And(builder.Eq{"is_active": opts.IsActive.Value()})
+ }
+
+ if opts.IsAdmin.Has() {
+ cond = cond.And(builder.Eq{"is_admin": opts.IsAdmin.Value()})
+ }
+
+ if opts.IsRestricted.Has() {
+ cond = cond.And(builder.Eq{"is_restricted": opts.IsRestricted.Value()})
+ }
+
+ if opts.IsProhibitLogin.Has() {
+ cond = cond.And(builder.Eq{"prohibit_login": opts.IsProhibitLogin.Value()})
+ }
+
+ e := db.GetEngine(ctx)
+ if !opts.IsTwoFactorEnabled.Has() {
+ return e.Where(cond)
+ }
+
+ // 2fa filter uses LEFT JOIN to check whether a user has a 2fa record
+ // While a LEFT JOIN is sometimes not great for performance, that is not a problem here, since such SQL is seldom executed.
+ // There are some possible methods to refactor this SQL in the future when we really need to optimize the performance (but not now):
+ // (1) add a column in the user table (2) add a setting value in the user_setting table (3) use search engines (bleve/elasticsearch)
+ if opts.IsTwoFactorEnabled.Value() {
+ cond = cond.And(builder.Expr("two_factor.uid IS NOT NULL"))
+ } else {
+ cond = cond.And(builder.Expr("two_factor.uid IS NULL"))
+ }
+
+ return e.Join("LEFT OUTER", "two_factor", "two_factor.uid = `user`.id").
+ Where(cond)
+}
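+
+// For reference, with IsTwoFactorEnabled set the generated query has roughly
+// this shape (a sketch only; the exact SQL is produced by XORM and varies by
+// database dialect):
+//
+//	SELECT ... FROM `user`
+//	LEFT OUTER JOIN two_factor ON two_factor.uid = `user`.id
+//	WHERE ... AND two_factor.uid IS NOT NULL -- or IS NULL for the negated filter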
+
+// SearchUsers takes options such as a keyword and a part of a user name to search,
+// and returns the results in the given range along with the total number of results.
+func SearchUsers(ctx context.Context, opts *SearchUserOptions) (users []*User, _ int64, _ error) {
+ sessCount := opts.toSearchQueryBase(ctx)
+ defer sessCount.Close()
+ count, err := sessCount.Count(new(User))
+ if err != nil {
+ return nil, 0, fmt.Errorf("count: %w", err)
+ }
+
+ if len(opts.OrderBy) == 0 {
+ opts.OrderBy = db.SearchOrderByAlphabetically
+ }
+
+ sessQuery := opts.toSearchQueryBase(ctx).OrderBy(opts.OrderBy.String())
+ defer sessQuery.Close()
+ if opts.PageSize > 0 {
+ sessQuery = db.SetSessionPagination(sessQuery, opts)
+ }
+
+ // the sql may contain JOIN, so we must only select User related columns
+ sessQuery = sessQuery.Select("`user`.*")
+ users = make([]*User, 0, opts.PageSize)
+ return users, count, sessQuery.Find(&users)
+}
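+
+// Call sketch (illustrative; the option values are assumptions for the example):
+//
+//	users, total, err := SearchUsers(ctx, &SearchUserOptions{
+//		Keyword:     "jane",
+//		Type:        UserTypeIndividual,
+//		IsActive:    optional.Some(true),
+//		ListOptions: db.ListOptions{Page: 1, PageSize: 20},
+//	})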
+
+// BuildCanSeeUserCondition creates a condition which can be used to restrict results to users/orgs the actor can see
+func BuildCanSeeUserCondition(actor *User) builder.Cond {
+ if actor != nil {
+ // If Admin - they see all users!
+ if !actor.IsAdmin {
+ // Users can see an organization they are a member of
+ cond := builder.In("`user`.id", builder.Select("org_id").From("org_user").Where(builder.Eq{"uid": actor.ID}))
+ if !actor.IsRestricted {
+ // Not-Restricted users can see public and limited users/organizations
+ cond = cond.Or(builder.In("`user`.visibility", structs.VisibleTypePublic, structs.VisibleTypeLimited))
+ }
+ // Don't forget about self
+ return cond.Or(builder.Eq{"`user`.id": actor.ID})
+ }
+
+ return nil
+ }
+
+ // Force visibility for privacy
+ // Not logged in - only public users
+ return builder.In("`user`.visibility", structs.VisibleTypePublic)
+}
diff --git a/models/user/setting.go b/models/user/setting.go
new file mode 100644
index 0000000..b4af0e5
--- /dev/null
+++ b/models/user/setting.go
@@ -0,0 +1,212 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/cache"
+ setting_module "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// Setting is a key value store of user settings
+type Setting struct {
+ ID int64 `xorm:"pk autoincr"`
+ UserID int64 `xorm:"index unique(key_userid)"` // to load all of someone's settings
+ SettingKey string `xorm:"varchar(255) index unique(key_userid)"` // ensure key is always lowercase
+ SettingValue string `xorm:"text"`
+}
+
+// TableName sets the table name for the settings struct
+func (s *Setting) TableName() string {
+ return "user_setting"
+}
+
+func init() {
+ db.RegisterModel(new(Setting))
+}
+
+// ErrUserSettingIsNotExist represents an error where a setting does not exist for the given key
+type ErrUserSettingIsNotExist struct {
+ Key string
+}
+
+// Error implements error
+func (err ErrUserSettingIsNotExist) Error() string {
+ return fmt.Sprintf("Setting[%s] does not exist", err.Key)
+}
+
+func (err ErrUserSettingIsNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// IsErrUserSettingIsNotExist returns true if err is ErrUserSettingIsNotExist
+func IsErrUserSettingIsNotExist(err error) bool {
+ _, ok := err.(ErrUserSettingIsNotExist)
+ return ok
+}
+
+// genSettingCacheKey returns the cache key for a user's setting
+func genSettingCacheKey(userID int64, key string) string {
+ return fmt.Sprintf("user_%d.setting.%s", userID, key)
+}
+
+// GetSetting returns the setting value via the key
+func GetSetting(ctx context.Context, uid int64, key string) (string, error) {
+ return cache.GetString(genSettingCacheKey(uid, key), func() (string, error) {
+ res, err := GetSettingNoCache(ctx, uid, key)
+ if err != nil {
+ return "", err
+ }
+ return res.SettingValue, nil
+ })
+}
+
+// GetSettingNoCache returns specific setting without using the cache
+func GetSettingNoCache(ctx context.Context, uid int64, key string) (*Setting, error) {
+ v, err := GetSettings(ctx, uid, []string{key})
+ if err != nil {
+ return nil, err
+ }
+ if len(v) == 0 {
+ return nil, ErrUserSettingIsNotExist{key}
+ }
+ return v[key], nil
+}
+
+// GetSettings returns the requested settings of a user
+func GetSettings(ctx context.Context, uid int64, keys []string) (map[string]*Setting, error) {
+ settings := make([]*Setting, 0, len(keys))
+ if err := db.GetEngine(ctx).
+ Where("user_id=?", uid).
+ And(builder.In("setting_key", keys)).
+ Find(&settings); err != nil {
+ return nil, err
+ }
+ settingsMap := make(map[string]*Setting)
+ for _, s := range settings {
+ settingsMap[s.SettingKey] = s
+ }
+ return settingsMap, nil
+}
+
+// GetUserAllSettings returns all settings of a user
+func GetUserAllSettings(ctx context.Context, uid int64) (map[string]*Setting, error) {
+ settings := make([]*Setting, 0, 5)
+ if err := db.GetEngine(ctx).
+ Where("user_id=?", uid).
+ Find(&settings); err != nil {
+ return nil, err
+ }
+ settingsMap := make(map[string]*Setting)
+ for _, s := range settings {
+ settingsMap[s.SettingKey] = s
+ }
+ return settingsMap, nil
+}
+
+func validateUserSettingKey(key string) error {
+ if len(key) == 0 {
+ return fmt.Errorf("setting key must be set")
+ }
+ if strings.ToLower(key) != key {
+ return fmt.Errorf("setting key should be lowercase")
+ }
+ return nil
+}
+
+// GetUserSetting gets a specific setting for a user
+func GetUserSetting(ctx context.Context, userID int64, key string, def ...string) (string, error) {
+ if err := validateUserSettingKey(key); err != nil {
+ return "", err
+ }
+
+ setting := &Setting{UserID: userID, SettingKey: key}
+ has, err := db.GetEngine(ctx).Get(setting)
+ if err != nil {
+ return "", err
+ }
+ if !has {
+ if len(def) == 1 {
+ return def[0], nil
+ }
+ return "", nil
+ }
+ return setting.SettingValue, nil
+}
+
+// DeleteUserSetting deletes a specific setting for a user
+func DeleteUserSetting(ctx context.Context, userID int64, key string) error {
+ if err := validateUserSettingKey(key); err != nil {
+ return err
+ }
+
+ cache.Remove(genSettingCacheKey(userID, key))
+ _, err := db.GetEngine(ctx).Delete(&Setting{UserID: userID, SettingKey: key})
+
+ return err
+}
+
+// SetUserSetting updates a user's setting for a specific key
+func SetUserSetting(ctx context.Context, userID int64, key, value string) error {
+ if err := validateUserSettingKey(key); err != nil {
+ return err
+ }
+
+ if err := upsertUserSettingValue(ctx, userID, key, value); err != nil {
+ return err
+ }
+
+ cc := cache.GetCache()
+ if cc != nil {
+ return cc.Put(genSettingCacheKey(userID, key), value, setting_module.CacheService.TTLSeconds())
+ }
+
+ return nil
+}
+
+func upsertUserSettingValue(ctx context.Context, userID int64, key, value string) error {
+ return db.WithTx(ctx, func(ctx context.Context) error {
+ e := db.GetEngine(ctx)
+
+ // here we use a general method to do a safe upsert for different databases (and most transaction levels)
+ // 1. try to UPDATE the record and acquire the transaction write lock
+ //    if UPDATE reports a non-zero number of changed rows, OK, the setting is saved correctly
+ // if UPDATE returns "0 rows changed", two possibilities: (a) record doesn't exist (b) value is not changed
+ // 2. do a SELECT to check if the row exists or not (we already have the transaction lock)
+ // 3. if the row doesn't exist, do an INSERT (we are still protected by the transaction lock, so it's safe)
+ //
+ // to optimize the SELECT in step 2, we can use an extra column like `revision=revision+1`
+ // to make sure the UPDATE always returns a non-zero value for existing (unchanged) records.
+
+ res, err := e.Exec("UPDATE user_setting SET setting_value=? WHERE setting_key=? AND user_id=?", value, key, userID)
+ if err != nil {
+ return err
+ }
+ rows, _ := res.RowsAffected()
+ if rows > 0 {
+ // the existing row is updated, so we can return
+ return nil
+ }
+
+ // in case the value isn't changed, update would return 0 rows changed, so we need this check
+ has, err := e.Exist(&Setting{UserID: userID, SettingKey: key})
+ if err != nil {
+ return err
+ }
+ if has {
+ return nil
+ }
+
+ // if no existing row, insert a new row
+ _, err = e.Insert(&Setting{UserID: userID, SettingKey: key, SettingValue: value})
+ return err
+ })
+}
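+
+// For reference, the `revision` optimization mentioned in the comment above
+// would look roughly like this (a sketch only; no such column exists in this
+// schema):
+//
+//	UPDATE user_setting SET setting_value=?, revision=revision+1 WHERE setting_key=? AND user_id=?
+//
+// For an existing record this always reports one affected row, even when the
+// value is unchanged, so the SELECT in step 2 could be skipped.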
diff --git a/models/user/setting_keys.go b/models/user/setting_keys.go
new file mode 100644
index 0000000..0e2c936
--- /dev/null
+++ b/models/user/setting_keys.go
@@ -0,0 +1,17 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+const (
+ // SettingsKeyHiddenCommentTypes is the setting key for hidden comment types
+ SettingsKeyHiddenCommentTypes = "issue.hidden_comment_types"
+ // SettingsKeyDiffWhitespaceBehavior is the setting key for whitespace behavior of diff
+ SettingsKeyDiffWhitespaceBehavior = "diff.whitespace_behaviour"
+ // SettingsKeyShowOutdatedComments is the setting key for whether to show outdated comments in PRs
+ SettingsKeyShowOutdatedComments = "comment_code.show_outdated"
+ // UserActivityPubPrivPem is the user's private ActivityPub key in PEM format
+ UserActivityPubPrivPem = "activitypub.priv_pem"
+ // UserActivityPubPubPem is the user's public ActivityPub key in PEM format
+ UserActivityPubPubPem = "activitypub.pub_pem"
+)
diff --git a/models/user/setting_test.go b/models/user/setting_test.go
new file mode 100644
index 0000000..0b05c54
--- /dev/null
+++ b/models/user/setting_test.go
@@ -0,0 +1,61 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestSettings(t *testing.T) {
+ keyName := "test_user_setting"
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ newSetting := &user_model.Setting{UserID: 99, SettingKey: keyName, SettingValue: "Gitea User Setting Test"}
+
+ // create setting
+ err := user_model.SetUserSetting(db.DefaultContext, newSetting.UserID, newSetting.SettingKey, newSetting.SettingValue)
+ require.NoError(t, err)
+ // test about saving unchanged values
+ err = user_model.SetUserSetting(db.DefaultContext, newSetting.UserID, newSetting.SettingKey, newSetting.SettingValue)
+ require.NoError(t, err)
+
+ // get specific setting
+ settings, err := user_model.GetSettings(db.DefaultContext, 99, []string{keyName})
+ require.NoError(t, err)
+ assert.Len(t, settings, 1)
+ assert.EqualValues(t, newSetting.SettingValue, settings[keyName].SettingValue)
+
+ settingValue, err := user_model.GetUserSetting(db.DefaultContext, 99, keyName)
+ require.NoError(t, err)
+ assert.EqualValues(t, newSetting.SettingValue, settingValue)
+
+ settingValue, err = user_model.GetUserSetting(db.DefaultContext, 99, "no_such")
+ require.NoError(t, err)
+ assert.EqualValues(t, "", settingValue)
+
+ // updated setting
+ updatedSetting := &user_model.Setting{UserID: 99, SettingKey: keyName, SettingValue: "Updated"}
+ err = user_model.SetUserSetting(db.DefaultContext, updatedSetting.UserID, updatedSetting.SettingKey, updatedSetting.SettingValue)
+ require.NoError(t, err)
+
+ // get all settings
+ settings, err = user_model.GetUserAllSettings(db.DefaultContext, 99)
+ require.NoError(t, err)
+ assert.Len(t, settings, 1)
+ assert.EqualValues(t, updatedSetting.SettingValue, settings[updatedSetting.SettingKey].SettingValue)
+
+ // delete setting
+ err = user_model.DeleteUserSetting(db.DefaultContext, 99, keyName)
+ require.NoError(t, err)
+ settings, err = user_model.GetUserAllSettings(db.DefaultContext, 99)
+ require.NoError(t, err)
+ assert.Empty(t, settings)
+}
diff --git a/models/user/user.go b/models/user/user.go
new file mode 100644
index 0000000..3f12f8e
--- /dev/null
+++ b/models/user/user.go
@@ -0,0 +1,1365 @@
+// Copyright 2014 The Gogs Authors. All rights reserved.
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "context"
+ "crypto/subtle"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "net/mail"
+ "net/url"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "time"
+ "unicode"
+
+ _ "image/jpeg" // Needed for jpeg support
+
+ "code.gitea.io/gitea/models/auth"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/auth/openid"
+ "code.gitea.io/gitea/modules/auth/password/hash"
+ "code.gitea.io/gitea/modules/base"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/optional"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/modules/validation"
+
+ "golang.org/x/text/runes"
+ "golang.org/x/text/transform"
+ "golang.org/x/text/unicode/norm"
+ "xorm.io/builder"
+)
+
+// UserType defines the user type
+type UserType int //revive:disable-line:exported
+
+const (
+ // UserTypeIndividual defines an individual user
+ UserTypeIndividual UserType = iota // For historic reasons this starts at 0.
+
+ // UserTypeOrganization defines an organization
+ UserTypeOrganization // 1
+
+ // UserTypeUserReserved reserves a (non-existing) user, i.e. to prevent a spam user from re-registering after being deleted, or to reserve the name until the user is actually created later on
+ UserTypeUserReserved // 2
+
+ // UserTypeOrganizationReserved reserves a (non-existing) organization, to be used in combination with UserTypeUserReserved
+ UserTypeOrganizationReserved // 3
+
+ // UserTypeBot defines a bot user
+ UserTypeBot // 4
+
+ // UserTypeRemoteUser defines a remote user for federated users
+ UserTypeRemoteUser // 5
+)
+
+const (
+ // EmailNotificationsEnabled indicates that the user would like to receive all email notifications except those for their own actions
+ EmailNotificationsEnabled = "enabled"
+ // EmailNotificationsOnMention indicates that the user would like to be notified via email when mentioned.
+ EmailNotificationsOnMention = "onmention"
+ // EmailNotificationsDisabled indicates that the user would not like to be notified via email.
+ EmailNotificationsDisabled = "disabled"
+ // EmailNotificationsAndYourOwn indicates that the user would like to receive all email notifications, including those for their own actions
+ EmailNotificationsAndYourOwn = "andyourown"
+)
+
+// User represents an individual user or an organization.
+type User struct {
+ ID int64 `xorm:"pk autoincr"`
+ LowerName string `xorm:"UNIQUE NOT NULL"`
+ Name string `xorm:"UNIQUE NOT NULL"`
+ FullName string
+ // Email is the primary email address (to be used for communication)
+ Email string `xorm:"NOT NULL"`
+ KeepEmailPrivate bool
+ EmailNotificationsPreference string `xorm:"VARCHAR(20) NOT NULL DEFAULT 'enabled'"`
+ Passwd string `xorm:"NOT NULL"`
+ PasswdHashAlgo string `xorm:"NOT NULL DEFAULT 'argon2'"`
+
+ // MustChangePassword is an attribute that determines if a user
+ // is to change their password after registration.
+ MustChangePassword bool `xorm:"NOT NULL DEFAULT false"`
+
+ LoginType auth.Type
+ LoginSource int64 `xorm:"NOT NULL DEFAULT 0"`
+ LoginName string
+ Type UserType
+ Location string
+ Website string
+ Pronouns string
+ Rands string `xorm:"VARCHAR(32)"`
+ Salt string `xorm:"VARCHAR(32)"`
+ Language string `xorm:"VARCHAR(5)"`
+ Description string
+
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ LastLoginUnix timeutil.TimeStamp `xorm:"INDEX"`
+
+ // Remember visibility choice for convenience, true for private
+ LastRepoVisibility bool
+ // Maximum repository creation limit, -1 means use global default
+ MaxRepoCreation int `xorm:"NOT NULL DEFAULT -1"`
+
+ // IsActive true: primary email is activated, user can access Web UI and Git SSH.
+ // false: an inactive user can only log in to the Web UI for account operations (ex: activate the account by email), no other access.
+ IsActive bool `xorm:"INDEX"`
+ // the user is a Gitea admin, who can access all repositories and the admin pages.
+ IsAdmin bool
+ // true: the user is only allowed to see organizations/repositories that they have explicit rights to.
+ // (ex: in private Gitea instances user won't be allowed to see even organizations/repositories that are set as public)
+ IsRestricted bool `xorm:"NOT NULL DEFAULT false"`
+
+ AllowGitHook bool
+ AllowImportLocal bool // Allow migrate repository by local path
+ AllowCreateOrganization bool `xorm:"DEFAULT true"`
+
+ // true: the user is not allowed to log in to the Web UI. Git/SSH access could still be allowed (please refer to Git/SSH access related code/documents)
+ ProhibitLogin bool `xorm:"NOT NULL DEFAULT false"`
+
+ // Avatar
+ Avatar string `xorm:"VARCHAR(2048) NOT NULL"`
+ AvatarEmail string `xorm:"NOT NULL"`
+ UseCustomAvatar bool
+
+ // For federation
+ NormalizedFederatedURI string
+
+ // Counters
+ NumFollowers int
+ NumFollowing int `xorm:"NOT NULL DEFAULT 0"`
+ NumStars int
+ NumRepos int
+
+ // For organization
+ NumTeams int
+ NumMembers int
+ Visibility structs.VisibleType `xorm:"NOT NULL DEFAULT 0"`
+ RepoAdminChangeTeamAccess bool `xorm:"NOT NULL DEFAULT false"`
+
+ // Preferences
+ DiffViewStyle string `xorm:"NOT NULL DEFAULT ''"`
+ Theme string `xorm:"NOT NULL DEFAULT ''"`
+ KeepActivityPrivate bool `xorm:"NOT NULL DEFAULT false"`
+ EnableRepoUnitHints bool `xorm:"NOT NULL DEFAULT true"`
+}
+
+func init() {
+ db.RegisterModel(new(User))
+}
+
+// SearchOrganizationsOptions options to filter organizations
+type SearchOrganizationsOptions struct {
+ db.ListOptions
+ All bool
+}
+
+func (u *User) LogString() string {
+ if u == nil {
+ return "<User nil>"
+ }
+ return fmt.Sprintf("<User %d:%s>", u.ID, u.Name)
+}
+
+// BeforeUpdate is invoked from XORM before updating this object.
+func (u *User) BeforeUpdate() {
+ if u.MaxRepoCreation < -1 {
+ u.MaxRepoCreation = -1
+ }
+
+ // Organization does not need email
+ u.Email = strings.ToLower(u.Email)
+ if !u.IsOrganization() {
+ if len(u.AvatarEmail) == 0 {
+ u.AvatarEmail = u.Email
+ }
+ }
+
+ u.LowerName = strings.ToLower(u.Name)
+ u.Location = base.TruncateString(u.Location, 255)
+ u.Website = base.TruncateString(u.Website, 255)
+ u.Description = base.TruncateString(u.Description, 255)
+}
+
+// AfterLoad is invoked from XORM after filling all the fields of this object.
+func (u *User) AfterLoad() {
+ if u.Theme == "" {
+ u.Theme = setting.UI.DefaultTheme
+ }
+}
+
+// SetLastLogin set time to last login
+func (u *User) SetLastLogin() {
+ u.LastLoginUnix = timeutil.TimeStampNow()
+}
+
+// GetPlaceholderEmail returns a noreply email
+func (u *User) GetPlaceholderEmail() string {
+ return fmt.Sprintf("%s@%s", u.LowerName, setting.Service.NoReplyAddress)
+}
+
+// GetEmail returns a noreply email if the user has chosen to keep their
+// email address private; otherwise it returns the primary email address.
+func (u *User) GetEmail() string {
+ if u.KeepEmailPrivate {
+ return u.GetPlaceholderEmail()
+ }
+ return u.Email
+}
+
+// GetAllUsers returns a slice of all individual users found in DB.
+func GetAllUsers(ctx context.Context) ([]*User, error) {
+ users := make([]*User, 0)
+ return users, db.GetEngine(ctx).OrderBy("id").In("type", UserTypeIndividual, UserTypeRemoteUser).Find(&users)
+}
+
+// GetAllAdmins returns a slice of all admin users found in DB.
+func GetAllAdmins(ctx context.Context) ([]*User, error) {
+ users := make([]*User, 0)
+ return users, db.GetEngine(ctx).OrderBy("id").Where("type = ?", UserTypeIndividual).And("is_admin = ?", true).Find(&users)
+}
+
+// IsLocal returns true if user login type is LoginPlain.
+func (u *User) IsLocal() bool {
+ return u.LoginType <= auth.Plain
+}
+
+// IsOAuth2 returns true if user login type is LoginOAuth2.
+func (u *User) IsOAuth2() bool {
+ return u.LoginType == auth.OAuth2
+}
+
+// MaxCreationLimit returns the number of repositories a user is allowed to create
+func (u *User) MaxCreationLimit() int {
+ if u.MaxRepoCreation <= -1 {
+ return setting.Repository.MaxCreationLimit
+ }
+ return u.MaxRepoCreation
+}
+
+// CanCreateRepo returns whether the user can create a repository
+// NOTE: functions calling this assume a failure due to repository count limit; if new checks are added, those functions should be revised
+func (u *User) CanCreateRepo() bool {
+ if u.IsAdmin {
+ return true
+ }
+ if u.MaxRepoCreation <= -1 {
+ if setting.Repository.MaxCreationLimit <= -1 {
+ return true
+ }
+ return u.NumRepos < setting.Repository.MaxCreationLimit
+ }
+ return u.NumRepos < u.MaxRepoCreation
+}
+
+// CanCreateOrganization returns true if the user can create an organization.
+func (u *User) CanCreateOrganization() bool {
+ return u.IsAdmin || (u.AllowCreateOrganization && !setting.Admin.DisableRegularOrgCreation)
+}
+
+// CanEditGitHook returns true if user can edit Git hooks.
+func (u *User) CanEditGitHook() bool {
+ return !setting.DisableGitHooks && (u.IsAdmin || u.AllowGitHook)
+}
+
+// CanForkRepo returns whether the user can fork a repository:
+// either forks are exempt from the creation limit, or the user must still be able to create repositories.
+func (u *User) CanForkRepo() bool {
+ if setting.Repository.AllowForkWithoutMaximumLimit {
+ return true
+ }
+ return u.CanCreateRepo()
+}
+
+// CanImportLocal returns true if user can migrate repository by local path.
+func (u *User) CanImportLocal() bool {
+ if !setting.ImportLocalPaths || u == nil {
+ return false
+ }
+ return u.IsAdmin || u.AllowImportLocal
+}
+
+// DashboardLink returns the user dashboard page link.
+func (u *User) DashboardLink() string {
+ if u.IsOrganization() {
+ return u.OrganisationLink() + "/dashboard"
+ }
+ return setting.AppSubURL + "/"
+}
+
+// HomeLink returns the user or organization home page link.
+func (u *User) HomeLink() string {
+ return setting.AppSubURL + "/" + url.PathEscape(u.Name)
+}
+
+// HTMLURL returns the user or organization's full link.
+func (u *User) HTMLURL() string {
+ return setting.AppURL + url.PathEscape(u.Name)
+}
+
+// APActorID returns the IRI to the api endpoint of the user
+func (u *User) APActorID() string {
+ return fmt.Sprintf("%vapi/v1/activitypub/user-id/%v", setting.AppURL, url.PathEscape(fmt.Sprintf("%v", u.ID)))
+}
+
+// OrganisationLink returns the organization sub page link.
+func (u *User) OrganisationLink() string {
+ return setting.AppSubURL + "/org/" + url.PathEscape(u.Name)
+}
+
+// GenerateEmailAuthorizationCode generates an activation code for the user for the specified purpose.
+// The standard expiry is ActiveCodeLives minutes.
+func (u *User) GenerateEmailAuthorizationCode(ctx context.Context, purpose auth.AuthorizationPurpose) (string, error) {
+ lookup, validator, err := auth.GenerateAuthToken(ctx, u.ID, timeutil.TimeStampNow().Add(int64(setting.Service.ActiveCodeLives)*60), purpose)
+ if err != nil {
+ return "", err
+ }
+ return lookup + ":" + validator, nil
+}
+
+// GetUserFollowers returns a range of the user's followers.
+func GetUserFollowers(ctx context.Context, u, viewer *User, listOptions db.ListOptions) ([]*User, int64, error) {
+ sess := db.GetEngine(ctx).
+ Select("`user`.*").
+ Join("LEFT", "follow", "`user`.id=follow.user_id").
+ Where("follow.follow_id=?", u.ID).
+ And("`user`.type=?", UserTypeIndividual).
+ And(isUserVisibleToViewerCond(viewer))
+
+ if listOptions.Page != 0 {
+ sess = db.SetSessionPagination(sess, &listOptions)
+
+ users := make([]*User, 0, listOptions.PageSize)
+ count, err := sess.FindAndCount(&users)
+ return users, count, err
+ }
+
+ users := make([]*User, 0, 8)
+ count, err := sess.FindAndCount(&users)
+ return users, count, err
+}
+
+// GetUserFollowing returns a range of the users that the given user follows.
+func GetUserFollowing(ctx context.Context, u, viewer *User, listOptions db.ListOptions) ([]*User, int64, error) {
+ sess := db.GetEngine(ctx).
+ Select("`user`.*").
+ Join("LEFT", "follow", "`user`.id=follow.follow_id").
+ Where("follow.user_id=?", u.ID).
+ And("`user`.type IN (?, ?)", UserTypeIndividual, UserTypeOrganization).
+ And(isUserVisibleToViewerCond(viewer))
+
+ if listOptions.Page != 0 {
+ sess = db.SetSessionPagination(sess, &listOptions)
+
+ users := make([]*User, 0, listOptions.PageSize)
+ count, err := sess.FindAndCount(&users)
+ return users, count, err
+ }
+
+ users := make([]*User, 0, 8)
+ count, err := sess.FindAndCount(&users)
+ return users, count, err
+}
+
+// NewGitSig generates and returns the signature of given user.
+func (u *User) NewGitSig() *git.Signature {
+ return &git.Signature{
+ Name: u.GitName(),
+ Email: u.GetEmail(),
+ When: time.Now(),
+ }
+}
+
+// SetPassword hashes a password using the algorithm defined in the config value of PASSWORD_HASH_ALGO
+// and updates the passwd, salt and passwd_hash_algo fields
+func (u *User) SetPassword(passwd string) (err error) {
+ // Invalidate all authentication tokens for this user.
+ if err := auth.DeleteAuthTokenByUser(db.DefaultContext, u.ID); err != nil {
+ return err
+ }
+
+ if u.Salt, err = GetUserSalt(); err != nil {
+ return err
+ }
+ if u.Passwd, err = hash.Parse(setting.PasswordHashAlgo).Hash(passwd, u.Salt); err != nil {
+ return err
+ }
+ u.PasswdHashAlgo = setting.PasswordHashAlgo
+
+ return nil
+}
+
+// ValidatePassword checks if the given password matches the one belonging to the user.
+func (u *User) ValidatePassword(passwd string) bool {
+ return hash.Parse(u.PasswdHashAlgo).VerifyPassword(passwd, u.Passwd, u.Salt)
+}
+
+// IsPasswordSet checks if the password is set or left empty
+func (u *User) IsPasswordSet() bool {
+ return len(u.Passwd) != 0
+}
+
+// IsOrganization returns true if user is actually an organization.
+func (u *User) IsOrganization() bool {
+ return u.Type == UserTypeOrganization
+}
+
+// IsIndividual returns true if user is actually an individual user.
+func (u *User) IsIndividual() bool {
+ return u.Type == UserTypeIndividual
+}
+
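+// IsUser returns true if the user is an individual or bot account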
+func (u *User) IsUser() bool {
+ return u.Type == UserTypeIndividual || u.Type == UserTypeBot
+}
+
+// IsBot returns whether or not the user is of type bot
+func (u *User) IsBot() bool {
+ return u.Type == UserTypeBot
+}
+
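+// IsRemote returns true if the user is a remote (federated) user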
+func (u *User) IsRemote() bool {
+ return u.Type == UserTypeRemoteUser
+}
+
+// DisplayName returns full name if it's not empty,
+// returns username otherwise.
+func (u *User) DisplayName() string {
+ trimmed := strings.TrimSpace(u.FullName)
+ if len(trimmed) > 0 {
+ return trimmed
+ }
+ return u.Name
+}
+
+var emailToReplacer = strings.NewReplacer(
+ "\n", "",
+ "\r", "",
+ "<", "",
+ ">", "",
+ ",", "",
+ ":", "",
+ ";", "",
+)
+
+// EmailTo returns a string suitable to be put into an e-mail `To:` header.
+func (u *User) EmailTo(overrideMail ...string) string {
+ sanitizedDisplayName := emailToReplacer.Replace(u.DisplayName())
+
+ email := u.Email
+ if len(overrideMail) > 0 {
+ email = overrideMail[0]
+ }
+
+ // should be an edge case but nice to have
+ if sanitizedDisplayName == email {
+ return email
+ }
+
+ address, err := mail.ParseAddress(fmt.Sprintf("%s <%s>", sanitizedDisplayName, email))
+ if err != nil {
+ return email
+ }
+
+ return address.String()
+}
+
+// GetDisplayName returns full name if it's not empty and DEFAULT_SHOW_FULL_NAME is set,
+// returns username otherwise.
+func (u *User) GetDisplayName() string {
+ if setting.UI.DefaultShowFullName {
+ trimmed := strings.TrimSpace(u.FullName)
+ if len(trimmed) > 0 {
+ return trimmed
+ }
+ }
+ return u.Name
+}
+
+// GetCompleteName returns the full name and username in the form of
+// "Full Name (username)" if full name is not empty, otherwise it returns
+// "username".
+func (u *User) GetCompleteName() string {
+ trimmedFullName := strings.TrimSpace(u.FullName)
+ if len(trimmedFullName) > 0 {
+ return fmt.Sprintf("%s (%s)", trimmedFullName, u.Name)
+ }
+ return u.Name
+}
+
+func gitSafeName(name string) string {
+ return strings.TrimSpace(strings.NewReplacer("\n", "", "<", "", ">", "").Replace(name))
+}
+
+// GitName returns a git safe name
+func (u *User) GitName() string {
+ gitName := gitSafeName(u.FullName)
+ if len(gitName) > 0 {
+ return gitName
+ }
+ // Although u.Name should be safe if created in our system,
+ // LDAP users may have bad names
+ gitName = gitSafeName(u.Name)
+ if len(gitName) > 0 {
+ return gitName
+ }
+ // Totally pathological name so it's got to be:
+ return fmt.Sprintf("user-%d", u.ID)
+}
+
+// ShortName truncates the full name or username to the given length with an ellipsis
+func (u *User) ShortName(length int) string {
+ if setting.UI.DefaultShowFullName && len(u.FullName) > 0 {
+ return base.EllipsisString(u.FullName, length)
+ }
+ return base.EllipsisString(u.Name, length)
+}
+
+// IsMailable checks if a user is eligible
+// to receive emails.
+func (u *User) IsMailable() bool {
+ return u.IsActive
+}
+
+// IsUserExist checks if the given user name exists;
+// user names should be unique in a case-insensitive manner.
+// If uid is given, the check excludes that user;
+// this is used when updating a user name on the settings page.
+func IsUserExist(ctx context.Context, uid int64, name string) (bool, error) {
+ if len(name) == 0 {
+ return false, nil
+ }
+ return db.GetEngine(ctx).
+ Where("id!=?", uid).
+ Get(&User{LowerName: strings.ToLower(name)})
+}
+
+// Note: As of the beginning of 2022, it is recommended to use at least
+// 64 bits of salt, while NIST already recommends 128 bits.
+// (16 bytes = 16 * 8 = 128 bits)
+const SaltByteLength = 16
+
+// GetUserSalt returns a random user salt token.
+func GetUserSalt() (string, error) {
+ rBytes, err := util.CryptoRandomBytes(SaltByteLength)
+ if err != nil {
+ return "", err
+ }
+ // Returns a 32 bytes long string.
+ return hex.EncodeToString(rBytes), nil
+}
+
+// Note: The set of characters here can safely expand without a breaking change,
+// but characters removed from this set can cause user account linking to break
+var (
+ customCharsReplacement = strings.NewReplacer("Æ", "AE")
+ removeCharsRE = regexp.MustCompile(`['´\x60]`)
+ removeDiacriticsTransform = transform.Chain(norm.NFD, runes.Remove(runes.In(unicode.Mn)), norm.NFC)
+ replaceCharsHyphenRE = regexp.MustCompile(`[\s~+]`)
+)
+
+// NormalizeUserName returns a string with single quotes and diacritics
+// removed, and any other non-supported username characters replaced with
+// a `-` character
+func NormalizeUserName(s string) (string, error) {
+ strDiacriticsRemoved, n, err := transform.String(removeDiacriticsTransform, customCharsReplacement.Replace(s))
+ if err != nil {
+ return "", fmt.Errorf("Failed to normalize character `%v` in provided username `%v`", s[n], s)
+ }
+ return replaceCharsHyphenRE.ReplaceAllLiteralString(removeCharsRE.ReplaceAllLiteralString(strDiacriticsRemoved, ""), "-"), nil
+}
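+
+// Illustrative transformations derived from the rules above (not exhaustive):
+//
+//	NormalizeUserName("Jörg O'Brien") // "Jorg-OBrien": diacritic stripped, quote removed, space replaced by '-'
+//	NormalizeUserName("Æsa")          // "AEsa": custom replacement applied before normalization
+//	NormalizeUserName("a+b~c")        // "a-b-c": '+' and '~' are replaced by '-'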
+
+var (
+ reservedUsernames = []string{
+ ".",
+ "..",
+ ".well-known",
+ "admin",
+ "api",
+ "assets",
+ "attachments",
+ "avatar",
+ "avatars",
+ "captcha",
+ "commits",
+ "debug",
+ "devtest",
+ "error",
+ "explore",
+ "favicon.ico",
+ "ghost",
+ "issues",
+ "login",
+ "manifest.json",
+ "metrics",
+ "milestones",
+ "new",
+ "notifications",
+ "org",
+ "pulls",
+ "raw",
+ "repo",
+ "repo-avatars",
+ "robots.txt",
+ "search",
+ "serviceworker.js",
+ "ssh_info",
+ "swagger.v1.json",
+ "user",
+ "v2",
+ "gitea-actions",
+ "forgejo-actions",
+ }
+
+ // DON'T ADD ANY NEW STUFF, WE SOLVE THIS WITH `/user/{obj}` PATHS!
+ reservedUserPatterns = []string{"*.keys", "*.gpg", "*.rss", "*.atom", "*.png"}
+)
+
+// IsUsableUsername returns an error when a username is reserved
+func IsUsableUsername(name string) error {
+ // Validate the username to make sure it satisfies the requirements.
+ if !validation.IsValidUsername(name) {
+ // Note: this error is normally caught earlier in the UI
+ return db.ErrNameCharsNotAllowed{Name: name}
+ }
+ return db.IsUsableName(reservedUsernames, reservedUserPatterns, name)
+}
+
+// CreateUserOverwriteOptions are optional options that overwrite system defaults on user creation
+type CreateUserOverwriteOptions struct {
+ KeepEmailPrivate optional.Option[bool]
+ Visibility *structs.VisibleType
+ AllowCreateOrganization optional.Option[bool]
+ EmailNotificationsPreference *string
+ MaxRepoCreation *int
+ Theme *string
+ IsRestricted optional.Option[bool]
+ IsActive optional.Option[bool]
+}
+
+// CreateUser creates a record of a new user.
+func CreateUser(ctx context.Context, u *User, overwriteDefault ...*CreateUserOverwriteOptions) (err error) {
+ return createUser(ctx, u, false, overwriteDefault...)
+}
+
+// AdminCreateUser is used by admins to manually create users
+func AdminCreateUser(ctx context.Context, u *User, overwriteDefault ...*CreateUserOverwriteOptions) (err error) {
+ return createUser(ctx, u, true, overwriteDefault...)
+}
+
+// createUser creates a record of a new user.
+func createUser(ctx context.Context, u *User, createdByAdmin bool, overwriteDefault ...*CreateUserOverwriteOptions) (err error) {
+ if err = IsUsableUsername(u.Name); err != nil {
+ return err
+ }
+
+ // set system defaults
+ u.KeepEmailPrivate = setting.Service.DefaultKeepEmailPrivate
+ u.Visibility = setting.Service.DefaultUserVisibilityMode
+ u.AllowCreateOrganization = setting.Service.DefaultAllowCreateOrganization && !setting.Admin.DisableRegularOrgCreation
+ u.EmailNotificationsPreference = setting.Admin.DefaultEmailNotification
+ u.MaxRepoCreation = -1
+ u.Theme = setting.UI.DefaultTheme
+ u.IsRestricted = setting.Service.DefaultUserIsRestricted
+ u.IsActive = !(setting.Service.RegisterEmailConfirm || setting.Service.RegisterManualConfirm)
+
+ // Ensure consistency of the dates.
+ if u.UpdatedUnix < u.CreatedUnix {
+ u.UpdatedUnix = u.CreatedUnix
+ }
+
+ // overwrite defaults if set
+ if len(overwriteDefault) != 0 && overwriteDefault[0] != nil {
+ overwrite := overwriteDefault[0]
+ if overwrite.KeepEmailPrivate.Has() {
+ u.KeepEmailPrivate = overwrite.KeepEmailPrivate.Value()
+ }
+ if overwrite.Visibility != nil {
+ u.Visibility = *overwrite.Visibility
+ }
+ if overwrite.AllowCreateOrganization.Has() {
+ u.AllowCreateOrganization = overwrite.AllowCreateOrganization.Value()
+ }
+ if overwrite.EmailNotificationsPreference != nil {
+ u.EmailNotificationsPreference = *overwrite.EmailNotificationsPreference
+ }
+ if overwrite.MaxRepoCreation != nil {
+ u.MaxRepoCreation = *overwrite.MaxRepoCreation
+ }
+ if overwrite.Theme != nil {
+ u.Theme = *overwrite.Theme
+ }
+ if overwrite.IsRestricted.Has() {
+ u.IsRestricted = overwrite.IsRestricted.Value()
+ }
+ if overwrite.IsActive.Has() {
+ u.IsActive = overwrite.IsActive.Value()
+ }
+ }
+
+ // validate data
+ if err := ValidateUser(u); err != nil {
+ return err
+ }
+
+ if createdByAdmin {
+ if err := ValidateEmailForAdmin(u.Email); err != nil {
+ return err
+ }
+ } else {
+ if err := ValidateEmail(u.Email); err != nil {
+ return err
+ }
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ isExist, err := IsUserExist(ctx, 0, u.Name)
+ if err != nil {
+ return err
+ } else if isExist {
+ return ErrUserAlreadyExist{u.Name}
+ }
+
+ isExist, err = IsEmailUsed(ctx, u.Email)
+ if err != nil {
+ return err
+ } else if isExist {
+ return ErrEmailAlreadyUsed{
+ Email: u.Email,
+ }
+ }
+
+ // prepare for database
+
+ u.LowerName = strings.ToLower(u.Name)
+ u.AvatarEmail = u.Email
+ if u.Rands, err = GetUserSalt(); err != nil {
+ return err
+ }
+ if u.Passwd != "" {
+ if err = u.SetPassword(u.Passwd); err != nil {
+ return err
+ }
+ } else {
+ u.Salt = ""
+ u.PasswdHashAlgo = ""
+ }
+
+ // save changes to database
+
+ if err = DeleteUserRedirect(ctx, u.Name); err != nil {
+ return err
+ }
+
+ if u.CreatedUnix == 0 {
+ // Caller expects auto-time for creation & update timestamps.
+ err = db.Insert(ctx, u)
+ } else {
+ // Caller sets the timestamps themselves. They are responsible for ensuring
+ // both `CreatedUnix` and `UpdatedUnix` are set appropriately.
+ _, err = db.GetEngine(ctx).NoAutoTime().Insert(u)
+ }
+ if err != nil {
+ return err
+ }
+
+ // insert email address
+ if err := db.Insert(ctx, &EmailAddress{
+ UID: u.ID,
+ Email: u.Email,
+ LowerEmail: strings.ToLower(u.Email),
+ IsActivated: u.IsActive,
+ IsPrimary: true,
+ }); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
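+
+// Call sketch (illustrative; the field values are assumptions for the example):
+//
+//	visibility := structs.VisibleTypePublic
+//	u := &User{Name: "jane", Email: "jane@example.com", Passwd: "secret"}
+//	err := CreateUser(ctx, u, &CreateUserOverwriteOptions{
+//		IsActive:   optional.Some(true),
+//		Visibility: &visibility,
+//	})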
+
+// IsLastAdminUser checks whether the user is the last admin
+func IsLastAdminUser(ctx context.Context, user *User) bool {
+ if user.IsAdmin && CountUsers(ctx, &CountUserFilter{IsAdmin: optional.Some(true)}) <= 1 {
+ return true
+ }
+ return false
+}
+
+// CountUserFilter represents optional filters for CountUsers
+type CountUserFilter struct {
+ LastLoginSince *int64
+ IsAdmin optional.Option[bool]
+}
+
+// CountUsers returns number of users.
+func CountUsers(ctx context.Context, opts *CountUserFilter) int64 {
+ return countUsers(ctx, opts)
+}
+
+func countUsers(ctx context.Context, opts *CountUserFilter) int64 {
+ sess := db.GetEngine(ctx)
+ cond := builder.NewCond()
+ cond = cond.And(builder.Eq{"type": UserTypeIndividual})
+
+ if opts != nil {
+ if opts.LastLoginSince != nil {
+ cond = cond.And(builder.Gte{"last_login_unix": *opts.LastLoginSince})
+ }
+
+ if opts.IsAdmin.Has() {
+ cond = cond.And(builder.Eq{"is_admin": opts.IsAdmin.Value()})
+ }
+ }
+
+ count, err := sess.Where(cond).Count(new(User))
+ if err != nil {
+ log.Error("user.countUsers: %v", err)
+ }
+
+ return count
+}
+
+// VerifyUserAuthorizationToken verifies that the code is valid for the given purpose for this user.
+// If delete is specified, the token will be deleted.
+func VerifyUserAuthorizationToken(ctx context.Context, code string, purpose auth.AuthorizationPurpose, delete bool) (*User, error) {
+ lookupKey, validator, found := strings.Cut(code, ":")
+ if !found {
+ return nil, nil
+ }
+
+ authToken, err := auth.FindAuthToken(ctx, lookupKey, purpose)
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ return nil, nil
+ }
+ return nil, err
+ }
+
+ if authToken.IsExpired() {
+ return nil, auth.DeleteAuthToken(ctx, authToken)
+ }
+
+ rawValidator, err := hex.DecodeString(validator)
+ if err != nil {
+ return nil, err
+ }
+
+ if subtle.ConstantTimeCompare([]byte(authToken.HashedValidator), []byte(auth.HashValidator(rawValidator))) == 0 {
+ return nil, errors.New("validator doesn't match")
+ }
+
+ u, err := GetUserByID(ctx, authToken.UID)
+ if err != nil {
+ if IsErrUserNotExist(err) {
+ return nil, nil
+ }
+ return nil, err
+ }
+
+ if delete {
+ if err := auth.DeleteAuthToken(ctx, authToken); err != nil {
+ return nil, err
+ }
+ }
+
+ return u, nil
+}
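+
+// Round-trip sketch (illustrative; `ctx`, `u` and the concrete `purpose`
+// constant are assumed to come from the caller):
+//
+//	code, err := u.GenerateEmailAuthorizationCode(ctx, purpose)
+//	// ... the code is sent to the user by email ...
+//	verified, err := VerifyUserAuthorizationToken(ctx, code, purpose, true)
+//	// verified == nil means the code was malformed, expired or invalid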
+
+// ValidateUser checks if the user is valid for inserting into / updating in the database
+func ValidateUser(u *User, cols ...string) error {
+ if len(cols) == 0 || util.SliceContainsString(cols, "visibility", true) {
+ if !setting.Service.AllowedUserVisibilityModesSlice.IsAllowedVisibility(u.Visibility) && !u.IsOrganization() {
+ return fmt.Errorf("visibility Mode not allowed: %s", u.Visibility.String())
+ }
+ }
+
+ return nil
+}
+
+func (u User) Validate() []string {
+ var result []string
+ if err := ValidateUser(&u); err != nil {
+ result = append(result, err.Error())
+ }
+ if err := ValidateEmail(u.Email); err != nil {
+ result = append(result, err.Error())
+ }
+ return result
+}
+
+// UpdateUserCols updates the specified columns of the given user
+func UpdateUserCols(ctx context.Context, u *User, cols ...string) error {
+ if err := ValidateUser(u, cols...); err != nil {
+ return err
+ }
+
+ _, err := db.GetEngine(ctx).ID(u.ID).Cols(cols...).Update(u)
+ return err
+}
+
+// GetInactiveUsers gets all inactive users
+func GetInactiveUsers(ctx context.Context, olderThan time.Duration) ([]*User, error) {
+ cond := builder.And(
+ builder.Eq{"is_active": false},
+ builder.Or( // only plain user
+ builder.Eq{"`type`": UserTypeIndividual},
+ builder.Eq{"`type`": UserTypeUserReserved},
+ ),
+ )
+
+ if olderThan > 0 {
+ cond = cond.And(builder.Lt{"created_unix": time.Now().Add(-olderThan).Unix()})
+ }
+
+ users := make([]*User, 0, 10)
+ return users, db.GetEngine(ctx).
+ Where(cond).
+ Find(&users)
+}
+
+// UserPath returns the absolute path of the user's repositories.
+func UserPath(userName string) string { //revive:disable-line:exported
+ return filepath.Join(setting.RepoRootPath, strings.ToLower(userName))
+}
+
+// GetUserByID returns the user object by given ID if exists.
+func GetUserByID(ctx context.Context, id int64) (*User, error) {
+ u := new(User)
+ has, err := db.GetEngine(ctx).ID(id).Get(u)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrUserNotExist{UID: id}
+ }
+ return u, nil
+}
+
+// GetUserByIDs returns the user objects for the given IDs, if they exist.
+func GetUserByIDs(ctx context.Context, ids []int64) ([]*User, error) {
+ if len(ids) == 0 {
+ return nil, nil
+ }
+
+ users := make([]*User, 0, len(ids))
+ err := db.GetEngine(ctx).In("id", ids).
+ Table("user").
+ Find(&users)
+ return users, err
+}
+
+func IsValidUserID(id int64) bool {
+ return id > 0 || id == GhostUserID || id == ActionsUserID
+}
+
+func GetUserFromMap(id int64, idMap map[int64]*User) (int64, *User) {
+ if user, ok := idMap[id]; ok {
+ return id, user
+ }
+ if id == ActionsUserID {
+ return ActionsUserID, NewActionsUser()
+ }
+ return GhostUserID, NewGhostUser()
+}
+
+// GetPossibleUserByID returns the user if id > 0, or a system user if id < 0
+func GetPossibleUserByID(ctx context.Context, id int64) (*User, error) {
+ switch id {
+ case GhostUserID:
+ return NewGhostUser(), nil
+ case ActionsUserID:
+ return NewActionsUser(), nil
+ case 0:
+ return nil, ErrUserNotExist{}
+ default:
+ return GetUserByID(ctx, id)
+ }
+}
+
+// GetPossibleUserByIDs returns the users for ids > 0, and system users for ids < 0
+func GetPossibleUserByIDs(ctx context.Context, ids []int64) ([]*User, error) {
+ uniqueIDs := container.SetOf(ids...)
+ users := make([]*User, 0, len(ids))
+ _ = uniqueIDs.Remove(0)
+ if uniqueIDs.Remove(GhostUserID) {
+ users = append(users, NewGhostUser())
+ }
+ if uniqueIDs.Remove(ActionsUserID) {
+ users = append(users, NewActionsUser())
+ }
+ res, err := GetUserByIDs(ctx, uniqueIDs.Values())
+ if err != nil {
+ return nil, err
+ }
+ users = append(users, res...)
+ return users, nil
+}
+
+// GetUserByName returns a user by the given name.
+func GetUserByName(ctx context.Context, name string) (*User, error) {
+ if len(name) == 0 {
+ return nil, ErrUserNotExist{Name: name}
+ }
+ // adding Type: UserTypeIndividual is a noop because it is zero and discarded
+ u := &User{LowerName: strings.ToLower(name)}
+ has, err := db.GetEngine(ctx).Get(u)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrUserNotExist{Name: name}
+ }
+ return u, nil
+}
+
+// GetUserEmailsByNames returns a list of e-mails corresponding to the names of users
+// that have their email notifications set to enabled or onmention.
+func GetUserEmailsByNames(ctx context.Context, names []string) []string {
+ mails := make([]string, 0, len(names))
+ for _, name := range names {
+ u, err := GetUserByName(ctx, name)
+ if err != nil {
+ continue
+ }
+ if u.IsMailable() && u.EmailNotificationsPreference != EmailNotificationsDisabled {
+ mails = append(mails, u.Email)
+ }
+ }
+ return mails
+}
+
+// GetMaileableUsersByIDs gets the users for the given IDs, but only those who can receive emails
+func GetMaileableUsersByIDs(ctx context.Context, ids []int64, isMention bool) ([]*User, error) {
+ if len(ids) == 0 {
+ return nil, nil
+ }
+ ous := make([]*User, 0, len(ids))
+
+ if isMention {
+ return ous, db.GetEngine(ctx).
+ In("id", ids).
+ Where("`type` = ?", UserTypeIndividual).
+ And("`prohibit_login` = ?", false).
+ And("`is_active` = ?", true).
+ In("`email_notifications_preference`", EmailNotificationsEnabled, EmailNotificationsOnMention, EmailNotificationsAndYourOwn).
+ Find(&ous)
+ }
+
+ return ous, db.GetEngine(ctx).
+ In("id", ids).
+ Where("`type` = ?", UserTypeIndividual).
+ And("`prohibit_login` = ?", false).
+ And("`is_active` = ?", true).
+ In("`email_notifications_preference`", EmailNotificationsEnabled, EmailNotificationsAndYourOwn).
+ Find(&ous)
+}
+
+// GetUserNamesByIDs returns usernames for all resolved users from a list of IDs.
+func GetUserNamesByIDs(ctx context.Context, ids []int64) ([]string, error) {
+ unames := make([]string, 0, len(ids))
+ err := db.GetEngine(ctx).In("id", ids).
+ Table("user").
+ Asc("name").
+ Cols("name").
+ Find(&unames)
+ return unames, err
+}
+
+// GetUserNameByID returns username for the id
+func GetUserNameByID(ctx context.Context, id int64) (string, error) {
+ var name string
+ has, err := db.GetEngine(ctx).Table("user").Where("id = ?", id).Cols("name").Get(&name)
+ if err != nil {
+ return "", err
+ }
+ if has {
+ return name, nil
+ }
+ return "", nil
+}
+
+// GetUserIDsByNames returns a slice of IDs corresponding to the given names.
+func GetUserIDsByNames(ctx context.Context, names []string, ignoreNonExistent bool) ([]int64, error) {
+ ids := make([]int64, 0, len(names))
+ for _, name := range names {
+ u, err := GetUserByName(ctx, name)
+ if err != nil {
+ if ignoreNonExistent {
+ continue
+ }
+ return nil, err
+ }
+ ids = append(ids, u.ID)
+ }
+ return ids, nil
+}
+
+// GetUsersBySource returns a list of Users for a login source
+func GetUsersBySource(ctx context.Context, s *auth.Source) ([]*User, error) {
+ var users []*User
+ err := db.GetEngine(ctx).Where("login_type = ? AND login_source = ?", s.Type, s.ID).Find(&users)
+ return users, err
+}
+
+// UserCommit represents a commit with validation of user.
+type UserCommit struct { //revive:disable-line:exported
+ User *User
+ *git.Commit
+}
+
+// ValidateCommitWithEmail checks if the author's e-mail of the commit corresponds to a user.
+func ValidateCommitWithEmail(ctx context.Context, c *git.Commit) *User {
+ if c.Author == nil {
+ return nil
+ }
+ u, err := GetUserByEmail(ctx, c.Author.Email)
+ if err != nil {
+ return nil
+ }
+ return u
+}
+
+// ValidateCommitsWithEmails checks if the authors' e-mails of commits correspond to users.
+func ValidateCommitsWithEmails(ctx context.Context, oldCommits []*git.Commit) []*UserCommit {
+ var (
+ emails = make(map[string]*User)
+ newCommits = make([]*UserCommit, 0, len(oldCommits))
+ )
+ for _, c := range oldCommits {
+ var u *User
+ if c.Author != nil {
+ if v, ok := emails[c.Author.Email]; !ok {
+ u, _ = GetUserByEmail(ctx, c.Author.Email)
+ emails[c.Author.Email] = u
+ } else {
+ u = v
+ }
+ }
+
+ newCommits = append(newCommits, &UserCommit{
+ User: u,
+ Commit: c,
+ })
+ }
+ return newCommits
+}
+
+// GetUserByEmail returns the user object by the given e-mail, if it exists.
+func GetUserByEmail(ctx context.Context, email string) (*User, error) {
+ if len(email) == 0 {
+ return nil, ErrUserNotExist{Name: email}
+ }
+
+ email = strings.ToLower(email)
+ // First, check the list of activated email addresses
+ emailAddress := &EmailAddress{LowerEmail: email, IsActivated: true}
+ has, err := db.GetEngine(ctx).Get(emailAddress)
+ if err != nil {
+ return nil, err
+ }
+ if has {
+ return GetUserByID(ctx, emailAddress.UID)
+ }
+
+ // Finally, if email address is the protected email address:
+ if strings.HasSuffix(email, fmt.Sprintf("@%s", setting.Service.NoReplyAddress)) {
+ username := strings.TrimSuffix(email, fmt.Sprintf("@%s", setting.Service.NoReplyAddress))
+ user := &User{}
+ has, err := db.GetEngine(ctx).Where("lower_name=?", username).Get(user)
+ if err != nil {
+ return nil, err
+ }
+ if has {
+ return user, nil
+ }
+ }
+
+ return nil, ErrUserNotExist{Name: email}
+}
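+
+// Illustrative example of the noreply fallback above (values assumed for the
+// example): if setting.Service.NoReplyAddress is "noreply.example.org", then
+// looking up "jane@noreply.example.org" resolves to the user whose
+// lower_name is "jane", provided that user exists.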
+
+// GetUser checks if a user already exists
+func GetUser(ctx context.Context, user *User) (bool, error) {
+ return db.GetEngine(ctx).Get(user)
+}
+
+// GetUserByOpenID returns the user object by the given OpenID, if it exists.
+func GetUserByOpenID(ctx context.Context, uri string) (*User, error) {
+ if len(uri) == 0 {
+ return nil, ErrUserNotExist{Name: uri}
+ }
+
+ uri, err := openid.Normalize(uri)
+ if err != nil {
+ return nil, err
+ }
+
+ log.Trace("Normalized OpenID URI: " + uri)
+
+ // Check the openid table
+ oid := &UserOpenID{}
+ has, err := db.GetEngine(ctx).Where("uri=?", uri).Get(oid)
+ if err != nil {
+ return nil, err
+ }
+ if has {
+ return GetUserByID(ctx, oid.UID)
+ }
+
+ return nil, ErrUserNotExist{Name: uri}
+}
+
+// GetAdminUser returns the first administrator
+func GetAdminUser(ctx context.Context) (*User, error) {
+ var admin User
+ has, err := db.GetEngine(ctx).
+ Where("is_admin=?", true).
+ Asc("id"). // Reliably get the admin with the lowest ID.
+ Get(&admin)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrUserNotExist{}
+ }
+
+ return &admin, nil
+}
+
+func isUserVisibleToViewerCond(viewer *User) builder.Cond {
+ if viewer != nil && viewer.IsAdmin {
+ return builder.NewCond()
+ }
+
+ if viewer == nil || viewer.IsRestricted {
+ return builder.Eq{
+ "`user`.visibility": structs.VisibleTypePublic,
+ }
+ }
+
+ return builder.Neq{
+ "`user`.visibility": structs.VisibleTypePrivate,
+ }.Or(
+ // viewer self
+ builder.Eq{"`user`.id": viewer.ID},
+ // viewer's following
+ builder.In("`user`.id",
+ builder.
+ Select("`follow`.user_id").
+ From("follow").
+ Where(builder.Eq{"`follow`.follow_id": viewer.ID})),
+ // viewer's org user
+ builder.In("`user`.id",
+ builder.
+ Select("`team_user`.uid").
+ From("team_user").
+ Join("INNER", "`team_user` AS t2", "`team_user`.org_id = `t2`.org_id").
+ Where(builder.Eq{"`t2`.uid": viewer.ID})),
+ // viewer's org
+ builder.In("`user`.id",
+ builder.
+ Select("`team_user`.org_id").
+ From("team_user").
+ Where(builder.Eq{"`team_user`.uid": viewer.ID})))
+}
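+
+// In summary (a plain-language reading of the condition above): admins are
+// unrestricted; anonymous and restricted viewers only see public users; all
+// other viewers see every non-private user, plus themselves, users related to
+// them through follow relationships, and users/organizations they share an
+// organization with.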
+
+// IsUserVisibleToViewer checks if the viewer is able to see the user's profile
+func IsUserVisibleToViewer(ctx context.Context, u, viewer *User) bool {
+ if viewer != nil && (viewer.IsAdmin || viewer.ID == u.ID) {
+ return true
+ }
+
+ switch u.Visibility {
+ case structs.VisibleTypePublic:
+ return true
+ case structs.VisibleTypeLimited:
+ if viewer == nil || viewer.IsRestricted {
+ return false
+ }
+ return true
+ case structs.VisibleTypePrivate:
+ if viewer == nil || viewer.IsRestricted {
+ return false
+ }
+
+ // If they follow - they see each other
+ follower := IsFollowing(ctx, u.ID, viewer.ID)
+ if follower {
+ return true
+ }
+
+ // Now we need to check if they are in some organization together
+ count, err := db.GetEngine(ctx).Table("team_user").
+ Where(
+ builder.And(
+ builder.Eq{"uid": viewer.ID},
+ builder.Or(
+ builder.Eq{"org_id": u.ID},
+ builder.In("org_id",
+ builder.Select("org_id").
+ From("team_user", "t2").
+ Where(builder.Eq{"uid": u.ID}))))).
+ Count()
+ if err != nil {
+ return false
+ }
+
+ if count == 0 {
+ // No common organization
+ return false
+ }
+
+ // they are in an organization together
+ return true
+ }
+ return false
+}
+
+// CountWrongUserType counts organization accounts that are wrongly stored with the individual user type
+func CountWrongUserType(ctx context.Context) (int64, error) {
+ return db.GetEngine(ctx).Where(builder.Eq{"type": 0}.And(builder.Neq{"num_teams": 0})).Count(new(User))
+}
+
+// FixWrongUserType fixes organization accounts that are wrongly stored with the individual user type
+func FixWrongUserType(ctx context.Context) (int64, error) {
+ return db.GetEngine(ctx).Where(builder.Eq{"type": 0}.And(builder.Neq{"num_teams": 0})).Cols("type").NoAutoTime().Update(&User{Type: 1})
+}
+
+func GetOrderByName() string {
+ if setting.UI.DefaultShowFullName {
+ return "full_name, name"
+ }
+ return "name"
+}
+
+// IsFeatureDisabledWithLoginType checks if a user feature is disabled, taking into account the login type of the
+// user if applicable
+func IsFeatureDisabledWithLoginType(user *User, feature string) bool {
+ // NOTE: in the long run it may be better to check the ExternalLoginUser table rather than user.LoginType
+ return (user != nil && user.LoginType > auth.Plain && setting.Admin.ExternalUserDisableFeatures.Contains(feature)) ||
+ setting.Admin.UserDisabledFeatures.Contains(feature)
+}
+
+// DisabledFeaturesWithLoginType returns the set of user features disabled, taking into account the login type
+// of the user if applicable
+func DisabledFeaturesWithLoginType(user *User) *container.Set[string] {
+ // NOTE: in the long run it may be better to check the ExternalLoginUser table rather than user.LoginType
+ if user != nil && user.LoginType > auth.Plain {
+ return &setting.Admin.ExternalUserDisableFeatures
+ }
+ return &setting.Admin.UserDisabledFeatures
+}
diff --git a/models/user/user_repository.go b/models/user/user_repository.go
new file mode 100644
index 0000000..c06441b
--- /dev/null
+++ b/models/user/user_repository.go
@@ -0,0 +1,83 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/optional"
+ "code.gitea.io/gitea/modules/validation"
+)
+
+func init() {
+ db.RegisterModel(new(FederatedUser))
+}
+
+func CreateFederatedUser(ctx context.Context, user *User, federatedUser *FederatedUser) error {
+ if res, err := validation.IsValid(user); !res {
+ return err
+ }
+ overwrite := CreateUserOverwriteOptions{
+ IsActive: optional.Some(false),
+ IsRestricted: optional.Some(false),
+ }
+
+ // Begin transaction
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if err := CreateUser(ctx, user, &overwrite); err != nil {
+ return err
+ }
+
+ federatedUser.UserID = user.ID
+ if res, err := validation.IsValid(federatedUser); !res {
+ return err
+ }
+
+ _, err = db.GetEngine(ctx).Insert(federatedUser)
+ if err != nil {
+ return err
+ }
+
+ // Commit transaction
+ return committer.Commit()
+}
+
+func FindFederatedUser(ctx context.Context, externalID string,
+ federationHostID int64,
+) (*User, *FederatedUser, error) {
+ federatedUser := new(FederatedUser)
+ user := new(User)
+ has, err := db.GetEngine(ctx).Where("external_id=? and federation_host_id=?", externalID, federationHostID).Get(federatedUser)
+ if err != nil {
+ return nil, nil, err
+ } else if !has {
+ return nil, nil, nil
+ }
+ has, err = db.GetEngine(ctx).ID(federatedUser.UserID).Get(user)
+ if err != nil {
+ return nil, nil, err
+ } else if !has {
+ return nil, nil, fmt.Errorf("User %v for federated user is missing", federatedUser.UserID)
+ }
+
+ if res, err := validation.IsValid(*user); !res {
+ return nil, nil, err
+ }
+ if res, err := validation.IsValid(*federatedUser); !res {
+ return nil, nil, err
+ }
+ return user, federatedUser, nil
+}
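+
+// Lookup sketch (hedged): a nil, nil, nil result means "no such federated
+// user" rather than an error, so callers typically branch three ways:
+//
+//	user, fedUser, err := FindFederatedUser(ctx, externalID, hostID)
+//	switch {
+//	case err != nil:
+//		return err
+//	case user == nil:
+//		// not yet known locally; create it
+//	default:
+//		// use user / fedUser
+//	}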
+
+func DeleteFederatedUser(ctx context.Context, userID int64) error {
+ _, err := db.GetEngine(ctx).Delete(&FederatedUser{UserID: userID})
+ return err
+}
diff --git a/models/user/user_system.go b/models/user/user_system.go
new file mode 100644
index 0000000..ba9a213
--- /dev/null
+++ b/models/user/user_system.go
@@ -0,0 +1,97 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "net/url"
+ "strings"
+
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/structs"
+)
+
+const (
+ GhostUserID = -1
+ GhostUserName = "Ghost"
+ GhostUserLowerName = "ghost"
+)
+
+// NewGhostUser creates and returns a fake user for someone who has deleted their account.
+func NewGhostUser() *User {
+ return &User{
+ ID: GhostUserID,
+ Name: GhostUserName,
+ LowerName: GhostUserLowerName,
+ }
+}
+
+// IsGhost checks if the user is the fake user created for a deleted account
+func (u *User) IsGhost() bool {
+ if u == nil {
+ return false
+ }
+ return u.ID == GhostUserID && u.Name == GhostUserName
+}
+
+// NewReplaceUser creates and returns a fake user for an external user
+func NewReplaceUser(name string) *User {
+ return &User{
+ ID: 0,
+ Name: name,
+ LowerName: strings.ToLower(name),
+ }
+}
+
+const (
+ ActionsUserID = -2
+ ActionsUserName = "forgejo-actions"
+ ActionsFullName = "Forgejo Actions"
+ ActionsEmail = "noreply@forgejo.org"
+)
+
+// NewActionsUser creates and returns a fake user for running the actions.
+func NewActionsUser() *User {
+ return &User{
+ ID: ActionsUserID,
+ Name: ActionsUserName,
+ LowerName: ActionsUserName,
+ IsActive: true,
+ FullName: ActionsFullName,
+ Email: ActionsEmail,
+ KeepEmailPrivate: true,
+ LoginName: ActionsUserName,
+ Type: UserTypeIndividual,
+ AllowCreateOrganization: true,
+ Visibility: structs.VisibleTypePublic,
+ }
+}
+
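+// IsActions reports whether the user is the Forgejo Actions pseudo-user.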
+func (u *User) IsActions() bool {
+ return u != nil && u.ID == ActionsUserID
+}
+
+const (
+ APActorUserID = -3
+ APActorUserName = "actor"
+ APActorEmail = "noreply@forgejo.org"
+)
+
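+// NewAPActorUser creates and returns the fake user representing the
+// instance-wide ActivityPub actor.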
+func NewAPActorUser() *User {
+ return &User{
+ ID: APActorUserID,
+ Name: APActorUserName,
+ LowerName: APActorUserName,
+ IsActive: true,
+ Email: APActorEmail,
+ KeepEmailPrivate: true,
+ LoginName: APActorUserName,
+ Type: UserTypeIndividual,
+ Visibility: structs.VisibleTypePublic,
+ }
+}
+
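+// APActorUserAPActorID returns the ActivityPub actor ID (a URL) of the
+// instance-wide actor.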
+func APActorUserAPActorID() string {
+ path, _ := url.JoinPath(setting.AppURL, "/api/v1/activitypub/actor")
+ return path
+}
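+
+// A hedged sketch of telling the special users apart; the IDs are the
+// constants above, the switch shape is illustrative:
+//
+//	switch {
+//	case u.IsGhost():
+//		// placeholder for a deleted account
+//	case u.IsActions():
+//		// CI/CD pseudo-user
+//	case u.ID == APActorUserID:
+//		// instance-wide ActivityPub actor
+//	}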
diff --git a/models/user/user_test.go b/models/user/user_test.go
new file mode 100644
index 0000000..f0b7e16
--- /dev/null
+++ b/models/user/user_test.go
@@ -0,0 +1,781 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user_test
+
+import (
+ "context"
+ "crypto/rand"
+ "encoding/hex"
+ "fmt"
+ "strings"
+ "testing"
+ "time"
+
+ "code.gitea.io/gitea/models/auth"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/auth/password/hash"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/optional"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/test"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/tests"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestOAuth2Application_LoadUser(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ app := unittest.AssertExistsAndLoadBean(t, &auth.OAuth2Application{ID: 1})
+ user, err := user_model.GetUserByID(db.DefaultContext, app.UID)
+ require.NoError(t, err)
+ assert.NotNil(t, user)
+}
+
+func TestIsValidUserID(t *testing.T) {
+ assert.False(t, user_model.IsValidUserID(-30))
+ assert.False(t, user_model.IsValidUserID(0))
+ assert.True(t, user_model.IsValidUserID(user_model.GhostUserID))
+ assert.True(t, user_model.IsValidUserID(user_model.ActionsUserID))
+ assert.True(t, user_model.IsValidUserID(200))
+}
+
+func TestGetUserFromMap(t *testing.T) {
+ id := int64(200)
+ idMap := map[int64]*user_model.User{
+ id: {ID: id},
+ }
+
+ ghostID := int64(user_model.GhostUserID)
+ actionsID := int64(user_model.ActionsUserID)
+ actualID, actualUser := user_model.GetUserFromMap(-20, idMap)
+ assert.Equal(t, ghostID, actualID)
+ assert.Equal(t, ghostID, actualUser.ID)
+
+ actualID, actualUser = user_model.GetUserFromMap(0, idMap)
+ assert.Equal(t, ghostID, actualID)
+ assert.Equal(t, ghostID, actualUser.ID)
+
+ actualID, actualUser = user_model.GetUserFromMap(ghostID, idMap)
+ assert.Equal(t, ghostID, actualID)
+ assert.Equal(t, ghostID, actualUser.ID)
+
+ actualID, actualUser = user_model.GetUserFromMap(actionsID, idMap)
+ assert.Equal(t, actionsID, actualID)
+ assert.Equal(t, actionsID, actualUser.ID)
+}
+
+func TestGetUserByName(t *testing.T) {
+ defer tests.AddFixtures("models/user/fixtures/")()
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ {
+ _, err := user_model.GetUserByName(db.DefaultContext, "")
+ assert.True(t, user_model.IsErrUserNotExist(err), err)
+ }
+ {
+ _, err := user_model.GetUserByName(db.DefaultContext, "UNKNOWN")
+ assert.True(t, user_model.IsErrUserNotExist(err), err)
+ }
+ {
+ user, err := user_model.GetUserByName(db.DefaultContext, "USER2")
+ require.NoError(t, err)
+ assert.Equal(t, "user2", user.Name)
+ }
+ {
+ user, err := user_model.GetUserByName(db.DefaultContext, "org3")
+ require.NoError(t, err)
+ assert.Equal(t, "org3", user.Name)
+ }
+ {
+ user, err := user_model.GetUserByName(db.DefaultContext, "remote01")
+ require.NoError(t, err)
+ assert.Equal(t, "remote01", user.Name)
+ }
+}
+
+func TestGetUserEmailsByNames(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ // emails of inactive users are ignored
+ assert.ElementsMatch(t, []string{"user8@example.com"}, user_model.GetUserEmailsByNames(db.DefaultContext, []string{"user8", "user9"}))
+ assert.ElementsMatch(t, []string{"user8@example.com", "user5@example.com"}, user_model.GetUserEmailsByNames(db.DefaultContext, []string{"user8", "user5"}))
+
+ assert.ElementsMatch(t, []string{"user8@example.com"}, user_model.GetUserEmailsByNames(db.DefaultContext, []string{"user8", "org7"}))
+}
+
+func TestCanCreateOrganization(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ admin := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+ assert.True(t, admin.CanCreateOrganization())
+
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+ assert.True(t, user.CanCreateOrganization())
+ // Disable user create organization permission.
+ user.AllowCreateOrganization = false
+ assert.False(t, user.CanCreateOrganization())
+
+ setting.Admin.DisableRegularOrgCreation = true
+ user.AllowCreateOrganization = true
+ assert.True(t, admin.CanCreateOrganization())
+ assert.False(t, user.CanCreateOrganization())
+}
+
+func TestGetAllUsers(t *testing.T) {
+ defer tests.AddFixtures("models/user/fixtures/")()
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ users, err := user_model.GetAllUsers(db.DefaultContext)
+ require.NoError(t, err)
+
+ found := make(map[user_model.UserType]bool)
+ for _, user := range users {
+ found[user.Type] = true
+ }
+ assert.True(t, found[user_model.UserTypeIndividual], users)
+ assert.True(t, found[user_model.UserTypeRemoteUser], users)
+ assert.False(t, found[user_model.UserTypeOrganization], users)
+}
+
+func TestAPActorID(t *testing.T) {
+ user := user_model.User{ID: 1}
+ url := user.APActorID()
+ expected := "https://try.gitea.io/api/v1/activitypub/user-id/1"
+ if url != expected {
+ t.Errorf("unexpected APActorID, expected: %q, actual: %q", expected, url)
+ }
+}
+
+func TestSearchUsers(t *testing.T) {
+ defer tests.AddFixtures("models/user/fixtures/")()
+ require.NoError(t, unittest.PrepareTestDatabase())
+ testSuccess := func(opts *user_model.SearchUserOptions, expectedUserOrOrgIDs []int64) {
+ users, _, err := user_model.SearchUsers(db.DefaultContext, opts)
+ require.NoError(t, err)
+ caseText := fmt.Sprintf("ids: %v, opts: %v", expectedUserOrOrgIDs, opts)
+ if assert.Len(t, users, len(expectedUserOrOrgIDs), "case: %s", caseText) {
+ for i, expectedID := range expectedUserOrOrgIDs {
+ assert.EqualValues(t, expectedID, users[i].ID, "case: %s", caseText)
+ }
+ }
+ }
+
+ // test orgs
+ testOrgSuccess := func(opts *user_model.SearchUserOptions, expectedOrgIDs []int64) {
+ opts.Type = user_model.UserTypeOrganization
+ testSuccess(opts, expectedOrgIDs)
+ }
+
+ testOrgSuccess(&user_model.SearchUserOptions{OrderBy: "id ASC", ListOptions: db.ListOptions{Page: 1, PageSize: 2}},
+ []int64{3, 6})
+
+ testOrgSuccess(&user_model.SearchUserOptions{OrderBy: "id ASC", ListOptions: db.ListOptions{Page: 2, PageSize: 2}},
+ []int64{7, 17})
+
+ testOrgSuccess(&user_model.SearchUserOptions{OrderBy: "id ASC", ListOptions: db.ListOptions{Page: 3, PageSize: 2}},
+ []int64{19, 25})
+
+ testOrgSuccess(&user_model.SearchUserOptions{OrderBy: "id ASC", ListOptions: db.ListOptions{Page: 4, PageSize: 2}},
+ []int64{26, 41})
+
+ testOrgSuccess(&user_model.SearchUserOptions{ListOptions: db.ListOptions{Page: 5, PageSize: 2}},
+ []int64{})
+
+ // test users
+ testUserSuccess := func(opts *user_model.SearchUserOptions, expectedUserIDs []int64) {
+ opts.Type = user_model.UserTypeIndividual
+ testSuccess(opts, expectedUserIDs)
+ }
+
+ testUserSuccess(&user_model.SearchUserOptions{OrderBy: "id ASC", ListOptions: db.ListOptions{Page: 1}},
+ []int64{1, 2, 4, 5, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18, 20, 21, 24, 27, 28, 29, 30, 32, 34, 37, 38, 39, 40, 1041})
+
+ testUserSuccess(&user_model.SearchUserOptions{ListOptions: db.ListOptions{Page: 1}, IsActive: optional.Some(false)},
+ []int64{9})
+
+ testUserSuccess(&user_model.SearchUserOptions{OrderBy: "id ASC", ListOptions: db.ListOptions{Page: 1}, IsActive: optional.Some(true)},
+ []int64{1, 2, 4, 5, 8, 10, 11, 12, 13, 14, 15, 16, 18, 20, 21, 24, 27, 28, 29, 30, 32, 34, 37, 38, 39, 40, 1041})
+
+ testUserSuccess(&user_model.SearchUserOptions{Keyword: "user1", OrderBy: "id ASC", ListOptions: db.ListOptions{Page: 1}, IsActive: optional.Some(true)},
+ []int64{1, 10, 11, 12, 13, 14, 15, 16, 18})
+
+ // order by name asc default
+ testUserSuccess(&user_model.SearchUserOptions{Keyword: "user1", ListOptions: db.ListOptions{Page: 1}, IsActive: optional.Some(true)},
+ []int64{1, 10, 11, 12, 13, 14, 15, 16, 18})
+
+ testUserSuccess(&user_model.SearchUserOptions{ListOptions: db.ListOptions{Page: 1}, IsAdmin: optional.Some(true)},
+ []int64{1})
+
+ testUserSuccess(&user_model.SearchUserOptions{ListOptions: db.ListOptions{Page: 1}, IsRestricted: optional.Some(true)},
+ []int64{29})
+
+ testUserSuccess(&user_model.SearchUserOptions{ListOptions: db.ListOptions{Page: 1}, IsProhibitLogin: optional.Some(true)},
+ []int64{1041, 37})
+
+ testUserSuccess(&user_model.SearchUserOptions{ListOptions: db.ListOptions{Page: 1}, IsTwoFactorEnabled: optional.Some(true)},
+ []int64{24})
+}
+
+func TestEmailNotificationPreferences(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ for _, test := range []struct {
+ expected string
+ userID int64
+ }{
+ {user_model.EmailNotificationsEnabled, 1},
+ {user_model.EmailNotificationsEnabled, 2},
+ {user_model.EmailNotificationsOnMention, 3},
+ {user_model.EmailNotificationsOnMention, 4},
+ {user_model.EmailNotificationsEnabled, 5},
+ {user_model.EmailNotificationsEnabled, 6},
+ {user_model.EmailNotificationsDisabled, 7},
+ {user_model.EmailNotificationsEnabled, 8},
+ {user_model.EmailNotificationsOnMention, 9},
+ } {
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: test.userID})
+ assert.Equal(t, test.expected, user.EmailNotificationsPreference)
+ }
+}
+
+func TestHashPasswordDeterministic(t *testing.T) {
+ b := make([]byte, 16)
+ u := &user_model.User{}
+ algos := hash.RecommendedHashAlgorithms
+ for j := 0; j < len(algos); j++ {
+ u.PasswdHashAlgo = algos[j]
+ for i := 0; i < 50; i++ {
+ // generate a random password
+ rand.Read(b)
+ pass := string(b)
+
+ // save the current password in the user - hash it and store the result
+ u.SetPassword(pass)
+ r1 := u.Passwd
+
+ // run again
+ u.SetPassword(pass)
+ r2 := u.Passwd
+
+ // a fresh salt is generated on each SetPassword call, so the two hashes differ
+ assert.NotEqual(t, r1, r2)
+ assert.True(t, u.ValidatePassword(pass))
+ }
+ }
+}
+
+func BenchmarkHashPassword(b *testing.B) {
+ // BenchmarkHashPassword ensures that it takes a reasonable amount of time
+ // to hash a password - in order to protect from brute-force attacks.
+ pass := "password1337"
+ u := &user_model.User{Passwd: pass}
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ u.SetPassword(pass)
+ }
+}
+
+func TestNewGitSig(t *testing.T) {
+ users := make([]*user_model.User, 0, 20)
+ err := db.GetEngine(db.DefaultContext).Find(&users)
+ require.NoError(t, err)
+
+ for _, user := range users {
+ sig := user.NewGitSig()
+ assert.NotContains(t, sig.Name, "<")
+ assert.NotContains(t, sig.Name, ">")
+ assert.NotContains(t, sig.Name, "\n")
+ assert.NotEmpty(t, strings.TrimSpace(sig.Name))
+ }
+}
+
+func TestDisplayName(t *testing.T) {
+ users := make([]*user_model.User, 0, 20)
+ err := db.GetEngine(db.DefaultContext).Find(&users)
+ require.NoError(t, err)
+
+ for _, user := range users {
+ displayName := user.DisplayName()
+ assert.Equal(t, strings.TrimSpace(displayName), displayName)
+ if len(strings.TrimSpace(user.FullName)) == 0 {
+ assert.Equal(t, user.Name, displayName)
+ }
+ assert.NotEmpty(t, strings.TrimSpace(displayName))
+ }
+}
+
+func TestCreateUserInvalidEmail(t *testing.T) {
+ user := &user_model.User{
+ Name: "GiteaBot",
+ Email: "GiteaBot@gitea.io\r\n",
+ Passwd: ";p['////..-++']",
+ IsAdmin: false,
+ Theme: setting.UI.DefaultTheme,
+ MustChangePassword: false,
+ }
+
+ err := user_model.CreateUser(db.DefaultContext, user)
+ require.Error(t, err)
+ assert.True(t, user_model.IsErrEmailCharIsNotSupported(err))
+}
+
+func TestCreateUserEmailAlreadyUsed(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+
+ // add new user with user2's email
+ user.Name = "testuser"
+ user.LowerName = strings.ToLower(user.Name)
+ user.ID = 0
+ err := user_model.CreateUser(db.DefaultContext, user)
+ require.Error(t, err)
+ assert.True(t, user_model.IsErrEmailAlreadyUsed(err))
+}
+
+func TestCreateUserCustomTimestamps(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+
+ // Add new user with a custom creation timestamp.
+ var creationTimestamp timeutil.TimeStamp = 12345
+ user.Name = "testuser"
+ user.LowerName = strings.ToLower(user.Name)
+ user.ID = 0
+ user.Email = "unique@example.com"
+ user.CreatedUnix = creationTimestamp
+ err := user_model.CreateUser(db.DefaultContext, user)
+ require.NoError(t, err)
+
+ fetched, err := user_model.GetUserByID(context.Background(), user.ID)
+ require.NoError(t, err)
+ assert.Equal(t, creationTimestamp, fetched.CreatedUnix)
+ assert.Equal(t, creationTimestamp, fetched.UpdatedUnix)
+}
+
+func TestCreateUserWithoutCustomTimestamps(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+
+ // There is no way to use a mocked time for the XORM auto-time functionality,
+ // so use the real clock to approximate the expected timestamp.
+ timestampStart := time.Now().Unix()
+
+ // Add new user without a custom creation timestamp.
+ user.Name = "Testuser"
+ user.LowerName = strings.ToLower(user.Name)
+ user.ID = 0
+ user.Email = "unique@example.com"
+ user.CreatedUnix = 0
+ user.UpdatedUnix = 0
+ err := user_model.CreateUser(db.DefaultContext, user)
+ require.NoError(t, err)
+
+ timestampEnd := time.Now().Unix()
+
+ fetched, err := user_model.GetUserByID(context.Background(), user.ID)
+ require.NoError(t, err)
+
+ assert.LessOrEqual(t, timestampStart, fetched.CreatedUnix)
+ assert.LessOrEqual(t, fetched.CreatedUnix, timestampEnd)
+
+ assert.LessOrEqual(t, timestampStart, fetched.UpdatedUnix)
+ assert.LessOrEqual(t, fetched.UpdatedUnix, timestampEnd)
+}
+
+func TestGetUserIDsByNames(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ // ignore non existing
+ IDs, err := user_model.GetUserIDsByNames(db.DefaultContext, []string{"user1", "user2", "none_existing_user"}, true)
+ require.NoError(t, err)
+ assert.Equal(t, []int64{1, 2}, IDs)
+
+ // error when not ignoring non existing
+ IDs, err = user_model.GetUserIDsByNames(db.DefaultContext, []string{"user1", "do_not_exist"}, false)
+ require.Error(t, err)
+ assert.Equal(t, []int64(nil), IDs)
+}
+
+func TestGetMaileableUsersByIDs(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ results, err := user_model.GetMaileableUsersByIDs(db.DefaultContext, []int64{1, 4}, false)
+ require.NoError(t, err)
+ assert.Len(t, results, 1)
+ if len(results) > 0 {
+ assert.EqualValues(t, 1, results[0].ID)
+ }
+
+ results, err = user_model.GetMaileableUsersByIDs(db.DefaultContext, []int64{1, 4}, true)
+ require.NoError(t, err)
+ assert.Len(t, results, 2)
+ if len(results) > 1 {
+ assert.EqualValues(t, 1, results[0].ID)
+ assert.EqualValues(t, 4, results[1].ID)
+ }
+}
+
+func TestNewUserRedirect(t *testing.T) {
+ // redirect to a completely new name
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+ require.NoError(t, user_model.NewUserRedirect(db.DefaultContext, user.ID, user.Name, "newusername"))
+
+ unittest.AssertExistsAndLoadBean(t, &user_model.Redirect{
+ LowerName: user.LowerName,
+ RedirectUserID: user.ID,
+ })
+ unittest.AssertExistsAndLoadBean(t, &user_model.Redirect{
+ LowerName: "olduser1",
+ RedirectUserID: user.ID,
+ })
+}
+
+func TestNewUserRedirect2(t *testing.T) {
+ // redirect to previously used name
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+ require.NoError(t, user_model.NewUserRedirect(db.DefaultContext, user.ID, user.Name, "olduser1"))
+
+ unittest.AssertExistsAndLoadBean(t, &user_model.Redirect{
+ LowerName: user.LowerName,
+ RedirectUserID: user.ID,
+ })
+ unittest.AssertNotExistsBean(t, &user_model.Redirect{
+ LowerName: "olduser1",
+ RedirectUserID: user.ID,
+ })
+}
+
+func TestNewUserRedirect3(t *testing.T) {
+ // redirect for a previously-unredirected user
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+ require.NoError(t, user_model.NewUserRedirect(db.DefaultContext, user.ID, user.Name, "newusername"))
+
+ unittest.AssertExistsAndLoadBean(t, &user_model.Redirect{
+ LowerName: user.LowerName,
+ RedirectUserID: user.ID,
+ })
+}
+
+func TestGetUserByOpenID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ _, err := user_model.GetUserByOpenID(db.DefaultContext, "https://unknown")
+ if assert.Error(t, err) {
+ assert.True(t, user_model.IsErrUserNotExist(err))
+ }
+
+ user, err := user_model.GetUserByOpenID(db.DefaultContext, "https://user1.domain1.tld")
+ require.NoError(t, err)
+
+ assert.Equal(t, int64(1), user.ID)
+
+ user, err = user_model.GetUserByOpenID(db.DefaultContext, "https://domain1.tld/user2/")
+ require.NoError(t, err)
+
+ assert.Equal(t, int64(2), user.ID)
+}
+
+func TestFollowUser(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ testSuccess := func(followerID, followedID int64) {
+ require.NoError(t, user_model.FollowUser(db.DefaultContext, followerID, followedID))
+ unittest.AssertExistsAndLoadBean(t, &user_model.Follow{UserID: followerID, FollowID: followedID})
+ }
+ testSuccess(4, 2)
+ testSuccess(5, 2)
+
+ require.NoError(t, user_model.FollowUser(db.DefaultContext, 2, 2))
+
+ // Blocked user.
+ require.ErrorIs(t, user_model.ErrBlockedByUser, user_model.FollowUser(db.DefaultContext, 1, 4))
+ require.ErrorIs(t, user_model.ErrBlockedByUser, user_model.FollowUser(db.DefaultContext, 4, 1))
+ unittest.AssertNotExistsBean(t, &user_model.Follow{UserID: 1, FollowID: 4})
+ unittest.AssertNotExistsBean(t, &user_model.Follow{UserID: 4, FollowID: 1})
+
+ unittest.CheckConsistencyFor(t, &user_model.User{})
+}
+
+func TestUnfollowUser(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ testSuccess := func(followerID, followedID int64) {
+ require.NoError(t, user_model.UnfollowUser(db.DefaultContext, followerID, followedID))
+ unittest.AssertNotExistsBean(t, &user_model.Follow{UserID: followerID, FollowID: followedID})
+ }
+ testSuccess(4, 2)
+ testSuccess(5, 2)
+ testSuccess(2, 2)
+
+ unittest.CheckConsistencyFor(t, &user_model.User{})
+}
+
+func TestIsUserVisibleToViewer(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ user1 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1}) // admin, public
+ user4 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 4}) // normal, public
+ user20 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 20}) // public, same team as user31
+ user29 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 29}) // public, is restricted
+ user31 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 31}) // private, same team as user20
+ user33 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 33}) // limited, follows 31
+
+ test := func(u, viewer *user_model.User, expected bool) {
+ name := func(u *user_model.User) string {
+ if u == nil {
+ return "<nil>"
+ }
+ return u.Name
+ }
+ assert.Equal(t, expected, user_model.IsUserVisibleToViewer(db.DefaultContext, u, viewer), "user %v should be visible to viewer %v: %v", name(u), name(viewer), expected)
+ }
+
+ // admin viewer
+ test(user1, user1, true)
+ test(user20, user1, true)
+ test(user31, user1, true)
+ test(user33, user1, true)
+
+ // non admin viewer
+ test(user4, user4, true)
+ test(user20, user4, true)
+ test(user31, user4, false)
+ test(user33, user4, true)
+ test(user4, nil, true)
+
+ // public user
+ test(user4, user20, true)
+ test(user4, user31, true)
+ test(user4, user33, true)
+
+ // limited user
+ test(user33, user33, true)
+ test(user33, user4, true)
+ test(user33, user29, false)
+ test(user33, nil, false)
+
+ // private user
+ test(user31, user31, true)
+ test(user31, user4, false)
+ test(user31, user20, true)
+ test(user31, user29, false)
+ test(user31, user33, true)
+ test(user31, nil, false)
+}
+
+func TestGetAllAdmins(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ admins, err := user_model.GetAllAdmins(db.DefaultContext)
+ require.NoError(t, err)
+
+ assert.Len(t, admins, 1)
+ assert.Equal(t, int64(1), admins[0].ID)
+}
+
+func Test_ValidateUser(t *testing.T) {
+ oldSetting := setting.Service.AllowedUserVisibilityModesSlice
+ defer func() {
+ setting.Service.AllowedUserVisibilityModesSlice = oldSetting
+ }()
+ setting.Service.AllowedUserVisibilityModesSlice = []bool{true, false, true}
+ kases := map[*user_model.User]bool{
+ {ID: 1, Visibility: structs.VisibleTypePublic}: true,
+ {ID: 2, Visibility: structs.VisibleTypeLimited}: false,
+ {ID: 2, Visibility: structs.VisibleTypePrivate}: true,
+ }
+ for kase, expected := range kases {
+ assert.Equal(t, expected, user_model.ValidateUser(kase) == nil)
+ }
+}
+
+func Test_NormalizeUserFromEmail(t *testing.T) {
+ oldSetting := setting.Service.AllowDotsInUsernames
+ defer func() {
+ setting.Service.AllowDotsInUsernames = oldSetting
+ }()
+ setting.Service.AllowDotsInUsernames = true
+ testCases := []struct {
+ Input string
+ Expected string
+ IsNormalizedValid bool
+ }{
+ {"test", "test", true},
+ {"Sinéad.O'Connor", "Sinead.OConnor", true},
+ {"Æsir", "AEsir", true},
+ // \u00e9\u0065\u0301
+ {"éé", "ee", true},
+ {"Awareness Hub", "Awareness-Hub", true},
+ {"double__underscore", "double__underscore", false}, // We should consider squashing double non-alpha characters
+ {".bad.", ".bad.", false},
+ {"new😀user", "new😀user", false}, // No plans to support
+ }
+ for _, testCase := range testCases {
+ normalizedName, err := user_model.NormalizeUserName(testCase.Input)
+ require.NoError(t, err)
+ assert.EqualValues(t, testCase.Expected, normalizedName)
+ if testCase.IsNormalizedValid {
+ require.NoError(t, user_model.IsUsableUsername(normalizedName))
+ } else {
+ require.Error(t, user_model.IsUsableUsername(normalizedName))
+ }
+ }
+}
+
+func TestEmailTo(t *testing.T) {
+ testCases := []struct {
+ fullName string
+ mail string
+ result string
+ }{
+ {"Awareness Hub", "awareness@hub.net", `"Awareness Hub" <awareness@hub.net>`},
+ {"name@example.com", "name@example.com", "name@example.com"},
+ {"Hi Its <Mee>", "ee@mail.box", `"Hi Its Mee" <ee@mail.box>`},
+ {"Sinéad.O'Connor", "sinead.oconnor@gmail.com", "=?utf-8?b?U2luw6lhZC5PJ0Nvbm5vcg==?= <sinead.oconnor@gmail.com>"},
+ {"Æsir", "aesir@gmx.de", "=?utf-8?q?=C3=86sir?= <aesir@gmx.de>"},
+ {"new😀user", "new.user@alo.com", "=?utf-8?q?new=F0=9F=98=80user?= <new.user@alo.com>"}, // codespell-ignore
+ {`"quoted"`, "quoted@test.com", `"quoted" <quoted@test.com>`},
+ {`gusted`, "gusted@test.com", `"gusted" <gusted@test.com>`},
+ {`Joe Q. Public`, "john.q.public@example.com", `"Joe Q. Public" <john.q.public@example.com>`},
+ {`Who?`, "one@y.test", `"Who?" <one@y.test>`},
+ }
+
+ for _, testCase := range testCases {
+ t.Run(testCase.result, func(t *testing.T) {
+ testUser := &user_model.User{FullName: testCase.fullName, Email: testCase.mail}
+ assert.EqualValues(t, testCase.result, testUser.EmailTo())
+ })
+ }
+
+ t.Run("Override user's email", func(t *testing.T) {
+ testUser := &user_model.User{FullName: "Christine Jorgensen", Email: "christine@test.com"}
+ assert.EqualValues(t, `"Christine Jorgensen" <christine@example.org>`, testUser.EmailTo("christine@example.org"))
+ })
+}
+
+func TestDisabledUserFeatures(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ testValues := container.SetOf(setting.UserFeatureDeletion,
+ setting.UserFeatureManageSSHKeys,
+ setting.UserFeatureManageGPGKeys)
+
+ oldSetting := setting.Admin.ExternalUserDisableFeatures
+ defer func() {
+ setting.Admin.ExternalUserDisableFeatures = oldSetting
+ }()
+ setting.Admin.ExternalUserDisableFeatures = testValues
+
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+
+ assert.Empty(t, setting.Admin.UserDisabledFeatures.Values())
+
+ // no features should be disabled with a plain login type
+ assert.LessOrEqual(t, user.LoginType, auth.Plain)
+ assert.Empty(t, user_model.DisabledFeaturesWithLoginType(user).Values())
+ for _, f := range testValues.Values() {
+ assert.False(t, user_model.IsFeatureDisabledWithLoginType(user, f))
+ }
+
+ // check disabled features with external login type
+ user.LoginType = auth.OAuth2
+
+ // all features should be disabled
+ assert.NotEmpty(t, user_model.DisabledFeaturesWithLoginType(user).Values())
+ for _, f := range testValues.Values() {
+ assert.True(t, user_model.IsFeatureDisabledWithLoginType(user, f))
+ }
+}
+
+func TestGenerateEmailAuthorizationCode(t *testing.T) {
+ defer test.MockVariableValue(&setting.Service.ActiveCodeLives, 2)()
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+
+ code, err := user.GenerateEmailAuthorizationCode(db.DefaultContext, auth.UserActivation)
+ require.NoError(t, err)
+
+ lookupKey, validator, ok := strings.Cut(code, ":")
+ assert.True(t, ok)
+
+ rawValidator, err := hex.DecodeString(validator)
+ require.NoError(t, err)
+
+ authToken, err := auth.FindAuthToken(db.DefaultContext, lookupKey, auth.UserActivation)
+ require.NoError(t, err)
+ assert.False(t, authToken.IsExpired())
+ assert.EqualValues(t, authToken.HashedValidator, auth.HashValidator(rawValidator))
+
+ authToken.Expiry = authToken.Expiry.Add(-int64(setting.Service.ActiveCodeLives) * 60)
+ assert.True(t, authToken.IsExpired())
+}
+
+func TestVerifyUserAuthorizationToken(t *testing.T) {
+ defer test.MockVariableValue(&setting.Service.ActiveCodeLives, 2)()
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
+
+ code, err := user.GenerateEmailAuthorizationCode(db.DefaultContext, auth.UserActivation)
+ require.NoError(t, err)
+
+ lookupKey, _, ok := strings.Cut(code, ":")
+ assert.True(t, ok)
+
+ t.Run("Wrong purpose", func(t *testing.T) {
+ u, err := user_model.VerifyUserAuthorizationToken(db.DefaultContext, code, auth.PasswordReset, false)
+ require.NoError(t, err)
+ assert.Nil(t, u)
+ })
+
+ t.Run("No delete", func(t *testing.T) {
+ u, err := user_model.VerifyUserAuthorizationToken(db.DefaultContext, code, auth.UserActivation, false)
+ require.NoError(t, err)
+ assert.EqualValues(t, user.ID, u.ID)
+
+ authToken, err := auth.FindAuthToken(db.DefaultContext, lookupKey, auth.UserActivation)
+ require.NoError(t, err)
+ assert.NotNil(t, authToken)
+ })
+
+ t.Run("Delete", func(t *testing.T) {
+ u, err := user_model.VerifyUserAuthorizationToken(db.DefaultContext, code, auth.UserActivation, true)
+ require.NoError(t, err)
+ assert.EqualValues(t, user.ID, u.ID)
+
+ authToken, err := auth.FindAuthToken(db.DefaultContext, lookupKey, auth.UserActivation)
+ require.ErrorIs(t, err, util.ErrNotExist)
+ assert.Nil(t, authToken)
+ })
+}
+
+func TestGetInactiveUsers(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ // all inactive users
+ // user1's createdunix is 1730468968
+ users, err := user_model.GetInactiveUsers(db.DefaultContext, 0)
+ require.NoError(t, err)
+ assert.Len(t, users, 1)
+ interval := time.Now().Unix() - 1730468968 + 3600*24
+ users, err = user_model.GetInactiveUsers(db.DefaultContext, time.Duration(interval*int64(time.Second)))
+ require.NoError(t, err)
+ require.Empty(t, users)
+}
diff --git a/models/user/user_update.go b/models/user/user_update.go
new file mode 100644
index 0000000..66702e2
--- /dev/null
+++ b/models/user/user_update.go
@@ -0,0 +1,15 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+)
+
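+// IncrUserRepoNum increments the repository counter of the given user.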
+func IncrUserRepoNum(ctx context.Context, userID int64) error {
+ _, err := db.GetEngine(ctx).Incr("num_repos").ID(userID).Update(new(User))
+ return err
+}
diff --git a/models/webhook/hooktask.go b/models/webhook/hooktask.go
new file mode 100644
index 0000000..8734feb
--- /dev/null
+++ b/models/webhook/hooktask.go
@@ -0,0 +1,262 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package webhook
+
+import (
+ "context"
+ "errors"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/timeutil"
+ webhook_module "code.gitea.io/gitea/modules/webhook"
+
+ gouuid "github.com/google/uuid"
+ "xorm.io/builder"
+)
+
+// ___ ___ __ ___________ __
+// / | \ ____ ____ | | _\__ ___/____ _____| | __
+// / ~ \/ _ \ / _ \| |/ / | | \__ \ / ___/ |/ /
+// \ Y ( <_> | <_> ) < | | / __ \_\___ \| <
+// \___|_ / \____/ \____/|__|_ \ |____| (____ /____ >__|_ \
+// \/ \/ \/ \/ \/
+
+// HookRequest represents hook task request information.
+type HookRequest struct {
+ URL string `json:"url"`
+ HTTPMethod string `json:"http_method"`
+ Headers map[string]string `json:"headers"`
+ Body string `json:"body"`
+}
+
+// HookResponse represents hook task response information.
+type HookResponse struct {
+ Status int `json:"status"`
+ Headers map[string]string `json:"headers"`
+ Body string `json:"body"`
+}
+
+// HookTask represents a hook task.
+type HookTask struct {
+ ID int64 `xorm:"pk autoincr"`
+ HookID int64 `xorm:"index"`
+ UUID string `xorm:"unique"`
+ PayloadContent string `xorm:"LONGTEXT"`
+ // PayloadVersion number to allow for smooth version upgrades:
+ // - PayloadVersion 1: PayloadContent contains the JSON as sent to the URL
+ // - PayloadVersion 2: PayloadContent contains the original event
+ PayloadVersion int `xorm:"DEFAULT 1"`
+
+ EventType webhook_module.HookEventType
+ IsDelivered bool
+ Delivered timeutil.TimeStampNano
+
+ // History info.
+ IsSucceed bool
+ RequestContent string `xorm:"LONGTEXT"`
+ RequestInfo *HookRequest `xorm:"-"`
+ ResponseContent string `xorm:"LONGTEXT"`
+ ResponseInfo *HookResponse `xorm:"-"`
+}
+
+func init() {
+ db.RegisterModel(new(HookTask))
+}
+
+// BeforeUpdate will be invoked by XORM before updating a record
+// representing this object
+func (t *HookTask) BeforeUpdate() {
+ if t.RequestInfo != nil {
+ t.RequestContent = t.simpleMarshalJSON(t.RequestInfo)
+ }
+ if t.ResponseInfo != nil {
+ t.ResponseContent = t.simpleMarshalJSON(t.ResponseInfo)
+ }
+}
+
+// AfterLoad is invoked by XORM after loading a record; it restores
+// RequestInfo and ResponseInfo from their serialized columns
+func (t *HookTask) AfterLoad() {
+ if len(t.RequestContent) == 0 {
+ return
+ }
+
+ t.RequestInfo = &HookRequest{}
+ if err := json.Unmarshal([]byte(t.RequestContent), t.RequestInfo); err != nil {
+ log.Error("Unmarshal RequestContent[%d]: %v", t.ID, err)
+ }
+
+ if len(t.ResponseContent) > 0 {
+ t.ResponseInfo = &HookResponse{}
+ if err := json.Unmarshal([]byte(t.ResponseContent), t.ResponseInfo); err != nil {
+ log.Error("Unmarshal ResponseContent[%d]: %v", t.ID, err)
+ }
+ }
+}
+
+func (t *HookTask) simpleMarshalJSON(v any) string {
+ p, err := json.Marshal(v)
+ if err != nil {
+ log.Error("Marshal [%d]: %v", t.ID, err)
+ }
+ return string(p)
+}
+
+// HookTasks returns a page of hook tasks for the given hook, ordered by ID descending.
+func HookTasks(ctx context.Context, hookID int64, page int) ([]*HookTask, error) {
+ tasks := make([]*HookTask, 0, setting.Webhook.PagingNum)
+ return tasks, db.GetEngine(ctx).
+ Limit(setting.Webhook.PagingNum, (page-1)*setting.Webhook.PagingNum).
+ Where("hook_id=?", hookID).
+ Desc("id").
+ Find(&tasks)
+}
+
+// CreateHookTask creates a new hook task, assigning it a UUID and, if unset,
+// a delivery timestamp; PayloadVersion must already be set by the caller.
+func CreateHookTask(ctx context.Context, t *HookTask) (*HookTask, error) {
+ t.UUID = gouuid.New().String()
+ if t.Delivered == 0 {
+ t.Delivered = timeutil.TimeStampNanoNow()
+ }
+ if t.PayloadVersion == 0 {
+ return nil, errors.New("missing HookTask.PayloadVersion")
+ }
+ return t, db.Insert(ctx, t)
+}
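+
+// Creation sketch (hedged; the payload value is illustrative): callers must
+// set PayloadVersion explicitly, the UUID and delivery time are filled in:
+//
+//	task, err := CreateHookTask(ctx, &HookTask{
+//		HookID:         hook.ID,
+//		EventType:      webhook_module.HookEventPush,
+//		PayloadContent: payloadJSON,
+//		PayloadVersion: 2,
+//	})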
+
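+// GetHookTaskByID returns the hook task with the given ID.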
+func GetHookTaskByID(ctx context.Context, id int64) (*HookTask, error) {
+ t := &HookTask{}
+
+ has, err := db.GetEngine(ctx).ID(id).Get(t)
+ if err != nil {
+ return nil, err
+ }
+ if !has {
+ return nil, ErrHookTaskNotExist{
+ TaskID: id,
+ }
+ }
+ return t, nil
+}
+
+// UpdateHookTask updates information of hook task.
+func UpdateHookTask(ctx context.Context, t *HookTask) error {
+ _, err := db.GetEngine(ctx).ID(t.ID).AllCols().Update(t)
+ return err
+}
+
+// ReplayHookTask copies a hook task so that it gets re-delivered
+func ReplayHookTask(ctx context.Context, hookID int64, uuid string) (*HookTask, error) {
+ task, exist, err := db.Get[HookTask](ctx, builder.Eq{"hook_id": hookID, "uuid": uuid})
+ if err != nil {
+ return nil, err
+ } else if !exist {
+ return nil, ErrHookTaskNotExist{
+ HookID: hookID,
+ UUID: uuid,
+ }
+ }
+
+ return CreateHookTask(ctx, &HookTask{
+ HookID: task.HookID,
+ PayloadContent: task.PayloadContent,
+ EventType: task.EventType,
+ PayloadVersion: task.PayloadVersion,
+ })
+}
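+
+// Replay sketch (hedged): the copy deliberately drops all delivery state, so
+// the new task is queued like any fresh one:
+//
+//	newTask, err := ReplayHookTask(ctx, hook.ID, deliveryUUID)
+//	if IsErrHookTaskNotExist(err) {
+//		// nothing recorded under that UUID for this hook
+//	}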
+
+// FindUndeliveredHookTaskIDs finds the IDs of the next (up to 100) undelivered hook tasks with ID greater than the provided lowerID
+func FindUndeliveredHookTaskIDs(ctx context.Context, lowerID int64) ([]int64, error) {
+ const batchSize = 100
+
+ tasks := make([]int64, 0, batchSize)
+ return tasks, db.GetEngine(ctx).
+ Select("id").
+ Table(new(HookTask)).
+ Where("is_delivered=?", false).
+ And("id > ?", lowerID).
+ Asc("id").
+ Limit(batchSize).
+ Find(&tasks)
+}
+
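+// MarkTaskDelivered marks the task as delivered; it reports whether this
+// call claimed the task (false if it was already delivered).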
+func MarkTaskDelivered(ctx context.Context, task *HookTask) (bool, error) {
+ count, err := db.GetEngine(ctx).ID(task.ID).Where("is_delivered = ?", false).Cols("is_delivered").Update(&HookTask{
+ ID: task.ID,
+ IsDelivered: true,
+ })
+
+ return count != 0, err
+}
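+
+// A hedged sketch of the delivery loop these helpers enable: page through
+// undelivered IDs and only deliver a task after successfully claiming it,
+// so concurrent deliverers do not double-send:
+//
+//	lowerID := int64(0)
+//	for {
+//		ids, err := FindUndeliveredHookTaskIDs(ctx, lowerID)
+//		if err != nil || len(ids) == 0 {
+//			break
+//		}
+//		for _, id := range ids {
+//			task, err := GetHookTaskByID(ctx, id)
+//			if err != nil {
+//				continue
+//			}
+//			if claimed, _ := MarkTaskDelivered(ctx, task); claimed {
+//				// deliver(task); this process owns the delivery now
+//			}
+//		}
+//		lowerID = ids[len(ids)-1]
+//	}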
+
+// CleanupHookTaskTable deletes rows from hook_task as needed.
+func CleanupHookTaskTable(ctx context.Context, cleanupType HookTaskCleanupType, olderThan time.Duration, numberToKeep int) error {
+ log.Trace("Doing: CleanupHookTaskTable")
+
+ if cleanupType == OlderThan {
+ deleteOlderThan := time.Now().Add(-olderThan).UnixNano()
+ deletes, err := db.GetEngine(ctx).
+ Where("is_delivered = ? and delivered < ?", true, deleteOlderThan).
+ Delete(new(HookTask))
+ if err != nil {
+ return err
+ }
+ log.Trace("Deleted %d rows from hook_task", deletes)
+ } else if cleanupType == PerWebhook {
+ hookIDs := make([]int64, 0, 10)
+ err := db.GetEngine(ctx).
+ Table("webhook").
+ Where("id > 0").
+ Cols("id").
+ Find(&hookIDs)
+ if err != nil {
+ return err
+ }
+ for _, hookID := range hookIDs {
+ select {
+ case <-ctx.Done():
+ return db.ErrCancelledf("Before deleting hook_task records for hook id %d", hookID)
+ default:
+ }
+ if err = deleteDeliveredHookTasksByWebhook(ctx, hookID, numberToKeep); err != nil {
+ return err
+ }
+ }
+ }
+ log.Trace("Finished: CleanupHookTaskTable")
+ return nil
+}
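+
+// Cleanup sketch (hedged; the values are illustrative, in practice they come
+// from configuration): keep a week of rows, or ten deliveries per hook:
+//
+//	err := CleanupHookTaskTable(ctx, OlderThan, 7*24*time.Hour, 0)
+//	err = CleanupHookTaskTable(ctx, PerWebhook, 0, 10)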
+
+func deleteDeliveredHookTasksByWebhook(ctx context.Context, hookID int64, numberDeliveriesToKeep int) error {
+ log.Trace("Deleting hook_task rows for webhook %d, keeping the most recent %d deliveries", hookID, numberDeliveriesToKeep)
+ deliveryDates := make([]int64, 0, 10)
+ err := db.GetEngine(ctx).Table("hook_task").
+ Where("hook_task.hook_id = ? AND hook_task.is_delivered = ? AND hook_task.delivered is not null", hookID, true).
+ Cols("hook_task.delivered").
+ Join("INNER", "webhook", "hook_task.hook_id = webhook.id").
+ OrderBy("hook_task.delivered desc").
+ Limit(1, numberDeliveriesToKeep).
+ Find(&deliveryDates)
+ if err != nil {
+ return err
+ }
+
+ if len(deliveryDates) > 0 {
+ deletes, err := db.GetEngine(ctx).
+ Where("hook_id = ? and is_delivered = ? and delivered <= ?", hookID, true, deliveryDates[0]).
+ Delete(new(HookTask))
+ if err != nil {
+ return err
+ }
+ log.Trace("Deleted %d hook_task rows for webhook %d", deletes, hookID)
+ } else {
+ log.Trace("No hook_task rows to delete for webhook %d", hookID)
+ }
+
+ return nil
+}
diff --git a/models/webhook/main_test.go b/models/webhook/main_test.go
new file mode 100644
index 0000000..f19465d
--- /dev/null
+++ b/models/webhook/main_test.go
@@ -0,0 +1,19 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package webhook
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/unittest"
+)
+
+func TestMain(m *testing.M) {
+ unittest.MainTest(m, &unittest.TestOptions{
+ FixtureFiles: []string{
+ "webhook.yml",
+ "hook_task.yml",
+ },
+ })
+}
diff --git a/models/webhook/webhook.go b/models/webhook/webhook.go
new file mode 100644
index 0000000..f3370f3
--- /dev/null
+++ b/models/webhook/webhook.go
@@ -0,0 +1,516 @@
+// Copyright 2014 The Gogs Authors. All rights reserved.
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package webhook
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/optional"
+ "code.gitea.io/gitea/modules/secret"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+ webhook_module "code.gitea.io/gitea/modules/webhook"
+
+ "xorm.io/builder"
+)
+
+// ErrWebhookNotExist represents a "WebhookNotExist" kind of error.
+type ErrWebhookNotExist struct {
+ ID int64
+}
+
+// IsErrWebhookNotExist checks if an error is a ErrWebhookNotExist.
+func IsErrWebhookNotExist(err error) bool {
+ _, ok := err.(ErrWebhookNotExist)
+ return ok
+}
+
+func (err ErrWebhookNotExist) Error() string {
+ return fmt.Sprintf("webhook does not exist [id: %d]", err.ID)
+}
+
+func (err ErrWebhookNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ErrHookTaskNotExist represents a "HookTaskNotExist" kind of error.
+type ErrHookTaskNotExist struct {
+ TaskID int64
+ HookID int64
+ UUID string
+}
+
+// IsErrHookTaskNotExist checks if an error is a ErrHookTaskNotExist.
+func IsErrHookTaskNotExist(err error) bool {
+ _, ok := err.(ErrHookTaskNotExist)
+ return ok
+}
+
+func (err ErrHookTaskNotExist) Error() string {
+ return fmt.Sprintf("hook task does not exist [task: %d, hook: %d, uuid: %s]", err.TaskID, err.HookID, err.UUID)
+}
+
+func (err ErrHookTaskNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// HookContentType is the content type of a web hook
+type HookContentType int
+
+const (
+ // ContentTypeJSON is a JSON payload for web hooks
+ ContentTypeJSON HookContentType = iota + 1
+ // ContentTypeForm is a URL-encoded form payload for web hooks
+ ContentTypeForm
+)
+
+var hookContentTypes = map[string]HookContentType{
+ "json": ContentTypeJSON,
+ "form": ContentTypeForm,
+}
+
+// ToHookContentType returns HookContentType by given name.
+func ToHookContentType(name string) HookContentType {
+ return hookContentTypes[name]
+}
+
+// HookTaskCleanupType is the type of cleanup to perform on hook_task
+type HookTaskCleanupType int
+
+const (
+ // OlderThan removes hook_task rows older than a configured age
+ OlderThan HookTaskCleanupType = iota
+ // PerWebhook keeps only the most recent deliveries of each webhook and removes the rest
+ PerWebhook
+)
+
+var hookTaskCleanupTypes = map[string]HookTaskCleanupType{
+ "OlderThan": OlderThan,
+ "PerWebhook": PerWebhook,
+}
+
+// ToHookTaskCleanupType returns HookTaskCleanupType by given name.
+func ToHookTaskCleanupType(name string) HookTaskCleanupType {
+ return hookTaskCleanupTypes[name]
+}
+
+// Name returns the name of a given web hook's content type
+func (t HookContentType) Name() string {
+ switch t {
+ case ContentTypeJSON:
+ return "json"
+ case ContentTypeForm:
+ return "form"
+ }
+ return ""
+}
+
+// IsValidHookContentType returns true if given name is a valid hook content type.
+func IsValidHookContentType(name string) bool {
+ _, ok := hookContentTypes[name]
+ return ok
+}
+
+// Webhook represents a web hook object.
+type Webhook struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"INDEX"` // An ID of 0 indicates either a default or system webhook
+ OwnerID int64 `xorm:"INDEX"`
+ IsSystemWebhook bool
+ URL string `xorm:"url TEXT"`
+ HTTPMethod string `xorm:"http_method"`
+ ContentType HookContentType
+ Secret string `xorm:"TEXT"`
+ Events string `xorm:"TEXT"`
+ *webhook_module.HookEvent `xorm:"-"`
+ IsActive bool `xorm:"INDEX"`
+ Type webhook_module.HookType `xorm:"VARCHAR(16) 'type'"`
+ Meta string `xorm:"TEXT"` // store hook-specific attributes
+ LastStatus webhook_module.HookStatus // Last delivery status
+
+ // HeaderAuthorizationEncrypted should be accessed using HeaderAuthorization() and SetHeaderAuthorization()
+ HeaderAuthorizationEncrypted string `xorm:"TEXT"`
+
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+}
+
+func init() {
+ db.RegisterModel(new(Webhook))
+}
+
+// AfterLoad is invoked by XORM after loading a record; it restores HookEvent
+// from the serialized Events column
+func (w *Webhook) AfterLoad() {
+ w.HookEvent = &webhook_module.HookEvent{}
+ if err := json.Unmarshal([]byte(w.Events), w.HookEvent); err != nil {
+ log.Error("Unmarshal[%d]: %v", w.ID, err)
+ }
+}
+
+// History returns history of webhook by given conditions.
+func (w *Webhook) History(ctx context.Context, page int) ([]*HookTask, error) {
+ return HookTasks(ctx, w.ID, page)
+}
+
+// UpdateEvent handles conversion from HookEvent to Events.
+func (w *Webhook) UpdateEvent() error {
+ data, err := json.Marshal(w.HookEvent)
+ w.Events = string(data)
+ return err
+}
+
+// HasCreateEvent returns true if hook enabled create event.
+func (w *Webhook) HasCreateEvent() bool {
+ return w.SendEverything ||
+ (w.ChooseEvents && w.HookEvents.Create)
+}
+
+// HasDeleteEvent returns true if hook enabled delete event.
+func (w *Webhook) HasDeleteEvent() bool {
+ return w.SendEverything ||
+ (w.ChooseEvents && w.HookEvents.Delete)
+}
+
+// HasForkEvent returns true if hook enabled fork event.
+func (w *Webhook) HasForkEvent() bool {
+ return w.SendEverything ||
+ (w.ChooseEvents && w.HookEvents.Fork)
+}
+
+// HasIssuesEvent returns true if hook enabled issues event.
+func (w *Webhook) HasIssuesEvent() bool {
+ return w.SendEverything ||
+ (w.ChooseEvents && w.HookEvents.Issues)
+}
+
+// HasIssuesAssignEvent returns true if hook enabled issues assign event.
+func (w *Webhook) HasIssuesAssignEvent() bool {
+ return w.SendEverything ||
+ (w.ChooseEvents && w.HookEvents.IssueAssign)
+}
+
+// HasIssuesLabelEvent returns true if hook enabled issues label event.
+func (w *Webhook) HasIssuesLabelEvent() bool {
+ return w.SendEverything ||
+ (w.ChooseEvents && w.HookEvents.IssueLabel)
+}
+
+// HasIssuesMilestoneEvent returns true if hook enabled issues milestone event.
+func (w *Webhook) HasIssuesMilestoneEvent() bool {
+ return w.SendEverything ||
+ (w.ChooseEvents && w.HookEvents.IssueMilestone)
+}
+
+// HasIssueCommentEvent returns true if hook enabled issue_comment event.
+func (w *Webhook) HasIssueCommentEvent() bool {
+ return w.SendEverything ||
+ (w.ChooseEvents && w.HookEvents.IssueComment)
+}
+
+// HasPushEvent returns true if hook enabled push event.
+func (w *Webhook) HasPushEvent() bool {
+ return w.PushOnly || w.SendEverything ||
+ (w.ChooseEvents && w.HookEvents.Push)
+}
+
+// HasPullRequestEvent returns true if hook enabled pull request event.
+func (w *Webhook) HasPullRequestEvent() bool {
+ return w.SendEverything ||
+ (w.ChooseEvents && w.HookEvents.PullRequest)
+}
+
+// HasPullRequestAssignEvent returns true if hook enabled pull request assign event.
+func (w *Webhook) HasPullRequestAssignEvent() bool {
+ return w.SendEverything ||
+ (w.ChooseEvents && w.HookEvents.PullRequestAssign)
+}
+
+// HasPullRequestLabelEvent returns true if hook enabled pull request label event.
+func (w *Webhook) HasPullRequestLabelEvent() bool {
+ return w.SendEverything ||
+ (w.ChooseEvents && w.HookEvents.PullRequestLabel)
+}
+
+// HasPullRequestMilestoneEvent returns true if hook enabled pull request milestone event.
+func (w *Webhook) HasPullRequestMilestoneEvent() bool {
+ return w.SendEverything ||
+ (w.ChooseEvents && w.HookEvents.PullRequestMilestone)
+}
+
+// HasPullRequestCommentEvent returns true if hook enabled pull_request_comment event.
+func (w *Webhook) HasPullRequestCommentEvent() bool {
+ return w.SendEverything ||
+ (w.ChooseEvents && w.HookEvents.PullRequestComment)
+}
+
+// HasPullRequestApprovedEvent returns true if hook enabled pull request review event.
+func (w *Webhook) HasPullRequestApprovedEvent() bool {
+ return w.SendEverything ||
+ (w.ChooseEvents && w.HookEvents.PullRequestReview)
+}
+
+// HasPullRequestRejectedEvent returns true if hook enabled pull request review event.
+func (w *Webhook) HasPullRequestRejectedEvent() bool {
+ return w.SendEverything ||
+ (w.ChooseEvents && w.HookEvents.PullRequestReview)
+}
+
+// HasPullRequestReviewCommentEvent returns true if hook enabled pull request review event.
+func (w *Webhook) HasPullRequestReviewCommentEvent() bool {
+ return w.SendEverything ||
+ (w.ChooseEvents && w.HookEvents.PullRequestReview)
+}
+
+// HasPullRequestSyncEvent returns true if hook enabled pull request sync event.
+func (w *Webhook) HasPullRequestSyncEvent() bool {
+ return w.SendEverything ||
+ (w.ChooseEvents && w.HookEvents.PullRequestSync)
+}
+
+// HasWikiEvent returns true if hook enabled wiki event.
+func (w *Webhook) HasWikiEvent() bool {
+ return w.SendEverything ||
+ (w.ChooseEvents && w.HookEvent.Wiki)
+}
+
+// HasReleaseEvent returns true if hook enabled release event.
+func (w *Webhook) HasReleaseEvent() bool {
+ return w.SendEverything ||
+ (w.ChooseEvents && w.HookEvents.Release)
+}
+
+// HasRepositoryEvent returns true if hook enabled repository event.
+func (w *Webhook) HasRepositoryEvent() bool {
+ return w.SendEverything ||
+ (w.ChooseEvents && w.HookEvents.Repository)
+}
+
+// HasPackageEvent returns true if hook enabled package event.
+func (w *Webhook) HasPackageEvent() bool {
+ return w.SendEverything ||
+ (w.ChooseEvents && w.HookEvents.Package)
+}
+
+// HasPullRequestReviewRequestEvent returns true if hook enabled pull request review request event.
+func (w *Webhook) HasPullRequestReviewRequestEvent() bool {
+ return w.SendEverything ||
+ (w.ChooseEvents && w.HookEvents.PullRequestReviewRequest)
+}
+
+// EventCheckers returns event checkers
+func (w *Webhook) EventCheckers() []struct {
+ Has func() bool
+ Type webhook_module.HookEventType
+} {
+ return []struct {
+ Has func() bool
+ Type webhook_module.HookEventType
+ }{
+ {w.HasCreateEvent, webhook_module.HookEventCreate},
+ {w.HasDeleteEvent, webhook_module.HookEventDelete},
+ {w.HasForkEvent, webhook_module.HookEventFork},
+ {w.HasPushEvent, webhook_module.HookEventPush},
+ {w.HasIssuesEvent, webhook_module.HookEventIssues},
+ {w.HasIssuesAssignEvent, webhook_module.HookEventIssueAssign},
+ {w.HasIssuesLabelEvent, webhook_module.HookEventIssueLabel},
+ {w.HasIssuesMilestoneEvent, webhook_module.HookEventIssueMilestone},
+ {w.HasIssueCommentEvent, webhook_module.HookEventIssueComment},
+ {w.HasPullRequestEvent, webhook_module.HookEventPullRequest},
+ {w.HasPullRequestAssignEvent, webhook_module.HookEventPullRequestAssign},
+ {w.HasPullRequestLabelEvent, webhook_module.HookEventPullRequestLabel},
+ {w.HasPullRequestMilestoneEvent, webhook_module.HookEventPullRequestMilestone},
+ {w.HasPullRequestCommentEvent, webhook_module.HookEventPullRequestComment},
+ {w.HasPullRequestApprovedEvent, webhook_module.HookEventPullRequestReviewApproved},
+ {w.HasPullRequestRejectedEvent, webhook_module.HookEventPullRequestReviewRejected},
+ {w.HasPullRequestCommentEvent, webhook_module.HookEventPullRequestReviewComment},
+ {w.HasPullRequestSyncEvent, webhook_module.HookEventPullRequestSync},
+ {w.HasWikiEvent, webhook_module.HookEventWiki},
+ {w.HasRepositoryEvent, webhook_module.HookEventRepository},
+ {w.HasReleaseEvent, webhook_module.HookEventRelease},
+ {w.HasPackageEvent, webhook_module.HookEventPackage},
+ {w.HasPullRequestReviewRequestEvent, webhook_module.HookEventPullRequestReviewRequest},
+ }
+}
+
+// EventsArray returns an array of hook events
+func (w *Webhook) EventsArray() []string {
+ events := make([]string, 0, 7)
+
+ for _, c := range w.EventCheckers() {
+ if c.Has() {
+ events = append(events, string(c.Type))
+ }
+ }
+ return events
+}
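+
+// Dispatch sketch (hedged): EventCheckers pairs each predicate with its event
+// type, so deciding whether a hook fires for an event reduces to one scan:
+//
+//	for _, c := range w.EventCheckers() {
+//		if c.Type == event && c.Has() {
+//			// enqueue a HookTask for this webhook
+//		}
+//	}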
+
+// HeaderAuthorization returns the decrypted Authorization header.
+// It is declared on the value receiver rather than *Webhook so that it is
+// also accessible on WebhooksNew.
+func (w Webhook) HeaderAuthorization() (string, error) {
+ if w.HeaderAuthorizationEncrypted == "" {
+ return "", nil
+ }
+ return secret.DecryptSecret(setting.SecretKey, w.HeaderAuthorizationEncrypted)
+}
+
+// HeaderAuthorizationTrimPrefix returns the decrypted Authorization with a specified prefix trimmed.
+func (w Webhook) HeaderAuthorizationTrimPrefix(prefix string) (string, error) {
+ s, err := w.HeaderAuthorization()
+ if err != nil {
+ return "", err
+ }
+ return strings.TrimPrefix(s, prefix), nil
+}
+
+// SetHeaderAuthorization encrypts and sets the Authorization header.
+func (w *Webhook) SetHeaderAuthorization(cleartext string) error {
+ if cleartext == "" {
+ w.HeaderAuthorizationEncrypted = ""
+ return nil
+ }
+ ciphertext, err := secret.EncryptSecret(setting.SecretKey, cleartext)
+ if err != nil {
+ return err
+ }
+ w.HeaderAuthorizationEncrypted = ciphertext
+ return nil
+}
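+
+// Round-trip sketch (hedged; the token value is illustrative): the header is
+// stored encrypted with the instance SecretKey and decrypted on demand:
+//
+//	if err := w.SetHeaderAuthorization("Bearer " + token); err != nil {
+//		return err
+//	}
+//	tok, err := w.HeaderAuthorizationTrimPrefix("Bearer ")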
+
+// CreateWebhook creates a new web hook.
+func CreateWebhook(ctx context.Context, w *Webhook) error {
+ w.Type = strings.TrimSpace(w.Type)
+ return db.Insert(ctx, w)
+}
+
+// CreateWebhooks creates multiple web hooks
+func CreateWebhooks(ctx context.Context, ws []*Webhook) error {
+ // xorm returns err "no element on slice when insert" for empty slices.
+ if len(ws) == 0 {
+ return nil
+ }
+ for i := 0; i < len(ws); i++ {
+ ws[i].Type = strings.TrimSpace(ws[i].Type)
+ }
+ return db.Insert(ctx, ws)
+}
+
+// GetWebhookByID returns webhook by given ID.
+func GetWebhookByID(ctx context.Context, id int64) (*Webhook, error) {
+ bean := new(Webhook)
+ has, err := db.GetEngine(ctx).ID(id).Get(bean)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrWebhookNotExist{ID: id}
+ }
+ return bean, nil
+}
+
+// GetWebhookByRepoID returns webhook of repository by given ID.
+func GetWebhookByRepoID(ctx context.Context, repoID, id int64) (*Webhook, error) {
+ webhook := new(Webhook)
+ has, err := db.GetEngine(ctx).Where("id=? AND repo_id=?", id, repoID).Get(webhook)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrWebhookNotExist{ID: id}
+ }
+ return webhook, nil
+}
+
+// GetWebhookByOwnerID returns webhook of a user or organization by given ID.
+func GetWebhookByOwnerID(ctx context.Context, ownerID, id int64) (*Webhook, error) {
+ webhook := new(Webhook)
+ has, err := db.GetEngine(ctx).Where("id=? AND owner_id=?", id, ownerID).Get(webhook)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrWebhookNotExist{ID: id}
+ }
+ return webhook, nil
+}
+
+// ListWebhookOptions are options to filter webhooks on ListWebhooksByOpts
+type ListWebhookOptions struct {
+ db.ListOptions
+ RepoID int64
+ OwnerID int64
+ IsActive optional.Option[bool]
+}
+
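+// ToConds builds the query conditions from the options.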
+func (opts ListWebhookOptions) ToConds() builder.Cond {
+ cond := builder.NewCond()
+ if opts.RepoID != 0 {
+ cond = cond.And(builder.Eq{"webhook.repo_id": opts.RepoID})
+ }
+ if opts.OwnerID != 0 {
+ cond = cond.And(builder.Eq{"webhook.owner_id": opts.OwnerID})
+ }
+ if opts.IsActive.Has() {
+ cond = cond.And(builder.Eq{"webhook.is_active": opts.IsActive.Value()})
+ }
+ return cond
+}
+
+var _ db.FindOptionsOrder = ListWebhookOptions{}
+
+// ToOrders implements db.FindOptionsOrder, to sort the webhooks by id asc
+func (opts ListWebhookOptions) ToOrders() string {
+ return "webhook.id"
+}
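+
+// Listing sketch (hedged; assumes the generic db.Find helper used elsewhere
+// in this codebase): the options struct plugs into the shared finder, so
+// listing the active hooks of a repository becomes:
+//
+//	hooks, err := db.Find[Webhook](ctx, ListWebhookOptions{
+//		RepoID:   repo.ID,
+//		IsActive: optional.Some(true),
+//	})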
+
+// UpdateWebhook updates information of webhook.
+func UpdateWebhook(ctx context.Context, w *Webhook) error {
+ _, err := db.GetEngine(ctx).ID(w.ID).AllCols().Update(w)
+ return err
+}
+
+// UpdateWebhookLastStatus updates last status of webhook.
+func UpdateWebhookLastStatus(ctx context.Context, w *Webhook) error {
+ _, err := db.GetEngine(ctx).ID(w.ID).Cols("last_status").Update(w)
+ return err
+}
+
+// DeleteWebhookByID deletes the webhook with the given ID,
+// together with its hook tasks.
+func DeleteWebhookByID(ctx context.Context, id int64) (err error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if count, err := db.DeleteByID[Webhook](ctx, id); err != nil {
+ return err
+ } else if count == 0 {
+ return ErrWebhookNotExist{ID: id}
+ } else if _, err = db.DeleteByBean(ctx, &HookTask{HookID: id}); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
+
+// DeleteWebhookByRepoID deletes webhook of repository by given ID.
+func DeleteWebhookByRepoID(ctx context.Context, repoID, id int64) error {
+ if _, err := GetWebhookByRepoID(ctx, repoID, id); err != nil {
+ return err
+ }
+ return DeleteWebhookByID(ctx, id)
+}
+
+// DeleteWebhookByOwnerID deletes webhook of a user or organization by given ID.
+func DeleteWebhookByOwnerID(ctx context.Context, ownerID, id int64) error {
+ if _, err := GetWebhookByOwnerID(ctx, ownerID, id); err != nil {
+ return err
+ }
+ return DeleteWebhookByID(ctx, id)
+}
diff --git a/models/webhook/webhook_system.go b/models/webhook/webhook_system.go
new file mode 100644
index 0000000..62e8286
--- /dev/null
+++ b/models/webhook/webhook_system.go
@@ -0,0 +1,83 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package webhook
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+)
+
+// GetDefaultWebhooks returns all admin-default webhooks.
+func GetDefaultWebhooks(ctx context.Context) ([]*Webhook, error) {
+ return getAdminWebhooks(ctx, false)
+}
+
+// GetSystemOrDefaultWebhook returns admin system or default webhook by given ID.
+func GetSystemOrDefaultWebhook(ctx context.Context, id int64) (*Webhook, error) {
+ webhook := &Webhook{ID: id}
+ has, err := db.GetEngine(ctx).
+ Where("repo_id=? AND owner_id=?", 0, 0).
+ Get(webhook)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrWebhookNotExist{ID: id}
+ }
+ return webhook, nil
+}
+
+// GetSystemWebhooks returns all admin system webhooks.
+func GetSystemWebhooks(ctx context.Context, onlyActive bool) ([]*Webhook, error) {
+ return getAdminWebhooks(ctx, true, onlyActive)
+}
+
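+// getAdminWebhooks lists webhooks that are bound to neither a repository nor
+// an owner (repo_id and owner_id both 0). The variadic onlyActive parameter
+// acts as an optional boolean flag: when passed and true, only active
+// webhooks are returned.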
+func getAdminWebhooks(ctx context.Context, systemWebhooks bool, onlyActive ...bool) ([]*Webhook, error) {
+	webhooks := make([]*Webhook, 0, 5)
+	sess := db.GetEngine(ctx).
+		Where("repo_id=? AND owner_id=? AND is_system_webhook=?", 0, 0, systemWebhooks)
+	if len(onlyActive) > 0 && onlyActive[0] {
+		sess = sess.And("is_active=?", true)
+	}
+	return webhooks, sess.OrderBy("id").Find(&webhooks)
+}
+
+// DeleteDefaultSystemWebhook deletes an admin-configured default or system webhook (where owner and repo ID are both 0).
+func DeleteDefaultSystemWebhook(ctx context.Context, id int64) error {
+ return db.WithTx(ctx, func(ctx context.Context) error {
+ count, err := db.GetEngine(ctx).
+ Where("repo_id=? AND owner_id=?", 0, 0).
+ Delete(&Webhook{ID: id})
+ if err != nil {
+ return err
+ } else if count == 0 {
+ return ErrWebhookNotExist{ID: id}
+ }
+
+ _, err = db.DeleteByBean(ctx, &HookTask{HookID: id})
+ return err
+ })
+}
+
+// CopyDefaultWebhooksToRepo creates copies of the default webhooks in a new repo
+func CopyDefaultWebhooksToRepo(ctx context.Context, repoID int64) error {
+	ws, err := GetDefaultWebhooks(ctx)
+	if err != nil {
+		return fmt.Errorf("GetDefaultWebhooks: %w", err)
+	}
+
+	for _, w := range ws {
+		w.ID = 0
+		w.RepoID = repoID
+		if err := CreateWebhook(ctx, w); err != nil {
+			return fmt.Errorf("CreateWebhook: %w", err)
+		}
+	}
+	return nil
+}
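+
+// Usage sketch (hypothetical caller outside this package): a repository
+// creation service would typically copy the admin defaults right after the
+// repository row exists:
+//
+//	if err := webhook.CopyDefaultWebhooksToRepo(ctx, repo.ID); err != nil {
+//		return fmt.Errorf("CopyDefaultWebhooksToRepo: %w", err)
+//	}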
diff --git a/models/webhook/webhook_test.go b/models/webhook/webhook_test.go
new file mode 100644
index 0000000..848440b
--- /dev/null
+++ b/models/webhook/webhook_test.go
@@ -0,0 +1,350 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package webhook
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/optional"
+ "code.gitea.io/gitea/modules/timeutil"
+ webhook_module "code.gitea.io/gitea/modules/webhook"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestHookContentType_Name(t *testing.T) {
+ assert.Equal(t, "json", ContentTypeJSON.Name())
+ assert.Equal(t, "form", ContentTypeForm.Name())
+}
+
+func TestIsValidHookContentType(t *testing.T) {
+ assert.True(t, IsValidHookContentType("json"))
+ assert.True(t, IsValidHookContentType("form"))
+ assert.False(t, IsValidHookContentType("invalid"))
+}
+
+func TestWebhook_History(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ webhook := unittest.AssertExistsAndLoadBean(t, &Webhook{ID: 1})
+ tasks, err := webhook.History(db.DefaultContext, 0)
+ require.NoError(t, err)
+ if assert.Len(t, tasks, 3) {
+ assert.Equal(t, int64(3), tasks[0].ID)
+ assert.Equal(t, int64(2), tasks[1].ID)
+ assert.Equal(t, int64(1), tasks[2].ID)
+ }
+
+ webhook = unittest.AssertExistsAndLoadBean(t, &Webhook{ID: 2})
+ tasks, err = webhook.History(db.DefaultContext, 0)
+ require.NoError(t, err)
+ assert.Empty(t, tasks)
+}
+
+func TestWebhook_UpdateEvent(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ webhook := unittest.AssertExistsAndLoadBean(t, &Webhook{ID: 1})
+ hookEvent := &webhook_module.HookEvent{
+ PushOnly: true,
+ SendEverything: false,
+ ChooseEvents: false,
+ HookEvents: webhook_module.HookEvents{
+ Create: false,
+ Push: true,
+ PullRequest: false,
+ },
+ }
+ webhook.HookEvent = hookEvent
+ require.NoError(t, webhook.UpdateEvent())
+ assert.NotEmpty(t, webhook.Events)
+ actualHookEvent := &webhook_module.HookEvent{}
+ require.NoError(t, json.Unmarshal([]byte(webhook.Events), actualHookEvent))
+ assert.Equal(t, *hookEvent, *actualHookEvent)
+}
+
+func TestWebhook_EventsArray(t *testing.T) {
+ assert.Equal(t, []string{
+ "create", "delete", "fork", "push",
+ "issues", "issue_assign", "issue_label", "issue_milestone", "issue_comment",
+ "pull_request", "pull_request_assign", "pull_request_label", "pull_request_milestone",
+ "pull_request_comment", "pull_request_review_approved", "pull_request_review_rejected",
+ "pull_request_review_comment", "pull_request_sync", "wiki", "repository", "release",
+ "package", "pull_request_review_request",
+ },
+ (&Webhook{
+ HookEvent: &webhook_module.HookEvent{SendEverything: true},
+ }).EventsArray(),
+ )
+
+ assert.Equal(t, []string{"push"},
+ (&Webhook{
+ HookEvent: &webhook_module.HookEvent{PushOnly: true},
+ }).EventsArray(),
+ )
+}
+
+func TestCreateWebhook(t *testing.T) {
+	require.NoError(t, unittest.PrepareTestDatabase())
+ hook := &Webhook{
+ RepoID: 3,
+ URL: "www.example.com/unit_test",
+ ContentType: ContentTypeJSON,
+ Events: `{"push_only":false,"send_everything":false,"choose_events":false,"events":{"create":false,"push":true,"pull_request":true}}`,
+ }
+ unittest.AssertNotExistsBean(t, hook)
+ require.NoError(t, CreateWebhook(db.DefaultContext, hook))
+ unittest.AssertExistsAndLoadBean(t, hook)
+}
+
+func TestGetWebhookByRepoID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ hook, err := GetWebhookByRepoID(db.DefaultContext, 1, 1)
+ require.NoError(t, err)
+ assert.Equal(t, int64(1), hook.ID)
+
+ _, err = GetWebhookByRepoID(db.DefaultContext, unittest.NonexistentID, unittest.NonexistentID)
+ require.Error(t, err)
+ assert.True(t, IsErrWebhookNotExist(err))
+}
+
+func TestGetWebhookByOwnerID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ hook, err := GetWebhookByOwnerID(db.DefaultContext, 3, 3)
+ require.NoError(t, err)
+ assert.Equal(t, int64(3), hook.ID)
+
+ _, err = GetWebhookByOwnerID(db.DefaultContext, unittest.NonexistentID, unittest.NonexistentID)
+ require.Error(t, err)
+ assert.True(t, IsErrWebhookNotExist(err))
+}
+
+func TestGetActiveWebhooksByRepoID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ activateWebhook(t, 1)
+
+ hooks, err := db.Find[Webhook](db.DefaultContext, ListWebhookOptions{RepoID: 1, IsActive: optional.Some(true)})
+ require.NoError(t, err)
+ if assert.Len(t, hooks, 1) {
+ assert.Equal(t, int64(1), hooks[0].ID)
+ assert.True(t, hooks[0].IsActive)
+ }
+}
+
+func TestGetWebhooksByRepoID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ hooks, err := db.Find[Webhook](db.DefaultContext, ListWebhookOptions{RepoID: 1})
+ require.NoError(t, err)
+ if assert.Len(t, hooks, 2) {
+ assert.Equal(t, int64(1), hooks[0].ID)
+ assert.Equal(t, int64(2), hooks[1].ID)
+ }
+}
+
+func TestGetActiveWebhooksByOwnerID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ activateWebhook(t, 3)
+
+ hooks, err := db.Find[Webhook](db.DefaultContext, ListWebhookOptions{OwnerID: 3, IsActive: optional.Some(true)})
+ require.NoError(t, err)
+ if assert.Len(t, hooks, 1) {
+ assert.Equal(t, int64(3), hooks[0].ID)
+ assert.True(t, hooks[0].IsActive)
+ }
+}
+
+func activateWebhook(t *testing.T, hookID int64) {
+	t.Helper()
+	updated, err := db.GetEngine(db.DefaultContext).ID(hookID).Cols("is_active").Update(Webhook{IsActive: true})
+	require.NoError(t, err)
+	assert.Equal(t, int64(1), updated)
+}
+
+func TestGetWebhooksByOwnerID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ activateWebhook(t, 3)
+
+ hooks, err := db.Find[Webhook](db.DefaultContext, ListWebhookOptions{OwnerID: 3})
+ require.NoError(t, err)
+ if assert.Len(t, hooks, 1) {
+ assert.Equal(t, int64(3), hooks[0].ID)
+ assert.True(t, hooks[0].IsActive)
+ }
+}
+
+func TestUpdateWebhook(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ hook := unittest.AssertExistsAndLoadBean(t, &Webhook{ID: 2})
+ hook.IsActive = true
+ hook.ContentType = ContentTypeForm
+ unittest.AssertNotExistsBean(t, hook)
+ require.NoError(t, UpdateWebhook(db.DefaultContext, hook))
+ unittest.AssertExistsAndLoadBean(t, hook)
+}
+
+func TestDeleteWebhookByRepoID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ unittest.AssertExistsAndLoadBean(t, &Webhook{ID: 2, RepoID: 1})
+ require.NoError(t, DeleteWebhookByRepoID(db.DefaultContext, 1, 2))
+ unittest.AssertNotExistsBean(t, &Webhook{ID: 2, RepoID: 1})
+
+ err := DeleteWebhookByRepoID(db.DefaultContext, unittest.NonexistentID, unittest.NonexistentID)
+ require.Error(t, err)
+ assert.True(t, IsErrWebhookNotExist(err))
+}
+
+func TestDeleteWebhookByOwnerID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ unittest.AssertExistsAndLoadBean(t, &Webhook{ID: 3, OwnerID: 3})
+ require.NoError(t, DeleteWebhookByOwnerID(db.DefaultContext, 3, 3))
+ unittest.AssertNotExistsBean(t, &Webhook{ID: 3, OwnerID: 3})
+
+ err := DeleteWebhookByOwnerID(db.DefaultContext, unittest.NonexistentID, unittest.NonexistentID)
+ require.Error(t, err)
+ assert.True(t, IsErrWebhookNotExist(err))
+}
+
+func TestHookTasks(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ hookTasks, err := HookTasks(db.DefaultContext, 1, 1)
+ require.NoError(t, err)
+ if assert.Len(t, hookTasks, 3) {
+ assert.Equal(t, int64(3), hookTasks[0].ID)
+ assert.Equal(t, int64(2), hookTasks[1].ID)
+ assert.Equal(t, int64(1), hookTasks[2].ID)
+ }
+
+ hookTasks, err = HookTasks(db.DefaultContext, unittest.NonexistentID, 1)
+ require.NoError(t, err)
+ assert.Empty(t, hookTasks)
+}
+
+func TestCreateHookTask(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ hookTask := &HookTask{
+ HookID: 3,
+ PayloadVersion: 2,
+ }
+ unittest.AssertNotExistsBean(t, hookTask)
+ _, err := CreateHookTask(db.DefaultContext, hookTask)
+ require.NoError(t, err)
+ unittest.AssertExistsAndLoadBean(t, hookTask)
+}
+
+func TestUpdateHookTask(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ hook := unittest.AssertExistsAndLoadBean(t, &HookTask{ID: 1})
+ hook.PayloadContent = "new payload content"
+ hook.IsDelivered = true
+ unittest.AssertNotExistsBean(t, hook)
+ require.NoError(t, UpdateHookTask(db.DefaultContext, hook))
+ unittest.AssertExistsAndLoadBean(t, hook)
+}
+
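+// The cleanup tests below exercise both CleanupHookTaskTable policies:
+// PerWebhook keeps only the given number of most recent delivered tasks per
+// webhook, while OlderThan removes delivered tasks older than the given age.
+// In both modes, undelivered tasks are never cleaned up.
+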
+func TestCleanupHookTaskTable_PerWebhook_DeletesDelivered(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ hookTask := &HookTask{
+ HookID: 3,
+ IsDelivered: true,
+ Delivered: timeutil.TimeStampNanoNow(),
+ PayloadVersion: 2,
+ }
+ unittest.AssertNotExistsBean(t, hookTask)
+ _, err := CreateHookTask(db.DefaultContext, hookTask)
+ require.NoError(t, err)
+ unittest.AssertExistsAndLoadBean(t, hookTask)
+
+ require.NoError(t, CleanupHookTaskTable(context.Background(), PerWebhook, 168*time.Hour, 0))
+ unittest.AssertNotExistsBean(t, hookTask)
+}
+
+func TestCleanupHookTaskTable_PerWebhook_LeavesUndelivered(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ hookTask := &HookTask{
+ HookID: 4,
+ IsDelivered: false,
+ PayloadVersion: 2,
+ }
+ unittest.AssertNotExistsBean(t, hookTask)
+ _, err := CreateHookTask(db.DefaultContext, hookTask)
+ require.NoError(t, err)
+ unittest.AssertExistsAndLoadBean(t, hookTask)
+
+ require.NoError(t, CleanupHookTaskTable(context.Background(), PerWebhook, 168*time.Hour, 0))
+ unittest.AssertExistsAndLoadBean(t, hookTask)
+}
+
+func TestCleanupHookTaskTable_PerWebhook_LeavesMostRecentTask(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ hookTask := &HookTask{
+ HookID: 4,
+ IsDelivered: true,
+ Delivered: timeutil.TimeStampNanoNow(),
+ PayloadVersion: 2,
+ }
+ unittest.AssertNotExistsBean(t, hookTask)
+ _, err := CreateHookTask(db.DefaultContext, hookTask)
+ require.NoError(t, err)
+ unittest.AssertExistsAndLoadBean(t, hookTask)
+
+ require.NoError(t, CleanupHookTaskTable(context.Background(), PerWebhook, 168*time.Hour, 1))
+ unittest.AssertExistsAndLoadBean(t, hookTask)
+}
+
+func TestCleanupHookTaskTable_OlderThan_DeletesDelivered(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ hookTask := &HookTask{
+ HookID: 3,
+ IsDelivered: true,
+ Delivered: timeutil.TimeStampNano(time.Now().AddDate(0, 0, -8).UnixNano()),
+ PayloadVersion: 2,
+ }
+ unittest.AssertNotExistsBean(t, hookTask)
+ _, err := CreateHookTask(db.DefaultContext, hookTask)
+ require.NoError(t, err)
+ unittest.AssertExistsAndLoadBean(t, hookTask)
+
+ require.NoError(t, CleanupHookTaskTable(context.Background(), OlderThan, 168*time.Hour, 0))
+ unittest.AssertNotExistsBean(t, hookTask)
+}
+
+func TestCleanupHookTaskTable_OlderThan_LeavesUndelivered(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ hookTask := &HookTask{
+ HookID: 4,
+ IsDelivered: false,
+ PayloadVersion: 2,
+ }
+ unittest.AssertNotExistsBean(t, hookTask)
+ _, err := CreateHookTask(db.DefaultContext, hookTask)
+ require.NoError(t, err)
+ unittest.AssertExistsAndLoadBean(t, hookTask)
+
+ require.NoError(t, CleanupHookTaskTable(context.Background(), OlderThan, 168*time.Hour, 0))
+ unittest.AssertExistsAndLoadBean(t, hookTask)
+}
+
+func TestCleanupHookTaskTable_OlderThan_LeavesTaskEarlierThanAgeToDelete(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ hookTask := &HookTask{
+ HookID: 4,
+ IsDelivered: true,
+ Delivered: timeutil.TimeStampNano(time.Now().AddDate(0, 0, -6).UnixNano()),
+ PayloadVersion: 2,
+ }
+ unittest.AssertNotExistsBean(t, hookTask)
+ _, err := CreateHookTask(db.DefaultContext, hookTask)
+ require.NoError(t, err)
+ unittest.AssertExistsAndLoadBean(t, hookTask)
+
+ require.NoError(t, CleanupHookTaskTable(context.Background(), OlderThan, 168*time.Hour, 0))
+ unittest.AssertExistsAndLoadBean(t, hookTask)
+}