
Merge remote-tracking branch 'origin/main' into zzc/dev/sidebar_board_option

a1012112796 2024-04-23 09:44:07 +00:00
commit 092c46d18e
No known key found for this signature in database
GPG Key ID: E5FB19032C2C2A64
132 changed files with 244 additions and 297 deletions

View File

@@ -310,7 +310,7 @@ rules:
   jquery/no-merge: [2]
   jquery/no-param: [2]
   jquery/no-parent: [0]
-  jquery/no-parents: [0]
+  jquery/no-parents: [2]
   jquery/no-parse-html: [2]
   jquery/no-prop: [2]
   jquery/no-proxy: [2]
@@ -319,8 +319,8 @@ rules:
   jquery/no-show: [2]
   jquery/no-size: [2]
   jquery/no-sizzle: [2]
-  jquery/no-slide: [0]
-  jquery/no-submit: [0]
+  jquery/no-slide: [2]
+  jquery/no-submit: [2]
   jquery/no-text: [0]
   jquery/no-toggle: [2]
   jquery/no-trigger: [0]
@@ -458,7 +458,7 @@ rules:
   no-jquery/no-other-utils: [2]
   no-jquery/no-param: [2]
   no-jquery/no-parent: [0]
-  no-jquery/no-parents: [0]
+  no-jquery/no-parents: [2]
   no-jquery/no-parse-html-literal: [0]
   no-jquery/no-parse-html: [2]
   no-jquery/no-parse-json: [2]

View File

@@ -1,13 +1,14 @@
 linters:
+  enable-all: false
+  disable-all: true
+  fast: false
   enable:
     - bidichk
-    # - deadcode # deprecated - https://github.com/golangci/golangci-lint/issues/1841
     - depguard
     - dupl
     - errcheck
     - forbidigo
     - gocritic
-    # - gocyclo # The cyclomatic complexety of a lot of functions is too high, we should refactor those another time.
     - gofmt
     - gofumpt
     - gosimple
@@ -17,20 +18,18 @@ linters:
     - nolintlint
     - revive
     - staticcheck
-    # - structcheck # deprecated - https://github.com/golangci/golangci-lint/issues/1841
     - stylecheck
     - typecheck
     - unconvert
     - unused
-    # - varcheck # deprecated - https://github.com/golangci/golangci-lint/issues/1841
     - wastedassign
-  enable-all: false
-  disable-all: true
-  fast: false
 
 run:
   timeout: 10m
 
+output:
+  sort-results: true
+
 linters-settings:
   stylecheck:
     checks: ["all", "-ST1005", "-ST1003"]
@@ -47,27 +46,37 @@ linters-settings:
     errorCode: 1
     warningCode: 1
     rules:
+      - name: atomic
+      - name: bare-return
       - name: blank-imports
+      - name: constant-logical-expr
       - name: context-as-argument
       - name: context-keys-type
       - name: dot-imports
+      - name: duplicated-imports
+      - name: empty-lines
+      - name: error-naming
      - name: error-return
       - name: error-strings
-      - name: error-naming
+      - name: errorf
       - name: exported
+      - name: identical-branches
       - name: if-return
       - name: increment-decrement
-      - name: var-naming
-      - name: var-declaration
+      - name: indent-error-flow
+      - name: modifies-value-receiver
       - name: package-comments
       - name: range
       - name: receiver-naming
+      - name: redefines-builtin-id
+      - name: string-of-int
+      - name: superfluous-else
       - name: time-naming
+      - name: unconditional-recursion
       - name: unexported-return
-      - name: indent-error-flow
-      - name: errorf
-      - name: duplicated-imports
-      - name: modifies-value-receiver
+      - name: unreachable-code
+      - name: var-declaration
+      - name: var-naming
   gofumpt:
     extra-rules: true
   depguard:
@@ -93,8 +102,8 @@ issues:
   max-issues-per-linter: 0
   max-same-issues: 0
   exclude-dirs: [node_modules, public, web_src]
+  exclude-case-sensitive: true
   exclude-rules:
-    # Exclude some linters from running on tests files.
     - path: _test\.go
       linters:
         - gocyclo
@@ -112,19 +121,19 @@ issues:
     - path: cmd
       linters:
         - forbidigo
-    - linters:
+    - text: "webhook"
+      linters:
         - dupl
-      text: "webhook"
-    - linters:
+    - text: "`ID' should not be capitalized"
+      linters:
         - gocritic
-      text: "`ID' should not be capitalized"
-    - linters:
+    - text: "swagger"
+      linters:
         - unused
         - deadcode
-      text: "swagger"
-    - linters:
+    - text: "argument x is overwritten before first use"
+      linters:
         - staticcheck
-      text: "argument x is overwritten before first use"
     - text: "commentFormatting: put a space between `//` and comment text"
       linters:
         - gocritic
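
Several of the revive rules enabled above (notably indent-error-flow, superfluous-else and redefines-builtin-id) account for the mechanical cleanups in the Go hunks that follow. A minimal standalone sketch of the else-after-return shape those rules flag, not taken from the Gitea sources:

package main

import "errors"

var errMissing = errors.New("missing")

// Before: the happy path is wrapped in an else even though the error path
// already returned; revive's indent-error-flow reports this.
func lookupBefore(m map[string]int, key string) (int, error) {
    v, ok := m[key]
    if !ok {
        return 0, errMissing
    } else { // superfluous else
        return v, nil
    }
}

// After: drop the else and outdent the happy path, as done repeatedly in this commit.
func lookupAfter(m map[string]int, key string) (int, error) {
    v, ok := m[key]
    if !ok {
        return 0, errMissing
    }
    return v, nil
}

func main() {
    _, _ = lookupBefore(map[string]int{"a": 1}, "a")
    _, _ = lookupAfter(map[string]int{"a": 1}, "a")
}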

View File

@@ -465,7 +465,7 @@ func hookPrintResult(output, isCreate bool, branch, url string) {
     fmt.Fprintf(os.Stderr, " %s\n", url)
   }
   fmt.Fprintln(os.Stderr, "")
-  os.Stderr.Sync()
+  _ = os.Stderr.Sync()
 }
 
 func pushOptions() map[string]string {
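
The added `_ =` is the usual way to tell errcheck-style linters that the returned error is being ignored on purpose. A tiny sketch of the idiom, nothing Gitea-specific:

package main

import (
    "fmt"
    "os"
)

func main() {
    fmt.Fprintln(os.Stderr, "hook output goes to stderr")
    // Explicitly discard the error: Sync can fail when stderr is a pipe,
    // and there is nothing useful a git hook can do about that.
    _ = os.Stderr.Sync()
}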

View File

@@ -270,7 +270,7 @@ func CountRunnersWithoutBelongingOwner(ctx context.Context) (int64, error) {
   // Only affect action runners were a owner ID is set, as actions runners
   // could also be created on a repository.
   return db.GetEngine(ctx).Table("action_runner").
-    Join("LEFT", "user", "`action_runner`.owner_id = `user`.id").
+    Join("LEFT", "`user`", "`action_runner`.owner_id = `user`.id").
     Where("`action_runner`.owner_id != ?", 0).
     And(builder.IsNull{"`user`.id"}).
     Count(new(ActionRunner))
@@ -279,7 +279,7 @@ func CountRunnersWithoutBelongingOwner(ctx context.Context) (int64, error) {
 func FixRunnersWithoutBelongingOwner(ctx context.Context) (int64, error) {
   subQuery := builder.Select("`action_runner`.id").
     From("`action_runner`").
-    Join("LEFT", "user", "`action_runner`.owner_id = `user`.id").
+    Join("LEFT", "`user`", "`action_runner`.owner_id = `user`.id").
     Where(builder.Neq{"`action_runner`.owner_id": 0}).
     And(builder.IsNull{"`user`.id"})
   b := builder.Delete(builder.In("id", subQuery)).From("`action_runner`")
@@ -289,3 +289,25 @@ func FixRunnersWithoutBelongingOwner(ctx context.Context) (int64, error) {
   }
   return res.RowsAffected()
 }
+
+func CountRunnersWithoutBelongingRepo(ctx context.Context) (int64, error) {
+  return db.GetEngine(ctx).Table("action_runner").
+    Join("LEFT", "`repository`", "`action_runner`.repo_id = `repository`.id").
+    Where("`action_runner`.repo_id != ?", 0).
+    And(builder.IsNull{"`repository`.id"}).
+    Count(new(ActionRunner))
+}
+
+func FixRunnersWithoutBelongingRepo(ctx context.Context) (int64, error) {
+  subQuery := builder.Select("`action_runner`.id").
+    From("`action_runner`").
+    Join("LEFT", "`repository`", "`action_runner`.repo_id = `repository`.id").
+    Where(builder.Neq{"`action_runner`.repo_id": 0}).
+    And(builder.IsNull{"`repository`.id"})
+  b := builder.Delete(builder.In("id", subQuery)).From("`action_runner`")
+  res, err := db.GetEngine(ctx).Exec(b)
+  if err != nil {
+    return 0, err
+  }
+  return res.RowsAffected()
+}
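
Quoting `user` in the join matters because USER is a reserved word in some databases (PostgreSQL in particular), so the bare identifier can be misparsed; the new repo-oriented functions quote `repository` the same way for consistency. A self-contained sketch of the same query shape with xorm and xorm.io/builder, using an in-memory SQLite database and illustrative structs rather than Gitea's models:

package main

import (
    "fmt"

    _ "github.com/mattn/go-sqlite3"
    "xorm.io/builder"
    "xorm.io/xorm"
)

type ActionRunner struct {
    ID      int64 `xorm:"'id' pk autoincr"`
    OwnerID int64 `xorm:"'owner_id'"`
}

type User struct {
    ID int64 `xorm:"'id' pk autoincr"`
}

func main() {
    x, err := xorm.NewEngine("sqlite3", ":memory:")
    if err != nil {
        panic(err)
    }
    if err := x.Sync(new(ActionRunner), new(User)); err != nil {
        panic(err)
    }
    // Count runners whose owner row no longer exists; the backticks keep the
    // `user` table name from colliding with the SQL USER keyword.
    n, err := x.Table("action_runner").
        Join("LEFT", "`user`", "`action_runner`.owner_id = `user`.id").
        Where("`action_runner`.owner_id != ?", 0).
        And(builder.IsNull{"`user`.id"}).
        Count(new(ActionRunner))
    fmt.Println(n, err)
}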

View File

@ -110,7 +110,6 @@ func ParseCommitWithSignature(ctx context.Context, c *git.Commit) *CommitVerific
Reason: "gpg.error.no_committer_account", Reason: "gpg.error.no_committer_account",
} }
} }
} }
} }

View File

@@ -13,8 +13,6 @@ import (
   "github.com/stretchr/testify/assert"
 )
 
-//////////////////// Application
-
 func TestOAuth2Application_GenerateClientSecret(t *testing.T) {
   assert.NoError(t, unittest.PrepareTestDatabase())
   app := unittest.AssertExistsAndLoadBean(t, &auth_model.OAuth2Application{ID: 1})

View File

@ -227,7 +227,6 @@ func NamesToBean(names ...string) ([]any, error) {
// Need to map provided names to beans... // Need to map provided names to beans...
beanMap := make(map[string]any) beanMap := make(map[string]any)
for _, bean := range tables { for _, bean := range tables {
beanMap[strings.ToLower(reflect.Indirect(reflect.ValueOf(bean)).Type().Name())] = bean beanMap[strings.ToLower(reflect.Indirect(reflect.ValueOf(bean)).Type().Name())] = bean
beanMap[strings.ToLower(x.TableName(bean))] = bean beanMap[strings.ToLower(x.TableName(bean))] = bean
beanMap[strings.ToLower(x.TableName(bean, true))] = bean beanMap[strings.ToLower(x.TableName(bean, true))] = bean

View File

@ -345,11 +345,9 @@ func CreateReview(ctx context.Context, opts CreateReviewOptions) (*Review, error
return nil, err return nil, err
} }
} }
} else if opts.ReviewerTeam != nil { } else if opts.ReviewerTeam != nil {
review.Type = ReviewTypeRequest review.Type = ReviewTypeRequest
review.ReviewerTeamID = opts.ReviewerTeam.ID review.ReviewerTeamID = opts.ReviewerTeam.ID
} else { } else {
return nil, fmt.Errorf("provide either reviewer or reviewer team") return nil, fmt.Errorf("provide either reviewer or reviewer team")
} }

View File

@ -177,7 +177,6 @@ func RecreateTable(sess *xorm.Session, bean any) error {
log.Error("Unable to recreate uniques on table %s. Error: %v", tableName, err) log.Error("Unable to recreate uniques on table %s. Error: %v", tableName, err)
return err return err
} }
case setting.Database.Type.IsMySQL(): case setting.Database.Type.IsMySQL():
// MySQL will drop all the constraints on the old table // MySQL will drop all the constraints on the old table
if _, err := sess.Exec(fmt.Sprintf("DROP TABLE `%s`", tableName)); err != nil { if _, err := sess.Exec(fmt.Sprintf("DROP TABLE `%s`", tableName)); err != nil {
@ -228,7 +227,6 @@ func RecreateTable(sess *xorm.Session, bean any) error {
return err return err
} }
sequenceMap[sequence] = sequenceData sequenceMap[sequence] = sequenceData
} }
// CASCADE causes postgres to drop all the constraints on the old table // CASCADE causes postgres to drop all the constraints on the old table
@ -293,9 +291,7 @@ func RecreateTable(sess *xorm.Session, bean any) error {
return err return err
} }
} }
} }
case setting.Database.Type.IsMSSQL(): case setting.Database.Type.IsMSSQL():
// MSSQL will drop all the constraints on the old table // MSSQL will drop all the constraints on the old table
if _, err := sess.Exec(fmt.Sprintf("DROP TABLE `%s`", tableName)); err != nil { if _, err := sess.Exec(fmt.Sprintf("DROP TABLE `%s`", tableName)); err != nil {
@ -308,7 +304,6 @@ func RecreateTable(sess *xorm.Session, bean any) error {
log.Error("Unable to rename %s to %s. Error: %v", tempTableName, tableName, err) log.Error("Unable to rename %s to %s. Error: %v", tempTableName, tableName, err)
return err return err
} }
default: default:
log.Fatal("Unrecognized DB") log.Fatal("Unrecognized DB")
} }

View File

@@ -584,6 +584,8 @@ var migrations = []Migration{
   NewMigration("Add missing field of commit status summary table", v1_23.AddCommitStatusSummary2),
   // v297 -> v298
   NewMigration("Add everyone_access_mode for repo_unit", v1_23.AddRepoUnitEveryoneAccessMode),
+  // v298 -> v299
+  NewMigration("Drop wrongly created table o_auth2_application", v1_23.DropWronglyCreatedTable),
 }
 
 // GetCurrentDBVersion returns the current db version

View File

@ -262,7 +262,6 @@ func AddBranchProtectionCanPushAndEnableWhitelist(x *xorm.Engine) error {
for _, u := range units { for _, u := range units {
var found bool var found bool
for _, team := range teams { for _, team := range teams {
var teamU []*TeamUnit var teamU []*TeamUnit
var unitEnabled bool var unitEnabled bool
err = sess.Where("team_id = ?", team.ID).Find(&teamU) err = sess.Where("team_id = ?", team.ID).Find(&teamU)
@ -331,7 +330,6 @@ func AddBranchProtectionCanPushAndEnableWhitelist(x *xorm.Engine) error {
} }
if !protectedBranch.EnableApprovalsWhitelist { if !protectedBranch.EnableApprovalsWhitelist {
perm, err := getUserRepoPermission(sess, baseRepo, reviewer) perm, err := getUserRepoPermission(sess, baseRepo, reviewer)
if err != nil { if err != nil {
return false, err return false, err

View File

@@ -9,9 +9,9 @@ import (
 
 // AddConfidentialColumnToOAuth2ApplicationTable: add ConfidentialClient column, setting existing rows to true
 func AddConfidentialClientColumnToOAuth2ApplicationTable(x *xorm.Engine) error {
-  type OAuth2Application struct {
+  type oauth2Application struct {
+    ID                 int64
     ConfidentialClient bool `xorm:"NOT NULL DEFAULT TRUE"`
   }
-
-  return x.Sync(new(OAuth2Application))
+  return x.Sync(new(oauth2Application))
 }
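
The underlying bug: xorm derives a table name from a struct's name through its name mapper, while the production OAuth2Application model names its table oauth2_application explicitly. The migration's local type had no such override, so Sync created a stray o_auth2_application table instead of altering the real one; renaming the local type fixes the mapping, and the new v298 migration below drops the stray table. A standalone sketch of the name mapping (Gitea configures a Gonic-style mapper, which behaves like plain SnakeMapper for these two names):

package main

import (
    "fmt"

    "xorm.io/xorm/names"
)

func main() {
    m := names.SnakeMapper{}
    fmt.Println(m.Obj2Table("OAuth2Application")) // o_auth2_application (the stray table)
    fmt.Println(m.Obj2Table("oauth2Application")) // oauth2_application  (the real table)
}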

View File

@@ -13,12 +13,12 @@ import (
 
 func Test_AddConfidentialClientColumnToOAuth2ApplicationTable(t *testing.T) {
   // premigration
-  type OAuth2Application struct {
+  type oauth2Application struct {
     ID int64
   }
 
   // Prepare and load the testing database
-  x, deferable := base.PrepareTestEnv(t, 0, new(OAuth2Application))
+  x, deferable := base.PrepareTestEnv(t, 0, new(oauth2Application))
   defer deferable()
   if x == nil || t.Failed() {
     return
@@ -36,7 +36,7 @@ func Test_AddConfidentialClientColumnToOAuth2ApplicationTable(t *testing.T) {
   }
 
   got := []ExpectedOAuth2Application{}
-  if err := x.Table("o_auth2_application").Select("id, confidential_client").Find(&got); !assert.NoError(t, err) {
+  if err := x.Table("oauth2_application").Select("id, confidential_client").Find(&got); !assert.NoError(t, err) {
     return
   }

View File

@@ -104,7 +104,7 @@ func ChangeContainerMetadataMultiArch(x *xorm.Engine) error {
 
     // Convert to new metadata format
-    new := &MetadataNew{
+    newMetadata := &MetadataNew{
       Type:      old.Type,
       IsTagged:  old.IsTagged,
       Platform:  old.Platform,
@@ -119,7 +119,7 @@ func ChangeContainerMetadataMultiArch(x *xorm.Engine) error {
       Manifests: manifests,
     }
 
-    metadataJSON, err := json.Marshal(new)
+    metadataJSON, err := json.Marshal(newMetadata)
     if err != nil {
       return err
     }

View File

@@ -0,0 +1,10 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_23 //nolint
+
+import "xorm.io/xorm"
+
+func DropWronglyCreatedTable(x *xorm.Engine) error {
+  return x.DropTables("o_auth2_application")
+}

View File

@ -61,7 +61,6 @@ func AddScratchHash(x *xorm.Engine) error {
if _, err := sess.ID(tfa.ID).Cols("scratch_salt, scratch_hash").Update(tfa); err != nil { if _, err := sess.ID(tfa.ID).Cols("scratch_salt, scratch_hash").Update(tfa); err != nil {
return fmt.Errorf("couldn't add in scratch_hash and scratch_salt: %w", err) return fmt.Errorf("couldn't add in scratch_hash and scratch_salt: %w", err)
} }
} }
} }

View File

@ -81,7 +81,6 @@ func HashAppToken(x *xorm.Engine) error {
if _, err := sess.ID(token.ID).Cols("token_hash, token_salt, token_last_eight, sha1").Update(token); err != nil { if _, err := sess.ID(token.ID).Cols("token_hash, token_salt, token_last_eight, sha1").Update(token); err != nil {
return fmt.Errorf("couldn't add in sha1, token_hash, token_salt and token_last_eight: %w", err) return fmt.Errorf("couldn't add in sha1, token_hash, token_salt and token_last_eight: %w", err)
} }
} }
} }

View File

@@ -226,9 +226,8 @@ func GetTeamIDsByNames(ctx context.Context, orgID int64, names []string, ignoreN
     if err != nil {
       if ignoreNonExistent {
         continue
-      } else {
-        return nil, err
       }
+      return nil, err
     }
     ids = append(ids, u.ID)
   }

View File

@ -110,13 +110,11 @@ func createBoardsForProjectsType(ctx context.Context, project *Project) error {
var items []string var items []string
switch project.BoardType { switch project.BoardType {
case BoardTypeBugTriage: case BoardTypeBugTriage:
items = setting.Project.ProjectBoardBugTriageType items = setting.Project.ProjectBoardBugTriageType
case BoardTypeBasicKanban: case BoardTypeBasicKanban:
items = setting.Project.ProjectBoardBasicKanbanType items = setting.Project.ProjectBoardBasicKanbanType
case BoardTypeNone: case BoardTypeNone:
fallthrough fallthrough
default: default:

View File

@ -170,7 +170,6 @@ func GetReviewers(ctx context.Context, repo *Repository, doerID, posterID int64)
// the owner of a private repo needs to be explicitly added. // the owner of a private repo needs to be explicitly added.
cond = cond.Or(builder.Eq{"`user`.id": repo.Owner.ID}) cond = cond.Or(builder.Eq{"`user`.id": repo.Owner.ID})
} }
} else { } else {
// This is a "public" repository: // This is a "public" repository:
// Any user that has read access, is a watcher or organization member can be requested to review // Any user that has read access, is a watcher or organization member can be requested to review

View File

@@ -988,9 +988,8 @@ func GetUserIDsByNames(ctx context.Context, names []string, ignoreNonExistent bo
     if err != nil {
       if ignoreNonExistent {
         continue
-      } else {
-        return nil, err
       }
+      return nil, err
     }
     ids = append(ids, u.ID)
   }

View File

@@ -63,16 +63,16 @@ func NewComplexity() {
 func setupComplexity(values []string) {
   if len(values) != 1 || values[0] != "off" {
     for _, val := range values {
-      if complex, ok := charComplexities[val]; ok {
-        validChars += complex.ValidChars
-        requiredList = append(requiredList, complex)
+      if complexity, ok := charComplexities[val]; ok {
+        validChars += complexity.ValidChars
+        requiredList = append(requiredList, complexity)
       }
     }
     if len(requiredList) == 0 {
       // No valid character classes found; use all classes as default
-      for _, complex := range charComplexities {
-        validChars += complex.ValidChars
-        requiredList = append(requiredList, complex)
+      for _, complexity := range charComplexities {
+        validChars += complexity.ValidChars
+        requiredList = append(requiredList, complexity)
       }
     }
   }

View File

@@ -307,10 +307,10 @@ func ParseTreeLine(objectFormat ObjectFormat, rd *bufio.Reader, modeBuf, fnameBu
   // Deal with the binary hash
   idx = 0
-  len := objectFormat.FullLength() / 2
-  for idx < len {
+  length := objectFormat.FullLength() / 2
+  for idx < length {
     var read int
-    read, err = rd.Read(shaBuf[idx:len])
+    read, err = rd.Read(shaBuf[idx:length])
     n += read
     if err != nil {
       return mode, fname, sha, n, err
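
This rename, like the new, complexity and needToClose renames elsewhere in the commit, lines up with revive's redefines-builtin-id rule enabled in the lint configuration above: shadowing a predeclared identifier is legal Go, but the builtin becomes unusable in that scope. A minimal illustration, not Gitea code:

package main

import "fmt"

// shadowed shows the problem: len is redeclared as an int, so the builtin
// len() cannot be called anywhere in this function.
func shadowed() int {
    len := 8
    buf := make([]byte, len)
    // return len(buf) // would not compile: "cannot call non-function len"
    return cap(buf) + len
}

// renamed keeps the builtin usable.
func renamed() int {
    length := 8
    buf := make([]byte, length)
    return len(buf) + length
}

func main() {
    fmt.Println(shadowed(), renamed())
}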

View File

@@ -49,9 +49,8 @@ readLoop:
       if len(line) > 0 && line[0] == ' ' {
         _, _ = signatureSB.Write(line[1:])
         continue
-      } else {
-        pgpsig = false
       }
+      pgpsig = false
     }
     if !message {

View File

@ -232,7 +232,6 @@ func FindLFSFile(repo *git.Repository, objectID git.ObjectID) ([]*LFSResult, err
errChan <- err errChan <- err
break break
} }
} }
}() }()

View File

@@ -251,18 +251,18 @@ func (repo *Repository) CommitsByFileAndRange(opts CommitsByFileAndRangeOptions)
     return nil, err
   }
 
-  len := objectFormat.FullLength()
+  length := objectFormat.FullLength()
   commits := []*Commit{}
-  shaline := make([]byte, len+1)
+  shaline := make([]byte, length+1)
   for {
     n, err := io.ReadFull(stdoutReader, shaline)
-    if err != nil || n < len {
+    if err != nil || n < length {
       if err == io.EOF {
         err = nil
       }
       return commits, err
     }
-    objectID, err := NewIDFromString(string(shaline[0:len]))
+    objectID, err := NewIDFromString(string(shaline[0:length]))
     if err != nil {
       return nil, err
     }

View File

@ -64,7 +64,6 @@ func getRefURL(refURL, urlPrefix, repoFullName, sshDomain string) string {
// ex: git@try.gitea.io:go-gitea/gitea // ex: git@try.gitea.io:go-gitea/gitea
match := scpSyntax.FindAllStringSubmatch(refURI, -1) match := scpSyntax.FindAllStringSubmatch(refURI, -1)
if len(match) > 0 { if len(match) > 0 {
m := match[0] m := match[0]
refHostname := m[2] refHostname := m[2]
pth := m[3] pth := m[3]

View File

@ -191,7 +191,6 @@ func (b *Indexer) addDelete(filename string, repo *repo_model.Repository, batch
func (b *Indexer) Index(ctx context.Context, repo *repo_model.Repository, sha string, changes *internal.RepoChanges) error { func (b *Indexer) Index(ctx context.Context, repo *repo_model.Repository, sha string, changes *internal.RepoChanges) error {
batch := inner_bleve.NewFlushingBatch(b.inner.Indexer, maxBatchSize) batch := inner_bleve.NewFlushingBatch(b.inner.Indexer, maxBatchSize)
if len(changes.Updates) > 0 { if len(changes.Updates) > 0 {
// Now because of some insanity with git cat-file not immediately failing if not run in a valid git directory we need to run git rev-parse first! // Now because of some insanity with git cat-file not immediately failing if not run in a valid git directory we need to run git rev-parse first!
if err := git.EnsureValidGitRepository(ctx, repo.RepoPath()); err != nil { if err := git.EnsureValidGitRepository(ctx, repo.RepoPath()); err != nil {
log.Error("Unable to open git repo: %s for %-v: %v", repo.RepoPath(), repo, err) log.Error("Unable to open git repo: %s for %-v: %v", repo.RepoPath(), repo, err)
@ -335,7 +334,6 @@ func (b *Indexer) Search(ctx context.Context, opts *internal.SearchOptions) (int
if result, err = b.inner.Indexer.Search(facetRequest); err != nil { if result, err = b.inner.Indexer.Search(facetRequest); err != nil {
return 0, nil, nil, err return 0, nil, nil, err
} }
} }
languagesFacet := result.Facets["languages"] languagesFacet := result.Facets["languages"]
for _, term := range languagesFacet.Terms.Terms() { for _, term := range languagesFacet.Terms.Terms() {

View File

@@ -68,7 +68,7 @@ func ToSearchOptions(keyword string, opts *issues_model.IssuesOptions) *SearchOp
   searchOpt.Paginator = opts.Paginator
 
   switch opts.SortType {
-  case "":
+  case "", "latest":
     searchOpt.SortBy = SortByCreatedDesc
   case "oldest":
     searchOpt.SortBy = SortByCreatedAsc
@@ -86,7 +86,7 @@ func ToSearchOptions(keyword string, opts *issues_model.IssuesOptions) *SearchOp
     searchOpt.SortBy = SortByDeadlineDesc
   case "priority", "priorityrepo", "project-column-sorting":
     // Unsupported sort type for search
-    searchOpt.SortBy = SortByUpdatedDesc
+    fallthrough
   default:
     searchOpt.SortBy = SortByUpdatedDesc
   }
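
Grouping the unsupported sort keys with fallthrough avoids repeating the default assignment; in Go, fallthrough unconditionally transfers control to the next case, which may be the default clause. A small sketch with illustrative sort keys, not the indexer's real types:

package main

import "fmt"

func sortClause(key string) string {
    switch key {
    case "", "latest":
        return "created DESC"
    case "oldest":
        return "created ASC"
    case "priority", "project-column-sorting":
        // unsupported by this backend, treat like the default
        fallthrough
    default:
        return "updated DESC"
    }
}

func main() {
    fmt.Println(sortClause("latest"), sortClause("priority"), sortClause("anything"))
}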

View File

@ -145,7 +145,6 @@ func (b *Indexer) Search(ctx context.Context, options *internal.SearchOptions) (
query := elastic.NewBoolQuery() query := elastic.NewBoolQuery()
if options.Keyword != "" { if options.Keyword != "" {
searchType := esMultiMatchTypePhrasePrefix searchType := esMultiMatchTypePhrasePrefix
if options.IsFuzzyKeyword { if options.IsFuzzyKeyword {
searchType = esMultiMatchTypeBestFields searchType = esMultiMatchTypeBestFields

View File

@ -125,7 +125,6 @@ func EventFormatTextMessage(mode *WriterMode, event *Event, msgFormat string, ms
if mode.Colorize { if mode.Colorize {
buf = append(buf, resetBytes...) buf = append(buf, resetBytes...)
} }
} }
if flags&(Lshortfile|Llongfile) != 0 { if flags&(Lshortfile|Llongfile) != 0 {
if mode.Colorize { if mode.Colorize {

View File

@ -466,7 +466,6 @@ func TestColorPreview(t *testing.T) {
res, err := markdown.RenderString(&markup.RenderContext{Ctx: git.DefaultContext}, test.testcase) res, err := markdown.RenderString(&markup.RenderContext{Ctx: git.DefaultContext}, test.testcase)
assert.NoError(t, err, "Unexpected error in testcase: %q", test.testcase) assert.NoError(t, err, "Unexpected error in testcase: %q", test.testcase)
assert.Equal(t, template.HTML(test.expected), res, "Unexpected result in testcase %q", test.testcase) assert.Equal(t, template.HTML(test.expected), res, "Unexpected result in testcase %q", test.testcase)
} }
negativeTests := []string{ negativeTests := []string{
@ -549,7 +548,6 @@ func TestMathBlock(t *testing.T) {
res, err := markdown.RenderString(&markup.RenderContext{Ctx: git.DefaultContext}, test.testcase) res, err := markdown.RenderString(&markup.RenderContext{Ctx: git.DefaultContext}, test.testcase)
assert.NoError(t, err, "Unexpected error in testcase: %q", test.testcase) assert.NoError(t, err, "Unexpected error in testcase: %q", test.testcase)
assert.Equal(t, template.HTML(test.expected), res, "Unexpected result in testcase %q", test.testcase) assert.Equal(t, template.HTML(test.expected), res, "Unexpected result in testcase %q", test.testcase)
} }
} }

View File

@ -147,35 +147,35 @@ func (e *MarshalEncoder) marshalIntInternal(i int64) error {
return e.w.WriteByte(byte(i - 5)) return e.w.WriteByte(byte(i - 5))
} }
var len int var length int
if 122 < i && i <= 0xff { if 122 < i && i <= 0xff {
len = 1 length = 1
} else if 0xff < i && i <= 0xffff { } else if 0xff < i && i <= 0xffff {
len = 2 length = 2
} else if 0xffff < i && i <= 0xffffff { } else if 0xffff < i && i <= 0xffffff {
len = 3 length = 3
} else if 0xffffff < i && i <= 0x3fffffff { } else if 0xffffff < i && i <= 0x3fffffff {
len = 4 length = 4
} else if -0x100 <= i && i < -123 { } else if -0x100 <= i && i < -123 {
len = -1 length = -1
} else if -0x10000 <= i && i < -0x100 { } else if -0x10000 <= i && i < -0x100 {
len = -2 length = -2
} else if -0x1000000 <= i && i < -0x100000 { } else if -0x1000000 <= i && i < -0x100000 {
len = -3 length = -3
} else if -0x40000000 <= i && i < -0x1000000 { } else if -0x40000000 <= i && i < -0x1000000 {
len = -4 length = -4
} else { } else {
return ErrInvalidIntRange return ErrInvalidIntRange
} }
if err := e.w.WriteByte(byte(len)); err != nil { if err := e.w.WriteByte(byte(length)); err != nil {
return err return err
} }
if len < 0 { if length < 0 {
len = -len length = -length
} }
for c := 0; c < len; c++ { for c := 0; c < length; c++ {
if err := e.w.WriteByte(byte(i >> uint(8*c) & 0xff)); err != nil { if err := e.w.WriteByte(byte(i >> uint(8*c) & 0xff)); err != nil {
return err return err
} }
@ -244,13 +244,13 @@ func (e *MarshalEncoder) marshalArray(arr reflect.Value) error {
return err return err
} }
len := arr.Len() length := arr.Len()
if err := e.marshalIntInternal(int64(len)); err != nil { if err := e.marshalIntInternal(int64(length)); err != nil {
return err return err
} }
for i := 0; i < len; i++ { for i := 0; i < length; i++ {
if err := e.marshal(arr.Index(i).Interface()); err != nil { if err := e.marshal(arr.Index(i).Interface()); err != nil {
return err return err
} }

View File

@ -339,7 +339,6 @@ func (pm *Manager) ProcessStacktraces(flat, noSystem bool) ([]*Process, int, int
} }
sort.Slice(processes, after(processes)) sort.Slice(processes, after(processes))
if !flat { if !flat {
var sortChildren func(process *Process) var sortChildren func(process *Process)
sortChildren = func(process *Process) { sortChildren = func(process *Process) {

View File

@@ -63,6 +63,8 @@ func (q *WorkerPoolQueue[T]) doDispatchBatchToWorker(wg *workerGroup[T], flushCh
   // TODO: the logic could be improved in the future, to avoid a data-race between "doStartNewWorker" and "workerNum"
   // The root problem is that if we skip "doStartNewWorker" here, the "workerNum" might be decreased by other workers later
   // So ideally, it should check whether there are enough workers by some approaches, and start new workers if necessary.
+  // This data-race is not serious, as long as a new worker will be started soon to make sure there are enough workers,
+  // so no need to hugely refactor at the moment.
   q.workerNumMu.Lock()
   noWorker := q.workerNum == 0
   if full || noWorker {
@@ -136,6 +138,14 @@ func (q *WorkerPoolQueue[T]) basePushForShutdown(items ...T) bool {
   return true
 }
 
+func resetIdleTicker(t *time.Ticker, dur time.Duration) {
+  t.Reset(dur)
+  select {
+  case <-t.C:
+  default:
+  }
+}
+
 // doStartNewWorker starts a new worker for the queue, the worker reads from worker's channel and handles the items.
 func (q *WorkerPoolQueue[T]) doStartNewWorker(wp *workerGroup[T]) {
   wp.wg.Add(1)
@@ -146,8 +156,6 @@ func (q *WorkerPoolQueue[T]) doStartNewWorker(wp *workerGroup[T]) {
     log.Debug("Queue %q starts new worker", q.GetName())
     defer log.Debug("Queue %q stops idle worker", q.GetName())
 
-    atomic.AddInt32(&q.workerStartedCounter, 1) // Only increase counter, used for debugging
-
     t := time.NewTicker(workerIdleDuration)
     defer t.Stop()
 
@@ -169,11 +177,7 @@ func (q *WorkerPoolQueue[T]) doStartNewWorker(wp *workerGroup[T]) {
       }
       q.doWorkerHandle(batch)
       // reset the idle ticker, and drain the tick after reset in case a tick is already triggered
-      t.Reset(workerIdleDuration)
-      select {
-      case <-t.C:
-      default:
-      }
+      resetIdleTicker(t, workerIdleDuration) // key code for TestWorkerPoolQueueWorkerIdleReset
     case <-t.C:
       q.workerNumMu.Lock()
       keepWorking = q.workerNum <= 1 // keep the last worker running
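
resetIdleTicker factors out the reset-then-drain idiom: time.Ticker.Reset does not clear a tick that already fired, so without the non-blocking receive a stale tick left in t.C would make the idle timeout appear to expire immediately. A standalone demonstration of why the drain is needed, not Gitea code:

package main

import (
    "fmt"
    "time"
)

// Same shape as the helper added above: restart the ticker and discard a tick
// that may already be buffered in its channel.
func resetIdleTicker(t *time.Ticker, dur time.Duration) {
    t.Reset(dur)
    select {
    case <-t.C:
    default:
    }
}

func main() {
    t := time.NewTicker(20 * time.Millisecond)
    defer t.Stop()

    time.Sleep(30 * time.Millisecond) // one tick is now buffered in t.C

    resetIdleTicker(t, time.Second) // with a bare t.Reset the stale tick would survive

    select {
    case <-t.C:
        fmt.Println("stale tick observed: the idle timer fired immediately")
    case <-time.After(100 * time.Millisecond):
        fmt.Println("no stale tick: the idle timer really restarted from zero")
    }
}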

View File

@@ -40,8 +40,6 @@ type WorkerPoolQueue[T any] struct {
   workerMaxNum    int
   workerActiveNum int
   workerNumMu     sync.Mutex
-
-  workerStartedCounter int32
 }
 
 type flushType chan struct{}

View File

@@ -5,8 +5,10 @@ package queue
 
 import (
   "context"
+  "slices"
   "strconv"
   "sync"
+  "sync/atomic"
   "testing"
   "time"
@@ -250,23 +252,34 @@ func TestWorkerPoolQueueShutdown(t *testing.T) {
 
 func TestWorkerPoolQueueWorkerIdleReset(t *testing.T) {
   defer test.MockVariableValue(&workerIdleDuration, 10*time.Millisecond)()
-  defer mockBackoffDuration(10 * time.Millisecond)()
+  defer mockBackoffDuration(5 * time.Millisecond)()
+
+  var q *WorkerPoolQueue[int]
+  var handledCount atomic.Int32
+  var hasOnlyOneWorkerRunning atomic.Bool
   handler := func(items ...int) (unhandled []int) {
-    time.Sleep(50 * time.Millisecond)
+    handledCount.Add(int32(len(items)))
+    // make each work have different duration, and check the active worker number periodically
+    var activeNums []int
+    for i := 0; i < 5-items[0]%2; i++ {
+      time.Sleep(workerIdleDuration * 2)
+      activeNums = append(activeNums, q.GetWorkerActiveNumber())
+    }
+    // When the queue never becomes empty, the existing workers should keep working
+    // It is not 100% true at the moment because the data-race in workergroup.go is not resolved, see that TODO
+    // If the "active worker numbers" is like [2 2 ... 1 1], it means that an existing worker exited and no new worker is started.
+    if slices.Equal([]int{1, 1}, activeNums[len(activeNums)-2:]) {
+      hasOnlyOneWorkerRunning.Store(true)
+    }
     return nil
   }
-  q, _ := newWorkerPoolQueueForTest("test-workpoolqueue", setting.QueueSettings{Type: "channel", BatchLength: 1, MaxWorkers: 2, Length: 100}, handler, false)
+  q, _ = newWorkerPoolQueueForTest("test-workpoolqueue", setting.QueueSettings{Type: "channel", BatchLength: 1, MaxWorkers: 2, Length: 100}, handler, false)
   stop := runWorkerPoolQueue(q)
-  for i := 0; i < 20; i++ {
+  for i := 0; i < 100; i++ {
     assert.NoError(t, q.Push(i))
   }
   time.Sleep(500 * time.Millisecond)
-  assert.EqualValues(t, 2, q.GetWorkerNumber())
-  assert.EqualValues(t, 2, q.GetWorkerActiveNumber())
-  // when the queue never becomes empty, the existing workers should keep working
-  assert.EqualValues(t, 2, q.workerStartedCounter)
+  assert.Greater(t, int(handledCount.Load()), 4) // make sure there are enough items handled during the test
+  assert.False(t, hasOnlyOneWorkerRunning.Load(), "a slow handler should not block other workers from starting")
   stop()
 }

View File

@ -32,7 +32,6 @@ func CreateTemporaryPath(prefix string) (string, error) {
if err != nil { if err != nil {
log.Error("Unable to create temporary directory: %s-*.git (%v)", prefix, err) log.Error("Unable to create temporary directory: %s-*.git (%v)", prefix, err)
return "", fmt.Errorf("Failed to create dir %s-*.git: %w", prefix, err) return "", fmt.Errorf("Failed to create dir %s-*.git: %w", prefix, err)
} }
return basePath, nil return basePath, nil
} }

View File

@@ -19,9 +19,8 @@ func loadTimeFrom(rootCfg ConfigProvider) {
     DefaultUILocation, err = time.LoadLocation(zone)
     if err != nil {
       log.Fatal("Load time zone failed: %v", err)
-    } else {
-      log.Info("Default UI Location is %v", zone)
     }
+    log.Info("Default UI Location is %v", zone)
   }
   if DefaultUILocation == nil {
     DefaultUILocation = time.Local

View File

@@ -138,10 +138,9 @@ func wrapTmplErrMsg(msg string) {
   if setting.IsProd {
     // in prod mode, Gitea must have correct templates to run
     log.Fatal("Gitea can't run with template errors: %s", msg)
-  } else {
-    // in dev mode, do not need to really exit, because the template errors could be fixed by developer soon and the templates get reloaded
-    log.Error("There are template errors but Gitea continues to run in dev mode: %s", msg)
   }
+  // in dev mode, do not need to really exit, because the template errors could be fixed by developer soon and the templates get reloaded
+  log.Error("There are template errors but Gitea continues to run in dev mode: %s", msg)
 }
type templateErrorPrettier struct { type templateErrorPrettier struct {

View File

@@ -84,9 +84,8 @@ func Mailer(ctx context.Context) (*texttmpl.Template, *template.Template) {
       if err = buildSubjectBodyTemplate(subjectTemplates, bodyTemplates, tmplName, content); err != nil {
         if firstRun {
           log.Fatal("Failed to parse mail template, err: %v", err)
-        } else {
-          log.Error("Failed to parse mail template, err: %v", err)
         }
+        log.Error("Failed to parse mail template, err: %v", err)
       }
     }
   }
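
The else branches removed in the time zone, template and mail template hunks were dead weight for the same reason: log.Fatal exits the process, so anything after the if only runs when the fatal path was not taken. A small sketch with the standard library logger, which behaves the same way; the file name is illustrative:

package main

import (
    "log"
    "os"
    "path/filepath"
)

func mustRead(path string) []byte {
    data, err := os.ReadFile(path)
    if err != nil {
        // log.Fatalf never returns (it calls os.Exit), so wrapping the success
        // path below in an else would only add indentation.
        log.Fatalf("cannot read %q: %v", path, err)
    }
    log.Printf("loaded %d bytes from %q", len(data), path)
    return data
}

func main() {
    path := filepath.Join(os.TempDir(), "example.ini")
    if err := os.WriteFile(path, []byte("RUN_MODE = dev\n"), 0o600); err != nil {
        log.Fatalf("cannot write %q: %v", path, err)
    }
    _ = mustRead(path)
}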

View File

@@ -121,9 +121,9 @@ func Test_NormalizeEOL(t *testing.T) {
 }
 
 func Test_RandomInt(t *testing.T) {
-  int, err := CryptoRandomInt(255)
-  assert.True(t, int >= 0)
-  assert.True(t, int <= 255)
+  randInt, err := CryptoRandomInt(255)
+  assert.True(t, randInt >= 0)
+  assert.True(t, randInt <= 255)
   assert.NoError(t, err)
 }

View File

@ -1186,7 +1186,6 @@ action.blocked_user=Nelze provést akci, protože jste zablokování vlastníkem
download_archive=Stáhnout repozitář download_archive=Stáhnout repozitář
more_operations=Další operace more_operations=Další operace
no_desc=Bez popisu
quick_guide=Krátká příručka quick_guide=Krátká příručka
clone_this_repo=Naklonovat tento repozitář clone_this_repo=Naklonovat tento repozitář
cite_this_repo=Citovat tento repozitář cite_this_repo=Citovat tento repozitář

View File

@ -1187,7 +1187,6 @@ action.blocked_user=Die Aktion kann nicht ausgeführt werden, da du vom Reposito
download_archive=Repository herunterladen download_archive=Repository herunterladen
more_operations=Weitere Operationen more_operations=Weitere Operationen
no_desc=Keine Beschreibung
quick_guide=Kurzanleitung quick_guide=Kurzanleitung
clone_this_repo=Dieses Repository klonen clone_this_repo=Dieses Repository klonen
cite_this_repo=Dieses Repository zitieren cite_this_repo=Dieses Repository zitieren

View File

@ -1118,7 +1118,6 @@ fork=Fork
download_archive=Λήψη Αποθετηρίου download_archive=Λήψη Αποθετηρίου
more_operations=Περισσότερες Λειτουργίες more_operations=Περισσότερες Λειτουργίες
no_desc=Χωρίς Περιγραφή
quick_guide=Γρήγορος Οδηγός quick_guide=Γρήγορος Οδηγός
clone_this_repo=Κλωνοποίηση αυτού του αποθετηρίου clone_this_repo=Κλωνοποίηση αυτού του αποθετηρίου
cite_this_repo=Αναφορά σε αυτό το αποθετήριο cite_this_repo=Αναφορά σε αυτό το αποθετήριο

View File

@ -1111,7 +1111,6 @@ fork=Fork
download_archive=Descargar repositorio download_archive=Descargar repositorio
more_operations=Más operaciones more_operations=Más operaciones
no_desc=Sin descripción
quick_guide=Guía rápida quick_guide=Guía rápida
clone_this_repo=Clonar este repositorio clone_this_repo=Clonar este repositorio
cite_this_repo=Citar este repositorio cite_this_repo=Citar este repositorio

View File

@ -874,7 +874,6 @@ star=ستاره دار کن
fork=انشعاب fork=انشعاب
download_archive=دانلود مخزن download_archive=دانلود مخزن
no_desc=بدون توضیح
quick_guide=راهنمای سریع quick_guide=راهنمای سریع
clone_this_repo=همسان‌سازی این مخزن clone_this_repo=همسان‌سازی این مخزن
create_new_repo_command=ایجاد یک مخزن جدید در خط فرمان create_new_repo_command=ایجاد یک مخزن جدید در خط فرمان

View File

@ -718,7 +718,6 @@ unstar=Poista tähti
star=Tähti star=Tähti
download_archive=Lataa repo download_archive=Lataa repo
no_desc=Ei kuvausta
quick_guide=Pikaopas quick_guide=Pikaopas
clone_this_repo=Kloonaa tämä repo clone_this_repo=Kloonaa tämä repo

View File

@ -1130,7 +1130,6 @@ fork=Bifurcation
download_archive=Télécharger ce dépôt download_archive=Télécharger ce dépôt
more_operations=Plus d'opérations more_operations=Plus d'opérations
no_desc=Aucune description
quick_guide=Introduction rapide quick_guide=Introduction rapide
clone_this_repo=Cloner ce dépôt clone_this_repo=Cloner ce dépôt
cite_this_repo=Citer ce dépôt cite_this_repo=Citer ce dépôt

View File

@ -656,7 +656,6 @@ star=Csillagozás
fork=Tükrözés fork=Tükrözés
download_archive=Tároló letöltése download_archive=Tároló letöltése
no_desc=Nincs leírás
quick_guide=Gyors útmutató quick_guide=Gyors útmutató
clone_this_repo=Tároló klónozása clone_this_repo=Tároló klónozása
create_new_repo_command=Egy új tároló létrehozása a parancssorból create_new_repo_command=Egy új tároló létrehozása a parancssorból

View File

@ -570,7 +570,6 @@ star=Bintang
fork=Garpu fork=Garpu
download_archive=Unduh Repositori download_archive=Unduh Repositori
no_desc=Tidak ada Deskripsi
quick_guide=Panduan Cepat quick_guide=Panduan Cepat
clone_this_repo=Klon repositori ini clone_this_repo=Klon repositori ini
create_new_repo_command=Membuat repositori baru pada baris perintah create_new_repo_command=Membuat repositori baru pada baris perintah

View File

@ -647,7 +647,6 @@ star=Bæta við eftirlæti
fork=Tvískipta fork=Tvískipta
download_archive=Hlaða Miður Geymslu download_archive=Hlaða Miður Geymslu
no_desc=Engin Lýsing
quick_guide=Stuttar Leiðbeiningar quick_guide=Stuttar Leiðbeiningar
clone_this_repo=Afrita þetta hugbúnaðarsafn clone_this_repo=Afrita þetta hugbúnaðarsafn
create_new_repo_command=Að búa til nýja geymslu með skipanalínu create_new_repo_command=Að búa til nýja geymslu með skipanalínu

View File

@ -936,7 +936,6 @@ star=Vota
fork=Forka fork=Forka
download_archive=Scarica Repository download_archive=Scarica Repository
no_desc=Nessuna descrizione
quick_guide=Guida rapida quick_guide=Guida rapida
clone_this_repo=Clona questo repository clone_this_repo=Clona questo repository
create_new_repo_command=Creazione di un nuovo repository da riga di comando create_new_repo_command=Creazione di un nuovo repository da riga di comando

View File

@ -1188,7 +1188,6 @@ action.blocked_user=リポジトリのオーナーがあなたをブロックし
download_archive=リポジトリをダウンロード download_archive=リポジトリをダウンロード
more_operations=その他の操作 more_operations=その他の操作
no_desc=説明なし
quick_guide=クイック ガイド quick_guide=クイック ガイド
clone_this_repo=このリポジトリのクローンを作成 clone_this_repo=このリポジトリのクローンを作成
cite_this_repo=このリポジトリを引用 cite_this_repo=このリポジトリを引用

View File

@ -606,7 +606,6 @@ star=좋아요
fork=포크 fork=포크
download_archive=저장소 다운로드 download_archive=저장소 다운로드
no_desc=설명 없음
quick_guide=퀵 가이드 quick_guide=퀵 가이드
clone_this_repo=이 저장소 복제 clone_this_repo=이 저장소 복제
create_new_repo_command=커맨드 라인에서 새 레포리지터리 생성 create_new_repo_command=커맨드 라인에서 새 레포리지터리 생성

View File

@ -1119,7 +1119,6 @@ fork=Atdalīts
download_archive=Lejupielādēt repozitoriju download_archive=Lejupielādēt repozitoriju
more_operations=Vairāk darbību more_operations=Vairāk darbību
no_desc=Nav apraksta
quick_guide=Īsa pamācība quick_guide=Īsa pamācība
clone_this_repo=Klonēt šo repozitoriju clone_this_repo=Klonēt šo repozitoriju
cite_this_repo=Citēt šo repozitoriju cite_this_repo=Citēt šo repozitoriju

View File

@ -934,7 +934,6 @@ star=Ster
fork=Vork fork=Vork
download_archive=Download repository download_archive=Download repository
no_desc=Geen omschrijving
quick_guide=Snelstart gids quick_guide=Snelstart gids
clone_this_repo=Kloon deze repository clone_this_repo=Kloon deze repository
create_new_repo_command=Maak een nieuwe repository aan vanaf de console create_new_repo_command=Maak een nieuwe repository aan vanaf de console

View File

@ -877,7 +877,6 @@ star=Polub
fork=Forkuj fork=Forkuj
download_archive=Pobierz repozytorium download_archive=Pobierz repozytorium
no_desc=Brak opisu
quick_guide=Skrócona instrukcja quick_guide=Skrócona instrukcja
clone_this_repo=Klonuj repozytorium clone_this_repo=Klonuj repozytorium
create_new_repo_command=Tworzenie nowego repozytorium z linii poleceń create_new_repo_command=Tworzenie nowego repozytorium z linii poleceń

View File

@ -1115,7 +1115,6 @@ fork=Fork
download_archive=Baixar repositório download_archive=Baixar repositório
more_operations=Mais Operações more_operations=Mais Operações
no_desc=Nenhuma descrição
quick_guide=Guia Rápido quick_guide=Guia Rápido
clone_this_repo=Clonar este repositório clone_this_repo=Clonar este repositório
cite_this_repo=Citar este repositório cite_this_repo=Citar este repositório

View File

@ -1193,7 +1193,6 @@ action.blocked_user=Não pode realizar a operação porque foi bloqueado/a pelo/
download_archive=Descarregar repositório download_archive=Descarregar repositório
more_operations=Mais operações more_operations=Mais operações
no_desc=Sem descrição
quick_guide=Guia rápido quick_guide=Guia rápido
clone_this_repo=Clonar este repositório clone_this_repo=Clonar este repositório
cite_this_repo=Citar este repositório cite_this_repo=Citar este repositório

View File

@ -1098,7 +1098,6 @@ fork=Форкнуть
download_archive=Скачать репозиторий download_archive=Скачать репозиторий
more_operations=Ещё действия more_operations=Ещё действия
no_desc=Нет описания
quick_guide=Краткое руководство quick_guide=Краткое руководство
clone_this_repo=Клонировать репозиторий clone_this_repo=Клонировать репозиторий
cite_this_repo=Сослаться на этот репозиторий cite_this_repo=Сослаться на этот репозиторий

View File

@ -846,7 +846,6 @@ star=ස්ටාර්
fork=දෙබලක fork=දෙබලක
download_archive=කෝෂ්ඨය බාගන්න download_archive=කෝෂ්ඨය බාගන්න
no_desc=සවිස්තරයක් නැත
quick_guide=ඉක්මන් මාර්ගෝපදේශය quick_guide=ඉක්මන් මාර්ගෝපදේශය
clone_this_repo=මෙම ගබඩාව පරිගණක ක්රිඩාවට සමාන clone_this_repo=මෙම ගබඩාව පරිගණක ක්රිඩාවට සමාන
create_new_repo_command=විධාන රේඛාවේ නව ගබඩාවක් නිර්මාණය කිරීම create_new_repo_command=විධාන රේඛාවේ නව ගබඩාවක් නිර්මාණය කිරීම

View File

@ -964,7 +964,6 @@ star=Hviezdička
download_archive=Stiahnuť repozitár download_archive=Stiahnuť repozitár
more_operations=Viac operácií more_operations=Viac operácií
no_desc=Bez popisu
quick_guide=Rýchly sprievodca quick_guide=Rýchly sprievodca
clone_this_repo=Klonovať tento repozitár clone_this_repo=Klonovať tento repozitár
create_new_repo_command=Vytvoriť nový repozitár v príkazovom riadku create_new_repo_command=Vytvoriť nový repozitár v príkazovom riadku

View File

@ -718,7 +718,6 @@ star=Stjärnmärk
fork=Förgrening fork=Förgrening
download_archive=Ladda Ned Utvecklingskatalogen download_archive=Ladda Ned Utvecklingskatalogen
no_desc=Ingen beskrivning
quick_guide=Snabbguide quick_guide=Snabbguide
clone_this_repo=Klona detta repo clone_this_repo=Klona detta repo
create_new_repo_command=Skapa en ny utvecklingskatalog på kommandoraden create_new_repo_command=Skapa en ny utvecklingskatalog på kommandoraden

View File

@ -1193,7 +1193,6 @@ action.blocked_user=İşlem gerçekleştirilemiyor, depo sahibi tarafından enge
download_archive=Depoyu İndir download_archive=Depoyu İndir
more_operations=Daha Fazla İşlem more_operations=Daha Fazla İşlem
no_desc=ıklama Yok
quick_guide=Hızlı Başlangıç Kılavuzu quick_guide=Hızlı Başlangıç Kılavuzu
clone_this_repo=Bu depoyu klonla clone_this_repo=Bu depoyu klonla
cite_this_repo=Bu depoya atıf ver cite_this_repo=Bu depoya atıf ver

View File

@ -882,7 +882,6 @@ star=В обрані
fork=Форк fork=Форк
download_archive=Скачати репозиторій download_archive=Скачати репозиторій
no_desc=Без опису
quick_guide=Короткий посібник quick_guide=Короткий посібник
clone_this_repo=Кнонувати цей репозиторій clone_this_repo=Кнонувати цей репозиторій
create_new_repo_command=Створити новий репозиторій з командного рядка create_new_repo_command=Створити новий репозиторій з командного рядка

View File

@ -1193,7 +1193,6 @@ action.blocked_user=无法执行操作,因为您已被仓库所有者屏蔽。
download_archive=下载此仓库 download_archive=下载此仓库
more_operations=更多操作 more_operations=更多操作
no_desc=暂无描述
quick_guide=快速帮助 quick_guide=快速帮助
clone_this_repo=克隆当前仓库 clone_this_repo=克隆当前仓库
cite_this_repo=引用此仓库 cite_this_repo=引用此仓库

View File

@ -344,7 +344,6 @@ unstar=取消收藏
star=收藏 star=收藏
fork=複製 fork=複製
no_desc=暫無描述
quick_guide=快速幫助 quick_guide=快速幫助
clone_this_repo=複製當前儲存庫 clone_this_repo=複製當前儲存庫
create_new_repo_command=從命令列建立新儲存庫。 create_new_repo_command=從命令列建立新儲存庫。

View File

@ -1016,7 +1016,6 @@ fork=Fork
download_archive=下載此儲存庫 download_archive=下載此儲存庫
more_operations=更多操作 more_operations=更多操作
no_desc=暫無描述
quick_guide=快速幫助 quick_guide=快速幫助
clone_this_repo=Clone 此儲存庫 clone_this_repo=Clone 此儲存庫
cite_this_repo=引用此儲存庫 cite_this_repo=引用此儲存庫

View File

@ -144,7 +144,6 @@ func ArtifactContexter() func(next http.Handler) http.Handler {
var task *actions.ActionTask var task *actions.ActionTask
if err == nil { if err == nil {
task, err = actions.GetTaskByID(req.Context(), tID) task, err = actions.GetTaskByID(req.Context(), tID)
if err != nil { if err != nil {
log.Error("Error runner api getting task by ID: %v", err) log.Error("Error runner api getting task by ID: %v", err)

View File

@@ -96,12 +96,12 @@ func UploadPackageFile(ctx *context.Context) {
     return
   }
 
-  upload, close, err := ctx.UploadStream()
+  upload, needToClose, err := ctx.UploadStream()
   if err != nil {
     apiError(ctx, http.StatusInternalServerError, err)
     return
   }
-  if close {
+  if needToClose {
     defer upload.Close()
   }
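
Beyond style, a variable called close shadows the predeclared close, so the builtin cannot be used on a channel in the same scope; needToClose avoids that, and the same rename is applied in every other package router below. A minimal illustration with a hypothetical helper, not Gitea code:

package main

import "fmt"

func closeDone(done chan struct{}) {
    close(done) // the builtin works here: nothing shadows it
}

func main() {
    done := make(chan struct{})

    close := true // shadows the builtin close for the rest of main
    if close {
        // close(done) // would not compile: close is a bool in this scope
        closeDone(done)
    }

    <-done
    fmt.Println("channel closed via helper while the builtin was shadowed")
}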

View File

@ -310,12 +310,12 @@ func uploadFile(ctx *context.Context, fileFilter container.Set[string], fileKey
return return
} }
upload, close, err := ctx.UploadStream() upload, needToClose, err := ctx.UploadStream()
if err != nil { if err != nil {
apiError(ctx, http.StatusBadRequest, err) apiError(ctx, http.StatusBadRequest, err)
return return
} }
if close { if needToClose {
defer upload.Close() defer upload.Close()
} }

View File

@ -174,12 +174,12 @@ func EnumeratePackages(ctx *context.Context) {
} }
func UploadPackageFile(ctx *context.Context) { func UploadPackageFile(ctx *context.Context) {
upload, close, err := ctx.UploadStream() upload, needToClose, err := ctx.UploadStream()
if err != nil { if err != nil {
apiError(ctx, http.StatusInternalServerError, err) apiError(ctx, http.StatusInternalServerError, err)
return return
} }
if close { if needToClose {
defer upload.Close() defer upload.Close()
} }

View File

@@ -385,9 +385,9 @@ func EndUploadBlob(ctx *context.Context) {
     }
     return
   }
-  close := true
+  doClose := true
   defer func() {
-    if close {
+    if doClose {
       uploader.Close()
     }
   }()
@@ -427,7 +427,7 @@ func EndUploadBlob(ctx *context.Context) {
     apiError(ctx, http.StatusInternalServerError, err)
     return
   }
-  close = false
+  doClose = false
 
   if err := container_service.RemoveBlobUploadByID(ctx, uploader.ID); err != nil {
     apiError(ctx, http.StatusInternalServerError, err)
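
The doClose flag is the usual ownership-transfer pattern: a deferred cleanup closes the uploader on every early-error path, and the flag is flipped once the happy path hands the resource off. A self-contained sketch of the pattern with a plain file instead of the container uploader:

package main

import (
    "fmt"
    "io"
    "os"
)

// consume takes ownership of rc and is responsible for closing it.
func consume(rc io.ReadCloser) error {
    defer rc.Close()
    _, err := io.Copy(io.Discard, rc)
    return err
}

func process(path string) error {
    f, err := os.Open(path)
    if err != nil {
        return err
    }
    doClose := true
    defer func() {
        if doClose {
            f.Close() // only reached when an early return keeps ownership here
        }
    }()

    // ... validation that may return early would go here ...

    doClose = false // ownership moves to consume, which closes f itself
    return consume(f)
}

func main() {
    fmt.Println(process(os.Args[0]))
}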

View File

@ -151,12 +151,12 @@ func UploadBinaryPackageFile(ctx *context.Context) {
} }
func uploadPackageFile(ctx *context.Context, compositeKey string, properties map[string]string) { func uploadPackageFile(ctx *context.Context, compositeKey string, properties map[string]string) {
upload, close, err := ctx.UploadStream() upload, needToClose, err := ctx.UploadStream()
if err != nil { if err != nil {
apiError(ctx, http.StatusBadRequest, err) apiError(ctx, http.StatusBadRequest, err)
return return
} }
if close { if needToClose {
defer upload.Close() defer upload.Close()
} }

View File

@ -127,12 +127,12 @@ func UploadPackageFile(ctx *context.Context) {
return return
} }
upload, close, err := ctx.UploadStream() upload, needToClose, err := ctx.UploadStream()
if err != nil { if err != nil {
apiError(ctx, http.StatusInternalServerError, err) apiError(ctx, http.StatusInternalServerError, err)
return return
} }
if close { if needToClose {
defer upload.Close() defer upload.Close()
} }

View File

@ -90,12 +90,12 @@ func UploadPackage(ctx *context.Context) {
return return
} }
upload, close, err := ctx.UploadStream() upload, needToClose, err := ctx.UploadStream()
if err != nil { if err != nil {
apiError(ctx, http.StatusInternalServerError, err) apiError(ctx, http.StatusInternalServerError, err)
return return
} }
if close { if needToClose {
defer upload.Close() defer upload.Close()
} }

View File

@ -154,12 +154,12 @@ func resolvePackage(ctx *context.Context, ownerID int64, name, version string) (
} }
func UploadPackage(ctx *context.Context) { func UploadPackage(ctx *context.Context) {
upload, close, err := ctx.UploadStream() upload, needToClose, err := ctx.UploadStream()
if err != nil { if err != nil {
apiError(ctx, http.StatusInternalServerError, err) apiError(ctx, http.StatusInternalServerError, err)
return return
} }
if close { if needToClose {
defer upload.Close() defer upload.Close()
} }

View File

@ -594,13 +594,13 @@ func UploadSymbolPackage(ctx *context.Context) {
func processUploadedFile(ctx *context.Context, expectedType nuget_module.PackageType) (*nuget_module.Package, *packages_module.HashedBuffer, []io.Closer) { func processUploadedFile(ctx *context.Context, expectedType nuget_module.PackageType) (*nuget_module.Package, *packages_module.HashedBuffer, []io.Closer) {
closables := make([]io.Closer, 0, 2) closables := make([]io.Closer, 0, 2)
upload, close, err := ctx.UploadStream() upload, needToClose, err := ctx.UploadStream()
if err != nil { if err != nil {
apiError(ctx, http.StatusBadRequest, err) apiError(ctx, http.StatusBadRequest, err)
return nil, nil, closables return nil, nil, closables
} }
if close { if needToClose {
closables = append(closables, upload) closables = append(closables, upload)
} }

View File

@ -117,12 +117,12 @@ func GetRepositoryFile(ctx *context.Context) {
} }
func UploadPackageFile(ctx *context.Context) { func UploadPackageFile(ctx *context.Context) {
upload, close, err := ctx.UploadStream() upload, needToClose, err := ctx.UploadStream()
if err != nil { if err != nil {
apiError(ctx, http.StatusInternalServerError, err) apiError(ctx, http.StatusInternalServerError, err)
return return
} }
if close { if needToClose {
defer upload.Close() defer upload.Close()
} }

View File

@ -197,12 +197,12 @@ func DownloadPackageFile(ctx *context.Context) {
// UploadPackageFile adds a file to the package. If the package does not exist, it gets created. // UploadPackageFile adds a file to the package. If the package does not exist, it gets created.
func UploadPackageFile(ctx *context.Context) { func UploadPackageFile(ctx *context.Context) {
upload, close, err := ctx.UploadStream() upload, needToClose, err := ctx.UploadStream()
if err != nil { if err != nil {
apiError(ctx, http.StatusBadRequest, err) apiError(ctx, http.StatusBadRequest, err)
return return
} }
if close { if needToClose {
defer upload.Close() defer upload.Close()
} }

View File

@@ -16,7 +16,7 @@ import (
 
 // CompareDiff compare two branches or commits
 func CompareDiff(ctx *context.APIContext) {
-  // swagger:operation GET /repos/{owner}/{repo}/compare/{basehead} Get commit comparison information
+  // swagger:operation GET /repos/{owner}/{repo}/compare/{basehead} repository repoCompareDiff
   // ---
   // summary: Get commit comparison information
   // produces:
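
The corrected line follows go-swagger's positional format, swagger:operation <method> <path> <tags...> <operationID>; the old comment repeated the summary there, so it was parsed as tags and an operation id in the generated spec. A hedged sketch of the expected layout for a made-up endpoint (handler, route and names are illustrative only, not part of Gitea's API):

package main

import (
    "fmt"
    "net/http"
    "net/http/httptest"
)

// getNote is a hypothetical handler shown only for the comment layout.
func getNote(w http.ResponseWriter, r *http.Request) {
    // swagger:operation GET /repos/{owner}/{repo}/notes/{id} repository repoGetNote
    // ---
    // summary: Get a single note
    // produces:
    // - application/json
    // responses:
    //   "200":
    //     description: the requested note
    w.WriteHeader(http.StatusOK)
}

func main() {
    rec := httptest.NewRecorder()
    req := httptest.NewRequest(http.MethodGet, "/repos/o/r/notes/1", nil)
    getNote(rec, req)
    fmt.Println(rec.Code)
}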

View File

@ -217,7 +217,6 @@ func SearchIssues(ctx *context.APIContext) {
var includedAnyLabels []int64 var includedAnyLabels []int64
{ {
labels := ctx.FormTrim("labels") labels := ctx.FormTrim("labels")
var includedLabelNames []string var includedLabelNames []string
if len(labels) > 0 { if len(labels) > 0 {

View File

@ -180,7 +180,6 @@ func ListPushMirrors(ctx *context.APIContext) {
if err == nil { if err == nil {
responsePushMirrors = append(responsePushMirrors, m) responsePushMirrors = append(responsePushMirrors, m)
} }
} }
ctx.SetLinkHeader(len(responsePushMirrors), utils.GetListOptions(ctx).PageSize) ctx.SetLinkHeader(len(responsePushMirrors), utils.GetListOptions(ctx).PageSize)
ctx.SetTotalCountHeader(count) ctx.SetTotalCountHeader(count)

View File

@ -1061,7 +1061,6 @@ func parseCompareInfo(ctx *context.APIContext, form api.CreatePullRequestOption)
isSameRepo = true isSameRepo = true
headUser = ctx.Repo.Owner headUser = ctx.Repo.Owner
headBranch = headInfos[0] headBranch = headInfos[0]
} else if len(headInfos) == 2 { } else if len(headInfos) == 2 {
headUser, err = user_model.GetUserByName(ctx, headInfos[0]) headUser, err = user_model.GetUserByName(ctx, headInfos[0])
if err != nil { if err != nil {
@ -1075,7 +1074,6 @@ func parseCompareInfo(ctx *context.APIContext, form api.CreatePullRequestOption)
headBranch = headInfos[1]
// The head repository can also point to the same repo
isSameRepo = ctx.Repo.Owner.ID == headUser.ID
} else {
ctx.NotFound()
return nil, nil, nil, nil, "", ""

@ -728,7 +728,6 @@ func apiReviewRequest(ctx *context.APIContext, opts api.PullReviewRequestOptions
}
if ctx.Repo.Repository.Owner.IsOrganization() && len(opts.TeamReviewers) > 0 {
teamReviewers := make([]*organization.Team, 0, len(opts.TeamReviewers))
for _, t := range opts.TeamReviewers {
var teamReviewer *organization.Team

@ -1084,7 +1084,6 @@ func updateMirror(ctx *context.APIContext, opts api.EditRepoOption) error {
// update MirrorInterval
if opts.MirrorInterval != nil {
// MirrorInterval should be a duration
interval, err := time.ParseDuration(*opts.MirrorInterval)
if err != nil {

@ -478,7 +478,6 @@ func findEntryForFile(commit *git.Commit, target string) (*git.TreeEntry, error)
func findWikiRepoCommit(ctx *context.APIContext) (*git.Repository, *git.Commit) {
wikiRepo, err := gitrepo.OpenWikiRepository(ctx, ctx.Repo.Repository)
if err != nil {
if git.IsErrNotExist(err) || err.Error() == "no such file or directory" {
ctx.NotFound(err)
} else {

@ -198,7 +198,6 @@ func preReceiveBranch(ctx *preReceiveContext, oldCommitID, newCommitID string, r
UserMsg: fmt.Sprintf("branch %s is protected from force push", branchName),
})
return
}
}

@ -644,7 +644,6 @@ func ArtifactsDownloadView(ctx *context_module.Context) {
writer := zip.NewWriter(ctx.Resp)
defer writer.Close()
for _, art := range artifacts {
f, err := storage.ActionsArtifacts.Open(art.StoragePath)
if err != nil {
ctx.Error(http.StatusInternalServerError, err.Error())
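
The context lines above stream each artifact into a zip.Writer wrapped directly around the HTTP response, so the archive is never buffered on disk. A minimal stdlib-only sketch of that streaming pattern, with in-memory contents standing in for Gitea's artifact storage:

```go
package main

import (
	"archive/zip"
	"io"
	"net/http"
	"strings"
)

// zipHandler streams a zip archive straight into the response writer,
// mirroring the pattern in ArtifactsDownloadView; the file contents here are
// in-memory placeholders rather than Gitea's artifact storage.
func zipHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/zip")
	zw := zip.NewWriter(w)
	defer zw.Close()

	for name, content := range map[string]string{"a.txt": "hello", "b.txt": "world"} {
		f, err := zw.Create(name)
		if err != nil {
			return // nothing useful to send once streaming has started
		}
		if _, err := io.Copy(f, strings.NewReader(content)); err != nil {
			return
		}
	}
}

func main() {
	http.HandleFunc("/download", zipHandler)
	_ = http.ListenAndServe(":8080", nil)
}
```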

@ -933,7 +933,6 @@ func setTemplateIfExists(ctx *context.Context, ctxDataKey string, possibleFiles
}
}
}
}
if template.Ref != "" && !strings.HasPrefix(template.Ref, "refs/") { // Assume that the ref intended is always a branch - for tags users should use refs/tags/<ref>
@ -1681,7 +1680,6 @@ func ViewIssue(ctx *context.Context) {
if comment.ProjectID > 0 && comment.Project == nil {
comment.Project = ghostProject
}
} else if comment.Type == issues_model.CommentTypeAssignees || comment.Type == issues_model.CommentTypeReviewRequest {
if err = comment.LoadAssigneeUserAndTeam(ctx); err != nil {
ctx.ServerError("LoadAssigneeUserAndTeam", err)
@ -2621,7 +2619,6 @@ func SearchIssues(ctx *context.Context) {
var includedAnyLabels []int64
{
labels := ctx.FormTrim("labels")
var includedLabelNames []string
if len(labels) > 0 {
@ -3005,7 +3002,6 @@ func NewComment(ctx *context.Context) {
if (ctx.Repo.CanWriteIssuesOrPulls(issue.IsPull) || (ctx.IsSigned && issue.IsPoster(ctx.Doer.ID))) &&
(form.Status == "reopen" || form.Status == "close") &&
!(issue.IsPull && issue.PullRequest.HasMerged) {
// Duplication and conflict check should apply to reopen pull request.
var pr *issues_model.PullRequest

@ -443,7 +443,6 @@ func PrepareViewPullInfo(ctx *context.Context, issue *issues_model.Issue) *git.C
}
if pb != nil && pb.EnableStatusCheck {
var missingRequiredChecks []string
for _, requiredContext := range pb.StatusCheckContexts {
contextFound := false
@ -646,7 +645,6 @@ func viewPullFiles(ctx *context.Context, specifiedStartCommit, specifiedEndCommi
// Validate the given commit sha to show (if any passed)
if willShowSpecifiedCommit || willShowSpecifiedCommitRange {
foundStartCommit := len(specifiedStartCommit) == 0
foundEndCommit := len(specifiedEndCommit) == 0
@ -974,7 +972,6 @@ func UpdatePullRequest(ctx *context.Context) {
ctx.Flash.Error(flashError)
ctx.Redirect(issue.Link())
return
}
ctx.Flash.Error(err.Error())
ctx.Redirect(issue.Link())

@ -318,7 +318,6 @@ func UpdateViewedFiles(ctx *context.Context) {
updatedFiles := make(map[string]pull_model.ViewedState, len(data.Files))
for file, viewed := range data.Files {
// Only unviewed and viewed are possible, has-changed can not be set from the outside
state := pull_model.Unviewed
if viewed {

@ -347,7 +347,6 @@ func loadLatestCommitData(ctx *context.Context, latestCommit *git.Commit) bool {
// or of directory if not in root directory.
ctx.Data["LatestCommit"] = latestCommit
if latestCommit != nil {
verification := asymkey_model.ParseCommitWithSignature(ctx, latestCommit)
if err := asymkey_model.CalculateTrustStatus(verification, ctx.Repo.Repository.GetTrustModel(), func(user *user_model.User) (bool, error) {

@ -298,13 +298,15 @@ func handleWorkflows(
TriggerEvent: dwf.TriggerEvent.Name,
Status: actions_model.StatusWaiting,
}
-if need, err := ifNeedApproval(ctx, run, input.Repo, input.Doer); err != nil {
+need, err := ifNeedApproval(ctx, run, input.Repo, input.Doer)
+if err != nil {
log.Error("check if need approval for repo %d with user %d: %v", input.Repo.ID, input.Doer.ID, err)
continue
-} else {
-run.NeedApproval = need
}
+run.NeedApproval = need
if err := run.LoadAttributes(ctx); err != nil {
log.Error("LoadAttributes: %v", err)
continue
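
This hunk replaces an if/else around ifNeedApproval with the more idiomatic shape: call, handle the error (and continue), then assign on the main path, which removes the else branch after a terminating statement. A small sketch of the same control-flow change, where checkApproval is only a stand-in for the real helper:

```go
package main

import (
	"errors"
	"fmt"
)

// checkApproval is only a stand-in for ifNeedApproval; the real function
// takes Gitea-specific arguments.
func checkApproval(id int) (bool, error) {
	if id%2 == 0 {
		return true, nil
	}
	return false, errors.New("cannot decide")
}

func main() {
	for id := 0; id < 4; id++ {
		// After the refactor: call, handle the error (and continue), then
		// assign on the main path instead of inside an else branch.
		need, err := checkApproval(id)
		if err != nil {
			fmt.Printf("check approval for %d: %v\n", id, err)
			continue
		}
		fmt.Printf("run %d needs approval: %v\n", id, need)
	}
}
```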

@ -156,7 +156,6 @@ func (source *Source) Sync(ctx context.Context, updateExisting bool) error {
!strings.EqualFold(usr.Email, su.Mail) ||
usr.FullName != fullName ||
!usr.IsActive {
log.Trace("SyncExternalUsers[%s]: Updating user %s", source.authSource.Name, usr.Name)
opts := &user_service.UpdateOptions{

@ -825,7 +825,6 @@ func getRefName(ctx *Base, repo *Repository, pathType RepoRefType) string {
case RepoRefBranch:
ref := getRefNameFromPath(ctx, repo, path, repo.GitRepo.IsBranchExist)
if len(ref) == 0 {
// check if ref is HEAD
parts := strings.Split(path, "/")
if parts[0] == headRefName {
@ -968,7 +967,6 @@ func RepoRefByType(refType RepoRefType, ignoreNotExistErr ...bool) func(*Context
return cancel
}
ctx.Repo.CommitID = ctx.Repo.Commit.ID.String()
} else if refType.RefTypeIncludesTags() && ctx.Repo.GitRepo.IsTagExist(refName) {
ctx.Repo.IsViewTag = true
ctx.Repo.TagName = refName

@ -152,6 +152,12 @@ func prepareDBConsistencyChecks() []consistencyCheck {
Fixer: actions_model.FixRunnersWithoutBelongingOwner,
FixedMessage: "Removed",
},
+{
+Name: "Action Runners without existing repository",
+Counter: actions_model.CountRunnersWithoutBelongingRepo,
+Fixer: actions_model.FixRunnersWithoutBelongingRepo,
+FixedMessage: "Removed",
+},
{
Name: "Topics with empty repository count",
Counter: repo_model.CountOrphanedTopics,
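
The added entry registers a new doctor consistency check for action runners whose repository no longer exists, reusing the Counter/Fixer pair pattern visible in the surrounding entries. A simplified, self-contained sketch of that table-driven pattern; the struct and function signatures here are assumptions for illustration and do not match Gitea's internals exactly:

```go
package main

import (
	"context"
	"fmt"
)

// consistencyCheck is a simplified stand-in for Gitea's internal type; only
// the field names mirror the diff, the signatures are illustrative guesses.
type consistencyCheck struct {
	Name         string
	Counter      func(context.Context) (int64, error)
	Fixer        func(context.Context) (int64, error)
	FixedMessage string
}

func runChecks(ctx context.Context, checks []consistencyCheck, autofix bool) error {
	for _, c := range checks {
		count, err := c.Counter(ctx)
		if err != nil {
			return fmt.Errorf("count %q: %w", c.Name, err)
		}
		if count == 0 {
			continue
		}
		if !autofix {
			fmt.Printf("%s: found %d\n", c.Name, count)
			continue
		}
		fixed, err := c.Fixer(ctx)
		if err != nil {
			return fmt.Errorf("fix %q: %w", c.Name, err)
		}
		fmt.Printf("%s: %d found, %d %s\n", c.Name, count, fixed, c.FixedMessage)
	}
	return nil
}

func main() {
	checks := []consistencyCheck{{
		Name:         "Action Runners without existing repository",
		Counter:      func(context.Context) (int64, error) { return 1, nil },
		Fixer:        func(context.Context) (int64, error) { return 1, nil },
		FixedMessage: "Removed",
	}}
	if err := runChecks(context.Background(), checks, true); err != nil {
		fmt.Println("error:", err)
	}
}
```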

@ -1044,10 +1044,10 @@ func createDiffFile(diff *Diff, line string) *DiffFile {
// diff --git a/b b/b b/b b/b b/b b/b
//
midpoint := (len(line) + len(cmdDiffHead) - 1) / 2
-new, old := line[len(cmdDiffHead):midpoint], line[midpoint+1:]
-if len(new) > 2 && len(old) > 2 && new[2:] == old[2:] {
-curFile.OldName = old[2:]
-curFile.Name = old[2:]
+newPart, oldPart := line[len(cmdDiffHead):midpoint], line[midpoint+1:]
+if len(newPart) > 2 && len(oldPart) > 2 && newPart[2:] == oldPart[2:] {
+curFile.OldName = oldPart[2:]
+curFile.Name = oldPart[2:]
}
}
}
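
These lines rename new and old to newPart and oldPart: new is a predeclared Go identifier, so reusing it as a variable shadows the builtin and trips linters, and oldPart keeps the pair symmetric. A tiny sketch of why the rename helps:

```go
package main

import "fmt"

type buffer struct{ size int }

func main() {
	// Naming these `new` and `old` would compile, but `new` shadows the
	// predeclared identifier and makes the builtin unusable below.
	newPart, oldPart := "b/file.txt", "a/file.txt"
	if len(newPart) > 2 && len(oldPart) > 2 && newPart[2:] == oldPart[2:] {
		fmt.Println("same path on both sides:", newPart[2:])
	}

	b := new(buffer) // still works because nothing shadows the builtin here
	fmt.Println(b.size)
}
```
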
@ -1181,7 +1181,6 @@ func GetDiff(ctx context.Context, gitRepo *git.Repository, opts *DiffOptions, fi
defer deferable()
for _, diffFile := range diff.Files {
isVendored := optional.None[bool]()
isGenerated := optional.None[bool]()
if checker != nil {

Some files were not shown because too many files have changed in this diff