// Copyright 2015 The Gogs Authors. All rights reserved.
// Copyright 2018 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package convert

import (
	"context"
	"fmt"
	"strconv"
	"strings"
	"time"

	actions_model "code.gitea.io/gitea/models/actions"
	asymkey_model "code.gitea.io/gitea/models/asymkey"
	"code.gitea.io/gitea/models/auth"
	git_model "code.gitea.io/gitea/models/git"
	issues_model "code.gitea.io/gitea/models/issues"
	"code.gitea.io/gitea/models/organization"
	"code.gitea.io/gitea/models/perm"
	access_model "code.gitea.io/gitea/models/perm/access"
	repo_model "code.gitea.io/gitea/models/repo"
	"code.gitea.io/gitea/models/unit"
	user_model "code.gitea.io/gitea/models/user"
	"code.gitea.io/gitea/modules/container"
	"code.gitea.io/gitea/modules/git"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/setting"
	api "code.gitea.io/gitea/modules/structs"
	"code.gitea.io/gitea/modules/util"
	"code.gitea.io/gitea/services/gitdiff"
)

// ToEmail convert models.EmailAddress to api.Email
func ToEmail(email *user_model.EmailAddress) *api.Email {
	return &api.Email{
		Email:    email.Email,
		Verified: email.IsActivated,
		Primary:  email.IsPrimary,
	}
}

// ToEmailSearch convert user_model.SearchEmailResult to api.Email
func ToEmailSearch(email *user_model.SearchEmailResult) *api.Email {
	return &api.Email{
		Email:    email.Email,
		Verified: email.IsActivated,
		Primary:  email.IsPrimary,
		UserID:   email.UID,
		UserName: email.Name,
	}
}

// ToBranch convert a git.Commit and git.Branch to an api.Branch
func ToBranch(ctx context.Context, repo *repo_model.Repository, branchName string, c *git.Commit, bp *git_model.ProtectedBranch, user *user_model.User, isRepoAdmin bool) (*api.Branch, error) {
	if bp == nil {
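		// No protection rule applies to this branch: derive push and merge rights from the user's repository permissions.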
		var hasPerm bool
		var canPush bool
		var err error
		if user != nil {
			hasPerm, err = access_model.HasAccessUnit(ctx, user, repo, unit.TypeCode, perm.AccessModeWrite)
			if err != nil {
				return nil, err
			}

			perms, err := access_model.GetUserRepoPermission(ctx, repo, user)
			if err != nil {
				return nil, err
			}
			canPush = issues_model.CanMaintainerWriteToBranch(ctx, perms, branchName, user)
		}

		return &api.Branch{
			Name:                branchName,
			Commit:              ToPayloadCommit(ctx, repo, c),
			Protected:           false,
			RequiredApprovals:   0,
			EnableStatusCheck:   false,
			StatusCheckContexts: []string{},
			UserCanPush:         canPush,
			UserCanMerge:        hasPerm,
		}, nil
	}

	branch := &api.Branch{
		Name:                branchName,
		Commit:              ToPayloadCommit(ctx, repo, c),
		Protected:           true,
		RequiredApprovals:   bp.RequiredApprovals,
		EnableStatusCheck:   bp.EnableStatusCheck,
		StatusCheckContexts: bp.StatusCheckContexts,
	}

	if isRepoAdmin {
		branch.EffectiveBranchProtectionName = bp.RuleName
	}

	if user != nil {
		permission, err := access_model.GetUserRepoPermission(ctx, repo, user)
		if err != nil {
			return nil, err
		}
		bp.Repo = repo
		branch.UserCanPush = bp.CanUserPush(ctx, user)
		branch.UserCanMerge = git_model.IsUserMergeWhitelisted(ctx, bp, user.ID, permission)
	}

	return branch, nil
}

// getWhitelistEntities returns the names of the entities that are in the whitelist
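// It works for both users and teams; the whitelist IDs are collected into a set so membership checks are constant-time.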
func getWhitelistEntities[T *user_model.User | *organization.Team](entities []T, whitelistIDs []int64) []string {
	whitelistUserIDsSet := container.SetOf(whitelistIDs...)
	whitelistNames := make([]string, 0)
	for _, entity := range entities {
		switch v := any(entity).(type) {
		case *user_model.User:
			if whitelistUserIDsSet.Contains(v.ID) {
				whitelistNames = append(whitelistNames, v.Name)
			}
		case *organization.Team:
			if whitelistUserIDsSet.Contains(v.ID) {
				whitelistNames = append(whitelistNames, v.Name)
			}
		}
	}

	return whitelistNames
}

// ToBranchProtection convert a ProtectedBranch to api.BranchProtection
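// Whitelisted user and team IDs are resolved to names by matching them against the repository's readers and the teams with read access; lookup errors are logged and produce empty lists.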
func ToBranchProtection(ctx context.Context, bp *git_model.ProtectedBranch, repo *repo_model.Repository) *api.BranchProtection {
	readers, err := access_model.GetRepoReaders(ctx, repo)
	if err != nil {
		log.Error("GetRepoReaders: %v", err)
	}

	pushWhitelistUsernames := getWhitelistEntities(readers, bp.WhitelistUserIDs)
	forcePushAllowlistUsernames := getWhitelistEntities(readers, bp.ForcePushAllowlistUserIDs)
	mergeWhitelistUsernames := getWhitelistEntities(readers, bp.MergeWhitelistUserIDs)
	approvalsWhitelistUsernames := getWhitelistEntities(readers, bp.ApprovalsWhitelistUserIDs)

	teamReaders, err := organization.OrgFromUser(repo.Owner).TeamsWithAccessToRepo(ctx, repo.ID, perm.AccessModeRead)
	if err != nil {
		log.Error("Repo.Owner.TeamsWithAccessToRepo: %v", err)
	}

	pushWhitelistTeams := getWhitelistEntities(teamReaders, bp.WhitelistTeamIDs)
	forcePushAllowlistTeams := getWhitelistEntities(teamReaders, bp.ForcePushAllowlistTeamIDs)
	mergeWhitelistTeams := getWhitelistEntities(teamReaders, bp.MergeWhitelistTeamIDs)
	approvalsWhitelistTeams := getWhitelistEntities(teamReaders, bp.ApprovalsWhitelistTeamIDs)

	branchName := ""
	if !git_model.IsRuleNameSpecial(bp.RuleName) {
		branchName = bp.RuleName
	}

	return &api.BranchProtection{
		BranchName:                    branchName,
		RuleName:                      bp.RuleName,
		EnablePush:                    bp.CanPush,
		EnablePushWhitelist:           bp.EnableWhitelist,
		PushWhitelistUsernames:        pushWhitelistUsernames,
		PushWhitelistTeams:            pushWhitelistTeams,
		PushWhitelistDeployKeys:       bp.WhitelistDeployKeys,
		EnableForcePush:               bp.CanForcePush,
		EnableForcePushAllowlist:      bp.EnableForcePushAllowlist,
		ForcePushAllowlistUsernames:   forcePushAllowlistUsernames,
		ForcePushAllowlistTeams:       forcePushAllowlistTeams,
		ForcePushAllowlistDeployKeys:  bp.ForcePushAllowlistDeployKeys,
		EnableMergeWhitelist:          bp.EnableMergeWhitelist,
		MergeWhitelistUsernames:       mergeWhitelistUsernames,
		MergeWhitelistTeams:           mergeWhitelistTeams,
		EnableStatusCheck:             bp.EnableStatusCheck,
		StatusCheckContexts:           bp.StatusCheckContexts,
		RequiredApprovals:             bp.RequiredApprovals,
		EnableApprovalsWhitelist:      bp.EnableApprovalsWhitelist,
		ApprovalsWhitelistUsernames:   approvalsWhitelistUsernames,
		ApprovalsWhitelistTeams:       approvalsWhitelistTeams,
		BlockOnRejectedReviews:        bp.BlockOnRejectedReviews,
		BlockOnOfficialReviewRequests: bp.BlockOnOfficialReviewRequests,
		BlockOnOutdatedBranch:         bp.BlockOnOutdatedBranch,
		DismissStaleApprovals:         bp.DismissStaleApprovals,
		IgnoreStaleApprovals:          bp.IgnoreStaleApprovals,
		RequireSignedCommits:          bp.RequireSignedCommits,
		ProtectedFilePatterns:         bp.ProtectedFilePatterns,
		UnprotectedFilePatterns:       bp.UnprotectedFilePatterns,
		Created:                       bp.CreatedUnix.AsTime(),
		Updated:                       bp.UpdatedUnix.AsTime(),
	}
}

// ToTag convert a git.Tag to an api.Tag
func ToTag(repo *repo_model.Repository, t *git.Tag) *api.Tag {
	return &api.Tag{
		Name:       t.Name,
		Message:    strings.TrimSpace(t.Message),
		ID:         t.ID.String(),
		Commit:     ToCommitMeta(repo, t),
		ZipballURL: util.URLJoin(repo.HTMLURL(), "archive", t.Name+".zip"),
		TarballURL: util.URLJoin(repo.HTMLURL(), "archive", t.Name+".tar.gz"),
	}
}

// ToActionTask convert an actions_model.ActionTask to an api.ActionTask
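// The task's job and run are loaded via LoadAttributes so that run metadata (branch, SHA, title and workflow) can be copied onto the API object.
// The run link returned by GetRunLink is root-relative, so it is prefixed with the instance AppURL to form an absolute URL.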
func ToActionTask(ctx context.Context, t *actions_model.ActionTask) (*api.ActionTask, error) {
	if err := t.LoadAttributes(ctx); err != nil {
		return nil, err
	}

	url := strings.TrimSuffix(setting.AppURL, "/") + t.GetRunLink()

	return &api.ActionTask{
		ID:           t.ID,
		Name:         t.Job.Name,
		HeadBranch:   t.Job.Run.PrettyRef(),
		HeadSHA:      t.Job.CommitSHA,
		RunNumber:    t.Job.Run.Index,
		Event:        t.Job.Run.TriggerEvent,
		DisplayTitle: t.Job.Run.Title,
		Status:       t.Status.String(),
		WorkflowID:   t.Job.Run.WorkflowID,
		URL:          url,
		CreatedAt:    t.Created.AsLocalTime(),
		UpdatedAt:    t.Updated.AsLocalTime(),
		RunStartedAt: t.Started.AsLocalTime(),
	}, nil
}

// ToVerification convert a git.Commit.Signature to an api.PayloadCommitVerification
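// The commit signature (if any) is parsed and verified; when a signing user can be resolved, their name and email are attached as the signer.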
|
Add context cache as a request level cache (#22294)
To avoid duplicated load of the same data in an HTTP request, we can set
a context cache to do that. i.e. Some pages may load a user from a
database with the same id in different areas on the same page. But the
code is hidden in two different deep logic. How should we share the
user? As a result of this PR, now if both entry functions accept
`context.Context` as the first parameter and we just need to refactor
`GetUserByID` to reuse the user from the context cache. Then it will not
be loaded twice on an HTTP request.
But of course, sometimes we would like to reload an object from the
database, that's why `RemoveContextData` is also exposed.
The core context cache is here. It defines a new context
```go
type cacheContext struct {
ctx context.Context
data map[any]map[any]any
lock sync.RWMutex
}
var cacheContextKey = struct{}{}
func WithCacheContext(ctx context.Context) context.Context {
return context.WithValue(ctx, cacheContextKey, &cacheContext{
ctx: ctx,
data: make(map[any]map[any]any),
})
}
```
Then you can use the below 4 methods to read/write/del the data within
the same context.
```go
func GetContextData(ctx context.Context, tp, key any) any
func SetContextData(ctx context.Context, tp, key, value any)
func RemoveContextData(ctx context.Context, tp, key any)
func GetWithContextCache[T any](ctx context.Context, cacheGroupKey string, cacheTargetID any, f func() (T, error)) (T, error)
```
Then let's take a look at how `system.GetString` implement it.
```go
func GetSetting(ctx context.Context, key string) (string, error) {
return cache.GetWithContextCache(ctx, contextCacheKey, key, func() (string, error) {
return cache.GetString(genSettingCacheKey(key), func() (string, error) {
res, err := GetSettingNoCache(ctx, key)
if err != nil {
return "", err
}
return res.SettingValue, nil
})
})
}
```
First, it will check if context data include the setting object with the
key. If not, it will query from the global cache which may be memory or
a Redis cache. If not, it will get the object from the database. In the
end, if the object gets from the global cache or database, it will be
set into the context cache.
An object stored in the context cache will only be destroyed after the
context disappeared.
2023-02-15 08:37:34 -05:00
|
|
|
func ToVerification(ctx context.Context, c *git.Commit) *api.PayloadCommitVerification {
|
|
|
|
verif := asymkey_model.ParseCommitWithSignature(ctx, c)
|
2019-10-16 09:42:42 -04:00
|
|
|
commitVerification := &api.PayloadCommitVerification{
|
|
|
|
Verified: verif.Verified,
|
|
|
|
Reason: verif.Reason,
|
|
|
|
}
|
2019-06-08 10:31:11 -04:00
|
|
|
if c.Signature != nil {
|
2019-10-16 09:42:42 -04:00
|
|
|
commitVerification.Signature = c.Signature.Signature
|
|
|
|
commitVerification.Payload = c.Signature.Payload
|
2019-06-08 10:31:11 -04:00
|
|
|
}
|
2019-10-16 09:42:42 -04:00
|
|
|
if verif.SigningUser != nil {
|
2021-11-08 02:04:13 -05:00
|
|
|
commitVerification.Signer = &api.PayloadUser{
|
2019-10-16 09:42:42 -04:00
|
|
|
Name: verif.SigningUser.Name,
|
|
|
|
Email: verif.SigningUser.Email,
|
|
|
|
}
|
2016-01-28 14:49:05 -05:00
|
|
|
}
|
2019-10-16 09:42:42 -04:00
|
|
|
return commitVerification
|
2016-01-28 14:49:05 -05:00
|
|
|
}

// ToPublicKey convert asymkey_model.PublicKey to api.PublicKey
func ToPublicKey(apiLink string, key *asymkey_model.PublicKey) *api.PublicKey {
	return &api.PublicKey{
		ID:          key.ID,
		Key:         key.Content,
		URL:         fmt.Sprintf("%s%d", apiLink, key.ID),
		Title:       key.Name,
		Fingerprint: key.Fingerprint,
		Created:     key.CreatedUnix.AsTime(),
	}
}

// ToGPGKey converts models.GPGKey to api.GPGKey
func ToGPGKey(key *asymkey_model.GPGKey) *api.GPGKey {
	subkeys := make([]*api.GPGKey, len(key.SubsKey))
	for id, k := range key.SubsKey {
		subkeys[id] = &api.GPGKey{
			ID:                k.ID,
			PrimaryKeyID:      k.PrimaryKeyID,
			KeyID:             k.KeyID,
			PublicKey:         k.Content,
			Created:           k.CreatedUnix.AsTime(),
			Expires:           k.ExpiredUnix.AsTime(),
			CanSign:           k.CanSign,
			CanEncryptComms:   k.CanEncryptComms,
			CanEncryptStorage: k.CanEncryptStorage,
			CanCertify:        k.CanSign,
			Verified:          k.Verified,
		}
	}
	emails := make([]*api.GPGKeyEmail, len(key.Emails))
	for i, e := range key.Emails {
		emails[i] = ToGPGKeyEmail(e)
	}
	return &api.GPGKey{
		ID:                key.ID,
		PrimaryKeyID:      key.PrimaryKeyID,
		KeyID:             key.KeyID,
		PublicKey:         key.Content,
		Created:           key.CreatedUnix.AsTime(),
		Expires:           key.ExpiredUnix.AsTime(),
		Emails:            emails,
		SubsKey:           subkeys,
		CanSign:           key.CanSign,
		CanEncryptComms:   key.CanEncryptComms,
		CanEncryptStorage: key.CanEncryptStorage,
		CanCertify:        key.CanSign,
		Verified:          key.Verified,
	}
}

// ToGPGKeyEmail convert models.EmailAddress to api.GPGKeyEmail
func ToGPGKeyEmail(email *user_model.EmailAddress) *api.GPGKeyEmail {
	return &api.GPGKeyEmail{
		Email:    email.Email,
		Verified: email.IsActivated,
	}
}

// ToGitHook convert git.Hook to api.GitHook
func ToGitHook(h *git.Hook) *api.GitHook {
	return &api.GitHook{
		Name:     h.Name(),
		IsActive: h.IsActive,
		Content:  h.Content,
	}
}

// ToDeployKey convert asymkey_model.DeployKey to api.DeployKey
func ToDeployKey(apiLink string, key *asymkey_model.DeployKey) *api.DeployKey {
	return &api.DeployKey{
		ID:          key.ID,
		KeyID:       key.KeyID,
		Key:         key.Content,
		Fingerprint: key.Fingerprint,
		URL:         fmt.Sprintf("%s%d", apiLink, key.ID),
		Title:       key.Name,
		Created:     key.CreatedUnix.AsTime(),
		ReadOnly:    key.Mode == perm.AccessModeRead, // All deploy keys are read-only.
	}
}

// ToOrganization convert organization.Organization to api.Organization
func ToOrganization(ctx context.Context, org *organization.Organization) *api.Organization {
	return &api.Organization{
		ID:                        org.ID,
		AvatarURL:                 org.AsUser().AvatarLink(ctx),
		Name:                      org.Name,
		UserName:                  org.Name,
		FullName:                  org.FullName,
		Email:                     org.Email,
		Description:               org.Description,
		Website:                   org.Website,
		Location:                  org.Location,
		Visibility:                org.Visibility.String(),
		RepoAdminChangeTeamAccess: org.RepoAdminChangeTeamAccess,
	}
}

// ToTeam convert models.Team to api.Team
func ToTeam(ctx context.Context, team *organization.Team, loadOrg ...bool) (*api.Team, error) {
	teams, err := ToTeams(ctx, []*organization.Team{team}, len(loadOrg) != 0 && loadOrg[0])
	if err != nil || len(teams) == 0 {
		return nil, err
	}
	return teams[0], nil
}

// ToTeams convert models.Team list to api.Team list
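// When loadOrgs is true, each organization is fetched once per OrgID and shared between its teams via a small in-memory cache.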
func ToTeams(ctx context.Context, teams []*organization.Team, loadOrgs bool) ([]*api.Team, error) {
	cache := make(map[int64]*api.Organization)
	apiTeams := make([]*api.Team, 0, len(teams))
	for _, t := range teams {
		if err := t.LoadUnits(ctx); err != nil {
			return nil, err
		}

		apiTeam := &api.Team{
			ID:                      t.ID,
			Name:                    t.Name,
			Description:             t.Description,
			IncludesAllRepositories: t.IncludesAllRepositories,
			CanCreateOrgRepo:        t.CanCreateOrgRepo,
			Permission:              t.AccessMode.ToString(),
			Units:                   t.GetUnitNames(),
			UnitsMap:                t.GetUnitsMap(),
		}

		if loadOrgs {
			apiOrg, ok := cache[t.OrgID]
			if !ok {
				org, err := organization.GetOrgByID(ctx, t.OrgID)
				if err != nil {
					return nil, err
				}
				apiOrg = ToOrganization(ctx, org)
				cache[t.OrgID] = apiOrg
			}
			apiTeam.Organization = apiOrg
		}

		apiTeams = append(apiTeams, apiTeam)
	}
	return apiTeams, nil
}

// ToAnnotatedTag convert git.Tag to api.AnnotatedTag
func ToAnnotatedTag(ctx context.Context, repo *repo_model.Repository, t *git.Tag, c *git.Commit) *api.AnnotatedTag {
	return &api.AnnotatedTag{
		Tag:          t.Name,
		SHA:          t.ID.String(),
		Object:       ToAnnotatedTagObject(repo, c),
		Message:      t.Message,
		URL:          util.URLJoin(repo.APIURL(), "git/tags", t.ID.String()),
		Tagger:       ToCommitUser(t.Tagger),
		Verification: ToVerification(ctx, c),
	}
}

// ToAnnotatedTagObject convert a git.Commit to an api.AnnotatedTagObject
func ToAnnotatedTagObject(repo *repo_model.Repository, commit *git.Commit) *api.AnnotatedTagObject {
	return &api.AnnotatedTagObject{
		SHA:  commit.ID.String(),
		Type: string(git.ObjectCommit),
		URL:  util.URLJoin(repo.APIURL(), "git/commits", commit.ID.String()),
	}
}

// ToTagProtection convert a git_model.ProtectedTag to an api.TagProtection
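// Allowlisted user and team IDs are resolved to names in the same way as for branch protections.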
func ToTagProtection(ctx context.Context, pt *git_model.ProtectedTag, repo *repo_model.Repository) *api.TagProtection {
	readers, err := access_model.GetRepoReaders(ctx, repo)
	if err != nil {
		log.Error("GetRepoReaders: %v", err)
	}

	whitelistUsernames := getWhitelistEntities(readers, pt.AllowlistUserIDs)

	teamReaders, err := organization.OrgFromUser(repo.Owner).TeamsWithAccessToRepo(ctx, repo.ID, perm.AccessModeRead)
	if err != nil {
		log.Error("Repo.Owner.TeamsWithAccessToRepo: %v", err)
	}

	whitelistTeams := getWhitelistEntities(teamReaders, pt.AllowlistTeamIDs)

	return &api.TagProtection{
		ID:                 pt.ID,
		NamePattern:        pt.NamePattern,
		WhitelistUsernames: whitelistUsernames,
		WhitelistTeams:     whitelistTeams,
		Created:            pt.CreatedUnix.AsTime(),
		Updated:            pt.UpdatedUnix.AsTime(),
	}
}

// ToTopicResponse convert from models.Topic to api.TopicResponse
func ToTopicResponse(topic *repo_model.Topic) *api.TopicResponse {
	return &api.TopicResponse{
		ID:        topic.ID,
		Name:      topic.Name,
		RepoCount: topic.RepoCount,
		Created:   topic.CreatedUnix.AsTime(),
		Updated:   topic.UpdatedUnix.AsTime(),
	}
}

// ToOAuth2Application convert from auth.OAuth2Application to api.OAuth2Application
func ToOAuth2Application(app *auth.OAuth2Application) *api.OAuth2Application {
	return &api.OAuth2Application{
		ID:                         app.ID,
		Name:                       app.Name,
		ClientID:                   app.ClientID,
		ClientSecret:               app.ClientSecret,
		ConfidentialClient:         app.ConfidentialClient,
		SkipSecondaryAuthorization: app.SkipSecondaryAuthorization,
		RedirectURIs:               app.RedirectURIs,
		Created:                    app.CreatedUnix.AsTime(),
	}
}

// ToLFSLock convert an LFSLock to api.LFSLock
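// A nil result indicates that the lock owner could not be loaded.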
func ToLFSLock(ctx context.Context, l *git_model.LFSLock) *api.LFSLock {
	u, err := user_model.GetUserByID(ctx, l.OwnerID)
	if err != nil {
		return nil
	}
	return &api.LFSLock{
		ID:       strconv.FormatInt(l.ID, 10),
		Path:     l.Path,
		LockedAt: l.Created.Round(time.Second),
		Owner: &api.LFSLockOwner{
			Name: u.Name,
		},
	}
}

// ToChangedFile convert a gitdiff.DiffFile to api.ChangedFile
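// Status is one of: added, changed, copied, deleted, renamed or unchanged.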
func ToChangedFile(f *gitdiff.DiffFile, repo *repo_model.Repository, commit string) *api.ChangedFile {
	status := "changed"
	if f.IsDeleted {
		status = "deleted"
	} else if f.IsCreated {
		status = "added"
	} else if f.IsRenamed && f.Type == gitdiff.DiffFileCopy {
		status = "copied"
	} else if f.IsRenamed && f.Type == gitdiff.DiffFileRename {
		status = "renamed"
	} else if f.Addition == 0 && f.Deletion == 0 {
		status = "unchanged"
	}

	file := &api.ChangedFile{
		Filename:    f.GetDiffFileName(),
		Status:      status,
		Additions:   f.Addition,
		Deletions:   f.Deletion,
		Changes:     f.Addition + f.Deletion,
		HTMLURL:     fmt.Sprint(repo.HTMLURL(), "/src/commit/", commit, "/", util.PathEscapeSegments(f.GetDiffFileName())),
		ContentsURL: fmt.Sprint(repo.APIURL(), "/contents/", util.PathEscapeSegments(f.GetDiffFileName()), "?ref=", commit),
		RawURL:      fmt.Sprint(repo.HTMLURL(), "/raw/commit/", commit, "/", util.PathEscapeSegments(f.GetDiffFileName())),
	}
}

	if status == "renamed" {
		file.PreviousFilename = f.OldName
	}

	return file
}