// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package queue

import (
	"context"
	"fmt"
	"runtime/pprof"
	"sync"
	"sync/atomic"
	"time"

	"code.gitea.io/gitea/modules/json"
	"code.gitea.io/gitea/modules/log"
)

// ChannelUniqueQueueType is the type for the channel unique queue
const ChannelUniqueQueueType Type = "unique-channel"

// ChannelUniqueQueueConfiguration is the configuration for a ChannelUniqueQueue
type ChannelUniqueQueueConfiguration ChannelQueueConfiguration

// ChannelUniqueQueue implements UniqueQueue
//
// It is basically a thin wrapper around a WorkerPool but keeps a store of
// what has been pushed within a table.
//
// Please note that this Queue does not guarantee that a particular
// task cannot be processed twice or more at the same time. Uniqueness is
// only guaranteed whilst the task is waiting in the queue.
type ChannelUniqueQueue struct {
	*WorkerPool
	lock               sync.Mutex
	table              map[string]bool
	shutdownCtx        context.Context
	shutdownCtxCancel  context.CancelFunc
	terminateCtx       context.Context
	terminateCtxCancel context.CancelFunc
	exemplar           interface{}
	workers            int
	name               string
}
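
// exampleUniquePush is an illustrative sketch only - it is not called from
// anywhere in this package - showing the guarantee documented above: a
// second Push of identical data made while the first copy is still waiting
// in the queue fails with ErrAlreadyInQueue, but once a worker has picked
// the task up the same data may be pushed, and therefore processed, again.
func exampleUniquePush(q Queue) {
	if err := q.Push("task-1"); err != nil {
		log.Error("first push failed: %v", err)
	}
	if err := q.Push("task-1"); err == ErrAlreadyInQueue {
		log.Debug("duplicate rejected while the first copy is queued")
	}
}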

// NewChannelUniqueQueue creates a memory channel queue
func NewChannelUniqueQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) {
	configInterface, err := toConfig(ChannelUniqueQueueConfiguration{}, cfg)
	if err != nil {
		return nil, err
	}
	config := configInterface.(ChannelUniqueQueueConfiguration)
	if config.BatchLength == 0 {
		config.BatchLength = 1
	}

	terminateCtx, terminateCtxCancel := context.WithCancel(context.Background())
	shutdownCtx, shutdownCtxCancel := context.WithCancel(terminateCtx)

	queue := &ChannelUniqueQueue{
		table:              map[string]bool{},
		shutdownCtx:        shutdownCtx,
		shutdownCtxCancel:  shutdownCtxCancel,
		terminateCtx:       terminateCtx,
		terminateCtxCancel: terminateCtxCancel,
		exemplar:           exemplar,
		workers:            config.Workers,
		name:               config.Name,
	}
	queue.WorkerPool = NewWorkerPool(func(data ...Data) (unhandled []Data) {
		for _, datum := range data {
			// No error is possible here because PushFunc ensures that this can be marshalled
			bs, _ := json.Marshal(datum)

			// Remove the task from the uniqueness table before handling it:
			// from this point on an identical task may be pushed again.
			queue.lock.Lock()
			delete(queue.table, string(bs))
			queue.lock.Unlock()

			if u := handle(datum); u != nil {
				if queue.IsPaused() {
					// We can only pushback to the channel if we're paused.
					go func() {
						if err := queue.Push(u[0]); err != nil {
							log.Error("Unable to push back to queue %d. Error: %v", queue.qid, err)
						}
					}()
				} else {
					unhandled = append(unhandled, u...)
				}
			}
		}
		return unhandled
	}, config.WorkerPoolConfiguration)

	queue.qid = GetManager().Add(queue, ChannelUniqueQueueType, config, exemplar)
	return queue, nil
}
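
// exampleNewChannelUniqueQueue is an illustrative sketch of constructing the
// queue directly (in practice the queue manager builds it from settings);
// the handler body and the configuration values are assumptions made for
// this example only.
func exampleNewChannelUniqueQueue() (Queue, error) {
	handle := func(data ...Data) (unhandled []Data) {
		for _, datum := range data {
			log.Info("handled: %v", datum)
		}
		return nil
	}
	var cfg ChannelUniqueQueueConfiguration
	cfg.Workers = 1
	cfg.Name = "example-unique"
	// An empty-string exemplar means pushed data must itself be a string.
	return NewChannelUniqueQueue(handle, cfg, "")
}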

// Run starts to run the queue
func (q *ChannelUniqueQueue) Run(atShutdown, atTerminate func(func())) {
	pprof.SetGoroutineLabels(q.baseCtx)
	atShutdown(q.Shutdown)
	atTerminate(q.Terminate)
	log.Debug("ChannelUniqueQueue: %s Starting", q.name)
	_ = q.AddWorkers(q.workers, 0)
}
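
// exampleRun is an illustrative sketch of the registration pattern Run
// expects; a real caller (normally the queue manager and the graceful
// subsystem) supplies callbacks that arrange for q.Shutdown and q.Terminate
// to be run at the appropriate points in the process lifecycle.
func exampleRun(q *ChannelUniqueQueue) {
	atShutdown := func(f func()) {
		// a real implementation registers f to run when shutdown begins
	}
	atTerminate := func(f func()) {
		// a real implementation registers f to run when the process terminates
	}
	q.Run(atShutdown, atTerminate)
}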

// Push will push data into the queue if the data is not already in the queue
func (q *ChannelUniqueQueue) Push(data Data) error {
	return q.PushFunc(data, nil)
}

// PushFunc will push data into the queue if the data is not already in the
// queue; if fn is non-nil it is run while the data is registered as unique,
// and an error from fn aborts the push
func (q *ChannelUniqueQueue) PushFunc(data Data, fn func() error) error {
	if !assignableTo(data, q.exemplar) {
		return fmt.Errorf("unable to assign data: %v to same type as exemplar: %v in queue: %s", data, q.exemplar, q.name)
	}

	bs, err := json.Marshal(data)
	if err != nil {
		return err
	}
	q.lock.Lock()
	locked := true
	defer func() {
		if locked {
			q.lock.Unlock()
		}
	}()
	if _, ok := q.table[string(bs)]; ok {
		return ErrAlreadyInQueue
	}
	// FIXME: We probably need to implement some sort of limit here
	// If the downstream queue blocks this table will grow without limit
	q.table[string(bs)] = true
	if fn != nil {
		err := fn()
		if err != nil {
			delete(q.table, string(bs))
			return err
		}
	}
	// Release the lock before pushing to the WorkerPool as the push may block
	locked = false
	q.lock.Unlock()
	q.WorkerPool.Push(data)
	return nil
}
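
// examplePushFunc is an illustrative sketch of what fn is for: it runs while
// the uniqueness entry is already recorded, so a caller can perform a side
// effect (the persist callback here is hypothetical) and rely on PushFunc to
// roll the entry back if that side effect fails.
func examplePushFunc(q *ChannelUniqueQueue, persist func() error) error {
	err := q.PushFunc("task-2", func() error {
		// If persist returns an error the task is neither queued nor left
		// marked as unique.
		return persist()
	})
	if err == ErrAlreadyInQueue {
		// An identical task is already waiting; usually not a failure.
		return nil
	}
	return err
}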

// Has checks if the data is in the queue
func (q *ChannelUniqueQueue) Has(data Data) (bool, error) {
	bs, err := json.Marshal(data)
	if err != nil {
		return false, err
	}

	q.lock.Lock()
	defer q.lock.Unlock()
	_, has := q.table[string(bs)]
	return has, nil
}
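
// exampleHas is an illustrative sketch: Has reports whether identical data
// is currently waiting in the queue - it says nothing about data that has
// already been handed to a worker.
func exampleHas(q *ChannelUniqueQueue) bool {
	has, err := q.Has("task-1")
	if err != nil {
		log.Error("unable to check queue %s: %v", q.Name(), err)
		return false
	}
	return has
}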

// Flush flushes the channel with a timeout - the Flush worker will be registered as a flush worker with the manager
func (q *ChannelUniqueQueue) Flush(timeout time.Duration) error {
	if q.IsPaused() {
		return nil
	}
	ctx, cancel := q.commonRegisterWorkers(1, timeout, true)
	defer cancel()
	return q.FlushWithContext(ctx)
}
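
// exampleFlush is an illustrative sketch: Flush drains queued data in the
// calling goroutine until the channel is empty or the (arbitrary) timeout
// elapses.
func exampleFlush(q *ChannelUniqueQueue) {
	if err := q.Flush(30 * time.Second); err != nil {
		log.Error("flush of %s did not complete: %v", q.Name(), err)
	}
}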

// FlushWithContext is very similar to CleanUp but it will return as soon as the dataChan is empty
func (q *ChannelUniqueQueue) FlushWithContext(ctx context.Context) error {
	log.Trace("ChannelUniqueQueue: %d Flush", q.qid)
	paused, _ := q.IsPausedIsResumed()
	for {
		select {
		case <-paused:
			return nil
		default:
		}
		select {
		case data, ok := <-q.dataChan:
			if !ok {
				return nil
			}
			if unhandled := q.handle(data); unhandled != nil {
				log.Error("Unhandled Data whilst flushing queue %d", q.qid)
			}
			atomic.AddInt64(&q.numInQueue, -1)
		case <-q.baseCtx.Done():
			return q.baseCtx.Err()
		case <-ctx.Done():
			return ctx.Err()
		default:
			// dataChan is empty: the flush is complete
			return nil
		}
	}
}

// Shutdown stops processing from this queue
func (q *ChannelUniqueQueue) Shutdown() {
	log.Trace("ChannelUniqueQueue: %s Shutting down", q.name)
	select {
	case <-q.shutdownCtx.Done():
		return
	default:
	}
	// Flush the remaining data in the background; Terminate cuts this short
	// by cancelling terminateCtx.
	go func() {
		log.Trace("ChannelUniqueQueue: %s Flushing", q.name)
		if err := q.FlushWithContext(q.terminateCtx); err != nil {
			log.Warn("ChannelUniqueQueue: %s Terminated before completed flushing", q.name)
			return
		}
		log.Debug("ChannelUniqueQueue: %s Flushed", q.name)
	}()
	q.shutdownCtxCancel()
	log.Debug("ChannelUniqueQueue: %s Shutdown", q.name)
}

// Terminate this queue and close the queue
func (q *ChannelUniqueQueue) Terminate() {
	log.Trace("ChannelUniqueQueue: %s Terminating", q.name)
	q.Shutdown()
	select {
	case <-q.terminateCtx.Done():
		return
	default:
	}
	q.terminateCtxCancel()
	q.baseCtxFinished()
	log.Debug("ChannelUniqueQueue: %s Terminated", q.name)
}
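
// exampleStop is an illustrative sketch of the two-stage stop: Shutdown
// stops the workers while a background flush keeps draining the channel,
// and Terminate then cancels that flush and finishes the base context.
func exampleStop(q *ChannelUniqueQueue) {
	q.Shutdown()
	// ... allow in-flight work to drain ...
	q.Terminate()
}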

// Name returns the name of this queue
func (q *ChannelUniqueQueue) Name() string {
	return q.name
}

func init() {
	queuesMap[ChannelUniqueQueueType] = NewChannelUniqueQueue
}