gitea/modules/globallock/locker_test.go
Jason Song 1e4be0945b
Introduce globallock as distributed locks (#31908)
This helps #31813, but does not replace it, since this PR only introduces
the new module and leaves some work unfinished:

- A new option in settings (#31813 has already done this).
- Using the locks in business logic (#31813 has already done this).

So I think the most efficient way is to merge this PR first (if it's
acceptable) and then finish #31813.

## Design principles

### Use spinlock even in memory implementation

In actual use cases, users may cancel requests. `sync.Mutex` will block
the goroutine until the lock is acquired, even if the request has been
canceled. A spinlock is more suitable for this scenario since it can
give up the acquisition once the context is canceled.

Although the spinlock consumes more CPU resources, I think it's
acceptable in most cases.
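For illustration only, here is a minimal sketch of a context-aware spin acquisition; the `memLocker` type, its `locks` field, and the retry interval are hypothetical, not the actual implementation:

```go
package globallock

import (
	"context"
	"sync"
	"time"
)

// memLocker is a hypothetical in-memory locker, used only for this sketch.
type memLocker struct {
	locks sync.Map // a key is present in the map while the lock is held
}

// lockWithSpin retries until the key is free or ctx is canceled,
// instead of blocking unconditionally like sync.Mutex does.
func (l *memLocker) lockWithSpin(ctx context.Context, key string) error {
	for {
		if _, held := l.locks.LoadOrStore(key, struct{}{}); !held {
			return nil // acquired
		}
		select {
		case <-ctx.Done():
			return ctx.Err() // give up the acquisition: the request was canceled
		case <-time.After(50 * time.Millisecond):
			// spin: try again after a short interval
		}
	}
}
```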

### Do not expose the mutex to callers

If we expose the mutex to callers, it's possible for callers to reuse
the mutex, which causes more complexity.

For example:
```go
lock := GetLocker(key)
lock.Lock()
// ...
// even if the lock is unlocked, we cannot GC the lock,
// since the caller may still use it again.
lock.Unlock()
lock.Lock()
// ...
lock.Unlock()

// callers have to GC the lock manually.
RemoveLocker(key)
```

That's why
https://github.com/go-gitea/gitea/pull/31813#discussion_r1721200549

In this PR, we only expose `ReleaseFunc` to callers. So callers just
need to call `ReleaseFunc` to release the lock, and do not need to care
about the lock's lifecycle.
```go
_, release, err := locker.Lock(ctx, key)
if err != nil {
    return err
}
// ...
release()

// if callers want to lock again, they have to re-acquire the lock.
_, release, err = locker.Lock(ctx, key)
// ...
```
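For reference, based on the usage above and the tests below, the exposed surface presumably looks roughly like this; a sketch of the assumed signatures, not necessarily the exact ones:

```go
package globallock

import "context"

// ReleaseFunc releases the lock. It is safe to call it multiple times,
// and it returns the original (pre-lock) context so callers can continue with it.
type ReleaseFunc func() context.Context

// Locker hides the underlying mutex; callers only ever see a ReleaseFunc.
type Locker interface {
	// Lock blocks until the lock is acquired or ctx is canceled.
	// The returned context is canceled once the lock is lost or released.
	Lock(ctx context.Context, key string) (context.Context, ReleaseFunc, error)
	// TryLock returns immediately; the bool reports whether the lock was acquired.
	TryLock(ctx context.Context, key string) (bool, context.Context, ReleaseFunc, error)
}
```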

In this way, it's also much easier for the redis implementation to extend
the mutex automatically, so that callers do not need to care about the
lock's lifecycle. See also
https://github.com/go-gitea/gitea/pull/31813#discussion_r1722659743
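As a rough sketch of what "extend automatically" could look like with redsync; the function and its parameters below are illustrative, not the actual redisLocker code:

```go
package globallock

import (
	"time"

	"github.com/go-redsync/redsync/v4"
)

// autoExtend keeps extending a redsync mutex in the background until stop is
// closed, calling onLost if an extension fails (e.g. to cancel the lock's context).
func autoExtend(mutex *redsync.Mutex, expiry time.Duration, stop <-chan struct{}, onLost func()) {
	ticker := time.NewTicker(expiry / 2) // extend well before the lock expires
	defer ticker.Stop()
	for {
		select {
		case <-stop:
			return
		case <-ticker.C:
			if ok, err := mutex.Extend(); err != nil || !ok {
				onLost() // the lock could not be extended and may be lost
				return
			}
		}
	}
}
```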

### Use "release" instead of "unlock"

For "unlock", it has the meaning of "unlock an acquired lock". So it's
not acceptable to call "unlock" when failed to acquire the lock, or call
"unlock" multiple times. It causes more complexity for callers to decide
whether to call "unlock" or not.

So we use "release" instead of "unlock" to make it clear. Whether the
lock is acquired or not, callers can always call "release", and it's
also safe to call "release" multiple times.

However, the code does NOT expect callers to skip calling "release"
after acquiring the lock: forgetting to call "release" will cause a
resource leak. That is exactly why "release" is always safe to call
without extra checks: callers can simply `defer release()`
unconditionally and never forget it.
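
One common way to get these semantics is to make the release function idempotent, e.g. with `sync.Once`. A sketch under the assumption that the release function returns the original context; the helper below is illustrative, not the actual code:

```go
package globallock

import (
	"context"
	"sync"
)

// newReleaseFunc builds a release function that is safe to call any number of
// times, whether or not the lock was actually acquired (unlock may be nil).
func newReleaseFunc(parent context.Context, cancel context.CancelFunc, unlock func()) func() context.Context {
	var once sync.Once
	return func() context.Context {
		once.Do(func() {
			if unlock != nil {
				unlock() // only unlock if the lock was actually acquired
			}
			cancel() // cancel the context returned by Lock
		})
		return parent // callers can continue with the original context
	}
}
```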

### Acquired locks could be lost

Unlike `sync.Mutex` which will be locked forever once acquired until
calling `Unlock`, in the new module, the acquired lock could be lost.

For example, the caller has acquired the lock and holds it for a long
time because the redis implementation keeps extending it automatically.
Then it loses the connection to the redis server, and it becomes
impossible to extend the lock anymore.

If the caller doesn't stop what it's doing, another instance that can
still connect to the redis server could acquire the lock and do the same
work, which could cause data inconsistency.

So the caller should be informed when this happens. The solution is to
return a new context which will be canceled if the lock is lost or
released:

```go
ctx, release, err := locker.Lock(ctx, key)
if err != nil {
    return err
}
defer release()
// ...
DoSomething(ctx)

// suppose the lock is lost at this point: ctx has been canceled.

// Failed, since ctx has been canceled.
DoSomethingElse(ctx)
```
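
This means long-running work guarded by the lock should check the returned context from time to time. A small illustration, where `doSomething`, `process`, and `items` are placeholder names, not part of the module:

```go
// doSomething aborts as soon as the lock's context is canceled,
// i.e. when the lock has been lost or released.
func doSomething(ctx context.Context, items []string) error {
	for _, item := range items {
		if err := ctx.Err(); err != nil {
			return err // stop: we may no longer hold the lock
		}
		process(item) // placeholder for the actual work
	}
	return nil
}
```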

### Multiple ways to use the lock

1. Regular way

```go
ctx, release, err := Lock(ctx, key)
if err != nil {
    return err
}
defer release()
// ...
```

2. Early release

```go
ctx, release, err := Lock(ctx, key)
if err != nil {
    return err
}
defer release()
// ...
// release the lock early and get the original context back
ctx = release()
// continue to do something else
// ...
```

3. Functional way

```go
if err := LockAndDo(ctx, key, func(ctx context.Context) error {
    // ...
    return nil
}); err != nil {
    return err
}
```
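
The functional form can presumably be a thin wrapper around the regular form; a sketch, assuming the `Lock` signature used above:

```go
// LockAndDo acquires the lock, runs f with the lock-aware context,
// and always releases the lock when f returns.
func LockAndDo(ctx context.Context, key string, f func(context.Context) error) error {
	ctx, release, err := Lock(ctx, key)
	if err != nil {
		return err
	}
	defer release()
	return f(ctx) // f's ctx is canceled if the lock is lost
}
```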
2024-08-26 22:27:57 +08:00

// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package globallock

import (
	"context"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/go-redsync/redsync/v4"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestLocker(t *testing.T) {
	t.Run("redis", func(t *testing.T) {
		url := "redis://127.0.0.1:6379/0"
		if os.Getenv("CI") == "" {
			// Make it possible to run tests against a local redis instance
			url = os.Getenv("TEST_REDIS_URL")
			if url == "" {
				t.Skip("TEST_REDIS_URL not set and not running in CI")
				return
			}
		}

		oldExpiry := redisLockExpiry
		redisLockExpiry = 5 * time.Second // make it shorter for testing
		defer func() {
			redisLockExpiry = oldExpiry
		}()

		locker := NewRedisLocker(url)
		testLocker(t, locker)
		testRedisLocker(t, locker.(*redisLocker))
		require.NoError(t, locker.(*redisLocker).Close())
	})
	t.Run("memory", func(t *testing.T) {
		locker := NewMemoryLocker()
		testLocker(t, locker)
		testMemoryLocker(t, locker.(*memoryLocker))
	})
}

func testLocker(t *testing.T, locker Locker) {
	t.Run("lock", func(t *testing.T) {
		parentCtx := context.Background()

		ctx, release, err := locker.Lock(parentCtx, "test")
		defer release()
		assert.NotEqual(t, parentCtx, ctx) // new context should be returned
		assert.NoError(t, err)

		func() {
			parentCtx, cancel := context.WithTimeout(context.Background(), time.Second)
			defer cancel()
			ctx, release, err := locker.Lock(parentCtx, "test")
			defer release()
			assert.Error(t, err)
			assert.Equal(t, parentCtx, ctx) // should return the same context
		}()

		release()
		assert.Error(t, ctx.Err())

		func() {
			_, release, err := locker.Lock(context.Background(), "test")
			defer release()
			assert.NoError(t, err)
		}()
	})

	t.Run("try lock", func(t *testing.T) {
		parentCtx := context.Background()

		ok, ctx, release, err := locker.TryLock(parentCtx, "test")
		defer release()
		assert.True(t, ok)
		assert.NotEqual(t, parentCtx, ctx) // new context should be returned
		assert.NoError(t, err)

		func() {
			parentCtx, cancel := context.WithTimeout(context.Background(), time.Second)
			defer cancel()
			ok, ctx, release, err := locker.TryLock(parentCtx, "test")
			defer release()
			assert.False(t, ok)
			assert.NoError(t, err)
			assert.Equal(t, parentCtx, ctx) // should return the same context
		}()

		release()
		assert.Error(t, ctx.Err())

		func() {
			ok, _, release, _ := locker.TryLock(context.Background(), "test")
			defer release()
			assert.True(t, ok)
		}()
	})

	t.Run("wait and acquired", func(t *testing.T) {
		ctx := context.Background()
		_, release, err := locker.Lock(ctx, "test")
		require.NoError(t, err)

		wg := &sync.WaitGroup{}
		wg.Add(1)
		go func() {
			defer wg.Done()
			started := time.Now()
			_, release, err := locker.Lock(context.Background(), "test") // should be blocked for seconds
			defer release()
			assert.Greater(t, time.Since(started), time.Second)
			assert.NoError(t, err)
		}()

		time.Sleep(2 * time.Second)
		release()

		wg.Wait()
	})

	t.Run("continue after release", func(t *testing.T) {
		ctx := context.Background()

		ctxBeforeLock := ctx
		ctx, release, err := locker.Lock(ctx, "test")
		require.NoError(t, err)
		assert.NoError(t, ctx.Err())
		assert.NotEqual(t, ctxBeforeLock, ctx)

		ctxBeforeRelease := ctx
		ctx = release()
		assert.NoError(t, ctx.Err())
		assert.Error(t, ctxBeforeRelease.Err())

		// so it can continue with ctx to do more work
	})

	t.Run("multiple release", func(t *testing.T) {
		ctx := context.Background()

		_, release1, err := locker.Lock(ctx, "test")
		require.NoError(t, err)

		release1()

		_, release2, err := locker.Lock(ctx, "test")
		defer release2()
		require.NoError(t, err)

		// Call release1 again,
		// it should not panic or block,
		// and it shouldn't affect the other lock
		release1()

		ok, _, release3, err := locker.TryLock(ctx, "test")
		defer release3()
		require.NoError(t, err)
		// It should NOT be able to acquire the lock;
		// otherwise, it means the lock has been released by release1
		assert.False(t, ok)
	})
}

// testMemoryLocker does specific tests for memoryLocker
func testMemoryLocker(t *testing.T, locker *memoryLocker) {
	// nothing to do
}

// testRedisLocker does specific tests for redisLocker
func testRedisLocker(t *testing.T, locker *redisLocker) {
	defer func() {
		// This case should be tested at the end.
		// Otherwise, it will affect other tests.
		t.Run("close", func(t *testing.T) {
			assert.NoError(t, locker.Close())
			_, _, err := locker.Lock(context.Background(), "test")
			assert.Error(t, err)
		})
	}()

	t.Run("failed extend", func(t *testing.T) {
		ctx, release, err := locker.Lock(context.Background(), "test")
		defer release()
		require.NoError(t, err)

		// It simulates that there are some problems with extending like network issues or redis server down.
		v, ok := locker.mutexM.Load("test")
		require.True(t, ok)
		m := v.(*redisMutex)
		_, _ = m.mutex.Unlock() // release it to make it impossible to extend

		select {
		case <-time.After(redisLockExpiry + time.Second):
			t.Errorf("lock should be expired")
		case <-ctx.Done():
			var errTaken *redsync.ErrTaken
			assert.ErrorAs(t, context.Cause(ctx), &errTaken)
		}
	})
}