|
| 1 | +package keymutex |
| 2 | + |
| 3 | +import ( |
| 4 | + "sync" |
| 5 | + "testing" |
| 6 | + |
| 7 | + "github.com/stretchr/testify/require" |
| 8 | +) |
| 9 | + |
| 10 | +func TestKeyMutex(t *testing.T) { |
| 11 | + var km KeyMutex[int] |
| 12 | + var wg sync.WaitGroup |
| 13 | + |
| 14 | + var sequence1, sequence2 []string |
| 15 | + key1 := 1 |
| 16 | + key2 := 2 |
| 17 | + |
| 18 | + km.Lock(1) |
| 19 | + |
| 20 | + // In the background, queue a sequence of events |
| 21 | + wg.Add(2) |
| 22 | + go func() { |
| 23 | + defer wg.Done() |
| 24 | + km.Lock(key1) |
| 25 | + require.Equal(t, 1, km.refCounts[key1], "refCounts[key1] should be 1") |
| 26 | + defer km.Unlock(key1) |
| 27 | + go func() { |
| 28 | + defer wg.Done() |
| 29 | + km.Lock(key1) |
| 30 | + defer km.Unlock(key1) |
| 31 | + sequence1 = append(sequence1, "C") |
| 32 | + }() |
| 33 | + sequence1 = append(sequence1, "B") |
| 34 | + km.Unlock(key1) |
| 35 | + }() |
| 36 | + |
| 37 | + // This should not deadlock, even though key1 is already locked |
| 38 | + km.Lock(key2) |
| 39 | + require.Equal(t, 1, km.refCounts[key2], "refCounts[key2] should be 1") |
| 40 | + sequence2 = append(sequence2, "A") |
| 41 | + km.Unlock(key2) |
| 42 | + key2RefCount, key2RefCountOk := km.refCounts[key2] |
| 43 | + require.Equal(t, 0, key2RefCount, "refCounts[key2] should be 0") |
| 44 | + require.Equal(t, false, key2RefCountOk, "refCounts[key2] should not exist") |
| 45 | + |
| 46 | + // Add to the sequence and unlock the key, allowing the goroutines to continue |
| 47 | + sequence1 = append(sequence1, "A") |
| 48 | + km.Unlock(key1) |
| 49 | + |
| 50 | + // Wait for the goroutines to finish |
| 51 | + wg.Wait() |
| 52 | + |
| 53 | + require.Equal(t, []string{"A", "B", "C"}, sequence1) |
| 54 | + require.Equal(t, []string{"A"}, sequence2) |
| 55 | + require.Equal(t, 0, km.refCounts[key1], "refCounts[key1] should be 0") |
| 56 | + require.Equal(t, 0, km.refCounts[key2], "refCounts[key2] should be 0") |
| 57 | +} |
| 58 | + |
// TestKeyMutexLocking queues several goroutines behind a single held key and
// verifies that none of them acquire it until the key is released, that the
// per-key refcount reflects the holder plus every waiter, and that the
// refcount returns to zero after all goroutines have unlocked.
func TestKeyMutexLocking(t *testing.T) {
	var km KeyMutex[int]
	var wgAcquiringLock sync.WaitGroup
	var wgAllLocksReleased sync.WaitGroup
	iterCount := 5
	var grantedCount int

	// Hold key 1 so every goroutine started below blocks waiting for it.
	km.Lock(1)

	chanUnsuspend := make(chan struct{})

	// Queue up a bunch of goroutines waiting to acquire the same lock
	for i := 0; i < iterCount; i++ {
		wgAcquiringLock.Add(1)
		wgAllLocksReleased.Add(1)
		go func() {
			defer wgAllLocksReleased.Done()
			defer km.Unlock(1)
			// chanWaiting presumably gets signaled by lockWithWaiting once
			// this goroutine is registered as a waiter — TODO confirm
			// against the lockWithWaiting implementation.
			chanWaiting := make(chan struct{})
			go func() {
				<-chanWaiting
				wgAcquiringLock.Done()
			}()
			km.lockWithWaiting(1, chanWaiting)
			// Hold the key until the main goroutine unsuspends everyone, so
			// acquisitions are forced to happen one at a time afterwards.
			<-chanUnsuspend
			// No extra synchronization needed: each increment runs while
			// holding km's lock on key 1, serializing the writes.
			grantedCount++
		}()
	}

	// Because we acquired the first lock, the grantedCount should still be zero here
	require.Equal(t, 0, grantedCount, "grantedCount should be 0")

	// Wait for all goroutines to be waiting to acquire the lock
	wgAcquiringLock.Wait()
	// One holder (this goroutine) plus iterCount waiters.
	require.Equal(t, iterCount+1, km.refCounts[1], "refCounts[1] should be %d", iterCount+1)

	// Allow all locks to be acquired sequentially
	km.Unlock(1)
	close(chanUnsuspend)

	// Wait until every goroutine has acquired and then released the lock.
	wgAllLocksReleased.Wait()
	require.Equal(t, 0, km.refCounts[1], "refCounts[1] should be 0")
	require.Equal(t, iterCount, grantedCount, "grantedCount should be %d", iterCount)
}