
Commit 0bd1bdd

chore: use testing/synctest in tests
Use Go's new `testing/synctest` package in tests.

Ref: https://go.dev/blog/testing-time

Signed-off-by: Noel Georgi <[email protected]>
1 parent: fe36b3d
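For context, the pattern this commit adopts, per the referenced blog post: inside a synctest "bubble" the clock is fake and advances only when every goroutine in the bubble is durably blocked, so time-driven code runs instantly and deterministically. A minimal sketch (assuming Go 1.25+, where `testing/synctest` is no longer experimental; the test name and durations are illustrative, not taken from this commit):

    package example_test

    import (
        "testing"
        "testing/synctest"
        "time"
    )

    func TestFakeTime(t *testing.T) {
        synctest.Test(t, func(t *testing.T) {
            start := time.Now() // fake clock inside the bubble

            // No other goroutine is runnable, so the bubble's clock
            // jumps forward instantly instead of sleeping for real.
            time.Sleep(5 * time.Second)

            // Fake time is exact: precisely 5s, with no scheduler jitter.
            if d := time.Since(start); d != 5*time.Second {
                t.Fatalf("expected exactly 5s of fake time, got %v", d)
            }
        })
    }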

File tree

7 files changed: 198 additions & 195 deletions


cmd/talosctl/cmd/mgmt/cluster/create/clusterops/configmaker/internal/makers/qemu.go

Lines changed: 1 addition & 1 deletion
@@ -13,12 +13,12 @@ import (
     "slices"
     "strings"

-    "github.com/ghodss/yaml"
     "github.com/siderolabs/gen/xslices"
     "github.com/siderolabs/go-blockdevice/v2/encryption"
     "github.com/siderolabs/go-pointer"
     "github.com/siderolabs/go-procfs/procfs"
     sideronet "github.com/siderolabs/net"
+    "gopkg.in/yaml.v3"

     "github.com/siderolabs/talos/cmd/talosctl/cmd/mgmt/cluster/create/clusterops"
     "github.com/siderolabs/talos/cmd/talosctl/cmd/mgmt/cluster/create/clusterops/configmaker/internal/siderolinkbuilder"

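The import swap above replaces github.com/ghodss/yaml with gopkg.in/yaml.v3. One behavioral difference worth noting: ghodss/yaml marshals via encoding/json and honors `json:` struct tags, while yaml.v3 uses `yaml:` tags and lowercases untagged field names. A minimal sketch of the swapped-in API (the nodeConfig type is hypothetical, not from this file):

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v3"
    )

    type nodeConfig struct { // hypothetical type for illustration
        Name  string `yaml:"name"`
        Ports []int  `yaml:"ports,omitempty"`
    }

    func main() {
        out, err := yaml.Marshal(nodeConfig{Name: "worker-1", Ports: []int{6443}})
        if err != nil {
            panic(err)
        }

        fmt.Print(string(out))
        // name: worker-1
        // ports:
        //     - 6443
    }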
go.mod

Lines changed: 0 additions & 1 deletion
@@ -59,7 +59,6 @@ require (
     github.com/aws/aws-sdk-go-v2/service/kms v1.45.1
     github.com/aws/smithy-go v1.23.0
     github.com/beevik/ntp v1.4.3
-    github.com/benbjohnson/clock v1.3.5 // project archived on 2023-05-18
     github.com/blang/semver/v4 v4.0.0
     github.com/cenkalti/backoff/v4 v4.3.0
     github.com/containerd/cgroups/v3 v3.0.5

go.sum

Lines changed: 0 additions & 2 deletions
@@ -87,8 +87,6 @@ github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE=
 github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
 github.com/beevik/ntp v1.4.3 h1:PlbTvE5NNy4QHmA4Mg57n7mcFTmr1W1j3gcK7L1lqho=
 github.com/beevik/ntp v1.4.3/go.mod h1:Unr8Zg+2dRn7d8bHFuehIMSvvUYssHMxW3Q5Nx4RW5Q=
-github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
-github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas=

internal/app/machined/pkg/controllers/network/internal/probe/probe.go

Lines changed: 4 additions & 9 deletions
@@ -11,8 +11,8 @@ import (
     "net"
     "sync"
     "syscall"
+    "time"

-    "github.com/benbjohnson/clock"
     "github.com/siderolabs/gen/channel"
     "go.uber.org/zap"

@@ -21,9 +21,8 @@ import (

 // Runner describes a state of running probe.
 type Runner struct {
-    ID    string
-    Spec  network.ProbeSpecSpec
-    Clock clock.Clock
+    ID   string
+    Spec network.ProbeSpecSpec

     cancel context.CancelFunc
     wg     sync.WaitGroup

@@ -61,11 +60,7 @@ func (runner *Runner) Stop() {
 func (runner *Runner) run(ctx context.Context, notifyCh chan<- Notification, logger *zap.Logger) {
     logger = logger.With(zap.String("probe", runner.ID))

-    if runner.Clock == nil {
-        runner.Clock = clock.New()
-    }
-
-    ticker := runner.Clock.Ticker(runner.Spec.Interval)
+    ticker := time.NewTicker(runner.Spec.Interval)
     defer ticker.Stop()

     consecutiveFailures := 0
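With synctest driving the test clock, the Runner no longer needs an injectable clock.Clock: a plain time.NewTicker runs on the real clock in production and on the bubble's fake clock under synctest, so tests exercise the production code path directly. A minimal sketch of why this works (illustrative test, not from this commit):

    package probe_test

    import (
        "testing"
        "testing/synctest"
        "time"
    )

    func TestTickerInBubble(t *testing.T) {
        synctest.Test(t, func(t *testing.T) {
            ticker := time.NewTicker(10 * time.Millisecond)
            defer ticker.Stop()

            for range 3 {
                // Receiving durably blocks the only goroutine in the
                // bubble, so the fake clock advances to the next tick
                // immediately; no real 10ms elapses.
                <-ticker.C
            }
        })
    }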

internal/app/machined/pkg/controllers/network/internal/probe/probe_test.go

Lines changed: 63 additions & 62 deletions
@@ -10,9 +10,9 @@ import (
     "net/http/httptest"
     "net/url"
     "testing"
+    "testing/synctest"
     "time"

-    "github.com/benbjohnson/clock"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
     "go.uber.org/zap/zaptest"

@@ -22,8 +22,6 @@ import (
 )

 func TestProbeHTTP(t *testing.T) {
-    t.Parallel()
-
     server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
         w.WriteHeader(http.StatusOK)
     }))

@@ -43,7 +41,7 @@ func TestProbeHTTP(t *testing.T) {
         },
     }

-    ctx, cancel := context.WithTimeout(t.Context(), 3*time.Second)
+    ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
     t.Cleanup(cancel)

     notifyCh := make(chan probe.Notification)

@@ -80,73 +78,76 @@ func TestProbeHTTP(t *testing.T) {
 }

 func TestProbeConsecutiveFailures(t *testing.T) {
-    t.Parallel()
-
-    server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-        w.WriteHeader(http.StatusOK)
-    }))
-    t.Cleanup(server.Close)
-
-    u, err := url.Parse(server.URL)
-    require.NoError(t, err)
-
-    mockClock := clock.NewMock()
-
-    p := probe.Runner{
-        ID: "consecutive-failures",
-        Spec: network.ProbeSpecSpec{
-            Interval:         10 * time.Millisecond,
-            FailureThreshold: 3,
-            TCP: network.TCPProbeSpec{
-                Endpoint: u.Host,
-                Timeout:  time.Second,
+    // Use synctest.Test to run the test in a controlled time bubble.
+    // This allows us to test time-dependent behavior without actual delays,
+    // making the test both faster and more deterministic.
+    synctest.Test(t, func(t *testing.T) {
+        server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+            w.WriteHeader(http.StatusOK)
+        }))
+        defer server.Close()
+
+        u, err := url.Parse(server.URL)
+        require.NoError(t, err)
+
+        p := probe.Runner{
+            ID: "consecutive-failures",
+            Spec: network.ProbeSpecSpec{
+                Interval:         10 * time.Millisecond,
+                FailureThreshold: 3,
+                TCP: network.TCPProbeSpec{
+                    Endpoint: u.Host,
+                    Timeout:  time.Second,
+                },
             },
-        },
-        Clock: mockClock,
-    }
+        }

-    ctx, cancel := context.WithTimeout(t.Context(), 3*time.Second)
-    t.Cleanup(cancel)
+        ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+        defer cancel()

-    notifyCh := make(chan probe.Notification)
+        notifyCh := make(chan probe.Notification)

-    p.Start(ctx, notifyCh, zaptest.NewLogger(t))
-    t.Cleanup(p.Stop)
-
-    // first iteration should succeed
-    assert.Equal(t, probe.Notification{
-        ID: "consecutive-failures",
-        Status: network.ProbeStatusSpec{
-            Success: true,
-        },
-    }, <-notifyCh)
+        p.Start(ctx, notifyCh, zaptest.NewLogger(t))
+        defer p.Stop()

-    // stop the test server, probe should fail
-    server.Close()
-
-    for range p.Spec.FailureThreshold - 1 {
-        // probe should fail, but no notification should be sent yet (failure threshold not reached)
-        mockClock.Add(p.Spec.Interval)
+        // first iteration should succeed
+        assert.Equal(t, probe.Notification{
+            ID: "consecutive-failures",
+            Status: network.ProbeStatusSpec{
+                Success: true,
+            },
+        }, <-notifyCh)

-        select {
-        case ev := <-notifyCh:
-            require.Fail(t, "unexpected notification", "got: %v", ev)
-        case <-time.After(100 * time.Millisecond):
+        // stop the test server, probe should fail
+        server.Close()
+
+        for range p.Spec.FailureThreshold - 1 {
+            // probe should fail, but no notification should be sent yet (failure threshold not reached)
+            // synctest.Wait() waits until all goroutines in the bubble are durably blocked,
+            // which happens when the ticker in the probe runner is waiting for the next interval
+            synctest.Wait()
+
+            select {
+            case ev := <-notifyCh:
+                require.Fail(t, "unexpected notification", "got: %v", ev)
+            default:
+                // Expected: no notification yet
+            }
         }
-    }

-    // advance clock to trigger another failure(s)
-    mockClock.Add(p.Spec.Interval)
+        // wait for next interval to trigger failure notification
+        synctest.Wait()

-    notify := <-notifyCh
-    assert.Equal(t, "consecutive-failures", notify.ID)
-    assert.False(t, notify.Status.Success)
-    assert.Contains(t, notify.Status.LastError, "connection refused")
+        notify := <-notifyCh
+        assert.Equal(t, "consecutive-failures", notify.ID)
+        assert.False(t, notify.Status.Success)
+        assert.Contains(t, notify.Status.LastError, "connection refused")

-    // advance clock to trigger another failure(s)
-    mockClock.Add(p.Spec.Interval)
+        // wait for next interval to trigger another failure notification
+        synctest.Wait()

-    notify = <-notifyCh
-    assert.Equal(t, "consecutive-failures", notify.ID)
-    assert.False(t, notify.Status.Success)
+        notify = <-notifyCh
+        assert.Equal(t, "consecutive-failures", notify.ID)
+        assert.False(t, notify.Status.Success)
+    })
 }
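The key change above: instead of manually advancing a mock clock and polling with time.After, the test calls synctest.Wait, which returns only once every other goroutine in the bubble is durably blocked. At that point a non-blocking receive is race-free, because no goroutine can be mid-send. A self-contained sketch of the pattern (the worker and channel are hypothetical, not from this commit):

    package example_test

    import (
        "testing"
        "testing/synctest"
        "time"
    )

    func TestQuietUntilDeadline(t *testing.T) {
        synctest.Test(t, func(t *testing.T) {
            resultCh := make(chan int)

            go func() {
                time.Sleep(time.Hour) // produces nothing for an hour of fake time
                resultCh <- 42
            }()

            // Wait returns once the worker is durably blocked in Sleep.
            synctest.Wait()

            select {
            case v := <-resultCh:
                t.Fatalf("unexpected early value: %v", v)
            default:
                // expected: nothing produced yet
            }

            // Blocking here lets the fake clock jump the hour forward.
            if v := <-resultCh; v != 42 {
                t.Fatalf("unexpected value: %v", v)
            }
        })
    }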

internal/app/machined/pkg/controllers/runtime/cri_image_gc.go

Lines changed: 5 additions & 11 deletions
@@ -10,7 +10,6 @@ import (
     "fmt"
     "time"

-    "github.com/benbjohnson/clock"
     containerd "github.com/containerd/containerd/v2/client"
     "github.com/containerd/containerd/v2/core/images"
     "github.com/containerd/containerd/v2/pkg/namespaces"

@@ -38,7 +37,6 @@ const ImageGCGracePeriod = 4 * ImageCleanupInterval
 // CRIImageGCController renders manifests based on templates and config/secrets.
 type CRIImageGCController struct {
     ImageServiceProvider func() (ImageServiceProvider, error)
-    Clock                clock.Clock

     imageFirstSeenUnreferenced map[string]time.Time
 }

@@ -114,10 +112,6 @@ func (ctrl *CRIImageGCController) Run(ctx context.Context, r controller.Runtime,
         ctrl.ImageServiceProvider = defaultImageServiceProvider
     }

-    if ctrl.Clock == nil {
-        ctrl.Clock = clock.New()
-    }
-
     if ctrl.imageFirstSeenUnreferenced == nil {
         ctrl.imageFirstSeenUnreferenced = map[string]time.Time{}
     }

@@ -128,7 +122,7 @@ func (ctrl *CRIImageGCController) Run(ctx context.Context, r controller.Runtime,
         imageServiceProvider ImageServiceProvider
     )

-    ticker := ctrl.Clock.Ticker(ImageCleanupInterval)
+    ticker := time.NewTicker(ImageCleanupInterval)
     defer ticker.Stop()

     defer func() {

@@ -179,7 +173,7 @@ func (ctrl *CRIImageGCController) Run(ctx context.Context, r controller.Runtime,

     kubeletSpec, err := safe.ReaderGet[*k8s.KubeletSpec](ctx, r, resource.NewMetadata(k8s.NamespaceName, k8s.KubeletSpecType, k8s.KubeletID, resource.VersionUndefined))
     if err != nil && !state.IsNotFoundError(err) {
-        return fmt.Errorf("error getting etcd spec: %w", err)
+        return fmt.Errorf("error getting kubelet spec: %w", err)
     }

     if kubeletSpec != nil {

@@ -285,14 +279,14 @@ func (ctrl *CRIImageGCController) cleanup(ctx context.Context, logger *zap.Logge
     }

     if _, ok := ctrl.imageFirstSeenUnreferenced[image.Name]; !ok {
-        ctrl.imageFirstSeenUnreferenced[image.Name] = ctrl.Clock.Now()
+        ctrl.imageFirstSeenUnreferenced[image.Name] = time.Now()
     }

     // calculate image age two ways, and pick the minimum:
     // * as CRI reports it, which is the time image got pulled
     // * as we see it, this means the image won't be deleted until it reaches the age of ImageGCGracePeriod from the moment it became unreferenced
-    imageAgeCRI := ctrl.Clock.Since(image.CreatedAt)
-    imageAgeInternal := ctrl.Clock.Since(ctrl.imageFirstSeenUnreferenced[image.Name])
+    imageAgeCRI := time.Since(image.CreatedAt)
+    imageAgeInternal := time.Since(ctrl.imageFirstSeenUnreferenced[image.Name])

     imageAge := min(imageAgeCRI, imageAgeInternal)
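The same principle applies here: time.Now and time.Since inside the controller run on the bubble's fake clock under synctest, so the grace-period bookkeeping can be tested against the real code path rather than a mock. A minimal sketch (the constant and assertions are illustrative, not from this commit; the real ImageGCGracePeriod is 4 * ImageCleanupInterval):

    package example_test

    import (
        "testing"
        "testing/synctest"
        "time"
    )

    func TestGracePeriodElapses(t *testing.T) {
        synctest.Test(t, func(t *testing.T) {
            const gracePeriod = 4 * time.Hour // stand-in for ImageGCGracePeriod

            firstSeen := time.Now() // moment the image became unreferenced

            // Just short of the grace period: the image must survive.
            time.Sleep(gracePeriod - time.Minute)
            if time.Since(firstSeen) >= gracePeriod {
                t.Fatal("image should still be within its grace period")
            }

            // Cross the boundary: now it is eligible for collection.
            time.Sleep(2 * time.Minute)
            if time.Since(firstSeen) < gracePeriod {
                t.Fatal("grace period should have elapsed")
            }
        })
    }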

0 commit comments
