diff --git a/coderd/autobuild/lifecycle_executor.go b/coderd/autobuild/lifecycle_executor.go
index 16072e6517125..945b5f8c7cd6d 100644
--- a/coderd/autobuild/lifecycle_executor.go
+++ b/coderd/autobuild/lifecycle_executor.go
@@ -29,6 +29,7 @@ import (
 	"github.com/coder/coder/v2/coderd/database/provisionerjobs"
 	"github.com/coder/coder/v2/coderd/database/pubsub"
 	"github.com/coder/coder/v2/coderd/notifications"
+	"github.com/coder/coder/v2/coderd/provisionerdserver"
 	"github.com/coder/coder/v2/coderd/schedule"
 	"github.com/coder/coder/v2/coderd/wsbuilder"
 	"github.com/coder/coder/v2/codersdk"
@@ -132,6 +133,39 @@ func (e *Executor) Run() {
 	})
 }
 
+// hasValidProvisioner checks whether there is at least one valid (non-stale, correctly tagged) provisioner
+// based on time t and the tags map (such as from a templateVersionJob).
+func (e *Executor) hasValidProvisioner(ctx context.Context, tx database.Store, t time.Time, ws database.Workspace, tags map[string]string) (bool, error) {
+	queryParams := database.GetProvisionerDaemonsByOrganizationParams{
+		OrganizationID: ws.OrganizationID,
+		WantTags:       tags,
+	}
+
+	// nolint: gocritic // The user (in this case, the user/context for autostart builds) may not have the full
+	// permissions to read provisioner daemons, but we need to check if there's any for the job prior to the
+	// execution of the job via autostart to fix: https://github.com/coder/coder/issues/17941
+	provisionerDaemons, err := tx.GetProvisionerDaemonsByOrganization(dbauthz.AsSystemReadProvisionerDaemons(ctx), queryParams)
+	if err != nil {
+		return false, xerrors.Errorf("get provisioner daemons: %w", err)
+	}
+
+	logger := e.log.With(slog.F("tags", tags))
+	// Check if any provisioners are active (not stale)
+	for _, pd := range provisionerDaemons {
+		if pd.LastSeenAt.Valid {
+			age := t.Sub(pd.LastSeenAt.Time)
+			if age <= provisionerdserver.StaleInterval {
+				logger.Debug(ctx, "hasValidProvisioner: found active provisioner",
+					slog.F("daemon_id", pd.ID),
+				)
+				return true, nil
+			}
+		}
+	}
+	logger.Debug(ctx, "hasValidProvisioner: no active provisioners found")
+	return false, nil
+}
+
 func (e *Executor) runOnce(t time.Time) Stats {
 	stats := Stats{
 		Transitions: make(map[uuid.UUID]database.WorkspaceTransition),
@@ -281,6 +315,22 @@ func (e *Executor) runOnce(t time.Time) Stats {
 				return nil
 			}
 
+			// Get the template version job to access tags
+			templateVersionJob, err := tx.GetProvisionerJobByID(e.ctx, activeTemplateVersion.JobID)
+			if err != nil {
+				return xerrors.Errorf("get template version job: %w", err)
+			}
+
+			// Before creating the workspace build, check for available provisioners
+			hasProvisioners, err := e.hasValidProvisioner(e.ctx, tx, t, ws, templateVersionJob.Tags)
+			if err != nil {
+				return xerrors.Errorf("check provisioner availability: %w", err)
+			}
+			if !hasProvisioners {
+				log.Warn(e.ctx, "skipping autostart - no available provisioners")
+				return nil // Skip this workspace
+			}
+
 			if nextTransition != "" {
 				builder := wsbuilder.New(ws, nextTransition, *e.buildUsageChecker.Load()).
 					SetLastWorkspaceBuildInTx(&latestBuild).
diff --git a/coderd/autobuild/lifecycle_executor_test.go b/coderd/autobuild/lifecycle_executor_test.go
index babca5431d6b7..df7a7ad231e59 100644
--- a/coderd/autobuild/lifecycle_executor_test.go
+++ b/coderd/autobuild/lifecycle_executor_test.go
@@ -9,6 +9,7 @@ import (
 	"github.com/coder/coder/v2/coderd/database/dbgen"
 	"github.com/coder/coder/v2/coderd/database/pubsub"
+	"github.com/coder/coder/v2/coderd/provisionerdserver"
 	"github.com/coder/coder/v2/coderd/rbac"
 
 	"github.com/coder/quartz"
@@ -36,14 +37,18 @@ import (
 	"github.com/coder/coder/v2/testutil"
 )
 
+func TestMain(m *testing.M) {
+	goleak.VerifyTestMain(m, testutil.GoleakOptions...)
+}
+
 func TestExecutorAutostartOK(t *testing.T) {
 	t.Parallel()
 
 	var (
-		sched   = mustSchedule(t, "CRON_TZ=UTC 0 * * * *")
-		tickCh  = make(chan time.Time)
-		statsCh = make(chan autobuild.Stats)
-		client  = coderdtest.New(t, &coderdtest.Options{
+		sched      = mustSchedule(t, "CRON_TZ=UTC 0 * * * *")
+		tickCh     = make(chan time.Time)
+		statsCh    = make(chan autobuild.Stats)
+		client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{
 			AutobuildTicker:          tickCh,
 			IncludeProvisionerDaemon: true,
 			AutobuildStats:           statsCh,
@@ -55,10 +60,13 @@ func TestExecutorAutostartOK(t *testing.T) {
 	)
 	// Given: workspace is stopped
 	workspace = coderdtest.MustTransitionWorkspace(t, client, workspace.ID, codersdk.WorkspaceTransitionStart, codersdk.WorkspaceTransitionStop)
-
+	p, err := coderdtest.GetProvisionerForTags(db, time.Now(), workspace.OrganizationID, map[string]string{})
+	require.NoError(t, err)
 	// When: the autobuild executor ticks after the scheduled time
 	go func() {
-		tickCh <- sched.Next(workspace.LatestBuild.CreatedAt)
+		tickTime := sched.Next(workspace.LatestBuild.CreatedAt)
+		coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
+		tickCh <- tickTime
 		close(tickCh)
 	}()
 
@@ -114,8 +122,11 @@ func TestMultipleLifecycleExecutors(t *testing.T) {
 	// Have the workspace stopped so we can perform an autostart
 	workspace = coderdtest.MustTransitionWorkspace(t, clientA, workspace.ID, codersdk.WorkspaceTransitionStart, codersdk.WorkspaceTransitionStop)
 
+	p, err := coderdtest.GetProvisionerForTags(db, time.Now(), workspace.OrganizationID, nil)
+	require.NoError(t, err)
 	// Get both clients to perform a lifecycle execution tick
 	next := sched.Next(workspace.LatestBuild.CreatedAt)
+	coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, next)
 
 	startCh := make(chan struct{})
 	go func() {
@@ -187,14 +198,14 @@ func TestExecutorAutostartTemplateUpdated(t *testing.T) {
 		t.Run(tc.name, func(t *testing.T) {
 			t.Parallel()
 			var (
-				sched    = mustSchedule(t, "CRON_TZ=UTC 0 * * * *")
-				ctx      = context.Background()
-				err      error
-				tickCh   = make(chan time.Time)
-				statsCh  = make(chan autobuild.Stats)
-				logger   = slogtest.Make(t, &slogtest.Options{IgnoreErrors: !tc.expectStart}).Leveled(slog.LevelDebug)
-				enqueuer = notificationstest.FakeEnqueuer{}
-				client   = coderdtest.New(t, &coderdtest.Options{
+				sched      = mustSchedule(t, "CRON_TZ=UTC 0 * * * *")
+				ctx        = context.Background()
+				err        error
+				tickCh     = make(chan time.Time)
+				statsCh    = make(chan autobuild.Stats)
+				logger     = slogtest.Make(t, &slogtest.Options{IgnoreErrors: !tc.expectStart}).Leveled(slog.LevelDebug)
+				enqueuer   = notificationstest.FakeEnqueuer{}
+				client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{
 					AutobuildTicker:          tickCh,
 					IncludeProvisionerDaemon: true,
 					AutobuildStats:           statsCh,
@@ -247,10 +258,15 @@ func TestExecutorAutostartTemplateUpdated(t *testing.T) {
 				},
 			))
 
+			p, err := coderdtest.GetProvisionerForTags(db, time.Now(), workspace.OrganizationID, nil)
+			require.NoError(t, err)
+
 			t.Log("sending autobuild tick")
 			// When: the autobuild executor ticks after the scheduled time
 			go func() {
-				tickCh <- sched.Next(workspace.LatestBuild.CreatedAt)
+				tickTime := sched.Next(workspace.LatestBuild.CreatedAt)
+				coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
+				tickCh <- tickTime
 				close(tickCh)
 			}()
 
@@ -414,9 +430,9 @@ func TestExecutorAutostopOK(t *testing.T) {
 	t.Parallel()
 
 	var (
-		tickCh  = make(chan time.Time)
-		statsCh = make(chan autobuild.Stats)
-		client  = coderdtest.New(t, &coderdtest.Options{
+		tickCh     = make(chan time.Time)
+		statsCh    = make(chan autobuild.Stats)
+		client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{
 			AutobuildTicker:          tickCh,
 			IncludeProvisionerDaemon: true,
 			AutobuildStats:           statsCh,
@@ -428,9 +444,14 @@ func TestExecutorAutostopOK(t *testing.T) {
 	require.Equal(t, codersdk.WorkspaceTransitionStart, workspace.LatestBuild.Transition)
 	require.NotZero(t, workspace.LatestBuild.Deadline)
 
+	p, err := coderdtest.GetProvisionerForTags(db, time.Now(), workspace.OrganizationID, nil)
+	require.NoError(t, err)
+
 	// When: the autobuild executor ticks *after* the deadline:
 	go func() {
-		tickCh <- workspace.LatestBuild.Deadline.Time.Add(time.Minute)
+		tickTime := workspace.LatestBuild.Deadline.Time.Add(time.Minute)
+		coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
+		tickCh <- tickTime
 		close(tickCh)
 	}()
 
@@ -449,10 +470,10 @@ func TestExecutorAutostopExtend(t *testing.T) {
 	t.Parallel()
 
 	var (
-		ctx     = context.Background()
-		tickCh  = make(chan time.Time)
-		statsCh = make(chan autobuild.Stats)
-		client  = coderdtest.New(t, &coderdtest.Options{
+		ctx        = context.Background()
+		tickCh     = make(chan time.Time)
+		statsCh    = make(chan autobuild.Stats)
+		client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{
 			AutobuildTicker:          tickCh,
 			IncludeProvisionerDaemon: true,
 			AutobuildStats:           statsCh,
@@ -472,9 +493,14 @@ func TestExecutorAutostopExtend(t *testing.T) {
 	})
 	require.NoError(t, err, "extend workspace deadline")
 
+	p, err := coderdtest.GetProvisionerForTags(db, time.Now(), workspace.OrganizationID, nil)
+	require.NoError(t, err)
+
 	// When: the autobuild executor ticks *after* the original deadline:
 	go func() {
-		tickCh <- originalDeadline.Time.Add(time.Minute)
+		tickTime := originalDeadline.Time.Add(time.Minute)
+		coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
+		tickCh <- tickTime
 	}()
 
 	// Then: nothing should happen and the workspace should stay running
@@ -484,7 +510,9 @@
 	// When: the autobuild executor ticks after the *new* deadline:
 	go func() {
-		tickCh <- newDeadline.Add(time.Minute)
+		tickTime := newDeadline.Add(time.Minute)
+		coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
+		tickCh <- tickTime
 		close(tickCh)
 	}()
 
@@ -666,9 +694,9 @@ func TestExecuteAutostopSuspendedUser(t *testing.T) {
 	t.Parallel()
 
 	var (
-		tickCh  = make(chan time.Time)
-		statsCh = make(chan autobuild.Stats)
-		client  = coderdtest.New(t, &coderdtest.Options{
+		tickCh     = make(chan time.Time)
+		statsCh    = make(chan autobuild.Stats)
+		client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{
 			AutobuildTicker:          tickCh,
 			IncludeProvisionerDaemon: true,
 			AutobuildStats:           statsCh,
@@ -676,6 +704,8 @@ func TestExecuteAutostopSuspendedUser(t *testing.T) {
 	)
 
 	admin := coderdtest.CreateFirstUser(t, client)
+	// Wait for provisioner to be available
+	coderdtest.MustWaitForAnyProvisioner(t, db)
 	version := coderdtest.CreateTemplateVersion(t, client, admin.OrganizationID, nil)
 	coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
 	template := coderdtest.CreateTemplate(t, client, admin.OrganizationID, version.ID)
@@ -753,17 +783,17 @@ func TestExecutorAutostartMultipleOK(t *testing.T) {
 	t.Parallel()
 
 	var (
-		sched    = mustSchedule(t, "CRON_TZ=UTC 0 * * * *")
-		tickCh   = make(chan time.Time)
-		tickCh2  = make(chan time.Time)
-		statsCh1 = make(chan autobuild.Stats)
-		statsCh2 = make(chan autobuild.Stats)
-		client   = coderdtest.New(t, &coderdtest.Options{
+		sched      = mustSchedule(t, "CRON_TZ=UTC 0 * * * *")
+		tickCh     = make(chan time.Time)
+		tickCh2    = make(chan time.Time)
+		statsCh1   = make(chan autobuild.Stats)
+		statsCh2   = make(chan autobuild.Stats)
+		client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{
 			AutobuildTicker:          tickCh,
 			IncludeProvisionerDaemon: true,
 			AutobuildStats:           statsCh1,
 		})
-		_ = coderdtest.New(t, &coderdtest.Options{
+		_, _ = coderdtest.NewWithDatabase(t, &coderdtest.Options{
 			AutobuildTicker:          tickCh2,
 			IncludeProvisionerDaemon: true,
 			AutobuildStats:           statsCh2,
@@ -776,10 +806,15 @@ func TestExecutorAutostartMultipleOK(t *testing.T) {
 	// Given: workspace is stopped
 	workspace = coderdtest.MustTransitionWorkspace(t, client, workspace.ID, codersdk.WorkspaceTransitionStart, codersdk.WorkspaceTransitionStop)
 
+	p, err := coderdtest.GetProvisionerForTags(db, time.Now(), workspace.OrganizationID, nil)
+	require.NoError(t, err)
+
 	// When: the autobuild executor ticks past the scheduled time
 	go func() {
-		tickCh <- sched.Next(workspace.LatestBuild.CreatedAt)
-		tickCh2 <- sched.Next(workspace.LatestBuild.CreatedAt)
+		tickTime := sched.Next(workspace.LatestBuild.CreatedAt)
+		coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
+		tickCh <- tickTime
+		tickCh2 <- tickTime
 		close(tickCh)
 		close(tickCh2)
 	}()
@@ -809,10 +844,10 @@ func TestExecutorAutostartWithParameters(t *testing.T) {
 	)
 
 	var (
-		sched   = mustSchedule(t, "CRON_TZ=UTC 0 * * * *")
-		tickCh  = make(chan time.Time)
-		statsCh = make(chan autobuild.Stats)
-		client  = coderdtest.New(t, &coderdtest.Options{
+		sched      = mustSchedule(t, "CRON_TZ=UTC 0 * * * *")
+		tickCh     = make(chan time.Time)
+		statsCh    = make(chan autobuild.Stats)
+		client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{
 			AutobuildTicker:          tickCh,
 			IncludeProvisionerDaemon: true,
 			AutobuildStats:           statsCh,
@@ -841,9 +876,14 @@ func TestExecutorAutostartWithParameters(t *testing.T) {
 	// Given: workspace is stopped
 	workspace = coderdtest.MustTransitionWorkspace(t, client, workspace.ID, codersdk.WorkspaceTransitionStart, codersdk.WorkspaceTransitionStop)
 
+	p, err := coderdtest.GetProvisionerForTags(db, time.Now(), workspace.OrganizationID, nil)
+	require.NoError(t, err)
+
 	// When: the autobuild executor ticks after the scheduled time
 	go func() {
-		tickCh <- sched.Next(workspace.LatestBuild.CreatedAt)
+		tickTime := sched.Next(workspace.LatestBuild.CreatedAt)
+		coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
+		tickCh <- tickTime
 		close(tickCh)
 	}()
 
@@ -911,7 +951,7 @@ func TestExecutorAutostopTemplateDisabled(t *testing.T) {
 		tickCh  = make(chan time.Time)
 		statsCh = make(chan autobuild.Stats)
 
-		client = coderdtest.New(t, &coderdtest.Options{
+		client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{
 			AutobuildTicker:          tickCh,
 			IncludeProvisionerDaemon: true,
 			AutobuildStats:           statsCh,
@@ -935,9 +975,14 @@ func TestExecutorAutostopTemplateDisabled(t *testing.T) {
 	// Then: the deadline should be set to the template default TTL
 	assert.WithinDuration(t, workspace.LatestBuild.CreatedAt.Add(time.Hour), workspace.LatestBuild.Deadline.Time, time.Minute)
 
+	p, err := coderdtest.GetProvisionerForTags(db, time.Now(), workspace.OrganizationID, nil)
+	require.NoError(t, err)
+
 	// When: the autobuild executor ticks after the workspace setting, but before the template setting:
 	go func() {
-		tickCh <- workspace.LatestBuild.Job.CompletedAt.Add(45 * time.Minute)
+		tickTime := workspace.LatestBuild.Job.CompletedAt.Add(45 * time.Minute)
+		coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
+		tickCh <- tickTime
 	}()
 
 	// Then: nothing should happen
@@ -947,7 +992,9 @@
 	// When: the autobuild executor ticks after the template setting:
 	go func() {
-		tickCh <- workspace.LatestBuild.Job.CompletedAt.Add(61 * time.Minute)
+		tickTime := workspace.LatestBuild.Job.CompletedAt.Add(61 * time.Minute)
+		coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
+		tickCh <- tickTime
 		close(tickCh)
 	}()
 
@@ -976,6 +1023,9 @@ func TestExecutorRequireActiveVersion(t *testing.T) {
 			TemplateScheduleStore: schedule.NewAGPLTemplateScheduleStore(),
 		})
 	)
+	// Wait for provisioner to be available
+	coderdtest.MustWaitForAnyProvisioner(t, db)
+
 	ctx := testutil.Context(t, testutil.WaitShort)
 	owner := coderdtest.CreateFirstUser(t, ownerClient)
 	me, err := ownerClient.User(ctx, codersdk.Me)
@@ -1012,7 +1062,13 @@ func TestExecutorRequireActiveVersion(t *testing.T) {
 		req.TemplateVersionID = inactiveVersion.ID
 	})
 	require.Equal(t, inactiveVersion.ID, ws.LatestBuild.TemplateVersionID)
-	ticker <- sched.Next(ws.LatestBuild.CreatedAt)
+
+	p, err := coderdtest.GetProvisionerForTags(db, time.Now(), ws.OrganizationID, nil)
+	require.NoError(t, err)
+
+	tickTime := sched.Next(ws.LatestBuild.CreatedAt)
+	coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
+	ticker <- tickTime
 	stats := <-statCh
 	require.Len(t, stats.Transitions, 1)
 
@@ -1132,7 +1188,7 @@ func TestNotifications(t *testing.T) {
 			statCh         = make(chan autobuild.Stats)
 			notifyEnq      = notificationstest.FakeEnqueuer{}
 			timeTilDormant = time.Minute
-			client         = coderdtest.New(t, &coderdtest.Options{
+			client, db     = coderdtest.NewWithDatabase(t, &coderdtest.Options{
 				AutobuildTicker:          ticker,
 				AutobuildStats:           statCh,
 				IncludeProvisionerDaemon: true,
@@ -1169,9 +1225,14 @@ func TestNotifications(t *testing.T) {
 		workspace = coderdtest.MustTransitionWorkspace(t, client, workspace.ID, codersdk.WorkspaceTransitionStart, codersdk.WorkspaceTransitionStop)
 		_ = coderdtest.AwaitWorkspaceBuildJobCompleted(t, userClient, workspace.LatestBuild.ID)
 
+		p, err := coderdtest.GetProvisionerForTags(db, time.Now(), workspace.OrganizationID, nil)
+		require.NoError(t, err)
+
 		// Wait for workspace to become dormant
 		notifyEnq.Clear()
-		ticker <- workspace.LastUsedAt.Add(timeTilDormant * 3)
+		tickTime := workspace.LastUsedAt.Add(timeTilDormant * 3)
+		coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
+		ticker <- tickTime
 		_ = testutil.TryReceive(testutil.Context(t, testutil.WaitShort), t, statCh)
 
 		// Check that the workspace is dormant
@@ -1245,9 +1306,14 @@ func TestExecutorPrebuilds(t *testing.T) {
 		require.Equal(t, codersdk.WorkspaceTransitionStart, prebuild.LatestBuild.Transition)
 		require.NotZero(t, prebuild.LatestBuild.Deadline)
 
+		p, err := coderdtest.GetProvisionerForTags(db, time.Now(), prebuild.OrganizationID, nil)
+		require.NoError(t, err)
+
 		// When: the autobuild executor ticks *after* the deadline:
 		go func() {
-			tickCh <- prebuild.LatestBuild.Deadline.Time.Add(time.Minute)
+			tickTime := prebuild.LatestBuild.Deadline.Time.Add(time.Minute)
+			coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
+			tickCh <- tickTime
 		}()
 
 		// Then: the prebuilt workspace should remain in a start transition
@@ -1272,7 +1338,9 @@ func TestExecutorPrebuilds(t *testing.T) {
 		// When: the autobuild executor ticks *after* the deadline:
 		go func() {
-			tickCh <- workspace.LatestBuild.Deadline.Time.Add(time.Minute)
+			tickTime := workspace.LatestBuild.Deadline.Time.Add(time.Minute)
+			coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
+			tickCh <- tickTime
 			close(tickCh)
 		}()
 
@@ -1560,6 +1628,25 @@ func mustProvisionWorkspace(t *testing.T, client *codersdk.Client, mut ...func(*
 	return coderdtest.MustWorkspace(t, client, ws.ID)
 }
 
+// mustProvisionWorkspaceWithProvisionerTags creates a workspace with a template version that has specific provisioner tags
+func mustProvisionWorkspaceWithProvisionerTags(t *testing.T, client *codersdk.Client, provisionerTags map[string]string, mut ...func(*codersdk.CreateWorkspaceRequest)) codersdk.Workspace {
+	t.Helper()
+	user := coderdtest.CreateFirstUser(t, client)
+
+	// Create template version with specific provisioner tags
+	version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil, func(request *codersdk.CreateTemplateVersionRequest) {
+		request.ProvisionerTags = provisionerTags
+	})
+	coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
+	t.Logf("template version %s job has completed with provisioner tags %v", version.ID, provisionerTags)
+
+	template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
+
+	ws := coderdtest.CreateWorkspace(t, client, template.ID, mut...)
+	coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, ws.LatestBuild.ID)
+	return coderdtest.MustWorkspace(t, client, ws.ID)
+}
+
 func mustProvisionWorkspaceWithParameters(t *testing.T, client *codersdk.Client, richParameters []*proto.RichParameter, mut ...func(*codersdk.CreateWorkspaceRequest)) codersdk.Workspace {
 	t.Helper()
 	user := coderdtest.CreateFirstUser(t, client)
@@ -1597,6 +1684,79 @@ func mustWorkspaceParameters(t *testing.T, client *codersdk.Client, workspaceID
 	require.NotEmpty(t, buildParameters)
 }
 
-func TestMain(m *testing.M) {
-	goleak.VerifyTestMain(m, testutil.GoleakOptions...)
+func TestExecutorAutostartSkipsWhenNoProvisionersAvailable(t *testing.T) {
+	t.Parallel()
+
+	var (
+		sched   = mustSchedule(t, "CRON_TZ=UTC 0 * * * *")
+		tickCh  = make(chan time.Time)
+		statsCh = make(chan autobuild.Stats)
+	)
+
+	// Use provisioner daemon tags so we can test `hasValidProvisioner` more thoroughly.
+	// We can't overwrite owner or scope as there's a `provisionersdk.MutateTags` function that has restrictions on those.
+	provisionerDaemonTags := map[string]string{"test-tag": "asdf"}
+	t.Logf("Setting provisioner daemon tags: %v", provisionerDaemonTags)
+
+	db, ps := dbtestutil.NewDB(t)
+	client, _, api := coderdtest.NewWithAPI(t, &coderdtest.Options{
+		Database:                 db,
+		Pubsub:                   ps,
+		IncludeProvisionerDaemon: false,
+		AutobuildTicker:          tickCh,
+		AutobuildStats:           statsCh,
+	})
+
+	daemon1Closer := coderdtest.NewTaggedProvisionerDaemon(t, api, "name", provisionerDaemonTags)
+	t.Cleanup(func() {
+		_ = daemon1Closer.Close()
+	})
+
+	// Create workspace with autostart enabled and matching provisioner tags
+	workspace := mustProvisionWorkspaceWithProvisionerTags(t, client, provisionerDaemonTags, func(cwr *codersdk.CreateWorkspaceRequest) {
+		cwr.AutostartSchedule = ptr.Ref(sched.String())
+	})
+
+	// Stop the workspace while provisioner is available
+	workspace = coderdtest.MustTransitionWorkspace(t, client, workspace.ID, codersdk.WorkspaceTransitionStart, codersdk.WorkspaceTransitionStop)
+
+	// Wait for provisioner to be available for this specific workspace
+	coderdtest.MustWaitForProvisionersAvailable(t, db, workspace)
+	p, err := coderdtest.GetProvisionerForTags(db, time.Now(), workspace.OrganizationID, provisionerDaemonTags)
+	require.NoError(t, err, "Error getting provisioner for workspace")
+
+	daemon1Closer.Close()
+
+	// Ensure the provisioner is stale
+	staleTime := sched.Next(workspace.LatestBuild.CreatedAt).Add((-1 * provisionerdserver.StaleInterval) + -10*time.Second)
+	coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, staleTime)
+
+	// Trigger autobuild
+	tickCh <- sched.Next(workspace.LatestBuild.CreatedAt)
+
+	stats := <-statsCh
+
+	// This assertion should FAIL if the provisioner were still available (not stale); confirm by commenting out the
+	// UpdateProvisionerLastSeenAt call above.
+	assert.Len(t, stats.Transitions, 0, "should not create builds when no provisioners available")
+
+	daemon2Closer := coderdtest.NewTaggedProvisionerDaemon(t, api, "name", provisionerDaemonTags)
+	t.Cleanup(func() {
+		_ = daemon2Closer.Close()
+	})
+
+	// Ensure the provisioner is NOT stale, and see if we get a successful state transition.
+	p, err = coderdtest.GetProvisionerForTags(db, time.Now(), workspace.OrganizationID, provisionerDaemonTags)
+	require.NoError(t, err, "Error getting provisioner for workspace")
+	notStaleTime := sched.Next(workspace.LatestBuild.CreatedAt).Add((-1 * provisionerdserver.StaleInterval) + 10*time.Second)
+	coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, notStaleTime)
+
+	// Trigger autobuild
+	go func() {
+		tickCh <- sched.Next(workspace.LatestBuild.CreatedAt)
+		close(tickCh)
+	}()
+	stats = <-statsCh
+
+	assert.Len(t, stats.Transitions, 1, "should create builds when a valid provisioner is available")
 }
diff --git a/coderd/coderdtest/coderdtest.go b/coderd/coderdtest/coderdtest.go
index 7085068e97ff4..0de5dbb710a0e 100644
--- a/coderd/coderdtest/coderdtest.go
+++ b/coderd/coderdtest/coderdtest.go
@@ -55,6 +55,7 @@ import (
 	"cdr.dev/slog/sloggers/slogtest"
 	"github.com/coder/coder/v2/archive"
 	"github.com/coder/coder/v2/coderd/files"
+	"github.com/coder/coder/v2/coderd/provisionerdserver"
 	"github.com/coder/coder/v2/coderd/wsbuilder"
 
 	"github.com/coder/quartz"
@@ -386,6 +387,7 @@ func NewOptions(t testing.TB, options *Options) (func(http.Handler), context.Can
 		options.NotificationsEnqueuer,
 		experiments,
 	).WithStatsChannel(options.AutobuildStats)
+	lifecycleExecutor.Run()
 
 	jobReaperTicker := time.NewTicker(options.DeploymentValues.JobReaperDetectorInterval.Value())
@@ -1590,3 +1592,112 @@ func DeploymentValues(t testing.TB, mut ...func(*codersdk.DeploymentValues)) *co
 	}
 	return cfg
 }
+
+// GetProvisionerForTags returns the first valid provisioner for a workspace + template tags.
+func GetProvisionerForTags(tx database.Store, curTime time.Time, orgID uuid.UUID, tags map[string]string) (database.ProvisionerDaemon, error) {
+	if tags == nil {
+		tags = map[string]string{}
+	}
+	queryParams := database.GetProvisionerDaemonsByOrganizationParams{
+		OrganizationID: orgID,
+		WantTags:       tags,
+	}
+
+	// nolint: gocritic // The user (in this case, the user/context for autostart builds) may not have the full
+	// permissions to read provisioner daemons, but we need to check if there's any for the job prior to the
+	// execution of the job via autostart to fix: https://github.com/coder/coder/issues/17941
+	provisionerDaemons, err := tx.GetProvisionerDaemonsByOrganization(dbauthz.AsSystemReadProvisionerDaemons(context.Background()), queryParams)
+	if err != nil {
+		return database.ProvisionerDaemon{}, xerrors.Errorf("get provisioner daemons: %w", err)
+	}
+
+	// Check if any provisioners are active (not stale)
+	for _, pd := range provisionerDaemons {
+		if pd.LastSeenAt.Valid {
+			age := curTime.Sub(pd.LastSeenAt.Time)
+			if age <= provisionerdserver.StaleInterval {
+				return pd, nil
+			}
+		}
+	}
+	return database.ProvisionerDaemon{}, xerrors.New("no available provisioners found")
+}
+
+func ctxWithProvisionerPermissions(ctx context.Context) context.Context {
+	// Use system restricted context which has permissions to update provisioner daemons
+	//nolint: gocritic // We need system context to modify this.
+	return dbauthz.AsSystemRestricted(ctx)
+}
+
+// UpdateProvisionerLastSeenAt updates the provisioner daemon's LastSeenAt timestamp
+// to the specified time to prevent it from appearing stale during autobuild operations
+func UpdateProvisionerLastSeenAt(t *testing.T, db database.Store, id uuid.UUID, tickTime time.Time) {
+	t.Helper()
+	ctx := ctxWithProvisionerPermissions(context.Background())
+	t.Logf("Updating provisioner %s LastSeenAt to %v", id, tickTime)
+	err := db.UpdateProvisionerDaemonLastSeenAt(ctx, database.UpdateProvisionerDaemonLastSeenAtParams{
+		ID:         id,
+		LastSeenAt: sql.NullTime{Time: tickTime, Valid: true},
+	})
+	require.NoError(t, err)
+	t.Logf("Successfully updated provisioner LastSeenAt")
+}
+
+func MustWaitForAnyProvisioner(t *testing.T, db database.Store) {
+	t.Helper()
+	ctx := ctxWithProvisionerPermissions(testutil.Context(t, testutil.WaitShort))
+	require.Eventually(t, func() bool {
+		daemons, err := db.GetProvisionerDaemons(ctx)
+		return err == nil && len(daemons) > 0
+	}, testutil.WaitShort, testutil.IntervalFast)
+}
+
+// MustWaitForProvisionersAvailable waits for provisioners to be available for a specific workspace.
+func MustWaitForProvisionersAvailable(t *testing.T, db database.Store, workspace codersdk.Workspace) uuid.UUID {
+	t.Helper()
+	ctx := ctxWithProvisionerPermissions(testutil.Context(t, testutil.WaitShort))
+	id := uuid.UUID{}
+	// Get the workspace from the database
+	require.Eventually(t, func() bool {
+		ws, err := db.GetWorkspaceByID(ctx, workspace.ID)
+		if err != nil {
+			return false
+		}
+
+		// Get the latest build
+		latestBuild, err := db.GetWorkspaceBuildByID(ctx, workspace.LatestBuild.ID)
+		if err != nil {
+			return false
+		}
+
+		// Get the template version job
+		templateVersionJob, err := db.GetProvisionerJobByID(ctx, latestBuild.JobID)
+		if err != nil {
+			return false
+		}
+
+		// Check if provisioners are available using the same logic as hasValidProvisioner
+		provisionerDaemons, err := db.GetProvisionerDaemonsByOrganization(ctx, database.GetProvisionerDaemonsByOrganizationParams{
+			OrganizationID: ws.OrganizationID,
+			WantTags:       templateVersionJob.Tags,
+		})
+		if err != nil {
+			return false
+		}
+
+		// Check if any provisioners are active (not stale)
+		now := time.Now()
+		for _, pd := range provisionerDaemons {
+			if pd.LastSeenAt.Valid {
+				age := now.Sub(pd.LastSeenAt.Time)
+				if age <= provisionerdserver.StaleInterval {
+					id = pd.ID
+					return true // Found an active provisioner
+				}
+			}
+		}
+		return false // No active provisioners found
+	}, testutil.WaitLong, testutil.IntervalFast)
+
+	return id
+}
diff --git a/enterprise/coderd/workspaces_test.go b/enterprise/coderd/workspaces_test.go
index 1f9a9a4897629..dad24460068cd 100644
--- a/enterprise/coderd/workspaces_test.go
+++ b/enterprise/coderd/workspaces_test.go
@@ -617,7 +617,7 @@ func TestWorkspaceAutobuild(t *testing.T) {
 			failureTTL = time.Minute
 		)
 
-		client, user := coderdenttest.New(t, &coderdenttest.Options{
+		client, db, user := coderdenttest.NewWithDatabase(t, &coderdenttest.Options{
 			Options: &coderdtest.Options{
 				Logger:          &logger,
 				AutobuildTicker: ticker,
@@ -642,7 +642,12 @@ func TestWorkspaceAutobuild(t *testing.T) {
 		ws := coderdtest.CreateWorkspace(t, client, template.ID)
 		build := coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, ws.LatestBuild.ID)
 		require.Equal(t, codersdk.WorkspaceStatusFailed, build.Status)
-		ticker <- build.Job.CompletedAt.Add(failureTTL * 2)
+		tickTime := build.Job.CompletedAt.Add(failureTTL * 2)
+
+		p, err := coderdtest.GetProvisionerForTags(db, time.Now(), ws.OrganizationID, nil)
+		require.NoError(t, err)
+		coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
+		ticker <- tickTime
 		stats := <-statCh
 		// Expect workspace to transition to stopped state for breaching
 		// failure TTL.
@@ -664,7 +669,7 @@
 			failureTTL = time.Minute
 		)
 
-		client, user := coderdenttest.New(t, &coderdenttest.Options{
+		client, db, user := coderdenttest.NewWithDatabase(t, &coderdenttest.Options{
 			Options: &coderdtest.Options{
 				Logger:          &logger,
 				AutobuildTicker: ticker,
@@ -689,7 +694,12 @@
 		build := coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, ws.LatestBuild.ID)
 		require.Equal(t, codersdk.WorkspaceStatusFailed, build.Status)
 		// Make it impossible to trigger the failure TTL.
-		ticker <- build.Job.CompletedAt.Add(-failureTTL * 2)
+		tickTime := build.Job.CompletedAt.Add(-failureTTL * 2)
+
+		p, err := coderdtest.GetProvisionerForTags(db, time.Now(), ws.OrganizationID, nil)
+		require.NoError(t, err)
+		coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
+		ticker <- tickTime
 		stats := <-statCh
 		// Expect no transitions since not enough time has elapsed.
 		require.Len(t, stats.Transitions, 0)
@@ -757,10 +767,11 @@
 		client, db, user := coderdenttest.NewWithDatabase(t, &coderdenttest.Options{
 			Options: &coderdtest.Options{
-				AutobuildTicker:       ticker,
-				AutobuildStats:        statCh,
-				TemplateScheduleStore: schedule.NewEnterpriseTemplateScheduleStore(agplUserQuietHoursScheduleStore(), notifications.NewNoopEnqueuer(), logger, nil),
-				Auditor:               auditRecorder,
+				AutobuildTicker:          ticker,
+				AutobuildStats:           statCh,
+				IncludeProvisionerDaemon: true,
+				TemplateScheduleStore:    schedule.NewEnterpriseTemplateScheduleStore(agplUserQuietHoursScheduleStore(), notifications.NewNoopEnqueuer(), logger, nil),
+				Auditor:                  auditRecorder,
 			},
 			LicenseOptions: &coderdenttest.LicenseOptions{
 				Features: license.Features{codersdk.FeatureAdvancedTemplateScheduling: 1},
@@ -788,7 +799,12 @@
 		auditRecorder.ResetLogs()
 
 		// Simulate being inactive.
-		ticker <- workspace.LastUsedAt.Add(inactiveTTL * 2)
+		tickTime := workspace.LastUsedAt.Add(inactiveTTL * 2)
+
+		p, err := coderdtest.GetProvisionerForTags(db, time.Now(), workspace.OrganizationID, nil)
+		require.NoError(t, err)
+		coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
+		ticker <- tickTime
 		stats := <-statCh
 
 		// Expect workspace to transition to stopped state for breaching
@@ -811,7 +827,7 @@
 		dormantLastUsedAt := ws.LastUsedAt
 		// nolint:gocritic // this test is not testing RBAC.
-		err := client.UpdateWorkspaceDormancy(ctx, ws.ID, codersdk.UpdateWorkspaceDormancy{Dormant: false})
+		err = client.UpdateWorkspaceDormancy(ctx, ws.ID, codersdk.UpdateWorkspaceDormancy{Dormant: false})
 		require.NoError(t, err)
 
 		// Assert that we updated our last_used_at so that we don't immediately
@@ -886,7 +902,12 @@ func TestWorkspaceAutobuild(t *testing.T) {
 		}
 
 		// Simulate being inactive.
-		ticker <- time.Now().Add(time.Hour)
+		// Fix provisioner stale issue by updating LastSeenAt to the tick time
+		tickTime := time.Now().Add(time.Hour)
+		p, err := coderdtest.GetProvisionerForTags(db, time.Now(), workspaces[0].OrganizationID, nil)
+		require.NoError(t, err)
+		coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
+		ticker <- tickTime
 		stats := <-statCh
 
 		// Expect workspace to transition to stopped state for breaching
@@ -995,7 +1016,7 @@ func TestWorkspaceAutobuild(t *testing.T) {
 		)
 
 		logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
-		client, user := coderdenttest.New(t, &coderdenttest.Options{
+		client, db, user := coderdenttest.NewWithDatabase(t, &coderdenttest.Options{
 			Options: &coderdtest.Options{
 				AutobuildTicker:          ticker,
 				IncludeProvisionerDaemon: true,
@@ -1027,7 +1048,11 @@ func TestWorkspaceAutobuild(t *testing.T) {
 		ws = coderdtest.MustTransitionWorkspace(t, client, ws.ID, codersdk.WorkspaceTransitionStart, codersdk.WorkspaceTransitionStop)
 
 		// Simulate not having accessed the workspace in a while.
-		ticker <- ws.LastUsedAt.Add(2 * inactiveTTL)
+		tickTime := ws.LastUsedAt.Add(2 * inactiveTTL)
+		p, err := coderdtest.GetProvisionerForTags(db, time.Now(), ws.OrganizationID, nil)
+		require.NoError(t, err)
+		coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
+		ticker <- tickTime
 		stats := <-statCh
 		// Expect no transitions since workspace is stopped.
 		require.Len(t, stats.Transitions, 0)
@@ -1049,7 +1074,7 @@
 		)
 
 		logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
-		client, user := coderdenttest.New(t, &coderdenttest.Options{
+		client, db, user := coderdenttest.NewWithDatabase(t, &coderdenttest.Options{
 			Options: &coderdtest.Options{
 				AutobuildTicker:          ticker,
 				IncludeProvisionerDaemon: true,
@@ -1077,7 +1102,11 @@
 		require.Equal(t, codersdk.WorkspaceStatusRunning, build.Status)
 
 		// Simulate not having accessed the workspace in a while.
-		ticker <- ws.LastUsedAt.Add(2 * transitionTTL)
+		tickTime := ws.LastUsedAt.Add(2 * transitionTTL)
+		p, err := coderdtest.GetProvisionerForTags(db, time.Now(), ws.OrganizationID, nil)
+		require.NoError(t, err)
+		coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
+		ticker <- tickTime
 		stats := <-statCh
 		// Expect workspace to transition to stopped state for breaching
 		// inactive TTL.
@@ -1092,7 +1121,9 @@
 		_ = coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, ws.LatestBuild.ID)
 
 		// Simulate the workspace being dormant beyond the threshold.
-		ticker <- ws.DormantAt.Add(2 * transitionTTL)
+		tickTime2 := ws.DormantAt.Add(2 * transitionTTL)
+		coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime2)
+		ticker <- tickTime2
 		stats = <-statCh
 		require.Len(t, stats.Transitions, 1)
 		// The workspace should be scheduled for deletion.
@@ -1104,7 +1135,7 @@
 		// Assert that the workspace is actually deleted.
 		//nolint:gocritic // ensuring workspace is deleted and not just invisible to us due to RBAC
-		_, err := client.Workspace(testutil.Context(t, testutil.WaitShort), ws.ID)
+		_, err = client.Workspace(testutil.Context(t, testutil.WaitShort), ws.ID)
 		require.Error(t, err)
 		cerr, ok := codersdk.AsError(err)
 		require.True(t, ok)
@@ -1121,7 +1152,7 @@
 		)
 
 		logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
-		client, user := coderdenttest.New(t, &coderdenttest.Options{
+		client, db, user := coderdenttest.NewWithDatabase(t, &coderdenttest.Options{
 			Options: &coderdtest.Options{
 				AutobuildTicker:          ticker,
 				IncludeProvisionerDaemon: true,
@@ -1156,7 +1187,11 @@
 		require.NotNil(t, ws.DormantAt)
 
 		// Ensure we haven't breached our threshold.
-		ticker <- ws.DormantAt.Add(-dormantTTL * 2)
+		tickTime := ws.DormantAt.Add(-dormantTTL * 2)
+		p, err := coderdtest.GetProvisionerForTags(db, time.Now(), ws.OrganizationID, nil)
+		require.NoError(t, err)
+		coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
+		ticker <- tickTime
 		stats := <-statCh
 		// Expect no transitions since not enough time has elapsed.
 		require.Len(t, stats.Transitions, 0)
@@ -1167,7 +1202,9 @@
 		require.NoError(t, err)
 
 		// Simlute the workspace breaching the threshold.
-		ticker <- ws.DormantAt.Add(dormantTTL * 2)
+		tickTime2 := ws.DormantAt.Add(dormantTTL * 2)
+		coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime2)
+		ticker <- tickTime2
 		stats = <-statCh
 		require.Len(t, stats.Transitions, 1)
 		require.Equal(t, database.WorkspaceTransitionDelete, stats.Transitions[ws.ID])
@@ -1184,7 +1221,7 @@
 		)
 
 		logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
-		client, user := coderdenttest.New(t, &coderdenttest.Options{
+		client, db, user := coderdenttest.NewWithDatabase(t, &coderdenttest.Options{
 			Options: &coderdtest.Options{
 				AutobuildTicker:          tickCh,
 				IncludeProvisionerDaemon: true,
@@ -1215,7 +1252,11 @@
 		ws = coderdtest.MustTransitionWorkspace(t, client, ws.ID, codersdk.WorkspaceTransitionStart, codersdk.WorkspaceTransitionStop)
 
 		// Assert that autostart works when the workspace isn't dormant..
-		tickCh <- sched.Next(ws.LatestBuild.CreatedAt)
+		tickTime := sched.Next(ws.LatestBuild.CreatedAt)
+		p, err := coderdtest.GetProvisionerForTags(db, time.Now(), ws.OrganizationID, nil)
+		require.NoError(t, err)
+		coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
+		tickCh <- tickTime
 		stats := <-statsCh
 		require.Len(t, stats.Errors, 0)
 		require.Len(t, stats.Transitions, 1)
@@ -1235,7 +1276,9 @@
 		require.NoError(t, err)
 
 		// We should see the workspace get stopped now.
-		tickCh <- ws.LastUsedAt.Add(inactiveTTL * 2)
+		tickTime2 := ws.LastUsedAt.Add(inactiveTTL * 2)
+		coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime2)
+		tickCh <- tickTime2
 		stats = <-statsCh
 		require.Len(t, stats.Errors, 0)
 		require.Len(t, stats.Transitions, 1)
@@ -1265,7 +1308,7 @@
 		)
 
 		logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
-		client, user := coderdenttest.New(t, &coderdenttest.Options{
+		client, db, user := coderdenttest.NewWithDatabase(t, &coderdenttest.Options{
 			Options: &coderdtest.Options{
 				AutobuildTicker:          ticker,
 				IncludeProvisionerDaemon: true,
@@ -1333,13 +1376,19 @@
 		// Simulate ticking an hour after the workspace is expected to be deleted.
 		// Under normal circumstances this should result in a transition but
 		// since our last build resulted in failure it should be skipped.
-		ticker <- build.Job.CompletedAt.Add(time.Hour)
+		tickTime := build.Job.CompletedAt.Add(time.Hour)
+		p, err := coderdtest.GetProvisionerForTags(db, time.Now(), ws.OrganizationID, nil)
+		require.NoError(t, err)
+		coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
+		ticker <- tickTime
 		stats := <-statCh
 		require.Len(t, stats.Transitions, 0)
 
 		// Simulate ticking a day after the workspace was last attempted to
 		// be deleted. This should result in an attempt.
-		ticker <- build.Job.CompletedAt.Add(time.Hour * 25)
+		tickTime2 := build.Job.CompletedAt.Add(time.Hour * 25)
+		coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime2)
+		ticker <- tickTime2
 		stats = <-statCh
 		require.Len(t, stats.Transitions, 1)
 		require.Equal(t, database.WorkspaceTransitionDelete, stats.Transitions[ws.ID])
@@ -1354,7 +1403,7 @@
 		)
 
 		logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
-		client, user := coderdenttest.New(t, &coderdenttest.Options{
+		client, db, user := coderdenttest.NewWithDatabase(t, &coderdenttest.Options{
 			Options: &coderdtest.Options{
 				AutobuildTicker:          tickCh,
 				IncludeProvisionerDaemon: true,
@@ -1399,7 +1448,11 @@
 		require.NoError(t, err)
 
 		// Kick of an autostart build.
-		tickCh <- sched.Next(ws.LatestBuild.CreatedAt)
+		tickTime := sched.Next(ws.LatestBuild.CreatedAt)
+		p, err := coderdtest.GetProvisionerForTags(db, time.Now(), ws.OrganizationID, nil)
+		require.NoError(t, err)
+		coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
+		tickCh <- tickTime
 		stats := <-statsCh
 		require.Len(t, stats.Errors, 0)
 		require.Len(t, stats.Transitions, 1)
@@ -1427,7 +1480,9 @@
 		})
 
 		// Force an autostart transition again.
-		tickCh <- sched.Next(firstBuild.CreatedAt)
+		tickTime2 := sched.Next(firstBuild.CreatedAt)
+		coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime2)
+		tickCh <- tickTime2
 		stats = <-statsCh
 		require.Len(t, stats.Errors, 0)
 		require.Len(t, stats.Transitions, 1)
@@ -1451,7 +1506,7 @@
 		clock.Set(dbtime.Now())
 
 		logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
-		client, user := coderdenttest.New(t, &coderdenttest.Options{
+		client, db, user := coderdenttest.NewWithDatabase(t, &coderdenttest.Options{
 			Options: &coderdtest.Options{
 				AutobuildTicker:          tickCh,
 				IncludeProvisionerDaemon: true,
@@ -1492,6 +1547,9 @@
 		next = sched.Next(next)
 		clock.Set(next)
 
+		p, err := coderdtest.GetProvisionerForTags(db, time.Now(), ws.OrganizationID, nil)
+		require.NoError(t, err)
+		coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, next)
 		tickCh <- next
 		stats := <-statsCh
 		ws = coderdtest.MustWorkspace(t, client, ws.ID)
@@ -2184,11 +2242,19 @@ func TestPrebuildsAutobuild(t *testing.T) {
 		workspace = coderdtest.MustTransitionWorkspace(t, client, workspace.ID, codersdk.WorkspaceTransitionStart, codersdk.WorkspaceTransitionStop)
 		coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
 
+		// Wait for provisioner to be available for this specific workspace
+		coderdtest.MustWaitForProvisionersAvailable(t, db, prebuild)
+
+		tickTime := sched.Next(prebuild.LatestBuild.CreatedAt).Add(time.Minute)
+		p, err := coderdtest.GetProvisionerForTags(db, time.Now(), workspace.OrganizationID, nil)
+		require.NoError(t, err)
+		coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
+
 		// Tick at the next scheduled time after the prebuild’s LatestBuild.CreatedAt,
 		// since the next allowed autostart is calculated starting from that point.
 		// When: the autobuild executor ticks after the scheduled time
 		go func() {
-			tickCh <- sched.Next(prebuild.LatestBuild.CreatedAt).Add(time.Minute)
+			tickCh <- tickTime
 		}()
 
 		// Then: the workspace should have a NextStartAt equal to the next autostart schedule
@@ -2328,9 +2394,14 @@ func TestPrebuildsAutobuild(t *testing.T) {
 		require.NotNil(t, workspace.DormantAt)
 		require.NotNil(t, workspace.DeletingAt)
 
+		tickTime := workspace.DeletingAt.Add(time.Minute)
+		p, err := coderdtest.GetProvisionerForTags(db, time.Now(), workspace.OrganizationID, nil)
+		require.NoError(t, err)
+		coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
+
 		// When: the autobuild executor ticks *after* the deletion TTL
 		go func() {
-			tickCh <- workspace.DeletingAt.Add(time.Minute)
+			tickCh <- tickTime
 		}()
 
 		// Then: the workspace should be deleted