diff --git a/cli/exp_scaletest.go b/cli/exp_scaletest.go index f74cc49eb693f..d2ee36c1819eb 100644 --- a/cli/exp_scaletest.go +++ b/cli/exp_scaletest.go @@ -30,6 +30,7 @@ import ( "github.com/coder/coder/cryptorand" "github.com/coder/coder/scaletest/agentconn" "github.com/coder/coder/scaletest/createworkspaces" + "github.com/coder/coder/scaletest/dashboard" "github.com/coder/coder/scaletest/harness" "github.com/coder/coder/scaletest/reconnectingpty" "github.com/coder/coder/scaletest/workspacebuild" @@ -47,6 +48,7 @@ func (r *RootCmd) scaletestCmd() *clibase.Cmd { }, Children: []*clibase.Cmd{ r.scaletestCleanup(), + r.scaletestDashboard(), r.scaletestCreateWorkspaces(), r.scaletestWorkspaceTraffic(), }, @@ -317,6 +319,30 @@ func (s *scaletestOutputFlags) parse() ([]scaleTestOutput, error) { return out, nil } +type scaletestPrometheusFlags struct { + Address string + Wait time.Duration +} + +func (s *scaletestPrometheusFlags) attach(opts *clibase.OptionSet) { + *opts = append(*opts, + clibase.Option{ + Flag: "scaletest-prometheus-address", + Env: "CODER_SCALETEST_PROMETHEUS_ADDRESS", + Default: "0.0.0.0:21112", + Description: "Address on which to expose scaletest Prometheus metrics.", + Value: clibase.StringOf(&s.Address), + }, + clibase.Option{ + Flag: "scaletest-prometheus-wait", + Env: "CODER_SCALETEST_PROMETHEUS_WAIT", + Default: "15s", + Description: "How long to wait before exiting in order to allow Prometheus metrics to be scraped.", + Value: clibase.DurationOf(&s.Wait), + }, + ) +} + func requireAdmin(ctx context.Context, client *codersdk.Client) (codersdk.User, error) { me, err := client.User(ctx, codersdk.Me) if err != nil { @@ -846,17 +872,16 @@ func (r *RootCmd) scaletestCreateWorkspaces() *clibase.Cmd { func (r *RootCmd) scaletestWorkspaceTraffic() *clibase.Cmd { var ( - tickInterval time.Duration - bytesPerTick int64 - ssh bool - scaletestPrometheusAddress string - scaletestPrometheusWait time.Duration + tickInterval time.Duration + bytesPerTick int64 + 
ssh bool client = &codersdk.Client{} tracingFlags = &scaletestTracingFlags{} strategy = &scaletestStrategyFlags{} cleanupStrategy = &scaletestStrategyFlags{cleanup: true} output = &scaletestOutputFlags{} + prometheusFlags = &scaletestPrometheusFlags{} ) cmd := &clibase.Cmd{ @@ -871,7 +896,7 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *clibase.Cmd { metrics := workspacetraffic.NewMetrics(reg, "username", "workspace_name", "agent_name") logger := slog.Make(sloghuman.Sink(io.Discard)) - prometheusSrvClose := ServeHandler(ctx, logger, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), scaletestPrometheusAddress, "prometheus") + prometheusSrvClose := ServeHandler(ctx, logger, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), prometheusFlags.Address, "prometheus") defer prometheusSrvClose() // Bypass rate limiting @@ -905,8 +930,8 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *clibase.Cmd { _, _ = fmt.Fprintf(inv.Stderr, "\nError uploading traces: %+v\n", err) } // Wait for prometheus metrics to be scraped - _, _ = fmt.Fprintf(inv.Stderr, "Waiting %s for prometheus metrics to be scraped\n", scaletestPrometheusWait) - <-time.After(scaletestPrometheusWait) + _, _ = fmt.Fprintf(inv.Stderr, "Waiting %s for prometheus metrics to be scraped\n", prometheusFlags.Wait) + <-time.After(prometheusFlags.Wait) }() tracer := tracerProvider.Tracer(scaletestTracerName) @@ -1009,19 +1034,135 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *clibase.Cmd { Description: "Send traffic over SSH.", Value: clibase.BoolOf(&ssh), }, + } + + tracingFlags.attach(&cmd.Options) + strategy.attach(&cmd.Options) + cleanupStrategy.attach(&cmd.Options) + output.attach(&cmd.Options) + prometheusFlags.attach(&cmd.Options) + + return cmd +} + +func (r *RootCmd) scaletestDashboard() *clibase.Cmd { + var ( + count int64 + minWait time.Duration + maxWait time.Duration + + client = &codersdk.Client{} + tracingFlags = &scaletestTracingFlags{} + strategy = &scaletestStrategyFlags{} + cleanupStrategy = 
&scaletestStrategyFlags{cleanup: true} + output = &scaletestOutputFlags{} + prometheusFlags = &scaletestPrometheusFlags{} + ) + + cmd := &clibase.Cmd{ + Use: "dashboard", + Short: "Generate traffic to the HTTP API to simulate use of the dashboard.", + Middleware: clibase.Chain( + r.InitClient(client), + ), + Handler: func(inv *clibase.Invocation) error { + ctx := inv.Context() + logger := slog.Make(sloghuman.Sink(inv.Stdout)).Leveled(slog.LevelInfo) + tracerProvider, closeTracing, tracingEnabled, err := tracingFlags.provider(ctx) + if err != nil { + return xerrors.Errorf("create tracer provider: %w", err) + } + defer func() { + // Allow time for traces to flush even if command context is + // canceled. This is a no-op if tracing is not enabled. + _, _ = fmt.Fprintln(inv.Stderr, "\nUploading traces...") + if err := closeTracing(ctx); err != nil { + _, _ = fmt.Fprintf(inv.Stderr, "\nError uploading traces: %+v\n", err) + } + // Wait for prometheus metrics to be scraped + _, _ = fmt.Fprintf(inv.Stderr, "Waiting %s for prometheus metrics to be scraped\n", prometheusFlags.Wait) + <-time.After(prometheusFlags.Wait) + }() + tracer := tracerProvider.Tracer(scaletestTracerName) + outputs, err := output.parse() + if err != nil { + return xerrors.Errorf("could not parse --output flags") + } + reg := prometheus.NewRegistry() + prometheusSrvClose := ServeHandler(ctx, logger, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), prometheusFlags.Address, "prometheus") + defer prometheusSrvClose() + metrics := dashboard.NewMetrics(reg) + + th := harness.NewTestHarness(strategy.toStrategy(), cleanupStrategy.toStrategy()) + + for i := int64(0); i < count; i++ { + name := fmt.Sprintf("dashboard-%d", i) + config := dashboard.Config{ + MinWait: minWait, + MaxWait: maxWait, + Trace: tracingEnabled, + Logger: logger.Named(name), + RollTable: dashboard.DefaultActions, + } + if err := config.Validate(); err != nil { + return err + } + var runner harness.Runnable = dashboard.NewRunner(client, 
metrics, config) + if tracingEnabled { + runner = &runnableTraceWrapper{ + tracer: tracer, + spanName: name, + runner: runner, + } + } + th.AddRun("dashboard", name, runner) + } + + _, _ = fmt.Fprintln(inv.Stderr, "Running load test...") + testCtx, testCancel := strategy.toContext(ctx) + defer testCancel() + err = th.Run(testCtx) + if err != nil { + return xerrors.Errorf("run test harness (harness failure, not a test failure): %w", err) + } + + res := th.Results() + for _, o := range outputs { + err = o.write(res, inv.Stdout) + if err != nil { + return xerrors.Errorf("write output %q to %q: %w", o.format, o.path, err) + } + } + + if res.TotalFail > 0 { + return xerrors.New("load test failed, see above for more details") + } + + return nil + }, + } + + cmd.Options = []clibase.Option{ { - Flag: "scaletest-prometheus-address", - Env: "CODER_SCALETEST_PROMETHEUS_ADDRESS", - Default: "0.0.0.0:21112", - Description: "Address on which to expose scaletest Prometheus metrics.", - Value: clibase.StringOf(&scaletestPrometheusAddress), + Flag: "count", + Env: "CODER_SCALETEST_DASHBOARD_COUNT", + Default: "1", + Description: "Number of concurrent workers.", + Value: clibase.Int64Of(&count), }, { - Flag: "scaletest-prometheus-wait", - Env: "CODER_SCALETEST_PROMETHEUS_WAIT", - Default: "5s", - Description: "How long to wait before exiting in order to allow Prometheus metrics to be scraped.", - Value: clibase.DurationOf(&scaletestPrometheusWait), + Flag: "min-wait", + Env: "CODER_SCALETEST_DASHBOARD_MIN_WAIT", + Default: "100ms", + Description: "Minimum wait between fetches.", + Value: clibase.DurationOf(&minWait), + }, + { + Flag: "max-wait", + Env: "CODER_SCALETEST_DASHBOARD_MAX_WAIT", + Default: "1s", + Description: "Maximum wait between fetches.", + Value: clibase.DurationOf(&maxWait), }, } @@ -1029,6 +1170,7 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *clibase.Cmd { strategy.attach(&cmd.Options) cleanupStrategy.attach(&cmd.Options) output.attach(&cmd.Options) + 
prometheusFlags.attach(&cmd.Options) return cmd } diff --git a/cli/exp_scaletest_test.go b/cli/exp_scaletest_test.go index 940ba65eb9264..4c10b722ca357 100644 --- a/cli/exp_scaletest_test.go +++ b/cli/exp_scaletest_test.go @@ -78,3 +78,29 @@ func TestScaleTestWorkspaceTraffic(t *testing.T) { err := inv.WithContext(ctx).Run() require.ErrorContains(t, err, "no scaletest workspaces exist") } + +// This test just validates that the CLI command accepts its known arguments. +func TestScaleTestDashboard(t *testing.T) { + t.Parallel() + + ctx, cancelFunc := context.WithTimeout(context.Background(), testutil.WaitMedium) + defer cancelFunc() + + client := coderdtest.New(t, nil) + _ = coderdtest.CreateFirstUser(t, client) + + inv, root := clitest.New(t, "exp", "scaletest", "dashboard", + "--count", "1", + "--min-wait", "100ms", + "--max-wait", "1s", + "--timeout", "1s", + "--scaletest-prometheus-address", "127.0.0.1:0", + "--scaletest-prometheus-wait", "0s", + ) + clitest.SetupConfig(t, client, root) + var stdout, stderr bytes.Buffer + inv.Stdout = &stdout + inv.Stderr = &stderr + err := inv.WithContext(ctx).Run() + require.NoError(t, err, "") +} diff --git a/codersdk/rbacresources.go b/codersdk/rbacresources.go index 7db5fc0ec1c76..fc1a7b209b393 100644 --- a/codersdk/rbacresources.go +++ b/codersdk/rbacresources.go @@ -27,6 +27,47 @@ const ( ResourceSystem RBACResource = "system" ) +const ( + ActionCreate = "create" + ActionRead = "read" + ActionUpdate = "update" + ActionDelete = "delete" +) + +var ( + AllRBACResources = []RBACResource{ + ResourceWorkspace, + ResourceWorkspaceProxy, + ResourceWorkspaceExecution, + ResourceWorkspaceApplicationConnect, + ResourceAuditLog, + ResourceTemplate, + ResourceGroup, + ResourceFile, + ResourceProvisionerDaemon, + ResourceOrganization, + ResourceRoleAssignment, + ResourceOrgRoleAssignment, + ResourceAPIKey, + ResourceUser, + ResourceUserData, + ResourceOrganizationMember, + ResourceLicense, + ResourceDeploymentValues, + 
ResourceDeploymentStats, + ResourceReplicas, + ResourceDebugInfo, + ResourceSystem, + } + + AllRBACActions = []string{ + ActionCreate, + ActionRead, + ActionUpdate, + ActionDelete, + } +) + func (r RBACResource) String() string { return string(r) } diff --git a/scaletest/dashboard/cache.go b/scaletest/dashboard/cache.go new file mode 100644 index 0000000000000..2ac31ff22a525 --- /dev/null +++ b/scaletest/dashboard/cache.go @@ -0,0 +1,97 @@ +package dashboard + +import ( + "context" + "math/rand" + "sync" + + "github.com/coder/coder/codersdk" +) + +type cache struct { + sync.RWMutex + workspaces []codersdk.Workspace + templates []codersdk.Template + users []codersdk.User +} + +func (c *cache) fill(ctx context.Context, client *codersdk.Client) error { + c.Lock() + defer c.Unlock() + me, err := client.User(ctx, codersdk.Me) + if err != nil { + return err + } + ws, err := client.Workspaces(ctx, codersdk.WorkspaceFilter{}) + if err != nil { + return err + } + c.workspaces = ws.Workspaces + tpl, err := client.TemplatesByOrganization(ctx, me.OrganizationIDs[0]) + if err != nil { + return err + } + c.templates = tpl + users, err := client.Users(ctx, codersdk.UsersRequest{}) + if err != nil { + return err + } + c.users = users.Users + return nil +} + +func (c *cache) setWorkspaces(ws []codersdk.Workspace) { + c.Lock() + c.workspaces = ws + c.Unlock() +} + +func (c *cache) setTemplates(t []codersdk.Template) { + c.Lock() + c.templates = t + c.Unlock() +} + +func (c *cache) randWorkspace() codersdk.Workspace { + c.RLock() + defer c.RUnlock() + if len(c.workspaces) == 0 { + return codersdk.Workspace{} + } + return pick(c.workspaces) +} + +func (c *cache) randTemplate() codersdk.Template { + c.RLock() + defer c.RUnlock() + if len(c.templates) == 0 { + return codersdk.Template{} + } + return pick(c.templates) +} + +func (c *cache) setUsers(u []codersdk.User) { + c.Lock() + c.users = u + c.Unlock() +} + +func (c *cache) randUser() codersdk.User { + c.RLock() + defer c.RUnlock() + 
if len(c.users) == 0 { + return codersdk.User{} + } + return pick(c.users) +} + +// pick chooses a random element from a slice. +// If the slice is empty, it returns the zero value of the type. +func pick[T any](s []T) T { + if len(s) == 0 { + var zero T + return zero + } + // nolint:gosec + return s[rand.Intn(len(s))] +} diff --git a/scaletest/dashboard/config.go b/scaletest/dashboard/config.go new file mode 100644 index 0000000000000..b269ee7119320 --- /dev/null +++ b/scaletest/dashboard/config.go @@ -0,0 +1,38 @@ +package dashboard + +import ( + "time" + + "golang.org/x/xerrors" + + "cdr.dev/slog" +) + +type Config struct { + // MinWait is the minimum interval between fetches. + MinWait time.Duration `json:"duration_min"` + // MaxWait is the maximum interval between fetches. + MaxWait time.Duration `json:"duration_max"` + // Trace is whether to trace the requests. + Trace bool `json:"trace"` + // Logger is the logger to use. + Logger slog.Logger `json:"-"` + // RollTable is the set of actions to perform + RollTable RollTable `json:"roll_table"` +} + +func (c Config) Validate() error { + if c.MinWait <= 0 { + return xerrors.Errorf("validate duration_min: must be greater than zero") + } + + if c.MaxWait <= 0 { + return xerrors.Errorf("validate duration_max: must be greater than zero") + } + + if c.MinWait > c.MaxWait { + return xerrors.Errorf("validate duration_min: must be less than duration_max") + } + + return nil +} diff --git a/scaletest/dashboard/metrics.go b/scaletest/dashboard/metrics.go new file mode 100644 index 0000000000000..513a319a07bae --- /dev/null +++ b/scaletest/dashboard/metrics.go @@ -0,0 +1,56 @@ +package dashboard + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +type Metrics interface { + ObserveDuration(action string, d time.Duration) + IncErrors(action string) + IncStatuses(action string, code string) +} + +type PromMetrics struct { + durationSeconds *prometheus.HistogramVec + errors *prometheus.CounterVec + 
statuses *prometheus.CounterVec +} + +func NewMetrics(reg prometheus.Registerer) *PromMetrics { + m := &PromMetrics{ + durationSeconds: prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "coderd", + Subsystem: "scaletest_dashboard", + Name: "duration_seconds", + }, []string{"action"}), + errors: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: "coderd", + Subsystem: "scaletest_dashboard", + Name: "errors_total", + }, []string{"action"}), + statuses: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: "coderd", + Subsystem: "scaletest_dashboard", + Name: "statuses_total", + }, []string{"action", "code"}), + } + + reg.MustRegister(m.durationSeconds) + reg.MustRegister(m.errors) + reg.MustRegister(m.statuses) + return m +} + +func (p *PromMetrics) ObserveDuration(action string, d time.Duration) { + p.durationSeconds.WithLabelValues(action).Observe(d.Seconds()) +} + +func (p *PromMetrics) IncErrors(action string) { + p.errors.WithLabelValues(action).Inc() +} + +func (p *PromMetrics) IncStatuses(action string, code string) { + p.statuses.WithLabelValues(action, code).Inc() +} diff --git a/scaletest/dashboard/rolltable.go b/scaletest/dashboard/rolltable.go new file mode 100644 index 0000000000000..e237cf6983878 --- /dev/null +++ b/scaletest/dashboard/rolltable.go @@ -0,0 +1,304 @@ +package dashboard + +import ( + "context" + + "github.com/google/uuid" + + "github.com/coder/coder/codersdk" +) + +// DefaultActions is a table of actions to perform. +// D&D nerds will feel right at home here :-) +// Note that the order of the table is important! +// Entries must be in ascending order. 
+var DefaultActions RollTable = []RollTableEntry{ + {0, fetchWorkspaces, "fetch workspaces"}, + {1, fetchUsers, "fetch users"}, + {2, fetchTemplates, "fetch templates"}, + {3, authCheckAsOwner, "authcheck owner"}, + {4, authCheckAsNonOwner, "authcheck not owner"}, + {5, fetchAuditLog, "fetch audit log"}, + {6, fetchActiveUsers, "fetch active users"}, + {7, fetchSuspendedUsers, "fetch suspended users"}, + {8, fetchTemplateVersion, "fetch template version"}, + {9, fetchWorkspace, "fetch workspace"}, + {10, fetchTemplate, "fetch template"}, + {11, fetchUserByID, "fetch user by ID"}, + {12, fetchUserByUsername, "fetch user by username"}, + {13, fetchWorkspaceBuild, "fetch workspace build"}, + {14, fetchDeploymentConfig, "fetch deployment config"}, + {15, fetchWorkspaceQuotaForUser, "fetch workspace quota for user"}, + {16, fetchDeploymentStats, "fetch deployment stats"}, + {17, fetchWorkspaceLogs, "fetch workspace logs"}, +} + +// RollTable is a slice of rollTableEntry. +type RollTable []RollTableEntry + +// RollTableEntry is an entry in the roll table. +type RollTableEntry struct { + // Roll is the minimum number required to perform the action. + Roll int + // Fn is the function to call. + Fn func(ctx context.Context, p *Params) error + // Label is used for logging. + Label string +} + +// choose returns the first entry in the table that is greater than or equal to n. +func (r RollTable) choose(n int) RollTableEntry { + for _, entry := range r { + if entry.Roll >= n { + return entry + } + } + return RollTableEntry{} +} + +// max returns the maximum roll in the table. +// Important: this assumes that the table is sorted in ascending order. +func (r RollTable) max() int { + return r[len(r)-1].Roll +} + +// Params is a set of parameters to pass to the actions in a rollTable. +type Params struct { + // client is the client to use for performing the action. + client *codersdk.Client + // me is the currently authenticated user. Lots of actions require this. 
+ me codersdk.User + // For picking random resource IDs, we need to know what resources are + // present. We store them in a cache to avoid fetching them every time. + // This may seem counter-intuitive for load testing, but we want to avoid + // muddying results. + c *cache +} + +// fetchWorkspaces fetches all workspaces. +func fetchWorkspaces(ctx context.Context, p *Params) error { + ws, err := p.client.Workspaces(ctx, codersdk.WorkspaceFilter{}) + if err != nil { + // store the workspaces for later use in case they change + p.c.setWorkspaces(ws.Workspaces) + } + return err +} + +// fetchUsers fetches all users. +func fetchUsers(ctx context.Context, p *Params) error { + users, err := p.client.Users(ctx, codersdk.UsersRequest{}) + if err != nil { + p.c.setUsers(users.Users) + } + return err +} + +// fetchActiveUsers fetches all active users +func fetchActiveUsers(ctx context.Context, p *Params) error { + _, err := p.client.Users(ctx, codersdk.UsersRequest{ + Status: codersdk.UserStatusActive, + }) + return err +} + +// fetchSuspendedUsers fetches all suspended users +func fetchSuspendedUsers(ctx context.Context, p *Params) error { + _, err := p.client.Users(ctx, codersdk.UsersRequest{ + Status: codersdk.UserStatusSuspended, + }) + return err +} + +// fetchTemplates fetches all templates. +func fetchTemplates(ctx context.Context, p *Params) error { + templates, err := p.client.TemplatesByOrganization(ctx, p.me.OrganizationIDs[0]) + if err != nil { + p.c.setTemplates(templates) + } + return err +} + +// fetchTemplateBuild fetches a single template version at random. +func fetchTemplateVersion(ctx context.Context, p *Params) error { + t := p.c.randTemplate() + _, err := p.client.TemplateVersion(ctx, t.ActiveVersionID) + return err +} + +// fetchWorkspace fetches a single workspace at random. 
+func fetchWorkspace(ctx context.Context, p *Params) error { + w := p.c.randWorkspace() + _, err := p.client.WorkspaceByOwnerAndName(ctx, w.OwnerName, w.Name, codersdk.WorkspaceOptions{}) + return err +} + +// fetchWorkspaceBuild fetches a single workspace build at random. +func fetchWorkspaceBuild(ctx context.Context, p *Params) error { + w := p.c.randWorkspace() + _, err := p.client.WorkspaceBuild(ctx, w.LatestBuild.ID) + return err +} + +// fetchTemplate fetches a single template at random. +func fetchTemplate(ctx context.Context, p *Params) error { + t := p.c.randTemplate() + _, err := p.client.Template(ctx, t.ID) + return err +} + +// fetchUserByID fetches a single user at random by ID. +func fetchUserByID(ctx context.Context, p *Params) error { + u := p.c.randUser() + _, err := p.client.User(ctx, u.ID.String()) + return err +} + +// fetchUserByUsername fetches a single user at random by username. +func fetchUserByUsername(ctx context.Context, p *Params) error { + u := p.c.randUser() + _, err := p.client.User(ctx, u.Username) + return err +} + +// fetchDeploymentConfig fetches the deployment config. +func fetchDeploymentConfig(ctx context.Context, p *Params) error { + _, err := p.client.DeploymentConfig(ctx) + return err +} + +// fetchWorkspaceQuotaForUser fetches the workspace quota for a random user. +func fetchWorkspaceQuotaForUser(ctx context.Context, p *Params) error { + u := p.c.randUser() + _, err := p.client.WorkspaceQuota(ctx, u.ID.String()) + return err +} + +// fetchDeploymentStats fetches the deployment stats. +func fetchDeploymentStats(ctx context.Context, p *Params) error { + _, err := p.client.DeploymentStats(ctx) + return err +} + +// fetchWorkspaceLogs fetches the logs for a random workspace. 
+func fetchWorkspaceLogs(ctx context.Context, p *Params) error { + w := p.c.randWorkspace() + ch, closer, err := p.client.WorkspaceBuildLogsAfter(ctx, w.LatestBuild.ID, 0) + if err != nil { + return err + } + defer func() { + _ = closer.Close() + }() + // Drain the channel. + for { + select { + case <-ctx.Done(): + return ctx.Err() + case l, ok := <-ch: + if !ok { + return nil + } + _ = l + } + } +} + +// fetchAuditLog fetches the audit log. +// As not all users have access to the audit log, we check first. +func fetchAuditLog(ctx context.Context, p *Params) error { + res, err := p.client.AuthCheck(ctx, codersdk.AuthorizationRequest{ + Checks: map[string]codersdk.AuthorizationCheck{ + "auditlog": { + Object: codersdk.AuthorizationObject{ + ResourceType: codersdk.ResourceAuditLog, + }, + Action: codersdk.ActionRead, + }, + }, + }) + if err != nil { + return err + } + if !res["auditlog"] { + return nil // we are not authorized to read the audit log + } + + // Fetch the first 25 audit log entries. + _, err = p.client.AuditLogs(ctx, codersdk.AuditLogsRequest{ + Pagination: codersdk.Pagination{ + Offset: 0, + Limit: 25, + }, + }) + return err +} + +// authCheckAsOwner performs an auth check as the owner of a random +// resource type and action. +func authCheckAsOwner(ctx context.Context, p *Params) error { + _, err := p.client.AuthCheck(ctx, randAuthReq( + ownedBy(p.me.ID), + withAction(randAction()), + withObjType(randObjectType()), + inOrg(p.me.OrganizationIDs[0]), + )) + return err +} + +// authCheckAsNonOwner performs an auth check as a non-owner of a random +// resource type and action. 
+func authCheckAsNonOwner(ctx context.Context, p *Params) error { + _, err := p.client.AuthCheck(ctx, randAuthReq( + ownedBy(uuid.New()), + withAction(randAction()), + withObjType(randObjectType()), + inOrg(p.me.OrganizationIDs[0]), + )) + return err +} + +// nolint: gosec +func randAuthReq(mut ...func(*codersdk.AuthorizationCheck)) codersdk.AuthorizationRequest { + var check codersdk.AuthorizationCheck + for _, m := range mut { + m(&check) + } + return codersdk.AuthorizationRequest{ + Checks: map[string]codersdk.AuthorizationCheck{ + "check": check, + }, + } +} + +func ownedBy(myID uuid.UUID) func(check *codersdk.AuthorizationCheck) { + return func(check *codersdk.AuthorizationCheck) { + check.Object.OwnerID = myID.String() + } +} + +func inOrg(orgID uuid.UUID) func(check *codersdk.AuthorizationCheck) { + return func(check *codersdk.AuthorizationCheck) { + check.Object.OrganizationID = orgID.String() + } +} + +func withObjType(objType codersdk.RBACResource) func(check *codersdk.AuthorizationCheck) { + return func(check *codersdk.AuthorizationCheck) { + check.Object.ResourceType = objType + } +} + +func withAction(action string) func(check *codersdk.AuthorizationCheck) { + return func(check *codersdk.AuthorizationCheck) { + check.Action = action + } +} + +func randAction() string { + return pick(codersdk.AllRBACActions) +} + +func randObjectType() codersdk.RBACResource { + return pick(codersdk.AllRBACResources) +} diff --git a/scaletest/dashboard/rolltable_internal_test.go b/scaletest/dashboard/rolltable_internal_test.go new file mode 100644 index 0000000000000..53b646df119d6 --- /dev/null +++ b/scaletest/dashboard/rolltable_internal_test.go @@ -0,0 +1,17 @@ +package dashboard + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_allActions_ordering(t *testing.T) { + t.Parallel() + + last := -1 + for idx, entry := range DefaultActions { + require.Greater(t, entry.Roll, last, "roll table must be in ascending order, entry %d is out of 
order", idx) + last = entry.Roll + } +} diff --git a/scaletest/dashboard/run.go b/scaletest/dashboard/run.go new file mode 100644 index 0000000000000..64c904177565e --- /dev/null +++ b/scaletest/dashboard/run.go @@ -0,0 +1,131 @@ +package dashboard + +import ( + "context" + "fmt" + "io" + "math/rand" + "time" + + "golang.org/x/xerrors" + + "cdr.dev/slog" + "github.com/coder/coder/codersdk" + "github.com/coder/coder/scaletest/harness" +) + +type Runner struct { + client *codersdk.Client + cfg Config + metrics Metrics +} + +var ( + _ harness.Runnable = &Runner{} + _ harness.Cleanable = &Runner{} +) + +func NewRunner(client *codersdk.Client, metrics Metrics, cfg Config) *Runner { + client.Trace = cfg.Trace + return &Runner{ + client: client, + cfg: cfg, + metrics: metrics, + } +} + +func (r *Runner) Run(ctx context.Context, _ string, _ io.Writer) error { + me, err := r.client.User(ctx, codersdk.Me) + if err != nil { + return err + } + if len(me.OrganizationIDs) == 0 { + return xerrors.Errorf("user has no organizations") + } + + c := &cache{} + if err := c.fill(ctx, r.client); err != nil { + return err + } + + p := &Params{ + client: r.client, + me: me, + c: c, + } + rolls := make(chan int) + go func() { + t := time.NewTicker(r.randWait()) + defer t.Stop() + for { + select { + case <-ctx.Done(): + return + case <-t.C: + rolls <- rand.Intn(r.cfg.RollTable.max() + 1) // nolint:gosec + t.Reset(r.randWait()) + } + } + }() + + for { + select { + case <-ctx.Done(): + return nil + case n := <-rolls: + act := r.cfg.RollTable.choose(n) + go r.do(ctx, act, p) + } + } +} + +func (*Runner) Cleanup(_ context.Context, _ string) error { + return nil +} + +func (r *Runner) do(ctx context.Context, act RollTableEntry, p *Params) { + select { + case <-ctx.Done(): + r.cfg.Logger.Info(ctx, "context done, stopping") + return + default: + var errored bool + cancelCtx, cancel := context.WithTimeout(ctx, r.cfg.MaxWait) + defer cancel() + start := time.Now() + err := act.Fn(cancelCtx, p) + 
cancel() + elapsed := time.Since(start) + if err != nil { + errored = true + r.cfg.Logger.Error( //nolint:gocritic + ctx, "action failed", + slog.Error(err), + slog.F("action", act.Label), + slog.F("elapsed", elapsed), + ) + } else { + r.cfg.Logger.Info(ctx, "completed successfully", + slog.F("action", act.Label), + slog.F("elapsed", elapsed), + ) + } + codeLabel := "200" + if apiErr, ok := codersdk.AsError(err); ok { + codeLabel = fmt.Sprintf("%d", apiErr.StatusCode()) + } else if xerrors.Is(err, context.Canceled) { + codeLabel = "timeout" + } + r.metrics.ObserveDuration(act.Label, elapsed) + r.metrics.IncStatuses(act.Label, codeLabel) + if errored { + r.metrics.IncErrors(act.Label) + } + } +} + +func (r *Runner) randWait() time.Duration { + // nolint:gosec // This is not for cryptographic purposes. Chill, gosec. Chill. + wait := time.Duration(rand.Intn(int(r.cfg.MaxWait) - int(r.cfg.MinWait))) + return r.cfg.MinWait + wait +} diff --git a/scaletest/dashboard/run_test.go b/scaletest/dashboard/run_test.go new file mode 100644 index 0000000000000..d522ba1a6ec88 --- /dev/null +++ b/scaletest/dashboard/run_test.go @@ -0,0 +1,125 @@ +package dashboard_test + +import ( + "context" + "runtime" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/xerrors" + + "cdr.dev/slog/sloggers/slogtest" + "github.com/coder/coder/coderd/coderdtest" + "github.com/coder/coder/scaletest/dashboard" + "github.com/coder/coder/testutil" +) + +func Test_Run(t *testing.T) { + t.Parallel() + if testutil.RaceEnabled() { + t.Skip("skipping timing-sensitive test because of race detector") + } + if runtime.GOOS == "windows" { + t.Skip("skipping test on Windows") + } + + client := coderdtest.New(t, nil) + _ = coderdtest.CreateFirstUser(t, client) + + successfulAction := func(context.Context, *dashboard.Params) error { + return nil + } + failingAction := func(context.Context, *dashboard.Params) error { + return 
xerrors.Errorf("failed") + } + hangingAction := func(ctx context.Context, _ *dashboard.Params) error { + <-ctx.Done() + return ctx.Err() + } + + testActions := []dashboard.RollTableEntry{ + {0, successfulAction, "succeeds"}, + {1, failingAction, "fails"}, + {2, hangingAction, "hangs"}, + } + + log := slogtest.Make(t, &slogtest.Options{ + IgnoreErrors: true, + }) + m := &testMetrics{} + cfg := dashboard.Config{ + MinWait: time.Millisecond, + MaxWait: 10 * time.Millisecond, + Logger: log, + RollTable: testActions, + } + r := dashboard.NewRunner(client, m, cfg) + ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort) + t.Cleanup(cancel) + done := make(chan error) + go func() { + defer close(done) + done <- r.Run(ctx, "", nil) + }() + err, ok := <-done + assert.True(t, ok) + require.NoError(t, err) + + if assert.NotEmpty(t, m.ObservedDurations["succeeds"]) { + assert.NotZero(t, m.ObservedDurations["succeeds"][0]) + } + + if assert.NotEmpty(t, m.ObservedDurations["fails"]) { + assert.NotZero(t, m.ObservedDurations["fails"][0]) + } + + if assert.NotEmpty(t, m.ObservedDurations["hangs"]) { + assert.GreaterOrEqual(t, m.ObservedDurations["hangs"][0], cfg.MaxWait.Seconds()) + } + assert.Zero(t, m.Errors["succeeds"]) + assert.NotZero(t, m.Errors["fails"]) + assert.NotZero(t, m.Errors["hangs"]) + assert.NotEmpty(t, m.Statuses["succeeds"]) + assert.NotEmpty(t, m.Statuses["fails"]) + assert.NotEmpty(t, m.Statuses["hangs"]) +} + +type testMetrics struct { + sync.RWMutex + ObservedDurations map[string][]float64 + Errors map[string]int + Statuses map[string]map[string]int +} + +func (m *testMetrics) ObserveDuration(action string, d time.Duration) { + m.Lock() + defer m.Unlock() + if m.ObservedDurations == nil { + m.ObservedDurations = make(map[string][]float64) + } + m.ObservedDurations[action] = append(m.ObservedDurations[action], d.Seconds()) +} + +func (m *testMetrics) IncErrors(action string) { + m.Lock() + defer m.Unlock() + if m.Errors == nil { + 
m.Errors = make(map[string]int) + } + m.Errors[action]++ +} + +func (m *testMetrics) IncStatuses(action string, code string) { + m.Lock() + defer m.Unlock() + if m.Statuses == nil { + m.Statuses = make(map[string]map[string]int) + } + if m.Statuses[action] == nil { + m.Statuses[action] = make(map[string]int) + } + m.Statuses[action][code]++ +}