Skip to content

Commit 3282908

Browse files
authored
feat(cli): add dashboard load test command (#8723)
1 parent c3aface commit 3282908

File tree

10 files changed

+995
-18
lines changed

10 files changed

+995
-18
lines changed

cli/exp_scaletest.go

Lines changed: 160 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ import (
3030
"github.com/coder/coder/cryptorand"
3131
"github.com/coder/coder/scaletest/agentconn"
3232
"github.com/coder/coder/scaletest/createworkspaces"
33+
"github.com/coder/coder/scaletest/dashboard"
3334
"github.com/coder/coder/scaletest/harness"
3435
"github.com/coder/coder/scaletest/reconnectingpty"
3536
"github.com/coder/coder/scaletest/workspacebuild"
@@ -47,6 +48,7 @@ func (r *RootCmd) scaletestCmd() *clibase.Cmd {
4748
},
4849
Children: []*clibase.Cmd{
4950
r.scaletestCleanup(),
51+
r.scaletestDashboard(),
5052
r.scaletestCreateWorkspaces(),
5153
r.scaletestWorkspaceTraffic(),
5254
},
@@ -317,6 +319,30 @@ func (s *scaletestOutputFlags) parse() ([]scaleTestOutput, error) {
317319
return out, nil
318320
}
319321

322+
type scaletestPrometheusFlags struct {
323+
Address string
324+
Wait time.Duration
325+
}
326+
327+
func (s *scaletestPrometheusFlags) attach(opts *clibase.OptionSet) {
328+
*opts = append(*opts,
329+
clibase.Option{
330+
Flag: "scaletest-prometheus-address",
331+
Env: "CODER_SCALETEST_PROMETHEUS_ADDRESS",
332+
Default: "0.0.0.0:21112",
333+
Description: "Address on which to expose scaletest Prometheus metrics.",
334+
Value: clibase.StringOf(&s.Address),
335+
},
336+
clibase.Option{
337+
Flag: "scaletest-prometheus-wait",
338+
Env: "CODER_SCALETEST_PROMETHEUS_WAIT",
339+
Default: "15s",
340+
Description: "How long to wait before exiting in order to allow Prometheus metrics to be scraped.",
341+
Value: clibase.DurationOf(&s.Wait),
342+
},
343+
)
344+
}
345+
320346
func requireAdmin(ctx context.Context, client *codersdk.Client) (codersdk.User, error) {
321347
me, err := client.User(ctx, codersdk.Me)
322348
if err != nil {
@@ -846,17 +872,16 @@ func (r *RootCmd) scaletestCreateWorkspaces() *clibase.Cmd {
846872

847873
func (r *RootCmd) scaletestWorkspaceTraffic() *clibase.Cmd {
848874
var (
849-
tickInterval time.Duration
850-
bytesPerTick int64
851-
ssh bool
852-
scaletestPrometheusAddress string
853-
scaletestPrometheusWait time.Duration
875+
tickInterval time.Duration
876+
bytesPerTick int64
877+
ssh bool
854878

855879
client = &codersdk.Client{}
856880
tracingFlags = &scaletestTracingFlags{}
857881
strategy = &scaletestStrategyFlags{}
858882
cleanupStrategy = &scaletestStrategyFlags{cleanup: true}
859883
output = &scaletestOutputFlags{}
884+
prometheusFlags = &scaletestPrometheusFlags{}
860885
)
861886

862887
cmd := &clibase.Cmd{
@@ -871,7 +896,7 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *clibase.Cmd {
871896
metrics := workspacetraffic.NewMetrics(reg, "username", "workspace_name", "agent_name")
872897

873898
logger := slog.Make(sloghuman.Sink(io.Discard))
874-
prometheusSrvClose := ServeHandler(ctx, logger, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), scaletestPrometheusAddress, "prometheus")
899+
prometheusSrvClose := ServeHandler(ctx, logger, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), prometheusFlags.Address, "prometheus")
875900
defer prometheusSrvClose()
876901

877902
// Bypass rate limiting
@@ -905,8 +930,8 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *clibase.Cmd {
905930
_, _ = fmt.Fprintf(inv.Stderr, "\nError uploading traces: %+v\n", err)
906931
}
907932
// Wait for prometheus metrics to be scraped
908-
_, _ = fmt.Fprintf(inv.Stderr, "Waiting %s for prometheus metrics to be scraped\n", scaletestPrometheusWait)
909-
<-time.After(scaletestPrometheusWait)
933+
_, _ = fmt.Fprintf(inv.Stderr, "Waiting %s for prometheus metrics to be scraped\n", prometheusFlags.Wait)
934+
<-time.After(prometheusFlags.Wait)
910935
}()
911936
tracer := tracerProvider.Tracer(scaletestTracerName)
912937

@@ -1009,26 +1034,143 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *clibase.Cmd {
10091034
Description: "Send traffic over SSH.",
10101035
Value: clibase.BoolOf(&ssh),
10111036
},
1037+
}
1038+
1039+
tracingFlags.attach(&cmd.Options)
1040+
strategy.attach(&cmd.Options)
1041+
cleanupStrategy.attach(&cmd.Options)
1042+
output.attach(&cmd.Options)
1043+
prometheusFlags.attach(&cmd.Options)
1044+
1045+
return cmd
1046+
}
1047+
1048+
func (r *RootCmd) scaletestDashboard() *clibase.Cmd {
1049+
var (
1050+
count int64
1051+
minWait time.Duration
1052+
maxWait time.Duration
1053+
1054+
client = &codersdk.Client{}
1055+
tracingFlags = &scaletestTracingFlags{}
1056+
strategy = &scaletestStrategyFlags{}
1057+
cleanupStrategy = &scaletestStrategyFlags{cleanup: true}
1058+
output = &scaletestOutputFlags{}
1059+
prometheusFlags = &scaletestPrometheusFlags{}
1060+
)
1061+
1062+
cmd := &clibase.Cmd{
1063+
Use: "dashboard",
1064+
Short: "Generate traffic to the HTTP API to simulate use of the dashboard.",
1065+
Middleware: clibase.Chain(
1066+
r.InitClient(client),
1067+
),
1068+
Handler: func(inv *clibase.Invocation) error {
1069+
ctx := inv.Context()
1070+
logger := slog.Make(sloghuman.Sink(inv.Stdout)).Leveled(slog.LevelInfo)
1071+
tracerProvider, closeTracing, tracingEnabled, err := tracingFlags.provider(ctx)
1072+
if err != nil {
1073+
return xerrors.Errorf("create tracer provider: %w", err)
1074+
}
1075+
defer func() {
1076+
// Allow time for traces to flush even if command context is
1077+
// canceled. This is a no-op if tracing is not enabled.
1078+
_, _ = fmt.Fprintln(inv.Stderr, "\nUploading traces...")
1079+
if err := closeTracing(ctx); err != nil {
1080+
_, _ = fmt.Fprintf(inv.Stderr, "\nError uploading traces: %+v\n", err)
1081+
}
1082+
// Wait for prometheus metrics to be scraped
1083+
_, _ = fmt.Fprintf(inv.Stderr, "Waiting %s for prometheus metrics to be scraped\n", prometheusFlags.Wait)
1084+
<-time.After(prometheusFlags.Wait)
1085+
}()
1086+
tracer := tracerProvider.Tracer(scaletestTracerName)
1087+
outputs, err := output.parse()
1088+
if err != nil {
1089+
return xerrors.Errorf("could not parse --output flags")
1090+
}
1091+
reg := prometheus.NewRegistry()
1092+
prometheusSrvClose := ServeHandler(ctx, logger, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), prometheusFlags.Address, "prometheus")
1093+
defer prometheusSrvClose()
1094+
metrics := dashboard.NewMetrics(reg)
1095+
1096+
th := harness.NewTestHarness(strategy.toStrategy(), cleanupStrategy.toStrategy())
1097+
1098+
for i := int64(0); i < count; i++ {
1099+
name := fmt.Sprintf("dashboard-%d", i)
1100+
config := dashboard.Config{
1101+
MinWait: minWait,
1102+
MaxWait: maxWait,
1103+
Trace: tracingEnabled,
1104+
Logger: logger.Named(name),
1105+
RollTable: dashboard.DefaultActions,
1106+
}
1107+
if err := config.Validate(); err != nil {
1108+
return err
1109+
}
1110+
var runner harness.Runnable = dashboard.NewRunner(client, metrics, config)
1111+
if tracingEnabled {
1112+
runner = &runnableTraceWrapper{
1113+
tracer: tracer,
1114+
spanName: name,
1115+
runner: runner,
1116+
}
1117+
}
1118+
th.AddRun("dashboard", name, runner)
1119+
}
1120+
1121+
_, _ = fmt.Fprintln(inv.Stderr, "Running load test...")
1122+
testCtx, testCancel := strategy.toContext(ctx)
1123+
defer testCancel()
1124+
err = th.Run(testCtx)
1125+
if err != nil {
1126+
return xerrors.Errorf("run test harness (harness failure, not a test failure): %w", err)
1127+
}
1128+
1129+
res := th.Results()
1130+
for _, o := range outputs {
1131+
err = o.write(res, inv.Stdout)
1132+
if err != nil {
1133+
return xerrors.Errorf("write output %q to %q: %w", o.format, o.path, err)
1134+
}
1135+
}
1136+
1137+
if res.TotalFail > 0 {
1138+
return xerrors.New("load test failed, see above for more details")
1139+
}
1140+
1141+
return nil
1142+
},
1143+
}
1144+
1145+
cmd.Options = []clibase.Option{
10121146
{
1013-
Flag: "scaletest-prometheus-address",
1014-
Env: "CODER_SCALETEST_PROMETHEUS_ADDRESS",
1015-
Default: "0.0.0.0:21112",
1016-
Description: "Address on which to expose scaletest Prometheus metrics.",
1017-
Value: clibase.StringOf(&scaletestPrometheusAddress),
1147+
Flag: "count",
1148+
Env: "CODER_SCALETEST_DASHBOARD_COUNT",
1149+
Default: "1",
1150+
Description: "Number of concurrent workers.",
1151+
Value: clibase.Int64Of(&count),
10181152
},
10191153
{
1020-
Flag: "scaletest-prometheus-wait",
1021-
Env: "CODER_SCALETEST_PROMETHEUS_WAIT",
1022-
Default: "5s",
1023-
Description: "How long to wait before exiting in order to allow Prometheus metrics to be scraped.",
1024-
Value: clibase.DurationOf(&scaletestPrometheusWait),
1154+
Flag: "min-wait",
1155+
Env: "CODER_SCALETEST_DASHBOARD_MIN_WAIT",
1156+
Default: "100ms",
1157+
Description: "Minimum wait between fetches.",
1158+
Value: clibase.DurationOf(&minWait),
1159+
},
1160+
{
1161+
Flag: "max-wait",
1162+
Env: "CODER_SCALETEST_DASHBOARD_MAX_WAIT",
1163+
Default: "1s",
1164+
Description: "Maximum wait between fetches.",
1165+
Value: clibase.DurationOf(&maxWait),
10251166
},
10261167
}
10271168

10281169
tracingFlags.attach(&cmd.Options)
10291170
strategy.attach(&cmd.Options)
10301171
cleanupStrategy.attach(&cmd.Options)
10311172
output.attach(&cmd.Options)
1173+
prometheusFlags.attach(&cmd.Options)
10321174

10331175
return cmd
10341176
}

cli/exp_scaletest_test.go

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -78,3 +78,29 @@ func TestScaleTestWorkspaceTraffic(t *testing.T) {
7878
err := inv.WithContext(ctx).Run()
7979
require.ErrorContains(t, err, "no scaletest workspaces exist")
8080
}
81+
82+
// This test just validates that the CLI command accepts its known arguments.
83+
func TestScaleTestDashboard(t *testing.T) {
84+
t.Parallel()
85+
86+
ctx, cancelFunc := context.WithTimeout(context.Background(), testutil.WaitMedium)
87+
defer cancelFunc()
88+
89+
client := coderdtest.New(t, nil)
90+
_ = coderdtest.CreateFirstUser(t, client)
91+
92+
inv, root := clitest.New(t, "exp", "scaletest", "dashboard",
93+
"--count", "1",
94+
"--min-wait", "100ms",
95+
"--max-wait", "1s",
96+
"--timeout", "1s",
97+
"--scaletest-prometheus-address", "127.0.0.1:0",
98+
"--scaletest-prometheus-wait", "0s",
99+
)
100+
clitest.SetupConfig(t, client, root)
101+
var stdout, stderr bytes.Buffer
102+
inv.Stdout = &stdout
103+
inv.Stderr = &stderr
104+
err := inv.WithContext(ctx).Run()
105+
require.NoError(t, err, "")
106+
}

codersdk/rbacresources.go

Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,47 @@ const (
2727
ResourceSystem RBACResource = "system"
2828
)
2929

30+
const (
31+
ActionCreate = "create"
32+
ActionRead = "read"
33+
ActionUpdate = "update"
34+
ActionDelete = "delete"
35+
)
36+
37+
var (
38+
AllRBACResources = []RBACResource{
39+
ResourceWorkspace,
40+
ResourceWorkspaceProxy,
41+
ResourceWorkspaceExecution,
42+
ResourceWorkspaceApplicationConnect,
43+
ResourceAuditLog,
44+
ResourceTemplate,
45+
ResourceGroup,
46+
ResourceFile,
47+
ResourceProvisionerDaemon,
48+
ResourceOrganization,
49+
ResourceRoleAssignment,
50+
ResourceOrgRoleAssignment,
51+
ResourceAPIKey,
52+
ResourceUser,
53+
ResourceUserData,
54+
ResourceOrganizationMember,
55+
ResourceLicense,
56+
ResourceDeploymentValues,
57+
ResourceDeploymentStats,
58+
ResourceReplicas,
59+
ResourceDebugInfo,
60+
ResourceSystem,
61+
}
62+
63+
AllRBACActions = []string{
64+
ActionCreate,
65+
ActionRead,
66+
ActionUpdate,
67+
ActionDelete,
68+
}
69+
)
70+
3071
func (r RBACResource) String() string {
3172
return string(r)
3273
}

0 commit comments

Comments (0)
pFad - Phonifier reborn

Pfad - The Proxy pFad of © 2024 Garber Painting. All rights reserved.

Note: This service is not intended for secure transactions such as banking, social media, email, or purchasing. Use at your own risk. We assume no liability whatsoever for broken pages.


Alternative Proxies:

Alternative Proxy

pFad Proxy

pFad v3 Proxy

pFad v4 Proxy