Merged
30 commits
663dee0
feat(cli): add dashboard loadtest command
johnstcn Jul 25, 2023
8d95283
testing
johnstcn Jul 25, 2023
3eac7cd
testing again
johnstcn Jul 25, 2023
c382f4c
testing again
johnstcn Jul 25, 2023
7319170
testing again
johnstcn Jul 25, 2023
e310af7
testing again
johnstcn Jul 25, 2023
c6b5322
ensure we are logged in
johnstcn Jul 25, 2023
298a4f1
log elapsed time
johnstcn Jul 25, 2023
fb183e9
name logger
johnstcn Jul 25, 2023
7eeda39
add auth req
johnstcn Jul 25, 2023
8cbd90a
add more auth checks
johnstcn Jul 25, 2023
765db11
move to a roll table so we have probabilities
johnstcn Jul 25, 2023
08e6d41
extract to a roll table
johnstcn Jul 25, 2023
4edeacf
make fmt
johnstcn Jul 25, 2023
f5b134a
move main roll table closer to where it is used
johnstcn Jul 25, 2023
0c03f93
add test for roll table ordering
johnstcn Jul 26, 2023
70550aa
move
johnstcn Jul 26, 2023
c1291ad
add cache and lots more actions
johnstcn Jul 26, 2023
5d17397
fixup! add cache and lots more actions
johnstcn Jul 26, 2023
2027bb1
Merge remote-tracking branch 'origin/main' into cj/scaletest-dashboard
johnstcn Jul 26, 2023
e5f0bbb
add metrics
johnstcn Jul 26, 2023
a3b75ca
fixup! add metrics
johnstcn Jul 26, 2023
60b5a76
improve metrics errors
johnstcn Jul 26, 2023
2674b29
allow configuring actions
johnstcn Jul 26, 2023
471f021
add unit test
johnstcn Jul 26, 2023
98b6396
fixup! add unit test
johnstcn Jul 26, 2023
61c3a06
check latency in test
johnstcn Jul 26, 2023
925a446
appease linter
johnstcn Jul 26, 2023
8f4c1fc
bump scrape wait timeout
johnstcn Jul 26, 2023
f7fa1c6
skip test on windows
johnstcn Jul 26, 2023
178 changes: 160 additions & 18 deletions cli/exp_scaletest.go
@@ -30,6 +30,7 @@ import (
"github.com/coder/coder/cryptorand"
"github.com/coder/coder/scaletest/agentconn"
"github.com/coder/coder/scaletest/createworkspaces"
"github.com/coder/coder/scaletest/dashboard"
"github.com/coder/coder/scaletest/harness"
"github.com/coder/coder/scaletest/reconnectingpty"
"github.com/coder/coder/scaletest/workspacebuild"
@@ -47,6 +48,7 @@ func (r *RootCmd) scaletestCmd() *clibase.Cmd {
},
Children: []*clibase.Cmd{
r.scaletestCleanup(),
r.scaletestDashboard(),
r.scaletestCreateWorkspaces(),
r.scaletestWorkspaceTraffic(),
},
@@ -317,6 +319,30 @@ func (s *scaletestOutputFlags) parse() ([]scaleTestOutput, error) {
return out, nil
}

type scaletestPrometheusFlags struct {
Address string
Wait time.Duration
}

func (s *scaletestPrometheusFlags) attach(opts *clibase.OptionSet) {
*opts = append(*opts,
clibase.Option{
Flag: "scaletest-prometheus-address",
Env: "CODER_SCALETEST_PROMETHEUS_ADDRESS",
Default: "0.0.0.0:21112",
Description: "Address on which to expose scaletest Prometheus metrics.",
Value: clibase.StringOf(&s.Address),
},
clibase.Option{
Flag: "scaletest-prometheus-wait",
Env: "CODER_SCALETEST_PROMETHEUS_WAIT",
Default: "15s",
Description: "How long to wait before exiting in order to allow Prometheus metrics to be scraped.",
Value: clibase.DurationOf(&s.Wait),
},
)
}

func requireAdmin(ctx context.Context, client *codersdk.Client) (codersdk.User, error) {
me, err := client.User(ctx, codersdk.Me)
if err != nil {
Expand Down Expand Up @@ -846,17 +872,16 @@ func (r *RootCmd) scaletestCreateWorkspaces() *clibase.Cmd {

func (r *RootCmd) scaletestWorkspaceTraffic() *clibase.Cmd {
var (
tickInterval time.Duration
bytesPerTick int64
ssh bool
scaletestPrometheusAddress string
scaletestPrometheusWait time.Duration
tickInterval time.Duration
bytesPerTick int64
ssh bool

client = &codersdk.Client{}
tracingFlags = &scaletestTracingFlags{}
strategy = &scaletestStrategyFlags{}
cleanupStrategy = &scaletestStrategyFlags{cleanup: true}
output = &scaletestOutputFlags{}
prometheusFlags = &scaletestPrometheusFlags{}
)

cmd := &clibase.Cmd{
@@ -871,7 +896,7 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *clibase.Cmd {
metrics := workspacetraffic.NewMetrics(reg, "username", "workspace_name", "agent_name")

logger := slog.Make(sloghuman.Sink(io.Discard))
prometheusSrvClose := ServeHandler(ctx, logger, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), scaletestPrometheusAddress, "prometheus")
prometheusSrvClose := ServeHandler(ctx, logger, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), prometheusFlags.Address, "prometheus")
defer prometheusSrvClose()

// Bypass rate limiting
@@ -905,8 +930,8 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *clibase.Cmd {
_, _ = fmt.Fprintf(inv.Stderr, "\nError uploading traces: %+v\n", err)
}
// Wait for prometheus metrics to be scraped
_, _ = fmt.Fprintf(inv.Stderr, "Waiting %s for prometheus metrics to be scraped\n", scaletestPrometheusWait)
<-time.After(scaletestPrometheusWait)
_, _ = fmt.Fprintf(inv.Stderr, "Waiting %s for prometheus metrics to be scraped\n", prometheusFlags.Wait)
<-time.After(prometheusFlags.Wait)
}()
tracer := tracerProvider.Tracer(scaletestTracerName)

@@ -1009,26 +1034,143 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *clibase.Cmd {
Description: "Send traffic over SSH.",
Value: clibase.BoolOf(&ssh),
},
}

tracingFlags.attach(&cmd.Options)
strategy.attach(&cmd.Options)
cleanupStrategy.attach(&cmd.Options)
output.attach(&cmd.Options)
prometheusFlags.attach(&cmd.Options)

return cmd
}

func (r *RootCmd) scaletestDashboard() *clibase.Cmd {
var (
count int64
minWait time.Duration
maxWait time.Duration

client = &codersdk.Client{}
tracingFlags = &scaletestTracingFlags{}
strategy = &scaletestStrategyFlags{}
cleanupStrategy = &scaletestStrategyFlags{cleanup: true}
output = &scaletestOutputFlags{}
prometheusFlags = &scaletestPrometheusFlags{}
)

cmd := &clibase.Cmd{
Use: "dashboard",
Short: "Generate traffic to the HTTP API to simulate use of the dashboard.",
Middleware: clibase.Chain(
r.InitClient(client),
),
Handler: func(inv *clibase.Invocation) error {
ctx := inv.Context()
logger := slog.Make(sloghuman.Sink(inv.Stdout)).Leveled(slog.LevelInfo)
tracerProvider, closeTracing, tracingEnabled, err := tracingFlags.provider(ctx)
if err != nil {
return xerrors.Errorf("create tracer provider: %w", err)
}
defer func() {
// Allow time for traces to flush even if command context is
// canceled. This is a no-op if tracing is not enabled.
_, _ = fmt.Fprintln(inv.Stderr, "\nUploading traces...")
if err := closeTracing(ctx); err != nil {
_, _ = fmt.Fprintf(inv.Stderr, "\nError uploading traces: %+v\n", err)
}
// Wait for prometheus metrics to be scraped
_, _ = fmt.Fprintf(inv.Stderr, "Waiting %s for prometheus metrics to be scraped\n", prometheusFlags.Wait)
<-time.After(prometheusFlags.Wait)
}()
tracer := tracerProvider.Tracer(scaletestTracerName)
outputs, err := output.parse()
if err != nil {
return xerrors.Errorf("could not parse --output flags")
}
reg := prometheus.NewRegistry()
prometheusSrvClose := ServeHandler(ctx, logger, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), prometheusFlags.Address, "prometheus")
defer prometheusSrvClose()
metrics := dashboard.NewMetrics(reg)

th := harness.NewTestHarness(strategy.toStrategy(), cleanupStrategy.toStrategy())

for i := int64(0); i < count; i++ {
name := fmt.Sprintf("dashboard-%d", i)
config := dashboard.Config{
MinWait: minWait,
MaxWait: maxWait,
Trace: tracingEnabled,
Logger: logger.Named(name),
RollTable: dashboard.DefaultActions,
}
if err := config.Validate(); err != nil {
return err
}
var runner harness.Runnable = dashboard.NewRunner(client, metrics, config)
if tracingEnabled {
runner = &runnableTraceWrapper{
tracer: tracer,
spanName: name,
runner: runner,
}
}
th.AddRun("dashboard", name, runner)
}

_, _ = fmt.Fprintln(inv.Stderr, "Running load test...")
testCtx, testCancel := strategy.toContext(ctx)
defer testCancel()
err = th.Run(testCtx)
if err != nil {
return xerrors.Errorf("run test harness (harness failure, not a test failure): %w", err)
}

res := th.Results()
for _, o := range outputs {
err = o.write(res, inv.Stdout)
if err != nil {
return xerrors.Errorf("write output %q to %q: %w", o.format, o.path, err)
}
}

if res.TotalFail > 0 {
return xerrors.New("load test failed, see above for more details")
}

return nil
},
}

cmd.Options = []clibase.Option{
{
Flag: "scaletest-prometheus-address",
Env: "CODER_SCALETEST_PROMETHEUS_ADDRESS",
Default: "0.0.0.0:21112",
Description: "Address on which to expose scaletest Prometheus metrics.",
Value: clibase.StringOf(&scaletestPrometheusAddress),
Flag: "count",
Env: "CODER_SCALETEST_DASHBOARD_COUNT",
Default: "1",
Description: "Number of concurrent workers.",
Value: clibase.Int64Of(&count),
},
{
Flag: "scaletest-prometheus-wait",
Env: "CODER_SCALETEST_PROMETHEUS_WAIT",
Default: "5s",
Description: "How long to wait before exiting in order to allow Prometheus metrics to be scraped.",
Value: clibase.DurationOf(&scaletestPrometheusWait),
Flag: "min-wait",
Env: "CODER_SCALETEST_DASHBOARD_MIN_WAIT",
Default: "100ms",
Description: "Minimum wait between fetches.",
Value: clibase.DurationOf(&minWait),
},
{
Flag: "max-wait",
Env: "CODER_SCALETEST_DASHBOARD_MAX_WAIT",
Default: "1s",
Description: "Maximum wait between fetches.",
Value: clibase.DurationOf(&maxWait),
},
}

tracingFlags.attach(&cmd.Options)
strategy.attach(&cmd.Options)
cleanupStrategy.attach(&cmd.Options)
output.attach(&cmd.Options)
prometheusFlags.attach(&cmd.Options)

return cmd
}
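Each dashboard runner above is driven by a weighted roll table of simulated dashboard actions (RollTable: dashboard.DefaultActions; see the "move to a roll table so we have probabilities" and "add test for roll table ordering" commits). The sketch below illustrates only the general weighted-selection technique, assuming a roll table maps weight ranges to actions; the names rollTable, rollTableEntry, choose, and max are hypothetical and are not the actual scaletest/dashboard API.

// A minimal, hypothetical sketch of weighted action selection via a roll
// table. The real implementation lives in scaletest/dashboard; these names
// are illustrative only.
package main

import (
	"fmt"
	"math/rand"
)

// rollTableEntry pairs a simulated dashboard action with a weight. Entries
// with larger weights are selected proportionally more often.
type rollTableEntry struct {
	weight int
	label  string
	run    func() error
}

type rollTable []rollTableEntry

// max returns the sum of all weights, i.e. the exclusive upper bound of a roll.
func (t rollTable) max() int {
	total := 0
	for _, e := range t {
		total += e.weight
	}
	return total
}

// choose walks the table in order, consuming the roll until it falls inside
// an entry's weight range.
func (t rollTable) choose(roll int) rollTableEntry {
	for _, e := range t {
		if roll < e.weight {
			return e
		}
		roll -= e.weight
	}
	return rollTableEntry{}
}

func main() {
	table := rollTable{
		{weight: 3, label: "fetch workspaces", run: func() error { return nil }},
		{weight: 2, label: "fetch users", run: func() error { return nil }},
		{weight: 1, label: "check permissions", run: func() error { return nil }},
	}
	// Each iteration stands in for one dashboard action, performed after a
	// random wait between MinWait and MaxWait in the real runner.
	for i := 0; i < 5; i++ {
		entry := table.choose(rand.Intn(table.max()))
		fmt.Println("selected:", entry.label)
		_ = entry.run()
	}
}
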
26 changes: 26 additions & 0 deletions cli/exp_scaletest_test.go
@@ -78,3 +78,29 @@ func TestScaleTestWorkspaceTraffic(t *testing.T) {
err := inv.WithContext(ctx).Run()
require.ErrorContains(t, err, "no scaletest workspaces exist")
}

// This test just validates that the CLI command accepts its known arguments.
func TestScaleTestDashboard(t *testing.T) {
t.Parallel()

ctx, cancelFunc := context.WithTimeout(context.Background(), testutil.WaitMedium)
defer cancelFunc()

client := coderdtest.New(t, nil)
_ = coderdtest.CreateFirstUser(t, client)

inv, root := clitest.New(t, "exp", "scaletest", "dashboard",
"--count", "1",
"--min-wait", "100ms",
"--max-wait", "1s",
"--timeout", "1s",
"--scaletest-prometheus-address", "127.0.0.1:0",
"--scaletest-prometheus-wait", "0s",
)
clitest.SetupConfig(t, client, root)
var stdout, stderr bytes.Buffer
inv.Stdout = &stdout
inv.Stderr = &stderr
err := inv.WithContext(ctx).Run()
Member:
nit: you don't need to perform any actions/waiters/or output string-checks?

Member Author:
This is me being lazy 😅

require.NoError(t, err, "")
}
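For reference, the flags exercised by this test correspond to an invocation along the following lines against a real deployment; the values are illustrative, not recommendations:

coder exp scaletest dashboard \
  --count 8 \
  --min-wait 100ms \
  --max-wait 1s \
  --timeout 5m \
  --scaletest-prometheus-address 0.0.0.0:21112 \
  --scaletest-prometheus-wait 15s
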
41 changes: 41 additions & 0 deletions codersdk/rbacresources.go
@@ -27,6 +27,47 @@ const (
ResourceSystem RBACResource = "system"
)

const (
ActionCreate = "create"
ActionRead = "read"
ActionUpdate = "update"
ActionDelete = "delete"
)

var (
AllRBACResources = []RBACResource{
Member:
nit: It looks like something devs may forget to update in the future.

Member:
We have rbac gen:

// Code generated by rbacgen/main.go. DO NOT EDIT.

I wonder if we can repurpose it for this too?

In a more general note though, we manually copy over all enums we have in the database package to the sdk package. So this problem already exists for all enum types.

ResourceWorkspace,
ResourceWorkspaceProxy,
ResourceWorkspaceExecution,
ResourceWorkspaceApplicationConnect,
ResourceAuditLog,
ResourceTemplate,
ResourceGroup,
ResourceFile,
ResourceProvisionerDaemon,
ResourceOrganization,
ResourceRoleAssignment,
ResourceOrgRoleAssignment,
ResourceAPIKey,
ResourceUser,
ResourceUserData,
ResourceOrganizationMember,
ResourceLicense,
ResourceDeploymentValues,
ResourceDeploymentStats,
ResourceReplicas,
ResourceDebugInfo,
ResourceSystem,
}

AllRBACActions = []string{
ActionCreate,
ActionRead,
ActionUpdate,
ActionDelete,
}
)

func (r RBACResource) String() string {
return string(r)
}
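The AllRBACResources and AllRBACActions slices added here let a caller enumerate every resource/action pair, which the dashboard scaletest uses when issuing authorization checks (see the "add auth req" and "add more auth checks" commits). Below is a minimal sketch of that enumeration; the authzCheck struct and allChecks helper are illustrative stand-ins and are not the codersdk authorization API.

// Illustrative only: enumerating every resource/action pair from the new
// codersdk slices. The authzCheck struct and allChecks helper are
// hypothetical; they are not the codersdk authorization API.
package main

import (
	"fmt"

	"github.com/coder/coder/codersdk"
)

type authzCheck struct {
	Object string
	Action string
}

// allChecks builds one check per (resource, action) combination.
func allChecks() []authzCheck {
	checks := make([]authzCheck, 0, len(codersdk.AllRBACResources)*len(codersdk.AllRBACActions))
	for _, res := range codersdk.AllRBACResources {
		for _, action := range codersdk.AllRBACActions {
			checks = append(checks, authzCheck{Object: res.String(), Action: action})
		}
	}
	return checks
}

func main() {
	for _, c := range allChecks() {
		fmt.Printf("%s: %s\n", c.Object, c.Action)
	}
}
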