From 894b29839973bafe9df14c7fd87086d19403acf7 Mon Sep 17 00:00:00 2001 From: Danielle Maywood Date: Fri, 15 Aug 2025 09:44:55 +0000 Subject: [PATCH 1/5] fix: stop reading closed channel for `/watch` devcontainers endpoint Fixes https://github.com/coder/coder/issues/19372 We increase the read limit to 4MiB (we use this limit elsewhere). We also make sure to stop sending messages of `containersCh` becomes closed. --- coderd/workspaceagents.go | 6 +++++- codersdk/workspacesdk/agentconn.go | 5 ++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/coderd/workspaceagents.go b/coderd/workspaceagents.go index d600eff6ecfec..f2ee1ac18e823 100644 --- a/coderd/workspaceagents.go +++ b/coderd/workspaceagents.go @@ -896,7 +896,11 @@ func (api *API) watchWorkspaceAgentContainers(rw http.ResponseWriter, r *http.Re case <-ctx.Done(): return - case containers := <-containersCh: + case containers, ok := <-containersCh: + if !ok { + return + } + if err := encoder.Encode(containers); err != nil { api.Logger.Error(ctx, "encode containers", slog.Error(err)) return diff --git a/codersdk/workspacesdk/agentconn.go b/codersdk/workspacesdk/agentconn.go index ce66d5e1b8a70..1247abf4877fc 100644 --- a/codersdk/workspacesdk/agentconn.go +++ b/codersdk/workspacesdk/agentconn.go @@ -399,7 +399,8 @@ func (c *AgentConn) WatchContainers(ctx context.Context, logger slog.Logger) (<- url := fmt.Sprintf("http://%s%s", host, "/api/v0/containers/watch") conn, res, err := websocket.Dial(ctx, url, &websocket.DialOptions{ - HTTPClient: c.apiClient(), + HTTPClient: c.apiClient(), + CompressionMode: websocket.CompressionContextTakeover, }) if err != nil { if res == nil { @@ -411,6 +412,8 @@ func (c *AgentConn) WatchContainers(ctx context.Context, logger slog.Logger) (<- defer res.Body.Close() } + conn.SetReadLimit(1 << 22) // 4MiB + d := wsjson.NewDecoder[codersdk.WorkspaceAgentListContainersResponse](conn, websocket.MessageText, logger) return d.Chan(), d, nil } From 
1abe70bc6a22142b2c109fe2b9ecc2cc54553c8f Mon Sep 17 00:00:00 2001 From: Danielle Maywood Date: Fri, 15 Aug 2025 09:57:40 +0000 Subject: [PATCH 2/5] chore: use NoContextTakeover compression mode --- codersdk/workspacesdk/agentconn.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/codersdk/workspacesdk/agentconn.go b/codersdk/workspacesdk/agentconn.go index 1247abf4877fc..2ecbc4ebecd35 100644 --- a/codersdk/workspacesdk/agentconn.go +++ b/codersdk/workspacesdk/agentconn.go @@ -400,7 +400,7 @@ func (c *AgentConn) WatchContainers(ctx context.Context, logger slog.Logger) (<- conn, res, err := websocket.Dial(ctx, url, &websocket.DialOptions{ HTTPClient: c.apiClient(), - CompressionMode: websocket.CompressionContextTakeover, + CompressionMode: websocket.CompressionNoContextTakeover, }) if err != nil { if res == nil { From 0ca20dc5bd7e1ddfc50fc87f5197fcdaaf62801b Mon Sep 17 00:00:00 2001 From: Danielle Maywood Date: Fri, 15 Aug 2025 10:18:45 +0000 Subject: [PATCH 3/5] test: add a test to validate behaviour --- coderd/workspaceagents_test.go | 370 ++++++++++++++++++++------------- codersdk/workspaceagents.go | 2 + 2 files changed, 224 insertions(+), 148 deletions(-) diff --git a/coderd/workspaceagents_test.go b/coderd/workspaceagents_test.go index 30859cb6391e6..948123598de9f 100644 --- a/coderd/workspaceagents_test.go +++ b/coderd/workspaceagents_test.go @@ -1389,169 +1389,147 @@ func TestWorkspaceAgentContainers(t *testing.T) { func TestWatchWorkspaceAgentDevcontainers(t *testing.T) { t.Parallel() - var ( - ctx = testutil.Context(t, testutil.WaitLong) - logger = slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug) - mClock = quartz.NewMock(t) - updaterTickerTrap = mClock.Trap().TickerFunc("updaterLoop") - mCtrl = gomock.NewController(t) - mCCLI = acmock.NewMockContainerCLI(mCtrl) - - client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{Logger: &logger}) - user = coderdtest.CreateFirstUser(t, client) - r = 
dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{ - OrganizationID: user.OrganizationID, - OwnerID: user.UserID, - }).WithAgent(func(agents []*proto.Agent) []*proto.Agent { - return agents - }).Do() + t.Run("OK", func(t *testing.T) { + t.Parallel() - fakeContainer1 = codersdk.WorkspaceAgentContainer{ - ID: "container1", - CreatedAt: dbtime.Now(), - FriendlyName: "container1", - Image: "busybox:latest", - Labels: map[string]string{ - agentcontainers.DevcontainerLocalFolderLabel: "/home/coder/project1", - agentcontainers.DevcontainerConfigFileLabel: "/home/coder/project1/.devcontainer/devcontainer.json", - }, - Running: true, - Status: "running", - } + var ( + ctx = testutil.Context(t, testutil.WaitLong) + logger = slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug) + mClock = quartz.NewMock(t) + updaterTickerTrap = mClock.Trap().TickerFunc("updaterLoop") + mCtrl = gomock.NewController(t) + mCCLI = acmock.NewMockContainerCLI(mCtrl) + + client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{Logger: &logger}) + user = coderdtest.CreateFirstUser(t, client) + r = dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{ + OrganizationID: user.OrganizationID, + OwnerID: user.UserID, + }).WithAgent(func(agents []*proto.Agent) []*proto.Agent { + return agents + }).Do() + + fakeContainer1 = codersdk.WorkspaceAgentContainer{ + ID: "container1", + CreatedAt: dbtime.Now(), + FriendlyName: "container1", + Image: "busybox:latest", + Labels: map[string]string{ + agentcontainers.DevcontainerLocalFolderLabel: "/home/coder/project1", + agentcontainers.DevcontainerConfigFileLabel: "/home/coder/project1/.devcontainer/devcontainer.json", + }, + Running: true, + Status: "running", + } - fakeContainer2 = codersdk.WorkspaceAgentContainer{ - ID: "container1", - CreatedAt: dbtime.Now(), - FriendlyName: "container2", - Image: "busybox:latest", - Labels: map[string]string{ - agentcontainers.DevcontainerLocalFolderLabel: "/home/coder/project2", - 
agentcontainers.DevcontainerConfigFileLabel: "/home/coder/project2/.devcontainer/devcontainer.json", - }, - Running: true, - Status: "running", - } - ) + fakeContainer2 = codersdk.WorkspaceAgentContainer{ + ID: "container1", + CreatedAt: dbtime.Now(), + FriendlyName: "container2", + Image: "busybox:latest", + Labels: map[string]string{ + agentcontainers.DevcontainerLocalFolderLabel: "/home/coder/project2", + agentcontainers.DevcontainerConfigFileLabel: "/home/coder/project2/.devcontainer/devcontainer.json", + }, + Running: true, + Status: "running", + } + ) - stages := []struct { - containers []codersdk.WorkspaceAgentContainer - expected codersdk.WorkspaceAgentListContainersResponse - }{ - { - containers: []codersdk.WorkspaceAgentContainer{fakeContainer1}, - expected: codersdk.WorkspaceAgentListContainersResponse{ - Containers: []codersdk.WorkspaceAgentContainer{fakeContainer1}, - Devcontainers: []codersdk.WorkspaceAgentDevcontainer{ - { - Name: "project1", - WorkspaceFolder: fakeContainer1.Labels[agentcontainers.DevcontainerLocalFolderLabel], - ConfigPath: fakeContainer1.Labels[agentcontainers.DevcontainerConfigFileLabel], - Status: "running", - Container: &fakeContainer1, + stages := []struct { + containers []codersdk.WorkspaceAgentContainer + expected codersdk.WorkspaceAgentListContainersResponse + }{ + { + containers: []codersdk.WorkspaceAgentContainer{fakeContainer1}, + expected: codersdk.WorkspaceAgentListContainersResponse{ + Containers: []codersdk.WorkspaceAgentContainer{fakeContainer1}, + Devcontainers: []codersdk.WorkspaceAgentDevcontainer{ + { + Name: "project1", + WorkspaceFolder: fakeContainer1.Labels[agentcontainers.DevcontainerLocalFolderLabel], + ConfigPath: fakeContainer1.Labels[agentcontainers.DevcontainerConfigFileLabel], + Status: "running", + Container: &fakeContainer1, + }, }, }, }, - }, - { - containers: []codersdk.WorkspaceAgentContainer{fakeContainer1, fakeContainer2}, - expected: codersdk.WorkspaceAgentListContainersResponse{ - Containers: 
[]codersdk.WorkspaceAgentContainer{fakeContainer1, fakeContainer2}, - Devcontainers: []codersdk.WorkspaceAgentDevcontainer{ - { - Name: "project1", - WorkspaceFolder: fakeContainer1.Labels[agentcontainers.DevcontainerLocalFolderLabel], - ConfigPath: fakeContainer1.Labels[agentcontainers.DevcontainerConfigFileLabel], - Status: "running", - Container: &fakeContainer1, - }, - { - Name: "project2", - WorkspaceFolder: fakeContainer2.Labels[agentcontainers.DevcontainerLocalFolderLabel], - ConfigPath: fakeContainer2.Labels[agentcontainers.DevcontainerConfigFileLabel], - Status: "running", - Container: &fakeContainer2, + { + containers: []codersdk.WorkspaceAgentContainer{fakeContainer1, fakeContainer2}, + expected: codersdk.WorkspaceAgentListContainersResponse{ + Containers: []codersdk.WorkspaceAgentContainer{fakeContainer1, fakeContainer2}, + Devcontainers: []codersdk.WorkspaceAgentDevcontainer{ + { + Name: "project1", + WorkspaceFolder: fakeContainer1.Labels[agentcontainers.DevcontainerLocalFolderLabel], + ConfigPath: fakeContainer1.Labels[agentcontainers.DevcontainerConfigFileLabel], + Status: "running", + Container: &fakeContainer1, + }, + { + Name: "project2", + WorkspaceFolder: fakeContainer2.Labels[agentcontainers.DevcontainerLocalFolderLabel], + ConfigPath: fakeContainer2.Labels[agentcontainers.DevcontainerConfigFileLabel], + Status: "running", + Container: &fakeContainer2, + }, }, }, }, - }, - { - containers: []codersdk.WorkspaceAgentContainer{fakeContainer2}, - expected: codersdk.WorkspaceAgentListContainersResponse{ - Containers: []codersdk.WorkspaceAgentContainer{fakeContainer2}, - Devcontainers: []codersdk.WorkspaceAgentDevcontainer{ - { - Name: "", - WorkspaceFolder: fakeContainer1.Labels[agentcontainers.DevcontainerLocalFolderLabel], - ConfigPath: fakeContainer1.Labels[agentcontainers.DevcontainerConfigFileLabel], - Status: "stopped", - Container: nil, - }, - { - Name: "project2", - WorkspaceFolder: 
fakeContainer2.Labels[agentcontainers.DevcontainerLocalFolderLabel], - ConfigPath: fakeContainer2.Labels[agentcontainers.DevcontainerConfigFileLabel], - Status: "running", - Container: &fakeContainer2, + { + containers: []codersdk.WorkspaceAgentContainer{fakeContainer2}, + expected: codersdk.WorkspaceAgentListContainersResponse{ + Containers: []codersdk.WorkspaceAgentContainer{fakeContainer2}, + Devcontainers: []codersdk.WorkspaceAgentDevcontainer{ + { + Name: "", + WorkspaceFolder: fakeContainer1.Labels[agentcontainers.DevcontainerLocalFolderLabel], + ConfigPath: fakeContainer1.Labels[agentcontainers.DevcontainerConfigFileLabel], + Status: "stopped", + Container: nil, + }, + { + Name: "project2", + WorkspaceFolder: fakeContainer2.Labels[agentcontainers.DevcontainerLocalFolderLabel], + ConfigPath: fakeContainer2.Labels[agentcontainers.DevcontainerConfigFileLabel], + Status: "running", + Container: &fakeContainer2, + }, }, }, }, - }, - } - - // Set up initial state for immediate send on connection - mCCLI.EXPECT().List(gomock.Any()).Return(codersdk.WorkspaceAgentListContainersResponse{Containers: stages[0].containers}, nil) - mCCLI.EXPECT().DetectArchitecture(gomock.Any(), gomock.Any()).Return("", nil).AnyTimes() - - _ = agenttest.New(t, client.URL, r.AgentToken, func(o *agent.Options) { - o.Logger = logger.Named("agent") - o.Devcontainers = true - o.DevcontainerAPIOptions = []agentcontainers.Option{ - agentcontainers.WithClock(mClock), - agentcontainers.WithContainerCLI(mCCLI), - agentcontainers.WithWatcher(watcher.NewNoop()), } - }) - resources := coderdtest.NewWorkspaceAgentWaiter(t, client, r.Workspace.ID).Wait() - require.Len(t, resources, 1, "expected one resource") - require.Len(t, resources[0].Agents, 1, "expected one agent") - agentID := resources[0].Agents[0].ID - - updaterTickerTrap.MustWait(ctx).MustRelease(ctx) - defer updaterTickerTrap.Close() + // Set up initial state for immediate send on connection + 
mCCLI.EXPECT().List(gomock.Any()).Return(codersdk.WorkspaceAgentListContainersResponse{Containers: stages[0].containers}, nil) + mCCLI.EXPECT().DetectArchitecture(gomock.Any(), gomock.Any()).Return("", nil).AnyTimes() - containers, closer, err := client.WatchWorkspaceAgentContainers(ctx, agentID) - require.NoError(t, err) - defer func() { - closer.Close() - }() + _ = agenttest.New(t, client.URL, r.AgentToken, func(o *agent.Options) { + o.Logger = logger.Named("agent") + o.Devcontainers = true + o.DevcontainerAPIOptions = []agentcontainers.Option{ + agentcontainers.WithClock(mClock), + agentcontainers.WithContainerCLI(mCCLI), + agentcontainers.WithWatcher(watcher.NewNoop()), + } + }) - // Read initial state sent immediately on connection - var got codersdk.WorkspaceAgentListContainersResponse - select { - case <-ctx.Done(): - case got = <-containers: - } - require.NoError(t, ctx.Err()) - - require.Equal(t, stages[0].expected.Containers, got.Containers) - require.Len(t, got.Devcontainers, len(stages[0].expected.Devcontainers)) - for j, expectedDev := range stages[0].expected.Devcontainers { - gotDev := got.Devcontainers[j] - require.Equal(t, expectedDev.Name, gotDev.Name) - require.Equal(t, expectedDev.WorkspaceFolder, gotDev.WorkspaceFolder) - require.Equal(t, expectedDev.ConfigPath, gotDev.ConfigPath) - require.Equal(t, expectedDev.Status, gotDev.Status) - require.Equal(t, expectedDev.Container, gotDev.Container) - } + resources := coderdtest.NewWorkspaceAgentWaiter(t, client, r.Workspace.ID).Wait() + require.Len(t, resources, 1, "expected one resource") + require.Len(t, resources[0].Agents, 1, "expected one agent") + agentID := resources[0].Agents[0].ID - // Process remaining stages through updater loop - for i, stage := range stages[1:] { - mCCLI.EXPECT().List(gomock.Any()).Return(codersdk.WorkspaceAgentListContainersResponse{Containers: stage.containers}, nil) + updaterTickerTrap.MustWait(ctx).MustRelease(ctx) + defer updaterTickerTrap.Close() - _, aw := 
mClock.AdvanceNext() - aw.MustWait(ctx) + containers, closer, err := client.WatchWorkspaceAgentContainers(ctx, agentID) + require.NoError(t, err) + defer func() { + closer.Close() + }() + // Read initial state sent immediately on connection var got codersdk.WorkspaceAgentListContainersResponse select { case <-ctx.Done(): @@ -1559,9 +1537,9 @@ func TestWatchWorkspaceAgentDevcontainers(t *testing.T) { } require.NoError(t, ctx.Err()) - require.Equal(t, stages[i+1].expected.Containers, got.Containers) - require.Len(t, got.Devcontainers, len(stages[i+1].expected.Devcontainers)) - for j, expectedDev := range stages[i+1].expected.Devcontainers { + require.Equal(t, stages[0].expected.Containers, got.Containers) + require.Len(t, got.Devcontainers, len(stages[0].expected.Devcontainers)) + for j, expectedDev := range stages[0].expected.Devcontainers { gotDev := got.Devcontainers[j] require.Equal(t, expectedDev.Name, gotDev.Name) require.Equal(t, expectedDev.WorkspaceFolder, gotDev.WorkspaceFolder) @@ -1569,7 +1547,103 @@ func TestWatchWorkspaceAgentDevcontainers(t *testing.T) { require.Equal(t, expectedDev.Status, gotDev.Status) require.Equal(t, expectedDev.Container, gotDev.Container) } - } + + // Process remaining stages through updater loop + for i, stage := range stages[1:] { + mCCLI.EXPECT().List(gomock.Any()).Return(codersdk.WorkspaceAgentListContainersResponse{Containers: stage.containers}, nil) + + _, aw := mClock.AdvanceNext() + aw.MustWait(ctx) + + var got codersdk.WorkspaceAgentListContainersResponse + select { + case <-ctx.Done(): + case got = <-containers: + } + require.NoError(t, ctx.Err()) + + require.Equal(t, stages[i+1].expected.Containers, got.Containers) + require.Len(t, got.Devcontainers, len(stages[i+1].expected.Devcontainers)) + for j, expectedDev := range stages[i+1].expected.Devcontainers { + gotDev := got.Devcontainers[j] + require.Equal(t, expectedDev.Name, gotDev.Name) + require.Equal(t, expectedDev.WorkspaceFolder, gotDev.WorkspaceFolder) + 
require.Equal(t, expectedDev.ConfigPath, gotDev.ConfigPath) + require.Equal(t, expectedDev.Status, gotDev.Status) + require.Equal(t, expectedDev.Container, gotDev.Container) + } + } + }) + + t.Run("PayloadTooLarge", func(t *testing.T) { + t.Parallel() + + var ( + ctx = testutil.Context(t, testutil.WaitShort) + logger = slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug) + mCtrl = gomock.NewController(t) + mCCLI = acmock.NewMockContainerCLI(mCtrl) + + client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{Logger: &logger}) + user = coderdtest.CreateFirstUser(t, client) + r = dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{ + OrganizationID: user.OrganizationID, + OwnerID: user.UserID, + }).WithAgent(func(agents []*proto.Agent) []*proto.Agent { + return agents + }).Do() + ) + + // WebSocket limit is 4MiB, so we want to ensure we create _more_ than 4MiB worth of payload. + // Creating 20,000 fake containers creates a payload of roughly 7MiB. + var fakeContainers []codersdk.WorkspaceAgentContainer + for range 20_000 { + fakeContainers = append(fakeContainers, codersdk.WorkspaceAgentContainer{ + CreatedAt: time.Now(), + ID: uuid.NewString(), + FriendlyName: uuid.NewString(), + Image: "busybox:latest", + Labels: map[string]string{ + agentcontainers.DevcontainerLocalFolderLabel: "/home/coder/project", + agentcontainers.DevcontainerConfigFileLabel: "/home/coder/project/.devcontainer/devcontainer.json", + }, + Running: false, + Ports: []codersdk.WorkspaceAgentContainerPort{}, + Status: string(codersdk.WorkspaceAgentDevcontainerStatusRunning), + Volumes: map[string]string{}, + }) + } + + mCCLI.EXPECT().List(gomock.Any()).Return(codersdk.WorkspaceAgentListContainersResponse{Containers: fakeContainers}, nil) + mCCLI.EXPECT().DetectArchitecture(gomock.Any(), gomock.Any()).Return("", nil).AnyTimes() + + _ = agenttest.New(t, client.URL, r.AgentToken, func(o *agent.Options) { + o.Logger = logger.Named("agent") + o.Devcontainers = true + 
o.DevcontainerAPIOptions = []agentcontainers.Option{ + agentcontainers.WithContainerCLI(mCCLI), + agentcontainers.WithWatcher(watcher.NewNoop()), + } + }) + + resources := coderdtest.NewWorkspaceAgentWaiter(t, client, r.Workspace.ID).Wait() + require.Len(t, resources, 1, "expected one resource") + require.Len(t, resources[0].Agents, 1, "expected one agent") + agentID := resources[0].Agents[0].ID + + containers, closer, err := client.WatchWorkspaceAgentContainers(ctx, agentID) + require.NoError(t, err) + defer func() { + closer.Close() + }() + + select { + case <-ctx.Done(): + t.Fail() + case _, ok := <-containers: + require.False(t, ok) + } + }) } func TestWorkspaceAgentRecreateDevcontainer(t *testing.T) { diff --git a/codersdk/workspaceagents.go b/codersdk/workspaceagents.go index 1eb37bb07c989..34a9cc687df60 100644 --- a/codersdk/workspaceagents.go +++ b/codersdk/workspaceagents.go @@ -563,6 +563,8 @@ func (c *Client) WatchWorkspaceAgentContainers(ctx context.Context, agentID uuid return nil, nil, ReadBodyAsError(res) } + conn.SetReadLimit(1 << 22) // 4MiB + d := wsjson.NewDecoder[WorkspaceAgentListContainersResponse](conn, websocket.MessageText, c.logger) return d.Chan(), d, nil } From a42d658362d980d3a8082c7bdb7a5d32c0b272d1 Mon Sep 17 00:00:00 2001 From: Danielle Maywood Date: Fri, 15 Aug 2025 10:39:34 +0000 Subject: [PATCH 4/5] fix: accept compression mode --- agent/agentcontainers/api.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/agent/agentcontainers/api.go b/agent/agentcontainers/api.go index fb459804646ae..b0a9ab5a59fe8 100644 --- a/agent/agentcontainers/api.go +++ b/agent/agentcontainers/api.go @@ -763,7 +763,7 @@ func (api *API) broadcastUpdatesLocked() { func (api *API) watchContainers(rw http.ResponseWriter, r *http.Request) { ctx := r.Context() - conn, err := websocket.Accept(rw, r, nil) + conn, err := websocket.Accept(rw, r, &websocket.AcceptOptions{CompressionMode: websocket.CompressionNoContextTakeover}) if err != nil { 
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ Message: "Failed to upgrade connection to websocket.", From a4c5bd3430607b2b283ed5f5da81e6adb6152ffa Mon Sep 17 00:00:00 2001 From: Danielle Maywood Date: Fri, 15 Aug 2025 11:06:48 +0000 Subject: [PATCH 5/5] chore: comment justifications --- agent/agentcontainers/api.go | 6 +++++- codersdk/workspaceagents.go | 8 +++++++- codersdk/workspacesdk/agentconn.go | 9 ++++++++- 3 files changed, 20 insertions(+), 3 deletions(-) diff --git a/agent/agentcontainers/api.go b/agent/agentcontainers/api.go index b0a9ab5a59fe8..d77d4209cb245 100644 --- a/agent/agentcontainers/api.go +++ b/agent/agentcontainers/api.go @@ -763,7 +763,11 @@ func (api *API) broadcastUpdatesLocked() { func (api *API) watchContainers(rw http.ResponseWriter, r *http.Request) { ctx := r.Context() - conn, err := websocket.Accept(rw, r, &websocket.AcceptOptions{CompressionMode: websocket.CompressionNoContextTakeover}) + conn, err := websocket.Accept(rw, r, &websocket.AcceptOptions{ + // We want `NoContextTakeover` compression to balance improving + // bandwidth cost/latency with minimal memory usage overhead. + CompressionMode: websocket.CompressionNoContextTakeover, + }) if err != nil { httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ Message: "Failed to upgrade connection to websocket.", diff --git a/codersdk/workspaceagents.go b/codersdk/workspaceagents.go index 34a9cc687df60..4f3faedb534fc 100644 --- a/codersdk/workspaceagents.go +++ b/codersdk/workspaceagents.go @@ -550,7 +550,9 @@ func (c *Client) WatchWorkspaceAgentContainers(ctx context.Context, agentID uuid }}) conn, res, err := websocket.Dial(ctx, reqURL.String(), &websocket.DialOptions{ - CompressionMode: websocket.CompressionDisabled, + // We want `NoContextTakeover` compression to balance improving + // bandwidth cost/latency with minimal memory usage overhead. 
+ CompressionMode: websocket.CompressionNoContextTakeover, HTTPClient: &http.Client{ Jar: jar, Transport: c.HTTPClient.Transport, @@ -563,6 +565,10 @@ func (c *Client) WatchWorkspaceAgentContainers(ctx context.Context, agentID uuid return nil, nil, ReadBodyAsError(res) } + // When a workspace has a few devcontainers running, or a single devcontainer + // has a large amount of apps, then each payload can easily exceed 32KiB. + // We up the limit to 4MiB to give us plenty of headroom for workspaces that + // have lots of dev containers with lots of apps. conn.SetReadLimit(1 << 22) // 4MiB d := wsjson.NewDecoder[WorkspaceAgentListContainersResponse](conn, websocket.MessageText, c.logger) diff --git a/codersdk/workspacesdk/agentconn.go b/codersdk/workspacesdk/agentconn.go index 2ecbc4ebecd35..9c65b7ee9a1e1 100644 --- a/codersdk/workspacesdk/agentconn.go +++ b/codersdk/workspacesdk/agentconn.go @@ -399,7 +399,10 @@ func (c *AgentConn) WatchContainers(ctx context.Context, logger slog.Logger) (<- url := fmt.Sprintf("http://%s%s", host, "/api/v0/containers/watch") conn, res, err := websocket.Dial(ctx, url, &websocket.DialOptions{ - HTTPClient: c.apiClient(), + HTTPClient: c.apiClient(), + + // We want `NoContextTakeover` compression to balance improving + // bandwidth cost/latency with minimal memory usage overhead. CompressionMode: websocket.CompressionNoContextTakeover, }) if err != nil { @@ -412,6 +415,10 @@ func (c *AgentConn) WatchContainers(ctx context.Context, logger slog.Logger) (<- defer res.Body.Close() } + // When a workspace has a few devcontainers running, or a single devcontainer + // has a large amount of apps, then each payload can easily exceed 32KiB. + // We up the limit to 4MiB to give us plenty of headroom for workspaces that + // have lots of dev containers with lots of apps. conn.SetReadLimit(1 << 22) // 4MiB d := wsjson.NewDecoder[codersdk.WorkspaceAgentListContainersResponse](conn, websocket.MessageText, logger) pFad - Phonifier reborn
