From 496a0962afef4bdec38be73f10f28812072769f3 Mon Sep 17 00:00:00 2001 From: Steven Masley Date: Thu, 12 Jun 2025 13:01:28 -0500 Subject: [PATCH 01/15] chore: check proper rbac perms on open file in cache --- coderd/coderd.go | 11 +++++++---- coderd/database/dbauthz/dbauthz.go | 22 ++++++++++++++++++++++ coderd/files/cache.go | 30 ++++++++++++++++++++++++------ coderd/httpmw/apikey.go | 10 +++++++--- coderd/httpmw/apikey_test.go | 2 +- coderd/rbac/authz.go | 1 + 6 files changed, 62 insertions(+), 14 deletions(-) diff --git a/coderd/coderd.go b/coderd/coderd.go index 8cc5435542189..d7e1509cf8a6b 100644 --- a/coderd/coderd.go +++ b/coderd/coderd.go @@ -572,10 +572,13 @@ func New(options *Options) *API { TemplateScheduleStore: options.TemplateScheduleStore, UserQuietHoursScheduleStore: options.UserQuietHoursScheduleStore, AccessControlStore: options.AccessControlStore, - FileCache: files.NewFromStore(options.Database, options.PrometheusRegistry), - Experiments: experiments, - WebpushDispatcher: options.WebPushDispatcher, - healthCheckGroup: &singleflight.Group[string, *healthsdk.HealthcheckReport]{}, + FileCache: files.NewFromStore(options.Database, options.PrometheusRegistry, func(ctx context.Context, action policy.Action, object rbac.Object) error { + subject := httpmw.UserAuthorizationCtx(ctx) + return options.Authorizer.Authorize(ctx, subject, action, object) + }), + Experiments: experiments, + WebpushDispatcher: options.WebPushDispatcher, + healthCheckGroup: &singleflight.Group[string, *healthsdk.HealthcheckReport]{}, Acquirer: provisionerdserver.NewAcquirer( ctx, options.Logger.Named("acquirer"), diff --git a/coderd/database/dbauthz/dbauthz.go b/coderd/database/dbauthz/dbauthz.go index 5bfa015af3d78..16cb58524fc5a 100644 --- a/coderd/database/dbauthz/dbauthz.go +++ b/coderd/database/dbauthz/dbauthz.go @@ -432,6 +432,24 @@ var ( }), Scope: rbac.ScopeAll, }.WithCachedASTValue() + + subjectFileReader = rbac.Subject{ + Type: rbac.SubjectTypeFileReader, + FriendlyName: "Can Read All Files", + ID: uuid.Nil.String(), + Roles: rbac.Roles([]rbac.Role{ + { + Identifier: rbac.RoleIdentifier{Name: "file-reader"}, + DisplayName: "FileReader", + Site: rbac.Permissions(map[string][]policy.Action{ + rbac.ResourceFile.Type: {policy.ActionRead}, + }), + Org: map[string][]rbac.Permission{}, + User: []rbac.Permission{}, + }, + }), + Scope: rbac.ScopeAll, + }.WithCachedASTValue() ) // AsProvisionerd returns a context with an actor that has permissions required @@ -498,6 +516,10 @@ func AsPrebuildsOrchestrator(ctx context.Context) context.Context { return As(ctx, subjectPrebuildsOrchestrator) } +func AsFileReader(ctx context.Context) context.Context { + return As(ctx, subjectFileReader) +} + var AsRemoveActor = rbac.Subject{ ID: "remove-actor", } diff --git a/coderd/files/cache.go b/coderd/files/cache.go index 92b8ea33ed52f..0c24b29bee095 100644 --- a/coderd/files/cache.go +++ b/coderd/files/cache.go @@ -13,33 +13,42 @@ import ( archivefs "github.com/coder/coder/v2/archive/fs" "github.com/coder/coder/v2/coderd/database" + "github.com/coder/coder/v2/coderd/database/dbauthz" + "github.com/coder/coder/v2/coderd/rbac" + "github.com/coder/coder/v2/coderd/rbac/policy" "github.com/coder/coder/v2/coderd/util/lazy" ) +type AuthorizeFile func(ctx context.Context, action policy.Action, object rbac.Object) error + // NewFromStore returns a file cache that will fetch files from the provided // database. 
-func NewFromStore(store database.Store, registerer prometheus.Registerer) *Cache { +func NewFromStore(store database.Store, registerer prometheus.Registerer, authz AuthorizeFile) *Cache { fetch := func(ctx context.Context, fileID uuid.UUID) (cacheEntryValue, error) { - file, err := store.GetFileByID(ctx, fileID) + // Make sure the read does not fail due to authorization issues. + // Authz is checked on the Acquire call, so this is safe. + file, err := store.GetFileByID(dbauthz.AsFileReader(ctx), fileID) if err != nil { return cacheEntryValue{}, xerrors.Errorf("failed to read file from database: %w", err) } content := bytes.NewBuffer(file.Data) return cacheEntryValue{ - FS: archivefs.FromTarReader(content), - size: int64(content.Len()), + object: rbac.ResourceFile.WithID(file.ID).WithOwner(file.CreatedBy.String()), + FS: archivefs.FromTarReader(content), + size: int64(content.Len()), }, nil } - return New(fetch, registerer) + return New(fetch, registerer, authz) } -func New(fetch fetcher, registerer prometheus.Registerer) *Cache { +func New(fetch fetcher, registerer prometheus.Registerer, authz AuthorizeFile) *Cache { return (&Cache{ lock: sync.Mutex{}, data: make(map[uuid.UUID]*cacheEntry), fetcher: fetch, + authz: authz, }).registerMetrics(registerer) } @@ -101,6 +110,7 @@ type Cache struct { lock sync.Mutex data map[uuid.UUID]*cacheEntry fetcher + authz AuthorizeFile // metrics cacheMetrics @@ -118,6 +128,7 @@ type cacheMetrics struct { } type cacheEntryValue struct { + object rbac.Object fs.FS size int64 } @@ -146,6 +157,13 @@ func (c *Cache) Acquire(ctx context.Context, fileID uuid.UUID) (fs.FS, error) { c.Release(fileID) return nil, err } + + // Always check the caller can actually read the file. + if c.authz(ctx, policy.ActionRead, it.object) != nil { + c.Release(fileID) + return nil, err + } + return it.FS, err } diff --git a/coderd/httpmw/apikey.go b/coderd/httpmw/apikey.go index 4b92848b773e2..861746c9bf27b 100644 --- a/coderd/httpmw/apikey.go +++ b/coderd/httpmw/apikey.go @@ -47,14 +47,18 @@ func APIKey(r *http.Request) database.APIKey { // UserAuthorizationOptional may return the roles and scope used for // authorization. Depends on the ExtractAPIKey handler. -func UserAuthorizationOptional(r *http.Request) (rbac.Subject, bool) { - return dbauthz.ActorFromContext(r.Context()) +func UserAuthorizationOptional(ctx context.Context) (rbac.Subject, bool) { + return dbauthz.ActorFromContext(ctx) } // UserAuthorization returns the roles and scope used for authorization. Depends // on the ExtractAPIKey handler. 
func UserAuthorization(r *http.Request) rbac.Subject { - auth, ok := UserAuthorizationOptional(r) + return UserAuthorizationCtx(r.Context()) +} + +func UserAuthorizationCtx(ctx context.Context) rbac.Subject { + auth, ok := UserAuthorizationOptional(ctx) if !ok { panic("developer error: ExtractAPIKey middleware not provided") } diff --git a/coderd/httpmw/apikey_test.go b/coderd/httpmw/apikey_test.go index 06ee93422bbf9..2bdccd97e17b4 100644 --- a/coderd/httpmw/apikey_test.go +++ b/coderd/httpmw/apikey_test.go @@ -58,7 +58,7 @@ func TestAPIKey(t *testing.T) { assert.NoError(t, err, "actor rego ok") } - auth, ok := httpmw.UserAuthorizationOptional(r) + auth, ok := httpmw.UserAuthorizationOptional(r.Context()) assert.True(t, ok, "httpmw auth ok") if ok { _, err := auth.Roles.Expand() diff --git a/coderd/rbac/authz.go b/coderd/rbac/authz.go index 9e3a0536279ae..a5d9d969cab6f 100644 --- a/coderd/rbac/authz.go +++ b/coderd/rbac/authz.go @@ -74,6 +74,7 @@ const ( SubjectTypeSystemRestricted SubjectType = "system_restricted" SubjectTypeNotifier SubjectType = "notifier" SubjectTypeSubAgentAPI SubjectType = "sub_agent_api" + SubjectTypeFileReader SubjectType = "file_reader" ) // Subject is a struct that contains all the elements of a subject in an rbac From b909207df9e21efa4a82215529b4bbbd4d46015e Mon Sep 17 00:00:00 2001 From: Steven Masley Date: Thu, 12 Jun 2025 13:45:42 -0500 Subject: [PATCH 02/15] chore: unit tests for cache authz --- coderd/coderd.go | 11 +-- coderd/coderdtest/authorize.go | 6 +- coderd/database/dbauthz/dbauthz.go | 3 +- coderd/files/cache.go | 8 +- coderd/files/cache_internal_test.go | 10 ++- coderd/files/cache_test.go | 119 ++++++++++++++++++++++++++++ coderd/rbac/authz.go | 4 + 7 files changed, 148 insertions(+), 13 deletions(-) create mode 100644 coderd/files/cache_test.go diff --git a/coderd/coderd.go b/coderd/coderd.go index d7e1509cf8a6b..5d075ba82e9b6 100644 --- a/coderd/coderd.go +++ b/coderd/coderd.go @@ -572,13 +572,10 @@ func New(options *Options) *API { TemplateScheduleStore: options.TemplateScheduleStore, UserQuietHoursScheduleStore: options.UserQuietHoursScheduleStore, AccessControlStore: options.AccessControlStore, - FileCache: files.NewFromStore(options.Database, options.PrometheusRegistry, func(ctx context.Context, action policy.Action, object rbac.Object) error { - subject := httpmw.UserAuthorizationCtx(ctx) - return options.Authorizer.Authorize(ctx, subject, action, object) - }), - Experiments: experiments, - WebpushDispatcher: options.WebPushDispatcher, - healthCheckGroup: &singleflight.Group[string, *healthsdk.HealthcheckReport]{}, + FileCache: files.NewFromStore(options.Database, options.PrometheusRegistry, options.Authorizer.Authorize), + Experiments: experiments, + WebpushDispatcher: options.WebPushDispatcher, + healthCheckGroup: &singleflight.Group[string, *healthsdk.HealthcheckReport]{}, Acquirer: provisionerdserver.NewAcquirer( ctx, options.Logger.Named("acquirer"), diff --git a/coderd/coderdtest/authorize.go b/coderd/coderdtest/authorize.go index 279405c4e6a21..67551d0e3d2dd 100644 --- a/coderd/coderdtest/authorize.go +++ b/coderd/coderdtest/authorize.go @@ -234,6 +234,10 @@ func (r *RecordingAuthorizer) AssertOutOfOrder(t *testing.T, actor rbac.Subject, // AssertActor asserts in order. If the order of authz calls does not match, // this will fail. func (r *RecordingAuthorizer) AssertActor(t *testing.T, actor rbac.Subject, did ...ActionObjectPair) { + r.AssertActorID(t, actor.ID, did...) 
+} + +func (r *RecordingAuthorizer) AssertActorID(t *testing.T, id string, did ...ActionObjectPair) { r.Lock() defer r.Unlock() ptr := 0 @@ -242,7 +246,7 @@ func (r *RecordingAuthorizer) AssertActor(t *testing.T, actor rbac.Subject, did // Finished all assertions return } - if call.Actor.ID == actor.ID { + if call.Actor.ID == id { action, object := did[ptr].Action, did[ptr].Object assert.Equalf(t, action, call.Action, "assert action %d", ptr) assert.Equalf(t, object, call.Object, "assert object %d", ptr) diff --git a/coderd/database/dbauthz/dbauthz.go b/coderd/database/dbauthz/dbauthz.go index 16cb58524fc5a..e76d0dd035d61 100644 --- a/coderd/database/dbauthz/dbauthz.go +++ b/coderd/database/dbauthz/dbauthz.go @@ -436,7 +436,8 @@ var ( subjectFileReader = rbac.Subject{ Type: rbac.SubjectTypeFileReader, FriendlyName: "Can Read All Files", - ID: uuid.Nil.String(), + // Arbitrary uuid to have a unique ID for this subject. + ID: rbac.SubjectTypeFileReaderID, Roles: rbac.Roles([]rbac.Role{ { Identifier: rbac.RoleIdentifier{Name: "file-reader"}, diff --git a/coderd/files/cache.go b/coderd/files/cache.go index 0c24b29bee095..e146014722ad5 100644 --- a/coderd/files/cache.go +++ b/coderd/files/cache.go @@ -19,7 +19,7 @@ import ( "github.com/coder/coder/v2/coderd/util/lazy" ) -type AuthorizeFile func(ctx context.Context, action policy.Action, object rbac.Object) error +type AuthorizeFile func(ctx context.Context, subject rbac.Subject, action policy.Action, object rbac.Object) error // NewFromStore returns a file cache that will fetch files from the provided // database. @@ -158,8 +158,12 @@ func (c *Cache) Acquire(ctx context.Context, fileID uuid.UUID) (fs.FS, error) { return nil, err } + subject, ok := dbauthz.ActorFromContext(ctx) + if !ok { + return nil, dbauthz.ErrNoActor + } // Always check the caller can actually read the file. - if c.authz(ctx, policy.ActionRead, it.object) != nil { + if err := c.authz(ctx, subject, policy.ActionRead, it.object); err != nil { c.Release(fileID) return nil, err } diff --git a/coderd/files/cache_internal_test.go b/coderd/files/cache_internal_test.go index 6ad84185b44b6..dc8cdfa6a5aad 100644 --- a/coderd/files/cache_internal_test.go +++ b/coderd/files/cache_internal_test.go @@ -13,9 +13,15 @@ import ( "golang.org/x/sync/errgroup" "github.com/coder/coder/v2/coderd/coderdtest/promhelp" + "github.com/coder/coder/v2/coderd/rbac" + "github.com/coder/coder/v2/coderd/rbac/policy" "github.com/coder/coder/v2/testutil" ) +func authzAlwaysTrue(_ context.Context, _ rbac.Subject, _ policy.Action, _ rbac.Object) error { + return nil +} + func cachePromMetricName(metric string) string { return "coderd_file_cache_" + metric } @@ -33,7 +39,7 @@ func TestConcurrency(t *testing.T) { // will be waiting in line, ensuring that no one duplicated a fetch. 
time.Sleep(testutil.IntervalMedium) return cacheEntryValue{FS: emptyFS, size: fileSize}, nil - }, reg) + }, reg, authzAlwaysTrue) batches := 1000 groups := make([]*errgroup.Group, 0, batches) @@ -83,7 +89,7 @@ func TestRelease(t *testing.T) { FS: emptyFS, size: fileSize, }, nil - }, reg) + }, reg, authzAlwaysTrue) batches := 100 ids := make([]uuid.UUID, 0, batches) diff --git a/coderd/files/cache_test.go b/coderd/files/cache_test.go new file mode 100644 index 0000000000000..7fc9c22601457 --- /dev/null +++ b/coderd/files/cache_test.go @@ -0,0 +1,119 @@ +package files_test + +import ( + "testing" + + "github.com/google/uuid" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + + "cdr.dev/slog/sloggers/slogtest" + "github.com/coder/coder/v2/coderd/coderdtest" + "github.com/coder/coder/v2/coderd/database" + "github.com/coder/coder/v2/coderd/database/dbauthz" + "github.com/coder/coder/v2/coderd/database/dbgen" + "github.com/coder/coder/v2/coderd/database/dbtestutil" + "github.com/coder/coder/v2/coderd/files" + "github.com/coder/coder/v2/coderd/rbac" + "github.com/coder/coder/v2/coderd/rbac/policy" + "github.com/coder/coder/v2/testutil" +) + +func TestCacheRBAC(t *testing.T) { + t.Parallel() + + db, cache, rec := cacheAuthzSetup(t) + ctx := testutil.Context(t, testutil.WaitMedium) + + file := dbgen.File(t, db, database.File{}) + + nobodyID := uuid.New() + nobody := dbauthz.As(ctx, rbac.Subject{ + ID: nobodyID.String(), + Roles: rbac.Roles{}, + Scope: rbac.ScopeAll, + }) + + userID := uuid.New() + userReader := dbauthz.As(ctx, rbac.Subject{ + ID: userID.String(), + Roles: rbac.Roles{ + must(rbac.RoleByName(rbac.RoleTemplateAdmin())), + }, + Scope: rbac.ScopeAll, + }) + + cacheReader := dbauthz.AsFileReader(ctx) + + t.Run("NoRolesOpen", func(t *testing.T) { + // Ensure start is clean + require.Equal(t, 0, cache.Count()) + rec.Reset() + + _, err := cache.Acquire(nobody, file.ID) + require.Error(t, err) + require.True(t, rbac.IsUnauthorizedError(err)) + + // Ensure that the cache is empty + require.Equal(t, 0, cache.Count()) + + // Check the assertions + rec.AssertActorID(t, nobodyID.String(), rec.Pair(policy.ActionRead, file)) + rec.AssertActorID(t, rbac.SubjectTypeFileReaderID, rec.Pair(policy.ActionRead, file)) + }) + + t.Run("CacheHasFile", func(t *testing.T) { + rec.Reset() + require.Equal(t, 0, cache.Count()) + + // Read the file with a file reader to put it into the cache. + _, err := cache.Acquire(cacheReader, file.ID) + require.NoError(t, err) + require.Equal(t, 1, cache.Count()) + + // "nobody" should not be able to read the file. 
+ _, err = cache.Acquire(nobody, file.ID) + require.Error(t, err) + require.True(t, rbac.IsUnauthorizedError(err)) + require.Equal(t, 1, cache.Count()) + + // UserReader can + _, err = cache.Acquire(userReader, file.ID) + require.NoError(t, err) + require.Equal(t, 1, cache.Count()) + + cache.Release(file.ID) + cache.Release(file.ID) + require.Equal(t, 0, cache.Count()) + + rec.AssertActorID(t, nobodyID.String(), rec.Pair(policy.ActionRead, file)) + rec.AssertActorID(t, rbac.SubjectTypeFileReaderID, rec.Pair(policy.ActionRead, file)) + rec.AssertActorID(t, userID.String(), rec.Pair(policy.ActionRead, file)) + }) +} + +func cacheAuthzSetup(t *testing.T) (database.Store, *files.Cache, *coderdtest.RecordingAuthorizer) { + t.Helper() + + logger := slogtest.Make(t, &slogtest.Options{}) + reg := prometheus.NewRegistry() + + db, _ := dbtestutil.NewDB(t) + authz := rbac.NewAuthorizer(reg) + rec := &coderdtest.RecordingAuthorizer{ + Called: nil, + Wrapped: authz, + } + + // Dbauthz wrap the db + db = dbauthz.New(db, rec, logger, coderdtest.AccessControlStorePointer()) + c := files.NewFromStore(db, reg, rec.Authorize) + return db, c, rec +} + +func must[T any](t T, err error) T { + if err != nil { + panic(err) + } + return t +} diff --git a/coderd/rbac/authz.go b/coderd/rbac/authz.go index a5d9d969cab6f..a7f77d57ab253 100644 --- a/coderd/rbac/authz.go +++ b/coderd/rbac/authz.go @@ -77,6 +77,10 @@ const ( SubjectTypeFileReader SubjectType = "file_reader" ) +const ( + SubjectTypeFileReaderID = "acbf0be6-6fed-47b6-8c43-962cb5cab994" +) + // Subject is a struct that contains all the elements of a subject in an rbac // authorize. type Subject struct { From bcfd754ac6962ab4e34d98a853276ca1a72f98fe Mon Sep 17 00:00:00 2001 From: Steven Masley Date: Thu, 12 Jun 2025 13:47:34 -0500 Subject: [PATCH 03/15] use more reduced role --- coderd/parameters.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coderd/parameters.go b/coderd/parameters.go index d8551b2031f7a..4a0d776889744 100644 --- a/coderd/parameters.go +++ b/coderd/parameters.go @@ -119,7 +119,7 @@ func (api *API) handleDynamicParameters(listen bool, rw http.ResponseWriter, r * // nolint:gocritic // We need to fetch the templates files for the Terraform // evaluator, and the user likely does not have permission. - fileCtx := dbauthz.AsProvisionerd(ctx) + fileCtx := dbauthz.AsFileReader(ctx) fileID, err := api.Database.GetFileIDByTemplateVersionID(fileCtx, templateVersion.ID) if err != nil { httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ From f4f849bb6689122c04243b3a6ad0664d0c8c8f7b Mon Sep 17 00:00:00 2001 From: Steven Masley Date: Thu, 12 Jun 2025 13:57:22 -0500 Subject: [PATCH 04/15] linting --- coderd/files/cache.go | 1 + coderd/files/cache_test.go | 2 ++ 2 files changed, 3 insertions(+) diff --git a/coderd/files/cache.go b/coderd/files/cache.go index e146014722ad5..2774c58640174 100644 --- a/coderd/files/cache.go +++ b/coderd/files/cache.go @@ -27,6 +27,7 @@ func NewFromStore(store database.Store, registerer prometheus.Registerer, authz fetch := func(ctx context.Context, fileID uuid.UUID) (cacheEntryValue, error) { // Make sure the read does not fail due to authorization issues. // Authz is checked on the Acquire call, so this is safe. 
+ //nolint:gocritic file, err := store.GetFileByID(dbauthz.AsFileReader(ctx), fileID) if err != nil { return cacheEntryValue{}, xerrors.Errorf("failed to read file from database: %w", err) diff --git a/coderd/files/cache_test.go b/coderd/files/cache_test.go index 7fc9c22601457..0c309d1bd00a2 100644 --- a/coderd/files/cache_test.go +++ b/coderd/files/cache_test.go @@ -19,6 +19,7 @@ import ( "github.com/coder/coder/v2/testutil" ) +// nolint:paralleltest,tparallel // Serially testing is easier func TestCacheRBAC(t *testing.T) { t.Parallel() @@ -43,6 +44,7 @@ func TestCacheRBAC(t *testing.T) { Scope: rbac.ScopeAll, }) + //nolint:gocritic // Unit testing cacheReader := dbauthz.AsFileReader(ctx) t.Run("NoRolesOpen", func(t *testing.T) { From 12f91e2cef043d6ae2ba02068062f1ccd9c8a3a2 Mon Sep 17 00:00:00 2001 From: Steven Masley Date: Thu, 12 Jun 2025 14:00:40 -0500 Subject: [PATCH 05/15] linting --- coderd/files/cache_internal_test.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/coderd/files/cache_internal_test.go b/coderd/files/cache_internal_test.go index dc8cdfa6a5aad..d7c4a856b47a6 100644 --- a/coderd/files/cache_internal_test.go +++ b/coderd/files/cache_internal_test.go @@ -13,6 +13,7 @@ import ( "golang.org/x/sync/errgroup" "github.com/coder/coder/v2/coderd/coderdtest/promhelp" + "github.com/coder/coder/v2/coderd/database/dbauthz" "github.com/coder/coder/v2/coderd/rbac" "github.com/coder/coder/v2/coderd/rbac/policy" "github.com/coder/coder/v2/testutil" @@ -28,6 +29,7 @@ func cachePromMetricName(metric string) string { func TestConcurrency(t *testing.T) { t.Parallel() + ctx := dbauthz.AsFileReader(t.Context()) const fileSize = 10 emptyFS := afero.NewIOFS(afero.NewReadOnlyFs(afero.NewMemMapFs())) @@ -57,7 +59,7 @@ func TestConcurrency(t *testing.T) { g.Go(func() error { // We don't bother to Release these references because the Cache will be // released at the end of the test anyway. 
- _, err := c.Acquire(t.Context(), id) + _, err := c.Acquire(ctx, id) return err }) } @@ -80,6 +82,7 @@ func TestConcurrency(t *testing.T) { func TestRelease(t *testing.T) { t.Parallel() + ctx := dbauthz.AsFileReader(t.Context()) const fileSize = 10 emptyFS := afero.NewIOFS(afero.NewReadOnlyFs(afero.NewMemMapFs())) @@ -101,7 +104,7 @@ func TestRelease(t *testing.T) { batchSize := 10 for openedIdx, id := range ids { for batchIdx := range batchSize { - it, err := c.Acquire(t.Context(), id) + it, err := c.Acquire(ctx, id) require.NoError(t, err) require.Equal(t, emptyFS, it) From 3ddc7a9118f7413c3e146bc5c0202fcb882bf23a Mon Sep 17 00:00:00 2001 From: Steven Masley Date: Thu, 12 Jun 2025 14:18:53 -0500 Subject: [PATCH 06/15] linting --- coderd/files/cache_internal_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/coderd/files/cache_internal_test.go b/coderd/files/cache_internal_test.go index d7c4a856b47a6..54f270de7f8fd 100644 --- a/coderd/files/cache_internal_test.go +++ b/coderd/files/cache_internal_test.go @@ -29,6 +29,7 @@ func cachePromMetricName(metric string) string { func TestConcurrency(t *testing.T) { t.Parallel() + //nolint:gocritic // Unit testing ctx := dbauthz.AsFileReader(t.Context()) const fileSize = 10 @@ -82,6 +83,7 @@ func TestConcurrency(t *testing.T) { func TestRelease(t *testing.T) { t.Parallel() + //nolint:gocritic // Unit testing ctx := dbauthz.AsFileReader(t.Context()) const fileSize = 10 From 201583727931dc0524b942660808eaf7a331e916 Mon Sep 17 00:00:00 2001 From: Steven Masley Date: Fri, 13 Jun 2025 11:37:03 -0500 Subject: [PATCH 07/15] add unit test for user_id query param --- coderd/parameters.go | 16 +++++++++++++++- coderd/parameters_test.go | 4 ++-- codersdk/parameters.go | 13 +++++++++++-- enterprise/coderd/parameters_test.go | 20 +++++++++++++++++++- 4 files changed, 47 insertions(+), 6 deletions(-) diff --git a/coderd/parameters.go b/coderd/parameters.go index 4a0d776889744..9cd57d819f27a 100644 --- a/coderd/parameters.go +++ b/coderd/parameters.go @@ -58,11 +58,25 @@ func (api *API) templateVersionDynamicParametersEvaluate(rw http.ResponseWriter, // @Router /templateversions/{templateversion}/dynamic-parameters [get] func (api *API) templateVersionDynamicParametersWebsocket(rw http.ResponseWriter, r *http.Request) { apikey := httpmw.APIKey(r) + userID := apikey.UserID + + qUserID := r.URL.Query().Get("user_id") + if qUserID != "" { + uid, err := uuid.Parse(qUserID) + if err != nil { + httpapi.Write(r.Context(), rw, http.StatusBadRequest, codersdk.Response{ + Message: "Invalid user_id query parameter", + Detail: err.Error(), + }) + return + } + userID = uid + } api.templateVersionDynamicParameters(true, codersdk.DynamicParametersRequest{ ID: -1, Inputs: map[string]string{}, - OwnerID: apikey.UserID, + OwnerID: userID, })(rw, r) } diff --git a/coderd/parameters_test.go b/coderd/parameters_test.go index 640dc3ad22e55..3c792c2ce9a7a 100644 --- a/coderd/parameters_test.go +++ b/coderd/parameters_test.go @@ -56,7 +56,7 @@ func TestDynamicParametersOwnerSSHPublicKey(t *testing.T) { _ = coderdtest.CreateTemplate(t, templateAdmin, owner.OrganizationID, version.ID) ctx := testutil.Context(t, testutil.WaitShort) - stream, err := templateAdmin.TemplateVersionDynamicParameters(ctx, version.ID) + stream, err := templateAdmin.TemplateVersionDynamicParameters(ctx, codersdk.Me, version.ID) require.NoError(t, err) defer stream.Close(websocket.StatusGoingAway) @@ -387,7 +387,7 @@ func setupDynamicParamsTest(t *testing.T, args setupDynamicParamsTestParams) dyn 
require.NoError(t, err) ctx := testutil.Context(t, testutil.WaitShort) - stream, err := templateAdmin.TemplateVersionDynamicParameters(ctx, version.ID) + stream, err := templateAdmin.TemplateVersionDynamicParameters(ctx, codersdk.Me, version.ID) if args.expectWebsocketError { require.Errorf(t, err, "expected error forming websocket") } else { diff --git a/codersdk/parameters.go b/codersdk/parameters.go index 035537d34259e..dbbe3cdbd2fc7 100644 --- a/codersdk/parameters.go +++ b/codersdk/parameters.go @@ -125,8 +125,17 @@ type DynamicParametersResponse struct { // TODO: Workspace tags } -func (c *Client) TemplateVersionDynamicParameters(ctx context.Context, version uuid.UUID) (*wsjson.Stream[DynamicParametersResponse, DynamicParametersRequest], error) { - conn, err := c.Dial(ctx, fmt.Sprintf("/api/v2/templateversions/%s/dynamic-parameters", version), nil) +func (c *Client) TemplateVersionDynamicParameters(ctx context.Context, userID string, version uuid.UUID) (*wsjson.Stream[DynamicParametersResponse, DynamicParametersRequest], error) { + endpoint := fmt.Sprintf("/api/v2/templateversions/%s/dynamic-parameters", version) + if userID != Me { + uid, err := uuid.Parse(userID) + if err != nil { + return nil, fmt.Errorf("invalid user ID: %w", err) + } + endpoint += fmt.Sprintf("?user_id=%s", uid.String()) + } + + conn, err := c.Dial(ctx, endpoint, nil) if err != nil { return nil, err } diff --git a/enterprise/coderd/parameters_test.go b/enterprise/coderd/parameters_test.go index 5fc0eaa4aa369..93f5057206527 100644 --- a/enterprise/coderd/parameters_test.go +++ b/enterprise/coderd/parameters_test.go @@ -32,6 +32,7 @@ func TestDynamicParametersOwnerGroups(t *testing.T) { }, ) templateAdmin, templateAdminUser := coderdtest.CreateAnotherUser(t, ownerClient, owner.OrganizationID, rbac.RoleTemplateAdmin()) + _, noGroupUser := coderdtest.CreateAnotherUser(t, ownerClient, owner.OrganizationID) // Create the group to be asserted group := coderdtest.CreateGroup(t, ownerClient, owner.OrganizationID, "bloob", templateAdminUser) @@ -57,7 +58,24 @@ func TestDynamicParametersOwnerGroups(t *testing.T) { _ = coderdtest.CreateTemplate(t, templateAdmin, owner.OrganizationID, version.ID) ctx := testutil.Context(t, testutil.WaitShort) - stream, err := templateAdmin.TemplateVersionDynamicParameters(ctx, version.ID) + + // First check with a no group admin user, that they do not see the extra group + // Use the admin client, as the user might not have access to the template. + // Also checking that the admin can see the form for the other user. 
+ noGroupStream, err := templateAdmin.TemplateVersionDynamicParameters(ctx, noGroupUser.ID.String(), version.ID) + require.NoError(t, err) + defer noGroupStream.Close(websocket.StatusGoingAway) + noGroupPreviews := noGroupStream.Chan() + noGroupPreview := testutil.RequireReceive(ctx, t, noGroupPreviews) + require.Equal(t, -1, noGroupPreview.ID) + require.Empty(t, noGroupPreview.Diagnostics) + require.Equal(t, "group", noGroupPreview.Parameters[0].Name) + require.Equal(t, database.EveryoneGroup, noGroupPreview.Parameters[0].Value.Value) + require.Equal(t, 1, len(noGroupPreview.Parameters[0].Options)) // Only 1 group + noGroupStream.Close(websocket.StatusGoingAway) + + // Now try with a user with more than 1 group + stream, err := templateAdmin.TemplateVersionDynamicParameters(ctx, codersdk.Me, version.ID) require.NoError(t, err) defer stream.Close(websocket.StatusGoingAway) From 4256a6ce4f1b2e8a7db8a4c09256d2677f0da570 Mon Sep 17 00:00:00 2001 From: Steven Masley Date: Fri, 13 Jun 2025 11:37:48 -0500 Subject: [PATCH 08/15] Revert "add unit test for user_id query param" This reverts commit 201583727931dc0524b942660808eaf7a331e916. --- coderd/parameters.go | 16 +--------------- coderd/parameters_test.go | 4 ++-- codersdk/parameters.go | 13 ++----------- enterprise/coderd/parameters_test.go | 20 +------------------- 4 files changed, 6 insertions(+), 47 deletions(-) diff --git a/coderd/parameters.go b/coderd/parameters.go index 9cd57d819f27a..4a0d776889744 100644 --- a/coderd/parameters.go +++ b/coderd/parameters.go @@ -58,25 +58,11 @@ func (api *API) templateVersionDynamicParametersEvaluate(rw http.ResponseWriter, // @Router /templateversions/{templateversion}/dynamic-parameters [get] func (api *API) templateVersionDynamicParametersWebsocket(rw http.ResponseWriter, r *http.Request) { apikey := httpmw.APIKey(r) - userID := apikey.UserID - - qUserID := r.URL.Query().Get("user_id") - if qUserID != "" { - uid, err := uuid.Parse(qUserID) - if err != nil { - httpapi.Write(r.Context(), rw, http.StatusBadRequest, codersdk.Response{ - Message: "Invalid user_id query parameter", - Detail: err.Error(), - }) - return - } - userID = uid - } api.templateVersionDynamicParameters(true, codersdk.DynamicParametersRequest{ ID: -1, Inputs: map[string]string{}, - OwnerID: userID, + OwnerID: apikey.UserID, })(rw, r) } diff --git a/coderd/parameters_test.go b/coderd/parameters_test.go index 3c792c2ce9a7a..640dc3ad22e55 100644 --- a/coderd/parameters_test.go +++ b/coderd/parameters_test.go @@ -56,7 +56,7 @@ func TestDynamicParametersOwnerSSHPublicKey(t *testing.T) { _ = coderdtest.CreateTemplate(t, templateAdmin, owner.OrganizationID, version.ID) ctx := testutil.Context(t, testutil.WaitShort) - stream, err := templateAdmin.TemplateVersionDynamicParameters(ctx, codersdk.Me, version.ID) + stream, err := templateAdmin.TemplateVersionDynamicParameters(ctx, version.ID) require.NoError(t, err) defer stream.Close(websocket.StatusGoingAway) @@ -387,7 +387,7 @@ func setupDynamicParamsTest(t *testing.T, args setupDynamicParamsTestParams) dyn require.NoError(t, err) ctx := testutil.Context(t, testutil.WaitShort) - stream, err := templateAdmin.TemplateVersionDynamicParameters(ctx, codersdk.Me, version.ID) + stream, err := templateAdmin.TemplateVersionDynamicParameters(ctx, version.ID) if args.expectWebsocketError { require.Errorf(t, err, "expected error forming websocket") } else { diff --git a/codersdk/parameters.go b/codersdk/parameters.go index dbbe3cdbd2fc7..035537d34259e 100644 --- a/codersdk/parameters.go +++ 
b/codersdk/parameters.go @@ -125,17 +125,8 @@ type DynamicParametersResponse struct { // TODO: Workspace tags } -func (c *Client) TemplateVersionDynamicParameters(ctx context.Context, userID string, version uuid.UUID) (*wsjson.Stream[DynamicParametersResponse, DynamicParametersRequest], error) { - endpoint := fmt.Sprintf("/api/v2/templateversions/%s/dynamic-parameters", version) - if userID != Me { - uid, err := uuid.Parse(userID) - if err != nil { - return nil, fmt.Errorf("invalid user ID: %w", err) - } - endpoint += fmt.Sprintf("?user_id=%s", uid.String()) - } - - conn, err := c.Dial(ctx, endpoint, nil) +func (c *Client) TemplateVersionDynamicParameters(ctx context.Context, version uuid.UUID) (*wsjson.Stream[DynamicParametersResponse, DynamicParametersRequest], error) { + conn, err := c.Dial(ctx, fmt.Sprintf("/api/v2/templateversions/%s/dynamic-parameters", version), nil) if err != nil { return nil, err } diff --git a/enterprise/coderd/parameters_test.go b/enterprise/coderd/parameters_test.go index 93f5057206527..5fc0eaa4aa369 100644 --- a/enterprise/coderd/parameters_test.go +++ b/enterprise/coderd/parameters_test.go @@ -32,7 +32,6 @@ func TestDynamicParametersOwnerGroups(t *testing.T) { }, ) templateAdmin, templateAdminUser := coderdtest.CreateAnotherUser(t, ownerClient, owner.OrganizationID, rbac.RoleTemplateAdmin()) - _, noGroupUser := coderdtest.CreateAnotherUser(t, ownerClient, owner.OrganizationID) // Create the group to be asserted group := coderdtest.CreateGroup(t, ownerClient, owner.OrganizationID, "bloob", templateAdminUser) @@ -58,24 +57,7 @@ func TestDynamicParametersOwnerGroups(t *testing.T) { _ = coderdtest.CreateTemplate(t, templateAdmin, owner.OrganizationID, version.ID) ctx := testutil.Context(t, testutil.WaitShort) - - // First check with a no group admin user, that they do not see the extra group - // Use the admin client, as the user might not have access to the template. - // Also checking that the admin can see the form for the other user. 
- noGroupStream, err := templateAdmin.TemplateVersionDynamicParameters(ctx, noGroupUser.ID.String(), version.ID) - require.NoError(t, err) - defer noGroupStream.Close(websocket.StatusGoingAway) - noGroupPreviews := noGroupStream.Chan() - noGroupPreview := testutil.RequireReceive(ctx, t, noGroupPreviews) - require.Equal(t, -1, noGroupPreview.ID) - require.Empty(t, noGroupPreview.Diagnostics) - require.Equal(t, "group", noGroupPreview.Parameters[0].Name) - require.Equal(t, database.EveryoneGroup, noGroupPreview.Parameters[0].Value.Value) - require.Equal(t, 1, len(noGroupPreview.Parameters[0].Options)) // Only 1 group - noGroupStream.Close(websocket.StatusGoingAway) - - // Now try with a user with more than 1 group - stream, err := templateAdmin.TemplateVersionDynamicParameters(ctx, codersdk.Me, version.ID) + stream, err := templateAdmin.TemplateVersionDynamicParameters(ctx, version.ID) require.NoError(t, err) defer stream.Close(websocket.StatusGoingAway) From c9cf7809aab2e1904f3a9d2da7528d2e012f8eec Mon Sep 17 00:00:00 2001 From: Steven Masley Date: Fri, 13 Jun 2025 11:45:23 -0500 Subject: [PATCH 09/15] just pass the authorizer as a whole --- coderd/coderd.go | 2 +- coderd/files/cache.go | 10 ++++------ coderd/files/cache_internal_test.go | 11 +++-------- coderd/files/cache_test.go | 2 +- 4 files changed, 9 insertions(+), 16 deletions(-) diff --git a/coderd/coderd.go b/coderd/coderd.go index 5d075ba82e9b6..24b34ea4db91a 100644 --- a/coderd/coderd.go +++ b/coderd/coderd.go @@ -572,7 +572,7 @@ func New(options *Options) *API { TemplateScheduleStore: options.TemplateScheduleStore, UserQuietHoursScheduleStore: options.UserQuietHoursScheduleStore, AccessControlStore: options.AccessControlStore, - FileCache: files.NewFromStore(options.Database, options.PrometheusRegistry, options.Authorizer.Authorize), + FileCache: files.NewFromStore(options.Database, options.PrometheusRegistry, options.Authorizer), Experiments: experiments, WebpushDispatcher: options.WebPushDispatcher, healthCheckGroup: &singleflight.Group[string, *healthsdk.HealthcheckReport]{}, diff --git a/coderd/files/cache.go b/coderd/files/cache.go index 2774c58640174..d00340085485d 100644 --- a/coderd/files/cache.go +++ b/coderd/files/cache.go @@ -19,11 +19,9 @@ import ( "github.com/coder/coder/v2/coderd/util/lazy" ) -type AuthorizeFile func(ctx context.Context, subject rbac.Subject, action policy.Action, object rbac.Object) error - // NewFromStore returns a file cache that will fetch files from the provided // database. -func NewFromStore(store database.Store, registerer prometheus.Registerer, authz AuthorizeFile) *Cache { +func NewFromStore(store database.Store, registerer prometheus.Registerer, authz rbac.Authorizer) *Cache { fetch := func(ctx context.Context, fileID uuid.UUID) (cacheEntryValue, error) { // Make sure the read does not fail due to authorization issues. // Authz is checked on the Acquire call, so this is safe. 
@@ -44,7 +42,7 @@ func NewFromStore(store database.Store, registerer prometheus.Registerer, authz return New(fetch, registerer, authz) } -func New(fetch fetcher, registerer prometheus.Registerer, authz AuthorizeFile) *Cache { +func New(fetch fetcher, registerer prometheus.Registerer, authz rbac.Authorizer) *Cache { return (&Cache{ lock: sync.Mutex{}, data: make(map[uuid.UUID]*cacheEntry), @@ -111,7 +109,7 @@ type Cache struct { lock sync.Mutex data map[uuid.UUID]*cacheEntry fetcher - authz AuthorizeFile + authz rbac.Authorizer // metrics cacheMetrics @@ -164,7 +162,7 @@ func (c *Cache) Acquire(ctx context.Context, fileID uuid.UUID) (fs.FS, error) { return nil, dbauthz.ErrNoActor } // Always check the caller can actually read the file. - if err := c.authz(ctx, subject, policy.ActionRead, it.object); err != nil { + if err := c.authz.Authorize(ctx, subject, policy.ActionRead, it.object); err != nil { c.Release(fileID) return nil, err } diff --git a/coderd/files/cache_internal_test.go b/coderd/files/cache_internal_test.go index 54f270de7f8fd..57b5ae30fe76f 100644 --- a/coderd/files/cache_internal_test.go +++ b/coderd/files/cache_internal_test.go @@ -12,17 +12,12 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" + "github.com/coder/coder/v2/coderd/coderdtest" "github.com/coder/coder/v2/coderd/coderdtest/promhelp" "github.com/coder/coder/v2/coderd/database/dbauthz" - "github.com/coder/coder/v2/coderd/rbac" - "github.com/coder/coder/v2/coderd/rbac/policy" "github.com/coder/coder/v2/testutil" ) -func authzAlwaysTrue(_ context.Context, _ rbac.Subject, _ policy.Action, _ rbac.Object) error { - return nil -} - func cachePromMetricName(metric string) string { return "coderd_file_cache_" + metric } @@ -42,7 +37,7 @@ func TestConcurrency(t *testing.T) { // will be waiting in line, ensuring that no one duplicated a fetch. 
time.Sleep(testutil.IntervalMedium) return cacheEntryValue{FS: emptyFS, size: fileSize}, nil - }, reg, authzAlwaysTrue) + }, reg, &coderdtest.FakeAuthorizer{}) batches := 1000 groups := make([]*errgroup.Group, 0, batches) @@ -94,7 +89,7 @@ func TestRelease(t *testing.T) { FS: emptyFS, size: fileSize, }, nil - }, reg, authzAlwaysTrue) + }, reg, &coderdtest.FakeAuthorizer{}) batches := 100 ids := make([]uuid.UUID, 0, batches) diff --git a/coderd/files/cache_test.go b/coderd/files/cache_test.go index 0c309d1bd00a2..4b9da437bc577 100644 --- a/coderd/files/cache_test.go +++ b/coderd/files/cache_test.go @@ -109,7 +109,7 @@ func cacheAuthzSetup(t *testing.T) (database.Store, *files.Cache, *coderdtest.Re // Dbauthz wrap the db db = dbauthz.New(db, rec, logger, coderdtest.AccessControlStorePointer()) - c := files.NewFromStore(db, reg, rec.Authorize) + c := files.NewFromStore(db, reg, rec) return db, c, rec } From 9a29e589f88587d912b6c0cd02e26895ca7ced43 Mon Sep 17 00:00:00 2001 From: Steven Masley Date: Fri, 13 Jun 2025 11:47:43 -0500 Subject: [PATCH 10/15] use the RBACObject method --- coderd/files/cache.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coderd/files/cache.go b/coderd/files/cache.go index d00340085485d..d1256649ded90 100644 --- a/coderd/files/cache.go +++ b/coderd/files/cache.go @@ -33,7 +33,7 @@ func NewFromStore(store database.Store, registerer prometheus.Registerer, authz content := bytes.NewBuffer(file.Data) return cacheEntryValue{ - object: rbac.ResourceFile.WithID(file.ID).WithOwner(file.CreatedBy.String()), + object: file.RBACObject(), FS: archivefs.FromTarReader(content), size: int64(content.Len()), }, nil From bc25afafe8405b635813574efb9dc37a107a5b66 Mon Sep 17 00:00:00 2001 From: Steven Masley Date: Fri, 13 Jun 2025 11:50:58 -0500 Subject: [PATCH 11/15] remove 'UserAuthorizationCtx' --- coderd/authorize.go | 8 ++++---- coderd/httpmw/apikey.go | 6 +----- coderd/httpmw/apikey_test.go | 4 ++-- coderd/httpmw/authorize_test.go | 2 +- coderd/httpmw/ratelimit.go | 2 +- coderd/identityprovider/middleware.go | 2 +- coderd/roles.go | 4 ++-- coderd/users.go | 2 +- enterprise/coderd/provisionerdaemons.go | 2 +- 9 files changed, 14 insertions(+), 18 deletions(-) diff --git a/coderd/authorize.go b/coderd/authorize.go index 802cb5ea15e9b..575bb5e98baf6 100644 --- a/coderd/authorize.go +++ b/coderd/authorize.go @@ -19,7 +19,7 @@ import ( // objects that the user is authorized to perform the given action on. // This is faster than calling Authorize() on each object. func AuthorizeFilter[O rbac.Objecter](h *HTTPAuthorizer, r *http.Request, action policy.Action, objects []O) ([]O, error) { - roles := httpmw.UserAuthorization(r) + roles := httpmw.UserAuthorization(r.Context()) objects, err := rbac.Filter(r.Context(), h.Authorizer, roles, action, objects) if err != nil { // Log the error as Filter should not be erroring. @@ -65,7 +65,7 @@ func (api *API) Authorize(r *http.Request, action policy.Action, object rbac.Obj // return // } func (h *HTTPAuthorizer) Authorize(r *http.Request, action policy.Action, object rbac.Objecter) bool { - roles := httpmw.UserAuthorization(r) + roles := httpmw.UserAuthorization(r.Context()) err := h.Authorizer.Authorize(r.Context(), roles, action, object.RBACObject()) if err != nil { // Log the errors for debugging @@ -97,7 +97,7 @@ func (h *HTTPAuthorizer) Authorize(r *http.Request, action policy.Action, object // call 'Authorize()' on the returned objects. // Note the authorization is only for the given action and object type. 
func (h *HTTPAuthorizer) AuthorizeSQLFilter(r *http.Request, action policy.Action, objectType string) (rbac.PreparedAuthorized, error) { - roles := httpmw.UserAuthorization(r) + roles := httpmw.UserAuthorization(r.Context()) prepared, err := h.Authorizer.Prepare(r.Context(), roles, action, objectType) if err != nil { return nil, xerrors.Errorf("prepare filter: %w", err) @@ -120,7 +120,7 @@ func (h *HTTPAuthorizer) AuthorizeSQLFilter(r *http.Request, action policy.Actio // @Router /authcheck [post] func (api *API) checkAuthorization(rw http.ResponseWriter, r *http.Request) { ctx := r.Context() - auth := httpmw.UserAuthorization(r) + auth := httpmw.UserAuthorization(r.Context()) var params codersdk.AuthorizationRequest if !httpapi.Read(ctx, rw, r, ¶ms) { diff --git a/coderd/httpmw/apikey.go b/coderd/httpmw/apikey.go index 861746c9bf27b..a70dc30ec903b 100644 --- a/coderd/httpmw/apikey.go +++ b/coderd/httpmw/apikey.go @@ -53,11 +53,7 @@ func UserAuthorizationOptional(ctx context.Context) (rbac.Subject, bool) { // UserAuthorization returns the roles and scope used for authorization. Depends // on the ExtractAPIKey handler. -func UserAuthorization(r *http.Request) rbac.Subject { - return UserAuthorizationCtx(r.Context()) -} - -func UserAuthorizationCtx(ctx context.Context) rbac.Subject { +func UserAuthorization(ctx context.Context) rbac.Subject { auth, ok := UserAuthorizationOptional(ctx) if !ok { panic("developer error: ExtractAPIKey middleware not provided") diff --git a/coderd/httpmw/apikey_test.go b/coderd/httpmw/apikey_test.go index 2bdccd97e17b4..85f36959476b3 100644 --- a/coderd/httpmw/apikey_test.go +++ b/coderd/httpmw/apikey_test.go @@ -904,7 +904,7 @@ func TestAPIKey(t *testing.T) { })(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { assertActorOk(t, r) - auth := httpmw.UserAuthorization(r) + auth := httpmw.UserAuthorization(r.Context()) roles, err := auth.Roles.Expand() assert.NoError(t, err, "expand user roles") @@ -968,7 +968,7 @@ func TestAPIKey(t *testing.T) { RedirectToLogin: false, })(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { assertActorOk(t, r) - auth := httpmw.UserAuthorization(r) + auth := httpmw.UserAuthorization(r.Context()) roles, err := auth.Roles.Expand() assert.NoError(t, err, "expand user roles") diff --git a/coderd/httpmw/authorize_test.go b/coderd/httpmw/authorize_test.go index 5d04c5afacdb3..3ee9d92742252 100644 --- a/coderd/httpmw/authorize_test.go +++ b/coderd/httpmw/authorize_test.go @@ -125,7 +125,7 @@ func TestExtractUserRoles(t *testing.T) { }), ) rtr.Get("/", func(_ http.ResponseWriter, r *http.Request) { - roles := httpmw.UserAuthorization(r) + roles := httpmw.UserAuthorization(r.Context()) require.Equal(t, user.ID.String(), roles.ID) require.ElementsMatch(t, expRoles, roles.Roles.Names()) }) diff --git a/coderd/httpmw/ratelimit.go b/coderd/httpmw/ratelimit.go index 932373b5bacd9..ad1ecf3d6bbd9 100644 --- a/coderd/httpmw/ratelimit.go +++ b/coderd/httpmw/ratelimit.go @@ -43,7 +43,7 @@ func RateLimit(count int, window time.Duration) func(http.Handler) http.Handler // Allow Owner to bypass rate limiting for load tests // and automation. - auth := UserAuthorization(r) + auth := UserAuthorization(r.Context()) // We avoid using rbac.Authorizer since rego is CPU-intensive // and undermines the DoS-prevention goal of the rate limiter. 
diff --git a/coderd/identityprovider/middleware.go b/coderd/identityprovider/middleware.go index 1704ab2270f49..632e5a53c0319 100644 --- a/coderd/identityprovider/middleware.go +++ b/coderd/identityprovider/middleware.go @@ -36,7 +36,7 @@ func authorizeMW(accessURL *url.URL) func(next http.Handler) http.Handler { } app := httpmw.OAuth2ProviderApp(r) - ua := httpmw.UserAuthorization(r) + ua := httpmw.UserAuthorization(r.Context()) // url.Parse() allows empty URLs, which is fine because the origin is not // always set by browsers (or other tools like cURL). If the origin does diff --git a/coderd/roles.go b/coderd/roles.go index ed650f41fd6c9..3814cd36d29ad 100644 --- a/coderd/roles.go +++ b/coderd/roles.go @@ -26,7 +26,7 @@ import ( // @Router /users/roles [get] func (api *API) AssignableSiteRoles(rw http.ResponseWriter, r *http.Request) { ctx := r.Context() - actorRoles := httpmw.UserAuthorization(r) + actorRoles := httpmw.UserAuthorization(r.Context()) if !api.Authorize(r, policy.ActionRead, rbac.ResourceAssignRole) { httpapi.Forbidden(rw) return @@ -59,7 +59,7 @@ func (api *API) AssignableSiteRoles(rw http.ResponseWriter, r *http.Request) { func (api *API) assignableOrgRoles(rw http.ResponseWriter, r *http.Request) { ctx := r.Context() organization := httpmw.OrganizationParam(r) - actorRoles := httpmw.UserAuthorization(r) + actorRoles := httpmw.UserAuthorization(r.Context()) if !api.Authorize(r, policy.ActionRead, rbac.ResourceAssignOrgRole.InOrg(organization.ID)) { httpapi.ResourceNotFound(rw) diff --git a/coderd/users.go b/coderd/users.go index ad1ba8a018743..e2f6fd79c7d75 100644 --- a/coderd/users.go +++ b/coderd/users.go @@ -525,7 +525,7 @@ func (api *API) deleteUser(rw http.ResponseWriter, r *http.Request) { ctx := r.Context() auditor := *api.Auditor.Load() user := httpmw.UserParam(r) - auth := httpmw.UserAuthorization(r) + auth := httpmw.UserAuthorization(r.Context()) aReq, commitAudit := audit.InitRequest[database.User](rw, &audit.RequestParams{ Audit: auditor, Log: api.Logger, diff --git a/enterprise/coderd/provisionerdaemons.go b/enterprise/coderd/provisionerdaemons.go index 30f4ddd66d91c..c8304952781d1 100644 --- a/enterprise/coderd/provisionerdaemons.go +++ b/enterprise/coderd/provisionerdaemons.go @@ -133,7 +133,7 @@ func (p *provisionerDaemonAuth) authorize(r *http.Request, org database.Organiza tags: tags, }, nil } - ua := httpmw.UserAuthorization(r) + ua := httpmw.UserAuthorization(r.Context()) err = p.authorizer.Authorize(ctx, ua, policy.ActionCreate, rbac.ResourceProvisionerDaemon.InOrg(org.ID)) if err != nil { return provisiionerDaemonAuthResponse{}, xerrors.New("user unauthorized") From 038bb579696e44f7a6354fcc941157dfb85c778f Mon Sep 17 00:00:00 2001 From: Steven Masley Date: Fri, 13 Jun 2025 12:08:22 -0500 Subject: [PATCH 12/15] fix import loop, but had to make the file cache entry exported :cry: --- coderd/files/cache.go | 32 ++++++++++++++--------------- coderd/files/cache_internal_test.go | 17 +++++++-------- 2 files changed, 25 insertions(+), 24 deletions(-) diff --git a/coderd/files/cache.go b/coderd/files/cache.go index d1256649ded90..3ccb24fd8e3a4 100644 --- a/coderd/files/cache.go +++ b/coderd/files/cache.go @@ -22,20 +22,20 @@ import ( // NewFromStore returns a file cache that will fetch files from the provided // database. 
func NewFromStore(store database.Store, registerer prometheus.Registerer, authz rbac.Authorizer) *Cache { - fetch := func(ctx context.Context, fileID uuid.UUID) (cacheEntryValue, error) { + fetch := func(ctx context.Context, fileID uuid.UUID) (CacheEntryValue, error) { // Make sure the read does not fail due to authorization issues. // Authz is checked on the Acquire call, so this is safe. //nolint:gocritic file, err := store.GetFileByID(dbauthz.AsFileReader(ctx), fileID) if err != nil { - return cacheEntryValue{}, xerrors.Errorf("failed to read file from database: %w", err) + return CacheEntryValue{}, xerrors.Errorf("failed to read file from database: %w", err) } content := bytes.NewBuffer(file.Data) - return cacheEntryValue{ - object: file.RBACObject(), + return CacheEntryValue{ + Object: file.RBACObject(), FS: archivefs.FromTarReader(content), - size: int64(content.Len()), + Size: int64(content.Len()), }, nil } @@ -126,19 +126,19 @@ type cacheMetrics struct { totalCacheSize prometheus.Counter } -type cacheEntryValue struct { - object rbac.Object +type CacheEntryValue struct { + Object rbac.Object fs.FS - size int64 + Size int64 } type cacheEntry struct { // refCount must only be accessed while the Cache lock is held. refCount int - value *lazy.ValueWithError[cacheEntryValue] + value *lazy.ValueWithError[CacheEntryValue] } -type fetcher func(context.Context, uuid.UUID) (cacheEntryValue, error) +type fetcher func(context.Context, uuid.UUID) (CacheEntryValue, error) // Acquire will load the fs.FS for the given file. It guarantees that parallel // calls for the same fileID will only result in one fetch, and that parallel @@ -162,7 +162,7 @@ func (c *Cache) Acquire(ctx context.Context, fileID uuid.UUID) (fs.FS, error) { return nil, dbauthz.ErrNoActor } // Always check the caller can actually read the file. - if err := c.authz.Authorize(ctx, subject, policy.ActionRead, it.object); err != nil { + if err := c.authz.Authorize(ctx, subject, policy.ActionRead, it.Object); err != nil { c.Release(fileID) return nil, err } @@ -170,19 +170,19 @@ func (c *Cache) Acquire(ctx context.Context, fileID uuid.UUID) (fs.FS, error) { return it.FS, err } -func (c *Cache) prepare(ctx context.Context, fileID uuid.UUID) *lazy.ValueWithError[cacheEntryValue] { +func (c *Cache) prepare(ctx context.Context, fileID uuid.UUID) *lazy.ValueWithError[CacheEntryValue] { c.lock.Lock() defer c.lock.Unlock() entry, ok := c.data[fileID] if !ok { - value := lazy.NewWithError(func() (cacheEntryValue, error) { + value := lazy.NewWithError(func() (CacheEntryValue, error) { val, err := c.fetcher(ctx, fileID) // Always add to the cache size the bytes of the file loaded. 
if err == nil { - c.currentCacheSize.Add(float64(val.size)) - c.totalCacheSize.Add(float64(val.size)) + c.currentCacheSize.Add(float64(val.Size)) + c.totalCacheSize.Add(float64(val.Size)) } return val, err @@ -227,7 +227,7 @@ func (c *Cache) Release(fileID uuid.UUID) { ev, err := entry.value.Load() if err == nil { - c.currentCacheSize.Add(-1 * float64(ev.size)) + c.currentCacheSize.Add(-1 * float64(ev.Size)) } delete(c.data, fileID) diff --git a/coderd/files/cache_internal_test.go b/coderd/files/cache_internal_test.go index 57b5ae30fe76f..f029b31d69424 100644 --- a/coderd/files/cache_internal_test.go +++ b/coderd/files/cache_internal_test.go @@ -1,4 +1,4 @@ -package files +package files_test import ( "context" @@ -15,6 +15,7 @@ import ( "github.com/coder/coder/v2/coderd/coderdtest" "github.com/coder/coder/v2/coderd/coderdtest/promhelp" "github.com/coder/coder/v2/coderd/database/dbauthz" + "github.com/coder/coder/v2/coderd/files" "github.com/coder/coder/v2/testutil" ) @@ -31,12 +32,12 @@ func TestConcurrency(t *testing.T) { emptyFS := afero.NewIOFS(afero.NewReadOnlyFs(afero.NewMemMapFs())) var fetches atomic.Int64 reg := prometheus.NewRegistry() - c := New(func(_ context.Context, _ uuid.UUID) (cacheEntryValue, error) { + c := files.New(func(_ context.Context, _ uuid.UUID) (files.CacheEntryValue, error) { fetches.Add(1) // Wait long enough before returning to make sure that all of the goroutines // will be waiting in line, ensuring that no one duplicated a fetch. time.Sleep(testutil.IntervalMedium) - return cacheEntryValue{FS: emptyFS, size: fileSize}, nil + return files.CacheEntryValue{FS: emptyFS, Size: fileSize}, nil }, reg, &coderdtest.FakeAuthorizer{}) batches := 1000 @@ -84,10 +85,10 @@ func TestRelease(t *testing.T) { const fileSize = 10 emptyFS := afero.NewIOFS(afero.NewReadOnlyFs(afero.NewMemMapFs())) reg := prometheus.NewRegistry() - c := New(func(_ context.Context, _ uuid.UUID) (cacheEntryValue, error) { - return cacheEntryValue{ + c := files.New(func(_ context.Context, _ uuid.UUID) (files.CacheEntryValue, error) { + return files.CacheEntryValue{ FS: emptyFS, - size: fileSize, + Size: fileSize, }, nil }, reg, &coderdtest.FakeAuthorizer{}) @@ -118,7 +119,7 @@ func TestRelease(t *testing.T) { } // Make sure cache is fully loaded - require.Equal(t, len(c.data), batches) + require.Equal(t, c.Count(), batches) // Now release all of the references for closedIdx, id := range ids { @@ -142,7 +143,7 @@ func TestRelease(t *testing.T) { } // ...and make sure that the cache has emptied itself. - require.Equal(t, len(c.data), 0) + require.Equal(t, c.Count(), 0) // Verify all the counts & metrics are correct. 
// All existing files are closed From 3b733719a2d2700d5d03d4f5d5f7bb90e1fd65b7 Mon Sep 17 00:00:00 2001 From: Steven Masley Date: Fri, 13 Jun 2025 12:09:00 -0500 Subject: [PATCH 13/15] move test --- coderd/files/cache_internal_test.go | 159 ---------------------------- coderd/files/cache_test.go | 145 +++++++++++++++++++++++++ 2 files changed, 145 insertions(+), 159 deletions(-) delete mode 100644 coderd/files/cache_internal_test.go diff --git a/coderd/files/cache_internal_test.go b/coderd/files/cache_internal_test.go deleted file mode 100644 index f029b31d69424..0000000000000 --- a/coderd/files/cache_internal_test.go +++ /dev/null @@ -1,159 +0,0 @@ -package files_test - -import ( - "context" - "sync/atomic" - "testing" - "time" - - "github.com/google/uuid" - "github.com/prometheus/client_golang/prometheus" - "github.com/spf13/afero" - "github.com/stretchr/testify/require" - "golang.org/x/sync/errgroup" - - "github.com/coder/coder/v2/coderd/coderdtest" - "github.com/coder/coder/v2/coderd/coderdtest/promhelp" - "github.com/coder/coder/v2/coderd/database/dbauthz" - "github.com/coder/coder/v2/coderd/files" - "github.com/coder/coder/v2/testutil" -) - -func cachePromMetricName(metric string) string { - return "coderd_file_cache_" + metric -} - -func TestConcurrency(t *testing.T) { - t.Parallel() - //nolint:gocritic // Unit testing - ctx := dbauthz.AsFileReader(t.Context()) - - const fileSize = 10 - emptyFS := afero.NewIOFS(afero.NewReadOnlyFs(afero.NewMemMapFs())) - var fetches atomic.Int64 - reg := prometheus.NewRegistry() - c := files.New(func(_ context.Context, _ uuid.UUID) (files.CacheEntryValue, error) { - fetches.Add(1) - // Wait long enough before returning to make sure that all of the goroutines - // will be waiting in line, ensuring that no one duplicated a fetch. - time.Sleep(testutil.IntervalMedium) - return files.CacheEntryValue{FS: emptyFS, Size: fileSize}, nil - }, reg, &coderdtest.FakeAuthorizer{}) - - batches := 1000 - groups := make([]*errgroup.Group, 0, batches) - for range batches { - groups = append(groups, new(errgroup.Group)) - } - - // Call Acquire with a unique ID per batch, many times per batch, with many - // batches all in parallel. This is pretty much the worst-case scenario: - // thousands of concurrent reads, with both warm and cold loads happening. - batchSize := 10 - for _, g := range groups { - id := uuid.New() - for range batchSize { - g.Go(func() error { - // We don't bother to Release these references because the Cache will be - // released at the end of the test anyway. - _, err := c.Acquire(ctx, id) - return err - }) - } - } - - for _, g := range groups { - require.NoError(t, g.Wait()) - } - require.Equal(t, int64(batches), fetches.Load()) - - // Verify all the counts & metrics are correct. 
- require.Equal(t, batches, c.Count()) - require.Equal(t, batches*fileSize, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_size_bytes_current"), nil)) - require.Equal(t, batches*fileSize, promhelp.CounterValue(t, reg, cachePromMetricName("open_files_size_bytes_total"), nil)) - require.Equal(t, batches, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_current"), nil)) - require.Equal(t, batches, promhelp.CounterValue(t, reg, cachePromMetricName("open_files_total"), nil)) - require.Equal(t, batches*batchSize, promhelp.GaugeValue(t, reg, cachePromMetricName("open_file_refs_current"), nil)) - require.Equal(t, batches*batchSize, promhelp.CounterValue(t, reg, cachePromMetricName("open_file_refs_total"), nil)) -} - -func TestRelease(t *testing.T) { - t.Parallel() - //nolint:gocritic // Unit testing - ctx := dbauthz.AsFileReader(t.Context()) - - const fileSize = 10 - emptyFS := afero.NewIOFS(afero.NewReadOnlyFs(afero.NewMemMapFs())) - reg := prometheus.NewRegistry() - c := files.New(func(_ context.Context, _ uuid.UUID) (files.CacheEntryValue, error) { - return files.CacheEntryValue{ - FS: emptyFS, - Size: fileSize, - }, nil - }, reg, &coderdtest.FakeAuthorizer{}) - - batches := 100 - ids := make([]uuid.UUID, 0, batches) - for range batches { - ids = append(ids, uuid.New()) - } - - // Acquire a bunch of references - batchSize := 10 - for openedIdx, id := range ids { - for batchIdx := range batchSize { - it, err := c.Acquire(ctx, id) - require.NoError(t, err) - require.Equal(t, emptyFS, it) - - // Each time a new file is opened, the metrics should be updated as so: - opened := openedIdx + 1 - // Number of unique files opened is equal to the idx of the ids. - require.Equal(t, opened, c.Count()) - require.Equal(t, opened, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_current"), nil)) - // Current file size is unique files * file size. - require.Equal(t, opened*fileSize, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_size_bytes_current"), nil)) - // The number of refs is the current iteration of both loops. - require.Equal(t, ((opened-1)*batchSize)+(batchIdx+1), promhelp.GaugeValue(t, reg, cachePromMetricName("open_file_refs_current"), nil)) - } - } - - // Make sure cache is fully loaded - require.Equal(t, c.Count(), batches) - - // Now release all of the references - for closedIdx, id := range ids { - stillOpen := len(ids) - closedIdx - for closingIdx := range batchSize { - c.Release(id) - - // Each time a file is released, the metrics should decrement the file refs - require.Equal(t, (stillOpen*batchSize)-(closingIdx+1), promhelp.GaugeValue(t, reg, cachePromMetricName("open_file_refs_current"), nil)) - - closed := closingIdx+1 == batchSize - if closed { - continue - } - - // File ref still exists, so the counts should not change yet. - require.Equal(t, stillOpen, c.Count()) - require.Equal(t, stillOpen, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_current"), nil)) - require.Equal(t, stillOpen*fileSize, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_size_bytes_current"), nil)) - } - } - - // ...and make sure that the cache has emptied itself. - require.Equal(t, c.Count(), 0) - - // Verify all the counts & metrics are correct. 
- // All existing files are closed - require.Equal(t, 0, c.Count()) - require.Equal(t, 0, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_size_bytes_current"), nil)) - require.Equal(t, 0, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_current"), nil)) - require.Equal(t, 0, promhelp.GaugeValue(t, reg, cachePromMetricName("open_file_refs_current"), nil)) - - // Total counts remain - require.Equal(t, batches*fileSize, promhelp.CounterValue(t, reg, cachePromMetricName("open_files_size_bytes_total"), nil)) - require.Equal(t, batches, promhelp.CounterValue(t, reg, cachePromMetricName("open_files_total"), nil)) - require.Equal(t, batches*batchSize, promhelp.CounterValue(t, reg, cachePromMetricName("open_file_refs_total"), nil)) -} diff --git a/coderd/files/cache_test.go b/coderd/files/cache_test.go index 4b9da437bc577..469520b4139fe 100644 --- a/coderd/files/cache_test.go +++ b/coderd/files/cache_test.go @@ -1,14 +1,20 @@ package files_test import ( + "context" + "sync/atomic" "testing" + "time" "github.com/google/uuid" "github.com/prometheus/client_golang/prometheus" + "github.com/spf13/afero" "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" "cdr.dev/slog/sloggers/slogtest" "github.com/coder/coder/v2/coderd/coderdtest" + "github.com/coder/coder/v2/coderd/coderdtest/promhelp" "github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/database/dbauthz" "github.com/coder/coder/v2/coderd/database/dbgen" @@ -94,6 +100,145 @@ func TestCacheRBAC(t *testing.T) { }) } +func cachePromMetricName(metric string) string { + return "coderd_file_cache_" + metric +} + +func TestConcurrency(t *testing.T) { + t.Parallel() + //nolint:gocritic // Unit testing + ctx := dbauthz.AsFileReader(t.Context()) + + const fileSize = 10 + emptyFS := afero.NewIOFS(afero.NewReadOnlyFs(afero.NewMemMapFs())) + var fetches atomic.Int64 + reg := prometheus.NewRegistry() + c := files.New(func(_ context.Context, _ uuid.UUID) (files.CacheEntryValue, error) { + fetches.Add(1) + // Wait long enough before returning to make sure that all of the goroutines + // will be waiting in line, ensuring that no one duplicated a fetch. + time.Sleep(testutil.IntervalMedium) + return files.CacheEntryValue{FS: emptyFS, Size: fileSize}, nil + }, reg, &coderdtest.FakeAuthorizer{}) + + batches := 1000 + groups := make([]*errgroup.Group, 0, batches) + for range batches { + groups = append(groups, new(errgroup.Group)) + } + + // Call Acquire with a unique ID per batch, many times per batch, with many + // batches all in parallel. This is pretty much the worst-case scenario: + // thousands of concurrent reads, with both warm and cold loads happening. + batchSize := 10 + for _, g := range groups { + id := uuid.New() + for range batchSize { + g.Go(func() error { + // We don't bother to Release these references because the Cache will be + // released at the end of the test anyway. + _, err := c.Acquire(ctx, id) + return err + }) + } + } + + for _, g := range groups { + require.NoError(t, g.Wait()) + } + require.Equal(t, int64(batches), fetches.Load()) + + // Verify all the counts & metrics are correct. 
+ require.Equal(t, batches, c.Count()) + require.Equal(t, batches*fileSize, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_size_bytes_current"), nil)) + require.Equal(t, batches*fileSize, promhelp.CounterValue(t, reg, cachePromMetricName("open_files_size_bytes_total"), nil)) + require.Equal(t, batches, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_current"), nil)) + require.Equal(t, batches, promhelp.CounterValue(t, reg, cachePromMetricName("open_files_total"), nil)) + require.Equal(t, batches*batchSize, promhelp.GaugeValue(t, reg, cachePromMetricName("open_file_refs_current"), nil)) + require.Equal(t, batches*batchSize, promhelp.CounterValue(t, reg, cachePromMetricName("open_file_refs_total"), nil)) +} + +func TestRelease(t *testing.T) { + t.Parallel() + //nolint:gocritic // Unit testing + ctx := dbauthz.AsFileReader(t.Context()) + + const fileSize = 10 + emptyFS := afero.NewIOFS(afero.NewReadOnlyFs(afero.NewMemMapFs())) + reg := prometheus.NewRegistry() + c := files.New(func(_ context.Context, _ uuid.UUID) (files.CacheEntryValue, error) { + return files.CacheEntryValue{ + FS: emptyFS, + Size: fileSize, + }, nil + }, reg, &coderdtest.FakeAuthorizer{}) + + batches := 100 + ids := make([]uuid.UUID, 0, batches) + for range batches { + ids = append(ids, uuid.New()) + } + + // Acquire a bunch of references + batchSize := 10 + for openedIdx, id := range ids { + for batchIdx := range batchSize { + it, err := c.Acquire(ctx, id) + require.NoError(t, err) + require.Equal(t, emptyFS, it) + + // Each time a new file is opened, the metrics should be updated as so: + opened := openedIdx + 1 + // Number of unique files opened is equal to the idx of the ids. + require.Equal(t, opened, c.Count()) + require.Equal(t, opened, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_current"), nil)) + // Current file size is unique files * file size. + require.Equal(t, opened*fileSize, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_size_bytes_current"), nil)) + // The number of refs is the current iteration of both loops. + require.Equal(t, ((opened-1)*batchSize)+(batchIdx+1), promhelp.GaugeValue(t, reg, cachePromMetricName("open_file_refs_current"), nil)) + } + } + + // Make sure cache is fully loaded + require.Equal(t, c.Count(), batches) + + // Now release all of the references + for closedIdx, id := range ids { + stillOpen := len(ids) - closedIdx + for closingIdx := range batchSize { + c.Release(id) + + // Each time a file is released, the metrics should decrement the file refs + require.Equal(t, (stillOpen*batchSize)-(closingIdx+1), promhelp.GaugeValue(t, reg, cachePromMetricName("open_file_refs_current"), nil)) + + closed := closingIdx+1 == batchSize + if closed { + continue + } + + // File ref still exists, so the counts should not change yet. + require.Equal(t, stillOpen, c.Count()) + require.Equal(t, stillOpen, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_current"), nil)) + require.Equal(t, stillOpen*fileSize, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_size_bytes_current"), nil)) + } + } + + // ...and make sure that the cache has emptied itself. + require.Equal(t, c.Count(), 0) + + // Verify all the counts & metrics are correct. 
+	// All existing files are closed
+	require.Equal(t, 0, c.Count())
+	require.Equal(t, 0, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_size_bytes_current"), nil))
+	require.Equal(t, 0, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_current"), nil))
+	require.Equal(t, 0, promhelp.GaugeValue(t, reg, cachePromMetricName("open_file_refs_current"), nil))
+
+	// Total counts remain
+	require.Equal(t, batches*fileSize, promhelp.CounterValue(t, reg, cachePromMetricName("open_files_size_bytes_total"), nil))
+	require.Equal(t, batches, promhelp.CounterValue(t, reg, cachePromMetricName("open_files_total"), nil))
+	require.Equal(t, batches*batchSize, promhelp.CounterValue(t, reg, cachePromMetricName("open_file_refs_total"), nil))
+}
+
 func cacheAuthzSetup(t *testing.T) (database.Store, *files.Cache, *coderdtest.RecordingAuthorizer) {
 	t.Helper()
 

From df5ca147eaa3ce23f730a4549316fbcbe5586565 Mon Sep 17 00:00:00 2001
From: Steven Masley
Date: Sun, 15 Jun 2025 17:38:37 -0500
Subject: [PATCH 14/15] Update coderd/files/cache.go
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: ケイラ

---
 coderd/files/cache.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/coderd/files/cache.go b/coderd/files/cache.go
index 3ccb24fd8e3a4..1d9805b1b0e36 100644
--- a/coderd/files/cache.go
+++ b/coderd/files/cache.go
@@ -127,8 +127,8 @@ type cacheMetrics struct {
 }
 
 type CacheEntryValue struct {
-	Object rbac.Object
 	fs.FS
+	Object rbac.Object
 	Size   int64
 }
 

From 18bd4b7067ff13405266f9003753957ce71c9e58 Mon Sep 17 00:00:00 2001
From: Steven Masley
Date: Mon, 16 Jun 2025 08:27:56 -0500
Subject: [PATCH 15/15] fmt

---
 coderd/files/cache.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/coderd/files/cache.go b/coderd/files/cache.go
index 1d9805b1b0e36..484507d2ac5b0 100644
--- a/coderd/files/cache.go
+++ b/coderd/files/cache.go
@@ -129,7 +129,7 @@ type cacheMetrics struct {
 
 type CacheEntryValue struct {
 	fs.FS
 	Object rbac.Object
-	Size int64
+	Size   int64
 }
 
 type cacheEntry struct {
