diff --git a/tailnet/configmaps.go b/tailnet/configmaps.go
index 2ca1dd17b204c..2e5e019bf271c 100644
--- a/tailnet/configmaps.go
+++ b/tailnet/configmaps.go
@@ -207,7 +207,11 @@ func (c *configMaps) netMapLocked() *netmap.NetworkMap {
 func (c *configMaps) peerConfigLocked() []*tailcfg.Node {
 	out := make([]*tailcfg.Node, 0, len(c.peers))
 	for _, p := range c.peers {
-		out = append(out, p.node.Clone())
+		n := p.node.Clone()
+		if c.blockEndpoints {
+			n.Endpoints = nil
+		}
+		out = append(out, n)
 	}
 	return out
 }
@@ -228,6 +232,19 @@ func (c *configMaps) setAddresses(ips []netip.Prefix) {
 	c.Broadcast()
 }
 
+// setBlockEndpoints sets whether we should block configuring endpoints we learn
+// from peers. It triggers a configuration of the engine if the value changes.
+// nolint: revive
+func (c *configMaps) setBlockEndpoints(blockEndpoints bool) {
+	c.L.Lock()
+	defer c.L.Unlock()
+	if c.blockEndpoints != blockEndpoints {
+		c.netmapDirty = true
+	}
+	c.blockEndpoints = blockEndpoints
+	c.Broadcast()
+}
+
 // derpMapLocked returns the current DERPMap. c.L must be held
 func (c *configMaps) derpMapLocked() *tailcfg.DERPMap {
 	m := DERPMapFromProto(c.derpMap)
@@ -342,9 +359,6 @@ func (c *configMaps) updatePeerLocked(update *proto.CoordinateResponse_PeerUpdat
 		// to avoid random hangs while we set up the connection again after
 		// inactivity.
 		node.KeepAlive = ok && peerStatus.Active
-		if c.blockEndpoints {
-			node.Endpoints = nil
-		}
 	}
 	switch {
 	case !ok && update.Kind == proto.CoordinateResponse_PeerUpdate_NODE:
diff --git a/tailnet/configmaps_internal_test.go b/tailnet/configmaps_internal_test.go
index 3b2c27fad8342..003ac1b5229d6 100644
--- a/tailnet/configmaps_internal_test.go
+++ b/tailnet/configmaps_internal_test.go
@@ -484,6 +484,93 @@ func TestConfigMaps_updatePeers_lost_and_found(t *testing.T) {
 	_ = testutil.RequireRecvCtx(ctx, t, done)
 }
 
+func TestConfigMaps_setBlockEndpoints_different(t *testing.T) {
+	t.Parallel()
+	ctx := testutil.Context(t, testutil.WaitShort)
+	logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
+	fEng := newFakeEngineConfigurable()
+	nodePrivateKey := key.NewNode()
+	nodeID := tailcfg.NodeID(5)
+	discoKey := key.NewDisco()
+	uut := newConfigMaps(logger, fEng, nodeID, nodePrivateKey, discoKey.Public(), nil)
+	defer uut.close()
+
+	p1ID := uuid.MustParse("10000000-0000-0000-0000-000000000000")
+	p1Node := newTestNode(1)
+	p1n, err := NodeToProto(p1Node)
+	require.NoError(t, err)
+	p1tcn, err := uut.protoNodeToTailcfg(p1n)
+	require.NoError(t, err)
+	p1tcn.KeepAlive = true
+
+	// Given: peer already exists
+	uut.L.Lock()
+	uut.peers[p1ID] = &peerLifecycle{
+		peerID:        p1ID,
+		node:          p1tcn,
+		lastHandshake: time.Date(2024, 1, 7, 12, 0, 10, 0, time.UTC),
+	}
+	uut.L.Unlock()
+
+	uut.setBlockEndpoints(true)
+
+	nm := testutil.RequireRecvCtx(ctx, t, fEng.setNetworkMap)
+	r := testutil.RequireRecvCtx(ctx, t, fEng.reconfig)
+	require.Len(t, nm.Peers, 1)
+	require.Len(t, nm.Peers[0].Endpoints, 0)
+	require.Len(t, r.wg.Peers, 1)
+
+	done := make(chan struct{})
+	go func() {
+		defer close(done)
+		uut.close()
+	}()
+	_ = testutil.RequireRecvCtx(ctx, t, done)
+}
+
+func TestConfigMaps_setBlockEndpoints_same(t *testing.T) {
+	t.Parallel()
+	ctx := testutil.Context(t, testutil.WaitShort)
+	logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
+	fEng := newFakeEngineConfigurable()
+	nodePrivateKey := key.NewNode()
+	nodeID := tailcfg.NodeID(5)
+	discoKey := key.NewDisco()
+	uut := newConfigMaps(logger, fEng, nodeID, nodePrivateKey, discoKey.Public(), nil)
+	defer uut.close()
+
+	p1ID := uuid.MustParse("10000000-0000-0000-0000-000000000000")
+	p1Node := newTestNode(1)
+	p1n, err := NodeToProto(p1Node)
+	require.NoError(t, err)
+	p1tcn, err := uut.protoNodeToTailcfg(p1n)
+	require.NoError(t, err)
+	p1tcn.KeepAlive = true
+
+	// Given: peer already exists && blockEndpoints set to true
+	uut.L.Lock()
+	uut.peers[p1ID] = &peerLifecycle{
+		peerID:        p1ID,
+		node:          p1tcn,
+		lastHandshake: time.Date(2024, 1, 7, 12, 0, 10, 0, time.UTC),
+	}
+	uut.blockEndpoints = true
+	uut.L.Unlock()
+
+	// Then: we don't configure
+	requireNeverConfigures(ctx, t, uut)
+
+	// When we set blockEndpoints to true
+	uut.setBlockEndpoints(true)
+
+	done := make(chan struct{})
+	go func() {
+		defer close(done)
+		uut.close()
+	}()
+	_ = testutil.RequireRecvCtx(ctx, t, done)
+}
+
 func expectStatusWithHandshake(
 	ctx context.Context, t testing.TB, fEng *fakeEngineConfigurable, k key.NodePublic, lastHandshake time.Time,
 ) <-chan struct{} {
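The interesting move in the first file is that endpoint blocking shifts from the write path (updatePeerLocked, where it only affected peer updates arriving after the flag was set) to the read path (peerConfigLocked), while setBlockEndpoints marks the netmap dirty whenever the value changes; together these make the toggle retroactive for peers that already exist. Below is a minimal, self-contained sketch of that read-path-filtering pattern. The peerStore and Peer names are hypothetical stand-ins, not the tailnet package's API, and the sketch omits the locking the real code performs under c.L.

package main

import "fmt"

// Peer stands in for tailcfg.Node: it carries the endpoints a peer advertised.
type Peer struct {
	Name      string
	Endpoints []string
}

// peerStore stands in for configMaps: it owns the peer set and the toggle.
type peerStore struct {
	peers          map[string]*Peer
	blockEndpoints bool
}

// peerConfig mirrors peerConfigLocked: peers are stored with their endpoints
// intact and the filter is applied on the way out, so flipping blockEndpoints
// changes what existing peers look like in the next generated config.
func (s *peerStore) peerConfig() []Peer {
	out := make([]Peer, 0, len(s.peers))
	for _, p := range s.peers {
		n := *p // copy, as the real code does with p.node.Clone()
		if s.blockEndpoints {
			n.Endpoints = nil // strip direct endpoints; force relayed connections
		}
		out = append(out, n)
	}
	return out
}

func main() {
	s := &peerStore{peers: map[string]*Peer{
		"p1": {Name: "p1", Endpoints: []string{"192.0.2.1:41641"}},
	}}
	fmt.Println(s.peerConfig()[0].Endpoints) // [192.0.2.1:41641]

	// The toggle applies retroactively: the stored peer is untouched, but
	// the next generated config omits its endpoints.
	s.blockEndpoints = true
	fmt.Println(s.peerConfig()[0].Endpoints) // []
}

One consequence, which the second test pins down: because setBlockEndpoints only sets netmapDirty when the value actually changes, writing the same value again must not trigger a reconfiguration.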
