Commit 38a0415

chore: add support for blockEndpoints to configMaps
1 parent 40c72c2 commit 38a0415

File tree

2 files changed: +105 −4 lines changed

tailnet/configmaps.go
tailnet/configmaps_internal_test.go
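In short: the commit moves endpoint blocking out of updatePeerLocked and into peerConfigLocked, and adds a setBlockEndpoints setter so the flag can be toggled at runtime and trigger an engine reconfiguration. A rough sketch, inferred from the diff below rather than copied from the repository, of the configMaps fields the change relies on (the embedding and the elided fields are assumptions):

// Inferred sketch only; the real struct in tailnet/configmaps.go has more
// fields and may organize its lock/condition variable differently.
type configMaps struct {
    sync.Cond // supplies c.L (the lock) and c.Broadcast()

    peers          map[uuid.UUID]*peerLifecycle
    netmapDirty    bool // set when the peer config must be pushed to the engine again
    blockEndpoints bool // when true, peer Endpoints are stripped in peerConfigLocked
    // ...remaining fields elided
}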

tailnet/configmaps.go

Lines changed: 18 additions & 4 deletions

@@ -207,7 +207,11 @@ func (c *configMaps) netMapLocked() *netmap.NetworkMap {
 func (c *configMaps) peerConfigLocked() []*tailcfg.Node {
     out := make([]*tailcfg.Node, 0, len(c.peers))
     for _, p := range c.peers {
-        out = append(out, p.node.Clone())
+        n := p.node.Clone()
+        if c.blockEndpoints {
+            n.Endpoints = nil
+        }
+        out = append(out, n)
     }
     return out
 }
@@ -228,6 +232,19 @@ func (c *configMaps) setAddresses(ips []netip.Prefix) {
     c.Broadcast()
 }

+// setBlockEndpoints sets whether we should block configuring endpoints we learn
+// from peers. It triggers a configuration of the engine if the value changes.
+// nolint: revive
+func (c *configMaps) setBlockEndpoints(blockEndpoints bool) {
+    c.L.Lock()
+    defer c.L.Unlock()
+    if c.blockEndpoints != blockEndpoints {
+        c.netmapDirty = true
+    }
+    c.blockEndpoints = blockEndpoints
+    c.Broadcast()
+}
+
 // derpMapLocked returns the current DERPMap. c.L must be held
 func (c *configMaps) derpMapLocked() *tailcfg.DERPMap {
     m := DERPMapFromProto(c.derpMap)
@@ -342,9 +359,6 @@ func (c *configMaps) updatePeerLocked(update *proto.CoordinateResponse_PeerUpdat
         // to avoid random hangs while we set up the connection again after
         // inactivity.
         node.KeepAlive = ok && peerStatus.Active
-        if c.blockEndpoints {
-            node.Endpoints = nil
-        }
     }
     switch {
     case !ok && update.Kind == proto.CoordinateResponse_PeerUpdate_NODE:
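The net effect of these hunks: endpoints are now stripped when the peer configuration is built, not when a peer update arrives, so flipping the flag later also affects peers that are already known. A hedged caller sketch under that reading; applyDirectConnectionSetting and its use are hypothetical and not part of this commit:

// Hypothetical package-internal caller, shown only to illustrate the setter's
// semantics; it is not code from this commit.
func applyDirectConnectionSetting(cm *configMaps, allowDirect bool) {
    // Blocking endpoints removes peer Endpoints in peerConfigLocked, leaving
    // only DERP-relayed paths. The setter marks the netmap dirty only when the
    // value actually changes, so repeating the same value does not force a
    // reconfiguration.
    cm.setBlockEndpoints(!allowDirect)
}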

tailnet/configmaps_internal_test.go

Lines changed: 87 additions & 0 deletions

@@ -475,6 +475,93 @@ func TestConfigMaps_updatePeers_lost_and_found(t *testing.T) {
     _ = testutil.RequireRecvCtx(ctx, t, done)
 }

+func TestConfigMaps_setBlockEndpoints_different(t *testing.T) {
+    t.Parallel()
+    ctx := testutil.Context(t, testutil.WaitShort)
+    logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
+    fEng := newFakeEngineConfigurable()
+    nodePrivateKey := key.NewNode()
+    nodeID := tailcfg.NodeID(5)
+    discoKey := key.NewDisco()
+    uut := newConfigMaps(logger, fEng, nodeID, nodePrivateKey, discoKey.Public(), nil)
+    defer uut.close()
+
+    p1ID := uuid.MustParse("10000000-0000-0000-0000-000000000000")
+    p1Node := newTestNode(1)
+    p1n, err := NodeToProto(p1Node)
+    require.NoError(t, err)
+    p1tcn, err := uut.protoNodeToTailcfg(p1n)
+    p1tcn.KeepAlive = true
+    require.NoError(t, err)
+
+    // Given: peer already exists
+    uut.L.Lock()
+    uut.peers[p1ID] = &peerLifecycle{
+        peerID:        p1ID,
+        node:          p1tcn,
+        lastHandshake: time.Date(2024, 1, 7, 12, 0, 10, 0, time.UTC),
+    }
+    uut.L.Unlock()
+
+    uut.setBlockEndpoints(true)
+
+    nm := testutil.RequireRecvCtx(ctx, t, fEng.setNetworkMap)
+    r := testutil.RequireRecvCtx(ctx, t, fEng.reconfig)
+    require.Len(t, nm.Peers, 1)
+    require.Len(t, nm.Peers[0].Endpoints, 0)
+    require.Len(t, r.wg.Peers, 1)
+
+    done := make(chan struct{})
+    go func() {
+        defer close(done)
+        uut.close()
+    }()
+    _ = testutil.RequireRecvCtx(ctx, t, done)
+}
+
+func TestConfigMaps_setBlockEndpoints_same(t *testing.T) {
+    t.Parallel()
+    ctx := testutil.Context(t, testutil.WaitShort)
+    logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
+    fEng := newFakeEngineConfigurable()
+    nodePrivateKey := key.NewNode()
+    nodeID := tailcfg.NodeID(5)
+    discoKey := key.NewDisco()
+    uut := newConfigMaps(logger, fEng, nodeID, nodePrivateKey, discoKey.Public(), nil)
+    defer uut.close()
+
+    p1ID := uuid.MustParse("10000000-0000-0000-0000-000000000000")
+    p1Node := newTestNode(1)
+    p1n, err := NodeToProto(p1Node)
+    require.NoError(t, err)
+    p1tcn, err := uut.protoNodeToTailcfg(p1n)
+    p1tcn.KeepAlive = true
+    require.NoError(t, err)
+
+    // Given: peer already exists && blockEndpoints set to true
+    uut.L.Lock()
+    uut.peers[p1ID] = &peerLifecycle{
+        peerID:        p1ID,
+        node:          p1tcn,
+        lastHandshake: time.Date(2024, 1, 7, 12, 0, 10, 0, time.UTC),
+    }
+    uut.blockEndpoints = true
+    uut.L.Unlock()
+
+    // Then: we don't configure
+    requireNeverConfigures(ctx, t, uut)
+
+    // When we set blockEndpoints to true
+    uut.setBlockEndpoints(true)
+
+    done := make(chan struct{})
+    go func() {
+        defer close(done)
+        uut.close()
+    }()
+    _ = testutil.RequireRecvCtx(ctx, t, done)
+}
+
 func expectStatusWithHandshake(
     ctx context.Context, t testing.TB, fEng *fakeEngineConfigurable, k key.NodePublic, lastHandshake time.Time,
 ) <-chan struct{} {
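Both tests lean on the same fixture idea: the fake engine turns engine calls into channel sends, so the "different" case can receive the pushed netmap and reconfig, while the "same" case proves nothing is pushed via requireNeverConfigures. A minimal sketch of that pattern follows; the shape of the real fakeEngineConfigurable in this package is assumed, not copied from the commit:

// Assumed shape of a channel-based engine fake; type and field names here are
// illustrative only.
type fakeEngineSketch struct {
    setNetworkMap chan *netmap.NetworkMap
    reconfig      chan reconfigCall
}

type reconfigCall struct {
    wg     *wgcfg.Config
    router *router.Config
}

// SetNetworkMap forwards the pushed netmap to the test, which can then assert
// on it with testutil.RequireRecvCtx or prove its absence with a helper like
// requireNeverConfigures.
func (f *fakeEngineSketch) SetNetworkMap(nm *netmap.NetworkMap) {
    f.setNetworkMap <- nm
}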

0 commit comments
