Add Resolver interface to transport #1719

Merged (15 commits) on Sep 13, 2022
config/config.go (4 changes: 3 additions & 1 deletion)
@@ -155,6 +155,9 @@ func (cfg *Config) makeSwarm() (*swarm.Swarm, error) {
if cfg.ResourceManager != nil {
opts = append(opts, swarm.WithResourceManager(cfg.ResourceManager))
}
if cfg.MultiaddrResolver != nil {
opts = append(opts, swarm.WithMultiaddrResolver(cfg.MultiaddrResolver))
}
// TODO: Make the swarm implementation configurable.
return swarm.NewSwarm(pid, cfg.Peerstore, opts...)
}
@@ -229,7 +232,6 @@ func (cfg *Config) NewNode() (host.Host, error) {
EnablePing: !cfg.DisablePing,
UserAgent: cfg.UserAgent,
ProtocolVersion: cfg.ProtocolVersion,
MultiaddrResolver: cfg.MultiaddrResolver,
EnableHolePunching: cfg.EnableHolePunching,
HolePunchingOptions: cfg.HolePunchingOptions,
EnableRelayService: cfg.EnableRelayService,
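With this change, the resolver configured on the libp2p Config is handed to the swarm instead of the basic host. For reference, a minimal sketch of supplying that resolver from user code, assuming the existing libp2p.MultiaddrResolver option; the example.com mock backend below is purely illustrative and not part of this PR:

package main

import (
	"log"
	"net"

	"github.com/libp2p/go-libp2p"

	madns "github.com/multiformats/go-multiaddr-dns"
)

func main() {
	// Back a single domain with a mock resolver; everything else goes through
	// the default DNS path. The example.com mapping is an illustrative assumption.
	backend := &madns.MockResolver{IP: map[string][]net.IPAddr{
		"example.com": {{IP: net.ParseIP("127.0.0.1")}},
	}}
	resolver, err := madns.NewResolver(madns.WithDomainResolver("example.com", backend))
	if err != nil {
		log.Fatal(err)
	}

	// cfg.MultiaddrResolver is populated by this option; after this PR the
	// config passes it to the swarm via swarm.WithMultiaddrResolver.
	h, err := libp2p.New(libp2p.MultiaddrResolver(resolver))
	if err != nil {
		log.Fatal(err)
	}
	defer h.Close()
}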
core/transport/transport.go (6 changes: 6 additions & 0 deletions)
@@ -77,6 +77,12 @@ type Transport interface {
Proxy() bool
}

// Resolver can be optionally implemented by transports that want to resolve or transform the
// multiaddr.
type Resolver interface {
Resolve(ctx context.Context, maddr ma.Multiaddr) ([]ma.Multiaddr, error)
}

// Listener is an interface closely resembling the net.Listener interface. The
// only real difference is that Accept() returns Conn's of the type in this
// package, and also exposes a Multiaddr method as opposed to a regular Addr
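To illustrate the new interface, a transport that wants custom address resolution would implement Resolve alongside its usual Transport methods. The sketch below is an assumption for illustration only; the dnsTransport type and its delegation to madns are hypothetical, not code from this PR:

package customtransport

import (
	"context"

	"github.com/libp2p/go-libp2p/core/transport"

	ma "github.com/multiformats/go-multiaddr"
	madns "github.com/multiformats/go-multiaddr-dns"
)

// dnsTransport is a hypothetical transport wrapper that resolves DNS
// components before the swarm dials the resulting addresses.
type dnsTransport struct {
	transport.Transport
	resolver *madns.Resolver
}

// Compile-time check that the wrapper satisfies the new optional interface.
var _ transport.Resolver = (*dnsTransport)(nil)

// Resolve expands maddr into zero or more dialable multiaddrs. A transport
// could equally rewrite the address (e.g. map a custom protocol component
// onto something it knows how to dial) rather than perform DNS lookups.
func (t *dnsTransport) Resolve(ctx context.Context, maddr ma.Multiaddr) ([]ma.Multiaddr, error) {
	return t.resolver.Resolve(ctx, maddr)
}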
p2p/host/basic/basic_host.go (72 changes: 0 additions & 72 deletions)
@@ -38,10 +38,6 @@ import (
msmux "github.com/multiformats/go-multistream"
)

// The maximum number of address resolution steps we'll perform for a single
// peer (for all addresses).
const maxAddressResolution = 32

// addrChangeTickrInterval is the interval between two address change ticks.
var addrChangeTickrInterval = 5 * time.Second

@@ -713,77 +709,9 @@ func (h *BasicHost) Connect(ctx context.Context, pi peer.AddrInfo) error {
}
}

resolved, err := h.resolveAddrs(ctx, h.Peerstore().PeerInfo(pi.ID))
if err != nil {
return err
}
h.Peerstore().AddAddrs(pi.ID, resolved, peerstore.TempAddrTTL)

return h.dialPeer(ctx, pi.ID)
}

func (h *BasicHost) resolveAddrs(ctx context.Context, pi peer.AddrInfo) ([]ma.Multiaddr, error) {
proto := ma.ProtocolWithCode(ma.P_P2P).Name
p2paddr, err := ma.NewMultiaddr("/" + proto + "/" + pi.ID.Pretty())
if err != nil {
return nil, err
}

resolveSteps := 0

// Recursively resolve all addrs.
//
// While the toResolve list is non-empty:
// * Pop an address off.
// * If the address is fully resolved, add it to the resolved list.
// * Otherwise, resolve it and add the results to the "to resolve" list.
toResolve := append(([]ma.Multiaddr)(nil), pi.Addrs...)
resolved := make([]ma.Multiaddr, 0, len(pi.Addrs))
for len(toResolve) > 0 {
// pop the last addr off.
addr := toResolve[len(toResolve)-1]
toResolve = toResolve[:len(toResolve)-1]

// if it's resolved, add it to the resolved list.
if !madns.Matches(addr) {
resolved = append(resolved, addr)
continue
}

resolveSteps++

// We've resolved too many addresses. We can keep all the fully
// resolved addresses but we'll need to skip the rest.
if resolveSteps >= maxAddressResolution {
log.Warnf(
"peer %s asked us to resolve too many addresses: %s/%s",
pi.ID,
resolveSteps,
maxAddressResolution,
)
continue
}

// otherwise, resolve it
reqaddr := addr.Encapsulate(p2paddr)
resaddrs, err := h.maResolver.Resolve(ctx, reqaddr)
if err != nil {
log.Infof("error resolving %s: %s", reqaddr, err)
}

// add the results to the toResolve list.
for _, res := range resaddrs {
pi, err := peer.AddrInfoFromP2pAddr(res)
if err != nil {
log.Infof("error parsing %s: %s", res, err)
}
toResolve = append(toResolve, pi.Addrs...)
}
}

return resolved, nil
}

// dialPeer opens a connection to peer, and makes sure to identify
// the connection once it has been opened.
func (h *BasicHost) dialPeer(ctx context.Context, p peer.ID) error {
p2p/host/basic/basic_host_test.go (107 changes: 0 additions & 107 deletions)
@@ -17,14 +17,12 @@ import (
"github.com/libp2p/go-libp2p/core/peerstore"
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/libp2p/go-libp2p/core/record"
"github.com/libp2p/go-libp2p/core/test"
"github.com/libp2p/go-libp2p/p2p/host/autonat"
"github.com/libp2p/go-libp2p/p2p/host/eventbus"
swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
"github.com/libp2p/go-libp2p/p2p/protocol/identify"

ma "github.com/multiformats/go-multiaddr"
madns "github.com/multiformats/go-multiaddr-dns"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -526,111 +524,6 @@ func TestProtoDowngrade(t *testing.T) {
assertWait(t, connectedOn, "/testing")
}

func TestAddrResolution(t *testing.T) {
ctx := context.Background()

p1 := test.RandPeerIDFatal(t)
p2 := test.RandPeerIDFatal(t)
addr1 := ma.StringCast("/dnsaddr/example.com")
addr2 := ma.StringCast("/ip4/192.0.2.1/tcp/123")
p2paddr1 := ma.StringCast("/dnsaddr/example.com/p2p/" + p1.Pretty())
p2paddr2 := ma.StringCast("/ip4/192.0.2.1/tcp/123/p2p/" + p1.Pretty())
p2paddr3 := ma.StringCast("/ip4/192.0.2.1/tcp/123/p2p/" + p2.Pretty())

backend := &madns.MockResolver{
TXT: map[string][]string{"_dnsaddr.example.com": {
"dnsaddr=" + p2paddr2.String(), "dnsaddr=" + p2paddr3.String(),
}},
}
resolver, err := madns.NewResolver(madns.WithDefaultResolver(backend))
require.NoError(t, err)

h, err := NewHost(swarmt.GenSwarm(t), &HostOpts{MultiaddrResolver: resolver})
require.NoError(t, err)
defer h.Close()

pi, err := peer.AddrInfoFromP2pAddr(p2paddr1)
require.NoError(t, err)

tctx, cancel := context.WithTimeout(ctx, time.Millisecond*100)
defer cancel()
_ = h.Connect(tctx, *pi)

addrs := h.Peerstore().Addrs(pi.ID)

require.Len(t, addrs, 2)
require.Contains(t, addrs, addr1)
require.Contains(t, addrs, addr2)
}

func TestAddrResolutionRecursive(t *testing.T) {
ctx := context.Background()

p1, err := test.RandPeerID()
if err != nil {
t.Error(err)
}
p2, err := test.RandPeerID()
if err != nil {
t.Error(err)
}
addr1 := ma.StringCast("/dnsaddr/example.com")
addr2 := ma.StringCast("/ip4/192.0.2.1/tcp/123")
p2paddr1 := ma.StringCast("/dnsaddr/example.com/p2p/" + p1.Pretty())
p2paddr2 := ma.StringCast("/dnsaddr/example.com/p2p/" + p2.Pretty())
p2paddr1i := ma.StringCast("/dnsaddr/foo.example.com/p2p/" + p1.Pretty())
p2paddr2i := ma.StringCast("/dnsaddr/bar.example.com/p2p/" + p2.Pretty())
p2paddr1f := ma.StringCast("/ip4/192.0.2.1/tcp/123/p2p/" + p1.Pretty())

backend := &madns.MockResolver{
TXT: map[string][]string{
"_dnsaddr.example.com": {
"dnsaddr=" + p2paddr1i.String(),
"dnsaddr=" + p2paddr2i.String(),
},
"_dnsaddr.foo.example.com": {
"dnsaddr=" + p2paddr1f.String(),
},
"_dnsaddr.bar.example.com": {
"dnsaddr=" + p2paddr2i.String(),
},
},
}
resolver, err := madns.NewResolver(madns.WithDefaultResolver(backend))
if err != nil {
t.Fatal(err)
}

h, err := NewHost(swarmt.GenSwarm(t), &HostOpts{MultiaddrResolver: resolver})
require.NoError(t, err)
defer h.Close()

pi1, err := peer.AddrInfoFromP2pAddr(p2paddr1)
if err != nil {
t.Error(err)
}

tctx, cancel := context.WithTimeout(ctx, time.Millisecond*100)
defer cancel()
_ = h.Connect(tctx, *pi1)

addrs1 := h.Peerstore().Addrs(pi1.ID)
require.Len(t, addrs1, 2)
require.Contains(t, addrs1, addr1)
require.Contains(t, addrs1, addr2)

pi2, err := peer.AddrInfoFromP2pAddr(p2paddr2)
if err != nil {
t.Error(err)
}

_ = h.Connect(tctx, *pi2)

addrs2 := h.Peerstore().Addrs(pi2.ID)
require.Len(t, addrs2, 1)
require.Contains(t, addrs2, addr1)
}

func TestAddrChangeImmediatelyIfAddressNonEmpty(t *testing.T) {
ctx := context.Background()
taddrs := []ma.Multiaddr{ma.StringCast("/ip4/1.2.3.4/tcp/1234")}
p2p/net/swarm/dial_test.go (65 changes: 52 additions & 13 deletions)
@@ -11,17 +11,18 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/peerstore"
testutil "github.com/libp2p/go-libp2p/core/test"
. "github.com/libp2p/go-libp2p/p2p/net/swarm"
"github.com/libp2p/go-libp2p/p2p/net/swarm"
swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"

"github.com/libp2p/go-libp2p-testing/ci"

ma "github.com/multiformats/go-multiaddr"
madns "github.com/multiformats/go-multiaddr-dns"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/stretchr/testify/require"
)

func closeSwarms(swarms []*Swarm) {
func closeSwarms(swarms []*swarm.Swarm) {
for _, s := range swarms {
s.Close()
}
@@ -45,6 +46,44 @@ func TestBasicDialPeer(t *testing.T) {
s.Close()
}

func TestBasicDialPeerWithResolver(t *testing.T) {
t.Parallel()

mockResolver := madns.MockResolver{IP: make(map[string][]net.IPAddr)}
ipaddr, err := net.ResolveIPAddr("ip4", "127.0.0.1")
require.NoError(t, err)
mockResolver.IP["example.com"] = []net.IPAddr{*ipaddr}
resolver, err := madns.NewResolver(madns.WithDomainResolver("example.com", &mockResolver))
require.NoError(t, err)

swarms := makeSwarms(t, 2, swarmt.WithSwarmOpts([]swarm.Option{swarm.WithMultiaddrResolver(resolver)}))
defer closeSwarms(swarms)
s1 := swarms[0]
s2 := swarms[1]

// Change the multiaddr from /ip4/127.0.0.1/... to /dns4/example.com/... so
// that the resolver has to resolve it
var s2Addrs []ma.Multiaddr
for _, a := range s2.ListenAddresses() {
_, rest := ma.SplitFunc(a, func(c ma.Component) bool {
return c.Protocol().Code == ma.P_TCP || c.Protocol().Code == ma.P_UDP
},
)
if rest != nil {
s2Addrs = append(s2Addrs, ma.StringCast("/dns4/example.com").Encapsulate(rest))
}
}

s1.Peerstore().AddAddrs(s2.LocalPeer(), s2Addrs, peerstore.PermanentAddrTTL)

c, err := s1.DialPeer(context.Background(), s2.LocalPeer())
require.NoError(t, err)

s, err := c.NewStream(context.Background())
require.NoError(t, err)
s.Close()
}
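As an aside, the address rewrite in the test above relies on ma.SplitFunc, which splits a multiaddr at the first component for which the callback returns true, returning the prefix before that component and the remainder starting at it. A small standalone sketch of that behavior (the concrete address is just an example):

package main

import (
	"fmt"

	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	addr := ma.StringCast("/ip4/127.0.0.1/tcp/4001")
	// Split at the first TCP component: pre keeps what comes before it,
	// rest starts at the matching component.
	pre, rest := ma.SplitFunc(addr, func(c ma.Component) bool {
		return c.Protocol().Code == ma.P_TCP
	})
	fmt.Println(pre)  // /ip4/127.0.0.1
	fmt.Println(rest) // /tcp/4001
}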

func TestDialWithNoListeners(t *testing.T) {
t.Parallel()

@@ -90,7 +129,7 @@ func TestSimultDials(t *testing.T) {
{
var wg sync.WaitGroup
errs := make(chan error, 20) // 2 connect calls in each of the 10 for-loop iterations
connect := func(s *Swarm, dst peer.ID, addr ma.Multiaddr) {
connect := func(s *swarm.Swarm, dst peer.ID, addr ma.Multiaddr) {
// copy for other peer
log.Debugf("TestSimultOpen: connecting: %s --> %s (%s)", s.LocalPeer(), dst, addr)
s.Peerstore().AddAddr(dst, addr, peerstore.TempAddrTTL)
@@ -176,11 +215,11 @@ func TestDialWait(t *testing.T) {
}
duration := time.Since(before)

if duration < dialTimeout*DialAttempts {
t.Error("< dialTimeout * DialAttempts not being respected", duration, dialTimeout*DialAttempts)
if duration < dialTimeout*swarm.DialAttempts {
t.Error("< dialTimeout * DialAttempts not being respected", duration, dialTimeout*swarm.DialAttempts)
}
if duration > 2*dialTimeout*DialAttempts {
t.Error("> 2*dialTimeout * DialAttempts not being respected", duration, 2*dialTimeout*DialAttempts)
if duration > 2*dialTimeout*swarm.DialAttempts {
t.Error("> 2*dialTimeout * DialAttempts not being respected", duration, 2*dialTimeout*swarm.DialAttempts)
}

if !s1.Backoff().Backoff(s2p, s2addr) {
@@ -418,11 +457,11 @@ func TestDialBackoffClears(t *testing.T) {
require.Error(t, err, "dialing to broken addr worked...")
duration := time.Since(before)

if duration < dialTimeout*DialAttempts {
t.Error("< dialTimeout * DialAttempts not being respected", duration, dialTimeout*DialAttempts)
if duration < dialTimeout*swarm.DialAttempts {
t.Error("< dialTimeout * DialAttempts not being respected", duration, dialTimeout*swarm.DialAttempts)
}
if duration > 2*dialTimeout*DialAttempts {
t.Error("> 2*dialTimeout * DialAttempts not being respected", duration, 2*dialTimeout*DialAttempts)
if duration > 2*dialTimeout*swarm.DialAttempts {
t.Error("> 2*dialTimeout * DialAttempts not being respected", duration, 2*dialTimeout*swarm.DialAttempts)
}
require.True(t, s1.Backoff().Backoff(s2.LocalPeer(), s2bad), "s2 should now be on backoff")

@@ -462,7 +501,7 @@ func TestDialPeerFailed(t *testing.T) {
// * [/ip4/127.0.0.1/tcp/34881] failed to negotiate security protocol: context deadline exceeded
// ...

dialErr, ok := err.(*DialError)
dialErr, ok := err.(*swarm.DialError)
if !ok {
t.Fatalf("expected *DialError, got %T", err)
}
@@ -617,5 +656,5 @@ func TestDialSelf(t *testing.T) {
s1 := swarms[0]

_, err := s1.DialPeer(context.Background(), s1.LocalPeer())
require.ErrorIs(t, err, ErrDialToSelf, "expected error from self dial")
require.ErrorIs(t, err, swarm.ErrDialToSelf, "expected error from self dial")
}