fix(cli): make swarm addrs more resilient (#2083)
* fix(cli): make swarm addrs more resilient

Don't fail if decapsulation of the ipfs component fails; Go node addresses typically won't contain their peer ID (see the sketch below the file summary).

* test: add test for addrs handler

* refactor(cli): have swarm addrs return the output for printing

* chore: fix linting
jacobheun authored and Alan Shaw committed May 22, 2019
1 parent 84978c4 commit 3792b68
Showing 2 changed files with 112 additions and 47 deletions.
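
For context, here is a minimal sketch (not part of the commit) of the fallback described in the first commit-message bullet, using the same multiaddr package the handler already depends on; the formatAddr helper name is made up for illustration:

'use strict'

const multiaddr = require('multiaddr')

// go-ipfs peers typically announce addresses without an /ipfs/<peerId> suffix,
// so decapsulating 'ipfs' throws; fall back to the address as-is.
function formatAddr (addr) {
  try {
    return addr.decapsulate('ipfs').toString()
  } catch (_) {
    return addr.toString()
  }
}

console.log(formatAddr(multiaddr('/ip4/127.0.0.1/tcp/4001')))
// => /ip4/127.0.0.1/tcp/4001 (no /ipfs/ component, so decapsulate threw)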
25 changes: 18 additions & 7 deletions src/cli/commands/swarm/addrs.js
@@ -1,7 +1,5 @@
 'use strict'
 
-const print = require('../../utils').print
-
 module.exports = {
   command: 'addrs',
 
@@ -16,15 +14,28 @@ module.exports = {
     argv.resolve((async () => {
       const ipfs = await argv.getIpfs()
       const res = await ipfs.swarm.addrs()
-      res.forEach((peer) => {
+
+      const output = res.map((peer) => {
         const count = peer.multiaddrs.size
-        print(`${peer.id.toB58String()} (${count})`)
+        const peerAddrs = [`${peer.id.toB58String()} (${count})`]
+
-        peer.multiaddrs.forEach((addr) => {
-          const res = addr.decapsulate('ipfs').toString()
-          print(`\t${res}`)
+        peer.multiaddrs.toArray().map((addr) => {
+          let res
+          try {
+            res = addr.decapsulate('ipfs').toString()
+          } catch (_) {
+            // peer addresses dont need to have /ipfs/ as we know their peerId
+            // and can encapsulate on dial.
+            res = addr.toString()
+          }
+          peerAddrs.push(`\t${res}`)
         })
+
+        return peerAddrs.join('\n')
       })
+
+      // Return the output for printing
+      return { data: output.join('\n'), argv }
     })())
   }
 }
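
Because the handler now resolves to { data } instead of printing directly, here is a rough usage sketch (not from the repo; the ipfs stub and the console.log wiring are assumptions) of how a caller can consume the returned output:

'use strict'

const addrsCommand = require('./src/cli/commands/swarm/addrs')

// Stand-in for a real IPFS API instance; a live node resolves swarm.addrs()
// to the connected peers' info objects.
const ipfs = { swarm: { addrs: async () => [] } }

addrsCommand.handler({
  getIpfs: () => ipfs,
  resolve: (promise) => promise.then(({ data }) => {
    // With connected peers, data is one block per peer, e.g.
    //   <peerId> (2)
    //       /ip4/127.0.0.1/tcp/4001
    //       /ip4/127.0.0.1/tcp/4001/ws
    console.log(data)
  })
})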
134 changes: 94 additions & 40 deletions test/cli/swarm.js
@@ -6,10 +6,15 @@ const chai = require('chai')
 const dirtyChai = require('dirty-chai')
 const expect = chai.expect
 chai.use(dirtyChai)
-const series = require('async/series')
+const sinon = require('sinon')
 const ipfsExec = require('../utils/ipfs-exec')
 const path = require('path')
 const parallel = require('async/parallel')
+const addrsCommand = require('../../src/cli/commands/swarm/addrs')
+
+const multiaddr = require('multiaddr')
+const PeerInfo = require('peer-info')
+const PeerId = require('peer-id')
 
 const DaemonFactory = require('ipfsd-ctl')
 const df = DaemonFactory.create({ type: 'js' })
@@ -25,50 +30,54 @@ const config = {
 }
 
 describe('swarm', () => {
-  let bMultiaddr
-  let ipfsA
-
-  let nodes = []
-  before(function (done) {
-    // CI takes longer to instantiate the daemon, so we need to increase the
-    // timeout for the before step
-    this.timeout(80 * 1000)
-
-    series([
-      (cb) => {
-        df.spawn({
-          exec: path.resolve(`${__dirname}/../../src/cli/bin.js`),
-          config,
-          initOptions: { bits: 512 }
-        }, (err, node) => {
-          expect(err).to.not.exist()
-          ipfsA = ipfsExec(node.repoPath)
-          nodes.push(node)
-          cb()
-        })
-      },
-      (cb) => {
-        df.spawn({
-          exec: path.resolve(`${__dirname}/../../src/cli/bin.js`),
-          config,
-          initOptions: { bits: 512 }
-        }, (err, node) => {
-          expect(err).to.not.exist()
-          node.api.id((err, id) => {
+  afterEach(() => {
+    sinon.restore()
+  })
+
+  describe('daemon on (through http-api)', function () {
+    this.timeout(60 * 1000)
+
+    let bMultiaddr
+    let ipfsA
+
+    let nodes = []
+    before(function (done) {
+      // CI takes longer to instantiate the daemon, so we need to increase the
+      // timeout for the before step
+      this.timeout(80 * 1000)
+
+      parallel([
+        (cb) => {
+          df.spawn({
+            exec: path.resolve(`${__dirname}/../../src/cli/bin.js`),
+            config,
+            initOptions: { bits: 512 }
+          }, (err, node) => {
             expect(err).to.not.exist()
-            bMultiaddr = id.addresses[0]
+            ipfsA = ipfsExec(node.repoPath)
             nodes.push(node)
             cb()
           })
-        })
-      }
-    ], done)
-  })
-
-  after((done) => parallel(nodes.map((node) => (cb) => node.stop(cb)), done))
+        },
+        (cb) => {
+          df.spawn({
+            exec: path.resolve(`${__dirname}/../../src/cli/bin.js`),
+            config,
+            initOptions: { bits: 512 }
+          }, (err, node) => {
+            expect(err).to.not.exist()
+            node.api.id((err, id) => {
+              expect(err).to.not.exist()
+              bMultiaddr = id.addresses[0]
+              nodes.push(node)
+              cb()
+            })
+          })
+        }
+      ], done)
+    })
 
-  describe('daemon on (through http-api)', function () {
-    this.timeout(60 * 1000)
+    after((done) => parallel(nodes.map((node) => (cb) => node.stop(cb)), done))
 
     it('connect', () => {
       return ipfsA('swarm', 'connect', bMultiaddr).then((out) => {
@@ -108,4 +117,49 @@
      })
    })
  })
+
+  describe('handlers', () => {
+    let peerInfo
+    const ipfs = {
+      swarm: { addrs: () => {} }
+    }
+    const argv = {
+      resolve: () => {},
+      getIpfs: () => ipfs
+    }
+
+    describe('addrs', () => {
+      before((done) => {
+        PeerId.create({ bits: 512 }, (err, peerId) => {
+          if (err) return done(err)
+          peerInfo = new PeerInfo(peerId)
+          done()
+        })
+      })
+
+      it('should return addresses for all peers', (done) => {
+        sinon.stub(argv, 'resolve').callsFake(promise => {
+          promise.then(({ data }) => {
+            expect(data).to.eql([
+              `${peerInfo.id.toB58String()} (2)`,
+              `\t/ip4/127.0.0.1/tcp/4001`,
+              `\t/ip4/127.0.0.1/tcp/4001/ws`
+            ].join('\n'))
+            done()
+          })
+        })
+
+        sinon.stub(peerInfo.multiaddrs, '_multiaddrs').value([
+          multiaddr('/ip4/127.0.0.1/tcp/4001'),
+          multiaddr(`/ip4/127.0.0.1/tcp/4001/ws/ipfs/${peerInfo.id.toB58String()}`)
+        ])
+
+        sinon.stub(ipfs.swarm, 'addrs').returns(
+          Promise.resolve([peerInfo])
+        )
+
+        addrsCommand.handler(argv)
+      })
+    })
+  })
 })
