|
1 |
| -/* eslint max-nested-callbacks: ["error", 8] */ |
2 | 1 | /* eslint-env mocha */
|
3 | 2 | 'use strict'
|
4 | 3 |
|
5 | 4 | const hat = require('hat')
|
| 5 | +const pmap = require('p-map') |
6 | 6 | const { expect } = require('interface-ipfs-core/src/utils/mocha')
|
7 |
| -const _ = require('lodash') |
8 |
| -const series = require('async/series') |
9 |
| -const waterfall = require('async/waterfall') |
10 |
| -const parallel = require('async/parallel') |
11 | 7 | const Block = require('ipfs-block')
|
12 |
| -const multiaddr = require('multiaddr') |
13 |
| -const { isNode } = require('ipfs-utils/src/env') |
14 | 8 | const multihashing = require('multihashing-async')
|
15 | 9 | const CID = require('cids')
|
16 |
| -const path = require('path') |
17 |
| -const IPFSFactory = require('ipfsd-ctl') |
18 |
| -const callbackify = require('callbackify') |
19 |
| -const IPFSHTTPClient = require('ipfs-http-client') |
| 10 | +const factory = require('../utils/factory') |
20 | 11 |
|
21 |
| -const IPFS = require('../../src/core') |
22 |
| - |
23 |
/**
 * Build a throw-away IPFS block containing a random payload, so repeated
 * test runs never collide on the same CID.
 *
 * @returns {Promise<Block>} a block whose CID is the sha2-256 of its data
 */
const makeBlock = async () => {
  const data = Buffer.from(`IPFS is awesome ${hat()}`)
  const hash = await multihashing(data, 'sha2-256')
  return new Block(data, new CID(hash))
}
|
89 | 18 |
|
describe('bitswap', function () {
  this.timeout(20 * 1000)
  const df = factory()

  // Tear down every daemon spawned by a test even when the test fails.
  // Cleaning inside the test body leaks daemons on assertion failure, and
  // an un-awaited `df.clean()` is a floating promise whose errors vanish.
  afterEach(() => df.clean())

  describe('transfer a block between', () => {
    it('2 peers', async function () {
      const remote = (await df.spawn({ type: 'js' })).api
      const proc = (await df.spawn({ type: 'proc' })).api
      // await the dial: starting the block exchange before the connection
      // is up makes the fetch race the handshake
      await proc.swarm.connect(remote.peerId.addresses[0])
      const block = await makeBlock()

      await proc.block.put(block)
      const b = await remote.block.get(block.cid)

      expect(b.data).to.eql(block.data)
    })

    it('3 peers', async () => {
      const blocks = await Promise.all([...Array(6).keys()].map(() => makeBlock()))
      const remote1 = (await df.spawn({ type: 'js' })).api
      const remote2 = (await df.spawn({ type: 'js' })).api
      const proc = (await df.spawn({ type: 'proc' })).api
      // fully mesh the three nodes; await each dial so every connection is
      // established before any block is requested
      await proc.swarm.connect(remote1.peerId.addresses[0])
      await proc.swarm.connect(remote2.peerId.addresses[0])
      await remote1.swarm.connect(remote2.peerId.addresses[0])

      // spread the six blocks across the three nodes
      await remote1.block.put(blocks[0])
      await remote1.block.put(blocks[1])
      await remote2.block.put(blocks[2])
      await remote2.block.put(blocks[3])
      await proc.block.put(blocks[4])
      await proc.block.put(blocks[5])

      // every node must be able to fetch every block over bitswap
      await pmap(blocks, async (block) => {
        expect(await remote1.block.get(block.cid)).to.eql(block)
        expect(await remote2.block.get(block.cid)).to.eql(block)
        expect(await proc.block.get(block.cid)).to.eql(block)
      }, { concurrency: 3 })
    })
  })

  describe('transfer a file between', () => {
    it('2 peers', async () => {
      // TODO make this test more interesting (10Mb file)
      // TODO remove randomness from the test
      const file = Buffer.from(`I love IPFS <3 ${hat()}`)
      const remote = (await df.spawn({ type: 'js' })).api
      const proc = (await df.spawn({ type: 'proc' })).api
      // await the dial (see note in the block-transfer tests)
      await proc.swarm.connect(remote.peerId.addresses[0])

      const files = await remote.add([{ path: 'awesome.txt', content: file }])
      const data = await proc.cat(files[0].hash)
      expect(data).to.eql(file)
    })
  })

  describe('unwant', () => {
    it('should callback with error for invalid CID input', async () => {
      const proc = (await df.spawn({ type: 'proc' })).api
      // Capture the rejection and assert on it AFTER the try/catch:
      // asserting only inside `catch` lets the test pass silently when
      // `unwant` wrongly resolves for an invalid CID.
      let err
      try {
        await proc.bitswap.unwant('INVALID CID')
      } catch (e) {
        err = e
      }
      expect(err).to.exist()
      expect(err.code).to.equal('ERR_INVALID_CID')
    })
  })
})
|
0 commit comments