diff --git a/benchmarks_test.go b/benchmarks_test.go index 1671b9bb..501488de 100644 --- a/benchmarks_test.go +++ b/benchmarks_test.go @@ -3,7 +3,9 @@ package bitswap_test import ( "context" "encoding/json" + "fmt" "io/ioutil" + "math" "math/rand" "os" "strconv" @@ -19,7 +21,6 @@ import ( testinstance "github.com/ipfs/go-bitswap/testinstance" tn "github.com/ipfs/go-bitswap/testnet" cid "github.com/ipfs/go-cid" - blocksutil "github.com/ipfs/go-ipfs-blocksutil" delay "github.com/ipfs/go-ipfs-delay" mockrouting "github.com/ipfs/go-ipfs-routing/mock" ) @@ -29,89 +30,114 @@ type fetchFunc func(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) type distFunc func(b *testing.B, provs []testinstance.Instance, blocks []blocks.Block) type runStats struct { - Dups uint64 - MsgSent uint64 - MsgRecd uint64 - Time time.Duration - Name string + DupsRcvd uint64 + BlksRcvd uint64 + MsgSent uint64 + MsgRecd uint64 + Time time.Duration + Name string } var benchmarkLog []runStats -func BenchmarkDups2Nodes(b *testing.B) { +type bench struct { + name string + nodeCount int + blockCount int + distFn distFunc + fetchFn fetchFunc +} + +var benches = []bench{ + // Fetch from two seed nodes that both have all 100 blocks + // - request one at a time, in series + bench{"3Nodes-AllToAll-OneAtATime", 3, 100, allToAll, oneAtATime}, + // - request all 100 with a single GetBlocks() call + bench{"3Nodes-AllToAll-BigBatch", 3, 100, allToAll, batchFetchAll}, + + // Fetch from two seed nodes, one at a time, where: + // - node A has blocks 0 - 74 + // - node B has blocks 25 - 99 + bench{"3Nodes-Overlap1-OneAtATime", 3, 100, overlap1, oneAtATime}, + + // Fetch from two seed nodes, where: + // - node A has even blocks + // - node B has odd blocks + // - both nodes have every third block + + // - request one at a time, in series + bench{"3Nodes-Overlap3-OneAtATime", 3, 100, overlap2, oneAtATime}, + // - request 10 at a time, in series + bench{"3Nodes-Overlap3-BatchBy10", 3, 100, overlap2, batchFetchBy10}, + // - request all 100 in parallel as individual GetBlock() calls + bench{"3Nodes-Overlap3-AllConcurrent", 3, 100, overlap2, fetchAllConcurrent}, + // - request all 100 with a single GetBlocks() call + bench{"3Nodes-Overlap3-BigBatch", 3, 100, overlap2, batchFetchAll}, + // - request 1, then 10, then 89 blocks (similar to how IPFS would fetch a file) + bench{"3Nodes-Overlap3-UnixfsFetch", 3, 100, overlap2, unixfsFileFetch}, + + // Fetch from nine seed nodes, all nodes have all blocks + // - request one at a time, in series + bench{"10Nodes-AllToAll-OneAtATime", 10, 100, allToAll, oneAtATime}, + // - request 10 at a time, in series + bench{"10Nodes-AllToAll-BatchFetchBy10", 10, 100, allToAll, batchFetchBy10}, + // - request all 100 with a single GetBlocks() call + bench{"10Nodes-AllToAll-BigBatch", 10, 100, allToAll, batchFetchAll}, + // - request all 100 in parallel as individual GetBlock() calls + bench{"10Nodes-AllToAll-AllConcurrent", 10, 100, allToAll, fetchAllConcurrent}, + // - request 1, then 10, then 89 blocks (similar to how IPFS would fetch a file) + bench{"10Nodes-AllToAll-UnixfsFetch", 10, 100, allToAll, unixfsFileFetch}, + // - follow a typical IPFS request pattern for 1000 blocks + bench{"10Nodes-AllToAll-UnixfsFetchLarge", 10, 1000, allToAll, unixfsFileFetchLarge}, + + // Fetch from nine seed nodes, blocks are distributed randomly across all nodes (no dups) + // - request one at a time, in series + bench{"10Nodes-OnePeerPerBlock-OneAtATime", 10, 100, onePeerPerBlock, oneAtATime}, + // - request all 100 with a single 
GetBlocks() call + bench{"10Nodes-OnePeerPerBlock-BigBatch", 10, 100, onePeerPerBlock, batchFetchAll}, + // - request 1, then 10, then 89 blocks (similar to how IPFS would fetch a file) + bench{"10Nodes-OnePeerPerBlock-UnixfsFetch", 10, 100, onePeerPerBlock, unixfsFileFetch}, + + // Fetch from 199 seed nodes, all nodes have all blocks, fetch all 20 blocks with a single GetBlocks() call + bench{"200Nodes-AllToAll-BigBatch", 200, 20, allToAll, batchFetchAll}, +} + +func BenchmarkFixedDelay(b *testing.B) { benchmarkLog = nil fixedDelay := delay.Fixed(10 * time.Millisecond) - b.Run("AllToAll-OneAtATime", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, fixedDelay, allToAll, oneAtATime) - }) - b.Run("AllToAll-BigBatch", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, fixedDelay, allToAll, batchFetchAll) - }) + bstoreLatency := time.Duration(0) - b.Run("Overlap1-OneAtATime", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap1, oneAtATime) - }) + for _, bch := range benches { + b.Run(bch.name, func(b *testing.B) { + subtestDistributeAndFetch(b, bch.nodeCount, bch.blockCount, fixedDelay, bstoreLatency, bch.distFn, bch.fetchFn) + }) + } - b.Run("Overlap3-OneAtATime", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap2, oneAtATime) - }) - b.Run("Overlap3-BatchBy10", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap2, batchFetchBy10) - }) - b.Run("Overlap3-AllConcurrent", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap2, fetchAllConcurrent) - }) - b.Run("Overlap3-BigBatch", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap2, batchFetchAll) - }) - b.Run("Overlap3-UnixfsFetch", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap2, unixfsFileFetch) - }) - b.Run("10Nodes-AllToAll-OneAtATime", func(b *testing.B) { - subtestDistributeAndFetch(b, 10, 100, fixedDelay, allToAll, oneAtATime) - }) - b.Run("10Nodes-AllToAll-BatchFetchBy10", func(b *testing.B) { - subtestDistributeAndFetch(b, 10, 100, fixedDelay, allToAll, batchFetchBy10) - }) - b.Run("10Nodes-AllToAll-BigBatch", func(b *testing.B) { - subtestDistributeAndFetch(b, 10, 100, fixedDelay, allToAll, batchFetchAll) - }) - b.Run("10Nodes-AllToAll-AllConcurrent", func(b *testing.B) { - subtestDistributeAndFetch(b, 10, 100, fixedDelay, allToAll, fetchAllConcurrent) - }) - b.Run("10Nodes-AllToAll-UnixfsFetch", func(b *testing.B) { - subtestDistributeAndFetch(b, 10, 100, fixedDelay, allToAll, unixfsFileFetch) - }) - b.Run("10Nodes-OnePeerPerBlock-OneAtATime", func(b *testing.B) { - subtestDistributeAndFetch(b, 10, 100, fixedDelay, onePeerPerBlock, oneAtATime) - }) - b.Run("10Nodes-OnePeerPerBlock-BigBatch", func(b *testing.B) { - subtestDistributeAndFetch(b, 10, 100, fixedDelay, onePeerPerBlock, batchFetchAll) - }) - b.Run("10Nodes-OnePeerPerBlock-UnixfsFetch", func(b *testing.B) { - subtestDistributeAndFetch(b, 10, 100, fixedDelay, onePeerPerBlock, unixfsFileFetch) - }) - b.Run("200Nodes-AllToAll-BigBatch", func(b *testing.B) { - subtestDistributeAndFetch(b, 200, 20, fixedDelay, allToAll, batchFetchAll) - }) out, _ := json.MarshalIndent(benchmarkLog, "", " ") _ = ioutil.WriteFile("tmp/benchmark.json", out, 0666) + printResults(benchmarkLog) } +const datacenterSpeed = 5 * time.Millisecond const fastSpeed = 60 * time.Millisecond const mediumSpeed = 200 * time.Millisecond const slowSpeed = 800 * time.Millisecond const superSlowSpeed = 4000 * time.Millisecond 
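+// The datacenter* constants simulate nodes connected within a single
+// datacenter: ~5ms latency and 125,000,000 bytes/s (~1Gbps) of bandwidth.
+// They are used by BenchmarkDatacenter and BenchmarkDatacenterMultiLeechMultiSeed.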
+const datacenterDistribution = 3 * time.Millisecond const distribution = 20 * time.Millisecond +const datacenterBandwidth = 125000000.0 +const datacenterBandwidthDeviation = 3000000.0 const fastBandwidth = 1250000.0 const fastBandwidthDeviation = 300000.0 const mediumBandwidth = 500000.0 const mediumBandwidthDeviation = 80000.0 const slowBandwidth = 100000.0 const slowBandwidthDeviation = 16500.0 +const rootBlockSize = 800 const stdBlockSize = 8000 +const largeBlockSize = int64(256 * 1024) -func BenchmarkDupsManyNodesRealWorldNetwork(b *testing.B) { +func BenchmarkRealWorld(b *testing.B) { benchmarkLog = nil benchmarkSeed, err := strconv.ParseInt(os.Getenv("BENCHMARK_SEED"), 10, 64) var randomGen *rand.Rand = nil @@ -134,67 +160,198 @@ func BenchmarkDupsManyNodesRealWorldNetwork(b *testing.B) { 0.3, 0.3, distribution, randomGen) slowNetworkDelay := delay.Delay(fastSpeed, slowNetworkDelayGenerator) slowBandwidthGenerator := tn.VariableRateLimitGenerator(slowBandwidth, slowBandwidthDeviation, randomGen) + bstoreLatency := time.Duration(0) b.Run("200Nodes-AllToAll-BigBatch-FastNetwork", func(b *testing.B) { - subtestDistributeAndFetchRateLimited(b, 300, 200, fastNetworkDelay, fastBandwidthGenerator, stdBlockSize, allToAll, batchFetchAll) + subtestDistributeAndFetchRateLimited(b, 300, 200, fastNetworkDelay, fastBandwidthGenerator, stdBlockSize, bstoreLatency, allToAll, batchFetchAll) }) b.Run("200Nodes-AllToAll-BigBatch-AverageVariableSpeedNetwork", func(b *testing.B) { - subtestDistributeAndFetchRateLimited(b, 300, 200, averageNetworkDelay, averageBandwidthGenerator, stdBlockSize, allToAll, batchFetchAll) + subtestDistributeAndFetchRateLimited(b, 300, 200, averageNetworkDelay, averageBandwidthGenerator, stdBlockSize, bstoreLatency, allToAll, batchFetchAll) }) b.Run("200Nodes-AllToAll-BigBatch-SlowVariableSpeedNetwork", func(b *testing.B) { - subtestDistributeAndFetchRateLimited(b, 300, 200, slowNetworkDelay, slowBandwidthGenerator, stdBlockSize, allToAll, batchFetchAll) + subtestDistributeAndFetchRateLimited(b, 300, 200, slowNetworkDelay, slowBandwidthGenerator, stdBlockSize, bstoreLatency, allToAll, batchFetchAll) }) out, _ := json.MarshalIndent(benchmarkLog, "", " ") _ = ioutil.WriteFile("tmp/rw-benchmark.json", out, 0666) + printResults(benchmarkLog) +} + +func BenchmarkDatacenter(b *testing.B) { + benchmarkLog = nil + benchmarkSeed, err := strconv.ParseInt(os.Getenv("BENCHMARK_SEED"), 10, 64) + var randomGen *rand.Rand = nil + if err == nil { + randomGen = rand.New(rand.NewSource(benchmarkSeed)) + } + + datacenterNetworkDelayGenerator := tn.InternetLatencyDelayGenerator( + fastSpeed-datacenterSpeed, (fastSpeed-datacenterSpeed)/2, + 0.0, 0.0, datacenterDistribution, randomGen) + datacenterNetworkDelay := delay.Delay(datacenterSpeed, datacenterNetworkDelayGenerator) + datacenterBandwidthGenerator := tn.VariableRateLimitGenerator(datacenterBandwidth, datacenterBandwidthDeviation, randomGen) + bstoreLatency := time.Millisecond * 25 + + b.Run("3Nodes-Overlap3-UnixfsFetch", func(b *testing.B) { + subtestDistributeAndFetchRateLimited(b, 3, 100, datacenterNetworkDelay, datacenterBandwidthGenerator, largeBlockSize, bstoreLatency, allToAll, unixfsFileFetch) + }) + out, _ := json.MarshalIndent(benchmarkLog, "", " ") + _ = ioutil.WriteFile("tmp/rb-benchmark.json", out, 0666) + printResults(benchmarkLog) +} + +func BenchmarkDatacenterMultiLeechMultiSeed(b *testing.B) { + benchmarkLog = nil + benchmarkSeed, err := strconv.ParseInt(os.Getenv("BENCHMARK_SEED"), 10, 64) + var randomGen *rand.Rand = nil + 
if err == nil { + randomGen = rand.New(rand.NewSource(benchmarkSeed)) + } + + datacenterNetworkDelayGenerator := tn.InternetLatencyDelayGenerator( + fastSpeed-datacenterSpeed, (fastSpeed-datacenterSpeed)/2, + 0.0, 0.0, datacenterDistribution, randomGen) + datacenterNetworkDelay := delay.Delay(datacenterSpeed, datacenterNetworkDelayGenerator) + datacenterBandwidthGenerator := tn.VariableRateLimitGenerator(datacenterBandwidth, datacenterBandwidthDeviation, randomGen) + bstoreLatency := time.Millisecond * 25 + + b.Run("3Leech3Seed-AllToAll-UnixfsFetch", func(b *testing.B) { + d := datacenterNetworkDelay + rateLimitGenerator := datacenterBandwidthGenerator + blockSize := largeBlockSize + df := allToAll + ff := unixfsFileFetchLarge + numnodes := 6 + numblks := 1000 + + for i := 0; i < b.N; i++ { + net := tn.RateLimitedVirtualNetwork(mockrouting.NewServer(), d, rateLimitGenerator) + + ig := testinstance.NewTestInstanceGenerator(net) + defer ig.Close() + + instances := ig.Instances(numnodes) + blocks := testutil.GenerateBlocksOfSize(numblks, blockSize) + runDistributionMulti(b, instances, 3, blocks, bstoreLatency, df, ff) + } + }) + + out, _ := json.MarshalIndent(benchmarkLog, "", " ") + _ = ioutil.WriteFile("tmp/rb-benchmark.json", out, 0666) + printResults(benchmarkLog) } -func subtestDistributeAndFetch(b *testing.B, numnodes, numblks int, d delay.D, df distFunc, ff fetchFunc) { +func subtestDistributeAndFetch(b *testing.B, numnodes, numblks int, d delay.D, bstoreLatency time.Duration, df distFunc, ff fetchFunc) { for i := 0; i < b.N; i++ { - start := time.Now() net := tn.VirtualNetwork(mockrouting.NewServer(), d) ig := testinstance.NewTestInstanceGenerator(net) - defer ig.Close() - - bg := blocksutil.NewBlockGenerator() instances := ig.Instances(numnodes) - blocks := bg.Blocks(numblks) - runDistribution(b, instances, blocks, df, ff, start) + rootBlock := testutil.GenerateBlocksOfSize(1, rootBlockSize) + blocks := testutil.GenerateBlocksOfSize(numblks, stdBlockSize) + blocks[0] = rootBlock[0] + runDistribution(b, instances, blocks, bstoreLatency, df, ff) + ig.Close() + // panic("done") } } -func subtestDistributeAndFetchRateLimited(b *testing.B, numnodes, numblks int, d delay.D, rateLimitGenerator tn.RateLimitGenerator, blockSize int64, df distFunc, ff fetchFunc) { +func subtestDistributeAndFetchRateLimited(b *testing.B, numnodes, numblks int, d delay.D, rateLimitGenerator tn.RateLimitGenerator, blockSize int64, bstoreLatency time.Duration, df distFunc, ff fetchFunc) { for i := 0; i < b.N; i++ { - - start := time.Now() net := tn.RateLimitedVirtualNetwork(mockrouting.NewServer(), d, rateLimitGenerator) ig := testinstance.NewTestInstanceGenerator(net) defer ig.Close() instances := ig.Instances(numnodes) + rootBlock := testutil.GenerateBlocksOfSize(1, rootBlockSize) blocks := testutil.GenerateBlocksOfSize(numblks, blockSize) - - runDistribution(b, instances, blocks, df, ff, start) + blocks[0] = rootBlock[0] + runDistribution(b, instances, blocks, bstoreLatency, df, ff) } } -func runDistribution(b *testing.B, instances []testinstance.Instance, blocks []blocks.Block, df distFunc, ff fetchFunc, start time.Time) { - +func runDistributionMulti(b *testing.B, instances []testinstance.Instance, numFetchers int, blocks []blocks.Block, bstoreLatency time.Duration, df distFunc, ff fetchFunc) { numnodes := len(instances) + fetchers := instances[numnodes-numFetchers:] + + // Distribute blocks to seed nodes + seeds := instances[:numnodes-numFetchers] + df(b, seeds, blocks) + + // Set the blockstore latency on 
seed nodes + if bstoreLatency > 0 { + for _, i := range seeds { + i.SetBlockstoreLatency(bstoreLatency) + } + } + + // Fetch blocks (from seed nodes to leech nodes) + var ks []cid.Cid + for _, blk := range blocks { + ks = append(ks, blk.Cid()) + } + + start := time.Now() + var wg sync.WaitGroup + for _, fetcher := range fetchers { + wg.Add(1) + + go func(ftchr testinstance.Instance) { + defer wg.Done() + + ff(b, ftchr.Exchange, ks) + }(fetcher) + } + wg.Wait() + + // Collect statistics + fetcher := fetchers[0] + st, err := fetcher.Exchange.Stat() + if err != nil { + b.Fatal(err) + } + + for _, fetcher := range fetchers { + nst := fetcher.Adapter.Stats() + stats := runStats{ + Time: time.Since(start), + MsgRecd: nst.MessagesRecvd, + MsgSent: nst.MessagesSent, + DupsRcvd: st.DupBlksReceived, + BlksRcvd: st.BlocksReceived, + Name: b.Name(), + } + benchmarkLog = append(benchmarkLog, stats) + } + // b.Logf("send/recv: %d / %d (dups: %d)", nst.MessagesSent, nst.MessagesRecvd, st.DupBlksReceived) +} +func runDistribution(b *testing.B, instances []testinstance.Instance, blocks []blocks.Block, bstoreLatency time.Duration, df distFunc, ff fetchFunc) { + numnodes := len(instances) fetcher := instances[numnodes-1] - df(b, instances[:numnodes-1], blocks) + // Distribute blocks to seed nodes + seeds := instances[:numnodes-1] + df(b, seeds, blocks) + // Set the blockstore latency on seed nodes + if bstoreLatency > 0 { + for _, i := range seeds { + i.SetBlockstoreLatency(bstoreLatency) + } + } + + // Fetch blocks (from seed nodes to leech nodes) var ks []cid.Cid for _, blk := range blocks { ks = append(ks, blk.Cid()) } + start := time.Now() ff(b, fetcher.Exchange, ks) + // Collect statistics st, err := fetcher.Exchange.Stat() if err != nil { b.Fatal(err) @@ -202,14 +359,15 @@ func runDistribution(b *testing.B, instances []testinstance.Instance, blocks []b nst := fetcher.Adapter.Stats() stats := runStats{ - Time: time.Since(start), - MsgRecd: nst.MessagesRecvd, - MsgSent: nst.MessagesSent, - Dups: st.DupBlksReceived, - Name: b.Name(), + Time: time.Since(start), + MsgRecd: nst.MessagesRecvd, + MsgSent: nst.MessagesSent, + DupsRcvd: st.DupBlksReceived, + BlksRcvd: st.BlocksReceived, + Name: b.Name(), } benchmarkLog = append(benchmarkLog, stats) - b.Logf("send/recv: %d / %d", nst.MessagesSent, nst.MessagesRecvd) + // b.Logf("send/recv: %d / %d (dups: %d)", nst.MessagesSent, nst.MessagesRecvd, st.DupBlksReceived) } func allToAll(b *testing.B, provs []testinstance.Instance, blocks []blocks.Block) { @@ -282,7 +440,7 @@ func oneAtATime(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) { b.Fatal(err) } } - b.Logf("Session fetch latency: %s", ses.GetAverageLatency()) + // b.Logf("Session fetch latency: %s", ses.GetAverageLatency()) } // fetch data in batches, 10 at a time @@ -348,3 +506,111 @@ func unixfsFileFetch(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) { for range out { } } + +func unixfsFileFetchLarge(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) { + ses := bs.NewSession(context.Background()) + _, err := ses.GetBlock(context.Background(), ks[0]) + if err != nil { + b.Fatal(err) + } + + out, err := ses.GetBlocks(context.Background(), ks[1:11]) + if err != nil { + b.Fatal(err) + } + for range out { + } + + out, err = ses.GetBlocks(context.Background(), ks[11:100]) + if err != nil { + b.Fatal(err) + } + for range out { + } + + rest := ks[100:] + for len(rest) > 0 { + var batch [][]cid.Cid + for i := 0; i < 5 && len(rest) > 0; i++ { + cnt := 10 + if len(rest) < 10 { + cnt = len(rest) + } + group := 
rest[:cnt] + rest = rest[cnt:] + batch = append(batch, group) + } + + var anyErr error + var wg sync.WaitGroup + for _, group := range batch { + wg.Add(1) + go func(grp []cid.Cid) { + defer wg.Done() + + out, err = ses.GetBlocks(context.Background(), grp) + if err != nil { + anyErr = err + } + for range out { + } + }(group) + } + wg.Wait() + + // Note: b.Fatal() cannot be called from within a go-routine + if anyErr != nil { + b.Fatal(anyErr) + } + } +} + +func printResults(rs []runStats) { + nameOrder := make([]string, 0) + names := make(map[string]struct{}) + for i := 0; i < len(rs); i++ { + if _, ok := names[rs[i].Name]; !ok { + nameOrder = append(nameOrder, rs[i].Name) + names[rs[i].Name] = struct{}{} + } + } + + for i := 0; i < len(names); i++ { + name := nameOrder[i] + count := 0 + sent := 0.0 + rcvd := 0.0 + dups := 0.0 + blks := 0.0 + elpd := 0.0 + for i := 0; i < len(rs); i++ { + if rs[i].Name == name { + count++ + sent += float64(rs[i].MsgSent) + rcvd += float64(rs[i].MsgRecd) + dups += float64(rs[i].DupsRcvd) + blks += float64(rs[i].BlksRcvd) + elpd += float64(rs[i].Time) + } + } + sent /= float64(count) + rcvd /= float64(count) + dups /= float64(count) + blks /= float64(count) + + label := fmt.Sprintf("%s (%d runs / %.2fs):", name, count, elpd/1000000000.0) + fmt.Printf("%-75s %s: sent %d, recv %d, dups %d / %d\n", + label, + fmtDuration(time.Duration(int64(math.Round(elpd/float64(count))))), + int64(math.Round(sent)), int64(math.Round(rcvd)), + int64(math.Round(dups)), int64(math.Round(blks))) + } +} + +func fmtDuration(d time.Duration) string { + d = d.Round(time.Millisecond) + s := d / time.Second + d -= s * time.Second + ms := d / time.Millisecond + return fmt.Sprintf("%d.%03ds", s, ms) +} diff --git a/bitswap.go b/bitswap.go index 93759802..d607274d 100644 --- a/bitswap.go +++ b/bitswap.go @@ -5,12 +5,13 @@ package bitswap import ( "context" "errors" + "sync" "time" - bssrs "github.com/ipfs/go-bitswap/sessionrequestsplitter" delay "github.com/ipfs/go-ipfs-delay" + bsbpm "github.com/ipfs/go-bitswap/blockpresencemanager" decision "github.com/ipfs/go-bitswap/decision" bsgetter "github.com/ipfs/go-bitswap/getter" bsmsg "github.com/ipfs/go-bitswap/message" @@ -20,6 +21,7 @@ import ( bspm "github.com/ipfs/go-bitswap/peermanager" bspqm "github.com/ipfs/go-bitswap/providerquerymanager" bssession "github.com/ipfs/go-bitswap/session" + bssim "github.com/ipfs/go-bitswap/sessioninterestmanager" bssm "github.com/ipfs/go-bitswap/sessionmanager" bsspm "github.com/ipfs/go-bitswap/sessionpeermanager" bswm "github.com/ipfs/go-bitswap/wantmanager" @@ -113,24 +115,30 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, return bsmq.New(ctx, p, network) } - wm := bswm.New(ctx, bspm.New(ctx, peerQueueFactory)) + sim := bssim.New() + bpm := bsbpm.New() + pm := bspm.New(ctx, peerQueueFactory, network.Self()) + wm := bswm.New(ctx, pm, sim, bpm) pqm := bspqm.New(ctx, network) - sessionFactory := func(ctx context.Context, id uint64, pm bssession.PeerManager, srs bssession.RequestSplitter, + sessionFactory := func(ctx context.Context, id uint64, spm bssession.SessionPeerManager, + sim *bssim.SessionInterestManager, + pm bssession.PeerManager, + bpm *bsbpm.BlockPresenceManager, notif notifications.PubSub, provSearchDelay time.Duration, - rebroadcastDelay delay.D) bssm.Session { - return bssession.New(ctx, id, wm, pm, srs, notif, provSearchDelay, rebroadcastDelay) + rebroadcastDelay delay.D, + self peer.ID) bssm.Session { + return bssession.New(ctx, id, wm, spm, sim, pm, bpm, notif, 
provSearchDelay, rebroadcastDelay, self) } - sessionPeerManagerFactory := func(ctx context.Context, id uint64) bssession.PeerManager { + sessionPeerManagerFactory := func(ctx context.Context, id uint64) bssession.SessionPeerManager { return bsspm.New(ctx, id, network.ConnectionManager(), pqm) } - sessionRequestSplitterFactory := func(ctx context.Context) bssession.RequestSplitter { - return bssrs.New(ctx) - } notif := notifications.New() + sm := bssm.New(ctx, sessionFactory, sim, sessionPeerManagerFactory, bpm, pm, notif, network.Self()) + wm.SetSessionManager(sm) + engine := decision.NewEngine(ctx, bstore, network.ConnectionManager(), network.Self()) - engine := decision.NewEngine(ctx, bstore, network.ConnectionManager()) // TODO close the engine with Close() method bs := &Bitswap{ blockstore: bstore, engine: engine, @@ -139,8 +147,10 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, newBlocks: make(chan cid.Cid, HasBlockBufferSize), provideKeys: make(chan cid.Cid, provideKeysBufferSize), wm: wm, + pm: pm, pqm: pqm, - sm: bssm.New(ctx, sessionFactory, sessionPeerManagerFactory, sessionRequestSplitterFactory, notif), + sm: sm, + sim: sim, notif: notif, counters: new(counters), dupMetric: dupHist, @@ -156,7 +166,6 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, option(bs) } - bs.wm.Startup() bs.pqm.Startup() network.SetDelegate(bs) @@ -181,6 +190,8 @@ type Bitswap struct { // the wantlist tracks global wants for bitswap wm *bswm.WantManager + pm *bspm.PeerManager + // the provider query manager manages requests to find providers pqm *bspqm.ProviderQueryManager @@ -215,9 +226,13 @@ type Bitswap struct { allMetric metrics.Histogram sentHistogram metrics.Histogram - // the sessionmanager manages tracking sessions + // the SessionManager routes requests to interested sessions sm *bssm.SessionManager + // the SessionInterestManager keeps track of which sessions are interested + // in which CIDs + sim *bssim.SessionInterestManager + // whether or not to make provide announcements provideEnabled bool @@ -275,14 +290,14 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks // HasBlock announces the existence of a block to this bitswap service. The // service will potentially notify its peers. func (bs *Bitswap) HasBlock(blk blocks.Block) error { - return bs.receiveBlocksFrom(context.Background(), "", []blocks.Block{blk}) + return bs.receiveBlocksFrom(context.Background(), "", []blocks.Block{blk}, nil, nil) } // TODO: Some of this stuff really only needs to be done when adding a block // from the user, not when receiving it from the network. // In case you run `git blame` on this comment, I'll save you some time: ask // @whyrusleeping, I don't know the answers you seek. 
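+// receiveBlocksFrom handles blocks received from the network (from is the
+// empty peer.ID when a block is added locally via HasBlock), along with any
+// HAVE / DONT_HAVE information sent by the peer.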
-func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []blocks.Block) error { +func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []blocks.Block, haves []cid.Cid, dontHaves []cid.Cid) error { select { case <-bs.process.Closing(): return errors.New("bitswap is closed") @@ -293,22 +308,20 @@ func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []b // If blocks came from the network if from != "" { - // Split blocks into wanted blocks vs duplicates - wanted = make([]blocks.Block, 0, len(blks)) - for _, b := range blks { - if bs.sm.IsWanted(b.Cid()) { - wanted = append(wanted, b) - } else { - log.Debugf("[recv] block not in wantlist; cid=%s, peer=%s", b.Cid(), from) - } + var notWanted []blocks.Block + wanted, notWanted = bs.sim.SplitWantedUnwanted(blks) + for _, b := range notWanted { + log.Debugf("[recv] block not in wantlist; cid=%s, peer=%s", b.Cid(), from) } } // Put wanted blocks into blockstore - err := bs.blockstore.PutMany(wanted) - if err != nil { - log.Errorf("Error writing %d blocks to datastore: %s", len(wanted), err) - return err + if len(wanted) > 0 { + err := bs.blockstore.PutMany(wanted) + if err != nil { + log.Errorf("Error writing %d blocks to datastore: %s", len(wanted), err) + return err + } } // NOTE: There exists the possiblity for a race condition here. If a user @@ -322,23 +335,15 @@ func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []b allKs = append(allKs, b.Cid()) } - wantedKs := allKs - if len(blks) != len(wanted) { - wantedKs = make([]cid.Cid, 0, len(wanted)) - for _, b := range wanted { - wantedKs = append(wantedKs, b.Cid()) - } - } - // Send all block keys (including duplicates) to any sessions that want them. // (The duplicates are needed by sessions for accounting purposes) - bs.sm.ReceiveFrom(from, allKs) + bs.wm.ReceiveFrom(ctx, from, allKs, haves, dontHaves) - // Send wanted block keys to decision engine - bs.engine.AddBlocks(wantedKs) + // Send wanted blocks to decision engine + bs.engine.ReceiveFrom(from, wanted, haves) // Publish the block to any Bitswap clients that had requested blocks. 
- // (the sessions use this pubsub mechanism to inform clients of received + // (the sessions use this pubsub mechanism to inform clients of incoming // blocks) for _, b := range wanted { bs.notif.Publish(b) @@ -346,9 +351,9 @@ func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []b // If the reprovider is enabled, send wanted blocks to reprovider if bs.provideEnabled { - for _, k := range wantedKs { + for _, blk := range wanted { select { - case bs.newBlocks <- k: + case bs.newBlocks <- blk.Cid(): // send block off to be reprovided case <-bs.process.Closing(): return bs.process.Close() @@ -380,20 +385,22 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg iblocks := incoming.Blocks() - if len(iblocks) == 0 { - return - } - - bs.updateReceiveCounters(iblocks) - for _, b := range iblocks { - log.Debugf("[recv] block; cid=%s, peer=%s", b.Cid(), p) + if len(iblocks) > 0 { + bs.updateReceiveCounters(iblocks) + for _, b := range iblocks { + log.Debugf("[recv] block; cid=%s, peer=%s", b.Cid(), p) + } } - // Process blocks - err := bs.receiveBlocksFrom(ctx, p, iblocks) - if err != nil { - log.Warningf("ReceiveMessage recvBlockFrom error: %s", err) - return + haves := incoming.Haves() + dontHaves := incoming.DontHaves() + if len(iblocks) > 0 || len(haves) > 0 || len(dontHaves) > 0 { + // Process blocks + err := bs.receiveBlocksFrom(ctx, p, iblocks, haves, dontHaves) + if err != nil { + log.Warningf("ReceiveMessage recvBlockFrom error: %s", err) + return + } } } @@ -479,12 +486,12 @@ func (bs *Bitswap) Close() error { // GetWantlist returns the current local wantlist. func (bs *Bitswap) GetWantlist() []cid.Cid { - entries := bs.wm.CurrentWants() - out := make([]cid.Cid, 0, len(entries)) - for _, e := range entries { - out = append(out, e.Cid) - } - return out + return bs.pm.CurrentWants() +} + +// GetWanthaves returns the current list of want-haves. 
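+// A want-have asks a peer whether it has a block (the peer responds with a
+// HAVE or DONT_HAVE) instead of asking it to send the block itself.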
+func (bs *Bitswap) GetWantHaves() []cid.Cid { + return bs.pm.CurrentWantHaves() } // IsOnline is needed to match go-ipfs-exchange-interface diff --git a/bitswap_test.go b/bitswap_test.go index 9b757182..965c94ed 100644 --- a/bitswap_test.go +++ b/bitswap_test.go @@ -571,8 +571,9 @@ func TestWantlistCleanup(t *testing.T) { defer ig.Close() bg := blocksutil.NewBlockGenerator() - instances := ig.Instances(1)[0] - bswap := instances.Exchange + instances := ig.Instances(2) + instance := instances[0] + bswap := instance.Exchange blocks := bg.Blocks(20) var keys []cid.Cid @@ -580,6 +581,7 @@ func TestWantlistCleanup(t *testing.T) { keys = append(keys, b.Cid()) } + // Once context times out, key should be removed from wantlist ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*50) defer cancel() _, err := bswap.GetBlock(ctx, keys[0]) @@ -589,10 +591,11 @@ func TestWantlistCleanup(t *testing.T) { time.Sleep(time.Millisecond * 50) - if len(bswap.GetWantlist()) > 0 { + if len(bswap.GetWantHaves()) > 0 { t.Fatal("should not have anyting in wantlist") } + // Once context times out, keys should be removed from wantlist ctx, cancel = context.WithTimeout(context.Background(), time.Millisecond*50) defer cancel() _, err = bswap.GetBlocks(ctx, keys[:10]) @@ -603,29 +606,37 @@ func TestWantlistCleanup(t *testing.T) { <-ctx.Done() time.Sleep(time.Millisecond * 50) - if len(bswap.GetWantlist()) > 0 { + if len(bswap.GetWantHaves()) > 0 { t.Fatal("should not have anyting in wantlist") } + // Send want for single block, with no timeout _, err = bswap.GetBlocks(context.Background(), keys[:1]) if err != nil { t.Fatal(err) } + // Send want for 10 blocks ctx, cancel = context.WithCancel(context.Background()) _, err = bswap.GetBlocks(ctx, keys[10:]) if err != nil { t.Fatal(err) } + // Even after 50 milli-seconds we haven't explicitly cancelled anything + // and no timeouts have expired, so we should have 11 want-haves time.Sleep(time.Millisecond * 50) - if len(bswap.GetWantlist()) != 5 { - t.Fatal("should have 5 keys in wantlist") + if len(bswap.GetWantHaves()) != 11 { + t.Fatal("should have 11 keys in wantlist") } + // Cancel the timeout for the request for 10 blocks. 
This should remove + // the want-haves cancel() + + // Once the cancel is processed, we are left with the request for 1 block time.Sleep(time.Millisecond * 50) - if !(len(bswap.GetWantlist()) == 1 && bswap.GetWantlist()[0] == keys[0]) { + if !(len(bswap.GetWantHaves()) == 1 && bswap.GetWantHaves()[0] == keys[0]) { t.Fatal("should only have keys[0] in wantlist") } } diff --git a/bitswap_with_sessions_test.go b/bitswap_with_sessions_test.go index db7255c8..77ad03b2 100644 --- a/bitswap_with_sessions_test.go +++ b/bitswap_with_sessions_test.go @@ -30,12 +30,15 @@ func TestBasicSessions(t *testing.T) { a := inst[0] b := inst[1] + // Add a block to Peer B if err := b.Blockstore().Put(block); err != nil { t.Fatal(err) } + // Create a session on Peer A sesa := a.Exchange.NewSession(ctx) + // Get the block blkout, err := sesa.GetBlock(ctx, block.Cid()) if err != nil { t.Fatal(err) @@ -74,6 +77,7 @@ func TestSessionBetweenPeers(t *testing.T) { inst := ig.Instances(10) + // Add 101 blocks to Peer A blks := bgen.Blocks(101) if err := inst[0].Blockstore().PutMany(blks); err != nil { t.Fatal(err) @@ -84,6 +88,7 @@ func TestSessionBetweenPeers(t *testing.T) { cids = append(cids, blk.Cid()) } + // Create a session on Peer B ses := inst[1].Exchange.NewSession(ctx) if _, err := ses.GetBlock(ctx, cids[0]); err != nil { t.Fatal(err) @@ -91,6 +96,7 @@ func TestSessionBetweenPeers(t *testing.T) { blks = blks[1:] cids = cids[1:] + // Fetch blocks with the session, 10 at a time for i := 0; i < 10; i++ { ch, err := ses.GetBlocks(ctx, cids[i*10:(i+1)*10]) if err != nil { @@ -127,6 +133,7 @@ func TestSessionSplitFetch(t *testing.T) { inst := ig.Instances(11) + // Add 10 distinct blocks to each of 10 peers blks := bgen.Blocks(100) for i := 0; i < 10; i++ { if err := inst[i].Blockstore().PutMany(blks[i*10 : (i+1)*10]); err != nil { @@ -139,6 +146,7 @@ func TestSessionSplitFetch(t *testing.T) { cids = append(cids, blk.Cid()) } + // Create a session on the remaining peer and fetch all the blocks 10 at a time ses := inst[10].Exchange.NewSession(ctx).(*bssession.Session) ses.SetBaseTickDelay(time.Millisecond * 10) @@ -169,6 +177,7 @@ func TestFetchNotConnected(t *testing.T) { other := ig.Next() + // Provide 10 blocks on Peer A blks := bgen.Blocks(10) for _, block := range blks { if err := other.Exchange.HasBlock(block); err != nil { @@ -181,6 +190,9 @@ func TestFetchNotConnected(t *testing.T) { cids = append(cids, blk.Cid()) } + // Request blocks with Peer B + // Note: Peer A and Peer B are not initially connected, so this tests + // that Peer B will search for and find Peer A thisNode := ig.Next() ses := thisNode.Exchange.NewSession(ctx).(*bssession.Session) ses.SetBaseTickDelay(time.Millisecond * 10) @@ -198,6 +210,81 @@ func TestFetchNotConnected(t *testing.T) { t.Fatal(err) } } + +func TestFetchAfterDisconnect(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + vnet := getVirtualNetwork() + ig := testinstance.NewTestInstanceGenerator(vnet, bitswap.ProviderSearchDelay(10*time.Millisecond)) + defer ig.Close() + bgen := blocksutil.NewBlockGenerator() + + inst := ig.Instances(2) + peerA := inst[0] + peerB := inst[1] + + // Provide 5 blocks on Peer A + blks := bgen.Blocks(10) + var cids []cid.Cid + for _, blk := range blks { + cids = append(cids, blk.Cid()) + } + + firstBlks := blks[:5] + for _, block := range firstBlks { + if err := peerA.Exchange.HasBlock(block); err != nil { + t.Fatal(err) + } + } + + // Request all blocks with Peer B + ses := 
peerB.Exchange.NewSession(ctx).(*bssession.Session) + ses.SetBaseTickDelay(time.Millisecond * 10) + + ch, err := ses.GetBlocks(ctx, cids) + if err != nil { + t.Fatal(err) + } + + // Should get first 5 blocks + var got []blocks.Block + for i := 0; i < 5; i++ { + b := <-ch + got = append(got, b) + } + + if err := assertBlockLists(got, blks[:5]); err != nil { + t.Fatal(err) + } + + // Break connection + err = peerA.Adapter.DisconnectFrom(ctx, peerB.Peer) + if err != nil { + t.Fatal(err) + } + + // Provide remaining blocks + lastBlks := blks[5:] + for _, block := range lastBlks { + if err := peerA.Exchange.HasBlock(block); err != nil { + t.Fatal(err) + } + } + + // Peer B should call FindProviders() and find Peer A + + // Should get last 5 blocks + for i := 0; i < 5; i++ { + b := <-ch + got = append(got, b) + } + + if err := assertBlockLists(got, blks); err != nil { + t.Fatal(err) + } +} + func TestInterestCacheOverflow(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/blockpresencemanager/blockpresencemanager.go b/blockpresencemanager/blockpresencemanager.go new file mode 100644 index 00000000..87821f2f --- /dev/null +++ b/blockpresencemanager/blockpresencemanager.go @@ -0,0 +1,111 @@ +package blockpresencemanager + +import ( + "sync" + + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p-core/peer" +) + +// BlockPresenceManager keeps track of which peers have indicated that they +// have or explicitly don't have a block +type BlockPresenceManager struct { + sync.RWMutex + presence map[cid.Cid]map[peer.ID]bool +} + +func New() *BlockPresenceManager { + return &BlockPresenceManager{ + presence: make(map[cid.Cid]map[peer.ID]bool), + } +} + +// ReceiveFrom is called when a peer sends us information about which blocks +// it has and does not have +func (bpm *BlockPresenceManager) ReceiveFrom(p peer.ID, haves []cid.Cid, dontHaves []cid.Cid) { + bpm.Lock() + defer bpm.Unlock() + + for _, c := range haves { + bpm.updateBlockPresence(p, c, true) + } + for _, c := range dontHaves { + bpm.updateBlockPresence(p, c, false) + } +} + +func (bpm *BlockPresenceManager) updateBlockPresence(p peer.ID, c cid.Cid, present bool) { + _, ok := bpm.presence[c] + if !ok { + bpm.presence[c] = make(map[peer.ID]bool) + } + + // Make sure not to change HAVE to DONT_HAVE + has, pok := bpm.presence[c][p] + if pok && has { + return + } + bpm.presence[c][p] = present +} + +// PeerHasBlock indicates whether the given peer has sent a HAVE for the given +// cid +func (bpm *BlockPresenceManager) PeerHasBlock(p peer.ID, c cid.Cid) bool { + bpm.RLock() + defer bpm.RUnlock() + + return bpm.presence[c][p] +} + +// PeerDoesNotHaveBlock indicates whether the given peer has sent a DONT_HAVE +// for the given cid +func (bpm *BlockPresenceManager) PeerDoesNotHaveBlock(p peer.ID, c cid.Cid) bool { + bpm.RLock() + defer bpm.RUnlock() + + have, known := bpm.presence[c][p] + return known && !have +} + +// Filters the keys such that all the given peers have received a DONT_HAVE +// for a key. +// This allows us to know if we've exhausted all possibilities of finding +// the key with the peers we know about. 
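+// For example, if p0 and p1 have both sent a DONT_HAVE for c2, but only p0
+// has sent a DONT_HAVE for c1, then AllPeersDoNotHaveBlock([p0, p1], [c1, c2])
+// returns [c2].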
+func (bpm *BlockPresenceManager) AllPeersDoNotHaveBlock(peers []peer.ID, ks []cid.Cid) []cid.Cid { + bpm.RLock() + defer bpm.RUnlock() + + var res []cid.Cid + for _, c := range ks { + if bpm.allDontHave(peers, c) { + res = append(res, c) + } + } + return res +} + +func (bpm *BlockPresenceManager) allDontHave(peers []peer.ID, c cid.Cid) bool { + // Check if we know anything about the cid's block presence + ps, cok := bpm.presence[c] + if !cok { + return false + } + + // Check if we explicitly know that all the given peers do not have the cid + for _, p := range peers { + if has, pok := ps[p]; !pok || has { + return false + } + } + return true +} + +// RemoveKeys cleans up the given keys from the block presence map +func (bpm *BlockPresenceManager) RemoveKeys(ks []cid.Cid) { + bpm.Lock() + defer bpm.Unlock() + + for _, c := range ks { + delete(bpm.presence, c) + } +} diff --git a/blockpresencemanager/blockpresencemanager_test.go b/blockpresencemanager/blockpresencemanager_test.go new file mode 100644 index 00000000..6154f4df --- /dev/null +++ b/blockpresencemanager/blockpresencemanager_test.go @@ -0,0 +1,239 @@ +package blockpresencemanager + +import ( + "fmt" + "testing" + + "github.com/ipfs/go-bitswap/testutil" + peer "github.com/libp2p/go-libp2p-core/peer" + + cid "github.com/ipfs/go-cid" +) + +const ( + expHasFalseMsg = "Expected PeerHasBlock to return false" + expHasTrueMsg = "Expected PeerHasBlock to return true" + expDoesNotHaveFalseMsg = "Expected PeerDoesNotHaveBlock to return false" + expDoesNotHaveTrueMsg = "Expected PeerDoesNotHaveBlock to return true" +) + +func TestBlockPresenceManager(t *testing.T) { + bpm := New() + + p := testutil.GeneratePeers(1)[0] + cids := testutil.GenerateCids(2) + c0 := cids[0] + c1 := cids[1] + + // Nothing stored yet, both PeerHasBlock and PeerDoesNotHaveBlock should + // return false + if bpm.PeerHasBlock(p, c0) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p, c0) { + t.Fatal(expDoesNotHaveFalseMsg) + } + + // HAVE cid0 / DONT_HAVE cid1 + bpm.ReceiveFrom(p, []cid.Cid{c0}, []cid.Cid{c1}) + + // Peer has received HAVE for cid0 + if !bpm.PeerHasBlock(p, c0) { + t.Fatal(expHasTrueMsg) + } + if bpm.PeerDoesNotHaveBlock(p, c0) { + t.Fatal(expDoesNotHaveFalseMsg) + } + + // Peer has received DONT_HAVE for cid1 + if !bpm.PeerDoesNotHaveBlock(p, c1) { + t.Fatal(expDoesNotHaveTrueMsg) + } + if bpm.PeerHasBlock(p, c1) { + t.Fatal(expHasFalseMsg) + } + + // HAVE cid1 / DONT_HAVE cid0 + bpm.ReceiveFrom(p, []cid.Cid{c1}, []cid.Cid{c0}) + + // DONT_HAVE cid0 should NOT over-write earlier HAVE cid0 + if bpm.PeerDoesNotHaveBlock(p, c0) { + t.Fatal(expDoesNotHaveFalseMsg) + } + if !bpm.PeerHasBlock(p, c0) { + t.Fatal(expHasTrueMsg) + } + + // HAVE cid1 should over-write earlier DONT_HAVE cid1 + if !bpm.PeerHasBlock(p, c1) { + t.Fatal(expHasTrueMsg) + } + if bpm.PeerDoesNotHaveBlock(p, c1) { + t.Fatal(expDoesNotHaveFalseMsg) + } + + // Remove cid0 + bpm.RemoveKeys([]cid.Cid{c0}) + + // Nothing stored, both PeerHasBlock and PeerDoesNotHaveBlock should + // return false + if bpm.PeerHasBlock(p, c0) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p, c0) { + t.Fatal(expDoesNotHaveFalseMsg) + } + + // Remove cid1 + bpm.RemoveKeys([]cid.Cid{c1}) + + // Nothing stored, both PeerHasBlock and PeerDoesNotHaveBlock should + // return false + if bpm.PeerHasBlock(p, c1) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p, c1) { + t.Fatal(expDoesNotHaveFalseMsg) + } +} + +func TestAddRemoveMulti(t *testing.T) { + bpm := New() + + peers := 
testutil.GeneratePeers(2) + p0 := peers[0] + p1 := peers[1] + cids := testutil.GenerateCids(3) + c0 := cids[0] + c1 := cids[1] + c2 := cids[2] + + // p0: HAVE cid0, cid1 / DONT_HAVE cid1, cid2 + // p1: HAVE cid1, cid2 / DONT_HAVE cid0 + bpm.ReceiveFrom(p0, []cid.Cid{c0, c1}, []cid.Cid{c1, c2}) + bpm.ReceiveFrom(p1, []cid.Cid{c1, c2}, []cid.Cid{c0}) + + // Peer 0 should end up with + // - HAVE cid0 + // - HAVE cid1 + // - DONT_HAVE cid2 + if !bpm.PeerHasBlock(p0, c0) { + t.Fatal(expHasTrueMsg) + } + if !bpm.PeerHasBlock(p0, c1) { + t.Fatal(expHasTrueMsg) + } + if !bpm.PeerDoesNotHaveBlock(p0, c2) { + t.Fatal(expDoesNotHaveTrueMsg) + } + + // Peer 1 should end up with + // - HAVE cid1 + // - HAVE cid2 + // - DONT_HAVE cid0 + if !bpm.PeerHasBlock(p1, c1) { + t.Fatal(expHasTrueMsg) + } + if !bpm.PeerHasBlock(p1, c2) { + t.Fatal(expHasTrueMsg) + } + if !bpm.PeerDoesNotHaveBlock(p1, c0) { + t.Fatal(expDoesNotHaveTrueMsg) + } + + // Remove cid1 and cid2. Should end up with + // Peer 0: HAVE cid0 + // Peer 1: DONT_HAVE cid0 + bpm.RemoveKeys([]cid.Cid{c1, c2}) + if !bpm.PeerHasBlock(p0, c0) { + t.Fatal(expHasTrueMsg) + } + if !bpm.PeerDoesNotHaveBlock(p1, c0) { + t.Fatal(expDoesNotHaveTrueMsg) + } + + // The other keys should have been cleared, so both HasBlock() and + // DoesNotHaveBlock() should return false + if bpm.PeerHasBlock(p0, c1) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p0, c1) { + t.Fatal(expDoesNotHaveFalseMsg) + } + if bpm.PeerHasBlock(p0, c2) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p0, c2) { + t.Fatal(expDoesNotHaveFalseMsg) + } + if bpm.PeerHasBlock(p1, c1) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p1, c1) { + t.Fatal(expDoesNotHaveFalseMsg) + } + if bpm.PeerHasBlock(p1, c2) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p1, c2) { + t.Fatal(expDoesNotHaveFalseMsg) + } +} + +func TestAllPeersDoNotHaveBlock(t *testing.T) { + bpm := New() + + peers := testutil.GeneratePeers(3) + p0 := peers[0] + p1 := peers[1] + p2 := peers[2] + + cids := testutil.GenerateCids(3) + c0 := cids[0] + c1 := cids[1] + c2 := cids[2] + + // c0 c1 c2 + // p0 ? N N + // p1 N Y ? 
+ // p2 Y Y N + bpm.ReceiveFrom(p0, []cid.Cid{}, []cid.Cid{c1, c2}) + bpm.ReceiveFrom(p1, []cid.Cid{c1}, []cid.Cid{c0}) + bpm.ReceiveFrom(p2, []cid.Cid{c0, c1}, []cid.Cid{c2}) + + type testcase struct { + peers []peer.ID + ks []cid.Cid + exp []cid.Cid + } + + testcases := []testcase{ + testcase{[]peer.ID{p0}, []cid.Cid{c0}, []cid.Cid{}}, + testcase{[]peer.ID{p1}, []cid.Cid{c0}, []cid.Cid{c0}}, + testcase{[]peer.ID{p2}, []cid.Cid{c0}, []cid.Cid{}}, + + testcase{[]peer.ID{p0}, []cid.Cid{c1}, []cid.Cid{c1}}, + testcase{[]peer.ID{p1}, []cid.Cid{c1}, []cid.Cid{}}, + testcase{[]peer.ID{p2}, []cid.Cid{c1}, []cid.Cid{}}, + + testcase{[]peer.ID{p0}, []cid.Cid{c2}, []cid.Cid{c2}}, + testcase{[]peer.ID{p1}, []cid.Cid{c2}, []cid.Cid{}}, + testcase{[]peer.ID{p2}, []cid.Cid{c2}, []cid.Cid{c2}}, + + // p0 recieved DONT_HAVE for c1 & c2 (but not for c0) + testcase{[]peer.ID{p0}, []cid.Cid{c0, c1, c2}, []cid.Cid{c1, c2}}, + testcase{[]peer.ID{p0, p1}, []cid.Cid{c0, c1, c2}, []cid.Cid{}}, + // Both p0 and p2 received DONT_HAVE for c2 + testcase{[]peer.ID{p0, p2}, []cid.Cid{c0, c1, c2}, []cid.Cid{c2}}, + testcase{[]peer.ID{p0, p1, p2}, []cid.Cid{c0, c1, c2}, []cid.Cid{}}, + } + + for i, tc := range testcases { + if !testutil.MatchKeysIgnoreOrder( + bpm.AllPeersDoNotHaveBlock(tc.peers, tc.ks), + tc.exp, + ) { + t.Fatal(fmt.Sprintf("test case %d failed: expected matching keys", i)) + } + } +} diff --git a/decision/engine.go b/decision/engine.go index 7a58bb3f..2e183b06 100644 --- a/decision/engine.go +++ b/decision/engine.go @@ -8,8 +8,11 @@ import ( "time" "github.com/google/uuid" + bsmsg "github.com/ipfs/go-bitswap/message" + pb "github.com/ipfs/go-bitswap/message/pb" wl "github.com/ipfs/go-bitswap/wantlist" + blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" bstore "github.com/ipfs/go-ipfs-blockstore" logging "github.com/ipfs/go-log" @@ -56,10 +59,10 @@ var log = logging.Logger("engine") const ( // outboxChanBuffer must be 0 to prevent stale messages from being sent outboxChanBuffer = 0 - // Number of concurrent workers that pull tasks off the request queue - taskWorkerCount = 8 - // maxMessageSize is the maximum size of the batched payload - maxMessageSize = 512 * 1024 + // targetMessageSize is the ideal size of the batched payload. We try to + // pop this much data off the request queue, but it may be a little more + // or less depending on what's in the queue. + targetMessageSize = 16 * 1024 // tagFormat is the tag given to peers associated an engine tagFormat = "bs-engine-%s-%s" @@ -82,6 +85,13 @@ const ( longTermScore = 10 // this is a high tag but it grows _very_ slowly. shortTermScore = 10 // this is a high tag but it'll go away quickly if we aren't using the peer. + // maxBlockSizeReplaceHasWithBlock is the maximum size of the block in + // bytes up to which we will replace a want-have with a want-block + maxBlockSizeReplaceHasWithBlock = 1024 + + // Number of concurrent workers that pull tasks off the request queue + taskWorkerCount = 8 + // Number of concurrent workers that process requests to the blockstore blockstoreWorkerCount = 128 ) @@ -137,7 +147,8 @@ type Engine struct { tagQueued, tagUseful string - lock sync.Mutex // protects the fields immediatly below + lock sync.RWMutex // protects the fields immediatly below + // ledgerMap lists Ledgers by their Partner key. 
ledgerMap map[peer.ID]*ledger @@ -145,24 +156,39 @@ type Engine struct { taskWorkerLock sync.Mutex taskWorkerCount int + + // maxBlockSizeReplaceHasWithBlock is the maximum size of the block in + // bytes up to which we will replace a want-have with a want-block + maxBlockSizeReplaceHasWithBlock int + + self peer.ID } // NewEngine creates a new block sending engine for the given block store -func NewEngine(ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger) *Engine { +func NewEngine(ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger, self peer.ID) *Engine { + return newEngine(ctx, bs, peerTagger, self, maxBlockSizeReplaceHasWithBlock) +} + +// This constructor is used by the tests +func newEngine(ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger, self peer.ID, maxReplaceSize int) *Engine { e := &Engine{ - ledgerMap: make(map[peer.ID]*ledger), - bsm: newBlockstoreManager(ctx, bs, blockstoreWorkerCount), - peerTagger: peerTagger, - outbox: make(chan (<-chan *Envelope), outboxChanBuffer), - workSignal: make(chan struct{}, 1), - ticker: time.NewTicker(time.Millisecond * 100), - taskWorkerCount: taskWorkerCount, + ledgerMap: make(map[peer.ID]*ledger), + bsm: newBlockstoreManager(ctx, bs, blockstoreWorkerCount), + peerTagger: peerTagger, + outbox: make(chan (<-chan *Envelope), outboxChanBuffer), + workSignal: make(chan struct{}, 1), + ticker: time.NewTicker(time.Millisecond * 100), + maxBlockSizeReplaceHasWithBlock: maxReplaceSize, + taskWorkerCount: taskWorkerCount, + self: self, } e.tagQueued = fmt.Sprintf(tagFormat, "queued", uuid.New().String()) e.tagUseful = fmt.Sprintf(tagFormat, "useful", uuid.New().String()) e.peerRequestQueue = peertaskqueue.New( peertaskqueue.OnPeerAddedHook(e.onPeerAdded), - peertaskqueue.OnPeerRemovedHook(e.onPeerRemoved)) + peertaskqueue.OnPeerRemovedHook(e.onPeerRemoved), + peertaskqueue.TaskMerger(newTaskMerger()), + peertaskqueue.IgnoreFreezing(true)) go e.scoreWorker(ctx) return e } @@ -310,9 +336,9 @@ func (e *Engine) LedgerForPeer(p peer.ID) *Receipt { } } -// Each taskWorker pulls items off the request queue up and adds them to an -// envelope. The envelope is passed off to the bitswap workers, which send -// the message to the network. +// Each taskWorker pulls items off the request queue up to the maximum size +// and adds them to an envelope that is passed off to the bitswap workers, +// which send the message to the network. func (e *Engine) taskWorker(ctx context.Context) { defer e.taskWorkerExit() for { @@ -349,53 +375,91 @@ func (e *Engine) taskWorkerExit() { // context is cancelled before the next Envelope can be created. func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { for { - nextTask := e.peerRequestQueue.PopBlock() - for nextTask == nil { + // Pop some tasks off the request queue + p, nextTasks, pendingBytes := e.peerRequestQueue.PopTasks(targetMessageSize) + for len(nextTasks) == 0 { select { case <-ctx.Done(): return nil, ctx.Err() case <-e.workSignal: - nextTask = e.peerRequestQueue.PopBlock() + p, nextTasks, pendingBytes = e.peerRequestQueue.PopTasks(targetMessageSize) case <-e.ticker.C: + // When a task is cancelled, the queue may be "frozen" for a + // period of time. We periodically "thaw" the queue to make + // sure it doesn't get stuck in a frozen state. e.peerRequestQueue.ThawRound() - nextTask = e.peerRequestQueue.PopBlock() + p, nextTasks, pendingBytes = e.peerRequestQueue.PopTasks(targetMessageSize) } } - // with a task in hand, we're ready to prepare the envelope... 
- blockCids := cid.NewSet() - for _, t := range nextTask.Tasks { - blockCids.Add(t.Identifier.(cid.Cid)) + // Create a new message + msg := bsmsg.New(true) + + // log.Debugf(" %s got %d tasks", lu.P(e.self), len(nextTasks)) + + // Amount of data in the request queue still waiting to be popped + msg.SetPendingBytes(int32(pendingBytes)) + + // Split out want-blocks, want-haves and DONT_HAVEs + blockCids := make([]cid.Cid, 0, len(nextTasks)) + blockTasks := make(map[cid.Cid]*taskData, len(nextTasks)) + for _, t := range nextTasks { + c := t.Topic.(cid.Cid) + td := t.Data.(*taskData) + if td.HaveBlock { + if td.IsWantBlock { + blockCids = append(blockCids, c) + blockTasks[c] = td + } else { + // Add HAVES to the message + msg.AddHave(c) + } + } else { + // Add DONT_HAVEs to the message + msg.AddDontHave(c) + } } - blks, err := e.bsm.getBlocks(ctx, blockCids.Keys()) + + // Fetch blocks from datastore + blks, err := e.bsm.getBlocks(ctx, blockCids) if err != nil { // we're dropping the envelope but that's not an issue in practice. return nil, err } - msg := bsmsg.New(true) - for _, b := range blks { - msg.AddBlock(b) + for c, t := range blockTasks { + blk := blks[c] + // If the block was not found (it has been removed) + if blk == nil { + // If the client requested DONT_HAVE, add DONT_HAVE to the message + if t.SendDontHave { + // log.Debugf(" make evlp %s->%s DONT_HAVE (expected block) %s", lu.P(e.self), lu.P(p), lu.C(c)) + msg.AddDontHave(c) + } + } else { + // Add the block to the message + // log.Debugf(" make evlp %s->%s block: %s (%d bytes)", lu.P(e.self), lu.P(p), lu.C(c), len(blk.RawData())) + msg.AddBlock(blk) + } } + // If there's nothing in the message, bail out if msg.Empty() { - // If we don't have the block, don't hold that against the peer - // make sure to update that the task has been 'completed' - nextTask.Done(nextTask.Tasks) + e.peerRequestQueue.TasksDone(p, nextTasks...) continue } + // log.Debugf(" sending message %s->%s (%d blks / %d presences / %d bytes)\n", lu.P(e.self), lu.P(p), blkCount, presenceCount, msg.Size()) return &Envelope{ - Peer: nextTask.Target, + Peer: p, Message: msg, Sent: func() { - nextTask.Done(nextTask.Tasks) - select { - case e.workSignal <- struct{}{}: - // work completing may mean that our queue will provide new - // work to be done. - default: - } + // Once the message has been sent, signal the request queue so + // it can be cleared from the queue + e.peerRequestQueue.TasksDone(p, nextTasks...) + + // Signal the worker to check for more work + e.signalNewWork() }, }, nil } @@ -408,8 +472,8 @@ func (e *Engine) Outbox() <-chan (<-chan *Envelope) { // Peers returns a slice of Peers with whom the local node has active sessions. func (e *Engine) Peers() []peer.ID { - e.lock.Lock() - defer e.lock.Unlock() + e.lock.RLock() + defer e.lock.RUnlock() response := make([]peer.ID, 0, len(e.ledgerMap)) @@ -419,9 +483,25 @@ func (e *Engine) Peers() []peer.ID { return response } -// MessageReceived performs book-keeping. Returns error if passed invalid -// arguments. +// MessageReceived is called when a message is received from a remote peer. 
+// For each item in the wantlist, add a want-have or want-block entry to the +// request queue (this is later popped off by the workerTasks) func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage) { + entries := m.Wantlist() + + // if len(entries) > 0 { + // log.Debugf("engine-%s received message from %s with %d entries\n", lu.P(e.self), lu.P(p), len(entries)) + // for _, et := range entries { + // if !et.Cancel { + // if et.WantType == pb.Message_Wantlist_Have { + // log.Debugf(" recv %s<-%s: want-have %s\n", lu.P(e.self), lu.P(p), lu.C(et.Cid)) + // } else { + // log.Debugf(" recv %s<-%s: want-block %s\n", lu.P(e.self), lu.P(p), lu.C(et.Cid)) + // } + // } + // } + // } + if m.Empty() { log.Debugf("received empty message from %s", p) } @@ -434,12 +514,10 @@ func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwap }() // Get block sizes - entries := m.Wantlist() + wants, cancels := e.splitWantsCancels(entries) wantKs := cid.NewSet() - for _, entry := range entries { - if !entry.Cancel { - wantKs.Add(entry.Cid) - } + for _, entry := range wants { + wantKs.Add(entry.Cid) } blockSizes, err := e.bsm.getBlockSizes(ctx, wantKs.Keys()) if err != nil { @@ -447,78 +525,186 @@ func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwap return } + // Get the ledger for the peer l := e.findOrCreate(p) l.lk.Lock() defer l.lk.Unlock() + + // Record how many bytes were received in the ledger + blks := m.Blocks() + for _, block := range blks { + log.Debugf("got block %s %d bytes", block, len(block.RawData())) + l.ReceivedBytes(len(block.RawData())) + } + + // If the peer sent a full wantlist, replace the ledger's wantlist if m.Full() { l.wantList = wl.New() } - var msgSize int var activeEntries []peertask.Task - for _, entry := range m.Wantlist() { - if entry.Cancel { - log.Debugf("%s cancel %s", p, entry.Cid) - l.CancelWant(entry.Cid) + + // Remove cancelled blocks from the queue + for _, entry := range cancels { + // log.Debugf("%s<-%s cancel %s", lu.P(e.self), lu.P(p), lu.C(entry.Cid)) + if l.CancelWant(entry.Cid) { e.peerRequestQueue.Remove(entry.Cid, p) - } else { - log.Debugf("wants %s - %d", entry.Cid, entry.Priority) - l.Wants(entry.Cid, entry.Priority) - blockSize, ok := blockSizes[entry.Cid] - if ok { - // we have the block + } + } + + // For each want-have / want-block + for _, entry := range wants { + c := entry.Cid + blockSize, found := blockSizes[entry.Cid] + + // Add each want-have / want-block to the ledger + l.Wants(c, entry.Priority, entry.WantType) + + // If the block was not found + if !found { + // Only add the task to the queue if the requester wants a DONT_HAVE + if entry.SendDontHave { newWorkExists = true - if msgSize+blockSize > maxMessageSize { - e.peerRequestQueue.PushBlock(p, activeEntries...) 
- activeEntries = []peertask.Task{} - msgSize = 0 + isWantBlock := false + if entry.WantType == pb.Message_Wantlist_Block { + isWantBlock = true } - activeEntries = append(activeEntries, peertask.Task{Identifier: entry.Cid, Priority: entry.Priority}) - msgSize += blockSize + + // if isWantBlock { + // log.Debugf(" put rq %s->%s %s as want-block (not found)\n", lu.P(e.self), lu.P(p), lu.C(entry.Cid)) + // } else { + // log.Debugf(" put rq %s->%s %s as want-have (not found)\n", lu.P(e.self), lu.P(p), lu.C(entry.Cid)) + // } + + activeEntries = append(activeEntries, peertask.Task{ + Topic: c, + Priority: entry.Priority, + Work: bsmsg.BlockPresenceSize(c), + Data: &taskData{ + BlockSize: 0, + HaveBlock: false, + IsWantBlock: isWantBlock, + SendDontHave: entry.SendDontHave, + }, + }) + } + // log.Debugf(" not putting rq %s->%s %s (not found, SendDontHave false)\n", lu.P(e.self), lu.P(p), lu.C(entry.Cid)) + } else { + // The block was found, add it to the queue + newWorkExists = true + + isWantBlock := e.sendAsBlock(entry.WantType, blockSize) + + // if isWantBlock { + // log.Debugf(" put rq %s->%s %s as want-block (%d bytes)\n", lu.P(e.self), lu.P(p), lu.C(entry.Cid), blockSize) + // } else { + // log.Debugf(" put rq %s->%s %s as want-have (%d bytes)\n", lu.P(e.self), lu.P(p), lu.C(entry.Cid), blockSize) + // } + + // entrySize is the amount of space the entry takes up in the + // message we send to the recipient. If we're sending a block, the + // entrySize is the size of the block. Otherwise it's the size of + // a block presence entry. + entrySize := blockSize + if !isWantBlock { + entrySize = bsmsg.BlockPresenceSize(c) } + activeEntries = append(activeEntries, peertask.Task{ + Topic: c, + Priority: entry.Priority, + Work: entrySize, + Data: &taskData{ + BlockSize: blockSize, + HaveBlock: true, + IsWantBlock: isWantBlock, + SendDontHave: entry.SendDontHave, + }, + }) } } + + // Push entries onto the request queue if len(activeEntries) > 0 { - e.peerRequestQueue.PushBlock(p, activeEntries...) + e.peerRequestQueue.PushTasks(p, activeEntries...) } - for _, block := range m.Blocks() { - log.Debugf("got block %s %d bytes", block, len(block.RawData())) - l.ReceivedBytes(len(block.RawData())) +} + +// Split the want-have / want-block entries from the cancel entries +func (e *Engine) splitWantsCancels(es []bsmsg.Entry) ([]bsmsg.Entry, []bsmsg.Entry) { + wants := make([]bsmsg.Entry, 0, len(es)) + cancels := make([]bsmsg.Entry, 0, len(es)) + for _, et := range es { + if et.Cancel { + cancels = append(cancels, et) + } else { + wants = append(wants, et) + } } + return wants, cancels } -func (e *Engine) addBlocks(ks []cid.Cid) { - work := false +// ReceiveFrom is called when new blocks are received and added to the block +// store, meaning there may be peers who want those blocks, so we should send +// the blocks to them. 
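+// Each matching wantlist entry is queued either as the block itself (for
+// want-blocks, and for blocks small enough to replace a HAVE) or as a HAVE
+// (for want-haves), as determined by sendAsBlock().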
+func (e *Engine) ReceiveFrom(from peer.ID, blks []blocks.Block, haves []cid.Cid) { + if len(blks) == 0 { + return + } + + // Get the size of each block + blockSizes := make(map[cid.Cid]int, len(blks)) + for _, blk := range blks { + blockSizes[blk.Cid()] = len(blk.RawData()) + } + // Check each peer to see if it wants one of the blocks we received + work := false + e.lock.RLock() for _, l := range e.ledgerMap { - l.lk.Lock() - for _, k := range ks { + l.lk.RLock() + + for _, b := range blks { + k := b.Cid() + if entry, ok := l.WantListContains(k); ok { - e.peerRequestQueue.PushBlock(l.Partner, peertask.Task{ - Identifier: entry.Cid, - Priority: entry.Priority, - }) work = true + + blockSize := blockSizes[k] + isWantBlock := e.sendAsBlock(entry.WantType, blockSize) + + // if isWantBlock { + // log.Debugf(" add-block put rq %s->%s %s as want-block (%d bytes)\n", lu.P(e.self), lu.P(l.Partner), lu.C(k), blockSize) + // } else { + // log.Debugf(" add-block put rq %s->%s %s as want-have (%d bytes)\n", lu.P(e.self), lu.P(l.Partner), lu.C(k), blockSize) + // } + + entrySize := blockSize + if !isWantBlock { + entrySize = bsmsg.BlockPresenceSize(k) + } + + e.peerRequestQueue.PushTasks(l.Partner, peertask.Task{ + Topic: entry.Cid, + Priority: entry.Priority, + Work: entrySize, + Data: &taskData{ + BlockSize: blockSize, + HaveBlock: true, + IsWantBlock: isWantBlock, + SendDontHave: false, + }, + }) } } - l.lk.Unlock() + l.lk.RUnlock() } + e.lock.RUnlock() if work { e.signalNewWork() } } -// AddBlocks is called when new blocks are received and added to a block store, -// meaning there may be peers who want those blocks, so we should send the blocks -// to them. -func (e *Engine) AddBlocks(ks []cid.Cid) { - e.lock.Lock() - defer e.lock.Unlock() - - e.addBlocks(ks) -} - // TODO add contents of m.WantList() to my local wantlist? NB: could introduce // race conditions where I send a message, but MessageSent gets handled after // MessageReceived. The information in the local wantlist could become @@ -532,9 +718,19 @@ func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) { l.lk.Lock() defer l.lk.Unlock() + // Remove sent blocks from the want list for the peer for _, block := range m.Blocks() { l.SentBytes(len(block.RawData())) - l.wantList.Remove(block.Cid()) + l.wantList.RemoveType(block.Cid(), pb.Message_Wantlist_Block) + } + + // Remove sent block presences from the want list for the peer + for _, bp := range m.BlockPresences() { + // TODO: record block presence bytes as well? + // l.SentBytes(?) 
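+		// A sent HAVE satisfies the peer's want-have, so clear it from the
+		// ledger's wantlist; any want-block for the same CID stays in place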
+ if bp.Type == pb.Message_Have { + l.wantList.RemoveType(bp.Cid, pb.Message_Wantlist_Have) + } } } @@ -548,6 +744,7 @@ func (e *Engine) PeerConnected(p peer.ID) { l = newLedger(p) e.ledgerMap[p] = l } + l.lk.Lock() defer l.lk.Unlock() l.ref++ @@ -561,6 +758,7 @@ func (e *Engine) PeerDisconnected(p peer.ID) { if !ok { return } + l.lk.Lock() defer l.lk.Unlock() l.ref-- @@ -569,6 +767,13 @@ func (e *Engine) PeerDisconnected(p peer.ID) { } } +// If the want is a want-have, and it's below a certain size, send the full +// block (instead of sending a HAVE) +func (e *Engine) sendAsBlock(wantType pb.Message_Wantlist_WantType, blockSize int) bool { + isWantBlock := wantType == pb.Message_Wantlist_Block + return isWantBlock || blockSize <= e.maxBlockSizeReplaceHasWithBlock +} + func (e *Engine) numBytesSentTo(p peer.ID) uint64 { // NB not threadsafe return e.findOrCreate(p).Accounting.BytesSent @@ -581,9 +786,20 @@ func (e *Engine) numBytesReceivedFrom(p peer.ID) uint64 { // ledger lazily instantiates a ledger func (e *Engine) findOrCreate(p peer.ID) *ledger { + // Take a read lock (as it's less expensive) to check if we have a ledger + // for the peer + e.lock.RLock() + l, ok := e.ledgerMap[p] + e.lock.RUnlock() + if ok { + return l + } + + // There's no ledger, so take a write lock, then check again and create the + // ledger if necessary e.lock.Lock() defer e.lock.Unlock() - l, ok := e.ledgerMap[p] + l, ok = e.ledgerMap[p] if !ok { l = newLedger(p) e.ledgerMap[p] = l diff --git a/decision/engine_test.go b/decision/engine_test.go index 09962e1e..12e7eca2 100644 --- a/decision/engine_test.go +++ b/decision/engine_test.go @@ -1,6 +1,7 @@ package decision import ( + "bytes" "context" "errors" "fmt" @@ -9,15 +10,19 @@ import ( "testing" "time" + lu "github.com/ipfs/go-bitswap/logutil" message "github.com/ipfs/go-bitswap/message" + pb "github.com/ipfs/go-bitswap/message/pb" + "github.com/ipfs/go-bitswap/testutil" blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" blockstore "github.com/ipfs/go-ipfs-blockstore" process "github.com/jbenet/goprocess" peer "github.com/libp2p/go-libp2p-core/peer" - testutil "github.com/libp2p/go-libp2p-core/test" + libp2ptest "github.com/libp2p/go-libp2p-core/test" ) type peerTag struct { @@ -86,10 +91,10 @@ type engineSet struct { Blockstore blockstore.Blockstore } -func newEngine(ctx context.Context, idStr string) engineSet { +func newTestEngine(ctx context.Context, idStr string) engineSet { fpt := &fakePeerTagger{} bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) - e := NewEngine(ctx, bs, fpt) + e := newEngine(ctx, bs, fpt, "localhost", 0) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) return engineSet{ Peer: peer.ID(idStr), @@ -103,8 +108,8 @@ func newEngine(ctx context.Context, idStr string) engineSet { func TestConsistentAccounting(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - sender := newEngine(ctx, "Ernie") - receiver := newEngine(ctx, "Bert") + sender := newTestEngine(ctx, "Ernie") + receiver := newTestEngine(ctx, "Bert") // Send messages from Ernie to Bert for i := 0; i < 1000; i++ { @@ -138,8 +143,8 @@ func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - sanfrancisco := newEngine(ctx, "sf") - seattle := newEngine(ctx, "sea") + sanfrancisco := newTestEngine(ctx, "sf") + seattle := 
newTestEngine(ctx, "sea") m := message.New(true) @@ -176,7 +181,7 @@ func peerIsPartner(p peer.ID, e *Engine) bool { func TestOutboxClosedWhenEngineClosed(t *testing.T) { ctx := context.Background() t.SkipNow() // TODO implement *Engine.Close - e := NewEngine(ctx, blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), &fakePeerTagger{}) + e := newEngine(ctx, blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), &fakePeerTagger{}, "localhost", 0) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) var wg sync.WaitGroup wg.Add(1) @@ -193,6 +198,616 @@ func TestOutboxClosedWhenEngineClosed(t *testing.T) { } } +func TestPartnerWantHaveWantBlockNonActive(t *testing.T) { + alphabet := "abcdefghijklmnopqrstuvwxyz" + vowels := "aeiou" + + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + for _, letter := range strings.Split(alphabet, "") { + block := blocks.NewBlock([]byte(letter)) + if err := bs.Put(block); err != nil { + t.Fatal(err) + } + } + + partner := libp2ptest.RandPeerIDFatal(t) + // partnerWantBlocks(e, vowels, partner) + + type testCaseEntry struct { + wantBlks string + wantHaves string + sendDontHave bool + } + + type testCaseExp struct { + blks string + haves string + dontHaves string + } + + type testCase struct { + only bool + wls []testCaseEntry + exp []testCaseExp + } + + testCases := []testCase{ + // Just send want-blocks + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: vowels, + sendDontHave: false, + }, + }, + exp: []testCaseExp{ + testCaseExp{ + blks: vowels, + }, + }, + }, + + // Send want-blocks and want-haves + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: vowels, + wantHaves: "fgh", + sendDontHave: false, + }, + }, + exp: []testCaseExp{ + testCaseExp{ + blks: vowels, + haves: "fgh", + }, + }, + }, + + // Send want-blocks and want-haves, with some want-haves that are not + // present, but without requesting DONT_HAVES + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: vowels, + wantHaves: "fgh123", + sendDontHave: false, + }, + }, + exp: []testCaseExp{ + testCaseExp{ + blks: vowels, + haves: "fgh", + }, + }, + }, + + // Send want-blocks and want-haves, with some want-haves that are not + // present, and request DONT_HAVES + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: vowels, + wantHaves: "fgh123", + sendDontHave: true, + }, + }, + exp: []testCaseExp{ + testCaseExp{ + blks: vowels, + haves: "fgh", + dontHaves: "123", + }, + }, + }, + + // Send want-blocks and want-haves, with some want-blocks and want-haves that are not + // present, but without requesting DONT_HAVES + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: "aeiou123", + wantHaves: "fgh456", + sendDontHave: false, + }, + }, + exp: []testCaseExp{ + testCaseExp{ + blks: "aeiou", + haves: "fgh", + dontHaves: "", + }, + }, + }, + + // Send want-blocks and want-haves, with some want-blocks and want-haves that are not + // present, and request DONT_HAVES + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: "aeiou123", + wantHaves: "fgh456", + sendDontHave: true, + }, + }, + exp: []testCaseExp{ + testCaseExp{ + blks: "aeiou", + haves: "fgh", + dontHaves: "123456", + }, + }, + }, + + // Send repeated want-blocks + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: "ae", + sendDontHave: false, + }, + testCaseEntry{ + wantBlks: "io", + sendDontHave: false, + }, + testCaseEntry{ + wantBlks: "u", + sendDontHave: false, + }, + }, + exp: []testCaseExp{ + 
testCaseExp{ + blks: "aeiou", + }, + }, + }, + + // Send repeated want-blocks and want-haves + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: "ae", + wantHaves: "jk", + sendDontHave: false, + }, + testCaseEntry{ + wantBlks: "io", + wantHaves: "lm", + sendDontHave: false, + }, + testCaseEntry{ + wantBlks: "u", + sendDontHave: false, + }, + }, + exp: []testCaseExp{ + testCaseExp{ + blks: "aeiou", + haves: "jklm", + }, + }, + }, + + // Send repeated want-blocks and want-haves, with some want-blocks and want-haves that are not + // present, and request DONT_HAVES + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: "ae12", + wantHaves: "jk5", + sendDontHave: true, + }, + testCaseEntry{ + wantBlks: "io34", + wantHaves: "lm", + sendDontHave: true, + }, + testCaseEntry{ + wantBlks: "u", + wantHaves: "6", + sendDontHave: true, + }, + }, + exp: []testCaseExp{ + testCaseExp{ + blks: "aeiou", + haves: "jklm", + dontHaves: "123456", + }, + }, + }, + + // Send want-block then want-have for same CID + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: "a", + sendDontHave: true, + }, + testCaseEntry{ + wantHaves: "a", + sendDontHave: true, + }, + }, + // want-have should be ignored because there was already a + // want-block for the same CID in the queue + exp: []testCaseExp{ + testCaseExp{ + blks: "a", + }, + }, + }, + + // Send want-have then want-block for same CID + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantHaves: "b", + sendDontHave: true, + }, + testCaseEntry{ + wantBlks: "b", + sendDontHave: true, + }, + }, + // want-block should overwrite existing want-have + exp: []testCaseExp{ + testCaseExp{ + blks: "b", + }, + }, + }, + + // Send want-block then want-block for same CID + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: "a", + sendDontHave: true, + }, + testCaseEntry{ + wantBlks: "a", + sendDontHave: true, + }, + }, + // second want-block should be ignored + exp: []testCaseExp{ + testCaseExp{ + blks: "a", + }, + }, + }, + + // Send want-have then want-have for same CID + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantHaves: "a", + sendDontHave: true, + }, + testCaseEntry{ + wantHaves: "a", + sendDontHave: true, + }, + }, + // second want-have should be ignored + exp: []testCaseExp{ + testCaseExp{ + haves: "a", + }, + }, + }, + } + + var onlyTestCases []testCase + for _, testCase := range testCases { + if testCase.only { + onlyTestCases = append(onlyTestCases, testCase) + } + } + if len(onlyTestCases) > 0 { + testCases = onlyTestCases + } + + e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0) + e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) + for i, testCase := range testCases { + t.Logf("Test case %d:", i) + for _, wl := range testCase.wls { + t.Logf(" want-blocks '%s' / want-haves '%s' / sendDontHave %t", + wl.wantBlks, wl.wantHaves, wl.sendDontHave) + wantBlks := strings.Split(wl.wantBlks, "") + wantHaves := strings.Split(wl.wantHaves, "") + partnerWantBlocksHaves(e, wantBlks, wantHaves, wl.sendDontHave, partner) + } + + for _, exp := range testCase.exp { + expBlks := strings.Split(exp.blks, "") + expHaves := strings.Split(exp.haves, "") + expDontHaves := strings.Split(exp.dontHaves, "") + + next := <-e.Outbox() + env := <-next + err := checkOutput(t, e, env, expBlks, expHaves, expDontHaves) + if err != nil { + t.Fatal(err) + } + env.Sent() + } + } +} + +func TestPartnerWantHaveWantBlockActive(t *testing.T) { + alphabet := 
"abcdefghijklmnopqrstuvwxyz" + + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + for _, letter := range strings.Split(alphabet, "") { + block := blocks.NewBlock([]byte(letter)) + if err := bs.Put(block); err != nil { + t.Fatal(err) + } + } + + partner := libp2ptest.RandPeerIDFatal(t) + + type testCaseEntry struct { + wantBlks string + wantHaves string + sendDontHave bool + } + + type testCaseExp struct { + blks string + haves string + dontHaves string + } + + type testCase struct { + only bool + wls []testCaseEntry + exp []testCaseExp + } + + testCases := []testCase{ + // Send want-block then want-have for same CID + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: "a", + sendDontHave: true, + }, + testCaseEntry{ + wantHaves: "a", + sendDontHave: true, + }, + }, + // want-have should be ignored because there was already a + // want-block for the same CID in the queue + exp: []testCaseExp{ + testCaseExp{ + blks: "a", + }, + }, + }, + + // Send want-have then want-block for same CID + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantHaves: "b", + sendDontHave: true, + }, + testCaseEntry{ + wantBlks: "b", + sendDontHave: true, + }, + }, + // want-have is active when want-block is added, so want-have + // should get sent, then want-block + exp: []testCaseExp{ + testCaseExp{ + haves: "b", + }, + testCaseExp{ + blks: "b", + }, + }, + }, + + // Send want-block then want-block for same CID + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: "a", + sendDontHave: true, + }, + testCaseEntry{ + wantBlks: "a", + sendDontHave: true, + }, + }, + // second want-block should be ignored + exp: []testCaseExp{ + testCaseExp{ + blks: "a", + }, + }, + }, + + // Send want-have then want-have for same CID + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantHaves: "a", + sendDontHave: true, + }, + testCaseEntry{ + wantHaves: "a", + sendDontHave: true, + }, + }, + // second want-have should be ignored + exp: []testCaseExp{ + testCaseExp{ + haves: "a", + }, + }, + }, + } + + var onlyTestCases []testCase + for _, testCase := range testCases { + if testCase.only { + onlyTestCases = append(onlyTestCases, testCase) + } + } + if len(onlyTestCases) > 0 { + testCases = onlyTestCases + } + + e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0) + e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) + + var next envChan + for i, testCase := range testCases { + envs := make([]*Envelope, 0) + + t.Logf("Test case %d:", i) + for _, wl := range testCase.wls { + t.Logf(" want-blocks '%s' / want-haves '%s' / sendDontHave %t", + wl.wantBlks, wl.wantHaves, wl.sendDontHave) + wantBlks := strings.Split(wl.wantBlks, "") + wantHaves := strings.Split(wl.wantHaves, "") + partnerWantBlocksHaves(e, wantBlks, wantHaves, wl.sendDontHave, partner) + + var env *Envelope + next, env = getNextEnvelope(e, next, 5*time.Millisecond) + if env != nil { + envs = append(envs, env) + } + } + + if len(envs) != len(testCase.exp) { + t.Fatalf("Expected %d envelopes but received %d", len(testCase.exp), len(envs)) + } + + for i, exp := range testCase.exp { + expBlks := strings.Split(exp.blks, "") + expHaves := strings.Split(exp.haves, "") + expDontHaves := strings.Split(exp.dontHaves, "") + + err := checkOutput(t, e, envs[i], expBlks, expHaves, expDontHaves) + if err != nil { + t.Fatal(err) + } + envs[i].Sent() + } + } +} + +func checkOutput(t *testing.T, e *Engine, envelope *Envelope, expBlks []string, expHaves []string, expDontHaves 
[]string) error { + blks := envelope.Message.Blocks() + presences := envelope.Message.BlockPresences() + + // Verify payload message length + if len(blks) != len(expBlks) { + blkDiff := formatBlocksDiff(blks, expBlks) + msg := fmt.Sprintf("Received %d blocks. Expected %d blocks:\n%s", len(blks), len(expBlks), blkDiff) + return errors.New(msg) + } + + // Verify block presences message length + expPresencesCount := len(expHaves) + len(expDontHaves) + if len(presences) != expPresencesCount { + presenceDiff := formatPresencesDiff(presences, expHaves, expDontHaves) + return fmt.Errorf("Received %d BlockPresences. Expected %d BlockPresences:\n%s", + len(presences), expPresencesCount, presenceDiff) + } + + // Verify payload message contents + for _, k := range expBlks { + found := false + expected := blocks.NewBlock([]byte(k)) + for _, block := range blks { + if block.Cid().Equals(expected.Cid()) { + found = true + break + } + } + if !found { + return errors.New(formatBlocksDiff(blks, expBlks)) + } + } + + // Verify HAVEs + if err := checkPresence(presences, expHaves, pb.Message_Have); err != nil { + return errors.New(formatPresencesDiff(presences, expHaves, expDontHaves)) + } + + // Verify DONT_HAVEs + if err := checkPresence(presences, expDontHaves, pb.Message_DontHave); err != nil { + return errors.New(formatPresencesDiff(presences, expHaves, expDontHaves)) + } + + return nil +} + +func checkPresence(presences []message.BlockPresence, expPresence []string, presenceType pb.Message_BlockPresenceType) error { + for _, k := range expPresence { + found := false + expected := blocks.NewBlock([]byte(k)) + for _, p := range presences { + if p.Cid.Equals(expected.Cid()) { + found = true + if p.Type != presenceType { + return errors.New("type mismatch") + } + break + } + } + if !found { + return errors.New("not found") + } + } + return nil +} + +func formatBlocksDiff(blks []blocks.Block, expBlks []string) string { + var out bytes.Buffer + out.WriteString(fmt.Sprintf("Blocks (%d):\n", len(blks))) + for _, b := range blks { + out.WriteString(fmt.Sprintf(" %s: %s\n", lu.C(b.Cid()), b.RawData())) + } + out.WriteString(fmt.Sprintf("Expected (%d):\n", len(expBlks))) + for _, k := range expBlks { + expected := blocks.NewBlock([]byte(k)) + out.WriteString(fmt.Sprintf(" %s: %s\n", lu.C(expected.Cid()), k)) + } + return out.String() +} + +func formatPresencesDiff(presences []message.BlockPresence, expHaves []string, expDontHaves []string) string { + var out bytes.Buffer + out.WriteString(fmt.Sprintf("BlockPresences (%d):\n", len(presences))) + for _, p := range presences { + t := "HAVE" + if p.Type == pb.Message_DontHave { + t = "DONT_HAVE" + } + out.WriteString(fmt.Sprintf(" %s - %s\n", lu.C(p.Cid), t)) + } + out.WriteString(fmt.Sprintf("Expected (%d):\n", len(expHaves)+len(expDontHaves))) + for _, k := range expHaves { + expected := blocks.NewBlock([]byte(k)) + out.WriteString(fmt.Sprintf(" %s: %s - HAVE\n", lu.C(expected.Cid()), k)) + } + for _, k := range expDontHaves { + expected := blocks.NewBlock([]byte(k)) + out.WriteString(fmt.Sprintf(" %s: %s - DONT_HAVE\n", lu.C(expected.Cid()), k)) + } + return out.String() +} + func TestPartnerWantsThenCancels(t *testing.T) { numRounds := 10 if testing.Short() { @@ -235,7 +850,7 @@ func TestPartnerWantsThenCancels(t *testing.T) { ctx := context.Background() for i := 0; i < numRounds; i++ { expected := make([][]string, 0, len(testcases)) - e := NewEngine(ctx, bs, &fakePeerTagger{}) + e := newEngine(ctx, bs, &fakePeerTagger{}, "localhost", 0) e.StartWorkers(ctx, 
process.WithTeardown(func() error { return nil })) for _, testcase := range testcases { set := testcase[0] @@ -243,9 +858,9 @@ func TestPartnerWantsThenCancels(t *testing.T) { keeps := stringsComplement(set, cancels) expected = append(expected, keeps) - partner := testutil.RandPeerIDFatal(t) + partner := libp2ptest.RandPeerIDFatal(t) - partnerWants(e, set, partner) + partnerWantBlocks(e, set, partner) partnerCancels(e, cancels, partner) } if err := checkHandledInOrder(t, e, expected); err != nil { @@ -255,11 +870,119 @@ func TestPartnerWantsThenCancels(t *testing.T) { } } +func TestSendReceivedBlocksToPeersThatWantThem(t *testing.T) { + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + partner := libp2ptest.RandPeerIDFatal(t) + otherPeer := libp2ptest.RandPeerIDFatal(t) + + e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0) + e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) + + blks := testutil.GenerateBlocksOfSize(4, 8*1024) + msg := message.New(false) + msg.AddEntry(blks[0].Cid(), 4, pb.Message_Wantlist_Have, false) + msg.AddEntry(blks[1].Cid(), 3, pb.Message_Wantlist_Have, false) + msg.AddEntry(blks[2].Cid(), 2, pb.Message_Wantlist_Block, false) + msg.AddEntry(blks[3].Cid(), 1, pb.Message_Wantlist_Block, false) + e.MessageReceived(context.Background(), partner, msg) + + // Nothing in blockstore, so shouldn't get any envelope + var next envChan + next, env := getNextEnvelope(e, next, 5*time.Millisecond) + if env != nil { + t.Fatal("expected no envelope yet") + } + + if err := bs.PutMany([]blocks.Block{blks[0], blks[2]}); err != nil { + t.Fatal(err) + } + e.ReceiveFrom(otherPeer, []blocks.Block{blks[0], blks[2]}, []cid.Cid{}) + _, env = getNextEnvelope(e, next, 5*time.Millisecond) + if env == nil { + t.Fatal("expected envelope") + } + if env.Peer != partner { + t.Fatal("expected message to peer") + } + sentBlk := env.Message.Blocks() + if len(sentBlk) != 1 || !sentBlk[0].Cid().Equals(blks[2].Cid()) { + t.Fatal("expected 1 block") + } + sentHave := env.Message.BlockPresences() + if len(sentHave) != 1 || !sentHave[0].Cid.Equals(blks[0].Cid()) || sentHave[0].Type != pb.Message_Have { + t.Fatal("expected 1 HAVE") + } +} + +func TestSendDontHave(t *testing.T) { + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + partner := libp2ptest.RandPeerIDFatal(t) + otherPeer := libp2ptest.RandPeerIDFatal(t) + + e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0) + e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) + + blks := testutil.GenerateBlocksOfSize(4, 8*1024) + msg := message.New(false) + msg.AddEntry(blks[0].Cid(), 4, pb.Message_Wantlist_Have, false) + msg.AddEntry(blks[1].Cid(), 3, pb.Message_Wantlist_Have, true) + msg.AddEntry(blks[2].Cid(), 2, pb.Message_Wantlist_Block, false) + msg.AddEntry(blks[3].Cid(), 1, pb.Message_Wantlist_Block, true) + e.MessageReceived(context.Background(), partner, msg) + + // Nothing in blockstore, should get DONT_HAVE for entries that wanted it + var next envChan + next, env := getNextEnvelope(e, next, 5*time.Millisecond) + if env == nil { + t.Fatal("expected envelope") + } + if env.Peer != partner { + t.Fatal("expected message to peer") + } + if len(env.Message.Blocks()) > 0 { + t.Fatal("expected no blocks") + } + sentDontHaves := env.Message.BlockPresences() + if len(sentDontHaves) != 2 { + t.Fatal("expected 2 DONT_HAVEs") + } + if !sentDontHaves[0].Cid.Equals(blks[1].Cid()) && + 
!sentDontHaves[1].Cid.Equals(blks[1].Cid()) { + t.Fatal("expected DONT_HAVE for want-have") + } + if !sentDontHaves[0].Cid.Equals(blks[3].Cid()) && + !sentDontHaves[1].Cid.Equals(blks[3].Cid()) { + t.Fatal("expected DONT_HAVE for want-block") + } + + // Receive all the blocks + if err := bs.PutMany(blks); err != nil { + t.Fatal(err) + } + e.ReceiveFrom(otherPeer, blks, []cid.Cid{}) + + // Envelope should contain 2 HAVEs / 2 blocks + _, env = getNextEnvelope(e, next, 5*time.Millisecond) + if env == nil { + t.Fatal("expected envelope") + } + if env.Peer != partner { + t.Fatal("expected message to peer") + } + if len(env.Message.Blocks()) != 2 { + t.Fatal("expected 2 blocks") + } + sentHave := env.Message.BlockPresences() + if len(sentHave) != 2 || sentHave[0].Type != pb.Message_Have || sentHave[1].Type != pb.Message_Have { + t.Fatal("expected 2 HAVEs") + } +} + func TestTaggingPeers(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() - sanfrancisco := newEngine(ctx, "sf") - seattle := newEngine(ctx, "sea") + sanfrancisco := newTestEngine(ctx, "sf") + seattle := newTestEngine(ctx, "sea") keys := []string{"a", "b", "c", "d", "e"} for _, letter := range keys { @@ -268,7 +991,7 @@ func TestTaggingPeers(t *testing.T) { t.Fatal(err) } } - partnerWants(sanfrancisco.Engine, keys, seattle.Peer) + partnerWantBlocks(sanfrancisco.Engine, keys, seattle.Peer) next := <-sanfrancisco.Engine.Outbox() envelope := <-next @@ -285,12 +1008,12 @@ func TestTaggingPeers(t *testing.T) { func TestTaggingUseful(t *testing.T) { oldShortTerm := shortTerm - shortTerm = 1 * time.Millisecond + shortTerm = 2 * time.Millisecond defer func() { shortTerm = oldShortTerm }() ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() - me := newEngine(ctx, "engine") + me := newTestEngine(ctx, "engine") friend := peer.ID("friend") block := blocks.NewBlock([]byte("foobar")) @@ -322,11 +1045,27 @@ func TestTaggingUseful(t *testing.T) { } } -func partnerWants(e *Engine, keys []string, partner peer.ID) { +func partnerWantBlocks(e *Engine, keys []string, partner peer.ID) { add := message.New(false) for i, letter := range keys { block := blocks.NewBlock([]byte(letter)) - add.AddEntry(block.Cid(), len(keys)-i) + add.AddEntry(block.Cid(), len(keys)-i, pb.Message_Wantlist_Block, true) + } + e.MessageReceived(context.Background(), partner, add) +} + +func partnerWantBlocksHaves(e *Engine, keys []string, wantHaves []string, sendDontHave bool, partner peer.ID) { + add := message.New(false) + priority := len(wantHaves) + len(keys) + for _, letter := range wantHaves { + block := blocks.NewBlock([]byte(letter)) + add.AddEntry(block.Cid(), priority, pb.Message_Wantlist_Have, sendDontHave) + priority-- + } + for _, letter := range keys { + block := blocks.NewBlock([]byte(letter)) + add.AddEntry(block.Cid(), priority, pb.Message_Wantlist_Block, sendDontHave) + priority-- } e.MessageReceived(context.Background(), partner, add) } @@ -340,6 +1079,29 @@ func partnerCancels(e *Engine, keys []string, partner peer.ID) { e.MessageReceived(context.Background(), partner, cancels) } +type envChan <-chan *Envelope + +func getNextEnvelope(e *Engine, next envChan, t time.Duration) (envChan, *Envelope) { + ctx, cancel := context.WithTimeout(context.Background(), t) + defer cancel() + + if next == nil { + next = <-e.Outbox() // returns immediately + } + + select { + case env, ok := <-next: // blocks till next envelope ready + if !ok { + log.Warningf("got closed channel") + return nil, 
nil + } + return nil, env + case <-ctx.Done(): + // log.Warningf("got timeout") + } + return next, nil +} + func checkHandledInOrder(t *testing.T, e *Engine, expected [][]string) error { for _, keys := range expected { next := <-e.Outbox() diff --git a/decision/ledger.go b/decision/ledger.go index 277daaa2..a607834a 100644 --- a/decision/ledger.go +++ b/decision/ledger.go @@ -4,6 +4,7 @@ import ( "sync" "time" + pb "github.com/ipfs/go-bitswap/message/pb" wl "github.com/ipfs/go-bitswap/wantlist" cid "github.com/ipfs/go-cid" @@ -46,7 +47,7 @@ type ledger struct { // don't drop the reference to this ledger in multi-connection scenarios ref int - lk sync.Mutex + lk sync.RWMutex } // Receipt is a summary of the ledger for a given peer @@ -90,13 +91,13 @@ func (l *ledger) ReceivedBytes(n int) { l.Accounting.BytesRecv += uint64(n) } -func (l *ledger) Wants(k cid.Cid, priority int) { +func (l *ledger) Wants(k cid.Cid, priority int, wantType pb.Message_Wantlist_WantType) { log.Debugf("peer %s wants %s", l.Partner, k) - l.wantList.Add(k, priority) + l.wantList.Add(k, priority, wantType) } -func (l *ledger) CancelWant(k cid.Cid) { - l.wantList.Remove(k) +func (l *ledger) CancelWant(k cid.Cid) bool { + return l.wantList.Remove(k) } func (l *ledger) WantListContains(k cid.Cid) (wl.Entry, bool) { diff --git a/decision/taskmerger.go b/decision/taskmerger.go new file mode 100644 index 00000000..19048641 --- /dev/null +++ b/decision/taskmerger.go @@ -0,0 +1,87 @@ +package decision + +import ( + "github.com/ipfs/go-peertaskqueue/peertask" +) + +// taskData is extra data associated with each task in the request queue +type taskData struct { + // Tasks can be want-have or want-block + IsWantBlock bool + // Whether to immediately send a response if the block is not found + SendDontHave bool + // The size of the block corresponding to the task + BlockSize int + // Whether the block was found + HaveBlock bool +} + +type taskMerger struct{} + +func newTaskMerger() *taskMerger { + return &taskMerger{} +} + +// The request queue uses this Method to decide if a newly pushed task has any +// new information beyond the tasks with the same Topic (CID) in the queue. 
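+// A pushed task has new information if it is a want-block and the existing
+// tasks are all want-haves, or if it knows the block size and none of the
+// existing tasks do.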
+func (*taskMerger) HasNewInfo(task peertask.Task, existing []peertask.Task) bool { + haveSize := false + isWantBlock := false + for _, et := range existing { + etd := et.Data.(*taskData) + if etd.HaveBlock { + haveSize = true + } + + if etd.IsWantBlock { + isWantBlock = true + } + } + + // If there is no active want-block and the new task is a want-block, + // the new task is better + newTaskData := task.Data.(*taskData) + if !isWantBlock && newTaskData.IsWantBlock { + return true + } + + // If there is no size information for the CID and the new task has + // size information, the new task is better + if !haveSize && newTaskData.HaveBlock { + return true + } + + return false +} + +// The request queue uses Merge to merge a newly pushed task with an existing +// task with the same Topic (CID) +func (*taskMerger) Merge(task peertask.Task, existing *peertask.Task) { + newTask := task.Data.(*taskData) + existingTask := existing.Data.(*taskData) + + // If we now have block size information, update the task with + // the new block size + if !existingTask.HaveBlock && newTask.HaveBlock { + existingTask.HaveBlock = newTask.HaveBlock + existingTask.BlockSize = newTask.BlockSize + } + + // If replacing a want-have with a want-block + if !existingTask.IsWantBlock && newTask.IsWantBlock { + // Change the type from want-have to want-block + existingTask.IsWantBlock = true + // If the want-have was a DONT_HAVE, or the want-block has a size + if !existingTask.HaveBlock || newTask.HaveBlock { + // Update the entry size + existingTask.HaveBlock = newTask.HaveBlock + existing.Work = task.Work + } + } + + // If the task is a want-block, make sure the entry size is equal + // to the block size (because we will send the whole block) + if existingTask.IsWantBlock && existingTask.HaveBlock { + existing.Work = existingTask.BlockSize + } +} diff --git a/decision/taskmerger_test.go b/decision/taskmerger_test.go new file mode 100644 index 00000000..7d4d61c8 --- /dev/null +++ b/decision/taskmerger_test.go @@ -0,0 +1,357 @@ +package decision + +import ( + "testing" + + "github.com/ipfs/go-bitswap/testutil" + "github.com/ipfs/go-peertaskqueue" + "github.com/ipfs/go-peertaskqueue/peertask" +) + +func TestPushHaveVsBlock(t *testing.T) { + partner := testutil.GeneratePeers(1)[0] + + wantHave := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 1, + Data: &taskData{ + IsWantBlock: false, + BlockSize: 10, + HaveBlock: true, + SendDontHave: false, + }, + } + wantBlock := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 10, + Data: &taskData{ + IsWantBlock: true, + BlockSize: 10, + HaveBlock: true, + SendDontHave: false, + }, + } + + runTestCase := func(tasks []peertask.Task, expIsWantBlock bool) { + tasks = cloneTasks(tasks) + ptq := peertaskqueue.New(peertaskqueue.TaskMerger(newTaskMerger())) + ptq.PushTasks(partner, tasks...) 
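+		// Pop everything queued for the peer; pushes for the same Topic
+		// should have been merged into a single task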
+ _, popped, _ := ptq.PopTasks(100) + if len(popped) != 1 { + t.Fatalf("Expected 1 task, received %d tasks", len(popped)) + } + isWantBlock := popped[0].Data.(*taskData).IsWantBlock + if isWantBlock != expIsWantBlock { + t.Fatalf("Expected task.IsWantBlock to be %t, received %t", expIsWantBlock, isWantBlock) + } + } + const wantBlockType = true + const wantHaveType = false + + // should ignore second want-have + runTestCase([]peertask.Task{wantHave, wantHave}, wantHaveType) + // should ignore second want-block + runTestCase([]peertask.Task{wantBlock, wantBlock}, wantBlockType) + // want-have does not overwrite want-block + runTestCase([]peertask.Task{wantBlock, wantHave}, wantBlockType) + // want-block overwrites want-have + runTestCase([]peertask.Task{wantHave, wantBlock}, wantBlockType) +} + +func TestPushSizeInfo(t *testing.T) { + partner := testutil.GeneratePeers(1)[0] + + wantBlockBlockSize := 10 + wantBlockDontHaveBlockSize := 0 + wantHaveBlockSize := 10 + wantHaveDontHaveBlockSize := 0 + wantBlock := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 10, + Data: &taskData{ + IsWantBlock: true, + BlockSize: wantBlockBlockSize, + HaveBlock: true, + SendDontHave: false, + }, + } + wantBlockDontHave := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 2, + Data: &taskData{ + IsWantBlock: true, + BlockSize: wantBlockDontHaveBlockSize, + HaveBlock: false, + SendDontHave: false, + }, + } + wantHave := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 1, Data: &taskData{ + IsWantBlock: false, + BlockSize: wantHaveBlockSize, + HaveBlock: true, + SendDontHave: false, + }, + } + wantHaveDontHave := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 1, + Data: &taskData{ + IsWantBlock: false, + BlockSize: wantHaveDontHaveBlockSize, + HaveBlock: false, + SendDontHave: false, + }, + } + + runTestCase := func(tasks []peertask.Task, expSize int, expBlockSize int, expIsWantBlock bool) { + tasks = cloneTasks(tasks) + ptq := peertaskqueue.New(peertaskqueue.TaskMerger(newTaskMerger())) + ptq.PushTasks(partner, tasks...) 
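+		// Pop the merged task and check that its Work, BlockSize and want
+		// type reflect the merge rules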
+ _, popped, _ := ptq.PopTasks(100) + if len(popped) != 1 { + t.Fatalf("Expected 1 task, received %d tasks", len(popped)) + } + if popped[0].Work != expSize { + t.Fatalf("Expected task.Work to be %d, received %d", expSize, popped[0].Work) + } + td := popped[0].Data.(*taskData) + if td.BlockSize != expBlockSize { + t.Fatalf("Expected task.Work to be %d, received %d", expBlockSize, td.BlockSize) + } + if td.IsWantBlock != expIsWantBlock { + t.Fatalf("Expected task.IsWantBlock to be %t, received %t", expIsWantBlock, td.IsWantBlock) + } + } + + isWantBlock := true + isWantHave := false + + // want-block (DONT_HAVE) should have no effect on existing want-block (DONT_HAVE) + runTestCase([]peertask.Task{wantBlockDontHave, wantBlockDontHave}, wantBlockDontHave.Work, wantBlockDontHaveBlockSize, isWantBlock) + // want-have (DONT_HAVE) should have no effect on existing want-block (DONT_HAVE) + runTestCase([]peertask.Task{wantBlockDontHave, wantHaveDontHave}, wantBlockDontHave.Work, wantBlockDontHaveBlockSize, isWantBlock) + // want-block with size should update existing want-block (DONT_HAVE) + runTestCase([]peertask.Task{wantBlockDontHave, wantBlock}, wantBlock.Work, wantBlockBlockSize, isWantBlock) + // want-have with size should update existing want-block (DONT_HAVE) size, + // but leave it as a want-block (ie should not change it to want-have) + runTestCase([]peertask.Task{wantBlockDontHave, wantHave}, wantHaveBlockSize, wantHaveBlockSize, isWantBlock) + + // want-block (DONT_HAVE) size should not update existing want-block with size + runTestCase([]peertask.Task{wantBlock, wantBlockDontHave}, wantBlock.Work, wantBlockBlockSize, isWantBlock) + // want-have (DONT_HAVE) should have no effect on existing want-block with size + runTestCase([]peertask.Task{wantBlock, wantHaveDontHave}, wantBlock.Work, wantBlockBlockSize, isWantBlock) + // want-block with size should have no effect on existing want-block with size + runTestCase([]peertask.Task{wantBlock, wantBlock}, wantBlock.Work, wantBlockBlockSize, isWantBlock) + // want-have with size should have no effect on existing want-block with size + runTestCase([]peertask.Task{wantBlock, wantHave}, wantBlock.Work, wantBlockBlockSize, isWantBlock) + + // want-block (DONT_HAVE) should update type and entry size of existing want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantBlockDontHave}, wantBlockDontHave.Work, wantBlockDontHaveBlockSize, isWantBlock) + // want-have (DONT_HAVE) should have no effect on existing want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantHaveDontHave}, wantHaveDontHave.Work, wantHaveDontHaveBlockSize, isWantHave) + // want-block with size should update existing want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantBlock}, wantBlock.Work, wantBlockBlockSize, isWantBlock) + // want-have with size should update existing want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantHave}, wantHave.Work, wantHaveBlockSize, isWantHave) + + // want-block (DONT_HAVE) should update type and entry size of existing want-have with size + runTestCase([]peertask.Task{wantHave, wantBlockDontHave}, wantHaveBlockSize, wantHaveBlockSize, isWantBlock) + // want-have (DONT_HAVE) should not update existing want-have with size + runTestCase([]peertask.Task{wantHave, wantHaveDontHave}, wantHave.Work, wantHaveBlockSize, isWantHave) + // want-block with size should update type and entry size of existing want-have with size + runTestCase([]peertask.Task{wantHave, wantBlock}, 
wantBlock.Work, wantBlockBlockSize, isWantBlock) + // want-have should have no effect on existing want-have + runTestCase([]peertask.Task{wantHave, wantHave}, wantHave.Work, wantHaveBlockSize, isWantHave) +} + +func TestPushHaveVsBlockActive(t *testing.T) { + partner := testutil.GeneratePeers(1)[0] + + wantBlock := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 10, + Data: &taskData{ + IsWantBlock: true, + BlockSize: 10, + HaveBlock: true, + SendDontHave: false, + }, + } + wantHave := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 1, + Data: &taskData{ + IsWantBlock: false, + BlockSize: 10, + HaveBlock: true, + SendDontHave: false, + }, + } + + runTestCase := func(tasks []peertask.Task, expCount int) { + tasks = cloneTasks(tasks) + ptq := peertaskqueue.New(peertaskqueue.TaskMerger(newTaskMerger())) + // ptq.PushTasks(partner, tasks...) + var popped []*peertask.Task + for _, task := range tasks { + // Push the task + // tracker.PushTasks([]peertask.Task{task}) + ptq.PushTasks(partner, task) + // Pop the task (which makes it active) + _, poppedTasks, _ := ptq.PopTasks(10) + popped = append(popped, poppedTasks...) + } + if len(popped) != expCount { + t.Fatalf("Expected %d tasks, received %d tasks", expCount, len(popped)) + } + } + + // should ignore second want-have + runTestCase([]peertask.Task{wantHave, wantHave}, 1) + // should ignore second want-block + runTestCase([]peertask.Task{wantBlock, wantBlock}, 1) + // want-have does not overwrite want-block + runTestCase([]peertask.Task{wantBlock, wantHave}, 1) + // can't replace want-have with want-block because want-have is active + runTestCase([]peertask.Task{wantHave, wantBlock}, 2) +} + +func TestPushSizeInfoActive(t *testing.T) { + partner := testutil.GeneratePeers(1)[0] + + wantBlock := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 10, + Data: &taskData{ + IsWantBlock: true, + BlockSize: 10, + HaveBlock: true, + SendDontHave: false, + }, + } + wantBlockDontHave := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 2, + Data: &taskData{ + IsWantBlock: true, + BlockSize: 0, + HaveBlock: false, + SendDontHave: false, + }, + } + wantHave := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 1, + Data: &taskData{ + IsWantBlock: false, + BlockSize: 10, + HaveBlock: true, + SendDontHave: false, + }, + } + wantHaveDontHave := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 1, + Data: &taskData{ + IsWantBlock: false, + BlockSize: 0, + HaveBlock: false, + SendDontHave: false, + }, + } + + runTestCase := func(tasks []peertask.Task, expTasks []peertask.Task) { + tasks = cloneTasks(tasks) + ptq := peertaskqueue.New(peertaskqueue.TaskMerger(newTaskMerger())) + var popped []*peertask.Task + for _, task := range tasks { + // Push the task + ptq.PushTasks(partner, task) + // Pop the task (which makes it active) + _, poppedTasks, _ := ptq.PopTasks(10) + popped = append(popped, poppedTasks...) 
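+			// Because the popped task is active, the merger cannot modify it:
+			// a later push for the same Topic is either dropped (when it adds
+			// no new information) or queued as a separate task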
+ } + if len(popped) != len(expTasks) { + t.Fatalf("Expected %d tasks, received %d tasks", len(expTasks), len(popped)) + } + for i, task := range popped { + td := task.Data.(*taskData) + expTd := expTasks[i].Data.(*taskData) + if td.IsWantBlock != expTd.IsWantBlock { + t.Fatalf("Expected IsWantBlock to be %t, received %t", expTd.IsWantBlock, td.IsWantBlock) + } + if task.Work != expTasks[i].Work { + t.Fatalf("Expected Size to be %d, received %d", expTasks[i].Work, task.Work) + } + } + } + + // second want-block (DONT_HAVE) should be ignored + runTestCase([]peertask.Task{wantBlockDontHave, wantBlockDontHave}, []peertask.Task{wantBlockDontHave}) + // want-have (DONT_HAVE) should be ignored if there is existing active want-block (DONT_HAVE) + runTestCase([]peertask.Task{wantBlockDontHave, wantHaveDontHave}, []peertask.Task{wantBlockDontHave}) + // want-block with size should be added if there is existing active want-block (DONT_HAVE) + runTestCase([]peertask.Task{wantBlockDontHave, wantBlock}, []peertask.Task{wantBlockDontHave, wantBlock}) + // want-have with size should be added if there is existing active want-block (DONT_HAVE) + runTestCase([]peertask.Task{wantBlockDontHave, wantHave}, []peertask.Task{wantBlockDontHave, wantHave}) + + // want-block (DONT_HAVE) should be added if there is existing active want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantBlockDontHave}, []peertask.Task{wantHaveDontHave, wantBlockDontHave}) + // want-have (DONT_HAVE) should be ignored if there is existing active want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantHaveDontHave}, []peertask.Task{wantHaveDontHave}) + // want-block with size should be added if there is existing active want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantBlock}, []peertask.Task{wantHaveDontHave, wantBlock}) + // want-have with size should be added if there is existing active want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantHave}, []peertask.Task{wantHaveDontHave, wantHave}) + + // want-block (DONT_HAVE) should be ignored if there is existing active want-block with size + runTestCase([]peertask.Task{wantBlock, wantBlockDontHave}, []peertask.Task{wantBlock}) + // want-have (DONT_HAVE) should be ignored if there is existing active want-block with size + runTestCase([]peertask.Task{wantBlock, wantHaveDontHave}, []peertask.Task{wantBlock}) + // second want-block with size should be ignored + runTestCase([]peertask.Task{wantBlock, wantBlock}, []peertask.Task{wantBlock}) + // want-have with size should be ignored if there is existing active want-block with size + runTestCase([]peertask.Task{wantBlock, wantHave}, []peertask.Task{wantBlock}) + + // want-block (DONT_HAVE) should be added if there is existing active want-have with size + runTestCase([]peertask.Task{wantHave, wantBlockDontHave}, []peertask.Task{wantHave, wantBlockDontHave}) + // want-have (DONT_HAVE) should be ignored if there is existing active want-have with size + runTestCase([]peertask.Task{wantHave, wantHaveDontHave}, []peertask.Task{wantHave}) + // second want-have with size should be ignored + runTestCase([]peertask.Task{wantHave, wantHave}, []peertask.Task{wantHave}) + // want-block with size should be added if there is existing active want-have with size + runTestCase([]peertask.Task{wantHave, wantBlock}, []peertask.Task{wantHave, wantBlock}) +} + +func cloneTasks(tasks []peertask.Task) []peertask.Task { + var cp []peertask.Task + for _, t := range tasks { + td := 
t.Data.(*taskData) + cp = append(cp, peertask.Task{ + Topic: t.Topic, + Priority: t.Priority, + Work: t.Work, + Data: &taskData{ + IsWantBlock: td.IsWantBlock, + BlockSize: td.BlockSize, + HaveBlock: td.HaveBlock, + SendDontHave: td.SendDontHave, + }, + }) + } + return cp +} diff --git a/go.mod b/go.mod index 14a53e64..1684e2c1 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,10 @@ module github.com/ipfs/go-bitswap require ( + github.com/bep/debounce v1.2.0 github.com/cskr/pubsub v1.0.2 github.com/gogo/protobuf v1.3.1 + github.com/golang/protobuf v1.3.2 // indirect github.com/google/uuid v1.1.1 github.com/ipfs/go-block-format v0.0.2 github.com/ipfs/go-cid v0.0.3 @@ -16,7 +18,7 @@ require ( github.com/ipfs/go-ipfs-util v0.0.1 github.com/ipfs/go-log v0.0.1 github.com/ipfs/go-metrics-interface v0.0.1 - github.com/ipfs/go-peertaskqueue v0.1.1 + github.com/ipfs/go-peertaskqueue v0.2.0 github.com/jbenet/goprocess v0.1.3 github.com/libp2p/go-buffer-pool v0.0.2 github.com/libp2p/go-libp2p v0.4.2 diff --git a/go.sum b/go.sum index 2051997e..62ab2c9f 100644 --- a/go.sum +++ b/go.sum @@ -5,6 +5,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/bep/debounce v1.2.0 h1:wXds8Kq8qRfwAOpAxHrJDbCXgC5aHSzgQb/0gKsHQqo= +github.com/bep/debounce v1.2.0/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0= github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32 h1:qkOC5Gd33k54tobS36cXdAzJbeHaduLtnLQQwNoIi78= github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c h1:aEbSeNALREWXk0G7UdNhR3ayBV7tZ4M2PNmnrCAph6Q= @@ -45,7 +47,6 @@ github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJY github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.0 h1:G8O7TerXerS4F6sx9OV7/nRfJdnXgHZu/S/7F2SN+UE= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= @@ -58,6 +59,8 @@ github.com/golang/protobuf v1.3.0 h1:kbxbvI4Un1LUWKxufD+BiE6AEExYYgkQLQmLFqA1LFk github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= @@ -93,9 +96,7 @@ github.com/ipfs/go-cid 
v0.0.3 h1:UIAh32wymBpStoe83YCzwVQQ5Oy/H0FdxvUS6DJDzms= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-datastore v0.0.1 h1:AW/KZCScnBWlSb5JbnEnLKFWXL224LBEh/9KXXOrUms= github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= -github.com/ipfs/go-datastore v0.1.0 h1:TOxI04l8CmO4zGtesENhzm4PwkFwJXY3rKiYaaMf9fI= github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= -github.com/ipfs/go-datastore v0.1.1 h1:F4k0TkTAZGLFzBOrVKDAvch6JZtuN4NHkfdcEZL50aI= github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.3.1 h1:SS1t869a6cctoSYmZXUk8eL6AzVXgASmKIWFNQkQ1jU= github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= @@ -117,8 +118,8 @@ github.com/ipfs/go-ipfs-ds-help v0.0.1 h1:QBg+Ts2zgeemK/dB0saiF/ykzRGgfoFMT90Rzo github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= github.com/ipfs/go-ipfs-exchange-interface v0.0.1 h1:LJXIo9W7CAmugqI+uofioIpRb6rY30GUu7G6LUfpMvM= github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= -github.com/ipfs/go-ipfs-pq v0.0.1 h1:zgUotX8dcAB/w/HidJh1zzc1yFq6Vm8J7T2F4itj/RU= -github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= +github.com/ipfs/go-ipfs-pq v0.0.2 h1:e1vOOW6MuOwG2lqxcLA+wEn93i/9laCY8sXAw76jFOY= +github.com/ipfs/go-ipfs-pq v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= github.com/ipfs/go-ipfs-routing v0.1.0 h1:gAJTT1cEeeLj6/DlLX6t+NxD9fQe2ymTO6qWRDI/HQQ= github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= @@ -127,8 +128,8 @@ github.com/ipfs/go-log v0.0.1 h1:9XTUN/rW64BCG1YhPK9Hoy3q8nr4gOmHHBpgFdfw6Lc= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= -github.com/ipfs/go-peertaskqueue v0.1.1 h1:+gPjbI+V3NktXZOqJA1kzbms2pYmhjgQQal0MzZrOAY= -github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= +github.com/ipfs/go-peertaskqueue v0.2.0 h1:2cSr7exUGKYyDeUyQ7P/nHPs9P7Ht/B+ROrpN1EJOjc= +github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUnr+P7jivavs9lY= github.com/jackpal/gateway v1.0.5 h1:qzXWUJfuMdlLMtt0a3Dgt+xkWQiA5itDEITVJtuSwMc= github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= github.com/jackpal/go-nat-pmp v1.0.1 h1:i0LektDkO1QlrTm/cSuP+PyBCDnYvjPLGl4LdWEMiaA= @@ -210,7 +211,6 @@ github.com/libp2p/go-libp2p-peer v0.2.0 h1:EQ8kMjaCUwt/Y5uLgjT8iY2qg0mGUT0N1zUje github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= github.com/libp2p/go-libp2p-peerstore v0.1.0 h1:MKh7pRNPHSh1fLPj8u/M/s/napdmeNpoi9BRy9lPN0E= github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY= -github.com/libp2p/go-libp2p-peerstore v0.1.3 h1:wMgajt1uM2tMiqf4M+4qWKVyyFc8SfA+84VV9glZq1M= github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI= github.com/libp2p/go-libp2p-peerstore v0.1.4 h1:d23fvq5oYMJ/lkkbO4oTwBp/JP+I/1m5gZJobNXCE/k= 
github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVdLxvqh7hXOujessqdzs= @@ -218,7 +218,6 @@ github.com/libp2p/go-libp2p-record v0.1.0 h1:wHwBGbFzymoIl69BpgwIu0O6ta3TXGcMPvH github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= github.com/libp2p/go-libp2p-secio v0.1.0 h1:NNP5KLxuP97sE5Bu3iuwOWyT/dKEGMN5zSLMWdB7GTQ= github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= -github.com/libp2p/go-libp2p-secio v0.2.0 h1:ywzZBsWEEz2KNTn5RtzauEDq5RFEefPsttXYwAWqHng= github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= github.com/libp2p/go-libp2p-secio v0.2.1 h1:eNWbJTdyPA7NxhP7J3c5lT97DC5d+u+IldkgCYFTPVA= github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8= @@ -231,7 +230,6 @@ github.com/libp2p/go-libp2p-testing v0.0.3 h1:bdij4bKaaND7tCsaXVjRfYkMpvoOeKj9AV github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.4 h1:Qev57UR47GcLPXWjrunv5aLIQGO4n9mhI/8/EIrEEFc= github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= -github.com/libp2p/go-libp2p-testing v0.1.0 h1:WaFRj/t3HdMZGNZqnU2pS7pDRBmMeoDx7/HDNpeyT9U= github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= github.com/libp2p/go-libp2p-testing v0.1.1 h1:U03z3HnGI7Ni8Xx6ONVZvUFOAzWYmolWf5W5jAOPNmU= github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= @@ -254,7 +252,6 @@ github.com/libp2p/go-msgio v0.0.4 h1:agEFehY3zWJFUHK6SEMR7UYmk2z6kC3oeCM7ybLhguA github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-nat v0.0.4 h1:KbizNnq8YIf7+Hn7+VFL/xE0eDrkPru2zIO9NMwL8UQ= github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= -github.com/libp2p/go-openssl v0.0.2 h1:9pP2d3Ubaxkv7ZisLjx9BFwgOGnQdQYnfcH29HNY3ls= github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= github.com/libp2p/go-openssl v0.0.3 h1:wjlG7HvQkt4Fq4cfH33Ivpwp0omaElYEi9z26qaIkIk= github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= @@ -277,7 +274,6 @@ github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZ github.com/libp2p/go-yamux v1.2.3 h1:xX8A36vpXb59frIzWFdEgptLMsOANMFq2K7fPRlunYI= github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg= @@ -317,10 +313,7 @@ github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lg github.com/multiformats/go-multiaddr v0.0.4 h1:WgMSI84/eRLdbptXMkMWDXPjPq7SPLIgGUVm2eroyU4= github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= -github.com/multiformats/go-multiaddr v0.1.1 
h1:rVAztJYMhCQ7vEFr8FvxW3mS+HF2eY/oPbOMeS0ZDnE= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= -github.com/multiformats/go-multiaddr v0.1.2 h1:HWYHNSyyllbQopmVIF5K7JKJugiah+L9/kuZKHbmNdQ= -github.com/multiformats/go-multiaddr v0.1.2/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= github.com/multiformats/go-multiaddr v0.2.0 h1:lR52sFwcTCuQb6bTfnXF6zA2XfyYvyd+5a9qECv/J90= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= @@ -334,7 +327,6 @@ github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/e github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= github.com/multiformats/go-multiaddr-net v0.0.1 h1:76O59E3FavvHqNg7jvzWzsPSW5JSi/ek0E4eiDVbg9g= github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU= -github.com/multiformats/go-multiaddr-net v0.1.0 h1:ZepO8Ezwovd+7b5XPPDhQhayk1yt0AJpzQBpq9fejx4= github.com/multiformats/go-multiaddr-net v0.1.0/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= github.com/multiformats/go-multiaddr-net v0.1.1 h1:jFFKUuXTXv+3ARyHZi3XUqQO+YWMKgBdhEvuGRfnL6s= github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= @@ -370,7 +362,6 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/smola/gocompat v0.2.0 h1:6b1oIMlUXIpz//VKEDzPVBK8KG7beVwmHIUEBIs/Pns= github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a h1:/eS3yfGjQKG+9kayBkj0ip1BGhq6zJ3eaVksphxAaek= github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= @@ -463,7 +454,6 @@ golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= diff --git a/logutil/logutil.go b/logutil/logutil.go new file mode 100644 index 00000000..8cba2a47 --- /dev/null +++ b/logutil/logutil.go @@ -0,0 +1,26 @@ +package logutil + +import ( + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p-core/peer" +) + +func C(c cid.Cid) string { + if c.Defined() { + str := c.String() + return str[len(str)-6:] + } + return "" +} + +func P(p peer.ID) string { + if p != "" { + str := p.String() + limit := 6 + if len(str) < limit { + limit = 
len(str) + } + return str[len(str)-limit:] + } + return "" +} diff --git a/message/message.go b/message/message.go index 08c85ea6..c4ea0fd1 100644 --- a/message/message.go +++ b/message/message.go @@ -6,9 +6,9 @@ import ( "io" pb "github.com/ipfs/go-bitswap/message/pb" - wantlist "github.com/ipfs/go-bitswap/wantlist" - blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-bitswap/wantlist" + blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" pool "github.com/libp2p/go-buffer-pool" msgio "github.com/libp2p/go-msgio" @@ -25,18 +25,43 @@ type BitSwapMessage interface { // Blocks returns a slice of unique blocks. Blocks() []blocks.Block + // BlockPresences returns the list of HAVE / DONT_HAVE in the message + BlockPresences() []BlockPresence + // Haves returns the Cids for each HAVE + Haves() []cid.Cid + // DontHaves returns the Cids for each DONT_HAVE + DontHaves() []cid.Cid + // PendingBytes returns the number of outstanding bytes of data that the + // engine has yet to send to the client (because they didn't fit in this + // message) + PendingBytes() int32 // AddEntry adds an entry to the Wantlist. - AddEntry(key cid.Cid, priority int) + AddEntry(key cid.Cid, priority int, wantType pb.Message_Wantlist_WantType, sendDontHave bool) int - Cancel(key cid.Cid) + // Cancel adds a CANCEL for the given CID to the message + // Returns the size of the CANCEL entry in the protobuf + Cancel(key cid.Cid) int + // Empty indicates whether the message has any information Empty() bool + // Size returns the size of the message in bytes + Size() int // A full wantlist is an authoritative copy, a 'non-full' wantlist is a patch-set Full() bool + // AddBlock adds a block to the message AddBlock(blocks.Block) + // AddBlockPresence adds a HAVE / DONT_HAVE for the given Cid to the message + AddBlockPresence(cid.Cid, pb.Message_BlockPresenceType) + // AddHave adds a HAVE for the given Cid to the message + AddHave(cid.Cid) + // AddDontHave adds a DONT_HAVE for the given Cid to the message + AddDontHave(cid.Cid) + // SetPendingBytes sets the number of bytes of data that are yet to be sent + // to the client (because they didn't fit in this message) + SetPendingBytes(int32) Exportable Loggable() map[string]interface{} @@ -45,16 +70,27 @@ type BitSwapMessage interface { // Exportable is an interface for structures than can be // encoded in a bitswap protobuf. type Exportable interface { + // Note that older Bitswap versions use a different wire format, so we need + // to convert the message to the appropriate format depending on which + // version of the protocol the remote peer supports. 
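The Exportable comment above notes that older Bitswap peers use a different wire format. As an illustrative aside (not part of this diff), a caller could pick the encoding per peer roughly like the sketch below; the legacyPeer flag is a stand-in for whatever protocol negotiation the network layer actually performs.

package example

import (
    "bytes"

    bsmsg "github.com/ipfs/go-bitswap/message"
)

// encodeForPeer serializes a message in the wire format the remote peer
// understands: Bitswap 1.0.0 peers get ToNetV0, newer peers get ToNetV1,
// which also carries CID prefixes, block presences and pending bytes.
// legacyPeer is an assumption for illustration only.
func encodeForPeer(msg bsmsg.BitSwapMessage, legacyPeer bool) ([]byte, error) {
    var buf bytes.Buffer
    encode := msg.ToNetV1
    if legacyPeer {
        encode = msg.ToNetV0
    }
    if err := encode(&buf); err != nil {
        return nil, err
    }
    return buf.Bytes(), nil
}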
ToProtoV0() *pb.Message ToProtoV1() *pb.Message ToNetV0(w io.Writer) error ToNetV1(w io.Writer) error } +// BlockPresence represents a HAVE / DONT_HAVE for a given Cid +type BlockPresence struct { + Cid cid.Cid + Type pb.Message_BlockPresenceType +} + type impl struct { - full bool - wantlist map[cid.Cid]*Entry - blocks map[cid.Cid]blocks.Block + full bool + wantlist map[cid.Cid]*Entry + blocks map[cid.Cid]blocks.Block + blockPresences map[cid.Cid]pb.Message_BlockPresenceType + pendingBytes int32 } // New returns a new, empty bitswap message @@ -64,17 +100,21 @@ func New(full bool) BitSwapMessage { func newMsg(full bool) *impl { return &impl{ - blocks: make(map[cid.Cid]blocks.Block), - wantlist: make(map[cid.Cid]*Entry), - full: full, + blocks: make(map[cid.Cid]blocks.Block), + blockPresences: make(map[cid.Cid]pb.Message_BlockPresenceType), + wantlist: make(map[cid.Cid]*Entry), + full: full, } } -// Entry is an wantlist entry in a Bitswap message (along with whether it's an -// add or cancel). +// Entry is a wantlist entry in a Bitswap message, with flags indicating +// - whether message is a cancel +// - whether requester wants a DONT_HAVE message +// - whether requester wants a HAVE message (instead of the block) type Entry struct { wantlist.Entry - Cancel bool + Cancel bool + SendDontHave bool } func newMessageFromProto(pbm pb.Message) (BitSwapMessage, error) { @@ -84,7 +124,7 @@ func newMessageFromProto(pbm pb.Message) (BitSwapMessage, error) { if err != nil { return nil, fmt.Errorf("incorrectly formatted cid in wantlist: %s", err) } - m.addEntry(c, int(e.Priority), e.Cancel) + m.addEntry(c, int(e.Priority), e.Cancel, e.WantType, e.SendDontHave) } // deprecated @@ -114,6 +154,18 @@ func newMessageFromProto(pbm pb.Message) (BitSwapMessage, error) { m.AddBlock(blk) } + for _, bi := range pbm.GetBlockPresences() { + c, err := cid.Cast(bi.GetCid()) + if err != nil { + return nil, err + } + + t := bi.GetType() + m.AddBlockPresence(c, t) + } + + m.pendingBytes = pbm.PendingBytes + return m, nil } @@ -122,7 +174,7 @@ func (m *impl) Full() bool { } func (m *impl) Empty() bool { - return len(m.blocks) == 0 && len(m.wantlist) == 0 + return len(m.blocks) == 0 && len(m.wantlist) == 0 && len(m.blockPresences) == 0 } func (m *impl) Wantlist() []Entry { @@ -141,35 +193,129 @@ func (m *impl) Blocks() []blocks.Block { return bs } -func (m *impl) Cancel(k cid.Cid) { - delete(m.wantlist, k) - m.addEntry(k, 0, true) +func (m *impl) BlockPresences() []BlockPresence { + bps := make([]BlockPresence, 0, len(m.blockPresences)) + for c, t := range m.blockPresences { + bps = append(bps, BlockPresence{c, t}) + } + return bps +} + +func (m *impl) Haves() []cid.Cid { + return m.getBlockPresenceByType(pb.Message_Have) +} + +func (m *impl) DontHaves() []cid.Cid { + return m.getBlockPresenceByType(pb.Message_DontHave) +} + +func (m *impl) getBlockPresenceByType(t pb.Message_BlockPresenceType) []cid.Cid { + cids := make([]cid.Cid, 0, len(m.blockPresences)) + for c, bpt := range m.blockPresences { + if bpt == t { + cids = append(cids, c) + } + } + return cids +} + +func (m *impl) PendingBytes() int32 { + return m.pendingBytes } -func (m *impl) AddEntry(k cid.Cid, priority int) { - m.addEntry(k, priority, false) +func (m *impl) SetPendingBytes(pendingBytes int32) { + m.pendingBytes = pendingBytes } -func (m *impl) addEntry(c cid.Cid, priority int, cancel bool) { +func (m *impl) Cancel(k cid.Cid) int { + return m.addEntry(k, 0, true, pb.Message_Wantlist_Block, false) +} + +func (m *impl) AddEntry(k cid.Cid, priority int, 
wantType pb.Message_Wantlist_WantType, sendDontHave bool) int { + return m.addEntry(k, priority, false, wantType, sendDontHave) +} + +func (m *impl) addEntry(c cid.Cid, priority int, cancel bool, wantType pb.Message_Wantlist_WantType, sendDontHave bool) int { e, exists := m.wantlist[c] if exists { - e.Priority = priority - e.Cancel = cancel - } else { - m.wantlist[c] = &Entry{ - Entry: wantlist.Entry{ - Cid: c, - Priority: priority, - }, - Cancel: cancel, + // Only change priority if want is of the same type + if e.WantType == wantType { + e.Priority = priority + } + // Only change from "dont cancel" to "do cancel" + if cancel { + e.Cancel = cancel } + // Only change from "dont send" to "do send" DONT_HAVE + if sendDontHave { + e.SendDontHave = sendDontHave + } + // want-block overrides existing want-have + if wantType == pb.Message_Wantlist_Block && e.WantType == pb.Message_Wantlist_Have { + e.WantType = wantType + } + m.wantlist[c] = e + return 0 } + + e = &Entry{ + Entry: wantlist.Entry{ + Cid: c, + Priority: priority, + WantType: wantType, + }, + SendDontHave: sendDontHave, + Cancel: cancel, + } + m.wantlist[c] = e + + aspb := entryToPB(e) + return aspb.Size() } func (m *impl) AddBlock(b blocks.Block) { + delete(m.blockPresences, b.Cid()) m.blocks[b.Cid()] = b } +func (m *impl) AddBlockPresence(c cid.Cid, t pb.Message_BlockPresenceType) { + if _, ok := m.blocks[c]; ok { + return + } + m.blockPresences[c] = t +} + +func (m *impl) AddHave(c cid.Cid) { + m.AddBlockPresence(c, pb.Message_Have) +} + +func (m *impl) AddDontHave(c cid.Cid) { + m.AddBlockPresence(c, pb.Message_DontHave) +} + +func (m *impl) Size() int { + size := 0 + for _, block := range m.blocks { + size += len(block.RawData()) + } + for c := range m.blockPresences { + size += BlockPresenceSize(c) + } + for _, e := range m.wantlist { + epb := entryToPB(e) + size += epb.Size() + } + + return size +} + +func BlockPresenceSize(c cid.Cid) int { + return (&pb.Message_BlockPresence{ + Cid: c.Bytes(), + Type: pb.Message_Have, + }).Size() +} + // FromNet generates a new BitswapMessage from incoming data on an io.Reader. 
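To make the new size accounting concrete, here is a hedged sketch (not code from this diff) of how a responding peer might fill a message under a byte budget using only the methods added above; the maxSize parameter and the blks/haves inputs are illustrative, not taken from the engine code.

package example

import (
    blocks "github.com/ipfs/go-block-format"
    bsmsg "github.com/ipfs/go-bitswap/message"
    cid "github.com/ipfs/go-cid"
)

// buildResponse adds blocks while the message stays under maxSize, records
// the bytes that did not fit via SetPendingBytes, and advertises the
// remaining matched CIDs with HAVEs. AddHave is a no-op for CIDs whose
// block is already in the message.
func buildResponse(blks []blocks.Block, haves []cid.Cid, maxSize int) bsmsg.BitSwapMessage {
    msg := bsmsg.New(false)
    pending := 0
    for _, b := range blks {
        if msg.Size()+len(b.RawData()) > maxSize {
            // Didn't fit: tell the peer how much data is still queued
            pending += len(b.RawData())
            continue
        }
        msg.AddBlock(b)
    }
    for _, c := range haves {
        msg.AddHave(c)
    }
    msg.SetPendingBytes(int32(pending))
    return msg
}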
func FromNet(r io.Reader) (BitSwapMessage, error) { reader := msgio.NewVarintReaderSize(r, network.MessageSizeMax) @@ -193,15 +339,21 @@ func FromMsgReader(r msgio.Reader) (BitSwapMessage, error) { return newMessageFromProto(pb) } +func entryToPB(e *Entry) pb.Message_Wantlist_Entry { + return pb.Message_Wantlist_Entry{ + Block: e.Cid.Bytes(), + Priority: int32(e.Priority), + Cancel: e.Cancel, + WantType: e.WantType, + SendDontHave: e.SendDontHave, + } +} + func (m *impl) ToProtoV0() *pb.Message { pbm := new(pb.Message) pbm.Wantlist.Entries = make([]pb.Message_Wantlist_Entry, 0, len(m.wantlist)) for _, e := range m.wantlist { - pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, pb.Message_Wantlist_Entry{ - Block: e.Cid.Bytes(), - Priority: int32(e.Priority), - Cancel: e.Cancel, - }) + pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, entryToPB(e)) } pbm.Wantlist.Full = m.full @@ -217,11 +369,7 @@ func (m *impl) ToProtoV1() *pb.Message { pbm := new(pb.Message) pbm.Wantlist.Entries = make([]pb.Message_Wantlist_Entry, 0, len(m.wantlist)) for _, e := range m.wantlist { - pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, pb.Message_Wantlist_Entry{ - Block: e.Cid.Bytes(), - Priority: int32(e.Priority), - Cancel: e.Cancel, - }) + pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, entryToPB(e)) } pbm.Wantlist.Full = m.full @@ -233,6 +381,17 @@ func (m *impl) ToProtoV1() *pb.Message { Prefix: b.Cid().Prefix().Bytes(), }) } + + pbm.BlockPresences = make([]pb.Message_BlockPresence, 0, len(m.blockPresences)) + for c, t := range m.blockPresences { + pbm.BlockPresences = append(pbm.BlockPresences, pb.Message_BlockPresence{ + Cid: c.Bytes(), + Type: t, + }) + } + + pbm.PendingBytes = m.PendingBytes() + return pbm } diff --git a/message/message_test.go b/message/message_test.go index 686ac4a4..4b51a3cc 100644 --- a/message/message_test.go +++ b/message/message_test.go @@ -18,7 +18,7 @@ func mkFakeCid(s string) cid.Cid { func TestAppendWanted(t *testing.T) { str := mkFakeCid("foo") m := New(true) - m.AddEntry(str, 1) + m.AddEntry(str, 1, pb.Message_Wantlist_Block, true) if !wantlistContains(&m.ToProtoV0().Wantlist, str) { t.Fail() @@ -69,7 +69,7 @@ func TestWantlist(t *testing.T) { keystrs := []cid.Cid{mkFakeCid("foo"), mkFakeCid("bar"), mkFakeCid("baz"), mkFakeCid("bat")} m := New(true) for _, s := range keystrs { - m.AddEntry(s, 1) + m.AddEntry(s, 1, pb.Message_Wantlist_Block, true) } exported := m.Wantlist() @@ -92,7 +92,7 @@ func TestCopyProtoByValue(t *testing.T) { str := mkFakeCid("foo") m := New(true) protoBeforeAppend := m.ToProtoV0() - m.AddEntry(str, 1) + m.AddEntry(str, 1, pb.Message_Wantlist_Block, true) if wantlistContains(&protoBeforeAppend.Wantlist, str) { t.Fail() } @@ -100,11 +100,11 @@ func TestCopyProtoByValue(t *testing.T) { func TestToNetFromNetPreservesWantList(t *testing.T) { original := New(true) - original.AddEntry(mkFakeCid("M"), 1) - original.AddEntry(mkFakeCid("B"), 1) - original.AddEntry(mkFakeCid("D"), 1) - original.AddEntry(mkFakeCid("T"), 1) - original.AddEntry(mkFakeCid("F"), 1) + original.AddEntry(mkFakeCid("M"), 1, pb.Message_Wantlist_Block, true) + original.AddEntry(mkFakeCid("B"), 1, pb.Message_Wantlist_Block, true) + original.AddEntry(mkFakeCid("D"), 1, pb.Message_Wantlist_Block, true) + original.AddEntry(mkFakeCid("T"), 1, pb.Message_Wantlist_Block, true) + original.AddEntry(mkFakeCid("F"), 1, pb.Message_Wantlist_Block, true) buf := new(bytes.Buffer) if err := original.ToNetV1(buf); err != nil { @@ -184,8 +184,8 @@ func TestDuplicates(t *testing.T) { b := 
blocks.NewBlock([]byte("foo")) msg := New(true) - msg.AddEntry(b.Cid(), 1) - msg.AddEntry(b.Cid(), 1) + msg.AddEntry(b.Cid(), 1, pb.Message_Wantlist_Block, true) + msg.AddEntry(b.Cid(), 1, pb.Message_Wantlist_Block, true) if len(msg.Wantlist()) != 1 { t.Fatal("Duplicate in BitSwapMessage") } @@ -195,4 +195,97 @@ func TestDuplicates(t *testing.T) { if len(msg.Blocks()) != 1 { t.Fatal("Duplicate in BitSwapMessage") } + + b2 := blocks.NewBlock([]byte("bar")) + msg.AddBlockPresence(b2.Cid(), pb.Message_Have) + msg.AddBlockPresence(b2.Cid(), pb.Message_Have) + if len(msg.Haves()) != 1 { + t.Fatal("Duplicate in BitSwapMessage") + } +} + +func TestBlockPresences(t *testing.T) { + b1 := blocks.NewBlock([]byte("foo")) + b2 := blocks.NewBlock([]byte("bar")) + msg := New(true) + + msg.AddBlockPresence(b1.Cid(), pb.Message_Have) + msg.AddBlockPresence(b2.Cid(), pb.Message_DontHave) + if len(msg.Haves()) != 1 || !msg.Haves()[0].Equals(b1.Cid()) { + t.Fatal("Expected HAVE") + } + if len(msg.DontHaves()) != 1 || !msg.DontHaves()[0].Equals(b2.Cid()) { + t.Fatal("Expected HAVE") + } + + msg.AddBlock(b1) + if len(msg.Haves()) != 0 { + t.Fatal("Expected block to overwrite HAVE") + } + + msg.AddBlock(b2) + if len(msg.DontHaves()) != 0 { + t.Fatal("Expected block to overwrite DONT_HAVE") + } + + msg.AddBlockPresence(b1.Cid(), pb.Message_Have) + if len(msg.Haves()) != 0 { + t.Fatal("Expected HAVE not to overwrite block") + } + + msg.AddBlockPresence(b2.Cid(), pb.Message_DontHave) + if len(msg.DontHaves()) != 0 { + t.Fatal("Expected DONT_HAVE not to overwrite block") + } +} + +func TestAddWantlistEntry(t *testing.T) { + b := blocks.NewBlock([]byte("foo")) + msg := New(true) + + msg.AddEntry(b.Cid(), 1, pb.Message_Wantlist_Have, false) + msg.AddEntry(b.Cid(), 2, pb.Message_Wantlist_Block, true) + entries := msg.Wantlist() + if len(entries) != 1 { + t.Fatal("Duplicate in BitSwapMessage") + } + e := entries[0] + if e.WantType != pb.Message_Wantlist_Block { + t.Fatal("want-block should override want-have") + } + if e.SendDontHave != true { + t.Fatal("true SendDontHave should override false SendDontHave") + } + if e.Priority != 1 { + t.Fatal("priority should only be overridden if wants are of same type") + } + + msg.AddEntry(b.Cid(), 2, pb.Message_Wantlist_Block, true) + e = msg.Wantlist()[0] + if e.Priority != 2 { + t.Fatal("priority should be overridden if wants are of same type") + } + + msg.AddEntry(b.Cid(), 3, pb.Message_Wantlist_Have, false) + e = msg.Wantlist()[0] + if e.WantType != pb.Message_Wantlist_Block { + t.Fatal("want-have should not override want-block") + } + if e.SendDontHave != true { + t.Fatal("false SendDontHave should not override true SendDontHave") + } + if e.Priority != 2 { + t.Fatal("priority should only be overridden if wants are of same type") + } + + msg.Cancel(b.Cid()) + e = msg.Wantlist()[0] + if !e.Cancel { + t.Fatal("cancel should override want") + } + + msg.AddEntry(b.Cid(), 10, pb.Message_Wantlist_Block, true) + if !e.Cancel { + t.Fatal("want should not override cancel") + } } diff --git a/message/pb/message.pb.go b/message/pb/message.pb.go index adf14da8..b64e3082 100644 --- a/message/pb/message.pb.go +++ b/message/pb/message.pb.go @@ -21,12 +21,64 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
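The regenerated protobuf code that follows adds wantType, sendDontHave, blockPresences and pendingBytes to the wire format. As a quick, purely illustrative sanity check (not part of this diff), the new fields can be exercised with a marshal/unmarshal round trip of the generated type; the CID bytes below are placeholders that only the protobuf layer would accept.

package example

import (
    "fmt"

    pb "github.com/ipfs/go-bitswap/message/pb"
)

// roundTripPB marshals and unmarshals a pb.Message that uses the new
// fields. The placeholder bytes are not valid CIDs, so this checks
// wire-format plumbing only, not message semantics.
func roundTripPB() error {
    in := pb.Message{
        Wantlist: pb.Message_Wantlist{
            Entries: []pb.Message_Wantlist_Entry{
                {Block: []byte("placeholder"), Priority: 1, WantType: pb.Message_Wantlist_Have, SendDontHave: true},
            },
        },
        BlockPresences: []pb.Message_BlockPresence{
            {Cid: []byte("placeholder"), Type: pb.Message_DontHave},
        },
        PendingBytes: 42,
    }

    data, err := in.Marshal()
    if err != nil {
        return err
    }

    var out pb.Message
    if err := out.Unmarshal(data); err != nil {
        return err
    }
    if out.PendingBytes != in.PendingBytes ||
        len(out.BlockPresences) != 1 ||
        len(out.Wantlist.Entries) != 1 ||
        !out.Wantlist.Entries[0].SendDontHave {
        return fmt.Errorf("round trip dropped new fields")
    }
    return nil
}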
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Message_BlockPresenceType int32 + +const ( + Message_Have Message_BlockPresenceType = 0 + Message_DontHave Message_BlockPresenceType = 1 +) + +var Message_BlockPresenceType_name = map[int32]string{ + 0: "Have", + 1: "DontHave", +} + +var Message_BlockPresenceType_value = map[string]int32{ + "Have": 0, + "DontHave": 1, +} + +func (x Message_BlockPresenceType) String() string { + return proto.EnumName(Message_BlockPresenceType_name, int32(x)) +} + +func (Message_BlockPresenceType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_33c57e4bae7b9afd, []int{0, 0} +} + +type Message_Wantlist_WantType int32 + +const ( + Message_Wantlist_Block Message_Wantlist_WantType = 0 + Message_Wantlist_Have Message_Wantlist_WantType = 1 +) + +var Message_Wantlist_WantType_name = map[int32]string{ + 0: "Block", + 1: "Have", +} + +var Message_Wantlist_WantType_value = map[string]int32{ + "Block": 0, + "Have": 1, +} + +func (x Message_Wantlist_WantType) String() string { + return proto.EnumName(Message_Wantlist_WantType_name, int32(x)) +} + +func (Message_Wantlist_WantType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_33c57e4bae7b9afd, []int{0, 0, 0} +} type Message struct { - Wantlist Message_Wantlist `protobuf:"bytes,1,opt,name=wantlist,proto3" json:"wantlist"` - Blocks [][]byte `protobuf:"bytes,2,rep,name=blocks,proto3" json:"blocks,omitempty"` - Payload []Message_Block `protobuf:"bytes,3,rep,name=payload,proto3" json:"payload"` + Wantlist Message_Wantlist `protobuf:"bytes,1,opt,name=wantlist,proto3" json:"wantlist"` + Blocks [][]byte `protobuf:"bytes,2,rep,name=blocks,proto3" json:"blocks,omitempty"` + Payload []Message_Block `protobuf:"bytes,3,rep,name=payload,proto3" json:"payload"` + BlockPresences []Message_BlockPresence `protobuf:"bytes,4,rep,name=blockPresences,proto3" json:"blockPresences"` + PendingBytes int32 `protobuf:"varint,5,opt,name=pendingBytes,proto3" json:"pendingBytes,omitempty"` } func (m *Message) Reset() { *m = Message{} } @@ -83,6 +135,20 @@ func (m *Message) GetPayload() []Message_Block { return nil } +func (m *Message) GetBlockPresences() []Message_BlockPresence { + if m != nil { + return m.BlockPresences + } + return nil +} + +func (m *Message) GetPendingBytes() int32 { + if m != nil { + return m.PendingBytes + } + return 0 +} + type Message_Wantlist struct { Entries []Message_Wantlist_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries"` Full bool `protobuf:"varint,2,opt,name=full,proto3" json:"full,omitempty"` @@ -136,9 +202,11 @@ func (m *Message_Wantlist) GetFull() bool { } type Message_Wantlist_Entry struct { - Block []byte `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` - Priority int32 `protobuf:"varint,2,opt,name=priority,proto3" json:"priority,omitempty"` - Cancel bool `protobuf:"varint,3,opt,name=cancel,proto3" json:"cancel,omitempty"` + Block []byte `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` + Priority int32 `protobuf:"varint,2,opt,name=priority,proto3" json:"priority,omitempty"` + Cancel bool `protobuf:"varint,3,opt,name=cancel,proto3" json:"cancel,omitempty"` + WantType Message_Wantlist_WantType `protobuf:"varint,4,opt,name=wantType,proto3,enum=bitswap.message.pb.Message_Wantlist_WantType" json:"wantType,omitempty"` + SendDontHave bool `protobuf:"varint,5,opt,name=sendDontHave,proto3" json:"sendDontHave,omitempty"` } func (m 
*Message_Wantlist_Entry) Reset() { *m = Message_Wantlist_Entry{} } @@ -195,6 +263,20 @@ func (m *Message_Wantlist_Entry) GetCancel() bool { return false } +func (m *Message_Wantlist_Entry) GetWantType() Message_Wantlist_WantType { + if m != nil { + return m.WantType + } + return Message_Wantlist_Block +} + +func (m *Message_Wantlist_Entry) GetSendDontHave() bool { + if m != nil { + return m.SendDontHave + } + return false +} + type Message_Block struct { Prefix []byte `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"` Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` @@ -247,38 +329,103 @@ func (m *Message_Block) GetData() []byte { return nil } +type Message_BlockPresence struct { + Cid []byte `protobuf:"bytes,1,opt,name=cid,proto3" json:"cid,omitempty"` + Type Message_BlockPresenceType `protobuf:"varint,2,opt,name=type,proto3,enum=bitswap.message.pb.Message_BlockPresenceType" json:"type,omitempty"` +} + +func (m *Message_BlockPresence) Reset() { *m = Message_BlockPresence{} } +func (m *Message_BlockPresence) String() string { return proto.CompactTextString(m) } +func (*Message_BlockPresence) ProtoMessage() {} +func (*Message_BlockPresence) Descriptor() ([]byte, []int) { + return fileDescriptor_33c57e4bae7b9afd, []int{0, 2} +} +func (m *Message_BlockPresence) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message_BlockPresence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message_BlockPresence.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Message_BlockPresence) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message_BlockPresence.Merge(m, src) +} +func (m *Message_BlockPresence) XXX_Size() int { + return m.Size() +} +func (m *Message_BlockPresence) XXX_DiscardUnknown() { + xxx_messageInfo_Message_BlockPresence.DiscardUnknown(m) +} + +var xxx_messageInfo_Message_BlockPresence proto.InternalMessageInfo + +func (m *Message_BlockPresence) GetCid() []byte { + if m != nil { + return m.Cid + } + return nil +} + +func (m *Message_BlockPresence) GetType() Message_BlockPresenceType { + if m != nil { + return m.Type + } + return Message_Have +} + func init() { + proto.RegisterEnum("bitswap.message.pb.Message_BlockPresenceType", Message_BlockPresenceType_name, Message_BlockPresenceType_value) + proto.RegisterEnum("bitswap.message.pb.Message_Wantlist_WantType", Message_Wantlist_WantType_name, Message_Wantlist_WantType_value) proto.RegisterType((*Message)(nil), "bitswap.message.pb.Message") proto.RegisterType((*Message_Wantlist)(nil), "bitswap.message.pb.Message.Wantlist") proto.RegisterType((*Message_Wantlist_Entry)(nil), "bitswap.message.pb.Message.Wantlist.Entry") proto.RegisterType((*Message_Block)(nil), "bitswap.message.pb.Message.Block") + proto.RegisterType((*Message_BlockPresence)(nil), "bitswap.message.pb.Message.BlockPresence") } func init() { proto.RegisterFile("message.proto", fileDescriptor_33c57e4bae7b9afd) } var fileDescriptor_33c57e4bae7b9afd = []byte{ - // 335 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xcf, 0x4a, 0xf3, 0x40, - 0x14, 0xc5, 0x33, 0x4d, 0xd3, 0x86, 0xdb, 0x7e, 0x9b, 0xe1, 0x43, 0x86, 0x2c, 0x62, 0x14, 0x17, - 0x41, 0x70, 0x0a, 0xed, 0x13, 0x58, 0xd0, 0x85, 0xe0, 0xc2, 0x6c, 0x5c, 0x4f, 0xd2, 0x34, 0x0e, - 0xa6, 0x99, 0x90, 0x4c, 0xa9, 0x7d, 0x0b, 0x5f, 
0xc1, 0x07, 0x71, 0xdf, 0x65, 0x97, 0xae, 0x44, - 0xda, 0x17, 0x91, 0xdc, 0x4e, 0xb3, 0x11, 0xc4, 0xdd, 0x3d, 0xc3, 0x39, 0xbf, 0xfb, 0x67, 0xe0, - 0xdf, 0x22, 0xad, 0x6b, 0x91, 0xa5, 0xbc, 0xac, 0x94, 0x56, 0x94, 0xc6, 0x52, 0xd7, 0x2b, 0x51, - 0xf2, 0xf6, 0x39, 0xf6, 0xae, 0x32, 0xa9, 0x9f, 0x96, 0x31, 0x4f, 0xd4, 0x62, 0x94, 0xa9, 0x4c, - 0x8d, 0xd0, 0x1a, 0x2f, 0xe7, 0xa8, 0x50, 0x60, 0x75, 0x40, 0x9c, 0xbf, 0xd9, 0xd0, 0xbf, 0x3f, - 0xa4, 0xe9, 0x2d, 0xb8, 0x2b, 0x51, 0xe8, 0x5c, 0xd6, 0x9a, 0x91, 0x80, 0x84, 0x83, 0xf1, 0x05, - 0xff, 0xd9, 0x81, 0x1b, 0x3b, 0x7f, 0x34, 0xde, 0x69, 0x77, 0xf3, 0x79, 0x6a, 0x45, 0x6d, 0x96, - 0x9e, 0x40, 0x2f, 0xce, 0x55, 0xf2, 0x5c, 0xb3, 0x4e, 0x60, 0x87, 0xc3, 0xc8, 0x28, 0x7a, 0x0d, - 0xfd, 0x52, 0xac, 0x73, 0x25, 0x66, 0xcc, 0x0e, 0xec, 0x70, 0x30, 0x3e, 0xfb, 0x0d, 0x3f, 0x6d, - 0x42, 0x86, 0x7d, 0xcc, 0x79, 0xef, 0x04, 0xdc, 0x63, 0x5f, 0x7a, 0x07, 0xfd, 0xb4, 0xd0, 0x95, - 0x4c, 0x6b, 0x46, 0x90, 0x77, 0xf9, 0x97, 0x71, 0xf9, 0x4d, 0xa1, 0xab, 0xf5, 0x11, 0x6c, 0x00, - 0x94, 0x42, 0x77, 0xbe, 0xcc, 0x73, 0xd6, 0x09, 0x48, 0xe8, 0x46, 0x58, 0x7b, 0x0f, 0xe0, 0xa0, - 0x97, 0xfe, 0x07, 0x07, 0x57, 0xc0, 0xab, 0x0c, 0xa3, 0x83, 0xa0, 0x1e, 0xb8, 0x65, 0x25, 0x55, - 0x25, 0xf5, 0x1a, 0x63, 0x4e, 0xd4, 0xea, 0xe6, 0x04, 0x89, 0x28, 0x92, 0x34, 0x67, 0x36, 0x02, - 0x8d, 0xf2, 0x26, 0xe0, 0xe0, 0x5e, 0x8d, 0xa1, 0xac, 0xd2, 0xb9, 0x7c, 0x31, 0x4c, 0xa3, 0x9a, - 0x39, 0x66, 0x42, 0x0b, 0x04, 0x0e, 0x23, 0xac, 0xa7, 0x6c, 0xb3, 0xf3, 0xc9, 0x76, 0xe7, 0x93, - 0xaf, 0x9d, 0x4f, 0x5e, 0xf7, 0xbe, 0xb5, 0xdd, 0xfb, 0xd6, 0xc7, 0xde, 0xb7, 0xe2, 0x1e, 0x7e, - 0xe2, 0xe4, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x5d, 0x1d, 0x6e, 0x21, 0x18, 0x02, 0x00, 0x00, + // 483 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4d, 0x6b, 0xd4, 0x50, + 0x14, 0xcd, 0x9b, 0x24, 0x9d, 0x78, 0x9b, 0x96, 0xf1, 0x21, 0xf2, 0xc8, 0x22, 0x8d, 0x83, 0x8b, + 0xa8, 0x34, 0x85, 0xe9, 0x2f, 0xe8, 0xa0, 0xa2, 0x82, 0x20, 0x41, 0x98, 0x75, 0x3e, 0xde, 0xc4, + 0x60, 0x9a, 0x84, 0xbc, 0x37, 0xd6, 0xfc, 0x0b, 0x7f, 0x92, 0xb8, 0xea, 0x4a, 0xba, 0x74, 0x25, + 0x32, 0xf3, 0x47, 0x24, 0x37, 0x2f, 0x81, 0xb1, 0x60, 0xbb, 0xbb, 0xe7, 0xbe, 0x7b, 0x4e, 0xee, + 0xb9, 0x87, 0xc0, 0xd1, 0x25, 0x17, 0x22, 0xca, 0x78, 0x50, 0x37, 0x95, 0xac, 0x28, 0x8d, 0x73, + 0x29, 0xae, 0xa2, 0x3a, 0x18, 0xdb, 0xb1, 0x73, 0x9a, 0xe5, 0xf2, 0xd3, 0x26, 0x0e, 0x92, 0xea, + 0xf2, 0x2c, 0xab, 0xb2, 0xea, 0x0c, 0x47, 0xe3, 0xcd, 0x1a, 0x11, 0x02, 0xac, 0x7a, 0x89, 0xf9, + 0x8f, 0x03, 0x98, 0xbe, 0xef, 0xd9, 0xf4, 0x35, 0x58, 0x57, 0x51, 0x29, 0x8b, 0x5c, 0x48, 0x46, + 0x3c, 0xe2, 0x1f, 0x2e, 0x9e, 0x06, 0xb7, 0xbf, 0x10, 0xa8, 0xf1, 0x60, 0xa5, 0x66, 0x97, 0xc6, + 0xf5, 0xef, 0x13, 0x2d, 0x1c, 0xb9, 0xf4, 0x31, 0x1c, 0xc4, 0x45, 0x95, 0x7c, 0x16, 0x6c, 0xe2, + 0xe9, 0xbe, 0x1d, 0x2a, 0x44, 0x2f, 0x60, 0x5a, 0x47, 0x6d, 0x51, 0x45, 0x29, 0xd3, 0x3d, 0xdd, + 0x3f, 0x5c, 0x3c, 0xf9, 0x9f, 0xfc, 0xb2, 0x23, 0x29, 0xed, 0x81, 0x47, 0x57, 0x70, 0x8c, 0x62, + 0x1f, 0x1a, 0x2e, 0x78, 0x99, 0x70, 0xc1, 0x0c, 0x54, 0x7a, 0x76, 0xa7, 0xd2, 0xc0, 0x50, 0x8a, + 0xff, 0xc8, 0xd0, 0x39, 0xd8, 0x35, 0x2f, 0xd3, 0xbc, 0xcc, 0x96, 0xad, 0xe4, 0x82, 0x99, 0x1e, + 0xf1, 0xcd, 0x70, 0xaf, 0xe7, 0xfc, 0x9c, 0x80, 0x35, 0x98, 0xa6, 0xef, 0x60, 0xca, 0x4b, 0xd9, + 0xe4, 0x5c, 0x30, 0x82, 0x2b, 0x3c, 0xbf, 0xcf, 0xad, 0x82, 0x57, 0xa5, 0x6c, 0xda, 0xc1, 0x95, + 0x12, 0xa0, 0x14, 0x8c, 0xf5, 0xa6, 0x28, 0xd8, 0xc4, 0x23, 0xbe, 0x15, 0x62, 0xed, 0x7c, 0x27, + 0x60, 0xe2, 0x30, 0x7d, 0x04, 
0x26, 0x2e, 0x8b, 0x99, 0xd8, 0x61, 0x0f, 0xa8, 0x03, 0x56, 0xdd, + 0xe4, 0x55, 0x93, 0xcb, 0x16, 0x79, 0x66, 0x38, 0xe2, 0x2e, 0x80, 0x24, 0x2a, 0x13, 0x5e, 0x30, + 0x1d, 0x15, 0x15, 0xa2, 0x6f, 0xfb, 0x80, 0x3f, 0xb6, 0x35, 0x67, 0x86, 0x47, 0xfc, 0xe3, 0xc5, + 0xe9, 0xbd, 0x96, 0x5e, 0x29, 0x52, 0x38, 0xd2, 0xbb, 0x7b, 0x09, 0x5e, 0xa6, 0x2f, 0xab, 0x52, + 0xbe, 0x89, 0xbe, 0x70, 0xbc, 0x97, 0x15, 0xee, 0xf5, 0xe6, 0x27, 0xfd, 0xb9, 0x70, 0xfe, 0x01, + 0x98, 0x18, 0xc3, 0x4c, 0xa3, 0x16, 0x18, 0xdd, 0xf3, 0x8c, 0x38, 0xe7, 0xaa, 0xd9, 0x2d, 0x5c, + 0x37, 0x7c, 0x9d, 0x7f, 0x55, 0x1e, 0x15, 0xea, 0x0e, 0x93, 0x46, 0x32, 0x42, 0x83, 0x76, 0x88, + 0xb5, 0x93, 0xc2, 0xd1, 0x5e, 0xa0, 0x74, 0x06, 0x7a, 0x92, 0xa7, 0x8a, 0xd9, 0x95, 0xf4, 0x02, + 0x0c, 0xd9, 0x79, 0x9c, 0xdc, 0xed, 0x71, 0x4f, 0x0a, 0x3d, 0x22, 0x75, 0xfe, 0x02, 0x1e, 0xde, + 0x7a, 0x1a, 0x37, 0xd7, 0xa8, 0x0d, 0xd6, 0x60, 0x73, 0x46, 0x96, 0xec, 0x7a, 0xeb, 0x92, 0x9b, + 0xad, 0x4b, 0xfe, 0x6c, 0x5d, 0xf2, 0x6d, 0xe7, 0x6a, 0x37, 0x3b, 0x57, 0xfb, 0xb5, 0x73, 0xb5, + 0xf8, 0x00, 0xff, 0xb2, 0xf3, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xac, 0xa9, 0xf7, 0xab, 0xb9, + 0x03, 0x00, 0x00, } func (m *Message) Marshal() (dAtA []byte, err error) { @@ -301,6 +448,25 @@ func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PendingBytes != 0 { + i = encodeVarintMessage(dAtA, i, uint64(m.PendingBytes)) + i-- + dAtA[i] = 0x28 + } + if len(m.BlockPresences) > 0 { + for iNdEx := len(m.BlockPresences) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.BlockPresences[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMessage(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } if len(m.Payload) > 0 { for iNdEx := len(m.Payload) - 1; iNdEx >= 0; iNdEx-- { { @@ -404,6 +570,21 @@ func (m *Message_Wantlist_Entry) MarshalToSizedBuffer(dAtA []byte) (int, error) _ = i var l int _ = l + if m.SendDontHave { + i-- + if m.SendDontHave { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.WantType != 0 { + i = encodeVarintMessage(dAtA, i, uint64(m.WantType)) + i-- + dAtA[i] = 0x20 + } if m.Cancel { i-- if m.Cancel { @@ -466,6 +647,41 @@ func (m *Message_Block) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *Message_BlockPresence) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message_BlockPresence) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_BlockPresence) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Type != 0 { + i = encodeVarintMessage(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x10 + } + if len(m.Cid) > 0 { + i -= len(m.Cid) + copy(dAtA[i:], m.Cid) + i = encodeVarintMessage(dAtA, i, uint64(len(m.Cid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarintMessage(dAtA []byte, offset int, v uint64) int { offset -= sovMessage(v) base := offset @@ -497,6 +713,15 @@ func (m *Message) Size() (n int) { n += 1 + l + sovMessage(uint64(l)) } } + if len(m.BlockPresences) > 0 { + for _, e := range m.BlockPresences { + l = e.Size() + n += 1 + l + sovMessage(uint64(l)) + } + } + if m.PendingBytes != 0 { + n += 1 + sovMessage(uint64(m.PendingBytes)) + } return n } @@ -534,6 +759,12 @@ func 
(m *Message_Wantlist_Entry) Size() (n int) { if m.Cancel { n += 2 } + if m.WantType != 0 { + n += 1 + sovMessage(uint64(m.WantType)) + } + if m.SendDontHave { + n += 2 + } return n } @@ -554,6 +785,22 @@ func (m *Message_Block) Size() (n int) { return n } +func (m *Message_BlockPresence) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Cid) + if l > 0 { + n += 1 + l + sovMessage(uint64(l)) + } + if m.Type != 0 { + n += 1 + sovMessage(uint64(m.Type)) + } + return n +} + func sovMessage(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -688,6 +935,59 @@ func (m *Message) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockPresences", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BlockPresences = append(m.BlockPresences, Message_BlockPresence{}) + if err := m.BlockPresences[len(m.BlockPresences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PendingBytes", wireType) + } + m.PendingBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PendingBytes |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipMessage(dAtA[iNdEx:]) @@ -921,6 +1221,45 @@ func (m *Message_Wantlist_Entry) Unmarshal(dAtA []byte) error { } } m.Cancel = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WantType", wireType) + } + m.WantType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.WantType |= Message_Wantlist_WantType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SendDontHave", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SendDontHave = bool(v != 0) default: iNdEx = preIndex skippy, err := skipMessage(dAtA[iNdEx:]) @@ -1066,10 +1405,115 @@ func (m *Message_Block) Unmarshal(dAtA []byte) error { } return nil } +func (m *Message_BlockPresence) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockPresence: wiretype end group for non-group") + } + if fieldNum <= 0 
{ + return fmt.Errorf("proto: BlockPresence: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cid", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cid = append(m.Cid[:0], dAtA[iNdEx:postIndex]...) + if m.Cid == nil { + m.Cid = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= Message_BlockPresenceType(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMessage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMessage + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMessage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipMessage(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1101,8 +1545,10 @@ func skipMessage(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1123,30 +1569,55 @@ func skipMessage(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthMessage } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthMessage + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupMessage + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMessage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipMessage(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthMessage + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthMessage - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthMessage = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowMessage = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupMessage = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthMessage = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMessage = fmt.Errorf("proto: integer overflow") ) diff --git a/message/pb/message.proto 
b/message/pb/message.proto index 102b3431..f7afdb1f 100644 --- a/message/pb/message.proto +++ b/message/pb/message.proto @@ -7,11 +7,17 @@ import "github.com/gogo/protobuf/gogoproto/gogo.proto"; message Message { message Wantlist { + enum WantType { + Block = 0; + Have = 1; + } message Entry { bytes block = 1; // the block cid (cidV0 in bitswap 1.0.0, cidV1 in bitswap 1.1.0) int32 priority = 2; // the priority (normalized). default to 1 bool cancel = 3; // whether this revokes an entry + WantType wantType = 4; // Note: defaults to enum 0, ie Block + bool sendDontHave = 5; // Note: defaults to false } repeated Entry entries = 1 [(gogoproto.nullable) = false]; // a list of wantlist entries @@ -23,7 +29,18 @@ message Message { bytes data = 2; } + enum BlockPresenceType { + Have = 0; + DontHave = 1; + } + message BlockPresence { + bytes cid = 1; + BlockPresenceType type = 2; + } + Wantlist wantlist = 1 [(gogoproto.nullable) = false]; repeated bytes blocks = 2; // used to send Blocks in bitswap 1.0.0 repeated Block payload = 3 [(gogoproto.nullable) = false]; // used to send Blocks in bitswap 1.1.0 + repeated BlockPresence blockPresences = 4 [(gogoproto.nullable) = false]; + int32 pendingBytes = 5; } diff --git a/messagequeue/messagequeue.go b/messagequeue/messagequeue.go index 601a7074..b8caad57 100644 --- a/messagequeue/messagequeue.go +++ b/messagequeue/messagequeue.go @@ -2,12 +2,17 @@ package messagequeue import ( "context" + "math" "sync" "time" + debounce "github.com/bep/debounce" + bsmsg "github.com/ipfs/go-bitswap/message" + pb "github.com/ipfs/go-bitswap/message/pb" bsnet "github.com/ipfs/go-bitswap/network" - wantlist "github.com/ipfs/go-bitswap/wantlist" + bswl "github.com/ipfs/go-bitswap/wantlist" + cid "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log" peer "github.com/libp2p/go-libp2p-core/peer" ) @@ -16,7 +21,18 @@ var log = logging.Logger("bitswap") const ( defaultRebroadcastInterval = 30 * time.Second - maxRetries = 10 + // maxRetries is the number of times to attempt to send a message before + // giving up + maxRetries = 10 + // maxMessageSize is the maximum message size in bytes + maxMessageSize = 1024 * 1024 * 2 + // sendErrorBackoff is the time to wait before retrying to connect after + // an error when trying to send a message + sendErrorBackoff = 100 * time.Millisecond + // maxPriority is the max priority as defined by the bitswap protocol + maxPriority = math.MaxInt32 + // sendMessageDebounce is the debounce duration when calling sendMessage() + sendMessageDebounce = time.Millisecond ) // MessageNetwork is any network that can connect peers and generate a message @@ -24,55 +40,168 @@ const ( type MessageNetwork interface { ConnectTo(context.Context, peer.ID) error NewMessageSender(context.Context, peer.ID) (bsnet.MessageSender, error) + Self() peer.ID } // MessageQueue implements queue of want messages to send to peers. 
type MessageQueue struct { - ctx context.Context - p peer.ID - network MessageNetwork - - outgoingWork chan struct{} - done chan struct{} - - // do not touch out of run loop - wl *wantlist.SessionTrackedWantlist - nextMessage bsmsg.BitSwapMessage - nextMessageLk sync.RWMutex + ctx context.Context + p peer.ID + network MessageNetwork + maxMessageSize int + sendErrorBackoff time.Duration + + signalWorkReady func() + outgoingWork chan struct{} + done chan struct{} + + // Take lock whenever any of these variables are modified + wllock sync.Mutex + bcstWants recallWantlist + peerWants recallWantlist + cancels *cid.Set + priority int + + // Dont touch any of these variables outside of run loop sender bsnet.MessageSender rebroadcastIntervalLk sync.RWMutex rebroadcastInterval time.Duration rebroadcastTimer *time.Timer } +// recallWantlist keeps a list of pending wants, and a list of all wants that +// have ever been requested +type recallWantlist struct { + // The list of all wants that have been requested, including wants that + // have been sent and wants that have not yet been sent + allWants *bswl.Wantlist + // The list of wants that have not yet been sent + pending *bswl.Wantlist +} + +func newRecallWantList() recallWantlist { + return recallWantlist{ + allWants: bswl.New(), + pending: bswl.New(), + } +} + +// Add want to both the pending list and the list of all wants +func (r *recallWantlist) Add(c cid.Cid, priority int, wtype pb.Message_Wantlist_WantType) { + r.allWants.Add(c, priority, wtype) + r.pending.Add(c, priority, wtype) +} + +// Remove wants from both the pending list and the list of all wants +func (r *recallWantlist) Remove(c cid.Cid) { + r.allWants.Remove(c) + r.pending.Remove(c) +} + +// Remove wants by type from both the pending list and the list of all wants +func (r *recallWantlist) RemoveType(c cid.Cid, wtype pb.Message_Wantlist_WantType) { + r.allWants.RemoveType(c, wtype) + r.pending.RemoveType(c, wtype) +} + // New creats a new MessageQueue. func New(ctx context.Context, p peer.ID, network MessageNetwork) *MessageQueue { - return &MessageQueue{ + return newMessageQueue(ctx, p, network, maxMessageSize, sendErrorBackoff) +} + +// This constructor is used by the tests +func newMessageQueue(ctx context.Context, p peer.ID, network MessageNetwork, maxMsgSize int, sendErrorBackoff time.Duration) *MessageQueue { + mq := &MessageQueue{ ctx: ctx, - wl: wantlist.NewSessionTrackedWantlist(), - network: network, p: p, + network: network, + maxMessageSize: maxMsgSize, + bcstWants: newRecallWantList(), + peerWants: newRecallWantList(), + cancels: cid.NewSet(), outgoingWork: make(chan struct{}, 1), done: make(chan struct{}), rebroadcastInterval: defaultRebroadcastInterval, + sendErrorBackoff: sendErrorBackoff, + priority: maxPriority, } + + // Apply debounce to the work ready signal (which triggers sending a message) + debounced := debounce.New(sendMessageDebounce) + mq.signalWorkReady = func() { debounced(mq.onWorkReady) } + + return mq } -// AddMessage adds new entries to an outgoing message for a given session. 
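The debounce wiring in newMessageQueue above is what lets a burst of want/cancel calls collapse into a single message send. A standalone sketch of the same pattern with github.com/bep/debounce (illustrative only, not code from this diff):

package main

import (
    "fmt"
    "sync/atomic"
    "time"

    debounce "github.com/bep/debounce"
)

func main() {
    var sends int32
    // Same one-millisecond window as sendMessageDebounce above
    signalWorkReady := debounce.New(time.Millisecond)

    // Ten rapid "work ready" signals inside the debounce window...
    for i := 0; i < 10; i++ {
        signalWorkReady(func() { atomic.AddInt32(&sends, 1) })
    }

    // ...fire the callback only once, after the window elapses.
    time.Sleep(10 * time.Millisecond)
    fmt.Println("sends:", atomic.LoadInt32(&sends)) // sends: 1
}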
-func (mq *MessageQueue) AddMessage(entries []bsmsg.Entry, ses uint64) { - if !mq.addEntries(entries, ses) { +// Add want-haves that are part of a broadcast to all connected peers +func (mq *MessageQueue) AddBroadcastWantHaves(wantHaves []cid.Cid) { + if len(wantHaves) == 0 { return } - select { - case mq.outgoingWork <- struct{}{}: - default: + + mq.wllock.Lock() + defer mq.wllock.Unlock() + + for _, c := range wantHaves { + mq.bcstWants.Add(c, mq.priority, pb.Message_Wantlist_Have) + mq.priority-- + + // We're adding a want-have for the cid, so clear any pending cancel + // for the cid + mq.cancels.Remove(c) } + + // Schedule a message send + mq.signalWorkReady() } -// AddWantlist adds a complete session tracked want list to a message queue -func (mq *MessageQueue) AddWantlist(initialWants *wantlist.SessionTrackedWantlist) { - initialWants.CopyWants(mq.wl) - mq.addWantlist() +// Add want-haves and want-blocks for the peer for this message queue. +func (mq *MessageQueue) AddWants(wantBlocks []cid.Cid, wantHaves []cid.Cid) { + if len(wantBlocks) == 0 && len(wantHaves) == 0 { + return + } + + mq.wllock.Lock() + defer mq.wllock.Unlock() + + for _, c := range wantHaves { + mq.peerWants.Add(c, mq.priority, pb.Message_Wantlist_Have) + mq.priority-- + + // We're adding a want-have for the cid, so clear any pending cancel + // for the cid + mq.cancels.Remove(c) + } + for _, c := range wantBlocks { + mq.peerWants.Add(c, mq.priority, pb.Message_Wantlist_Block) + mq.priority-- + + // We're adding a want-block for the cid, so clear any pending cancel + // for the cid + mq.cancels.Remove(c) + } + + // Schedule a message send + mq.signalWorkReady() +} + +// Add cancel messages for the given keys. +func (mq *MessageQueue) AddCancels(cancelKs []cid.Cid) { + if len(cancelKs) == 0 { + return + } + + mq.wllock.Lock() + defer mq.wllock.Unlock() + + for _, c := range cancelKs { + mq.bcstWants.Remove(c) + mq.peerWants.Remove(c) + mq.cancels.Add(c) + } + + // Schedule a message send + mq.signalWorkReady() } // SetRebroadcastInterval sets a new interval on which to rebroadcast the full wantlist @@ -85,8 +214,7 @@ func (mq *MessageQueue) SetRebroadcastInterval(delay time.Duration) { mq.rebroadcastIntervalLk.Unlock() } -// Startup starts the processing of messages, and creates an initial message -// based on the given initial wantlist. +// Startup starts the processing of messages and rebroadcasting. 
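Putting the new entry points together, a hedged usage sketch (not code from this diff): the MessageNetwork implementation and the CID slices are assumed to come from the caller, and the import path assumes go-bitswap's module layout.

package example

import (
    "context"

    bsmq "github.com/ipfs/go-bitswap/messagequeue"
    cid "github.com/ipfs/go-cid"
    peer "github.com/libp2p/go-libp2p-core/peer"
)

// requestBlocks drives one peer's queue: broadcast want-haves to probe
// availability, want-blocks/want-haves aimed at this peer, and cancels
// once blocks have arrived from elsewhere.
func requestBlocks(ctx context.Context, p peer.ID, net bsmq.MessageNetwork,
    broadcast, wantBlocks, wantHaves, received []cid.Cid) {

    mq := bsmq.New(ctx, p, net)
    mq.Startup()

    // In real usage these go to every connected peer's queue; here we
    // only have this one queue.
    mq.AddBroadcastWantHaves(broadcast)

    // Ask this peer for the blocks themselves, plus availability info.
    mq.AddWants(wantBlocks, wantHaves)

    // Blocks that arrived from other peers: withdraw the wants.
    mq.AddCancels(received)
}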
func (mq *MessageQueue) Startup() { mq.rebroadcastIntervalLk.RLock() mq.rebroadcastTimer = time.NewTimer(mq.rebroadcastInterval) @@ -105,7 +233,7 @@ func (mq *MessageQueue) runQueue() { case <-mq.rebroadcastTimer.C: mq.rebroadcastWantlist() case <-mq.outgoingWork: - mq.sendMessage() + mq.sendIfReady() case <-mq.done: if mq.sender != nil { mq.sender.Close() @@ -120,87 +248,178 @@ func (mq *MessageQueue) runQueue() { } } -func (mq *MessageQueue) addWantlist() { - - mq.nextMessageLk.Lock() - defer mq.nextMessageLk.Unlock() - - if mq.wl.Len() > 0 { - if mq.nextMessage == nil { - mq.nextMessage = bsmsg.New(false) - } - for _, e := range mq.wl.Entries() { - mq.nextMessage.AddEntry(e.Cid, e.Priority) - } - select { - case mq.outgoingWork <- struct{}{}: - default: - } - } -} - +// Periodically resend the list of wants to the peer func (mq *MessageQueue) rebroadcastWantlist() { mq.rebroadcastIntervalLk.RLock() mq.rebroadcastTimer.Reset(mq.rebroadcastInterval) mq.rebroadcastIntervalLk.RUnlock() - mq.addWantlist() + // If some wants were transferred from the rebroadcast list + if mq.transferRebroadcastWants() { + // Send them out + mq.sendMessage() + } } -func (mq *MessageQueue) addEntries(entries []bsmsg.Entry, ses uint64) bool { - var work bool - mq.nextMessageLk.Lock() - defer mq.nextMessageLk.Unlock() - // if we have no message held allocate a new one - if mq.nextMessage == nil { - mq.nextMessage = bsmsg.New(false) - } +// Transfer wants from the rebroadcast lists into the pending lists. +func (mq *MessageQueue) transferRebroadcastWants() bool { + mq.wllock.Lock() + defer mq.wllock.Unlock() - for _, e := range entries { - if e.Cancel { - if mq.wl.Remove(e.Cid, ses) { - work = true - mq.nextMessage.Cancel(e.Cid) - } - } else { - if mq.wl.Add(e.Cid, e.Priority, ses) { - work = true - mq.nextMessage.AddEntry(e.Cid, e.Priority) - } - } + // Check if there are any wants to rebroadcast + if mq.bcstWants.allWants.Len() == 0 && mq.peerWants.allWants.Len() == 0 { + return false } - return work + + // Copy all wants into pending wants lists + mq.bcstWants.pending.Absorb(mq.bcstWants.allWants) + mq.peerWants.pending.Absorb(mq.peerWants.allWants) + + return true } -func (mq *MessageQueue) extractOutgoingMessage() bsmsg.BitSwapMessage { - // grab outgoing message - mq.nextMessageLk.Lock() - message := mq.nextMessage - mq.nextMessage = nil - mq.nextMessageLk.Unlock() - return message +func (mq *MessageQueue) onWorkReady() { + select { + case mq.outgoingWork <- struct{}{}: + default: + } } -func (mq *MessageQueue) sendMessage() { - message := mq.extractOutgoingMessage() - if message == nil || message.Empty() { - return +func (mq *MessageQueue) sendIfReady() { + if mq.hasPendingWork() { + mq.sendMessage() } +} +func (mq *MessageQueue) sendMessage() { err := mq.initializeSender() if err != nil { log.Infof("cant open message sender to peer %s: %s", mq.p, err) // TODO: cant connect, what now? + // TODO: should we stop using this connection and clear the want list + // to avoid using up memory? return } - for i := 0; i < maxRetries; i++ { // try to send this message until we fail. + // Convert want lists to a Bitswap Message + message, onSent := mq.extractOutgoingMessage(mq.sender.SupportsHave()) + if message == nil || message.Empty() { + return + } + + // mq.logOutgoingMessage(message) + + // Try to send this message repeatedly + for i := 0; i < maxRetries; i++ { if mq.attemptSendAndRecovery(message) { + // We were able to send successfully. 
+ onSent() + + // If the message was too big and only a subset of wants could be + // sent, schedule sending the rest of the wants in the next + // iteration of the event loop. + if mq.hasPendingWork() { + mq.signalWorkReady() + } + return } } } +// func (mq *MessageQueue) logOutgoingMessage(msg bsmsg.BitSwapMessage) { +// entries := msg.Wantlist() +// for _, e := range entries { +// if e.Cancel { +// if e.WantType == pb.Message_Wantlist_Have { +// log.Debugf("send %s->%s: cancel-have %s\n", lu.P(mq.network.Self()), lu.P(mq.p), lu.C(e.Cid)) +// } else { +// log.Debugf("send %s->%s: cancel-block %s\n", lu.P(mq.network.Self()), lu.P(mq.p), lu.C(e.Cid)) +// } +// } else { +// if e.WantType == pb.Message_Wantlist_Have { +// log.Debugf("send %s->%s: want-have %s\n", lu.P(mq.network.Self()), lu.P(mq.p), lu.C(e.Cid)) +// } else { +// log.Debugf("send %s->%s: want-block %s\n", lu.P(mq.network.Self()), lu.P(mq.p), lu.C(e.Cid)) +// } +// } +// } +// } + +func (mq *MessageQueue) hasPendingWork() bool { + mq.wllock.Lock() + defer mq.wllock.Unlock() + + return mq.bcstWants.pending.Len() > 0 || mq.peerWants.pending.Len() > 0 || mq.cancels.Len() > 0 +} + +func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwapMessage, func()) { + // Create a new message + msg := bsmsg.New(false) + + mq.wllock.Lock() + defer mq.wllock.Unlock() + + // Get broadcast and regular wantlist entries + bcstEntries := mq.bcstWants.pending.SortedEntries() + peerEntries := mq.peerWants.pending.SortedEntries() + + // Size of the message so far + msgSize := 0 + + // Add each broadcast want-have to the message + for i := 0; i < len(bcstEntries) && msgSize < mq.maxMessageSize; i++ { + // Broadcast wants are sent as want-have + wantType := pb.Message_Wantlist_Have + + // If the remote peer doesn't support HAVE / DONT_HAVE messages, + // send a want-block instead + if !supportsHave { + wantType = pb.Message_Wantlist_Block + } + + e := bcstEntries[i] + msgSize += msg.AddEntry(e.Cid, e.Priority, wantType, false) + } + + // Add each regular want-have / want-block to the message + for i := 0; i < len(peerEntries) && msgSize < mq.maxMessageSize; i++ { + e := peerEntries[i] + // If the remote peer doesn't support HAVE / DONT_HAVE messages, + // don't send want-haves (only send want-blocks) + if !supportsHave && e.WantType == pb.Message_Wantlist_Have { + mq.peerWants.RemoveType(e.Cid, pb.Message_Wantlist_Have) + } else { + msgSize += msg.AddEntry(e.Cid, e.Priority, e.WantType, true) + } + } + + // Add each cancel to the message + cancels := mq.cancels.Keys() + for i := 0; i < len(cancels) && msgSize < mq.maxMessageSize; i++ { + c := cancels[i] + + msgSize += msg.Cancel(c) + + // Clear the cancel - we make a best effort to let peers know about + // cancels but won't save them to resend if there's a failure. + mq.cancels.Remove(c) + } + + // Called when the message has been successfully sent. + // Remove the sent keys from the broadcast and regular wantlists. 
+ onSent := func() { + mq.wllock.Lock() + defer mq.wllock.Unlock() + + for _, e := range msg.Wantlist() { + mq.bcstWants.pending.Remove(e.Cid) + mq.peerWants.pending.RemoveType(e.Cid, e.WantType) + } + } + + return msg, onSent +} func (mq *MessageQueue) initializeSender() error { if mq.sender != nil { return nil @@ -228,18 +447,14 @@ func (mq *MessageQueue) attemptSendAndRecovery(message bsmsg.BitSwapMessage) boo return true case <-mq.ctx.Done(): return true - case <-time.After(time.Millisecond * 100): - // wait 100ms in case disconnect notifications are still propogating + case <-time.After(mq.sendErrorBackoff): + // wait 100ms in case disconnect notifications are still propagating log.Warning("SendMsg errored but neither 'done' nor context.Done() were set") } err = mq.initializeSender() if err != nil { log.Infof("couldnt open sender again after SendMsg(%s) failed: %s", mq.p, err) - // TODO(why): what do we do now? - // I think the *right* answer is to probably put the message we're - // trying to send back, and then return to waiting for new work or - // a disconnect. return true } diff --git a/messagequeue/messagequeue_test.go b/messagequeue/messagequeue_test.go index e9d09b93..6ce146f9 100644 --- a/messagequeue/messagequeue_test.go +++ b/messagequeue/messagequeue_test.go @@ -2,12 +2,16 @@ package messagequeue import ( "context" + "errors" "testing" "time" + "github.com/ipfs/go-bitswap/message" "github.com/ipfs/go-bitswap/testutil" + cid "github.com/ipfs/go-cid" bsmsg "github.com/ipfs/go-bitswap/message" + pb "github.com/ipfs/go-bitswap/message/pb" bsnet "github.com/ipfs/go-bitswap/network" peer "github.com/libp2p/go-libp2p-core/peer" ) @@ -29,19 +33,28 @@ func (fmn *fakeMessageNetwork) NewMessageSender(context.Context, peer.ID) (bsnet return nil, fmn.messageSenderError } +func (fms *fakeMessageNetwork) Self() peer.ID { return "" } + type fakeMessageSender struct { sendError error fullClosed chan<- struct{} reset chan<- struct{} messagesSent chan<- bsmsg.BitSwapMessage + sendErrors chan<- error + supportsHave bool } func (fms *fakeMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMessage) error { + if fms.sendError != nil { + fms.sendErrors <- fms.sendError + return fms.sendError + } fms.messagesSent <- msg - return fms.sendError + return nil } -func (fms *fakeMessageSender) Close() error { fms.fullClosed <- struct{}{}; return nil } -func (fms *fakeMessageSender) Reset() error { fms.reset <- struct{}{}; return nil } +func (fms *fakeMessageSender) Close() error { fms.fullClosed <- struct{}{}; return nil } +func (fms *fakeMessageSender) Reset() error { fms.reset <- struct{}{}; return nil } +func (fms *fakeMessageSender) SupportsHave() bool { return fms.supportsHave } func collectMessages(ctx context.Context, t *testing.T, @@ -71,24 +84,24 @@ func totalEntriesLength(messages []bsmsg.BitSwapMessage) int { func TestStartupAndShutdown(t *testing.T) { ctx := context.Background() messagesSent := make(chan bsmsg.BitSwapMessage) + sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent} + fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet) - ses := testutil.GenerateSessionID() - wl := testutil.GenerateWantlist(10, ses) + bcstwh := testutil.GenerateCids(10) messageQueue.Startup() - 
messageQueue.AddWantlist(wl) + messageQueue.AddBroadcastWantHaves(bcstwh) messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) if len(messages) != 1 { - t.Fatal("wrong number of messages were sent for initial wants") + t.Fatal("wrong number of messages were sent for broadcast want-haves") } firstMessage := messages[0] - if len(firstMessage.Wantlist()) != wl.Len() { + if len(firstMessage.Wantlist()) != len(bcstwh) { t.Fatal("did not add all wants to want list") } for _, entry := range firstMessage.Wantlist() { @@ -113,22 +126,22 @@ func TestStartupAndShutdown(t *testing.T) { func TestSendingMessagesDeduped(t *testing.T) { ctx := context.Background() messagesSent := make(chan bsmsg.BitSwapMessage) + sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent} + fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet) - ses1 := testutil.GenerateSessionID() - ses2 := testutil.GenerateSessionID() - entries := testutil.GenerateMessageEntries(10, false) - messageQueue.Startup() + wantHaves := testutil.GenerateCids(10) + wantBlocks := testutil.GenerateCids(10) - messageQueue.AddMessage(entries, ses1) - messageQueue.AddMessage(entries, ses2) + messageQueue.Startup() + messageQueue.AddWants(wantBlocks, wantHaves) + messageQueue.AddWants(wantBlocks, wantHaves) messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) - if totalEntriesLength(messages) != len(entries) { + if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks) { t.Fatal("Messages were not deduped") } } @@ -136,62 +149,448 @@ func TestSendingMessagesDeduped(t *testing.T) { func TestSendingMessagesPartialDupe(t *testing.T) { ctx := context.Background() messagesSent := make(chan bsmsg.BitSwapMessage) + sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent} + fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet) - ses1 := testutil.GenerateSessionID() - ses2 := testutil.GenerateSessionID() - entries := testutil.GenerateMessageEntries(10, false) - moreEntries := testutil.GenerateMessageEntries(5, false) - secondEntries := append(entries[5:], moreEntries...) 
- messageQueue.Startup() + wantHaves := testutil.GenerateCids(10) + wantBlocks := testutil.GenerateCids(10) - messageQueue.AddMessage(entries, ses1) - messageQueue.AddMessage(secondEntries, ses2) + messageQueue.Startup() + messageQueue.AddWants(wantBlocks[:8], wantHaves[:8]) + messageQueue.AddWants(wantBlocks[3:], wantHaves[3:]) messages := collectMessages(ctx, t, messagesSent, 20*time.Millisecond) - if totalEntriesLength(messages) != len(entries)+len(moreEntries) { + if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks) { t.Fatal("messages were not correctly deduped") } +} + +func TestSendingMessagesPriority(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan bsmsg.BitSwapMessage) + sendErrors := make(chan error) + resetChan := make(chan struct{}, 1) + fullClosedChan := make(chan struct{}, 1) + fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + messageQueue := New(ctx, peerID, fakenet) + wantHaves1 := testutil.GenerateCids(5) + wantHaves2 := testutil.GenerateCids(5) + wantHaves := append(wantHaves1, wantHaves2...) + wantBlocks1 := testutil.GenerateCids(5) + wantBlocks2 := testutil.GenerateCids(5) + wantBlocks := append(wantBlocks1, wantBlocks2...) + + messageQueue.Startup() + messageQueue.AddWants(wantBlocks1, wantHaves1) + messageQueue.AddWants(wantBlocks2, wantHaves2) + messages := collectMessages(ctx, t, messagesSent, 20*time.Millisecond) + + if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks) { + t.Fatal("wrong number of wants") + } + byCid := make(map[cid.Cid]message.Entry) + for _, entry := range messages[0].Wantlist() { + byCid[entry.Cid] = entry + } + + // Check that earliest want-haves have highest priority + for i := range wantHaves { + if i > 0 { + if byCid[wantHaves[i]].Priority > byCid[wantHaves[i-1]].Priority { + t.Fatal("earliest want-haves should have higher priority") + } + } + } + + // Check that earliest want-blocks have highest priority + for i := range wantBlocks { + if i > 0 { + if byCid[wantBlocks[i]].Priority > byCid[wantBlocks[i-1]].Priority { + t.Fatal("earliest want-blocks should have higher priority") + } + } + } + + // Check that want-haves have higher priority than want-blocks within + // same group + for i := range wantHaves1 { + if i > 0 { + if byCid[wantHaves[i]].Priority <= byCid[wantBlocks[0]].Priority { + t.Fatal("want-haves should have higher priority than want-blocks") + } + } + } + // Check that all items in first group have higher priority than first item + // in second group + for i := range wantHaves1 { + if i > 0 { + if byCid[wantHaves[i]].Priority <= byCid[wantHaves2[0]].Priority { + t.Fatal("items in first group should have higher priority than items in second group") + } + } + } } -func TestWantlistRebroadcast(t *testing.T) { +func TestCancelOverridesPendingWants(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan bsmsg.BitSwapMessage) + sendErrors := make(chan error) + resetChan := make(chan struct{}, 1) + fullClosedChan := make(chan struct{}, 1) + fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + messageQueue := New(ctx, peerID, fakenet) + wantHaves := testutil.GenerateCids(2) + wantBlocks := testutil.GenerateCids(2) + + messageQueue.Startup() + messageQueue.AddWants(wantBlocks, wantHaves) 
+ messageQueue.AddCancels([]cid.Cid{wantBlocks[0], wantHaves[0]}) + messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks) { + t.Fatal("Wrong message count") + } + wb, wh, cl := filterWantTypes(messages[0].Wantlist()) + if len(wb) != 1 || !wb[0].Equals(wantBlocks[1]) { + t.Fatal("Expected 1 want-block") + } + if len(wh) != 1 || !wh[0].Equals(wantHaves[1]) { + t.Fatal("Expected 1 want-have") + } + if len(cl) != 2 { + t.Fatal("Expected 2 cancels") + } +} + +func TestWantOverridesPendingCancels(t *testing.T) { ctx := context.Background() messagesSent := make(chan bsmsg.BitSwapMessage) + sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent} + fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet) - ses := testutil.GenerateSessionID() - wl := testutil.GenerateWantlist(10, ses) + cancels := testutil.GenerateCids(3) messageQueue.Startup() - messageQueue.AddWantlist(wl) + messageQueue.AddCancels(cancels) + messageQueue.AddWants([]cid.Cid{cancels[0]}, []cid.Cid{cancels[1]}) + messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + if totalEntriesLength(messages) != len(cancels) { + t.Fatal("Wrong message count") + } + + wb, wh, cl := filterWantTypes(messages[0].Wantlist()) + if len(wb) != 1 || !wb[0].Equals(cancels[0]) { + t.Fatal("Expected 1 want-block") + } + if len(wh) != 1 || !wh[0].Equals(cancels[1]) { + t.Fatal("Expected 1 want-have") + } + if len(cl) != 1 || !cl[0].Equals(cancels[2]) { + t.Fatal("Expected 1 cancel") + } +} + +func TestWantlistRebroadcast(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan bsmsg.BitSwapMessage) + sendErrors := make(chan error) + resetChan := make(chan struct{}, 1) + fullClosedChan := make(chan struct{}, 1) + fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + messageQueue := New(ctx, peerID, fakenet) + bcstwh := testutil.GenerateCids(10) + wantHaves := testutil.GenerateCids(10) + wantBlocks := testutil.GenerateCids(10) + + // Add some broadcast want-haves + messageQueue.Startup() + messageQueue.AddBroadcastWantHaves(bcstwh) messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) if len(messages) != 1 { t.Fatal("wrong number of messages were sent for initial wants") } + // All broadcast want-haves should have been sent + firstMessage := messages[0] + if len(firstMessage.Wantlist()) != len(bcstwh) { + t.Fatal("wrong number of wants") + } + + // Tell message queue to rebroadcast after 5ms, then wait 8ms messageQueue.SetRebroadcastInterval(5 * time.Millisecond) messages = collectMessages(ctx, t, messagesSent, 8*time.Millisecond) if len(messages) != 1 { t.Fatal("wrong number of messages were rebroadcast") } - firstMessage := messages[0] - if len(firstMessage.Wantlist()) != wl.Len() { - t.Fatal("did not add all wants to want list") + // All the want-haves should have been rebroadcast + firstMessage = messages[0] + if len(firstMessage.Wantlist()) != len(bcstwh) { + t.Fatal("did not rebroadcast all wants") + } + + // Tell message queue to rebroadcast after a long time (so it doesn't 
+ // interfere with the next message collection), then send out some + // regular wants and collect them + messageQueue.SetRebroadcastInterval(1 * time.Second) + messageQueue.AddWants(wantBlocks, wantHaves) + messages = collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + if len(messages) != 1 { + t.Fatal("wrong number of messages were rebroadcast") + } + + // All new wants should have been sent + firstMessage = messages[0] + if len(firstMessage.Wantlist()) != len(wantHaves)+len(wantBlocks) { + t.Fatal("wrong number of wants") + } + + // Tell message queue to rebroadcast after 5ms, then wait 8ms + messageQueue.SetRebroadcastInterval(5 * time.Millisecond) + messages = collectMessages(ctx, t, messagesSent, 8*time.Millisecond) + firstMessage = messages[0] + + // Both original and new wants should have been rebroadcast + totalWants := len(bcstwh) + len(wantHaves) + len(wantBlocks) + if len(firstMessage.Wantlist()) != totalWants { + t.Fatal("did not rebroadcast all wants") + } + + // Cancel some of the wants + messageQueue.SetRebroadcastInterval(1 * time.Second) + cancels := append([]cid.Cid{bcstwh[0]}, wantHaves[0], wantBlocks[0]) + messageQueue.AddCancels(cancels) + messages = collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + if len(messages) != 1 { + t.Fatal("wrong number of messages were rebroadcast") + } + + // Cancels for each want should have been sent + firstMessage = messages[0] + if len(firstMessage.Wantlist()) != len(cancels) { + t.Fatal("wrong number of cancels") } for _, entry := range firstMessage.Wantlist() { - if entry.Cancel { - t.Fatal("initial add sent cancel entry when it should not have") + if !entry.Cancel { + t.Fatal("expected cancels") + } + } + + // Tell message queue to rebroadcast after 5ms, then wait 8ms + messageQueue.SetRebroadcastInterval(5 * time.Millisecond) + messages = collectMessages(ctx, t, messagesSent, 8*time.Millisecond) + firstMessage = messages[0] + if len(firstMessage.Wantlist()) != totalWants-len(cancels) { + t.Fatal("did not rebroadcast all wants") + } +} + +func TestSendingLargeMessages(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan bsmsg.BitSwapMessage) + sendErrors := make(chan error) + resetChan := make(chan struct{}, 1) + fullClosedChan := make(chan struct{}, 1) + fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + + wantBlocks := testutil.GenerateCids(10) + entrySize := 44 + maxMsgSize := entrySize * 3 // 3 wants + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMsgSize, sendErrorBackoff) + + messageQueue.Startup() + messageQueue.AddWants(wantBlocks, []cid.Cid{}) + messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + // want-block has size 44, so with maxMsgSize 44 * 3 (3 want-blocks), then if + // we send 10 want-blocks we should expect 4 messages: + // [***] [***] [***] [*] + if len(messages) != 4 { + t.Fatal("expected 4 messages to be sent, got", len(messages)) + } + if totalEntriesLength(messages) != len(wantBlocks) { + t.Fatal("wrong number of wants") + } +} + +func TestSendToPeerThatDoesntSupportHave(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan bsmsg.BitSwapMessage) + sendErrors := make(chan error) + resetChan := make(chan struct{}, 1) + fullClosedChan := make(chan struct{}, 1) + fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, false} + fakenet := 
&fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + + messageQueue := New(ctx, peerID, fakenet) + messageQueue.Startup() + + // If the remote peer doesn't support HAVE / DONT_HAVE messages + // - want-blocks should be sent normally + // - want-haves should not be sent + // - broadcast want-haves should be sent as want-blocks + + // Check broadcast want-haves + bcwh := testutil.GenerateCids(10) + messageQueue.AddBroadcastWantHaves(bcwh) + messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + if len(messages) != 1 { + t.Fatal("wrong number of messages were sent", len(messages)) + } + wl := messages[0].Wantlist() + if len(wl) != len(bcwh) { + t.Fatal("wrong number of entries in wantlist", len(wl)) + } + for _, entry := range wl { + if entry.WantType != pb.Message_Wantlist_Block { + t.Fatal("broadcast want-haves should be sent as want-blocks") + } + } + + // Check regular want-haves and want-blocks + wbs := testutil.GenerateCids(10) + whs := testutil.GenerateCids(10) + messageQueue.AddWants(wbs, whs) + messages = collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + if len(messages) != 1 { + t.Fatal("wrong number of messages were sent", len(messages)) + } + wl = messages[0].Wantlist() + if len(wl) != len(wbs) { + t.Fatal("should only send want-blocks (no want-haves)", len(wl)) + } + for _, entry := range wl { + if entry.WantType != pb.Message_Wantlist_Block { + t.Fatal("should only send want-blocks") + } + } +} + +func TestResendAfterError(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan bsmsg.BitSwapMessage) + sendErrors := make(chan error) + resetChan := make(chan struct{}, 1) + fullClosedChan := make(chan struct{}, 1) + fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + sendErrBackoff := 5 * time.Millisecond + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrBackoff) + wantBlocks := testutil.GenerateCids(10) + wantHaves := testutil.GenerateCids(10) + + messageQueue.Startup() + + var errs []error + go func() { + // After the first error is received, clear sendError so that + // subsequent sends will not error + errs = append(errs, <-sendErrors) + fakeSender.sendError = nil + }() + + // Make the first send error out + fakeSender.sendError = errors.New("send err") + messageQueue.AddWants(wantBlocks, wantHaves) + messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + if len(errs) != 1 { + t.Fatal("Expected first send to error") + } + + if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks) { + t.Fatal("Expected subsequent send to succeed") + } +} + +func TestResendAfterMaxRetries(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan bsmsg.BitSwapMessage) + sendErrors := make(chan error) + resetChan := make(chan struct{}, maxRetries*2) + fullClosedChan := make(chan struct{}, 1) + fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + sendErrBackoff := 2 * time.Millisecond + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrBackoff) + wantBlocks := testutil.GenerateCids(10) + wantHaves := testutil.GenerateCids(10) + wantBlocks2 := testutil.GenerateCids(10) + wantHaves2 := testutil.GenerateCids(10) + + messageQueue.Startup() + + 
var errs []error + go func() { + for len(errs) < maxRetries { + err := <-sendErrors + errs = append(errs, err) + } + }() + + // Make the first group of send attempts error out + fakeSender.sendError = errors.New("send err") + messageQueue.AddWants(wantBlocks, wantHaves) + messages := collectMessages(ctx, t, messagesSent, 50*time.Millisecond) + + if len(errs) != maxRetries { + t.Fatal("Expected maxRetries errors, got", len(errs)) + } + + // No successful send after max retries, so expect no messages sent + if totalEntriesLength(messages) != 0 { + t.Fatal("Expected no messages") + } + + // Clear sendError so that subsequent sends will not error + fakeSender.sendError = nil + + // Add a new batch of wants + messageQueue.AddWants(wantBlocks2, wantHaves2) + messages = collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + // All wants from previous and new send should be sent + if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks)+len(wantHaves2)+len(wantBlocks2) { + t.Fatal("Expected subsequent send to send first and second batches of wants") + } +} + +func filterWantTypes(wantlist []bsmsg.Entry) ([]cid.Cid, []cid.Cid, []cid.Cid) { + var wbs []cid.Cid + var whs []cid.Cid + var cls []cid.Cid + for _, e := range wantlist { + if e.Cancel { + cls = append(cls, e.Cid) + } else if e.WantType == pb.Message_Wantlist_Block { + wbs = append(wbs, e.Cid) + } else { + whs = append(whs, e.Cid) } } + return wbs, whs, cls } diff --git a/network/interface.go b/network/interface.go index 783e29e9..704d851f 100644 --- a/network/interface.go +++ b/network/interface.go @@ -13,18 +13,19 @@ import ( ) var ( - // ProtocolBitswapOne is the prefix for the legacy bitswap protocol - ProtocolBitswapOne protocol.ID = "/ipfs/bitswap/1.0.0" // ProtocolBitswapNoVers is equivalent to the legacy bitswap protocol ProtocolBitswapNoVers protocol.ID = "/ipfs/bitswap" - - // ProtocolBitswap is the current version of bitswap protocol, 1.1.0 - ProtocolBitswap protocol.ID = "/ipfs/bitswap/1.1.0" + // ProtocolBitswapOneZero is the prefix for the legacy bitswap protocol + ProtocolBitswapOneZero protocol.ID = "/ipfs/bitswap/1.0.0" + // ProtocolBitswapOneOne is the prefix for version 1.1.0 + ProtocolBitswapOneOne protocol.ID = "/ipfs/bitswap/1.1.0" + // ProtocolBitswap is the current version of the bitswap protocol: 1.2.0 + ProtocolBitswap protocol.ID = "/ipfs/bitswap/1.2.0" ) // BitSwapNetwork provides network connectivity for BitSwap sessions. type BitSwapNetwork interface { - + Self() peer.ID // SendMessage sends a BitSwap message to a peer. SendMessage( context.Context, @@ -36,6 +37,7 @@ type BitSwapNetwork interface { SetDelegate(Receiver) ConnectTo(context.Context, peer.ID) error + DisconnectFrom(context.Context, peer.ID) error NewMessageSender(context.Context, peer.ID) (MessageSender, error) @@ -52,6 +54,8 @@ type MessageSender interface { SendMsg(context.Context, bsmsg.BitSwapMessage) error Close() error Reset() error + // Indicates whether the remote peer supports HAVE / DONT_HAVE messages + SupportsHave() bool } // Receiver is an interface that can receive messages from the BitSwapNetwork. diff --git a/network/ipfs_impl.go b/network/ipfs_impl.go index 036d1532..2a25b7a0 100644 --- a/network/ipfs_impl.go +++ b/network/ipfs_impl.go @@ -29,31 +29,52 @@ var sendMessageTimeout = time.Minute * 10 // NewFromIpfsHost returns a BitSwapNetwork supported by underlying IPFS host.
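+// Illustrative usage (host and router are hypothetical values): the advertised
+// protocol set can be narrowed with the SupportedProtocols option, e.g.
+//   NewFromIpfsHost(host, router, SupportedProtocols([]protocol.ID{ProtocolBitswapOneOne}))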
func NewFromIpfsHost(host host.Host, r routing.ContentRouting, opts ...NetOpt) BitSwapNetwork { - s := Settings{} - for _, opt := range opts { - opt(&s) - } + s := processSettings(opts...) bitswapNetwork := impl{ host: host, routing: r, - protocolBitswap: s.ProtocolPrefix + ProtocolBitswap, - protocolBitswapOne: s.ProtocolPrefix + ProtocolBitswapOne, - protocolBitswapNoVers: s.ProtocolPrefix + ProtocolBitswapNoVers, + protocolBitswapNoVers: s.ProtocolPrefix + ProtocolBitswapNoVers, + protocolBitswapOneZero: s.ProtocolPrefix + ProtocolBitswapOneZero, + protocolBitswapOneOne: s.ProtocolPrefix + ProtocolBitswapOneOne, + protocolBitswap: s.ProtocolPrefix + ProtocolBitswap, + + supportedProtocols: s.SupportedProtocols, } return &bitswapNetwork } +func processSettings(opts ...NetOpt) Settings { + s := Settings{ + SupportedProtocols: []protocol.ID{ + ProtocolBitswap, + ProtocolBitswapOneOne, + ProtocolBitswapOneZero, + ProtocolBitswapNoVers, + }, + } + for _, opt := range opts { + opt(&s) + } + for i, proto := range s.SupportedProtocols { + s.SupportedProtocols[i] = s.ProtocolPrefix + proto + } + return s +} + // impl transforms the ipfs network interface, which sends and receives // NetMessage objects, into the bitswap network interface. type impl struct { host host.Host routing routing.ContentRouting - protocolBitswap protocol.ID - protocolBitswapOne protocol.ID - protocolBitswapNoVers protocol.ID + protocolBitswapNoVers protocol.ID + protocolBitswapOneZero protocol.ID + protocolBitswapOneOne protocol.ID + protocolBitswap protocol.ID + + supportedProtocols []protocol.ID // inbound messages from the network are forwarded to the receiver receiver Receiver @@ -78,6 +99,23 @@ func (s *streamMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMess return s.bsnet.msgToStream(ctx, s.s, msg) } +func (s *streamMessageSender) SupportsHave() bool { + return s.bsnet.SupportsHave(s.s.Protocol()) +} + +func (bsnet *impl) Self() peer.ID { + return bsnet.host.ID() +} + +// Indicates whether the given protocol supports HAVE / DONT_HAVE messages +func (bsnet *impl) SupportsHave(proto protocol.ID) bool { + switch proto { + case bsnet.protocolBitswapOneOne, bsnet.protocolBitswapOneZero, bsnet.protocolBitswapNoVers: + return false + } + return true +} + func (bsnet *impl) msgToStream(ctx context.Context, s network.Stream, msg bsmsg.BitSwapMessage) error { deadline := time.Now().Add(sendMessageTimeout) if dl, ok := ctx.Deadline(); ok { @@ -88,13 +126,16 @@ func (bsnet *impl) msgToStream(ctx context.Context, s network.Stream, msg bsmsg. log.Warningf("error setting deadline: %s", err) } + // Older Bitswap versions use a slightly different wire format so we need + // to convert the message to the appropriate format depending on the remote + // peer's Bitswap version. 
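+	// (Concretely, per the switch below: peers on /ipfs/bitswap/1.1.0 or
+	// /ipfs/bitswap/1.2.0 receive the newer encoding via ToNetV1, while peers
+	// on /ipfs/bitswap/1.0.0 or unversioned /ipfs/bitswap receive the legacy
+	// encoding via ToNetV0.)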
switch s.Protocol() { - case bsnet.protocolBitswap: + case bsnet.protocolBitswapOneOne, bsnet.protocolBitswap: if err := msg.ToNetV1(s); err != nil { log.Debugf("error: %s", err) return err } - case bsnet.protocolBitswapOne, bsnet.protocolBitswapNoVers: + case bsnet.protocolBitswapOneZero, bsnet.protocolBitswapNoVers: if err := msg.ToNetV0(s); err != nil { log.Debugf("error: %s", err) return err @@ -119,7 +160,7 @@ func (bsnet *impl) NewMessageSender(ctx context.Context, p peer.ID) (MessageSend } func (bsnet *impl) newStreamToPeer(ctx context.Context, p peer.ID) (network.Stream, error) { - return bsnet.host.NewStream(ctx, p, bsnet.protocolBitswap, bsnet.protocolBitswapOne, bsnet.protocolBitswapNoVers) + return bsnet.host.NewStream(ctx, p, bsnet.supportedProtocols...) } func (bsnet *impl) SendMessage( @@ -147,9 +188,9 @@ func (bsnet *impl) SendMessage( func (bsnet *impl) SetDelegate(r Receiver) { bsnet.receiver = r - bsnet.host.SetStreamHandler(bsnet.protocolBitswap, bsnet.handleNewStream) - bsnet.host.SetStreamHandler(bsnet.protocolBitswapOne, bsnet.handleNewStream) - bsnet.host.SetStreamHandler(bsnet.protocolBitswapNoVers, bsnet.handleNewStream) + for _, proto := range bsnet.supportedProtocols { + bsnet.host.SetStreamHandler(proto, bsnet.handleNewStream) + } bsnet.host.Network().Notify((*netNotifiee)(bsnet)) // TODO: StopNotify. @@ -159,6 +200,10 @@ func (bsnet *impl) ConnectTo(ctx context.Context, p peer.ID) error { return bsnet.host.Connect(ctx, peer.AddrInfo{ID: p}) } +func (bsnet *impl) DisconnectFrom(ctx context.Context, p peer.ID) error { + panic("Not implemented: DisconnectFrom() is only used by tests") +} + // FindProvidersAsync returns a channel of providers for the given key. func (bsnet *impl) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID { out := make(chan peer.ID, max) @@ -234,12 +279,10 @@ func (nn *netNotifiee) impl() *impl { func (nn *netNotifiee) Connected(n network.Network, v network.Conn) { nn.impl().receiver.PeerConnected(v.RemotePeer()) } - func (nn *netNotifiee) Disconnected(n network.Network, v network.Conn) { nn.impl().receiver.PeerDisconnected(v.RemotePeer()) } - -func (nn *netNotifiee) OpenedStream(n network.Network, v network.Stream) {} +func (nn *netNotifiee) OpenedStream(n network.Network, s network.Stream) {} func (nn *netNotifiee) ClosedStream(n network.Network, v network.Stream) {} func (nn *netNotifiee) Listen(n network.Network, a ma.Multiaddr) {} func (nn *netNotifiee) ListenClose(n network.Network, a ma.Multiaddr) {} diff --git a/network/ipfs_impl_test.go b/network/ipfs_impl_test.go index cbcc4fec..beecf09c 100644 --- a/network/ipfs_impl_test.go +++ b/network/ipfs_impl_test.go @@ -6,12 +6,15 @@ import ( "time" bsmsg "github.com/ipfs/go-bitswap/message" + pb "github.com/ipfs/go-bitswap/message/pb" + bsnet "github.com/ipfs/go-bitswap/network" tn "github.com/ipfs/go-bitswap/testnet" blocksutil "github.com/ipfs/go-ipfs-blocksutil" mockrouting "github.com/ipfs/go-ipfs-routing/mock" "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p-testing/net" + "github.com/libp2p/go-libp2p-core/protocol" + tnet "github.com/libp2p/go-libp2p-testing/net" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" ) @@ -24,6 +27,14 @@ type receiver struct { lastSender peer.ID } +func newReceiver() *receiver { + return &receiver{ + peers: make(map[peer.ID]struct{}), + messageReceived: make(chan struct{}), + connectionEvent: make(chan struct{}, 1), + } +} + func (r *receiver) ReceiveMessage( ctx context.Context, sender peer.ID, @@ -48,6 
+59,7 @@ func (r *receiver) PeerDisconnected(p peer.ID) { delete(r.peers, p) r.connectionEvent <- struct{}{} } + func TestMessageSendAndReceive(t *testing.T) { // create network ctx := context.Background() @@ -64,16 +76,8 @@ func TestMessageSendAndReceive(t *testing.T) { bsnet1 := streamNet.Adapter(p1) bsnet2 := streamNet.Adapter(p2) - r1 := &receiver{ - peers: make(map[peer.ID]struct{}), - messageReceived: make(chan struct{}), - connectionEvent: make(chan struct{}, 1), - } - r2 := &receiver{ - peers: make(map[peer.ID]struct{}), - messageReceived: make(chan struct{}), - connectionEvent: make(chan struct{}, 1), - } + r1 := newReceiver() + r2 := newReceiver() bsnet1.SetDelegate(r1) bsnet2.SetDelegate(r2) @@ -109,7 +113,7 @@ func TestMessageSendAndReceive(t *testing.T) { block1 := blockGenerator.Next() block2 := blockGenerator.Next() sent := bsmsg.New(false) - sent.AddEntry(block1.Cid(), 1) + sent.AddEntry(block1.Cid(), 1, pb.Message_Wantlist_Block, true) sent.AddBlock(block2) err = bsnet1.SendMessage(ctx, p2.ID(), sent) @@ -159,3 +163,49 @@ func TestMessageSendAndReceive(t *testing.T) { t.Fatal("Sent message blocks did not match received message blocks") } } + +func TestSupportsHave(t *testing.T) { + ctx := context.Background() + mn := mocknet.New(ctx) + mr := mockrouting.NewServer() + streamNet, err := tn.StreamNet(ctx, mn, mr) + if err != nil { + t.Fatal("Unable to setup network") + } + + type testCase struct { + proto protocol.ID + expSupportsHave bool + } + + testCases := []testCase{ + testCase{bsnet.ProtocolBitswap, true}, + testCase{bsnet.ProtocolBitswapOneOne, false}, + testCase{bsnet.ProtocolBitswapOneZero, false}, + testCase{bsnet.ProtocolBitswapNoVers, false}, + } + + for _, tc := range testCases { + p1 := tnet.RandIdentityOrFatal(t) + bsnet1 := streamNet.Adapter(p1) + bsnet1.SetDelegate(newReceiver()) + + p2 := tnet.RandIdentityOrFatal(t) + bsnet2 := streamNet.Adapter(p2, bsnet.SupportedProtocols([]protocol.ID{tc.proto})) + bsnet2.SetDelegate(newReceiver()) + + err = mn.LinkAll() + if err != nil { + t.Fatal(err) + } + + senderCurrent, err := bsnet1.NewMessageSender(ctx, p2.ID()) + if err != nil { + t.Fatal(err) + } + + if senderCurrent.SupportsHave() != tc.expSupportsHave { + t.Fatal("Expected sender HAVE message support", tc.proto, tc.expSupportsHave) + } + } +} diff --git a/network/options.go b/network/options.go index 38bb63d1..1df8963a 100644 --- a/network/options.go +++ b/network/options.go @@ -5,7 +5,8 @@ import "github.com/libp2p/go-libp2p-core/protocol" type NetOpt func(*Settings) type Settings struct { - ProtocolPrefix protocol.ID + ProtocolPrefix protocol.ID + SupportedProtocols []protocol.ID } func Prefix(prefix protocol.ID) NetOpt { @@ -13,3 +14,9 @@ func Prefix(prefix protocol.ID) NetOpt { settings.ProtocolPrefix = prefix } } + +func SupportedProtocols(protos []protocol.ID) NetOpt { + return func(settings *Settings) { + settings.SupportedProtocols = protos + } +} diff --git a/peermanager/peermanager.go b/peermanager/peermanager.go index 18fc56b7..ddd59399 100644 --- a/peermanager/peermanager.go +++ b/peermanager/peermanager.go @@ -2,21 +2,28 @@ package peermanager import ( "context" + "sync" - bsmsg "github.com/ipfs/go-bitswap/message" - wantlist "github.com/ipfs/go-bitswap/wantlist" + "github.com/ipfs/go-metrics-interface" + cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p-core/peer" ) // PeerQueue provides a queue of messages to be sent for a single peer. 
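+// (In this refactor the queue is keyed by want type rather than by session:
+// the AddBroadcastWantHaves / AddWants / AddCancels methods below replace
+// AddMessage and AddWantlist. MessageQueue in the messagequeue package is the
+// concrete implementation; the tests use a mock.)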
type PeerQueue interface { - AddMessage(entries []bsmsg.Entry, ses uint64) + AddBroadcastWantHaves([]cid.Cid) + AddWants([]cid.Cid, []cid.Cid) + AddCancels([]cid.Cid) Startup() - AddWantlist(initialWants *wantlist.SessionTrackedWantlist) Shutdown() } +type Session interface { + ID() uint64 + SignalAvailability(peer.ID, bool) +} + // PeerQueueFactory provides a function that will create a PeerQueue. type PeerQueueFactory func(ctx context.Context, p peer.ID) PeerQueue @@ -27,24 +34,47 @@ type peerQueueInstance struct { // PeerManager manages a pool of peers and sends messages to peers in the pool. type PeerManager struct { + // sync access to peerQueues and peerWantManager + pqLk sync.RWMutex // peerQueues -- interact through internal utility functions get/set/remove/iterate peerQueues map[peer.ID]*peerQueueInstance + pwm *peerWantManager createPeerQueue PeerQueueFactory ctx context.Context + + psLk sync.RWMutex + sessions map[uint64]Session + peerSessions map[peer.ID]map[uint64]struct{} + + self peer.ID } // New creates a new PeerManager, given a context and a peerQueueFactory. -func New(ctx context.Context, createPeerQueue PeerQueueFactory) *PeerManager { +func New(ctx context.Context, createPeerQueue PeerQueueFactory, self peer.ID) *PeerManager { + wantGauge := metrics.NewCtx(ctx, "wantlist_total", "Number of items in wantlist.").Gauge() return &PeerManager{ peerQueues: make(map[peer.ID]*peerQueueInstance), + pwm: newPeerWantManager(wantGauge), createPeerQueue: createPeerQueue, ctx: ctx, + self: self, + + sessions: make(map[uint64]Session), + peerSessions: make(map[peer.ID]map[uint64]struct{}), } } +func (pm *PeerManager) AvailablePeers() []peer.ID { + // TODO: Rate-limit peers + return pm.ConnectedPeers() +} + // ConnectedPeers returns a list of peers this PeerManager is managing. func (pm *PeerManager) ConnectedPeers() []peer.ID { + pm.pqLk.RLock() + defer pm.pqLk.RUnlock() + peers := make([]peer.ID, 0, len(pm.peerQueues)) for p := range pm.peerQueues { peers = append(peers, p) @@ -54,18 +84,31 @@ func (pm *PeerManager) ConnectedPeers() []peer.ID { // Connected is called to add a new peer to the pool, and send it an initial set // of wants. -func (pm *PeerManager) Connected(p peer.ID, initialWants *wantlist.SessionTrackedWantlist) { +func (pm *PeerManager) Connected(p peer.ID, initialWantHaves []cid.Cid) { + pm.pqLk.Lock() + defer pm.pqLk.Unlock() + pq := pm.getOrCreate(p) + pq.refcnt++ - if pq.refcnt == 0 { - pq.pq.AddWantlist(initialWants) + // If this is the first connection to the peer + if pq.refcnt == 1 { + // Inform the peer want manager that there's a new peer + pm.pwm.AddPeer(p) + // Record that the want-haves are being sent to the peer + pm.pwm.PrepareSendWants(p, nil, initialWantHaves) + // Broadcast any live want-haves to the newly connected peers + pq.pq.AddBroadcastWantHaves(initialWantHaves) + // Inform the sessions that the peer has connected + pm.signalAvailability(p, true) } - - pq.refcnt++ } // Disconnected is called to remove a peer from the pool. func (pm *PeerManager) Disconnected(p peer.ID) { + pm.pqLk.Lock() + defer pm.pqLk.Unlock() + pq, ok := pm.peerQueues[p] if !ok { @@ -77,25 +120,62 @@ func (pm *PeerManager) Disconnected(p peer.ID) { return } + // Inform the sessions that the peer has disconnected + pm.signalAvailability(p, false) + + // Clean up the peer delete(pm.peerQueues, p) pq.pq.Shutdown() + pm.pwm.RemovePeer(p) } -// SendMessage is called to send a message to all or some peers in the pool; -// if targets is nil, it sends to all. 
-func (pm *PeerManager) SendMessage(entries []bsmsg.Entry, targets []peer.ID, from uint64) { - if len(targets) == 0 { - for _, p := range pm.peerQueues { - p.pq.AddMessage(entries, from) +func (pm *PeerManager) BroadcastWantHaves(ctx context.Context, wantHaves []cid.Cid) { + pm.pqLk.Lock() + defer pm.pqLk.Unlock() + + for p, ks := range pm.pwm.PrepareBroadcastWantHaves(wantHaves) { + if pqi, ok := pm.peerQueues[p]; ok { + pqi.pq.AddBroadcastWantHaves(ks) } - } else { - for _, t := range targets { - pqi := pm.getOrCreate(t) - pqi.pq.AddMessage(entries, from) + } +} + +func (pm *PeerManager) SendWants(ctx context.Context, p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { + pm.pqLk.Lock() + defer pm.pqLk.Unlock() + + if pqi, ok := pm.peerQueues[p]; ok { + wblks, whvs := pm.pwm.PrepareSendWants(p, wantBlocks, wantHaves) + pqi.pq.AddWants(wblks, whvs) + } +} + +func (pm *PeerManager) SendCancels(ctx context.Context, cancelKs []cid.Cid) { + pm.pqLk.Lock() + defer pm.pqLk.Unlock() + + // Send a CANCEL to each peer that has been sent a want-block or want-have + for p, ks := range pm.pwm.PrepareSendCancels(cancelKs) { + if pqi, ok := pm.peerQueues[p]; ok { + pqi.pq.AddCancels(ks) } } } +func (pm *PeerManager) CurrentWants() []cid.Cid { + pm.pqLk.RLock() + defer pm.pqLk.RUnlock() + + return pm.pwm.GetWantBlocks() +} + +func (pm *PeerManager) CurrentWantHaves() []cid.Cid { + pm.pqLk.RLock() + defer pm.pqLk.RUnlock() + + return pm.pwm.GetWantHaves() +} + func (pm *PeerManager) getOrCreate(p peer.ID) *peerQueueInstance { pqi, ok := pm.peerQueues[p] if !ok { @@ -106,3 +186,44 @@ func (pm *PeerManager) getOrCreate(p peer.ID) *peerQueueInstance { } return pqi } + +func (pm *PeerManager) RegisterSession(p peer.ID, s Session) bool { + pm.psLk.Lock() + defer pm.psLk.Unlock() + + if _, ok := pm.sessions[s.ID()]; !ok { + pm.sessions[s.ID()] = s + } + + if _, ok := pm.peerSessions[p]; !ok { + pm.peerSessions[p] = make(map[uint64]struct{}) + } + pm.peerSessions[p][s.ID()] = struct{}{} + + _, ok := pm.peerQueues[p] + return ok +} + +func (pm *PeerManager) UnregisterSession(ses uint64) { + pm.psLk.Lock() + defer pm.psLk.Unlock() + + for p := range pm.peerSessions { + delete(pm.peerSessions[p], ses) + if len(pm.peerSessions[p]) == 0 { + delete(pm.peerSessions, p) + } + } + + delete(pm.sessions, ses) +} + +func (pm *PeerManager) signalAvailability(p peer.ID, isConnected bool) { + for p, sesIds := range pm.peerSessions { + for sesId := range sesIds { + if s, ok := pm.sessions[sesId]; ok { + s.SignalAvailability(p, isConnected) + } + } + } +} diff --git a/peermanager/peermanager_test.go b/peermanager/peermanager_test.go index cea9ce26..c62cb3aa 100644 --- a/peermanager/peermanager_test.go +++ b/peermanager/peermanager_test.go @@ -2,77 +2,85 @@ package peermanager import ( "context" - "reflect" "testing" "time" "github.com/ipfs/go-bitswap/testutil" + cid "github.com/ipfs/go-cid" - bsmsg "github.com/ipfs/go-bitswap/message" - wantlist "github.com/ipfs/go-bitswap/wantlist" "github.com/libp2p/go-libp2p-core/peer" ) -type messageSent struct { - p peer.ID - entries []bsmsg.Entry - ses uint64 +type msg struct { + p peer.ID + wantBlocks []cid.Cid + wantHaves []cid.Cid + cancels []cid.Cid } -type fakePeer struct { - p peer.ID - messagesSent chan messageSent +type mockPeerQueue struct { + p peer.ID + msgs chan msg } -func (fp *fakePeer) Startup() {} -func (fp *fakePeer) Shutdown() {} +func (fp *mockPeerQueue) Startup() {} +func (fp *mockPeerQueue) Shutdown() {} -func (fp *fakePeer) AddMessage(entries []bsmsg.Entry, ses 
uint64) { - fp.messagesSent <- messageSent{fp.p, entries, ses} +func (fp *mockPeerQueue) AddBroadcastWantHaves(whs []cid.Cid) { + fp.msgs <- msg{fp.p, nil, whs, nil} } -func (fp *fakePeer) AddWantlist(initialWants *wantlist.SessionTrackedWantlist) {} -func makePeerQueueFactory(messagesSent chan messageSent) PeerQueueFactory { - return func(ctx context.Context, p peer.ID) PeerQueue { - return &fakePeer{ - p: p, - messagesSent: messagesSent, - } - } +func (fp *mockPeerQueue) AddWants(wbs []cid.Cid, whs []cid.Cid) { + fp.msgs <- msg{fp.p, wbs, whs, nil} +} +func (fp *mockPeerQueue) AddCancels(cs []cid.Cid) { + fp.msgs <- msg{fp.p, nil, nil, cs} } -func collectAndCheckMessages( - ctx context.Context, - t *testing.T, - messagesSent <-chan messageSent, - entries []bsmsg.Entry, - ses uint64, - timeout time.Duration) []peer.ID { - var peersReceived []peer.ID - timeoutCtx, cancel := context.WithTimeout(ctx, timeout) +type peerWants struct { + wantHaves []cid.Cid + wantBlocks []cid.Cid + cancels []cid.Cid +} + +func collectMessages(ch chan msg, timeout time.Duration) map[peer.ID]peerWants { + ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() + + collected := make(map[peer.ID]peerWants) for { select { - case nextMessage := <-messagesSent: - if nextMessage.ses != ses { - t.Fatal("Message enqueued with wrong session") - } - if !reflect.DeepEqual(nextMessage.entries, entries) { - t.Fatal("Message enqueued with wrong wants") + case m := <-ch: + pw, ok := collected[m.p] + if !ok { + pw = peerWants{} } - peersReceived = append(peersReceived, nextMessage.p) - case <-timeoutCtx.Done(): - return peersReceived + pw.wantHaves = append(pw.wantHaves, m.wantHaves...) + pw.wantBlocks = append(pw.wantBlocks, m.wantBlocks...) + pw.cancels = append(pw.cancels, m.cancels...) 
+ collected[m.p] = pw + case <-ctx.Done(): + return collected + } + } +} + +func makePeerQueueFactory(msgs chan msg) PeerQueueFactory { + return func(ctx context.Context, p peer.ID) PeerQueue { + return &mockPeerQueue{ + p: p, + msgs: msgs, } } } func TestAddingAndRemovingPeers(t *testing.T) { ctx := context.Background() - peerQueueFactory := makePeerQueueFactory(nil) + msgs := make(chan msg, 16) + peerQueueFactory := makePeerQueueFactory(msgs) - tp := testutil.GeneratePeers(5) - peer1, peer2, peer3, peer4, peer5 := tp[0], tp[1], tp[2], tp[3], tp[4] - peerManager := New(ctx, peerQueueFactory) + tp := testutil.GeneratePeers(6) + self, peer1, peer2, peer3, peer4, peer5 := tp[0], tp[1], tp[2], tp[3], tp[4], tp[5] + peerManager := New(ctx, peerQueueFactory, self) peerManager.Connected(peer1, nil) peerManager.Connected(peer2, nil) @@ -109,63 +117,186 @@ func TestAddingAndRemovingPeers(t *testing.T) { } } -func TestSendingMessagesToPeers(t *testing.T) { - ctx := context.Background() - messagesSent := make(chan messageSent, 16) - peerQueueFactory := makePeerQueueFactory(messagesSent) +func TestBroadcastOnConnect(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + msgs := make(chan msg, 16) + peerQueueFactory := makePeerQueueFactory(msgs) + tp := testutil.GeneratePeers(2) + self, peer1 := tp[0], tp[1] + peerManager := New(ctx, peerQueueFactory, self) - tp := testutil.GeneratePeers(5) + cids := testutil.GenerateCids(2) - peer1, peer2, peer3, peer4, peer5 := tp[0], tp[1], tp[2], tp[3], tp[4] - peerManager := New(ctx, peerQueueFactory) + // Connect with two broadcast wants for first peer + peerManager.Connected(peer1, cids) + collected := collectMessages(msgs, 2*time.Millisecond) - peerManager.Connected(peer1, nil) + if len(collected[peer1].wantHaves) != 2 { + t.Fatal("Expected want-haves to be sent to newly connected peer") + } +} + +func TestBroadcastWantHaves(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + msgs := make(chan msg, 16) + peerQueueFactory := makePeerQueueFactory(msgs) + tp := testutil.GeneratePeers(3) + self, peer1, peer2 := tp[0], tp[1], tp[2] + peerManager := New(ctx, peerQueueFactory, self) + + cids := testutil.GenerateCids(3) + + // Connect to first peer with two broadcast wants + peerManager.Connected(peer1, []cid.Cid{cids[0], cids[1]}) + collected := collectMessages(msgs, 2*time.Millisecond) + + if len(collected[peer1].wantHaves) != 2 { + t.Fatal("Expected want-haves to be sent to newly connected peer") + } + + // Connect to second peer peerManager.Connected(peer2, nil) - peerManager.Connected(peer3, nil) - entries := testutil.GenerateMessageEntries(5, false) - ses := testutil.GenerateSessionID() + // Send a broadcast to all peers, including cid that was already sent to + // first peer + peerManager.BroadcastWantHaves(ctx, []cid.Cid{cids[0], cids[2]}) + collected = collectMessages(msgs, 2*time.Millisecond) + + // One of the want-haves was already sent to peer1 + if len(collected[peer1].wantHaves) != 1 { + t.Fatal("Expected 1 want-haves to be sent to first peer", collected[peer1].wantHaves) + } + if len(collected[peer2].wantHaves) != 2 { + t.Fatal("Expected 2 want-haves to be sent to second peer") + } +} + +func TestSendWants(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + msgs := make(chan msg, 16) + peerQueueFactory := makePeerQueueFactory(msgs) + tp := testutil.GeneratePeers(2) + self, peer1 := tp[0], tp[1] 
+ peerManager := New(ctx, peerQueueFactory, self) + cids := testutil.GenerateCids(4) - peerManager.SendMessage(entries, nil, ses) + peerManager.Connected(peer1, nil) + peerManager.SendWants(ctx, peer1, []cid.Cid{cids[0]}, []cid.Cid{cids[2]}) + collected := collectMessages(msgs, 2*time.Millisecond) - peersReceived := collectAndCheckMessages( - ctx, t, messagesSent, entries, ses, 10*time.Millisecond) - if len(peersReceived) != 3 { - t.Fatal("Incorrect number of peers received messages") + if len(collected[peer1].wantHaves) != 1 { + t.Fatal("Expected want-have to be sent to peer") } + if len(collected[peer1].wantBlocks) != 1 { + t.Fatal("Expected want-block to be sent to peer") + } + + peerManager.SendWants(ctx, peer1, []cid.Cid{cids[0], cids[1]}, []cid.Cid{cids[2], cids[3]}) + collected = collectMessages(msgs, 2*time.Millisecond) - if !testutil.ContainsPeer(peersReceived, peer1) || - !testutil.ContainsPeer(peersReceived, peer2) || - !testutil.ContainsPeer(peersReceived, peer3) { - t.Fatal("Peers should have received message but did not") + // First want-have and want-block should be filtered (because they were + // already sent) + if len(collected[peer1].wantHaves) != 1 { + t.Fatal("Expected want-have to be sent to peer") } + if len(collected[peer1].wantBlocks) != 1 { + t.Fatal("Expected want-block to be sent to peer") + } +} + +func TestSendCancels(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + msgs := make(chan msg, 16) + peerQueueFactory := makePeerQueueFactory(msgs) + tp := testutil.GeneratePeers(3) + self, peer1, peer2 := tp[0], tp[1], tp[2] + peerManager := New(ctx, peerQueueFactory, self) + cids := testutil.GenerateCids(4) + + // Connect to peer1 and peer2 + peerManager.Connected(peer1, nil) + peerManager.Connected(peer2, nil) + + // Send 2 want-blocks and 1 want-have to peer1 + peerManager.SendWants(ctx, peer1, []cid.Cid{cids[0], cids[1]}, []cid.Cid{cids[2]}) + + // Clear messages + collectMessages(msgs, 2*time.Millisecond) + + // Send cancels for 1 want-block and 1 want-have + peerManager.SendCancels(ctx, []cid.Cid{cids[0], cids[2]}) + collected := collectMessages(msgs, 2*time.Millisecond) - if testutil.ContainsPeer(peersReceived, peer4) || - testutil.ContainsPeer(peersReceived, peer5) { - t.Fatal("Peers received message but should not have") + if _, ok := collected[peer2]; ok { + t.Fatal("Expected no cancels to be sent to peer that was not sent messages") } + if len(collected[peer1].cancels) != 2 { + t.Fatal("Expected cancel to be sent for want-block and want-have sent to peer") + } + + // Send cancels for all cids + peerManager.SendCancels(ctx, cids) + collected = collectMessages(msgs, 2*time.Millisecond) + + if _, ok := collected[peer2]; ok { + t.Fatal("Expected no cancels to be sent to peer that was not sent messages") + } + if len(collected[peer1].cancels) != 1 { + t.Fatal("Expected cancel to be sent for remaining want-block") + } +} + +func (s *sess) ID() uint64 { + return s.id +} +func (s *sess) SignalAvailability(p peer.ID, isAvailable bool) { + s.available[p] = isAvailable +} - var peersToSendTo []peer.ID - peersToSendTo = append(peersToSendTo, peer1, peer3, peer4) - peerManager.SendMessage(entries, peersToSendTo, ses) - peersReceived = collectAndCheckMessages( - ctx, t, messagesSent, entries, ses, 10*time.Millisecond) +type sess struct { + id uint64 + available map[peer.ID]bool +} - if len(peersReceived) != 3 { - t.Fatal("Incorrect number of peers received messages") +func newSess(id uint64) *sess { + return 
&sess{id, make(map[peer.ID]bool)} +} + +func TestSessionRegistration(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + msgs := make(chan msg, 16) + peerQueueFactory := makePeerQueueFactory(msgs) + + tp := testutil.GeneratePeers(2) + self, p1 := tp[0], tp[1] + peerManager := New(ctx, peerQueueFactory, self) + + id := uint64(1) + s := newSess(id) + peerManager.RegisterSession(p1, s) + if s.available[p1] { + t.Fatal("Expected peer not to be available until connected") } - if !testutil.ContainsPeer(peersReceived, peer1) || - !testutil.ContainsPeer(peersReceived, peer3) { - t.Fatal("Peers should have received message but did not") + peerManager.Connected(p1, nil) + if !s.available[p1] { + t.Fatal("Expected signal callback") } - if testutil.ContainsPeer(peersReceived, peer2) || - testutil.ContainsPeer(peersReceived, peer5) { - t.Fatal("Peers received message but should not have") + peerManager.Disconnected(p1) + if s.available[p1] { + t.Fatal("Expected signal callback") } - if !testutil.ContainsPeer(peersReceived, peer4) { - t.Fatal("Peer should have autoconnected on message send") + peerManager.UnregisterSession(id) + + peerManager.Connected(p1, nil) + if s.available[p1] { + t.Fatal("Expected no signal callback (session unregistered)") } } diff --git a/peermanager/peerwantmanager.go b/peermanager/peerwantmanager.go new file mode 100644 index 00000000..31bcf795 --- /dev/null +++ b/peermanager/peerwantmanager.go @@ -0,0 +1,206 @@ +package peermanager + +import ( + "bytes" + "fmt" + + lu "github.com/ipfs/go-bitswap/logutil" + + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p-core/peer" +) + +// Gauge can be used to keep track of a metric that increases and decreases +// incrementally. It is used by the peerWantManager to track the number of +// want-blocks that are active (i.e. sent but no response received) +type Gauge interface { + Inc() + Dec() +} + +// peerWantManager keeps track of which want-haves and want-blocks have been +// sent to each peer, so that the PeerManager doesn't send duplicates. +type peerWantManager struct { + peerWants map[peer.ID]*peerWant + // Keeps track of the number of active want-blocks + wantBlockGauge Gauge +} + +type peerWant struct { + wantBlocks *cid.Set + wantHaves *cid.Set +} + +// newPeerWantManager creates a new peerWantManager with a Gauge that keeps track of the +// number of active want-blocks (i.e. sent but no response received) +func newPeerWantManager(wantBlockGauge Gauge) *peerWantManager { + return &peerWantManager{ + peerWants: make(map[peer.ID]*peerWant), + wantBlockGauge: wantBlockGauge, + } +} + +// AddPeer adds a peer whose wants we need to keep track of +func (pwm *peerWantManager) AddPeer(p peer.ID) { + if _, ok := pwm.peerWants[p]; !ok { + pwm.peerWants[p] = &peerWant{ + wantBlocks: cid.NewSet(), + wantHaves: cid.NewSet(), + } + } +} + +// RemovePeer removes a peer and its associated wants from tracking +func (pwm *peerWantManager) RemovePeer(p peer.ID) { + delete(pwm.peerWants, p) +} + +// PrepareBroadcastWantHaves filters the list of want-haves for each peer, +// returning a map of peers to the want-haves they have not yet been sent.
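+// For example (illustrative): if c1 was already sent to peer A as a want-block
+// or want-have, and nothing has been sent to peer B, then
+// PrepareBroadcastWantHaves([c1, c2]) returns {A: [c2], B: [c1, c2]}.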
+func (pwm *peerWantManager) PrepareBroadcastWantHaves(wantHaves []cid.Cid) map[peer.ID][]cid.Cid { + res := make(map[peer.ID][]cid.Cid) + + // Iterate over all known peers + for p, pws := range pwm.peerWants { + // Iterate over all want-haves + for _, c := range wantHaves { + // If the CID has not been sent as a want-block or want-have + if !pws.wantBlocks.Has(c) && !pws.wantHaves.Has(c) { + // Record that the CID has been sent as a want-have + pws.wantHaves.Add(c) + + // Add the CID to the results + if _, ok := res[p]; !ok { + res[p] = make([]cid.Cid, 0, 1) + } + res[p] = append(res[p], c) + } + } + } + + return res +} + +// PrepareSendWants filters the list of want-blocks and want-haves such that +// it only contains wants that have not already been sent to the peer. +func (pwm *peerWantManager) PrepareSendWants(p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) ([]cid.Cid, []cid.Cid) { + resWantBlks := make([]cid.Cid, 0) + resWantHvs := make([]cid.Cid, 0) + + // Get the existing want-blocks and want-haves for the peer + if pws, ok := pwm.peerWants[p]; ok { + // Iterate over the requested want-blocks + for _, c := range wantBlocks { + // If the want-block hasn't been sent to the peer + if !pws.wantBlocks.Has(c) { + // Record that the CID was sent as a want-block + pws.wantBlocks.Add(c) + + // Add the CID to the results + resWantBlks = append(resWantBlks, c) + + // Make sure the CID is no longer recorded as a want-have + pws.wantHaves.Remove(c) + + // Increment the count of want-blocks + pwm.wantBlockGauge.Inc() + } + } + + // Iterate over the requested want-haves + for _, c := range wantHaves { + // If the CID has not been sent as a want-block or want-have + if !pws.wantBlocks.Has(c) && !pws.wantHaves.Has(c) { + // Record that the CID was sent as a want-have + pws.wantHaves.Add(c) + + // Add the CID to the results + resWantHvs = append(resWantHvs, c) + } + } + } + + return resWantBlks, resWantHvs +} + +// PrepareSendCancels filters the list of cancels for each peer, +// returning a map of peers which only contains cancels for wants that have +// been sent to the peer. 
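+// For example (illustrative): if want-block c1 was only ever sent to peer A,
+// then PrepareSendCancels([c1]) returns {A: [c1]}, removes c1 from A's tracked
+// wants, and decrements the want-block gauge.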
+func (pwm *peerWantManager) PrepareSendCancels(cancelKs []cid.Cid) map[peer.ID][]cid.Cid { + res := make(map[peer.ID][]cid.Cid) + + // Iterate over all known peers + for p, pws := range pwm.peerWants { + // Iterate over all requested cancels + for _, c := range cancelKs { + isWantBlock := pws.wantBlocks.Has(c) + isWantHave := pws.wantHaves.Has(c) + + // If the CID was sent as a want-block, decrement the want-block count + if isWantBlock { + pwm.wantBlockGauge.Dec() + } + + // If the CID was sent as a want-block or want-have + if isWantBlock || isWantHave { + // Remove the CID from the recorded want-blocks and want-haves + pws.wantBlocks.Remove(c) + pws.wantHaves.Remove(c) + + // Add the CID to the results + if _, ok := res[p]; !ok { + res[p] = make([]cid.Cid, 0, 1) + } + res[p] = append(res[p], c) + } + } + } + + return res +} + +// GetWantBlocks returns the set of all want-blocks sent to all peers +func (pwm *peerWantManager) GetWantBlocks() []cid.Cid { + res := cid.NewSet() + + // Iterate over all known peers + for _, pws := range pwm.peerWants { + // Iterate over all want-blocks + for _, c := range pws.wantBlocks.Keys() { + // Add the CID to the results + res.Add(c) + } + } + + return res.Keys() +} + +// GetWantHaves returns the set of all want-haves sent to all peers +func (pwm *peerWantManager) GetWantHaves() []cid.Cid { + res := cid.NewSet() + + // Iterate over all known peers + for _, pws := range pwm.peerWants { + // Iterate over all want-haves + for _, c := range pws.wantHaves.Keys() { + // Add the CID to the results + res.Add(c) + } + } + + return res.Keys() +} + +func (pwm *peerWantManager) String() string { + var b bytes.Buffer + for p, ws := range pwm.peerWants { + b.WriteString(fmt.Sprintf("Peer %s: %d want-have / %d want-block:\n", lu.P(p), ws.wantHaves.Len(), ws.wantBlocks.Len())) + for _, c := range ws.wantHaves.Keys() { + b.WriteString(fmt.Sprintf(" want-have %s\n", lu.C(c))) + } + for _, c := range ws.wantBlocks.Keys() { + b.WriteString(fmt.Sprintf(" want-block %s\n", lu.C(c))) + } + } + return b.String() +} diff --git a/peermanager/peerwantmanager_test.go b/peermanager/peerwantmanager_test.go new file mode 100644 index 00000000..dc9e181c --- /dev/null +++ b/peermanager/peerwantmanager_test.go @@ -0,0 +1,292 @@ +package peermanager + +import ( + "testing" + + "github.com/ipfs/go-bitswap/testutil" + + cid "github.com/ipfs/go-cid" +) + +type gauge struct { + count int +} + +func (g *gauge) Inc() { + g.count++ +} +func (g *gauge) Dec() { + g.count-- +} + +func TestEmpty(t *testing.T) { + pwm := newPeerWantManager(&gauge{}) + + if len(pwm.GetWantBlocks()) > 0 { + t.Fatal("Expected GetWantBlocks() to have length 0") + } + if len(pwm.GetWantHaves()) > 0 { + t.Fatal("Expected GetWantHaves() to have length 0") + } +} + +func TestPrepareBroadcastWantHaves(t *testing.T) { + pwm := newPeerWantManager(&gauge{}) + + peers := testutil.GeneratePeers(3) + cids := testutil.GenerateCids(2) + cids2 := testutil.GenerateCids(2) + cids3 := testutil.GenerateCids(2) + + pwm.AddPeer(peers[0]) + pwm.AddPeer(peers[1]) + + // Broadcast 2 cids to 2 peers + bcst := pwm.PrepareBroadcastWantHaves(cids) + if len(bcst) != 2 { + t.Fatal("Expected 2 peers") + } + for p := range bcst { + if !testutil.MatchKeysIgnoreOrder(bcst[p], cids) { + t.Fatal("Expected all cids to be broadcast") + } + } + + // Broadcasting same cids should have no effect + bcst2 := pwm.PrepareBroadcastWantHaves(cids) + if len(bcst2) != 0 { + t.Fatal("Expected 0 peers") + } + + // Broadcast 2 other cids + bcst3 := 
pwm.PrepareBroadcastWantHaves(cids2) + if len(bcst3) != 2 { + t.Fatal("Expected 2 peers") + } + for p := range bcst3 { + if !testutil.MatchKeysIgnoreOrder(bcst3[p], cids2) { + t.Fatal("Expected all new cids to be broadcast") + } + } + + // Broadcast mix of old and new cids + bcst4 := pwm.PrepareBroadcastWantHaves(append(cids, cids3...)) + if len(bcst4) != 2 { + t.Fatal("Expected 2 peers") + } + // Only new cids should be broadcast + for p := range bcst4 { + if !testutil.MatchKeysIgnoreOrder(bcst4[p], cids3) { + t.Fatal("Expected all new cids to be broadcast") + } + } + + // Sending want-block for a cid should prevent broadcast to that peer + cids4 := testutil.GenerateCids(4) + wantBlocks := []cid.Cid{cids4[0], cids4[2]} + pwm.PrepareSendWants(peers[0], wantBlocks, []cid.Cid{}) + + bcst5 := pwm.PrepareBroadcastWantHaves(cids4) + if len(bcst5) != 2 { + t.Fatal("Expected 2 peers") + } + // Only cids that were not sent as want-block to peer should be broadcast + for p := range bcst5 { + if p == peers[0] { + if !testutil.MatchKeysIgnoreOrder(bcst5[p], []cid.Cid{cids4[1], cids4[3]}) { + t.Fatal("Expected unsent cids to be broadcast") + } + } + if p == peers[1] { + if !testutil.MatchKeysIgnoreOrder(bcst5[p], cids4) { + t.Fatal("Expected all cids to be broadcast") + } + } + } + + // Add another peer + pwm.AddPeer(peers[2]) + bcst6 := pwm.PrepareBroadcastWantHaves(cids) + if len(bcst6) != 1 { + t.Fatal("Expected 1 peer") + } + for p := range bcst6 { + if !testutil.MatchKeysIgnoreOrder(bcst6[p], cids) { + t.Fatal("Expected all cids to be broadcast") + } + } +} + +func TestPrepareSendWants(t *testing.T) { + pwm := newPeerWantManager(&gauge{}) + + peers := testutil.GeneratePeers(2) + p0 := peers[0] + p1 := peers[1] + cids := testutil.GenerateCids(2) + cids2 := testutil.GenerateCids(2) + + pwm.AddPeer(p0) + pwm.AddPeer(p1) + + // Send 2 want-blocks and 2 want-haves to p0 + wb, wh := pwm.PrepareSendWants(p0, cids, cids2) + if !testutil.MatchKeysIgnoreOrder(wb, cids) { + t.Fatal("Expected 2 want-blocks") + } + if !testutil.MatchKeysIgnoreOrder(wh, cids2) { + t.Fatal("Expected 2 want-haves") + } + + // Send to p0 + // - 1 old want-block and 2 new want-blocks + // - 1 old want-have and 2 new want-haves + cids3 := testutil.GenerateCids(2) + cids4 := testutil.GenerateCids(2) + wb2, wh2 := pwm.PrepareSendWants(p0, append(cids3, cids[0]), append(cids4, cids2[0])) + if !testutil.MatchKeysIgnoreOrder(wb2, cids3) { + t.Fatal("Expected 2 want-blocks") + } + if !testutil.MatchKeysIgnoreOrder(wh2, cids4) { + t.Fatal("Expected 2 want-haves") + } + + // Send to p0 as want-blocks: 1 new want-block, 1 old want-have + cids5 := testutil.GenerateCids(1) + newWantBlockOldWantHave := append(cids5, cids2[0]) + wb3, wh3 := pwm.PrepareSendWants(p0, newWantBlockOldWantHave, []cid.Cid{}) + // If a want was sent as a want-have, it should be ok to now send it as a + // want-block + if !testutil.MatchKeysIgnoreOrder(wb3, newWantBlockOldWantHave) { + t.Fatal("Expected 2 want-blocks") + } + if len(wh3) != 0 { + t.Fatal("Expected 0 want-haves") + } + + // Send to p0 as want-haves: 1 new want-have, 1 old want-block + cids6 := testutil.GenerateCids(1) + newWantHaveOldWantBlock := append(cids6, cids[0]) + wb4, wh4 := pwm.PrepareSendWants(p0, []cid.Cid{}, newWantHaveOldWantBlock) + // If a want was previously sent as a want-block, it should not be + // possible to now send it as a want-have + if !testutil.MatchKeysIgnoreOrder(wh4, cids6) { + t.Fatal("Expected 1 want-have") + } + if len(wb4) != 0 { + t.Fatal("Expected 0 want-blocks") + } +
+ // Send 2 want-blocks and 2 want-haves to p1 + wb5, wh5 := pwm.PrepareSendWants(p1, cids, cids2) + if !testutil.MatchKeysIgnoreOrder(wb5, cids) { + t.Fatal("Expected 2 want-blocks") + } + if !testutil.MatchKeysIgnoreOrder(wh5, cids2) { + t.Fatal("Expected 2 want-haves") + } +} + +func TestPrepareSendCancels(t *testing.T) { + pwm := newPeerWantManager(&gauge{}) + + peers := testutil.GeneratePeers(2) + p0 := peers[0] + p1 := peers[1] + wb1 := testutil.GenerateCids(2) + wh1 := testutil.GenerateCids(2) + wb2 := testutil.GenerateCids(2) + wh2 := testutil.GenerateCids(2) + allwb := append(wb1, wb2...) + allwh := append(wh1, wh2...) + + pwm.AddPeer(p0) + pwm.AddPeer(p1) + + // Send 2 want-blocks and 2 want-haves to p0 + pwm.PrepareSendWants(p0, wb1, wh1) + // Send 3 want-blocks and 3 want-haves to p1 + // (1 overlapping want-block / want-have with p0) + pwm.PrepareSendWants(p1, append(wb2, wb1[1]), append(wh2, wh1[1])) + + if !testutil.MatchKeysIgnoreOrder(pwm.GetWantBlocks(), allwb) { + t.Fatal("Expected 4 cids to be wanted") + } + if !testutil.MatchKeysIgnoreOrder(pwm.GetWantHaves(), allwh) { + t.Fatal("Expected 4 cids to be wanted") + } + + // Cancel 1 want-block and 1 want-have that were sent to p0 + res := pwm.PrepareSendCancels([]cid.Cid{wb1[0], wh1[0]}) + // Should cancel the want-block and want-have + if len(res) != 1 { + t.Fatal("Expected 1 peer") + } + if !testutil.MatchKeysIgnoreOrder(res[p0], []cid.Cid{wb1[0], wh1[0]}) { + t.Fatal("Expected 2 cids to be cancelled") + } + if !testutil.MatchKeysIgnoreOrder(pwm.GetWantBlocks(), append(wb2, wb1[1])) { + t.Fatal("Expected 3 want-blocks") + } + if !testutil.MatchKeysIgnoreOrder(pwm.GetWantHaves(), append(wh2, wh1[1])) { + t.Fatal("Expected 3 want-haves") + } + + // Cancel everything + allCids := append(allwb, allwh...) + res2 := pwm.PrepareSendCancels(allCids) + // Should cancel the remaining want-blocks and want-haves + if len(res2) != 2 { + t.Fatal("Expected 2 peers", len(res2)) + } + if !testutil.MatchKeysIgnoreOrder(res2[p0], []cid.Cid{wb1[1], wh1[1]}) { + t.Fatal("Expected un-cancelled cids to be cancelled") + } + remainingP2 := append(wb2, wh2...) 
+ remainingP2 = append(remainingP2, wb1[1], wh1[1]) + if !testutil.MatchKeysIgnoreOrder(res2[p1], remainingP2) { + t.Fatal("Expected un-cancelled cids to be cancelled") + } + if len(pwm.GetWantBlocks()) != 0 { + t.Fatal("Expected 0 want-blocks") + } + if len(pwm.GetWantHaves()) != 0 { + t.Fatal("Expected 0 want-haves") + } +} + +func TestStats(t *testing.T) { + g := &gauge{} + pwm := newPeerWantManager(g) + + peers := testutil.GeneratePeers(2) + p0 := peers[0] + cids := testutil.GenerateCids(2) + cids2 := testutil.GenerateCids(2) + + pwm.AddPeer(p0) + + // Send 2 want-blocks and 2 want-haves to p0 + pwm.PrepareSendWants(p0, cids, cids2) + + if g.count != 2 { + t.Fatal("Expected 2 want-blocks") + } + + // Send 1 old want-block and 2 new want-blocks to p0 + cids3 := testutil.GenerateCids(2) + pwm.PrepareSendWants(p0, append(cids3, cids[0]), []cid.Cid{}) + + if g.count != 4 { + t.Fatal("Expected 4 want-blocks") + } + + // Cancel 1 want-block that was sent to p0 + // and 1 want-block that was not sent + cids4 := testutil.GenerateCids(1) + pwm.PrepareSendCancels(append(cids4, cids[0])) + + if g.count != 3 { + t.Fatal("Expected 3 want-blocks", g.count) + } +} diff --git a/session/cidqueue.go b/session/cidqueue.go index cf461a6c..aedfa944 100644 --- a/session/cidqueue.go +++ b/session/cidqueue.go @@ -27,6 +27,23 @@ func (cq *cidQueue) Pop() cid.Cid { } } +func (cq *cidQueue) Cids() []cid.Cid { + // Lazily delete from the list any cids that were removed from the set + if len(cq.elems) > cq.eset.Len() { + i := 0 + for _, c := range cq.elems { + if cq.eset.Has(c) { + cq.elems[i] = c + i++ + } + } + cq.elems = cq.elems[:i] + } + + // Make a copy of the cids + return append([]cid.Cid{}, cq.elems...) +} + func (cq *cidQueue) Push(c cid.Cid) { if cq.eset.Visit(c) { cq.elems = append(cq.elems, c) diff --git a/session/peeravailabilitymanager.go b/session/peeravailabilitymanager.go new file mode 100644 index 00000000..31b887c6 --- /dev/null +++ b/session/peeravailabilitymanager.go @@ -0,0 +1,57 @@ +package session + +import ( + peer "github.com/libp2p/go-libp2p-core/peer" +) + +// peerAvailabilityManager keeps track of which peers have available space +// to receive want requests +type peerAvailabilityManager struct { + peerAvailable map[peer.ID]bool +} + +func newPeerAvailabilityManager() *peerAvailabilityManager { + return &peerAvailabilityManager{ + peerAvailable: make(map[peer.ID]bool), + } +} + +func (pam *peerAvailabilityManager) addPeer(p peer.ID) { + pam.peerAvailable[p] = false +} + +func (pam *peerAvailabilityManager) isAvailable(p peer.ID) (bool, bool) { + is, ok := pam.peerAvailable[p] + return is, ok +} + +func (pam *peerAvailabilityManager) setPeerAvailability(p peer.ID, isAvailable bool) { + pam.peerAvailable[p] = isAvailable +} + +func (pam *peerAvailabilityManager) haveAvailablePeers() bool { + for _, isAvailable := range pam.peerAvailable { + if isAvailable { + return true + } + } + return false +} + +func (pam *peerAvailabilityManager) availablePeers() []peer.ID { + var available []peer.ID + for p, isAvailable := range pam.peerAvailable { + if isAvailable { + available = append(available, p) + } + } + return available +} + +func (pam *peerAvailabilityManager) allPeers() []peer.ID { + var available []peer.ID + for p := range pam.peerAvailable { + available = append(available, p) + } + return available +} diff --git a/session/peeravailabilitymanager_test.go b/session/peeravailabilitymanager_test.go new file mode 100644 index 00000000..4c4b4b1e --- /dev/null +++ 
b/session/peeravailabilitymanager_test.go @@ -0,0 +1,74 @@ +package session + +import ( + "testing" + + "github.com/ipfs/go-bitswap/testutil" +) + +func TestPeerAvailabilityManager(t *testing.T) { + peers := testutil.GeneratePeers(2) + pam := newPeerAvailabilityManager() + + isAvailable, ok := pam.isAvailable(peers[0]) + if isAvailable || ok { + t.Fatal("expected not to have any availability yet") + } + + if pam.haveAvailablePeers() { + t.Fatal("expected not to have any availability yet") + } + + pam.addPeer(peers[0]) + isAvailable, ok = pam.isAvailable(peers[0]) + if !ok { + t.Fatal("expected to have a peer") + } + if isAvailable { + t.Fatal("expected not to have any availability yet") + } + if pam.haveAvailablePeers() { + t.Fatal("expected not to have any availability yet") + } + if len(pam.availablePeers()) != 0 { + t.Fatal("expected not to have any availability yet") + } + if len(pam.allPeers()) != 1 { + t.Fatal("expected one peer") + } + + pam.setPeerAvailability(peers[0], true) + isAvailable, ok = pam.isAvailable(peers[0]) + if !ok { + t.Fatal("expected to have a peer") + } + if !isAvailable { + t.Fatal("expected peer to be available") + } + if !pam.haveAvailablePeers() { + t.Fatal("expected peer to be available") + } + if len(pam.availablePeers()) != 1 { + t.Fatal("expected peer to be available") + } + if len(pam.allPeers()) != 1 { + t.Fatal("expected one peer") + } + + pam.addPeer(peers[1]) + if len(pam.availablePeers()) != 1 { + t.Fatal("expected one peer to be available") + } + if len(pam.allPeers()) != 2 { + t.Fatal("expected two peers") + } + + pam.setPeerAvailability(peers[0], false) + isAvailable, ok = pam.isAvailable(peers[0]) + if !ok { + t.Fatal("expected to have a peer") + } + if isAvailable { + t.Fatal("expected peer to not be available") + } +} diff --git a/session/peerresponsetracker.go b/session/peerresponsetracker.go new file mode 100644 index 00000000..22039896 --- /dev/null +++ b/session/peerresponsetracker.go @@ -0,0 +1,68 @@ +package session + +import ( + "math/rand" + + peer "github.com/libp2p/go-libp2p-core/peer" +) + +// peerResponseTracker keeps track of how many times each peer was the first +// to send us a block for a given CID (used to rank peers) +type peerResponseTracker struct { + firstResponder map[peer.ID]int +} + +func newPeerResponseTracker() *peerResponseTracker { + return &peerResponseTracker{ + firstResponder: make(map[peer.ID]int), + } +} + +func (prt *peerResponseTracker) receivedBlockFrom(from peer.ID) { + prt.firstResponder[from]++ +} + +func (prt *peerResponseTracker) choose(peers []peer.ID) peer.ID { + if len(peers) == 0 { + return "" + } + + rnd := rand.Float64() + + // Find the total received blocks for all candidate peers + total := 0 + for _, p := range peers { + total += prt.getPeerCount(p) + } + + // Choose one of the peers with a chance proportional to the number + // of blocks received from that peer + counted := 0.0 + for _, p := range peers { + counted += float64(prt.getPeerCount(p)) / float64(total) + if counted > rnd { + // log.Warningf(" chose %s from %s (%d) / %s (%d) with pivot %.2f", + // lu.P(p), lu.P(peers[0]), prt.firstResponder[peers[0]], lu.P(peers[1]), prt.firstResponder[peers[1]], rnd) + return p + } + } + + // We shouldn't get here unless there is some weirdness with floating point + // math that doesn't quite cover the whole range of peers in the for loop + // so just choose the last peer. 
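+ // (Illustrative aside: with first-responder counts A=3 and B=1, the loop + // above returns A with probability 3/4 and B with probability 1/4; peers + // that have not yet sent us a block get a default count of 1, so they + // always retain some chance of being chosen.)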
+ index := len(peers) - 1 + // log.Warningf(" chose last (indx %d) %s from %s (%d) / %s (%d) with pivot %.2f", + // index, lu.P(peers[index]), lu.P(peers[0]), prt.firstResponder[peers[0]], lu.P(peers[1]), prt.firstResponder[peers[1]], rnd) + return peers[index] +} + +func (prt *peerResponseTracker) getPeerCount(p peer.ID) int { + count, ok := prt.firstResponder[p] + if ok { + return count + } + + // Make sure there is always at least a small chance a new peer + // will be chosen + return 1 +} diff --git a/session/peerresponsetracker_test.go b/session/peerresponsetracker_test.go new file mode 100644 index 00000000..bbe6bd75 --- /dev/null +++ b/session/peerresponsetracker_test.go @@ -0,0 +1,117 @@ +package session + +import ( + "math" + "testing" + + "github.com/ipfs/go-bitswap/testutil" + peer "github.com/libp2p/go-libp2p-core/peer" +) + +func TestPeerResponseTrackerInit(t *testing.T) { + peers := testutil.GeneratePeers(2) + prt := newPeerResponseTracker() + + if prt.choose([]peer.ID{}) != "" { + t.Fatal("expected empty peer ID") + } + if prt.choose([]peer.ID{peers[0]}) != peers[0] { + t.Fatal("expected single peer ID") + } + p := prt.choose(peers) + if p != peers[0] && p != peers[1] { + t.Fatal("expected randomly chosen peer") + } +} + +func TestPeerResponseTrackerProbabilityUnknownPeers(t *testing.T) { + peers := testutil.GeneratePeers(4) + prt := newPeerResponseTracker() + + choices := []int{0, 0, 0, 0} + count := 1000 + for i := 0; i < count; i++ { + p := prt.choose(peers) + if p == peers[0] { + choices[0]++ + } else if p == peers[1] { + choices[1]++ + } else if p == peers[2] { + choices[2]++ + } else if p == peers[3] { + choices[3]++ + } + } + + for _, c := range choices { + if c == 0 { + t.Fatal("expected each peer to be chosen at least once") + } + if math.Abs(float64(c-choices[0])) > 0.2*float64(count) { + t.Fatal("expected unknown peers to have roughly equal chance of being chosen") + } + } +} + +func TestPeerResponseTrackerProbabilityOneKnownOneUnknownPeer(t *testing.T) { + peers := testutil.GeneratePeers(2) + prt := newPeerResponseTracker() + + prt.receivedBlockFrom(peers[0]) + + chooseFirst := 0 + chooseSecond := 0 + for i := 0; i < 1000; i++ { + p := prt.choose(peers) + if p == peers[0] { + chooseFirst++ + } else if p == peers[1] { + chooseSecond++ + } + } + + if chooseSecond == 0 { + t.Fatal("expected unknown peer to occasionally be chosen") + } + if chooseSecond > chooseFirst { + t.Fatal("expected known peer to be chosen more often") + } +} + +func TestPeerResponseTrackerProbabilityProportional(t *testing.T) { + peers := testutil.GeneratePeers(3) + prt := newPeerResponseTracker() + + probabilities := []float64{0.1, 0.6, 0.3} + count := 1000 + for pi, prob := range probabilities { + for i := 0; float64(i) < float64(count)*prob; i++ { + prt.receivedBlockFrom(peers[pi]) + } + } + + var choices []int + for range probabilities { + choices = append(choices, 0) + } + + for i := 0; i < count; i++ { + p := prt.choose(peers) + if p == peers[0] { + choices[0]++ + } else if p == peers[1] { + choices[1]++ + } else if p == peers[2] { + choices[2]++ + } + } + + for i, c := range choices { + if c == 0 { + t.Fatal("expected each peer to be chosen at least once") + } + if math.Abs(float64(c)-(float64(count)*probabilities[i])) > 0.2*float64(count) { + t.Fatal("expected peers to be chosen proportionally to probability") + } + } +} diff --git a/session/sentwantblockstracker.go b/session/sentwantblockstracker.go new file mode 100644 index 00000000..cf0581ef --- /dev/null +++ 
b/session/sentwantblockstracker.go @@ -0,0 +1,33 @@ +package session + +import ( + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p-core/peer" +) + +// sentWantBlocksTracker keeps track of which peers we've sent a want-block to +type sentWantBlocksTracker struct { + sentWantBlocks map[peer.ID]map[cid.Cid]struct{} +} + +func newSentWantBlocksTracker() *sentWantBlocksTracker { + return &sentWantBlocksTracker{ + sentWantBlocks: make(map[peer.ID]map[cid.Cid]struct{}), + } +} + +func (s *sentWantBlocksTracker) addSentWantBlocksTo(p peer.ID, ks []cid.Cid) { + cids, ok := s.sentWantBlocks[p] + if !ok { + cids = make(map[cid.Cid]struct{}, len(ks)) + s.sentWantBlocks[p] = cids + } + for _, c := range ks { + cids[c] = struct{}{} + } +} + +func (s *sentWantBlocksTracker) haveSentWantBlockTo(p peer.ID, c cid.Cid) bool { + _, ok := s.sentWantBlocks[p][c] + return ok +} diff --git a/session/sentwantblockstracker_test.go b/session/sentwantblockstracker_test.go new file mode 100644 index 00000000..097cac6b --- /dev/null +++ b/session/sentwantblockstracker_test.go @@ -0,0 +1,28 @@ +package session + +import ( + "testing" + + "github.com/ipfs/go-bitswap/testutil" +) + +func TestSendWantBlocksTracker(t *testing.T) { + peers := testutil.GeneratePeers(2) + cids := testutil.GenerateCids(2) + swbt := newSentWantBlocksTracker() + + if swbt.haveSentWantBlockTo(peers[0], cids[0]) { + t.Fatal("expected not to have sent anything yet") + } + + swbt.addSentWantBlocksTo(peers[0], cids) + if !swbt.haveSentWantBlockTo(peers[0], cids[0]) { + t.Fatal("expected to have sent cid to peer") + } + if !swbt.haveSentWantBlockTo(peers[0], cids[1]) { + t.Fatal("expected to have sent cid to peer") + } + if swbt.haveSentWantBlockTo(peers[1], cids[0]) { + t.Fatal("expected not to have sent cid to peer") + } +} diff --git a/session/session.go b/session/session.go index 6c836355..d9fb2443 100644 --- a/session/session.go +++ b/session/session.go @@ -2,11 +2,15 @@ package session import ( "context" + "sync" "time" + // lu "github.com/ipfs/go-bitswap/logutil" + bsbpm "github.com/ipfs/go-bitswap/blockpresencemanager" bsgetter "github.com/ipfs/go-bitswap/getter" notifications "github.com/ipfs/go-bitswap/notifications" - bssd "github.com/ipfs/go-bitswap/sessiondata" + bspm "github.com/ipfs/go-bitswap/peermanager" + bssim "github.com/ipfs/go-bitswap/sessioninterestmanager" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" @@ -15,47 +19,71 @@ import ( loggables "github.com/libp2p/go-libp2p-loggables" ) +var log = logging.Logger("bs:sess") + const ( - broadcastLiveWantsLimit = 4 - targetedLiveWantsLimit = 32 + broadcastLiveWantsLimit = 64 ) // WantManager is an interface that can be used to request blocks // from given peers. 
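+// With this change a session no longer asks the WantManager for specific +// blocks from specific peers; it only broadcasts want-haves for discovery +// and removes itself from the WantManager when it shuts down.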
type WantManager interface { - WantBlocks(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) - CancelWants(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) + // BroadcastWantHaves sends want-haves to all connected peers (used for + // session discovery) + BroadcastWantHaves(context.Context, uint64, []cid.Cid) + // RemoveSession removes the session from the WantManager (when the + // session shuts down) + RemoveSession(context.Context, uint64) +} + +// PeerManager keeps track of which sessions are interested in which peers +// and takes care of sending wants for the sessions +type PeerManager interface { + // RegisterSession tells the PeerManager that the session is interested + // in a peer's connection state + RegisterSession(peer.ID, bspm.Session) bool + // UnregisterSession tells the PeerManager that the session is no longer + // interested in a peer's connection state + UnregisterSession(uint64) + // SendWants tells the PeerManager to send wants to the given peer + SendWants(ctx context.Context, peerId peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) } // PeerManager provides an interface for tracking and optimize peers, and // requesting more when neccesary. -type PeerManager interface { +type SessionPeerManager interface { + // ReceiveFrom is called when blocks and HAVEs are received from a peer. + // It returns a boolean indicating if the peer is new to the session. + ReceiveFrom(peerId peer.ID, blks []cid.Cid, haves []cid.Cid) bool + // Peers returns the set of peers in the session. + Peers() *peer.Set + // FindMorePeers queries Content Routing to discover providers of the given cid FindMorePeers(context.Context, cid.Cid) - GetOptimizedPeers() []bssd.OptimizedPeer + // RecordPeerRequests records the time that a cid was requested from a peer RecordPeerRequests([]peer.ID, []cid.Cid) + // RecordPeerResponse records the time that a response for a cid arrived + // from a peer RecordPeerResponse(peer.ID, []cid.Cid) + // RecordCancels records that cancels were sent for the given cids RecordCancels([]cid.Cid) } -// RequestSplitter provides an interface for splitting -// a request for Cids up among peers. -type RequestSplitter interface { - SplitRequest([]bssd.OptimizedPeer, []cid.Cid) []bssd.PartialRequest - RecordDuplicateBlock() - RecordUniqueBlock() -} - +// opType is the kind of operation that is being processed by the event loop type opType int const ( + // Receive blocks opReceive opType = iota + // Want blocks opWant + // Cancel wants opCancel + // Broadcast want-haves + opBroadcast ) type op struct { op opType - from peer.ID keys []cid.Cid } @@ -64,24 +92,24 @@ type op struct { // info to, and who to request blocks from. 
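+// Internally the Session delegates the choice of which peers to send each +// want to (and whether to send a want-block or a want-have) to its +// sessionWantSender, and it tracks fetch latency with a latencyTracker.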
type Session struct { // dependencies - ctx context.Context - wm WantManager - pm PeerManager - srs RequestSplitter + ctx context.Context + wm WantManager + sprm SessionPeerManager + sim *bssim.SessionInterestManager + + sw sessionWants + sws sessionWantSender - sw sessionWants + latencyTrkr latencyTracker // channels incoming chan op - latencyReqs chan chan time.Duration tickDelayReqs chan time.Duration // do not touch outside run loop idleTick *time.Timer periodicSearchTimer *time.Timer baseTickDelay time.Duration - latTotal time.Duration - fetchcnt int consecutiveTicks int initialSearchDelay time.Duration periodicSearchDelay delay.D @@ -89,6 +117,8 @@ type Session struct { notif notifications.PubSub uuid logging.Loggable id uint64 + + self peer.ID } // New creates a new bitswap session whose lifetime is bounded by the @@ -96,53 +126,111 @@ type Session struct { func New(ctx context.Context, id uint64, wm WantManager, + sprm SessionPeerManager, + sim *bssim.SessionInterestManager, pm PeerManager, - srs RequestSplitter, + bpm *bsbpm.BlockPresenceManager, notif notifications.PubSub, initialSearchDelay time.Duration, - periodicSearchDelay delay.D) *Session { + periodicSearchDelay delay.D, + self peer.ID) *Session { s := &Session{ - sw: sessionWants{ - toFetch: newCidQueue(), - liveWants: make(map[cid.Cid]time.Time), - pastWants: cid.NewSet(), - }, - latencyReqs: make(chan chan time.Duration), + sw: newSessionWants(), tickDelayReqs: make(chan time.Duration), ctx: ctx, wm: wm, - pm: pm, - srs: srs, - incoming: make(chan op, 16), + sprm: sprm, + sim: sim, + incoming: make(chan op, 128), + latencyTrkr: latencyTracker{}, notif: notif, uuid: loggables.Uuid("GetBlockRequest"), baseTickDelay: time.Millisecond * 500, id: id, initialSearchDelay: initialSearchDelay, periodicSearchDelay: periodicSearchDelay, + self: self, } + s.sws = newSessionWantSender(ctx, id, pm, bpm, s.onWantsSent, s.onPeersExhausted) go s.run(ctx) return s } +func (s *Session) ID() uint64 { + return s.id +} + // ReceiveFrom receives incoming blocks from the given peer. 
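+// It is also passed the keys of any HAVEs / DONT_HAVEs in the message, so +// that the session can update its peer set and its want sender with the +// peer's block presence information.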
-func (s *Session) ReceiveFrom(from peer.ID, ks []cid.Cid) { - interested := s.sw.FilterInteresting(ks) - if len(interested) == 0 { +func (s *Session) ReceiveFrom(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { + interestedRes := s.sim.FilterSessionInterested(s.id, ks, haves, dontHaves) + ks = interestedRes[0] + haves = interestedRes[1] + dontHaves = interestedRes[2] + // s.logReceiveFrom(from, ks, haves, dontHaves) + + // Add any newly discovered peers that have blocks we're interested in to + // the peer set + isNewPeer := s.sprm.ReceiveFrom(from, ks, haves) + + // Record response timing only if the blocks came from the network + // (blocks can also be received from the local node) + if len(ks) > 0 && from != "" { + s.sprm.RecordPeerResponse(from, ks) + } + + // Update want potential + s.sws.Update(from, ks, haves, dontHaves, isNewPeer) + + if len(ks) == 0 { return } + // Record which blocks have been received and figure out the total latency + // for fetching the blocks + wanted, totalLatency := s.sw.BlocksReceived(ks) + s.latencyTrkr.receiveUpdate(len(wanted), totalLatency) + + if len(wanted) == 0 { + return + } + + // Inform the SessionInterestManager that this session is no longer + // expecting to receive the wanted keys + s.sim.RemoveSessionWants(s.id, wanted) + select { - case s.incoming <- op{op: opReceive, from: from, keys: interested}: + case s.incoming <- op{op: opReceive, keys: wanted}: case <-s.ctx.Done(): } } -// IsWanted returns true if this session is waiting to receive the given Cid. -func (s *Session) IsWanted(c cid.Cid) bool { - return s.sw.IsWanted(c) +// func (s *Session) logReceiveFrom(from peer.ID, interestedKs []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { +// // log.Infof("Ses%d<-%s: %d blocks, %d haves, %d dont haves\n", +// // s.id, from, len(interestedKs), len(wantedHaves), len(wantedDontHaves)) +// for _, c := range interestedKs { +// log.Warningf("Ses%d %s<-%s: block %s\n", s.id, lu.P(s.self), lu.P(from), lu.C(c)) +// } +// for _, c := range haves { +// log.Warningf("Ses%d %s<-%s: HAVE %s\n", s.id, lu.P(s.self), lu.P(from), lu.C(c)) +// } +// for _, c := range dontHaves { +// log.Warningf("Ses%d %s<-%s: DONT_HAVE %s\n", s.id, lu.P(s.self), lu.P(from), lu.C(c)) +// } +// } + +func (s *Session) onWantsSent(p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { + allBlks := append(wantBlocks[:len(wantBlocks):len(wantBlocks)], wantHaves...) + s.sw.WantsSent(allBlks) + s.sprm.RecordPeerRequests([]peer.ID{p}, allBlks) +} + +func (s *Session) onPeersExhausted(ks []cid.Cid) { + select { + case s.incoming <- op{op: opBroadcast, keys: ks}: + case <-s.ctx.Done(): + } } // GetBlock fetches a single block. @@ -173,23 +261,6 @@ func (s *Session) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks. ) } -// GetAverageLatency returns the average latency for block requests. -func (s *Session) GetAverageLatency() time.Duration { - resp := make(chan time.Duration) - select { - case s.latencyReqs <- resp: - case <-s.ctx.Done(): - return -1 * time.Millisecond - } - - select { - case latency := <-resp: - return latency - case <-s.ctx.Done(): - return -1 * time.Millisecond - } -} - // SetBaseTickDelay changes the rate at which ticks happen. 
func (s *Session) SetBaseTickDelay(baseTickDelay time.Duration) { select { @@ -198,9 +269,11 @@ func (s *Session) SetBaseTickDelay(baseTickDelay time.Duration) { } } -// Session run loop -- everything function below here should not be called -// of this loop +// Session run loop -- everything in this function should not be called +// outside of this loop func (s *Session) run(ctx context.Context) { + go s.sws.Run() + s.idleTick = time.NewTimer(s.initialSearchDelay) s.periodicSearchTimer = time.NewTimer(s.periodicSearchDelay.NextWaitTime()) for { @@ -208,11 +281,13 @@ func (s *Session) run(ctx context.Context) { case oper := <-s.incoming: switch oper.op { case opReceive: - s.handleReceive(ctx, oper.from, oper.keys) + s.handleReceive(oper.keys) case opWant: s.wantBlocks(ctx, oper.keys) case opCancel: s.sw.CancelPending(oper.keys) + case opBroadcast: + s.handleIdleTick(ctx) default: panic("unhandled operation") } @@ -220,8 +295,6 @@ func (s *Session) run(ctx context.Context) { s.handleIdleTick(ctx) case <-s.periodicSearchTimer.C: s.handlePeriodicSearch(ctx) - case resp := <-s.latencyReqs: - resp <- s.averageLatency() case baseTickDelay := <-s.tickDelayReqs: s.baseTickDelay = baseTickDelay case <-ctx.Done(): @@ -233,18 +306,22 @@ func (s *Session) run(ctx context.Context) { func (s *Session) handleIdleTick(ctx context.Context) { live := s.sw.PrepareBroadcast() + // log.Warningf("\n\n\n\n\nSes%d: broadcast %d keys\n\n\n\n\n", s.id, len(live)) + // log.Infof("Ses%d: broadcast %d keys\n", s.id, len(live)) + log.Warningf("Ses%d: broadcast %d keys", s.id, len(live)) - // Broadcast these keys to everyone we're connected to - s.pm.RecordPeerRequests(nil, live) - s.wm.WantBlocks(ctx, live, nil, s.id) + // Broadcast a want-have for the live wants to everyone we're connected to + s.sprm.RecordPeerRequests(nil, live) + s.wm.BroadcastWantHaves(ctx, s.id, live) - // do no find providers on consecutive ticks + // do not find providers on consecutive ticks // -- just rely on periodic search widening if len(live) > 0 && (s.consecutiveTicks == 0) { - s.pm.FindMorePeers(ctx, live[0]) + s.sprm.FindMorePeers(ctx, live[0]) } s.resetIdleTick() + // If we have live wants if s.sw.HasLiveWants() { s.consecutiveTicks++ } @@ -258,110 +335,89 @@ func (s *Session) handlePeriodicSearch(ctx context.Context) { // TODO: come up with a better strategy for determining when to search // for new providers for blocks. 
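+ // For now: pick a random live want, ask content routing for more + // providers of it, and broadcast a want-have for it to connected peers.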
- s.pm.FindMorePeers(ctx, randomWant) - s.wm.WantBlocks(ctx, []cid.Cid{randomWant}, nil, s.id) + s.sprm.FindMorePeers(ctx, randomWant) + + s.wm.BroadcastWantHaves(ctx, s.id, []cid.Cid{randomWant}) s.periodicSearchTimer.Reset(s.periodicSearchDelay.NextWaitTime()) } func (s *Session) handleShutdown() { s.idleTick.Stop() - - live := s.sw.LiveWants() - s.wm.CancelWants(s.ctx, live, nil, s.id) + s.wm.RemoveSession(s.ctx, s.id) } -func (s *Session) handleReceive(ctx context.Context, from peer.ID, keys []cid.Cid) { - // Record statistics only if the blocks came from the network - // (blocks can also be received from the local node) - if from != "" { - s.updateReceiveCounters(ctx, from, keys) - } - - // Update the want list - wanted, totalLatency := s.sw.BlocksReceived(keys) - if len(wanted) == 0 { - return - } - - // We've received the blocks so we can cancel any outstanding wants for them - s.cancelIncoming(ctx, wanted) - +func (s *Session) handleReceive(ks []cid.Cid) { s.idleTick.Stop() - // Process the received blocks - s.processReceive(ctx, wanted, totalLatency) - - s.resetIdleTick() -} - -func (s *Session) updateReceiveCounters(ctx context.Context, from peer.ID, keys []cid.Cid) { - // Record unique vs duplicate blocks - s.sw.ForEachUniqDup(keys, s.srs.RecordUniqueBlock, s.srs.RecordDuplicateBlock) - - // Record response (to be able to time latency) - if len(keys) > 0 { - s.pm.RecordPeerResponse(from, keys) - } -} - -func (s *Session) cancelIncoming(ctx context.Context, ks []cid.Cid) { - s.pm.RecordCancels(ks) - s.wm.CancelWants(s.ctx, ks, nil, s.id) -} - -func (s *Session) processReceive(ctx context.Context, ks []cid.Cid, totalLatency time.Duration) { - // Keep track of the total number of blocks received and total latency - s.fetchcnt += len(ks) - s.latTotal += totalLatency - // We've received new wanted blocks, so reset the number of ticks // that have occurred since the last new block s.consecutiveTicks = 0 - s.wantBlocks(ctx, nil) + s.sprm.RecordCancels(ks) + + s.resetIdleTick() } func (s *Session) wantBlocks(ctx context.Context, newks []cid.Cid) { - // Given the want limit and any newly received blocks, get as many wants as - // we can to send out - ks := s.sw.GetNextWants(s.wantLimit(), newks) - if len(ks) == 0 { - return + if len(newks) > 0 { + s.sim.RecordSessionInterest(s.id, newks) + s.sw.BlocksRequested(newks) + s.sws.Add(newks) } - peers := s.pm.GetOptimizedPeers() - if len(peers) > 0 { - splitRequests := s.srs.SplitRequest(peers, ks) - for _, splitRequest := range splitRequests { - s.pm.RecordPeerRequests(splitRequest.Peers, splitRequest.Keys) - s.wm.WantBlocks(ctx, splitRequest.Keys, splitRequest.Peers, s.id) - } - } else { - s.pm.RecordPeerRequests(nil, ks) - s.wm.WantBlocks(ctx, ks, nil, s.id) + // If we have discovered peers already, the SessionPotentialManager will + // send wants to them + if s.sprm.Peers().Size() > 0 { + return } -} -func (s *Session) averageLatency() time.Duration { - return s.latTotal / time.Duration(s.fetchcnt) + // No peers discovered yet, broadcast some want-haves + ks := s.sw.GetNextWants(broadcastLiveWantsLimit) + if len(ks) > 0 { + log.Infof("Ses%d: No peers - broadcasting %d want HAVE requests\n", s.id, len(ks)) + s.sprm.RecordPeerRequests(nil, ks) + s.wm.BroadcastWantHaves(ctx, s.id, ks) + } } func (s *Session) resetIdleTick() { var tickDelay time.Duration - if s.latTotal == 0 { + if !s.latencyTrkr.hasLatency() { tickDelay = s.initialSearchDelay } else { - avLat := s.averageLatency() + avLat := s.latencyTrkr.averageLatency() + // 
log.Warningf("averageLatency %s", avLat) tickDelay = s.baseTickDelay + (3 * avLat) } tickDelay = tickDelay * time.Duration(1+s.consecutiveTicks) s.idleTick.Reset(tickDelay) } -func (s *Session) wantLimit() int { - if len(s.pm.GetOptimizedPeers()) > 0 { - return targetedLiveWantsLimit - } - return broadcastLiveWantsLimit +type latencyTracker struct { + sync.RWMutex + totalLatency time.Duration + count int +} + +func (lt *latencyTracker) hasLatency() bool { + lt.RLock() + defer lt.RUnlock() + + return lt.totalLatency > 0 && lt.count > 0 +} + +func (lt *latencyTracker) averageLatency() time.Duration { + lt.RLock() + defer lt.RUnlock() + + return lt.totalLatency / time.Duration(lt.count) +} + +func (lt *latencyTracker) receiveUpdate(count int, totalLatency time.Duration) { + lt.Lock() + defer lt.Unlock() + + lt.totalLatency += totalLatency + lt.count += count } diff --git a/session/session_test.go b/session/session_test.go index 19266d1b..688f7883 100644 --- a/session/session_test.go +++ b/session/session_test.go @@ -2,14 +2,14 @@ package session import ( "context" - "sync" "testing" "time" + bsbpm "github.com/ipfs/go-bitswap/blockpresencemanager" notifications "github.com/ipfs/go-bitswap/notifications" - bssd "github.com/ipfs/go-bitswap/sessiondata" + bspm "github.com/ipfs/go-bitswap/peermanager" + bssim "github.com/ipfs/go-bitswap/sessioninterestmanager" "github.com/ipfs/go-bitswap/testutil" - blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" delay "github.com/ipfs/go-ipfs-delay" @@ -17,225 +17,164 @@ import ( ) type wantReq struct { - cids []cid.Cid - peers []peer.ID + cids []cid.Cid } type fakeWantManager struct { - wantReqs chan wantReq - cancelReqs chan wantReq + wantReqs chan wantReq } -func (fwm *fakeWantManager) WantBlocks(ctx context.Context, cids []cid.Cid, peers []peer.ID, ses uint64) { - select { - case fwm.wantReqs <- wantReq{cids, peers}: - case <-ctx.Done(): +func newFakeWantManager() *fakeWantManager { + return &fakeWantManager{ + wantReqs: make(chan wantReq, 1), } } -func (fwm *fakeWantManager) CancelWants(ctx context.Context, cids []cid.Cid, peers []peer.ID, ses uint64) { +func (fwm *fakeWantManager) BroadcastWantHaves(ctx context.Context, sesid uint64, cids []cid.Cid) { select { - case fwm.cancelReqs <- wantReq{cids, peers}: + case fwm.wantReqs <- wantReq{cids}: case <-ctx.Done(): } } +func (fwm *fakeWantManager) RemoveSession(context.Context, uint64) {} -type fakePeerManager struct { - lk sync.RWMutex - peers []peer.ID +type fakeSessionPeerManager struct { + peers *peer.Set findMorePeersRequested chan cid.Cid } -func (fpm *fakePeerManager) FindMorePeers(ctx context.Context, k cid.Cid) { +func newFakeSessionPeerManager() *fakeSessionPeerManager { + return &fakeSessionPeerManager{ + peers: peer.NewSet(), + findMorePeersRequested: make(chan cid.Cid, 1), + } +} + +func (fpm *fakeSessionPeerManager) FindMorePeers(ctx context.Context, k cid.Cid) { select { case fpm.findMorePeersRequested <- k: case <-ctx.Done(): } } -func (fpm *fakePeerManager) GetOptimizedPeers() []bssd.OptimizedPeer { - fpm.lk.Lock() - defer fpm.lk.Unlock() - optimizedPeers := make([]bssd.OptimizedPeer, 0, len(fpm.peers)) - for _, peer := range fpm.peers { - optimizedPeers = append(optimizedPeers, bssd.OptimizedPeer{Peer: peer, OptimizationRating: 1.0}) - } - return optimizedPeers +func (fpm *fakeSessionPeerManager) Peers() *peer.Set { + return fpm.peers } -func (fpm *fakePeerManager) RecordPeerRequests([]peer.ID, []cid.Cid) {} -func (fpm 
*fakePeerManager) RecordPeerResponse(p peer.ID, c []cid.Cid) { - fpm.lk.Lock() - fpm.peers = append(fpm.peers, p) - fpm.lk.Unlock() +func (fpm *fakeSessionPeerManager) ReceiveFrom(p peer.ID, ks []cid.Cid, haves []cid.Cid) bool { + if !fpm.peers.Contains(p) { + fpm.peers.Add(p) + return true + } + return false +} +func (fpm *fakeSessionPeerManager) RecordCancels(c []cid.Cid) {} +func (fpm *fakeSessionPeerManager) RecordPeerRequests([]peer.ID, []cid.Cid) {} +func (fpm *fakeSessionPeerManager) RecordPeerResponse(p peer.ID, c []cid.Cid) { + fpm.peers.Add(p) } -func (fpm *fakePeerManager) RecordCancels(c []cid.Cid) {} -type fakeRequestSplitter struct { +type fakePeerManager struct { } -func (frs *fakeRequestSplitter) SplitRequest(optimizedPeers []bssd.OptimizedPeer, keys []cid.Cid) []bssd.PartialRequest { - peers := make([]peer.ID, len(optimizedPeers)) - for i, optimizedPeer := range optimizedPeers { - peers[i] = optimizedPeer.Peer - } - return []bssd.PartialRequest{bssd.PartialRequest{Peers: peers, Keys: keys}} +func newFakePeerManager() *fakePeerManager { + return &fakePeerManager{} } -func (frs *fakeRequestSplitter) RecordDuplicateBlock() {} -func (frs *fakeRequestSplitter) RecordUniqueBlock() {} +func (pm *fakePeerManager) RegisterSession(peer.ID, bspm.Session) bool { + return true +} +func (pm *fakePeerManager) UnregisterSession(uint64) {} +func (pm *fakePeerManager) SendWants(context.Context, peer.ID, []cid.Cid, []cid.Cid) {} func TestSessionGetBlocks(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) defer cancel() - wantReqs := make(chan wantReq, 1) - cancelReqs := make(chan wantReq, 1) - fwm := &fakeWantManager{wantReqs, cancelReqs} - fpm := &fakePeerManager{} - frs := &fakeRequestSplitter{} + fwm := newFakeWantManager() + fpm := newFakeSessionPeerManager() + sim := bssim.New() + bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() - session := New(ctx, id, fwm, fpm, frs, notif, time.Second, delay.Fixed(time.Minute)) + session := New(ctx, id, fwm, fpm, sim, newFakePeerManager(), bpm, notif, time.Second, delay.Fixed(time.Minute), "") blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(broadcastLiveWantsLimit * 2) var cids []cid.Cid for _, block := range blks { cids = append(cids, block.Cid()) } - getBlocksCh, err := session.GetBlocks(ctx, cids) + _, err := session.GetBlocks(ctx, cids) if err != nil { t.Fatal("error getting blocks") } - // check initial want request + // Wait for initial want request receivedWantReq := <-fwm.wantReqs + // Should have registered session's interest in blocks + intSes := sim.FilterSessionInterested(id, cids) + if !testutil.MatchKeysIgnoreOrder(intSes[0], cids) { + t.Fatal("did not register session interest in blocks") + } + + // Should have sent out broadcast request for wants if len(receivedWantReq.cids) != broadcastLiveWantsLimit { t.Fatal("did not enqueue correct initial number of wants") } - if receivedWantReq.peers != nil { - t.Fatal("first want request should be a broadcast") - } - for _, c := range cids { - if !session.IsWanted(c) { - t.Fatal("expected session to want cids") - } - } - // now receive the first set of blocks + // Simulate receiving HAVEs from several peers peers := testutil.GeneratePeers(broadcastLiveWantsLimit) - var newCancelReqs []wantReq - var newBlockReqs []wantReq - var receivedBlocks []blocks.Block for i, p := range peers { - // simulate what bitswap does on receiving a message: - // - calls ReceiveFrom() on 
session - // - publishes block to pubsub channel blk := blks[testutil.IndexOf(blks, receivedWantReq.cids[i])] - session.ReceiveFrom(p, []cid.Cid{blk.Cid()}) - notif.Publish(blk) - - select { - case cancelBlock := <-cancelReqs: - newCancelReqs = append(newCancelReqs, cancelBlock) - case <-ctx.Done(): - t.Fatal("did not cancel block want") - } - - select { - case receivedBlock := <-getBlocksCh: - receivedBlocks = append(receivedBlocks, receivedBlock) - case <-ctx.Done(): - t.Fatal("Did not receive block!") - } - - select { - case wantBlock := <-wantReqs: - newBlockReqs = append(newBlockReqs, wantBlock) - default: - } - } - - // verify new peers were recorded - fpm.lk.Lock() - if len(fpm.peers) != broadcastLiveWantsLimit { - t.Fatal("received blocks not recorded by the peer manager") - } - for _, p := range fpm.peers { - if !testutil.ContainsPeer(peers, p) { - t.Fatal("incorrect peer recorded to peer manager") - } + session.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{blk.Cid()}, []cid.Cid{}) } - fpm.lk.Unlock() - // look at new interactions with want manager - - // should have cancelled each received block - if len(newCancelReqs) != broadcastLiveWantsLimit { - t.Fatal("did not cancel each block once it was received") - } - // new session reqs should be targeted - var newCidsRequested []cid.Cid - for _, w := range newBlockReqs { - if len(w.peers) == 0 { - t.Fatal("should not have broadcast again after initial broadcast") - } - newCidsRequested = append(newCidsRequested, w.cids...) + // Verify new peers were recorded + if !testutil.MatchPeersIgnoreOrder(fpm.Peers().Peers(), peers) { + t.Fatal("peers not recorded by the peer manager") } - // full new round of cids should be requested - if len(newCidsRequested) != broadcastLiveWantsLimit { - t.Fatal("new blocks were not requested") + // Verify session still wants received blocks + _, unwanted := sim.SplitWantedUnwanted(blks) + if len(unwanted) > 0 { + t.Fatal("all blocks should still be wanted") } - // receive remaining blocks - for i, p := range peers { - // simulate what bitswap does on receiving a message: - // - calls ReceiveFrom() on session - // - publishes block to pubsub channel - blk := blks[testutil.IndexOf(blks, newCidsRequested[i])] - session.ReceiveFrom(p, []cid.Cid{blk.Cid()}) - notif.Publish(blk) + // Simulate receiving DONT_HAVE for a CID + session.ReceiveFrom(peers[0], []cid.Cid{}, []cid.Cid{}, []cid.Cid{blks[0].Cid()}) - receivedBlock := <-getBlocksCh - receivedBlocks = append(receivedBlocks, receivedBlock) - cancelBlock := <-cancelReqs - newCancelReqs = append(newCancelReqs, cancelBlock) + // Verify session still wants received blocks + _, unwanted = sim.SplitWantedUnwanted(blks) + if len(unwanted) > 0 { + t.Fatal("all blocks should still be wanted") } - if len(receivedBlocks) != len(blks) { - t.Fatal("did not receive enough blocks") - } - if len(newCancelReqs) != len(receivedBlocks) { - t.Fatal("expected an equal number of received blocks and cancels") - } - for _, block := range receivedBlocks { - if !testutil.ContainsBlock(blks, block) { - t.Fatal("received incorrect block") - } + // Simulate receiving block for a CID + session.ReceiveFrom(peers[1], []cid.Cid{blks[0].Cid()}, []cid.Cid{}, []cid.Cid{}) + + // Verify session no longer wants received block + wanted, unwanted := sim.SplitWantedUnwanted(blks) + if len(unwanted) != 1 || !unwanted[0].Cid().Equals(blks[0].Cid()) { + t.Fatal("session wants block that has already been received") } - for _, c := range cids { - if session.IsWanted(c) { - t.Fatal("expected session NOT to want 
cids") - } + if len(wanted) != len(blks)-1 { + t.Fatal("session wants incorrect number of blocks") } } func TestSessionFindMorePeers(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 900*time.Millisecond) defer cancel() - wantReqs := make(chan wantReq, 1) - cancelReqs := make(chan wantReq, 1) - fwm := &fakeWantManager{wantReqs, cancelReqs} - fpm := &fakePeerManager{findMorePeersRequested: make(chan cid.Cid, 1)} - frs := &fakeRequestSplitter{} + fwm := newFakeWantManager() + fpm := newFakeSessionPeerManager() + sim := bssim.New() + bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() - session := New(ctx, id, fwm, fpm, frs, notif, time.Second, delay.Fixed(time.Minute)) + session := New(ctx, id, fwm, fpm, sim, newFakePeerManager(), bpm, notif, time.Second, delay.Fixed(time.Minute), "") session.SetBaseTickDelay(200 * time.Microsecond) blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(broadcastLiveWantsLimit * 2) @@ -243,14 +182,14 @@ func TestSessionFindMorePeers(t *testing.T) { for _, block := range blks { cids = append(cids, block.Cid()) } - getBlocksCh, err := session.GetBlocks(ctx, cids) + _, err := session.GetBlocks(ctx, cids) if err != nil { t.Fatal("error getting blocks") } - // clear the initial block of wants + // The session should initially broadcast want-haves select { - case <-wantReqs: + case <-fwm.wantReqs: case <-ctx.Done(): t.Fatal("Did not make first want request ") } @@ -261,42 +200,28 @@ func TestSessionFindMorePeers(t *testing.T) { // millisecond range p := testutil.GeneratePeers(1)[0] - // simulate what bitswap does on receiving a message: - // - calls ReceiveFrom() on session - // - publishes block to pubsub channel blk := blks[0] - session.ReceiveFrom(p, []cid.Cid{blk.Cid()}) - notif.Publish(blk) - select { - case <-cancelReqs: - case <-ctx.Done(): - t.Fatal("Did not cancel block") - } - select { - case <-getBlocksCh: - case <-ctx.Done(): - t.Fatal("Did not get block") - } + session.ReceiveFrom(p, []cid.Cid{blk.Cid()}, []cid.Cid{}, []cid.Cid{}) + + // The session should now time out waiting for a response and broadcast + // want-haves again select { - case <-wantReqs: + case <-fwm.wantReqs: case <-ctx.Done(): t.Fatal("Did not make second want request ") } - // verify a broadcast was made + // Verify a broadcast was made select { - case receivedWantReq := <-wantReqs: + case receivedWantReq := <-fwm.wantReqs: if len(receivedWantReq.cids) < broadcastLiveWantsLimit { t.Fatal("did not rebroadcast whole live list") } - if receivedWantReq.peers != nil { - t.Fatal("did not make a broadcast") - } case <-ctx.Done(): t.Fatal("Never rebroadcast want list") } - // wait for a request to get more peers to occur + // The session should eventually try to find more peers select { case <-fpm.findMorePeersRequested: case <-ctx.Done(): @@ -307,16 +232,14 @@ func TestSessionFindMorePeers(t *testing.T) { func TestSessionFailingToGetFirstBlock(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() - wantReqs := make(chan wantReq, 1) - cancelReqs := make(chan wantReq, 1) - fwm := &fakeWantManager{wantReqs, cancelReqs} - fpm := &fakePeerManager{findMorePeersRequested: make(chan cid.Cid, 1)} - frs := &fakeRequestSplitter{} + fwm := newFakeWantManager() + fpm := newFakeSessionPeerManager() + sim := bssim.New() + bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() - - session := New(ctx, id, 
fwm, fpm, frs, notif, 10*time.Millisecond, delay.Fixed(100*time.Millisecond)) + session := New(ctx, id, fwm, fpm, sim, newFakePeerManager(), bpm, notif, 10*time.Millisecond, delay.Fixed(100*time.Millisecond), "") blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(4) var cids []cid.Cid @@ -329,27 +252,24 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { t.Fatal("error getting blocks") } - // clear the initial block of wants + // The session should initially broadcast want-haves select { - case <-wantReqs: + case <-fwm.wantReqs: case <-ctx.Done(): t.Fatal("Did not make first want request ") } - // verify a broadcast is made + // Verify a broadcast was made select { - case receivedWantReq := <-wantReqs: + case receivedWantReq := <-fwm.wantReqs: if len(receivedWantReq.cids) < len(cids) { t.Fatal("did not rebroadcast whole live list") } - if receivedWantReq.peers != nil { - t.Fatal("did not make a broadcast") - } case <-ctx.Done(): t.Fatal("Never rebroadcast want list") } - // wait for a request to get more peers to occur + // Wait for a request to find more peers to occur select { case k := <-fpm.findMorePeersRequested: if testutil.IndexOf(blks, k) == -1 { @@ -360,63 +280,58 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { } firstTickLength := time.Since(startTick) - // wait for another broadcast to occur + // Wait for another broadcast to occur select { - case receivedWantReq := <-wantReqs: + case receivedWantReq := <-fwm.wantReqs: if len(receivedWantReq.cids) < len(cids) { t.Fatal("did not rebroadcast whole live list") } - if receivedWantReq.peers != nil { - t.Fatal("did not make a broadcast") - } case <-ctx.Done(): t.Fatal("Never rebroadcast want list") } + + // Wait for another broadcast to occur startTick = time.Now() - // wait for another broadcast to occur select { - case receivedWantReq := <-wantReqs: + case receivedWantReq := <-fwm.wantReqs: if len(receivedWantReq.cids) < len(cids) { t.Fatal("did not rebroadcast whole live list") } - if receivedWantReq.peers != nil { - t.Fatal("did not make a broadcast") - } case <-ctx.Done(): t.Fatal("Never rebroadcast want list") } + + // Tick should take longer consecutiveTickLength := time.Since(startTick) - // tick should take longer if firstTickLength > consecutiveTickLength { t.Fatal("Should have increased tick length after first consecutive tick") } + + // Wait for another broadcast to occur startTick = time.Now() - // wait for another broadcast to occur select { - case receivedWantReq := <-wantReqs: + case receivedWantReq := <-fwm.wantReqs: if len(receivedWantReq.cids) < len(cids) { t.Fatal("did not rebroadcast whole live list") } - if receivedWantReq.peers != nil { - t.Fatal("did not make a broadcast") - } case <-ctx.Done(): t.Fatal("Never rebroadcast want list") } + + // Tick should take longer secondConsecutiveTickLength := time.Since(startTick) - // tick should take longer if consecutiveTickLength > secondConsecutiveTickLength { t.Fatal("Should have increased tick length after first consecutive tick") } - // should not have looked for peers on consecutive ticks + // Should not have tried to find peers on consecutive ticks select { case <-fpm.findMorePeersRequested: - t.Fatal("Should not have looked for peers on consecutive tick") + t.Fatal("Should not have tried to find peers on consecutive ticks") default: } - // wait for rebroadcast to occur + // Wait for rebroadcast to occur select { case k := <-fpm.findMorePeersRequested: if testutil.IndexOf(blks, k) == -1 { @@ -428,18 +343,17 @@ func 
TestSessionFailingToGetFirstBlock(t *testing.T) { } func TestSessionCtxCancelClosesGetBlocksChannel(t *testing.T) { - wantReqs := make(chan wantReq, 1) - cancelReqs := make(chan wantReq, 1) - fwm := &fakeWantManager{wantReqs, cancelReqs} - fpm := &fakePeerManager{} - frs := &fakeRequestSplitter{} + fwm := newFakeWantManager() + fpm := newFakeSessionPeerManager() + sim := bssim.New() + bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() // Create a new session with its own context sessctx, sesscancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - session := New(sessctx, id, fwm, fpm, frs, notif, time.Second, delay.Fixed(time.Minute)) + session := New(sessctx, id, fwm, fpm, sim, newFakePeerManager(), bpm, notif, time.Second, delay.Fixed(time.Minute), "") timerCtx, timerCancel := context.WithTimeout(context.Background(), 10*time.Millisecond) defer timerCancel() @@ -468,3 +382,37 @@ func TestSessionCtxCancelClosesGetBlocksChannel(t *testing.T) { t.Fatal("expected channel to be closed before timeout") } } + +func TestSessionReceiveMessageAfterShutdown(t *testing.T) { + ctx, cancelCtx := context.WithTimeout(context.Background(), 10*time.Millisecond) + fwm := newFakeWantManager() + fpm := newFakeSessionPeerManager() + sim := bssim.New() + bpm := bsbpm.New() + notif := notifications.New() + defer notif.Shutdown() + id := testutil.GenerateSessionID() + session := New(ctx, id, fwm, fpm, sim, newFakePeerManager(), bpm, notif, time.Second, delay.Fixed(time.Minute), "") + blockGenerator := blocksutil.NewBlockGenerator() + blks := blockGenerator.Blocks(2) + cids := []cid.Cid{blks[0].Cid(), blks[1].Cid()} + + _, err := session.GetBlocks(ctx, cids) + if err != nil { + t.Fatal("error getting blocks") + } + + // Wait for initial want request + <-fwm.wantReqs + + // Shut down session + cancelCtx() + + // Simulate receiving block for a CID + peer := testutil.GeneratePeers(1)[0] + session.ReceiveFrom(peer, []cid.Cid{blks[0].Cid()}, []cid.Cid{}, []cid.Cid{}) + + time.Sleep(5 * time.Millisecond) + + // If we don't get a panic then the test is considered passing +} diff --git a/session/sessionwants.go b/session/sessionwants.go index aa487f12..9f896049 100644 --- a/session/sessionwants.go +++ b/session/sessionwants.go @@ -1,6 +1,7 @@ package session import ( + "fmt" "math/rand" "sync" "time" @@ -8,60 +9,43 @@ import ( cid "github.com/ipfs/go-cid" ) +// sessionWants keeps track of which cids are waiting to be sent out, and which +// peers are "live" - ie, we've sent a request but haven't received a block yet type sessionWants struct { sync.RWMutex toFetch *cidQueue liveWants map[cid.Cid]time.Time - pastWants *cid.Set } -// BlocksReceived moves received block CIDs from live to past wants and -// measures latency. It returns the CIDs of blocks that were actually wanted -// (as opposed to duplicates) and the total latency for all incoming blocks. 
-func (sw *sessionWants) BlocksReceived(cids []cid.Cid) ([]cid.Cid, time.Duration) { - now := time.Now() +func newSessionWants() sessionWants { + return sessionWants{ + toFetch: newCidQueue(), + liveWants: make(map[cid.Cid]time.Time), + } +} + +func (sw *sessionWants) String() string { + return fmt.Sprintf("%d pending / %d live", sw.toFetch.Len(), len(sw.liveWants)) +} +// BlocksRequested is called when the client makes a request for blocks +func (sw *sessionWants) BlocksRequested(newWants []cid.Cid) { sw.Lock() defer sw.Unlock() - totalLatency := time.Duration(0) - wanted := make([]cid.Cid, 0, len(cids)) - for _, c := range cids { - if sw.unlockedIsWanted(c) { - wanted = append(wanted, c) - - // If the block CID was in the live wants queue, remove it - tval, ok := sw.liveWants[c] - if ok { - totalLatency += now.Sub(tval) - delete(sw.liveWants, c) - } else { - // Otherwise remove it from the toFetch queue, if it was there - sw.toFetch.Remove(c) - } - - // Keep track of CIDs we've successfully fetched - sw.pastWants.Add(c) - } + for _, k := range newWants { + sw.toFetch.Push(k) } - - return wanted, totalLatency } -// GetNextWants adds any new wants to the list of CIDs to fetch, then moves as -// many CIDs from the fetch queue to the live wants list as possible (given the -// limit). Returns the newly live wants. -func (sw *sessionWants) GetNextWants(limit int, newWants []cid.Cid) []cid.Cid { +// GetNextWants moves as many CIDs from the fetch queue to the live wants +// list as possible (given the limit). Returns the newly live wants. +func (sw *sessionWants) GetNextWants(limit int) []cid.Cid { now := time.Now() sw.Lock() defer sw.Unlock() - // Add new wants to the fetch queue - for _, k := range newWants { - sw.toFetch.Push(k) - } - // Move CIDs from fetch queue to the live wants queue (up to the limit) currentLiveCount := len(sw.liveWants) toAdd := limit - currentLiveCount @@ -76,6 +60,55 @@ func (sw *sessionWants) GetNextWants(limit int, newWants []cid.Cid) []cid.Cid { return live } +// WantsSent is called when wants are sent to a peer +func (sw *sessionWants) WantsSent(ks []cid.Cid) { + now := time.Now() + + sw.Lock() + defer sw.Unlock() + + for _, c := range ks { + if _, ok := sw.liveWants[c]; !ok { + sw.toFetch.Remove(c) + sw.liveWants[c] = now + } + } +} + +// BlocksReceived removes received block CIDs from the live wants list and +// measures latency. It returns the CIDs of blocks that were actually +// wanted (as opposed to duplicates) and the total latency for all incoming blocks. +func (sw *sessionWants) BlocksReceived(ks []cid.Cid) ([]cid.Cid, time.Duration) { + wanted := make([]cid.Cid, 0, len(ks)) + totalLatency := time.Duration(0) + if len(ks) == 0 { + return wanted, totalLatency + } + + now := time.Now() + + sw.Lock() + defer sw.Unlock() + + for _, c := range ks { + if sw.unlockedIsWanted(c) { + wanted = append(wanted, c) + + sentAt, ok := sw.liveWants[c] + if ok && !sentAt.IsZero() { + totalLatency += now.Sub(sentAt) + } + + // Remove the CID from the live wants / toFetch queue + delete(sw.liveWants, c) + sw.toFetch.Remove(c) + } + } + + return wanted, totalLatency +} + // PrepareBroadcast saves the current time for each live want and returns the // live want CIDs. 
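+// It is typically invoked from the session's idle tick, just before the +// live wants are rebroadcast as want-haves to connected peers.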
func (sw *sessionWants) PrepareBroadcast() []cid.Cid { @@ -102,23 +135,6 @@ func (sw *sessionWants) CancelPending(keys []cid.Cid) { } } -// ForEachUniqDup iterates over each of the given CIDs and calls isUniqFn -// if the session is expecting a block for the CID, or isDupFn if the session -// has already received the block. -func (sw *sessionWants) ForEachUniqDup(ks []cid.Cid, isUniqFn, isDupFn func()) { - sw.RLock() - - for _, k := range ks { - if sw.unlockedIsWanted(k) { - isUniqFn() - } else if sw.pastWants.Has(k) { - isDupFn() - } - } - - sw.RUnlock() -} - // LiveWants returns a list of live wants func (sw *sessionWants) LiveWants() []cid.Cid { sw.RLock() @@ -131,7 +147,6 @@ func (sw *sessionWants) LiveWants() []cid.Cid { return live } -// RandomLiveWant returns a randomly selected live want func (sw *sessionWants) RandomLiveWant() cid.Cid { i := rand.Uint64() @@ -160,31 +175,6 @@ func (sw *sessionWants) HasLiveWants() bool { return len(sw.liveWants) > 0 } -// IsWanted indicates if the session is expecting to receive the block with the -// given CID -func (sw *sessionWants) IsWanted(c cid.Cid) bool { - sw.RLock() - defer sw.RUnlock() - - return sw.unlockedIsWanted(c) -} - -// FilterInteresting filters the list so that it only contains keys for -// blocks that the session is waiting to receive or has received in the past -func (sw *sessionWants) FilterInteresting(ks []cid.Cid) []cid.Cid { - sw.RLock() - defer sw.RUnlock() - - var interested []cid.Cid - for _, k := range ks { - if sw.unlockedIsWanted(k) || sw.pastWants.Has(k) { - interested = append(interested, k) - } - } - - return interested -} - func (sw *sessionWants) unlockedIsWanted(c cid.Cid) bool { _, ok := sw.liveWants[c] if !ok { diff --git a/session/sessionwants_test.go b/session/sessionwants_test.go index 87972924..953ecce9 100644 --- a/session/sessionwants_test.go +++ b/session/sessionwants_test.go @@ -2,20 +2,13 @@ package session import ( "testing" - "time" "github.com/ipfs/go-bitswap/testutil" cid "github.com/ipfs/go-cid" ) -func TestSessionWants(t *testing.T) { - sw := sessionWants{ - toFetch: newCidQueue(), - liveWants: make(map[cid.Cid]time.Time), - pastWants: cid.NewSet(), - } - cids := testutil.GenerateCids(10) - others := testutil.GenerateCids(1) +func TestEmptySessionWants(t *testing.T) { + sw := newSessionWants() // Expect these functions to return nothing on a new sessionWants lws := sw.PrepareBroadcast() @@ -33,25 +26,29 @@ func TestSessionWants(t *testing.T) { if rw.Defined() { t.Fatal("expected no random want") } - if sw.IsWanted(cids[0]) { - t.Fatal("expected cid to not be wanted") - } - if len(sw.FilterInteresting(cids)) > 0 { - t.Fatal("expected no interesting wants") - } +} - // Add 10 new wants with a limit of 5 - // The first 5 cids should go into the toFetch queue - // The other 5 cids should go into the live want queue - // toFetch Live Past +func TestSessionWants(t *testing.T) { + sw := newSessionWants() + cids := testutil.GenerateCids(10) + others := testutil.GenerateCids(1) + + // Add 10 new wants + // toFetch Live + // 9876543210 + sw.BlocksRequested(cids) + + // Get next wants with a limit of 5 + // The first 5 cids should go move into the live queue + // toFetch Live // 98765 43210 - nextw := sw.GetNextWants(5, cids) + nextw := sw.GetNextWants(5) if len(nextw) != 5 { t.Fatal("expected 5 next wants") } - lws = sw.PrepareBroadcast() + lws := sw.PrepareBroadcast() if len(lws) != 5 { - t.Fatal("expected 5 broadcast wants") + t.Fatal("expected 5 broadcast wants", len(lws)) } lws = sw.LiveWants() if 
len(lws) != 5 { @@ -60,52 +57,28 @@ func TestSessionWants(t *testing.T) { if !sw.HasLiveWants() { t.Fatal("expected to have live wants") } - rw = sw.RandomLiveWant() + rw := sw.RandomLiveWant() if !rw.Defined() { t.Fatal("expected random want") } - if !sw.IsWanted(cids[0]) { - t.Fatal("expected cid to be wanted") - } - if !sw.IsWanted(cids[9]) { - t.Fatal("expected cid to be wanted") - } - if len(sw.FilterInteresting([]cid.Cid{cids[0], cids[9], others[0]})) != 2 { - t.Fatal("expected 2 interesting wants") - } // Two wanted blocks and one other block are received. - // The wanted blocks should be moved from the live wants queue - // to the past wants set (the other block CID should be ignored) - // toFetch Live Past - // 98765 432__ 10 + // The wanted blocks should be removed from the live wants queue + // (the other block CID should be ignored) + // toFetch Live + // 98765 432__ recvdCids := []cid.Cid{cids[0], cids[1], others[0]} - uniq := 0 - dup := 0 - sw.ForEachUniqDup(recvdCids, func() { uniq++ }, func() { dup++ }) - if uniq != 2 || dup != 0 { - t.Fatal("expected 2 uniqs / 0 dups", uniq, dup) - } sw.BlocksReceived(recvdCids) lws = sw.LiveWants() if len(lws) != 3 { t.Fatal("expected 3 live wants") } - if sw.IsWanted(cids[0]) { - t.Fatal("expected cid to no longer be wanted") - } - if !sw.IsWanted(cids[9]) { - t.Fatal("expected cid to be wanted") - } - if len(sw.FilterInteresting([]cid.Cid{cids[0], cids[9], others[0]})) != 2 { - t.Fatal("expected 2 interesting wants") - } // Ask for next wants with a limit of 5 // Should move 2 wants from toFetch queue to live wants - // toFetch Live Past - // 987__ 65432 10 - nextw = sw.GetNextWants(5, nil) + // toFetch Live + // 987__ 65432 + nextw = sw.GetNextWants(5) if len(nextw) != 2 { t.Fatal("expected 2 next wants") } @@ -113,22 +86,13 @@ func TestSessionWants(t *testing.T) { if len(lws) != 5 { t.Fatal("expected 5 live wants") } - if !sw.IsWanted(cids[5]) { - t.Fatal("expected cid to be wanted") - } // One wanted block and one dup block are received. - // The wanted block should be moved from the live wants queue - // to the past wants set - // toFetch Live Past - // 987 654_2 310 + // The wanted block should be removed from the live + // wants queue. 
+ // toFetch Live + // 987 654_2 recvdCids = []cid.Cid{cids[0], cids[3]} - uniq = 0 - dup = 0 - sw.ForEachUniqDup(recvdCids, func() { uniq++ }, func() { dup++ }) - if uniq != 1 || dup != 1 { - t.Fatal("expected 1 uniq / 1 dup", uniq, dup) - } sw.BlocksReceived(recvdCids) lws = sw.LiveWants() if len(lws) != 4 { @@ -136,17 +100,11 @@ func TestSessionWants(t *testing.T) { } // One block in the toFetch queue should be cancelled - // toFetch Live Past - // 9_7 654_2 310 + // toFetch Live + // 9_7 654_2 sw.CancelPending([]cid.Cid{cids[8]}) lws = sw.LiveWants() if len(lws) != 4 { t.Fatal("expected 4 live wants") } - if sw.IsWanted(cids[8]) { - t.Fatal("expected cid to no longer be wanted") - } - if len(sw.FilterInteresting([]cid.Cid{cids[0], cids[8]})) != 1 { - t.Fatal("expected 1 interesting wants") - } } diff --git a/session/sessionwantsender.go b/session/sessionwantsender.go new file mode 100644 index 00000000..ddd24ee0 --- /dev/null +++ b/session/sessionwantsender.go @@ -0,0 +1,605 @@ +package session + +import ( + "context" + + bsbpm "github.com/ipfs/go-bitswap/blockpresencemanager" + + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p-core/peer" +) + +// Maximum number of changes to accept before blocking +const changesBufferSize = 128 + +// BlockPresence indicates whether a peer has a block. +// Note that the order is important: we decide which peer to send a want to +// based on knowing whether the peer has the block, e.g. we're more likely to +// send a want to a peer that has the block than to a peer that doesn't have +// the block, so BPHave > BPDontHave +type BlockPresence int + +const ( + BPDontHave BlockPresence = iota + BPUnknown + BPHave +) + +// update encapsulates a message received by the session +type update struct { + // Which peer sent the update + from peer.ID + // cids of blocks received + ks []cid.Cid + // HAVE message + haves []cid.Cid + // DONT_HAVE message + dontHaves []cid.Cid +} + +// peerAvailability indicates a peer's connection state +type peerAvailability struct { + target peer.ID + available bool +} + +// change can be a new peer being discovered, a new message received by the +// session, or a change in the connection status of a peer +type change struct { + // the peer ID of a new peer + addPeer peer.ID + // new wants requested + add []cid.Cid + // new message received by session (blocks / HAVEs / DONT_HAVEs) + update update + // peer has connected / disconnected + availability peerAvailability +} + +type onSendFn func(to peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) +type onPeersExhaustedFn func([]cid.Cid) + +// +// sessionWantSender is responsible for sending want-have and want-block to +// peers. For each want, it sends a single optimistic want-block request to +// one peer and want-have requests to all other peers in the session. +// To choose the best peer for the optimistic want-block it maintains a list +// of how peers have responded to each want (HAVE / DONT_HAVE / Unknown) and +// consults the peer response tracker (records which peers sent us blocks).
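+// For example, if peers A, B and C are in the session and only A has sent a +// HAVE for a want, the optimistic want-block for that want goes to A and +// want-haves go to B and C.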
+// +type sessionWantSender struct { + // When the context is cancelled, sessionWantSender shuts down + ctx context.Context + // The session ID + sessionID uint64 + // A channel that collects incoming changes (events) + changes chan change + // Information about each want indexed by CID + wants map[cid.Cid]*wantInfo + // Tracks which peers we have sent want-blocks to + swbt *sentWantBlocksTracker + // Maintains a list of peers and whether they are connected + peerAvlMgr *peerAvailabilityManager + // Tracks the number of blocks each peer sent us + peerRspTrkr *peerResponseTracker + + // Sends wants to peers + pm PeerManager + // Keeps track of which peer has / doesn't have a block + bpm *bsbpm.BlockPresenceManager + // Called when wants are sent + onSend onSendFn + // Called when all peers explicitly don't have a block + onPeersExhausted onPeersExhaustedFn +} + +func newSessionWantSender(ctx context.Context, sid uint64, pm PeerManager, bpm *bsbpm.BlockPresenceManager, + onSend onSendFn, onPeersExhausted onPeersExhaustedFn) sessionWantSender { + + spm := sessionWantSender{ + ctx: ctx, + sessionID: sid, + changes: make(chan change, changesBufferSize), + wants: make(map[cid.Cid]*wantInfo), + swbt: newSentWantBlocksTracker(), + peerAvlMgr: newPeerAvailabilityManager(), + peerRspTrkr: newPeerResponseTracker(), + + pm: pm, + bpm: bpm, + onSend: onSend, + onPeersExhausted: onPeersExhausted, + } + + return spm +} + +func (spm *sessionWantSender) ID() uint64 { + return spm.sessionID +} + +// Add is called when new wants are added to the session +func (spm *sessionWantSender) Add(ks []cid.Cid) { + if len(ks) == 0 { + return + } + spm.addChange(change{add: ks}) +} + +// Update is called when the session receives a message with incoming blocks +// or HAVE / DONT_HAVE +func (spm *sessionWantSender) Update(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid, isNewPeer bool) { + // fmt.Printf("Update(%s, %d, %d, %d, %t)\n", lu.P(from), len(ks), len(haves), len(dontHaves), isNewPeer) + hasUpdate := len(ks) > 0 || len(haves) > 0 || len(dontHaves) > 0 + if !hasUpdate && !isNewPeer { + return + } + + ch := change{} + + if hasUpdate { + ch.update = update{from, ks, haves, dontHaves} + } + + // If the message came from a new peer, register with the peer manager + if isNewPeer { + available := spm.pm.RegisterSession(from, spm) + ch.addPeer = from + ch.availability = peerAvailability{from, available} + } + + spm.addChange(ch) +} + +// SignalAvailability is called by the PeerManager to signal that a peer has +// connected / disconnected +func (spm *sessionWantSender) SignalAvailability(p peer.ID, isAvailable bool) { + // fmt.Printf("SignalAvailability(%s, %t)\n", lu.P(p), isAvailable) + availability := peerAvailability{p, isAvailable} + spm.addChange(change{availability: availability}) +} + +// Run is the main loop for processing incoming changes +func (spm *sessionWantSender) Run() { + for { + select { + case ch := <-spm.changes: + spm.onChange([]change{ch}) + case <-spm.ctx.Done(): + spm.shutdown() + return + } + } +} + +// addChange adds a new change to the queue +func (spm *sessionWantSender) addChange(c change) { + select { + case spm.changes <- c: + case <-spm.ctx.Done(): + } +} + +// shutdown unregisters the session with the PeerManager +func (spm *sessionWantSender) shutdown() { + spm.pm.UnregisterSession(spm.sessionID) +} + +// collectChanges collects all the changes that have occurred since the last +// invocation of onChange +func (spm *sessionWantSender) collectChanges(changes []change)
[]change { + for len(changes) < changesBufferSize { + select { + case next := <-spm.changes: + changes = append(changes, next) + default: + return changes + } + } + return changes +} + +// onChange processes the next set of changes +func (spm *sessionWantSender) onChange(changes []change) { + // Several changes may have been recorded since the last time we checked, + // so pop all outstanding changes from the channel + changes = spm.collectChanges(changes) + + // Apply each change + availability := make(map[peer.ID]bool, len(changes)) + var updates []update + for _, chng := range changes { + // Add newly discovered peers + if chng.addPeer != "" { + spm.peerAvlMgr.addPeer(chng.addPeer) + } + + // Initialize info for new wants + for _, c := range chng.add { + spm.trackWant(c) + } + + // Consolidate updates and changes to availability + if chng.update.from != "" { + updates = append(updates, chng.update) + } + if chng.availability.target != "" { + availability[chng.availability.target] = chng.availability.available + } + } + + // Update peer availability + newlyAvailable := spm.processAvailability(availability) + + // Update wants + spm.processUpdates(updates) + + // If there are some connected peers, send any pending wants + if spm.peerAvlMgr.haveAvailablePeers() { + // fmt.Printf("sendNextWants()\n") + spm.sendNextWants(newlyAvailable) + // fmt.Println(spm) + } +} + +// processAvailability updates the want queue with any changes in +// peer availability +func (spm *sessionWantSender) processAvailability(availability map[peer.ID]bool) []peer.ID { + var newlyAvailable []peer.ID + for p, isNowAvailable := range availability { + // Make sure this is a peer that the session is actually interested in + if wasAvailable, ok := spm.peerAvlMgr.isAvailable(p); ok { + // If the state has changed + if wasAvailable != isNowAvailable { + // Update the state and record that something changed + spm.peerAvlMgr.setPeerAvailability(p, isNowAvailable) + // fmt.Printf("processAvailability change %s %t\n", lu.P(p), isNowAvailable) + spm.updateWantsPeerAvailability(p, isNowAvailable) + if isNowAvailable { + newlyAvailable = append(newlyAvailable, p) + } + } + } + } + + return newlyAvailable +} + +// trackWant creates a new entry in the map of CID -> want info +func (spm *sessionWantSender) trackWant(c cid.Cid) { + // fmt.Printf("trackWant %s\n", lu.C(c)) + if _, ok := spm.wants[c]; ok { + return + } + + // Create the want info + wi := newWantInfo(spm.peerRspTrkr) + spm.wants[c] = wi + + // For each available peer, register any information we know about + // whether the peer has the block + for _, p := range spm.peerAvlMgr.availablePeers() { + spm.updateWantBlockPresence(c, p) + } +} + +// processUpdates processes incoming blocks and HAVE / DONT_HAVEs +func (spm *sessionWantSender) processUpdates(updates []update) { + dontHaves := cid.NewSet() + for _, upd := range updates { + // TODO: If there is a timeout for the want from the peer, remove want.sentTo + // so the want can be sent to another peer (and blacklist the peer?) 
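+		// For each update below: HAVEs / DONT_HAVEs adjust the block presence + // for the sending peer, a DONT_HAVE that answers a want-block clears + // sentTo so the want can be re-sent to another peer, and each received + // block removes the corresponding want.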
+ // TODO: If a peer is no longer available, check if all providers of + // each CID have been exhausted + + // For each DONT_HAVE + for _, c := range upd.dontHaves { + dontHaves.Add(c) + + // Update the block presence for the peer + spm.updateWantBlockPresence(c, upd.from) + + // Check if the DONT_HAVE is in response to a want-block + // (could also be in response to want-have) + if spm.swbt.haveSentWantBlockTo(upd.from, c) { + // If we were waiting for a response from this peer, clear + // sentTo so that we can send the want to another peer + if sentTo, ok := spm.getWantSentTo(c); ok && sentTo == upd.from { + spm.setWantSentTo(c, "") + } + } + } + + // For each HAVE + for _, c := range upd.haves { + // Update the block presence for the peer + spm.updateWantBlockPresence(c, upd.from) + } + + // For each received block + for _, c := range upd.ks { + // Remove the want + removed := spm.removeWant(c) + if removed != nil { + // Inform the peer tracker that this peer was the first to send + // us the block + spm.peerRspTrkr.receivedBlockFrom(upd.from) + } + } + } + + // If all available peers for a cid sent a DONT_HAVE, signal to the session + // that we've exhausted available peers + if dontHaves.Len() > 0 { + exhausted := spm.bpm.AllPeersDoNotHaveBlock(spm.peerAvlMgr.availablePeers(), dontHaves.Keys()) + newlyExhausted := spm.newlyExhausted(exhausted) + if len(newlyExhausted) > 0 { + spm.onPeersExhausted(newlyExhausted) + } + } +} + +// convenience structs for passing around want-blocks and want-haves for a peer +type wantSets struct { + wantBlocks *cid.Set + wantHaves *cid.Set +} + +type allWants map[peer.ID]*wantSets + +func (aw allWants) forPeer(p peer.ID) *wantSets { + if _, ok := aw[p]; !ok { + aw[p] = &wantSets{ + wantBlocks: cid.NewSet(), + wantHaves: cid.NewSet(), + } + } + return aw[p] +} + +// sendNextWants sends wants to peers according to the latest information +// about which peers have / don't have blocks +func (spm *sessionWantSender) sendNextWants(newlyAvailable []peer.ID) { + toSend := make(allWants) + + for c, wi := range spm.wants { + // Ensure we send want-haves to any newly available peers + for _, p := range newlyAvailable { + toSend.forPeer(p).wantHaves.Add(c) + } + + // We already sent a want-block to a peer and haven't yet received a + // response + if wi.sentTo != "" { + // fmt.Printf(" q - already sent want-block %s to %s\n", lu.C(c), lu.P(wi.sentTo)) + continue + } + + // All the peers have indicated that they don't have the block + // corresponding to this want, so we must wait to discover more peers + if wi.bestPeer == "" { + // TODO: work this out in real time instead of using bestP?
+ // fmt.Printf(" q - no best peer for %s\n", lu.C(c)) + continue + } + + // fmt.Printf(" q - send best: %s: %s\n", lu.C(c), lu.P(wi.bestPeer)) + + // Record that we are sending a want-block for this want to the peer + spm.setWantSentTo(c, wi.bestPeer) + + // Send a want-block to the chosen peer + toSend.forPeer(wi.bestPeer).wantBlocks.Add(c) + + // Send a want-have to each other peer + for _, op := range spm.peerAvlMgr.availablePeers() { + if op != wi.bestPeer { + toSend.forPeer(op).wantHaves.Add(c) + } + } + } + + // Send any wants we've collected + spm.sendWants(toSend) +} + +// sendWants sends want-haves and want-blocks to the appropriate peers +func (spm *sessionWantSender) sendWants(sends allWants) { + // fmt.Printf(" send wants to %d peers\n", len(sends)) + + // For each peer we're sending a request to + for p, snd := range sends { + // fmt.Printf(" send %d wants to %s\n", snd.wantBlocks.Len(), lu.P(p)) + + // Piggyback some other want-haves onto the request to the peer + for _, c := range spm.getPiggybackWantHaves(p, snd.wantBlocks) { + snd.wantHaves.Add(c) + } + + // Send the wants to the peer. + // Note that the PeerManager ensures that we don't send duplicate + // want-haves / want-blocks to a peer, and that want-blocks take + // precedence over want-haves. + wblks := snd.wantBlocks.Keys() + whaves := snd.wantHaves.Keys() + spm.pm.SendWants(spm.ctx, p, wblks, whaves) + + // Inform the session that we've sent the wants + spm.onSend(p, wblks, whaves) + + // Record which peers we sent want-blocks to + spm.swbt.addSentWantBlocksTo(p, wblks) + } +} + +// getPiggybackWantHaves gets the want-haves that should be piggybacked onto +// a request that we are making to send want-blocks to a peer +func (spm *sessionWantSender) getPiggybackWantHaves(p peer.ID, wantBlocks *cid.Set) []cid.Cid { + var whs []cid.Cid + for c := range spm.wants { + // Don't send want-have if we're already sending a want-block + // (or have previously) + if !wantBlocks.Has(c) && !spm.swbt.haveSentWantBlockTo(p, c) { + whs = append(whs, c) + } + } + return whs +} + +// newlyExhausted filters the list of keys for wants that have not already +// been marked as exhausted (all peers indicated they don't have the block) +func (spm *sessionWantSender) newlyExhausted(ks []cid.Cid) []cid.Cid { + var res []cid.Cid + for _, c := range ks { + if wi, ok := spm.wants[c]; ok { + if !wi.exhausted { + res = append(res, c) + wi.exhausted = true + } + } + } + return res +} + +// removeWant is called when the corresponding block is received +func (spm *sessionWantSender) removeWant(c cid.Cid) *wantInfo { + if wi, ok := spm.wants[c]; ok { + delete(spm.wants, c) + return wi + } + return nil +} + +// updateWantsPeerAvailability is called when the availability changes for a +// peer. It updates all the wants accordingly.
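+// For example, when a peer disconnects it is removed from each want's block +// presence map and the best peer for each want is recalculated.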
+func (spm *sessionWantSender) updateWantsPeerAvailability(p peer.ID, isNowAvailable bool) { + for c, wi := range spm.wants { + if isNowAvailable { + spm.updateWantBlockPresence(c, p) + } else { + wi.removePeer(p) + } + } +} + +// updateWantBlockPresence is called when a HAVE / DONT_HAVE is received for the given +// want / peer +func (spm *sessionWantSender) updateWantBlockPresence(c cid.Cid, p peer.ID) { + wi, ok := spm.wants[c] + if !ok { + return + } + + // If the peer sent us a HAVE or DONT_HAVE for the cid, adjust the + // block presence for the peer / cid combination + if spm.bpm.PeerHasBlock(p, c) { + wi.setPeerBlockPresence(p, BPHave) + } else if spm.bpm.PeerDoesNotHaveBlock(p, c) { + wi.setPeerBlockPresence(p, BPDontHave) + } else { + wi.setPeerBlockPresence(p, BPUnknown) + } +} + +// Which peer was the want sent to +func (spm *sessionWantSender) getWantSentTo(c cid.Cid) (peer.ID, bool) { + if wi, ok := spm.wants[c]; ok { + return wi.sentTo, true + } + return "", false +} + +// Record which peer the want was sent to +func (spm *sessionWantSender) setWantSentTo(c cid.Cid, p peer.ID) { + if wi, ok := spm.wants[c]; ok { + wi.sentTo = p + } +} + +// wantInfo keeps track of the information for a want +type wantInfo struct { + // Tracks HAVE / DONT_HAVE sent to us for the want by each peer + blockPresence map[peer.ID]BlockPresence + // The peer that we've sent a want-block to (cleared when we get a response) + sentTo peer.ID + // The "best" peer to send the want to next + bestPeer peer.ID + // Keeps track of how many hits / misses each peer has sent us for wants + // in the session + peerRspTrkr *peerResponseTracker + // true if all known peers have sent a DONT_HAVE for this want + exhausted bool +} + +// func newWantInfo(prt *peerResponseTracker, c cid.Cid, startIndex int) *wantInfo { +func newWantInfo(prt *peerResponseTracker) *wantInfo { + return &wantInfo{ + blockPresence: make(map[peer.ID]BlockPresence), + peerRspTrkr: prt, + exhausted: false, + } +} + +// setPeerBlockPresence sets the block presence for the given peer +func (wi *wantInfo) setPeerBlockPresence(p peer.ID, bp BlockPresence) { + wi.blockPresence[p] = bp + wi.calculateBestPeer() + + // If a peer informed us that it has a block then make sure the want is no + // longer flagged as exhausted (exhausted means no peers have the block) + if bp == BPHave { + wi.exhausted = false + } +} + +// removePeer deletes the given peer from the want info +func (wi *wantInfo) removePeer(p peer.ID) { + // If we were waiting to hear back from the peer that is being removed, + // clear the sentTo field so we no longer wait + if p == wi.sentTo { + wi.sentTo = "" + } + delete(wi.blockPresence, p) + wi.calculateBestPeer() +} + +// calculateBestPeer finds the best peer to send the want to next +func (wi *wantInfo) calculateBestPeer() { + // Recalculate the best peer + bestBP := BPDontHave + bestPeer := peer.ID("") + + // Find the peer with the best block presence, recording how many peers + // share the block presence + countWithBest := 0 + for p, bp := range wi.blockPresence { + if bp > bestBP { + bestBP = bp + bestPeer = p + countWithBest = 1 + } else if bp == bestBP { + countWithBest++ + } + } + wi.bestPeer = bestPeer + + // If no peer has a block presence better than DONT_HAVE, bail out + if bestPeer == "" { + return + } + + // If there was only one peer with the best block presence, we're done + if countWithBest <= 1 { + return + } + + // There were multiple peers with the best block presence, so choose one of + // them to be the best + 
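+	// Collect the peers that tied for the best block presence and let the + // peer response tracker break the tie (it is assumed to favour peers that + // have already sent us blocks in this session).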
var peersWithBest []peer.ID + for p, bp := range wi.blockPresence { + if bp == bestBP { + peersWithBest = append(peersWithBest, p) + } + } + wi.bestPeer = wi.peerRspTrkr.choose(peersWithBest) +} diff --git a/session/sessionwantsender_test.go b/session/sessionwantsender_test.go new file mode 100644 index 00000000..e3774409 --- /dev/null +++ b/session/sessionwantsender_test.go @@ -0,0 +1,348 @@ +package session + +import ( + "context" + "sync" + "testing" + "time" + + bsbpm "github.com/ipfs/go-bitswap/blockpresencemanager" + bspm "github.com/ipfs/go-bitswap/peermanager" + "github.com/ipfs/go-bitswap/testutil" + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p-core/peer" +) + +type sentWants struct { + p peer.ID + wantHaves *cid.Set + wantBlocks *cid.Set +} + +type mockPeerManager struct { + peerSessions sync.Map + peerSends sync.Map +} + +func newMockPeerManager() *mockPeerManager { + return &mockPeerManager{} +} + +func (pm *mockPeerManager) RegisterSession(p peer.ID, sess bspm.Session) bool { + pm.peerSessions.Store(p, sess) + return true +} + +func (pm *mockPeerManager) UnregisterSession(sesid uint64) { +} + +func (pm *mockPeerManager) SendWants(ctx context.Context, p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { + swi, _ := pm.peerSends.LoadOrStore(p, sentWants{p, cid.NewSet(), cid.NewSet()}) + sw := swi.(sentWants) + for _, c := range wantBlocks { + sw.wantBlocks.Add(c) + } + for _, c := range wantHaves { + if !sw.wantBlocks.Has(c) { + sw.wantHaves.Add(c) + } + } +} + +func (pm *mockPeerManager) waitNextWants() map[peer.ID]sentWants { + time.Sleep(5 * time.Millisecond) + nw := make(map[peer.ID]sentWants) + pm.peerSends.Range(func(k, v interface{}) bool { + nw[k.(peer.ID)] = v.(sentWants) + return true + }) + return nw +} + +func (pm *mockPeerManager) clearWants() { + pm.peerSends.Range(func(k, v interface{}) bool { + pm.peerSends.Delete(k) + return true + }) +} + +func TestSendWants(t *testing.T) { + cids := testutil.GenerateCids(4) + peers := testutil.GeneratePeers(1) + peerA := peers[0] + sid := uint64(1) + pm := newMockPeerManager() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(context.Background(), sid, pm, bpm, onSend, onPeersExhausted) + + go spm.Run() + + // add cid0, cid1 + blkCids0 := cids[0:2] + spm.Add(blkCids0) + // peerA: HAVE cid0 + spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}, true) + + // Wait for processing to complete + peerSends := pm.waitNextWants() + + // Should have sent + // peerA: want-block cid0, cid1 + sw, ok := peerSends[peerA] + if !ok { + t.Fatal("Nothing sent to peer") + } + if !testutil.MatchKeysIgnoreOrder(sw.wantBlocks.Keys(), blkCids0) { + t.Fatal("Wrong keys") + } + if sw.wantHaves.Len() > 0 { + t.Fatal("Expecting no want-haves") + } +} + +func TestSendsWantBlockToOnePeerOnly(t *testing.T) { + cids := testutil.GenerateCids(4) + peers := testutil.GeneratePeers(2) + peerA := peers[0] + peerB := peers[1] + sid := uint64(1) + pm := newMockPeerManager() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(context.Background(), sid, pm, bpm, onSend, onPeersExhausted) + + go spm.Run() + + // add cid0, cid1 + blkCids0 := cids[0:2] + spm.Add(blkCids0) + // peerA: HAVE cid0 + spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}, true) + + // Wait for processing to complete + peerSends := pm.waitNextWants() + + // Should have 
sent + // peerA: want-block cid0, cid1 + sw, ok := peerSends[peerA] + if !ok { + t.Fatal("Nothing sent to peer") + } + if !testutil.MatchKeysIgnoreOrder(sw.wantBlocks.Keys(), blkCids0) { + t.Fatal("Wrong keys") + } + + // Clear wants (makes keeping track of what's been sent easier) + pm.clearWants() + + // peerB: HAVE cid0 + spm.Update(peerB, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}, true) + + // Wait for processing to complete + peerSends = pm.waitNextWants() + + // Have not received response from peerA, so should not send want-block to + // peerB. Should have sent + // peerB: want-have cid0, cid1 + sw, ok = peerSends[peerB] + if !ok { + t.Fatal("Nothing sent to peer") + } + if sw.wantBlocks.Len() > 0 { + t.Fatal("Expecting no want-blocks") + } + if !testutil.MatchKeysIgnoreOrder(sw.wantHaves.Keys(), blkCids0) { + t.Fatal("Wrong keys") + } +} + +func TestReceiveBlock(t *testing.T) { + cids := testutil.GenerateCids(2) + peers := testutil.GeneratePeers(2) + peerA := peers[0] + peerB := peers[1] + sid := uint64(1) + pm := newMockPeerManager() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(context.Background(), sid, pm, bpm, onSend, onPeersExhausted) + + go spm.Run() + + // add cid0, cid1 + spm.Add(cids) + // peerA: HAVE cid0 + spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}, true) + + // Wait for processing to complete + peerSends := pm.waitNextWants() + + // Should have sent + // peerA: want-block cid0, cid1 + sw, ok := peerSends[peerA] + if !ok { + t.Fatal("Nothing sent to peer") + } + if !testutil.MatchKeysIgnoreOrder(sw.wantBlocks.Keys(), cids) { + t.Fatal("Wrong keys") + } + + // Clear wants (makes keeping track of what's been sent easier) + pm.clearWants() + + // peerA: block cid0, DONT_HAVE cid1 + bpm.ReceiveFrom(peerA, []cid.Cid{}, []cid.Cid{cids[1]}) + spm.Update(peerA, []cid.Cid{cids[0]}, []cid.Cid{}, []cid.Cid{cids[1]}, false) + // peerB: HAVE cid0, cid1 + bpm.ReceiveFrom(peerB, cids, []cid.Cid{}) + spm.Update(peerB, []cid.Cid{}, cids, []cid.Cid{}, true) + + // Wait for processing to complete + peerSends = pm.waitNextWants() + + // Should have sent + // peerB: want-block cid1 + // (should not have sent want-block for cid0 because block0 has already + // been received) + sw, ok = peerSends[peerB] + if !ok { + t.Fatal("Nothing sent to peer") + } + wb := sw.wantBlocks.Keys() + if len(wb) != 1 || !wb[0].Equals(cids[1]) { + t.Fatal("Wrong keys", wb) + } +} + +func TestPeerUnavailable(t *testing.T) { + cids := testutil.GenerateCids(2) + peers := testutil.GeneratePeers(2) + peerA := peers[0] + peerB := peers[1] + sid := uint64(1) + pm := newMockPeerManager() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(context.Background(), sid, pm, bpm, onSend, onPeersExhausted) + + go spm.Run() + + // add cid0, cid1 + spm.Add(cids) + // peerA: HAVE cid0 + spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}, true) + + // Wait for processing to complete + peerSends := pm.waitNextWants() + + // Should have sent + // peerA: want-block cid0, cid1 + sw, ok := peerSends[peerA] + if !ok { + t.Fatal("Nothing sent to peer") + } + if !testutil.MatchKeysIgnoreOrder(sw.wantBlocks.Keys(), cids) { + t.Fatal("Wrong keys") + } + + // Clear wants (makes keeping track of what's been sent easier) + pm.clearWants() + + // peerB: HAVE cid0 + spm.Update(peerB, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}, 
true) + + // Wait for processing to complete + peerSends = pm.waitNextWants() + + // Should not have sent anything because want-blocks were already sent to + // peer A + sw, ok = peerSends[peerB] + if ok && sw.wantBlocks.Len() > 0 { + t.Fatal("Expected no wants sent to peer") + } + + // peerA becomes unavailable + spm.SignalAvailability(peerA, false) + + // Wait for processing to complete + peerSends = pm.waitNextWants() + + // Should now have sent want-block cid0, cid1 to peerB + sw, ok = peerSends[peerB] + if !ok { + t.Fatal("Nothing sent to peer") + } + if !testutil.MatchKeysIgnoreOrder(sw.wantBlocks.Keys(), cids) { + t.Fatal("Wrong keys") + } +} + +func TestPeersExhausted(t *testing.T) { + cids := testutil.GenerateCids(2) + peers := testutil.GeneratePeers(2) + peerA := peers[0] + peerB := peers[1] + sid := uint64(1) + pm := newMockPeerManager() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + + var exhausted []cid.Cid + onPeersExhausted := func(ks []cid.Cid) { + exhausted = append(exhausted, ks...) + } + spm := newSessionWantSender(context.Background(), sid, pm, bpm, onSend, onPeersExhausted) + + go spm.Run() + + // add cid0, cid1 + spm.Add(cids) + + // peerA: DONT_HAVE cid0 + bpm.ReceiveFrom(peerA, []cid.Cid{}, []cid.Cid{cids[0]}) + // Note: this also registers peer A as being available + spm.Update(peerA, []cid.Cid{}, []cid.Cid{}, []cid.Cid{cids[0]}, true) + + time.Sleep(5 * time.Millisecond) + + // All available peers (peer A) have sent us a DONT_HAVE for cid0, + // so expect that onPeersExhausted() will be called with cid0 + if !testutil.MatchKeysIgnoreOrder(exhausted, []cid.Cid{cids[0]}) { + t.Fatal("Wrong keys") + } + + // Clear exhausted cids + exhausted = []cid.Cid{} + + // peerB: DONT_HAVE cid0, cid1 + bpm.ReceiveFrom(peerB, []cid.Cid{}, cids) + spm.Update(peerB, []cid.Cid{}, []cid.Cid{}, cids, true) + + // Wait for processing to complete + pm.waitNextWants() + + // All available peers (peer A and peer B) have sent us a DONT_HAVE + // for cid0, but we already called onPeersExhausted with cid0, so it + // should not be called again + if len(exhausted) > 0 { + t.Fatal("Wrong keys") + } + + // peerA: DONT_HAVE cid1 + bpm.ReceiveFrom(peerA, []cid.Cid{}, []cid.Cid{cids[1]}) + spm.Update(peerA, []cid.Cid{}, []cid.Cid{}, []cid.Cid{cids[1]}, false) + + // Wait for processing to complete + pm.waitNextWants() + + // All available peers (peer A and peer B) have sent us a DONT_HAVE for + // cid1, so expect that onPeersExhausted() will be called with cid1 + if !testutil.MatchKeysIgnoreOrder(exhausted, []cid.Cid{cids[1]}) { + t.Fatal("Wrong keys") + } +} diff --git a/session/wantinfo_test.go b/session/wantinfo_test.go new file mode 100644 index 00000000..618b231a --- /dev/null +++ b/session/wantinfo_test.go @@ -0,0 +1,80 @@ +package session + +import ( + "testing" + + "github.com/ipfs/go-bitswap/testutil" +) + +func TestEmptyWantInfo(t *testing.T) { + wp := newWantInfo(newPeerResponseTracker()) + + if wp.bestPeer != "" { + t.Fatal("expected no best peer") + } +} + +func TestSetPeerBlockPresence(t *testing.T) { + peers := testutil.GeneratePeers(2) + wp := newWantInfo(newPeerResponseTracker()) + + wp.setPeerBlockPresence(peers[0], BPUnknown) + if wp.bestPeer != peers[0] { + t.Fatal("wrong best peer") + } + + wp.setPeerBlockPresence(peers[1], BPHave) + if wp.bestPeer != peers[1] { + t.Fatal("wrong best peer") + } + + wp.setPeerBlockPresence(peers[0], BPDontHave) + if wp.bestPeer != peers[1] { + t.Fatal("wrong best peer") + } +} + +func 
TestSetPeerBlockPresenceBestLower(t *testing.T) { + peers := testutil.GeneratePeers(2) + wp := newWantInfo(newPeerResponseTracker()) + + wp.setPeerBlockPresence(peers[0], BPHave) + if wp.bestPeer != peers[0] { + t.Fatal("wrong best peer") + } + + wp.setPeerBlockPresence(peers[1], BPUnknown) + if wp.bestPeer != peers[0] { + t.Fatal("wrong best peer") + } + + wp.setPeerBlockPresence(peers[0], BPDontHave) + if wp.bestPeer != peers[1] { + t.Fatal("wrong best peer") + } +} + +func TestRemoveThenSetDontHave(t *testing.T) { + peers := testutil.GeneratePeers(2) + wp := newWantInfo(newPeerResponseTracker()) + + wp.setPeerBlockPresence(peers[0], BPUnknown) + if wp.bestPeer != peers[0] { + t.Fatal("wrong best peer") + } + + wp.removePeer(peers[0]) + if wp.bestPeer != "" { + t.Fatal("wrong best peer") + } + + wp.setPeerBlockPresence(peers[1], BPUnknown) + if wp.bestPeer != peers[1] { + t.Fatal("wrong best peer") + } + + wp.setPeerBlockPresence(peers[0], BPDontHave) + if wp.bestPeer != peers[1] { + t.Fatal("wrong best peer") + } +} diff --git a/sessioninterestmanager/sessioninterestmanager.go b/sessioninterestmanager/sessioninterestmanager.go new file mode 100644 index 00000000..9deb3795 --- /dev/null +++ b/sessioninterestmanager/sessioninterestmanager.go @@ -0,0 +1,73 @@ +package sessioninterestmanager + +import ( + bsswl "github.com/ipfs/go-bitswap/sessionwantlist" + blocks "github.com/ipfs/go-block-format" + + cid "github.com/ipfs/go-cid" +) + +type SessionInterestManager struct { + interested *bsswl.SessionWantlist + wanted *bsswl.SessionWantlist +} + +// New initializes a new SessionInterestManager. +func New() *SessionInterestManager { + return &SessionInterestManager{ + interested: bsswl.NewSessionWantlist(), + wanted: bsswl.NewSessionWantlist(), + } +} + +func (sim *SessionInterestManager) RecordSessionInterest(ses uint64, ks []cid.Cid) { + sim.interested.Add(ks, ses) + sim.wanted.Add(ks, ses) +} + +func (sim *SessionInterestManager) RemoveSessionInterest(ses uint64) []cid.Cid { + sim.wanted.RemoveSession(ses) + return sim.interested.RemoveSession(ses) +} + +func (sim *SessionInterestManager) RemoveSessionWants(ses uint64, wants []cid.Cid) { + sim.wanted.RemoveSessionKeys(ses, wants) +} + +func (sim *SessionInterestManager) FilterSessionInterested(ses uint64, ksets ...[]cid.Cid) [][]cid.Cid { + kres := make([][]cid.Cid, len(ksets)) + for i, ks := range ksets { + kres[i] = sim.interested.SessionHas(ses, ks).Keys() + } + return kres +} + +func (sim *SessionInterestManager) SplitWantedUnwanted(blks []blocks.Block) ([]blocks.Block, []blocks.Block) { + // Get the wanted block keys + ks := make([]cid.Cid, 0, len(blks)) + for _, b := range blks { + ks = append(ks, b.Cid()) + } + wantedKs := sim.wanted.Has(ks) + + // Separate the blocks into wanted and unwanted + wantedBlks := make([]blocks.Block, 0, len(blks)) + notWantedBlks := make([]blocks.Block, 0) + for _, b := range blks { + if wantedKs.Has(b.Cid()) { + wantedBlks = append(wantedBlks, b) + } else { + notWantedBlks = append(notWantedBlks, b) + } + } + return wantedBlks, notWantedBlks +} + +func (sim *SessionInterestManager) InterestedSessions(blks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) []uint64 { + ks := make([]cid.Cid, 0, len(blks)+len(haves)+len(dontHaves)) + ks = append(ks, blks...) + ks = append(ks, haves...) + ks = append(ks, dontHaves...)
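+	// Return the IDs of the sessions that registered interest in any of these keys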
+ + return sim.interested.SessionsFor(ks) +} diff --git a/sessioninterestmanager/sessioninterestmanager_test.go b/sessioninterestmanager/sessioninterestmanager_test.go new file mode 100644 index 00000000..d882cabc --- /dev/null +++ b/sessioninterestmanager/sessioninterestmanager_test.go @@ -0,0 +1,182 @@ +package sessioninterestmanager + +import ( + "testing" + + "github.com/ipfs/go-bitswap/testutil" + cid "github.com/ipfs/go-cid" +) + +func TestEmpty(t *testing.T) { + sim := New() + + ses := uint64(1) + cids := testutil.GenerateCids(2) + res := sim.FilterSessionInterested(ses, cids) + if len(res) != 1 || len(res[0]) > 0 { + t.Fatal("Expected no interest") + } + if len(sim.InterestedSessions(cids, []cid.Cid{}, []cid.Cid{})) > 0 { + t.Fatal("Expected no interest") + } +} + +func TestBasic(t *testing.T) { + sim := New() + + ses1 := uint64(1) + ses2 := uint64(2) + cids1 := testutil.GenerateCids(2) + cids2 := append(testutil.GenerateCids(1), cids1[1]) + sim.RecordSessionInterest(ses1, cids1) + + res := sim.FilterSessionInterested(ses1, cids1) + if len(res) != 1 || len(res[0]) != 2 { + t.Fatal("Expected 2 keys") + } + if len(sim.InterestedSessions(cids1, []cid.Cid{}, []cid.Cid{})) != 1 { + t.Fatal("Expected 1 session") + } + + sim.RecordSessionInterest(ses2, cids2) + res = sim.FilterSessionInterested(ses2, cids1[:1]) + if len(res) != 1 || len(res[0]) != 0 { + t.Fatal("Expected no interest") + } + res = sim.FilterSessionInterested(ses2, cids2) + if len(res) != 1 || len(res[0]) != 2 { + t.Fatal("Expected 2 keys") + } + + if len(sim.InterestedSessions(cids1[:1], []cid.Cid{}, []cid.Cid{})) != 1 { + t.Fatal("Expected 1 session") + } + if len(sim.InterestedSessions(cids1[1:], []cid.Cid{}, []cid.Cid{})) != 2 { + t.Fatal("Expected 2 sessions") + } +} + +func TestInterestedSessions(t *testing.T) { + sim := New() + + ses := uint64(1) + cids := testutil.GenerateCids(3) + sim.RecordSessionInterest(ses, cids[0:2]) + + if len(sim.InterestedSessions(cids, []cid.Cid{}, []cid.Cid{})) != 1 { + t.Fatal("Expected 1 session") + } + if len(sim.InterestedSessions(cids[0:1], []cid.Cid{}, []cid.Cid{})) != 1 { + t.Fatal("Expected 1 session") + } + if len(sim.InterestedSessions([]cid.Cid{}, cids, []cid.Cid{})) != 1 { + t.Fatal("Expected 1 session") + } + if len(sim.InterestedSessions([]cid.Cid{}, cids[0:1], []cid.Cid{})) != 1 { + t.Fatal("Expected 1 session") + } + if len(sim.InterestedSessions([]cid.Cid{}, []cid.Cid{}, cids)) != 1 { + t.Fatal("Expected 1 session") + } + if len(sim.InterestedSessions([]cid.Cid{}, []cid.Cid{}, cids[0:1])) != 1 { + t.Fatal("Expected 1 session") + } +} + +func TestRemoveSessionInterest(t *testing.T) { + sim := New() + + ses1 := uint64(1) + ses2 := uint64(2) + cids1 := testutil.GenerateCids(2) + cids2 := append(testutil.GenerateCids(1), cids1[1]) + sim.RecordSessionInterest(ses1, cids1) + sim.RecordSessionInterest(ses2, cids2) + sim.RemoveSessionInterest(ses1) + + res := sim.FilterSessionInterested(ses1, cids1) + if len(res) != 1 || len(res[0]) != 0 { + t.Fatal("Expected no interest") + } + + res = sim.FilterSessionInterested(ses2, cids1, cids2) + if len(res) != 2 { + t.Fatal("unexpected results size") + } + if len(res[0]) != 1 { + t.Fatal("Expected 1 key") + } + if len(res[1]) != 2 { + t.Fatal("Expected 2 keys") + } +} + +func TestSplitWantedUnwanted(t *testing.T) { + blks := testutil.GenerateBlocksOfSize(3, 1024) + sim := New() + ses1 := uint64(1) + ses2 := uint64(2) + + var cids []cid.Cid + for _, b := range blks { + cids = append(cids, b.Cid()) + } + + // ses1: + // ses2: + wanted, 
unwanted := sim.SplitWantedUnwanted(blks) + if len(wanted) > 0 { + t.Fatal("Expected no blocks") + } + if len(unwanted) != 3 { + t.Fatal("Expected 3 blocks") + } + + // ses1: 0 1 + // ses2: + sim.RecordSessionInterest(ses1, cids[0:2]) + wanted, unwanted = sim.SplitWantedUnwanted(blks) + if len(wanted) != 2 { + t.Fatal("Expected 2 blocks") + } + if len(unwanted) != 1 { + t.Fatal("Expected 1 block") + } + + // ses1: 1 + // ses2: 1 2 + sim.RecordSessionInterest(ses2, cids[1:]) + sim.RemoveSessionWants(ses1, cids[:1]) + + wanted, unwanted = sim.SplitWantedUnwanted(blks) + if len(wanted) != 2 { + t.Fatal("Expected 2 blocks") + } + if len(unwanted) != 1 { + t.Fatal("Expected 1 block") + } + + // ses1: + // ses2: 1 2 + sim.RemoveSessionWants(ses1, cids[1:2]) + + wanted, unwanted = sim.SplitWantedUnwanted(blks) + if len(wanted) != 2 { + t.Fatal("Expected 2 blocks") + } + if len(unwanted) != 1 { + t.Fatal("Expected 1 block") + } + + // ses1: + // ses2: 2 + sim.RemoveSessionWants(ses2, cids[1:2]) + + wanted, unwanted = sim.SplitWantedUnwanted(blks) + if len(wanted) != 1 { + t.Fatal("Expected 1 block") + } + if len(unwanted) != 2 { + t.Fatal("Expected 2 blocks") + } +} diff --git a/sessionmanager/sessionmanager.go b/sessionmanager/sessionmanager.go index c967a04a..3090e829 100644 --- a/sessionmanager/sessionmanager.go +++ b/sessionmanager/sessionmanager.go @@ -8,8 +8,10 @@ import ( cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" + bsbpm "github.com/ipfs/go-bitswap/blockpresencemanager" notifications "github.com/ipfs/go-bitswap/notifications" bssession "github.com/ipfs/go-bitswap/session" + bssim "github.com/ipfs/go-bitswap/sessioninterestmanager" exchange "github.com/ipfs/go-ipfs-exchange-interface" peer "github.com/libp2p/go-libp2p-core/peer" ) @@ -17,52 +19,51 @@ import ( // Session is a session that is managed by the session manager type Session interface { exchange.Fetcher - ReceiveFrom(peer.ID, []cid.Cid) - IsWanted(cid.Cid) bool -} - -type sesTrk struct { - session Session - pm bssession.PeerManager - srs bssession.RequestSplitter + ID() uint64 + ReceiveFrom(peer.ID, []cid.Cid, []cid.Cid, []cid.Cid) } // SessionFactory generates a new session for the SessionManager to track. -type SessionFactory func(ctx context.Context, id uint64, pm bssession.PeerManager, srs bssession.RequestSplitter, notif notifications.PubSub, provSearchDelay time.Duration, rebroadcastDelay delay.D) Session - -// RequestSplitterFactory generates a new request splitter for a session. -type RequestSplitterFactory func(ctx context.Context) bssession.RequestSplitter +type SessionFactory func(ctx context.Context, id uint64, sprm bssession.SessionPeerManager, sim *bssim.SessionInterestManager, pm bssession.PeerManager, bpm *bsbpm.BlockPresenceManager, notif notifications.PubSub, provSearchDelay time.Duration, rebroadcastDelay delay.D, self peer.ID) Session // PeerManagerFactory generates a new peer manager for a session. -type PeerManagerFactory func(ctx context.Context, id uint64) bssession.PeerManager +type PeerManagerFactory func(ctx context.Context, id uint64) bssession.SessionPeerManager // SessionManager is responsible for creating, managing, and dispatching to // sessions.
type SessionManager struct { ctx context.Context sessionFactory SessionFactory + sessionInterestManager *bssim.SessionInterestManager peerManagerFactory PeerManagerFactory - requestSplitterFactory RequestSplitterFactory + blockPresenceManager *bsbpm.BlockPresenceManager + peerManager bssession.PeerManager notif notifications.PubSub // Sessions sessLk sync.RWMutex - sessions []sesTrk + sessions map[uint64]Session // Session Index sessIDLk sync.Mutex sessID uint64 + + self peer.ID } // New creates a new SessionManager. -func New(ctx context.Context, sessionFactory SessionFactory, peerManagerFactory PeerManagerFactory, - requestSplitterFactory RequestSplitterFactory, notif notifications.PubSub) *SessionManager { +func New(ctx context.Context, sessionFactory SessionFactory, sessionInterestManager *bssim.SessionInterestManager, peerManagerFactory PeerManagerFactory, + blockPresenceManager *bsbpm.BlockPresenceManager, peerManager bssession.PeerManager, notif notifications.PubSub, self peer.ID) *SessionManager { return &SessionManager{ ctx: ctx, sessionFactory: sessionFactory, + sessionInterestManager: sessionInterestManager, peerManagerFactory: peerManagerFactory, - requestSplitterFactory: requestSplitterFactory, + blockPresenceManager: blockPresenceManager, + peerManager: peerManager, notif: notif, + sessions: make(map[uint64]Session), + self: self, } } @@ -75,66 +76,53 @@ func (sm *SessionManager) NewSession(ctx context.Context, sessionctx, cancel := context.WithCancel(ctx) pm := sm.peerManagerFactory(sessionctx, id) - srs := sm.requestSplitterFactory(sessionctx) - session := sm.sessionFactory(sessionctx, id, pm, srs, sm.notif, provSearchDelay, rebroadcastDelay) - tracked := sesTrk{session, pm, srs} + session := sm.sessionFactory(sessionctx, id, pm, sm.sessionInterestManager, sm.peerManager, sm.blockPresenceManager, sm.notif, provSearchDelay, rebroadcastDelay, sm.self) sm.sessLk.Lock() - sm.sessions = append(sm.sessions, tracked) + sm.sessions[id] = session sm.sessLk.Unlock() go func() { defer cancel() select { case <-sm.ctx.Done(): - sm.removeSession(tracked) + sm.removeSession(id) case <-ctx.Done(): - sm.removeSession(tracked) + sm.removeSession(id) } }() return session } -func (sm *SessionManager) removeSession(session sesTrk) { +func (sm *SessionManager) removeSession(sesid uint64) { sm.sessLk.Lock() defer sm.sessLk.Unlock() - for i := 0; i < len(sm.sessions); i++ { - if sm.sessions[i] == session { - sm.sessions[i] = sm.sessions[len(sm.sessions)-1] - sm.sessions[len(sm.sessions)-1] = sesTrk{} // free memory. - sm.sessions = sm.sessions[:len(sm.sessions)-1] - return - } - } + + delete(sm.sessions, sesid) } -// GetNextSessionID returns the next sequentional identifier for a session. +// GetNextSessionID returns the next sequential identifier for a session. func (sm *SessionManager) GetNextSessionID() uint64 { sm.sessIDLk.Lock() defer sm.sessIDLk.Unlock() + sm.sessID++ return sm.sessID } -// ReceiveFrom receives block CIDs from a peer and dispatches to sessions. -func (sm *SessionManager) ReceiveFrom(from peer.ID, ks []cid.Cid) { - sm.sessLk.RLock() - defer sm.sessLk.RUnlock() - - for _, s := range sm.sessions { - s.session.ReceiveFrom(from, ks) - } -} +func (sm *SessionManager) ReceiveFrom(p peer.ID, blks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) []Session { + sessions := make([]Session, 0) -// IsWanted indicates whether any of the sessions are waiting to receive -// the block with the given CID. 
-func (sm *SessionManager) IsWanted(cid cid.Cid) bool { - sm.sessLk.RLock() - defer sm.sessLk.RUnlock() + // Notify each session that is interested in the blocks / HAVEs / DONT_HAVEs + for _, id := range sm.sessionInterestManager.InterestedSessions(blks, haves, dontHaves) { + sm.sessLk.RLock() + sess, ok := sm.sessions[id] + sm.sessLk.RUnlock() - for _, s := range sm.sessions { - if s.session.IsWanted(cid) { - return true + if ok { + sess.ReceiveFrom(p, blks, haves, dontHaves) + sessions = append(sessions, sess) } } - return false + + return sessions } diff --git a/sessionmanager/sessionmanager_test.go b/sessionmanager/sessionmanager_test.go index 95c12b12..8f25a952 100644 --- a/sessionmanager/sessionmanager_test.go +++ b/sessionmanager/sessionmanager_test.go @@ -7,10 +7,11 @@ import ( delay "github.com/ipfs/go-ipfs-delay" + bsbpm "github.com/ipfs/go-bitswap/blockpresencemanager" notifications "github.com/ipfs/go-bitswap/notifications" + bspm "github.com/ipfs/go-bitswap/peermanager" bssession "github.com/ipfs/go-bitswap/session" - bssd "github.com/ipfs/go-bitswap/sessiondata" - "github.com/ipfs/go-bitswap/testutil" + bssim "github.com/ipfs/go-bitswap/sessioninterestmanager" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" @@ -18,12 +19,12 @@ import ( ) type fakeSession struct { - wanted []cid.Cid - ks []cid.Cid - id uint64 - pm *fakePeerManager - srs *fakeRequestSplitter - notif notifications.PubSub + ks []cid.Cid + wantBlocks []cid.Cid + wantHaves []cid.Cid + id uint64 + pm *fakeSesPeerManager + notif notifications.PubSub } func (*fakeSession) GetBlock(context.Context, cid.Cid) (blocks.Block, error) { @@ -32,149 +33,124 @@ func (*fakeSession) GetBlock(context.Context, cid.Cid) (blocks.Block, error) { func (*fakeSession) GetBlocks(context.Context, []cid.Cid) (<-chan blocks.Block, error) { return nil, nil } -func (fs *fakeSession) IsWanted(c cid.Cid) bool { - for _, ic := range fs.wanted { - if c == ic { - return true - } - } - return false +func (fs *fakeSession) ID() uint64 { + return fs.id } -func (fs *fakeSession) ReceiveFrom(p peer.ID, ks []cid.Cid) { +func (fs *fakeSession) ReceiveFrom(p peer.ID, ks []cid.Cid, wantBlocks []cid.Cid, wantHaves []cid.Cid) { fs.ks = append(fs.ks, ks...) + fs.wantBlocks = append(fs.wantBlocks, wantBlocks...) + fs.wantHaves = append(fs.wantHaves, wantHaves...) 
} -type fakePeerManager struct { - id uint64 +type fakeSesPeerManager struct { } -func (*fakePeerManager) FindMorePeers(context.Context, cid.Cid) {} -func (*fakePeerManager) GetOptimizedPeers() []bssd.OptimizedPeer { return nil } -func (*fakePeerManager) RecordPeerRequests([]peer.ID, []cid.Cid) {} -func (*fakePeerManager) RecordPeerResponse(peer.ID, []cid.Cid) {} -func (*fakePeerManager) RecordCancels(c []cid.Cid) {} - -type fakeRequestSplitter struct { -} +func (*fakeSesPeerManager) ReceiveFrom(peer.ID, []cid.Cid, []cid.Cid) bool { return true } +func (*fakeSesPeerManager) Peers() *peer.Set { return nil } +func (*fakeSesPeerManager) FindMorePeers(context.Context, cid.Cid) {} +func (*fakeSesPeerManager) RecordPeerRequests([]peer.ID, []cid.Cid) {} +func (*fakeSesPeerManager) RecordPeerResponse(peer.ID, []cid.Cid) {} +func (*fakeSesPeerManager) RecordCancels(c []cid.Cid) {} -func (frs *fakeRequestSplitter) SplitRequest(optimizedPeers []bssd.OptimizedPeer, keys []cid.Cid) []bssd.PartialRequest { - return nil +type fakePeerManager struct { } -func (frs *fakeRequestSplitter) RecordDuplicateBlock() {} -func (frs *fakeRequestSplitter) RecordUniqueBlock() {} -var nextWanted []cid.Cid +func (*fakePeerManager) RegisterSession(peer.ID, bspm.Session) bool { return true } +func (*fakePeerManager) UnregisterSession(uint64) {} +func (*fakePeerManager) SendWants(context.Context, peer.ID, []cid.Cid, []cid.Cid) {} func sessionFactory(ctx context.Context, id uint64, + sprm bssession.SessionPeerManager, + sim *bssim.SessionInterestManager, pm bssession.PeerManager, - srs bssession.RequestSplitter, + bpm *bsbpm.BlockPresenceManager, notif notifications.PubSub, provSearchDelay time.Duration, - rebroadcastDelay delay.D) Session { + rebroadcastDelay delay.D, + self peer.ID) Session { return &fakeSession{ - wanted: nextWanted, - id: id, - pm: pm.(*fakePeerManager), - srs: srs.(*fakeRequestSplitter), - notif: notif, + id: id, + pm: sprm.(*fakeSesPeerManager), + notif: notif, } } -func peerManagerFactory(ctx context.Context, id uint64) bssession.PeerManager { - return &fakePeerManager{id} -} - -func requestSplitterFactory(ctx context.Context) bssession.RequestSplitter { - return &fakeRequestSplitter{} +func peerManagerFactory(ctx context.Context, id uint64) bssession.SessionPeerManager { + return &fakeSesPeerManager{} } -func TestAddingSessions(t *testing.T) { +func TestReceiveFrom(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() notif := notifications.New() defer notif.Shutdown() - sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory, notif) + sim := bssim.New() + bpm := bsbpm.New() + pm := &fakePeerManager{} + sm := New(ctx, sessionFactory, sim, peerManagerFactory, bpm, pm, notif, "") p := peer.ID(123) block := blocks.NewBlock([]byte("block")) - // we'll be interested in all blocks for this test - nextWanted = []cid.Cid{block.Cid()} - currentID := sm.GetNextSessionID() firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - if firstSession.id != firstSession.pm.id || - firstSession.id != currentID+1 { - t.Fatal("session does not have correct id set") - } secondSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - if secondSession.id != secondSession.pm.id || - secondSession.id != firstSession.id+1 { - t.Fatal("session does not have correct id set") - } - sm.GetNextSessionID() thirdSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - if 
thirdSession.id != thirdSession.pm.id || - thirdSession.id != secondSession.id+2 { - t.Fatal("session does not have correct id set") - } - sm.ReceiveFrom(p, []cid.Cid{block.Cid()}) + + sim.RecordSessionInterest(firstSession.ID(), []cid.Cid{block.Cid()}) + sim.RecordSessionInterest(thirdSession.ID(), []cid.Cid{block.Cid()}) + + sm.ReceiveFrom(p, []cid.Cid{block.Cid()}, []cid.Cid{}, []cid.Cid{}) if len(firstSession.ks) == 0 || - len(secondSession.ks) == 0 || + len(secondSession.ks) > 0 || len(thirdSession.ks) == 0 { t.Fatal("should have received blocks but didn't") } -} -func TestIsWanted(t *testing.T) { - ctx := context.Background() - ctx, cancel := context.WithCancel(ctx) - defer cancel() - notif := notifications.New() - defer notif.Shutdown() - sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory, notif) - - blks := testutil.GenerateBlocksOfSize(4, 1024) - var cids []cid.Cid - for _, b := range blks { - cids = append(cids, b.Cid()) + sm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{block.Cid()}, []cid.Cid{}) + if len(firstSession.wantBlocks) == 0 || + len(secondSession.wantBlocks) > 0 || + len(thirdSession.wantBlocks) == 0 { + t.Fatal("should have received want-blocks but didn't") } - nextWanted = []cid.Cid{cids[0], cids[1]} - _ = sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - nextWanted = []cid.Cid{cids[0], cids[2]} - _ = sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - - if !sm.IsWanted(cids[0]) || - !sm.IsWanted(cids[1]) || - !sm.IsWanted(cids[2]) { - t.Fatal("expected unwanted but session manager did want cid") - } - if sm.IsWanted(cids[3]) { - t.Fatal("expected wanted but session manager did not want cid") + sm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{block.Cid()}) + if len(firstSession.wantHaves) == 0 || + len(secondSession.wantHaves) > 0 || + len(thirdSession.wantHaves) == 0 { + t.Fatal("should have received want-haves but didn't") } } -func TestRemovingPeersWhenManagerContextCancelled(t *testing.T) { +func TestReceiveBlocksWhenManagerContextCancelled(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) + defer cancel() notif := notifications.New() defer notif.Shutdown() - sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory, notif) + sim := bssim.New() + bpm := bsbpm.New() + pm := &fakePeerManager{} + sm := New(ctx, sessionFactory, sim, peerManagerFactory, bpm, pm, notif, "") p := peer.ID(123) block := blocks.NewBlock([]byte("block")) - // we'll be interested in all blocks for this test - nextWanted = []cid.Cid{block.Cid()} + firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) secondSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) thirdSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + sim.RecordSessionInterest(firstSession.ID(), []cid.Cid{block.Cid()}) + sim.RecordSessionInterest(secondSession.ID(), []cid.Cid{block.Cid()}) + sim.RecordSessionInterest(thirdSession.ID(), []cid.Cid{block.Cid()}) + cancel() + // wait for sessions to get removed time.Sleep(10 * time.Millisecond) - sm.ReceiveFrom(p, []cid.Cid{block.Cid()}) + + sm.ReceiveFrom(p, []cid.Cid{block.Cid()}, []cid.Cid{}, []cid.Cid{}) if len(firstSession.ks) > 0 || len(secondSession.ks) > 0 || len(thirdSession.ks) > 0 { @@ -182,27 +158,35 @@ func TestRemovingPeersWhenManagerContextCancelled(t *testing.T) { } } -func TestRemovingPeersWhenSessionContextCancelled(t *testing.T) { +func 
TestReceiveBlocksWhenSessionContextCancelled(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() notif := notifications.New() defer notif.Shutdown() - sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory, notif) + sim := bssim.New() + bpm := bsbpm.New() + pm := &fakePeerManager{} + sm := New(ctx, sessionFactory, sim, peerManagerFactory, bpm, pm, notif, "") p := peer.ID(123) block := blocks.NewBlock([]byte("block")) - // we'll be interested in all blocks for this test - nextWanted = []cid.Cid{block.Cid()} + firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) sessionCtx, sessionCancel := context.WithCancel(ctx) secondSession := sm.NewSession(sessionCtx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) thirdSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + sim.RecordSessionInterest(firstSession.ID(), []cid.Cid{block.Cid()}) + sim.RecordSessionInterest(secondSession.ID(), []cid.Cid{block.Cid()}) + sim.RecordSessionInterest(thirdSession.ID(), []cid.Cid{block.Cid()}) + sessionCancel() + // wait for sessions to get removed time.Sleep(10 * time.Millisecond) - sm.ReceiveFrom(p, []cid.Cid{block.Cid()}) + + sm.ReceiveFrom(p, []cid.Cid{block.Cid()}, []cid.Cid{}, []cid.Cid{}) if len(firstSession.ks) == 0 || len(secondSession.ks) > 0 || len(thirdSession.ks) == 0 { diff --git a/sessionpeermanager/sessionpeermanager.go b/sessionpeermanager/sessionpeermanager.go index 3c4e1374..060df091 100644 --- a/sessionpeermanager/sessionpeermanager.go +++ b/sessionpeermanager/sessionpeermanager.go @@ -8,11 +8,14 @@ import ( "time" bssd "github.com/ipfs/go-bitswap/sessiondata" + logging "github.com/ipfs/go-log" cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p-core/peer" ) +var log = logging.Logger("bs:sprmgr") + const ( defaultTimeoutDuration = 5 * time.Second maxOptimizedPeers = 32 @@ -41,6 +44,7 @@ type SessionPeerManager struct { ctx context.Context tagger PeerTagger providerFinder PeerProviderFinder + peers *peer.Set tag string id uint64 @@ -61,7 +65,8 @@ func New(ctx context.Context, id uint64, tagger PeerTagger, providerFinder PeerP id: id, tagger: tagger, providerFinder: providerFinder, - peerMessages: make(chan peerMessage, 16), + peers: peer.NewSet(), + peerMessages: make(chan peerMessage, 128), activePeers: make(map[peer.ID]*peerData), broadcastLatency: newLatencyTracker(), timeoutDuration: defaultTimeoutDuration, @@ -73,6 +78,19 @@ func New(ctx context.Context, id uint64, tagger PeerTagger, providerFinder PeerP return spm } +func (spm *SessionPeerManager) ReceiveFrom(p peer.ID, ks []cid.Cid, haves []cid.Cid) bool { + if (len(ks) > 0 || len(haves) > 0) && !spm.peers.Contains(p) { + log.Infof("Added peer %s to session: %d peers\n", p, spm.peers.Size()) + spm.peers.Add(p) + return true + } + return false +} + +func (spm *SessionPeerManager) Peers() *peer.Set { + return spm.peers +} + // RecordPeerResponse records that a peer received some blocks, and adds the // peer to the list of peers if it wasn't already added func (spm *SessionPeerManager) RecordPeerResponse(p peer.ID, ks []cid.Cid) { @@ -176,6 +194,11 @@ func (spm *SessionPeerManager) insertPeer(p peer.ID, data *peerData) { } else { spm.unoptimizedPeersArr = append(spm.unoptimizedPeersArr, p) } + + if !spm.peers.Contains(p) { + log.Infof("Added peer %s to session: %d peers\n", p, spm.peers.Size()) + spm.peers.Add(p) + } } func (spm *SessionPeerManager) removeOptimizedPeer(p peer.ID) { diff --git
a/sessionwantlist/sessionwantlist.go b/sessionwantlist/sessionwantlist.go new file mode 100644 index 00000000..d9814739 --- /dev/null +++ b/sessionwantlist/sessionwantlist.go @@ -0,0 +1,126 @@ +package sessionwantlist + +import ( + "sync" + + cid "github.com/ipfs/go-cid" +) + +type SessionWantlist struct { + sync.RWMutex + wants map[cid.Cid]map[uint64]struct{} +} + +func NewSessionWantlist() *SessionWantlist { + return &SessionWantlist{ + wants: make(map[cid.Cid]map[uint64]struct{}), + } +} + +func (swl *SessionWantlist) Add(ks []cid.Cid, ses uint64) { + swl.Lock() + defer swl.Unlock() + + for _, c := range ks { + if _, ok := swl.wants[c]; !ok { + swl.wants[c] = make(map[uint64]struct{}) + } + swl.wants[c][ses] = struct{}{} + } +} + +func (swl *SessionWantlist) RemoveKeys(ks []cid.Cid) { + swl.Lock() + defer swl.Unlock() + + for _, c := range ks { + delete(swl.wants, c) + } +} + +func (swl *SessionWantlist) RemoveSession(ses uint64) []cid.Cid { + swl.Lock() + defer swl.Unlock() + + deletedKs := make([]cid.Cid, 0) + for c := range swl.wants { + delete(swl.wants[c], ses) + if len(swl.wants[c]) == 0 { + delete(swl.wants, c) + deletedKs = append(deletedKs, c) + } + } + + return deletedKs +} + +func (swl *SessionWantlist) RemoveSessionKeys(ses uint64, ks []cid.Cid) { + swl.Lock() + defer swl.Unlock() + + for _, c := range ks { + if _, ok := swl.wants[c]; ok { + delete(swl.wants[c], ses) + if len(swl.wants[c]) == 0 { + delete(swl.wants, c) + } + } + } +} + +func (swl *SessionWantlist) Keys() []cid.Cid { + swl.RLock() + defer swl.RUnlock() + + ks := make([]cid.Cid, 0, len(swl.wants)) + for c := range swl.wants { + ks = append(ks, c) + } + return ks +} + +func (swl *SessionWantlist) SessionsFor(ks []cid.Cid) []uint64 { + swl.RLock() + defer swl.RUnlock() + + sesMap := make(map[uint64]struct{}) + for _, c := range ks { + for s := range swl.wants[c] { + sesMap[s] = struct{}{} + } + } + + ses := make([]uint64, 0, len(sesMap)) + for s := range sesMap { + ses = append(ses, s) + } + return ses +} + +func (swl *SessionWantlist) Has(ks []cid.Cid) *cid.Set { + swl.RLock() + defer swl.RUnlock() + + has := cid.NewSet() + for _, c := range ks { + if _, ok := swl.wants[c]; ok { + has.Add(c) + } + } + return has +} + +func (swl *SessionWantlist) SessionHas(ses uint64, ks []cid.Cid) *cid.Set { + swl.RLock() + defer swl.RUnlock() + + has := cid.NewSet() + for _, c := range ks { + if sesMap, cok := swl.wants[c]; cok { + if _, sok := sesMap[ses]; sok { + has.Add(c) + } + } + } + return has +} diff --git a/sessionwantlist/sessionwantlist_test.go b/sessionwantlist/sessionwantlist_test.go new file mode 100644 index 00000000..0b89b8ae --- /dev/null +++ b/sessionwantlist/sessionwantlist_test.go @@ -0,0 +1,258 @@ +package sessionwantlist + +import ( + "os" + "testing" + + "github.com/ipfs/go-bitswap/testutil" + + cid "github.com/ipfs/go-cid" +) + +var c0 cid.Cid +var c1 cid.Cid +var c2 cid.Cid + +const s0 = uint64(0) +const s1 = uint64(1) + +func setup() { + cids := testutil.GenerateCids(3) + c0 = cids[0] + c1 = cids[1] + c2 = cids[2] +} + +func TestMain(m *testing.M) { + setup() + os.Exit(m.Run()) +} + +func TestEmpty(t *testing.T) { + swl := NewSessionWantlist() + + if len(swl.Keys()) != 0 { + t.Fatal("Expected Keys() to be empty") + } + if len(swl.SessionsFor([]cid.Cid{c0})) != 0 { + t.Fatal("Expected SessionsFor() to be empty") + } +} + +func TestSimpleAdd(t *testing.T) { + swl := NewSessionWantlist() + + // s0: c0 + swl.Add([]cid.Cid{c0}, s0) + if len(swl.Keys()) != 1 { + t.Fatal("Expected Keys() to have length 1") 
+ } + if !swl.Keys()[0].Equals(c0) { + t.Fatal("Expected Keys() to be [cid0]") + } + if len(swl.SessionsFor([]cid.Cid{c0})) != 1 { + t.Fatal("Expected SessionsFor() to have length 1") + } + if swl.SessionsFor([]cid.Cid{c0})[0] != s0 { + t.Fatal("Expected SessionsFor() to be [s0]") + } + + // s0: c0, c1 + swl.Add([]cid.Cid{c1}, s0) + if len(swl.Keys()) != 2 { + t.Fatal("Expected Keys() to have length 2") + } + if !testutil.MatchKeysIgnoreOrder(swl.Keys(), []cid.Cid{c0, c1}) { + t.Fatal("Expected Keys() to contain [cid0, cid1]") + } + if len(swl.SessionsFor([]cid.Cid{c0})) != 1 { + t.Fatal("Expected SessionsFor() to have length 1") + } + if swl.SessionsFor([]cid.Cid{c0})[0] != s0 { + t.Fatal("Expected SessionsFor() to be [s0]") + } + + // s0: c0, c1 + // s1: c0 + swl.Add([]cid.Cid{c0}, s1) + if len(swl.Keys()) != 2 { + t.Fatal("Expected Keys() to have length 2") + } + if !testutil.MatchKeysIgnoreOrder(swl.Keys(), []cid.Cid{c0, c1}) { + t.Fatal("Expected Keys() to contain [cid0, cid1]") + } + if len(swl.SessionsFor([]cid.Cid{c0})) != 2 { + t.Fatal("Expected SessionsFor() to have length 2") + } +} + +func TestMultiKeyAdd(t *testing.T) { + swl := NewSessionWantlist() + + // s0: c0, c1 + swl.Add([]cid.Cid{c0, c1}, s0) + if len(swl.Keys()) != 2 { + t.Fatal("Expected Keys() to have length 2") + } + if !testutil.MatchKeysIgnoreOrder(swl.Keys(), []cid.Cid{c0, c1}) { + t.Fatal("Expected Keys() to contain [cid0, cid1]") + } + if len(swl.SessionsFor([]cid.Cid{c0})) != 1 { + t.Fatal("Expected SessionsFor() to have length 1") + } + if swl.SessionsFor([]cid.Cid{c0})[0] != s0 { + t.Fatal("Expected SessionsFor() to be [s0]") + } +} + +func TestSessionHas(t *testing.T) { + swl := NewSessionWantlist() + + if swl.Has([]cid.Cid{c0, c1}).Len() > 0 { + t.Fatal("Expected Has([c0, c1]) to be []") + } + if swl.SessionHas(s0, []cid.Cid{c0, c1}).Len() > 0 { + t.Fatal("Expected SessionHas(s0, [c0, c1]) to be []") + } + + // s0: c0 + swl.Add([]cid.Cid{c0}, s0) + if !matchSet(swl.Has([]cid.Cid{c0, c1}), []cid.Cid{c0}) { + t.Fatal("Expected Has([c0, c1]) to be [c0]") + } + if !matchSet(swl.SessionHas(s0, []cid.Cid{c0, c1}), []cid.Cid{c0}) { + t.Fatal("Expected SessionHas(s0, [c0, c1]) to be [c0]") + } + if swl.SessionHas(s1, []cid.Cid{c0, c1}).Len() > 0 { + t.Fatal("Expected SessionHas(s1, [c0, c1]) to be []") + } + + // s0: c0, c1 + swl.Add([]cid.Cid{c1}, s0) + if !matchSet(swl.Has([]cid.Cid{c0, c1}), []cid.Cid{c0, c1}) { + t.Fatal("Expected Has([c0, c1]) to be [c0, c1]") + } + if !matchSet(swl.SessionHas(s0, []cid.Cid{c0, c1}), []cid.Cid{c0, c1}) { + t.Fatal("Expected SessionHas(s0, [c0, c1]) to be [c0, c1]") + } + + // s0: c0, c1 + // s1: c0 + swl.Add([]cid.Cid{c0}, s1) + if len(swl.Keys()) != 2 { + t.Fatal("Expected Keys() to have length 2") + } + if !matchSet(swl.Has([]cid.Cid{c0, c1}), []cid.Cid{c0, c1}) { + t.Fatal("Expected Has([c0, c1]) to be [c0, c1]") + } + if !matchSet(swl.SessionHas(s0, []cid.Cid{c0, c1}), []cid.Cid{c0, c1}) { + t.Fatal("Expected SessionHas(s0, [c0, c1]) to be [c0, c1]") + } + if !matchSet(swl.SessionHas(s1, []cid.Cid{c0, c1}), []cid.Cid{c0}) { + t.Fatal("Expected SessionHas(s1, [c0, c1]) to be [c0]") + } +} + +func TestSimpleRemoveKeys(t *testing.T) { + swl := NewSessionWantlist() + + // s0: c0, c1 + // s1: c0 + swl.Add([]cid.Cid{c0, c1}, s0) + swl.Add([]cid.Cid{c0}, s1) + + // s0: c1 + swl.RemoveKeys([]cid.Cid{c0}) + if len(swl.Keys()) != 1 { + t.Fatal("Expected Keys() to have length 1") + } + if !swl.Keys()[0].Equals(c1) { + t.Fatal("Expected Keys() to be [cid1]") + } + if 
len(swl.SessionsFor([]cid.Cid{c0})) != 0 { + t.Fatal("Expected SessionsFor(c0) to be empty") + } + if len(swl.SessionsFor([]cid.Cid{c1})) != 1 { + t.Fatal("Expected SessionsFor(c1) to have length 1") + } + if swl.SessionsFor([]cid.Cid{c1})[0] != s0 { + t.Fatal("Expected SessionsFor(c1) to be [s0]") + } +} + +func TestMultiRemoveKeys(t *testing.T) { + swl := NewSessionWantlist() + + // s0: c0, c1 + // s1: c0 + swl.Add([]cid.Cid{c0, c1}, s0) + swl.Add([]cid.Cid{c0}, s1) + + // + swl.RemoveKeys([]cid.Cid{c0, c1}) + if len(swl.Keys()) != 0 { + t.Fatal("Expected Keys() to be empty") + } + if len(swl.SessionsFor([]cid.Cid{c0})) != 0 { + t.Fatal("Expected SessionsFor() to be empty") + } +} + +func TestRemoveSession(t *testing.T) { + swl := NewSessionWantlist() + + // s0: c0, c1 + // s1: c0 + swl.Add([]cid.Cid{c0, c1}, s0) + swl.Add([]cid.Cid{c0}, s1) + + // s1: c0 + swl.RemoveSession(s0) + if len(swl.Keys()) != 1 { + t.Fatal("Expected Keys() to have length 1") + } + if !swl.Keys()[0].Equals(c0) { + t.Fatal("Expected Keys() to be [cid0]") + } + if len(swl.SessionsFor([]cid.Cid{c1})) != 0 { + t.Fatal("Expected SessionsFor(c1) to be empty") + } + if len(swl.SessionsFor([]cid.Cid{c0})) != 1 { + t.Fatal("Expected SessionsFor(c0) to have length 1") + } + if swl.SessionsFor([]cid.Cid{c0})[0] != s1 { + t.Fatal("Expected SessionsFor(c0) to be [s1]") + } +} + +func TestRemoveSessionKeys(t *testing.T) { + swl := NewSessionWantlist() + + // s0: c0, c1, c2 + // s1: c0 + swl.Add([]cid.Cid{c0, c1, c2}, s0) + swl.Add([]cid.Cid{c0}, s1) + + // s0: c2 + // s1: c0 + swl.RemoveSessionKeys(s0, []cid.Cid{c0, c1}) + if !matchSet(swl.SessionHas(s0, []cid.Cid{c0, c1, c2}), []cid.Cid{c2}) { + t.Fatal("Expected SessionHas(s0, [c0, c1, c2]) to be [c2]") + } + if !matchSet(swl.SessionHas(s1, []cid.Cid{c0, c1, c2}), []cid.Cid{c0}) { + t.Fatal("Expected SessionHas(s1, [c0, c1, c2]) to be [c0]") + } +} + +func matchSet(ks1 *cid.Set, ks2 []cid.Cid) bool { + if ks1.Len() != len(ks2) { + return false + } + + for _, k := range ks2 { + if !ks1.Has(k) { + return false + } + } + return true +} diff --git a/testinstance/testinstance.go b/testinstance/testinstance.go index be9eb10f..f0c85514 100644 --- a/testinstance/testinstance.go +++ b/testinstance/testinstance.go @@ -55,7 +55,8 @@ func (g *InstanceGenerator) Next() Instance { return NewInstance(g.ctx, g.net, p, g.bsOptions...) } -// Instances creates N test instances of bitswap + dependencies +// Instances creates N test instances of bitswap + dependencies and connects +// them to each other func (g *InstanceGenerator) Instances(n int) []Instance { var instances []Instance for j := 0; j < n; j++ { diff --git a/testnet/interface.go b/testnet/interface.go index b6616256..b49dd80a 100644 --- a/testnet/interface.go +++ b/testnet/interface.go @@ -4,13 +4,13 @@ import ( bsnet "github.com/ipfs/go-bitswap/network" "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p-testing/net" + tnet "github.com/libp2p/go-libp2p-testing/net" ) // Network is an interface for generating bitswap network interfaces // based on a test network. 
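+// The Adapter method takes optional bsnet.NetOpt arguments so tests can tune
+// the generated network interface; the options are passed through to the
+// underlying bitswap network constructor.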
type Network interface { - Adapter(tnet.Identity) bsnet.BitSwapNetwork + Adapter(tnet.Identity, ...bsnet.NetOpt) bsnet.BitSwapNetwork HasPeer(peer.ID) bool } diff --git a/testnet/network_test.go b/testnet/network_test.go index 350e95ee..89f3d68f 100644 --- a/testnet/network_test.go +++ b/testnet/network_test.go @@ -13,7 +13,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs-routing/mock" "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p-testing/net" + tnet "github.com/libp2p/go-libp2p-testing/net" ) func TestSendMessageAsyncButWaitForResponse(t *testing.T) { diff --git a/testnet/peernet.go b/testnet/peernet.go index ffbe1026..5e643069 100644 --- a/testnet/peernet.go +++ b/testnet/peernet.go @@ -9,7 +9,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs-routing/mock" "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p-testing/net" + tnet "github.com/libp2p/go-libp2p-testing/net" mockpeernet "github.com/libp2p/go-libp2p/p2p/net/mock" ) @@ -23,13 +23,13 @@ func StreamNet(ctx context.Context, net mockpeernet.Mocknet, rs mockrouting.Serv return &peernet{net, rs}, nil } -func (pn *peernet) Adapter(p tnet.Identity) bsnet.BitSwapNetwork { +func (pn *peernet) Adapter(p tnet.Identity, opts ...bsnet.NetOpt) bsnet.BitSwapNetwork { client, err := pn.Mocknet.AddPeer(p.PrivateKey(), p.Address()) if err != nil { panic(err.Error()) } routing := pn.routingserver.ClientWithDatastore(context.TODO(), p, ds.NewMapDatastore()) - return bsnet.NewFromIpfsHost(client, routing) + return bsnet.NewFromIpfsHost(client, routing, opts...) } func (pn *peernet) HasPeer(p peer.ID) bool { diff --git a/testnet/virtual.go b/testnet/virtual.go index 8421c2db..9a92d1c7 100644 --- a/testnet/virtual.go +++ b/testnet/virtual.go @@ -14,17 +14,14 @@ import ( cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" mockrouting "github.com/ipfs/go-ipfs-routing/mock" - logging "github.com/ipfs/go-log" "github.com/libp2p/go-libp2p-core/connmgr" "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/routing" - "github.com/libp2p/go-libp2p-testing/net" + tnet "github.com/libp2p/go-libp2p-testing/net" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" ) -var log = logging.Logger("bstestnet") - // VirtualNetwork generates a new testnet instance - a fake network that // is used to simulate sending messages. func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { @@ -87,7 +84,7 @@ type receiverQueue struct { lk sync.Mutex } -func (n *network) Adapter(p tnet.Identity) bsnet.BitSwapNetwork { +func (n *network) Adapter(p tnet.Identity, opts ...bsnet.NetOpt) bsnet.BitSwapNetwork { n.mu.Lock() defer n.mu.Unlock() @@ -177,6 +174,10 @@ type networkClient struct { stats bsnet.Stats } +func (nc *networkClient) Self() peer.ID { + return nc.local +} + func (nc *networkClient) SendMessage( ctx context.Context, to peer.ID, @@ -197,7 +198,6 @@ func (nc *networkClient) Stats() bsnet.Stats { // FindProvidersAsync returns a channel of providers for the given key. func (nc *networkClient) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID { - // NB: this function duplicates the AddrInfo -> ID transformation in the // bitswap network adapter. Not to worry. This network client will be // deprecated once the ipfsnet.Mock is added. 
The code below is only @@ -240,6 +240,10 @@ func (mp *messagePasser) Reset() error { return nil } +func (mp *messagePasser) SupportsHave() bool { + return true +} + func (nc *networkClient) NewMessageSender(ctx context.Context, p peer.ID) (bsnet.MessageSender, error) { return &messagePasser{ net: nc, @@ -260,7 +264,6 @@ func (nc *networkClient) SetDelegate(r bsnet.Receiver) { func (nc *networkClient) ConnectTo(_ context.Context, p peer.ID) error { nc.network.mu.Lock() - otherClient, ok := nc.network.clients[p] if !ok { nc.network.mu.Unlock() @@ -270,19 +273,38 @@ func (nc *networkClient) ConnectTo(_ context.Context, p peer.ID) error { tag := tagForPeers(nc.local, p) if _, ok := nc.network.conns[tag]; ok { nc.network.mu.Unlock() - log.Warning("ALREADY CONNECTED TO PEER (is this a reconnect? test lib needs fixing)") + // log.Warning("ALREADY CONNECTED TO PEER (is this a reconnect? test lib needs fixing)") return nil } nc.network.conns[tag] = struct{}{} nc.network.mu.Unlock() - // TODO: add handling for disconnects - otherClient.receiver.PeerConnected(nc.local) nc.Receiver.PeerConnected(p) return nil } +func (nc *networkClient) DisconnectFrom(_ context.Context, p peer.ID) error { + nc.network.mu.Lock() + defer nc.network.mu.Unlock() + + otherClient, ok := nc.network.clients[p] + if !ok { + return errors.New("no such peer in network") + } + + tag := tagForPeers(nc.local, p) + if _, ok := nc.network.conns[tag]; !ok { + // Already disconnected + return nil + } + delete(nc.network.conns, tag) + + otherClient.receiver.PeerDisconnected(nc.local) + nc.Receiver.PeerDisconnected(p) + return nil +} + func (rq *receiverQueue) enqueue(m *message) { rq.lk.Lock() defer rq.lk.Unlock() diff --git a/testutil/testutil.go b/testutil/testutil.go index de6777ff..9f0c5817 100644 --- a/testutil/testutil.go +++ b/testutil/testutil.go @@ -39,17 +39,6 @@ func GenerateCids(n int) []cid.Cid { return cids } -// GenerateWantlist makes a populated wantlist. -func GenerateWantlist(n int, ses uint64) *wantlist.SessionTrackedWantlist { - wl := wantlist.NewSessionTrackedWantlist() - for i := 0; i < n; i++ { - prioritySeq++ - entry := wantlist.NewRefEntry(blockGenerator.Next().Cid(), prioritySeq) - wl.AddEntry(entry, ses) - } - return wl -} - // GenerateMessageEntries makes fake bitswap message entries. func GenerateMessageEntries(n int, isCancel bool) []bsmsg.Entry { bsmsgs := make([]bsmsg.Entry, 0, n) @@ -127,3 +116,43 @@ func IndexOf(blks []blocks.Block, c cid.Cid) int { func ContainsBlock(blks []blocks.Block, block blocks.Block) bool { return IndexOf(blks, block.Cid()) != -1 } + +// ContainsKey returns true if a key is found n a list of CIDs. 
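+// Comparison is by direct CID equality; MatchKeysIgnoreOrder below builds on
+// this to compare two key lists without regard to ordering.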
+func ContainsKey(ks []cid.Cid, c cid.Cid) bool { + for _, k := range ks { + if c == k { + return true + } + } + return false +} + +// MatchKeysIgnoreOrder returns true if the lists of CIDs match (even if +// they're in a different order) +func MatchKeysIgnoreOrder(ks1 []cid.Cid, ks2 []cid.Cid) bool { + if len(ks1) != len(ks2) { + return false + } + + for _, k := range ks1 { + if !ContainsKey(ks2, k) { + return false + } + } + return true +} + +// MatchPeersIgnoreOrder returns true if the lists of peers match (even if +// they're in a different order) +func MatchPeersIgnoreOrder(ps1 []peer.ID, ps2 []peer.ID) bool { + if len(ps1) != len(ps2) { + return false + } + + for _, p := range ps1 { + if !ContainsPeer(ps2, p) { + return false + } + } + return true +} diff --git a/wantlist/wantlist.go b/wantlist/wantlist.go index b5c2a602..d891ad0b 100644 --- a/wantlist/wantlist.go +++ b/wantlist/wantlist.go @@ -5,15 +5,11 @@ package wantlist import ( "sort" + pb "github.com/ipfs/go-bitswap/message/pb" + cid "github.com/ipfs/go-cid" ) -// SessionTrackedWantlist is a list of wants that also track which bitswap -// sessions have requested them -type SessionTrackedWantlist struct { - set map[cid.Cid]*sessionTrackedEntry -} - // Wantlist is a raw list of wanted blocks and their priorities type Wantlist struct { set map[cid.Cid]Entry @@ -23,11 +19,7 @@ type Wantlist struct { type Entry struct { Cid cid.Cid Priority int -} - -type sessionTrackedEntry struct { - Entry - sesTrk map[uint64]struct{} + WantType pb.Message_Wantlist_WantType } // NewRefEntry creates a new reference tracked wantlist entry. @@ -35,6 +27,7 @@ func NewRefEntry(c cid.Cid, p int) Entry { return Entry{ Cid: c, Priority: p, + WantType: pb.Message_Wantlist_Block, } } @@ -44,13 +37,6 @@ func (es entrySlice) Len() int { return len(es) } func (es entrySlice) Swap(i, j int) { es[i], es[j] = es[j], es[i] } func (es entrySlice) Less(i, j int) bool { return es[i].Priority > es[j].Priority } -// NewSessionTrackedWantlist generates a new SessionTrackedWantList. -func NewSessionTrackedWantlist() *SessionTrackedWantlist { - return &SessionTrackedWantlist{ - set: make(map[cid.Cid]*sessionTrackedEntry), - } -} - // New generates a new raw Wantlist func New() *Wantlist { return &Wantlist{ @@ -58,136 +44,53 @@ func New() *Wantlist { } } -// Add adds the given cid to the wantlist with the specified priority, governed -// by the session ID 'ses'. if a cid is added under multiple session IDs, then -// it must be removed by each of those sessions before it is no longer 'in the -// wantlist'. Calls to Add are idempotent given the same arguments. Subsequent -// calls with different values for priority will not update the priority. -// TODO: think through priority changes here -// Add returns true if the cid did not exist in the wantlist before this call -// (even if it was under a different session). -func (w *SessionTrackedWantlist) Add(c cid.Cid, priority int, ses uint64) bool { - - if e, ok := w.set[c]; ok { - e.sesTrk[ses] = struct{}{} - return false - } - - w.set[c] = &sessionTrackedEntry{ - Entry: Entry{Cid: c, Priority: priority}, - sesTrk: map[uint64]struct{}{ses: struct{}{}}, - } - - return true -} - -// AddEntry adds given Entry to the wantlist. For more information see Add method. 
-func (w *SessionTrackedWantlist) AddEntry(e Entry, ses uint64) bool { - if ex, ok := w.set[e.Cid]; ok { - ex.sesTrk[ses] = struct{}{} - return false - } - w.set[e.Cid] = &sessionTrackedEntry{ - Entry: e, - sesTrk: map[uint64]struct{}{ses: struct{}{}}, - } - return true -} - -// Remove removes the given cid from being tracked by the given session. -// 'true' is returned if this call to Remove removed the final session ID -// tracking the cid. (meaning true will be returned iff this call caused the -// value of 'Contains(c)' to change from true to false) -func (w *SessionTrackedWantlist) Remove(c cid.Cid, ses uint64) bool { - e, ok := w.set[c] - if !ok { - return false - } - - delete(e.sesTrk, ses) - if len(e.sesTrk) == 0 { - delete(w.set, c) - return true - } - return false -} - -// Contains returns true if the given cid is in the wantlist tracked by one or -// more sessions. -func (w *SessionTrackedWantlist) Contains(k cid.Cid) (Entry, bool) { - e, ok := w.set[k] - if !ok { - return Entry{}, false - } - return e.Entry, true -} - -// Entries returns all wantlist entries for a given session tracked want list. -func (w *SessionTrackedWantlist) Entries() []Entry { - es := make([]Entry, 0, len(w.set)) - for _, e := range w.set { - es = append(es, e.Entry) - } - return es -} - -// SortedEntries returns wantlist entries ordered by priority. -func (w *SessionTrackedWantlist) SortedEntries() []Entry { - es := w.Entries() - sort.Sort(entrySlice(es)) - return es -} - -// Len returns the number of entries in a wantlist. -func (w *SessionTrackedWantlist) Len() int { - return len(w.set) -} - -// CopyWants copies all wants from one SessionTrackWantlist to another (along with -// the session data) -func (w *SessionTrackedWantlist) CopyWants(to *SessionTrackedWantlist) { - for _, e := range w.set { - for k := range e.sesTrk { - to.AddEntry(e.Entry, k) - } - } -} - // Len returns the number of entries in a wantlist. func (w *Wantlist) Len() int { return len(w.set) } // Add adds an entry in a wantlist from CID & Priority, if not already present. -func (w *Wantlist) Add(c cid.Cid, priority int) bool { - if _, ok := w.set[c]; ok { +func (w *Wantlist) Add(c cid.Cid, priority int, wantType pb.Message_Wantlist_WantType) bool { + e, ok := w.set[c] + + // Adding want-have should not override want-block + if ok && (e.WantType == pb.Message_Wantlist_Block || wantType == pb.Message_Wantlist_Have) { return false } w.set[c] = Entry{ Cid: c, Priority: priority, + WantType: wantType, } return true } -// AddEntry adds an entry to a wantlist if not already present. -func (w *Wantlist) AddEntry(e Entry) bool { - if _, ok := w.set[e.Cid]; ok { +// Remove removes the given cid from the wantlist. +func (w *Wantlist) Remove(c cid.Cid) bool { + _, ok := w.set[c] + if !ok { return false } - w.set[e.Cid] = e + + delete(w.set, c) return true } -// Remove removes the given cid from the wantlist. -func (w *Wantlist) Remove(c cid.Cid) bool { - _, ok := w.set[c] +// Remove removes the given cid from the wantlist, respecting the type: +// Remove with want-have will not remove an existing want-block. 
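+//
+// A sketch of the intended precedence (illustrative only, not additional API):
+//
+//	wl := New()
+//	wl.Add(c, 1, pb.Message_Wantlist_Block)
+//	wl.RemoveType(c, pb.Message_Wantlist_Have)  // no-op: a want-block is still pending
+//	wl.RemoveType(c, pb.Message_Wantlist_Block) // removes the entry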
+func (w *Wantlist) RemoveType(c cid.Cid, wantType pb.Message_Wantlist_WantType) bool { + e, ok := w.set[c] if !ok { return false } + // Removing want-have should not remove want-block + if e.WantType == pb.Message_Wantlist_Block && wantType == pb.Message_Wantlist_Have { + return false + } + delete(w.set, c) return true } @@ -214,3 +117,10 @@ func (w *Wantlist) SortedEntries() []Entry { sort.Sort(entrySlice(es)) return es } + +// Absorb all the entries in other into this want list +func (w *Wantlist) Absorb(other *Wantlist) { + for _, e := range other.Entries() { + w.Add(e.Cid, e.Priority, e.WantType) + } +} diff --git a/wantlist/wantlist_test.go b/wantlist/wantlist_test.go index 8616efb0..1139e87a 100644 --- a/wantlist/wantlist_test.go +++ b/wantlist/wantlist_test.go @@ -3,6 +3,7 @@ package wantlist import ( "testing" + pb "github.com/ipfs/go-bitswap/message/pb" cid "github.com/ipfs/go-cid" ) @@ -38,21 +39,14 @@ func assertHasCid(t *testing.T, w wli, c cid.Cid) { } } -func assertNotHasCid(t *testing.T, w wli, c cid.Cid) { - _, ok := w.Contains(c) - if ok { - t.Fatal("expected not to have ", c) - } -} - func TestBasicWantlist(t *testing.T) { wl := New() - if !wl.Add(testcids[0], 5) { + if !wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) { t.Fatal("expected true") } assertHasCid(t, wl, testcids[0]) - if !wl.Add(testcids[1], 4) { + if !wl.Add(testcids[1], 4, pb.Message_Wantlist_Block) { t.Fatal("expected true") } assertHasCid(t, wl, testcids[0]) @@ -62,7 +56,7 @@ func TestBasicWantlist(t *testing.T) { t.Fatal("should have had two items") } - if wl.Add(testcids[1], 4) { + if wl.Add(testcids[1], 4, pb.Message_Wantlist_Block) { t.Fatal("add shouldnt report success on second add") } assertHasCid(t, wl, testcids[0]) @@ -72,7 +66,7 @@ func TestBasicWantlist(t *testing.T) { t.Fatal("should have had two items") } - if !wl.Remove(testcids[0]) { + if !wl.RemoveType(testcids[0], pb.Message_Wantlist_Block) { t.Fatal("should have gotten true") } @@ -82,23 +76,144 @@ func TestBasicWantlist(t *testing.T) { } } -func TestSessionTrackedWantlist(t *testing.T) { - wl := NewSessionTrackedWantlist() +func TestAddHaveThenBlock(t *testing.T) { + wl := New() - if !wl.Add(testcids[0], 5, 1) { - t.Fatal("should have added") + wl.Add(testcids[0], 5, pb.Message_Wantlist_Have) + wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) + + e, ok := wl.Contains(testcids[0]) + if !ok { + t.Fatal("expected to have ", testcids[0]) } - assertHasCid(t, wl, testcids[0]) - if wl.Remove(testcids[0], 2) { - t.Fatal("shouldnt have removed") + if e.WantType != pb.Message_Wantlist_Block { + t.Fatal("expected to be ", pb.Message_Wantlist_Block) } - assertHasCid(t, wl, testcids[0]) - if wl.Add(testcids[0], 5, 1) { - t.Fatal("shouldnt have added") +} + +func TestAddBlockThenHave(t *testing.T) { + wl := New() + + wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) + wl.Add(testcids[0], 5, pb.Message_Wantlist_Have) + + e, ok := wl.Contains(testcids[0]) + if !ok { + t.Fatal("expected to have ", testcids[0]) } - assertHasCid(t, wl, testcids[0]) - if !wl.Remove(testcids[0], 1) { - t.Fatal("should have removed") + if e.WantType != pb.Message_Wantlist_Block { + t.Fatal("expected to be ", pb.Message_Wantlist_Block) + } +} + +func TestAddHaveThenRemoveBlock(t *testing.T) { + wl := New() + + wl.Add(testcids[0], 5, pb.Message_Wantlist_Have) + wl.RemoveType(testcids[0], pb.Message_Wantlist_Block) + + _, ok := wl.Contains(testcids[0]) + if ok { + t.Fatal("expected not to have ", testcids[0]) + } +} + +func TestAddBlockThenRemoveHave(t *testing.T) { + wl := 
New() + + wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) + wl.RemoveType(testcids[0], pb.Message_Wantlist_Have) + + e, ok := wl.Contains(testcids[0]) + if !ok { + t.Fatal("expected to have ", testcids[0]) + } + if e.WantType != pb.Message_Wantlist_Block { + t.Fatal("expected to be ", pb.Message_Wantlist_Block) + } +} + +func TestAddHaveThenRemoveAny(t *testing.T) { + wl := New() + + wl.Add(testcids[0], 5, pb.Message_Wantlist_Have) + wl.Remove(testcids[0]) + + _, ok := wl.Contains(testcids[0]) + if ok { + t.Fatal("expected not to have ", testcids[0]) + } +} + +func TestAddBlockThenRemoveAny(t *testing.T) { + wl := New() + + wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) + wl.Remove(testcids[0]) + + _, ok := wl.Contains(testcids[0]) + if ok { + t.Fatal("expected not to have ", testcids[0]) + } +} + +func TestAbsort(t *testing.T) { + wl := New() + wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) + wl.Add(testcids[1], 4, pb.Message_Wantlist_Have) + wl.Add(testcids[2], 3, pb.Message_Wantlist_Have) + + wl2 := New() + wl2.Add(testcids[0], 2, pb.Message_Wantlist_Have) + wl2.Add(testcids[1], 1, pb.Message_Wantlist_Block) + + wl.Absorb(wl2) + + e, ok := wl.Contains(testcids[0]) + if !ok { + t.Fatal("expected to have ", testcids[0]) + } + if e.Priority != 5 { + t.Fatal("expected priority 5") + } + if e.WantType != pb.Message_Wantlist_Block { + t.Fatal("expected type ", pb.Message_Wantlist_Block) + } + + e, ok = wl.Contains(testcids[1]) + if !ok { + t.Fatal("expected to have ", testcids[1]) + } + if e.Priority != 1 { + t.Fatal("expected priority 1") + } + if e.WantType != pb.Message_Wantlist_Block { + t.Fatal("expected type ", pb.Message_Wantlist_Block) + } + + e, ok = wl.Contains(testcids[2]) + if !ok { + t.Fatal("expected to have ", testcids[2]) + } + if e.Priority != 3 { + t.Fatal("expected priority 3") + } + if e.WantType != pb.Message_Wantlist_Have { + t.Fatal("expected type ", pb.Message_Wantlist_Have) + } +} + +func TestSortedEntries(t *testing.T) { + wl := New() + + wl.Add(testcids[0], 3, pb.Message_Wantlist_Block) + wl.Add(testcids[1], 5, pb.Message_Wantlist_Have) + wl.Add(testcids[2], 4, pb.Message_Wantlist_Have) + + entries := wl.SortedEntries() + if !entries[0].Cid.Equals(testcids[1]) || + !entries[1].Cid.Equals(testcids[2]) || + !entries[2].Cid.Equals(testcids[0]) { + t.Fatal("wrong order") } - assertNotHasCid(t, wl, testcids[0]) } diff --git a/wantmanager/wantmanager.go b/wantmanager/wantmanager.go index f726d684..00935993 100644 --- a/wantmanager/wantmanager.go +++ b/wantmanager/wantmanager.go @@ -2,256 +2,112 @@ package wantmanager import ( "context" - "math" - bsmsg "github.com/ipfs/go-bitswap/message" - wantlist "github.com/ipfs/go-bitswap/wantlist" - logging "github.com/ipfs/go-log" + bsbpm "github.com/ipfs/go-bitswap/blockpresencemanager" + bssim "github.com/ipfs/go-bitswap/sessioninterestmanager" + "github.com/ipfs/go-bitswap/sessionmanager" + bsswl "github.com/ipfs/go-bitswap/sessionwantlist" cid "github.com/ipfs/go-cid" - metrics "github.com/ipfs/go-metrics-interface" peer "github.com/libp2p/go-libp2p-core/peer" ) -var log = logging.Logger("bitswap") - -const ( - // maxPriority is the max priority as defined by the bitswap protocol - maxPriority = math.MaxInt32 -) - -// PeerHandler sends changes out to the network as they get added to the wantlist -// managed by the WantManager. 
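+// The WantManager has no event loop of its own; it delegates directly to the
+// SessionWantlist (broadcast want-haves), the SessionInterestManager and the
+// BlockPresenceManager.
+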
+// PeerHandler sends wants / cancels to other peers type PeerHandler interface { + // Connected is called when a peer connects, with any initial want-haves + // that have been broadcast to all peers (as part of session discovery) + Connected(p peer.ID, initialWants []cid.Cid) + // Disconnected is called when a peer disconnects Disconnected(p peer.ID) - Connected(p peer.ID, initialWants *wantlist.SessionTrackedWantlist) - SendMessage(entries []bsmsg.Entry, targets []peer.ID, from uint64) + // BroadcastWantHaves sends want-haves to all connected peers + BroadcastWantHaves(ctx context.Context, wantHaves []cid.Cid) + // SendCancels sends cancels to all peers that had previously been sent + // a want-block or want-have for the given key + SendCancels(context.Context, []cid.Cid) } -type wantMessage interface { - handle(wm *WantManager) +// SessionManager receives incoming messages and distributes them to sessions +type SessionManager interface { + ReceiveFrom(p peer.ID, blks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) []sessionmanager.Session } -// WantManager manages a global want list. It tracks two seperate want lists - -// one for all wants, and one for wants that are specifically broadcast to the -// internet. +// WantManager +// - informs the SessionManager and BlockPresenceManager of incoming information +// and cancelled sessions +// - informs the PeerManager of connects and disconnects +// - manages the list of want-haves that are broadcast to the internet +// (as opposed to being sent to specific peers) type WantManager struct { - // channel requests to the run loop - // to get predictable behavior while running this in a go routine - // having only one channel is neccesary, so requests are processed serially - wantMessages chan wantMessage - - // synchronized by Run loop, only touch inside there - wl *wantlist.SessionTrackedWantlist - bcwl *wantlist.SessionTrackedWantlist + bcwl *bsswl.SessionWantlist - ctx context.Context - cancel func() - - peerHandler PeerHandler - wantlistGauge metrics.Gauge + peerHandler PeerHandler + sim *bssim.SessionInterestManager + bpm *bsbpm.BlockPresenceManager + sm SessionManager } // New initializes a new WantManager for a given context. -func New(ctx context.Context, peerHandler PeerHandler) *WantManager { - ctx, cancel := context.WithCancel(ctx) - wantlistGauge := metrics.NewCtx(ctx, "wantlist_total", - "Number of items in wantlist.").Gauge() +func New(ctx context.Context, peerHandler PeerHandler, sim *bssim.SessionInterestManager, bpm *bsbpm.BlockPresenceManager) *WantManager { return &WantManager{ - wantMessages: make(chan wantMessage, 10), - wl: wantlist.NewSessionTrackedWantlist(), - bcwl: wantlist.NewSessionTrackedWantlist(), - ctx: ctx, - cancel: cancel, - peerHandler: peerHandler, - wantlistGauge: wantlistGauge, + bcwl: bsswl.NewSessionWantlist(), + peerHandler: peerHandler, + sim: sim, + bpm: bpm, } } -// WantBlocks adds the given cids to the wantlist, tracked by the given session. -func (wm *WantManager) WantBlocks(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) { - log.Debugf("[wantlist] want blocks; cids=%s, peers=%s, ses=%d", ks, peers, ses) - wm.addEntries(ctx, ks, peers, false, ses) +func (wm *WantManager) SetSessionManager(sm SessionManager) { + wm.sm = sm } -// CancelWants removes the given cids from the wantlist, tracked by the given session. 
-func (wm *WantManager) CancelWants(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) { - log.Debugf("[wantlist] unwant blocks; cids=%s, peers=%s, ses=%d", ks, peers, ses) - wm.addEntries(context.Background(), ks, peers, true, ses) +// ReceiveFrom is called when a new message is received +func (wm *WantManager) ReceiveFrom(ctx context.Context, p peer.ID, blks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { + // Record block presence for HAVE / DONT_HAVE + wm.bpm.ReceiveFrom(p, haves, dontHaves) + // Inform interested sessions + wm.sm.ReceiveFrom(p, blks, haves, dontHaves) + // Remove received blocks from broadcast wantlist + wm.bcwl.RemoveKeys(blks) + // Send CANCEL to all peers with want-have / want-block + wm.peerHandler.SendCancels(ctx, blks) } -// CurrentWants returns the list of current wants. -func (wm *WantManager) CurrentWants() []wantlist.Entry { - resp := make(chan []wantlist.Entry, 1) - select { - case wm.wantMessages <- ¤tWantsMessage{resp}: - case <-wm.ctx.Done(): - return nil - } - select { - case wantlist := <-resp: - return wantlist - case <-wm.ctx.Done(): - return nil - } -} +// BroadcastWantHaves is called when want-haves should be broadcast to all +// connected peers (as part of session discovery) +func (wm *WantManager) BroadcastWantHaves(ctx context.Context, ses uint64, wantHaves []cid.Cid) { + // log.Warningf("BroadcastWantHaves session%d: %s", ses, wantHaves) -// CurrentBroadcastWants returns the current list of wants that are broadcasts. -func (wm *WantManager) CurrentBroadcastWants() []wantlist.Entry { - resp := make(chan []wantlist.Entry, 1) - select { - case wm.wantMessages <- ¤tBroadcastWantsMessage{resp}: - case <-wm.ctx.Done(): - return nil - } - select { - case wl := <-resp: - return wl - case <-wm.ctx.Done(): - return nil - } -} + // Record broadcast wants + wm.bcwl.Add(wantHaves, ses) -// WantCount returns the total count of wants. -func (wm *WantManager) WantCount() int { - resp := make(chan int, 1) - select { - case wm.wantMessages <- &wantCountMessage{resp}: - case <-wm.ctx.Done(): - return 0 - } - select { - case count := <-resp: - return count - case <-wm.ctx.Done(): - return 0 - } + // Send want-haves to all peers + wm.peerHandler.BroadcastWantHaves(ctx, wantHaves) } -// Connected is called when a new peer is connected -func (wm *WantManager) Connected(p peer.ID) { - select { - case wm.wantMessages <- &connectedMessage{p}: - case <-wm.ctx.Done(): - } -} +// RemoveSession is called when the session is shut down +func (wm *WantManager) RemoveSession(ctx context.Context, ses uint64) { + // Remove session's interest in the given blocks + cancelKs := wm.sim.RemoveSessionInterest(ses) -// Disconnected is called when a peer is disconnected -func (wm *WantManager) Disconnected(p peer.ID) { - select { - case wm.wantMessages <- &disconnectedMessage{p}: - case <-wm.ctx.Done(): - } -} + // Remove broadcast want-haves for session + wm.bcwl.RemoveSession(ses) -// Startup starts processing for the WantManager. -func (wm *WantManager) Startup() { - go wm.run() -} + // Free up block presence tracking for keys that no session is interested + // in anymore + wm.bpm.RemoveKeys(cancelKs) -// Shutdown ends processing for the want manager. -func (wm *WantManager) Shutdown() { - wm.cancel() -} - -func (wm *WantManager) run() { - // NOTE: Do not open any streams or connections from anywhere in this - // event loop. Really, just don't do anything likely to block. 
- for { - select { - case message := <-wm.wantMessages: - message.handle(wm) - case <-wm.ctx.Done(): - return - } - } -} - -func (wm *WantManager) addEntries(ctx context.Context, ks []cid.Cid, targets []peer.ID, cancel bool, ses uint64) { - entries := make([]bsmsg.Entry, 0, len(ks)) - for i, k := range ks { - entries = append(entries, bsmsg.Entry{ - Cancel: cancel, - Entry: wantlist.NewRefEntry(k, maxPriority-i), - }) - } - select { - case wm.wantMessages <- &wantSet{entries: entries, targets: targets, from: ses}: - case <-wm.ctx.Done(): - case <-ctx.Done(): - } + // Send CANCEL to all peers for blocks that no session is interested in anymore + wm.peerHandler.SendCancels(ctx, cancelKs) } -type wantSet struct { - entries []bsmsg.Entry - targets []peer.ID - from uint64 -} - -func (ws *wantSet) handle(wm *WantManager) { - // is this a broadcast or not? - brdc := len(ws.targets) == 0 - - // add changes to our wantlist - for _, e := range ws.entries { - if e.Cancel { - if brdc { - wm.bcwl.Remove(e.Cid, ws.from) - } - - if wm.wl.Remove(e.Cid, ws.from) { - wm.wantlistGauge.Dec() - } - } else { - if brdc { - wm.bcwl.AddEntry(e.Entry, ws.from) - } - if wm.wl.AddEntry(e.Entry, ws.from) { - wm.wantlistGauge.Inc() - } - } - } - - // broadcast those wantlist changes - wm.peerHandler.SendMessage(ws.entries, ws.targets, ws.from) -} - -type currentWantsMessage struct { - resp chan<- []wantlist.Entry -} - -func (cwm *currentWantsMessage) handle(wm *WantManager) { - cwm.resp <- wm.wl.Entries() -} - -type currentBroadcastWantsMessage struct { - resp chan<- []wantlist.Entry -} - -func (cbcwm *currentBroadcastWantsMessage) handle(wm *WantManager) { - cbcwm.resp <- wm.bcwl.Entries() -} - -type wantCountMessage struct { - resp chan<- int -} - -func (wcm *wantCountMessage) handle(wm *WantManager) { - wcm.resp <- wm.wl.Len() -} - -type connectedMessage struct { - p peer.ID -} - -func (cm *connectedMessage) handle(wm *WantManager) { - wm.peerHandler.Connected(cm.p, wm.bcwl) -} - -type disconnectedMessage struct { - p peer.ID +// Connected is called when a new peer connects +func (wm *WantManager) Connected(p peer.ID) { + // Tell the peer handler that there is a new connection and give it the + // list of outstanding broadcast wants + wm.peerHandler.Connected(p, wm.bcwl.Keys()) } -func (dm *disconnectedMessage) handle(wm *WantManager) { - wm.peerHandler.Disconnected(dm.p) +// Disconnected is called when a peer disconnects +func (wm *WantManager) Disconnected(p peer.ID) { + wm.peerHandler.Disconnected(p) } diff --git a/wantmanager/wantmanager_test.go b/wantmanager/wantmanager_test.go index a721e24a..b4e7cd58 100644 --- a/wantmanager/wantmanager_test.go +++ b/wantmanager/wantmanager_test.go @@ -2,217 +2,236 @@ package wantmanager import ( "context" - "reflect" - "sync" "testing" + bsbpm "github.com/ipfs/go-bitswap/blockpresencemanager" + bssim "github.com/ipfs/go-bitswap/sessioninterestmanager" + "github.com/ipfs/go-bitswap/sessionmanager" "github.com/ipfs/go-bitswap/testutil" - wantlist "github.com/ipfs/go-bitswap/wantlist" - bsmsg "github.com/ipfs/go-bitswap/message" "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p-core/peer" ) type fakePeerHandler struct { - lk sync.RWMutex - lastWantSet wantSet + lastInitialWants []cid.Cid + lastBcstWants []cid.Cid + lastCancels []cid.Cid } -func (fph *fakePeerHandler) SendMessage(entries []bsmsg.Entry, targets []peer.ID, from uint64) { - fph.lk.Lock() - fph.lastWantSet = wantSet{entries, targets, from} - fph.lk.Unlock() +func (fph *fakePeerHandler) Connected(p peer.ID, 
initialWants []cid.Cid) { + fph.lastInitialWants = initialWants } +func (fph *fakePeerHandler) Disconnected(p peer.ID) { -func (fph *fakePeerHandler) Connected(p peer.ID, initialWants *wantlist.SessionTrackedWantlist) {} -func (fph *fakePeerHandler) Disconnected(p peer.ID) {} - -func (fph *fakePeerHandler) getLastWantSet() wantSet { - fph.lk.Lock() - defer fph.lk.Unlock() - return fph.lastWantSet } - -func setupTestFixturesAndInitialWantList() ( - context.Context, *fakePeerHandler, *WantManager, []cid.Cid, []cid.Cid, []peer.ID, uint64, uint64) { - ctx := context.Background() - - // setup fixtures - wantSender := &fakePeerHandler{} - wantManager := New(ctx, wantSender) - keys := testutil.GenerateCids(10) - otherKeys := testutil.GenerateCids(5) - peers := testutil.GeneratePeers(10) - session := testutil.GenerateSessionID() - otherSession := testutil.GenerateSessionID() - - // startup wantManager - wantManager.Startup() - - // add initial wants - wantManager.WantBlocks( - ctx, - keys, - peers, - session) - - return ctx, wantSender, wantManager, keys, otherKeys, peers, session, otherSession +func (fph *fakePeerHandler) BroadcastWantHaves(ctx context.Context, wantHaves []cid.Cid) { + fph.lastBcstWants = wantHaves +} +func (fph *fakePeerHandler) SendCancels(ctx context.Context, cancels []cid.Cid) { + fph.lastCancels = cancels } -func TestInitialWantsAddedCorrectly(t *testing.T) { +type fakeSessionManager struct { +} - _, wantSender, wantManager, keys, _, peers, session, _ := - setupTestFixturesAndInitialWantList() +func (*fakeSessionManager) ReceiveFrom(p peer.ID, blks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) []sessionmanager.Session { + return nil +} - bcwl := wantManager.CurrentBroadcastWants() - wl := wantManager.CurrentWants() +func TestInitialBroadcastWantsAddedCorrectly(t *testing.T) { + ctx := context.Background() + ph := &fakePeerHandler{} + sim := bssim.New() + bpm := bsbpm.New() + wm := New(context.Background(), ph, sim, bpm) + sm := &fakeSessionManager{} + wm.SetSessionManager(sm) - if len(bcwl) > 0 { - t.Fatal("should not create broadcast wants when peers are specified") - } + peers := testutil.GeneratePeers(3) - if len(wl) != len(keys) { - t.Fatal("did not add correct number of wants to want lsit") + // Connect peer 0. Should not receive anything yet. + wm.Connected(peers[0]) + if len(ph.lastInitialWants) != 0 { + t.Fatal("expected no initial wants") } - generatedWantSet := wantSender.getLastWantSet() - - if len(generatedWantSet.entries) != len(keys) { - t.Fatal("incorrect wants sent") + // Broadcast 2 wants + wantHaves := testutil.GenerateCids(2) + wm.BroadcastWantHaves(ctx, 1, wantHaves) + if len(ph.lastBcstWants) != 2 { + t.Fatal("expected broadcast wants") } - for _, entry := range generatedWantSet.entries { - if entry.Cancel { - t.Fatal("did not send only non-cancel messages") - } + // Connect peer 1. Should receive all wants broadcast so far. + wm.Connected(peers[1]) + if len(ph.lastInitialWants) != 2 { + t.Fatal("expected broadcast wants") } - if generatedWantSet.from != session { - t.Fatal("incorrect session used in sending") + // Broadcast 3 more wants + wantHaves2 := testutil.GenerateCids(3) + wm.BroadcastWantHaves(ctx, 2, wantHaves2) + if len(ph.lastBcstWants) != 3 { + t.Fatal("expected broadcast wants") } - if !reflect.DeepEqual(generatedWantSet.targets, peers) { - t.Fatal("did not setup peers correctly") + // Connect peer 2. Should receive all wants broadcast so far. 
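+	// (2 want-haves from the first broadcast plus 3 from the second = 5)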
+ wm.Connected(peers[2]) + if len(ph.lastInitialWants) != 5 { + t.Fatal("expected all wants to be broadcast") } - - wantManager.Shutdown() } -func TestCancellingWants(t *testing.T) { - ctx, wantSender, wantManager, keys, _, peers, session, _ := - setupTestFixturesAndInitialWantList() - - wantManager.CancelWants(ctx, keys, peers, session) - - wl := wantManager.CurrentWants() - - if len(wl) != 0 { - t.Fatal("did not remove blocks from want list") - } - - generatedWantSet := wantSender.getLastWantSet() - - if len(generatedWantSet.entries) != len(keys) { - t.Fatal("incorrect wants sent") - } +func TestReceiveFromRemovesBroadcastWants(t *testing.T) { + ctx := context.Background() + ph := &fakePeerHandler{} + sim := bssim.New() + bpm := bsbpm.New() + wm := New(context.Background(), ph, sim, bpm) + sm := &fakeSessionManager{} + wm.SetSessionManager(sm) - for _, entry := range generatedWantSet.entries { - if !entry.Cancel { - t.Fatal("did not send only cancel messages") - } - } + peers := testutil.GeneratePeers(3) - if generatedWantSet.from != session { - t.Fatal("incorrect session used in sending") + // Broadcast 2 wants + cids := testutil.GenerateCids(2) + wm.BroadcastWantHaves(ctx, 1, cids) + if len(ph.lastBcstWants) != 2 { + t.Fatal("expected broadcast wants") } - if !reflect.DeepEqual(generatedWantSet.targets, peers) { - t.Fatal("did not setup peers correctly") + // Connect peer 0. Should receive all wants. + wm.Connected(peers[0]) + if len(ph.lastInitialWants) != 2 { + t.Fatal("expected broadcast wants") } - wantManager.Shutdown() - -} - -func TestCancellingWantsFromAnotherSessionHasNoEffect(t *testing.T) { - ctx, _, wantManager, keys, _, peers, _, otherSession := - setupTestFixturesAndInitialWantList() - - // cancelling wants from another session has no effect - wantManager.CancelWants(ctx, keys, peers, otherSession) - - wl := wantManager.CurrentWants() + // Receive block for first want + ks := cids[0:1] + haves := []cid.Cid{} + dontHaves := []cid.Cid{} + wm.ReceiveFrom(ctx, peers[1], ks, haves, dontHaves) - if len(wl) != len(keys) { - t.Fatal("should not cancel wants unless they match session that made them") + // Connect peer 2. Should get remaining want (the one that the block has + // not yet been received for). + wm.Connected(peers[2]) + if len(ph.lastInitialWants) != 1 { + t.Fatal("expected remaining wants") } - - wantManager.Shutdown() } -func TestAddingWantsWithNoPeersAddsToBroadcastAndRegularWantList(t *testing.T) { - ctx, _, wantManager, keys, otherKeys, _, session, _ := - setupTestFixturesAndInitialWantList() - - wantManager.WantBlocks(ctx, otherKeys, nil, session) - - bcwl := wantManager.CurrentBroadcastWants() - wl := wantManager.CurrentWants() - - if len(bcwl) != len(otherKeys) { - t.Fatal("want requests with no peers should get added to broadcast list") - } - - if len(wl) != len(otherKeys)+len(keys) { - t.Fatal("want requests with no peers should get added to regular want list") +func TestRemoveSessionRemovesBroadcastWants(t *testing.T) { + ctx := context.Background() + ph := &fakePeerHandler{} + sim := bssim.New() + bpm := bsbpm.New() + wm := New(context.Background(), ph, sim, bpm) + sm := &fakeSessionManager{} + wm.SetSessionManager(sm) + + peers := testutil.GeneratePeers(2) + + // Broadcast 2 wants for session 0 and 2 wants for session 1 + ses0 := uint64(0) + ses1 := uint64(1) + ses0wants := testutil.GenerateCids(2) + ses1wants := testutil.GenerateCids(2) + wm.BroadcastWantHaves(ctx, ses0, ses0wants) + wm.BroadcastWantHaves(ctx, ses1, ses1wants) + + // Connect peer 0. 
Should receive all wants. + wm.Connected(peers[0]) + if len(ph.lastInitialWants) != 4 { + t.Fatal("expected broadcast wants") + } + + // Remove session 0 + wm.RemoveSession(ctx, ses0) + + // Connect peer 1. Should receive all wants from session that has not been + // removed. + wm.Connected(peers[1]) + if len(ph.lastInitialWants) != 2 { + t.Fatal("expected broadcast wants") } - - wantManager.Shutdown() } -func TestAddingRequestFromSecondSessionPreventsCancel(t *testing.T) { - ctx, wantSender, wantManager, keys, _, peers, session, otherSession := - setupTestFixturesAndInitialWantList() - - // add a second session requesting the first key - firstKeys := append([]cid.Cid(nil), keys[0]) - wantManager.WantBlocks(ctx, firstKeys, peers, otherSession) +func TestReceiveFrom(t *testing.T) { + ctx := context.Background() + ph := &fakePeerHandler{} + sim := bssim.New() + bpm := bsbpm.New() + wm := New(context.Background(), ph, sim, bpm) + sm := &fakeSessionManager{} + wm.SetSessionManager(sm) - wl := wantManager.CurrentWants() + p := testutil.GeneratePeers(1)[0] + ks := testutil.GenerateCids(2) + haves := testutil.GenerateCids(2) + dontHaves := testutil.GenerateCids(2) + wm.ReceiveFrom(ctx, p, ks, haves, dontHaves) - if len(wl) != len(keys) { - t.Fatal("wants from other sessions should not get added seperately") + if !bpm.PeerHasBlock(p, haves[0]) { + t.Fatal("expected block presence manager to be invoked") } - - generatedWantSet := wantSender.getLastWantSet() - if len(generatedWantSet.entries) != len(firstKeys) && - generatedWantSet.from != otherSession && - generatedWantSet.entries[0].Cid != firstKeys[0] && - generatedWantSet.entries[0].Cancel != false { - t.Fatal("should send additional message requesting want for new session") + if !bpm.PeerDoesNotHaveBlock(p, dontHaves[0]) { + t.Fatal("expected block presence manager to be invoked") } - - // cancel block from first session - wantManager.CancelWants(ctx, firstKeys, peers, session) - - wl = wantManager.CurrentWants() - - // want should still be on want list - if len(wl) != len(keys) { - t.Fatal("wants should not be removed until all sessions cancel wants") + if len(ph.lastCancels) != len(ks) { + t.Fatal("expected received blocks to be cancelled") } +} - // cancel other block from first session - secondKeys := append([]cid.Cid(nil), keys[1]) - wantManager.CancelWants(ctx, secondKeys, peers, session) - - wl = wantManager.CurrentWants() - - // want should not be on want list, cause it was only tracked by one session - if len(wl) != len(keys)-1 { - t.Fatal("wants should be removed if all sessions have cancelled") +func TestRemoveSession(t *testing.T) { + ctx := context.Background() + ph := &fakePeerHandler{} + sim := bssim.New() + bpm := bsbpm.New() + wm := New(context.Background(), ph, sim, bpm) + sm := &fakeSessionManager{} + wm.SetSessionManager(sm) + + // Record session interest in 2 keys for session 0 and 2 keys for session 1 + // with 1 overlapping key + cids := testutil.GenerateCids(3) + ses0 := uint64(0) + ses1 := uint64(1) + ses0ks := cids[:2] + ses1ks := cids[1:] + sim.RecordSessionInterest(ses0, ses0ks) + sim.RecordSessionInterest(ses1, ses1ks) + + // Receive HAVE for all keys + p := testutil.GeneratePeers(1)[0] + ks := []cid.Cid{} + haves := append(ses0ks, ses1ks...) 
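+	// ses0ks and ses1ks overlap on cids[1], so haves lists that key twice;
+	// duplicate HAVEs are harmless for this test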
+ dontHaves := []cid.Cid{} + wm.ReceiveFrom(ctx, p, ks, haves, dontHaves) + + // Remove session 0 + wm.RemoveSession(ctx, ses0) + + // Expect session 0 interest to be removed and session 1 interest to be + // unchanged + if len(sim.FilterSessionInterested(ses0, ses0ks)[0]) != 0 { + t.Fatal("expected session 0 interest to be removed") + } + if len(sim.FilterSessionInterested(ses1, ses1ks)[0]) != len(ses1ks) { + t.Fatal("expected session 1 interest to be unchanged") + } + + // Should clear block presence for key that was in session 0 and not + // in session 1 + if bpm.PeerHasBlock(p, ses0ks[0]) { + t.Fatal("expected block presence manager to be cleared") + } + if !bpm.PeerHasBlock(p, ses0ks[1]) { + t.Fatal("expected block presence manager to be unchanged for overlapping key") + } + + // Should cancel key that was in session 0 and not session 1 + if len(ph.lastCancels) != 1 || !ph.lastCancels[0].Equals(cids[0]) { + t.Fatal("expected removed want-have to be cancelled") } - - wantManager.Shutdown() } diff --git a/workers.go b/workers.go index fb3dc019..2028c4df 100644 --- a/workers.go +++ b/workers.go @@ -2,9 +2,11 @@ package bitswap import ( "context" + "fmt" engine "github.com/ipfs/go-bitswap/decision" bsmsg "github.com/ipfs/go-bitswap/message" + pb "github.com/ipfs/go-bitswap/message/pb" cid "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log" process "github.com/jbenet/goprocess" @@ -50,6 +52,7 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) { if !ok { continue } + // update the BS ledger to reflect sent message // TODO: Should only track *useful* messages in ledger outgoing := bsmsg.New(false) @@ -63,6 +66,10 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) { })) outgoing.AddBlock(block) } + for _, blockPresence := range envelope.Message.BlockPresences() { + outgoing.AddBlockPresence(blockPresence.Cid, blockPresence.Type) + } + // TODO: Only record message as sent if there was no error? bs.engine.MessageSent(envelope.Peer, outgoing) bs.sendBlocks(ctx, envelope) @@ -88,6 +95,21 @@ func (bs *Bitswap) sendBlocks(ctx context.Context, env *engine.Envelope) { msgSize := 0 msg := bsmsg.New(false) + + for _, blockPresence := range env.Message.BlockPresences() { + c := blockPresence.Cid + switch blockPresence.Type { + case pb.Message_Have: + log.Infof("Sending HAVE %s to %s", c.String()[2:8], env.Peer) + case pb.Message_DontHave: + log.Infof("Sending DONT_HAVE %s to %s", c.String()[2:8], env.Peer) + default: + panic(fmt.Sprintf("unrecognized BlockPresence type %v", blockPresence.Type)) + } + + msgSize += bsmsg.BlockPresenceSize(c) + msg.AddBlockPresence(c, blockPresence.Type) + } for _, block := range env.Message.Blocks() { msgSize += len(block.RawData()) msg.AddBlock(block) @@ -97,8 +119,10 @@ func (bs *Bitswap) sendBlocks(ctx context.Context, env *engine.Envelope) { bs.sentHistogram.Observe(float64(msgSize)) err := bs.network.SendMessage(ctx, env.Peer, msg) if err != nil { - log.Infof("sendblock error: %s", err) + // log.Infof("sendblock error: %s", err) + log.Errorf("SendMessage error: %s. size: %d. block-presence length: %d", err, msg.Size(), len(env.Message.BlockPresences())) } + log.Infof("Sent message to %s", env.Peer) } func (bs *Bitswap) provideWorker(px process.Process) {