This repository has been archived by the owner on Feb 1, 2023. It is now read-only.

refactor: go fmt
dirkmc committed Aug 30, 2019
1 parent 55b4862 commit baada99
Showing 10 changed files with 107 additions and 105 deletions.
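
For context (not part of this commit), gofmt's canonical style can be reproduced programmatically with the standard go/format package. The sketch below illustrates the kind of whitespace normalization applied throughout this diff, such as dropping the space between a composite-literal type and its opening brace:

package main

import (
	"fmt"
	"go/format"
	"log"
)

func main() {
	// Unformatted input, in the style of the old benchmarks_test.go lines.
	src := []byte("var benches = []bench {\n\tbench {\"3Nodes-AllToAll-OneAtATime\", 3, 100, allToAll, oneAtATime},\n}\n")

	// format.Source applies the same canonical formatting that gofmt does.
	out, err := format.Source(src)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(out))
	// Prints:
	// var benches = []bench{
	// 	bench{"3Nodes-AllToAll-OneAtATime", 3, 100, allToAll, oneAtATime},
	// }
}
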
61 changes: 30 additions & 31 deletions benchmarks_test.go
@@ -41,25 +41,24 @@ type runStats struct {
var benchmarkLog []runStats

type bench struct {
name string
nodeCount int
name string
nodeCount int
blockCount int
distFn distFunc
fetchFn fetchFunc
distFn distFunc
fetchFn fetchFunc
}

var benches = []bench {
var benches = []bench{
// Fetch from two seed nodes that both have all 100 blocks
// - request one at a time, in series
bench {"3Nodes-AllToAll-OneAtATime", 3, 100, allToAll, oneAtATime},
bench{"3Nodes-AllToAll-OneAtATime", 3, 100, allToAll, oneAtATime},
// - request all 100 with a single GetBlocks() call
bench {"3Nodes-AllToAll-BigBatch", 3, 100, allToAll, batchFetchAll},
bench{"3Nodes-AllToAll-BigBatch", 3, 100, allToAll, batchFetchAll},

// Fetch from two seed nodes, one at a time, where:
// - node A has blocks 0 - 74
// - node B has blocks 25 - 99
bench {"3Nodes-Overlap1-OneAtATime", 3, 100, overlap1, oneAtATime},

bench{"3Nodes-Overlap1-OneAtATime", 3, 100, overlap1, oneAtATime},

// Fetch from two seed nodes, where:
// - node A has even blocks
@@ -68,38 +67,38 @@ var benches = []bench {

// - request one at a time, in series
// * times out every time potential-threshold reaches 1.0
bench {"3Nodes-Overlap3-OneAtATime", 3, 100, overlap2, oneAtATime},
bench{"3Nodes-Overlap3-OneAtATime", 3, 100, overlap2, oneAtATime},
// - request 10 at a time, in series
bench {"3Nodes-Overlap3-BatchBy10", 3, 100, overlap2, batchFetchBy10},
bench{"3Nodes-Overlap3-BatchBy10", 3, 100, overlap2, batchFetchBy10},
// - request all 100 in parallel as individual GetBlock() calls
bench {"3Nodes-Overlap3-AllConcurrent", 3, 100, overlap2, fetchAllConcurrent},
bench{"3Nodes-Overlap3-AllConcurrent", 3, 100, overlap2, fetchAllConcurrent},
// - request all 100 with a single GetBlocks() call
bench {"3Nodes-Overlap3-BigBatch", 3, 100, overlap2, batchFetchAll},
bench{"3Nodes-Overlap3-BigBatch", 3, 100, overlap2, batchFetchAll},
// - request 1, then 10, then 89 blocks (similar to how IPFS would fetch a file)
bench {"3Nodes-Overlap3-UnixfsFetch", 3, 100, overlap2, unixfsFileFetch},
bench{"3Nodes-Overlap3-UnixfsFetch", 3, 100, overlap2, unixfsFileFetch},

// Fetch from nine seed nodes, all nodes have all blocks
// - request one at a time, in series
bench {"10Nodes-AllToAll-OneAtATime", 10, 100, allToAll, oneAtATime},
bench{"10Nodes-AllToAll-OneAtATime", 10, 100, allToAll, oneAtATime},
// - request 10 at a time, in series
bench {"10Nodes-AllToAll-BatchFetchBy10", 10, 100, allToAll, batchFetchBy10},
bench{"10Nodes-AllToAll-BatchFetchBy10", 10, 100, allToAll, batchFetchBy10},
// - request all 100 with a single GetBlocks() call
bench {"10Nodes-AllToAll-BigBatch", 10, 100, allToAll, batchFetchAll},
bench{"10Nodes-AllToAll-BigBatch", 10, 100, allToAll, batchFetchAll},
// - request all 100 in parallel as individual GetBlock() calls
bench {"10Nodes-AllToAll-AllConcurrent", 10, 100, allToAll, fetchAllConcurrent},
bench{"10Nodes-AllToAll-AllConcurrent", 10, 100, allToAll, fetchAllConcurrent},
// - request 1, then 10, then 89 blocks (similar to how IPFS would fetch a file)
bench {"10Nodes-AllToAll-UnixfsFetch", 10, 100, allToAll, unixfsFileFetch},
bench{"10Nodes-AllToAll-UnixfsFetch", 10, 100, allToAll, unixfsFileFetch},

// Fetch from nine seed nodes, blocks are distributed randomly across all nodes (no dups)
// - request one at a time, in series
bench {"10Nodes-OnePeerPerBlock-OneAtATime", 10, 100, onePeerPerBlock, oneAtATime},
bench{"10Nodes-OnePeerPerBlock-OneAtATime", 10, 100, onePeerPerBlock, oneAtATime},
// - request all 100 with a single GetBlocks() call
bench {"10Nodes-OnePeerPerBlock-BigBatch", 10, 100, onePeerPerBlock, batchFetchAll},
bench{"10Nodes-OnePeerPerBlock-BigBatch", 10, 100, onePeerPerBlock, batchFetchAll},
// - request 1, then 10, then 89 blocks (similar to how IPFS would fetch a file)
bench {"10Nodes-OnePeerPerBlock-UnixfsFetch", 10, 100, onePeerPerBlock, unixfsFileFetch},
bench{"10Nodes-OnePeerPerBlock-UnixfsFetch", 10, 100, onePeerPerBlock, unixfsFileFetch},

// Fetch from 19 seed nodes, all nodes have all blocks, fetch all 200 blocks with a single GetBlocks() call
bench {"200Nodes-AllToAll-BigBatch", 200, 20, allToAll, batchFetchAll},
bench{"200Nodes-AllToAll-BigBatch", 200, 20, allToAll, batchFetchAll},
}

func BenchmarkDups2Nodes(b *testing.B) {
@@ -118,7 +117,7 @@ func BenchmarkDups2Nodes(b *testing.B) {
}

func printResults(rs []runStats) {
nameOrder := make([]string, 0, 0)
nameOrder := make([]string, 0)
names := make(map[string]struct{})
for i := 0; i < len(rs); i++ {
if _, ok := names[rs[i].Name]; !ok {
@@ -147,22 +146,22 @@ func printResults(rs []runStats) {
rcvd /= float64(count)
dups /= float64(count)

label := fmt.Sprintf("%s (%d runs / %.2fs):", name, count, elpd / 1000000000.0)
label := fmt.Sprintf("%s (%d runs / %.2fs):", name, count, elpd/1000000000.0)
fmt.Printf("%-75s %s / sent %d / recv %d / dups %d\n",
label,
fmtDuration(time.Duration(int64(math.Round(elpd / float64(count))))),
fmtDuration(time.Duration(int64(math.Round(elpd/float64(count))))),
int64(math.Round(sent)), int64(math.Round(rcvd)), int64(math.Round(dups)))
}

// b.Logf("%d runs: sent %f / recv %f / dups %f\n", count, sent, rcvd, dups)
}

func fmtDuration(d time.Duration) string {
d = d.Round(time.Millisecond)
s := d / time.Second
d -= s * time.Second
ms := d / time.Millisecond
return fmt.Sprintf("%d.%03ds", s, ms)
d = d.Round(time.Millisecond)
s := d / time.Second
d -= s * time.Second
ms := d / time.Millisecond
return fmt.Sprintf("%d.%03ds", s, ms)
}

const fastSpeed = 60 * time.Millisecond
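
As an aside (not part of the change), the fmtDuration helper reflowed above prints a duration as whole seconds plus a zero-padded millisecond fraction. A minimal usage sketch, assuming the function body shown in the hunk:

package main

import (
	"fmt"
	"time"
)

// fmtDuration as it appears in benchmarks_test.go above.
func fmtDuration(d time.Duration) string {
	d = d.Round(time.Millisecond)
	s := d / time.Second
	d -= s * time.Second
	ms := d / time.Millisecond
	return fmt.Sprintf("%d.%03ds", s, ms)
}

func main() {
	// 1.234567s rounds to 1.235s at millisecond precision.
	fmt.Println(fmtDuration(1234567 * time.Microsecond)) // "1.235s"
}
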
12 changes: 6 additions & 6 deletions decision/engine.go
@@ -9,15 +9,15 @@ import (

"github.com/google/uuid"
bsmsg "github.com/ipfs/go-bitswap/message"
pb "github.com/ipfs/go-bitswap/message/pb"
wantlist "github.com/ipfs/go-bitswap/wantlist"
wl "github.com/ipfs/go-bitswap/wantlist"
cid "github.com/ipfs/go-cid"
bstore "github.com/ipfs/go-ipfs-blockstore"
logging "github.com/ipfs/go-log"
"github.com/ipfs/go-peertaskqueue"
"github.com/ipfs/go-peertaskqueue/peertask"
peer "github.com/libp2p/go-libp2p-core/peer"
pb "github.com/ipfs/go-bitswap/message/pb"
wantlist "github.com/ipfs/go-bitswap/wantlist"
)

// TODO consider taking responsibility for other types of requests. For
@@ -212,9 +212,9 @@ func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) {
entry, ok := l.WantListContains(c)
l.lk.Unlock()

// if ok {
// log.Debugf("wantlist has %s: WantHave %t / SendDontHave: %t", c.String()[2:8], entry.WantHave, entry.SendDontHave)
// }
// if ok {
// log.Debugf("wantlist has %s: WantHave %t / SendDontHave: %t", c.String()[2:8], entry.WantHave, entry.SendDontHave)
// }
// If the remote peer wants HAVE or DONT_HAVE messages
has := true
if ok && (entry.WantType == wantlist.WantType_Have || entry.SendDontHave) {
@@ -226,7 +226,7 @@ func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) {

// If we have the block, and the remote peer asked for a HAVE
if has && entry.WantType == wantlist.WantType_Have {
// log.Debugf("%s: Have", c.String()[2:8])
// log.Debugf("%s: Have", c.String()[2:8])
msg.AddHave(c)
}
// If we don't have the block, and the remote peer asked for a DONT_HAVE
22 changes: 11 additions & 11 deletions message/message.go
@@ -207,9 +207,9 @@ func (m *impl) addEntry(c cid.Cid, priority int, cancel bool, wantType wantlist.
} else {
m.wantlist[c] = &Entry{
Entry: wantlist.Entry{
Cid: c,
Priority: priority,
WantType: wantType,
Cid: c,
Priority: priority,
WantType: wantType,
SendDontHave: sendDontHave,
},
Cancel: cancel,
@@ -261,10 +261,10 @@ func (m *impl) ToProtoV0() *pb.Message {
pbm.Wantlist.Entries = make([]pb.Message_Wantlist_Entry, 0, len(m.wantlist))
for _, e := range m.wantlist {
pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, pb.Message_Wantlist_Entry{
Block: e.Cid.Bytes(),
Priority: int32(e.Priority),
Cancel: e.Cancel,
WantType: go2pb(e.WantType),
Block: e.Cid.Bytes(),
Priority: int32(e.Priority),
Cancel: e.Cancel,
WantType: go2pb(e.WantType),
SendDontHave: e.SendDontHave,
})
}
@@ -283,10 +283,10 @@ func (m *impl) ToProtoV1() *pb.Message {
pbm.Wantlist.Entries = make([]pb.Message_Wantlist_Entry, 0, len(m.wantlist))
for _, e := range m.wantlist {
pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, pb.Message_Wantlist_Entry{
Block: e.Cid.Bytes(),
Priority: int32(e.Priority),
Cancel: e.Cancel,
WantType: go2pb(e.WantType),
Block: e.Cid.Bytes(),
Priority: int32(e.Priority),
Cancel: e.Cancel,
WantType: go2pb(e.WantType),
SendDontHave: e.SendDontHave,
})
}
11 changes: 6 additions & 5 deletions peerbroker/peerbroker.go
@@ -4,13 +4,13 @@ import (
"context"
"fmt"

logging "github.com/ipfs/go-log"
// logging "github.com/ipfs/go-log"

cid "github.com/ipfs/go-cid"
peer "github.com/libp2p/go-libp2p-core/peer"
)

var log = logging.Logger("bitswap")
// var log = logging.Logger("bitswap")

type WantSource interface {
MatchWantPeer([]peer.ID) *Want
@@ -31,10 +31,10 @@ type message interface {
}

type Want struct {
Cid cid.Cid
Cid cid.Cid
WantHaves []cid.Cid
Peer peer.ID
Ses uint64
Peer peer.ID
Ses uint64
}

type PeerBroker struct {
@@ -126,6 +126,7 @@ func (pb *PeerBroker) run() {
}

const peerBatchSize = 16

func (pb *PeerBroker) checkMatch() {
batches := make(map[uint64]map[peer.ID][]Want)

34 changes: 17 additions & 17 deletions session/session.go
@@ -51,9 +51,9 @@ type RequestSplitter interface {
}

type rcvFrom struct {
from peer.ID
ks []cid.Cid
haves []cid.Cid
from peer.ID
ks []cid.Cid
haves []cid.Cid
dontHaves []cid.Cid
}

@@ -67,8 +67,8 @@ type Session struct {
pm PeerManager
srs RequestSplitter

sw sessionWants
pb *bspb.PeerBroker
sw sessionWants
pb *bspb.PeerBroker
peers *peer.Set

// channels
@@ -151,8 +151,8 @@ func (s *Session) ReceiveFrom(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontH
size := s.peers.Size()
s.peers.Add(from)
newSize := s.peers.Size()
if (newSize > size) {
log.Infof("Ses%d: Added %d new peers to session: %d peers\n", s.id, newSize - size, newSize)
if newSize > size {
log.Infof("Ses%d: Added %d new peers to session: %d peers\n", s.id, newSize-size, newSize)
}
}

@@ -198,10 +198,10 @@ func (s *Session) MatchWantPeer(ps []peer.ID) *bspb.Want {
s.pm.RecordPeerRequests([]peer.ID{p}, []cid.Cid{c})

return &bspb.Want{
Cid: c,
Cid: c,
WantHaves: wh,
Peer: p,
Ses: s.id,
Peer: p,
Ses: s.id,
}
}

@@ -371,7 +371,7 @@ func (s *Session) handleIncoming(ctx context.Context, rcv rcvFrom) {
// If we didn't get any blocks, but we did get some HAVEs, we must have
// discovered at least one peer by now, so signal the PeerBroker to
// ask us if we have wants
s.pb.WantAvailable()
s.pb.WantAvailable()
}
}

@@ -391,7 +391,7 @@ func (s *Session) updateReceiveCounters(ctx context.Context, rcv rcvFrom) {
s.recentWasUnique = s.recentWasUnique[poplen:]
}
// fmt.Println("recentWasUnique", s.recentWasUnique)
if (len(s.recentWasUnique) > 16) {
if len(s.recentWasUnique) > 16 {
unqCount := 1
dupCount := 1
for _, u := range s.recentWasUnique {
@@ -443,11 +443,11 @@ func (s *Session) wantBlocks(ctx context.Context, newks []cid.Cid) {

// peers := s.pm.GetOptimizedPeers()
// if len(peers) > 0 {
// splitRequests := s.srs.SplitRequest(peers, ks)
// for _, splitRequest := range splitRequests {
// s.pm.RecordPeerRequests(splitRequest.Peers, splitRequest.Keys)
// s.wm.WantBlocks(ctx, splitRequest.Keys, splitRequest.Peers, s.id)
// }
// splitRequests := s.srs.SplitRequest(peers, ks)
// for _, splitRequest := range splitRequests {
// s.pm.RecordPeerRequests(splitRequest.Peers, splitRequest.Keys)
// s.wm.WantBlocks(ctx, splitRequest.Keys, splitRequest.Peers, s.id)
// }
if s.peers.Size() > 0 {
log.Infof("Ses%d: WantAvailable()\n", s.id)
s.pb.WantAvailable()
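
The remaining five changed files are collapsed in this view. As a closing aside, a small standalone checker (hypothetical, not part of go-bitswap) can verify that files match gofmt's canonical output, which is the property this commit restores:

package main

import (
	"bytes"
	"fmt"
	"go/format"
	"log"
	"os"
)

// Report any file passed on the command line whose contents differ from
// gofmt's canonical formatting.
func main() {
	for _, path := range os.Args[1:] {
		src, err := os.ReadFile(path)
		if err != nil {
			log.Fatal(err)
		}
		formatted, err := format.Source(src)
		if err != nil {
			log.Fatal(err)
		}
		if !bytes.Equal(src, formatted) {
			fmt.Printf("%s: not gofmt-formatted\n", path)
		}
	}
}
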
