p2p: initialize maps with known size (#27229)
commit ae7db289b8
parent a742943c78
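The commit applies one idiom throughout: when the final number of map entries is known before the loop that fills the map, pass that count as the capacity hint to make, so the runtime allocates enough buckets up front instead of growing the map incrementally. A minimal sketch of the pattern, assuming a hypothetical node type and helper (not part of this commit):

package main

import "fmt"

type node struct{ id string }

// nodesByID builds a lookup map from a slice whose length is known in advance.
// The capacity hint len(nodes) lets the map allocate once rather than rehash
// as entries are inserted.
func nodesByID(nodes []*node) map[string]*node {
	set := make(map[string]*node, len(nodes))
	for _, n := range nodes {
		set[n.id] = n
	}
	return set
}

func main() {
	ns := []*node{{id: "a"}, {id: "b"}}
	fmt.Println(len(nodesByID(ns))) // 2
}

The hunks below make the same change at each call site: only the make call gains a size argument; behavior is unchanged.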
@@ -584,7 +584,7 @@ func (d *dialTestDialer) Dial(ctx context.Context, n *enode.Node) (net.Conn, err
 // waitForDials waits for calls to Dial with the given nodes as argument.
 // Those calls will be held blocking until completeDials is called with the same nodes.
 func (d *dialTestDialer) waitForDials(nodes []*enode.Node) error {
-	waitset := make(map[enode.ID]*enode.Node)
+	waitset := make(map[enode.ID]*enode.Node, len(nodes))
 	for _, n := range nodes {
 		waitset[n.ID()] = n
 	}
@@ -175,7 +175,7 @@ func (t *pingRecorder) RequestENR(n *enode.Node) (*enode.Node, error) {
 }
 
 func hasDuplicates(slice []*node) bool {
-	seen := make(map[enode.ID]bool)
+	seen := make(map[enode.ID]bool, len(slice))
 	for i, e := range slice {
 		if e == nil {
 			panic(fmt.Sprintf("nil *Node at %d", i))
@@ -198,7 +198,7 @@ func TestUDPv5_findnodeHandling(t *testing.T) {
 }
 
 func (test *udpV5Test) expectNodes(wantReqID []byte, wantTotal uint8, wantNodes []*enode.Node) {
-	nodeSet := make(map[enode.ID]*enr.Record)
+	nodeSet := make(map[enode.ID]*enr.Record, len(wantNodes))
 	for _, n := range wantNodes {
 		nodeSet[n.ID()] = n.Record()
 	}
@@ -439,7 +439,7 @@ func testNodes(keys []*ecdsa.PrivateKey) []*enode.Node {
 type mapResolver map[string]string
 
 func newMapResolver(maps ...map[string]string) mapResolver {
-	mr := make(mapResolver)
+	mr := make(mapResolver, len(maps))
 	for _, m := range maps {
 		mr.add(m)
 	}
@@ -75,7 +75,7 @@ func checkNodes(t *testing.T, nodes []*Node, wantLen int) {
 		t.Errorf("slice has %d nodes, want %d", len(nodes), wantLen)
 		return
 	}
-	seen := make(map[ID]bool)
+	seen := make(map[ID]bool, len(nodes))
 	for i, e := range nodes {
 		if e == nil {
 			t.Errorf("nil node at index %d", i)
@@ -231,7 +231,7 @@ func testMixerClose(t *testing.T) {
 }
 
 func idPrefixDistribution(nodes []*Node) map[uint32]int {
-	d := make(map[uint32]int)
+	d := make(map[uint32]int, len(nodes))
 	for _, node := range nodes {
 		id := node.ID()
 		d[binary.BigEndian.Uint32(id[:4])]++
@@ -273,11 +273,11 @@ func testSeedQuery() error {
 
 	// Retrieve the entire batch and check for duplicates
 	seeds := db.QuerySeeds(len(nodeDBSeedQueryNodes)*2, time.Hour)
-	have := make(map[ID]struct{})
+	have := make(map[ID]struct{}, len(seeds))
 	for _, seed := range seeds {
 		have[seed.ID()] = struct{}{}
 	}
-	want := make(map[ID]struct{})
+	want := make(map[ID]struct{}, len(nodeDBSeedQueryNodes[1:]))
 	for _, seed := range nodeDBSeedQueryNodes[1:] {
 		want[seed.node.ID()] = struct{}{}
 	}
@@ -329,7 +329,7 @@ func (t *Trackers) MeanCapacities() map[uint64]float64 {
 // meanCapacities is the internal lockless version of MeanCapacities used for
 // debug logging.
 func (t *Trackers) meanCapacities() map[uint64]float64 {
-	capacities := make(map[uint64]float64)
+	capacities := make(map[uint64]float64, len(t.trackers))
 	for _, tt := range t.trackers {
 		tt.lock.RLock()
 		for key, val := range tt.capacity {
@@ -78,7 +78,7 @@ func (it *IPTracker) PredictEndpoint() string {
 	it.gcStatements(it.clock.Now())
 
 	// The current strategy is simple: find the endpoint with most statements.
-	counts := make(map[string]int)
+	counts := make(map[string]int, len(it.statements))
 	maxcount, max := 0, ""
 	for _, s := range it.statements {
 		c := counts[s.endpoint] + 1
@@ -333,7 +333,7 @@ func NewNodeStateMachine(db ethdb.KeyValueStore, dbKey []byte, clock mclock.Cloc
 		fields: make([]*fieldInfo, len(setup.fields)),
 	}
 	ns.opWait = sync.NewCond(&ns.lock)
-	stateNameMap := make(map[string]int)
+	stateNameMap := make(map[string]int, len(setup.flags))
 	for index, flag := range setup.flags {
 		if _, ok := stateNameMap[flag.name]; ok {
 			panic("Node state flag name collision: " + flag.name)
@@ -343,7 +343,7 @@ func NewNodeStateMachine(db ethdb.KeyValueStore, dbKey []byte, clock mclock.Cloc
 			ns.saveFlags |= bitMask(1) << uint(index)
 		}
 	}
-	fieldNameMap := make(map[string]int)
+	fieldNameMap := make(map[string]int, len(setup.fields))
 	for index, field := range setup.fields {
 		if _, ok := fieldNameMap[field.name]; ok {
 			panic("Node field name collision: " + field.name)
@@ -510,7 +510,7 @@ func (p *Peer) Info() *PeerInfo {
 		ID:        p.ID().String(),
 		Name:      p.Fullname(),
 		Caps:      caps,
-		Protocols: make(map[string]interface{}),
+		Protocols: make(map[string]interface{}, len(p.running)),
 	}
 	if p.Node().Seq() > 0 {
 		info.ENR = p.Node().String()