build: update to golangci-lint 1.61.0 (#30587)

Changelog: https://golangci-lint.run/product/changelog/#1610 

Removes `exportloopref` (no longer needed now that Go 1.22 scopes loop variables per iteration) and replaces it
with `copyloopvar`, which checks for the opposite problem: per-iteration loop-variable copies that are now redundant.
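A minimal illustration (not from this diff; names are made up) of the pattern `copyloopvar` now reports and `exportloopref` used to demand:

```go
package example

import "testing"

func TestLoop(t *testing.T) {
	cases := []struct{ name, in, want string }{{"noop", "x", "x"}}
	for _, tc := range cases {
		tc := tc // required before Go 1.22 for parallel subtests; copyloopvar flags the copy as redundant now
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			if got := tc.in; got != tc.want {
				t.Errorf("got %q, want %q", got, tc.want)
			}
		})
	}
}
```

Most of the hunks below simply delete that extra `x := x` line.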

Also adds: 
- `durationcheck`
- `gocheckcompilerdirectives`
- `reassign`
- `mirror`
- `tenv`
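
Quick sketch (illustrative only, not part of this change) of what two of the added linters catch: `durationcheck` flags multiplying two `time.Duration` values, and `tenv` prefers `t.Setenv` over `os.Setenv` in tests.

```go
package example

import (
	"testing"
	"time"
)

// durationcheck: d is already a Duration, so d * time.Second is almost always a unit bug.
func timeout(d time.Duration) time.Duration {
	return d * time.Second // flagged by durationcheck
}

// tenv: t.Setenv scopes the variable to the test and restores it afterwards.
func TestWithEnv(t *testing.T) {
	t.Setenv("EXAMPLE_VAR", "1") // preferred over os.Setenv in tests
	_ = timeout(2 * time.Second)
}
```

Of the others, `mirror` suggests the string/byte mirrored API (the `strings.Builder.WriteString` change further down is that flavor of rewrite), `gocheckcompilerdirectives` validates `//go:` directives, and `reassign` reports reassignment of exported package-level variables.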

---------

Co-authored-by: Marius van der Wijden <m.vanderwijden@live.de>
Martin HS, 2024-10-14 19:25:22 +02:00 (committed by GitHub)
commit 5adc314817, parent f4dc7530b1
45 changed files with 52 additions and 100 deletions

View File

@@ -21,10 +21,14 @@ linters:
  - staticcheck
  - bidichk
  - durationcheck
- - exportloopref
+ - copyloopvar
  - whitespace
  - revive # only certain checks enabled
+ - durationcheck
+ - gocheckcompilerdirectives
+ - reassign
+ - mirror
+ - tenv
  ### linters we tried and will not be using:
  ###
  # - structcheck # lots of false positives

View File

@@ -1199,7 +1199,6 @@ func TestUnpackRevert(t *testing.T) {
  {"4e487b7100000000000000000000000000000000000000000000000000000000000000ff", "unknown panic code: 0xff", nil},
  }
  for index, c := range cases {
- index, c := index, c
  t.Run(fmt.Sprintf("case %d", index), func(t *testing.T) {
  t.Parallel()
  got, err := UnpackRevert(common.Hex2Bytes(c.input))

View File

@@ -252,7 +252,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
  }
  // Parse library references.
  for pattern, name := range libs {
- matched, err := regexp.Match("__\\$"+pattern+"\\$__", []byte(contracts[types[i]].InputBin))
+ matched, err := regexp.MatchString("__\\$"+pattern+"\\$__", contracts[types[i]].InputBin)
  if err != nil {
  log.Error("Could not search for pattern", "pattern", pattern, "contract", contracts[types[i]], "err", err)
  }

View File

@@ -331,7 +331,6 @@ func TestEventTupleUnpack(t *testing.T) {
  for _, tc := range testCases {
  assert := assert.New(t)
- tc := tc
  t.Run(tc.name, func(t *testing.T) {
  err := unpackTestEventData(tc.dest, tc.data, tc.jsonLog, assert)
  if tc.error == "" {

View File

@ -34,7 +34,6 @@ import (
func TestPack(t *testing.T) { func TestPack(t *testing.T) {
t.Parallel() t.Parallel()
for i, test := range packUnpackTests { for i, test := range packUnpackTests {
i, test := i, test
t.Run(strconv.Itoa(i), func(t *testing.T) { t.Run(strconv.Itoa(i), func(t *testing.T) {
t.Parallel() t.Parallel()
encb, err := hex.DecodeString(test.packed) encb, err := hex.DecodeString(test.packed)

View File

@ -172,7 +172,6 @@ var reflectTests = []reflectTest{
func TestReflectNameToStruct(t *testing.T) { func TestReflectNameToStruct(t *testing.T) {
t.Parallel() t.Parallel()
for _, test := range reflectTests { for _, test := range reflectTests {
test := test
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
t.Parallel() t.Parallel()
m, err := mapArgNamesToStructFields(test.args, reflect.ValueOf(test.struc)) m, err := mapArgNamesToStructFields(test.args, reflect.ValueOf(test.struc))

View File

@@ -137,7 +137,6 @@ func TestMakeTopics(t *testing.T) {
  },
  }
  for _, tt := range tests {
- tt := tt
  t.Run(tt.name, func(t *testing.T) {
  t.Parallel()
  got, err := MakeTopics(tt.args.query...)
@@ -373,7 +372,6 @@ func TestParseTopics(t *testing.T) {
  tests := setupTopicsTests()
  for _, tt := range tests {
- tt := tt
  t.Run(tt.name, func(t *testing.T) {
  t.Parallel()
  createObj := tt.args.createObj()
@@ -393,7 +391,6 @@ func TestParseTopicsIntoMap(t *testing.T) {
  tests := setupTopicsTests()
  for _, tt := range tests {
- tt := tt
  t.Run(tt.name, func(t *testing.T) {
  t.Parallel()
  outMap := make(map[string]interface{})

View File

@@ -389,7 +389,6 @@ func TestMethodMultiReturn(t *testing.T) {
  "Can not unpack into a slice with wrong types",
  }}
  for _, tc := range testCases {
- tc := tc
  t.Run(tc.name, func(t *testing.T) {
  require := require.New(t)
  err := abi.UnpackIntoInterface(tc.dest, "multi", data)
@@ -947,7 +946,7 @@ func TestOOMMaliciousInput(t *testing.T) {
  }
  encb, err := hex.DecodeString(test.enc)
  if err != nil {
- t.Fatalf("invalid hex: %s" + test.enc)
+ t.Fatalf("invalid hex: %s", test.enc)
  }
  _, err = abi.Methods["method"].Outputs.UnpackValues(encb)
  if err == nil {

View File

@@ -56,37 +56,37 @@ f45af3e1434175ff85620a74c07fb41d6844655f1f2cd2389c5fca6de000f58c go1.23.2.freeb
  f626cdd92fc21a88b31c1251f419c17782933a42903db87a174ce74eeecc66a9 go1.23.2.linux-arm64.tar.gz
  fa70d39ddeb6b55241a30b48d7af4e681c6a7d7104e8326c3bc1b12a75e091cc go1.23.2.solaris-amd64.tar.gz
- # version:golangci 1.59.0
+ # version:golangci 1.61.0
  # https://github.com/golangci/golangci-lint/releases/
- # https://github.com/golangci/golangci-lint/releases/download/v1.59.0/
+ # https://github.com/golangci/golangci-lint/releases/download/v1.61.0/
- 418acf7e255ddc0783e97129c9b03d9311b77826a5311d425a01c708a86417e7 golangci-lint-1.59.0-darwin-amd64.tar.gz
+ 5c280ef3284f80c54fd90d73dc39ca276953949da1db03eb9dd0fbf868cc6e55 golangci-lint-1.61.0-darwin-amd64.tar.gz
- 5f6a1d95a6dd69f6e328eb56dd311a38e04cfab79a1305fbf4957f4e203f47b6 golangci-lint-1.59.0-darwin-arm64.tar.gz
+ 544334890701e4e04a6e574bc010bea8945205c08c44cced73745a6378012d36 golangci-lint-1.61.0-darwin-arm64.tar.gz
- 8899bf589185d49f747f3e5db9f0bde8a47245a100c64a3dd4d65e8e92cfc4f2 golangci-lint-1.59.0-freebsd-386.tar.gz
+ e885a6f561092055930ebd298914d80e8fd2e10d2b1e9942836c2c6a115301fa golangci-lint-1.61.0-freebsd-386.tar.gz
- 658212f138d9df2ac89427e22115af34bf387c0871d70f2a25101718946a014f golangci-lint-1.59.0-freebsd-amd64.tar.gz
+ b13f6a3f11f65e7ff66b734d7554df3bbae0f485768848424e7554ed289e19c2 golangci-lint-1.61.0-freebsd-amd64.tar.gz
- 4c6395ea40f314d3b6fa17d8997baab93464d5d1deeaab513155e625473bd03a golangci-lint-1.59.0-freebsd-armv6.tar.gz
+ cd8e7bbe5b8f33ed1597aa1cc588da96a3b9f22e1b9ae60d93511eae1a0ee8c5 golangci-lint-1.61.0-freebsd-armv6.tar.gz
- ff37da4fbaacdb6bbae70fdbdbb1ba932a859956f788c82822fa06bef5b7c6b3 golangci-lint-1.59.0-freebsd-armv7.tar.gz
+ 7ade524dbd88bd250968f45e190af90e151fa5ee63dd6aa7f7bb90e8155db61d golangci-lint-1.61.0-freebsd-armv7.tar.gz
- 439739469ed2bda182b1ec276d40c40e02f195537f78e3672996741ad223d6b6 golangci-lint-1.59.0-illumos-amd64.tar.gz
+ 0fe3cd8a1ed8d9f54f48670a5af3df056d6040d94017057f0f4d65c930660ad9 golangci-lint-1.61.0-illumos-amd64.tar.gz
- 940801d46790e40d0a097d8fee34e2606f0ef148cd039654029b0b8750a15ed6 golangci-lint-1.59.0-linux-386.tar.gz
+ b463fc5053a612abd26393ebaff1d85d7d56058946f4f0f7bf25ed44ea899415 golangci-lint-1.61.0-linux-386.tar.gz
- 3b14a439f33c4fff83dbe0349950d984042b9a1feb6c62f82787b598fc3ab5f4 golangci-lint-1.59.0-linux-amd64.tar.gz
+ 77cb0af99379d9a21d5dc8c38364d060e864a01bd2f3e30b5e8cc550c3a54111 golangci-lint-1.61.0-linux-amd64.tar.gz
- c57e6c0b0fa03089a2611dceddd5bc5d206716cccdff8b149da8baac598719a1 golangci-lint-1.59.0-linux-arm64.tar.gz
+ af60ac05566d9351615cb31b4cc070185c25bf8cbd9b09c1873aa5ec6f3cc17e golangci-lint-1.61.0-linux-arm64.tar.gz
- 93149e2d3b25ac754df9a23172403d8aa6d021a7e0d9c090a12f51897f68c9a0 golangci-lint-1.59.0-linux-armv6.tar.gz
+ 1f307f2fcc5d7d674062a967a0d83a7091e300529aa237ec6ad2b3dd14c897f5 golangci-lint-1.61.0-linux-armv6.tar.gz
- d10ac38239d9efee3ee87b55c96cdf3fa09e1a525babe3ffdaaf65ccc48cf3dc golangci-lint-1.59.0-linux-armv7.tar.gz
+ 3ad8cbaae75a547450844811300f99c4cd290277398e43d22b9eb1792d15af4c golangci-lint-1.61.0-linux-armv7.tar.gz
- 047338114b4f0d5f08f0fb9a397b03cc171916ed0960be7dfb355c2320cd5e9c golangci-lint-1.59.0-linux-loong64.tar.gz
+ 9be2ca67d961d7699079739cf6f7c8291c5183d57e34d1677de21ca19d0bd3ed golangci-lint-1.61.0-linux-loong64.tar.gz
- 5632df0f7f8fc03a80a266130faef0b5902d280cf60621f1b2bdc1aef6d97ee9 golangci-lint-1.59.0-linux-mips64.tar.gz
+ 90d005e1648115ebf0861b408eab9c936079a24763e883058b0a227cd3135d31 golangci-lint-1.61.0-linux-mips64.tar.gz
- 71dd638c82fa4439171e7126d2c7a32b5d103bfdef282cea40c83632cb3d1f4b golangci-lint-1.59.0-linux-mips64le.tar.gz
+ 6d2ed4f49407115460b8c10ccfc40fd177e0887a48864a2879dd16e84ba2a48c golangci-lint-1.61.0-linux-mips64le.tar.gz
- 6cf9ea0d34e91669948483f9ae7f07da319a879344373a1981099fbd890cde00 golangci-lint-1.59.0-linux-ppc64le.tar.gz
+ 633089589af5a58b7430afb6eee107d4e9c99e8d91711ddc219eb13a07e8d3b8 golangci-lint-1.61.0-linux-ppc64le.tar.gz
- af0205fa6fbab197cee613c359947711231739095d21b5c837086233b36ad971 golangci-lint-1.59.0-linux-riscv64.tar.gz
+ 4c1a097d9e0d1b4a8144dae6a1f5583a38d662f3bdc1498c4e954b6ed856be98 golangci-lint-1.61.0-linux-riscv64.tar.gz
- a9d2fb93f3c688ebccef94f5dc96c0b07c4d20bf6556cddebd8442159b0c80f6 golangci-lint-1.59.0-linux-s390x.tar.gz
+ 30581d3c987d287b7064617f1a2694143e10dffc40bc25be6636006ee82d7e1c golangci-lint-1.61.0-linux-s390x.tar.gz
- 68ab4c57a847b8ace9679887f2f8b2b6760e57ee29dcde8c3f40dd8bb2654fa2 golangci-lint-1.59.0-netbsd-386.tar.gz
+ 42530bf8100bd43c07f5efe6d92148ba6c5a7a712d510c6f24be85af6571d5eb golangci-lint-1.61.0-netbsd-386.tar.gz
- d277b8b435c19406d00de4d509eadf5a024a5782878332e9a1b7c02bb76e87a7 golangci-lint-1.59.0-netbsd-amd64.tar.gz
+ b8bb07c920f6601edf718d5e82ec0784fd590b0992b42b6ec18da99f26013ed4 golangci-lint-1.61.0-netbsd-amd64.tar.gz
- 83211656be8dcfa1545af4f92894409f412d1f37566798cb9460a526593ad62c golangci-lint-1.59.0-netbsd-arm64.tar.gz
+ 353a51527c60bd0776b0891b03f247c791986f625fca689d121972c624e54198 golangci-lint-1.61.0-netbsd-arm64.tar.gz
- 6c6866d28bf79fa9817a0f7d2b050890ed109cae80bdb4dfa39536a7226da237 golangci-lint-1.59.0-netbsd-armv6.tar.gz
+ 957a6272c3137910514225704c5dac0723b9c65eb7d9587366a997736e2d7580 golangci-lint-1.61.0-netbsd-armv6.tar.gz
- 11587566363bd03ca586b7df9776ccaed569fcd1f3489930ac02f9375b307503 golangci-lint-1.59.0-netbsd-armv7.tar.gz
+ a89eb28ff7f18f5cd52b914739360fa95cf2f643de4adeca46e26bec3a07e8d8 golangci-lint-1.61.0-netbsd-armv7.tar.gz
- 466181a8967bafa495e41494f93a0bec829c2cf715de874583b0460b3b8ae2b8 golangci-lint-1.59.0-windows-386.zip
+ d8d74c43600b271393000717a4ed157d7a15bb85bab7db2efad9b63a694d4634 golangci-lint-1.61.0-windows-386.zip
- 3317d8a87a99a49a0a1321d295c010790e6dbf43ee96b318f4b8bb23eae7a565 golangci-lint-1.59.0-windows-amd64.zip
+ e7bc2a81929a50f830244d6d2e657cce4f19a59aff49fa9000176ff34fda64ce golangci-lint-1.61.0-windows-amd64.zip
- b3af955c7fceac8220a36fc799e1b3f19d3b247d32f422caac5f9845df8f7316 golangci-lint-1.59.0-windows-arm64.zip
+ ed97c221596dd771e3dd9344872c140340bee2e819cd7a90afa1de752f1f2e0f golangci-lint-1.61.0-windows-arm64.zip
- 6f083c7d0c764e5a0e5bde46ee3e91ae357d80c194190fe1d9754392e9064c7e golangci-lint-1.59.0-windows-armv6.zip
+ 4b365233948b13d02d45928a5c390045e00945e919747b9887b5f260247541ae golangci-lint-1.61.0-windows-armv6.zip
- 3709b4dd425deadab27748778d08e03c0f804d7748f7dd5b6bb488d98aa031c7 golangci-lint-1.59.0-windows-armv7.zip
+ 595538fb64d152173959d28f6235227f9cd969a828e5af0c4e960d02af4ffd0e golangci-lint-1.61.0-windows-armv7.zip
  # This is the builder on PPA that will build Go itself (inception-y), don't modify!
  #

View File

@@ -100,7 +100,6 @@ func (c *Chain) AccountsInHashOrder() []state.DumpAccount {
  list := make([]state.DumpAccount, len(c.state))
  i := 0
  for addr, acc := range c.state {
- addr := addr
  list[i] = acc
  list[i].Address = &addr
  if len(acc.AddressHash) != 32 {

View File

@@ -286,7 +286,6 @@ a key before startingHash (wrong order). The server should return the first avai
  }
  for i, tc := range tests {
- tc := tc
  if i > 0 {
  t.Log("\n")
  }
@@ -429,7 +428,6 @@ of the test account. The server should return slots [2,3] (i.e. the 'next availa
  }
  for i, tc := range tests {
- tc := tc
  if i > 0 {
  t.Log("\n")
  }
@@ -526,7 +524,6 @@ func (s *Suite) TestSnapGetByteCodes(t *utesting.T) {
  }
  for i, tc := range tests {
- tc := tc
  if i > 0 {
  t.Log("\n")
  }
@@ -723,7 +720,6 @@ The server should reject the request.`,
  }
  for i, tc := range tests {
- tc := tc
  if i > 0 {
  t.Log("\n")
  }

View File

@@ -524,7 +524,7 @@ func TestT9n(t *testing.T) {
  ok, err := cmpJson(have, want)
  switch {
  case err != nil:
- t.Logf(string(have))
+ t.Log(string(have))
  t.Fatalf("test %d, json parsing failed: %v", i, err)
  case !ok:
  t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want))
@@ -659,7 +659,7 @@ func TestB11r(t *testing.T) {
  ok, err := cmpJson(have, want)
  switch {
  case err != nil:
- t.Logf(string(have))
+ t.Log(string(have))
  t.Fatalf("test %d, json parsing failed: %v", i, err)
  case !ok:
  t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want))

View File

@@ -113,7 +113,6 @@ func TestAccountImport(t *testing.T) {
  },
  }
  for _, test := range tests {
- test := test
  t.Run(test.name, func(t *testing.T) {
  t.Parallel()
  importAccountWithExpect(t, test.key, test.output)

View File

@@ -152,7 +152,7 @@ func remoteConsole(ctx *cli.Context) error {
  func ephemeralConsole(ctx *cli.Context) error {
  var b strings.Builder
  for _, file := range ctx.Args().Slice() {
- b.Write([]byte(fmt.Sprintf("loadScript('%s');", file)))
+ b.WriteString(fmt.Sprintf("loadScript('%s');", file))
  }
  utils.Fatalf(`The "js" command is deprecated. Please use the following instead:
geth --exec "%s" console`, b.String())

View File

@@ -170,7 +170,6 @@ func TestKeyID(t *testing.T) {
  {"third key", args{id: extractKeyId(gethPubKeys[2])}, "FD9813B2D2098484"},
  }
  for _, tt := range tests {
- tt := tt
  t.Run(tt.name, func(t *testing.T) {
  t.Parallel()
  if got := keyID(tt.args.id); got != tt.want {

View File

@@ -142,7 +142,7 @@ func dump(in *inStream, s *rlp.Stream, depth int, out io.Writer) error {
  s.List()
  defer s.ListEnd()
  if size == 0 {
- fmt.Fprintf(out, ws(depth)+"[]")
+ fmt.Fprint(out, ws(depth)+"[]")
  } else {
  fmt.Fprintln(out, ws(depth)+"[")
  for i := 0; ; i++ {

View File

@@ -56,7 +56,6 @@ func Test_SplitTagsFlag(t *testing.T) {
  },
  }
  for _, tt := range tests {
- tt := tt
  t.Run(tt.name, func(t *testing.T) {
  t.Parallel()
  if got := SplitTagsFlag(tt.args); !reflect.DeepEqual(got, tt.want) {

View File

@@ -66,7 +66,6 @@ func TestGetPassPhraseWithList(t *testing.T) {
  },
  }
  for _, tt := range tests {
- tt := tt
  t.Run(tt.name, func(t *testing.T) {
  t.Parallel()
  if got := GetPassPhraseWithList(tt.args.text, tt.args.confirmation, tt.args.index, tt.args.passwords); got != tt.want {

View File

@@ -121,7 +121,7 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
  // such as amount of used gas, the receipt roots and the state root itself.
  func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateDB, res *ProcessResult, stateless bool) error {
  if res == nil {
- return fmt.Errorf("nil ProcessResult value")
+ return errors.New("nil ProcessResult value")
  }
  header := block.Header()
  if block.GasUsed() != res.GasUsed {
@@ -150,7 +150,7 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
  return fmt.Errorf("invalid requests hash (remote: %x local: %x)", *header.RequestsHash, reqhash)
  }
  } else if res.Requests != nil {
- return fmt.Errorf("block has requests before prague fork")
+ return errors.New("block has requests before prague fork")
  }
  // Validate the state root against the received state root and throw
  // an error if they don't match.

View File

@@ -388,10 +388,10 @@ func TestBlockReceiptStorage(t *testing.T) {
  // Insert the receipt slice into the database and check presence
  WriteReceipts(db, hash, 0, receipts)
  if rs := ReadReceipts(db, hash, 0, 0, params.TestChainConfig); len(rs) == 0 {
- t.Fatalf("no receipts returned")
+ t.Fatal("no receipts returned")
  } else {
  if err := checkReceiptsRLP(rs, receipts); err != nil {
- t.Fatalf(err.Error())
+ t.Fatal(err)
  }
  }
  // Delete the body and ensure that the receipts are no longer returned (metadata can't be recomputed)
@@ -401,7 +401,7 @@ func TestBlockReceiptStorage(t *testing.T) {
  }
  // Ensure that receipts without metadata can be returned without the block body too
  if err := checkReceiptsRLP(ReadRawReceipts(db, hash, 0), receipts); err != nil {
- t.Fatalf(err.Error())
+ t.Fatal(err)
  }
  // Sanity check that body alone without the receipt is a full purge
  WriteBody(db, hash, 0, body)

View File

@@ -282,7 +282,6 @@ func (sf *subfetcher) schedule(keys [][]byte, read bool) error {
  // Append the tasks to the current queue
  sf.lock.Lock()
  for _, key := range keys {
- key := key // closure for the append below
  sf.tasks = append(sf.tasks, &subfetcherTask{read: read, key: key})
  }
  sf.lock.Unlock()

View File

@@ -546,9 +546,7 @@ func TestYParityJSONUnmarshalling(t *testing.T) {
  DynamicFeeTxType,
  BlobTxType,
  } {
- txType := txType
  for _, test := range tests {
- test := test
  t.Run(fmt.Sprintf("txType=%d: %s", txType, test.name), func(t *testing.T) {
  // Copy the base json
  testJson := maps.Clone(baseJson)

View File

@@ -56,7 +56,7 @@ func (e ErrStackUnderflow) Error() string {
  }
  func (e ErrStackUnderflow) Unwrap() error {
- return fmt.Errorf("stack underflow")
+ return errors.New("stack underflow")
  }
  // ErrStackOverflow wraps an evm error when the items on the stack exceeds
@@ -71,7 +71,7 @@ func (e ErrStackOverflow) Error() string {
  }
  func (e ErrStackOverflow) Unwrap() error {
- return fmt.Errorf("stack overflow")
+ return errors.New("stack overflow")
  }
  // ErrInvalidOpCode wraps an evm error when an invalid opcode is encountered.

View File

@@ -541,7 +541,6 @@ func (d *Downloader) spawnSync(fetchers []func() error) error {
  errc := make(chan error, len(fetchers))
  d.cancelWg.Add(len(fetchers))
  for _, fn := range fetchers {
- fn := fn
  go func() { defer d.cancelWg.Done(); errc <- fn() }()
  }
  // Wait for the first error, then terminate the others.

View File

@@ -273,7 +273,6 @@ func (api *FilterAPI) Logs(ctx context.Context, crit FilterCriteria) (*rpc.Subsc
  select {
  case logs := <-matchedLogs:
  for _, log := range logs {
- log := log
  notifier.Notify(rpcSub.ID, &log)
  }
  case <-rpcSub.Err(): // client send an unsubscribe request

View File

@@ -390,8 +390,6 @@ func testTransactionPropagation(t *testing.T, protocol uint) {
  }
  // Interconnect all the sink handlers with the source handler
  for i, sink := range sinks {
- sink := sink // Closure for goroutine below
  sourcePipe, sinkPipe := p2p.MsgPipe()
  defer sourcePipe.Close()
  defer sinkPipe.Close()

View File

@@ -93,8 +93,6 @@ type TxPool interface {
  func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2p.Protocol {
  protocols := make([]p2p.Protocol, 0, len(ProtocolVersions))
  for _, version := range ProtocolVersions {
- version := version // Closure
  protocols = append(protocols, p2p.Protocol{
  Name: ProtocolName,
  Version: version,

View File

@@ -85,8 +85,6 @@ type Backend interface {
  func MakeProtocols(backend Backend) []p2p.Protocol {
  protocols := make([]p2p.Protocol, len(ProtocolVersions))
  for i, version := range ProtocolVersions {
- version := version // Closure
  protocols[i] = p2p.Protocol{
  Name: ProtocolName,
  Version: version,

View File

@@ -345,7 +345,6 @@ func (task *accountTask) activeSubTasks() map[common.Hash][]*storageTask {
  last = task.res.hashes[len(task.res.hashes)-1]
  )
  for hash, subTasks := range task.SubTasks {
- subTasks := subTasks // closure
  if hash.Cmp(last) <= 0 {
  tasks[hash] = subTasks
  }
@@ -765,8 +764,6 @@ func (s *Syncer) loadSyncStatus() {
  }
  s.tasks = progress.Tasks
  for _, task := range s.tasks {
- task := task // closure for task.genBatch in the stacktrie writer callback
  // Restore the completed storages
  task.stateCompleted = make(map[common.Hash]struct{})
  for _, hash := range task.StorageCompleted {
@@ -790,8 +787,6 @@ func (s *Syncer) loadSyncStatus() {
  // Restore leftover storage tasks
  for accountHash, subtasks := range task.SubTasks {
  for _, subtask := range subtasks {
- subtask := subtask // closure for subtask.genBatch in the stacktrie writer callback
  subtask.genBatch = ethdb.HookedBatch{
  Batch: s.db.NewBatch(),
  OnPut: func(key []byte, value []byte) {

View File

@@ -96,7 +96,6 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) {
  if !strings.HasSuffix(file.Name(), ".json") {
  continue
  }
- file := file // capture range variable
  t.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(t *testing.T) {
  t.Parallel()
@@ -183,7 +182,6 @@ func BenchmarkTracers(b *testing.B) {
  if !strings.HasSuffix(file.Name(), ".json") {
  continue
  }
- file := file // capture range variable
  b.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(b *testing.B) {
  blob, err := os.ReadFile(filepath.Join("testdata", "call_tracer", file.Name()))
  if err != nil {

View File

@@ -151,7 +151,6 @@ func testFlatCallTracer(tracerName string, dirPath string, t *testing.T) {
  if !strings.HasSuffix(file.Name(), ".json") {
  continue
  }
- file := file // capture range variable
  t.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(t *testing.T) {
  t.Parallel()

View File

@@ -73,7 +73,6 @@ func testPrestateDiffTracer(tracerName string, dirPath string, t *testing.T) {
  if !strings.HasSuffix(file.Name(), ".json") {
  continue
  }
- file := file // capture range variable
  t.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(t *testing.T) {
  t.Parallel()

View File

@@ -458,7 +458,7 @@ func formatLogs(logs []StructLog) []StructLogRes {
  }
  formatted[index].Stack = &stack
  }
- if trace.ReturnData != nil && len(trace.ReturnData) > 0 {
+ if len(trace.ReturnData) > 0 {
  formatted[index].ReturnData = hexutil.Bytes(trace.ReturnData).String()
  }
  if trace.Memory != nil {

View File

@@ -17,15 +17,12 @@
  package flags
  import (
- "os"
  "os/user"
  "runtime"
  "testing"
  )
  func TestPathExpansion(t *testing.T) {
- t.Parallel()
  user, _ := user.Current()
  var tests map[string]string
@@ -53,7 +50,7 @@ func TestPathExpansion(t *testing.T) {
  }
  }
- os.Setenv(`DDDXXX`, `/tmp`)
+ t.Setenv(`DDDXXX`, `/tmp`)
  for test, expected := range tests {
  t.Run(test, func(t *testing.T) {
  t.Parallel()

View File

@@ -13,7 +13,7 @@ func TestRegistryMarshallJSON(t *testing.T) {
  r.Register("counter", NewCounter())
  enc.Encode(r)
  if s := b.String(); s != "{\"counter\":{\"count\":0}}\n" {
- t.Fatalf(s)
+ t.Fatal(s)
  }
  }

View File

@@ -244,7 +244,6 @@ func TestStartRPC(t *testing.T) {
  }
  for _, test := range tests {
- test := test
  t.Run(test.name, func(t *testing.T) {
  t.Parallel()

View File

@@ -513,7 +513,6 @@ func TestNodeRPCPrefix(t *testing.T) {
  }
  for _, test := range tests {
- test := test
  name := fmt.Sprintf("http=%s ws=%s", test.httpPrefix, test.wsPrefix)
  t.Run(name, func(t *testing.T) {
  cfg := &Config{

View File

@@ -522,7 +522,6 @@ func TestGzipHandler(t *testing.T) {
  }
  for _, test := range tests {
- test := test
  t.Run(test.name, func(t *testing.T) {
  srv := httptest.NewServer(newGzipHandler(test.handler))
  defer srv.Close()

View File

@@ -395,7 +395,6 @@ func TestTestVectorsV5(t *testing.T) {
  }
  for _, test := range tests {
- test := test
  t.Run(test.name, func(t *testing.T) {
  net := newHandshakeTest()
  defer net.close()

View File

@@ -412,7 +412,6 @@ outer:
  func (p *Peer) startProtocols(writeStart <-chan struct{}, writeErr chan<- error) {
  p.wg.Add(len(p.running))
  for _, proto := range p.running {
- proto := proto
  proto.closed = p.closed
  proto.wstart = writeStart
  proto.werr = writeErr

View File

@@ -307,7 +307,6 @@ func TestStreamReadBytes(t *testing.T) {
  }
  for _, test := range tests {
- test := test
  name := fmt.Sprintf("input_%s/size_%d", test.input, test.size)
  t.Run(name, func(t *testing.T) {
  s := NewStream(bytes.NewReader(unhex(test.input)), 0)

View File

@@ -51,7 +51,6 @@ var tests = []string{"uints", "nil", "rawvalue", "optional", "bigint", "uint256"
  func TestOutput(t *testing.T) {
  for _, test := range tests {
- test := test
  t.Run(test, func(t *testing.T) {
  inputFile := filepath.Join("testdata", test+".in.txt")
  outputFile := filepath.Join("testdata", test+".out.txt")

View File

@@ -776,7 +776,6 @@ func TestClientHTTP(t *testing.T) {
  wantResult = echoResult{"a", 1, new(echoArgs)}
  )
  for i := range results {
- i := i
  go func() {
  errc <- client.Call(&results[i], "test_echo", wantResult.String, wantResult.Int, wantResult.Args)
  }()

View File

@@ -143,7 +143,6 @@ func TestBlockNumberOrHash_WithNumber_MarshalAndUnmarshal(t *testing.T) {
  {"finalized", int64(FinalizedBlockNumber)},
  }
  for _, test := range tests {
- test := test
  t.Run(test.name, func(t *testing.T) {
  bnh := BlockNumberOrHashWithNumber(BlockNumber(test.number))
  marshalled, err := json.Marshal(bnh)

View File

@@ -104,7 +104,6 @@ func TestExecutionSpecState(t *testing.T) {
  func execStateTest(t *testing.T, st *testMatcher, test *StateTest) {
  for _, subtest := range test.Subtests() {
- subtest := subtest
  key := fmt.Sprintf("%s/%d", subtest.Fork, subtest.Index)
  // If -short flag is used, we don't execute all four permutations, only
@@ -244,14 +243,12 @@ func runBenchmarkFile(b *testing.B, path string) {
  return
  }
  for _, t := range m {
- t := t
  runBenchmark(b, &t)
  }
  }
  func runBenchmark(b *testing.B, t *StateTest) {
  for _, subtest := range t.Subtests() {
- subtest := subtest
  key := fmt.Sprintf("%s/%d", subtest.Fork, subtest.Index)
  b.Run(key, func(b *testing.B) {