all: replace fmt.Errorf() with errors.New() if no param required (#29126)

Branch: replace-fmt-errorf

Co-authored-by: yzb@example.cn <yzb@example.cn>
yzb authored 2024-02-29 17:56:46 +08:00, committed by GitHub
parent 28d55218f7
commit db4cf69166
23 changed files with 45 additions and 35 deletions
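The change is mechanical: wherever the message passed to fmt.Errorf is a fixed string with no format directives, the call is replaced by errors.New; calls that interpolate values (or wrap another error with %w) are left as they are. The sketch below is not part of the commit and uses a hypothetical checkName helper purely for illustration of the two idioms:

package main

import (
	"errors"
	"fmt"
)

// checkName is a hypothetical helper, not part of go-ethereum; it only
// illustrates when errors.New is enough and when fmt.Errorf is still needed.
func checkName(name string) error {
	if name == "" {
		// Fixed message, no format directives: errors.New is sufficient and
		// skips parsing a format string at the call site.
		return errors.New("name must not be empty")
	}
	if len(name) > 32 {
		// A dynamic value still requires fmt.Errorf (and %w when wrapping a cause).
		return fmt.Errorf("name too long: have %d characters, permitted 32", len(name))
	}
	return nil
}

func main() {
	fmt.Println(checkName(""))
	fmt.Println(checkName("a-name-that-is-definitely-longer-than-thirty-two-characters"))
}

errors.New makes it obvious the message is constant, and many Go linters flag fmt.Errorf calls that take no format arguments. The hunks below that keep fmt.Errorf, such as the "no era1 files found in %s" and blob-count checks, do interpolate values and are therefore unchanged.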

View File

@ -18,6 +18,7 @@ package main
import (
"encoding/json"
+ "errors"
"fmt"
"math/big"
"os"
@ -182,7 +183,7 @@ func open(ctx *cli.Context, epoch uint64) (*era.Era, error) {
// that the accumulator matches the expected value.
func verify(ctx *cli.Context) error {
if ctx.Args().Len() != 1 {
return fmt.Errorf("missing accumulators file")
return errors.New("missing accumulators file")
}
roots, err := readHashes(ctx.Args().First())
@ -203,7 +204,7 @@ func verify(ctx *cli.Context) error {
}
if len(entries) != len(roots) {
return fmt.Errorf("number of era1 files should match the number of accumulator hashes")
return errors.New("number of era1 files should match the number of accumulator hashes")
}
// Verify each epoch matches the expected root.
@ -308,7 +309,7 @@ func checkAccumulator(e *era.Era) error {
func readHashes(f string) ([]common.Hash, error) {
b, err := os.ReadFile(f)
if err != nil {
return nil, fmt.Errorf("unable to open accumulators file")
return nil, errors.New("unable to open accumulators file")
}
s := strings.Split(string(b), "\n")
// Remove empty last element, if present.

View File

@ -444,7 +444,7 @@ func importHistory(ctx *cli.Context) error {
return fmt.Errorf("no era1 files found in %s", dir)
}
if len(networks) > 1 {
return fmt.Errorf("multiple networks found, use a network flag to specify desired network")
return errors.New("multiple networks found, use a network flag to specify desired network")
}
network = networks[0]
}

View File

@ -245,7 +245,7 @@ func readList(filename string) ([]string, error) {
// starting from genesis.
func ImportHistory(chain *core.BlockChain, db ethdb.Database, dir string, network string) error {
if chain.CurrentSnapBlock().Number.BitLen() != 0 {
return fmt.Errorf("history import only supported when starting from genesis")
return errors.New("history import only supported when starting from genesis")
}
entries, err := era.ReadDir(dir, network)
if err != nil {

View File

@ -18,6 +18,7 @@ package txpool
import (
"crypto/sha256"
+ "errors"
"fmt"
"math/big"
@ -120,13 +121,13 @@ func ValidateTransaction(tx *types.Transaction, head *types.Header, signer types
}
sidecar := tx.BlobTxSidecar()
if sidecar == nil {
return fmt.Errorf("missing sidecar in blob transaction")
return errors.New("missing sidecar in blob transaction")
}
// Ensure the number of items in the blob transaction and various side
// data match up before doing any expensive validations
hashes := tx.BlobHashes()
if len(hashes) == 0 {
return fmt.Errorf("blobless blob transaction")
return errors.New("blobless blob transaction")
}
if len(hashes) > params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob {
return fmt.Errorf("too many blobs in transaction: have %d, permitted %d", len(hashes), params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob)

View File

@ -17,6 +17,7 @@
package era
import (
+ "errors"
"fmt"
"math/big"
@ -28,7 +29,7 @@ import (
// accumulator of header records.
func ComputeAccumulator(hashes []common.Hash, tds []*big.Int) (common.Hash, error) {
if len(hashes) != len(tds) {
return common.Hash{}, fmt.Errorf("must have equal number hashes as td values")
return common.Hash{}, errors.New("must have equal number hashes as td values")
}
if len(hashes) > MaxEra1Size {
return common.Hash{}, fmt.Errorf("too many records: have %d, max %d", len(hashes), MaxEra1Size)

View File

@ -18,6 +18,7 @@ package era
import (
"bytes"
"encoding/binary"
+ "errors"
"fmt"
"io"
"math/big"
@ -158,7 +159,7 @@ func (b *Builder) AddRLP(header, body, receipts []byte, number uint64, hash comm
// corresponding e2store entries.
func (b *Builder) Finalize() (common.Hash, error) {
if b.startNum == nil {
return common.Hash{}, fmt.Errorf("finalize called on empty builder")
return common.Hash{}, errors.New("finalize called on empty builder")
}
// Compute accumulator root and write entry.
root, err := ComputeAccumulator(b.hashes, b.tds)

View File

@ -18,6 +18,7 @@ package e2store
import (
"encoding/binary"
+ "errors"
"fmt"
"io"
)
@ -160,7 +161,7 @@ func (r *Reader) ReadMetadataAt(off int64) (typ uint16, length uint32, err error
// Check reserved bytes of header.
if b[6] != 0 || b[7] != 0 {
return 0, 0, fmt.Errorf("reserved bytes are non-zero")
return 0, 0, errors.New("reserved bytes are non-zero")
}
return typ, length, nil

View File

@ -18,7 +18,7 @@ package e2store
import (
"bytes"
- "fmt"
+ "errors"
"io"
"testing"
@ -92,7 +92,7 @@ func TestDecode(t *testing.T) {
},
{ // basic invalid decoding
have: "ffff000000000001",
err: fmt.Errorf("reserved bytes are non-zero"),
err: errors.New("reserved bytes are non-zero"),
},
{ // no more entries to read, returns EOF
have: "",

View File

@ -18,6 +18,7 @@ package era
import (
"encoding/binary"
+ "errors"
"fmt"
"io"
"math/big"
@ -127,7 +128,7 @@ func (e *Era) Close() error {
func (e *Era) GetBlockByNumber(num uint64) (*types.Block, error) {
if e.m.start > num || e.m.start+e.m.count <= num {
return nil, fmt.Errorf("out-of-bounds")
return nil, errors.New("out-of-bounds")
}
off, err := e.readOffset(num)
if err != nil {

View File

@ -17,6 +17,7 @@
package era
import (
+ "errors"
"fmt"
"io"
"math/big"
@ -61,7 +62,7 @@ func (it *Iterator) Error() error {
// Block returns the block for the iterator's current position.
func (it *Iterator) Block() (*types.Block, error) {
if it.inner.Header == nil || it.inner.Body == nil {
return nil, fmt.Errorf("header and body must be non-nil")
return nil, errors.New("header and body must be non-nil")
}
var (
header types.Header

View File

@ -947,7 +947,7 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
if genParams.parentHash != (common.Hash{}) {
block := w.chain.GetBlockByHash(genParams.parentHash)
if block == nil {
return nil, fmt.Errorf("missing parent")
return nil, errors.New("missing parent")
}
parent = block.Header()
}

View File

@ -19,6 +19,7 @@ package node
import (
"compress/gzip"
"context"
+ "errors"
"fmt"
"io"
"net"
@ -299,7 +300,7 @@ func (h *httpServer) enableRPC(apis []rpc.API, config httpConfig) error {
defer h.mu.Unlock()
if h.rpcAllowed() {
return fmt.Errorf("JSON-RPC over HTTP is already enabled")
return errors.New("JSON-RPC over HTTP is already enabled")
}
// Create RPC server and handler.
@ -335,7 +336,7 @@ func (h *httpServer) enableWS(apis []rpc.API, config wsConfig) error {
defer h.mu.Unlock()
if h.wsAllowed() {
return fmt.Errorf("JSON-RPC over WebSocket is already enabled")
return errors.New("JSON-RPC over WebSocket is already enabled")
}
// Create RPC server and handler.
srv := rpc.NewServer()

View File

@ -364,7 +364,7 @@ func (t *UDPv4) RequestENR(n *enode.Node) (*enode.Node, error) {
return nil, err
}
if respN.ID() != n.ID() {
return nil, fmt.Errorf("invalid ID in response record")
return nil, errors.New("invalid ID in response record")
}
if respN.Seq() < n.Seq() {
return n, nil // response record is older

View File

@ -442,7 +442,7 @@ func (t *UDPv5) verifyResponseNode(c *callV5, r *enr.Record, distances []uint, s
}
}
if _, ok := seen[node.ID()]; ok {
return nil, fmt.Errorf("duplicate record")
return nil, errors.New("duplicate record")
}
seen[node.ID()] = struct{}{}
return node, nil

View File

@ -367,11 +367,11 @@ func (c *Codec) makeHandshakeAuth(toID enode.ID, addr string, challenge *Whoarey
// key is part of the ID nonce signature.
var remotePubkey = new(ecdsa.PublicKey)
if err := challenge.Node.Load((*enode.Secp256k1)(remotePubkey)); err != nil {
return nil, nil, fmt.Errorf("can't find secp256k1 key for recipient")
return nil, nil, errors.New("can't find secp256k1 key for recipient")
}
ephkey, err := c.sc.ephemeralKeyGen()
if err != nil {
return nil, nil, fmt.Errorf("can't generate ephemeral key")
return nil, nil, errors.New("can't generate ephemeral key")
}
ephpubkey := EncodePubkey(&ephkey.PublicKey)
auth.pubkey = ephpubkey[:]
@ -395,7 +395,7 @@ func (c *Codec) makeHandshakeAuth(toID enode.ID, addr string, challenge *Whoarey
// Create session keys.
sec := deriveKeys(sha256.New, ephkey, remotePubkey, c.localnode.ID(), challenge.Node.ID(), cdata)
if sec == nil {
return nil, nil, fmt.Errorf("key derivation failed")
return nil, nil, errors.New("key derivation failed")
}
return auth, sec, err
}

View File

@ -191,7 +191,7 @@ func (c *Client) resolveEntry(ctx context.Context, domain, hash string) (entry,
func (c *Client) doResolveEntry(ctx context.Context, domain, hash string) (entry, error) {
wantHash, err := b32format.DecodeString(hash)
if err != nil {
return nil, fmt.Errorf("invalid base32 hash")
return nil, errors.New("invalid base32 hash")
}
name := hash + "." + domain
txts, err := c.cfg.Resolver.LookupTXT(ctx, hash+"."+domain)

View File

@ -21,6 +21,7 @@ import (
"crypto/ecdsa"
"encoding/base32"
"encoding/base64"
+ "errors"
"fmt"
"io"
"strings"
@ -341,7 +342,7 @@ func parseLinkEntry(e string) (entry, error) {
func parseLink(e string) (*linkEntry, error) {
if !strings.HasPrefix(e, linkPrefix) {
return nil, fmt.Errorf("wrong/missing scheme 'enrtree' in URL")
return nil, errors.New("wrong/missing scheme 'enrtree' in URL")
}
e = e[len(linkPrefix):]

View File

@ -18,7 +18,7 @@ package enode
import (
"crypto/ecdsa"
- "fmt"
+ "errors"
"io"
"github.com/ethereum/go-ethereum/common/math"
@ -67,7 +67,7 @@ func (V4ID) Verify(r *enr.Record, sig []byte) error {
if err := r.Load(&entry); err != nil {
return err
} else if len(entry) != 33 {
return fmt.Errorf("invalid public key")
return errors.New("invalid public key")
}
h := sha3.NewLegacyKeccak256()

View File

@ -17,6 +17,7 @@
package nat
import (
+ "errors"
"fmt"
"net"
"strings"
@ -46,7 +47,7 @@ func (n *pmp) ExternalIP() (net.IP, error) {
func (n *pmp) AddMapping(protocol string, extport, intport int, name string, lifetime time.Duration) (uint16, error) {
if lifetime <= 0 {
return 0, fmt.Errorf("lifetime must not be <= 0")
return 0, errors.New("lifetime must not be <= 0")
}
// Note order of port arguments is switched between our
// AddMapping and the client's AddPortMapping.

View File

@ -460,7 +460,7 @@ func startExecNodeStack() (*node.Node, error) {
// decode the config
confEnv := os.Getenv(envNodeConfig)
if confEnv == "" {
return nil, fmt.Errorf("missing " + envNodeConfig)
return nil, errors.New("missing " + envNodeConfig)
}
var conf execNodeConfig
if err := json.Unmarshal([]byte(confEnv), &conf); err != nil {

View File

@ -708,7 +708,7 @@ func formatPrimitiveValue(encType string, encValue interface{}) (string, error)
func (t Types) validate() error {
for typeKey, typeArr := range t {
if len(typeKey) == 0 {
return fmt.Errorf("empty type key")
return errors.New("empty type key")
}
for i, typeObj := range typeArr {
if len(typeObj.Type) == 0 {

View File

@ -556,7 +556,7 @@ func runRandTest(rt randTest) error {
checktr.MustUpdate(it.Key, it.Value)
}
if tr.Hash() != checktr.Hash() {
rt[i].err = fmt.Errorf("hash mismatch in opItercheckhash")
rt[i].err = errors.New("hash mismatch in opItercheckhash")
}
case opNodeDiff:
var (
@ -594,19 +594,19 @@ func runRandTest(rt randTest) error {
}
}
if len(insertExp) != len(tr.tracer.inserts) {
rt[i].err = fmt.Errorf("insert set mismatch")
rt[i].err = errors.New("insert set mismatch")
}
if len(deleteExp) != len(tr.tracer.deletes) {
rt[i].err = fmt.Errorf("delete set mismatch")
rt[i].err = errors.New("delete set mismatch")
}
for insert := range tr.tracer.inserts {
if _, present := insertExp[insert]; !present {
rt[i].err = fmt.Errorf("missing inserted node")
rt[i].err = errors.New("missing inserted node")
}
}
for del := range tr.tracer.deletes {
if _, present := deleteExp[del]; !present {
rt[i].err = fmt.Errorf("missing deleted node")
rt[i].err = errors.New("missing deleted node")
}
}
}

View File

@ -215,7 +215,7 @@ func (m *meta) encode() []byte {
// decode unpacks the meta object from byte stream.
func (m *meta) decode(blob []byte) error {
if len(blob) < 1 {
return fmt.Errorf("no version tag")
return errors.New("no version tag")
}
switch blob[0] {
case stateHistoryVersion: