Merge branch 'master' into drop-in-plugin
This commit is contained in:
commit
a83925989f
42
.travis.yml
42
.travis.yml
|
@ -120,36 +120,6 @@ jobs:
|
||||||
- go run build/ci.go install -dlgo -arch arm64 -cc aarch64-linux-gnu-gcc
|
- go run build/ci.go install -dlgo -arch arm64 -cc aarch64-linux-gnu-gcc
|
||||||
- go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
|
- go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
|
||||||
|
|
||||||
# This builder does the Linux Azure MIPS xgo uploads
|
|
||||||
- stage: build
|
|
||||||
if: type = push
|
|
||||||
os: linux
|
|
||||||
dist: bionic
|
|
||||||
services:
|
|
||||||
- docker
|
|
||||||
go: 1.17.x
|
|
||||||
env:
|
|
||||||
- azure-linux-mips
|
|
||||||
- GO111MODULE=on
|
|
||||||
git:
|
|
||||||
submodules: false # avoid cloning ethereum/tests
|
|
||||||
script:
|
|
||||||
- go run build/ci.go xgo --alltools -- --targets=linux/mips --ldflags '-extldflags "-static"' -v
|
|
||||||
- for bin in build/bin/*-linux-mips; do mv -f "${bin}" "${bin/-linux-mips/}"; done
|
|
||||||
- go run build/ci.go archive -arch mips -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
|
|
||||||
|
|
||||||
- go run build/ci.go xgo --alltools -- --targets=linux/mipsle --ldflags '-extldflags "-static"' -v
|
|
||||||
- for bin in build/bin/*-linux-mipsle; do mv -f "${bin}" "${bin/-linux-mipsle/}"; done
|
|
||||||
- go run build/ci.go archive -arch mipsle -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
|
|
||||||
|
|
||||||
- go run build/ci.go xgo --alltools -- --targets=linux/mips64 --ldflags '-extldflags "-static"' -v
|
|
||||||
- for bin in build/bin/*-linux-mips64; do mv -f "${bin}" "${bin/-linux-mips64/}"; done
|
|
||||||
- go run build/ci.go archive -arch mips64 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
|
|
||||||
|
|
||||||
- go run build/ci.go xgo --alltools -- --targets=linux/mips64le --ldflags '-extldflags "-static"' -v
|
|
||||||
- for bin in build/bin/*-linux-mips64le; do mv -f "${bin}" "${bin/-linux-mips64le/}"; done
|
|
||||||
- go run build/ci.go archive -arch mips64le -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
|
|
||||||
|
|
||||||
# This builder does the Android Maven and Azure uploads
|
# This builder does the Android Maven and Azure uploads
|
||||||
- stage: build
|
- stage: build
|
||||||
if: type = push
|
if: type = push
|
||||||
|
@ -263,3 +233,15 @@ jobs:
|
||||||
submodules: false # avoid cloning ethereum/tests
|
submodules: false # avoid cloning ethereum/tests
|
||||||
script:
|
script:
|
||||||
- go run build/ci.go purge -store gethstore/builds -days 14
|
- go run build/ci.go purge -store gethstore/builds -days 14
|
||||||
|
|
||||||
|
# This builder executes race tests
|
||||||
|
- stage: build
|
||||||
|
if: type = cron
|
||||||
|
os: linux
|
||||||
|
dist: bionic
|
||||||
|
go: 1.17.x
|
||||||
|
env:
|
||||||
|
- GO111MODULE=on
|
||||||
|
script:
|
||||||
|
- go run build/ci.go test -race -coverage $TEST_PACKAGES
|
||||||
|
|
||||||
|
|
98
Makefile
98
Makefile
|
@ -2,11 +2,7 @@
|
||||||
# with Go source code. If you know what GOPATH is then you probably
|
# with Go source code. If you know what GOPATH is then you probably
|
||||||
# don't need to bother with make.
|
# don't need to bother with make.
|
||||||
|
|
||||||
.PHONY: geth android ios geth-cross evm all test clean
|
.PHONY: geth android ios evm all test clean
|
||||||
.PHONY: geth-linux geth-linux-386 geth-linux-amd64 geth-linux-mips64 geth-linux-mips64le
|
|
||||||
.PHONY: geth-linux-arm geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64
|
|
||||||
.PHONY: geth-darwin geth-darwin-386 geth-darwin-amd64
|
|
||||||
.PHONY: geth-windows geth-windows-386 geth-windows-amd64
|
|
||||||
|
|
||||||
GOBIN = ./build/bin
|
GOBIN = ./build/bin
|
||||||
GO ?= latest
|
GO ?= latest
|
||||||
|
@ -53,95 +49,3 @@ devtools:
|
||||||
env GOBIN= go install ./cmd/abigen
|
env GOBIN= go install ./cmd/abigen
|
||||||
@type "solc" 2> /dev/null || echo 'Please install solc'
|
@type "solc" 2> /dev/null || echo 'Please install solc'
|
||||||
@type "protoc" 2> /dev/null || echo 'Please install protoc'
|
@type "protoc" 2> /dev/null || echo 'Please install protoc'
|
||||||
|
|
||||||
# Cross Compilation Targets (xgo)
|
|
||||||
|
|
||||||
geth-cross: geth-linux geth-darwin geth-windows geth-android geth-ios
|
|
||||||
@echo "Full cross compilation done:"
|
|
||||||
@ls -ld $(GOBIN)/geth-*
|
|
||||||
|
|
||||||
geth-linux: geth-linux-386 geth-linux-amd64 geth-linux-arm geth-linux-mips64 geth-linux-mips64le
|
|
||||||
@echo "Linux cross compilation done:"
|
|
||||||
@ls -ld $(GOBIN)/geth-linux-*
|
|
||||||
|
|
||||||
geth-linux-386:
|
|
||||||
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/386 -v ./cmd/geth
|
|
||||||
@echo "Linux 386 cross compilation done:"
|
|
||||||
@ls -ld $(GOBIN)/geth-linux-* | grep 386
|
|
||||||
|
|
||||||
geth-linux-amd64:
|
|
||||||
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/amd64 -v ./cmd/geth
|
|
||||||
@echo "Linux amd64 cross compilation done:"
|
|
||||||
@ls -ld $(GOBIN)/geth-linux-* | grep amd64
|
|
||||||
|
|
||||||
geth-linux-arm: geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64
|
|
||||||
@echo "Linux ARM cross compilation done:"
|
|
||||||
@ls -ld $(GOBIN)/geth-linux-* | grep arm
|
|
||||||
|
|
||||||
geth-linux-arm-5:
|
|
||||||
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-5 -v ./cmd/geth
|
|
||||||
@echo "Linux ARMv5 cross compilation done:"
|
|
||||||
@ls -ld $(GOBIN)/geth-linux-* | grep arm-5
|
|
||||||
|
|
||||||
geth-linux-arm-6:
|
|
||||||
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-6 -v ./cmd/geth
|
|
||||||
@echo "Linux ARMv6 cross compilation done:"
|
|
||||||
@ls -ld $(GOBIN)/geth-linux-* | grep arm-6
|
|
||||||
|
|
||||||
geth-linux-arm-7:
|
|
||||||
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-7 -v ./cmd/geth
|
|
||||||
@echo "Linux ARMv7 cross compilation done:"
|
|
||||||
@ls -ld $(GOBIN)/geth-linux-* | grep arm-7
|
|
||||||
|
|
||||||
geth-linux-arm64:
|
|
||||||
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm64 -v ./cmd/geth
|
|
||||||
@echo "Linux ARM64 cross compilation done:"
|
|
||||||
@ls -ld $(GOBIN)/geth-linux-* | grep arm64
|
|
||||||
|
|
||||||
geth-linux-mips:
|
|
||||||
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips --ldflags '-extldflags "-static"' -v ./cmd/geth
|
|
||||||
@echo "Linux MIPS cross compilation done:"
|
|
||||||
@ls -ld $(GOBIN)/geth-linux-* | grep mips
|
|
||||||
|
|
||||||
geth-linux-mipsle:
|
|
||||||
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mipsle --ldflags '-extldflags "-static"' -v ./cmd/geth
|
|
||||||
@echo "Linux MIPSle cross compilation done:"
|
|
||||||
@ls -ld $(GOBIN)/geth-linux-* | grep mipsle
|
|
||||||
|
|
||||||
geth-linux-mips64:
|
|
||||||
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64 --ldflags '-extldflags "-static"' -v ./cmd/geth
|
|
||||||
@echo "Linux MIPS64 cross compilation done:"
|
|
||||||
@ls -ld $(GOBIN)/geth-linux-* | grep mips64
|
|
||||||
|
|
||||||
geth-linux-mips64le:
|
|
||||||
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64le --ldflags '-extldflags "-static"' -v ./cmd/geth
|
|
||||||
@echo "Linux MIPS64le cross compilation done:"
|
|
||||||
@ls -ld $(GOBIN)/geth-linux-* | grep mips64le
|
|
||||||
|
|
||||||
geth-darwin: geth-darwin-386 geth-darwin-amd64
|
|
||||||
@echo "Darwin cross compilation done:"
|
|
||||||
@ls -ld $(GOBIN)/geth-darwin-*
|
|
||||||
|
|
||||||
geth-darwin-386:
|
|
||||||
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/386 -v ./cmd/geth
|
|
||||||
@echo "Darwin 386 cross compilation done:"
|
|
||||||
@ls -ld $(GOBIN)/geth-darwin-* | grep 386
|
|
||||||
|
|
||||||
geth-darwin-amd64:
|
|
||||||
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/amd64 -v ./cmd/geth
|
|
||||||
@echo "Darwin amd64 cross compilation done:"
|
|
||||||
@ls -ld $(GOBIN)/geth-darwin-* | grep amd64
|
|
||||||
|
|
||||||
geth-windows: geth-windows-386 geth-windows-amd64
|
|
||||||
@echo "Windows cross compilation done:"
|
|
||||||
@ls -ld $(GOBIN)/geth-windows-*
|
|
||||||
|
|
||||||
geth-windows-386:
|
|
||||||
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/386 -v ./cmd/geth
|
|
||||||
@echo "Windows 386 cross compilation done:"
|
|
||||||
@ls -ld $(GOBIN)/geth-windows-* | grep 386
|
|
||||||
|
|
||||||
geth-windows-amd64:
|
|
||||||
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/amd64 -v ./cmd/geth
|
|
||||||
@echo "Windows amd64 cross compilation done:"
|
|
||||||
@ls -ld $(GOBIN)/geth-windows-* | grep amd64
|
|
||||||
|
|
|
@ -68,7 +68,7 @@ This command will:
|
||||||
causing it to download more data in exchange for avoiding processing the entire history
|
causing it to download more data in exchange for avoiding processing the entire history
|
||||||
of the Ethereum network, which is very CPU intensive.
|
of the Ethereum network, which is very CPU intensive.
|
||||||
* Start up `geth`'s built-in interactive [JavaScript console](https://geth.ethereum.org/docs/interface/javascript-console),
|
* Start up `geth`'s built-in interactive [JavaScript console](https://geth.ethereum.org/docs/interface/javascript-console),
|
||||||
(via the trailing `console` subcommand) through which you can interact using [`web3` methods](https://web3js.readthedocs.io/en/)
|
(via the trailing `console` subcommand) through which you can interact using [`web3` methods](https://web3js.readthedocs.io/)
|
||||||
(note: the `web3` version bundled within `geth` is very old, and not up to date with official docs),
|
(note: the `web3` version bundled within `geth` is very old, and not up to date with official docs),
|
||||||
as well as `geth`'s own [management APIs](https://geth.ethereum.org/docs/rpc/server).
|
as well as `geth`'s own [management APIs](https://geth.ethereum.org/docs/rpc/server).
|
||||||
This tool is optional and if you leave it out you can always attach to an already running
|
This tool is optional and if you leave it out you can always attach to an already running
|
||||||
|
|
|
@ -34,6 +34,7 @@ type ABI struct {
|
||||||
Constructor Method
|
Constructor Method
|
||||||
Methods map[string]Method
|
Methods map[string]Method
|
||||||
Events map[string]Event
|
Events map[string]Event
|
||||||
|
Errors map[string]Error
|
||||||
|
|
||||||
// Additional "special" functions introduced in solidity v0.6.0.
|
// Additional "special" functions introduced in solidity v0.6.0.
|
||||||
// It's separated from the original default fallback. Each contract
|
// It's separated from the original default fallback. Each contract
|
||||||
|
@ -157,12 +158,13 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
|
||||||
}
|
}
|
||||||
abi.Methods = make(map[string]Method)
|
abi.Methods = make(map[string]Method)
|
||||||
abi.Events = make(map[string]Event)
|
abi.Events = make(map[string]Event)
|
||||||
|
abi.Errors = make(map[string]Error)
|
||||||
for _, field := range fields {
|
for _, field := range fields {
|
||||||
switch field.Type {
|
switch field.Type {
|
||||||
case "constructor":
|
case "constructor":
|
||||||
abi.Constructor = NewMethod("", "", Constructor, field.StateMutability, field.Constant, field.Payable, field.Inputs, nil)
|
abi.Constructor = NewMethod("", "", Constructor, field.StateMutability, field.Constant, field.Payable, field.Inputs, nil)
|
||||||
case "function":
|
case "function":
|
||||||
name := abi.overloadedMethodName(field.Name)
|
name := overloadedName(field.Name, func(s string) bool { _, ok := abi.Methods[s]; return ok })
|
||||||
abi.Methods[name] = NewMethod(name, field.Name, Function, field.StateMutability, field.Constant, field.Payable, field.Inputs, field.Outputs)
|
abi.Methods[name] = NewMethod(name, field.Name, Function, field.StateMutability, field.Constant, field.Payable, field.Inputs, field.Outputs)
|
||||||
case "fallback":
|
case "fallback":
|
||||||
// New introduced function type in v0.6.0, check more detail
|
// New introduced function type in v0.6.0, check more detail
|
||||||
|
@ -182,8 +184,10 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
|
||||||
}
|
}
|
||||||
abi.Receive = NewMethod("", "", Receive, field.StateMutability, field.Constant, field.Payable, nil, nil)
|
abi.Receive = NewMethod("", "", Receive, field.StateMutability, field.Constant, field.Payable, nil, nil)
|
||||||
case "event":
|
case "event":
|
||||||
name := abi.overloadedEventName(field.Name)
|
name := overloadedName(field.Name, func(s string) bool { _, ok := abi.Events[s]; return ok })
|
||||||
abi.Events[name] = NewEvent(name, field.Name, field.Anonymous, field.Inputs)
|
abi.Events[name] = NewEvent(name, field.Name, field.Anonymous, field.Inputs)
|
||||||
|
case "error":
|
||||||
|
abi.Errors[field.Name] = NewError(field.Name, field.Inputs)
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("abi: could not recognize type %v of field %v", field.Type, field.Name)
|
return fmt.Errorf("abi: could not recognize type %v of field %v", field.Type, field.Name)
|
||||||
}
|
}
|
||||||
|
@ -191,36 +195,6 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// overloadedMethodName returns the next available name for a given function.
|
|
||||||
// Needed since solidity allows for function overload.
|
|
||||||
//
|
|
||||||
// e.g. if the abi contains Methods send, send1
|
|
||||||
// overloadedMethodName would return send2 for input send.
|
|
||||||
func (abi *ABI) overloadedMethodName(rawName string) string {
|
|
||||||
name := rawName
|
|
||||||
_, ok := abi.Methods[name]
|
|
||||||
for idx := 0; ok; idx++ {
|
|
||||||
name = fmt.Sprintf("%s%d", rawName, idx)
|
|
||||||
_, ok = abi.Methods[name]
|
|
||||||
}
|
|
||||||
return name
|
|
||||||
}
|
|
||||||
|
|
||||||
// overloadedEventName returns the next available name for a given event.
|
|
||||||
// Needed since solidity allows for event overload.
|
|
||||||
//
|
|
||||||
// e.g. if the abi contains events received, received1
|
|
||||||
// overloadedEventName would return received2 for input received.
|
|
||||||
func (abi *ABI) overloadedEventName(rawName string) string {
|
|
||||||
name := rawName
|
|
||||||
_, ok := abi.Events[name]
|
|
||||||
for idx := 0; ok; idx++ {
|
|
||||||
name = fmt.Sprintf("%s%d", rawName, idx)
|
|
||||||
_, ok = abi.Events[name]
|
|
||||||
}
|
|
||||||
return name
|
|
||||||
}
|
|
||||||
|
|
||||||
// MethodById looks up a method by the 4-byte id,
|
// MethodById looks up a method by the 4-byte id,
|
||||||
// returns nil if none found.
|
// returns nil if none found.
|
||||||
func (abi *ABI) MethodById(sigdata []byte) (*Method, error) {
|
func (abi *ABI) MethodById(sigdata []byte) (*Method, error) {
|
||||||
|
@ -277,3 +251,20 @@ func UnpackRevert(data []byte) (string, error) {
|
||||||
}
|
}
|
||||||
return unpacked[0].(string), nil
|
return unpacked[0].(string), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// overloadedName returns the next available name for a given thing.
|
||||||
|
// Needed since solidity allows for overloading.
|
||||||
|
//
|
||||||
|
// e.g. if the abi contains Methods send, send1
|
||||||
|
// overloadedName would return send2 for input send.
|
||||||
|
//
|
||||||
|
// overloadedName works for methods, events and errors.
|
||||||
|
func overloadedName(rawName string, isAvail func(string) bool) string {
|
||||||
|
name := rawName
|
||||||
|
ok := isAvail(name)
|
||||||
|
for idx := 0; ok; idx++ {
|
||||||
|
name = fmt.Sprintf("%s%d", rawName, idx)
|
||||||
|
ok = isAvail(name)
|
||||||
|
}
|
||||||
|
return name
|
||||||
|
}
|
||||||
|
|
|
@ -295,6 +295,20 @@ func TestOverloadedMethodSignature(t *testing.T) {
|
||||||
check("bar0", "bar(uint256,uint256)", false)
|
check("bar0", "bar(uint256,uint256)", false)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestCustomErrors(t *testing.T) {
|
||||||
|
json := `[{ "inputs": [ { "internalType": "uint256", "name": "", "type": "uint256" } ],"name": "MyError", "type": "error"} ]`
|
||||||
|
abi, err := JSON(strings.NewReader(json))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
check := func(name string, expect string) {
|
||||||
|
if abi.Errors[name].Sig != expect {
|
||||||
|
t.Fatalf("The signature of overloaded method mismatch, want %s, have %s", expect, abi.Methods[name].Sig)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
check("MyError", "MyError(uint256)")
|
||||||
|
}
|
||||||
|
|
||||||
func TestMultiPack(t *testing.T) {
|
func TestMultiPack(t *testing.T) {
|
||||||
abi, err := JSON(strings.NewReader(jsondata))
|
abi, err := JSON(strings.NewReader(jsondata))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
@ -462,6 +462,9 @@ func (b *SimulatedBackend) PendingNonceAt(ctx context.Context, account common.Ad
|
||||||
// SuggestGasPrice implements ContractTransactor.SuggestGasPrice. Since the simulated
|
// SuggestGasPrice implements ContractTransactor.SuggestGasPrice. Since the simulated
|
||||||
// chain doesn't have miners, we just return a gas price of 1 for any call.
|
// chain doesn't have miners, we just return a gas price of 1 for any call.
|
||||||
func (b *SimulatedBackend) SuggestGasPrice(ctx context.Context) (*big.Int, error) {
|
func (b *SimulatedBackend) SuggestGasPrice(ctx context.Context) (*big.Int, error) {
|
||||||
|
if b.pendingBlock.Header().BaseFee != nil {
|
||||||
|
return b.pendingBlock.Header().BaseFee, nil
|
||||||
|
}
|
||||||
return big.NewInt(1), nil
|
return big.NewInt(1), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -916,8 +916,8 @@ func TestSuggestGasPrice(t *testing.T) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("could not get gas price: %v", err)
|
t.Errorf("could not get gas price: %v", err)
|
||||||
}
|
}
|
||||||
if gasPrice.Uint64() != uint64(1) {
|
if gasPrice.Uint64() != sim.pendingBlock.Header().BaseFee.Uint64() {
|
||||||
t.Errorf("gas price was not expected value of 1. actual: %v", gasPrice.Uint64())
|
t.Errorf("gas price was not expected value of %v. actual: %v", sim.pendingBlock.Header().BaseFee.Uint64(), gasPrice.Uint64())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -231,108 +231,158 @@ func (c *BoundContract) Transfer(opts *TransactOpts) (*types.Transaction, error)
|
||||||
return c.transact(opts, &c.address, nil)
|
return c.transact(opts, &c.address, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// transact executes an actual transaction invocation, first deriving any missing
|
func (c *BoundContract) createDynamicTx(opts *TransactOpts, contract *common.Address, input []byte, head *types.Header) (*types.Transaction, error) {
|
||||||
// authorization fields, and then scheduling the transaction for execution.
|
// Normalize value
|
||||||
func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, input []byte) (*types.Transaction, error) {
|
|
||||||
var err error
|
|
||||||
|
|
||||||
// Ensure a valid value field and resolve the account nonce
|
|
||||||
value := opts.Value
|
value := opts.Value
|
||||||
if value == nil {
|
if value == nil {
|
||||||
value = new(big.Int)
|
value = new(big.Int)
|
||||||
}
|
}
|
||||||
var nonce uint64
|
// Estimate TipCap
|
||||||
if opts.Nonce == nil {
|
gasTipCap := opts.GasTipCap
|
||||||
nonce, err = c.transactor.PendingNonceAt(ensureContext(opts.Context), opts.From)
|
if gasTipCap == nil {
|
||||||
|
tip, err := c.transactor.SuggestGasTipCap(ensureContext(opts.Context))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to retrieve account nonce: %v", err)
|
return nil, err
|
||||||
}
|
}
|
||||||
} else {
|
gasTipCap = tip
|
||||||
nonce = opts.Nonce.Uint64()
|
|
||||||
}
|
}
|
||||||
// Figure out reasonable gas price values
|
// Estimate FeeCap
|
||||||
if opts.GasPrice != nil && (opts.GasFeeCap != nil || opts.GasTipCap != nil) {
|
gasFeeCap := opts.GasFeeCap
|
||||||
return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
|
if gasFeeCap == nil {
|
||||||
|
gasFeeCap = new(big.Int).Add(
|
||||||
|
gasTipCap,
|
||||||
|
new(big.Int).Mul(head.BaseFee, big.NewInt(2)),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
head, err := c.transactor.HeaderByNumber(ensureContext(opts.Context), nil)
|
if gasFeeCap.Cmp(gasTipCap) < 0 {
|
||||||
|
return nil, fmt.Errorf("maxFeePerGas (%v) < maxPriorityFeePerGas (%v)", gasFeeCap, gasTipCap)
|
||||||
|
}
|
||||||
|
// Estimate GasLimit
|
||||||
|
gasLimit := opts.GasLimit
|
||||||
|
if opts.GasLimit == 0 {
|
||||||
|
var err error
|
||||||
|
gasLimit, err = c.estimateGasLimit(opts, contract, input, nil, gasTipCap, gasFeeCap, value)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// create the transaction
|
||||||
|
nonce, err := c.getNonce(opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if head.BaseFee != nil && opts.GasPrice == nil {
|
baseTx := &types.DynamicFeeTx{
|
||||||
if opts.GasTipCap == nil {
|
To: contract,
|
||||||
tip, err := c.transactor.SuggestGasTipCap(ensureContext(opts.Context))
|
Nonce: nonce,
|
||||||
if err != nil {
|
GasFeeCap: gasFeeCap,
|
||||||
return nil, err
|
GasTipCap: gasTipCap,
|
||||||
}
|
Gas: gasLimit,
|
||||||
opts.GasTipCap = tip
|
Value: value,
|
||||||
}
|
Data: input,
|
||||||
if opts.GasFeeCap == nil {
|
|
||||||
gasFeeCap := new(big.Int).Add(
|
|
||||||
opts.GasTipCap,
|
|
||||||
new(big.Int).Mul(head.BaseFee, big.NewInt(2)),
|
|
||||||
)
|
|
||||||
opts.GasFeeCap = gasFeeCap
|
|
||||||
}
|
|
||||||
if opts.GasFeeCap.Cmp(opts.GasTipCap) < 0 {
|
|
||||||
return nil, fmt.Errorf("maxFeePerGas (%v) < maxPriorityFeePerGas (%v)", opts.GasFeeCap, opts.GasTipCap)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if opts.GasFeeCap != nil || opts.GasTipCap != nil {
|
|
||||||
return nil, errors.New("maxFeePerGas or maxPriorityFeePerGas specified but london is not active yet")
|
|
||||||
}
|
|
||||||
if opts.GasPrice == nil {
|
|
||||||
price, err := c.transactor.SuggestGasPrice(ensureContext(opts.Context))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
opts.GasPrice = price
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
gasLimit := opts.GasLimit
|
return types.NewTx(baseTx), nil
|
||||||
if gasLimit == 0 {
|
}
|
||||||
// Gas estimation cannot succeed without code for method invocations
|
|
||||||
if contract != nil {
|
func (c *BoundContract) createLegacyTx(opts *TransactOpts, contract *common.Address, input []byte) (*types.Transaction, error) {
|
||||||
if code, err := c.transactor.PendingCodeAt(ensureContext(opts.Context), c.address); err != nil {
|
if opts.GasFeeCap != nil || opts.GasTipCap != nil {
|
||||||
return nil, err
|
return nil, errors.New("maxFeePerGas or maxPriorityFeePerGas specified but london is not active yet")
|
||||||
} else if len(code) == 0 {
|
}
|
||||||
return nil, ErrNoCode
|
// Normalize value
|
||||||
}
|
value := opts.Value
|
||||||
}
|
if value == nil {
|
||||||
// If the contract surely has code (or code is not needed), estimate the transaction
|
value = new(big.Int)
|
||||||
msg := ethereum.CallMsg{From: opts.From, To: contract, GasPrice: opts.GasPrice, GasTipCap: opts.GasTipCap, GasFeeCap: opts.GasFeeCap, Value: value, Data: input}
|
}
|
||||||
gasLimit, err = c.transactor.EstimateGas(ensureContext(opts.Context), msg)
|
// Estimate GasPrice
|
||||||
|
gasPrice := opts.GasPrice
|
||||||
|
if gasPrice == nil {
|
||||||
|
price, err := c.transactor.SuggestGasPrice(ensureContext(opts.Context))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to estimate gas needed: %v", err)
|
return nil, err
|
||||||
|
}
|
||||||
|
gasPrice = price
|
||||||
|
}
|
||||||
|
// Estimate GasLimit
|
||||||
|
gasLimit := opts.GasLimit
|
||||||
|
if opts.GasLimit == 0 {
|
||||||
|
var err error
|
||||||
|
gasLimit, err = c.estimateGasLimit(opts, contract, input, gasPrice, nil, nil, value)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Create the transaction, sign it and schedule it for execution
|
// create the transaction
|
||||||
var rawTx *types.Transaction
|
nonce, err := c.getNonce(opts)
|
||||||
if opts.GasFeeCap == nil {
|
if err != nil {
|
||||||
baseTx := &types.LegacyTx{
|
return nil, err
|
||||||
Nonce: nonce,
|
}
|
||||||
GasPrice: opts.GasPrice,
|
baseTx := &types.LegacyTx{
|
||||||
Gas: gasLimit,
|
To: contract,
|
||||||
Value: value,
|
Nonce: nonce,
|
||||||
Data: input,
|
GasPrice: gasPrice,
|
||||||
|
Gas: gasLimit,
|
||||||
|
Value: value,
|
||||||
|
Data: input,
|
||||||
|
}
|
||||||
|
return types.NewTx(baseTx), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *BoundContract) estimateGasLimit(opts *TransactOpts, contract *common.Address, input []byte, gasPrice, gasTipCap, gasFeeCap, value *big.Int) (uint64, error) {
|
||||||
|
if contract != nil {
|
||||||
|
// Gas estimation cannot succeed without code for method invocations.
|
||||||
|
if code, err := c.transactor.PendingCodeAt(ensureContext(opts.Context), c.address); err != nil {
|
||||||
|
return 0, err
|
||||||
|
} else if len(code) == 0 {
|
||||||
|
return 0, ErrNoCode
|
||||||
}
|
}
|
||||||
if contract != nil {
|
}
|
||||||
baseTx.To = &c.address
|
msg := ethereum.CallMsg{
|
||||||
}
|
From: opts.From,
|
||||||
rawTx = types.NewTx(baseTx)
|
To: contract,
|
||||||
|
GasPrice: gasPrice,
|
||||||
|
GasTipCap: gasTipCap,
|
||||||
|
GasFeeCap: gasFeeCap,
|
||||||
|
Value: value,
|
||||||
|
Data: input,
|
||||||
|
}
|
||||||
|
return c.transactor.EstimateGas(ensureContext(opts.Context), msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *BoundContract) getNonce(opts *TransactOpts) (uint64, error) {
|
||||||
|
if opts.Nonce == nil {
|
||||||
|
return c.transactor.PendingNonceAt(ensureContext(opts.Context), opts.From)
|
||||||
} else {
|
} else {
|
||||||
baseTx := &types.DynamicFeeTx{
|
return opts.Nonce.Uint64(), nil
|
||||||
Nonce: nonce,
|
|
||||||
GasFeeCap: opts.GasFeeCap,
|
|
||||||
GasTipCap: opts.GasTipCap,
|
|
||||||
Gas: gasLimit,
|
|
||||||
Value: value,
|
|
||||||
Data: input,
|
|
||||||
}
|
|
||||||
if contract != nil {
|
|
||||||
baseTx.To = &c.address
|
|
||||||
}
|
|
||||||
rawTx = types.NewTx(baseTx)
|
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// transact executes an actual transaction invocation, first deriving any missing
|
||||||
|
// authorization fields, and then scheduling the transaction for execution.
|
||||||
|
func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, input []byte) (*types.Transaction, error) {
|
||||||
|
if opts.GasPrice != nil && (opts.GasFeeCap != nil || opts.GasTipCap != nil) {
|
||||||
|
return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
|
||||||
|
}
|
||||||
|
// Create the transaction
|
||||||
|
var (
|
||||||
|
rawTx *types.Transaction
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
if opts.GasPrice != nil {
|
||||||
|
rawTx, err = c.createLegacyTx(opts, contract, input)
|
||||||
|
} else {
|
||||||
|
// Only query for basefee if gasPrice not specified
|
||||||
|
if head, errHead := c.transactor.HeaderByNumber(ensureContext(opts.Context), nil); errHead != nil {
|
||||||
|
return nil, errHead
|
||||||
|
} else if head.BaseFee != nil {
|
||||||
|
rawTx, err = c.createDynamicTx(opts, contract, input, head)
|
||||||
|
} else {
|
||||||
|
// Chain is not London ready -> use legacy transaction
|
||||||
|
rawTx, err = c.createLegacyTx(opts, contract, input)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// Sign the transaction and schedule it for execution
|
||||||
if opts.Signer == nil {
|
if opts.Signer == nil {
|
||||||
return nil, errors.New("no signer to authorize the transaction with")
|
return nil, errors.New("no signer to authorize the transaction with")
|
||||||
}
|
}
|
||||||
|
|
|
@ -31,8 +31,49 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func mockSign(addr common.Address, tx *types.Transaction) (*types.Transaction, error) { return tx, nil }
|
||||||
|
|
||||||
|
type mockTransactor struct {
|
||||||
|
baseFee *big.Int
|
||||||
|
gasTipCap *big.Int
|
||||||
|
gasPrice *big.Int
|
||||||
|
suggestGasTipCapCalled bool
|
||||||
|
suggestGasPriceCalled bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mt *mockTransactor) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) {
|
||||||
|
return &types.Header{BaseFee: mt.baseFee}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mt *mockTransactor) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) {
|
||||||
|
return []byte{1}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mt *mockTransactor) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) {
|
||||||
|
return 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mt *mockTransactor) SuggestGasPrice(ctx context.Context) (*big.Int, error) {
|
||||||
|
mt.suggestGasPriceCalled = true
|
||||||
|
return mt.gasPrice, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mt *mockTransactor) SuggestGasTipCap(ctx context.Context) (*big.Int, error) {
|
||||||
|
mt.suggestGasTipCapCalled = true
|
||||||
|
return mt.gasTipCap, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mt *mockTransactor) EstimateGas(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error) {
|
||||||
|
return 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mt *mockTransactor) SendTransaction(ctx context.Context, tx *types.Transaction) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
type mockCaller struct {
|
type mockCaller struct {
|
||||||
codeAtBlockNumber *big.Int
|
codeAtBlockNumber *big.Int
|
||||||
callContractBlockNumber *big.Int
|
callContractBlockNumber *big.Int
|
||||||
|
@ -226,6 +267,51 @@ func TestUnpackIndexedBytesTyLogIntoMap(t *testing.T) {
|
||||||
unpackAndCheck(t, bc, expectedReceivedMap, mockLog)
|
unpackAndCheck(t, bc, expectedReceivedMap, mockLog)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestTransactGasFee(t *testing.T) {
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
// GasTipCap and GasFeeCap
|
||||||
|
// When opts.GasTipCap and opts.GasFeeCap are nil
|
||||||
|
mt := &mockTransactor{baseFee: big.NewInt(100), gasTipCap: big.NewInt(5)}
|
||||||
|
bc := bind.NewBoundContract(common.Address{}, abi.ABI{}, nil, mt, nil)
|
||||||
|
opts := &bind.TransactOpts{Signer: mockSign}
|
||||||
|
tx, err := bc.Transact(opts, "")
|
||||||
|
assert.Nil(err)
|
||||||
|
assert.Equal(big.NewInt(5), tx.GasTipCap())
|
||||||
|
assert.Equal(big.NewInt(205), tx.GasFeeCap())
|
||||||
|
assert.Nil(opts.GasTipCap)
|
||||||
|
assert.Nil(opts.GasFeeCap)
|
||||||
|
assert.True(mt.suggestGasTipCapCalled)
|
||||||
|
|
||||||
|
// Second call to Transact should use latest suggested GasTipCap
|
||||||
|
mt.gasTipCap = big.NewInt(6)
|
||||||
|
mt.suggestGasTipCapCalled = false
|
||||||
|
tx, err = bc.Transact(opts, "")
|
||||||
|
assert.Nil(err)
|
||||||
|
assert.Equal(big.NewInt(6), tx.GasTipCap())
|
||||||
|
assert.Equal(big.NewInt(206), tx.GasFeeCap())
|
||||||
|
assert.True(mt.suggestGasTipCapCalled)
|
||||||
|
|
||||||
|
// GasPrice
|
||||||
|
// When opts.GasPrice is nil
|
||||||
|
mt = &mockTransactor{gasPrice: big.NewInt(5)}
|
||||||
|
bc = bind.NewBoundContract(common.Address{}, abi.ABI{}, nil, mt, nil)
|
||||||
|
opts = &bind.TransactOpts{Signer: mockSign}
|
||||||
|
tx, err = bc.Transact(opts, "")
|
||||||
|
assert.Nil(err)
|
||||||
|
assert.Equal(big.NewInt(5), tx.GasPrice())
|
||||||
|
assert.Nil(opts.GasPrice)
|
||||||
|
assert.True(mt.suggestGasPriceCalled)
|
||||||
|
|
||||||
|
// Second call to Transact should use latest suggested GasPrice
|
||||||
|
mt.gasPrice = big.NewInt(6)
|
||||||
|
mt.suggestGasPriceCalled = false
|
||||||
|
tx, err = bc.Transact(opts, "")
|
||||||
|
assert.Nil(err)
|
||||||
|
assert.Equal(big.NewInt(6), tx.GasPrice())
|
||||||
|
assert.True(mt.suggestGasPriceCalled)
|
||||||
|
}
|
||||||
|
|
||||||
func unpackAndCheck(t *testing.T, bc *bind.BoundContract, expected map[string]interface{}, mockLog types.Log) {
|
func unpackAndCheck(t *testing.T, bc *bind.BoundContract, expected map[string]interface{}, mockLog types.Log) {
|
||||||
received := make(map[string]interface{})
|
received := make(map[string]interface{})
|
||||||
if err := bc.UnpackLogIntoMap(received, "received", mockLog); err != nil {
|
if err := bc.UnpackLogIntoMap(received, "received", mockLog); err != nil {
|
||||||
|
|
|
@ -1850,6 +1850,61 @@ var bindTests = []struct {
|
||||||
if count != 1 {
|
if count != 1 {
|
||||||
t.Fatal("Unexpected contract event number")
|
t.Fatal("Unexpected contract event number")
|
||||||
}
|
}
|
||||||
|
`,
|
||||||
|
nil,
|
||||||
|
nil,
|
||||||
|
nil,
|
||||||
|
nil,
|
||||||
|
},
|
||||||
|
// Test errors introduced in v0.8.4
|
||||||
|
{
|
||||||
|
`NewErrors`,
|
||||||
|
`
|
||||||
|
pragma solidity >0.8.4;
|
||||||
|
|
||||||
|
contract NewErrors {
|
||||||
|
error MyError(uint256);
|
||||||
|
error MyError1(uint256);
|
||||||
|
error MyError2(uint256, uint256);
|
||||||
|
error MyError3(uint256 a, uint256 b, uint256 c);
|
||||||
|
function Error() public pure {
|
||||||
|
revert MyError3(1,2,3);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`,
|
||||||
|
[]string{"0x6080604052348015600f57600080fd5b5060998061001e6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063726c638214602d575b600080fd5b60336035565b005b60405163024876cd60e61b815260016004820152600260248201526003604482015260640160405180910390fdfea264697066735822122093f786a1bc60216540cd999fbb4a6109e0fef20abcff6e9107fb2817ca968f3c64736f6c63430008070033"},
|
||||||
|
[]string{`[{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"MyError","type":"error"},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"MyError1","type":"error"},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"},{"internalType":"uint256","name":"","type":"uint256"}],"name":"MyError2","type":"error"},{"inputs":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256","name":"b","type":"uint256"},{"internalType":"uint256","name":"c","type":"uint256"}],"name":"MyError3","type":"error"},{"inputs":[],"name":"Error","outputs":[],"stateMutability":"pure","type":"function"}]`},
|
||||||
|
`
|
||||||
|
"math/big"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||||
|
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
|
||||||
|
"github.com/ethereum/go-ethereum/core"
|
||||||
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
|
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
||||||
|
`,
|
||||||
|
`
|
||||||
|
var (
|
||||||
|
key, _ = crypto.GenerateKey()
|
||||||
|
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
|
||||||
|
sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
|
||||||
|
)
|
||||||
|
defer sim.Close()
|
||||||
|
|
||||||
|
_, tx, contract, err := DeployNewErrors(user, sim)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
sim.Commit()
|
||||||
|
_, err = bind.WaitDeployed(nil, sim, tx)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
if err := contract.Error(new(bind.CallOpts)); err == nil {
|
||||||
|
t.Fatalf("expected contract to throw error")
|
||||||
|
}
|
||||||
|
// TODO (MariusVanDerWijden unpack error using abigen
|
||||||
|
// once that is implemented
|
||||||
`,
|
`,
|
||||||
nil,
|
nil,
|
||||||
nil,
|
nil,
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
// Copyright 2016 The go-ethereum Authors
|
// Copyright 2021 The go-ethereum Authors
|
||||||
// This file is part of the go-ethereum library.
|
// This file is part of the go-ethereum library.
|
||||||
//
|
//
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
@ -17,66 +17,75 @@
|
||||||
package abi
|
package abi
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"reflect"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
type Error struct {
|
||||||
errBadBool = errors.New("abi: improperly encoded boolean value")
|
Name string
|
||||||
)
|
Inputs Arguments
|
||||||
|
str string
|
||||||
// formatSliceString formats the reflection kind with the given slice size
|
// Sig contains the string signature according to the ABI spec.
|
||||||
// and returns a formatted string representation.
|
// e.g. event foo(uint32 a, int b) = "foo(uint32,int256)"
|
||||||
func formatSliceString(kind reflect.Kind, sliceSize int) string {
|
// Please note that "int" is substitute for its canonical representation "int256"
|
||||||
if sliceSize == -1 {
|
Sig string
|
||||||
return fmt.Sprintf("[]%v", kind)
|
// ID returns the canonical representation of the event's signature used by the
|
||||||
}
|
// abi definition to identify event names and types.
|
||||||
return fmt.Sprintf("[%d]%v", sliceSize, kind)
|
ID common.Hash
|
||||||
}
|
}
|
||||||
|
|
||||||
// sliceTypeCheck checks that the given slice can by assigned to the reflection
|
func NewError(name string, inputs Arguments) Error {
|
||||||
// type in t.
|
// sanitize inputs to remove inputs without names
|
||||||
func sliceTypeCheck(t Type, val reflect.Value) error {
|
// and precompute string and sig representation.
|
||||||
if val.Kind() != reflect.Slice && val.Kind() != reflect.Array {
|
names := make([]string, len(inputs))
|
||||||
return typeErr(formatSliceString(t.GetType().Kind(), t.Size), val.Type())
|
types := make([]string, len(inputs))
|
||||||
}
|
for i, input := range inputs {
|
||||||
|
if input.Name == "" {
|
||||||
if t.T == ArrayTy && val.Len() != t.Size {
|
inputs[i] = Argument{
|
||||||
return typeErr(formatSliceString(t.Elem.GetType().Kind(), t.Size), formatSliceString(val.Type().Elem().Kind(), val.Len()))
|
Name: fmt.Sprintf("arg%d", i),
|
||||||
}
|
Indexed: input.Indexed,
|
||||||
|
Type: input.Type,
|
||||||
if t.Elem.T == SliceTy || t.Elem.T == ArrayTy {
|
}
|
||||||
if val.Len() > 0 {
|
} else {
|
||||||
return sliceTypeCheck(*t.Elem, val.Index(0))
|
inputs[i] = input
|
||||||
}
|
}
|
||||||
|
// string representation
|
||||||
|
names[i] = fmt.Sprintf("%v %v", input.Type, inputs[i].Name)
|
||||||
|
if input.Indexed {
|
||||||
|
names[i] = fmt.Sprintf("%v indexed %v", input.Type, inputs[i].Name)
|
||||||
|
}
|
||||||
|
// sig representation
|
||||||
|
types[i] = input.Type.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
if val.Type().Elem().Kind() != t.Elem.GetType().Kind() {
|
str := fmt.Sprintf("error %v(%v)", name, strings.Join(names, ", "))
|
||||||
return typeErr(formatSliceString(t.Elem.GetType().Kind(), t.Size), val.Type())
|
sig := fmt.Sprintf("%v(%v)", name, strings.Join(types, ","))
|
||||||
|
id := common.BytesToHash(crypto.Keccak256([]byte(sig)))
|
||||||
|
|
||||||
|
return Error{
|
||||||
|
Name: name,
|
||||||
|
Inputs: inputs,
|
||||||
|
str: str,
|
||||||
|
Sig: sig,
|
||||||
|
ID: id,
|
||||||
}
|
}
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// typeCheck checks that the given reflection value can be assigned to the reflection
|
func (e *Error) String() string {
|
||||||
// type in t.
|
return e.str
|
||||||
func typeCheck(t Type, value reflect.Value) error {
|
|
||||||
if t.T == SliceTy || t.T == ArrayTy {
|
|
||||||
return sliceTypeCheck(t, value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check base type validity. Element types will be checked later on.
|
|
||||||
if t.GetType().Kind() != value.Kind() {
|
|
||||||
return typeErr(t.GetType().Kind(), value.Kind())
|
|
||||||
} else if t.T == FixedBytesTy && t.Size != value.Len() {
|
|
||||||
return typeErr(t.GetType(), value.Type())
|
|
||||||
} else {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// typeErr returns a formatted type casting error.
|
func (e *Error) Unpack(data []byte) (interface{}, error) {
|
||||||
func typeErr(expected, got interface{}) error {
|
if len(data) < 4 {
|
||||||
return fmt.Errorf("abi: cannot use %v as type %v as argument", got, expected)
|
return "", errors.New("invalid data for unpacking")
|
||||||
|
}
|
||||||
|
if !bytes.Equal(data[:4], e.ID[:4]) {
|
||||||
|
return "", errors.New("invalid data for unpacking")
|
||||||
|
}
|
||||||
|
return e.Inputs.Unpack(data[4:])
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,82 @@
|
||||||
|
// Copyright 2016 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package abi
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
errBadBool = errors.New("abi: improperly encoded boolean value")
|
||||||
|
)
|
||||||
|
|
||||||
|
// formatSliceString formats the reflection kind with the given slice size
|
||||||
|
// and returns a formatted string representation.
|
||||||
|
func formatSliceString(kind reflect.Kind, sliceSize int) string {
|
||||||
|
if sliceSize == -1 {
|
||||||
|
return fmt.Sprintf("[]%v", kind)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("[%d]%v", sliceSize, kind)
|
||||||
|
}
|
||||||
|
|
||||||
|
// sliceTypeCheck checks that the given slice can by assigned to the reflection
|
||||||
|
// type in t.
|
||||||
|
func sliceTypeCheck(t Type, val reflect.Value) error {
|
||||||
|
if val.Kind() != reflect.Slice && val.Kind() != reflect.Array {
|
||||||
|
return typeErr(formatSliceString(t.GetType().Kind(), t.Size), val.Type())
|
||||||
|
}
|
||||||
|
|
||||||
|
if t.T == ArrayTy && val.Len() != t.Size {
|
||||||
|
return typeErr(formatSliceString(t.Elem.GetType().Kind(), t.Size), formatSliceString(val.Type().Elem().Kind(), val.Len()))
|
||||||
|
}
|
||||||
|
|
||||||
|
if t.Elem.T == SliceTy || t.Elem.T == ArrayTy {
|
||||||
|
if val.Len() > 0 {
|
||||||
|
return sliceTypeCheck(*t.Elem, val.Index(0))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if val.Type().Elem().Kind() != t.Elem.GetType().Kind() {
|
||||||
|
return typeErr(formatSliceString(t.Elem.GetType().Kind(), t.Size), val.Type())
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// typeCheck checks that the given reflection value can be assigned to the reflection
|
||||||
|
// type in t.
|
||||||
|
func typeCheck(t Type, value reflect.Value) error {
|
||||||
|
if t.T == SliceTy || t.T == ArrayTy {
|
||||||
|
return sliceTypeCheck(t, value)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check base type validity. Element types will be checked later on.
|
||||||
|
if t.GetType().Kind() != value.Kind() {
|
||||||
|
return typeErr(t.GetType().Kind(), value.Kind())
|
||||||
|
} else if t.T == FixedBytesTy && t.Size != value.Len() {
|
||||||
|
return typeErr(t.GetType(), value.Type())
|
||||||
|
} else {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// typeErr returns a formatted type casting error.
|
||||||
|
func typeErr(expected, got interface{}) error {
|
||||||
|
return fmt.Errorf("abi: cannot use %v as type %v as argument", got, expected)
|
||||||
|
}
|
|
@ -123,15 +123,8 @@ func set(dst, src reflect.Value) error {
|
||||||
func setSlice(dst, src reflect.Value) error {
|
func setSlice(dst, src reflect.Value) error {
|
||||||
slice := reflect.MakeSlice(dst.Type(), src.Len(), src.Len())
|
slice := reflect.MakeSlice(dst.Type(), src.Len(), src.Len())
|
||||||
for i := 0; i < src.Len(); i++ {
|
for i := 0; i < src.Len(); i++ {
|
||||||
if src.Index(i).Kind() == reflect.Struct {
|
if err := set(slice.Index(i), src.Index(i)); err != nil {
|
||||||
if err := set(slice.Index(i), src.Index(i)); err != nil {
|
return err
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// e.g. [][32]uint8 to []common.Hash
|
|
||||||
if err := set(slice.Index(i), src.Index(i)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if dst.CanSet() {
|
if dst.CanSet() {
|
||||||
|
|
|
@ -1,19 +1,19 @@
|
||||||
# This file contains sha256 checksums of optional build dependencies.
|
# This file contains sha256 checksums of optional build dependencies.
|
||||||
|
|
||||||
3a70e5055509f347c0fb831ca07a2bf3b531068f349b14a3c652e9b5b67beb5d go1.17.src.tar.gz
|
2255eb3e4e824dd7d5fcdc2e7f84534371c186312e546fb1086a34c17752f431 go1.17.2.src.tar.gz
|
||||||
355bd544ce08d7d484d9d7de05a71b5c6f5bc10aa4b316688c2192aeb3dacfd1 go1.17.darwin-amd64.tar.gz
|
7914497a302a132a465d33f5ee044ce05568bacdb390ab805cb75a3435a23f94 go1.17.2.darwin-amd64.tar.gz
|
||||||
da4e3e3c194bf9eed081de8842a157120ef44a7a8d7c820201adae7b0e28b20b go1.17.darwin-arm64.tar.gz
|
ce8771bd3edfb5b28104084b56bbb532eeb47fbb7769c3e664c6223712c30904 go1.17.2.darwin-arm64.tar.gz
|
||||||
6819a7a11b8351d5d5768f2fff666abde97577602394f132cb7f85b3a7151f05 go1.17.freebsd-386.tar.gz
|
8cea5b8d1f8e8cbb58069bfed58954c71c5b1aca2f3c857765dae83bf724d0d7 go1.17.2.freebsd-386.tar.gz
|
||||||
15c184c83d99441d719da201b26256455eee85a808747c404b4183e9aa6c64b4 go1.17.freebsd-amd64.tar.gz
|
c96e57218fb03e74d683ad63b1684d44c89d5e5b994f36102b33dce21b58499a go1.17.2.freebsd-amd64.tar.gz
|
||||||
c19e3227a6ac6329db91d1af77bbf239ccd760a259c16e6b9c932d527ff14848 go1.17.linux-386.tar.gz
|
8617f2e40d51076983502894181ae639d1d8101bfbc4d7463a2b442f239f5596 go1.17.2.linux-386.tar.gz
|
||||||
6bf89fc4f5ad763871cf7eac80a2d594492de7a818303283f1366a7f6a30372d go1.17.linux-amd64.tar.gz
|
f242a9db6a0ad1846de7b6d94d507915d14062660616a61ef7c808a76e4f1676 go1.17.2.linux-amd64.tar.gz
|
||||||
01a9af009ada22122d3fcb9816049c1d21842524b38ef5d5a0e2ee4b26d7c3e7 go1.17.linux-arm64.tar.gz
|
a5a43c9cdabdb9f371d56951b14290eba8ce2f9b0db48fb5fc657943984fd4fc go1.17.2.linux-arm64.tar.gz
|
||||||
ae89d33f4e4acc222bdb04331933d5ece4ae71039812f6ccd7493cb3e8ddfb4e go1.17.linux-armv6l.tar.gz
|
04d16105008230a9763005be05606f7eb1c683a3dbf0fbfed4034b23889cb7f2 go1.17.2.linux-armv6l.tar.gz
|
||||||
ee84350114d532bf15f096198c675aafae9ff091dc4cc69eb49e1817ff94dbd7 go1.17.linux-ppc64le.tar.gz
|
12e2dc7e0ffeebe77083f267ef6705fec1621cdf2ed6489b3af04a13597ed68d go1.17.2.linux-ppc64le.tar.gz
|
||||||
a50aaecf054f393575f969a9105d5c6864dd91afc5287d772449033fbafcf7e3 go1.17.linux-s390x.tar.gz
|
c4b2349a8d11350ca038b8c57f3cc58dc0b31284bcbed4f7fca39aeed28b4a51 go1.17.2.linux-s390x.tar.gz
|
||||||
c5afdd2ea4969f2b44637e913b04f7c15265d7beb60924a28063722670a52feb go1.17.windows-386.zip
|
8a85257a351996fdf045fe95ed5fdd6917dd48636d562dd11dedf193005a53e0 go1.17.2.windows-386.zip
|
||||||
2a18bd65583e221be8b9b7c2fbe3696c40f6e27c2df689bbdcc939d49651d151 go1.17.windows-amd64.zip
|
fa6da0b829a66f5fab7e4e312fd6aa1b2d8f045c7ecee83b3d00f6fe5306759a go1.17.2.windows-amd64.zip
|
||||||
5256f92f643d9022394ddc84de5c74fe8660c2151daaa199b12e60e542d694ae go1.17.windows-arm64.zip
|
00575c85dc7a129ba892685a456b27a3f3670f71c8bfde1c5ad151f771d55df7 go1.17.2.windows-arm64.zip
|
||||||
|
|
||||||
d4bd25b9814eeaa2134197dd2c7671bb791eae786d42010d9d788af20dee4bfa golangci-lint-1.42.0-darwin-amd64.tar.gz
|
d4bd25b9814eeaa2134197dd2c7671bb791eae786d42010d9d788af20dee4bfa golangci-lint-1.42.0-darwin-amd64.tar.gz
|
||||||
e56859c04a2ad5390c6a497b1acb1cc9329ecb1010260c6faae9b5a4c35b35ea golangci-lint-1.42.0-darwin-arm64.tar.gz
|
e56859c04a2ad5390c6a497b1acb1cc9329ecb1010260c6faae9b5a4c35b35ea golangci-lint-1.42.0-darwin-arm64.tar.gz
|
||||||
|
|
57
build/ci.go
57
build/ci.go
|
@ -14,6 +14,7 @@
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
//go:build none
|
||||||
// +build none
|
// +build none
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -32,7 +33,6 @@ Available commands are:
|
||||||
nsis -- creates a Windows NSIS installer
|
nsis -- creates a Windows NSIS installer
|
||||||
aar [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an Android archive
|
aar [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an Android archive
|
||||||
xcode [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an iOS XCode framework
|
xcode [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an iOS XCode framework
|
||||||
xgo [ -alltools ] [ options ] -- cross builds according to options
|
|
||||||
purge [ -store blobstore ] [ -days threshold ] -- purges old archives from the blobstore
|
purge [ -store blobstore ] [ -days threshold ] -- purges old archives from the blobstore
|
||||||
|
|
||||||
For all commands, -n prevents execution of external programs (dry run mode).
|
For all commands, -n prevents execution of external programs (dry run mode).
|
||||||
|
@ -147,7 +147,7 @@ var (
|
||||||
// This is the version of go that will be downloaded by
|
// This is the version of go that will be downloaded by
|
||||||
//
|
//
|
||||||
// go run ci.go install -dlgo
|
// go run ci.go install -dlgo
|
||||||
dlgoVersion = "1.17"
|
dlgoVersion = "1.17.2"
|
||||||
)
|
)
|
||||||
|
|
||||||
var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
|
var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
|
||||||
|
@ -187,8 +187,6 @@ func main() {
|
||||||
doAndroidArchive(os.Args[2:])
|
doAndroidArchive(os.Args[2:])
|
||||||
case "xcode":
|
case "xcode":
|
||||||
doXCodeFramework(os.Args[2:])
|
doXCodeFramework(os.Args[2:])
|
||||||
case "xgo":
|
|
||||||
doXgo(os.Args[2:])
|
|
||||||
case "purge":
|
case "purge":
|
||||||
doPurge(os.Args[2:])
|
doPurge(os.Args[2:])
|
||||||
default:
|
default:
|
||||||
|
@ -259,6 +257,11 @@ func buildFlags(env build.Environment) (flags []string) {
|
||||||
if runtime.GOOS == "darwin" {
|
if runtime.GOOS == "darwin" {
|
||||||
ld = append(ld, "-s")
|
ld = append(ld, "-s")
|
||||||
}
|
}
|
||||||
|
// Enforce the stacksize to 8M, which is the case on most platforms apart from
|
||||||
|
// alpine Linux.
|
||||||
|
if runtime.GOOS == "linux" {
|
||||||
|
ld = append(ld, "-extldflags", "-Wl,-z,stack-size=0x800000")
|
||||||
|
}
|
||||||
if len(ld) > 0 {
|
if len(ld) > 0 {
|
||||||
flags = append(flags, "-ldflags", strings.Join(ld, " "))
|
flags = append(flags, "-ldflags", strings.Join(ld, " "))
|
||||||
}
|
}
|
||||||
|
@ -276,6 +279,7 @@ func doTest(cmdline []string) {
|
||||||
cc = flag.String("cc", "", "Sets C compiler binary")
|
cc = flag.String("cc", "", "Sets C compiler binary")
|
||||||
coverage = flag.Bool("coverage", false, "Whether to record code coverage")
|
coverage = flag.Bool("coverage", false, "Whether to record code coverage")
|
||||||
verbose = flag.Bool("v", false, "Whether to log verbosely")
|
verbose = flag.Bool("v", false, "Whether to log verbosely")
|
||||||
|
race = flag.Bool("race", false, "Execute the race detector")
|
||||||
)
|
)
|
||||||
flag.CommandLine.Parse(cmdline)
|
flag.CommandLine.Parse(cmdline)
|
||||||
|
|
||||||
|
@ -296,6 +300,9 @@ func doTest(cmdline []string) {
|
||||||
if *verbose {
|
if *verbose {
|
||||||
gotest.Args = append(gotest.Args, "-v")
|
gotest.Args = append(gotest.Args, "-v")
|
||||||
}
|
}
|
||||||
|
if *race {
|
||||||
|
gotest.Args = append(gotest.Args, "-race")
|
||||||
|
}
|
||||||
|
|
||||||
packages := []string{"./..."}
|
packages := []string{"./..."}
|
||||||
if len(flag.CommandLine.Args()) > 0 {
|
if len(flag.CommandLine.Args()) > 0 {
|
||||||
|
@ -1199,48 +1206,6 @@ func newPodMetadata(env build.Environment, archive string) podMetadata {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Cross compilation
|
|
||||||
|
|
||||||
func doXgo(cmdline []string) {
|
|
||||||
var (
|
|
||||||
alltools = flag.Bool("alltools", false, `Flag whether we're building all known tools, or only on in particular`)
|
|
||||||
)
|
|
||||||
flag.CommandLine.Parse(cmdline)
|
|
||||||
env := build.Env()
|
|
||||||
var tc build.GoToolchain
|
|
||||||
|
|
||||||
// Make sure xgo is available for cross compilation
|
|
||||||
build.MustRun(tc.Install(GOBIN, "github.com/karalabe/xgo@latest"))
|
|
||||||
|
|
||||||
// If all tools building is requested, build everything the builder wants
|
|
||||||
args := append(buildFlags(env), flag.Args()...)
|
|
||||||
|
|
||||||
if *alltools {
|
|
||||||
args = append(args, []string{"--dest", GOBIN}...)
|
|
||||||
for _, res := range allToolsArchiveFiles {
|
|
||||||
if strings.HasPrefix(res, GOBIN) {
|
|
||||||
// Binary tool found, cross build it explicitly
|
|
||||||
args = append(args, "./"+filepath.Join("cmd", filepath.Base(res)))
|
|
||||||
build.MustRun(xgoTool(args))
|
|
||||||
args = args[:len(args)-1]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Otherwise execute the explicit cross compilation
|
|
||||||
path := args[len(args)-1]
|
|
||||||
args = append(args[:len(args)-1], []string{"--dest", GOBIN, path}...)
|
|
||||||
build.MustRun(xgoTool(args))
|
|
||||||
}
|
|
||||||
|
|
||||||
func xgoTool(args []string) *exec.Cmd {
|
|
||||||
cmd := exec.Command(filepath.Join(GOBIN, "xgo"), args...)
|
|
||||||
cmd.Env = os.Environ()
|
|
||||||
cmd.Env = append(cmd.Env, []string{"GOBIN=" + GOBIN}...)
|
|
||||||
return cmd
|
|
||||||
}
|
|
||||||
|
|
||||||
// Binary distribution cleanups
|
// Binary distribution cleanups
|
||||||
|
|
||||||
func doPurge(cmdline []string) {
|
func doPurge(cmdline []string) {
|
||||||
|
|
|
@ -26,6 +26,7 @@ import (
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
|
@ -35,17 +36,19 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
type result struct {
|
type result struct {
|
||||||
Error error
|
Error error
|
||||||
Address common.Address
|
Address common.Address
|
||||||
Hash common.Hash
|
Hash common.Hash
|
||||||
|
IntrinsicGas uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarshalJSON marshals as JSON with a hash.
|
// MarshalJSON marshals as JSON with a hash.
|
||||||
func (r *result) MarshalJSON() ([]byte, error) {
|
func (r *result) MarshalJSON() ([]byte, error) {
|
||||||
type xx struct {
|
type xx struct {
|
||||||
Error string `json:"error,omitempty"`
|
Error string `json:"error,omitempty"`
|
||||||
Address *common.Address `json:"address,omitempty"`
|
Address *common.Address `json:"address,omitempty"`
|
||||||
Hash *common.Hash `json:"hash,omitempty"`
|
Hash *common.Hash `json:"hash,omitempty"`
|
||||||
|
IntrinsicGas uint64 `json:"intrinsicGas,omitempty"`
|
||||||
}
|
}
|
||||||
var out xx
|
var out xx
|
||||||
if r.Error != nil {
|
if r.Error != nil {
|
||||||
|
@ -57,6 +60,7 @@ func (r *result) MarshalJSON() ([]byte, error) {
|
||||||
if r.Hash != (common.Hash{}) {
|
if r.Hash != (common.Hash{}) {
|
||||||
out.Hash = &r.Hash
|
out.Hash = &r.Hash
|
||||||
}
|
}
|
||||||
|
out.IntrinsicGas = r.IntrinsicGas
|
||||||
return json.Marshal(out)
|
return json.Marshal(out)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -117,18 +121,55 @@ func Transaction(ctx *cli.Context) error {
|
||||||
}
|
}
|
||||||
var results []result
|
var results []result
|
||||||
for it.Next() {
|
for it.Next() {
|
||||||
|
if err := it.Err(); err != nil {
|
||||||
|
return NewError(ErrorIO, err)
|
||||||
|
}
|
||||||
var tx types.Transaction
|
var tx types.Transaction
|
||||||
err := rlp.DecodeBytes(it.Value(), &tx)
|
err := rlp.DecodeBytes(it.Value(), &tx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
results = append(results, result{Error: err})
|
results = append(results, result{Error: err})
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
sender, err := types.Sender(signer, &tx)
|
r := result{Hash: tx.Hash()}
|
||||||
if err != nil {
|
if sender, err := types.Sender(signer, &tx); err != nil {
|
||||||
results = append(results, result{Error: err})
|
r.Error = err
|
||||||
|
results = append(results, r)
|
||||||
continue
|
continue
|
||||||
|
} else {
|
||||||
|
r.Address = sender
|
||||||
}
|
}
|
||||||
results = append(results, result{Address: sender, Hash: tx.Hash()})
|
// Check intrinsic gas
|
||||||
|
if gas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil,
|
||||||
|
chainConfig.IsHomestead(new(big.Int)), chainConfig.IsIstanbul(new(big.Int))); err != nil {
|
||||||
|
r.Error = err
|
||||||
|
results = append(results, r)
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
r.IntrinsicGas = gas
|
||||||
|
if tx.Gas() < gas {
|
||||||
|
r.Error = fmt.Errorf("%w: have %d, want %d", core.ErrIntrinsicGas, tx.Gas(), gas)
|
||||||
|
results = append(results, r)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Validate <256bit fields
|
||||||
|
switch {
|
||||||
|
case tx.Value().BitLen() > 256:
|
||||||
|
r.Error = errors.New("value exceeds 256 bits")
|
||||||
|
case tx.GasPrice().BitLen() > 256:
|
||||||
|
r.Error = errors.New("gasPrice exceeds 256 bits")
|
||||||
|
case tx.GasTipCap().BitLen() > 256:
|
||||||
|
r.Error = errors.New("maxPriorityFeePerGas exceeds 256 bits")
|
||||||
|
case tx.GasFeeCap().BitLen() > 256:
|
||||||
|
r.Error = errors.New("maxFeePerGas exceeds 256 bits")
|
||||||
|
case tx.GasFeeCap().Cmp(tx.GasTipCap()) < 0:
|
||||||
|
r.Error = errors.New("maxFeePerGas < maxPriorityFeePerGas")
|
||||||
|
case new(big.Int).Mul(tx.GasPrice(), new(big.Int).SetUint64(tx.Gas())).BitLen() > 256:
|
||||||
|
r.Error = errors.New("gas * gasPrice exceeds 256 bits")
|
||||||
|
case new(big.Int).Mul(tx.GasFeeCap(), new(big.Int).SetUint64(tx.Gas())).BitLen() > 256:
|
||||||
|
r.Error = errors.New("gas * maxFeePerGas exceeds 256 bits")
|
||||||
|
}
|
||||||
|
results = append(results, r)
|
||||||
}
|
}
|
||||||
out, err := json.MarshalIndent(results, "", " ")
|
out, err := json.MarshalIndent(results, "", " ")
|
||||||
fmt.Println(string(out))
|
fmt.Println(string(out))
|
||||||
|
|
|
@ -419,7 +419,7 @@ func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, a
|
||||||
return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err))
|
return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err))
|
||||||
}
|
}
|
||||||
os.Stdout.Write(b)
|
os.Stdout.Write(b)
|
||||||
os.Stdout.Write([]byte("\n"))
|
os.Stdout.WriteString("\n")
|
||||||
}
|
}
|
||||||
if len(stdErrObject) > 0 {
|
if len(stdErrObject) > 0 {
|
||||||
b, err := json.MarshalIndent(stdErrObject, "", " ")
|
b, err := json.MarshalIndent(stdErrObject, "", " ")
|
||||||
|
@ -427,7 +427,7 @@ func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, a
|
||||||
return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err))
|
return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err))
|
||||||
}
|
}
|
||||||
os.Stderr.Write(b)
|
os.Stderr.Write(b)
|
||||||
os.Stderr.Write([]byte("\n"))
|
os.Stderr.WriteString("\n")
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -9,6 +9,7 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/docker/docker/pkg/reexec"
|
"github.com/docker/docker/pkg/reexec"
|
||||||
|
"github.com/ethereum/go-ethereum/cmd/evm/internal/t8ntool"
|
||||||
"github.com/ethereum/go-ethereum/internal/cmdtest"
|
"github.com/ethereum/go-ethereum/internal/cmdtest"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -170,13 +171,45 @@ func TestT8n(t *testing.T) {
|
||||||
output: t8nOutput{result: true},
|
output: t8nOutput{result: true},
|
||||||
expOut: "exp2.json",
|
expOut: "exp2.json",
|
||||||
},
|
},
|
||||||
|
{ // Difficulty calculation - with uncles + Berlin
|
||||||
|
base: "./testdata/14",
|
||||||
|
input: t8nInput{
|
||||||
|
"alloc.json", "txs.json", "env.uncles.json", "Berlin", "",
|
||||||
|
},
|
||||||
|
output: t8nOutput{result: true},
|
||||||
|
expOut: "exp_berlin.json",
|
||||||
|
},
|
||||||
|
{ // Difficulty calculation on arrow glacier
|
||||||
|
base: "./testdata/19",
|
||||||
|
input: t8nInput{
|
||||||
|
"alloc.json", "txs.json", "env.json", "London", "",
|
||||||
|
},
|
||||||
|
output: t8nOutput{result: true},
|
||||||
|
expOut: "exp_london.json",
|
||||||
|
},
|
||||||
|
{ // Difficulty calculation on arrow glacier
|
||||||
|
base: "./testdata/19",
|
||||||
|
input: t8nInput{
|
||||||
|
"alloc.json", "txs.json", "env.json", "ArrowGlacier", "",
|
||||||
|
},
|
||||||
|
output: t8nOutput{result: true},
|
||||||
|
expOut: "exp_arrowglacier.json",
|
||||||
|
},
|
||||||
} {
|
} {
|
||||||
|
|
||||||
args := []string{"t8n"}
|
args := []string{"t8n"}
|
||||||
args = append(args, tc.output.get()...)
|
args = append(args, tc.output.get()...)
|
||||||
args = append(args, tc.input.get(tc.base)...)
|
args = append(args, tc.input.get(tc.base)...)
|
||||||
|
var qArgs []string // quoted args for debugging purposes
|
||||||
|
for _, arg := range args {
|
||||||
|
if len(arg) == 0 {
|
||||||
|
qArgs = append(qArgs, `""`)
|
||||||
|
} else {
|
||||||
|
qArgs = append(qArgs, arg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
tt.Logf("args: %v\n", strings.Join(qArgs, " "))
|
||||||
tt.Run("evm-test", args...)
|
tt.Run("evm-test", args...)
|
||||||
tt.Logf("args: %v\n", strings.Join(args, " "))
|
|
||||||
// Compare the expected output, if provided
|
// Compare the expected output, if provided
|
||||||
if tc.expOut != "" {
|
if tc.expOut != "" {
|
||||||
want, err := os.ReadFile(fmt.Sprintf("%v/%v", tc.base, tc.expOut))
|
want, err := os.ReadFile(fmt.Sprintf("%v/%v", tc.base, tc.expOut))
|
||||||
|
@ -233,7 +266,7 @@ func TestT9n(t *testing.T) {
|
||||||
},
|
},
|
||||||
expOut: "exp.json",
|
expOut: "exp.json",
|
||||||
},
|
},
|
||||||
{ // London txs on homestead
|
{ // London txs on London
|
||||||
base: "./testdata/15",
|
base: "./testdata/15",
|
||||||
input: t9nInput{
|
input: t9nInput{
|
||||||
inTxs: "signed_txs.rlp",
|
inTxs: "signed_txs.rlp",
|
||||||
|
@ -249,6 +282,30 @@ func TestT9n(t *testing.T) {
|
||||||
},
|
},
|
||||||
expOut: "exp3.json",
|
expOut: "exp3.json",
|
||||||
},
|
},
|
||||||
|
{ // Transactions with too low gas
|
||||||
|
base: "./testdata/16",
|
||||||
|
input: t9nInput{
|
||||||
|
inTxs: "signed_txs.rlp",
|
||||||
|
stFork: "London",
|
||||||
|
},
|
||||||
|
expOut: "exp.json",
|
||||||
|
},
|
||||||
|
{ // Transactions with value exceeding 256 bits
|
||||||
|
base: "./testdata/17",
|
||||||
|
input: t9nInput{
|
||||||
|
inTxs: "signed_txs.rlp",
|
||||||
|
stFork: "London",
|
||||||
|
},
|
||||||
|
expOut: "exp.json",
|
||||||
|
},
|
||||||
|
{ // Invalid RLP
|
||||||
|
base: "./testdata/18",
|
||||||
|
input: t9nInput{
|
||||||
|
inTxs: "invalid.rlp",
|
||||||
|
stFork: "London",
|
||||||
|
},
|
||||||
|
expExitCode: t8ntool.ErrorIO,
|
||||||
|
},
|
||||||
} {
|
} {
|
||||||
|
|
||||||
args := []string{"t9n"}
|
args := []string{"t9n"}
|
||||||
|
|
|
@ -0,0 +1,11 @@
|
||||||
|
{
|
||||||
|
"result": {
|
||||||
|
"stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
|
||||||
|
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
|
||||||
|
"receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
|
||||||
|
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
|
||||||
|
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
"receipts": [],
|
||||||
|
"currentDifficulty": "0x1ff9000000000"
|
||||||
|
}
|
||||||
|
}
|
|
@ -1,8 +1,10 @@
|
||||||
[
|
[
|
||||||
{
|
{
|
||||||
"error": "transaction type not supported"
|
"error": "transaction type not supported",
|
||||||
|
"hash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"error": "transaction type not supported"
|
"error": "transaction type not supported",
|
||||||
|
"hash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a"
|
||||||
}
|
}
|
||||||
]
|
]
|
|
@ -1,10 +1,12 @@
|
||||||
[
|
[
|
||||||
{
|
{
|
||||||
"address": "0xd02d72e067e77158444ef2020ff2d325f929b363",
|
"address": "0xd02d72e067e77158444ef2020ff2d325f929b363",
|
||||||
"hash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476"
|
"hash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476",
|
||||||
|
"intrinsicGas": 21000
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"address": "0xd02d72e067e77158444ef2020ff2d325f929b363",
|
"address": "0xd02d72e067e77158444ef2020ff2d325f929b363",
|
||||||
"hash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a"
|
"hash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a",
|
||||||
|
"intrinsicGas": 21000
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
|
|
@ -0,0 +1,13 @@
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"address": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
|
||||||
|
"hash": "0x7cc3d1a8540a44736750f03bb4d85c0113be4b3472a71bf82241a3b261b479e6",
|
||||||
|
"intrinsicGas": 21000
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"error": "intrinsic gas too low: have 82, want 21000",
|
||||||
|
"address": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
|
||||||
|
"hash": "0x3b2d2609e4361562edb9169314f4c05afc6dbf5d706bf9dda5abe242ab76a22b",
|
||||||
|
"intrinsicGas": 21000
|
||||||
|
}
|
||||||
|
]
|
|
@ -0,0 +1 @@
|
||||||
|
"0xf8cab86401f8610180018252089411111111111111111111111111111111111111112080c001a0937f65ef1deece46c473b99962678fb7c38425cf303d1e8fa9717eb4b9d012b5a01940c5a5647c4940217ffde1051a5fd92ec8551e275c1787f81f50a2ad84de43b86201f85f018001529411111111111111111111111111111111111111112080c001a0241c3aec732205542a87fef8c76346741e85480bce5a42d05a9a73dac892f84ca04f52e2dfce57f3a02ed10e085e1a154edf38a726da34127c85fc53b4921759c8"
|
|
@ -0,0 +1,34 @@
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"input" : "0x",
|
||||||
|
"gas" : "0x5208",
|
||||||
|
"nonce" : "0x0",
|
||||||
|
"to" : "0x1111111111111111111111111111111111111111",
|
||||||
|
"value" : "0x20",
|
||||||
|
"v" : "0x0",
|
||||||
|
"r" : "0x0",
|
||||||
|
"s" : "0x0",
|
||||||
|
"secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
|
||||||
|
"chainId" : "0x1",
|
||||||
|
"type" : "0x1",
|
||||||
|
"gasPrice": "0x1",
|
||||||
|
"accessList" : [
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"input" : "0x",
|
||||||
|
"gas" : "0x52",
|
||||||
|
"nonce" : "0x0",
|
||||||
|
"to" : "0x1111111111111111111111111111111111111111",
|
||||||
|
"value" : "0x20",
|
||||||
|
"v" : "0x0",
|
||||||
|
"r" : "0x0",
|
||||||
|
"s" : "0x0",
|
||||||
|
"secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
|
||||||
|
"chainId" : "0x1",
|
||||||
|
"type" : "0x1",
|
||||||
|
"gasPrice": "0x1",
|
||||||
|
"accessList" : [
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
|
@ -0,0 +1,22 @@
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"error": "value exceeds 256 bits",
|
||||||
|
"address": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
|
||||||
|
"hash": "0xfbd91685dcbf8172f0e8c53e2ddbb4d26707840da6b51a74371f62a33868fd82",
|
||||||
|
"intrinsicGas": 21000
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"error": "gasPrice exceeds 256 bits",
|
||||||
|
"address": "0x1b57ccef1fe5fb73f1e64530fb4ebd9cf1655964",
|
||||||
|
"hash": "0x45dc05035cada83748e4c1fe617220106b331eca054f44c2304d5654a9fb29d5",
|
||||||
|
"intrinsicGas": 21000
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"error": "invalid transaction v, r, s values",
|
||||||
|
"hash": "0xf06691c2a803ab7f3c81d06a0c0a896f80f311105c599fc59a9fdbc669356d35"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"error": "invalid transaction v, r, s values",
|
||||||
|
"hash": "0x84703b697ad5b0db25e4f1f98fb6b1adce85b9edb2232eeba9cedd8c6601694b"
|
||||||
|
}
|
||||||
|
]
|
|
@ -0,0 +1,46 @@
|
||||||
|
[
|
||||||
|
[
|
||||||
|
"",
|
||||||
|
"d",
|
||||||
|
5208,
|
||||||
|
d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0,
|
||||||
|
010000000000000000000000000000000000000000000000000000000000000001,
|
||||||
|
"",
|
||||||
|
1b,
|
||||||
|
c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549d,
|
||||||
|
6180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28,
|
||||||
|
],
|
||||||
|
[
|
||||||
|
"",
|
||||||
|
010000000000000000000000000000000000000000000000000000000000000001,
|
||||||
|
5208,
|
||||||
|
d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0,
|
||||||
|
11,
|
||||||
|
"",
|
||||||
|
1b,
|
||||||
|
c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549d,
|
||||||
|
6180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28,
|
||||||
|
],
|
||||||
|
[
|
||||||
|
"",
|
||||||
|
11,
|
||||||
|
5208,
|
||||||
|
d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0,
|
||||||
|
11,
|
||||||
|
"",
|
||||||
|
1b,
|
||||||
|
c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549daa,
|
||||||
|
6180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28,
|
||||||
|
],
|
||||||
|
[
|
||||||
|
"",
|
||||||
|
11,
|
||||||
|
5208,
|
||||||
|
d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0,
|
||||||
|
11,
|
||||||
|
"",
|
||||||
|
1b,
|
||||||
|
c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549d,
|
||||||
|
6180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28bb,
|
||||||
|
],
|
||||||
|
]
|
|
@ -0,0 +1 @@
|
||||||
|
"0xf901c8f880806482520894d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0a1010000000000000000000000000000000000000000000000000000000000000001801ba0c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549da06180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28f88080a101000000000000000000000000000000000000000000000000000000000000000182520894d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d011801ba0c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549da06180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28f860801182520894d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d011801ba1c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549daaa06180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28f860801182520894d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d011801ba0c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549da16180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28bb"
|
|
@ -0,0 +1,9 @@
|
||||||
|
# Invalid rlp
|
||||||
|
|
||||||
|
This folder contains a sample of invalid RLP, and it's expected
|
||||||
|
that the t9n handles this properly:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ go run . t9n --input.txs=./testdata/18/invalid.rlp --state.fork=London
|
||||||
|
ERROR(11): rlp: value size exceeds available input length
|
||||||
|
```
|
|
@ -0,0 +1 @@
|
||||||
|
"0xf852328001825208870b9331677e6ebf0a801ca098ff921201554726367d2be8c804a7ff89ccf285ebc57dff8ae4c44b9c19ac4aa03887321be575c8095f789dd4c743dfe42c1820f9231f98a962b210e3ac2452a3"
|
|
@ -0,0 +1,12 @@
|
||||||
|
{
|
||||||
|
"a94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
|
||||||
|
"balance": "0x5ffd4878be161d74",
|
||||||
|
"code": "0x",
|
||||||
|
"nonce": "0xac",
|
||||||
|
"storage": {}
|
||||||
|
},
|
||||||
|
"0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192":{
|
||||||
|
"balance": "0xfeedbead",
|
||||||
|
"nonce" : "0x00"
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,9 @@
|
||||||
|
{
|
||||||
|
"currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b",
|
||||||
|
"currentGasLimit": "0x750a163df65e8a",
|
||||||
|
"currentBaseFee": "0x500",
|
||||||
|
"currentNumber": "13000000",
|
||||||
|
"currentTimestamp": "100015",
|
||||||
|
"parentTimestamp" : "99999",
|
||||||
|
"parentDifficulty" : "0x2000000000000"
|
||||||
|
}
|
|
@ -0,0 +1,11 @@
|
||||||
|
{
|
||||||
|
"result": {
|
||||||
|
"stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
|
||||||
|
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
|
||||||
|
"receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
|
||||||
|
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
|
||||||
|
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
"currentDifficulty": "0x2000000200000",
|
||||||
|
"receipts": []
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,11 @@
|
||||||
|
{
|
||||||
|
"result": {
|
||||||
|
"stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
|
||||||
|
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
|
||||||
|
"receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
|
||||||
|
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
|
||||||
|
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
"currentDifficulty": "0x2000080000000",
|
||||||
|
"receipts": []
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,9 @@
|
||||||
|
## Difficulty calculation
|
||||||
|
|
||||||
|
This test shows how the `evm t8n` can be used to calculate the (ethash) difficulty, if none is provided by the caller,
|
||||||
|
this time on `ArrowGlacier` (Eip 4345).
|
||||||
|
|
||||||
|
Calculating it (with an empty set of txs) using `ArrowGlacier` rules (and no provided unclehash for the parent block):
|
||||||
|
```
|
||||||
|
[user@work evm]$ ./evm t8n --input.alloc=./testdata/14/alloc.json --input.txs=./testdata/14/txs.json --input.env=./testdata/14/env.json --output.result=stdout --state.fork=ArrowGlacier
|
||||||
|
```
|
|
@ -0,0 +1 @@
|
||||||
|
[]
|
|
@ -741,7 +741,7 @@ func authTwitter(url string, tokenV1, tokenV2 string) (string, string, string, c
|
||||||
return "", "", "", common.Address{}, errors.New("No Ethereum address found to fund")
|
return "", "", "", common.Address{}, errors.New("No Ethereum address found to fund")
|
||||||
}
|
}
|
||||||
var avatar string
|
var avatar string
|
||||||
if parts = regexp.MustCompile("src=\"([^\"]+twimg.com/profile_images[^\"]+)\"").FindStringSubmatch(string(body)); len(parts) == 2 {
|
if parts = regexp.MustCompile(`src="([^"]+twimg\.com/profile_images[^"]+)"`).FindStringSubmatch(string(body)); len(parts) == 2 {
|
||||||
avatar = parts[1]
|
avatar = parts[1]
|
||||||
}
|
}
|
||||||
return username + "@twitter", username, avatar, address, nil
|
return username + "@twitter", username, avatar, address, nil
|
||||||
|
@ -867,7 +867,7 @@ func authFacebook(url string) (string, string, common.Address, error) {
|
||||||
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
|
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
|
||||||
}
|
}
|
||||||
var avatar string
|
var avatar string
|
||||||
if parts = regexp.MustCompile("src=\"([^\"]+fbcdn.net[^\"]+)\"").FindStringSubmatch(string(body)); len(parts) == 2 {
|
if parts = regexp.MustCompile(`src="([^"]+fbcdn\.net[^"]+)"`).FindStringSubmatch(string(body)); len(parts) == 2 {
|
||||||
avatar = parts[1]
|
avatar = parts[1]
|
||||||
}
|
}
|
||||||
return username + "@facebook", avatar, address, nil
|
return username + "@facebook", avatar, address, nil
|
||||||
|
|
|
@ -140,7 +140,9 @@ be gzipped.`,
|
||||||
},
|
},
|
||||||
Category: "BLOCKCHAIN COMMANDS",
|
Category: "BLOCKCHAIN COMMANDS",
|
||||||
Description: `
|
Description: `
|
||||||
The import-preimages command imports hash preimages from an RLP encoded stream.`,
|
The import-preimages command imports hash preimages from an RLP encoded stream.
|
||||||
|
It's deprecated, please use "geth db import" instead.
|
||||||
|
`,
|
||||||
}
|
}
|
||||||
exportPreimagesCommand = cli.Command{
|
exportPreimagesCommand = cli.Command{
|
||||||
Action: utils.MigrateFlags(exportPreimages),
|
Action: utils.MigrateFlags(exportPreimages),
|
||||||
|
@ -154,7 +156,9 @@ be gzipped.`,
|
||||||
},
|
},
|
||||||
Category: "BLOCKCHAIN COMMANDS",
|
Category: "BLOCKCHAIN COMMANDS",
|
||||||
Description: `
|
Description: `
|
||||||
The export-preimages command export hash preimages to an RLP encoded stream`,
|
The export-preimages command exports hash preimages to an RLP encoded stream.
|
||||||
|
It's deprecated, please use "geth db export" instead.
|
||||||
|
`,
|
||||||
}
|
}
|
||||||
dumpCommand = cli.Command{
|
dumpCommand = cli.Command{
|
||||||
Action: utils.MigrateFlags(dump),
|
Action: utils.MigrateFlags(dump),
|
||||||
|
@ -368,7 +372,6 @@ func exportPreimages(ctx *cli.Context) error {
|
||||||
if len(ctx.Args()) < 1 {
|
if len(ctx.Args()) < 1 {
|
||||||
utils.Fatalf("This command requires an argument.")
|
utils.Fatalf("This command requires an argument.")
|
||||||
}
|
}
|
||||||
|
|
||||||
stack, _ := makeConfigNode(ctx)
|
stack, _ := makeConfigNode(ctx)
|
||||||
defer stack.Close()
|
defer stack.Close()
|
||||||
|
|
||||||
|
|
|
@ -156,8 +156,8 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
|
||||||
// makeFullNode loads geth configuration and creates the Ethereum backend.
|
// makeFullNode loads geth configuration and creates the Ethereum backend.
|
||||||
func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
|
func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
|
||||||
stack, cfg := makeConfigNode(ctx)
|
stack, cfg := makeConfigNode(ctx)
|
||||||
if ctx.GlobalIsSet(utils.OverrideLondonFlag.Name) {
|
if ctx.GlobalIsSet(utils.OverrideArrowGlacierFlag.Name) {
|
||||||
cfg.Eth.OverrideLondon = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideLondonFlag.Name))
|
cfg.Eth.OverrideArrowGlacier = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideArrowGlacierFlag.Name))
|
||||||
}
|
}
|
||||||
backend, eth := utils.RegisterEthService(stack, &cfg.Eth)
|
backend, eth := utils.RegisterEthService(stack, &cfg.Eth)
|
||||||
|
|
||||||
|
|
|
@ -17,11 +17,16 @@
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
|
"os/signal"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||||
|
@ -62,6 +67,8 @@ Remove blockchain and state databases`,
|
||||||
dbPutCmd,
|
dbPutCmd,
|
||||||
dbGetSlotsCmd,
|
dbGetSlotsCmd,
|
||||||
dbDumpFreezerIndex,
|
dbDumpFreezerIndex,
|
||||||
|
dbImportCmd,
|
||||||
|
dbExportCmd,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
dbInspectCmd = cli.Command{
|
dbInspectCmd = cli.Command{
|
||||||
|
@ -187,6 +194,36 @@ WARNING: This is a low-level operation which may cause database corruption!`,
|
||||||
},
|
},
|
||||||
Description: "This command displays information about the freezer index.",
|
Description: "This command displays information about the freezer index.",
|
||||||
}
|
}
|
||||||
|
dbImportCmd = cli.Command{
|
||||||
|
Action: utils.MigrateFlags(importLDBdata),
|
||||||
|
Name: "import",
|
||||||
|
Usage: "Imports leveldb-data from an exported RLP dump.",
|
||||||
|
ArgsUsage: "<dumpfile> <start (optional)",
|
||||||
|
Flags: []cli.Flag{
|
||||||
|
utils.DataDirFlag,
|
||||||
|
utils.SyncModeFlag,
|
||||||
|
utils.MainnetFlag,
|
||||||
|
utils.RopstenFlag,
|
||||||
|
utils.RinkebyFlag,
|
||||||
|
utils.GoerliFlag,
|
||||||
|
},
|
||||||
|
Description: "The import command imports the specific chain data from an RLP encoded stream.",
|
||||||
|
}
|
||||||
|
dbExportCmd = cli.Command{
|
||||||
|
Action: utils.MigrateFlags(exportChaindata),
|
||||||
|
Name: "export",
|
||||||
|
Usage: "Exports the chain data into an RLP dump. If the <dumpfile> has .gz suffix, gzip compression will be used.",
|
||||||
|
ArgsUsage: "<type> <dumpfile>",
|
||||||
|
Flags: []cli.Flag{
|
||||||
|
utils.DataDirFlag,
|
||||||
|
utils.SyncModeFlag,
|
||||||
|
utils.MainnetFlag,
|
||||||
|
utils.RopstenFlag,
|
||||||
|
utils.RinkebyFlag,
|
||||||
|
utils.GoerliFlag,
|
||||||
|
},
|
||||||
|
Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
|
||||||
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
func removeDB(ctx *cli.Context) error {
|
func removeDB(ctx *cli.Context) error {
|
||||||
|
@ -335,14 +372,15 @@ func dbGet(ctx *cli.Context) error {
|
||||||
db := utils.MakeChainDatabase(ctx, stack, true)
|
db := utils.MakeChainDatabase(ctx, stack, true)
|
||||||
defer db.Close()
|
defer db.Close()
|
||||||
|
|
||||||
key, err := hexutil.Decode(ctx.Args().Get(0))
|
key, err := parseHexOrString(ctx.Args().Get(0))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Info("Could not decode the key", "error", err)
|
log.Info("Could not decode the key", "error", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
data, err := db.Get(key)
|
data, err := db.Get(key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Info("Get operation failed", "error", err)
|
log.Info("Get operation failed", "key", fmt.Sprintf("0x%#x", key), "error", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
fmt.Printf("key %#x: %#x\n", key, data)
|
fmt.Printf("key %#x: %#x\n", key, data)
|
||||||
|
@ -360,7 +398,7 @@ func dbDelete(ctx *cli.Context) error {
|
||||||
db := utils.MakeChainDatabase(ctx, stack, false)
|
db := utils.MakeChainDatabase(ctx, stack, false)
|
||||||
defer db.Close()
|
defer db.Close()
|
||||||
|
|
||||||
key, err := hexutil.Decode(ctx.Args().Get(0))
|
key, err := parseHexOrString(ctx.Args().Get(0))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Info("Could not decode the key", "error", err)
|
log.Info("Could not decode the key", "error", err)
|
||||||
return err
|
return err
|
||||||
|
@ -370,7 +408,7 @@ func dbDelete(ctx *cli.Context) error {
|
||||||
fmt.Printf("Previous value: %#x\n", data)
|
fmt.Printf("Previous value: %#x\n", data)
|
||||||
}
|
}
|
||||||
if err = db.Delete(key); err != nil {
|
if err = db.Delete(key); err != nil {
|
||||||
log.Info("Delete operation returned an error", "error", err)
|
log.Info("Delete operation returned an error", "key", fmt.Sprintf("0x%#x", key), "error", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -393,7 +431,7 @@ func dbPut(ctx *cli.Context) error {
|
||||||
data []byte
|
data []byte
|
||||||
err error
|
err error
|
||||||
)
|
)
|
||||||
key, err = hexutil.Decode(ctx.Args().Get(0))
|
key, err = parseHexOrString(ctx.Args().Get(0))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Info("Could not decode the key", "error", err)
|
log.Info("Could not decode the key", "error", err)
|
||||||
return err
|
return err
|
||||||
|
@ -499,3 +537,142 @@ func freezerInspect(ctx *cli.Context) error {
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ParseHexOrString tries to hexdecode b, but if the prefix is missing, it instead just returns the raw bytes
|
||||||
|
func parseHexOrString(str string) ([]byte, error) {
|
||||||
|
b, err := hexutil.Decode(str)
|
||||||
|
if errors.Is(err, hexutil.ErrMissingPrefix) {
|
||||||
|
return []byte(str), nil
|
||||||
|
}
|
||||||
|
return b, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func importLDBdata(ctx *cli.Context) error {
|
||||||
|
start := 0
|
||||||
|
switch ctx.NArg() {
|
||||||
|
case 1:
|
||||||
|
break
|
||||||
|
case 2:
|
||||||
|
s, err := strconv.Atoi(ctx.Args().Get(1))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("second arg must be an integer: %v", err)
|
||||||
|
}
|
||||||
|
start = s
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
fName = ctx.Args().Get(0)
|
||||||
|
stack, _ = makeConfigNode(ctx)
|
||||||
|
interrupt = make(chan os.Signal, 1)
|
||||||
|
stop = make(chan struct{})
|
||||||
|
)
|
||||||
|
defer stack.Close()
|
||||||
|
signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
|
||||||
|
defer signal.Stop(interrupt)
|
||||||
|
defer close(interrupt)
|
||||||
|
go func() {
|
||||||
|
if _, ok := <-interrupt; ok {
|
||||||
|
log.Info("Interrupted during ldb import, stopping at next batch")
|
||||||
|
}
|
||||||
|
close(stop)
|
||||||
|
}()
|
||||||
|
db := utils.MakeChainDatabase(ctx, stack, false)
|
||||||
|
return utils.ImportLDBData(db, fName, int64(start), stop)
|
||||||
|
}
|
||||||
|
|
||||||
|
type preimageIterator struct {
|
||||||
|
iter ethdb.Iterator
|
||||||
|
}
|
||||||
|
|
||||||
|
func (iter *preimageIterator) Next() (byte, []byte, []byte, bool) {
|
||||||
|
for iter.iter.Next() {
|
||||||
|
key := iter.iter.Key()
|
||||||
|
if bytes.HasPrefix(key, rawdb.PreimagePrefix) && len(key) == (len(rawdb.PreimagePrefix)+common.HashLength) {
|
||||||
|
return utils.OpBatchAdd, key, iter.iter.Value(), true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0, nil, nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (iter *preimageIterator) Release() {
|
||||||
|
iter.iter.Release()
|
||||||
|
}
|
||||||
|
|
||||||
|
type snapshotIterator struct {
|
||||||
|
init bool
|
||||||
|
account ethdb.Iterator
|
||||||
|
storage ethdb.Iterator
|
||||||
|
}
|
||||||
|
|
||||||
|
func (iter *snapshotIterator) Next() (byte, []byte, []byte, bool) {
|
||||||
|
if !iter.init {
|
||||||
|
iter.init = true
|
||||||
|
return utils.OpBatchDel, rawdb.SnapshotRootKey, nil, true
|
||||||
|
}
|
||||||
|
for iter.account.Next() {
|
||||||
|
key := iter.account.Key()
|
||||||
|
if bytes.HasPrefix(key, rawdb.SnapshotAccountPrefix) && len(key) == (len(rawdb.SnapshotAccountPrefix)+common.HashLength) {
|
||||||
|
return utils.OpBatchAdd, key, iter.account.Value(), true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for iter.storage.Next() {
|
||||||
|
key := iter.storage.Key()
|
||||||
|
if bytes.HasPrefix(key, rawdb.SnapshotStoragePrefix) && len(key) == (len(rawdb.SnapshotStoragePrefix)+2*common.HashLength) {
|
||||||
|
return utils.OpBatchAdd, key, iter.storage.Value(), true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0, nil, nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (iter *snapshotIterator) Release() {
|
||||||
|
iter.account.Release()
|
||||||
|
iter.storage.Release()
|
||||||
|
}
|
||||||
|
|
||||||
|
// chainExporters defines the export scheme for all exportable chain data.
|
||||||
|
var chainExporters = map[string]func(db ethdb.Database) utils.ChainDataIterator{
|
||||||
|
"preimage": func(db ethdb.Database) utils.ChainDataIterator {
|
||||||
|
iter := db.NewIterator(rawdb.PreimagePrefix, nil)
|
||||||
|
return &preimageIterator{iter: iter}
|
||||||
|
},
|
||||||
|
"snapshot": func(db ethdb.Database) utils.ChainDataIterator {
|
||||||
|
account := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
|
||||||
|
storage := db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
|
||||||
|
return &snapshotIterator{account: account, storage: storage}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func exportChaindata(ctx *cli.Context) error {
|
||||||
|
if ctx.NArg() < 2 {
|
||||||
|
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
|
||||||
|
}
|
||||||
|
// Parse the required chain data type, make sure it's supported.
|
||||||
|
kind := ctx.Args().Get(0)
|
||||||
|
kind = strings.ToLower(strings.Trim(kind, " "))
|
||||||
|
exporter, ok := chainExporters[kind]
|
||||||
|
if !ok {
|
||||||
|
var kinds []string
|
||||||
|
for kind := range chainExporters {
|
||||||
|
kinds = append(kinds, kind)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("invalid data type %s, supported types: %s", kind, strings.Join(kinds, ", "))
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
stack, _ = makeConfigNode(ctx)
|
||||||
|
interrupt = make(chan os.Signal, 1)
|
||||||
|
stop = make(chan struct{})
|
||||||
|
)
|
||||||
|
defer stack.Close()
|
||||||
|
signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
|
||||||
|
defer signal.Stop(interrupt)
|
||||||
|
defer close(interrupt)
|
||||||
|
go func() {
|
||||||
|
if _, ok := <-interrupt; ok {
|
||||||
|
log.Info("Interrupted during db export, stopping at next batch")
|
||||||
|
}
|
||||||
|
close(stop)
|
||||||
|
}()
|
||||||
|
db := utils.MakeChainDatabase(ctx, stack, true)
|
||||||
|
return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
|
||||||
|
}
|
||||||
|
|
|
@ -70,7 +70,7 @@ var (
|
||||||
utils.NoUSBFlag,
|
utils.NoUSBFlag,
|
||||||
utils.USBFlag,
|
utils.USBFlag,
|
||||||
utils.SmartCardDaemonPathFlag,
|
utils.SmartCardDaemonPathFlag,
|
||||||
utils.OverrideLondonFlag,
|
utils.OverrideArrowGlacierFlag,
|
||||||
utils.EthashCacheDirFlag,
|
utils.EthashCacheDirFlag,
|
||||||
utils.EthashCachesInMemoryFlag,
|
utils.EthashCachesInMemoryFlag,
|
||||||
utils.EthashCachesOnDiskFlag,
|
utils.EthashCachesOnDiskFlag,
|
||||||
|
@ -177,6 +177,7 @@ var (
|
||||||
utils.IPCPathFlag,
|
utils.IPCPathFlag,
|
||||||
utils.InsecureUnlockAllowedFlag,
|
utils.InsecureUnlockAllowedFlag,
|
||||||
utils.RPCGlobalGasCapFlag,
|
utils.RPCGlobalGasCapFlag,
|
||||||
|
utils.RPCGlobalEVMTimeoutFlag,
|
||||||
utils.RPCGlobalTxFeeCapFlag,
|
utils.RPCGlobalTxFeeCapFlag,
|
||||||
utils.AllowUnprotectedTxs,
|
utils.AllowUnprotectedTxs,
|
||||||
}
|
}
|
||||||
|
@ -408,7 +409,7 @@ func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend) {
|
||||||
}
|
}
|
||||||
ethBackend, ok := backend.(*eth.EthAPIBackend)
|
ethBackend, ok := backend.(*eth.EthAPIBackend)
|
||||||
if !ok {
|
if !ok {
|
||||||
utils.Fatalf("Ethereum service not running: %v", err)
|
utils.Fatalf("Ethereum service not running")
|
||||||
}
|
}
|
||||||
// Set the gas price to the limits from the CLI and start mining
|
// Set the gas price to the limits from the CLI and start mining
|
||||||
gasprice := utils.GlobalBig(ctx, utils.MinerGasPriceFlag.Name)
|
gasprice := utils.GlobalBig(ctx, utils.MinerGasPriceFlag.Name)
|
||||||
|
|
|
@ -150,6 +150,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
|
||||||
utils.GraphQLCORSDomainFlag,
|
utils.GraphQLCORSDomainFlag,
|
||||||
utils.GraphQLVirtualHostsFlag,
|
utils.GraphQLVirtualHostsFlag,
|
||||||
utils.RPCGlobalGasCapFlag,
|
utils.RPCGlobalGasCapFlag,
|
||||||
|
utils.RPCGlobalEVMTimeoutFlag,
|
||||||
utils.RPCGlobalTxFeeCapFlag,
|
utils.RPCGlobalTxFeeCapFlag,
|
||||||
utils.AllowUnprotectedTxs,
|
utils.AllowUnprotectedTxs,
|
||||||
utils.JSpathFlag,
|
utils.JSpathFlag,
|
||||||
|
|
|
@ -35,8 +35,8 @@ FROM puppeth/blockscout:latest
|
||||||
ADD genesis.json /genesis.json
|
ADD genesis.json /genesis.json
|
||||||
RUN \
|
RUN \
|
||||||
echo 'geth --cache 512 init /genesis.json' > explorer.sh && \
|
echo 'geth --cache 512 init /genesis.json' > explorer.sh && \
|
||||||
echo $'geth --networkid {{.NetworkID}} --syncmode "full" --gcmode "archive" --port {{.EthPort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --http --http.api "net,web3,eth,debug" --http.corsdomain "*" --http.vhosts "*" --ws --ws.origins "*" --exitwhensynced' >> explorer.sh && \
|
echo $'geth --networkid {{.NetworkID}} --syncmode "full" --gcmode "archive" --port {{.EthPort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --http --http.api "net,web3,eth,debug,txpool" --http.corsdomain "*" --http.vhosts "*" --ws --ws.origins "*" --exitwhensynced' >> explorer.sh && \
|
||||||
echo $'exec geth --networkid {{.NetworkID}} --syncmode "full" --gcmode "archive" --port {{.EthPort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --http --http.api "net,web3,eth,debug" --http.corsdomain "*" --http.vhosts "*" --ws --ws.origins "*" &' >> explorer.sh && \
|
echo $'exec geth --networkid {{.NetworkID}} --syncmode "full" --gcmode "archive" --port {{.EthPort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --http --http.api "net,web3,eth,debug,txpool" --http.corsdomain "*" --http.vhosts "*" --ws --ws.origins "*" &' >> explorer.sh && \
|
||||||
echo '/usr/local/bin/docker-entrypoint.sh postgres &' >> explorer.sh && \
|
echo '/usr/local/bin/docker-entrypoint.sh postgres &' >> explorer.sh && \
|
||||||
echo 'sleep 5' >> explorer.sh && \
|
echo 'sleep 5' >> explorer.sh && \
|
||||||
echo 'mix do ecto.drop --force, ecto.create, ecto.migrate' >> explorer.sh && \
|
echo 'mix do ecto.drop --force, ecto.create, ecto.migrate' >> explorer.sh && \
|
||||||
|
|
|
@ -17,7 +17,6 @@
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
@ -32,8 +31,10 @@ import (
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/console/prompt"
|
||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
|
"github.com/peterh/liner"
|
||||||
"golang.org/x/crypto/ssh/terminal"
|
"golang.org/x/crypto/ssh/terminal"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -76,17 +77,27 @@ type wizard struct {
|
||||||
servers map[string]*sshClient // SSH connections to servers to administer
|
servers map[string]*sshClient // SSH connections to servers to administer
|
||||||
services map[string][]string // Ethereum services known to be running on servers
|
services map[string][]string // Ethereum services known to be running on servers
|
||||||
|
|
||||||
in *bufio.Reader // Wrapper around stdin to allow reading user input
|
lock sync.Mutex // Lock to protect configs during concurrent service discovery
|
||||||
lock sync.Mutex // Lock to protect configs during concurrent service discovery
|
}
|
||||||
|
|
||||||
|
// prompts the user for input with the given prompt string. Returns when a value is entered.
|
||||||
|
// Causes the wizard to exit if ctrl-d is pressed
|
||||||
|
func promptInput(p string) string {
|
||||||
|
for {
|
||||||
|
text, err := prompt.Stdin.PromptInput(p)
|
||||||
|
if err != nil {
|
||||||
|
if err != liner.ErrPromptAborted {
|
||||||
|
log.Crit("Failed to read user input", "err", err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return text
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// read reads a single line from stdin, trimming if from spaces.
|
// read reads a single line from stdin, trimming if from spaces.
|
||||||
func (w *wizard) read() string {
|
func (w *wizard) read() string {
|
||||||
fmt.Printf("> ")
|
text := promptInput("> ")
|
||||||
text, err := w.in.ReadString('\n')
|
|
||||||
if err != nil {
|
|
||||||
log.Crit("Failed to read user input", "err", err)
|
|
||||||
}
|
|
||||||
return strings.TrimSpace(text)
|
return strings.TrimSpace(text)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -94,11 +105,7 @@ func (w *wizard) read() string {
|
||||||
// non-emptyness.
|
// non-emptyness.
|
||||||
func (w *wizard) readString() string {
|
func (w *wizard) readString() string {
|
||||||
for {
|
for {
|
||||||
fmt.Printf("> ")
|
text := promptInput("> ")
|
||||||
text, err := w.in.ReadString('\n')
|
|
||||||
if err != nil {
|
|
||||||
log.Crit("Failed to read user input", "err", err)
|
|
||||||
}
|
|
||||||
if text = strings.TrimSpace(text); text != "" {
|
if text = strings.TrimSpace(text); text != "" {
|
||||||
return text
|
return text
|
||||||
}
|
}
|
||||||
|
@ -108,11 +115,7 @@ func (w *wizard) readString() string {
|
||||||
// readDefaultString reads a single line from stdin, trimming if from spaces. If
|
// readDefaultString reads a single line from stdin, trimming if from spaces. If
|
||||||
// an empty line is entered, the default value is returned.
|
// an empty line is entered, the default value is returned.
|
||||||
func (w *wizard) readDefaultString(def string) string {
|
func (w *wizard) readDefaultString(def string) string {
|
||||||
fmt.Printf("> ")
|
text := promptInput("> ")
|
||||||
text, err := w.in.ReadString('\n')
|
|
||||||
if err != nil {
|
|
||||||
log.Crit("Failed to read user input", "err", err)
|
|
||||||
}
|
|
||||||
if text = strings.TrimSpace(text); text != "" {
|
if text = strings.TrimSpace(text); text != "" {
|
||||||
return text
|
return text
|
||||||
}
|
}
|
||||||
|
@ -124,11 +127,7 @@ func (w *wizard) readDefaultString(def string) string {
|
||||||
// value is returned.
|
// value is returned.
|
||||||
func (w *wizard) readDefaultYesNo(def bool) bool {
|
func (w *wizard) readDefaultYesNo(def bool) bool {
|
||||||
for {
|
for {
|
||||||
fmt.Printf("> ")
|
text := promptInput("> ")
|
||||||
text, err := w.in.ReadString('\n')
|
|
||||||
if err != nil {
|
|
||||||
log.Crit("Failed to read user input", "err", err)
|
|
||||||
}
|
|
||||||
if text = strings.ToLower(strings.TrimSpace(text)); text == "" {
|
if text = strings.ToLower(strings.TrimSpace(text)); text == "" {
|
||||||
return def
|
return def
|
||||||
}
|
}
|
||||||
|
@ -146,11 +145,7 @@ func (w *wizard) readDefaultYesNo(def bool) bool {
|
||||||
// interpret it as a URL (http, https or file).
|
// interpret it as a URL (http, https or file).
|
||||||
func (w *wizard) readURL() *url.URL {
|
func (w *wizard) readURL() *url.URL {
|
||||||
for {
|
for {
|
||||||
fmt.Printf("> ")
|
text := promptInput("> ")
|
||||||
text, err := w.in.ReadString('\n')
|
|
||||||
if err != nil {
|
|
||||||
log.Crit("Failed to read user input", "err", err)
|
|
||||||
}
|
|
||||||
uri, err := url.Parse(strings.TrimSpace(text))
|
uri, err := url.Parse(strings.TrimSpace(text))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("Invalid input, expected URL", "err", err)
|
log.Error("Invalid input, expected URL", "err", err)
|
||||||
|
@ -164,11 +159,7 @@ func (w *wizard) readURL() *url.URL {
|
||||||
// to parse into an integer.
|
// to parse into an integer.
|
||||||
func (w *wizard) readInt() int {
|
func (w *wizard) readInt() int {
|
||||||
for {
|
for {
|
||||||
fmt.Printf("> ")
|
text := promptInput("> ")
|
||||||
text, err := w.in.ReadString('\n')
|
|
||||||
if err != nil {
|
|
||||||
log.Crit("Failed to read user input", "err", err)
|
|
||||||
}
|
|
||||||
if text = strings.TrimSpace(text); text == "" {
|
if text = strings.TrimSpace(text); text == "" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
@ -186,11 +177,7 @@ func (w *wizard) readInt() int {
|
||||||
// returned.
|
// returned.
|
||||||
func (w *wizard) readDefaultInt(def int) int {
|
func (w *wizard) readDefaultInt(def int) int {
|
||||||
for {
|
for {
|
||||||
fmt.Printf("> ")
|
text := promptInput("> ")
|
||||||
text, err := w.in.ReadString('\n')
|
|
||||||
if err != nil {
|
|
||||||
log.Crit("Failed to read user input", "err", err)
|
|
||||||
}
|
|
||||||
if text = strings.TrimSpace(text); text == "" {
|
if text = strings.TrimSpace(text); text == "" {
|
||||||
return def
|
return def
|
||||||
}
|
}
|
||||||
|
@ -208,11 +195,7 @@ func (w *wizard) readDefaultInt(def int) int {
|
||||||
// default value is returned.
|
// default value is returned.
|
||||||
func (w *wizard) readDefaultBigInt(def *big.Int) *big.Int {
|
func (w *wizard) readDefaultBigInt(def *big.Int) *big.Int {
|
||||||
for {
|
for {
|
||||||
fmt.Printf("> ")
|
text := promptInput("> ")
|
||||||
text, err := w.in.ReadString('\n')
|
|
||||||
if err != nil {
|
|
||||||
log.Crit("Failed to read user input", "err", err)
|
|
||||||
}
|
|
||||||
if text = strings.TrimSpace(text); text == "" {
|
if text = strings.TrimSpace(text); text == "" {
|
||||||
return def
|
return def
|
||||||
}
|
}
|
||||||
|
@ -225,38 +208,11 @@ func (w *wizard) readDefaultBigInt(def *big.Int) *big.Int {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
// readFloat reads a single line from stdin, trimming if from spaces, enforcing it
|
|
||||||
// to parse into a float.
|
|
||||||
func (w *wizard) readFloat() float64 {
|
|
||||||
for {
|
|
||||||
fmt.Printf("> ")
|
|
||||||
text, err := w.in.ReadString('\n')
|
|
||||||
if err != nil {
|
|
||||||
log.Crit("Failed to read user input", "err", err)
|
|
||||||
}
|
|
||||||
if text = strings.TrimSpace(text); text == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
val, err := strconv.ParseFloat(strings.TrimSpace(text), 64)
|
|
||||||
if err != nil {
|
|
||||||
log.Error("Invalid input, expected float", "err", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
return val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
|
|
||||||
// readDefaultFloat reads a single line from stdin, trimming if from spaces, enforcing
|
// readDefaultFloat reads a single line from stdin, trimming if from spaces, enforcing
|
||||||
// it to parse into a float. If an empty line is entered, the default value is returned.
|
// it to parse into a float. If an empty line is entered, the default value is returned.
|
||||||
func (w *wizard) readDefaultFloat(def float64) float64 {
|
func (w *wizard) readDefaultFloat(def float64) float64 {
|
||||||
for {
|
for {
|
||||||
fmt.Printf("> ")
|
text := promptInput("> ")
|
||||||
text, err := w.in.ReadString('\n')
|
|
||||||
if err != nil {
|
|
||||||
log.Crit("Failed to read user input", "err", err)
|
|
||||||
}
|
|
||||||
if text = strings.TrimSpace(text); text == "" {
|
if text = strings.TrimSpace(text); text == "" {
|
||||||
return def
|
return def
|
||||||
}
|
}
|
||||||
|
@ -285,12 +241,7 @@ func (w *wizard) readPassword() string {
|
||||||
// it to an Ethereum address.
|
// it to an Ethereum address.
|
||||||
func (w *wizard) readAddress() *common.Address {
|
func (w *wizard) readAddress() *common.Address {
|
||||||
for {
|
for {
|
||||||
// Read the address from the user
|
text := promptInput("> 0x")
|
||||||
fmt.Printf("> 0x")
|
|
||||||
text, err := w.in.ReadString('\n')
|
|
||||||
if err != nil {
|
|
||||||
log.Crit("Failed to read user input", "err", err)
|
|
||||||
}
|
|
||||||
if text = strings.TrimSpace(text); text == "" {
|
if text = strings.TrimSpace(text); text == "" {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -311,11 +262,7 @@ func (w *wizard) readAddress() *common.Address {
|
||||||
func (w *wizard) readDefaultAddress(def common.Address) common.Address {
|
func (w *wizard) readDefaultAddress(def common.Address) common.Address {
|
||||||
for {
|
for {
|
||||||
// Read the address from the user
|
// Read the address from the user
|
||||||
fmt.Printf("> 0x")
|
text := promptInput("> 0x")
|
||||||
text, err := w.in.ReadString('\n')
|
|
||||||
if err != nil {
|
|
||||||
log.Crit("Failed to read user input", "err", err)
|
|
||||||
}
|
|
||||||
if text = strings.TrimSpace(text); text == "" {
|
if text = strings.TrimSpace(text); text == "" {
|
||||||
return def
|
return def
|
||||||
}
|
}
|
||||||
|
@ -334,8 +281,9 @@ func (w *wizard) readJSON() string {
|
||||||
var blob json.RawMessage
|
var blob json.RawMessage
|
||||||
|
|
||||||
for {
|
for {
|
||||||
fmt.Printf("> ")
|
text := promptInput("> ")
|
||||||
if err := json.NewDecoder(w.in).Decode(&blob); err != nil {
|
reader := strings.NewReader(text)
|
||||||
|
if err := json.NewDecoder(reader).Decode(&blob); err != nil {
|
||||||
log.Error("Invalid JSON, please try again", "err", err)
|
log.Error("Invalid JSON, please try again", "err", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
@ -351,10 +299,7 @@ func (w *wizard) readIPAddress() string {
|
||||||
for {
|
for {
|
||||||
// Read the IP address from the user
|
// Read the IP address from the user
|
||||||
fmt.Printf("> ")
|
fmt.Printf("> ")
|
||||||
text, err := w.in.ReadString('\n')
|
text := promptInput("> ")
|
||||||
if err != nil {
|
|
||||||
log.Crit("Failed to read user input", "err", err)
|
|
||||||
}
|
|
||||||
if text = strings.TrimSpace(text); text == "" {
|
if text = strings.TrimSpace(text); text == "" {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
|
@ -17,14 +17,12 @@
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
)
|
)
|
||||||
|
@ -38,7 +36,6 @@ func makeWizard(network string) *wizard {
|
||||||
},
|
},
|
||||||
servers: make(map[string]*sshClient),
|
servers: make(map[string]*sshClient),
|
||||||
services: make(map[string][]string),
|
services: make(map[string][]string),
|
||||||
in: bufio.NewReader(os.Stdin),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -82,25 +79,17 @@ func (w *wizard) run() {
|
||||||
} else if err := json.Unmarshal(blob, &w.conf); err != nil {
|
} else if err := json.Unmarshal(blob, &w.conf); err != nil {
|
||||||
log.Crit("Previous configuration corrupted", "path", w.conf.path, "err", err)
|
log.Crit("Previous configuration corrupted", "path", w.conf.path, "err", err)
|
||||||
} else {
|
} else {
|
||||||
// Dial all previously known servers concurrently
|
// Dial all previously known servers
|
||||||
var pend sync.WaitGroup
|
|
||||||
for server, pubkey := range w.conf.Servers {
|
for server, pubkey := range w.conf.Servers {
|
||||||
pend.Add(1)
|
log.Info("Dialing previously configured server", "server", server)
|
||||||
|
client, err := dial(server, pubkey)
|
||||||
go func(server string, pubkey []byte) {
|
if err != nil {
|
||||||
defer pend.Done()
|
log.Error("Previous server unreachable", "server", server, "err", err)
|
||||||
|
}
|
||||||
log.Info("Dialing previously configured server", "server", server)
|
w.lock.Lock()
|
||||||
client, err := dial(server, pubkey)
|
w.servers[server] = client
|
||||||
if err != nil {
|
w.lock.Unlock()
|
||||||
log.Error("Previous server unreachable", "server", server, "err", err)
|
|
||||||
}
|
|
||||||
w.lock.Lock()
|
|
||||||
w.servers[server] = client
|
|
||||||
w.lock.Unlock()
|
|
||||||
}(server, pubkey)
|
|
||||||
}
|
}
|
||||||
pend.Wait()
|
|
||||||
w.networkStats()
|
w.networkStats()
|
||||||
}
|
}
|
||||||
// Basics done, loop ad infinitum about what to do
|
// Basics done, loop ad infinitum about what to do
|
||||||
|
|
|
@ -18,7 +18,9 @@
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bufio"
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"container/list"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
@ -26,18 +28,20 @@ import (
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
hexMode = flag.String("hex", "", "dump given hex data")
|
hexMode = flag.String("hex", "", "dump given hex data")
|
||||||
noASCII = flag.Bool("noascii", false, "don't print ASCII strings readably")
|
reverseMode = flag.Bool("reverse", false, "convert ASCII to rlp")
|
||||||
single = flag.Bool("single", false, "print only the first element, discard the rest")
|
noASCII = flag.Bool("noascii", false, "don't print ASCII strings readably")
|
||||||
|
single = flag.Bool("single", false, "print only the first element, discard the rest")
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
flag.Usage = func() {
|
flag.Usage = func() {
|
||||||
fmt.Fprintln(os.Stderr, "Usage:", os.Args[0], "[-noascii] [-hex <data>] [filename]")
|
fmt.Fprintln(os.Stderr, "Usage:", os.Args[0], "[-noascii] [-hex <data>][-reverse] [filename]")
|
||||||
flag.PrintDefaults()
|
flag.PrintDefaults()
|
||||||
fmt.Fprintln(os.Stderr, `
|
fmt.Fprintln(os.Stderr, `
|
||||||
Dumps RLP data from the given file in readable form.
|
Dumps RLP data from the given file in readable form.
|
||||||
|
@ -73,23 +77,40 @@ func main() {
|
||||||
flag.Usage()
|
flag.Usage()
|
||||||
os.Exit(2)
|
os.Exit(2)
|
||||||
}
|
}
|
||||||
|
out := os.Stdout
|
||||||
s := rlp.NewStream(r, 0)
|
if *reverseMode {
|
||||||
for {
|
data, err := textToRlp(r)
|
||||||
if err := dump(s, 0); err != nil {
|
if err != nil {
|
||||||
if err != io.EOF {
|
die(err)
|
||||||
die(err)
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
}
|
||||||
fmt.Println()
|
fmt.Printf("0x%x\n", data)
|
||||||
if *single {
|
return
|
||||||
break
|
} else {
|
||||||
|
err := rlpToText(r, out)
|
||||||
|
if err != nil {
|
||||||
|
die(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func dump(s *rlp.Stream, depth int) error {
|
func rlpToText(r io.Reader, out io.Writer) error {
|
||||||
|
s := rlp.NewStream(r, 0)
|
||||||
|
for {
|
||||||
|
if err := dump(s, 0, out); err != nil {
|
||||||
|
if err != io.EOF {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
fmt.Fprintln(out)
|
||||||
|
if *single {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func dump(s *rlp.Stream, depth int, out io.Writer) error {
|
||||||
kind, size, err := s.Kind()
|
kind, size, err := s.Kind()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -101,28 +122,28 @@ func dump(s *rlp.Stream, depth int) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if len(str) == 0 || !*noASCII && isASCII(str) {
|
if len(str) == 0 || !*noASCII && isASCII(str) {
|
||||||
fmt.Printf("%s%q", ws(depth), str)
|
fmt.Fprintf(out, "%s%q", ws(depth), str)
|
||||||
} else {
|
} else {
|
||||||
fmt.Printf("%s%x", ws(depth), str)
|
fmt.Fprintf(out, "%s%x", ws(depth), str)
|
||||||
}
|
}
|
||||||
case rlp.List:
|
case rlp.List:
|
||||||
s.List()
|
s.List()
|
||||||
defer s.ListEnd()
|
defer s.ListEnd()
|
||||||
if size == 0 {
|
if size == 0 {
|
||||||
fmt.Print(ws(depth) + "[]")
|
fmt.Fprintf(out, ws(depth)+"[]")
|
||||||
} else {
|
} else {
|
||||||
fmt.Println(ws(depth) + "[")
|
fmt.Fprintln(out, ws(depth)+"[")
|
||||||
for i := 0; ; i++ {
|
for i := 0; ; i++ {
|
||||||
if i > 0 {
|
if i > 0 {
|
||||||
fmt.Print(",\n")
|
fmt.Fprint(out, ",\n")
|
||||||
}
|
}
|
||||||
if err := dump(s, depth+1); err == rlp.EOL {
|
if err := dump(s, depth+1, out); err == rlp.EOL {
|
||||||
break
|
break
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
fmt.Print(ws(depth) + "]")
|
fmt.Fprint(out, ws(depth)+"]")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -145,3 +166,45 @@ func die(args ...interface{}) {
|
||||||
fmt.Fprintln(os.Stderr, args...)
|
fmt.Fprintln(os.Stderr, args...)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// textToRlp converts text into RLP (best effort).
|
||||||
|
func textToRlp(r io.Reader) ([]byte, error) {
|
||||||
|
// We're expecting the input to be well-formed, meaning that
|
||||||
|
// - each element is on a separate line
|
||||||
|
// - each line is either an (element OR a list start/end) + comma
|
||||||
|
// - an element is either hex-encoded bytes OR a quoted string
|
||||||
|
var (
|
||||||
|
scanner = bufio.NewScanner(r)
|
||||||
|
obj []interface{}
|
||||||
|
stack = list.New()
|
||||||
|
)
|
||||||
|
for scanner.Scan() {
|
||||||
|
t := strings.TrimSpace(scanner.Text())
|
||||||
|
if len(t) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
switch t {
|
||||||
|
case "[": // list start
|
||||||
|
stack.PushFront(obj)
|
||||||
|
obj = make([]interface{}, 0)
|
||||||
|
case "]", "],": // list end
|
||||||
|
parent := stack.Remove(stack.Front()).([]interface{})
|
||||||
|
obj = append(parent, obj)
|
||||||
|
case "[],": // empty list
|
||||||
|
obj = append(obj, make([]interface{}, 0))
|
||||||
|
default: // element
|
||||||
|
data := []byte(t)[:len(t)-1] // cut off comma
|
||||||
|
if data[0] == '"' { // ascii string
|
||||||
|
data = []byte(t)[1 : len(data)-1]
|
||||||
|
} else { // hex data
|
||||||
|
data = common.FromHex(string(data))
|
||||||
|
}
|
||||||
|
obj = append(obj, data)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := scanner.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
data, err := rlp.EncodeToBytes(obj[0])
|
||||||
|
return data, err
|
||||||
|
}
|
||||||
|
|
|
@ -0,0 +1,65 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestRoundtrip(t *testing.T) {
|
||||||
|
for i, want := range []string{
|
||||||
|
"0xf880806482520894d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0a1010000000000000000000000000000000000000000000000000000000000000001801ba0c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549da06180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28",
|
||||||
|
"0xd5c0d3cb84746573742a2a808213378667617a6f6e6b",
|
||||||
|
"0xc780c0c1c0825208",
|
||||||
|
} {
|
||||||
|
var out strings.Builder
|
||||||
|
err := rlpToText(bytes.NewReader(common.FromHex(want)), &out)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
text := out.String()
|
||||||
|
rlpBytes, err := textToRlp(strings.NewReader(text))
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("test %d: error %v", i, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
have := fmt.Sprintf("0x%x", rlpBytes)
|
||||||
|
if have != want {
|
||||||
|
t.Errorf("test %d: have\n%v\nwant:\n%v\n", i, have, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTextToRlp(t *testing.T) {
|
||||||
|
type tc struct {
|
||||||
|
text string
|
||||||
|
want string
|
||||||
|
}
|
||||||
|
cases := []tc{
|
||||||
|
{
|
||||||
|
text: `[
|
||||||
|
"",
|
||||||
|
[],
|
||||||
|
[
|
||||||
|
[],
|
||||||
|
],
|
||||||
|
5208,
|
||||||
|
]`,
|
||||||
|
want: "0xc780c0c1c0825208",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for i, tc := range cases {
|
||||||
|
have, err := textToRlp(strings.NewReader(tc.text))
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("test %d: error %v", i, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if hexutil.Encode(have) != tc.want {
|
||||||
|
t.Errorf("test %d:\nhave %v\nwant %v", i, hexutil.Encode(have), tc.want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
212
cmd/utils/cmd.go
212
cmd/utils/cmd.go
|
@ -18,7 +18,9 @@
|
||||||
package utils
|
package utils
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bufio"
|
||||||
"compress/gzip"
|
"compress/gzip"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
|
@ -270,6 +272,7 @@ func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, las
|
||||||
}
|
}
|
||||||
|
|
||||||
// ImportPreimages imports a batch of exported hash preimages into the database.
|
// ImportPreimages imports a batch of exported hash preimages into the database.
|
||||||
|
// It's a part of the deprecated functionality, should be removed in the future.
|
||||||
func ImportPreimages(db ethdb.Database, fn string) error {
|
func ImportPreimages(db ethdb.Database, fn string) error {
|
||||||
log.Info("Importing preimages", "file", fn)
|
log.Info("Importing preimages", "file", fn)
|
||||||
|
|
||||||
|
@ -280,7 +283,7 @@ func ImportPreimages(db ethdb.Database, fn string) error {
|
||||||
}
|
}
|
||||||
defer fh.Close()
|
defer fh.Close()
|
||||||
|
|
||||||
var reader io.Reader = fh
|
var reader io.Reader = bufio.NewReader(fh)
|
||||||
if strings.HasSuffix(fn, ".gz") {
|
if strings.HasSuffix(fn, ".gz") {
|
||||||
if reader, err = gzip.NewReader(reader); err != nil {
|
if reader, err = gzip.NewReader(reader); err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -288,7 +291,7 @@ func ImportPreimages(db ethdb.Database, fn string) error {
|
||||||
}
|
}
|
||||||
stream := rlp.NewStream(reader, 0)
|
stream := rlp.NewStream(reader, 0)
|
||||||
|
|
||||||
// Import the preimages in batches to prevent disk trashing
|
// Import the preimages in batches to prevent disk thrashing
|
||||||
preimages := make(map[common.Hash][]byte)
|
preimages := make(map[common.Hash][]byte)
|
||||||
|
|
||||||
for {
|
for {
|
||||||
|
@ -317,6 +320,7 @@ func ImportPreimages(db ethdb.Database, fn string) error {
|
||||||
|
|
||||||
// ExportPreimages exports all known hash preimages into the specified file,
|
// ExportPreimages exports all known hash preimages into the specified file,
|
||||||
// truncating any data already present in the file.
|
// truncating any data already present in the file.
|
||||||
|
// It's a part of the deprecated functionality, should be removed in the future.
|
||||||
func ExportPreimages(db ethdb.Database, fn string) error {
|
func ExportPreimages(db ethdb.Database, fn string) error {
|
||||||
log.Info("Exporting preimages", "file", fn)
|
log.Info("Exporting preimages", "file", fn)
|
||||||
|
|
||||||
|
@ -344,3 +348,207 @@ func ExportPreimages(db ethdb.Database, fn string) error {
|
||||||
log.Info("Exported preimages", "file", fn)
|
log.Info("Exported preimages", "file", fn)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// exportHeader is used in the export/import flow. When we do an export,
|
||||||
|
// the first element we output is the exportHeader.
|
||||||
|
// Whenever a backwards-incompatible change is made, the Version header
|
||||||
|
// should be bumped.
|
||||||
|
// If the importer sees a higher version, it should reject the import.
|
||||||
|
type exportHeader struct {
|
||||||
|
Magic string // Always set to 'gethdbdump' for disambiguation
|
||||||
|
Version uint64
|
||||||
|
Kind string
|
||||||
|
UnixTime uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
const exportMagic = "gethdbdump"
|
||||||
|
const (
|
||||||
|
OpBatchAdd = 0
|
||||||
|
OpBatchDel = 1
|
||||||
|
)
|
||||||
|
|
||||||
|
// ImportLDBData imports a batch of snapshot data into the database
|
||||||
|
func ImportLDBData(db ethdb.Database, f string, startIndex int64, interrupt chan struct{}) error {
|
||||||
|
log.Info("Importing leveldb data", "file", f)
|
||||||
|
|
||||||
|
// Open the file handle and potentially unwrap the gzip stream
|
||||||
|
fh, err := os.Open(f)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer fh.Close()
|
||||||
|
|
||||||
|
var reader io.Reader = bufio.NewReader(fh)
|
||||||
|
if strings.HasSuffix(f, ".gz") {
|
||||||
|
if reader, err = gzip.NewReader(reader); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stream := rlp.NewStream(reader, 0)
|
||||||
|
|
||||||
|
// Read the header
|
||||||
|
var header exportHeader
|
||||||
|
if err := stream.Decode(&header); err != nil {
|
||||||
|
return fmt.Errorf("could not decode header: %v", err)
|
||||||
|
}
|
||||||
|
if header.Magic != exportMagic {
|
||||||
|
return errors.New("incompatible data, wrong magic")
|
||||||
|
}
|
||||||
|
if header.Version != 0 {
|
||||||
|
return fmt.Errorf("incompatible version %d, (support only 0)", header.Version)
|
||||||
|
}
|
||||||
|
log.Info("Importing data", "file", f, "type", header.Kind, "data age",
|
||||||
|
common.PrettyDuration(time.Since(time.Unix(int64(header.UnixTime), 0))))
|
||||||
|
|
||||||
|
// Import the snapshot in batches to prevent disk thrashing
|
||||||
|
var (
|
||||||
|
count int64
|
||||||
|
start = time.Now()
|
||||||
|
logged = time.Now()
|
||||||
|
batch = db.NewBatch()
|
||||||
|
)
|
||||||
|
for {
|
||||||
|
// Read the next entry
|
||||||
|
var (
|
||||||
|
op byte
|
||||||
|
key, val []byte
|
||||||
|
)
|
||||||
|
if err := stream.Decode(&op); err != nil {
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := stream.Decode(&key); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := stream.Decode(&val); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if count < startIndex {
|
||||||
|
count++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
switch op {
|
||||||
|
case OpBatchDel:
|
||||||
|
batch.Delete(key)
|
||||||
|
case OpBatchAdd:
|
||||||
|
batch.Put(key, val)
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unknown op %d\n", op)
|
||||||
|
}
|
||||||
|
if batch.ValueSize() > ethdb.IdealBatchSize {
|
||||||
|
if err := batch.Write(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
batch.Reset()
|
||||||
|
}
|
||||||
|
// Check interruption emitted by ctrl+c
|
||||||
|
if count%1000 == 0 {
|
||||||
|
select {
|
||||||
|
case <-interrupt:
|
||||||
|
if err := batch.Write(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
log.Info("External data import interrupted", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
|
||||||
|
return nil
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if count%1000 == 0 && time.Since(logged) > 8*time.Second {
|
||||||
|
log.Info("Importing external data", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
|
||||||
|
logged = time.Now()
|
||||||
|
}
|
||||||
|
count += 1
|
||||||
|
}
|
||||||
|
// Flush the last batch snapshot data
|
||||||
|
if batch.ValueSize() > 0 {
|
||||||
|
if err := batch.Write(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
log.Info("Imported chain data", "file", f, "count", count,
|
||||||
|
"elapsed", common.PrettyDuration(time.Since(start)))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChainDataIterator is an interface wraps all necessary functions to iterate
|
||||||
|
// the exporting chain data.
|
||||||
|
type ChainDataIterator interface {
|
||||||
|
// Next returns the key-value pair for next exporting entry in the iterator.
|
||||||
|
// When the end is reached, it will return (0, nil, nil, false).
|
||||||
|
Next() (byte, []byte, []byte, bool)
|
||||||
|
|
||||||
|
// Release releases associated resources. Release should always succeed and can
|
||||||
|
// be called multiple times without causing error.
|
||||||
|
Release()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExportChaindata exports the given data type (truncating any data already present)
|
||||||
|
// in the file. If the suffix is 'gz', gzip compression is used.
|
||||||
|
func ExportChaindata(fn string, kind string, iter ChainDataIterator, interrupt chan struct{}) error {
|
||||||
|
log.Info("Exporting chain data", "file", fn, "kind", kind)
|
||||||
|
defer iter.Release()
|
||||||
|
|
||||||
|
// Open the file handle and potentially wrap with a gzip stream
|
||||||
|
fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer fh.Close()
|
||||||
|
|
||||||
|
var writer io.Writer = fh
|
||||||
|
if strings.HasSuffix(fn, ".gz") {
|
||||||
|
writer = gzip.NewWriter(writer)
|
||||||
|
defer writer.(*gzip.Writer).Close()
|
||||||
|
}
|
||||||
|
// Write the header
|
||||||
|
if err := rlp.Encode(writer, &exportHeader{
|
||||||
|
Magic: exportMagic,
|
||||||
|
Version: 0,
|
||||||
|
Kind: kind,
|
||||||
|
UnixTime: uint64(time.Now().Unix()),
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Extract data from source iterator and dump them out to file
|
||||||
|
var (
|
||||||
|
count int64
|
||||||
|
start = time.Now()
|
||||||
|
logged = time.Now()
|
||||||
|
)
|
||||||
|
for {
|
||||||
|
op, key, val, ok := iter.Next()
|
||||||
|
if !ok {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if err := rlp.Encode(writer, op); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := rlp.Encode(writer, key); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := rlp.Encode(writer, val); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if count%1000 == 0 {
|
||||||
|
// Check interruption emitted by ctrl+c
|
||||||
|
select {
|
||||||
|
case <-interrupt:
|
||||||
|
log.Info("Chain data exporting interrupted", "file", fn,
|
||||||
|
"kind", kind, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
|
||||||
|
return nil
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
if time.Since(logged) > 8*time.Second {
|
||||||
|
log.Info("Exporting chain data", "file", fn, "kind", kind,
|
||||||
|
"count", count, "elapsed", common.PrettyDuration(time.Since(start)))
|
||||||
|
logged = time.Now()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
log.Info("Exported chain data", "file", fn, "kind", kind, "count", count,
|
||||||
|
"elapsed", common.PrettyDuration(time.Since(start)))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
|
@ -0,0 +1,198 @@
|
||||||
|
// Copyright 2021 The go-ethereum Authors
|
||||||
|
// This file is part of go-ethereum.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package utils
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||||
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestExport does basic sanity checks on the export/import functionality
|
||||||
|
func TestExport(t *testing.T) {
|
||||||
|
f := fmt.Sprintf("%v/tempdump", os.TempDir())
|
||||||
|
defer func() {
|
||||||
|
os.Remove(f)
|
||||||
|
}()
|
||||||
|
testExport(t, f)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestExportGzip(t *testing.T) {
|
||||||
|
f := fmt.Sprintf("%v/tempdump.gz", os.TempDir())
|
||||||
|
defer func() {
|
||||||
|
os.Remove(f)
|
||||||
|
}()
|
||||||
|
testExport(t, f)
|
||||||
|
}
|
||||||
|
|
||||||
|
type testIterator struct {
|
||||||
|
index int
|
||||||
|
}
|
||||||
|
|
||||||
|
func newTestIterator() *testIterator {
|
||||||
|
return &testIterator{index: -1}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (iter *testIterator) Next() (byte, []byte, []byte, bool) {
|
||||||
|
if iter.index >= 999 {
|
||||||
|
return 0, nil, nil, false
|
||||||
|
}
|
||||||
|
iter.index += 1
|
||||||
|
if iter.index == 42 {
|
||||||
|
iter.index += 1
|
||||||
|
}
|
||||||
|
return OpBatchAdd, []byte(fmt.Sprintf("key-%04d", iter.index)),
|
||||||
|
[]byte(fmt.Sprintf("value %d", iter.index)), true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (iter *testIterator) Release() {}
|
||||||
|
|
||||||
|
func testExport(t *testing.T, f string) {
|
||||||
|
err := ExportChaindata(f, "testdata", newTestIterator(), make(chan struct{}))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
db := rawdb.NewMemoryDatabase()
|
||||||
|
err = ImportLDBData(db, f, 5, make(chan struct{}))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
// verify
|
||||||
|
for i := 0; i < 1000; i++ {
|
||||||
|
v, err := db.Get([]byte(fmt.Sprintf("key-%04d", i)))
|
||||||
|
if (i < 5 || i == 42) && err == nil {
|
||||||
|
t.Fatalf("expected no element at idx %d, got '%v'", i, string(v))
|
||||||
|
}
|
||||||
|
if !(i < 5 || i == 42) {
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("expected element idx %d: %v", i, err)
|
||||||
|
}
|
||||||
|
if have, want := string(v), fmt.Sprintf("value %d", i); have != want {
|
||||||
|
t.Fatalf("have %v, want %v", have, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
v, err := db.Get([]byte(fmt.Sprintf("key-%04d", 1000)))
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("expected no element at idx %d, got '%v'", 1000, string(v))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// testDeletion tests if the deletion markers can be exported/imported correctly
|
||||||
|
func TestDeletionExport(t *testing.T) {
|
||||||
|
f := fmt.Sprintf("%v/tempdump", os.TempDir())
|
||||||
|
defer func() {
|
||||||
|
os.Remove(f)
|
||||||
|
}()
|
||||||
|
testDeletion(t, f)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestDeletionExportGzip tests if the deletion markers can be exported/imported
|
||||||
|
// correctly with gz compression.
|
||||||
|
func TestDeletionExportGzip(t *testing.T) {
|
||||||
|
f := fmt.Sprintf("%v/tempdump.gz", os.TempDir())
|
||||||
|
defer func() {
|
||||||
|
os.Remove(f)
|
||||||
|
}()
|
||||||
|
testDeletion(t, f)
|
||||||
|
}
|
||||||
|
|
||||||
|
type deletionIterator struct {
|
||||||
|
index int
|
||||||
|
}
|
||||||
|
|
||||||
|
func newDeletionIterator() *deletionIterator {
|
||||||
|
return &deletionIterator{index: -1}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (iter *deletionIterator) Next() (byte, []byte, []byte, bool) {
|
||||||
|
if iter.index >= 999 {
|
||||||
|
return 0, nil, nil, false
|
||||||
|
}
|
||||||
|
iter.index += 1
|
||||||
|
if iter.index == 42 {
|
||||||
|
iter.index += 1
|
||||||
|
}
|
||||||
|
return OpBatchDel, []byte(fmt.Sprintf("key-%04d", iter.index)), nil, true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (iter *deletionIterator) Release() {}
|
||||||
|
|
||||||
|
func testDeletion(t *testing.T, f string) {
|
||||||
|
err := ExportChaindata(f, "testdata", newDeletionIterator(), make(chan struct{}))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
db := rawdb.NewMemoryDatabase()
|
||||||
|
for i := 0; i < 1000; i++ {
|
||||||
|
db.Put([]byte(fmt.Sprintf("key-%04d", i)), []byte(fmt.Sprintf("value %d", i)))
|
||||||
|
}
|
||||||
|
err = ImportLDBData(db, f, 5, make(chan struct{}))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
for i := 0; i < 1000; i++ {
|
||||||
|
v, err := db.Get([]byte(fmt.Sprintf("key-%04d", i)))
|
||||||
|
if i < 5 || i == 42 {
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("expected element at idx %d, got '%v'", i, err)
|
||||||
|
}
|
||||||
|
if have, want := string(v), fmt.Sprintf("value %d", i); have != want {
|
||||||
|
t.Fatalf("have %v, want %v", have, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !(i < 5 || i == 42) {
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("expected no element idx %d: %v", i, string(v))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestImportFutureFormat tests that we reject unsupported future versions.
|
||||||
|
func TestImportFutureFormat(t *testing.T) {
|
||||||
|
f := fmt.Sprintf("%v/tempdump-future", os.TempDir())
|
||||||
|
defer func() {
|
||||||
|
os.Remove(f)
|
||||||
|
}()
|
||||||
|
fh, err := os.OpenFile(f, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer fh.Close()
|
||||||
|
if err := rlp.Encode(fh, &exportHeader{
|
||||||
|
Magic: exportMagic,
|
||||||
|
Version: 500,
|
||||||
|
Kind: "testdata",
|
||||||
|
UnixTime: uint64(time.Now().Unix()),
|
||||||
|
}); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
db2 := rawdb.NewMemoryDatabase()
|
||||||
|
err = ImportLDBData(db2, f, 0, make(chan struct{}))
|
||||||
|
if err == nil {
|
||||||
|
t.Fatal("Expected error, got none")
|
||||||
|
}
|
||||||
|
if !strings.HasPrefix(err.Error(), "incompatible version") {
|
||||||
|
t.Fatalf("wrong error: %v", err)
|
||||||
|
}
|
||||||
|
}
|
|
@ -235,9 +235,9 @@ var (
|
||||||
Usage: "Megabytes of memory allocated to bloom-filter for pruning",
|
Usage: "Megabytes of memory allocated to bloom-filter for pruning",
|
||||||
Value: 2048,
|
Value: 2048,
|
||||||
}
|
}
|
||||||
OverrideLondonFlag = cli.Uint64Flag{
|
OverrideArrowGlacierFlag = cli.Uint64Flag{
|
||||||
Name: "override.london",
|
Name: "override.arrowglacier",
|
||||||
Usage: "Manually specify London fork-block, overriding the bundled setting",
|
Usage: "Manually specify Arrow Glacier fork-block, overriding the bundled setting",
|
||||||
}
|
}
|
||||||
// Light server and client settings
|
// Light server and client settings
|
||||||
LightServeFlag = cli.IntFlag{
|
LightServeFlag = cli.IntFlag{
|
||||||
|
@ -493,6 +493,11 @@ var (
|
||||||
Usage: "Sets a cap on gas that can be used in eth_call/estimateGas (0=infinite)",
|
Usage: "Sets a cap on gas that can be used in eth_call/estimateGas (0=infinite)",
|
||||||
Value: ethconfig.Defaults.RPCGasCap,
|
Value: ethconfig.Defaults.RPCGasCap,
|
||||||
}
|
}
|
||||||
|
RPCGlobalEVMTimeoutFlag = cli.DurationFlag{
|
||||||
|
Name: "rpc.evmtimeout",
|
||||||
|
Usage: "Sets a timeout used for eth_call (0=infinite)",
|
||||||
|
Value: ethconfig.Defaults.RPCEVMTimeout,
|
||||||
|
}
|
||||||
RPCGlobalTxFeeCapFlag = cli.Float64Flag{
|
RPCGlobalTxFeeCapFlag = cli.Float64Flag{
|
||||||
Name: "rpc.txfeecap",
|
Name: "rpc.txfeecap",
|
||||||
Usage: "Sets a cap on transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap)",
|
Usage: "Sets a cap on transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap)",
|
||||||
|
@ -681,7 +686,7 @@ var (
|
||||||
}
|
}
|
||||||
GpoMaxGasPriceFlag = cli.Int64Flag{
|
GpoMaxGasPriceFlag = cli.Int64Flag{
|
||||||
Name: "gpo.maxprice",
|
Name: "gpo.maxprice",
|
||||||
Usage: "Maximum gas price will be recommended by gpo",
|
Usage: "Maximum transaction priority fee (or gasprice before London fork) to be recommended by gpo",
|
||||||
Value: ethconfig.Defaults.GPO.MaxPrice.Int64(),
|
Value: ethconfig.Defaults.GPO.MaxPrice.Int64(),
|
||||||
}
|
}
|
||||||
GpoIgnoreGasPriceFlag = cli.Int64Flag{
|
GpoIgnoreGasPriceFlag = cli.Int64Flag{
|
||||||
|
@ -1563,6 +1568,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
|
||||||
} else {
|
} else {
|
||||||
log.Info("Global gas cap disabled")
|
log.Info("Global gas cap disabled")
|
||||||
}
|
}
|
||||||
|
if ctx.GlobalIsSet(RPCGlobalEVMTimeoutFlag.Name) {
|
||||||
|
cfg.RPCEVMTimeout = ctx.GlobalDuration(RPCGlobalEVMTimeoutFlag.Name)
|
||||||
|
}
|
||||||
if ctx.GlobalIsSet(RPCGlobalTxFeeCapFlag.Name) {
|
if ctx.GlobalIsSet(RPCGlobalTxFeeCapFlag.Name) {
|
||||||
cfg.RPCTxFeeCap = ctx.GlobalFloat64(RPCGlobalTxFeeCapFlag.Name)
|
cfg.RPCTxFeeCap = ctx.GlobalFloat64(RPCGlobalTxFeeCapFlag.Name)
|
||||||
}
|
}
|
||||||
|
|
|
@ -176,13 +176,14 @@ func MustDecodeBig(input string) *big.Int {
|
||||||
}
|
}
|
||||||
|
|
||||||
// EncodeBig encodes bigint as a hex string with 0x prefix.
|
// EncodeBig encodes bigint as a hex string with 0x prefix.
|
||||||
// The sign of the integer is ignored.
|
|
||||||
func EncodeBig(bigint *big.Int) string {
|
func EncodeBig(bigint *big.Int) string {
|
||||||
nbits := bigint.BitLen()
|
if sign := bigint.Sign(); sign == 0 {
|
||||||
if nbits == 0 {
|
|
||||||
return "0x0"
|
return "0x0"
|
||||||
|
} else if sign > 0 {
|
||||||
|
return "0x" + bigint.Text(16)
|
||||||
|
} else {
|
||||||
|
return "-0x" + bigint.Text(16)[1:]
|
||||||
}
|
}
|
||||||
return fmt.Sprintf("%#x", bigint)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func has0xPrefix(input string) bool {
|
func has0xPrefix(input string) bool {
|
||||||
|
|
|
@ -201,3 +201,15 @@ func TestDecodeUint64(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func BenchmarkEncodeBig(b *testing.B) {
|
||||||
|
for _, bench := range encodeBigTests {
|
||||||
|
b.Run(bench.want, func(b *testing.B) {
|
||||||
|
b.ReportAllocs()
|
||||||
|
bigint := bench.input.(*big.Int)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
EncodeBig(bigint)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -214,6 +214,9 @@ func (api *API) GetSigner(rlpOrBlockNr *blockNumberOrHashOrRLP) (common.Address,
|
||||||
} else if number, ok := blockNrOrHash.Number(); ok {
|
} else if number, ok := blockNrOrHash.Number(); ok {
|
||||||
header = api.chain.GetHeaderByNumber(uint64(number.Int64()))
|
header = api.chain.GetHeaderByNumber(uint64(number.Int64()))
|
||||||
}
|
}
|
||||||
|
if header == nil {
|
||||||
|
return common.Address{}, fmt.Errorf("missing block %v", blockNrOrHash.String())
|
||||||
|
}
|
||||||
return api.clique.Author(header)
|
return api.clique.Author(header)
|
||||||
}
|
}
|
||||||
block := new(types.Block)
|
block := new(types.Block)
|
||||||
|
|
|
@ -363,7 +363,7 @@ func (c *Clique) verifyCascadingFields(chain consensus.ChainHeaderReader, header
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// All basic checks passed, verify the seal and return
|
// All basic checks passed, verify the seal and return
|
||||||
return c.verifySeal(chain, header, parents)
|
return c.verifySeal(snap, header, parents)
|
||||||
}
|
}
|
||||||
|
|
||||||
// snapshot retrieves the authorization snapshot at a given point in time.
|
// snapshot retrieves the authorization snapshot at a given point in time.
|
||||||
|
@ -460,18 +460,12 @@ func (c *Clique) VerifyUncles(chain consensus.ChainReader, block *types.Block) e
|
||||||
// consensus protocol requirements. The method accepts an optional list of parent
|
// consensus protocol requirements. The method accepts an optional list of parent
|
||||||
// headers that aren't yet part of the local blockchain to generate the snapshots
|
// headers that aren't yet part of the local blockchain to generate the snapshots
|
||||||
// from.
|
// from.
|
||||||
func (c *Clique) verifySeal(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error {
|
func (c *Clique) verifySeal(snap *Snapshot, header *types.Header, parents []*types.Header) error {
|
||||||
// Verifying the genesis block is not supported
|
// Verifying the genesis block is not supported
|
||||||
number := header.Number.Uint64()
|
number := header.Number.Uint64()
|
||||||
if number == 0 {
|
if number == 0 {
|
||||||
return errUnknownBlock
|
return errUnknownBlock
|
||||||
}
|
}
|
||||||
// Retrieve the snapshot needed to verify this header and cache it
|
|
||||||
snap, err := c.snapshot(chain, number-1, header.ParentHash, parents)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Resolve the authorization key and check against signers
|
// Resolve the authorization key and check against signers
|
||||||
signer, err := ecrecover(header, c.signatures)
|
signer, err := ecrecover(header, c.signatures)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
@ -45,6 +45,11 @@ var (
|
||||||
maxUncles = 2 // Maximum number of uncles allowed in a single block
|
maxUncles = 2 // Maximum number of uncles allowed in a single block
|
||||||
allowedFutureBlockTimeSeconds = int64(15) // Max seconds from current time allowed for blocks, before they're considered future blocks
|
allowedFutureBlockTimeSeconds = int64(15) // Max seconds from current time allowed for blocks, before they're considered future blocks
|
||||||
|
|
||||||
|
// calcDifficultyEip4345 is the difficulty adjustment algorithm as specified by EIP 4345.
|
||||||
|
// It offsets the bomb a total of 10.7M blocks.
|
||||||
|
// Specification EIP-4345: https://eips.ethereum.org/EIPS/eip-4345
|
||||||
|
calcDifficultyEip4345 = makeDifficultyCalculator(big.NewInt(10_700_000))
|
||||||
|
|
||||||
// calcDifficultyEip3554 is the difficulty adjustment algorithm as specified by EIP 3554.
|
// calcDifficultyEip3554 is the difficulty adjustment algorithm as specified by EIP 3554.
|
||||||
// It offsets the bomb a total of 9.7M blocks.
|
// It offsets the bomb a total of 9.7M blocks.
|
||||||
// Specification EIP-3554: https://eips.ethereum.org/EIPS/eip-3554
|
// Specification EIP-3554: https://eips.ethereum.org/EIPS/eip-3554
|
||||||
|
@ -330,8 +335,8 @@ func (ethash *Ethash) CalcDifficulty(chain consensus.ChainHeaderReader, time uin
|
||||||
func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int {
|
func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int {
|
||||||
next := new(big.Int).Add(parent.Number, big1)
|
next := new(big.Int).Add(parent.Number, big1)
|
||||||
switch {
|
switch {
|
||||||
case config.IsCatalyst(next):
|
case config.IsArrowGlacier(next):
|
||||||
return big.NewInt(1)
|
return calcDifficultyEip4345(time, parent)
|
||||||
case config.IsLondon(next):
|
case config.IsLondon(next):
|
||||||
return calcDifficultyEip3554(time, parent)
|
return calcDifficultyEip3554(time, parent)
|
||||||
case config.IsMuirGlacier(next):
|
case config.IsMuirGlacier(next):
|
||||||
|
@ -639,10 +644,6 @@ var (
|
||||||
// reward. The total reward consists of the static block reward and rewards for
|
// reward. The total reward consists of the static block reward and rewards for
|
||||||
// included uncles. The coinbase of each uncle block is also rewarded.
|
// included uncles. The coinbase of each uncle block is also rewarded.
|
||||||
func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header) {
|
func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header) {
|
||||||
// Skip block reward in catalyst mode
|
|
||||||
if config.IsCatalyst(header.Number) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Select the correct block reward based on chain progression
|
// Select the correct block reward based on chain progression
|
||||||
blockReward := FrontierBlockReward
|
blockReward := FrontierBlockReward
|
||||||
if config.IsByzantium(header.Number) {
|
if config.IsByzantium(header.Number) {
|
||||||
|
|
|
@ -136,13 +136,16 @@ func memoryMapAndGenerate(path string, size uint64, lock bool, generator func(bu
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, nil, err
|
return nil, nil, nil, err
|
||||||
}
|
}
|
||||||
if err = dump.Truncate(int64(len(dumpMagic))*4 + int64(size)); err != nil {
|
if err = ensureSize(dump, int64(len(dumpMagic))*4+int64(size)); err != nil {
|
||||||
|
dump.Close()
|
||||||
|
os.Remove(temp)
|
||||||
return nil, nil, nil, err
|
return nil, nil, nil, err
|
||||||
}
|
}
|
||||||
// Memory map the file for writing and fill it with the generator
|
// Memory map the file for writing and fill it with the generator
|
||||||
mem, buffer, err := memoryMapFile(dump, true)
|
mem, buffer, err := memoryMapFile(dump, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
dump.Close()
|
dump.Close()
|
||||||
|
os.Remove(temp)
|
||||||
return nil, nil, nil, err
|
return nil, nil, nil, err
|
||||||
}
|
}
|
||||||
copy(buffer, dumpMagic)
|
copy(buffer, dumpMagic)
|
||||||
|
@ -358,7 +361,7 @@ func (d *dataset) generate(dir string, limit int, lock bool, test bool) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Error("Failed to generate mapped ethash dataset", "err", err)
|
logger.Error("Failed to generate mapped ethash dataset", "err", err)
|
||||||
|
|
||||||
d.dataset = make([]uint32, dsize/2)
|
d.dataset = make([]uint32, dsize/4)
|
||||||
generateDataset(d.dataset, d.epoch, cache)
|
generateDataset(d.dataset, d.epoch, cache)
|
||||||
}
|
}
|
||||||
// Iterate over all previous instances and delete old ones
|
// Iterate over all previous instances and delete old ones
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
// Copyright 2018 The go-ethereum Authors
|
// Copyright 2021 The go-ethereum Authors
|
||||||
// This file is part of the go-ethereum library.
|
// This file is part of the go-ethereum library.
|
||||||
//
|
//
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
@ -14,8 +14,22 @@
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
// Package deps contains the console JavaScript dependencies Go embedded.
|
//go:build linux
|
||||||
package deps
|
// +build linux
|
||||||
|
|
||||||
//go:generate go-bindata -nometadata -pkg deps -o bindata.go bignumber.js
|
package ethash
|
||||||
//go:generate gofmt -w -s bindata.go
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ensureSize expands the file to the given size. This is to prevent runtime
|
||||||
|
// errors later on, if the underlying file expands beyond the disk capacity,
|
||||||
|
// even though it ostensibly is already expanded, but due to being sparse
|
||||||
|
// does not actually occupy the full declared size on disk.
|
||||||
|
func ensureSize(f *os.File, size int64) error {
|
||||||
|
// Docs: https://www.man7.org/linux/man-pages/man2/fallocate.2.html
|
||||||
|
return unix.Fallocate(int(f.Fd()), 0, 0, size)
|
||||||
|
}
|
|
@ -0,0 +1,36 @@
|
||||||
|
// Copyright 2021 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
//go:build !linux
|
||||||
|
// +build !linux
|
||||||
|
|
||||||
|
package ethash
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ensureSize expands the file to the given size. This is to prevent runtime
|
||||||
|
// errors later on, if the underlying file expands beyond the disk capacity,
|
||||||
|
// even though it ostensibly is already expanded, but due to being sparse
|
||||||
|
// does not actually occupy the full declared size on disk.
|
||||||
|
func ensureSize(f *os.File, size int64) error {
|
||||||
|
// On systems which do not support fallocate, we merely truncate it.
|
||||||
|
// More robust alternatives would be to
|
||||||
|
// - Use posix_fallocate, or
|
||||||
|
// - explicitly fill the file with zeroes.
|
||||||
|
return f.Truncate(size)
|
||||||
|
}
|
|
@ -29,24 +29,24 @@ import (
|
||||||
// do not use e.g. SetInt() on the numbers. For testing only
|
// do not use e.g. SetInt() on the numbers. For testing only
|
||||||
func copyConfig(original *params.ChainConfig) *params.ChainConfig {
|
func copyConfig(original *params.ChainConfig) *params.ChainConfig {
|
||||||
return ¶ms.ChainConfig{
|
return ¶ms.ChainConfig{
|
||||||
ChainID: original.ChainID,
|
ChainID: original.ChainID,
|
||||||
HomesteadBlock: original.HomesteadBlock,
|
HomesteadBlock: original.HomesteadBlock,
|
||||||
DAOForkBlock: original.DAOForkBlock,
|
DAOForkBlock: original.DAOForkBlock,
|
||||||
DAOForkSupport: original.DAOForkSupport,
|
DAOForkSupport: original.DAOForkSupport,
|
||||||
EIP150Block: original.EIP150Block,
|
EIP150Block: original.EIP150Block,
|
||||||
EIP150Hash: original.EIP150Hash,
|
EIP150Hash: original.EIP150Hash,
|
||||||
EIP155Block: original.EIP155Block,
|
EIP155Block: original.EIP155Block,
|
||||||
EIP158Block: original.EIP158Block,
|
EIP158Block: original.EIP158Block,
|
||||||
ByzantiumBlock: original.ByzantiumBlock,
|
ByzantiumBlock: original.ByzantiumBlock,
|
||||||
ConstantinopleBlock: original.ConstantinopleBlock,
|
ConstantinopleBlock: original.ConstantinopleBlock,
|
||||||
PetersburgBlock: original.PetersburgBlock,
|
PetersburgBlock: original.PetersburgBlock,
|
||||||
IstanbulBlock: original.IstanbulBlock,
|
IstanbulBlock: original.IstanbulBlock,
|
||||||
MuirGlacierBlock: original.MuirGlacierBlock,
|
MuirGlacierBlock: original.MuirGlacierBlock,
|
||||||
BerlinBlock: original.BerlinBlock,
|
BerlinBlock: original.BerlinBlock,
|
||||||
LondonBlock: original.LondonBlock,
|
LondonBlock: original.LondonBlock,
|
||||||
CatalystBlock: original.CatalystBlock,
|
TerminalTotalDifficulty: original.TerminalTotalDifficulty,
|
||||||
Ethash: original.Ethash,
|
Ethash: original.Ethash,
|
||||||
Clique: original.Clique,
|
Clique: original.Clique,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -75,7 +75,7 @@ var (
|
||||||
// This is the content of the genesis block used by the benchmarks.
|
// This is the content of the genesis block used by the benchmarks.
|
||||||
benchRootKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
benchRootKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
||||||
benchRootAddr = crypto.PubkeyToAddress(benchRootKey.PublicKey)
|
benchRootAddr = crypto.PubkeyToAddress(benchRootKey.PublicKey)
|
||||||
benchRootFunds = math.BigPow(2, 100)
|
benchRootFunds = math.BigPow(2, 200)
|
||||||
)
|
)
|
||||||
|
|
||||||
// genValueTx returns a block generator that includes a single
|
// genValueTx returns a block generator that includes a single
|
||||||
|
@ -86,7 +86,19 @@ func genValueTx(nbytes int) func(int, *BlockGen) {
|
||||||
toaddr := common.Address{}
|
toaddr := common.Address{}
|
||||||
data := make([]byte, nbytes)
|
data := make([]byte, nbytes)
|
||||||
gas, _ := IntrinsicGas(data, nil, false, false, false)
|
gas, _ := IntrinsicGas(data, nil, false, false, false)
|
||||||
tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(benchRootAddr), toaddr, big.NewInt(1), gas, nil, data), types.HomesteadSigner{}, benchRootKey)
|
signer := types.MakeSigner(gen.config, big.NewInt(int64(i)))
|
||||||
|
gasPrice := big.NewInt(0)
|
||||||
|
if gen.header.BaseFee != nil {
|
||||||
|
gasPrice = gen.header.BaseFee
|
||||||
|
}
|
||||||
|
tx, _ := types.SignNewTx(benchRootKey, signer, &types.LegacyTx{
|
||||||
|
Nonce: gen.TxNonce(benchRootAddr),
|
||||||
|
To: &toaddr,
|
||||||
|
Value: big.NewInt(1),
|
||||||
|
Gas: gas,
|
||||||
|
Data: data,
|
||||||
|
GasPrice: gasPrice,
|
||||||
|
})
|
||||||
gen.AddTx(tx)
|
gen.AddTx(tx)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -110,24 +122,38 @@ func init() {
|
||||||
// and fills the blocks with many small transactions.
|
// and fills the blocks with many small transactions.
|
||||||
func genTxRing(naccounts int) func(int, *BlockGen) {
|
func genTxRing(naccounts int) func(int, *BlockGen) {
|
||||||
from := 0
|
from := 0
|
||||||
|
availableFunds := new(big.Int).Set(benchRootFunds)
|
||||||
return func(i int, gen *BlockGen) {
|
return func(i int, gen *BlockGen) {
|
||||||
block := gen.PrevBlock(i - 1)
|
block := gen.PrevBlock(i - 1)
|
||||||
gas := block.GasLimit()
|
gas := block.GasLimit()
|
||||||
|
gasPrice := big.NewInt(0)
|
||||||
|
if gen.header.BaseFee != nil {
|
||||||
|
gasPrice = gen.header.BaseFee
|
||||||
|
}
|
||||||
|
signer := types.MakeSigner(gen.config, big.NewInt(int64(i)))
|
||||||
for {
|
for {
|
||||||
gas -= params.TxGas
|
gas -= params.TxGas
|
||||||
if gas < params.TxGas {
|
if gas < params.TxGas {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
to := (from + 1) % naccounts
|
to := (from + 1) % naccounts
|
||||||
tx := types.NewTransaction(
|
burn := new(big.Int).SetUint64(params.TxGas)
|
||||||
gen.TxNonce(ringAddrs[from]),
|
burn.Mul(burn, gen.header.BaseFee)
|
||||||
ringAddrs[to],
|
availableFunds.Sub(availableFunds, burn)
|
||||||
benchRootFunds,
|
if availableFunds.Cmp(big.NewInt(1)) < 0 {
|
||||||
params.TxGas,
|
panic("not enough funds")
|
||||||
nil,
|
}
|
||||||
nil,
|
tx, err := types.SignNewTx(ringKeys[from], signer,
|
||||||
)
|
&types.LegacyTx{
|
||||||
tx, _ = types.SignTx(tx, types.HomesteadSigner{}, ringKeys[from])
|
Nonce: gen.TxNonce(ringAddrs[from]),
|
||||||
|
To: &ringAddrs[to],
|
||||||
|
Value: availableFunds,
|
||||||
|
Gas: params.TxGas,
|
||||||
|
GasPrice: gasPrice,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
gen.AddTx(tx)
|
gen.AddTx(tx)
|
||||||
from = to
|
from = to
|
||||||
}
|
}
|
||||||
|
@ -245,6 +271,7 @@ func makeChainForBench(db ethdb.Database, full bool, count uint64) {
|
||||||
block := types.NewBlockWithHeader(header)
|
block := types.NewBlockWithHeader(header)
|
||||||
rawdb.WriteBody(db, hash, n, block.Body())
|
rawdb.WriteBody(db, hash, n, block.Body())
|
||||||
rawdb.WriteReceipts(db, hash, n, nil)
|
rawdb.WriteReceipts(db, hash, n, nil)
|
||||||
|
rawdb.WriteHeadBlockHash(db, hash)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -278,6 +305,8 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
|
||||||
}
|
}
|
||||||
makeChainForBench(db, full, count)
|
makeChainForBench(db, full, count)
|
||||||
db.Close()
|
db.Close()
|
||||||
|
cacheConfig := *defaultCacheConfig
|
||||||
|
cacheConfig.TrieDirtyDisabled = true
|
||||||
|
|
||||||
b.ReportAllocs()
|
b.ReportAllocs()
|
||||||
b.ResetTimer()
|
b.ResetTimer()
|
||||||
|
@ -287,7 +316,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.Fatalf("error opening database at %v: %v", dir, err)
|
b.Fatalf("error opening database at %v: %v", dir, err)
|
||||||
}
|
}
|
||||||
chain, err := NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
|
chain, err := NewBlockChain(db, &cacheConfig, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.Fatalf("error creating chain: %v", err)
|
b.Fatalf("error creating chain: %v", err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -43,7 +43,6 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/metrics"
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
lru "github.com/hashicorp/golang-lru"
|
lru "github.com/hashicorp/golang-lru"
|
||||||
)
|
)
|
||||||
|
@ -409,11 +408,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
|
||||||
return bc, nil
|
return bc, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetVMConfig returns the block chain VM config.
|
|
||||||
func (bc *BlockChain) GetVMConfig() *vm.Config {
|
|
||||||
return &bc.vmConfig
|
|
||||||
}
|
|
||||||
|
|
||||||
// empty returns an indicator whether the blockchain is empty.
|
// empty returns an indicator whether the blockchain is empty.
|
||||||
// Note, it's a special case that we connect a non-empty ancient
|
// Note, it's a special case that we connect a non-empty ancient
|
||||||
// database with an empty node, so that we can plugin the ancient
|
// database with an empty node, so that we can plugin the ancient
|
||||||
|
@ -666,53 +660,6 @@ func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GasLimit returns the gas limit of the current HEAD block.
|
|
||||||
func (bc *BlockChain) GasLimit() uint64 {
|
|
||||||
return bc.CurrentBlock().GasLimit()
|
|
||||||
}
|
|
||||||
|
|
||||||
// CurrentBlock retrieves the current head block of the canonical chain. The
|
|
||||||
// block is retrieved from the blockchain's internal cache.
|
|
||||||
func (bc *BlockChain) CurrentBlock() *types.Block {
|
|
||||||
return bc.currentBlock.Load().(*types.Block)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Snapshots returns the blockchain snapshot tree.
|
|
||||||
func (bc *BlockChain) Snapshots() *snapshot.Tree {
|
|
||||||
return bc.snaps
|
|
||||||
}
|
|
||||||
|
|
||||||
// CurrentFastBlock retrieves the current fast-sync head block of the canonical
|
|
||||||
// chain. The block is retrieved from the blockchain's internal cache.
|
|
||||||
func (bc *BlockChain) CurrentFastBlock() *types.Block {
|
|
||||||
return bc.currentFastBlock.Load().(*types.Block)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validator returns the current validator.
|
|
||||||
func (bc *BlockChain) Validator() Validator {
|
|
||||||
return bc.validator
|
|
||||||
}
|
|
||||||
|
|
||||||
// Processor returns the current processor.
|
|
||||||
func (bc *BlockChain) Processor() Processor {
|
|
||||||
return bc.processor
|
|
||||||
}
|
|
||||||
|
|
||||||
// State returns a new mutable state based on the current HEAD block.
|
|
||||||
func (bc *BlockChain) State() (*state.StateDB, error) {
|
|
||||||
return bc.StateAt(bc.CurrentBlock().Root())
|
|
||||||
}
|
|
||||||
|
|
||||||
// StateAt returns a new mutable state based on a particular point in time.
|
|
||||||
func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
|
|
||||||
return state.New(root, bc.stateCache, bc.snaps)
|
|
||||||
}
|
|
||||||
|
|
||||||
// StateCache returns the caching database underpinning the blockchain instance.
|
|
||||||
func (bc *BlockChain) StateCache() state.Database {
|
|
||||||
return bc.stateCache
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reset purges the entire blockchain, restoring it to its genesis state.
|
// Reset purges the entire blockchain, restoring it to its genesis state.
|
||||||
func (bc *BlockChain) Reset() error {
|
func (bc *BlockChain) Reset() error {
|
||||||
return bc.ResetWithGenesisBlock(bc.genesisBlock)
|
return bc.ResetWithGenesisBlock(bc.genesisBlock)
|
||||||
|
@ -819,194 +766,6 @@ func (bc *BlockChain) writeHeadBlock(block *types.Block) {
|
||||||
headBlockGauge.Update(int64(block.NumberU64()))
|
headBlockGauge.Update(int64(block.NumberU64()))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Genesis retrieves the chain's genesis block.
|
|
||||||
func (bc *BlockChain) Genesis() *types.Block {
|
|
||||||
return bc.genesisBlock
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetBody retrieves a block body (transactions and uncles) from the database by
|
|
||||||
// hash, caching it if found.
|
|
||||||
func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
|
|
||||||
// Short circuit if the body's already in the cache, retrieve otherwise
|
|
||||||
if cached, ok := bc.bodyCache.Get(hash); ok {
|
|
||||||
body := cached.(*types.Body)
|
|
||||||
return body
|
|
||||||
}
|
|
||||||
number := bc.hc.GetBlockNumber(hash)
|
|
||||||
if number == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
body := rawdb.ReadBody(bc.db, hash, *number)
|
|
||||||
if body == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// Cache the found body for next time and return
|
|
||||||
bc.bodyCache.Add(hash, body)
|
|
||||||
return body
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
|
|
||||||
// caching it if found.
|
|
||||||
func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
|
|
||||||
// Short circuit if the body's already in the cache, retrieve otherwise
|
|
||||||
if cached, ok := bc.bodyRLPCache.Get(hash); ok {
|
|
||||||
return cached.(rlp.RawValue)
|
|
||||||
}
|
|
||||||
number := bc.hc.GetBlockNumber(hash)
|
|
||||||
if number == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
body := rawdb.ReadBodyRLP(bc.db, hash, *number)
|
|
||||||
if len(body) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// Cache the found body for next time and return
|
|
||||||
bc.bodyRLPCache.Add(hash, body)
|
|
||||||
return body
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasBlock checks if a block is fully present in the database or not.
|
|
||||||
func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
|
|
||||||
if bc.blockCache.Contains(hash) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return rawdb.HasBody(bc.db, hash, number)
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasFastBlock checks if a fast block is fully present in the database or not.
|
|
||||||
func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool {
|
|
||||||
if !bc.HasBlock(hash, number) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if bc.receiptsCache.Contains(hash) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return rawdb.HasReceipts(bc.db, hash, number)
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasState checks if state trie is fully present in the database or not.
|
|
||||||
func (bc *BlockChain) HasState(hash common.Hash) bool {
|
|
||||||
_, err := bc.stateCache.OpenTrie(hash)
|
|
||||||
return err == nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasBlockAndState checks if a block and associated state trie is fully present
|
|
||||||
// in the database or not, caching it if present.
|
|
||||||
func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
|
|
||||||
// Check first that the block itself is known
|
|
||||||
block := bc.GetBlock(hash, number)
|
|
||||||
if block == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return bc.HasState(block.Root())
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetBlock retrieves a block from the database by hash and number,
|
|
||||||
// caching it if found.
|
|
||||||
func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
|
|
||||||
// Short circuit if the block's already in the cache, retrieve otherwise
|
|
||||||
if block, ok := bc.blockCache.Get(hash); ok {
|
|
||||||
return block.(*types.Block)
|
|
||||||
}
|
|
||||||
block := rawdb.ReadBlock(bc.db, hash, number)
|
|
||||||
if block == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// Cache the found block for next time and return
|
|
||||||
bc.blockCache.Add(block.Hash(), block)
|
|
||||||
return block
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetBlockByHash retrieves a block from the database by hash, caching it if found.
|
|
||||||
func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
|
|
||||||
number := bc.hc.GetBlockNumber(hash)
|
|
||||||
if number == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return bc.GetBlock(hash, *number)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetBlockByNumber retrieves a block from the database by number, caching it
|
|
||||||
// (associated with its hash) if found.
|
|
||||||
func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
|
|
||||||
hash := rawdb.ReadCanonicalHash(bc.db, number)
|
|
||||||
if hash == (common.Hash{}) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return bc.GetBlock(hash, number)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetReceiptsByHash retrieves the receipts for all transactions in a given block.
|
|
||||||
func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
|
|
||||||
if receipts, ok := bc.receiptsCache.Get(hash); ok {
|
|
||||||
return receipts.(types.Receipts)
|
|
||||||
}
|
|
||||||
number := rawdb.ReadHeaderNumber(bc.db, hash)
|
|
||||||
if number == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)
|
|
||||||
if receipts == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
bc.receiptsCache.Add(hash, receipts)
|
|
||||||
return receipts
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
|
|
||||||
// [deprecated by eth/62]
|
|
||||||
func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
|
|
||||||
number := bc.hc.GetBlockNumber(hash)
|
|
||||||
if number == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
block := bc.GetBlock(hash, *number)
|
|
||||||
if block == nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
blocks = append(blocks, block)
|
|
||||||
hash = block.ParentHash()
|
|
||||||
*number--
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetUnclesInChain retrieves all the uncles from a given block backwards until
|
|
||||||
// a specific distance is reached.
|
|
||||||
func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
|
|
||||||
uncles := []*types.Header{}
|
|
||||||
for i := 0; block != nil && i < length; i++ {
|
|
||||||
uncles = append(uncles, block.Uncles()...)
|
|
||||||
block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
|
|
||||||
}
|
|
||||||
return uncles
|
|
||||||
}
|
|
||||||
|
|
||||||
// TrieNode retrieves a blob of data associated with a trie node
|
|
||||||
// either from ephemeral in-memory cache, or from persistent storage.
|
|
||||||
func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
|
|
||||||
return bc.stateCache.TrieDB().Node(hash)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContractCode retrieves a blob of data associated with a contract hash
|
|
||||||
// either from ephemeral in-memory cache, or from persistent storage.
|
|
||||||
func (bc *BlockChain) ContractCode(hash common.Hash) ([]byte, error) {
|
|
||||||
return bc.stateCache.ContractCode(common.Hash{}, hash)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContractCodeWithPrefix retrieves a blob of data associated with a contract
|
|
||||||
// hash either from ephemeral in-memory cache, or from persistent storage.
|
|
||||||
//
|
|
||||||
// If the code doesn't exist in the in-memory cache, check the storage with
|
|
||||||
// new code scheme.
|
|
||||||
func (bc *BlockChain) ContractCodeWithPrefix(hash common.Hash) ([]byte, error) {
|
|
||||||
type codeReader interface {
|
|
||||||
ContractCodeWithPrefix(addrHash, codeHash common.Hash) ([]byte, error)
|
|
||||||
}
|
|
||||||
return bc.stateCache.(codeReader).ContractCodeWithPrefix(common.Hash{}, hash)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stop stops the blockchain service. If any imports are currently in progress
|
// Stop stops the blockchain service. If any imports are currently in progress
|
||||||
// it will abort them using the procInterrupt.
|
// it will abort them using the procInterrupt.
|
||||||
func (bc *BlockChain) Stop() {
|
func (bc *BlockChain) Stop() {
|
||||||
|
@ -1390,18 +1149,6 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
|
||||||
return 0, nil
|
return 0, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetTxLookupLimit is responsible for updating the txlookup limit to the
|
|
||||||
// original one stored in db if the new mismatches with the old one.
|
|
||||||
func (bc *BlockChain) SetTxLookupLimit(limit uint64) {
|
|
||||||
bc.txLookupLimit = limit
|
|
||||||
}
|
|
||||||
|
|
||||||
// TxLookupLimit retrieves the txlookup limit used by blockchain to prune
|
|
||||||
// stale transaction indices.
|
|
||||||
func (bc *BlockChain) TxLookupLimit() uint64 {
|
|
||||||
return bc.txLookupLimit
|
|
||||||
}
|
|
||||||
|
|
||||||
var lastWrite uint64
|
var lastWrite uint64
|
||||||
|
|
||||||
// writeBlockWithoutState writes only the block and its metadata to the database,
|
// writeBlockWithoutState writes only the block and its metadata to the database,
|
||||||
|
@ -1689,11 +1436,10 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
|
||||||
|
|
||||||
// Peek the error for the first block to decide the directing import logic
|
// Peek the error for the first block to decide the directing import logic
|
||||||
it := newInsertIterator(chain, results, bc.validator)
|
it := newInsertIterator(chain, results, bc.validator)
|
||||||
|
|
||||||
block, err := it.next()
|
block, err := it.next()
|
||||||
|
|
||||||
// Left-trim all the known blocks
|
// Left-trim all the known blocks that don't need to build snapshot
|
||||||
if err == ErrKnownBlock {
|
if bc.skipBlock(err, it) {
|
||||||
// First block (and state) is known
|
// First block (and state) is known
|
||||||
// 1. We did a roll-back, and should now do a re-import
|
// 1. We did a roll-back, and should now do a re-import
|
||||||
// 2. The block is stored as a sidechain, and is lying about it's stateroot, and passes a stateroot
|
// 2. The block is stored as a sidechain, and is lying about it's stateroot, and passes a stateroot
|
||||||
|
@ -1704,7 +1450,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
|
||||||
localTd = bc.GetTd(current.Hash(), current.NumberU64())
|
localTd = bc.GetTd(current.Hash(), current.NumberU64())
|
||||||
externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1) // The first block can't be nil
|
externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1) // The first block can't be nil
|
||||||
)
|
)
|
||||||
for block != nil && err == ErrKnownBlock {
|
for block != nil && bc.skipBlock(err, it) {
|
||||||
externTd = new(big.Int).Add(externTd, block.Difficulty())
|
externTd = new(big.Int).Add(externTd, block.Difficulty())
|
||||||
if localTd.Cmp(externTd) < 0 {
|
if localTd.Cmp(externTd) < 0 {
|
||||||
break
|
break
|
||||||
|
@ -1722,7 +1468,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
|
||||||
// When node runs a fast sync again, it can re-import a batch of known blocks via
|
// When node runs a fast sync again, it can re-import a batch of known blocks via
|
||||||
// `insertChain` while a part of them have higher total difficulty than current
|
// `insertChain` while a part of them have higher total difficulty than current
|
||||||
// head full block(new pivot point).
|
// head full block(new pivot point).
|
||||||
for block != nil && err == ErrKnownBlock {
|
for block != nil && bc.skipBlock(err, it) {
|
||||||
log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash())
|
log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash())
|
||||||
if err := bc.writeKnownBlock(block); err != nil {
|
if err := bc.writeKnownBlock(block); err != nil {
|
||||||
return it.index, err
|
return it.index, err
|
||||||
|
@ -1754,8 +1500,10 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
|
||||||
// If there are any still remaining, mark as ignored
|
// If there are any still remaining, mark as ignored
|
||||||
return it.index, err
|
return it.index, err
|
||||||
|
|
||||||
// Some other error occurred, abort
|
// Some other error(except ErrKnownBlock) occurred, abort.
|
||||||
case err != nil:
|
// ErrKnownBlock is allowed here since some known blocks
|
||||||
|
// still need re-execution to generate snapshots that are missing
|
||||||
|
case err != nil && !errors.Is(err, ErrKnownBlock):
|
||||||
bc.futureBlocks.Remove(block.Hash())
|
bc.futureBlocks.Remove(block.Hash())
|
||||||
stats.ignored += len(it.chain)
|
stats.ignored += len(it.chain)
|
||||||
bc.reportBlock(block, nil, err)
|
bc.reportBlock(block, nil, err)
|
||||||
|
@ -1773,7 +1521,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
for ; block != nil && err == nil || err == ErrKnownBlock; block, err = it.next() {
|
for ; block != nil && err == nil || errors.Is(err, ErrKnownBlock); block, err = it.next() {
|
||||||
// If the chain is terminating, stop processing blocks
|
// If the chain is terminating, stop processing blocks
|
||||||
if bc.insertStopped() {
|
if bc.insertStopped() {
|
||||||
log.Debug("Abort during block processing")
|
log.Debug("Abort during block processing")
|
||||||
|
@ -1788,8 +1536,9 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
|
||||||
// Clique blocks where they can share state among each other, so importing an
|
// Clique blocks where they can share state among each other, so importing an
|
||||||
// older block might complete the state of the subsequent one. In this case,
|
// older block might complete the state of the subsequent one. In this case,
|
||||||
// just skip the block (we already validated it once fully (and crashed), since
|
// just skip the block (we already validated it once fully (and crashed), since
|
||||||
// its header and body was already in the database).
|
// its header and body was already in the database). But if the corresponding
|
||||||
if err == ErrKnownBlock {
|
// snapshot layer is missing, forcibly rerun the execution to build it.
|
||||||
|
if bc.skipBlock(err, it) {
|
||||||
logger := log.Debug
|
logger := log.Debug
|
||||||
if bc.chainConfig.Clique == nil {
|
if bc.chainConfig.Clique == nil {
|
||||||
logger = log.Warn
|
logger = log.Warn
|
||||||
|
@ -2266,6 +2015,47 @@ func (bc *BlockChain) futureBlocksLoop() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// skipBlock returns 'true', if the block being imported can be skipped over, meaning
|
||||||
|
// that the block does not need to be processed but can be considered already fully 'done'.
|
||||||
|
func (bc *BlockChain) skipBlock(err error, it *insertIterator) bool {
|
||||||
|
// We can only ever bypass processing if the only error returned by the validator
|
||||||
|
// is ErrKnownBlock, which means all checks passed, but we already have the block
|
||||||
|
// and state.
|
||||||
|
if !errors.Is(err, ErrKnownBlock) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// If we're not using snapshots, we can skip this, since we have both block
|
||||||
|
// and (trie-) state
|
||||||
|
if bc.snaps == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
header = it.current() // header can't be nil
|
||||||
|
parentRoot common.Hash
|
||||||
|
)
|
||||||
|
// If we also have the snapshot-state, we can skip the processing.
|
||||||
|
if bc.snaps.Snapshot(header.Root) != nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
// In this case, we have the trie-state but not snapshot-state. If the parent
|
||||||
|
// snapshot-state exists, we need to process this in order to not get a gap
|
||||||
|
// in the snapshot layers.
|
||||||
|
// Resolve parent block
|
||||||
|
if parent := it.previous(); parent != nil {
|
||||||
|
parentRoot = parent.Root
|
||||||
|
} else if parent = bc.GetHeaderByHash(header.ParentHash); parent != nil {
|
||||||
|
parentRoot = parent.Root
|
||||||
|
}
|
||||||
|
if parentRoot == (common.Hash{}) {
|
||||||
|
return false // Theoretically impossible case
|
||||||
|
}
|
||||||
|
// Parent is also missing snapshot: we can skip this. Otherwise process.
|
||||||
|
if bc.snaps.Snapshot(parentRoot) == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
// maintainTxIndex is responsible for the construction and deletion of the
|
// maintainTxIndex is responsible for the construction and deletion of the
|
||||||
// transaction index.
|
// transaction index.
|
||||||
//
|
//
|
||||||
|
@ -2398,128 +2188,3 @@ func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (i
|
||||||
_, err := bc.hc.InsertHeaderChain(chain, start)
|
_, err := bc.hc.InsertHeaderChain(chain, start)
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// CurrentHeader retrieves the current head header of the canonical chain. The
|
|
||||||
// header is retrieved from the HeaderChain's internal cache.
|
|
||||||
func (bc *BlockChain) CurrentHeader() *types.Header {
|
|
||||||
return bc.hc.CurrentHeader()
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetTd retrieves a block's total difficulty in the canonical chain from the
|
|
||||||
// database by hash and number, caching it if found.
|
|
||||||
func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
|
|
||||||
return bc.hc.GetTd(hash, number)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetTdByHash retrieves a block's total difficulty in the canonical chain from the
|
|
||||||
// database by hash, caching it if found.
|
|
||||||
func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
|
|
||||||
return bc.hc.GetTdByHash(hash)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetHeader retrieves a block header from the database by hash and number,
|
|
||||||
// caching it if found.
|
|
||||||
func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
|
|
||||||
// Blockchain might have cached the whole block, only if not go to headerchain
|
|
||||||
if block, ok := bc.blockCache.Get(hash); ok {
|
|
||||||
return block.(*types.Block).Header()
|
|
||||||
}
|
|
||||||
|
|
||||||
return bc.hc.GetHeader(hash, number)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetHeaderByHash retrieves a block header from the database by hash, caching it if
|
|
||||||
// found.
|
|
||||||
func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
|
|
||||||
// Blockchain might have cached the whole block, only if not go to headerchain
|
|
||||||
if block, ok := bc.blockCache.Get(hash); ok {
|
|
||||||
return block.(*types.Block).Header()
|
|
||||||
}
|
|
||||||
|
|
||||||
return bc.hc.GetHeaderByHash(hash)
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasHeader checks if a block header is present in the database or not, caching
|
|
||||||
// it if present.
|
|
||||||
func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
|
|
||||||
return bc.hc.HasHeader(hash, number)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetCanonicalHash returns the canonical hash for a given block number
|
|
||||||
func (bc *BlockChain) GetCanonicalHash(number uint64) common.Hash {
|
|
||||||
return bc.hc.GetCanonicalHash(number)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
|
|
||||||
// hash, fetching towards the genesis block.
|
|
||||||
func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
|
|
||||||
return bc.hc.GetBlockHashesFromHash(hash, max)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
|
|
||||||
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
|
|
||||||
// number of blocks to be individually checked before we reach the canonical chain.
|
|
||||||
//
|
|
||||||
// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
|
|
||||||
func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
|
|
||||||
return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetHeaderByNumber retrieves a block header from the database by number,
|
|
||||||
// caching it (associated with its hash) if found.
|
|
||||||
func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
|
|
||||||
return bc.hc.GetHeaderByNumber(number)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetTransactionLookup retrieves the lookup associate with the given transaction
|
|
||||||
// hash from the cache or database.
|
|
||||||
func (bc *BlockChain) GetTransactionLookup(hash common.Hash) *rawdb.LegacyTxLookupEntry {
|
|
||||||
// Short circuit if the txlookup already in the cache, retrieve otherwise
|
|
||||||
if lookup, exist := bc.txLookupCache.Get(hash); exist {
|
|
||||||
return lookup.(*rawdb.LegacyTxLookupEntry)
|
|
||||||
}
|
|
||||||
tx, blockHash, blockNumber, txIndex := rawdb.ReadTransaction(bc.db, hash)
|
|
||||||
if tx == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
lookup := &rawdb.LegacyTxLookupEntry{BlockHash: blockHash, BlockIndex: blockNumber, Index: txIndex}
|
|
||||||
bc.txLookupCache.Add(hash, lookup)
|
|
||||||
return lookup
|
|
||||||
}
|
|
||||||
|
|
||||||
// Config retrieves the chain's fork configuration.
|
|
||||||
func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }
|
|
||||||
|
|
||||||
// Engine retrieves the blockchain's consensus engine.
|
|
||||||
func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }
|
|
||||||
|
|
||||||
// SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
|
|
||||||
func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
|
|
||||||
return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
|
|
||||||
}
|
|
||||||
|
|
||||||
// SubscribeChainEvent registers a subscription of ChainEvent.
|
|
||||||
func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
|
|
||||||
return bc.scope.Track(bc.chainFeed.Subscribe(ch))
|
|
||||||
}
|
|
||||||
|
|
||||||
// SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
|
|
||||||
func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
|
|
||||||
return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
|
|
||||||
}
|
|
||||||
|
|
||||||
// SubscribeChainSideEvent registers a subscription of ChainSideEvent.
|
|
||||||
func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
|
|
||||||
return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
|
|
||||||
}
|
|
||||||
|
|
||||||
// SubscribeLogsEvent registers a subscription of []*types.Log.
|
|
||||||
func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
|
|
||||||
return bc.scope.Track(bc.logsFeed.Subscribe(ch))
|
|
||||||
}
|
|
||||||
|
|
||||||
// SubscribeBlockProcessingEvent registers a subscription of bool where true means
|
|
||||||
// block processing has started while false means it has stopped.
|
|
||||||
func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscription {
|
|
||||||
return bc.scope.Track(bc.blockProcFeed.Subscribe(ch))
|
|
||||||
}
|
|
||||||
|
|
|
@ -150,6 +150,14 @@ func (it *insertIterator) previous() *types.Header {
|
||||||
return it.chain[it.index-1].Header()
|
return it.chain[it.index-1].Header()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// current returns the current header that is being processed, or nil.
|
||||||
|
func (it *insertIterator) current() *types.Header {
|
||||||
|
if it.index == -1 || it.index >= len(it.chain) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return it.chain[it.index].Header()
|
||||||
|
}
|
||||||
|
|
||||||
// first returns the first block in the it.
|
// first returns the first block in the it.
|
||||||
func (it *insertIterator) first() *types.Block {
|
func (it *insertIterator) first() *types.Block {
|
||||||
return it.chain[0]
|
return it.chain[0]
|
||||||
|
|
|
@ -0,0 +1,387 @@
|
||||||
|
// Copyright 2021 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package core
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/big"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/consensus"
|
||||||
|
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||||
|
"github.com/ethereum/go-ethereum/core/state"
|
||||||
|
"github.com/ethereum/go-ethereum/core/state/snapshot"
|
||||||
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
|
"github.com/ethereum/go-ethereum/event"
|
||||||
|
"github.com/ethereum/go-ethereum/params"
|
||||||
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CurrentHeader retrieves the current head header of the canonical chain. The
|
||||||
|
// header is retrieved from the HeaderChain's internal cache.
|
||||||
|
func (bc *BlockChain) CurrentHeader() *types.Header {
|
||||||
|
return bc.hc.CurrentHeader()
|
||||||
|
}
|
||||||
|
|
||||||
|
// CurrentBlock retrieves the current head block of the canonical chain. The
|
||||||
|
// block is retrieved from the blockchain's internal cache.
|
||||||
|
func (bc *BlockChain) CurrentBlock() *types.Block {
|
||||||
|
return bc.currentBlock.Load().(*types.Block)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CurrentFastBlock retrieves the current fast-sync head block of the canonical
|
||||||
|
// chain. The block is retrieved from the blockchain's internal cache.
|
||||||
|
func (bc *BlockChain) CurrentFastBlock() *types.Block {
|
||||||
|
return bc.currentFastBlock.Load().(*types.Block)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasHeader checks if a block header is present in the database or not, caching
|
||||||
|
// it if present.
|
||||||
|
func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
|
||||||
|
return bc.hc.HasHeader(hash, number)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetHeader retrieves a block header from the database by hash and number,
|
||||||
|
// caching it if found.
|
||||||
|
func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
|
||||||
|
return bc.hc.GetHeader(hash, number)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetHeaderByHash retrieves a block header from the database by hash, caching it if
|
||||||
|
// found.
|
||||||
|
func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
|
||||||
|
return bc.hc.GetHeaderByHash(hash)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetHeaderByNumber retrieves a block header from the database by number,
|
||||||
|
// caching it (associated with its hash) if found.
|
||||||
|
func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
|
||||||
|
return bc.hc.GetHeaderByNumber(number)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBody retrieves a block body (transactions and uncles) from the database by
|
||||||
|
// hash, caching it if found.
|
||||||
|
func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
|
||||||
|
// Short circuit if the body's already in the cache, retrieve otherwise
|
||||||
|
if cached, ok := bc.bodyCache.Get(hash); ok {
|
||||||
|
body := cached.(*types.Body)
|
||||||
|
return body
|
||||||
|
}
|
||||||
|
number := bc.hc.GetBlockNumber(hash)
|
||||||
|
if number == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
body := rawdb.ReadBody(bc.db, hash, *number)
|
||||||
|
if body == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Cache the found body for next time and return
|
||||||
|
bc.bodyCache.Add(hash, body)
|
||||||
|
return body
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
|
||||||
|
// caching it if found.
|
||||||
|
func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
|
||||||
|
// Short circuit if the body's already in the cache, retrieve otherwise
|
||||||
|
if cached, ok := bc.bodyRLPCache.Get(hash); ok {
|
||||||
|
return cached.(rlp.RawValue)
|
||||||
|
}
|
||||||
|
number := bc.hc.GetBlockNumber(hash)
|
||||||
|
if number == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
body := rawdb.ReadBodyRLP(bc.db, hash, *number)
|
||||||
|
if len(body) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Cache the found body for next time and return
|
||||||
|
bc.bodyRLPCache.Add(hash, body)
|
||||||
|
return body
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasBlock checks if a block is fully present in the database or not.
|
||||||
|
func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
|
||||||
|
if bc.blockCache.Contains(hash) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return rawdb.HasBody(bc.db, hash, number)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasFastBlock checks if a fast block is fully present in the database or not.
|
||||||
|
func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool {
|
||||||
|
if !bc.HasBlock(hash, number) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if bc.receiptsCache.Contains(hash) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return rawdb.HasReceipts(bc.db, hash, number)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBlock retrieves a block from the database by hash and number,
|
||||||
|
// caching it if found.
|
||||||
|
func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
|
||||||
|
// Short circuit if the block's already in the cache, retrieve otherwise
|
||||||
|
if block, ok := bc.blockCache.Get(hash); ok {
|
||||||
|
return block.(*types.Block)
|
||||||
|
}
|
||||||
|
block := rawdb.ReadBlock(bc.db, hash, number)
|
||||||
|
if block == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Cache the found block for next time and return
|
||||||
|
bc.blockCache.Add(block.Hash(), block)
|
||||||
|
return block
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBlockByHash retrieves a block from the database by hash, caching it if found.
|
||||||
|
func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
|
||||||
|
number := bc.hc.GetBlockNumber(hash)
|
||||||
|
if number == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return bc.GetBlock(hash, *number)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBlockByNumber retrieves a block from the database by number, caching it
|
||||||
|
// (associated with its hash) if found.
|
||||||
|
func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
|
||||||
|
hash := rawdb.ReadCanonicalHash(bc.db, number)
|
||||||
|
if hash == (common.Hash{}) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return bc.GetBlock(hash, number)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
|
||||||
|
// [deprecated by eth/62]
|
||||||
|
func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
|
||||||
|
number := bc.hc.GetBlockNumber(hash)
|
||||||
|
if number == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
block := bc.GetBlock(hash, *number)
|
||||||
|
if block == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
blocks = append(blocks, block)
|
||||||
|
hash = block.ParentHash()
|
||||||
|
*number--
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetReceiptsByHash retrieves the receipts for all transactions in a given block.
|
||||||
|
func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
|
||||||
|
if receipts, ok := bc.receiptsCache.Get(hash); ok {
|
||||||
|
return receipts.(types.Receipts)
|
||||||
|
}
|
||||||
|
number := rawdb.ReadHeaderNumber(bc.db, hash)
|
||||||
|
if number == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)
|
||||||
|
if receipts == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
bc.receiptsCache.Add(hash, receipts)
|
||||||
|
return receipts
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetUnclesInChain retrieves all the uncles from a given block backwards until
|
||||||
|
// a specific distance is reached.
|
||||||
|
func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
|
||||||
|
uncles := []*types.Header{}
|
||||||
|
for i := 0; block != nil && i < length; i++ {
|
||||||
|
uncles = append(uncles, block.Uncles()...)
|
||||||
|
block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
|
||||||
|
}
|
||||||
|
return uncles
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetCanonicalHash returns the canonical hash for a given block number
|
||||||
|
func (bc *BlockChain) GetCanonicalHash(number uint64) common.Hash {
|
||||||
|
return bc.hc.GetCanonicalHash(number)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
|
||||||
|
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
|
||||||
|
// number of blocks to be individually checked before we reach the canonical chain.
|
||||||
|
//
|
||||||
|
// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
|
||||||
|
func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
|
||||||
|
return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetTransactionLookup retrieves the lookup associate with the given transaction
|
||||||
|
// hash from the cache or database.
|
||||||
|
func (bc *BlockChain) GetTransactionLookup(hash common.Hash) *rawdb.LegacyTxLookupEntry {
|
||||||
|
// Short circuit if the txlookup already in the cache, retrieve otherwise
|
||||||
|
if lookup, exist := bc.txLookupCache.Get(hash); exist {
|
||||||
|
return lookup.(*rawdb.LegacyTxLookupEntry)
|
||||||
|
}
|
||||||
|
tx, blockHash, blockNumber, txIndex := rawdb.ReadTransaction(bc.db, hash)
|
||||||
|
if tx == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
lookup := &rawdb.LegacyTxLookupEntry{BlockHash: blockHash, BlockIndex: blockNumber, Index: txIndex}
|
||||||
|
bc.txLookupCache.Add(hash, lookup)
|
||||||
|
return lookup
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetTd retrieves a block's total difficulty in the canonical chain from the
|
||||||
|
// database by hash and number, caching it if found.
|
||||||
|
func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
|
||||||
|
return bc.hc.GetTd(hash, number)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasState checks if state trie is fully present in the database or not.
|
||||||
|
func (bc *BlockChain) HasState(hash common.Hash) bool {
|
||||||
|
_, err := bc.stateCache.OpenTrie(hash)
|
||||||
|
return err == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasBlockAndState checks if a block and associated state trie is fully present
|
||||||
|
// in the database or not, caching it if present.
|
||||||
|
func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
|
||||||
|
// Check first that the block itself is known
|
||||||
|
block := bc.GetBlock(hash, number)
|
||||||
|
if block == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return bc.HasState(block.Root())
|
||||||
|
}
|
||||||
|
|
||||||
|
// TrieNode retrieves a blob of data associated with a trie node
|
||||||
|
// either from ephemeral in-memory cache, or from persistent storage.
|
||||||
|
func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
|
||||||
|
return bc.stateCache.TrieDB().Node(hash)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContractCode retrieves a blob of data associated with a contract hash
|
||||||
|
// either from ephemeral in-memory cache, or from persistent storage.
|
||||||
|
func (bc *BlockChain) ContractCode(hash common.Hash) ([]byte, error) {
|
||||||
|
return bc.stateCache.ContractCode(common.Hash{}, hash)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContractCodeWithPrefix retrieves a blob of data associated with a contract
|
||||||
|
// hash either from ephemeral in-memory cache, or from persistent storage.
|
||||||
|
//
|
||||||
|
// If the code doesn't exist in the in-memory cache, check the storage with
|
||||||
|
// new code scheme.
|
||||||
|
func (bc *BlockChain) ContractCodeWithPrefix(hash common.Hash) ([]byte, error) {
|
||||||
|
type codeReader interface {
|
||||||
|
ContractCodeWithPrefix(addrHash, codeHash common.Hash) ([]byte, error)
|
||||||
|
}
|
||||||
|
return bc.stateCache.(codeReader).ContractCodeWithPrefix(common.Hash{}, hash)
|
||||||
|
}
|
||||||
|
|
||||||
|
// State returns a new mutable state based on the current HEAD block.
|
||||||
|
func (bc *BlockChain) State() (*state.StateDB, error) {
|
||||||
|
return bc.StateAt(bc.CurrentBlock().Root())
|
||||||
|
}
|
||||||
|
|
||||||
|
// StateAt returns a new mutable state based on a particular point in time.
|
||||||
|
func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
|
||||||
|
return state.New(root, bc.stateCache, bc.snaps)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Config retrieves the chain's fork configuration.
|
||||||
|
func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }
|
||||||
|
|
||||||
|
// Engine retrieves the blockchain's consensus engine.
|
||||||
|
func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }
|
||||||
|
|
||||||
|
// Snapshots returns the blockchain snapshot tree.
|
||||||
|
func (bc *BlockChain) Snapshots() *snapshot.Tree {
|
||||||
|
return bc.snaps
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validator returns the current validator.
|
||||||
|
func (bc *BlockChain) Validator() Validator {
|
||||||
|
return bc.validator
|
||||||
|
}
|
||||||
|
|
||||||
|
// Processor returns the current processor.
|
||||||
|
func (bc *BlockChain) Processor() Processor {
|
||||||
|
return bc.processor
|
||||||
|
}
|
||||||
|
|
||||||
|
// StateCache returns the caching database underpinning the blockchain instance.
|
||||||
|
func (bc *BlockChain) StateCache() state.Database {
|
||||||
|
return bc.stateCache
|
||||||
|
}
|
||||||
|
|
||||||
|
// GasLimit returns the gas limit of the current HEAD block.
|
||||||
|
func (bc *BlockChain) GasLimit() uint64 {
|
||||||
|
return bc.CurrentBlock().GasLimit()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Genesis retrieves the chain's genesis block.
|
||||||
|
func (bc *BlockChain) Genesis() *types.Block {
|
||||||
|
return bc.genesisBlock
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetVMConfig returns the block chain VM config.
|
||||||
|
func (bc *BlockChain) GetVMConfig() *vm.Config {
|
||||||
|
return &bc.vmConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetTxLookupLimit is responsible for updating the txlookup limit to the
|
||||||
|
// original one stored in db if the new mismatches with the old one.
|
||||||
|
func (bc *BlockChain) SetTxLookupLimit(limit uint64) {
|
||||||
|
bc.txLookupLimit = limit
|
||||||
|
}
|
||||||
|
|
||||||
|
// TxLookupLimit retrieves the txlookup limit used by blockchain to prune
|
||||||
|
// stale transaction indices.
|
||||||
|
func (bc *BlockChain) TxLookupLimit() uint64 {
|
||||||
|
return bc.txLookupLimit
|
||||||
|
}
|
||||||
|
|
||||||
|
// SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
|
||||||
|
func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
|
||||||
|
return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SubscribeChainEvent registers a subscription of ChainEvent.
|
||||||
|
func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
|
||||||
|
return bc.scope.Track(bc.chainFeed.Subscribe(ch))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
|
||||||
|
func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
|
||||||
|
return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SubscribeChainSideEvent registers a subscription of ChainSideEvent.
|
||||||
|
func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
|
||||||
|
return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SubscribeLogsEvent registers a subscription of []*types.Log.
|
||||||
|
func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
|
||||||
|
return bc.scope.Track(bc.logsFeed.Subscribe(ch))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SubscribeBlockProcessingEvent registers a subscription of bool where true means
|
||||||
|
// block processing has started while false means it has stopped.
|
||||||
|
func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscription {
|
||||||
|
return bc.scope.Track(bc.blockProcFeed.Subscribe(ch))
|
||||||
|
}
|
|
@ -1863,3 +1863,124 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
|
||||||
t.Errorf("Frozen block count mismatch: have %d, want %d", frozen, tt.expFrozen)
|
t.Errorf("Frozen block count mismatch: have %d, want %d", frozen, tt.expFrozen)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestIssue23496 tests scenario described in https://github.com/ethereum/go-ethereum/pull/23496#issuecomment-926393893
|
||||||
|
// Credits to @zzyalbert for finding the issue.
|
||||||
|
//
|
||||||
|
// Local chain owns these blocks:
|
||||||
|
// G B1 B2 B3 B4
|
||||||
|
// B1: state committed
|
||||||
|
// B2: snapshot disk layer
|
||||||
|
// B3: state committed
|
||||||
|
// B4: head block
|
||||||
|
//
|
||||||
|
// Crash happens without fully persisting snapshot and in-memory states,
|
||||||
|
// chain rewinds itself to the B1 (skip B3 in order to recover snapshot)
|
||||||
|
// In this case the snapshot layer of B3 is not created because of existent
|
||||||
|
// state.
|
||||||
|
func TestIssue23496(t *testing.T) {
|
||||||
|
// It's hard to follow the test case, visualize the input
|
||||||
|
//log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
|
||||||
|
|
||||||
|
// Create a temporary persistent database
|
||||||
|
datadir, err := ioutil.TempDir("", "")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to create temporary datadir: %v", err)
|
||||||
|
}
|
||||||
|
os.RemoveAll(datadir)
|
||||||
|
|
||||||
|
db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to create persistent database: %v", err)
|
||||||
|
}
|
||||||
|
defer db.Close() // Might double close, should be fine
|
||||||
|
|
||||||
|
// Initialize a fresh chain
|
||||||
|
var (
|
||||||
|
genesis = (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
|
||||||
|
engine = ethash.NewFullFaker()
|
||||||
|
config = &CacheConfig{
|
||||||
|
TrieCleanLimit: 256,
|
||||||
|
TrieDirtyLimit: 256,
|
||||||
|
TrieTimeLimit: 5 * time.Minute,
|
||||||
|
SnapshotLimit: 256,
|
||||||
|
SnapshotWait: true,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
chain, err := NewBlockChain(db, config, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to create chain: %v", err)
|
||||||
|
}
|
||||||
|
blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), 4, func(i int, b *BlockGen) {
|
||||||
|
b.SetCoinbase(common.Address{0x02})
|
||||||
|
b.SetDifficulty(big.NewInt(1000000))
|
||||||
|
})
|
||||||
|
|
||||||
|
// Insert block B1 and commit the state into disk
|
||||||
|
if _, err := chain.InsertChain(blocks[:1]); err != nil {
|
||||||
|
t.Fatalf("Failed to import canonical chain start: %v", err)
|
||||||
|
}
|
||||||
|
chain.stateCache.TrieDB().Commit(blocks[0].Root(), true, nil)
|
||||||
|
|
||||||
|
// Insert block B2 and commit the snapshot into disk
|
||||||
|
if _, err := chain.InsertChain(blocks[1:2]); err != nil {
|
||||||
|
t.Fatalf("Failed to import canonical chain start: %v", err)
|
||||||
|
}
|
||||||
|
if err := chain.snaps.Cap(blocks[1].Root(), 0); err != nil {
|
||||||
|
t.Fatalf("Failed to flatten snapshots: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Insert block B3 and commit the state into disk
|
||||||
|
if _, err := chain.InsertChain(blocks[2:3]); err != nil {
|
||||||
|
t.Fatalf("Failed to import canonical chain start: %v", err)
|
||||||
|
}
|
||||||
|
chain.stateCache.TrieDB().Commit(blocks[2].Root(), true, nil)
|
||||||
|
|
||||||
|
// Insert the remaining blocks
|
||||||
|
if _, err := chain.InsertChain(blocks[3:]); err != nil {
|
||||||
|
t.Fatalf("Failed to import canonical chain tail: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pull the plug on the database, simulating a hard crash
|
||||||
|
db.Close()
|
||||||
|
|
||||||
|
// Start a new blockchain back up and see where the repair leads us
|
||||||
|
db, err = rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to reopen persistent database: %v", err)
|
||||||
|
}
|
||||||
|
defer db.Close()
|
||||||
|
|
||||||
|
chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to recreate chain: %v", err)
|
||||||
|
}
|
||||||
|
defer chain.Stop()
|
||||||
|
|
||||||
|
if head := chain.CurrentHeader(); head.Number.Uint64() != uint64(4) {
|
||||||
|
t.Errorf("Head header mismatch: have %d, want %d", head.Number, 4)
|
||||||
|
}
|
||||||
|
if head := chain.CurrentFastBlock(); head.NumberU64() != uint64(4) {
|
||||||
|
t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), uint64(4))
|
||||||
|
}
|
||||||
|
if head := chain.CurrentBlock(); head.NumberU64() != uint64(1) {
|
||||||
|
t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), uint64(1))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reinsert B2-B4
|
||||||
|
if _, err := chain.InsertChain(blocks[1:]); err != nil {
|
||||||
|
t.Fatalf("Failed to import canonical chain tail: %v", err)
|
||||||
|
}
|
||||||
|
if head := chain.CurrentHeader(); head.Number.Uint64() != uint64(4) {
|
||||||
|
t.Errorf("Head header mismatch: have %d, want %d", head.Number, 4)
|
||||||
|
}
|
||||||
|
if head := chain.CurrentFastBlock(); head.NumberU64() != uint64(4) {
|
||||||
|
t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), uint64(4))
|
||||||
|
}
|
||||||
|
if head := chain.CurrentBlock(); head.NumberU64() != uint64(4) {
|
||||||
|
t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), uint64(4))
|
||||||
|
}
|
||||||
|
if layer := chain.Snapshots().Snapshot(blocks[2].Root()); layer == nil {
|
||||||
|
t.Error("Failed to regenerate the snapshot of known state")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -118,17 +118,21 @@ func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, compara
|
||||||
var tdPre, tdPost *big.Int
|
var tdPre, tdPost *big.Int
|
||||||
|
|
||||||
if full {
|
if full {
|
||||||
tdPre = blockchain.GetTdByHash(blockchain.CurrentBlock().Hash())
|
cur := blockchain.CurrentBlock()
|
||||||
|
tdPre = blockchain.GetTd(cur.Hash(), cur.NumberU64())
|
||||||
if err := testBlockChainImport(blockChainB, blockchain); err != nil {
|
if err := testBlockChainImport(blockChainB, blockchain); err != nil {
|
||||||
t.Fatalf("failed to import forked block chain: %v", err)
|
t.Fatalf("failed to import forked block chain: %v", err)
|
||||||
}
|
}
|
||||||
tdPost = blockchain.GetTdByHash(blockChainB[len(blockChainB)-1].Hash())
|
last := blockChainB[len(blockChainB)-1]
|
||||||
|
tdPost = blockchain.GetTd(last.Hash(), last.NumberU64())
|
||||||
} else {
|
} else {
|
||||||
tdPre = blockchain.GetTdByHash(blockchain.CurrentHeader().Hash())
|
cur := blockchain.CurrentHeader()
|
||||||
|
tdPre = blockchain.GetTd(cur.Hash(), cur.Number.Uint64())
|
||||||
if err := testHeaderChainImport(headerChainB, blockchain); err != nil {
|
if err := testHeaderChainImport(headerChainB, blockchain); err != nil {
|
||||||
t.Fatalf("failed to import forked header chain: %v", err)
|
t.Fatalf("failed to import forked header chain: %v", err)
|
||||||
}
|
}
|
||||||
tdPost = blockchain.GetTdByHash(headerChainB[len(headerChainB)-1].Hash())
|
last := headerChainB[len(headerChainB)-1]
|
||||||
|
tdPost = blockchain.GetTd(last.Hash(), last.Number.Uint64())
|
||||||
}
|
}
|
||||||
// Compare the total difficulties of the chains
|
// Compare the total difficulties of the chains
|
||||||
comparator(tdPre, tdPost)
|
comparator(tdPre, tdPost)
|
||||||
|
@ -165,7 +169,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
blockchain.chainmu.MustLock()
|
blockchain.chainmu.MustLock()
|
||||||
rawdb.WriteTd(blockchain.db, block.Hash(), block.NumberU64(), new(big.Int).Add(block.Difficulty(), blockchain.GetTdByHash(block.ParentHash())))
|
rawdb.WriteTd(blockchain.db, block.Hash(), block.NumberU64(), new(big.Int).Add(block.Difficulty(), blockchain.GetTd(block.ParentHash(), block.NumberU64()-1)))
|
||||||
rawdb.WriteBlock(blockchain.db, block)
|
rawdb.WriteBlock(blockchain.db, block)
|
||||||
statedb.Commit(false)
|
statedb.Commit(false)
|
||||||
blockchain.chainmu.Unlock()
|
blockchain.chainmu.Unlock()
|
||||||
|
@ -183,7 +187,7 @@ func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error
|
||||||
}
|
}
|
||||||
// Manually insert the header into the database, but don't reorganise (allows subsequent testing)
|
// Manually insert the header into the database, but don't reorganise (allows subsequent testing)
|
||||||
blockchain.chainmu.MustLock()
|
blockchain.chainmu.MustLock()
|
||||||
rawdb.WriteTd(blockchain.db, header.Hash(), header.Number.Uint64(), new(big.Int).Add(header.Difficulty, blockchain.GetTdByHash(header.ParentHash)))
|
rawdb.WriteTd(blockchain.db, header.Hash(), header.Number.Uint64(), new(big.Int).Add(header.Difficulty, blockchain.GetTd(header.ParentHash, header.Number.Uint64()-1)))
|
||||||
rawdb.WriteHeader(blockchain.db, header)
|
rawdb.WriteHeader(blockchain.db, header)
|
||||||
blockchain.chainmu.Unlock()
|
blockchain.chainmu.Unlock()
|
||||||
}
|
}
|
||||||
|
@ -356,7 +360,7 @@ func TestReorgLongHeaders(t *testing.T) { testReorgLong(t, false) }
|
||||||
func TestReorgLongBlocks(t *testing.T) { testReorgLong(t, true) }
|
func TestReorgLongBlocks(t *testing.T) { testReorgLong(t, true) }
|
||||||
|
|
||||||
func testReorgLong(t *testing.T, full bool) {
|
func testReorgLong(t *testing.T, full bool) {
|
||||||
testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280, full)
|
testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280+params.GenesisDifficulty.Int64(), full)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Tests that reorganising a short difficult chain after a long easy one
|
// Tests that reorganising a short difficult chain after a long easy one
|
||||||
|
@ -376,7 +380,7 @@ func testReorgShort(t *testing.T, full bool) {
|
||||||
for i := 0; i < len(diff); i++ {
|
for i := 0; i < len(diff); i++ {
|
||||||
diff[i] = -9
|
diff[i] = -9
|
||||||
}
|
}
|
||||||
testReorg(t, easy, diff, 12615120, full)
|
testReorg(t, easy, diff, 12615120+params.GenesisDifficulty.Int64(), full)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testReorg(t *testing.T, first, second []int64, td int64, full bool) {
|
func testReorg(t *testing.T, first, second []int64, td int64, full bool) {
|
||||||
|
@ -436,11 +440,13 @@ func testReorg(t *testing.T, first, second []int64, td int64, full bool) {
|
||||||
// Make sure the chain total difficulty is the correct one
|
// Make sure the chain total difficulty is the correct one
|
||||||
want := new(big.Int).Add(blockchain.genesisBlock.Difficulty(), big.NewInt(td))
|
want := new(big.Int).Add(blockchain.genesisBlock.Difficulty(), big.NewInt(td))
|
||||||
if full {
|
if full {
|
||||||
if have := blockchain.GetTdByHash(blockchain.CurrentBlock().Hash()); have.Cmp(want) != 0 {
|
cur := blockchain.CurrentBlock()
|
||||||
|
if have := blockchain.GetTd(cur.Hash(), cur.NumberU64()); have.Cmp(want) != 0 {
|
||||||
t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
|
t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
if have := blockchain.GetTdByHash(blockchain.CurrentHeader().Hash()); have.Cmp(want) != 0 {
|
cur := blockchain.CurrentHeader()
|
||||||
|
if have := blockchain.GetTd(cur.Hash(), cur.Number.Uint64()); have.Cmp(want) != 0 {
|
||||||
t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
|
t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -676,10 +682,10 @@ func TestFastVsFullChains(t *testing.T) {
|
||||||
for i := 0; i < len(blocks); i++ {
|
for i := 0; i < len(blocks); i++ {
|
||||||
num, hash := blocks[i].NumberU64(), blocks[i].Hash()
|
num, hash := blocks[i].NumberU64(), blocks[i].Hash()
|
||||||
|
|
||||||
if ftd, atd := fast.GetTdByHash(hash), archive.GetTdByHash(hash); ftd.Cmp(atd) != 0 {
|
if ftd, atd := fast.GetTd(hash, num), archive.GetTd(hash, num); ftd.Cmp(atd) != 0 {
|
||||||
t.Errorf("block #%d [%x]: td mismatch: fastdb %v, archivedb %v", num, hash, ftd, atd)
|
t.Errorf("block #%d [%x]: td mismatch: fastdb %v, archivedb %v", num, hash, ftd, atd)
|
||||||
}
|
}
|
||||||
if antd, artd := ancient.GetTdByHash(hash), archive.GetTdByHash(hash); antd.Cmp(artd) != 0 {
|
if antd, artd := ancient.GetTd(hash, num), archive.GetTd(hash, num); antd.Cmp(artd) != 0 {
|
||||||
t.Errorf("block #%d [%x]: td mismatch: ancientdb %v, archivedb %v", num, hash, antd, artd)
|
t.Errorf("block #%d [%x]: td mismatch: ancientdb %v, archivedb %v", num, hash, antd, artd)
|
||||||
}
|
}
|
||||||
if fheader, aheader := fast.GetHeaderByHash(hash), archive.GetHeaderByHash(hash); fheader.Hash() != aheader.Hash() {
|
if fheader, aheader := fast.GetHeaderByHash(hash), archive.GetHeaderByHash(hash); fheader.Hash() != aheader.Hash() {
|
||||||
|
@ -2055,6 +2061,7 @@ func getLongAndShortChains() (bc *BlockChain, longChain []*types.Block, heavyCha
|
||||||
// 1. Have a chain [0 ... N .. X]
|
// 1. Have a chain [0 ... N .. X]
|
||||||
// 2. Reorg to shorter but heavier chain [0 ... N ... Y]
|
// 2. Reorg to shorter but heavier chain [0 ... N ... Y]
|
||||||
// 3. Then there should be no canon mapping for the block at height X
|
// 3. Then there should be no canon mapping for the block at height X
|
||||||
|
// 4. The forked block should still be retrievable by hash
|
||||||
func TestReorgToShorterRemovesCanonMapping(t *testing.T) {
|
func TestReorgToShorterRemovesCanonMapping(t *testing.T) {
|
||||||
chain, canonblocks, sideblocks, err := getLongAndShortChains()
|
chain, canonblocks, sideblocks, err := getLongAndShortChains()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -2064,6 +2071,7 @@ func TestReorgToShorterRemovesCanonMapping(t *testing.T) {
|
||||||
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
|
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
|
||||||
}
|
}
|
||||||
canonNum := chain.CurrentBlock().NumberU64()
|
canonNum := chain.CurrentBlock().NumberU64()
|
||||||
|
canonHash := chain.CurrentBlock().Hash()
|
||||||
_, err = chain.InsertChain(sideblocks)
|
_, err = chain.InsertChain(sideblocks)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("Got error, %v", err)
|
t.Errorf("Got error, %v", err)
|
||||||
|
@ -2079,6 +2087,12 @@ func TestReorgToShorterRemovesCanonMapping(t *testing.T) {
|
||||||
if headerByNum := chain.GetHeaderByNumber(canonNum); headerByNum != nil {
|
if headerByNum := chain.GetHeaderByNumber(canonNum); headerByNum != nil {
|
||||||
t.Errorf("expected header to be gone: %v", headerByNum.Number.Uint64())
|
t.Errorf("expected header to be gone: %v", headerByNum.Number.Uint64())
|
||||||
}
|
}
|
||||||
|
if blockByHash := chain.GetBlockByHash(canonHash); blockByHash == nil {
|
||||||
|
t.Errorf("expected block to be present: %x", blockByHash.Hash())
|
||||||
|
}
|
||||||
|
if headerByHash := chain.GetHeaderByHash(canonHash); headerByHash == nil {
|
||||||
|
t.Errorf("expected header to be present: %x", headerByHash.Hash())
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestReorgToShorterRemovesCanonMappingHeaderChain is the same scenario
|
// TestReorgToShorterRemovesCanonMappingHeaderChain is the same scenario
|
||||||
|
@ -2098,6 +2112,7 @@ func TestReorgToShorterRemovesCanonMappingHeaderChain(t *testing.T) {
|
||||||
t.Fatalf("header %d: failed to insert into chain: %v", n, err)
|
t.Fatalf("header %d: failed to insert into chain: %v", n, err)
|
||||||
}
|
}
|
||||||
canonNum := chain.CurrentHeader().Number.Uint64()
|
canonNum := chain.CurrentHeader().Number.Uint64()
|
||||||
|
canonHash := chain.CurrentBlock().Hash()
|
||||||
sideHeaders := make([]*types.Header, len(sideblocks))
|
sideHeaders := make([]*types.Header, len(sideblocks))
|
||||||
for i, block := range sideblocks {
|
for i, block := range sideblocks {
|
||||||
sideHeaders[i] = block.Header()
|
sideHeaders[i] = block.Header()
|
||||||
|
@ -2116,6 +2131,12 @@ func TestReorgToShorterRemovesCanonMappingHeaderChain(t *testing.T) {
|
||||||
if headerByNum := chain.GetHeaderByNumber(canonNum); headerByNum != nil {
|
if headerByNum := chain.GetHeaderByNumber(canonNum); headerByNum != nil {
|
||||||
t.Errorf("expected header to be gone: %v", headerByNum.Number.Uint64())
|
t.Errorf("expected header to be gone: %v", headerByNum.Number.Uint64())
|
||||||
}
|
}
|
||||||
|
if blockByHash := chain.GetBlockByHash(canonHash); blockByHash == nil {
|
||||||
|
t.Errorf("expected block to be present: %x", blockByHash.Hash())
|
||||||
|
}
|
||||||
|
if headerByHash := chain.GetHeaderByHash(canonHash); headerByHash == nil {
|
||||||
|
t.Errorf("expected header to be present: %x", headerByHash.Hash())
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTransactionIndices(t *testing.T) {
|
func TestTransactionIndices(t *testing.T) {
|
||||||
|
@ -2364,7 +2385,7 @@ func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks in
|
||||||
for txi := 0; txi < numTxs; txi++ {
|
for txi := 0; txi < numTxs; txi++ {
|
||||||
uniq := uint64(i*numTxs + txi)
|
uniq := uint64(i*numTxs + txi)
|
||||||
recipient := recipientFn(uniq)
|
recipient := recipientFn(uniq)
|
||||||
tx, err := types.SignTx(types.NewTransaction(uniq, recipient, big.NewInt(1), params.TxGas, big.NewInt(1), nil), signer, testBankKey)
|
tx, err := types.SignTx(types.NewTransaction(uniq, recipient, big.NewInt(1), params.TxGas, block.header.BaseFee, nil), signer, testBankKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.Error(err)
|
b.Error(err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -70,7 +70,7 @@ func BenchmarkGenerator(b *testing.B) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.Fatalf("failed to create bloombit generator: %v", err)
|
b.Fatalf("failed to create bloombit generator: %v", err)
|
||||||
}
|
}
|
||||||
for j, bloom := range input {
|
for j, bloom := range &input {
|
||||||
if err := gen.AddBloom(uint(j), bloom); err != nil {
|
if err := gen.AddBloom(uint(j), bloom); err != nil {
|
||||||
b.Fatalf("bloom %d: failed to add: %v", i, err)
|
b.Fatalf("bloom %d: failed to add: %v", i, err)
|
||||||
}
|
}
|
||||||
|
@ -89,7 +89,7 @@ func BenchmarkGenerator(b *testing.B) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.Fatalf("failed to create bloombit generator: %v", err)
|
b.Fatalf("failed to create bloombit generator: %v", err)
|
||||||
}
|
}
|
||||||
for j, bloom := range input {
|
for j, bloom := range &input {
|
||||||
if err := gen.AddBloom(uint(j), bloom); err != nil {
|
if err := gen.AddBloom(uint(j), bloom); err != nil {
|
||||||
b.Fatalf("bloom %d: failed to add: %v", i, err)
|
b.Fatalf("bloom %d: failed to add: %v", i, err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -63,8 +63,10 @@ func TestCreation(t *testing.T) {
|
||||||
{12243999, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // Last Muir Glacier block
|
{12243999, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // Last Muir Glacier block
|
||||||
{12244000, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // First Berlin block
|
{12244000, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // First Berlin block
|
||||||
{12964999, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // Last Berlin block
|
{12964999, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // Last Berlin block
|
||||||
{12965000, ID{Hash: checksumToBytes(0xb715077d), Next: 0}}, // First London block
|
{12965000, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // First London block
|
||||||
{20000000, ID{Hash: checksumToBytes(0xb715077d), Next: 0}}, // Future London block
|
{13772999, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // Last London block
|
||||||
|
{13773000, ID{Hash: checksumToBytes(0x20c327fc), Next: 0}}, /// First Arrow Glacier block
|
||||||
|
{20000000, ID{Hash: checksumToBytes(0x20c327fc), Next: 0}}, // Future Arrow Glacier block
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
// Ropsten test cases
|
// Ropsten test cases
|
||||||
|
@ -205,11 +207,11 @@ func TestValidation(t *testing.T) {
|
||||||
// Local is mainnet Petersburg, remote is Rinkeby Petersburg.
|
// Local is mainnet Petersburg, remote is Rinkeby Petersburg.
|
||||||
{7987396, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}, ErrLocalIncompatibleOrStale},
|
{7987396, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}, ErrLocalIncompatibleOrStale},
|
||||||
|
|
||||||
// Local is mainnet London, far in the future. Remote announces Gopherium (non existing fork)
|
// Local is mainnet Arrow Glacier, far in the future. Remote announces Gopherium (non existing fork)
|
||||||
// at some future block 88888888, for itself, but past block for local. Local is incompatible.
|
// at some future block 88888888, for itself, but past block for local. Local is incompatible.
|
||||||
//
|
//
|
||||||
// This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
|
// This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
|
||||||
{88888888, ID{Hash: checksumToBytes(0xb715077d), Next: 88888888}, ErrLocalIncompatibleOrStale},
|
{88888888, ID{Hash: checksumToBytes(0x20c327fc), Next: 88888888}, ErrLocalIncompatibleOrStale},
|
||||||
|
|
||||||
// Local is mainnet Byzantium. Remote is also in Byzantium, but announces Gopherium (non existing
|
// Local is mainnet Byzantium. Remote is also in Byzantium, but announces Gopherium (non existing
|
||||||
// fork) at block 7279999, before Petersburg. Local is incompatible.
|
// fork) at block 7279999, before Petersburg. Local is incompatible.
|
||||||
|
|
|
@ -158,7 +158,7 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig
|
||||||
return SetupGenesisBlockWithOverride(db, genesis, nil)
|
return SetupGenesisBlockWithOverride(db, genesis, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideLondon *big.Int) (*params.ChainConfig, common.Hash, error) {
|
func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideArrowGlacier *big.Int) (*params.ChainConfig, common.Hash, error) {
|
||||||
if genesis != nil && genesis.Config == nil {
|
if genesis != nil && genesis.Config == nil {
|
||||||
return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
|
return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
|
||||||
}
|
}
|
||||||
|
@ -204,8 +204,8 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
|
||||||
}
|
}
|
||||||
// Get the existing chain configuration.
|
// Get the existing chain configuration.
|
||||||
newcfg := genesis.configOrDefault(stored)
|
newcfg := genesis.configOrDefault(stored)
|
||||||
if overrideLondon != nil {
|
if overrideArrowGlacier != nil {
|
||||||
newcfg.LondonBlock = overrideLondon
|
newcfg.ArrowGlacierBlock = overrideArrowGlacier
|
||||||
}
|
}
|
||||||
if err := newcfg.CheckConfigForkOrder(); err != nil {
|
if err := newcfg.CheckConfigForkOrder(); err != nil {
|
||||||
return newcfg, common.Hash{}, err
|
return newcfg, common.Hash{}, err
|
||||||
|
@ -322,7 +322,7 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
|
||||||
if config.Clique != nil && len(block.Extra()) == 0 {
|
if config.Clique != nil && len(block.Extra()) == 0 {
|
||||||
return nil, errors.New("can't start clique chain without signers")
|
return nil, errors.New("can't start clique chain without signers")
|
||||||
}
|
}
|
||||||
rawdb.WriteTd(db, block.Hash(), block.NumberU64(), g.Difficulty)
|
rawdb.WriteTd(db, block.Hash(), block.NumberU64(), block.Difficulty())
|
||||||
rawdb.WriteBlock(db, block)
|
rawdb.WriteBlock(db, block)
|
||||||
rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil)
|
rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil)
|
||||||
rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64())
|
rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64())
|
||||||
|
|
|
@ -209,3 +209,33 @@ func TestGenesisHashes(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestGenesis_Commit(t *testing.T) {
|
||||||
|
genesis := &Genesis{
|
||||||
|
BaseFee: big.NewInt(params.InitialBaseFee),
|
||||||
|
Config: params.TestChainConfig,
|
||||||
|
// difficulty is nil
|
||||||
|
}
|
||||||
|
|
||||||
|
db := rawdb.NewMemoryDatabase()
|
||||||
|
genesisBlock, err := genesis.Commit(db)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if genesis.Difficulty != nil {
|
||||||
|
t.Fatalf("assumption wrong")
|
||||||
|
}
|
||||||
|
|
||||||
|
// This value should have been set as default in the ToBlock method.
|
||||||
|
if genesisBlock.Difficulty().Cmp(params.GenesisDifficulty) != 0 {
|
||||||
|
t.Errorf("assumption wrong: want: %d, got: %v", params.GenesisDifficulty, genesisBlock.Difficulty())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Expect the stored total difficulty to be the difficulty of the genesis block.
|
||||||
|
stored := rawdb.ReadTd(db, genesisBlock.Hash(), genesisBlock.NumberU64())
|
||||||
|
|
||||||
|
if stored.Cmp(genesisBlock.Difficulty()) != 0 {
|
||||||
|
t.Errorf("inequal difficulty; stored: %v, genesisBlock: %v", stored, genesisBlock.Difficulty())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -394,29 +394,6 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, start time.Time)
|
||||||
return res.status, err
|
return res.status, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
|
|
||||||
// hash, fetching towards the genesis block.
|
|
||||||
func (hc *HeaderChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
|
|
||||||
// Get the origin header from which to fetch
|
|
||||||
header := hc.GetHeaderByHash(hash)
|
|
||||||
if header == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// Iterate the headers until enough is collected or the genesis reached
|
|
||||||
chain := make([]common.Hash, 0, max)
|
|
||||||
for i := uint64(0); i < max; i++ {
|
|
||||||
next := header.ParentHash
|
|
||||||
if header = hc.GetHeader(next, header.Number.Uint64()-1); header == nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
chain = append(chain, next)
|
|
||||||
if header.Number.Sign() == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return chain
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
|
// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
|
||||||
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
|
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
|
||||||
// number of blocks to be individually checked before we reach the canonical chain.
|
// number of blocks to be individually checked before we reach the canonical chain.
|
||||||
|
@ -472,16 +449,6 @@ func (hc *HeaderChain) GetTd(hash common.Hash, number uint64) *big.Int {
|
||||||
return td
|
return td
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetTdByHash retrieves a block's total difficulty in the canonical chain from the
|
|
||||||
// database by hash, caching it if found.
|
|
||||||
func (hc *HeaderChain) GetTdByHash(hash common.Hash) *big.Int {
|
|
||||||
number := hc.GetBlockNumber(hash)
|
|
||||||
if number == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return hc.GetTd(hash, *number)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetHeader retrieves a block header from the database by hash and number,
|
// GetHeader retrieves a block header from the database by hash and number,
|
||||||
// caching it if found.
|
// caching it if found.
|
||||||
func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header {
|
func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header {
|
||||||
|
|
|
@ -35,20 +35,15 @@ import (
|
||||||
|
|
||||||
// ReadCanonicalHash retrieves the hash assigned to a canonical block number.
|
// ReadCanonicalHash retrieves the hash assigned to a canonical block number.
|
||||||
func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash {
|
func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash {
|
||||||
data, _ := db.Ancient(freezerHashTable, number)
|
var data []byte
|
||||||
if len(data) == 0 {
|
db.ReadAncients(func(reader ethdb.AncientReader) error {
|
||||||
data, _ = db.Get(headerHashKey(number))
|
data, _ = reader.Ancient(freezerHashTable, number)
|
||||||
// In the background freezer is moving data from leveldb to flatten files.
|
|
||||||
// So during the first check for ancient db, the data is not yet in there,
|
|
||||||
// but when we reach into leveldb, the data was already moved. That would
|
|
||||||
// result in a not found error.
|
|
||||||
if len(data) == 0 {
|
if len(data) == 0 {
|
||||||
data, _ = db.Ancient(freezerHashTable, number)
|
// Get it by hash from leveldb
|
||||||
|
data, _ = db.Get(headerHashKey(number))
|
||||||
}
|
}
|
||||||
}
|
return nil
|
||||||
if len(data) == 0 {
|
})
|
||||||
return common.Hash{}
|
|
||||||
}
|
|
||||||
return common.BytesToHash(data)
|
return common.BytesToHash(data)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -304,32 +299,25 @@ func WriteFastTxLookupLimit(db ethdb.KeyValueWriter, number uint64) {
|
||||||
|
|
||||||
// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
|
// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
|
||||||
func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
|
func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
|
||||||
// First try to look up the data in ancient database. Extra hash
|
var data []byte
|
||||||
// comparison is necessary since ancient database only maintains
|
db.ReadAncients(func(reader ethdb.AncientReader) error {
|
||||||
// the canonical data.
|
// First try to look up the data in ancient database. Extra hash
|
||||||
data, _ := db.Ancient(freezerHeaderTable, number)
|
// comparison is necessary since ancient database only maintains
|
||||||
if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
|
// the canonical data.
|
||||||
return data
|
data, _ = reader.Ancient(freezerHeaderTable, number)
|
||||||
}
|
if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
|
||||||
// Then try to look up the data in leveldb.
|
return nil
|
||||||
data, _ = db.Get(headerKey(number, hash))
|
}
|
||||||
if len(data) > 0 {
|
// If not, try reading from leveldb
|
||||||
return data
|
data, _ = db.Get(headerKey(number, hash))
|
||||||
}
|
return nil
|
||||||
// In the background freezer is moving data from leveldb to flatten files.
|
})
|
||||||
// So during the first check for ancient db, the data is not yet in there,
|
return data
|
||||||
// but when we reach into leveldb, the data was already moved. That would
|
|
||||||
// result in a not found error.
|
|
||||||
data, _ = db.Ancient(freezerHeaderTable, number)
|
|
||||||
if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
|
|
||||||
return data
|
|
||||||
}
|
|
||||||
return nil // Can't find the data anywhere.
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// HasHeader verifies the existence of a block header corresponding to the hash.
|
// HasHeader verifies the existence of a block header corresponding to the hash.
|
||||||
func HasHeader(db ethdb.Reader, hash common.Hash, number uint64) bool {
|
func HasHeader(db ethdb.Reader, hash common.Hash, number uint64) bool {
|
||||||
if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
|
if isCanon(db, number, hash) {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
if has, err := db.Has(headerKey(number, hash)); !has || err != nil {
|
if has, err := db.Has(headerKey(number, hash)); !has || err != nil {
|
||||||
|
@ -389,53 +377,48 @@ func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// isCanon is an internal utility method, to check whether the given number/hash
|
||||||
|
// is part of the ancient (canon) set.
|
||||||
|
func isCanon(reader ethdb.AncientReader, number uint64, hash common.Hash) bool {
|
||||||
|
h, err := reader.Ancient(freezerHashTable, number)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return bytes.Equal(h, hash[:])
|
||||||
|
}
|
||||||
|
|
||||||
// ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
|
// ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
|
||||||
func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
|
func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
|
||||||
// First try to look up the data in ancient database. Extra hash
|
// First try to look up the data in ancient database. Extra hash
|
||||||
// comparison is necessary since ancient database only maintains
|
// comparison is necessary since ancient database only maintains
|
||||||
// the canonical data.
|
// the canonical data.
|
||||||
data, _ := db.Ancient(freezerBodiesTable, number)
|
var data []byte
|
||||||
if len(data) > 0 {
|
db.ReadAncients(func(reader ethdb.AncientReader) error {
|
||||||
h, _ := db.Ancient(freezerHashTable, number)
|
// Check if the data is in ancients
|
||||||
if common.BytesToHash(h) == hash {
|
if isCanon(reader, number, hash) {
|
||||||
return data
|
data, _ = reader.Ancient(freezerBodiesTable, number)
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
}
|
// If not, try reading from leveldb
|
||||||
// Then try to look up the data in leveldb.
|
data, _ = db.Get(blockBodyKey(number, hash))
|
||||||
data, _ = db.Get(blockBodyKey(number, hash))
|
return nil
|
||||||
if len(data) > 0 {
|
})
|
||||||
return data
|
return data
|
||||||
}
|
|
||||||
// In the background freezer is moving data from leveldb to flatten files.
|
|
||||||
// So during the first check for ancient db, the data is not yet in there,
|
|
||||||
// but when we reach into leveldb, the data was already moved. That would
|
|
||||||
// result in a not found error.
|
|
||||||
data, _ = db.Ancient(freezerBodiesTable, number)
|
|
||||||
if len(data) > 0 {
|
|
||||||
h, _ := db.Ancient(freezerHashTable, number)
|
|
||||||
if common.BytesToHash(h) == hash {
|
|
||||||
return data
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil // Can't find the data anywhere.
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the canonical
|
// ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the canonical
|
||||||
// block at number, in RLP encoding.
|
// block at number, in RLP encoding.
|
||||||
func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
|
func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
|
||||||
// If it's an ancient one, we don't need the canonical hash
|
var data []byte
|
||||||
data, _ := db.Ancient(freezerBodiesTable, number)
|
db.ReadAncients(func(reader ethdb.AncientReader) error {
|
||||||
if len(data) == 0 {
|
data, _ = reader.Ancient(freezerBodiesTable, number)
|
||||||
// Need to get the hash
|
if len(data) > 0 {
|
||||||
data, _ = db.Get(blockBodyKey(number, ReadCanonicalHash(db, number)))
|
return nil
|
||||||
// In the background freezer is moving data from leveldb to flatten files.
|
|
||||||
// So during the first check for ancient db, the data is not yet in there,
|
|
||||||
// but when we reach into leveldb, the data was already moved. That would
|
|
||||||
// result in a not found error.
|
|
||||||
if len(data) == 0 {
|
|
||||||
data, _ = db.Ancient(freezerBodiesTable, number)
|
|
||||||
}
|
}
|
||||||
}
|
// Get it by hash from leveldb
|
||||||
|
data, _ = db.Get(blockBodyKey(number, ReadCanonicalHash(db, number)))
|
||||||
|
return nil
|
||||||
|
})
|
||||||
return data
|
return data
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -448,7 +431,7 @@ func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp
|
||||||
|
|
||||||
// HasBody verifies the existence of a block body corresponding to the hash.
|
// HasBody verifies the existence of a block body corresponding to the hash.
|
||||||
func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool {
|
func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool {
|
||||||
if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
|
if isCanon(db, number, hash) {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil {
|
if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil {
|
||||||
|
@ -489,33 +472,18 @@ func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
|
||||||
|
|
||||||
// ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding.
|
// ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding.
|
||||||
func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
|
func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
|
||||||
// First try to look up the data in ancient database. Extra hash
|
var data []byte
|
||||||
// comparison is necessary since ancient database only maintains
|
db.ReadAncients(func(reader ethdb.AncientReader) error {
|
||||||
// the canonical data.
|
// Check if the data is in ancients
|
||||||
data, _ := db.Ancient(freezerDifficultyTable, number)
|
if isCanon(reader, number, hash) {
|
||||||
if len(data) > 0 {
|
data, _ = reader.Ancient(freezerDifficultyTable, number)
|
||||||
h, _ := db.Ancient(freezerHashTable, number)
|
return nil
|
||||||
if common.BytesToHash(h) == hash {
|
|
||||||
return data
|
|
||||||
}
|
}
|
||||||
}
|
// If not, try reading from leveldb
|
||||||
// Then try to look up the data in leveldb.
|
data, _ = db.Get(headerTDKey(number, hash))
|
||||||
data, _ = db.Get(headerTDKey(number, hash))
|
return nil
|
||||||
if len(data) > 0 {
|
})
|
||||||
return data
|
return data
|
||||||
}
|
|
||||||
// In the background freezer is moving data from leveldb to flatten files.
|
|
||||||
// So during the first check for ancient db, the data is not yet in there,
|
|
||||||
// but when we reach into leveldb, the data was already moved. That would
|
|
||||||
// result in a not found error.
|
|
||||||
data, _ = db.Ancient(freezerDifficultyTable, number)
|
|
||||||
if len(data) > 0 {
|
|
||||||
h, _ := db.Ancient(freezerHashTable, number)
|
|
||||||
if common.BytesToHash(h) == hash {
|
|
||||||
return data
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil // Can't find the data anywhere.
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReadTd retrieves a block's total difficulty corresponding to the hash.
|
// ReadTd retrieves a block's total difficulty corresponding to the hash.
|
||||||
|
@ -553,7 +521,7 @@ func DeleteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
|
||||||
// HasReceipts verifies the existence of all the transaction receipts belonging
|
// HasReceipts verifies the existence of all the transaction receipts belonging
|
||||||
// to a block.
|
// to a block.
|
||||||
func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
|
func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
|
||||||
if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
|
if isCanon(db, number, hash) {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil {
|
if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil {
|
||||||
|
@ -564,33 +532,18 @@ func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
|
||||||
|
|
||||||
// ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
|
// ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
|
||||||
func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
|
func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
|
||||||
// First try to look up the data in ancient database. Extra hash
|
var data []byte
|
||||||
// comparison is necessary since ancient database only maintains
|
db.ReadAncients(func(reader ethdb.AncientReader) error {
|
||||||
// the canonical data.
|
// Check if the data is in ancients
|
||||||
data, _ := db.Ancient(freezerReceiptTable, number)
|
if isCanon(reader, number, hash) {
|
||||||
if len(data) > 0 {
|
data, _ = reader.Ancient(freezerReceiptTable, number)
|
||||||
h, _ := db.Ancient(freezerHashTable, number)
|
return nil
|
||||||
if common.BytesToHash(h) == hash {
|
|
||||||
return data
|
|
||||||
}
|
}
|
||||||
}
|
// If not, try reading from leveldb
|
||||||
// Then try to look up the data in leveldb.
|
data, _ = db.Get(blockReceiptsKey(number, hash))
|
||||||
data, _ = db.Get(blockReceiptsKey(number, hash))
|
return nil
|
||||||
if len(data) > 0 {
|
})
|
||||||
return data
|
return data
|
||||||
}
|
|
||||||
// In the background freezer is moving data from leveldb to flatten files.
|
|
||||||
// So during the first check for ancient db, the data is not yet in there,
|
|
||||||
// but when we reach into leveldb, the data was already moved. That would
|
|
||||||
// result in a not found error.
|
|
||||||
data, _ = db.Ancient(freezerReceiptTable, number)
|
|
||||||
if len(data) > 0 {
|
|
||||||
h, _ := db.Ancient(freezerHashTable, number)
|
|
||||||
if common.BytesToHash(h) == hash {
|
|
||||||
return data
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil // Can't find the data anywhere.
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReadRawReceipts retrieves all the transaction receipts belonging to a block.
|
// ReadRawReceipts retrieves all the transaction receipts belonging to a block.
|
||||||
|
|
|
@ -47,7 +47,7 @@ func DeleteSnapshotDisabled(db ethdb.KeyValueWriter) {
|
||||||
// ReadSnapshotRoot retrieves the root of the block whose state is contained in
|
// ReadSnapshotRoot retrieves the root of the block whose state is contained in
|
||||||
// the persisted snapshot.
|
// the persisted snapshot.
|
||||||
func ReadSnapshotRoot(db ethdb.KeyValueReader) common.Hash {
|
func ReadSnapshotRoot(db ethdb.KeyValueReader) common.Hash {
|
||||||
data, _ := db.Get(snapshotRootKey)
|
data, _ := db.Get(SnapshotRootKey)
|
||||||
if len(data) != common.HashLength {
|
if len(data) != common.HashLength {
|
||||||
return common.Hash{}
|
return common.Hash{}
|
||||||
}
|
}
|
||||||
|
@ -57,7 +57,7 @@ func ReadSnapshotRoot(db ethdb.KeyValueReader) common.Hash {
|
||||||
// WriteSnapshotRoot stores the root of the block whose state is contained in
|
// WriteSnapshotRoot stores the root of the block whose state is contained in
|
||||||
// the persisted snapshot.
|
// the persisted snapshot.
|
||||||
func WriteSnapshotRoot(db ethdb.KeyValueWriter, root common.Hash) {
|
func WriteSnapshotRoot(db ethdb.KeyValueWriter, root common.Hash) {
|
||||||
if err := db.Put(snapshotRootKey, root[:]); err != nil {
|
if err := db.Put(SnapshotRootKey, root[:]); err != nil {
|
||||||
log.Crit("Failed to store snapshot root", "err", err)
|
log.Crit("Failed to store snapshot root", "err", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -67,7 +67,7 @@ func WriteSnapshotRoot(db ethdb.KeyValueWriter, root common.Hash) {
|
||||||
// be used during updates, so a crash or failure will mark the entire snapshot
|
// be used during updates, so a crash or failure will mark the entire snapshot
|
||||||
// invalid.
|
// invalid.
|
||||||
func DeleteSnapshotRoot(db ethdb.KeyValueWriter) {
|
func DeleteSnapshotRoot(db ethdb.KeyValueWriter) {
|
||||||
if err := db.Delete(snapshotRootKey); err != nil {
|
if err := db.Delete(SnapshotRootKey); err != nil {
|
||||||
log.Crit("Failed to remove snapshot root", "err", err)
|
log.Crit("Failed to remove snapshot root", "err", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -89,8 +89,8 @@ func (db *nofreezedb) Ancient(kind string, number uint64) ([]byte, error) {
|
||||||
return nil, errNotSupported
|
return nil, errNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReadAncients returns an error as we don't have a backing chain freezer.
|
// AncientRange returns an error as we don't have a backing chain freezer.
|
||||||
func (db *nofreezedb) ReadAncients(kind string, start, max, maxByteSize uint64) ([][]byte, error) {
|
func (db *nofreezedb) AncientRange(kind string, start, max, maxByteSize uint64) ([][]byte, error) {
|
||||||
return nil, errNotSupported
|
return nil, errNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -119,6 +119,22 @@ func (db *nofreezedb) Sync() error {
|
||||||
return errNotSupported
|
return errNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReader) error) (err error) {
|
||||||
|
// Unlike other ancient-related methods, this method does not return
|
||||||
|
// errNotSupported when invoked.
|
||||||
|
// The reason for this is that the caller might want to do several things:
|
||||||
|
// 1. Check if something is in freezer,
|
||||||
|
// 2. If not, check leveldb.
|
||||||
|
//
|
||||||
|
// This will work, since the ancient-checks inside 'fn' will return errors,
|
||||||
|
// and the leveldb work will continue.
|
||||||
|
//
|
||||||
|
// If we instead were to return errNotSupported here, then the caller would
|
||||||
|
// have to explicitly check for that, having an extra clause to do the
|
||||||
|
// non-ancient operations.
|
||||||
|
return fn(db)
|
||||||
|
}
|
||||||
|
|
||||||
// NewDatabase creates a high level database on top of a given key-value data
|
// NewDatabase creates a high level database on top of a given key-value data
|
||||||
// store without a freezer moving immutable chain segments into cold storage.
|
// store without a freezer moving immutable chain segments into cold storage.
|
||||||
func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
|
func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
|
||||||
|
@ -355,7 +371,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
|
||||||
accountSnaps.Add(size)
|
accountSnaps.Add(size)
|
||||||
case bytes.HasPrefix(key, SnapshotStoragePrefix) && len(key) == (len(SnapshotStoragePrefix)+2*common.HashLength):
|
case bytes.HasPrefix(key, SnapshotStoragePrefix) && len(key) == (len(SnapshotStoragePrefix)+2*common.HashLength):
|
||||||
storageSnaps.Add(size)
|
storageSnaps.Add(size)
|
||||||
case bytes.HasPrefix(key, preimagePrefix) && len(key) == (len(preimagePrefix)+common.HashLength):
|
case bytes.HasPrefix(key, PreimagePrefix) && len(key) == (len(PreimagePrefix)+common.HashLength):
|
||||||
preimages.Add(size)
|
preimages.Add(size)
|
||||||
case bytes.HasPrefix(key, configPrefix) && len(key) == (len(configPrefix)+common.HashLength):
|
case bytes.HasPrefix(key, configPrefix) && len(key) == (len(configPrefix)+common.HashLength):
|
||||||
metadata.Add(size)
|
metadata.Add(size)
|
||||||
|
@ -377,7 +393,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
|
||||||
var accounted bool
|
var accounted bool
|
||||||
for _, meta := range [][]byte{
|
for _, meta := range [][]byte{
|
||||||
databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, lastPivotKey,
|
databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, lastPivotKey,
|
||||||
fastTrieProgressKey, snapshotDisabledKey, snapshotRootKey, snapshotJournalKey,
|
fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey,
|
||||||
snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
|
snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
|
||||||
uncleanShutdownKey, badBlockKey,
|
uncleanShutdownKey, badBlockKey,
|
||||||
} {
|
} {
|
||||||
|
|
|
@ -80,8 +80,9 @@ type freezer struct {
|
||||||
frozen uint64 // Number of blocks already frozen
|
frozen uint64 // Number of blocks already frozen
|
||||||
threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
|
threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
|
||||||
|
|
||||||
// This lock synchronizes writers and the truncate operation.
|
// This lock synchronizes writers and the truncate operation, as well as
|
||||||
writeLock sync.Mutex
|
// the "atomic" (batched) read operations.
|
||||||
|
writeLock sync.RWMutex
|
||||||
writeBatch *freezerBatch
|
writeBatch *freezerBatch
|
||||||
|
|
||||||
readonly bool
|
readonly bool
|
||||||
|
@ -201,12 +202,12 @@ func (f *freezer) Ancient(kind string, number uint64) ([]byte, error) {
|
||||||
return nil, errUnknownTable
|
return nil, errUnknownTable
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReadAncients retrieves multiple items in sequence, starting from the index 'start'.
|
// AncientRange retrieves multiple items in sequence, starting from the index 'start'.
|
||||||
// It will return
|
// It will return
|
||||||
// - at most 'max' items,
|
// - at most 'max' items,
|
||||||
// - at least 1 item (even if exceeding the maxByteSize), but will otherwise
|
// - at least 1 item (even if exceeding the maxByteSize), but will otherwise
|
||||||
// return as many items as fit into maxByteSize.
|
// return as many items as fit into maxByteSize.
|
||||||
func (f *freezer) ReadAncients(kind string, start, count, maxBytes uint64) ([][]byte, error) {
|
func (f *freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
|
||||||
if table := f.tables[kind]; table != nil {
|
if table := f.tables[kind]; table != nil {
|
||||||
return table.RetrieveItems(start, count, maxBytes)
|
return table.RetrieveItems(start, count, maxBytes)
|
||||||
}
|
}
|
||||||
|
@ -222,8 +223,8 @@ func (f *freezer) Ancients() (uint64, error) {
|
||||||
func (f *freezer) AncientSize(kind string) (uint64, error) {
|
func (f *freezer) AncientSize(kind string) (uint64, error) {
|
||||||
// This needs the write lock to avoid data races on table fields.
|
// This needs the write lock to avoid data races on table fields.
|
||||||
// Speed doesn't matter here, AncientSize is for debugging.
|
// Speed doesn't matter here, AncientSize is for debugging.
|
||||||
f.writeLock.Lock()
|
f.writeLock.RLock()
|
||||||
defer f.writeLock.Unlock()
|
defer f.writeLock.RUnlock()
|
||||||
|
|
||||||
if table := f.tables[kind]; table != nil {
|
if table := f.tables[kind]; table != nil {
|
||||||
return table.size()
|
return table.size()
|
||||||
|
@ -231,6 +232,14 @@ func (f *freezer) AncientSize(kind string) (uint64, error) {
|
||||||
return 0, errUnknownTable
|
return 0, errUnknownTable
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ReadAncients runs the given read operation while ensuring that no writes take place
|
||||||
|
// on the underlying freezer.
|
||||||
|
func (f *freezer) ReadAncients(fn func(ethdb.AncientReader) error) (err error) {
|
||||||
|
f.writeLock.RLock()
|
||||||
|
defer f.writeLock.RUnlock()
|
||||||
|
return fn(f)
|
||||||
|
}
|
||||||
|
|
||||||
// ModifyAncients runs the given write operation.
|
// ModifyAncients runs the given write operation.
|
||||||
func (f *freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize int64, err error) {
|
func (f *freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize int64, err error) {
|
||||||
if f.readonly {
|
if f.readonly {
|
||||||
|
|
|
@ -48,8 +48,8 @@ var (
|
||||||
// snapshotDisabledKey flags that the snapshot should not be maintained due to initial sync.
|
// snapshotDisabledKey flags that the snapshot should not be maintained due to initial sync.
|
||||||
snapshotDisabledKey = []byte("SnapshotDisabled")
|
snapshotDisabledKey = []byte("SnapshotDisabled")
|
||||||
|
|
||||||
// snapshotRootKey tracks the hash of the last snapshot.
|
// SnapshotRootKey tracks the hash of the last snapshot.
|
||||||
snapshotRootKey = []byte("SnapshotRoot")
|
SnapshotRootKey = []byte("SnapshotRoot")
|
||||||
|
|
||||||
// snapshotJournalKey tracks the in-memory diff layers across restarts.
|
// snapshotJournalKey tracks the in-memory diff layers across restarts.
|
||||||
snapshotJournalKey = []byte("SnapshotJournal")
|
snapshotJournalKey = []byte("SnapshotJournal")
|
||||||
|
@ -90,7 +90,7 @@ var (
|
||||||
SnapshotStoragePrefix = []byte("o") // SnapshotStoragePrefix + account hash + storage hash -> storage trie value
|
SnapshotStoragePrefix = []byte("o") // SnapshotStoragePrefix + account hash + storage hash -> storage trie value
|
||||||
CodePrefix = []byte("c") // CodePrefix + code hash -> account code
|
CodePrefix = []byte("c") // CodePrefix + code hash -> account code
|
||||||
|
|
||||||
preimagePrefix = []byte("secure-key-") // preimagePrefix + hash -> preimage
|
PreimagePrefix = []byte("secure-key-") // PreimagePrefix + hash -> preimage
|
||||||
configPrefix = []byte("ethereum-config-") // config prefix for the db
|
configPrefix = []byte("ethereum-config-") // config prefix for the db
|
||||||
|
|
||||||
// Chain index prefixes (use `i` + single byte to avoid mixing data types).
|
// Chain index prefixes (use `i` + single byte to avoid mixing data types).
|
||||||
|
@ -207,9 +207,9 @@ func bloomBitsKey(bit uint, section uint64, hash common.Hash) []byte {
|
||||||
return key
|
return key
|
||||||
}
|
}
|
||||||
|
|
||||||
// preimageKey = preimagePrefix + hash
|
// preimageKey = PreimagePrefix + hash
|
||||||
func preimageKey(hash common.Hash) []byte {
|
func preimageKey(hash common.Hash) []byte {
|
||||||
return append(preimagePrefix, hash.Bytes()...)
|
return append(PreimagePrefix, hash.Bytes()...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// codeKey = CodePrefix + hash
|
// codeKey = CodePrefix + hash
|
||||||
|
|
|
@ -62,10 +62,10 @@ func (t *table) Ancient(kind string, number uint64) ([]byte, error) {
|
||||||
return t.db.Ancient(kind, number)
|
return t.db.Ancient(kind, number)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReadAncients is a noop passthrough that just forwards the request to the underlying
|
// AncientRange is a noop passthrough that just forwards the request to the underlying
|
||||||
// database.
|
// database.
|
||||||
func (t *table) ReadAncients(kind string, start, count, maxBytes uint64) ([][]byte, error) {
|
func (t *table) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
|
||||||
return t.db.ReadAncients(kind, start, count, maxBytes)
|
return t.db.AncientRange(kind, start, count, maxBytes)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ancients is a noop passthrough that just forwards the request to the underlying
|
// Ancients is a noop passthrough that just forwards the request to the underlying
|
||||||
|
@ -85,6 +85,10 @@ func (t *table) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (int64, erro
|
||||||
return t.db.ModifyAncients(fn)
|
return t.db.ModifyAncients(fn)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (t *table) ReadAncients(fn func(reader ethdb.AncientReader) error) (err error) {
|
||||||
|
return t.db.ReadAncients(fn)
|
||||||
|
}
|
||||||
|
|
||||||
// TruncateAncients is a noop passthrough that just forwards the request to the underlying
|
// TruncateAncients is a noop passthrough that just forwards the request to the underlying
|
||||||
// database.
|
// database.
|
||||||
func (t *table) TruncateAncients(items uint64) error {
|
func (t *table) TruncateAncients(items uint64) error {
|
||||||
|
|
|
@ -40,7 +40,7 @@ func getBlock(transactions int, uncles int, dataSize int) *types.Block {
|
||||||
// A sender who makes transactions, has some funds
|
// A sender who makes transactions, has some funds
|
||||||
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
||||||
address = crypto.PubkeyToAddress(key.PublicKey)
|
address = crypto.PubkeyToAddress(key.PublicKey)
|
||||||
funds = big.NewInt(1000000000000000)
|
funds = big.NewInt(1_000_000_000_000_000_000)
|
||||||
gspec = &Genesis{
|
gspec = &Genesis{
|
||||||
Config: params.TestChainConfig,
|
Config: params.TestChainConfig,
|
||||||
Alloc: GenesisAlloc{address: {Balance: funds}},
|
Alloc: GenesisAlloc{address: {Balance: funds}},
|
||||||
|
|
|
@ -33,14 +33,14 @@ type journalEntry interface {
|
||||||
}
|
}
|
||||||
|
|
||||||
// journal contains the list of state modifications applied since the last state
|
// journal contains the list of state modifications applied since the last state
|
||||||
// commit. These are tracked to be able to be reverted in case of an execution
|
// commit. These are tracked to be able to be reverted in the case of an execution
|
||||||
// exception or revertal request.
|
// exception or request for reversal.
|
||||||
type journal struct {
|
type journal struct {
|
||||||
entries []journalEntry // Current changes tracked by the journal
|
entries []journalEntry // Current changes tracked by the journal
|
||||||
dirties map[common.Address]int // Dirty accounts and the number of changes
|
dirties map[common.Address]int // Dirty accounts and the number of changes
|
||||||
}
|
}
|
||||||
|
|
||||||
// newJournal create a new initialized journal.
|
// newJournal creates a new initialized journal.
|
||||||
func newJournal() *journal {
|
func newJournal() *journal {
|
||||||
return &journal{
|
return &journal{
|
||||||
dirties: make(map[common.Address]int),
|
dirties: make(map[common.Address]int),
|
||||||
|
|
|
@ -388,7 +388,7 @@ func BenchmarkJournal(b *testing.B) {
|
||||||
}
|
}
|
||||||
return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
|
return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
|
||||||
}
|
}
|
||||||
layer := snapshot(new(diskLayer))
|
layer := snapshot(emptyLayer())
|
||||||
for i := 1; i < 128; i++ {
|
for i := 1; i < 128; i++ {
|
||||||
layer = fill(layer)
|
layer = fill(layer)
|
||||||
}
|
}
|
||||||
|
|
|
@ -560,6 +560,12 @@ func (dl *diskLayer) generate(stats *generatorStats) {
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
if batch.ValueSize() > ethdb.IdealBatchSize || abort != nil {
|
if batch.ValueSize() > ethdb.IdealBatchSize || abort != nil {
|
||||||
|
if bytes.Compare(currentLocation, dl.genMarker) < 0 {
|
||||||
|
log.Error("Snapshot generator went backwards",
|
||||||
|
"currentLocation", fmt.Sprintf("%x", currentLocation),
|
||||||
|
"genMarker", fmt.Sprintf("%x", dl.genMarker))
|
||||||
|
}
|
||||||
|
|
||||||
// Flush out the batch anyway no matter it's empty or not.
|
// Flush out the batch anyway no matter it's empty or not.
|
||||||
// It's possible that all the states are recovered and the
|
// It's possible that all the states are recovered and the
|
||||||
// generation indeed makes progress.
|
// generation indeed makes progress.
|
||||||
|
@ -634,8 +640,14 @@ func (dl *diskLayer) generate(stats *generatorStats) {
|
||||||
stats.storage += common.StorageSize(1 + common.HashLength + dataLen)
|
stats.storage += common.StorageSize(1 + common.HashLength + dataLen)
|
||||||
stats.accounts++
|
stats.accounts++
|
||||||
}
|
}
|
||||||
|
marker := accountHash[:]
|
||||||
|
// If the snap generation goes here after interrupted, genMarker may go backward
|
||||||
|
// when last genMarker is consisted of accountHash and storageHash
|
||||||
|
if accMarker != nil && bytes.Equal(marker, accMarker) && len(dl.genMarker) > common.HashLength {
|
||||||
|
marker = dl.genMarker[:]
|
||||||
|
}
|
||||||
// If we've exceeded our batch allowance or termination was requested, flush to disk
|
// If we've exceeded our batch allowance or termination was requested, flush to disk
|
||||||
if err := checkAndFlush(accountHash[:]); err != nil {
|
if err := checkAndFlush(marker); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// If the iterated account is the contract, create a further loop to
|
// If the iterated account is the contract, create a further loop to
|
||||||
|
|
|
@ -163,6 +163,9 @@ type Tree struct {
|
||||||
cache int // Megabytes permitted to use for read caches
|
cache int // Megabytes permitted to use for read caches
|
||||||
layers map[common.Hash]snapshot // Collection of all known layers
|
layers map[common.Hash]snapshot // Collection of all known layers
|
||||||
lock sync.RWMutex
|
lock sync.RWMutex
|
||||||
|
|
||||||
|
// Test hooks
|
||||||
|
onFlatten func() // Hook invoked when the bottom most diff layers are flattened
|
||||||
}
|
}
|
||||||
|
|
||||||
// New attempts to load an already existing snapshot from a persistent key-value
|
// New attempts to load an already existing snapshot from a persistent key-value
|
||||||
|
@ -463,14 +466,21 @@ func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer {
|
||||||
return nil
|
return nil
|
||||||
|
|
||||||
case *diffLayer:
|
case *diffLayer:
|
||||||
|
// Hold the write lock until the flattened parent is linked correctly.
|
||||||
|
// Otherwise, the stale layer may be accessed by external reads in the
|
||||||
|
// meantime.
|
||||||
|
diff.lock.Lock()
|
||||||
|
defer diff.lock.Unlock()
|
||||||
|
|
||||||
// Flatten the parent into the grandparent. The flattening internally obtains a
|
// Flatten the parent into the grandparent. The flattening internally obtains a
|
||||||
// write lock on grandparent.
|
// write lock on grandparent.
|
||||||
flattened := parent.flatten().(*diffLayer)
|
flattened := parent.flatten().(*diffLayer)
|
||||||
t.layers[flattened.root] = flattened
|
t.layers[flattened.root] = flattened
|
||||||
|
|
||||||
diff.lock.Lock()
|
// Invoke the hook if it's registered. Ugly hack.
|
||||||
defer diff.lock.Unlock()
|
if t.onFlatten != nil {
|
||||||
|
t.onFlatten()
|
||||||
|
}
|
||||||
diff.parent = flattened
|
diff.parent = flattened
|
||||||
if flattened.memory < aggregatorMemoryLimit {
|
if flattened.memory < aggregatorMemoryLimit {
|
||||||
// Accumulator layer is smaller than the limit, so we can abort, unless
|
// Accumulator layer is smaller than the limit, so we can abort, unless
|
||||||
|
|
|
@ -22,6 +22,7 @@ import (
|
||||||
"math/big"
|
"math/big"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/VictoriaMetrics/fastcache"
|
"github.com/VictoriaMetrics/fastcache"
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
@ -324,7 +325,7 @@ func TestPostCapBasicDataAccess(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestSnaphots tests the functionality for retrieveing the snapshot
|
// TestSnaphots tests the functionality for retrieving the snapshot
|
||||||
// with given head root and the desired depth.
|
// with given head root and the desired depth.
|
||||||
func TestSnaphots(t *testing.T) {
|
func TestSnaphots(t *testing.T) {
|
||||||
// setAccount is a helper to construct a random account entry and assign it to
|
// setAccount is a helper to construct a random account entry and assign it to
|
||||||
|
@ -423,3 +424,63 @@ func TestSnaphots(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestReadStateDuringFlattening tests the scenario that, during the
|
||||||
|
// bottom diff layers are merging which tags these as stale, the read
|
||||||
|
// happens via a pre-created top snapshot layer which tries to access
|
||||||
|
// the state in these stale layers. Ensure this read can retrieve the
|
||||||
|
// right state back(block until the flattening is finished) instead of
|
||||||
|
// an unexpected error(snapshot layer is stale).
|
||||||
|
func TestReadStateDuringFlattening(t *testing.T) {
|
||||||
|
// setAccount is a helper to construct a random account entry and assign it to
|
||||||
|
// an account slot in a snapshot
|
||||||
|
setAccount := func(accKey string) map[common.Hash][]byte {
|
||||||
|
return map[common.Hash][]byte{
|
||||||
|
common.HexToHash(accKey): randomAccount(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Create a starting base layer and a snapshot tree out of it
|
||||||
|
base := &diskLayer{
|
||||||
|
diskdb: rawdb.NewMemoryDatabase(),
|
||||||
|
root: common.HexToHash("0x01"),
|
||||||
|
cache: fastcache.New(1024 * 500),
|
||||||
|
}
|
||||||
|
snaps := &Tree{
|
||||||
|
layers: map[common.Hash]snapshot{
|
||||||
|
base.root: base,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
// 4 layers in total, 3 diff layers and 1 disk layers
|
||||||
|
snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil)
|
||||||
|
snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil)
|
||||||
|
snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil)
|
||||||
|
|
||||||
|
// Obtain the topmost snapshot handler for state accessing
|
||||||
|
snap := snaps.Snapshot(common.HexToHash("0xa3"))
|
||||||
|
|
||||||
|
// Register the testing hook to access the state after flattening
|
||||||
|
var result = make(chan *Account)
|
||||||
|
snaps.onFlatten = func() {
|
||||||
|
// Spin up a thread to read the account from the pre-created
|
||||||
|
// snapshot handler. It's expected to be blocked.
|
||||||
|
go func() {
|
||||||
|
account, _ := snap.Account(common.HexToHash("0xa1"))
|
||||||
|
result <- account
|
||||||
|
}()
|
||||||
|
select {
|
||||||
|
case res := <-result:
|
||||||
|
t.Fatalf("Unexpected return %v", res)
|
||||||
|
case <-time.NewTimer(time.Millisecond * 300).C:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Cap the snap tree, which will mark the bottom-most layer as stale.
|
||||||
|
snaps.Cap(common.HexToHash("0xa3"), 1)
|
||||||
|
select {
|
||||||
|
case account := <-result:
|
||||||
|
if account == nil {
|
||||||
|
t.Fatal("Failed to retrieve account")
|
||||||
|
}
|
||||||
|
case <-time.NewTimer(time.Millisecond * 300).C:
|
||||||
|
t.Fatal("Unexpected blocker")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -295,9 +295,9 @@ func (l *txList) Add(tx *types.Transaction, priceBump uint64) (bool, *types.Tran
|
||||||
thresholdFeeCap := aFeeCap.Div(aFeeCap, b)
|
thresholdFeeCap := aFeeCap.Div(aFeeCap, b)
|
||||||
thresholdTip := aTip.Div(aTip, b)
|
thresholdTip := aTip.Div(aTip, b)
|
||||||
|
|
||||||
// Have to ensure that either the new fee cap or tip is higher than the
|
// We have to ensure that both the new fee cap and tip are higher than the
|
||||||
// old ones as well as checking the percentage threshold to ensure that
|
// old ones as well as checking the percentage threshold to ensure that
|
||||||
// this is accurate for low (Wei-level) gas price replacements
|
// this is accurate for low (Wei-level) gas price replacements.
|
||||||
if tx.GasFeeCapIntCmp(thresholdFeeCap) < 0 || tx.GasTipCapIntCmp(thresholdTip) < 0 {
|
if tx.GasFeeCapIntCmp(thresholdFeeCap) < 0 || tx.GasTipCapIntCmp(thresholdTip) < 0 {
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -51,7 +51,7 @@ func TestStrictTxListAdd(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkTxListAdd(t *testing.B) {
|
func BenchmarkTxListAdd(b *testing.B) {
|
||||||
// Generate a list of transactions to insert
|
// Generate a list of transactions to insert
|
||||||
key, _ := crypto.GenerateKey()
|
key, _ := crypto.GenerateKey()
|
||||||
|
|
||||||
|
@ -60,11 +60,13 @@ func BenchmarkTxListAdd(t *testing.B) {
|
||||||
txs[i] = transaction(uint64(i), 0, key)
|
txs[i] = transaction(uint64(i), 0, key)
|
||||||
}
|
}
|
||||||
// Insert the transactions in a random order
|
// Insert the transactions in a random order
|
||||||
list := newTxList(true)
|
|
||||||
priceLimit := big.NewInt(int64(DefaultTxPoolConfig.PriceLimit))
|
priceLimit := big.NewInt(int64(DefaultTxPoolConfig.PriceLimit))
|
||||||
t.ResetTimer()
|
b.ResetTimer()
|
||||||
for _, v := range rand.Perm(len(txs)) {
|
for i := 0; i < b.N; i++ {
|
||||||
list.Add(txs[v], DefaultTxPoolConfig.PriceBump)
|
list := newTxList(true)
|
||||||
list.Filter(priceLimit, DefaultTxPoolConfig.PriceBump)
|
for _, v := range rand.Perm(len(txs)) {
|
||||||
|
list.Add(txs[v], DefaultTxPoolConfig.PriceBump)
|
||||||
|
list.Filter(priceLimit, DefaultTxPoolConfig.PriceBump)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -77,3 +77,11 @@ func (txn *txNoncer) setIfLower(addr common.Address, nonce uint64) {
|
||||||
}
|
}
|
||||||
txn.nonces[addr] = nonce
|
txn.nonces[addr] = nonce
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// setAll sets the nonces for all accounts to the given map.
|
||||||
|
func (txn *txNoncer) setAll(all map[common.Address]uint64) {
|
||||||
|
txn.lock.Lock()
|
||||||
|
defer txn.lock.Unlock()
|
||||||
|
|
||||||
|
txn.nonces = all
|
||||||
|
}
|
||||||
|
|
|
@ -533,7 +533,7 @@ func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types.
|
||||||
// The enforceTips parameter can be used to do an extra filtering on the pending
|
// The enforceTips parameter can be used to do an extra filtering on the pending
|
||||||
// transactions and only return those whose **effective** tip is large enough in
|
// transactions and only return those whose **effective** tip is large enough in
|
||||||
// the next pending execution environment.
|
// the next pending execution environment.
|
||||||
func (pool *TxPool) Pending(enforceTips bool) (map[common.Address]types.Transactions, error) {
|
func (pool *TxPool) Pending(enforceTips bool) map[common.Address]types.Transactions {
|
||||||
pool.mu.Lock()
|
pool.mu.Lock()
|
||||||
defer pool.mu.Unlock()
|
defer pool.mu.Unlock()
|
||||||
|
|
||||||
|
@ -554,7 +554,7 @@ func (pool *TxPool) Pending(enforceTips bool) (map[common.Address]types.Transact
|
||||||
pending[addr] = txs
|
pending[addr] = txs
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return pending, nil
|
return pending
|
||||||
}
|
}
|
||||||
|
|
||||||
// Locals retrieves the accounts currently considered local by the pool.
|
// Locals retrieves the accounts currently considered local by the pool.
|
||||||
|
@ -1182,16 +1182,18 @@ func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirt
|
||||||
pendingBaseFee := misc.CalcBaseFee(pool.chainconfig, reset.newHead)
|
pendingBaseFee := misc.CalcBaseFee(pool.chainconfig, reset.newHead)
|
||||||
pool.priced.SetBaseFee(pendingBaseFee)
|
pool.priced.SetBaseFee(pendingBaseFee)
|
||||||
}
|
}
|
||||||
|
// Update all accounts to the latest known pending nonce
|
||||||
|
nonces := make(map[common.Address]uint64, len(pool.pending))
|
||||||
|
for addr, list := range pool.pending {
|
||||||
|
highestPending := list.LastElement()
|
||||||
|
nonces[addr] = highestPending.Nonce() + 1
|
||||||
|
}
|
||||||
|
pool.pendingNonces.setAll(nonces)
|
||||||
}
|
}
|
||||||
// Ensure pool.queue and pool.pending sizes stay within the configured limits.
|
// Ensure pool.queue and pool.pending sizes stay within the configured limits.
|
||||||
pool.truncatePending()
|
pool.truncatePending()
|
||||||
pool.truncateQueue()
|
pool.truncateQueue()
|
||||||
|
|
||||||
// Update all accounts to the latest known pending nonce
|
|
||||||
for addr, list := range pool.pending {
|
|
||||||
highestPending := list.LastElement()
|
|
||||||
pool.pendingNonces.set(addr, highestPending.Nonce()+1)
|
|
||||||
}
|
|
||||||
dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg))
|
dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg))
|
||||||
pool.changesSinceReorg = 0 // Reset change counter
|
pool.changesSinceReorg = 0 // Reset change counter
|
||||||
pool.mu.Unlock()
|
pool.mu.Unlock()
|
||||||
|
|
|
@ -255,10 +255,6 @@ func TestStateChangeDuringTransactionPoolReset(t *testing.T) {
|
||||||
trigger = true
|
trigger = true
|
||||||
<-pool.requestReset(nil, nil)
|
<-pool.requestReset(nil, nil)
|
||||||
|
|
||||||
_, err := pool.Pending(false)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Could not fetch pending transactions: %v", err)
|
|
||||||
}
|
|
||||||
nonce = pool.Nonce(address)
|
nonce = pool.Nonce(address)
|
||||||
if nonce != 2 {
|
if nonce != 2 {
|
||||||
t.Fatalf("Invalid nonce, want 2, got %d", nonce)
|
t.Fatalf("Invalid nonce, want 2, got %d", nonce)
|
||||||
|
@ -2544,3 +2540,24 @@ func BenchmarkInsertRemoteWithAllLocals(b *testing.B) {
|
||||||
pool.Stop()
|
pool.Stop()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Benchmarks the speed of batch transaction insertion in case of multiple accounts.
|
||||||
|
func BenchmarkPoolMultiAccountBatchInsert(b *testing.B) {
|
||||||
|
// Generate a batch of transactions to enqueue into the pool
|
||||||
|
pool, _ := setupTxPool()
|
||||||
|
defer pool.Stop()
|
||||||
|
b.ReportAllocs()
|
||||||
|
batches := make(types.Transactions, b.N)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
key, _ := crypto.GenerateKey()
|
||||||
|
account := crypto.PubkeyToAddress(key.PublicKey)
|
||||||
|
pool.currentState.AddBalance(account, big.NewInt(1000000))
|
||||||
|
tx := transaction(uint64(0), 100000, key)
|
||||||
|
batches[i] = tx
|
||||||
|
}
|
||||||
|
// Benchmark importing the transactions into the queue
|
||||||
|
b.ResetTimer()
|
||||||
|
for _, tx := range batches {
|
||||||
|
pool.AddRemotesSync([]*types.Transaction{tx})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -144,13 +144,29 @@ func (r *Receipt) EncodeRLP(w io.Writer) error {
|
||||||
buf := encodeBufferPool.Get().(*bytes.Buffer)
|
buf := encodeBufferPool.Get().(*bytes.Buffer)
|
||||||
defer encodeBufferPool.Put(buf)
|
defer encodeBufferPool.Put(buf)
|
||||||
buf.Reset()
|
buf.Reset()
|
||||||
buf.WriteByte(r.Type)
|
if err := r.encodeTyped(data, buf); err != nil {
|
||||||
if err := rlp.Encode(buf, data); err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return rlp.Encode(w, buf.Bytes())
|
return rlp.Encode(w, buf.Bytes())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// encodeTyped writes the canonical encoding of a typed receipt to w.
|
||||||
|
func (r *Receipt) encodeTyped(data *receiptRLP, w *bytes.Buffer) error {
|
||||||
|
w.WriteByte(r.Type)
|
||||||
|
return rlp.Encode(w, data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalBinary returns the consensus encoding of the receipt.
|
||||||
|
func (r *Receipt) MarshalBinary() ([]byte, error) {
|
||||||
|
if r.Type == LegacyTxType {
|
||||||
|
return rlp.EncodeToBytes(r)
|
||||||
|
}
|
||||||
|
data := &receiptRLP{r.statusEncoding(), r.CumulativeGasUsed, r.Bloom, r.Logs}
|
||||||
|
var buf bytes.Buffer
|
||||||
|
err := r.encodeTyped(data, &buf)
|
||||||
|
return buf.Bytes(), err
|
||||||
|
}
|
||||||
|
|
||||||
// DecodeRLP implements rlp.Decoder, and loads the consensus fields of a receipt
|
// DecodeRLP implements rlp.Decoder, and loads the consensus fields of a receipt
|
||||||
// from an RLP stream.
|
// from an RLP stream.
|
||||||
func (r *Receipt) DecodeRLP(s *rlp.Stream) error {
|
func (r *Receipt) DecodeRLP(s *rlp.Stream) error {
|
||||||
|
@ -189,6 +205,42 @@ func (r *Receipt) DecodeRLP(s *rlp.Stream) error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UnmarshalBinary decodes the consensus encoding of receipts.
|
||||||
|
// It supports legacy RLP receipts and EIP-2718 typed receipts.
|
||||||
|
func (r *Receipt) UnmarshalBinary(b []byte) error {
|
||||||
|
if len(b) > 0 && b[0] > 0x7f {
|
||||||
|
// It's a legacy receipt decode the RLP
|
||||||
|
var data receiptRLP
|
||||||
|
err := rlp.DecodeBytes(b, &data)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
r.Type = LegacyTxType
|
||||||
|
return r.setFromRLP(data)
|
||||||
|
}
|
||||||
|
// It's an EIP2718 typed transaction envelope.
|
||||||
|
return r.decodeTyped(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
// decodeTyped decodes a typed receipt from the canonical format.
|
||||||
|
func (r *Receipt) decodeTyped(b []byte) error {
|
||||||
|
if len(b) == 0 {
|
||||||
|
return errEmptyTypedReceipt
|
||||||
|
}
|
||||||
|
switch b[0] {
|
||||||
|
case DynamicFeeTxType, AccessListTxType:
|
||||||
|
var data receiptRLP
|
||||||
|
err := rlp.DecodeBytes(b[1:], &data)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
r.Type = b[0]
|
||||||
|
return r.setFromRLP(data)
|
||||||
|
default:
|
||||||
|
return ErrTxTypeNotSupported
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (r *Receipt) setFromRLP(data receiptRLP) error {
|
func (r *Receipt) setFromRLP(data receiptRLP) error {
|
||||||
r.CumulativeGasUsed, r.Bloom, r.Logs = data.CumulativeGasUsed, data.Bloom, data.Logs
|
r.CumulativeGasUsed, r.Bloom, r.Logs = data.CumulativeGasUsed, data.Bloom, data.Logs
|
||||||
return r.setStatus(data.PostStateOrStatus)
|
return r.setStatus(data.PostStateOrStatus)
|
||||||
|
@ -354,42 +406,42 @@ func (rs Receipts) EncodeIndex(i int, w *bytes.Buffer) {
|
||||||
|
|
||||||
// DeriveFields fills the receipts with their computed fields based on consensus
|
// DeriveFields fills the receipts with their computed fields based on consensus
|
||||||
// data and contextual infos like containing block and transactions.
|
// data and contextual infos like containing block and transactions.
|
||||||
func (r Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, number uint64, txs Transactions) error {
|
func (rs Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, number uint64, txs Transactions) error {
|
||||||
signer := MakeSigner(config, new(big.Int).SetUint64(number))
|
signer := MakeSigner(config, new(big.Int).SetUint64(number))
|
||||||
|
|
||||||
logIndex := uint(0)
|
logIndex := uint(0)
|
||||||
if len(txs) != len(r) {
|
if len(txs) != len(rs) {
|
||||||
return errors.New("transaction and receipt count mismatch")
|
return errors.New("transaction and receipt count mismatch")
|
||||||
}
|
}
|
||||||
for i := 0; i < len(r); i++ {
|
for i := 0; i < len(rs); i++ {
|
||||||
// The transaction type and hash can be retrieved from the transaction itself
|
// The transaction type and hash can be retrieved from the transaction itself
|
||||||
r[i].Type = txs[i].Type()
|
rs[i].Type = txs[i].Type()
|
||||||
r[i].TxHash = txs[i].Hash()
|
rs[i].TxHash = txs[i].Hash()
|
||||||
|
|
||||||
// block location fields
|
// block location fields
|
||||||
r[i].BlockHash = hash
|
rs[i].BlockHash = hash
|
||||||
r[i].BlockNumber = new(big.Int).SetUint64(number)
|
rs[i].BlockNumber = new(big.Int).SetUint64(number)
|
||||||
r[i].TransactionIndex = uint(i)
|
rs[i].TransactionIndex = uint(i)
|
||||||
|
|
||||||
// The contract address can be derived from the transaction itself
|
// The contract address can be derived from the transaction itself
|
||||||
if txs[i].To() == nil {
|
if txs[i].To() == nil {
|
||||||
// Deriving the signer is expensive, only do if it's actually needed
|
// Deriving the signer is expensive, only do if it's actually needed
|
||||||
from, _ := Sender(signer, txs[i])
|
from, _ := Sender(signer, txs[i])
|
||||||
r[i].ContractAddress = crypto.CreateAddress(from, txs[i].Nonce())
|
rs[i].ContractAddress = crypto.CreateAddress(from, txs[i].Nonce())
|
||||||
}
|
}
|
||||||
// The used gas can be calculated based on previous r
|
// The used gas can be calculated based on previous r
|
||||||
if i == 0 {
|
if i == 0 {
|
||||||
r[i].GasUsed = r[i].CumulativeGasUsed
|
rs[i].GasUsed = rs[i].CumulativeGasUsed
|
||||||
} else {
|
} else {
|
||||||
r[i].GasUsed = r[i].CumulativeGasUsed - r[i-1].CumulativeGasUsed
|
rs[i].GasUsed = rs[i].CumulativeGasUsed - rs[i-1].CumulativeGasUsed
|
||||||
}
|
}
|
||||||
// The derived log fields can simply be set from the block and transaction
|
// The derived log fields can simply be set from the block and transaction
|
||||||
for j := 0; j < len(r[i].Logs); j++ {
|
for j := 0; j < len(rs[i].Logs); j++ {
|
||||||
r[i].Logs[j].BlockNumber = number
|
rs[i].Logs[j].BlockNumber = number
|
||||||
r[i].Logs[j].BlockHash = hash
|
rs[i].Logs[j].BlockHash = hash
|
||||||
r[i].Logs[j].TxHash = r[i].TxHash
|
rs[i].Logs[j].TxHash = rs[i].TxHash
|
||||||
r[i].Logs[j].TxIndex = uint(i)
|
rs[i].Logs[j].TxIndex = uint(i)
|
||||||
r[i].Logs[j].Index = logIndex
|
rs[i].Logs[j].Index = logIndex
|
||||||
logIndex++
|
logIndex++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -29,6 +29,59 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
legacyReceipt = &Receipt{
|
||||||
|
Status: ReceiptStatusFailed,
|
||||||
|
CumulativeGasUsed: 1,
|
||||||
|
Logs: []*Log{
|
||||||
|
{
|
||||||
|
Address: common.BytesToAddress([]byte{0x11}),
|
||||||
|
Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")},
|
||||||
|
Data: []byte{0x01, 0x00, 0xff},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Address: common.BytesToAddress([]byte{0x01, 0x11}),
|
||||||
|
Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")},
|
||||||
|
Data: []byte{0x01, 0x00, 0xff},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
accessListReceipt = &Receipt{
|
||||||
|
Status: ReceiptStatusFailed,
|
||||||
|
CumulativeGasUsed: 1,
|
||||||
|
Logs: []*Log{
|
||||||
|
{
|
||||||
|
Address: common.BytesToAddress([]byte{0x11}),
|
||||||
|
Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")},
|
||||||
|
Data: []byte{0x01, 0x00, 0xff},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Address: common.BytesToAddress([]byte{0x01, 0x11}),
|
||||||
|
Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")},
|
||||||
|
Data: []byte{0x01, 0x00, 0xff},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Type: AccessListTxType,
|
||||||
|
}
|
||||||
|
eip1559Receipt = &Receipt{
|
||||||
|
Status: ReceiptStatusFailed,
|
||||||
|
CumulativeGasUsed: 1,
|
||||||
|
Logs: []*Log{
|
||||||
|
{
|
||||||
|
Address: common.BytesToAddress([]byte{0x11}),
|
||||||
|
Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")},
|
||||||
|
Data: []byte{0x01, 0x00, 0xff},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Address: common.BytesToAddress([]byte{0x01, 0x11}),
|
||||||
|
Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")},
|
||||||
|
Data: []byte{0x01, 0x00, 0xff},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Type: DynamicFeeTxType,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
func TestDecodeEmptyTypedReceipt(t *testing.T) {
|
func TestDecodeEmptyTypedReceipt(t *testing.T) {
|
||||||
input := []byte{0x80}
|
input := []byte{0x80}
|
||||||
var r Receipt
|
var r Receipt
|
||||||
|
@ -312,6 +365,105 @@ func TestTypedReceiptEncodingDecoding(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestReceiptMarshalBinary(t *testing.T) {
|
||||||
|
// Legacy Receipt
|
||||||
|
legacyReceipt.Bloom = CreateBloom(Receipts{legacyReceipt})
|
||||||
|
have, err := legacyReceipt.MarshalBinary()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("marshal binary error: %v", err)
|
||||||
|
}
|
||||||
|
legacyReceipts := Receipts{legacyReceipt}
|
||||||
|
buf := new(bytes.Buffer)
|
||||||
|
legacyReceipts.EncodeIndex(0, buf)
|
||||||
|
haveEncodeIndex := buf.Bytes()
|
||||||
|
if !bytes.Equal(have, haveEncodeIndex) {
|
||||||
|
t.Errorf("BinaryMarshal and EncodeIndex mismatch, got %x want %x", have, haveEncodeIndex)
|
||||||
|
}
|
||||||
|
buf.Reset()
|
||||||
|
if err := legacyReceipt.EncodeRLP(buf); err != nil {
|
||||||
|
t.Fatalf("encode rlp error: %v", err)
|
||||||
|
}
|
||||||
|
haveRLPEncode := buf.Bytes()
|
||||||
|
if !bytes.Equal(have, haveRLPEncode) {
|
||||||
|
t.Errorf("BinaryMarshal and EncodeRLP mismatch for legacy tx, got %x want %x", have, haveRLPEncode)
|
||||||
|
}
|
||||||
|
legacyWant := common.FromHex("f901c58001b9010000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000500000000000000000000000000000000000014000000000000000000000000000000000000000000000000000000000000000000000000000010000080000000000000000000004000000000000000000000000000040000000000000000000000000000800000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000f8bef85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100fff85d940000000000000000000000000000000000000111f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff")
|
||||||
|
if !bytes.Equal(have, legacyWant) {
|
||||||
|
t.Errorf("encoded RLP mismatch, got %x want %x", have, legacyWant)
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2930 Receipt
|
||||||
|
buf.Reset()
|
||||||
|
accessListReceipt.Bloom = CreateBloom(Receipts{accessListReceipt})
|
||||||
|
have, err = accessListReceipt.MarshalBinary()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("marshal binary error: %v", err)
|
||||||
|
}
|
||||||
|
accessListReceipts := Receipts{accessListReceipt}
|
||||||
|
accessListReceipts.EncodeIndex(0, buf)
|
||||||
|
haveEncodeIndex = buf.Bytes()
|
||||||
|
if !bytes.Equal(have, haveEncodeIndex) {
|
||||||
|
t.Errorf("BinaryMarshal and EncodeIndex mismatch, got %x want %x", have, haveEncodeIndex)
|
||||||
|
}
|
||||||
|
accessListWant := common.FromHex("01f901c58001b9010000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000500000000000000000000000000000000000014000000000000000000000000000000000000000000000000000000000000000000000000000010000080000000000000000000004000000000000000000000000000040000000000000000000000000000800000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000f8bef85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100fff85d940000000000000000000000000000000000000111f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff")
|
||||||
|
if !bytes.Equal(have, accessListWant) {
|
||||||
|
t.Errorf("encoded RLP mismatch, got %x want %x", have, accessListWant)
|
||||||
|
}
|
||||||
|
|
||||||
|
// 1559 Receipt
|
||||||
|
buf.Reset()
|
||||||
|
eip1559Receipt.Bloom = CreateBloom(Receipts{eip1559Receipt})
|
||||||
|
have, err = eip1559Receipt.MarshalBinary()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("marshal binary error: %v", err)
|
||||||
|
}
|
||||||
|
eip1559Receipts := Receipts{eip1559Receipt}
|
||||||
|
eip1559Receipts.EncodeIndex(0, buf)
|
||||||
|
haveEncodeIndex = buf.Bytes()
|
||||||
|
if !bytes.Equal(have, haveEncodeIndex) {
|
||||||
|
t.Errorf("BinaryMarshal and EncodeIndex mismatch, got %x want %x", have, haveEncodeIndex)
|
||||||
|
}
|
||||||
|
eip1559Want := common.FromHex("02f901c58001b9010000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000500000000000000000000000000000000000014000000000000000000000000000000000000000000000000000000000000000000000000000010000080000000000000000000004000000000000000000000000000040000000000000000000000000000800000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000f8bef85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100fff85d940000000000000000000000000000000000000111f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff")
|
||||||
|
if !bytes.Equal(have, eip1559Want) {
|
||||||
|
t.Errorf("encoded RLP mismatch, got %x want %x", have, eip1559Want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReceiptUnmarshalBinary(t *testing.T) {
|
||||||
|
// Legacy Receipt
|
||||||
|
legacyBinary := common.FromHex("f901c58001b9010000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000500000000000000000000000000000000000014000000000000000000000000000000000000000000000000000000000000000000000000000010000080000000000000000000004000000000000000000000000000040000000000000000000000000000800000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000f8bef85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100fff85d940000000000000000000000000000000000000111f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff")
|
||||||
|
gotLegacyReceipt := new(Receipt)
|
||||||
|
if err := gotLegacyReceipt.UnmarshalBinary(legacyBinary); err != nil {
|
||||||
|
t.Fatalf("unmarshal binary error: %v", err)
|
||||||
|
}
|
||||||
|
legacyReceipt.Bloom = CreateBloom(Receipts{legacyReceipt})
|
||||||
|
if !reflect.DeepEqual(gotLegacyReceipt, legacyReceipt) {
|
||||||
|
t.Errorf("receipt unmarshalled from binary mismatch, got %v want %v", gotLegacyReceipt, legacyReceipt)
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2930 Receipt
|
||||||
|
accessListBinary := common.FromHex("01f901c58001b9010000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000500000000000000000000000000000000000014000000000000000000000000000000000000000000000000000000000000000000000000000010000080000000000000000000004000000000000000000000000000040000000000000000000000000000800000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000f8bef85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100fff85d940000000000000000000000000000000000000111f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff")
|
||||||
|
gotAccessListReceipt := new(Receipt)
|
||||||
|
if err := gotAccessListReceipt.UnmarshalBinary(accessListBinary); err != nil {
|
||||||
|
t.Fatalf("unmarshal binary error: %v", err)
|
||||||
|
}
|
||||||
|
accessListReceipt.Bloom = CreateBloom(Receipts{accessListReceipt})
|
||||||
|
if !reflect.DeepEqual(gotAccessListReceipt, accessListReceipt) {
|
||||||
|
t.Errorf("receipt unmarshalled from binary mismatch, got %v want %v", gotAccessListReceipt, accessListReceipt)
|
||||||
|
}
|
||||||
|
|
||||||
|
// 1559 Receipt
|
||||||
|
eip1559RctBinary := common.FromHex("02f901c58001b9010000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000500000000000000000000000000000000000014000000000000000000000000000000000000000000000000000000000000000000000000000010000080000000000000000000004000000000000000000000000000040000000000000000000000000000800000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000f8bef85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100fff85d940000000000000000000000000000000000000111f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff")
|
||||||
|
got1559Receipt := new(Receipt)
|
||||||
|
if err := got1559Receipt.UnmarshalBinary(eip1559RctBinary); err != nil {
|
||||||
|
t.Fatalf("unmarshal binary error: %v", err)
|
||||||
|
}
|
||||||
|
eip1559Receipt.Bloom = CreateBloom(Receipts{eip1559Receipt})
|
||||||
|
if !reflect.DeepEqual(got1559Receipt, eip1559Receipt) {
|
||||||
|
t.Errorf("receipt unmarshalled from binary mismatch, got %v want %v", got1559Receipt, eip1559Receipt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func clearComputedFieldsOnReceipts(t *testing.T, receipts Receipts) {
|
func clearComputedFieldsOnReceipts(t *testing.T, receipts Receipts) {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
|
|
||||||
|
|
|
@ -182,9 +182,14 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
|
||||||
if !evm.StateDB.Exist(addr) {
|
if !evm.StateDB.Exist(addr) {
|
||||||
if !isPrecompile && evm.chainRules.IsEIP158 && value.Sign() == 0 {
|
if !isPrecompile && evm.chainRules.IsEIP158 && value.Sign() == 0 {
|
||||||
// Calling a non existing account, don't do anything, but ping the tracer
|
// Calling a non existing account, don't do anything, but ping the tracer
|
||||||
if evm.Config.Debug && evm.depth == 0 {
|
if evm.Config.Debug {
|
||||||
evm.Config.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value)
|
if evm.depth == 0 {
|
||||||
evm.Config.Tracer.CaptureEnd(ret, 0, 0, nil)
|
evm.Config.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value)
|
||||||
|
evm.Config.Tracer.CaptureEnd(ret, 0, 0, nil)
|
||||||
|
} else {
|
||||||
|
evm.Config.Tracer.CaptureEnter(CALL, caller.Address(), addr, input, gas, value)
|
||||||
|
evm.Config.Tracer.CaptureExit(ret, 0, nil)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return nil, gas, nil
|
return nil, gas, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -20,6 +20,7 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"math/big"
|
"math/big"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum"
|
"github.com/ethereum/go-ethereum"
|
||||||
"github.com/ethereum/go-ethereum/accounts"
|
"github.com/ethereum/go-ethereum/accounts"
|
||||||
|
@ -194,7 +195,10 @@ func (b *EthAPIBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*typ
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *EthAPIBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int {
|
func (b *EthAPIBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int {
|
||||||
return b.eth.blockchain.GetTdByHash(hash)
|
if header := b.eth.blockchain.GetHeaderByHash(hash); header != nil {
|
||||||
|
return b.eth.blockchain.GetTd(hash, header.Number.Uint64())
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *EthAPIBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config) (*vm.EVM, func() error, error) {
|
func (b *EthAPIBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config) (*vm.EVM, func() error, error) {
|
||||||
|
@ -236,10 +240,7 @@ func (b *EthAPIBackend) SendTx(ctx context.Context, signedTx *types.Transaction)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *EthAPIBackend) GetPoolTransactions() (types.Transactions, error) {
|
func (b *EthAPIBackend) GetPoolTransactions() (types.Transactions, error) {
|
||||||
pending, err := b.eth.txPool.Pending(false)
|
pending := b.eth.txPool.Pending(false)
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var txs types.Transactions
|
var txs types.Transactions
|
||||||
for _, batch := range pending {
|
for _, batch := range pending {
|
||||||
txs = append(txs, batch...)
|
txs = append(txs, batch...)
|
||||||
|
@ -316,6 +317,10 @@ func (b *EthAPIBackend) RPCGasCap() uint64 {
|
||||||
return b.eth.config.RPCGasCap
|
return b.eth.config.RPCGasCap
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (b *EthAPIBackend) RPCEVMTimeout() time.Duration {
|
||||||
|
return b.eth.config.RPCEVMTimeout
|
||||||
|
}
|
||||||
|
|
||||||
func (b *EthAPIBackend) RPCTxFeeCap() float64 {
|
func (b *EthAPIBackend) RPCTxFeeCap() float64 {
|
||||||
return b.eth.config.RPCTxFeeCap
|
return b.eth.config.RPCTxFeeCap
|
||||||
}
|
}
|
||||||
|
@ -347,8 +352,8 @@ func (b *EthAPIBackend) StartMining(threads int) error {
|
||||||
return b.eth.StartMining(threads)
|
return b.eth.StartMining(threads)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (*state.StateDB, error) {
|
func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive, preferDisk bool) (*state.StateDB, error) {
|
||||||
return b.eth.stateAtBlock(block, reexec, base, checkLive)
|
return b.eth.stateAtBlock(block, reexec, base, checkLive, preferDisk)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) {
|
func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) {
|
||||||
|
|
|
@ -131,7 +131,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideLondon)
|
chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideArrowGlacier)
|
||||||
if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok {
|
if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok {
|
||||||
return nil, genesisErr
|
return nil, genesisErr
|
||||||
}
|
}
|
||||||
|
@ -554,7 +554,7 @@ func (s *Ethereum) Stop() error {
|
||||||
s.bloomIndexer.Close()
|
s.bloomIndexer.Close()
|
||||||
close(s.closeBloomHandler)
|
close(s.closeBloomHandler)
|
||||||
s.txPool.Stop()
|
s.txPool.Stop()
|
||||||
s.miner.Stop()
|
s.miner.Close()
|
||||||
s.blockchain.Stop()
|
s.blockchain.Stop()
|
||||||
s.engine.Close()
|
s.engine.Close()
|
||||||
rawdb.PopUncleanShutdownMarker(s.chainDb)
|
rawdb.PopUncleanShutdownMarker(s.chainDb)
|
||||||
|
|
|
@ -39,10 +39,8 @@ import (
|
||||||
// Register adds catalyst APIs to the node.
|
// Register adds catalyst APIs to the node.
|
||||||
func Register(stack *node.Node, backend *eth.Ethereum) error {
|
func Register(stack *node.Node, backend *eth.Ethereum) error {
|
||||||
chainconfig := backend.BlockChain().Config()
|
chainconfig := backend.BlockChain().Config()
|
||||||
if chainconfig.CatalystBlock == nil {
|
if chainconfig.TerminalTotalDifficulty == nil {
|
||||||
return errors.New("catalystBlock is not set in genesis config")
|
return errors.New("catalyst started without valid total difficulty")
|
||||||
} else if chainconfig.CatalystBlock.Sign() != 0 {
|
|
||||||
return errors.New("catalystBlock of genesis config must be zero")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Warn("Catalyst mode enabled")
|
log.Warn("Catalyst mode enabled")
|
||||||
|
@ -128,10 +126,7 @@ func (api *consensusAPI) AssembleBlock(params assembleBlockParams) (*executableD
|
||||||
time.Sleep(wait)
|
time.Sleep(wait)
|
||||||
}
|
}
|
||||||
|
|
||||||
pending, err := pool.Pending(true)
|
pending := pool.Pending(true)
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
coinbase, err := api.eth.Etherbase()
|
coinbase, err := api.eth.Etherbase()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
@ -62,26 +62,28 @@ func generateTestChain() (*core.Genesis, []*types.Block) {
|
||||||
return genesis, blocks
|
return genesis, blocks
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO (MariusVanDerWijden) reenable once engine api is updated to the latest spec
|
||||||
|
/*
|
||||||
func generateTestChainWithFork(n int, fork int) (*core.Genesis, []*types.Block, []*types.Block) {
|
func generateTestChainWithFork(n int, fork int) (*core.Genesis, []*types.Block, []*types.Block) {
|
||||||
if fork >= n {
|
if fork >= n {
|
||||||
fork = n - 1
|
fork = n - 1
|
||||||
}
|
}
|
||||||
db := rawdb.NewMemoryDatabase()
|
db := rawdb.NewMemoryDatabase()
|
||||||
config := ¶ms.ChainConfig{
|
config := ¶ms.ChainConfig{
|
||||||
ChainID: big.NewInt(1337),
|
ChainID: big.NewInt(1337),
|
||||||
HomesteadBlock: big.NewInt(0),
|
HomesteadBlock: big.NewInt(0),
|
||||||
EIP150Block: big.NewInt(0),
|
EIP150Block: big.NewInt(0),
|
||||||
EIP155Block: big.NewInt(0),
|
EIP155Block: big.NewInt(0),
|
||||||
EIP158Block: big.NewInt(0),
|
EIP158Block: big.NewInt(0),
|
||||||
ByzantiumBlock: big.NewInt(0),
|
ByzantiumBlock: big.NewInt(0),
|
||||||
ConstantinopleBlock: big.NewInt(0),
|
ConstantinopleBlock: big.NewInt(0),
|
||||||
PetersburgBlock: big.NewInt(0),
|
PetersburgBlock: big.NewInt(0),
|
||||||
IstanbulBlock: big.NewInt(0),
|
IstanbulBlock: big.NewInt(0),
|
||||||
MuirGlacierBlock: big.NewInt(0),
|
MuirGlacierBlock: big.NewInt(0),
|
||||||
BerlinBlock: big.NewInt(0),
|
BerlinBlock: big.NewInt(0),
|
||||||
LondonBlock: big.NewInt(0),
|
LondonBlock: big.NewInt(0),
|
||||||
CatalystBlock: big.NewInt(0),
|
TerminalTotalDifficulty: big.NewInt(0),
|
||||||
Ethash: new(params.EthashConfig),
|
Ethash: new(params.EthashConfig),
|
||||||
}
|
}
|
||||||
genesis := &core.Genesis{
|
genesis := &core.Genesis{
|
||||||
Config: config,
|
Config: config,
|
||||||
|
@ -105,6 +107,7 @@ func generateTestChainWithFork(n int, fork int) (*core.Genesis, []*types.Block,
|
||||||
forkedBlocks, _ := core.GenerateChain(config, blocks[fork], engine, db, n-fork, generateFork)
|
forkedBlocks, _ := core.GenerateChain(config, blocks[fork], engine, db, n-fork, generateFork)
|
||||||
return genesis, blocks, forkedBlocks
|
return genesis, blocks, forkedBlocks
|
||||||
}
|
}
|
||||||
|
*/
|
||||||
|
|
||||||
func TestEth2AssembleBlock(t *testing.T) {
|
func TestEth2AssembleBlock(t *testing.T) {
|
||||||
genesis, blocks := generateTestChain()
|
genesis, blocks := generateTestChain()
|
||||||
|
@ -156,6 +159,8 @@ func TestEth2AssembleBlockWithAnotherBlocksTxs(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO (MariusVanDerWijden) reenable once engine api is updated to the latest spec
|
||||||
|
/*
|
||||||
func TestEth2NewBlock(t *testing.T) {
|
func TestEth2NewBlock(t *testing.T) {
|
||||||
genesis, blocks, forkedBlocks := generateTestChainWithFork(10, 4)
|
genesis, blocks, forkedBlocks := generateTestChainWithFork(10, 4)
|
||||||
n, ethservice := startEthService(t, genesis, blocks[1:5])
|
n, ethservice := startEthService(t, genesis, blocks[1:5])
|
||||||
|
@ -216,6 +221,7 @@ func TestEth2NewBlock(t *testing.T) {
|
||||||
t.Fatalf("Wrong head after inserting fork %x != %x", exp, ethservice.BlockChain().CurrentBlock().Hash())
|
t.Fatalf("Wrong head after inserting fork %x != %x", exp, ethservice.BlockChain().CurrentBlock().Hash())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
*/
|
||||||
|
|
||||||
// startEthService creates a full node instance for testing.
|
// startEthService creates a full node instance for testing.
|
||||||
func startEthService(t *testing.T, genesis *core.Genesis, blocks []*types.Block) (*node.Node, *eth.Ethereum) {
|
func startEthService(t *testing.T, genesis *core.Genesis, blocks []*types.Block) (*node.Node, *eth.Ethereum) {
|
||||||
|
|
|
@ -87,10 +87,11 @@ var Defaults = Config{
|
||||||
GasPrice: big.NewInt(params.GWei),
|
GasPrice: big.NewInt(params.GWei),
|
||||||
Recommit: 3 * time.Second,
|
Recommit: 3 * time.Second,
|
||||||
},
|
},
|
||||||
TxPool: core.DefaultTxPoolConfig,
|
TxPool: core.DefaultTxPoolConfig,
|
||||||
RPCGasCap: 50000000,
|
RPCGasCap: 50000000,
|
||||||
GPO: FullNodeGPO,
|
RPCEVMTimeout: 5 * time.Second,
|
||||||
RPCTxFeeCap: 1, // 1 ether
|
GPO: FullNodeGPO,
|
||||||
|
RPCTxFeeCap: 1, // 1 ether
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
@ -188,6 +189,9 @@ type Config struct {
|
||||||
// RPCGasCap is the global gas cap for eth-call variants.
|
// RPCGasCap is the global gas cap for eth-call variants.
|
||||||
RPCGasCap uint64
|
RPCGasCap uint64
|
||||||
|
|
||||||
|
// RPCEVMTimeout is the global timeout for eth-call.
|
||||||
|
RPCEVMTimeout time.Duration
|
||||||
|
|
||||||
// RPCTxFeeCap is the global transaction fee(price * gaslimit) cap for
|
// RPCTxFeeCap is the global transaction fee(price * gaslimit) cap for
|
||||||
// send-transction variants. The unit is ether.
|
// send-transction variants. The unit is ether.
|
||||||
RPCTxFeeCap float64
|
RPCTxFeeCap float64
|
||||||
|
@ -198,8 +202,8 @@ type Config struct {
|
||||||
// CheckpointOracle is the configuration for checkpoint oracle.
|
// CheckpointOracle is the configuration for checkpoint oracle.
|
||||||
CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"`
|
CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"`
|
||||||
|
|
||||||
// Berlin block override (TODO: remove after the fork)
|
// Arrow Glacier block override (TODO: remove after the fork)
|
||||||
OverrideLondon *big.Int `toml:",omitempty"`
|
OverrideArrowGlacier *big.Int `toml:",omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// CreateConsensusEngine creates a consensus engine for the given chain configuration.
|
// CreateConsensusEngine creates a consensus engine for the given chain configuration.
|
||||||
|
|
|
@ -55,10 +55,11 @@ func (c Config) MarshalTOML() (interface{}, error) {
|
||||||
EnablePreimageRecording bool
|
EnablePreimageRecording bool
|
||||||
DocRoot string `toml:"-"`
|
DocRoot string `toml:"-"`
|
||||||
RPCGasCap uint64
|
RPCGasCap uint64
|
||||||
|
RPCEVMTimeout time.Duration
|
||||||
RPCTxFeeCap float64
|
RPCTxFeeCap float64
|
||||||
Checkpoint *params.TrustedCheckpoint `toml:",omitempty"`
|
Checkpoint *params.TrustedCheckpoint `toml:",omitempty"`
|
||||||
CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"`
|
CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"`
|
||||||
OverrideLondon *big.Int `toml:",omitempty"`
|
OverrideArrowGlacier *big.Int `toml:",omitempty"`
|
||||||
}
|
}
|
||||||
var enc Config
|
var enc Config
|
||||||
enc.Genesis = c.Genesis
|
enc.Genesis = c.Genesis
|
||||||
|
@ -98,10 +99,11 @@ func (c Config) MarshalTOML() (interface{}, error) {
|
||||||
enc.EnablePreimageRecording = c.EnablePreimageRecording
|
enc.EnablePreimageRecording = c.EnablePreimageRecording
|
||||||
enc.DocRoot = c.DocRoot
|
enc.DocRoot = c.DocRoot
|
||||||
enc.RPCGasCap = c.RPCGasCap
|
enc.RPCGasCap = c.RPCGasCap
|
||||||
|
enc.RPCEVMTimeout = c.RPCEVMTimeout
|
||||||
enc.RPCTxFeeCap = c.RPCTxFeeCap
|
enc.RPCTxFeeCap = c.RPCTxFeeCap
|
||||||
enc.Checkpoint = c.Checkpoint
|
enc.Checkpoint = c.Checkpoint
|
||||||
enc.CheckpointOracle = c.CheckpointOracle
|
enc.CheckpointOracle = c.CheckpointOracle
|
||||||
enc.OverrideLondon = c.OverrideLondon
|
enc.OverrideArrowGlacier = c.OverrideArrowGlacier
|
||||||
return &enc, nil
|
return &enc, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -145,10 +147,11 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
|
||||||
EnablePreimageRecording *bool
|
EnablePreimageRecording *bool
|
||||||
DocRoot *string `toml:"-"`
|
DocRoot *string `toml:"-"`
|
||||||
RPCGasCap *uint64
|
RPCGasCap *uint64
|
||||||
|
RPCEVMTimeout *time.Duration
|
||||||
RPCTxFeeCap *float64
|
RPCTxFeeCap *float64
|
||||||
Checkpoint *params.TrustedCheckpoint `toml:",omitempty"`
|
Checkpoint *params.TrustedCheckpoint `toml:",omitempty"`
|
||||||
CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"`
|
CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"`
|
||||||
OverrideLondon *big.Int `toml:",omitempty"`
|
OverrideArrowGlacier *big.Int `toml:",omitempty"`
|
||||||
}
|
}
|
||||||
var dec Config
|
var dec Config
|
||||||
if err := unmarshal(&dec); err != nil {
|
if err := unmarshal(&dec); err != nil {
|
||||||
|
@ -265,6 +268,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
|
||||||
if dec.RPCGasCap != nil {
|
if dec.RPCGasCap != nil {
|
||||||
c.RPCGasCap = *dec.RPCGasCap
|
c.RPCGasCap = *dec.RPCGasCap
|
||||||
}
|
}
|
||||||
|
if dec.RPCEVMTimeout != nil {
|
||||||
|
c.RPCEVMTimeout = *dec.RPCEVMTimeout
|
||||||
|
}
|
||||||
if dec.RPCTxFeeCap != nil {
|
if dec.RPCTxFeeCap != nil {
|
||||||
c.RPCTxFeeCap = *dec.RPCTxFeeCap
|
c.RPCTxFeeCap = *dec.RPCTxFeeCap
|
||||||
}
|
}
|
||||||
|
@ -274,8 +280,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
|
||||||
if dec.CheckpointOracle != nil {
|
if dec.CheckpointOracle != nil {
|
||||||
c.CheckpointOracle = dec.CheckpointOracle
|
c.CheckpointOracle = dec.CheckpointOracle
|
||||||
}
|
}
|
||||||
if dec.OverrideLondon != nil {
|
if dec.OverrideArrowGlacier != nil {
|
||||||
c.OverrideLondon = dec.OverrideLondon
|
c.OverrideArrowGlacier = dec.OverrideArrowGlacier
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -391,13 +391,14 @@ func (f *BlockFetcher) loop() {
|
||||||
blockAnnounceDOSMeter.Mark(1)
|
blockAnnounceDOSMeter.Mark(1)
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
if notification.number == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
// If we have a valid block number, check that it's potentially useful
|
// If we have a valid block number, check that it's potentially useful
|
||||||
if notification.number > 0 {
|
if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
|
||||||
if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
|
log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist)
|
||||||
log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist)
|
blockAnnounceDropMeter.Mark(1)
|
||||||
blockAnnounceDropMeter.Mark(1)
|
break
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
// All is well, schedule the announce if block's not yet downloading
|
// All is well, schedule the announce if block's not yet downloading
|
||||||
if _, ok := f.fetching[notification.hash]; ok {
|
if _, ok := f.fetching[notification.hash]; ok {
|
||||||
|
|
|
@ -18,6 +18,7 @@ package fetcher
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
mrand "math/rand"
|
mrand "math/rand"
|
||||||
"sort"
|
"sort"
|
||||||
|
@ -277,29 +278,27 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool)
|
||||||
)
|
)
|
||||||
errs := f.addTxs(txs)
|
errs := f.addTxs(txs)
|
||||||
for i, err := range errs {
|
for i, err := range errs {
|
||||||
if err != nil {
|
// Track the transaction hash if the price is too low for us.
|
||||||
// Track the transaction hash if the price is too low for us.
|
// Avoid re-request this transaction when we receive another
|
||||||
// Avoid re-request this transaction when we receive another
|
// announcement.
|
||||||
// announcement.
|
if errors.Is(err, core.ErrUnderpriced) || errors.Is(err, core.ErrReplaceUnderpriced) {
|
||||||
if err == core.ErrUnderpriced || err == core.ErrReplaceUnderpriced {
|
for f.underpriced.Cardinality() >= maxTxUnderpricedSetSize {
|
||||||
for f.underpriced.Cardinality() >= maxTxUnderpricedSetSize {
|
f.underpriced.Pop()
|
||||||
f.underpriced.Pop()
|
|
||||||
}
|
|
||||||
f.underpriced.Add(txs[i].Hash())
|
|
||||||
}
|
}
|
||||||
// Track a few interesting failure types
|
f.underpriced.Add(txs[i].Hash())
|
||||||
switch err {
|
}
|
||||||
case nil: // Noop, but need to handle to not count these
|
// Track a few interesting failure types
|
||||||
|
switch {
|
||||||
|
case err == nil: // Noop, but need to handle to not count these
|
||||||
|
|
||||||
case core.ErrAlreadyKnown:
|
case errors.Is(err, core.ErrAlreadyKnown):
|
||||||
duplicate++
|
duplicate++
|
||||||
|
|
||||||
case core.ErrUnderpriced, core.ErrReplaceUnderpriced:
|
case errors.Is(err, core.ErrUnderpriced) || errors.Is(err, core.ErrReplaceUnderpriced):
|
||||||
underpriced++
|
underpriced++
|
||||||
|
|
||||||
default:
|
default:
|
||||||
otherreject++
|
otherreject++
|
||||||
}
|
|
||||||
}
|
}
|
||||||
added = append(added, txs[i].Hash())
|
added = append(added, txs[i].Hash())
|
||||||
}
|
}
|
||||||
|
|
|
@ -62,6 +62,7 @@ func BenchmarkBloomBits32k(b *testing.B) {
|
||||||
const benchFilterCnt = 2000
|
const benchFilterCnt = 2000
|
||||||
|
|
||||||
func benchmarkBloomBits(b *testing.B, sectionSize uint64) {
|
func benchmarkBloomBits(b *testing.B, sectionSize uint64) {
|
||||||
|
b.Skip("test disabled: this tests presume (and modify) an existing datadir.")
|
||||||
benchDataDir := node.DefaultDataDir() + "/geth/chaindata"
|
benchDataDir := node.DefaultDataDir() + "/geth/chaindata"
|
||||||
b.Log("Running bloombits benchmark section size:", sectionSize)
|
b.Log("Running bloombits benchmark section size:", sectionSize)
|
||||||
|
|
||||||
|
@ -155,6 +156,7 @@ func clearBloomBits(db ethdb.Database) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkNoBloomBits(b *testing.B) {
|
func BenchmarkNoBloomBits(b *testing.B) {
|
||||||
|
b.Skip("test disabled: this tests presume (and modify) an existing datadir.")
|
||||||
benchDataDir := node.DefaultDataDir() + "/geth/chaindata"
|
benchDataDir := node.DefaultDataDir() + "/geth/chaindata"
|
||||||
b.Log("Running benchmark without bloombits")
|
b.Log("Running benchmark without bloombits")
|
||||||
db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false)
|
db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false)
|
||||||
|
|
|
@ -65,15 +65,19 @@ func BenchmarkFilters(b *testing.B) {
|
||||||
case 2403:
|
case 2403:
|
||||||
receipt := makeReceipt(addr1)
|
receipt := makeReceipt(addr1)
|
||||||
gen.AddUncheckedReceipt(receipt)
|
gen.AddUncheckedReceipt(receipt)
|
||||||
|
gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil))
|
||||||
case 1034:
|
case 1034:
|
||||||
receipt := makeReceipt(addr2)
|
receipt := makeReceipt(addr2)
|
||||||
gen.AddUncheckedReceipt(receipt)
|
gen.AddUncheckedReceipt(receipt)
|
||||||
|
gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil))
|
||||||
case 34:
|
case 34:
|
||||||
receipt := makeReceipt(addr3)
|
receipt := makeReceipt(addr3)
|
||||||
gen.AddUncheckedReceipt(receipt)
|
gen.AddUncheckedReceipt(receipt)
|
||||||
|
gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil))
|
||||||
case 99999:
|
case 99999:
|
||||||
receipt := makeReceipt(addr4)
|
receipt := makeReceipt(addr4)
|
||||||
gen.AddUncheckedReceipt(receipt)
|
gen.AddUncheckedReceipt(receipt)
|
||||||
|
gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil))
|
||||||
|
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
|
@ -99,29 +99,28 @@ func newTestBackend(t *testing.T, londonBlock *big.Int, pending bool) *testBacke
|
||||||
var (
|
var (
|
||||||
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
||||||
addr = crypto.PubkeyToAddress(key.PublicKey)
|
addr = crypto.PubkeyToAddress(key.PublicKey)
|
||||||
|
config = *params.TestChainConfig // needs copy because it is modified below
|
||||||
gspec = &core.Genesis{
|
gspec = &core.Genesis{
|
||||||
Config: params.TestChainConfig,
|
Config: &config,
|
||||||
Alloc: core.GenesisAlloc{addr: {Balance: big.NewInt(math.MaxInt64)}},
|
Alloc: core.GenesisAlloc{addr: {Balance: big.NewInt(math.MaxInt64)}},
|
||||||
}
|
}
|
||||||
signer = types.LatestSigner(gspec.Config)
|
signer = types.LatestSigner(gspec.Config)
|
||||||
)
|
)
|
||||||
if londonBlock != nil {
|
config.LondonBlock = londonBlock
|
||||||
gspec.Config.LondonBlock = londonBlock
|
config.ArrowGlacierBlock = londonBlock
|
||||||
signer = types.LatestSigner(gspec.Config)
|
|
||||||
} else {
|
|
||||||
gspec.Config.LondonBlock = nil
|
|
||||||
}
|
|
||||||
engine := ethash.NewFaker()
|
engine := ethash.NewFaker()
|
||||||
db := rawdb.NewMemoryDatabase()
|
db := rawdb.NewMemoryDatabase()
|
||||||
genesis, _ := gspec.Commit(db)
|
genesis, err := gspec.Commit(db)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
// Generate testing blocks
|
// Generate testing blocks
|
||||||
blocks, _ := core.GenerateChain(gspec.Config, genesis, engine, db, testHead+1, func(i int, b *core.BlockGen) {
|
blocks, _ := core.GenerateChain(gspec.Config, genesis, engine, db, testHead+1, func(i int, b *core.BlockGen) {
|
||||||
b.SetCoinbase(common.Address{1})
|
b.SetCoinbase(common.Address{1})
|
||||||
|
|
||||||
var tx *types.Transaction
|
var txdata types.TxData
|
||||||
if londonBlock != nil && b.Number().Cmp(londonBlock) >= 0 {
|
if londonBlock != nil && b.Number().Cmp(londonBlock) >= 0 {
|
||||||
txdata := &types.DynamicFeeTx{
|
txdata = &types.DynamicFeeTx{
|
||||||
ChainID: gspec.Config.ChainID,
|
ChainID: gspec.Config.ChainID,
|
||||||
Nonce: b.TxNonce(addr),
|
Nonce: b.TxNonce(addr),
|
||||||
To: &common.Address{},
|
To: &common.Address{},
|
||||||
|
@ -130,9 +129,8 @@ func newTestBackend(t *testing.T, londonBlock *big.Int, pending bool) *testBacke
|
||||||
GasTipCap: big.NewInt(int64(i+1) * params.GWei),
|
GasTipCap: big.NewInt(int64(i+1) * params.GWei),
|
||||||
Data: []byte{},
|
Data: []byte{},
|
||||||
}
|
}
|
||||||
tx = types.NewTx(txdata)
|
|
||||||
} else {
|
} else {
|
||||||
txdata := &types.LegacyTx{
|
txdata = &types.LegacyTx{
|
||||||
Nonce: b.TxNonce(addr),
|
Nonce: b.TxNonce(addr),
|
||||||
To: &common.Address{},
|
To: &common.Address{},
|
||||||
Gas: 21000,
|
Gas: 21000,
|
||||||
|
@ -140,18 +138,13 @@ func newTestBackend(t *testing.T, londonBlock *big.Int, pending bool) *testBacke
|
||||||
Value: big.NewInt(100),
|
Value: big.NewInt(100),
|
||||||
Data: []byte{},
|
Data: []byte{},
|
||||||
}
|
}
|
||||||
tx = types.NewTx(txdata)
|
|
||||||
}
|
}
|
||||||
tx, err := types.SignTx(tx, signer, key)
|
b.AddTx(types.MustSignNewTx(key, signer, txdata))
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("failed to create tx: %v", err)
|
|
||||||
}
|
|
||||||
b.AddTx(tx)
|
|
||||||
})
|
})
|
||||||
// Construct testing chain
|
// Construct testing chain
|
||||||
diskdb := rawdb.NewMemoryDatabase()
|
diskdb := rawdb.NewMemoryDatabase()
|
||||||
gspec.Commit(diskdb)
|
gspec.Commit(diskdb)
|
||||||
chain, err := core.NewBlockChain(diskdb, nil, gspec.Config, engine, vm.Config{}, nil, nil)
|
chain, err := core.NewBlockChain(diskdb, &core.CacheConfig{TrieCleanNoPrefetch: true}, &config, engine, vm.Config{}, nil, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to create local chain, %v", err)
|
t.Fatalf("Failed to create local chain, %v", err)
|
||||||
}
|
}
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue