build: upgrade to go 1.19 (#25726)
This changes the CI and release builds to use the latest Go version. It also upgrades golangci-lint to a newer version compatible with Go 1.19.

In Go 1.19, godoc has gained official support for links and lists. The syntax for code blocks in doc comments has changed and now requires a leading tab character. gofmt adapts comments to the new syntax automatically, so this PR contains a lot of comment re-formatting changes. We need to apply the new format in order to pass the CI lint stage with Go 1.19.

With the linter upgrade, I have decided to disable 'gosec': it produces too many false-positive warnings. The 'deadcode' and 'varcheck' linters have also been removed because golangci-lint warns that they are unmaintained. 'unused' provides similar coverage and is already enabled, so we don't lose much with this change.
parent 389021a5af
commit b628d72766
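To make the doc-comment change concrete, here is a minimal, hypothetical sketch (not taken from this diff; the package and the Sum function are invented for illustration) of how a comment reads before and after gofmt under Go 1.19, where an indented code block needs a leading tab and list items start with a dash:

package example

// Sum returns the sum of xs.
//
// Before Go 1.19, an example like the following had no special syntax and was
// rendered by godoc as ordinary prose:
//
// total := Sum([]int{1, 2, 3})
//
// Under Go 1.19, gofmt rewrites it so the code block is indented with a leading
// tab and godoc renders it preformatted:
//
//	total := Sum([]int{1, 2, 3})
//
// Lists are now officially supported as well:
//   - code blocks need a leading tab
//   - list items start with a dash
func Sum(xs []int) int {
	total := 0
	for _, x := range xs {
		total += x // accumulate each element of xs
	}
	return total
}

Running gofmt from Go 1.19 over the tree rewrites existing comments into this shape automatically, which is where most of the comment churn in the Go files below comes from.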
@@ -12,7 +12,6 @@ run:
 linters:
   disable-all: true
   enable:
-    - deadcode
     - goconst
     - goimports
     - gosimple
@@ -20,14 +19,12 @@ linters:
     - ineffassign
     - misspell
     - unconvert
-    - varcheck
     - typecheck
     - unused
     - staticcheck
     - bidichk
     - durationcheck
     - exportloopref
-    - gosec
     - whitespace
 
     # - structcheck # lots of false positives
@@ -45,11 +42,6 @@ linters-settings:
   goconst:
     min-len: 3 # minimum length of string constant
    min-occurrences: 6 # minimum number of occurrences
-  gosec:
-    excludes:
-      - G404 # Use of weak random number generator - lots of FP
-      - G107 # Potential http request -- those are intentional
-      - G306 # G306: Expect WriteFile permissions to be 0600 or less
 
 issues:
   exclude-rules:
@@ -58,16 +50,15 @@ issues:
         - deadcode
         - staticcheck
     - path: internal/build/pgp.go
-      text: 'SA1019: package golang.org/x/crypto/openpgp is deprecated'
+      text: 'SA1019: "golang.org/x/crypto/openpgp" is deprecated: this package is unmaintained except for security fixes.'
     - path: core/vm/contracts.go
-      text: 'SA1019: package golang.org/x/crypto/ripemd160 is deprecated'
+      text: 'SA1019: "golang.org/x/crypto/ripemd160" is deprecated: RIPEMD-160 is a legacy hash and should not be used for new applications.'
     - path: accounts/usbwallet/trezor.go
-      text: 'SA1019: package github.com/golang/protobuf/proto is deprecated'
+      text: 'SA1019: "github.com/golang/protobuf/proto" is deprecated: Use the "google.golang.org/protobuf/proto" package instead.'
     - path: accounts/usbwallet/trezor/
-      text: 'SA1019: package github.com/golang/protobuf/proto is deprecated'
+      text: 'SA1019: "github.com/golang/protobuf/proto" is deprecated: Use the "google.golang.org/protobuf/proto" package instead.'
   exclude:
     - 'SA1019: event.TypeMux is deprecated: use Feed'
     - 'SA1019: strings.Title is deprecated'
     - 'SA1019: strings.Title has been deprecated since Go 1.18 and an alternative has been available since Go 1.0: The rule Title uses for word boundaries does not handle Unicode punctuation properly. Use golang.org/x/text/cases instead.'
     - 'SA1029: should not use built-in type string as key for value'
-    - 'G306: Expect WriteFile permissions to be 0600 or less'
.travis.yml (20 lines changed)
@@ -16,7 +16,7 @@ jobs:
     - stage: lint
       os: linux
       dist: bionic
-      go: 1.18.x
+      go: 1.19.x
       env:
         - lint
       git:
@@ -31,7 +31,7 @@ jobs:
       os: linux
       arch: amd64
       dist: bionic
-      go: 1.18.x
+      go: 1.19.x
       env:
         - docker
       services:
@@ -48,7 +48,7 @@ jobs:
       os: linux
       arch: arm64
       dist: bionic
-      go: 1.18.x
+      go: 1.19.x
       env:
         - docker
       services:
@@ -65,7 +65,7 @@ jobs:
       if: type = push
       os: linux
       dist: bionic
-      go: 1.18.x
+      go: 1.19.x
       env:
         - ubuntu-ppa
         - GO111MODULE=on
@@ -90,7 +90,7 @@ jobs:
       os: linux
       dist: bionic
       sudo: required
-      go: 1.18.x
+      go: 1.19.x
       env:
         - azure-linux
         - GO111MODULE=on
@@ -162,7 +162,7 @@ jobs:
     - stage: build
       if: type = push
       os: osx
-      go: 1.18.x
+      go: 1.19.x
       env:
         - azure-osx
         - azure-ios
@@ -194,7 +194,7 @@ jobs:
       os: linux
       arch: amd64
       dist: bionic
-      go: 1.18.x
+      go: 1.19.x
       env:
         - GO111MODULE=on
       script:
@@ -214,7 +214,7 @@ jobs:
     - stage: build
       os: linux
       dist: bionic
-      go: 1.17.x
+      go: 1.18.x
       env:
         - GO111MODULE=on
       script:
@@ -225,7 +225,7 @@ jobs:
       if: type = cron
       os: linux
       dist: bionic
-      go: 1.18.x
+      go: 1.19.x
       env:
         - azure-purge
         - GO111MODULE=on
@@ -239,7 +239,7 @@ jobs:
       if: type = cron
       os: linux
       dist: bionic
-      go: 1.18.x
+      go: 1.19.x
       env:
         - GO111MODULE=on
       script:
@@ -165,8 +165,9 @@ func TestInvalidABI(t *testing.T) {
 
 // TestConstructor tests a constructor function.
 // The test is based on the following contract:
-// contract TestConstructor {
-// constructor(uint256 a, uint256 b) public{}
-// }
+//
+//	contract TestConstructor {
+//	constructor(uint256 a, uint256 b) public{}
+//	}
 func TestConstructor(t *testing.T) {
 	json := `[{ "inputs": [{"internalType": "uint256","name": "a","type": "uint256" },{ "internalType": "uint256","name": "b","type": "uint256"}],"stateMutability": "nonpayable","type": "constructor"}]`
@@ -724,16 +725,19 @@ func TestBareEvents(t *testing.T) {
 }
 
 // TestUnpackEvent is based on this contract:
-// contract T {
-// event received(address sender, uint amount, bytes memo);
-// event receivedAddr(address sender);
-// function receive(bytes memo) external payable {
-// received(msg.sender, msg.value, memo);
-// receivedAddr(msg.sender);
-// }
-// }
+//
+//	contract T {
+//	event received(address sender, uint amount, bytes memo);
+//	event receivedAddr(address sender);
+//	function receive(bytes memo) external payable {
+//	received(msg.sender, msg.value, memo);
+//	receivedAddr(msg.sender);
+//	}
+//	}
+//
 // When receive("X") is called with sender 0x00... and value 1, it produces this tx receipt:
-// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]}
+//
+//	receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]}
 func TestUnpackEvent(t *testing.T) {
 	const abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"receive","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"}],"name":"receivedAddr","type":"event"}]`
 	abi, err := JSON(strings.NewReader(abiJSON))
@@ -1078,8 +1082,9 @@ func TestDoubleDuplicateMethodNames(t *testing.T) {
 // TestDoubleDuplicateEventNames checks that if send0 already exists, there won't be a name
 // conflict and that the second send event will be renamed send1.
 // The test runs the abi of the following contract.
-// contract DuplicateEvent {
-// event send(uint256 a);
-// event send0();
-// event send();
-// }
+//
+//	contract DuplicateEvent {
+//	event send(uint256 a);
+//	event send0();
+//	event send();
+//	}
@@ -1106,7 +1111,8 @@ func TestDoubleDuplicateEventNames(t *testing.T) {
 // TestUnnamedEventParam checks that an event with unnamed parameters is
 // correctly handled.
 // The test runs the abi of the following contract.
-// contract TestEvent {
-// event send(uint256, uint256);
-// }
+//
+//	contract TestEvent {
+//	event send(uint256, uint256);
+//	}
 func TestUnnamedEventParam(t *testing.T) {
@@ -93,17 +93,18 @@ func TestSimulatedBackend(t *testing.T) {
 
 var testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 
 // the following is based on this contract:
-// contract T {
-// event received(address sender, uint amount, bytes memo);
-// event receivedAddr(address sender);
-//
-// function receive(bytes calldata memo) external payable returns (string memory res) {
-// emit received(msg.sender, msg.value, memo);
-// emit receivedAddr(msg.sender);
-// return "hello world";
-// }
-// }
+//
+//	contract T {
+//	event received(address sender, uint amount, bytes memo);
+//	event receivedAddr(address sender);
+//
+//	function receive(bytes calldata memo) external payable returns (string memory res) {
+//	emit received(msg.sender, msg.value, memo);
+//	emit receivedAddr(msg.sender);
+//	return "hello world";
+//	}
+//	}
 const abiJSON = `[ { "constant": false, "inputs": [ { "name": "memo", "type": "bytes" } ], "name": "receive", "outputs": [ { "name": "res", "type": "string" } ], "payable": true, "stateMutability": "payable", "type": "function" }, { "anonymous": false, "inputs": [ { "indexed": false, "name": "sender", "type": "address" }, { "indexed": false, "name": "amount", "type": "uint256" }, { "indexed": false, "name": "memo", "type": "bytes" } ], "name": "received", "type": "event" }, { "anonymous": false, "inputs": [ { "indexed": false, "name": "sender", "type": "address" } ], "name": "receivedAddr", "type": "event" } ]`
 const abiBin = `0x608060405234801561001057600080fd5b506102a0806100206000396000f3fe60806040526004361061003b576000357c010000000000000000000000000000000000000000000000000000000090048063a69b6ed014610040575b600080fd5b6100b76004803603602081101561005657600080fd5b810190808035906020019064010000000081111561007357600080fd5b82018360208201111561008557600080fd5b803590602001918460018302840111640100000000831117156100a757600080fd5b9091929391929390505050610132565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156100f75780820151818401526020810190506100dc565b50505050905090810190601f1680156101245780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60607f75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed33348585604051808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001848152602001806020018281038252848482818152602001925080828437600081840152601f19601f8201169050808301925050509550505050505060405180910390a17f46923992397eac56cf13058aced2a1871933622717e27b24eabc13bf9dd329c833604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a16040805190810160405280600b81526020017f68656c6c6f20776f726c6400000000000000000000000000000000000000000081525090509291505056fea165627a7a72305820ff0c57dad254cfeda48c9cfb47f1353a558bccb4d1bc31da1dae69315772d29e0029`
 const deployedCode = `60806040526004361061003b576000357c010000000000000000000000000000000000000000000000000000000090048063a69b6ed014610040575b600080fd5b6100b76004803603602081101561005657600080fd5b810190808035906020019064010000000081111561007357600080fd5b82018360208201111561008557600080fd5b803590602001918460018302840111640100000000831117156100a757600080fd5b9091929391929390505050610132565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156100f75780820151818401526020810190506100dc565b50505050905090810190601f1680156101245780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60607f75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed33348585604051808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001848152602001806020018281038252848482818152602001925080828437600081840152601f19601f8201169050808301925050509550505050505060405180910390a17f46923992397eac56cf13058aced2a1871933622717e27b24eabc13bf9dd329c833604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a16040805190810160405280600b81526020017f68656c6c6f20776f726c6400000000000000000000000000000000000000000081525090509291505056fea165627a7a72305820ff0c57dad254cfeda48c9cfb47f1353a558bccb4d1bc31da1dae69315772d29e0029`
@@ -417,12 +418,13 @@ func TestEstimateGas(t *testing.T) {
 	/*
 	pragma solidity ^0.6.4;
 	contract GasEstimation {
 		function PureRevert() public { revert(); }
 		function Revert() public { revert("revert reason");}
 		function OOG() public { for (uint i = 0; ; i++) {}}
 		function Assert() public { assert(false);}
 		function Valid() public {}
-	}*/
+	}
+	*/
 	const contractAbi = "[{\"inputs\":[],\"name\":\"Assert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"OOG\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PureRevert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"Revert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"Valid\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]"
 	const contractBin = "0x60806040523480156100115760006000fd5b50610017565b61016e806100266000396000f3fe60806040523480156100115760006000fd5b506004361061005c5760003560e01c806350f6fe3414610062578063aa8b1d301461006c578063b9b046f914610076578063d8b9839114610080578063e09fface1461008a5761005c565b60006000fd5b61006a610094565b005b6100746100ad565b005b61007e6100b5565b005b6100886100c2565b005b610092610135565b005b6000600090505b5b808060010191505061009b565b505b565b60006000fd5b565b600015156100bf57fe5b5b565b6040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600d8152602001807f72657665727420726561736f6e0000000000000000000000000000000000000081526020015060200191505060405180910390fd5b565b5b56fea2646970667358221220345bbcbb1a5ecf22b53a78eaebf95f8ee0eceff6d10d4b9643495084d2ec934a64736f6c63430006040033"
 
@@ -994,7 +996,8 @@ func TestCodeAt(t *testing.T) {
 }
 
 // When receive("X") is called with sender 0x00... and value 1, it produces this tx receipt:
-// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]}
+//
+//	receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]}
 func TestPendingAndCallContract(t *testing.T) {
 	testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
 	sim := simTestBackend(testAddr)
@@ -1057,27 +1060,27 @@ func TestPendingAndCallContract(t *testing.T) {
 // This test is based on the following contract:
 /*
 contract Reverter {
 	function revertString() public pure{
 		require(false, "some error");
 	}
 	function revertNoString() public pure {
 		require(false, "");
 	}
 	function revertASM() public pure {
 		assembly {
 			revert(0x0, 0x0)
 		}
 	}
 	function noRevert() public pure {
 		assembly {
 			// Assembles something that looks like require(false, "some error") but is not reverted
 			mstore(0x0, 0x08c379a000000000000000000000000000000000000000000000000000000000)
 			mstore(0x4, 0x0000000000000000000000000000000000000000000000000000000000000020)
 			mstore(0x24, 0x000000000000000000000000000000000000000000000000000000000000000a)
 			mstore(0x44, 0x736f6d65206572726f7200000000000000000000000000000000000000000000)
 			return(0x0, 0x64)
 		}
 	}
 }*/
 func TestCallContractRevert(t *testing.T) {
 	testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
@@ -1204,11 +1207,11 @@ func TestFork(t *testing.T) {
 /*
 Example contract to test event emission:
 
 pragma solidity >=0.7.0 <0.9.0;
 contract Callable {
 	event Called();
 	function Call() public { emit Called(); }
 }
 */
 const callableAbi = "[{\"anonymous\":false,\"inputs\":[],\"name\":\"Called\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"Call\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]"
 
@@ -1226,7 +1229,7 @@ const callableBin = "6080604052348015600f57600080fd5b5060998061001e6000396000f3f
 // 7. Mine two blocks to trigger a reorg.
 // 8. Check that the event was removed.
 // 9. Re-send the transaction and mine a block.
 // 10. Check that the event was reborn.
 func TestForkLogsReborn(t *testing.T) {
 	testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
 	sim := simTestBackend(testAddr)
@@ -25,16 +25,19 @@ import (
 )
 
 // ConvertType converts an interface of a runtime type into a interface of the
-// given type
-// e.g. turn
-// var fields []reflect.StructField
-// fields = append(fields, reflect.StructField{
-// Name: "X",
-// Type: reflect.TypeOf(new(big.Int)),
-// Tag: reflect.StructTag("json:\"" + "x" + "\""),
-// }
-// into
-// type TupleT struct { X *big.Int }
+// given type, e.g. turn this code:
+//
+//	var fields []reflect.StructField
+//
+//	fields = append(fields, reflect.StructField{
+//		Name: "X",
+//		Type: reflect.TypeOf(new(big.Int)),
+//		Tag: reflect.StructTag("json:\"" + "x" + "\""),
+//	}
+//
+// into:
+//
+//	type TupleT struct { X *big.Int }
 func ConvertType(in interface{}, proto interface{}) interface{} {
 	protoType := reflect.TypeOf(proto)
 	if reflect.TypeOf(in).ConvertibleTo(protoType) {
@@ -170,11 +173,13 @@ func setStruct(dst, src reflect.Value) error {
 }
 
 // mapArgNamesToStructFields maps a slice of argument names to struct fields.
-// first round: for each Exportable field that contains a `abi:""` tag
-// and this field name exists in the given argument name list, pair them together.
-// second round: for each argument name that has not been already linked,
-// find what variable is expected to be mapped into, if it exists and has not been
-// used, pair them.
+//
+// first round: for each Exportable field that contains a `abi:""` tag and this field name
+// exists in the given argument name list, pair them together.
+//
+// second round: for each argument name that has not been already linked, find what
+// variable is expected to be mapped into, if it exists and has not been used, pair them.
+//
 // Note this function assumes the given value is a struct value.
 func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[string]string, error) {
 	typ := value.Type()
@@ -21,15 +21,14 @@ import "fmt"
 // ResolveNameConflict returns the next available name for a given thing.
 // This helper can be used for lots of purposes:
 //
 // - In solidity function overloading is supported, this function can fix
 //   the name conflicts of overloaded functions.
 // - In golang binding generation, the parameter(in function, event, error,
 //   and struct definition) name will be converted to camelcase style which
 //   may eventually lead to name conflicts.
 //
-// Name conflicts are mostly resolved by adding number suffix.
-// e.g. if the abi contains Methods send, send1
-// ResolveNameConflict would return send2 for input send.
+// Name conflicts are mostly resolved by adding number suffix. e.g. if the abi contains
+// Methods "send" and "send1", ResolveNameConflict would return "send2" for input "send".
 func ResolveNameConflict(rawName string, used func(string) bool) string {
 	name := rawName
 	ok := used(name)
@@ -177,7 +177,8 @@ type Backend interface {
 // safely used to calculate a signature from.
 //
 // The hash is calculated as
-// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
+//
+//	keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
 //
 // This gives context to the signed message and prevents signing of transactions.
 func TextHash(data []byte) []byte {
@@ -189,7 +190,8 @@ func TextHash(data []byte) []byte {
 // safely used to calculate a signature from.
 //
 // The hash is calculated as
-// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
+//
+//	keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
 //
 // This gives context to the signed message and prevents signing of transactions.
 func TextAndHash(data []byte) ([]byte, string) {
@@ -46,7 +46,7 @@ var LegacyLedgerBaseDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000
 // The BIP-32 spec https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki
 // defines derivation paths to be of the form:
 //
-//   m / purpose' / coin_type' / account' / change / address_index
+//	m / purpose' / coin_type' / account' / change / address_index
 //
 // The BIP-44 spec https://github.com/bitcoin/bips/blob/master/bip-0044.mediawiki
 // defines that the `purpose` be 44' (or 0x8000002C) for crypto currencies, and
@@ -879,6 +879,7 @@ func (s *Session) walletStatus() (*walletStatus, error) {
 }
 
 // derivationPath fetches the wallet's current derivation path from the card.
+//
 //lint:ignore U1000 needs to be added to the console interface
 func (s *Session) derivationPath() (accounts.DerivationPath, error) {
 	response, err := s.Channel.transmitEncrypted(claSCWallet, insStatus, statusP1Path, 0, nil)
@@ -994,6 +995,7 @@ func (s *Session) derive(path accounts.DerivationPath) (accounts.Account, error)
 }
 
 // keyExport contains information on an exported keypair.
+//
 //lint:ignore U1000 needs to be added to the console interface
 type keyExport struct {
 	PublicKey []byte `asn1:"tag:0"`
@@ -1001,6 +1003,7 @@ type keyExport struct {
 }
 
 // publicKey returns the public key for the current derivation path.
+//
 //lint:ignore U1000 needs to be added to the console interface
 func (s *Session) publicKey() ([]byte, error) {
 	response, err := s.Channel.transmitEncrypted(claSCWallet, insExportKey, exportP1Any, exportP2Pubkey, nil)
@@ -92,10 +92,9 @@ func (u *URL) UnmarshalJSON(input []byte) error {
 
 // Cmp compares x and y and returns:
 //
 //	-1 if x < y
 //	 0 if x == y
 //	+1 if x > y
-//
 func (u URL) Cmp(url URL) int {
 	if u.Scheme == url.Scheme {
 		return strings.Compare(u.Path, url.Path)
@@ -195,18 +195,18 @@ func (w *ledgerDriver) SignTypedMessage(path accounts.DerivationPath, domainHash
 //
 // The version retrieval protocol is defined as follows:
 //
 //	CLA | INS | P1 | P2 | Lc | Le
 //	----+-----+----+----+----+---
 //	 E0 | 06  | 00 | 00 | 00 | 04
 //
 // With no input data, and the output data being:
 //
 //	Description                                          | Length
 //	-----------------------------------------------------+--------
 //	Flags 01: arbitrary data signature enabled by user   | 1 byte
 //	Application major version                            | 1 byte
 //	Application minor version                            | 1 byte
 //	Application patch version                            | 1 byte
 func (w *ledgerDriver) ledgerVersion() ([3]byte, error) {
 	// Send the request and wait for the response
 	reply, err := w.ledgerExchange(ledgerOpGetConfiguration, 0, 0, nil)
@@ -227,32 +227,32 @@ func (w *ledgerDriver) ledgerVersion() ([3]byte, error) {
 //
 // The address derivation protocol is defined as follows:
 //
 //	CLA | INS | P1 | P2 | Lc  | Le
 //	----+-----+----+----+-----+---
 //	 E0 | 02  | 00 return address
 //	            01 display address and confirm before returning
 //	          | 00: do not return the chain code
 //	          | 01: return the chain code
 //	               | var | 00
 //
 // Where the input data is:
 //
 //	Description                                       | Length
 //	--------------------------------------------------+--------
 //	Number of BIP 32 derivations to perform (max 10)  | 1 byte
 //	First derivation index (big endian)               | 4 bytes
 //	...                                               | 4 bytes
 //	Last derivation index (big endian)                | 4 bytes
 //
 // And the output data is:
 //
 //	Description             | Length
 //	------------------------+-------------------
 //	Public Key length       | 1 byte
 //	Uncompressed Public Key | arbitrary
 //	Ethereum address length | 1 byte
 //	Ethereum address        | 40 bytes hex ascii
 //	Chain code if requested | 32 bytes
 func (w *ledgerDriver) ledgerDerive(derivationPath []uint32) (common.Address, error) {
 	// Flatten the derivation path into the Ledger request
 	path := make([]byte, 1+4*len(derivationPath))
@@ -290,35 +290,35 @@ func (w *ledgerDriver) ledgerDerive(derivationPath []uint32) (common.Address, er
 //
 // The transaction signing protocol is defined as follows:
 //
 //	CLA | INS | P1 | P2 | Lc  | Le
 //	----+-----+----+----+-----+---
 //	 E0 | 04  | 00: first transaction data block
 //	            80: subsequent transaction data block
 //	          | 00 | variable | variable
 //
 // Where the input for the first transaction block (first 255 bytes) is:
 //
 //	Description                                       | Length
 //	--------------------------------------------------+----------
 //	Number of BIP 32 derivations to perform (max 10)  | 1 byte
 //	First derivation index (big endian)               | 4 bytes
 //	...                                               | 4 bytes
 //	Last derivation index (big endian)                | 4 bytes
 //	RLP transaction chunk                             | arbitrary
 //
 // And the input for subsequent transaction blocks (first 255 bytes) are:
 //
 //	Description           | Length
 //	----------------------+----------
 //	RLP transaction chunk | arbitrary
 //
 // And the output data is:
 //
 //	Description | Length
 //	------------+---------
 //	signature V | 1 byte
 //	signature R | 32 bytes
 //	signature S | 32 bytes
 func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error) {
 	// Flatten the derivation path into the Ledger request
 	path := make([]byte, 1+4*len(derivationPath))
@@ -392,30 +392,28 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
 //
 // The signing protocol is defined as follows:
 //
 //	CLA | INS | P1 | P2                          | Lc  | Le
 //	----+-----+----+-----------------------------+-----+---
 //	 E0 | 0C  | 00 | implementation version : 00 | variable | variable
 //
 // Where the input is:
 //
 //	Description                                       | Length
 //	--------------------------------------------------+----------
 //	Number of BIP 32 derivations to perform (max 10)  | 1 byte
 //	First derivation index (big endian)               | 4 bytes
 //	...                                               | 4 bytes
 //	Last derivation index (big endian)                | 4 bytes
 //	domain hash                                       | 32 bytes
 //	message hash                                      | 32 bytes
-//
-//
 //
 // And the output data is:
 //
 //	Description | Length
 //	------------+---------
 //	signature V | 1 byte
 //	signature R | 32 bytes
 //	signature S | 32 bytes
 func (w *ledgerDriver) ledgerSignTypedMessage(derivationPath []uint32, domainHash []byte, messageHash []byte) ([]byte, error) {
 	// Flatten the derivation path into the Ledger request
 	path := make([]byte, 1+4*len(derivationPath))
@@ -454,12 +452,12 @@ func (w *ledgerDriver) ledgerSignTypedMessage(derivationPath []uint32, domainHas
 //
 // The common transport header is defined as follows:
 //
 //	Description                           | Length
 //	--------------------------------------+----------
 //	Communication channel ID (big endian) | 2 bytes
 //	Command tag                           | 1 byte
 //	Packet sequence index (big endian)    | 2 bytes
 //	Payload                               | arbitrary
 //
 // The Communication channel ID allows commands multiplexing over the same
 // physical link. It is not used for the time being, and should be set to 0101
@@ -473,15 +471,15 @@ func (w *ledgerDriver) ledgerSignTypedMessage(derivationPath []uint32, domainHas
 //
 // APDU Command payloads are encoded as follows:
 //
 //	Description              | Length
 //	-----------------------------------
 //	APDU length (big endian) | 2 bytes
 //	APDU CLA                 | 1 byte
 //	APDU INS                 | 1 byte
 //	APDU P1                  | 1 byte
 //	APDU P2                  | 1 byte
 //	APDU length              | 1 byte
 //	Optional APDU data       | arbitrary
 func (w *ledgerDriver) ledgerExchange(opcode ledgerOpcode, p1 ledgerParam1, p2 ledgerParam2, data []byte) ([]byte, error) {
 	// Construct the message payload, possibly split into multiple chunks
 	apdu := make([]byte, 2, 7+len(data))
@@ -84,15 +84,15 @@ func (w *trezorDriver) Status() (string, error) {
 
 // Open implements usbwallet.driver, attempting to initialize the connection to
 // the Trezor hardware wallet. Initializing the Trezor is a two or three phase operation:
-// * The first phase is to initialize the connection and read the wallet's
-//   features. This phase is invoked if the provided passphrase is empty. The
-//   device will display the pinpad as a result and will return an appropriate
-//   error to notify the user that a second open phase is needed.
-// * The second phase is to unlock access to the Trezor, which is done by the
-//   user actually providing a passphrase mapping a keyboard keypad to the pin
-//   number of the user (shuffled according to the pinpad displayed).
-// * If needed the device will ask for passphrase which will require calling
-//   open again with the actual passphrase (3rd phase)
+//   - The first phase is to initialize the connection and read the wallet's
+//     features. This phase is invoked if the provided passphrase is empty. The
+//     device will display the pinpad as a result and will return an appropriate
+//     error to notify the user that a second open phase is needed.
+//   - The second phase is to unlock access to the Trezor, which is done by the
+//     user actually providing a passphrase mapping a keyboard keypad to the pin
+//     number of the user (shuffled according to the pinpad displayed).
+//   - If needed the device will ask for passphrase which will require calling
+//     open again with the actual passphrase (3rd phase)
 func (w *trezorDriver) Open(device io.ReadWriter, passphrase string) error {
 	w.device, w.failure = device, nil
 
@@ -94,7 +94,7 @@ func (Failure_FailureType) EnumDescriptor() ([]byte, []int) {
 	return fileDescriptor_aaf30d059fdbc38d, []int{1, 0}
 }
 
-//*
+// *
 // Type of button request
 type ButtonRequest_ButtonRequestType int32
 
@@ -175,7 +175,7 @@ func (ButtonRequest_ButtonRequestType) EnumDescriptor() ([]byte, []int) {
 	return fileDescriptor_aaf30d059fdbc38d, []int{2, 0}
 }
 
-//*
+// *
 // Type of PIN request
 type PinMatrixRequest_PinMatrixRequestType int32
 
@@ -220,7 +220,7 @@ func (PinMatrixRequest_PinMatrixRequestType) EnumDescriptor() ([]byte, []int) {
 	return fileDescriptor_aaf30d059fdbc38d, []int{4, 0}
 }
 
-//*
+// *
 // Response: Success of the previous request
 // @end
 type Success struct {
@@ -262,7 +262,7 @@ func (m *Success) GetMessage() string {
 	return ""
 }
 
-//*
+// *
 // Response: Failure of the previous request
 // @end
 type Failure struct {
@@ -312,7 +312,7 @@ func (m *Failure) GetMessage() string {
 	return ""
 }
 
-//*
+// *
 // Response: Device is waiting for HW button press.
 // @auxstart
 // @next ButtonAck
@@ -363,7 +363,7 @@ func (m *ButtonRequest) GetData() string {
 	return ""
 }
 
-//*
+// *
 // Request: Computer agrees to wait for HW button press
 // @auxend
 type ButtonAck struct {
@@ -397,7 +397,7 @@ func (m *ButtonAck) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_ButtonAck proto.InternalMessageInfo
 
-//*
+// *
 // Response: Device is asking computer to show PIN matrix and awaits PIN encoded using this matrix scheme
 // @auxstart
 // @next PinMatrixAck
@@ -440,7 +440,7 @@ func (m *PinMatrixRequest) GetType() PinMatrixRequest_PinMatrixRequestType {
 	return PinMatrixRequest_PinMatrixRequestType_Current
 }
 
-//*
+// *
 // Request: Computer responds with encoded PIN
 // @auxend
 type PinMatrixAck struct {
@@ -482,7 +482,7 @@ func (m *PinMatrixAck) GetPin() string {
 	return ""
 }
 
-//*
+// *
 // Response: Device awaits encryption passphrase
 // @auxstart
 // @next PassphraseAck
@@ -525,7 +525,7 @@ func (m *PassphraseRequest) GetOnDevice() bool {
 	return false
 }
 
-//*
+// *
 // Request: Send passphrase back
 // @next PassphraseStateRequest
 type PassphraseAck struct {
@@ -575,7 +575,7 @@ func (m *PassphraseAck) GetState() []byte {
 	return nil
 }
 
-//*
+// *
 // Response: Device awaits passphrase state
 // @next PassphraseStateAck
 type PassphraseStateRequest struct {
@@ -617,7 +617,7 @@ func (m *PassphraseStateRequest) GetState() []byte {
 	return nil
 }
 
-//*
+// *
 // Request: Send passphrase state back
 // @auxend
 type PassphraseStateAck struct {
@@ -651,7 +651,7 @@ func (m *PassphraseStateAck) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_PassphraseStateAck proto.InternalMessageInfo
 
-//*
+// *
 // Structure representing BIP32 (hierarchical deterministic) node
 // Used for imports of private key into the device and exporting public key out of device
 // @embed
 
@@ -21,7 +21,7 @@ var _ = math.Inf
 // proto package needs to be updated.
 const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
 
-//*
+// *
 // Request: Ask device for public key corresponding to address_n path
 // @start
 // @next EthereumPublicKey
@@ -73,7 +73,7 @@ func (m *EthereumGetPublicKey) GetShowDisplay() bool {
 	return false
 }
 
-//*
+// *
 // Response: Contains public key derived from device private seed
 // @end
 type EthereumPublicKey struct {
@@ -123,7 +123,7 @@ func (m *EthereumPublicKey) GetXpub() string {
 	return ""
 }
 
-//*
+// *
 // Request: Ask device for Ethereum address corresponding to address_n path
 // @start
 // @next EthereumAddress
@@ -175,7 +175,7 @@ func (m *EthereumGetAddress) GetShowDisplay() bool {
 	return false
 }
 
-//*
+// *
 // Response: Contains an Ethereum address derived from device private seed
 // @end
 type EthereumAddress struct {
@@ -225,7 +225,7 @@ func (m *EthereumAddress) GetAddressHex() string {
 	return ""
 }
 
-//*
+// *
 // Request: Ask device to sign transaction
 // All fields are optional from the protocol's point of view. Each field defaults to value `0` if missing.
 // Note: the first at most 1024 bytes of data MUST be transmitted as part of this message.
@@ -351,7 +351,7 @@ func (m *EthereumSignTx) GetTxType() uint32 {
 	return 0
 }
 
-//*
+// *
 // Response: Device asks for more data from transaction payload, or returns the signature.
 // If data_length is set, device awaits that many more bytes of payload.
 // Otherwise, the signature_* fields contain the computed transaction signature. All three fields will be present.
@@ -420,7 +420,7 @@ func (m *EthereumTxRequest) GetSignatureS() []byte {
 	return nil
 }
 
-//*
+// *
 // Request: Transaction payload data.
 // @next EthereumTxRequest
 type EthereumTxAck struct {
@@ -462,7 +462,7 @@ func (m *EthereumTxAck) GetDataChunk() []byte {
 	return nil
 }
 
-//*
+// *
 // Request: Ask device to sign message
 // @start
 // @next EthereumMessageSignature
@@ -514,7 +514,7 @@ func (m *EthereumSignMessage) GetMessage() []byte {
 	return nil
 }
 
-//*
+// *
 // Response: Signed message
 // @end
 type EthereumMessageSignature struct {
@@ -572,7 +572,7 @@ func (m *EthereumMessageSignature) GetAddressHex() string {
 	return ""
 }
 
-//*
+// *
 // Request: Ask device to verify message
 // @start
 // @next Success
 
@@ -21,7 +21,7 @@ var _ = math.Inf
 // proto package needs to be updated.
 const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
 
-//*
+// *
 // Structure representing passphrase source
 type ApplySettings_PassphraseSourceType int32
 
@@ -66,7 +66,7 @@ func (ApplySettings_PassphraseSourceType) EnumDescriptor() ([]byte, []int) {
 	return fileDescriptor_0c720c20d27aa029, []int{4, 0}
 }
 
-//*
+// *
 // Type of recovery procedure. These should be used as bitmask, e.g.,
 // `RecoveryDeviceType_ScrambledWords | RecoveryDeviceType_Matrix`
 // listing every method supported by the host computer.
@@ -114,7 +114,7 @@ func (RecoveryDevice_RecoveryDeviceType) EnumDescriptor() ([]byte, []int) {
 	return fileDescriptor_0c720c20d27aa029, []int{17, 0}
 }
 
-//*
+// *
 // Type of Recovery Word request
 type WordRequest_WordRequestType int32
 
@@ -159,7 +159,7 @@ func (WordRequest_WordRequestType) EnumDescriptor() ([]byte, []int) {
 	return fileDescriptor_0c720c20d27aa029, []int{18, 0}
 }
 
-//*
+// *
 // Request: Reset device to default state and ask for device details
 // @start
 // @next Features
@@ -210,7 +210,7 @@ func (m *Initialize) GetSkipPassphrase() bool {
 	return false
 }
 
-//*
+// *
 // Request: Ask for device details (no device reset)
 // @start
 // @next Features
@@ -245,7 +245,7 @@ func (m *GetFeatures) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_GetFeatures proto.InternalMessageInfo
 
-//*
+// *
 // Response: Reports various information about the device
 // @end
 type Features struct {
@@ -495,7 +495,7 @@ func (m *Features) GetNoBackup() bool {
 	return false
 }
 
-//*
+// *
 // Request: clear session (removes cached PIN, passphrase, etc).
 // @start
 // @next Success
@@ -530,7 +530,7 @@ func (m *ClearSession) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_ClearSession proto.InternalMessageInfo
 
-//*
+// *
 // Request: change language and/or label of the device
 // @start
 // @next Success
@@ -622,7 +622,7 @@ func (m *ApplySettings) GetDisplayRotation() uint32 {
 	return 0
 }
 
-//*
+// *
 // Request: set flags of the device
 // @start
 // @next Success
@@ -666,7 +666,7 @@ func (m *ApplyFlags) GetFlags() uint32 {
 	return 0
 }
 
-//*
+// *
 // Request: Starts workflow for setting/changing/removing the PIN
 // @start
 // @next Success
@@ -710,7 +710,7 @@ func (m *ChangePin) GetRemove() bool {
 	return false
 }
 
-//*
+// *
 // Request: Test if the device is alive, device sends back the message in Success response
 // @start
 // @next Success
@@ -777,7 +777,7 @@ func (m *Ping) GetPassphraseProtection() bool {
 	return false
 }
 
-//*
+// *
 // Request: Abort last operation that required user interaction
 // @start
 // @next Failure
@@ -812,7 +812,7 @@ func (m *Cancel) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_Cancel proto.InternalMessageInfo
 
-//*
+// *
 // Request: Request a sample of random data generated by hardware RNG. May be used for testing.
 // @start
 // @next Entropy
@@ -856,7 +856,7 @@ func (m *GetEntropy) GetSize() uint32 {
 	return 0
 }
 
-//*
+// *
 // Response: Reply with random data generated by internal RNG
 // @end
 type Entropy struct {
@@ -898,7 +898,7 @@ func (m *Entropy) GetEntropy() []byte {
 	return nil
 }
 
-//*
+// *
 // Request: Request device to wipe all sensitive data and settings
 // @start
 // @next Success
@@ -934,7 +934,7 @@ func (m *WipeDevice) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_WipeDevice proto.InternalMessageInfo
 
-//*
+// *
 // Request: Load seed and related internal settings from the computer
 // @start
 // @next Success
@@ -1036,7 +1036,7 @@ func (m *LoadDevice) GetU2FCounter() uint32 {
 	return 0
 }
 
-//*
+// *
 // Request: Ask device to do initialization involving user interaction
 // @start
 // @next EntropyRequest
@@ -1147,7 +1147,7 @@ func (m *ResetDevice) GetNoBackup() bool {
 	return false
 }
 
-//*
+// *
 // Request: Perform backup of the device seed if not backed up using ResetDevice
 // @start
 // @next Success
@@ -1182,7 +1182,7 @@ func (m *BackupDevice) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_BackupDevice proto.InternalMessageInfo
 
-//*
+// *
 // Response: Ask for additional entropy from host computer
 // @next EntropyAck
 type EntropyRequest struct {
@@ -1216,7 +1216,7 @@ func (m *EntropyRequest) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_EntropyRequest proto.InternalMessageInfo
 
-//*
+// *
 // Request: Provide additional entropy for seed generation function
 // @next Success
 type EntropyAck struct {
@@ -1258,7 +1258,7 @@ func (m *EntropyAck) GetEntropy() []byte {
 	return nil
 }
 
-//*
+// *
 // Request: Start recovery workflow asking user for specific words of mnemonic
 // Used to recovery device safely even on untrusted computer.
 // @start
@@ -1369,7 +1369,7 @@ func (m *RecoveryDevice) GetDryRun() bool {
 	return false
 }
 
-//*
+// *
 // Response: Device is waiting for user to enter word of the mnemonic
 // Its position is shown only on device's internal display.
 // @next WordAck
@@ -1412,7 +1412,7 @@ func (m *WordRequest) GetType() WordRequest_WordRequestType {
 	return WordRequest_WordRequestType_Plain
 }
 
-//*
+// *
 // Request: Computer replies with word from the mnemonic
 // @next WordRequest
 // @next Success
@@ -1456,7 +1456,7 @@ func (m *WordAck) GetWord() string {
 	return ""
 }
 
-//*
+// *
 // Request: Set U2F counter
 // @start
 // @next Success
 
@@ -22,7 +22,7 @@ var _ = math.Inf
 // proto package needs to be updated.
 const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
 
-//*
+// *
 // Mapping between TREZOR wire identifier (uint) and a protobuf message
 type MessageType int32
 
@@ -1,38 +1,38 @@
 # This file contains sha256 checksums of optional build dependencies.
 
-9920d3306a1ac536cdd2c796d6cb3c54bc559c226fc3cc39c32f1e0bd7f50d2a go1.18.5.src.tar.gz
+27871baa490f3401414ad793fba49086f6c855b1c584385ed7771e1204c7e179 go1.19.1.src.tar.gz
-828eeca8b5abea3e56921df8fa4b1101380a5ebcfee10acbc8ffe7ec0bf5876b go1.18.5.darwin-amd64.tar.gz
+b2828a2b05f0d2169afc74c11ed010775bf7cf0061822b275697b2f470495fb7 go1.19.1.darwin-amd64.tar.gz
-923a377c6fc9a2c789f5db61c24b8f64133f7889056897449891f256af34065f go1.18.5.darwin-arm64.tar.gz
+e46aecce83a9289be16ce4ba9b8478a5b89b8aa0230171d5c6adbc0c66640548 go1.19.1.darwin-arm64.tar.gz
-c3d90264a706e2d88cfb44126dc6f0d008a48f00732e04bc377cea1a2b716a7c go1.18.5.freebsd-386.tar.gz
+cfaca8c1d5784d2bc21e12d8893cfd2dc885a60db4c1a9a95e4ffc694d0925ce go1.19.1.freebsd-386.tar.gz
-0de23843c568d388bc0f0e390a8966938cccaae0d74b698325f7175bac04e0c6 go1.18.5.freebsd-amd64.tar.gz
+db5b8f232e12c655cc6cde6af1adf4d27d842541807802d747c86161e89efa0a go1.19.1.freebsd-amd64.tar.gz
-0c44f85d146c6f98c34e8ff436a42af22e90e36fe232d3d9d3101f23fd61362b go1.18.5.linux-386.tar.gz
+9acc57342400c5b0c2da07b5b01b50da239dd4a7fad41a1fb56af8363ef4133f go1.19.1.linux-386.tar.gz
-9e5de37f9c49942c601b191ac5fba404b868bfc21d446d6960acc12283d6e5f2 go1.18.5.linux-amd64.tar.gz
+acc512fbab4f716a8f97a8b3fbaa9ddd39606a28be6c2515ef7c6c6311acffde go1.19.1.linux-amd64.tar.gz
-006f6622718212363fa1ff004a6ab4d87bbbe772ec5631bab7cac10be346e4f1 go1.18.5.linux-arm64.tar.gz
+49960821948b9c6b14041430890eccee58c76b52e2dbaafce971c3c38d43df9f go1.19.1.linux-arm64.tar.gz
-d5ac34ac5f060a5274319aa04b7b11e41b123bd7887d64efb5f44ead236957af go1.18.5.linux-armv6l.tar.gz
+efe93f5671621ee84ce5e262e1e21acbc72acefbaba360f21778abd083d4ad16 go1.19.1.linux-armv6l.tar.gz
-2e37fb9c7cbaedd4e729492d658aa4cde821fc94117391a8105c13b25ca1c84b go1.18.5.linux-ppc64le.tar.gz
+4137984aa353de9c5ec1bd8fb3cd00a0624b75eafa3d4ec13d2f3f48261dba2e go1.19.1.linux-ppc64le.tar.gz
-e3d536e7873639f85353e892444f83b14cb6670603961f215986ae8e28e8e07a go1.18.5.linux-s390x.tar.gz
+ca1005cc80a3dd726536b4c6ea5fef0318939351ff273eff420bd66a377c74eb go1.19.1.linux-s390x.tar.gz
-7b3142ec0c5db991e7f73a231662a92429b90ee151fe47557acb566d8d9ae4d3 go1.18.5.windows-386.zip
+bc7043e7a9a8d34aacd06f8c2f70e166d1d148f6800814cff790c45b9ab31cee go1.19.1.windows-386.zip
-73753620602d4b4469770040c53db55e5dd6af2ad07ecc18f71f164c3224eaad go1.18.5.windows-amd64.zip
+b33584c1d93b0e9c783de876b7aa99d3018bdeccd396aeb6d516a74e9d88d55f go1.19.1.windows-amd64.zip
-4d154626affff12ef73ea1017af0e5b52dbc839ef92f6f9e76cf4f71278a5744 go1.18.5.windows-arm64.zip
+d8cf3f04762fa7d5d9c82dfa15b5adaae2404463af3bc8dcd7f89837512501fe go1.19.1.windows-arm64.zip
 
-658078aaaf7608693f37c4cf1380b2af418ab8b2d23fdb33e7e2d4339328590e golangci-lint-1.46.2-darwin-amd64.tar.gz
+20cd1215e0420db8cfa94a6cd3c9d325f7b39c07f2415a02d111568d8bc9e271 golangci-lint-1.49.0-darwin-amd64.tar.gz
-81f9b4afd62ec5e612ef8bc3b1d612a88b56ff289874831845cdad394427385f golangci-lint-1.46.2-darwin-arm64.tar.gz
+cabb1a4c35fe1dadbe5a81550a00871281a331e7660cd85ae16e936a7f0f6cfc golangci-lint-1.49.0-darwin-arm64.tar.gz
-943486e703e62ec55ecd90caeb22bcd39f8cc3962a93eec18c06b7bae12cb46f golangci-lint-1.46.2-freebsd-386.tar.gz
+f834c3b09580cf763b5d30b0c33c83cb13d7a822b5ed5d724143f121ffe28c97 golangci-lint-1.49.0-freebsd-386.tar.gz
-a75dd9ba7e08e8315c411697171db5375c0f6a1ece9e6fbeb9e9a4386822e17d golangci-lint-1.46.2-freebsd-amd64.tar.gz
+4ca91c9f3aa79a71da441b7220a3e799365ff7a24caf9f04fcda12066c5ab0f7 golangci-lint-1.49.0-freebsd-amd64.tar.gz
-83eedca1af72e8be055a1235177eb1b33524fbf08bec5730df2e6c3efade2b23 golangci-lint-1.46.2-freebsd-armv6.tar.gz
+37de789245248eea375d05080e11b4662a08762c353752575167611e65658454 golangci-lint-1.49.0-freebsd-armv6.tar.gz
-513d276c490de6f82baa01f9346d8d78b385f2ae97608f42f05d1f0f1314cd54 golangci-lint-1.46.2-freebsd-armv7.tar.gz
+3abed2bd3a8134b501fdc9cc9a0e60d616c86389e4fcdd1f79ceae7458974378 golangci-lint-1.49.0-freebsd-armv7.tar.gz
-461a60016d516c69d406dc3e2d4957b722dbe684b7085dfac4802d0f84409e27 golangci-lint-1.46.2-linux-386.tar.gz
+ef2860d90d83aee6713f697f23372cd93ac41a16439fdcb3c4ac86ba0f306860 golangci-lint-1.49.0-linux-386.tar.gz
-242cd4f2d6ac0556e315192e8555784d13da5d1874e51304711570769c4f2b9b golangci-lint-1.46.2-linux-amd64.tar.gz
+5badc6e9fee2003621efa07e385910d9a88c89b38f6c35aded153193c5125178 golangci-lint-1.49.0-linux-amd64.tar.gz
-ff5448ada2b3982581984d64b0dec614dba0a3ea4cab2d6a343c77927fc89f7e golangci-lint-1.46.2-linux-arm64.tar.gz
+b57ed03d29b8ca69be9925edd67ea305b6013cd5c97507d205fbe2979f71f2b5 golangci-lint-1.49.0-linux-arm64.tar.gz
-177f5210ef04aee282bfbc6ec519d36af5fb7d2b2c8d3f4ea5e59fdba71b0a27 golangci-lint-1.46.2-linux-armv6.tar.gz
+4a41cff3af7f5304751f7bbf4ea617c14ebc1f88481a28a013e61b06d1f7102c golangci-lint-1.49.0-linux-armv6.tar.gz
-10dd512a36ee978a1009edbca3ba3af410f0fda8df4d85f0e4793a24213870cc golangci-lint-1.46.2-linux-armv7.tar.gz
+14a9683af483ee7052dd0ce7d6140e0b502d6001bea3de606b8e7cce2c673539 golangci-lint-1.49.0-linux-armv7.tar.gz
-67779fa517c688c9db1090c3c456117d95c6b92979c623fe8cce8fb84251f21e golangci-lint-1.46.2-linux-mips64.tar.gz
+33edf757bc2611204fdb40b212900866a57ded4eea62c1b19c10bfc375359afa golangci-lint-1.49.0-linux-mips64.tar.gz
-c085f0f57bdccbb2c902a41b72ce210a3dfff16ca856789374745ab52004b6ee golangci-lint-1.46.2-linux-mips64le.tar.gz
+280f7902f90d162566f1691a300663dd8db6e225e65384fe66b6fb2362e0b314 golangci-lint-1.49.0-linux-mips64le.tar.gz
-abecef6421499248e58ed75d2938bc12b4b1f98b057f25060680b77bb51a881e golangci-lint-1.46.2-linux-ppc64le.tar.gz
+103bcb7ce6c668e0a7e95e5c5355892d74f5d15391443430472e66d652906a15 golangci-lint-1.49.0-linux-ppc64le.tar.gz
-134843a8f5c5c182c11979ea75f5866945d54757b2a04f3e5e04a0cf4fbf3a39 golangci-lint-1.46.2-linux-riscv64.tar.gz
+4636ff9b01ddb18a2c1a953fc134207711b0a5d874d04ac66f915e9cfff0e8e0 golangci-lint-1.49.0-linux-riscv64.tar.gz
-9fe21a9476567aafe7a2e1a926b9641a39f920d4c0ea8eda9d968bc6136337f9 golangci-lint-1.46.2-linux-s390x.tar.gz
+029e0844931a2d3edc771d67e17fe17928f04f80c1a9aa165160a543e8a7e8d4 golangci-lint-1.49.0-linux-s390x.tar.gz
-b48a421ec12a43f8fc8f977b9cf7d4a1ea1c4b97f803a238de7d3ce4ab23a84b golangci-lint-1.46.2-windows-386.zip
+e9cb6f691e62a4d8b28dd52d2eab57cca72acfd5083b3c5417a72d2eb64def09 golangci-lint-1.49.0-windows-386.zip
-604acc1378a566abb0eac799362f3a37b7fcb5fa2268aeb2d5d954c829367301 golangci-lint-1.46.2-windows-amd64.zip
+d058dfb0c7fbd73be70f285d3f8d4d424192fe9b19760ddbb0b2c4b743b8656c golangci-lint-1.49.0-windows-amd64.zip
-927def10db073da9687594072e6a3d9c891f67fa897105a2cfd715e018e7386c golangci-lint-1.46.2-windows-arm64.zip
+c049d80297228db7065eabeac5114f77f04415dcd9b944e8d7c6426d9dd6e9dd golangci-lint-1.49.0-windows-arm64.zip
-729b76ed1d8b4e2612e38772b211503cb940e00a137bbaace1aa066f7c943737 golangci-lint-1.46.2-windows-armv6.zip
+ec9164bab7134ddb94f51c17fd37c109b0801ecd5494b6c0e27ca7898fbd7469 golangci-lint-1.49.0-windows-armv6.zip
-ea27c86d91e0b245ecbcfbf6cdb4ac0522d4bc6dca56bba02ea1bc77ad2917ac golangci-lint-1.46.2-windows-armv7.zip
+68fd9e880d98073f436c58b6f6d2c141881ef49b06ca31137bc19da4e4e3b996 golangci-lint-1.49.0-windows-armv7.zip
 
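The checksum entries above are what pin the upgraded archives. As a rough, standalone sketch (not the build tooling itself; the helper name and the hard-coded archive path are assumptions for illustration), verifying a downloaded archive against a checksums.txt-style file could look like:

package main

import (
	"bufio"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// verify checks that the sha256 of archivePath matches the digest recorded
// for its base name in a "<hex digest> <file name>" style checksum file.
func verify(checksumFile, archivePath string) error {
	name := filepath.Base(archivePath)
	f, err := os.Open(checksumFile)
	if err != nil {
		return err
	}
	defer f.Close()

	want := ""
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		if len(fields) == 2 && fields[1] == name {
			want = fields[0]
			break
		}
	}
	if err := scanner.Err(); err != nil {
		return err
	}
	if want == "" {
		return fmt.Errorf("no checksum entry for %s", name)
	}

	data, err := os.ReadFile(archivePath)
	if err != nil {
		return err
	}
	sum := sha256.Sum256(data)
	if got := hex.EncodeToString(sum[:]); got != want {
		return fmt.Errorf("checksum mismatch for %s: got %s, want %s", name, got, want)
	}
	return nil
}

func main() {
	// Hypothetical usage; the archive name matches one of the entries above.
	if err := verify("build/checksums.txt", "go1.19.1.linux-amd64.tar.gz"); err != nil {
		fmt.Println(err)
	}
}
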
build/ci.go

@@ -24,19 +24,18 @@ Usage: go run build/ci.go <command> <command flags/arguments>
 
 Available commands are:
 
 	install [ -arch architecture ] [ -cc compiler ] [ packages... ] -- builds packages and executables
 	test [ -coverage ] [ packages... ] -- runs the tests
 	lint -- runs certain pre-selected linters
 	archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -signify key-envvar ] [ -upload dest ] -- archives build artifacts
 	importkeys -- imports signing keys from env
 	debsrc [ -signer key-id ] [ -upload dest ] -- creates a debian source package
 	nsis -- creates a Windows NSIS installer
 	aar [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an Android archive
 	xcode [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an iOS XCode framework
 	purge [ -store blobstore ] [ -days threshold ] -- purges old archives from the blobstore
 
 For all commands, -n prevents execution of external programs (dry run mode).
 
 */
 package main
 
@@ -149,7 +148,7 @@ var (
 	// This is the version of go that will be downloaded by
 	//
 	//	go run ci.go install -dlgo
-	dlgoVersion = "1.18.5"
+	dlgoVersion = "1.19.1"
 )
 
 var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))

@@ -347,7 +346,7 @@ func doLint(cmdline []string) {
 
 // downloadLinter downloads and unpacks golangci-lint.
 func downloadLinter(cachedir string) string {
-	const version = "1.46.2"
+	const version = "1.49.0"
 
 	csdb := build.MustLoadChecksums("build/checksums.txt")
 	arch := runtime.GOARCH
@@ -334,8 +334,9 @@ func (t *txWithKey) UnmarshalJSON(input []byte) error {
 // signUnsignedTransactions converts the input txs to canonical transactions.
 //
 // The transactions can have two forms, either
 //  1. unsigned or
 //  2. signed
+//
 // For (1), r, s, v, need so be zero, and the `secretKey` needs to be set.
 // If so, we sign it here and now, with the given `secretKey`
 // If the condition above is not met, then it's considered a signed transaction.
@@ -19,21 +19,20 @@
 // Here is an example of creating a 2 node network with the first node
 // connected to the second:
 //
 //	$ p2psim node create
 //	Created node01
 //
 //	$ p2psim node start node01
 //	Started node01
 //
 //	$ p2psim node create
 //	Created node02
 //
 //	$ p2psim node start node02
 //	Started node02
-//
-// $ p2psim node connect node01 node02
-// Connected node01 to node02
 //
+//	$ p2psim node connect node01 node02
+//	Connected node01 to node02
 package main
 
 import (
@@ -18,7 +18,7 @@
 Package hexutil implements hex encoding with 0x prefix.
 This encoding is used by the Ethereum RPC API to transport binary data in JSON payloads.
 
-Encoding Rules
+# Encoding Rules
 
 All hex data must have prefix "0x".
 
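As a small illustration of the 0x-prefix rule documented above, here is a standalone encode/decode pair using only the standard library (a sketch, not the hexutil API itself):

package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// encode returns data as a 0x-prefixed hex string, the JSON transport form
// described in the package documentation above.
func encode(data []byte) string {
	return "0x" + hex.EncodeToString(data)
}

// decode requires the mandatory 0x prefix, then parses the hex digits.
func decode(s string) ([]byte, error) {
	if !strings.HasPrefix(s, "0x") {
		return nil, fmt.Errorf("hex string must have 0x prefix")
	}
	return hex.DecodeString(strings.TrimPrefix(s, "0x"))
}

func main() {
	fmt.Println(encode([]byte{0xde, 0xad, 0xbe, 0xef})) // 0xdeadbeef
	b, err := decode("0xdeadbeef")
	fmt.Println(b, err)
}
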
@@ -227,10 +227,10 @@ func U256Bytes(n *big.Int) []byte {
 // S256 interprets x as a two's complement number.
 // x must not exceed 256 bits (the result is undefined if it does) and is not modified.
 //
 //	S256(0) = 0
 //	S256(1) = 1
 //	S256(2**255) = -2**255
 //	S256(2**256-1) = -1
 func S256(x *big.Int) *big.Int {
 	if x.Cmp(tt255) < 0 {
 		return x
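The S256 examples above pin down the two's complement rule; a standalone sketch of the same mapping (tt255 and tt256 here are local stand-ins for the package's internal 2**255 and 2**256 constants):

package main

import (
	"fmt"
	"math/big"
)

var (
	tt255 = new(big.Int).Lsh(big.NewInt(1), 255) // 2**255
	tt256 = new(big.Int).Lsh(big.NewInt(1), 256) // 2**256
)

// s256 interprets an unsigned 256-bit value as two's complement: values below
// 2**255 map to themselves, everything else maps to x - 2**256. The input is
// not modified.
func s256(x *big.Int) *big.Int {
	if x.Cmp(tt255) < 0 {
		return x
	}
	return new(big.Int).Sub(x, tt256)
}

func main() {
	fmt.Println(s256(big.NewInt(1)))                          // 1
	fmt.Println(s256(new(big.Int).Sub(tt256, big.NewInt(1)))) // -1, i.e. S256(2**256-1)
}
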
@@ -26,9 +26,10 @@ import (
 // LazyQueue is a priority queue data structure where priorities can change over
 // time and are only evaluated on demand.
 // Two callbacks are required:
 // - priority evaluates the actual priority of an item
 // - maxPriority gives an upper estimate for the priority in any moment between
 //   now and the given absolute time
+//
 // If the upper estimate is exceeded then Update should be called for that item.
 // A global Refresh function should also be called periodically.
 type LazyQueue struct {
@@ -224,10 +224,11 @@ func (beacon *Beacon) VerifyUncles(chain consensus.ChainReader, block *types.Blo
 // verifyHeader checks whether a header conforms to the consensus rules of the
 // stock Ethereum consensus engine. The difference between the beacon and classic is
 // (a) The following fields are expected to be constants:
 //   - difficulty is expected to be 0
 //   - nonce is expected to be 0
 //   - unclehash is expected to be Hash(emptyHeader)
 //     to be the desired constants
+//
 // (b) we don't verify if a block is in the future anymore
 // (c) the extradata is limited to 32 bytes
 func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.Header) error {
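To make rule (a) concrete, here is a hedged sketch of the constant-field checks (trimmed stand-in types; the expected uncle hash is only a placeholder for Hash(emptyHeader)):

package main

import (
	"errors"
	"fmt"
	"math/big"
)

// header is a trimmed stand-in for the real block header type.
type header struct {
	Difficulty *big.Int
	Nonce      uint64
	UncleHash  [32]byte
}

// emptyUncleHash is a placeholder for Hash(emptyHeader).
var emptyUncleHash [32]byte

// checkBeaconConstants enforces the three constant-field rules listed above:
// zero difficulty, zero nonce, and the empty uncle hash.
func checkBeaconConstants(h *header) error {
	if h.Difficulty == nil || h.Difficulty.Sign() != 0 {
		return errors.New("difficulty must be 0")
	}
	if h.Nonce != 0 {
		return errors.New("nonce must be 0")
	}
	if h.UncleHash != emptyUncleHash {
		return errors.New("uncle hash must equal Hash(emptyHeader)")
	}
	return nil
}

func main() {
	fmt.Println(checkBeaconConstants(&header{Difficulty: big.NewInt(0)})) // <nil>
}
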
@@ -34,10 +34,11 @@ type API struct {
 // GetWork returns a work package for external miner.
 //
 // The work package consists of 3 strings:
+//
 // result[0] - 32 bytes hex encoded current block header pow-hash
 // result[1] - 32 bytes hex encoded seed hash used for DAG
 // result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
 // result[3] - hex encoded block number
 func (api *API) GetWork() ([4]string, error) {
 	if api.ethash.remote == nil {
 		return [4]string{}, errors.New("not supported")
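On the consumer side, the documented [4]string layout can be unpacked roughly as follows (a sketch with placeholder inputs, not the miner code):

package main

import (
	"fmt"
	"math/big"
	"strings"
)

// work mirrors the documented layout: pow-hash, seed hash, boundary
// condition ("target") and hex block number.
type work struct {
	PowHash  string
	SeedHash string
	Target   *big.Int
	BlockNum *big.Int
}

func parseWork(res [4]string) (*work, error) {
	target, ok := new(big.Int).SetString(strings.TrimPrefix(res[2], "0x"), 16)
	if !ok {
		return nil, fmt.Errorf("bad target %q", res[2])
	}
	num, ok := new(big.Int).SetString(strings.TrimPrefix(res[3], "0x"), 16)
	if !ok {
		return nil, fmt.Errorf("bad block number %q", res[3])
	}
	return &work{PowHash: res[0], SeedHash: res[1], Target: target, BlockNum: num}, nil
}

func main() {
	// Placeholder values; only the target and block number are parsed here.
	w, err := parseWork([4]string{"0xheadhash", "0xseedhash", "0xffff0000", "0x10"})
	fmt.Println(w, err)
}
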
@@ -339,10 +339,11 @@ func (s *remoteSealer) loop() {
 // makeWork creates a work package for external miner.
 //
 // The work package consists of 3 strings:
+//
 // result[0], 32 bytes hex encoded current block header pow-hash
 // result[1], 32 bytes hex encoded seed hash used for DAG
 // result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
 // result[3], hex encoded block number
 func (s *remoteSealer) makeWork(block *types.Block) {
 	hash := s.ethash.SealHash(block.Header())
 	s.currentWork[0] = hash.Hex()
@@ -40,10 +40,11 @@ var (
 // ensure it conforms to DAO hard-fork rules.
 //
 // DAO hard-fork extension to the header validity:
-// a) if the node is no-fork, do not accept blocks in the [fork, fork+10) range
-//    with the fork specific extra-data set
-// b) if the node is pro-fork, require blocks in the specific range to have the
-//    unique extra-data set.
+//
+//   - if the node is no-fork, do not accept blocks in the [fork, fork+10) range
+//     with the fork specific extra-data set.
+//   - if the node is pro-fork, require blocks in the specific range to have the
+//     unique extra-data set.
 func VerifyDAOHeaderExtraData(config *params.ChainConfig, header *types.Header) error {
 	// Short circuit validation if the node doesn't care about the DAO fork
 	if config.DAOForkBlock == nil {
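A hedged sketch of the range rule in those two bullets (placeholder extra-data value; the real check uses the canonical pro-fork blob from the params package):

package main

import (
	"bytes"
	"errors"
	"fmt"
	"math/big"
)

// daoForkExtra stands in for the canonical pro-fork extra-data blob.
var daoForkExtra = []byte("dao-hard-fork")

// checkDAOExtra applies the documented rule inside [fork, fork+10): a no-fork
// node rejects the pro-fork extra-data, while a pro-fork node requires it.
func checkDAOExtra(num, fork *big.Int, extra []byte, supportFork bool) error {
	limit := new(big.Int).Add(fork, big.NewInt(10))
	if num.Cmp(fork) < 0 || num.Cmp(limit) >= 0 {
		return nil // outside the checked range
	}
	if supportFork && !bytes.Equal(extra, daoForkExtra) {
		return errors.New("missing DAO fork extra-data")
	}
	if !supportFork && bytes.Equal(extra, daoForkExtra) {
		return errors.New("unexpected DAO fork extra-data")
	}
	return nil
}

func main() {
	fork := big.NewInt(1_920_000)
	fmt.Println(checkDAOExtra(big.NewInt(1_920_003), fork, daoForkExtra, true)) // <nil>
}
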
@@ -136,9 +136,11 @@ func decodeTransactions(enc [][]byte) ([]*types.Transaction, error) {
 
 // ExecutableDataToBlock constructs a block from executable data.
 // It verifies that the following fields:
+//
 //	len(extraData) <= 32
 //	uncleHash = emptyUncleHash
 //	difficulty = 0
+//
 // and that the blockhash of the constructed block matches the parameters.
 func ExecutableDataToBlock(params ExecutableDataV1) (*types.Block, error) {
 	txs, err := decodeTransactions(params.Transactions)
@@ -1875,8 +1875,8 @@ func TestInsertReceiptChainRollback(t *testing.T) {
 // overtake the 'canon' chain until after it's passed canon by about 200 blocks.
 //
 // Details at:
 //  - https://github.com/ethereum/go-ethereum/issues/18977
 //  - https://github.com/ethereum/go-ethereum/pull/18988
 func TestLowDiffLongChain(t *testing.T) {
 	// Generate a canonical chain to act as the main dataset
 	engine := ethash.NewFaker()

@@ -2023,14 +2023,16 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon
 }
 
 // Tests that importing a sidechain (S), where
 // - S is sidechain, containing blocks [Sn...Sm]
 // - C is canon chain, containing blocks [G..Cn..Cm]
 // - The common ancestor Cc is pruned
 // - The first block in S: Sn, is == Cn
+//
 // That is: the sidechain for import contains some blocks already present in canon chain.
-// So the blocks are
-// [ Cn, Cn+1, Cc, Sn+3 ... Sm]
-//   ^    ^    ^  pruned
+// So the blocks are:
+//
+//	[ Cn, Cn+1, Cc, Sn+3 ... Sm]
+//	  ^    ^    ^  pruned
 func TestPrunedImportSide(t *testing.T) {
 	//glogger := log.NewGlogHandler(log.StreamHandler(os.Stdout, log.TerminalFormat(false)))
 	//glogger.Verbosity(3)

@@ -2774,9 +2776,9 @@ func BenchmarkBlockChain_1x1000Executions(b *testing.B) {
 // This internally leads to a sidechain import, since the blocks trigger an
 // ErrPrunedAncestor error.
 // This may e.g. happen if
 //  1. Downloader rollbacks a batch of inserted blocks and exits
 //  2. Downloader starts to sync again
 //  3. The blocks fetched are all known and canonical blocks
 func TestSideImportPrunedBlocks(t *testing.T) {
 	// Generate a canonical chain to act as the main dataset
 	engine := ethash.NewFaker()

@@ -3269,20 +3271,19 @@ func TestDeleteRecreateSlotsAcrossManyBlocks(t *testing.T) {
 
 // TestInitThenFailCreateContract tests a pretty notorious case that happened
 // on mainnet over blocks 7338108, 7338110 and 7338115.
 //   - Block 7338108: address e771789f5cccac282f23bb7add5690e1f6ca467c is initiated
 //     with 0.001 ether (thus created but no code)
 //   - Block 7338110: a CREATE2 is attempted. The CREATE2 would deploy code on
 //     the same address e771789f5cccac282f23bb7add5690e1f6ca467c. However, the
 //     deployment fails due to OOG during initcode execution
 //   - Block 7338115: another tx checks the balance of
 //     e771789f5cccac282f23bb7add5690e1f6ca467c, and the snapshotter returned it as
 //     zero.
 //
 // The problem being that the snapshotter maintains a destructset, and adds items
 // to the destructset in case something is created "onto" an existing item.
 // We need to either roll back the snapDestructs, or not place it into snapDestructs
 // in the first place.
-//
 func TestInitThenFailCreateContract(t *testing.T) {
 	var (
 		engine = ethash.NewFaker()

@@ -3459,13 +3460,13 @@ func TestEIP2718Transition(t *testing.T) {
 
 // TestEIP1559Transition tests the following:
 //
 //  1. A transaction whose gasFeeCap is greater than the baseFee is valid.
 //  2. Gas accounting for access lists on EIP-1559 transactions is correct.
 //  3. Only the transaction's tip will be received by the coinbase.
 //  4. The transaction sender pays for both the tip and baseFee.
 //  5. The coinbase receives only the partially realized tip when
 //     gasFeeCap - gasTipCap < baseFee.
 //  6. Legacy transaction behave as expected (e.g. gasPrice = gasFeeCap = gasTipCap).
 func TestEIP1559Transition(t *testing.T) {
 	var (
 		aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa")
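The fee split behind points 3-5 of that test description boils down to one min(); a standalone calculation, assuming a valid transaction (gasFeeCap >= baseFee):

package main

import (
	"fmt"
	"math/big"
)

// effectiveTip is what the coinbase earns per unit of gas under EIP-1559:
// min(gasTipCap, gasFeeCap-baseFee). The tip is only partially realized when
// gasFeeCap - gasTipCap < baseFee; the baseFee portion is burned, not paid out.
func effectiveTip(gasFeeCap, gasTipCap, baseFee *big.Int) *big.Int {
	diff := new(big.Int).Sub(gasFeeCap, baseFee)
	if diff.Cmp(gasTipCap) < 0 {
		return diff
	}
	return new(big.Int).Set(gasTipCap)
}

func main() {
	baseFee := big.NewInt(100)
	fmt.Println(effectiveTip(big.NewInt(150), big.NewInt(60), baseFee)) // 50: partially realized
	fmt.Println(effectiveTip(big.NewInt(200), big.NewInt(60), baseFee)) // 60: full tip
}
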
@@ -239,10 +239,10 @@ type ChainOverrides struct {
 // SetupGenesisBlock writes or updates the genesis block in db.
 // The block that will be used is:
 //
 //	                     genesis == nil       genesis != nil
 //	                  +------------------------------------------
 //	db has no genesis |  main-net default  |  genesis
 //	db has genesis    |  from DB           |  genesis (if compatible)
 //
 // The stored chain configuration will be updated if it is compatible (i.e. does not
 // specify a fork block below the local head block). In case of a conflict, the
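Read as code, the genesis selection table above is roughly the following (names invented; conflict handling between a stored and a provided genesis is elided):

package main

import "fmt"

// pickGenesis mirrors the table: with nothing in the database the provided
// genesis (or the main-net default) wins; with a stored genesis the provided
// one is only used when it is compatible with what is already on disk.
func pickGenesis(stored, provided, mainnetDefault string, compatible bool) string {
	switch {
	case stored == "" && provided == "":
		return mainnetDefault
	case stored == "":
		return provided
	case provided == "" || !compatible:
		return stored // conflict handling elided in this sketch
	default:
		return provided
	}
}

func main() {
	fmt.Println(pickGenesis("", "", "mainnet default", true)) // mainnet default
}
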
@@ -18,12 +18,10 @@
 // +build none
 
 /*
-
 The mkalloc tool creates the genesis allocation constants in genesis_alloc.go
 It outputs a const declaration that contains an RLP-encoded list of (address, balance) tuples.
 
 	go run mkalloc.go genesis.json
-
 */
 package main
 
@@ -57,10 +57,10 @@ const freezerTableSize = 2 * 1000 * 1000 * 1000
 // Freezer is a memory mapped append-only database to store immutable ordered
 // data into flat files:
 //
 // - The append-only nature ensures that disk writes are minimized.
 // - The memory mapping ensures we can max out system memory for caching without
 //   reserving it for go-ethereum. This would also reduce the memory requirements
 //   of Geth, and thus also GC overhead.
 type Freezer struct {
 	// WARNING: The `frozen` and `tail` fields are accessed atomically. On 32 bit platforms, only
 	// 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,

@@ -188,9 +188,9 @@ func (f *Freezer) Ancient(kind string, number uint64) ([]byte, error) {
 
 // AncientRange retrieves multiple items in sequence, starting from the index 'start'.
 // It will return
 //  - at most 'max' items,
 //  - at least 1 item (even if exceeding the maxByteSize), but will otherwise
 //    return as many items as fit into maxByteSize.
 func (f *Freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
 	if table := f.tables[kind]; table != nil {
 		return table.RetrieveItems(start, count, maxBytes)
@@ -66,9 +66,9 @@ var (
 // Pruner is an offline tool to prune the stale state with the
 // help of the snapshot. The workflow of pruner is very simple:
 //
 // - iterate the snapshot, reconstruct the relevant state
 // - iterate the database, delete all other state entries which
 //   don't belong to the target state and the genesis state
 //
 // It can take several hours(around 2 hours for mainnet) to finish
 // the whole pruning work. It's recommended to run this offline tool
@@ -220,10 +220,12 @@ func (t *testHelper) CommitAndGenerate() (common.Hash, *diskLayer) {
 //   - miss in the beginning
 //   - miss in the middle
 //   - miss in the end
+//
 //   - the contract(non-empty storage) has wrong storage slots
 //     - wrong slots in the beginning
 //     - wrong slots in the middle
 //     - wrong slots in the end
+//
 //   - the contract(non-empty storage) has extra storage slots
 //     - extra slots in the beginning
 //     - extra slots in the middle
@@ -179,10 +179,10 @@ type Tree struct {
 // If the memory layers in the journal do not match the disk layer (e.g. there is
 // a gap) or the journal is missing, there are two repair cases:
 //
 //   - if the 'recovery' parameter is true, all memory diff-layers will be discarded.
 //     This case happens when the snapshot is 'ahead' of the state trie.
 //   - otherwise, the entire snapshot is considered invalid and will be recreated on
 //     a background thread.
 func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool, rebuild bool, recovery bool) (*Tree, error) {
 	// Create a new, empty snapshot tree
 	snap := &Tree{
@@ -600,8 +600,8 @@ func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject)
 // CreateAccount is called during the EVM CREATE operation. The situation might arise that
 // a contract does the following:
 //
 //  1. sends funds to sha(account ++ (nonce + 1))
 //  2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1)
 //
 // Carrying over the balance ensures that Ether doesn't disappear.
 func (s *StateDB) CreateAccount(addr common.Address) {
@ -31,23 +31,26 @@ import (
|
||||||
|
|
||||||
var emptyCodeHash = crypto.Keccak256Hash(nil)
|
var emptyCodeHash = crypto.Keccak256Hash(nil)
|
||||||
|
|
||||||
/*
|
// The State Transitioning Model
|
||||||
The State Transitioning Model
|
//
|
||||||
|
// A state transition is a change made when a transaction is applied to the current world
|
||||||
A state transition is a change made when a transaction is applied to the current world state
|
// state. The state transitioning model does all the necessary work to work out a valid new
|
||||||
The state transitioning model does all the necessary work to work out a valid new state root.
|
// state root.
|
||||||
|
//
|
||||||
1) Nonce handling
|
// 1. Nonce handling
|
||||||
2) Pre pay gas
|
// 2. Pre pay gas
|
||||||
3) Create a new state object if the recipient is \0*32
|
// 3. Create a new state object if the recipient is \0*32
|
||||||
4) Value transfer
|
// 4. Value transfer
|
||||||
== If contract creation ==
|
//
|
||||||
4a) Attempt to run transaction data
|
// == If contract creation ==
|
||||||
4b) If valid, use result as code for the new state object
|
//
|
||||||
== end ==
|
// 4a. Attempt to run transaction data
|
||||||
5) Run Script section
|
// 4b. If valid, use result as code for the new state object
|
||||||
6) Derive new state root
|
//
|
||||||
*/
|
// == end ==
|
||||||
|
//
|
||||||
|
// 5. Run Script section
|
||||||
|
// 6. Derive new state root
|
||||||
type StateTransition struct {
|
type StateTransition struct {
|
||||||
gp *GasPool
|
gp *GasPool
|
||||||
msg Message
|
msg Message
|
||||||
|
@ -262,13 +265,10 @@ func (st *StateTransition) preCheck() error {
|
||||||
// TransitionDb will transition the state by applying the current message and
|
// TransitionDb will transition the state by applying the current message and
|
||||||
// returning the evm execution result with following fields.
|
// returning the evm execution result with following fields.
|
||||||
//
|
//
|
||||||
// - used gas:
|
// - used gas: total gas used (including gas being refunded)
|
||||||
// total gas used (including gas being refunded)
|
// - returndata: the returned data from evm
|
||||||
// - returndata:
|
// - concrete execution error: various EVM errors which abort the execution, e.g.
|
||||||
// the returned data from evm
|
// ErrOutOfGas, ErrExecutionReverted
|
||||||
// - concrete execution error:
|
|
||||||
// various **EVM** error which aborts the execution,
|
|
||||||
// e.g. ErrOutOfGas, ErrExecutionReverted
|
|
||||||
//
|
//
|
||||||
// However, if any consensus issue is encountered, return the error directly with
|
// However, if any consensus issue is encountered, return the error directly with
|
||||||
// nil evm execution result.
|
// nil evm execution result.
|
||||||
|
|
|
@ -263,10 +263,10 @@ var (
|
||||||
|
|
||||||
// modexpMultComplexity implements bigModexp multComplexity formula, as defined in EIP-198
|
// modexpMultComplexity implements bigModexp multComplexity formula, as defined in EIP-198
|
||||||
//
|
//
|
||||||
// def mult_complexity(x):
|
// def mult_complexity(x):
|
||||||
// if x <= 64: return x ** 2
|
// if x <= 64: return x ** 2
|
||||||
// elif x <= 1024: return x ** 2 // 4 + 96 * x - 3072
|
// elif x <= 1024: return x ** 2 // 4 + 96 * x - 3072
|
||||||
// else: return x ** 2 // 16 + 480 * x - 199680
|
// else: return x ** 2 // 16 + 480 * x - 199680
|
||||||
//
|
//
|
||||||
// where x is max(length_of_MODULUS, length_of_BASE)
|
// where x is max(length_of_MODULUS, length_of_BASE)
|
||||||
func modexpMultComplexity(x *big.Int) *big.Int {
|
func modexpMultComplexity(x *big.Int) *big.Int {
|
||||||
|
|
|
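Since the formula is quoted right above, here is a minimal runnable sketch of it in plain integer arithmetic. Note that geth's actual modexpMultComplexity performs the same computation on *big.Int values to avoid overflow.

```go
package main

import "fmt"

// multComplexity mirrors the EIP-198 piecewise formula quoted above using
// uint64 arithmetic for readability. x is max(length_of_MODULUS,
// length_of_BASE) in bytes.
func multComplexity(x uint64) uint64 {
	switch {
	case x <= 64:
		return x * x
	case x <= 1024:
		return x*x/4 + 96*x - 3072
	default:
		return x*x/16 + 480*x - 199680
	}
}

func main() {
	for _, x := range []uint64{32, 64, 128, 1024, 2048} {
		fmt.Println(x, multComplexity(x)) // e.g. 32 -> 1024, 128 -> 13312
	}
}
```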
@ -117,20 +117,21 @@ func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySi
|
||||||
return params.SstoreResetGas, nil
|
return params.SstoreResetGas, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// The new gas metering is based on net gas costs (EIP-1283):
|
// The new gas metering is based on net gas costs (EIP-1283):
|
||||||
//
|
//
|
||||||
// 1. If current value equals new value (this is a no-op), 200 gas is deducted.
|
// (1.) If current value equals new value (this is a no-op), 200 gas is deducted.
|
||||||
// 2. If current value does not equal new value
|
// (2.) If current value does not equal new value
|
||||||
// 2.1. If original value equals current value (this storage slot has not been changed by the current execution context)
|
// (2.1.) If original value equals current value (this storage slot has not been changed by the current execution context)
|
||||||
// 2.1.1. If original value is 0, 20000 gas is deducted.
|
// (2.1.1.) If original value is 0, 20000 gas is deducted.
|
||||||
// 2.1.2. Otherwise, 5000 gas is deducted. If new value is 0, add 15000 gas to refund counter.
|
// (2.1.2.) Otherwise, 5000 gas is deducted. If new value is 0, add 15000 gas to refund counter.
|
||||||
// 2.2. If original value does not equal current value (this storage slot is dirty), 200 gas is deducted. Apply both of the following clauses.
|
// (2.2.) If original value does not equal current value (this storage slot is dirty), 200 gas is deducted. Apply both of the following clauses.
|
||||||
// 2.2.1. If original value is not 0
|
// (2.2.1.) If original value is not 0
|
||||||
// 2.2.1.1. If current value is 0 (also means that new value is not 0), remove 15000 gas from refund counter. We can prove that refund counter will never go below 0.
|
// (2.2.1.1.) If current value is 0 (also means that new value is not 0), remove 15000 gas from refund counter. We can prove that refund counter will never go below 0.
|
||||||
// 2.2.1.2. If new value is 0 (also means that current value is not 0), add 15000 gas to refund counter.
|
// (2.2.1.2.) If new value is 0 (also means that current value is not 0), add 15000 gas to refund counter.
|
||||||
// 2.2.2. If original value equals new value (this storage slot is reset)
|
// (2.2.2.) If original value equals new value (this storage slot is reset)
|
||||||
// 2.2.2.1. If original value is 0, add 19800 gas to refund counter.
|
// (2.2.2.1.) If original value is 0, add 19800 gas to refund counter.
|
||||||
// 2.2.2.2. Otherwise, add 4800 gas to refund counter.
|
// (2.2.2.2.) Otherwise, add 4800 gas to refund counter.
|
||||||
value := common.Hash(y.Bytes32())
|
value := common.Hash(y.Bytes32())
|
||||||
if current == value { // noop (1)
|
if current == value { // noop (1)
|
||||||
return params.NetSstoreNoopGas, nil
|
return params.NetSstoreNoopGas, nil
|
||||||
|
@ -162,19 +163,21 @@ func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySi
|
||||||
return params.NetSstoreDirtyGas, nil
|
return params.NetSstoreDirtyGas, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// 0. If *gasleft* is less than or equal to 2300, fail the current call.
|
// Here come the EIP2200 rules:
|
||||||
// 1. If current value equals new value (this is a no-op), SLOAD_GAS is deducted.
|
//
|
||||||
// 2. If current value does not equal new value:
|
// (0.) If *gasleft* is less than or equal to 2300, fail the current call.
|
||||||
// 2.1. If original value equals current value (this storage slot has not been changed by the current execution context):
|
// (1.) If current value equals new value (this is a no-op), SLOAD_GAS is deducted.
|
||||||
// 2.1.1. If original value is 0, SSTORE_SET_GAS (20K) gas is deducted.
|
// (2.) If current value does not equal new value:
|
||||||
// 2.1.2. Otherwise, SSTORE_RESET_GAS gas is deducted. If new value is 0, add SSTORE_CLEARS_SCHEDULE to refund counter.
|
// (2.1.) If original value equals current value (this storage slot has not been changed by the current execution context):
|
||||||
// 2.2. If original value does not equal current value (this storage slot is dirty), SLOAD_GAS gas is deducted. Apply both of the following clauses:
|
// (2.1.1.) If original value is 0, SSTORE_SET_GAS (20K) gas is deducted.
|
||||||
// 2.2.1. If original value is not 0:
|
// (2.1.2.) Otherwise, SSTORE_RESET_GAS gas is deducted. If new value is 0, add SSTORE_CLEARS_SCHEDULE to refund counter.
|
||||||
// 2.2.1.1. If current value is 0 (also means that new value is not 0), subtract SSTORE_CLEARS_SCHEDULE gas from refund counter.
|
// (2.2.) If original value does not equal current value (this storage slot is dirty), SLOAD_GAS gas is deducted. Apply both of the following clauses:
|
||||||
// 2.2.1.2. If new value is 0 (also means that current value is not 0), add SSTORE_CLEARS_SCHEDULE gas to refund counter.
|
// (2.2.1.) If original value is not 0:
|
||||||
// 2.2.2. If original value equals new value (this storage slot is reset):
|
// (2.2.1.1.) If current value is 0 (also means that new value is not 0), subtract SSTORE_CLEARS_SCHEDULE gas from refund counter.
|
||||||
// 2.2.2.1. If original value is 0, add SSTORE_SET_GAS - SLOAD_GAS to refund counter.
|
// (2.2.1.2.) If new value is 0 (also means that current value is not 0), add SSTORE_CLEARS_SCHEDULE gas to refund counter.
|
||||||
// 2.2.2.2. Otherwise, add SSTORE_RESET_GAS - SLOAD_GAS gas to refund counter.
|
// (2.2.2.) If original value equals new value (this storage slot is reset):
|
||||||
|
// (2.2.2.1.) If original value is 0, add SSTORE_SET_GAS - SLOAD_GAS to refund counter.
|
||||||
|
// (2.2.2.2.) Otherwise, add SSTORE_RESET_GAS - SLOAD_GAS gas to refund counter.
|
||||||
func gasSStoreEIP2200(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
|
func gasSStoreEIP2200(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
|
||||||
// If we fail the minimum gas availability invariant, fail (0)
|
// If we fail the minimum gas availability invariant, fail (0)
|
||||||
if contract.Gas <= params.SstoreSentryGasEIP2200 {
|
if contract.Gas <= params.SstoreSentryGasEIP2200 {
|
||||||
|
|
|
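To make the rule list above concrete, here is a self-contained sketch that walks the same decision tree and returns the gas charge together with the refund delta. The constant values follow EIP-2200 (Istanbul); the real gasSStoreEIP2200 additionally enforces the 2300 gas sentry shown at the top of the function and reads the original/current values from the StateDB.

```go
package main

import "fmt"

// Symbolic EIP-2200 constants (geth keeps the real ones in params).
const (
	sloadGas           = 800   // SLOAD_GAS
	sstoreSetGas       = 20000 // SSTORE_SET_GAS
	sstoreResetGas     = 5000  // SSTORE_RESET_GAS
	sstoreClearsRefund = 15000 // SSTORE_CLEARS_SCHEDULE
)

// sstoreGasEIP2200 is an illustrative sketch of the rules quoted above: it
// returns the gas to deduct and the refund delta (which may be negative).
func sstoreGasEIP2200(original, current, newVal [32]byte) (gas uint64, refund int64) {
	if current == newVal { // rule 1: no-op
		return sloadGas, 0
	}
	if original == current { // rule 2.1: slot is clean
		if original == ([32]byte{}) {
			return sstoreSetGas, 0 // 2.1.1
		}
		if newVal == ([32]byte{}) {
			return sstoreResetGas, sstoreClearsRefund // 2.1.2: clearing earns a refund
		}
		return sstoreResetGas, 0
	}
	// rule 2.2: slot is dirty
	gas = sloadGas
	if original != ([32]byte{}) {
		if current == ([32]byte{}) {
			refund -= sstoreClearsRefund // 2.2.1.1
		} else if newVal == ([32]byte{}) {
			refund += sstoreClearsRefund // 2.2.1.2
		}
	}
	if original == newVal { // 2.2.2: slot is reset to its original value
		if original == ([32]byte{}) {
			refund += sstoreSetGas - sloadGas // 2.2.2.1
		} else {
			refund += sstoreResetGas - sloadGas // 2.2.2.2
		}
	}
	return gas, refund
}

func main() {
	zero, one := [32]byte{}, [32]byte{31: 1}
	fmt.Println(sstoreGasEIP2200(zero, zero, one)) // fresh write: 20000 0
	fmt.Println(sstoreGasEIP2200(one, one, zero))  // clearing:    5000 15000
}
```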
@ -392,29 +392,29 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
|
||||||
// opExtCodeHash returns the code hash of a specified account.
|
// opExtCodeHash returns the code hash of a specified account.
|
||||||
// There are several cases when the function is called, while we can relay everything
|
// There are several cases when the function is called, while we can relay everything
|
||||||
// to `state.GetCodeHash` function to ensure the correctness.
|
// to `state.GetCodeHash` function to ensure the correctness.
|
||||||
// (1) Caller tries to get the code hash of a normal contract account, state
|
|
||||||
// should return the relevant code hash and set it as the result.
|
|
||||||
//
|
//
|
||||||
// (2) Caller tries to get the code hash of a non-existent account, state should
|
// 1. Caller tries to get the code hash of a normal contract account, state
|
||||||
// return common.Hash{} and zero will be set as the result.
|
// should return the relevant code hash and set it as the result.
|
||||||
//
|
//
|
||||||
// (3) Caller tries to get the code hash for an account without contract code,
|
// 2. Caller tries to get the code hash of a non-existent account, state should
|
||||||
// state should return emptyCodeHash(0xc5d246...) as the result.
|
// return common.Hash{} and zero will be set as the result.
|
||||||
//
|
//
|
||||||
// (4) Caller tries to get the code hash of a precompiled account, the result
|
// 3. Caller tries to get the code hash for an account without contract code, state
|
||||||
// should be zero or emptyCodeHash.
|
// should return emptyCodeHash(0xc5d246...) as the result.
|
||||||
//
|
//
|
||||||
// It is worth noting that in order to avoid unnecessary create and clean,
|
// 4. Caller tries to get the code hash of a precompiled account, the result should be
|
||||||
// all precompile accounts on mainnet have been transferred 1 wei, so the return
|
// zero or emptyCodeHash.
|
||||||
// here should be emptyCodeHash.
|
//
|
||||||
// If the precompile account is not transferred any amount on a private or
|
// It is worth noting that in order to avoid unnecessary create and clean, all precompile
|
||||||
|
// accounts on mainnet have been transferred 1 wei, so the return here should be
|
||||||
|
// emptyCodeHash. If the precompile account is not transferred any amount on a private or
|
||||||
// customized chain, the return value will be zero.
|
// customized chain, the return value will be zero.
|
||||||
//
|
//
|
||||||
// (5) Caller tries to get the code hash for an account which is marked as suicided
|
// 5. Caller tries to get the code hash for an account which is marked as suicided
|
||||||
// in the current transaction, the code hash of this account should be returned.
|
// in the current transaction, the code hash of this account should be returned.
|
||||||
//
|
//
|
||||||
// (6) Caller tries to get the code hash for an account which is marked as deleted,
|
// 6. Caller tries to get the code hash for an account which is marked as deleted, this
|
||||||
// this account should be regarded as a non-existent account and zero should be returned.
|
// account should be regarded as a non-existent account and zero should be returned.
|
||||||
func opExtCodeHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
func opExtCodeHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||||
slot := scope.Stack.peek()
|
slot := scope.Stack.peek()
|
||||||
address := common.Address(slot.Bytes20())
|
address := common.Address(slot.Bytes20())
|
||||||
|
|
|
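Reconstructed from the comment above, so treat it as a sketch rather than the authoritative body, the function essentially reduces the six cases to an Empty check plus GetCodeHash:

```go
func opExtCodeHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
	slot := scope.Stack.peek()
	address := common.Address(slot.Bytes20())
	if interpreter.evm.StateDB.Empty(address) {
		// Cases 2 and 6 (and 4, if the precompile was never funded): zero.
		slot.Clear()
	} else {
		// Cases 1, 3 and 5 (and funded precompiles): the code hash, which is
		// emptyCodeHash for code-less accounts.
		slot.SetBytes(interpreter.evm.StateDB.GetCodeHash(address).Bytes())
	}
	return nil, nil
}
```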
@ -35,7 +35,7 @@ import (
|
||||||
"golang.org/x/crypto/sha3"
|
"golang.org/x/crypto/sha3"
|
||||||
)
|
)
|
||||||
|
|
||||||
//SignatureLength indicates the byte length required to carry a signature with recovery id.
|
// SignatureLength indicates the byte length required to carry a signature with recovery id.
|
||||||
const SignatureLength = 64 + 1 // 64 bytes ECDSA signature + 1 byte recovery id
|
const SignatureLength = 64 + 1 // 64 bytes ECDSA signature + 1 byte recovery id
|
||||||
|
|
||||||
// RecoveryIDOffset points to the byte offset within the signature that contains the recovery id.
|
// RecoveryIDOffset points to the byte offset within the signature that contains the recovery id.
|
||||||
|
|
|
@ -105,7 +105,6 @@ func (BitCurve *BitCurve) IsOnCurve(x, y *big.Int) bool {
|
||||||
return x3.Cmp(y2) == 0
|
return x3.Cmp(y2) == 0
|
||||||
}
|
}
|
||||||
|
|
||||||
//TODO: double check if the function is okay
|
|
||||||
// affineFromJacobian reverses the Jacobian transform. See the comment at the
|
// affineFromJacobian reverses the Jacobian transform. See the comment at the
|
||||||
// top of the file.
|
// top of the file.
|
||||||
func (BitCurve *BitCurve) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) {
|
func (BitCurve *BitCurve) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) {
|
||||||
|
|
|
@ -1,3 +1,4 @@
|
||||||
|
//go:build dummy
|
||||||
// +build dummy
|
// +build dummy
|
||||||
|
|
||||||
// Package c contains only a C file.
|
// Package c contains only a C file.
|
||||||
|
|
|
@ -1,3 +1,4 @@
|
||||||
|
//go:build dummy
|
||||||
// +build dummy
|
// +build dummy
|
||||||
|
|
||||||
// Package c contains only a C file.
|
// Package c contains only a C file.
|
||||||
|
|
|
@ -1,3 +1,4 @@
|
||||||
|
//go:build dummy
|
||||||
// +build dummy
|
// +build dummy
|
||||||
|
|
||||||
// Package c contains only a C file.
|
// Package c contains only a C file.
|
||||||
|
|
|
@ -1,3 +1,4 @@
|
||||||
|
//go:build dummy
|
||||||
// +build dummy
|
// +build dummy
|
||||||
|
|
||||||
// Package c contains only a C file.
|
// Package c contains only a C file.
|
||||||
|
|
|
@ -1,3 +1,4 @@
|
||||||
|
//go:build dummy
|
||||||
// +build dummy
|
// +build dummy
|
||||||
|
|
||||||
// Package c contains only a C file.
|
// Package c contains only a C file.
|
||||||
|
|
|
@ -1,3 +1,4 @@
|
||||||
|
//go:build dummy
|
||||||
// +build dummy
|
// +build dummy
|
||||||
|
|
||||||
// Package c contains only a C file.
|
// Package c contains only a C file.
|
||||||
|
|
|
@ -1,3 +1,4 @@
|
||||||
|
//go:build dummy
|
||||||
// +build dummy
|
// +build dummy
|
||||||
|
|
||||||
// Package c contains only a C file.
|
// Package c contains only a C file.
|
||||||
|
|
|
@ -142,15 +142,19 @@ func NewConsensusAPI(eth *eth.Ethereum) *ConsensusAPI {
|
||||||
}
|
}
|
||||||
|
|
||||||
// ForkchoiceUpdatedV1 has several responsibilities:
|
// ForkchoiceUpdatedV1 has several responsibilities:
|
||||||
// If the method is called with an empty head block:
|
//
|
||||||
// we return success, which can be used to check if the engine API is enabled
|
// We try to set our blockchain to the headBlock.
|
||||||
// If the total difficulty was not reached:
|
//
|
||||||
// we return INVALID
|
// If the method is called with an empty head block: we return success, which can be used
|
||||||
// If the finalizedBlockHash is set:
|
// to check if the engine API is enabled.
|
||||||
// we check if we have the finalizedBlockHash in our db, if not we start a sync
|
//
|
||||||
// We try to set our blockchain to the headBlock
|
// If the total difficulty was not reached: we return INVALID.
|
||||||
// If there are payloadAttributes:
|
//
|
||||||
// we try to assemble a block with the payloadAttributes and return its payloadID
|
// If the finalizedBlockHash is set: we check if we have the finalizedBlockHash in our db,
|
||||||
|
// if not we start a sync.
|
||||||
|
//
|
||||||
|
// If there are payloadAttributes: we try to assemble a block with the payloadAttributes
|
||||||
|
// and return its payloadID.
|
||||||
func (api *ConsensusAPI) ForkchoiceUpdatedV1(update beacon.ForkchoiceStateV1, payloadAttributes *beacon.PayloadAttributesV1) (beacon.ForkChoiceResponse, error) {
|
func (api *ConsensusAPI) ForkchoiceUpdatedV1(update beacon.ForkchoiceStateV1, payloadAttributes *beacon.PayloadAttributesV1) (beacon.ForkChoiceResponse, error) {
|
||||||
api.forkchoiceLock.Lock()
|
api.forkchoiceLock.Lock()
|
||||||
defer api.forkchoiceLock.Unlock()
|
defer api.forkchoiceLock.Unlock()
|
||||||
|
|
|
@ -519,18 +519,18 @@ func TestExchangeTransitionConfig(t *testing.T) {
|
||||||
TestNewPayloadOnInvalidChain sets up a valid chain and tries to feed blocks
|
TestNewPayloadOnInvalidChain sets up a valid chain and tries to feed blocks
|
||||||
from an invalid chain to test if latestValidHash (LVH) works correctly.
|
from an invalid chain to test if latestValidHash (LVH) works correctly.
|
||||||
|
|
||||||
We set up the following chain where P1 ... Pn and P1'' are valid while
|
We set up the following chain where P1 ... Pn and P1'' are valid while
|
||||||
P1' is invalid.
|
P1' is invalid.
|
||||||
We expect
|
We expect
|
||||||
(1) The LVH to point to the current inserted payload if it was valid.
|
(1) The LVH to point to the current inserted payload if it was valid.
|
||||||
(2) The LVH to point to the valid parent on an invalid payload (if the parent is available).
|
(2) The LVH to point to the valid parent on an invalid payload (if the parent is available).
|
||||||
(3) If the parent is unavailable, the LVH should not be set.
|
(3) If the parent is unavailable, the LVH should not be set.
|
||||||
|
|
||||||
CommonAncestor◄─▲── P1 ◄── P2 ◄─ P3 ◄─ ... ◄─ Pn
|
CommonAncestor◄─▲── P1 ◄── P2 ◄─ P3 ◄─ ... ◄─ Pn
|
||||||
│
|
│
|
||||||
└── P1' ◄─ P2' ◄─ P3' ◄─ ... ◄─ Pn'
|
└── P1' ◄─ P2' ◄─ P3' ◄─ ... ◄─ Pn'
|
||||||
│
|
│
|
||||||
└── P1''
|
└── P1''
|
||||||
*/
|
*/
|
||||||
func TestNewPayloadOnInvalidChain(t *testing.T) {
|
func TestNewPayloadOnInvalidChain(t *testing.T) {
|
||||||
genesis, preMergeBlocks := generatePreMergeChain(10)
|
genesis, preMergeBlocks := generatePreMergeChain(10)
|
||||||
|
|
|
@ -741,9 +741,11 @@ func (d *Downloader) fetchHead(p *peerConnection) (head *types.Header, pivot *ty
|
||||||
// calculateRequestSpan calculates what headers to request from a peer when trying to determine the
|
// calculateRequestSpan calculates what headers to request from a peer when trying to determine the
|
||||||
// common ancestor.
|
// common ancestor.
|
||||||
// It returns parameters to be used for peer.RequestHeadersByNumber:
|
// It returns parameters to be used for peer.RequestHeadersByNumber:
|
||||||
// from - starting block number
|
//
|
||||||
// count - number of headers to request
|
// from - starting block number
|
||||||
// skip - number of headers to skip
|
// count - number of headers to request
|
||||||
|
// skip - number of headers to skip
|
||||||
|
//
|
||||||
// and also returns 'max', the last block which is expected to be returned by the remote peers,
|
// and also returns 'max', the last block which is expected to be returned by the remote peers,
|
||||||
// given the (from,count,skip)
|
// given the (from,count,skip)
|
||||||
func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) {
|
func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) {
|
||||||
|
|
|
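A hedged fragment of how the four results are typically consumed during ancestor lookup follows; the exact RequestHeadersByNumber signature, in particular the trailing reverse flag, is an assumption.

```go
// Fragment, not a compilable unit: shows how the four results feed a header request.
from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight)
go p.peer.RequestHeadersByNumber(uint64(from), count, skip, false)
// Headers numbered above 'max' should not appear in the peer's response.
_ = max
```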
@ -480,9 +480,10 @@ func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bo
|
||||||
// to access the queue, so they already need a lock anyway.
|
// to access the queue, so they already need a lock anyway.
|
||||||
//
|
//
|
||||||
// Returns:
|
// Returns:
|
||||||
// item - the fetchRequest
|
//
|
||||||
// progress - whether any progress was made
|
// item - the fetchRequest
|
||||||
// throttle - if the caller should throttle for a while
|
// progress - whether any progress was made
|
||||||
|
// throttle - if the caller should throttle for a while
|
||||||
func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
|
func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
|
||||||
pendPool map[string]*fetchRequest, kind uint) (*fetchRequest, bool, bool) {
|
pendPool map[string]*fetchRequest, kind uint) (*fetchRequest, bool, bool) {
|
||||||
// Short circuit if the pool has been depleted, or if the peer's already
|
// Short circuit if the pool has been depleted, or if the peer's already
|
||||||
|
|
|
@ -71,10 +71,11 @@ func (r *resultStore) SetThrottleThreshold(threshold uint64) uint64 {
|
||||||
// wants to reserve headers for fetching.
|
// wants to reserve headers for fetching.
|
||||||
//
|
//
|
||||||
// It returns the following:
|
// It returns the following:
|
||||||
// stale - if true, this item is already passed, and should not be requested again
|
//
|
||||||
// throttled - if true, the store is at capacity, this particular header is not prio now
|
// stale - if true, this item is already passed, and should not be requested again
|
||||||
// item - the result to store data into
|
// throttled - if true, the store is at capacity, this particular header is not prio now
|
||||||
// err - any error that occurred
|
// item - the result to store data into
|
||||||
|
// err - any error that occurred
|
||||||
func (r *resultStore) AddFetch(header *types.Header, fastSync bool) (stale, throttled bool, item *fetchResult, err error) {
|
func (r *resultStore) AddFetch(header *types.Header, fastSync bool) (stale, throttled bool, item *fetchResult, err error) {
|
||||||
r.lock.Lock()
|
r.lock.Lock()
|
||||||
defer r.lock.Unlock()
|
defer r.lock.Unlock()
|
||||||
|
|
|
@ -208,10 +208,11 @@ func (oracle *Oracle) resolveBlockRange(ctx context.Context, reqEnd rpc.BlockNum
|
||||||
// actually processed range is returned to avoid ambiguity when parts of the requested range
|
// actually processed range is returned to avoid ambiguity when parts of the requested range
|
||||||
// are not available or when the head has changed during processing this request.
|
// are not available or when the head has changed during processing this request.
|
||||||
// Three arrays are returned based on the processed blocks:
|
// Three arrays are returned based on the processed blocks:
|
||||||
// - reward: the requested percentiles of effective priority fees per gas of transactions in each
|
// - reward: the requested percentiles of effective priority fees per gas of transactions in each
|
||||||
// block, sorted in ascending order and weighted by gas used.
|
// block, sorted in ascending order and weighted by gas used.
|
||||||
// - baseFee: base fee per gas in the given block
|
// - baseFee: base fee per gas in the given block
|
||||||
// - gasUsedRatio: gasUsed/gasLimit in the given block
|
// - gasUsedRatio: gasUsed/gasLimit in the given block
|
||||||
|
//
|
||||||
// Note: baseFee includes the next block after the newest of the returned range, because this
|
// Note: baseFee includes the next block after the newest of the returned range, because this
|
||||||
// value can be derived from the newest block.
|
// value can be derived from the newest block.
|
||||||
func (oracle *Oracle) FeeHistory(ctx context.Context, blocks int, unresolvedLastBlock rpc.BlockNumber, rewardPercentiles []float64) (*big.Int, [][]*big.Int, []*big.Int, []float64, error) {
|
func (oracle *Oracle) FeeHistory(ctx context.Context, blocks int, unresolvedLastBlock rpc.BlockNumber, rewardPercentiles []float64) (*big.Int, [][]*big.Int, []*big.Int, []float64, error) {
|
||||||
|
|
|
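A hedged usage fragment with illustrative arguments (4 blocks, 25th and 75th reward percentiles) is shown below; the enclosing function and error handling are assumed.

```go
oldest, rewards, baseFees, gasUsedRatios, err := oracle.FeeHistory(ctx, 4, rpc.LatestBlockNumber, []float64{25, 75})
if err != nil {
	return err
}
// Per the note above, baseFees carries one more entry than gasUsedRatios: the
// extra element is the base fee of the block following the newest one returned.
fmt.Println(oldest, rewards, baseFees, gasUsedRatios)
```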
@ -368,8 +368,8 @@ func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []comm
|
||||||
return hashes, slots, proofs
|
return hashes, slots, proofs
|
||||||
}
|
}
|
||||||
|
|
||||||
// the createStorageRequestResponseAlwaysProve tests a cornercase, where it always
|
// createStorageRequestResponseAlwaysProve tests a cornercase, where the peer always
|
||||||
// supplies the proof for the last account, even if it is 'complete'.h
|
// supplies the proof for the last account, even if it is 'complete'.
|
||||||
func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
|
func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
|
||||||
var size uint64
|
var size uint64
|
||||||
max = max * 3 / 4
|
max = max * 3 / 4
|
||||||
|
|
|
@ -46,16 +46,16 @@ var noopReleaser = tracers.StateReleaseFunc(func() {})
|
||||||
// Its purpose is to prevent resource leaking. Though it can be noop in some cases.
|
// Its purpose is to prevent resource leaking. Though it can be noop in some cases.
|
||||||
//
|
//
|
||||||
// Parameters:
|
// Parameters:
|
||||||
// - block: The block for which we want the state(state = block.Root)
|
// - block: The block for which we want the state(state = block.Root)
|
||||||
// - reexec: The maximum number of blocks to reprocess trying to obtain the desired state
|
// - reexec: The maximum number of blocks to reprocess trying to obtain the desired state
|
||||||
// - base: If the caller is tracing multiple blocks, the caller can provide the parent
|
// - base: If the caller is tracing multiple blocks, the caller can provide the parent
|
||||||
// state continuously from the callsite.
|
// state continuously from the callsite.
|
||||||
// - readOnly: If true, then the live 'blockchain' state database is used. No mutation should
|
// - readOnly: If true, then the live 'blockchain' state database is used. No mutation should
|
||||||
// be made from caller, e.g. perform Commit or other 'save-to-disk' changes.
|
// be made from caller, e.g. perform Commit or other 'save-to-disk' changes.
|
||||||
// Otherwise, the trash generated by caller may be persisted permanently.
|
// Otherwise, the trash generated by caller may be persisted permanently.
|
||||||
// - preferDisk: this arg can be used by the caller to signal that even though the 'base' is
|
// - preferDisk: this arg can be used by the caller to signal that even though the 'base' is
|
||||||
// provided, it would be preferable to start from a fresh state, if we have it
|
// provided, it would be preferable to start from a fresh state, if we have it
|
||||||
// on disk.
|
// on disk.
|
||||||
func (eth *Ethereum) StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (statedb *state.StateDB, release tracers.StateReleaseFunc, err error) {
|
func (eth *Ethereum) StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (statedb *state.StateDB, release tracers.StateReleaseFunc, err error) {
|
||||||
var (
|
var (
|
||||||
current *types.Block
|
current *types.Block
|
||||||
|
|
|
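A hedged fragment of the read-only tracing pattern described above; the reexec depth of 128 is illustrative and the surrounding function is assumed.

```go
statedb, release, err := eth.StateAtBlock(block, 128, nil, true /* readOnly */, false /* preferDisk */)
if err != nil {
	return err
}
defer release() // always release, otherwise the held resources may leak
// ... trace against statedb without committing anything ...
_ = statedb
```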
@ -37,14 +37,15 @@ func init() {
|
||||||
// a reversed signature can be matched against the size of the data.
|
// a reversed signature can be matched against the size of the data.
|
||||||
//
|
//
|
||||||
// Example:
|
// Example:
|
||||||
// > debug.traceTransaction( "0x214e597e35da083692f5386141e69f47e973b2c56e7a8073b1ea08fd7571e9de", {tracer: "4byteTracer"})
|
//
|
||||||
// {
|
// > debug.traceTransaction( "0x214e597e35da083692f5386141e69f47e973b2c56e7a8073b1ea08fd7571e9de", {tracer: "4byteTracer"})
|
||||||
// 0x27dc297e-128: 1,
|
// {
|
||||||
// 0x38cc4831-0: 2,
|
// 0x27dc297e-128: 1,
|
||||||
// 0x524f3889-96: 1,
|
// 0x38cc4831-0: 2,
|
||||||
// 0xadf59f99-288: 1,
|
// 0x524f3889-96: 1,
|
||||||
// 0xc281d19e-0: 1
|
// 0xadf59f99-288: 1,
|
||||||
// }
|
// 0xc281d19e-0: 1
|
||||||
|
// }
|
||||||
type fourByteTracer struct {
|
type fourByteTracer struct {
|
||||||
env *vm.EVM
|
env *vm.EVM
|
||||||
ids map[string]int // ids aggregates the 4byte ids found
|
ids map[string]int // ids aggregates the 4byte ids found
|
||||||
|
|
|
@ -14,24 +14,20 @@
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
/*
|
// Package native is a collection of tracers written in go.
|
||||||
Package native is a collection of tracers written in go.
|
//
|
||||||
|
// In order to add a native tracer and have it compiled into the binary, a new
|
||||||
In order to add a native tracer and have it compiled into the binary, a new
|
// file needs to be added to this folder, containing an implementation of the
|
||||||
file needs to be added to this folder, containing an implementation of the
|
// `eth.tracers.Tracer` interface.
|
||||||
`eth.tracers.Tracer` interface.
|
//
|
||||||
|
// Aside from implementing the tracer, it also needs to register itself, using the
|
||||||
Aside from implementing the tracer, it also needs to register itself, using the
|
// `register` method -- and this needs to be done in the package initialization.
|
||||||
`register` method -- and this needs to be done in the package initialization.
|
//
|
||||||
|
// Example:
|
||||||
Example:
|
//
|
||||||
|
// func init() {
|
||||||
```golang
|
// register("noopTracerNative", newNoopTracer)
|
||||||
func init() {
|
// }
|
||||||
register("noopTracerNative", newNoopTracer)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
*/
|
|
||||||
package native
|
package native
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
|
|
@ -266,13 +266,14 @@ func (db *Database) Path() string {
|
||||||
// the metrics subsystem.
|
// the metrics subsystem.
|
||||||
//
|
//
|
||||||
// This is what a LevelDB stats table looks like (currently):
|
// This is what a LevelDB stats table looks like (currently):
|
||||||
// Compactions
|
//
|
||||||
// Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)
|
// Compactions
|
||||||
// -------+------------+---------------+---------------+---------------+---------------
|
// Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)
|
||||||
// 0 | 0 | 0.00000 | 1.27969 | 0.00000 | 12.31098
|
// -------+------------+---------------+---------------+---------------+---------------
|
||||||
// 1 | 85 | 109.27913 | 28.09293 | 213.92493 | 214.26294
|
// 0 | 0 | 0.00000 | 1.27969 | 0.00000 | 12.31098
|
||||||
// 2 | 523 | 1000.37159 | 7.26059 | 66.86342 | 66.77884
|
// 1 | 85 | 109.27913 | 28.09293 | 213.92493 | 214.26294
|
||||||
// 3 | 570 | 1113.18458 | 0.00000 | 0.00000 | 0.00000
|
// 2 | 523 | 1000.37159 | 7.26059 | 66.86342 | 66.77884
|
||||||
|
// 3 | 570 | 1113.18458 | 0.00000 | 0.00000 | 0.00000
|
||||||
//
|
//
|
||||||
// This is what the write delay looks like (currently):
|
// This is what the write delay looks like (currently):
|
||||||
// DelayN:5 Delay:406.604657ms Paused: false
|
// DelayN:5 Delay:406.604657ms Paused: false
|
||||||
|
|
|
@ -102,13 +102,17 @@ type Service struct {
|
||||||
// websocket.
|
// websocket.
|
||||||
//
|
//
|
||||||
// From Gorilla websocket docs:
|
// From Gorilla websocket docs:
|
||||||
// Connections support one concurrent reader and one concurrent writer.
|
//
|
||||||
// Applications are responsible for ensuring that no more than one goroutine calls the write methods
|
// Connections support one concurrent reader and one concurrent writer. Applications are
|
||||||
// - NextWriter, SetWriteDeadline, WriteMessage, WriteJSON, EnableWriteCompression, SetCompressionLevel
|
// responsible for ensuring that
|
||||||
// concurrently and that no more than one goroutine calls the read methods
|
// - no more than one goroutine calls the write methods
|
||||||
// - NextReader, SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler
|
// NextWriter, SetWriteDeadline, WriteMessage, WriteJSON, EnableWriteCompression,
|
||||||
// concurrently.
|
// SetCompressionLevel concurrently; and
|
||||||
// The Close and WriteControl methods can be called concurrently with all other methods.
|
// - that no more than one goroutine calls the
|
||||||
|
// read methods NextReader, SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler,
|
||||||
|
// SetPingHandler concurrently.
|
||||||
|
//
|
||||||
|
// The Close and WriteControl methods can be called concurrently with all other methods.
|
||||||
type connWrapper struct {
|
type connWrapper struct {
|
||||||
conn *websocket.Conn
|
conn *websocket.Conn
|
||||||
|
|
||||||
|
|
|
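A self-contained sketch of the locking pattern such a wrapper can use to satisfy the quoted constraints; the package, field and method names are illustrative and the real connWrapper may differ in detail.

```go
package wsguard // illustrative package name

import (
	"sync"

	"github.com/gorilla/websocket"
)

// safeConn serializes all writers behind one mutex and all readers behind
// another, which is exactly the usage contract quoted from the Gorilla docs.
type safeConn struct {
	conn  *websocket.Conn
	rlock sync.Mutex
	wlock sync.Mutex
}

// WriteJSON funnels concurrent writers onto the single permitted writer.
func (w *safeConn) WriteJSON(v interface{}) error {
	w.wlock.Lock()
	defer w.wlock.Unlock()
	return w.conn.WriteJSON(v)
}

// ReadJSON funnels concurrent readers onto the single permitted reader.
func (w *safeConn) ReadJSON(v interface{}) error {
	w.rlock.Lock()
	defer w.rlock.Unlock()
	return w.conn.ReadJSON(v)
}
```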
@ -83,7 +83,7 @@ func (tt *TestCmd) Run(name string, args ...string) {
|
||||||
// InputLine writes the given text to the child's stdin.
|
// InputLine writes the given text to the child's stdin.
|
||||||
// This method can also be called from an expect template, e.g.:
|
// This method can also be called from an expect template, e.g.:
|
||||||
//
|
//
|
||||||
// geth.expect(`Passphrase: {{.InputLine "password"}}`)
|
// geth.expect(`Passphrase: {{.InputLine "password"}}`)
|
||||||
func (tt *TestCmd) InputLine(s string) string {
|
func (tt *TestCmd) InputLine(s string) string {
|
||||||
io.WriteString(tt.stdin, s+"\n")
|
io.WriteString(tt.stdin, s+"\n")
|
||||||
return ""
|
return ""
|
||||||
|
|
|
@ -731,10 +731,10 @@ func (s *BlockChainAPI) GetHeaderByHash(ctx context.Context, hash common.Hash) m
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetBlockByNumber returns the requested canonical block.
|
// GetBlockByNumber returns the requested canonical block.
|
||||||
// * When blockNr is -1 the chain head is returned.
|
// - When blockNr is -1 the chain head is returned.
|
||||||
// * When blockNr is -2 the pending chain head is returned.
|
// - When blockNr is -2 the pending chain head is returned.
|
||||||
// * When fullTx is true all transactions in the block are returned, otherwise
|
// - When fullTx is true all transactions in the block are returned, otherwise
|
||||||
// only the transaction hash is returned.
|
// only the transaction hash is returned.
|
||||||
func (s *BlockChainAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) {
|
func (s *BlockChainAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) {
|
||||||
block, err := s.b.BlockByNumber(ctx, number)
|
block, err := s.b.BlockByNumber(ctx, number)
|
||||||
if block != nil && err == nil {
|
if block != nil && err == nil {
|
||||||
|
|
|
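A hedged fragment showing the special block numbers in use; rpc.LatestBlockNumber and rpc.PendingBlockNumber are the -1 and -2 sentinels from the rpc package.

```go
latest, err := s.GetBlockByNumber(ctx, rpc.LatestBlockNumber, false) // blockNr == -1, tx hashes only
if err != nil {
	return err
}
pending, err := s.GetBlockByNumber(ctx, rpc.PendingBlockNumber, true) // blockNr == -2, full transactions
if err != nil {
	return err
}
_, _ = latest, pending
```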
@ -54,11 +54,11 @@ var migrationApplied = map[*cli.Command]struct{}{}
|
||||||
//
|
//
|
||||||
// Example:
|
// Example:
|
||||||
//
|
//
|
||||||
// geth account new --keystore /tmp/mykeystore --lightkdf
|
// geth account new --keystore /tmp/mykeystore --lightkdf
|
||||||
//
|
//
|
||||||
// is equivalent after calling this method with:
|
// is equivalent after calling this method with:
|
||||||
//
|
//
|
||||||
// geth --keystore /tmp/mykeystore --lightkdf account new
|
// geth --keystore /tmp/mykeystore --lightkdf account new
|
||||||
//
|
//
|
||||||
// i.e. in the subcommand Action function of 'account new', ctx.Bool("lightkdf")
|
// i.e. in the subcommand Action function of 'account new', ctx.Bool("lightkdf")
|
||||||
// will return true even if --lightkdf is set as a global option.
|
// will return true even if --lightkdf is set as a global option.
|
||||||
|
|
16
les/api.go
|
@ -366,10 +366,11 @@ func NewLightAPI(backend *lesCommons) *LightAPI {
|
||||||
// LatestCheckpoint returns the latest local checkpoint package.
|
// LatestCheckpoint returns the latest local checkpoint package.
|
||||||
//
|
//
|
||||||
// The checkpoint package consists of 4 strings:
|
// The checkpoint package consists of 4 strings:
|
||||||
// result[0], hex encoded latest section index
|
//
|
||||||
// result[1], 32 bytes hex encoded latest section head hash
|
// result[0], hex encoded latest section index
|
||||||
// result[2], 32 bytes hex encoded latest section canonical hash trie root hash
|
// result[1], 32 bytes hex encoded latest section head hash
|
||||||
// result[3], 32 bytes hex encoded latest section bloom trie root hash
|
// result[2], 32 bytes hex encoded latest section canonical hash trie root hash
|
||||||
|
// result[3], 32 bytes hex encoded latest section bloom trie root hash
|
||||||
func (api *LightAPI) LatestCheckpoint() ([4]string, error) {
|
func (api *LightAPI) LatestCheckpoint() ([4]string, error) {
|
||||||
var res [4]string
|
var res [4]string
|
||||||
cp := api.backend.latestLocalCheckpoint()
|
cp := api.backend.latestLocalCheckpoint()
|
||||||
|
@ -384,9 +385,10 @@ func (api *LightAPI) LatestCheckpoint() ([4]string, error) {
|
||||||
// GetLocalCheckpoint returns the specific local checkpoint package.
|
// GetLocalCheckpoint returns the specific local checkpoint package.
|
||||||
//
|
//
|
||||||
// The checkpoint package consists of 3 strings:
|
// The checkpoint package consists of 3 strings:
|
||||||
// result[0], 32 bytes hex encoded latest section head hash
|
//
|
||||||
// result[1], 32 bytes hex encoded latest section canonical hash trie root hash
|
// result[0], 32 bytes hex encoded latest section head hash
|
||||||
// result[2], 32 bytes hex encoded latest section bloom trie root hash
|
// result[1], 32 bytes hex encoded latest section canonical hash trie root hash
|
||||||
|
// result[2], 32 bytes hex encoded latest section bloom trie root hash
|
||||||
func (api *LightAPI) GetCheckpoint(index uint64) ([3]string, error) {
|
func (api *LightAPI) GetCheckpoint(index uint64) ([3]string, error) {
|
||||||
var res [3]string
|
var res [3]string
|
||||||
cp := api.backend.localCheckpoint(index)
|
cp := api.backend.localCheckpoint(index)
|
||||||
|
|
|
@ -56,15 +56,19 @@ func NewConsensusAPI(les *les.LightEthereum) *ConsensusAPI {
|
||||||
}
|
}
|
||||||
|
|
||||||
// ForkchoiceUpdatedV1 has several responsibilities:
|
// ForkchoiceUpdatedV1 has several responsibilities:
|
||||||
// If the method is called with an empty head block:
|
//
|
||||||
// we return success, which can be used to check if the catalyst mode is enabled
|
// We try to set our blockchain to the headBlock.
|
||||||
// If the total difficulty was not reached:
|
//
|
||||||
// we return INVALID
|
// If the method is called with an empty head block: we return success, which can be used
|
||||||
// If the finalizedBlockHash is set:
|
// to check if the catalyst mode is enabled.
|
||||||
// we check if we have the finalizedBlockHash in our db, if not we start a sync
|
//
|
||||||
// We try to set our blockchain to the headBlock
|
// If the total difficulty was not reached: we return INVALID.
|
||||||
// If there are payloadAttributes:
|
//
|
||||||
// we return an error since block creation is not supported in les mode
|
// If the finalizedBlockHash is set: we check if we have the finalizedBlockHash in our db,
|
||||||
|
// if not we start a sync.
|
||||||
|
//
|
||||||
|
// If there are payloadAttributes: we return an error since block creation is not
|
||||||
|
// supported in les mode.
|
||||||
func (api *ConsensusAPI) ForkchoiceUpdatedV1(heads beacon.ForkchoiceStateV1, payloadAttributes *beacon.PayloadAttributesV1) (beacon.ForkChoiceResponse, error) {
|
func (api *ConsensusAPI) ForkchoiceUpdatedV1(heads beacon.ForkchoiceStateV1, payloadAttributes *beacon.PayloadAttributesV1) (beacon.ForkChoiceResponse, error) {
|
||||||
if heads.HeadBlockHash == (common.Hash{}) {
|
if heads.HeadBlockHash == (common.Hash{}) {
|
||||||
log.Warn("Forkchoice requested update to zero hash")
|
log.Warn("Forkchoice requested update to zero hash")
|
||||||
|
|
|
@ -693,9 +693,11 @@ func (d *Downloader) fetchHead(p *peerConnection) (head *types.Header, pivot *ty
|
||||||
// calculateRequestSpan calculates what headers to request from a peer when trying to determine the
|
// calculateRequestSpan calculates what headers to request from a peer when trying to determine the
|
||||||
// common ancestor.
|
// common ancestor.
|
||||||
// It returns parameters to be used for peer.RequestHeadersByNumber:
|
// It returns parameters to be used for peer.RequestHeadersByNumber:
|
||||||
// from - starting block number
|
//
|
||||||
// count - number of headers to request
|
// from - starting block number
|
||||||
// skip - number of headers to skip
|
// count - number of headers to request
|
||||||
|
// skip - number of headers to skip
|
||||||
|
//
|
||||||
// and also returns 'max', the last block which is expected to be returned by the remote peers,
|
// and also returns 'max', the last block which is expected to be returned by the remote peers,
|
||||||
// given the (from,count,skip)
|
// given the (from,count,skip)
|
||||||
func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) {
|
func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) {
|
||||||
|
@ -1310,22 +1312,22 @@ func (d *Downloader) fetchReceipts(from uint64) error {
|
||||||
// various callbacks to handle the slight differences between processing them.
|
// various callbacks to handle the slight differences between processing them.
|
||||||
//
|
//
|
||||||
// The instrumentation parameters:
|
// The instrumentation parameters:
|
||||||
// - errCancel: error type to return if the fetch operation is cancelled (mostly makes logging nicer)
|
// - errCancel: error type to return if the fetch operation is cancelled (mostly makes logging nicer)
|
||||||
// - deliveryCh: channel from which to retrieve downloaded data packets (merged from all concurrent peers)
|
// - deliveryCh: channel from which to retrieve downloaded data packets (merged from all concurrent peers)
|
||||||
// - deliver: processing callback to deliver data packets into type specific download queues (usually within `queue`)
|
// - deliver: processing callback to deliver data packets into type specific download queues (usually within `queue`)
|
||||||
// - wakeCh: notification channel for waking the fetcher when new tasks are available (or sync completed)
|
// - wakeCh: notification channel for waking the fetcher when new tasks are available (or sync completed)
|
||||||
// - expire: task callback method to abort requests that took too long and return the faulty peers (traffic shaping)
|
// - expire: task callback method to abort requests that took too long and return the faulty peers (traffic shaping)
|
||||||
// - pending: task callback for the number of requests still needing download (detect completion/non-completability)
|
// - pending: task callback for the number of requests still needing download (detect completion/non-completability)
|
||||||
// - inFlight: task callback for the number of in-progress requests (wait for all active downloads to finish)
|
// - inFlight: task callback for the number of in-progress requests (wait for all active downloads to finish)
|
||||||
// - throttle: task callback to check if the processing queue is full and activate throttling (bound memory use)
|
// - throttle: task callback to check if the processing queue is full and activate throttling (bound memory use)
|
||||||
// - reserve: task callback to reserve new download tasks to a particular peer (also signals partial completions)
|
// - reserve: task callback to reserve new download tasks to a particular peer (also signals partial completions)
|
||||||
// - fetchHook: tester callback to notify of new tasks being initiated (allows testing the scheduling logic)
|
// - fetchHook: tester callback to notify of new tasks being initiated (allows testing the scheduling logic)
|
||||||
// - fetch: network callback to actually send a particular download request to a physical remote peer
|
// - fetch: network callback to actually send a particular download request to a physical remote peer
|
||||||
// - cancel: task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer)
|
// - cancel: task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer)
|
||||||
// - capacity: network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping)
|
// - capacity: network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping)
|
||||||
// - idle: network callback to retrieve the currently (type specific) idle peers that can be assigned tasks
|
// - idle: network callback to retrieve the currently (type specific) idle peers that can be assigned tasks
|
||||||
// - setIdle: network callback to set a peer back to idle and update its estimated capacity (traffic shaping)
|
// - setIdle: network callback to set a peer back to idle and update its estimated capacity (traffic shaping)
|
||||||
// - kind: textual label of the type being downloaded to display in log messages
|
// - kind: textual label of the type being downloaded to display in log messages
|
||||||
func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool,
|
func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool,
|
||||||
expire func() map[string]int, pending func() int, inFlight func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, bool),
|
expire func() map[string]int, pending func() int, inFlight func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, bool),
|
||||||
fetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int,
|
fetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int,
|
||||||
|
|
|
@ -477,9 +477,10 @@ func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bo
|
||||||
// to access the queue, so they already need a lock anyway.
|
// to access the queue, so they already need a lock anyway.
|
||||||
//
|
//
|
||||||
// Returns:
|
// Returns:
|
||||||
// item - the fetchRequest
|
//
|
||||||
// progress - whether any progress was made
|
// item - the fetchRequest
|
||||||
// throttle - if the caller should throttle for a while
|
// progress - whether any progress was made
|
||||||
|
// throttle - if the caller should throttle for a while
|
||||||
func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
|
func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
|
||||||
pendPool map[string]*fetchRequest, kind uint) (*fetchRequest, bool, bool) {
|
pendPool map[string]*fetchRequest, kind uint) (*fetchRequest, bool, bool) {
|
||||||
// Short circuit if the pool has been depleted, or if the peer's already
|
// Short circuit if the pool has been depleted, or if the peer's already
|
||||||
|
|
|
@ -71,10 +71,11 @@ func (r *resultStore) SetThrottleThreshold(threshold uint64) uint64 {
|
||||||
// wants to reserve headers for fetching.
|
// wants to reserve headers for fetching.
|
||||||
//
|
//
|
||||||
// It returns the following:
|
// It returns the following:
|
||||||
// stale - if true, this item is already passed, and should not be requested again
|
//
|
||||||
// throttled - if true, the store is at capacity, this particular header is not prio now
|
// stale - if true, this item is already passed, and should not be requested again
|
||||||
// item - the result to store data into
|
// throttled - if true, the store is at capacity, this particular header is not prio now
|
||||||
// err - any error that occurred
|
// item - the result to store data into
|
||||||
|
// err - any error that occurred
|
||||||
func (r *resultStore) AddFetch(header *types.Header, fastSync bool) (stale, throttled bool, item *fetchResult, err error) {
|
func (r *resultStore) AddFetch(header *types.Header, fastSync bool) (stale, throttled bool, item *fetchResult, err error) {
|
||||||
r.lock.Lock()
|
r.lock.Lock()
|
||||||
defer r.lock.Unlock()
|
defer r.lock.Unlock()
|
||||||
|
|
|
@ -242,18 +242,20 @@ func (f *lightFetcher) forEachPeer(check func(id enode.ID, p *fetcherPeer) bool)
|
||||||
}
|
}
|
||||||
|
|
||||||
// mainloop is the main event loop of the light fetcher, which is responsible for
|
// mainloop is the main event loop of the light fetcher, which is responsible for
|
||||||
// - announcement maintenance(ulc)
|
|
||||||
// If we are running in ultra light client mode, then all announcements from
|
|
||||||
// the trusted servers are maintained. If the same announcements from trusted
|
|
||||||
// servers reach the threshold, then the relevant header is requested for retrieval.
|
|
||||||
//
|
//
|
||||||
// - block header retrieval
|
// - announcement maintenance(ulc)
|
||||||
// Whenever we receive announce with higher td compared with local chain, the
|
|
||||||
// request will be made for header retrieval.
|
|
||||||
//
|
//
|
||||||
// - re-sync trigger
|
// If we are running in ultra light client mode, then all announcements from
|
||||||
// If the local chain lags too much, then the fetcher will enter "synchronise"
|
// the trusted servers are maintained. If the same announcements from trusted
|
||||||
// mode to retrieve missing headers in batch.
|
// servers reach the threshold, then the relevant header is requested for retrieval.
|
||||||
|
//
|
||||||
|
// - block header retrieval
|
||||||
|
// Whenever we receive announce with higher td compared with local chain, the
|
||||||
|
// request will be made for header retrieval.
|
||||||
|
//
|
||||||
|
// - re-sync trigger
|
||||||
|
// If the local chain lags too much, then the fetcher will enter "synchronise"
|
||||||
|
// mode to retrieve missing headers in batch.
|
||||||
func (f *lightFetcher) mainloop() {
|
func (f *lightFetcher) mainloop() {
|
||||||
defer f.wg.Done()
|
defer f.wg.Done()
|
||||||
|
|
||||||
|
|
|
@ -71,15 +71,16 @@ type TxPool struct {
|
||||||
eip2718 bool // Fork indicator whether we are in the eip2718 stage.
|
eip2718 bool // Fork indicator whether we are in the eip2718 stage.
|
||||||
}
|
}
|
||||||
|
|
||||||
// TxRelayBackend provides an interface to the mechanism that forwards transactions
|
// TxRelayBackend provides an interface to the mechanism that forwards transactions to the
|
||||||
// to the ETH network. The implementations of the functions should be non-blocking.
|
// ETH network. The implementations of the functions should be non-blocking.
|
||||||
//
|
//
|
||||||
// Send instructs backend to forward new transactions
|
// Send instructs backend to forward new transactions. NewHead notifies backend about a new
|
||||||
// NewHead notifies backend about a new head after processed by the tx pool,
|
// head after processed by the tx pool, including mined and rolled back transactions since
|
||||||
// including mined and rolled back transactions since the last event
|
// the last event.
|
||||||
// Discard notifies backend about transactions that should be discarded either
|
//
|
||||||
// because they have been replaced by a re-send or because they have been mined
|
// Discard notifies backend about transactions that should be discarded either because
|
||||||
// long ago and no rollback is expected
|
// they have been replaced by a re-send or because they have been mined long ago and no
|
||||||
|
// rollback is expected.
|
||||||
type TxRelayBackend interface {
|
type TxRelayBackend interface {
|
||||||
Send(txs types.Transactions)
|
Send(txs types.Transactions)
|
||||||
NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash)
|
NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash)
|
||||||
|
|
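A minimal no-op implementation can be handy, for instance in tests. This is only a sketch: the Discard signature below is inferred from the interface description above rather than copied from the source, and the package name is illustrative.

```go
package lesutil // illustrative package name

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// nullTxRelay is a no-op TxRelayBackend sketch: it forwards nothing and
// ignores head and discard notifications.
type nullTxRelay struct{}

func (nullTxRelay) Send(txs types.Transactions) {}

func (nullTxRelay) NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash) {}

func (nullTxRelay) Discard(hashes []common.Hash) {}
```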
218
log/doc.go
|
@ -7,27 +7,25 @@ This package enforces you to only log key/value pairs. Keys must be strings. Val
|
||||||
any type that you like. The default output format is logfmt, but you may also choose to use
|
any type that you like. The default output format is logfmt, but you may also choose to use
|
||||||
JSON instead if that suits you. Here's how you log:
|
JSON instead if that suits you. Here's how you log:
|
||||||
|
|
||||||
	log.Info("page accessed", "path", r.URL.Path, "user_id", user.id)

This will output a line that looks like:

	lvl=info t=2014-05-02T16:07:23-0700 msg="page accessed" path=/org/71/profile user_id=9

-Getting Started
+# Getting Started

To get started, you'll want to import the library:

	import log "github.com/inconshreveable/log15"

Now you're ready to start logging:

	func main() {
		log.Info("Program starting", "args", os.Args)
	}

-Convention
+# Convention

Because recording a human-meaningful message is common and good practice, the first argument to every
logging method is the value to the *implicit* key 'msg'.

@@ -40,38 +38,35 @@ you to favor terseness, ordering, and speed over safety. This is a reasonable tr

logging functions. You don't need to explicitly state keys/values, log15 understands that they alternate
in the variadic argument list:

	log.Warn("size out of bounds", "low", lowBound, "high", highBound, "val", val)

If you really do favor your type-safety, you may choose to pass a log.Ctx instead:

	log.Warn("size out of bounds", log.Ctx{"low": lowBound, "high": highBound, "val": val})

-Context loggers
+# Context loggers

Frequently, you want to add context to a logger so that you can track actions associated with it. An http
request is a good example. You can easily create new loggers that have context that is automatically included
with each log line:

	requestlogger := log.New("path", r.URL.Path)

	// later
	requestlogger.Debug("db txn commit", "duration", txnTimer.Finish())

This will output a log line that includes the path context that is attached to the logger:

	lvl=dbug t=2014-05-02T16:07:23-0700 path=/repo/12/add_hook msg="db txn commit" duration=0.12

-Handlers
+# Handlers

The Handler interface defines where log lines are printed to and how they are formatted. Handler is a
single interface that is inspired by net/http's handler interface:

	type Handler interface {
		Log(r *Record) error
	}

Handlers can filter records, format them, or dispatch to multiple other Handlers.
This package implements a number of Handlers for common logging patterns that are

@@ -79,49 +74,49 @@ easily composed to create flexible, custom logging structures.

Here's an example handler that prints logfmt output to Stdout:

	handler := log.StreamHandler(os.Stdout, log.LogfmtFormat())

Here's an example handler that defers to two other handlers. One handler only prints records
from the rpc package in logfmt to standard out. The other prints records at Error level
or above in JSON formatted output to the file /var/log/service.json.

	handler := log.MultiHandler(
		log.LvlFilterHandler(log.LvlError, log.Must.FileHandler("/var/log/service.json", log.JSONFormat())),
		log.MatchFilterHandler("pkg", "app/rpc", log.StdoutHandler),
	)

-Logging File Names and Line Numbers
+# Logging File Names and Line Numbers

This package implements three Handlers that add debugging information to the
context, CallerFileHandler, CallerFuncHandler and CallerStackHandler. Here's
an example that adds the source file and line number of each logging call to
the context.

	h := log.CallerFileHandler(log.StdoutHandler)
	log.Root().SetHandler(h)
	...
	log.Error("open file", "err", err)

This will output a line that looks like:

	lvl=eror t=2014-05-02T16:07:23-0700 msg="open file" err="file not found" caller=data.go:42

Here's an example that logs the call stack rather than just the call site.

	h := log.CallerStackHandler("%+v", log.StdoutHandler)
	log.Root().SetHandler(h)
	...
	log.Error("open file", "err", err)

This will output a line that looks like:

	lvl=eror t=2014-05-02T16:07:23-0700 msg="open file" err="file not found" stack="[pkg/data.go:42 pkg/cmd/main.go]"

The "%+v" format instructs the handler to include the path of the source file
relative to the compile time GOPATH. The github.com/go-stack/stack package
documents the full list of formatting verbs and modifiers available.

-Custom Handlers
+# Custom Handlers

The Handler interface is so simple that it's also trivial to write your own. Let's create an
example handler which tries to write to one handler, but if that fails it falls back to

@@ -129,24 +124,24 @@ writing to another handler and includes the error that it encountered when tryin

to the primary. This might be useful when trying to log over a network socket, but if that
fails you want to log those records to a file on disk.

	type BackupHandler struct {
		Primary   Handler
		Secondary Handler
	}

	func (h *BackupHandler) Log(r *Record) error {
		err := h.Primary.Log(r)
		if err != nil {
			r.Ctx = append(r.Ctx, "primary_err", err)
			return h.Secondary.Log(r)
		}
		return nil
	}

This pattern is so useful that a generic version that handles an arbitrary number of Handlers
is included as part of this library called FailoverHandler.

-Logging Expensive Operations
+# Logging Expensive Operations

Sometimes, you want to log values that are extremely expensive to compute, but you don't want to pay
the price of computing them if you haven't turned up your logging level to a high level of detail.

@@ -155,50 +150,50 @@ This package provides a simple type to annotate a logging operation that you wan

lazily, just when it is about to be logged, so that it would not be evaluated if an upstream Handler
filters it out. Just wrap any function which takes no arguments with the log.Lazy type. For example:

	func factorRSAKey() (factors []int) {
		// return the factors of a very large number
	}

	log.Debug("factors", log.Lazy{factorRSAKey})

If this message is not logged for any reason (like logging at the Error level), then
factorRSAKey is never evaluated.

-Dynamic context values
+# Dynamic context values

The same log.Lazy mechanism can be used to attach context to a logger which you want to be
evaluated when the message is logged, but not when the logger is created. For example, let's imagine
a game where you have Player objects:

	type Player struct {
		name  string
		alive bool
		log.Logger
	}

You always want to log a player's name and whether they're alive or dead, so when you create the player
object, you might do:

	p := &Player{name: name, alive: true}
	p.Logger = log.New("name", p.name, "alive", p.alive)

Only now, even after a player has died, the logger will still report they are alive because the logging
context was evaluated when the logger was created. By using the Lazy wrapper, we can defer the evaluation
of whether the player is alive or not to each log message, so that the log records will reflect the player's
current state no matter when the log message is written:

	p := &Player{name: name, alive: true}
	isAlive := func() bool { return p.alive }
	p.Logger = log.New("name", p.name, "alive", log.Lazy{isAlive})

-Terminal Format
+# Terminal Format

If log15 detects that stdout is a terminal, it will configure the default
handler for it (which is log.StdoutHandler) to use TerminalFormat. This format
logs records nicely for your terminal, including color-coded output based
on log level.
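As a minimal sketch (assuming the StreamHandler and TerminalFormat helpers shown elsewhere
in this package), the same format can also be selected explicitly, for example on standard
error with colors forced on:

	h := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
	log.Root().SetHandler(h)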
-Error Handling
+# Error Handling

Because log15 allows you to step around the type system, there are a few ways you can specify
invalid arguments to the logging functions. You could, for example, wrap something that is not

@@ -216,61 +211,61 @@ are encouraged to return errors only if they fail to write their log records out

syslog daemon is not responding. This allows the construction of useful handlers which cope with those failures
like the FailoverHandler.

-Library Use
+# Library Use

log15 is intended to be useful for library authors as a way to provide configurable logging to
users of their library. Best practice for use in a library is to always disable all output for your logger
by default and to provide a public Logger instance that consumers of your library can configure. Like so:

	package yourlib

	import "github.com/inconshreveable/log15"

	var Log = log.New()

	func init() {
		Log.SetHandler(log.DiscardHandler())
	}

Users of your library may then enable it if they like:

	import "github.com/inconshreveable/log15"
	import "example.com/yourlib"

	func main() {
		handler := // custom handler setup
		yourlib.Log.SetHandler(handler)
	}
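For instance, a consumer could fill in the "custom handler setup" placeholder above with a
level-filtered logfmt handler; the level and writer here are arbitrary choices, a minimal sketch:

	handler := log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.LogfmtFormat()))
	yourlib.Log.SetHandler(handler)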
-Best practices attaching logger context
+# Best practices attaching logger context

The ability to attach context to a logger is a powerful one. Where should you do it and why?
I favor embedding a Logger directly into any persistent object in my application and adding
unique, tracing context keys to it. For instance, imagine I am writing a web browser:

	type Tab struct {
		url    string
		render *RenderingContext
		// ...

		Logger
	}

	func NewTab(url string) *Tab {
		return &Tab{
			// ...
			url: url,

			Logger: log.New("url", url),
		}
	}

When a new tab is created, I assign a logger to it with the url of
the tab as context so it can easily be traced through the logs.
Now, whenever we perform any operation with the tab, we'll log with its
embedded logger and it will include the tab's url automatically:

	tab.Debug("moved position", "idx", tab.idx)

There's only one problem. What if the tab url changes? We could
use log.Lazy to make sure the current url is always written, but that

@@ -285,29 +280,29 @@ function to let you generate what you might call "surrogate keys"

They're just random hex identifiers to use for tracing. Back to our
Tab example, we would prefer to set up our Logger like so:

	import logext "github.com/inconshreveable/log15/ext"

	t := &Tab{
		// ...
		url: url,
	}

	t.Logger = log.New("id", logext.RandId(8), "url", log.Lazy{t.getUrl})
	return t

Now we'll have a unique traceable identifier even across loading new urls, but
we'll still be able to see the tab's current url in the log messages.

-Must
+# Must

For all Handler functions which can return an error, there is a version of that
function which will return no error but panics on failure. They are all available
on the Must object. For example:

	log.Must.FileHandler("/path", log.JSONFormat())
	log.Must.NetHandler("tcp", ":1234", log.JSONFormat())

-Inspiration and Credit
+# Inspiration and Credit

All of the following excellent projects inspired the design of this library:

@@ -325,9 +320,8 @@ github.com/spacemonkeygo/spacelog

golang's stdlib, notably io and net/http

-The Name
+# The Name

https://xkcd.com/927/
*/
package log
@@ -79,12 +79,11 @@ type TerminalStringer interface {

// a terminal with color-coded level output and terser human friendly timestamp.
// This format should only be used for interactive programs or while developing.
//
//	[LEVEL] [TIME] MESSAGE key=value key=value ...
//
// Example:
//
//	[DBUG] [May 16 20:58:45] remove route ns=haproxy addr=127.0.0.1:50002
-//
func TerminalFormat(usecolor bool) Format {
	return FormatFunc(func(r *Record) []byte {
		var color = 0

@@ -149,7 +148,6 @@ func TerminalFormat(usecolor bool) Format {

// format for key/value pairs.
//
// For more details see: http://godoc.org/github.com/kr/logfmt
-//
func LogfmtFormat() Format {
	return FormatFunc(func(r *Record) []byte {
		common := []interface{}{r.KeyNames.Time, r.Time, r.KeyNames.Lvl, r.Lvl, r.KeyNames.Msg, r.Msg}
@@ -136,15 +136,14 @@ func CallerStackHandler(format string, h Handler) Handler {

// wrapped Handler if the given function evaluates true. For example,
// to only log records where the 'err' key is not nil:
//
//	logger.SetHandler(FilterHandler(func(r *Record) bool {
//		for i := 0; i < len(r.Ctx); i += 2 {
//			if r.Ctx[i] == "err" {
//				return r.Ctx[i+1] != nil
//			}
//		}
//		return false
//	}, h))
-//
func FilterHandler(fn func(r *Record) bool, h Handler) Handler {
	return FuncHandler(func(r *Record) error {
		if fn(r) {

@@ -159,8 +158,7 @@ func FilterHandler(fn func(r *Record) bool, h Handler) Handler {

// context matches the value. For example, to only log records
// from your ui package:
//
//	log.MatchFilterHandler("pkg", "app/ui", log.StdoutHandler)
-//
func MatchFilterHandler(key string, value interface{}, h Handler) Handler {
	return FilterHandler(func(r *Record) (pass bool) {
		switch key {

@@ -186,8 +184,7 @@ func MatchFilterHandler(key string, value interface{}, h Handler) Handler {

// level to the wrapped Handler. For example, to only
// log Error/Crit records:
//
//	log.LvlFilterHandler(log.LvlError, log.StdoutHandler)
-//
func LvlFilterHandler(maxLvl Lvl, h Handler) Handler {
	return FilterHandler(func(r *Record) (pass bool) {
		return r.Lvl <= maxLvl

@@ -199,10 +196,9 @@ func LvlFilterHandler(maxLvl Lvl, h Handler) Handler {

// to different locations. For example, to log to a file and
// standard error:
//
//	log.MultiHandler(
//		log.Must.FileHandler("/var/log/app.log", log.LogfmtFormat()),
//		log.StderrHandler)
-//
func MultiHandler(hs ...Handler) Handler {
	return FuncHandler(func(r *Record) error {
		for _, h := range hs {

@@ -220,10 +216,10 @@ func MultiHandler(hs ...Handler) Handler {

// to writing to a file if the network fails, and then to
// standard out if the file write fails:
//
//	log.FailoverHandler(
//		log.Must.NetHandler("tcp", ":9090", log.JSONFormat()),
//		log.Must.FileHandler("/var/log/app.log", log.LogfmtFormat()),
//		log.StdoutHandler)
//
// All writes that do not go to the first handler will add context with keys of
// the form "failover_err_{idx}" which explain the error encountered while
@@ -82,14 +82,14 @@ func (h *GlogHandler) Verbosity(level Lvl) {

//
// For instance:
//
//	pattern="gopher.go=3"
//	 sets the V level to 3 in all Go files named "gopher.go"
//
//	pattern="foo=3"
//	 sets V to 3 in all files of any packages whose import path ends in "foo"
//
//	pattern="foo/*=3"
//	 sets V to 3 in all files of any packages whose import path contains "foo"
func (h *GlogHandler) Vmodule(ruleset string) error {
	var filter []pattern
	for _, rule := range strings.Split(ruleset, ",") {
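// A brief usage sketch of the pattern syntax above (handler wiring and the chosen
// levels are arbitrary illustrative choices):
//
//	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
//	glogger.Verbosity(log.LvlInfo)
//	if err := glogger.Vmodule("gopher.go=3,foo/*=5"); err != nil {
//		// invalid pattern syntax
//	}
//	log.Root().SetHandler(glogger)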
@@ -1,11 +1,3 @@
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package influxdb

import (
@@ -77,7 +77,6 @@ func (bi *BigInt) SetInt64(x int64) {

//	-1 if x < 0
//	 0 if x == 0
//	+1 if x > 0
-//
func (bi *BigInt) Sign() int {
	return bi.bigint.Sign()
}
@@ -38,8 +38,8 @@ type Enode struct {

//
// For incomplete nodes, the designator must look like one of these
//
//	enode://<hex node id>
//	<hex node id>
//
// For complete nodes, the node ID is encoded in the username portion
// of the URL, separated from the host by an @ sign. The hostname can

@@ -52,7 +52,7 @@ type Enode struct {

// a node with IP address 10.3.58.6, TCP listening port 30303
// and UDP discovery port 30301.
//
//	enode://<hex node id>@10.3.58.6:30303?discport=30301
func NewEnode(rawurl string) (*Enode, error) {
	node, err := enode.Parse(enode.ValidSchemes, rawurl)
	if err != nil {
@@ -20,7 +20,7 @@

// with pieces plucked from go-ethereum, rather to allow writing native dapps on
// mobile platforms. Keep this in mind when using or extending this package!
//
-// API limitations
+// # API limitations
//
// Since gomobile cannot bridge arbitrary types between Go and Android/iOS, the
// exposed APIs need to be manually wrapped into simplified types, with custom
node/doc.go

@@ -21,25 +21,22 @@ In the model exposed by this package, a node is a collection of services which u

resources to provide RPC APIs. Services can also offer devp2p protocols, which are wired
up to the devp2p network when the node instance is started.

-Node Lifecycle
+# Node Lifecycle

The Node object has a lifecycle consisting of three basic states, INITIALIZING, RUNNING
and CLOSED.

	●───────┐
	        New()
	        │
	        ▼
	 INITIALIZING ────Start()─┐
	        │                 │
	        │                 ▼
	     Close()           RUNNING
	        │                 │
	        ▼                 │
	     CLOSED ◀──────Close()─┘

Creating a Node allocates basic resources such as the data directory and returns the node
in its INITIALIZING state. Lifecycle objects, RPC APIs and peer-to-peer networking

@@ -58,8 +55,7 @@ objects and shuts down RPC and peer-to-peer networking.

You must always call Close on Node, even if the node was not started.
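As a minimal sketch of that lifecycle (the data directory path and error handling are
illustrative assumptions):

	stack, err := node.New(&node.Config{DataDir: "/tmp/examplenode"}) // INITIALIZING
	if err != nil {
		panic(err)
	}
	if err := stack.Start(); err != nil { // RUNNING
		panic(err)
	}
	stack.Close() // CLOSED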
-Resources Managed By Node
+# Resources Managed By Node

All file-system resources used by a node instance are located in a directory called the
data directory. The location of each resource can be overridden through additional node

@@ -83,8 +79,7 @@ without a data directory, databases are opened in memory instead.

Node also creates the shared store of encrypted Ethereum account keys. Services can access
the account manager through the service context.

-Sharing Data Directory Among Instances
+# Sharing Data Directory Among Instances

Multiple node instances can share a single data directory if they have distinct instance
names (set through the Name config option). Sharing behaviour depends on the type of

@@ -102,26 +97,25 @@ create one database for each instance.

The account key store is shared among all node instances using the same data directory
unless its location is changed through the KeyStoreDir configuration option.

-Data Directory Sharing Example
+# Data Directory Sharing Example

In this example, two node instances named A and B are started with the same data
directory. Node instance A opens the database "db", node instance B opens the databases
"db" and "db-2". The following files will be created in the data directory:

	data-directory/
		A/
			nodekey            -- devp2p node key of instance A
			nodes/             -- devp2p discovery knowledge database of instance A
			db/                -- LevelDB content for "db"
		A.ipc                  -- JSON-RPC UNIX domain socket endpoint of instance A
		B/
			nodekey            -- devp2p node key of node B
			nodes/             -- devp2p discovery knowledge database of instance B
			static-nodes.json  -- devp2p static node list of instance B
			db/                -- LevelDB content for "db"
			db-2/              -- LevelDB content for "db-2"
		B.ipc                  -- JSON-RPC UNIX domain socket endpoint of instance B
		keystore/              -- account key store, used by both instances
*/
package node
@@ -27,8 +27,8 @@ import (

// life cycle management.
//
// The following methods are needed to implement a node.Lifecycle:
//   - Start() error - method invoked when the node is ready to start the service
//   - Stop() error - method invoked when the node terminates the service
type SampleLifecycle struct{}

func (s *SampleLifecycle) Start() error { fmt.Println("Service starting..."); return nil }
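// A minimal sketch rounding out the example: the matching Stop method and the
// registration call that hands the service to a node (the empty Config is arbitrary):
//
//	func (s *SampleLifecycle) Stop() error { fmt.Println("Service stopping..."); return nil }
//
//	stack, _ := node.New(&node.Config{})
//	stack.RegisterLifecycle(new(SampleLifecycle))
//	stack.Start()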
p2p/dial.go

@@ -84,13 +84,12 @@ var (

// dialer creates outbound connections and submits them into Server.
// Two types of peer connections can be created:
//
//   - static dials are pre-configured connections. The dialer attempts to
//     keep these nodes connected at all times.
-//
-// - dynamic dials are created from node discovery results. The dialer
-// continuously reads candidate nodes from its input iterator and attempts
-// to create peer connections to nodes arriving through the iterator.
//
+//   - dynamic dials are created from node discovery results. The dialer
+//     continuously reads candidate nodes from its input iterator and attempts
+//     to create peer connections to nodes arriving through the iterator.
type dialScheduler struct {
	dialConfig
	setupFunc dialSetupFunc
@@ -38,8 +38,7 @@ import (

// To regenerate discv5 test vectors, run
//
//	go test -run TestVectors -write-test-vectors
-//
var writeTestVectorsFlag = flag.Bool("write-test-vectors", false, "Overwrite discv5 test vectors in testdata/")

var (
@@ -117,32 +117,32 @@ func (t *Tree) Nodes() []*enode.Node {

We want to keep the UDP size below 512 bytes. The UDP size is roughly:
UDP length = 8 + UDP payload length ( 229 )
UDP Payload length:
  - dns.id 2
  - dns.flags 2
  - dns.count.queries 2
  - dns.count.answers 2
  - dns.count.auth_rr 2
  - dns.count.add_rr 2
  - queries (query-size + 6)
  - answers :
      - dns.resp.name 2
      - dns.resp.type 2
      - dns.resp.class 2
      - dns.resp.ttl 4
      - dns.resp.len 2
      - dns.txt.length 1
      - dns.txt resp_data_size

-So the total size is roughly a fixed overhead of `39`, and the size of the
-query (domain name) and response.
-The query size is, for example, FVY6INQ6LZ33WLCHO3BPR3FH6Y.snap.mainnet.ethdisco.net (52)
+So the total size is roughly a fixed overhead of `39`, and the size of the query (domain
+name) and response. The query size is, for example,
+FVY6INQ6LZ33WLCHO3BPR3FH6Y.snap.mainnet.ethdisco.net (52)

We also have some static data in the response, such as `enrtree-branch:`, and potentially
splitting the response up with `" "`, leaving us with a size of roughly `400` that we need
to stay below.

-The number `370` is used to have some margin for extra overhead (for example, the dns query
-may be larger - more subdomains).
+The number `370` is used to have some margin for extra overhead (for example, the dns
+query may be larger - more subdomains).
*/
const (
	hashAbbrevSize = 1 + 16*13/8 // Size of an encoded hash (plus comma)
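// Back-of-the-envelope check of the fixed overhead of `39` quoted in the comment above,
// assuming the grouping: 8 (UDP header) + 12 (six 2-byte DNS header fields)
// + 6 (fixed per-query overhead) + 13 (2+2+2+4+2+1 fixed answer fields) = 39 bytes
// before the query name and the TXT payload.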
@@ -54,8 +54,8 @@ func MustParseV4(rawurl string) *Node {

//
// For incomplete nodes, the designator must look like one of these
//
//	enode://<hex node id>
//	<hex node id>
//
// For complete nodes, the node ID is encoded in the username portion
// of the URL, separated from the host by an @ sign. The hostname can

@@ -68,7 +68,7 @@ func MustParseV4(rawurl string) *Node {

// a node with IP address 10.3.58.6, TCP listening port 30303
// and UDP discovery port 30301.
//
//	enode://<hex node id>@10.3.58.6:30303?discport=30301
func ParseV4(rawurl string) (*Node, error) {
	if m := incompleteNodeURL.FindStringSubmatch(rawurl); m != nil {
		id, err := parsePubkey(m[1])
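// As a sketch of the URL shape documented above, one way to obtain a well-formed complete
// URL is to print a freshly built record (the key is generated; IP and ports are the
// example values from the comment):
//
//	key, _ := crypto.GenerateKey()
//	n := enode.NewV4(&key.PublicKey, net.ParseIP("10.3.58.6"), 30303, 30301)
//	fmt.Println(n.URLv4()) // enode://<hex node id>@10.3.58.6:30303?discport=30301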
@@ -19,7 +19,7 @@

// stored in key/value pairs. To store and retrieve key/values in a record, use the Entry
// interface.
//
-// Signature Handling
+// # Signature Handling
//
// Records must be signed before transmitting them to another node.
//
@@ -107,12 +107,11 @@ func Send(w MsgWriter, msgcode uint64, data interface{}) error {

// SendItems writes an RLP with the given code and data elements.
// For a call such as:
//
//	SendItems(w, code, e1, e2, e3)
//
// the message payload will be an RLP list containing the items:
//
//	[e1, e2, e3]
-//
func SendItems(w MsgWriter, msgcode uint64, elems ...interface{}) error {
	return Send(w, msgcode, elems)
}
@@ -53,12 +53,12 @@ type Interface interface {

// The following formats are currently accepted.
// Note that mechanism names are not case-sensitive.
//
//	"" or "none"        return nil
//	"extip:77.12.33.4"  will assume the local machine is reachable on the given IP
//	"any"               uses the first auto-detected mechanism
//	"upnp"              uses the Universal Plug and Play protocol
//	"pmp"               uses NAT-PMP with an auto-detected gateway address
//	"pmp:192.168.0.1"   uses NAT-PMP with the given gateway address
func Parse(spec string) (Interface, error) {
	var (
		parts = strings.SplitN(spec, ":", 2)
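// A minimal usage sketch of the spec format above; the "extip" mechanism simply reports
// the address it was given (the IP is the example value from the comment):
//
//	natif, err := nat.Parse("extip:77.12.33.4")
//	if err != nil {
//		panic(err)
//	}
//	ip, _ := natif.ExternalIP()
//	fmt.Println(ip) // 77.12.33.4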
@@ -39,10 +39,9 @@ import (

// Node represents a node in a simulation network which is created by a
// NodeAdapter, for example:
//
-// * SimNode - An in-memory node
-// * ExecNode - A child process node
-// * DockerNode - A Docker container node
-//
+//   - SimNode, an in-memory node in the same process
+//   - ExecNode, a child process node
+//   - DockerNode, a node running in a Docker container
type Node interface {
	// Addr returns the node's address (e.g. an Enode URL)
	Addr() []byte
@@ -29,20 +29,20 @@ import (

	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
)

-//a map of mocker names to its function
+// a map of mocker names to its function
var mockerList = map[string]func(net *Network, quit chan struct{}, nodeCount int){
	"startStop":     startStop,
	"probabilistic": probabilistic,
	"boot":          boot,
}

-//Lookup a mocker by its name, returns the mockerFn
+// Lookup a mocker by its name, returns the mockerFn
func LookupMocker(mockerType string) func(net *Network, quit chan struct{}, nodeCount int) {
	return mockerList[mockerType]
}

-//Get a list of mockers (keys of the map)
-//Useful for frontend to build available mocker selection
+// Get a list of mockers (keys of the map)
+// Useful for frontend to build available mocker selection
func GetMockerList() []string {
	list := make([]string, 0, len(mockerList))
	for k := range mockerList {

@@ -51,7 +51,7 @@ func GetMockerList() []string {

	return list
}

-//The boot mockerFn only connects the node in a ring and doesn't do anything else
+// The boot mockerFn only connects the node in a ring and doesn't do anything else
func boot(net *Network, quit chan struct{}, nodeCount int) {
	_, err := connectNodesInRing(net, nodeCount)
	if err != nil {

@@ -59,7 +59,7 @@ func boot(net *Network, quit chan struct{}, nodeCount int) {

	}
}

-//The startStop mockerFn stops and starts nodes in a defined period (ticker)
+// The startStop mockerFn stops and starts nodes in a defined period (ticker)
func startStop(net *Network, quit chan struct{}, nodeCount int) {
	nodes, err := connectNodesInRing(net, nodeCount)
	if err != nil {

@@ -96,10 +96,10 @@ func startStop(net *Network, quit chan struct{}, nodeCount int) {

	}
}

-//The probabilistic mocker func has a more probabilistic pattern
-//(the implementation could probably be improved):
-//nodes are connected in a ring, then a varying number of random nodes is selected,
-//mocker then stops and starts them in random intervals, and continues the loop
+// The probabilistic mocker func has a more probabilistic pattern
+// (the implementation could probably be improved):
+// nodes are connected in a ring, then a varying number of random nodes is selected,
+// mocker then stops and starts them in random intervals, and continues the loop
func probabilistic(net *Network, quit chan struct{}, nodeCount int) {
	nodes, err := connectNodesInRing(net, nodeCount)
	if err != nil {

@@ -159,7 +159,7 @@ func probabilistic(net *Network, quit chan struct{}, nodeCount int) {

	}
}

-//connect nodeCount number of nodes in a ring
+// connect nodeCount number of nodes in a ring
func connectNodesInRing(net *Network, nodeCount int) ([]enode.ID, error) {
	ids := make([]enode.ID, nodeCount)
	for i := 0; i < nodeCount; i++ {
@@ -19,8 +19,7 @@ package params

// These are the multipliers for ether denominations.
// Example: To get the wei value of an amount in 'gwei', use
//
//	new(big.Int).Mul(value, big.NewInt(params.GWei))
-//
const (
	Wei  = 1
	GWei = 1e9
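// For example, following the expression above, 5 gwei works out to 5e9 wei (the amount
// is an arbitrary illustrative value):
//
//	wei := new(big.Int).Mul(big.NewInt(5), big.NewInt(params.GWei))
//	fmt.Println(wei) // 5000000000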
@@ -76,7 +76,7 @@ type Decoder interface {

// Note that Decode does not set an input limit for all readers and may be vulnerable to
// panics caused by huge value sizes. If you need an input limit, use
//
//	NewStream(r, limit).Decode(val)
func Decode(r io.Reader, val interface{}) error {
	stream := streamPool.Get().(*Stream)
	defer streamPool.Put(stream)
rlp/doc.go

@@ -27,8 +27,7 @@ value zero equivalent to the empty string).

RLP values are distinguished by a type tag. The type tag precedes the value in the input
stream and defines the size and kind of the bytes that follow.

-Encoding Rules
+# Encoding Rules

Package rlp uses reflection and encodes RLP based on the Go type of the value.

@@ -58,8 +57,7 @@ An interface value encodes as the value contained in the interface.

Floating point numbers, maps, channels and functions are not supported.

-Decoding Rules
+# Decoding Rules

Decoding uses the following type-dependent rules:

@@ -93,30 +91,29 @@ or one (true).

To decode into an interface value, one of these types is stored in the value:

	[]interface{}, for RLP lists
	[]byte, for RLP strings

Non-empty interface types are not supported when decoding.
Signed integers, floating point numbers, maps, channels and functions cannot be decoded into.

-Struct Tags
+# Struct Tags

As with other encoding packages, the "-" tag ignores fields.

	type StructWithIgnoredField struct{
		Ignored uint `rlp:"-"`
		Field   uint
	}

Go struct values encode/decode as RLP lists. There are two ways of influencing the mapping
of fields to list elements. The "tail" tag, which may only be used on the last exported
struct field, allows slurping up any excess list elements into a slice.

	type StructWithTail struct{
		Field uint
		Tail  []string `rlp:"tail"`
	}
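To illustrate the "tail" behaviour with the StructWithTail type above, a minimal
round-trip sketch (the values are arbitrary):

	val := StructWithTail{Field: 1, Tail: []string{"a", "b"}}
	enc, _ := rlp.EncodeToBytes(val) // encodes as the list [1, "a", "b"]
	var dec StructWithTail
	_ = rlp.DecodeBytes(enc, &dec) // dec.Tail == []string{"a", "b"}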
The "optional" tag says that the field may be omitted if it is zero-valued. If this tag is
|
The "optional" tag says that the field may be omitted if it is zero-valued. If this tag is
|
||||||
used on a struct field, all subsequent public fields must also be declared optional.
|
used on a struct field, all subsequent public fields must also be declared optional.
|
||||||
|
@ -128,11 +125,11 @@ When decoding into a struct, optional fields may be omitted from the end of the
|
||||||
list. For the example below, this means input lists of one, two, or three elements are
|
list. For the example below, this means input lists of one, two, or three elements are
|
||||||
accepted.
|
accepted.
|
||||||
|
|
||||||
type StructWithOptionalFields struct{
|
type StructWithOptionalFields struct{
|
||||||
Required uint
|
Required uint
|
||||||
Optional1 uint `rlp:"optional"`
|
Optional1 uint `rlp:"optional"`
|
||||||
Optional2 uint `rlp:"optional"`
|
Optional2 uint `rlp:"optional"`
|
||||||
}
|
}
|
||||||
|
|
||||||
The "nil", "nilList" and "nilString" tags apply to pointer-typed fields only, and change
|
The "nil", "nilList" and "nilString" tags apply to pointer-typed fields only, and change
|
||||||
the decoding rules for the field type. For regular pointer fields without the "nil" tag,
|
the decoding rules for the field type. For regular pointer fields without the "nil" tag,
|
||||||
|
@ -140,9 +137,9 @@ input values must always match the required input length exactly and the decoder
|
||||||
produce nil values. When the "nil" tag is set, input values of size zero decode as a nil
|
produce nil values. When the "nil" tag is set, input values of size zero decode as a nil
|
||||||
pointer. This is especially useful for recursive types.
|
pointer. This is especially useful for recursive types.
|
||||||
|
|
||||||
type StructWithNilField struct {
|
type StructWithNilField struct {
|
||||||
Field *[3]byte `rlp:"nil"`
|
Field *[3]byte `rlp:"nil"`
|
||||||
}
|
}
|
||||||
|
|
||||||
In the example above, Field allows two possible input sizes. For input 0xC180 (a list
|
In the example above, Field allows two possible input sizes. For input 0xC180 (a list
|
||||||
containing an empty string) Field is set to nil after decoding. For input 0xC483000000 (a
|
containing an empty string) Field is set to nil after decoding. For input 0xC483000000 (a
|
||||||
|
|
57
rpc/doc.go
57
rpc/doc.go
|
@ -15,7 +15,6 @@
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
||||||
Package rpc implements bi-directional JSON-RPC 2.0 on multiple transports.
|
Package rpc implements bi-directional JSON-RPC 2.0 on multiple transports.
|
||||||
|
|
||||||
It provides access to the exported methods of an object across a network or other I/O
|
It provides access to the exported methods of an object across a network or other I/O
|
||||||
|
@ -23,16 +22,16 @@ connection. After creating a server or client instance, objects can be registere
them visible as 'services'. Exported methods that follow specific conventions can be
called remotely. It also has support for the publish/subscribe pattern.

-RPC Methods
+# RPC Methods

Methods that satisfy the following criteria are made available for remote access:

  - method must be exported
  - method returns 0, 1 (response or error) or 2 (response and error) values

An example method:

	func (s *CalcService) Add(a, b int) (int, error)

When the returned error isn't nil the returned integer is ignored and the error is sent
back to the client. Otherwise the returned integer is sent back to the client.
@ -41,7 +40,7 @@ Optional arguments are supported by accepting pointer values as arguments. E.g.
to do the addition in an optional finite field we can accept a mod argument as pointer
value.

	func (s *CalcService) Add(a, b int, mod *int) (int, error)

This RPC method can be called with 2 integers and a null value as third argument. In that
case the mod argument will be nil. Or it can be called with 3 integers, in that case mod
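
For illustration (a runnable sketch, not code from this commit): the optional-argument pattern described above, exercised over an in-process connection. The "calc" namespace and the modular-arithmetic body are assumptions made for the example; NewServer, RegisterName, DialInProc and Call are the package's public API.

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/rpc"
	)

	type CalcService struct{}

	// Add accepts an optional modulus; when the client omits it or sends
	// null, mod is nil and the plain sum is returned.
	func (s *CalcService) Add(a, b int, mod *int) (int, error) {
		sum := a + b
		if mod != nil {
			sum %= *mod
		}
		return sum, nil
	}

	func main() {
		server := rpc.NewServer()
		if err := server.RegisterName("calc", new(CalcService)); err != nil {
			panic(err)
		}
		client := rpc.DialInProc(server)
		defer client.Close()

		var result int
		// Third argument omitted: the handler sees mod == nil.
		if err := client.Call(&result, "calc_add", 4, 5); err != nil {
			panic(err)
		}
		fmt.Println(result) // 9

		// Third argument supplied: the sum is reduced modulo 7.
		if err := client.Call(&result, "calc_add", 4, 5, 7); err != nil {
			panic(err)
		}
		fmt.Println(result) // 2
	}
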
@ -56,40 +55,40 @@ to the client out of order.

An example server which uses the JSON codec:

	type CalculatorService struct {}

	func (s *CalculatorService) Add(a, b int) int {
		return a + b
	}

	func (s *CalculatorService) Div(a, b int) (int, error) {
		if b == 0 {
			return 0, errors.New("divide by zero")
		}
		return a/b, nil
	}

	calculator := new(CalculatorService)
	server := NewServer()
	server.RegisterName("calculator", calculator)
	l, _ := net.ListenUnix("unix", &net.UnixAddr{Net: "unix", Name: "/tmp/calculator.sock"})
	server.ServeListener(l)

-Subscriptions
+# Subscriptions

The package also supports the publish subscribe pattern through the use of subscriptions.
A method that is considered eligible for notifications must satisfy the following
criteria:

  - method must be exported
  - first method argument type must be context.Context
  - method must have return types (rpc.Subscription, error)

An example method:

	func (s *BlockChainService) NewBlocks(ctx context.Context) (rpc.Subscription, error) {
		...
	}

When the service containing the subscription method is registered to the server, for
example under the "blockchain" namespace, a subscription is created by calling the
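
As a hedged illustration (not from the commit): one way the NewBlocks stub above could be fleshed out, assuming the package's Notifier API (NotifierFromContext, CreateSubscription, Notify) and a simplified event source; concrete subscription methods in the codebase return *rpc.Subscription. A client would then subscribe with something like client.Subscribe(ctx, "blockchain", ch, "newBlocks").

	package blockchain

	import (
		"context"

		"github.com/ethereum/go-ethereum/rpc"
	)

	type BlockChainService struct {
		newBlocks chan string // simplified stand-in for a real block feed
	}

	// NewBlocks satisfies the subscription criteria: exported, first argument
	// context.Context, and it returns a subscription plus an error.
	func (s *BlockChainService) NewBlocks(ctx context.Context) (*rpc.Subscription, error) {
		notifier, supported := rpc.NotifierFromContext(ctx)
		if !supported {
			return nil, rpc.ErrNotificationsUnsupported
		}
		sub := notifier.CreateSubscription()
		go func() {
			for {
				select {
				case block := <-s.newBlocks:
					notifier.Notify(sub.ID, block) // push a notification to the client
				case <-sub.Err(): // client unsubscribed or connection closed
					return
				}
			}
		}()
		return sub, nil
	}
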
@ -101,7 +100,7 @@ the client and server. The server will close the connection for any write error.

For more information about subscriptions, see https://github.com/ethereum/go-ethereum/wiki/RPC-PUB-SUB.

-Reverse Calls
+# Reverse Calls

In any method handler, an instance of rpc.Client can be accessed through the
ClientFromContext method. Using this client instance, server-to-client method calls can be
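
A brief sketch of the reverse-call pattern just described (not from the commit), assuming the rpc.ClientFromContext accessor; the "client_echo" method name is hypothetical and only serves the example.

	package example

	import (
		"context"
		"errors"

		"github.com/ethereum/go-ethereum/rpc"
	)

	type PingService struct{}

	// Ping calls back into the connected client while handling a request.
	func (s *PingService) Ping(ctx context.Context) (string, error) {
		client, ok := rpc.ClientFromContext(ctx)
		if !ok {
			return "", errors.New("reverse calls not supported on this connection")
		}
		var reply string
		// "client_echo" is a hypothetical method registered on the client side.
		if err := client.Call(&reply, "client_echo", "ping"); err != nil {
			return "", err
		}
		return reply, nil
	}
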
@ -34,20 +34,20 @@ import (
//
// The entry points for incoming messages are:
//
//	h.handleMsg(message)
//	h.handleBatch(message)
//
// Outgoing calls use the requestOp struct. Register the request before sending it
// on the connection:
//
//	op := &requestOp{ids: ...}
//	h.addRequestOp(op)
//
// Now send the request, then wait for the reply to be delivered through handleMsg:
//
//	if err := op.wait(...); err != nil {
//		h.removeRequestOp(op) // timeout, etc.
//	}
type handler struct {
	reg           *serviceRegistry
	unsubscribeCb *callback
@ -39,7 +39,7 @@ import (
	"github.com/ethereum/go-ethereum/signer/storage"
)

-//Used for testing
+// Used for testing
type headlessUi struct {
	approveCh chan string // to send approve/deny
	inputCh   chan string // to send password
@ -64,7 +64,7 @@ func (vs *ValidationMessages) Info(msg string) {
	vs.Messages = append(vs.Messages, ValidationInfo{INFO, msg})
}

-/// getWarnings returns an error with all messages of type WARN of above, or nil if no warnings were present
+// getWarnings returns an error with all messages of type WARN of above, or nil if no warnings were present
func (v *ValidationMessages) GetWarnings() error {
	var messages []string
	for _, msg := range v.Messages {
@ -175,17 +175,18 @@ func (t *BlockTest) genesis(config *params.ChainConfig) *core.Genesis {
	}
}

-/* See https://github.com/ethereum/tests/wiki/Blockchain-Tests-II
+/*
+See https://github.com/ethereum/tests/wiki/Blockchain-Tests-II

Whether a block is valid or not is a bit subtle, it's defined by presence of
blockHeader, transactions and uncleHeaders fields. If they are missing, the block is
invalid and we must verify that we do not accept it.

Since some tests mix valid and invalid blocks we need to check this for every block.

If a block is invalid it does not necessarily fail the test, if it's invalidness is
expected we are expected to ignore it and continue processing and then validate the
post state.
*/
func (t *BlockTest) insertBlocks(blockchain *core.BlockChain) ([]btBlock, error) {
	validBlocks := make([]btBlock, 0)
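
Purely as an illustration of the rule stated in that comment (the real btBlock type in tests/block_test_util.go has a different, richer layout; all names below are hypothetical):

	package blocktest

	// Hypothetical, simplified block shape used only to illustrate the rule.
	type testBlock struct {
		BlockHeader  *testHeader
		Transactions []testTx
		UncleHeaders []*testHeader
	}

	type (
		testHeader struct{}
		testTx     struct{}
	)

	// expectedValid reports whether the test expects a block to be accepted:
	// only when blockHeader, transactions and uncleHeaders are all present.
	func expectedValid(b testBlock) bool {
		return b.BlockHeader != nil && b.Transactions != nil && b.UncleHeaders != nil
	}
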
@ -70,12 +70,14 @@ func checkInput(id byte, inputLen int) bool {
	panic("programmer error")
}

-// The fuzzer functions must return
-// 1 if the fuzzer should increase priority of the
-// given input during subsequent fuzzing (for example, the input is lexically
-// correct and was parsed successfully);
-// -1 if the input must not be added to corpus even if gives new coverage; and
-// 0 otherwise
+// The function must return
+//
+//   - 1 if the fuzzer should increase priority of the
+//     given input during subsequent fuzzing (for example, the input is lexically
+//     correct and was parsed successfully);
+//   - -1 if the input must not be added to corpus even if gives new coverage; and
+//   - 0 otherwise
+//
// other values are reserved for future use.
func fuzz(id byte, data []byte) int {
	// Even on bad input, it should not crash, so we still test the gas calc
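
To make the convention concrete, here is a small stand-alone example (not one of the fuzzers in this diff) that follows the documented return values:

	package fuzzexample

	import "encoding/json"

	// Fuzz follows the go-fuzz convention restated above: 1 asks the fuzzer to
	// prioritize the input, -1 keeps it out of the corpus, 0 is neutral.
	func Fuzz(data []byte) int {
		if len(data) == 0 {
			return -1 // never worth keeping, even if it adds coverage
		}
		var v interface{}
		if err := json.Unmarshal(data, &v); err != nil {
			return 0 // lexically invalid, but still counts for coverage
		}
		return 1 // parsed successfully: prioritize during subsequent fuzzing
	}
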
@ -67,11 +67,13 @@ func (f *fuzzer) readBool() bool {
}

// The function must return
-// 1 if the fuzzer should increase priority of the
-// given input during subsequent fuzzing (for example, the input is lexically
-// correct and was parsed successfully);
-// -1 if the input must not be added to corpus even if gives new coverage; and
-// 0 otherwise
+//
+//   - 1 if the fuzzer should increase priority of the
+//     given input during subsequent fuzzing (for example, the input is lexically
+//     correct and was parsed successfully);
+//   - -1 if the input must not be added to corpus even if gives new coverage; and
+//   - 0 otherwise
+//
// other values are reserved for future use.
func Fuzz(data []byte) int {
	f := fuzzer{
Some files were not shown because too many files have changed in this diff.