Compare commits

17 Commits
ed5c8a1f5f...88cce1df47

Author | SHA1 | Date
---|---|---
jwasinger | 88cce1df47 |
Arran Schlosberg | 23800122b3 |
Jordan Krage | 3c754e2a09 |
Hyunsoo Shin (Lake) | 19fa71b917 |
Martin HS | 02159d553f |
Martin HS | ab4a1cc01f |
jwasinger | 6342d59d56 |
jwasinger | c07b5fd265 |
jwasinger | d8a18e1f7d |
jwasinger | 83d6b303c0 |
Jared Wasinger | 0ac7ff025f |
Jared Wasinger | 1ba590bd56 |
Jared Wasinger | 0cf16d881b |
Jared Wasinger | 5ef597efc7 |
jwasinger | 39a39ce0da |
Jared Wasinger | dc78fceb7a |
Jared Wasinger | 095b466434 |
@@ -42,7 +42,7 @@ func MakeTopics(query ...[]interface{}) ([][]common.Hash, error) {
 			case common.Address:
 				copy(topic[common.HashLength-common.AddressLength:], rule[:])
 			case *big.Int:
-				copy(topic[:], math.U256Bytes(rule))
+				copy(topic[:], math.U256Bytes(new(big.Int).Set(rule)))
 			case bool:
 				if rule {
 					topic[common.HashLength-1] = 1
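The defensive copy above exists because go-ethereum's math.U256Bytes is documented as destructive: it reduces its *big.Int argument modulo 2**256 in place, so a caller-supplied rule such as -1 would be silently rewritten. A minimal, self-contained sketch of the effect, using a local stand-in for the destructive masking rather than the geth helper:

```go
package main

import (
	"fmt"
	"math/big"
)

// u256 mimics a destructive two's-complement truncation to 256 bits:
// it masks x in place, which is what the copy above guards against.
func u256(x *big.Int) *big.Int {
	mask := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(1))
	return x.And(x, mask)
}

func main() {
	in := big.NewInt(-1)
	u256(in)               // destructive call: the caller's value is now 2^256 - 1
	fmt.Println(in.Sign()) // 1, the original -1 is gone

	in = big.NewInt(-1)
	u256(new(big.Int).Set(in)) // defensive copy: the caller's value is untouched
	fmt.Println(in.Int64())    // -1
}
```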
@@ -149,6 +149,23 @@ func TestMakeTopics(t *testing.T) {
 			}
 		})
 	}
+
+	t.Run("does not mutate big.Int", func(t *testing.T) {
+		t.Parallel()
+		want := [][]common.Hash{{common.HexToHash("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")}}
+
+		in := big.NewInt(-1)
+		got, err := MakeTopics([]interface{}{in})
+		if err != nil {
+			t.Fatalf("makeTopics() error = %v", err)
+		}
+		if !reflect.DeepEqual(got, want) {
+			t.Fatalf("makeTopics() = %v, want %v", got, want)
+		}
+		if orig := big.NewInt(-1); in.Cmp(orig) != 0 {
+			t.Fatalf("makeTopics() mutated an input parameter from %v to %v", orig, in)
+		}
+	})
 }
 
 type args struct {
@@ -90,13 +90,13 @@ func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) ([]byte, exe
 		for i := 0; i < b.N; i++ {
 			haveOutput, haveGasUsed, haveErr := execFunc()
 			if !bytes.Equal(haveOutput, output) {
-				b.Fatalf("output differs, have\n%x\nwant%x\n", haveOutput, output)
+				panic(fmt.Sprintf("output differs\nhave %x\nwant %x\n", haveOutput, output))
 			}
 			if haveGasUsed != gasUsed {
-				b.Fatalf("gas differs, have %v want%v", haveGasUsed, gasUsed)
+				panic(fmt.Sprintf("gas differs, have %v want %v", haveGasUsed, gasUsed))
 			}
 			if haveErr != err {
-				b.Fatalf("err differs, have %v want%v", haveErr, err)
+				panic(fmt.Sprintf("err differs, have %v want %v", haveErr, err))
 			}
 		}
 	})
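For context, the b.N loop above runs inside testing.Benchmark, which cmd/evm invokes programmatically rather than through go test. A rough sketch of that usage pattern, with a hypothetical work function standing in for execFunc:

```go
package main

import (
	"fmt"
	"testing"
)

// work stands in for execFunc; it only exists to give the benchmark something to time.
func work() { _ = make([]byte, 1024) }

func main() {
	// testing.Benchmark repeatedly calls the closure with growing b.N
	// until the measurement stabilises, no go-test harness required.
	result := testing.Benchmark(func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			work()
		}
	})
	fmt.Println(result.String())
}
```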
@@ -137,7 +137,7 @@ func runCmd(ctx *cli.Context) error {
 	var (
 		tracer      *tracing.Hooks
 		debugLogger *logger.StructLogger
-		statedb     *state.StateDB
+		prestate    *state.StateDB
 		chainConfig *params.ChainConfig
 		sender      = common.BytesToAddress([]byte("sender"))
 		receiver    = common.BytesToAddress([]byte("receiver"))
@@ -174,7 +174,7 @@ func runCmd(ctx *cli.Context) error {
 		defer triedb.Close()
 		genesis := genesisConfig.MustCommit(db, triedb)
 		sdb := state.NewDatabase(triedb, nil)
-		statedb, _ = state.New(genesis.Root(), sdb)
+		prestate, _ = state.New(genesis.Root(), sdb)
 		chainConfig = genesisConfig.Config
 
 	if ctx.String(SenderFlag.Name) != "" {
@@ -231,7 +231,7 @@ func runCmd(ctx *cli.Context) error {
 	}
 	runtimeConfig := runtime.Config{
 		Origin:   sender,
-		State:    statedb,
+		State:    prestate,
 		GasLimit: initialGas,
 		GasPrice: flags.GlobalBig(ctx, PriceFlag.Name),
 		Value:    flags.GlobalBig(ctx, ValueFlag.Name),
@@ -274,14 +274,18 @@ func runCmd(ctx *cli.Context) error {
 	if ctx.Bool(CreateFlag.Name) {
 		input = append(code, input...)
 		execFunc = func() ([]byte, uint64, error) {
+			// don't mutate the state!
+			runtimeConfig.State = prestate.Copy()
 			output, _, gasLeft, err := runtime.Create(input, &runtimeConfig)
 			return output, gasLeft, err
 		}
 	} else {
 		if len(code) > 0 {
-			statedb.SetCode(receiver, code)
+			prestate.SetCode(receiver, code)
 		}
 		execFunc = func() ([]byte, uint64, error) {
+			// don't mutate the state!
+			runtimeConfig.State = prestate.Copy()
 			output, gasLeft, err := runtime.Call(receiver, input, &runtimeConfig)
 			return output, initialGas - gasLeft, err
 		}
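Rebinding runtimeConfig.State to a fresh prestate.Copy() on every invocation matters because execFunc may run many times under --bench; each iteration then starts from identical state instead of compounding the previous run's changes. A toy illustration of the same pattern, independent of the geth types:

```go
package main

import "fmt"

// State is a toy stand-in for state.StateDB.
type State struct{ balances map[string]uint64 }

// Copy returns a deep copy so callers can mutate it freely.
func (s *State) Copy() *State {
	c := &State{balances: make(map[string]uint64, len(s.balances))}
	for k, v := range s.balances {
		c.balances[k] = v
	}
	return c
}

func main() {
	prestate := &State{balances: map[string]uint64{"sender": 100}}
	var current *State

	execFunc := func() {
		// don't mutate the shared prestate: work on a fresh copy each run
		current = prestate.Copy()
		current.balances["sender"] -= 1
	}

	for i := 0; i < 3; i++ {
		execFunc()
		fmt.Println(current.balances["sender"]) // always 99, never 98 or 97
	}
}
```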
@@ -291,7 +295,7 @@ func runCmd(ctx *cli.Context) error {
 	output, stats, err := timedExec(bench, execFunc)
 
 	if ctx.Bool(DumpFlag.Name) {
-		root, err := statedb.Commit(genesisConfig.Number, true)
+		root, err := runtimeConfig.State.Commit(genesisConfig.Number, true)
 		if err != nil {
 			fmt.Printf("Failed to commit changes %v\n", err)
 			return err
@@ -310,7 +314,7 @@ func runCmd(ctx *cli.Context) error {
 			logger.WriteTrace(os.Stderr, debugLogger.StructLogs())
 		}
 		fmt.Fprintln(os.Stderr, "#### LOGS ####")
-		logger.WriteLogs(os.Stderr, statedb.Logs())
+		logger.WriteLogs(os.Stderr, runtimeConfig.State.Logs())
 	}
 
 	if bench || ctx.Bool(StatDumpFlag.Name) {
@@ -206,47 +206,24 @@ func New(config Config, diskdb ethdb.KeyValueStore, triedb *triedb.Database, roo
 		log.Warn("Snapshot maintenance disabled (syncing)")
 		return snap, nil
 	}
-	// Create the building waiter iff the background generation is allowed
-	if !config.NoBuild && !config.AsyncBuild {
-		defer snap.waitBuild()
-	}
 	if err != nil {
 		log.Warn("Failed to load snapshot", "err", err)
-		if !config.NoBuild {
-			snap.Rebuild(root)
-			return snap, nil
+		if config.NoBuild {
+			return nil, err
 		}
-		return nil, err // Bail out the error, don't rebuild automatically.
+		wait := snap.Rebuild(root)
+		if !config.AsyncBuild {
+			wait()
+		}
+		return snap, nil
 	}
 	// Existing snapshot loaded, seed all the layers
-	for head != nil {
+	for ; head != nil; head = head.Parent() {
 		snap.layers[head.Root()] = head
-		head = head.Parent()
 	}
 	return snap, nil
 }
-
-// waitBuild blocks until the snapshot finishes rebuilding. This method is meant
-// to be used by tests to ensure we're testing what we believe we are.
-func (t *Tree) waitBuild() {
-	// Find the rebuild termination channel
-	var done chan struct{}
-
-	t.lock.RLock()
-	for _, layer := range t.layers {
-		if layer, ok := layer.(*diskLayer); ok {
-			done = layer.genPending
-			break
-		}
-	}
-	t.lock.RUnlock()
-
-	// Wait until the snapshot is generated
-	if done != nil {
-		<-done
-	}
-}
-
 // Disable interrupts any pending snapshot generator, deletes all the snapshot
 // layers in memory and marks snapshots disabled globally. In order to resume
 // the snapshot functionality, the caller must invoke Rebuild.
@@ -688,8 +665,9 @@ func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
 
 // Rebuild wipes all available snapshot data from the persistent database and
 // discard all caches and diff layers. Afterwards, it starts a new snapshot
-// generator with the given root hash.
-func (t *Tree) Rebuild(root common.Hash) {
+// generator with the given root hash. The returned function blocks until
+// regeneration is complete.
+func (t *Tree) Rebuild(root common.Hash) (wait func()) {
 	t.lock.Lock()
 	defer t.lock.Unlock()
 
@@ -721,9 +699,11 @@ func (t *Tree) Rebuild(root common.Hash) {
 	// Start generating a new snapshot from scratch on a background thread. The
 	// generator will run a wiper first if there's not one running right now.
 	log.Info("Rebuilding state snapshot")
+	disk := generateSnapshot(t.diskdb, t.triedb, t.config.CacheSize, root)
 	t.layers = map[common.Hash]snapshot{
-		root: generateSnapshot(t.diskdb, t.triedb, t.config.CacheSize, root),
+		root: disk,
 	}
+	return func() { <-disk.genPending }
 }
 
 // AccountIterator creates a new account iterator for the specified root hash and
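The closure returned above simply blocks on the disk layer's generation channel until background generation closes it. A generic sketch of the same return-a-wait-function shape (names illustrative, not geth's):

```go
package main

import (
	"fmt"
	"time"
)

// rebuild starts background work and returns a function that blocks until
// that work finishes, mirroring the new Tree.Rebuild signature.
func rebuild() (wait func()) {
	genPending := make(chan struct{})
	go func() {
		time.Sleep(10 * time.Millisecond) // stand-in for snapshot generation
		close(genPending)
	}()
	return func() { <-genPending }
}

func main() {
	wait := rebuild()
	asyncBuild := false
	if !asyncBuild {
		wait() // synchronous callers block until generation is done
	}
	fmt.Println("snapshot ready")
}
```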
@@ -363,26 +363,35 @@ func (t *mdLogger) OnEnter(depth int, typ byte, from common.Address, to common.A
 	if depth != 0 {
 		return
 	}
-	create := vm.OpCode(typ) == vm.CREATE
-	if !create {
-		fmt.Fprintf(t.out, "From: `%v`\nTo: `%v`\nData: `%#x`\nGas: `%d`\nValue `%v` wei\n",
-			from.String(), to.String(),
-			input, gas, value)
+	if create := vm.OpCode(typ) == vm.CREATE; !create {
+		fmt.Fprintf(t.out, "Pre-execution info:\n"+
+			" - from: `%v`\n"+
+			" - to: `%v`\n"+
+			" - data: `%#x`\n"+
+			" - gas: `%d`\n"+
+			" - value: `%v` wei\n",
+			from.String(), to.String(), input, gas, value)
 	} else {
-		fmt.Fprintf(t.out, "From: `%v`\nCreate at: `%v`\nData: `%#x`\nGas: `%d`\nValue `%v` wei\n",
-			from.String(), to.String(),
-			input, gas, value)
+		fmt.Fprintf(t.out, "Pre-execution info:\n"+
+			" - from: `%v`\n"+
+			" - create: `%v`\n"+
+			" - data: `%#x`\n"+
+			" - gas: `%d`\n"+
+			" - value: `%v` wei\n",
+			from.String(), to.String(), input, gas, value)
 	}
 
 	fmt.Fprintf(t.out, `
-|  Pc   |     Op      | Cost |   Stack   |  RStack   |  Refund |
-|-------|-------------|------|-----------|-----------|---------|
+|  Pc   |     Op      | Cost |  Refund   |   Stack   |
+|-------|-------------|------|-----------|-----------|
 `)
 }
 
 func (t *mdLogger) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
 	if depth == 0 {
-		fmt.Fprintf(t.out, "\nOutput: `%#x`\nConsumed gas: `%d`\nError: `%v`\n",
+		fmt.Fprintf(t.out, "\nPost-execution info:\n"+
+			" - output: `%#x`\n"+
+			" - consumed gas: `%d`\n"+
+			" - error: `%v`\n",
 			output, gasUsed, err)
 	}
 }
@@ -390,7 +399,8 @@ func (t *mdLogger) OnExit(depth int, output []byte, gasUsed uint64, err error, r
 // OnOpcode also tracks SLOAD/SSTORE ops to track storage change.
 func (t *mdLogger) OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) {
 	stack := scope.StackData()
-	fmt.Fprintf(t.out, "| %4d | %10v | %3d |", pc, vm.OpCode(op).String(), cost)
+	fmt.Fprintf(t.out, "| %4d | %10v | %3d |%10v |", pc, vm.OpCode(op).String(),
+		cost, t.env.StateDB.GetRefund())
 
 	if !t.cfg.DisableStack {
 		// format stack
@@ -401,7 +411,6 @@ func (t *mdLogger) OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.
 		b := fmt.Sprintf("[%v]", strings.Join(a, ","))
 		fmt.Fprintf(t.out, "%10v |", b)
 	}
-	fmt.Fprintf(t.out, "%10v |", t.env.StateDB.GetRefund())
 	fmt.Fprintln(t.out, "")
 	if err != nil {
 		fmt.Fprintf(t.out, "Error: %v\n", err)
@@ -71,7 +71,7 @@ func NewJSONLogger(cfg *Config, writer io.Writer) *tracing.Hooks {
 	l.hooks = &tracing.Hooks{
 		OnTxStart:         l.OnTxStart,
 		OnSystemCallStart: l.onSystemCallStart,
-		OnExit:            l.OnEnd,
+		OnExit:            l.OnExit,
 		OnOpcode:          l.OnOpcode,
 		OnFault:           l.OnFault,
 	}
@@ -152,13 +152,6 @@ func (l *jsonLogger) OnEnter(depth int, typ byte, from common.Address, to common
 	l.encoder.Encode(frame)
 }
 
-func (l *jsonLogger) OnEnd(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
-	if depth > 0 {
-		return
-	}
-	l.OnExit(depth, output, gasUsed, err, false)
-}
-
 func (l *jsonLogger) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
 	type endLog struct {
 		Output string `json:"output"`
@@ -21,7 +21,6 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"maps"
 	"math/big"
 	"time"
 
@@ -186,7 +185,7 @@ func (sim *simulator) processBlock(ctx context.Context, block *simBlock, header,
 			Tracer: tracer.Hooks(),
 		}
 	)
-	var tracingStateDB = vm.StateDB(sim.state)
+	tracingStateDB := vm.StateDB(sim.state)
 	if hooks := tracer.Hooks(); hooks != nil {
 		tracingStateDB = state.NewHookedState(sim.state, hooks)
 	}
@@ -289,7 +288,7 @@ func (sim *simulator) activePrecompiles(base *types.Header) vm.PrecompiledContra
 		isMerge = (base.Difficulty.Sign() == 0)
 		rules   = sim.chainConfig.Rules(base.Number, isMerge, base.Time)
 	)
-	return maps.Clone(vm.ActivePrecompiledContracts(rules))
+	return vm.ActivePrecompiledContracts(rules)
 }
 
 // sanitizeChain checks the chain integrity. Specifically it checks that
@@ -676,7 +676,7 @@ func (typedData *TypedData) EncodePrimitiveValue(encType string, encValue interf
 		if err != nil {
 			return nil, err
 		}
-		return math.U256Bytes(b), nil
+		return math.U256Bytes(new(big.Int).Set(b)), nil
 	}
 	return nil, fmt.Errorf("unrecognized type '%s'", encType)
 }