// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package state

import (
	"bytes"
	"fmt"
	"maps"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie/trienode"
	"github.com/holiman/uint256"
)

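// Storage represents in-memory contract storage, mapping slot keys to slot values.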
type Storage map[common.Hash]common.Hash

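// Copy returns an independent copy of the storage map.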
func (s Storage) Copy() Storage {
	return maps.Clone(s)
}

// stateObject represents an Ethereum account which is being modified.
//
// The usage pattern is as follows:
//   - First you need to obtain a state object.
//   - Account values as well as storages can be accessed and modified through the object.
//   - Finally, call commit to return the changes of storage trie and update account data.
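//
// An illustrative sketch of that pattern (hypothetical caller; db, addr, acct,
// slotKey and slotValue are assumed to be set up elsewhere):
//
//	obj := newObject(db, addr, acct)
//	prev := obj.SetState(slotKey, slotValue)
//	update, nodes, err := obj.commit()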
type stateObject struct {
	db       *StateDB
	address  common.Address      // address of ethereum account
	addrHash common.Hash         // hash of ethereum address of the account
	origin   *types.StateAccount // Account original data without any change applied, nil means it was not existent
	data     types.StateAccount  // Account data with all mutations applied in the scope of block

	// Write caches.
	trie Trie   // storage trie, which becomes non-nil on first access
	code []byte // contract bytecode, which gets set when code is loaded

	originStorage  Storage // Storage entries that have been accessed within the current block
	dirtyStorage   Storage // Storage entries that have been modified within the current transaction
	pendingStorage Storage // Storage entries that have been modified within the current block

	// uncommittedStorage tracks a set of storage entries that have been modified
	// but not yet committed since the "last commit operation", along with their
	// original values before mutation.
	//
	// Specifically, the commit will be performed after each transaction before
	// the byzantium fork, therefore the map is already reset at the transaction
	// boundary; however, post the byzantium fork, the commit will only be performed
	// at the end of the block, so this set essentially tracks all the modifications
	// made within the block.
	uncommittedStorage Storage

	// Cache flags.
	dirtyCode bool // true if the code was updated

	// Flag whether the account was marked as self-destructed. The self-destructed
	// account is still accessible in the scope of the same transaction.
	selfDestructed bool

	// This is an EIP-6780 flag indicating whether the object is eligible for
	// self-destruct according to EIP-6780. The flag could be set either when
	// the contract is just created within the current transaction, or when the
	// object was previously existent and is being deployed as a contract within
	// the current transaction.
	newContract bool
}

// empty returns whether the account is considered empty.
func (s *stateObject) empty() bool {
	return s.data.Nonce == 0 && s.data.Balance.IsZero() && bytes.Equal(s.data.CodeHash, types.EmptyCodeHash.Bytes())
}

// newObject creates a state object.
func newObject(db *StateDB, address common.Address, acct *types.StateAccount) *stateObject {
	origin := acct
	if acct == nil {
		acct = types.NewEmptyStateAccount()
	}
	return &stateObject{
		db:                 db,
		address:            address,
		addrHash:           crypto.Keccak256Hash(address[:]),
		origin:             origin,
		data:               *acct,
		originStorage:      make(Storage),
		dirtyStorage:       make(Storage),
		pendingStorage:     make(Storage),
		uncommittedStorage: make(Storage),
	}
}

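// markSelfdestructed flags the account as self-destructed. The account remains
// accessible within the scope of the current transaction (see the selfDestructed
// field above).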
func (s *stateObject) markSelfdestructed() {
	s.selfDestructed = true
}

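// touch journals a touch event for the account; this allows otherwise-empty
// accounts to be picked up by EIP-161 state clearing.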
func (s *stateObject) touch() {
	s.db.journal.touchChange(s.address, &s.data, s.selfDestructed, s.newContract)
}

// getTrie returns the associated storage trie. The trie will be opened if it's
// not loaded previously. An error will be returned if the trie can't be loaded.
//
// If a new trie is opened, it will be cached within the state object to allow
// subsequent reads to expand the same trie instead of reloading from disk.
func (s *stateObject) getTrie() (Trie, error) {
	if s.trie == nil {
		tr, err := s.db.db.OpenStorageTrie(s.db.originalRoot, s.address, s.data.Root, s.db.trie)
		if err != nil {
			return nil, err
		}
		s.trie = tr
	}
	return s.trie, nil
}

// getPrefetchedTrie returns the associated trie, as populated by the prefetcher
// if it's available.
//
// Note, as opposed to getTrie, this method will *NOT* blindly cache the resulting
// trie in the state object. The caller might want to do that, but it's cleaner
// to break the hidden interdependency between retrieving tries from the db or
// from the prefetcher.
func (s *stateObject) getPrefetchedTrie() Trie {
	// If there's nothing to meaningfully return, let the user figure it out by
	// pulling the trie from disk.
	if (s.data.Root == types.EmptyRootHash && !s.db.db.TrieDB().IsVerkle()) || s.db.prefetcher == nil {
		return nil
	}
	// Attempt to retrieve the trie from the prefetcher
	return s.db.prefetcher.trie(s.addrHash, s.data.Root)
}

// GetState retrieves a value associated with the given storage key.
func (s *stateObject) GetState(key common.Hash) common.Hash {
	value, _ := s.getState(key)
	return value
}

// getState retrieves a value associated with the given storage key, along with
// its original value.
func (s *stateObject) getState(key common.Hash) (common.Hash, common.Hash) {
	origin := s.GetCommittedState(key)
	value, dirty := s.dirtyStorage[key]
	if dirty {
		return value, origin
	}
	return origin, origin
}

// GetCommittedState retrieves the value associated with the specific key
// without any mutations caused in the current execution.
func (s *stateObject) GetCommittedState(key common.Hash) common.Hash {
	// If we have a pending write or clean cached, return that
	if value, pending := s.pendingStorage[key]; pending {
		return value
	}
	if value, cached := s.originStorage[key]; cached {
		return value
	}
	// If the object was destructed in *this* block (and potentially resurrected),
	// the storage has been cleared out, and we should *not* consult the previous
	// database about any storage values. The only possible alternatives are:
	//   1) resurrect happened, and new slot values were set -- those should
	//      have been handled via pendingStorage above.
	//   2) we don't have new values, and can deliver an empty response back.
	if _, destructed := s.db.stateObjectsDestruct[s.address]; destructed {
		s.originStorage[key] = common.Hash{} // track the empty slot as origin value
		return common.Hash{}
	}
	s.db.StorageLoaded++

	start := time.Now()
	value, err := s.db.reader.Storage(s.address, key)
	if err != nil {
		s.db.setError(err)
		return common.Hash{}
	}
	s.db.StorageReads += time.Since(start)

	// Schedule the resolved storage slots for prefetching if it's enabled.
	if s.db.prefetcher != nil && s.data.Root != types.EmptyRootHash {
		if err = s.db.prefetcher.prefetch(s.addrHash, s.origin.Root, s.address, nil, []common.Hash{key}, true); err != nil {
			log.Error("Failed to prefetch storage slot", "addr", s.address, "key", key, "err", err)
		}
	}
	s.originStorage[key] = value
	return value
}

// SetState updates a value in account storage.
// It returns the previous value.
func (s *stateObject) SetState(key, value common.Hash) common.Hash {
	// If the new value is the same as old, don't set. Otherwise, track only the
	// dirty changes, supporting reverting all of it back to no change.
	prev, origin := s.getState(key)
	if prev == value {
		return prev
	}
	// New value is different, update and journal the change
	s.db.journal.storageChange(s.address, key, prev, origin)
	s.setState(key, value, origin)
	return prev
}

// setState updates a value in account dirty storage. The dirtiness will be
// removed if the value being set equals the original value.
func (s *stateObject) setState(key common.Hash, value common.Hash, origin common.Hash) {
	// Storage slot is set back to its original value, undo the dirty marker
	if value == origin {
		delete(s.dirtyStorage, key)
		return
	}
	s.dirtyStorage[key] = value
}

// finalise moves all dirty storage slots into the pending area to be hashed or
// committed later. It is invoked at the end of every transaction.
func (s *stateObject) finalise() {
	slotsToPrefetch := make([]common.Hash, 0, len(s.dirtyStorage))
	for key, value := range s.dirtyStorage {
		if origin, exist := s.uncommittedStorage[key]; exist && origin == value {
			// The slot is reverted to its original value, delete the entry
			// to avoid thrashing the data structures.
			delete(s.uncommittedStorage, key)
		} else if exist {
			// The slot is modified to another value and the slot has been
			// tracked for commit, do nothing here.
		} else {
			// The slot is different from its original value and hasn't been
			// tracked for commit yet.
			s.uncommittedStorage[key] = s.GetCommittedState(key)
			slotsToPrefetch = append(slotsToPrefetch, key) // Copy needed for closure
		}
		// Aggregate the dirty storage slots into the pending area. It might
		// be possible that the value of the tracked slot here is the same as
		// the one in originStorage (e.g. the slot was modified in tx_a and
		// then modified back in tx_b). We can't blindly remove it from the
		// pending map, as the dirty slot might have been committed already
		// (before the byzantium fork) and the entry is necessary to modify
		// the value back.
		s.pendingStorage[key] = value
	}
	if s.db.prefetcher != nil && len(slotsToPrefetch) > 0 && s.data.Root != types.EmptyRootHash {
		if err := s.db.prefetcher.prefetch(s.addrHash, s.data.Root, s.address, nil, slotsToPrefetch, false); err != nil {
			log.Error("Failed to prefetch slots", "addr", s.address, "slots", len(slotsToPrefetch), "err", err)
		}
	}
	if len(s.dirtyStorage) > 0 {
		s.dirtyStorage = make(Storage)
	}
	// Revoke the flag at the end of the transaction. It finalizes the status
	// of the newly-created object as it's no longer eligible for self-destruct
	// by EIP-6780. For non-newly-created objects, it's a no-op.
	s.newContract = false
}

// updateTrie is responsible for persisting cached storage changes into the
// object's storage trie. In case the storage trie is not yet loaded, this
// function will load the trie automatically. If any issues arise during the
// loading or updating of the trie, an error will be returned. Furthermore,
// this function will return the mutated storage trie, or nil if there is no
// storage change at all.
//
// It assumes all the dirty storage slots have been finalized before.
func (s *stateObject) updateTrie() (Trie, error) {
	// Short circuit if nothing was accessed, don't trigger a prefetcher warning
	if len(s.uncommittedStorage) == 0 {
		// Nothing was written, so we could stop early. Unless we have both reads
		// and witness collection enabled, in which case we need to fetch the trie.
		if s.db.witness == nil || len(s.originStorage) == 0 {
			return s.trie, nil
		}
	}
	// Retrieve a prefetcher-populated trie, or fall back to the database. This will
	// block until all prefetch tasks are done, which are needed for witnesses even
	// for unmodified state objects.
	tr := s.getPrefetchedTrie()
	if tr != nil {
		// Prefetcher returned a live trie, swap it out for the current one
		s.trie = tr
	} else {
		// Fetcher not running or empty trie, fall back to the database trie
		var err error
		tr, err = s.getTrie()
		if err != nil {
			s.db.setError(err)
			return nil, err
		}
	}
	// Short circuit if nothing changed, don't bother with hashing anything
	if len(s.uncommittedStorage) == 0 {
		return s.trie, nil
	}
	// Perform trie updates before deletions. This prevents resolution of unnecessary trie nodes
	// in circumstances similar to the following:
	//
	// Consider nodes `A` and `B` who share the same full node parent `P` and have no other siblings.
	// During the execution of a block:
	// - `A` is deleted,
	// - `C` is created, and also shares the parent `P`.
	// If the deletion is handled first, then `P` would be left with only one child, thus collapsed
	// into a shortnode. This requires `B` to be resolved from disk.
	// Whereas if the created node is handled first, then the collapse is avoided, and `B` is not resolved.
	var (
		deletions []common.Hash
		used      = make([]common.Hash, 0, len(s.uncommittedStorage))
	)
	for key, origin := range s.uncommittedStorage {
		// Skip noop changes, persist actual changes
		value, exist := s.pendingStorage[key]
		if value == origin {
			log.Error("Storage update was noop", "address", s.address, "slot", key)
			continue
		}
		if !exist {
			log.Error("Storage slot is not found in pending area", "address", s.address, "slot", key)
			continue
		}
		if (value != common.Hash{}) {
			if err := tr.UpdateStorage(s.address, key[:], common.TrimLeftZeroes(value[:])); err != nil {
				s.db.setError(err)
				return nil, err
			}
			s.db.StorageUpdated.Add(1)
		} else {
			deletions = append(deletions, key)
		}
		// Cache the items for preloading
		used = append(used, key) // Copy needed for closure
	}
	for _, key := range deletions {
		if err := tr.DeleteStorage(s.address, key[:]); err != nil {
			s.db.setError(err)
			return nil, err
		}
		s.db.StorageDeleted.Add(1)
	}
	if s.db.prefetcher != nil {
		s.db.prefetcher.used(s.addrHash, s.data.Root, nil, used)
	}
	s.uncommittedStorage = make(Storage) // empties the commit markers
	return tr, nil
}

// updateRoot flushes all cached storage mutations to trie, recalculating the
// new storage trie root.
func (s *stateObject) updateRoot() {
	// Flush cached storage mutations into trie, short circuit if any error
	// occurs or there is no change in the trie.
	tr, err := s.updateTrie()
	if err != nil || tr == nil {
		return
	}
	s.data.Root = tr.Hash()
}

// commitStorage overwrites the clean storage with the storage changes and
// fills the storage diffs into the given accountUpdate struct.
func (s *stateObject) commitStorage(op *accountUpdate) {
	var (
		buf    = crypto.NewKeccakState()
		encode = func(val common.Hash) []byte {
			if val == (common.Hash{}) {
				return nil
			}
			blob, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(val[:]))
			return blob
		}
	)
	for key, val := range s.pendingStorage {
		// Skip the noop storage changes: it is possible that the value of the
		// tracked slot is the same in the originStorage and pendingStorage
		// maps, e.g. the storage slot is modified in tx_a and then reset
		// back in tx_b.
		if val == s.originStorage[key] {
			continue
		}
		hash := crypto.HashData(buf, key[:])
		if op.storages == nil {
			op.storages = make(map[common.Hash][]byte)
		}
		op.storages[hash] = encode(val)
		if op.storagesOrigin == nil {
			op.storagesOrigin = make(map[common.Hash][]byte)
		}
		op.storagesOrigin[hash] = encode(s.originStorage[key])

		// Overwrite the clean value of storage slots
		s.originStorage[key] = val
	}
	s.pendingStorage = make(Storage)
}

// commit obtains the account changes (metadata, storage slots, code) caused by
// state execution along with the dirty storage trie nodes.
//
// Note, commit may run concurrently across all the state objects. Do not assume
// thread-safe access to the statedb.
func (s *stateObject) commit() (*accountUpdate, *trienode.NodeSet, error) {
	// commit the account metadata changes
	op := &accountUpdate{
		address: s.address,
		data:    types.SlimAccountRLP(s.data),
	}
	if s.origin != nil {
		op.origin = types.SlimAccountRLP(*s.origin)
	}
	// commit the contract code if it's modified
	if s.dirtyCode {
		op.code = &contractCode{
			hash: common.BytesToHash(s.CodeHash()),
			blob: s.code,
		}
		s.dirtyCode = false // reset the dirty flag
	}
	// Commit storage changes and the associated storage trie
	s.commitStorage(op)
	if len(op.storages) == 0 {
		// nothing changed, don't bother to commit the trie
		s.origin = s.data.Copy()
		return op, nil, nil
	}
	root, nodes := s.trie.Commit(false)
	s.data.Root = root
	s.origin = s.data.Copy()
	return op, nodes, nil
}

// AddBalance adds amount to s's balance.
// It is used to add funds to the destination account of a transfer.
// It returns the previous balance.
func (s *stateObject) AddBalance(amount *uint256.Int) uint256.Int {
	// EIP161: We must check emptiness for the objects such that the account
	// clearing (0,0,0 objects) can take effect.
	if amount.IsZero() {
		if s.empty() {
			s.touch()
		}
		return *(s.Balance())
	}
	return s.SetBalance(new(uint256.Int).Add(s.Balance(), amount))
}

// SetBalance sets the balance for the object, and returns the previous balance.
func (s *stateObject) SetBalance(amount *uint256.Int) uint256.Int {
	prev := *s.data.Balance
	s.db.journal.balanceChange(s.address, &s.data, s.selfDestructed, s.newContract)
	s.setBalance(amount)
	return prev
}

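// setBalance assigns the balance without journaling; callers such as SetBalance
// are expected to record the change in the journal themselves.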
func (s *stateObject) setBalance(amount *uint256.Int) {
	s.data.Balance = amount
}

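// deepCopy creates a copy of the state object bound to the given StateDB. All
// storage caches are duplicated, and the storage trie is copied if it has been
// loaded, so the copy can be mutated independently of the original.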
func (s *stateObject) deepCopy(db *StateDB) *stateObject {
	obj := &stateObject{
		db:                 db,
		address:            s.address,
		addrHash:           s.addrHash,
		origin:             s.origin,
		data:               s.data,
		code:               s.code,
		originStorage:      s.originStorage.Copy(),
		pendingStorage:     s.pendingStorage.Copy(),
		dirtyStorage:       s.dirtyStorage.Copy(),
		uncommittedStorage: s.uncommittedStorage.Copy(),
		dirtyCode:          s.dirtyCode,
		selfDestructed:     s.selfDestructed,
		newContract:        s.newContract,
	}
	if s.trie != nil {
		obj.trie = mustCopyTrie(s.trie)
	}
	return obj
}

//
// Attribute accessors
//

// Address returns the address of the contract/account
func (s *stateObject) Address() common.Address {
	return s.address
}

// Code returns the contract code associated with this object, if any.
func (s *stateObject) Code() []byte {
	if len(s.code) != 0 {
		return s.code
	}
	if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) {
		return nil
	}
	code, err := s.db.db.ContractCode(s.address, common.BytesToHash(s.CodeHash()))
	if err != nil {
		s.db.setError(fmt.Errorf("can't load code hash %x: %v", s.CodeHash(), err))
	}
	s.code = code
	return code
}

// CodeSize returns the size of the contract code associated with this object,
// or zero if none. This method is almost a mirror of Code, but uses a cache
// inside the database to avoid loading codes seen recently.
func (s *stateObject) CodeSize() int {
	if len(s.code) != 0 {
		return len(s.code)
	}
	if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) {
		return 0
	}
	size, err := s.db.db.ContractCodeSize(s.address, common.BytesToHash(s.CodeHash()))
	if err != nil {
		s.db.setError(fmt.Errorf("can't load code size %x: %v", s.CodeHash(), err))
	}
	return size
}

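// SetCode sets the contract code and code hash for the account, journaling the
// change so it can be reverted.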
func (s *stateObject) SetCode(codeHash common.Hash, code []byte) {
	s.db.journal.setCode(s.address, &s.data)
	s.setCode(codeHash, code)
}

func (s *stateObject) setCode(codeHash common.Hash, code []byte) {
	s.code = code
	s.data.CodeHash = codeHash[:]
	s.dirtyCode = true
}

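// SetNonce sets the account nonce, journaling the change so it can be reverted.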
func (s *stateObject) SetNonce(nonce uint64) {
	s.db.journal.nonceChange(s.address, &s.data, s.selfDestructed, s.newContract)
	s.setNonce(nonce)
}

func (s *stateObject) setNonce(nonce uint64) {
	s.data.Nonce = nonce
}

func (s *stateObject) CodeHash() []byte {
	return s.data.CodeHash
}

func (s *stateObject) Balance() *uint256.Int {
	return s.data.Balance
}

func (s *stateObject) Nonce() uint64 {
	return s.data.Nonce
}

func (s *stateObject) Root() common.Hash {
	return s.data.Root
}