go-ethereum/vendor/github.com/ethereum/ethash/ethash.go

// Copyright 2015 The go-ethereum Authors
// Copyright 2015 Lefteris Karapetsas <lefteris@refu.co>
// Copyright 2015 Matthew Wampler-Doty <matthew.wampler.doty@gmail.com>
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package ethash

/*
#include "src/libethash/internal.h"
int ethashGoCallback_cgo(unsigned);
*/
import "C"

import (
	"errors"
	"fmt"
	"io/ioutil"
	"math/big"
	"math/rand"
	"os"
	"os/user"
	"path/filepath"
	"runtime"
	"sync"
	"sync/atomic"
	"time"
	"unsafe"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/pow"
)

var (
	maxUint256  = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
	sharedLight = new(Light)
)

const (
	epochLength         uint64     = 30000
	cacheSizeForTesting C.uint64_t = 1024
	dagSizeForTesting   C.uint64_t = 1024 * 32
)

var DefaultDir = defaultDir()

func defaultDir() string {
	home := os.Getenv("HOME")
	if user, err := user.Current(); err == nil {
		home = user.HomeDir
	}
	if runtime.GOOS == "windows" {
		return filepath.Join(home, "AppData", "Ethash")
	}
	return filepath.Join(home, ".ethash")
}

// cache wraps an ethash_light_t with some metadata
// and automatic memory management.
type cache struct {
	epoch uint64
	used  time.Time
	test  bool

	gen sync.Once // ensures cache is only generated once.
	ptr *C.struct_ethash_light
}

// generate creates the actual cache. it can be called from multiple
// goroutines. the first call will generate the cache, subsequent
// calls wait until it is generated.
func (cache *cache) generate() {
	cache.gen.Do(func() {
		started := time.Now()
		seedHash := makeSeedHash(cache.epoch)
		glog.V(logger.Debug).Infof("Generating cache for epoch %d (%x)", cache.epoch, seedHash)
		size := C.ethash_get_cachesize(C.uint64_t(cache.epoch * epochLength))
		if cache.test {
			size = cacheSizeForTesting
		}
		cache.ptr = C.ethash_light_new_internal(size, (*C.ethash_h256_t)(unsafe.Pointer(&seedHash[0])))
		runtime.SetFinalizer(cache, freeCache)
		glog.V(logger.Debug).Infof("Done generating cache for epoch %d, it took %v", cache.epoch, time.Since(started))
	})
}

func freeCache(cache *cache) {
	C.ethash_light_delete(cache.ptr)
	cache.ptr = nil
}
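
// Illustrative sketch (hypothetical helper, not called anywhere in this
// package): the sync.Once inside (*cache).generate makes concurrent generation
// requests safe; the first caller does the work and the others block until the
// cache is ready. The epoch is arbitrary and test mode keeps the cache tiny.
func exampleConcurrentCacheGenerate() *cache {
	c := &cache{epoch: 0, test: true}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.generate() // only one goroutine actually builds the cache
		}()
	}
	wg.Wait()
	return c
}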

func (cache *cache) compute(dagSize uint64, hash common.Hash, nonce uint64) (ok bool, mixDigest, result common.Hash) {
	ret := C.ethash_light_compute_internal(cache.ptr, C.uint64_t(dagSize), hashToH256(hash), C.uint64_t(nonce))
	// Make sure cache is live until after the C call.
	// This is important because a GC might happen and execute
	// the finalizer before the call completes.
	_ = cache
	return bool(ret.success), h256ToHash(ret.mix_hash), h256ToHash(ret.result)
}

// Light implements the Verify half of the proof of work. It uses a few small
// in-memory caches to verify the nonces found by Full.
type Light struct {
	test      bool              // If set, use a smaller cache size
	mu        sync.Mutex        // Protects the per-epoch map of verification caches
	caches    map[uint64]*cache // Currently maintained verification caches
	future    *cache            // Pre-generated cache for the estimated future DAG
	NumCaches int               // Maximum number of caches to keep before eviction (only init, don't modify)
}

// Verify checks whether the block's nonce is valid.
func (l *Light) Verify(block pow.Block) bool {
	// TODO: do ethash_quick_verify before getCache in order
	// to prevent DOS attacks.
	blockNum := block.NumberU64()
	if blockNum >= epochLength*2048 {
		glog.V(logger.Debug).Infof("block number %d too high, limit is %d", blockNum, epochLength*2048)
		return false
	}

	difficulty := block.Difficulty()
	/* Cannot happen if the block header difficulty is validated prior to PoW, but can
	   happen if PoW is checked first due to parallel PoW checking.
	   We could check the minimum valid difficulty, but to keep a clean separation of
	   concerns we avoid duplicating Ethereum protocol consensus rules here, as they
	   are not in scope of Ethash.
	*/
	if difficulty.Cmp(common.Big0) == 0 {
		glog.V(logger.Debug).Infof("invalid block difficulty")
		return false
	}

	cache := l.getCache(blockNum)
	dagSize := C.ethash_get_datasize(C.uint64_t(blockNum))
	if l.test {
		dagSize = dagSizeForTesting
	}
	// Recompute the hash using the cache.
	ok, mixDigest, result := cache.compute(uint64(dagSize), block.HashNoNonce(), block.Nonce())
	if !ok {
		return false
	}

	// Avoid mixDigest malleability, as it is not included in a block's HashNoNonce.
	if block.MixDigest() != mixDigest {
		return false
	}

	// The actual check: the result must be at most the target derived from the difficulty.
	target := new(big.Int).Div(maxUint256, difficulty)
	return result.Big().Cmp(target) <= 0
}
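
// Illustrative sketch (hypothetical helper, not used elsewhere in this
// package): it spells out the difficulty-to-target arithmetic used by Verify
// above, i.e. a nonce is valid when the 256-bit hash result, read as an
// integer, is at most 2^256 / difficulty.
func exampleMeetsTarget(result common.Hash, difficulty *big.Int) bool {
	target := new(big.Int).Div(maxUint256, difficulty) // same derivation as in Verify
	return result.Big().Cmp(target) <= 0
}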

func h256ToHash(in C.ethash_h256_t) common.Hash {
	return *(*common.Hash)(unsafe.Pointer(&in.b))
}

func hashToH256(in common.Hash) C.ethash_h256_t {
	return C.ethash_h256_t{b: *(*[32]C.uint8_t)(unsafe.Pointer(&in[0]))}
}
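
// Illustrative sketch (hypothetical helper, not used elsewhere): the two
// converters above are exact inverses of each other, since both types are
// 32-byte values reinterpreted in place.
func exampleHashRoundTrip() {
	h := crypto.Sha3Hash([]byte("illustrative input"))
	if h256ToHash(hashToH256(h)) != h {
		panic("round trip through C.ethash_h256_t should be lossless")
	}
}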

func (l *Light) getCache(blockNum uint64) *cache {
	var c *cache
	epoch := blockNum / epochLength

	// If we have a cache for that epoch, use it.
	l.mu.Lock()
	if l.caches == nil {
		l.caches = make(map[uint64]*cache)
	}
	if l.NumCaches == 0 {
		l.NumCaches = 3
	}
	c = l.caches[epoch]
	if c == nil {
		// No cached DAG, evict the oldest if the cache limit was reached
		if len(l.caches) >= l.NumCaches {
			var evict *cache
			for _, cache := range l.caches {
				if evict == nil || evict.used.After(cache.used) {
					evict = cache
				}
			}
			glog.V(logger.Debug).Infof("Evicting DAG for epoch %d in favour of epoch %d", evict.epoch, epoch)
			delete(l.caches, evict.epoch)
		}
		// If we have the new DAG pre-generated, use that, otherwise create a new one
		if l.future != nil && l.future.epoch == epoch {
			glog.V(logger.Debug).Infof("Using pre-generated DAG for epoch %d", epoch)
			c, l.future = l.future, nil
		} else {
			glog.V(logger.Debug).Infof("No pre-generated DAG available, creating new for epoch %d", epoch)
			c = &cache{epoch: epoch, test: l.test}
		}
		l.caches[epoch] = c

		// If we just used up the future cache, or need a refresh, regenerate
		if l.future == nil || l.future.epoch <= epoch {
			glog.V(logger.Debug).Infof("Pre-generating DAG for epoch %d", epoch+1)
			l.future = &cache{epoch: epoch + 1, test: l.test}
			go l.future.generate()
		}
	}
	c.used = time.Now()
	l.mu.Unlock()

	// Wait for generation to finish and return the cache.
	c.generate()
	return c
}
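
// Illustrative sketch (hypothetical helper): the zero value of Light is ready
// to use; NumCaches only needs to be set, before the first Verify call, when a
// limit other than the default of three per-epoch caches is wanted.
func exampleLightWithLargerCache() *Light {
	return &Light{NumCaches: 5} // keep up to five verification caches before evicting
}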

// dag wraps an ethash_full_t with some metadata
// and automatic memory management.
type dag struct {
	epoch uint64
	test  bool
	dir   string

	gen sync.Once // ensures DAG is only generated once.
	ptr *C.struct_ethash_full
}

// generate creates the actual DAG. it can be called from multiple
// goroutines. the first call will generate the DAG, subsequent
// calls wait until it is generated.
func (d *dag) generate() {
	d.gen.Do(func() {
		var (
			started   = time.Now()
			seedHash  = makeSeedHash(d.epoch)
			blockNum  = C.uint64_t(d.epoch * epochLength)
			cacheSize = C.ethash_get_cachesize(blockNum)
			dagSize   = C.ethash_get_datasize(blockNum)
		)
		if d.test {
			cacheSize = cacheSizeForTesting
			dagSize = dagSizeForTesting
		}
		if d.dir == "" {
			d.dir = DefaultDir
		}
		glog.V(logger.Info).Infof("Generating DAG for epoch %d (size %d) (%x)", d.epoch, dagSize, seedHash)
		// Generate a temporary cache.
		// TODO: this could share the cache with Light
		cache := C.ethash_light_new_internal(cacheSize, (*C.ethash_h256_t)(unsafe.Pointer(&seedHash[0])))
		defer C.ethash_light_delete(cache)
		// Generate the actual DAG.
		d.ptr = C.ethash_full_new_internal(
			C.CString(d.dir),
			hashToH256(seedHash),
			dagSize,
			cache,
			(C.ethash_callback_t)(unsafe.Pointer(C.ethashGoCallback_cgo)),
		)
		if d.ptr == nil {
			panic("ethash_full_new IO or memory error")
		}
		runtime.SetFinalizer(d, freeDAG)
		glog.V(logger.Info).Infof("Done generating DAG for epoch %d, it took %v", d.epoch, time.Since(started))
	})
}

func freeDAG(d *dag) {
	C.ethash_full_delete(d.ptr)
	d.ptr = nil
}

func (d *dag) Ptr() unsafe.Pointer {
	return unsafe.Pointer(d.ptr.data)
}

//export ethashGoCallback
func ethashGoCallback(percent C.unsigned) C.int {
	glog.V(logger.Info).Infof("Generating DAG: %d%%", percent)
	return 0
}

// MakeDAG pre-generates a DAG file for the given block number in the
// given directory. If dir is the empty string, the default directory
// is used.
func MakeDAG(blockNum uint64, dir string) error {
	d := &dag{epoch: blockNum / epochLength, dir: dir}
	if blockNum >= epochLength*2048 {
		return fmt.Errorf("block number too high, limit is %d", epochLength*2048)
	}
	d.generate()
	if d.ptr == nil {
		return errors.New("failed")
	}
	return nil
}
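
// Illustrative usage sketch (hypothetical helper): pre-generate the DAG for an
// epoch ahead of time so that mining does not stall on first use. The epoch
// number is arbitrary and the empty dir selects DefaultDir.
func examplePreGenerateDAG() error {
	return MakeDAG(10*epochLength, "") // DAG for epoch 10, written below DefaultDir
}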

// Full implements the Search half of the proof of work.
type Full struct {
	Dir string // use this to specify a non-default DAG directory

	test     bool // if set use a smaller DAG size
	turbo    bool
	hashRate int32

	mu      sync.Mutex // protects dag
	current *dag       // current full DAG
}

func (pow *Full) getDAG(blockNum uint64) (d *dag) {
	epoch := blockNum / epochLength
	pow.mu.Lock()
	if pow.current != nil && pow.current.epoch == epoch {
		d = pow.current
	} else {
		d = &dag{epoch: epoch, test: pow.test, dir: pow.Dir}
		pow.current = d
	}
	pow.mu.Unlock()
	// wait for it to finish generating.
	d.generate()
	return d
}

func (pow *Full) Search(block pow.Block, stop <-chan struct{}, index int) (nonce uint64, mixDigest []byte) {
	dag := pow.getDAG(block.NumberU64())

	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	diff := block.Difficulty()

	i := int64(0)
	starti := i
	start := time.Now().UnixNano()
	previousHashrate := int32(0)

	nonce = uint64(r.Int63())
	hash := hashToH256(block.HashNoNonce())
	target := new(big.Int).Div(maxUint256, diff)
	for {
		select {
		case <-stop:
			atomic.AddInt32(&pow.hashRate, -previousHashrate)
			return 0, nil
		default:
			i++

			// we don't have to update hash rate on every nonce, so update after
			// first nonce check and then after 2^X nonces
			if i == 2 || ((i % (1 << 16)) == 0) {
				elapsed := time.Now().UnixNano() - start
				hashes := (float64(1e9) / float64(elapsed)) * float64(i-starti)
				hashrateDiff := int32(hashes) - previousHashrate
				previousHashrate = int32(hashes)
				atomic.AddInt32(&pow.hashRate, hashrateDiff)
			}

			ret := C.ethash_full_compute(dag.ptr, hash, C.uint64_t(nonce))
			result := h256ToHash(ret.result).Big()

			// TODO: disagrees with the spec https://github.com/ethereum/wiki/wiki/Ethash#mining
			if ret.success && result.Cmp(target) <= 0 {
				mixDigest = C.GoBytes(unsafe.Pointer(&ret.mix_hash), C.int(32))
				atomic.AddInt32(&pow.hashRate, -previousHashrate)
				return nonce, mixDigest
			}
			nonce += 1
		}
		if !pow.turbo {
			time.Sleep(20 * time.Microsecond)
		}
	}
}
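
// Illustrative usage sketch, kept entirely in a comment because it needs a
// concrete pow.Block implementation which this package does not provide:
// Search is normally run in its own goroutine and aborted by closing the stop
// channel, in which case it returns (0, nil).
//
//	full := &Full{turbo: true}
//	stop := make(chan struct{})
//	go func() {
//		nonce, mix := full.Search(block, stop, 0)
//		// handle the found nonce and mix digest here
//	}()
//	close(stop) // abort the search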

func (pow *Full) GetHashrate() int64 {
	return int64(atomic.LoadInt32(&pow.hashRate))
}

func (pow *Full) Turbo(on bool) {
	// TODO: this needs to use an atomic operation.
	pow.turbo = on
}

// Ethash combines block verification with Light and
// nonce searching with Full into a single proof of work.
type Ethash struct {
	*Light
	*Full
}

// New creates an instance of the proof of work.
func New() *Ethash {
	return &Ethash{new(Light), &Full{turbo: true}}
}

// NewShared creates an instance of the proof of work where a single instance
// of the Light cache is shared across all instances created with NewShared.
func NewShared() *Ethash {
	return &Ethash{sharedLight, &Full{turbo: true}}
}
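
// Illustrative sketch (hypothetical helper): callers typically construct one
// Ethash value and use the embedded Light for verification and the embedded
// Full for mining. The Verify and Search calls are commented out because they
// need a concrete pow.Block, which this package does not provide.
func exampleEthashSetup() *Ethash {
	eth := New()     // fresh verification caches, turbo mining enabled
	eth.Turbo(false) // throttle mining slightly (sleep between nonces)
	_ = eth.GetHashrate()
	// ok := eth.Verify(block)
	// nonce, mix := eth.Search(block, stop, 0)
	return eth
}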

// NewForTesting creates a proof of work for use in unit tests.
// It uses a smaller DAG and cache size to keep test times low.
// DAG files are stored in a temporary directory.
//
// Nonces found by a testing instance are not verifiable with a
// regular-size cache.
func NewForTesting() (*Ethash, error) {
	dir, err := ioutil.TempDir("", "ethash-test")
	if err != nil {
		return nil, err
	}
	return &Ethash{&Light{test: true}, &Full{Dir: dir, test: true}}, nil
}

func GetSeedHash(blockNum uint64) ([]byte, error) {
	if blockNum >= epochLength*2048 {
		return nil, fmt.Errorf("block number too high, limit is %d", epochLength*2048)
	}
	sh := makeSeedHash(blockNum / epochLength)
	return sh[:], nil
}

func makeSeedHash(epoch uint64) (sh common.Hash) {
	for ; epoch > 0; epoch-- {
		sh = crypto.Sha3Hash(sh[:])
	}
	return sh
}
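
// Illustrative sketch (hypothetical helper): the seed hash changes only once
// per epoch (every 30000 blocks), so every block number inside an epoch maps
// to the same seed. Epoch 2 is used here purely as an example.
func exampleSeedHash() {
	seed, err := GetSeedHash(2 * epochLength) // first block of epoch 2
	if err != nil {
		panic(err)
	}
	fmt.Printf("epoch 2 seed: %x\n", seed)
}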