Author: CedarLogi
Project: go-ethereum
// Verify checks whether the block's nonce is valid.
func (l *Light) Verify(block pow.Block) bool {
// TODO: do ethash_quick_verify before getCache in order
// to prevent DOS attacks.
var (
blockNum = block.NumberU64()
difficulty = block.Difficulty()
cache = l.getCache(blockNum)
dagSize = C.ethash_get_datasize(C.uint64_t(blockNum))
)
if l.test {
dagSize = dagSizeForTesting
}
if blockNum >= epochLength*2048 {
glog.V(logger.Debug).Infof("block number %d too high, limit is %d", epochLength*2048)
return false
}
// Recompute the hash using the cache.
hash := hashToH256(block.HashNoNonce())
ret := C.ethash_light_compute_internal(cache.ptr, dagSize, hash, C.uint64_t(block.Nonce()))
if !ret.success {
return false
}
// Make sure cache is live until after the C call.
// This is important because a GC might happen and execute
// the finalizer before the call completes.
_ = cache
// The actual check.
target := new(big.Int).Div(minDifficulty, difficulty)
return h256ToHash(ret.result).Big().Cmp(target) <= 0
}
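The final comparison is the standard Ethash validity condition: the 256-bit result must be at most 2^256 / difficulty (minDifficulty in this snippet and maxUint256 in the later ones both stand in for that 2^256 numerator). Below is a minimal self-contained sketch of that check; the names are illustrative, not the package's own.

package main

import (
    "fmt"
    "math/big"
)

// two256 stands in for the constant the snippets divide by the difficulty.
var two256 = new(big.Int).Lsh(big.NewInt(1), 256)

// validPoW reports whether a 32-byte Ethash result satisfies the block
// difficulty, i.e. result <= 2^256 / difficulty.
func validPoW(result [32]byte, difficulty *big.Int) bool {
    target := new(big.Int).Div(two256, difficulty)
    return new(big.Int).SetBytes(result[:]).Cmp(target) <= 0
}

func main() {
    var easy [32]byte // an all-zero result passes any positive difficulty
    fmt.Println(validPoW(easy, big.NewInt(131072))) // true
}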
Author: haegyun
Project: go-ethereum
// Verify checks whether the block's nonce is valid.
func (l *Light) Verify(block pow.Block) bool {
// TODO: do ethash_quick_verify before getCache in order
// to prevent DOS attacks.
blockNum := block.NumberU64()
if blockNum >= epochLength*2048 {
glog.V(logger.Debug).Infof("block number %d too high, limit is %d", epochLength*2048)
return false
}
difficulty := block.Difficulty()
/* A zero difficulty cannot happen if the block header is validated before PoW,
but it can happen when PoW is checked first due to parallel PoW checking.
We could enforce the minimum valid difficulty, but for separation of concerns
we avoid duplicating Ethereum protocol consensus rules here, as they are not
in scope of Ethash.
*/
if difficulty.Cmp(common.Big0) == 0 {
glog.V(logger.Debug).Infof("invalid block difficulty")
return false
}
cache := l.getCache(blockNum)
dagSize := C.ethash_get_datasize(C.uint64_t(blockNum))
if l.test {
dagSize = dagSizeForTesting
}
// Recompute the hash using the cache.
hash := hashToH256(block.HashNoNonce())
ret := C.ethash_light_compute_internal(cache.ptr, dagSize, hash, C.uint64_t(block.Nonce()))
if !ret.success {
return false
}
// avoid mix digest malleability, as it's not included in a block's HashNoNonce
if block.MixDigest() != h256ToHash(ret.mix_hash) {
return false
}
// Make sure cache is live until after the C call.
// This is important because a GC might happen and execute
// the finalizer before the call completes.
_ = cache
// The actual check.
target := new(big.Int).Div(minDifficulty, difficulty)
return h256ToHash(ret.result).Big().Cmp(target) <= 0
}
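The early return on blockNum >= epochLength*2048 exists because the precomputed cache/DAG size tables only cover 2048 epochs. A small sketch of the epoch arithmetic behind that limit (the constants follow the Ethash spec; the helper name is illustrative):

package main

import "fmt"

const (
    epochLength = 30000 // blocks per Ethash epoch
    maxEpoch    = 2048  // epochs covered by the precomputed size tables
)

// epochOf maps a block number to its Ethash epoch and reports whether the
// precomputed cache/DAG size tables still cover it.
func epochOf(blockNum uint64) (epoch uint64, ok bool) {
    epoch = blockNum / epochLength
    return epoch, epoch < maxEpoch
}

func main() {
    fmt.Println(epochOf(1_919_999))              // 63 true
    fmt.Println(epochOf(epochLength * maxEpoch)) // 2048 false
}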
Author: efaysa
Project: etherapi
// See [2]. We basically do the same here, but the Go OpenCL bindings
// are at a slightly higher abstraction level.
func InitCL(blockNum uint64, c *OpenCLMiner) error {
platforms, err := cl.GetPlatforms()
if err != nil {
return fmt.Errorf("Plaform error: %v\nCheck your OpenCL installation and then run geth gpuinfo", err)
}
var devices []*cl.Device
for _, p := range platforms {
ds, err := cl.GetDevices(p, cl.DeviceTypeGPU)
if err != nil {
return fmt.Errorf("Devices error: %v\nCheck your GPU drivers and then run geth gpuinfo", err)
}
devices = append(devices, ds...)
}
pow := New()
_ = pow.getDAG(blockNum) // generates DAG if we don't have it
pow.Light.getCache(blockNum) // and cache
c.ethash = pow
dagSize := uint64(C.ethash_get_datasize(C.uint64_t(blockNum)))
c.dagSize = dagSize
for _, id := range c.deviceIds {
if id > len(devices)-1 {
return fmt.Errorf("Device id not found. See available device ids with: geth gpuinfo")
} else {
err := initCLDevice(id, devices[id], c)
if err != nil {
return err
}
}
}
if len(c.devices) == 0 {
return fmt.Errorf("No GPU devices found")
}
return nil
}
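The loop over c.deviceIds is the only place a bad device id from the command line gets caught. Here is a minimal, OpenCL-free sketch of that bounds check in isolation (checkDeviceIds is a hypothetical helper, not part of the package):

package main

import "fmt"

// checkDeviceIds validates every requested OpenCL device id against the
// number of devices actually discovered, mirroring the check in InitCL.
func checkDeviceIds(requested []int, available int) error {
    for _, id := range requested {
        if id < 0 || id >= available {
            return fmt.Errorf("device id %d not found, only %d device(s) available; run geth gpuinfo to list them", id, available)
        }
    }
    return nil
}

func main() {
    fmt.Println(checkDeviceIds([]int{0, 1}, 2)) // <nil>
    fmt.Println(checkDeviceIds([]int{2}, 2))    // device id 2 not found, ...
}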
Author: Codzar
Project: go-ethereum
// Verify checks whether the block's nonce is valid.
func (l *Light) Verify(block pow.Block) bool {
// TODO: do ethash_quick_verify before getCache in order
// to prevent DOS attacks.
blockNum := block.NumberU64()
if blockNum >= epochLength*2048 {
glog.V(logger.Debug).Infof("block number %d too high, limit is %d", epochLength*2048)
return false
}
difficulty := block.Difficulty()
/* A zero difficulty cannot happen if the block header is validated before PoW,
but it can happen when PoW is checked first due to parallel PoW checking.
We could enforce the minimum valid difficulty, but for separation of concerns
we avoid duplicating Ethereum protocol consensus rules here, as they are not
in scope of Ethash.
*/
if difficulty.Cmp(common.Big0) == 0 {
glog.V(logger.Debug).Infof("invalid block difficulty")
return false
}
cache := l.getCache(blockNum)
dagSize := C.ethash_get_datasize(C.uint64_t(blockNum))
if l.test {
dagSize = dagSizeForTesting
}
// Recompute the hash using the cache.
ok, mixDigest, result := cache.compute(uint64(dagSize), block.HashNoNonce(), block.Nonce())
if !ok {
return false
}
// avoid mix digest malleability, as it's not included in a block's HashNoNonce
if block.MixDigest() != mixDigest {
return false
}
// The actual check.
target := new(big.Int).Div(maxUint256, difficulty)
return result.Big().Cmp(target) <= 0
}
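Because the target is 2^256 / difficulty, a random nonce passes with probability roughly 1/difficulty, so the difficulty is also the expected number of hash attempts per valid nonce. A short sketch of that relationship (illustrative names, not package code):

package main

import (
    "fmt"
    "math/big"
)

// expectedHashes returns the mean number of nonce attempts needed to find a
// result below target = 2^256 / difficulty; it works out to the difficulty
// itself.
func expectedHashes(difficulty *big.Int) *big.Int {
    two256 := new(big.Int).Lsh(big.NewInt(1), 256)
    target := new(big.Int).Div(two256, difficulty)
    return new(big.Int).Div(two256, target)
}

func main() {
    fmt.Println(expectedHashes(big.NewInt(131072))) // 131072
}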
Author: haegyun
Project: go-ethereum
// generate creates the actual DAG. it can be called from multiple
// goroutines. the first call will generate the DAG, subsequent
// calls wait until it is generated.
func (d *dag) generate() {
d.gen.Do(func() {
var (
started = time.Now()
seedHash = makeSeedHash(d.epoch)
blockNum = C.uint64_t(d.epoch * epochLength)
cacheSize = C.ethash_get_cachesize(blockNum)
dagSize = C.ethash_get_datasize(blockNum)
)
if d.test {
cacheSize = cacheSizeForTesting
dagSize = dagSizeForTesting
}
if d.dir == "" {
d.dir = DefaultDir
}
glog.V(logger.Info).Infof("Generating DAG for epoch %d (%x)", d.epoch, seedHash)
// Generate a temporary cache.
// TODO: this could share the cache with Light
cache := C.ethash_light_new_internal(cacheSize, (*C.ethash_h256_t)(unsafe.Pointer(&seedHash[0])))
defer C.ethash_light_delete(cache)
// Generate the actual DAG.
d.ptr = C.ethash_full_new_internal(
C.CString(d.dir),
hashToH256(seedHash),
dagSize,
cache,
(C.ethash_callback_t)(unsafe.Pointer(C.ethashGoCallback_cgo)),
)
if d.ptr == nil {
panic("ethash_full_new IO or memory error")
}
runtime.SetFinalizer(d, freeDAG)
glog.V(logger.Info).Infof("Done generating DAG for epoch %d, it took %v", d.epoch, time.Since(started))
})
}
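makeSeedHash above follows the standard Ethash seed schedule: start from 32 zero bytes and apply Keccak-256 once per elapsed epoch. A self-contained sketch of that schedule, using golang.org/x/crypto/sha3 for the legacy Keccak (an assumption of this example rather than the package's own helper):

package main

import (
    "fmt"

    "golang.org/x/crypto/sha3"
)

// seedHash returns the Ethash seed for the given epoch: Keccak-256 applied
// to 32 zero bytes, iterated once per elapsed epoch.
func seedHash(epoch uint64) [32]byte {
    var seed [32]byte
    for i := uint64(0); i < epoch; i++ {
        h := sha3.NewLegacyKeccak256()
        h.Write(seed[:])
        copy(seed[:], h.Sum(nil))
    }
    return seed
}

func main() {
    fmt.Printf("%x\n", seedHash(0)) // 32 zero bytes for epoch 0
    fmt.Printf("%x\n", seedHash(1)) // keccak256 of 32 zero bytes
}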
Author: efaysa
Project: etherapi
func (c *OpenCLMiner) Search(block pow.Block, stop <-chan struct{}, index int) (uint64, []byte) {
c.mu.Lock()
// Deferring the unlock here (rather than after the InitCL block below)
// ensures the early error return also releases the lock.
defer c.mu.Unlock()
newDagSize := uint64(C.ethash_get_datasize(C.uint64_t(block.NumberU64())))
if newDagSize > c.dagSize {
// TODO: clean up buffers from previous DAG?
err := InitCL(block.NumberU64(), c)
if err != nil {
fmt.Println("OpenCL init error: ", err)
return 0, []byte{0}
}
}
// Avoid unneeded OpenCL initialisation if we received stop while running InitCL
select {
case <-stop:
return 0, []byte{0}
default:
}
headerHash := block.HashNoNonce()
diff := block.Difficulty()
target256 := new(big.Int).Div(maxUint256, diff)
target64 := new(big.Int).Rsh(target256, 192).Uint64()
var zero uint32 = 0
d := c.devices[index]
_, err := d.queue.EnqueueWriteBuffer(d.headerBuf, false, 0, 32, unsafe.Pointer(&headerHash[0]), nil)
if err != nil {
fmt.Println("Error in Search clEnqueueWriterBuffer : ", err)
return 0, []byte{0}
}
for i := 0; i < searchBufSize; i++ {
_, err := d.queue.EnqueueWriteBuffer(d.searchBuffers[i], false, 0, 4, unsafe.Pointer(&zero), nil)
if err != nil {
fmt.Println("Error in Search clEnqueueWriterBuffer : ", err)
return 0, []byte{0}
}
}
// wait for all search buffers to complete
err = d.queue.Finish()
if err != nil {
fmt.Println("Error in Search clFinish : ", err)
return 0, []byte{0}
}
err = d.searchKernel.SetArg(1, d.headerBuf)
if err != nil {
fmt.Println("Error in Search clSetKernelArg : ", err)
return 0, []byte{0}
}
err = d.searchKernel.SetArg(2, d.dagBuf)
if err != nil {
fmt.Println("Error in Search clSetKernelArg : ", err)
return 0, []byte{0}
}
err = d.searchKernel.SetArg(4, target64)
if err != nil {
fmt.Println("Error in Search clSetKernelArg : ", err)
return 0, []byte{0}
}
err = d.searchKernel.SetArg(5, uint32(math.MaxUint32))
if err != nil {
fmt.Println("Error in Search clSetKernelArg : ", err)
return 0, []byte{0}
}
// wait on this before returning
var preReturnEvent *cl.Event
if d.openCL12 {
preReturnEvent, err = d.ctx.CreateUserEvent()
if err != nil {
fmt.Println("Error in Search create CL user event : ", err)
return 0, []byte{0}
}
}
pending := make([]pendingSearch, 0, searchBufSize)
var p *pendingSearch
searchBufIndex := uint32(0)
var checkNonce uint64
loops := int64(0)
prevHashRate := int32(0)
start := time.Now().UnixNano()
// we grab a single random nonce and set it as an argument to the kernel search function
// the device will then add each local thread's gid to the nonce, creating a unique nonce
// for each device compute unit executing in parallel
initNonce := uint64(d.nonceRand.Int63())
for nonce := initNonce; ; nonce += uint64(globalWorkSize) {
select {
case <-stop:
/*
if d.openCL12 {
err = cl.WaitForEvents([]*cl.Event{preReturnEvent})
// ... (the rest of the code is omitted here) ...
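The target64 value passed to the kernel above is just the top 64 bits of the 256-bit target, which lets the GPU do a cheap first-pass comparison before the CPU re-checks the full 256-bit condition. A small sketch of that reduction (illustrative names, not package code):

package main

import (
    "fmt"
    "math/big"
)

// upper64OfTarget reproduces the target64 computation: take the 256-bit
// target 2^256/difficulty and keep only its most significant 64 bits.
func upper64OfTarget(difficulty *big.Int) uint64 {
    two256 := new(big.Int).Lsh(big.NewInt(1), 256)
    target256 := new(big.Int).Div(two256, difficulty)
    return new(big.Int).Rsh(target256, 192).Uint64()
}

func main() {
    // With difficulty 2^32 the target is 2^224, so its upper 64 bits are 2^32.
    fmt.Println(upper64OfTarget(new(big.Int).Lsh(big.NewInt(1), 32))) // 4294967296
}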