Fixed duplicate shares on vardiff difficulty update. Added more comments.
parent 897dadbf11
commit 0dcccbd56e
lib/pool.js | 15 ++++++++-------
@@ -54,20 +54,21 @@ var pool = module.exports = function pool(options, authorizeFn){
                 client.sendDifficulty(newDiff);
 
                 /*
-                Disabled this line of code as it will because it will force the miner
-                to restart and submit duplicate shares. The stratum-python sends out a
+                Disabled this line of code because it will force the miner
+                to restart and submit duplicate shares. Stratum-python sends out a
                 mining.notify but rolls the jobID and sets "clean jobs" to false.
                 Meaning that the worker will only start on the new work once it
                 exhausts its current nonce range. But if the miner were to start the
                 new job, the shares would be invalidated since stratum-python doesn't
                 insert the new jobID that the share-limiter generated into the jobs
-                array. Also, since the new work is only sent with a new jobID but with
-                the same extranonce as the last job, the shares will be duplicate
-                anyway. Perhaps this bug has gone unnoticed because of how likely it
-                is for a miner to exhaust the nonce range before new work is sent.
+                array. Even worse, since the new work is only sent with a new jobID
+                but with the same extranonce and other job parameters as the last job,
+                the shares will be duplicate. Perhaps this bug has gone unnoticed
+                because of how likely it is for a miner to exhaust the nonce range
+                before new work is sent.
 
                 So lets only send a new difficulty, and the worker will use it when
-                it receives a new job when the block template updates.
+                it receives a new job from when the block template updates.
                 */
                 //client.sendMiningJob(_this.jobManager.currentJob.getJobParams());
 
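For context, here is a small, self-contained Node.js sketch of the behaviour the comment argues for: on a vardiff retarget the pool only announces the new difficulty, and the miner picks it up with the next job that goes out when the block template changes. The FakeStratumClient class, the emitter objects, and the event names below are illustrative stand-ins and not the project's real API; only the sendDifficulty / sendMiningJob call pattern comes from the diff above.

// Illustrative sketch only: plain EventEmitters stand in for the pool's
// client and job-manager objects; none of these names are the real API.
var EventEmitter = require('events').EventEmitter;

function FakeStratumClient(){
    this.difficulty = 1;
    this.pendingDifficulty = null;
}

// mining.set_difficulty: queue the new difficulty; per stratum it applies
// to the next job, so the miner keeps working its current nonce range.
FakeStratumClient.prototype.sendDifficulty = function(newDiff){
    this.pendingDifficulty = newDiff;
    console.log('-> mining.set_difficulty ' + newDiff);
};

// mining.notify: a genuinely new job; the previously announced difficulty
// now takes effect for shares submitted against it.
FakeStratumClient.prototype.sendMiningJob = function(jobParams){
    if (this.pendingDifficulty !== null){
        this.difficulty = this.pendingDifficulty;
        this.pendingDifficulty = null;
    }
    console.log('-> mining.notify ' + jobParams[0] + ' at diff ' + this.difficulty);
};

var varDiff = new EventEmitter();      // hypothetical retarget source
var blockPoller = new EventEmitter();  // hypothetical block template source
var client = new FakeStratumClient();

varDiff.on('newDifficulty', function(newDiff){
    // Only announce the difficulty. Re-sending the current job here (as the
    // commented-out sendMiningJob call did) would restart the miner and
    // produce duplicate shares, since the job parameters are unchanged.
    client.sendDifficulty(newDiff);
});

blockPoller.on('newBlockTemplate', function(jobParams){
    client.sendMiningJob(jobParams);
});

// Example flow:
varDiff.emit('newDifficulty', 16);               // difficulty queued, no job re-sent
blockPoller.emit('newBlockTemplate', ['job2']);  // fresh work goes out at diff 16

Running the sketch prints the set_difficulty message first, and the notify at the new difficulty only once fresh work arrives, which mirrors the flow the comment describes.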