This commit is contained in:
Chris Kleeschulte 2017-07-17 13:26:08 -04:00
parent e1b8c583d8
commit eb39e5bf19
2 changed files with 48 additions and 13 deletions

View File

@@ -26,6 +26,10 @@ var BlockService = function(options) {
this._subscriptions.block = [];
this._subscriptions.reorg = [];
//memory
this._maxMem = options.maxMemory || 1500; // in MB
this._deferTimeout = null;
// meta is [{ chainwork: chainwork, hash: hash }]
this._meta = [];
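A minimal sketch of how the new maxMemory option might be wired up when this service is constructed. The option name and default match the constructor above; the require path and the node object are assumptions:

var BlockService = require('./block'); // assumed path to this service
var blockService = new BlockService({
  node: node,      // assumed: the parent node instance this service attaches to
  maxMemory: 2048  // raise the budget to ~2GB; defaults to 1500 (MB)
});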
@@ -198,6 +202,9 @@ BlockService.prototype.start = function(callback) {
};
BlockService.prototype.stop = function(callback) {
if (this._deferTimeout) {
// cancel the pending deferred sync; unref() alone would still let it fire
clearTimeout(this._deferTimeout);
}
callback();
};
@@ -663,23 +670,44 @@ BlockService.prototype._sendDelta = function() {
if (++this._blockCount >= BlockService.MAX_BLOCKS) {
this._latestBlockHash = this._tip.hash;
this._blockCount = 0;
// this is where we ask the p2p network for more blocks. We need a pause
// before asking for more blocks so that the db's write queue does not
// grow too large and cause a memory issue. The larger the difference
// between network speed and disk i/o speed, the longer the wait should
// be, e.g. gigabit internet speeds and a usb 2.0, 5400 rpm hard disk
// might need 10-20 seconds between each 500 block chunk.
this._continueSync();
}
};
BlockService.prototype._continueSync = function() {
/*
Essentially, we have three distinct tasks taking place during a sync operation:
1. receiving network data
2. light processing of that data (data manipulation)
3. writing that data to disk
Our goal: sync as quickly as our least performant task will allow, using a modest amount of memory.
The worst case scenario is a very fast network, a very slow disk, and very little memory to work
with. In that case, we want to defer asking the p2p service for more blocks until the disk has had
time to process all of the queued write events.
Our way of dealing with this is to check the resident set size and defer the next call to sync if
we are in danger of a 'JS Object Allocation Failed - Out of Memory' fault.
You can use the "maxMemory" config setting for this service to limit the amount of memory
considered. The default is 1.5GB of memory, max.
If we are over 95% of max memory usage, we defer the sync until usage is 95% or less.
*/
var self = this;
var totalRss = process.memoryUsage().rss;
if ((totalRss / 1E6) / self._maxMem > 0.95) {
self._deferTimeout = setTimeout(function() {
self._continueSync();
}, 1000);
return;
}
self._sync();
};
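The deferral pattern above, in isolation: a minimal, runnable sketch of the same RSS check. The 1500 MB budget and 95% threshold come from the code above; the function and variable names are illustrative:

// Poll process.memoryUsage() and only run `work` once resident memory
// is at or below 95% of the budget, mirroring _continueSync above.
var MAX_MEM_MB = 1500; // default budget, in MB

function runWhenMemoryAllows(work) {
  var rssMb = process.memoryUsage().rss / 1e6;
  if (rssMb / MAX_MEM_MB > 0.95) {
    // over budget: check again in a second instead of queueing more work
    setTimeout(function() { runWhenMemoryAllows(work); }, 1000);
    return;
  }
  work();
}

runWhenMemoryAllows(function() {
  console.log('below 95% of the memory budget, safe to request more blocks');
});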
BlockService.prototype._setListeners = function() {
this._p2p.once('bestHeight', this._onBestHeight.bind(this));

View File

@@ -37,6 +37,7 @@ function DB(options) {
this.subscriptions = {};
this._operationsCount = 0;
this.GENESIS_HASH = constants.BITCOIN_GENESIS_HASH[this.node.getNetworkName()];
this.node.on('stopping', function() {
log.warn('Node is stopping, gently closing the database.');
@@ -158,8 +159,10 @@ DB.prototype.put = function(key, value, options) {
return;
}
self._operationsCount++;
self._store.put(key, value, options, function(err) {
self._operationsCount--;
if (err) {
self.emit('error', err);
return;
@@ -176,8 +179,11 @@ DB.prototype.batch = function(ops, options) {
return;
}
self._operationsCount += ops.length;
self._store.batch(ops, options, function(err) {
self._operationsCount -= ops.length;
if (err) {
self.emit('error', err);
return;
@@ -212,6 +218,7 @@ DB.prototype.createKeyStream = function(op) {
DB.prototype.stop = function(callback) {
var self = this;
self._stopping = true;
self.close(callback);
};
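One common use for a counter like _operationsCount, not shown in this diff, is to let stop() drain in-flight writes before closing the store. A hypothetical sketch, assuming the counter is maintained exactly as in put and batch above:

// Hypothetical helper: poll until pending store operations complete.
DB.prototype._waitForDrain = function(callback) {
  var self = this;
  (function check() {
    if (self._operationsCount <= 0) {
      return callback();
    }
    setTimeout(check, 100); // still draining; check again shortly
  })();
};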