Merge pull request #703 from braydonf/blockstore

Add file block storage
This commit is contained in:
Braydon Fuller 2019-04-12 10:55:27 -07:00
commit d601b6a303
No known key found for this signature in database
GPG Key ID: F24F232D108B3AD4
22 changed files with 3321 additions and 31 deletions

View File

@ -1,6 +1,26 @@
# Bcoin release notes & changelog
## v2.0.0
## v2.0.0-dev
### How to upgrade
The way that block data is stored has changed for greater performance,
efficiency, reliability and portability. To upgrade to the new disk layout
it's necessary to move block data from LevelDB (e.g. `~/.bcoin/chain`) to
a new file based block storage (e.g. `~/.bcoin/blocks`).
To do this you can run:
```
node ./migrate/chaindb4to5.js /path/to/bcoin/chain
```
The migration will take 1-3 hours, depending on hardware. The block data
will now be stored at `/path/to/bcoin/blocks`, after the data has been moved
the chain database will be compacted to free disk space.
Alternatively, you can also sync the chain again; however, the above
migration will be faster as additional network bandwidth won't be used
for downloading the blocks again.
### Wallet API changes
@ -79,6 +99,7 @@
### Other changes
- A new module for storing block data in files.
- Use of `buffer-map` for storing hashes
(see https://github.com/bcoin-org/bcoin/issues/533).
- Use of `bsert` for assertions.

476
bench/blockstore.js Normal file
View File

@ -0,0 +1,476 @@
/*!
* bench/blockstore.js - benchmark blockstore for bcoin
*
* This can be run to benchmark the performance of the blockstore
* module for writing, reading and pruning block data. Results are
* written to stdout as JSON or formatted bench results.
*
* Usage:
* node ./blockstore.js [--maxfile=<bytes>] [--total=<bytes>]
* [--location=<path>] [--store=<name>]
* [--output=<name>] [--unsafe]
*
* Options:
* - `maxfile` The maximum file size (applies to "file" store).
* - `total` The total number of block bytes to write.
* - `location` The location to store block data.
* - `store` This can be "file" or "level".
* - `output` This can be "json", "bench" or "benchjson".
* - `unsafe` This will allocate block data directly from memory
* instead of random, it is faster.
*
* Copyright (c) 2019, Braydon Fuller (MIT License).
* https://github.com/bcoin-org/bcoin
*/
'use strict';
process.title = 'blockstore-bench';
const {isAbsolute} = require('path');
const {mkdirp} = require('bfile');
const random = require('bcrypto/lib/random');
const {BufferMap} = require('buffer-map');
const {
FileBlockStore,
LevelBlockStore
} = require('../lib/blockstore');
// Command-line option specification consumed by processArgs(). Each
// entry defines:
// - `value`: whether the option expects a value (false = boolean flag)
// - `parse`: optional parser applied to the raw string value
// - `valid`: predicate the final value must satisfy
// - `fallback`: default used when the option is not supplied
const config = {
// Maximum size of each flat block file (file store only).
'maxfile': {
value: true,
parse: a => parseInt(a),
valid: a => Number.isSafeInteger(a),
fallback: 128 * 1024 * 1024
},
// Total number of block bytes to write during the benchmark.
'total': {
value: true,
parse: a => parseInt(a),
valid: a => Number.isSafeInteger(a),
fallback: 3 * 1024 * 1024 * 1024
},
// Absolute path where block data is stored.
'location': {
value: true,
valid: a => isAbsolute(a),
fallback: '/tmp/bcoin-bench-blockstore'
},
// Which blockstore backend to benchmark.
'store': {
value: true,
valid: a => (a === 'file' || a === 'level'),
fallback: 'file'
},
// Output reporter format.
'output': {
value: true,
valid: a => (a === 'json' || a === 'bench' || a === 'benchjson'),
fallback: 'bench'
},
// Use Buffer.allocUnsafe instead of random bytes (faster setup).
'unsafe': {
value: false,
valid: a => (a === true || a === false),
fallback: false
}
};
/**
* These block sizes were generated from bitcoin mainnet blocks by putting
* sizes into bins of 256 ^ (2 * n) as the upper bound and calculating
* the percentage of each and then distributing to roughly match the
* percentage of the following:
*
* |-------------|------------|
* | percentage | bytes |
* |-------------|------------|
* | 23.4055 | 1048576 |
* | 15.5338 | 256 |
* | 12.2182 | 262144 |
* | 8.4079 | 524288 |
* | 7.1289 | 131072 |
* | 6.9197 | 65536 |
* | 6.7073 | 2097152 |
* | 4.6753 | 32768 |
* | 3.9695 | 4096 |
* | 3.3885 | 16384 |
* | 2.6526 | 8192 |
* | 2.0048 | 512 |
* | 1.587 | 1024 |
* | 1.3976 | 2048 |
* | 0.0032 | 4194304 |
* |-------------|------------|
*/
// Sample of block sizes (in bytes) arranged to approximate the
// mainnet size distribution described in the table above. The write
// phase cycles through this list until `total` bytes are written.
const distribution = [
1048576, 256, 256, 524288, 262144, 256, 131072, 256, 524288, 256, 131072,
1048576, 262144, 1048576, 2097152, 256, 1048576, 65536, 256, 262144, 8192,
32768, 32768, 256, 1048576, 524288, 2097152, 1024, 1048576, 1048576, 131072,
131072, 262144, 512, 1048576, 1048576, 1024, 1048576, 1048576, 262144, 2048,
262144, 256, 1048576, 131072, 4096, 524288, 65536, 4096, 65536, 131072,
2097152, 2097152, 2097152, 256, 524288, 4096, 262144, 65536, 65536, 262144,
16384, 1048576, 32768, 262144, 1048576, 256, 131072, 1048576, 1048576,
1048576, 8192, 1048576, 256, 16384, 1048576, 256, 256, 524288, 256, 32768,
16384, 32768, 1048576, 512, 4096, 1048576, 1048576, 524288, 65536, 2097152,
512, 262144, 8192, 524288, 131072, 65536, 16384, 2048, 262144, 1048576,
1048576, 256, 524288, 262144, 4194304, 262144, 2097152
];
// Main entry point: parse arguments, construct the chosen store and
// output reporter, then run the four benchmark phases (write, read,
// random read, prune) and report timings for each operation.
(async () => {
let settings = null;
try {
settings = processArgs(process.argv, config);
} catch (err) {
console.log(err.message);
process.exit(1);
}
await mkdirp(settings.location);
let store = null;
let output = null;
if (settings.store === 'file') {
store = new FileBlockStore({
location: settings.location,
maxFileLength: settings.maxfile
});
} else if (settings.store === 'level') {
store = new LevelBlockStore({
location: settings.location
});
}
if (settings.output === 'bench') {
output = new BenchOutput();
} else if (settings.output === 'benchjson') {
output = new BenchJSONOutput();
} else if (settings.output === 'json') {
output = new JSONOutput();
}
await store.open();
// Written hashes (in order) and their lengths, so later phases can
// read and prune exactly what was written.
const hashes = [];
const lengths = new BufferMap();
output.start();
// 1. Write data to the block store
let written = 0;
async function write() {
// Walk the size distribution once, writing a randomly-keyed
// block of each size; stop early once the byte budget is hit.
for (const length of distribution) {
const hash = random.randomBytes(32);
let raw = null;
if (settings.unsafe) {
// Uninitialized memory: faster than generating random bytes.
raw = Buffer.allocUnsafe(length);
} else {
raw = random.randomBytes(length);
}
const start = process.hrtime();
await store.write(hash, raw);
const elapsed = process.hrtime(start);
hashes.push(hash);
lengths.set(hash, length);
written += length;
output.result('write', start, elapsed, length);
if (written >= settings.total)
break;
}
}
// Repeat the distribution until the requested total is written.
while (written < settings.total)
await write();
// 2. Read data from the block store
for (const hash of hashes) {
const start = process.hrtime();
const raw = await store.read(hash);
const elapsed = process.hrtime(start);
output.result('read', start, elapsed, raw.length);
}
// 3. Read data not in the order it was written (random)
for (let i = 0; i < hashes.length; i++) {
// NOTE(review): scaling by (hashes.length - 1) with truncation
// appears to make the last index nearly unreachable — confirm
// against random.randomInt's range if uniformity matters.
const rand = random.randomInt() / 0xffffffff * (hashes.length - 1) | 0;
const hash = hashes[rand];
const start = process.hrtime();
const raw = await store.read(hash);
const elapsed = process.hrtime(start);
output.result('randomread', start, elapsed, raw.length);
}
// 4. Prune data from the block store
for (const hash of hashes) {
const start = process.hrtime();
await store.prune(hash);
const elapsed = process.hrtime(start);
const length = lengths.get(hash);
output.result('prune', start, elapsed, length);
}
output.end();
await store.close();
})().catch((err) => {
console.error(err);
process.exit(1);
});
/**
 * Streams every benchmark result to stdout as one element of a
 * single JSON array, suitable for piping into analysis tools.
 */
class JSONOutput {
  /**
   * Create a streaming JSON reporter.
   * @constructor
   */
  constructor() {
    this.time = process.hrtime();
    this.index = 0;
  }

  /**
   * Open the JSON array.
   */
  start() {
    process.stdout.write('[');
  }

  /**
   * Append one result object to the array.
   * @param {String} type - Operation name (e.g. 'write').
   * @param {Array} start - hrtime tuple at operation start.
   * @param {Array} elapsed - hrtime tuple of operation duration.
   * @param {Number} length - Number of bytes involved.
   */
  result(type, start, elapsed, length) {
    const separator = this.index > 0 ? ',' : '';
    const offset = [start[0] - this.time[0], start[1] - this.time[1]];
    const began = hrToMicro(offset);
    const took = hrToMicro(elapsed);
    const record = `{"type":"${type}","start":${began},`
      + `"elapsed":${took},"length":${length},`
      + `"index":${this.index}}`;

    process.stdout.write(separator + record);
    this.index += 1;
  }

  /**
   * Close the JSON array.
   */
  end() {
    process.stdout.write(']');
  }
}
/**
 * Collects timing samples in memory and, when the benchmark
 * finishes, prints one human-readable summary table per operation
 * type to stdout. Reports an operation count every five seconds
 * while running.
 */
class BenchOutput {
  /**
   * Create a table-based reporter.
   * @constructor
   */
  constructor() {
    this.time = process.hrtime();
    this.index = 0;
    this.results = {};
    this.interval = null;
    this.stdout = process.stdout;
  }

  /**
   * Announce the run and start periodic progress output.
   */
  start() {
    this.stdout.write('Starting benchmark...\n');

    this.interval = setInterval(() => {
      this.stdout.write(`Operation count=${this.index}\n`);
    }, 5000);
  }

  /**
   * Record one timed operation, bucketed by type and length.
   */
  result(type, start, elapsed, length) {
    const micro = hrToMicro(elapsed);

    if (!this.results[type])
      this.results[type] = {};

    if (!this.results[type][length])
      this.results[type][length] = [];

    this.results[type][length].push(micro);
    this.index += 1;
  }

  /**
   * Stop progress output and print the summary tables.
   */
  end() {
    clearInterval(this.interval);
    this.stdout.write('Benchmark finished.\n');

    // Left-align a cell value in a 15-character column; numbers are
    // rendered with two decimal places.
    function cell(value) {
      if (typeof value === 'number')
        value = value.toFixed(2);

      if (typeof value !== 'string')
        value = value.toString();

      while (value.length < 15)
        value = `${value} `;

      return value;
    }

    // Center a heading within the 85-character table width.
    function heading(value) {
      if (typeof value !== 'string')
        value = value.toString();

      while (value.length < 85)
        value = ` ${value} `;

      if (value.length > 85)
        value = value.slice(0, 85);

      return value;
    }

    const columns = ['length', 'operations', 'min', 'max',
                     'average', 'median'];

    for (const type in this.results) {
      this.stdout.write('\n');
      this.stdout.write(`${heading(type)}\n`);
      this.stdout.write(`${'='.repeat(85)}\n`);
      this.stdout.write(`${columns.map(cell).join('')}\n`);
      this.stdout.write(`${'-'.repeat(85)}\n`);

      for (const length in this.results[type]) {
        const stats = calculate(this.results[type][length]);
        const row = [
          cell(length),
          cell(stats.operations.toString()),
          cell(stats.min),
          cell(stats.max),
          cell(stats.average),
          cell(stats.median)
        ];

        this.stdout.write(`${row.join('')}\n`);
      }

      this.stdout.write('\n');
    }

    this.stdout.write('\n');
  }
}
/**
 * Collects timing samples in memory and prints an aggregated
 * pretty-printed JSON report to stdout when the benchmark ends.
 */
class BenchJSONOutput {
  /**
   * Create a JSON summary reporter.
   * @constructor
   */
  constructor() {
    this.time = null;
    this.results = {};
    this.stdout = process.stdout;
  }

  /**
   * Start the wall-clock timer for the report.
   */
  start() {
    this.time = process.hrtime();
  }

  /**
   * Record one timed operation, bucketed by type and length.
   */
  result(type, start, elapsed, length) {
    const micro = hrToMicro(elapsed);
    const byType = this.results[type] || (this.results[type] = {});
    const samples = byType[length] || (byType[length] = []);

    samples.push(micro);
  }

  /**
   * Aggregate all samples and write the report as JSON.
   */
  end() {
    const report = {
      summary: [],
      time: hrToMicro(process.hrtime(this.time)),
      elapsed: 0
    };

    for (const type in this.results) {
      for (const length in this.results[type]) {
        const stats = calculate(this.results[type][length]);

        report.elapsed += stats.total;
        report.summary.push({
          type: type,
          length: length,
          operations: stats.operations,
          min: stats.min,
          max: stats.max,
          average: stats.average,
          median: stats.median
        });
      }
    }

    this.stdout.write(JSON.stringify(report, null, 2));
    this.stdout.write('\n');
  }
}
/**
 * Convert a process.hrtime() tuple of [seconds, nanoseconds] into
 * microseconds.
 * @param {Array} time - The hrtime tuple.
 * @returns {Number} Elapsed time in microseconds.
 */
function hrToMicro(time) {
  const [seconds, nanoseconds] = time;
  return seconds * 1000000 + nanoseconds / 1000;
}
/**
 * Compute summary statistics for a set of timing samples.
 * The caller's array is not modified.
 * @param {Number[]} times - Sample durations in microseconds.
 * @returns {Object} `{total, operations, min, max, average, median}`.
 */
function calculate(times) {
  // Sort a copy so the caller's array is left untouched (the
  // previous implementation sorted the input in place).
  const sorted = [...times].sort((a, b) => a - b);
  const operations = sorted.length;

  const total = sorted.reduce((sum, micro) => sum + micro, 0);

  // min/max fall directly out of the sorted order; the sentinel
  // values for the empty case match the original behavior.
  const min = operations > 0 ? sorted[0] : Infinity;
  const max = operations > 0 ? sorted[operations - 1] : 0;

  const average = total / operations;

  // Upper median for even-length inputs (same as the original).
  const median = sorted[operations / 2 | 0];

  return {
    total,
    operations,
    min,
    max,
    average,
    median
  };
}
/**
 * Parse command-line arguments against an option specification.
 * Arguments may be given as `--key=value`, `--key value` or, for
 * boolean options, as a bare `--key` flag.
 * @param {String[]} argv - The argument vector (e.g. process.argv);
 * the first two entries are skipped.
 * @param {Object} config - Option spec: for each key, `value`
 * (expects a value), `parse` (optional parser), `valid` (predicate)
 * and `fallback` (default).
 * @returns {Object} Map of option names to resolved values.
 * @throws {Error} On unknown options, unexpected or invalid values.
 */
function processArgs(argv, config) {
  const args = {};

  for (const key in config)
    args[key] = config[key].fallback;

  // Iterate the supplied argv. (Bug fix: the previous implementation
  // read process.argv directly, silently ignoring this parameter.)
  for (let i = 2; i < argv.length; i++) {
    const arg = argv[i];
    const match = arg.match(/^(-){1,2}([a-z]+)(=)?(.*)?$/);

    if (!match)
      throw new Error(`Unexpected argument: ${arg}.`);

    const key = match[2];
    let value = match[4];

    if (!config[key])
      throw new Error(`Invalid argument: ${arg}.`);

    if (config[key].value && !value) {
      // Value supplied as the next argument: `--key value`.
      value = argv[i + 1];
      i++;
    } else if (!config[key].value && !value) {
      // Bare flag: presence means true.
      value = true;
    } else if (!config[key].value && value) {
      throw new Error(`Unexpected value: ${key}=${value}`);
    }

    if (config[key].parse)
      value = config[key].parse(value);

    // Falsy parse results keep the fallback (matching the original
    // behavior); the fallback is then validated below.
    if (value)
      args[key] = value;

    if (!config[key].valid(args[key]))
      throw new Error(`Invalid value: ${key}=${value}`);
  }

  return args;
}

View File

@ -55,6 +55,7 @@ bcoin.set = function set(network) {
// Blockchain
bcoin.define('blockchain', './blockchain');
bcoin.define('blockstore', './blockstore');
bcoin.define('Chain', './blockchain/chain');
bcoin.define('ChainEntry', './blockchain/chainentry');

View File

@ -49,6 +49,7 @@ class Chain extends AsyncEmitter {
this.network = this.options.network;
this.logger = this.options.logger.context('chain');
this.blocks = this.options.blocks;
this.workers = this.options.workers;
this.db = new ChainDB(this.options);
@ -2662,6 +2663,7 @@ class ChainOptions {
constructor(options) {
this.network = Network.primary;
this.logger = Logger.global;
this.blocks = null;
this.workers = null;
this.prefix = null;
@ -2695,6 +2697,13 @@ class ChainOptions {
*/
fromOptions(options) {
if (!options.spv) {
assert(options.blocks && typeof options.blocks === 'object',
'Chain requires a blockstore.');
}
this.blocks = options.blocks;
if (options.network != null)
this.network = Network.get(options.network);

View File

@ -40,8 +40,10 @@ class ChainDB {
this.options = options;
this.network = this.options.network;
this.logger = this.options.logger.context('chaindb');
this.blocks = this.options.blocks;
this.db = bdb.create(this.options);
this.stateCache = new StateCache(this.network);
this.state = new ChainState();
this.pending = null;
@ -60,7 +62,7 @@ class ChainDB {
this.logger.info('Opening ChainDB...');
await this.db.open();
await this.db.verify(layout.V.encode(), 'chain', 4);
await this.db.verify(layout.V.encode(), 'chain', 5);
const state = await this.getState();
@ -101,7 +103,7 @@ class ChainDB {
* @returns {Promise}
*/
close() {
async close() {
return this.db.close();
}
@ -760,7 +762,6 @@ class ChainDB {
const start = pruneAfter + 1;
const end = height - keepBlocks;
const b = this.db.batch();
for (let i = start; i <= end; i++) {
const hash = await this.getHash(i);
@ -768,8 +769,8 @@ class ChainDB {
if (!hash)
throw new Error(`Cannot find hash for ${i}.`);
b.del(layout.b.encode(hash));
b.del(layout.u.encode(hash));
await this.blocks.pruneUndo(hash);
await this.blocks.prune(hash);
}
try {
@ -778,16 +779,12 @@ class ChainDB {
const flags = ChainFlags.fromOptions(options);
assert(flags.prune);
b.put(layout.O.encode(), flags.toRaw());
await b.write();
await this.db.put(layout.O.encode(), flags.toRaw());
} catch (e) {
options.prune = false;
throw e;
}
await this.db.compactRange();
return true;
}
@ -1014,7 +1011,7 @@ class ChainDB {
*/
async getUndoCoins(hash) {
const data = await this.db.get(layout.u.encode(hash));
const data = await this.blocks.readUndo(hash);
if (!data)
return new UndoCoins();
@ -1052,7 +1049,7 @@ class ChainDB {
if (!hash)
return null;
return this.db.get(layout.b.encode(hash));
return this.blocks.read(hash);
}
/**
@ -1464,6 +1461,10 @@ class ChainDB {
await this.commit();
// Remove undo data _after_ successful commit.
if (this.blocks)
await this.blocks.pruneUndo(entry.hash);
return view;
}
@ -1580,6 +1581,12 @@ class ChainDB {
await this.commit();
// Remove block and undo data _after_ successful commit.
if (this.blocks) {
await this.blocks.pruneUndo(tip.hash);
await this.blocks.prune(tip.hash);
}
// Update caches _after_ successful commit.
this.cacheHeight.remove(tip.height);
this.cacheHash.remove(tip.hash);
@ -1603,15 +1610,23 @@ class ChainDB {
// one giant atomic write!
this.start();
let hashes = [];
try {
for (const tip of tips)
await this._removeChain(tip);
hashes = hashes.concat(await this._removeChain(tip));
} catch (e) {
this.drop();
throw e;
}
await this.commit();
// SPV doesn't store blocks.
if (this.blocks) {
for (const hash of hashes)
await this.blocks.prune(hash);
}
}
/**
@ -1629,6 +1644,8 @@ class ChainDB {
this.logger.debug('Removing alternate chain: %h.', tip.hash);
const hashes = [];
for (;;) {
if (await this.isMainChain(tip))
break;
@ -1639,7 +1656,10 @@ class ChainDB {
this.del(layout.p.encode(tip.hash));
this.del(layout.h.encode(tip.hash));
this.del(layout.e.encode(tip.hash));
this.del(layout.b.encode(tip.hash));
// Queue block to be pruned on
// successful write.
hashes.push(tip.hash);
// Queue up hash to be removed
// on successful write.
@ -1648,6 +1668,8 @@ class ChainDB {
tip = await this.getPrevious(tip);
assert(tip);
}
return hashes;
}
/**
@ -1665,9 +1687,8 @@ class ChainDB {
if (this.options.spv)
return;
// Write actual block data (this may be
// better suited to flat files in the future).
this.put(layout.b.encode(hash), block.toRaw());
// Write actual block data.
await this.blocks.write(hash, block.toRaw());
if (!view)
return;
@ -1691,8 +1712,6 @@ class ChainDB {
if (!block)
throw new Error('Block not found.');
this.del(layout.b.encode(block.hash()));
return this.disconnectBlock(entry, block);
}
@ -1762,7 +1781,7 @@ class ChainDB {
// Write undo coins (if there are any).
if (!view.undo.isEmpty())
this.put(layout.u.encode(hash), view.undo.commit());
await this.blocks.writeUndo(hash, view.undo.commit());
// Prune height-288 if pruning is enabled.
return this.pruneBlock(entry);
@ -1820,9 +1839,6 @@ class ChainDB {
// Commit new coin state.
this.saveView(view);
// Remove undo coins.
this.del(layout.u.encode(hash));
return view;
}
@ -1851,8 +1867,8 @@ class ChainDB {
if (!hash)
return;
this.del(layout.b.encode(hash));
this.del(layout.u.encode(hash));
await this.blocks.pruneUndo(hash);
await this.blocks.prune(hash);
}
/**

View File

@ -19,10 +19,10 @@ const bdb = require('bdb');
* H[height] -> hash
* n[hash] -> next hash
* p[hash] -> tip index
* b[hash] -> block
* b[hash] -> block (deprecated)
* t[hash] -> extended tx
* c[hash] -> coins
* u[hash] -> undo coins
* u[hash] -> undo coins (deprecated)
* v[bit][hash] -> versionbits state
* T[addr-hash][hash] -> dummy (tx by address)
* C[addr-hash][hash][index] -> dummy (coin by address)

147
lib/blockstore/README.md Normal file
View File

@ -0,0 +1,147 @@
# BlockStore
BlockStore `lib/blockstore` is a bcoin module intended to be used as a backend
for storing block and undo coin data. It includes a backend that uses flat
files for storage. Its key benefit is performance improvements across the
board in disk I/O, which is the major bottleneck for the initial block sync.
Blocks are stored in wire format directly to the disk, while some additional
metadata is stored in a key-value store, i.e. LevelDB, to help with the data
management. Both the flat files and the metadata db, are exposed through a
unified interface so that the users can simply read and write blocks without
having to worry about managing data layout on the disk.
In addition to blocks, undo coin data, which is used to revert the changes
applied by a block (in case of a re-org), is also stored on disk, in a similar
fashion.
## Interface
The `AbstractBlockStore` interface defines the following abstract methods to be
defined by concrete implementations:
### Basic housekeeping
* `ensure()`
* `open()`
* `close()`
### Block I/O
* `read(hash, offset, size)`
* `write(hash, data)`
* `prune(hash)`
* `has(hash)`
### Undo Coins I/O
* `readUndo(hash)`
* `writeUndo(hash, data)`
* `pruneUndo(hash)`
* `hasUndo(hash)`
The interface is implemented by `FileBlockStore` and `LevelBlockStore`, backed
by flat files and LevelDB respectively. We will focus here on the
`FileBlockStore`, which is the backend that implements a flat file based
storage.
## FileBlockStore
`FileBlockStore` implements the flat file backend for `AbstractBlockStore`. As
the name suggests, it uses flat files for block/undo data and LevelDB for
metadata.
Let's create a file blockstore, write a block and walk-through the disk storage:
```js
// nodejs
const store = blockstore.create({
network: 'regtest',
prefix: '/tmp/blockstore'
});
await store.ensure();
await store.open();
await store.write(hash, block);
```
```sh
// shell
tree /tmp/blockstore/
/tmp/blockstore/
└── blocks
├── blk00000.dat
└── index
├── LOG
...
```
As we can see, the store writes to the file `blk00000.dat` in
`/tmp/blockstore/blocks/`, and the metadata is written to
`/tmp/blockstore/index`.
Raw blocks are written to the disk in flat files named `blkXXXXX.dat`, where
`XXXXX` is the number of the file currently being written, starting at
`blk00000.dat`. We store the file number as an integer in the metadata db,
expanding the digits to five places.
The metadata db key `layout.F` tracks the last file used for writing. Each
file in turn tracks the number of blocks in it, the number of bytes used and
its max length. This data is stored in the db key `layout.f`.
f['block'][0] => [1, 5, 128] // blk00000.dat: 1 block written, 5 bytes used, 128 bytes length
F['block'] => 0 // writing to file blk00000.dat
Each raw block data is preceded by a magic marker defined as follows, to help
identify data written by us:
magic (8 bytes) = network.magic (4 bytes) + block data length (4 bytes)
For raw undo block data, the hash of the block is also included:
magic (40 bytes) = network.magic (4 bytes) + length (4 bytes) + hash (32 bytes)
But a marker alone is not sufficient to track the data we write to the files.
For each block we write, we need to store a pointer to the position in the file
where to start reading, and the size of the data we need to seek. This data is
stored in the metadata db using the key `layout.b`:
b['block']['hash'] => [0, 8, 285] // 'hash' points to file blk00000.dat, position 8, size 285
Using this we know that our block is in `blk00000.dat`, bytes 8 through 285.
Note that the position indicates that the block data is preceded by 8 bytes of
the magic marker.
Examples:
> `store.write('hash', 'block')`
blk00000:
0xfabfb5da05000000 block
index:
b['block']['hash'] => [0, 8, 5]
f['block'][0] => [1, 13, 128]
F['block'] => 0
> `store.write('hash1', 'block1')`
blk00000:
0xfabfb5da05000000 block 0xfabfb5da06000000 block1
index:
b['block']['hash'] => [0, 8, 5]
b['block']['hash1'] => [0, 13, 6]
f['block'][0] => [2, 19, 128]
F['block'] => 0
> `store.prune('hash1')`
blk00000:
0xfabfb5da05000000 block 0xfabfb5da06000000 block1
index:
b['block']['hash'] => [0, 8, 5]
f['block'][0] => [1, 19, 128]
F['block'] => 0

142
lib/blockstore/abstract.js Normal file
View File

@ -0,0 +1,142 @@
/*!
* blockstore/abstract.js - abstract blockstore for bcoin
* Copyright (c) 2019, Braydon Fuller (MIT License).
* https://github.com/bcoin-org/bcoin
*/
'use strict';
const Logger = require('blgr');
/**
* Abstract Block Store
*
* @alias module:blockstore.AbstractBlockStore
* @abstract
*/
class AbstractBlockStore {
  /**
   * Create an abstract blockstore.
   * @constructor
   * @param {Object} [options]
   * @param {Logger} [options.logger] - Logger used to create a
   * 'blockstore' context (defaults to the global logger).
   */
  constructor(options) {
    this.options = options || {};

    const logger = this.options.logger != null
      ? this.options.logger
      : Logger.global;

    this.logger = logger.context('blockstore');
  }

  /**
   * Ensure that any required resources (e.g. directories) exist
   * before the store is opened. Must be implemented by subclasses.
   * @returns {Promise}
   */
  async ensure() {
    throw new Error('Abstract method.');
  }

  /**
   * Open any necessary resources and prepare the store to be
   * queried. Must be implemented by subclasses.
   * @returns {Promise}
   */
  async open() {
    throw new Error('Abstract method.');
  }

  /**
   * Release resources and close the store. Must be implemented
   * by subclasses.
   * @returns {Promise}
   */
  async close() {
    throw new Error('Abstract method.');
  }

  /**
   * Store block undo coin data for a block hash. Must be
   * implemented by subclasses.
   * @param {Buffer} hash - The block hash.
   * @param {Buffer} data - The undo coin data.
   * @returns {Promise}
   */
  async writeUndo(hash, data) {
    throw new Error('Abstract method.');
  }

  /**
   * Store block data for a block hash. Must be implemented by
   * subclasses.
   * @param {Buffer} hash - The block hash.
   * @param {Buffer} data - The block data.
   * @returns {Promise}
   */
  async write(hash, data) {
    throw new Error('Abstract method.');
  }

  /**
   * Retrieve block undo coin data. Must be implemented by
   * subclasses.
   * @param {Buffer} hash - The block hash.
   * @returns {Promise}
   */
  async readUndo(hash) {
    throw new Error('Abstract method.');
  }

  /**
   * Retrieve block data; smaller portions of the block can be
   * read via the offset and size arguments. Must be implemented
   * by subclasses.
   * @param {Buffer} hash - The block hash.
   * @param {Number} [offset] - Offset within the block.
   * @param {Number} [size] - Number of bytes to read.
   * @returns {Promise}
   */
  async read(hash, offset, size) {
    throw new Error('Abstract method.');
  }

  /**
   * Free resources associated with stored undo coin data. Must
   * be implemented by subclasses.
   * @param {Buffer} hash - The block hash.
   * @returns {Promise}
   */
  async pruneUndo(hash) {
    throw new Error('Abstract method.');
  }

  /**
   * Free resources associated with stored block data. Must be
   * implemented by subclasses.
   * @param {Buffer} hash - The block hash.
   * @returns {Promise}
   */
  async prune(hash) {
    throw new Error('Abstract method.');
  }

  /**
   * Check whether undo coin data is stored and available. Must
   * be implemented by subclasses.
   * @param {Buffer} hash - The block hash.
   * @returns {Promise}
   */
  async hasUndo(hash) {
    throw new Error('Abstract method.');
  }

  /**
   * Check whether block data is stored and available. Must be
   * implemented by subclasses.
   * @param {Buffer} hash - The block hash.
   * @returns {Promise}
   */
  async has(hash) {
    throw new Error('Abstract method.');
  }
}
/*
* Expose
*/
module.exports = AbstractBlockStore;

31
lib/blockstore/common.js Normal file
View File

@ -0,0 +1,31 @@
/*!
* common.js - blockstore constants for bcoin
* Copyright (c) 2019, Braydon Fuller (MIT License).
* https://github.com/bcoin-org/bcoin
*/
'use strict';
/**
* @module blockstore/common
*/
/**
* Block data types.
* @enum {Number}
*/
exports.types = {
BLOCK: 1,
UNDO: 2
};
/**
* File prefixes for block data types.
* @enum {String}
*/
exports.prefixes = {
1: 'blk',
2: 'blu'
};

590
lib/blockstore/file.js Normal file
View File

@ -0,0 +1,590 @@
/*!
* blockstore/file.js - file blockstore for bcoin
* Copyright (c) 2019, Braydon Fuller (MIT License).
* https://github.com/bcoin-org/bcoin
*/
'use strict';
const {isAbsolute, resolve, join} = require('path');
const bdb = require('bdb');
const assert = require('bsert');
const fs = require('bfile');
const bio = require('bufio');
const hash256 = require('bcrypto/lib/hash256');
const Network = require('../protocol/network');
const AbstractBlockStore = require('./abstract');
const {BlockRecord, FileRecord} = require('./records');
const layout = require('./layout');
const {types, prefixes} = require('./common');
/**
* File Block Store
*
* @alias module:blockstore:FileBlockStore
* @abstract
*/
class FileBlockStore extends AbstractBlockStore {
/**
 * Create a blockstore that stores blocks in files.
 * @constructor
 * @param {Object} options
 * @param {String} options.location - Absolute path to the block
 * data directory; the index db lives in `<location>/index`.
 * @param {Number} [options.cacheSize] - Cache size for the index db.
 * @param {Number} [options.maxFileLength=128MiB] - Maximum length
 * in bytes of each flat block file.
 * @param {(Network|String)} [options.network] - Network whose magic
 * bytes prefix each stored block (defaults to mainnet).
 */
constructor(options) {
super(options);
assert(isAbsolute(options.location), 'Location not absolute.');
this.location = options.location;
this.indexLocation = resolve(this.location, './index');
// Metadata index (file/position/length records). Compression is
// disabled; the records are small and already compact.
this.db = bdb.create({
location: this.indexLocation,
cacheSize: options.cacheSize,
compression: false
});
this.maxFileLength = options.maxFileLength || 128 * 1024 * 1024;
assert(Number.isSafeInteger(this.maxFileLength),
'Invalid max file length.');
this.network = Network.primary;
if (options.network != null)
this.network = Network.get(options.network);
// Guard flag against concurrent writes (see _write).
this.writing = false;
}
/**
* Compares the number of files in the directory
* with the recorded number of files.
* @param {Number} type - The type of block data
* @private
* @returns {Promise}
*/
async check(type) {
const prefix = prefixes[type];
const regexp = new RegExp(`^${prefix}(\\d{5})\\.dat$`);
const all = await fs.readdir(this.location);
const dats = all.filter(f => regexp.test(f));
const filenos = dats.map(f => parseInt(f.match(regexp)[1]));
let missing = false;
for (const fileno of filenos) {
const rec = await this.db.get(layout.f.encode(type, fileno));
if (!rec) {
missing = true;
break;
}
}
return {missing, filenos};
}
/**
 * Creates indexes from files for a block type. Reads the hash of
 * the block data from the magic prefix, except for a block, for
 * which the hash is computed from the block header.
 * @private
 * @param {Number} type - The type of block data
 * @returns {Promise}
 */
async _index(type) {
const {missing, filenos} = await this.check(type);
// Nothing to do if every data file already has an index record.
if (!missing)
return;
this.logger.info('Indexing block type %d...', type);
for (const fileno of filenos) {
const b = this.db.batch();
const filepath = this.filepath(type, fileno);
const data = await fs.readFile(filepath);
const reader = bio.read(data);
let magic = null;
let blocks = 0;
// Scan for magic markers; each marker prefixes one entry.
while (reader.left() >= 4) {
magic = reader.readU32();
// Move forward a byte from the last read
// if the magic doesn't match.
if (magic !== this.network.magic) {
reader.seek(-3);
continue;
}
let hash = null;
let position = 0;
let length = 0;
try {
length = reader.readU32();
if (type === types.BLOCK) {
// Block entries: hash is the double-SHA256 of the
// 80-byte header at the start of the block data.
position = reader.offset;
hash = hash256.digest(reader.readBytes(80, true));
reader.seek(length - 80);
} else {
// Other entries (undo): hash is part of the magic prefix.
hash = reader.readHash();
position = reader.offset;
reader.seek(length);
}
} catch (err) {
// Truncated/corrupt entry: log and keep scanning.
this.logger.warning(
'Unknown block in file: %s, reason: %s',
filepath, err.message);
continue;
}
const blockrecord = new BlockRecord({
file: fileno,
position: position,
length: length
});
blocks += 1;
b.put(layout.b.encode(type, hash), blockrecord.toRaw());
}
// Record the file usage so future writes append correctly.
const filerecord = new FileRecord({
blocks: blocks,
used: reader.offset,
length: this.maxFileLength
});
b.put(layout.f.encode(type, fileno), filerecord.toRaw());
await b.write();
this.logger.info('Indexed %d blocks (file=%s).', blocks, filepath);
}
}
/**
 * Ensures the index is consistent with the data files on disk,
 * reindexing both block and undo data if any files are missing
 * index records.
 * @private
 * @returns {Promise}
 */
async index() {
await this._index(types.BLOCK);
await this._index(types.UNDO);
}
/**
 * This method ensures that both the block storage directory
 * and index directory exist before opening. Creating the index
 * directory also creates its parent, the block directory.
 * @returns {Promise}
 */
async ensure() {
return fs.mkdirp(this.indexLocation);
}
/**
 * Opens the file block store. It will regenerate necessary block
 * indexing if the index is missing or inconsistent.
 * @returns {Promise}
 */
async open() {
this.logger.info('Opening FileBlockStore...');
await this.db.open();
// Verify the index db identity/version before trusting it.
await this.db.verify(layout.V.encode(), 'fileblockstore', 0);
await this.index();
}
/**
 * This closes the file block store and underlying
 * indexing databases.
 * @returns {Promise}
 */
async close() {
this.logger.info('Closing FileBlockStore...');
await this.db.close();
}
/**
* This method will determine the file path based on the file number
* and the current block data location.
* @private
* @param {Number} type - The type of block data
* @param {Number} fileno - The number of the file.
* @returns {Promise}
*/
filepath(type, fileno) {
const pad = 5;
let num = fileno.toString(10);
if (num.length > pad)
throw new Error('File number too large.');
while (num.length < pad)
num = `0${num}`;
let filepath = null;
const prefix = prefixes[type];
if (!prefix)
throw new Error('Unknown file prefix.');
filepath = join(this.location, `${prefix}${num}.dat`);
return filepath;
}
/**
* This method will select and potentially allocate a file to
* write a block based on the size and type.
* @private
* @param {Number} type - The type of block data
* @param {Number} length - The number of bytes
* @returns {Promise}
*/
async allocate(type, length) {
if (length > this.maxFileLength)
throw new Error('Block length above max file length.');
let fileno = 0;
let filerecord = null;
let filepath = null;
const last = await this.db.get(layout.F.encode(type));
if (last)
fileno = bio.readU32(last, 0);
filepath = this.filepath(type, fileno);
const rec = await this.db.get(layout.f.encode(type, fileno));
let touch = false;
if (rec) {
filerecord = FileRecord.fromRaw(rec);
} else {
touch = true;
filerecord = new FileRecord({
blocks: 0,
used: 0,
length: this.maxFileLength
});
}
if (filerecord.used + length > filerecord.length) {
fileno += 1;
filepath = this.filepath(type, fileno);
touch = true;
filerecord = new FileRecord({
blocks: 0,
used: 0,
length: this.maxFileLength
});
}
if (touch) {
const fd = await fs.open(filepath, 'w');
await fs.close(fd);
}
return {fileno, filerecord, filepath};
}
/**
 * This method stores block undo coin data in files.
 * @param {Buffer} hash - The block hash
 * @param {Buffer} data - The block data
 * @returns {Promise} Resolves to false if the data was already
 * stored, true otherwise.
 */
async writeUndo(hash, data) {
return this._write(types.UNDO, hash, data);
}
/**
 * This method stores block data in files.
 * @param {Buffer} hash - The block hash
 * @param {Buffer} data - The block data
 * @returns {Promise} Resolves to false if the data was already
 * stored, true otherwise.
 */
async write(hash, data) {
return this._write(types.BLOCK, hash, data);
}
/**
* This method stores block data in files with by appending
* data to the last written file and updating indexes to point
* to the file and position.
* @private
* @param {Number} type - The type of block data
* @param {Buffer} hash - The block hash
* @param {Buffer} data - The block data
* @returns {Promise}
*/
async _write(type, hash, data) {
if (this.writing)
throw new Error('Already writing.');
this.writing = true;
if (await this.db.has(layout.b.encode(type, hash))) {
this.writing = false;
return false;
}
let mlength = 8;
// Hash for a block is not stored with
// the magic prefix as it's read from the header
// of the block data.
if (type !== types.BLOCK)
mlength += 32;
const blength = data.length;
const length = data.length + mlength;
const bwm = bio.write(mlength);
bwm.writeU32(this.network.magic);
bwm.writeU32(blength);
if (type !== types.BLOCK)
bwm.writeHash(hash);
const magic = bwm.render();
const {
fileno,
filerecord,
filepath
} = await this.allocate(type, length);
const mposition = filerecord.used;
const bposition = filerecord.used + mlength;
const fd = await fs.open(filepath, 'r+');
let mwritten = 0;
let bwritten = 0;
try {
mwritten = await fs.write(fd, magic, 0, mlength, mposition);
bwritten = await fs.write(fd, data, 0, blength, bposition);
} finally {
await fs.close(fd);
}
if (mwritten !== mlength) {
this.writing = false;
throw new Error('Could not write block magic.');
}
if (bwritten !== blength) {
this.writing = false;
throw new Error('Could not write block.');
}
filerecord.blocks += 1;
filerecord.used += length;
const b = this.db.batch();
const blockrecord = new BlockRecord({
file: fileno,
position: bposition,
length: blength
});
b.put(layout.b.encode(type, hash), blockrecord.toRaw());
b.put(layout.f.encode(type, fileno), filerecord.toRaw());
const last = bio.write(4).writeU32(fileno).render();
b.put(layout.F.encode(type), last);
await b.write();
this.writing = false;
return true;
}
/**
* This method will retrieve block undo coin data.
* @param {Buffer} hash - The block hash
* @returns {Promise}
*/
async readUndo(hash) {
return this._read(types.UNDO, hash);
}
/**
* This method will retrieve block data. Smaller portions of the
* block (e.g. transactions) can be read by using the offset and
* length arguments.
* @param {Buffer} hash - The block hash
* @param {Number} offset - The offset within the block
* @param {Number} length - The number of bytes of the data
* @returns {Promise}
*/
async read(hash, offset, length) {
return this._read(types.BLOCK, hash, offset, length);
}
/**
* This methods reads data from disk by retrieving the index of
* the data and reading from the corresponding file and location.
* @private
* @param {Number} type - The type of block data
* @param {Buffer} hash - The block hash
* @param {Number} offset - The offset within the block
* @param {Number} length - The number of bytes of the data
* @returns {Promise}
*/
async _read(type, hash, offset, length) {
const raw = await this.db.get(layout.b.encode(type, hash));
if (!raw)
return null;
const blockrecord = BlockRecord.fromRaw(raw);
const filepath = this.filepath(type, blockrecord.file);
let position = blockrecord.position;
if (offset)
position += offset;
if (!length)
length = blockrecord.length;
if (offset + length > blockrecord.length)
throw new Error('Out-of-bounds read.');
const data = Buffer.alloc(length);
const fd = await fs.open(filepath, 'r');
let bytes = 0;
try {
bytes = await fs.read(fd, data, 0, length, position);
} finally {
await fs.close(fd);
}
if (bytes !== length)
throw new Error('Wrong number of bytes read.');
return data;
}
/**
* This will free resources for storing the block undo coin data.
* @param {Buffer} hash - The block hash
* @returns {Promise}
*/
async pruneUndo(hash) {
return this._prune(types.UNDO, hash);
}
/**
* This will free resources for storing the block data.
* @param {Buffer} hash - The block hash
* @returns {Promise}
*/
async prune(hash) {
return this._prune(types.BLOCK, hash);
}
/**
 * This will free resources for storing the block data. The block
 * data may not be deleted from disk immediately, the index for the
 * block is removed and will not be able to be read. The underlying
 * file is unlinked when all blocks in a file have been pruned.
 * @private
 * @param {Number} type - The type of block data
 * @param {Buffer} hash - The block hash
 * @returns {Promise} - Resolves with a boolean, false when the
 * block was not stored.
 */

async _prune(type, hash) {
  const braw = await this.db.get(layout.b.encode(type, hash));
  if (!braw)
    return false;

  const blockrecord = BlockRecord.fromRaw(braw);

  const fraw = await this.db.get(layout.f.encode(type, blockrecord.file));
  if (!fraw)
    return false;

  const filerecord = FileRecord.fromRaw(fraw);
  // NOTE(review): only `blocks` is decremented here; `used` still
  // counts the pruned bytes, so the space within the file is never
  // reused — presumably intentional as space is only reclaimed by
  // unlinking whole files. Confirm.
  filerecord.blocks -= 1;

  const b = this.db.batch();

  // Once the last block in the file is pruned, drop the file
  // record entirely; otherwise persist the decremented count.
  if (filerecord.blocks === 0)
    b.del(layout.f.encode(type, blockrecord.file));
  else
    b.put(layout.f.encode(type, blockrecord.file), filerecord.toRaw());

  // Remove the block index so the block can no longer be read.
  b.del(layout.b.encode(type, hash));

  await b.write();

  // Unlink only after the batch commits; a crash in between
  // leaves an orphaned file on disk but a consistent index.
  if (filerecord.blocks === 0)
    await fs.unlink(this.filepath(type, blockrecord.file));

  return true;
}
/**
* This will check if a block undo coin has been stored
* and is available.
* @param {Buffer} hash - The block hash
* @returns {Promise}
*/
async hasUndo(hash) {
return await this.db.has(layout.b.encode(types.UNDO, hash));
}
/**
* This will check if a block has been stored and is available.
* @param {Buffer} hash - The block hash
* @returns {Promise}
*/
async has(hash) {
return await this.db.has(layout.b.encode(types.BLOCK, hash));
}
}
/*
* Expose
*/
module.exports = FileBlockStore;

42
lib/blockstore/index.js Normal file
View File

@ -0,0 +1,42 @@
/*!
* blockstore/index.js - bitcoin blockstore for bcoin
* Copyright (c) 2019, Braydon Fuller (MIT License).
* https://github.com/bcoin-org/bcoin
*/
'use strict';
const {join} = require('path');
const AbstractBlockStore = require('./abstract');
const LevelBlockStore = require('./level');
const FileBlockStore = require('./file');
/**
* @module blockstore
*/
exports.create = (options) => {
const location = join(options.prefix, 'blocks');
if (options.memory) {
return new LevelBlockStore({
network: options.network,
logger: options.logger,
location: location,
cacheSize: options.cacheSize,
memory: options.memory
});
}
return new FileBlockStore({
network: options.network,
logger: options.logger,
location: location,
cacheSize: options.cacheSize
});
};
exports.AbstractBlockStore = AbstractBlockStore;
exports.FileBlockStore = FileBlockStore;
exports.LevelBlockStore = LevelBlockStore;

30
lib/blockstore/layout.js Normal file
View File

@ -0,0 +1,30 @@
/*!
* blockstore/layout.js - file blockstore data layout for bcoin
* Copyright (c) 2019, Braydon Fuller (MIT License).
* https://github.com/bcoin-org/bcoin
*/
'use strict';
const bdb = require('bdb');
/*
* Database Layout:
* V -> db version
* F[type] -> last file record by type
* f[type][fileno] -> file record by type and file number
* b[type][hash] -> block record by type and block hash
*/
const layout = {
  V: bdb.key('V'), // db version
  F: bdb.key('F', ['uint32']), // F[type] -> last file number by type
  f: bdb.key('f', ['uint32', 'uint32']), // f[type][fileno] -> file record
  b: bdb.key('b', ['uint32', 'hash256']) // b[type][hash] -> block record
};
/*
* Expose
*/
module.exports = layout;

189
lib/blockstore/level.js Normal file
View File

@ -0,0 +1,189 @@
/*!
* blockstore/level.js - leveldb blockstore for bcoin
* Copyright (c) 2019, Braydon Fuller (MIT License).
* https://github.com/bcoin-org/bcoin
*/
'use strict';
const bdb = require('bdb');
const fs = require('bfile');
const AbstractBlockStore = require('./abstract');
const layout = require('./layout');
const {types} = require('./common');
/**
 * LevelDB Block Store
 *
 * Stores block and undo coin data as values in a key/value
 * database (LevelDB).
 *
 * @alias module:blockstore:LevelBlockStore
 */

class LevelBlockStore extends AbstractBlockStore {
  /**
   * Create a blockstore that stores blocks in LevelDB.
   * @constructor
   * @param {Object} options
   * @param {String} options.location - The database location
   * @param {Number} [options.cacheSize] - Database cache size in bytes
   * @param {Boolean} [options.memory] - Keep the database in memory
   */

  constructor(options) {
    super(options);

    this.location = options.location;

    this.db = bdb.create({
      location: this.location,
      cacheSize: options.cacheSize,
      compression: false,
      memory: options.memory
    });
  }

  /**
   * This method ensures that the storage directory exists
   * before opening.
   * @returns {Promise}
   */

  async ensure() {
    return fs.mkdirp(this.location);
  }

  /**
   * Opens the block storage.
   * @returns {Promise}
   */

  async open() {
    this.logger.info('Opening LevelBlockStore...');

    await this.db.open();
    await this.db.verify(layout.V.encode(), 'levelblockstore', 0);
  }

  /**
   * Closes the block storage.
   * @returns {Promise}
   */

  async close() {
    this.logger.info('Closing LevelBlockStore...');

    await this.db.close();
  }

  /**
   * This method stores block undo coin data in LevelDB.
   * @param {Buffer} hash - The block hash
   * @param {Buffer} data - The block data
   * @returns {Promise}
   */

  async writeUndo(hash, data) {
    return this.db.put(layout.b.encode(types.UNDO, hash), data);
  }

  /**
   * This method stores block data in LevelDB.
   * @param {Buffer} hash - The block hash
   * @param {Buffer} data - The block data
   * @returns {Promise}
   */

  async write(hash, data) {
    return this.db.put(layout.b.encode(types.BLOCK, hash), data);
  }

  /**
   * This method will retrieve block undo coin data.
   * @param {Buffer} hash - The block hash
   * @returns {Promise}
   */

  async readUndo(hash) {
    return this.db.get(layout.b.encode(types.UNDO, hash));
  }

  /**
   * This method will retrieve block data. Smaller portions of the
   * block (e.g. transactions) can be returned using the offset and
   * length arguments. However, the entire block will be read as the
   * data is stored in a key/value database.
   * @param {Buffer} hash - The block hash
   * @param {Number} [offset] - The offset within the block
   * @param {Number} [length] - The number of bytes of the data,
   * defaults to the remainder of the block after the offset
   * @returns {Promise} - Resolves with the data or null when the
   * block is not stored.
   */

  async read(hash, offset, length) {
    let raw = await this.db.get(layout.b.encode(types.BLOCK, hash));

    // Previously a missing block with an offset argument threw a
    // TypeError (`raw.length` on null); return null consistently.
    if (!raw)
      return null;

    if (offset || length) {
      // Previously `read(hash, 0, length)` returned the whole
      // block because only a truthy offset triggered slicing.
      if (!offset)
        offset = 0;

      // Read through to the end of the block by default.
      if (!length)
        length = raw.length - offset;

      if (offset + length > raw.length)
        throw new Error('Out-of-bounds read.');

      raw = raw.slice(offset, offset + length);
    }

    return raw;
  }

  /**
   * This will free resources for storing the block undo coin data.
   * The block data may not be immediately removed from disk, and will
   * be reclaimed during LevelDB compaction.
   * @param {Buffer} hash - The block hash
   * @returns {Promise} - Resolves with a boolean, false when the
   * undo data was not stored.
   */

  async pruneUndo(hash) {
    if (!await this.hasUndo(hash))
      return false;

    await this.db.del(layout.b.encode(types.UNDO, hash));

    return true;
  }

  /**
   * This will free resources for storing the block data. The block
   * data may not be immediately removed from disk, and will be reclaimed
   * during LevelDB compaction.
   * @param {Buffer} hash - The block hash
   * @returns {Promise} - Resolves with a boolean, false when the
   * block was not stored.
   */

  async prune(hash) {
    if (!await this.has(hash))
      return false;

    await this.db.del(layout.b.encode(types.BLOCK, hash));

    return true;
  }

  /**
   * This will check if a block undo coin data has been stored
   * and is available.
   * @param {Buffer} hash - The block hash
   * @returns {Promise}
   */

  async hasUndo(hash) {
    return this.db.has(layout.b.encode(types.UNDO, hash));
  }

  /**
   * This will check if a block has been stored and is available.
   * @param {Buffer} hash - The block hash
   * @returns {Promise}
   */

  async has(hash) {
    return this.db.has(layout.b.encode(types.BLOCK, hash));
  }
}
/*
* Expose
*/
module.exports = LevelBlockStore;

149
lib/blockstore/records.js Normal file
View File

@ -0,0 +1,149 @@
/*!
* blockstore/records.js - blockstore records
* Copyright (c) 2019, Braydon Fuller (MIT License).
* https://github.com/bcoin-org/bcoin
*/
'use strict';
const assert = require('bsert');
const bio = require('bufio');
/**
* @module blockstore/records
*/
/**
 * Block Record
 *
 * Index entry locating a block within a block file: the file
 * number, the byte position in that file and the data length.
 */

class BlockRecord {
  /**
   * Create a block record.
   * @constructor
   * @param {Object} [options]
   * @param {Number} [options.file] - The file number
   * @param {Number} [options.position] - The byte position in the file
   * @param {Number} [options.length] - The data length in bytes
   */

  constructor(options = {}) {
    this.file = options.file || 0;
    this.position = options.position || 0;
    this.length = options.length || 0;

    // Each field is serialized as an unsigned 32-bit integer.
    assert((this.file >>> 0) === this.file);
    assert((this.position >>> 0) === this.position);
    assert((this.length >>> 0) === this.length);
  }

  /**
   * Inject properties from serialized data.
   * @private
   * @param {Buffer} data
   * @returns {BlockRecord}
   */

  fromRaw(data) {
    const br = bio.read(data);

    this.file = br.readU32();
    this.position = br.readU32();
    this.length = br.readU32();

    return this;
  }

  /**
   * Instantiate block record from serialized data.
   * @param {Buffer} data
   * @returns {BlockRecord}
   */

  static fromRaw(data) {
    return new this().fromRaw(data);
  }

  /**
   * Serialize the block record.
   * @returns {Buffer}
   */

  toRaw() {
    const bw = bio.write(12);

    bw.writeU32(this.file);
    bw.writeU32(this.position);
    bw.writeU32(this.length);

    return bw.render();
  }
}
/**
 * File Record
 *
 * Tracks per-file usage for a block file: how many blocks it
 * holds, how many bytes are used and its maximum length.
 */

class FileRecord {
  /**
   * Create a file record.
   * @constructor
   * @param {Object} [options]
   * @param {Number} [options.blocks] - The number of blocks in the file
   * @param {Number} [options.used] - The number of bytes used
   * @param {Number} [options.length] - The maximum file length
   */

  constructor(options = {}) {
    this.blocks = options.blocks || 0;
    this.used = options.used || 0;
    this.length = options.length || 0;

    // Each field is serialized as an unsigned 32-bit integer.
    assert((this.blocks >>> 0) === this.blocks);
    assert((this.used >>> 0) === this.used);
    assert((this.length >>> 0) === this.length);
  }

  /**
   * Inject properties from serialized data.
   * @private
   * @param {Buffer} data
   * @returns {FileRecord}
   */

  fromRaw(data) {
    const br = bio.read(data);

    this.blocks = br.readU32();
    this.used = br.readU32();
    this.length = br.readU32();

    return this;
  }

  /**
   * Instantiate file record from serialized data.
   * @param {Buffer} data
   * @returns {FileRecord}
   */

  static fromRaw(data) {
    return new this().fromRaw(data);
  }

  /**
   * Serialize the file record.
   * @returns {Buffer}
   */

  toRaw() {
    const bw = bio.write(12);

    bw.writeU32(this.blocks);
    bw.writeU32(this.used);
    bw.writeU32(this.length);

    return bw.render();
  }
}
/*
* Expose
*/
exports.BlockRecord = BlockRecord;
exports.FileRecord = FileRecord;
module.exports = exports;

View File

@ -16,6 +16,7 @@ const Miner = require('../mining/miner');
const Node = require('./node');
const HTTP = require('./http');
const RPC = require('./rpc');
const blockstore = require('../blockstore');
/**
* Full Node
@ -40,10 +41,20 @@ class FullNode extends Node {
// SPV flag.
this.spv = false;
// Instantiate blockchain.
// Instantiate block storage.
this.blocks = blockstore.create({
network: this.network,
logger: this.logger,
prefix: this.config.prefix,
cacheSize: this.config.mb('block-cache-size'),
memory: this.config.bool('memory')
});
// Chain needs access to blocks.
this.chain = new Chain({
network: this.network,
logger: this.logger,
blocks: this.blocks,
workers: this.workers,
memory: this.config.bool('memory'),
prefix: this.config.prefix,
@ -218,6 +229,7 @@ class FullNode extends Node {
this.opened = true;
await this.handlePreopen();
await this.blocks.open();
await this.chain.open();
await this.mempool.open();
await this.miner.open();
@ -250,6 +262,7 @@ class FullNode extends Node {
await this.miner.close();
await this.mempool.close();
await this.chain.close();
await this.blocks.close();
await this.handleClose();
}

View File

@ -57,6 +57,7 @@ class Node extends EventEmitter {
this.workers = null;
this.spv = false;
this.blocks = null;
this.chain = null;
this.fees = null;
this.mempool = null;
@ -135,6 +136,9 @@ class Node extends EventEmitter {
if (this.memory)
return undefined;
if (this.blocks)
await this.blocks.ensure();
return fs.mkdirp(this.config.prefix);
}

142
migrate/chaindb4to5.js Normal file
View File

@ -0,0 +1,142 @@
'use strict';
const assert = require('assert');
const bdb = require('bdb');
const layout = require('../lib/blockchain/layout');
const FileBlockStore = require('../lib/blockstore/file');
const {resolve} = require('path');
assert(process.argv.length > 2, 'Please pass in a database path.');
// migration -
// chaindb: leveldb to flat files
const db = bdb.create({
location: process.argv[2],
memory: false,
compression: true,
cacheSize: 32 << 20,
createIfMissing: false
});
const location = resolve(process.argv[2], '../blocks');
const blockStore = new FileBlockStore({
location: location
});
/**
 * Bump the chain database version record from 4 to 5.
 * Re-verifies the current version before writing.
 * @returns {Promise}
 */

async function updateVersion() {
  const ver = await checkVersion();

  console.log('Updating version to %d.', ver + 1);

  // Version record: 'chain' prefix followed by a LE uint32 version.
  const buf = Buffer.allocUnsafe(5 + 4);
  buf.write('chain', 0, 'ascii');
  // Write ver + 1 (checkVersion guarantees ver === 4); the
  // deprecated `noAssert` third argument has been dropped.
  buf.writeUInt32LE(ver + 1, 5);

  const parent = db.batch();
  parent.put(layout.V.encode(), buf);
  await parent.write();
}
/**
 * Verify that the chain database is at version 4.
 * @returns {Promise} - Resolves with the current version.
 * @throws {Error} When the version record is missing or not 4.
 */

async function checkVersion() {
  console.log('Checking version.');

  const data = await db.get(layout.V.encode());
  assert(data, 'No version.');

  // The version is a LE uint32 following the 5-byte 'chain'
  // prefix; the deprecated `noAssert` argument has been dropped.
  const ver = data.readUInt32LE(5);

  if (ver !== 4)
    throw new Error(`DB is version ${ver}.`);

  return ver;
}
/**
 * Move all undo coin records out of LevelDB into the
 * file-based block store, deleting them from LevelDB.
 * @returns {Promise}
 */

async function migrateUndoBlocks() {
  console.log('Migrating undo blocks');

  const iter = db.iterator({
    gte: layout.u.min(),
    lte: layout.u.max(),
    keys: true,
    values: true
  });

  let batch = db.batch();
  let total = 0;

  await iter.each(async (key, value) => {
    // The undo key is the `u` prefix followed by the block hash.
    const hash = key.slice(1);
    await blockStore.writeUndo(hash, value);
    batch.del(key);

    total += 1;

    // Flush deletions periodically to bound batch size.
    if (total % 10000 === 0) {
      console.log('Migrated up %d undo blocks.', total);
      await batch.write();
      batch = db.batch();
    }
  });

  console.log('Migrated all %d undo blocks.', total);
  await batch.write();
}
/**
 * Move all raw block records out of LevelDB into the
 * file-based block store, deleting them from LevelDB.
 * @returns {Promise}
 */

async function migrateBlocks() {
  console.log('Migrating blocks');

  const iter = db.iterator({
    gte: layout.b.min(),
    lte: layout.b.max(),
    keys: true,
    values: true
  });

  let batch = db.batch();
  let total = 0;

  await iter.each(async (key, value) => {
    // The block key is the `b` prefix followed by the block hash.
    const hash = key.slice(1);
    await blockStore.write(hash, value);
    batch.del(key);

    total += 1;

    // Flush deletions periodically to bound batch size.
    if (total % 10000 === 0) {
      console.log('Migrated up %d blocks.', total);
      await batch.write();
      batch = db.batch();
    }
  });

  console.log('Migrated all %d blocks.', total);
  await batch.write();
}
/*
 * Execute
 */

async function main() {
  await db.open();
  await blockStore.ensure();
  await blockStore.open();

  console.log('Opened %s.', process.argv[2]);

  await checkVersion();
  await migrateBlocks();
  await migrateUndoBlocks();
  await updateVersion();

  console.log('Compacting database');
  await db.compactRange();

  await db.close();
  await blockStore.close();
}

main().then(() => {
  console.log('Migration complete.');
  process.exit(0);
}).catch((err) => {
  console.error(err.stack);
  process.exit(1);
});

1246
test/blockstore-test.js Normal file

File diff suppressed because it is too large Load Diff

View File

@ -17,6 +17,7 @@ const Output = require('../lib/primitives/output');
const common = require('../lib/blockchain/common');
const nodejsUtil = require('util');
const Opcode = require('../lib/script/opcode');
const BlockStore = require('../lib/blockstore/level');
const opcodes = Script.opcodes;
const ZERO_KEY = Buffer.alloc(33, 0x00);
@ -30,8 +31,14 @@ const workers = new WorkerPool({
enabled: true
});
const blocks = new BlockStore({
memory: true,
network
});
const chain = new Chain({
memory: true,
blocks,
network,
workers
});
@ -115,6 +122,7 @@ describe('Chain', function() {
this.timeout(process.browser ? 1200000 : 60000);
it('should open chain and miner', async () => {
await blocks.open();
await chain.open();
await miner.open();
});
@ -895,5 +903,6 @@ describe('Chain', function() {
it('should cleanup', async () => {
await miner.close();
await chain.close();
await blocks.close();
});
});

View File

@ -18,6 +18,7 @@ const Script = require('../lib/script/script');
const opcodes = Script.opcodes;
const Witness = require('../lib/script/witness');
const MemWallet = require('./util/memwallet');
const BlockStore = require('../lib/blockstore/level');
const ALL = Script.hashType.ALL;
const ONE_HASH = Buffer.alloc(32, 0x00);
@ -27,9 +28,14 @@ const workers = new WorkerPool({
enabled: true
});
const blocks = new BlockStore({
memory: true
});
const chain = new Chain({
memory: true,
workers
workers,
blocks
});
const mempool = new Mempool({
@ -68,6 +74,7 @@ describe('Mempool', function() {
it('should open mempool', async () => {
await workers.open();
await blocks.open();
await chain.open();
await mempool.open();
chain.state.flags |= Script.flags.VERIFY_WITNESS;
@ -453,6 +460,7 @@ describe('Mempool', function() {
it('should destroy mempool', async () => {
await mempool.close();
await chain.close();
await blocks.close();
await workers.close();
});
});

View File

@ -7,6 +7,7 @@ const assert = require('./util/assert');
const Chain = require('../lib/blockchain/chain');
const ChainEntry = require('../lib/blockchain/chainentry');
const Network = require('../lib/protocol/network');
const BlockStore = require('../lib/blockstore/level');
const network = Network.get('main');
@ -14,13 +15,20 @@ function random(max) {
return Math.floor(Math.random() * max);
}
const chain = new Chain({
const blocks = new BlockStore({
memory: true,
network
});
const chain = new Chain({
memory: true,
network,
blocks
});
describe('Difficulty', function() {
it('should open chain', async () => {
await blocks.open();
await chain.open();
});

View File

@ -1,9 +1,11 @@
'use strict';
const assert = require('assert');
const {tmpdir} = require('os');
const path = require('path');
const fs = require('bfile');
const bio = require('bufio');
const {randomBytes} = require('bcrypto/lib/random');
const Block = require('../../lib/primitives/block');
const MerkleBlock = require('../../lib/primitives/merkleblock');
const Headers = require('../../lib/primitives/headers');
@ -85,6 +87,21 @@ common.writeTX = function writeTX(name, tx, view) {
common.writeFile(`${name}-undo.raw`, undoRaw);
};
common.testdir = function(name) {
  // Only simple lowercase names; the suffix keeps the path
  // within the pattern common.rimraf() will later accept.
  assert(/^[a-z]+$/.test(name), 'Invalid name');

  // Random suffix so concurrent test runs don't collide.
  const suffix = randomBytes(4).toString('hex');

  return path.join(tmpdir(), `bcoin-test-${name}-${suffix}`);
};
common.rimraf = async function(p) {
  // Safety check: only remove paths created by common.testdir()
  // (or one direct subdirectory). Accept both `/` and `\` so the
  // check also works with Windows path separators produced by
  // path.join() — previously every valid path was rejected on
  // Windows.
  const allowed = /bcoin-test-[a-z]+-[a-f0-9]{8}([\\/][a-z]+)?$/;
  if (!allowed.test(p))
    throw new Error(`Path not allowed: ${p}.`);

  return await fs.rimraf(p);
};
function parseUndo(data) {
const br = bio.read(data);
const items = [];