Dynamic header support
Block headers can now have a dynamic size; their byte offsets are indexed in a new meta file, "headers_offsets". The offsets are 64-bit so as to accommodate coins with large headers that will accumulate gigabytes of header data over the years. Closes #128
This commit is contained in:
parent 37602d4eae
commit a820829e0e
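For orientation, here is a minimal, self-contained sketch (not part of the commit; the helper names and header sizes are made up) of the indexing scheme the diff below implements: meta/headers_offsets stores one little-endian uint64 per height, giving the byte offset at which that height's header starts in meta/headers, so a header's length is simply the difference between consecutive entries.

import struct

# Hypothetical in-memory stand-ins for the two flat files used by the commit:
# `headers` plays the role of meta/headers (all raw headers concatenated) and
# `offsets` plays the role of meta/headers_offsets (one uint64 LE per height,
# the byte offset at which that height's header starts).
headers = b''
offsets = struct.pack('<Q', 0)   # entry 0: the genesis header starts at offset 0

def append_header(raw):
    """Append a variable-size header and record where the *next* header starts."""
    global headers, offsets
    headers += raw
    offsets += struct.pack('<Q', len(headers))

def header_offset(height):
    """Byte offset of the header at `height` (cf. dynamic_header_offset below)."""
    return struct.unpack('<Q', offsets[height * 8:height * 8 + 8])[0]

def header_len(height):
    """Size in bytes of the header at `height` (cf. dynamic_header_len below)."""
    return header_offset(height + 1) - header_offset(height)

# Three fake headers of different sizes at heights 0, 1 and 2.
for raw in (b'A' * 80, b'B' * 113, b'C' * 97):
    append_header(raw)

assert header_offset(1) == 80 and header_len(1) == 113
assert headers[header_offset(2):header_offset(2) + header_len(2)] == b'C' * 97

The 64-bit width matters because each entry is an absolute position in the concatenated header file, which, as the commit message notes, can grow to gigabytes for coins with large headers.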
@@ -1,2 +1,3 @@
 Neil Booth: creator and maintainer
 Johann Bauer: backend DB abstraction
+John Jegutanis: alt-chain integrations
server/db.py (32 changed lines)
@@ -50,7 +50,8 @@ class DB(util.LoggedClass):
             self.header_offset = self.coin.static_header_offset
             self.header_len = self.coin.static_header_len
         else:
-            raise Exception("Non static headers are not supported")
+            self.header_offset = self.dynamic_header_offset
+            self.header_len = self.dynamic_header_len

         self.logger.info('switching current directory to {}'
                          .format(env.db_dir))
@@ -69,6 +70,12 @@ class DB(util.LoggedClass):
         self.headers_file = util.LogicalFile('meta/headers', 2, 16000000)
         self.tx_counts_file = util.LogicalFile('meta/txcounts', 2, 2000000)
         self.hashes_file = util.LogicalFile('meta/hashes', 4, 16000000)
+        if not self.coin.STATIC_BLOCK_HEADERS:
+            self.headers_offsets_file = util.LogicalFile(
+                'meta/headers_offsets', 2, 16000000)
+            # Write the offset of the genesis block
+            if self.headers_offsets_file.read(0, 8) != b'\x00' * 8:
+                self.headers_offsets_file.write(0, b'\x00' * 8)

         # tx_counts[N] has the cumulative number of txs at the end of
         # height N. So tx_counts[0] is 1 - the genesis coinbase
@@ -192,6 +199,28 @@ class DB(util.LoggedClass):
         self.clear_excess_history(self.utxo_flush_count)
         self.clear_excess_undo_info()

+    def fs_update_header_offsets(self, offset_start, height_start, headers):
+        if self.coin.STATIC_BLOCK_HEADERS:
+            return
+        offset = offset_start
+        offsets = []
+        for h in headers:
+            offset += len(h)
+            offsets.append(pack("<Q", offset))
+        # For each header we get the offset of the next header, hence we
+        # start writing from the next height
+        pos = (height_start + 1) * 8
+        self.headers_offsets_file.write(pos, b''.join(offsets))
+
+    def dynamic_header_offset(self, height):
+        assert not self.coin.STATIC_BLOCK_HEADERS
+        offset, = unpack('<Q', self.headers_offsets_file.read(height * 8, 8))
+        return offset
+
+    def dynamic_header_len(self, height):
+        return self.dynamic_header_offset(height + 1)\
+               - self.dynamic_header_offset(height)
+
     def fs_update(self, fs_height, headers, block_tx_hashes):
         '''Write headers, the tx_count array and block tx hashes to disk.

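A quick worked example of the write position used in fs_update_header_offsets above (the numbers are made up for illustration): because each loop iteration records where the *next* header starts, the first entry produced belongs to height_start + 1, which is why the write begins at byte position (height_start + 1) * 8 of meta/headers_offsets.

from struct import pack, unpack

# Hypothetical flush: three headers of 100, 120 and 90 bytes starting at
# height 500, with header 500 itself beginning at byte offset 4_000_000.
offset_start, height_start = 4_000_000, 500
new_headers = [b'\x00' * 100, b'\x00' * 120, b'\x00' * 90]

offset, offsets = offset_start, []
for h in new_headers:
    offset += len(h)                      # end of this header == start of the next
    offsets.append(pack('<Q', offset))

pos = (height_start + 1) * 8              # first entry written is for height 501
assert pos == 4008
assert [unpack('<Q', o)[0] for o in offsets] == [4_000_100, 4_000_220, 4_000_310]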
@@ -215,6 +244,7 @@ class DB(util.LoggedClass):
         # Write the headers, tx counts, and tx hashes
         offset = self.header_offset(height_start)
         self.headers_file.write(offset, b''.join(headers))
+        self.fs_update_header_offsets(offset, height_start, headers)
         offset = height_start * self.tx_counts.itemsize
         self.tx_counts_file.write(offset,
                                   self.tx_counts[height_start:].tobytes())