Relax the constraints on read_headers
They were really there for fs_block_hashes, which still enforces the full constraint. This simplifies get_chunk.
parent 4f871cec0f
commit 49ee008346
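
For orientation before the diff: read_headers now returns a (binary, n) pair and clamps to whatever is on disk instead of raising when fewer than count headers exist, so each caller decides whether a short read matters. A minimal caller-side sketch, assuming only that convention (the helper name and the db handle are illustrative, not part of the commit):

def headers_hex(db, start_height, count):
    # read_headers clamps to what is on disk and reports how many
    # headers it actually returned.
    headers, n = db.read_headers(start_height, count)
    # A short read (n < count) is no longer an error at this level;
    # callers that need the full count, like fs_block_hashes, check it.
    return headers.hex()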
@@ -299,11 +299,11 @@ class Controller(ServerBase):
 
     def electrum_header(self, height):
         '''Return the binary header at the given height.'''
-        if not 0 <= height <= self.bp.db_height:
-            raise RPCError('height {:,d} out of range'.format(height))
         if height in self.header_cache:
             return self.header_cache[height]
-        header = self.bp.read_headers(height, 1)
+        header, n = self.bp.read_headers(height, 1)
+        if n != 1:
+            raise RPCError('height {:,d} out of range'.format(height))
         header = self.coin.electrum_header(header, height)
         self.header_cache[height] = header
         return header
@@ -750,10 +750,10 @@ class Controller(ServerBase):
     def get_chunk(self, index):
         '''Return header chunk as hex. Index is a non-negative integer.'''
         chunk_size = self.coin.CHUNK_SIZE
-        next_height = self.bp.db_height + 1
-        start_height = min(index * chunk_size, next_height)
-        count = min(next_height - start_height, chunk_size)
-        return self.bp.read_headers(start_height, count).hex()
+        start_height = index * chunk_size
+        count = chunk_size
+        headers, n = self.bp.read_headers(start_height, count)
+        return headers.hex()
 
     # Client RPC "blockchain" command handlers
 
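
A quick illustration of the simplified get_chunk, as a sketch only (the controller handle and the helper are assumptions, not code from the repository): an index at or past the chain tip no longer needs clamping, it simply yields whatever headers exist there.

def chunk_demo(controller):
    # controller is an assumed Controller instance; CHUNK_SIZE comes from its coin.
    tip_chunk = controller.bp.db_height // controller.coin.CHUNK_SIZE
    partial = controller.get_chunk(tip_chunk)       # last, possibly partial, chunk
    beyond = controller.get_chunk(tip_chunk + 1)    # '' -- nothing on disk yet
    return partial, beyond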
server/db.py
@@ -209,18 +209,25 @@ class DB(util.LoggedClass):
         offset = prior_tx_count * 32
         self.hashes_file.write(offset, hashes)
 
-    def read_headers(self, start, count):
-        '''Requires count >= 0.'''
+    def read_headers(self, start_height, count):
+        '''Requires start_height >= 0, count >= 0.  Reads as many headers as
+        are available starting at start_height up to count.  This
+        would be zero if start_height is beyond self.db_height, for
+        example.
+
+        Returns a (binary, n) pair where binary is the concatenated
+        binary headers, and n is the count of headers returned.
+        '''
         # Read some from disk
-        disk_count = min(count, self.db_height + 1 - start)
-        if start < 0 or count < 0 or disk_count != count:
+        if start_height < 0 or count < 0:
             raise self.DBError('{:,d} headers starting at {:,d} not on disk'
-                               .format(count, start))
+                               .format(count, start_height))
+        disk_count = max(0, min(count, self.db_height + 1 - start_height))
         if disk_count:
-            offset = self.header_offset(start)
-            size = self.header_offset(start + disk_count) - offset
-            return self.headers_file.read(offset, size)
-        return b''
+            offset = self.header_offset(start_height)
+            size = self.header_offset(start_height + disk_count) - offset
+            return self.headers_file.read(offset, size), disk_count
+        return b'', 0
 
     def fs_tx_hash(self, tx_num):
         '''Return a par (tx_hash, tx_height) for the given tx number.
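
To make the documented (binary, n) contract concrete, a small standalone sketch of the clamping arithmetic above (plain Python, no database involved; the function name is illustrative):

def clamp(db_height, start_height, count):
    # Mirrors disk_count above: never negative, never past the chain tip.
    return max(0, min(count, db_height + 1 - start_height))

# With 1,000 headers on disk (db_height == 999):
assert clamp(999, 0, 500) == 500     # fully available
assert clamp(999, 900, 500) == 100   # clamped at the tip
assert clamp(999, 1500, 500) == 0    # beyond the tip -> read_headers gives (b'', 0)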
@@ -234,7 +241,10 @@ class DB(util.LoggedClass):
         return tx_hash, tx_height
 
     def fs_block_hashes(self, height, count):
-        headers_concat = self.read_headers(height, count)
+        headers_concat, headers_count = self.read_headers(height, count)
+        if headers_count != count:
+            raise self.DBError('only got {:,d} headers starting at {:,d}, not '
+                               '{:,d}'.format(headers_count, height, count))
         offset = 0
         headers = []
         for n in range(count):
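
As the commit message notes, fs_block_hashes still enforces the full constraint; only the place it is enforced has moved. A hedged usage sketch (the db handle and the heights are assumptions):

def hashes_demo(db):
    # db is an assumed DB instance with at least one block on disk.
    hashes = db.fs_block_hashes(0, 1)        # full count available -> succeeds
    try:
        db.fs_block_hashes(db.db_height, 2)  # asks for one header past the tip
    except db.DBError:
        pass                                 # 'only got 1 headers starting at ..., not 2'
    return hashes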