Merge branch 'develop'

This commit is contained in:
Neil Booth 2017-01-23 23:42:37 +09:00
commit 491080f4c2
19 changed files with 977 additions and 729 deletions

View File

@ -115,8 +115,6 @@ Roadmap Pre-1.0
- minor code cleanups. - minor code cleanups.
- implement simple protocol to discover peers without resorting to IRC. - implement simple protocol to discover peers without resorting to IRC.
This may slip to post 1.0
Roadmap Post-1.0 Roadmap Post-1.0
================ ================
@ -137,6 +135,33 @@ version prior to the release of 1.0.
ChangeLog ChangeLog
========= =========
Version 0.10.11
---------------
* rewrite of JSON RPC layer to improve usability for clients.
Includes support of JSON RPC v1, v2 and a compat layer that tries to
detect the peer's version.
Version 0.10.10
---------------
* move peer management from irc.py to peers.py. This is preparataion
for peer discovery without IRC.
* misc cleanups
* fix Litecoin genesis hash (petrkr)
Version 0.10.9
--------------
* restore client to sessions output
* cleanup shutdown process; hopefully this resolves the log spew for good
Version 0.10.8
--------------
* fix import for reverse iterator for RocksDB
* fix tests
Version 0.10.7 Version 0.10.7
-------------- --------------
@ -220,46 +245,6 @@ variables to use roughly the same amount of memory.
For now this code should be considered experimental; if you want For now this code should be considered experimental; if you want
stability please stick with the 0.9 series. stability please stick with the 0.9 series.
Version 0.9.23
--------------
* Backport of the fix for issue `#94#` - stale references to old
sessions. This would effectively memory and network handles.
Version 0.9.22
--------------
* documentation updates (ARCHITECTURE.rst, ENVIRONMENT.rst) only.
Version 0.9.21
--------------
* moved RELEASE-NOTES into this README
* document the RPC interface in docs/RPC-INTERFACE.rst
* clean up open DB handling, issue `#89`_
Version 0.9.20
--------------
* fix for IRC flood issue `#93`_
Version 0.9.19
--------------
* move sleep outside semaphore (issue `#88`_)
Version 0.9.18
--------------
* last release of 2016. Just a couple of minor tweaks to logging.
Version 0.9.17
--------------
* have all the DBs use fsync on write; hopefully means DB won't corrupt in
case of a kernel panic (issue `#75`_)
* replace $DONATION_ADDRESS in banner file
**Neil Booth** kyuupichan@gmail.com https://github.com/kyuupichan **Neil Booth** kyuupichan@gmail.com https://github.com/kyuupichan
@ -267,11 +252,7 @@ Version 0.9.17
.. _#72: https://github.com/kyuupichan/electrumx/issues/72 .. _#72: https://github.com/kyuupichan/electrumx/issues/72
.. _#75: https://github.com/kyuupichan/electrumx/issues/75
.. _#88: https://github.com/kyuupichan/electrumx/issues/88
.. _#89: https://github.com/kyuupichan/electrumx/issues/89
.. _#92: https://github.com/kyuupichan/electrumx/issues/92 .. _#92: https://github.com/kyuupichan/electrumx/issues/92
.. _#93: https://github.com/kyuupichan/electrumx/issues/93
.. _#94: https://github.com/kyuupichan/electrumx/issues/94 .. _#94: https://github.com/kyuupichan/electrumx/issues/94
.. _#99: https://github.com/kyuupichan/electrumx/issues/99 .. _#99: https://github.com/kyuupichan/electrumx/issues/99
.. _#100: https://github.com/kyuupichan/electrumx/issues/100 .. _#100: https://github.com/kyuupichan/electrumx/issues/100

View File

@ -36,7 +36,7 @@ Not started until the Block Processor has caught up with bitcoind.
Daemon Daemon
------ ------
Encapsulates the RPC wire protcol with bitcoind for the whole server. Encapsulates the RPC wire protocol with bitcoind for the whole server.
Transparently handles temporary bitcoind connection errors, and fails Transparently handles temporary bitcoind connection errors, and fails
over if necessary. over if necessary.

View File

@ -205,7 +205,8 @@ below are low and encourage you to raise them.
An integer number of seconds defaulting to 600. Sessions with no An integer number of seconds defaulting to 600. Sessions with no
activity for longer than this are disconnected. Properly activity for longer than this are disconnected. Properly
functioning Electrum clients by default will send pings roughly functioning Electrum clients by default will send pings roughly
every 60 seconds. every 60 seconds, and servers doing peer discovery roughly every 300
seconds.
IRC IRC
--- ---
@ -239,8 +240,9 @@ connectivity on IRC:
* **REPORT_HOST_TOR** * **REPORT_HOST_TOR**
The tor .onion address to advertise. If set, an additional The tor address to advertise; must end with `.onion`. If set, an
connection to IRC happens with '_tor" appended to **IRC_NICK**. additional connection to IRC happens with '_tor' appended to
**IRC_NICK**.
* **REPORT_TCP_PORT_TOR** * **REPORT_TCP_PORT_TOR**

View File

@ -16,45 +16,60 @@ import json
from functools import partial from functools import partial
from os import environ from os import environ
from lib.jsonrpc import JSONRPC from lib.jsonrpc import JSONSession, JSONRPCv2
from server.controller import Controller from server.controller import Controller
class RPCClient(JSONRPC): class RPCClient(JSONSession):
def __init__(self): def __init__(self):
super().__init__() super().__init__(version=JSONRPCv2)
self.queue = asyncio.Queue() self.max_send = 0
self.max_send = 1000000 self.max_buffer_size = 5*10**6
self.event = asyncio.Event()
def enqueue_request(self, request): def have_pending_items(self):
self.queue.put_nowait(request) self.event.set()
async def send_and_wait(self, method, params, timeout=None): async def wait_for_response(self):
# Raise incoming buffer size - presumably connection is trusted await self.event.wait()
self.max_buffer_size = 5000000 await self.process_pending_items()
if params:
params = [params]
payload = self.request_payload(method, id_=method, params=params)
self.encode_and_send_payload(payload)
future = asyncio.ensure_future(self.queue.get()) def send_rpc_request(self, method, params):
for f in asyncio.as_completed([future], timeout=timeout): handler = partial(self.handle_response, method)
try: self.send_request(handler, method, params)
request = await f
except asyncio.TimeoutError: def handle_response(self, method, result, error):
future.cancel() if method in ('groups', 'sessions') and not error:
print('request timed out after {}s'.format(timeout)) if method == 'groups':
lines = Controller.groups_text_lines(result)
else: else:
await request.process(self) lines = Controller.sessions_text_lines(result)
for line in lines:
async def handle_response(self, result, error, method):
if result and method in ('groups', 'sessions'):
for line in Controller.text_lines(method, result):
print(line) print(line)
elif error:
print('error: {} (code {:d})'
.format(error['message'], error['code']))
else: else:
value = {'error': error} if error else result print(json.dumps(result, indent=4, sort_keys=True))
print(json.dumps(value, indent=4, sort_keys=True))
def rpc_send_and_wait(port, method, params, timeout=15):
loop = asyncio.get_event_loop()
coro = loop.create_connection(RPCClient, 'localhost', port)
try:
transport, rpc_client = loop.run_until_complete(coro)
rpc_client.send_rpc_request(method, params)
try:
coro = rpc_client.wait_for_response()
loop.run_until_complete(asyncio.wait_for(coro, timeout))
except asyncio.TimeoutError:
print('request timed out after {}s'.format(timeout))
except OSError:
print('cannot connect - is ElectrumX catching up, not running, or '
'is {:d} the wrong RPC port?'.format(port))
finally:
loop.close()
def main(): def main():
@ -68,19 +83,17 @@ def main():
help='params to send') help='params to send')
args = parser.parse_args() args = parser.parse_args()
if args.port is None: port = args.port
args.port = int(environ.get('RPC_PORT', 8000)) if port is None:
port = int(environ.get('RPC_PORT', 8000))
loop = asyncio.get_event_loop() # Get the RPC request.
coro = loop.create_connection(RPCClient, 'localhost', args.port) method = args.command[0]
try: params = args.param
transport, protocol = loop.run_until_complete(coro) if method in ('log', 'disconnect'):
coro = protocol.send_and_wait(args.command[0], args.param, timeout=15) params = [params]
loop.run_until_complete(coro)
except OSError: rpc_send_and_wait(port, method, params)
print('error connecting - is ElectrumX catching up or not running?')
finally:
loop.close()
if __name__ == '__main__': if __name__ == '__main__':

View File

@ -318,8 +318,8 @@ class Litecoin(Coin):
P2PKH_VERBYTE = 0x30 P2PKH_VERBYTE = 0x30
P2SH_VERBYTE = 0x05 P2SH_VERBYTE = 0x05
WIF_BYTE = 0xb0 WIF_BYTE = 0xb0
GENESIS_HASH=('000000000019d6689c085ae165831e93' GENESIS_HASH=('12a765e31ffd4059bada1e25190f6e98'
'4ff763ae46a2a6c172b3f1b60a8ce26f') 'c99d9714d334efa41a195a7e7e04bfe2')
TX_COUNT = 8908766 TX_COUNT = 8908766
TX_COUNT_HEIGHT = 1105256 TX_COUNT_HEIGHT = 1105256
TX_PER_BLOCK = 10 TX_PER_BLOCK = 10

View File

@ -1,4 +1,4 @@
# Copyright (c) 2016, Neil Booth # Copyright (c) 2016-2017, Neil Booth
# #
# All rights reserved. # All rights reserved.
# #

File diff suppressed because it is too large Load Diff

View File

@ -1,4 +1,4 @@
# Copyright (c) 2016, Neil Booth # Copyright (c) 2016-2017, Neil Booth
# #
# All rights reserved. # All rights reserved.
# #

View File

@ -1,4 +1,4 @@
# Copyright (c) 2016, Neil Booth # Copyright (c) 2016-2017, Neil Booth
# #
# All rights reserved. # All rights reserved.
# #

View File

@ -1,4 +1,4 @@
# Copyright (c) 2016, Neil Booth # Copyright (c) 2016-2017, Neil Booth
# #
# All rights reserved. # All rights reserved.
# #

View File

@ -196,11 +196,14 @@ class BlockProcessor(server.db.DB):
task = await self.task_queue.get() task = await self.task_queue.get()
await task() await task()
def shutdown(self): def shutdown(self, executor):
'''Shutdown cleanly and flush to disk.'''
# First stut down the executor; it may be processing a block.
# Then we can flush anything remaining to disk.
executor.shutdown()
if self.height != self.db_height: if self.height != self.db_height:
self.logger.info('flushing state to DB for a clean shutdown...') self.logger.info('flushing state to DB for a clean shutdown...')
self.flush(True) self.flush(True)
self.logger.info('shutdown complete')
async def executor(self, func, *args, **kwargs): async def executor(self, func, *args, **kwargs):
'''Run func taking args in the executor.''' '''Run func taking args in the executor.'''

View File

@ -18,14 +18,14 @@ from functools import partial
import pylru import pylru
from lib.jsonrpc import JSONRPC, RPCError, RequestBase from lib.jsonrpc import JSONRPC, RPCError
from lib.hash import sha256, double_sha256, hash_to_str, hex_str_to_hash from lib.hash import sha256, double_sha256, hash_to_str, hex_str_to_hash
import lib.util as util import lib.util as util
from server.block_processor import BlockProcessor from server.block_processor import BlockProcessor
from server.daemon import Daemon, DaemonError from server.daemon import Daemon, DaemonError
from server.irc import IRC
from server.session import LocalRPC, ElectrumX
from server.mempool import MemPool from server.mempool import MemPool
from server.peers import PeerManager
from server.session import LocalRPC, ElectrumX
from server.version import VERSION from server.version import VERSION
@ -39,16 +39,6 @@ class Controller(util.LoggedClass):
BANDS = 5 BANDS = 5
CATCHING_UP, LISTENING, PAUSED, SHUTTING_DOWN = range(4) CATCHING_UP, LISTENING, PAUSED, SHUTTING_DOWN = range(4)
class NotificationRequest(RequestBase):
def __init__(self, height, touched):
super().__init__(1)
self.height = height
self.touched = touched
async def process(self, session):
self.remaining = 0
await session.notify(self.height, self.touched)
def __init__(self, env): def __init__(self, env):
super().__init__() super().__init__()
# Set this event to cleanly shutdown # Set this event to cleanly shutdown
@ -56,12 +46,12 @@ class Controller(util.LoggedClass):
self.loop = asyncio.get_event_loop() self.loop = asyncio.get_event_loop()
self.executor = ThreadPoolExecutor() self.executor = ThreadPoolExecutor()
self.loop.set_default_executor(self.executor) self.loop.set_default_executor(self.executor)
self.start = time.time() self.start_time = time.time()
self.coin = env.coin self.coin = env.coin
self.daemon = Daemon(env.coin.daemon_urls(env.daemon_url)) self.daemon = Daemon(env.coin.daemon_urls(env.daemon_url))
self.bp = BlockProcessor(env, self.daemon) self.bp = BlockProcessor(env, self.daemon)
self.mempool = MemPool(self.bp) self.mempool = MemPool(self.bp)
self.irc = IRC(env) self.peers = PeerManager(env)
self.env = env self.env = env
self.servers = {} self.servers = {}
# Map of session to the key of its list in self.groups # Map of session to the key of its list in self.groups
@ -73,7 +63,8 @@ class Controller(util.LoggedClass):
self.max_sessions = env.max_sessions self.max_sessions = env.max_sessions
self.low_watermark = self.max_sessions * 19 // 20 self.low_watermark = self.max_sessions * 19 // 20
self.max_subs = env.max_subs self.max_subs = env.max_subs
self.subscription_count = 0 # Cache some idea of room to avoid recounting on each subscription
self.subs_room = 0
self.next_stale_check = 0 self.next_stale_check = 0
self.history_cache = pylru.lrucache(256) self.history_cache = pylru.lrucache(256)
self.header_cache = pylru.lrucache(8) self.header_cache = pylru.lrucache(8)
@ -95,12 +86,14 @@ class Controller(util.LoggedClass):
'block.get_header block.get_chunk estimatefee relayfee ' 'block.get_header block.get_chunk estimatefee relayfee '
'transaction.get transaction.get_merkle utxo.get_address'), 'transaction.get transaction.get_merkle utxo.get_address'),
('server', ('server',
'banner donation_address peers.subscribe version'), 'banner donation_address'),
] ]
self.electrumx_handlers = {'.'.join([prefix, suffix]): handlers = {'.'.join([prefix, suffix]):
getattr(self, suffix.replace('.', '_')) getattr(self, suffix.replace('.', '_'))
for prefix, suffixes in rpcs for prefix, suffixes in rpcs
for suffix in suffixes.split()} for suffix in suffixes.split()}
handlers['server.peers.subscribe'] = self.peers.subscribe
self.electrumx_handlers = handlers
async def mempool_transactions(self, hashX): async def mempool_transactions(self, hashX):
'''Generate (hex_hash, tx_fee, unconfirmed) tuples for mempool '''Generate (hex_hash, tx_fee, unconfirmed) tuples for mempool
@ -138,9 +131,9 @@ class Controller(util.LoggedClass):
if isinstance(session, LocalRPC): if isinstance(session, LocalRPC):
return 0 return 0
gid = self.sessions[session] gid = self.sessions[session]
group_bandwidth = sum(s.bandwidth_used for s in self.groups[gid]) group_bw = sum(session.bw_used for session in self.groups[gid])
return 1 + (bisect_left(self.bands, session.bandwidth_used) return 1 + (bisect_left(self.bands, session.bw_used)
+ bisect_left(self.bands, group_bandwidth)) // 2 + bisect_left(self.bands, group_bw)) // 2
def is_deprioritized(self, session): def is_deprioritized(self, session):
return self.session_priority(session) > self.BANDS return self.session_priority(session) > self.BANDS
@ -163,6 +156,15 @@ class Controller(util.LoggedClass):
and self.state == self.PAUSED): and self.state == self.PAUSED):
await self.start_external_servers() await self.start_external_servers()
# Periodically log sessions
if self.env.log_sessions and time.time() > self.next_log_sessions:
if self.next_log_sessions:
data = self.session_data(for_log=True)
for line in Controller.sessions_text_lines(data):
self.logger.info(line)
self.logger.info(json.dumps(self.server_summary()))
self.next_log_sessions = time.time() + self.env.log_sessions
await asyncio.sleep(1) await asyncio.sleep(1)
def enqueue_session(self, session): def enqueue_session(self, session):
@ -192,7 +194,10 @@ class Controller(util.LoggedClass):
while True: while True:
priority_, id_, session = await self.queue.get() priority_, id_, session = await self.queue.get()
if session in self.sessions: if session in self.sessions:
await session.serve_requests() await session.process_pending_items()
# Re-enqueue the session if stuff is left
if session.items:
self.enqueue_session(session)
def initiate_shutdown(self): def initiate_shutdown(self):
'''Call this function to start the shutdown process.''' '''Call this function to start the shutdown process.'''
@ -206,11 +211,11 @@ class Controller(util.LoggedClass):
async def await_bp_catchup(): async def await_bp_catchup():
'''Wait for the block processor to catch up. '''Wait for the block processor to catch up.
When it has, start the servers and connect to IRC. Then start the servers and the peer manager.
''' '''
await self.bp.caught_up_event.wait() await self.bp.caught_up_event.wait()
self.logger.info('block processor has caught up') self.logger.info('block processor has caught up')
add_future(self.irc.start()) add_future(self.peers.main_loop())
add_future(self.start_servers()) add_future(self.start_servers())
add_future(self.mempool.main_loop()) add_future(self.mempool.main_loop())
add_future(self.enqueue_delayed_sessions()) add_future(self.enqueue_delayed_sessions())
@ -225,7 +230,13 @@ class Controller(util.LoggedClass):
# Perform a clean shutdown when this event is signalled. # Perform a clean shutdown when this event is signalled.
await self.shutdown_event.wait() await self.shutdown_event.wait()
self.logger.info('shutting down gracefully')
self.logger.info('shutting down')
await self.shutdown(futures)
self.logger.info('shutdown complete')
async def shutdown(self, futures):
'''Perform the shutdown sequence.'''
self.state = self.SHUTTING_DOWN self.state = self.SHUTTING_DOWN
# Close servers and sessions # Close servers and sessions
@ -237,11 +248,12 @@ class Controller(util.LoggedClass):
for future in futures: for future in futures:
future.cancel() future.cancel()
await asyncio.wait(futures) # Wait for all futures to finish
while any(not future.done() for future in futures):
await asyncio.sleep(1)
# Wait for the executor to finish anything it's doing # Finally shut down the block processor and executor
self.executor.shutdown() self.bp.shutdown(self.executor)
self.bp.shutdown()
def close_servers(self, kinds): def close_servers(self, kinds):
'''Close the servers of the given kinds (TCP etc.).''' '''Close the servers of the given kinds (TCP etc.).'''
@ -253,22 +265,10 @@ class Controller(util.LoggedClass):
if server: if server:
server.close() server.close()
async def wait_for_sessions(self, secs=30):
if not self.sessions:
return
self.logger.info('waiting up to {:d} seconds for socket cleanup'
.format(secs))
limit = time.time() + secs
while self.sessions and time.time() < limit:
self.clear_stale_sessions(grace=secs//2)
await asyncio.sleep(2)
self.logger.info('{:,d} sessions remaining'
.format(len(self.sessions)))
async def start_server(self, kind, *args, **kw_args): async def start_server(self, kind, *args, **kw_args):
protocol_class = LocalRPC if kind == 'RPC' else ElectrumX protocol_class = LocalRPC if kind == 'RPC' else ElectrumX
protocol = partial(protocol_class, self, self.bp, self.env, kind) protocol_factory = partial(protocol_class, self, kind)
server = self.loop.create_server(protocol, *args, **kw_args) server = self.loop.create_server(protocol_factory, *args, **kw_args)
host, port = args[:2] host, port = args[:2]
try: try:
@ -331,17 +331,7 @@ class Controller(util.LoggedClass):
for session in self.sessions: for session in self.sessions:
if isinstance(session, ElectrumX): if isinstance(session, ElectrumX):
request = self.NotificationRequest(self.bp.db_height, await session.notify(self.bp.db_height, touched)
touched)
session.enqueue_request(request)
# Periodically log sessions
if self.env.log_sessions and time.time() > self.next_log_sessions:
if self.next_log_sessions:
data = self.session_data(for_log=True)
for line in Controller.sessions_text_lines(data):
self.logger.info(line)
self.logger.info(json.dumps(self.server_summary()))
self.next_log_sessions = time.time() + self.env.log_sessions
def electrum_header(self, height): def electrum_header(self, height):
'''Return the binary header at the given height.''' '''Return the binary header at the given height.'''
@ -359,7 +349,7 @@ class Controller(util.LoggedClass):
if now > self.next_stale_check: if now > self.next_stale_check:
self.next_stale_check = now + 300 self.next_stale_check = now + 300
self.clear_stale_sessions() self.clear_stale_sessions()
gid = int(session.start - self.start) // 900 gid = int(session.start_time - self.start_time) // 900
self.groups[gid].append(session) self.groups[gid].append(session)
self.sessions[session] = gid self.sessions[session] = gid
session.log_info('{} {}, {:,d} total' session.log_info('{} {}, {:,d} total'
@ -379,17 +369,16 @@ class Controller(util.LoggedClass):
gid = self.sessions.pop(session) gid = self.sessions.pop(session)
assert gid in self.groups assert gid in self.groups
self.groups[gid].remove(session) self.groups[gid].remove(session)
self.subscription_count -= session.sub_count()
def close_session(self, session): def close_session(self, session):
'''Close the session's transport and cancel its future.''' '''Close the session's transport and cancel its future.'''
session.close_connection() session.close_connection()
return 'disconnected {:d}'.format(session.id_) return 'disconnected {:d}'.format(session.session_id)
def toggle_logging(self, session): def toggle_logging(self, session):
'''Toggle logging of the session.''' '''Toggle logging of the session.'''
session.log_me = not session.log_me session.log_me = not session.log_me
return 'log {:d}: {}'.format(session.id_, session.log_me) return 'log {:d}: {}'.format(session.session_id, session.log_me)
def clear_stale_sessions(self, grace=15): def clear_stale_sessions(self, grace=15):
'''Cut off sessions that haven't done anything for 10 minutes. Force '''Cut off sessions that haven't done anything for 10 minutes. Force
@ -403,17 +392,17 @@ class Controller(util.LoggedClass):
stale = [] stale = []
for session in self.sessions: for session in self.sessions:
if session.is_closing(): if session.is_closing():
if session.stop <= shutdown_cutoff: if session.close_time <= shutdown_cutoff:
session.transport.abort() session.abort()
elif session.last_recv < stale_cutoff: elif session.last_recv < stale_cutoff:
self.close_session(session) self.close_session(session)
stale.append(session.id_) stale.append(session.session_id)
if stale: if stale:
self.logger.info('closing stale connections {}'.format(stale)) self.logger.info('closing stale connections {}'.format(stale))
# Consolidate small groups # Consolidate small groups
gids = [gid for gid, l in self.groups.items() if len(l) <= 4 gids = [gid for gid, l in self.groups.items() if len(l) <= 4
and sum(session.bandwidth_used for session in l) < 10000] and sum(session.bw_used for session in l) < 10000]
if len(gids) > 1: if len(gids) > 1:
sessions = sum([self.groups[gid] for gid in gids], []) sessions = sum([self.groups[gid] for gid in gids], [])
new_gid = max(gids) new_gid = max(gids)
@ -438,19 +427,15 @@ class Controller(util.LoggedClass):
'logged': len([s for s in self.sessions if s.log_me]), 'logged': len([s for s in self.sessions if s.log_me]),
'paused': sum(s.pause for s in self.sessions), 'paused': sum(s.pause for s in self.sessions),
'pid': os.getpid(), 'pid': os.getpid(),
'peers': len(self.irc.peers), 'peers': self.peers.count(),
'requests': sum(s.requests_remaining() for s in self.sessions), 'requests': sum(s.count_pending_items() for s in self.sessions),
'sessions': self.session_count(), 'sessions': self.session_count(),
'subs': self.subscription_count, 'subs': self.sub_count(),
'txs_sent': self.txs_sent, 'txs_sent': self.txs_sent,
} }
@staticmethod def sub_count(self):
def text_lines(method, data): return sum(s.sub_count() for s in self.sessions)
if method == 'sessions':
return Controller.sessions_text_lines(data)
else:
return Controller.groups_text_lines(data)
@staticmethod @staticmethod
def groups_text_lines(data): def groups_text_lines(data):
@ -482,8 +467,8 @@ class Controller(util.LoggedClass):
sessions = self.groups[gid] sessions = self.groups[gid]
result.append([gid, result.append([gid,
len(sessions), len(sessions),
sum(s.bandwidth_used for s in sessions), sum(s.bw_used for s in sessions),
sum(s.requests_remaining() for s in sessions), sum(s.count_pending_items() for s in sessions),
sum(s.txs_sent for s in sessions), sum(s.txs_sent for s in sessions),
sum(s.sub_count() for s in sessions), sum(s.sub_count() for s in sessions),
sum(s.recv_count for s in sessions), sum(s.recv_count for s in sessions),
@ -523,17 +508,17 @@ class Controller(util.LoggedClass):
def session_data(self, for_log): def session_data(self, for_log):
'''Returned to the RPC 'sessions' call.''' '''Returned to the RPC 'sessions' call.'''
now = time.time() now = time.time()
sessions = sorted(self.sessions, key=lambda s: s.start) sessions = sorted(self.sessions, key=lambda s: s.start_time)
return [(session.id_, return [(session.session_id,
session.flags(), session.flags(),
session.peername(for_log=for_log), session.peername(for_log=for_log),
session.client, session.client,
session.requests_remaining(), session.count_pending_items(),
session.txs_sent, session.txs_sent,
session.sub_count(), session.sub_count(),
session.recv_count, session.recv_size, session.recv_count, session.recv_size,
session.send_count, session.send_size, session.send_count, session.send_size,
now - session.start) now - session.start_time)
for session in sessions] for session in sessions]
def lookup_session(self, session_id): def lookup_session(self, session_id):
@ -543,7 +528,7 @@ class Controller(util.LoggedClass):
pass pass
else: else:
for session in self.sessions: for session in self.sessions:
if session.id_ == session_id: if session.session_id == session_id:
return session return session
return None return None
@ -562,42 +547,42 @@ class Controller(util.LoggedClass):
# Local RPC command handlers # Local RPC command handlers
async def rpc_disconnect(self, session_ids): def rpc_disconnect(self, session_ids):
'''Disconnect sesssions. '''Disconnect sesssions.
session_ids: array of session IDs session_ids: array of session IDs
''' '''
return self.for_each_session(session_ids, self.close_session) return self.for_each_session(session_ids, self.close_session)
async def rpc_log(self, session_ids): def rpc_log(self, session_ids):
'''Toggle logging of sesssions. '''Toggle logging of sesssions.
session_ids: array of session IDs session_ids: array of session IDs
''' '''
return self.for_each_session(session_ids, self.toggle_logging) return self.for_each_session(session_ids, self.toggle_logging)
async def rpc_stop(self): def rpc_stop(self):
'''Shut down the server cleanly.''' '''Shut down the server cleanly.'''
self.initiate_shutdown() self.initiate_shutdown()
return 'stopping' return 'stopping'
async def rpc_getinfo(self): def rpc_getinfo(self):
'''Return summary information about the server process.''' '''Return summary information about the server process.'''
return self.server_summary() return self.server_summary()
async def rpc_groups(self): def rpc_groups(self):
'''Return statistics about the session groups.''' '''Return statistics about the session groups.'''
return self.group_data() return self.group_data()
async def rpc_sessions(self): def rpc_sessions(self):
'''Return statistics about connected sessions.''' '''Return statistics about connected sessions.'''
return self.session_data(for_log=False) return self.session_data(for_log=False)
async def rpc_peers(self): def rpc_peers(self):
'''Return a list of server peers, currently taken from IRC.''' '''Return a list of server peers, currently taken from IRC.'''
return self.irc.peers return self.peers.peer_list()
async def rpc_reorg(self, count=3): def rpc_reorg(self, count=3):
'''Force a reorg of the given number of blocks. '''Force a reorg of the given number of blocks.
count: number of blocks to reorg (default 3) count: number of blocks to reorg (default 3)
@ -647,10 +632,12 @@ class Controller(util.LoggedClass):
raise RPCError('daemon error: {}'.format(e)) raise RPCError('daemon error: {}'.format(e))
async def new_subscription(self, address): async def new_subscription(self, address):
if self.subscription_count >= self.max_subs: if self.subs_room <= 0:
raise RPCError('server subscription limit {:,d} reached' self.subs_room = self.max_subs - self.sub_count()
.format(self.max_subs)) if self.subs_room <= 0:
self.subscription_count += 1 raise RPCError('server subscription limit {:,d} reached'
.format(self.max_subs))
self.subs_room -= 1
hashX = self.address_to_hashX(address) hashX = self.address_to_hashX(address)
status = await self.address_status(hashX) status = await self.address_status(hashX)
return hashX, status return hashX, status
@ -777,14 +764,14 @@ class Controller(util.LoggedClass):
'height': utxo.height, 'value': utxo.value} 'height': utxo.height, 'value': utxo.value}
for utxo in sorted(await self.get_utxos(hashX))] for utxo in sorted(await self.get_utxos(hashX))]
async def block_get_chunk(self, index): def block_get_chunk(self, index):
'''Return a chunk of block headers. '''Return a chunk of block headers.
index: the chunk index''' index: the chunk index'''
index = self.non_negative_integer(index) index = self.non_negative_integer(index)
return self.get_chunk(index) return self.get_chunk(index)
async def block_get_header(self, height): def block_get_header(self, height):
'''The deserialized header at a given height. '''The deserialized header at a given height.
height: the header's height''' height: the header's height'''
@ -877,24 +864,6 @@ class Controller(util.LoggedClass):
return banner return banner
async def donation_address(self): def donation_address(self):
'''Return the donation address as a string, empty if there is none.''' '''Return the donation address as a string, empty if there is none.'''
return self.env.donation_address return self.env.donation_address
async def peers_subscribe(self):
'''Returns the server peers as a list of (ip, host, ports) tuples.
Despite the name this is not currently treated as a subscription.'''
return list(self.irc.peers.values())
async def version(self, client_name=None, protocol_version=None):
'''Returns the server version as a string.
client_name: a string identifying the client
protocol_version: the protocol version spoken by the client
'''
if client_name:
self.client = str(client_name)[:15]
if protocol_version is not None:
self.protocol_version = protocol_version
return VERSION

View File

@ -66,7 +66,7 @@ class Env(LoggedClass):
self.report_ssl_port self.report_ssl_port
if self.report_ssl_port else if self.report_ssl_port else
self.ssl_port) self.ssl_port)
self.report_host_tor = self.default('REPORT_HOST_TOR', None) self.report_host_tor = self.default('REPORT_HOST_TOR', '')
def default(self, envvar, default): def default(self, envvar, default):
return environ.get(envvar, default) return environ.get(envvar, default)

View File

@ -12,7 +12,6 @@ Only calling start() requires the IRC Python module.
import asyncio import asyncio
import re import re
import socket
from collections import namedtuple from collections import namedtuple
@ -22,52 +21,26 @@ from lib.util import LoggedClass
class IRC(LoggedClass): class IRC(LoggedClass):
Peer = namedtuple('Peer', 'ip_addr host ports')
class DisconnectedError(Exception): class DisconnectedError(Exception):
pass pass
def __init__(self, env): def __init__(self, env, peer_mgr):
super().__init__() super().__init__()
self.env = env self.coin = env.coin
self.peer_mgr = peer_mgr
# If this isn't something a peer or client expects # If this isn't something a peer or client expects
# then you won't appear in the client's network dialog box # then you won't appear in the client's network dialog box
irc_address = (env.coin.IRC_SERVER, env.coin.IRC_PORT)
self.channel = env.coin.IRC_CHANNEL self.channel = env.coin.IRC_CHANNEL
self.prefix = env.coin.IRC_PREFIX self.prefix = env.coin.IRC_PREFIX
self.clients = []
self.nick = '{}{}'.format(self.prefix, self.nick = '{}{}'.format(self.prefix,
env.irc_nick if env.irc_nick else env.irc_nick if env.irc_nick else
double_sha256(env.report_host.encode()) double_sha256(env.report_host.encode())
[:5].hex()) [:5].hex())
self.clients.append(IrcClient(irc_address, self.nick,
env.report_host,
env.report_tcp_port,
env.report_ssl_port))
if env.report_host_tor:
self.clients.append(IrcClient(irc_address, self.nick + '_tor',
env.report_host_tor,
env.report_tcp_port_tor,
env.report_ssl_port_tor))
self.peer_regexp = re.compile('({}[^!]*)!'.format(self.prefix)) self.peer_regexp = re.compile('({}[^!]*)!'.format(self.prefix))
self.peers = {}
async def start(self): async def start(self, name_pairs):
'''Start IRC connections if enabled in environment.''' '''Start IRC connections if enabled in environment.'''
try:
if self.env.irc:
await self.join()
else:
self.logger.info('IRC is disabled')
except asyncio.CancelledError:
pass
except Exception as e:
self.logger.error(str(e))
async def join(self):
import irc.client as irc_client import irc.client as irc_client
from jaraco.stream import buffer from jaraco.stream import buffer
@ -77,21 +50,18 @@ class IRC(LoggedClass):
# Register handlers for events we're interested in # Register handlers for events we're interested in
reactor = irc_client.Reactor() reactor = irc_client.Reactor()
for event in 'welcome join quit kick whoreply disconnect'.split(): for event in 'welcome join quit whoreply disconnect'.split():
reactor.add_global_handler(event, getattr(self, 'on_' + event)) reactor.add_global_handler(event, getattr(self, 'on_' + event))
# Note: Multiple nicks in same channel will trigger duplicate events # Note: Multiple nicks in same channel will trigger duplicate events
for client in self.clients: clients = [IrcClient(self.coin, real_name, self.nick + suffix,
client.connection = reactor.server() reactor.server())
for (real_name, suffix) in name_pairs]
while True: while True:
try: try:
for client in self.clients: for client in clients:
self.logger.info('Joining IRC in {} as "{}" with ' client.connect(self)
'real name "{}"'
.format(self.channel, client.nick,
client.realname))
client.connect()
while True: while True:
reactor.process_once() reactor.process_once()
await asyncio.sleep(2) await asyncio.sleep(2)
@ -130,14 +100,7 @@ class IRC(LoggedClass):
'''Called when someone leaves our channel.''' '''Called when someone leaves our channel.'''
match = self.peer_regexp.match(event.source) match = self.peer_regexp.match(event.source)
if match: if match:
self.peers.pop(match.group(1), None) self.peer_mgr.remove_irc_peer(match.group(1))
def on_kick(self, connection, event):
'''Called when someone is kicked from our channel.'''
self.log_event(event)
match = self.peer_regexp.match(event.arguments[0])
if match:
self.peers.pop(match.group(1), None)
def on_whoreply(self, connection, event): def on_whoreply(self, connection, event):
'''Called when a response to our who requests arrives. '''Called when a response to our who requests arrives.
@ -145,50 +108,25 @@ class IRC(LoggedClass):
The nick is the 4th argument, and real name is in the 6th The nick is the 4th argument, and real name is in the 6th
argument preceded by '0 ' for some reason. argument preceded by '0 ' for some reason.
''' '''
try: nick = event.arguments[4]
nick = event.arguments[4] if nick.startswith(self.prefix):
if nick.startswith(self.prefix): line = event.arguments[6].split()
line = event.arguments[6].split() hostname, details = line[1], line[2:]
try: self.peer_mgr.add_irc_peer(nick, hostname, details)
ip_addr = socket.gethostbyname(line[1])
except socket.error:
# Could be .onion or IPv6.
ip_addr = line[1]
peer = self.Peer(ip_addr, line[1], line[2:])
self.peers[nick] = peer
except (IndexError, UnicodeError):
# UnicodeError comes from invalid domains (issue #68)
pass
class IrcClient(LoggedClass): class IrcClient(object):
VERSION = '1.0' def __init__(self, coin, real_name, nick, server):
DEFAULT_PORTS = {'t': 50001, 's': 50002} self.irc_host = coin.IRC_SERVER
self.irc_port = coin.IRC_PORT
def __init__(self, irc_address, nick, host, tcp_port, ssl_port):
super().__init__()
self.irc_host, self.irc_port = irc_address
self.nick = nick self.nick = nick
self.realname = self.create_realname(host, tcp_port, ssl_port) self.real_name = real_name
self.connection = None self.server = server
def connect(self, keepalive=60): def connect(self, irc):
'''Connect this client to its IRC server''' '''Connect this client to its IRC server'''
self.connection.connect(self.irc_host, self.irc_port, self.nick, irc.logger.info('joining {} as "{}" with real name "{}"'
ircname=self.realname) .format(irc.channel, self.nick, self.real_name))
self.connection.set_keepalive(keepalive) self.server.connect(self.irc_host, self.irc_port, self.nick,
ircname=self.real_name)
@classmethod
def create_realname(cls, host, tcp_port, ssl_port):
def port_text(letter, port):
if not port:
return ''
if port == cls.DEFAULT_PORTS.get(letter):
return ' ' + letter
else:
return ' ' + letter + str(port)
tcp = port_text('t', tcp_port)
ssl = port_text('s', ssl_port)
return '{} v{}{}{}'.format(host, cls.VERSION, tcp, ssl)

139
server/peers.py Normal file
View File

@ -0,0 +1,139 @@
# Copyright (c) 2017, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''Peer management.'''
import asyncio
import socket
import traceback
from collections import namedtuple
from functools import partial
import lib.util as util
from server.irc import IRC
# A network identity we advertise: hostname plus TCP/SSL ports, and the
# suffix appended to our IRC nick for this identity ('' or '_tor').
NetIdentity = namedtuple(
    'NetIdentity', ['host', 'tcp_port', 'ssl_port', 'nick_suffix'])

# A peer learned over IRC: resolved IP (or raw hostname if resolution
# failed), the advertised hostname, and the remaining "real name" details.
IRCPeer = namedtuple('IRCPeer', ['ip_addr', 'host', 'details'])
class PeerManager(util.LoggedClass):
    '''Looks after the DB of peer network servers.

    Attempts to maintain a connection with up to 8 peers.
    Issues a 'peers.subscribe' RPC to them and tells them our data.
    '''
    VERSION = '1.0'
    DEFAULT_PORTS = {'t': 50001, 's': 50002}

    def __init__(self, env):
        super().__init__()
        self.env = env
        self.loop = asyncio.get_event_loop()
        self.irc = IRC(env, self)
        # Pending futures created by ensure_future(); reaped in main_loop()
        self.futures = set()
        self.identities = []
        # IRCPeer instances, keyed by nick
        self.irc_peers = {}

        # We can have a Tor identity in addition to a normal one
        self.identities.append(NetIdentity(env.report_host,
                                           env.report_tcp_port,
                                           env.report_ssl_port,
                                           ''))
        if env.report_host_tor.endswith('.onion'):
            self.identities.append(NetIdentity(env.report_host_tor,
                                               env.report_tcp_port_tor,
                                               env.report_ssl_port_tor,
                                               '_tor'))

    async def executor(self, func, *args, **kwargs):
        '''Run func taking args in the executor.'''
        await self.loop.run_in_executor(None, partial(func, *args, **kwargs))

    @classmethod
    def real_name(cls, identity):
        '''Real name as used on IRC.

        Encodes the identity's host and ports, e.g. "host v1.0 t s50002";
        a port letter appears alone if the port is the default for that
        letter, and is omitted entirely if the port is unset.
        '''
        def port_text(letter, port):
            if not port:
                return ''
            if port == cls.DEFAULT_PORTS.get(letter):
                return ' ' + letter
            else:
                return ' ' + letter + str(port)

        tcp = port_text('t', identity.tcp_port)
        ssl = port_text('s', identity.ssl_port)
        return '{} v{}{}{}'.format(identity.host, cls.VERSION, tcp, ssl)

    def ensure_future(self, coro):
        '''Convert a coro into a future and add it to our pending list
        to be waited for.'''
        self.futures.add(asyncio.ensure_future(coro))

    def start_irc(self):
        '''Start up the IRC connections if enabled.'''
        if self.env.irc:
            name_pairs = [(self.real_name(identity), identity.nick_suffix)
                          for identity in self.identities]
            self.ensure_future(self.irc.start(name_pairs))
        else:
            self.logger.info('IRC is disabled')

    async def main_loop(self):
        '''Start and then enter the main loop.

        Periodically reaps completed futures, logging any unexpected
        exception they raised.  On exit, cancels all pending futures.
        '''
        self.start_irc()
        try:
            while True:
                await asyncio.sleep(10)
                done = [future for future in self.futures if future.done()]
                self.futures.difference_update(done)
                for future in done:
                    try:
                        future.result()
                    except asyncio.CancelledError:
                        # Cancellation is expected during shutdown; not an
                        # error worth logging.  A bare except here would
                        # also swallow KeyboardInterrupt/SystemExit.
                        pass
                    except Exception:
                        self.log_error(traceback.format_exc())
        finally:
            for future in self.futures:
                future.cancel()

    def dns_lookup_peer(self, nick, hostname, details):
        '''Resolve hostname and record the peer under its nick.

        Blocking (gethostbyname), so intended to be run in the executor.
        Falls back to the raw hostname if resolution fails (.onion, IPv6).
        '''
        try:
            ip_addr = None
            try:
                ip_addr = socket.gethostbyname(hostname)
            except socket.error:
                pass  # IPv6?
            ip_addr = ip_addr or hostname
            self.irc_peers[nick] = IRCPeer(ip_addr, hostname, details)
            self.logger.info('new IRC peer {} at {} ({})'
                             .format(nick, hostname, details))
        except UnicodeError:
            # UnicodeError comes from invalid domains (issue #68)
            self.logger.info('IRC peer domain {} invalid'.format(hostname))

    def add_irc_peer(self, *args):
        '''Schedule DNS lookup of peer.'''
        self.ensure_future(self.executor(self.dns_lookup_peer, *args))

    def remove_irc_peer(self, nick):
        '''Remove a peer from our IRC peers map.'''
        self.logger.info('removing IRC peer {}'.format(nick))
        self.irc_peers.pop(nick, None)

    def count(self):
        '''Return the number of known IRC peers.'''
        return len(self.irc_peers)

    def peer_list(self):
        '''Return the map of IRC peers, keyed by nick.'''
        return self.irc_peers

    def subscribe(self):
        '''Returns the server peers as a list of (ip, host, details) tuples.

        Despite the name this is not currently treated as a subscription.'''
        return list(self.irc_peers.values())

View File

@ -9,38 +9,61 @@
import asyncio import asyncio
import time
import traceback import traceback
from functools import partial
from lib.jsonrpc import JSONRPC, RPCError from lib.jsonrpc import JSONSession, RPCError
from server.daemon import DaemonError from server.daemon import DaemonError
from server.version import VERSION
class Session(JSONRPC): class SessionBase(JSONSession):
'''Base class of ElectrumX JSON session protocols. '''Base class of ElectrumX JSON sessions.
Each session runs its tasks in asynchronous parallelism with other Each session runs its tasks in asynchronous parallelism with other
sessions. To prevent some sessions blocking others, potentially sessions.
long-running requests should yield.
''' '''
def __init__(self, controller, bp, env, kind): def __init__(self, controller, kind):
super().__init__() super().__init__()
self.kind = kind # 'RPC', 'TCP' etc.
self.controller = controller self.controller = controller
self.bp = bp self.bp = controller.bp
self.env = env self.env = controller.env
self.daemon = bp.daemon self.daemon = self.bp.daemon
self.kind = kind
self.client = 'unknown' self.client = 'unknown'
self.anon_logs = env.anon_logs self.anon_logs = self.env.anon_logs
self.max_send = env.max_send
self.bandwidth_limit = env.bandwidth_limit
self.last_delay = 0 self.last_delay = 0
self.txs_sent = 0 self.txs_sent = 0
self.requests = [] self.requests = []
self.start_time = time.time()
self.close_time = 0
self.bw_time = self.start_time
self.bw_interval = 3600
self.bw_used = 0
def is_closing(self): def have_pending_items(self):
'''True if this session is closing.''' '''Called each time the pending item queue goes from empty to having
return self.transport and self.transport.is_closing() one item.'''
self.controller.enqueue_session(self)
def close_connection(self):
'''Call this to close the connection.'''
self.close_time = time.time()
super().close_connection()
def peername(self, *, for_log=True):
'''Return the peer name of this connection.'''
peer_info = self.peer_info()
if not peer_info:
return 'unknown'
if for_log and self.anon_logs:
return 'xx.xx.xx.xx:xx'
if ':' in peer_info[0]:
return '[{}]:{}'.format(peer_info[0], peer_info[1])
else:
return '{}:{}'.format(peer_info[0], peer_info[1])
def flags(self): def flags(self):
'''Status flags.''' '''Status flags.'''
@ -52,42 +75,6 @@ class Session(JSONRPC):
status += str(self.controller.session_priority(self)) status += str(self.controller.session_priority(self))
return status return status
def requests_remaining(self):
return sum(request.remaining for request in self.requests)
def enqueue_request(self, request):
'''Add a request to the session's list.'''
self.requests.append(request)
if len(self.requests) == 1:
self.controller.enqueue_session(self)
async def serve_requests(self):
'''Serve requests in batches.'''
total = 0
errs = []
# Process 8 items at a time
for request in self.requests:
try:
initial = request.remaining
await request.process(self)
total += initial - request.remaining
except asyncio.CancelledError:
raise
except Exception:
# Should probably be considered a bug and fixed
self.log_error('error handling request {}'.format(request))
traceback.print_exc()
errs.append(request)
await asyncio.sleep(0)
if total >= 8:
break
# Remove completed requests and re-enqueue ourself if any remain.
self.requests = [req for req in self.requests
if req.remaining and not req in errs]
if self.requests:
self.controller.enqueue_session(self)
def connection_made(self, transport): def connection_made(self, transport):
'''Handle an incoming client connection.''' '''Handle an incoming client connection.'''
super().connection_made(transport) super().connection_made(transport)
@ -95,27 +82,32 @@ class Session(JSONRPC):
def connection_lost(self, exc): def connection_lost(self, exc):
'''Handle client disconnection.''' '''Handle client disconnection.'''
super().connection_lost(exc) msg = ''
if (self.pause or self.controller.is_deprioritized(self) if self.pause:
or self.send_size >= 1024*1024 or self.error_count): msg += ' whilst paused'
self.log_info('disconnected. Sent {:,d} bytes in {:,d} messages ' if self.controller.is_deprioritized(self):
'{:,d} errors' msg += ' whilst deprioritized'
.format(self.send_size, self.send_count, if self.send_size >= 1024*1024:
self.error_count)) msg += ('. Sent {:,d} bytes in {:,d} messages'
.format(self.send_size, self.send_count))
if msg:
msg = 'disconnected' + msg
self.log_info(msg)
self.controller.remove_session(self) self.controller.remove_session(self)
def sub_count(self): def sub_count(self):
return 0 return 0
class ElectrumX(Session): class ElectrumX(SessionBase):
'''A TCP server that handles incoming Electrum connections.''' '''A TCP server that handles incoming Electrum connections.'''
def __init__(self, *args): def __init__(self, *args, **kwargs):
super().__init__(*args) super().__init__(*args, **kwargs)
self.subscribe_headers = False self.subscribe_headers = False
self.subscribe_height = False self.subscribe_height = False
self.notified_height = None self.notified_height = None
self.max_send = self.env.max_send
self.max_subs = self.env.max_session_subs self.max_subs = self.env.max_session_subs
self.hashX_subs = {} self.hashX_subs = {}
self.electrumx_handlers = { self.electrumx_handlers = {
@ -123,6 +115,7 @@ class ElectrumX(Session):
'blockchain.headers.subscribe': self.headers_subscribe, 'blockchain.headers.subscribe': self.headers_subscribe,
'blockchain.numblocks.subscribe': self.numblocks_subscribe, 'blockchain.numblocks.subscribe': self.numblocks_subscribe,
'blockchain.transaction.broadcast': self.transaction_broadcast, 'blockchain.transaction.broadcast': self.transaction_broadcast,
'server.version': self.server_version,
} }
def sub_count(self): def sub_count(self):
@ -133,32 +126,29 @@ class ElectrumX(Session):
Cache is a shared cache for this update. Cache is a shared cache for this update.
''' '''
controller = self.controller
pairs = []
if height != self.notified_height: if height != self.notified_height:
self.notified_height = height self.notified_height = height
if self.subscribe_headers: if self.subscribe_headers:
payload = self.notification_payload( args = (controller.electrum_header(height), )
'blockchain.headers.subscribe', pairs.append(('blockchain.headers.subscribe', args))
(self.controller.electrum_header(height), ),
)
self.encode_and_send_payload(payload)
if self.subscribe_height: if self.subscribe_height:
payload = self.notification_payload( pairs.append(('blockchain.numblocks.subscribe', (height, )))
'blockchain.numblocks.subscribe',
(height, ),
)
self.encode_and_send_payload(payload)
matches = touched.intersection(self.hashX_subs) matches = touched.intersection(self.hashX_subs)
for hashX in matches: for hashX in matches:
address = self.hashX_subs[hashX] address = self.hashX_subs[hashX]
status = await self.controller.address_status(hashX) status = await controller.address_status(hashX)
payload = self.notification_payload( pairs.append(('blockchain.address.subscribe', (address, status)))
'blockchain.address.subscribe', (address, status))
self.encode_and_send_payload(payload)
self.send_notifications(pairs)
if matches: if matches:
self.log_info('notified of {:,d} addresses'.format(len(matches))) es = '' if len(matches) == 1 else 'es'
self.log_info('notified of {:,d} address{}'
.format(len(matches), es))
def height(self): def height(self):
'''Return the current flushed database height.''' '''Return the current flushed database height.'''
@ -168,12 +158,12 @@ class ElectrumX(Session):
'''Used as response to a headers subscription request.''' '''Used as response to a headers subscription request.'''
return self.controller.electrum_header(self.height()) return self.controller.electrum_header(self.height())
async def headers_subscribe(self): def headers_subscribe(self):
'''Subscribe to get headers of new blocks.''' '''Subscribe to get headers of new blocks.'''
self.subscribe_headers = True self.subscribe_headers = True
return self.current_electrum_header() return self.current_electrum_header()
async def numblocks_subscribe(self): def numblocks_subscribe(self):
'''Subscribe to get height of new blocks.''' '''Subscribe to get height of new blocks.'''
self.subscribe_height = True self.subscribe_height = True
return self.height() return self.height()
@ -191,6 +181,18 @@ class ElectrumX(Session):
self.hashX_subs[hashX] = address self.hashX_subs[hashX] = address
return status return status
def server_version(self, client_name=None, protocol_version=None):
'''Returns the server version as a string.
client_name: a string identifying the client
protocol_version: the protocol version spoken by the client
'''
if client_name:
self.client = str(client_name)[:15]
if protocol_version is not None:
self.protocol_version = protocol_version
return VERSION
async def transaction_broadcast(self, raw_tx): async def transaction_broadcast(self, raw_tx):
'''Broadcast a raw transaction to the network. '''Broadcast a raw transaction to the network.
@ -230,13 +232,13 @@ class ElectrumX(Session):
return handler return handler
class LocalRPC(Session): class LocalRPC(SessionBase):
'''A local TCP RPC server for querying status.''' '''A local TCP RPC server session.'''
def __init__(self, *args): def __init__(self, *args, **kwargs):
super().__init__(*args) super().__init__(*args, **kwargs)
self.client = 'RPC' self.client = 'RPC'
self.max_send = 5000000 self.max_send = 0
def request_handler(self, method): def request_handler(self, method):
'''Return the async handler for the given request method.''' '''Return the async handler for the given request method.'''

View File

@ -10,11 +10,11 @@
import os import os
from functools import partial from functools import partial
from lib.util import subclasses, increment_byte_string import lib.util as util
def db_class(name): def db_class(name):
'''Returns a DB engine class.''' '''Returns a DB engine class.'''
for db_class in subclasses(Storage): for db_class in util.subclasses(Storage):
if db_class.__name__.lower() == name.lower(): if db_class.__name__.lower() == name.lower():
db_class.import_module() db_class.import_module()
return db_class return db_class

View File

@ -1 +1 @@
VERSION = "ElectrumX 0.10.7" VERSION = "ElectrumX 0.10.11"

View File

@ -56,4 +56,4 @@ def test_chunks():
def test_increment_byte_string(): def test_increment_byte_string():
assert util.increment_byte_string(b'1') == b'2' assert util.increment_byte_string(b'1') == b'2'
assert util.increment_byte_string(b'\x01\x01') == b'\x01\x02' assert util.increment_byte_string(b'\x01\x01') == b'\x01\x02'
assert util.increment_byte_string(b'\xff\xff') == b'\x01\x00\x00' assert util.increment_byte_string(b'\xff\xff') == None