Merge branch 'develop'

commit 491080f4c2

README.rst | 73 lines changed
@@ -115,8 +115,6 @@ Roadmap Pre-1.0

- minor code cleanups.
- implement simple protocol to discover peers without resorting to IRC.
  This may slip to post 1.0


Roadmap Post-1.0
================

@@ -137,6 +135,33 @@ version prior to the release of 1.0.
ChangeLog
=========

Version 0.10.11
---------------

* rewrite of JSON RPC layer to improve usability for clients.
  Includes support of JSON RPC v1, v2 and a compat layer that tries to
  detect the peer's version.

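  The compat layer's exact logic lives in lib/jsonrpc.py (whose diff is
  suppressed further down in this commit); as a rough illustrative sketch
  only, not the actual implementation, detection can key off the
  ``jsonrpc`` member that v2 requests carry and v1 requests lack::

      import json

      def guess_jsonrpc_version(raw):
          '''Illustrative only: classify a single request as v1 or v2.'''
          request = json.loads(raw)
          # JSON RPC v2 requests must include '"jsonrpc": "2.0"'.
          return 'v2' if request.get('jsonrpc') == '2.0' else 'v1'

      # A v1 client omits the "jsonrpc" member entirely.
      print(guess_jsonrpc_version('{"id": 0, "method": "server.version", "params": []}'))
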
Version 0.10.10
---------------

* move peer management from irc.py to peers.py. This is preparation
  for peer discovery without IRC.
* misc cleanups
* fix Litecoin genesis hash (petrkr)

Version 0.10.9
--------------

* restore client to sessions output
* cleanup shutdown process; hopefully this resolves the log spew for good

Version 0.10.8
--------------

* fix import for reverse iterator for RocksDB
* fix tests

Version 0.10.7
--------------

@@ -220,46 +245,6 @@ variables to use roughly the same amount of memory.
For now this code should be considered experimental; if you want
stability please stick with the 0.9 series.

Version 0.9.23
--------------

* Backport of the fix for issue `#94`_ - stale references to old
  sessions. This would effectively leak memory and network handles.

Version 0.9.22
--------------

* documentation updates (ARCHITECTURE.rst, ENVIRONMENT.rst) only.

Version 0.9.21
--------------

* moved RELEASE-NOTES into this README
* document the RPC interface in docs/RPC-INTERFACE.rst
* clean up open DB handling, issue `#89`_

Version 0.9.20
--------------

* fix for IRC flood issue `#93`_

Version 0.9.19
--------------

* move sleep outside semaphore (issue `#88`_)

Version 0.9.18
--------------

* last release of 2016. Just a couple of minor tweaks to logging.

Version 0.9.17
--------------

* have all the DBs use fsync on write; hopefully means DB won't corrupt in
  case of a kernel panic (issue `#75`_)
* replace $DONATION_ADDRESS in banner file


**Neil Booth** kyuupichan@gmail.com https://github.com/kyuupichan

@@ -267,11 +252,7 @@ Version 0.9.17


.. _#72: https://github.com/kyuupichan/electrumx/issues/72
.. _#75: https://github.com/kyuupichan/electrumx/issues/75
.. _#88: https://github.com/kyuupichan/electrumx/issues/88
.. _#89: https://github.com/kyuupichan/electrumx/issues/89
.. _#92: https://github.com/kyuupichan/electrumx/issues/92
.. _#93: https://github.com/kyuupichan/electrumx/issues/93
.. _#94: https://github.com/kyuupichan/electrumx/issues/94
.. _#99: https://github.com/kyuupichan/electrumx/issues/99
.. _#100: https://github.com/kyuupichan/electrumx/issues/100

@@ -36,7 +36,7 @@ Not started until the Block Processor has caught up with bitcoind.
Daemon
------

Encapsulates the RPC wire protcol with bitcoind for the whole server.
Encapsulates the RPC wire protocol with bitcoind for the whole server.
Transparently handles temporary bitcoind connection errors, and fails
over if necessary.


@@ -205,7 +205,8 @@ below are low and encourage you to raise them.
  An integer number of seconds defaulting to 600. Sessions with no
  activity for longer than this are disconnected. Properly
  functioning Electrum clients by default will send pings roughly
  every 60 seconds.
  every 60 seconds, and servers doing peer discovery roughly every 300
  seconds.

IRC
---
@@ -239,8 +240,9 @@ connectivity on IRC:

* **REPORT_HOST_TOR**

  The tor .onion address to advertise. If set, an additional
  connection to IRC happens with '_tor" appended to **IRC_NICK**.
  The tor address to advertise; must end with `.onion`. If set, an
  additional connection to IRC happens with '_tor' appended to
  **IRC_NICK**.

* **REPORT_TCP_PORT_TOR**


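As a hedged illustration of the documented rule (the real check is the
``endswith('.onion')`` test in server/peers.py later in this commit), an
operator's setting could be validated at startup along these lines::

    import os

    # Hypothetical startup check: only advertise a Tor identity when
    # REPORT_HOST_TOR really is a .onion address, as the docs now require.
    report_host_tor = os.environ.get('REPORT_HOST_TOR', '')
    if report_host_tor and not report_host_tor.endswith('.onion'):
        raise SystemExit('REPORT_HOST_TOR must end with .onion')
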
@@ -16,45 +16,60 @@ import json
from functools import partial
from os import environ

from lib.jsonrpc import JSONRPC
from lib.jsonrpc import JSONSession, JSONRPCv2
from server.controller import Controller


class RPCClient(JSONRPC):
class RPCClient(JSONSession):

    def __init__(self):
        super().__init__()
        self.queue = asyncio.Queue()
        self.max_send = 1000000
        super().__init__(version=JSONRPCv2)
        self.max_send = 0
        self.max_buffer_size = 5*10**6
        self.event = asyncio.Event()

    def enqueue_request(self, request):
        self.queue.put_nowait(request)
    def have_pending_items(self):
        self.event.set()

    async def send_and_wait(self, method, params, timeout=None):
        # Raise incoming buffer size - presumably connection is trusted
        self.max_buffer_size = 5000000
        if params:
            params = [params]
        payload = self.request_payload(method, id_=method, params=params)
        self.encode_and_send_payload(payload)
    async def wait_for_response(self):
        await self.event.wait()
        await self.process_pending_items()

        future = asyncio.ensure_future(self.queue.get())
        for f in asyncio.as_completed([future], timeout=timeout):
            try:
                request = await f
            except asyncio.TimeoutError:
                future.cancel()
                print('request timed out after {}s'.format(timeout))
    def send_rpc_request(self, method, params):
        handler = partial(self.handle_response, method)
        self.send_request(handler, method, params)

    def handle_response(self, method, result, error):
        if method in ('groups', 'sessions') and not error:
            if method == 'groups':
                lines = Controller.groups_text_lines(result)
            else:
                await request.process(self)

    async def handle_response(self, result, error, method):
        if result and method in ('groups', 'sessions'):
            for line in Controller.text_lines(method, result):
                lines = Controller.sessions_text_lines(result)
            for line in lines:
                print(line)
        elif error:
            print('error: {} (code {:d})'
                  .format(error['message'], error['code']))
        else:
            value = {'error': error} if error else result
            print(json.dumps(value, indent=4, sort_keys=True))
            print(json.dumps(result, indent=4, sort_keys=True))


def rpc_send_and_wait(port, method, params, timeout=15):
    loop = asyncio.get_event_loop()
    coro = loop.create_connection(RPCClient, 'localhost', port)
    try:
        transport, rpc_client = loop.run_until_complete(coro)
        rpc_client.send_rpc_request(method, params)
        try:
            coro = rpc_client.wait_for_response()
            loop.run_until_complete(asyncio.wait_for(coro, timeout))
        except asyncio.TimeoutError:
            print('request timed out after {}s'.format(timeout))
    except OSError:
        print('cannot connect - is ElectrumX catching up, not running, or '
              'is {:d} the wrong RPC port?'.format(port))
    finally:
        loop.close()


def main():
@@ -68,19 +83,17 @@ def main():
                        help='params to send')
    args = parser.parse_args()

    if args.port is None:
        args.port = int(environ.get('RPC_PORT', 8000))
    port = args.port
    if port is None:
        port = int(environ.get('RPC_PORT', 8000))

    loop = asyncio.get_event_loop()
    coro = loop.create_connection(RPCClient, 'localhost', args.port)
    try:
        transport, protocol = loop.run_until_complete(coro)
        coro = protocol.send_and_wait(args.command[0], args.param, timeout=15)
        loop.run_until_complete(coro)
    except OSError:
        print('error connecting - is ElectrumX catching up or not running?')
    finally:
        loop.close()
    # Get the RPC request.
    method = args.command[0]
    params = args.param
    if method in ('log', 'disconnect'):
        params = [params]

    rpc_send_and_wait(port, method, params)


if __name__ == '__main__':

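Taken together, a minimal driver for the reworked client above might look
like the following sketch; it reuses ``rpc_send_and_wait()`` from the new
code above and picks ``getinfo`` purely as an example method::

    import os

    def query_local_server():
        # Same default port resolution as main() above.
        port = int(os.environ.get('RPC_PORT', 8000))
        # 'getinfo' takes no parameters; 'log' and 'disconnect' would have
        # their session-id list wrapped in a list, as main() does.
        rpc_send_and_wait(port, 'getinfo', [])

    query_local_server()
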
@@ -318,8 +318,8 @@ class Litecoin(Coin):
    P2PKH_VERBYTE = 0x30
    P2SH_VERBYTE = 0x05
    WIF_BYTE = 0xb0
    GENESIS_HASH=('000000000019d6689c085ae165831e93'
                  '4ff763ae46a2a6c172b3f1b60a8ce26f')
    GENESIS_HASH=('12a765e31ffd4059bada1e25190f6e98'
                  'c99d9714d334efa41a195a7e7e04bfe2')
    TX_COUNT = 8908766
    TX_COUNT_HEIGHT = 1105256
    TX_PER_BLOCK = 10

@@ -1,4 +1,4 @@
# Copyright (c) 2016, Neil Booth
# Copyright (c) 2016-2017, Neil Booth
#
# All rights reserved.
#

lib/jsonrpc.py | 877 lines changed (file diff suppressed because it is too large)
@@ -1,4 +1,4 @@
# Copyright (c) 2016, Neil Booth
# Copyright (c) 2016-2017, Neil Booth
#
# All rights reserved.
#

@@ -1,4 +1,4 @@
# Copyright (c) 2016, Neil Booth
# Copyright (c) 2016-2017, Neil Booth
#
# All rights reserved.
#

@@ -1,4 +1,4 @@
# Copyright (c) 2016, Neil Booth
# Copyright (c) 2016-2017, Neil Booth
#
# All rights reserved.
#

@@ -196,11 +196,14 @@ class BlockProcessor(server.db.DB):
            task = await self.task_queue.get()
            await task()

    def shutdown(self):
    def shutdown(self, executor):
        '''Shutdown cleanly and flush to disk.'''
        # First shut down the executor; it may be processing a block.
        # Then we can flush anything remaining to disk.
        executor.shutdown()
        if self.height != self.db_height:
            self.logger.info('flushing state to DB for a clean shutdown...')
            self.flush(True)
        self.logger.info('shutdown complete')

    async def executor(self, func, *args, **kwargs):
        '''Run func taking args in the executor.'''

@@ -18,14 +18,14 @@ from functools import partial

import pylru

from lib.jsonrpc import JSONRPC, RPCError, RequestBase
from lib.jsonrpc import JSONRPC, RPCError
from lib.hash import sha256, double_sha256, hash_to_str, hex_str_to_hash
import lib.util as util
from server.block_processor import BlockProcessor
from server.daemon import Daemon, DaemonError
from server.irc import IRC
from server.session import LocalRPC, ElectrumX
from server.mempool import MemPool
from server.peers import PeerManager
from server.session import LocalRPC, ElectrumX
from server.version import VERSION


@@ -39,16 +39,6 @@ class Controller(util.LoggedClass):
    BANDS = 5
    CATCHING_UP, LISTENING, PAUSED, SHUTTING_DOWN = range(4)

    class NotificationRequest(RequestBase):
        def __init__(self, height, touched):
            super().__init__(1)
            self.height = height
            self.touched = touched

        async def process(self, session):
            self.remaining = 0
            await session.notify(self.height, self.touched)

    def __init__(self, env):
        super().__init__()
        # Set this event to cleanly shutdown
@@ -56,12 +46,12 @@ class Controller(util.LoggedClass):
        self.loop = asyncio.get_event_loop()
        self.executor = ThreadPoolExecutor()
        self.loop.set_default_executor(self.executor)
        self.start = time.time()
        self.start_time = time.time()
        self.coin = env.coin
        self.daemon = Daemon(env.coin.daemon_urls(env.daemon_url))
        self.bp = BlockProcessor(env, self.daemon)
        self.mempool = MemPool(self.bp)
        self.irc = IRC(env)
        self.peers = PeerManager(env)
        self.env = env
        self.servers = {}
        # Map of session to the key of its list in self.groups
@@ -73,7 +63,8 @@ class Controller(util.LoggedClass):
        self.max_sessions = env.max_sessions
        self.low_watermark = self.max_sessions * 19 // 20
        self.max_subs = env.max_subs
        self.subscription_count = 0
        # Cache some idea of room to avoid recounting on each subscription
        self.subs_room = 0
        self.next_stale_check = 0
        self.history_cache = pylru.lrucache(256)
        self.header_cache = pylru.lrucache(8)
@@ -95,12 +86,14 @@ class Controller(util.LoggedClass):
             'block.get_header block.get_chunk estimatefee relayfee '
             'transaction.get transaction.get_merkle utxo.get_address'),
            ('server',
             'banner donation_address peers.subscribe version'),
             'banner donation_address'),
        ]
        self.electrumx_handlers = {'.'.join([prefix, suffix]):
                                   getattr(self, suffix.replace('.', '_'))
                                   for prefix, suffixes in rpcs
                                   for suffix in suffixes.split()}
        handlers = {'.'.join([prefix, suffix]):
                    getattr(self, suffix.replace('.', '_'))
                    for prefix, suffixes in rpcs
                    for suffix in suffixes.split()}
        handlers['server.peers.subscribe'] = self.peers.subscribe
        self.electrumx_handlers = handlers

    async def mempool_transactions(self, hashX):
        '''Generate (hex_hash, tx_fee, unconfirmed) tuples for mempool
@@ -138,9 +131,9 @@ class Controller(util.LoggedClass):
        if isinstance(session, LocalRPC):
            return 0
        gid = self.sessions[session]
        group_bandwidth = sum(s.bandwidth_used for s in self.groups[gid])
        return 1 + (bisect_left(self.bands, session.bandwidth_used)
                    + bisect_left(self.bands, group_bandwidth)) // 2
        group_bw = sum(session.bw_used for session in self.groups[gid])
        return 1 + (bisect_left(self.bands, session.bw_used)
                    + bisect_left(self.bands, group_bw)) // 2

    def is_deprioritized(self, session):
        return self.session_priority(session) > self.BANDS
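
The renamed ``bw_used`` fields feed the same banding scheme as before; as a
self-contained sketch of the idea (the thresholds below are made up for
illustration, the server derives its own from the configured limits)::

    from bisect import bisect_left

    bands = [10000, 100000, 1000000, 10000000, 100000000]

    def priority(session_bw, group_bw):
        # Average the session's own band and its group's band, mirroring
        # session_priority() above.
        return 1 + (bisect_left(bands, session_bw)
                    + bisect_left(bands, group_bw)) // 2

    print(priority(5000, 50000))          # light session, light group
    print(priority(2000000, 20000000))    # heavy group is deprioritized
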
@@ -163,6 +156,15 @@ class Controller(util.LoggedClass):
                    and self.state == self.PAUSED):
                await self.start_external_servers()

            # Periodically log sessions
            if self.env.log_sessions and time.time() > self.next_log_sessions:
                if self.next_log_sessions:
                    data = self.session_data(for_log=True)
                    for line in Controller.sessions_text_lines(data):
                        self.logger.info(line)
                    self.logger.info(json.dumps(self.server_summary()))
                self.next_log_sessions = time.time() + self.env.log_sessions

            await asyncio.sleep(1)

    def enqueue_session(self, session):
@@ -192,7 +194,10 @@ class Controller(util.LoggedClass):
        while True:
            priority_, id_, session = await self.queue.get()
            if session in self.sessions:
                await session.serve_requests()
                await session.process_pending_items()
                # Re-enqueue the session if stuff is left
                if session.items:
                    self.enqueue_session(session)

    def initiate_shutdown(self):
        '''Call this function to start the shutdown process.'''
@@ -206,11 +211,11 @@ class Controller(util.LoggedClass):
        async def await_bp_catchup():
            '''Wait for the block processor to catch up.

            When it has, start the servers and connect to IRC.
            Then start the servers and the peer manager.
            '''
            await self.bp.caught_up_event.wait()
            self.logger.info('block processor has caught up')
            add_future(self.irc.start())
            add_future(self.peers.main_loop())
            add_future(self.start_servers())
            add_future(self.mempool.main_loop())
            add_future(self.enqueue_delayed_sessions())
@@ -225,7 +230,13 @@ class Controller(util.LoggedClass):

        # Perform a clean shutdown when this event is signalled.
        await self.shutdown_event.wait()
        self.logger.info('shutting down gracefully')

        self.logger.info('shutting down')
        await self.shutdown(futures)
        self.logger.info('shutdown complete')

    async def shutdown(self, futures):
        '''Perform the shutdown sequence.'''
        self.state = self.SHUTTING_DOWN

        # Close servers and sessions
@@ -237,11 +248,12 @@ class Controller(util.LoggedClass):
        for future in futures:
            future.cancel()

        await asyncio.wait(futures)
        # Wait for all futures to finish
        while any(not future.done() for future in futures):
            await asyncio.sleep(1)

        # Wait for the executor to finish anything it's doing
        self.executor.shutdown()
        self.bp.shutdown()
        # Finally shut down the block processor and executor
        self.bp.shutdown(self.executor)

    def close_servers(self, kinds):
        '''Close the servers of the given kinds (TCP etc.).'''
@@ -253,22 +265,10 @@ class Controller(util.LoggedClass):
            if server:
                server.close()

    async def wait_for_sessions(self, secs=30):
        if not self.sessions:
            return
        self.logger.info('waiting up to {:d} seconds for socket cleanup'
                         .format(secs))
        limit = time.time() + secs
        while self.sessions and time.time() < limit:
            self.clear_stale_sessions(grace=secs//2)
            await asyncio.sleep(2)
        self.logger.info('{:,d} sessions remaining'
                         .format(len(self.sessions)))

    async def start_server(self, kind, *args, **kw_args):
        protocol_class = LocalRPC if kind == 'RPC' else ElectrumX
        protocol = partial(protocol_class, self, self.bp, self.env, kind)
        server = self.loop.create_server(protocol, *args, **kw_args)
        protocol_factory = partial(protocol_class, self, kind)
        server = self.loop.create_server(protocol_factory, *args, **kw_args)

        host, port = args[:2]
        try:
@@ -331,17 +331,7 @@ class Controller(util.LoggedClass):

        for session in self.sessions:
            if isinstance(session, ElectrumX):
                request = self.NotificationRequest(self.bp.db_height,
                                                   touched)
                session.enqueue_request(request)
        # Periodically log sessions
        if self.env.log_sessions and time.time() > self.next_log_sessions:
            if self.next_log_sessions:
                data = self.session_data(for_log=True)
                for line in Controller.sessions_text_lines(data):
                    self.logger.info(line)
                self.logger.info(json.dumps(self.server_summary()))
            self.next_log_sessions = time.time() + self.env.log_sessions
                await session.notify(self.bp.db_height, touched)

    def electrum_header(self, height):
        '''Return the binary header at the given height.'''
@@ -359,7 +349,7 @@ class Controller(util.LoggedClass):
        if now > self.next_stale_check:
            self.next_stale_check = now + 300
            self.clear_stale_sessions()
        gid = int(session.start - self.start) // 900
        gid = int(session.start_time - self.start_time) // 900
        self.groups[gid].append(session)
        self.sessions[session] = gid
        session.log_info('{} {}, {:,d} total'
@@ -379,17 +369,16 @@ class Controller(util.LoggedClass):
        gid = self.sessions.pop(session)
        assert gid in self.groups
        self.groups[gid].remove(session)
        self.subscription_count -= session.sub_count()

    def close_session(self, session):
        '''Close the session's transport and cancel its future.'''
        session.close_connection()
        return 'disconnected {:d}'.format(session.id_)
        return 'disconnected {:d}'.format(session.session_id)

    def toggle_logging(self, session):
        '''Toggle logging of the session.'''
        session.log_me = not session.log_me
        return 'log {:d}: {}'.format(session.id_, session.log_me)
        return 'log {:d}: {}'.format(session.session_id, session.log_me)

    def clear_stale_sessions(self, grace=15):
        '''Cut off sessions that haven't done anything for 10 minutes. Force
@@ -403,17 +392,17 @@ class Controller(util.LoggedClass):
        stale = []
        for session in self.sessions:
            if session.is_closing():
                if session.stop <= shutdown_cutoff:
                    session.transport.abort()
                if session.close_time <= shutdown_cutoff:
                    session.abort()
            elif session.last_recv < stale_cutoff:
                self.close_session(session)
                stale.append(session.id_)
                stale.append(session.session_id)
        if stale:
            self.logger.info('closing stale connections {}'.format(stale))

        # Consolidate small groups
        gids = [gid for gid, l in self.groups.items() if len(l) <= 4
                and sum(session.bandwidth_used for session in l) < 10000]
                and sum(session.bw_used for session in l) < 10000]
        if len(gids) > 1:
            sessions = sum([self.groups[gid] for gid in gids], [])
            new_gid = max(gids)
@@ -438,19 +427,15 @@ class Controller(util.LoggedClass):
            'logged': len([s for s in self.sessions if s.log_me]),
            'paused': sum(s.pause for s in self.sessions),
            'pid': os.getpid(),
            'peers': len(self.irc.peers),
            'requests': sum(s.requests_remaining() for s in self.sessions),
            'peers': self.peers.count(),
            'requests': sum(s.count_pending_items() for s in self.sessions),
            'sessions': self.session_count(),
            'subs': self.subscription_count,
            'subs': self.sub_count(),
            'txs_sent': self.txs_sent,
        }

    @staticmethod
    def text_lines(method, data):
        if method == 'sessions':
            return Controller.sessions_text_lines(data)
        else:
            return Controller.groups_text_lines(data)
    def sub_count(self):
        return sum(s.sub_count() for s in self.sessions)

    @staticmethod
    def groups_text_lines(data):
@@ -482,8 +467,8 @@ class Controller(util.LoggedClass):
            sessions = self.groups[gid]
            result.append([gid,
                           len(sessions),
                           sum(s.bandwidth_used for s in sessions),
                           sum(s.requests_remaining() for s in sessions),
                           sum(s.bw_used for s in sessions),
                           sum(s.count_pending_items() for s in sessions),
                           sum(s.txs_sent for s in sessions),
                           sum(s.sub_count() for s in sessions),
                           sum(s.recv_count for s in sessions),
@@ -523,17 +508,17 @@ class Controller(util.LoggedClass):
    def session_data(self, for_log):
        '''Returned to the RPC 'sessions' call.'''
        now = time.time()
        sessions = sorted(self.sessions, key=lambda s: s.start)
        return [(session.id_,
        sessions = sorted(self.sessions, key=lambda s: s.start_time)
        return [(session.session_id,
                 session.flags(),
                 session.peername(for_log=for_log),
                 session.client,
                 session.requests_remaining(),
                 session.count_pending_items(),
                 session.txs_sent,
                 session.sub_count(),
                 session.recv_count, session.recv_size,
                 session.send_count, session.send_size,
                 now - session.start)
                 now - session.start_time)
                for session in sessions]

    def lookup_session(self, session_id):
@@ -543,7 +528,7 @@ class Controller(util.LoggedClass):
            pass
        else:
            for session in self.sessions:
                if session.id_ == session_id:
                if session.session_id == session_id:
                    return session
        return None

@@ -562,42 +547,42 @@ class Controller(util.LoggedClass):

    # Local RPC command handlers

    async def rpc_disconnect(self, session_ids):
    def rpc_disconnect(self, session_ids):
        '''Disconnect sessions.

        session_ids: array of session IDs
        '''
        return self.for_each_session(session_ids, self.close_session)

    async def rpc_log(self, session_ids):
    def rpc_log(self, session_ids):
        '''Toggle logging of sessions.

        session_ids: array of session IDs
        '''
        return self.for_each_session(session_ids, self.toggle_logging)

    async def rpc_stop(self):
    def rpc_stop(self):
        '''Shut down the server cleanly.'''
        self.initiate_shutdown()
        return 'stopping'

    async def rpc_getinfo(self):
    def rpc_getinfo(self):
        '''Return summary information about the server process.'''
        return self.server_summary()

    async def rpc_groups(self):
    def rpc_groups(self):
        '''Return statistics about the session groups.'''
        return self.group_data()

    async def rpc_sessions(self):
    def rpc_sessions(self):
        '''Return statistics about connected sessions.'''
        return self.session_data(for_log=False)

    async def rpc_peers(self):
    def rpc_peers(self):
        '''Return a list of server peers, currently taken from IRC.'''
        return self.irc.peers
        return self.peers.peer_list()

    async def rpc_reorg(self, count=3):
    def rpc_reorg(self, count=3):
        '''Force a reorg of the given number of blocks.

        count: number of blocks to reorg (default 3)
@@ -647,10 +632,12 @@ class Controller(util.LoggedClass):
            raise RPCError('daemon error: {}'.format(e))

    async def new_subscription(self, address):
        if self.subscription_count >= self.max_subs:
            raise RPCError('server subscription limit {:,d} reached'
                           .format(self.max_subs))
        self.subscription_count += 1
        if self.subs_room <= 0:
            self.subs_room = self.max_subs - self.sub_count()
            if self.subs_room <= 0:
                raise RPCError('server subscription limit {:,d} reached'
                               .format(self.max_subs))
        self.subs_room -= 1
        hashX = self.address_to_hashX(address)
        status = await self.address_status(hashX)
        return hashX, status
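
The new ``subs_room`` counter trades exactness for speed: rather than
recounting every session's subscriptions on each new subscription, the
remaining room is recomputed only once the cached value runs out. A
standalone sketch of the pattern (class and names are illustrative, not
the server's own)::

    class SubscriptionLimiter:
        def __init__(self, max_subs, count_subs):
            self.max_subs = max_subs
            self.count_subs = count_subs   # callable returning the current total
            self.room = 0                  # cached free slots, refreshed lazily

        def claim(self):
            if self.room <= 0:
                self.room = self.max_subs - self.count_subs()
                if self.room <= 0:
                    raise RuntimeError('subscription limit {:,d} reached'
                                       .format(self.max_subs))
            self.room -= 1

    limiter = SubscriptionLimiter(10, lambda: 3)
    limiter.claim()   # recounts once, then works from the cache
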
@@ -777,14 +764,14 @@ class Controller(util.LoggedClass):
                 'height': utxo.height, 'value': utxo.value}
                for utxo in sorted(await self.get_utxos(hashX))]

    async def block_get_chunk(self, index):
    def block_get_chunk(self, index):
        '''Return a chunk of block headers.

        index: the chunk index'''
        index = self.non_negative_integer(index)
        return self.get_chunk(index)

    async def block_get_header(self, height):
    def block_get_header(self, height):
        '''The deserialized header at a given height.

        height: the header's height'''
@@ -877,24 +864,6 @@ class Controller(util.LoggedClass):

        return banner

    async def donation_address(self):
    def donation_address(self):
        '''Return the donation address as a string, empty if there is none.'''
        return self.env.donation_address

    async def peers_subscribe(self):
        '''Returns the server peers as a list of (ip, host, ports) tuples.

        Despite the name this is not currently treated as a subscription.'''
        return list(self.irc.peers.values())

    async def version(self, client_name=None, protocol_version=None):
        '''Returns the server version as a string.

        client_name: a string identifying the client
        protocol_version: the protocol version spoken by the client
        '''
        if client_name:
            self.client = str(client_name)[:15]
        if protocol_version is not None:
            self.protocol_version = protocol_version
        return VERSION

@@ -66,7 +66,7 @@ class Env(LoggedClass):
                                 self.report_ssl_port
                                 if self.report_ssl_port else
                                 self.ssl_port)
        self.report_host_tor = self.default('REPORT_HOST_TOR', None)
        self.report_host_tor = self.default('REPORT_HOST_TOR', '')

    def default(self, envvar, default):
        return environ.get(envvar, default)

server/irc.py | 116 lines changed
@@ -12,7 +12,6 @@ Only calling start() requires the IRC Python module.

import asyncio
import re
import socket

from collections import namedtuple

@@ -22,52 +21,26 @@ from lib.util import LoggedClass

class IRC(LoggedClass):

    Peer = namedtuple('Peer', 'ip_addr host ports')

    class DisconnectedError(Exception):
        pass

    def __init__(self, env):
    def __init__(self, env, peer_mgr):
        super().__init__()
        self.env = env
        self.coin = env.coin
        self.peer_mgr = peer_mgr

        # If this isn't something a peer or client expects
        # then you won't appear in the client's network dialog box
        irc_address = (env.coin.IRC_SERVER, env.coin.IRC_PORT)
        self.channel = env.coin.IRC_CHANNEL
        self.prefix = env.coin.IRC_PREFIX

        self.clients = []
        self.nick = '{}{}'.format(self.prefix,
                                  env.irc_nick if env.irc_nick else
                                  double_sha256(env.report_host.encode())
                                  [:5].hex())
        self.clients.append(IrcClient(irc_address, self.nick,
                                      env.report_host,
                                      env.report_tcp_port,
                                      env.report_ssl_port))
        if env.report_host_tor:
            self.clients.append(IrcClient(irc_address, self.nick + '_tor',
                                          env.report_host_tor,
                                          env.report_tcp_port_tor,
                                          env.report_ssl_port_tor))

        self.peer_regexp = re.compile('({}[^!]*)!'.format(self.prefix))
        self.peers = {}

    async def start(self):
    async def start(self, name_pairs):
        '''Start IRC connections if enabled in environment.'''
        try:
            if self.env.irc:
                await self.join()
            else:
                self.logger.info('IRC is disabled')
        except asyncio.CancelledError:
            pass
        except Exception as e:
            self.logger.error(str(e))

    async def join(self):
        import irc.client as irc_client
        from jaraco.stream import buffer

@@ -77,21 +50,18 @@ class IRC(LoggedClass):

        # Register handlers for events we're interested in
        reactor = irc_client.Reactor()
        for event in 'welcome join quit kick whoreply disconnect'.split():
        for event in 'welcome join quit whoreply disconnect'.split():
            reactor.add_global_handler(event, getattr(self, 'on_' + event))

        # Note: Multiple nicks in same channel will trigger duplicate events
        for client in self.clients:
            client.connection = reactor.server()
        clients = [IrcClient(self.coin, real_name, self.nick + suffix,
                             reactor.server())
                   for (real_name, suffix) in name_pairs]

        while True:
            try:
                for client in self.clients:
                    self.logger.info('Joining IRC in {} as "{}" with '
                                     'real name "{}"'
                                     .format(self.channel, client.nick,
                                             client.realname))
                    client.connect()
                for client in clients:
                    client.connect(self)
                while True:
                    reactor.process_once()
                    await asyncio.sleep(2)
@@ -130,14 +100,7 @@ class IRC(LoggedClass):
        '''Called when someone leaves our channel.'''
        match = self.peer_regexp.match(event.source)
        if match:
            self.peers.pop(match.group(1), None)

    def on_kick(self, connection, event):
        '''Called when someone is kicked from our channel.'''
        self.log_event(event)
        match = self.peer_regexp.match(event.arguments[0])
        if match:
            self.peers.pop(match.group(1), None)
            self.peer_mgr.remove_irc_peer(match.group(1))

    def on_whoreply(self, connection, event):
        '''Called when a response to our who requests arrives.
@@ -145,50 +108,25 @@ class IRC(LoggedClass):
        The nick is the 4th argument, and real name is in the 6th
        argument preceded by '0 ' for some reason.
        '''
        try:
            nick = event.arguments[4]
            if nick.startswith(self.prefix):
                line = event.arguments[6].split()
                try:
                    ip_addr = socket.gethostbyname(line[1])
                except socket.error:
                    # Could be .onion or IPv6.
                    ip_addr = line[1]
                peer = self.Peer(ip_addr, line[1], line[2:])
                self.peers[nick] = peer
        except (IndexError, UnicodeError):
            # UnicodeError comes from invalid domains (issue #68)
            pass
        nick = event.arguments[4]
        if nick.startswith(self.prefix):
            line = event.arguments[6].split()
            hostname, details = line[1], line[2:]
            self.peer_mgr.add_irc_peer(nick, hostname, details)


class IrcClient(LoggedClass):
class IrcClient(object):

    VERSION = '1.0'
    DEFAULT_PORTS = {'t': 50001, 's': 50002}

    def __init__(self, irc_address, nick, host, tcp_port, ssl_port):
        super().__init__()
        self.irc_host, self.irc_port = irc_address
    def __init__(self, coin, real_name, nick, server):
        self.irc_host = coin.IRC_SERVER
        self.irc_port = coin.IRC_PORT
        self.nick = nick
        self.realname = self.create_realname(host, tcp_port, ssl_port)
        self.connection = None
        self.real_name = real_name
        self.server = server

    def connect(self, keepalive=60):
    def connect(self, irc):
        '''Connect this client to its IRC server'''
        self.connection.connect(self.irc_host, self.irc_port, self.nick,
                                ircname=self.realname)
        self.connection.set_keepalive(keepalive)

    @classmethod
    def create_realname(cls, host, tcp_port, ssl_port):
        def port_text(letter, port):
            if not port:
                return ''
            if port == cls.DEFAULT_PORTS.get(letter):
                return ' ' + letter
            else:
                return ' ' + letter + str(port)

        tcp = port_text('t', tcp_port)
        ssl = port_text('s', ssl_port)
        return '{} v{}{}{}'.format(host, cls.VERSION, tcp, ssl)
        irc.logger.info('joining {} as "{}" with real name "{}"'
                        .format(irc.channel, self.nick, self.real_name))
        self.server.connect(self.irc_host, self.irc_port, self.nick,
                            ircname=self.real_name)

server/peers.py | 139 lines added (new file)
@@ -0,0 +1,139 @@
# Copyright (c) 2017, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.

'''Peer management.'''

import asyncio
import socket
import traceback
from collections import namedtuple
from functools import partial

import lib.util as util
from server.irc import IRC


NetIdentity = namedtuple('NetIdentity', 'host tcp_port ssl_port nick_suffix')
IRCPeer = namedtuple('IRCPeer', 'ip_addr host details')


class PeerManager(util.LoggedClass):
    '''Looks after the DB of peer network servers.

    Attempts to maintain a connection with up to 8 peers.
    Issues a 'peers.subscribe' RPC to them and tells them our data.
    '''
    VERSION = '1.0'
    DEFAULT_PORTS = {'t': 50001, 's': 50002}

    def __init__(self, env):
        super().__init__()
        self.env = env
        self.loop = asyncio.get_event_loop()
        self.irc = IRC(env, self)
        self.futures = set()
        self.identities = []
        # Keyed by nick
        self.irc_peers = {}

        # We can have a Tor identity in addition to a normal one
        self.identities.append(NetIdentity(env.report_host,
                                           env.report_tcp_port,
                                           env.report_ssl_port,
                                           ''))
        if env.report_host_tor.endswith('.onion'):
            self.identities.append(NetIdentity(env.report_host_tor,
                                               env.report_tcp_port_tor,
                                               env.report_ssl_port_tor,
                                               '_tor'))

    async def executor(self, func, *args, **kwargs):
        '''Run func taking args in the executor.'''
        await self.loop.run_in_executor(None, partial(func, *args, **kwargs))

    @classmethod
    def real_name(cls, identity):
        '''Real name as used on IRC.'''
        def port_text(letter, port):
            if not port:
                return ''
            if port == cls.DEFAULT_PORTS.get(letter):
                return ' ' + letter
            else:
                return ' ' + letter + str(port)

        tcp = port_text('t', identity.tcp_port)
        ssl = port_text('s', identity.ssl_port)
        return '{} v{}{}{}'.format(identity.host, cls.VERSION, tcp, ssl)

    def ensure_future(self, coro):
        '''Convert a coro into a future and add it to our pending list
        to be waited for.'''
        self.futures.add(asyncio.ensure_future(coro))

    def start_irc(self):
        '''Start up the IRC connections if enabled.'''
        if self.env.irc:
            name_pairs = [(self.real_name(identity), identity.nick_suffix)
                          for identity in self.identities]
            self.ensure_future(self.irc.start(name_pairs))
        else:
            self.logger.info('IRC is disabled')

    async def main_loop(self):
        '''Start and then enter the main loop.'''
        self.start_irc()

        try:
            while True:
                await asyncio.sleep(10)
                done = [future for future in self.futures if future.done()]
                self.futures.difference_update(done)
                for future in done:
                    try:
                        future.result()
                    except:
                        self.log_error(traceback.format_exc())
        finally:
            for future in self.futures:
                future.cancel()

    def dns_lookup_peer(self, nick, hostname, details):
        try:
            ip_addr = None
            try:
                ip_addr = socket.gethostbyname(hostname)
            except socket.error:
                pass  # IPv6?
            ip_addr = ip_addr or hostname
            self.irc_peers[nick] = IRCPeer(ip_addr, hostname, details)
            self.logger.info('new IRC peer {} at {} ({})'
                             .format(nick, hostname, details))
        except UnicodeError:
            # UnicodeError comes from invalid domains (issue #68)
            self.logger.info('IRC peer domain {} invalid'.format(hostname))

    def add_irc_peer(self, *args):
        '''Schedule DNS lookup of peer.'''
        self.ensure_future(self.executor(self.dns_lookup_peer, *args))

    def remove_irc_peer(self, nick):
        '''Remove a peer from our IRC peers map.'''
        self.logger.info('removing IRC peer {}'.format(nick))
        self.irc_peers.pop(nick, None)

    def count(self):
        return len(self.irc_peers)

    def peer_list(self):
        return self.irc_peers

    def subscribe(self):
        '''Returns the server peers as a list of (ip, host, details) tuples.

        Despite the name this is not currently treated as a subscription.'''
        return list(self.irc_peers.values())
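
For orientation, this is roughly how the controller changes earlier in the
commit wire the new class in, condensed into one hypothetical helper
(``env`` stands for the usual server environment object)::

    from server.peers import PeerManager

    def start_peer_management(env, add_future, handlers):
        # The controller now owns a PeerManager instead of a bare IRC object,
        peers = PeerManager(env)
        # runs its main loop alongside the other long-lived server tasks,
        add_future(peers.main_loop())
        # and routes the Electrum 'server.peers.subscribe' RPC to it.
        handlers['server.peers.subscribe'] = peers.subscribe
        return peers
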
@@ -9,38 +9,61 @@


import asyncio
import time
import traceback
from functools import partial

from lib.jsonrpc import JSONRPC, RPCError
from lib.jsonrpc import JSONSession, RPCError
from server.daemon import DaemonError
from server.version import VERSION


class Session(JSONRPC):
    '''Base class of ElectrumX JSON session protocols.
class SessionBase(JSONSession):
    '''Base class of ElectrumX JSON sessions.

    Each session runs its tasks in asynchronous parallelism with other
    sessions. To prevent some sessions blocking others, potentially
    long-running requests should yield.
    sessions.
    '''

    def __init__(self, controller, bp, env, kind):
    def __init__(self, controller, kind):
        super().__init__()
        self.kind = kind  # 'RPC', 'TCP' etc.
        self.controller = controller
        self.bp = bp
        self.env = env
        self.daemon = bp.daemon
        self.kind = kind
        self.bp = controller.bp
        self.env = controller.env
        self.daemon = self.bp.daemon
        self.client = 'unknown'
        self.anon_logs = env.anon_logs
        self.max_send = env.max_send
        self.bandwidth_limit = env.bandwidth_limit
        self.anon_logs = self.env.anon_logs
        self.last_delay = 0
        self.txs_sent = 0
        self.requests = []
        self.start_time = time.time()
        self.close_time = 0
        self.bw_time = self.start_time
        self.bw_interval = 3600
        self.bw_used = 0

    def is_closing(self):
        '''True if this session is closing.'''
        return self.transport and self.transport.is_closing()
    def have_pending_items(self):
        '''Called each time the pending item queue goes from empty to having
        one item.'''
        self.controller.enqueue_session(self)

    def close_connection(self):
        '''Call this to close the connection.'''
        self.close_time = time.time()
        super().close_connection()

    def peername(self, *, for_log=True):
        '''Return the peer name of this connection.'''
        peer_info = self.peer_info()
        if not peer_info:
            return 'unknown'
        if for_log and self.anon_logs:
            return 'xx.xx.xx.xx:xx'
        if ':' in peer_info[0]:
            return '[{}]:{}'.format(peer_info[0], peer_info[1])
        else:
            return '{}:{}'.format(peer_info[0], peer_info[1])

    def flags(self):
        '''Status flags.'''
@@ -52,42 +75,6 @@ class Session(JSONRPC):
        status += str(self.controller.session_priority(self))
        return status

    def requests_remaining(self):
        return sum(request.remaining for request in self.requests)

    def enqueue_request(self, request):
        '''Add a request to the session's list.'''
        self.requests.append(request)
        if len(self.requests) == 1:
            self.controller.enqueue_session(self)

    async def serve_requests(self):
        '''Serve requests in batches.'''
        total = 0
        errs = []
        # Process 8 items at a time
        for request in self.requests:
            try:
                initial = request.remaining
                await request.process(self)
                total += initial - request.remaining
            except asyncio.CancelledError:
                raise
            except Exception:
                # Should probably be considered a bug and fixed
                self.log_error('error handling request {}'.format(request))
                traceback.print_exc()
                errs.append(request)
            await asyncio.sleep(0)
            if total >= 8:
                break

        # Remove completed requests and re-enqueue ourself if any remain.
        self.requests = [req for req in self.requests
                         if req.remaining and not req in errs]
        if self.requests:
            self.controller.enqueue_session(self)

    def connection_made(self, transport):
        '''Handle an incoming client connection.'''
        super().connection_made(transport)
@@ -95,27 +82,32 @@ class Session(JSONRPC):

    def connection_lost(self, exc):
        '''Handle client disconnection.'''
        super().connection_lost(exc)
        if (self.pause or self.controller.is_deprioritized(self)
                or self.send_size >= 1024*1024 or self.error_count):
            self.log_info('disconnected. Sent {:,d} bytes in {:,d} messages '
                          '{:,d} errors'
                          .format(self.send_size, self.send_count,
                                  self.error_count))
        msg = ''
        if self.pause:
            msg += ' whilst paused'
        if self.controller.is_deprioritized(self):
            msg += ' whilst deprioritized'
        if self.send_size >= 1024*1024:
            msg += ('. Sent {:,d} bytes in {:,d} messages'
                    .format(self.send_size, self.send_count))
        if msg:
            msg = 'disconnected' + msg
            self.log_info(msg)
        self.controller.remove_session(self)

    def sub_count(self):
        return 0


class ElectrumX(Session):
class ElectrumX(SessionBase):
    '''A TCP server that handles incoming Electrum connections.'''

    def __init__(self, *args):
        super().__init__(*args)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.subscribe_headers = False
        self.subscribe_height = False
        self.notified_height = None
        self.max_send = self.env.max_send
        self.max_subs = self.env.max_session_subs
        self.hashX_subs = {}
        self.electrumx_handlers = {
@@ -123,6 +115,7 @@ class ElectrumX(Session):
            'blockchain.headers.subscribe': self.headers_subscribe,
            'blockchain.numblocks.subscribe': self.numblocks_subscribe,
            'blockchain.transaction.broadcast': self.transaction_broadcast,
            'server.version': self.server_version,
        }

    def sub_count(self):
@@ -133,32 +126,29 @@ class ElectrumX(Session):

        Cache is a shared cache for this update.
        '''
        controller = self.controller
        pairs = []

        if height != self.notified_height:
            self.notified_height = height
            if self.subscribe_headers:
                payload = self.notification_payload(
                    'blockchain.headers.subscribe',
                    (self.controller.electrum_header(height), ),
                )
                self.encode_and_send_payload(payload)
                args = (controller.electrum_header(height), )
                pairs.append(('blockchain.headers.subscribe', args))

            if self.subscribe_height:
                payload = self.notification_payload(
                    'blockchain.numblocks.subscribe',
                    (height, ),
                )
                self.encode_and_send_payload(payload)
                pairs.append(('blockchain.numblocks.subscribe', (height, )))

        matches = touched.intersection(self.hashX_subs)
        for hashX in matches:
            address = self.hashX_subs[hashX]
            status = await self.controller.address_status(hashX)
            payload = self.notification_payload(
                'blockchain.address.subscribe', (address, status))
            self.encode_and_send_payload(payload)
            status = await controller.address_status(hashX)
            pairs.append(('blockchain.address.subscribe', (address, status)))

        self.send_notifications(pairs)
        if matches:
            self.log_info('notified of {:,d} addresses'.format(len(matches)))
            es = '' if len(matches) == 1 else 'es'
            self.log_info('notified of {:,d} address{}'
                          .format(len(matches), es))

    def height(self):
        '''Return the current flushed database height.'''
@@ -168,12 +158,12 @@ class ElectrumX(Session):
        '''Used as response to a headers subscription request.'''
        return self.controller.electrum_header(self.height())

    async def headers_subscribe(self):
    def headers_subscribe(self):
        '''Subscribe to get headers of new blocks.'''
        self.subscribe_headers = True
        return self.current_electrum_header()

    async def numblocks_subscribe(self):
    def numblocks_subscribe(self):
        '''Subscribe to get height of new blocks.'''
        self.subscribe_height = True
        return self.height()
@@ -191,6 +181,18 @@ class ElectrumX(Session):
        self.hashX_subs[hashX] = address
        return status

    def server_version(self, client_name=None, protocol_version=None):
        '''Returns the server version as a string.

        client_name: a string identifying the client
        protocol_version: the protocol version spoken by the client
        '''
        if client_name:
            self.client = str(client_name)[:15]
        if protocol_version is not None:
            self.protocol_version = protocol_version
        return VERSION

    async def transaction_broadcast(self, raw_tx):
        '''Broadcast a raw transaction to the network.

@@ -230,13 +232,13 @@ class ElectrumX(Session):
        return handler


class LocalRPC(Session):
    '''A local TCP RPC server for querying status.'''
class LocalRPC(SessionBase):
    '''A local TCP RPC server session.'''

    def __init__(self, *args):
        super().__init__(*args)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.client = 'RPC'
        self.max_send = 5000000
        self.max_send = 0

    def request_handler(self, method):
        '''Return the async handler for the given request method.'''

@@ -10,11 +10,11 @@
import os
from functools import partial

from lib.util import subclasses, increment_byte_string
import lib.util as util

def db_class(name):
    '''Returns a DB engine class.'''
    for db_class in subclasses(Storage):
    for db_class in util.subclasses(Storage):
        if db_class.__name__.lower() == name.lower():
            db_class.import_module()
            return db_class

@@ -1 +1 @@
VERSION = "ElectrumX 0.10.7"
VERSION = "ElectrumX 0.10.11"

@@ -56,4 +56,4 @@ def test_chunks():
def test_increment_byte_string():
    assert util.increment_byte_string(b'1') == b'2'
    assert util.increment_byte_string(b'\x01\x01') == b'\x01\x02'
    assert util.increment_byte_string(b'\xff\xff') == b'\x01\x00\x00'
    assert util.increment_byte_string(b'\xff\xff') == None

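The changed assertion reflects new overflow semantics: incrementing an
all-0xff string now yields None instead of growing the string. A
hypothetical pure-Python helper consistent with all three asserts (the
real one lives in lib/util.py)::

    def increment_byte_string(bs):
        # Return the next byte string of the same length, or None when bs
        # is all 0xff bytes (the overflow case the updated test expects).
        n = int.from_bytes(bs, 'big') + 1
        if n >= 256 ** len(bs):
            return None
        return n.to_bytes(len(bs), 'big')

    assert increment_byte_string(b'1') == b'2'
    assert increment_byte_string(b'\x01\x01') == b'\x01\x02'
    assert increment_byte_string(b'\xff\xff') is None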