use asyncio in network layer

This commit is contained in:
Janus 2017-11-29 19:07:13 +01:00
parent 5522e9ea9f
commit e170f4c7d3
17 changed files with 10538 additions and 602 deletions

View File

@ -25,7 +25,7 @@
import signal
import sys
import traceback
try:
import PyQt5
@ -191,6 +191,7 @@ class ElectrumGui:
try:
wallet = self.daemon.load_wallet(path, None)
except BaseException as e:
traceback.print_exc()
d = QMessageBox(QMessageBox.Warning, _('Error'), 'Cannot load wallet:\n' + str(e))
d.exec_()
return

View File

@ -4,7 +4,7 @@ from .wallet import Synchronizer, Wallet
from .storage import WalletStorage
from .coinchooser import COIN_CHOOSERS
from .network import Network, pick_random_server
from .interface import Connection, Interface
from .interface import Interface
from .simple_config import SimpleConfig, get_config, set_config
from . import bitcoin
from . import transaction

View File

@ -22,18 +22,26 @@
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import aiosocks
import os
import re
import socket
import ssl
import sys
import threading
import time
import traceback
import asyncio
import json
import asyncio.streams
from asyncio.sslproto import SSLProtocol
import requests
from aiosocks.errors import SocksError
from concurrent.futures import TimeoutError
from .util import print_error
from .ssl_in_socks import sslInSocksReaderWriter
ca_path = requests.certs.where()
@ -41,268 +49,175 @@ from . import util
from . import x509
from . import pem
def Connection(server, queue, config_path):
"""Makes asynchronous connections to a remote electrum server.
Returns the running thread that is making the connection.
Once the thread has connected, it finishes, placing a tuple on the
queue of the form (server, socket), where socket is None if
connection failed.
"""
host, port, protocol = server.rsplit(':', 2)
if not protocol in 'st':
raise Exception('Unknown protocol: %s' % protocol)
c = TcpConnection(server, queue, config_path)
c.start()
return c
class TcpConnection(threading.Thread, util.PrintError):
def __init__(self, server, queue, config_path):
threading.Thread.__init__(self)
self.config_path = config_path
self.queue = queue
self.server = server
self.host, self.port, self.protocol = self.server.rsplit(':', 2)
self.host = str(self.host)
self.port = int(self.port)
self.use_ssl = (self.protocol == 's')
self.daemon = True
def diagnostic_name(self):
return self.host
def check_host_name(self, peercert, name):
"""Simple certificate/host name checker. Returns True if the
certificate matches, False otherwise. Does not support
wildcards."""
# Check that the peer has supplied a certificate.
# None/{} is not acceptable.
if not peercert:
return False
if 'subjectAltName' in peercert:
for typ, val in peercert["subjectAltName"]:
if typ == "DNS" and val == name:
return True
else:
# Only check the subject DN if there is no subject alternative
# name.
cn = None
for attr, val in peercert["subject"]:
# Use most-specific (last) commonName attribute.
if attr == "commonName":
cn = val
if cn is not None:
return cn == name
return False
def get_simple_socket(self):
try:
l = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM)
except socket.gaierror:
self.print_error("cannot resolve hostname")
return
e = None
for res in l:
try:
s = socket.socket(res[0], socket.SOCK_STREAM)
s.settimeout(10)
s.connect(res[4])
s.settimeout(2)
s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
return s
except BaseException as _e:
e = _e
continue
else:
self.print_error("failed to connect", str(e))
@staticmethod
def get_ssl_context(cert_reqs, ca_certs):
context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=ca_certs)
context.check_hostname = False
context.verify_mode = cert_reqs
context.options |= ssl.OP_NO_SSLv2
context.options |= ssl.OP_NO_SSLv3
context.options |= ssl.OP_NO_TLSv1
return context
def get_socket(self):
if self.use_ssl:
cert_path = os.path.join(self.config_path, 'certs', self.host)
if not os.path.exists(cert_path):
is_new = True
s = self.get_simple_socket()
if s is None:
return
# try with CA first
try:
context = self.get_ssl_context(cert_reqs=ssl.CERT_REQUIRED, ca_certs=ca_path)
s = context.wrap_socket(s, do_handshake_on_connect=True)
except ssl.SSLError as e:
print_error(e)
s = None
except:
return
if s and self.check_host_name(s.getpeercert(), self.host):
self.print_error("SSL certificate signed by CA")
return s
# get server certificate.
# Do not use ssl.get_server_certificate because it does not work with proxy
s = self.get_simple_socket()
if s is None:
return
try:
context = self.get_ssl_context(cert_reqs=ssl.CERT_NONE, ca_certs=None)
s = context.wrap_socket(s)
except ssl.SSLError as e:
self.print_error("SSL error retrieving SSL certificate:", e)
return
except:
return
dercert = s.getpeercert(True)
s.close()
cert = ssl.DER_cert_to_PEM_cert(dercert)
# workaround android bug
cert = re.sub("([^\n])-----END CERTIFICATE-----","\\1\n-----END CERTIFICATE-----",cert)
temporary_path = cert_path + '.temp'
with open(temporary_path,"w") as f:
f.write(cert)
else:
is_new = False
s = self.get_simple_socket()
if s is None:
return
if self.use_ssl:
try:
context = self.get_ssl_context(cert_reqs=ssl.CERT_REQUIRED,
ca_certs=(temporary_path if is_new else cert_path))
s = context.wrap_socket(s, do_handshake_on_connect=True)
except socket.timeout:
self.print_error('timeout')
return
except ssl.SSLError as e:
self.print_error("SSL error:", e)
if e.errno != 1:
return
if is_new:
rej = cert_path + '.rej'
if os.path.exists(rej):
os.unlink(rej)
os.rename(temporary_path, rej)
else:
with open(cert_path) as f:
cert = f.read()
try:
b = pem.dePem(cert, 'CERTIFICATE')
x = x509.X509(b)
except:
traceback.print_exc(file=sys.stderr)
self.print_error("wrong certificate")
return
try:
x.check_date()
except:
self.print_error("certificate has expired:", cert_path)
os.unlink(cert_path)
return
self.print_error("wrong certificate")
if e.errno == 104:
return
return
except BaseException as e:
self.print_error(e)
traceback.print_exc(file=sys.stderr)
return
if is_new:
self.print_error("saving certificate")
os.rename(temporary_path, cert_path)
return s
def run(self):
socket = self.get_socket()
if socket:
self.print_error("connected")
self.queue.put((self.server, socket))
def get_ssl_context(cert_reqs, ca_certs):
context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=ca_certs)
context.check_hostname = False
context.verify_mode = cert_reqs
context.options |= ssl.OP_NO_SSLv2
context.options |= ssl.OP_NO_SSLv3
context.options |= ssl.OP_NO_TLSv1
return context
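A minimal usage sketch of the pinning behaviour this module-level helper enables (connect_pinned and the certificate path are illustrative, not part of the commit): a context built this way trusts exactly one CA file and nothing else, so any other server certificate fails the handshake.

import socket
import ssl

def connect_pinned(host, port, pinned_cert_path):
    # Same context construction as get_ssl_context() above.
    context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH,
                                         cafile=pinned_cert_path)
    context.check_hostname = False
    context.verify_mode = ssl.CERT_REQUIRED
    context.options |= ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1
    raw = socket.create_connection((host, port), timeout=10)
    # Raises ssl.SSLError if the server presents any certificate other
    # than the pinned one.
    return context.wrap_socket(raw)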
class Interface(util.PrintError):
"""The Interface class handles a socket connected to a single remote
electrum server. Its exposed API is:
- Member functions close(), fileno(), get_responses(), has_timed_out(),
ping_required(), queue_request(), send_requests()
- Member functions close(), fileno(), get_response(), has_timed_out(),
ping_required(), queue_request(), send_request()
- Member variable server.
"""
def __init__(self, server, socket):
self.server = server
self.host, _, _ = server.rsplit(':', 2)
self.socket = socket
def __init__(self, server, config_path, proxy_config):
self.addr = self.auth = None
if proxy_config is not None:
if proxy_config["mode"] == "socks5":
self.addr = aiosocks.Socks5Addr(proxy_config["host"], proxy_config["port"])
self.auth = aiosocks.Socks5Auth(proxy_config["user"], proxy_config["password"]) if proxy_config["user"] != "" else None
elif proxy_config["mode"] == "socks4":
self.addr = aiosocks.Socks4Addr(proxy_config["host"], proxy_config["port"])
self.auth = aiosocks.Socks4Auth(proxy_config["password"]) if proxy_config["password"] != "" else None
else:
raise Exception("proxy mode not supported")
self.pipe = util.SocketPipe(socket)
self.pipe.set_timeout(0.0) # Don't wait for data
self.server = server
self.config_path = config_path
host, port, protocol = self.server.split(':')
self.host = host
self.port = int(port)
self.use_ssl = (protocol=='s')
self.reader = self.writer = None
self.lock = asyncio.Lock()
# Dump network messages. Set at runtime from the console.
self.debug = False
self.unsent_requests = []
self.unsent_requests = asyncio.PriorityQueue()
self.unanswered_requests = {}
# Set last ping to zero to ensure immediate ping
self.last_request = time.time()
self.last_ping = 0
self.closed_remotely = False
def conn_coro(self, context):
return asyncio.open_connection(self.host, self.port, ssl=context)
async def _get_read_write(self):
async with self.lock:
if self.reader is not None and self.writer is not None:
return self.reader, self.writer
if self.use_ssl:
cert_path = os.path.join(self.config_path, 'certs', self.host)
if not os.path.exists(cert_path):
context = get_ssl_context(cert_reqs=ssl.CERT_NONE, ca_certs=None)
if self.addr is not None:
proto_factory = lambda: SSLProtocol(asyncio.get_event_loop(), asyncio.Protocol(), context, None)
socks_create_coro = aiosocks.create_connection(proto_factory, \
proxy=self.addr, \
proxy_auth=self.auth, \
dst=(self.host, self.port))
transport, protocol = await asyncio.wait_for(socks_create_coro, 5)
while True:
try:
if protocol._sslpipe is not None:
dercert = protocol._sslpipe.ssl_object.getpeercert(True)
break
except ValueError:
print("sleeping for cert")
await asyncio.sleep(1)
transport.close()
else:
reader, writer = await asyncio.wait_for(self.conn_coro(context), 5)
dercert = writer.get_extra_info('ssl_object').getpeercert(True)
writer.close()
cert = ssl.DER_cert_to_PEM_cert(dercert)
temporary_path = cert_path + '.temp'
with open(temporary_path, "w") as f:
f.write(cert)
is_new = True
else:
is_new = False
ca_certs = temporary_path if is_new else cert_path
try:
if self.addr is not None:
if not self.use_ssl:
open_coro = aiosocks.open_connection(proxy=self.addr, proxy_auth=self.auth, dst=(self.host, self.port))
self.reader, self.writer = await asyncio.wait_for(open_coro, 5)
else:
ssl_in_socks_coro = sslInSocksReaderWriter(self.addr, self.auth, self.host, self.port, ca_certs)
self.reader, self.writer = await asyncio.wait_for(ssl_in_socks_coro, 10)
else:
context = get_ssl_context(cert_reqs=ssl.CERT_REQUIRED, ca_certs=ca_certs) if self.use_ssl else None
self.reader, self.writer = await asyncio.wait_for(self.conn_coro(context), 5)
except BaseException as e:
traceback.print_exc()
print("Previous exception will now be reraised")
raise e
if self.use_ssl and is_new:
self.print_error("saving new certificate for", self.host)
os.rename(temporary_path, cert_path)
return self.reader, self.writer
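The branchy logic above is trust-on-first-use: on first contact the certificate is fetched without verification and written to a .temp file; the connection is then made verifying against that saved copy, which is only renamed into place once a verified connection succeeds. A synchronous sketch of the same idea, with illustrative names:

import os
import socket
import ssl

def tofu_connect(host, port, cert_path):
    if not os.path.exists(cert_path):
        # First contact: accept any certificate, but remember it.
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        with ctx.wrap_socket(socket.create_connection((host, port))) as s:
            der = s.getpeercert(True)
        with open(cert_path, 'w') as f:
            f.write(ssl.DER_cert_to_PEM_cert(der))
    # From now on, only the remembered certificate is accepted.
    ctx = ssl.create_default_context(cafile=cert_path)
    ctx.check_hostname = False
    return ctx.wrap_socket(socket.create_connection((host, port)))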
async def send_all(self, list_of_requests):
_, w = await self._get_read_write()
for i in list_of_requests:
w.write(json.dumps(i).encode("ascii") + b"\n")
await w.drain()
def close(self):
if self.writer:
self.writer.close()
async def get(self):
reader, _ = await self._get_read_write()
obj = b""
while True:
if len(obj) > 3000000:
raise BaseException("too much data: " + str(len(obj)))
try:
obj += await reader.readuntil(b"\n")
except asyncio.LimitOverrunError as e:
obj += await reader.read(e.consumed)
except asyncio.streams.IncompleteReadError as e:
return None
try:
obj = json.loads(obj.decode("ascii"))
except ValueError:
continue
else:
self.last_action = time.time()
return obj
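send_all() and get() together implement the wire format Electrum servers speak: one JSON-RPC object per line, ASCII-encoded, newline-terminated. A self-contained sketch of that framing (host and port are placeholders):

import asyncio
import json

async def fetch_version(host, port):
    reader, writer = await asyncio.open_connection(host, port)
    request = {'method': 'server.version', 'params': [], 'id': 0}
    writer.write(json.dumps(request).encode('ascii') + b'\n')
    await writer.drain()
    line = await reader.readline()  # one JSON object per line
    writer.close()
    return json.loads(line.decode('ascii'))

# asyncio.get_event_loop().run_until_complete(fetch_version('localhost', 50001))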
def idle_time(self):
return time.time() - self.last_action
def diagnostic_name(self):
return self.host
def fileno(self):
# Needed for select
return self.socket.fileno()
def close(self):
if not self.closed_remotely:
try:
self.socket.shutdown(socket.SHUT_RDWR)
except socket.error:
pass
self.socket.close()
def queue_request(self, *args): # method, params, _id
async def queue_request(self, *args): # method, params, _id
'''Queue a request, later to be sent with send_request when the
socket is available for writing.
'''
self.request_time = time.time()
self.unsent_requests.append(args)
await self.unsent_requests.put((self.request_time, args))
def num_requests(self):
'''Keep unanswered requests below 100'''
n = 100 - len(self.unanswered_requests)
return min(n, len(self.unsent_requests))
return min(n, self.unsent_requests.qsize())
def send_requests(self):
async def send_request(self):
'''Sends a queued request. Returns False on failure.'''
make_dict = lambda m, p, i: {'method': m, 'params': p, 'id': i}
n = self.num_requests()
wire_requests = self.unsent_requests[0:n]
prio, request = await self.unsent_requests.get()
try:
self.pipe.send_all([make_dict(*r) for r in wire_requests])
except socket.error as e:
self.print_error("socket error:", e)
await self.send_all([make_dict(*request)])
except (SocksError, OSError, TimeoutError) as e:
if type(e) is SocksError:
print(e)
await self.unsent_requests.put((prio, request))
return False
self.unsent_requests = self.unsent_requests[n:]
for request in wire_requests:
if self.debug:
self.print_error("-->", request)
self.unanswered_requests[request[2]] = request
if self.debug:
self.print_error("-->", request)
self.unanswered_requests[request[2]] = request
self.last_action = time.time()
return True
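The unsent queue is an asyncio.PriorityQueue keyed on enqueue time, so a request that fails to send is put back with its original priority and retried ahead of newer traffic. That retry pattern in isolation (pump and send are illustrative names):

import asyncio

async def pump(queue, send):
    prio, item = await queue.get()
    try:
        await send(item)
    except OSError:
        await queue.put((prio, item))  # requeue with original priority
        return False
    return True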
def ping_required(self):
@ -318,13 +233,12 @@ class Interface(util.PrintError):
def has_timed_out(self):
'''Returns True if the interface has timed out.'''
if (self.unanswered_requests and time.time() - self.request_time > 10
and self.pipe.idle_time() > 10):
and self.idle_time() > 10):
self.print_error("timeout", len(self.unanswered_requests))
return True
return False
def get_responses(self):
async def get_response(self):
'''Call to await the next message from the server. Returns a single
(request, response) pair. Notifications are singleton
unsolicited responses presumably as a result of prior
@ -333,34 +247,25 @@ class Interface(util.PrintError):
corresponding request. If the connection was closed remotely
or the remote server is misbehaving, a (None, None) will appear.
'''
responses = []
while True:
try:
response = self.pipe.get()
except util.timeout:
break
if not type(response) is dict:
responses.append((None, None))
if response is None:
self.closed_remotely = True
self.print_error("connection closed remotely")
break
if self.debug:
self.print_error("<--", response)
wire_id = response.get('id', None)
if wire_id is None: # Notification
responses.append((None, response))
response = await self.get()
if not type(response) is dict:
print("response type not dict!", response)
if response is None:
self.closed_remotely = True
self.print_error("connection closed remotely")
return None, None
if self.debug:
self.print_error("<--", response)
wire_id = response.get('id', None)
if wire_id is None: # Notification
return None, response
else:
request = self.unanswered_requests.pop(wire_id, None)
if request:
return request, response
else:
request = self.unanswered_requests.pop(wire_id, None)
if request:
responses.append((request, response))
else:
self.print_error("unknown wire ID", wire_id)
responses.append((None, None)) # Signal
break
return responses
self.print_error("unknown wire ID", wire_id)
return None, None # Signal
def check_cert(host, cert):
try:

View File

@ -0,0 +1,46 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/api/annotations.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import http_pb2 as google_dot_api_dot_http__pb2
from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/api/annotations.proto',
package='google.api',
syntax='proto3',
serialized_pb=_b('\n\x1cgoogle/api/annotations.proto\x12\ngoogle.api\x1a\x15google/api/http.proto\x1a google/protobuf/descriptor.proto:E\n\x04http\x12\x1e.google.protobuf.MethodOptions\x18\xb0\xca\xbc\" \x01(\x0b\x32\x14.google.api.HttpRuleBn\n\x0e\x63om.google.apiB\x10\x41nnotationsProtoP\x01ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\xa2\x02\x04GAPIb\x06proto3')
,
dependencies=[google_dot_api_dot_http__pb2.DESCRIPTOR,google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,])
HTTP_FIELD_NUMBER = 72295728
http = _descriptor.FieldDescriptor(
name='http', full_name='google.api.http', index=0,
number=72295728, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None, file=DESCRIPTOR)
DESCRIPTOR.extensions_by_name['http'] = http
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
http.message_type = google_dot_api_dot_http__pb2._HTTPRULE
google_dot_protobuf_dot_descriptor__pb2.MethodOptions.RegisterExtension(http)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\016com.google.apiB\020AnnotationsProtoP\001ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\242\002\004GAPI'))
# @@protoc_insertion_point(module_scope)

View File

@ -0,0 +1,236 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/api/http.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/api/http.proto',
package='google.api',
syntax='proto3',
serialized_pb=_b('\n\x15google/api/http.proto\x12\ngoogle.api\"+\n\x04Http\x12#\n\x05rules\x18\x01 \x03(\x0b\x32\x14.google.api.HttpRule\"\xea\x01\n\x08HttpRule\x12\x10\n\x08selector\x18\x01 \x01(\t\x12\r\n\x03get\x18\x02 \x01(\tH\x00\x12\r\n\x03put\x18\x03 \x01(\tH\x00\x12\x0e\n\x04post\x18\x04 \x01(\tH\x00\x12\x10\n\x06\x64\x65lete\x18\x05 \x01(\tH\x00\x12\x0f\n\x05patch\x18\x06 \x01(\tH\x00\x12/\n\x06\x63ustom\x18\x08 \x01(\x0b\x32\x1d.google.api.CustomHttpPatternH\x00\x12\x0c\n\x04\x62ody\x18\x07 \x01(\t\x12\x31\n\x13\x61\x64\x64itional_bindings\x18\x0b \x03(\x0b\x32\x14.google.api.HttpRuleB\t\n\x07pattern\"/\n\x11\x43ustomHttpPattern\x12\x0c\n\x04kind\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\tBj\n\x0e\x63om.google.apiB\tHttpProtoP\x01ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\xf8\x01\x01\xa2\x02\x04GAPIb\x06proto3')
)
_HTTP = _descriptor.Descriptor(
name='Http',
full_name='google.api.Http',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rules', full_name='google.api.Http.rules', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=37,
serialized_end=80,
)
_HTTPRULE = _descriptor.Descriptor(
name='HttpRule',
full_name='google.api.HttpRule',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='selector', full_name='google.api.HttpRule.selector', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='get', full_name='google.api.HttpRule.get', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='put', full_name='google.api.HttpRule.put', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='post', full_name='google.api.HttpRule.post', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='delete', full_name='google.api.HttpRule.delete', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='patch', full_name='google.api.HttpRule.patch', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='custom', full_name='google.api.HttpRule.custom', index=6,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='body', full_name='google.api.HttpRule.body', index=7,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='additional_bindings', full_name='google.api.HttpRule.additional_bindings', index=8,
number=11, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='pattern', full_name='google.api.HttpRule.pattern',
index=0, containing_type=None, fields=[]),
],
serialized_start=83,
serialized_end=317,
)
_CUSTOMHTTPPATTERN = _descriptor.Descriptor(
name='CustomHttpPattern',
full_name='google.api.CustomHttpPattern',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='kind', full_name='google.api.CustomHttpPattern.kind', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='path', full_name='google.api.CustomHttpPattern.path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=319,
serialized_end=366,
)
_HTTP.fields_by_name['rules'].message_type = _HTTPRULE
_HTTPRULE.fields_by_name['custom'].message_type = _CUSTOMHTTPPATTERN
_HTTPRULE.fields_by_name['additional_bindings'].message_type = _HTTPRULE
_HTTPRULE.oneofs_by_name['pattern'].fields.append(
_HTTPRULE.fields_by_name['get'])
_HTTPRULE.fields_by_name['get'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
_HTTPRULE.oneofs_by_name['pattern'].fields.append(
_HTTPRULE.fields_by_name['put'])
_HTTPRULE.fields_by_name['put'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
_HTTPRULE.oneofs_by_name['pattern'].fields.append(
_HTTPRULE.fields_by_name['post'])
_HTTPRULE.fields_by_name['post'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
_HTTPRULE.oneofs_by_name['pattern'].fields.append(
_HTTPRULE.fields_by_name['delete'])
_HTTPRULE.fields_by_name['delete'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
_HTTPRULE.oneofs_by_name['pattern'].fields.append(
_HTTPRULE.fields_by_name['patch'])
_HTTPRULE.fields_by_name['patch'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
_HTTPRULE.oneofs_by_name['pattern'].fields.append(
_HTTPRULE.fields_by_name['custom'])
_HTTPRULE.fields_by_name['custom'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
DESCRIPTOR.message_types_by_name['Http'] = _HTTP
DESCRIPTOR.message_types_by_name['HttpRule'] = _HTTPRULE
DESCRIPTOR.message_types_by_name['CustomHttpPattern'] = _CUSTOMHTTPPATTERN
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Http = _reflection.GeneratedProtocolMessageType('Http', (_message.Message,), dict(
DESCRIPTOR = _HTTP,
__module__ = 'google.api.http_pb2'
# @@protoc_insertion_point(class_scope:google.api.Http)
))
_sym_db.RegisterMessage(Http)
HttpRule = _reflection.GeneratedProtocolMessageType('HttpRule', (_message.Message,), dict(
DESCRIPTOR = _HTTPRULE,
__module__ = 'google.api.http_pb2'
# @@protoc_insertion_point(class_scope:google.api.HttpRule)
))
_sym_db.RegisterMessage(HttpRule)
CustomHttpPattern = _reflection.GeneratedProtocolMessageType('CustomHttpPattern', (_message.Message,), dict(
DESCRIPTOR = _CUSTOMHTTPPATTERN,
__module__ = 'google.api.http_pb2'
# @@protoc_insertion_point(class_scope:google.api.CustomHttpPattern)
))
_sym_db.RegisterMessage(CustomHttpPattern)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\016com.google.apiB\tHttpProtoP\001ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\370\001\001\242\002\004GAPI'))
# @@protoc_insertion_point(module_scope)

lib/ln/rpc_pb2.py (new file, 2514 lines)

File diff suppressed because one or more lines are too long

lib/ln/rpc_pb2_grpc.py (new file, 301 lines)
View File

@ -0,0 +1,301 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import rpc_pb2 as rpc__pb2
class ElectrumBridgeStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SetHdSeed = channel.unary_unary(
'/electrumbridge.ElectrumBridge/SetHdSeed',
request_serializer=rpc__pb2.SetHdSeedRequest.SerializeToString,
response_deserializer=rpc__pb2.SetHdSeedResponse.FromString,
)
self.NewAddress = channel.unary_unary(
'/electrumbridge.ElectrumBridge/NewAddress',
request_serializer=rpc__pb2.NewAddressRequest.SerializeToString,
response_deserializer=rpc__pb2.NewAddressResponse.FromString,
)
self.ConfirmedBalance = channel.unary_unary(
'/electrumbridge.ElectrumBridge/ConfirmedBalance',
request_serializer=rpc__pb2.ConfirmedBalanceRequest.SerializeToString,
response_deserializer=rpc__pb2.ConfirmedBalanceResponse.FromString,
)
self.FetchRootKey = channel.unary_unary(
'/electrumbridge.ElectrumBridge/FetchRootKey',
request_serializer=rpc__pb2.FetchRootKeyRequest.SerializeToString,
response_deserializer=rpc__pb2.FetchRootKeyResponse.FromString,
)
self.ListUnspentWitness = channel.unary_unary(
'/electrumbridge.ElectrumBridge/ListUnspentWitness',
request_serializer=rpc__pb2.ListUnspentWitnessRequest.SerializeToString,
response_deserializer=rpc__pb2.ListUnspentWitnessResponse.FromString,
)
self.NewRawKey = channel.unary_unary(
'/electrumbridge.ElectrumBridge/NewRawKey',
request_serializer=rpc__pb2.NewRawKeyRequest.SerializeToString,
response_deserializer=rpc__pb2.NewRawKeyResponse.FromString,
)
self.FetchInputInfo = channel.unary_unary(
'/electrumbridge.ElectrumBridge/FetchInputInfo',
request_serializer=rpc__pb2.FetchInputInfoRequest.SerializeToString,
response_deserializer=rpc__pb2.FetchInputInfoResponse.FromString,
)
self.ComputeInputScript = channel.unary_unary(
'/electrumbridge.ElectrumBridge/ComputeInputScript',
request_serializer=rpc__pb2.ComputeInputScriptRequest.SerializeToString,
response_deserializer=rpc__pb2.ComputeInputScriptResponse.FromString,
)
self.SignOutputRaw = channel.unary_unary(
'/electrumbridge.ElectrumBridge/SignOutputRaw',
request_serializer=rpc__pb2.SignOutputRawRequest.SerializeToString,
response_deserializer=rpc__pb2.SignOutputRawResponse.FromString,
)
self.PublishTransaction = channel.unary_unary(
'/electrumbridge.ElectrumBridge/PublishTransaction',
request_serializer=rpc__pb2.PublishTransactionRequest.SerializeToString,
response_deserializer=rpc__pb2.PublishTransactionResponse.FromString,
)
self.LockOutpoint = channel.unary_unary(
'/electrumbridge.ElectrumBridge/LockOutpoint',
request_serializer=rpc__pb2.LockOutpointRequest.SerializeToString,
response_deserializer=rpc__pb2.LockOutpointResponse.FromString,
)
self.UnlockOutpoint = channel.unary_unary(
'/electrumbridge.ElectrumBridge/UnlockOutpoint',
request_serializer=rpc__pb2.UnlockOutpointRequest.SerializeToString,
response_deserializer=rpc__pb2.UnlockOutpointResponse.FromString,
)
self.ListTransactionDetails = channel.unary_unary(
'/electrumbridge.ElectrumBridge/ListTransactionDetails',
request_serializer=rpc__pb2.ListTransactionDetailsRequest.SerializeToString,
response_deserializer=rpc__pb2.ListTransactionDetailsResponse.FromString,
)
self.SendOutputs = channel.unary_unary(
'/electrumbridge.ElectrumBridge/SendOutputs',
request_serializer=rpc__pb2.SendOutputsRequest.SerializeToString,
response_deserializer=rpc__pb2.SendOutputsResponse.FromString,
)
self.IsSynced = channel.unary_unary(
'/electrumbridge.ElectrumBridge/IsSynced',
request_serializer=rpc__pb2.IsSyncedRequest.SerializeToString,
response_deserializer=rpc__pb2.IsSyncedResponse.FromString,
)
self.SignMessage = channel.unary_unary(
'/electrumbridge.ElectrumBridge/SignMessage',
request_serializer=rpc__pb2.SignMessageRequest.SerializeToString,
response_deserializer=rpc__pb2.SignMessageResponse.FromString,
)
class ElectrumBridgeServicer(object):
# missing associated documentation comment in .proto file
pass
def SetHdSeed(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def NewAddress(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ConfirmedBalance(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def FetchRootKey(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListUnspentWitness(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def NewRawKey(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def FetchInputInfo(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ComputeInputScript(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SignOutputRaw(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PublishTransaction(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def LockOutpoint(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UnlockOutpoint(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListTransactionDetails(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendOutputs(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def IsSynced(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SignMessage(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ElectrumBridgeServicer_to_server(servicer, server):
rpc_method_handlers = {
'SetHdSeed': grpc.unary_unary_rpc_method_handler(
servicer.SetHdSeed,
request_deserializer=rpc__pb2.SetHdSeedRequest.FromString,
response_serializer=rpc__pb2.SetHdSeedResponse.SerializeToString,
),
'NewAddress': grpc.unary_unary_rpc_method_handler(
servicer.NewAddress,
request_deserializer=rpc__pb2.NewAddressRequest.FromString,
response_serializer=rpc__pb2.NewAddressResponse.SerializeToString,
),
'ConfirmedBalance': grpc.unary_unary_rpc_method_handler(
servicer.ConfirmedBalance,
request_deserializer=rpc__pb2.ConfirmedBalanceRequest.FromString,
response_serializer=rpc__pb2.ConfirmedBalanceResponse.SerializeToString,
),
'FetchRootKey': grpc.unary_unary_rpc_method_handler(
servicer.FetchRootKey,
request_deserializer=rpc__pb2.FetchRootKeyRequest.FromString,
response_serializer=rpc__pb2.FetchRootKeyResponse.SerializeToString,
),
'ListUnspentWitness': grpc.unary_unary_rpc_method_handler(
servicer.ListUnspentWitness,
request_deserializer=rpc__pb2.ListUnspentWitnessRequest.FromString,
response_serializer=rpc__pb2.ListUnspentWitnessResponse.SerializeToString,
),
'NewRawKey': grpc.unary_unary_rpc_method_handler(
servicer.NewRawKey,
request_deserializer=rpc__pb2.NewRawKeyRequest.FromString,
response_serializer=rpc__pb2.NewRawKeyResponse.SerializeToString,
),
'FetchInputInfo': grpc.unary_unary_rpc_method_handler(
servicer.FetchInputInfo,
request_deserializer=rpc__pb2.FetchInputInfoRequest.FromString,
response_serializer=rpc__pb2.FetchInputInfoResponse.SerializeToString,
),
'ComputeInputScript': grpc.unary_unary_rpc_method_handler(
servicer.ComputeInputScript,
request_deserializer=rpc__pb2.ComputeInputScriptRequest.FromString,
response_serializer=rpc__pb2.ComputeInputScriptResponse.SerializeToString,
),
'SignOutputRaw': grpc.unary_unary_rpc_method_handler(
servicer.SignOutputRaw,
request_deserializer=rpc__pb2.SignOutputRawRequest.FromString,
response_serializer=rpc__pb2.SignOutputRawResponse.SerializeToString,
),
'PublishTransaction': grpc.unary_unary_rpc_method_handler(
servicer.PublishTransaction,
request_deserializer=rpc__pb2.PublishTransactionRequest.FromString,
response_serializer=rpc__pb2.PublishTransactionResponse.SerializeToString,
),
'LockOutpoint': grpc.unary_unary_rpc_method_handler(
servicer.LockOutpoint,
request_deserializer=rpc__pb2.LockOutpointRequest.FromString,
response_serializer=rpc__pb2.LockOutpointResponse.SerializeToString,
),
'UnlockOutpoint': grpc.unary_unary_rpc_method_handler(
servicer.UnlockOutpoint,
request_deserializer=rpc__pb2.UnlockOutpointRequest.FromString,
response_serializer=rpc__pb2.UnlockOutpointResponse.SerializeToString,
),
'ListTransactionDetails': grpc.unary_unary_rpc_method_handler(
servicer.ListTransactionDetails,
request_deserializer=rpc__pb2.ListTransactionDetailsRequest.FromString,
response_serializer=rpc__pb2.ListTransactionDetailsResponse.SerializeToString,
),
'SendOutputs': grpc.unary_unary_rpc_method_handler(
servicer.SendOutputs,
request_deserializer=rpc__pb2.SendOutputsRequest.FromString,
response_serializer=rpc__pb2.SendOutputsResponse.SerializeToString,
),
'IsSynced': grpc.unary_unary_rpc_method_handler(
servicer.IsSynced,
request_deserializer=rpc__pb2.IsSyncedRequest.FromString,
response_serializer=rpc__pb2.IsSyncedResponse.SerializeToString,
),
'SignMessage': grpc.unary_unary_rpc_method_handler(
servicer.SignMessage,
request_deserializer=rpc__pb2.SignMessageRequest.FromString,
response_serializer=rpc__pb2.SignMessageResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'electrumbridge.ElectrumBridge', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
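A hypothetical client-side sketch of the generated stub (the target address and the empty request are assumptions, not defined in this file):

import grpc
import rpc_pb2
import rpc_pb2_grpc

def request_new_address(target='localhost:9735'):
    channel = grpc.insecure_channel(target)
    stub = rpc_pb2_grpc.ElectrumBridgeStub(channel)
    # Blocking unary-unary call; raises grpc.RpcError on failure.
    return stub.NewAddress(rpc_pb2.NewAddressRequest())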

View File

@ -30,14 +30,14 @@ import re
import select
from collections import defaultdict
import threading
import socket
import json
import asyncio
import traceback
import socks
from . import util
from . import bitcoin
from .bitcoin import *
from .interface import Connection, Interface
from .interface import Interface
from . import blockchain
from .version import ELECTRUM_VERSION, PROTOCOL_VERSION
@ -45,6 +45,7 @@ from .version import ELECTRUM_VERSION, PROTOCOL_VERSION
NODES_RETRY_INTERVAL = 60
SERVER_RETRY_INTERVAL = 10
from concurrent.futures import CancelledError
def parse_servers(result):
""" parse servers list into dict format"""
@ -76,7 +77,7 @@ def filter_version(servers):
def is_recent(version):
try:
return util.normalize_version(version) >= util.normalize_version(PROTOCOL_VERSION)
except Exception as e:
except BaseException as e:
return False
return {k: v for k, v in servers.items() if is_recent(v.get('version'))}
@ -157,7 +158,7 @@ class Network(util.DaemonThread):
- Member functions get_header(), get_interfaces(), get_local_height(),
get_parameters(), get_server_height(), get_status_value(),
is_connected(), set_parameters(), stop()
is_connected(), set_parameters(), stop(), follow_chain()
"""
def __init__(self, config=None):
@ -183,7 +184,6 @@ class Network(util.DaemonThread):
if not self.default_server:
self.default_server = pick_random_server()
self.lock = threading.Lock()
self.pending_sends = []
self.message_id = 0
self.debug = False
self.irc_servers = {} # returned by interface (list from irc)
@ -218,10 +218,8 @@ class Network(util.DaemonThread):
self.interfaces = {}
self.auto_connect = self.config.get('auto_connect', True)
self.connecting = set()
self.requested_chunks = set()
self.socket_queue = queue.Queue()
self.start_network(deserialize_server(self.default_server)[2],
deserialize_proxy(self.config.get('proxy')))
self.network_job = None
self.proxy = None
def register_callback(self, callback, events):
with self.lock:
@ -288,42 +286,43 @@ class Network(util.DaemonThread):
def is_up_to_date(self):
return self.unanswered_requests == {}
def queue_request(self, method, params, interface=None):
async def queue_request(self, method, params, interface=None):
# If you want to queue a request on any interface it must go
# through this function so message ids are properly tracked
if interface is None:
assert self.interface is not None
interface = self.interface
message_id = self.message_id
self.message_id += 1
if self.debug:
self.print_error(interface.host, "-->", method, params, message_id)
interface.queue_request(method, params, message_id)
await interface.queue_request(method, params, message_id)
return message_id
def send_subscriptions(self):
async def send_subscriptions(self):
self.print_error('sending subscriptions to', self.interface.server, len(self.unanswered_requests), len(self.subscribed_addresses))
self.sub_cache.clear()
# Resend unanswered requests
requests = self.unanswered_requests.values()
self.unanswered_requests = {}
for request in requests:
message_id = await self.queue_request(request[0], request[1])
self.unanswered_requests[message_id] = request
await self.queue_request('server.banner', [])
await self.queue_request('server.donation_address', [])
await self.queue_request('server.peers.subscribe', [])
await self.request_fee_estimates()
await self.queue_request('blockchain.relayfee', [])
if self.interface.ping_required():
params = [ELECTRUM_VERSION, PROTOCOL_VERSION]
self.queue_request('server.version', params, self.interface)
for request in requests:
message_id = self.queue_request(request[0], request[1])
self.unanswered_requests[message_id] = request
self.queue_request('server.banner', [])
self.queue_request('server.donation_address', [])
self.queue_request('server.peers.subscribe', [])
self.request_fee_estimates()
self.queue_request('blockchain.relayfee', [])
await self.queue_request('server.version', params, self.interface)
for h in self.subscribed_addresses:
self.queue_request('blockchain.scripthash.subscribe', [h])
await self.queue_request('blockchain.scripthash.subscribe', [h])
def request_fee_estimates(self):
async def request_fee_estimates(self):
self.config.requested_fee_estimates()
for i in bitcoin.FEE_TARGETS:
self.queue_request('blockchain.estimatefee', [i])
await self.queue_request('blockchain.estimatefee', [i])
def get_status_value(self, key):
if key == 'status':
@ -372,57 +371,37 @@ class Network(util.DaemonThread):
out[host] = { protocol:port }
return out
def start_interface(self, server):
async def start_interface(self, server):
if (not server in self.interfaces and not server in self.connecting):
if server == self.default_server:
self.print_error("connecting to %s as new interface" % server)
self.set_status('connecting')
self.connecting.add(server)
c = Connection(server, self.socket_queue, self.config.path)
return await self.new_interface(server)
def start_random_interface(self):
async def start_random_interface(self):
exclude_set = self.disconnected_servers.union(set(self.interfaces))
server = pick_random_server(self.get_servers(), self.protocol, exclude_set)
if server:
self.start_interface(server)
return await self.start_interface(server)
def start_interfaces(self):
self.start_interface(self.default_server)
async def start_interfaces(self):
await self.start_interface(self.default_server)
print("started default server interface")
for i in range(self.num_server - 1):
self.start_random_interface()
await self.start_random_interface()
def set_proxy(self, proxy):
self.proxy = proxy
# Store these somewhere so we can un-monkey-patch
if not hasattr(socket, "_socketobject"):
socket._socketobject = socket.socket
socket._getaddrinfo = socket.getaddrinfo
if proxy:
self.print_error('setting proxy', proxy)
proxy_mode = proxy_modes.index(proxy["mode"]) + 1
socks.setdefaultproxy(proxy_mode,
proxy["host"],
int(proxy["port"]),
# socks.py seems to want either None or a non-empty string
username=(proxy.get("user", "") or None),
password=(proxy.get("password", "") or None))
socket.socket = socks.socksocket
# prevent dns leaks, see http://stackoverflow.com/questions/13184205/dns-over-proxy
socket.getaddrinfo = lambda *args: [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
else:
socket.socket = socket._socketobject
socket.getaddrinfo = socket._getaddrinfo
def start_network(self, protocol, proxy):
async def start_network(self, protocol, proxy):
# TODO proxy
assert not self.interface and not self.interfaces
assert not self.connecting and self.socket_queue.empty()
assert not self.connecting
self.print_error('starting network')
self.disconnected_servers = set([])
self.protocol = protocol
self.set_proxy(proxy)
self.start_interfaces()
self.proxy = proxy
await self.start_interfaces()
def stop_network(self):
async def stop_network(self):
self.print_error("stopping network")
for interface in list(self.interfaces.values()):
self.close_interface(interface)
@ -431,9 +410,8 @@ class Network(util.DaemonThread):
assert self.interface is None
assert not self.interfaces
self.connecting = set()
# Get a new queue - no old pending connections thanks!
self.socket_queue = queue.Queue()
# called from the Qt thread
def set_parameters(self, host, port, protocol, proxy, auto_connect):
proxy_str = serialize_proxy(proxy)
server = serialize_server(host, port, protocol)
@ -453,25 +431,31 @@ class Network(util.DaemonThread):
return
self.auto_connect = auto_connect
if self.proxy != proxy or self.protocol != protocol:
# Restart the network defaulting to the given server
self.stop_network()
self.default_server = server
self.start_network(protocol, proxy)
async def job():
# Restart the network defaulting to the given server
await self.stop_network()
self.default_server = server
await self.start_network(protocol, proxy)
asyncio.run_coroutine_threadsafe(job(), self.loop)
elif self.default_server != server:
self.switch_to_interface(server)
async def job():
await self.switch_to_interface(server)
asyncio.run_coroutine_threadsafe(job(), self.loop)
else:
self.switch_lagging_interface()
self.notify('updated')
async def job():
await self.switch_lagging_interface()
self.notify('updated')
asyncio.run_coroutine_threadsafe(job(), self.loop)
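set_parameters() is called from the Qt thread, so each branch wraps its work in a coroutine and hands it to the network event loop with asyncio.run_coroutine_threadsafe. The handoff in isolation (names are illustrative):

import asyncio
import threading

loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()

async def restart_network():
    return 'restarted'

# Schedules the coroutine on the loop's thread; returns a
# concurrent.futures.Future usable from this (non-loop) thread.
future = asyncio.run_coroutine_threadsafe(restart_network(), loop)
print(future.result(timeout=5))
loop.call_soon_threadsafe(loop.stop)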
def switch_to_random_interface(self):
async def switch_to_random_interface(self):
'''Switch to a random connected server other than the current one'''
servers = self.get_interfaces() # Those in connected state
if self.default_server in servers:
servers.remove(self.default_server)
if servers:
self.switch_to_interface(random.choice(servers))
await self.switch_to_interface(random.choice(servers))
def switch_lagging_interface(self):
async def switch_lagging_interface(self):
'''If auto_connect and lagging, switch interface'''
if self.server_is_lagging() and self.auto_connect:
# switch to one that has the correct header (not height)
@ -479,9 +463,9 @@ class Network(util.DaemonThread):
filtered = list(map(lambda x:x[0], filter(lambda x: x[1].tip_header==header, self.interfaces.items())))
if filtered:
choice = random.choice(filtered)
self.switch_to_interface(choice)
await self.switch_to_interface(choice)
def switch_to_interface(self, server):
async def switch_to_interface(self, server):
'''Switch to server as our interface. If no connection exists or is
being opened, start connecting. The actual switch will
happen on receipt of the connection notification. Do nothing
@ -489,7 +473,7 @@ class Network(util.DaemonThread):
self.default_server = server
if server not in self.interfaces:
self.interface = None
self.start_interface(server)
await self.start_interface(server)
return
i = self.interfaces[server]
if self.interface != i:
@ -498,16 +482,21 @@ class Network(util.DaemonThread):
# fixme: we don't want to close headers sub
#self.close_interface(self.interface)
self.interface = i
self.send_subscriptions()
await self.send_subscriptions()
self.set_status('connected')
self.notify('updated')
def close_interface(self, interface):
self.print_error('closing connection', interface.server)
if interface:
if interface.server in self.interfaces:
self.interfaces.pop(interface.server)
if interface.server == self.default_server:
self.interface = None
if interface.jobs is not None:
interface.jobs.cancel()
if self.process_pending_sends_job is not None:
self.process_pending_sends_job.cancel()
interface.close()
def add_recent_server(self, server):
@ -518,7 +507,7 @@ class Network(util.DaemonThread):
self.recent_servers = self.recent_servers[0:20]
self.save_recent_servers()
def process_response(self, interface, response, callbacks):
async def process_response(self, interface, response, callbacks):
if self.debug:
self.print_error("<--", response)
error = response.get('error')
@ -531,7 +520,7 @@ class Network(util.DaemonThread):
interface.server_version = result
elif method == 'blockchain.headers.subscribe':
if error is None:
self.on_notify_header(interface, result)
await self.on_notify_header(interface, result)
elif method == 'server.peers.subscribe':
if error is None:
self.irc_servers = parse_servers(result)
@ -555,9 +544,9 @@ class Network(util.DaemonThread):
self.relay_fee = int(result * COIN)
self.print_error("relayfee", self.relay_fee)
elif method == 'blockchain.block.get_chunk':
self.on_get_chunk(interface, response)
await self.on_get_chunk(interface, response)
elif method == 'blockchain.block.get_header':
self.on_get_header(interface, response)
await self.on_get_header(interface, response)
for callback in callbacks:
callback(response)
@ -566,9 +555,9 @@ class Network(util.DaemonThread):
""" hashable index for subscriptions and cache"""
return str(method) + (':' + str(params[0]) if params else '')
def process_responses(self, interface):
responses = interface.get_responses()
for request, response in responses:
async def process_responses(self, interface):
while self.is_running():
request, response = await interface.get_response()
if request:
method, params, message_id = request
k = self.get_index(method, params)
@ -594,7 +583,7 @@ class Network(util.DaemonThread):
else:
if not response: # Closed remotely / misbehaving
self.connection_down(interface.server)
break
return
# Rewrite response shape to match subscription request response
method = response.get('method')
params = response.get('params')
@ -611,7 +600,9 @@ class Network(util.DaemonThread):
if method.endswith('.subscribe'):
self.sub_cache[k] = response
# Response is now in canonical form
self.process_response(interface, response, callbacks)
await self.process_response(interface, response, callbacks)
await self.run_coroutines() # Synchronizer and Verifier
def addr_to_scripthash(self, addr):
h = bitcoin.address_to_scripthash(addr)
@ -640,37 +631,38 @@ class Network(util.DaemonThread):
def send(self, messages, callback):
'''Messages is a list of (method, params) tuples'''
messages = list(messages)
with self.lock:
self.pending_sends.append((messages, callback))
async def job(future):
await self.pending_sends.put((messages, callback))
if future: future.set_result("put pending send: " + repr(messages))
asyncio.run_coroutine_threadsafe(job(None), self.loop)
def process_pending_sends(self):
async def process_pending_sends(self):
# Requests need connectivity. If we don't have an interface,
# we cannot process them.
if not self.interface:
print("no interface, returning")
await asyncio.sleep(1)
return
with self.lock:
sends = self.pending_sends
self.pending_sends = []
messages, callback = await self.pending_sends.get()
for messages, callback in sends:
for method, params in messages:
r = None
if method.endswith('.subscribe'):
k = self.get_index(method, params)
# add callback to list
l = self.subscriptions.get(k, [])
if callback not in l:
l.append(callback)
self.subscriptions[k] = l
# check cached response for subscriptions
r = self.sub_cache.get(k)
if r is not None:
util.print_error("cache hit", k)
callback(r)
else:
message_id = self.queue_request(method, params)
self.unanswered_requests[message_id] = method, params, callback
for method, params in messages:
r = None
if method.endswith('.subscribe'):
k = self.get_index(method, params)
# add callback to list
l = self.subscriptions.get(k, [])
if callback not in l:
l.append(callback)
self.subscriptions[k] = l
# check cached response for subscriptions
r = self.sub_cache.get(k)
if r is not None:
util.print_error("cache hit", k)
callback(r)
else:
message_id = await self.queue_request(method, params)
self.unanswered_requests[message_id] = method, params, callback
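The subscribe branch above keeps one callback list per (method, params) key and replays the cached last response to late subscribers, so a second subscription to the same key never hits the network. The shape of that registry, reduced to illustrative code:

subscriptions = {}   # key -> list of callbacks
sub_cache = {}       # key -> last response seen

def subscribe(key, callback):
    callbacks = subscriptions.setdefault(key, [])
    if callback not in callbacks:
        callbacks.append(callback)
    cached = sub_cache.get(key)
    if cached is not None:
        callback(cached)      # replay immediately, skip the network

def on_response(key, response):
    sub_cache[key] = response
    for callback in subscriptions.get(key, []):
        callback(response)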
def unsubscribe(self, callback):
'''Unsubscribe a callback to free object references to enable GC.'''
@ -685,6 +677,7 @@ class Network(util.DaemonThread):
def connection_down(self, server):
'''A connection to server either went down, or was never made.
We distinguish by whether it is in self.interfaces.'''
self.print_error("connection down", server)
self.disconnected_servers.add(server)
if server == self.default_server:
self.set_status('disconnected')
@ -695,75 +688,27 @@ class Network(util.DaemonThread):
if b.catch_up == server:
b.catch_up = None
def new_interface(self, server, socket):
async def new_interface(self, server):
# todo: get tip first, then decide which checkpoint to use.
self.add_recent_server(server)
interface = Interface(server, socket)
interface = Interface(server, self.config.path, self.proxy)
interface.blockchain = None
interface.tip_header = None
interface.tip = 0
interface.mode = 'default'
interface.request = None
self.interfaces[server] = interface
self.queue_request('blockchain.headers.subscribe', [], interface)
if server == self.default_server:
self.switch_to_interface(server)
#self.notify('interfaces')
await self.queued_interfaces.put(interface)
#self.interfaces[server] = interface
return interface
def maintain_sockets(self):
'''Socket maintenance.'''
# Responses to connection attempts?
while not self.socket_queue.empty():
server, socket = self.socket_queue.get()
if server in self.connecting:
self.connecting.remove(server)
if socket:
self.new_interface(server, socket)
else:
self.connection_down(server)
async def request_chunk(self, interface, idx):
interface.print_error("requesting chunk %d" % idx)
await self.queue_request('blockchain.block.get_chunk', [idx], interface)
assert interface.jobs
interface.request = idx
interface.req_time = time.time()
# Send pings and shut down stale interfaces
# must use copy of values
for interface in list(self.interfaces.values()):
if interface.has_timed_out():
self.connection_down(interface.server)
elif interface.ping_required():
params = [ELECTRUM_VERSION, PROTOCOL_VERSION]
self.queue_request('server.version', params, interface)
now = time.time()
# nodes
if len(self.interfaces) + len(self.connecting) < self.num_server:
self.start_random_interface()
if now - self.nodes_retry_time > NODES_RETRY_INTERVAL:
self.print_error('network: retrying connections')
self.disconnected_servers = set([])
self.nodes_retry_time = now
# main interface
if not self.is_connected():
if self.auto_connect:
if not self.is_connecting():
self.switch_to_random_interface()
else:
if self.default_server in self.disconnected_servers:
if now - self.server_retry_time > SERVER_RETRY_INTERVAL:
self.disconnected_servers.remove(self.default_server)
self.server_retry_time = now
else:
self.switch_to_interface(self.default_server)
else:
if self.config.is_fee_estimates_update_required():
self.request_fee_estimates()
def request_chunk(self, interface, index):
if index in self.requested_chunks:
return
interface.print_error("requesting chunk %d" % index)
self.requested_chunks.add(index)
self.queue_request('blockchain.block.get_chunk', [index], interface)
def on_get_chunk(self, interface, response):
async def on_get_chunk(self, interface, response):
'''Handle receiving a chunk of block headers'''
error = response.get('error')
result = response.get('result')
@ -782,20 +727,20 @@ class Network(util.DaemonThread):
return
# If not finished, get the next chunk
if interface.blockchain.height() < interface.tip:
self.request_chunk(interface, index+1)
await self.request_chunk(interface, index+1)
else:
interface.mode = 'default'
interface.print_error('catch up done', interface.blockchain.height())
interface.blockchain.catch_up = None
self.notify('updated')
def request_header(self, interface, height):
async def request_header(self, interface, height):
#interface.print_error("requesting header %d" % height)
self.queue_request('blockchain.block.get_header', [height], interface)
await self.queue_request('blockchain.block.get_header', [height], interface)
interface.request = height
interface.req_time = time.time()
def on_get_header(self, interface, response):
async def on_get_header(self, interface, response):
'''Handle receiving a single block header'''
header = response.get('result')
if not header:
@ -903,7 +848,7 @@ class Network(util.DaemonThread):
# exit catch_up state
interface.print_error('catch up done', interface.blockchain.height())
interface.blockchain.catch_up = None
self.switch_lagging_interface()
await self.switch_lagging_interface()
self.notify('updated')
else:
@ -911,9 +856,9 @@ class Network(util.DaemonThread):
# If not finished, get the next header
if next_height:
if interface.mode == 'catch_up' and interface.tip > next_height + 50:
self.request_chunk(interface, next_height // 2016)
await self.request_chunk(interface, next_height // 2016)
else:
self.request_header(interface, next_height)
await self.request_header(interface, next_height)
else:
interface.mode = 'default'
interface.request = None
@ -921,34 +866,51 @@ class Network(util.DaemonThread):
# refresh network dialog
self.notify('interfaces')
def maintain_requests(self):
async def maintain_requests(self):
for interface in list(self.interfaces.values()):
if interface.request and time.time() - interface.req_time > 20:
interface.print_error("blockchain request timed out")
self.connection_down(interface.server)
continue
def wait_on_sockets(self):
# Python docs say Windows doesn't like empty selects.
# Sleep to prevent busy looping
if not self.interfaces:
time.sleep(0.1)
return
rin = [i for i in self.interfaces.values()]
win = [i for i in self.interfaces.values() if i.num_requests()]
try:
rout, wout, xout = select.select(rin, win, [], 0.1)
except socket.error as e:
# TODO: py3, get code from e
code = None
if code == errno.EINTR:
return
raise
assert not xout
for interface in wout:
interface.send_requests()
for interface in rout:
self.process_responses(interface)
def make_send_requests_job(self, interface):
async def job():
try:
while self.is_running():
result = await interface.send_request()
if not result:
self.connection_down(interface.server)
except CancelledError:
pass
except:
traceback.print_exc()
print("FATAL ERROR ^^^")
return asyncio.ensure_future(job())
def make_process_responses_job(self, interface):
async def job():
try:
await self.process_responses(interface)
except CancelledError:
pass
except OSError:
self.connection_down(interface.server)
print("OS error, connection downed")
except BaseException:
traceback.print_exc()
print("FATAL ERROR in process_responses")
return asyncio.ensure_future(job())
def make_process_pending_sends_job(self):
async def job():
try:
while self.is_running():
await self.process_pending_sends()
except CancelledError:
pass
except BaseException as e:
traceback.print_exc()
print("FATAL ERROR in process_pending_sends")
return asyncio.ensure_future(job())
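The three factories above share one shape: wrap a long-running coroutine in asyncio.ensure_future, treat CancelledError as a clean shutdown, and print anything else so failures are never silent. A minimal sketch of that pattern, with illustrative names, assuming it runs while an event loop is set:

import asyncio
import traceback
from asyncio import CancelledError

def make_job(coro_fn):
    # coro_fn: any long-running coroutine function (illustrative stand-in)
    async def job():
        try:
            await coro_fn()
        except CancelledError:
            pass  # cancelled on shutdown; a clean exit, not an error
        except BaseException:
            traceback.print_exc()  # everything else must at least be visible
    return asyncio.ensure_future(job())

async def demo():
    task = make_job(lambda: asyncio.sleep(60))
    await asyncio.sleep(0)    # let job() start and reach its await
    task.cancel()
    await asyncio.wait([task])  # job() swallows the CancelledError and finishes

asyncio.get_event_loop().run_until_complete(demo())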
def init_headers_file(self):
b = self.blockchains[0]
@ -962,18 +924,121 @@ class Network(util.DaemonThread):
with b.lock:
b.update_size()
def run(self):
self.init_headers_file()
while self.is_running():
self.maintain_sockets()
self.wait_on_sockets()
self.maintain_requests()
self.run_jobs() # Synchronizer and Verifier
self.process_pending_sends()
self.stop_network()
self.on_stop()
async def make_network_job(self, future):
try:
await self.start_network(deserialize_server(self.default_server)[2],
deserialize_proxy(self.config.get('proxy')))
self.process_pending_sends_job = self.make_process_pending_sends_job()
while self.is_running():
interface = await self.queued_interfaces.get()
await self.queue_request('server.version', [ELECTRUM_VERSION, PROTOCOL_VERSION], interface)
if not await interface.send_request():
print("interface did not work")
self.connection_down(interface.server)
continue
gathered = asyncio.gather(self.make_ping_job(interface), self.make_send_requests_job(interface), self.make_process_responses_job(interface))
interface.jobs = asyncio.ensure_future(gathered)
def cb(fut):
fut.exception()
try:
for i in fut.result(): assert i is None
except CancelledError:
pass
if not future.done(): future.set_result("Network job done")
interface.jobs.add_done_callback(cb)
self.interfaces[interface.server] = interface
await self.queue_request('blockchain.headers.subscribe', [], interface)
if interface.server == self.default_server:
await self.switch_to_interface(interface.server)
#self.notify('interfaces')
def on_notify_header(self, interface, header):
except BaseException as e:
traceback.print_exc()
print("FATAL ERROR in network_job")
if not future.done(): future.set_exception(e)
def make_ping_job(self, interface):
async def job():
try:
while self.is_running():
await asyncio.sleep(1)
# Send pings and shut down stale interfaces
# must use copy of values
if interface.has_timed_out():
print("timed out")
self.connection_down(interface.server)
elif interface.ping_required():
print("ping required")
params = [ELECTRUM_VERSION, PROTOCOL_VERSION]
await self.queue_request('server.version', params, interface)
except CancelledError:
pass
except:
traceback.print_exc()
print("FATAL ERRROR in ping_job")
return asyncio.ensure_future(job())
async def maintain_interfaces(self):
now = time.time()
# nodes
if len(self.interfaces) + len(self.connecting) < self.num_server:
await self.start_random_interface()
if now - self.nodes_retry_time > NODES_RETRY_INTERVAL:
self.print_error('network: retrying connections')
self.disconnected_servers = set([])
self.nodes_retry_time = now
# main interface
if not self.is_connected():
if self.auto_connect:
if not self.is_connecting():
await self.switch_to_random_interface()
else:
if self.default_server in self.disconnected_servers:
if now - self.server_retry_time > SERVER_RETRY_INTERVAL:
self.disconnected_servers.remove(self.default_server)
self.server_retry_time = now
else:
await self.switch_to_interface(self.default_server)
else:
if self.config.is_fee_estimates_update_required():
await self.request_fee_estimates()
def run(self):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop) # this does not set the loop on the qt thread
self.loop = loop # so we store it in the instance too
self.init_headers_file()
self.pending_sends = asyncio.Queue()
self.queued_interfaces = asyncio.Queue()
if not self.network_job:
network_job_future = asyncio.Future()
self.network_job = asyncio.ensure_future(self.make_network_job(network_job_future))
run_future = asyncio.Future()
asyncio.ensure_future(self.run_async(run_future))
combined_task = asyncio.gather(network_job_future, run_future)
loop.run_until_complete(combined_task)
combined_task.exception()
self.print_error("combined task result", combined_task.result())
loop.close()
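run() above follows the standard recipe for driving asyncio from a non-main thread: create a fresh loop, bind it to the current thread with set_event_loop (which is per-thread state), block in run_until_complete, and close the loop on exit. A standalone sketch of just that skeleton, with a sleep standing in for the real tasks:

import asyncio
import threading

def thread_main():
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)  # binds the loop to this thread only
    try:
        loop.run_until_complete(asyncio.sleep(0.1))  # placeholder workload
    finally:
        loop.close()

t = threading.Thread(target=thread_main, daemon=True)
t.start()
t.join()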
async def run_async(self, future):
try:
while self.is_running():
await asyncio.sleep(1)
await self.maintain_requests()
await self.maintain_interfaces()
self.run_jobs()
await self.stop_network()
self.on_stop()
future.set_result("run_async done")
except BaseException as e:
future.set_exception(e)
async def on_notify_header(self, interface, header):
height = header.get('block_height')
if not height:
return
@ -987,7 +1052,7 @@ class Network(util.DaemonThread):
b = blockchain.check_header(header)
if b:
interface.blockchain = b
self.switch_lagging_interface()
await self.switch_lagging_interface()
self.notify('updated')
self.notify('interfaces')
return
@ -995,7 +1060,7 @@ class Network(util.DaemonThread):
if b:
interface.blockchain = b
b.save_header(header)
self.switch_lagging_interface()
await self.switch_lagging_interface()
self.notify('updated')
self.notify('interfaces')
return
@ -1004,17 +1069,14 @@ class Network(util.DaemonThread):
interface.mode = 'backward'
interface.bad = height
interface.bad_header = header
self.request_header(interface, min(tip +1, height - 1))
await self.request_header(interface, min(tip + 1, height - 1))
else:
chain = self.blockchains[0]
if chain.catch_up is None:
chain.catch_up = interface
interface.mode = 'catch_up'
interface.blockchain = chain
self.print_error("switching to catchup mode", tip, self.blockchains)
self.request_header(interface, 0)
else:
self.print_error("chain already catching up with", chain.catch_up.server)
await self.request_header(interface, 0)
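The 'backward' branch above handles a header that does not connect to any known chain: it records the offending height and requests progressively earlier headers until one checks out, locating the fork point. A much-simplified sketch of the idea (is_on_chain is an illustrative stand-in for blockchain.check_header, and the real code steps backward more aggressively than one header at a time):

def find_fork_point(is_on_chain, bad_height):
    # walk back from the first non-connecting height until headers reconnect
    h = bad_height - 1
    while h > 0 and not is_on_chain(h):
        h -= 1
    return h  # highest height still shared with the local chain

# e.g. a reorg whose first divergent header is at height 100:
assert find_fork_point(lambda h: h < 100, 100) == 99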
def blockchain(self):
if self.interface and self.interface.blockchain is not None:
@ -1029,6 +1091,7 @@ class Network(util.DaemonThread):
out[k] = r
return out
# called from the Qt thread
def follow_chain(self, index):
blockchain = self.blockchains.get(index)
if blockchain:
@ -1036,16 +1099,19 @@ class Network(util.DaemonThread):
self.config.set_key('blockchain_index', index)
for i in self.interfaces.values():
if i.blockchain == blockchain:
self.switch_to_interface(i.server)
asyncio.run_coroutine_threadsafe(self.switch_to_interface(i.server), self.loop)
break
else:
raise BaseException('blockchain not found', index)
if self.interface:
server = self.interface.server
host, port, protocol, proxy, auto_connect = self.get_parameters()
host, port, protocol = server.split(':')
self.set_parameters(host, port, protocol, proxy, auto_connect)
# commented out during the migration to asyncio; it is unclear
# whether it relies on the coroutine having completed:
#if self.interface:
# server = self.interface.server
# host, port, protocol, proxy, auto_connect = self.get_parameters()
# host, port, protocol = server.split(':')
# self.set_parameters(host, port, protocol, proxy, auto_connect)
def get_local_height(self):
return self.blockchain().height()

83
lib/ssl_in_socks.py Normal file
View File

@ -0,0 +1,83 @@
import ssl
from asyncio.sslproto import SSLProtocol
import aiosocks
import asyncio
from . import interface
class AppProto(asyncio.Protocol):
def __init__(self, receivedQueue, connUpLock):
self.buf = bytearray()
self.receivedQueue = receivedQueue
self.connUpLock = connUpLock
def connection_made(self, transport):
self.connUpLock.release()
def data_received(self, data):
    self.buf.extend(data)
    # emit every complete newline-terminated message and keep only the
    # unconsumed tail in the buffer
    while True:
        idx = self.buf.find(b"\n")
        if idx == -1:
            break
        asyncio.ensure_future(self.receivedQueue.put(bytes(self.buf[:idx + 1])))
        self.buf = self.buf[idx + 1:]
def makeProtocolFactory(receivedQueue, connUpLock, ca_certs):
class MySSLProtocol(SSLProtocol):
def connection_lost(self, data):
print("conn lost")
super().connection_lost(data)
def _on_handshake_complete(self, handshake_exc):
super()._on_handshake_complete(handshake_exc)
if handshake_exc is not None:
print("handshake complete", handshake_exc)
print("cert length", len(self._sslpipe.ssl_object.getpeercert(True)))
def __init__(self):
context = interface.get_ssl_context(cert_reqs=ssl.CERT_REQUIRED if ca_certs is not None else ssl.CERT_NONE, ca_certs=ca_certs)
proto = AppProto(receivedQueue, connUpLock)
super().__init__(asyncio.get_event_loop(), proto, context, None)
return MySSLProtocol
class ReaderEmulator:
def __init__(self, receivedQueue):
self.receivedQueue = receivedQueue
async def readuntil(self, splitter):
assert splitter == b"\n"
return await self.receivedQueue.get()
class WriterEmulator:
def __init__(self, transport):
self.transport = transport
def write(self, data):
self.transport.write(data)
async def drain(self):
pass
def close(self):
self.transport.close()
async def sslInSocksReaderWriter(socksAddr, socksAuth, host, port, ca_certs):
receivedQueue = asyncio.Queue()
connUpLock = asyncio.Lock()
await connUpLock.acquire()
transport, protocol = await aiosocks.create_connection(makeProtocolFactory(receivedQueue, connUpLock, ca_certs), proxy=socksAddr, proxy_auth=socksAuth, dst=(host, port))
await connUpLock.acquire()
return ReaderEmulator(receivedQueue), WriterEmulator(protocol._app_transport)
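sslInSocksReaderWriter uses a pre-acquired asyncio.Lock purely as a one-shot signal: the second acquire() suspends until connection_made releases the lock. asyncio.Event expresses the same rendezvous more directly; a minimal sketch of the equivalent under that substitution (on_connection_made stands in for the real protocol callback):

import asyncio

async def main():
    conn_up = asyncio.Event()

    def on_connection_made():
        # in the real code this would be Protocol.connection_made
        conn_up.set()

    asyncio.get_event_loop().call_soon(on_connection_made)
    await conn_up.wait()  # resumes once the callback has fired
    print("connection up")

asyncio.get_event_loop().run_until_complete(main())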
if __name__ == "__main__":
async def l(fut):
try:
reader, writer = await sslInSocksReaderWriter(aiosocks.Socks4Addr("127.0.0.1", 9050), None, "songbird.bauerj.eu", 50002, None)
writer.write(b'{"id":0,"method":"server.version","args":["3.0.2", "1.1"]}\n')
await writer.drain()
print(await reader.readuntil(b"\n"))
writer.close()
fut.set_result("finished")
except BaseException as e:
fut.set_exception(e)
def f():
loop = asyncio.get_event_loop()
fut = asyncio.Future()
asyncio.ensure_future(l(fut))
loop.run_until_complete(fut)
print(fut.result())
loop.close()
f()

View File

@ -27,10 +27,10 @@ import hashlib
# from .bitcoin import Hash, hash_encode
from .transaction import Transaction
from .util import ThreadJob, bh2u
from .util import CoroutineJob, bh2u
class Synchronizer(ThreadJob):
class Synchronizer(CoroutineJob):
'''The synchronizer keeps the wallet up-to-date with its set of
addresses and their transactions. It subscribes over the network
to wallet addresses, gets the wallet to generate new addresses
@ -178,7 +178,7 @@ class Synchronizer(ThreadJob):
self.print_error("missing tx", self.requested_tx)
self.subscribe_to_addresses(set(self.wallet.get_addresses()))
def run(self):
async def run(self):
'''Called from the network proxy thread main loop.'''
# 1. Create new addresses
self.wallet.synchronize()

View File

@ -29,13 +29,14 @@ import traceback
import urllib
import threading
import hmac
import time
import json
import urllib.request, urllib.parse, urllib.error
import queue
from .i18n import _
import urllib.request, urllib.parse, urllib.error
import queue
def inv_dict(d):
return {v: k for k, v in d.items()}
@ -86,6 +87,15 @@ class PrintError(object):
def print_msg(self, *msg):
print_msg("[%s]" % self.diagnostic_name(), *msg)
class CoroutineJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
async def run(self):
"""Called periodically from the thread"""
pass
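A concrete subclass makes the contract clearer: override run() as a coroutine and let the owning thread await it once per pass of run_coroutines(). A minimal illustrative example (the class name and behaviour are not from this codebase):

class HeartbeatJob(CoroutineJob):
    """Counts how many times the main loop has awaited it."""
    def __init__(self):
        self.ticks = 0

    async def run(self):
        self.ticks += 1  # awaited once per iteration of run_coroutines()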
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
@ -130,6 +140,21 @@ class DaemonThread(threading.Thread, PrintError):
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
self.coroutines = []
def add_coroutines(self, jobs):
for i in jobs: assert isinstance(i, CoroutineJob), i.__class__.__name__ + " does not inherit from CoroutineJob"
self.coroutines.extend(jobs)
async def run_coroutines(self):
for coroutine in self.coroutines:
assert isinstance(coroutine, CoroutineJob)
await coroutine.run()
def remove_coroutines(self, jobs):
for i in jobs: assert isinstance(i, CoroutineJob)
for job in jobs:
self.coroutines.remove(job)
def add_jobs(self, jobs):
with self.job_lock:
@ -141,6 +166,7 @@ class DaemonThread(threading.Thread, PrintError):
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
assert isinstance(job, ThreadJob)
try:
job.run()
except Exception as e:
@ -600,112 +626,8 @@ def parse_json(message):
class timeout(Exception):
pass
import socket
import json
import ssl
import time
class SocketPipe:
def __init__(self, socket):
self.socket = socket
self.message = b''
self.set_timeout(0.1)
self.recv_time = time.time()
def set_timeout(self, t):
self.socket.settimeout(t)
def idle_time(self):
return time.time() - self.recv_time
def get(self):
while True:
response, self.message = parse_json(self.message)
if response is not None:
return response
try:
data = self.socket.recv(1024)
except socket.timeout:
raise timeout
except ssl.SSLError:
raise timeout
except socket.error as err:
if err.errno == 60:
raise timeout
elif err.errno in [11, 35, 10035]:
print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
time.sleep(0.2)
raise timeout
else:
print_error("pipe: socket error", err)
data = b''
except:
traceback.print_exc(file=sys.stderr)
data = b''
if not data: # Connection closed remotely
return None
self.message += data
self.recv_time = time.time()
def send(self, request):
out = json.dumps(request) + '\n'
out = out.encode('utf8')
self._send(out)
def send_all(self, requests):
out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
self._send(out)
def _send(self, out):
while out:
try:
sent = self.socket.send(out)
out = out[sent:]
except ssl.SSLError as e:
print_error("SSLError:", e)
time.sleep(0.1)
continue
except OSError as e:
print_error("OSError", e)
time.sleep(0.1)
continue
class QueuePipe:
def __init__(self, send_queue=None, get_queue=None):
self.send_queue = send_queue if send_queue else queue.Queue()
self.get_queue = get_queue if get_queue else queue.Queue()
self.set_timeout(0.1)
def get(self):
try:
return self.get_queue.get(timeout=self.timeout)
except queue.Empty:
raise timeout
def get_all(self):
responses = []
while True:
try:
r = self.get_queue.get_nowait()
responses.append(r)
except queue.Empty:
break
return responses
def set_timeout(self, t):
self.timeout = t
def send(self, request):
self.send_queue.put(request)
def send_all(self, requests):
for request in requests:
self.send(request)
@ -732,4 +654,4 @@ def setup_thread_excepthook():
self.run = run_with_except_hook
threading.Thread.__init__ = init

View File

@ -20,11 +20,11 @@
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .util import ThreadJob
from .util import CoroutineJob
from .bitcoin import *
class SPV(ThreadJob):
class SPV(CoroutineJob):
""" Simple Payment Verification """
def __init__(self, network, wallet):
@ -35,7 +35,7 @@ class SPV(ThreadJob):
# requested, and the merkle root once it has been verified
self.merkle_roots = {}
def run(self):
async def run(self):
lh = self.network.get_local_height()
unverified = self.wallet.get_unverified_txs()
for tx_hash, tx_height in unverified.items():
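The heart of SPV verification is recomputing a block's merkle root from the transaction hash and its merkle branch: at each level the running digest is double-SHA256-hashed together with its sibling, ordered by the transaction's position bit. A minimal sketch over raw 32-byte digests (the byte-order reversal Bitcoin applies to displayed hex hashes is deliberately omitted):

import hashlib

def sha256d(b):
    return hashlib.sha256(hashlib.sha256(b).digest()).digest()

def merkle_root_from_branch(tx_hash, branch, pos):
    # tx_hash: 32-byte digest; branch: sibling digests from leaf to root;
    # pos: index of the transaction within the block
    h = tx_hash
    for sibling in branch:
        if pos & 1:
            h = sha256d(sibling + h)  # our node is the right child
        else:
            h = sha256d(h + sibling)  # our node is the left child
        pos >>= 1
    return h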

View File

@ -1003,14 +1003,14 @@ class Abstract_Wallet(PrintError):
self.prepare_for_verifier()
self.verifier = SPV(self.network, self)
self.synchronizer = Synchronizer(self, network)
network.add_jobs([self.verifier, self.synchronizer])
network.add_coroutines([self.verifier, self.synchronizer])
else:
self.verifier = None
self.synchronizer = None
def stop_threads(self):
if self.network:
self.network.remove_jobs([self.synchronizer, self.verifier])
self.network.remove_coroutines([self.synchronizer, self.verifier])
self.synchronizer.release()
self.synchronizer = None
self.verifier = None

9
lol.py Normal file
View File

@ -0,0 +1,9 @@
from lib import bitcoin, transaction
import binascii
bitcoin.set_simnet()
print(transaction.Transaction.pay_script(bitcoin.TYPE_ADDRESS, "sb1q3hmm6ehggew56pz06km429mxeg3jhwtj8yfgk5"))
scr = bitcoin.address_to_script("sb1q3hmm6ehggew56pz06km429mxeg3jhwtj8yfgk5")
print(scr)
print(len(scr))

6853
out.perf Normal file

File diff suppressed because it is too large Load Diff

BIN
perf.data Normal file

Binary file not shown.

View File

@ -44,7 +44,7 @@ setup(
'protobuf',
'dnspython',
'jsonrpclib-pelix',
'PySocks>=1.6.6',
'aiosocks>=0.2.6',
],
packages=[
'electrum',