latest upstream changes

This commit is contained in:
Vivek Teega 2020-01-28 16:21:11 +00:00
parent 01a3cbefd6
commit 5fbb1d2dd2
107 changed files with 7300 additions and 2475 deletions

4
.gitignore vendored
View File

@ -9,4 +9,6 @@ docs/_build
/build
/dist
/electrumx.egg-info
.idea/
.vscode/
.mypy_cache/
.idea/

View File

@ -2,16 +2,11 @@ sudo: required
dist: trusty
language: python
before_install:
- sudo add-apt-repository -y ppa:giskou/librocksdb
- sudo add-apt-repository -y ppa:streetcrypto7/rocksdb
- sudo add-apt-repository -y ppa:streetcrypto7/leveldb
- sudo apt-get -qq update
- sudo apt-get install -yq libsnappy-dev zlib1g-dev libbz2-dev libgflags-dev
- sudo apt-get install -yq --allow-unauthenticated librocksdb
- wget https://launchpad.net/ubuntu/+archive/primary/+files/leveldb_1.20.orig.tar.gz
- tar -xzvf leveldb_1.20.orig.tar.gz
- pushd leveldb-1.20 && make && sudo mv out-shared/libleveldb.* /usr/local/lib && sudo cp -R include/leveldb /usr/local/include && sudo ldconfig && popd
- sudo apt-get install -yq libsnappy-dev zlib1g-dev libbz2-dev libgflags-dev liblz4-dev librocksdb-dev libleveldb-dev
python:
- "3.6"
- "3.6-dev"
- "3.7-dev"
- "nightly"
# command to install dependencies
@ -22,7 +17,7 @@ install:
- pip install plyvel
- pip install pycodestyle
- pip install pylru
- pip install pyrocksdb
- pip install python-rocksdb
- pip install pytest-asyncio
- pip install pytest-cov
- pip install Sphinx
@ -35,17 +30,22 @@ install:
- pip install xevan_hash
- pip install quark_hash
- pip install groestlcoin_hash
- pip install git+https://github.com/goacoincore/neoscrypt
- pip install git+https://github.com/motioncrypto/x16r_hash
- pip install neoscrypt
- pip install x16r_hash
- pip install pycryptodomex
- pip install git+https://github.com/Electra-project/nist5_hash
- pip install git+https://github.com/RitoProject/x21s_hash
- pip install git+https://github.com/traysi/x16rv2_hash
- pip install bell-yespower
- pip install cpupower
# command to run tests
script:
- pytest --cov=electrumx
- pycodestyle electrumx/server/*.py electrumx/lib/*.py
- pycodestyle --max-line-length=100 electrumx/server/*.py electrumx/lib/*.py *.py
- sh -c "cd docs && make html"
# Dont report coverage from nightly
after_success:
- if [[ $(python3 -V 2>&1) == *"Python 3.6"* ]]; then
- if [[ $(python3 -V 2>&1) == *"Python 3.7"* ]]; then
pip install python-coveralls;
coveralls;
fi

View File

@ -1 +1,22 @@
Ranchi Mall offers ElectrumX server service for FLO publicly at ranchimall.duckdns.org and ranchimall1.duckdns.org at ports 50001 and 50002
.. image:: https://travis-ci.org/kyuupichan/electrumx.svg?branch=master
:target: https://travis-ci.org/kyuupichan/electrumx
.. image:: https://coveralls.io/repos/github/kyuupichan/electrumx/badge.svg
:target: https://coveralls.io/github/kyuupichan/electrumx
===============================================
ElectrumX - Reimplementation of electrum-server
===============================================
For a future network with bigger blocks.
:Licence: MIT
:Language: Python (>= 3.7)
:Author: Neil Booth
Documentation
=============
See `readthedocs <https://electrumx.readthedocs.io/>`_.
**Neil Booth** kyuupichan@gmail.com https://github.com/kyuupichan

35
contrib/Dockerfile Normal file
View File

@ -0,0 +1,35 @@
# example of Dockerfile that builds release of electrumx-1.13.0
# ENV variables can be overridden on the `docker run` command
FROM ubuntu:18.04
WORKDIR /
ADD https://github.com/kyuupichan/electrumx/archive/1.13.0.tar.gz /
RUN tar zxvf *.tar.gz
RUN apt-get update && \
apt-get -y install python3.7 python3-pip librocksdb-dev libsnappy-dev libbz2-dev libz-dev liblz4-dev && \
pip3 install aiohttp pylru python-rocksdb
RUN cd /electrumx* && python3 setup.py install
ENV SERVICES="tcp://:50001"
ENV COIN=BitcoinSV
ENV DB_DIRECTORY=/db
ENV DAEMON_URL="http://username:password@hostname:port/"
ENV ALLOW_ROOT=true
ENV DB_ENGINE=rocksdb
ENV MAX_SEND=10000000
ENV BANDWIDTH_UNIT_COST=50000
ENV CACHE_MB=2000
VOLUME /db
RUN mkdir -p "$DB_DIRECTORY" && ulimit -n 1048576
CMD ["/usr/bin/python3", "/usr/local/bin/electrumx_server"]
# build it with eg.: `docker build -t electrumx .`
# run it with eg.:
# `docker run -d --net=host -v /home/electrumx/db/:/db -e DAEMON_URL="http://youruser:yourpass@localhost:8332" -e REPORT_SERVICES=tcp://example.com:50001 electrumx`
# for a proper clean shutdown, send TERM signal to the running container eg.: `docker kill --signal="TERM" CONTAINER_ID`

View File

@ -5,5 +5,7 @@ DB_DIRECTORY = /db
# Bitcoin Node RPC Credentials
DAEMON_URL = http://username:password@hostname:port/
# COIN = BitcoinSegwit
# See http://electrumx.readthedocs.io/en/latest/environment.html for
# information about other configuration settings you probably want to consider.

View File

@ -15,7 +15,7 @@ small - pull requests are welcome.
================ ========================
Package Notes
================ ========================
Python3 ElectrumX uses asyncio. Python version >= 3.6 is
Python3 ElectrumX uses asyncio. Python version >= 3.7 is
**required**.
`aiohttp`_ Python library for asynchronous HTTP. Version >=
2.0 required.
@ -51,11 +51,11 @@ used to either.
When building the database from the genesis block, ElectrumX has to
flush large quantities of data to disk and its DB. You will have a
better experience if the database directory is on an SSD than on an
HDD. Currently to around height 447,100 of the Bitcoin blockchain the
HDD. Currently to around height 611,600 of the Bitcoin blockchain the
final size of the leveldb database, and other ElectrumX file metadata
comes to just over 18.7GB (17.5 GiB). LevelDB needs a bit more for
comes to just over 46.9GB (43.7 GiB). LevelDB needs a bit more for
brief periods, and the block chain is only getting longer, so I would
recommend having at least 30-40GB of free space before starting.
recommend having at least 70-80GB of free space before starting.
Database Engine
===============
@ -208,14 +208,6 @@ Once configured you may want to start ElectrumX at boot::
:file:`.service` file.
Installing Python 3.6 under Ubuntu
----------------------------------
Many Ubuntu distributions have an incompatible Python version baked
in. Because of this, it is easier to install Python 3.6. See
`contrib/python3.6/python-3.6.sh`_.
Installing on Raspberry Pi 3
----------------------------
@ -371,6 +363,8 @@ The ETA shown is just a rough guide and in the short term can be quite
volatile. It tends to be a little optimistic at first; once you get
to height 280,000 it should be fairly accurate.
.. _SSL certificates:
Creating a self-signed SSL certificate
======================================
@ -433,6 +427,5 @@ You can then set the port as follows and advertise the service externally on the
.. _`aiohttp`: https://pypi.python.org/pypi/aiohttp
.. _`pylru`: https://pypi.python.org/pypi/pylru
.. _`x11_hash`: https://pypi.python.org/pypi/x11_hash
.. _`contrib/python3.6/python-3.6.sh`: https://github.com/kyuupichan/electrumx/blob/master/contrib/python3.6/python-3.6.sh
.. _`contrib/raspberrypi3/install_electrumx.sh`: https://github.com/kyuupichan/electrumx/blob/master/contrib/raspberrypi3/install_electrumx.sh
.. _`contrib/raspberrypi3/run_electrumx.sh`: https://github.com/kyuupichan/electrumx/blob/master/contrib/raspberrypi3/run_electrumx.sh

View File

@ -7,13 +7,122 @@
and memory consumption whilst serving clients. Those problems
should not occur with Python 3.7.
.. note:: Bitcoin ABC developers have hastily introduced controversial
changes that break ElectrumX's block processing by requiring it to
be non-sequential. Unlike others with unique requirements they
refused to make their code coin-specific. ElectrumX continues to
require blocks be naturally ordered, and is compatible with any
non-CToR daemon, such as Bitcoin SV, and Bitcoin Unlimited /
Bitcoin XT with CToR disabled.
Version 1.13.0 (26 Sep 2019)
============================
* daemon: use a single connection for all requests rather than a connection per request.
Distinguish handling of JSON and HTTP errors
* recognise OP_FALSE OP_RETURN scripts as unspendable
* peers - attempt to bind to correct local IP address
* improve name support (domob1812)
* coin additions / updates: BitZeny (y-chan), ZCoin (a-bezrukov), Emercoin (yakimka),
BSV (Roger Taylor), Bellcoin (streetcrypto7), Ritocoin (traysi), BTC (Sombernight),
PIVX (mrcarlanthony), Monacoin (wakiyamap)), NamecoinRegtest (JeremyRand), Axe (ddude1),
Xaya (domob1812), GZRO (MrNaif2018), Ravencoin (standard-error)
* other: gits7r
Version 1.12.0 (13 May 2019)
============================
* require aiorpcX 0.18.1. This introduces websocket support. The environment variables
changed accordingly; see :envvar:`SERVICES` and :envvar:`REPORT_SERVICES`.
* work around bug in recent versions of uvloop
* aiorpcX upgrade fixes from Shane M
* coin additions / updates: BitcoinSV, Bolivarcoin (Jose Luis Estevez), BTC Testnet (ghost43),
Odin (Pixxl)
Version 1.11.0 (18 Apr 2019)
============================
* require aiorpcX 0.15.x
* require aiohttp 3.3 or higher; earlier versions had a problematic bug
* add :envvar:`REQUEST_TIMEOUT` and :envvar:`LOG_LEVEL` environment variables
* mark 4 old environment variables obsolete. ElectrumX won't start until they are removed
* getinfo local RPC cleaned up and shows more stats
* miscellaneous fixes and improvements
* more efficient handling of some RPC methods, particularly
:func:`blockchain.transaction.get_merkle`
* coin additions / updates: BitcoinSV scaling testnet (Roger Taylor), Dash (zebra lucky),
* issues resolved: `#566`_, `#731`_, `#795`_
Version 1.10.1 (13 Apr 2019)
============================
* introduce per-request costing. See environment variables documentation for new
variables :envvar:`COST_SOFT_LIMIT`, :envvar:`COST_HARD_LIMIT`, :envvar:`REQUEST_SLEEP`,
:envvar:`INITIAL_CONCURRENT`, :envvar:`BANDWIDTH_UNIT_COST`. Sessions are placed in groups
with which they share some of their costs. Prior cost is remembered across reconnects.
* require aiorpcX 0.13.5 for better concurrency handling
* require clients use protocol 1.4 or higher
* handle transaction.get_merkle requests more efficiently (ghost43)
* Windows support (sancoder)
* peers improvements (ghost43)
* report mempool and block sizes in logs
* electrumx_rpc: timeout raised to 30s, fix session request counts
* other tweaks and improvements by Bjorge Dijkstra, ghost43, peleion,
* coin additions / updates: ECA (Jenova7), ECCoin (smogm), GXX (DEVCØN), BZX (2INFINITY),
DeepOnion (Liam Alford), CivX / EXOS (turcol)
Version 1.10.0 (15 Mar 2019)
============================
* extra countermeasures to limit BTC phishing effectiveness (ghost43)
* peers: mark blacklisted peers bad; force retry blacklisted peers (ghost43)
* coin additions / updates: Monacoin (wakiyamap), Sparks (Mircea Rila), ColossusXT,
Polis, MNPCoin, Zcoin, GINCoin (cronos), Grosetlcoin (gruve-p), Dash (konez2k),
Bitsend (David), Ravencoin (standard-error), Onixcoin (Jose Estevez), SnowGem
* coin removals: Gobyte, Moneci (cronos)
* minor tweaks by d42
* issues fixed `#660`_ - unclean shutdowns during initial sync
Version 1.9.5 (08 Feb 2019)
===========================
* server blacklist logic (ecdsa)
* require aiorpcX 0.10.4
* remove dead wallet code
* fix `#727`_ - not listing same peer twice
Version 1.9.4 (07 Feb 2019)
===========================
* require aiorpcX 0.10.3
* fix `#713`_
Version 1.9.3 (05 Feb 2019)
===========================
* ignore potential sybil peers
* coin additions / updates: BitcoinCashABC (cculianu), Monacoin (wakiyamap)
Version 1.9.2 (03 Feb 2019)
===========================
* restore protocol version 1.2 and send a warning for old BTC Electrum clients that they
need to upgrade. This is an attempt to protect users of old versions of Electrum from
the ongoing phishing attacks
* increase default MAX_SEND for AuxPow Chains. Truncate AuxPow for block heights covered
by a checkpoint. (jeremyrand)
* coin additions / updates: NMC (jeremyrand), Dash (zebra-lucky), PeerCoin (peerchemist),
BCH testnet (Mark Lundeberg), Unitus (ChekaZ)
* tighter RPC param checking (ghost43)
Version 1.9.1 (11 Jan 2019)
===========================
* fix `#684`_
Version 1.9.0 (10 Jan 2019)
===========================
* minimum protocol version is now 1.4
* coin additions / updates: BitcoinSV, SmartCash (rc125), NIX (phamels), Minexcoin (joesixpack),
BitcoinABC (mblunderburg), Dash (zebra-lucky), BitcoinABCRegtest (ezegom), AXE (slowdive),
NOR (flo071), BitcoinPlus (bushsolo), Myriadcoin (cryptapus), Trezarcoin (ChekaZ),
Bitcoin Diamond (John Shine),
* close `#554`_, `#653`_, `#655`_
* other minor tweaks (Michael Schmoock, Michael Taborsky)
Version 1.8.12 (10 Nov 2018)
============================
@ -60,20 +169,6 @@ Version 1.8.6 (12 Sep 2018)
* new coin TokenPay (samfiragabriel)
* minor fix: wakiyamap
Version 1.8.7 (13 Sep 2018)
===========================
* require aiorpcX 0.8.1
* fix reorg bug loading blocks from disk (erasmospunk)
Version 1.8.6 (12 Sep 2018)
===========================
* require aiorpcX 0.8.0
* suppress socket.send() errors
* new coin TokenPay (samfiragabriel)
* minor fix: wakiyamap
Version 1.8.5 (18 Aug 2018)
===========================
@ -128,125 +223,17 @@ Version 1.8 (06 Aug 2018)
Decred (erasmonpsunk)
* other minor (smmalis37)
Version 1.7.3 (01 Aug 2018)
============================
* fix `#538`_
Version 1.7.2 (29 Jul 2018)
============================
* require aiorpcX 0.5.9; 0.5.8 didn't work on Python 3.7
Version 1.7.1 (28 Jul 2018)
============================
* switch to aiorpcX 0.5.8 which implements some curio task management
primitives on top of asyncio that make writing correct async code
much easier, as well as making it simpler to reason about
* use those primitives to restructure the peer manager, which is now
fully concurrent again, as well as the block processor and
controller
* fix `#534`_ introduced in 1.7
* minor coin tweaks (ghost43, cipig)
Version 1.7 (25 Jul 2018)
==========================
* completely overhauled mempool and address notifications
implementation. Cleaner and a lot more efficient, especially for
initial synchronization of the mempool. Mempool handling is fully
asynchronous and doesn't hinder client responses or block
processing.
* peer discovery cleaned up, more work remains
* cleaner shutdown process with clear guarantees
* aiohttp min version requirement raised to 2.0
* onion peers are ignored if no tor proxy is available
* add Motion coin (ocruzv), MinexCoin (joesixpack)
Version 1.6 (19 July 2018)
===========================
* implement :ref:`version 1.4` of the protocol, with benefit for light
clients, particularly mobile
* implement header proofs and merkle caches
* implement :func:`blockchain.transaction.id_from_pos` (ghost43)
* large refactoring of session and controller classes
* recent blocks are now stored on disk. When backing up in a reorg
ElectrumX uses these rather than asking the daemon for the blocks --
some daemons cannot correctly handle orphaned block requests after
a reorg. Fixes `#258`_, `#315`_, `#479`_
* minor fixes: nijel
Version 1.5.2
=============
* package renamed from elctrumX-kyuupichan to electrumX
* split merkle logic out into lib/merkle.py
* fix `#523`_ for daemons based on older releases of core
Version 1.5.1
=============
Fixes a couple of issues found in 1.5 after release:
* update peer discovery code for :ref:`version 1.3` of the protocol
* setup.py would not run in a clean environment (e.g. virtualenv)
* logging via aiorpcX didn't work with the logging hierarchy updates
* log Python interpreter version on startup
Version 1.5
===========
.. note:: The two main scripts, :file:`electrumx_server` and
:file:`electrumx_rpc` were renamed to drop the `.py` suffix. You
will probably need to update your run script accordingly.
* support :ref:`version 1.3` of the protocol
* increase minimum supported protocol version to :ref:`version 1.1`
* split out history handling in preparation for new DB format
* force close stubborn connections that refuse to close gracefully
* RPC getinfo returns server version (erasmospunk)
* add new masternode methods; document them all (elmora-do)
* make electrumx a Python package (eukreign)
* hierarchical logging, Env to take a coin class directly,
server_listening event (eukreign)
* decred coin removed as mainnet does not sync
* issues fixed: `#414`_, `#443`_, `#455`_, `#480`_, `#485`_, `#502`_,
`#506`_, `#519`_ (wakiyamap)
* new or updated coins: Feathercoin (lclc), NewYorkCoin Testnet(nicovs),
BitZeny (wakiyamap), UFO (bushstar), GAME (cipig), MAC (nico205),
Xuez (ddude), ZCash (wo01), PAC (elmora-do), Koto Testnet (wo01),
Dash Testnet (ser), BTG all nets (wilsonmeier), Polis + ColossusXT +
GoByte + Monoeci (cronos-polis), BitcoinCash Regtest (eukreign)
* minor tweaks: romanz, you21979, SuBPaR42, sangaman, wakiyamap, DaShak
**Neil Booth** kyuupichan@gmail.com https://github.com/kyuupichan
bitcoincash:qzxpdlt8ehu9ehftw6rqsy2jgfq4nsltxvhrdmdfpn
.. _#258: https://github.com/kyuupichan/electrumx/issues/258
.. _#315: https://github.com/kyuupichan/electrumx/issues/315
.. _#414: https://github.com/kyuupichan/electrumx/issues/414
.. _#443: https://github.com/kyuupichan/electrumx/issues/443
.. _#455: https://github.com/kyuupichan/electrumx/issues/455
.. _#479: https://github.com/kyuupichan/electrumx/issues/479
.. _#480: https://github.com/kyuupichan/electrumx/issues/480
.. _#485: https://github.com/kyuupichan/electrumx/issues/485
.. _#502: https://github.com/kyuupichan/electrumx/issues/502
.. _#506: https://github.com/kyuupichan/electrumx/issues/506
.. _#519: https://github.com/kyuupichan/electrumx/issues/519
.. _#521: https://github.com/kyuupichan/electrumx/issues/521
.. _#523: https://github.com/kyuupichan/electrumx/issues/523
.. _#534: https://github.com/kyuupichan/electrumx/issues/534
.. _#538: https://github.com/kyuupichan/electrumx/issues/538
.. _#552: https://github.com/kyuupichan/electrumx/issues/552
.. _#554: https://github.com/kyuupichan/electrumx/issues/554
.. _#557: https://github.com/kyuupichan/electrumx/issues/557
.. _#559: https://github.com/kyuupichan/electrumx/issues/559
.. _#564: https://github.com/kyuupichan/electrumx/issues/564
.. _#565: https://github.com/kyuupichan/electrumx/issues/565
.. _#566: https://github.com/kyuupichan/electrumx/issues/566
.. _#567: https://github.com/kyuupichan/electrumx/issues/567
.. _#570: https://github.com/kyuupichan/electrumx/issues/570
.. _#577: https://github.com/kyuupichan/electrumx/issues/577
@ -254,3 +241,11 @@ bitcoincash:qzxpdlt8ehu9ehftw6rqsy2jgfq4nsltxvhrdmdfpn
.. _#608: https://github.com/kyuupichan/electrumx/issues/608
.. _#630: https://github.com/kyuupichan/electrumx/issues/630
.. _#632: https://github.com/kyuupichan/electrumx/issues/632
.. _#653: https://github.com/kyuupichan/electrumx/issues/653
.. _#655: https://github.com/kyuupichan/electrumx/issues/655
.. _#660: https://github.com/kyuupichan/electrumx/issues/660
.. _#684: https://github.com/kyuupichan/electrumx/issues/684
.. _#713: https://github.com/kyuupichan/electrumx/issues/713
.. _#727: https://github.com/kyuupichan/electrumx/issues/727
.. _#731: https://github.com/kyuupichan/electrumx/issues/731
.. _#795: https://github.com/kyuupichan/electrumx/issues/795

View File

@ -15,7 +15,7 @@
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
VERSION="ElectrumX 1.8.12"
VERSION="ElectrumX 1.13.0"
# -- Project information -----------------------------------------------------

View File

@ -4,15 +4,14 @@
Environment Variables
=====================
ElectrumX takes no command line arguments, instead its behaviour is
controlled by environment variables. Only a few are required to be
given, the rest will have sensible defaults if not specified. Many of
the defaults around resource usage are conservative; I encourage you
to review them.
ElectrumX takes no command line arguments, instead its behaviour is controlled by
environment variables. Only a few are required to be given, the rest will have sensible
defaults if not specified. Many of the defaults around resource usage are conservative; I
encourage you to review them.
Note: by default the server will only serve to connections from the
same machine. To be accessible to other users across the internet you
must set **HOST** appropriately; see below.
.. note:: set :envvar:`SERVICES` appropriately to be able to connect to your server. For
clients across the internet to know what services you offer you must advertize your
services with :envvar:`REPORT_SERVICES`.
Required
@ -46,6 +45,11 @@ These environment variables are always required:
port for :envvar:`COIN` and :envvar:`NET` if omitted.
.. note:: With the above set your server will run and index the chain. To enable incoming
connections you must set :envvar:`SERVICES`, and for others to be aware of your server
set :envvar:`REPORT_SERVICES`.
For the ``run`` script
======================
@ -61,6 +65,119 @@ The following are required if you use the ``run`` script:
The username the server will run as.
Services
========
These two environment variables are comma-separated lists of individual *services*.
A **service** has the general form::
protocol://host:port
*protocol* is case-insensitive. The recognised protocols are::
tcp Plaintext TCP sockets
ssl SSL-encrypted TCP sockets
ws Plaintext websockets
wss SSL-encrypted websockets
rpc Plaintext RPC
In a services list, a protocol can be specified multiple times, with different hosts or
ports. This might be useful for multi-homed hosts, or if you offer both Tor and clearnet
services.
*host* can be a hostname, an IPv4 address, or an IPv6 address enclosed in square brackets.
*port* is an integer from :const:`1` to :const:`65535` inclusive.
Where documented, one or more of *protocol*, *host* and *port* can be omitted, in which
case a default value will be assumed.
Here are some examples of valid services::
tcp://host.domain.tld:50001 # Hostname, lowercase protocol, port
SSL://23.45.67.78:50002 # An IPv4 address, upper-case protocol, port
rpC://localhost # Host as a string, mixed-case protocol, default port
ws://[1234:5678:abcd::5601]:8000 # Host as an IPv6 address
wss://h3ubaasdlkheryasd.onion:50001 # Host as a Tor ".onion" address
rpc://:8000 # Default host, port given
host.domain.tld:5151 # Default protocol, hostname, port
rpc:// # RPC protocol, default host and port
.. note:: ElectrumX will not serve any incoming connections until it has fully caught up
with your bitcoin daemon. The only exception is local **RPC** connections,
which are served at any time after the server has initialized.
.. envvar:: SERVICES
A comma-separated list of services ElectrumX will accept incoming connections for.
This environment variable determines what interfaces and ports the server listens on, so
must be set correctly for any connection to the server to succeed. If unset or empty,
ElectrumX will not listen for any incoming connections.
*protocol* can be any recognised protocol.
*host* defaults to all of the machine's interfaces, except if the protocol is **rpc**,
when it defaults to :const:`localhost`.
*port* can only be defaulted for **rpc** where the default is :const:`8000`.
On most Unix systems ports below 1024 require elevated privileges so choosing a higher
port is advisable. On Debian for example, this can be achieved by installing the
libcap2-bin package::
sudo apt-get update && sudo apt-get -y install libcap2-bin
sudo setcap cap_net_bind_service=+ep /path/to/electrumx_server
If any listed service has protocol **ssl** or **wss** then :envvar:`SSL_CERTFILE` and
:envvar:`SSL_KEYFILE` must be defined.
Tor **onion** addresses are invalid in :envvar:`SERVICES`.
Here is an example value of the :envvar:`SERVICES` environment variable::
tcp://:50001,ssl://:50002,wss://:50004,rpc://
This serves **tcp**, **ssl**, **wss** on all interfaces on ports 50001, 50002 and 50004
respectively. **rpc** is served on its default host :const:`localhost` and default port
:const:`8000`.
.. envvar:: REPORT_SERVICES
A comma-separated list of services ElectrumX will advertize to other servers in the
server network (if peer discovery is enabled), and on any successful connection.
This environment variable must be set correctly, taking account of your network,
firewall and router setup, for clients and other servers to see how to connect to your
server. If not set or empty, no services are advertized.
The **rpc** protocol, special IP addresses (including private ones if peer discovery is
enabled), and :const:`localhost` are invalid in :envvar:`REPORT_SERVICES`.
Here is an example value of the :envvar:`REPORT_SERVICES` environment variable::
tcp://sv.usebsv.com:50001,ssl://sv.usebsv.com:50002,wss://sv.usebsv.com:50004
This advertizes **tcp**, **ssl**, **wss** services at :const:`sv.usebsv.com` on ports
50001, 50002 and 50004 respectively.
.. note:: Certificate Authority-signed certificates don't work over Tor, so you should
   only have Tor services in :envvar:`REPORT_SERVICES` if yours is self-signed.
.. envvar:: SSL_CERTFILE
The filesystem path to your SSL certificate file.
:ref:`SSL certificates`
.. envvar:: SSL_KEYFILE
The filesystem path to your SSL key file.
:ref:`SSL certificates`
Miscellaneous
=============
@ -72,6 +189,11 @@ These environment variables are optional:
<https://docs.python.org/3/library/logging.html#logrecord-attributes>`_
to use. Defaults to ``%(levelname)s:%(name)s:%(message)s``.
.. envvar:: LOG_LEVEL
The default Python logging level, a case-insensitive string. Useful values
are 'debug', 'info', 'warning' and 'error'.
.. envvar:: ALLOW_ROOT
Set this environment variable to anything non-empty to allow running
@ -89,54 +211,6 @@ These environment variables are optional:
to install the appropriate python package for your engine. The
value is not case sensitive.
.. envvar:: HOST
The host or IP address that the TCP and SSL servers will use when
binding listening sockets. Defaults to ``localhost``. To listen on
multiple specific addresses specify a comma-separated list. Set to
an empty string to listen on all available interfaces (likely both
IPv4 and IPv6).
.. envvar:: TCP_PORT
If set ElectrumX will serve TCP clients on
:envvar:`HOST`\::envvar:`TCP_PORT`.
.. note:: ElectrumX will not serve TCP connections until it has
fully caught up with your daemon.
.. envvar:: SSL_PORT
If set ElectrumX will serve SSL clients on
:envvar:`HOST`\::envvar:`SSL_PORT`. If set then
:envvar:`SSL_CERTFILE` and :envvar:`SSL_KEYFILE` must be defined
environment variables with values the filesystem paths to those SSL
files.
.. note:: ElectrumX will not serve SSL connections until it has
fully caught up with your daemon.
.. envvar:: RPC_HOST
The host or IP address that the RPC server will listen on and
defaults to ``localhost``. To listen on multiple specific addresses
specify a comma-separated list. Servers with unusual networking
setups might want to specify e.g. ``::1`` or ``127.0.0.1``
explicitly rather than defaulting to ``localhost``.
An empty string (normally indicating all interfaces) is interpreted
as ``localhost``, because allowing access to the server's RPC
interface to arbitrary connections across the internet is not a good
idea.
.. envvar:: RPC_PORT
ElectrumX will listen on this port for local RPC connections.
ElectrumX listens for RPC connections unless this is explicitly set
to blank. The default depends on :envvar:`COIN` and :envvar:`NET`
(e.g., 8000 for Bitcoin mainnet) if not set, as indicated in
`lib/coins.py`_.
.. envvar:: DONATION_ADDRESS
The server donation address reported to Electrum clients. Defaults
@ -228,9 +302,10 @@ raise them.
.. envvar:: MAX_SEND
The maximum size of a response message to send over the wire, in
bytes. Defaults to 1,000,000. Values smaller than 350,000 are
taken as 350,000 because standard Electrum protocol header "chunk"
requests are almost that large.
bytes. Defaults to 1,000,000 (except for AuxPoW coins, which default
to 10,000,000). Values smaller than 350,000 are taken as 350,000
because standard Electrum protocol header "chunk" requests are almost
that large.
The Electrum protocol has a flaw in that address histories must be
served all at once or not at all, an obvious avenue for abuse.
@ -245,40 +320,69 @@ raise them.
hexadecimal ASCII characters on the wire. Very few transactions on
Bitcoin mainnet are over 500KB in size.
.. envvar:: MAX_SUBS
.. envvar:: COST_SOFT_LIMIT
.. envvar:: COST_HARD_LIMIT
.. envvar:: REQUEST_SLEEP
.. envvar:: INITIAL_CONCURRENT
The maximum number of address subscriptions across all sessions.
Defaults to 250,000.
All values are integers. :envvar:`COST_SOFT_LIMIT` defaults to :const:`1,000`,
:envvar:`COST_HARD_LIMIT` to :const:`10,000`, :envvar:`REQUEST_SLEEP` to :const:`2,500`
milliseconds, and :envvar:`INITIAL_CONCURRENT` to :const:`10` concurrent requests.
.. envvar:: MAX_SESSION_SUBS
The server prices each request made to it based upon an estimate of the resources needed
to process it. Factors include whether the request uses bitcoind, how much bandwidth
it uses, and how hard it hits the databases.
The maximum number of address subscriptions permitted to a single
session. Defaults to 50,000.
To set a base for the units, a :func:`blockchain.scripthash.subscribe` subscription to
an address with a history of 2 or fewer transactions is costed at :const:`1.0` before
considering the bandwidth consumed. :func:`server.ping` is costed at :const:`0.1`.
.. envvar:: BANDWIDTH_LIMIT
As the total cost of a session goes over the soft limit, its requests start to be
throttled in two ways. First, the number of requests for that session that the server
will process concurrently is reduced. Second, each request starts to sleep a little
before being handled.
Per-session periodic bandwidth usage limit in bytes. This is a soft,
not hard, limit. Currently the period is hard-coded to be one hour.
The default limit value is 2 million bytes.
Before throttling starts, the server will process up to :envvar:`INITIAL_CONCURRENT`
requests concurrently without sleeping. As the session cost ranges from
:envvar:`COST_SOFT_LIMIT` to :envvar:`COST_HARD_LIMIT`, concurrency drops linearly to
zero and each request's sleep time increases linearly up to :envvar:`REQUEST_SLEEP`
milliseconds. Once the hard limit is reached, the session is disconnected.
Bandwidth usage over each period is totalled, and when this limit is
exceeded each subsequent request is stalled by sleeping before
handling it, effectively giving higher processing priority to other
sessions.
In order that non-abusive sessions can continue to be served, a session's cost gradually
decays over time. Subscriptions have an ongoing servicing cost, so the decay is slower
as the number of subscriptions increases.
The more bandwidth usage exceeds this soft limit the longer the next
request will sleep. Sleep times are a round number of seconds with
a minimum of 1. Each time the delay changes the event is logged.
If a session disconnects, ElectrumX continues to associate its cost with its IP address,
so if it immediately reconnects it will re-acquire its previous cost allocation.
Bandwidth usage is gradually reduced over time by "refunding" a
proportional part of the limit every now and then.
A server operator should experiment with different values according to server loads. It
is not necessarily true that e.g. having a low soft limit, decreasing concurrency and
increasing sleep will help handling heavy loads, as it will also increase the backlog of
requests the server has to manage in memory. It will also give a much worse experience
for genuine connections.
.. envvar:: BANDWIDTH_UNIT_COST
The number of bytes, sent and received, by a session that is deemed to cost :const:`1.0`.
The default value :const:`5,000` bytes, meaning the bandwidth cost assigned to a response
of 100KB is 20. If your bandwidth is cheap you should probably raise this.
.. envvar:: REQUEST_TIMEOUT
An integer number of seconds defaulting to :const:`30`. If a request takes longer than
this to respond to, either because of request limiting or because the request is
expensive, the server rejects it and returns a timeout error to the client indicating
that the server is busy.
This can help prevent large backlogs of unprocessed requests building up under heavy load.
.. envvar:: SESSION_TIMEOUT
An integer number of seconds defaulting to 600. Sessions with no
activity for longer than this are disconnected. Properly
functioning Electrum clients by default will send pings roughly
every 60 seconds.
An integer number of seconds defaulting to :const:`600`. Sessions that have not sent a
request for longer than this are disconnected. Properly functioning clients should send
a :func:`server.ping` request once roughly 450 seconds have passed since the previous
request, in order to avoid disconnection.
Peer Discovery
@ -344,50 +448,10 @@ some of this.
will autodetect any proxy running on the usual ports 9050 (Tor),
9150 (Tor browser bundle) and 1080 (socks).
.. envvar:: BLACKLIST_URL
Server Advertising
==================
These environment variables affect how your server is advertised
by peer discovery (if enabled).
.. envvar:: REPORT_HOST
The clearnet host to advertise. If not set, no clearnet host is
advertised.
.. envvar:: REPORT_TCP_PORT
The clearnet TCP port to advertise if :envvar:`REPORT_HOST` is set.
Defaults to :envvar:`TCP_PORT`. ``0`` disables publishing a TCP
port.
.. envvar:: REPORT_SSL_PORT
The clearnet SSL port to advertise if :envvar:`REPORT_HOST` is set.
Defaults to :envvar:`SSL_PORT`. ``0`` disables publishing an SSL
port.
.. envvar:: REPORT_HOST_TOR
If you wish to run a Tor service, this is the Tor host name to
advertise and must end with ``.onion``.
.. envvar:: REPORT_TCP_PORT_TOR
The Tor TCP port to advertise. The default is the clearnet
:envvar:`REPORT_TCP_PORT`, unless disabled or it is ``0``, otherwise
:envvar:`TCP_PORT`. ``0`` disables publishing a Tor TCP port.
.. envvar:: REPORT_SSL_PORT_TOR
The Tor SSL port to advertise. The default is the clearnet
:envvar:`REPORT_SSL_PORT`, unless disabled or it is ``0``, otherwise
:envvar:`SSL_PORT`. ``0`` disables publishing a Tor SSL port.
.. note:: Certificate-Authority signed certificates don't work over
Tor, so you should set :envvar:`REPORT_SSL_PORT_TOR` to
``0`` if yours is not self-signed.
URL to retrieve a list of blacklisted peers. If not set, a coin-
specific default is used.
Cache

View File

@ -27,7 +27,7 @@ Authors and License
===================
Neil Booth wrote the vast majority of the code; see :ref:`Authors`.
Python version at least 3.6 is required.
Python version at least 3.7 is required.
The code is released under the `MIT Licence
<https://github.com/kyuupichan/electrumx/LICENCE>`_.

View File

@ -4,8 +4,8 @@ Protocol Basics
Message Stream
--------------
Clients and servers communicate using **JSON RPC** over an unspecified
underlying stream transport protocol, typically TCP or SSL.
Clients and servers communicate using **JSON RPC** over an unspecified underlying stream
transport. Examples include TCP, SSL, WS and WSS.
Two standards `JSON RPC 1.0
<http://www.jsonrpc.org/specification_v1>`_ and `JSON RPC 2.0
@ -25,11 +25,12 @@ Clients making batch requests should limit their size depending on the
nature of their query, because servers will limit response size as an
anti-DoS mechanism.
Each RPC call, and each response, is separated by a single newline in
their respective streams. The JSON specification does not permit
control characters within strings, so no confusion is possible there.
However it does permit newlines as extraneous whitespace between
elements; client and server MUST NOT use newlines in such a way.
Over TCP and SSL raw sockets each RPC call, and each response, MUST be terminated by a
single newline to delimit messages. Websocket messages are already framed so they MUST
NOT be newline terminated. The JSON specification does not permit control characters
within strings, so no confusion is possible there. However it does permit newlines as
extraneous whitespace between elements; client and server MUST NOT use newlines in such a
way.
If using JSON RPC 2.0's feature of parameter passing by name, the
names shown in the description of the method or notification in
@ -77,38 +78,6 @@ from and including the server's response to this call will use its
negotiated protocol version.
.. _deserialized header:
Deserialized Headers
--------------------
A :dfn:`deserialized header` is a dictionary describing a block at a
given height.
A typical example would be similar to this template::
{
"block_height": <integer>,
"version": <integer>,
"prev_block_hash": <hexadecimal string>,
"merkle_root": <hexadecimal string>,
"timestamp": <integer>,
"bits": <integer>,
"nonce": <integer>
}
.. note:: The precise format of a deserialized block header varies by
coin, and also potentially by height for the same coin. Detailed
knowledge of the meaning of a block header is neither necessary nor
appropriate in the server.
Consequently deserialized headers are deprecated and will be removed
from the protocol in a future version. Instead, raw headers (as
hexadecimal strings) along with their height will be returned by new
RPC calls, and it will be up to the client to interpret the meaning
of the raw header.
.. _script hashes:
Script Hashes

View File

@ -121,14 +121,8 @@ Deprecated methods
Version 1.4
===========
This version removes all support for :ref:`deserialized headers
<deserialized header>`.
Changes
-------
@ -153,65 +147,22 @@ Removed methods
* :func:`blockchain.block.get_header`
* :func:`blockchain.block.get_chunk`
Version 1.5
===========
.. note:: This is a draft of ideas for protocol 1.5; they are not
implemented
This protocol version makes changes intended to allow clients and
servers to more easily scale to support queries about busy addresses.
It has changes to reduce the amount of round-trip queries made in
common usage, and to make results more compact to reduce bandwidth
consumption.
RPC calls with potentially large responses have pagination support,
and the return value of :func:`blockchain.scripthash.subscribe`
changes. Script hash :ref:`status <status>` had to be recalculated
with each new transaction and was undefined if it included more than
one mempool transaction. Its calculation is linear in history length
resulting in quadratic complexity as history grows. Its calculation
for large histories was demanding for both the server to compute and
the client to check.
RPC calls and notifications that combined the effects of the mempool
and confirmed history are removed.
The changes are beneficial to clients and servers alike, but will
require changes to both client-side and server-side logic. In
particular, the client should track what block (by hash and height)
wallet data is synchronized to, and if that hash is no longer part of
the main chain, it will need to remove wallet data for blocks that
were reorganized away and get updated information as of the first
reorganized block. The effects are limited to script hashes
potentially affected by the reorg, and for most clients this will be
the empty set.
New methods
-----------
* :func:`blockchain.scripthash.history`
* :func:`blockchain.scripthash.utxos`
New notifications
-----------------
* :func:`mempool.changes`
Version 1.4.1
=============
Changes
-------
* :func:`blockchain.scripthash.subscribe` has changed its return value
and the notifications it sends
* :func:`blockchain.transaction.get` takes an additional optional
argument *merkle*
* :func:`blockchain.block.header` and :func:`blockchain.block.headers` now
truncate AuxPoW data (if using an AuxPoW chain) when *cp_height* is
nonzero. AuxPoW data is still present when *cp_height* is zero.
Non-AuxPoW chains are unaffected.
Removed methods
---------------
* :func:`blockchain.scripthash.get_history`. Switch to
:func:`blockchain.scripthash.history`
* :func:`blockchain.scripthash.get_mempool`. Switch to
handling :func:`mempool.changes` notifications
* :func:`blockchain.scripthash.listunspent`. Switch to
:func:`blockchain.scripthash.utxos`
Version 1.4.1
=============
New methods
-----------
* :func:`blockchain.scipthash.unsubscribe` to unsubscribe from a script hash.

283
docs/protocol-ideas.rst Normal file
View File

@ -0,0 +1,283 @@
==============
Protocol Ideas
==============
.. note:: This is a draft of ideas for a future protocol tentatively called 2.0; they are
not implemented and it is likely they will change and that protocol 2.0 will be
quite different.
This protocol version makes changes intended to allow clients and servers to more easily
scale to support queries about busy addresses. It has changes to reduce the amount of
round-trip queries made in common usage, and to make results more compact to reduce
bandwidth consumption.
RPC calls with potentially large responses have pagination support, and the return value
of :func:`blockchain.scripthash.subscribe` changes. Script hash :ref:`status <status>`
had to be recalculated with each new transaction and was undefined if it included more
than one mempool transaction. Its calculation is linear in history length resulting in
quadratic complexity as history grows. Its calculation for large histories was demanding
for both the server to compute and the client to check.
RPC calls and notifications that combined the effects of the mempool and confirmed history
are removed.
The changes are beneficial to clients and servers alike, but will require changes to both
client-side and server-side logic. In particular, the client should track what block (by
hash and height) wallet data is synchronized to, and if that hash is no longer part of the
main chain, it will need to remove wallet data for blocks that were reorganized away and
get updated information as of the first reorganized block. The effects are limited to
script hashes potentially affected by the reorg, and for most clients this will be the
empty set.
blockchain.scripthash.subscribe
===============================
Subscribe to a script hash.
**Signature**
.. function:: blockchain_.scripthash.subscribe(scripthash)
*scripthash*
The script hash as a hexadecimal string.
**Result**
.. versionchanged:: 2.0
As of protocol 2.0, the transaction hash of the last confirmed
transaction in blockchain order, or :const:`null` if there are none.
For protocol versions 1.4 and below, the :ref:`status <status>` of
the script hash.
**Notifications**
.. versionchanged:: 2.0
As this is a subscription, the client receives notifications when
the confirmed transaction history and/or associated mempool
transactions change.
As of protocol 2.0, the initial mempool and subsequent changes to it
are sent with :func:`mempool.changes` notifications. When confirmed
history changes, a notification with signature
.. function:: blockchain_.scripthash.subscribe(scripthash, tx_hash)
is sent, where *tx_hash* is the hash of the last confirmed
transaction in blockchain order.
blockchain.scripthash.history
=============================
Return part of the confirmed history of a :ref:`script hash <script
hashes>`.
**Signature**
.. function:: blockchain.scripthash.history(scripthash, start_height)
*scripthash*
The script hash as a hexadecimal string.
*start_height*
History will be returned starting from this height, a non-negative
integer. If there are several matching transactions in a block,
the server will return *all* of them -- partial results from a
block are not permitted. The client can start subsequent requests
at one above the greatest returned height and avoid repeats.
**Result**
A dictionary with the following keys.
* *more*
:const:`true` indicates that there *may* be more history
available. A follow-up request is required to obtain any.
:const:`false` means all history to blockchain's tip has been
returned.
* *history*
A list of transactions. Each transaction is itself a list of
two elements:
1. The block height
2. The transaction hash
**Result Examples**
::
{
"more": false,
"history": [
[
200004,
"acc3758bd2a26f869fcc67d48ff30b96464d476bca82c1cd6656e7d506816412"
],
[
215008,
"f3e1bf48975b8d6060a9de8884296abb80be618dc00ae3cb2f6cee3085e09403"
]
]
}
blockchain.scripthash.utxos
===========================
Return some confirmed UTXOs sent to a script hash.
**Signature**
.. function:: blockchain.scripthash.utxos(scripthash, start_height)
.. versionadded:: 2.0
*scripthash*
The script hash as a hexadecimal string.
*start_height*
UTXOs will be returned starting from this height, a non-negative
integer. If there are several UTXOs in one block, the server will
return *all* of them -- partial results from a block are not
permitted. The client can start subsequent requests at one above
the greatest returned height and avoid repeats.
.. note:: To get the effects of transactions in the mempool adding or
removing UTXOs, a client must
:func:`blockchain.scripthash.subscribe` and track mempool
transactions sent via :func:`mempool.changes` notifications.
**Result**
A dictionary with the following keys.
* *more*
:const:`true` indicates that there *may* be more UTXOs available.
A follow-up request is required to obtain any. :const:`false`
means all UTXOs to the blockchain's tip have been returned.
* *utxos*
A list of UTXOs. Each UTXO is itself a list with the following
elements:
1. The height of the block the transaction is in
2. The transaction hash as a hexadecimal string
3. The zero-based index of the output in the transaction's outputs
4. The output value, an integer in minimum coin units (satoshis)
**Result Example**
::
**TODO**
blockchain.transaction.get
==========================
Return a raw transaction.
**Signature**
.. function:: blockchain_.transaction.get(tx_hash, verbose=false, merkle=false)
.. versionchanged:: 1.1
ignored argument *height* removed
.. versionchanged:: 1.2
*verbose* argument added
.. versionchanged:: 2.0
*merkle* argument added
*tx_hash*
The transaction hash as a hexadecimal string.
*verbose*
Whether a verbose coin-specific response is required.
*merkle*
Whether a merkle branch proof should be returned as well.
**Result**
If *verbose* is :const:`false`:
If *merkle* is :const:`false`, the raw transaction as a
hexadecimal string. If :const:`true`, the dictionary returned
by :func:`blockchain.transaction.get_merkle` with an additional
key:
*hex*
The raw transaction as a hexadecimal string.
If *verbose* is :const:`true`:
The result is a coin-specific dictionary -- whatever the coin
daemon returns when asked for a verbose form of the raw
transaction. If *merkle* is :const:`true` it will have an
additional key:
*merkle*
The dictionary returned by
:func:`blockchain.transaction.get_merkle`.
mempool.changes
===============
A notification that indicates changes to unconfirmed transactions of a
:ref:`subscribed <subscribed>` :ref:`script hash <script hashes>`. As
its name suggests the notification is stateful; its contents are a
function of what was sent previously.
**Signature**
.. function:: mempool.changes(scripthash, new, gone)
.. versionadded:: 2.0
The parameters are as follows:
* *scripthash*
The script hash the notification is for, a hexadecimal string.
* *new*
A list of transactions in the mempool that have not previously
been sent to the client, or whose *confirmed input* status
has changed. Each transaction is an ordered list of 3 items:
1. The raw transaction or its hash as a hexadecimal string. The
first time the server sends a transaction it sends it raw.
Subsequent references in the same *new* list or in later
notifications will send the hash only. Transactions cannot be
32 bytes in size so length can be used to distinguish.
2. The transaction fee, an integer in minimum coin units (satoshis)
3. :const:`true` if all inputs are confirmed otherwise :const:`false`
* *gone*
A list of hashes of transactions that were previously sent to the
client as being in the mempool but no longer are. Those
transactions presumably were confirmed in a block or were evicted
from the mempool.
**Notification Example**
::
**TODO**

View File

@ -2,7 +2,6 @@
Protocol Methods
==================
blockchain.block.header
=======================
@ -14,6 +13,7 @@ Return the block header at the given height.
.. versionadded:: 1.3
.. versionchanged:: 1.4
*cp_height* parameter added
.. versionchanged:: 1.4.1
*height*
@ -41,7 +41,8 @@ Return the block header at the given height.
* *header*
The raw block header as a hexadecimal string.
The raw block header as a hexadecimal string. Starting with version 1.4.1,
AuxPoW data (if present in the original header) is truncated.
* *root*
@ -72,7 +73,6 @@ With *cp_height* 8::
"root": "e347b1c43fd9b5415bf0d92708db8284b78daf4d0e24f9c3405f45feb85e25db"
}
blockchain.block.headers
========================
@ -84,6 +84,7 @@ Return a concatenated chunk of block headers from the main chain.
.. versionadded:: 1.2
.. versionchanged:: 1.4
*cp_height* parameter added
.. versionchanged:: 1.4.1
*start_height*
@ -114,7 +115,8 @@ Return a concatenated chunk of block headers from the main chain.
* *hex*
The binary block headers concatenated together in-order as a
hexadecimal string.
hexadecimal string. Starting with version 1.4.1, AuxPoW data (if present
in the original header) is truncated if *cp_height* is nonzero.
* *max*
@ -176,6 +178,7 @@ be confirmed within a certain number of blocks.
0.00101079
blockchain.headers.subscribe
============================
@ -184,24 +187,10 @@ Subscribe to receive block headers when a new block is found.
**Signature**
.. function:: blockchain.headers.subscribe()
.. versionchanged:: 1.2
Optional *raw* parameter added, defaulting to :const:`false`.
.. versionchanged:: 1.3
*raw* parameter defaults to :const:`true`.
.. versionchanged:: 1.4
*raw* parameter removed; responses and notifications pass raw
headers.
* *raw*
This single boolean argument exists in protocol versions 1.2
(defaulting to :const:`false`) and 1.3 (defaulting to
:const:`true`) only.
**Result**
The header of the current block chain tip. If *raw* is
:const:`true` the result is a dictionary with two members:
The header of the current block chain tip. The result is a dictionary with two members:
* *hex*
@ -211,24 +200,9 @@ Subscribe to receive block headers when a new block is found.
The height of the header, an integer.
If *raw* is :const:`false` the result is the coin-specific
:ref:`deserialized header <deserialized header>`.
**Example Result**
With *raw* :const:`false`::
{
"bits": 402858285,
"block_height": 520481,
"merkle_root": "8e8e932eb858fd53cf09943d7efc9a8f674dc1363010ee64907a292d2fb0c25d",
"nonce": 3288656012,
"prev_block_hash": "000000000000000000b512b5d9fc7c5746587268547c04aa92383aaea0080289",
"timestamp": 1520495819,
"version": 536870912
}
With *raw* :const:`true`::
::
{
"height": 520481,
@ -414,66 +388,6 @@ hashes>`.
}
]
blockchain.scripthash.history
=============================
Return part of the confirmed history of a :ref:`script hash <script
hashes>`.
**Signature**
.. function:: blockchain.scripthash.history(scripthash, start_height)
.. versionadded:: 1.5
*scripthash*
The script hash as a hexadecimal string.
*start_height*
History will be returned starting from this height, a non-negative
integer. If there are several matching transactions in a block,
the server will return *all* of them -- partial results from a
block are not permitted. The client can start subsequent requests
at one above the greatest returned height and avoid repeats.
**Result**
A dictionary with the following keys.
* *more*
:const:`true` indicates that there *may* be more history
available. A follow-up request is required to obtain any.
:const:`false` means all history to blockchain's tip has been
returned.
* *history*
A list of transactions. Each transaction is itself a list of
two elements:
1. The block height
2. The transaction hash
**Result Examples**
::
{
"more": false,
"history": [
[
200004,
"acc3758bd2a26f869fcc67d48ff30b96464d476bca82c1cd6656e7d506816412"
],
[
215008,
"f3e1bf48975b8d6060a9de8884296abb80be618dc00ae3cb2f6cee3085e09403"
]
]
}
blockchain.scripthash.listunspent
=================================
@ -552,89 +466,35 @@ Subscribe to a script hash.
**Result**
.. versionchanged:: 1.5
As of protocol 1.5, the transaction hash of the last confirmed
transaction in blockchain order, or :const:`null` if there are none.
For protocol versions 1.4 and below, the :ref:`status <status>` of
the script hash.
The :ref:`status <status>` of the script hash.
**Notifications**
.. versionchanged:: 1.5
As this is a subscription, the client receives notifications when
the confirmed transaction history and/or associated mempool
transactions change.
As of protocol 1.5, the initial mempool and subsequent changes to it
are sent with :func:`mempool.changes` notifications. When confirmed
history changes, a notification with signature
.. function:: blockchain.scripthash.subscribe(scripthash, tx_hash)
is sent, where *tx_hash* is the hash of the last confirmed
transaction in blockchain order.
For protocol versions 1.4 and below, the client will receive a
notification when the :ref:`status <status>` of the script hash
changes. Its signature is
The client will receive a notification when the :ref:`status <status>` of the script
hash changes. Its signature is
.. function:: blockchain.scripthash.subscribe(scripthash, status)
blockchain.scripthash.utxos
===========================
blockchain.scripthash.unsubscribe
=================================
Return some confirmed UTXOs sent to a script hash.
Unsubscribe from a script hash, preventing future notifications if its :ref:`status
<status>` changes.
**Signature**
.. function:: blockchain.scripthash.utxos(scripthash, start_height)
.. versionadded:: 1.5
.. function:: blockchain.scripthash.unsubscribe(scripthash)
.. versionadded:: 1.4.2
*scripthash*
The script hash as a hexadecimal string.
*start_height*
UTXOs will be returned starting from this height, a non-negative
integer. If there are several UTXOs in one block, the server will
return *all* of them -- partial results from a block are not
permitted. The client can start subsequent requests at one above
the greatest returned height and avoid repeats.
.. note:: To get the effects of transactions in the mempool adding or
removing UTXOs, a client must
:func:`blockchain.scripthash.subscribe` and track mempool
transactions sent via :func:`mempool.changes` notifications.
**Result**
A dictionary with the following keys.
* *more*
:const:`true` indicates that there *may* be more UTXOs available.
A follow-up request is required to obtain any. :const:`false`
means all UTXOs to the blockchain's tip have been returned.
* *utxos*
A list of UTXOs. Each UTXO is itself a list with the following
elements:
1. The height of the block the transaction is in
2. The transaction hash as a hexadecimal string
3. The zero-based index of the output in the transaction's outputs
4. The output value, an integer in minimum coin units (satoshis)
**Result Example**
::
**TODO**
Returns :const:`True` if the scripthash was subscribed to, otherwise :const:`False`.
Note that :const:`False` might be returned even for something subscribed to earlier,
because the server can drop subscriptions in rare circumstances.
blockchain.transaction.broadcast
================================
@ -681,13 +541,11 @@ Return a raw transaction.
**Signature**
.. function:: blockchain.transaction.get(tx_hash, verbose=false, merkle=false)
.. function:: blockchain.transaction.get(tx_hash, verbose=false)
.. versionchanged:: 1.1
ignored argument *height* removed
.. versionchanged:: 1.2
*verbose* argument added
.. versionchanged:: 1.5
*merkle* argument added
*tx_hash*
@ -697,38 +555,21 @@ Return a raw transaction.
Whether a verbose coin-specific response is required.
*markle*
Whether a merkle branch proof should be returned as well.
**Result**
If *verbose* is :const:`false`:
If *merkle* is :const:`false`, the raw transaction as a
hexadecimal string. If :const:`true`, the dictionary returned
by :func:`blockchain.transaction.get_merkle` with an additional
key:
*hex*
The raw transaction as a hexadecimal string.
The raw transaction as a hexadecimal string.
If *verbose* is :const:`true`:
The result is a coin-specific dictionary -- whatever the coin
daemon returns when asked for a verbose form of the raw
transaction. If *merkle* is :const:`true` it will have an
additional key:
*merkle*
The dictionary returned by
:func:`blockchain.transaction.get_merkle`.
transaction.
**Example Results**
When *verbose* is :const:`false` and *merkle* is :const:`false`::
When *verbose* is :const:`false`::
"01000000015bb9142c960a838329694d3fe9ba08c2a6421c5158d8f7044cb7c48006c1b48"
"4000000006a4730440220229ea5359a63c2b83a713fcc20d8c41b20d48fe639a639d2a824"
@ -738,7 +579,7 @@ When *verbose* is :const:`false` and *merkle* is :const:`false`::
"4fe5f88ac50a8cf00000000001976a91445dac110239a7a3814535c15858b939211f85298"
"88ac61ee0700"
When *verbose* is :const:`true` and *merkle* is :const:`false`::
When *verbose* is :const:`true`::
{
"blockhash": "0000000000000000015a4f37ece911e5e3549f988e855548ce7494a0a08b2ad6",
@ -834,7 +675,6 @@ and height.
"pos": 710
}
blockchain.transaction.id_from_pos
==================================
@ -899,52 +739,6 @@ When *merkle* is :const:`true`::
]
}
mempool.changes
===============
A notification that indicates changes to unconfirmed transactions of a
:ref:`subscribed <subscribed>` :ref:`script hash <script hashes>`. As
its name suggests the notification is stateful; its contents are a
function of what was sent previously.
**Signature**
.. function:: mempool.changes(scripthash, new, gone)
.. versionadded:: 1.5
The parameters are as follows:
* *scripthash*
The script hash the notification is for, a hexadecimal string.
* *new*
A list of transactions in the mempool that have not previously
been sent to the client, or whose *confirmed input* status
has changed. Each transaction is an ordered list of 3 items:
1. The raw transaction or its hash as a hexadecimal string. The
first time the server sends a transaction it sends it raw.
Subsequent references in the same *new* list or in later
notifications will send the hash only. Transactions cannot be
32 bytes in size so length can be used to distinguish.
2. The transaction fee, an integer in minimum coin units (satoshis)
3. :const:`true` if all inputs are confirmed otherwise :const:`false`
* *gone*
A list of hashes of transactions that were previously sent to the
client as being in the mempool but no longer are. Those
transactions presumably were confirmed in a block or were evicted
from the mempool.
**Notification Example**
::
**TODO**
mempool.get_fee_histogram
=========================
@ -1095,7 +889,7 @@ Return a list of features and services supported by the server.
* *server_version*
A string that identifies the server software. Should be the same
as the result to the :func:`server.version` RPC call.
as the first element of the result to the :func:`server.version` RPC call.
* *protocol_max*
* *protocol_min*
@ -1172,17 +966,11 @@ server.version
==============
Identify the client to the server and negotiate the protocol version.
Only the first :func:`server.version` message is accepted.
**Signature**
.. function:: server.version(client_name="", protocol_version="1.1")
.. versionchanged:: 1.1
*protocol_version* is not ignored.
.. versionchanged:: 1.2
Use :func:`server.ping` rather than sending version requests as a
ping mechanism.
.. versionchanged:: 1.4
Only the first :func:`server.version` message is accepted.
.. function:: server.version(client_name="", protocol_version="1.4")
* *client_name*
@ -1216,17 +1004,13 @@ Identify the client to the server and negotiate the protocol version.
identifying the server and the protocol version that will be used
for future communication.
*Protocol version 1.0*: A string identifying the server software.
**Examples**::
**Example**::
server.version("Electrum 3.0.6", ["1.1", "1.2"])
server.version("2.7.1", "1.0")
**Example Results**::
**Example Result**::
["ElectrumX 1.2.1", "1.2"]
"ElectrumX 1.2.1"
Masternode methods (Dash and compatible coins)
@ -1357,3 +1141,107 @@ Returns the list of masternodes.
...,
...
]
ProTx methods (Dash DIP3)
==============================================
protx.diff
=============================
Returns a diff between two deterministic masternode lists.
The result also contains proof data.
**Signature**
.. function:: protx.diff(base_height, height)
*base_height*
The starting block height
*1* <= *base_height*
*height*
The ending block height.
*base_height* <= *height*
**Result**
A dictionary with deterministic masternode lists diff plus proof data
**Example**::
protx.diff(1, 20000)
**Example Result**::
{
"baseBlockHash": "000000000b866e7fefc7df2b4b37f236175cee9ab6dc925a30c62401d92b7406",
"blockHash": "0000000005b3f97e0af8c72f9a96eca720237e374ca860938ba0d7a68471c4d6",
"cbTxMerkleTree": "0200000002c9802d02435cfe09e4253bc1ba4875e9a2f920d5d6adf005d5b9306e5322e6f476d885273422c2fe18e8c420d09484f89eaeee7bb7f4e1ff54bddeb94e099a910103",
"cbTx": "03000500010000000000000000000000000000000000000000000000000000000000000000ffffffff4b02204e047867335c08fabe6d6d8b2b76b7000000000470393f63424273736170747365743a7265737574736574010000000000000010000015770000000d2f6e6f64655374726174756d2f000000000336c8a119010000001976a914cb594917ad4e5849688ec63f29a0f7f3badb5da688ac6c62c216010000001976a914a3c5284d3cd896815ac815f2dd76a3a71cb3d8e688acba65df02000000001976a9146d649e1c05e89d30809ef39cc8ee1002c0c8c84b88ac00000000260100204e0000b301c3d88e4072305bec5d09e2ed6b836b23af640bcdefd7b8ae7e2ca182dc17",
"deletedMNs": [
],
"mnList": [
{
"proRegTxHash": "6f0bdd7034ce8d3a6976a15e4b4442c274b5c1739fb63fc0a50f01425580e17e",
"confirmedHash": "000000000be653cd1fbc213239cfec83ca68da657f24cc05305d0be75d34e392",
"service": "173.61.30.231:19023",
"pubKeyOperator": "8da7ee1a40750868badef2c17d5385480cae7543f8d4d6e5f3c85b37fdd00a6b4f47726b96e7e7c7a3ea68b5d5cb2196",
"keyIDVoting": "b35c75cbc69433175d3459843e1f6ebe145bf6a3",
"isValid": true
}
],
"merkleRootMNList": "17dc82a12c7eaeb8d7efcd0b64af236b836bede2095dec5b3072408ed8c301b3"
}
protx.info
=============================
Returns detailed information about a deterministic masternode.
**Signature**
.. function:: protx.info(protx_hash)
*protx_hash*
The hash of the initial ProRegTx.
**Result**
A dictionary with detailed deterministic masternode data
**Example**::
protx.info("6f0bdd7034ce8d3a6976a15e4b4442c274b5c1739fb63fc0a50f01425580e17e")
**Example Result**::
{
"proTxHash": "6f0bdd7034ce8d3a6976a15e4b4442c274b5c1739fb63fc0a50f01425580e17e",
"collateralHash": "b41439376b6117aebe6ad1ce31dcd217d4934fd00c104029ecb7d21c11d17c94",
"collateralIndex": 3,
"operatorReward": 0,
"state": {
"registeredHeight": 19525,
"lastPaidHeight": 20436,
"PoSePenalty": 0,
"PoSeRevivedHeight": -1,
"PoSeBanHeight": -1,
"revocationReason": 0,
"keyIDOwner": "b35c75cbc69433175d3459843e1f6ebe145bf6a3",
"pubKeyOperator": "8da7ee1a40750868badef2c17d5385480cae7543f8d4d6e5f3c85b37fdd00a6b4f47726b96e7e7c7a3ea68b5d5cb2196",
"keyIDVoting": "b35c75cbc69433175d3459843e1f6ebe145bf6a3",
"ownerKeyAddr": "ybGQ7a6e7dkJY2jxdbDwdBtyjKZJ8VB7YC",
"votingKeyAddr": "ybGQ7a6e7dkJY2jxdbDwdBtyjKZJ8VB7YC",
"addr": "173.61.30.231:19023",
"payoutAddress": "yWdXnYxGbouNoo8yMvcbZmZ3Gdp6BpySxL"
},
"confirmations": 984
}

View File

@ -5,6 +5,33 @@ Removed Protocol Methods
This documents protocol methods that are still supported in some protocol
versions, but not the most recent one.
.. _deserialized header:
Deserialized Headers
--------------------
A :dfn:`deserialized header` is a dictionary describing a block at a
given height.
A typical example would be similar to this template::
{
"block_height": <integer>,
"version": <integer>,
"prev_block_hash": <hexadecimal string>,
"merkle_root": <hexadecimal string>,
"timestamp": <integer>,
"bits": <integer>,
"nonce": <integer>
}
.. note:: The precise format of a deserialized block header varies by
coin, and also potentially by height for the same coin. Detailed
knowledge of the meaning of a block header is neither necessary nor
appropriate in the server. Consequently they were removed from the
protocol in version 1.4.
blockchain.address.get_balance
==============================
@ -103,6 +130,87 @@ Subscribe to a bitcoin address.
.. function:: blockchain.address.subscribe(address, status)
blockchain.headers.subscribe
============================
Subscribe to receive block headers when a new block is found.
**Signature**
.. blockchain.headers.subscribe()
.. versionchanged:: 1.2
Optional *raw* parameter added, defaulting to :const:`false`.
.. versionchanged:: 1.3
*raw* parameter defaults to :const:`true`.
.. versionchanged:: 1.4
*raw* parameter removed; responses and notifications pass raw
headers.
* *raw*
This single boolean argument exists in protocol versions 1.2
(defaulting to :const:`false`) and 1.3 (defaulting to
:const:`true`) only.
**Result**
The header of the current block chain tip. If *raw* is
:const:`true` the result is a dictionary with two members:
* *hex*
The binary header as a hexadecimal string.
* *height*
The height of the header, an integer.
If *raw* is :const:`false` the result is the coin-specific
:ref:`deserialized header <deserialized header>`.
**Example Result**
With *raw* :const:`false`::
{
"bits": 402858285,
"block_height": 520481,
"merkle_root": "8e8e932eb858fd53cf09943d7efc9a8f674dc1363010ee64907a292d2fb0c25d",
"nonce": 3288656012,
"prev_block_hash": "000000000000000000b512b5d9fc7c5746587268547c04aa92383aaea0080289",
"timestamp": 1520495819,
"version": 536870912
}
With *raw* :const:`true`::
{
"height": 520481,
"hex": "00000020890208a0ae3a3892aa047c5468725846577cfcd9b512b50000000000000000005dc2b02f2d297a9064ee103036c14d678f9afc7e3d9409cf53fd58b82e938e8ecbeca05a2d2103188ce804c4"
}
**Notifications**
As this is a subscription, the client will receive a notification
when a new block is found. The notification's signature is:
.. blockchain.headers.subscribe(header)
* *header*
See **Result** above.
.. note:: should a new block arrive quickly, perhaps while the server
is still processing prior blocks, the server may only notify of the
most recent chain tip. The protocol does not guarantee notification
of all intermediate block headers.
In a similar way the client must be prepared to handle chain
reorganisations. Should a re-org happen the new chain tip will not
sit directly on top of the prior chain tip. The client must be able
to figure out the common ancestor block and request any missing
block headers to acquire a consistent view of the chain state.
blockchain.numblocks.subscribe
==============================
@ -212,3 +320,63 @@ bandwidth-intensive request.
concatenated together. As many headers as are available at the
implied starting height will be returned; this may range from zero
to the coin-specific chunk size.
server.version
==============
Identify the client to the server and negotiate the protocol version.
**Signature**
.. server.version(client_name="", protocol_version="1.4")
.. versionchanged:: 1.1
*protocol_version* is not ignored.
.. versionchanged:: 1.2
Use :func:`server.ping` rather than sending version requests as a
ping mechanism.
.. versionchanged:: 1.4
Only the first :func:`server.version` message is accepted.
* *client_name*
A string identifying the connecting client software.
* *protocol_version*
An array ``[protocol_min, protocol_max]``, each of which is a
string. If ``protocol_min`` and ``protocol_max`` are the same,
they can be passed as a single string rather than as an array of
two strings, as for the default value.
The server should use the highest protocol version both support::
version = min(client.protocol_max, server.protocol_max)
If this is below the value::
max(client.protocol_min, server.protocol_min)
then there is no protocol version in common and the server must
close the connection. Otherwise it should send a response
appropriate for that protocol version.
**Result**
An array of 2 strings:
``[server_software_version, protocol_version]``
identifying the server and the protocol version that will be used
for future communication.
*Protocol version 1.0*: A string identifying the server software.
**Examples**::
server.version("Electrum 3.0.6", ["1.1", "1.2"])
server.version("2.7.1", "1.0")
**Example Results**::
["ElectrumX 1.2.1", "1.2"]
"ElectrumX 1.2.1"

View File

@ -11,3 +11,4 @@ alike.
protocol-methods
protocol-changes
protocol-removed
protocol-ideas

View File

@ -45,19 +45,22 @@ have that effect.
disconnect
----------
Disconnect the given session IDs. Session IDs can be seen in the logs
or with the `sessions`_ RPC command::
Disconnect the given session IDs or group names.
$ electrumx_rpc disconnect 2 3
Session IDs can be obtained in the logs or with the `sessions`_ RPC command. Group
names can be obtained with the `groups`_ RPC command.
The special string :const:`all` disconnects all sessions.
Example::
$ electrumx_rpc disconnect 209.59.102 34 2
[
"disconnected 2",
"disconnected 3"
"disconnecting session 34",
"disconnecting group 209.59.102"
"unknown: 2",
]
ElectrumX initiates the socket close process for the passed sessions.
Whilst most connections close quickly, it can take several minutes for
Python to shut some SSL connections down.
getinfo
-------
@ -66,53 +69,77 @@ A typical result is as follows (with annotated comments)::
$ electrumx_rpc getinfo
{
"closing": 1, # The number of sessions being closed down
"daemon": "192.168.0.2:8332/", # The daemon URL without auth info
"daemon_height": 520527, # The daemon's height when last queried
"db_height": 520527, # The height to which the DB is flushed
"errors": 0, # Errors across current sessions
"groups": 7, # The number of session groups
"logged": 0, # The number of sessions being logged
"paused": 0, # The number of paused sessions
"peers": { # Various categories of server peers
"bad": 0, # Not responding or invalid height etc.
"good": 28, # Responding with good data
"never": 0, # Never managed to connect
"stale": 0, # Was "good" but not recently connected
"total": 28 # Sum of the above
},
"pid": 85861, # Server's process ID
"requests": 0, # Unprocessed requests across all sessions
"sessions": 43, # Total number of sessions
"subs": 84, # Script hash subscriptions across all sessions
"txs_sent": 4, # Transactions sent since server started
"uptime": "06h 48m 00s" # Time since server started
"coin": "BitcoinSegwit",
"daemon": "127.0.0.1:9334/",
"daemon height": 572154, # The daemon's height when last queried
"db height": 572154, # The height to which the DB is flushed
"groups": 586, # The number of session groups
"history cache": "185,014 lookups 9,756 hits 1,000 entries",
"merkle cache": "280 lookups 54 hits 213 entries",
"peers": { # Peer information
"bad": 1,
"good": 51,
"never": 2,
"stale": 0,
"total": 54
},
"pid": 11804, # Process ID
"request counts": { # Count of RPC requests by method name
"blockchain.block.header": 245,
"blockchain.block.headers": 70,
"blockchain.estimatefee": 12776,
"blockchain.headers.subscribe": 2825,
"blockchain.relayfee": 740,
"blockchain.scripthash.get_history": 196,
"blockchain.scripthash.subscribe": 184626,
"blockchain.transaction.broadcast": 19,
"blockchain.transaction.get": 213,
"blockchain.transaction.get_merkle": 289,
"getinfo": 3,
"groups": 1,
"mempool.get_fee_histogram": 3194,
"server.add_peer": 9,
"server.banner": 740,
"server.donation_address": 754,
"server.features": 50,
"server.peers.subscribe": 792,
"server.ping": 6412,
"server.version": 2866
},
"request total": 216820, # Total requests served
"sessions": { # Live session stats
"count": 670,
"count with subs": 45,
"errors": 0,
"logged": 0,
"paused": 0,
"pending requests": 79, # Number of requests currently being processed
"subs": 36292 # Total subscriptions
},
"tx hashes cache": "289 lookups 38 hits 213 entries",
"txs sent": 19, # Transactions broadcast
"uptime": "01h 39m 04s",
"version": "ElectrumX 1.10.1"
}
Each ill-formed request, or one that does not follow the Electrum
protocol, increments the error count of the session that sent it. If
the error count reaches a certain level (currently ``10``) that client
is disconnected.
protocol, increments the error count of the session that sent it.
:ref:`logging <session logging>` of sessions can be enabled by RPC.
For more information on peers see :ref:`here <peers>`.
Clients that are slow to consume data sent to them are :dfn:`paused`
until their socket buffer drains sufficiently, at which point
processing of requests resumes.
Clients that are slow to consume data sent to them are :dfn:`paused` until their socket
buffer drains sufficiently, at which point processing of requests resumes.
Apart from very short intervals, typically after a new block or when
a client has just connected, the number of unprocessed requests
should normally be zero.
Apart from very short intervals, typically after a new block or when a client has just
connected, the number of unprocessed requests should be low, say 250 or fewer. If it is
over 1,000 the server is overloaded.
Sessions are put into groups, primarily as an anti-DoS measure.
Initially all connections made within a period of time are put in the
same group. High bandwidth usage by a member of a group deprioritizes
that session, and all members of its group to a lesser extent.
Low-priority sessions have their requests served after higher priority
sessions. ElectrumX will start delaying responses to a session if it
becomes sufficiently deprioritized.
Sessions are put into groups, primarily as an anti-DoS measure. Currently each session
goes into two groups: one for an IP subnet, and one based on the timeslice it connected
in. Each member of a group incurs a fraction of the costs of the other group members.
This appears in the `sessions`_ list under the column XCost.
groups
------
@ -124,23 +151,33 @@ The output is quite similar to the `sessions`_ command.
log
---
Toggle logging of the given session IDs. All incoming requests for a
logged session are written to the server log. Session IDs can be seen
in the logs or with the `sessions`_ RPC command::
Toggle logging of the given session IDs or group names. All incoming requests for a
logged session are written to the server log. The arguments are case-insensitive.
$ electrumx_rpc log 0 1 2 3 4 5
When a group is specified, logging is toggled for its current members only; there is no
effect on future group members.
Session IDs can be obtained in the logs or with the `sessions`_ RPC command. Group
names can be obtained with the `groups`_ RPC command.
The special string :const:`all` turns on logging of all current and future sessions,
:const:`none` turns off logging of all current and future sessions, and :const:`new`
toggles logging of future sessions.
Example::
$ electrumx_rpc log new 6 t0 z
[
"log 0: False",
"log 1: False",
"log 2: False",
"log 3: True",
"log 4: True",
"unknown session: 5"
"logging new sessions",
"logging session 6",
"logging session 3",
"logging session 57",
"logging session 12"
"unknown: z",
]
The return value shows this command turned off logging for sessions
0, 1 and 2. It was turned on for sessions 3 and 4, and there was no
session 5.
In the above command sessions 3, 12 and 57 were in group `t0` (in fact, session 6 was
too).
.. _peers:
@ -167,7 +204,8 @@ query
-----
Run a query of the UTXO and history databases against one or more
addresses or hex scripts. `--limit <N>` or `-l <N>` limits the output
addresses, hex scripts or ASCII names (for coins that have an index
on names like Namecoin). `--limit <N>` or `-l <N>` limits the output
for each kind to that many entries. History is printed in blockchain
order; UTXOs in an arbitrary order.
@ -205,22 +243,19 @@ sessions
Return a list of all current sessions. Takes no arguments::
ID Flags Client Proto Reqs Txs Subs Recv Recv KB Sent Sent KB Time Peer
110 S1 2.9.4 0.10 0 0 0 403 28 442 37 06h41m41s xxx.xxx.xxx.xxx:xx
282 S1 3.1.5 1.1 0 0 0 380 25 417 40 06h21m38s xxx.xxx.xxx.xxx:xx
300 S1 2.9.4 0.10 0 0 0 381 25 418 34 06h19m35s xxx.xxx.xxx.xxx:xx
[...]
3313 S1 2.9.3 0.10 0 0 0 22 1 22 6 07s xxx.xxx.xxx.xxx:xx
4 R0 RPC RPC 0 0 0 1 0 0 0 00s [::1]:62479
ID Flags Client Proto Cost XCost Reqs Txs Subs Recv Recv KB Sent Sent KB Time Peer
1 S6 1.1.1 1.4 0 16 0 0 0 3 0 3 0 05m42s 165.255.191.213:22349
2 S6 all_seeing_eye 1.4 0 16 0 0 0 2 0 2 0 05m40s 67.170.52.226:24995
4 S6 3.3.2 1.4 0 16 0 0 34 45 5 45 3 05m40s 185.220.100.252:40463
3 S6 1.1.2 1.4 0 16 0 0 0 3 0 3 0 05m40s 89.17.142.28:59241
The columns show information by session: the session ID, flags (see
below), how the client identifies itself - typically the Electrum
client version, the protocol version negotiated, the number of
unprocessed requests, the number of transactions sent, the number of
address subscriptions, the number of requests received and their total
size, the number of messages sent and their size, how long the client
has been connected, and the client's IP address (if anonymous logging
is disabled).
The columns show information by session: the session ID, flags (see below), how the client
identifies itself - typically the Electrum client version, the protocol version
negotiated, the session cost, the additional session cost accrued from its groups, the
number of unprocessed requests, the number of transactions sent, the number of address
subscriptions, the number of requests received and their total size, the number of
messages sent and their size, how long the client has been connected, and the client's IP
address (if anonymous logging is disabled).
The flags are:

View File

@ -1,4 +1,4 @@
version = 'ElectrumX 1.8.12'
version = 'ElectrumX 1.13.0'
version_short = version.split()[-1]
from electrumx.server.controller import Controller

File diff suppressed because it is too large Load Diff

View File

@ -22,8 +22,6 @@ class EnvBase(object):
def __init__(self):
self.logger = class_logger(__name__, self.__class__.__name__)
self.allow_root = self.boolean('ALLOW_ROOT', False)
self.host = self.default('HOST', 'localhost')
self.rpc_host = self.default('RPC_HOST', 'localhost')
self.loop_policy = self.event_loop_policy()
@classmethod
@ -79,21 +77,3 @@ class EnvBase(object):
import uvloop
return uvloop.EventLoopPolicy()
raise self.Error('unknown event loop policy "{}"'.format(policy))
def cs_host(self, *, for_rpc):
'''Returns the 'host' argument to pass to asyncio's create_server
call. The result can be a single host name string, a list of
host name strings, or an empty string to bind to all interfaces.
If rpc is True the host to use for the RPC server is returned.
Otherwise the host to use for SSL/TCP servers is returned.
'''
host = self.rpc_host if for_rpc else self.host
result = [part.strip() for part in host.split(',')]
if len(result) == 1:
result = result[0]
# An empty result indicates all interfaces, which we do not
# permitted for an RPC server.
if for_rpc and not result:
result = 'localhost'
return result

View File

@ -88,7 +88,7 @@ class Merkle(object):
def root(self, hashes, length=None):
'''Return the merkle root of a non-empty iterable of binary hashes.'''
branch, root = self.branch_and_root(hashes, 0, length)
_branch, root = self.branch_and_root(hashes, 0, length)
return root
def root_from_proof(self, hash, branch, index):
@ -129,7 +129,7 @@ class Merkle(object):
level cached.
To maximally reduce the amount of data hashed in computing a
markle branch, cache a tree of depth N at level N // 2.
merkle branch, cache a tree of depth N at level N // 2.
level is a list of hashes in the middle of the tree (returned
by level())
@ -169,6 +169,7 @@ class MerkleCache(object):
self.merkle = merkle
self.source_func = source_func
self.length = 0
self.level = []
self.depth_higher = 0
self.initialized = Event()

View File

@ -25,11 +25,11 @@
'''Representation of a peer server.'''
from ipaddress import ip_address, IPv4Address, IPv6Address
from ipaddress import ip_address, IPv4Address, IPv6Address, IPv4Network, IPv6Network
from socket import AF_INET, AF_INET6
from electrumx.lib.util import cachedproperty
import electrumx.lib.util as util
from aiorpcx import is_valid_hostname
from electrumx.lib.util import cachedproperty, protocol_tuple, version_string
class Peer(object):
@ -102,7 +102,7 @@ class Peer(object):
'''Update features in-place.'''
try:
tmp = Peer(self.host, features)
except Exception:
except AssertionError:
pass
else:
self.update_features_from_peer(tmp)
@ -156,7 +156,7 @@ class Peer(object):
if ip:
return ((ip.is_global or ip.is_private)
and not (ip.is_multicast or ip.is_unspecified))
return util.is_valid_hostname(self.host)
return is_valid_hostname(self.host)
@cachedproperty
def is_public(self):
@ -174,12 +174,38 @@ class Peer(object):
except ValueError:
return None
def bucket(self):
def bucket_for_internal_purposes(self):
'''Used for keeping the internal peer list manageable in size.
Restrictions are loose.
'''
if self.is_tor:
return 'onion'
if not self.ip_addr:
return ''
return tuple(self.ip_addr.split('.')[:2])
ip_addr = ip_address(self.ip_addr)
if ip_addr.version == 4:
return str(ip_addr)
elif ip_addr.version == 6:
slash64 = IPv6Network(self.ip_addr).supernet(prefixlen_diff=128-64)
return str(slash64)
return ''
def bucket_for_external_interface(self):
'''Used when responding to RPC queries to return a distributed list
of peers. Restrictions are stricter than internal bucketing.
'''
if self.is_tor:
return 'onion'
if not self.ip_addr:
return ''
ip_addr = ip_address(self.ip_addr)
if ip_addr.version == 4:
slash16 = IPv4Network(self.ip_addr).supernet(prefixlen_diff=32-16)
return str(slash16)
elif ip_addr.version == 6:
slash56 = IPv6Network(self.ip_addr).supernet(prefixlen_diff=128-56)
return str(slash56)
return ''
def serialize(self):
'''Serialize to a dictionary.'''
@ -210,7 +236,7 @@ class Peer(object):
@cachedproperty
def genesis_hash(self):
'''Returns None if no SSL port, otherwise the port as an integer.'''
'''Returns the network genesis block hash as a string if known, otherwise None.'''
return self._string('genesis_hash')
@cachedproperty
@ -239,8 +265,8 @@ class Peer(object):
def _protocol_version_string(self, key):
version_str = self.features.get(key)
ptuple = util.protocol_tuple(version_str)
return util.version_string(ptuple)
ptuple = protocol_tuple(version_str)
return version_string(ptuple)
@cachedproperty
def protocol_min(self):

View File

@ -27,11 +27,9 @@
'''Script-related classes and functions.'''
import struct
from collections import namedtuple
from electrumx.lib.enum import Enumeration
from electrumx.lib.hash import hash160
from electrumx.lib.util import unpack_le_uint16_from, unpack_le_uint32_from, \
pack_le_uint16, pack_le_uint32
@ -145,37 +143,6 @@ class ScriptPubKey(object):
+ Script.push_data(hash160)
+ bytes([OpCodes.OP_EQUALVERIFY, OpCodes.OP_CHECKSIG]))
@classmethod
def validate_pubkey(cls, pubkey, req_compressed=False):
if isinstance(pubkey, (bytes, bytearray)):
if len(pubkey) == 33 and pubkey[0] in (2, 3):
return # Compressed
if len(pubkey) == 65 and pubkey[0] == 4:
if not req_compressed:
return
raise PubKeyError('uncompressed pubkeys are invalid')
raise PubKeyError('invalid pubkey {}'.format(pubkey))
@classmethod
def pubkey_script(cls, pubkey):
cls.validate_pubkey(pubkey)
return Script.push_data(pubkey) + bytes([OpCodes.OP_CHECKSIG])
@classmethod
def multisig_script(cls, m, pubkeys):
'''Returns the script for a pay-to-multisig transaction.'''
n = len(pubkeys)
if not 1 <= m <= n <= 15:
raise ScriptError('{:d} of {:d} multisig script not possible'
.format(m, n))
for pubkey in pubkeys:
cls.validate_pubkey(pubkey, req_compressed=True)
# See https://bitcoin.org/en/developer-guide
# 2 of 3 is: OP_2 pubkey1 pubkey2 pubkey3 OP_3 OP_CHECKMULTISIG
return (bytes([OP_1 + m - 1])
+ b''.join(cls.push_data(pubkey) for pubkey in pubkeys)
+ bytes([OP_1 + n - 1, OP_CHECK_MULTISIG]))
class Script(object):

View File

@ -5,12 +5,16 @@
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''Base class of servers'''
import asyncio
import os
import platform
import re
import signal
import sys
import time
from contextlib import suppress
from functools import partial
from aiorpcx import spawn
@ -18,7 +22,7 @@ from aiorpcx import spawn
from electrumx.lib.util import class_logger
class ServerBase(object):
class ServerBase:
'''Base class server implementation.
Derived classes are expected to:
@ -42,15 +46,19 @@ class ServerBase(object):
asyncio.set_event_loop_policy(env.loop_policy)
self.logger = class_logger(__name__, self.__class__.__name__)
self.logger.info(f'Python version: {sys.version}')
version_str = ' '.join(sys.version.splitlines())
self.logger.info(f'Python version: {version_str}')
self.env = env
self.start_time = 0
# Sanity checks
if sys.version_info < self.PYTHON_MIN_VERSION:
mvs = '.'.join(str(part) for part in self.PYTHON_MIN_VERSION)
raise RuntimeError('Python version >= {} is required'.format(mvs))
if os.geteuid() == 0 and not env.allow_root:
if platform.system() == 'Windows':
pass
elif os.geteuid() == 0 and not env.allow_root:
raise RuntimeError('RUNNING AS ROOT IS STRONGLY DISCOURAGED!\n'
'You shoud create an unprivileged user account '
'and use that.\n'
@ -63,7 +71,6 @@ class ServerBase(object):
Setting the event also shuts down the server.
'''
shutdown_event.set()
def on_exception(self, loop, context):
'''Suppress spurious messages it appears we cannot control.'''
@ -74,7 +81,7 @@ class ServerBase(object):
return
loop.default_exception_handler(context)
async def _main(self, loop):
async def run(self):
'''Run the server application:
- record start time
@ -84,30 +91,37 @@ class ServerBase(object):
'''
def on_signal(signame):
shutdown_event.set()
self.logger.warning(f'received {signame} signal, '
f'initiating shutdown')
self.logger.warning(f'received {signame} signal, initiating shutdown')
async def serve():
try:
await self.serve(shutdown_event)
finally:
shutdown_event.set()
self.start_time = time.time()
for signame in ('SIGINT', 'SIGTERM'):
loop.add_signal_handler(getattr(signal, signame),
partial(on_signal, signame))
loop = asyncio.get_event_loop()
shutdown_event = asyncio.Event()
if platform.system() != 'Windows':
# No signals on Windows
for signame in ('SIGINT', 'SIGTERM'):
loop.add_signal_handler(getattr(signal, signame),
partial(on_signal, signame))
loop.set_exception_handler(self.on_exception)
shutdown_event = asyncio.Event()
server_task = await spawn(self.serve(shutdown_event))
# Wait for shutdown, log on receipt of the event
await shutdown_event.wait()
self.logger.info('shutting down')
server_task.cancel()
# Prevent some silly logs
await asyncio.sleep(0.01)
self.logger.info('shutdown complete')
def run(self):
loop = asyncio.get_event_loop()
# Start serving and wait for shutdown, log receipt of the event
server_task = await spawn(serve, report_crash=False)
try:
loop.run_until_complete(self._main(loop))
await shutdown_event.wait()
except KeyboardInterrupt:
self.logger.warning('received keyboard interrupt, initiating shutdown')
self.logger.info('shutting down')
server_task.cancel()
try:
with suppress(asyncio.CancelledError):
await server_task
finally:
loop.run_until_complete(loop.shutdown_asyncgens())
self.logger.info('shutdown complete')

View File

@ -7,14 +7,17 @@ def sessions_lines(data):
'''A generator returning lines for a list of sessions.
data is the return value of rpc_sessions().'''
fmt = ('{:<6} {:<5} {:>17} {:>5} {:>5} {:>5} '
'{:>7} {:>7} {:>7} {:>7} {:>7} {:>9} {:>21}')
fmt = ('{:<6} {:<5} {:>17} {:>5} '
'{:>7} {:>7} {:>5} {:>5} {:>7} '
'{:>7} {:>7} {:>7} {:>7} {:>9} {:>21}')
yield fmt.format('ID', 'Flags', 'Client', 'Proto',
'Reqs', 'Txs', 'Subs',
'Cost', 'XCost', 'Reqs', 'Txs', 'Subs',
'Recv', 'Recv KB', 'Sent', 'Sent KB', 'Time', 'Peer')
for (id_, flags, peer, client, proto, reqs, txs_sent, subs,
recv_count, recv_size, send_count, send_size, time) in data:
for (id_, flags, peer, client, proto, cost, extra_cost, reqs, txs_sent, subs,
recv_count, recv_size, send_count, send_size, conn_time) in data:
yield fmt.format(id_, flags, client, proto,
'{:,d}'.format(int(cost)),
'{:,d}'.format(int(extra_cost)),
'{:,d}'.format(reqs),
'{:,d}'.format(txs_sent),
'{:,d}'.format(subs),
@ -22,7 +25,7 @@ def sessions_lines(data):
'{:,d}'.format(recv_size // 1024),
'{:,d}'.format(send_count),
'{:,d}'.format(send_size // 1024),
util.formatted_time(time, sep=''), peer)
util.formatted_time(conn_time, sep=''), peer)
def groups_lines(data):
@ -30,15 +33,16 @@ def groups_lines(data):
data is the return value of rpc_groups().'''
fmt = ('{:<6} {:>9} {:>9} {:>6} {:>6} {:>8}'
fmt = ('{:<14} {:>9} {:>8} {:>8} {:>6} {:>6} {:>8}'
'{:>7} {:>9} {:>7} {:>9}')
yield fmt.format('ID', 'Sessions', 'Bwidth KB', 'Reqs', 'Txs', 'Subs',
yield fmt.format('Name', 'Sessions', 'Cost', 'Retained', 'Reqs', 'Txs', 'Subs',
'Recv', 'Recv KB', 'Sent', 'Sent KB')
for (id_, session_count, bandwidth, reqs, txs_sent, subs,
for (name, session_count, cost, retained_cost, reqs, txs_sent, subs,
recv_count, recv_size, send_count, send_size) in data:
yield fmt.format(id_,
yield fmt.format(name,
'{:,d}'.format(session_count),
'{:,d}'.format(bandwidth // 1024),
'{:,d}'.format(int(cost)),
'{:,d}'.format(int(retained_cost)),
'{:,d}'.format(reqs),
'{:,d}'.format(txs_sent),
'{:,d}'.format(subs),

View File

@ -34,6 +34,7 @@ from electrumx.lib.hash import sha256, double_sha256, hash_to_hex_str
from electrumx.lib.script import OpCodes
from electrumx.lib.util import (
unpack_le_int32_from, unpack_le_int64_from, unpack_le_uint16_from,
unpack_be_uint16_from,
unpack_le_uint32_from, unpack_le_uint64_from, pack_le_int32, pack_varint,
pack_le_uint32, pack_le_int64, pack_varbytes,
)
@ -194,6 +195,11 @@ class Deserializer(object):
self.cursor += 2
return result
def _read_be_uint16(self):
result, = unpack_be_uint16_from(self.binary, self.cursor)
self.cursor += 2
return result
def _read_le_uint32(self):
result, = unpack_le_uint32_from(self.binary, self.cursor)
self.cursor += 4
@ -258,39 +264,131 @@ class DeserializerSegWit(Deserializer):
return self._read_tx_parts()[0]
def read_tx_and_hash(self):
tx, tx_hash, vsize = self._read_tx_parts()
tx, tx_hash, _vsize = self._read_tx_parts()
return tx, tx_hash
def read_tx_and_vsize(self):
tx, tx_hash, vsize = self._read_tx_parts()
tx, _tx_hash, vsize = self._read_tx_parts()
return tx, vsize
class TxFlo(namedtuple("Tx", "version inputs outputs locktime txcomment")):
    '''A FLO transaction: the standard fields plus a tx-comment payload.'''

    def is_coinbase(self):
        '''Return True if the transaction's first input is a coinbase.'''
        first_input = self.inputs[0]
        return first_input.is_coinbase
class TxFloSegWit(namedtuple("Tx", "version marker flag inputs outputs "
                             "witness locktime txcomment")):
    '''A SegWit FLO transaction: witness data plus a tx-comment payload.'''

    def is_coinbase(self):
        '''Return True if the transaction's first input is a coinbase.'''
        first_input = self.inputs[0]
        return first_input.is_coinbase
class DeserializerFlo(DeserializerSegWit):
    '''Deserializer for FLO transactions.

    FLO follows the Bitcoin/SegWit transaction wire format, except that
    transactions with version >= 2 append a variable-length tx-comment
    field after the locktime.
    '''
    # https://bitcoincore.org/en/segwit_wallet_dev/#transaction-serialization

    def _read_tx(self):
        '''Return a deserialized (legacy, non-segwit) FLO transaction.'''
        version = self._read_le_int32()
        inputs = self._read_inputs()
        outputs = self._read_outputs()
        locktime = self._read_le_uint32()
        # FLO: version >= 2 transactions carry a tx-comment after locktime.
        if version >= 2:
            comment = self._read_varbytes()
        else:
            comment = ""
        return TxFlo(version, inputs, outputs, locktime, comment)

    def _read_tx_parts(self):
        '''Return a (deserialized TX, tx_hash, vsize) tuple.'''
        start = self.cursor
        # The byte at offset 4 (just after the 4-byte version) is the segwit
        # marker: zero for a segwit serialization; for a legacy serialization
        # it is the first byte of the input-count varint, hence non-zero.
        marker = self.binary[self.cursor + 4]
        if marker:
            # Legacy serialization: the txid is the hash of the whole tx.
            tx = self._read_tx()
            tx_hash = double_sha256(self.binary[start:self.cursor])
            return tx, tx_hash, self.binary_length

        # Ugh, this is nasty.
        # Segwit: the txid is computed over the serialization with the
        # marker, flag and witness data stripped, so accumulate exactly
        # those spans into orig_ser as we advance the cursor.
        version = self._read_le_int32()
        orig_ser = self.binary[start:self.cursor]

        marker = self._read_byte()
        flag = self._read_byte()

        start = self.cursor
        inputs = self._read_inputs()
        outputs = self._read_outputs()
        orig_ser += self.binary[start:self.cursor]
        base_size = self.cursor - start

        witness = self._read_witness(len(inputs))

        start = self.cursor
        locktime = self._read_le_uint32()
        # FLO ->
        if version >= 2:
            tx_comment = self._read_varbytes()
        else:
            tx_comment = ""
        # <- FLO
        orig_ser += self.binary[start:self.cursor]

        # NOTE(review): base_size covers only the inputs/outputs span and
        # binary_length is the full buffer; confirm this vsize expression
        # against the BIP 141 weight definition if exact values matter.
        vsize = (3 * base_size + self.binary_length) // 4

        return TxFloSegWit(version, marker, flag, inputs, outputs, witness,
                           locktime, tx_comment), double_sha256(orig_ser), vsize
class DeserializerAuxPow(Deserializer):
VERSION_AUXPOW = (1 << 8)
def read_header(self, height, static_header_size):
'''Return the AuxPow block header bytes'''
def read_auxpow(self):
'''Reads and returns the CAuxPow data'''
# We first calculate the size of the CAuxPow instance and then
# read it as bytes in the final step.
start = self.cursor
self.read_tx() # AuxPow transaction
self.cursor += 32 # Parent block hash
merkle_size = self._read_varint()
self.cursor += 32 * merkle_size # Merkle branch
self.cursor += 4 # Index
merkle_size = self._read_varint()
self.cursor += 32 * merkle_size # Chain merkle branch
self.cursor += 4 # Chain index
self.cursor += 80 # Parent block header
end = self.cursor
self.cursor = start
return self._read_nbytes(end - start)
def read_header(self, static_header_size):
'''Return the AuxPow block header bytes'''
# We are going to calculate the block size then read it as bytes
start = self.cursor
version = self._read_le_uint32()
if version & self.VERSION_AUXPOW:
# We are going to calculate the block size then read it as bytes
self.cursor = start
self.cursor += static_header_size # Block normal header
self.read_tx() # AuxPow transaction
self.cursor += 32 # Parent block hash
merkle_size = self._read_varint()
self.cursor += 32 * merkle_size # Merkle branch
self.cursor += 4 # Index
merkle_size = self._read_varint()
self.cursor += 32 * merkle_size # Chain merkle branch
self.cursor += 4 # Chain index
self.cursor += 80 # Parent block header
self.read_auxpow()
header_end = self.cursor
else:
header_end = static_header_size
header_end = start + static_header_size
self.cursor = start
return self._read_nbytes(header_end)
return self._read_nbytes(header_end - start)
class DeserializerAuxPowSegWit(DeserializerSegWit, DeserializerAuxPow):
@ -298,7 +396,7 @@ class DeserializerAuxPowSegWit(DeserializerSegWit, DeserializerAuxPow):
class DeserializerEquihash(Deserializer):
def read_header(self, height, static_header_size):
def read_header(self, static_header_size):
'''Return the block header bytes'''
start = self.cursor
# We are going to calculate the block size then read it as bytes
@ -380,6 +478,65 @@ class DeserializerTxTime(Deserializer):
)
class TxTimeSegWit(namedtuple(
        "Tx", "version time marker flag inputs outputs witness locktime")):
    '''A SegWit transaction carrying an extra 4-byte *time* field, read
    from the wire between the version and the segwit marker byte.'''
class DeserializerTxTimeSegWit(DeserializerTxTime):
    '''Deserializer for SegWit transactions that carry a 4-byte time
    field between the version and the marker/flag bytes.'''

    def _read_witness(self, fields):
        '''Return one witness stack per input; *fields* is the input count.'''
        read_witness_field = self._read_witness_field
        return [read_witness_field() for _ in range(fields)]

    def _read_witness_field(self):
        '''Return a single witness stack: a varint count of var-length items.'''
        read_varbytes = self._read_varbytes
        return [read_varbytes() for _ in range(self._read_varint())]

    def _read_tx_parts(self):
        '''Return a (deserialized TX, tx_hash, vsize) tuple.'''
        start = self.cursor
        # Offset 8 = 4-byte version + 4-byte time; a non-zero byte there is
        # the input-count varint of a legacy (non-segwit) serialization.
        marker = self.binary[self.cursor + 8]
        if marker:
            tx = super().read_tx()
            tx_hash = self.TX_HASH_FN(self.binary[start:self.cursor])
            return tx, tx_hash, self.binary_length

        # Segwit: the txid is computed over the serialization with the
        # marker, flag and witness data stripped, so accumulate exactly
        # those spans into orig_ser as we advance the cursor.
        version = self._read_le_int32()
        time = self._read_le_uint32()
        orig_ser = self.binary[start:self.cursor]

        marker = self._read_byte()
        flag = self._read_byte()

        start = self.cursor
        inputs = self._read_inputs()
        outputs = self._read_outputs()
        orig_ser += self.binary[start:self.cursor]
        base_size = self.cursor - start

        witness = self._read_witness(len(inputs))

        start = self.cursor
        locktime = self._read_le_uint32()
        orig_ser += self.binary[start:self.cursor]

        # NOTE(review): base_size covers only the inputs/outputs span and
        # binary_length is the full buffer; confirm this vsize expression
        # against the BIP 141 weight definition if exact values matter.
        vsize = (3 * base_size + self.binary_length) // 4

        return TxTimeSegWit(
            version, time, marker, flag, inputs, outputs, witness, locktime),\
            self.TX_HASH_FN(orig_ser), vsize

    def read_tx(self):
        '''Return the deserialized transaction.'''
        return self._read_tx_parts()[0]

    def read_tx_and_hash(self):
        '''Return a (transaction, tx_hash) pair.'''
        # Underscore-prefix the unused vsize, matching the convention used
        # by the other deserializers in this module.
        tx, tx_hash, _vsize = self._read_tx_parts()
        return tx, tx_hash

    def read_tx_and_vsize(self):
        '''Return a (transaction, vsize) pair.'''
        tx, _tx_hash, vsize = self._read_tx_parts()
        return tx, vsize
class TxTrezarcoin(
namedtuple("Tx", "version time inputs outputs locktime txcomment")):
'''Class representing transaction that has a time and txcomment field.'''
@ -402,7 +559,6 @@ class DeserializerTrezarcoin(Deserializer):
@staticmethod
def blake2s_gen(data):
version = data[0:1]
keyOne = data[36:46]
keyTwo = data[58:68]
ntime = data[68:72]
@ -411,16 +567,15 @@ class DeserializerTrezarcoin(Deserializer):
_full_merkle = data[36:68]
_input112 = data + _full_merkle
_key = keyTwo + ntime + _nBits + _nonce + keyOne
'''Prepare 112Byte Header '''
# Prepare 112Byte Header
blake2s_hash = blake2s(key=_key, digest_size=32)
blake2s_hash.update(_input112)
'''TrezarFlips - Only for Genesis'''
# TrezarFlips - Only for Genesis
return ''.join(map(str.__add__, blake2s_hash.hexdigest()[-2::-2],
blake2s_hash.hexdigest()[-1::-2]))
@staticmethod
def blake2s(data):
version = data[0:1]
keyOne = data[36:46]
keyTwo = data[58:68]
ntime = data[68:72]
@ -429,10 +584,10 @@ class DeserializerTrezarcoin(Deserializer):
_full_merkle = data[36:68]
_input112 = data + _full_merkle
_key = keyTwo + ntime + _nBits + _nonce + keyOne
'''Prepare 112Byte Header '''
# Prepare 112Byte Header
blake2s_hash = blake2s(key=_key, digest_size=32)
blake2s_hash.update(_input112)
'''TrezarFlips'''
# TrezarFlips
return blake2s_hash.digest()
@ -450,7 +605,7 @@ class DeserializerReddcoin(Deserializer):
return TxTime(version, time, inputs, outputs, locktime)
class DeserializerTxTimeAuxPow(DeserializerTxTime):
class DeserializerEmercoin(DeserializerTxTimeSegWit):
VERSION_AUXPOW = (1 << 8)
def is_merged_block(self):
@ -462,7 +617,7 @@ class DeserializerTxTimeAuxPow(DeserializerTxTime):
return True
return False
def read_header(self, height, static_header_size):
def read_header(self, static_header_size):
'''Return the AuxPow block header bytes'''
start = self.cursor
version = self._read_le_uint32()
@ -585,7 +740,6 @@ class TxInputDcr(namedtuple("TxInput", "prev_hash prev_idx tree sequence")):
class TxOutputDcr(namedtuple("TxOutput", "value version pk_script")):
'''Class representing a Decred transaction output.'''
pass
class TxDcr(namedtuple("Tx", "version inputs outputs locktime expiry "
@ -608,11 +762,11 @@ class DeserializerDecred(Deserializer):
return self._read_tx_parts(produce_hash=False)[0]
def read_tx_and_hash(self):
tx, tx_hash, vsize = self._read_tx_parts()
tx, tx_hash, _vsize = self._read_tx_parts()
return tx, tx_hash
def read_tx_and_vsize(self):
tx, tx_hash, vsize = self._read_tx_parts(produce_hash=False)
tx, _tx_hash, vsize = self._read_tx_parts(produce_hash=False)
return tx, vsize
def read_tx_block(self):
@ -682,50 +836,80 @@ class DeserializerDecred(Deserializer):
), tx_hash, self.cursor - start
class TxFlo(namedtuple("Tx", "version inputs outputs locktime txcomment")):
class DeserializerSmartCash(Deserializer):
    '''Deserializer for SmartCash: txids use a single SHA256 pass.'''

    @staticmethod
    def keccak(data):
        '''Return the 256-bit Keccak digest of ``data``.'''
        from Cryptodome.Hash import keccak
        hasher = keccak.new(digest_bits=256)
        hasher.update(data)
        return hasher.digest()

    def read_tx_and_hash(self):
        '''Deserialize one tx and hash its serialization with SHA256.'''
        begin = self.cursor
        tx = self.read_tx()
        return tx, sha256(self.binary[begin:self.cursor])
class TxBitcoinDiamond(namedtuple("Tx",
                                  "version preblockhash inputs outputs "
                                  "locktime")):
    '''A Bitcoin Diamond transaction: a classic tx plus a preblockhash.'''

    def is_coinbase(self):
        '''Return whether the first input marks this tx as a coinbase.'''
        first_input = self.inputs[0]
        return first_input.is_coinbase
class DeserializerBitcoinDiamond(Deserializer):
    '''Deserializer for Bitcoin Diamond; v12 txs carry a preblockhash.'''

    # Transaction version that includes the extra 32-byte block hash.
    bitcoin_diamond_tx_version = 12

    def read_tx(self):
        '''Deserialize and return one transaction at the cursor.'''
        if self._get_version() == self.bitcoin_diamond_tx_version:
            # Read order matters: every read advances the cursor.
            version = self._read_le_int32()
            preblockhash = hash_to_hex_str(self._read_nbytes(32))
            inputs = self._read_inputs()
            outputs = self._read_outputs()
            locktime = self._read_le_uint32()
            return TxBitcoinDiamond(version, preblockhash, inputs, outputs,
                                    locktime)
        version = self._read_le_int32()
        inputs = self._read_inputs()
        outputs = self._read_outputs()
        locktime = self._read_le_uint32()
        return Tx(version, inputs, outputs, locktime)

    def _get_version(self):
        '''Peek the LE int32 tx version without advancing the cursor.'''
        version, = unpack_le_int32_from(self.binary, self.cursor)
        return version
class TxFloSegWit(namedtuple("Tx", "version marker flag inputs outputs "
"witness locktime txcomment")):
class TxBitcoinDiamondSegWit(namedtuple("Tx",
                                        "version preblockhash marker flag "
                                        "inputs outputs witness locktime")):
    '''A Bitcoin Diamond SegWit transaction with its preblockhash field.'''

    def is_coinbase(self):
        '''Return whether the first input marks this tx as a coinbase.'''
        first_input = self.inputs[0]
        return first_input.is_coinbase
class DeserializerFlo(DeserializerSegWit):
# https://bitcoincore.org/en/segwit_wallet_dev/#transaction-serialization
def _read_tx(self):
'''Return a deserialized transaction.'''
version = self._read_le_int32()
inputs = self._read_inputs()
outputs = self._read_outputs()
locktime = self._read_le_uint32()
if version >= 2:
comment = self._read_varbytes()
else:
comment = ""
return TxFlo(version, inputs, outputs, locktime, comment)
class DeserializerBitcoinDiamondSegWit(DeserializerBitcoinDiamond,
DeserializerSegWit):
def _read_tx_parts(self):
'''Return a (deserialized TX, tx_hash, vsize) tuple.'''
start = self.cursor
marker = self.binary[self.cursor + 4]
tx_version = self._get_version()
if tx_version == self.bitcoin_diamond_tx_version:
marker = self.binary[self.cursor + 4 + 32]
else:
marker = self.binary[self.cursor + 4]
if marker:
tx = self._read_tx()
tx_hash = double_sha256(self.binary[start:self.cursor])
tx = super().read_tx()
tx_hash = self.TX_HASH_FN(self.binary[start:self.cursor])
return tx, tx_hash, self.binary_length
# Ugh, this is nasty.
version = self._read_le_int32()
present_block_hash = None
if version == self.bitcoin_diamond_tx_version:
present_block_hash = hash_to_hex_str(self._read_nbytes(32))
orig_ser = self.binary[start:self.cursor]
marker = self._read_byte()
@ -741,31 +925,148 @@ class DeserializerFlo(DeserializerSegWit):
start = self.cursor
locktime = self._read_le_uint32()
# FLO ->
if version >= 2:
tx_comment = self._read_varbytes()
else:
tx_comment = ""
# <- FLO
orig_ser += self.binary[start:self.cursor]
vsize = (3 * base_size + self.binary_length) // 4
return TxFloSegWit(version, marker, flag, inputs, outputs, witness,
locktime, tx_comment), double_sha256(orig_ser), vsize
if present_block_hash is not None:
return TxBitcoinDiamondSegWit(
version, present_block_hash, marker, flag, inputs, outputs,
witness, locktime), self.TX_HASH_FN(orig_ser), vsize
else:
return TxSegWit(
version, marker, flag, inputs, outputs, witness,
locktime), self.TX_HASH_FN(orig_ser), vsize
    def read_tx(self):
        '''Return a deserialized transaction.

        Only element 0 (the TX) of _read_tx_parts() is returned; the old
        docstring claiming a (TX, TX_HASH) pair was inaccurate.
        '''
        return self._read_tx_parts()[0]
class DeserializerSmartCash(Deserializer):
class DeserializerElectra(Deserializer):
ELECTRA_TX_VERSION = 7
@staticmethod
def keccak(data):
from Cryptodome.Hash import keccak
keccak_hash = keccak.new(digest_bits=256)
keccak_hash.update(data)
return keccak_hash.digest()
def _get_version(self):
result, = unpack_le_int32_from(self.binary, self.cursor)
return result
def read_tx_and_hash(self):
from electrumx.lib.hash import sha256
def read_tx(self):
version = self._get_version()
if version != self.ELECTRA_TX_VERSION:
return TxTime(
self._read_le_int32(), # version
self._read_le_uint32(), # time
self._read_inputs(), # inputs
self._read_outputs(), # outputs
self._read_le_uint32(), # locktime
)
else:
return Tx(
self._read_le_int32(), # version
self._read_inputs(), # inputs
self._read_outputs(), # outputs
self._read_le_uint32() # locktime
)
class DeserializerECCoin(Deserializer):
    '''Deserializer for ECCoin transactions (TxTime layout).'''

    def read_tx(self):
        '''Deserialize one tx; v2+ txs carry 32 trailing bytes we skip.'''
        version = self._read_le_int32()
        time = self._read_le_uint32()
        inputs = self._read_inputs()
        outputs = self._read_outputs()
        locktime = self._read_le_uint32()
        if version > 1:
            # Skip the extra 32-byte field appended to newer transactions.
            self.cursor += 32
        return TxTime(version, time, inputs, outputs, locktime)
class DeserializerZcoin(Deserializer):
    '''Deserializer for Zcoin: Sigma spends are remapped to look like
    generation (coinbase-style) inputs.'''

    def _read_input(self):
        tx_input = TxInput(
            self._read_nbytes(32),   # prev_hash
            self._read_le_uint32(),  # prev_idx
            self._read_varbytes(),   # script
            self._read_le_uint32()   # sequence
        )
        # A true generation input (null prevout) is returned unchanged.
        if tx_input.prev_idx == MINUS_1 and tx_input.prev_hash == ZERO:
            return tx_input
        # NOTE(review): assumes a non-generation input always has a
        # non-empty script; an empty scriptSig would raise IndexError
        # here — confirm Zcoin never produces one.
        if tx_input.script[0] == 0xc4:  # This is a Sigma spend - mimic a generation tx
            return TxInput(
                ZERO,
                MINUS_1,
                tx_input.script,
                tx_input.sequence
            )
        return tx_input
class DeserializerXaya(DeserializerSegWit, DeserializerAuxPow):
    """Deserializer class for the Xaya network

    The main difference to other networks is the changed format of the
    block header with "triple purpose mining", see
    https://github.com/xaya/xaya/blob/master/doc/xaya/mining.md.
    This builds upon classic auxpow, but has a modified serialisation format
    that we have to implement here."""

    # Bit set in the PoW algo byte when the header carries auxpow data.
    MM_FLAG = 0x80

    def read_header(self, static_header_size):
        """Reads in the full block header (including PoW data)"""
        # BUG FIX: a stray line (`return self.read_tx(), sha256(...)`,
        # residue from DeserializerSmartCash) used to return here early,
        # leaving all of the header-size computation below unreachable.
        # We first calculate the dynamic size of the block header, and then
        # read in all the data in the final step.
        start = self.cursor
        self.cursor += static_header_size  # Normal block header
        algo = self._read_byte()
        self._read_le_uint32()  # nBits
        if algo & self.MM_FLAG:
            # Merge-mined: an auxpow structure follows and must be skipped.
            self.read_auxpow()
        else:
            # Stand-alone PoW: a fake (normal-size) header follows instead.
            self.cursor += static_header_size  # Fake header
        end = self.cursor
        self.cursor = start
        return self._read_nbytes(end - start)
class DeserializerSimplicity(Deserializer):
    '''Deserializer for Simplicity; pre-v3 txs carry an nTime field.'''

    SIMPLICITY_TX_VERSION = 3

    def _get_version(self):
        '''Peek the LE int32 tx version without advancing the cursor.'''
        version, = unpack_le_int32_from(self.binary, self.cursor)
        return version

    def read_tx(self):
        '''Deserialize one transaction at the cursor.'''
        if self._get_version() >= self.SIMPLICITY_TX_VERSION:
            version = self._read_le_int32()
            inputs = self._read_inputs()
            outputs = self._read_outputs()
            locktime = self._read_le_uint32()
            return Tx(version, inputs, outputs, locktime)
        version = self._read_le_int32()
        time = self._read_le_uint32()
        inputs = self._read_inputs()
        outputs = self._read_outputs()
        locktime = self._read_le_uint32()
        return TxTime(version, time, inputs, outputs, locktime)

413
electrumx/lib/tx_axe.py Normal file
View File

@ -0,0 +1,413 @@
# Copyright (c) 2016-2018, Neil Booth
# Copyright (c) 2018, the ElectrumX authors
#
# All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''Deserializer for AXE DIP2 special transaction types'''
from collections import namedtuple
from electrumx.lib.tx import Deserializer
from electrumx.lib.util import (pack_le_uint16, pack_le_int32, pack_le_uint32,
pack_le_int64, pack_varint, pack_varbytes,
pack_be_uint16)
# https://github.com/dashpay/dips/blob/master/dip-0002.md
class AxeTx(namedtuple("AxeTx",
                       "version inputs outputs locktime "
                       "tx_type extra_payload")):
    '''Class representing an Axe transaction.

    A DIP2 special transaction (tx_type non-zero) serializes version and
    tx_type as two LE uint16s and appends a varbytes extra payload; a
    classic transaction serializes version as a LE int32 and has none.
    '''
    def serialize(self):
        nLocktime = pack_le_uint32(self.locktime)
        txins = (pack_varint(len(self.inputs)) +
                 b''.join(tx_in.serialize() for tx_in in self.inputs))
        txouts = (pack_varint(len(self.outputs)) +
                  b''.join(tx_out.serialize() for tx_out in self.outputs))
        if self.tx_type:
            uVersion = pack_le_uint16(self.version)
            uTxType = pack_le_uint16(self.tx_type)
            vExtra = self._serialize_extra_payload()
            return uVersion + uTxType + txins + txouts + nLocktime + vExtra
        else:
            nVersion = pack_le_int32(self.version)
            return nVersion + txins + txouts + nLocktime

    def _serialize_extra_payload(self):
        # Known tx types must carry a payload instance of the handler
        # class; unknown types keep raw bytes.
        extra = self.extra_payload
        spec_tx_class = DeserializerAxe.SPEC_TX_HANDLERS.get(self.tx_type)
        if not spec_tx_class:
            assert isinstance(extra, (bytes, bytearray))
            return pack_varbytes(extra)
        if not isinstance(extra, spec_tx_class):
            raise ValueError('Axe tx_type does not conform with extra'
                             ' payload class: %s, %s' % (self.tx_type, extra))
        return pack_varbytes(extra.serialize())
# https://github.com/dashpay/dips/blob/master/dip-0002-special-transactions.md
class AxeProRegTx(namedtuple("AxeProRegTx",
                             "version type mode collateralOutpoint "
                             "ipAddress port KeyIdOwner PubKeyOperator "
                             "KeyIdVoting operatorReward scriptPayout "
                             "inputsHash payloadSig")):
    '''Class representing DIP3 ProRegTx (provider registration).

    Fixed sizes: ipAddress 16, KeyIdOwner/KeyIdVoting 20, PubKeyOperator
    48, inputsHash 32 bytes; port is serialized big-endian.
    '''
    def serialize(self):
        assert (len(self.ipAddress) == 16
                and len(self.KeyIdOwner) == 20
                and len(self.PubKeyOperator) == 48
                and len(self.KeyIdVoting) == 20
                and len(self.inputsHash) == 32)
        return (
            pack_le_uint16(self.version) +          # version
            pack_le_uint16(self.type) +             # type
            pack_le_uint16(self.mode) +             # mode
            self.collateralOutpoint.serialize() +   # collateralOutpoint
            self.ipAddress +                        # ipAddress
            pack_be_uint16(self.port) +             # port
            self.KeyIdOwner +                       # KeyIdOwner
            self.PubKeyOperator +                   # PubKeyOperator
            self.KeyIdVoting +                      # KeyIdVoting
            pack_le_uint16(self.operatorReward) +   # operatorReward
            pack_varbytes(self.scriptPayout) +      # scriptPayout
            self.inputsHash +                       # inputsHash
            pack_varbytes(self.payloadSig)          # payloadSig
        )

    @classmethod
    def read_tx_extra(cls, deser):
        # Field reads mirror serialize() exactly.
        return AxeProRegTx(
            deser._read_le_uint16(),                # version
            deser._read_le_uint16(),                # type
            deser._read_le_uint16(),                # mode
            deser._read_outpoint(),                 # collateralOutpoint
            deser._read_nbytes(16),                 # ipAddress
            deser._read_be_uint16(),                # port
            deser._read_nbytes(20),                 # KeyIdOwner
            deser._read_nbytes(48),                 # PubKeyOperator
            deser._read_nbytes(20),                 # KeyIdVoting
            deser._read_le_uint16(),                # operatorReward
            deser._read_varbytes(),                 # scriptPayout
            deser._read_nbytes(32),                 # inputsHash
            deser._read_varbytes()                  # payloadSig
        )
class AxeProUpServTx(namedtuple("AxeProUpServTx",
                                "version proTxHash ipAddress port "
                                "scriptOperatorPayout inputsHash "
                                "payloadSig")):
    '''Class representing DIP3 ProUpServTx (provider service update).

    Fixed sizes: proTxHash/inputsHash 32, ipAddress 16, payloadSig 96
    bytes; port is serialized big-endian.
    '''
    def serialize(self):
        assert (len(self.proTxHash) == 32
                and len(self.ipAddress) == 16
                and len(self.inputsHash) == 32
                and len(self.payloadSig) == 96)
        return (
            pack_le_uint16(self.version) +              # version
            self.proTxHash +                            # proTxHash
            self.ipAddress +                            # ipAddress
            pack_be_uint16(self.port) +                 # port
            pack_varbytes(self.scriptOperatorPayout) +  # scriptOperatorPayout
            self.inputsHash +                           # inputsHash
            self.payloadSig                             # payloadSig
        )

    @classmethod
    def read_tx_extra(cls, deser):
        # Field reads mirror serialize() exactly.
        return AxeProUpServTx(
            deser._read_le_uint16(),                    # version
            deser._read_nbytes(32),                     # proTxHash
            deser._read_nbytes(16),                     # ipAddress
            deser._read_be_uint16(),                    # port
            deser._read_varbytes(),                     # scriptOperatorPayout
            deser._read_nbytes(32),                     # inputsHash
            deser._read_nbytes(96)                      # payloadSig
        )
class AxeProUpRegTx(namedtuple("AxeProUpRegTx",
                               "version proTxHash mode PubKeyOperator "
                               "KeyIdVoting scriptPayout inputsHash "
                               "payloadSig")):
    '''Class representing DIP3 ProUpRegTx (provider registrar update).

    Fixed sizes: proTxHash/inputsHash 32, PubKeyOperator 48,
    KeyIdVoting 20 bytes.
    '''
    def serialize(self):
        assert (len(self.proTxHash) == 32
                and len(self.PubKeyOperator) == 48
                and len(self.KeyIdVoting) == 20
                and len(self.inputsHash) == 32)
        return (
            pack_le_uint16(self.version) +          # version
            self.proTxHash +                        # proTxHash
            pack_le_uint16(self.mode) +             # mode
            self.PubKeyOperator +                   # PubKeyOperator
            self.KeyIdVoting +                      # KeyIdVoting
            pack_varbytes(self.scriptPayout) +      # scriptPayout
            self.inputsHash +                       # inputsHash
            pack_varbytes(self.payloadSig)          # payloadSig
        )

    @classmethod
    def read_tx_extra(cls, deser):
        # Field reads mirror serialize() exactly.
        return AxeProUpRegTx(
            deser._read_le_uint16(),                # version
            deser._read_nbytes(32),                 # proTxHash
            deser._read_le_uint16(),                # mode
            deser._read_nbytes(48),                 # PubKeyOperator
            deser._read_nbytes(20),                 # KeyIdVoting
            deser._read_varbytes(),                 # scriptPayout
            deser._read_nbytes(32),                 # inputsHash
            deser._read_varbytes()                  # payloadSig
        )
class AxeProUpRevTx(namedtuple("AxeProUpRevTx",
                               "version proTxHash reason "
                               "inputsHash payloadSig")):
    '''Class representing DIP3 ProUpRevTx (provider revocation).

    Fixed sizes: proTxHash/inputsHash 32, payloadSig 96 bytes.
    '''
    def serialize(self):
        assert (len(self.proTxHash) == 32
                and len(self.inputsHash) == 32
                and len(self.payloadSig) == 96)
        return (
            pack_le_uint16(self.version) +          # version
            self.proTxHash +                        # proTxHash
            pack_le_uint16(self.reason) +           # reason
            self.inputsHash +                       # inputsHash
            self.payloadSig                         # payloadSig
        )

    @classmethod
    def read_tx_extra(cls, deser):
        # Field reads mirror serialize() exactly.
        return AxeProUpRevTx(
            deser._read_le_uint16(),                # version
            deser._read_nbytes(32),                 # proTxHash
            deser._read_le_uint16(),                # reason
            deser._read_nbytes(32),                 # inputsHash
            deser._read_nbytes(96)                  # payloadSig
        )
class AxeCbTx(namedtuple("AxeCbTx", "version height merkleRootMNList "
                         "merkleRootQuorums")):
    '''Class representing DIP4 coinbase special tx.

    merkleRootQuorums (32 bytes) is only present when version > 1; for
    version 1 it is stored as b'' and omitted from the serialization.
    '''
    def serialize(self):
        assert len(self.merkleRootMNList) == 32
        res = (
            pack_le_uint16(self.version) +          # version
            pack_le_uint32(self.height) +           # height
            self.merkleRootMNList                   # merkleRootMNList
        )
        if self.version > 1:
            assert len(self.merkleRootQuorums) == 32
            res += self.merkleRootQuorums           # merkleRootQuorums
        return res

    @classmethod
    def read_tx_extra(cls, deser):
        version = deser._read_le_uint16()
        height = deser._read_le_uint32()
        merkleRootMNList = deser._read_nbytes(32)
        merkleRootQuorums = b''
        if version > 1:
            merkleRootQuorums = deser._read_nbytes(32)
        return AxeCbTx(version, height, merkleRootMNList, merkleRootQuorums)
class AxeSubTxRegister(namedtuple("AxeSubTxRegister",
                                  "version userName pubKey payloadSig")):
    '''Class representing DIP5 SubTxRegister.

    Fixed sizes: pubKey 48, payloadSig 96 bytes; userName is varbytes.
    '''
    def serialize(self):
        assert (len(self.pubKey) == 48
                and len(self.payloadSig) == 96)
        return (
            pack_le_uint16(self.version) +          # version
            pack_varbytes(self.userName) +          # userName
            self.pubKey +                           # pubKey
            self.payloadSig                         # payloadSig
        )

    @classmethod
    def read_tx_extra(cls, deser):
        # Field reads mirror serialize() exactly.
        return AxeSubTxRegister(
            deser._read_le_uint16(),                # version
            deser._read_varbytes(),                 # userName
            deser._read_nbytes(48),                 # pubKey
            deser._read_nbytes(96)                  # payloadSig
        )
class AxeSubTxTopup(namedtuple("AxeSubTxTopup",
                               "version regTxHash")):
    '''Class representing DIP5 SubTxTopup.

    regTxHash is a fixed 32-byte hash.
    '''
    def serialize(self):
        assert len(self.regTxHash) == 32
        return (
            pack_le_uint16(self.version) +          # version
            self.regTxHash                          # regTxHash
        )

    @classmethod
    def read_tx_extra(cls, deser):
        # Field reads mirror serialize() exactly.
        return AxeSubTxTopup(
            deser._read_le_uint16(),                # version
            deser._read_nbytes(32)                  # regTxHash
        )
class AxeSubTxResetKey(namedtuple("AxeSubTxResetKey",
                                  "version regTxHash hashPrevSubTx "
                                  "creditFee newPubKey payloadSig")):
    '''Class representing DIP5 SubTxResetKey.

    Fixed sizes: regTxHash/hashPrevSubTx 32, newPubKey 48, payloadSig 96
    bytes; creditFee is a LE int64.
    '''
    def serialize(self):
        assert (len(self.regTxHash) == 32
                and len(self.hashPrevSubTx) == 32
                and len(self.newPubKey) == 48
                and len(self.payloadSig) == 96)
        return (
            pack_le_uint16(self.version) +          # version
            self.regTxHash +                        # regTxHash
            self.hashPrevSubTx +                    # hashPrevSubTx
            pack_le_int64(self.creditFee) +         # creditFee
            self.newPubKey +                        # newPubKey
            self.payloadSig                         # payloadSig
        )

    @classmethod
    def read_tx_extra(cls, deser):
        # Field reads mirror serialize() exactly.
        return AxeSubTxResetKey(
            deser._read_le_uint16(),                # version
            deser._read_nbytes(32),                 # regTxHash
            deser._read_nbytes(32),                 # hashPrevSubTx
            deser._read_le_int64(),                 # creditFee
            deser._read_nbytes(48),                 # newPubKey
            deser._read_nbytes(96)                  # payloadSig
        )
class AxeSubTxCloseAccount(namedtuple("AxeSubTxCloseAccount",
                                      "version regTxHash hashPrevSubTx "
                                      "creditFee payloadSig")):
    '''Class representing DIP5 SubTxCloseAccount.

    Fixed sizes: regTxHash/hashPrevSubTx 32, payloadSig 96 bytes;
    creditFee is a LE int64.
    '''
    def serialize(self):
        assert (len(self.regTxHash) == 32
                and len(self.hashPrevSubTx) == 32
                and len(self.payloadSig) == 96)
        return (
            pack_le_uint16(self.version) +          # version
            self.regTxHash +                        # regTxHash
            self.hashPrevSubTx +                    # hashPrevSubTx
            pack_le_int64(self.creditFee) +         # creditFee
            self.payloadSig                         # payloadSig
        )

    @classmethod
    def read_tx_extra(cls, deser):
        # Field reads mirror serialize() exactly.
        return AxeSubTxCloseAccount(
            deser._read_le_uint16(),                # version
            deser._read_nbytes(32),                 # regTxHash
            deser._read_nbytes(32),                 # hashPrevSubTx
            deser._read_le_int64(),                 # creditFee
            deser._read_nbytes(96)                  # payloadSig
        )
# https://dash-docs.github.io/en/developer-reference#outpoint
class TxOutPoint(namedtuple("TxOutPoint", "hash index")):
    '''Class representing a tx output outpoint: a 32-byte tx hash plus a
    LE uint32 output index (36 bytes serialized).'''
    def serialize(self):
        assert len(self.hash) == 32
        return (
            self.hash +                             # hash
            pack_le_uint32(self.index)              # index
        )

    @classmethod
    def read_outpoint(cls, deser):
        # Field reads mirror serialize() exactly.
        return TxOutPoint(
            deser._read_nbytes(32),                 # hash
            deser._read_le_uint32()                 # index
        )
class DeserializerAxe(Deserializer):
    '''Deserializer for AXE DIP2 special tx types'''

    # Supported Spec Tx types and corresponding classes mapping
    PRO_REG_TX = 1
    PRO_UP_SERV_TX = 2
    PRO_UP_REG_TX = 3
    PRO_UP_REV_TX = 4
    CB_TX = 5
    SUB_TX_REGISTER = 8
    SUB_TX_TOPUP = 9
    SUB_TX_RESET_KEY = 10
    SUB_TX_CLOSE_ACCOUNT = 11

    SPEC_TX_HANDLERS = {
        PRO_REG_TX: AxeProRegTx,
        PRO_UP_SERV_TX: AxeProUpServTx,
        PRO_UP_REG_TX: AxeProUpRegTx,
        PRO_UP_REV_TX: AxeProUpRevTx,
        CB_TX: AxeCbTx,
        SUB_TX_REGISTER: AxeSubTxRegister,
        SUB_TX_TOPUP: AxeSubTxTopup,
        SUB_TX_RESET_KEY: AxeSubTxResetKey,
        SUB_TX_CLOSE_ACCOUNT: AxeSubTxCloseAccount,
    }

    def _read_outpoint(self):
        '''Read a 36-byte outpoint (32-byte hash + LE uint32 index).'''
        return TxOutPoint.read_outpoint(self)

    def read_tx(self):
        '''Deserialize one Axe transaction, including any DIP2 payload.

        The 4-byte header packs the version in the low 16 bits and the
        DIP2 tx type in the high 16 bits; special types only exist from
        version 3 onwards, so earlier versions are treated as classic
        transactions whose whole header is the int32 version.
        '''
        header = self._read_le_uint32()
        tx_type = header >> 16  # DIP2 tx type
        if tx_type:
            version = header & 0x0000ffff
        else:
            version = header
        if tx_type and version < 3:
            # Pre-DIP2: the full header word is the classic version.
            version = header
            tx_type = 0
        inputs = self._read_inputs()
        outputs = self._read_outputs()
        locktime = self._read_le_uint32()
        if tx_type:
            extra_payload_size = self._read_varint()
            end = self.cursor + extra_payload_size
            spec_tx_class = DeserializerAxe.SPEC_TX_HANDLERS.get(tx_type)
            if spec_tx_class:
                # Every handler class defines read_tx_extra, so call it
                # directly instead of getattr(..., None) followed by an
                # unconditional call (which would raise TypeError if the
                # attribute were ever missing).
                extra_payload = spec_tx_class.read_tx_extra(self)
                assert isinstance(extra_payload, spec_tx_class)
            else:
                # Unknown special type: keep the raw payload bytes.
                extra_payload = self._read_nbytes(extra_payload_size)
            assert self.cursor == end
        else:
            extra_payload = b''
        tx = AxeTx(version, inputs, outputs, locktime, tx_type, extra_payload)
        return tx

View File

@ -30,7 +30,8 @@ from collections import namedtuple
from electrumx.lib.tx import Deserializer
from electrumx.lib.util import (pack_le_uint16, pack_le_int32, pack_le_uint32,
pack_le_int64, pack_varint, pack_varbytes)
pack_le_int64, pack_varint, pack_varbytes,
pack_be_uint16)
# https://github.com/dashpay/dips/blob/master/dip-0002.md
@ -86,7 +87,7 @@ class DashProRegTx(namedtuple("DashProRegTx",
pack_le_uint16(self.mode) + # mode
self.collateralOutpoint.serialize() + # collateralOutpoint
self.ipAddress + # ipAddress
pack_le_uint16(self.port) + # port
pack_be_uint16(self.port) + # port
self.KeyIdOwner + # KeyIdOwner
self.PubKeyOperator + # PubKeyOperator
self.KeyIdVoting + # KeyIdVoting
@ -104,7 +105,7 @@ class DashProRegTx(namedtuple("DashProRegTx",
deser._read_le_uint16(), # mode
deser._read_outpoint(), # collateralOutpoint
deser._read_nbytes(16), # ipAddress
deser._read_le_uint16(), # port
deser._read_be_uint16(), # port
deser._read_nbytes(20), # KeyIdOwner
deser._read_nbytes(48), # PubKeyOperator
deser._read_nbytes(20), # KeyIdVoting
@ -129,7 +130,7 @@ class DashProUpServTx(namedtuple("DashProUpServTx",
pack_le_uint16(self.version) + # version
self.proTxHash + # proTxHash
self.ipAddress + # ipAddress
pack_le_uint16(self.port) + # port
pack_be_uint16(self.port) + # port
pack_varbytes(self.scriptOperatorPayout) + # scriptOperatorPayout
self.inputsHash + # inputsHash
self.payloadSig # payloadSig
@ -141,7 +142,7 @@ class DashProUpServTx(namedtuple("DashProUpServTx",
deser._read_le_uint16(), # version
deser._read_nbytes(32), # proTxHash
deser._read_nbytes(16), # ipAddress
deser._read_le_uint16(), # port
deser._read_be_uint16(), # port
deser._read_varbytes(), # scriptOperatorPayout
deser._read_nbytes(32), # inputsHash
deser._read_nbytes(96) # payloadSig
@ -210,23 +211,30 @@ class DashProUpRevTx(namedtuple("DashProUpRevTx",
)
class DashCbTx(namedtuple("DashCbTx", "version height merkleRootMNList")):
class DashCbTx(namedtuple("DashCbTx", "version height merkleRootMNList "
"merkleRootQuorums")):
'''Class representing DIP4 coinbase special tx'''
def serialize(self):
assert len(self.merkleRootMNList) == 32
return (
res = (
pack_le_uint16(self.version) + # version
pack_le_uint32(self.height) + # height
self.merkleRootMNList # merkleRootMNList
)
if self.version > 1:
assert len(self.merkleRootQuorums) == 32
res += self.merkleRootQuorums # merkleRootQuorums
return res
@classmethod
def read_tx_extra(cls, deser):
return DashCbTx(
deser._read_le_uint16(), # version
deser._read_le_uint32(), # height
deser._read_nbytes(32) # merkleRootMNList
)
version = deser._read_le_uint16()
height = deser._read_le_uint32()
merkleRootMNList = deser._read_nbytes(32)
merkleRootQuorums = b''
if version > 1:
merkleRootQuorums = deser._read_nbytes(32)
return DashCbTx(version, height, merkleRootMNList, merkleRootQuorums)
class DashSubTxRegister(namedtuple("DashSubTxRegister",

View File

@ -33,8 +33,9 @@ from ipaddress import ip_address
import logging
import re
import sys
from collections import Container, Mapping
from struct import pack, Struct
from collections.abc import Container, Mapping
from struct import Struct
# Logging utilities
@ -57,7 +58,7 @@ def make_logger(name, *, handler, level):
'''Return the root ElectrumX logger.'''
logger = logging.getLogger(name)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
logger.setLevel(level)
logger.propagate = False
return logger
@ -255,22 +256,6 @@ def address_string(address):
fmt = '[{}]:{:d}'
return fmt.format(host, port)
# Hostname validation, after
# http://stackoverflow.com/questions/2532053/validate-a-hostname-string
# Underscores are technically valid in domain names though not in host
# names; we deliberately ignore that distinction and accept them.
SEGMENT_REGEX = re.compile("(?!-)[A-Z_\\d-]{1,63}(?<!-)$", re.IGNORECASE)


def is_valid_hostname(hostname):
    '''Return True if hostname is a syntactically valid DNS name.'''
    # Full names are capped at 255 characters.
    if len(hostname) > 255:
        return False
    # A single trailing dot (fully-qualified form) is permitted.
    if hostname.endswith("."):
        hostname = hostname[:-1]
    labels = hostname.split(".")
    return all(SEGMENT_REGEX.match(label) for label in labels)
def protocol_tuple(s):
'''Converts a protocol version number, such as "1.0" to a tuple (1, 0).
@ -278,7 +263,7 @@ def protocol_tuple(s):
If the version number is bad, (0, ) indicating version 0 is returned.'''
try:
return tuple(int(part) for part in s.split('.'))
except Exception:
except (TypeError, ValueError, AttributeError):
return (0, )
@ -333,6 +318,10 @@ unpack_le_uint64_from = struct_le_Q.unpack_from
unpack_be_uint16_from = struct_be_H.unpack_from
unpack_be_uint32_from = struct_be_I.unpack_from
unpack_le_uint32 = struct_le_I.unpack
unpack_le_uint64 = struct_le_Q.unpack
unpack_be_uint32 = struct_be_I.unpack
pack_le_int32 = struct_le_i.pack
pack_le_int64 = struct_le_q.pack
pack_le_uint16 = struct_le_H.pack

View File

@ -9,18 +9,17 @@
'''Block prefetcher and chain processor.'''
import array
import asyncio
from struct import pack, unpack
import time
from functools import partial
from aiorpcx import TaskGroup, run_in_thread
import electrumx
from electrumx.server.daemon import DaemonError
from electrumx.lib.hash import hash_to_hex_str, HASHX_LEN
from electrumx.lib.util import chunks, class_logger
from electrumx.lib.util import (
chunks, class_logger, pack_le_uint32, pack_le_uint64, unpack_le_uint32
)
from electrumx.server.db import FlushData
@ -57,6 +56,11 @@ class Prefetcher(object):
await asyncio.sleep(self.polling_delay)
except DaemonError as e:
self.logger.info(f'ignoring daemon error: {e}')
except asyncio.CancelledError as e:
self.logger.info(f'cancelled; prefetcher stopping {e}')
raise
except Exception:
self.logger.exception(f'ignoring unexpected exception')
def get_prefetched_blocks(self):
'''Called by block processor when it is processing queued blocks.'''
@ -98,17 +102,16 @@ class Prefetcher(object):
daemon_height = await daemon.height()
async with self.semaphore:
while self.cache_size < self.min_cache_size:
first = self.fetched_height + 1
# Try and catch up all blocks but limit to room in cache.
# Constrain fetch count to between 0 and 100 regardless;
# some chains can be lumpy.
cache_room = max(self.min_cache_size // self.ave_size, 1)
count = min(daemon_height - self.fetched_height, cache_room)
count = min(100, max(count, 0))
# Don't make too large a request
count = min(self.coin.max_fetch_blocks(first), max(count, 0))
if not count:
self.caught_up = True
return False
first = self.fetched_height + 1
hex_hashes = await daemon.block_hex_hashes(first, count)
if self.caught_up:
self.logger.info('new block height {:,d} hash {}'
@ -165,6 +168,10 @@ class BlockProcessor(object):
self.next_cache_check = 0
self.touched = set()
self.reorg_count = 0
self.height = -1
self.tip = None
self.tx_count = 0
self._caught_up_event = None
# Caches of unflushed items.
self.headers = []
@ -209,9 +216,9 @@ class BlockProcessor(object):
await self._maybe_flush()
if not self.db.first_sync:
s = '' if len(blocks) == 1 else 's'
self.logger.info('processed {:,d} block{} in {:.1f}s'
.format(len(blocks), s,
time.time() - start))
blocks_size = sum(len(block) for block in raw_blocks) / 1_000_000
self.logger.info(f'processed {len(blocks):,d} block{s} size {blocks_size:.2f} MB '
f'in {time.time() - start:.1f}s')
if self._caught_up_event.is_set():
await self.notifications.on_block(self.touched, self.height)
self.touched = set()
@ -253,7 +260,7 @@ class BlockProcessor(object):
self.touched.discard(None)
self.db.flush_backup(self.flush_data(), self.touched)
start, last, hashes = await self.reorg_hashes(count)
_start, last, hashes = await self.reorg_hashes(count)
# Reverse and convert to hex strings.
hashes = [hash_to_hex_str(hash) for hash in reversed(hashes)]
for hex_hashes in chunks(hashes, 50):
@ -398,18 +405,19 @@ class BlockProcessor(object):
undo_info = []
tx_num = self.tx_count
script_hashX = self.coin.hashX_from_script
s_pack = pack
put_utxo = self.utxo_cache.__setitem__
spend_utxo = self.spend_utxo
undo_info_append = undo_info.append
update_touched = self.touched.update
hashXs_by_tx = []
append_hashXs = hashXs_by_tx.append
to_le_uint32 = pack_le_uint32
to_le_uint64 = pack_le_uint64
for tx, tx_hash in txs:
hashXs = []
append_hashX = hashXs.append
tx_numb = s_pack('<I', tx_num)
tx_numb = to_le_uint32(tx_num)
# Spend the inputs
for txin in tx.inputs:
@ -425,8 +433,8 @@ class BlockProcessor(object):
hashX = script_hashX(txout.pk_script)
if hashX:
append_hashX(hashX)
put_utxo(tx_hash + s_pack('<H', idx),
hashX + tx_numb + s_pack('<Q', txout.value))
put_utxo(tx_hash + to_le_uint32(idx),
hashX + tx_numb + to_le_uint64(txout.value))
append_hashXs(hashXs)
update_touched(hashXs)
@ -475,7 +483,6 @@ class BlockProcessor(object):
n = len(undo_info)
# Use local vars for speed in the loops
s_pack = pack
put_utxo = self.utxo_cache.__setitem__
spend_utxo = self.spend_utxo
script_hashX = self.coin.hashX_from_script
@ -497,8 +504,7 @@ class BlockProcessor(object):
continue
n -= undo_entry_len
undo_item = undo_info[n:n + undo_entry_len]
put_utxo(txin.prev_hash + s_pack('<H', txin.prev_idx),
undo_item)
put_utxo(txin.prev_hash + pack_le_uint32(txin.prev_idx), undo_item)
touched.add(undo_item[:-12])
assert n == 0
@ -514,10 +520,10 @@ class BlockProcessor(object):
TX not per UTXO). So store them in a Python dictionary with
binary keys and values.
Key: TX_HASH + TX_IDX (32 + 2 = 34 bytes)
Key: TX_HASH + TX_IDX (32 + 4 = 36 bytes)
Value: HASHX + TX_NUM + VALUE (11 + 4 + 8 = 23 bytes)
That's 57 bytes of raw data in-memory. Python dictionary overhead
That's 59 bytes of raw data in-memory. Python dictionary overhead
means each entry actually uses about 205 bytes of memory. So
almost 5 million UTXOs can fit in 1GB of RAM. There are
approximately 42 million UTXOs on bitcoin mainnet at height
@ -566,7 +572,7 @@ class BlockProcessor(object):
corruption.
'''
# Fast track is it being in the cache
idx_packed = pack('<H', tx_idx)
idx_packed = pack_le_uint32(tx_idx)
cache_value = self.utxo_cache.pop(tx_hash + idx_packed, None)
if cache_value:
return cache_value
@ -583,7 +589,7 @@ class BlockProcessor(object):
tx_num_packed = hdb_key[-4:]
if len(candidates) > 1:
tx_num, = unpack('<I', tx_num_packed)
tx_num, = unpack_le_uint32(tx_num_packed)
hash, height = self.db.fs_tx_hash(tx_num)
if hash != tx_hash:
assert hash is not None # Should always be found
@ -591,7 +597,7 @@ class BlockProcessor(object):
# Key: b'u' + address_hashX + tx_idx + tx_num
# Value: the UTXO value as a 64-bit unsigned integer
udb_key = b'u' + hashX + hdb_key[-6:]
udb_key = b'u' + hashX + hdb_key[-8:]
utxo_value_packed = self.db.utxo_db.get(udb_key)
if utxo_value_packed:
# Remove both entries for this UTXO
@ -683,7 +689,7 @@ class DecredBlockProcessor(BlockProcessor):
return start, count
class NamecoinBlockProcessor(BlockProcessor):
class NameIndexBlockProcessor(BlockProcessor):
def advance_txs(self, txs):
result = super().advance_txs(txs)
@ -694,12 +700,12 @@ class NamecoinBlockProcessor(BlockProcessor):
hashXs_by_tx = []
append_hashXs = hashXs_by_tx.append
for tx, tx_hash in txs:
for tx, _tx_hash in txs:
hashXs = []
append_hashX = hashXs.append
# Add the new UTXOs and associate them with the name script
for idx, txout in enumerate(tx.outputs):
for txout in tx.outputs:
# Get the hashX of the name script. Ignore non-name scripts.
hashX = script_name_hashX(txout.pk_script)
if hashX:
@ -723,26 +729,27 @@ class LTORBlockProcessor(BlockProcessor):
undo_info = []
tx_num = self.tx_count
script_hashX = self.coin.hashX_from_script
s_pack = pack
put_utxo = self.utxo_cache.__setitem__
spend_utxo = self.spend_utxo
undo_info_append = undo_info.append
update_touched = self.touched.update
to_le_uint32 = pack_le_uint32
to_le_uint64 = pack_le_uint64
hashXs_by_tx = [set() for _ in txs]
# Add the new UTXOs
for (tx, tx_hash), hashXs in zip(txs, hashXs_by_tx):
add_hashXs = hashXs.add
tx_numb = s_pack('<I', tx_num)
tx_numb = to_le_uint32(tx_num)
for idx, txout in enumerate(tx.outputs):
# Get the hashX. Ignore unspendable outputs.
hashX = script_hashX(txout.pk_script)
if hashX:
add_hashXs(hashX)
put_utxo(tx_hash + s_pack('<H', idx),
hashX + tx_numb + s_pack('<Q', txout.value))
put_utxo(tx_hash + to_le_uint32(idx),
hashX + tx_numb + to_le_uint64(txout.value))
tx_num += 1
# Spend the inputs
@ -774,7 +781,6 @@ class LTORBlockProcessor(BlockProcessor):
.format(self.height))
# Use local vars for speed in the loops
s_pack = pack
put_utxo = self.utxo_cache.__setitem__
spend_utxo = self.spend_utxo
script_hashX = self.coin.hashX_from_script
@ -789,8 +795,7 @@ class LTORBlockProcessor(BlockProcessor):
if txin.is_generation():
continue
undo_item = undo_info[n:n + undo_entry_len]
put_utxo(txin.prev_hash + s_pack('<H', txin.prev_idx),
undo_item)
put_utxo(txin.prev_hash + pack_le_uint32(txin.prev_idx), undo_item)
add_touched(undo_item[:-12])
n += undo_entry_len

View File

@ -82,8 +82,8 @@ class Controller(ServerBase):
'''Start the RPC server and wait for the mempool to synchronize. Then
start serving external clients.
'''
if not (0, 10, 1) <= aiorpcx_version < (0, 11):
raise RuntimeError('aiorpcX version 0.10.x, x >= 1, required')
if not (0, 18, 1) <= aiorpcx_version < (0, 19):
raise RuntimeError('aiorpcX version 0.18.x is required')
env = self.env
min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings()
@ -97,35 +97,38 @@ class Controller(ServerBase):
Daemon = env.coin.DAEMON
BlockProcessor = env.coin.BLOCK_PROCESSOR
daemon = Daemon(env.coin, env.daemon_url)
db = DB(env)
bp = BlockProcessor(env, db, daemon, notifications)
async with Daemon(env.coin, env.daemon_url) as daemon:
db = DB(env)
bp = BlockProcessor(env, db, daemon, notifications)
# Set notifications up to implement the MemPoolAPI
notifications.height = daemon.height
notifications.cached_height = daemon.cached_height
notifications.mempool_hashes = daemon.mempool_hashes
notifications.raw_transactions = daemon.getrawtransactions
notifications.lookup_utxos = db.lookup_utxos
MemPoolAPI.register(Notifications)
mempool = MemPool(env.coin, notifications)
# Set notifications up to implement the MemPoolAPI
def get_db_height():
return db.db_height
notifications.height = daemon.height
notifications.db_height = get_db_height
notifications.cached_height = daemon.cached_height
notifications.mempool_hashes = daemon.mempool_hashes
notifications.raw_transactions = daemon.getrawtransactions
notifications.lookup_utxos = db.lookup_utxos
MemPoolAPI.register(Notifications)
mempool = MemPool(env.coin, notifications)
session_mgr = SessionManager(env, db, bp, daemon, mempool,
shutdown_event)
session_mgr = SessionManager(env, db, bp, daemon, mempool,
shutdown_event)
# Test daemon authentication, and also ensure it has a cached
# height. Do this before entering the task group.
await daemon.height()
# Test daemon authentication, and also ensure it has a cached
# height. Do this before entering the task group.
await daemon.height()
caught_up_event = Event()
mempool_event = Event()
caught_up_event = Event()
mempool_event = Event()
async def wait_for_catchup():
await caught_up_event.wait()
await group.spawn(db.populate_header_merkle_cache())
await group.spawn(mempool.keep_synchronized(mempool_event))
async def wait_for_catchup():
await caught_up_event.wait()
await group.spawn(db.populate_header_merkle_cache())
await group.spawn(mempool.keep_synchronized(mempool_event))
async with TaskGroup() as group:
await group.spawn(session_mgr.serve(notifications, mempool_event))
await group.spawn(bp.fetch_and_process_blocks(caught_up_event))
await group.spawn(wait_for_catchup())
async with TaskGroup() as group:
await group.spawn(session_mgr.serve(notifications, mempool_event))
await group.spawn(bp.fetch_and_process_blocks(caught_up_event))
await group.spawn(wait_for_catchup())

View File

@ -14,15 +14,14 @@ import json
import time
from calendar import timegm
from struct import pack
from time import strptime
import aiohttp
from aiorpcx import JSONRPC
from electrumx.lib.util import hex_to_bytes, class_logger,\
unpack_le_uint16_from, pack_varint
from electrumx.lib.hash import hex_str_to_hash, hash_to_hex_str
from electrumx.lib.tx import DeserializerDecred
from aiorpcx import JSONRPC
class DaemonError(Exception):
@ -33,8 +32,9 @@ class WarmingUpError(Exception):
'''Internal - when the daemon is warming up.'''
class WorkQueueFullError(Exception):
'''Internal - when the daemon's work queue is full.'''
class ServiceRefusedError(Exception):
'''Internal - when the daemon doesn't provide a JSON response, only an HTTP error, for
some reason.'''
class Daemon(object):
@ -43,10 +43,11 @@ class Daemon(object):
WARMING_UP = -28
id_counter = itertools.count()
def __init__(self, coin, url, max_workqueue=10, init_retry=0.25,
max_retry=4.0):
def __init__(self, coin, url, *, max_workqueue=10, init_retry=0.25, max_retry=4.0):
self.coin = coin
self.logger = class_logger(__name__, self.__class__.__name__)
self.url_index = None
self.urls = []
self.set_url(url)
# Limit concurrent RPC calls to this number.
# See DEFAULT_HTTP_WORKQUEUE in bitcoind, which is typically 16
@ -55,6 +56,18 @@ class Daemon(object):
self.max_retry = max_retry
self._height = None
self.available_rpcs = {}
self.session = None
async def __aenter__(self):
self.session = aiohttp.ClientSession(connector=self.connector())
return self
async def __aexit__(self, exc_type, exc_value, traceback):
await self.session.close()
self.session = None
def connector(self):
return None
def set_url(self, url):
'''Set the URLS to the given list, and switch to the first one.'''
@ -87,24 +100,15 @@ class Daemon(object):
return True
return False
def client_session(self):
'''An aiohttp client session.'''
return aiohttp.ClientSession()
async def _send_data(self, data):
async with self.workqueue_semaphore:
async with self.client_session() as session:
async with session.post(self.current_url(), data=data) as resp:
kind = resp.headers.get('Content-Type', None)
if kind == 'application/json':
return await resp.json()
# bitcoind's HTTP protocol "handling" is a bad joke
text = await resp.text()
if 'Work queue depth exceeded' in text:
raise WorkQueueFullError
text = text.strip() or resp.reason
self.logger.error(text)
raise DaemonError(text)
async with self.session.post(self.current_url(), data=data) as resp:
kind = resp.headers.get('Content-Type', None)
if kind == 'application/json':
return await resp.json()
text = await resp.text()
text = text.strip() or resp.reason
raise ServiceRefusedError(text)
async def _send(self, payload, processor):
'''Send a payload to be converted to JSON.
@ -117,7 +121,7 @@ class Daemon(object):
now = time.time()
if now - last_error_log > 60:
last_error_log = now
self.logger.error(f'{error} Retrying occasionally...')
self.logger.error(f'{error}. Retrying occasionally...')
if retry == self.max_retry and self.failover():
retry = 0
@ -133,21 +137,24 @@ class Daemon(object):
self.logger.info(on_good_message)
return result
except asyncio.TimeoutError:
log_error('timeout error.')
log_error('timeout error')
except aiohttp.ServerDisconnectedError:
log_error('disconnected.')
log_error('disconnected')
on_good_message = 'connection restored'
except ConnectionResetError:
log_error('connection reset')
on_good_message = 'connection restored'
except aiohttp.ClientConnectionError:
log_error('connection problem - is your daemon running?')
log_error('connection problem - check your daemon is running')
on_good_message = 'connection restored'
except aiohttp.ClientError as e:
log_error(f'daemon error: {e}')
on_good_message = 'running normally'
except WarmingUpError:
log_error('starting up checking blocks.')
except ServiceRefusedError as e:
log_error(f'daemon service refused: {e}')
on_good_message = 'running normally'
except WorkQueueFullError:
log_error('work queue full.')
except WarmingUpError:
log_error('starting up checking blocks')
on_good_message = 'running normally'
await asyncio.sleep(retry)
@ -288,6 +295,10 @@ class DashDaemon(Daemon):
'''Return the masternode status.'''
return await self._send_single('masternodelist', params)
async def protx(self, params):
'''Set of commands to execute ProTx related actions.'''
return await self._send_single('protx', params)
class FakeEstimateFeeDaemon(Daemon):
'''Daemon that simulates estimatefee and relayfee RPC calls. Coin that
@ -360,7 +371,11 @@ class LegacyRPCDaemon(Daemon):
def timestamp_safe(self, t):
if isinstance(t, int):
return t
return timegm(strptime(t, "%Y-%m-%d %H:%M:%S %Z"))
return timegm(time.strptime(t, "%Y-%m-%d %H:%M:%S %Z"))
class FakeEstimateLegacyRPCDaemon(LegacyRPCDaemon, FakeEstimateFeeDaemon):
pass
class DecredDaemon(Daemon):
@ -441,10 +456,9 @@ class DecredDaemon(Daemon):
mempool += tip.get('stx', [])
return mempool
def client_session(self):
def connector(self):
# FIXME allow self signed certificates
connector = aiohttp.TCPConnector(verify_ssl=False)
return aiohttp.ClientSession(connector=connector)
return aiohttp.TCPConnector(verify_ssl=False)
class PreLegacyRPCDaemon(LegacyRPCDaemon):
@ -472,3 +486,32 @@ class SmartCashDaemon(Daemon):
async def smartrewards(self, params):
'''Return smartrewards data.'''
return await self._send_single('smartrewards', params)
class ZcoinMtpDaemon(Daemon):
def strip_mtp_data(self, raw_block):
if self.coin.is_mtp(raw_block):
return \
raw_block[:self.coin.MTP_HEADER_DATA_START*2] + \
raw_block[self.coin.MTP_HEADER_DATA_END*2:]
return raw_block
async def raw_blocks(self, hex_hashes):
'''Return the raw binary blocks with the given hex hashes.'''
params_iterable = ((h, False) for h in hex_hashes)
blocks = await self._send_vector('getblock', params_iterable)
# Convert hex string to bytes
return [hex_to_bytes(self.strip_mtp_data(block)) for block in blocks]
async def masternode_broadcast(self, params):
'''Broadcast a transaction to the network.'''
return await self._send_single('znodebroadcast', params)
async def masternode_list(self, params):
'''Return the masternode status.'''
return await self._send_single('znodelist', params)
async def protx(self, params):
'''Set of commands to execute ProTx related actions.'''
return await self._send_single('protx', params)

View File

@ -1,4 +1,4 @@
# Copyright (c) 2016, Neil Booth
# Copyright (c) 2016-2020, Neil Booth
# Copyright (c) 2017, the ElectrumX authors
#
# All rights reserved.
@ -16,15 +16,18 @@ import time
from bisect import bisect_right
from collections import namedtuple
from glob import glob
from struct import pack, unpack
from struct import Struct
import attr
from aiorpcx import run_in_thread, sleep
import electrumx.lib.util as util
from electrumx.lib.hash import hash_to_hex_str, HASHX_LEN
from electrumx.lib.hash import hash_to_hex_str
from electrumx.lib.merkle import Merkle, MerkleCache
from electrumx.lib.util import formatted_time
from electrumx.lib.util import (
formatted_time, pack_be_uint16, pack_be_uint32, pack_le_uint64, pack_le_uint32,
unpack_le_uint32, unpack_be_uint32, unpack_le_uint64
)
from electrumx.server.storage import db_class
from electrumx.server.history import History
@ -52,7 +55,7 @@ class DB(object):
it was shutdown uncleanly.
'''
DB_VERSIONS = [6]
DB_VERSIONS = [6, 7]
class DBError(Exception):
'''Raised on general DB errors generally indicating corruption.'''
@ -76,8 +79,18 @@ class DB(object):
self.db_class = db_class(self.env.db_engine)
self.history = History()
self.utxo_db = None
self.utxo_flush_count = 0
self.fs_height = -1
self.fs_tx_count = 0
self.db_height = -1
self.db_tx_count = 0
self.db_tip = None
self.tx_counts = None
self.last_flush = time.time()
self.last_flush_tx_count = 0
self.wall_time = 0
self.first_sync = True
self.db_version = -1
self.logger.info(f'using {self.env.db_engine} for DB backend')
@ -288,7 +301,7 @@ class DB(object):
for key, value in flush_data.adds.items():
# suffix = tx_idx + tx_num
hashX = value[:-12]
suffix = key[-2:] + value[-12:-8]
suffix = key[-4:] + value[-12:-8]
batch_put(b'h' + key[:4] + suffix, hashX)
batch_put(b'u' + hashX + suffix, value[-8:])
flush_data.adds.clear()
@ -348,7 +361,7 @@ class DB(object):
offsets = []
for h in headers:
offset += len(h)
offsets.append(pack("<Q", offset))
offsets.append(pack_le_uint64(offset))
# For each header we get the offset of the next header, hence we
# start writing from the next height
pos = (height_start + 1) * 8
@ -356,7 +369,7 @@ class DB(object):
def dynamic_header_offset(self, height):
assert not self.coin.STATIC_BLOCK_HEADERS
offset, = unpack('<Q', self.headers_offsets_file.read(height * 8, 8))
offset, = unpack_le_uint64(self.headers_offsets_file.read(height * 8, 8))
return offset
def dynamic_header_len(self, height):
@ -402,7 +415,7 @@ class DB(object):
return await run_in_thread(read_headers)
def fs_tx_hash(self, tx_num):
'''Return a par (tx_hash, tx_height) for the given tx number.
'''Return a pair (tx_hash, tx_height) for the given tx number.
If the tx_height is not on disk, returns (None, tx_height).'''
tx_height = bisect_right(self.tx_counts, tx_num)
@ -412,6 +425,25 @@ class DB(object):
tx_hash = self.hashes_file.read(tx_num * 32, 32)
return tx_hash, tx_height
def fs_tx_hashes_at_blockheight(self, block_height):
'''Return a list of tx_hashes at given block height,
in the same order as in the block.
'''
if block_height > self.db_height:
raise self.DBError(f'block {block_height:,d} not on disk (>{self.db_height:,d})')
assert block_height >= 0
if block_height > 0:
first_tx_num = self.tx_counts[block_height - 1]
else:
first_tx_num = 0
num_txs_in_block = self.tx_counts[block_height] - first_tx_num
tx_hashes = self.hashes_file.read(first_tx_num * 32, num_txs_in_block * 32)
assert num_txs_in_block == len(tx_hashes) // 32
return [tx_hashes[idx * 32: (idx+1) * 32] for idx in range(num_txs_in_block)]
async def tx_hashes_at_blockheight(self, block_height):
return await run_in_thread(self.fs_tx_hashes_at_blockheight, block_height)
async def fs_block_hashes(self, height, count):
headers_concat, headers_count = await self.read_headers(height, count)
if headers_count != count:
@ -454,7 +486,7 @@ class DB(object):
def undo_key(self, height):
'''DB key for undo information at the given height.'''
return b'U' + pack('>I', height)
return b'U' + pack_be_uint32(height)
def read_undo_info(self, height):
'''Read undo information from a file for the current height.'''
@ -493,8 +525,8 @@ class DB(object):
prefix = b'U'
min_height = self.min_undo_height(self.db_height)
keys = []
for key, hist in self.utxo_db.iterator(prefix=prefix):
height, = unpack('>I', key[-4:])
for key, _hist in self.utxo_db.iterator(prefix=prefix):
height, = unpack_be_uint32(key[-4:])
if height >= min_height:
break
keys.append(key)
@ -559,6 +591,10 @@ class DB(object):
self.fs_tx_count = self.db_tx_count
self.last_flush_tx_count = self.fs_tx_count
# Upgrade DB
if self.db_version != max(self.DB_VERSIONS):
self.upgrade_db()
# Log some stats
self.logger.info('DB version: {:d}'.format(self.db_version))
self.logger.info('coin: {}'.format(self.coin.NAME))
@ -572,6 +608,66 @@ class DB(object):
self.logger.info('sync time so far: {}'
.format(util.formatted_time(self.wall_time)))
def upgrade_db(self):
self.logger.info('DB version: {:d}'.format(self.db_version))
self.logger.info('Upgrading your DB; this can take some time...')
def upgrade_u_prefix(prefix):
count = 0
with self.utxo_db.write_batch() as batch:
batch_delete = batch.delete
batch_put = batch.put
# Key: b'u' + address_hashX + tx_idx + tx_num
for db_key, db_value in self.utxo_db.iterator(prefix=prefix):
if len(db_key) != 18:
break
count += 1
batch_delete(db_key)
batch_put(db_key[:14] + b'\0\0' + db_key[14:], db_value)
return count
last = time.time()
count = 0
for cursor in range(65536):
prefix = b'u' + pack_be_uint16(cursor)
count += upgrade_u_prefix(prefix)
now = time.time()
if now > last + 10:
last = now
self.logger.info(f'DB 1 of 2: {count:,d} entries updated, '
f'{cursor * 100 / 65536:.1f}% complete')
self.logger.info('DB 1 of 2 upgraded successfully')
def upgrade_h_prefix(prefix):
count = 0
with self.utxo_db.write_batch() as batch:
batch_delete = batch.delete
batch_put = batch.put
# Key: b'h' + compressed_tx_hash + tx_idx + tx_num
for db_key, db_value in self.utxo_db.iterator(prefix=prefix):
if len(db_key) != 11:
break
count += 1
batch_delete(db_key)
batch_put(db_key[:7] + b'\0\0' + db_key[7:], db_value)
return count
last = time.time()
count = 0
for cursor in range(65536):
prefix = b'h' + pack_be_uint16(cursor)
count += upgrade_h_prefix(prefix)
now = time.time()
if now > last + 10:
last = now
self.logger.info(f'DB 2 of 2: {count:,d} entries updated, '
f'{cursor * 100 / 65536:.1f}% complete')
self.db_version = max(self.DB_VERSIONS)
with self.utxo_db.write_batch() as batch:
self.write_utxo_state(batch)
self.logger.info('DB 2 of 2 upgraded successfully')
def write_utxo_state(self, batch):
'''Write (UTXO) state to the batch.'''
state = {
@ -596,13 +692,13 @@ class DB(object):
def read_utxos():
utxos = []
utxos_append = utxos.append
s_unpack = unpack
unpack_2_le_uint32 = Struct('<II').unpack
# Key: b'u' + address_hashX + tx_idx + tx_num
# Value: the UTXO value as a 64-bit unsigned integer
prefix = b'u' + hashX
for db_key, db_value in self.utxo_db.iterator(prefix=prefix):
tx_pos, tx_num = s_unpack('<HI', db_key[-6:])
value, = unpack('<Q', db_value)
tx_pos, tx_num = unpack_2_le_uint32(db_key[-8:])
value, = unpack_le_uint64(db_value)
tx_hash, height = self.fs_tx_hash(tx_num)
utxos_append(UTXO(tx_num, tx_pos, tx_hash, height, value))
return utxos
@ -626,7 +722,7 @@ class DB(object):
for each prevout.
'''
def lookup_hashX(tx_hash, tx_idx):
idx_packed = pack('<H', tx_idx)
idx_packed = pack_le_uint32(tx_idx)
# Key: b'h' + compressed_tx_hash + tx_idx + tx_num
# Value: hashX
@ -635,8 +731,8 @@ class DB(object):
# Find which entry, if any, the TX_HASH matches.
for db_key, hashX in self.utxo_db.iterator(prefix=prefix):
tx_num_packed = db_key[-4:]
tx_num, = unpack('<I', tx_num_packed)
hash, height = self.fs_tx_hash(tx_num)
tx_num, = unpack_le_uint32(tx_num_packed)
hash, _height = self.fs_tx_hash(tx_num)
if hash == tx_hash:
return hashX, idx_packed + tx_num_packed
return None, None
@ -657,7 +753,7 @@ class DB(object):
# This can happen if the DB was updated between
# getting the hashXs and getting the UTXOs
return None
value, = unpack('<Q', db_value)
value, = unpack_le_uint64(db_value)
return hashX, value
return [lookup_utxo(*hashX_pair) for hashX_pair in hashX_pairs]

View File

@ -9,16 +9,15 @@
import re
import resource
from collections import namedtuple
from ipaddress import ip_address
from ipaddress import IPv4Address, IPv6Address
from aiorpcx import Service, ServicePart
from electrumx.lib.coins import Coin
from electrumx.lib.env_base import EnvBase
import electrumx.lib.util as lib_util
NetIdentity = namedtuple('NetIdentity', 'host tcp_port ssl_port nick_suffix')
class ServiceError(Exception):
pass
class Env(EnvBase):
@ -28,13 +27,20 @@ class Env(EnvBase):
'''
# Peer discovery
PD_OFF, PD_SELF, PD_ON = range(3)
PD_OFF, PD_SELF, PD_ON = ('OFF', 'SELF', 'ON')
SSL_PROTOCOLS = {'ssl', 'wss'}
KNOWN_PROTOCOLS = {'ssl', 'tcp', 'ws', 'wss', 'rpc'}
def __init__(self, coin=None):
super().__init__()
self.obsolete(['UTXO_MB', 'HIST_MB', 'NETWORK'])
self.obsolete(["MAX_SUBSCRIPTIONS", "MAX_SUBS", "MAX_SESSION_SUBS", "BANDWIDTH_LIMIT",
"HOST", "TCP_PORT", "SSL_PORT", "RPC_HOST", "RPC_PORT", "REPORT_HOST",
"REPORT_TCP_PORT", "REPORT_SSL_PORT", "REPORT_HOST_TOR",
"REPORT_TCP_PORT_TOR", "REPORT_SSL_PORT_TOR"])
# Core items
self.db_dir = self.required('DB_DIRECTORY')
self.db_engine = self.default('DB_ENGINE', 'leveldb')
self.daemon_url = self.required('DAEMON_URL')
if coin is not None:
assert issubclass(coin, Coin)
@ -43,121 +49,123 @@ class Env(EnvBase):
coin_name = self.required('COIN').strip()
network = self.default('NET', 'mainnet').strip()
self.coin = Coin.lookup_coin_class(coin_name, network)
self.cache_MB = self.integer('CACHE_MB', 1200)
self.host = self.default('HOST', 'localhost')
self.reorg_limit = self.integer('REORG_LIMIT', self.coin.REORG_LIMIT)
# Server stuff
self.tcp_port = self.integer('TCP_PORT', None)
self.ssl_port = self.integer('SSL_PORT', None)
if self.ssl_port:
self.ssl_certfile = self.required('SSL_CERTFILE')
self.ssl_keyfile = self.required('SSL_KEYFILE')
self.rpc_port = self.integer('RPC_PORT', 8000)
self.max_subscriptions = self.integer('MAX_SUBSCRIPTIONS', 10000)
self.banner_file = self.default('BANNER_FILE', None)
self.tor_banner_file = self.default('TOR_BANNER_FILE',
self.banner_file)
self.anon_logs = self.boolean('ANON_LOGS', False)
self.log_sessions = self.integer('LOG_SESSIONS', 3600)
# Peer discovery
self.peer_discovery = self.peer_discovery_enum()
self.peer_announce = self.boolean('PEER_ANNOUNCE', True)
self.force_proxy = self.boolean('FORCE_PROXY', False)
self.tor_proxy_host = self.default('TOR_PROXY_HOST', 'localhost')
self.tor_proxy_port = self.integer('TOR_PROXY_PORT', None)
# The electrum client takes the empty string as unspecified
self.donation_address = self.default('DONATION_ADDRESS', '')
# Server limits to help prevent DoS
self.max_send = self.integer('MAX_SEND', 1000000)
self.max_subs = self.integer('MAX_SUBS', 250000)
self.max_sessions = self.sane_max_sessions()
self.max_session_subs = self.integer('MAX_SESSION_SUBS', 50000)
self.bandwidth_limit = self.integer('BANDWIDTH_LIMIT', 2000000)
self.session_timeout = self.integer('SESSION_TIMEOUT', 600)
self.drop_client = self.custom("DROP_CLIENT", None, re.compile)
# Identities
clearnet_identity = self.clearnet_identity()
tor_identity = self.tor_identity(clearnet_identity)
self.identities = [identity
for identity in (clearnet_identity, tor_identity)
if identity is not None]
# Misc
self.db_engine = self.default('DB_ENGINE', 'leveldb')
self.banner_file = self.default('BANNER_FILE', None)
self.tor_banner_file = self.default('TOR_BANNER_FILE',
self.banner_file)
self.anon_logs = self.boolean('ANON_LOGS', False)
self.log_sessions = self.integer('LOG_SESSIONS', 3600)
self.log_level = self.default('LOG_LEVEL', 'info').upper()
self.donation_address = self.default('DONATION_ADDRESS', '')
self.drop_client = self.custom("DROP_CLIENT", None, re.compile)
self.blacklist_url = self.default('BLACKLIST_URL', self.coin.BLACKLIST_URL)
self.cache_MB = self.integer('CACHE_MB', 1200)
self.reorg_limit = self.integer('REORG_LIMIT', self.coin.REORG_LIMIT)
# Server limits to help prevent DoS
self.max_send = self.integer('MAX_SEND', self.coin.DEFAULT_MAX_SEND)
self.max_sessions = self.sane_max_sessions()
self.cost_soft_limit = self.integer('COST_SOFT_LIMIT', 1000)
self.cost_hard_limit = self.integer('COST_HARD_LIMIT', 10000)
self.bw_unit_cost = self.integer('BANDWIDTH_UNIT_COST', 5000)
self.initial_concurrent = self.integer('INITIAL_CONCURRENT', 10)
self.request_sleep = self.integer('REQUEST_SLEEP', 2500)
self.request_timeout = self.integer('REQUEST_TIMEOUT', 30)
self.session_timeout = self.integer('SESSION_TIMEOUT', 600)
# Services last - uses some env vars above
self.services = self.services_to_run()
if {service.protocol for service in self.services}.intersection(self.SSL_PROTOCOLS):
self.ssl_certfile = self.required('SSL_CERTFILE')
self.ssl_keyfile = self.required('SSL_KEYFILE')
self.report_services = self.services_to_report()
def sane_max_sessions(self):
'''Return the maximum number of sessions to permit. Normally this
is MAX_SESSIONS. However, to prevent open file exhaustion, ajdust
downwards if running with a small open file rlimit.'''
env_value = self.integer('MAX_SESSIONS', 1000)
nofile_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
# We give the DB 250 files; allow ElectrumX 100 for itself
value = max(0, min(env_value, nofile_limit - 350))
if value < env_value:
self.logger.warning('lowered maximum sessions from {:,d} to {:,d} '
'because your open file limit is {:,d}'
.format(env_value, value, nofile_limit))
# No resource module on Windows
try:
import resource
nofile_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
# We give the DB 250 files; allow ElectrumX 100 for itself
value = max(0, min(env_value, nofile_limit - 350))
if value < env_value:
self.logger.warning('lowered maximum sessions from {:,d} to {:,d} '
'because your open file limit is {:,d}'
.format(env_value, value, nofile_limit))
except ImportError:
value = 512 # that is what returned by stdio's _getmaxstdio()
return value
def clearnet_identity(self):
host = self.default('REPORT_HOST', None)
if host is None:
return None
try:
ip = ip_address(host)
except ValueError:
bad = (not lib_util.is_valid_hostname(host)
or host.lower() == 'localhost')
else:
bad = (ip.is_multicast or ip.is_unspecified
or (ip.is_private and self.peer_announce))
if bad:
raise self.Error('"{}" is not a valid REPORT_HOST'.format(host))
tcp_port = self.integer('REPORT_TCP_PORT', self.tcp_port) or None
ssl_port = self.integer('REPORT_SSL_PORT', self.ssl_port) or None
if tcp_port == ssl_port:
raise self.Error('REPORT_TCP_PORT and REPORT_SSL_PORT '
'both resolve to {}'.format(tcp_port))
return NetIdentity(
host,
tcp_port,
ssl_port,
''
)
def _parse_services(self, services_str, default_func):
result = []
for service_str in services_str.split(','):
if not service_str:
continue
try:
service = Service.from_string(service_str, default_func=default_func)
except Exception as e:
raise ServiceError(f'"{service_str}" invalid: {e}') from None
if service.protocol not in self.KNOWN_PROTOCOLS:
raise ServiceError(f'"{service_str}" invalid: unknown protocol')
result.append(service)
def tor_identity(self, clearnet):
host = self.default('REPORT_HOST_TOR', None)
if host is None:
return None
if not host.endswith('.onion'):
raise self.Error('tor host "{}" must end with ".onion"'
.format(host))
# Find duplicate addresses
service_map = {service.address: [] for service in result}
for service in result:
service_map[service.address].append(service)
for address, services in service_map.items():
if len(services) > 1:
raise ServiceError(f'address {address} has multiple services')
def port(port_kind):
'''Returns the clearnet identity port, if any and not zero,
otherwise the listening port.'''
result = 0
if clearnet:
result = getattr(clearnet, port_kind)
return result or getattr(self, port_kind)
return result
tcp_port = self.integer('REPORT_TCP_PORT_TOR',
port('tcp_port')) or None
ssl_port = self.integer('REPORT_SSL_PORT_TOR',
port('ssl_port')) or None
if tcp_port == ssl_port:
raise self.Error('REPORT_TCP_PORT_TOR and REPORT_SSL_PORT_TOR '
'both resolve to {}'.format(tcp_port))
def services_to_run(self):
def default_part(protocol, part):
return default_services.get(protocol, {}).get(part)
return NetIdentity(
host,
tcp_port,
ssl_port,
'_tor',
)
default_services = {protocol: {ServicePart.HOST: 'all_interfaces'}
for protocol in self.KNOWN_PROTOCOLS}
default_services['rpc'] = {ServicePart.HOST: 'localhost', ServicePart.PORT: 8000}
services = self._parse_services(self.default('SERVICES', ''), default_part)
def hosts_dict(self):
return {identity.host: {'tcp_port': identity.tcp_port,
'ssl_port': identity.ssl_port}
for identity in self.identities}
# Find onion hosts
for service in services:
if str(service.host).endswith('.onion'):
raise ServiceError(f'bad host for SERVICES: {service}')
return services
def services_to_report(self):
services = self._parse_services(self.default('REPORT_SERVICES', ''), None)
for service in services:
if service.protocol == 'rpc':
raise ServiceError(f'bad protocol for REPORT_SERVICES: {service.protocol}')
if isinstance(service.host, (IPv4Address, IPv6Address)):
ip_addr = service.host
if (ip_addr.is_multicast or ip_addr.is_unspecified or
(ip_addr.is_private and self.peer_announce)):
raise ServiceError(f'bad IP address for REPORT_SERVICES: {ip_addr}')
elif service.host.lower() == 'localhost':
raise ServiceError(f'bad host for REPORT_SERVICES: {service.host}')
return services
def peer_discovery_enum(self):
pd = self.default('PEER_DISCOVERY', 'on').strip().lower()

View File

@ -30,6 +30,10 @@ class History(object):
self.max_hist_row_entries = 12500
self.unflushed = defaultdict(partial(array.array, 'I'))
self.unflushed_count = 0
self.flush_count = 0
self.comp_flush_count = -1
self.comp_cursor = -1
self.db_version = max(self.DB_VERSIONS)
self.db = None
def open_db(self, db_class, for_sync, utxo_flush_count, compacting):
@ -80,7 +84,7 @@ class History(object):
'excess history flushes...')
keys = []
for key, hist in self.db.iterator(prefix=b''):
for key, _hist in self.db.iterator(prefix=b''):
flush_id, = unpack_be_uint16_from(key[-2:])
if flush_id > utxo_flush_count:
keys.append(key)
@ -179,7 +183,7 @@ class History(object):
transactions. By default yields at most 1000 entries. Set
limit to None to get them all. '''
limit = util.resolve_limit(limit)
for key, hist in self.db.iterator(prefix=hashX):
for _key, hist in self.db.iterator(prefix=hashX):
a = array.array('I')
a.frombytes(hist)
for tx_num in a:

View File

@ -38,6 +38,10 @@ class MemPoolTxSummary(object):
has_unconfirmed_inputs = attr.ib()
class DBSyncError(Exception):
pass
class MemPoolAPI(ABC):
'''A concrete instance of this class is passed to the MemPool object
and used by it to query DB and blockchain state.'''
@ -52,6 +56,10 @@ class MemPoolAPI(ABC):
for any reason, without actually querying it.
'''
@abstractmethod
def db_height(self):
'''Return the height flushed to the on-disk DB.'''
@abstractmethod
async def mempool_hashes(self):
'''Query bitcoind for the hashes of all transactions in its
@ -93,7 +101,7 @@ class MemPool(object):
hashXs: hashX -> set of all hashes of txs touching the hashX
'''
def __init__(self, coin, api, refresh_secs=5.0, log_status_secs=120.0):
def __init__(self, coin, api, refresh_secs=5.0, log_status_secs=60.0):
assert isinstance(api, MemPoolAPI)
self.coin = coin
self.api = api
@ -115,7 +123,8 @@ class MemPool(object):
elapsed = time.time() - start
self.logger.info(f'synced in {elapsed:.2f}s')
while True:
self.logger.info(f'{len(self.txs):,d} txs '
mempool_size = sum(tx.size for tx in self.txs.values()) / 1_000_000
self.logger.info(f'{len(self.txs):,d} txs {mempool_size:.2f} MB '
f'touching {len(self.hashXs):,d} addresses')
await sleep(self.log_status_secs)
await synchronized_event.wait()
@ -193,7 +202,7 @@ class MemPool(object):
sum(v for _, v in tx.out_pairs)))
txs[hash] = tx
for hashX, value in itertools.chain(tx.in_pairs, tx.out_pairs):
for hashX, _value in itertools.chain(tx.in_pairs, tx.out_pairs):
touched.add(hashX)
hashXs[hashX].add(hash)
@ -201,24 +210,36 @@ class MemPool(object):
async def _refresh_hashes(self, synchronized_event):
'''Refresh our view of the daemon's mempool.'''
# Touched accumulates between calls to on_mempool and each
# call transfers ownership
touched = set()
while True:
height = self.api.cached_height()
hex_hashes = await self.api.mempool_hashes()
if height != await self.api.height():
continue
hashes = set(hex_str_to_hash(hh) for hh in hex_hashes)
async with self.lock:
touched = await self._process_mempool(hashes)
synchronized_event.set()
synchronized_event.clear()
await self.api.on_mempool(touched, height)
try:
async with self.lock:
await self._process_mempool(hashes, touched, height)
except DBSyncError:
# The UTXO DB is not at the same height as the
# mempool; wait and try again
self.logger.debug('waiting for DB to sync')
else:
synchronized_event.set()
synchronized_event.clear()
await self.api.on_mempool(touched, height)
touched = set()
await sleep(self.refresh_secs)
async def _process_mempool(self, all_hashes):
async def _process_mempool(self, all_hashes, touched, mempool_height):
# Re-sync with the new set of hashes
txs = self.txs
hashXs = self.hashXs
touched = set()
if mempool_height != self.api.db_height():
raise DBSyncError
# First handle txs that have disappeared
for tx_hash in set(txs).difference(all_hashes):
@ -238,6 +259,9 @@ class MemPool(object):
for hashes in chunks(new_hashes, 200):
coro = self._fetch_and_accept(hashes, all_hashes, touched)
await group.spawn(coro)
if mempool_height != self.api.db_height():
raise DBSyncError
tx_map = {}
utxo_map = {}
async for task in group:
@ -252,7 +276,7 @@ class MemPool(object):
tx_map, utxo_map = self._accept_transactions(tx_map, utxo_map,
touched)
if tx_map:
self.logger.info(f'{len(tx_map)} txs dropped')
self.logger.error(f'{len(tx_map)} txs dropped')
return touched

View File

@ -8,23 +8,26 @@
'''Peer management.'''
import asyncio
from ipaddress import IPv4Address, IPv6Address
import json
import random
import socket
import ssl
import time
from collections import defaultdict, Counter
from aiorpcx import (Connector, RPCSession, SOCKSProxy,
Notification, handler_invocation,
import aiohttp
from aiorpcx import (connect_rs, RPCSession, SOCKSProxy, Notification, handler_invocation,
SOCKSError, RPCError, TaskTimeout, TaskGroup, Event,
sleep, run_in_thread, ignore_after, timeout_after)
sleep, ignore_after)
from electrumx.lib.peer import Peer
from electrumx.lib.util import class_logger, protocol_tuple
from electrumx.lib.util import class_logger
PEER_GOOD, PEER_STALE, PEER_NEVER, PEER_BAD = range(4)
STALE_SECS = 24 * 3600
STALE_SECS = 3 * 3600
WAKEUP_SECS = 300
PEER_ADD_PAUSE = 600
class BadPeerError(Exception):
@ -49,7 +52,7 @@ class PeerSession(RPCSession):
await handler_invocation(None, request) # Raises
class PeerManager(object):
class PeerManager:
'''Looks after the DB of peer network servers.
Attempts to maintain a connection with up to 8 peers.
@ -62,10 +65,10 @@ class PeerManager(object):
self.env = env
self.db = db
# Our clearnet and Tor Peers, if any
# Our reported clearnet and Tor Peers, if any
sclass = env.coin.SESSIONCLS
self.myselves = [Peer(ident.host, sclass.server_features(env), 'env')
for ident in env.identities]
self.myselves = [Peer(str(service.host), sclass.server_features(env), 'env')
for service in env.report_services]
self.server_version_args = sclass.server_version_args()
# Peers have one entry per hostname. Once connected, the
# ip_addr property is either None, an onion peer, or the
@ -75,6 +78,9 @@ class PeerManager(object):
self.permit_onion_peer_time = time.time()
self.proxy = None
self.group = TaskGroup()
self.recent_peer_adds = {}
# refreshed
self.blacklist = set()
def _my_clearnet_peer(self):
'''Returns the clearnet peer representing this server, if any.'''
@ -112,9 +118,8 @@ class PeerManager(object):
return None
return my.features
def _permit_new_onion_peer(self):
def _permit_new_onion_peer(self, now):
'''Accept a new onion peer only once per random time interval.'''
now = time.time()
if now < self.permit_onion_peer_time:
return False
self.permit_onion_peer_time = now + random.randrange(0, 1200)
@ -129,12 +134,49 @@ class PeerManager(object):
for real_name in self.env.coin.PEERS)
await self._note_peers(imported_peers, limit=None)
async def _refresh_blacklist(self):
url = self.env.blacklist_url
if not url:
return
async def read_blacklist():
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
text = await response.text()
return set(entry.lower() for entry in json.loads(text))
while True:
try:
self.blacklist = await read_blacklist()
except Exception as e:
self.logger.error(f'could not retrieve blacklist from {url}: {e}')
else:
self.logger.info(f'blacklist from {url} has {len(self.blacklist)} entries')
# Got new blacklist. Now check our current peers against it
for peer in self.peers:
if self._is_blacklisted(peer):
peer.retry_event.set()
await sleep(600)
def _is_blacklisted(self, peer):
host = peer.host.lower()
second_level_domain = '*.' + '.'.join(host.split('.')[-2:])
return any(item in self.blacklist
for item in (host, second_level_domain, peer.ip_addr))
def _get_recent_good_peers(self):
cutoff = time.time() - STALE_SECS
recent = [peer for peer in self.peers
if peer.last_good > cutoff and
not peer.bad and peer.is_public]
recent = [peer for peer in recent if not self._is_blacklisted(peer)]
return recent
async def _detect_proxy(self):
'''Detect a proxy if we don't have one and some time has passed since
the last attempt.
If found self.proxy is set to a SOCKSProxy instance, otherwise
None.
If found self.proxy is set to a SOCKSProxy instance, otherwise None.
'''
host = self.env.tor_proxy_host
if self.env.tor_proxy_port is None:
@ -144,7 +186,7 @@ class PeerManager(object):
while True:
self.logger.info(f'trying to detect proxy on "{host}" '
f'ports {ports}')
proxy = await SOCKSProxy.auto_detect_host(host, ports, None)
proxy = await SOCKSProxy.auto_detect_at_host(host, ports, None)
if proxy:
self.proxy = proxy
self.logger.info(f'detected {proxy}')
@ -152,22 +194,24 @@ class PeerManager(object):
self.logger.info('no proxy detected, will try later')
await sleep(900)
async def _note_peers(self, peers, limit=2, check_ports=False,
source=None):
async def _note_peers(self, peers, limit=2, check_ports=False, source=None):
'''Add a limited number of peers that are not already present.'''
new_peers = []
match_set = self.peers.copy()
for peer in peers:
if not peer.is_public or (peer.is_tor and not self.proxy):
continue
matches = peer.matches(self.peers)
if not matches:
matches = peer.matches(match_set)
if matches:
if check_ports:
for match in matches:
if match.check_ports(peer):
self.logger.info(f'ports changed for {peer}')
match.retry_event.set()
else:
match_set.add(peer)
new_peers.append(peer)
elif check_ports:
for match in matches:
if match.check_ports(peer):
self.logger.info(f'ports changed for {peer}')
match.retry_event.set()
if new_peers:
source = source or new_peers[0].source
@ -182,6 +226,8 @@ class PeerManager(object):
self.peers.add(peer)
await self.group.spawn(self._monitor_peer(peer))
return True
async def _monitor_peer(self, peer):
# Stop monitoring if we were dropped (a duplicate peer)
while peer in self.peers:
@ -209,26 +255,26 @@ class PeerManager(object):
if kind == 'SSL':
kwargs['ssl'] = ssl.SSLContext(ssl.PROTOCOL_TLS)
host = self.env.cs_host(for_rpc=False)
if isinstance(host, list):
host = host[0]
if self.env.force_proxy or peer.is_tor:
if not self.proxy:
return
kwargs['proxy'] = self.proxy
kwargs['resolve'] = not peer.is_tor
elif host:
else:
# Use our listening Host/IP for outgoing non-proxy
# connections so our peers see the correct source.
kwargs['local_addr'] = (host, None)
local_hosts = {service.host for service in self.env.services
if isinstance(service.host, (IPv4Address, IPv6Address))
and service.protocol != 'rpc'}
if local_hosts:
kwargs['local_addr'] = (str(local_hosts.pop()), None)
peer_text = f'[{peer}:{port} {kind}]'
try:
async with timeout_after(120 if peer.is_tor else 30):
async with Connector(PeerSession, peer.host, port,
**kwargs) as session:
await self._verify_peer(session, peer)
async with connect_rs(peer.host, port, session_factory=PeerSession,
**kwargs) as session:
session.sent_request_timeout = 120 if peer.is_tor else 30
await self._verify_peer(session, peer)
is_good = True
break
except BadPeerError as e:
@ -260,6 +306,9 @@ class PeerManager(object):
match.retry_event.set()
elif peer.host in match.features['hosts']:
match.update_features_from_peer(peer)
# Trim this data structure
self.recent_peer_adds = {k: v for k, v in self.recent_peer_adds.items()
if v + PEER_ADD_PAUSE < now}
else:
# Forget the peer if long-term unreachable
if peer.last_good and not peer.bad:
@ -273,10 +322,37 @@ class PeerManager(object):
return False
async def _verify_peer(self, session, peer):
# store IP address for peer
if not peer.is_tor:
address = session.peer_address()
if address:
peer.ip_addr = address[0]
address = session.remote_address()
if isinstance(address.host, (IPv4Address, IPv6Address)):
peer.ip_addr = str(address.host)
if self._is_blacklisted(peer):
raise BadPeerError('blacklisted')
# Bucket good recent peers; forbid many servers from similar IPs
# FIXME there's a race here, when verifying multiple peers
# that belong to the same bucket ~simultaneously
recent_peers = self._get_recent_good_peers()
if peer in recent_peers:
recent_peers.remove(peer)
onion_peers = []
buckets = defaultdict(list)
for other_peer in recent_peers:
if other_peer.is_tor:
onion_peers.append(other_peer)
else:
buckets[other_peer.bucket_for_internal_purposes()].append(other_peer)
if peer.is_tor:
# keep number of onion peers below half of all peers,
# but up to 100 is OK regardless
if len(onion_peers) > len(recent_peers) // 2 >= 100:
raise BadPeerError('too many onion peers already')
else:
bucket = peer.bucket_for_internal_purposes()
if buckets[bucket]:
raise BadPeerError(f'too many peers already in bucket {bucket}')
# server.version goes first
message = 'server.version'
@ -286,13 +362,12 @@ class PeerManager(object):
# Protocol version 1.1 returns a pair with the version first
if len(result) != 2 or not all(isinstance(x, str) for x in result):
raise BadPeerError(f'bad server.version result: {result}')
server_version, protocol_version = result
server_version, _protocol_version = result
peer.server_version = server_version
peer.features['server_version'] = server_version
ptuple = protocol_tuple(protocol_version)
async with TaskGroup() as g:
await g.spawn(self._send_headers_subscribe(session, peer, ptuple))
await g.spawn(self._send_headers_subscribe(session))
await g.spawn(self._send_server_features(session, peer))
peers_task = await g.spawn(self._send_peers_subscribe
(session, peer))
@ -300,22 +375,20 @@ class PeerManager(object):
# Process reported peers if remote peer is good
peers = peers_task.result()
await self._note_peers(peers)
features = self._features_to_register(peer, peers)
if features:
self.logger.info(f'registering ourself with {peer}')
# We only care to wait for the response
await session.send_request('server.add_peer', [features])
async def _send_headers_subscribe(self, session, peer, ptuple):
async def _send_headers_subscribe(self, session):
message = 'blockchain.headers.subscribe'
result = await session.send_request(message)
assert_good(message, result, dict)
our_height = self.db.db_height
if ptuple < (1, 3):
their_height = result.get('block_height')
else:
their_height = result.get('height')
their_height = result.get('height')
if not isinstance(their_height, int):
raise BadPeerError(f'invalid height {their_height}')
if abs(our_height - their_height) > 5:
@ -325,24 +398,13 @@ class PeerManager(object):
# Check prior header too in case of hard fork.
check_height = min(our_height, their_height)
raw_header = await self.db.raw_header(check_height)
if ptuple >= (1, 4):
ours = raw_header.hex()
message = 'blockchain.block.header'
theirs = await session.send_request(message, [check_height])
assert_good(message, theirs, str)
if ours != theirs:
raise BadPeerError(f'our header {ours} and '
f'theirs {theirs} differ')
else:
ours = self.env.coin.electrum_header(raw_header, check_height)
ours = ours.get('prev_block_hash')
message = 'blockchain.block.get_header'
theirs = await session.send_request(message, [check_height])
assert_good(message, theirs, dict)
theirs = theirs.get('prev_block_hash')
if ours != theirs:
raise BadPeerError(f'our header hash {ours} and '
f'theirs {theirs} differ')
ours = raw_header.hex()
message = 'blockchain.block.header'
theirs = await session.send_request(message, [check_height])
assert_good(message, theirs, str)
if ours != theirs:
raise BadPeerError(f'our header {ours} and '
f'theirs {theirs} differ')
async def _send_server_features(self, session, peer):
message = 'server.features'
@ -351,7 +413,7 @@ class PeerManager(object):
hosts = [host.lower() for host in features.get('hosts', {})]
if self.env.coin.GENESIS_HASH != features.get('genesis_hash'):
raise BadPeerError('incorrect genesis hash')
elif peer.host.lower() in hosts:
if peer.host.lower() in hosts:
peer.update_features(features)
else:
raise BadPeerError(f'not listed in own hosts list {hosts}')
@ -382,24 +444,19 @@ class PeerManager(object):
2) Verifying connectivity of new peers.
3) Retrying old peers at regular intervals.
'''
self.logger.info(f'peer discovery: {self.env.peer_discovery}')
if self.env.peer_discovery != self.env.PD_ON:
self.logger.info('peer discovery is disabled')
return
self.logger.info(f'beginning peer discovery. Force use of '
f'proxy: {self.env.force_proxy}')
forever = Event()
self.logger.info(f'announce ourself: {self.env.peer_announce}')
self.logger.info(f'my clearnet self: {self._my_clearnet_peer()}')
self.logger.info(f'force use of proxy: {self.env.force_proxy}')
self.logger.info(f'beginning peer discovery...')
async with self.group as group:
await group.spawn(forever.wait())
await group.spawn(self._refresh_blacklist())
await group.spawn(self._detect_proxy())
await group.spawn(self._import_peers())
# Consume tasks as they complete, logging unexpected failures
async for task in group:
if not task.cancelled():
try:
task.result()
except Exception:
self.logger.exception('task failed unexpectedly')
def info(self):
'''The number of peers.'''
@ -417,12 +474,14 @@ class PeerManager(object):
'''Add a peer passed by the admin over LocalRPC.'''
await self._note_peers([Peer.from_real_name(real_name, 'RPC')])
async def on_add_peer(self, features, source_info):
async def on_add_peer(self, features, source_addr):
'''Add a peer (but only if the peer resolves to the source).'''
if not source_info:
if self.env.peer_discovery != self.env.PD_ON:
return False
if not source_addr:
self.logger.info('ignored add_peer request: no source info')
return False
source = source_info[0]
source = str(source_addr.host)
peers = Peer.peers_from_features(features, source)
if not peers:
self.logger.info('ignored add_peer request: no peers given')
@ -431,8 +490,20 @@ class PeerManager(object):
# Just look at the first peer, require it
peer = peers[0]
host = peer.host
now = time.time()
# Rate limit peer adds by domain to one every 10 minutes
if peer.ip_address is not None:
bucket = 'ip_addr'
else:
bucket = '.'.join(host.lower().split('.')[-2:])
last = self.recent_peer_adds.get(bucket, 0)
self.recent_peer_adds[bucket] = now
if last + PEER_ADD_PAUSE >= now:
return False
if peer.is_tor:
permit = self._permit_new_onion_peer()
permit = self._permit_new_onion_peer(now)
reason = 'rate limiting'
else:
getaddrinfo = asyncio.get_event_loop().getaddrinfo
@ -446,8 +517,7 @@ class PeerManager(object):
reason = 'source-destination mismatch'
if permit:
self.logger.info(f'accepted add_peer request from {source} '
f'for {host}')
self.logger.info(f'accepted add_peer request from {source} for {host}')
await self._note_peers([peer], check_ports=True)
else:
self.logger.warning(f'rejected add_peer request from {source} '
@ -462,23 +532,21 @@ class PeerManager(object):
Additionally, if we don't have onion routing, we return a few
hard-coded onion servers.
'''
cutoff = time.time() - STALE_SECS
recent = [peer for peer in self.peers
if peer.last_good > cutoff and
not peer.bad and peer.is_public]
onion_peers = []
recent = self._get_recent_good_peers()
# Always report ourselves if valid (even if not public)
cutoff = time.time() - STALE_SECS
peers = set(myself for myself in self.myselves
if myself.last_good > cutoff)
# Bucket the clearnet peers and select up to two from each
onion_peers = []
buckets = defaultdict(list)
for peer in recent:
if peer.is_tor:
onion_peers.append(peer)
else:
buckets[peer.bucket()].append(peer)
buckets[peer.bucket_for_external_interface()].append(peer)
for bucket_peers in buckets.values():
random.shuffle(bucket_peers)
peers.update(bucket_peers[:2])
@ -491,10 +559,10 @@ class PeerManager(object):
return [peer.to_tuple() for peer in peers]
def proxy_peername(self):
'''Return the peername of the proxy, if there is a proxy, otherwise
def proxy_address(self):
'''Return the NetAddress of the proxy, if there is a proxy, otherwise
None.'''
return self.proxy.peername if self.proxy else None
return self.proxy.address if self.proxy else None
def rpc_data(self):
'''Peer data for the peers RPC method.'''

File diff suppressed because it is too large Load Diff

82
electrumx_compact_history Executable file
View File

@ -0,0 +1,82 @@
#!/usr/bin/env python3
#
# Copyright (c) 2017, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''Script to compact the history database. This should save space and
will reset the flush counter to a low number, avoiding overflow when
the flush count reaches 65,536.
This needs to lock the database so ElectrumX must not be running -
shut it down cleanly first.
It is recommended you run this script with the same environment as
ElectrumX. However it is intended to be runnable with just
DB_DIRECTORY and COIN set (COIN defaults as for ElectrumX).
If you use daemon tools, you might run this script like so:
envdir /path/to/the/environment/directory ./compact_history.py
Depending on your hardware this script may take up to 6 hours to
complete; it logs progress regularly.
Compaction can be interrupted and restarted harmlessly and will pick
up where it left off. However, if you restart ElectrumX without
running the compaction to completion, it will not benefit and
subsequent compactions will restart from the beginning.
'''
import asyncio
import logging
import sys
import traceback
from os import environ
from electrumx import Env
from electrumx.server.db import DB
async def compact_history():
if sys.version_info < (3, 7):
raise RuntimeError('Python >= 3.7 is required to run ElectrumX')
environ['DAEMON_URL'] = '' # Avoid Env erroring out
env = Env()
db = DB(env)
await db.open_for_compacting()
assert not db.first_sync
history = db.history
# Continue where we left off, if interrupted
if history.comp_cursor == -1:
history.comp_cursor = 0
history.comp_flush_count = max(history.comp_flush_count, 1)
limit = 8 * 1000 * 1000
while history.comp_cursor != -1:
history._compact_history(limit)
# When completed also update the UTXO flush count
db.set_flush_count(history.flush_count)
def main():
logging.basicConfig(level=logging.INFO)
logging.info('Starting history compaction...')
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(compact_history())
except Exception:
traceback.print_exc()
logging.critical('History compaction terminated abnormally')
else:
logging.info('History compaction complete')
if __name__ == '__main__':
main()

View File

@ -10,12 +10,13 @@
'''Script to send RPC commands to a running ElectrumX server.'''
from aiorpcx import timeout_after, Connector, RPCSession, TaskTimeout
import argparse
import asyncio
import json
import sys
from os import environ
from aiorpcx import timeout_after, connect_rs
import electrumx.lib.text as text
@ -29,7 +30,7 @@ simple_commands = {
session_commands = {
'disconnect': 'Disconnect sessions',
'log': 'Toggle logging of sessions',
'log': 'Control logging of sessions',
}
other_commands = {
@ -79,7 +80,7 @@ other_commands = {
def main():
'''Send the RPC command to the server and print the result.'''
main_parser = argparse.ArgumentParser(
'elextrumx_rpc',
'electrumx_rpc',
description='Send electrumx an RPC command'
)
main_parser.add_argument('-p', '--port', metavar='port_num', type=int,
@ -93,7 +94,7 @@ def main():
for command, help in session_commands.items():
parser = subparsers.add_parser(command, help=help)
parser.add_argument('session_ids', nargs='+', type=int,
parser.add_argument('session_ids', nargs='+', type=str,
help='list of session ids')
for command, data in other_commands.items():
@ -113,8 +114,9 @@ def main():
# aiorpcX makes this so easy...
async def send_request():
try:
async with timeout_after(15):
async with Connector(RPCSession, 'localhost', port) as session:
async with timeout_after(30):
async with connect_rs('localhost', port) as session:
session.transport._framer.max_size = 0
result = await session.send_request(method, args)
if method in ('query', ):
for line in result:
@ -125,15 +127,19 @@ def main():
print(line)
else:
print(json.dumps(result, indent=4, sort_keys=True))
return 0
except OSError:
print('cannot connect - is ElectrumX catching up, not running, or '
f'is {port} the wrong RPC port?')
return 1
except Exception as e:
print(f'error making request: {e!r}')
print(f'error making request: {e}')
return 1
loop = asyncio.get_event_loop()
loop.run_until_complete(send_request())
code = loop.run_until_complete(send_request())
sys.exit(code)
if __name__ == '__main__':
main()
main()

View File

@ -1,17 +1,17 @@
#!/usr/bin/env python3
#!/usr/bin/env python3.7
#
# Copyright (c) 2016-2018, Neil Booth
#
# All rights reserved.
#
#d
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''Script to kick off the server.'''
import logging
import asyncio
import loggingd
import sys
import traceback
from electrumx import Controller, Env
from electrumx.lib.util import CompactFormatter, make_logger
@ -22,17 +22,21 @@ def main():
log_fmt = Env.default('LOG_FORMAT', '%(levelname)s:%(name)s:%(message)s')
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(CompactFormatter(log_fmt))
make_logger('electrumx', handler=handler, level=logging.INFO)
logger = make_logger('electrumx', handler=handler, level='INFO')
logging.info('ElectrumX server starting')
logger.info('ElectrumX server starting')
try:
controller = Controller(Env())
controller.run()
if sys.version_info < (3, 7):
raise RuntimeError('ElectrumX requires Python 3.7 or greater')
env = Env()
logger.info(f'logging level: {env.log_level}')
logger.setLevel(env.log_level)
controller = Controller(env)
asyncio.run(controller.run())
except Exception:
traceback.print_exc()
logging.critical('ElectrumX server terminated abnormally')
logger.exception('ElectrumX server terminated abnormally')
else:
logging.info('ElectrumX server terminated normally')
logger.info('ElectrumX server terminated normally')
if __name__ == '__main__':

View File

@ -1,21 +1,27 @@
import setuptools
version = '1.8.12'
version = '1.13.0'
setuptools.setup(
name='electrumX',
version=version,
scripts=['electrumx_server', 'electrumx_rpc'],
python_requires='>=3.6',
# via environment variables, in which case I've tested with 15.0.4
# "x11_hash" package (1.4) is required to sync DASH network.
# "x13_hash" package is required to sync BitcoinPlus network.
# "tribus_hash" package is required to sync Denarius network.
# "blake256" package is required to sync Decred network.
# "xevan_hash" package is required to sync Xuez network.
# "groestlcoin_hash" package is required to sync Groestlcoin network.
# "pycryptodomex" package is required to sync SmartCash network.
install_requires=['aiorpcX>=0.10.1,<0.11', 'attrs',
'plyvel', 'pylru', 'aiohttp >= 2'],
scripts=['electrumx_server', 'electrumx_rpc', 'electrumx_compact_history'],
python_requires='>=3.7',
install_requires=['aiorpcX[ws]>=0.18.3,<0.19', 'attrs',
'plyvel', 'pylru', 'aiohttp>=3.3'],
extras_require={
'rocksdb': ['python-rocksdb>=0.6.9'],
'uvloop': ['uvloop>=0.12.2'], # Bump when the uvloop connection_lost bug is fixed
# For various coins
'blake256': ['blake256>=0.1.1'],
'crypto': ['pycryptodomex>=3.8.1'],
'groestl': ['groestlcoin-hash>=1.0.1'],
'tribus-hash': ['tribus-hash>=1.0.2'],
'xevan-hash': ['xevan-hash'],
'x11-hash': ['x11-hash>=1.4'],
'zny-yespower-0-5': ['zny-yespower-0-5'],
'bell-yespower': ['bell-yespower'],
'cpupower': ['cpupower'],
},
packages=setuptools.find_packages(include=('electrumx*',)),
description='ElectrumX Server',
author='Neil Booth',
@ -30,7 +36,7 @@ setuptools.setup(
'Framework :: AsyncIO',
'License :: OSI Approved :: MIT License',
'Operating System :: Unix',
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Database",
'Topic :: Internet',
],

View File

@ -0,0 +1,14 @@
{
"hash": "000004585973c0ce2c9f4c8be13983f901e712e808b1603ddc84c5fc1d630fe4",
"size": 281,
"height": 412394,
"merkleroot": "22d70bbc624844f38343efc35e551d5ce5110d7fddd450bf4a535c830a8a7b3b",
"tx": [
"22d70bbc624844f38343efc35e551d5ce5110d7fddd450bf4a535c830a8a7b3b"
],
"time": 1561546385,
"nonce": 1896874112,
"bits": "1e0745c4",
"previousblockhash": "000001301d2ac66c89ebf7bed1bfaf9398efaea6c3fc2aa5da80efadab7eac40",
"block": "0000002040ac7eabadef80daa52afcc3a6aeef9893afbfd1bef7eb896cc62a1d300100003b7b8a0a835c534abf50d4dd7f0d11e55c1d555ec3ef4383f3444862bc0bd722914e135dc445071e8000107101010000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff2003ea4a0604924e135d085ffffdb3000000000d2f6e6f64655374726174756d2f00000000020000000000000000266a24aa21a9ede2f61c3f71d1defd3fa999dfa36953755c690689799962b48bebd836974e8cf900ca9a3b000000001976a9144570c3c687ebf8ef5af7f6efae9b810055305c3088ac0120000000000000000000000000000000000000000000000000000000000000000000000000"
}

View File

@ -0,0 +1,19 @@
{
"hash": "abbed178fd5943a1583de2f1d48dbd18bb49d49e2498366a4dde02ebe8ff5e62",
"powhash": "000000000016a5e5f562ee36daba04bb50c82699de998d0e20ac08a3943a6900",
"size": 1438,
"height": 496659,
"merkleroot": "78479b38a072b1da6d84e53efed2a09b5878c13462d0ff6c468a2130c117fdcc",
"tx": [
"914ce5d8967e32cc443b445117bd0da38d1c110860cfe7f49644654d119f02e8",
"a407f6eb6709c67a60c7af7185d160fa8a9f88306c73ddf5668e9953df647353",
"e20d84055134cf6e6caa6b99c7564386086720905df1c49aa5997222ca8bfa6c",
"940cc720f6385b9b9184f68524226de013f706333f907d111a4e74992bb44003",
"0c814cc375b6486304159121c1a2f9d0aa708ef8d11eb044dd8c151a43aa27a3"
],
"time": 1514019261,
"nonce": 2219259,
"bits": "1b6925cf",
"previousblockhash": "460390420cdf623a5a57ffb02ebeae15345b0324eb8150c49ae1826db2cb4fa5",
"block": "00000060a54fcbb26d82e19ac45081eb24035b3415aebe2eb0ff575a3a62df0c42900346ccfd17c130218a466cffd06234c178589ba0d2fe3ee5846ddab172a0389b4778bd193e5acf25691bfbdc210005010000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff0403139407ffffffff04009b8647000000001976a9145efb63ceeac14fe5d518a057338e3d8a84e459c888ac20f1fa02000000001976a914cf8c5810dbb0e77bfa5811ef99a509978d85dd5e88ac0000000000000000246a224d696e6564206279203131313131313131315a378b8e1a1e555e000001ffc40500000000000000000000266a24aa21a9edf76fed3ded3f7d6b60934dd6acc8bec321b6f35b581da0d10514c558b3f189e401200000000000000000000000000000000000000000000000000000000000000000000000000c0000006f51630e6f847f16bfef023c1987e890304baa77eef212a94f7a064ff3e0e72500010128b0c588366178258de1c074542c9b9a6bdd5dd0b5022fce1cf593ed976973510200000000ffffffff01583e0f00000000001976a914e92a30c6c5ac231c004c1b3170874b390b03acb188ac0247304402204d9bc16a0814f67d09f967191db2c04898ac8627498361aea19f7876a1abfbb702201a609c8cba158ca7c9594cefab8fb3505cf3fd5d061e8a1db0292983d071c38a0121027314fab69a06e2f79fd7e9f47ed780f45e6d8227d03ba98f79db9899259d1db9000000000c000000a54fcbb26d82e19ac45081eb24035b3415aebe2eb0ff575a3a62df0c4290034600010128b0c588366178258de1c074542c9b9a6bdd5dd0b5022fce1cf593ed976973510400000017160014371727d42f36c47f501fdaab485edb41b2134174ffffffff01583e0f00000000001976a914e92a30c6c5ac231c004c1b3170874b390b03acb188ac02473044022003402bf9997859d4a437b0c0d00b52095833aafa5fefa0d88cd30b5cc31ee896022064ad27334dfeace63e2fe28c01793fc7b5366bc2ad2dac30276a6ddb1e1afd310121027314fab69a06e2f79fd7e9f47ed780f45e6d8227d03ba98f79db9899259d1db9000000000c000000a54fcbb26d82e19ac45081eb24035b3415aebe2eb0ff575a3a62df0c4290034600010128b0c588366178258de1c074542c9b9a6bdd5dd0b5022fce1cf593ed976973510500000023220020b3bfabc6f5da87286fe387aa59397fcffd800e417e0e0c0230018fdf8a692008ffffffff01583e0f00000000001976a914e92a30c6c5ac231c004c1b3170874b390b03acb188ac0400473044022027065788b6d6e778d2092067edf5b13350c2b29fda474ee69450c1693680dd
9902200b42c22cc9024c87dca0c8030d66cc102cfd994bb3f0c7804961cb3ec4d0fb2c01483045022100956ba661054d5b9ee04930c926b0cd83cbb0230629e23b6b1f4a45518778fd06022009cba7398d3e25eef19e704eaebd87ad9010ba11185233af993bf0c23b739fd701695221027314fab69a06e2f79fd7e9f47ed780f45e6d8227d03ba98f79db9899259d1db92103088359d8c4df1d3753c67946de474de56a717ca793a9f81dfcbf1c9a470ff01921021841bc49e8baaa73dfa26074ca766bd6c31e83a9169b53b5556cae446dce1d4253ae000000000c000000a54fcbb26d82e19ac45081eb24035b3415aebe2eb0ff575a3a62df0c429003460128b0c588366178258de1c074542c9b9a6bdd5dd0b5022fce1cf593ed97697351030000006b483045022100944916c41549c8dfbc213ab8eb10b2987ef2bed51ea1eb51cbf3987951b99d9a02202b5feec10cdaa7f09bf23e9d3fa6825e2a72cc52bf25266fb5fbcb63ea73e8de0121027314fab69a06e2f79fd7e9f47ed780f45e6d8227d03ba98f79db9899259d1db9ffffffff01583e0f00000000001976a914e92a30c6c5ac231c004c1b3170874b390b03acb188ac00000000"
}

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,14 @@
{
"hash": "8baa0a0c95d5c3d968d3223c507951337d4431bf61786cb051512842e9a5f18a",
"size": 295,
"height": 50000,
"merkleroot": "75a2bf3a5f1464116b3edca2bc53c3074245eb872048b2baa2684c5ac15ad903",
"tx": [
"75a2bf3a5f1464116b3edca2bc53c3074245eb872048b2baa2684c5ac15ad903"
],
"time": 1551713199,
"nonce": 3508846327,
"bits": "1c00b776",
"previousblockhash": "5a49a0944a9a799c11b31aacf2e9ff85da6a308f28a4cb6d6e4b2cf9a7bdb332",
"block": "0000002032b3bda7f92c4b6e6dcba4288f306ada85ffe9f2ac1ab3119c799a4a94a0495a03d95ac15a4c68a2bab2482087eb454207c353bca2dc3e6b1164145f3abfa275af437d5c76b7001cf7be24d10102000000010000000000000000000000000000000000000000000000000000000000000000ffffffff1b0350c30004af437d5c08810006596f0100004e4c506f6f6c2e4e4c000000000480ba953e000000001976a9140a35b7717be43b406868a94ca9b7317a3cf5910b88ac80ba953e000000001976a914616a70b9473e81d38f624b0cb765d3b1b35f202288ac8017b42c000000001976a914d3e505575aaa0174c1801fcb78f47bb312be24d788ac80d1f008000000001976a9148b6f6866e5081ea5ba5b5793dd2c24f7e254a05a88ac00000000"
}

View File

@ -0,0 +1,14 @@
{
"hash": "0000000001b4c0cb741826a3236dfec65bafa6c901017ec2fe2b2662c6ec61cd",
"size": 271,
"height": 571000,
"merkleroot": "06fd61c7db6ba8d545b2c526c97f12d38474c9d5af8db804628c6c85c187a7b1",
"tx": [
"06fd61c7db6ba8d545b2c526c97f12d38474c9d5af8db804628c6c85c187a7b1"
],
"time": 1545267703,
"nonce": 679226306,
"bits": "1c028ff3",
"previousblockhash": "0000000002342e7f451c92b854ca068e1ea74d63405c01474ba05f80f0339078",
"block": "00000020789033f0805fa04b47015c40634da71e8e06ca54b8921c457f2e340200000000b1a787c1856c8c6204b88dafd5c97484d3127fc926c5b245d5a86bdbc761fd06f7e91a5cf38f021cc22b7c280101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff470378b60804f7e91a5c088101a570000000007a706f6f6c2e636100fabe6d6dfc11ced4e4a37b1706f30fea53f48c8baca01352a779873de984f74878b0b56a0100000000000000000000000200943577000000001976a9144364f5d8286d24d39df19bc6a5249a556b44730f88ac0065cd1d000000001976a9143a08893f724a005fe395db90e9572189e7ce4a2588ac00000000"
}

View File

@ -0,0 +1,16 @@
{
"hash": "000000000000b9dfa8f44b5a56041a6b4d3370a61f3074f18f9eab5675f3ce9f",
"confirmations": 56493,
"size": 423,
"height": 500000,
"version": 2,
"merkleroot": "388d067407c3e228d2a1699af1f6118722e7f45522daa97dd258f4472013fb30",
"tx": ["bfb870ffdb5d176fe054163f059a4e5175d69df7dadac12853ee216093317c6c", "5c379e96e10e68174da4afcda015bc82d8c03387e9f824e04e6aaabb4d79f408"],
"time": 1543531308,
"nonce": 3033454746,
"bits": "1b020838",
"difficulty": 32249.81798522,
"previousblockhash": "000000000000f7cea54210135a9c89eead3d0d0a0ebe7eb7efb7a13fa4a2da2a",
"nextblockhash": "00000000000004c8b97e38733e408015c2f2646714b63483304bba63b52c5184",
"block": "020000002adaa2a43fa1b7efb77ebe0e0a0d3dadee899c5a131042a5cef700000000000030fb132047f458d27da9da2255f4e7228711f6f19a69a1d228e2c30774068d382c6b005c3808021b9ad8ceb40202000000010000000000000000000000000000000000000000000000000000000000000000ffffffff1f0320a107062f503253482f042c6b005c0881000161bc0000007969696d70000000000001807c814a000000001976a91436e83f1c5aadb907b295b373f121542ce53ae26b88ac000000000100000001247ca95b435fd5ccffc0b7a584d2b4add0b27ff6b344f69d6296dfde98a4975d010000006b483045022100e53a43841194f7778ea7d5ef050087c77f19aaa19f0212d47fe54130e5a575ca02200c849e251d6008e7a381636db3627d2a5980a709e9e97ce0ddf4195cb9b16e31012102a34a68a5959d6dd1d37a30e08828dc90880e2da9ab98d5070b06a4d4c77d2e28ffffffff029cbfbb01000000001976a914a8dbb38f4b49423ece34d33c7186358d4e663fe888ace4f22a0d000000001976a91463762453311c1477c2c9164992a4a013cc7fd8f188ac00000000"
}

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,15 @@
{
"hash": "c6b3c170350aaaa3728df3d0b0e93d2bf52cfb848bbc28137639846e056713a6",
"size": 445,
"height": 50000,
"merkleroot": "10c72de807fa5acdec02f2e6d1061489b27a71b4ef812310dcd315a2868f4d88",
"tx": [
"e617d1b662f4eacd402dc2b2460f2f2d44f1d121a3f36d30fa20b53a1aa9d3b3",
"aec49d1a1864f55cdb295d5c232b284b17d7c3e6a88829de854704a7e6af0784"
],
"time": 1502958262,
"nonce": 0,
"bits": "1d0e11f1",
"previousblockhash": "6bd4c3a7685f81957d7b4fc0227c39c675861489dbefc5e9268db0721280e0c8",
"block": "06000000c8e0801272b08d26e9c5efdb89148675c6397c22c04f7b7d95815f68a7c3d46b884d8f86a215d3dc102381efb4717ab2891406d1e6f202eccd5afa07e82dc710b6529559f1110e1d000000000201000000b6529559010000000000000000000000000000000000000000000000000000000000000000ffffffff040350c300ffffffff010000000000000000000000000001000000b652955901bfcbd1d8bf080e0ba5491e45013563d54ef589f11fb3539c9995e958cb837ebb010000004847304402205561cf3dd6a7e5e39ffcc10bab0ae364c79b6b575e4d8a9d6ec3f6a9239b269f02200082f249ba415f1520367a006c4c3d9200dc49814f8620f2ef636d1e80b72f8d01ffffffff03000000000000000000c072acf51600000023210355073d2fdc1b90c3d6a58505a620062651d8236a08e5252f40fe872ecdf90cffac498fc1f51600000023210355073d2fdc1b90c3d6a58505a620062651d8236a08e5252f40fe872ecdf90cffac000000004730450221009410903e2848cba96b8b69cfd459560d3446050c778cd7cc4e881ac68b7c31c902201703743a7f8fe8b1371f0072a2bfda25e2ce68c68aa65434531c716511151eb6"
}

View File

@ -0,0 +1,28 @@
{
"hash": "8371f04354871c4093a802618f1c4e5fb153b14fa1a73d2b27f45208f14afb41",
"confirmations": 1994860,
"size": 446,
"height": 120000,
"version": 4,
"merkleroot": "543c076b56ccc14156f336b852451b69dcc52bf9d611fc646e6c7cfe0a1d698a",
"mint": 18.650684,
"tx": [
"dbaa5fc586560ec48d3d17c46a48718b0e5a88bc0c9caa39cd890dfa48b40ec4",
"260c31682199d58036d2a4c0b88b26cbac3f88b22d92bdd6a5206a883f3274cb"
],
"time": 1399711315,
"mediantime": 1399711154,
"nonce": 0,
"bits": "1d349684",
"difficulty": 0.019015472359015,
"chainwork": "000000000000000000000000000000000000000000000000002635365d8d7de3",
"previousblockhash": "7fc769c6064646cf01007533a2576cacf0750b30752745685f906a647c4abb44",
"nextblockhash": "dc8ceed3959e71cf3c27d2075786ed145530ddaf9a0bda6235297fc935ab0a3a",
"flags": "proof-of-stake",
"nflags:": "3",
"proofhash": "a006c7f11bc0fe9c06613755977965bf9dceaf51ddfbe89aa3435c93db0e19a8",
"entropybit": 1,
"block entropybit": 1,
"modifier": "68e882e7c1a87f3e2c4e7ff6b3a0f08adfb7a4d000c3239b230d3705e9104a62",
"block": "0400000044bb4a7c646a905f68452775300b75f0ac6c57a233750001cf464606c669c77f8a691d0afe7c6c6e64fc11d6f92bc5dc691b4552b836f35641c1cc566b073c5453e66d538496341d00000000020100000053e66d53010000000000000000000000000000000000000000000000000000000000000000ffffffff0d03c0d401014b062f503253482fffffffff01000000000000000000000000000100000053e66d5301d1c25787342cbbf2b025f92e2f2ed458b1d1ee48e00c55c9b888861aa5cb3b9f010000006c493046022100897b872ffc1da613b3fd075a05e1e3a529d12905ab13edaaec77bbdfa7b33ce5022100f6e12b78cd7b2269308870be2aeb37816777c47062b27700b3ad44ecf3631b0f0121027401e2b15c988824d5707b9ce3c966e96e41e1867a618ffe8e6b31a3196c0606ffffffff020000000000000000006c15ee24000000002321027401e2b15c988824d5707b9ce3c966e96e41e1867a618ffe8e6b31a3196c0606ac00000000473045022100907498630f6fe2e5c633c898abafccb35536beb039481a5543fdac7d5a2b730202200e36513f380b01087d83878989c3c0325e47984ce77bbe0a284a574799c53405"
}

View File

@ -0,0 +1,28 @@
{
"hash": "1f1ea51aee8a7456655e31857c7cd4a9f494556438485abd4c60d86cacf24b44",
"confirmations": 1914861,
"size": 412,
"height": 200000,
"version": 4,
"merkleroot": "d53f4d65ec4cbab16b538d2c5b1e80681510c000c8dcf33121dd3dce90d7f9fb",
"mint": 15.931506,
"tx": [
"dc74bc4d471b12780ebdad606d60927f15ca892dde9c38042b6490bb10f32e43",
"c3d0aa280a269ab7edcc5bb16c9c9a7a9fb0ded98a632bf39cca936a9880f270"
],
"time": 1405318345,
"mediantime": 1405317997,
"nonce": 0,
"bits": "1d06fd2d",
"difficulty": 0.14308045997088,
"chainwork": "00000000000000000000000000000000000000000000000000264aaf47bc1642",
"previousblockhash": "cbedb6de9ce90a6a4124f84d975070fcef0664a4d0c954403cbb6ef54fe7307a",
"nextblockhash": "731cc4879221da610890deaf16cc2085f6e833d47f180f35a1552e666e5d7af5",
"flags": "proof-of-stake",
"nflags:": "1",
"proofhash": "ffd9bac8019ae3fa8d613336467d7c08e7abd9ce55d502ffd7c95842550edd53",
"entropybit": 0,
"block entropybit": 0,
"modifier": "7967e8d05d08c98620215610a4a3484735446a12eb1671a890607199aa244446",
"block": "040000007a30e74ff56ebb3c4054c9d0a46406effc7050974df824416a0ae99cdeb6edcbfbf9d790ce3ddd2131f3dcc800c0101568801e5b2c8d536bb1ba4cec654d3fd5c974c3532dfd061d000000000201000000c974c353010000000000000000000000000000000000000000000000000000000000000000ffffffff0d03400d030122062f503253482fffffffff010000000000000000000000000001000000c974c353017c9f872e042e1a47a015a971978fa959e7698c06bbfed6ac875946e947708b04010000004948304502201b36699258081d054eb47329d319ce9bf1141a00bc96ca95880aad9a9f7765d702210094d36cabea1f2d011bdd9f4e1906f294b149d2d3cfb5b295ce4a85dafef2469501ffffffff0200000000000000000029de3716000000002321027401e2b15c988824d5707b9ce3c966e96e41e1867a618ffe8e6b31a3196c0606ac00000000483046022100d1e1269b8dd0a280f70e6507963cf07a6bcadb40daa5acf9258964788d30db3f0221008d115faa6e0eee6974588718aa1c4b98a1d7d5d344b049a88e946a071f3e389a"
}

View File

@ -0,0 +1,20 @@
{
"hash": "ed87415eec682bd45915cf9b58c3240937373b5904e2e781504960930400228d",
"size": 821,
"height": 120000,
"version": 6,
"merkleroot": "9ff8c579c4929aa1aa1e6a9a704e36afa7e075f6f555dfa915af27f5c6fcb9c0",
"tx": [
"7e95106d7f2f9ce046c97e991aedb13ca631633b5d93aea216dc24ab05303a11",
"3f4cfbc123c934ab983c989e3ed81cc8a3296a696474a589e7fae45cc22a14fa",
"793b99eb068bd837743e5730b75739254095efc0ea70a1480e8488c73050e6de"
],
"time": 1539998087,
"nonce": 0,
"bits": "1b094a51",
"difficulty": 7054.132896662391,
"chainwork": "0000000000000000000000000000000000000000000000005e21e43dfb3d33a6",
"previousblockhash": "3471e0e3d5bda84109f9e9a0676d36d4e2448c4ae22283af8b117300380c95c8",
"nextblockhash": "5488c63f091160dd27a070fdc6a4a261d838dbdd84570f6170f6b9fa34ba8a90",
"block": "06000000c8950c380073118baf8322e24a8c44e2d4366d67a0e9f90941a8bdd5e3e07134c0b9fcc6f527af15a9df55f5f675e0a7af364e709a6a1eaaa19a92c479c5f89f8781ca5b514a091b0000000003010000008781ca5b010000000000000000000000000000000000000000000000000000000000000000ffffffff0403c0d401ffffffff0100000000000000000000000000010000008781ca5b01a943749d1b322c6cc73d09aed39ac5f98b7d949663a65e11b450d73854023cc1010000004847304402205b7c7fb4ef30a134e803521ee883e1439f6425f5850a0758382a04ba3bd133900220289f23f63ada2785d22b150f88afe8ab037766e37e76b00a726558151dea0e2101ffffffff03000000000000000000c0ce822556db0000232102755f8f296fc1e5e643b9f90415144f9311e029d8e284bdaac838b45c3d32cd55ac88e0832556db0000232102755f8f296fc1e5e643b9f90415144f9311e029d8e284bdaac838b45c3d32cd55ac0000000001000000fc80ca5b029ec3ff99757227d7aafb90829b56307716b250ac0967e8574661a99e036db165000000006a473044022051edc0e5ab66205d6e28d7c02f41458059d109823a7a2d2007bdb274c48dd68a02200654c59f234fa370a602dd45ed6039c09057d70ed337d1d03215bd8e082bd5f80121022adbff4e8ad66004fe7ab9a72cac0f53b64cad3ecba683c923a22a16f23a4683ffffffffcb3100ffd79b9a3ff3da51d68a6768473f57f6e4b9909e818c76f6041e7a2a7c000000006b483045022100bace32c50fa6ba9840d7685ac2a85afaf0214b6239082b023f53a411f45ca4b702205f0d12ec1ec101df8e3a37f5288daaa9d312f72d2b822d4dd551b331e5ac4e6001210206970eaf01f12e516da6ced85bf623c59450676a42c2ea3b40f7179974975252ffffffff021cd46e0f000000001976a91492dd2d2a436680ff82c814da03477a3d1be295e188ac7aa91989020000001976a9140a237dd29cc896e96ad996aa689056575c48bb0388ac00000000463044022026caa49a29231a9490215d13843dbe5c8067f4a92f9487cff3d6f77341896c0302206ce90a5724d699d29a5e32a12c85d0d7ea49a7279f14a1c9611850866b8ad904"
}

View File

@ -0,0 +1,19 @@
{
"hash": "eb35558f177a902e165a54474bfd465c6fab11c4131fa9963cc224267bde1cb7",
"size": 438,
"height": 200000,
"version": 8,
"merkleroot": "3fe27f540667f559da14159ed95f51a990a80bc08a266a89338b02c211b8fe78",
"tx": [
"7e5a915352a2941063a3cdb9319c2384857f7c3f97be68a0835a5fe8b2adc4e0",
"3672944d4e16e8c76a09c8bb2cfbe5c8fdfdef3416b28e87359b6aa660059aca"
],
"time": 1551967523,
"nonce": 0,
"bits": "1949405a",
"difficulty": 58632371.88655923,
"chainwork": "000000000000000000000000000000000000000000000184aa9c6381e7103e02",
"previousblockhash": "b0d74219e3802621250af7ea50b858321fa360762bbaddf3968c1c044ba483e7",
"nextblockhash": "4c1e839bcad65f4e7ea257db6169b5f025c13ac8f07825f70c93aa56d8cd0cb3",
"block": "08000000e783a44b041c8c96f3ddba2b7660a31f3258b850eaf70a25212680e31942d7b078feb811c2028b33896a268ac00ba890a9515fd99e1514da59f56706547fe23f2325815c5a404919000000000207000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0603400d030101ffffffff01000000000000000000000000000700000001e01bde66c2b1fbf602c78ce2f43cbc53517d86b1caef3396962a881872c84bc4020000004847304402203bf1a143b384d257cf0f25803307c688fb09b1fdf57d310eea45beee8b6c85e002200dfef2703b80ccd6d7f30b32e702af7654d51f026dd1cd39f1cb8b25ce1b49ed01ffffffff03000000000000000000c02d08cdaa19000023210242a06ef798e2c434aba6fcdf5c30c80ce4382d722e19a4e85f91db67d6698b39acbf4c0dcdaa19000023210242a06ef798e2c434aba6fcdf5c30c80ce4382d722e19a4e85f91db67d6698b39ac00000000463044022055c7116568454603aa4529b399f2bcb50cb6e6ec9d64e1c8c95444fe1d38dca902202452c53ee34befaf525c5c91be465488c14a447838976a8f16255fc9bff6090e"
}

View File

@ -0,0 +1,15 @@
{
"hash": "3547b7602c258ecbcc41add2a88ae7a17ba4ba652738665f2d251721f5ef5c10",
"size": 529,
"height": 357826,
"merkleroot": "e3006f819f8d63d775fb9768c79459020af2476378038582ae84b161a0ba0acc",
"tx": [
"9a5459ab695dc33350fe8cda07e0e7d0785a8ffd15ab15ce24e71b7c29032ac0",
"478e827fd064766f3ffd4a48d66436d3f5b4067d8f23eb61138e9d79b2b44c0e"
],
"time": 1559191908,
"nonce": 0,
"bits": "1c0bd7cd",
"previousblockhash": "7a6ca59966f6c64c63e3a7368e69d802bcce24bb4e52f4913e38440ec4511599",
"block": "08009a02991551c40e44383e91f4524ebb24cebc02d8698e36a7e3634cc6f66699a56c7acc0abaa061b184ae828503786347f20a025994c76897fb75d7638d9f816f00e36461ef5ccdd70b1c0000000002020000006461ef5c0001010000000000000000000000000000000000000000000000000000000000000000ffffffff0603c275050101ffffffff020000000000000000000000000000000000266a24aa21a9edcb3fd1d48b60833fd11082400a3b11380cd784beee5e34ec04edb76d9839212e0120000000000000000000000000000000000000000000000000000000000000000000000000020000006461ef5c0143c349bfe857bb2d6277d01f554d2e716420896d18a2468b4fb92dc608b73d860100000048473044022041f43ec86d25cbe2a1e768e20b3dfe1cb4268e12c2c128632a42fd87f308e2a302206f87ce19fe4b2c71f77cbe651cde44df12966b84367e7a3f20bd0bb586672ffa01ffffffff03000000000000000000041eb01e00000000232102a7a1a6520b42f10f17a9c5aae2b8f6e24c7d5c4cdef7d7abcfe54bf71ddec72facd410510b00000000232102a7a1a6520b42f10f17a9c5aae2b8f6e24c7d5c4cdef7d7abcfe54bf71ddec72fac0000000046304402203f9b16d59876626514326d01cad088cc3763cfa90172afd57b4c5716b8d63fa702203fb45c154c1b4158070d6bcdd62a14258d3e218663b298d5be2bafa0d03acdf3"
}

View File

@ -0,0 +1,16 @@
{
"hash": "596b9249ffd6c1dfb3cfa16d3ade2e0cb57342fed66b90e6444dc54cc99d0fc4",
"size": 445,
"height": 50000,
"merkleroot": "8db60d963dad433c8655021d291aafd731a6754aac9b69cf0dece61e5b3c1b75",
"tx": [
"364c947002496d3115379dd779e2ce54cf114fb6cca42bd3da4de1823e4f08cc",
"a09b658cbe21162bbeb1821e2caf1f0947f8cdd517b87f2232ba1150043790e1"
],
"time": 1526710352,
"nonce": 0,
"bits": "1a7b61c6",
"previousblockhash": "44530f8cc7040528199dc3a41a8b660bf2feb8e0015d5c787086655ac03cbca3",
"signature": "30440220710162172b283cca7007c72cbb848a52388518b8cfb7a6c906744f473afe312b022056fd92b06351ee7049f0857d688134449566dcb82d0f6a3cb36208832988cba3",
"block": "07000000a3bc3cc05a658670785c5d01e0b8fef20b668b1aa4c39d19280504c78c0f5344751b3c5b1ee6ec0dcf699bac4a75a631d7af1a291d0255863c43ad3d960db68d50c0ff5ac6617b1a00000000020100000050c0ff5a010000000000000000000000000000000000000000000000000000000000000000ffffffff040350c300ffffffff01000000000000000000000000000100000050c0ff5a017879028fc93da2d5ce18feee9f074086790d34c97ccfafe15a42b2423f3e28490200000049483045022100e3a092a079346179213d9b7079c9795e291883cb715e3efee656b9a981632432022002cafb6d6d497705f455b5d6a60bca38c88c26910101faef433ecfe80515e1ef01ffffffff03000000000000000000c07975296d000000232103c0c30d173c8478ceaaba836e8cae3c8c4e43f88f6d555600be124781b533956bacc07975296d000000232103c0c30d173c8478ceaaba836e8cae3c8c4e43f88f6d555600be124781b533956bac000000004630440220710162172b283cca7007c72cbb848a52388518b8cfb7a6c906744f473afe312b022056fd92b06351ee7049f0857d688134449566dcb82d0f6a3cb36208832988cba3"
}

View File

@ -0,0 +1,16 @@
{
"hash": "1e4447195f4259b313b2c56072f7000237828e659254d5bf55f2b91e443f124b",
"size": 401,
"height": 60000,
"merkleroot": "9cf808c8f0e0d62864edee37a27fc44114a8896a6ace0c4ac6434c58e0d450ef",
"tx": [
"b3e734e183f1b4f10cc3de258d02efbd73fc64577e9c5fc54f7d750b95b29aff",
"859a989109ee967e941808a43224463e181b51af8acc27ad05e2e4f7fdf45f81"
],
"time": 1537111488,
"nonce": 0,
"bits": "1b00ffff",
"previousblockhash": "4f57fffd01fabbf020ac9e2110b4de9e127c06ba19f83741a5cb26f3b0aa13fe",
"signature" : "304402201d89a82d54b81e3aa0de97875bb15a874fce3319c7baf1751d18620f905909dc02202fc3fe4c17ef43aaa62dab4a6e0c735751d75b8f66e2a81cbead52f744570769",
"block": "07000000fe13aab0f326cba54137f819ba067c129edeb410219eac20f0bbfa01fdff574fef50d4e0584c43c64a0cce6a6a89a81441c47fa237eeed6428d6e0f0c808f89cc0759e5bffff001b000000000201000000c0759e5b010000000000000000000000000000000000000000000000000000000000000000ffffffff040360ea00ffffffff010000000000000000000000000001000000c0759e5b0143723908791e72544c4796be9da581a40eae3ba54b71b596d167f6ab245b37d60100000049483045022100d11ebd7ac7d0dd94f22416a1b223cf91cf1a70de52b2cd502edc1121c4d5409302207e22f22add1dd96bd214ca0f8875e4f06d9688c27053db82ea0ae0e9676d575401ffffffff02000000000000000000c05469f60300000023210288e5256969a3a9fd4735e6b8c8f905b270564f2448658177faf4c990e5745c45ac0000000046304402201d89a82d54b81e3aa0de97875bb15a874fce3319c7baf1751d18620f905909dc02202fc3fe4c17ef43aaa62dab4a6e0c735751d75b8f66e2a81cbead52f744570769"
}

View File

@ -0,0 +1,14 @@
{
"hash": "000000000014475c6019e0702f7ed76a33215236cd5bce6620f66e4d6a4737ce",
"size": 223,
"height": 30000,
"merkleroot": "90150c9db0646dbb498e2389a002b6f31812103010a8cdfa22d766454c43d1fa",
"tx": [
"90150c9db0646dbb498e2389a002b6f31812103010a8cdfa22d766454c43d1fa"
],
"time": 1523881569,
"nonce": 2358339072,
"bits": "1b6eb5bc",
"previousblockhash": "0000000000679de3a18bc02ddb554a123d99fad96dbc6a97746eb16efddb8265",
"block": "000000206582dbfd6eb16e74976abc6dd9fa993d124a55db2dc08ba1e39d670000000000fad1434c4566d722facda81030101218f3b602a089238e49bb6d64b09d0c15906196d45abcb56e1b0066918c0101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff17023075046196d45a0881000be75c0300007969696d7000000000000200ca9a3b000000001976a914c82d962c67815f3df3c1dfa8ad485ac2c4a9aaa888ac00ca9a3b000000001976a91450dacc9eceebb94d12219fed9f7250822a2ae5b688ac00000000"
}

View File

@ -0,0 +1,14 @@
{
"hash": "2b1844e65e398d76f686caf43da644cb060f35db7b4142f25ad28cb400a0b99c",
"size": 205,
"height": 100000,
"merkleroot": "f204afdd7fac0522b02565a7adeff019c8f7feeb977e95a0b10da276cd3ce57a",
"tx": [
"f204afdd7fac0522b02565a7adeff019c8f7feeb977e95a0b10da276cd3ce57a"
],
"time": 1501400120,
"nonce": 3071148160,
"bits": "1e01a4c6",
"previousblockhash": "3c15e085c951d49230f06ba01f29db01d6fa8b65fce5b412cff39b06852288ba",
"block": "02000000ba882285069bf3cf12b4e5fc658bfad601db291fa06bf03092d451c985e0153c7ae53ccd76a20db1a0957e97ebfef7c819f0efada76525b02205ac7fddaf04f2388c7d59c6a4011e80000eb70101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff2703a08601062f503253482f04388c7d59080ffffff9000000000d2f6e6f64655374726174756d2f000000000100e1f505000000001976a91447e1988902e3f0bff643c4a2a17262dbf06c81cd88ac00000000"
}

View File

@ -0,0 +1,14 @@
{
"hash": "80f2ff951df0dfd82cfb26c25699aa3b56700dd564e39744faf5bd5540631cb0",
"size": 416,
"height": 400000,
"merkleroot": "85246549a084ff10cffbff02b2e977c8c0e84275ee2a012b4b9f49ceb021c33d",
"tx": [
"85246549a084ff10cffbff02b2e977c8c0e84275ee2a012b4b9f49ceb021c33d"
],
"time": 1546823803,
"nonce": 1651216932,
"bits": "1d122121",
"previousblockhash": "388fce0759735e2a7eb5f437f302feb7c05ab07c7f3fafbccc5c25707738009c",
"block": "000000209c00387770255cccbcaf3f7f7cb05ac0b7fe02f337f4b57e2a5e735907ce8f383dc321b0ce499f4b2b012aee7542e8c0c877e9b202fffbcf10ff84a0496524857ba8325c2121121d24926b620101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff5003801a06047ba8325c08fabe6d6d000000000000000000000000000000000000000000000000000000000000000001000000000000002800faa900000000112f456173794d696e652e6f6e6c696e652f0000000006801d2c04000000001976a914dda8644ebb6959b161812fb10b42e2a5888b1c8f88ac801d2c04000000001976a914f2ff0ecbb0907837aeaa93a22b60c40eac244ab788ac80969800000000001976a9145342c2ddb553cee1b6144ca17d9670f7d05d72d588ac80969800000000001976a914b0ce6adc1e0713b4927aa6daa22488fac9ff259988ac80969800000000001976a914682ac36e06f6cb60f4135a60a578150b989b0b9688ac80c3c901000000001976a9141bea0e3994eb788cd6c597b7b456e0f0111e2de888ac00000000"
}

View File

@ -0,0 +1,27 @@
{
"hash": "c4a16ed137cf312ffc8ec989dbcc230b179d1bc6d7e2369cff0c54e268bcb458",
"confirmations": 371494,
"size": 402,
"height": 400000,
"version": 6,
"merkleroot": "d96a352f4cab0e73b8ddc69633b5fb54c39c53f8d93b9e1fb4882132a68646a7",
"mint": 13.55584584,
"time": 1547766314,
"nonce": 0,
"bits": "1d03bed6",
"difficulty": 0.26698634,
"blocktrust": "44597bc6",
"chaintrust": "106a07cee0e24",
"previousblockhash": "3a045b39c2369f23a95b32b0ec2c30234fd307dc86795d8f9a295f85b1e6f632",
"nextblockhash": "24f1df989cbede25bbeef305a420b22b28840de7ee52122be0e3d56c6875b062",
"flags": "proof-of-stake",
"proofhash": "0000637026cfd1b97af9bcbc558cd577962057c99760dedbabd3f7b3ced909bb",
"entropybit": 0,
"modifier": "7e35682c0e03ff1b",
"modifierchecksum": "fa487772",
"tx": [
"7d37da7d2d20142d2323af4b556e2377faf4d86d2fc29ad307f983bfe4d5465c",
"36811b8de5a6fd1e2bf3a5f5e7157a5169d380345bb6b688c89ab07280faf79f"
],
"block": "0600000032f6e6b1855f299a8f5d7986dc07d34f23302cecb0325ba9239f36c2395b043aa74686a6322188b41f9e3bd9f8539cc354fbb53396c6ddb8730eab4c2f356ad92a0a415cd6be031d0000000002010000002a0a415c010000000000000000000000000000000000000000000000000000000000000000ffffffff0403801a06ffffffff0100000000000000000000000000010000002a0a415c016596bbccd731657b1a47d8570c2ae3ef3976c199d4784a8ca2dff1a3c3684a160100000049483045022100a1bc3acbbf446729e387baf13e5d58566b192178a43d2ccdf7ac0b5aa699fe9102206625909db2a437976689594a5b488a9137b8bb458e3bee5227a051917646c2a901ffffffff02000000000000000000d3f51b394600000023210394f136be999f1e1daca9198dbb61418a1d658cb6857aaaacf06e3e4f9a33fdd0ac00000000"
}

View File

@ -0,0 +1,27 @@
{
"hash": "c6224c2d994ed417be7a550b6201cf71a110c378df5dfe1a423b51deef276309",
"confirmations": 729491,
"size": 515,
"height": 42000,
"version": 6,
"merkleroot": "1118f54a5cedcda6cdd669d7ed337ddd418edcc9c8065ba06ec8b3473179c7d2",
"mint": 3.24335269,
"time": 1526472385,
"nonce": 0,
"bits": "1d154384",
"difficulty": 0.04702771,
"blocktrust": "c0a0e25",
"chaintrust": "21f0d6cef092",
"previousblockhash": "2407cc6fe5d1293808bc681771aa292b736ce7e142ad32d181101c18c23cbc89",
"nextblockhash": "e4a12df47a26484006ffdfc35d6db49cfe74626d57d580483b5a6e1ebf0ea307",
"flags": "proof-of-stake",
"proofhash": "000060fe229530c712d0fa9501a7212da3748b8a95c9e876f72d28ffe1dedae2",
"entropybit": 1,
"modifier": "f7a842c2194e61e3",
"modifierchecksum": "e20172af",
"tx": [
"761638848add367b5dd05b913be94af62e4b313f3a6f807803f174ed989b5912",
"2e9d00b06f1f18457b7ab45ec81e02e8fb01d33912dc65c05f5d871ffe981db6"
],
"block": "0600000089bc3cc2181c1081d132ad42e1e76c732b29aa711768bc083829d1e56fcc0724d2c7793147b3c86ea05b06c8c9dc8e41dd7d33edd769d6cda6cded5c4af51811c11efc5a8443151d000000000201000000c11efc5a010000000000000000000000000000000000000000000000000000000000000000ffffffff040310a400ffffffff010000000000000000000000000001000000c11efc5a026fedaa2a742acabb33a2c7c91954e22781f2140a3992beb109d5b3d48a804abe020000004847304402205910e4efd1b60edd9a5e6fdd4bc65dccb9b88019f1e2119861498fe3b3630aec0220692619c3f5c6b2d01c3af3d4835d9fe1d10556f587733a3ca436a30559e2e09c01ffffffff3bd3c8697d79143d6c35474c2de04e255499fa1bab71be1e03a320ad1225c5a30100000049483045022100d3eea15b5d3dcfd80eb021aa2446f98abf517af7dc74eef2e149ba6a8f7199ba02203b2e595e01beaa6f6bb4e15850ae9ff87fd6facebe65ef745618f3eaa02f849c01ffffffff020000000000000000006f2fa37322000000232103ebb76b9b604909b1af2ba737e31f54b9e394f160174f44de330cb7c08563b47eac00000000"
}

View File

@ -0,0 +1,16 @@
{
"hash": "d79ad638259f3837ab25769130a518238ef9b6b20653d98df96e602fd1c8ee98",
"size": 430,
"height": 50000,
"merkleroot": "c9361953258dc88686d1ca8136f16e84e3363bb4bf5fb5f008270b0994e74b9b",
"tx": [
"367d5902ddb72eb8a8a943322501b225b1dd3fc3bddbc301b76ec54d653971a7",
"299ef64c6891b4041c4b78159b419f32843ddd01e6a0bce643e8d9cb10075606"
],
"time": 1536213079,
"nonce": 0,
"bits": "1b1d9628",
"previousblockhash": "a8eaea0689418de02be93d1507a5a89a2fd8ab3b83aaadb548cd24303567b006",
"block": "0300000006b067353024cd48b5adaa833babd82f9aa8a507153de92be08d418906eaeaa89b4be794090b2708f0b55fbfb43b36e3846ef13681cad18686c88d25531936c957c0905b28961d1b000000000201000000010000000000000000000000000000000000000000000000000000000000000000ffffffff060350c3000101ffffffff01000000000000000000000000000100000001dc1a9bcc0a7c5ae8b1241a6bcbe376fd83618763b9d03cabc3746f2344e9dcb40100000049483045022100f390507a07487a5c85ce6a77576b67eed72d86c60ad740dcd1ca418f46123bcf0220296358bc415152eb602849d613a3296a2013a4c645287d215d0a78355fd23ec901ffffffff0300000000000000000060dfa12224000000232102d0ab909607aec2711640fa6adb1665745e0e08b11ec5ad0336d2ec0106473837ac0046c323000000001976a914f605f5fd1c364d0ff189066bc991b83148e7460188ac00000000473045022100ca9a3654cfc3a4e5792d47e7726e29426737a599ea4077b80ec86b30782df49002206c07975a7ab2b393df45e33d40d913ccbecfc5958429e0158ac4ddadc1c460e0"
}

View File

@ -0,0 +1,19 @@
{
"hash": "769f485854697c20088703e205017b96853f1a0bfbd288110bed45d39a3a60b4",
"size": 435,
"height": 341500,
"version": 7,
"merkleroot": "abb3cb9914b4f2737af1f2e0cf07710779798ef4d33bc1d68ca29de1eeae37a0",
"tx": [
"7ad425173d5d06b7311768a5e92df028afc042134a820299aeebf53c6f5abbab",
"c2f911bdb2c54babed5246cdc158d1ae4476bb56c63f4002600d28d1b6b1c115"
],
"time": 1536377872,
"nonce": 0,
"bits": "1a52cc7e",
"difficulty": 202623.232278631,
"chainwork": "00000000000000000000000000000000000000000000000f50d15e43b4b0e049",
"previousblockhash": "390f9714894676d115c7c628e51c4e19c9ed5e1c21731e8a55b47df28734a6e2",
"nextblockhash": "57768c45fc2b5cec2061188bcc5f7a6c310a364827bd2652aa3238b88afc3804",
"block": "07000000e2a63487f27db4558a1e73211c5eedc9194e1ce528c6c715d176468914970f39a037aeeee19da28cd6c13bd3f48e7979077107cfe0f2f17a73f2b41499cbb3ab1044935b7ecc521a0000000002010000001044935b010000000000000000000000000000000000000000000000000000000000000000ffffffff0403fc3505ffffffff0100000000000000000000000000010000001044935b015a41fb10e592107c8771c48e8953892a1e9712577aaf2c4d2587e00e50adc4c30100000049483045022100d3f39b0a909fb466999774eb266197229cdae8a38148a2ca17fe8bd86b77099802205f10346461b1b388308f763500bcb447451b80f41bcb9f8b8912723ee9096c2001ffffffff0300000000000000000000001c70af000000232102cdd566a2aeb6ba7bcf10be34b06e7abc4a6c1caff37ca091a7c4c2813a1be331ac00bca065010000001976a91413bbc57f041c647891de55f9cab25a943ce20b5588ac0000000046304402203a8768e5f32a1a2c57b97a04dfdc6a78a1378336f2f1bd1e0c0aab1f0ed915ee0220435d11dedb8c55e9014e29c433d0afbf6128348ba829dae49abc3ab8ec5b48e5"
}

View File

@ -0,0 +1,19 @@
{
"hash": "6bf18c06e6aef68b04b2768867ddf8b227449ad0bc596070f00efeb5dbc67022",
"size": 429,
"height": 710000,
"version": 9,
"merkleroot": "d6ec61592540184d9bd10b676ed8f7e3faa54c028a9a6f96c0e5549e208e5312",
"tx": [
"124fdf0bbc71b0f46516223298b86cbb366dd8b209509fb53e9c63a66a9af4d1",
"2beacd9f483e5cea74da90d57a8e061cd63ca82ce277926feda7026546fe008c"
],
"time": 1559855502,
"nonce": 0,
"bits": "1d251585",
"difficulty": 0.02696535177573847,
"chainwork": "000000000000000000000000000000000000000000000019c39c3ca805670cd5",
"previousblockhash": "f2f114a7e86bc4206c4da4bcc21aebe2f31798362720b9a6c017d166479d6a51",
"nextblockhash": "61ccc10b583c9b23091398b24877ae2eafccc78ef88967ed12a75ccd49c00814",
"block": "09000000516a9d4766d117c0a6b92027369817f3e2eb1ac2bca44d6c20c46be8a714f1f212538e209e54e5c0966f9a8a024ca5fae3f7d86e670bd19b4d1840255961ecd68e81f95c8515251d000000000203000000010000000000000000000000000000000000000000000000000000000000000000ffffffff060370d50a0101ffffffff010000000000000000000000000003000000014ff94bb3f4d3e464dc7a2b4eed036880dcbb0115cc065bece3433b36b3dc63ff01000000494830450221009a56fa7cb74211484637ce89725b91cca1ebadd4a93c607ee28d3b071be13e9b022077bc00625fee9e7b0578ab966564c03840f19748bf340fc8a67c340d8803a5d001ffffffff030000000000000000009981371b4b00000023210259cd61a5962fd375ccac514fcf976794fb86656f3ed343b5a28674d8a68855c2ac0008af2f000000001976a914b61d9e80b3ca66a79c867030a18f2f22a0ab24f988ac0000000046304402207fd3bd8bd350ed1819fcc4a045d90eb0e8b1f75a031524fd02627063736a4899022075a81334973d1794218f2a5547794b94a30eca136d301a51510db87cdff3b368"
}

View File

@ -0,0 +1,20 @@
{
"hash": "6733a167600d75c51e4e1c86aca95e5cb2c7b5a545f4027906dfda890e426413",
"confirmations": 313490,
"height": 1337,
"version": 3,
"merkleroot": "82428534b45d72fb714efc68201805a4108e438f344a21d73e270a19515be161",
"time": 1538189461,
"nonce": 0,
"bits": "1a0db9f7",
"difficulty": 1222238.113064503,
"chainwork": "000000000000000000000000000000000000000000000000264ff2b7dbef2c2f",
"acc_checkpoint": "0000000000000000000000000000000000000000000000000000000000000000",
"previousblockhash": "8124baf13a48e7f9336db961d0b59b2021cf95138b3068f6c22b689cbe265c83",
"nextblockhash": "28dd5e9887f029a46e1bf12100403c698087747ff1fbb7cf1f5802b4e28cb982",
"tx": [
"bf927d12784d64dfadb8ccd27124abe0c5c4978cef1dc84187285e7bca05d282",
"8663461f3adaa54a2438ef83198ad31f34747f9e037ca89351e88c9dd9e7c3e3"
],
"block": "03000000835c26be9c682bc2f668308b1395cf21209bb5d061b96d33f9e7483af1ba248161e15b51190a273ed7214a348f438e10a405182068fc4e71fb725db43485428295e8ae5bf7b90d1a000000000201000000010000000000000000000000000000000000000000000000000000000000000000ffffffff050239050101ffffffff010000000000000000000000000001000000010b0e46907b49b0794767633f59e584719d81aead72d87a26bbbae595cc04f88b0100000049483045022100fff2f7b2b4abad935f5a5731dd4b3363e658d3bcf433263e7ec813313dc6230102207772df5639a8cfad622ba8d54f7aa22ecaff88bc360222fc6e6d2ebaa9f9941401ffffffff040000000000000000000028babda0050000232103c16d7c1523d9f36578e9e631f97b7a39e5282ccd6374d98b2aacf1dd4fe984a6ac42f2f6aba0050000232103c16d7c1523d9f36578e9e631f97b7a39e5282ccd6374d98b2aacf1dd4fe984a6ac00a3e111000000001976a91492150f863991f5a465a0a3a06caebd2787a260a588ac00000000473045022100d478732e49ab4f273c687bc24f6a646d8aad6877b0948a24f9030d7683b028d502207f8933b6de4604d2d702a165eaca9ede81933ca1185c00c6dd365e074935a741"
}

View File

@ -0,0 +1,20 @@
{
"hash": "7bc09de55cd8334f6f167829753f043ac1afa4eddaf5898dc1a14b46ace947f0",
"confirmations": 144827,
"height": 170000,
"version": 4,
"merkleroot": "e60ecea79426c869bea8d064465dd9cc9e6b426bdfb632752e3de3c352272f27",
"time": 1548246463,
"nonce": 0,
"bits": "1a2865aa",
"difficulty": 415300.8388247479,
"chainwork": "00000000000000000000000000000000000000000000001371001c2972478ec1",
"acc_checkpoint": "0000000000000000000000000000000000000000000000000000000000000000",
"previousblockhash": "2cc1019efc9b89475c0f2a72d1418c4387cfd8b9da5548d819740b0a9843568d",
"nextblockhash": "cce7302d617f9a490104e70098f8e6015a46499516b25bf6fbd54c4edbefb808",
"tx": [
"77f183818333fc317122c448151f5d11b7f621151130ce02bf6acd1f6ccbadd6",
"39436b3d686eb2be88c89e4edb66fa853a3d49f1ded40d2cd156b0e43c2e8e82"
],
"block": "040000008d5643980a0b7419d84855dab9d8cf87438c41d1722a0f5c47899bfc9e01c12c272f2752c3e33d2e7532b6df6b426b9eccd95d4664d0a8be69c82694a7ce0ee6bf5d485caa65281a0000000000000000000000000000000000000000000000000000000000000000000000000201000000010000000000000000000000000000000000000000000000000000000000000000ffffffff06031098020101ffffffff0100000000000000000000000000010000000102b5c06f8f38c2139a5e51b15fcc8ce4685523b4549b4c3528483fad0c865b87030000006b4830450221008374ad472e099de0cc627f2d6ea2e9ffe878ad41bc1209ebe8e942eb516dc9dc022070968c94f60d55902a64bb3705502eb0ed591cf090785ad44061ba4a987db0ab012103cdca2f4c4d26ce23d543caf2d14426e4cd03878bd29c3637d7f181e3fe1ffc25ffffffff03000000000000000000366f6a6701000000232103cdca2f4c4d26ce23d543caf2d14426e4cd03878bd29c3637d7f181e3fe1ffc25ac009aad4e000000001976a9141ec10816853643a3b3cbbb8189d0bfbea75304bf88ac00000000473045022100b8aa4d4fde2452268ba06a0bf7be76e48007ec439ce97a6647ff10a293d2f54a02202c21ee5eefa4d078eae5d6a16ce29debfce274ed4f9cd38feab80821a6ec83fc"
}

View File

@ -0,0 +1,18 @@
{
"hash": "00000000000502760fe120b5be6f315513af417cd7942ce4f760399d7fe37707",
"confirmations": 17763,
"size": 212,
"height": 300000,
"version": 2,
"merkleroot": "83587626c5485313158092719facfa72ae1c2a65d9666ad1ab31b2545d496d10",
"tx": [
"83587626c5485313158092719facfa72ae1c2a65d9666ad1ab31b2545d496d10"
],
"time": 1547896171,
"nonce": 1025707132,
"bits": "1b057450",
"difficulty": 12015.1900094,
"previousblockhash": "000000000003f3b11ace3464840836fc361ac91bad3ef9395024cf4c846cad2d",
"nextblockhash": "0000000000045d9259a114088079b005518bb23a72a2f2802348ddb382726459",
"block": "020000002dad6c844ccf245039f93ead1bc91a36fc3608846434ce1ab1f3030000000000106d495d54b231abd16a66d9652a1cae72faac9f71928015135348c5267658836b05435c5074051b7c0c233d0101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff2e03e09304062f503253482f049805435c08581eb2a333010000142f70726f68617368696e672e636f6d5a0307002f000000000100bca065010000001976a9149f7932e1c8fcef3251869121da7351921e7671dc88ac00000000"
}

View File

@ -0,0 +1,25 @@
{
"hash" : "c30fd1d91e93a2ede890082f2bebdc9ff3f98bedbd3f2af93c24e78d5f02ae71",
"confirmations" : 9303,
"size" : 447,
"height" : 400000,
"version" : 2,
"merkleroot" : "c8f7fa5334c7b0eb5109657c49c175fd5dd2917eb964e08aae320a2a0569aafd",
"time" : 1543222503,
"nonce" : 0,
"bits" : "1c209b13",
"difficulty" : 7.85125402,
"mint" : 0.17000000,
"previousblockhash" : "c2386977e86edce265e16dee971e16182d30276dcbe4f8d3952de2f7604205e8",
"nextblockhash" : "0b44da48e6c67204a8ba70287661aaeb2a701860db6cbdbc524383a1ff5f69c3",
"flags" : "proof-of-stake",
"proofhash" : "00000017751b95cb61a50e42edd4199222b35bac524cda142e1f21fa5ab2bb81",
"entropybit" : 1,
"modifier" : "0a1b9dcd55c93fad",
"modifierchecksum" : "88a78bdd",
"tx" : [
"b43a373255449745a771fa606b56b6b10c7aaca6e2804bb7a30fc7717b80cc73",
"ffcf0b421c3fe791a1baf068deb538a508950231812905e2110f12727f780a21"
],
"block": "02000000e8054260f7e22d95d3f8e4cb6d27302d18161e97ee6de165e2dc6ee8776938c2fdaa69052a0a32ae8ae064b97e91d25dfd75c1497c650951ebb0c73453faf7c8e7b4fb5b139b201c000000000201000000e7b4fb5b010000000000000000000000000000000000000000000000000000000000000000ffffffff0e03801a06026f01062f503253482fffffffff010000000000000000000000000001000000e7b4fb5b011565b918218df4f074319bafafc94ec0ff50e6f9f681143ccb2f429f1a85e1c2000000006c493046022100acbc9716921186723bf778b9ff7d8a4e252275674e2735cd3b02c272dbe678b8022100a7b4170b8c3707101bfd82a7fa7c16436740df0803ab6a9024f82d09b0d98a8e0121029fb0c1b71e83ff07732d0847133e46e4557bc95f1b670ea598f1dccfe70b1312ffffffff0200000000000000000000050802000000002321029fb0c1b71e83ff07732d0847133e46e4557bc95f1b670ea598f1dccfe70b1312ac00000000473045022018245fa0e09bbe08696724016243fac0de694bb61a841655a48598d32070a42d022100fa118a1a8d179789f852f9517ac5ab06dfa72475b4cc0527a6972a142add80f4"
}

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,25 @@
{
"hash": "f0e6edf02c1fda098144756cf466936977a553fa18739325cd4af36d489e8632",
"confirmations": 380792,
"size": 457,
"height": 1337,
"version": 1,
"merkleroot": "df17fad72c04727b88e748f087dfcf6aab817f9bd248a38bd88bb967c104cada",
"time": 1346638335,
"nonce": 0,
"bits": "1d05789d",
"difficulty": 0.18277429,
"mint": 0.07,
"previousblockhash": "2b7fd3d7e53bdf8bbe5d8241e313458fb7a2a65dfaa8ea2a03710709c9e60d4e",
"nextblockhash": "f47c998cfc6f7089665b0be9b42c00ec83442c5059beb5490abf16a81f3827fd",
"flags": "proof-of-stake",
"proofhash": "0000371d6be77a6e38732ebe0b388017c1d29bc7843227d6244b7b837dd7d45d",
"entropybit": 1,
"modifier": "a7dde56766970857",
"modifierchecksum": "875223e4",
"tx": [
"f7214afc86d37d29d8cc4f9d040d6605512fbf3ac21bc1a6f32609dfa7b0f7da",
"575eb33679da3981cfb5ed35f48a02a0b6492ceb30dc75ccc307e0e8ee1d3ca1"
],
"block": "010000004e0de6c9090771032aeaa8fa5da6a2b78f4513e341825dbe8bdf3be5d7d37f2bdaca04c167b98bd88ba348d29b7f81ab6acfdf87f048e7887b72042cd7fa17dfff1144509d78051d000000000201000000ff114450010000000000000000000000000000000000000000000000000000000000000000ffffffff0f04ff114450029701062f503253482fffffffff010000000000000000000000000001000000ff1144500115f754e065609bf45b0ff796b3be0e4b91357edebae2f3d1a3ced332b6c52dd10100000049483045022001f3652da344fa1f25a4da28aa0b5f727f58c5182a42931a6433d1995d089ba9022100b9420da34863c0c7ed27c6bdc916c045485a0d2ad3049cb989adda728c4f6d7d01ffffffff03000000000000000000a097223c00000000232102ed1b78f2dd015603be7d7a108ea26c1e9535f98a4e3c8ddd7dc6291e378009b0aca097223c00000000232102ed1b78f2dd015603be7d7a108ea26c1e9535f98a4e3c8ddd7dc6291e378009b0ac000000004730450220097e783d6d92f4882ce58bebb7094d1420abd8669183f862bb4c3978cf0589e00221009f2598fdc627711431dbcbc732e043037b052a32e00edfd6bee511bcd2b9e76f"
}

View File

@ -0,0 +1,25 @@
{
"hash": "305970c9387e4adde8dee27ed50b6207d738b7578f4f1f481cee23a727a76172",
"confirmations": 72129,
"size": 456,
"height": 310000,
"version": 2,
"merkleroot": "87a298a3e1890c08948727e8ce4a6367e77d0ab7e5ff3b1875fe607c03992c9e",
"time": 1512575032,
"nonce": 0,
"bits": "1c0ab99a",
"difficulty": 23.86908607,
"mint": 0.46,
"previousblockhash": "7d25ff5e201e2951ff9b4d877c2a218ea0df17c4dd47abf2978b41fc0b890862",
"nextblockhash": "bf09ac2b219f3784e8fe1d4f65b1c77d8cb4bff0bacab9677436346c14f524ea",
"flags": "proof-of-stake",
"proofhash": "00000251ba021240b5ffb2d918d8475b19f368962e28350155d66b6f3a04fb25",
"entropybit": 0,
"modifier": "006d50c1cdf9fafd",
"modifierchecksum": "68f95e6a",
"tx": [
"25f0a69c9e7cd99eb6be21e3152447cf5a48d71903e3b15da01118191b9ec764",
"549d76e88e06c3e0e02058d01f543a6da5ac7b8d34520ffad1104829432944c1"
],
"block": "020000006208890bfc418b97f2ab47ddc417dfa08e212a7c874d9bff51291e205eff257d9e2c99037c60fe75183bffe5b70a7de767634acee8278794080c89e1a398a2873810285a9ab90a1c0000000002010000003810285a010000000000000000000000000000000000000000000000000000000000000000ffffffff0e03f0ba04024a01062f503253482fffffffff0100000000000000000000000000010000003810285a0132e413fa8effb7c4a99e89f6a62cc1a4ea3424ee78a391b117b047c57f40de690100000049483045022100b45fc1471ca229fd692e4b96a67e8e25ab7858f8c445c1cf92e362758f7e23d402203778a10d47ba33a94cb89b0fde0d9883d5cca8ca25456288bd59026c0c900c9101ffffffff0300000000000000000060765925000000002321035df758c3d2f0d60efc48b6c3a08e0aea265861c355ead942705503258307c698ac709d5925000000002321035df758c3d2f0d60efc48b6c3a08e0aea265861c355ead942705503258307c698ac0000000047304502205361a4b0d0937b2904078ba643d0ca106691c17b5c700045f1b658bb4311dd6e022100e96082cf51d2a19c3b24c44044f75dc796d5770690b2cba24de44ef16de62684"
}

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,21 @@
{
"hash": "00000006580060522a1361e79685fe2a85e917de1089fe5743ab1ee35407d789",
"size": 281,
"weight": 1016,
"height": 200000,
"version": 805306368,
"versionHex": "30000000",
"merkleroot": "cb1a8b42f809dc5b443608b8056734653f619652af3012c0827372bcd7808954",
"tx": [
"cb1a8b42f809dc5b443608b8056734653f619652af3012c0827372bcd7808954"
],
"time": 1550634944,
"mediantime": 1550634620,
"nonce": 1933676035,
"bits": "1d0d67c7",
"difficulty": 0.0745957759224163,
"chainwork": "00000000000000000000000000000000000000000000000000001c4a9cd47c3c",
"previousblockhash": "0000000bf80298a54a30bc2cabc55b1940696047577cfb87e5af1b942cfc421d",
"nextblockhash": "000000034d3fee7aac4a6aa33f4cff189f9e6836f0649b048d0cb953e045dd43",
"block": "000000301d42fc2c941bafe587fb7c5747606940195bc5ab2cbc304aa59802f80b000000548980d7bc727382c01230af5296613f65346705b80836445bdc09f8428b1acbc0cf6c5cc7670d1d038e417301010000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff2003400d0304c0cf6c5c08ef47c347000000000d2f6e6f64655374726174756d2f00000000020000000000000000266a24aa21a9ede2f61c3f71d1defd3fa999dfa36953755c690689799962b48bebd836974e8cf90088526a740000001976a914eb0ff2678a9e5f1d2c0e8bd4812dc46043c669fc88ac0120000000000000000000000000000000000000000000000000000000000000000000000000"
}

View File

@ -0,0 +1,23 @@
{
"hash": "000000049aabfb33d6767b02942f167506604c18ee84a71964bfbee4874e2e94",
"confirmations": 405,
"strippedsize": 245,
"size": 281,
"weight": 1016,
"height": 424000,
"version": 1342177280,
"versionHex": "50000000",
"merkleroot": "7ad0d336ba84e1df5e4b4d5166e176ac8ef3993c1fa4f49b9584a3135d6bcc7a",
"tx": [
"7ad0d336ba84e1df5e4b4d5166e176ac8ef3993c1fa4f49b9584a3135d6bcc7a"
],
"time": 1568942728,
"mediantime": 1568942609,
"nonce": 2208067584,
"bits": "1d05a3a5",
"difficulty": 0.1773262657174631,
"chainwork": "000000000000000000000000000000000000000000000000000034d496753368",
"previousblockhash": "00000003a5a7200c799a131ec0fe39d1484f58473f0c1b911540f2bf6e9bbe2f",
"nextblockhash": "00000002654f6bae0e602eb32900586ffd2035c0331c50e862ecdee053953c1a",
"block": "000000502fbe9b6ebff24015911b0c3f47584f48d139fec01e139a790c20a7a5030000007acc6b5d13a384959bf4a41f3c99f38eac76e166514d4b5edfe184ba36d3d07a882a845da5a3051d00709c8301010000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff200340780604882a845d08d6c61088010000000d2f6e6f64655374726174756d2f00000000020000000000000000266a24aa21a9ede2f61c3f71d1defd3fa999dfa36953755c690689799962b48bebd836974e8cf90088526a740000001976a91467f8037a52d10988b41989432ccf5a096600b1dd88ac0120000000000000000000000000000000000000000000000000000000000000000000000000"
}

View File

@ -0,0 +1,23 @@
{
"hash": "0000000000fa8ca8bbc8d2a5c43221bbbd23868d128fb6f942ea79f4fb8fb3f7",
"confirmations": 1,
"strippedsize": 279,
"size": 315,
"weight": 1152,
"height": 296039,
"version": 805306369,
"versionHex": "30000001",
"merkleroot": "e428c003c3d8660cabb891c1da141283a1a0045353786a7e7c809ef7fc5ff77b",
"tx": [
"e428c003c3d8660cabb891c1da141283a1a0045353786a7e7c809ef7fc5ff77b"
],
"time": 1561402552,
"mediantime": 1561402193,
"nonce": 3796686866,
"bits": "1c022450",
"difficulty": 119.5212584064744,
"chainwork": "0000000000000000000000000000000000000000000000000842199d62c03de2",
"previousblockhash": "00000000007dc528b4381ca868349627fd9e6d89f71607ab075f572448ed4a49",
"nextblockhash": "0000000001875011ab354686523fe37786d417bf53efd7b09448bed928a74bd6",
"block": "01000030494aed4824575f07ab0716f7896d9efd27963468a81c38b428c57d00000000007bf75ffcf79e807c7e6a78535304a0a1831214dac191b8ab0c66d8c303c028e4b81c115d5024021c12d84ce20101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff200367840404b81c115d08602a016b1cb500000d2f6e6f64655374726174756d2f00000000030000000000000000266a24aa21a9ede2f61c3f71d1defd3fa999dfa36953755c690689799962b48bebd836974e8cf9001edc0c170000001976a914e6802e3c73096ca0baf6166e3b7ed2427984e8fd88ac8033023b000000001976a91464f64fb46815dbabaa57f61aca598eb93217ae7a88ac00000000"
}

View File

@ -0,0 +1,19 @@
{
"hash": "a8212fbda825a42ecd3a0d1251437626fbde53afc1ea4eea76d05b4898718a0f",
"size": 441,
"height": 1040000,
"version": 536870912,
"merkleroot": "a72e4d38041df2065a1d8523943e0e0aeb7a0e15d931feb4ec2e2525d9988f8d",
"tx": [
"9d238b6afb4a3c6767e510d6278934169292dd2589bfea05bea509c27f4055bd",
"9a02f1240a1347091b3d530fac4f889a49a7d3c85ebbb80509f13c71acd2e11c"
],
"time": 1574635768,
"nonce": 0,
"bits": "1a032448",
"difficulty": 5340049.590861406,
"chainwork": "000000000000000000000000000000000000000000000c88128735f6b6caae3f",
"previousblockhash": "e0cf7f7201fdcc1b5f30f57a64cb8be9889124bd230eca349c18b3c6a2f73c4b",
"nextblockhash": "d3eb14aa9b8eec45b059ec189be270c0b8d58943e76a296eea18745fe01d843c",
"block": "000000204b3cf7a2c6b3189c34ca0e23bd249188e98bcb647af5305f1bccfd01727fcfe08d8f98d925252eecb4fe31d9150e7aeb0a0e3e9423851d5a06f21d04384d2ea7f808db5d4824031a000000000203000000010000000000000000000000000000000000000000000000000000000000000000ffffffff070380de0f02c000ffffffff0100000000000000000000000000030000000100da81675dca6b9766faec59cc99e69cce6c4ddc9dc6afa8fa7f2b7406a18f0001000000494830450221009b812b2030b859645a58cd9836bcff268626ca4d5f978304764593403698eca5022062d7b0d2173b80aa52a3eb69ba75e38c0ff5d7d575ee626d8f47bc76b1383fed01ffffffff030000000000000000005ee3b14d235d00002321028ac5d9ed13c61f9fb986c3038d783f6b15da502f341172e40188d536a12143c1ac525394b204000000232103a4f6c7336909f19028383b1ca702eb1eaa0e25605925d834123290099719cb3fac00000000473045022100d65f72c17e8c66a3693aad7e1121f62fd85ccce4704fcccf5084a3d837e6175f022010e84847b6b10b728bc070ff046c571dfe14df274d4604087fd7248870e0d309"
}

View File

@ -0,0 +1,18 @@
{
"hash": "02709ff59f09b08684bdab6000b7ad134f1d54ac4079202c94173378fabc44e7",
"size": 204,
"height": 400000,
"version": 7,
"merkleroot": "500065fdf911655da912e4da08bcd364d30e28f28806a05fb6719f0189611e85",
"tx": [
"500065fdf911655da912e4da08bcd364d30e28f28806a05fb6719f0189611e85"
],
"time": 1535894357,
"nonce": 165549067,
"bits": "1c11baaf",
"difficulty": 14.43920878076741,
"chainwork": "00000000000000000000000000000000000000000000092b98eeae51bff1a231",
"previousblockhash": "d36ab89ca028f18f7847ebdb6e5fbe9adbcccb2cadbd4cdb0eb3586f1ea1ca2f",
"nextblockhash": "b3a08ffcb11a7b5a7c1226c1a4a291a1fe49e39e75a0646aeb6d67146e059e4a",
"block": "070000002fcaa11e6f58b30edb4cbdad2ccbccdb9abe5f6edbeb47788ff128a09cb86ad3851e6189019f71b65fa00688f2280ed364d3bc08dae412a95d6511f9fd65005055e38b5bafba111c0b14de09010200000055e38b5b010000000000000000000000000000000000000000000000000000000000000000ffffffff1803801a060455e38b5b08810000011e0000007969696d7000000000000100e40b54020000002321035bb9cfdf931fb739df296058de9e27596f8051ebc373422eae8c10fdbd68284eac00000000"
}

View File

@ -0,0 +1,14 @@
{
"hash": "00000000020ff055aa61a4d11d1d66730d7e0534dc8570dc2d40c45b675b2582",
"size": 196,
"height": 1000,
"merkleroot": "665fabda52881b52894d5b30feead9755d0679ee8fa832f26a82bba039c25745",
"tx": [
"665fabda52881b52894d5b30feead9755d0679ee8fa832f26a82bba039c25745"
],
"time": 1514021201,
"nonce": 3752529043,
"bits": "1c027039",
"previousblockhash": "00000000022df0e0facb9d2c6f64636cf1d8452ccfd8d3243fa83caa8a6d9259",
"block": "0000002059926d8aaa3ca83f24d3d8cf2c45d8f16c63646f2c9dcbfae0f02d02000000004557c239a0bb826af232a88fee79065d75d9eafe305b4d89521b8852daab5f6651213e5a3970021c930cabdf0102000000010000000000000000000000000000000000000000000000000000000000000000ffffffff1e02e8030451213e5a0881000638d2000000416c744d696e65722e4e657400000000000100d2496b000000001976a9143d619fd153f2872791be572f222c0b896517f91288ac00000000"
}

View File

@ -0,0 +1,14 @@
{
"hash": "d10fb9c30342acd0af365faee684f39c7ea08387732ad4f5569a56cd5ec06b1d",
"size": 475,
"height": 88888,
"merkleroot": "bd91e953702e9ee14b231a4d5ba3b8ce4c492a27815c8ea372f3e477623bca43",
"tx": [
"bd91e953702e9ee14b231a4d5ba3b8ce4c492a27815c8ea372f3e477623bca43"
],
"time": 1426508828,
"nonce": 0,
"bits": "1b352f80",
"previousblockhash": "109a8dcbf8688bf36dba6f78019ddc1969298116020e87f816a823107b91095d",
"block": "02099b005d09917b1023a816f8870e021681296919dc9d01786fba6df38b68f8cb8d9a1043ca3b6277e4f372a38e5c81272a494cceb8a35b4d1a234be19e2e7053e991bd1ccc0655802f351b000000000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0d03385b010101062f503253482fffffffff01a0aaff250200000023210267c1c03b12eee9de742d97828388e867a9b9eea60ca53c05f573ded072846206ac00000000"
}

View File

@ -0,0 +1,31 @@
{
"hash": "c53c371f253426f9e582e3de9d9f1f03e5e40e771aa96953c502ebe2222bb013",
"confirmations": 630231,
"strippedsize": 582,
"size": 618,
"weight": 2364,
"height": 400021,
"version": 536870912,
"versionHex": "20000000",
"merkleroot": "28e89aa0dbc0f1cacccb3ab402d7891abf636165b2cfd1b018e3b675468d8855",
"tx": [
"43b8bea316ac7df3eeadd3b941808b100d66d424464af2dfb211773cd8dc0752",
"4d581281a17e679df9f109e0b0e7c59efacfe50860a733d1f06a23fa08540b6d"
],
"time": 1544070336,
"mediantime": 1544070221,
"nonce": 0,
"chainwork": "00000000000000000000000000000000000000000011cc010c812da27182ad31",
"nTx": 2,
"powdata": {
"algo": "neoscrypt",
"mergemined": false,
"bits": "1d016293",
"difficulty": 0.7219816901873947,
"fakeheader": "00000000000000000000000000000000000000000000000000000000000000000000000013b02b22e2eb02c55369a91a770ee4e5031f9f9ddee382e5f92634251f373cc50000000000000000cd8740e3"
},
"rngseed": "ae81fb25abb6328512e52af04b7e36e03ed545061ad000e6b07ed86e08551cb1",
"previousblockhash": "ab66782b9aa7edd86912bde739b7954517ef82907e7df2b910c6720b69c197fb",
"nextblockhash": "adfaac85095ab7f6c565a4c7af76229abcfd07fb0a0199383c3d65b5294f471a",
"block": "00000020fb97c1690b72c610b9f27d7e9082ef174595b739e7bd1269d8eda79a2b7866ab55888d4675b6e318b0d1cfb2656163bf1a89d702b43acbcccaf1c0dba09ae828c0a4085c0000000000000000029362011d00000000000000000000000000000000000000000000000000000000000000000000000013b02b22e2eb02c55369a91a770ee4e5031f9f9ddee382e5f92634251f373cc50000000000000000cd8740e302020000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff0603951a060101ffffffff02404af6050000000023210259b332f71852584ffcbb7701e76721adf988ba12cdbe1091b7cfd7b36ec93869ac0000000000000000266a24aa21a9edb0c34f0b713a6807ef08628f5db0c5c0037c39b2f6d208b5e08da25b13cfb2e501200000000000000000000000000000000000000000000000000000000000000000000000000200000001ebd8995be8b4ebc8ce2f082bf577818a5abf667afd09f5dcafd1c8790a63b391010000006a4730440220442743b7fc92e9904d3910cc61f5077a787b78f763b9efbe56c9306cbb6e2a77022004fae47d54cb45728894c511611cb66513cf01a04471a4b53821ba4cf386f19901210319f13957ea073a98708f09531a10d0f4e4f42a82102c4fead7c69ba4b4974e6bfeffffff0240420f0000000000445108702f444f474341541e7b2267223a7b2274667472223a5b7b226f223a332c226d223a317d5d7d7d6d7576a91452df4afbfbc31db12150f6e2038fa43c55aeafe988ac04f7fc03000000001976a914711acc7fc374161611ac25fa4f3d264940bcbdd288ac931a0600"
}

View File

@ -0,0 +1,103 @@
{
"hash": "948183fa76c2a4466bb32a06cc4f4f0afe3894d524dd07a963b3836c092f3acd",
"confirmations": 930248,
"strippedsize": 1567,
"size": 1603,
"weight": 6304,
"height": 100000,
"version": 536870912,
"versionHex": "20000000",
"merkleroot": "bd2b4a5a8f1f8085bab936f2f20f82e5928a0bbd573be780fec14d8b4fc4e922",
"tx": [
"e16a22c4dfd99c639b60914ce75e3a7847fed7c1f3f5f2dd565ff48d6395a116",
"9854b65e59dca27649a3268d0feee2d55036e26cc49038f67aae000dd0126b48"
],
"time": 1534624527,
"mediantime": 1534624309,
"nonce": 0,
"chainwork": "00000000000000000000000000000000000000000003fbc9da84e345e04f5136",
"nTx": 2,
"powdata": {
"algo": "sha256d",
"mergemined": true,
"bits": "18347258",
"difficulty": 20964060431.07306,
"auxpow": {
"tx": {
"hex": "01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff64033c33082cfabe6d6dfe0718afd8c0651b58f14378a128efde085d91bcfa2790de57ec862ad420d90708000000f09f909f000f4d696e6564206279207374636f646500000000000000000000000000000000000000000000000000000000000000000000ff3b9a010320983b4b000000001976a914c825a1ecf2a6830c4401620c3a16f1995057c2ab88ac00000000000000002f6a24aa21a9ed98a08f4c63b7cad2f139005520cf5998e1fa2ad8d53bbad87e0868d8c509149308000000000000000000000000000000002c6a4c2952534b424c4f434b3a88b841b4ba9c30eff483b55f8a520403afac9e2050a769129a76620c51e633615b9cd447",
"txid": "54bfbec36774378ce65ab829b00d45bb4d734a9220932f5514d8b4e1992e8dfc",
"hash": "54bfbec36774378ce65ab829b00d45bb4d734a9220932f5514d8b4e1992e8dfc",
"version": 1,
"size": 294,
"vsize": 294,
"weight": 1176,
"locktime": 1205115995,
"vin": [
{
"coinbase": "033c33082cfabe6d6dfe0718afd8c0651b58f14378a128efde085d91bcfa2790de57ec862ad420d90708000000f09f909f000f4d696e6564206279207374636f646500000000000000000000000000000000000000000000000000000000000000000000",
"sequence": 26885119
}
],
"vout": [
{
"value": 12.62196768,
"n": 0,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 c825a1ecf2a6830c4401620c3a16f1995057c2ab OP_EQUALVERIFY OP_CHECKSIG",
"hex": "76a914c825a1ecf2a6830c4401620c3a16f1995057c2ab88ac",
"reqSigs": 1,
"type": "pubkeyhash",
"addresses": [
"CaiAoAHC4kYmGJnNdcJWP8EFDE42Ji7XiS"
]
}
},
{
"value": 0.00000000,
"n": 1,
"scriptPubKey": {
"asm": "OP_RETURN aa21a9ed98a08f4c63b7cad2f139005520cf5998e1fa2ad8d53bbad87e0868d8c5091493 0000000000000000",
"hex": "6a24aa21a9ed98a08f4c63b7cad2f139005520cf5998e1fa2ad8d53bbad87e0868d8c5091493080000000000000000",
"type": "nulldata"
}
},
{
"value": 0.00000000,
"n": 2,
"scriptPubKey": {
"asm": "OP_RETURN 52534b424c4f434b3a88b841b4ba9c30eff483b55f8a520403afac9e2050a769129a76620c51e63361",
"hex": "6a4c2952534b424c4f434b3a88b841b4ba9c30eff483b55f8a520403afac9e2050a769129a76620c51e63361",
"type": "nulldata"
}
}
],
"blockhash": "00000000000000001903108b258887d0258a3e8caf776983069f94b9f47049f6"
},
"chainindex": 7,
"merklebranch": [
"5c3158d4f108917fba77f9695650608477456b8f6f06238b1613311aafa74b09",
"eebaf1b6b9ab455497e6dd571140f0a3647ce36e1ab3e34a177dacc474a755d9",
"69191fcf85cf624ec99de2d341475478d5b4dc6ba47fc615aaefb05af2ca27a8",
"4930ddee5d4553184fc83c98e86f3fb317e6811c00840789dc67fea8be9a182f",
"32648a287897e297e999fff9426c2b90b79bf7d1b086a227a19b13a7ec2b41a2",
"fffd55d7bbea5f951ae2f3a8f540a9a0a71949d9578d9d44d20f86e090a2f810",
"74ddee1ce47dcb62229411fe9ae3a208f488ed18aef409fb5704fb55844ce2e0",
"a3b018da9723bdef26aaf35c69c24ef03c028ef0080cb78f524de8a3f1ae4ef4",
"b2b0fee71fab43ae9c6eb27aaff5f4a1806a3568a0fd7cdae67c95b7f5684ce2",
"ca7503900f2b8aeec18402721788f9ea340ab4216ffd095cba066b3af306cbdb",
"0aba5a93156602ce5202293e3c83c4e1f3c754ff9338770d18bceda4fcfbd686",
"084cf897b58d851626d0ba32d4c717584b018881ab022e8fba1c9d8ed6aaae0e"
],
"chainmerklebranch": [
"2cadbb3ce34769fc6f95572012c22605f8c3a69982a9a6854523f6548007d30b",
"e5c2407ba454ffeb28cf0c50c5c293a84c9a75788f8a8f35ccb974e606280377",
"a5cd51355bea0fd77416d06bf3647e16b4663d2f87e6a3ccb23e713cb8b4de1c"
],
"parentblock": "00000020913d01444f14db2c6ab43ffd0f36af5613423815543c22000000000000000000a2edf8bab99ac19e7f7adaf23bc43570d543054aed83e77694f26d33ec9db2a51e83785ba70d2c17865f3a57"
}
},
"rngseed": "1cc1ed830b99a352c02a10fca08284d005c1cfb01e5ffd10b85bcda3e2164636",
"previousblockhash": "8acbc6b31dabb5b3a224d9e4e55e2bd766882f07b2c592e1a101ab7a6e59cd6b",
"nextblockhash": "427b1860366cfc584170f73b0518a71adb9787841c960ad3a857e27b6b773385",
"block": "000000206bcd596e7aab01a1e192c5b2072f8866d72b5ee5e4d924a2b3b5ab1db3c6cb8a22e9c44f8b4dc1fe80e73b57bd0b8a92e5820ff2f236b9ba85801f8f5a4a2bbd0f83785b0000000000000000815872341801000000010000000000000000000000000000000000000000000000000000000000000000ffffffff64033c33082cfabe6d6dfe0718afd8c0651b58f14378a128efde085d91bcfa2790de57ec862ad420d90708000000f09f909f000f4d696e6564206279207374636f646500000000000000000000000000000000000000000000000000000000000000000000ff3b9a010320983b4b000000001976a914c825a1ecf2a6830c4401620c3a16f1995057c2ab88ac00000000000000002f6a24aa21a9ed98a08f4c63b7cad2f139005520cf5998e1fa2ad8d53bbad87e0868d8c509149308000000000000000000000000000000002c6a4c2952534b424c4f434b3a88b841b4ba9c30eff483b55f8a520403afac9e2050a769129a76620c51e633615b9cd44700000000000000000000000000000000000000000000000000000000000000000c094ba7af1a3113168b23066f8f6b45778460505669f977ba7f9108f1d458315cd955a774c4ac7d174ae3b31a6ee37c64a3f0401157dde6975445abb9b6f1baeea827caf25ab0efaa15c67fa46bdcb4d578544741d3e29dc94e62cf85cf1f19692f189abea8fe67dc890784001c81e617b33f6fe8983cc84f1853455deedd3049a2412beca7139ba127a286b0d1f79bb7902b6c42f9ff99e997e29778288a643210f8a290e0860fd2449d8d57d94919a7a0a940f5a8f3e21a955feabbd755fdffe0e24c8455fb0457fb09f4ae18ed88f408a2e39afe11942262cb7de41ceedd74f44eaef1a3e84d528fb70c08f08e023cf04ec2695cf3aa26efbd2397da18b0a3e24c68f5b7957ce6da7cfda068356a80a1f4f5af7ab26e9cae43ab1fe7feb0b2dbcb06f33a6b06ba5c09fd6f21b40a34eaf98817720284c1ee8a2b0f900375ca86d6fbfca4edbc180d773893ff54c7f3e1c4833c3e290252ce026615935aba0a0eaeaad68e9d1cba8f2e02ab8188014b5817c7d432bad02616858db597f84c0800000000030bd3078054f6234585a6a98299a6c3f80526c2122057956ffc6947e33cbbad2c77032806e674b9cc358f8a8f78759a4ca893c2c5500ccf28ebff54a47b40c2e51cdeb4b83c713eb2cca3e6872f3d66b4167e64f36bd01674d70fea5b3551cda50700000000000020913d01444f14db2c6ab43ffd0f36af5613423815543c22000000000000000000a2edf8bab99ac19e7f7adaf23bc43570d543054aed83e77694f26d33ec9db2a51e83785ba70d2c17865f3a570202000000000101000000000000
0000000000000000000000000000000000000000000000000000ffffffff0603a086010101ffffffff02ae8ef60500000000232103ac17736c4dfa051108f5a9f6c602bdac64ff6f4aab4dd36b7fb47ed5df8df68bac0000000000000000266a24aa21a9ed2c2eedd160077355e9d578fc8deebe7008bb20ecb418ccb750aa8367a385ad9401200000000000000000000000000000000000000000000000000000000000000000000000000200000002963d484c856e84ee811aaec4e4f1819d01b8444a82cd967b35dda08edec7f772010000006a4730440220528529296ef5d2a3341bf7438f7fc6a8baf3d77cc0f58718b3cb03503dfb64a502203ceff4277e2a7f4acc3b9dea2c8df64b1a794e6dda790de986033dcafb2df3660121039f8c7dfcbe7745bc72d5e1fb3e77a7dac3a20220102e7a1bb4ff96e7f8b8c0cbfeffffff963d484c856e84ee811aaec4e4f1819d01b8444a82cd967b35dda08edec7f772000000006b483045022100998ad854b30a4049e6eda69dd6248976bfa8eaf73244e909f01bd3ced9e1a9fb02203229b294e83931ad4ea8ff823895cd0d579c590337e72a40b1f254d33d65df460121021f787facf0e78ba561a2094803d32d4cba38ec861c60b1732e2615963e6c82a1feffffff02f947fa02000000001976a914463f344026a1829a0dec9da38efe408f786be9e988ac40420f000000000059520b702f46756e436f6f6b6965307b2267223a7b2274667472223a5b7b226f223a31322c226d223a35337d2c7b226f223a31322c226d223a32347d5d7d7d6d7576a914ef56cda5eeea29b552dc6c2526b6a1a16715483188ac9e860100"
}

View File

@ -0,0 +1,15 @@
{
"hash": "fa192afbf7a43689502135e425df7279b13f450f5b49c1107746362c343a68f5",
"size": 402,
"height": 100000,
"merkleroot": "019e31415109ca16f09e35cab38ac0a0624949c0539ce110fbbe4cf506e9b776",
"tx": [
"019e31415109ca16f09e35cab38ac0a0624949c0539ce110fbbe4cf506e9b776"
],
"time": 1533976567,
"nonce": 1565590912,
"bits": "1b02364f",
"previousblockhash": "3523dfb821472a1f1472bc12eee06e4ff404b92d03dc51318ab589b2906c538f",
"block": "000000208f536c90b289b58a3151dc032db904f44f6ee0ee12bc72141f2a4721b8df233576b7e906f54cbefb10e19c53c0494962a0c08ab3ca359ef016ca095141319e01f79f6e5b4f36021b8005515d0101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff2003a0860104f79f6e5b0877ffe6dd840000000d2f6e6f64655374726174756d2f0000000007009ce4a6000000001976a914dfa17720fa101e262a8fc1a378f25a275a26952288ac00e1f505000000001976a9147d9ed014fc4e603fca7c2e3f9097fb7d0fb487fc88ac00e1f505000000001976a914bc7e5a5234db3ab82d74c396ad2b2af419b7517488ac00e1f505000000001976a914ff71b0c9c2a90c6164a50a2fb523eb54a8a6b55088ac00a3e111000000001976a9140654dd9b856f2ece1d56cb4ee5043cd9398d962c88ac00e1f505000000001976a9140b4bfb256ef4bfa360e3b9e66e53a0bd84d196bc88ac002f6859000000001976a914dec7ddb718550686e8ea9b100354ef04f20167a988ac00000000"
}

View File

@ -69,6 +69,12 @@ addresses = [
"23b5dd9b7b402388c7a40bc88c261f3178acf30d", "7c7bdf0e0713f3752f4b88"),
(coins.SmartCash, "SQFDM9NtRRmpHebq3H5RA3qpGJfGqp8Xgw",
"206168f5322583ff37f8e55665a4789ae8963532", "b8cb80b26e8932f5b12a7e"),
(coins.Peercoin, "PAprodbYvZqf4vjhef49aThB9rSZRxXsM6",
"1886c1b26e9546e8d424eb79631ee68de134466b", "2a9d25cb0c9f05690bff89"),
(coins.PeercoinTestnet, "msnHPXDWuJhRBPVNQnwXdKvEMQHLr9z1P5",
"8687b01c03f47615226bdf54443f172d8eb523c6", "318a000ccf626685c8c213"),
(coins.GravityZeroCoin, "GJPaFtL7ffbFPQKbPoLn2PvgajQ48QGo67",
"05fc7b8c8ba198dedb4d14223aeb709436373d6b", "bd8fa7f0c7b50b023ac0c6"),
]

View File

@ -1,24 +1,121 @@
import electrumx.lib.coins as coins
# Tests of lib/coins.py
import pytest
from electrumx.lib.coins import BitcoinSV, NameMixin
from electrumx.lib.script import OpCodes, Script
coin = BitcoinSV
def test_bitcoin_cash():
raw_header = bytes.fromhex(
"00000020df975c121dcbc18bbb7ddfd0419fc368b45db86b48c87e0"
"1000000000000000036ae3dd40a10a40d3050de13ca546a2f81589d"
"e2d2f317925a43a115437e2381f5bf535b94da0118ac8df8c5"
)
height = 540000
electrum_header = {
'block_height': 540000,
'version': 536870912,
'prev_block_hash':
'0000000000000000017ec8486bb85db468c39f41d0df7dbb8bc1cb1d125c97df',
'merkle_root':
'81237e4315a1435a9217f3d2e29d58812f6a54ca13de50300da4100ad43dae36',
'timestamp': 1532215285,
'bits': 402774676,
'nonce': 3321400748
}
@pytest.mark.parametrize("script", (
bytes([OpCodes.OP_RETURN]),
bytes([OpCodes.OP_RETURN]) + bytes([2, 28, 50]),
bytes([OpCodes.OP_0, OpCodes.OP_RETURN]),
bytes([OpCodes.OP_0, OpCodes.OP_RETURN]) + bytes([2, 28, 50]),
))
def test_op_return(script):
assert coin.hashX_from_script(script) is None
assert coins.BitcoinCash.electrum_header(
raw_header, height) == electrum_header
@pytest.mark.parametrize("script", (
bytes([]),
bytes([OpCodes.OP_1, OpCodes.OP_RETURN]) + bytes([2, 28, 50]),
bytes([OpCodes.OP_0]),
bytes([OpCodes.OP_0, OpCodes.OP_1]),
bytes([OpCodes.OP_HASH160]),
))
def test_not_op_return(script):
assert coin.hashX_from_script(script) is not None
NAME = "name".encode("ascii")
DAYS = hex(6).encode("ascii")
VALUE = "value".encode("ascii")
ADDRESS_SCRIPT = "address_script".encode("ascii")
OP_NAME_NEW = OpCodes.OP_1
OP_NAME_UPDATE = OpCodes.OP_2
OP_DROP = OpCodes.OP_DROP
OP_2DROP = OpCodes.OP_2DROP
DP_MULT = NameMixin.DATA_PUSH_MULTIPLE
def create_script(pattern, address_script):
    """Serialise *pattern* into a script and append *address_script*.

    Each item in *pattern* is either an int (appended verbatim as a raw
    opcode byte) or a bytes-like payload (emitted as a data push via
    Script.push_data).  Returns the assembled script as immutable bytes.
    """
    script = bytearray()
    for item in pattern:
        # isinstance is the idiomatic type check (type(x) == int rejects
        # int subclasses; no behavioural difference for these fixtures).
        if isinstance(item, int):
            script.append(item)
        else:
            script.extend(Script.push_data(item))
    script.extend(address_script)
    return bytes(script)
@pytest.mark.parametrize("opcode,pattern", (
([OP_NAME_NEW, OP_DROP, -1, -1, OP_2DROP, -1, OP_DROP],
[OP_NAME_NEW, OP_DROP, NAME, DAYS, OP_2DROP, VALUE, OP_DROP]),
([OP_NAME_NEW, OP_DROP, -1, -1, OP_2DROP, DP_MULT],
[OP_NAME_NEW, OP_DROP, NAME, DAYS, OP_2DROP, VALUE, OP_DROP]),
([OP_NAME_NEW, OP_DROP, -1, -1, OP_2DROP, DP_MULT],
[OP_NAME_NEW, OP_DROP, NAME, DAYS, OP_2DROP, VALUE, VALUE, OP_2DROP]),
([OP_NAME_NEW, OP_DROP, -1, OP_2DROP, DP_MULT, -1, OP_DROP],
[OP_NAME_NEW, OP_DROP, NAME, OP_2DROP, VALUE, OP_DROP, DAYS, OP_DROP]),
([OP_NAME_NEW, OP_DROP, -1, OP_2DROP, DP_MULT, -1, OP_DROP],
[OP_NAME_NEW, OP_DROP, NAME, OP_2DROP, VALUE, VALUE, OP_2DROP, DAYS, OP_DROP]),
))
def test_name_mixin_interpret_name_prefix(opcode, pattern):
ops = [opcode]
script = create_script(pattern, ADDRESS_SCRIPT)
parsed_names, parsed_address_script = NameMixin.interpret_name_prefix(script, ops)
assert len(parsed_names) == 0
assert parsed_address_script == ADDRESS_SCRIPT
@pytest.mark.parametrize("opcode,pattern", (
([OP_NAME_NEW, OP_DROP, "name", "days", OP_2DROP, -1, OP_DROP],
[OP_NAME_NEW, OP_DROP, NAME, DAYS, OP_2DROP, VALUE, OP_DROP]),
([OP_NAME_NEW, OP_DROP, "name", OP_DROP, -1, OP_DROP, "days", OP_DROP],
[OP_NAME_NEW, OP_DROP, NAME, OP_DROP, VALUE, OP_DROP, DAYS, OP_DROP]),
([OP_NAME_NEW, OP_DROP, "name", "days", OP_2DROP, DP_MULT],
[OP_NAME_NEW, OP_DROP, NAME, DAYS, OP_2DROP, VALUE, OP_DROP]),
([OP_NAME_NEW, OP_DROP, "name", "days", OP_2DROP, DP_MULT],
[OP_NAME_NEW, OP_DROP, NAME, DAYS, OP_2DROP, VALUE, VALUE, OP_2DROP]),
([OP_NAME_NEW, OP_DROP, "name", "days", OP_2DROP, DP_MULT],
[OP_NAME_NEW, OP_DROP, NAME, DAYS, OP_2DROP, VALUE, VALUE, VALUE, OP_2DROP, OP_DROP]),
([OP_NAME_NEW, OP_DROP, "name", OP_2DROP, DP_MULT, "days", OP_DROP],
[OP_NAME_NEW, OP_DROP, NAME, OP_2DROP, VALUE, OP_DROP, DAYS, OP_DROP]),
([OP_NAME_NEW, OP_DROP, "name", OP_2DROP, DP_MULT, "days", OP_DROP],
[OP_NAME_NEW, OP_DROP, NAME, OP_2DROP, VALUE, VALUE, OP_2DROP, DAYS, OP_DROP]),
([OP_NAME_NEW, OP_DROP, "name", OP_2DROP, DP_MULT, "days", OP_DROP],
[OP_NAME_NEW, OP_DROP, NAME, OP_2DROP, VALUE, VALUE, VALUE, OP_2DROP, OP_DROP, DAYS, OP_DROP]),
))
def test_name_mixin_interpret_name_prefix_with_named_placeholders(opcode, pattern):
ops = [opcode]
script = create_script(pattern, ADDRESS_SCRIPT)
parsed_names, parsed_address_script = NameMixin.interpret_name_prefix(script, ops)
assert parsed_names["name"][1] == NAME
assert parsed_names["days"][1] == DAYS
assert parsed_address_script == ADDRESS_SCRIPT
@pytest.mark.parametrize("opcode", (
[OP_NAME_UPDATE, OP_DROP, -1, -1, OP_2DROP, -1, OP_DROP],
[OP_NAME_NEW, OP_DROP, -1, -1, OP_DROP, OP_DROP, -1, OP_DROP],
[OP_NAME_NEW, OP_DROP, "name", "days", OP_DROP, -1, OP_DROP],
))
def test_name_mixin_interpret_name_prefix_wrong_ops(opcode):
ops = [opcode]
script = create_script([OP_NAME_NEW, OP_DROP, NAME, DAYS, OP_2DROP,
VALUE, OP_DROP], ADDRESS_SCRIPT)
parsed_names, parsed_address_script = NameMixin.interpret_name_prefix(script, ops)
assert parsed_names is None
assert parsed_address_script == script

View File

@ -234,7 +234,7 @@ async def test_truncation_bad():
@pytest.mark.asyncio
async def test_markle_cache_bad():
async def test_merkle_cache_bad():
length = 23
source = Source(length).hashes
cache = MerkleCache(merkle, source)

402
tests/lib/test_tx_axe.py Normal file
View File

@ -0,0 +1,402 @@
import pytest
import electrumx.lib.tx_axe as lib_tx_axe
bfh = bytes.fromhex
V2_TX = (
'020000000192809f0b234cb850d71d020e678e93f074648ed0df5affd0c46d3bcb177f'
'9ccf020000008b483045022100c5403bcf86c3ae7b8fd4ca0d1e4df6729cc1af05ff95'
'd9726b43a64b41dd5d9902207fab615f41871885aa3062fc7d8f8d9d3dcbc2e4867c5d'
'96dd7a176b99e927924141040baa4271a82c5f1a09a5ea63d763697ca0545b6049c4dd'
'8e8d099dd91f2da10eb11e829000a82047ac56969fb582433067a21c3171e569d1832c'
'34fdd793cfc8ffffffff030000000000000000226a20195ce612d20e5284eb78bb28c9'
'c50d6139b10b77b2d5b2f94711b13162700472bfc53000000000001976a9144a519c63'
'f985ba5ab8b71bb42f1ecb82a0a0d80788acf6984315000000001976a9148b80536aa3'
'c460258cda834b86a46787c9a2b0bf88ac00000000')
CB_TX = ( '03000500010000000000000000000000000000000000000000000000000000000000000000ffffffff1303c407040e2f5032506f6f6c2d74444153482fffffffff0448d6a73d000000001976a914293859173a34194d445c2962b97383e2a93d7cb288ac22fc433e000000001976a914bf09c602c6b8f1db246aba5c37ad1cfdcb16b15e88ace9259c00000000004341047559d13c3f81b1fadbd8dd03e4b5a1c73b05e2b980e00d467aa9440b29c7de23664dde6428d75cafed22ae4f0d302e26c5c5a5dd4d3e1b796d7281bdc9430f35ac00000000000000002a6a28be61411c3c79b7fd45923118ba74d340afb248ae2edafe78c15e2d1aa337c942000000000000000000000000260100c407040076629a6e42fb519188f65889fd3ac0201be87aa227462b5643e8bb2ec1d7a82a')
CB_TX_V2 = ( '03000500010000000000000000000000000000000000000000000000000000000000000000ffffffff2603caac04194d696e656420627920416e74506f6f6c52000b03203e009e9457070000441a0000ffffffff02db98d40b000000001976a9141d16d67366c081e6cc6b402667fa8044c4a42e8888ac9d65130d000000001976a9146ca3b3578583b5c73adb302ccb612b9a5fbef17488ac00000000460200caac04002d126055d9cd81da35972dd5f11d8b7d24d23beeb9331d64d93ca74966fff9120000000000000000000000000000000000000000000000000000000000000000')
PRO_REG_TX = ( '03000100013a196f2a59b88dcbadb48b580dd4ef14f6d1dd6f86e8d6cb8942c34837a45448000000006a473044022048b0c1703e7c40750097d770b5f05b8ccdd5a656d24970fd769712830404958702205c63c57cdba86026a0fd58dc7633eb0ae6f66ef43e96915c3b74bb4c7747a885012103542c5d737685944600d238ae5e6abc7e2906e34453d6af5a0fadd84dd8e09242feffffff0121c89a3b000000001976a91435af662851170d5ddebba4e287019a4597dd453d88ac00000000fd12010100000000002181625260614470c020764d56d7a2eb9bedb0a2f65f0168c99a4fdf846573910000000000000000000000000000ffff74cb7f7f26d1c1a9c6453135e83be71635ac840652bbefc607861908c153792530dfe635bdf819ceb7963b624233a3465be94a4651ca36f7c0917d301c1e61485b68debc2f9fe5b7e84bc1a9c6453135e83be71635ac840652bbefc6078600001976a914950c9dea79cee4c4fea859c95fd534bba21f9a3088acf3a95bbe35ccdbe9d8cd3a875e3f80af725422bcf44d35ce79078df357c29bf9412088b70bd05dbbd5a86d51abc78e33658f95ac01b98bc16dd537bcac5c48d3db437ee6c9b1d664e2a192b64b1893b3ce22b4ac47da9c4a0d148440be74527a43d4' )
PRO_UP_SERV_TX = ( '0300020001423cef3af2b56378737de6bf9d1a5055f07bd35d86585770ce0449c7d5062f0a010000006a473044022013390c4c6f4411cbaee91ea49bb12c29b9618f4902c522274b013c5c20f81f77022022cc2ebb97aed1eecaaad17c9dbd4ad0f42a2969e4599f8e4214a008cd51b9f20121034125651548c482a003fb59229898169e8d289b84dc48f6dfbe160c08bfcec144feffffff01fa294209000000001976a914f489af78bc73074aee98082d2876d710aa242e6188ac00000000b50100bef73450cab0889367710dd4ccd80cdb47557ad135dcb0e54f2cd8b9623cd57300000000000000000000ffff2d3f1f37270f00768450381dca4e9888aa7be9a7725e0824696fc24f5a35de62916dc81b3085b3077e02247c17992ba5ee9a2894e03c2e489b07cb9b751207dd51ca5c3dc6b67bada8f82a41c3137dbb203ce185caaccf1565e62c626e5225ff741e01faf4b09d8d7300dfb5c3e6f352120047b2e7b0547e5ea1198b615bf4826eaba252f97007' )
PRO_UP_REG_TX = ( '030003000114b05579c72dc0785130153c35c8e398fcf1c49594800707b42e8beb73e5d76d000000006b483045022100b13e220d2bf1633002cb9d94f33f5bbf974ea0c4380d4497fedad2c422d92fde0220166f6894eb8e778d55f7b86089fd4ef08e481bb4f04bff67ea2bcb07c5dc2cba012102e2f8099a84f13f9e8e5d784567d49df919ec63764966738df83371c66ff534f6feffffff011f320f00000000001976a914bcc00fdff28f1b7b85ec5135b7a2d8c800a5c01888ac00000000e40100454bce26dde61cce5a8190928ed2af0f95c5bc053c88170b1a653028b36a29a300008aba2efc6ef0305e8aadb650a5e5e15f7ea583968b48ea47bcec4bfaab108aa98f2e4ed0bb66946f7b65dff9ee92820dc99a89fcafd3a4729860b2f9cd2ba4a7d965e9271976a9148e124bfba342a13dec4d7a932284179a7f9c9e6188acd2972fcc5557d534cc6a9ed494e6213673521dbccacf4d3637994a5aaad721d34120697182d7c398d4add6c49f0ddc7e71ec0253f0ff0d198b23913f8231ff18140f0b880feed08c267b0e9c70d9136c028c6126f7ae014879442b93c3a538ee6ac6' )
PRO_UP_REV_TX = ( '0300040001fb9e4b60ee4c625820b52f20067e1f7ce652526f3834afe334b010eddf1ac4e8000000006a47304402203386143fa1e39df1936fcc2af593837f9c815deb04440bfc9b877790e4caff4202202cdb7895a9656d3e62dc8ec2eada1de6d3b11acd517ab09c46ec274698cdefd7012103e10e8542bc703af92dc7c66a4a471f8101472b1a37b83a01309227ad2f7ebf14feffffff017b0d0b00000000001976a91493b17c6f50f75069b93864675c7cb1e9b9dbf3cf88ac00000000a40100d1061c3f0f32e332f100f153041797eebd2702a5f419d2d057023f7f7238c922030041a7d06f62ec60981969414d738489e4a0d8d4dcfb74e8a1ad2182f4e3de97d90e4222e82aa968d8d1c6fda6a553d6071897c8985fda438922d773b7aeabdbaa22cb699b248c0e15bfd7b52e230920ab04ac4881aaffe836bfbc53c6e95671ede9a8185fceece5b3ce1480a824a67fd004a15a2bb34fabb82688344d0b6d2669' )
''' No DIP0005 in Axe for Now
SUB_TX_REGISTER = (
'03000800010931c6b0ad7ce07f3c8aefeeb78e246a4fe6872bbf08ab6e4eb6a7b69acd'
'64a6010000006b483045022100a2feb698c43c752738fabea281b7e9e5a3aa648a4c54'
'1171e06d7c372db92c65022061c1ec3c92f2e76bb7fb1b548d854f19a41e6421267231'
'74150412caf3e98e9601210293360bf2a2e810673412bc6e8e0e358f3fb7bdbe9a667b'
'3d0103f761cc69a211feffffff0189fa433e000000001976a914551ab8ca96a9142217'
'4d22769c3a4f90b2dcd0de88ac00000000960100036162638e7042ec88acefcfe3d578'
'914bb48c6bd71b3459d384e42374e8abfeffffff01570b0000000000001976a91490c5'
'ce9d8bc992a88ac00000000a40100b67ffbbd095de31ea38446754e8abfeffffff0157'
'0b0000000000001976a91490c5ce9d8bc992a88ac00000000a40100b67ffbbd095de31'
'ea38446754e8abfeffffff01570b0000000000001976a91490c5ce9d')
SUB_TX_TOPUP = (
'03000900010931c6b0ad7ce07f3c8aefeeb78e246a4fe6872bbf08ab6e4eb6a7b69acd'
'64a6010000006b483045022100a2feb698c43c752738fabea281b7e9e5a3aa648a4c54'
'1171e06d7c372db92c65022061c1ec3c92f2e76bb7fb1b548d854f19a41e6421267231'
'74150412caf3e98e9601210293360bf2a2e810673412bc6e8e0e358f3fb7bdbe9a667b'
'3d0103f761cc69a211feffffff0189fa433e000000001976a914551ab8ca96a9142217'
'4d22769c3a4f90b2dcd0de88ac00000000220100d384e42374e8abfeffffff01570b00'
'0000a40100b67ffbbd095de31ea3844675')
SUB_TX_RESET_KEY = (
'03000a00010931c6b0ad7ce07f3c8aefeeb78e246a4fe6872bbf08ab6e4eb6a7b69acd'
'64a6010000006b483045022100a2feb698c43c752738fabea281b7e9e5a3aa648a4c54'
'1171e06d7c372db92c65022061c1ec3c92f2e76bb7fb1b548d854f19a41e6421267231'
'74150412caf3e98e9601210293360bf2a2e810673412bc6e8e0e358f3fb7bdbe9a667b'
'3d0103f761cc69a211feffffff0189fa433e000000001976a914551ab8ca96a9142217'
'4d22769c3a4f90b2dcd0de88ac00000000da0100d384e42374e8abfeffffff01570b00'
'0000a40100b67ffbbd095de31ea3844675af3e98e9601210293360bf2a2e810673412b'
'c6e8e0e358f3fb7bdbe9a667b3d0e803000000000000601210293360bf2a2e81067341'
'2bc6e8e0e358f3fb7bdbe9a667b3d0103f761caf3e98e9601210293360bf2a2e810673'
'412bc6e8e0e358f3fb7bdbe9a667b3d0103f761caf3e98e9601210293360bf2a2e8106'
'73412bc6e8e0e358f3fb7bdbe9a667b3d0103f761caf3e98e9601210293360bf2a2e81'
'0673412bc6e8e0e358f3fb7bdbe9a667b3d0103f761cabcdefab')
SUB_TX_CLOSE_ACCOUNT = (
'03000b00010931c6b0ad7ce07f3c8aefeeb78e246a4fe6872bbf08ab6e4eb6a7b69acd'
'64a6010000006b483045022100a2feb698c43c752738fabea281b7e9e5a3aa648a4c54'
'1171e06d7c372db92c65022061c1ec3c92f2e76bb7fb1b548d854f19a41e6421267231'
'74150412caf3e98e9601210293360bf2a2e810673412bc6e8e0e358f3fb7bdbe9a667b'
'3d0103f761cc69a211feffffff0189fa433e000000001976a914551ab8ca96a9142217'
'4d22769c3a4f90b2dcd0de88ac00000000aa0100d384e42374e8abfeffffff01570b00'
'0000a40100b67ffbbd095de31ea3844675af3e98e9601210293360bf2a2e810673412b'
'c6e8e0e358f3fb7bdbe9a12bc6e8e803000000000000a62bc6e8e0e358f3fb7bdbe9a6'
'67b3d0103f761caf3e98e9601210293360bf2a2e810673412bc6e8e0e358f3fb7bdbe9'
'a667b3d0103f761caf3e98e9601210293360bf2a2e810673412bc6e8e0e358f3fb7bdb'
'e9a667b3d0103f761cabcdefab')
'''
UNKNOWN_SPEC_TX = (
'0300bb00010931c6b0ad7ce07f3c8aefeeb78e246a4fe6872bbf08ab6e4eb6a7b69acd'
'64a6010000006b483045022100a2feb698c43c752738fabea281b7e9e5a3aa648a4c54'
'1171e06d7c372db92c65022061c1ec3c92f2e76bb7fb1b548d854f19a41e6421267231'
'74150412caf3e98e9601210293360bf2a2e810673412bc6e8e0e358f3fb7bdbe9a667b'
'3d0103f761cc69a211feffffff0189fa433e000000001976a914551ab8ca96a9142217'
'4d22769c3a4f90b2dcd0de88ac00000000aa0100d384e42374e8abfeffffff01570b00'
'0000a40100b67ffbbd095de31ea3844675af3e98e9601210293360bf2a2e810673412b'
'c6e8e0e358f3fb7bdbe9a12bc6e8e0e358f3fb7bdbe9a62bc6e8e0e358f3fb7bdbe9a6'
'67b3d0103f761caf3e98e9601210293360bf2a2e810673412bc6e8e0e358f3fb7bdbe9'
'a667b3d0103f761caf3e98e9601210293360bf2a2e810673412bc6e8e0e358f3fb7bdb'
'e9a667b3d0103f761cabcdefab')
WRONG_SPEC_TX = ( # Tx version < 3
'0200bb00010931c6b0ad7ce07f3c8aefeeb78e246a4fe6872bbf08ab6e4eb6a7b69acd'
'64a6010000006b483045022100a2feb698c43c752738fabea281b7e9e5a3aa648a4c54'
'1171e06d7c372db92c65022061c1ec3c92f2e76bb7fb1b548d854f19a41e6421267231'
'74150412caf3e98e9601210293360bf2a2e810673412bc6e8e0e358f3fb7bdbe9a667b'
'3d0103f761cc69a211feffffff0189fa433e000000001976a914551ab8ca96a9142217'
'4d22769c3a4f90b2dcd0de88ac00000000')
def test_axe_v2_tx():
    """A plain version-2 Axe tx carries no special type and no extra
    payload, and must round-trip through serialize()."""
    raw = bfh(V2_TX)
    tx = lib_tx_axe.DeserializerAxe(raw).read_tx()
    assert tx.version == 2
    assert tx.tx_type == 0
    assert tx.extra_payload == b''
    assert tx.serialize() == raw
def test_axe_tx_cb_tx():
    """CbTx (tx_type 5, payload version 1) exposes height and
    merkleRootMNList, and round-trips byte-for-byte."""
    raw = bfh(CB_TX)
    tx = lib_tx_axe.DeserializerAxe(raw).read_tx()
    assert (tx.version, tx.tx_type) == (3, 5)
    extra = tx.extra_payload
    assert extra.version == 1
    assert extra.height == 264132
    assert len(extra.merkleRootMNList) == 32
    assert extra.merkleRootMNList == bfh(
        '76629a6e42fb519188f65889fd3ac0201be87aa227462b5643e8bb2ec1d7a82a')
    assert tx.serialize() == raw
def test_axe_tx_cb_tx_v2():
    """CbTx with payload version 2, which adds merkleRootQuorums."""
    test = bfh(CB_TX_V2)
    deser = lib_tx_axe.DeserializerAxe(test)
    tx = deser.read_tx()
    assert tx.version == 3
    assert tx.tx_type == 5  # coinbase special tx (CbTx)
    extra = tx.extra_payload
    assert extra.version == 2
    assert extra.height == 306378
    assert len(extra.merkleRootMNList) == 32
    assert extra.merkleRootMNList == bfh(
        '2d126055d9cd81da35972dd5f11d8b7d24d23beeb9331d64d93ca74966fff912')
    # the quorums merkle root is all zeros in this fixture
    assert len(extra.merkleRootQuorums) == 32
    assert extra.merkleRootQuorums == bfh(
        '0000000000000000000000000000000000000000000000000000000000000000')
    # serialization must round-trip byte-for-byte
    ser = tx.serialize()
    assert ser == test
def test_axe_tx_pro_reg_tx():
    """ProRegTx (tx_type 1): check every field of the registration payload."""
    test = bfh(PRO_REG_TX)
    deser = lib_tx_axe.DeserializerAxe(test)
    tx = deser.read_tx()
    assert tx.version == 3
    assert tx.tx_type == 1  # ProRegTx
    extra = tx.extra_payload
    assert extra.version == 1
    assert extra.type == 0
    assert extra.mode == 0
    assert len(extra.collateralOutpoint.hash) == 32
    assert extra.collateralOutpoint.hash == bfh(
        '2181625260614470c020764d56d7a2eb9bedb0a2f65f0168c99a4fdf84657391')
    assert extra.collateralOutpoint.index == 0
    # 16-byte address with the IPv4-mapped ::ffff: prefix
    assert len(extra.ipAddress) == 16
    assert extra.ipAddress == bfh('00000000000000000000ffff74cb7f7f')
    assert extra.port == 9937
    assert len(extra.KeyIdOwner) == 20
    assert extra.KeyIdOwner == bfh(
        'c1a9c6453135e83be71635ac840652bbefc60786')
    assert len(extra.PubKeyOperator) == 48
    assert extra.PubKeyOperator == bfh(
        '1908c153792530dfe635bdf819ceb7963b624233a3465be94a4651ca36f7c0917d301c1e61485b68debc2f9fe5b7e84b')
    # in this fixture the voting key id equals the owner key id
    assert len(extra.KeyIdVoting) == 20
    assert extra.KeyIdVoting == bfh(
        'c1a9c6453135e83be71635ac840652bbefc60786')
    assert extra.operatorReward == 0
    assert extra.scriptPayout == bfh(
        '76a914950c9dea79cee4c4fea859c95fd534bba21f9a3088ac')
    assert len(extra.inputsHash) == 32
    assert extra.inputsHash == bfh(
        'f3a95bbe35ccdbe9d8cd3a875e3f80af725422bcf44d35ce79078df357c29bf9')
    assert extra.payloadSig == bfh(
        '2088b70bd05dbbd5a86d51abc78e33658f95ac01b98bc16dd537bcac5c48d3db437ee6c9b1d664e2a192b64b1893b3ce22b4ac47da9c4a0d148440be74527a43d4')
    # serialization must round-trip byte-for-byte
    ser = tx.serialize()
    assert ser == test
def test_axe_tx_pro_up_serv_tx():
    """ProUpServTx (tx_type 2): updated service fields round-trip."""
    raw = bfh(PRO_UP_SERV_TX)
    tx = lib_tx_axe.DeserializerAxe(raw).read_tx()
    assert (tx.version, tx.tx_type) == (3, 2)
    extra = tx.extra_payload
    assert extra.version == 1
    assert len(extra.proTxHash) == 32
    assert extra.proTxHash == bfh(
        'bef73450cab0889367710dd4ccd80cdb47557ad135dcb0e54f2cd8b9623cd573')
    assert len(extra.ipAddress) == 16
    assert extra.ipAddress == bfh('00000000000000000000ffff2d3f1f37')
    assert extra.port == 9999
    # operator payout script is not present in this test transaction
    assert extra.scriptOperatorPayout == bfh('')
    assert len(extra.inputsHash) == 32
    assert extra.inputsHash == bfh(
        '768450381dca4e9888aa7be9a7725e0824696fc24f5a35de62916dc81b3085b3')
    assert len(extra.payloadSig) == 96
    assert extra.payloadSig == bfh(
        '077e02247c17992ba5ee9a2894e03c2e489b07cb9b751207dd51ca5c3dc6b67b'
        'ada8f82a41c3137dbb203ce185caaccf1565e62c626e5225ff741e01faf4b09d'
        '8d7300dfb5c3e6f352120047b2e7b0547e5ea1198b615bf4826eaba252f97007')
    assert tx.serialize() == raw
def test_axe_tx_pro_up_reg_tx():
    """ProUpRegTx (tx_type 3): check the registrar-update payload fields.

    Fix: the KeyIdVoting assertion had been commented out (its expected
    value was one byte short at 19 bytes) and replaced by a debug print().
    The correct 20-byte key id — the bytes of PRO_UP_REG_TX immediately
    after the 48-byte PubKeyOperator and before the scriptPayout — is
    'c99a89fcafd3a4729860b2f9cd2ba4a7d965e927'; the assertion is restored
    and the print removed.
    """
    test = bfh(PRO_UP_REG_TX)
    deser = lib_tx_axe.DeserializerAxe(test)
    tx = deser.read_tx()
    assert tx.version == 3
    assert tx.tx_type == 3
    extra = tx.extra_payload
    assert extra.version == 1
    assert len(extra.proTxHash) == 32
    assert extra.proTxHash == bfh(
        '454bce26dde61cce5a8190928ed2af0f95c5bc053c88170b1a653028b36a29a3')
    assert extra.mode == 0
    assert len(extra.PubKeyOperator) == 48
    assert extra.PubKeyOperator == bfh(
        '8aba2efc6ef0305e8aadb650a5e5e15f7ea583968b48ea47bcec4bfaab108aa9'
        '8f2e4ed0bb66946f7b65dff9ee92820d')
    assert len(extra.KeyIdVoting) == 20
    assert extra.KeyIdVoting == bfh(
        'c99a89fcafd3a4729860b2f9cd2ba4a7d965e927')
    assert extra.scriptPayout == bfh(
        '76a9148e124bfba342a13dec4d7a932284179a7f9c9e6188ac')
    assert len(extra.inputsHash) == 32
    assert extra.inputsHash == bfh(
        'd2972fcc5557d534cc6a9ed494e6213673521dbccacf4d3637994a5aaad721d3')
    assert extra.payloadSig == bfh(
        '20697182d7c398d4add6c49f0ddc7e71ec0253f0ff0d198b23913f8231ff1814'
        '0f0b880feed08c267b0e9c70d9136c028c6126f7ae014879442b93c3a538ee6ac6')
    # serialization must round-trip byte-for-byte
    ser = tx.serialize()
    assert ser == test
def test_axe_tx_pro_up_rev_tx():
    """ProUpRevTx (tx_type 4): check the revocation payload fields."""
    test = bfh(PRO_UP_REV_TX)
    deser = lib_tx_axe.DeserializerAxe(test)
    tx = deser.read_tx()
    assert tx.version == 3
    assert tx.tx_type == 4  # ProUpRevTx
    extra = tx.extra_payload
    assert extra.version == 1
    assert len(extra.proTxHash) == 32
    assert extra.proTxHash == bfh(
        'd1061c3f0f32e332f100f153041797eebd2702a5f419d2d057023f7f7238c922')
    assert extra.reason == 3
    assert len(extra.inputsHash) == 32
    assert extra.inputsHash == bfh(
        '41a7d06f62ec60981969414d738489e4a0d8d4dcfb74e8a1ad2182f4e3de97d9')
    # 96-byte payload signature
    assert len(extra.payloadSig) == 96
    assert extra.payloadSig == bfh(
        '0e4222e82aa968d8d1c6fda6a553d6071897c8985fda438922d773b7aeabdbaa22cb699b248c0e15bfd7b52e230920ab04ac4881aaffe836bfbc53c6e95671ede9a8185fceece5b3ce1480a824a67fd004a15a2bb34fabb82688344d0b6d2669' )
    # serialization must round-trip byte-for-byte
    ser = tx.serialize()
    assert ser == test
''' No DIP0005 in Axe
def test_axe_tx_sub_tx_register_tx():
test = bfh(SUB_TX_REGISTER)
deser = lib_tx_axe.DeserializerAxe(test)
tx = deser.read_tx()
assert tx.version == 3
assert tx.tx_type == 8
extra = tx.extra_payload
assert extra.version == 1
assert extra.userName == b'abc'
assert len(extra.pubKey) == 48
assert extra.pubKey == bfh(
'8e7042ec88acefcfe3d578914bb48c6bd71b3459d384e42374e8abfeffff'
'ff01570b0000000000001976a91490c5ce9d')
assert len(extra.payloadSig) == 96
assert extra.payloadSig == bfh(
'8bc992a88ac00000000a40100b67ffbbd095de31ea38446754e8abfeffff'
'ff01570b0000000000001976a91490c5ce9d8bc992a88ac00000000a4010'
'0b67ffbbd095de31ea38446754e8abfeffffff01570b0000000000001976'
'a91490c5ce9d')
ser = tx.serialize()
assert ser == test
def test_axe_tx_sub_tx_topup_tx():
test = bfh(SUB_TX_TOPUP)
deser = lib_tx_axe.DeserializerAxe(test)
tx = deser.read_tx()
assert tx.version == 3
assert tx.tx_type == 9
extra = tx.extra_payload
assert extra.version == 1
assert len(extra.regTxHash) == 32
assert extra.regTxHash == bfh(
'd384e42374e8abfeffffff01570b000000a40100b67ffbbd095de31ea3844675')
ser = tx.serialize()
assert ser == test
def test_axe_tx_sub_tx_reset_key_tx():
test = bfh(SUB_TX_RESET_KEY)
deser = lib_tx_axe.DeserializerAxe(test)
tx = deser.read_tx()
assert tx.version == 3
assert tx.tx_type == 10
extra = tx.extra_payload
assert extra.version == 1
assert len(extra.regTxHash) == 32
assert extra.regTxHash == bfh(
'd384e42374e8abfeffffff01570b000000a40100b67ffbbd095de31ea3844675')
assert len(extra.hashPrevSubTx) == 32
assert extra.hashPrevSubTx == bfh(
'af3e98e9601210293360bf2a2e810673412bc6e8e0e358f3fb7bdbe9a667b3d0')
assert extra.creditFee == 1000
assert len(extra.newPubKey) == 48
assert extra.newPubKey == bfh(
'601210293360bf2a2e810673412bc6e8e0e358f3fb7bdbe9a667b3d0103f7'
'61caf3e98e9601210293360bf2a2e810673')
assert len(extra.payloadSig) == 96
assert extra.payloadSig == bfh(
'412bc6e8e0e358f3fb7bdbe9a667b3d0103f761caf3e98e9601210293360b'
'f2a2e810673412bc6e8e0e358f3fb7bdbe9a667b3d0103f761caf3e98e960'
'1210293360bf2a2e810673412bc6e8e0e358f3fb7bdbe9a667b3d0103f761'
'cabcdefab')
ser = tx.serialize()
assert ser == test
def test_axe_tx_sub_tx_close_account_tx():
test = bfh(SUB_TX_CLOSE_ACCOUNT)
deser = lib_tx_axe.DeserializerAxe(test)
tx = deser.read_tx()
assert tx.version == 3
assert tx.tx_type == 11
extra = tx.extra_payload
assert extra.version == 1
assert len(extra.regTxHash) == 32
assert extra.regTxHash == bfh(
'd384e42374e8abfeffffff01570b000000a40100b67ffbbd095de31ea3844675')
assert len(extra.hashPrevSubTx) == 32
assert extra.hashPrevSubTx == bfh(
'af3e98e9601210293360bf2a2e810673412bc6e8e0e358f3fb7bdbe9a12bc6e8')
assert extra.creditFee == 1000
assert len(extra.payloadSig) == 96
assert extra.payloadSig == bfh(
'a62bc6e8e0e358f3fb7bdbe9a667b3d0103f761caf3e98e9601210293360b'
'f2a2e810673412bc6e8e0e358f3fb7bdbe9a667b3d0103f761caf3e98e960'
'1210293360bf2a2e810673412bc6e8e0e358f3fb7bdbe9a667b3d0103f761'
'cabcdefab')
ser = tx.serialize()
assert ser == test
'''
def test_axe_tx_unknown_spec_tx():
    """A special tx with an unrecognised type keeps its extra payload as
    raw bytes and still round-trips."""
    raw = bfh(UNKNOWN_SPEC_TX)
    tx = lib_tx_axe.DeserializerAxe(raw).read_tx()
    assert tx.version == 3
    assert tx.tx_type == 187  # 0xbb: no payload class registered
    assert tx.extra_payload == bfh(
        '0100d384e42374e8abfeffffff01570b000000a40100b67ffbbd095de31e'
        'a3844675af3e98e9601210293360bf2a2e810673412bc6e8e0e358f3fb7b'
        'dbe9a12bc6e8e0e358f3fb7bdbe9a62bc6e8e0e358f3fb7bdbe9a667b3d0'
        '103f761caf3e98e9601210293360bf2a2e810673412bc6e8e0e358f3fb7b'
        'dbe9a667b3d0103f761caf3e98e9601210293360bf2a2e810673412bc6e8'
        'e0e358f3fb7bdbe9a667b3d0103f761cabcdefab')
    assert tx.serialize() == raw
def test_axe_tx_wrong_spec_tx():
    """A tx whose version word is below 3 is read as a plain transaction.

    The fixture's raw 32-bit version field is 0x00bb0002 == 12255234; the
    low version half is 2, so no tx_type / extra payload is split out.
    """
    raw = bfh(WRONG_SPEC_TX)
    tx = lib_tx_axe.DeserializerAxe(raw).read_tx()
    assert tx.version == 12255234
    assert tx.tx_type == 0
    assert tx.extra_payload == b''
    assert tx.serialize() == raw
def test_axe_tx_serialize_wrong_tx_type():
    """serialize() must raise ValueError when tx_type disagrees with the
    extra payload class.

    Fix: the result of tx.serialize() was assigned to an unused local
    (`ser`) inside the pytest.raises block; the assignment is dropped.
    """
    test = bfh(CB_TX)
    deser = lib_tx_axe.DeserializerAxe(test)
    tx = deser.read_tx()
    assert tx.tx_type == 5
    # forge a mismatching type: the payload is still the CbTx class
    tx = tx._replace(tx_type=4)
    assert tx.tx_type == 4
    with pytest.raises(ValueError) as excinfo:
        tx.serialize()
    assert ('Axe tx_type does not conform'
            ' with extra payload class' in str(excinfo.value))

View File

@ -29,6 +29,18 @@ CB_TX = (
'c15e2d1aa337c942000000000000000000000000260100c407040076629a6e42fb5191'
'88f65889fd3ac0201be87aa227462b5643e8bb2ec1d7a82a')
CB_TX_V2 = (
'0300050001000000000000000000000000000000000000000000000000000000000000'
'0000ffffffff1303c407040e2f5032506f6f6c2d74444153482fffffffff0448d6a73d'
'000000001976a914293859173a34194d445c2962b97383e2a93d7cb288ac22fc433e00'
'0000001976a914bf09c602c6b8f1db246aba5c37ad1cfdcb16b15e88ace9259c000000'
'00004341047559d13c3f81b1fadbd8dd03e4b5a1c73b05e2b980e00d467aa9440b29c7'
'de23664dde6428d75cafed22ae4f0d302e26c5c5a5dd4d3e1b796d7281bdc9430f35ac'
'00000000000000002a6a28be61411c3c79b7fd45923118ba74d340afb248ae2edafe78'
'c15e2d1aa337c942000000000000000000000000460200c407040076629a6e42fb5191'
'88f65889fd3ac0201be87aa227462b5643e8bb2ec1d7a82a76629a6e42fb519188f658'
'89fd3ac0201be87aa227462b5643e8bb2ec1d7a82a')
PRO_REG_TX = (
'030001000335f1c2ca44a1eb72e59f589df2852caacba39b7c0a5e61967f6b71d7a763'
@ -201,6 +213,25 @@ def test_dash_tx_cb_tx():
assert ser == test
def test_dash_tx_cb_tx_v2():
test = bfh(CB_TX_V2)
deser = lib_tx_dash.DeserializerDash(test)
tx = deser.read_tx()
assert tx.version == 3
assert tx.tx_type == 5
extra = tx.extra_payload
assert extra.version == 2
assert extra.height == 264132
assert len(extra.merkleRootMNList) == 32
assert extra.merkleRootMNList == bfh(
'76629a6e42fb519188f65889fd3ac0201be87aa227462b5643e8bb2ec1d7a82a')
assert len(extra.merkleRootQuorums) == 32
assert extra.merkleRootQuorums == bfh(
'76629a6e42fb519188f65889fd3ac0201be87aa227462b5643e8bb2ec1d7a82a')
ser = tx.serialize()
assert ser == test
def test_dash_tx_pro_reg_tx():
test = bfh(PRO_REG_TX)
deser = lib_tx_dash.DeserializerDash(test)
@ -217,7 +248,7 @@ def test_dash_tx_pro_reg_tx():
assert extra.collateralOutpoint.index == 1
assert len(extra.ipAddress) == 16
assert extra.ipAddress == bfh('00000000000000000000ffff12ca34aa')
assert extra.port == 12149
assert extra.port == 29999
assert len(extra.KeyIdOwner) == 20
assert extra.KeyIdOwner == bfh(
'2b3edeed6842db1f59cf35de1ab5721094f049d0')
@ -255,7 +286,7 @@ def test_dash_tx_pro_up_serv_tx():
'3c6dca244f49f19d3f09889753ffff1fec5bb8f9f5bd5bc09dabd999da21198f')
assert len(extra.ipAddress) == 16
assert extra.ipAddress == bfh('00000000000000000000ffff5fb73580')
assert extra.port == 4391
assert extra.port == 10001
assert extra.scriptOperatorPayout == bfh(
'76a91421851058431a7d722e8e8dd9509e7f2b8e7042ec88ac')
assert len(extra.inputsHash) == 32

View File

@ -0,0 +1,54 @@
import electrumx.lib.tx as tx_lib
tests = [
"0100000001000000000000000000000000000000000000000000000000000000000000"
"000001000000fd2805c4267ec1112af809d3f18d118ae574af82a24a3311454d88aa15"
"a2d4d51985a2d00000ee1a674c9a7abb62fce054d62c56fdf511eebb3c86301651f8d9"
"f24b6dbc72c00000ac34e72f3af4ebb6ea84a1baf7b088f0d262934e8fa6abb974b1d3"
"4e38dd88600100f251cbdac8a3ae679a014a5692267ed68acde7c238379788cdd9c2e4"
"d06b1e09000015de8545abd016297392901e805bc303d9dff92e6414707c039a3bba26"
"ac424f19aa67359d4970398d7ec9c0ec88cb7c8935176795103f07cb60e93e7d499406"
"c8ba92b067b83bf7a42235337492f60bad0f4d3524619b436521213fd7859149ac1788"
"6154bd82e5bcf76b1a2f54360758a834f17ebcd580548364db3ce3cdfd9ff688535b8f"
"eb76a51744248e3fdead193a69912db593f5c7acd72a3fe17b8664dfd33a1884ae80b6"
"18770a93c4e650dba3e0e7b082d28e162a9aa71301df35200366ae420db5d1ba79db51"
"937aa44be1209ccb28c43ac26cf84cd9312d70f3a6a61b4ebfccebc3796090d755df74"
"6838a923a7e408bdeca7fb961361aceceb2018f882edd38fe9fc083c03369afc218dbf"
"9d48f64a5eaf1615b59cf71c7c79b8ac06f323782fcdef5b510e89b8cd02c9207c3cff"
"6cc547767c67cd0dbecf479abb027de64ac57e8b368594791dbb6021edc5bc00b00d3a"
"05789b3b2e3ab7c05170efe7d55c502327c37fad9875ca59d6249edc4d70a11c4df782"
"c6b97bd79f80fe43c4b645a0b891b078318094d3c88db56a636502255fffa6eabb29e5"
"18e2b33ddc49b963702f1787b6c4667630aed09a490688dc689a588471d398d3c90be9"
"11076225935f6473737d7dfbe63b714d3a27cf07120e5c4f95d564b25830e7a7dd0261"
"09743e2a9d078313fbe76e799fbc599e1052afc672a5573be8808254acd4a0f78fb5d9"
"79e28bf1303ed28e88d84965925560c336202f8571d46b6a49e17a04697a15e2f195f9"
"ec0680e5fef25b9f7003f66644da1a81257377c68a15de0f58b89927240e358388269e"
"22855d1a605bb12d74dc954e27a31b8c5290e7ee3596a146254c4b0baaacc7c73a9f18"
"6d580dbb951481bcba737566f7dc5ccb0ba5cfced401877db3aa7139e68411ef40cfd3"
"a1b9a951998b20f3a3d8aef73d073ccfeeadeab56925055daed9ae93a5b1ab887f1ea1"
"75076a9153b31fb7e581f7b0776c69583967fae7c0bd95e4d6c69a7649e6cddb3eae94"
"1e843774aee8b5f707957cb7d12705746208431da75c5aa05be1321298b6311c22d193"
"7afae2bdd65400008752b06f1ecc26dc59b579f9e8f20ee52d526627ecd7f4c579b2eb"
"cf7c5aa5390000af608e02424ceeaf68cef06f6247011188724ec554e8e9163e9512c9"
"e18978af0000117e5cf334ff7dd4858397b15235895bd76ceb9397a2d226272a5e84e5"
"2d954d01006480c480ce0805f7377736eaebdb698f0fcd729726406691a09194de98c6"
"09d601004d93ab877bdc866b6944dc654138b784b0dd81fd037811afff20fb85b69b61"
"910100af16962e0f953579c123b31e211d53258d19147d5d7736656ffa23bf8a6c9b1c"
"010018da2ac49971b595959c760e264ec6f2362d68093c5e6785da88f1204b24b02a99"
"841a25d89984202a97c10ae2b7e9780970c6a7862991d5f97f7a21b839c6471e000000"
"00e1f5050000000039c71b4fbdbf35e4aeabd312f560f07ff271b983d788bec185a86b"
"03ffe691ee2103686b4bfb98e20953850634420bc147eeca3377fb726e1069a53e3fc8"
"65744b3e40c0348010fa0c172c89eb3c87e9515516ccdc3939ef0c2a056faaf637fd9b"
"af51001f5caaa1a654e3af936ac213fc9688b093d950865026534a2d5d09834a4408ff"
"ffffff01605af405000000001976a914a201533465a5d9a18b958cc1eed30bc3b087e3"
"5b88acb2e20000"
]
def test_tx_serialiazation():
    # NOTE(review): the function name has a typo ("serialiazation"); it is
    # kept unchanged so the pytest test id stays stable.
    for hex_tx in tests:
        raw = bytes.fromhex(hex_tx)
        tx = tx_lib.DeserializerZcoin(raw).read_tx()
        first_input = tx.inputs[0]
        # Zcoin sigma/spend inputs use the null prev_hash and -1 index
        assert first_input.prev_hash == tx_lib.ZERO
        assert first_input.prev_idx == tx_lib.MINUS_1

View File

@ -128,34 +128,6 @@ def test_address_string():
assert util.address_string(('1.2.3.4', 84)) == '1.2.3.4:84'
assert util.address_string(('0a::23', 84)) == '[a::23]:84'
def test_is_valid_hostname():
    """Exercise util.is_valid_hostname on edge cases: empty string,
    hyphen placement, dot placement, 63-octet labels and the 255-octet
    total-length limit."""
    is_valid_hostname = util.is_valid_hostname
    assert not is_valid_hostname('')
    assert is_valid_hostname('a')
    assert is_valid_hostname('_')
    # Hyphens: allowed inside a label, not at either end
    assert not is_valid_hostname('-b')
    assert not is_valid_hostname('a.-b')
    assert is_valid_hostname('a-b')
    assert not is_valid_hostname('b-')
    assert not is_valid_hostname('b-.c')
    # Dots: trailing dot OK, empty label not
    assert is_valid_hostname('a.')
    assert is_valid_hostname('foo1.Foo')
    assert not is_valid_hostname('foo1..Foo')
    assert is_valid_hostname('12Foo.Bar.Bax_')
    assert is_valid_hostname('12Foo.Bar.Baz_12')
    # 63 octets in part
    assert is_valid_hostname('a.abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN'
                             'OPQRSTUVWXYZ0123456789_.bar')
    # Over 63 octets in part
    assert not is_valid_hostname('a.abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN'
                                 'OPQRSTUVWXYZ0123456789_1.bar')
    # 255 octets total is valid; 256 is not
    len255 = ('a' * 62 + '.') * 4 + 'abc'
    assert is_valid_hostname(len255)
    assert not is_valid_hostname(len255 + 'd')
def test_protocol_tuple():
assert util.protocol_tuple(None) == (0, )
assert util.protocol_tuple("foo") == (0, )

0
tests/server/__init__.py Normal file
View File

View File

@ -9,7 +9,7 @@ from aiorpcx import (
JSONRPCv1, JSONRPCLoose, RPCError, ignore_after,
Request, Batch,
)
from electrumx.lib.coins import BitcoinCash, CoinError, Bitzeny
from electrumx.lib.coins import BitcoinCash, CoinError, Bitzeny, Dash
from electrumx.server.daemon import (
Daemon, FakeEstimateFeeDaemon, DaemonError
)
@ -28,6 +28,12 @@ def daemon(request):
return coin.DAEMON(coin, ','.join(urls))
@pytest.fixture(params=[Dash])
def dash_daemon(request):
coin = request.param
return coin.DAEMON(coin, ','.join(urls))
class ResponseBase(object):
def __init__(self, headers, status):
@ -70,23 +76,7 @@ class HTMLResponse(ResponseBase):
return self._text
class ClientSessionBase(object):
def __enter__(self):
self.prior_class = aiohttp.ClientSession
aiohttp.ClientSession = lambda: self
def __exit__(self, exc_type, exc_value, traceback):
aiohttp.ClientSession = self.prior_class
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_value, traceback):
pass
class ClientSessionGood(ClientSessionBase):
class ClientSessionGood:
'''Imitate aiohttp for testing purposes.'''
def __init__(self, *triples):
@ -114,7 +104,7 @@ class ClientSessionGood(ClientSessionBase):
return JSONResponse(result, request_ids)
class ClientSessionBadAuth(ClientSessionBase):
class ClientSessionBadAuth:
def post(self, url, data=""):
return HTMLResponse('', 'Unauthorized', 401)
@ -128,25 +118,18 @@ class ClientSessionWorkQueueFull(ClientSessionGood):
'Internal server error', 500)
class ClientSessionNoConnection(ClientSessionGood):
def __init__(self, *args):
self.args = args
async def __aenter__(self):
aiohttp.ClientSession = lambda: ClientSessionGood(*self.args)
raise aiohttp.ClientConnectionError
class ClientSessionPostError(ClientSessionGood):
def __init__(self, exception, *args):
super().__init__(*args)
self.exception = exception
self.args = args
self.n = 0
def post(self, url, data=""):
aiohttp.ClientSession = lambda: ClientSessionGood(*self.args)
raise self.exception
self.n += 1
if self.n == 1:
raise self.exception
return super().post(url, data)
class ClientSessionFailover(ClientSessionGood):
@ -168,35 +151,39 @@ def in_caplog(caplog, message, count=1):
# Tests
#
def test_set_urls_bad():
@pytest.mark.asyncio
async def test_set_urls_bad():
with pytest.raises(CoinError):
Daemon(coin, '')
with pytest.raises(CoinError):
Daemon(coin, 'a')
def test_set_urls_one(caplog):
@pytest.mark.asyncio
async def test_set_urls_one(caplog):
with caplog.at_level(logging.INFO):
daemon = Daemon(coin, urls[0])
assert daemon.current_url() == urls[0]
assert len(daemon.urls) == 1
logged_url = daemon.logged_url()
assert logged_url == '127.0.0.1:8332/'
assert in_caplog(caplog, f'daemon #1 at {logged_url} (current)')
assert daemon.current_url() == urls[0]
assert len(daemon.urls) == 1
logged_url = daemon.logged_url()
assert logged_url == '127.0.0.1:8332/'
assert in_caplog(caplog, f'daemon #1 at {logged_url} (current)')
def test_set_urls_two(caplog):
@pytest.mark.asyncio
async def test_set_urls_two(caplog):
with caplog.at_level(logging.INFO):
daemon = Daemon(coin, ','.join(urls))
assert daemon.current_url() == urls[0]
assert len(daemon.urls) == 2
logged_url = daemon.logged_url()
assert logged_url == '127.0.0.1:8332/'
assert in_caplog(caplog, f'daemon #1 at {logged_url} (current)')
assert in_caplog(caplog, 'daemon #2 at 192.168.0.1:8332')
assert daemon.current_url() == urls[0]
assert len(daemon.urls) == 2
logged_url = daemon.logged_url()
assert logged_url == '127.0.0.1:8332/'
assert in_caplog(caplog, f'daemon #1 at {logged_url} (current)')
assert in_caplog(caplog, 'daemon #2 at 192.168.0.1:8332')
def test_set_urls_short():
@pytest.mark.asyncio
async def test_set_urls_short():
no_prefix_urls = ['/'.join(part for part in url.split('/')[2:])
for url in urls]
daemon = Daemon(coin, ','.join(no_prefix_urls))
@ -214,7 +201,8 @@ def test_set_urls_short():
assert len(daemon.urls) == 2
def test_failover_good(caplog):
@pytest.mark.asyncio
async def test_failover_good(caplog):
daemon = Daemon(coin, ','.join(urls))
with caplog.at_level(logging.INFO):
result = daemon.failover()
@ -228,7 +216,8 @@ def test_failover_good(caplog):
assert daemon.current_url() == urls[0]
def test_failover_fail(caplog):
@pytest.mark.asyncio
async def test_failover_fail(caplog):
daemon = Daemon(coin, urls[0])
with caplog.at_level(logging.INFO):
result = daemon.failover()
@ -241,8 +230,8 @@ def test_failover_fail(caplog):
async def test_height(daemon):
assert daemon.cached_height() is None
height = 300
with ClientSessionGood(('getblockcount', [], height)):
assert await daemon.height() == height
daemon.session = ClientSessionGood(('getblockcount', [], height))
assert await daemon.height() == height
assert daemon.cached_height() == height
@ -250,15 +239,15 @@ async def test_height(daemon):
async def test_broadcast_transaction(daemon):
raw_tx = 'deadbeef'
tx_hash = 'hash'
with ClientSessionGood(('sendrawtransaction', [raw_tx], tx_hash)):
assert await daemon.broadcast_transaction(raw_tx) == tx_hash
daemon.session = ClientSessionGood(('sendrawtransaction', [raw_tx], tx_hash))
assert await daemon.broadcast_transaction(raw_tx) == tx_hash
@pytest.mark.asyncio
async def test_relayfee(daemon):
response = {"relayfee": sats, "other:": "cruft"}
with ClientSessionGood(('getnetworkinfo', [], response)):
assert await daemon.getnetworkinfo() == response
daemon.session = ClientSessionGood(('getnetworkinfo', [], response))
assert await daemon.getnetworkinfo() == response
@pytest.mark.asyncio
@ -268,23 +257,23 @@ async def test_relayfee(daemon):
else:
sats = 2
response = {"relayfee": sats, "other:": "cruft"}
with ClientSessionGood(('getnetworkinfo', [], response)):
assert await daemon.relayfee() == sats
daemon.session = ClientSessionGood(('getnetworkinfo', [], response))
assert await daemon.relayfee() == sats
@pytest.mark.asyncio
async def test_mempool_hashes(daemon):
hashes = ['hex_hash1', 'hex_hash2']
with ClientSessionGood(('getrawmempool', [], hashes)):
assert await daemon.mempool_hashes() == hashes
daemon.session = ClientSessionGood(('getrawmempool', [], hashes))
assert await daemon.mempool_hashes() == hashes
@pytest.mark.asyncio
async def test_deserialised_block(daemon):
block_hash = 'block_hash'
result = {'some': 'mess'}
with ClientSessionGood(('getblock', [block_hash, True], result)):
assert await daemon.deserialised_block(block_hash) == result
daemon.session = ClientSessionGood(('getblock', [block_hash, True], result))
assert await daemon.deserialised_block(block_hash) == result
@pytest.mark.asyncio
@ -294,11 +283,11 @@ async def test_estimatefee(daemon):
result = daemon.coin.ESTIMATE_FEE
else:
result = -1
with ClientSessionGood(
daemon.session = ClientSessionGood(
('estimatesmartfee', [], method_not_found),
('estimatefee', [2], result)
):
assert await daemon.estimatefee(2) == result
)
assert await daemon.estimatefee(2) == result
@pytest.mark.asyncio
@ -308,15 +297,15 @@ async def test_estimatefee_smart(daemon):
return
rate = 0.0002
result = {'feerate': rate}
with ClientSessionGood(
('estimatesmartfee', [], bad_args),
('estimatesmartfee', [2], result)
):
assert await daemon.estimatefee(2) == rate
daemon.session = ClientSessionGood(
('estimatesmartfee', [], bad_args),
('estimatesmartfee', [2], result)
)
assert await daemon.estimatefee(2) == rate
# Test the rpc_available_cache is used
with ClientSessionGood(('estimatesmartfee', [2], result)):
assert await daemon.estimatefee(2) == rate
daemon.session = ClientSessionGood(('estimatesmartfee', [2], result))
assert await daemon.estimatefee(2) == rate
@pytest.mark.asyncio
@ -325,13 +314,20 @@ async def test_getrawtransaction(daemon):
simple = 'tx_in_hex'
verbose = {'hex': hex_hash, 'other': 'cruft'}
# Test False is converted to 0 - old daemon's reject False
with ClientSessionGood(('getrawtransaction', [hex_hash, 0], simple)):
assert await daemon.getrawtransaction(hex_hash) == simple
daemon.session = ClientSessionGood(('getrawtransaction', [hex_hash, 0], simple))
assert await daemon.getrawtransaction(hex_hash) == simple
# Test True is converted to 1
with ClientSessionGood(('getrawtransaction', [hex_hash, 1], verbose)):
assert await daemon.getrawtransaction(
hex_hash, True) == verbose
daemon.session = ClientSessionGood(('getrawtransaction', [hex_hash, 1], verbose))
assert await daemon.getrawtransaction(
hex_hash, True) == verbose
@pytest.mark.asyncio
async def test_protx(dash_daemon):
protx_hash = 'deadbeaf'
dash_daemon.session = ClientSessionGood(('protx', ['info', protx_hash], {}))
assert await dash_daemon.protx(['info', protx_hash]) == {}
# Batch tests
@ -340,8 +336,8 @@ async def test_getrawtransaction(daemon):
async def test_empty_send(daemon):
first = 5
count = 0
with ClientSessionGood(('getblockhash', [], [])):
assert await daemon.block_hex_hashes(first, count) == []
daemon.session = ClientSessionGood(('getblockhash', [], []))
assert await daemon.block_hex_hashes(first, count) == []
@pytest.mark.asyncio
@ -349,10 +345,10 @@ async def test_block_hex_hashes(daemon):
first = 5
count = 3
hashes = [f'hex_hash{n}' for n in range(count)]
with ClientSessionGood(('getblockhash',
[[n] for n in range(first, first + count)],
hashes)):
assert await daemon.block_hex_hashes(first, count) == hashes
daemon.session = ClientSessionGood(('getblockhash',
[[n] for n in range(first, first + count)],
hashes))
assert await daemon.block_hex_hashes(first, count) == hashes
@pytest.mark.asyncio
@ -363,8 +359,8 @@ async def test_raw_blocks(daemon):
iterable = (hex_hash for hex_hash in hex_hashes)
blocks = ["00", "019a", "02fe"]
blocks_raw = [bytes.fromhex(block) for block in blocks]
with ClientSessionGood(('getblock', args_list, blocks)):
assert await daemon.raw_blocks(iterable) == blocks_raw
daemon.session = ClientSessionGood(('getblock', args_list, blocks))
assert await daemon.raw_blocks(iterable) == blocks_raw
@pytest.mark.asyncio
@ -374,26 +370,26 @@ async def test_get_raw_transactions(daemon):
raw_txs_hex = ['fffefdfc', '0a0b0c0d']
raw_txs = [bytes.fromhex(raw_tx) for raw_tx in raw_txs_hex]
# Test 0 - old daemon's reject False
with ClientSessionGood(('getrawtransaction', args_list, raw_txs_hex)):
assert await daemon.getrawtransactions(hex_hashes) == raw_txs
daemon.session = ClientSessionGood(('getrawtransaction', args_list, raw_txs_hex))
assert await daemon.getrawtransactions(hex_hashes) == raw_txs
# Test one error
tx_not_found = RPCError(-1, 'some error message')
results = ['ff0b7d', tx_not_found]
raw_txs = [bytes.fromhex(results[0]), None]
with ClientSessionGood(('getrawtransaction', args_list, results)):
assert await daemon.getrawtransactions(hex_hashes) == raw_txs
daemon.session = ClientSessionGood(('getrawtransaction', args_list, results))
assert await daemon.getrawtransactions(hex_hashes) == raw_txs
# Other tests
@pytest.mark.asyncio
async def test_bad_auth(daemon, caplog):
with pytest.raises(DaemonError) as e:
with ClientSessionBadAuth():
await daemon.height()
async with ignore_after(0.1):
daemon.session = ClientSessionBadAuth()
await daemon.height()
assert "Unauthorized" in e.value.args[0]
assert in_caplog(caplog, "daemon service refused")
assert in_caplog(caplog, "Unauthorized")
@ -402,10 +398,10 @@ async def test_workqueue_depth(daemon, caplog):
daemon.init_retry = 0.01
height = 125
with caplog.at_level(logging.INFO):
with ClientSessionWorkQueueFull(('getblockcount', [], height)):
await daemon.height() == height
daemon.session = ClientSessionWorkQueueFull(('getblockcount', [], height))
await daemon.height() == height
assert in_caplog(caplog, "work queue full")
assert in_caplog(caplog, "Work queue depth exceeded")
assert in_caplog(caplog, "running normally")
@ -414,10 +410,11 @@ async def test_connection_error(daemon, caplog):
height = 100
daemon.init_retry = 0.01
with caplog.at_level(logging.INFO):
with ClientSessionNoConnection(('getblockcount', [], height)):
await daemon.height() == height
daemon.session = ClientSessionPostError(aiohttp.ClientConnectionError,
('getblockcount', [], height))
await daemon.height() == height
assert in_caplog(caplog, "connection problem - is your daemon running?")
assert in_caplog(caplog, "connection problem - check your daemon is running")
assert in_caplog(caplog, "connection restored")
@ -426,9 +423,9 @@ async def test_timeout_error(daemon, caplog):
height = 100
daemon.init_retry = 0.01
with caplog.at_level(logging.INFO):
with ClientSessionPostError(asyncio.TimeoutError,
('getblockcount', [], height)):
await daemon.height() == height
daemon.session = ClientSessionPostError(asyncio.TimeoutError,
('getblockcount', [], height))
await daemon.height() == height
assert in_caplog(caplog, "timeout error")
@ -438,9 +435,9 @@ async def test_disconnected(daemon, caplog):
height = 100
daemon.init_retry = 0.01
with caplog.at_level(logging.INFO):
with ClientSessionPostError(aiohttp.ServerDisconnectedError,
('getblockcount', [], height)):
await daemon.height() == height
daemon.session = ClientSessionPostError(aiohttp.ServerDisconnectedError,
('getblockcount', [], height))
await daemon.height() == height
assert in_caplog(caplog, "disconnected")
assert in_caplog(caplog, "connection restored")
@ -452,11 +449,11 @@ async def test_warming_up(daemon, caplog):
height = 100
daemon.init_retry = 0.01
with caplog.at_level(logging.INFO):
with ClientSessionGood(
('getblockcount', [], warming_up),
('getblockcount', [], height)
):
assert await daemon.height() == height
daemon.session = ClientSessionGood(
('getblockcount', [], warming_up),
('getblockcount', [], height)
)
assert await daemon.height() == height
assert in_caplog(caplog, "starting up checking blocks")
assert in_caplog(caplog, "running normally")
@ -470,9 +467,9 @@ async def test_warming_up_batch(daemon, caplog):
daemon.init_retry = 0.01
hashes = ['hex_hash5']
with caplog.at_level(logging.INFO):
with ClientSessionGood(('getblockhash', [[first]], [warming_up]),
('getblockhash', [[first]], hashes)):
assert await daemon.block_hex_hashes(first, count) == hashes
daemon.session = ClientSessionGood(('getblockhash', [[first]], [warming_up]),
('getblockhash', [[first]], hashes))
assert await daemon.block_hex_hashes(first, count) == hashes
assert in_caplog(caplog, "starting up checking blocks")
assert in_caplog(caplog, "running normally")
@ -484,8 +481,8 @@ async def test_failover(daemon, caplog):
daemon.init_retry = 0.01
daemon.max_retry = 0.04
with caplog.at_level(logging.INFO):
with ClientSessionFailover(('getblockcount', [], height)):
await daemon.height() == height
daemon.session = ClientSessionFailover(('getblockcount', [], height))
await daemon.height() == height
assert in_caplog(caplog, "disconnected", 1)
assert in_caplog(caplog, "failing over")

View File

@ -6,7 +6,8 @@ import re
import pytest
from electrumx.server.env import Env, NetIdentity
from aiorpcx import Service, NetAddress
from electrumx.server.env import Env, ServiceError
import electrumx.lib.coins as lib_coins
@ -42,6 +43,7 @@ def assert_default(env_var, attr, default):
def assert_integer(env_var, attr, default=''):
setup_base_env()
if default != '':
e = Env()
assert getattr(e, attr) == default
@ -143,28 +145,127 @@ def test_CACHE_MB():
assert_integer('CACHE_MB', 'cache_MB', 1200)
def test_HOST():
assert_default('HOST', 'host', 'localhost')
os.environ['HOST'] = ''
def test_SERVICES():
setup_base_env()
e = Env()
assert e.cs_host(for_rpc=False) == ''
os.environ['HOST'] = '192.168.0.1,23.45.67.89'
assert e.services == []
# This has a blank entry between commas
os.environ['SERVICES'] = 'tcp://foo.bar:1234,,ws://1.2.3.4:567,rpc://[::1]:700'
e = Env()
assert e.cs_host(for_rpc=False) == ['192.168.0.1', '23.45.67.89']
os.environ['HOST'] = '192.168.0.1 , 23.45.67.89 '
assert e.services == [
Service('tcp', NetAddress('foo.bar', 1234)),
Service('ws', NetAddress('1.2.3.4', 567)),
Service('rpc', NetAddress('::1', 700)),
]
def test_SERVICES_default_rpc():
# This has a blank entry between commas
os.environ['SERVICES'] = 'rpc://foo.bar'
e = Env()
assert e.cs_host(for_rpc=False) == ['192.168.0.1', '23.45.67.89']
assert e.services[0].host == 'foo.bar'
assert e.services[0].port == 8000
os.environ['SERVICES'] = 'rpc://:800'
e = Env()
assert e.services[0].host == 'localhost'
assert e.services[0].port == 800
os.environ['SERVICES'] = 'rpc://'
e = Env()
assert e.services[0].host == 'localhost'
assert e.services[0].port == 8000
def test_RPC_HOST():
assert_default('RPC_HOST', 'rpc_host', 'localhost')
os.environ['RPC_HOST'] = ''
def test_bad_SERVICES():
setup_base_env()
os.environ['SERVICES'] = 'tcp:foo.bar:1234'
with pytest.raises(ServiceError) as err:
Env()
assert 'invalid service string' in str(err.value)
os.environ['SERVICES'] = 'xxx://foo.com:50001'
with pytest.raises(ServiceError) as err:
Env()
assert 'unknown protocol' in str(err.value)
def test_onion_SERVICES():
setup_base_env()
os.environ['SERVICES'] = 'tcp://foo.bar.onion:1234'
with pytest.raises(ServiceError) as err:
Env()
assert 'bad host' in str(err.value)
def test_duplicate_SERVICES():
setup_base_env()
os.environ['SERVICES'] = 'tcp://foo.bar:1234,ws://foo.bar:1235'
e = Env()
# Blank reverts to localhost
assert e.cs_host(for_rpc=True) == 'localhost'
os.environ['RPC_HOST'] = '127.0.0.1, ::1'
os.environ['SERVICES'] = 'tcp://foo.bar:1234,ws://foo.bar:1234'
with pytest.raises(ServiceError) as err:
Env()
assert 'multiple services' in str(err.value)
@pytest.mark.parametrize("service", (
'ssl://foo.bar:1234',
'wss://foo.bar:1234',
))
def test_ssl_SERVICES(service):
setup_base_env()
os.environ['SERVICES'] = service
with pytest.raises(Env.Error) as err:
Env()
assert 'SSL_CERTFILE' in str(err.value)
os.environ['SSL_CERTFILE'] = 'certfile'
with pytest.raises(Env.Error) as err:
Env()
assert 'SSL_KEYFILE' in str(err.value)
os.environ['SSL_KEYFILE'] = 'keyfile'
Env()
setup_base_env()
os.environ['SERVICES'] = service
os.environ['SSL_KEYFILE'] = 'keyfile'
with pytest.raises(Env.Error) as err:
Env()
assert 'SSL_CERTFILE' in str(err.value)
def test_REPORT_SERVICES():
setup_base_env()
e = Env()
assert e.cs_host(for_rpc=True) == ['127.0.0.1', '::1']
assert e.report_services == []
# This has a blank entry between commas
os.environ['REPORT_SERVICES'] = 'tcp://foo.bar:1234,,ws://1.2.3.4:567'
e = Env()
assert e.report_services == [
Service('tcp', NetAddress('foo.bar', 1234)),
Service('ws', NetAddress('1.2.3.4', 567)),
]
def test_REPORT_SERVICES_rpc():
setup_base_env()
os.environ['REPORT_SERVICES'] = 'rpc://foo.bar:1234'
with pytest.raises(ServiceError) as err:
Env()
assert 'bad protocol' in str(err.value)
def test_REPORT_SERVICES_private():
setup_base_env()
os.environ['REPORT_SERVICES'] = 'tcp://192.168.0.1:1234'
with pytest.raises(ServiceError) as err:
Env()
assert 'bad IP address' in str(err.value)
# Accept it not PEER_ANNOUNCE
os.environ['PEER_ANNOUNCE'] = ''
Env()
def test_REPORT_SERVICES_localhost():
setup_base_env()
os.environ['REPORT_SERVICES'] = 'tcp://localhost:1234'
with pytest.raises(ServiceError) as err:
Env()
assert 'bad host' in str(err.value)
def test_REORG_LIMIT():
@ -172,36 +273,24 @@ def test_REORG_LIMIT():
lib_coins.BitcoinSV.REORG_LIMIT)
def test_TCP_PORT():
assert_integer('TCP_PORT', 'tcp_port', None)
def test_COST_HARD_LIMIT():
assert_integer('COST_HARD_LIMIT', 'cost_hard_limit', 10000)
def test_SSL_PORT():
# Requires both SSL_CERTFILE and SSL_KEYFILE to be set
os.environ['SSL_PORT'] = '50002'
os.environ['SSL_CERTFILE'] = 'certfile'
with pytest.raises(Env.Error):
Env()
os.environ.pop('SSL_CERTFILE')
os.environ['SSL_KEYFILE'] = 'keyfile'
with pytest.raises(Env.Error):
Env()
os.environ['SSL_CERTFILE'] = 'certfile'
Env()
os.environ.pop('SSL_PORT')
assert_integer('SSL_PORT', 'ssl_port', None)
def test_COST_SOFT_LIMIT():
assert_integer('COST_SOFT_LIMIT', 'cost_soft_limit', 1000)
def test_RPC_PORT():
assert_integer('RPC_PORT', 'rpc_port', 8000)
def test_INITIAL_CONCURRENT():
assert_integer('INITIAL_CONCURRENT', 'initial_concurrent', 10)
def test_MAX_SUBSCRIPTIONS():
assert_integer('MAX_SUBSCRIPTIONS', 'max_subscriptions', 10000)
def test_REQUEST_SLEEP():
assert_integer('REQUEST_SLEEP', 'request_sleep', 2500)
def test_LOG_SESSIONS():
assert_integer('LOG_SESSIONS', 'log_sessions', 3600)
def test_BANDWIDTH_UNIT_COST():
assert_integer('BANDWIDTH_UNIT_COST', 'bw_unit_cost', 5000)
def test_DONATION_ADDRESS():
@ -216,8 +305,13 @@ def test_MAX_SEND():
assert_integer('MAX_SEND', 'max_send', 1000000)
def test_MAX_SUBS():
assert_integer('MAX_SUBS', 'max_subs', 250000)
def test_LOG_LEVEL():
setup_base_env()
e = Env()
assert e.log_level == 'INFO'
os.environ['LOG_LEVEL'] = 'warning'
e = Env()
assert e.log_level == 'WARNING'
def test_MAX_SESSIONS():
@ -228,12 +322,8 @@ def test_MAX_SESSIONS():
# Cannot test default as it may be lowered by the open file limit cap
def test_MAX_SESSION_SUBS():
assert_integer('MAX_SESSION_SUBS', 'max_session_subs', 50000)
def test_BANDWIDTH_LIMIT():
assert_integer('BANDWIDTH_LIMIT', 'bandwidth_limit', 2000000)
def test_REQUEST_TIMEOUT():
assert_integer('REQUEST_TIMEOUT', 'request_timeout', 30)
def test_SESSION_TIMEOUT():
@ -302,112 +392,10 @@ def test_TOR_PROXY_PORT():
assert_integer('TOR_PROXY_PORT', 'tor_proxy_port', None)
def test_clearnet_identity():
os.environ['REPORT_TCP_PORT'] = '456'
e = Env()
assert len(e.identities) == 0
os.environ['REPORT_HOST'] = '8.8.8.8'
e = Env()
assert len(e.identities) == 1
assert e.identities[0].host == '8.8.8.8'
os.environ['REPORT_HOST'] = 'localhost'
with pytest.raises(Env.Error):
Env()
os.environ['REPORT_HOST'] = ''
with pytest.raises(Env.Error):
Env()
os.environ['REPORT_HOST'] = '127.0.0.1'
with pytest.raises(Env.Error):
Env()
os.environ['REPORT_HOST'] = '0.0.0.0'
with pytest.raises(Env.Error):
Env()
os.environ['REPORT_HOST'] = '224.0.0.2'
with pytest.raises(Env.Error):
Env()
os.environ['REPORT_HOST'] = '$HOST'
with pytest.raises(Env.Error):
Env()
# Accept private IP, unless PEER_ANNOUNCE
os.environ['PEER_ANNOUNCE'] = ''
os.environ['REPORT_HOST'] = '192.168.0.1'
os.environ['SSL_CERTFILE'] = 'certfile'
os.environ['SSL_KEYFILE'] = 'keyfile'
Env()
os.environ['PEER_ANNOUNCE'] = 'OK'
with pytest.raises(Env.Error) as err:
Env()
os.environ.pop('PEER_ANNOUNCE', None)
assert 'not a valid REPORT_HOST' in str(err)
os.environ['REPORT_HOST'] = '1.2.3.4'
os.environ['REPORT_SSL_PORT'] = os.environ['REPORT_TCP_PORT']
with pytest.raises(Env.Error) as err:
Env()
assert 'both resolve' in str(err)
os.environ['REPORT_SSL_PORT'] = '457'
os.environ['REPORT_HOST'] = 'foo.com'
e = Env()
assert len(e.identities) == 1
ident = e.identities[0]
assert ident.host == 'foo.com'
assert ident.tcp_port == 456
assert ident.ssl_port == 457
assert ident.nick_suffix == ''
def test_tor_identity():
tor_host = 'something.onion'
os.environ.pop('REPORT_HOST', None)
os.environ.pop('REPORT_HOST_TOR', None)
e = Env()
assert len(e.identities) == 0
os.environ['REPORT_HOST_TOR'] = 'foo'
os.environ['REPORT_SSL_PORT_TOR'] = '123'
os.environ['TCP_PORT'] = '456'
with pytest.raises(Env.Error):
Env()
os.environ['REPORT_HOST_TOR'] = tor_host
e = Env()
assert len(e.identities) == 1
ident = e.identities[0]
assert ident.host == tor_host
assert ident.tcp_port == 456
assert ident.ssl_port == 123
assert ident.nick_suffix == '_tor'
os.environ['REPORT_TCP_PORT_TOR'] = os.environ['REPORT_SSL_PORT_TOR']
with pytest.raises(Env.Error):
Env()
os.environ['REPORT_HOST'] = 'foo.com'
os.environ['TCP_PORT'] = '456'
os.environ['SSL_PORT'] = '789'
os.environ['REPORT_TCP_PORT'] = '654'
os.environ['REPORT_SSL_PORT'] = '987'
os.environ['SSL_CERTFILE'] = 'certfile'
os.environ['SSL_KEYFILE'] = 'keyfile'
os.environ.pop('REPORT_TCP_PORT_TOR', None)
os.environ.pop('REPORT_SSL_PORT_TOR', None)
e = Env()
assert len(e.identities) == 2
ident = e.identities[1]
assert ident.host == tor_host
assert ident.tcp_port == 654
assert ident.ssl_port == 987
os.environ['REPORT_TCP_PORT_TOR'] = '234'
os.environ['REPORT_SSL_PORT_TOR'] = '432'
e = Env()
assert len(e.identities) == 2
ident = e.identities[1]
assert ident.host == tor_host
assert ident.tcp_port == 234
assert ident.ssl_port == 432
def test_ban_versions():
e = Env()
assert e.drop_client is None
ban_re = '1\.[0-2]\.\d+?[_\w]*'
ban_re = r'1\.[0-2]\.\d+?[_\w]*'
os.environ['DROP_CLIENT'] = ban_re
e = Env()
assert e.drop_client == re.compile(ban_re)

View File

@ -63,7 +63,7 @@ class API(MemPoolAPI):
def __init__(self):
self._height = 0
self._cached_height = self._height
self._cached_height = self._db_height = self._height
# Create a pool of hash160s. Map them to their script hashes
# Create a bunch of UTXOs paying to those script hashes
# Create a bunch of TXs that spend from the UTXO set and create
@ -203,6 +203,9 @@ class API(MemPoolAPI):
self._cached_height = self._height
return self._height
def db_height(self):
return self._db_height
def cached_height(self):
return self._cached_height
@ -272,7 +275,8 @@ async def test_keep_synchronized(caplog):
assert in_caplog(caplog, 'beginning processing of daemon mempool')
assert in_caplog(caplog, 'compact fee histogram')
assert in_caplog(caplog, 'synced in ')
assert in_caplog(caplog, '0 txs touching 0 addresses')
assert in_caplog(caplog, '0 txs')
assert in_caplog(caplog, 'touching 0 addresses')
assert not in_caplog(caplog, 'txs dropped')
@ -443,7 +447,7 @@ async def test_daemon_drops_txs():
@pytest.mark.asyncio
async def test_notifications():
async def test_notifications(caplog):
# Tests notifications over a cycle of:
# 1) A first batch of txs come in
# 2) A second batch of txs come in
@ -461,6 +465,8 @@ async def test_notifications():
second_hashes = api.ordered_adds[n:]
second_touched = api.touched(second_hashes)
caplog.set_level(logging.DEBUG)
async with TaskGroup() as group:
# First batch enters the mempool
api.raw_txs = {hash: raw_txs[hash] for hash in first_hashes}
@ -471,7 +477,7 @@ async def test_notifications():
await event.wait()
assert len(api.on_mempool_calls) == 1
touched, height = api.on_mempool_calls[0]
assert height == api._height == api._cached_height
assert height == api._height == api._db_height == api._cached_height
assert touched == first_touched
# Second batch enters the mempool
api.raw_txs = raw_txs
@ -479,21 +485,32 @@ async def test_notifications():
await event.wait()
assert len(api.on_mempool_calls) == 2
touched, height = api.on_mempool_calls[1]
assert height == api._height == api._cached_height
assert height == api._height == api._db_height == api._cached_height
# Touched is incremental
assert touched == second_touched
# Block found; first half confirm
new_height = 2
api._height = new_height
api.db_utxos.update(first_utxos)
for spend in first_spends:
del api.db_utxos[spend]
api.raw_txs = {hash: raw_txs[hash] for hash in second_hashes}
api.txs = {hash: txs[hash] for hash in second_hashes}
# Delay the DB update
assert not in_caplog(caplog, 'waiting for DB to sync')
async with ignore_after(mempool.refresh_secs * 2):
await event.wait()
assert in_caplog(caplog, 'waiting for DB to sync')
assert len(api.on_mempool_calls) == 2
assert not event.is_set()
assert api._height == api._cached_height == new_height
assert touched == second_touched
# Now update the DB
api.db_utxos.update(first_utxos)
api._db_height = new_height
for spend in first_spends:
del api.db_utxos[spend]
await event.wait()
assert len(api.on_mempool_calls) == 3
touched, height = api.on_mempool_calls[2]
assert height == api._height == api._cached_height == new_height
assert height == api._db_height == new_height
assert touched == first_touched
await group.cancel_remaining()

View File

@ -61,17 +61,11 @@ def test_block(block_details):
raw_block = unhexlify(block_info['block'])
block = coin.block(raw_block, block_info['height'])
h = coin.electrum_header(block.header, block_info['height'])
assert block_info['merkleroot'] == h['merkle_root']
assert block_info['time'] == h['timestamp']
assert block_info['previousblockhash'] == h['prev_block_hash']
assert block_info['height'] == h['block_height']
assert block_info['nonce'] == h['nonce']
assert block_info['bits'] == pack_be_uint32(h['bits']).hex()
assert coin.header_hash(
block.header) == hex_str_to_hash(block_info['hash'])
assert (coin.header_prevhash(block.header)
== hex_str_to_hash(block_info['previousblockhash']))
assert len(block_info['tx']) == len(block.transactions)
for n, (tx, txid) in enumerate(block.transactions):
assert txid == hex_str_to_hash(block_info['tx'][n])

View File

@ -56,8 +56,12 @@ def test_transaction(transaction_details):
spk = vout[i]['scriptPubKey']
tx_pks = tx.outputs[i].pk_script
assert spk['hex'] == tx_pks.hex()
assert coin.address_to_hashX(spk['address']) == \
coin.hashX_from_script(tx_pks)
if "addresses" in spk:
assert len(spk["addresses"]) == 1
address = spk["addresses"][0]
else:
address = spk["address"]
assert coin.address_to_hashX(address) == coin.hashX_from_script(tx_pks)
if issubclass(coin, Namecoin):
if "nameOp" not in spk or "name" not in spk["nameOp"]:
assert coin.name_hashX_from_script(tx_pks) is None

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,54 @@
{
"txid": "35ea64349dfca4158ba745d3031c1014b9f27262c9c8bb672b829b33b3ac56ee",
"hash": "35ea64349dfca4158ba745d3031c1014b9f27262c9c8bb672b829b33b3ac56ee",
"size": 390,
"vsize": 390,
"version": 1638,
"locktime": 358665,
"time": 1559642505,
"hex": "660600008941f65c02ad38a624bee634b804a930564b1f92c8ec062c20619a6ae22ff667bd5fecf8d7000000006b483045022100d3b0aa2d650b1e632c8392180c4305b939c61a054947bde720840ae6765a6c0e0220309d4cff12bfacadeb994ae81810d9b368c27f7c84e34a4d55683928deea14c1012103b5e9a6b0f0feb239060986d3eadbb554b82dff8a6b75d8880810391e276d197cfeffffff4c96d5b045260667313a9f51c81028d13c66bd32ae2134d162d4931f2ad45a61010000006a4730440220573baeb5ae416971bb112197978f1a7053b4dc75ab9a877671541796d1df33f80220444153b0085ca275fc05efac6dbbcad95bbfa07de51ea69a9d01325d45af99ad012102df1f5deb42e7568ac9578ab942ceeca8e8cade0cbe7678e66089794771e6cd31feffffff026400000000000000265375095465737431303030317576a91434960bef9fcc33ff4d2c8c2cfc77edd2f463a58f88ac1c250000000000001976a9148d5104c62b85ac953de67538dbdc3b70e4027abb88ac09790500",
"vin": [
{
"txid": "d7f8ec5fbd67f62fe26a9a61202c06ecc8921f4b5630a904b834e6be24a638ad",
"vout": 0,
"scriptSig": {
"asm": "3045022100d3b0aa2d650b1e632c8392180c4305b939c61a054947bde720840ae6765a6c0e0220309d4cff12bfacadeb994ae81810d9b368c27f7c84e34a4d55683928deea14c1[ALL] 03b5e9a6b0f0feb239060986d3eadbb554b82dff8a6b75d8880810391e276d197c",
"hex": "483045022100d3b0aa2d650b1e632c8392180c4305b939c61a054947bde720840ae6765a6c0e0220309d4cff12bfacadeb994ae81810d9b368c27f7c84e34a4d55683928deea14c1012103b5e9a6b0f0feb239060986d3eadbb554b82dff8a6b75d8880810391e276d197c"
},
"sequence": 4294967294
},
{
"txid": "615ad42a1f93d462d13421ae32bd663cd12810c8519f3a3167062645b0d5964c",
"vout": 1,
"scriptSig": {
"asm": "30440220573baeb5ae416971bb112197978f1a7053b4dc75ab9a877671541796d1df33f80220444153b0085ca275fc05efac6dbbcad95bbfa07de51ea69a9d01325d45af99ad[ALL] 02df1f5deb42e7568ac9578ab942ceeca8e8cade0cbe7678e66089794771e6cd31",
"hex": "4730440220573baeb5ae416971bb112197978f1a7053b4dc75ab9a877671541796d1df33f80220444153b0085ca275fc05efac6dbbcad95bbfa07de51ea69a9d01325d45af99ad012102df1f5deb42e7568ac9578ab942ceeca8e8cade0cbe7678e66089794771e6cd31"
},
"sequence": 4294967294
}
],
"vout": [
{
"value": 100,
"n": 0,
"scriptPubKey": {
"asm": "3 OP_DROP 546573743130303031 OP_DROP OP_DUP OP_HASH160 34960bef9fcc33ff4d2c8c2cfc77edd2f463a58f OP_EQUALVERIFY OP_CHECKSIG",
"hex": "5375095465737431303030317576a91434960bef9fcc33ff4d2c8c2cfc77edd2f463a58f88ac",
"reqSigs": 1,
"type": "name_pubkeyhash",
"address": "EMwxNytaiBZkYSZpCrqeRMaMMeqfXEXmAH"
}
},
{
"value": 9500,
"n": 1,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 8d5104c62b85ac953de67538dbdc3b70e4027abb OP_EQUALVERIFY OP_CHECKSIG",
"hex": "76a9148d5104c62b85ac953de67538dbdc3b70e4027abb88ac",
"reqSigs": 1,
"type": "pubkeyhash",
"address": "EW37puYPfAKP5QvB8BQbeoyh5L58GiMBy6"
}
}
]
}

View File

@ -0,0 +1,45 @@
{
"txid": "b9b30bea0cf9e12d17d7b2adb00645906f2767b3d2f5ac04207fe7c3a6562afe",
"hash": "b9b30bea0cf9e12d17d7b2adb00645906f2767b3d2f5ac04207fe7c3a6562afe",
"size": 404,
"vsize": 404,
"version": 1638,
"locktime": 358667,
"time": 1559644057,
"hex": "660600009947f65c01bf1de4f417f6a81cb633ab11b7d3697fc3f2cf354e35ea51c59d042c1f1963ca010000006b483045022100c2c0697e3e4d6f48202724278248b3294e2aa2ed378a86975b1234119559fb5302201c9942386681ab911e6b5a8e9f9083c1858df1997ff8b39f0c8d8fff92ecafa3012103a708dad6af6017858eac071eb38deeaefaca9f986e46c3244f9cddc9539fc819feffffff026400000000000000c751750a49545458493652593557020f276d4c9a747970653d61737369676e65640a2c706f736974696f6e5f69643d38340a2c706f736974696f6e5f6e616d653d4950464d3a20d09cd0a1d0a4d09e0a2c76616c75653dd09fd0bed0bbd18cd0b7d0bed0b2d0b0d182d0b5d0bbd18c20d0bfd0bed181d182d183d0bfd0b8d0bb20d0bdd0b020d0bad183d180d1810a2c656e645f74696d653d300a2c636f6d70616e793d46696e61636164656d797576a9142eca0d118305feb0c2d3c718fb751a0ca88781d488ac08a31800000000001976a914c069a9b8139bbace0a6cd384e50f59014d329c3f88ac0b790500",
"vin": [
{
"txid": "ca63191f2c049dc551ea354e35cff2c37f69d3b711ab33b61ca8f617f4e41dbf",
"vout": 1,
"scriptSig": {
"asm": "3045022100c2c0697e3e4d6f48202724278248b3294e2aa2ed378a86975b1234119559fb5302201c9942386681ab911e6b5a8e9f9083c1858df1997ff8b39f0c8d8fff92ecafa3[ALL] 03a708dad6af6017858eac071eb38deeaefaca9f986e46c3244f9cddc9539fc819",
"hex": "483045022100c2c0697e3e4d6f48202724278248b3294e2aa2ed378a86975b1234119559fb5302201c9942386681ab911e6b5a8e9f9083c1858df1997ff8b39f0c8d8fff92ecafa3012103a708dad6af6017858eac071eb38deeaefaca9f986e46c3244f9cddc9539fc819"
},
"sequence": 4294967294
}
],
"vout": [
{
"value": 100,
"n": 0,
"scriptPubKey": {
"asm": "1 OP_DROP 49545458493652593557 9999 OP_2DROP 747970653d61737369676e65640a2c706f736974696f6e5f69643d38340a2c706f736974696f6e5f6e616d653d4950464d3a20d09cd0a1d0a4d09e0a2c76616c75653dd09fd0bed0bbd18cd0b7d0bed0b2d0b0d182d0b5d0bbd18c20d0bfd0bed181d182d183d0bfd0b8d0bb20d0bdd0b020d0bad183d180d1810a2c656e645f74696d653d300a2c636f6d70616e793d46696e61636164656d79 OP_DROP OP_DUP OP_HASH160 2eca0d118305feb0c2d3c718fb751a0ca88781d4 OP_EQUALVERIFY OP_CHECKSIG",
"hex": "51750a49545458493652593557020f276d4c9a747970653d61737369676e65640a2c706f736974696f6e5f69643d38340a2c706f736974696f6e5f6e616d653d4950464d3a20d09cd0a1d0a4d09e0a2c76616c75653dd09fd0bed0bbd18cd0b7d0bed0b2d0b0d182d0b5d0bbd18c20d0bfd0bed181d182d183d0bfd0b8d0bb20d0bdd0b020d0bad183d180d1810a2c656e645f74696d653d300a2c636f6d70616e793d46696e61636164656d797576a9142eca0d118305feb0c2d3c718fb751a0ca88781d488ac",
"reqSigs": 1,
"type": "name_pubkeyhash",
"address": "EMRJdAK5gdzhUzxbRMcte4nHwTaAPCL8Cc"
}
},
{
"value": 1614600,
"n": 1,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 c069a9b8139bbace0a6cd384e50f59014d329c3f OP_EQUALVERIFY OP_CHECKSIG",
"hex": "76a914c069a9b8139bbace0a6cd384e50f59014d329c3f88ac",
"reqSigs": 1,
"type": "pubkeyhash",
"address": "EahHp1NLcbvTUxmbdzsy4ZfHd5yVtX5uzf"
}
}
]
}

View File

@ -0,0 +1,54 @@
{
"txid": "ca63191f2c049dc551ea354e35cff2c37f69d3b711ab33b61ca8f617f4e41dbf",
"hash": "ca63191f2c049dc551ea354e35cff2c37f69d3b711ab33b61ca8f617f4e41dbf",
"size": 552,
"vsize": 552,
"version": 1638,
"locktime": 358666,
"time": 1559643476,
"hex": "660600005445f65c02d654cf3d02945aae4dd8ab489dd42070e9a32eff0e64aabcb081545d08db0824010000006b48304502210094fef2bea1988f16da02b8ab2a569c00cc85a1c6cfec696e6269b307f610f766022024c67382889ab13c3753dfb84374264eb902c498d59e5e9bcd293fb654371ff10121035bdf2ebca856595d10b845a6adfaf009b0aefe7a54d3aa1b350ce8e9db8d2530feffffff77fe55ec180d4b0b211d580f5e887d6ce26937eec124b077f89898fc3fff6b5b010000006b4830450221009c9329da2a26ebcda79b398642488a980a8aa441c7a89462edc16f88d56f2cba0220777a148b32ed60e0188dbabdcd8d8c67ae166a8308a90adfae9ab1542d6113eb0121020223ca251cd161c9ef5f258a91315c07a5a7af45a8c95fe20dd409083962e95cfeffffff026400000000000000c752750a424d3553334f52373957020f276d4c9a747970653d61737369676e65640a2c706f736974696f6e5f69643d38340a2c706f736974696f6e5f6e616d653d4950464d3a20d09cd0a1d0a4d09e0a2c76616c75653dd09fd0bed0bbd18cd0b7d0bed0b2d0b0d182d0b5d0bbd18c20d0bfd0bed181d182d183d0bfd0b8d0bb20d0bdd0b020d0bad183d180d1810a2c656e645f74696d653d300a2c636f6d70616e793d46696e61636164656d797576a9148354849b536141ecda5c9568993eee8daecec17c88ac54a71800000000001976a914a7b05bc714718f20040a712b812b2037f2cb0fc388ac0a790500",
"vin": [
{
"txid": "2408db085d5481b0bcaa640eff2ea3e97020d49d48abd84dae5a94023dcf54d6",
"vout": 1,
"scriptSig": {
"asm": "304502210094fef2bea1988f16da02b8ab2a569c00cc85a1c6cfec696e6269b307f610f766022024c67382889ab13c3753dfb84374264eb902c498d59e5e9bcd293fb654371ff1[ALL] 035bdf2ebca856595d10b845a6adfaf009b0aefe7a54d3aa1b350ce8e9db8d2530",
"hex": "48304502210094fef2bea1988f16da02b8ab2a569c00cc85a1c6cfec696e6269b307f610f766022024c67382889ab13c3753dfb84374264eb902c498d59e5e9bcd293fb654371ff10121035bdf2ebca856595d10b845a6adfaf009b0aefe7a54d3aa1b350ce8e9db8d2530"
},
"sequence": 4294967294
},
{
"txid": "5b6bff3ffc9898f877b024c1ee3769e26c7d885e0f581d210b4b0d18ec55fe77",
"vout": 1,
"scriptSig": {
"asm": "30450221009c9329da2a26ebcda79b398642488a980a8aa441c7a89462edc16f88d56f2cba0220777a148b32ed60e0188dbabdcd8d8c67ae166a8308a90adfae9ab1542d6113eb[ALL] 020223ca251cd161c9ef5f258a91315c07a5a7af45a8c95fe20dd409083962e95c",
"hex": "4830450221009c9329da2a26ebcda79b398642488a980a8aa441c7a89462edc16f88d56f2cba0220777a148b32ed60e0188dbabdcd8d8c67ae166a8308a90adfae9ab1542d6113eb0121020223ca251cd161c9ef5f258a91315c07a5a7af45a8c95fe20dd409083962e95c"
},
"sequence": 4294967294
}
],
"vout": [
{
"value": 100,
"n": 0,
"scriptPubKey": {
"asm": "2 OP_DROP 424d3553334f52373957 9999 OP_2DROP 747970653d61737369676e65640a2c706f736974696f6e5f69643d38340a2c706f736974696f6e5f6e616d653d4950464d3a20d09cd0a1d0a4d09e0a2c76616c75653dd09fd0bed0bbd18cd0b7d0bed0b2d0b0d182d0b5d0bbd18c20d0bfd0bed181d182d183d0bfd0b8d0bb20d0bdd0b020d0bad183d180d1810a2c656e645f74696d653d300a2c636f6d70616e793d46696e61636164656d79 OP_DROP OP_DUP OP_HASH160 8354849b536141ecda5c9568993eee8daecec17c OP_EQUALVERIFY OP_CHECKSIG",
"hex": "52750a424d3553334f52373957020f276d4c9a747970653d61737369676e65640a2c706f736974696f6e5f69643d38340a2c706f736974696f6e5f6e616d653d4950464d3a20d09cd0a1d0a4d09e0a2c76616c75653dd09fd0bed0bbd18cd0b7d0bed0b2d0b0d182d0b5d0bbd18c20d0bfd0bed181d182d183d0bfd0b8d0bb20d0bdd0b020d0bad183d180d1810a2c656e645f74696d653d300a2c636f6d70616e793d46696e61636164656d797576a9148354849b536141ecda5c9568993eee8daecec17c88ac",
"reqSigs": 1,
"type": "name_pubkeyhash",
"address": "EV8KG8LGoW8P2HdJ5npqqNKP5VcVJG6tgd"
}
},
{
"value": 1615700,
"n": 1,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 a7b05bc714718f20040a712b812b2037f2cb0fc3 OP_EQUALVERIFY OP_CHECKSIG",
"hex": "76a914a7b05bc714718f20040a712b812b2037f2cb0fc388ac",
"reqSigs": 1,
"type": "pubkeyhash",
"address": "EYSZcDvtKZGRrGM2nzokRmMPL9rrzpCirN"
}
}
]
}

Some files were not shown because too many files have changed in this diff Show More