From b05367409513e5d902d81bbdae6683e48369d59f Mon Sep 17 00:00:00 2001 From: Jakub Matys Date: Fri, 13 Apr 2018 16:29:00 +0200 Subject: [PATCH 01/37] use -short flag for testing by default and clean vendor in build env. --- Makefile | 3 +++ build/bin/Makefile | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/Makefile b/Makefile index 00e051c8..806bbaad 100644 --- a/Makefile +++ b/Makefile @@ -14,6 +14,9 @@ build-debug: .bin-image test: .bin-image docker run -t --rm -e PACKAGER=$(PACKAGER) -v $(CURDIR):/src $(BIN_IMAGE) make test +test-all: .bin-image + docker run -t --rm -e PACKAGER=$(PACKAGER) -v $(CURDIR):/src $(BIN_IMAGE) make test-all + deb: .deb-image docker run -t --rm -e PACKAGER=$(PACKAGER) -v $(CURDIR):/src -v $(CURDIR)/build:/out $(DEB_IMAGE) diff --git a/build/bin/Makefile b/build/bin/Makefile index 7d4efd84..386d9fa1 100644 --- a/build/bin/Makefile +++ b/build/bin/Makefile @@ -9,9 +9,13 @@ build-debug: prepare-sources chown $(PACKAGER) /out/blockbook test: prepare-sources + cd $(GOPATH)/src/blockbook && go test -v -short ./... + +test-all: prepare-sources cd $(GOPATH)/src/blockbook && go test -v ./... prepare-sources: @ [ -n "`ls /src 2> /dev/null`" ] || (echo "/src doesn't exist or is empty" 1>&2 && exit 1) cp -r /src $(GOPATH)/src/blockbook + rm -rf $(GOPATH)/src/blockbook/vendor cd $(GOPATH)/src/blockbook && dep ensure -vendor-only From 466ed7912dcf7313270c6ff24326477a43f62a42 Mon Sep 17 00:00:00 2001 From: Jakub Matys Date: Fri, 13 Apr 2018 16:34:56 +0200 Subject: [PATCH 02/37] removed verbose output of test targets --- build/bin/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build/bin/Makefile b/build/bin/Makefile index 386d9fa1..932285c9 100644 --- a/build/bin/Makefile +++ b/build/bin/Makefile @@ -9,10 +9,10 @@ build-debug: prepare-sources chown $(PACKAGER) /out/blockbook test: prepare-sources - cd $(GOPATH)/src/blockbook && go test -v -short ./... + cd $(GOPATH)/src/blockbook && go test -short ./... 
test-all: prepare-sources - cd $(GOPATH)/src/blockbook && go test -v ./... + cd $(GOPATH)/src/blockbook && go test ./... prepare-sources: @ [ -n "`ls /src 2> /dev/null`" ] || (echo "/src doesn't exist or is empty" 1>&2 && exit 1) From 173a0feb4a3461fd52977dd7887642778d032045 Mon Sep 17 00:00:00 2001 From: Jakub Matys Date: Fri, 13 Apr 2018 15:23:15 +0200 Subject: [PATCH 03/37] added package definition for Bitcoin Cash backend --- contrib/backends/Makefile | 2 +- contrib/backends/bcash/Makefile | 14 ++++++ contrib/backends/bcash/bch-testnet.conf | 13 ++++++ contrib/backends/bcash/bch.conf | 12 +++++ .../backends/bcash/debian/bcash-bch.conffiles | 1 + contrib/backends/bcash/debian/bcash-bch.dirs | 1 + .../backends/bcash/debian/bcash-bch.install | 2 + .../backends/bcash/debian/bcash-bch.logrotate | 10 +++++ .../backends/bcash/debian/bcash-bch.postinst | 20 +++++++++ .../backends/bcash/debian/bcash-bch.service | 44 +++++++++++++++++++ .../bcash/debian/bcash-testnet.conffiles | 1 + .../backends/bcash/debian/bcash-testnet.dirs | 1 + .../bcash/debian/bcash-testnet.install | 2 + .../bcash/debian/bcash-testnet.logrotate | 10 +++++ .../bcash/debian/bcash-testnet.postinst | 20 +++++++++ .../bcash/debian/bcash-testnet.service | 44 +++++++++++++++++++ contrib/backends/bcash/debian/changelog | 5 +++ contrib/backends/bcash/debian/compat | 1 + contrib/backends/bcash/debian/control | 16 +++++++ contrib/backends/bcash/debian/rules | 6 +++ 20 files changed, 224 insertions(+), 1 deletion(-) create mode 100644 contrib/backends/bcash/Makefile create mode 100644 contrib/backends/bcash/bch-testnet.conf create mode 100644 contrib/backends/bcash/bch.conf create mode 100644 contrib/backends/bcash/debian/bcash-bch.conffiles create mode 100644 contrib/backends/bcash/debian/bcash-bch.dirs create mode 100644 contrib/backends/bcash/debian/bcash-bch.install create mode 100644 contrib/backends/bcash/debian/bcash-bch.logrotate create mode 100644 contrib/backends/bcash/debian/bcash-bch.postinst 
create mode 100644 contrib/backends/bcash/debian/bcash-bch.service create mode 100644 contrib/backends/bcash/debian/bcash-testnet.conffiles create mode 100644 contrib/backends/bcash/debian/bcash-testnet.dirs create mode 100644 contrib/backends/bcash/debian/bcash-testnet.install create mode 100644 contrib/backends/bcash/debian/bcash-testnet.logrotate create mode 100644 contrib/backends/bcash/debian/bcash-testnet.postinst create mode 100644 contrib/backends/bcash/debian/bcash-testnet.service create mode 100644 contrib/backends/bcash/debian/changelog create mode 100644 contrib/backends/bcash/debian/compat create mode 100644 contrib/backends/bcash/debian/control create mode 100755 contrib/backends/bcash/debian/rules diff --git a/contrib/backends/Makefile b/contrib/backends/Makefile index 1d3e964c..8331a7bf 100644 --- a/contrib/backends/Makefile +++ b/contrib/backends/Makefile @@ -1,4 +1,4 @@ -TARGETS = bitcoin zcash +TARGETS = bitcoin zcash bcash IMAGE = blockbook-backend-build-deb NO_CACHE = false diff --git a/contrib/backends/bcash/Makefile b/contrib/backends/bcash/Makefile new file mode 100644 index 00000000..5b958fdc --- /dev/null +++ b/contrib/backends/bcash/Makefile @@ -0,0 +1,14 @@ +BITCOINABC_VERSION := 0.17.0 + +all: + wget https://download.bitcoinabc.org/0.17.0/linux/bitcoin-abc-${BITCOINABC_VERSION}-x86_64-linux-gnu.tar.gz + tar -xf bitcoin-abc-${BITCOINABC_VERSION}-x86_64-linux-gnu.tar.gz + mv bitcoin-abc-${BITCOINABC_VERSION} bitcoin-abc + rm bitcoin-abc/bin/bitcoin-qt + rm bitcoin-abc/bin/bitcoin-tx + rm bitcoin-abc/bin/bitcoin-seeder + rm bitcoin-abc/bin/test_bitcoin + +clean: + rm -rf bitcoin-abc + rm -f bitcoin-abc-${BITCOINABC_VERSION}-x86_64-linux-gnu.tar.gz* diff --git a/contrib/backends/bcash/bch-testnet.conf b/contrib/backends/bcash/bch-testnet.conf new file mode 100644 index 00000000..a524d0d6 --- /dev/null +++ b/contrib/backends/bcash/bch-testnet.conf @@ -0,0 +1,13 @@ +daemon=1 +server=1 +testnet=1 +nolisten=1 +rpcuser=rpc +rpcpassword=rpc 
+rpcport=18432 +txindex=1 +rpcworkqueue=32 +zmqpubhashtx=tcp://127.0.0.1:18434 +zmqpubhashblock=tcp://127.0.0.1:18434 +zmqpubrawblock=tcp://127.0.0.1:18434 +zmqpubrawtx=tcp://127.0.0.1:18434 diff --git a/contrib/backends/bcash/bch.conf b/contrib/backends/bcash/bch.conf new file mode 100644 index 00000000..718a68a8 --- /dev/null +++ b/contrib/backends/bcash/bch.conf @@ -0,0 +1,12 @@ +daemon=1 +server=1 +nolisten=1 +rpcuser=rpc +rpcpassword=rpc +rpcport=8432 +txindex=1 +rpcworkqueue=32 +zmqpubhashtx=tcp://127.0.0.1:8434 +zmqpubhashblock=tcp://127.0.0.1:8434 +zmqpubrawblock=tcp://127.0.0.1:8434 +zmqpubrawtx=tcp://127.0.0.1:8434 diff --git a/contrib/backends/bcash/debian/bcash-bch.conffiles b/contrib/backends/bcash/debian/bcash-bch.conffiles new file mode 100644 index 00000000..e8931283 --- /dev/null +++ b/contrib/backends/bcash/debian/bcash-bch.conffiles @@ -0,0 +1 @@ +/opt/bitcoin/bch/bch.conf diff --git a/contrib/backends/bcash/debian/bcash-bch.dirs b/contrib/backends/bcash/debian/bcash-bch.dirs new file mode 100644 index 00000000..ad156456 --- /dev/null +++ b/contrib/backends/bcash/debian/bcash-bch.dirs @@ -0,0 +1 @@ +/data/bch/bitcoin diff --git a/contrib/backends/bcash/debian/bcash-bch.install b/contrib/backends/bcash/debian/bcash-bch.install new file mode 100644 index 00000000..c002ed3e --- /dev/null +++ b/contrib/backends/bcash/debian/bcash-bch.install @@ -0,0 +1,2 @@ +bitcoin-abc/* /opt/bitcoin/bch +bch.conf /opt/bitcoin/bch diff --git a/contrib/backends/bcash/debian/bcash-bch.logrotate b/contrib/backends/bcash/debian/bcash-bch.logrotate new file mode 100644 index 00000000..130eb1dc --- /dev/null +++ b/contrib/backends/bcash/debian/bcash-bch.logrotate @@ -0,0 +1,10 @@ +/data/bch/bitcoin/debug.log +/data/bch/bitcoin/db.log +{ + rotate 7 + daily + compress + missingok + notifempty + copytruncate +} diff --git a/contrib/backends/bcash/debian/bcash-bch.postinst b/contrib/backends/bcash/debian/bcash-bch.postinst new file mode 100644 index 00000000..2f0885aa --- 
/dev/null +++ b/contrib/backends/bcash/debian/bcash-bch.postinst @@ -0,0 +1,20 @@ +#!/bin/bash +set -e + +case "$1" in + + configure) + if ! id -u bitcoin &> /dev/null + then + useradd --system -M -U bitcoin + fi + + if [ "$(stat -c '%U' /data/bch/bitcoin)" != "bitcoin" ] + then + chown bitcoin:bitcoin /data/bch/bitcoin + fi + ;; + +esac + +#DEBHELPER# diff --git a/contrib/backends/bcash/debian/bcash-bch.service b/contrib/backends/bcash/debian/bcash-bch.service new file mode 100644 index 00000000..298e0d97 --- /dev/null +++ b/contrib/backends/bcash/debian/bcash-bch.service @@ -0,0 +1,44 @@ +# It is not recommended to modify this file in-place, because it will +# be overwritten during package upgrades. If you want to add further +# options or overwrite existing ones then use +# $ systemctl edit bcash-bch.service +# See "man systemd.service" for details. + +# Note that almost all daemon options could be specified in +# /opt/bitcoin/bch/bch.conf + +[Unit] +Description=Bitcoin Cash daemon (mainnet) +After=network.target + +[Service] +ExecStart=/opt/bitcoin/bch/bin/bitcoind -datadir=/data/bch/bitcoin -conf=/opt/bitcoin/bch/bch.conf -pid=/run/bitcoind/bch.pid +# Creates /run/bitcoind owned by bitcoin +RuntimeDirectory=bitcoind +User=bitcoin +Type=forking +PIDFile=/run/bitcoind/bch.pid +Restart=on-failure + +# Hardening measures +#################### + +# Provide a private /tmp and /var/tmp. +PrivateTmp=true + +# Mount /usr, /boot/ and /etc read-only for the process. +ProtectSystem=full + +# Disallow the process and all of its children to gain +# new privileges through execve(). +NoNewPrivileges=true + +# Use a new /dev namespace only populated with API pseudo devices +# such as /dev/null, /dev/zero and /dev/random. +PrivateDevices=true + +# Deny the creation of writable and executable memory mappings. 
+MemoryDenyWriteExecute=true + +[Install] +WantedBy=multi-user.target diff --git a/contrib/backends/bcash/debian/bcash-testnet.conffiles b/contrib/backends/bcash/debian/bcash-testnet.conffiles new file mode 100644 index 00000000..a1304926 --- /dev/null +++ b/contrib/backends/bcash/debian/bcash-testnet.conffiles @@ -0,0 +1 @@ +/opt/bitcoin/bch-testnet/bch-testnet.conf diff --git a/contrib/backends/bcash/debian/bcash-testnet.dirs b/contrib/backends/bcash/debian/bcash-testnet.dirs new file mode 100644 index 00000000..86472a15 --- /dev/null +++ b/contrib/backends/bcash/debian/bcash-testnet.dirs @@ -0,0 +1 @@ +/data/bch-testnet/bitcoin diff --git a/contrib/backends/bcash/debian/bcash-testnet.install b/contrib/backends/bcash/debian/bcash-testnet.install new file mode 100644 index 00000000..ce91977a --- /dev/null +++ b/contrib/backends/bcash/debian/bcash-testnet.install @@ -0,0 +1,2 @@ +bitcoin-abc/* /opt/bitcoin/bch-testnet +bch-testnet.conf /opt/bitcoin/bch-testnet diff --git a/contrib/backends/bcash/debian/bcash-testnet.logrotate b/contrib/backends/bcash/debian/bcash-testnet.logrotate new file mode 100644 index 00000000..d009963f --- /dev/null +++ b/contrib/backends/bcash/debian/bcash-testnet.logrotate @@ -0,0 +1,10 @@ +/data/bch-testnet/bitcoin/testnet3/debug.log +/data/bch-testnet/bitcoin/testnet3/db.log +{ + rotate 7 + daily + compress + missingok + notifempty + copytruncate +} diff --git a/contrib/backends/bcash/debian/bcash-testnet.postinst b/contrib/backends/bcash/debian/bcash-testnet.postinst new file mode 100644 index 00000000..33dec1ef --- /dev/null +++ b/contrib/backends/bcash/debian/bcash-testnet.postinst @@ -0,0 +1,20 @@ +#!/bin/bash +set -e + +case "$1" in + + configure) + if ! 
id -u bitcoin &> /dev/null + then + useradd --system -M -U bitcoin + fi + + if [ "$(stat -c '%U' /data/bch-testnet/bitcoin)" != "bitcoin" ] + then + chown bitcoin:bitcoin /data/bch-testnet/bitcoin + fi + ;; + +esac + +#DEBHELPER# diff --git a/contrib/backends/bcash/debian/bcash-testnet.service b/contrib/backends/bcash/debian/bcash-testnet.service new file mode 100644 index 00000000..56d8fc31 --- /dev/null +++ b/contrib/backends/bcash/debian/bcash-testnet.service @@ -0,0 +1,44 @@ +# It is not recommended to modify this file in-place, because it will +# be overwritten during package upgrades. If you want to add further +# options or overwrite existing ones then use +# $ systemctl edit bcash-testnet.service +# See "man systemd.service" for details. + +# Note that almost all daemon options could be specified in +# /opt/bitcoin/bch-testnet/bch-testnet.conf + +[Unit] +Description=Bitcoin Cash daemon (testnet) +After=network.target + +[Service] +ExecStart=/opt/bitcoin/bch-testnet/bin/bitcoind -datadir=/data/bch-testnet/bitcoin -conf=/opt/bitcoin/bch-testnet/bch-testnet.conf -pid=/run/bitcoind/bch-testnet.pid +# Creates /run/bitcoind owned by bitcoin +RuntimeDirectory=bitcoind +User=bitcoin +Type=forking +PIDFile=/run/bitcoind/bch-testnet.pid +Restart=on-failure + +# Hardening measures +#################### + +# Provide a private /tmp and /var/tmp. +PrivateTmp=true + +# Mount /usr, /boot/ and /etc read-only for the process. +ProtectSystem=full + +# Disallow the process and all of its children to gain +# new privileges through execve(). +NoNewPrivileges=true + +# Use a new /dev namespace only populated with API pseudo devices +# such as /dev/null, /dev/zero and /dev/random. +PrivateDevices=true + +# Deny the creation of writable and executable memory mappings. 
+MemoryDenyWriteExecute=true + +[Install] +WantedBy=multi-user.target diff --git a/contrib/backends/bcash/debian/changelog b/contrib/backends/bcash/debian/changelog new file mode 100644 index 00000000..464f4c7d --- /dev/null +++ b/contrib/backends/bcash/debian/changelog @@ -0,0 +1,5 @@ +bcash (0.17.0-satoshilabs1) unstable; urgency=medium + + * Initial build + + -- Jakub Matys Fri, 13 Apr 2018 11:31:01 +0200 diff --git a/contrib/backends/bcash/debian/compat b/contrib/backends/bcash/debian/compat new file mode 100644 index 00000000..ec635144 --- /dev/null +++ b/contrib/backends/bcash/debian/compat @@ -0,0 +1 @@ +9 diff --git a/contrib/backends/bcash/debian/control b/contrib/backends/bcash/debian/control new file mode 100644 index 00000000..e7223d94 --- /dev/null +++ b/contrib/backends/bcash/debian/control @@ -0,0 +1,16 @@ +Source: bcash +Section: satoshilabs +Priority: optional +Maintainer: jakub.matys@satoshilabs.com +Build-Depends: debhelper, wget, tar, gzip, make, dh-systemd, dh-exec +Standards-Version: 3.9.5 + +Package: bcash-bch +Architecture: amd64 +Depends: ${shlibs:Depends}, ${misc:Depends}, logrotate +Description: Satoshilabs packaged bitcoin-cash server + +Package: bcash-testnet +Architecture: amd64 +Depends: ${shlibs:Depends}, ${misc:Depends}, logrotate +Description: Satoshilabs packaged bitcoin-cash server diff --git a/contrib/backends/bcash/debian/rules b/contrib/backends/bcash/debian/rules new file mode 100755 index 00000000..e9b6951b --- /dev/null +++ b/contrib/backends/bcash/debian/rules @@ -0,0 +1,6 @@ +#!/usr/bin/make -f + +DH_VERBOSE = 1 + +%: + dh $@ --with=systemd From 11e519970ed79ad3704c4ae4e0409825b0733229 Mon Sep 17 00:00:00 2001 From: Jakub Matys Date: Mon, 16 Apr 2018 19:50:34 +0200 Subject: [PATCH 04/37] added package definition for Bitcoin Cash blockbook --- .../debian/blockbook-bch-testnet.conffiles | 1 + .../debian/blockbook-bch-testnet.cron.daily | 2 + build/deb/debian/blockbook-bch-testnet.dirs | 2 + 
.../deb/debian/blockbook-bch-testnet.install | 5 +++ build/deb/debian/blockbook-bch-testnet.links | 2 + .../deb/debian/blockbook-bch-testnet.postinst | 23 +++++++++++ .../deb/debian/blockbook-bch-testnet.service | 39 +++++++++++++++++++ build/deb/debian/blockbook-bch.conffiles | 1 + build/deb/debian/blockbook-bch.cron.daily | 2 + build/deb/debian/blockbook-bch.dirs | 2 + build/deb/debian/blockbook-bch.install | 5 +++ build/deb/debian/blockbook-bch.links | 2 + build/deb/debian/blockbook-bch.postinst | 23 +++++++++++ build/deb/debian/blockbook-bch.service | 39 +++++++++++++++++++ build/deb/debian/control | 16 ++++++-- configs/bch-testnet.json | 8 ++++ configs/bch.json | 8 ++++ 17 files changed, 177 insertions(+), 3 deletions(-) create mode 100644 build/deb/debian/blockbook-bch-testnet.conffiles create mode 100644 build/deb/debian/blockbook-bch-testnet.cron.daily create mode 100644 build/deb/debian/blockbook-bch-testnet.dirs create mode 100755 build/deb/debian/blockbook-bch-testnet.install create mode 100644 build/deb/debian/blockbook-bch-testnet.links create mode 100644 build/deb/debian/blockbook-bch-testnet.postinst create mode 100644 build/deb/debian/blockbook-bch-testnet.service create mode 100644 build/deb/debian/blockbook-bch.conffiles create mode 100644 build/deb/debian/blockbook-bch.cron.daily create mode 100644 build/deb/debian/blockbook-bch.dirs create mode 100755 build/deb/debian/blockbook-bch.install create mode 100644 build/deb/debian/blockbook-bch.links create mode 100644 build/deb/debian/blockbook-bch.postinst create mode 100644 build/deb/debian/blockbook-bch.service create mode 100644 configs/bch-testnet.json create mode 100644 configs/bch.json diff --git a/build/deb/debian/blockbook-bch-testnet.conffiles b/build/deb/debian/blockbook-bch-testnet.conffiles new file mode 100644 index 00000000..51318941 --- /dev/null +++ b/build/deb/debian/blockbook-bch-testnet.conffiles @@ -0,0 +1 @@ +/opt/blockbook/bch-testnet/config/blockchaincfg.json diff --git 
a/build/deb/debian/blockbook-bch-testnet.cron.daily b/build/deb/debian/blockbook-bch-testnet.cron.daily new file mode 100644 index 00000000..4827959f --- /dev/null +++ b/build/deb/debian/blockbook-bch-testnet.cron.daily @@ -0,0 +1,2 @@ +#!/bin/sh +find /opt/blockbook/bch-testnet/logs -mtime +6 -type f -delete diff --git a/build/deb/debian/blockbook-bch-testnet.dirs b/build/deb/debian/blockbook-bch-testnet.dirs new file mode 100644 index 00000000..c2be0d25 --- /dev/null +++ b/build/deb/debian/blockbook-bch-testnet.dirs @@ -0,0 +1,2 @@ +/data/bch-testnet/blockbook +/opt/blockbook/bch-testnet/logs diff --git a/build/deb/debian/blockbook-bch-testnet.install b/build/deb/debian/blockbook-bch-testnet.install new file mode 100755 index 00000000..ac6b38e6 --- /dev/null +++ b/build/deb/debian/blockbook-bch-testnet.install @@ -0,0 +1,5 @@ +#!/usr/bin/dh-exec +blockbook /opt/blockbook/bch-testnet/bin +server/testcert.* /opt/blockbook/bch-testnet/cert +server/static /opt/blockbook/bch-testnet +configs/bch-testnet.json => /opt/blockbook/bch-testnet/config/blockchaincfg.json diff --git a/build/deb/debian/blockbook-bch-testnet.links b/build/deb/debian/blockbook-bch-testnet.links new file mode 100644 index 00000000..a2ffacc3 --- /dev/null +++ b/build/deb/debian/blockbook-bch-testnet.links @@ -0,0 +1,2 @@ +/opt/blockbook/bch-testnet/cert/testcert.crt /opt/blockbook/bch-testnet/cert/blockbook.crt +/opt/blockbook/bch-testnet/cert/testcert.key /opt/blockbook/bch-testnet/cert/blockbook.key diff --git a/build/deb/debian/blockbook-bch-testnet.postinst b/build/deb/debian/blockbook-bch-testnet.postinst new file mode 100644 index 00000000..6e94b440 --- /dev/null +++ b/build/deb/debian/blockbook-bch-testnet.postinst @@ -0,0 +1,23 @@ +#!/bin/bash +set -e + +case "$1" in + + configure) + if ! 
id -u blockbook &> /dev/null + then + useradd --system -M -U blockbook + fi + + for dir in /data/bch-testnet/blockbook /opt/blockbook/bch-testnet/logs + do + if [ "$(stat -c '%U' $dir)" != "blockbook" ] + then + chown -R blockbook:blockbook $dir + fi + done + ;; + +esac + +#DEBHELPER# diff --git a/build/deb/debian/blockbook-bch-testnet.service b/build/deb/debian/blockbook-bch-testnet.service new file mode 100644 index 00000000..ee5b1d7f --- /dev/null +++ b/build/deb/debian/blockbook-bch-testnet.service @@ -0,0 +1,39 @@ +# It is not recommended to modify this file in-place, because it will +# be overwritten during package upgrades. If you want to add further +# options or overwrite existing ones then use +# $ systemctl edit blockbook-bch-testnet.service +# See "man systemd.service" for details. + +[Unit] +Description=Blockbook daemon (BCH testnet) +After=network.target + +[Service] +ExecStart=/opt/blockbook/bch-testnet/bin/blockbook -coin=bch-testnet -blockchaincfg=/opt/blockbook/bch-testnet/config/blockchaincfg.json -datadir=/data/bch-testnet/blockbook/db -sync -httpserver=:18435 -socketio=:18436 -certfile=/opt/blockbook/bch-testnet/cert/blockbook -explorer=https://bch-bitcore1.trezor.io/ -log_dir=/opt/blockbook/bch-testnet/logs +User=blockbook +Type=simple +Restart=on-failure +WorkingDirectory=/opt/blockbook/bch-testnet + +# Hardening measures +#################### + +# Provide a private /tmp and /var/tmp. +PrivateTmp=true + +# Mount /usr, /boot/ and /etc read-only for the process. +ProtectSystem=full + +# Disallow the process and all of its children to gain +# new privileges through execve(). +NoNewPrivileges=true + +# Use a new /dev namespace only populated with API pseudo devices +# such as /dev/null, /dev/zero and /dev/random. +PrivateDevices=true + +# Deny the creation of writable and executable memory mappings. 
+MemoryDenyWriteExecute=true + +[Install] +WantedBy=multi-user.target diff --git a/build/deb/debian/blockbook-bch.conffiles b/build/deb/debian/blockbook-bch.conffiles new file mode 100644 index 00000000..7195f6f3 --- /dev/null +++ b/build/deb/debian/blockbook-bch.conffiles @@ -0,0 +1 @@ +/opt/blockbook/bch/config/blockchaincfg.json diff --git a/build/deb/debian/blockbook-bch.cron.daily b/build/deb/debian/blockbook-bch.cron.daily new file mode 100644 index 00000000..f6fc7427 --- /dev/null +++ b/build/deb/debian/blockbook-bch.cron.daily @@ -0,0 +1,2 @@ +#!/bin/sh +find /opt/blockbook/bch/logs -mtime +6 -type f -delete diff --git a/build/deb/debian/blockbook-bch.dirs b/build/deb/debian/blockbook-bch.dirs new file mode 100644 index 00000000..5f081d6a --- /dev/null +++ b/build/deb/debian/blockbook-bch.dirs @@ -0,0 +1,2 @@ +/data/bch/blockbook +/opt/blockbook/bch/logs diff --git a/build/deb/debian/blockbook-bch.install b/build/deb/debian/blockbook-bch.install new file mode 100755 index 00000000..bec00277 --- /dev/null +++ b/build/deb/debian/blockbook-bch.install @@ -0,0 +1,5 @@ +#!/usr/bin/dh-exec +blockbook /opt/blockbook/bch/bin +server/testcert.* /opt/blockbook/bch/cert +server/static /opt/blockbook/bch +configs/bch.json => /opt/blockbook/bch/config/blockchaincfg.json diff --git a/build/deb/debian/blockbook-bch.links b/build/deb/debian/blockbook-bch.links new file mode 100644 index 00000000..e236a84c --- /dev/null +++ b/build/deb/debian/blockbook-bch.links @@ -0,0 +1,2 @@ +/opt/blockbook/bch/cert/testcert.crt /opt/blockbook/bch/cert/blockbook.crt +/opt/blockbook/bch/cert/testcert.key /opt/blockbook/bch/cert/blockbook.key diff --git a/build/deb/debian/blockbook-bch.postinst b/build/deb/debian/blockbook-bch.postinst new file mode 100644 index 00000000..f40d9728 --- /dev/null +++ b/build/deb/debian/blockbook-bch.postinst @@ -0,0 +1,23 @@ +#!/bin/bash +set -e + +case "$1" in + + configure) + if ! 
id -u blockbook &> /dev/null + then + useradd --system -M -U blockbook + fi + + for dir in /data/bch/blockbook /opt/blockbook/bch/logs + do + if [ "$(stat -c '%U' $dir)" != "blockbook" ] + then + chown -R blockbook:blockbook $dir + fi + done + ;; + +esac + +#DEBHELPER# diff --git a/build/deb/debian/blockbook-bch.service b/build/deb/debian/blockbook-bch.service new file mode 100644 index 00000000..e9c74a4f --- /dev/null +++ b/build/deb/debian/blockbook-bch.service @@ -0,0 +1,39 @@ +# It is not recommended to modify this file in-place, because it will +# be overwritten during package upgrades. If you want to add further +# options or overwrite existing ones then use +# $ systemctl edit blockbook-bch.service +# See "man systemd.service" for details. + +[Unit] +Description=Blockbook daemon (BCH mainnet) +After=network.target + +[Service] +ExecStart=/opt/blockbook/bch/bin/blockbook -coin=bch -blockchaincfg=/opt/blockbook/bch/config/blockchaincfg.json -datadir=/data/bch/blockbook/db -sync -httpserver=:8435 -socketio=:8436 -certfile=/opt/blockbook/bch/cert/blockbook -explorer=https://bitcore1.trezor.io/ -log_dir=/opt/blockbook/bch/logs +User=blockbook +Type=simple +Restart=on-failure +WorkingDirectory=/opt/blockbook/bch + +# Hardening measures +#################### + +# Provide a private /tmp and /var/tmp. +PrivateTmp=true + +# Mount /usr, /boot/ and /etc read-only for the process. +ProtectSystem=full + +# Disallow the process and all of its children to gain +# new privileges through execve(). +NoNewPrivileges=true + +# Use a new /dev namespace only populated with API pseudo devices +# such as /dev/null, /dev/zero and /dev/random. +PrivateDevices=true + +# Deny the creation of writable and executable memory mappings. 
+MemoryDenyWriteExecute=true + +[Install] +WantedBy=multi-user.target diff --git a/build/deb/debian/control b/build/deb/debian/control index 5ab64d64..74a071a1 100644 --- a/build/deb/debian/control +++ b/build/deb/debian/control @@ -8,14 +8,24 @@ Standards-Version: 3.9.5 Package: blockbook-btc Architecture: amd64 Depends: ${shlibs:Depends}, ${misc:Depends}, coreutils, passwd, findutils -Description: Satoshilabs blockbook server +Description: Satoshilabs blockbook server (Bitcoin mainnet) Package: blockbook-btc-testnet Architecture: amd64 Depends: ${shlibs:Depends}, ${misc:Depends}, coreutils, passwd, findutils -Description: Satoshilabs blockbook server +Description: Satoshilabs blockbook server (Bitcoin testnet) Package: blockbook-zec Architecture: amd64 Depends: ${shlibs:Depends}, ${misc:Depends}, coreutils, passwd, findutils -Description: Satoshilabs blockbook server +Description: Satoshilabs blockbook server (ZCash mainnet) + +Package: blockbook-bch +Architecture: amd64 +Depends: ${shlibs:Depends}, ${misc:Depends}, coreutils, passwd, findutils +Description: Satoshilabs blockbook server (Bitcoin Cash mainnet) + +Package: blockbook-bch-testnet +Architecture: amd64 +Depends: ${shlibs:Depends}, ${misc:Depends}, coreutils, passwd, findutils +Description: Satoshilabs blockbook server (Bitcoin Cash testnet) diff --git a/configs/bch-testnet.json b/configs/bch-testnet.json new file mode 100644 index 00000000..606eb66a --- /dev/null +++ b/configs/bch-testnet.json @@ -0,0 +1,8 @@ +{ + "rpcURL": "http://localhost:18432", + "rpcUser": "rpc", + "rpcPass": "rpc", + "rpcTimeout": 25, + "parse": true, + "zeroMQBinding": "tcp://127.0.0.1:18434" +} diff --git a/configs/bch.json b/configs/bch.json new file mode 100644 index 00000000..e124c7af --- /dev/null +++ b/configs/bch.json @@ -0,0 +1,8 @@ +{ + "rpcURL": "http://127.0.0.1:8432", + "rpcUser": "rpc", + "rpcPass": "rpc", + "rpcTimeout": 25, + "parse": true, + "zeroMQBinding": "tcp://127.0.0.1:8434" +} From 
5cf9dd11690b7587c3ee234142a71590b070b482 Mon Sep 17 00:00:00 2001 From: Jakub Matys Date: Mon, 16 Apr 2018 19:51:35 +0200 Subject: [PATCH 05/37] quickfix of tests execution --- build/bin/Makefile | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/build/bin/Makefile b/build/bin/Makefile index 932285c9..31e683ef 100644 --- a/build/bin/Makefile +++ b/build/bin/Makefile @@ -9,10 +9,12 @@ build-debug: prepare-sources chown $(PACKAGER) /out/blockbook test: prepare-sources - cd $(GOPATH)/src/blockbook && go test -short ./... + #cd $(GOPATH)/src/blockbook && go test -short ./... # FIXME + cd $(GOPATH)/src/blockbook && go test -short ./bchain/coins/btc ./bchain/coins/bch ./bchain/coins/eth ./bchain/coins/zec test-all: prepare-sources - cd $(GOPATH)/src/blockbook && go test ./... + # cd $(GOPATH)/src/blockbook && go test ./... # FIXME + cd $(GOPATH)/src/blockbook && go test ./bchain/coins/btc ./bchain/coins/bch ./bchain/coins/eth ./bchain/coins/zec prepare-sources: @ [ -n "`ls /src 2> /dev/null`" ] || (echo "/src doesn't exist or is empty" 1>&2 && exit 1) From 296eee828f48ff9e38530c2d068e9b6ac7bd676a Mon Sep 17 00:00:00 2001 From: Martin Boehm Date: Tue, 17 Apr 2018 23:50:01 +0200 Subject: [PATCH 06/37] Change the way UTXO addresses are indexed - WIP Columns before: outputs: saddress+block height -> outpoints inputs: txid+vout -> spending txid+vout Columns after change: addresses: address+block height -> input or output outpoints unspenttxs: txid -> addresses+indexes --- db/rocksdb.go | 316 ++++++++++++++++++++++++-------------------------- 1 file changed, 153 insertions(+), 163 deletions(-) diff --git a/db/rocksdb.go b/db/rocksdb.go index 959fd498..1dda824d 100644 --- a/db/rocksdb.go +++ b/db/rocksdb.go @@ -5,12 +5,9 @@ import ( "bytes" "encoding/binary" "encoding/hex" - "math" "os" "path/filepath" - "github.com/juju/errors" - "github.com/bsm/go-vlq" "github.com/golang/glog" @@ -40,12 +37,12 @@ type RocksDB struct { const ( cfDefault = iota cfHeight - 
cfOutputs - cfInputs + cfAddresses + cfUnspentTxs cfTransactions ) -var cfNames = []string{"default", "height", "outputs", "inputs", "transactions"} +var cfNames = []string{"default", "height", "addresses", "unspenttxs", "transactions"} func openDB(path string) (*gorocksdb.DB, []*gorocksdb.ColumnFamilyHandle, error) { c := gorocksdb.NewLRUCache(8 << 30) // 8GB @@ -156,11 +153,9 @@ func (d *RocksDB) GetTransactions(address string, lower uint32, higher uint32, f return err } - it := d.db.NewIteratorCF(d.ro, d.cfh[cfOutputs]) + it := d.db.NewIteratorCF(d.ro, d.cfh[cfAddresses]) defer it.Close() - isUTXO := d.chainParser.IsUTXOChain() - for it.Seek(kstart); it.Valid(); it.Next() { key := it.Key().Data() val := it.Value().Data() @@ -187,20 +182,6 @@ func (d *RocksDB) GetTransactions(address string, lower uint32, higher uint32, f if err := fn(o.txid, vout, isOutput); err != nil { return err } - if isUTXO { - stxid, so, err := d.GetSpentOutput(o.txid, o.vout) - if err != nil { - return err - } - if stxid != "" { - if glog.V(2) { - glog.Infof("rocksdb: input %s/%d: %s/%d", o.txid, o.vout, stxid, so) - } - if err := fn(stxid, uint32(so), false); err != nil { - return err - } - } - } } } return nil @@ -237,11 +218,12 @@ func (d *RocksDB) writeBlock(block *bchain.Block, op int) error { if err := d.writeHeight(wb, block, op); err != nil { return err } - if err := d.writeOutputs(wb, block, op, isUTXO); err != nil { - return err - } if isUTXO { - if err := d.writeInputs(wb, block, op); err != nil { + if err := d.writeAddressesUTXO(wb, block, op); err != nil { + return err + } + } else { + if err := d.writeAddressesNonUTXO(wb, block, op); err != nil { return err } } @@ -249,40 +231,149 @@ func (d *RocksDB) writeBlock(block *bchain.Block, op int) error { return d.db.Write(d.wo, wb) } -// Output Index +// Addresses index type outpoint struct { txid string vout int32 } -func (d *RocksDB) addAddrIDToRecords(op int, wb *gorocksdb.WriteBatch, records map[string][]outpoint, addrID 
[]byte, txid string, vout int32, bh uint32) error { +func (d *RocksDB) writeAddressRecords(wb *gorocksdb.WriteBatch, block *bchain.Block, op int, records map[string][]outpoint) error { + for addrID, outpoints := range records { + key, err := packOutputKey([]byte(addrID), block.Height) + if err != nil { + glog.Warningf("rocksdb: packOutputKey: %v - %d %s", err, block.Height, addrID) + continue + } + switch op { + case opInsert: + val, err := d.packOutputValue(outpoints) + if err != nil { + glog.Warningf("rocksdb: packOutputValue: %v", err) + continue + } + wb.PutCF(d.cfh[cfAddresses], key, val) + case opDelete: + wb.DeleteCF(d.cfh[cfAddresses], key) + } + } + return nil +} + +func (d *RocksDB) addAddrIDToRecords(op int, wb *gorocksdb.WriteBatch, records map[string][]outpoint, addrID []byte, btxid []byte, vout int32, bh uint32) error { if len(addrID) > 0 { if len(addrID) > 1024 { glog.Infof("block %d, skipping addrID of length %d", bh, len(addrID)) } else { strAddrID := string(addrID) records[strAddrID] = append(records[strAddrID], outpoint{ - txid: txid, + txid: string(btxid), vout: vout, }) if op == opDelete { // remove transactions from cache - b, err := d.chainParser.PackTxid(txid) - if err != nil { - return err - } - wb.DeleteCF(d.cfh[cfTransactions], b) + wb.DeleteCF(d.cfh[cfTransactions], btxid) } } } return nil } -func (d *RocksDB) writeOutputs(wb *gorocksdb.WriteBatch, block *bchain.Block, op int, isUTXO bool) error { - records := make(map[string][]outpoint) +func (d *RocksDB) getUnspentTx(btxID []byte) ([]byte, error) { + // find it in db, in the column cfUnspentTxs + val, err := d.db.GetCF(d.ro, d.cfh[cfUnspentTxs], btxID) + if err != nil { + return nil, err + } + defer val.Free() + data := append([]byte(nil), val.Data()...) 
+ return data, nil +} +func (d *RocksDB) writeAddressesUTXO(wb *gorocksdb.WriteBatch, block *bchain.Block, op int) error { + var err error + addresses := make(map[string][]outpoint) + unspentTxs := make(map[string][]byte) + btxIDs := make([][]byte, len(block.Txs)) + // first process all outputs, build mapping of addresses to outpoints and mapppings of unspent txs to addresses + for txi, tx := range block.Txs { + btxID, err := d.chainParser.PackTxid(tx.Txid) + if err != nil { + return err + } + btxIDs[txi] = btxID + // preallocate estimated size of addresses (32 bytes is 1 byte length of addrID, 25 bytes addrID, 1-2 bytes vout and reserve) + txAddrs := make([]byte, 0, len(tx.Vout)*32) + for i, output := range tx.Vout { + addrID, err := d.chainParser.GetAddrIDFromVout(&output) + if err != nil { + // do not log ErrAddressMissing, transactions can be without to address (for example eth contracts) + if err != bchain.ErrAddressMissing { + glog.Warningf("rocksdb: addrID: %v - height %d, tx %v, output %v", err, block.Height, tx.Txid, output) + } + continue + } + err = d.addAddrIDToRecords(op, wb, addresses, addrID, btxID, int32(output.N), block.Height) + if err != nil { + return err + } + // resize the addr buffer if necessary by a new estimate + if cap(txAddrs)-len(txAddrs) < 2*vlq.MaxLen32+len(addrID) { + txAddrs = append(txAddrs, make([]byte, vlq.MaxLen32+len(addrID)+(len(tx.Vout)-i)*32)...)[:len(txAddrs)] + } + // addrID is packed as number of bytes of the addrID + bytes of addrID + vout + lv := packVarint(int32(len(addrID)), txAddrs[len(txAddrs):]) + txAddrs = txAddrs[:len(txAddrs)+lv] + txAddrs = append(txAddrs, addrID...) 
+ lv = packVarint(int32(output.N), txAddrs[len(txAddrs):]) + txAddrs = txAddrs[:len(txAddrs)+lv] + } + unspentTxs[tx.Txid] = txAddrs + // locate unspent txs/addresses and store them in format txid ^index + } + for txi, tx := range block.Txs { + btxID := btxIDs[txi] + // try to find the tx in current block + unspentAddrs, thisBlock := unspentTxs[string(btxID)] + if thisBlock { + + } else { + unspentAddrs, err = d.getUnspentTx(btxID) + if err != nil { + return err + } + if unspentAddrs == nil { + glog.Warningf("rocksdb: height %d, tx %v in inputs but missing in unspentTxs", block.Height, tx.Txid) + continue + } + } + + // for _, input := range tx.Vin { + // input.Vout + // } + } + if err := d.writeAddressRecords(wb, block, op, addresses); err != nil { + return err + } + // save unspent txs from current block + for tx, val := range unspentTxs { + switch op { + case opInsert: + wb.PutCF(d.cfh[cfUnspentTxs], []byte(tx), val) + case opDelete: + wb.DeleteCF(d.cfh[cfUnspentTxs], []byte(tx)) + } + } + return nil +} + +func (d *RocksDB) writeAddressesNonUTXO(wb *gorocksdb.WriteBatch, block *bchain.Block, op int) error { + addresses := make(map[string][]outpoint) for _, tx := range block.Txs { + btxID, err := d.chainParser.PackTxid(tx.Txid) + if err != nil { + return err + } for _, output := range tx.Vout { addrID, err := d.chainParser.GetAddrIDFromVout(&output) if err != nil { @@ -292,50 +383,27 @@ func (d *RocksDB) writeOutputs(wb *gorocksdb.WriteBatch, block *bchain.Block, op } continue } - err = d.addAddrIDToRecords(op, wb, records, addrID, tx.Txid, int32(output.N), block.Height) + err = d.addAddrIDToRecords(op, wb, addresses, addrID, btxID, int32(output.N), block.Height) if err != nil { return err } } - if !isUTXO { - // store inputs in output column in format txid ^index - for _, input := range tx.Vin { - for i, a := range input.Addresses { - addrID, err := d.chainParser.GetAddrIDFromAddress(a) - if err != nil { - glog.Warningf("rocksdb: addrID: %v - %d %s", err, 
block.Height, addrID) - continue - } - err = d.addAddrIDToRecords(op, wb, records, addrID, tx.Txid, int32(^i), block.Height) - if err != nil { - return err - } + // store inputs in format txid ^index + for _, input := range tx.Vin { + for i, a := range input.Addresses { + addrID, err := d.chainParser.GetAddrIDFromAddress(a) + if err != nil { + glog.Warningf("rocksdb: addrID: %v - %d %s", err, block.Height, addrID) + continue + } + err = d.addAddrIDToRecords(op, wb, addresses, addrID, btxID, int32(^i), block.Height) + if err != nil { + return err } } } } - - for addrID, outpoints := range records { - key, err := packOutputKey([]byte(addrID), block.Height) - if err != nil { - glog.Warningf("rocksdb: packOutputKey: %v - %d %s", err, block.Height, addrID) - continue - } - - switch op { - case opInsert: - val, err := d.packOutputValue(outpoints) - if err != nil { - glog.Warningf("rocksdb: packOutputValue: %v", err) - continue - } - wb.PutCF(d.cfh[cfOutputs], key, val) - case opDelete: - wb.DeleteCF(d.cfh[cfOutputs], key) - } - } - - return nil + return d.writeAddressRecords(wb, block, op, addresses) } func packOutputKey(outputScript []byte, height uint32) ([]byte, error) { @@ -353,9 +421,10 @@ func (d *RocksDB) packOutputValue(outpoints []outpoint) ([]byte, error) { if err != nil { return nil, err } - bvout := packVarint(o.vout) + bvout := make([]byte, vlq.MaxLen32) + l := packVarint(o.vout, bvout) buf = append(buf, btxid...) - buf = append(buf, bvout...) + buf = append(buf, bvout[:l]...) 
} return buf, nil } @@ -379,46 +448,16 @@ func (d *RocksDB) unpackOutputValue(buf []byte) ([]outpoint, error) { return outpoints, nil } -// Input index - -func (d *RocksDB) writeInputs( - wb *gorocksdb.WriteBatch, - block *bchain.Block, - op int, -) error { - for _, tx := range block.Txs { - for i, input := range tx.Vin { - if input.Coinbase != "" { - continue - } - key, err := d.packOutpoint(input.Txid, int32(input.Vout)) - if err != nil { - return err - } - val, err := d.packOutpoint(tx.Txid, int32(i)) - if err != nil { - return err - } - switch op { - case opInsert: - wb.PutCF(d.cfh[cfInputs], key, val) - case opDelete: - wb.DeleteCF(d.cfh[cfInputs], key) - } - } - } - return nil -} - func (d *RocksDB) packOutpoint(txid string, vout int32) ([]byte, error) { btxid, err := d.chainParser.PackTxid(txid) if err != nil { return nil, err } - bvout := packVarint(vout) - buf := make([]byte, 0, len(btxid)+len(bvout)) + bv := make([]byte, vlq.MaxLen32) + l := packVarint(vout, bv) + buf := make([]byte, 0, l+len(btxid)) buf = append(buf, btxid...) - buf = append(buf, bvout...) + buf = append(buf, bv[:l]...) 
return buf, nil } @@ -457,30 +496,6 @@ func (d *RocksDB) GetBlockHash(height uint32) (string, error) { return d.chainParser.UnpackBlockHash(val.Data()) } -// GetSpentOutput returns output which is spent by input tx -func (d *RocksDB) GetSpentOutput(txid string, i int32) (string, int32, error) { - b, err := d.packOutpoint(txid, i) - if err != nil { - return "", 0, err - } - val, err := d.db.GetCF(d.ro, d.cfh[cfInputs], b) - if err != nil { - return "", 0, err - } - defer val.Free() - p, err := d.unpackOutputValue(val.Data()) - if err != nil { - return "", 0, err - } - var otxid string - var oi int32 - for _, i := range p { - otxid, oi = i.txid, i.vout - break - } - return otxid, oi, nil -} - func (d *RocksDB) writeHeight( wb *gorocksdb.WriteBatch, block *bchain.Block, @@ -512,7 +527,7 @@ func (d *RocksDB) DisconnectBlocksFullScan(lower uint32, higher uint32) error { var seekKey []byte for { var key []byte - it := d.db.NewIteratorCF(d.ro, d.cfh[cfOutputs]) + it := d.db.NewIteratorCF(d.ro, d.cfh[cfAddresses]) if totalOutputs == 0 { it.SeekToFirst() } else { @@ -552,7 +567,7 @@ func (d *RocksDB) DisconnectBlocksFullScan(lower uint32, higher uint32) error { if glog.V(2) { glog.Info("output ", hex.EncodeToString(outputKeys[i])) } - wb.DeleteCF(d.cfh[cfOutputs], outputKeys[i]) + wb.DeleteCF(d.cfh[cfAddresses], outputKeys[i]) outpoints, err := d.unpackOutputValue(outputValues[i]) if err != nil { return err @@ -566,7 +581,7 @@ func (d *RocksDB) DisconnectBlocksFullScan(lower uint32, higher uint32) error { if glog.V(2) { glog.Info("input ", hex.EncodeToString(boutpoint)) } - wb.DeleteCF(d.cfh[cfInputs], boutpoint) + wb.DeleteCF(d.cfh[cfUnspentTxs], boutpoint) // delete from txCache b, err := d.chainParser.PackTxid(o.txid) if err != nil { @@ -651,8 +666,6 @@ func (d *RocksDB) DeleteTx(txid string) error { // Helpers -var ErrInvalidAddress = errors.New("invalid address") - func packUint(i uint32) []byte { buf := make([]byte, 4) binary.BigEndian.PutUint32(buf, i) @@ -663,34 
+676,11 @@ func unpackUint(buf []byte) uint32 { return binary.BigEndian.Uint32(buf) } -func packFloat64(f float64) []byte { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, math.Float64bits(f)) - return buf -} - -func unpackFloat64(buf []byte) float64 { - return math.Float64frombits(binary.BigEndian.Uint64(buf)) -} - -func packVarint(i int32) []byte { - buf := make([]byte, vlq.MaxLen32) - ofs := vlq.PutInt(buf, int64(i)) - return buf[:ofs] +func packVarint(i int32, buf []byte) int { + return vlq.PutInt(buf, int64(i)) } func unpackVarint(buf []byte) (int32, int) { i, ofs := vlq.Uint(buf) return int32(i), ofs } - -func packVarint64(i int64) []byte { - buf := make([]byte, vlq.MaxLen64) - ofs := vlq.PutInt(buf, i) - return buf[:ofs] -} - -func unpackVarint64(buf []byte) (int64, int) { - i, ofs := vlq.Int(buf) - return i, ofs -} From a8e603d945a763b0b51a2a8aff8879e719959ffa Mon Sep 17 00:00:00 2001 From: Jakub Matys Date: Wed, 18 Apr 2018 14:15:19 +0100 Subject: [PATCH 07/37] added Bitcoin Cash's rpc and parser --- Gopkg.lock | 8 +- Gopkg.toml | 4 + bchain/coins/bch/bcashparser.go | 75 +++++++++++++ bchain/coins/bch/bcashrpc.go | 180 ++++++++++++++++++++++++++++++++ bchain/coins/blockchain.go | 3 + 5 files changed, 269 insertions(+), 1 deletion(-) create mode 100644 bchain/coins/bch/bcashparser.go create mode 100644 bchain/coins/bch/bcashrpc.go diff --git a/Gopkg.lock b/Gopkg.lock index 1cac8e92..a40eb931 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -31,6 +31,12 @@ packages = [".","base58","bech32"] revision = "501929d3d046174c3d39f0ea54ece471aa17238c" +[[projects]] + branch = "master" + name = "github.com/cpacia/bchutil" + packages = ["."] + revision = "12e86f41eb040d3b85b5d8e3a3a4bed035517c52" + [[projects]] name = "github.com/ethereum/go-ethereum" packages = [".","common","common/hexutil","common/math","core/types","crypto","crypto/secp256k1","crypto/sha3","ethclient","ethdb","log","metrics","params","rlp","rpc","trie"] @@ -184,6 +190,6 @@ [solve-meta] 
analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "a463c234bc11d9917876a827f692392845ed89571edc1484ae3e932f555d484b" + inputs-digest = "e632a1e904953397e9eae00f30a86bffab2d303232c7bac47a16e1ce663043bf" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index a70bb0c3..e0144613 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -72,3 +72,7 @@ [[constraint]] name = "github.com/golang/protobuf" version = "1.0.0" + +[[constraint]] + branch = "master" + name = "github.com/cpacia/bchutil" diff --git a/bchain/coins/bch/bcashparser.go b/bchain/coins/bch/bcashparser.go new file mode 100644 index 00000000..3df5f65e --- /dev/null +++ b/bchain/coins/bch/bcashparser.go @@ -0,0 +1,75 @@ +package bch + +import ( + "blockbook/bchain/coins/btc" + "strings" + + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcutil" + "github.com/cpacia/bchutil" +) + +var prefixes []string + +func init() { + prefixes = make([]string, 0, len(bchutil.Prefixes)) + for _, prefix := range bchutil.Prefixes { + prefixes = append(prefixes, prefix) + } +} + +// BCashParser handle +type BCashParser struct { + *btc.BitcoinParser +} + +// GetChainParams contains network parameters for the main Bitcoin Cash network, +// the regression test Bitcoin Cash network, the test Bitcoin Cash network and +// the simulation test Bitcoin Cash network, in this order +func GetChainParams(chain string) *chaincfg.Params { + var params *chaincfg.Params + switch chain { + case "test": + params = &chaincfg.TestNet3Params + params.Net = bchutil.TestnetMagic + case "regtest": + params = &chaincfg.RegressionNetParams + params.Net = bchutil.Regtestmagic + default: + params = &chaincfg.MainNetParams + params.Net = bchutil.MainnetMagic + } + + return params +} + +// GetAddrIDFromAddress returns internal address representation of given address +func (p *BCashParser) GetAddrIDFromAddress(address string) ([]byte, error) { + return 
p.AddressToOutputScript(address) +} + +// AddressToOutputScript converts bitcoin address to ScriptPubKey +func (p *BCashParser) AddressToOutputScript(address string) ([]byte, error) { + if strings.Contains(address, ":") { + da, err := bchutil.DecodeAddress(address, p.Params) + if err != nil { + return nil, err + } + script, err := bchutil.PayToAddrScript(da) + if err != nil { + return nil, err + } + return script, nil + } else { + da, err := btcutil.DecodeAddress(address, p.Params) + if err != nil { + return nil, err + } + script, err := txscript.PayToAddrScript(da) + if err != nil { + return nil, err + } + return script, nil + } +} diff --git a/bchain/coins/bch/bcashrpc.go b/bchain/coins/bch/bcashrpc.go new file mode 100644 index 00000000..222a7b78 --- /dev/null +++ b/bchain/coins/bch/bcashrpc.go @@ -0,0 +1,180 @@ +package bch + +import ( + "blockbook/bchain" + "blockbook/bchain/coins/btc" + "encoding/hex" + "encoding/json" + + "github.com/cpacia/bchutil" + "github.com/golang/glog" + "github.com/juju/errors" +) + +// BCashRPC is an interface to JSON-RPC bitcoind service. +type BCashRPC struct { + *btc.BitcoinRPC +} + +// NewBCashRPC returns new BCashRPC instance. 
+func NewBCashRPC(config json.RawMessage, pushHandler func(bchain.NotificationType)) (bchain.BlockChain, error) { + b, err := btc.NewBitcoinRPC(config, pushHandler) + if err != nil { + return nil, err + } + + s := &BCashRPC{ + b.(*btc.BitcoinRPC), + } + + return s, nil +} + +func (b *BCashRPC) Initialize() error { + b.Mempool = bchain.NewUTXOMempool(b) + + chainName, err := b.GetBlockChainInfo() + if err != nil { + return err + } + + params := GetChainParams(chainName) + + // always create parser + b.Parser = &BCashParser{ + &btc.BitcoinParser{ + Params: params, + }, + } + + // parameters for getInfo request + if params.Net == bchutil.MainnetMagic { + b.Testnet = false + b.Network = "livenet" + } else { + b.Testnet = true + b.Network = "testnet" + } + + glog.Info("rpc: block chain ", params.Name) + + return nil +} + +// getblock + +type cmdGetBlock struct { + Method string `json:"method"` + Params struct { + BlockHash string `json:"blockhash"` + Verbose bool `json:"verbose"` + } `json:"params"` +} + +type resGetBlockRaw struct { + Error *bchain.RPCError `json:"error"` + Result string `json:"result"` +} + +type resGetBlockThin struct { + Error *bchain.RPCError `json:"error"` + Result bchain.ThinBlock `json:"result"` +} + +// GetBlock returns block with given hash. +func (b *BCashRPC) GetBlock(hash string, height uint32) (*bchain.Block, error) { + var err error + if hash == "" && height > 0 { + hash, err = b.GetBlockHash(height) + if err != nil { + return nil, err + } + } + // XXX + // // optimization + // if height > 0 { + // return b.getBlockWithoutHeader(hash, height) + // } + header, err := b.GetBlockHeader(hash) + if err != nil { + return nil, err + } + data, err := b.GetBlockRaw(hash) + if err != nil { + return nil, err + } + block, err := b.Parser.ParseBlock(data) + if err != nil { + return nil, errors.Annotatef(err, "hash %v", hash) + } + block.BlockHeader = *header + return block, nil +} + +// GetBlockRaw returns block with given hash as bytes. 
+func (b *BCashRPC) GetBlockRaw(hash string) ([]byte, error) { + glog.V(1).Info("rpc: getblock (verbose=0) ", hash) + + res := resGetBlockRaw{} + req := cmdGetBlock{Method: "getblock"} + req.Params.BlockHash = hash + req.Params.Verbose = false + err := b.Call(&req, &res) + + if err != nil { + return nil, errors.Annotatef(err, "hash %v", hash) + } + if res.Error != nil { + if isErrBlockNotFound(res.Error) { + return nil, bchain.ErrBlockNotFound + } + return nil, errors.Annotatef(res.Error, "hash %v", hash) + } + return hex.DecodeString(res.Result) +} + +// GetBlockList returns block with given hash by downloading block +// transactions one by one. +func (b *BCashRPC) GetBlockList(hash string) (*bchain.Block, error) { + glog.V(1).Info("rpc: getblock (verbose=1) ", hash) + + res := resGetBlockThin{} + req := cmdGetBlock{Method: "getblock"} + req.Params.BlockHash = hash + req.Params.Verbose = true + err := b.Call(&req, &res) + + if err != nil { + return nil, errors.Annotatef(err, "hash %v", hash) + } + if res.Error != nil { + if isErrBlockNotFound(res.Error) { + return nil, bchain.ErrBlockNotFound + } + return nil, errors.Annotatef(res.Error, "hash %v", hash) + } + + txs := make([]bchain.Tx, len(res.Result.Txids)) + for i, txid := range res.Result.Txids { + tx, err := b.GetTransaction(txid) + if err != nil { + return nil, err + } + txs[i] = *tx + } + block := &bchain.Block{ + BlockHeader: res.Result.BlockHeader, + Txs: txs, + } + return block, nil +} + +// GetBlockFull returns block with given hash. 
+func (b *BCashRPC) GetBlockFull(hash string) (*bchain.Block, error) { + return nil, errors.New("Not implemented") +} + +func isErrBlockNotFound(err *bchain.RPCError) bool { + return err.Message == "Block not found" || + err.Message == "Block height out of range" +} diff --git a/bchain/coins/blockchain.go b/bchain/coins/blockchain.go index 973df631..39f01e6e 100644 --- a/bchain/coins/blockchain.go +++ b/bchain/coins/blockchain.go @@ -2,6 +2,7 @@ package coins import ( "blockbook/bchain" + "blockbook/bchain/coins/bch" "blockbook/bchain/coins/btc" "blockbook/bchain/coins/eth" "blockbook/bchain/coins/zec" @@ -25,6 +26,8 @@ func init() { blockChainFactories["zec"] = zec.NewZCashRPC blockChainFactories["eth"] = eth.NewEthereumRPC blockChainFactories["eth-testnet"] = eth.NewEthereumRPC + blockChainFactories["bch"] = bch.NewBCashRPC + blockChainFactories["bch-testnet"] = bch.NewBCashRPC } // NewBlockChain creates bchain.BlockChain of type defined by parameter coin From 2ee21779708a681e8d9e088ab0f55e475cab6728 Mon Sep 17 00:00:00 2001 From: Martin Boehm Date: Wed, 18 Apr 2018 23:42:11 +0200 Subject: [PATCH 08/37] Change the way UTXO addresses are indexed - WIP --- db/rocksdb.go | 93 +++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 67 insertions(+), 26 deletions(-) diff --git a/db/rocksdb.go b/db/rocksdb.go index 1dda824d..3d936882 100644 --- a/db/rocksdb.go +++ b/db/rocksdb.go @@ -290,12 +290,46 @@ func (d *RocksDB) getUnspentTx(btxID []byte) ([]byte, error) { return data, nil } +func appendPackedAddrID(txAddrs []byte, addrID []byte, n uint32, remaining int) []byte { + // resize the addr buffer if necessary by a new estimate + if cap(txAddrs)-len(txAddrs) < 2*vlq.MaxLen32+len(addrID) { + txAddrs = append(txAddrs, make([]byte, vlq.MaxLen32+len(addrID)+remaining*32)...)[:len(txAddrs)] + } + // addrID is packed as number of bytes of the addrID + bytes of addrID + vout + lv := packVarint(int32(len(addrID)), txAddrs[len(txAddrs):len(txAddrs)+vlq.MaxLen32]) + 
txAddrs = txAddrs[:len(txAddrs)+lv] + txAddrs = append(txAddrs, addrID...) + lv = packVarint(int32(n), txAddrs[len(txAddrs):len(txAddrs)+vlq.MaxLen32]) + txAddrs = txAddrs[:len(txAddrs)+lv] + return txAddrs +} + +func findAndRemoveUnspentAddr(unspentAddrs []byte, vout uint32) ([]byte, uint32, []byte) { + for i := 0; i < len(unspentAddrs); { + l, lv1 := unpackVarint(unspentAddrs[i:]) + // index of vout of address in unspentAddrs + j := i + int(l) + lv1 + if j >= len(unspentAddrs) { + glog.Error("Inconsistent data in unspentAddrs") + return nil, 0, unspentAddrs + } + n, lv2 := unpackVarint(unspentAddrs[j:]) + if uint32(n) == vout { + addrID := append([]byte(nil), unspentAddrs[i+lv1:j]...) + unspentAddrs = append(unspentAddrs[:i], unspentAddrs[i+lv2:]...) + return addrID, uint32(n), unspentAddrs + } + i += j + lv2 + } + return nil, 0, unspentAddrs +} + func (d *RocksDB) writeAddressesUTXO(wb *gorocksdb.WriteBatch, block *bchain.Block, op int) error { var err error addresses := make(map[string][]outpoint) unspentTxs := make(map[string][]byte) btxIDs := make([][]byte, len(block.Txs)) - // first process all outputs, build mapping of addresses to outpoints and mapppings of unspent txs to addresses + // first process all outputs, build mapping of addresses to outpoints and mappings of unspent txs to addresses for txi, tx := range block.Txs { btxID, err := d.chainParser.PackTxid(tx.Txid) if err != nil { @@ -317,27 +351,16 @@ func (d *RocksDB) writeAddressesUTXO(wb *gorocksdb.WriteBatch, block *bchain.Blo if err != nil { return err } - // resize the addr buffer if necessary by a new estimate - if cap(txAddrs)-len(txAddrs) < 2*vlq.MaxLen32+len(addrID) { - txAddrs = append(txAddrs, make([]byte, vlq.MaxLen32+len(addrID)+(len(tx.Vout)-i)*32)...)[:len(txAddrs)] - } - // addrID is packed as number of bytes of the addrID + bytes of addrID + vout - lv := packVarint(int32(len(addrID)), txAddrs[len(txAddrs):]) - txAddrs = txAddrs[:len(txAddrs)+lv] - txAddrs = append(txAddrs, 
addrID...) - lv = packVarint(int32(output.N), txAddrs[len(txAddrs):]) - txAddrs = txAddrs[:len(txAddrs)+lv] + txAddrs = appendPackedAddrID(txAddrs, addrID, output.N, len(tx.Vout)-i) } - unspentTxs[tx.Txid] = txAddrs - // locate unspent txs/addresses and store them in format txid ^index + unspentTxs[string(btxID)] = txAddrs } + // locate unspent addresses and add them to addresses map them in format txid ^index for txi, tx := range block.Txs { btxID := btxIDs[txi] // try to find the tx in current block - unspentAddrs, thisBlock := unspentTxs[string(btxID)] - if thisBlock { - - } else { + unspentAddrs, inThisBlock := unspentTxs[string(btxID)] + if !inThisBlock { unspentAddrs, err = d.getUnspentTx(btxID) if err != nil { return err @@ -347,10 +370,32 @@ func (d *RocksDB) writeAddressesUTXO(wb *gorocksdb.WriteBatch, block *bchain.Blo continue } } - - // for _, input := range tx.Vin { - // input.Vout - // } + var addrID []byte + var n uint32 + for _, input := range tx.Vin { + addrID, n, unspentAddrs = findAndRemoveUnspentAddr(unspentAddrs, input.Vout) + if addrID == nil { + glog.Warningf("rocksdb: height %d, tx %v vout %v in inputs but missing in unspentTxs", block.Height, tx.Txid, input.Vout) + continue + } + err = d.addAddrIDToRecords(op, wb, addresses, addrID, btxID, int32(^n), block.Height) + if err != nil { + return err + } + } + if inThisBlock { + if len(unspentAddrs) == 0 { + delete(unspentTxs, string(btxID)) + } else { + unspentTxs[string(btxID)] = unspentAddrs + } + } else { + if len(unspentAddrs) == 0 { + wb.DeleteCF(d.cfh[cfUnspentTxs], btxID) + } else { + wb.PutCF(d.cfh[cfUnspentTxs], btxID, unspentAddrs) + } + } } if err := d.writeAddressRecords(wb, block, op, addresses); err != nil { return err @@ -417,13 +462,9 @@ func packOutputKey(outputScript []byte, height uint32) ([]byte, error) { func (d *RocksDB) packOutputValue(outpoints []outpoint) ([]byte, error) { buf := make([]byte, 0) for _, o := range outpoints { - btxid, err := d.chainParser.PackTxid(o.txid) 
- if err != nil { - return nil, err - } bvout := make([]byte, vlq.MaxLen32) l := packVarint(o.vout, bvout) - buf = append(buf, btxid...) + buf = append(buf, []byte(o.txid)...) buf = append(buf, bvout[:l]...) } return buf, nil From 0ae9c446a0976dfca972e81fc9815a93154a4484 Mon Sep 17 00:00:00 2001 From: Martin Boehm Date: Wed, 18 Apr 2018 23:42:38 +0200 Subject: [PATCH 09/37] Add TestRocksDB_Index_UTXO test - WIP --- db/rocksdb_test.go | 158 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 158 insertions(+) create mode 100644 db/rocksdb_test.go diff --git a/db/rocksdb_test.go b/db/rocksdb_test.go new file mode 100644 index 00000000..4e4217d2 --- /dev/null +++ b/db/rocksdb_test.go @@ -0,0 +1,158 @@ +package db + +import ( + "blockbook/bchain" + "blockbook/bchain/coins/btc" + "encoding/hex" + "io/ioutil" + "os" + "strconv" + "testing" + + "github.com/juju/errors" +) + +func setupRocksDB(t *testing.T, p bchain.BlockChainParser) *RocksDB { + tmp, err := ioutil.TempDir("", "testdb") + if err != nil { + t.Fatal(err) + } + d, err := NewRocksDB(tmp, p) + if err != nil { + t.Fatal(err) + } + return d +} + +func closeAnddestroyRocksDB(t *testing.T, d *RocksDB) { + if err := d.Close(); err != nil { + t.Fatal(err) + } + os.RemoveAll(d.path) +} + +func addressToPubKeyHex(addr string, t *testing.T, d *RocksDB) string { + b, err := d.chainParser.AddressToOutputScript(addr) + if err != nil { + t.Fatal(err) + } + return hex.EncodeToString(b) +} + +func addressToPubKeyHexWithLenght(addr string, t *testing.T, d *RocksDB) string { + h := addressToPubKeyHex(addr, t, d) + // length is signed varint, therefore 2 times big, we can take len(h) as the correct value + return strconv.FormatInt(int64(len(h)), 16) + h +} + +type keyPair struct { + Key, Value string +} + +func checkColumn(d *RocksDB, col int, kp []keyPair) error { + it := d.db.NewIteratorCF(d.ro, d.cfh[col]) + defer it.Close() + i := 0 + for it.SeekToFirst(); it.Valid(); it.Next() { + if i >= len(kp) { + return 
errors.Errorf("Expected less rows in column %v", col) + } + key := hex.EncodeToString(it.Key().Data()) + if key != kp[i].Key { + return errors.Errorf("Incorrect key %v found in column %v row %v, expecting %v", key, col, i, kp[i].Key) + } + val := hex.EncodeToString(it.Value().Data()) + if val != kp[i].Value { + return errors.Errorf("Incorrect key %v found in column %v row %v, expecting %v", val, col, i, kp[i].Value) + } + i++ + } + if i != len(kp) { + return errors.Errorf("Expected more rows in column %v: found %v, expected %v", col, i, len(kp)) + } + return nil +} +func TestRocksDB_Index_UTXO(t *testing.T) { + d := setupRocksDB(t, &btc.BitcoinParser{Params: btc.GetChainParams("test")}) + defer closeAnddestroyRocksDB(t, d) + + // connect 1st block - will log warnings about missing UTXO transactions in cfUnspentTxs column + block1 := bchain.Block{ + BlockHeader: bchain.BlockHeader{ + Height: 225493, + Hash: "0000000076fbbed90fd75b0e18856aa35baa984e9c9d444cf746ad85e94e2997", + }, + Txs: []bchain.Tx{ + bchain.Tx{ + Txid: "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840", + Vout: []bchain.Vout{ + bchain.Vout{ + N: 0, + ScriptPubKey: bchain.ScriptPubKey{ + Hex: addressToPubKeyHex("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d), + }, + }, + bchain.Vout{ + N: 1, + ScriptPubKey: bchain.ScriptPubKey{ + Hex: addressToPubKeyHex("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d), + }, + }, + }, + }, + bchain.Tx{ + Txid: "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75", + Vout: []bchain.Vout{ + bchain.Vout{ + N: 0, + ScriptPubKey: bchain.ScriptPubKey{ + Hex: addressToPubKeyHex("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d), + }, + }, + bchain.Vout{ + N: 1, + ScriptPubKey: bchain.ScriptPubKey{ + Hex: addressToPubKeyHex("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d), + }, + }, + }, + }, + }, + } + if err := d.ConnectBlock(&block1); err != nil { + t.Fatal(err) + } + if err := checkColumn(d, cfHeight, []keyPair{ + keyPair{"000370d5", 
"0000000076fbbed90fd75b0e18856aa35baa984e9c9d444cf746ad85e94e2997"}, + }); err != nil { + { + t.Fatal(err) + } + } + // the vout is encoded as signed varint, i.e. value * 2 for non negative values + if err := checkColumn(d, cfAddresses, []keyPair{ + keyPair{addressToPubKeyHex("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d) + "000370d5", "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840" + "00"}, + keyPair{addressToPubKeyHex("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "000370d5", "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840" + "02"}, + keyPair{addressToPubKeyHex("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "00"}, + keyPair{addressToPubKeyHex("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "02"}, + }); err != nil { + { + t.Fatal(err) + } + } + if err := checkColumn(d, cfUnspentTxs, []keyPair{ + keyPair{ + "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840", + addressToPubKeyHexWithLenght("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d) + "00" + addressToPubKeyHexWithLenght("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "02", + }, + keyPair{ + "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75", + addressToPubKeyHexWithLenght("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "00" + addressToPubKeyHexWithLenght("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "02", + }, + }); err != nil { + { + t.Fatal(err) + } + } + +} From b88a88ad55140fbd2169fb53b5017bd4a2ac94ba Mon Sep 17 00:00:00 2001 From: Jakub Matys Date: Thu, 19 Apr 2018 00:49:56 +0100 Subject: [PATCH 10/37] use abstract address for Vout --- bchain/baseparser.go | 23 +++++++++ bchain/coins/bch/bcashparser.go | 77 ++++++++++++++++++++++++++++++- bchain/coins/btc/bitcoinparser.go | 7 +++ bchain/types.go | 6 +++ 4 files changed, 112 insertions(+), 1 deletion(-) diff --git a/bchain/baseparser.go 
b/bchain/baseparser.go index 1b831b44..ba8bfb1a 100644 --- a/bchain/baseparser.go +++ b/bchain/baseparser.go @@ -2,6 +2,7 @@ package bchain import ( "encoding/hex" + "fmt" "github.com/gogo/protobuf/proto" "github.com/juju/errors" @@ -149,6 +150,9 @@ func (p *BaseParser) UnpackTx(buf []byte) (*Tx, uint32, error) { }, Value: pto.Value, } + if len(pto.Addresses) == 1 { + vout[i].Address = NewBaseAddress(pto.Addresses[0]) + } } tx := Tx{ Blocktime: int64(pt.Blocktime), @@ -161,3 +165,22 @@ func (p *BaseParser) UnpackTx(buf []byte) (*Tx, uint32, error) { } return &tx, pt.Height, nil } + +type baseAddress struct { + addr string +} + +func NewBaseAddress(addr string) Address { + return &baseAddress{addr: addr} +} + +func (a baseAddress) String() string { + return a.addr +} + +func (a baseAddress) EncodeAddress(format uint8) (string, error) { + if format != 0 { + return "", fmt.Errorf("Unknown address format: %d", format) + } + return a.addr, nil +} diff --git a/bchain/coins/bch/bcashparser.go b/bchain/coins/bch/bcashparser.go index 3df5f65e..8eb94292 100644 --- a/bchain/coins/bch/bcashparser.go +++ b/bchain/coins/bch/bcashparser.go @@ -1,13 +1,16 @@ package bch import ( + "blockbook/bchain" "blockbook/bchain/coins/btc" + "fmt" "strings" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcutil" "github.com/cpacia/bchutil" + "github.com/golang/glog" ) var prefixes []string @@ -51,7 +54,7 @@ func (p *BCashParser) GetAddrIDFromAddress(address string) ([]byte, error) { // AddressToOutputScript converts bitcoin address to ScriptPubKey func (p *BCashParser) AddressToOutputScript(address string) ([]byte, error) { - if strings.Contains(address, ":") { + if isCashAddr(address) { da, err := bchutil.DecodeAddress(address, p.Params) if err != nil { return nil, err @@ -73,3 +76,75 @@ func (p *BCashParser) AddressToOutputScript(address string) ([]byte, error) { return script, nil } } + +func isCashAddr(addr string) bool { + slice := 
strings.Split(addr, ":") + if len(slice) != 2 { + return false + } + for _, prefix := range prefixes { + if slice[0] == prefix { + return true + } + } + return false +} + +func (p *BCashParser) UnpackTx(buf []byte) (tx *bchain.Tx, height uint32, err error) { + tx, height, err = p.BitcoinParser.UnpackTx(buf) + + for i, vout := range tx.Vout { + if len(vout.ScriptPubKey.Addresses) == 1 { + tx.Vout[i].Address = &bcashAddress{ + addr: vout.ScriptPubKey.Addresses[0], + net: p.Params, + } + } + } + + return +} + +type bcashAddress struct { + addr string + net *chaincfg.Params +} + +func (a *bcashAddress) String() string { + return a.addr +} + +type AddressFormat = uint8 + +const ( + LegacyAddress AddressFormat = iota + CashAddress +) + +func (a *bcashAddress) EncodeAddress(format AddressFormat) (string, error) { + switch format { + case LegacyAddress: + return a.String(), nil + case CashAddress: + da, err := btcutil.DecodeAddress(a.addr, a.net) + if err != nil { + return "", err + } + var ca btcutil.Address + switch da := da.(type) { + case *btcutil.AddressPubKeyHash: + ca, err = bchutil.NewCashAddressPubKeyHash(da.Hash160()[:], a.net) + case *btcutil.AddressScriptHash: + ca, err = bchutil.NewCashAddressScriptHash(da.Hash160()[:], a.net) + default: + err = fmt.Errorf("Unknown address type: %T", da) + } + if err != nil { + return "", err + } + return ca.String(), nil + + default: + return "", fmt.Errorf("Unknown address format: %d", format) + } +} diff --git a/bchain/coins/btc/bitcoinparser.go b/bchain/coins/btc/bitcoinparser.go index 0f1578ae..0c1f2fe7 100644 --- a/bchain/coins/btc/bitcoinparser.go +++ b/bchain/coins/btc/bitcoinparser.go @@ -169,5 +169,12 @@ func (p *BitcoinParser) UnpackTx(buf []byte) (*bchain.Tx, uint32, error) { return nil, 0, err } tx.Blocktime = bt + + for i, vout := range tx.Vout { + if len(vout.ScriptPubKey.Addresses) == 1 { + tx.Vout[i].Address = bchain.NewBaseAddress(vout.ScriptPubKey.Addresses[0]) + } + } + return tx, height, nil } diff --git 
a/bchain/types.go b/bchain/types.go index 8e2a3d1b..59ce276e 100644 --- a/bchain/types.go +++ b/bchain/types.go @@ -37,10 +37,16 @@ type ScriptPubKey struct { Addresses []string `json:"addresses,omitempty"` } +type Address interface { + String() string + EncodeAddress(format uint8) (string, error) +} + type Vout struct { Value float64 `json:"value"` N uint32 `json:"n"` ScriptPubKey ScriptPubKey `json:"scriptPubKey"` + Address Address } // Tx is blockchain transaction From c657381d7eda1f62be6fd3bb0c4adba40c97b525 Mon Sep 17 00:00:00 2001 From: Martin Boehm Date: Thu, 19 Apr 2018 14:28:05 +0200 Subject: [PATCH 11/37] Change the way UTXO addresses are indexed - WIP --- db/rocksdb.go | 61 ++++++++++------------ db/rocksdb_test.go | 124 ++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 151 insertions(+), 34 deletions(-) diff --git a/db/rocksdb.go b/db/rocksdb.go index 3d936882..5c5c0aa3 100644 --- a/db/rocksdb.go +++ b/db/rocksdb.go @@ -263,7 +263,7 @@ func (d *RocksDB) writeAddressRecords(wb *gorocksdb.WriteBatch, block *bchain.Bl func (d *RocksDB) addAddrIDToRecords(op int, wb *gorocksdb.WriteBatch, records map[string][]outpoint, addrID []byte, btxid []byte, vout int32, bh uint32) error { if len(addrID) > 0 { if len(addrID) > 1024 { - glog.Infof("block %d, skipping addrID of length %d", bh, len(addrID)) + glog.Infof("rocksdb: block %d, skipping addrID of length %d", bh, len(addrID)) } else { strAddrID := string(addrID) records[strAddrID] = append(records[strAddrID], outpoint{ @@ -305,18 +305,19 @@ func appendPackedAddrID(txAddrs []byte, addrID []byte, n uint32, remaining int) } func findAndRemoveUnspentAddr(unspentAddrs []byte, vout uint32) ([]byte, uint32, []byte) { + // the addresses are packed as lenaddrID:addrID:vout, where lenaddrID and vout are varints for i := 0; i < len(unspentAddrs); { l, lv1 := unpackVarint(unspentAddrs[i:]) // index of vout of address in unspentAddrs j := i + int(l) + lv1 if j >= len(unspentAddrs) { - glog.Error("Inconsistent 
data in unspentAddrs") + glog.Error("rocksdb: Inconsistent data in unspentAddrs") return nil, 0, unspentAddrs } n, lv2 := unpackVarint(unspentAddrs[j:]) if uint32(n) == vout { addrID := append([]byte(nil), unspentAddrs[i+lv1:j]...) - unspentAddrs = append(unspentAddrs[:i], unspentAddrs[i+lv2:]...) + unspentAddrs = append(unspentAddrs[:i], unspentAddrs[j+lv2:]...) return addrID, uint32(n), unspentAddrs } i += j + lv2 @@ -325,7 +326,6 @@ func findAndRemoveUnspentAddr(unspentAddrs []byte, vout uint32) ([]byte, uint32, } func (d *RocksDB) writeAddressesUTXO(wb *gorocksdb.WriteBatch, block *bchain.Block, op int) error { - var err error addresses := make(map[string][]outpoint) unspentTxs := make(map[string][]byte) btxIDs := make([][]byte, len(block.Txs)) @@ -357,44 +357,35 @@ func (d *RocksDB) writeAddressesUTXO(wb *gorocksdb.WriteBatch, block *bchain.Blo } // locate unspent addresses and add them to addresses map them in format txid ^index for txi, tx := range block.Txs { - btxID := btxIDs[txi] - // try to find the tx in current block - unspentAddrs, inThisBlock := unspentTxs[string(btxID)] - if !inThisBlock { - unspentAddrs, err = d.getUnspentTx(btxID) + spendingTxid := btxIDs[txi] + for i, input := range tx.Vin { + btxID, err := d.chainParser.PackTxid(input.Txid) if err != nil { return err } - if unspentAddrs == nil { - glog.Warningf("rocksdb: height %d, tx %v in inputs but missing in unspentTxs", block.Height, tx.Txid) - continue + // try to find the tx in current block + unspentAddrs, inThisBlock := unspentTxs[string(btxID)] + if !inThisBlock { + unspentAddrs, err = d.getUnspentTx(btxID) + if err != nil { + return err + } + if unspentAddrs == nil { + glog.Warningf("rocksdb: height %d, tx %v in inputs but missing in unspentTxs", block.Height, tx.Txid) + continue + } } - } - var addrID []byte - var n uint32 - for _, input := range tx.Vin { - addrID, n, unspentAddrs = findAndRemoveUnspentAddr(unspentAddrs, input.Vout) + var addrID []byte + addrID, _, unspentAddrs = 
findAndRemoveUnspentAddr(unspentAddrs, input.Vout) if addrID == nil { glog.Warningf("rocksdb: height %d, tx %v vout %v in inputs but missing in unspentTxs", block.Height, tx.Txid, input.Vout) continue } - err = d.addAddrIDToRecords(op, wb, addresses, addrID, btxID, int32(^n), block.Height) + err = d.addAddrIDToRecords(op, wb, addresses, addrID, spendingTxid, int32(^i), block.Height) if err != nil { return err } - } - if inThisBlock { - if len(unspentAddrs) == 0 { - delete(unspentTxs, string(btxID)) - } else { - unspentTxs[string(btxID)] = unspentAddrs - } - } else { - if len(unspentAddrs) == 0 { - wb.DeleteCF(d.cfh[cfUnspentTxs], btxID) - } else { - wb.PutCF(d.cfh[cfUnspentTxs], btxID, unspentAddrs) - } + unspentTxs[string(btxID)] = unspentAddrs } } if err := d.writeAddressRecords(wb, block, op, addresses); err != nil { @@ -404,7 +395,11 @@ func (d *RocksDB) writeAddressesUTXO(wb *gorocksdb.WriteBatch, block *bchain.Blo for tx, val := range unspentTxs { switch op { case opInsert: - wb.PutCF(d.cfh[cfUnspentTxs], []byte(tx), val) + if len(val) == 0 { + wb.DeleteCF(d.cfh[cfUnspentTxs], []byte(tx)) + } else { + wb.PutCF(d.cfh[cfUnspentTxs], []byte(tx), val) + } case opDelete: wb.DeleteCF(d.cfh[cfUnspentTxs], []byte(tx)) } @@ -722,6 +717,6 @@ func packVarint(i int32, buf []byte) int { } func unpackVarint(buf []byte) (int32, int) { - i, ofs := vlq.Uint(buf) + i, ofs := vlq.Int(buf) return int32(i), ofs } diff --git a/db/rocksdb_test.go b/db/rocksdb_test.go index 4e4217d2..afc8b065 100644 --- a/db/rocksdb_test.go +++ b/db/rocksdb_test.go @@ -6,6 +6,7 @@ import ( "encoding/hex" "io/ioutil" "os" + "sort" "strconv" "testing" @@ -50,6 +51,9 @@ type keyPair struct { } func checkColumn(d *RocksDB, col int, kp []keyPair) error { + sort.Slice(kp, func(i, j int) bool { + return kp[i].Key < kp[j].Key + }) it := d.db.NewIteratorCF(d.ro, d.cfh[col]) defer it.Close() i := 0 @@ -63,7 +67,7 @@ func checkColumn(d *RocksDB, col int, kp []keyPair) error { } val := 
hex.EncodeToString(it.Value().Data()) if val != kp[i].Value { - return errors.Errorf("Incorrect key %v found in column %v row %v, expecting %v", val, col, i, kp[i].Value) + return errors.Errorf("Incorrect value %v found in column %v row %v, expecting %v", val, col, i, kp[i].Value) } i++ } @@ -72,6 +76,15 @@ func checkColumn(d *RocksDB, col int, kp []keyPair) error { } return nil } + +// TestRocksDB_Index_UTXO is a composite test testing the whole indexing functionality for UTXO chains +// It does the following: +// 1) Connect two blocks (inputs from 2nd block are spending some outputs from the 1st block) +// 2) GetTransactions for known addresses +// 3) Disconnect block 2 +// 4) GetTransactions for known addresses +// 5) Connect the block 2 back +// After each step, the whole content of DB is examined and any difference against expected state is regarded as failure func TestRocksDB_Index_UTXO(t *testing.T) { d := setupRocksDB(t, &btc.BitcoinParser{Params: btc.GetChainParams("test")}) defer closeAnddestroyRocksDB(t, d) @@ -155,4 +168,113 @@ func TestRocksDB_Index_UTXO(t *testing.T) { } } + // connect 2nd block - use some outputs from the 1st block as the inputs and 1 input uses tx from the same block + block2 := bchain.Block{ + BlockHeader: bchain.BlockHeader{ + Height: 225494, + Hash: "00000000eb0443fd7dc4a1ed5c686a8e995057805f9a161d9a5a77a95e72b7b6", + }, + Txs: []bchain.Tx{ + bchain.Tx{ + Txid: "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25", + Vin: []bchain.Vin{ + bchain.Vin{ + Txid: "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75", + Vout: 0, + }, + bchain.Vin{ + Txid: "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840", + Vout: 1, + }, + }, + Vout: []bchain.Vout{ + bchain.Vout{ + N: 0, + ScriptPubKey: bchain.ScriptPubKey{ + Hex: addressToPubKeyHex("mzB8cYrfRwFRFAGTDzV8LkUQy5BQicxGhX", t, d), + }, + }, + bchain.Vout{ + N: 1, + ScriptPubKey: bchain.ScriptPubKey{ + Hex: 
addressToPubKeyHex("mtR97eM2HPWVM6c8FGLGcukgaHHQv7THoL", t, d), + }, + }, + }, + }, + bchain.Tx{ + Txid: "3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71", + Vin: []bchain.Vin{ + bchain.Vin{ + Txid: "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25", + Vout: 0, + }, + bchain.Vin{ + Txid: "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75", + Vout: 1, + }, + }, + Vout: []bchain.Vout{ + bchain.Vout{ + N: 0, + ScriptPubKey: bchain.ScriptPubKey{ + Hex: addressToPubKeyHex("mwwoKQE5Lb1G4picHSHDQKg8jw424PF9SC", t, d), + }, + }, + bchain.Vout{ + N: 1, + ScriptPubKey: bchain.ScriptPubKey{ + Hex: addressToPubKeyHex("mmJx9Y8ayz9h14yd9fgCW1bUKoEpkBAquP", t, d), + }, + }, + }, + }, + }, + } + if err := d.ConnectBlock(&block2); err != nil { + t.Fatal(err) + } + if err := checkColumn(d, cfHeight, []keyPair{ + keyPair{"000370d5", "0000000076fbbed90fd75b0e18856aa35baa984e9c9d444cf746ad85e94e2997"}, + keyPair{"000370d6", "00000000eb0443fd7dc4a1ed5c686a8e995057805f9a161d9a5a77a95e72b7b6"}, + }); err != nil { + { + t.Fatal(err) + } + } + if err := checkColumn(d, cfAddresses, []keyPair{ + keyPair{addressToPubKeyHex("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d) + "000370d5", "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840" + "00"}, + keyPair{addressToPubKeyHex("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "000370d5", "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840" + "02"}, + keyPair{addressToPubKeyHex("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "00"}, + keyPair{addressToPubKeyHex("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "02"}, + keyPair{addressToPubKeyHex("mzB8cYrfRwFRFAGTDzV8LkUQy5BQicxGhX", t, d) + "000370d6", "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25" + "00" + 
"3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71" + "01"}, + keyPair{addressToPubKeyHex("mtR97eM2HPWVM6c8FGLGcukgaHHQv7THoL", t, d) + "000370d6", "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25" + "02"}, + keyPair{addressToPubKeyHex("mwwoKQE5Lb1G4picHSHDQKg8jw424PF9SC", t, d) + "000370d6", "3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71" + "00"}, + keyPair{addressToPubKeyHex("mmJx9Y8ayz9h14yd9fgCW1bUKoEpkBAquP", t, d) + "000370d6", "3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71" + "02"}, + keyPair{addressToPubKeyHex("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "000370d6", "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25" + "01"}, + keyPair{addressToPubKeyHex("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "000370d6", "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25" + "03"}, + keyPair{addressToPubKeyHex("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "000370d6", "3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71" + "03"}, + }); err != nil { + { + t.Fatal(err) + } + } + if err := checkColumn(d, cfUnspentTxs, []keyPair{ + keyPair{ + "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840", + addressToPubKeyHexWithLenght("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d) + "00", + }, + keyPair{ + "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25", + addressToPubKeyHexWithLenght("mtR97eM2HPWVM6c8FGLGcukgaHHQv7THoL", t, d) + "02", + }, + keyPair{ + "3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71", + addressToPubKeyHexWithLenght("mwwoKQE5Lb1G4picHSHDQKg8jw424PF9SC", t, d) + "00" + addressToPubKeyHexWithLenght("mmJx9Y8ayz9h14yd9fgCW1bUKoEpkBAquP", t, d) + "02", + }, + }); err != nil { + { + t.Fatal(err) + } + } } From 9ad8a4b8733682c6d63b793a406204e8abb91ef1 Mon Sep 17 00:00:00 2001 From: Martin Boehm Date: Thu, 19 Apr 2018 15:11:32 +0200 Subject: [PATCH 12/37] Test for GetTransactions in TestRocksDB_Index_UTXO --- 
db/rocksdb_test.go | 163 ++++++++++++++++++++++++++++++--------------- 1 file changed, 110 insertions(+), 53 deletions(-) diff --git a/db/rocksdb_test.go b/db/rocksdb_test.go index afc8b065..48edf8ae 100644 --- a/db/rocksdb_test.go +++ b/db/rocksdb_test.go @@ -6,6 +6,7 @@ import ( "encoding/hex" "io/ioutil" "os" + "reflect" "sort" "strconv" "testing" @@ -77,20 +78,8 @@ func checkColumn(d *RocksDB, col int, kp []keyPair) error { return nil } -// TestRocksDB_Index_UTXO is a composite test testing the whole indexing functionality for UTXO chains -// It does the following: -// 1) Connect two blocks (inputs from 2nd block are spending some outputs from the 1st block) -// 2) GetTransactions for known addresses -// 3) Disconnect block 2 -// 4) GetTransactions for known addresses -// 5) Connect the block 2 back -// After each step, the whole content of DB is examined and any difference against expected state is regarded as failure -func TestRocksDB_Index_UTXO(t *testing.T) { - d := setupRocksDB(t, &btc.BitcoinParser{Params: btc.GetChainParams("test")}) - defer closeAnddestroyRocksDB(t, d) - - // connect 1st block - will log warnings about missing UTXO transactions in cfUnspentTxs column - block1 := bchain.Block{ +func getTestBlock1(t *testing.T, d *RocksDB) *bchain.Block { + return &bchain.Block{ BlockHeader: bchain.BlockHeader{ Height: 225493, Hash: "0000000076fbbed90fd75b0e18856aa35baa984e9c9d444cf746ad85e94e2997", @@ -132,44 +121,10 @@ func TestRocksDB_Index_UTXO(t *testing.T) { }, }, } - if err := d.ConnectBlock(&block1); err != nil { - t.Fatal(err) - } - if err := checkColumn(d, cfHeight, []keyPair{ - keyPair{"000370d5", "0000000076fbbed90fd75b0e18856aa35baa984e9c9d444cf746ad85e94e2997"}, - }); err != nil { - { - t.Fatal(err) - } - } - // the vout is encoded as signed varint, i.e. 
value * 2 for non negative values - if err := checkColumn(d, cfAddresses, []keyPair{ - keyPair{addressToPubKeyHex("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d) + "000370d5", "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840" + "00"}, - keyPair{addressToPubKeyHex("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "000370d5", "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840" + "02"}, - keyPair{addressToPubKeyHex("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "00"}, - keyPair{addressToPubKeyHex("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "02"}, - }); err != nil { - { - t.Fatal(err) - } - } - if err := checkColumn(d, cfUnspentTxs, []keyPair{ - keyPair{ - "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840", - addressToPubKeyHexWithLenght("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d) + "00" + addressToPubKeyHexWithLenght("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "02", - }, - keyPair{ - "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75", - addressToPubKeyHexWithLenght("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "00" + addressToPubKeyHexWithLenght("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "02", - }, - }); err != nil { - { - t.Fatal(err) - } - } +} - // connect 2nd block - use some outputs from the 1st block as the inputs and 1 input uses tx from the same block - block2 := bchain.Block{ +func getTestBlock2(t *testing.T, d *RocksDB) *bchain.Block { + return &bchain.Block{ BlockHeader: bchain.BlockHeader{ Height: 225494, Hash: "00000000eb0443fd7dc4a1ed5c686a8e995057805f9a161d9a5a77a95e72b7b6", @@ -231,9 +186,44 @@ func TestRocksDB_Index_UTXO(t *testing.T) { }, }, } - if err := d.ConnectBlock(&block2); err != nil { - t.Fatal(err) +} + +func verifyAfterBlock1(t *testing.T, d *RocksDB) { + if err := checkColumn(d, cfHeight, []keyPair{ + 
keyPair{"000370d5", "0000000076fbbed90fd75b0e18856aa35baa984e9c9d444cf746ad85e94e2997"}, + }); err != nil { + { + t.Fatal(err) + } } + // the vout is encoded as signed varint, i.e. value * 2 for non negative values + if err := checkColumn(d, cfAddresses, []keyPair{ + keyPair{addressToPubKeyHex("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d) + "000370d5", "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840" + "00"}, + keyPair{addressToPubKeyHex("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "000370d5", "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840" + "02"}, + keyPair{addressToPubKeyHex("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "00"}, + keyPair{addressToPubKeyHex("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "02"}, + }); err != nil { + { + t.Fatal(err) + } + } + if err := checkColumn(d, cfUnspentTxs, []keyPair{ + keyPair{ + "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840", + addressToPubKeyHexWithLenght("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d) + "00" + addressToPubKeyHexWithLenght("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "02", + }, + keyPair{ + "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75", + addressToPubKeyHexWithLenght("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "00" + addressToPubKeyHexWithLenght("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "02", + }, + }); err != nil { + { + t.Fatal(err) + } + } +} + +func verifyAfterBlock2(t *testing.T, d *RocksDB) { if err := checkColumn(d, cfHeight, []keyPair{ keyPair{"000370d5", "0000000076fbbed90fd75b0e18856aa35baa984e9c9d444cf746ad85e94e2997"}, keyPair{"000370d6", "00000000eb0443fd7dc4a1ed5c686a8e995057805f9a161d9a5a77a95e72b7b6"}, @@ -278,3 +268,70 @@ func TestRocksDB_Index_UTXO(t *testing.T) { } } } + +type txidVoutOutput struct { + txid string + vout uint32 + isOutput bool +} + 
+func verifyGetTransactions(t *testing.T, d *RocksDB, addr string, low, high uint32, wantTxids []txidVoutOutput, wantErr error) { + gotTxids := make([]txidVoutOutput, 0) + addToTxids := func(txid string, vout uint32, isOutput bool) error { + gotTxids = append(gotTxids, txidVoutOutput{txid, vout, isOutput}) + return nil + } + if err := d.GetTransactions(addr, low, high, addToTxids); err != nil { + if wantErr == nil || wantErr.Error() != err.Error() { + t.Fatal(err) + } + } + if !reflect.DeepEqual(gotTxids, wantTxids) { + t.Errorf("GetTransactions() = %v, want %v", gotTxids, wantTxids) + } +} + +// TestRocksDB_Index_UTXO is a composite test probing the whole indexing functionality for UTXO chains +// It does the following: +// 1) Connect two blocks (inputs from 2nd block are spending some outputs from the 1st block) +// 2) GetTransactions for various addresses / low-high ranges +// 3) Disconnect block 2 +// 4) GetTransactions for various addresses +// 5) Connect the block 2 back +// After each step, the whole content of DB is examined and any difference against expected state is regarded as failure +func TestRocksDB_Index_UTXO(t *testing.T) { + d := setupRocksDB(t, &btc.BitcoinParser{Params: btc.GetChainParams("test")}) + defer closeAnddestroyRocksDB(t, d) + + // connect 1st block - will log warnings about missing UTXO transactions in cfUnspentTxs column + block1 := getTestBlock1(t, d) + if err := d.ConnectBlock(block1); err != nil { + t.Fatal(err) + } + verifyAfterBlock1(t, d) + + // connect 2nd block - use some outputs from the 1st block as the inputs and 1 input uses tx from the same block + block2 := getTestBlock2(t, d) + if err := d.ConnectBlock(block2); err != nil { + t.Fatal(err) + } + verifyAfterBlock2(t, d) + + // get transactions for various addresses / low-high ranges + verifyGetTransactions(t, d, "mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", 0, 1000000, []txidVoutOutput{ + txidVoutOutput{"00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840", 1, 
true}, + txidVoutOutput{"7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25", 1, false}, + }, nil) + verifyGetTransactions(t, d, "mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", 225493, 225493, []txidVoutOutput{ + txidVoutOutput{"00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840", 1, true}, + }, nil) + verifyGetTransactions(t, d, "mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", 225494, 1000000, []txidVoutOutput{ + txidVoutOutput{"7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25", 1, false}, + }, nil) + verifyGetTransactions(t, d, "mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", 500000, 1000000, []txidVoutOutput{}, nil) + verifyGetTransactions(t, d, "mwwoKQE5Lb1G4picHSHDQKg8jw424PF9SC", 0, 1000000, []txidVoutOutput{ + txidVoutOutput{"3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71", 0, true}, + }, nil) + verifyGetTransactions(t, d, "mtGXQvBowMkBpnhLckhxhbwYK44Gs9eBad", 500000, 1000000, []txidVoutOutput{}, errors.New("checksum mismatch")) + +} From febcba5fbe6b0cd5ff1358c58ff2e004c64296eb Mon Sep 17 00:00:00 2001 From: Martin Boehm Date: Fri, 20 Apr 2018 13:56:55 +0200 Subject: [PATCH 13/37] Use new db column blockaddresses to support UTXO chain block disconnect --- bchain/baseparser.go | 5 ++ bchain/coins/eth/ethparser.go | 6 ++ bchain/types.go | 4 + db/rocksdb.go | 55 +++++++++--- db/rocksdb_test.go | 160 ++++++++++++++++++++++++++-------- db/sync.go | 5 ++ 6 files changed, 185 insertions(+), 50 deletions(-) diff --git a/bchain/baseparser.go b/bchain/baseparser.go index 1b831b44..8b747ed9 100644 --- a/bchain/baseparser.go +++ b/bchain/baseparser.go @@ -35,6 +35,11 @@ func (p *BaseParser) PackedTxidLen() int { return 32 } +// KeepBlockAddresses returns number of blocks which are to be kept in blockaddresses column +func (p *BaseParser) KeepBlockAddresses() int { + return 100 +} + // PackTxid packs txid to byte array func (p *BaseParser) PackTxid(txid string) ([]byte, error) { return hex.DecodeString(txid) diff --git 
a/bchain/coins/eth/ethparser.go b/bchain/coins/eth/ethparser.go index be642bd5..5e3ed45f 100644 --- a/bchain/coins/eth/ethparser.go +++ b/bchain/coins/eth/ethparser.go @@ -272,3 +272,9 @@ func (p *EthereumParser) UnpackBlockHash(buf []byte) (string, error) { func (p *EthereumParser) IsUTXOChain() bool { return false } + +// KeepBlockAddresses returns number of blocks which are to be kept in blockaddresses column +// do not use the blockaddresses for eth +func (p *EthereumParser) KeepBlockAddresses() int { + return 0 +} diff --git a/bchain/types.go b/bchain/types.go index 8e2a3d1b..46effc48 100644 --- a/bchain/types.go +++ b/bchain/types.go @@ -134,6 +134,10 @@ type BlockChainParser interface { // UTXO chains need "inputs" column in db, that map transactions to transactions that spend them // non UTXO chains have mapping of address to input and output transactions directly in "outputs" column in db IsUTXOChain() bool + // KeepBlockAddresses returns number of blocks which are to be kept in blockaddresses column + // and used in case of fork + // if 0 the blockaddresses column is not used at all (usually non UTXO chains) + KeepBlockAddresses() int // address id conversions GetAddrIDFromVout(output *Vout) ([]byte, error) GetAddrIDFromAddress(address string) ([]byte, error) diff --git a/db/rocksdb.go b/db/rocksdb.go index 5c5c0aa3..b155fe52 100644 --- a/db/rocksdb.go +++ b/db/rocksdb.go @@ -10,6 +10,7 @@ import ( "github.com/bsm/go-vlq" "github.com/golang/glog" + "github.com/juju/errors" "github.com/tecbot/gorocksdb" ) @@ -40,9 +41,10 @@ const ( cfAddresses cfUnspentTxs cfTransactions + cfBlockAddresses ) -var cfNames = []string{"default", "height", "addresses", "unspenttxs", "transactions"} +var cfNames = []string{"default", "height", "addresses", "unspenttxs", "transactions", "blockaddresses"} func openDB(path string) (*gorocksdb.DB, []*gorocksdb.ColumnFamilyHandle, error) { c := gorocksdb.NewLRUCache(8 << 30) // 8GB @@ -80,7 +82,7 @@ func openDB(path string) 
(*gorocksdb.DB, []*gorocksdb.ColumnFamilyHandle, error) optsOutputs.SetMaxOpenFiles(25000) optsOutputs.SetCompression(gorocksdb.NoCompression) - fcOptions := []*gorocksdb.Options{opts, opts, optsOutputs, opts, opts} + fcOptions := []*gorocksdb.Options{opts, opts, optsOutputs, opts, opts, opts} db, cfh, err := gorocksdb.OpenDbColumnFamilies(opts, path, cfNames, fcOptions) if err != nil { @@ -239,6 +241,9 @@ type outpoint struct { } func (d *RocksDB) writeAddressRecords(wb *gorocksdb.WriteBatch, block *bchain.Block, op int, records map[string][]outpoint) error { + keep := d.chainParser.KeepBlockAddresses() + blockAddresses := make([]byte, 0) + vBuf := make([]byte, vlq.MaxLen32) for addrID, outpoints := range records { key, err := packOutputKey([]byte(addrID), block.Height) if err != nil { @@ -253,10 +258,36 @@ func (d *RocksDB) writeAddressRecords(wb *gorocksdb.WriteBatch, block *bchain.Bl continue } wb.PutCF(d.cfh[cfAddresses], key, val) + if keep > 0 { + // collect all addresses to be stored in blockaddresses + vl := packVarint(int32(len([]byte(addrID))), vBuf) + blockAddresses = append(blockAddresses, vBuf[0:vl]...) + blockAddresses = append(blockAddresses, []byte(addrID)...) 
+ } case opDelete: wb.DeleteCF(d.cfh[cfAddresses], key) } } + if keep > 0 && op == opInsert { + // write new block address + key := packUint(block.Height) + wb.PutCF(d.cfh[cfBlockAddresses], key, blockAddresses) + // cleanup old block address + if block.Height > uint32(keep) { + for rh := block.Height - uint32(keep); rh < block.Height; rh-- { + key = packUint(rh) + val, err := d.db.GetCF(d.ro, d.cfh[cfBlockAddresses], key) + if err != nil { + return err + } + if val.Size() == 0 { + break + } + val.Free() + d.db.DeleteCF(d.wo, d.cfh[cfBlockAddresses], key) + } + } + } return nil } @@ -305,7 +336,7 @@ func appendPackedAddrID(txAddrs []byte, addrID []byte, n uint32, remaining int) } func findAndRemoveUnspentAddr(unspentAddrs []byte, vout uint32) ([]byte, uint32, []byte) { - // the addresses are packed as lenaddrID:addrID:vout, where lenaddrID and vout are varints + // the addresses are packed as lenaddrID addrID vout, where lenaddrID and vout are varints for i := 0; i < len(unspentAddrs); { l, lv1 := unpackVarint(unspentAddrs[i:]) // index of vout of address in unspentAddrs @@ -326,6 +357,11 @@ func findAndRemoveUnspentAddr(unspentAddrs []byte, vout uint32) ([]byte, uint32, } func (d *RocksDB) writeAddressesUTXO(wb *gorocksdb.WriteBatch, block *bchain.Block, op int) error { + if op == opDelete { + // block does not contain mapping tx-> input address, which is necessary to recreate + // unspentTxs; therefore it is not possible to DisconnectBlocks this way + return errors.New("DisconnectBlock is not supported for UTXO chains") + } addresses := make(map[string][]outpoint) unspentTxs := make(map[string][]byte) btxIDs := make([][]byte, len(block.Txs)) @@ -355,7 +391,7 @@ func (d *RocksDB) writeAddressesUTXO(wb *gorocksdb.WriteBatch, block *bchain.Blo } unspentTxs[string(btxID)] = txAddrs } - // locate unspent addresses and add them to addresses map them in format txid ^index + // locate addresses spent by this tx and add them to addresses map them in format txid ^index for 
txi, tx := range block.Txs { spendingTxid := btxIDs[txi] for i, input := range tx.Vin { @@ -393,15 +429,10 @@ func (d *RocksDB) writeAddressesUTXO(wb *gorocksdb.WriteBatch, block *bchain.Blo } // save unspent txs from current block for tx, val := range unspentTxs { - switch op { - case opInsert: - if len(val) == 0 { - wb.DeleteCF(d.cfh[cfUnspentTxs], []byte(tx)) - } else { - wb.PutCF(d.cfh[cfUnspentTxs], []byte(tx), val) - } - case opDelete: + if len(val) == 0 { wb.DeleteCF(d.cfh[cfUnspentTxs], []byte(tx)) + } else { + wb.PutCF(d.cfh[cfUnspentTxs], []byte(tx), val) } } return nil diff --git a/db/rocksdb_test.go b/db/rocksdb_test.go index 48edf8ae..b12a5c3f 100644 --- a/db/rocksdb_test.go +++ b/db/rocksdb_test.go @@ -9,6 +9,7 @@ import ( "reflect" "sort" "strconv" + "strings" "testing" "github.com/juju/errors" @@ -41,14 +42,17 @@ func addressToPubKeyHex(addr string, t *testing.T, d *RocksDB) string { return hex.EncodeToString(b) } -func addressToPubKeyHexWithLenght(addr string, t *testing.T, d *RocksDB) string { +func addressToPubKeyHexWithLength(addr string, t *testing.T, d *RocksDB) string { h := addressToPubKeyHex(addr, t, d) // length is signed varint, therefore 2 times big, we can take len(h) as the correct value return strconv.FormatInt(int64(len(h)), 16) + h } +// keyPair is used to compare given key value in DB with expected +// for more complicated compares it is possible to specify CompareFunc type keyPair struct { - Key, Value string + Key, Value string + CompareFunc func(string) bool } func checkColumn(d *RocksDB, col int, kp []keyPair) error { @@ -67,7 +71,13 @@ func checkColumn(d *RocksDB, col int, kp []keyPair) error { return errors.Errorf("Incorrect key %v found in column %v row %v, expecting %v", key, col, i, kp[i].Key) } val := hex.EncodeToString(it.Value().Data()) - if val != kp[i].Value { + var valOK bool + if kp[i].CompareFunc == nil { + valOK = val == kp[i].Value + } else { + valOK = kp[i].CompareFunc(val) + } + if !valOK { return 
errors.Errorf("Incorrect value %v found in column %v row %v, expecting %v", val, col, i, kp[i].Value) } i++ @@ -78,7 +88,7 @@ func checkColumn(d *RocksDB, col int, kp []keyPair) error { return nil } -func getTestBlock1(t *testing.T, d *RocksDB) *bchain.Block { +func getTestUTXOBlock1(t *testing.T, d *RocksDB) *bchain.Block { return &bchain.Block{ BlockHeader: bchain.BlockHeader{ Height: 225493, @@ -123,7 +133,7 @@ func getTestBlock1(t *testing.T, d *RocksDB) *bchain.Block { } } -func getTestBlock2(t *testing.T, d *RocksDB) *bchain.Block { +func getTestUTXOBlock2(t *testing.T, d *RocksDB) *bchain.Block { return &bchain.Block{ BlockHeader: bchain.BlockHeader{ Height: 225494, @@ -188,9 +198,9 @@ func getTestBlock2(t *testing.T, d *RocksDB) *bchain.Block { } } -func verifyAfterBlock1(t *testing.T, d *RocksDB) { +func verifyAfterUTXOBlock1(t *testing.T, d *RocksDB) { if err := checkColumn(d, cfHeight, []keyPair{ - keyPair{"000370d5", "0000000076fbbed90fd75b0e18856aa35baa984e9c9d444cf746ad85e94e2997"}, + keyPair{"000370d5", "0000000076fbbed90fd75b0e18856aa35baa984e9c9d444cf746ad85e94e2997", nil}, }); err != nil { { t.Fatal(err) @@ -198,10 +208,10 @@ func verifyAfterBlock1(t *testing.T, d *RocksDB) { } // the vout is encoded as signed varint, i.e. 
value * 2 for non negative values if err := checkColumn(d, cfAddresses, []keyPair{ - keyPair{addressToPubKeyHex("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d) + "000370d5", "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840" + "00"}, - keyPair{addressToPubKeyHex("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "000370d5", "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840" + "02"}, - keyPair{addressToPubKeyHex("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "00"}, - keyPair{addressToPubKeyHex("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "02"}, + keyPair{addressToPubKeyHex("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d) + "000370d5", "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840" + "00", nil}, + keyPair{addressToPubKeyHex("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "000370d5", "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840" + "02", nil}, + keyPair{addressToPubKeyHex("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "00", nil}, + keyPair{addressToPubKeyHex("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "02", nil}, }); err != nil { { t.Fatal(err) @@ -210,11 +220,38 @@ func verifyAfterBlock1(t *testing.T, d *RocksDB) { if err := checkColumn(d, cfUnspentTxs, []keyPair{ keyPair{ "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840", - addressToPubKeyHexWithLenght("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d) + "00" + addressToPubKeyHexWithLenght("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "02", + addressToPubKeyHexWithLength("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d) + "00" + addressToPubKeyHexWithLength("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "02", + nil, }, keyPair{ 
"effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75", - addressToPubKeyHexWithLenght("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "00" + addressToPubKeyHexWithLenght("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "02", + addressToPubKeyHexWithLength("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "00" + addressToPubKeyHexWithLength("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "02", + nil, + }, + }); err != nil { + { + t.Fatal(err) + } + } + // the values in cfBlockAddresses has random order, must use CompareFunc + if err := checkColumn(d, cfBlockAddresses, []keyPair{ + keyPair{"000370d5", "", + func(v string) bool { + expected := []string{ + addressToPubKeyHexWithLength("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d), + addressToPubKeyHexWithLength("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d), + addressToPubKeyHexWithLength("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d), + addressToPubKeyHexWithLength("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d), + } + for _, e := range expected { + lb := len(v) + v = strings.Replace(v, e, "", 1) + if lb == len(v) { + return false + } + } + return len(v) == 0 + }, }, }); err != nil { { @@ -223,27 +260,27 @@ func verifyAfterBlock1(t *testing.T, d *RocksDB) { } } -func verifyAfterBlock2(t *testing.T, d *RocksDB) { +func verifyAfterUTXOBlock2(t *testing.T, d *RocksDB) { if err := checkColumn(d, cfHeight, []keyPair{ - keyPair{"000370d5", "0000000076fbbed90fd75b0e18856aa35baa984e9c9d444cf746ad85e94e2997"}, - keyPair{"000370d6", "00000000eb0443fd7dc4a1ed5c686a8e995057805f9a161d9a5a77a95e72b7b6"}, + keyPair{"000370d5", "0000000076fbbed90fd75b0e18856aa35baa984e9c9d444cf746ad85e94e2997", nil}, + keyPair{"000370d6", "00000000eb0443fd7dc4a1ed5c686a8e995057805f9a161d9a5a77a95e72b7b6", nil}, }); err != nil { { t.Fatal(err) } } if err := checkColumn(d, cfAddresses, []keyPair{ - keyPair{addressToPubKeyHex("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d) + "000370d5", "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840" + "00"}, - 
keyPair{addressToPubKeyHex("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "000370d5", "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840" + "02"}, - keyPair{addressToPubKeyHex("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "00"}, - keyPair{addressToPubKeyHex("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "02"}, - keyPair{addressToPubKeyHex("mzB8cYrfRwFRFAGTDzV8LkUQy5BQicxGhX", t, d) + "000370d6", "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25" + "00" + "3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71" + "01"}, - keyPair{addressToPubKeyHex("mtR97eM2HPWVM6c8FGLGcukgaHHQv7THoL", t, d) + "000370d6", "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25" + "02"}, - keyPair{addressToPubKeyHex("mwwoKQE5Lb1G4picHSHDQKg8jw424PF9SC", t, d) + "000370d6", "3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71" + "00"}, - keyPair{addressToPubKeyHex("mmJx9Y8ayz9h14yd9fgCW1bUKoEpkBAquP", t, d) + "000370d6", "3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71" + "02"}, - keyPair{addressToPubKeyHex("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "000370d6", "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25" + "01"}, - keyPair{addressToPubKeyHex("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "000370d6", "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25" + "03"}, - keyPair{addressToPubKeyHex("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "000370d6", "3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71" + "03"}, + keyPair{addressToPubKeyHex("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d) + "000370d5", "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840" + "00", nil}, + keyPair{addressToPubKeyHex("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "000370d5", 
"00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840" + "02", nil}, + keyPair{addressToPubKeyHex("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "00", nil}, + keyPair{addressToPubKeyHex("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "02", nil}, + keyPair{addressToPubKeyHex("mzB8cYrfRwFRFAGTDzV8LkUQy5BQicxGhX", t, d) + "000370d6", "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25" + "00" + "3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71" + "01", nil}, + keyPair{addressToPubKeyHex("mtR97eM2HPWVM6c8FGLGcukgaHHQv7THoL", t, d) + "000370d6", "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25" + "02", nil}, + keyPair{addressToPubKeyHex("mwwoKQE5Lb1G4picHSHDQKg8jw424PF9SC", t, d) + "000370d6", "3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71" + "00", nil}, + keyPair{addressToPubKeyHex("mmJx9Y8ayz9h14yd9fgCW1bUKoEpkBAquP", t, d) + "000370d6", "3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71" + "02", nil}, + keyPair{addressToPubKeyHex("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "000370d6", "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25" + "01", nil}, + keyPair{addressToPubKeyHex("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "000370d6", "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25" + "03", nil}, + keyPair{addressToPubKeyHex("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "000370d6", "3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71" + "03", nil}, }); err != nil { { t.Fatal(err) @@ -252,15 +289,45 @@ func verifyAfterBlock2(t *testing.T, d *RocksDB) { if err := checkColumn(d, cfUnspentTxs, []keyPair{ keyPair{ "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840", - addressToPubKeyHexWithLenght("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d) + "00", + 
addressToPubKeyHexWithLength("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d) + "00", + nil, }, keyPair{ "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25", - addressToPubKeyHexWithLenght("mtR97eM2HPWVM6c8FGLGcukgaHHQv7THoL", t, d) + "02", + addressToPubKeyHexWithLength("mtR97eM2HPWVM6c8FGLGcukgaHHQv7THoL", t, d) + "02", + nil, }, keyPair{ "3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71", - addressToPubKeyHexWithLenght("mwwoKQE5Lb1G4picHSHDQKg8jw424PF9SC", t, d) + "00" + addressToPubKeyHexWithLenght("mmJx9Y8ayz9h14yd9fgCW1bUKoEpkBAquP", t, d) + "02", + addressToPubKeyHexWithLength("mwwoKQE5Lb1G4picHSHDQKg8jw424PF9SC", t, d) + "00" + addressToPubKeyHexWithLength("mmJx9Y8ayz9h14yd9fgCW1bUKoEpkBAquP", t, d) + "02", + nil, + }, + }); err != nil { + { + t.Fatal(err) + } + } + if err := checkColumn(d, cfBlockAddresses, []keyPair{ + keyPair{"000370d6", "", + func(v string) bool { + expected := []string{ + addressToPubKeyHexWithLength("mzB8cYrfRwFRFAGTDzV8LkUQy5BQicxGhX", t, d), + addressToPubKeyHexWithLength("mtR97eM2HPWVM6c8FGLGcukgaHHQv7THoL", t, d), + addressToPubKeyHexWithLength("mwwoKQE5Lb1G4picHSHDQKg8jw424PF9SC", t, d), + addressToPubKeyHexWithLength("mmJx9Y8ayz9h14yd9fgCW1bUKoEpkBAquP", t, d), + addressToPubKeyHexWithLength("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d), + addressToPubKeyHexWithLength("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d), + addressToPubKeyHexWithLength("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d), + } + for _, e := range expected { + lb := len(v) + v = strings.Replace(v, e, "", 1) + if lb == len(v) { + return false + } + } + return len(v) == 0 + }, }, }); err != nil { { @@ -291,31 +358,39 @@ func verifyGetTransactions(t *testing.T, d *RocksDB, addr string, low, high uint } } +type testBitcoinParser struct { + *btc.BitcoinParser +} + +// override btc.KeepBlockAddresses to keep only one blockaddress +func (p *testBitcoinParser) KeepBlockAddresses() int { + return 1 +} + // TestRocksDB_Index_UTXO is a composite test 
probing the whole indexing functionality for UTXO chains // It does the following: // 1) Connect two blocks (inputs from 2nd block are spending some outputs from the 1st block) // 2) GetTransactions for various addresses / low-high ranges -// 3) Disconnect block 2 -// 4) GetTransactions for various addresses -// 5) Connect the block 2 back +// 3) Disconnect block 2 - expect error +// 4) Disconnect the block 2 using full scan // After each step, the whole content of DB is examined and any difference against expected state is regarded as failure func TestRocksDB_Index_UTXO(t *testing.T) { - d := setupRocksDB(t, &btc.BitcoinParser{Params: btc.GetChainParams("test")}) + d := setupRocksDB(t, &testBitcoinParser{BitcoinParser: &btc.BitcoinParser{Params: btc.GetChainParams("test")}}) defer closeAnddestroyRocksDB(t, d) // connect 1st block - will log warnings about missing UTXO transactions in cfUnspentTxs column - block1 := getTestBlock1(t, d) + block1 := getTestUTXOBlock1(t, d) if err := d.ConnectBlock(block1); err != nil { t.Fatal(err) } - verifyAfterBlock1(t, d) + verifyAfterUTXOBlock1(t, d) // connect 2nd block - use some outputs from the 1st block as the inputs and 1 input uses tx from the same block - block2 := getTestBlock2(t, d) + block2 := getTestUTXOBlock2(t, d) if err := d.ConnectBlock(block2); err != nil { t.Fatal(err) } - verifyAfterBlock2(t, d) + verifyAfterUTXOBlock2(t, d) // get transactions for various addresses / low-high ranges verifyGetTransactions(t, d, "mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", 0, 1000000, []txidVoutOutput{ @@ -334,4 +409,13 @@ func TestRocksDB_Index_UTXO(t *testing.T) { }, nil) verifyGetTransactions(t, d, "mtGXQvBowMkBpnhLckhxhbwYK44Gs9eBad", 500000, 1000000, []txidVoutOutput{}, errors.New("checksum mismatch")) + // DisconnectBlock for UTXO chains is not possible + err := d.DisconnectBlock(block2) + if err == nil || err.Error() != "DisconnectBlock is not supported for UTXO chains" { + t.Fatal(err) + } + verifyAfterUTXOBlock2(t, d) + + // 
disconnect the 2nd block, verify that the db contains only the 1st block + } diff --git a/db/sync.go b/db/sync.go index 339daedb..845a517d 100644 --- a/db/sync.go +++ b/db/sync.go @@ -382,6 +382,11 @@ func (w *SyncWorker) getBlockChain(out chan blockResult, done chan struct{}) { // otherwise doing full scan func (w *SyncWorker) DisconnectBlocks(lower uint32, higher uint32, hashes []string) error { glog.Infof("sync: disconnecting blocks %d-%d", lower, higher) + // if the chain uses Block to Addresses mapping, always use DisconnectBlocksFullScan + // the full scan will be optimized using the mapping + if w.chain.GetChainParser().KeepBlockAddresses() > 0 { + return w.db.DisconnectBlocksFullScan(lower, higher) + } blocks := make([]*bchain.Block, len(hashes)) var err error // get all blocks first to see if we can avoid full scan From 850b1759408c8b61e692ad91a2362f193ea15488 Mon Sep 17 00:00:00 2001 From: Martin Boehm Date: Fri, 20 Apr 2018 15:08:08 +0200 Subject: [PATCH 14/37] Test rocksdb GetBestBlock, GetBlockHash, small refactor in test --- db/rocksdb_test.go | 66 +++++++++++++++++++++++++++++----------------- 1 file changed, 42 insertions(+), 24 deletions(-) diff --git a/db/rocksdb_test.go b/db/rocksdb_test.go index b12a5c3f..86dae6b8 100644 --- a/db/rocksdb_test.go +++ b/db/rocksdb_test.go @@ -55,6 +55,17 @@ type keyPair struct { CompareFunc func(string) bool } +func compareFuncBlockAddresses(v string, expected []string) bool { + for _, e := range expected { + lb := len(v) + v = strings.Replace(v, e, "", 1) + if lb == len(v) { + return false + } + } + return len(v) == 0 +} + func checkColumn(d *RocksDB, col int, kp []keyPair) error { sort.Slice(kp, func(i, j int) bool { return kp[i].Key < kp[j].Key @@ -83,7 +94,7 @@ func checkColumn(d *RocksDB, col int, kp []keyPair) error { i++ } if i != len(kp) { - return errors.Errorf("Expected more rows in column %v: found %v, expected %v", col, i, len(kp)) + return errors.Errorf("Expected more rows in column %v: got %v, 
expected %v", col, i, len(kp)) } return nil } @@ -237,20 +248,12 @@ func verifyAfterUTXOBlock1(t *testing.T, d *RocksDB) { if err := checkColumn(d, cfBlockAddresses, []keyPair{ keyPair{"000370d5", "", func(v string) bool { - expected := []string{ + return compareFuncBlockAddresses(v, []string{ addressToPubKeyHexWithLength("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d), addressToPubKeyHexWithLength("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d), addressToPubKeyHexWithLength("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d), addressToPubKeyHexWithLength("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d), - } - for _, e := range expected { - lb := len(v) - v = strings.Replace(v, e, "", 1) - if lb == len(v) { - return false - } - } - return len(v) == 0 + }) }, }, }); err != nil { @@ -310,7 +313,7 @@ func verifyAfterUTXOBlock2(t *testing.T, d *RocksDB) { if err := checkColumn(d, cfBlockAddresses, []keyPair{ keyPair{"000370d6", "", func(v string) bool { - expected := []string{ + return compareFuncBlockAddresses(v, []string{ addressToPubKeyHexWithLength("mzB8cYrfRwFRFAGTDzV8LkUQy5BQicxGhX", t, d), addressToPubKeyHexWithLength("mtR97eM2HPWVM6c8FGLGcukgaHHQv7THoL", t, d), addressToPubKeyHexWithLength("mwwoKQE5Lb1G4picHSHDQKg8jw424PF9SC", t, d), @@ -318,15 +321,7 @@ func verifyAfterUTXOBlock2(t *testing.T, d *RocksDB) { addressToPubKeyHexWithLength("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d), addressToPubKeyHexWithLength("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d), addressToPubKeyHexWithLength("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d), - } - for _, e := range expected { - lb := len(v) - v = strings.Replace(v, e, "", 1) - if lb == len(v) { - return false - } - } - return len(v) == 0 + }) }, }, }); err != nil { @@ -371,8 +366,10 @@ func (p *testBitcoinParser) KeepBlockAddresses() int { // It does the following: // 1) Connect two blocks (inputs from 2nd block are spending some outputs from the 1st block) // 2) GetTransactions for various addresses / low-high ranges -// 3) Disconnect block 2 - 
expect error -// 4) Disconnect the block 2 using full scan +// 3) GetBestBlock, GetBlockHash +// 4) Test tx caching functionality +// 5) Disconnect block 2 - expect error +// 6) Disconnect the block 2 using full scan // After each step, the whole content of DB is examined and any difference against expected state is regarded as failure func TestRocksDB_Index_UTXO(t *testing.T) { d := setupRocksDB(t, &testBitcoinParser{BitcoinParser: &btc.BitcoinParser{Params: btc.GetChainParams("test")}}) @@ -409,8 +406,29 @@ func TestRocksDB_Index_UTXO(t *testing.T) { }, nil) verifyGetTransactions(t, d, "mtGXQvBowMkBpnhLckhxhbwYK44Gs9eBad", 500000, 1000000, []txidVoutOutput{}, errors.New("checksum mismatch")) + // GetBestBlock + height, hash, err := d.GetBestBlock() + if err != nil { + t.Fatal(err) + } + if height != 225494 { + t.Fatalf("GetBestBlock: got height %v, expected %v", height, 225494) + } + if hash != "00000000eb0443fd7dc4a1ed5c686a8e995057805f9a161d9a5a77a95e72b7b6" { + t.Fatalf("GetBestBlock: got hash %v, expected %v", hash, "00000000eb0443fd7dc4a1ed5c686a8e995057805f9a161d9a5a77a95e72b7b6") + } + + // GetBlockHash + hash, err = d.GetBlockHash(225493) + if err != nil { + t.Fatal(err) + } + if hash != "0000000076fbbed90fd75b0e18856aa35baa984e9c9d444cf746ad85e94e2997" { + t.Fatalf("GetBlockHash: got hash %v, expected %v", hash, "0000000076fbbed90fd75b0e18856aa35baa984e9c9d444cf746ad85e94e2997") + } + // DisconnectBlock for UTXO chains is not possible - err := d.DisconnectBlock(block2) + err = d.DisconnectBlock(block2) if err == nil || err.Error() != "DisconnectBlock is not supported for UTXO chains" { t.Fatal(err) } From d569a08cf828c7fcff7a45afce3cdfffa5293d21 Mon Sep 17 00:00:00 2001 From: Martin Boehm Date: Fri, 20 Apr 2018 16:03:45 +0200 Subject: [PATCH 15/37] Test rocksdb tx storage functionality --- db/rocksdb_test.go | 56 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 55 insertions(+), 1 deletion(-) diff --git a/db/rocksdb_test.go 
b/db/rocksdb_test.go index 86dae6b8..9fc84241 100644 --- a/db/rocksdb_test.go +++ b/db/rocksdb_test.go @@ -4,6 +4,7 @@ import ( "blockbook/bchain" "blockbook/bchain/coins/btc" "encoding/hex" + "fmt" "io/ioutil" "os" "reflect" @@ -122,6 +123,8 @@ func getTestUTXOBlock1(t *testing.T, d *RocksDB) *bchain.Block { }, }, }, + Blocktime: 22549300000, + Time: 22549300000, }, bchain.Tx{ Txid: "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75", @@ -139,6 +142,8 @@ func getTestUTXOBlock1(t *testing.T, d *RocksDB) *bchain.Block { }, }, }, + Blocktime: 22549300001, + Time: 22549300001, }, }, } @@ -177,6 +182,8 @@ func getTestUTXOBlock2(t *testing.T, d *RocksDB) *bchain.Block { }, }, }, + Blocktime: 22549400000, + Time: 22549400000, }, bchain.Tx{ Txid: "3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71", @@ -204,6 +211,8 @@ func getTestUTXOBlock2(t *testing.T, d *RocksDB) *bchain.Block { }, }, }, + Blocktime: 22549400001, + Time: 22549400001, }, }, } @@ -362,6 +371,35 @@ func (p *testBitcoinParser) KeepBlockAddresses() int { return 1 } +// override PackTx and UnpackTx to default BaseParser functionality +// BitcoinParser uses tx hex which is not available for the test transactions +func (p *testBitcoinParser) PackTx(tx *bchain.Tx, height uint32, blockTime int64) ([]byte, error) { + return p.BaseParser.PackTx(tx, height, blockTime) +} + +func (p *testBitcoinParser) UnpackTx(buf []byte) (*bchain.Tx, uint32, error) { + return p.BaseParser.UnpackTx(buf) +} + +func testTxCache(t *testing.T, d *RocksDB, b *bchain.Block, tx *bchain.Tx) { + if err := d.PutTx(tx, b.Height, tx.Blocktime); err != nil { + t.Fatal(err) + } + gtx, height, err := d.GetTx(tx.Txid) + if err != nil { + t.Fatal(err) + } + if b.Height != height { + t.Fatalf("GetTx: got height %v, expected %v", height, b.Height) + } + if fmt.Sprint(gtx) != fmt.Sprint(tx) { + t.Errorf("GetTx: %v, want %v", gtx, tx) + } + if err := d.DeleteTx(tx.Txid); err != nil { + t.Fatal(err) + } +} + // 
TestRocksDB_Index_UTXO is a composite test probing the whole indexing functionality for UTXO chains // It does the following: // 1) Connect two blocks (inputs from 2nd block are spending some outputs from the 1st block) @@ -370,7 +408,7 @@ func (p *testBitcoinParser) KeepBlockAddresses() int { // 4) Test tx caching functionality // 5) Disconnect block 2 - expect error // 6) Disconnect the block 2 using full scan -// After each step, the whole content of DB is examined and any difference against expected state is regarded as failure +// After each step, the content of DB is examined and any difference against expected state is regarded as failure func TestRocksDB_Index_UTXO(t *testing.T) { d := setupRocksDB(t, &testBitcoinParser{BitcoinParser: &btc.BitcoinParser{Params: btc.GetChainParams("test")}}) defer closeAnddestroyRocksDB(t, d) @@ -427,6 +465,22 @@ func TestRocksDB_Index_UTXO(t *testing.T) { t.Fatalf("GetBlockHash: got hash %v, expected %v", hash, "0000000076fbbed90fd75b0e18856aa35baa984e9c9d444cf746ad85e94e2997") } + // Test tx caching functionality, leave one tx in db to test cleanup in DisconnectBlock + testTxCache(t, d, block1, &block1.Txs[0]) + testTxCache(t, d, block2, &block2.Txs[0]) + if err = d.PutTx(&block2.Txs[1], block2.Height, block2.Txs[1].Blocktime); err != nil { + t.Fatal(err) + } + // check that there is only the last tx in the cache + packedTx, err := d.chainParser.PackTx(&block2.Txs[1], block2.Height, block2.Txs[1].Blocktime) + if err := checkColumn(d, cfTransactions, []keyPair{ + keyPair{block2.Txs[1].Txid, hex.EncodeToString(packedTx), nil}, + }); err != nil { + { + t.Fatal(err) + } + } + // DisconnectBlock for UTXO chains is not possible err = d.DisconnectBlock(block2) if err == nil || err.Error() != "DisconnectBlock is not supported for UTXO chains" { From f78b050234509ffb04aac2f24d42b5efcdf98e6b Mon Sep 17 00:00:00 2001 From: Martin Boehm Date: Fri, 20 Apr 2018 23:53:17 +0200 Subject: [PATCH 16/37] Fix indexing of coinbase transactions 
--- bchain/baseparser.go | 3 +++ bchain/types.go | 3 +++ db/rocksdb.go | 18 ++++++++------ db/rocksdb_test.go | 58 ++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 75 insertions(+), 7 deletions(-) diff --git a/bchain/baseparser.go b/bchain/baseparser.go index 8b747ed9..791f3b9f 100644 --- a/bchain/baseparser.go +++ b/bchain/baseparser.go @@ -42,6 +42,9 @@ func (p *BaseParser) KeepBlockAddresses() int { // PackTxid packs txid to byte array func (p *BaseParser) PackTxid(txid string) ([]byte, error) { + if txid == "" { + return nil, ErrTxidMissing + } return hex.DecodeString(txid) } diff --git a/bchain/types.go b/bchain/types.go index 46effc48..ddac7be6 100644 --- a/bchain/types.go +++ b/bchain/types.go @@ -14,6 +14,9 @@ var ( // ErrAddressMissing is returned if address is not specified // for example To address in ethereum can be missing in case of contract transaction ErrAddressMissing = errors.New("Address missing") + // ErrTxidMissing is returned if txid is not specified + // for example coinbase transactions in Bitcoin + ErrTxidMissing = errors.New("Txid missing") ) type ScriptSig struct { diff --git a/db/rocksdb.go b/db/rocksdb.go index b155fe52..fd2f33e8 100644 --- a/db/rocksdb.go +++ b/db/rocksdb.go @@ -335,25 +335,25 @@ func appendPackedAddrID(txAddrs []byte, addrID []byte, n uint32, remaining int) return txAddrs } -func findAndRemoveUnspentAddr(unspentAddrs []byte, vout uint32) ([]byte, uint32, []byte) { +func findAndRemoveUnspentAddr(unspentAddrs []byte, vout uint32) ([]byte, []byte) { // the addresses are packed as lenaddrID addrID vout, where lenaddrID and vout are varints for i := 0; i < len(unspentAddrs); { l, lv1 := unpackVarint(unspentAddrs[i:]) // index of vout of address in unspentAddrs j := i + int(l) + lv1 if j >= len(unspentAddrs) { - glog.Error("rocksdb: Inconsistent data in unspentAddrs") - return nil, 0, unspentAddrs + glog.Error("rocksdb: Inconsistent data in unspentAddrs ", hex.EncodeToString(unspentAddrs), ", ", vout) + return 
nil, unspentAddrs } n, lv2 := unpackVarint(unspentAddrs[j:]) if uint32(n) == vout { addrID := append([]byte(nil), unspentAddrs[i+lv1:j]...) unspentAddrs = append(unspentAddrs[:i], unspentAddrs[j+lv2:]...) - return addrID, uint32(n), unspentAddrs + return addrID, unspentAddrs } - i += j + lv2 + i = j + lv2 } - return nil, 0, unspentAddrs + return nil, unspentAddrs } func (d *RocksDB) writeAddressesUTXO(wb *gorocksdb.WriteBatch, block *bchain.Block, op int) error { @@ -397,6 +397,10 @@ func (d *RocksDB) writeAddressesUTXO(wb *gorocksdb.WriteBatch, block *bchain.Blo for i, input := range tx.Vin { btxID, err := d.chainParser.PackTxid(input.Txid) if err != nil { + // do not process inputs without input txid + if err == bchain.ErrTxidMissing { + continue + } return err } // try to find the tx in current block @@ -412,7 +416,7 @@ func (d *RocksDB) writeAddressesUTXO(wb *gorocksdb.WriteBatch, block *bchain.Blo } } var addrID []byte - addrID, _, unspentAddrs = findAndRemoveUnspentAddr(unspentAddrs, input.Vout) + addrID, unspentAddrs = findAndRemoveUnspentAddr(unspentAddrs, input.Vout) if addrID == nil { glog.Warningf("rocksdb: height %d, tx %v vout %v in inputs but missing in unspentTxs", block.Height, tx.Txid, input.Vout) continue diff --git a/db/rocksdb_test.go b/db/rocksdb_test.go index 9fc84241..ea2ef808 100644 --- a/db/rocksdb_test.go +++ b/db/rocksdb_test.go @@ -491,3 +491,61 @@ func TestRocksDB_Index_UTXO(t *testing.T) { // disconnect the 2nd block, verify that the db contains only the 1st block } + +func Test_findAndRemoveUnspentAddr(t *testing.T) { + type args struct { + unspentAddrs string + vout uint32 + } + tests := []struct { + name string + args args + want string + want2 string + }{ + { + name: "3", + args: args{ + unspentAddrs: "029c0010517a0115887452870212709393588893935687040e64635167006868060e76519351880087080a7b7b0115870a3276a9144150837fb91d9461d6b95059842ab85262c2923f88ac0c08636751680e04578710029112026114", + vout: 3, + }, + want: "64635167006868", + 
want2: "029c0010517a0115887452870212709393588893935687040e76519351880087080a7b7b0115870a3276a9144150837fb91d9461d6b95059842ab85262c2923f88ac0c08636751680e04578710029112026114", + }, + { + name: "10", + args: args{ + unspentAddrs: "029c0010517a0115887452870212709393588893935687040e64635167006868060e76519351880087080a7b7b0115870a3276a9144150837fb91d9461d6b95059842ab85262c2923f88ac0c08636751680e04578710029112026114", + vout: 10, + }, + want: "61", + want2: "029c0010517a0115887452870212709393588893935687040e64635167006868060e76519351880087080a7b7b0115870a3276a9144150837fb91d9461d6b95059842ab85262c2923f88ac0c08636751680e04578710029112", + }, + { + name: "not there", + args: args{ + unspentAddrs: "029c0010517a0115887452870212709393588893935687040e64635167006868060e76519351880087080a7b7b0115870a3276a9144150837fb91d9461d6b95059842ab85262c2923f88ac0c08636751680e04578710029112026114", + vout: 11, + }, + want: "", + want2: "029c0010517a0115887452870212709393588893935687040e64635167006868060e76519351880087080a7b7b0115870a3276a9144150837fb91d9461d6b95059842ab85262c2923f88ac0c08636751680e04578710029112026114", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b, err := hex.DecodeString(tt.args.unspentAddrs) + if err != nil { + panic(err) + } + got, got2 := findAndRemoveUnspentAddr(b, tt.args.vout) + h := hex.EncodeToString(got) + if !reflect.DeepEqual(h, tt.want) { + t.Errorf("findAndRemoveUnspentAddr() got = %v, want %v", h, tt.want) + } + h2 := hex.EncodeToString(got2) + if !reflect.DeepEqual(h2, tt.want2) { + t.Errorf("findAndRemoveUnspentAddr() got2 = %v, want %v", h2, tt.want2) + } + }) + } +} From ce485099a7f5dc298c839433803e7e2461547cb7 Mon Sep 17 00:00:00 2001 From: Martin Boehm Date: Mon, 23 Apr 2018 17:05:23 +0200 Subject: [PATCH 17/37] Implement DisconnectBlocks in index v2 - WIP --- db/rocksdb.go | 245 ++++++++++++++++++++++++++++++++------------- db/rocksdb_test.go | 92 +++++++++++++---- db/sync.go | 9 +- 3 files changed, 257 
insertions(+), 89 deletions(-) diff --git a/db/rocksdb.go b/db/rocksdb.go index fd2f33e8..38e30e0c 100644 --- a/db/rocksdb.go +++ b/db/rocksdb.go @@ -18,7 +18,9 @@ import ( // iterator creates snapshot, which takes lots of resources // when doing huge scan, it is better to close it and reopen from time to time to free the resources const disconnectBlocksRefreshIterator = uint64(1000000) +const packedHeightBytes = 4 +// RepairRocksDB calls RocksDb db repair function func RepairRocksDB(name string) error { glog.Infof("rocksdb: repair") opts := gorocksdb.NewDefaultOptions() @@ -146,11 +148,11 @@ func (d *RocksDB) GetTransactions(address string, lower uint32, higher uint32, f return err } - kstart, err := packOutputKey(addrID, lower) + kstart, err := packAddressKey(addrID, lower) if err != nil { return err } - kstop, err := packOutputKey(addrID, higher) + kstop, err := packAddressKey(addrID, higher) if err != nil { return err } @@ -164,7 +166,7 @@ func (d *RocksDB) GetTransactions(address string, lower uint32, higher uint32, f if bytes.Compare(key, kstop) > 0 { break } - outpoints, err := d.unpackOutputValue(val) + outpoints, err := d.unpackOutpoints(val) if err != nil { return err } @@ -181,7 +183,11 @@ func (d *RocksDB) GetTransactions(address string, lower uint32, higher uint32, f vout = uint32(o.vout) isOutput = true } - if err := fn(o.txid, vout, isOutput); err != nil { + tx, err := d.chainParser.UnpackTxid(o.btxID) + if err != nil { + return err + } + if err := fn(tx, vout, isOutput); err != nil { return err } } @@ -194,10 +200,12 @@ const ( opDelete = 1 ) +// ConnectBlock indexes addresses in the block and stores them in db func (d *RocksDB) ConnectBlock(block *bchain.Block) error { return d.writeBlock(block, opInsert) } +// DisconnectBlock removes addresses in the block from the db func (d *RocksDB) DisconnectBlock(block *bchain.Block) error { return d.writeBlock(block, opDelete) } @@ -236,33 +244,41 @@ func (d *RocksDB) writeBlock(block *bchain.Block, op int) 
error { // Addresses index type outpoint struct { - txid string - vout int32 + btxID []byte + vout int32 +} + +func packBlockAddress(addrID []byte) []byte { + vBuf := make([]byte, vlq.MaxLen32) + vl := packVarint(int32(len(addrID)), vBuf) + blockAddress := append([]byte(nil), vBuf[:vl]...) + blockAddress = append(blockAddress, addrID...) + return blockAddress } func (d *RocksDB) writeAddressRecords(wb *gorocksdb.WriteBatch, block *bchain.Block, op int, records map[string][]outpoint) error { keep := d.chainParser.KeepBlockAddresses() blockAddresses := make([]byte, 0) - vBuf := make([]byte, vlq.MaxLen32) for addrID, outpoints := range records { - key, err := packOutputKey([]byte(addrID), block.Height) + baddrID := []byte(addrID) + key, err := packAddressKey(baddrID, block.Height) if err != nil { glog.Warningf("rocksdb: packOutputKey: %v - %d %s", err, block.Height, addrID) continue } switch op { case opInsert: - val, err := d.packOutputValue(outpoints) + val, err := d.packOutpoints(outpoints) if err != nil { glog.Warningf("rocksdb: packOutputValue: %v", err) continue } wb.PutCF(d.cfh[cfAddresses], key, val) if keep > 0 { - // collect all addresses to be stored in blockaddresses - vl := packVarint(int32(len([]byte(addrID))), vBuf) - blockAddresses = append(blockAddresses, vBuf[0:vl]...) - blockAddresses = append(blockAddresses, []byte(addrID)...) + // collect all addresses be stored in blockaddresses + // they are used in disconnect blocks + blockAddress := packBlockAddress(baddrID) + blockAddresses = append(blockAddresses, blockAddress...) 
} case opDelete: wb.DeleteCF(d.cfh[cfAddresses], key) @@ -298,8 +314,8 @@ func (d *RocksDB) addAddrIDToRecords(op int, wb *gorocksdb.WriteBatch, records m } else { strAddrID := string(addrID) records[strAddrID] = append(records[strAddrID], outpoint{ - txid: string(btxid), - vout: vout, + btxID: btxid, + vout: vout, }) if op == opDelete { // remove transactions from cache @@ -404,7 +420,8 @@ func (d *RocksDB) writeAddressesUTXO(wb *gorocksdb.WriteBatch, block *bchain.Blo return err } // try to find the tx in current block - unspentAddrs, inThisBlock := unspentTxs[string(btxID)] + stxID := string(btxID) + unspentAddrs, inThisBlock := unspentTxs[stxID] if !inThisBlock { unspentAddrs, err = d.getUnspentTx(btxID) if err != nil { @@ -418,14 +435,14 @@ func (d *RocksDB) writeAddressesUTXO(wb *gorocksdb.WriteBatch, block *bchain.Blo var addrID []byte addrID, unspentAddrs = findAndRemoveUnspentAddr(unspentAddrs, input.Vout) if addrID == nil { - glog.Warningf("rocksdb: height %d, tx %v vout %v in inputs but missing in unspentTxs", block.Height, tx.Txid, input.Vout) + glog.Warningf("rocksdb: height %d, tx %v vin %v in inputs but missing in unspentTxs", block.Height, tx.Txid, i) continue } err = d.addAddrIDToRecords(op, wb, addresses, addrID, spendingTxid, int32(^i), block.Height) if err != nil { return err } - unspentTxs[string(btxID)] = unspentAddrs + unspentTxs[stxID] = unspentAddrs } } if err := d.writeAddressRecords(wb, block, op, addresses); err != nil { @@ -481,39 +498,45 @@ func (d *RocksDB) writeAddressesNonUTXO(wb *gorocksdb.WriteBatch, block *bchain. return d.writeAddressRecords(wb, block, op, addresses) } -func packOutputKey(outputScript []byte, height uint32) ([]byte, error) { - bheight := packUint(height) - buf := make([]byte, 0, len(outputScript)+len(bheight)) - buf = append(buf, outputScript...) - buf = append(buf, bheight...) 
- return buf, nil +func unpackBlockAddresses(buf []byte) ([][]byte, error) { + addresses := make([][]byte, 0) + // the addresses are packed as lenaddrID addrID vout, where lenaddrID and vout are varints + for i := 0; i < len(buf); { + l, lv := unpackVarint(buf[i:]) + j := i + int(l) + lv + if j > len(buf) { + glog.Error("rocksdb: Inconsistent data in blockAddresses ", hex.EncodeToString(buf)) + return nil, errors.New("Inconsistent data in blockAddresses") + } + addrID := append([]byte(nil), buf[i+lv:j]...) + addresses = append(addresses, addrID) + i = j + } + return addresses, nil } -func (d *RocksDB) packOutputValue(outpoints []outpoint) ([]byte, error) { +func (d *RocksDB) packOutpoints(outpoints []outpoint) ([]byte, error) { buf := make([]byte, 0) + bvout := make([]byte, vlq.MaxLen32) for _, o := range outpoints { - bvout := make([]byte, vlq.MaxLen32) l := packVarint(o.vout, bvout) - buf = append(buf, []byte(o.txid)...) + buf = append(buf, []byte(o.btxID)...) buf = append(buf, bvout[:l]...) 
} return buf, nil } -func (d *RocksDB) unpackOutputValue(buf []byte) ([]outpoint, error) { +func (d *RocksDB) unpackOutpoints(buf []byte) ([]outpoint, error) { txidUnpackedLen := d.chainParser.PackedTxidLen() outpoints := make([]outpoint, 0) for i := 0; i < len(buf); { - txid, err := d.chainParser.UnpackTxid(buf[i : i+txidUnpackedLen]) - if err != nil { - return nil, err - } + btxid := buf[i : i+txidUnpackedLen] i += txidUnpackedLen vout, voutLen := unpackVarint(buf[i:]) i += voutLen outpoints = append(outpoints, outpoint{ - txid: txid, - vout: vout, + btxID: btxid, + vout: vout, }) } return outpoints, nil @@ -588,12 +611,23 @@ func (d *RocksDB) writeHeight( return nil } -// DisconnectBlocksFullScan removes all data belonging to blocks in range lower-higher -// it finds the data by doing full scan of outputs column, therefore it is quite slow -func (d *RocksDB) DisconnectBlocksFullScan(lower uint32, higher uint32) error { - glog.Infof("db: disconnecting blocks %d-%d using full scan", lower, higher) - outputKeys := [][]byte{} - outputValues := [][]byte{} +func (d *RocksDB) getBlockAddresses(key []byte) ([][]byte, error) { + b, err := d.db.GetCF(d.ro, d.cfh[cfBlockAddresses], key) + if err != nil { + return nil, err + } + defer b.Free() + // block is missing in DB + if b.Data() == nil { + return nil, errors.New("Block addresses missing") + } + return unpackBlockAddresses(b.Data()) +} + +func (d *RocksDB) fullAddressesScan(lower uint32, higher uint32) ([][]byte, [][]byte, error) { + glog.Infof("db: doing full scan of addresses column") + addrKeys := [][]byte{} + addrValues := [][]byte{} var totalOutputs, count uint64 var seekKey []byte for { @@ -610,16 +644,16 @@ func (d *RocksDB) DisconnectBlocksFullScan(lower uint32, higher uint32) error { count++ key = it.Key().Data() l := len(key) - if l > 4 { - height := unpackUint(key[l-4 : l]) + if l > packedHeightBytes { + height := unpackUint(key[l-packedHeightBytes : l]) if height >= lower && height <= higher { - outputKey 
:= make([]byte, len(key)) - copy(outputKey, key) - outputKeys = append(outputKeys, outputKey) + addrKey := make([]byte, len(key)) + copy(addrKey, key) + addrKeys = append(addrKeys, addrKey) value := it.Value().Data() - outputValue := make([]byte, len(value)) - copy(outputValue, value) - outputValues = append(outputValues, outputValue) + addrValue := make([]byte, len(value)) + copy(addrValue, value) + addrValues = append(addrValues, addrValue) } } } @@ -631,43 +665,104 @@ func (d *RocksDB) DisconnectBlocksFullScan(lower uint32, higher uint32) error { break } } - glog.Infof("rocksdb: about to disconnect %d outputs from %d", len(outputKeys), totalOutputs) + glog.Infof("rocksdb: scanned %d addresses, found %d to disconnect", totalOutputs, len(addrKeys)) + return addrKeys, addrValues, nil +} + +// DisconnectBlockRange removes all data belonging to blocks in range lower-higher +// it finds the data in blockaddresses column if available, +// otherwise by doing quite slow full scan of addresses column +func (d *RocksDB) DisconnectBlockRange(lower uint32, higher uint32) error { + glog.Infof("db: disconnecting blocks %d-%d", lower, higher) + addrKeys := [][]byte{} + addrValues := [][]byte{} + keep := d.chainParser.KeepBlockAddresses() + var err error + doFullScan := true + if keep > 0 { + for height := lower; height <= higher; height++ { + key := packUint(height) + addresses, err := d.getBlockAddresses(key) + if err != nil { + glog.Error(err) + goto GoFullScan + } + for _, addrID := range addresses { + addrKey := append(addrID, key...) + val, err := d.db.GetCF(d.ro, d.cfh[cfAddresses], addrKey) + if err != nil { + goto GoFullScan + } + addrKeys = append(addrKeys, addrKey) + addrValue := append([]byte(nil), val.Data()...) 
+ val.Free() + addrValues = append(addrValues, addrValue) + } + } + doFullScan = false + GoFullScan: + } + if doFullScan { + addrKeys, addrValues, err = d.fullAddressesScan(lower, higher) + if err != nil { + return err + } + } + glog.Infof("rocksdb: about to disconnect %d addresses ", len(addrKeys)) wb := gorocksdb.NewWriteBatch() defer wb.Destroy() - for i := 0; i < len(outputKeys); i++ { + unspentTxs := make(map[string][]byte) + for i, addrKey := range addrKeys { if glog.V(2) { - glog.Info("output ", hex.EncodeToString(outputKeys[i])) + glog.Info("address ", hex.EncodeToString(addrKey)) } - wb.DeleteCF(d.cfh[cfAddresses], outputKeys[i]) - outpoints, err := d.unpackOutputValue(outputValues[i]) + wb.DeleteCF(d.cfh[cfAddresses], addrKey) + outpoints, err := d.unpackOutpoints(addrValues[i]) + if err != nil { + return err + } + addrID, height, err := unpackAddressKey(addrKey) if err != nil { return err } for _, o := range outpoints { - // delete from inputs - boutpoint, err := d.packOutpoint(o.txid, o.vout) - if err != nil { - return err - } if glog.V(2) { - glog.Info("input ", hex.EncodeToString(boutpoint)) + glog.Info("tx ", height, " ", hex.EncodeToString(o.btxID), " ", o.vout) } - wb.DeleteCF(d.cfh[cfUnspentTxs], boutpoint) - // delete from txCache - b, err := d.chainParser.PackTxid(o.txid) - if err != nil { - return err + // recreate unspentTxs from inputs + if o.vout < 0 { + stxID := string(o.btxID) + txAddrs, exists := unspentTxs[stxID] + if !exists { + txAddrs, err = d.getUnspentTx(o.btxID) + if err != nil { + return err + } + } + txAddrs = appendPackedAddrID(txAddrs, addrID, uint32(^o.vout), 1) + unspentTxs[stxID] = txAddrs + } else { + // remove from cfUnspentTxs + wb.DeleteCF(d.cfh[cfUnspentTxs], o.btxID) } - wb.DeleteCF(d.cfh[cfTransactions], b) + // delete cached transaction + wb.DeleteCF(d.cfh[cfTransactions], o.btxID) } } + for key, val := range unspentTxs { + wb.PutCF(d.cfh[cfUnspentTxs], []byte(key), val) + } for height := lower; height <= higher; 
height++ { if glog.V(2) { glog.Info("height ", height) } - wb.DeleteCF(d.cfh[cfHeight], packUint(height)) + key := packUint(height) + if keep > 0 { + wb.DeleteCF(d.cfh[cfBlockAddresses], key) + } + wb.DeleteCF(d.cfh[cfHeight], key) } - err := d.db.Write(d.wo, wb) + err = d.db.Write(d.wo, wb) if err == nil { glog.Infof("rocksdb: blocks %d-%d disconnected", lower, higher) } @@ -737,6 +832,22 @@ func (d *RocksDB) DeleteTx(txid string) error { // Helpers +func packAddressKey(addrID []byte, height uint32) ([]byte, error) { + bheight := packUint(height) + buf := make([]byte, 0, len(addrID)+len(bheight)) + buf = append(buf, addrID...) + buf = append(buf, bheight...) + return buf, nil +} + +func unpackAddressKey(key []byte) ([]byte, uint32, error) { + i := len(key) - packedHeightBytes + if i <= 0 { + return nil, 0, errors.New("Invalid address key") + } + return key[:i], unpackUint(key[i : i+packedHeightBytes]), nil +} + func packUint(i uint32) []byte { buf := make([]byte, 4) binary.BigEndian.PutUint32(buf, i) diff --git a/db/rocksdb_test.go b/db/rocksdb_test.go index ea2ef808..d37be34c 100644 --- a/db/rocksdb_test.go +++ b/db/rocksdb_test.go @@ -218,7 +218,7 @@ func getTestUTXOBlock2(t *testing.T, d *RocksDB) *bchain.Block { } } -func verifyAfterUTXOBlock1(t *testing.T, d *RocksDB) { +func verifyAfterUTXOBlock1(t *testing.T, d *RocksDB, noBlockAddresses bool) { if err := checkColumn(d, cfHeight, []keyPair{ keyPair{"000370d5", "0000000076fbbed90fd75b0e18856aa35baa984e9c9d444cf746ad85e94e2997", nil}, }); err != nil { @@ -253,19 +253,26 @@ func verifyAfterUTXOBlock1(t *testing.T, d *RocksDB) { t.Fatal(err) } } - // the values in cfBlockAddresses has random order, must use CompareFunc - if err := checkColumn(d, cfBlockAddresses, []keyPair{ - keyPair{"000370d5", "", - func(v string) bool { - return compareFuncBlockAddresses(v, []string{ - addressToPubKeyHexWithLength("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d), - 
addressToPubKeyHexWithLength("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d), - addressToPubKeyHexWithLength("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d), - addressToPubKeyHexWithLength("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d), - }) + // after disconnect there are no blockaddresses for the previous block + var blockAddressesKp []keyPair + if noBlockAddresses { + blockAddressesKp = []keyPair{} + } else { + // the values in cfBlockAddresses have random order, must use CompareFunc + blockAddressesKp = []keyPair{ + keyPair{"000370d5", "", + func(v string) bool { + return compareFuncBlockAddresses(v, []string{ + addressToPubKeyHexWithLength("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d), + addressToPubKeyHexWithLength("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d), + addressToPubKeyHexWithLength("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d), + addressToPubKeyHexWithLength("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d), + }) + }, }, - }, - }); err != nil { + } + } + if err := checkColumn(d, cfBlockAddresses, blockAddressesKp); err != nil { { t.Fatal(err) } @@ -400,14 +407,15 @@ func testTxCache(t *testing.T, d *RocksDB, b *bchain.Block, tx *bchain.Tx) { } } -// TestRocksDB_Index_UTXO is a composite test probing the whole indexing functionality for UTXO chains +// TestRocksDB_Index_UTXO is an integration test probing the whole indexing functionality for UTXO chains // It does the following: // 1) Connect two blocks (inputs from 2nd block are spending some outputs from the 1st block) // 2) GetTransactions for various addresses / low-high ranges // 3) GetBestBlock, GetBlockHash // 4) Test tx caching functionality // 5) Disconnect block 2 - expect error -// 6) Disconnect the block 2 using full scan +// 6) Disconnect the block 2 using blockaddresses column +// 7) Reconnect block 2 and disconnect blocks 1 and 2 using full scan // After each step, the content of DB is examined and any difference against expected state is regarded as failure func TestRocksDB_Index_UTXO(t *testing.T) { d := 
setupRocksDB(t, &testBitcoinParser{BitcoinParser: &btc.BitcoinParser{Params: btc.GetChainParams("test")}}) @@ -418,7 +426,7 @@ func TestRocksDB_Index_UTXO(t *testing.T) { if err := d.ConnectBlock(block1); err != nil { t.Fatal(err) } - verifyAfterUTXOBlock1(t, d) + verifyAfterUTXOBlock1(t, d, false) // connect 2nd block - use some outputs from the 1st block as the inputs and 1 input uses tx from the same block block2 := getTestUTXOBlock2(t, d) @@ -488,7 +496,19 @@ func TestRocksDB_Index_UTXO(t *testing.T) { } verifyAfterUTXOBlock2(t, d) - // disconnect the 2nd block, verify that the db contains only the 1st block + // disconnect the 2nd block, verify that the db contains only data from the 1st block with restored unspentTxs + // and that the cached tx is removed + err = d.DisconnectBlockRange(225494, 225494) + if err != nil { + t.Fatal(err) + } + + verifyAfterUTXOBlock1(t, d, true) + if err := checkColumn(d, cfTransactions, []keyPair{}); err != nil { + { + t.Fatal(err) + } + } } @@ -549,3 +569,41 @@ func Test_findAndRemoveUnspentAddr(t *testing.T) { }) } } + +func Test_unpackBlockAddresses(t *testing.T) { + type args struct { + buf string + } + tests := []struct { + name string + args args + want []string + wantErr bool + }{ + { + name: "1", + args: args{"029c10517a011588745287127093935888939356870e646351670068680e765193518800870a7b7b0115873276a9144150837fb91d9461d6b95059842ab85262c2923f88ac08636751680457870291"}, + want: []string{"9c", "517a011588745287", "709393588893935687", "64635167006868", "76519351880087", "7b7b011587", "76a9144150837fb91d9461d6b95059842ab85262c2923f88ac", "63675168", "5787", "91"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b, err := hex.DecodeString(tt.args.buf) + if err != nil { + panic(err) + } + got, err := unpackBlockAddresses(b) + if (err != nil) != tt.wantErr { + t.Errorf("unpackBlockAddresses() error = %v, wantErr %v", err, tt.wantErr) + return + } + h := make([]string, len(got)) + for i, g := range 
got { + h[i] = hex.EncodeToString(g) + } + if !reflect.DeepEqual(h, tt.want) { + t.Errorf("unpackBlockAddresses() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/db/sync.go b/db/sync.go index 845a517d..e0216583 100644 --- a/db/sync.go +++ b/db/sync.go @@ -382,10 +382,9 @@ func (w *SyncWorker) getBlockChain(out chan blockResult, done chan struct{}) { // otherwise doing full scan func (w *SyncWorker) DisconnectBlocks(lower uint32, higher uint32, hashes []string) error { glog.Infof("sync: disconnecting blocks %d-%d", lower, higher) - // if the chain uses Block to Addresses mapping, always use DisconnectBlocksFullScan - // the full scan will be optimized using the mapping + // if the chain uses Block to Addresses mapping, always use DisconnectBlockRange if w.chain.GetChainParser().KeepBlockAddresses() > 0 { - return w.db.DisconnectBlocksFullScan(lower, higher) + return w.db.DisconnectBlockRange(lower, higher) } blocks := make([]*bchain.Block, len(hashes)) var err error @@ -393,8 +392,8 @@ func (w *SyncWorker) DisconnectBlocks(lower uint32, higher uint32, hashes []stri for i, hash := range hashes { blocks[i], err = w.chain.GetBlock(hash, 0) if err != nil { - // cannot get block, do full range scan - return w.db.DisconnectBlocksFullScan(lower, higher) + // cannot get a block, we must do full range scan + return w.db.DisconnectBlockRange(lower, higher) } } // then disconnect one after another From 9c9367491846ba1f9479ed967921232cbaf8a178 Mon Sep 17 00:00:00 2001 From: Jakub Matys Date: Tue, 24 Apr 2018 10:20:26 +0200 Subject: [PATCH 18/37] added support of bcash addresses to RPCs --- bchain/baseparser.go | 25 +++++++- bchain/coins/bch/bcashparser.go | 53 +++++++++++------ bchain/types.go | 11 +++- server/socketio.go | 102 ++++++++++++++++++++++---------- server/static/test.html | 39 +++++++++--- 5 files changed, 168 insertions(+), 62 deletions(-) diff --git a/bchain/baseparser.go b/bchain/baseparser.go index ba8bfb1a..4d6bc761 100644 --- a/bchain/baseparser.go 
+++ b/bchain/baseparser.go @@ -178,9 +178,30 @@ func (a baseAddress) String() string { return a.addr } -func (a baseAddress) EncodeAddress(format uint8) (string, error) { - if format != 0 { +func (a baseAddress) EncodeAddress(format AddressFormat) (string, error) { + if format != DefaultAddress { return "", fmt.Errorf("Unknown address format: %d", format) } return a.addr, nil } + +func (a baseAddress) AreEqual(addr string) (bool, error) { + ea, err := a.EncodeAddress(0) + if err != nil { + return false, err + } + return ea == addr, nil +} + +func (a baseAddress) InSlice(addrs []string) (bool, error) { + for _, addr := range addrs { + eq, err := a.AreEqual(addr) + if err != nil { + return false, err + } + if eq { + return true, nil + } + } + return false, nil +} diff --git a/bchain/coins/bch/bcashparser.go b/bchain/coins/bch/bcashparser.go index 8eb94292..2e1fe9ca 100644 --- a/bchain/coins/bch/bcashparser.go +++ b/bchain/coins/bch/bcashparser.go @@ -10,17 +10,9 @@ import ( "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcutil" "github.com/cpacia/bchutil" - "github.com/golang/glog" ) -var prefixes []string - -func init() { - prefixes = make([]string, 0, len(bchutil.Prefixes)) - for _, prefix := range bchutil.Prefixes { - prefixes = append(prefixes, prefix) - } -} +var prefixes = []string{"bitcoincash", "bchtest", "bchreg"} // BCashParser handle type BCashParser struct { @@ -92,6 +84,9 @@ func isCashAddr(addr string) bool { func (p *BCashParser) UnpackTx(buf []byte) (tx *bchain.Tx, height uint32, err error) { tx, height, err = p.BitcoinParser.UnpackTx(buf) + if err != nil { + return + } for i, vout := range tx.Vout { if len(vout.ScriptPubKey.Addresses) == 1 { @@ -114,18 +109,11 @@ func (a *bcashAddress) String() string { return a.addr } -type AddressFormat = uint8 - -const ( - LegacyAddress AddressFormat = iota - CashAddress -) - -func (a *bcashAddress) EncodeAddress(format AddressFormat) (string, error) { +func (a *bcashAddress) EncodeAddress(format 
bchain.AddressFormat) (string, error) { switch format { - case LegacyAddress: + case bchain.DefaultAddress: return a.String(), nil - case CashAddress: + case bchain.BCashAddress: da, err := btcutil.DecodeAddress(a.addr, a.net) if err != nil { return "", err @@ -148,3 +136,30 @@ func (a *bcashAddress) EncodeAddress(format AddressFormat) (string, error) { return "", fmt.Errorf("Unknown address format: %d", format) } } + +func (a *bcashAddress) AreEqual(addr string) (bool, error) { + var format bchain.AddressFormat + if isCashAddr(addr) { + format = bchain.BCashAddress + } else { + format = bchain.DefaultAddress + } + ea, err := a.EncodeAddress(format) + if err != nil { + return false, err + } + return ea == addr, nil +} + +func (a *bcashAddress) InSlice(addrs []string) (bool, error) { + for _, addr := range addrs { + eq, err := a.AreEqual(addr) + if err != nil { + return false, err + } + if eq { + return true, nil + } + } + return false, nil +} diff --git a/bchain/types.go b/bchain/types.go index 59ce276e..ddc49f4b 100644 --- a/bchain/types.go +++ b/bchain/types.go @@ -37,9 +37,18 @@ type ScriptPubKey struct { Addresses []string `json:"addresses,omitempty"` } +type AddressFormat = uint8 + +const ( + DefaultAddress AddressFormat = iota + BCashAddress +) + type Address interface { String() string - EncodeAddress(format uint8) (string, error) + EncodeAddress(format AddressFormat) (string, error) + AreEqual(addr string) (bool, error) + InSlice(addrs []string) (bool, error) } type Vout struct { diff --git a/server/socketio.go b/server/socketio.go index dc84af64..f7cb7007 100644 --- a/server/socketio.go +++ b/server/socketio.go @@ -124,27 +124,32 @@ func (s *SocketIoServer) txRedirect(w http.ResponseWriter, r *http.Request) { } } -type reqRange struct { - Start int `json:"start"` - End int `json:"end"` - QueryMempol bool `json:"queryMempol"` - QueryMempoolOnly bool `json:"queryMempoolOnly"` - From int `json:"from"` - To int `json:"to"` +type addrOpts struct { + Start int 
`json:"start"` + End int `json:"end"` + QueryMempol bool `json:"queryMempol"` + QueryMempoolOnly bool `json:"queryMempoolOnly"` + From int `json:"from"` + To int `json:"to"` + AddressFormat uint8 `json:"addressFormat"` +} + +type txOpts struct { + AddressFormat uint8 `json:"addressFormat"` } var onMessageHandlers = map[string]func(*SocketIoServer, json.RawMessage) (interface{}, error){ "getAddressTxids": func(s *SocketIoServer, params json.RawMessage) (rv interface{}, err error) { - addr, rr, err := unmarshalGetAddressRequest(params) + addr, opts, err := unmarshalGetAddressRequest(params) if err == nil { - rv, err = s.getAddressTxids(addr, &rr) + rv, err = s.getAddressTxids(addr, &opts) } return }, "getAddressHistory": func(s *SocketIoServer, params json.RawMessage) (rv interface{}, err error) { - addr, rr, err := unmarshalGetAddressRequest(params) + addr, opts, err := unmarshalGetAddressRequest(params) if err == nil { - rv, err = s.getAddressHistory(addr, &rr) + rv, err = s.getAddressHistory(addr, &opts) } return }, @@ -173,9 +178,9 @@ var onMessageHandlers = map[string]func(*SocketIoServer, json.RawMessage) (inter return s.getInfo() }, "getDetailedTransaction": func(s *SocketIoServer, params json.RawMessage) (rv interface{}, err error) { - txid, err := unmarshalStringParameter(params) + txid, opts, err := unmarshalGetDetailedTransaction(params) if err == nil { - rv, err = s.getDetailedTransaction(txid) + rv, err = s.getDetailedTransaction(txid, opts) } return }, @@ -226,7 +231,7 @@ func (s *SocketIoServer) onMessage(c *gosocketio.Channel, req map[string]json.Ra return e } -func unmarshalGetAddressRequest(params []byte) (addr []string, rr reqRange, err error) { +func unmarshalGetAddressRequest(params []byte) (addr []string, opts addrOpts, err error) { var p []json.RawMessage err = json.Unmarshal(params, &p) if err != nil { @@ -240,7 +245,7 @@ func unmarshalGetAddressRequest(params []byte) (addr []string, rr reqRange, err if err != nil { return } - err = 
json.Unmarshal(p[1], &rr) + err = json.Unmarshal(p[1], &opts) return } @@ -261,14 +266,14 @@ type resultAddressTxids struct { Result []string `json:"result"` } -func (s *SocketIoServer) getAddressTxids(addr []string, rr *reqRange) (res resultAddressTxids, err error) { +func (s *SocketIoServer) getAddressTxids(addr []string, opts *addrOpts) (res resultAddressTxids, err error) { txids := make([]string, 0) - lower, higher := uint32(rr.To), uint32(rr.Start) + lower, higher := uint32(opts.To), uint32(opts.Start) for _, address := range addr { - if !rr.QueryMempoolOnly { + if !opts.QueryMempoolOnly { err = s.db.GetTransactions(address, lower, higher, func(txid string, vout uint32, isOutput bool) error { txids = append(txids, txid) - if isOutput && rr.QueryMempol { + if isOutput && opts.QueryMempol { input := s.chain.GetMempoolSpentOutput(txid, vout) if input != "" { txids = append(txids, txid) @@ -280,7 +285,7 @@ func (s *SocketIoServer) getAddressTxids(addr []string, rr *reqRange) (res resul return res, err } } - if rr.QueryMempoolOnly || rr.QueryMempol { + if opts.QueryMempoolOnly || opts.QueryMempol { mtxids, err := s.chain.GetMempoolTransactions(address) if err != nil { return res, err @@ -375,8 +380,8 @@ func txToResTx(tx *bchain.Tx, height int, hi []txInputs, ho []txOutputs) resTx { } } -func (s *SocketIoServer) getAddressHistory(addr []string, rr *reqRange) (res resultGetAddressHistory, err error) { - txr, err := s.getAddressTxids(addr, rr) +func (s *SocketIoServer) getAddressHistory(addr []string, opts *addrOpts) (res resultGetAddressHistory, err error) { + txr, err := s.getAddressTxids(addr, opts) if err != nil { return } @@ -388,7 +393,7 @@ func (s *SocketIoServer) getAddressHistory(addr []string, rr *reqRange) (res res res.Result.TotalCount = len(txids) res.Result.Items = make([]addressHistoryItem, 0) for i, txid := range txids { - if i >= rr.From && i < rr.To { + if i >= opts.From && i < opts.To { tx, height, err := s.txCache.GetTransaction(txid, bestheight) 
if err != nil { return res, err @@ -402,10 +407,17 @@ func (s *SocketIoServer) getAddressHistory(addr []string, rr *reqRange) (res res Script: &vout.ScriptPubKey.Hex, SpentIndex: int(vout.N), } - if len(vout.ScriptPubKey.Addresses) == 1 { - a := vout.ScriptPubKey.Addresses[0] + if vout.Address != nil { + a, err := vout.Address.EncodeAddress(opts.AddressFormat) + if err != nil { + return res, err + } ao.Address = &a - if stringInSlice(a, addr) { + found, err := vout.Address.InSlice(addr) + if err != nil { + return res, err + } + if found { hi, ok := ads[a] if ok { hi.OutputIndexes = append(hi.OutputIndexes, int(vout.N)) @@ -603,11 +615,31 @@ func unmarshalStringParameter(params []byte) (s string, err error) { return } +func unmarshalGetDetailedTransaction(params []byte) (txid string, opts txOpts, err error) { + var p []json.RawMessage + err = json.Unmarshal(params, &p) + if err != nil { + return + } + if len(p) < 1 || len(p) > 2 { + err = errors.New("incorrect number of parameters") + return + } + err = json.Unmarshal(p[0], &txid) + if err != nil { + return + } + if len(p) > 1 { + err = json.Unmarshal(p[1], &opts) + } + return +} + type resultGetDetailedTransaction struct { Result resTx `json:"result"` } -func (s *SocketIoServer) getDetailedTransaction(txid string) (res resultGetDetailedTransaction, err error) { +func (s *SocketIoServer) getDetailedTransaction(txid string, opts txOpts) (res resultGetDetailedTransaction, err error) { bestheight, _, err := s.db.GetBestBlock() if err != nil { return @@ -631,8 +663,12 @@ func (s *SocketIoServer) getDetailedTransaction(txid string) (res resultGetDetai } if len(otx.Vout) > int(vin.Vout) { vout := otx.Vout[vin.Vout] - if len(vout.ScriptPubKey.Addresses) == 1 { - ai.Address = &vout.ScriptPubKey.Addresses[0] + if vout.Address != nil { + a, err := vout.Address.EncodeAddress(opts.AddressFormat) + if err != nil { + return res, err + } + ai.Address = &a } ai.Satoshis = int64(vout.Value * 1E8) } @@ -645,8 +681,12 @@ func (s 
*SocketIoServer) getDetailedTransaction(txid string) (res resultGetDetai Script: &vout.ScriptPubKey.Hex, SpentIndex: int(vout.N), } - if len(vout.ScriptPubKey.Addresses) == 1 { - ao.Address = &vout.ScriptPubKey.Addresses[0] + if vout.Address != nil { + a, err := vout.Address.EncodeAddress(opts.AddressFormat) + if err != nil { + return res, err + } + ao.Address = &a } ho = append(ho, ao) } diff --git a/server/static/test.html b/server/static/test.html index 8073cf2a..99097265 100644 --- a/server/static/test.html +++ b/server/static/test.html @@ -56,16 +56,17 @@ var addresses = document.getElementById('getAddressHistoryAddresses').value.split(","); addresses = addresses.map(s => s.trim()); var mempool = document.getElementById("getAddressHistoryMempool").checked; - lookupAddressHistories(addresses, 0, 5, mempool, 20000000, 0, function (result) { + var format = document.getElementById("getAddressHistoryFormat").value; + lookupAddressHistories(addresses, 0, 5, mempool, 20000000, 0, format, function (result) { console.log('getAddressHistory sent successfully'); console.log(result); document.getElementById('getAddressHistoryResult').innerText = JSON.stringify(result).replace(/,/g, ", "); }); } - function lookupAddressHistories(addresses, from, to, mempool, start, end, f) { + function lookupAddressHistories(addresses, from, to, mempool, start, end, format, f) { const method = 'getAddressHistory'; - const rangeParam = mempool ? { + const opts = mempool ? { start, // needed for older bitcores (so we don't load all history if bitcore-node < 3.1.3) end, queryMempoolOnly: true, @@ -77,9 +78,10 @@ const params = [ addresses, { - ...rangeParam, + ...opts, from, to, + addressFormat: parseInt(format), }, ]; return socket.send({ method, params }, f); @@ -87,7 +89,7 @@ function lookupTransactionsIdsMempool(addresses, mempool, start, end, f) { const method = 'getAddressTxids'; - const rangeParam = mempool ? { + const opts = mempool ? 
{ start, end, queryMempoolOnly: true, @@ -98,7 +100,7 @@ }; const params = [ addresses, - rangeParam, + opts, ]; return socket.send({ method, params }, f); } @@ -155,17 +157,21 @@ function getDetailedTransaction() { var hash = document.getElementById('getDetailedTransactionHash').value.trim(); - lookupDetailedTransaction(hash, function (result) { + var format = document.getElementById("getDetailedTransactionFormat").value; + lookupDetailedTransaction(hash, format, function (result) { console.log('getDetailedTransaction sent successfully'); console.log(result); document.getElementById('getDetailedTransactionResult').innerText = JSON.stringify(result).replace(/,/g, ", "); }); } - function lookupDetailedTransaction(hash, f) { + function lookupDetailedTransaction(hash, format, f) { const method = 'getDetailedTransaction'; const params = [ hash, + { + addressFormat: parseInt(format), + }, ]; return socket.send({ method, params }, f); } @@ -275,6 +281,14 @@   +
+
+   + +
@@ -324,7 +338,14 @@
-
+
+
+
+   +
From ed027a68c9d63592437a0635347d81dc8bff010c Mon Sep 17 00:00:00 2001 From: Martin Boehm Date: Tue, 24 Apr 2018 12:00:24 +0200 Subject: [PATCH 19/37] Implement DisconnectBlocks in index v2 - WIP --- db/rocksdb.go | 93 ++++++++++++++++++++++++++++++++-------------- db/rocksdb_test.go | 76 +++++++++++++++++++++++++++---------- 2 files changed, 122 insertions(+), 47 deletions(-) diff --git a/db/rocksdb.go b/db/rocksdb.go index 38e30e0c..64552f6f 100644 --- a/db/rocksdb.go +++ b/db/rocksdb.go @@ -248,15 +248,23 @@ type outpoint struct { vout int32 } -func packBlockAddress(addrID []byte) []byte { +func (d *RocksDB) packBlockAddress(addrID []byte, removedUnspentTxs map[string][]outpoint) []byte { vBuf := make([]byte, vlq.MaxLen32) vl := packVarint(int32(len(addrID)), vBuf) blockAddress := append([]byte(nil), vBuf[:vl]...) blockAddress = append(blockAddress, addrID...) + if removedUnspentTxs == nil { + } else { + addrUnspentTxs := removedUnspentTxs[string(addrID)] + vl = packVarint(int32(len(addrUnspentTxs)), vBuf) + blockAddress = append(blockAddress, vBuf[:vl]...) + buf := d.packOutpoints(addrUnspentTxs) + blockAddress = append(blockAddress, buf...) 
+ } return blockAddress } -func (d *RocksDB) writeAddressRecords(wb *gorocksdb.WriteBatch, block *bchain.Block, op int, records map[string][]outpoint) error { +func (d *RocksDB) writeAddressRecords(wb *gorocksdb.WriteBatch, block *bchain.Block, op int, records map[string][]outpoint, removedUnspentTxs map[string][]outpoint) error { keep := d.chainParser.KeepBlockAddresses() blockAddresses := make([]byte, 0) for addrID, outpoints := range records { @@ -268,16 +276,12 @@ func (d *RocksDB) writeAddressRecords(wb *gorocksdb.WriteBatch, block *bchain.Bl } switch op { case opInsert: - val, err := d.packOutpoints(outpoints) - if err != nil { - glog.Warningf("rocksdb: packOutputValue: %v", err) - continue - } + val := d.packOutpoints(outpoints) wb.PutCF(d.cfh[cfAddresses], key, val) if keep > 0 { // collect all addresses be stored in blockaddresses // they are used in disconnect blocks - blockAddress := packBlockAddress(baddrID) + blockAddress := d.packBlockAddress(baddrID, removedUnspentTxs) blockAddresses = append(blockAddresses, blockAddress...) 
} case opDelete: @@ -380,6 +384,7 @@ func (d *RocksDB) writeAddressesUTXO(wb *gorocksdb.WriteBatch, block *bchain.Blo } addresses := make(map[string][]outpoint) unspentTxs := make(map[string][]byte) + removedUnspentTxs := make(map[string][]outpoint) btxIDs := make([][]byte, len(block.Txs)) // first process all outputs, build mapping of addresses to outpoints and mappings of unspent txs to addresses for txi, tx := range block.Txs { @@ -438,6 +443,9 @@ func (d *RocksDB) writeAddressesUTXO(wb *gorocksdb.WriteBatch, block *bchain.Blo glog.Warningf("rocksdb: height %d, tx %v vin %v in inputs but missing in unspentTxs", block.Height, tx.Txid, i) continue } + rut := removedUnspentTxs[string(addrID)] + rut = append(rut, outpoint{btxID, int32(input.Vout)}) + removedUnspentTxs[string(addrID)] = rut err = d.addAddrIDToRecords(op, wb, addresses, addrID, spendingTxid, int32(^i), block.Height) if err != nil { return err @@ -445,7 +453,7 @@ func (d *RocksDB) writeAddressesUTXO(wb *gorocksdb.WriteBatch, block *bchain.Blo unspentTxs[stxID] = unspentAddrs } } - if err := d.writeAddressRecords(wb, block, op, addresses); err != nil { + if err := d.writeAddressRecords(wb, block, op, addresses, removedUnspentTxs); err != nil { return err } // save unspent txs from current block @@ -495,27 +503,34 @@ func (d *RocksDB) writeAddressesNonUTXO(wb *gorocksdb.WriteBatch, block *bchain. 
} } } - return d.writeAddressRecords(wb, block, op, addresses) + return d.writeAddressRecords(wb, block, op, addresses, nil) } -func unpackBlockAddresses(buf []byte) ([][]byte, error) { +func (d *RocksDB) unpackBlockAddresses(buf []byte) ([][]byte, [][]outpoint, error) { addresses := make([][]byte, 0) + outpointsArray := make([][]outpoint, 0) // the addresses are packed as lenaddrID addrID vout, where lenaddrID and vout are varints for i := 0; i < len(buf); { l, lv := unpackVarint(buf[i:]) j := i + int(l) + lv if j > len(buf) { glog.Error("rocksdb: Inconsistent data in blockAddresses ", hex.EncodeToString(buf)) - return nil, errors.New("Inconsistent data in blockAddresses") + return nil, nil, errors.New("Inconsistent data in blockAddresses") } addrID := append([]byte(nil), buf[i+lv:j]...) + outpoints, ol, err := d.unpackNOutpoints(buf[j:]) + if err != nil { + glog.Error("rocksdb: Inconsistent data in blockAddresses ", hex.EncodeToString(buf)) + return nil, nil, errors.New("Inconsistent data in blockAddresses") + } addresses = append(addresses, addrID) - i = j + outpointsArray = append(outpointsArray, outpoints) + i = j + ol } - return addresses, nil + return addresses, outpointsArray, nil } -func (d *RocksDB) packOutpoints(outpoints []outpoint) ([]byte, error) { +func (d *RocksDB) packOutpoints(outpoints []outpoint) []byte { buf := make([]byte, 0) bvout := make([]byte, vlq.MaxLen32) for _, o := range outpoints { @@ -523,25 +538,45 @@ func (d *RocksDB) packOutpoints(outpoints []outpoint) ([]byte, error) { buf = append(buf, []byte(o.btxID)...) buf = append(buf, bvout[:l]...) 
} - return buf, nil + return buf } func (d *RocksDB) unpackOutpoints(buf []byte) ([]outpoint, error) { txidUnpackedLen := d.chainParser.PackedTxidLen() outpoints := make([]outpoint, 0) for i := 0; i < len(buf); { - btxid := buf[i : i+txidUnpackedLen] + btxID := buf[i : i+txidUnpackedLen] i += txidUnpackedLen vout, voutLen := unpackVarint(buf[i:]) i += voutLen outpoints = append(outpoints, outpoint{ - btxID: btxid, + btxID: btxID, vout: vout, }) } return outpoints, nil } +func (d *RocksDB) unpackNOutpoints(buf []byte) ([]outpoint, int, error) { + txidUnpackedLen := d.chainParser.PackedTxidLen() + n, p := unpackVarint(buf) + outpoints := make([]outpoint, n) + for i := int32(0); i < n; i++ { + if p+txidUnpackedLen >= len(buf) { + return nil, 0, errors.New("Inconsistent data in unpackNOutpoints") + } + btxID := buf[p : p+txidUnpackedLen] + p += txidUnpackedLen + vout, voutLen := unpackVarint(buf[p:]) + p += voutLen + outpoints[i] = outpoint{ + btxID: btxID, + vout: vout, + } + } + return outpoints, p, nil +} + func (d *RocksDB) packOutpoint(txid string, vout int32) ([]byte, error) { btxid, err := d.chainParser.PackTxid(txid) if err != nil { @@ -611,17 +646,17 @@ func (d *RocksDB) writeHeight( return nil } -func (d *RocksDB) getBlockAddresses(key []byte) ([][]byte, error) { +func (d *RocksDB) getBlockAddresses(key []byte) ([][]byte, [][]outpoint, error) { b, err := d.db.GetCF(d.ro, d.cfh[cfBlockAddresses], key) if err != nil { - return nil, err + return nil, nil, err } defer b.Free() // block is missing in DB if b.Data() == nil { - return nil, errors.New("Block addresses missing") + return nil, nil, errors.New("Block addresses missing") } - return unpackBlockAddresses(b.Data()) + return d.unpackBlockAddresses(b.Data()) } func (d *RocksDB) fullAddressesScan(lower uint32, higher uint32) ([][]byte, [][]byte, error) { @@ -673,21 +708,25 @@ func (d *RocksDB) fullAddressesScan(lower uint32, higher uint32) ([][]byte, [][] // it finds the data in blockaddresses column if 
available, // otherwise by doing quite slow full scan of addresses column func (d *RocksDB) DisconnectBlockRange(lower uint32, higher uint32) error { + + // TODO - it is still a mess + glog.Infof("db: disconnecting blocks %d-%d", lower, higher) addrKeys := [][]byte{} addrValues := [][]byte{} + addrUnspentOutpoints := [][]outpoint{} keep := d.chainParser.KeepBlockAddresses() var err error doFullScan := true if keep > 0 { for height := lower; height <= higher; height++ { key := packUint(height) - addresses, err := d.getBlockAddresses(key) + addresses, unspentOutpoints, err := d.getBlockAddresses(key) if err != nil { glog.Error(err) goto GoFullScan } - for _, addrID := range addresses { + for i, addrID := range addresses { addrKey := append(addrID, key...) val, err := d.db.GetCF(d.ro, d.cfh[cfAddresses], addrKey) if err != nil { @@ -695,6 +734,7 @@ func (d *RocksDB) DisconnectBlockRange(lower uint32, higher uint32) error { } addrKeys = append(addrKeys, addrKey) addrValue := append([]byte(nil), val.Data()...) 
+ addrUnspentOutpoints = append(addrUnspentOutpoints, unspentOutpoints[i]) val.Free() addrValues = append(addrValues, addrValue) } @@ -717,10 +757,7 @@ func (d *RocksDB) DisconnectBlockRange(lower uint32, higher uint32) error { glog.Info("address ", hex.EncodeToString(addrKey)) } wb.DeleteCF(d.cfh[cfAddresses], addrKey) - outpoints, err := d.unpackOutpoints(addrValues[i]) - if err != nil { - return err - } + outpoints := addrUnspentOutpoints[i] addrID, height, err := unpackAddressKey(addrKey) if err != nil { return err diff --git a/db/rocksdb_test.go b/db/rocksdb_test.go index d37be34c..d354643d 100644 --- a/db/rocksdb_test.go +++ b/db/rocksdb_test.go @@ -16,6 +16,10 @@ import ( "github.com/juju/errors" ) +// simplified explanation of signed varint packing, used in many index data structures +// for number n, the packing is: 2*n if n>=0 else 2*(-n)-1 +// take only 1 byte if abs(n)<127 + func setupRocksDB(t *testing.T, p bchain.BlockChainParser) *RocksDB { tmp, err := ioutil.TempDir("", "testdb") if err != nil { @@ -45,7 +49,6 @@ func addressToPubKeyHex(addr string, t *testing.T, d *RocksDB) string { func addressToPubKeyHexWithLength(addr string, t *testing.T, d *RocksDB) string { h := addressToPubKeyHex(addr, t, d) - // length is signed varint, therefore 2 times big, we can take len(h) as the correct value return strconv.FormatInt(int64(len(h)), 16) + h } @@ -56,14 +59,18 @@ type keyPair struct { CompareFunc func(string) bool } -func compareFuncBlockAddresses(v string, expected []string) bool { +func compareFuncBlockAddresses(t *testing.T, v string, expected []string) bool { for _, e := range expected { lb := len(v) v = strings.Replace(v, e, "", 1) if lb == len(v) { + t.Error(e, " not found in ", v) return false } } + if len(v) != 0 { + t.Error("not expected content ", v) + } return len(v) == 0 } @@ -262,11 +269,11 @@ func verifyAfterUTXOBlock1(t *testing.T, d *RocksDB, noBlockAddresses bool) { blockAddressesKp = []keyPair{ keyPair{"000370d5", "", func(v string) 
bool { - return compareFuncBlockAddresses(v, []string{ - addressToPubKeyHexWithLength("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d), - addressToPubKeyHexWithLength("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d), - addressToPubKeyHexWithLength("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d), - addressToPubKeyHexWithLength("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d), + return compareFuncBlockAddresses(t, v, []string{ + addressToPubKeyHexWithLength("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d) + "00", + addressToPubKeyHexWithLength("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "00", + addressToPubKeyHexWithLength("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "00", + addressToPubKeyHexWithLength("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "00", }) }, }, @@ -329,14 +336,14 @@ func verifyAfterUTXOBlock2(t *testing.T, d *RocksDB) { if err := checkColumn(d, cfBlockAddresses, []keyPair{ keyPair{"000370d6", "", func(v string) bool { - return compareFuncBlockAddresses(v, []string{ - addressToPubKeyHexWithLength("mzB8cYrfRwFRFAGTDzV8LkUQy5BQicxGhX", t, d), - addressToPubKeyHexWithLength("mtR97eM2HPWVM6c8FGLGcukgaHHQv7THoL", t, d), - addressToPubKeyHexWithLength("mwwoKQE5Lb1G4picHSHDQKg8jw424PF9SC", t, d), - addressToPubKeyHexWithLength("mmJx9Y8ayz9h14yd9fgCW1bUKoEpkBAquP", t, d), - addressToPubKeyHexWithLength("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d), - addressToPubKeyHexWithLength("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d), - addressToPubKeyHexWithLength("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d), + return compareFuncBlockAddresses(t, v, []string{ + addressToPubKeyHexWithLength("mzB8cYrfRwFRFAGTDzV8LkUQy5BQicxGhX", t, d) + "02" + "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25" + "00", + addressToPubKeyHexWithLength("mtR97eM2HPWVM6c8FGLGcukgaHHQv7THoL", t, d) + "00", + addressToPubKeyHexWithLength("mwwoKQE5Lb1G4picHSHDQKg8jw424PF9SC", t, d) + "00", + addressToPubKeyHexWithLength("mmJx9Y8ayz9h14yd9fgCW1bUKoEpkBAquP", t, d) + "00", + 
addressToPubKeyHexWithLength("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "02" + "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "00", + addressToPubKeyHexWithLength("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "02" + "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840" + "02", + addressToPubKeyHexWithLength("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "02" + "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "02", }) }, }, @@ -570,7 +577,14 @@ func Test_findAndRemoveUnspentAddr(t *testing.T) { } } +type hexoutpoint struct { + txID string + vout int32 +} + func Test_unpackBlockAddresses(t *testing.T) { + d := setupRocksDB(t, &testBitcoinParser{BitcoinParser: &btc.BitcoinParser{Params: btc.GetChainParams("test")}}) + defer closeAnddestroyRocksDB(t, d) type args struct { buf string } @@ -578,12 +592,25 @@ func Test_unpackBlockAddresses(t *testing.T) { name string args args want []string + want2 [][]hexoutpoint wantErr bool }{ { name: "1", - args: args{"029c10517a011588745287127093935888939356870e646351670068680e765193518800870a7b7b0115873276a9144150837fb91d9461d6b95059842ab85262c2923f88ac08636751680457870291"}, - want: []string{"9c", "517a011588745287", "709393588893935687", "64635167006868", "76519351880087", "7b7b011587", "76a9144150837fb91d9461d6b95059842ab85262c2923f88ac", "63675168", "5787", "91"}, + args: args{"029c0010517a011588745287047c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d250000b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa38400612709393588893935687000e64635167006868000e7651935188008702effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac7502"}, + want: []string{"9c", "517a011588745287", "709393588893935687", "64635167006868", "76519351880087"}, + want2: [][]hexoutpoint{ + []hexoutpoint{}, + []hexoutpoint{ + hexoutpoint{"7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25", 0}, + 
hexoutpoint{"00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840", 3}, + }, + []hexoutpoint{}, + []hexoutpoint{}, + []hexoutpoint{ + hexoutpoint{"effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75", 1}, + }, + }, }, } for _, tt := range tests { @@ -592,7 +619,7 @@ func Test_unpackBlockAddresses(t *testing.T) { if err != nil { panic(err) } - got, err := unpackBlockAddresses(b) + got, got2, err := d.unpackBlockAddresses(b) if (err != nil) != tt.wantErr { t.Errorf("unpackBlockAddresses() error = %v, wantErr %v", err, tt.wantErr) return @@ -602,7 +629,18 @@ func Test_unpackBlockAddresses(t *testing.T) { h[i] = hex.EncodeToString(g) } if !reflect.DeepEqual(h, tt.want) { - t.Errorf("unpackBlockAddresses() = %v, want %v", got, tt.want) + t.Errorf("unpackBlockAddresses() = %v, want %v", h, tt.want) + } + h2 := make([][]hexoutpoint, len(got2)) + for i, g := range got2 { + ho := make([]hexoutpoint, len(g)) + for j, o := range g { + ho[j] = hexoutpoint{hex.EncodeToString(o.btxID), o.vout} + } + h2[i] = ho + } + if !reflect.DeepEqual(h2, tt.want2) { + t.Errorf("unpackBlockAddresses() = %v, want %v", h2, tt.want2) } }) } From 85beeb938cbda6197113fa4938c0f50acd1103ce Mon Sep 17 00:00:00 2001 From: Jakub Matys Date: Wed, 25 Apr 2018 13:00:51 +0200 Subject: [PATCH 20/37] moved `static` directory and upgraded installation of certs --- build/deb/build-deb.sh | 3 ++- build/deb/debian/blockbook-bch-testnet.install | 4 ++-- build/deb/debian/blockbook-bch.install | 4 ++-- build/deb/debian/blockbook-btc-testnet.install | 4 ++-- build/deb/debian/blockbook-btc.install | 4 ++-- build/deb/debian/blockbook-zec.install | 4 ++-- {server/static => static}/test.html | 0 7 files changed, 12 insertions(+), 11 deletions(-) rename {server/static => static}/test.html (100%) diff --git a/build/deb/build-deb.sh b/build/deb/build-deb.sh index 21aabdde..4b97be27 100755 --- a/build/deb/build-deb.sh +++ b/build/deb/build-deb.sh @@ -3,7 +3,8 @@ set -e cp -r /src/build/deb/debian . 
cp -r /src/configs . -mkdir server && cp -r /src/server/testcert.* /src/server/static server +cp -r /src/static static +mkdir cert && cp /src/server/testcert.* cert dpkg-buildpackage -us -uc mv ../*.deb /out diff --git a/build/deb/debian/blockbook-bch-testnet.install b/build/deb/debian/blockbook-bch-testnet.install index ac6b38e6..b145f532 100755 --- a/build/deb/debian/blockbook-bch-testnet.install +++ b/build/deb/debian/blockbook-bch-testnet.install @@ -1,5 +1,5 @@ #!/usr/bin/dh-exec blockbook /opt/blockbook/bch-testnet/bin -server/testcert.* /opt/blockbook/bch-testnet/cert -server/static /opt/blockbook/bch-testnet +cert /opt/blockbook/bch-testnet +static /opt/blockbook/bch-testnet configs/bch-testnet.json => /opt/blockbook/bch-testnet/config/blockchaincfg.json diff --git a/build/deb/debian/blockbook-bch.install b/build/deb/debian/blockbook-bch.install index bec00277..33f4a700 100755 --- a/build/deb/debian/blockbook-bch.install +++ b/build/deb/debian/blockbook-bch.install @@ -1,5 +1,5 @@ #!/usr/bin/dh-exec blockbook /opt/blockbook/bch/bin -server/testcert.* /opt/blockbook/bch/cert -server/static /opt/blockbook/bch +cert /opt/blockbook/bch +static /opt/blockbook/bch configs/bch.json => /opt/blockbook/bch/config/blockchaincfg.json diff --git a/build/deb/debian/blockbook-btc-testnet.install b/build/deb/debian/blockbook-btc-testnet.install index cc143232..e648ad35 100755 --- a/build/deb/debian/blockbook-btc-testnet.install +++ b/build/deb/debian/blockbook-btc-testnet.install @@ -1,5 +1,5 @@ #!/usr/bin/dh-exec blockbook /opt/blockbook/btc-testnet/bin -server/testcert.* /opt/blockbook/btc-testnet/cert -server/static /opt/blockbook/btc-testnet +cert /opt/blockbook/btc-testnet +static /opt/blockbook/btc-testnet configs/btc-testnet.json => /opt/blockbook/btc-testnet/config/blockchaincfg.json diff --git a/build/deb/debian/blockbook-btc.install b/build/deb/debian/blockbook-btc.install index 29961515..27a11fc0 100755 --- a/build/deb/debian/blockbook-btc.install +++ 
b/build/deb/debian/blockbook-btc.install @@ -1,5 +1,5 @@ #!/usr/bin/dh-exec blockbook /opt/blockbook/btc/bin -server/testcert.* /opt/blockbook/btc/cert -server/static /opt/blockbook/btc +cert /opt/blockbook/btc +static /opt/blockbook/btc configs/btc.json => /opt/blockbook/btc/config/blockchaincfg.json diff --git a/build/deb/debian/blockbook-zec.install b/build/deb/debian/blockbook-zec.install index d34066f5..caf66e51 100755 --- a/build/deb/debian/blockbook-zec.install +++ b/build/deb/debian/blockbook-zec.install @@ -1,5 +1,5 @@ #!/usr/bin/dh-exec --with=install blockbook /opt/blockbook/zec/bin -server/testcert.* /opt/blockbook/zec/cert -server/static /opt/blockbook/zec +cert /opt/blockbook/zec +static /opt/blockbook/zec configs/zec.json => /opt/blockbook/zec/config/blockchaincfg.json diff --git a/server/static/test.html b/static/test.html similarity index 100% rename from server/static/test.html rename to static/test.html From a929f27d5ce9de5a0278ee2275a7c89e15c4e8e6 Mon Sep 17 00:00:00 2001 From: Jakub Matys Date: Wed, 25 Apr 2018 13:38:03 +0200 Subject: [PATCH 21/37] implemented `estimatesmartfee` method --- bchain/coins/bch/bcashrpc.go | 36 ++++++++++++++++++++++++++++++++++++ static/test.html | 28 ++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+) diff --git a/bchain/coins/bch/bcashrpc.go b/bchain/coins/bch/bcashrpc.go index 222a7b78..cb23030e 100644 --- a/bchain/coins/bch/bcashrpc.go +++ b/bchain/coins/bch/bcashrpc.go @@ -81,6 +81,23 @@ type resGetBlockThin struct { Result bchain.ThinBlock `json:"result"` } +// estimatesmartfee + +type cmdEstimateSmartFee struct { + Method string `json:"method"` + Params struct { + Blocks int `json:"nblocks"` + } `json:"params"` +} + +type resEstimateSmartFee struct { + Error *bchain.RPCError `json:"error"` + Result struct { + Feerate float64 `json:"feerate"` + Blocks int `json:"blocks"` + } `json:"result"` +} + // GetBlock returns block with given hash. 
func (b *BCashRPC) GetBlock(hash string, height uint32) (*bchain.Block, error) { var err error @@ -174,6 +191,25 @@ func (b *BCashRPC) GetBlockFull(hash string) (*bchain.Block, error) { return nil, errors.New("Not implemented") } +// EstimateSmartFee returns fee estimation. +func (b *BCashRPC) EstimateSmartFee(blocks int, conservative bool) (float64, error) { + glog.V(1).Info("rpc: estimatesmartfee ", blocks) + + res := resEstimateSmartFee{} + req := cmdEstimateSmartFee{Method: "estimatesmartfee"} + req.Params.Blocks = blocks + // conservative param is omitted + err := b.Call(&req, &res) + + if err != nil { + return 0, err + } + if res.Error != nil { + return 0, res.Error + } + return res.Result.Feerate, nil +} + func isErrBlockNotFound(err *bchain.RPCError) bool { return err.Message == "Block not found" || err.Message == "Block height out of range" diff --git a/static/test.html b/static/test.html index 99097265..45c27e86 100644 --- a/static/test.html +++ b/static/test.html @@ -141,6 +141,21 @@ return socket.send({ method, params }, f); } + function estimateFee() { + var blocks = document.getElementById('estimateFeeBlocks').value.trim(); + estimateTxFee(parseInt(blocks), function (result) { + console.log('estimateFee sent successfully'); + console.log(result); + document.getElementById('estimateFeeResult').innerText = JSON.stringify(result).replace(/,/g, ", "); + }); + } + + function estimateTxFee(blocks, f) { + const method = 'estimateFee'; + const params = [blocks]; + return socket.send({ method, params }, f); + } + function getInfo() { lookupSyncStatus(function (result) { console.log('getInfo sent successfully'); @@ -324,6 +339,19 @@
+
+
+ +
+
+ +
+
+
+
+
+
+
From f05b1175bf72f44fc874a4efef3c7588a804d4e6 Mon Sep 17 00:00:00 2001 From: Jakub Matys Date: Thu, 26 Apr 2018 11:02:03 +0200 Subject: [PATCH 22/37] fixed typo --- contrib/backends/bcash/debian/bcash-bch.postinst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/backends/bcash/debian/bcash-bch.postinst b/contrib/backends/bcash/debian/bcash-bch.postinst index 2f0885aa..bea81f65 100644 --- a/contrib/backends/bcash/debian/bcash-bch.postinst +++ b/contrib/backends/bcash/debian/bcash-bch.postinst @@ -9,7 +9,7 @@ case "$1" in useradd --system -M -U bitcoin fi - if [ "$(stat -c '%U' /data/btc/bitcoin)" != "bitcoin" ] + if [ "$(stat -c '%U' /data/bch/bitcoin)" != "bitcoin" ] then chown bitcoin:bitcoin /data/bch/bitcoin fi From 22af986121ced0ee0efb215ac1f9cd5dbedbd627 Mon Sep 17 00:00:00 2001 From: Martin Boehm Date: Thu, 26 Apr 2018 19:50:22 +0200 Subject: [PATCH 23/37] Implement DisconnectBlocks in index v2 - WIP --- Gopkg.lock | 2 +- db/rocksdb.go | 134 +++++++++++++++++++++------------------------ db/rocksdb_test.go | 32 +++++++++-- 3 files changed, 89 insertions(+), 79 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index a40eb931..9bd61bee 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -190,6 +190,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "e632a1e904953397e9eae00f30a86bffab2d303232c7bac47a16e1ce663043bf" + inputs-digest = "3e3bcaeb80d40bd8073342d32dbc57e4266fba7b8dfa00fc90bc6184e03ab96f" solver-name = "gps-cdcl" solver-version = 1 diff --git a/db/rocksdb.go b/db/rocksdb.go index 64552f6f..9c220b40 100644 --- a/db/rocksdb.go +++ b/db/rocksdb.go @@ -148,14 +148,8 @@ func (d *RocksDB) GetTransactions(address string, lower uint32, higher uint32, f return err } - kstart, err := packAddressKey(addrID, lower) - if err != nil { - return err - } - kstop, err := packAddressKey(addrID, higher) - if err != nil { - return err - } + kstart := packAddressKey(addrID, lower) + kstop := packAddressKey(addrID, higher) it := 
d.db.NewIteratorCF(d.ro, d.cfh[cfAddresses]) defer it.Close() @@ -248,14 +242,14 @@ type outpoint struct { vout int32 } -func (d *RocksDB) packBlockAddress(addrID []byte, removedUnspentTxs map[string][]outpoint) []byte { +func (d *RocksDB) packBlockAddress(addrID []byte, spentTxs map[string][]outpoint) []byte { vBuf := make([]byte, vlq.MaxLen32) vl := packVarint(int32(len(addrID)), vBuf) blockAddress := append([]byte(nil), vBuf[:vl]...) blockAddress = append(blockAddress, addrID...) - if removedUnspentTxs == nil { + if spentTxs == nil { } else { - addrUnspentTxs := removedUnspentTxs[string(addrID)] + addrUnspentTxs := spentTxs[string(addrID)] vl = packVarint(int32(len(addrUnspentTxs)), vBuf) blockAddress = append(blockAddress, vBuf[:vl]...) buf := d.packOutpoints(addrUnspentTxs) @@ -264,16 +258,12 @@ func (d *RocksDB) packBlockAddress(addrID []byte, removedUnspentTxs map[string][ return blockAddress } -func (d *RocksDB) writeAddressRecords(wb *gorocksdb.WriteBatch, block *bchain.Block, op int, records map[string][]outpoint, removedUnspentTxs map[string][]outpoint) error { +func (d *RocksDB) writeAddressRecords(wb *gorocksdb.WriteBatch, block *bchain.Block, op int, addresses map[string][]outpoint, spentTxs map[string][]outpoint) error { keep := d.chainParser.KeepBlockAddresses() blockAddresses := make([]byte, 0) - for addrID, outpoints := range records { + for addrID, outpoints := range addresses { baddrID := []byte(addrID) - key, err := packAddressKey(baddrID, block.Height) - if err != nil { - glog.Warningf("rocksdb: packOutputKey: %v - %d %s", err, block.Height, addrID) - continue - } + key := packAddressKey(baddrID, block.Height) switch op { case opInsert: val := d.packOutpoints(outpoints) @@ -281,7 +271,7 @@ func (d *RocksDB) writeAddressRecords(wb *gorocksdb.WriteBatch, block *bchain.Bl if keep > 0 { // collect all addresses be stored in blockaddresses // they are used in disconnect blocks - blockAddress := d.packBlockAddress(baddrID, removedUnspentTxs) + 
blockAddress := d.packBlockAddress(baddrID, spentTxs) blockAddresses = append(blockAddresses, blockAddress...) } case opDelete: @@ -289,7 +279,7 @@ func (d *RocksDB) writeAddressRecords(wb *gorocksdb.WriteBatch, block *bchain.Bl } } if keep > 0 && op == opInsert { - // write new block address + // write new block address and txs spent in this block key := packUint(block.Height) wb.PutCF(d.cfh[cfBlockAddresses], key, blockAddresses) // cleanup old block address @@ -384,7 +374,6 @@ func (d *RocksDB) writeAddressesUTXO(wb *gorocksdb.WriteBatch, block *bchain.Blo } addresses := make(map[string][]outpoint) unspentTxs := make(map[string][]byte) - removedUnspentTxs := make(map[string][]outpoint) btxIDs := make([][]byte, len(block.Txs)) // first process all outputs, build mapping of addresses to outpoints and mappings of unspent txs to addresses for txi, tx := range block.Txs { @@ -412,7 +401,9 @@ func (d *RocksDB) writeAddressesUTXO(wb *gorocksdb.WriteBatch, block *bchain.Blo } unspentTxs[string(btxID)] = txAddrs } - // locate addresses spent by this tx and add them to addresses map them in format txid ^index + // locate addresses spent by this tx and remove them from unspent addresses + // keep them so that they be stored for DisconnectBlock functionality + spentTxs := make(map[string][]outpoint) for txi, tx := range block.Txs { spendingTxid := btxIDs[txi] for i, input := range tx.Vin { @@ -428,6 +419,7 @@ func (d *RocksDB) writeAddressesUTXO(wb *gorocksdb.WriteBatch, block *bchain.Blo stxID := string(btxID) unspentAddrs, inThisBlock := unspentTxs[stxID] if !inThisBlock { + // else find it in previous blocks unspentAddrs, err = d.getUnspentTx(btxID) if err != nil { return err @@ -443,9 +435,14 @@ func (d *RocksDB) writeAddressesUTXO(wb *gorocksdb.WriteBatch, block *bchain.Blo glog.Warningf("rocksdb: height %d, tx %v vin %v in inputs but missing in unspentTxs", block.Height, tx.Txid, i) continue } - rut := removedUnspentTxs[string(addrID)] - rut = append(rut, 
outpoint{btxID, int32(input.Vout)}) - removedUnspentTxs[string(addrID)] = rut + // record what was removed from unspentTx + // skip transactions that were created in this block + saddrID := string(addrID) + if _, exists := addresses[saddrID]; !exists { + rut := spentTxs[saddrID] + rut = append(rut, outpoint{btxID, int32(input.Vout)}) + spentTxs[saddrID] = rut + } err = d.addAddrIDToRecords(op, wb, addresses, addrID, spendingTxid, int32(^i), block.Height) if err != nil { return err @@ -453,7 +450,7 @@ func (d *RocksDB) writeAddressesUTXO(wb *gorocksdb.WriteBatch, block *bchain.Blo unspentTxs[stxID] = unspentAddrs } } - if err := d.writeAddressRecords(wb, block, op, addresses, removedUnspentTxs); err != nil { + if err := d.writeAddressRecords(wb, block, op, addresses, spentTxs); err != nil { return err } // save unspent txs from current block @@ -545,7 +542,7 @@ func (d *RocksDB) unpackOutpoints(buf []byte) ([]outpoint, error) { txidUnpackedLen := d.chainParser.PackedTxidLen() outpoints := make([]outpoint, 0) for i := 0; i < len(buf); { - btxID := buf[i : i+txidUnpackedLen] + btxID := append([]byte(nil), buf[i:i+txidUnpackedLen]...) i += txidUnpackedLen vout, voutLen := unpackVarint(buf[i:]) i += voutLen @@ -565,7 +562,7 @@ func (d *RocksDB) unpackNOutpoints(buf []byte) ([]outpoint, int, error) { if p+txidUnpackedLen >= len(buf) { return nil, 0, errors.New("Inconsistent data in unpackNOutpoints") } - btxID := buf[p : p+txidUnpackedLen] + btxID := append([]byte(nil), buf[p:p+txidUnpackedLen]...) 
p += txidUnpackedLen vout, voutLen := unpackVarint(buf[p:]) p += voutLen @@ -659,7 +656,7 @@ func (d *RocksDB) getBlockAddresses(key []byte) ([][]byte, [][]outpoint, error) return d.unpackBlockAddresses(b.Data()) } -func (d *RocksDB) fullAddressesScan(lower uint32, higher uint32) ([][]byte, [][]byte, error) { +func (d *RocksDB) allAddressesScan(lower uint32, higher uint32) ([][]byte, [][]byte, error) { glog.Infof("db: doing full scan of addresses column") addrKeys := [][]byte{} addrValues := [][]byte{} @@ -708,81 +705,74 @@ func (d *RocksDB) fullAddressesScan(lower uint32, higher uint32) ([][]byte, [][] // it finds the data in blockaddresses column if available, // otherwise by doing quite slow full scan of addresses column func (d *RocksDB) DisconnectBlockRange(lower uint32, higher uint32) error { - - // TODO - it is still a mess - glog.Infof("db: disconnecting blocks %d-%d", lower, higher) addrKeys := [][]byte{} - addrValues := [][]byte{} + addrOutpoints := [][]byte{} addrUnspentOutpoints := [][]outpoint{} keep := d.chainParser.KeepBlockAddresses() var err error - doFullScan := true if keep > 0 { for height := lower; height <= higher; height++ { - key := packUint(height) - addresses, unspentOutpoints, err := d.getBlockAddresses(key) + addresses, unspentOutpoints, err := d.getBlockAddresses(packUint(height)) if err != nil { glog.Error(err) - goto GoFullScan + return err } for i, addrID := range addresses { - addrKey := append(addrID, key...) + addrKey := packAddressKey(addrID, height) val, err := d.db.GetCF(d.ro, d.cfh[cfAddresses], addrKey) if err != nil { - goto GoFullScan + glog.Error(err) + return err } addrKeys = append(addrKeys, addrKey) - addrValue := append([]byte(nil), val.Data()...) - addrUnspentOutpoints = append(addrUnspentOutpoints, unspentOutpoints[i]) + av := append([]byte(nil), val.Data()...) 
val.Free() - addrValues = append(addrValues, addrValue) + addrOutpoints = append(addrOutpoints, av) + addrUnspentOutpoints = append(addrUnspentOutpoints, unspentOutpoints[i]) } } - doFullScan = false - GoFullScan: - } - if doFullScan { - addrKeys, addrValues, err = d.fullAddressesScan(lower, higher) + } else { + addrKeys, addrOutpoints, err = d.allAddressesScan(lower, higher) if err != nil { return err } } + glog.Infof("rocksdb: about to disconnect %d addresses ", len(addrKeys)) wb := gorocksdb.NewWriteBatch() defer wb.Destroy() unspentTxs := make(map[string][]byte) - for i, addrKey := range addrKeys { + for addrIndex, addrKey := range addrKeys { if glog.V(2) { glog.Info("address ", hex.EncodeToString(addrKey)) } + // delete address:height from the index wb.DeleteCF(d.cfh[cfAddresses], addrKey) - outpoints := addrUnspentOutpoints[i] - addrID, height, err := unpackAddressKey(addrKey) + addrID, _, err := unpackAddressKey(addrKey) + if err != nil { + return err + } + // recreate unspentTxs, which were spent by this block (that is being disconnected) + for _, o := range addrUnspentOutpoints[addrIndex] { + stxID := string(o.btxID) + txAddrs, exists := unspentTxs[stxID] + if !exists { + txAddrs, err = d.getUnspentTx(o.btxID) + if err != nil { + return err + } + } + txAddrs = appendPackedAddrID(txAddrs, addrID, uint32(o.vout), 1) + unspentTxs[stxID] = txAddrs + } + // delete unspentTxs from this block + outpoints, err := d.unpackOutpoints(addrOutpoints[addrIndex]) if err != nil { return err } for _, o := range outpoints { - if glog.V(2) { - glog.Info("tx ", height, " ", hex.EncodeToString(o.btxID), " ", o.vout) - } - // recreate unspentTxs from inputs - if o.vout < 0 { - stxID := string(o.btxID) - txAddrs, exists := unspentTxs[stxID] - if !exists { - txAddrs, err = d.getUnspentTx(o.btxID) - if err != nil { - return err - } - } - txAddrs = appendPackedAddrID(txAddrs, addrID, uint32(^o.vout), 1) - unspentTxs[stxID] = txAddrs - } else { - // remove from cfUnspentTxs - 
wb.DeleteCF(d.cfh[cfUnspentTxs], o.btxID) - } - // delete cached transaction + wb.DeleteCF(d.cfh[cfUnspentTxs], o.btxID) wb.DeleteCF(d.cfh[cfTransactions], o.btxID) } } @@ -869,12 +859,12 @@ func (d *RocksDB) DeleteTx(txid string) error { // Helpers -func packAddressKey(addrID []byte, height uint32) ([]byte, error) { +func packAddressKey(addrID []byte, height uint32) []byte { bheight := packUint(height) buf := make([]byte, 0, len(addrID)+len(bheight)) buf = append(buf, addrID...) buf = append(buf, bheight...) - return buf, nil + return buf } func unpackAddressKey(key []byte) ([]byte, uint32, error) { diff --git a/db/rocksdb_test.go b/db/rocksdb_test.go index d354643d..4c99cebd 100644 --- a/db/rocksdb_test.go +++ b/db/rocksdb_test.go @@ -83,11 +83,11 @@ func checkColumn(d *RocksDB, col int, kp []keyPair) error { i := 0 for it.SeekToFirst(); it.Valid(); it.Next() { if i >= len(kp) { - return errors.Errorf("Expected less rows in column %v", col) + return errors.Errorf("Expected less rows in column %v", cfNames[col]) } key := hex.EncodeToString(it.Key().Data()) if key != kp[i].Key { - return errors.Errorf("Incorrect key %v found in column %v row %v, expecting %v", key, col, i, kp[i].Key) + return errors.Errorf("Incorrect key %v found in column %v row %v, expecting %v", key, cfNames[col], i, kp[i].Key) } val := hex.EncodeToString(it.Value().Data()) var valOK bool @@ -97,12 +97,12 @@ func checkColumn(d *RocksDB, col int, kp []keyPair) error { valOK = kp[i].CompareFunc(val) } if !valOK { - return errors.Errorf("Incorrect value %v found in column %v row %v, expecting %v", val, col, i, kp[i].Value) + return errors.Errorf("Incorrect value %v found in column %v row %v, expecting %v", val, cfNames[col], i, kp[i].Value) } i++ } if i != len(kp) { - return errors.Errorf("Expected more rows in column %v: got %v, expected %v", col, i, len(kp)) + return errors.Errorf("Expected more rows in column %v: got %v, expected %v", cfNames[col], i, len(kp)) } return nil } @@ -337,7 +337,7 @@ 
func verifyAfterUTXOBlock2(t *testing.T, d *RocksDB) { keyPair{"000370d6", "", func(v string) bool { return compareFuncBlockAddresses(t, v, []string{ - addressToPubKeyHexWithLength("mzB8cYrfRwFRFAGTDzV8LkUQy5BQicxGhX", t, d) + "02" + "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25" + "00", + addressToPubKeyHexWithLength("mzB8cYrfRwFRFAGTDzV8LkUQy5BQicxGhX", t, d) + "00", //+ "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25" + "00", addressToPubKeyHexWithLength("mtR97eM2HPWVM6c8FGLGcukgaHHQv7THoL", t, d) + "00", addressToPubKeyHexWithLength("mwwoKQE5Lb1G4picHSHDQKg8jw424PF9SC", t, d) + "00", addressToPubKeyHexWithLength("mmJx9Y8ayz9h14yd9fgCW1bUKoEpkBAquP", t, d) + "00", @@ -422,7 +422,7 @@ func testTxCache(t *testing.T, d *RocksDB, b *bchain.Block, tx *bchain.Tx) { // 4) Test tx caching functionality // 5) Disconnect block 2 - expect error // 6) Disconnect the block 2 using blockaddresses column -// 7) Reconnect block 2 and disconnect blocks 1 and 2 using full scan +// 7) Reconnect block 2 and disconnect blocks 1 and 2 using full scan - expect error // After each step, the content of DB is examined and any difference against expected state is regarded as failure func TestRocksDB_Index_UTXO(t *testing.T) { d := setupRocksDB(t, &testBitcoinParser{BitcoinParser: &btc.BitcoinParser{Params: btc.GetChainParams("test")}}) @@ -612,6 +612,26 @@ func Test_unpackBlockAddresses(t *testing.T) { }, }, }, + { + name: "1", + args: 
args{"3276A914B434EB0C1A3B7A02E8A29CC616E791EF1E0BF51F88AC003276A9143F8BA3FDA3BA7B69F5818086E12223C6DD25E3C888AC003276A914A08EAE93007F22668AB5E4A9C83C8CD1C325E3E088AC02EFFD9EF509383D536B1C8AF5BF434C8EFBF521A4F2BEFD4022BBD68694B4AC75003276A9148BDF0AA3C567AA5975C2E61321B8BEBBE7293DF688AC0200B2C06055E5E90E9C82BD4181FDE310104391A7FA4F289B1704E5D90CAA3840022EA9144A21DB08FB6882CB152E1FF06780A430740F77048702EFFD9EF509383D536B1C8AF5BF434C8EFBF521A4F2BEFD4022BBD68694B4AC75023276A914CCAAAF374E1B06CB83118453D102587B4273D09588AC003276A9148D802C045445DF49613F6A70DDD2E48526F3701F88AC00"}, + want: []string{"76a914b434eb0c1a3b7a02e8a29cc616e791ef1e0bf51f88ac", "76a9143f8ba3fda3ba7b69f5818086e12223c6dd25e3c888ac", "76a914a08eae93007f22668ab5e4a9c83c8cd1c325e3e088ac", "76a9148bdf0aa3c567aa5975c2e61321b8bebbe7293df688ac", "a9144a21db08fb6882cb152e1ff06780a430740f770487", "76a914ccaaaf374e1b06cb83118453d102587b4273d09588ac", "76a9148d802c045445df49613f6a70ddd2e48526f3701f88ac"}, + want2: [][]hexoutpoint{ + []hexoutpoint{}, + []hexoutpoint{}, + []hexoutpoint{ + hexoutpoint{"effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75", 0}, + }, + []hexoutpoint{ + hexoutpoint{"00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840", 1}, + }, + []hexoutpoint{ + hexoutpoint{"effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75", 1}, + }, + []hexoutpoint{}, + []hexoutpoint{}, + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { From ab2ab365a97a094f38df0e4082d80db2294f06fe Mon Sep 17 00:00:00 2001 From: Jakub Matys Date: Fri, 27 Apr 2018 10:53:33 +0200 Subject: [PATCH 24/37] added `subversion` field to result of getinfo --- bchain/coins/blockchain.go | 4 ++++ bchain/coins/btc/bitcoinrpc.go | 7 +++++++ bchain/coins/eth/ethrpc.go | 4 ++++ bchain/types.go | 1 + configs/bch-testnet.json | 3 ++- configs/bch.json | 3 ++- server/socketio.go | 1 + 7 files changed, 21 insertions(+), 2 deletions(-) diff --git a/bchain/coins/blockchain.go 
b/bchain/coins/blockchain.go index 39f01e6e..c90332ae 100644 --- a/bchain/coins/blockchain.go +++ b/bchain/coins/blockchain.go @@ -85,6 +85,10 @@ func (c *blockChainWithMetrics) GetNetworkName() string { return c.b.GetNetworkName() } +func (c *blockChainWithMetrics) GetSubversion() string { + return c.b.GetSubversion() +} + func (c *blockChainWithMetrics) GetBestBlockHash() (v string, err error) { defer func(s time.Time) { c.observeRPCLatency("GetBestBlockHash", s, err) }(time.Now()) return c.b.GetBestBlockHash() diff --git a/bchain/coins/btc/bitcoinrpc.go b/bchain/coins/btc/bitcoinrpc.go index af6a3617..cb894517 100644 --- a/bchain/coins/btc/bitcoinrpc.go +++ b/bchain/coins/btc/bitcoinrpc.go @@ -29,6 +29,7 @@ type BitcoinRPC struct { Mempool *bchain.UTXOMempool ParseBlocks bool mq *bchain.MQ + Subversion string } type configuration struct { @@ -38,6 +39,7 @@ type configuration struct { RPCTimeout int `json:"rpcTimeout"` Parse bool `json:"parse"` ZeroMQBinding string `json:"zeroMQBinding"` + Subversion string `json:"subversion"` } // NewBitcoinRPC returns new BitcoinRPC instance. 
@@ -60,6 +62,7 @@ func NewBitcoinRPC(config json.RawMessage, pushHandler func(bchain.NotificationT user: c.RPCUser, password: c.RPCPass, ParseBlocks: c.Parse, + Subversion: c.Subversion, } mq, err := bchain.NewMQ(c.ZeroMQBinding, pushHandler) @@ -119,6 +122,10 @@ func (b *BitcoinRPC) GetNetworkName() string { return b.Network } +func (b *BitcoinRPC) GetSubversion() string { + return b.Subversion +} + // getblockhash type cmdGetBlockHash struct { diff --git a/bchain/coins/eth/ethrpc.go b/bchain/coins/eth/ethrpc.go index 01137fb9..26c844bc 100644 --- a/bchain/coins/eth/ethrpc.go +++ b/bchain/coins/eth/ethrpc.go @@ -240,6 +240,10 @@ func (b *EthereumRPC) GetNetworkName() string { return b.Network } +func (b *EthereumRPC) GetSubversion() string { + return "" +} + func (b *EthereumRPC) getBestHeader() (*ethtypes.Header, error) { b.bestHeaderMu.Lock() defer b.bestHeaderMu.Unlock() diff --git a/bchain/types.go b/bchain/types.go index ddc49f4b..a2f4d283 100644 --- a/bchain/types.go +++ b/bchain/types.go @@ -123,6 +123,7 @@ type BlockChain interface { // chain info IsTestnet() bool GetNetworkName() string + GetSubversion() string // requests GetBestBlockHash() (string, error) GetBestBlockHeight() (uint32, error) diff --git a/configs/bch-testnet.json b/configs/bch-testnet.json index 606eb66a..d95bc22d 100644 --- a/configs/bch-testnet.json +++ b/configs/bch-testnet.json @@ -4,5 +4,6 @@ "rpcPass": "rpc", "rpcTimeout": 25, "parse": true, - "zeroMQBinding": "tcp://127.0.0.1:18434" + "zeroMQBinding": "tcp://127.0.0.1:18434", + "subversion": "/Bitcoin ABC:0.17.0(EB8.0; bitcore-sl)/" } diff --git a/configs/bch.json b/configs/bch.json index e124c7af..4a4a60db 100644 --- a/configs/bch.json +++ b/configs/bch.json @@ -4,5 +4,6 @@ "rpcPass": "rpc", "rpcTimeout": 25, "parse": true, - "zeroMQBinding": "tcp://127.0.0.1:8434" + "zeroMQBinding": "tcp://127.0.0.1:8434", + "subversion": "/Bitcoin ABC:0.17.0(EB8.0; bitcore-sl)/" } diff --git a/server/socketio.go b/server/socketio.go index 
f7cb7007..f9f6e9d4 100644 --- a/server/socketio.go +++ b/server/socketio.go @@ -599,6 +599,7 @@ func (s *SocketIoServer) getInfo() (res resultGetInfo, err error) { res.Result.Blocks = int(height) res.Result.Testnet = s.chain.IsTestnet() res.Result.Network = s.chain.GetNetworkName() + res.Result.Subversion = s.chain.GetSubversion() return } From 5eb9f613a5d86394fa45ebdb9614434bbffd1169 Mon Sep 17 00:00:00 2001 From: Jakub Matys Date: Fri, 27 Apr 2018 11:09:44 +0200 Subject: [PATCH 25/37] fixed tests --- bchain/coins/btc/bitcoinparser_test.go | 3 +++ bchain/coins/zec/zcashparser_test.go | 3 +++ 2 files changed, 6 insertions(+) diff --git a/bchain/coins/btc/bitcoinparser_test.go b/bchain/coins/btc/bitcoinparser_test.go index 35adacea..6982128f 100644 --- a/bchain/coins/btc/bitcoinparser_test.go +++ b/bchain/coins/btc/bitcoinparser_test.go @@ -135,6 +135,7 @@ var testTx1 = bchain.Tx{ "3AZKvpKhSh1o8t1QrX3UeXG9d2BhCRnbcK", }, }, + Address: bchain.NewBaseAddress("3AZKvpKhSh1o8t1QrX3UeXG9d2BhCRnbcK"), }, }, } @@ -165,6 +166,7 @@ var testTx2 = bchain.Tx{ "2NByHN6A8QYkBATzxf4pRGbCSHD5CEN2TRu", }, }, + Address: bchain.NewBaseAddress("2NByHN6A8QYkBATzxf4pRGbCSHD5CEN2TRu"), }, { Value: 9.20081157, @@ -175,6 +177,7 @@ var testTx2 = bchain.Tx{ "2MvZguYaGjM7JihBgNqgLF2Ca2Enb76Hj9D", }, }, + Address: bchain.NewBaseAddress("2MvZguYaGjM7JihBgNqgLF2Ca2Enb76Hj9D"), }, }, } diff --git a/bchain/coins/zec/zcashparser_test.go b/bchain/coins/zec/zcashparser_test.go index c7af46c3..2fdf9ed4 100644 --- a/bchain/coins/zec/zcashparser_test.go +++ b/bchain/coins/zec/zcashparser_test.go @@ -33,6 +33,7 @@ var testTx1 = bchain.Tx{ "t1Y4yL14ACHaAbjemkdpW7nYNHWnv1yQbDA", }, }, + Address: bchain.NewBaseAddress("t1Y4yL14ACHaAbjemkdpW7nYNHWnv1yQbDA"), }, }, } @@ -65,6 +66,7 @@ var testTx2 = bchain.Tx{ "t1VmHTTwpEtwvojxodN2CSQqLYi1hzY3cAq", }, }, + Address: bchain.NewBaseAddress("t1VmHTTwpEtwvojxodN2CSQqLYi1hzY3cAq"), }, { Value: .1, @@ -75,6 +77,7 @@ var testTx2 = bchain.Tx{ 
"t1ecxMXpphUTRQXGLXnVhJ6ucqD3DZipddg", }, }, + Address: bchain.NewBaseAddress("t1ecxMXpphUTRQXGLXnVhJ6ucqD3DZipddg"), }, }, } From def8ada73095b0c0419837086a0630a904d1d89d Mon Sep 17 00:00:00 2001 From: Jakub Matys Date: Fri, 27 Apr 2018 12:00:16 +0200 Subject: [PATCH 26/37] added tests of BCashParser and bcashAddress --- bchain/coins/bch/bcashparser_test.go | 250 +++++++++++++++++++++++++++ 1 file changed, 250 insertions(+) create mode 100644 bchain/coins/bch/bcashparser_test.go diff --git a/bchain/coins/bch/bcashparser_test.go b/bchain/coins/bch/bcashparser_test.go new file mode 100644 index 00000000..0bad2eb8 --- /dev/null +++ b/bchain/coins/bch/bcashparser_test.go @@ -0,0 +1,250 @@ +package bch + +import ( + "blockbook/bchain" + "blockbook/bchain/coins/btc" + "bytes" + "encoding/hex" + "reflect" + "testing" +) + +func TestBcashAddressEncodeAddress(t *testing.T) { + addr := bcashAddress{"13zMwGC5bxRn9ckJ1mgxf7UR8qbbNe2iji", GetChainParams("main")} + got1, err := addr.EncodeAddress(bchain.DefaultAddress) + if err != nil { + t.Errorf("EncodeAddress() error = %v", err) + return + } + if got1 != "13zMwGC5bxRn9ckJ1mgxf7UR8qbbNe2iji" { + t.Errorf("EncodeAddress() got1 = %v, want %v", got1, "13zMwGC5bxRn9ckJ1mgxf7UR8qbbNe2iji") + } + got2, err := addr.EncodeAddress(bchain.BCashAddress) + if err != nil { + t.Errorf("EncodeAddress() error = %v", err) + return + } + if got2 != "bitcoincash:qqsvjuqqwgyzvz7zz9xcvxent0ul2xjs6y4d9qvsrf" { + t.Errorf("EncodeAddress() got2 = %v, want %v", got2, "bitcoincash:qqsvjuqqwgyzvz7zz9xcvxent0ul2xjs6y4d9qvsrf") + } +} + +func TestBcashAddressAreEqual(t *testing.T) { + addr := bcashAddress{"13zMwGC5bxRn9ckJ1mgxf7UR8qbbNe2iji", GetChainParams("main")} + got1, err := addr.AreEqual("13zMwGC5bxRn9ckJ1mgxf7UR8qbbNe2iji") + if err != nil { + t.Errorf("AreEqual() error = %v", err) + return + } + if got1 != true { + t.Errorf("AreEqual() got1 = %v, want %v", got1, true) + } + got2, err := 
addr.AreEqual("bitcoincash:qqsvjuqqwgyzvz7zz9xcvxent0ul2xjs6y4d9qvsrf") + if err != nil { + t.Errorf("AreEqual() error = %v", err) + return + } + if got2 != true { + t.Errorf("AreEqual() got2 = %v, want %v", got2, true) + } + got3, err := addr.AreEqual("1HoKgKQh7ZNomWURmS9Tk3z8JM2MWm7S1w") + if err != nil { + t.Errorf("AreEqual() error = %v", err) + return + } + if got3 != false { + t.Errorf("AreEqual() got3 = %v, want %v", got3, false) + } + got4, err := addr.AreEqual("bitcoincash:qzuyf0gpqj7q5wfck3nyghhklju7r0k3ksmq6d0vch") + if err != nil { + t.Errorf("AreEqual() error = %v", err) + return + } + if got4 != false { + t.Errorf("AreEqual() got4 = %v, want %v", got4, false) + } +} + +func TestBcashAddressInSlice(t *testing.T) { + addr := bcashAddress{"13zMwGC5bxRn9ckJ1mgxf7UR8qbbNe2iji", GetChainParams("main")} + got1, err := addr.InSlice([]string{"13zMwGC5bxRn9ckJ1mgxf7UR8qbbNe2iji", "bitcoincash:qzuyf0gpqj7q5wfck3nyghhklju7r0k3ksmq6d0vch"}) + if err != nil { + t.Errorf("InSlice() error = %v", err) + return + } + if got1 != true { + t.Errorf("InSlice() got1 = %v, want %v", got1, true) + } + got2, err := addr.InSlice([]string{"1HoKgKQh7ZNomWURmS9Tk3z8JM2MWm7S1w", "bitcoincash:qqsvjuqqwgyzvz7zz9xcvxent0ul2xjs6y4d9qvsrf"}) + if err != nil { + t.Errorf("InSlice() error = %v", err) + return + } + if got2 != true { + t.Errorf("InSlice() got2 = %v, want %v", got2, true) + } + got3, err := addr.InSlice([]string{"1HoKgKQh7ZNomWURmS9Tk3z8JM2MWm7S1w", "1E6Np6dUPYpBSdLMLuwBF8sRQ3cngdaRRY"}) + if err != nil { + t.Errorf("InSlice() error = %v", err) + return + } + if got3 != false { + t.Errorf("InSlice() got3 = %v, want %v", got3, false) + } + got4, err := addr.InSlice([]string{"bitcoincash:qzuyf0gpqj7q5wfck3nyghhklju7r0k3ksmq6d0vch", "bitcoincash:qz8emmpenqgeg7et8xsz8prvhy6cqcalyyjcamt7e9"}) + if err != nil { + t.Errorf("InSlice() error = %v", err) + return + } + if got4 != false { + t.Errorf("InSlice() got4 = %v, want %v", got4, false) + } +} + +func 
TestAddressToOutputScript(t *testing.T) { + parser := BCashParser{&btc.BitcoinParser{Params: GetChainParams("test")}} + want, err := hex.DecodeString("76a9144fa927fd3bcf57d4e3c582c3d2eb2bd3df8df47c88ac") + if err != nil { + panic(err) + } + got1, err := parser.AddressToOutputScript("mnnAKPTSrWjgoi3uEYaQkHA1QEC5btFeBr") + if err != nil { + t.Errorf("AddressToOutputScript() error = %v", err) + return + } + if !bytes.Equal(got1, want) { + t.Errorf("AddressToOutputScript() got1 = %v, want %v", got1, want) + } + got2, err := parser.AddressToOutputScript("bchtest:qp86jfla8084048rckpv85ht90falr050s03ejaesm") + if err != nil { + t.Errorf("AddressToOutputScript() error = %v", err) + return + } + if !bytes.Equal(got2, want) { + t.Errorf("AddressToOutputScript() got2 = %v, want %v", got2, want) + } +} + +var testTx1 = bchain.Tx{ + Hex: "01000000017f9a22c9cbf54bd902400df746f138f37bcf5b4d93eb755820e974ba43ed5f42040000006a4730440220037f4ed5427cde81d55b9b6a2fd08c8a25090c2c2fff3a75c1a57625ca8a7118022076c702fe55969fa08137f71afd4851c48e31082dd3c40c919c92cdbc826758d30121029f6da5623c9f9b68a9baf9c1bc7511df88fa34c6c2f71f7c62f2f03ff48dca80feffffff019c9700000000000017a9146144d57c8aff48492c9dfb914e120b20bad72d6f8773d00700", + Blocktime: 1519053802, + Txid: "056e3d82e5ffd0e915fb9b62797d76263508c34fe3e5dbed30dd3e943930f204", + LockTime: 512115, + Vin: []bchain.Vin{ + { + ScriptSig: bchain.ScriptSig{ + Hex: "4730440220037f4ed5427cde81d55b9b6a2fd08c8a25090c2c2fff3a75c1a57625ca8a7118022076c702fe55969fa08137f71afd4851c48e31082dd3c40c919c92cdbc826758d30121029f6da5623c9f9b68a9baf9c1bc7511df88fa34c6c2f71f7c62f2f03ff48dca80", + }, + Txid: "425fed43ba74e9205875eb934d5bcf7bf338f146f70d4002d94bf5cbc9229a7f", + Vout: 4, + Sequence: 4294967294, + }, + }, + Vout: []bchain.Vout{ + { + Value: 0.00038812, + N: 0, + ScriptPubKey: bchain.ScriptPubKey{ + Hex: "a9146144d57c8aff48492c9dfb914e120b20bad72d6f87", + Addresses: []string{ + "3AZKvpKhSh1o8t1QrX3UeXG9d2BhCRnbcK", + }, + }, + Address: 
&bcashAddress{"3AZKvpKhSh1o8t1QrX3UeXG9d2BhCRnbcK", GetChainParams("main")}, + }, + }, +} +var testTxPacked1 = "0001e2408ba8d7af5401000000017f9a22c9cbf54bd902400df746f138f37bcf5b4d93eb755820e974ba43ed5f42040000006a4730440220037f4ed5427cde81d55b9b6a2fd08c8a25090c2c2fff3a75c1a57625ca8a7118022076c702fe55969fa08137f71afd4851c48e31082dd3c40c919c92cdbc826758d30121029f6da5623c9f9b68a9baf9c1bc7511df88fa34c6c2f71f7c62f2f03ff48dca80feffffff019c9700000000000017a9146144d57c8aff48492c9dfb914e120b20bad72d6f8773d00700" + +var testTx2 = bchain.Tx{ + Hex: "010000000001019d64f0c72a0d206001decbffaa722eb1044534c74eee7a5df8318e42a4323ec10000000017160014550da1f5d25a9dae2eafd6902b4194c4c6500af6ffffffff02809698000000000017a914cd668d781ece600efa4b2404dc91fd26b8b8aed8870553d7360000000017a914246655bdbd54c7e477d0ea2375e86e0db2b8f80a8702473044022076aba4ad559616905fa51d4ddd357fc1fdb428d40cb388e042cdd1da4a1b7357022011916f90c712ead9a66d5f058252efd280439ad8956a967e95d437d246710bc9012102a80a5964c5612bb769ef73147b2cf3c149bc0fd4ecb02f8097629c94ab013ffd00000000", + Blocktime: 1235678901, + Txid: "474e6795760ebe81cb4023dc227e5a0efe340e1771c89a0035276361ed733de7", + LockTime: 0, + Vin: []bchain.Vin{ + { + ScriptSig: bchain.ScriptSig{ + Hex: "160014550da1f5d25a9dae2eafd6902b4194c4c6500af6", + }, + Txid: "c13e32a4428e31f85d7aee4ec7344504b12e72aaffcbde0160200d2ac7f0649d", + Vout: 0, + Sequence: 4294967295, + }, + }, + Vout: []bchain.Vout{ + { + Value: .1, + N: 0, + ScriptPubKey: bchain.ScriptPubKey{ + Hex: "a914cd668d781ece600efa4b2404dc91fd26b8b8aed887", + Addresses: []string{ + "2NByHN6A8QYkBATzxf4pRGbCSHD5CEN2TRu", + }, + }, + Address: &bcashAddress{"2NByHN6A8QYkBATzxf4pRGbCSHD5CEN2TRu", GetChainParams("test")}, + }, + { + Value: 9.20081157, + N: 1, + ScriptPubKey: bchain.ScriptPubKey{ + Hex: "a914246655bdbd54c7e477d0ea2375e86e0db2b8f80a87", + Addresses: []string{ + "2MvZguYaGjM7JihBgNqgLF2Ca2Enb76Hj9D", + }, + }, + Address: &bcashAddress{"2MvZguYaGjM7JihBgNqgLF2Ca2Enb76Hj9D", GetChainParams("test")}, + 
}, + }, +} +var testTxPacked2 = "0007c91a899ab7da6a010000000001019d64f0c72a0d206001decbffaa722eb1044534c74eee7a5df8318e42a4323ec10000000017160014550da1f5d25a9dae2eafd6902b4194c4c6500af6ffffffff02809698000000000017a914cd668d781ece600efa4b2404dc91fd26b8b8aed8870553d7360000000017a914246655bdbd54c7e477d0ea2375e86e0db2b8f80a8702473044022076aba4ad559616905fa51d4ddd357fc1fdb428d40cb388e042cdd1da4a1b7357022011916f90c712ead9a66d5f058252efd280439ad8956a967e95d437d246710bc9012102a80a5964c5612bb769ef73147b2cf3c149bc0fd4ecb02f8097629c94ab013ffd00000000" + +func Test_UnpackTx(t *testing.T) { + type args struct { + packedTx string + parser *BCashParser + } + tests := []struct { + name string + args args + want *bchain.Tx + want1 uint32 + wantErr bool + }{ + { + name: "btc-1", + args: args{ + packedTx: testTxPacked1, + parser: &BCashParser{&btc.BitcoinParser{Params: GetChainParams("main")}}, + }, + want: &testTx1, + want1: 123456, + wantErr: false, + }, + { + name: "testnet-1", + args: args{ + packedTx: testTxPacked2, + parser: &BCashParser{&btc.BitcoinParser{Params: GetChainParams("test")}}, + }, + want: &testTx2, + want1: 510234, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b, _ := hex.DecodeString(tt.args.packedTx) + got, got1, err := tt.args.parser.UnpackTx(b) + if (err != nil) != tt.wantErr { + t.Errorf("unpackTx() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("unpackTx() got = %v, want %v", got, tt.want) + } + if got1 != tt.want1 { + t.Errorf("unpackTx() got1 = %v, want %v", got1, tt.want1) + } + }) + } +} From 0a55ca61f674bb43f0d6f084ad05846cd1c252e0 Mon Sep 17 00:00:00 2001 From: Martin Boehm Date: Sun, 29 Apr 2018 00:17:30 +0200 Subject: [PATCH 27/37] Sync using indexv2 - WIP --- blockbook.go | 4 +-- db/sync.go | 93 ++++++++++++++++++++++++++++++++-------------------- 2 files changed, 59 insertions(+), 38 deletions(-) diff --git a/blockbook.go b/blockbook.go 
index 9df288f1..7f70cbe4 100644 --- a/blockbook.go +++ b/blockbook.go @@ -235,8 +235,8 @@ func main() { return } } else if !*synchronize { - if err = syncWorker.ConnectBlocksParallelInChunks(height, until); err != nil { - glog.Error("connectBlocksParallelInChunks ", err) + if err = syncWorker.ConnectBlocksParallel(height, until); err != nil { + glog.Error("connectBlocksParallel ", err) return } } diff --git a/db/sync.go b/db/sync.go index e0216583..4557a738 100644 --- a/db/sync.go +++ b/db/sync.go @@ -115,7 +115,7 @@ func (w *SyncWorker) resyncIndex(onNewBlock func(hash string)) error { } if remoteBestHeight-w.startHeight > uint32(w.syncChunk) { glog.Infof("resync: parallel sync of blocks %d-%d, using %d workers", w.startHeight, remoteBestHeight, w.syncWorkers) - err = w.connectBlocksParallel(w.startHeight, remoteBestHeight) + err = w.ConnectBlocksParallel(w.startHeight, remoteBestHeight) if err != nil { return err } @@ -184,7 +184,7 @@ func (w *SyncWorker) connectBlocks(onNewBlock func(hash string)) error { return nil } -func (w *SyncWorker) connectBlocksParallel(lower, higher uint32) error { +func (w *SyncWorker) ConnectBlocksParallel(lower, higher uint32) error { type hashHeight struct { hash string height uint32 @@ -194,6 +194,11 @@ func (w *SyncWorker) connectBlocksParallel(lower, higher uint32) error { hch := make(chan hashHeight, w.syncWorkers) hchClosed := atomic.Value{} hchClosed.Store(false) + lastConnectedBlock := int(lower) - 1 + connectedCh := make([]chan struct{}, w.syncWorkers) + var connectedMux sync.Mutex + totalWaitDuration := time.Duration(0) + totalWaitCount := 0 work := func(i int) { defer wg.Done() var err error @@ -217,10 +222,52 @@ func (w *SyncWorker) connectBlocksParallel(lower, higher uint32) error { if w.dryRun { continue } + // check if the block is the next in line to be connected + // if not, wait for the previous block connect to complete + chi := int(hh.height) % w.syncWorkers + waitForBlock := false + waitDuration := 
time.Duration(0) + glog.Info(i, " Going to connect block ", hh.height) + connectedMux.Lock() + if uint32(lastConnectedBlock+1) != hh.height { + if connectedCh[chi] != nil { + glog.Fatal("Channel ", chi, " is not nil!") + } + connectedCh[chi] = make(chan struct{}) + waitForBlock = true + } + connectedMux.Unlock() + if waitForBlock { + start := time.Now() + glog.Info(i, " Waiting for block ", hh.height, " ", chi) + <-connectedCh[chi] + if hchClosed.Load() == true { + glog.Error("Worker ", i, " connect block error ", err, ". Exiting...") + return + } + waitDuration = time.Since(start) + connectedCh[chi] = nil + } err = w.db.ConnectBlock(block) if err != nil { glog.Error("Worker ", i, " connect block ", hh.height, " ", hh.hash, " error ", err) } + connectedMux.Lock() + if lastConnectedBlock < int(hh.height) { + lastConnectedBlock = int(hh.height) + } + chi = (chi + 1) % w.syncWorkers + if connectedCh[chi] != nil { + glog.Info(i, " closing channel ", chi) + close(connectedCh[chi]) + connectedCh[chi] = nil + } + totalWaitDuration += waitDuration + if waitDuration > 0 { + totalWaitCount++ + } + glog.Info("Connected block ", hh.height) + connectedMux.Unlock() } glog.Info("Worker ", i, " exiting...") } @@ -246,7 +293,7 @@ ConnectLoop: } hch <- hashHeight{hash, h} if h > 0 && h%1000 == 0 { - glog.Info("connecting block ", h, " ", hash) + glog.Info("connecting block ", h, " ", hash, " block wait time ", totalWaitDuration, " wait count ", totalWaitCount) } h++ } @@ -254,6 +301,13 @@ ConnectLoop: close(hch) // signal stop to workers that are in w.chain.GetBlockWithoutHeader error loop hchClosed.Store(true) + connectedMux.Lock() + for _, ch := range connectedCh { + if ch != nil { + close(ch) + } + } + connectedMux.Unlock() wg.Wait() return err } @@ -296,39 +350,6 @@ func (w *SyncWorker) connectBlockChunk(lower, higher uint32) error { return nil } -// ConnectBlocksParallelInChunks connect blocks in chunks -func (w *SyncWorker) ConnectBlocksParallelInChunks(lower, higher uint32) 
error { - var wg sync.WaitGroup - - work := func(i int) { - defer wg.Done() - - offset := uint32(w.syncChunk * i) - stride := uint32(w.syncChunk * w.syncWorkers) - - for low := lower + offset; low <= higher; low += stride { - high := low + uint32(w.syncChunk-1) - if high > higher { - high = higher - } - err := w.connectBlockChunk(low, high) - if err != nil { - if err == bchain.ErrBlockNotFound { - break - } - glog.Fatalf("connectBlocksParallel %d-%d %v", low, high, err) - } - } - } - for i := 0; i < w.syncWorkers; i++ { - wg.Add(1) - go work(i) - } - wg.Wait() - - return nil -} - func (w *SyncWorker) isBlockConnected(height uint32) (bool, error) { local, err := w.db.GetBlockHash(height) if err != nil { From df2a6b55511cb2ec7d05f3a9e40d6b2b43de80ad Mon Sep 17 00:00:00 2001 From: Martin Boehm Date: Sun, 29 Apr 2018 21:35:45 +0200 Subject: [PATCH 28/37] Sync using indexv2 - WIP --- db/sync.go | 151 +++++++++++++++-------------------------------------- 1 file changed, 42 insertions(+), 109 deletions(-) diff --git a/db/sync.go b/db/sync.go index 4557a738..fb681bd4 100644 --- a/db/sync.go +++ b/db/sync.go @@ -175,6 +175,9 @@ func (w *SyncWorker) connectBlocks(onNewBlock func(hash string)) error { if onNewBlock != nil { onNewBlock(res.block.Hash) } + if res.block.Height > 0 && res.block.Height%1000 == 0 { + glog.Info("connected block ", res.block.Height, " ", res.block.Hash) + } } if lastRes.block != nil { @@ -191,15 +194,27 @@ func (w *SyncWorker) ConnectBlocksParallel(lower, higher uint32) error { } var err error var wg sync.WaitGroup + bch := make(chan *bchain.Block, w.syncWorkers) hch := make(chan hashHeight, w.syncWorkers) hchClosed := atomic.Value{} hchClosed.Store(false) - lastConnectedBlock := int(lower) - 1 - connectedCh := make([]chan struct{}, w.syncWorkers) - var connectedMux sync.Mutex + var getBlockMux sync.Mutex + getBlockCond := sync.NewCond(&getBlockMux) totalWaitDuration := time.Duration(0) - totalWaitCount := 0 - work := func(i int) { + 
lastConnectedBlock := int(lower) - 1 + writeBlockDone := make(chan struct{}) + writeBlock := func() { + defer close(writeBlockDone) + for b := range bch { + // glog.Info("WriteBlock ", b.Height) + err = w.db.ConnectBlock(b) + if err != nil { + glog.Error("WriteBlock worker ", b.Height, " ", b.Hash, " error ", err) + } + } + glog.Info("WriteBlock exiting...") + } + getBlock := func(i int) { defer wg.Done() var err error var block *bchain.Block @@ -222,59 +237,33 @@ func (w *SyncWorker) ConnectBlocksParallel(lower, higher uint32) error { if w.dryRun { continue } - // check if the block is the next in line to be connected - // if not, wait for the previous block connect to complete - chi := int(hh.height) % w.syncWorkers - waitForBlock := false - waitDuration := time.Duration(0) - glog.Info(i, " Going to connect block ", hh.height) - connectedMux.Lock() - if uint32(lastConnectedBlock+1) != hh.height { - if connectedCh[chi] != nil { - glog.Fatal("Channel ", chi, " is not nil!") + start := time.Now() + getBlockMux.Lock() + for { + if uint32(lastConnectedBlock+1) == hh.height { + lastConnectedBlock = int(hh.height) + // get data to writeBlock routine + // glog.Info("Worker ", i, " have block ", hh.height, ". Sending.") + bch <- block + totalWaitDuration += time.Since(start) + getBlockCond.Broadcast() + getBlockMux.Unlock() + break } - connectedCh[chi] = make(chan struct{}) - waitForBlock = true - } - connectedMux.Unlock() - if waitForBlock { - start := time.Now() - glog.Info(i, " Waiting for block ", hh.height, " ", chi) - <-connectedCh[chi] + // glog.Info("Worker ", i, " have block ", hh.height, ". Waiting.") + getBlockCond.Wait() if hchClosed.Load() == true { - glog.Error("Worker ", i, " connect block error ", err, ". 
Exiting...") - return + break } - waitDuration = time.Since(start) - connectedCh[chi] = nil } - err = w.db.ConnectBlock(block) - if err != nil { - glog.Error("Worker ", i, " connect block ", hh.height, " ", hh.hash, " error ", err) - } - connectedMux.Lock() - if lastConnectedBlock < int(hh.height) { - lastConnectedBlock = int(hh.height) - } - chi = (chi + 1) % w.syncWorkers - if connectedCh[chi] != nil { - glog.Info(i, " closing channel ", chi) - close(connectedCh[chi]) - connectedCh[chi] = nil - } - totalWaitDuration += waitDuration - if waitDuration > 0 { - totalWaitCount++ - } - glog.Info("Connected block ", hh.height) - connectedMux.Unlock() } glog.Info("Worker ", i, " exiting...") } for i := 0; i < w.syncWorkers; i++ { wg.Add(1) - go work(i) + go getBlock(i) } + go writeBlock() var hash string ConnectLoop: @@ -293,7 +282,7 @@ ConnectLoop: } hch <- hashHeight{hash, h} if h > 0 && h%1000 == 0 { - glog.Info("connecting block ", h, " ", hash, " block wait time ", totalWaitDuration, " wait count ", totalWaitCount) + glog.Info("connecting block ", h, " ", hash, " wait for writeBlock ", totalWaitDuration) } h++ } @@ -301,70 +290,14 @@ ConnectLoop: close(hch) // signal stop to workers that are in w.chain.GetBlockWithoutHeader error loop hchClosed.Store(true) - connectedMux.Lock() - for _, ch := range connectedCh { - if ch != nil { - close(ch) - } - } - connectedMux.Unlock() + // first wait for the getBlock routines to finish and then close bch channel + getBlockCond.Broadcast() wg.Wait() + close(bch) + <-writeBlockDone return err } -func (w *SyncWorker) connectBlockChunk(lower, higher uint32) error { - connected, err := w.isBlockConnected(higher) - if err != nil || connected { - // if higher is over the best block, continue with lower block, otherwise return error - if err != bchain.ErrBlockNotFound { - return err - } - } - - height := lower - hash, err := w.chain.GetBlockHash(lower) - if err != nil { - return err - } - - for height <= higher { - block, err := 
w.chain.GetBlock(hash, height) - if err != nil { - return err - } - hash = block.Next - height = block.Height + 1 - if w.dryRun { - continue - } - err = w.db.ConnectBlock(block) - if err != nil { - return err - } - if block.Height%1000 == 0 { - glog.Info("connected block ", block.Height, " ", block.Hash) - go w.metrics.IndexDBSize.Set(float64(w.db.DatabaseSizeOnDisk())) - } - } - - return nil -} - -func (w *SyncWorker) isBlockConnected(height uint32) (bool, error) { - local, err := w.db.GetBlockHash(height) - if err != nil { - return false, err - } - remote, err := w.chain.GetBlockHash(height) - if err != nil { - return false, err - } - if local != remote { - return false, nil - } - return true, nil -} - type blockResult struct { block *bchain.Block err error From b3b8512958e739b15c748e7cf833cd6ce6a753a6 Mon Sep 17 00:00:00 2001 From: Martin Boehm Date: Mon, 30 Apr 2018 14:50:19 +0200 Subject: [PATCH 29/37] Sync using indexv2 --- blockbook.go | 2 +- db/sync.go | 60 +++++++++++++++++++++++++++++----------------------- 2 files changed, 34 insertions(+), 28 deletions(-) diff --git a/blockbook.go b/blockbook.go index 7f70cbe4..4eecef01 100644 --- a/blockbook.go +++ b/blockbook.go @@ -51,7 +51,7 @@ var ( prof = flag.String("prof", "", "http server binding [address]:port of the interface to profiling data /debug/pprof/ (default no profiling)") syncChunk = flag.Int("chunk", 100, "block chunk size for processing") - syncWorkers = flag.Int("workers", 8, "number of workers to process blocks") + syncWorkers = flag.Int("workers", 8, "number of workers to process blocks (default 8)") dryRun = flag.Bool("dryrun", false, "do not index blocks, only download") httpServerBinding = flag.String("httpserver", "", "http server binding [address]:port, (default no http server)") diff --git a/db/sync.go b/db/sync.go index fb681bd4..7f25a56a 100644 --- a/db/sync.go +++ b/db/sync.go @@ -187,6 +187,7 @@ func (w *SyncWorker) connectBlocks(onNewBlock func(hash string)) error { return nil } +// 
ConnectBlocksParallel uses parallel goroutines to get data from blockchain daemon func (w *SyncWorker) ConnectBlocksParallel(lower, higher uint32) error { type hashHeight struct { hash string @@ -200,21 +201,24 @@ func (w *SyncWorker) ConnectBlocksParallel(lower, higher uint32) error { hchClosed.Store(false) var getBlockMux sync.Mutex getBlockCond := sync.NewCond(&getBlockMux) - totalWaitDuration := time.Duration(0) - lastConnectedBlock := int(lower) - 1 + lastConnectedBlock := lower - 1 writeBlockDone := make(chan struct{}) - writeBlock := func() { + writeBlockWorker := func() { defer close(writeBlockDone) + lastBlock := lower - 1 for b := range bch { - // glog.Info("WriteBlock ", b.Height) - err = w.db.ConnectBlock(b) - if err != nil { - glog.Error("WriteBlock worker ", b.Height, " ", b.Hash, " error ", err) + if lastBlock+1 != b.Height { + glog.Error("writeBlockWorker skipped block, last connected block", lastBlock, ", new block ", b.Height) } + err := w.db.ConnectBlock(b) + if err != nil { + glog.Error("writeBlockWorker ", b.Height, " ", b.Hash, " error ", err) + } + lastBlock = b.Height } glog.Info("WriteBlock exiting...") } - getBlock := func(i int) { + getBlockWorker := func(i int) { defer wg.Done() var err error var block *bchain.Block @@ -224,10 +228,10 @@ func (w *SyncWorker) ConnectBlocksParallel(lower, higher uint32) error { if err != nil { // signal came while looping in the error loop if hchClosed.Load() == true { - glog.Error("Worker ", i, " connect block error ", err, ". Exiting...") + glog.Error("getBlockWorker ", i, " connect block error ", err, ". Exiting...") return } - glog.Error("Worker ", i, " connect block error ", err, ". Retrying...") + glog.Error("getBlockWorker ", i, " connect block error ", err, ". 
Retrying...") w.metrics.IndexResyncErrors.With(common.Labels{"error": err.Error()}).Inc() time.Sleep(time.Millisecond * 500) } else { @@ -237,34 +241,32 @@ func (w *SyncWorker) ConnectBlocksParallel(lower, higher uint32) error { if w.dryRun { continue } - start := time.Now() getBlockMux.Lock() for { - if uint32(lastConnectedBlock+1) == hh.height { - lastConnectedBlock = int(hh.height) - // get data to writeBlock routine - // glog.Info("Worker ", i, " have block ", hh.height, ". Sending.") + // we must make sure that the blocks are written to db in the correct order + if lastConnectedBlock+1 == hh.height { + // we have the right block, pass it to the writeBlockWorker + lastConnectedBlock = hh.height bch <- block - totalWaitDuration += time.Since(start) getBlockCond.Broadcast() - getBlockMux.Unlock() break } - // glog.Info("Worker ", i, " have block ", hh.height, ". Waiting.") - getBlockCond.Wait() + // break the endless loop on OS signal if hchClosed.Load() == true { break } + // wait for the time this block is top be passed to the writeBlockWorker + getBlockCond.Wait() } + getBlockMux.Unlock() } - glog.Info("Worker ", i, " exiting...") + glog.Info("getBlockWorker ", i, " exiting...") } for i := 0; i < w.syncWorkers; i++ { wg.Add(1) - go getBlock(i) + go getBlockWorker(i) } - go writeBlock() - + go writeBlockWorker() var hash string ConnectLoop: for h := lower; h <= higher; { @@ -282,16 +284,20 @@ ConnectLoop: } hch <- hashHeight{hash, h} if h > 0 && h%1000 == 0 { - glog.Info("connecting block ", h, " ", hash, " wait for writeBlock ", totalWaitDuration) + glog.Info("connecting block ", h, " ", hash) } h++ } } close(hch) - // signal stop to workers that are in w.chain.GetBlockWithoutHeader error loop + // signal stop to workers that are in a loop hchClosed.Store(true) - // first wait for the getBlock routines to finish and then close bch channel - getBlockCond.Broadcast() + // broadcast syncWorkers times to unstuck all waiting getBlockWorkers + for i := 0; i < 
w.syncWorkers; i++ { + getBlockCond.Broadcast() + } + // first wait for the getBlockWorkers to finish and then close bch channel + // so that the getBlockWorkers do not write to the closed channel wg.Wait() close(bch) <-writeBlockDone From 526b7534d6028f59b0f99a2a3e6f8e95a25d06cb Mon Sep 17 00:00:00 2001 From: Martin Boehm Date: Mon, 30 Apr 2018 15:07:31 +0200 Subject: [PATCH 30/37] Revert the order of transactions in socket.io interface --- server/socketio.go | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/server/socketio.go b/server/socketio.go index f7cb7007..08c9535b 100644 --- a/server/socketio.go +++ b/server/socketio.go @@ -249,17 +249,20 @@ func unmarshalGetAddressRequest(params []byte) (addr []string, opts addrOpts, er return } -func uniqueTxids(txids []string) []string { - uniqueTxids := make([]string, 0, len(txids)) +// bitcore returns txids from the newest to the oldest, we have to revert the order +func uniqueTxidsInReverse(txids []string) []string { + i := len(txids) + ut := make([]string, i) txidsMap := make(map[string]struct{}) for _, txid := range txids { _, e := txidsMap[txid] if !e { - uniqueTxids = append(uniqueTxids, txid) + i-- + ut[i] = txid txidsMap[txid] = struct{}{} } } - return uniqueTxids + return ut[i:] } type resultAddressTxids struct { @@ -296,7 +299,7 @@ func (s *SocketIoServer) getAddressTxids(addr []string, opts *addrOpts) (res res return res, err } } - res.Result = uniqueTxids(txids) + res.Result = uniqueTxidsInReverse(txids) return res, nil } From 880e3e8025513ad00ba5097eb421c46ac05a1eac Mon Sep 17 00:00:00 2001 From: Martin Boehm Date: Mon, 30 Apr 2018 18:54:48 +0200 Subject: [PATCH 31/37] Fix rocksdb test to handle nondeterministic order of addresses in value --- db/rocksdb_test.go | 32 ++++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/db/rocksdb_test.go b/db/rocksdb_test.go index 4c99cebd..eda94c15 100644 --- a/db/rocksdb_test.go +++ 
b/db/rocksdb_test.go @@ -246,14 +246,22 @@ func verifyAfterUTXOBlock1(t *testing.T, d *RocksDB, noBlockAddresses bool) { } if err := checkColumn(d, cfUnspentTxs, []keyPair{ keyPair{ - "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840", - addressToPubKeyHexWithLength("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d) + "00" + addressToPubKeyHexWithLength("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "02", - nil, + "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840", "", + func(v string) bool { + return compareFuncBlockAddresses(t, v, []string{ + addressToPubKeyHexWithLength("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d) + "00", + addressToPubKeyHexWithLength("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "02", + }) + }, }, keyPair{ - "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75", - addressToPubKeyHexWithLength("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "00" + addressToPubKeyHexWithLength("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "02", - nil, + "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75", "", + func(v string) bool { + return compareFuncBlockAddresses(t, v, []string{ + addressToPubKeyHexWithLength("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "00", + addressToPubKeyHexWithLength("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "02", + }) + }, }, }); err != nil { { @@ -324,9 +332,13 @@ func verifyAfterUTXOBlock2(t *testing.T, d *RocksDB) { nil, }, keyPair{ - "3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71", - addressToPubKeyHexWithLength("mwwoKQE5Lb1G4picHSHDQKg8jw424PF9SC", t, d) + "00" + addressToPubKeyHexWithLength("mmJx9Y8ayz9h14yd9fgCW1bUKoEpkBAquP", t, d) + "02", - nil, + "3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71", "", + func(v string) bool { + return compareFuncBlockAddresses(t, v, []string{ + addressToPubKeyHexWithLength("mwwoKQE5Lb1G4picHSHDQKg8jw424PF9SC", t, d) + "00", + addressToPubKeyHexWithLength("mmJx9Y8ayz9h14yd9fgCW1bUKoEpkBAquP", t, d) + 
"02", + }) + }, }, }); err != nil { { @@ -337,7 +349,7 @@ func verifyAfterUTXOBlock2(t *testing.T, d *RocksDB) { keyPair{"000370d6", "", func(v string) bool { return compareFuncBlockAddresses(t, v, []string{ - addressToPubKeyHexWithLength("mzB8cYrfRwFRFAGTDzV8LkUQy5BQicxGhX", t, d) + "00", //+ "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25" + "00", + addressToPubKeyHexWithLength("mzB8cYrfRwFRFAGTDzV8LkUQy5BQicxGhX", t, d) + "00", addressToPubKeyHexWithLength("mtR97eM2HPWVM6c8FGLGcukgaHHQv7THoL", t, d) + "00", addressToPubKeyHexWithLength("mwwoKQE5Lb1G4picHSHDQKg8jw424PF9SC", t, d) + "00", addressToPubKeyHexWithLength("mmJx9Y8ayz9h14yd9fgCW1bUKoEpkBAquP", t, d) + "00", From 7de872697917c1a99c2c039e347e64eb3149ee91 Mon Sep 17 00:00:00 2001 From: Martin Boehm Date: Tue, 1 May 2018 22:48:58 +0200 Subject: [PATCH 32/37] Cleanup mempool usage, remove obsolete GetMempoolSpentOutput --- bchain/coins/blockchain.go | 4 ---- bchain/coins/btc/bitcoinrpc.go | 5 ----- bchain/coins/eth/ethrpc.go | 4 ---- bchain/mempool_nonutxo.go | 4 ++-- bchain/mempool_utxo.go | 6 +++--- bchain/types.go | 1 - server/https.go | 6 ------ server/socketio.go | 17 +++-------------- 8 files changed, 8 insertions(+), 39 deletions(-) diff --git a/bchain/coins/blockchain.go b/bchain/coins/blockchain.go index c90332ae..7e068e67 100644 --- a/bchain/coins/blockchain.go +++ b/bchain/coins/blockchain.go @@ -149,10 +149,6 @@ func (c *blockChainWithMetrics) GetMempoolTransactions(address string) (v []stri return c.b.GetMempoolTransactions(address) } -func (c *blockChainWithMetrics) GetMempoolSpentOutput(outputTxid string, vout uint32) (v string) { - return c.b.GetMempoolSpentOutput(outputTxid, vout) -} - func (c *blockChainWithMetrics) GetMempoolEntry(txid string) (v *bchain.MempoolEntry, err error) { defer func(s time.Time) { c.observeRPCLatency("GetMempoolEntry", s, err) }(time.Now()) return c.b.GetMempoolEntry(txid) diff --git a/bchain/coins/btc/bitcoinrpc.go 
b/bchain/coins/btc/bitcoinrpc.go index cb894517..686efcf2 100644 --- a/bchain/coins/btc/bitcoinrpc.go +++ b/bchain/coins/btc/bitcoinrpc.go @@ -575,11 +575,6 @@ func (b *BitcoinRPC) GetMempoolTransactions(address string) ([]string, error) { return b.Mempool.GetTransactions(address) } -// GetMempoolSpentOutput returns transaction in mempool which spends given outpoint -func (b *BitcoinRPC) GetMempoolSpentOutput(outputTxid string, vout uint32) string { - return b.Mempool.GetSpentOutput(outputTxid, vout) -} - // EstimateSmartFee returns fee estimation. func (b *BitcoinRPC) EstimateSmartFee(blocks int, conservative bool) (float64, error) { glog.V(1).Info("rpc: estimatesmartfee ", blocks) diff --git a/bchain/coins/eth/ethrpc.go b/bchain/coins/eth/ethrpc.go index 26c844bc..9193a9d8 100644 --- a/bchain/coins/eth/ethrpc.go +++ b/bchain/coins/eth/ethrpc.go @@ -486,10 +486,6 @@ func (b *EthereumRPC) GetMempoolTransactions(address string) ([]string, error) { return b.Mempool.GetTransactions(address) } -func (b *EthereumRPC) GetMempoolSpentOutput(outputTxid string, vout uint32) string { - return "" -} - func (b *EthereumRPC) GetMempoolEntry(txid string) (*bchain.MempoolEntry, error) { return nil, errors.New("GetMempoolEntry: not implemented") } diff --git a/bchain/mempool_nonutxo.go b/bchain/mempool_nonutxo.go index d30149f6..7a18ca4a 100644 --- a/bchain/mempool_nonutxo.go +++ b/bchain/mempool_nonutxo.go @@ -22,13 +22,13 @@ func NewNonUTXOMempool(chain BlockChain) *NonUTXOMempool { // GetTransactions returns slice of mempool transactions for given address func (m *NonUTXOMempool) GetTransactions(address string) ([]string, error) { - m.mux.Lock() - defer m.mux.Unlock() parser := m.chain.GetChainParser() addrID, err := parser.GetAddrIDFromAddress(address) if err != nil { return nil, err } + m.mux.Lock() + defer m.mux.Unlock() outpoints := m.addrIDToTx[string(addrID)] txs := make([]string, 0, len(outpoints)) for _, o := range outpoints { diff --git a/bchain/mempool_utxo.go 
b/bchain/mempool_utxo.go index fbd73724..d653e0a9 100644 --- a/bchain/mempool_utxo.go +++ b/bchain/mempool_utxo.go @@ -7,7 +7,7 @@ import ( "github.com/golang/glog" ) -// addrIndex and outpoint are used also in nonutxo mempool +// addrIndex and outpoint are used also in non utxo mempool type addrIndex struct { addrID string n int32 @@ -39,13 +39,13 @@ func NewUTXOMempool(chain BlockChain) *UTXOMempool { // GetTransactions returns slice of mempool transactions for given address func (m *UTXOMempool) GetTransactions(address string) ([]string, error) { - m.mux.Lock() - defer m.mux.Unlock() parser := m.chain.GetChainParser() addrID, err := parser.GetAddrIDFromAddress(address) if err != nil { return nil, err } + m.mux.Lock() + defer m.mux.Unlock() outpoints := m.addrIDToTx[string(addrID)] txs := make([]string, 0, len(outpoints)+len(outpoints)/2) for _, o := range outpoints { diff --git a/bchain/types.go b/bchain/types.go index 73b2b56b..c5a88630 100644 --- a/bchain/types.go +++ b/bchain/types.go @@ -141,7 +141,6 @@ type BlockChain interface { // mempool ResyncMempool(onNewTxAddr func(txid string, addr string)) error GetMempoolTransactions(address string) ([]string, error) - GetMempoolSpentOutput(outputTxid string, vout uint32) string GetMempoolEntry(txid string) (*MempoolEntry, error) // parser GetChainParser() BlockChainParser diff --git a/server/https.go b/server/https.go index cf073e9e..62232f68 100644 --- a/server/https.go +++ b/server/https.go @@ -196,12 +196,6 @@ func (s *HTTPServer) transactions(w http.ResponseWriter, r *http.Request) { txList := transactionList{} err = s.db.GetTransactions(address, lower, higher, func(txid string, vout uint32, isOutput bool) error { txList.Txid = append(txList.Txid, txid) - if isOutput { - input := s.chain.GetMempoolSpentOutput(txid, vout) - if input != "" { - txList.Txid = append(txList.Txid, txid) - } - } return nil }) if err != nil { diff --git a/server/socketio.go b/server/socketio.go index 98465a2f..a654122c 100644 --- 
a/server/socketio.go +++ b/server/socketio.go @@ -127,7 +127,6 @@ func (s *SocketIoServer) txRedirect(w http.ResponseWriter, r *http.Request) { type addrOpts struct { Start int `json:"start"` End int `json:"end"` - QueryMempol bool `json:"queryMempol"` QueryMempoolOnly bool `json:"queryMempoolOnly"` From int `json:"from"` To int `json:"to"` @@ -276,27 +275,17 @@ func (s *SocketIoServer) getAddressTxids(addr []string, opts *addrOpts) (res res if !opts.QueryMempoolOnly { err = s.db.GetTransactions(address, lower, higher, func(txid string, vout uint32, isOutput bool) error { txids = append(txids, txid) - if isOutput && opts.QueryMempol { - input := s.chain.GetMempoolSpentOutput(txid, vout) - if input != "" { - txids = append(txids, txid) - } - } return nil }) if err != nil { return res, err } - } - if opts.QueryMempoolOnly || opts.QueryMempol { - mtxids, err := s.chain.GetMempoolTransactions(address) + } else { + m, err := s.chain.GetMempoolTransactions(address) if err != nil { return res, err } - txids = append(txids, mtxids...) - } - if err != nil { - return res, err + txids = append(txids, m...) 
} } res.Result = uniqueTxidsInReverse(txids) From 3f4af2095146b38b0bb723da7d44a25f602cb3b3 Mon Sep 17 00:00:00 2001 From: Martin Boehm Date: Tue, 1 May 2018 22:56:50 +0200 Subject: [PATCH 33/37] Recover from ocasional panic caused by zmq during shutdown --- bchain/mq.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/bchain/mq.go b/bchain/mq.go index d53d9994..299fcc19 100644 --- a/bchain/mq.go +++ b/bchain/mq.go @@ -62,13 +62,19 @@ func NewMQ(binding string, callback func(NotificationType)) (*MQ, error) { } func (mq *MQ) run(callback func(NotificationType)) { + defer func() { + if r := recover(); r != nil { + glog.Error("MQ loop recovered from ", r) + } + mq.isRunning = false + close(mq.finished) + glog.Info("MQ loop terminated") + }() mq.isRunning = true for { msg, err := mq.socket.RecvMessageBytes(0) if err != nil { if zmq.AsErrno(err) == zmq.Errno(zmq.ETERM) || err.Error() == "Socket is closed" { - close(mq.finished) - glog.Info("MQ loop terminated") break } glog.Error("MQ RecvMessageBytes error ", err, ", ", zmq.AsErrno(err)) @@ -96,7 +102,6 @@ func (mq *MQ) run(callback func(NotificationType)) { callback(nt) } } - mq.isRunning = false } // Shutdown stops listening to the ZeroMQ and closes the connection From 7281fb27b14a3c9770cbbf5271a453d856768dbc Mon Sep 17 00:00:00 2001 From: Martin Boehm Date: Tue, 1 May 2018 23:43:36 +0200 Subject: [PATCH 34/37] Implement UTXO mempool indexv2 --- bchain/mempool_nonutxo.go | 5 ++-- bchain/mempool_utxo.go | 60 ++++++++++++++++++--------------------- 2 files changed, 30 insertions(+), 35 deletions(-) diff --git a/bchain/mempool_nonutxo.go b/bchain/mempool_nonutxo.go index 7a18ca4a..946dd89b 100644 --- a/bchain/mempool_nonutxo.go +++ b/bchain/mempool_nonutxo.go @@ -55,8 +55,9 @@ func (m *NonUTXOMempool) Resync(onNewTxAddr func(txid string, addr string)) erro return err } parser := m.chain.GetChainParser() - newTxToInputOutput := make(map[string][]addrIndex, len(m.txToInputOutput)+1) - 
newAddrIDToTx := make(map[string][]outpoint, len(m.addrIDToTx)+1) + // allocate slightly larger capacity of the maps + newTxToInputOutput := make(map[string][]addrIndex, len(m.txToInputOutput)+5) + newAddrIDToTx := make(map[string][]outpoint, len(m.addrIDToTx)+5) for _, txid := range txs { io, exists := m.txToInputOutput[txid] if !exists { diff --git a/bchain/mempool_utxo.go b/bchain/mempool_utxo.go index d653e0a9..0ad1836f 100644 --- a/bchain/mempool_utxo.go +++ b/bchain/mempool_utxo.go @@ -18,21 +18,15 @@ type outpoint struct { vout int32 } -type inputOutput struct { - outputs []addrIndex - inputs []outpoint -} - // UTXOMempool is mempool handle. type UTXOMempool struct { chain BlockChain mux sync.Mutex - txToInputOutput map[string]inputOutput + txToInputOutput map[string][]addrIndex addrIDToTx map[string][]outpoint - inputs map[outpoint]string } -// NewMempool creates new mempool handler. +// NewUTXOMempool creates new mempool handler. func NewUTXOMempool(chain BlockChain) *UTXOMempool { return &UTXOMempool{chain: chain} } @@ -47,29 +41,18 @@ func (m *UTXOMempool) GetTransactions(address string) ([]string, error) { m.mux.Lock() defer m.mux.Unlock() outpoints := m.addrIDToTx[string(addrID)] - txs := make([]string, 0, len(outpoints)+len(outpoints)/2) + txs := make([]string, 0, len(outpoints)) for _, o := range outpoints { txs = append(txs, o.txid) - i := m.inputs[o] - if i != "" { - txs = append(txs, i) - } } return txs, nil } -// GetSpentOutput returns transaction which spends given outpoint -func (m *UTXOMempool) GetSpentOutput(outputTxid string, vout uint32) string { - o := outpoint{txid: outputTxid, vout: int32(vout)} - return m.inputs[o] -} - -func (m *UTXOMempool) updateMappings(newTxToInputOutput map[string]inputOutput, newAddrIDToTx map[string][]outpoint, newInputs map[outpoint]string) { +func (m *UTXOMempool) updateMappings(newTxToInputOutput map[string][]addrIndex, newAddrIDToTx map[string][]outpoint) { m.mux.Lock() defer m.mux.Unlock() m.txToInputOutput 
= newTxToInputOutput m.addrIDToTx = newAddrIDToTx - m.inputs = newInputs } // Resync gets mempool transactions and maps outputs to transactions. @@ -83,9 +66,9 @@ func (m *UTXOMempool) Resync(onNewTxAddr func(txid string, addr string)) error { return err } parser := m.chain.GetChainParser() - newTxToInputOutput := make(map[string]inputOutput, len(m.txToInputOutput)+1) - newAddrIDToTx := make(map[string][]outpoint, len(m.addrIDToTx)+1) - newInputs := make(map[outpoint]string, len(m.inputs)+1) + // allocate slightly larger capacity of the maps + newTxToInputOutput := make(map[string][]addrIndex, len(m.txToInputOutput)+5) + newAddrIDToTx := make(map[string][]outpoint, len(m.addrIDToTx)+5) for _, txid := range txs { io, exists := m.txToInputOutput[txid] if !exists { @@ -94,7 +77,7 @@ func (m *UTXOMempool) Resync(onNewTxAddr func(txid string, addr string)) error { glog.Error("cannot get transaction ", txid, ": ", err) continue } - io.outputs = make([]addrIndex, 0, len(tx.Vout)) + io = make([]addrIndex, 0, len(tx.Vout)+len(tx.Vin)) for _, output := range tx.Vout { addrID, err := parser.GetAddrIDFromVout(&output) if err != nil { @@ -102,29 +85,40 @@ func (m *UTXOMempool) Resync(onNewTxAddr func(txid string, addr string)) error { continue } if len(addrID) > 0 { - io.outputs = append(io.outputs, addrIndex{string(addrID), int32(output.N)}) + io = append(io, addrIndex{string(addrID), int32(output.N)}) } if onNewTxAddr != nil && len(output.ScriptPubKey.Addresses) == 1 { onNewTxAddr(tx.Txid, output.ScriptPubKey.Addresses[0]) } } - io.inputs = make([]outpoint, 0, len(tx.Vin)) for _, input := range tx.Vin { if input.Coinbase != "" { continue } - io.inputs = append(io.inputs, outpoint{input.Txid, int32(input.Vout)}) + // TODO - possibly get from DB unspenttxs - however some output txs can be in mempool only + itx, err := m.chain.GetTransaction(input.Txid) + if err != nil { + glog.Error("cannot get transaction ", input.Txid, ": ", err) + continue + } + if int(input.Vout) >= 
len(itx.Vout) { + glog.Error("Vout len in transaction ", input.Txid, " ", len(itx.Vout), " input.Vout=", input.Vout) + continue + } + addrID, err := parser.GetAddrIDFromVout(&itx.Vout[input.Vout]) + if err != nil { + glog.Error("error in addrID in ", input.Txid, " ", input.Vout, ": ", err) + continue + } + io = append(io, addrIndex{string(addrID), int32(^input.Vout)}) } } newTxToInputOutput[txid] = io - for _, si := range io.outputs { + for _, si := range io { newAddrIDToTx[si.addrID] = append(newAddrIDToTx[si.addrID], outpoint{txid, si.n}) } - for _, i := range io.inputs { - newInputs[i] = txid - } } - m.updateMappings(newTxToInputOutput, newAddrIDToTx, newInputs) + m.updateMappings(newTxToInputOutput, newAddrIDToTx) glog.Info("Mempool: resync finished in ", time.Since(start), ", ", len(m.txToInputOutput), " transactions in mempool") return nil } From b2f24e82f9e267463e87e79e002a8c4349cd8139 Mon Sep 17 00:00:00 2001 From: Martin Boehm Date: Wed, 2 May 2018 16:51:14 +0200 Subject: [PATCH 35/37] Fix bitcore incompatible lookupDetailedTransaction in test.html --- static/test.html | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/static/test.html b/static/test.html index 45c27e86..c911fe34 100644 --- a/static/test.html +++ b/static/test.html @@ -182,12 +182,18 @@ function lookupDetailedTransaction(hash, format, f) { const method = 'getDetailedTransaction'; - const params = [ + const af = parseInt(format) + var params = [ hash, - { - addressFormat: parseInt(format), - }, ]; + if (af !== 0) { + params = [ + hash, + { + addressFormat: af, + }, + ]; + } return socket.send({ method, params }, f); } @@ -437,4 +443,4 @@ document.getElementById('serverAddress').value = window.location.protocol.replace("http", "ws") + "//" + window.location.host; - + \ No newline at end of file From 83edc33b3ff7934ac67e356b334d064d68a10b88 Mon Sep 17 00:00:00 2001 From: Martin Boehm Date: Thu, 3 May 2018 01:03:20 +0200 Subject: [PATCH 36/37] Fix processing of 
transactions in ConnectBlock --- db/rocksdb.go | 21 ++++++++++++--------- db/rocksdb_test.go | 41 ++++++++++++++++++++++++++++++++++++++++- server/socketio.go | 6 +++--- 3 files changed, 55 insertions(+), 13 deletions(-) diff --git a/db/rocksdb.go b/db/rocksdb.go index 9c220b40..12432834 100644 --- a/db/rocksdb.go +++ b/db/rocksdb.go @@ -374,6 +374,7 @@ func (d *RocksDB) writeAddressesUTXO(wb *gorocksdb.WriteBatch, block *bchain.Blo } addresses := make(map[string][]outpoint) unspentTxs := make(map[string][]byte) + thisBlockTxs := make(map[string]struct{}) btxIDs := make([][]byte, len(block.Txs)) // first process all outputs, build mapping of addresses to outpoints and mappings of unspent txs to addresses for txi, tx := range block.Txs { @@ -399,7 +400,9 @@ func (d *RocksDB) writeAddressesUTXO(wb *gorocksdb.WriteBatch, block *bchain.Blo } txAddrs = appendPackedAddrID(txAddrs, addrID, output.N, len(tx.Vout)-i) } - unspentTxs[string(btxID)] = txAddrs + stxID := string(btxID) + unspentTxs[stxID] = txAddrs + thisBlockTxs[stxID] = struct{}{} } // locate addresses spent by this tx and remove them from unspent addresses // keep them so that they be stored for DisconnectBlock functionality @@ -415,30 +418,30 @@ func (d *RocksDB) writeAddressesUTXO(wb *gorocksdb.WriteBatch, block *bchain.Blo } return err } - // try to find the tx in current block + // find the tx in current block or already processed stxID := string(btxID) - unspentAddrs, inThisBlock := unspentTxs[stxID] - if !inThisBlock { + unspentAddrs, exists := unspentTxs[stxID] + if !exists { // else find it in previous blocks unspentAddrs, err = d.getUnspentTx(btxID) if err != nil { return err } if unspentAddrs == nil { - glog.Warningf("rocksdb: height %d, tx %v in inputs but missing in unspentTxs", block.Height, tx.Txid) + glog.Warningf("rocksdb: height %d, tx %v, input tx %v vin %v %v missing in unspentTxs", block.Height, tx.Txid, input.Txid, input.Vout, i) continue } } var addrID []byte addrID, unspentAddrs = 
findAndRemoveUnspentAddr(unspentAddrs, input.Vout) if addrID == nil { - glog.Warningf("rocksdb: height %d, tx %v vin %v in inputs but missing in unspentTxs", block.Height, tx.Txid, i) + glog.Warningf("rocksdb: height %d, tx %v, input tx %v vin %v %v not found in unspentAddrs", block.Height, tx.Txid, input.Txid, input.Vout, i) continue } - // record what was removed from unspentTx + // record what was spent in this tx // skip transactions that were created in this block - saddrID := string(addrID) - if _, exists := addresses[saddrID]; !exists { + if _, exists := thisBlockTxs[stxID]; !exists { + saddrID := string(addrID) rut := spentTxs[saddrID] rut = append(rut, outpoint{btxID, int32(input.Vout)}) spentTxs[saddrID] = rut diff --git a/db/rocksdb_test.go b/db/rocksdb_test.go index eda94c15..ef927d36 100644 --- a/db/rocksdb_test.go +++ b/db/rocksdb_test.go @@ -148,6 +148,12 @@ func getTestUTXOBlock1(t *testing.T, d *RocksDB) *bchain.Block { Hex: addressToPubKeyHex("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d), }, }, + bchain.Vout{ + N: 2, + ScriptPubKey: bchain.ScriptPubKey{ + Hex: addressToPubKeyHex("2NEVv9LJmAnY99W1pFoc5UJjVdypBqdnvu1", t, d), + }, + }, }, Blocktime: 22549300001, Time: 22549300001, @@ -195,10 +201,12 @@ func getTestUTXOBlock2(t *testing.T, d *RocksDB) *bchain.Block { bchain.Tx{ Txid: "3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71", Vin: []bchain.Vin{ + // spending an output in the same block bchain.Vin{ Txid: "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25", Vout: 0, }, + // spending an output in the previous block bchain.Vin{ Txid: "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75", Vout: 1, @@ -221,6 +229,26 @@ func getTestUTXOBlock2(t *testing.T, d *RocksDB) *bchain.Block { Blocktime: 22549400001, Time: 22549400001, }, + // transaction from the same address in the previous block + bchain.Tx{ + Txid: "05e2e48aeabdd9b75def7b48d756ba304713c2aba7b522bf9dbc893fc4231b07", + Vin: []bchain.Vin{ + 
bchain.Vin{ + Txid: "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75", + Vout: 2, + }, + }, + Vout: []bchain.Vout{ + bchain.Vout{ + N: 0, + ScriptPubKey: bchain.ScriptPubKey{ + Hex: addressToPubKeyHex("2NEVv9LJmAnY99W1pFoc5UJjVdypBqdnvu1", t, d), + }, + }, + }, + Blocktime: 22549400002, + Time: 22549400002, + }, }, } } @@ -239,6 +267,7 @@ func verifyAfterUTXOBlock1(t *testing.T, d *RocksDB, noBlockAddresses bool) { keyPair{addressToPubKeyHex("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "000370d5", "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840" + "02", nil}, keyPair{addressToPubKeyHex("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "00", nil}, keyPair{addressToPubKeyHex("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "02", nil}, + keyPair{addressToPubKeyHex("2NEVv9LJmAnY99W1pFoc5UJjVdypBqdnvu1", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "04", nil}, }); err != nil { { t.Fatal(err) @@ -260,6 +289,7 @@ func verifyAfterUTXOBlock1(t *testing.T, d *RocksDB, noBlockAddresses bool) { return compareFuncBlockAddresses(t, v, []string{ addressToPubKeyHexWithLength("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "00", addressToPubKeyHexWithLength("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "02", + addressToPubKeyHexWithLength("2NEVv9LJmAnY99W1pFoc5UJjVdypBqdnvu1", t, d) + "04", }) }, }, @@ -273,7 +303,7 @@ func verifyAfterUTXOBlock1(t *testing.T, d *RocksDB, noBlockAddresses bool) { if noBlockAddresses { blockAddressesKp = []keyPair{} } else { - // the values in cfBlockAddresses have random order, must use CompareFunc + // the values in cfBlockAddresses are in random order, must use CompareFunc blockAddressesKp = []keyPair{ keyPair{"000370d5", "", func(v string) bool { @@ -282,6 +312,7 @@ func verifyAfterUTXOBlock1(t *testing.T, d 
*RocksDB, noBlockAddresses bool) { addressToPubKeyHexWithLength("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "00", addressToPubKeyHexWithLength("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "00", addressToPubKeyHexWithLength("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "00", + addressToPubKeyHexWithLength("2NEVv9LJmAnY99W1pFoc5UJjVdypBqdnvu1", t, d) + "00", }) }, }, @@ -308,12 +339,14 @@ func verifyAfterUTXOBlock2(t *testing.T, d *RocksDB) { keyPair{addressToPubKeyHex("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "000370d5", "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840" + "02", nil}, keyPair{addressToPubKeyHex("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "00", nil}, keyPair{addressToPubKeyHex("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "02", nil}, + keyPair{addressToPubKeyHex("2NEVv9LJmAnY99W1pFoc5UJjVdypBqdnvu1", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "04", nil}, keyPair{addressToPubKeyHex("mzB8cYrfRwFRFAGTDzV8LkUQy5BQicxGhX", t, d) + "000370d6", "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25" + "00" + "3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71" + "01", nil}, keyPair{addressToPubKeyHex("mtR97eM2HPWVM6c8FGLGcukgaHHQv7THoL", t, d) + "000370d6", "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25" + "02", nil}, keyPair{addressToPubKeyHex("mwwoKQE5Lb1G4picHSHDQKg8jw424PF9SC", t, d) + "000370d6", "3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71" + "00", nil}, keyPair{addressToPubKeyHex("mmJx9Y8ayz9h14yd9fgCW1bUKoEpkBAquP", t, d) + "000370d6", "3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71" + "02", nil}, keyPair{addressToPubKeyHex("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "000370d6", 
"7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25" + "01", nil}, keyPair{addressToPubKeyHex("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "000370d6", "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25" + "03", nil}, + keyPair{addressToPubKeyHex("2NEVv9LJmAnY99W1pFoc5UJjVdypBqdnvu1", t, d) + "000370d6", "05e2e48aeabdd9b75def7b48d756ba304713c2aba7b522bf9dbc893fc4231b07" + "00" + "05e2e48aeabdd9b75def7b48d756ba304713c2aba7b522bf9dbc893fc4231b07" + "01", nil}, keyPair{addressToPubKeyHex("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "000370d6", "3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71" + "03", nil}, }); err != nil { { @@ -340,6 +373,11 @@ func verifyAfterUTXOBlock2(t *testing.T, d *RocksDB) { }) }, }, + keyPair{ + "05e2e48aeabdd9b75def7b48d756ba304713c2aba7b522bf9dbc893fc4231b07", + addressToPubKeyHexWithLength("2NEVv9LJmAnY99W1pFoc5UJjVdypBqdnvu1", t, d) + "00", + nil, + }, }); err != nil { { t.Fatal(err) @@ -356,6 +394,7 @@ func verifyAfterUTXOBlock2(t *testing.T, d *RocksDB) { addressToPubKeyHexWithLength("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "02" + "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "00", addressToPubKeyHexWithLength("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "02" + "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840" + "02", addressToPubKeyHexWithLength("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "02" + "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "02", + addressToPubKeyHexWithLength("2NEVv9LJmAnY99W1pFoc5UJjVdypBqdnvu1", t, d) + "02" + "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "04", }) }, }, diff --git a/server/socketio.go b/server/socketio.go index a654122c..89bfdc7f 100644 --- a/server/socketio.go +++ b/server/socketio.go @@ -670,9 +670,9 @@ func (s *SocketIoServer) getDetailedTransaction(txid string, opts txOpts) (res r } for _, vout := range tx.Vout { ao := txOutputs{ - Satoshis: 
int64(vout.Value * 1E8), - Script: &vout.ScriptPubKey.Hex, - SpentIndex: int(vout.N), + Satoshis: int64(vout.Value * 1E8), + Script: &vout.ScriptPubKey.Hex, + // SpentIndex: int(vout.N), } if vout.Address != nil { a, err := vout.Address.EncodeAddress(opts.AddressFormat) From 7b79ac97e8e2f66e468f98bb77e1317c963938db Mon Sep 17 00:00:00 2001 From: Martin Boehm Date: Thu, 3 May 2018 14:49:19 +0200 Subject: [PATCH 37/37] Fix usage message --- blockbook.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/blockbook.go b/blockbook.go index 4eecef01..1aedb3c0 100644 --- a/blockbook.go +++ b/blockbook.go @@ -51,7 +51,7 @@ var ( prof = flag.String("prof", "", "http server binding [address]:port of the interface to profiling data /debug/pprof/ (default no profiling)") syncChunk = flag.Int("chunk", 100, "block chunk size for processing") - syncWorkers = flag.Int("workers", 8, "number of workers to process blocks (default 8)") + syncWorkers = flag.Int("workers", 8, "number of workers to process blocks") dryRun = flag.Bool("dryrun", false, "do not index blocks, only download") httpServerBinding = flag.String("httpserver", "", "http server binding [address]:port, (default no http server)") @@ -62,7 +62,7 @@ var ( explorerURL = flag.String("explorer", "", "address of blockchain explorer") - coin = flag.String("coin", "btc", "coin name (default btc)") + coin = flag.String("coin", "btc", "coin name") ) var (