commit 67d036a2ce: merged latest nomp code

267 README.md
@@ -5,6 +5,10 @@ This portal is an extremely efficient, highly scalable, all-in-one, easy to setup
entirely in Node.js. It contains a stratum poolserver; reward/payment/share processor; and a (*not yet completed*)
responsive user-friendly front-end website featuring mining instructions, in-depth live statistics, and an admin center.

#### Production Usage Notice

This is beta software. All of the following are things that can change and break an existing NOMP setup: functionality of any feature, structure of configuration files, and structure of redis data. If you use this software in production then *DO NOT* pull new code straight into production usage because it can and often will break your setup and require you to tweak things like config files or redis data.


#### Table of Contents

* [Features](#features)
* [Attack Mitigation](#attack-mitigation)

@@ -148,6 +152,8 @@ a good pool operator. For starters be sure to read:

#### 1) Downloading & Installing

[**Redis security warning**](http://redis.io/topics/security): be sure to firewall access to redis - an easy way is to include `bind 127.0.0.1` in your `redis.conf` file

Clone the repository and run `npm update` for all the dependencies to be installed:

```bash

@@ -167,6 +173,10 @@ Explanation for each field:
/* Specifies the level of log output verbosity. Anything more severe than the level specified
will also be logged. */
"logLevel": "debug", //or "warning", "error"

/* By default NOMP logs to console and gives pretty colors. If you direct that output to a
log file then disable this feature to avoid nasty characters in your log file. */
"logColors": true,


/* The NOMP CLI (command-line interface) will listen for commands on this port. For example,
@@ -181,10 +191,54 @@ Explanation for each field:
"enabled": true,
"forks": "auto"
},

/* Pool config file will inherit these default values if they are not set. */
"defaultPoolConfigs": {

/* Poll RPC daemons for new blocks every this many milliseconds. */
"blockRefreshInterval": 1000,

/* If no new blocks are available for this many seconds update and rebroadcast job. */
"jobRebroadcastTimeout": 55,

/* Disconnect workers that haven't submitted shares for this many seconds. */
"connectionTimeout": 600,

/* (For MPOS mode) Store the block hashes for shares that aren't block candidates. */
"emitInvalidBlockHashes": false,

/* This option will only authenticate miners using an address or mining key. */
"validateWorkerUsername": true,

/* Enable for client IP addresses to be detected when using a load balancer with TCP
proxy protocol enabled, such as HAProxy with 'send-proxy' param:
http://haproxy.1wt.eu/download/1.5/doc/configuration.txt */
"tcpProxyProtocol": false,

/* If the pool is under a low-diff share attack we can ban the attacker's IP to reduce system/network load. If
running behind HAProxy be sure to enable 'tcpProxyProtocol', otherwise you'll end up
banning your own IP address (and therefore all workers). */
"banning": {
"enabled": true,
"time": 600, //How many seconds to ban worker for
"invalidPercent": 50, //What percent of invalid shares triggers ban
"checkThreshold": 500, //Perform check when this many shares have been submitted
"purgeInterval": 300 //Every this many seconds clear out the list of old bans
},

/* Used for storing share and block submission data and payment processing. */
"redis": {
"host": "127.0.0.1",
"port": 6379
}
},
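The sketch below is not part of this commit (the helper name is hypothetical); it only illustrates how such defaults could be folded into each pool config, mirroring the `extend`-based merge loop added to init.js further down in this diff:

```js
var extend = require('extend');

// Hypothetical helper: copy any defaultPoolConfigs value the pool config omits.
// Object values are deep-cloned so pools never share mutable state.
function applyDefaults(poolOptions, defaultPoolConfigs){
    for (var option in defaultPoolConfigs){
        if (!(option in poolOptions)){
            var toClone = defaultPoolConfigs[option];
            poolOptions[option] = (toClone && toClone.constructor === Object)
                ? extend(true, {}, toClone)
                : toClone;
        }
    }
    return poolOptions;
}
```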
/* This is the front-end. It's not finished. When it is finished, this comment will say so. */
"website": {
"enabled": true,
/* If you are using a reverse-proxy like nginx to display the website then set this to
127.0.0.1 to not expose the port. */
"host": "0.0.0.0",
"port": 80,
/* Used for displaying stratum connection data on the Getting Started page. */
"stratumHost": "cryppit.com",
@@ -284,13 +338,22 @@ Here is an example of the required fields:
{
"name": "Litecoin",
"symbol": "ltc",
"algorithm": "scrypt", //or "sha256", "scrypt-jane", "scrypt-n", "quark", "x11"
"txMessages": false, //or true (not required, defaults to false)
"mposDiffMultiplier": 256, //only for x11 coins in mpos mode, set to 256 (optional)
"algorithm": "scrypt",

/* Magic value only required for setting up p2p block notifications. It is found in the daemon
source code as the pchMessageStart variable.
For example, litecoin mainnet magic: http://git.io/Bi8YFw
And for litecoin testnet magic: http://git.io/NXBYJA */
"peerMagic": "fbc0b6db", //optional
"peerMagicTestnet": "fcc1b7dc" //optional

//"txMessages": false, //optional - defaults to false

//"mposDiffMultiplier": 256, //optional - only for x11 coins in mpos mode
}
```

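As an illustration only (not part of this commit), the hex peerMagic value maps directly to the 4-byte pchMessageStart sequence the daemon expects from p2p peers:

```js
// Illustrative: convert the configured hex magic into the raw 4-byte network prefix.
var peerMagic = 'fbc0b6db';                     // litecoin mainnet, per coins/litecoin.json
var magicBytes = Buffer.from(peerMagic, 'hex'); // <Buffer fb c0 b6 db>
console.log(magicBytes.length);                 // network magic is always 4 bytes
```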
For additional documentation on how to configure coins *(especially important for scrypt-n and scrypt-jane coins)*
For additional documentation on how to configure coins and their different algorithms
see [these instructions](//github.com/zone117x/node-stratum-pool#module-usage).


@@ -307,131 +370,41 @@ Description of options:

"address": "mi4iBXbBsydtcc5yFmsff2zCFVX4XG7qJc", //Address to where block rewards are given

"blockRefreshInterval": 1000, //How often to poll RPC daemons for new blocks, in milliseconds
/* Block rewards go to the configured pool wallet address to later be paid out to miners,
except for a percentage that can go to, for example, pool operator(s) as pool fees or
to a donations address. Addresses or hashed public keys can be used. Here is an example
of rewards going to the main pool op, a pool co-owner, and NOMP donation. */
"rewardRecipients": {
"n37vuNFkXfk15uFnGoVyHZ6PYQxppD3QqK": 1.5, //1.5% goes to pool op
"mirj3LtZxbSTharhtXvotqtJXUY7ki5qfx": 0.5, //0.5% goes to a pool co-owner

/* How many milliseconds should have passed before new block transactions will trigger a new
job broadcast. */
"txRefreshInterval": 20000,

/* Some miner apps will consider the pool dead/offline if it doesn't receive any new jobs
for around a minute, so every time we broadcast jobs, set a timeout to rebroadcast
in this many seconds unless we find a new job. Set to zero or remove to disable this. */
"jobRebroadcastTimeout": 55,

//instanceId: 37, //Recommend not using this because a crypto-random one will be generated

/* Some attackers will create thousands of workers that use up all available socket connections,
usually the workers are zombies and don't submit shares after connecting. This feature
detects those and disconnects them. */
"connectionTimeout": 600, //Remove workers that haven't been in contact for this many seconds

/* Sometimes you want the block hashes even for shares that aren't block candidates. */
"emitInvalidBlockHashes": false,

/* We use proper maximum algorithm difficulties found in the coin daemon source code. Most
miners/pools that deal with scrypt use a guesstimated one that is about 5.86% off from the
actual one. So here we can set a tolerable threshold for if a share is slightly too low
due to mining apps using incorrect max diffs and this pool using correct max diffs. */
"shareVariancePercent": 10,

/* Enable for client IP addresses to be detected when using a load balancer with TCP proxy
protocol enabled, such as HAProxy with 'send-proxy' param:
http://haproxy.1wt.eu/download/1.5/doc/configuration.txt */
"tcpProxyProtocol": false,


/* This determines what to do with submitted shares (and stratum worker authentication).
You have two options:
1) Enable internal and disable mpos = this portal will handle all share payments.
2) Enable mpos and disable internal = shares will be inserted into MySQL database
for MPOS to process. */
"shareProcessing": {

"internal": {
"enabled": true,

/* When workers connect, to receive payments, their address must be used as the worker
name. If this option is true, on worker authentication, their address will be
verified via a validateaddress API call to the daemon. Miners with invalid addresses
will be rejected. */
"validateWorkerAddress": true,

/* Every this many seconds get submitted blocks from redis, use daemon RPC to check
their confirmation status, if confirmed then get shares from redis that contributed
to block and send out payments. */
"paymentInterval": 30,

/* Minimum number of coins that a miner must earn before sending payment. Typically,
a higher minimum means less transaction fees (you profit more) but miners see
payments less frequently (they dislike). Opposite for a lower minimum payment. */
"minimumPayment": 0.001,

/* Minimum number of coins to keep in pool wallet. It is recommended to deposit
at least this many coins into the pool wallet when first starting the pool. */
"minimumReserve": 10,

/* (2% default) What percent fee your pool takes from the block reward. */
"feePercent": 0.02,

/* Name of the daemon account to use when moving coin profit within daemon wallet. */
"feeCollectAccount": "feesCollected",

/* Your address that receives pool revenue from fees. */
"feeReceiveAddress": "LZz44iyF4zLCXJTU8RxztyyJZBntdS6fvv",

/* How many coins from fee revenue must accumulate on top of the
minimum reserve amount in order to trigger withdrawal to fee address. The higher
this threshold, the less of your profit goes to transaction fees. */
"feeWithdrawalThreshold": 5,

/* This daemon is used to send out payments. It MUST be for the daemon that owns the
configured 'address' that receives the block rewards, otherwise the daemon will not
be able to confirm blocks or send out payments. */
"daemon": {
"host": "127.0.0.1",
"port": 19332,
"user": "litecoinrpc",
"password": "testnet"
},

/* Redis database used for storing share and block submission data. */
"redis": {
"host": "127.0.0.1",
"port": 6379
}
},

/* Enable mpos and shares will be inserted into the share table in a MySQL database. You may
also want to use the "emitInvalidBlockHashes" option below if you require it. */
"mpos": {
"enabled": false,
"host": "127.0.0.1", //MySQL db host
"port": 3306, //MySQL db port
"user": "me", //MySQL db user
"password": "mypass", //MySQL db password
"database": "ltc", //MySQL db database name

/* Unregistered workers can automatically be registered (added to database) on stratum
worker authentication if this is true. */
"autoCreateWorker": false,

/* For when miners authenticate: set to "password" for both worker name and password to
be checked for in the database, set to "worker" for only the worker name to be checked, or
don't use this option (set to "none") for no auth checks */
"stratumAuth": "password"
}
/* 0.1% donation to NOMP. This pubkey can accept any type of coin, please leave this in
your config to help support NOMP development. */
"22851477d63a085dbc2398c8430af1c09e7343f6": 0.1
},

/* If a worker is submitting a high percentage of invalid shares we can temporarily ban their IP
to reduce system/network load. Also useful to fight against flooding attacks. If running
behind something like HAProxy be sure to enable 'tcpProxyProtocol', otherwise you'll end up
banning your own IP address (and therefore all workers). */
"banning": {
"paymentProcessing": {
"enabled": true,
"time": 600, //How many seconds to ban worker for
"invalidPercent": 50, //What percent of invalid shares triggers ban
"checkThreshold": 500, //Check invalid percent when this many shares have been submitted
"purgeInterval": 300 //Every this many seconds clear out the list of old bans

/* Every this many seconds get submitted blocks from redis, use daemon RPC to check
their confirmation status, if confirmed then get shares from redis that contributed
to block and send out payments. */
"paymentInterval": 30,

/* Minimum number of coins that a miner must earn before sending payment. Typically,
a higher minimum means less transaction fees (you profit more) but miners see
payments less frequently (they dislike). Opposite for a lower minimum payment. */
"minimumPayment": 0.01,

/* This daemon is used to send out payments. It MUST be for the daemon that owns the
configured 'address' that receives the block rewards, otherwise the daemon will not
be able to confirm blocks or send out payments. */
"daemon": {
"host": "127.0.0.1",
"port": 19332,
"user": "testuser",
"password": "testpass"
}
},

/* Each pool can have as many ports for your miners to connect to as you wish. Each port can
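A rough sketch (values and helper names assumed, not from this commit) of the payout decision the internal payment processor makes on each paymentInterval: a worker is paid only once their carried balance plus newly earned reward reaches the configured minimumPayment, otherwise the amount is kept as a balance for a later run:

```js
var magnitude = 100000000;                 // satoshis per coin, detected at startup
var minPaymentSatoshis = 0.01 * magnitude; // "minimumPayment": 0.01

// All bookkeeping is done in satoshis to avoid floating-point rounding issues.
function shouldPay(worker){
    var toSend = (worker.balance || 0) + (worker.reward || 0);
    return toSend >= minPaymentSatoshis;
}

console.log(shouldPay({balance: 400000, reward: 700000})); // true  (0.011 coins)
console.log(shouldPay({balance: 0, reward: 900000}));      // false (0.009 coins, carried over)
```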
@@ -456,28 +429,20 @@ Description of options:
}
},

/* For redundancy, recommended to have at least two daemon instances running in case one
drops out-of-sync or offline. */
/* More than one daemon instance can be set up in case one drops out-of-sync or dies. */
"daemons": [
{ //Main daemon instance
"host": "127.0.0.1",
"port": 19332,
"user": "litecoinrpc",
"password": "testnet"
},
{ //Backup daemon instance
"host": "127.0.0.1",
"port": 19344,
"user": "litecoinrpc",
"password": "testnet"
"user": "testuser",
"password": "testpass"
}
],


/* This allows the pool to connect to the daemon as a node peer to receive block updates.
It may be the most efficient way to get block updates (faster than polling, less
intensive than blocknotify script). It requires additional setup: the 'magic' field must
be exact (extracted from the coin source code). */
intensive than blocknotify script). It requires the additional field "peerMagic" in
the coin config. */
"p2p": {
"enabled": false,

@@ -490,13 +455,26 @@ Description of options:
/* If your coin daemon is new enough (i.e. not a shitcoin) then it will support a p2p
feature that prevents the daemon from spamming our peer node with unnecessary
transaction data. Assume it's supported but if you have problems try disabling it. */
"disableTransactions": true,
"disableTransactions": true
},

/* Enable this mode and shares will be inserted into a MySQL database. You may also want
to use the "emitInvalidBlockHashes" option below if you require it. The config options
"redis" and "paymentProcessing" will be ignored/unused if this is enabled. */
"mposMode": {
"enabled": false,
"host": "127.0.0.1", //MySQL db host
"port": 3306, //MySQL db port
"user": "me", //MySQL db user
"password": "mypass", //MySQL db password
"database": "ltc", //MySQL db database name

/* Magic value is different for main/testnet and for each coin. It is found in the daemon
source code as the pchMessageStart variable.
For example, litecoin mainnet magic: http://git.io/Bi8YFw
And for litecoin testnet magic: http://git.io/NXBYJA */
"magic": "fcc1b7dc"
/* Checks for valid password in database when miners connect. */
"checkPassword": true,

/* Unregistered workers can automatically be registered (added to database) on stratum
worker authentication if this is true. */
"autoCreateWorker": false
}
}

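For reference, a condensed sketch of the worker lookup MPOS mode performs on stratum auth; the SQL text is taken from libs/mposCompatibility.js in this commit, while the connection details shown here are placeholders:

```js
var mysql = require('mysql');

// Placeholder credentials; the real values come from the pool config's mposMode section.
var connection = mysql.createPool({ host: '127.0.0.1', user: 'me', password: 'mypass', database: 'ltc' });
var mposConfig = { checkPassword: true };

function handleAuth(workerName, password, authCallback){
    connection.query(
        'SELECT password FROM pool_worker WHERE username = LOWER(?)',
        [workerName.toLowerCase()],
        function(err, result){
            if (err || !result.length) return authCallback(false);          // unknown worker
            if (mposConfig.checkPassword && result[0].password !== password)
                return authCallback(false);                                 // wrong password
            authCallback(true);
        }
    );
}
```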
@@ -572,6 +550,7 @@ Credits
* [Alex Petrov / sysmanalex](https://github.com/sysmanalex) - contributed the pure C block notify script
* [svirusxxx](//github.com/svirusxxx) - sponsored development of MPOS mode
* [icecube45](//github.com/icecube45) - helping out with the repo wiki
* [Fcases](//github.com/Fcases) - ordered me a pizza <3
* Those that contributed to [node-stratum-pool](//github.com/zone117x/node-stratum-pool#credits)


@@ -1,5 +1,7 @@
{
"name": "Battlecoin",
"symbol": "BCX",
"algorithm": "sha256"
"algorithm": "sha256",
"peerMagic": "03e803e4",
"peerMagicTestnet": "cdf2c0ef"
}

@@ -1,5 +1,7 @@
{
"name": "Dogecoin",
"symbol": "DOGE",
"algorithm": "scrypt"
}
"algorithm": "scrypt",
"peerMagic": "c0c0c0c0",
"peerMagicTestnet": "fcc1b7dc"
}

5 coins/einsteinium.json Normal file
@@ -0,0 +1,5 @@
{
"name": "Einsteinium",
"symbol": "EMC2",
"algorithm": "scrypt"
}
@@ -1,5 +1,6 @@
{
"name": "Fastcoin",
"symbol": "FST",
"algorithm": "scrypt"
"algorithm": "scrypt",
"peerMagic": "fbc0b6db"
}
5 coins/feathercoin.json Normal file
@@ -0,0 +1,5 @@
{
"name": "Feathercoin",
"symbol": "FTC",
"algorithm": "scrypt"
}
7 coins/globalcoin.json Normal file
@@ -0,0 +1,7 @@
{
"name": "Globalcoin",
"symbol": "GLC",
"algorithm": "scrypt",
"peerMagic": "fcd9b7dd",
"peerMagicTestnet": "fbc0b8db"
}
@@ -1,5 +1,7 @@
{
"name": "GlobalDenomination",
"symbol": "GDN",
"algorithm": "x11"
"algorithm": "x11",
"peerMagic": "fec3b9de",
"peerMagicTestnet": "fec4bade"
}

7 coins/grandcoin.json Normal file
@@ -0,0 +1,7 @@
{
"name": "Grandcoin",
"symbol": "GDC",
"algorithm": "scrypt",
"peerMagic": "fdc1a5db",
"txMessages": true
}
5 coins/groestlcoin.json Normal file
@@ -0,0 +1,5 @@
{
"name": "GroestlCoin",
"symbol": "GRS",
"algorithm": "groestl"
}
@@ -1,6 +1,6 @@
{
"name": "Hirocoin",
"symbol": "hic",
"symbol": "HIRO",
"algorithm": "x11",
"mposDiffMultiplier": 256
}

@@ -1,5 +1,7 @@
{
"name": "Litecoin",
"symbol": "LTC",
"algorithm": "scrypt"
"algorithm": "scrypt",
"peerMagic": "fbc0b6db",
"peerMagicTestnet": "fcc1b7dc"
}
@@ -1,5 +1,7 @@
{
"name": "Lottocoin",
"symbol": "LOT",
"algorithm": "scrypt"
"algorithm": "scrypt",
"peerMagic": "a5fdb6c1",
"peerMagicTestnet": "fdc3b6f1"
}
5 coins/myriadcoin.json Normal file
@@ -0,0 +1,5 @@
{
"name": "Myriadcoin",
"symbol": "MYR",
"algorithm": "scrypt"
}
7 coins/saffroncoin.json Normal file
@@ -0,0 +1,7 @@
{
"name": "saffroncoin",
"symbol": "SFR",
"algorithm": "scrypt",
"peerMagic": "cf0567ea",
"peerMagicTestnet": "01f555a4"
}
@@ -1,5 +1,6 @@
{
"name": "Starcoin",
"symbol": "STR",
"algorithm": "scrypt"
"algorithm": "scrypt",
"peerMagic": "e4e8effd"
}

6 coins/ultimatecoin.json Normal file
@@ -0,0 +1,6 @@
{
"name": "Ultimatecoin",
"symbol": "ULT",
"algorithm": "scrypt",
"peerMagic": "f9f7c0e8"
}
@@ -1,5 +1,7 @@
{
"name": "Vertcoin",
"symbol": "VTC",
"algorithm": "scrypt-n"
"algorithm": "scrypt-n",
"peerMagic": "fabfb5da",
"peerMagicTestnet": "76657274"
}
@@ -1,5 +1,6 @@
{
"logLevel": "debug",
"logColors": true,

"cliPort": 17117,

@@ -8,8 +9,29 @@
"forks": "auto"
},

"defaultPoolConfigs": {
"blockRefreshInterval": 1000,
"jobRebroadcastTimeout": 55,
"connectionTimeout": 600,
"emitInvalidBlockHashes": false,
"validateWorkerUsername": true,
"tcpProxyProtocol": false,
"banning": {
"enabled": true,
"time": 600,
"invalidPercent": 50,
"checkThreshold": 500,
"purgeInterval": 300
},
"redis": {
"host": "127.0.0.1",
"port": 6379
}
},

"website": {
"enabled": true,
"host": "0.0.0.0",
"port": 80,
"stratumHost": "cryppit.com",
"stats": {
@@ -79,12 +101,5 @@
"usePoloniex": true,
"useCryptsy": true,
"useMintpal": true
},

"redisBlockNotifyListener": {
"enabled": false,
"redisPort": 6379,
"redisHost": "hostname",
"psubscribeKey": "newblocks:*"
}
}

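These JSON config files carry //-style comments, so they are not valid JSON as-is; per the init.js change below, NOMP strips the comments with JSON.minify before parsing. A small sketch of that loading step (the require of node-json-minify is an assumption here, included only to make the snippet self-contained):

```js
var fs = require('fs');
// Assumption for this sketch: JSON.minify is provided by the node-json-minify package.
JSON.minify = JSON.minify || require('node-json-minify');

var raw = fs.readFileSync('config.json', {encoding: 'utf8'});
var portalConfig = JSON.parse(JSON.minify(raw)); // comments stripped, then parsed
console.log(portalConfig.logLevel); // "debug"
```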
59 init.js
@@ -4,9 +4,10 @@ var os = require('os');
var cluster = require('cluster');

var async = require('async');
var extend = require('extend');

var PoolLogger = require('./libs/logUtil.js');
var CliListener = require('./libs/cliListener.js');
var RedisBlocknotifyListener = require('./libs/redisblocknotifyListener.js');
var PoolWorker = require('./libs/poolWorker.js');
var PaymentProcessor = require('./libs/paymentProcessor.js');
var Website = require('./libs/website.js');
@@ -26,7 +27,8 @@ var poolConfigs;


var logger = new PoolLogger({
logLevel: portalConfig.logLevel
logLevel: portalConfig.logLevel,
logColors: portalConfig.logColors
});


@@ -49,6 +51,15 @@ try{
if (cluster.isMaster)
logger.warning('POSIX', 'Connection Limit', '(Safe to ignore) Must be ran as root to increase resource limits');
}
finally {
// Find out which user used sudo through the environment variable
var uid = parseInt(process.env.SUDO_UID);
// Set our server's uid to that user
if (uid) {
process.setuid(uid);
logger.debug('POSIX', 'Connection Limit', 'Raised to 100K concurrent connections, now running as non-root user: ' + process.getuid());
}
}
}
catch(e){
if (cluster.isMaster)
@@ -56,7 +67,6 @@ catch(e){
}



if (cluster.isWorker){

switch(process.env.workerType){
@@ -132,6 +142,7 @@ var buildPoolConfigs = function(){

var coinProfile = JSON.parse(JSON.minify(fs.readFileSync(coinFilePath, {encoding: 'utf8'})));
poolOptions.coin = coinProfile;
poolOptions.coin.name = poolOptions.coin.name.toLowerCase();

if (poolOptions.coin.name in configs){

@@ -144,6 +155,19 @@ var buildPoolConfigs = function(){
return;
}

for (var option in portalConfig.defaultPoolConfigs){
if (!(option in poolOptions)){
var toCloneOption = portalConfig.defaultPoolConfigs[option];
var clonedOption = {};
if (toCloneOption.constructor === Object)
extend(true, clonedOption, toCloneOption);
else
clonedOption = toCloneOption;
poolOptions[option] = clonedOption;
}
}


configs[poolOptions.coin.name] = poolOptions;

if (!(coinProfile.algorithm in algos)){
@@ -161,13 +185,6 @@ var spawnPoolWorkers = function(){

Object.keys(poolConfigs).forEach(function(coin){
var p = poolConfigs[coin];
var internalEnabled = p.shareProcessing && p.shareProcessing.internal && p.shareProcessing.internal.enabled;
var mposEnabled = p.shareProcessing && p.shareProcessing.mpos && p.shareProcessing.mpos.enabled;

if (!internalEnabled && !mposEnabled){
logger.error('Master', coin, 'Share processing is not configured so a pool cannot be started for this coin.');
delete poolConfigs[coin];
}

if (!Array.isArray(p.daemons) || p.daemons.length < 1){
logger.error('Master', coin, 'No daemons configured so a pool cannot be started for this coin.');
@@ -340,31 +357,13 @@ var processCoinSwitchCommand = function(params, options, reply){
};


var startRedisBlockListener = function(){
//block notify options
//setup block notify here and use IPC to tell appropriate pools

if (!portalConfig.redisBlockNotifyListener.enabled) return;

var listener = new RedisBlocknotifyListener(portalConfig.redisBlockNotifyListener);
listener.on('log', function(text){
logger.debug('Master', 'blocknotify', text);
}).on('hash', function (message) {
var ipcMessage = {type:'blocknotify', coin: message.coin, hash: message.hash};
Object.keys(cluster.workers).forEach(function(id) {
cluster.workers[id].send(ipcMessage);
});
});
listener.start();
};


var startPaymentProcessor = function(){

var enabledForAny = false;
for (var pool in poolConfigs){
var p = poolConfigs[pool];
var enabled = p.enabled && p.shareProcessing && p.shareProcessing.internal && p.shareProcessing.internal.enabled;
var enabled = p.enabled && p.paymentProcessing && p.paymentProcessing.enabled;
if (enabled){
enabledForAny = true;
break;
@@ -435,8 +434,6 @@ var startProfitSwitch = function(){

startPaymentProcessor();

startRedisBlockListener();

startWebsite();

startProfitSwitch();

@@ -26,6 +26,9 @@ var listener = module.exports = function listener(port){
});
c.on('end', function () {

});
c.on('error', function () {

});
}
catch(e){

@@ -30,6 +30,7 @@ var PoolLogger = function (configuration) {


var logLevelInt = severityValues[configuration.logLevel];
var logColors = configuration.logColors;



@@ -45,16 +46,28 @@ var PoolLogger = function (configuration) {
}

var entryDesc = dateFormat(new Date(), 'yyyy-mm-dd HH:MM:ss') + ' [' + system + ']\t';
entryDesc = severityToColor(severity, entryDesc);
if (logColors) {
entryDesc = severityToColor(severity, entryDesc);

var logString =
entryDesc +
('[' + component + '] ').italic;
var logString =
entryDesc +
('[' + component + '] ').italic;

if (subcat)
logString += ('(' + subcat + ') ').bold.grey
if (subcat)
logString += ('(' + subcat + ') ').bold.grey;

logString += text.grey;
logString += text.grey;
}
else {
var logString =
entryDesc +
'[' + component + '] ';

if (subcat)
logString += '(' + subcat + ') ';

logString += text;
}

console.log(logString);

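A small usage sketch of the logger change above (assuming it is run from the repo root): with logColors disabled the entry is emitted as plain text, which keeps redirected log files free of ANSI color codes.

```js
var PoolLogger = require('./libs/logUtil.js');

var logger = new PoolLogger({
    logLevel: 'debug',
    logColors: false   // set to false when piping output to a log file
});

logger.debug('Master', 'Example', 'plain, uncolored log line');
```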
@@ -2,31 +2,30 @@ var mysql = require('mysql');
var cluster = require('cluster');
module.exports = function(logger, poolConfig){

var mposConfig = poolConfig.shareProcessing.mpos;
var mposConfig = poolConfig.mposMode;
var coin = poolConfig.coin.name;

var connection;
var connection = mysql.createPool({
host: mposConfig.host,
port: mposConfig.port,
user: mposConfig.user,
password: mposConfig.password,
database: mposConfig.database
});


var logIdentify = 'MySQL';
var logComponent = coin;

function connect(){

connection = mysql.createPool({
host: mposConfig.host,
port: mposConfig.port,
user: mposConfig.user,
password: mposConfig.password,
database: mposConfig.database
});


}
connect();

this.handleAuth = function(workerName, password, authCallback){


if (poolConfig.validateWorkerUsername !== true && mposConfig.autoCreateWorker !== true){
authCallback(true);
return;
}

connection.query(
'SELECT password FROM pool_worker WHERE username = LOWER(?)',
[workerName.toLowerCase()],
@@ -65,16 +64,15 @@ module.exports = function(logger, poolConfig){
}
}
);
}else{
}
else{
authCallback(false);
}
}
else if (mposConfig.stratumAuth === 'worker')
authCallback(true);
else if (result[0].password === password)
authCallback(true)
else
else if (mposConfig.checkPassword && result[0].password !== password)
authCallback(false);
else
authCallback(true);
}
);


@@ -13,9 +13,8 @@ module.exports = function(logger){

Object.keys(poolConfigs).forEach(function(coin) {
var poolOptions = poolConfigs[coin];
if (poolOptions.shareProcessing &&
poolOptions.shareProcessing.internal &&
poolOptions.shareProcessing.internal.enabled)
if (poolOptions.paymentProcessing &&
poolOptions.paymentProcessing.enabled)
enabledPools.push(coin);
});

@@ -27,14 +26,14 @@ module.exports = function(logger){
coins.forEach(function(coin){

var poolOptions = poolConfigs[coin];
var processingConfig = poolOptions.shareProcessing.internal;
var processingConfig = poolOptions.paymentProcessing;
var logSystem = 'Payments';
var logComponent = coin;

logger.debug(logSystem, logComponent, 'Payment processing setup to run every '
+ processingConfig.paymentInterval + ' second(s) with daemon ('
+ processingConfig.daemon.user + '@' + processingConfig.daemon.host + ':' + processingConfig.daemon.port
+ ') and redis (' + processingConfig.redis.host + ':' + processingConfig.redis.port + ')');
+ ') and redis (' + poolOptions.redis.host + ':' + poolOptions.redis.port + ')');

});
});
@@ -45,68 +44,66 @@ function SetupForPool(logger, poolOptions, setupFinished){


var coin = poolOptions.coin.name;
var processingConfig = poolOptions.shareProcessing.internal;
var processingConfig = poolOptions.paymentProcessing;

var logSystem = 'Payments';
var logComponent = coin;

var processingPayments = true;
var daemon = new Stratum.daemon.interface([processingConfig.daemon], function(severity, message){
logger[severity](logSystem, logComponent, message);
});
var redisClient = redis.createClient(poolOptions.redis.port, poolOptions.redis.host);

var daemon;
var redisClient;
var magnitude;
var minPaymentSatoshis;
var coinPrecision;

var paymentInterval;

async.parallel([

function(callback){
daemon = new Stratum.daemon.interface([processingConfig.daemon]);
daemon.once('online', function(){
daemon.cmd('validateaddress', [poolOptions.address], function(result){
if (!result[0].response || !result[0].response.ismine){
logger.error(logSystem, logComponent,
daemon.cmd('validateaddress', [poolOptions.address], function(result) {
if (result.error){
logger.error(logSystem, logComponent, 'Error with payment processing daemon ' + JSON.stringify(result.error));
callback(true);
}
else if (!result.response || !result.response.ismine) {
logger.error(logSystem, logComponent,
'Daemon does not own pool address - payment processing can not be done with this daemon, '
+ JSON.stringify(result[0].response));
return;
}
+ JSON.stringify(result.response));
callback(true);
}
else{
callback()
});
}).once('connectionFailed', function(error){
logger.error(logSystem, logComponent, 'Failed to connect to daemon for payment processing: config ' +
JSON.stringify(processingConfig.daemon) + ', error: ' +
JSON.stringify(error));
callback('Error connecting to deamon');
}).on('error', function(error){
logger.error(logSystem, logComponent, 'Daemon error ' + JSON.stringify(error));
}).init();
}
}, true);
},
function(callback){

redisClient = redis.createClient(processingConfig.redis.port, processingConfig.redis.host);
redisClient.on('ready', function(){
if (callback) {
callback();
callback = null;
daemon.cmd('getbalance', [], function(result){
if (result.error){
callback(true);
return;
}
logger.debug(logSystem, logComponent, 'Connected to redis at '
+ processingConfig.redis.host + ':' + processingConfig.redis.port + ' for payment processing');
}).on('end', function(){
logger.error(logSystem, logComponent, 'Connection to redis database as been ended');
}).once('error', function(){
if (callback) {
logger.error(logSystem, logComponent, 'Failed to connect to redis at '
+ processingConfig.redis.host + ':' + processingConfig.redis.port + ' for payment processing');
callback('Error connecting to redis');
callback = null;
try {
var d = result.data.split('result":')[1].split(',')[0].split('.')[1];
magnitude = parseInt('10' + new Array(d.length).join('0'));
minPaymentSatoshis = parseInt(processingConfig.minimumPayment * magnitude);
coinPrecision = magnitude.toString().length - 1;
callback();
}
catch(e){
logger.error(logSystem, logComponent, 'Error detecting number of satoshis in a coin, cannot do payment processing. Tried parsing: ' + result.data);
callback(true);
}
});

}, true, true);
}
], function(err){
if (err){
setupFinished(false);
return;
}
setInterval(function(){
paymentInterval = setInterval(function(){
try {
processPayments();
} catch(e){
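A worked example (illustrative values only) of the magnitude detection performed in the getbalance handler above:

```js
// A getbalance reply such as {"result":0.02500000,...} carries 8 decimal digits,
// which is what the string-splitting below counts.
var data = '{"result":0.02500000,"error":null,"id":1}';
var d = data.split('result":')[1].split(',')[0].split('.')[1];   // "02500000"
var magnitude = parseInt('10' + new Array(d.length).join('0'));  // 100000000 satoshis per coin
var coinPrecision = magnitude.toString().length - 1;             // 8
var minPaymentSatoshis = parseInt(0.01 * magnitude);             // 1000000 for "minimumPayment": 0.01
```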
@ -118,97 +115,78 @@ function SetupForPool(logger, poolOptions, setupFinished){
|
||||
});
|
||||
|
||||
|
||||
/* Call redis to check if previous sendmany and/or redis cleanout commands completed successfully.
|
||||
If sendmany worked fine but redis commands failed you HAVE TO run redis commands again
|
||||
(manually) to prevent double payments. If sendmany failed too you can safely delete
|
||||
coin + '_finalRedisCommands' string from redis to let pool calculate payments again. */
|
||||
function checkPreviousPaymentsStatus(callback) {
|
||||
redisClient.get(coin + '_finalRedisCommands', function(error, reply) {
|
||||
if (error){
|
||||
callback('Could not get finalRedisCommands - ' + JSON.stringify(error));
|
||||
return;
|
||||
}
|
||||
if (reply) {
|
||||
callback('Payments stopped because of the critical error - failed commands saved in '
|
||||
+ coin + '_finalRedisCommands redis set:\n' + reply);
|
||||
return;
|
||||
} else {
|
||||
/* There was no error in previous sendmany and/or redis cleanout commands
|
||||
so we can safely continue */
|
||||
processingPayments = false;
|
||||
callback();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
/* Number.toFixed gives us the decimal places we want, but as a string. parseFloat turns it back into number
|
||||
we don't care about trailing zeros in this case. */
|
||||
var toPrecision = function(value, precision){
|
||||
return parseFloat(value.toFixed(precision));
|
||||
var satoshisToCoins = function(satoshis){
|
||||
return parseFloat((satoshis / magnitude).toFixed(coinPrecision));
|
||||
};
|
||||
|
||||
var coinsToSatoshies = function(coins){
|
||||
return coins * magnitude;
|
||||
};
|
||||
|
||||
/* Deal with numbers in smallest possible units (satoshis) as much as possible. This greatly helps with accuracy
|
||||
when rounding and whatnot. When we are storing numbers for only humans to see, store in whole coin units. */
|
||||
|
||||
var processPayments = function(){
|
||||
|
||||
|
||||
var startPaymentProcess = Date.now();
|
||||
|
||||
async.waterfall([
|
||||
var timeSpentRPC = 0;
|
||||
var timeSpentRedis = 0;
|
||||
|
||||
function(callback) {
|
||||
if (processingPayments) {
|
||||
checkPreviousPaymentsStatus(function(error){
|
||||
if (error) {
|
||||
logger.error(logSystem, logComponent, error);
|
||||
callback('Check finished - previous payments processing error');
|
||||
return;
|
||||
}
|
||||
callback();
|
||||
});
|
||||
return;
|
||||
}
|
||||
callback();
|
||||
},
|
||||
var startTimeRedis;
|
||||
var startTimeRPC;
|
||||
|
||||
var startRedisTimer = function(){ startTimeRedis = Date.now() };
|
||||
var endRedisTimer = function(){ timeSpentRedis += Date.now() - startTimeRedis };
|
||||
|
||||
var startRPCTimer = function(){ startTimeRPC = Date.now(); };
|
||||
var endRPCTimer = function(){ timeSpentRPC += Date.now() - startTimeRedis };
|
||||
|
||||
async.waterfall([
|
||||
|
||||
/* Call redis to get an array of rounds - which are coinbase transactions and block heights from submitted
|
||||
blocks. */
|
||||
function(callback){
|
||||
|
||||
redisClient.smembers(coin + '_blocksPending', function(error, results){
|
||||
startRedisTimer();
|
||||
redisClient.multi([
|
||||
['hgetall', coin + ':balances'],
|
||||
['smembers', coin + ':blocksPending']
|
||||
]).exec(function(error, results){
|
||||
endRedisTimer();
|
||||
|
||||
if (error){
|
||||
logger.error(logSystem, logComponent, 'Could not get blocks from redis ' + JSON.stringify(error));
|
||||
callback('Check finished - redis error for getting blocks');
|
||||
return;
|
||||
}
|
||||
if (results.length === 0){
|
||||
callback('Check finished - no pending blocks in redis');
|
||||
callback(true);
|
||||
return;
|
||||
}
|
||||
|
||||
var rounds = results.map(function(r){
|
||||
|
||||
|
||||
var workers = {};
|
||||
for (var w in results[0]){
|
||||
workers[w] = {balance: coinsToSatoshies(parseInt(results[0][w]))};
|
||||
}
|
||||
|
||||
var rounds = results[1].map(function(r){
|
||||
var details = r.split(':');
|
||||
return {
|
||||
category: details[0].category,
|
||||
blockHash: details[0],
|
||||
txHash: details[1],
|
||||
height: details[2],
|
||||
reward: details[3],
|
||||
serialized: r
|
||||
};
|
||||
});
|
||||
|
||||
callback(null, rounds);
|
||||
callback(null, workers, rounds);
|
||||
});
|
||||
},
|
||||
|
||||
/* Does a batch rpc call to daemon with all the transaction hashes to see if they are confirmed yet.
|
||||
It also adds the block reward amount to the round object - which the daemon gives also gives us. */
|
||||
function(rounds, callback){
|
||||
function(workers, rounds, callback){
|
||||
|
||||
var batchRPCcommand = rounds.map(function(r){
|
||||
return ['gettransaction', [r.txHash]];
|
||||
@ -216,11 +194,14 @@ function SetupForPool(logger, poolOptions, setupFinished){
|
||||
|
||||
batchRPCcommand.push(['getaccount', [poolOptions.address]]);
|
||||
|
||||
startRPCTimer();
|
||||
daemon.batchCmd(batchRPCcommand, function(error, txDetails){
|
||||
endRPCTimer();
|
||||
|
||||
if (error || !txDetails){
|
||||
callback('Check finished - daemon rpc error with batch gettransactions ' +
|
||||
JSON.stringify(error));
|
||||
logger.error(logSystem, logComponent, 'Check finished - daemon rpc error with batch gettransactions '
|
||||
+ JSON.stringify(error));
|
||||
callback(true);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -235,71 +216,65 @@ function SetupForPool(logger, poolOptions, setupFinished){
|
||||
|
||||
var round = rounds[i];
|
||||
|
||||
if (tx.error && tx.error.code === -5 || round.blockHash !== tx.result.blockhash){
|
||||
|
||||
/* Block was dropped from coin daemon even after it happily accepted it earlier. */
|
||||
|
||||
//If we find another block at the same height then this block was drop-kicked orphaned
|
||||
var dropKicked = rounds.filter(function(r){
|
||||
return r.height === round.height && r.blockHash !== round.blockHash && r.category !== 'dropkicked';
|
||||
}).length > 0;
|
||||
|
||||
if (dropKicked){
|
||||
logger.warning(logSystem, logComponent,
|
||||
'A block was drop-kicked orphaned'
|
||||
+ ' - we found a better block at the same height, blockHash '
|
||||
+ round.blockHash + " round " + round.height);
|
||||
round.category = 'dropkicked';
|
||||
}
|
||||
else{
|
||||
/* We have no other blocks that match this height so convert to orphan in order for
|
||||
shares from the round to be rewarded. */
|
||||
round.category = 'orphan';
|
||||
}
|
||||
if (tx.error && tx.error.code === -5){
|
||||
logger.warning(logSystem, logComponent, 'Daemon reports invalid transaction: ' + round.txHash);
|
||||
round.category = 'kicked';
|
||||
return;
|
||||
}
|
||||
else if (!tx.result.details || (tx.result.details && tx.result.details.length === 0)){
|
||||
logger.warning(logSystem, logComponent, 'Daemon reports no details for transaction: ' + round.txHash);
|
||||
round.category = 'kicked';
|
||||
return;
|
||||
}
|
||||
else if (tx.error || !tx.result){
|
||||
logger.error(logSystem, logComponent,
|
||||
'Error with requesting transaction from block daemon: ' + JSON.stringify(tx));
|
||||
logger.error(logSystem, logComponent, 'Odd error with gettransaction ' + round.txHash + ' '
|
||||
+ JSON.stringify(tx));
|
||||
return;
|
||||
}
|
||||
else{
|
||||
round.category = tx.result.details[0].category;
|
||||
if (round.category === 'generate')
|
||||
round.amount = tx.result.amount;
|
||||
|
||||
var generationTx = tx.result.details.filter(function(tx){
|
||||
return tx.address === poolOptions.address;
|
||||
})[0];
|
||||
|
||||
|
||||
if (!generationTx && tx.result.details.length === 1){
|
||||
generationTx = tx.result.details[0];
|
||||
}
|
||||
|
||||
if (!generationTx){
|
||||
logger.error(logSystem, logComponent, 'Missing output details to pool address for transaction '
|
||||
+ round.txHash);
|
||||
return;
|
||||
}
|
||||
|
||||
round.category = generationTx.category;
|
||||
if (round.category === 'generate') {
|
||||
round.reward = generationTx.amount || generationTx.value;
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
var canDeleteShares = function(r){
|
||||
for (var i = 0; i < rounds.length; i++){
|
||||
var compareR = rounds[i];
|
||||
if ((compareR.height === r.height)
|
||||
&& (compareR.category !== 'kicked')
|
||||
&& (compareR.category !== 'orphan')
|
||||
&& (compareR.serialized !== r.serialized)){
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
};
|
||||
|
||||
var magnitude;
|
||||
|
||||
//Filter out all rounds that are immature (not confirmed or orphaned yet)
|
||||
rounds = rounds.filter(function(r){
|
||||
switch (r.category) {
|
||||
|
||||
case 'generate':
|
||||
/* Here we calculate the smallest unit in this coin's currency; the 'satoshi'.
|
||||
The rpc.getblocktemplate.amount tells us how much we get in satoshis, while the
|
||||
rpc.gettransaction.amount tells us how much we get in whole coin units. Therefore,
|
||||
we simply divide the two to get the magnitude. I don't know math, there is probably
|
||||
a better term than 'magnitude'. Sue me or do a pull request to fix it. */
|
||||
var roundMagnitude = Math.round(r.reward / r.amount);
|
||||
|
||||
if (!magnitude) {
|
||||
magnitude = roundMagnitude;
|
||||
|
||||
if (roundMagnitude % 10 !== 0)
|
||||
logger.error(logSystem, logComponent,
|
||||
'Satosihis in coin is not divisible by 10 which is very odd');
|
||||
}
|
||||
else if (magnitude != roundMagnitude) {
|
||||
/* Magnitude for a coin should ALWAYS be the same. For BTC and most coins there are
|
||||
100,000,000 satoshis in one coin unit. */
|
||||
logger.error(logSystem, logComponent,
|
||||
'Magnitude in a round was different than in another round. HUGE PROBLEM.');
|
||||
}
|
||||
return true;
|
||||
|
||||
case 'dropkicked':
|
||||
case 'orphan':
|
||||
case 'kicked':
|
||||
r.canDeleteShares = canDeleteShares(r);
|
||||
case 'generate':
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
@ -307,35 +282,30 @@ function SetupForPool(logger, poolOptions, setupFinished){
|
||||
});
|
||||
|
||||
|
||||
if (rounds.length === 0){
|
||||
callback('Check finished - no confirmed or orphaned blocks found');
|
||||
}
|
||||
else{
|
||||
callback(null, rounds, magnitude, addressAccount);
|
||||
}
|
||||
callback(null, workers, rounds, addressAccount);
|
||||
|
||||
});
|
||||
},
|
||||
|
||||
|
||||
/* Does a batch redis call to get shares contributed to each round. Then calculates the reward
|
||||
amount owned to each miner for each round. */
|
||||
function(rounds, magnitude, addressAccount, callback){
|
||||
function(workers, rounds, addressAccount, callback){
|
||||
|
||||
|
||||
var shareLookups = rounds.map(function(r){
|
||||
return ['hgetall', coin + '_shares:round' + r.height]
|
||||
return ['hgetall', coin + ':shares:round' + r.height]
|
||||
});
|
||||
|
||||
|
||||
startRedisTimer();
|
||||
redisClient.multi(shareLookups).exec(function(error, allWorkerShares){
|
||||
endRedisTimer();
|
||||
|
||||
if (error){
|
||||
callback('Check finished - redis error with multi get rounds share')
|
||||
callback('Check finished - redis error with multi get rounds share');
|
||||
return;
|
||||
}
|
||||
|
||||
var orphanMergeCommands = [];
|
||||
var workerRewards = {};
|
||||
|
||||
|
||||
rounds.forEach(function(round, i){
|
||||
var workerShares = allWorkerShares[i];
|
||||
@ -347,287 +317,202 @@ function SetupForPool(logger, poolOptions, setupFinished){
|
||||
}
|
||||
|
||||
switch (round.category){
|
||||
case 'kicked':
|
||||
case 'orphan':
|
||||
/* Each block that gets orphaned, all the shares go into the current round so that
|
||||
miners still get a reward for their work. This seems unfair to those that just
|
||||
started mining during this current round, but over time it balances out and rewards
|
||||
loyal miners. */
|
||||
Object.keys(workerShares).forEach(function(worker){
|
||||
orphanMergeCommands.push(['hincrby', coin + '_shares:roundCurrent',
|
||||
worker, workerShares[worker]]);
|
||||
});
|
||||
round.workerShares = workerShares;
|
||||
break;
|
||||
|
||||
case 'generate':
|
||||
/* We found a confirmed block! Now get the reward for it and calculate how much
|
||||
we owe each miner based on the shares they submitted during that block round. */
|
||||
var reward = round.reward * (1 - processingConfig.feePercent);
|
||||
var reward = parseInt(round.reward * magnitude);
|
||||
|
||||
var totalShares = Object.keys(workerShares).reduce(function(p, c){
|
||||
return p + parseFloat(workerShares[c])
|
||||
}, 0);
|
||||
|
||||
for (var worker in workerShares){
|
||||
var percent = parseFloat(workerShares[worker]) / totalShares;
|
||||
for (var workerAddress in workerShares){
|
||||
var percent = parseFloat(workerShares[workerAddress]) / totalShares;
|
||||
var workerRewardTotal = Math.floor(reward * percent);
|
||||
if (!(worker in workerRewards)) workerRewards[worker] = 0;
|
||||
workerRewards[worker] += workerRewardTotal;
|
||||
var worker = workers[workerAddress] = (workers[workerAddress] || {});
|
||||
worker.reward = (worker.reward || 0) + workerRewardTotal;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
callback(null, rounds, magnitude, workerRewards, orphanMergeCommands, addressAccount);
|
||||
callback(null, workers, rounds, addressAccount);
|
||||
});
|
||||
},
|
||||
|
||||
|
||||
/* Does a batch call to redis to get worker existing balances from coin_balances*/
|
||||
function(rounds, magnitude, workerRewards, orphanMergeCommands, addressAccount, callback){
|
||||
|
||||
var workers = Object.keys(workerRewards);
|
||||
|
||||
redisClient.hmget([coin + '_balances'].concat(workers), function(error, results){
|
||||
if (error && workers.length !== 0){
|
||||
callback('Check finished - redis error with multi get balances ' + JSON.stringify(error));
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
var workerBalances = {};
|
||||
|
||||
for (var i = 0; i < workers.length; i++){
|
||||
workerBalances[workers[i]] = (parseInt(results[i]) || 0);
|
||||
}
|
||||
|
||||
|
||||
callback(null, rounds, magnitude, workerRewards, orphanMergeCommands, workerBalances, addressAccount);
|
||||
});
|
||||
|
||||
},
|
||||
|
||||
|
||||
/* Calculate if any payments are ready to be sent and trigger them sending
|
||||
Get balance different for each address and pass it along as object of latest balances such as
|
||||
{worker1: balance1, worker2, balance2}
|
||||
when deciding the sent balance, it the difference should be -1*amount they had in db,
|
||||
if not sending the balance, the differnce should be +(the amount they earned this round)
|
||||
*/
|
||||
function(rounds, magnitude, workerRewards, orphanMergeCommands, workerBalances, addressAccount, callback){
|
||||
function(workers, rounds, addressAccount, callback) {
|
||||
|
||||
//number of satoshis in a single coin unit - this can be different for coins so we calculate it :)
|
||||
|
||||
daemon.cmd('getbalance', [addressAccount || ''], function(results){
|
||||
|
||||
var totalBalance = results[0].response * magnitude;
|
||||
var toBePaid = 0;
|
||||
var workerPayments = {};
|
||||
|
||||
|
||||
var balanceUpdateCommands = [];
|
||||
var workerPayoutsCommand = [];
|
||||
|
||||
/* Here we add up all workers' previous unpaid balances plus their current rewards as we are
|
||||
about to check if they reach the payout threshold. */
|
||||
for (var worker in workerRewards){
|
||||
workerPayments[worker] = ((workerPayments[worker] || 0) + workerRewards[worker]);
|
||||
}
|
||||
for (var worker in workerBalances){
|
||||
workerPayments[worker] = ((workerPayments[worker] || 0) + workerBalances[worker]);
|
||||
}
|
||||
|
||||
/* Here we check if any of the workers reached their payout threshold, or delete them from the
|
||||
pending payment ledger (the workerPayments object). */
|
||||
if (Object.keys(workerPayments).length > 0){
|
||||
var coinPrecision = magnitude.toString().length - 1;
|
||||
for (var worker in workerPayments){
|
||||
if (workerPayments[worker] < processingConfig.minimumPayment * magnitude){
|
||||
/* The workers total earnings (balance + current reward) was not enough to warrant
|
||||
a transaction, so we will store their balance in the database. Next time they
|
||||
are rewarded it might reach the payout threshold. */
|
||||
balanceUpdateCommands.push([
|
||||
'hincrby',
|
||||
coin + '_balances',
|
||||
worker,
|
||||
workerRewards[worker]
|
||||
]);
|
||||
delete workerPayments[worker];
|
||||
}
|
||||
else{
|
||||
//If worker had a balance that is about to be paid out, subtract it from the database
|
||||
if (workerBalances[worker] !== 0){
|
||||
balanceUpdateCommands.push([
|
||||
'hincrby',
|
||||
coin + '_balances',
|
||||
worker,
|
||||
-1 * workerBalances[worker]
|
||||
]);
|
||||
}
|
||||
var rewardInPrecision = (workerRewards[worker] / magnitude).toFixed(coinPrecision);
|
||||
workerPayoutsCommand.push(['hincrbyfloat', coin + '_payouts', worker, rewardInPrecision]);
|
||||
toBePaid += workerPayments[worker];
|
||||
}
|
||||
var trySend = function (withholdPercent) {
|
||||
var addressAmounts = {};
|
||||
var totalSent = 0;
|
||||
for (var w in workers) {
|
||||
var worker = workers[w];
|
||||
worker.balance = worker.balance || 0;
|
||||
worker.reward = worker.reward || 0;
|
||||
var toSend = (worker.balance + worker.reward) * (1 - withholdPercent);
|
||||
if (toSend >= minPaymentSatoshis) {
|
||||
totalSent += toSend;
|
||||
var address = worker.address = (worker.address || getProperAddress(w));
|
||||
worker.sent = addressAmounts[address] = satoshisToCoins(toSend);
|
||||
worker.balanceChange = Math.min(worker.balance, toSend) * -1;
|
||||
}
|
||||
else {
|
||||
worker.balanceChange = Math.max(toSend - worker.balance, 0);
|
||||
worker.sent = 0;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// txfee included in feeAmountToBeCollected
|
||||
var leftOver = toBePaid / (1 - processingConfig.feePercent);
|
||||
var feeAmountToBeCollected = toPrecision(leftOver * processingConfig.feePercent, coinPrecision);
|
||||
var balanceLeftOver = totalBalance - toBePaid - feeAmountToBeCollected;
|
||||
var minReserveSatoshis = processingConfig.minimumReserve * magnitude;
|
||||
if (balanceLeftOver < minReserveSatoshis){
|
||||
/* TODO: Need to convert all these variables into whole coin units before displaying because
|
||||
humans aren't good at reading satoshi units. */
|
||||
callback('Check finished - payments would wipe out minimum reserve, tried to pay out ' +
|
||||
(toBePaid/magnitude) + ' and collect ' + (feeAmountToBeCollected/magnitude) + ' as fees' +
|
||||
' but only have ' + (totalBalance/magnitude) + '. Left over balance would be ' + (balanceLeftOver/magnitude) +
|
||||
', needs to be at least ' + (minReserveSatoshis/magnitude));
|
||||
if (Object.keys(addressAmounts).length === 0){
|
||||
callback(null, workers, rounds);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
/* Move pending blocks into either orphan for confirmed sets, and delete their no longer
|
||||
required round/shares data. */
|
||||
var movePendingCommands = [];
|
||||
var roundsToDelete = [];
|
||||
rounds.forEach(function(r){
|
||||
|
||||
var destinationSet = (function(){
|
||||
switch(r.category){
|
||||
case 'orphan': return '_blocksOrphaned';
|
||||
case 'generate': return '_blocksConfirmed';
|
||||
case 'dropkicked': return '_blocksDropKicked';
|
||||
}
|
||||
})();
|
||||
movePendingCommands.push(['smove', coin + '_blocksPending', coin + destinationSet, r.serialized]);
|
||||
if (r.category === 'generate')
|
||||
roundsToDelete.push(coin + '_shares:round' + r.height)
|
||||
});
|
||||
|
||||
var finalRedisCommands = [];
|
||||
|
||||
if (movePendingCommands.length > 0)
|
||||
finalRedisCommands = finalRedisCommands.concat(movePendingCommands);
|
||||
|
||||
if (orphanMergeCommands.length > 0)
|
||||
finalRedisCommands = finalRedisCommands.concat(orphanMergeCommands);
|
||||
|
||||
if (balanceUpdateCommands.length > 0)
|
||||
finalRedisCommands = finalRedisCommands.concat(balanceUpdateCommands);
|
||||
|
||||
if (workerPayoutsCommand.length > 0)
|
||||
finalRedisCommands = finalRedisCommands.concat(workerPayoutsCommand);
|
||||
|
||||
if (roundsToDelete.length > 0)
|
||||
finalRedisCommands.push(['del'].concat(roundsToDelete));
|
||||
|
||||
if (toBePaid !== 0)
|
||||
finalRedisCommands.push(['hincrbyfloat', coin + '_stats', 'totalPaid', (toBePaid / magnitude).toFixed(coinPrecision)]);
|
||||
|
||||
finalRedisCommands.push(['del', coin + '_finalRedisCommands']);
|
||||
|
||||
finalRedisCommands.push(['bgsave']);
|
||||
|
||||
callback(null, magnitude, workerPayments, finalRedisCommands, addressAccount);
|
||||
|
||||
});
|
||||
},
|
||||
|
||||
function(magnitude, workerPayments, finalRedisCommands, addressAccount, callback) {
|
||||
/* Save final redis cleanout commands in case something goes wrong during payments */
|
||||
redisClient.set(coin + '_finalRedisCommands', JSON.stringify(finalRedisCommands), function(error, reply) {
|
||||
if (error){
|
||||
callback('Check finished - error with saving finalRedisCommands' + JSON.stringify(error));
|
||||
return;
|
||||
}
|
||||
callback(null, magnitude, workerPayments, finalRedisCommands, addressAccount);
|
||||
});
|
||||
},

function(magnitude, workerPayments, finalRedisCommands, addressAccount, callback){

//This does the final all-or-nothing atomic transaction if the block daemon sent payments
var finalizeRedisTx = function(){
redisClient.multi(finalRedisCommands).exec(function(error, results){
if (error){
callback('Error with final redis commands for cleaning up ' + JSON.stringify(error));
return;
daemon.cmd('sendmany', [addressAccount || '', addressAmounts], function (result) {
//Check if payments failed because wallet doesn't have enough coins to pay for tx fees
if (result.error && result.error.code === -6) {
var higherPercent = withholdPercent + 0.01;
logger.warning(logSystem, logComponent, 'Not enough funds to cover the tx fees for sending out payments, decreasing rewards by '
+ (higherPercent * 100) + '% and retrying');
trySend(higherPercent);
}
processingPayments = false;
logger.debug(logSystem, logComponent, 'Payments processing performed an interval');
else if (result.error) {
logger.error(logSystem, logComponent, 'Error trying to send payments with RPC sendmany '
+ JSON.stringify(result.error));
callback(true);
}
else {
logger.debug(logSystem, logComponent, 'Sent out a total of ' + (totalSent / magnitude)
+ ' to ' + Object.keys(addressAmounts).length + ' workers');
if (withholdPercent > 0) {
logger.warning(logSystem, logComponent, 'Had to withhold ' + (withholdPercent * 100)
+ '% of reward from miners to cover transaction fees. '
+ 'Fund pool wallet with coins to prevent this from happening');
}
callback(null, workers, rounds);
}
}, true, true);
};
trySend(0);

},
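
The `trySend(0)` call at the bottom drives a retry loop that is only partially visible in this merged diff: when `sendmany` fails with error code -6 (insufficient funds), the payout is retried with a slightly larger `withholdPercent` until the wallet can also cover the transaction fee. A condensed sketch of the pattern; `sendPayments` and `owedSatoshis` are hypothetical stand-ins, not NOMP names:

```javascript
// Hypothetical stand-ins for the daemon call and the worker balances.
var owedSatoshis = { 'mAddrOne': 500000000, 'mAddrTwo': 300000000 };
function sendPayments(amounts, cb){ cb(null); } // stub: always succeeds

function trySend(withholdPercent){
    // Shrink every payout by the withheld fraction to leave room for tx fees.
    var amounts = {};
    Object.keys(owedSatoshis).forEach(function(addr){
        amounts[addr] = owedSatoshis[addr] * (1 - withholdPercent);
    });
    sendPayments(amounts, function(err){
        if (err && err.code === -6)
            trySend(withholdPercent + 0.01); // withhold 1% more and retry
        else if (err)
            console.error('payment failed', err);
        else
            console.log('paid with', (withholdPercent * 100) + '% withheld');
    });
}
trySend(0);
```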
function(workers, rounds, callback){

var totalPaid = 0;

var balanceUpdateCommands = [];
var workerPayoutsCommand = [];

for (var w in workers) {
var worker = workers[w];
if (worker.balanceChange !== 0){
balanceUpdateCommands.push([
'hincrbyfloat',
coin + ':balances',
w,
satoshisToCoins(worker.balanceChange)
]);
}
if (worker.sent !== 0){
workerPayoutsCommand.push(['hincrbyfloat', coin + ':payouts', w, worker.sent]);
totalPaid += worker.sent;
}
}
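
The loop above records balances and payouts in whole-coin units via `satoshisToCoins`, which is not shown in this hunk. A plausible sketch of the conversion helpers, assuming `magnitude` is the coin's satoshis-per-unit (e.g. 10^8):

```javascript
// Sketch of the unit-conversion helpers this code relies on (assumed shapes).
var magnitude = 100000000;                           // 10^8 satoshis per coin
var coinPrecision = magnitude.toString().length - 1; // 8 decimal places

var satoshisToCoins = function(satoshis){
    return parseFloat((satoshis / magnitude).toFixed(coinPrecision));
};

var coinsToSatoshis = function(coins){
    return coins * magnitude;
};

console.log(satoshisToCoins(12345678)); // 0.12345678
console.log(coinsToSatoshis(0.5));      // 50000000
```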



var movePendingCommands = [];
var roundsToDelete = [];
var orphanMergeCommands = [];

var moveSharesToCurrent = function(r){
var workerShares = r.workerShares;
Object.keys(workerShares).forEach(function(worker){
orphanMergeCommands.push(['hincrby', coin + ':shares:roundCurrent',
worker, workerShares[worker]]);
});
};

if (Object.keys(workerPayments).length === 0){
finalizeRedisTx();
}
else{
rounds.forEach(function(r){

//This is how many decimal places to round a coin down to
var coinPrecision = magnitude.toString().length - 1;
var addressAmounts = {};
var totalAmountUnits = 0;
for (var address in workerPayments){
var coinUnits = toPrecision(workerPayments[address] / magnitude, coinPrecision);
var properAddress = getProperAddress(address);
if (!properAddress){
logger.error(logSystem, logComponent, 'Could not convert pubkey ' + address + ' into address');
continue;
}
addressAmounts[properAddress] = coinUnits;
totalAmountUnits += coinUnits;
switch(r.category){
case 'kicked':
movePendingCommands.push(['smove', coin + ':blocksPending', coin + ':blocksKicked', r.serialized]);
case 'orphan':
movePendingCommands.push(['smove', coin + ':blocksPending', coin + ':blocksOrphaned', r.serialized]);
if (r.canDeleteShares){
moveSharesToCurrent(r);
roundsToDelete.push(coin + ':shares:round' + r.height);
}
return;
case 'generate':
movePendingCommands.push(['smove', coin + ':blocksPending', coin + ':blocksConfirmed', r.serialized]);
roundsToDelete.push(coin + ':shares:round' + r.height);
return;
}

logger.debug(logSystem, logComponent, 'Payments to be sent to: ' + JSON.stringify(addressAmounts));
});

processingPayments = true;
daemon.cmd('sendmany', [addressAccount || '', addressAmounts], function(results){
var finalRedisCommands = [];

if (results[0].error){
callback('Check finished - error with sendmany ' + JSON.stringify(results[0].error));
return;
}
if (movePendingCommands.length > 0)
finalRedisCommands = finalRedisCommands.concat(movePendingCommands);

finalizeRedisTx();
if (orphanMergeCommands.length > 0)
finalRedisCommands = finalRedisCommands.concat(orphanMergeCommands);

var totalWorkers = Object.keys(workerPayments).length;
if (balanceUpdateCommands.length > 0)
finalRedisCommands = finalRedisCommands.concat(balanceUpdateCommands);

logger.debug(logSystem, logComponent, 'Payments sent, a total of ' + totalAmountUnits
+ ' ' + poolOptions.coin.symbol + ' was sent to ' + totalWorkers + ' miners');
if (workerPayoutsCommand.length > 0)
finalRedisCommands = finalRedisCommands.concat(workerPayoutsCommand);

daemon.cmd('gettransaction', [results[0].response], function(results){
if (results[0].error){
callback('Check finished - error with gettransaction ' + JSON.stringify(results[0].error));
return;
}
var feeAmountUnits = parseFloat((totalAmountUnits / (1 - processingConfig.feePercent) * processingConfig.feePercent).toFixed(coinPrecision));
var poolFees = feeAmountUnits - results[0].response.fee;
daemon.cmd('move', [addressAccount || '', processingConfig.feeCollectAccount, poolFees], function(results){
if (results[0].error){
callback('Check finished - error with move ' + JSON.stringify(results[0].error));
return;
}
callback(null, poolFees + ' ' + poolOptions.coin.symbol + ' collected as pool fee');
});
});
});
if (roundsToDelete.length > 0)
finalRedisCommands.push(['del'].concat(roundsToDelete));

if (totalPaid !== 0)
finalRedisCommands.push(['hincrbyfloat', coin + ':stats', 'totalPaid', totalPaid]);

if (finalRedisCommands.length === 0){
callback();
return;
}
}
], function(error, result){

startRedisTimer();
redisClient.multi(finalRedisCommands).exec(function(error, results){
endRedisTimer();
if (error){
clearInterval(paymentInterval);
logger.error(logSystem, logComponent,
'Payments sent but could not update redis. ' + JSON.stringify(error)
+ ' Disabling payment processing to prevent possible double-payouts. The redis commands in '
+ coin + '_finalRedisCommands.txt must be run manually');
fs.writeFile(coin + '_finalRedisCommands.txt', JSON.stringify(finalRedisCommands), function(err){
logger.error('Could not write finalRedisCommands.txt - these commands must be re-run manually.');
});
}
callback();
});
}

], function(){

var paymentProcessTime = Date.now() - startPaymentProcess;
logger.debug(logSystem, logComponent, 'Finished interval - time spent: '
+ paymentProcessTime + 'ms total, ' + timeSpentRedis + 'ms redis, '
+ timeSpentRPC + 'ms daemon RPC');

if (error)
logger.debug(logSystem, logComponent, '[Took ' + paymentProcessTime + 'ms] ' + error);

else{
logger.debug(logSystem, logComponent, '[' + paymentProcessTime + 'ms] ' + result);
// not sure if we need some time to let daemon update the wallet balance
setTimeout(withdrawalProfit, 1000);
}
});
};
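
For orientation: the numbered `function(...){...},` blocks above are stages of one `async.waterfall` call. Each stage hands its results to the next, any stage can abort by calling back with an error, and both paths land in the final handler that prints the timing summary. A stripped-down sketch of that shape (stage names are descriptive only, not NOMP's):

```javascript
// Sketch of the waterfall shape used by the payment processor.
var async = require('async');

async.waterfall([
    function validateAddress(callback){
        callback(null, { magnitude: 100000000 });     // -> next stage
    },
    function calculatePayouts(data, callback){
        callback(null, data, [/* redis commands */]); // -> next stage
    },
    function sendPayments(data, commands, callback){
        callback(null, 'paid');                       // or callback(err)
    }
], function(error, result){
    // Runs once: receives either the first error or the last stage's result.
    console.log(error || result);
});
```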

@ -639,37 +524,5 @@ function SetupForPool(logger, poolOptions, setupFinished){
else return address;
};

var withdrawalProfit = function(){

if (!processingConfig.feeWithdrawalThreshold) return;

logger.debug(logSystem, logComponent, 'Profit withdrawal started');
daemon.cmd('getbalance', [processingConfig.feeCollectAccount], function(results){

// We have to pay some tx fee here too, but we shouldn't care about it too much as long as the fee is less
// than the minimumReserve value: even if the feeCollectAccount ends up with a negative balance, the
// total wallet balance will stay positive and feeCollectAccount will be refilled during the next payment run.
var withdrawalAmount = results[0].response;

if (withdrawalAmount < processingConfig.feeWithdrawalThreshold){
logger.debug(logSystem, logComponent, 'Not enough profit to withdraw yet');
}
else{

var withdrawal = {};
withdrawal[processingConfig.feeReceiveAddress] = withdrawalAmount;

daemon.cmd('sendmany', [processingConfig.feeCollectAccount, withdrawal], function(results){
if (results[0].error){
logger.debug(logSystem, logComponent, 'Profit withdrawal of ' + withdrawalAmount + ' failed - error with sendmany '
+ JSON.stringify(results[0].error));
return;
}
logger.debug(logSystem, logComponent, 'Profit sent, a total of ' + withdrawalAmount
+ ' ' + poolOptions.coin.symbol + ' was sent to ' + processingConfig.feeReceiveAddress);
});
}
});

};
}
}

@ -108,10 +108,8 @@ module.exports = function(logger){
diff: function(){}
};

var shareProcessing = poolOptions.shareProcessing;

//Functions required for MPOS compatibility
if (shareProcessing && shareProcessing.mpos && shareProcessing.mpos.enabled){
if (poolOptions.mposMode && poolOptions.mposMode.enabled){
var mposCompat = new MposCompatibility(logger, poolOptions);

handlers.auth = function(port, workerName, password, authCallback){
@ -128,38 +126,32 @@ module.exports = function(logger){
}

//Functions required for internal payment processing
else if (shareProcessing && shareProcessing.internal && shareProcessing.internal.enabled){
else{

var shareProcessor = new ShareProcessor(logger, poolOptions);

handlers.auth = function(port, workerName, password, authCallback){
if (shareProcessing.internal.validateWorkerAddress !== true)
if (poolOptions.validateWorkerUsername !== true)
authCallback(true);
else {
port = port.toString();
if (portalConfig.switching) {
for (var switchName in portalConfig.switching) {
if (portalConfig.switching[switchName].enabled && Object.keys(portalConfig.switching[switchName].ports).indexOf(port) !== -1) {
if (workerName.length === 40) {
try {
new Buffer(workerName, 'hex');
authCallback(true);
}
catch (e) {
authCallback(false);
}
}
else
authCallback(false);
return;
}
if (workerName.length === 40) {
try {
new Buffer(workerName, 'hex');
authCallback(true);
}
catch (e) {
authCallback(false);
}
}
else {
pool.daemon.cmd('validateaddress', [workerName], function (results) {
var isValid = results.filter(function (r) {
return r.response.isvalid
}).length > 0;
authCallback(isValid);
});
}

pool.daemon.cmd('validateaddress', [workerName], function(results){
var isValid = results.filter(function(r){return r.response.isvalid}).length > 0;
authCallback(isValid);
});
}
};
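
The fallback branch above authenticates a worker by asking the coin daemon whether the username is a valid address. In isolation, the accept/reject decision reduces to filtering the batched `validateaddress` responses; a self-contained sketch (the `{ response: { isvalid } }` shape mirrors the code above):

```javascript
// Sketch: deciding auth from a batch of validateaddress responses, where
// each entry has the shape { response: { isvalid: true/false } }.
function authFromValidateAddress(results, authCallback){
    // Accept the worker if any daemon instance reports a valid address.
    var isValid = results.filter(function(r){
        return r.response && r.response.isvalid;
    }).length > 0;
    authCallback(isValid);
}

// Example: one daemon says valid, so the worker is authorized.
authFromValidateAddress(
    [{ response: { isvalid: true } }],
    function(ok){ console.log('authorized:', ok); }
);
```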

@ -238,10 +230,7 @@ module.exports = function(logger){
});*/

redisClient.hgetall("proxyState", function(error, obj) {
if (error || obj == null) {
//logger.debug(logSystem, logComponent, logSubCat, 'No last proxy state found in redis');
}
else {
if (!error && obj) {
proxyState = obj;
logger.debug(logSystem, logComponent, logSubCat, 'Last proxy state loaded from redis');
}
@ -258,64 +247,49 @@ module.exports = function(logger){

var algorithm = portalConfig.switching[switchName].algorithm;

if (portalConfig.switching[switchName].enabled === true) {
var initialPool = proxyState.hasOwnProperty(algorithm) ? proxyState[algorithm] : _this.getFirstPoolForAlgorithm(algorithm);
proxySwitch[switchName] = {
algorithm: algorithm,
ports: portalConfig.switching[switchName].ports,
currentPool: initialPool,
servers: []
};
if (!portalConfig.switching[switchName].enabled) return;


// Copy diff and vardiff configuration into pools that match our algorithm so the stratum server can pick them up
//
// Note: This seems a bit wonky and brittle - better if proxy just used the diff config of the port it was
// routed into instead.
//
/*if (portalConfig.proxy[algorithm].hasOwnProperty('varDiff')) {
proxySwitch[algorithm].varDiff = new Stratum.varDiff(proxySwitch[algorithm].port, portalConfig.proxy[algorithm].varDiff);
proxySwitch[algorithm].diff = portalConfig.proxy[algorithm].diff;
}*/
var initialPool = proxyState.hasOwnProperty(algorithm) ? proxyState[algorithm] : _this.getFirstPoolForAlgorithm(algorithm);
proxySwitch[switchName] = {
algorithm: algorithm,
ports: portalConfig.switching[switchName].ports,
currentPool: initialPool,
servers: []
};


Object.keys(pools).forEach(function (coinName) {
var p = pools[coinName];
if (poolConfigs[coinName].coin.algorithm === algorithm) {
for (var port in portalConfig.switching[switchName].ports) {
if (portalConfig.switching[switchName].ports[port].varDiff)
p.setVarDiff(port, portalConfig.switching[switchName].ports[port].varDiff);
}
Object.keys(pools).forEach(function (coinName) {
var p = pools[coinName];
if (poolConfigs[coinName].coin.algorithm === algorithm) {
for (var port in portalConfig.switching[switchName].ports) {
if (portalConfig.switching[switchName].ports[port].varDiff)
p.setVarDiff(port, portalConfig.switching[switchName].ports[port].varDiff);
}
}
});


Object.keys(proxySwitch[switchName].ports).forEach(function(port){
var f = net.createServer(function(socket) {
var currentPool = proxySwitch[switchName].currentPool;

logger.debug(logSystem, 'Connect', logSubCat, 'Connection to '
+ switchName + ' from '
+ socket.remoteAddress + ' on '
+ port + ' routing to ' + currentPool);

pools[currentPool].getStratumServer().handleNewClient(socket);

}).listen(parseInt(port), function() {
logger.debug(logSystem, logComponent, logSubCat, 'Switching "' + switchName
+ '" listening for ' + algorithm
+ ' on port ' + port
+ ' into ' + proxySwitch[switchName].currentPool);
});
proxySwitch[switchName].servers.push(f);
});


Object.keys(proxySwitch[switchName].ports).forEach(function(port){
var f = net.createServer(function(socket) {
var currentPool = proxySwitch[switchName].currentPool;

logger.debug(logSystem, 'Connect', logSubCat, 'Connection to '
+ switchName + ' from '
+ socket.remoteAddress + ' on '
+ port + ' routing to ' + currentPool);

pools[currentPool].getStratumServer().handleNewClient(socket);

}).listen(parseInt(port), function() {
logger.debug(logSystem, logComponent, logSubCat, 'Switching "' + switchName
+ '" listening for ' + algorithm
+ ' on port ' + port
+ ' into ' + proxySwitch[switchName].currentPool);
});
proxySwitch[switchName].servers.push(f);
});


}
else {
//logger.debug(logSystem, logComponent, logSubCat, 'Proxy pool for ' + algorithm + ' disabled.');
}
});
});
}

@ -399,7 +399,7 @@ module.exports = function(logger){
Object.keys(profitStatus[algo]).forEach(function(symbol){
var coinName = profitStatus[algo][symbol].name;
var poolConfig = poolConfigs[coinName];
var daemonConfig = poolConfig.shareProcessing.internal.daemon;
var daemonConfig = poolConfig.paymentProcessing.daemon;
daemonTasks.push(function(callback){
_this.getDaemonInfoForCoin(symbol, daemonConfig, callback)
});
@ -419,11 +419,10 @@ module.exports = function(logger){
});
};
this.getDaemonInfoForCoin = function(symbol, cfg, callback){
var daemon = new Stratum.daemon.interface([cfg]);
daemon.on('error', function(error){
logger.error(logSystem, symbol, JSON.stringify(error));
var daemon = new Stratum.daemon.interface([cfg], function(severity, message){
logger[severity](logSystem, symbol, message);
callback(null); // fail gracefully for each coin
}).init();
});

daemon.cmd('getblocktemplate', [{"capabilities": [ "coinbasetxn", "workid", "coinbase/append" ]}], function(result) {
if (result[0].error != null) {

@ -1,36 +0,0 @@
var events = require('events');
var redis = require('redis');

var listener = module.exports = function listener(options){

var _this = this;
var redisConnection;

var emitLog = function(text){
_this.emit('log', text);
};


this.start = function(){
redisConnection = redis.createClient(options.redisPort, options.redisHost);
redisConnection.on("pmessage", function (pattern, channel, message) {
var coinname = channel.split(':')[1];
var blockhash = message;
//emitLog("Redis: Received block for "+coinname+" - hash: "+blockhash);
_this.emit('hash', {
"coin" : coinname,
"hash" : blockhash
});
});
redisConnection.on('connect', function (err, data) {
emitLog("Redis connected");
});
redisConnection.psubscribe(options.psubscribeKey);
emitLog("Connecting to redis!");
}



};

listener.prototype.__proto__ = events.EventEmitter.prototype;
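
This file, deleted by the commit, listened for block notifications pushed through redis pub/sub. Wiring it up would have looked roughly like the sketch below; the option names come from the code above, the require path is assumed, and the `<key>:<coinname>` channel format is inferred from the `channel.split(':')[1]` line:

```javascript
// Sketch of using the removed block-notify listener (path is an assumption).
var BlockListener = require('./libs/blocknotifyListener.js');

var listener = new BlockListener({
    redisHost: '127.0.0.1',
    redisPort: 6379,
    psubscribeKey: 'blocknotify:*' // pattern whose channels end in :<coin>
});

listener.on('log', function(text){ console.log(text); });
listener.on('hash', function(data){
    // data = { coin: 'litecoin', hash: '<block hash>' }
    console.log('new block on ' + data.coin + ': ' + data.hash);
});

listener.start();
```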

@ -16,10 +16,10 @@ value: a hash with..

module.exports = function(logger, poolConfig){

var internalConfig = poolConfig.shareProcessing.internal;
var redisConfig = internalConfig.redis;
var redisConfig = poolConfig.redis;
var coin = poolConfig.coin.name;


var forkId = process.env.forkId;
var logSystem = 'Pool';
var logComponent = coin;
@ -45,33 +45,32 @@ module.exports = function(logger, poolConfig){
var redisCommands = [];

if (isValidShare){
redisCommands.push(['hincrbyfloat', coin + '_shares:roundCurrent', shareData.worker, shareData.difficulty]);
redisCommands.push(['hincrby', coin + '_stats', 'validShares', 1]);
redisCommands.push(['hincrbyfloat', coin + ':shares:roundCurrent', shareData.worker, shareData.difficulty]);
redisCommands.push(['hincrby', coin + ':stats', 'validShares', 1]);

/* Stores share diff, worker, and unique value with a score that is the timestamp. Unique value ensures it
doesn't overwrite an existing entry, and timestamp as score lets us query shares from last X minutes to
generate hashrate for each worker and pool. */
var dateNow = Date.now();
redisCommands.push(['zadd', coin + '_hashrate', dateNow / 1000 | 0, [shareData.difficulty, shareData.worker, dateNow].join(':')]);
var hashrateData = [shareData.difficulty, shareData.worker, dateNow];
redisCommands.push(['zadd', coin + ':hashrate', dateNow / 1000 | 0, hashrateData.join(':')]);
}
else{
redisCommands.push(['hincrby', coin + '_stats', 'invalidShares', 1]);
redisCommands.push(['hincrby', coin + ':stats', 'invalidShares', 1]);
}
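
Because every share lands in the sorted set scored by its unix timestamp, hashrate over a window falls out of one `zrangebyscore`: sum the difficulties of the shares in the window and scale by a per-algorithm multiplier. A hedged sketch (the multiplier value is an assumption, not something this commit defines):

```javascript
// Sketch: estimate pool hashrate from the ':hashrate' sorted set.
var redis = require('redis');
var client = redis.createClient(6379, '127.0.0.1');

var coin = 'litecoin';
var windowSecs = 600; // look at the last 10 minutes of shares
var since = (Date.now() / 1000 | 0) - windowSecs;

client.zrangebyscore(coin + ':hashrate', since, '+inf', function(err, entries){
    if (err) throw err;
    // Each entry is 'difficulty:worker:timestamp'; sum the difficulties.
    var totalDiff = entries.reduce(function(sum, e){
        return sum + parseFloat(e.split(':')[0]);
    }, 0);
    // The multiplier converts share difficulty into hashes and is
    // algorithm-dependent; 2^16 is a common scrypt choice (assumption).
    var multiplier = Math.pow(2, 16);
    console.log('~' + (multiplier * totalDiff / windowSecs) + ' H/s');
});
```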

if (isValidBlock){
redisCommands.push(['rename', coin + '_shares:roundCurrent', coin + '_shares:round' + shareData.height]);
redisCommands.push(['sadd', coin + '_blocksPending', [shareData.blockHash, shareData.txHash, shareData.height, shareData.reward].join(':')]);
redisCommands.push(['hincrby', coin + '_stats', 'validBlocks', 1]);
redisCommands.push(['rename', coin + ':shares:roundCurrent', coin + ':shares:round' + shareData.height]);
redisCommands.push(['sadd', coin + ':blocksPending', [shareData.blockHash, shareData.txHash, shareData.height].join(':')]);
redisCommands.push(['hincrby', coin + ':stats', 'validBlocks', 1]);
}
else if (shareData.blockHash){
redisCommands.push(['hincrby', coin + '_stats', 'invalidBlocks', 1]);
redisCommands.push(['hincrby', coin + ':stats', 'invalidBlocks', 1]);
}

connection.multi(redisCommands).exec(function(err, replies){
if (err)
logger.error(logSystem, logComponent, logSubCat, 'Error with share processor multi ' + JSON.stringify(err));
//else
//logger.debug(logSystem, logComponent, logSubCat, 'Share data and stats recorded');
});


@ -35,14 +35,7 @@ module.exports = function(logger, portalConfig, poolConfigs){

var poolConfig = poolConfigs[coin];

if (!poolConfig.shareProcessing || !poolConfig.shareProcessing.internal){
logger.error(logSystem, coin, 'Cannot do stats without internal share processing setup');
canDoStats = false;
return;
}

var internalConfig = poolConfig.shareProcessing.internal;
var redisConfig = internalConfig.redis;
var redisConfig = poolConfig.redis;

for (var i = 0; i < redisClients.length; i++){
var client = redisClients[i];
@ -115,19 +108,19 @@ module.exports = function(logger, portalConfig, poolConfigs){
var redisCommands = [];


var redisComamndTemplates = [
['zremrangebyscore', '_hashrate', '-inf', '(' + windowTime],
['zrangebyscore', '_hashrate', windowTime, '+inf'],
['hgetall', '_stats'],
['scard', '_blocksPending'],
['scard', '_blocksConfirmed'],
['scard', '_blocksOrphaned']
var redisCommandTemplates = [
['zremrangebyscore', ':hashrate', '-inf', '(' + windowTime],
['zrangebyscore', ':hashrate', windowTime, '+inf'],
['hgetall', ':stats'],
['scard', ':blocksPending'],
['scard', ':blocksConfirmed'],
['scard', ':blocksOrphaned']
];

var commandsPerCoin = redisComamndTemplates.length;
var commandsPerCoin = redisCommandTemplates.length;

client.coins.map(function(coin){
redisComamndTemplates.map(function(t){
redisCommandTemplates.map(function(t){
var clonedTemplates = t.slice(0);
clonedTemplates[1] = coin + clonedTemplates[1];
redisCommands.push(clonedTemplates);
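
To make the template cloning concrete: each template's key fragment gets the coin name prepended, so for a client serving litecoin the templates expand into commands against `litecoin:hashrate`, `litecoin:stats`, and so on. An illustrative sketch mirroring the loop above:

```javascript
// Sketch: expanding command templates for one coin, as the loop above does.
var windowTime = 1400000000; // example cutoff timestamp (assumption)
var redisCommandTemplates = [
    ['zremrangebyscore', ':hashrate', '-inf', '(' + windowTime],
    ['zrangebyscore', ':hashrate', windowTime, '+inf'],
    ['hgetall', ':stats'],
    ['scard', ':blocksPending']
];

var redisCommands = [];
['litecoin'].forEach(function(coin){
    redisCommandTemplates.forEach(function(t){
        var cloned = t.slice(0);      // copy so the template stays reusable
        cloned[1] = coin + cloned[1]; // ':hashrate' -> 'litecoin:hashrate'
        redisCommands.push(cloned);
    });
});

console.log(redisCommands[0]); // ['zremrangebyscore','litecoin:hashrate',...]
```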

@ -151,7 +144,8 @@ module.exports = function(logger, portalConfig, poolConfigs){
poolStats: {
validShares: replies[i + 2] ? (replies[i + 2].validShares || 0) : 0,
validBlocks: replies[i + 2] ? (replies[i + 2].validBlocks || 0) : 0,
invalidShares: replies[i + 2] ? (replies[i + 2].invalidShares || 0) : 0
invalidShares: replies[i + 2] ? (replies[i + 2].invalidShares || 0) : 0,
totalPaid: replies[i + 2] ? (replies[i + 2].totalPaid || 0) : 0
},
blocks: {
pending: replies[i + 3],

@ -151,15 +151,17 @@ module.exports = function(logger){
for (var pName in poolConfigs){
if (pName.toLowerCase() === c)
return {
daemon: poolConfigs[pName].shareProcessing.internal.daemon,
daemon: poolConfigs[pName].paymentProcessing.daemon,
address: poolConfigs[pName].address
}
}
})();
var daemon = new Stratum.daemon.interface([coinInfo.daemon]);
var daemon = new Stratum.daemon.interface([coinInfo.daemon], function(severity, message){
logger[severity](logSystem, c, message);
});
daemon.cmd('dumpprivkey', [coinInfo.address], function(result){
if (result[0].error){
logger.error(logSystem, 'daemon', 'Could not dumpprivkey for ' + c + ' ' + JSON.stringify(result[0].error));
logger.error(logSystem, c, 'Could not dumpprivkey for ' + c + ' ' + JSON.stringify(result[0].error));
cback();
return;
}
@ -272,9 +274,15 @@ module.exports = function(logger){
res.send(500, 'Something broke!');
});

app.listen(portalConfig.website.port, function(){
logger.debug(logSystem, 'Server', 'Website started on port ' + portalConfig.website.port);
});
try {
app.listen(portalConfig.website.port, portalConfig.website.host, function () {
logger.debug(logSystem, 'Server', 'Website started on ' + portalConfig.website.host + ':' + portalConfig.website.port);
});
}
catch(e){
logger.error(logSystem, 'Server', 'Could not start website on ' + portalConfig.website.host + ':' + portalConfig.website.port
+ ' - it\'s either in use or you do not have permission');
}


};

@ -45,7 +45,8 @@
"node-watch": "*",
"request": "*",
"nonce": "*",
"bignum": "*"
"bignum": "*",
"extend": "*"
},
"engines": {
"node": ">=0.10"

@ -3,54 +3,22 @@
"coin": "litecoin.json",

"address": "n4jSe18kZMCdGcZqaYprShXW6EH1wivUK1",
"blockRefreshInterval": 1000,
"txRefreshInterval": 20000,
"jobRebroadcastTimeout": 55,
"connectionTimeout": 600,
"emitInvalidBlockHashes": false,
"shareVariancePercent": 15,

"tcpProxyProtocol": false,

"shareProcessing": {
"internal": {
"enabled": true,
"validateWorkerAddress": true,
"paymentInterval": 20,
"minimumPayment": 70,
"minimumReserve": 10,
"feePercent": 0.05,
"feeCollectAccount": "feesCollected",
"feeReceiveAddress": "mppaGeNaSbG1Q7S6V3gL5uJztMhucgL9Vh",
"feeWithdrawalThreshold": 5,
"daemon": {
"host": "127.0.0.1",
"port": 19332,
"user": "litecoinrpc",
"password": "testnet"
},
"redis": {
"host": "127.0.0.1",
"port": 6379
}
},
"mpos": {
"enabled": false,
"host": "127.0.0.1",
"port": 3306,
"user": "me",
"password": "mypass",
"database": "ltc",
"stratumAuth": "password"
}
"rewardRecipients": {
"n37vuNFkXfk15uFnGoVyHZ6PYQxppD3QqK": 1.5,
"22851477d63a085dbc2398c8430af1c09e7343f6": 0.1
},

"banning": {
"paymentProcessing": {
"enabled": true,
"time": 600,
"invalidPercent": 50,
"checkThreshold": 500,
"purgeInterval": 300
"paymentInterval": 20,
"minimumPayment": 70,
"daemon": {
"host": "127.0.0.1",
"port": 19332,
"user": "testuser",
"password": "testpass"
}
},

"ports": {
@ -76,22 +44,27 @@
{
"host": "127.0.0.1",
"port": 19332,
"user": "litecoinrpc",
"password": "testnet"
},
{
"host": "127.0.0.1",
"port": 19344,
"user": "litecoinrpc",
"password": "testnet"
"user": "testuser",
"password": "testpass"
}
],

"p2p": {
"enabled": false,
"enabled": true,
"host": "127.0.0.1",
"port": 19333,
"disableTransactions": true,
"magic": "fcc1b7dc"
"disableTransactions": true
},

"mposMode": {
"enabled": false,
"host": "127.0.0.1",
"port": 3306,
"user": "me",
"password": "mypass",
"database": "ltc",
"checkPassword": true,
"autoCreateWorker": false
}
}

}

@ -46,7 +46,6 @@
<th>Orphaned</th>
<th>Hashrate</th>
</tr>
</tr>
</thead>
{{ for(var pool in it.stats.pools) { }}
<tr class="pure-table-odd">