diff --git a/src/_constants.js b/src/_constants.js
new file mode 100644
index 0000000..d329b3c
--- /dev/null
+++ b/src/_constants.js
@@ -0,0 +1,29 @@
+module.exports = {
+    app: {
+        REFRESH_INTERVAL: 1 * 60 * 1000 // 1 min
+    },
+    request: {
+        MAX_SESSION_TIMEOUT: 60 * 24 * 60 * 60 * 1000, //60 days
+        INVALID_SERVER_MSG: "INCORRECT_SERVER_ERROR" //Should be reflected in public backend script
+    },
+    market: {
+        MINIMUM_BUY_REQUIREMENT: 0.1
+    },
+    price: {
+        MIN_TIME: 1 * 60 * 60 * 1000, // 1 hr
+        DOWN_RATE: 0.2 / 100, //0.2% dec
+        UP_RATE: 0.5 / 100, //0.5% inc
+        MAX_DOWN_PER_DAY: 4.8 / 100, //max 4.8% dec
+        MAX_UP_PER_DAY: 12 / 100, //max 12% inc
+        TOP_RANGE: 10 / 100, //top 10%
+        REC_HISTORY_INTERVAL: 30 * 60 * 1000 //30 mins
+    },
+    backup: {
+        SHARE_THRESHOLD: 50 / 100, // 50%
+        HASH_N_ROW: 100,
+        SINK_KEY_INDICATOR: '$$$',
+        BACKUP_INTERVAL: 1 * 60 * 1000, //1 min
+        BACKUP_SYNC_TIMEOUT: 10 * 60 * 1000, //10 mins
+        CHECKSUM_INTERVAL: 15, //times of BACKUP_INTERVAL
+    }
+}
\ No newline at end of file
diff --git a/src/app.js b/src/app.js
index ce01d54..8adad10 100644
--- a/src/app.js
+++ b/src/app.js
@@ -4,7 +4,9 @@ const express = require('express');
 //const sessions = require('express-session');
 
 const Request = require('./request');
 
-const REFRESH_INTERVAL = 1 * 60 * 1000;
+const {
+    REFRESH_INTERVAL
+} = require("./_constants")["app"];
 
 module.exports = function App(secret, DB) {
diff --git a/src/backup/head.js b/src/backup/head.js
index f3865a8..6c70666 100644
--- a/src/backup/head.js
+++ b/src/backup/head.js
@@ -4,7 +4,11 @@ const K_Bucket = require('../../public/KBucket');
 const slave = require('./slave');
 const sync = require('./sync');
 const WebSocket = require('ws');
-const shareThreshold = 50 / 100;
+
+const {
+    SINK_KEY_INDICATOR,
+    SHARE_THRESHOLD
+} = require("../_constants")["backup"];
 
 var DB, app, wss, tokenList; //Container for database and app
 var nodeList, nodeURL, nodeKBucket; //Container for (backup) node list
@@ -24,7 +28,7 @@ function generateShares(sinkKey) {
         return null;
     else {
         let N = nextNodes.length + 1,
-            th = Math.ceil(aliveNodes.length * shareThreshold) + 1,
+            th = Math.ceil(aliveNodes.length * SHARE_THRESHOLD) + 1,
             shares, refShare, mappedShares = {};
         shares = floCrypto.createShamirsSecretShares(sinkKey, N, th);
         refShare = shares.pop();
@@ -52,7 +56,7 @@ function sendSharesToNodes(sinkID, shares) {
 function storeSink(sinkID, sinkPrivKey) {
     global.sinkID = sinkID;
     global.sinkPrivKey = sinkPrivKey;
-    let encryptedKey = Crypto.AES.encrypt(slave.SINK_KEY_INDICATOR + sinkPrivKey, global.myPrivKey);
+    let encryptedKey = Crypto.AES.encrypt(SINK_KEY_INDICATOR + sinkPrivKey, global.myPrivKey);
     DB.query('INSERT INTO sinkShares (floID, share) VALUE (?, ?) AS new ON DUPLICATE KEY UPDATE share=new.share', [sinkID, encryptedKey])
         .then(_ => console.log('SinkID:', sinkID, '|SinkEnKey:', encryptedKey))
         .catch(error => console.error(error));
@@ -101,8 +105,8 @@ collectShares.retrive = function(floID, sinkID, share) {
         self.shares = {};
     } else if (self.sinkID !== sinkID)
         return console.error("Something is wrong! Slaves are sending different sinkID");
-    if (share.startsWith(slave.SINK_KEY_INDICATOR)) {
-        let sinkKey = share.substring(slave.SINK_KEY_INDICATOR.length);
+    if (share.startsWith(SINK_KEY_INDICATOR)) {
+        let sinkKey = share.substring(SINK_KEY_INDICATOR.length);
         console.debug("Received sinkKey:", sinkID, sinkKey);
         self.verify(sinkKey);
     } else
@@ -199,10 +203,10 @@ function informLiveNodes(init) {
     DB.query("SELECT floID, share FROM sinkShares ORDER BY time_ DESC LIMIT 1").then(result => {
         if (result.length) {
             let share = Crypto.AES.decrypt(result[0].share, global.myPrivKey);
-            if (share.startsWith(slave.SINK_KEY_INDICATOR)) {
+            if (share.startsWith(SINK_KEY_INDICATOR)) {
                 //sinkKey is already present in DB, use it directly
                 collectShares.active = false;
-                global.sinkPrivKey = share.substring(slave.SINK_KEY_INDICATOR.length);
+                global.sinkPrivKey = share.substring(SINK_KEY_INDICATOR.length);
                 global.sinkID = floCrypto.getFloID(global.sinkPrivKey);
                 if (global.sinkID != result[0].floID) {
                     console.warn("sinkID and sinkKey in DB are not pair!");
@@ -259,7 +263,7 @@ function slaveConnect(floID, pubKey, ws) {
             pubKey: global.myPubKey
         }));
     else if (nodeShares === null || //The 1st backup is connected
-        Object.keys(connectedSlaves).length < Math.pow(shareThreshold, 2) * Object.keys(nodeShares).length) //re-calib shares for better
+        Object.keys(connectedSlaves).length < Math.pow(SHARE_THRESHOLD, 2) * Object.keys(nodeShares).length) //re-calib shares for better
         sendSharesToNodes(global.sinkID, generateShares(global.sinkPrivKey))
     else if (nodeShares[floID])
         sendShare(ws, global.sinkID, nodeShares[floID]);
diff --git a/src/backup/slave.js b/src/backup/slave.js
index 4f1e56a..d788119 100644
--- a/src/backup/slave.js
+++ b/src/backup/slave.js
@@ -1,10 +1,12 @@
 'use strict';
 
-const WAIT_TIME = 10 * 60 * 1000,
-    BACKUP_INTERVAL = 1 * 60 * 1000,
-    CHECKSUM_INTERVAL = 15, //times of BACKUP_INTERVAL
-    SINK_KEY_INDICATOR = '$$$',
-    HASH_ROW_COUNT = 100;
+const {
+    BACKUP_INTERVAL,
+    BACKUP_SYNC_TIMEOUT,
+    CHECKSUM_INTERVAL,
+    SINK_KEY_INDICATOR,
+    HASH_N_ROW
+} = require("../_constants")["backup"];
 
 var DB; //Container for Database connection
 var masterWS = null; //Container for Master websocket connection
@@ -80,7 +82,7 @@ requestInstance.open = function(ws = null) {
     //Check if there is an active request
     if (self.request) {
         console.log("A request is already active");
-        if (self.last_response_time < Date.now() - WAIT_TIME)
+        if (self.last_response_time < Date.now() - BACKUP_SYNC_TIMEOUT)
             self.close();
         else
             return;
@@ -365,7 +367,7 @@ function verifyHash(hashes) {
     const getHash = table => new Promise((res, rej) => {
         DB.query("SHOW COLUMNS FROM " + table).then(result => {
             let columns = result.map(r => r["Field"]).sort();
-            DB.query(`SELECT CEIL(id/${HASH_ROW_COUNT}) as group_id, MD5(GROUP_CONCAT(${columns.map(c => `IFNULL(${c}, "NULL")`).join()})) as hash FROM ${table} GROUP BY group_id ORDER BY group_id`)
+            DB.query(`SELECT CEIL(id/${HASH_N_ROW}) as group_id, MD5(GROUP_CONCAT(${columns.map(c => `IFNULL(${c}, "NULL")`).join()})) as hash FROM ${table} GROUP BY group_id ORDER BY group_id`)
                 .then(result => res(Object.fromEntries(result.map(r => [r.group_id, r.hash]))))
                 .catch(error => rej(error))
         }).catch(error => rej(error))
@@ -389,9 +391,9 @@
             if (result[t].status === "fulfilled") {
                 mismatch[tables[t]] = result[t].value; //Data that are incorrect/missing/deleted
                 //Data to be deleted (incorrect data will be added by resync)
-                let id_end = result[t].value[1].map(i => i * HASH_ROW_COUNT); //eg if i=2 AND H_R_C = 5 then id_end = 2 * 5 = 10 (ie, range 6-10)
+                let id_end = result[t].value[1].map(i => i * HASH_N_ROW); //eg if i=2 AND H_R_C = 5 then id_end = 2 * 5 = 10 (ie, range 6-10)
                 Promise.allSettled(id_end.map(i =>
-                        DB.query(`DELETE FROM ${tables[t]} WHERE id BETWEEN ${i - HASH_ROW_COUNT + 1} AND ${i}`))) //eg, i - HASH_ROW_COUNT + 1 = 10 - 5 + 1 = 6
+                        DB.query(`DELETE FROM ${tables[t]} WHERE id BETWEEN ${i - HASH_N_ROW + 1} AND ${i}`))) //eg, i - HASH_N_ROW + 1 = 10 - 5 + 1 = 6
                     .then(_ => null);
             } else
                 console.error(result[t].reason);
@@ -414,7 +416,6 @@ function requestTableChunks(tables, ws) {
 }
 
 module.exports = {
-    SINK_KEY_INDICATOR,
     set DB(db) {
         DB = db;
     },
diff --git a/src/backup/sync.js b/src/backup/sync.js
index c1067f7..7f6f237 100644
--- a/src/backup/sync.js
+++ b/src/backup/sync.js
@@ -1,5 +1,10 @@
+'use strict';
+
+const {
+    HASH_N_ROW
+} = require("../_constants")["backup"];
+
 var DB; //Container for database
-const HASH_ROW_COUNT = 100;
 
 //Backup Transfer
 function sendBackupData(timestamp, checksum, ws) {
@@ -115,7 +120,7 @@ function sendTableHash(tables, ws) {
     const getHash = table => new Promise((res, rej) => {
         DB.query("SHOW COLUMNS FROM " + table).then(result => {
             let columns = result.map(r => r["Field"]).sort();
-            DB.query(`SELECT CEIL(id/${HASH_ROW_COUNT}) as group_id, MD5(GROUP_CONCAT(${columns.map(c => `IFNULL(${c}, "NULL")`).join()})) as hash FROM ${table} GROUP BY group_id ORDER BY group_id`)
+            DB.query(`SELECT CEIL(id/${HASH_N_ROW}) as group_id, MD5(GROUP_CONCAT(${columns.map(c => `IFNULL(${c}, "NULL")`).join()})) as hash FROM ${table} GROUP BY group_id ORDER BY group_id`)
                 .then(result => res(Object.fromEntries(result.map(r => [r.group_id, r.hash]))))
                 .catch(error => rej(error))
         }).catch(error => rej(error))
@@ -162,8 +167,8 @@ function sendTableData(tables, ws) {
 
 function tableSync_delete(tables, ws) {
     let getDelete = (table, group_id) => new Promise((res, rej) => {
-        let id_end = group_id * HASH_ROW_COUNT,
-            id_start = id_end - HASH_ROW_COUNT + 1;
+        let id_end = group_id * HASH_N_ROW,
+            id_start = id_end - HASH_N_ROW + 1;
         DB.query("SELECT * FROM _backup WHERE t_name=? AND mode is NULL AND (id BETWEEN ? AND ?)", [table, id_start, id_end])
             .then(result => res(result))
             .catch(error => rej(error))
@@ -187,8 +192,8 @@
 
 function tableSync_data(tables, ws) {
     const sendTable = (table, group_id) => new Promise((res, rej) => {
-        let id_end = group_id * HASH_ROW_COUNT,
-            id_start = id_end - HASH_ROW_COUNT + 1;
+        let id_end = group_id * HASH_N_ROW,
+            id_start = id_end - HASH_N_ROW + 1;
         DB.query(`SELECT * FROM ${table} WHERE id BETWEEN ? AND ?`, [id_start, id_end]).then(data => {
             ws.send(JSON.stringify({
                 table,
@@ -202,8 +207,8 @@
         });
     });
     const getUpdate = (table, group_id) => new Promise((res, rej) => {
-        let id_end = group_id * HASH_ROW_COUNT,
-            id_start = id_end - HASH_ROW_COUNT + 1;
+        let id_end = group_id * HASH_N_ROW,
+            id_start = id_end - HASH_N_ROW + 1;
         DB.query("SELECT * FROM _backup WHERE t_name=? AND mode=TRUE AND (id BETWEEN ? AND ?)", [table, id_start, id_end])
             .then(result => res(result))
             .catch(error => rej(error))
diff --git a/src/market.js b/src/market.js
index ec509a5..ec10256 100644
--- a/src/market.js
+++ b/src/market.js
@@ -1,7 +1,10 @@
 'use strict';
 
 const coupling = require('./coupling');
-const MINIMUM_BUY_REQUIREMENT = 0.1;
+
+const {
+    MINIMUM_BUY_REQUIREMENT
+} = require('./_constants')["market"];
 
 var DB, assetList; //container for database and allowed assets
 
@@ -213,7 +216,7 @@ function confirmDepositFLO() {
 
 confirmDepositFLO.checkTx = function(sender, txid) {
     return new Promise((resolve, reject) => {
-        const receiver = global.myFloID; //receiver should be market's floID (ie, adminID)
+        let receiver = global.myFloID; //receiver should be market's floID (ie, adminID)
         floBlockchainAPI.getTx(txid).then(tx => {
             let vin_sender = tx.vin.filter(v => v.addr === sender)
             if (!vin_sender.length)
@@ -345,7 +348,7 @@ function confirmDepositToken() {
 
 confirmDepositToken.checkTx = function(sender, txid) {
     return new Promise((resolve, reject) => {
-        const receiver = global.sinkID; //receiver should be market's floID (ie, sinkID)
+        let receiver = global.sinkID; //receiver should be market's floID (ie, sinkID)
         tokenAPI.getTx(txid).then(tx => {
             if (tx.parsedFloData.type !== "transfer")
                 return reject([true, "Transaction type not 'transfer'"]);
@@ -376,7 +379,7 @@ function withdrawToken(floID, token, amount) {
         else if ((!assetList.includes(token) && token !== floGlobals.currency) || token === "FLO")
             return reject(INVALID("Invalid Token"));
         //Check for FLO balance (transaction fee)
-        const required_flo = floGlobals.sendAmt + floGlobals.fee;
+        let required_flo = floGlobals.sendAmt + floGlobals.fee;
         getAssetBalance.check(floID, "FLO", required_flo).then(_ => {
             getAssetBalance.check(floID, token, amount).then(_ => {
                 consumeAsset(floID, "FLO", required_flo).then(txQueries => {
diff --git a/src/price.js b/src/price.js
index 55168ac..b3accf4 100644
--- a/src/price.js
+++ b/src/price.js
@@ -1,12 +1,14 @@
 'use strict';
 
-const MIN_TIME = 10 * 1000, // 1 * 60 * 60 * 1000,
-    DOWN_RATE = 0.2 / 100,
-    UP_RATE = 0.5 / 100,
-    MAX_DOWN_PER_DAY = 4.8 / 100,
-    MAX_UP_PER_DAY = 12 / 100,
-    TOP_RANGE = 10 / 100,
-    REC_HISTORY_INTERVAL = 5 * 60 * 1000; // 1 * 60 * 60 * 1000;
+const {
+    MIN_TIME,
+    DOWN_RATE,
+    UP_RATE,
+    MAX_DOWN_PER_DAY,
+    MAX_UP_PER_DAY,
+    TOP_RANGE,
+    REC_HISTORY_INTERVAL
+} = require("./_constants")["price"];
 
 var DB; //container for database
 
diff --git a/src/request.js b/src/request.js
index 08371df..b64e7b6 100644
--- a/src/request.js
+++ b/src/request.js
@@ -1,6 +1,12 @@
 'use strict';
 
 const market = require("./market");
+
+const {
+    MAX_SESSION_TIMEOUT,
+    INVALID_SERVER_MSG
+} = require("./_constants")["request"];
+
 var DB, trustedIDs, secret; //container for database
 
 global.INVALID = function(message) {
@@ -17,12 +23,7 @@ global.INTERNAL = function INTERNAL(message) {
 }
 INTERNAL.e_code = 500;
 
-// creating 24 hours from milliseconds
-const oneDay = 1000 * 60 * 60 * 24;
-const maxSessionTimeout = 60 * oneDay;
-
 var serving;
-const INVALID_SERVER_MSG = "INCORRECT_SERVER_ERROR";
 
 function validateRequestFromFloID(request, sign, floID, proxy = true) {
     return new Promise((resolve, reject) => {
@@ -33,7 +34,7 @@ function validateRequestFromFloID(request, sign, floID, proxy = true) {
         DB.query("SELECT " + (proxy ? "session_time, proxyKey AS pubKey FROM UserSession" : "pubKey FROM Users") + " WHERE floID=?", [floID]).then(result => {
             if (result.length < 1)
                 return reject(INVALID(proxy ? "Session not active" : "User not registered"));
-            if (proxy && result[0].session_time + maxSessionTimeout < Date.now())
+            if (proxy && result[0].session_time + MAX_SESSION_TIMEOUT < Date.now())
                 return reject(INVALID("Session Expired! Re-login required"));
             let req_str = validateRequest(request, sign, result[0].pubKey);
             req_str instanceof INVALID ? reject(req_str) : resolve(req_str);
@@ -496,7 +497,7 @@ module.exports = {
     set trustedIDs(ids) {
         trustedIDs = ids;
     },
-    set assetList(assets){
+    set assetList(assets) {
         market.assetList = assets;
     },
     set DB(db) {