Merge pull request #9 from sairajzero/dev-nodejs

nodejs conversion
This commit is contained in:
Sai Raj 2021-07-22 04:24:53 +05:30 committed by GitHub
commit 53fcbbc84d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
25 changed files with 9360 additions and 33023 deletions

View File

@ -1,3 +0,0 @@
SERVER_PWD=<server_password>
BROWSER=firefox
PORT=7130

6
.gitattributes vendored
View File

@ -1,6 +0,0 @@
*.gitattributes linguist-vendored
util/mongoose.c linguist-vendored
util/mongoose.h linguist-vendored
.gitattributes export-ignore
util/* export-ignore

4
.gitignore vendored Normal file
View File

@ -0,0 +1,4 @@
node_modules/
package-lock.json
config.json
parameters.json

View File

@ -1,23 +0,0 @@
# SuperNodeStorage
FLO Supernode Storage is a Cloud Storage program for FLO Dapps
## Installation
1. Download or clone the [repo](https://github.com/ranchimall/SuperNodeStorage):
git clone https://github.com/ranchimall/SuperNodeStorage
2. Add a strong <server_password> in `.config` file
3. Change other configurations (if needed)
4. Host and publish the domain name or IP with port
## Usage
1. Start the app using the following command in terminal. The server WSS will be started and the supernode html-js will be opened in the browser.
./start_supernode.sh
2. (Only for first time login) Enter the <server_password> and <private_key> when prompted
The Supernode storage will automatically start
NOTE: The <server_password> and <private_key> will be stored securely in IndexedDB of the browser
NOTE: Users may add `start_supernode` to bootup process to automatically start the supernode during boot up

File diff suppressed because it is too large Load Diff

Binary file not shown.

9
config-sample.json Normal file
View File

@ -0,0 +1,9 @@
{
"privateKey": "FLO_Private_Key_of_SuperNode",
"port": "8080",
"sql_user": "mySQL_user",
"sql_pwd": "mySQL_password",
"sql_db": "supernode",
"sql_host": "localhost"
}

2
launch.js Normal file
View File

@ -0,0 +1,2 @@
// Entry point: load the supernode's main module and start it.
const start = require('./src/main')
start();

View File

@ -1,2 +0,0 @@
All logs are stored in this directory
NOTE: DO NOT delete this directory

141
src/client.js Normal file
View File

@ -0,0 +1,141 @@
var DB, _list; //container for database and _list (stored n serving)

/**
 * Entry point for raw data received from a user connection.
 * Parses the JSON payload, rejects messages whose timestamp drifts more than
 * floGlobals.sn_config.delayDelta from the current time, then dispatches to
 * the request / store-data / tag handlers.
 * @param {string} data Raw JSON string received from the user
 * @returns {Promise} resolves with the handler's result, rejects with a reason
 */
function processIncomingData(data) {
    return new Promise((resolve, reject) => {
        try {
            data = JSON.parse(data);
        } catch (error) {
            return reject("Data not in JSON-Format")
        }
        let curTime = Date.now()
        if (!data.time || data.time > curTime + floGlobals.sn_config.delayDelta ||
            data.time < curTime - floGlobals.sn_config.delayDelta)
            return reject("Invalid Time");
        else {
            let process;
            if (data.request) //Request
                process = processRequestFromUser(data.request);
            else if (data.message) //Store data
                process = processDataFromUser(data);
            else if (data) //Tag data
                // BUGFIX: processTagFromUser takes only (data); the original passed
                // the undefined identifiers gid/uid and threw a ReferenceError.
                process = processTagFromUser(data);
            /*
            else if (data.edit)
                return processEditFromUser(gid, uid, data);
            else if (data.delete)
                return processDeleteFromUser(gid, uid, data);
            */
            else
                // NOTE(review): unreachable — parsed `data` is always truthy here.
                return reject("Invalid Data-format")
            process.then(result => resolve(result))
                .catch(error => reject(error))
        }
    })
}
/**
 * Validates and stores a signed user message addressed to data.receiverID.
 * Checks: receiver/sender addresses, that this node serves the responsible
 * supernode, that pubKey matches senderID, and the signature over the
 * pipe-joined hash fields. On success the record is persisted via DB.addData.
 * @param {Object} data Parsed message (receiverID, senderID, pubKey, sign, ...)
 * @returns {Promise<[Object, string]>} resolves with [storedRecord, 'DATA']
 */
function processDataFromUser(data) {
    return new Promise((resolve, reject) => {
        if (!floCrypto.validateAddr(data.receiverID))
            return reject("Invalid receiverID")
        let closeNode = kBucket.closestNode(data.receiverID)
        if (!_list.serving.includes(closeNode))
            return reject("Incorrect Supernode")
        // BUGFIX: this guard must validate the *sender* address; the original
        // re-checked receiverID and never validated senderID at all.
        if (!floCrypto.validateAddr(data.senderID))
            return reject("Invalid senderID")
        if (data.senderID !== floCrypto.getFloID(data.pubKey))
            return reject("Invalid pubKey")
        // Signature covers these fields joined with '|' in this exact order.
        let hashcontent = ["receiverID", "time", "application", "type", "message", "comment"]
            .map(d => data[d]).join("|")
        if (!floCrypto.verifySign(hashcontent, data.sign, data.pubKey))
            return reject("Invalid signature")
        DB.addData(closeNode, {
            vectorClock: `${Date.now()}_${data.senderID}`,
            senderID: data.senderID,
            receiverID: data.receiverID,
            time: data.time,
            application: data.application,
            type: data.type,
            message: data.message,
            comment: data.comment,
            sign: data.sign,
            pubKey: data.pubKey
        }).then(result => resolve([result, 'DATA']))
            .catch(error => reject(error))
    })
}
// Serves a user's search request after confirming this node is currently
// serving the supernode responsible for the requested receiverID.
function processRequestFromUser(request) {
    return new Promise((resolve, reject) => {
        if (!floCrypto.validateAddr(request.receiverID))
            return reject("Invalid receiverID");
        const servingNode = kBucket.closestNode(request.receiverID);
        if (!_list.serving.includes(servingNode))
            return reject("Incorrect Supernode");
        DB.searchData(servingNode, request).then(
            result => resolve([result]),
            error => reject(error)
        );
    });
}
// Validates a sub-admin-signed tag request and applies (or clears) the tag on
// the referenced record of the responsible supernode.
function processTagFromUser(data) {
    return new Promise((resolve, reject) => {
        if (!floCrypto.validateAddr(data.receiverID))
            return reject("Invalid receiverID");
        if (!(data.application in floGlobals.appList))
            return reject("Invalid application");
        const requestorOk = floCrypto.validateAddr(data.requestorID) &&
            floGlobals.appSubAdmins.includes(data.requestorID);
        if (!requestorOk)
            return reject("Invalid requestorID");
        if (data.requestorID !== floCrypto.getFloID(data.pubKey))
            return reject("Invalid pubKey");
        const servingNode = kBucket.closestNode(data.receiverID);
        if (!_list.serving.includes(servingNode))
            return reject("Incorrect Supernode");
        // Signature covers these fields joined with '|' in this exact order.
        const hashcontent = ["time", "application", "tag"].map(k => data[k]).join("|");
        if (!floCrypto.verifySign(hashcontent, data.sign, data.pubKey))
            return reject("Invalid signature");
        // Empty-ish tag values mean "remove the tag" (stored as NULL).
        const tag = [null, undefined, ""].includes(data.tag) ? null : [data.tag].toString();
        DB.tagData(servingNode, data.vectorClock, tag, data.time, data.pubKey, data.sign).then(
            result => resolve([result, 'TAG']),
            error => reject(error)
        );
    });
}
/**
 * Decides whether a stored/cached `data` record satisfies a search `request`.
 * mostRecent requests can never be satisfied this way (the newest record must
 * come from the database), hence the early false.
 * @param {Object|null} request Search filters (receiverID, application, ...)
 * @param {Object} data Candidate record
 * @returns {boolean}
 */
function checkIfRequestSatisfy(request, data) {
    if (!request || request.mostRecent || request.receiverID !== data.receiverID)
        return false;
    if (request.atKey && request.atKey !== data.vectorClock)
        return false;
    if (request.lowerVectorClock && request.lowerVectorClock > data.vectorClock)
        return false;
    if (request.upperVectorClock && request.upperVectorClock < data.vectorClock)
        return false;
    if (request.application !== data.application)
        return false;
    if (request.comment && request.comment !== data.comment)
        return false;
    if (request.type && request.type !== data.type)
        return false;
    if (request.senderID) {
        // BUGFIX: the original's else-if still compared the array itself with
        // the senderID string, so an array filter containing the sender was
        // wrongly rejected. Array and scalar cases are now mutually exclusive.
        if (Array.isArray(request.senderID)) {
            if (!request.senderID.includes(data.senderID))
                return false;
        } else if (request.senderID !== data.senderID)
            return false;
    }
    return true;
}
module.exports = {
setParameters,
checkIfRequestSatisfy,
processRequestFromUser,
processIncomingData,
set DB(db){
DB = db
},
set _list(list){
_list = list
}
}

369
src/database.js Normal file
View File

@ -0,0 +1,369 @@
'use strict';
var mysql = require('mysql');
// Schema of the shared (non per-supernode) tables, as column -> SQL type.
// The "PRIMARY" pseudo-column smuggles the PRIMARY KEY clause into the
// generated CREATE TABLE statement (see db.createBase).
const Base_Tables = {
    LastTxs: {
        ID: "CHAR(34) NOT NULL",
        N: "INT NOT NULL",
        PRIMARY: "KEY (ID)"
    },
    Configs: {
        NAME: "VARCHAR(64) NOT NULL",
        VAL: "VARCHAR(512) NOT NULL",
        PRIMARY: "KEY (NAME)"
    },
    SuperNodes: {
        FLO_ID: "CHAR(34) NOT NULL",
        PUB_KEY: "CHAR(66) NOT NULL",
        URI: "VARCHAR(256) NOT NULL",
        PRIMARY: "KEY (FLO_ID)"
    },
    Applications: {
        APP_NAME: "VARCHAR(64) NOT NULL",
        ADMIN_ID: "CHAR(34) NOT NULL",
        // BUGFIX: VARCHAR(MAX) is SQL Server syntax and is rejected by MySQL;
        // TEXT holds the comma-separated sub-admin list instead.
        SUB_ADMINS: "TEXT",
        PRIMARY: "KEY (APP_NAME)"
    }
}
// Header columns of a data record (identifying, immutable fields).
const H_struct = {
    VECTOR_CLOCK: "vectorClock",
    SENDER_ID: "senderID",
    RECEIVER_ID: "receiverID",
    TYPE: "type",
    APPLICATION: "application",
    TIME: "time",
    PUB_KEY: "pubKey"
}
// Body columns of a data record (payload and its signature).
const B_struct = {
    MESSAGE: "message",
    SIGNATURE: "sign",
    COMMENT: "comment"
}
// Local bookkeeping columns (not part of the signed record).
const L_struct = {
    STATUS: "status_n",
    LOG_TIME: "log_time"
}
// Tagging columns (set later by sub-admins via tagData/storeTag).
const T_struct = {
    TAG: "tag",
    TAG_TIME: "tag_time",
    TAG_KEY: "tag_key",
    TAG_SIGN: "tag_sign"
}
// Builds a MySQL-backed storage handle. The call resolves (via the Promise
// returned at the end of this function) to an object exposing the query
// helpers defined below, once the connection is established.
function Database(user, password, dbname, host = 'localhost') {
    const db = {};
    // Promisified wrapper around the mysql driver's callback-style query API.
    // `v` (optional) carries placeholder values for a parameterized statement.
    db.query = (s, v) => new Promise((res, rej) => {
        const fn = (e, r) => e ? rej(e) : res(r)
        v ? db.conn.query(s, v, fn) : db.conn.query(s, fn)
    });
db.createBase = function() {
return new Promise((resolve, reject) => {
let statements = []
for (let t in Base_Tables)
statements.push("CREATE TABLE IF NOT EXISTS " + t + "( " +
Object.keys(Base_Tables[t]).map(a => a + " " + Base_Tables[t][a]).join(", ") + " )");
Promise.all(statements.forEach(s => db.query(s)))
.then(result => resolve(result))
.catch(error => reject(error))
})
}
db.setLastTx = function(id, n) {
return new Promise((resolve, reject) => {
let statement = "INSERT INTO LastTxs (ID, N) VALUES (?, ?)" +
" ON DUPLICATE KEY UPDATE N=?";
db.query(statement, [id, n, n])
.then(result => resolve(result))
.catch(error => reject(error))
})
}
db.setConfig = function(name, value) {
return new Promise((resolve, reject) => {
let statement = "INSERT INTO Configs (NAME, VAL) VALUES (?, ?)" +
" ON DUPLICATE KEY UPDATE VAL=?";
db.query(statement, [name, value, value])
.then(result => resolve(result))
.catch(error => reject(error))
})
}
db.addSuperNode = function(id, pubKey, uri) {
let statement = "INSERT INTO SuperNodes (FLO_ID, PUB_KEY, URI) VALUES (?, ?, ?)" +
" ON DUPLICATE KEY UPDATE URI=?";
db.query(statement, [id, pubKey, uri, uri])
.then(result => resolve(result))
.catch(error => reject(error))
}
db.rmSuperNode = function(id) {
let statement = "DELETE FROM SuperNodes" +
" WHERE FLO_ID=?";
db.query(statement, id)
.then(result => resolve(result))
.catch(error => reject(error))
}
db.setSubAdmin = function(appName, subAdmins) {
let statement = "UPDATE Applications" +
" SET SUB_ADMINS=?" +
" WHERE APP_NAME=?";
db.query(statement, [subAdmins.join(","), appName])
.then(result => resolve(result))
.catch(error => reject(error))
}
db.addApp = function(appName, adminID) {
let statement = "INSERT INTO Applications (APP_NAME, ADMIN_ID) VALUES (?, ?)" +
" ON DUPLICATE KEY UPDATE ADMIN_ID=?";
db.query(statement, [appName, adminID, adminID])
.then(result => resolve(result))
.catch(error => reject(error))
}
db.rmApp = function(appName) {
let statement = "DELETE FROM Applications" +
" WHERE APP_NAME=" + appName;
db.query(statement)
.then(result => resolve(result))
.catch(error => reject(error))
}
db.getBase = function() {
return new Promise((resolve, reject) => {
let tables = Object.keys(Base_Tables);
Promise.all(tables.forEach(t => db.query("SELECT * FROM " + t))).then(result => {
let tmp = Object.fromEntries(tables.map((t, i) => [t, result[i]]))
result = {};
result.lastTx = Object.fromEntries(tmp.LastTxs.map(a => [a.ID, a.N]));
result.sn_config = Object.fromEntries(tmp.Configs.map(a => [a.NAME, a.VAL]));
result.appList = Object.fromEntries(tmp.Applications.map(a => [a.APP_NAME, a.ADMIN_ID]));
result.appSubAdmins = Object.fromEntries(tmp.Applications.map(a => [a.APP_NAME, a.SUB_ADMINS.split(",")]))
result.supernodes = Object.fromEntries(tmp.SuperNodes.map(a.FLO_ID, {
pubKey: a.PUB_KEY,
uri: a.URI
}))
resolve(result)
}).catch(error => reject(error))
})
}
db.createTable = function(snID) {
return new Promise((resolve, reject) => {
let statement = "CREATE TABLE IF NOT EXISTS _" + snID + " ( " +
H_struct.VECTOR_CLOCK + " VARCHAR(50) NOT NULL, " +
H_struct.SENDER_ID + " CHAR(34) NOT NULL, " +
H_struct.RECEIVER_ID + " CHAR(34) NOT NULL, " +
H_struct.APPLICATION + " VARCHAR(128) NOT NULL, " +
H_struct.TYPE + " VARCHAR(1024), " +
B_struct.MESSAGE + " TEXT NOT NULL, " +
B_struct.TIME + " INT NOT NULL, " +
B_struct.SIGNATURE + " VARCHAR(160) NOT NULL, " +
B_struct.PUB_KEY + " CHAR(66) NOT NULL, " +
B_struct.COMMENT + " VARCHAR(1024), " +
L_struct.STATUS + " INT NOT NULL, " +
L_struct.LOG_TIME + " INT NOT NULL, " +
T_struct.TAG + " VARCHAR (1024), " +
T_struct.TAG_TIME + " INT, " +
T_struct.TAG_KEY + " CHAR(66), " +
T_struct.TAG_SIGN + " VARCHAR(160), " +
"PRIMARY KEY (" + H_struct.VECTOR_CLOCK + ")" +
" )";
db.query(statement)
.then(result => resolve(result))
.catch(error => reject(error))
})
}
db.dropTable = function(snID) {
return new Promise((resolve, reject) => {
let statement = "DROP TABLE _" + snID;
db.query(statement)
.then(result => resolve(result))
.catch(error => reject(error))
})
}
db.addData = function(snID, data) {
return new Promise((resolve, reject) => {
let attr = Object.keys(H_struct).map(a => H_struct[a]);
let values = attr.map(a => data[a]);
let statement = "INSERT INTO _" + snID +
" (" + attr.join(", ") + ", " + L_struct.STATUS + ", " + L_struct.LOG_TIME + ") " +
"VALUES (" + attr.map(a => '?').join(", ") + ", 1, " + Date.now() + ")";
data = Object.fromEntries(attr.map((a, i) => [
[a, values[i]]
]));
db.query(statement, values)
.then(result => resolve(data))
.catch(error => reject(error))
})
}
db.tagData = function(snID, vectorClock, tag, tagTime, tagKey, tagSign) {
return new Promise((resolve, reject) => {
let data = {
[T_struct.TAG]: tag,
[T_struct.TAG_TIME]: tagTime,
[T_struct.TAG_KEY]: tagKey,
[T_struct.TAG_SIGN]: tagSign,
[L_struct.LOG_TIME]: Date.now()
}
let attr = Object.keys(data);
let values = attr.map(a => data[a]).concat(vectorClock);
data[H_struct.VECTOR_CLOCK] = vectorClock; //also add vectorClock to resolve data
let statement = "UPDATE _" + snID +
" SET " + attr.map(a => a + "=?").join(", ") +
" WHERE " + H_struct.VECTOR_CLOCK + "=?";
db.query(statement, values)
.then(result => resolve(data))
.catch(error => reject(error))
})
}
db.searchData = function(snID, request) {
return new Promise((resolve, reject) => {
let conditionArr = [];
if (request.lowerVectorClock || request.upperVectorClock || request.atKey) {
if (request.lowerVectorClock && request.upperVectorClock)
conditionArr.push(`${H_struct.VECTOR_CLOCK} BETWEEN '${request.lowerVectorClock}' AND '${request.upperVectorClock}'`);
else if (request.atKey)
conditionArr.push(`${H_struct.VECTOR_CLOCK} = '${request.atKey}'`)
else if (request.lowerVectorClock)
conditionArr.push(`${H_struct.VECTOR_CLOCK} >= '${request.lowerVectorClock}'`)
else if (request.upperVectorClock)
conditionArr.push(`${H_struct.VECTOR_CLOCK} <= '${request.upperVectorClock}'`)
}
conditionArr.push(`${H_struct.APPLICATION} = '${request.application}'`);
conditionArr.push(`${H_struct.RECEIVER_ID} = '${request.receiverID}'`)
if (request.comment)
conditionArr.push(`${B_struct.COMMENT} = '${request.comment}'`)
if (request.type)
conditionArr.push(`${H_struct.TYPE} = '${request.type}'`)
if (request.senderID) {
if (Array.isArray(request.senderID))
conditionArr.push(`${H_struct.SENDER_ID} IN ('${request.senderID.join("', '")}')`);
else
conditionArr.push(`${H_struct.SENDER_ID} = '${request.senderID}'`)
}
//console.log(conditionArr);
let attr = Object.keys(H_struct).concat(Object.keys(B_struct))
let statement = "SELECT (" + attr.join(", ") + ")" +
" FROM _" + snID +
" WHERE " + conditionArr.join(" AND ") +
request.mostRecent ? "LIMIT 1" : (" ORDER BY " + H_struct.VECTOR_CLOCK);
db.query(statement)
.then(result => resolve(result))
.catch(error => reject(error))
})
}
db.lastLogTime = function(snID) {
return new Promise((resolve, reject) => {
let statement = "SELECT MAX(" + L_struct.LOG_TIME + ") FROM _" + snID;
db.query(statement)
.then(result => resolve(result))
.catch(error => reject(error))
})
}
db.createGetLastLog = function(snID) {
return new Promise((resolve, reject) => {
db.createTable(snID).then(result => {
db.lastLogTime(snID)
.then(result => resolve(result))
.catch(error => reject(error))
}).catch(error => reject(error))
})
}
db.getData = function(snID, logtime) {
return new Promise((resolve, reject) => {
let statement = "SELECT * FROM _" + snID +
" WHERE " + L_struct.LOG_TIME + ">=" + logtime +
" ORDER BY " + L_struct.LOG_TIME;
db.query(statement)
.then(result => resolve(result))
.catch(error => reject(error))
})
}
db.storeData = function(snID, data) {
return new Promise((resolve, reject) => {
let u_attr = Object.keys(B_struct).map(a => B_struct[a])
let attr = Object.keys(H_struct).map(a => H_struct[a]).concat(u_attr);
let values = attr.map(a => data[a]);
let u_values = u_attr.map(a => data[a]);
let statement = "INSERT INTO _" + snID +
" (" + attr.join(", ") + ", " + L_struct.STATUS + ", " + L_struct.LOG_TIME + ") " +
"VALUES (" + attr.map(a => '?').join(", ") + ", 1, " + Date.now() + ") " +
"ON DUPLICATE KEY UPDATE " + u_attr.map(a => a + "=?").join(", ");
db.query(statement, values.concat(u_values))
.then(result => resolve(data))
.catch(error => reject(error))
})
}
db.storeTag = function(snID, data) {
return new Promise((resolve, reject) => {
let attr = Object.keys(T_struct).map(a => T_struct[a]).concat(L_struct.LOG_TIME);
let values = attr.map(a => data[a]);
let statement = "UPDATE _" + snID +
" SET " + attr.map(a => a + "=?").join(", ") +
" WHERE " + H_struct.VECTOR_CLOCK + "=" + data[H_struct.VECTOR_CLOCK];
db.query(statement, values)
.then(result => resolve(data))
.catch(error => reject(error))
})
}
db.clearAuthorisedAppData = function(snID, app, adminID, subAdmins, timestamp) {
return new Promise((resolve, reject) => {
let statement = "DELETE FROM _" + snID +
" WHERE ( " + H_struct.TIME + "<? AND " +
H_struct.APPLICATION + "=? AND " +
T_struct.TAG + " IS NULL ) AND ( " +
H_struct.RECEIVER_ID + " != ? OR " +
H_struct.SENDER_ID + " NOT IN (" + subAdmins.map(a => "?").join(", ") + ") )";
db.query(statement, [timestamp, app].concat(subAdmins).push(adminID))
.then(result => resolve(result))
.catch(error => reject(error))
})
}
db.clearUnauthorisedAppData = function(snID, appList, timestamp) {
return new Promise((resolve, reject) => {
let statement = "DELETE FROM _" + snID +
"WHERE " + H_struct.TIME + "<? AND " +
H_struct.APPLICATION + " NOT IN (" + appList.map(a => "?").join(", ") + ")";
db.query(statement, [timestamp].concat(appList))
.then(result => resolve(result))
.catch(error => reject(error))
})
}
    // Close the underlying MySQL connection gracefully.
    db.close = function() {
        db.conn.end();
    }
    // Establish the connection; the Database(...) call resolves with the `db`
    // helper object once connected, or rejects with the connection error.
    return new Promise((resolve, reject) => {
        let conn = mysql.createConnection({
            host: host,
            user: user,
            password: password,
            database: dbname
        });
        conn.connect((err) => {
            if (err) return reject(err);
            db.conn = conn;
            resolve(db)
        });
    })
}
module.exports = Database;

436
src/floBlockchainAPI.js Normal file
View File

@ -0,0 +1,436 @@
'use strict';
/* FLO Blockchain Operator to send/receive data from blockchain using API calls*/
(function(GLOBAL) {
    const floBlockchainAPI = GLOBAL.floBlockchainAPI = {
        util: {
            // Pool of floSight API servers; a random one is selected initially.
            serverList: floGlobals.apiURL[floGlobals.blockchain].slice(0),
            curPos: floCrypto.randInt(0, floGlobals.apiURL[floGlobals.blockchain].length - 1),
            // Drop the currently selected (failing) server from the pool, pick
            // another at random and retry the call.
            // NOTE(review): a removed server never returns to the pool for the
            // lifetime of this process — confirm that is intended.
            fetch_retry: function(apicall) {
                return new Promise((resolve, reject) => {
                    this.serverList.splice(this.curPos, 1);
                    this.curPos = floCrypto.randInt(0, this.serverList.length - 1)
                    this.fetch_api(apicall)
                        .then(result => resolve(result))
                        .catch(error => reject(error));
                })
            },
            // GET `apicall` from the current server; on a non-OK response or a
            // network error, fall back to fetch_retry until the pool is empty.
            fetch_api: function(apicall) {
                return new Promise((resolve, reject) => {
                    if (this.serverList.length === 0)
                        reject("No floSight server working")
                    else {
                        fetch(this.serverList[this.curPos] + apicall).then(response => {
                            if (response.ok)
                                response.json().then(data => resolve(data));
                            else {
                                this.fetch_retry(apicall)
                                    .then(result => resolve(result))
                                    .catch(error => reject(error));
                            }
                        }).catch(error => {
                            this.fetch_retry(apicall)
                                .then(result => resolve(result))
                                .catch(error => reject(error));
                        })
                    }
                })
            }
        },
//Promised function to get data from API
promisedAPI: function(apicall) {
return new Promise((resolve, reject) => {
console.log(apicall)
this.util.fetch_api(apicall)
.then(result => resolve(result))
.catch(error => reject(error));
});
},
//Get balance for the given Address
getBalance: function(addr) {
return new Promise((resolve, reject) => {
this.promisedAPI(`api/addr/${addr}/balance`)
.then(balance => resolve(parseFloat(balance)))
.catch(error => reject(error));
});
},
//Write Data into blockchain
writeData: function(senderAddr, data, privKey, receiverAddr = floGlobals.adminID) {
return new Promise((resolve, reject) => {
if (typeof data != "string")
data = JSON.stringify(data);
this.sendTx(senderAddr, receiverAddr, floGlobals.sendAmt, privKey, data)
.then(txid => resolve(txid))
.catch(error => reject(error))
});
},
//Send Tx to blockchain
sendTx: function(senderAddr, receiverAddr, sendAmt, privKey, floData = '') {
return new Promise((resolve, reject) => {
if (!floCrypto.validateAddr(senderAddr))
reject(`Invalid address : ${senderAddr}`);
else if (!floCrypto.validateAddr(receiverAddr))
reject(`Invalid address : ${receiverAddr}`);
if (privKey.length < 1 || !floCrypto.verifyPrivKey(privKey, senderAddr))
reject("Invalid Private key!");
else if (typeof sendAmt !== 'number' || sendAmt <= 0)
reject(`Invalid sendAmt : ${sendAmt}`);
else {
var trx = bitjs.transaction();
var utxoAmt = 0.0;
var fee = floGlobals.fee;
this.promisedAPI(`api/addr/${senderAddr}/utxo`).then(utxos => {
for (var i = utxos.length - 1;
(i >= 0) && (utxoAmt < sendAmt + fee); i--) {
if (utxos[i].confirmations) {
trx.addinput(utxos[i].txid, utxos[i].vout, utxos[i]
.scriptPubKey)
utxoAmt += utxos[i].amount;
} else break;
}
if (utxoAmt < sendAmt + fee)
reject("Insufficient balance!");
else {
trx.addoutput(receiverAddr, sendAmt);
var change = utxoAmt - sendAmt - fee;
if (change > 0)
trx.addoutput(senderAddr, change);
trx.addflodata(floData.replace(/\n/g, ' '));
var signedTxHash = trx.sign(privKey, 1);
this.broadcastTx(signedTxHash)
.then(txid => resolve(txid))
.catch(error => reject(error))
}
}).catch(error => reject(error))
}
});
},
        //merge all UTXOs of a given floID into a single UTXO
        mergeUTXOs: function(floID, privKey, floData = '') {
            return new Promise((resolve, reject) => {
                if (!floCrypto.validateAddr(floID))
                    return reject(`Invalid floID`);
                if (!floCrypto.verifyPrivKey(privKey, floID))
                    return reject("Invalid Private Key")
                var trx = bitjs.transaction();
                var utxoAmt = 0.0;
                var fee = floGlobals.fee;
                this.promisedAPI(`api/addr/${floID}/utxo`).then(utxos => {
                    // Spend every *confirmed* UTXO of the address as an input.
                    // NOTE(review): unconfirmed UTXOs are skipped silently here
                    // (no `break` like in sendTx) — confirm that is intentional.
                    for (var i = utxos.length - 1; i >= 0; i--) {
                        if (utxos[i].confirmations) {
                            trx.addinput(utxos[i].txid, utxos[i].vout, utxos[i]
                                .scriptPubKey)
                            utxoAmt += utxos[i].amount;
                        }
                    }
                    // Single output back to the same address, minus the network fee.
                    trx.addoutput(floID, utxoAmt - fee);
                    trx.addflodata(floData.replace(/\n/g, ' '));
                    var signedTxHash = trx.sign(privKey, 1);
                    this.broadcastTx(signedTxHash)
                        .then(txid => resolve(txid))
                        .catch(error => reject(error))
                }).catch(error => reject(error))
            })
        },
/**Write data into blockchain from (and/or) to multiple floID
* @param {Array} senderPrivKeys List of sender private-keys
* @param {string} data FLO data of the txn
* @param {Array} receivers List of receivers
* @param {boolean} preserveRatio (optional) preserve ratio or equal contribution
* @return {Promise}
*/
writeDataMultiple: function(senderPrivKeys, data, receivers = [floGlobals.adminID], preserveRatio = true) {
return new Promise((resolve, reject) => {
if (!Array.isArray(senderPrivKeys))
return reject("Invalid senderPrivKeys: SenderPrivKeys must be Array")
if (!preserveRatio) {
let tmp = {};
let amount = (floGlobals.sendAmt * receivers.length) / senderPrivKeys.length;
senderPrivKeys.forEach(key => tmp[key] = amount);
senderPrivKeys = tmp
}
if (!Array.isArray(receivers))
return reject("Invalid receivers: Receivers must be Array")
else {
let tmp = {};
let amount = floGlobals.sendAmt;
receivers.forEach(floID => tmp[floID] = amount);
receivers = tmp
}
if (typeof data != "string")
data = JSON.stringify(data);
this.sendTxMultiple(senderPrivKeys, receivers, data)
.then(txid => resolve(txid))
.catch(error => reject(error))
})
},
/**Send Tx from (and/or) to multiple floID
* @param {Array or Object} senderPrivKeys List of sender private-key (optional: with coins to be sent)
* @param {Object} receivers List of receivers with respective amount to be sent
* @param {string} floData FLO data of the txn
* @return {Promise}
*/
sendTxMultiple: function(senderPrivKeys, receivers, floData = '') {
return new Promise((resolve, reject) => {
let senders = {},
preserveRatio;
//check for argument validations
try {
let invalids = {
InvalidSenderPrivKeys: [],
InvalidSenderAmountFor: [],
InvalidReceiverIDs: [],
InvalidReceiveAmountFor: []
}
let inputVal = 0,
outputVal = 0;
//Validate sender privatekeys (and send amount if passed)
//conversion when only privateKeys are passed (preserveRatio mode)
if (Array.isArray(senderPrivKeys)) {
senderPrivKeys.forEach(key => {
try {
if (!key)
invalids.InvalidSenderPrivKeys.push(key);
else {
let floID = floCrypto.getFloID(key);
senders[floID] = {
wif: key
}
}
} catch (error) {
invalids.InvalidSenderPrivKeys.push(key)
}
})
preserveRatio = true;
}
//conversion when privatekeys are passed with send amount
else {
for (let key in senderPrivKeys) {
try {
if (!key)
invalids.InvalidSenderPrivKeys.push(key);
else {
if (typeof senderPrivKeys[key] !== 'number' || senderPrivKeys[
key] <= 0)
invalids.InvalidSenderAmountFor.push(key)
else
inputVal += senderPrivKeys[key];
let floID = floCrypto.getFloID(key);
senders[floID] = {
wif: key,
coins: senderPrivKeys[key]
}
}
} catch (error) {
invalids.InvalidSenderPrivKeys.push(key)
}
}
preserveRatio = false;
}
//Validate the receiver IDs and receive amount
for (let floID in receivers) {
if (!floCrypto.validateAddr(floID))
invalids.InvalidReceiverIDs.push(floID)
if (typeof receivers[floID] !== 'number' || receivers[floID] <= 0)
invalids.InvalidReceiveAmountFor.push(floID)
else
outputVal += receivers[floID];
}
//Reject if any invalids are found
for (let i in invalids)
if (!invalids[i].length)
delete invalids[i];
if (Object.keys(invalids).length)
return reject(invalids);
//Reject if given inputVal and outputVal are not equal
if (!preserveRatio && inputVal != outputVal)
return reject(
`Input Amount (${inputVal}) not equal to Output Amount (${outputVal})`)
} catch (error) {
return reject(error)
}
//Get balance of senders
let promises = []
for (let floID in senders)
promises.push(this.getBalance(floID))
Promise.all(promises).then(results => {
let totalBalance = 0,
totalFee = floGlobals.fee,
balance = {};
//Divide fee among sender if not for preserveRatio
if (!preserveRatio)
var dividedFee = totalFee / Object.keys(senders).length;
//Check if balance of each sender is sufficient enough
let insufficient = [];
for (let floID in senders) {
balance[floID] = parseFloat(results.shift());
if (isNaN(balance[floID]) || (preserveRatio && balance[floID] <=
totalFee) || (!preserveRatio && balance[floID] < senders[floID]
.coins + dividedFee))
insufficient.push(floID)
totalBalance += balance[floID];
}
if (insufficient.length)
return reject({
InsufficientBalance: insufficient
})
//Calculate totalSentAmount and check if totalBalance is sufficient
let totalSendAmt = totalFee;
for (floID in receivers)
totalSendAmt += receivers[floID];
if (totalBalance < totalSendAmt)
return reject("Insufficient total Balance")
//Get the UTXOs of the senders
let promises = []
for (floID in senders)
promises.push(this.promisedAPI(`api/addr/${floID}/utxo`))
Promise.all(promises).then(results => {
let wifSeq = [];
var trx = bitjs.transaction();
for (floID in senders) {
let utxos = results.shift();
let sendAmt;
if (preserveRatio) {
let ratio = (balance[floID] / totalBalance);
sendAmt = totalSendAmt * ratio;
} else
sendAmt = senders[floID].coins + dividedFee;
let wif = senders[floID].wif;
let utxoAmt = 0.0;
for (let i = utxos.length - 1;
(i >= 0) && (utxoAmt < sendAmt); i--) {
if (utxos[i].confirmations) {
trx.addinput(utxos[i].txid, utxos[i].vout, utxos[i]
.scriptPubKey)
wifSeq.push(wif);
utxoAmt += utxos[i].amount;
}
}
if (utxoAmt < sendAmt)
return reject("Insufficient balance:" + floID);
let change = (utxoAmt - sendAmt);
if (change > 0)
trx.addoutput(floID, change);
}
for (floID in receivers)
trx.addoutput(floID, receivers[floID]);
trx.addflodata(floData.replace(/\n/g, ' '));
for (let i = 0; i < wifSeq.length; i++)
trx.signinput(i, wifSeq[i], 1);
var signedTxHash = trx.serialize();
this.broadcastTx(signedTxHash)
.then(txid => resolve(txid))
.catch(error => reject(error))
}).catch(error => reject(error))
}).catch(error => reject(error))
})
},
        //Broadcast signed Tx in blockchain using API
        //NOTE(review): uses XMLHttpRequest, which exists in browsers but not in
        //plain Node.js — confirm a polyfill is provided when running server-side.
        broadcastTx: function(signedTxHash) {
            return new Promise((resolve, reject) => {
                var request = new XMLHttpRequest();
                // POST to the server currently selected in the util pool.
                var url = this.util.serverList[this.util.curPos] + 'api/tx/send';
                console.log(url)
                if (signedTxHash.length < 1)
                    reject("Empty Signature");
                else {
                    var params = `{"rawtx":"${signedTxHash}"}`;
                    request.open('POST', url, true);
                    //Send the proper header information along with the request
                    request.setRequestHeader('Content-type', 'application/json');
                    request.onload = function() {
                        if (request.readyState == 4 && request.status == 200) {
                            console.log(request.response);
                            resolve(JSON.parse(request.response).txid.result);
                        } else
                            reject(request.responseText);
                    }
                    request.send(params);
                }
            })
        },
//Read Txs of Address between from and to
readTxs: function(addr, from, to) {
return new Promise((resolve, reject) => {
this.promisedAPI(`api/addrs/${addr}/txs?from=${from}&to=${to}`)
.then(response => resolve(response))
.catch(error => reject(error))
});
},
//Read All Txs of Address (newest first)
readAllTxs: function(addr) {
return new Promise((resolve, reject) => {
this.promisedAPI(`api/addrs/${addr}/txs?from=0&to=1`).then(response => {
this.promisedAPI(`api/addrs/${addr}/txs?from=0&to=${response.totalItems}0`)
.then(response => resolve(response.items))
.catch(error => reject(error));
}).catch(error => reject(error))
});
},
/*Read flo Data from txs of given Address
options can be used to filter data
limit : maximum number of filtered data (default = 1000, negative = no limit)
ignoreOld : ignore old txs (default = 0)
sentOnly : filters only sent data
pattern : filters data that starts with a pattern
contains : filters data that contains a string
filter : custom filter funtion for floData (eg . filter: d => {return d[0] == '$'})
*/
readData: function(addr, options = {}) {
options.limit = options.limit | 0
options.ignoreOld = options.ignoreOld | 0
return new Promise((resolve, reject) => {
this.promisedAPI(`api/addrs/${addr}/txs?from=0&to=1`).then(response => {
var newItems = response.totalItems - options.ignoreOld;
this.promisedAPI(`api/addrs/${addr}/txs?from=0&to=${newItems*2}`).then(
response => {
if (options.limit <= 0)
options.limit = response.items.length;
var filteredData = [];
for (i = 0; i < (response.totalItems - options.ignoreOld) &&
filteredData.length < options.limit; i++) {
if (options.sentOnly && response.items[i].vin[0].addr !==
addr)
continue;
if (options.pattern) {
try {
let jsonContent = JSON.parse(response.items[i]
.floData)
if (!Object.keys(jsonContent).includes(options
.pattern))
continue;
} catch (error) {
continue;
}
}
if (options.filter && !options.filter(response.items[i].floData))
continue;
filteredData.push(response.items[i].floData);
}
resolve({
totalTxs: response.totalItems,
data: filteredData
});
}).catch(error => {
reject(error);
});
}).catch(error => {
reject(error);
});
});
}
}
})(typeof global !== "undefined" ? global : window)

274
src/floCrypto.js Normal file
View File

@ -0,0 +1,274 @@
'use strict';
(function(GLOBAL) {
var floCrypto = GLOBAL.floCrypto = {}
const p = BigInteger("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16);
const ecparams = EllipticCurve.getSECCurveByName("secp256k1");
// (p + 1) / 4 — the exponent used for modular square roots mod p.
function exponent1() {
    const pPlusOne = p.add(BigInteger.ONE);
    return pPlusOne.divide(BigInteger("4"));
}
// Solve the secp256k1 curve equation y^2 = x^3 + 7 (mod p) for y, using the
// modular square root exponent from exponent1().
// x is x value of public key in BigInteger format without 02 or 03 or 04 prefix
function calculateY(x) {
    const rhs = x.modPow(BigInteger("3"), p).add(BigInteger("7")).mod(p);
    return rhs.modPow(exponent1(), p);
}
// Recover the uncompressed (x, y) point from a 02/03-prefixed compressed
// public key hex string; returns decimal-string coordinates.
function getUncompressedPublicKey(compressedPublicKey) {
    // Fetch x from compressedPublicKey
    let pubKeyBytes = Crypto.util.hexToBytes(compressedPublicKey);
    const prefix = pubKeyBytes.shift() // remove prefix
    let prefix_modulus = prefix % 2;
    pubKeyBytes.unshift(0) // add prefix 0 (keeps BigInteger non-negative)
    let x = new BigInteger(pubKeyBytes)
    let xDecimalValue = x.toString()
    // Fetch y by solving the curve equation for this x
    let y = calculateY(x);
    let yDecimalValue = y.toString();
    // verify y parity against the compression prefix (02 = even, 03 = odd)
    let resultBigInt = y.mod(BigInteger("2"));
    let check = resultBigInt.toString() % 2;
    if (prefix_modulus !== check)
        // NOTE(review): negate().mod(p) can yield a negative value with some
        // BigInteger implementations — confirm p.subtract(y) isn't required.
        yDecimalValue = y.negate().mod(p).toString();
    return {
        x: xDecimalValue,
        y: yDecimalValue
    };
}
// Generate an ephemeral EC key pair for the sender side of the key exchange.
// BUGFIX: privateKey and senderPublicKeyString were assigned without being
// declared — a ReferenceError under this file's 'use strict'.
function getSenderPublicKeyString() {
    const privateKey = ellipticCurveEncryption.senderRandom();
    const senderPublicKeyString = ellipticCurveEncryption.senderPublicString(privateKey);
    return {
        privateKey: privateKey,
        senderPublicKeyString: senderPublicKeyString
    }
}
// Derive the sender-side shared key from the receiver's compressed public key
// and the sender's ephemeral private key.
// NOTE(review): on failure this *returns* an Error object instead of throwing,
// so callers receive the Error as if it were a derived key — confirm intent.
function deriveSharedKeySender(receiverCompressedPublicKey, senderPrivateKey) {
    try {
        let receiverPublicKeyString = getUncompressedPublicKey(receiverCompressedPublicKey);
        var senderDerivedKey = ellipticCurveEncryption.senderSharedKeyDerivation(
            receiverPublicKeyString.x, receiverPublicKeyString.y, senderPrivateKey);
        return senderDerivedKey;
    } catch (error) {
        return new Error(error);
    }
}
// Receiver-side ECDH: derive the shared key from the sender's public key
// string ({XValuePublicString, YValuePublicString}) and our private key.
function deriveReceiverSharedKey(senderPublicKeyString, receiverPrivateKey) {
    const { XValuePublicString, YValuePublicString } = senderPublicKeyString;
    return ellipticCurveEncryption.receiverSharedKeyDerivation(
        XValuePublicString, YValuePublicString, receiverPrivateKey);
}
// Compute the receiver's public key string for the given EC private key.
function getReceiverPublicKeyString(privateKey) {
    const publicString = ellipticCurveEncryption.receiverPublicString(privateKey);
    return publicString;
}
// Convert a WIF-encoded private key into decimal and hex representations.
// pk_wif: base58 WIF string; isPubKeyCompressed: when true, also drop the
// trailing 0x01 compression-flag byte. Returns { privateKeyDecimal, privateKeyHex }.
// BUGFIX: the original assigned `privateKeyDecimal`/`privateKeyHex` without
// declaring them — a ReferenceError under this file's 'use strict'.
function wifToDecimal(pk_wif, isPubKeyCompressed = false) {
    let pk = Bitcoin.Base58.decode(pk_wif)
    pk.shift() // drop version byte
    pk.splice(-4, 4) // drop 4-byte checksum
    //If the private key corresponded to a compressed public key, also drop the last byte (it should be 0x01).
    if (isPubKeyCompressed == true) pk.pop()
    pk.unshift(0) // force a positive BigInteger
    const privateKeyDecimal = BigInteger(pk).toString()
    const privateKeyHex = Crypto.util.bytesToHex(pk)
    return {
        privateKeyDecimal: privateKeyDecimal,
        privateKeyHex: privateKeyHex
    }
}
//generate a random Integer within range (inclusive on both ends)
floCrypto.randInt = function(min, max) {
    const lo = Math.ceil(min);
    const hi = Math.floor(max);
    const span = hi - lo + 1;
    return lo + Math.floor(Math.random() * span);
}
//generate a random String within length (options : alphaNumeric chars only)
floCrypto.randString = function(length, alphaNumeric = true) {
    const ALNUM = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789';
    const FULL = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_+-./*?@#&$<>=[]{}():';
    const pool = alphaNumeric ? ALNUM : FULL;
    let result = '';
    while (result.length < length)
        result += pool.charAt(Math.floor(Math.random() * pool.length));
    return result;
}
//Encrypt Data using public-key
//Creates an ephemeral sender key pair, derives the ECDH shared key against
//publicKeyHex, and AES-encrypts `data` with it.
//Returns { secret, senderPublicKeyString } — both are needed to decrypt.
floCrypto.encryptData = function(data, publicKeyHex) {
    var senderECKeyData = getSenderPublicKeyString();
    var senderDerivedKey = deriveSharedKeySender(
        publicKeyHex, senderECKeyData.privateKey);
    // shared key material = X || Y of the derived point
    let senderKey = senderDerivedKey.XValue + senderDerivedKey.YValue;
    let secret = Crypto.AES.encrypt(data, senderKey);
    return {
        secret: secret,
        senderPublicKeyString: senderECKeyData.senderPublicKeyString
    };
}
//Decrypt Data using private-key
//data: the { secret, senderPublicKeyString } object produced by encryptData.
//privateKeyHex: receiver's WIF private key. Returns the decrypted string.
//Throws if the private key is missing or cannot be decoded.
floCrypto.decryptData = function(data, privateKeyHex) {
    var receiverECKeyData = {};
    if (typeof privateKeyHex !== "string") throw new Error("No private key found.");
    let privateKey = wifToDecimal(privateKeyHex, true);
    if (typeof privateKey.privateKeyDecimal !== "string") throw new Error(
        "Failed to detremine your private key.");
    receiverECKeyData.privateKey = privateKey.privateKeyDecimal;
    var receiverDerivedKey = deriveReceiverSharedKey(
        data.senderPublicKeyString, receiverECKeyData.privateKey);
    // shared key material = X || Y of the derived point (mirrors encryptData)
    let receiverKey = receiverDerivedKey.XValue + receiverDerivedKey.YValue;
    let decryptMsg = Crypto.AES.decrypt(data.secret, receiverKey);
    return decryptMsg;
}
//Sign data using private-key
floCrypto.signData = function(data, privateKeyHex) {
var key = new Bitcoin.ECKey(privateKeyHex);
if(key.priv === null)
return false;
key.setCompressed(true);
//var privateKeyArr = key.getBitcoinPrivateKeyByteArray();
//var privateKey = BigInteger.fromByteArrayUnsigned(privateKeyArr);
var messageHash = Crypto.SHA256(data);
var messageHashBigInteger = new BigInteger(messageHash);
var messageSign = Bitcoin.ECDSA.sign(messageHashBigInteger, key.priv);
var sighex = Crypto.util.bytesToHex(messageSign);
return sighex;
}
//Verify signature of the data using public-key
//data: original message; signatureHex: hex signature from signData;
//publicKeyHex: signer's public key. Returns true/false.
floCrypto.verifySign = function(data, signatureHex, publicKeyHex) {
    var msgHash = Crypto.SHA256(data);
    var messageHashBigInteger = new BigInteger(msgHash);
    var sigBytes = Crypto.util.hexToBytes(signatureHex);
    var signature = Bitcoin.ECDSA.parseSig(sigBytes);
    // reconstruct the signer's curve point from the public key hex
    var publicKeyPoint = ecparams.getCurve().decodePointHex(publicKeyHex);
    var verify = Bitcoin.ECDSA.verifyRaw(messageHashBigInteger,
        signature.r, signature.s, publicKeyPoint);
    return verify;
}
//Generates a new flo ID and returns private-key, public-key and floID
//Returns { floID, pubKey, privKey }; on failure logs the error and
//returns undefined.
floCrypto.generateNewID = function() {
    try {
        var key = new Bitcoin.ECKey(false); // false => generate a fresh key
        key.setCompressed(true);
        return {
            floID: key.getBitcoinAddress(),
            pubKey: key.getPubKeyHex(),
            privKey: key.getBitcoinWalletImportFormat()
        }
    } catch (e) {
        console.error(e);
    }
}
//Returns public-key (hex) from private-key; null on missing/invalid key.
floCrypto.getPubKeyHex = function(privateKeyHex) {
    if (!privateKeyHex) return null;
    const ecKey = new Bitcoin.ECKey(privateKeyHex);
    if (ecKey.priv == null) return null;
    ecKey.setCompressed(true);
    return ecKey.getPubKeyHex();
}
//Returns flo-ID from public-key or private-key; null on invalid input.
floCrypto.getFloID = function(keyHex) {
    if (!keyHex) return null;
    try {
        const ecKey = new Bitcoin.ECKey(keyHex);
        if (ecKey.priv == null)
            ecKey.setPub(keyHex); // input was a public key, not a private key
        return ecKey.getBitcoinAddress();
    } catch (e) {
        return null;
    }
}
//Verify the private-key against the given public-key or flo-ID.
//Returns true/false; logs and returns undefined on unexpected errors.
floCrypto.verifyPrivKey = function(privateKeyHex, publicHex_ID) {
    if (!privateKeyHex || !publicHex_ID)
        return false;
    try {
        const ecKey = new Bitcoin.ECKey(privateKeyHex);
        if (ecKey.priv == null)
            return false;
        ecKey.setCompressed(true);
        // accept a match against either the flo address or the pubkey hex
        if (publicHex_ID === ecKey.getBitcoinAddress())
            return true;
        return publicHex_ID === ecKey.getPubKeyHex();
    } catch (e) {
        console.error(e);
    }
}
//Check if the given Address is valid or not
floCrypto.validateAddr = function(inpAddr) {
    if (!inpAddr)
        return false;
    try {
        new Bitcoin.Address(inpAddr); // throws on a malformed address
        return true;
    } catch {
        return false;
    }
}
//Split the str using Shamir's Secret Sharing; returns the share strings,
//or false on empty input / failure.
floCrypto.createShamirsSecretShares = function(str, total_shares, threshold_limit) {
    try {
        if (!str.length)
            return false;
        const strHex = shamirSecretShare.str2hex(str);
        return shamirSecretShare.share(strHex, total_shares, threshold_limit);
    } catch {
        return false
    }
}
//Verifies that the given shares reconstruct exactly `str`; returns boolean.
floCrypto.verifyShamirsSecret = function(sharesArray, str) {
    if (str == false)
        return false;
    try {
        if (!sharesArray.length)
            return false;
        const combined = shamirSecretShare.combine(sharesArray.slice(0, sharesArray.length));
        return shamirSecretShare.hex2str(combined) === str;
    } catch {
        return false;
    }
}
//Returns the retrieved secret by combining the Shamir shares;
//false on empty input / failure.
floCrypto.retrieveShamirSecret = function(sharesArray) {
    try {
        if (!sharesArray.length)
            return false;
        const combined = shamirSecretShare.combine(sharesArray.slice(0, sharesArray.length));
        return shamirSecretShare.hex2str(combined);
    } catch {
        return false;
    }
}
})(typeof global !== "undefined" ? global : window)

30
src/floGlobals.js Normal file
View File

@ -0,0 +1,30 @@
const floGlobals = {
    //Required for all
    blockchain: "FLO",
    //Required for blockchain API operators
    apiURL: {
        FLO: ['https://explorer.mediciland.com/', 'https://livenet.flocha.in/', 'https://flosight.duckdns.org/', 'http://livenet-explorer.floexperiments.com'],
        FLO_TEST: ['https://testnet-flosight.duckdns.org', 'https://testnet.flocha.in/']
    },
    SNStorageID: "FNaN9McoBAEFUjkRmNQRYLmBF8SpS7Tgfk",
    //sendAmt: 0.001,
    //fee: 0.0005,
    //Required for Supernode operations
    supernodes: {}, //each supernode must be stored as floID : {uri:<uri>,pubKey:<publicKey>}
    appList: {},
    appSubAdmins: {},
    sn_config: {}
    /* List of supernode configurations (all blockchain controlled by SNStorageID)
        backupDepth - (Integer) Number of backup nodes
        refreshDelay - (Integer) Count of requests for triggering read-blockchain and autodelete
        deleteDelay - (Integer) Maximum duration (milliseconds) an unauthorised data is stored
        errorFeedback - (Boolean) Send error (if any) feedback to the requestor
        delayDelta - (Integer) Maximum allowed delay from the data-time
    */
};
// BUGFIX: the semicolon above is required — without it the next line's
// parenthesized expression was parsed as a call on the object literal,
// throwing a TypeError when the file loads.
(typeof global !== "undefined" ? global : window).cryptocoin = floGlobals.blockchain;
// BUGFIX: the CommonJS export property is `module.exports`, not `module.export`.
('object' === typeof module) ? module.exports = floGlobals : null;

700
src/intra.js Normal file
View File

@ -0,0 +1,700 @@
'use strict';
const WebSocket = require('ws');
//CONSTANTS
const SUPERNODE_INDICATOR = '$',
    //Message type
    ORDER_BACKUP = "orderBackup",
    STORE_BACKUP_DATA = "backupData",
    STORE_MIGRATED_DATA = "migratedData",
    DELETE_MIGRATED_DATA = "migratedDelete",
    //DELETE_BACKUP_DATA = "backupDelete",
    TAG_BACKUP_DATA = "backupTag",
    //EDIT_BACKUP_DATA = "backupEdit",
    INITIATE_REFRESH = "initiateRefresh",
    DATA_REQUEST = "dataRequest",
    DATA_SYNC = "dataSync",
    BACKUP_HANDSHAKE_INIT = "handshakeInitate",
    BACKUP_HANDSHAKE_END = "handshakeEnd",
    // BUGFIX: RECONNECT_NEXT_NODE is referenced by the task dispatchers and
    // the handshake logic but was never declared — a ReferenceError under
    // 'use strict' the first time a reconnect task was handled.
    RECONNECT_NEXT_NODE = "reconnectNextNode",
    RETRY_TIMEOUT = 5 * 60 * 1000, //5 mins
    MIGRATE_WAIT_DELAY = 5 * 60 * 1000; //5 mins
var DB, refresher //container for database and refresher
//List of node backups stored (floID -> backup depth; 0 = serving directly).
//Helper members are defined via defineProperties so they stay
//non-enumerable and do not show up as node entries.
const _list = {};
Object.defineProperties(_list, {
    // remove a node's entry
    'delete': {
        value: function(id) {
            delete this[id];
        }
    },
    // fetch a single entry, a subset (array of keys) or all entries
    'get': {
        value: function(keys = null) {
            if (keys === null)
                keys = Object.keys(this);
            return Array.isArray(keys) ?
                Object.fromEntries(keys.map(k => [k, this[k]])) :
                this[keys];
        }
    },
    // all node IDs with data stored here (any depth)
    'stored': {
        get: function() {
            return Object.keys(this);
        }
    },
    // node IDs served directly by this node (depth 0)
    'serving': {
        get: function() {
            return Object.keys(this).filter(id => this[id] === 0);
        }
    }
});
//Node container
//Wraps a single WebSocket connection to a peer supernode behind a stable
//interface: the socket can be swapped (set) while the registered
//onmessage/onclose handlers are carried over to the new socket.
function NodeContainer() {
    // private state held in closure: socket, peer floID, and handlers
    var _ws, _id, _onmessage, _onclose;
    // replace the current connection (closing any previous one)
    Object.defineProperty(this, 'set', {
        value: function(id, ws) {
            if (_ws !== undefined)
                this.close();
            _id = id;
            _ws = ws;
            // re-attach previously registered handlers to the new socket
            if (_onmessage)
                _ws.onmessage = _onmessage;
            if (_onclose)
                _ws.onclose = _onclose;
        }
    })
    // floID of the currently connected peer (undefined when disconnected)
    Object.defineProperty(this, 'id', {
        get: function() {
            return _id;
        }
    })
    // underlying socket readyState, or null when no socket is set
    Object.defineProperty(this, 'readyState', {
        get: function() {
            if (_ws instanceof WebSocket)
                return _ws.readyState;
            else
                return null;
        }
    })
    Object.defineProperty(this, 'send', {
        value: function(packet) {
            _ws.send(packet);
        }
    });
    // handler setters: stored so they survive socket replacement via set()
    Object.defineProperty(this, 'onmessage', {
        set: function(fn) {
            if (fn instanceof Function)
                _onmessage = fn;
        }
    })
    Object.defineProperty(this, 'onclose', {
        set: function(fn) {
            if (fn instanceof Function)
                _onclose = fn;
        }
    })
    // identity check: is the given ws the one currently held?
    Object.defineProperty(this, 'is', {
        value: function(ws) {
            return ws === _ws;
        }
    })
    // close and forget the current connection; suppresses the normal
    // onclose handler so closing does not trigger reconnect logic
    Object.defineProperty(this, 'close', {
        value: function() {
            if (_ws.readyState === 1) {
                _ws.onclose = () => console.warn('Closing: ' + _id)
                _ws.close();
            }
            _ws = _id = undefined;
        }
    })
}
//Container for next-node (the node that backs up our data);
//its tasks are dispatched to processTaskFromNextNode, and a dropped
//connection triggers a reconnect along the ring.
const _nextNode = new NodeContainer();
_nextNode.onmessage = evt => processTaskFromNextNode(evt.data);
_nextNode.onclose = evt => reconnectNextNode();
//Container for prev-node (the node whose data we back up);
//a dropped connection is simply cleaned up — the prev-node re-initiates.
const _prevNode = new NodeContainer();
_prevNode.onmessage = evt => processTaskFromPrevNode(evt.data);
_prevNode.onclose = evt => _prevNode.close();
//Packet processing
//Inter-supernode packets: SUPERNODE_INDICATOR + JSON of
//{ from, message, time, sign }, signed over "JSON(message)|time".
const packet_ = {}
// NOTE: method name `constuct` [sic] is kept — it is referenced throughout
// this file; renaming it would be a breaking change.
packet_.constuct = function(message) {
    const packet = {
        from: myFloID,
        message: message,
        time: Date.now()
    }
    packet.sign = floCrypto.signData(this.s(packet), myPrivKey);
    return SUPERNODE_INDICATOR + JSON.stringify(packet)
}
// canonical string that gets signed/verified
packet_.s = d => [JSON.stringify(d.message), d.time].join("|");
// Parse and validate an incoming packet: must be recent (within
// sn_config.delayDelta), from a known supernode, and correctly signed.
// Returns the packet (message normalized to an array) or undefined.
packet_.parse = function(str) {
    let packet = JSON.parse(str.substring(SUPERNODE_INDICATOR.length))
    let curTime = Date.now();
    if (packet.time > curTime - floGlobals.sn_config.delayDelta &&
        packet.from in floGlobals.supernodes &&
        floCrypto.verifySign(this.s(packet), packet.sign, floGlobals.supernodes[packet.from].pubKey)) {
        if (!Array.isArray(packet.message))
            packet.message = [packet.message];
        return packet;
    }
}
//-----NODE CONNECTORS (WEBSOCKET)-----
//Connect to Node websocket
//Resolves an open WebSocket to the given supernode; rejects when the node
//is unknown or offline.
function connectToNode(snID) {
    return new Promise((resolve, reject) => {
        if (!(snID in floGlobals.supernodes))
            return reject(`${snID} is not a supernode`)
        // BUGFIX: original read floGlobals.supernodes[nextNodeID].uri —
        // `nextNodeID` is not in scope here; the target node is `snID`.
        const ws = new WebSocket("wss://" + floGlobals.supernodes[snID].uri + "/");
        ws.on("error", () => reject(`${snID} is offline`));
        ws.on('open', () => resolve(ws));
    })
}
//Connect to Node websocket thats online.
//Walks the ring (forward, or backward when reverse=true) until an online
//supernode is found; rejects after coming full circle back to self.
function connectToActiveNode(snID, reverse = false) {
    return new Promise((resolve, reject) => {
        if (!(snID in floGlobals.supernodes))
            return reject(`${snID} is not a supernode`)
        if (snID === myFloID)
            return reject(`Reached end of circle. Next node avaiable is self`)
        connectToNode(snID).then(resolve).catch(() => {
            const fallback = reverse ? kBucket.prevNode(snID) : kBucket.nextNode(snID);
            connectToActiveNode(fallback, reverse).then(resolve).catch(reject);
        });
    })
}
//Connect to next available node on the ring and initiate the backup
//handshake with it.
function connectToNextNode() {
    return new Promise((resolve, reject) => {
        // BUGFIX: original used undeclared `nodeID`; the ring walk starts
        // from this node's own flo-ID (`myFloID`, as used elsewhere here).
        let nextNodeID = kBucket.nextNode(myFloID);
        connectToActiveNode(nextNodeID).then(ws => {
            _nextNode.set(nextNodeID, ws)
            _nextNode.send(packet_.constuct({
                type: BACKUP_HANDSHAKE_INIT
            }));
            resolve("BACKUP_HANDSHAKE_INIT: " + nextNodeID)
        }).catch(error => reject(error))
    })
}
//Connect to the given nodes [Array] (Default: all supernodes).
//Resolves a map of floID -> open WebSocket, or null for offline nodes.
function connectToAliveNodes(nodes = null) {
    if (!Array.isArray(nodes)) nodes = Object.keys(floGlobals.supernodes);
    return new Promise((resolve, reject) => {
        Promise.allSettled(nodes.map(n => connectToNode(n))).then(results => {
            let ws_connections = {};
            // BUGFIX: original checked `results.status` (undefined on the
            // results array); the per-node outcome is results[i].status.
            nodes.forEach((n, i) =>
                ws_connections[n] = (results[i].status === "fulfilled") ? results[i].value : null);
            resolve(ws_connections);
        }).catch(error => reject(error))
    })
}
//Connect to all given nodes [Array] (Default: All super-nodes), falling
//back along the ring for each offline node (connectToActiveNode).
//Resolves a map of floID -> open WebSocket, or null on total failure.
function connectToAllActiveNodes(nodes = null) {
    if (!Array.isArray(nodes)) nodes = Object.keys(floGlobals.supernodes);
    return new Promise((resolve, reject) => {
        Promise.allSettled(nodes.map(n => connectToActiveNode(n))).then(results => {
            let ws_connections = {};
            // BUGFIX: original checked `results.status` (undefined on the
            // results array); the per-node outcome is results[i].status.
            nodes.forEach((n, i) =>
                ws_connections[n] = (results[i].status === "fulfilled") ? results[i].value : null);
            resolve(ws_connections);
        }).catch(error => reject(error))
    })
}
//-----PROCESS TASKS-----
//Tasks from next-node
//Validates the packet (packet_.parse returns undefined for stale, unknown
//or badly-signed packets) and dispatches each contained task.
function processTaskFromNextNode(packet) {
    var {
        from,
        message
    } = packet_.parse(packet)
    if (message) {
        message.forEach(task => {
            switch (task.type) {
                case RECONNECT_NEXT_NODE: //Triggered when a node inbetween is available
                    reconnectNextNode()
                    break;
                case BACKUP_HANDSHAKE_END:
                    handshakeEnd()
                    break;
                case DATA_REQUEST:
                    sendStoredData(task.nodes, _nextNode)
                    break;
                case DATA_SYNC:
                    dataSyncIndication(task.id, task.status, from)
                    break;
                case STORE_BACKUP_DATA:
                    storeBackupData(task.data)
                    break;
                default:
                    console.log("Invalid task type:" + task.type + "from next-node")
            }
        })
    }
}
//Tasks from prev-node
//Validates the packet and dispatches each task; `packet` is forwarded
//as-is down the backup chain by the store/tag/delete handlers.
function processTaskFromPrevNode(packet) {
    var {
        from,
        message
    } = packet_.parse(packet)
    if (message) {
        message.forEach(task => {
            switch (task.type) {
                case ORDER_BACKUP:
                    orderBackup(task.order)
                    break;
                case STORE_BACKUP_DATA:
                    storeBackupData(task.data, from, packet)
                    break;
                case TAG_BACKUP_DATA:
                    tagBackupData(task.data, from, packet)
                    break;
                case DATA_REQUEST:
                    sendStoredData(task.nodes, _prevNode)
                    break;
                case DATA_SYNC:
                    dataSyncIndication(task.id, task.status, from)
                    break;
                case DELETE_MIGRATED_DATA:
                    deleteMigratedData(task.data, from, packet)
                    break;
                default:
                    console.log("Invalid task type:" + task.type + "from prev-node")
            }
        });
    }
}
//Tasks from any supernode (not yet an established prev/next peer);
//`ws` is the raw socket so a handshake can adopt it as the prev-node.
function processTaskFromSupernode(packet, ws) {
    var {
        from,
        message
    } = packet_.parse(packet)
    if (message) {
        message.forEach(task => {
            switch (task.type) {
                case BACKUP_HANDSHAKE_INIT:
                    handshakeMid(from, ws)
                    break;
                case STORE_MIGRATED_DATA:
                    storeMigratedData(task.data)
                    break;
                case INITIATE_REFRESH:
                    initiateRefresh()
                    break;
                default:
                    console.log("Invalid task type:" + task.type + "from super-node")
            }
        });
    }
}
//-----HANDSHAKE PROCESS-----
//Acknowledge handshake
//An incoming node wants us as its next-node (backup). Accept it as our
//prev-node when it sits between the current prev-node and us on the ring;
//otherwise tell it to reconnect further along. Then take over serving the
//nodes between the new prev-node and us, requesting any data we lack.
function handshakeMid(id, ws) {
    if (_prevNode.id && _prevNode.id in floGlobals.supernodes) {
        if (kBucket.innerNodes(_prevNode.id, myFloID).includes(id)) {
            //close existing prev-node connection
            _prevNode.send(packet_.constuct({
                type: RECONNECT_NEXT_NODE
            }));
            _prevNode.close();
            //set the new prev-node connection
            _prevNode.set(id, ws);
            _prevNode.send(packet_.constuct({
                type: BACKUP_HANDSHAKE_END
            }))
        } else {
            //Incorrect order, existing prev-node is already after the incoming node
            ws.send(packet_.constuct({
                type: RECONNECT_NEXT_NODE
            }))
            return;
        }
    } else {
        //set the new prev-node connection
        _prevNode.set(id, ws);
        _prevNode.send(packet_.constuct({
            type: BACKUP_HANDSHAKE_END
        }))
    }
    //Reorder storelist: everything between the new prev-node and self
    //(inclusive of self) is now served directly by this node (depth 0)
    let nodes = kBucket.innerNodes(_prevNode.id, myFloID).concat(myFloID),
        req_sync = [],
        new_order = [];
    nodes.forEach(n => {
        switch (_list[n]) {
            case 0: //Do nothing
                break;
            case undefined:
                // not stored here yet: needs a data sync, AND (fallthrough
                // is intentional) must be marked depth 0 / reordered
                req_sync.push(n);
            default:
                _list[n] = 0;
                new_order.push(n);
        }
    });
    if (!req_sync.length && !new_order.length)
        return; //No order change and no need for any data sync
    else
        handshakeMid.requestData(req_sync, new_order);
}
// Request missing data from the next-node and push the new backup order.
// req_sync: nodes whose data we lack (a DATA_REQUEST with our last-log
// per node is sent); new_order: nodes whose depth changed (ORDER_BACKUP).
// Nodes whose last-log lookup fails are retried after RETRY_TIMEOUT.
handshakeMid.requestData = function(req_sync, new_order) {
    // cancel any pending retry before starting a fresh attempt
    if (handshakeMid.timeout) {
        clearTimeout(handshakeMid.timeout)
        delete handshakeMid.timeout;
    }
    Promise.allSettled(req_sync.map(n => DB.createGetLastLog(n))).then(result => {
        let tasks = [],
            lastlogs = {},
            failed = [],
            order = [],
            failed_order = [];
        req_sync.forEach((s, i) => {
            if (result[i].status === "fulfilled")
                lastlogs[s] = result[i].value;
            else
                failed.push(s)
        });
        if (Object.keys(lastlogs).length)
            tasks.push({
                type: DATA_REQUEST,
                nodes: lastlogs
            });
        // only announce the order for nodes whose last-log succeeded;
        // failed ones are re-announced on retry
        new_order.forEach(n => {
            if (failed.includes(n))
                failed_order.push(n)
            else
                order.push(n)
        });
        if (order.length)
            tasks.push({
                type: ORDER_BACKUP,
                order: _list.get(order)
            })
        _nextNode.send(packet_.constuct(tasks));
        if (failed.length)
            handshakeMid.timeout = setTimeout(_ => handshakeMid.requestData(failed, failed_order), RETRY_TIMEOUT)
    });
}
//Complete handshake: our next-node has accepted us; push it the complete
//backup order so it can mirror our stored nodes.
function handshakeEnd() {
    console.log("Backup connected: " + _nextNode.id)
    const orderTask = {
        type: ORDER_BACKUP,
        order: _list.get()
    };
    _nextNode.send(packet_.constuct(orderTask));
}
//Reconnect to next available node on the ring; if every other node is
//offline, fall back to serving all supernodes from this node.
function reconnectNextNode() {
    _nextNode.close();
    connectToNextNode()
        .then(msg => console.log(msg))
        .catch(err => {
            //Case: No other node is online
            console.error(err);
            //Serve all nodes
            for (let sn in floGlobals.supernodes)
                DB.createTable(sn)
                    .then(() => _list[sn] = 0)
                    .catch(e => console.error(e));
        });
}
//-----BACKUP TASKS-----
//Order the stored backup
//Receives the prev-node's depth map and stores each node at depth+1.
//Nodes past the configured backupDepth are dropped; nodes we currently
//serve directly (depth 0) are kept as-is. Changed depths are relayed
//onward to our own next-node.
function orderBackup(order) {
    let new_order = [];
    let cur_serve = kBucket.innerNodes(_prevNode.id, myFloID);
    for (let n in order) {
        if (order[n] + 1 !== _list[n]) { // depth actually changed
            if (order[n] >= floGlobals.sn_config.backupDepth)
                // beyond our backup responsibility: drop table and entry
                DB.dropTable(n).then(_ => null)
                .catch(error => console.error(error))
                .finally(_ => _list.delete(n))
            else if (_list[n] !== 0 || !cur_serve.includes(n)) {
                _list[n] = order[n] + 1;
                new_order.push(n);
            }
        }
    }
    if (new_order.length) {
        _nextNode.send(packet_.constuct({
            type: ORDER_BACKUP,
            order: _list.get(new_order)
        }));
    }
}
//Send stored data
//lastlogs: map of floID -> last log/vectorClock known to the peer; sends
//everything newer than that, bracketed by DATA_SYNC start/end markers.
//node: the peer container (_nextNode or _prevNode) to send through.
//BUGFIX: loop variable `n` was an undeclared implicit global (ReferenceError
//under 'use strict') and the log templates referenced an undefined `snID`.
function sendStoredData(lastlogs, node) {
    for (let n in lastlogs) {
        if (_list.stored.includes(n)) {
            DB.getData(n, lastlogs[n]).then(result => {
                node.send(packet_.constuct({
                    type: DATA_SYNC,
                    id: n,
                    status: true
                }))
                console.info(`START: ${n} data sync(send) to ${node.id}`)
                //TODO: efficiently handle large number of data instead of loading all into memory
                result.forEach(d => node.send(packet_.constuct({
                    type: STORE_BACKUP_DATA,
                    data: d
                })))
                console.info(`END: ${n} data sync(send) to ${node.id}`)
                node.send(packet_.constuct({
                    type: DATA_SYNC,
                    id: n,
                    status: false
                }))
            }).catch(error => console.error(error))
        }
    }
}
//Indicate sync of data (log-only marker for a peer's DATA_SYNC bracket).
function dataSyncIndication(snID, status, from) {
    const phase = status ? 'START' : 'END';
    console.info(`${phase}: ${snID} data sync(receive) form ${from}`);
}
//Store (backup) data when we hold a backup for its owner node; relay the
//original packet down the chain unless we are the deepest backup or the
//packet came from our next-node.
function storeBackupData(data, from, packet) {
    const owner = kBucket.closestNode(data.receiverID);
    if (!_list.stored.includes(owner))
        return;
    DB.storeData(owner, data);
    if (_list[owner] < floGlobals.sn_config.backupDepth && _nextNode.id !== from)
        _nextNode.send(packet);
}
//Tag (backup) data when we hold a backup for its owner node; relay the
//original packet onward under the same conditions as storeBackupData.
function tagBackupData(data, from, packet) {
    const owner = kBucket.closestNode(data.receiverID);
    if (!_list.stored.includes(owner))
        return;
    DB.storeTag(owner, data);
    if (_list[owner] < floGlobals.sn_config.backupDepth && _nextNode.id !== from)
        _nextNode.send(packet);
}
//Store (migrated) data for a node this supernode serves directly, then
//start its backup chain by forwarding to the next-node.
function storeMigratedData(data) {
    const owner = kBucket.closestNode(data.receiverID);
    if (!_list.serving.includes(owner))
        return;
    DB.storeData(owner, data);
    _nextNode.send(packet_.constuct({
        type: STORE_BACKUP_DATA,
        data: data
    }));
}
//Delete (migrated) data
//Handles a DELETE_MIGRATED_DATA task: removes a record that has migrated
//away from its old storage node (data.snID) and relays the packet down
//the backup chain.
//BUGFIX: the dispatcher invokes this as (task.data, from, packet); the
//previous signature (old_sn, vectorClock, receiverID, from, packet) could
//never receive its arguments correctly.
function deleteMigratedData(data, from, packet) {
    if (!data)
        return;
    let closestNode = kBucket.closestNode(data.receiverID);
    if (data.snID !== closestNode && _list.stored.includes(data.snID)) {
        DB.deleteData(data.snID, data.vectorClock);
        if (_list[data.snID] < floGlobals.sn_config.backupDepth &&
            _nextNode.id !== from)
            _nextNode.send(packet);
    }
}
// Trigger a (non-forced) refresh cycle on the injected refresher.
function initiateRefresh() {
    refresher.invoke(false)
}
//Forward incoming to next node.
//mode: 'TAG' or 'DATA' — other modes (or a missing next-node) are ignored.
function forwardToNextNode(mode, data) {
    const modeMap = {
        'TAG': TAG_BACKUP_DATA,
        'DATA': STORE_BACKUP_DATA
    };
    if (!(mode in modeMap) || !_nextNode.id)
        return;
    _nextNode.send(packet_.constuct({
        type: modeMap[mode],
        data: data
    }));
}
//Data migration processor: reacts to supernode list changes.
//node_change: map of floID -> true (added) / false (removed).
//flag: when true, intimate all other nodes to run their refresher too.
function dataMigration(node_change, flag) {
    // BUGFIX: Object.keys() returns an array, which is always truthy —
    // the early-exit must check its length.
    if (!Object.keys(node_change).length)
        return;
    console.log("Node list changed! Data migration required");
    if (flag) dataMigration.intimateAllNodes(); //Intimate All nodes to call refresher
    let new_nodes = [],
        del_nodes = [];
    for (let n in node_change)
        (node_change[n] ? new_nodes : del_nodes).push(n);
    if (del_nodes.includes(_prevNode.id)) {
        _list[_prevNode.id] = 0; //Temporary serve for the deleted node
        _prevNode.close();
    }
    setTimeout(() => {
        //reconnect next node if current next node is deleted
        if (del_nodes.includes(_nextNode.id))
            reconnectNextNode();
        else { //reconnect next node if there are newly added nodes in between self and current next node
            let innerNodes = kBucket.innerNodes(myFloID, _nextNode.id)
            if (new_nodes.filter(n => innerNodes.includes(n)).length)
                reconnectNextNode();
        }
        setTimeout(() => {
            dataMigration.process_new(new_nodes);
            dataMigration.process_del(del_nodes);
        }, MIGRATE_WAIT_DELAY);
    }, MIGRATE_WAIT_DELAY)
}
//data migration sub-process: Deleted nodes
//Re-homes the data of removed supernodes to their new closest nodes, then
//drops the local tables of every removed node.
//BUGFIX: error callbacks called `reject`, which is not in scope inside this
//async function (it would raise a ReferenceError); errors are logged instead.
dataMigration.process_del = async function(del_nodes) {
    if (!del_nodes.length)
        return;
    let process_nodes = del_nodes.filter(n => _list.serving.includes(n))
    if (process_nodes.length) {
        connectToAllActiveNodes().then(ws_connections => {
            let remaining = process_nodes.length;
            process_nodes.forEach(n => {
                DB.getData(n, 0).then(result => {
                    console.info(`START: Data migration for ${n}`);
                    //TODO: efficiently handle large number of data instead of loading all into memory
                    result.forEach(d => {
                        let closest = kBucket.closestNode(d.receiverID);
                        if (_list.serving.includes(closest)) {
                            // new owner is ourselves: store and back up
                            DB.storeData(closest, d);
                            _nextNode.send(packet_.constuct({
                                type: STORE_BACKUP_DATA,
                                data: d
                            }));
                        } else
                            // hand the record over to its new owner
                            ws_connections[closest].send(packet_.constuct({
                                type: STORE_MIGRATED_DATA,
                                data: d
                            }));
                    })
                    console.info(`END: Data migration for ${n}`);
                    _list.delete(n);
                    DB.dropTable(n);
                    remaining--;
                }).catch(error => console.error(error))
            });
            // close all connections once every node has been processed
            const interval = setInterval(() => {
                if (remaining <= 0) {
                    for (let c in ws_connections)
                        if (ws_connections[c])
                            ws_connections[c].close()
                    clearInterval(interval);
                }
            }, RETRY_TIMEOUT);
        }).catch(error => console.error(error))
    }
    // removed nodes we only backed up (not served): just drop them
    del_nodes.forEach(n => {
        if (!process_nodes.includes(n) && _list.stored.includes(n)) {
            _list.delete(n);
            DB.dropTable(n);
        }
    })
}
//data migration sub-process: Added nodes
//Moves records now owned by newly added supernodes over to them, and tells
//our backup chain to delete the migrated records.
//BUGFIX: (1) error callbacks called `reject`, which is not in scope inside
//this async function; errors are logged instead. (2) the delete task's
//fields must be nested under `data` — the receiving dispatcher passes
//task.data to deleteMigratedData.
dataMigration.process_new = async function(new_nodes) {
    if (!new_nodes.length)
        return;
    connectToAllActiveNodes(new_nodes).then(ws_connections => {
        let process_nodes = _list.serving,
            remaining = process_nodes.length;
        process_nodes.forEach(n => {
            DB.getData(n, 0).then(result => {
                //TODO: efficiently handle large number of data instead of loading all into memory
                result.forEach(d => {
                    let closest = kBucket.closestNode(d.receiverID);
                    if (new_nodes.includes(closest)) {
                        if (_list.serving.includes(closest)) {
                            DB.storeData(closest, d);
                            _nextNode.send(packet_.constuct({
                                type: STORE_BACKUP_DATA,
                                data: d
                            }));
                        } else
                            ws_connections[closest].send(packet_.constuct({
                                type: STORE_MIGRATED_DATA,
                                data: d
                            }));
                        _nextNode.send(packet_.constuct({
                            type: DELETE_MIGRATED_DATA,
                            data: {
                                vectorClock: d.vectorClock,
                                receiverID: d.receiverID,
                                snID: n
                            }
                        }));
                    }
                })
                remaining--;
            }).catch(error => console.error(error))
        });
        // close all connections once every served node has been processed
        const interval = setInterval(() => {
            if (remaining <= 0) {
                for (let c in ws_connections)
                    if (ws_connections[c])
                        ws_connections[c].close()
                clearInterval(interval);
            }
        }, RETRY_TIMEOUT);
    }).catch(error => console.error(error))
}
// Intimate every alive supernode to run a refresh cycle (INITIATE_REFRESH).
// BUGFIX: the error callback called `reject`, which is not in scope here
// (there is no enclosing Promise executor); the error is logged instead.
dataMigration.intimateAllNodes = function() {
    connectToAliveNodes().then(ws_connections => {
        let packet = packet_.constuct({
            type: INITIATE_REFRESH
        })
        for (let n in ws_connections)
            if (ws_connections[n]) {
                ws_connections[n].send(packet)
                ws_connections[n].close()
            }
    }).catch(error => console.error(error))
}
//-----EXPORTS-----
module.exports = {
    processTaskFromSupernode,
    // NOTE(review): `setBlockchainParameters` is not defined anywhere in
    // this file as shown — confirm it exists, otherwise this export throws
    // a ReferenceError when the module is loaded.
    setBlockchainParameters,
    forwardToNextNode,
    dataMigration,
    SUPERNODE_INDICATOR,
    _list,
    // database handle injected by the launcher
    set DB(db) {
        DB = db
    },
    // refresher injected by the launcher
    set refresher(r) {
        refresher = r
    }
}

111
src/kBucket.js Normal file
View File

@ -0,0 +1,111 @@
'use strict';
require('./lib/BuildKBucket')
(function(GLOBAL) {
var kBucket = GLOBAL.kBucket = {}
var SNKB, SNCO;
// Decode a base58 flo-ID into a Uint8Array suitable as a KBucket node id:
// strip the version byte and 4-byte checksum, then round-trip through
// BigInteger to drop any leading zero bytes.
function decodeID(floID) {
    let k = bitjs.Base58.decode(floID)
    k.shift() // drop version byte
    k.splice(-4, 4) // drop 4-byte checksum
    const decodedId = Crypto.util.bytesToHex(k);
    const nodeIdBigInt = new BigInteger(decodedId, 16);
    const nodeIdBytes = nodeIdBigInt.toByteArrayUnsigned();
    const nodeIdNewInt8Array = new Uint8Array(nodeIdBytes);
    return nodeIdNewInt8Array;
}
// XOR distance between the given floID and this bucket's local node id.
function distanceOf(floID) {
    const decoded = decodeID(floID);
    return SNKB.distance(SNKB.localNodeId, decoded);
}
// Build a KBucket over the decoded supernode IDs, centered on refID.
// Each contact keeps its original floID so lookups can map back from
// decoded ids to flo addresses.
// BUGFIX: the contact's floID must be the loop variable `id`; the original
// referenced an undeclared `floID` (ReferenceError under 'use strict').
function constructKB(list, refID) {
    let KB = new BuildKBucket({
        localNodeId: decodeID(refID)
    });
    list.forEach(id => KB.add({
        id: decodeID(id),
        floID: id
    }));
    return KB;
}
// Build the supernode KBucket (centered on the master SNStorageID) and the
// circular order SNCO: supernodes sorted by XOR distance from the master.
kBucket.launch = function() {
    return new Promise((resolve, reject) => {
        try {
            let superNodeList = Object.keys(floGlobals.supernodes);
            let masterID = floGlobals.SNStorageID;
            SNKB = constructKB(superNodeList, masterID);
            // sort by distance from master, keep only the flo-IDs
            SNCO = superNodeList.map(sn => [distanceOf(sn), sn])
                .sort((a, b) => a[0] - b[0])
                .map(a => a[1])
            resolve('SuperNode KBucket formed');
        } catch (error) {
            reject(error);
        }
    });
}
// Nodes strictly between id1 and id2 walking forward along the circular
// order SNCO (exclusive of both ends). The `i = -1` assignment wraps the
// walk around the end of the ring.
kBucket.innerNodes = function(id1, id2) {
    if (!SNCO.includes(id1) || !SNCO.includes(id2))
        throw Error('Given nodes are not supernode');
    let iNodes = []
    for (let i = SNCO.indexOf(id1) + 1; SNCO[i] != id2; i++) {
        if (i < SNCO.length)
            iNodes.push(SNCO[i])
        else i = -1 // wrap around the ring
    }
    return iNodes
}
// Nodes on the complementary arc: strictly between id2 and id1 walking
// forward along SNCO (i.e. everything NOT between id1 and id2).
kBucket.outterNodes = function(id1, id2) {
    if (!SNCO.includes(id1) || !SNCO.includes(id2))
        throw Error('Given nodes are not supernode');
    let oNodes = []
    for (let i = SNCO.indexOf(id2) + 1; SNCO[i] != id1; i++) {
        if (i < SNCO.length)
            oNodes.push(SNCO[i])
        else i = -1 // wrap around the ring
    }
    return oNodes
}
// N previous nodes before `id` on the circular order SNCO (N = 0 means
// "all"). Returns a single floID when N == 1, else an array. The walk
// wraps backward around the ring and stops if it reaches `id` again.
kBucket.prevNode = function(id, N = 1) {
    let n = N || SNCO.length; // N = 0 => full ring
    if (!SNCO.includes(id))
        throw Error('Given node is not supernode');
    let pNodes = []
    for (let i = 0, j = SNCO.indexOf(id) - 1; i < n; j--) {
        if (j == SNCO.indexOf(id))
            break; // completed a full circle
        else if (j > -1)
            pNodes[i++] = SNCO[j]
        else j = SNCO.length // wrap to the end of the ring
    }
    return (N == 1 ? pNodes[0] : pNodes)
}
// N next nodes after `id` on the circular order SNCO (N = 0 means "all").
// Returns a single floID when N == 1, else an array. The walk wraps
// forward around the ring and stops if it reaches `id` again.
kBucket.nextNode = function(id, N = 1) {
    let n = N || SNCO.length; // N = 0 => full ring
    if (!SNCO.includes(id))
        throw Error('Given node is not supernode');
    let nNodes = []
    for (let i = 0, j = SNCO.indexOf(id) + 1; i < n; j++) {
        if (j == SNCO.indexOf(id))
            break; // completed a full circle
        else if (j < SNCO.length)
            nNodes[i++] = SNCO[j]
        else j = -1 // wrap to the start of the ring
    }
    return (N == 1 ? nNodes[0] : nNodes)
}
// N closest supernodes to the given floID by XOR distance (N = 0 means
// "all"); returns a single floID when N == 1, else an array.
kBucket.closestNode = function(id, N = 1) {
    const n = N || SNCO.length;
    const cNodes = SNKB.closest(decodeID(id), n).map(k => k.floID);
    return (N == 1 ? cNodes[0] : cNodes);
}
})(typeof global !== "undefined" ? global : window)

6578
src/lib.js Normal file

File diff suppressed because it is too large Load Diff

405
src/lib/BuildKBucket.js Normal file
View File

@ -0,0 +1,405 @@
'use strict';
/*Kademlia DHT K-bucket implementation as a binary tree.*/
(function(GLOBAL) {
/**
* Implementation of a Kademlia DHT k-bucket used for storing
* contact (peer node) information.
*
* @extends EventEmitter
*/
GLOBAL.BuildKBucket = function BuildKBucket(options = {}) {
/**
* `options`:
* `distance`: Function
* `function (firstId, secondId) { return distance }` An optional
* `distance` function that gets two `id` Uint8Arrays
* and return distance (as number) between them.
* `arbiter`: Function (Default: vectorClock arbiter)
* `function (incumbent, candidate) { return contact; }` An optional
* `arbiter` function that givent two `contact` objects with the same `id`
* returns the desired object to be used for updating the k-bucket. For
* more details, see [arbiter function](#arbiter-function).
* `localNodeId`: Uint8Array An optional Uint8Array representing the local node id.
* If not provided, a local node id will be created via `randomBytes(20)`.
* `metadata`: Object (Default: {}) Optional satellite data to include
* with the k-bucket. `metadata` property is guaranteed not be altered by,
* it is provided as an explicit container for users of k-bucket to store
* implementation-specific data.
* `numberOfNodesPerKBucket`: Integer (Default: 20) The number of nodes
* that a k-bucket can contain before being full or split.
* `numberOfNodesToPing`: Integer (Default: 3) The number of nodes to
* ping when a bucket that should not be split becomes full. KBucket will
* emit a `ping` event that contains `numberOfNodesToPing` nodes that have
* not been contacted the longest.
*
* @param {Object=} options optional
*/
this.localNodeId = options.localNodeId || window.crypto.getRandomValues(new Uint8Array(20))
this.numberOfNodesPerKBucket = options.numberOfNodesPerKBucket || 20
this.numberOfNodesToPing = options.numberOfNodesToPing || 3
this.distance = options.distance || this.distance
// use an arbiter from options or vectorClock arbiter by default
this.arbiter = options.arbiter || this.arbiter
this.metadata = Object.assign({}, options.metadata)
this.createNode = function() {
return {
contacts: [],
dontSplit: false,
left: null,
right: null
}
}
this.ensureInt8 = function(name, val) {
if (!(val instanceof Uint8Array)) {
throw new TypeError(name + ' is not a Uint8Array')
}
}
/**
* @param {Uint8Array} array1
* @param {Uint8Array} array2
* @return {Boolean}
*/
this.arrayEquals = function(array1, array2) {
if (array1 === array2) {
return true
}
if (array1.length !== array2.length) {
return false
}
for (let i = 0, length = array1.length; i < length; ++i) {
if (array1[i] !== array2[i]) {
return false
}
}
return true
}
this.ensureInt8('option.localNodeId as parameter 1', this.localNodeId)
this.root = this.createNode()
/**
* Default arbiter function for contacts with the same id. Uses
* contact.vectorClock to select which contact to update the k-bucket with.
* Contact with larger vectorClock field will be selected. If vectorClock is
* the same, candidat will be selected.
*
* @param {Object} incumbent Contact currently stored in the k-bucket.
* @param {Object} candidate Contact being added to the k-bucket.
* @return {Object} Contact to updated the k-bucket with.
*/
this.arbiter = function(incumbent, candidate) {
return incumbent.vectorClock > candidate.vectorClock ? incumbent : candidate
}
/**
* Default distance function. Finds the XOR
* distance between firstId and secondId.
*
* @param {Uint8Array} firstId Uint8Array containing first id.
* @param {Uint8Array} secondId Uint8Array containing second id.
* @return {Number} Integer The XOR distance between firstId
* and secondId.
*/
this.distance = function(firstId, secondId) {
let distance = 0
let i = 0
const min = Math.min(firstId.length, secondId.length)
const max = Math.max(firstId.length, secondId.length)
for (; i < min; ++i) {
distance = distance * 256 + (firstId[i] ^ secondId[i])
}
for (; i < max; ++i) distance = distance * 256 + 255
return distance
}
/**
 * Insert a contact into the tree, splitting leaf buckets as needed.
 *
 * @param {Object} contact the contact object to add
 * @return {Object} the k-bucket itself (chainable)
 */
this.add = function(contact) {
    this.ensureInt8('contact.id', (contact || {}).id)
    let depth = 0
    let current = this.root
    // descend through inner nodes until we reach a leaf bucket
    while (current.contacts === null)
        current = this._determineNode(current, contact.id, depth++)
    const existing = this._indexOf(current, contact.id)
    if (existing >= 0) {
        // already present: let the arbiter decide which copy survives
        this._update(current, existing, contact)
        return this
    }
    if (current.contacts.length < this.numberOfNodesPerKBucket) {
        current.contacts.push(contact)
        return this
    }
    // bucket is full; a "far away" bucket is never split, so the new
    // contact is dropped (this prevents DoS flooding with invalid contacts)
    if (current.dontSplit)
        return this
    // split the leaf and retry the insertion
    this._split(current, depth)
    return this.add(contact)
}
/**
 * Return up to n contacts closest (by XOR metric) to the given id.
 *
 * @param {Uint8Array} id Contact node id
 * @param {Number=} n Integer (Default: Infinity) maximum number of
 *                    closest contacts to return
 * @return {Array} at most n contacts, nearest first
 */
this.closest = function(id, n = Infinity) {
    this.ensureInt8('id', id)
    if ((!Number.isInteger(n) && n !== Infinity) || n <= 0)
        throw new TypeError('n is not positive number')
    let found = []
    const pending = [this.root]
    let bit = 0
    // depth-first walk, visiting the branch nearest to `id` first
    while (pending.length > 0 && found.length < n) {
        const current = pending.pop()
        if (current.contacts === null) {
            const near = this._determineNode(current, id, bit++)
            pending.push(near === current.left ? current.right : current.left)
            pending.push(near)
        } else {
            found = found.concat(current.contacts)
        }
    }
    // rank by distance and keep the n nearest
    return found
        .map(c => [this.distance(c.id, id), c])
        .sort((x, y) => x[0] - y[0])
        .slice(0, n)
        .map(pair => pair[1])
}
/**
 * Count every contact stored anywhere in the tree.
 *
 * @return {Number} total number of contacts held in the tree
 */
this.count = function() {
    let total = 0
    const stack = [this.root]
    // iterative traversal: leaves contribute their bucket size
    while (stack.length > 0) {
        const current = stack.pop()
        if (current.contacts === null)
            stack.push(current.right, current.left)
        else
            total += current.contacts.length
    }
    return total
}
/**
 * Pick the branch of an inner node for a given id: the left leaf if the
 * bit of `id` at `bitIndex` is 0, the right leaf if it is 1.
 *
 * @param {Object} node inner node with `left` and `right` leaves
 * @param {Uint8Array} id id whose bit selects the branch
 * @param {Number} bitIndex which bit (not byte) of the id to inspect
 * @return {Object} node.left or node.right
 */
this._determineNode = function(node, id, bitIndex) {
    // ids have byte (8-bit) granularity while bitIndex addresses single
    // bits; an id too short to contain the addressed bit is treated as
    // having a 0 there and routed to the low (left) branch
    const byteIndex = bitIndex >> 3   // whole bytes fully covered by bitIndex
    const bitOffset = bitIndex % 8    // remaining bit position inside the byte
    if (id.length <= byteIndex && bitOffset !== 0)
        return node.left
    // isolate the addressed bit, most-significant bit first:
    // e.g. bitOffset 3 tests mask (1 << (7 - 3)) == 00010000
    const byte = id[byteIndex]
    if (byte & (1 << (7 - bitOffset)))
        return node.right
    return node.left
}
/**
 * Look up a contact by its exact id: descend to the leaf bucket the id
 * maps to and scan that bucket.
 *
 * @param {Uint8Array} id The id of the contact to fetch.
 * @return {Object|Null} the contact if stored, otherwise null
 */
this.get = function(id) {
    this.ensureInt8('id', id)
    let depth = 0
    let leaf = this.root
    while (leaf.contacts === null)
        leaf = this._determineNode(leaf, id, depth++)
    // _indexOf matches on contact id
    const pos = this._indexOf(leaf, id)
    return pos >= 0 ? leaf.contacts[pos] : null
}
/**
 * Position of the contact with the given id inside a leaf's bucket.
 *
 * @param {Object} node leaf node whose contacts array is scanned
 * @param {Uint8Array} id Contact node id.
 * @return {Number} index of the matching contact, or -1 if absent
 */
this._indexOf = function(node, id) {
    // arrow callback keeps `this` bound for arrayEquals
    return node.contacts.findIndex(c => this.arrayEquals(c.id, id))
}
/**
 * Removes the contact with the provided id, if present.
 *
 * @param {Uint8Array} id The id of the contact to remove.
 * @return {Object} The k-bucket itself (chainable).
 */
this.remove = function(id) {
    this.ensureInt8('the id as parameter 1', id)
    let bitIndex = 0
    let node = this.root
    // descend to the leaf bucket that would hold this id
    while (node.contacts === null)
        node = this._determineNode(node, id, bitIndex++)
    const index = this._indexOf(node, id)
    if (index >= 0)
        node.contacts.splice(index, 1) // FIX: removed contact was bound to an unused local
    return this
}
/**
 * Split a full leaf: create two child leaves, redistribute the contacts
 * between them, and convert this node into an inner node
 * (contacts = null). The branch not containing the local node is marked
 * dontSplit ("far away").
 *
 * @param {Object} node the leaf node to split
 * @param {Number} bitIndex bit of the id used to route contacts
 */
this._split = function(node, bitIndex) {
    node.left = this.createNode()
    node.right = this.createNode()
    // re-home every existing contact into the appropriate child
    for (const c of node.contacts)
        this._determineNode(node, c.id, bitIndex).contacts.push(c)
    node.contacts = null // now an inner tree node
    // the branch the local node does NOT fall into is never split again
    const localBranch = this._determineNode(node, this.localNodeId, bitIndex)
    const farBranch = localBranch === node.left ? node.right : node.left
    farBranch.dontSplit = true
}
/**
 * Collect every contact in the tree into a single flat array.
 * Leaf buckets are copied via concat so no internal array leaks out.
 *
 * @return {Array} all contacts in the tree
 */
this.toArray = function() {
    let all = []
    const stack = [this.root]
    while (stack.length > 0) {
        const current = stack.pop()
        if (current.contacts === null)
            stack.push(current.right, current.left)
        else
            all = all.concat(current.contacts)
    }
    return all
}
/**
 * Resolve a duplicate-id insertion via the arbiter.
 * If the arbiter keeps the old contact and the candidate is genuinely
 * new, the candidate is dropped. Otherwise the selected contact is
 * moved to the end of the bucket (most recently contacted).
 *
 * @param {Object} node leaf node holding the existing contact
 * @param {Number} index position of the existing contact in the bucket
 * @param {Object} contact the incoming contact with the same id
 */
this._update = function(node, index, contact) {
    // sanity check: index must point at the contact with the same id
    if (!this.arrayEquals(node.contacts[index].id, contact.id))
        throw new Error('wrong index for _update')
    const incumbent = node.contacts[index]
    const selection = this.arbiter(incumbent, contact)
    // arbiter kept the old contact and this is not a refresh: nothing to do
    if (selection === incumbent && incumbent !== contact)
        return
    node.contacts.splice(index, 1)
    node.contacts.push(selection)
}
}
})(typeof global !== "undefined" ? global : window)

218
src/main.js Normal file
View File

@ -0,0 +1,218 @@
const config = require("../config.json")
global.floGlobals = require("./floGlobals")
require('./set_globals')
require('./lib')
require('./kBucket')
require('./floCrypto')
require('./floBlockchainAPI')
const Database = require("./database")
const intra = require('./intra')
const client = require('./client')
const Server = require('./server')
var DB; //Container for Database object
/**
 * Boots the supernode: derives the node identity from the configured FLO
 * private key, connects the database, loads base data into floGlobals,
 * starts the periodic refresh and launches the HTTP/WS server.
 */
function startNode() {
    //Set myPrivKey, myPubKey, myFloID from the configured private key
    global.myPrivKey = config["privateKey"]
    global.myPubKey = floCrypto.getPubKeyHex(config["privateKey"])
    global.myFloID = floCrypto.getFloID(config["privateKey"])
    //DB connect
    Database(config["sql_user"], config["sql_pwd"], config["sql_db"], config["sql_host"]).then(db => {
        DB = db;
        //Share the DB handle with the client and intra-supernode modules
        intra.DB = DB;
        client.DB = DB;
        client._list = intra._list;
        // FIX: loadBase requires the DB handle; it was called with no argument
        loadBase(DB).then(base => {
            //Set base data from DB to floGlobals
            floGlobals.supernodes = base.supernodes;
            floGlobals.sn_config = base.sn_config;
            floGlobals.appList = base.appList;
            floGlobals.appSubAdmins = base.appSubAdmins;
            refreshData.base = base;
            refreshData.invoke();
            //Start Server
            const server = new Server(config["port"], client, intra);
            server.refresher = refreshData;
            intra.refresher = refreshData;
        }).catch(error => console.error(error)) // FIX: `reject` was undefined here
    }).catch(error => console.error(error)) // FIX: `reject` was undefined here
}
/**
 * Ensures the base tables exist, then reads the base data set.
 * FIX: the parameter shadowed the module-level DB while the caller passed
 * nothing, so every call failed on undefined.createBase(). The parameter
 * now defaults to the module-level DB, keeping both call forms working.
 *
 * @param {Object} [db=DB] database wrapper (createBase/getBase)
 * @returns {Promise<Object>} resolves with the base data object
 */
function loadBase(db = DB) {
    // chain directly instead of wrapping an existing promise
    return db.createBase().then(() => db.getBase(db));
}
// Periodic refresh scheduler: invoke() re-syncs blockchain data and runs
// disk clean-up, then `countdown` is ticked until the next refresh is due.
const refreshData = {
    count: null, // ticks remaining until the next invoke()
    base: null, // base data object (assigned by startNode)
    // Resets the counter, refreshes blockchain data, then cleans the disk.
    invoke(flag = true) {
        this.count = floGlobals.sn_config.refreshDelay;
        refreshBlockchainData(this.base, flag).then(result => {
            console.log(result)
            diskCleanUp(this.base)
                .then(result => console.info(result))
                .catch(warn => console.warn(warn))
        }).catch(error => console.error(error))
    },
    // NOTE(review): getter with a side effect — each read decrements the
    // counter and re-invokes the refresh at zero. Callers tick it with a
    // bare `refresher.countdown;` statement; it returns undefined.
    get countdown() {
        this.count--;
        if (this.count <= 0)
            this.invoke();
    }
}
/**
 * Refreshes all blockchain-derived state: supernode configuration, then
 * the kBucket, then the per-app subAdmin lists (best-effort).
 * Rejects only if the configuration read or the kBucket launch fails.
 *
 * @param {Object} base base data object
 * @param {Boolean} flag forwarded to the configuration reader
 * @returns {Promise<String>} resolves once the refresh completes
 */
function refreshBlockchainData(base, flag) {
    return new Promise((resolve, reject) => {
        readSupernodeConfigFromAPI(base, flag)
            .then(result => {
                console.log(result)
                return kBucket.launch()
            })
            .then(result => {
                console.log(result)
                // subAdmin reload is best-effort: warn, never reject
                readAppSubAdminListFromAPI(base)
                    .then(result => console.log(result))
                    .catch(warn => console.warn(warn))
                    .finally(_ => resolve("Refreshed Data from blockchain"))
            })
            .catch(error => reject(error))
    })
}
/**
 * Reads SuperNodeStorage transactions from the blockchain (newer than the
 * last processed tx) and applies node / config / app changes to both the
 * database and the in-memory `base` object.
 *
 * @param {Object} base base data object (supernodes, sn_config, appList, lastTx)
 * @param {Boolean} flag forwarded to intra.dataMigration
 * @returns {Promise<String>} resolves once changes are applied (DB writes are best-effort)
 */
function readSupernodeConfigFromAPI(base, flag) {
    return new Promise((resolve, reject) => {
        floBlockchainAPI.readData(floGlobals.SNStorageID, {
            ignoreOld: base.lastTx[floGlobals.SNStorageID],
            sentOnly: true,
            pattern: "SuperNodeStorage"
        }).then(result => {
            let promises = [],
                node_change = {};
            // process oldest-first so later instructions override earlier ones
            result.data.reverse().forEach(data => {
                var content = JSON.parse(data).SuperNodeStorage;
                if (content.removeNodes)
                    for (let sn of content.removeNodes) {
                        promises.push(DB.rmSuperNode(sn));
                        delete base.supernodes[sn];
                        // add-then-remove cancels out; otherwise record the removal
                        if (node_change[sn] === true)
                            delete node_change[sn];
                        else
                            node_change[sn] = false;
                    }
                if (content.newNodes)
                    for (let sn in content.newNodes) {
                        promises.push(DB.addSuperNode(sn, content.newNodes[sn].pubKey, content.newNodes[sn].uri));
                        base.supernodes[sn] = {
                            pubKey: content.newNodes[sn].pubKey,
                            uri: content.newNodes[sn].uri
                        };
                        // remove-then-add cancels out; otherwise record the addition
                        if (node_change[sn] === false)
                            delete node_change[sn];
                        else
                            node_change[sn] = true;
                    }
                if (content.config)
                    for (let c in content.config) {
                        promises.push(DB.setConfig(c, content.config[c]));
                        base.sn_config[c] = content.config[c];
                    }
                if (content.removeApps)
                    for (let app of content.removeApps) {
                        promises.push(DB.rmApp(app));
                        // FIX: `delete base.appList` wiped the whole list
                        delete base.appList[app];
                    }
                if (content.addApps)
                    for (let app in content.addApps) {
                        promises.push(DB.addApp(app, content.addApps[app]));
                        base.appList[app] = content.addApps[app];
                    }
            });
            promises.push(DB.setLastTx(floGlobals.SNStorageID, result.totalTxs));
            //Check if all save processes were successful (best-effort)
            Promise.allSettled(promises).then(results => {
                if (results.reduce((a, r) => r.status === "rejected" ? ++a : a, 0))
                    console.warn("Some data might not have been saved in database correctly");
                else
                    console.log("All data are saved in database");
            });
            //Process data migration if nodes are changed
            // FIX: Object.keys(...) is always truthy; check the count instead
            if (Object.keys(node_change).length)
                intra.dataMigration(node_change, flag)
            resolve('Updated Supernode Configuration');
        }).catch(error => reject(error))
    })
}
/**
 * Rebuilds the subAdmin list of every registered app from each app
 * admin's blockchain transactions, persisting results best-effort.
 *
 * @param {Object} base base data object (appList, appSubAdmins, lastTx)
 * @returns {Promise<String>} resolves when all apps loaded; rejects with a
 *          summary message if any app failed
 */
function readAppSubAdminListFromAPI(base) {
    var promises = [];
    //Load subAdmin list for each app (admin address = base.appList[app])
    for (let app in base.appList) {
        promises.push(new Promise((resolve, reject) => {
            floBlockchainAPI.readData(base.appList[app], {
                ignoreOld: base.lastTx[`${app}_${base.appList[app]}`],
                sentOnly: true,
                pattern: app
            }).then(result => {
                let subAdmins = new Set(base.appSubAdmins[app]);
                // oldest-first so later add/remove instructions win
                result.data.reverse().forEach(data => {
                    // FIX: previously parsed result.data[i] with an undefined `i`
                    let content = JSON.parse(data)[app];
                    if (Array.isArray(content.removeSubAdmin))
                        content.removeSubAdmin.forEach(sa => subAdmins.delete(sa));
                    if (Array.isArray(content.addSubAdmin))
                        content.addSubAdmin.forEach(sa => subAdmins.add(sa));
                });
                base.appSubAdmins[app] = Array.from(subAdmins);
                //Persist the last-tx marker and updated subAdmin list (best-effort)
                Promise.allSettled([
                    DB.setLastTx(`${app}_${base.appList[app]}`, result.totalTxs),
                    DB.setSubAdmin(app, base.appSubAdmins[app])
                ]).then(results => {
                    if (results.reduce((a, r) => r.status === "rejected" ? ++a : a, 0))
                        console.warn(`SubAdmin list for app(${app}) might not have been saved in database`);
                });
                resolve("Loaded subAdmin List for APP:" + app);
            }).catch(error => reject([app, error]))
        }));
    }
    return new Promise((resolve, reject) => {
        Promise.allSettled(promises).then(results => {
            if (results.reduce((a, r) => r.status === "rejected" ? ++a : a, 0)) {
                let error = Object.fromEntries(results.filter(r => r.status === "rejected").map(r => r.reason));
                console.error(JSON.stringify(error));
                // FIX: message was missing its closing parenthesis
                reject(`subAdmin List for APPS(${Object.keys(error)}) might not have loaded correctly`);
            } else
                resolve("Loaded subAdmin List for all APPs successfully");
        });
    })
}
/**
 * Deletes expired/unauthorised data for every supernode this node serves:
 * all data of unauthorised apps, and unofficial data of authorised apps
 * older than the configured deleteDelay.
 *
 * @param {Object} base base data object (sn_config, appList, appSubAdmins)
 * @returns {Promise<String>} resolves on full success, rejects with a
 *          summary message when any clean-up task failed
 */
function diskCleanUp(base) {
    return new Promise((resolve, reject) => {
        let time = Date.now() - base.sn_config.deleteDelay,
            promises = [];
        intra._list.serving.forEach(sn => {
            //delete all when app is not authorised.
            promises.push(DB.clearUnauthorisedAppData(sn, Object.keys(base.appList), time));
            //for each authorised app: delete unofficial data (untagged, unknown sender/receiver)
            for (let app in base.appList)
                // FIX: base.subAdmins was never set; the list lives at base.appSubAdmins
                promises.push(DB.clearAuthorisedAppData(sn, app, base.appList[app], base.appSubAdmins[app], time));
        })
        Promise.allSettled(promises).then(results => {
            let failed = results.filter(r => r.status === "rejected").map(r => r.reason)
            if (failed.length) {
                console.error(JSON.stringify(failed));
                let success = results.length - failed.length;
                // FIX: failed.count was undefined; report failed.length
                reject(`Disk clean-up process has failed at ${100 * success/results.length}%. (Success:${success}|Failed:${failed.length})`)
            } else
                resolve("Disk clean-up process finished successfully (100%)");
        }).catch(error => reject(error))
    })
}
module.exports = startNode;

75
src/server.js Normal file
View File

@ -0,0 +1,75 @@
const http = require('http')
const WebSocket = require('ws')
/**
 * HTTP + WebSocket front-end of the supernode.
 * GET serves data queries (JSON in the query string), POST stores data,
 * and the WebSocket endpoint handles both peer-supernode tasks and live
 * user requests on the same port.
 *
 * @param {Number|String} port port to listen on
 * @param {Object} client user-request processor module
 * @param {Object} intra intra-supernode communication module
 */
module.exports = function Server(port, client, intra) {
    var refresher; //container for refresher (injected via the `refresher` setter)
    const server = http.createServer((req, res) => {
        if (req.method === "GET") {
            //GET request (requesting data)
            req.on('end', () => {
                let i = req.url.indexOf("?");
                if (i !== -1) {
                    // NOTE(review): substring(i) keeps the leading '?', so
                    // JSON.parse will throw on it — confirm whether
                    // substring(i + 1) was intended.
                    var request = JSON.parse(req.url.substring(i));
                    client.processRequestFromUser(request)
                        // NOTE(review): res.end expects a string/Buffer but is
                        // handed the JSON.parse'd object — verify intent.
                        .then(result => res.end(JSON.parse(result[0])))
                        .catch(error => res.end(error.toString()))
                }
            })
        }
        if (req.method === "POST") {
            let data = '';
            req.on('data', chunk => data += chunk)
            req.on('end', () => {
                console.log(data);
                //process the data storing
                client.processIncomingData(data).then(result => {
                    res.end(result[0]);
                    if (result[1]) {
                        // tick the refresh countdown (getter with side effect)
                        refresher.countdown;
                        // NOTE(review): sendToLiveRequests is not defined in
                        // this file — confirm where it comes from.
                        if (result[1] === 'DATA')
                            sendToLiveRequests(result[0])
                        // replicate to the next supernode in the ring
                        intra.forwardToNextNode(result[1], result[0])
                    }
                }).catch(error => res.end(error.toString()))
            })
        }
    });
    server.listen(port, (err) => {
        if (!err)
            console.log(`Server running at port ${port}`);
    })
    // WebSocket endpoint shares the HTTP server and port
    const wsServer = new WebSocket.Server({
        server
    });
    wsServer.on('connection', function connection(ws) {
        ws.onmessage = function(evt) {
            let message = evt.data;
            // frames from peer supernodes carry a special prefix
            if (message.startsWith(intra.SUPERNODE_INDICATOR))
                intra.processTaskFromSupernode(message, ws);
            else {
                var request = JSON.parse(message);
                client.processRequestFromUser(JSON.parse(message))
                    .then(result => {
                        // NOTE(review): ws.send is handed a parsed object, not
                        // a string — confirm whether result[0] should be sent as-is.
                        ws.send(JSON.parse(result[0]))
                        ws._liveReq = request; // remembered for live updates
                    }).catch(error => {
                        if (floGlobals.sn_config.errorFeedback)
                            ws.send(error.toString())
                    })
            }
        }
    });
    // read-only accessors for the underlying servers
    Object.defineProperty(this, "http", {
        get: () => server
    });
    Object.defineProperty(this, "webSocket", {
        get: () => wsServer
    });
    // injection point for the refresh scheduler
    Object.defineProperty(this, "refresher", {
        set: (r) => refresher = r
    })
}

8
src/set_globals.js Normal file
View File

@ -0,0 +1,8 @@
'use strict';
// Provide a browser-like fetch for Node (used in floBlockchainAPI.js)
global.fetch = require("node-fetch");
// Expose every entry of parameters.json as a global variable
const parameters = require('../parameters.json');
for (const [key, value] of Object.entries(parameters))
    global[key] = value;

View File

@ -1,19 +0,0 @@
#!/bin/sh
# Supernode launcher: logs startup, exports the key=value pairs from
# .config into the environment, then starts the WSS server binary and
# opens the browser UI, waiting on both background jobs.
current_date=$(date)
echo "$current_date : Starting Supernode" >> logs/app.log
#Read configurations (each line of .config is VAR=VALUE)
IFS="="
while read -r var value
do
export "$var"="${value}"
done < .config
#Start the app: WSS binary and browser UI in the background
echo $current_date >> logs/server.log
app/supernodeWSS.bin $PORT $SERVER_PWD >> logs/server.log &
echo $current_date >> logs/browser.log
$BROWSER app/index.html >> logs/browser.log &
wait

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,177 +0,0 @@
/*
* Copyright (c) 2014 Cesanta Software Limited
* All rights reserved
*/
#include "mongoose.h"
static sig_atomic_t s_signal_received = 0;           // last caught signal; stops main()'s poll loop
static struct mg_serve_http_opts s_http_server_opts; // static file-serving options
static char *s_http_port, *server_pwd;               // set from argv[1] / argv[2] in main()
static struct mg_connection *sn_c = NULL; //supernode_js_connection (the authenticated supernode UI)
// Record the received signal so main()'s poll loop can exit cleanly.
static void signal_handler(int sig_num) {
signal(sig_num, signal_handler); // Reinstantiate signal handler
s_signal_received = sig_num;
}
// Nonzero if this connection completed a websocket handshake.
static int is_websocket(const struct mg_connection *nc) {
return nc->flags & MG_F_IS_WEBSOCKET;
}
//System_display [wss => console]: log the peer's address and an event tag
static void sys_display(struct mg_connection *nc, char type[25]){
char addr[32];
mg_sock_addr_to_str(&nc->sa, addr, sizeof(addr), MG_SOCK_STRINGIFY_IP | MG_SOCK_STRINGIFY_PORT);
printf("%s\t%s\n", addr, type);
}
//System_unicast [wss => nc]: send a text frame to one connection (no-op when nc is NULL)
static void sys_unicast(struct mg_connection *nc, const struct mg_str msg) {
if (nc != NULL)
mg_send_websocket_frame(nc, WEBSOCKET_OP_TEXT, msg.p, (int)msg.len);
}
//System_broadcast [wss => all]: send a text frame to every connection except the sender
static void sys_broadcast(struct mg_connection *nc, const struct mg_str msg) {
struct mg_connection *c;
for (c = mg_next(nc->mgr, NULL); c != NULL; c = mg_next(nc->mgr, c)) {
if (c == nc) continue; // skip the originating connection
mg_send_websocket_frame(c, WEBSOCKET_OP_TEXT, msg.p, (int)msg.len);
}
}
//Broadcast incoming message [sn_c => all]
// NOTE(review): dereferences sn_c without a NULL check; the only caller
// (ev_handler) invokes it when nc == sn_c, so sn_c is non-NULL there.
static void broadcast(const struct mg_str msg) {
printf("BROADCAST\t[%d]\n", (int)msg.len-1);
struct mg_connection *c;
for (c = mg_next(sn_c->mgr, NULL); c != NULL; c = mg_next(sn_c->mgr, c)) {
if (c == sn_c) continue; // do not echo back to the supernode
mg_send_websocket_frame(c, WEBSOCKET_OP_TEXT, msg.p, (int)msg.len);
}
}
//Groupcast incoming message [sn_c => gid]
// Frame layout: '#' at byte 0, 34-char group id at bytes 1..34, payload after.
static void groupcast(const struct mg_str msg) {
char gid[35];
snprintf(gid, sizeof(gid), "%.*s", 34, &msg.p[1]); // extract target group id
printf("GROUPCAST\t[%d]\n", (int)msg.len - 36);
struct mg_connection *c;
for (c = mg_next(sn_c->mgr, NULL); c != NULL; c = mg_next(sn_c->mgr, c)) {
if (c == sn_c) continue;
// NOTE(review): c->gid is not a stock mongoose field; it appears to be
// added in the vendored mongoose — confirm.
else if (!strcmp(c->gid, gid))
mg_send_websocket_frame(c, WEBSOCKET_OP_TEXT, msg.p, (int)msg.len);
}
}
//Unicast incoming message [sn_c => uid-gid]
// Frame layout: '@' at byte 0, 5-char uid at bytes 1..5, 34-char gid at
// bytes 7..40 (byte 6 is presumably a separator — confirm), payload after.
static void unicast(const struct mg_str msg) {
char uid[6], gid[35];
snprintf(uid, sizeof(uid), "%.*s", 5, &msg.p[1]);
snprintf(gid, sizeof(gid), "%.*s", 34, &msg.p[7]);
printf("UNICAST\t\t[%d]\n", (int)msg.len - 42);
struct mg_connection *c;
for (c = mg_next(sn_c->mgr, NULL); c != NULL; c = mg_next(sn_c->mgr, c)) {
if (c == sn_c) continue;
// deliver only to the connection matching both group and user id
else if (!strcmp(c->gid, gid) && !strcmp(c->uid, uid))
mg_send_websocket_frame(c, WEBSOCKET_OP_TEXT, msg.p, (int)msg.len);
}
}
//Forward incoming message [user => sn_c]: relay a user frame to the supernode
static void forward(const struct mg_str msg) {
printf("FORWARD\t\t[%d]\n", (int)msg.len - 41);
if(sn_c != NULL)
mg_send_websocket_frame(sn_c, WEBSOCKET_OP_TEXT, msg.p, (int)msg.len);
else
printf("WARNING: No supernode connected\n"); // message is dropped
}
/*
 * Central mongoose event handler.
 * Wire protocol (first byte of each websocket frame):
 *   '$' — system/auth channel ("$<password>" claims the supernode slot)
 *   from the supernode connection (sn_c):
 *     '@' unicast, '#' groupcast, anything else broadcast to all users
 *   from a user: 34-char gid + 5-char uid header, then forwarded to sn_c
 * (offsets inferred from the snprintf calls — confirm against the UI code)
 */
static void ev_handler(struct mg_connection *nc, int ev, void *ev_data) {
switch (ev) {
case MG_EV_WEBSOCKET_HANDSHAKE_DONE: {
/*New Websocket Connection: tell the client whether a supernode is online*/
sys_display(nc, "+Connected+");
if (sn_c != NULL)
sys_unicast(nc, mg_mk_str("$+"));
else
sys_unicast(nc, mg_mk_str("$-"));
break;
}
case MG_EV_WEBSOCKET_FRAME: {
/* New Websocket Message*/
struct websocket_message *wm = (struct websocket_message *) ev_data;
struct mg_str d = {(char *) wm->data, wm->size};
if (d.p[0] == '$') {
/* login attempt: only one supernode connection is allowed at a time */
if (sn_c == NULL) {
char pass[100];
snprintf(pass, sizeof(pass), "%.*s",(int)d.len-1, &d.p[1]);
// NOTE(review): strcmp is not constant-time; consider a timing-safe
// comparison for the password check.
if (!strcmp(pass, server_pwd)) {
sn_c = nc;
sys_unicast(nc, mg_mk_str("$Access Granted"));
sys_display(nc, "*Supernode LoggedIn*");
sys_broadcast(nc, mg_mk_str("$+")); // announce supernode online
} else
sys_unicast(nc, mg_mk_str("$Access Denied"));
} else
sys_unicast(nc, mg_mk_str("$Access Locked"));
} else if (nc == sn_c) {
/* route a message from the supernode out to users */
switch (d.p[0]) {
case '@': unicast(d); break;
case '#': groupcast(d); break;
default: broadcast(d);
}
} else {
/* user message: remember its gid/uid, then relay to the supernode */
// NOTE(review): gid/uid are not standard mg_connection members; they
// appear to be added in the vendored mongoose — confirm.
snprintf(nc->gid, sizeof(nc->gid), "%.*s", 34, &d.p[0]);
snprintf(nc->uid, sizeof(nc->uid), "%.*s", 5, &d.p[35]);
forward(d);
}
break;
}
case MG_EV_HTTP_REQUEST: {
/* serve static files from document_root */
mg_serve_http(nc, (struct http_message *) ev_data, s_http_server_opts);
break;
}
case MG_EV_CLOSE: {
/* Websocket Disconnected */
if (is_websocket(nc)) {
if(nc == sn_c) {
sn_c = NULL; // supernode slot is free again
sys_display(nc,"!Supernode LoggedOut!");
sys_broadcast(nc, mg_mk_str("$-")); // announce supernode offline
} else
sys_display(nc, "-Disconnected-");
}
break;
}
}
}
/*
 * Entry point.
 *   argv[1] - HTTP/websocket listening port
 *   argv[2] - supernode login password
 * Binds mongoose on the port, serves static files from app/ and polls
 * events until SIGINT/SIGTERM is received.
 */
int main(int argc, char** argv) {
    // FIX: argv[1]/argv[2] were read without checking argc, crashing
    // (NULL dereference in strcmp/bind) when arguments were missing.
    if (argc < 3) {
        fprintf(stderr, "usage: %s <port> <server_password>\n", argv[0]);
        return 1;
    }
    s_http_port = argv[1];
    server_pwd = argv[2];
    struct mg_mgr mgr;
    struct mg_connection *nc;
    signal(SIGTERM, signal_handler); // allow graceful shutdown
    signal(SIGINT, signal_handler);
    setvbuf(stdout, NULL, _IOLBF, 0); // line-buffer logs even when piped
    setvbuf(stderr, NULL, _IOLBF, 0);
    mg_mgr_init(&mgr, NULL);
    nc = mg_bind(&mgr, s_http_port, ev_handler);
    if (nc == NULL) { // FIX: report bind failure instead of crashing later
        fprintf(stderr, "Failed to bind on port %s\n", s_http_port);
        mg_mgr_free(&mgr);
        return 1;
    }
    mg_set_protocol_http_websocket(nc);
    s_http_server_opts.document_root = "app/"; // serve the app/ directory
    s_http_server_opts.enable_directory_listing = "no";
    printf("Started on port %s\n", s_http_port);
    // poll for events until a termination signal arrives
    while (s_signal_received == 0) {
        mg_mgr_poll(&mgr, 200);
    }
    mg_mgr_free(&mgr);
    return 0;
}
}