diff --git a/.gitignore b/.gitignore index ef62eda..9213d1c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,4 @@ -node_modules/ -package-lock.json -config.json -param.json \ No newline at end of file +/node_modules/ +/package-lock.json +/args/config.json +/args/param.json \ No newline at end of file diff --git a/README.md b/README.md index 716b033..9823980 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,11 @@ npm install ### Configuration #### General Configuration -Copy `config-sample.json` to `config.json` and Edit the values as required. +In `args/` directory, Copy `config-sample.json` to `config.json`. +``` +cp args/config-sample.json args/config.json +``` +Edit the values in `args/config.json` as required. ``` { "privateKey": "", @@ -43,10 +47,10 @@ Copy `config-sample.json` to `config.json` and Edit the values as required. ***Recommended*** *(optional)* Create and use a MySQL user instead of root. Remember to give access to the database to the user. -#### Parameter Generation -Open `gen-param.html` in a browser and download `param.json` to `SuperNodeStorage` directory. +#### Parameter Generation *(Optional)* +Open `args/gen-param.html` in a browser and download `param.json` to `SuperNodeStorage/args` directory. -*Note: param.json is used for controlled random values used by SecureRandom in Cryptography* +*Note: `param.json` is used for controlled random values used by SecureRandom in Cryptography. 
If this step is skipped, `param-default.json` will be used as default parameter* ## Starting the Server After successful installation and configuration using the above steps, SuperNodeStorage can be started using: diff --git a/config-sample.json b/args/config-sample.json similarity index 100% rename from config-sample.json rename to args/config-sample.json diff --git a/gen-param.html b/args/gen-param.html similarity index 100% rename from gen-param.html rename to args/gen-param.html diff --git a/args/param-default.json b/args/param-default.json new file mode 100644 index 0000000..6476705 --- /dev/null +++ b/args/param-default.json @@ -0,0 +1,37 @@ +{ + "screen": { + "height": 1160, + "width": 2000, + "colorDepth": 24, + "availHeight": 1080, + "availWidth": 1920, + "pixelDepth": 24 + }, + "navigator": { + "userAgent": "Node/14.17.3 (Linux; aarch64; arm)", + "plugins": [{ + "name": "MySQL", + "filename": "mysql", + "description": "A node.js driver for mysql. It is written in JavaScript, does not require compiling, and is 100% MIT licensed." + }, { + "name": "WebSocket", + "filename": "ws", + "description": "Simple to use, blazing fast and thoroughly tested websocket client and server for Node.js" + }, { + "name": "Node fetch", + "filename": "node-fetch", + "description": "A light-weight module that brings window.fetch to node.js" + }], + "mimeTypes": [{ + "description": "", + "type": "application/pdf", + "suffixes": "pdf" + }], + "cookieEnabled": true, + "language": "en-US" + }, + "history": { + "length": 512 + }, + "location": "protocol://subdomain.example.domain/path" +} \ No newline at end of file diff --git a/args/post-install.js b/args/post-install.js new file mode 100644 index 0000000..25b9fb2 --- /dev/null +++ b/args/post-install.js @@ -0,0 +1,11 @@ +let message = +`SupernodeStorage is installed. + +To complete the setup: +1. Copy args/config-sample.json to args/config.json and Edit the values as required +2. 
(Optional) Open args/gen-param.html and Download param.json to args/ directory + +To start the node, Run: +npm start +`; +console.log(message); \ No newline at end of file diff --git a/package.json b/package.json index 5d685b0..7d48682 100644 --- a/package.json +++ b/package.json @@ -10,8 +10,8 @@ }, "devDependencies": {}, "scripts": { - "postinstall": "node post-install.js", - "start": "node launch.js" + "postinstall": "node args/post-install.js", + "start": "node start.js" }, "repository": { "type": "git", diff --git a/post-install.js b/post-install.js deleted file mode 100644 index b7d4ebd..0000000 --- a/post-install.js +++ /dev/null @@ -1,11 +0,0 @@ -let message = -`SupernodeStorage is installed. - -To complete the setup: -1. Open gen-param.html and Download param.json to this directory -2. Copy config-sample.json to config.json and Edit the values - -To start the node, Run: -npm start -`; -console.log(message); \ No newline at end of file diff --git a/src/intra.js b/src/intra.js index d3b4a21..fee5d19 100644 --- a/src/intra.js +++ b/src/intra.js @@ -125,7 +125,7 @@ _prevNode.onclose = evt => _prevNode.close(); //Packet processing const packet_ = {}; -packet_.constuct = function(message) { +packet_.construct = function(message) { const packet = { from: myFloID, message: message, @@ -189,7 +189,7 @@ function connectToNextNode() { let nextNodeID = kBucket.nextNode(myFloID); connectToActiveNode(nextNodeID).then(ws => { _nextNode.set(nextNodeID, ws); - _nextNode.send(packet_.constuct({ + _nextNode.send(packet_.construct({ type: BACKUP_HANDSHAKE_INIT })); resolve("BACKUP_HANDSHAKE_INIT: " + nextNodeID); @@ -247,7 +247,7 @@ function processTaskFromNextNode(packet) { dataSyncIndication(task.id, task.status, from); break; case STORE_BACKUP_DATA: - storeBackupData(task.data); + storeBackupData(task.data, from, packet); break; default: console.log("Invalid task type:" + task.type + "from next-node"); @@ -324,18 +324,18 @@ function handshakeMid(id, ws) { if (_prevNode.id 
&& _prevNode.id in floGlobals.supernodes) { if (kBucket.innerNodes(_prevNode.id, myFloID).includes(id)) { //close existing prev-node connection - _prevNode.send(packet_.constuct({ + _prevNode.send(packet_.construct({ type: RECONNECT_NEXT_NODE })); _prevNode.close(); //set the new prev-node connection _prevNode.set(id, ws); - _prevNode.send(packet_.constuct({ + _prevNode.send(packet_.construct({ type: BACKUP_HANDSHAKE_END })); } else { //Incorrect order, existing prev-node is already after the incoming node - ws.send(packet_.constuct({ + ws.send(packet_.construct({ type: RECONNECT_NEXT_NODE })); return; @@ -343,7 +343,7 @@ function handshakeMid(id, ws) { } else { //set the new prev-node connection _prevNode.set(id, ws); - _prevNode.send(packet_.constuct({ + _prevNode.send(packet_.construct({ type: BACKUP_HANDSHAKE_END })); }; @@ -404,7 +404,7 @@ handshakeMid.requestData = function(req_sync, new_order) { type: ORDER_BACKUP, order: _list.get(order) }); - _nextNode.send(packet_.constuct(tasks)); + _nextNode.send(packet_.construct(tasks)); if (failed.length) handshakeMid.timeout = setTimeout(_ => handshakeMid.requestData(failed, failed_order), RETRY_TIMEOUT); }); @@ -413,7 +413,7 @@ handshakeMid.requestData = function(req_sync, new_order) { //Complete handshake function handshakeEnd() { console.log("Backup connected: " + _nextNode.id); - _nextNode.send(packet_.constuct({ + _nextNode.send(packet_.construct({ type: ORDER_BACKUP, order: _list.get() })); @@ -478,7 +478,7 @@ orderBackup.requestData = function(req_sync, new_order) { failed.push(s); }); if (Object.keys(lastlogs).length) - _prevNode.send(packet_.constuct({ + _prevNode.send(packet_.construct({ type: DATA_REQUEST, nodes: lastlogs })); @@ -489,7 +489,7 @@ orderBackup.requestData = function(req_sync, new_order) { order.push(n); }); if (order.length) //TODO: maybe should wait for sync to finish? 
- _nextNode.send(packet_.constuct({ + _nextNode.send(packet_.construct({ type: ORDER_BACKUP, order: _list.get(order) })); @@ -503,19 +503,19 @@ function sendStoredData(lastlogs, node) { for (let n in lastlogs) { if (_list.stored.includes(n)) { DB.getData(n, lastlogs[n]).then(result => { - node.send(packet_.constuct({ + node.send(packet_.construct({ type: DATA_SYNC, id: n, status: true })); console.log(`START: ${n} data sync(send) to ${node.id}`); //TODO: efficiently handle large number of data instead of loading all into memory - result.forEach(d => node.send(packet_.constuct({ + result.forEach(d => node.send(packet_.construct({ type: STORE_BACKUP_DATA, data: d }))); console.log(`END: ${n} data sync(send) to ${node.id}`); - node.send(packet_.constuct({ + node.send(packet_.construct({ type: DATA_SYNC, id: n, status: false @@ -555,7 +555,7 @@ function storeMigratedData(data) { let closestNode = kBucket.closestNode(data.receiverID); if (_list.serving.includes(closestNode)) { DB.storeData(closestNode, data); - _nextNode.send(packet_.constuct({ + _nextNode.send(packet_.construct({ type: STORE_BACKUP_DATA, data: data })); @@ -563,11 +563,11 @@ function storeMigratedData(data) { }; //Delete (migrated) data -function deleteMigratedData(old_sn, vectorClock, receiverID, from, packet) { - let closestNode = kBucket.closestNode(receiverID); - if (old_sn !== closestNode && _list.stored.includes(old_sn)) { - DB.deleteData(old_sn, vectorClock); - if (_list[old_sn] < floGlobals.sn_config.backupDepth && _nextNode.id !== from) +function deleteMigratedData(data, from, packet) { + let closestNode = kBucket.closestNode(data.receiverID); + if (data.snID !== closestNode && _list.stored.includes(data.snID)) { + DB.deleteData(data.snID, data.vectorClock); + if (_list[data.snID] < floGlobals.sn_config.backupDepth && _nextNode.id !== from) _nextNode.send(packet); }; }; @@ -583,7 +583,7 @@ function forwardToNextNode(mode, data) { 'DATA': STORE_BACKUP_DATA }; if (mode in modeMap && 
_nextNode.id) - _nextNode.send(packet_.constuct({ + _nextNode.send(packet_.construct({ type: modeMap[mode], data: data })); @@ -635,12 +635,12 @@ dataMigration.process_del = async function(del_nodes) { let closest = kBucket.closestNode(d.receiverID); if (_list.serving.includes(closest)) { DB.storeData(closest, d); - _nextNode.send(packet_.constuct({ + _nextNode.send(packet_.construct({ type: STORE_BACKUP_DATA, data: d })); } else - ws_connections[closest].send(packet_.constuct({ + ws_connections[closest].send(packet_.construct({ type: STORE_MIGRATED_DATA, data: d })); @@ -684,20 +684,22 @@ dataMigration.process_new = async function(new_nodes) { if (new_nodes.includes(closest)) { if (_list.serving.includes(closest)) { DB.storeData(closest, d); - _nextNode.send(packet_.constuct({ + _nextNode.send(packet_.construct({ type: STORE_BACKUP_DATA, data: d })); } else - ws_connections[closest].send(packet_.constuct({ + ws_connections[closest].send(packet_.construct({ type: STORE_MIGRATED_DATA, data: d })); - _nextNode.send(packet_.constuct({ + _nextNode.send(packet_.construct({ type: DELETE_MIGRATED_DATA, - vectorClock: d.vectorClock, - receiverID: d.receiverID, - snID: n + data: { + vectorClock: d.vectorClock, + receiverID: d.receiverID, + snID: n + } })); }; }); @@ -717,7 +719,7 @@ dataMigration.process_new = async function(new_nodes) { dataMigration.intimateAllNodes = function() { connectToAliveNodes().then(ws_connections => { - let packet = packet_.constuct({ + let packet = packet_.construct({ type: INITIATE_REFRESH }); for (let n in ws_connections) diff --git a/src/main.js b/src/main.js index 5793072..bbbe930 100644 --- a/src/main.js +++ b/src/main.js @@ -1,4 +1,4 @@ -const config = require("../config.json"); +const config = require('../args/config.json'); global.floGlobals = require("./floGlobals"); require('./set_globals'); require('./lib'); diff --git a/src/set_globals.js b/src/set_globals.js index 7a13650..b743c93 100644 --- a/src/set_globals.js +++ 
b/src/set_globals.js @@ -2,7 +2,13 @@ //fetch for node js (used in floBlockchainAPI.js) global.fetch = require("node-fetch"); -//Set browser paramaters from param.json -const param = require('../param.json'); -for(let p in param) - global[p] = param[p]; +//Set browser parameters from param.json (or param-default.json) +var param; +try { + param = require('../args/param.json'); +} catch { + param = require('../args/param-default.json'); +} finally { + for (let p in param) + global[p] = param[p]; +} \ No newline at end of file diff --git a/launch.js b/start.js similarity index 100% rename from launch.js rename to start.js