Backup Sync improvements

- Removed the immutable table type (tables previously treated as immutable are now handled like mutable tables by the backup feature)
- Backup sync-data from the master is temporarily cached, then processed only after all sync-data has been received
- Cached backup sync-data is now processed in order, to prevent foreign-key constraint errors, duplicated data, and re-adding of deleted rows (sketched below)
- Updated SQL schema
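
A minimal sketch of the cache-then-replay idea (hypothetical names, not the committed code; onSyncPacket, applyAddUpdate and applyDelete are illustrative stand-ins for the slave-side handlers in the diff below):

    // Hold packets from the master until SYNC_END, then replay them in
    // dependency order: parent tables first, deletions last.
    const cache = [];
    function onSyncPacket(packet) {
        if (packet.command !== "SYNC_END")
            return cache.push(packet); // buffer SYNC_UPDATE / SYNC_DELETE packets
        // 'Users' rows first: other tables reference Users(floID)
        cache.filter(p => p.table === "Users").forEach(applyAddUpdate);
        cache.filter(p => p.table && p.table !== "Users").forEach(applyAddUpdate);
        // deletions last, so child rows never dangle and deleted rows are not re-added
        cache.filter(p => !p.table).forEach(applyDelete);
    }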
This commit is contained in:
sairajzero 2022-02-08 06:26:03 +05:30
parent 3fb401a226
commit a06c5b4c7b
5 changed files with 158 additions and 112 deletions

View File

@@ -34,9 +34,11 @@ CREATE TABLE TrustedList(
 /* User Data */
 CREATE TABLE Users (
+    id INT NOT NULL AUTO_INCREMENT,
     floID CHAR(34) NOT NULL,
     pubKey CHAR(66) NOT NULL,
     created DATETIME DEFAULT CURRENT_TIMESTAMP,
+    KEY(id),
     PRIMARY KEY(floID)
 );
@@ -82,11 +84,13 @@ CREATE TABLE UserTag (
 /* User Requests */
-CREATE TABLE Request_Log(
+CREATE TABLE RequestLog(
+    id INT NOT NULL AUTO_INCREMENT,
     floID CHAR(34) NOT NULL,
     request TEXT NOT NULL,
     sign TEXT NOT NULL,
     request_time DATETIME DEFAULT CURRENT_TIMESTAMP,
+    PRIMARY KEY(id),
     FOREIGN KEY (floID) REFERENCES Users(floID)
 );
@@ -159,25 +163,30 @@ CREATE TABLE OutputToken (
 /* Transaction Data */
 CREATE TABLE PriceHistory (
+    id INT NOT NULL AUTO_INCREMENT,
     asset VARCHAR(64) NOT NULL,
     rate FLOAT NOT NULL,
     rec_time DATETIME DEFAULT CURRENT_TIMESTAMP,
+    PRIMARY KEY(id),
     FOREIGN KEY (asset) REFERENCES AssetList(asset)
 );
 CREATE TABLE TransactionHistory (
+    id INT NOT NULL AUTO_INCREMENT,
     seller CHAR(34) NOT NULL,
     buyer CHAR(34) NOT NULL,
     asset VARCHAR(64) NOT NULL,
     quantity FLOAT NOT NULL,
     unitValue DECIMAL(10, 2) NOT NULL,
     tx_time DATETIME DEFAULT CURRENT_TIMESTAMP,
+    PRIMARY KEY(id),
     FOREIGN KEY (buyer) REFERENCES Users(floID),
     FOREIGN KEY (seller) REFERENCES Users(floID),
     FOREIGN KEY (asset) REFERENCES AssetList(asset)
 );
 CREATE TABLE AuditTransaction(
+    id INT NOT NULL AUTO_INCREMENT,
     rec_time DATETIME DEFAULT CURRENT_TIMESTAMP,
     unit_price FLOAT NOT NULL,
     quantity FLOAT NOT NULL,
@@ -193,6 +202,7 @@ CREATE TABLE AuditTransaction(
     buyer_new_asset FLOAT NOT NULL,
     buyer_old_cash FLOAT NOT NULL,
     buyer_new_cash FLOAT NOT NULL,
+    PRIMARY KEY(id),
     FOREIGN KEY (sellerID) REFERENCES Users(floID),
     FOREIGN KEY (buyerID) REFERENCES Users(floID),
     FOREIGN KEY (asset) REFERENCES AssetList(asset)
@@ -208,6 +218,14 @@ CREATE TABLE _backup (
     PRIMARY KEY(t_name, id)
 );
+CREATE table _backupCache(
+    id INT AUTO_INCREMENT,
+    t_name TINYTEXT,
+    data_cache LONGTEXT,
+    status BOOLEAN,
+    PRIMARY KEY(id)
+);
 CREATE TABLE sinkShares(
     floID CHAR(34) NOT NULL,
     share TEXT,
@@ -215,6 +233,20 @@ CREATE TABLE sinkShares(
     PRIMARY KEY(floID)
 );
+CREATE TRIGGER Users_I AFTER INSERT ON Users
+FOR EACH ROW INSERT INTO _backup (t_name, id) VALUES ('Users', NEW.id) ON DUPLICATE KEY UPDATE mode=TRUE, timestamp=DEFAULT;
+CREATE TRIGGER Users_U AFTER UPDATE ON Users
+FOR EACH ROW INSERT INTO _backup (t_name, id) VALUES ('Users', NEW.id) ON DUPLICATE KEY UPDATE mode=TRUE, timestamp=DEFAULT;
+CREATE TRIGGER Users_D AFTER DELETE ON Users
+FOR EACH ROW INSERT INTO _backup (t_name, id) VALUES ('Users', OLD.id) ON DUPLICATE KEY UPDATE mode=NULL, timestamp=DEFAULT;
+CREATE TRIGGER RequestLog_I AFTER INSERT ON RequestLog
+FOR EACH ROW INSERT INTO _backup (t_name, id) VALUES ('RequestLog', NEW.id) ON DUPLICATE KEY UPDATE mode=TRUE, timestamp=DEFAULT;
+CREATE TRIGGER RequestLog_U AFTER UPDATE ON RequestLog
+FOR EACH ROW INSERT INTO _backup (t_name, id) VALUES ('RequestLog', NEW.id) ON DUPLICATE KEY UPDATE mode=TRUE, timestamp=DEFAULT;
+CREATE TRIGGER RequestLog_D AFTER DELETE ON RequestLog
+FOR EACH ROW INSERT INTO _backup (t_name, id) VALUES ('RequestLog', OLD.id) ON DUPLICATE KEY UPDATE mode=NULL, timestamp=DEFAULT;
 CREATE TRIGGER UserSession_I AFTER INSERT ON UserSession
 FOR EACH ROW INSERT INTO _backup (t_name, id) VALUES ('UserSession', NEW.id) ON DUPLICATE KEY UPDATE mode=TRUE, timestamp=DEFAULT;
 CREATE TRIGGER UserSession_U AFTER UPDATE ON UserSession
@@ -283,4 +315,25 @@ FOR EACH ROW INSERT INTO _backup (t_name, id) VALUES ('UserTag', NEW.id) ON DUPL
 CREATE TRIGGER UserTag_U AFTER UPDATE ON UserTag
 FOR EACH ROW INSERT INTO _backup (t_name, id) VALUES ('UserTag', NEW.id) ON DUPLICATE KEY UPDATE mode=TRUE, timestamp=DEFAULT;
 CREATE TRIGGER UserTag_D AFTER DELETE ON UserTag
 FOR EACH ROW INSERT INTO _backup (t_name, id) VALUES ('UserTag', OLD.id) ON DUPLICATE KEY UPDATE mode=NULL, timestamp=DEFAULT;
+CREATE TRIGGER PriceHistory_I AFTER INSERT ON PriceHistory
+FOR EACH ROW INSERT INTO _backup (t_name, id) VALUES ('PriceHistory', NEW.id) ON DUPLICATE KEY UPDATE mode=TRUE, timestamp=DEFAULT;
+CREATE TRIGGER PriceHistory_U AFTER UPDATE ON PriceHistory
+FOR EACH ROW INSERT INTO _backup (t_name, id) VALUES ('PriceHistory', NEW.id) ON DUPLICATE KEY UPDATE mode=TRUE, timestamp=DEFAULT;
+CREATE TRIGGER PriceHistory_D AFTER DELETE ON PriceHistory
+FOR EACH ROW INSERT INTO _backup (t_name, id) VALUES ('PriceHistory', OLD.id) ON DUPLICATE KEY UPDATE mode=NULL, timestamp=DEFAULT;
+CREATE TRIGGER AuditTransaction_I AFTER INSERT ON AuditTransaction
+FOR EACH ROW INSERT INTO _backup (t_name, id) VALUES ('AuditTransaction', NEW.id) ON DUPLICATE KEY UPDATE mode=TRUE, timestamp=DEFAULT;
+CREATE TRIGGER AuditTransaction_U AFTER UPDATE ON AuditTransaction
+FOR EACH ROW INSERT INTO _backup (t_name, id) VALUES ('AuditTransaction', NEW.id) ON DUPLICATE KEY UPDATE mode=TRUE, timestamp=DEFAULT;
+CREATE TRIGGER AuditTransaction_D AFTER DELETE ON AuditTransaction
+FOR EACH ROW INSERT INTO _backup (t_name, id) VALUES ('AuditTransaction', OLD.id) ON DUPLICATE KEY UPDATE mode=NULL, timestamp=DEFAULT;
+CREATE TRIGGER TransactionHistory_I AFTER INSERT ON TransactionHistory
+FOR EACH ROW INSERT INTO _backup (t_name, id) VALUES ('TransactionHistory', NEW.id) ON DUPLICATE KEY UPDATE mode=TRUE, timestamp=DEFAULT;
+CREATE TRIGGER TransactionHistory_U AFTER UPDATE ON TransactionHistory
+FOR EACH ROW INSERT INTO _backup (t_name, id) VALUES ('TransactionHistory', NEW.id) ON DUPLICATE KEY UPDATE mode=TRUE, timestamp=DEFAULT;
+CREATE TRIGGER TransactionHistory_D AFTER DELETE ON TransactionHistory
+FOR EACH ROW INSERT INTO _backup (t_name, id) VALUES ('TransactionHistory', OLD.id) ON DUPLICATE KEY UPDATE mode=NULL, timestamp=DEFAULT;
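
For context on the bookkeeping above: every trigger upserts a (t_name, id) row into _backup, with mode=TRUE marking an insert/update and mode=NULL marking a delete, so a sync pass can collect everything that changed since a given time with a query along these lines (an illustrative sketch using the project's DB helper, not code from this commit):

    // rows changed on the master since the slave's last sync time
    DB.query("SELECT t_name, id, mode FROM _backup WHERE timestamp > ?", [last_time])
        .then(rows => { /* mode=TRUE: send row data; mode=NULL: send delete */ })
        .catch(error => console.error(error));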

View File

@@ -8,17 +8,17 @@ TRUNCATE InputToken;
 TRUNCATE OutputFLO;
 TRUNCATE OutputToken;
 TRUNCATE PriceHistory;
-TRUNCATE Request_Log;
+TRUNCATE RequestLog;
 TRUNCATE SellOrder;
 TRUNCATE UserSession;
 TRUNCATE UserTag;
 TRUNCATE TransactionHistory;
 TRUNCATE Vault;
-TRUNCATE Users;
+DELETE FROM Users;
 /* Blockchain data */
 TRUNCATE LastTx;
 TRUNCATE NodeList;
 TRUNCATE TrustedList;
-TRUNCATE TagList;
-TRUNCATE AssetList;
+DELETE FROM TagList;
+DELETE FROM AssetList;
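
An inference from MySQL semantics (not stated in the commit): TRUNCATE fails with error 1701 on any table that other tables reference through a FOREIGN KEY, so the referenced tables Users, TagList, and AssetList must be cleared with DELETE FROM instead; the referencing tables are truncated first so the row-by-row deletes then pass the constraint checks. Illustrative, via the project's DB helper:

    DB.query("TRUNCATE RequestLog")                // child table: safe to truncate
        .then(_ => DB.query("DELETE FROM Users"))  // FK-referenced parent: DELETE only
        .catch(error => console.error(error));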

View File

@@ -21,8 +21,7 @@ function sendBackup(timestamp, ws) {
     timestamp = timestamp.substring(0, timestamp.length - 1);
     let promises = [
         send_dataSync(timestamp, ws),
-        send_deleteSync(timestamp, ws),
-        send_dataImmutable(timestamp, ws)
+        send_deleteSync(timestamp, ws)
     ];
     Promise.allSettled(promises).then(result => {
         let failedSync = [];
@@ -65,7 +64,7 @@ function send_dataSync(timestamp, ws) {
             .then(data => {
                 ws.send(JSON.stringify({
                     table,
-                    command: "SYNC_ADD_UPDATE",
+                    command: "SYNC_UPDATE",
                     data
                 }));
                 res(table);
@@ -79,7 +78,7 @@ function send_dataSync(timestamp, ws) {
         let sync_needed = {};
         result.forEach(r => r.t_name in sync_needed ? sync_needed[r.t_name].push(r.id) : sync_needed[r.t_name] = [r.id]);
         ws.send(JSON.stringify({
-            command: "SYNC_ADD_UPDATE_HEADER",
+            command: "SYNC_HEADER",
             add_data: result
         }));
         let promises = [];
@@ -100,43 +99,6 @@ function send_dataSync(timestamp, ws) {
     });
 }
-function send_dataImmutable(timestamp, ws) {
-    const immutable_tables = {
-        Users: "created",
-        Request_Log: "request_time",
-        TransactionHistory: "tx_time",
-        PriceHistory: "rec_time"
-    };
-    const sendTable = (table, timeCol) => new Promise((res, rej) => {
-        DB.query(`SELECT * FROM ${table} WHERE ${timeCol} > ?`, [timestamp])
-            .then(data => {
-                ws.send(JSON.stringify({
-                    table,
-                    command: "SYNC_ADD_IMMUTABLE",
-                    data
-                }));
-                res(table);
-            }).catch(error => {
-                console.error(error);
-                rej(table);
-            });
-    });
-    return new Promise((resolve, reject) => {
-        let promises = [];
-        for (let table in immutable_tables)
-            promises.push(sendTable(table, immutable_tables[table]));
-        Promise.allSettled(promises).then(result => {
-            let failedTables = [];
-            result.forEach(r => r.status === "rejected" ? failedTables.push(r.reason) : null);
-            if (failedTables.length)
-                reject(["dataImmutable", failedTables]);
-            else
-                resolve("dataImmutable");
-        });
-    })
-}
 //Shares
 function generateNewSink() {
     let sink = floCrypto.generateNewID();
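
For reference, the per-round message sequence a slave can expect after this change, with shapes inferred from the handlers in this diff (the master fires data sync and delete sync in parallel, so SYNC_DELETE may arrive before or between SYNC_UPDATE packets):

    // { command: "SYNC_HEADER", add_data: [...] }    // changed (t_name, id, timestamp) rows
    // { command: "SYNC_UPDATE", table, data: [...] } // one message per table, full row data
    // { command: "SYNC_DELETE", delete_data: [...] } // rows deleted since the timestamp
    // { command: "SYNC_END", status, info }          // success flag / failure details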

View File

@@ -43,17 +43,7 @@ function stopSlaveProcess() {
 function requestBackupSync(ws) {
     return new Promise((resolve, reject) => {
-        const tables = {
-            Users: "created",
-            Request_Log: "request_time",
-            TransactionHistory: "tx_time",
-            //PriceHistory: "rec_time",
-            _backup: "timestamp"
-        };
-        let subs = [];
-        for (let t in tables)
-            subs.push(`SELECT MAX(${tables[t]}) as ts FROM ${t}`);
-        DB.query(`SELECT MAX(ts) as last_time FROM (${subs.join(' UNION ')}) AS Z`).then(result => {
+        DB.query('SELECT MAX(timestamp) as last_time FROM _backup').then(result => {
             let request = {
                 floID: global.myFloID,
                 pubKey: myPubKey,
@@ -70,9 +60,9 @@ function requestBackupSync(ws) {
 const requestInstance = {
     ws: null,
-    delete_sync: null,
-    add_sync: null,
-    immutable_sync: null,
+    cache: null,
+    delete_count: null,
+    add_count: null,
     delete_data: null,
     add_data: null,
     total_add: null,
@@ -101,6 +91,8 @@ requestInstance.open = function(ws = null) {
     requestBackupSync(ws).then(request => {
         self.request = request;
+        self.cache = [];
+        self.add_count = self.delete_count = 0;
         self.ws = ws;
     }).catch(error => console.error(error))
 }
@@ -111,9 +103,9 @@ requestInstance.close = function() {
     self.ws.close();
     self.onetime = null;
     self.ws = null;
-    self.delete_sync = null;
-    self.add_sync = null;
-    self.immutable_sync = null;
+    self.cache = null;
+    self.delete_count = null;
+    self.add_count = null;
     self.delete_data = null;
     self.add_data = null;
     self.total_add = null;
@@ -124,7 +116,7 @@ requestInstance.close = function() {
 function processDataFromMaster(message) {
     try {
         message = JSON.parse(message);
-        console.debug(message);
+        console.debug("Master:", message);
         if (message.command.startsWith("SYNC"))
             processBackupData(message);
         else switch (message.command) {
@@ -174,35 +166,87 @@ function processBackupData(response) {
             break;
         case "SYNC_END":
             if (response.status) {
-                if (self.total_add !== self.add_sync)
-                    console.info(`Backup Sync Instance finished!, ${self.total_add - self.add_sync} packets not received.`);
+                if (self.total_add !== self.add_count)
+                    console.info(`Backup Sync Instance finished!, ${self.total_add - self.add_count} packets not received.`);
                 else
                     console.info("Backup Sync Instance finished successfully");
-                updateBackupTable(self.add_data, self.delete_data)
+                storeBackupData(self.cache, self.add_data, self.delete_data);
             } else
                 console.info("Backup Sync was not successful! Failed info: ", response.info);
             self.close();
             break;
         case "SYNC_DELETE":
             self.delete_data = response.delete_data;
-            self.delete_sync += 1;
-            deleteData(response.delete_data);
+            self.delete_count += 1;
+            self.cache.push(cacheBackupData(null, response.delete_data));
             break;
-        case "SYNC_ADD_UPDATE_HEADER":
+        case "SYNC_HEADER":
             self.add_data = response.add_data;
             self.total_add = Object.keys(response.add_data).length;
             break;
-        case "SYNC_ADD_UPDATE":
-            self.add_sync += 1;
-            addUpdateData(response.table, response.data);
+        case "SYNC_UPDATE":
+            self.add_count += 1;
+            self.cache.push(cacheBackupData(response.table, response.data));
             break;
-        case "SYNC_ADD_IMMUTABLE":
-            self.immutable_sync += 1;
-            addImmutableData(response.table, response.data);
-            break;
     }
 }
+const cacheBackupData = (tableName, dataCache) => new Promise((resolve, reject) => {
+    DB.query("INSERT INTO _backupCache (t_name, data_cache) VALUE (?, ?)", [tableName, JSON.stringify(dataCache)])
+        .then(_ => resolve(true)).catch(error => {
+            console.error(error);
+            reject(false);
+        })
+});
+function storeBackupData(cache_promises, add_header, delete_header) {
+    Promise.allSettled(cache_promises).then(_ => {
+        console.log("START: BackupCache -> Tables");
+        //Process 'Users' table 1st as it provides foreign key attribute to other tables
+        DB.query("SELECT * FROM _backupCache WHERE t_name=?", ["Users"]).then(data => {
+            Promise.allSettled(data.map(d => updateTableData("Users", JSON.parse(d.data_cache)))).then(result => {
+                storeBackupData.commit(data, result).then(_ => {
+                    DB.query("SELECT * FROM _backupCache WHERE t_name IS NOT NULL").then(data => {
+                        Promise.allSettled(data.map(d => updateTableData(d.t_name, JSON.parse(d.data_cache)))).then(result => {
+                            storeBackupData.commit(data, result).then(_ => {
+                                DB.query("SELECT * FROM _backupCache WHERE t_name IS NULL").then(data => {
+                                    Promise.allSettled(data.map(d => deleteTableData(JSON.parse(d.data_cache)))).then(result => {
+                                        storeBackupData.commit(data, result).then(_ => {
+                                            console.log("END: BackupCache -> Tables");
+                                            updateBackupTable(add_header, delete_header);
+                                        });
+                                    })
+                                })
+                            })
+                        })
+                    }).catch(error => {
+                        console.error(error);
+                        console.warn("ABORT: BackupCache -> Tables")
+                    });
+                })
+            })
+        }).catch(error => {
+            console.error(error);
+            console.warn("ABORT: BackupCache -> Tables")
+        })
+    }).catch(error => console.error(error))
+}
+storeBackupData.commit = function(data, result) {
+    let promises = [];
+    for (let i = 0; i < data.length; i++)
+        switch (result[i].status) {
+            case "fulfilled":
+                promises.push(DB.query("DELETE FROM _backupCache WHERE id=?", data[i].id));
+                break;
+            case "rejected":
+                console.error(result[i].reason);
+                promises.push(DB.query("UPDATE _backupCache SET status=FALSE WHERE id=?", data[i].id));
+                break;
+        }
+    return Promise.allSettled(promises);
+}
 function updateBackupTable(add_data, delete_data) {
     //update _backup table for added data
     DB.transaction(add_data.map(r => [
@@ -210,47 +254,34 @@ function updateBackupTable(add_data, delete_data) {
         [r.t_name, r.id, validateValue(r.timestamp), validateValue(r.timestamp)]
     ])).then(_ => null).catch(error => console.error(error));
     //update _backup table for deleted data
-    let del_queries = [];
-    delete_data.forEach(r => del_queries.push([]));
     DB.transaction(delete_data.map(r => [
         "INSERT INTO _backup (t_name, id, mode, timestamp) VALUES (?, ?, NULL, ?) ON DUPLICATE KEY UPDATE mode=NULL, timestamp=?",
         [r.t_name, r.id, validateValue(r.timestamp), validateValue(r.timestamp)]
     ])).then(_ => null).catch(error => console.error(error));
 }
-function deleteData(data) {
-    let delete_needed = {};
-    data.forEach(r => r.t_name in delete_needed ? delete_needed[r.t_name].push(r.id) : delete_needed[r.t_name] = [r.id]);
-    let queries = [];
-    for (let table in delete_needed)
-        queries.push(`DELETE FROM ${table} WHERE id IN (${delete_needed[table]})`);
-    DB.transaction(queries).then(_ => null).catch(error => console.error(error));
+function deleteTableData(data) {
+    return new Promise((resolve, reject) => {
+        let delete_needed = {};
+        data.forEach(r => r.t_name in delete_needed ? delete_needed[r.t_name].push(r.id) : delete_needed[r.t_name] = [r.id]);
+        let queries = [];
+        for (let table in delete_needed)
+            queries.push(`DELETE FROM ${table} WHERE id IN (${delete_needed[table]})`);
+        DB.transaction(queries).then(_ => resolve(true)).catch(error => reject(error));
+    })
 }
-function addUpdateData(table, data) {
-    if (!data.length)
-        return;
-    let cols = Object.keys(data[0]),
-        _mark = "(" + Array(cols.length).fill('?') + ")";
-    let values = data.map(r => cols.map(c => validateValue(r[c]))).flat();
-    let statement = `INSERT INTO ${table} (${cols}) VALUES ${Array(data.length).fill(_mark)} AS new` +
-        " ON DUPLICATE KEY UPDATE " + cols.filter(c => c != 'id').map(c => c + " = new." + c);
-    DB.query(statement, values).then(_ => null).catch(error => console.error(error));
-}
-function addImmutableData(table, data) {
-    if (!data.length)
-        return;
-    const primaryKeys = {
-        Users: "floID"
-    };
-    let cols = Object.keys(data[0]),
-        _mark = "(" + Array(cols.length).fill('?') + ")";
-    let values = data.map(r => cols.map(c => validateValue(r[c]))).flat();
-    let statement = `INSERT INTO ${table} (${cols}) VALUES ${Array(data.length).fill(_mark)}`;
-    if (table in primaryKeys)
-        statement += ` ON DUPLICATE KEY UPDATE ${primaryKeys[table]}=${primaryKeys[table]}`;
-    DB.query(statement, values).then(_ => null).catch(error => console.error(error));
+function updateTableData(table, data) {
+    return new Promise((resolve, reject) => {
+        if (!data.length)
+            return resolve(null);
+        let cols = Object.keys(data[0]),
+            _mark = "(" + Array(cols.length).fill('?') + ")";
+        let values = data.map(r => cols.map(c => validateValue(r[c]))).flat();
+        let statement = `INSERT INTO ${table} (${cols}) VALUES ${Array(data.length).fill(_mark)} AS new` +
+            " ON DUPLICATE KEY UPDATE " + cols.map(c => c + " = new." + c);
+        DB.query(statement, values).then(_ => resolve(true)).catch(error => reject(error));
+    })
 }
 const validateValue = val => (typeof val === "string" && /\.\d{3}Z$/.test(val)) ? val.substring(0, val.length - 1) : val;
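
As an illustration of the builder in updateTableData above, a two-row payload for a hypothetical three-column table yields a statement of this shape (MySQL 8.0.19+ row-alias syntax; dropping the old c != 'id' filter means the master's id values now overwrite the slave's, presumably to keep row ids aligned across nodes):

    // statement (columns illustrative):
    //   INSERT INTO Users (id,floID,pubKey) VALUES (?,?,?),(?,?,?) AS new
    //   ON DUPLICATE KEY UPDATE id = new.id,floID = new.floID,pubKey = new.pubKey
    // values: all row values flattened in column order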

View File

@@ -57,7 +57,7 @@ function validateRequest(request, sign, pubKey) {
 function storeRequest(floID, req_str, sign) {
     console.debug(floID, req_str);
-    DB.query("INSERT INTO Request_Log (floID, request, sign) VALUES (?,?,?)", [floID, req_str, sign])
+    DB.query("INSERT INTO RequestLog (floID, request, sign) VALUES (?,?,?)", [floID, req_str, sign])
         .then(_ => null).catch(error => console.error(error));
 }