From decf51fab0196746bc740f168a0a4c254ee60a44 Mon Sep 17 00:00:00 2001
From: Ludovic 'Archivist' Lagouardette
Date: Mon, 8 Jul 2019 13:55:41 +0200
Subject: [PATCH] Added the operation for sharding

---
 include/database.hpp  | 21 +++++++++++++++++++++
 include/network.hpp   |  7 ++++++-
 src/izaro-storage.cpp | 43 +++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 70 insertions(+), 1 deletion(-)

diff --git a/include/database.hpp b/include/database.hpp
index ce8dd1c..a3a79ca 100644
--- a/include/database.hpp
+++ b/include/database.hpp
@@ -1,6 +1,7 @@
 #pragma once
 #include "fsized_map.h"
 #include
+#include <random>
 
 struct metadata_t{
     bitops::regulated record_cnt;
@@ -296,6 +297,26 @@ public:
         return tmp.second;
     }
 
+    record try_allocate(const record_identifier& target)
+    {
+        auto attempt = read(target);
+        if(attempt.first.timestamp == 0)
+        {
+            db_page rnd_page;
+            {
+                std::random_device dev;
+                std::minstd_rand temprng(dev());
+
+                auto tmp = (std::array*)&rnd_page;
+                std::generate(tmp->begin(), tmp->end(), temprng);
+            }
+
+            return write(target, rnd_page);
+        }
+
+        return record{};
+    }
+
     std::pair stepped_read(const record_identifier& target)
     {
         std::pair ret; ret.second.fill(0);
diff --git a/include/network.hpp b/include/network.hpp
index 972ebbb..eb2d280 100644
--- a/include/network.hpp
+++ b/include/network.hpp
@@ -8,7 +8,12 @@ enum class db_op : uint32_t {
     read = 1,
     write = 2,
     remove = 3,
-    stats = 4
+    stats = 4,
+    sread = 5,
+    swrite = 6,
+    sallocate = 7,
+    sremove = 3,
+    confirm = 8
 };
 
 struct [[gnu::packed]] received_data {
diff --git a/src/izaro-storage.cpp b/src/izaro-storage.cpp
index 0b3cc62..484b53a 100644
--- a/src/izaro-storage.cpp
+++ b/src/izaro-storage.cpp
@@ -233,6 +233,7 @@ int main(
                 reply_size = sizeof(reply.rep_id) + sizeof(reply.identifier);
             }
             break;
+            // case db_op::sremove:
             case db_op::remove:
             {
                 reply.rep_id = recv.rep_id;
@@ -264,6 +265,48 @@ int main(
                 reply_size = sizeof(reply);
             }
             break;
+            case db_op::sallocate:
+            {
+                reply.rep_id = recv.rep_id;
+                reply.identifier = run_db.try_allocate(recv.identifier);
+                reply_size = sizeof(reply.rep_id) + sizeof(reply.identifier);
+            }
+            break;
+            case db_op::sread:
+            {
+                reply.rep_id = recv.rep_id;
+                auto req = run_db.stepped_read(recv.identifier);
+                if(req.first.timestamp != 0)
+                {
+                    reply.identifier = req.first;
+                    reply.page = req.second;
+                    reply_size = sizeof(reply);
+                }
+                else
+                {
+                    reply_size = sizeof(reply.rep_id) + sizeof(reply.identifier);
+                }
+            }
+            break;
+            case db_op::swrite:
+            {
+                reply.rep_id = recv.rep_id;
+                try{
+                    auto req = run_db.stepped_write(recv.identifier, recv.page);
+                    reply.identifier = req;
+                } catch (...) {
+                    std::cerr << "cluster overfull" << std::endl;
+                }
+                reply_size = sizeof(reply.rep_id) + sizeof(reply.identifier);
+            }
+            break;
+            case db_op::confirm:
+            {
+                reply.rep_id = recv.rep_id;
+                run_db.confirm(recv.identifier, *(bitops::regulated*)&recv.page);
+                reply_size = sizeof(reply.rep_id);
+            }
+            break;
             default:
                 std::cerr << "bad_request " << (uint32_t)static_cast(recv.op) << std::endl;
                 continue;
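
The new try_allocate() claims a free record by writing a page of pseudo-random words: it seeds std::minstd_rand once from std::random_device and runs std::generate over the page viewed as an array. Below is a minimal standalone sketch of that idea only, assuming a 4096-byte page of uint32_t words and a main() wrapper for illustration; the real db_page layout is defined elsewhere in database.hpp.

#include <algorithm>
#include <array>
#include <cstdint>
#include <random>

int main()
{
    // Stand-in for a db_page: 4096 bytes viewed as 32-bit words (assumed size).
    std::array<std::uint32_t, 4096 / sizeof(std::uint32_t)> page{};

    // Seed a cheap linear congruential engine once from the system entropy
    // source, as try_allocate() does.
    std::random_device dev;
    std::minstd_rand temprng(dev());

    // std::generate calls temprng() once per element, filling the whole page
    // with pseudo-random words.
    std::generate(page.begin(), page.end(), temprng);

    return 0;
}

Seeding the cheap engine once and letting it produce every word is much faster than drawing each word from std::random_device directly.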