|
|
// tagfs: in-memory virtual disk and tag-based filesystem (work in progress)
|
|
|
#pragma once
|
|
|
|
|
|
|
|
#include "gp/array.hpp"
|
|
|
|
#include "gp/algorithm/min_max.hpp"
|
|
|
|
#include "gp/algorithm/modifiers.hpp"
|
|
|
|
#include "gp/algorithm/repeat.hpp"
|
|
|
|
#include "gp/bitops.hpp"
|
|
|
|
#include "gp/buffer.hpp"
|
|
|
|
#include "gp/pair.hpp"
|
|
|
|
|
|
|
|
#include <atomic>
|
|
|
|
|
|
|
|
#include <cstdint>
|
|
|
|
|
|
|
|
namespace gp { |
|
|
|
template<size_t sz> |
|
|
|
class memory_vdisk { |
|
|
|
static_assert(sz%128 == 0, "in memory disk expects 128 bytes page alignment"); |
|
|
|
gp::array<uint8_t, sz> data{gp::zero_t{}}; |
|
|
|
|
|
|
|
public: |
|
|
|
gp::buffer<uint8_t> read(gp::buffer<uint8_t> buffer, uint64_t offset) { |
|
|
|
auto it = data.begin()+offset; |
|
|
|
auto ret = buffer; |
|
|
|
for(auto& c : buffer) { |
|
|
|
c = *(it++); |
|
|
|
if(it == data.end()) { |
|
|
|
ret = buffer.slice_start(it - (data.begin() + offset)); |
|
|
|
break; |
|
|
|
} |
|
|
|
} |
|
|
|
return ret; |
|
|
|
} |
|
|
|
|
|
|
|
gp::buffer<uint8_t> write(gp::buffer<uint8_t> buffer, uint64_t offset) { |
|
|
|
auto it = data.begin()+offset; |
|
|
|
auto ret = buffer; |
|
|
|
for(auto& c : buffer) { |
|
|
|
auto cpy = it++; |
|
|
|
*(cpy) = c; |
|
|
|
if(it == data.end()) { |
|
|
|
ret = buffer.slice_start(it - (data.begin() + offset)); |
|
|
|
break; |
|
|
|
} |
|
|
|
} |
|
|
|
return ret; |
|
|
|
} |
|
|
|
|
|
|
|
constexpr uint64_t size() const noexcept { |
|
|
|
return sz; |
|
|
|
} |
|
|
|
|
|
|
|
static constexpr size_t page_size() noexcept { |
|
|
|
return 128; |
|
|
|
} |
|
|
|
|
|
|
|
constexpr uint64_t page_count() const noexcept { |
|
|
|
return size() / page_size(); |
|
|
|
} |
|
|
|
}; |
|
|
|
|
|
|
|
/// Tag-based filesystem layered on top of a page-oriented virtual disk.
/// `vdisk_ptr` is any pointer-like type whose pointee exposes
/// read(buffer, offset), write(buffer, offset), size() and page_size()
/// (see memory_vdisk for the expected interface).
///
/// NOTE(review): this class is visibly work in progress — several members
/// are unfinished or ill-formed; each is flagged with a NOTE(review)
/// comment at its definition site below.
template<typename vdisk_ptr>
class tagfs {
	vdisk_ptr disk;
	// Page granularity is taken from the pointee type of the disk handle.
	constexpr static size_t page_size = gp::remove_reference<decltype(*disk)>::type::page_size();
	// All-zero page image used by clear_page().
	constexpr static gp::array<uint8_t, page_size> empty_page{gp::zero_t{}};

	// On-disk superblock, stored at byte offset 0. All fields little-endian.
	struct disk_root {
		// Identifies a formatted volume; set to ASCII "tagmebro" by format().
		gp::endian_wrapper<uint64_t, gp::endian::little> magic;
		// First page of the allocator bitmap region.
		gp::endian_wrapper<uint64_t, gp::endian::little> first_allocator_page;
		// Allocator page where the next allocation scan starts (round-robin cursor).
		gp::endian_wrapper<uint64_t, gp::endian::little> allocator_shuttle;
		// Number of pages in the allocator bitmap region.
		gp::endian_wrapper<uint64_t, gp::endian::little> allocator_page_count;
		// Page id of the root tag-list node (0 when empty; see format()).
		gp::endian_wrapper<uint64_t, gp::endian::little> tag_list_node;
		// Total pages on the disk, including the root and allocator pages.
		gp::endian_wrapper<uint64_t, gp::endian::little> page_count;
	};

	// One page of an indirection tree mapping logical page offsets to
	// physical data-page ids. Interior nodes (node_level != 0) point at
	// child nodes; leaves (node_level == 0) point at data pages.
	struct tree_node {
		// Entries per node: a page holds node_pages ids after the two
		// header words (node_level, node_size).
		constexpr static size_t node_pages = page_size/sizeof(uint64_t) - 2;

		gp::endian_wrapper<uint64_t, gp::endian::little> node_level;
		gp::endian_wrapper<uint64_t, gp::endian::little> node_size;
		gp::endian_wrapper<uint64_t, gp::endian::little> data_pages[node_pages];

		// Returns the child page for slot `index`, allocating a fresh page
		// (and recording it in data_pages) when none exists yet.
		// `must_update` tells the caller this node itself was modified and
		// needs to be written back.
		auto allocate_or_get_node_n(tagfs& fs, uint64_t index) {
			struct ret_struct{
				gp::array<uint8_t, page_size> page;
				uint64_t page_id;
				bool must_update;
			};

			ret_struct ret;

			if(auto attempt = fail_or_get_node_n(fs, index)) {
				ret.page = attempt.value();
				// NOTE(review): page_id is left 0 on the hit path, so the
				// caller cannot tell which physical page the data came
				// from — confirm this is intentional.
				ret.page_id = 0;
				ret.must_update = false;
			} else {
				ret.page_id = data_pages[index] = fs.allocate_page();
				ret.must_update = true;
				// Reads whatever bytes the freshly allocated page holds;
				// presumably the caller clears or overwrites it.
				fs.disk->read(ret.page.as_buffer(), ret.page_id*page_size);
			}

			return ret;
		}

		// Fetch the page referenced by slot `index`, or fail when the slot
		// is unpopulated.
		// NOTE(review): body is empty — a value-returning function that
		// flows off its end is undefined behavior. TODO implement (likely:
		// return nullopt when data_pages[index] == 0, otherwise read the
		// page through fs.disk).
		auto fail_or_get_node_n(tagfs& fs, uint64_t index) -> gp::optional<gp::array<uint8_t, page_size>> {

		}

		// One step of tree descent for a lookup: for interior nodes,
		// returns (true, child node page, offset to resolve within it);
		// for leaves, returns (false, 0, physical data page).
		auto get_page_at_rec(uint64_t page_offset) {
			struct ret_struct {
				bool still_ongoing;
				uint64_t next_node;
				uint64_t next_page;
			};
			if(node_level) {
				auto transmit = page_offset % node_pages;
				auto local = page_offset / node_pages;
				gp_config::assertion(local < node_pages, "node can't be within the page");
				gp_config::assertion(local < node_size, "node not within the page");
				return ret_struct{true, data_pages[local], transmit};
			} else {
				gp_config::assertion(page_offset < node_pages, "node can't be within the page");
				gp_config::assertion(page_offset < node_size, "node not within the page");
				return ret_struct{false, 0, data_pages[page_offset]};
			}
		}

		// Resolve a logical page offset to a physical page id by walking
		// the tree from this node.
		// NOTE(review): ill-formed / wrong as written — get_page_at_rec
		// returns a 3-member struct but only 2 bindings are declared;
		// `explore.begin().get_page_at_rec(...)` calls a tree_node method
		// on an iterator (needs ->); and `page` (a page id) is passed
		// where the remaining page offset (`next_page`) is expected.
		uint64_t get_page_at(vdisk_ptr& disk, uint64_t page_offset) {
			auto [ongoing, page] = get_page_at_rec(page_offset);
			if(!ongoing) return page;
			gp::array<tree_node, 1> explore;
			do {
				disk->read(explore.template cast<char>(), page*page_size);
				auto [t_ongoing, t_page] = explore.begin().get_page_at_rec(page);
				ongoing = t_ongoing;
				page = t_page;
			} while(ongoing);
			return page;
		}

		// One step of tree descent for a store operation.
		// NOTE(review): ill-formed — declared to return uint64_t but
		// returns a local ret_struct; `page_data` and `page_list` are
		// never used and the interior branch drops `local` entirely.
		// Looks unfinished; compare get_page_at_rec above.
		uint64_t set_page_at_rec(uint64_t page_offset, gp::array<uint8_t, page_size>* page_data, gp::buffer<uint64_t> page_list) {
			struct ret_struct {
				bool still_ongoing;
				uint64_t next_page;
			};
			if(node_level) {
				auto transmit = page_offset % node_pages;
				auto local = page_offset / node_pages;

				return ret_struct{true, transmit};
			} else {
				return ret_struct{false, data_pages[page_offset]};
			}
		}

		// NOTE(review): byte-for-byte identical to get_page_at — it never
		// writes anything and never calls set_page_at_rec. Presumably a
		// copy-paste placeholder; shares all the defects noted there.
		uint64_t set_page_at(vdisk_ptr& disk, uint64_t page_offset) {
			auto [ongoing, page] = get_page_at_rec(page_offset);
			if(!ongoing) return page;
			gp::array<tree_node, 1> explore;
			do {
				disk->read(explore.template cast<char>(), page*page_size);
				auto [t_ongoing, t_page] = explore.begin().get_page_at_rec(page);
				ongoing = t_ongoing;
				page = t_page;
			} while(ongoing);
			return page;
		}
	};

	// On-disk file metadata stub; only a reference count so far.
	struct file_description {
		gp::endian_wrapper<uint32_t, gp::endian::little> reference_counter;
	};

public:
	/// Takes ownership of the disk handle. The volume is not touched;
	/// call format() to (re)initialize it.
	tagfs(vdisk_ptr&& _disk)
	: disk(std::move(_disk))
	{}

private:
	// Reads the superblock from byte offset 0.
	disk_root get_disk_root() {
		disk_root vret;
		disk->read(gp::buffer<disk_root>{&vret, 1}.template cast<uint8_t>(), 0);
		return vret;
	}

	// Writes the superblock back to byte offset 0.
	void set_disk_root(disk_root& root) {
		disk->write(gp::buffer<disk_root>{&root, 1}.template cast<uint8_t>(), 0);
	}

	// Finds the first clear bit in the bitmap `page`, sets it in place,
	// and returns its index; nullopt when every bit is already set.
	// The caller is responsible for writing the modified page back.
	gp::optional<uint64_t> try_set_bit(gp::buffer<uint8_t> page) {
		uint64_t idx = 0;
		for(auto& elem : page) {
			if(elem != 0xff) {
				uint8_t copy = elem;
				uint8_t setter = 1;
				// Walk the byte's bits LSB-first; once a clear bit is seen
				// the early return keeps copy/setter/idx frozen for the
				// remaining repeat iterations, so idx ends on that bit.
				gp::repeat(8, [&](){
					bool value = copy & 1;
					if(!value) {
						return;
					}
					copy >>= 1;
					setter <<= 1;
					++idx;
				});
				elem |= setter;
				return idx;
			}
			idx += 8;
		}
		return gp::nullopt;
	}

	// Advances the allocator cursor by one page, wrapping back to the
	// first allocator page at the end of the allocator region.
	uint64_t next_shuttle_page(disk_root root, uint64_t shuttle) {
		return
			shuttle + 1 == root.first_allocator_page + root.allocator_page_count ?
			  root.first_allocator_page
			: shuttle + 1;
	}

	// Clears bit `idx` in the bitmap `page`; returns false when the bit
	// was already clear (double free). Caller writes the page back.
	bool try_unset_bit(gp::buffer<uint8_t> page, uint64_t idx) {
		uint8_t& target_byte = *(page.begin()+(idx/8));
		uint8_t flipper = 1 << (idx%8);
		if(target_byte & flipper) {
			target_byte ^= flipper;
			return true;
		}
		return false;
	}

	// Allocates one data page by scanning the allocator bitmaps from the
	// shuttle cursor; returns the physical page id (offset past the
	// allocator region).
	// NOTE(review): `next_shuttle_page(shuttle_page)` passes one argument
	// but the function takes (disk_root, uint64_t) — ill-formed as
	// written. Also loops forever when every bitmap bit is set (full
	// disk): the shuttle just keeps wrapping.
	uint64_t allocate_page() {
		disk_root root = get_disk_root();
		uint64_t begin_page = root.first_allocator_page;
		uint64_t shuttle_page = root.allocator_shuttle;
		uint64_t end_page = root.first_allocator_page + root.allocator_page_count;
		gp::array<uint8_t, page_size> page_contents;

		gp::optional<uint64_t> page;
		do
		{
			auto allocator_page = disk->read(page_contents.as_buffer(), shuttle_page*page_size);
			// The last allocator page may cover more bits than there are
			// real data pages; trim the view so those bits are never set.
			if(shuttle_page == end_page - 1) {
				uint64_t existing_pages = root.page_count - end_page;
				uint64_t allocable_pages = root.allocator_page_count*8*page_size;
				if(existing_pages < allocable_pages) {
					uint64_t extra = allocable_pages - existing_pages;
					extra /= 8;
					allocator_page = allocator_page.slice_start(page_size - extra);
				}
			}
			page = try_set_bit(allocator_page);
			if(!page.has_value()) {
				root.allocator_shuttle = (shuttle_page = next_shuttle_page(shuttle_page));
			} else {
				// Persist the updated bitmap, then convert the bit index
				// into an index relative to the whole allocator region.
				disk->write(page_contents.as_buffer(), shuttle_page*page_size);
				page.value() += page_size*8*(shuttle_page-begin_page);
			}
		}
		while(!page.has_value());
		set_disk_root(root);
		// Data pages start right after the allocator region.
		return page.value() + end_page;
	}

	// Frees a previously allocated physical page; returns false when the
	// page was not marked allocated.
	// NOTE(review): reads/writes the bitmap at page_size*allocator_page,
	// i.e. relative to page 0, whereas allocate_page addresses bitmaps
	// relative to first_allocator_page — one of the two must be wrong.
	// TODO confirm and add root.first_allocator_page to the offsets here.
	bool deallocate_page(uint64_t page) {
		disk_root root = get_disk_root();
		// Convert physical page id back to an allocator-region bit index.
		page -= root.first_allocator_page + root.allocator_page_count;
		uint64_t discriminant = page_size*8;
		uint64_t allocator_page = page/discriminant;
		uint64_t pos_page = page%discriminant;
		gp::array<uint8_t, page_size> store;
		disk->read(store.as_buffer(), page_size*allocator_page);
		bool ret = try_unset_bit(store.as_buffer(), pos_page);
		disk->write(store.as_buffer(), page_size*allocator_page);
		return ret;
	}

	// Overwrites one page with zeros.
	void clear_page(uint64_t page) {
		disk->write(empty_page.as_buffer(), page*page_size);
	}

	// Splits `pagecount` pages into {allocator pages, data pages} such
	// that each data page has one tracking bit in the allocator region
	// (8*page_size data pages per allocator page).
	constexpr static gp::pair<uint64_t, uint64_t> split_pages(uint64_t pagecount) {
		auto datapage_count = (8*pagecount*page_size)/(1+8*page_size);
		auto allocator_pages = pagecount - datapage_count;

		return {allocator_pages, datapage_count};
	}
public:
	/// Initializes the volume: writes the superblock (page 0) and zeroes
	/// the allocator bitmap region that follows it.
	void format() {
		auto sz = disk->size();
		auto page_sz = page_size;
		auto page_count = sz /page_sz;
		auto remaining_pages = page_count;

		disk_root root;
		// tagmebro
		root.magic = 0x7461676D6562726F;
		root.page_count = page_count;
		root.first_allocator_page = 1;
		root.allocator_shuttle = 1;

		// Removing the root page
		remaining_pages -= 1;

		// calculating datapages
		auto [allocator_pages, datapage_count] = split_pages(remaining_pages);
		static_assert(split_pages(page_size*8+1).first == 1, "ideal 1 allocator page split doesn't work");
		static_assert(split_pages(page_size*8+2).first == 2, "worst 2 allocator page split doesn't work");

		root.allocator_page_count = allocator_pages;

		// NOTE(review): `offset` is never used — this clears
		// first_allocator_page repeatedly instead of each allocator page;
		// should presumably be clear_page(root.first_allocator_page + offset).
		for(uint64_t offset = 0; offset < allocator_pages; ++offset) {
			clear_page(root.first_allocator_page);
		}

		// 0 marks "no tag list yet"; page 0 is the superblock so it can
		// never be a valid node page.
		root.tag_list_node = 0;
		set_disk_root(root);
	}
};
|
|
|
} |