
Made the base of the file-system allocation engine

tagfs
Ludovic 'Archivist' Lagouardette 4 years ago
parent commit ca5866f524
2 changed files with 218 additions and 0 deletions
  1. +8   -0   include/gp/array.hpp
  2. +210 -0   include/gp/tagfs/tagfs.hpp

+ 8  - 0   include/gp/array.hpp

@@ -28,6 +28,14 @@ namespace gp{
		}
	}

	template<typename fn>
	array(fn&& func)
	{
		// Fill every element from the provided generator callable.
		for(auto& elem : ary) {
			elem = func();
		}
	}

	template<typename ...U>
	array(U&& ...values)
	: ary{gp::move((T&&)values)...}
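The constructor added above fills the array by invoking a callable once per element; tagfs.hpp below relies on it to build its zero-filled empty_page. A minimal usage sketch, not part of the commit (the element type and size are arbitrary):

#include "gp/array.hpp"

int main() {
	// Every element is produced by one call to the generator.
	gp::array<int, 4> zeroes{[](){ return 0; }};
	int sum = 0;
	for(auto& v : zeroes) {
		sum += v;
	}
	return sum; // 0
}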

+ 210  - 0   include/gp/tagfs/tagfs.hpp

@@ -0,0 +1,210 @@
#pragma once
#include "gp/array.hpp"
#include "gp/algorithm/min_max.hpp"
#include "gp/algorithm/repeat.hpp"
#include "gp/bitops.hpp"
#include "gp/buffer.hpp"
#include "gp/pointers.hpp"
#include <atomic>
#include <cstdint>
#include <type_traits>

// RAM-backed virtual disk exposing the page-oriented interface tagfs expects.
template<size_t sz>
class memory_vdisk {
	static_assert(sz % 128 == 0, "in-memory disk expects its size to be a multiple of the 128 byte page size");
	alignas(128) gp::array<uint8_t, sz> data;
public:
	// Copies bytes out of the disk starting at offset; returns the part of
	// the destination buffer that was actually filled.
	gp::buffer<uint8_t> read(gp::buffer<uint8_t> buffer, uint64_t offset) {
		auto it = data.begin() + offset;
		auto ret = buffer;
		for(auto& c : buffer) {
			c = *(it++);
			if(it == data.end()) {
				ret = buffer.slice_start(it - (data.begin() + offset));
				break;
			}
		}
		return ret;
	}

	// Copies bytes into the disk starting at offset; returns the part of
	// the source buffer that was actually written.
	gp::buffer<uint8_t> write(gp::buffer<uint8_t> buffer, uint64_t offset) {
		auto it = data.begin() + offset;
		auto ret = buffer;
		for(auto& c : buffer) {
			*(it++) = c;
			if(it == data.end()) {
				ret = buffer.slice_start(it - (data.begin() + offset));
				break;
			}
		}
		return ret;
	}

	constexpr uint64_t size() const noexcept {
		return sz;
	}

	static constexpr size_t page_size() noexcept {
		return 128;
	}

	constexpr uint64_t page_count() const noexcept {
		return size() / page_size();
	}
};
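
// On-disk layout managed by the tagfs class below (established by format()):
//   page 0                                              : disk_root
//   pages [first_allocator_page,
//          first_allocator_page + allocator_page_count) : allocation bitmap, one bit per data page
//   every page after the bitmap                         : data pages handed out by allocate_page()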
template<typename vdisk_ptr>
class tagfs {
	vdisk_ptr disk;
	// *disk yields a reference; strip it so the static page_size() of the
	// underlying vdisk type can be used in constant expressions.
	using vdisk_type = std::remove_reference_t<decltype(*disk)>;
	const gp::array<uint8_t, vdisk_type::page_size()> empty_page;

	// Page 0 of the disk, stored little-endian.
	struct disk_root {
		gp::endian_wrapper<uint64_t, gp::endian::little> magic;
		gp::endian_wrapper<uint64_t, gp::endian::little> first_allocator_page;
		gp::endian_wrapper<uint64_t, gp::endian::little> allocator_shuttle;
		gp::endian_wrapper<uint64_t, gp::endian::little> allocator_page_count;
		gp::endian_wrapper<uint64_t, gp::endian::little> tag_list_node;
		gp::endian_wrapper<uint64_t, gp::endian::little> page_count;
	};

	struct file_description {
		gp::endian_wrapper<uint32_t, gp::endian::little> reference_counter;
	};

public:
	tagfs(vdisk_ptr&& _disk)
	: disk(gp::forward<vdisk_ptr>(_disk))
	, empty_page{[](){return 0;}}
	{}

	disk_root get_disk_root() {
		gp::array<disk_root, 1> vret;
		return *disk->read(vret.as_buffer().template cast<uint8_t>(), 0).template cast<disk_root>().begin();
	}

	void set_disk_root(disk_root& root) {
		gp::array<disk_root, 1> vpar{root};
		disk->write(vpar.as_buffer().template cast<uint8_t>(), 0);
	}

	// Scans a bitmap page for the first clear bit, sets it and returns its
	// index within the page, or nullopt when every bit is already set.
	gp::optional<uint64_t> try_set_bit(gp::buffer<uint8_t> page) {
		uint64_t idx = 0;
		for(auto& elem : page) {
			if(elem != 0xff) {
				uint8_t copy = elem;
				uint8_t setter = 1;
				// Walk the bits until the first zero; setter stops on that bit.
				gp::repeat(8, [&](){
					bool value = copy & 1;
					if(!value) {
						return;
					}
					copy >>= 1;
					setter <<= 1;
					++idx;
				});
				elem |= setter;
				return idx;
			}
			idx += 8;
		}
		return gp::nullopt;
	}

	// Advances the allocator cursor to the next bitmap page, wrapping back to
	// the first allocator page at the end of the bitmap.
	uint64_t next_shuttle_page(disk_root root, uint64_t shuttle) {
		return
			shuttle + 1 == root.first_allocator_page + root.allocator_page_count ?
				root.first_allocator_page
				: shuttle + 1;
	}

	// Clears bit idx of a bitmap page; returns whether the bit was previously set.
	bool try_unset_bit(gp::buffer<uint8_t> page, uint64_t idx) {
		uint8_t& target_byte = *(page.begin() + (idx / 8));
		uint8_t flipper = 1 << (idx % 8);
		if(target_byte & flipper) {
			target_byte ^= flipper;
			return true;
		}
		return false;
	}

	// Finds a free data page in the allocation bitmap, marks it used and
	// returns its absolute page number. Loops until a free bit is found.
	uint64_t allocate_page() {
		disk_root root = get_disk_root();
		uint64_t begin_page = root.first_allocator_page;
		uint64_t shuttle_page = root.allocator_shuttle;
		uint64_t end_page = root.first_allocator_page + root.allocator_page_count;
		gp::array<uint8_t, vdisk_type::page_size()> page_contents;
		gp::optional<uint64_t> page;
		do
		{
			auto allocator_page = disk->read(page_contents.as_buffer(), shuttle_page * vdisk_type::page_size());
			if(shuttle_page == end_page - 1) {
				// The last bitmap page may describe more pages than the disk
				// actually has; trim the trailing bytes so they are never allocated.
				uint64_t existing_pages = root.page_count - end_page;
				uint64_t allocable_pages = root.allocator_page_count * 8 * vdisk_type::page_size();
				if(existing_pages < allocable_pages) {
					uint64_t extra = allocable_pages - existing_pages;
					extra /= 8;
					allocator_page = allocator_page.slice_start(vdisk_type::page_size() - extra);
				}
			}
			page = try_set_bit(allocator_page);
			if(!page.has_value()) {
				root.allocator_shuttle = (shuttle_page = next_shuttle_page(root, shuttle_page));
			} else {
				disk->write(page_contents.as_buffer(), shuttle_page * vdisk_type::page_size());
				// Convert the bit index into an index across the whole bitmap.
				page.value() += vdisk_type::page_size() * 8 * (shuttle_page - begin_page);
			}
		}
		while(!page.has_value());
		set_disk_root(root);
		// Data pages start right after the allocator region.
		return page.value() + end_page;
	}

	// Clears the bitmap bit of a previously allocated data page; returns
	// false if the page was not marked as allocated.
	bool deallocate_page(uint64_t page) {
		disk_root root = get_disk_root();
		// Translate the absolute page number back into a bitmap bit index.
		page -= root.first_allocator_page + root.allocator_page_count;
		uint64_t discriminant = vdisk_type::page_size() * 8;
		uint64_t allocator_page = page / discriminant;
		uint64_t pos_page = page % discriminant;
		gp::array<uint8_t, vdisk_type::page_size()> store;
		disk->read(store.as_buffer(), vdisk_type::page_size() * (root.first_allocator_page + allocator_page));
		bool ret = try_unset_bit(store.as_buffer(), pos_page);
		disk->write(store.as_buffer(), vdisk_type::page_size() * (root.first_allocator_page + allocator_page));
		return ret;
	}

	// Overwrites a page with zeroes.
	void clear_page(uint64_t page) {
		disk->write(empty_page.as_buffer(), page * vdisk_type::page_size());
	}

	// Initializes the disk: writes the root page, sizes the allocation bitmap
	// and zeroes it out.
	void format() {
		auto sz = disk->size();
		auto page_sz = disk->page_size();
		auto page_count = sz / page_sz;
		auto remaining_pages = page_count;
		disk_root root;
		// "tagmebro" in ASCII
		root.magic = 0x7461676D6562726F;
		root.page_count = page_count;
		root.first_allocator_page = 1;
		root.allocator_shuttle = 1;
		// Remove the root page from the pages left to distribute.
		remaining_pages -= 1;
		// Split the remainder between data and bitmap pages: each bitmap page
		// tracks 8*page_sz data pages, so datapages = 8*R*page_sz/(1+8*page_sz).
		auto datapage_count = (8 * remaining_pages * page_sz) / (1 + 8 * page_sz);
		auto allocator_pages = remaining_pages - datapage_count;
		root.allocator_page_count = allocator_pages;
		// Zero every bitmap page so all data pages start out free.
		for(uint64_t offset = 0; offset < allocator_pages; ++offset) {
			clear_page(root.first_allocator_page + offset);
		}
		root.tag_list_node = 0;
		set_disk_root(root);
	}
};
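
For reference, a short usage sketch of the allocator introduced in this commit. It is not part of the change: the 1 MiB disk size and the raw memory_vdisk* used as vdisk_ptr are arbitrary choices for illustration.

#include "gp/tagfs/tagfs.hpp"

int main() {
	// 8192 pages of 128 bytes; static keeps the 1 MiB array off the stack.
	static memory_vdisk<1024 * 1024> disk;

	tagfs<memory_vdisk<1024 * 1024>*> fs{&disk};
	fs.format();                          // write the root page, zero the allocation bitmap

	auto page = fs.allocate_page();       // first free data page after the bitmap
	bool was_allocated = fs.deallocate_page(page);
	return was_allocated ? 0 : 1;
}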
