
Basic tests for tagfs

tagfs
Ludovic 'Archivist' Lagouardette 4 years ago
commit e778628185
4 changed files with 243 additions and 195 deletions
  1. include/gp/pair.hpp  +18 -12
  2. include/gp/tagfs/tagfs.hpp  +201 -183
  3. tests.cpp  +1 -0
  4. tests/tagfs_test.cpp  +23 -0

include/gp/pair.hpp  +18 -12

@@ -8,28 +8,34 @@ namespace gp{
T1 first;
T2 second;
pair() : first(), second() {}
constexpr pair()
: first()
, second()
{}
pair(const T1& a, const T2& b) : first(a), second(b) {}
constexpr pair(const T1& a, const T2& b)
: first(a)
, second(b)
{}
pair(pair&& v)
k">constexpr pair(pair&& v)
: first(gp::move(v.first))
, second(gp::move(v.second))
{}
template<typename U1, typename U2>
pair(U1&& a, U2&& b)
k">constexpr pair(U1&& a, U2&& b)
: first(gp::forward<U1>(a))
, second(gp::forward<U2>(b))
{}
template<typename U1, typename U2>
pair(pair<U1, U2>&& v)
k">constexpr pair(pair<U1, U2>&& v)
: first(gp::move(v.first))
, second(gp::move(v.second))
{}
pair& operator=(pair&& v)
k">constexpr pair& operator=(pair&& v)
{
first = gp::move(v.first);
second = gp::move(v.second);
@@ -38,17 +44,17 @@ namespace gp{
};
template<typename F, typename S>
bool operator==(const pair<F, S>& lhs, const pair<F, S>& rhs) {
">constexpr bool operator==(const pair<F, S>& lhs, const pair<F, S>& rhs) {
return lhs.first == rhs.first and lhs.second == rhs.second;
}
template<typename F, typename S>
bool operator!=(const pair<F, S>& lhs, const pair<F, S>& rhs) {
">constexpr bool operator!=(const pair<F, S>& lhs, const pair<F, S>& rhs) {
return lhs.first != rhs.first or lhs.second != rhs.second;
}
template<typename F, typename S>
bool operator<=(const pair<F, S>& lhs, const pair<F, S>& rhs) {
">constexpr bool operator<=(const pair<F, S>& lhs, const pair<F, S>& rhs) {
if(lhs.first > rhs.first) {
return false;
} else if(lhs.first == rhs.first) {
@@ -58,7 +64,7 @@ namespace gp{
}
template<typename F, typename S>
bool operator>=(const pair<F, S>& lhs, const pair<F, S>& rhs) {
">constexpr bool operator>=(const pair<F, S>& lhs, const pair<F, S>& rhs) {
if(lhs.first < rhs.first) {
return false;
} else if(lhs.first == rhs.first) {
@@ -68,12 +74,12 @@ namespace gp{
}
template<typename F, typename S>
bool operator<(const pair<F, S>& lhs, const pair<F, S>& rhs) {
">constexpr bool operator<(const pair<F, S>& lhs, const pair<F, S>& rhs) {
return !(lhs >= rhs);
}
template<typename F, typename S>
bool operator>(const pair<F, S>& lhs, const pair<F, S>& rhs) {
">constexpr bool operator>(const pair<F, S>& lhs, const pair<F, S>& rhs) {
return !(lhs <= rhs);
}
}
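
The constexpr qualifiers added here are what allow the new tagfs code to build a gp::pair inside a constexpr helper and check the result with static_assert (see split_pages further down). A minimal standalone illustration of the same idea, using a hypothetical mini_pair instead of the real gp::pair header:

// Standalone sketch; "mini_pair" and "split_example" are stand-ins, not repository code.
#include <cstdint>

template<typename T1, typename T2>
struct mini_pair {
	T1 first;
	T2 second;
	constexpr mini_pair(const T1& a, const T2& b) : first(a), second(b) {}
};

template<typename F, typename S>
constexpr bool operator==(const mini_pair<F, S>& lhs, const mini_pair<F, S>& rhs) {
	return lhs.first == rhs.first and lhs.second == rhs.second;
}

// A constexpr function returning a pair, mirroring how tagfs::split_pages below
// returns gp::pair<uint64_t, uint64_t>.
constexpr mini_pair<uint64_t, uint64_t> split_example(uint64_t n) {
	return {n / 2, n - n / 2};
}

// Without constexpr on the constructor and on operator==, this check would not compile.
static_assert(split_example(7) == mini_pair<uint64_t, uint64_t>{3, 4}, "compile-time pair use");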

include/gp/tagfs/tagfs.hpp  +201 -183

@@ -2,209 +2,227 @@
#include "gp/array.hpp"
#include "gp/algorithm/min_max.hpp"
#include "gp/algorithm/modifiers.hpp"
#include "gp/algorithm/repeat.hpp"
#include "gp/bitops.hpp"
#include "gp/buffer.hpp"
#include "gp/pointers.hpp"
#include "gp/pair.hpp"
#include <atomic>
#include <cstdint>
template<size_t sz>
class memory_vdisk {
static_assert(sz%128 == 0, "in memory disk expects 128 bytes page alignment");
alignas(128) gp::array<uint8_t, sz> data;
gp::buffer<uint8_t> read(gp::buffer<uint8_t> buffer, uint64_t offset) {
auto it = data.begin()+offset;
auto ret = buffer;
for(auto& c : buffer) {
c = *(it++);
if(it == data.end()) {
ret = buffer.slice_start(it - (data.begin() + offset));
break;
namespace gp {
template<size_t sz>
class memory_vdisk {
static_assert(sz%128 == 0, "in memory disk expects 128 bytes page alignment");
alignas(128) gp::array<uint8_t, sz> data;
public:
gp::buffer<uint8_t> read(gp::buffer<uint8_t> buffer, uint64_t offset) {
auto it = data.begin()+offset;
auto ret = buffer;
for(auto& c : buffer) {
c = *(it++);
if(it == data.end()) {
ret = buffer.slice_start(it - (data.begin() + offset));
break;
}
}
return ret;
}
return ret;
}
gp::buffer<uint8_t> write(gp::buffer<uint8_t> buffer, uint64_t offset) {
auto it = data.begin()+offset;
auto ret = buffer;
for(auto& c : buffer) {
*(it++) = c;
if(it == data.end()) {
ret = buffer.slice_start(it - (data.begin() + offset));
break;
gp::buffer<uint8_t> write(gp::buffer<uint8_t> buffer, uint64_t offset) {
auto it = data.begin()+offset;
auto ret = buffer;
for(auto& c : buffer) {
*(it++) = c;
if(it == data.end()) {
ret = buffer.slice_start(it - (data.begin() + offset));
break;
}
}
return ret;
}
return ret;
}
constexpr uint64_t size() const noexcept {
return sz;
}
static constexpr size_t page_size() noexcept {
return 128;
}
constexpr uint64_t page_count() const noexcept {
return size() / page_size();
}
};
template<typename vdisk_ptr>
class tagfs {
vdisk_ptr disk;
const gp::array<uint8_t, decltype(*disk)::page_size()> empty_page;
struct disk_root {
gp::endian_wrapper<uint64_t, gp::endian::little> magic;
gp::endian_wrapper<uint64_t, gp::endian::little> first_allocator_page;
gp::endian_wrapper<uint64_t, gp::endian::little> allocator_shuttle;
gp::endian_wrapper<uint64_t, gp::endian::little> allocator_page_count;
gp::endian_wrapper<uint64_t, gp::endian::little> tag_list_node;
gp::endian_wrapper<uint64_t, gp::endian::little> page_count;
};
struct file_description {
gp::endian_wrapper<uint32_t, gp::endian::little> reference_counter;
constexpr uint64_t size() const noexcept {
return sz;
}
static constexpr size_t page_size() noexcept {
return 128;
}
constexpr uint64_t page_count() const noexcept {
return size() / page_size();
}
};
tagfs(vdisk_ptr&& _disk)
: disk(gp::forward<vdisk_ptr>(disk))
, empty_page{[](){return 0;}}
{}
disk_root get_disk_root() {
gp::array<disk_root, 1> vret;
return *disk->read(vret.as_buffer().template cast<uint8_t>(), 0).template cast<disk_root>().begin();
}
void set_disk_root(disk_root& root) {
gp::array<disk_root, 1> vpar{root};
disk->write(vpar.as_buffer().template cast<uint8_t>(), 0);
}
gp::optional<uint64_t> try_set_bit(gp::buffer<uint8_t> page) {
uint64_t idx = 0;
for(auto& elem : page) {
if(elem != 0xff) {
uint8_t copy = elem;
uint8_t setter = 1;
gp::repeat(8, [&](){
bool value = copy & 1;
if(!value) {
return;
}
copy >>= 1;
setter <<= 1;
++idx;
});
elem |= setter;
return idx;
}
idx += 8;
template<typename vdisk_ptr>
class tagfs {
vdisk_ptr disk;
constexpr static size_t page_size = gp::remove_reference<decltype(*disk)>::type::page_size();
const gp::array<uint8_t, page_size> empty_page;
struct disk_root {
gp::endian_wrapper<uint64_t, gp::endian::little> magic;
gp::endian_wrapper<uint64_t, gp::endian::little> first_allocator_page;
gp::endian_wrapper<uint64_t, gp::endian::little> allocator_shuttle;
gp::endian_wrapper<uint64_t, gp::endian::little> allocator_page_count;
gp::endian_wrapper<uint64_t, gp::endian::little> tag_list_node;
gp::endian_wrapper<uint64_t, gp::endian::little> page_count;
};
struct file_description {
gp::endian_wrapper<uint32_t, gp::endian::little> reference_counter;
};
public:
tagfs(vdisk_ptr&& _disk)
: disk(gp::forward<vdisk_ptr>(disk))
, empty_page{[](){return 0;}}
{}
private:
disk_root get_disk_root() {
gp::array<disk_root, 1> vret;
return *disk->read(vret.as_buffer().template cast<uint8_t>(), 0).template cast<disk_root>().begin();
}
return gp::nullopt;
}
uint64_t next_shuttle_page(disk_root root, uint64_t shuttle) {
return
shuttle + 1 == root.first_allocator_page + root.allocator_page_count ?
root.first_allocator_page
: shuttle + 1;
}
bool try_unset_bit(gp::buffer<uint8_t> page, uint64_t idx) {
uint8_t& target_byte = *(page.begin()+(idx/8));
uint8_t flipper = 1 << (idx%8);
if(target_byte & flipper) {
target_byte ^= flipper;
return true;
void set_disk_root(disk_root& root) {
gp::array<disk_root, 1> vpar{root};
disk->write(vpar.as_buffer().template cast<uint8_t>(), 0);
}
return false;
}
uint64_t allocate_page() {
disk_root root = get_disk_root();
uint64_t begin_page = root.first_allocator_page;
uint64_t shuttle_page = root.allocator_shuttle;
uint64_t end_page = root.first_allocator_page + root.allocator_page_count;
gp::array<uint8_t, decltype(*disk)::page_size()> page_contents;
gp::optional<uint64_t> page;
do
{
auto allocator_page = disk->read(page_contents.as_buffer(), shuttle_page*decltype(*disk)::page_size());
if(shuttle_page == end_page - 1) {
uint64_t existing_pages = root.page_count - end_page;
uint64_t allocable_pages = root.allocator_page_count*8*decltype(*disk)::page_size();
if(existing_pages < allocable_pages) {
uint64_t extra = allocable_pages - existing_pages;
extra /= 8;
allocator_page = allocator_page.slice_start(decltype(*disk)::page_size() - extra);
gp::optional<uint64_t> try_set_bit(gp::buffer<uint8_t> page) {
uint64_t idx = 0;
for(auto& elem : page) {
if(elem != 0xff) {
uint8_t copy = elem;
uint8_t setter = 1;
gp::repeat(8, [&](){
bool value = copy & 1;
if(!value) {
return;
}
copy >>= 1;
setter <<= 1;
++idx;
});
elem |= setter;
return idx;
}
idx += 8;
}
page = try_set_bit(allocator_page);
if(!page.has_value()) {
root.allocator_shuttle = (shuttle_page = next_shuttle_page(shuttle_page));
} else {
disk->write(page_contents.as_buffer(), shuttle_page*decltype(*disk)::page_size());
page.value += decltype(*disk)::page_size()*8*(shuttle_page-begin_page);
return gp::nullopt;
}
uint64_t next_shuttle_page(disk_root root, uint64_t shuttle) {
return
shuttle + 1 == root.first_allocator_page + root.allocator_page_count ?
root.first_allocator_page
: shuttle + 1;
}
bool try_unset_bit(gp::buffer<uint8_t> page, uint64_t idx) {
uint8_t& target_byte = *(page.begin()+(idx/8));
uint8_t flipper = 1 << (idx%8);
if(target_byte & flipper) {
target_byte ^= flipper;
return true;
}
return false;
}
while(!page.has_value());
set_disk_root(root);
return page.value() + end_page;
}
bool deallocate_page(uint64_t page) {
disk_root root = get_disk_root();
page -= root.first_allocator_page + root.allocator_page_count;
uint64_t discriminant = decltype(*disk)::page_size()*8;
uint64_t allocator_page = page/discriminant;
uint64_t pos_page = page%discriminant;
gp::array<uint8_t, decltype(*disk)::page_size()> store;
disk->read(store.as_buffer(), decltype(*disk)::page_size()*allocator_page);
bool ret = try_unset_bit(store.as_buffer(), pos_page);
disk->write(store.as_buffer(), decltype(*disk)::page_size()*allocator_page);
return ret;
}
void clear_page(uint64_t page) {
disk->write(empty_page.as_buffer(), page*decltype(*disk)::page_size());
}
void format() {
auto sz = disk->size();
auto page_sz = disk->page_size();
auto page_count = sz /page_sz;
auto remaining_pages = page_count;
disk_root root;
// tagmebro
root.magic = 0x7461676D6562726F;
root.page_count = page_count;
root.first_allocator_page = 1;
root.allocator_shuttle = 1;
// Removing the root page
remaining_pages -= 1;
// calculating datapages
auto datapage_count = (8*remaining_pages*page_sz)/(1+8*page_sz);
auto allocator_pages = remaining_pages - datapage_count;
root.allocator_page_count = allocator_pages;
for(uint64_t offset = 0; offset < allocator_pages; ++offset) {
clear_page(root.first_allocator_page);
uint64_t allocate_page() {
disk_root root = get_disk_root();
uint64_t begin_page = root.first_allocator_page;
uint64_t shuttle_page = root.allocator_shuttle;
uint64_t end_page = root.first_allocator_page + root.allocator_page_count;
gp::array<uint8_t, page_size> page_contents;
gp::optional<uint64_t> page;
do
{
auto allocator_page = disk->read(page_contents.as_buffer(), shuttle_page*page_size);
if(shuttle_page == end_page - 1) {
uint64_t existing_pages = root.page_count - end_page;
uint64_t allocable_pages = root.allocator_page_count*8*page_size;
if(existing_pages < allocable_pages) {
uint64_t extra = allocable_pages - existing_pages;
extra /= 8;
allocator_page = allocator_page.slice_start(page_size - extra);
}
}
page = try_set_bit(allocator_page);
if(!page.has_value()) {
root.allocator_shuttle = (shuttle_page = next_shuttle_page(shuttle_page));
} else {
disk->write(page_contents.as_buffer(), shuttle_page*page_size);
page.value() += page_size*8*(shuttle_page-begin_page);
}
}
while(!page.has_value());
set_disk_root(root);
return page.value() + end_page;
}
bool deallocate_page(uint64_t page) {
disk_root root = get_disk_root();
page -= root.first_allocator_page + root.allocator_page_count;
uint64_t discriminant = page_size*8;
uint64_t allocator_page = page/discriminant;
uint64_t pos_page = page%discriminant;
gp::array<uint8_t, page_size> store;
disk->read(store.as_buffer(), page_size*allocator_page);
bool ret = try_unset_bit(store.as_buffer(), pos_page);
disk->write(store.as_buffer(), page_size*allocator_page);
return ret;
}
void clear_page(uint64_t page) {
disk->write(empty_page.as_buffer(), page*page_size);
}
constexpr static gp::pair<uint64_t, uint64_t> split_pages(uint64_t pagecount) {
auto datapage_count = (8*pagecount*page_size)/(1+8*page_size);
auto allocator_pages = pagecount - datapage_count;
return {allocator_pages, datapage_count};
}
root.tag_list_node = 0;
set_disk_root(root);
}
};
void format() {
auto sz = disk->size();
auto page_sz = page_size;
auto page_count = sz /page_sz;
auto remaining_pages = page_count;
disk_root root;
// tagmebro
root.magic = 0x7461676D6562726F;
root.page_count = page_count;
root.first_allocator_page = 1;
root.allocator_shuttle = 1;
// Removing the root page
remaining_pages -= 1;
// calculating datapages
auto [allocator_pages, datapage_count] = split_pages(remaining_pages);
static_assert(split_pages(page_size*8+1).first == 1, "ideal 1 allocator page split doesn't work");
static_assert(split_pages(page_size*8+2).first == 2, "worst 2 allocator page split doesn't work");
root.allocator_page_count = allocator_pages;
for(uint64_t offset = 0; offset < allocator_pages; ++offset) {
clear_page(root.first_allocator_page);
}
root.tag_list_node = 0;
set_disk_root(root);
}
};
}
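
The new split_pages helper decides how the pages after the root page are divided between allocator bitmap pages and data pages: one allocator page of page_size bytes carries 8*page_size allocation bits, so datapage_count = (8*pagecount*page_size)/(1 + 8*page_size) and the remainder become allocator pages. A standalone sketch of that arithmetic, assuming the 128-byte page size advertised by memory_vdisk and using std::pair in place of gp::pair, reproduces the two boundary cases the commit now checks with static_assert:

// Standalone restatement of the split_pages arithmetic; not the repository header.
#include <cstdint>
#include <utility>

constexpr uint64_t page_size = 128; // memory_vdisk::page_size()

// One allocator page holds 8*page_size allocation bits, so a group of
// (1 + 8*page_size) pages splits into 1 allocator page + 8*page_size data pages.
constexpr std::pair<uint64_t, uint64_t> split_pages(uint64_t pagecount) {
	uint64_t datapage_count = (8 * pagecount * page_size) / (1 + 8 * page_size);
	uint64_t allocator_pages = pagecount - datapage_count;
	return {allocator_pages, datapage_count};
}

// The same boundary cases the commit asserts: 8*page_size + 1 pages fit exactly one
// allocator page plus 8*page_size data pages; one extra page forces a second,
// mostly empty allocator page.
static_assert(split_pages(page_size * 8 + 1).first == 1, "ideal 1 allocator page split");
static_assert(split_pages(page_size * 8 + 2).first == 2, "worst 2 allocator page split");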

tests.cpp  +1 -0

@@ -7,6 +7,7 @@
#include "meta_test.cpp"
#include "pair_test.cpp"
#include "quotient_filter.cpp"
#include "tagfs_test.cpp"
#include "test_scaffold.h"
#include <iostream>

tests/tagfs_test.cpp  +23 -0

@@ -0,0 +1,23 @@
#include "gp/tagfs/tagfs.hpp"
#include "test_scaffold.h"
#include <random>
#include <string>
struct tagfs_test : public test_scaffold {
tagfs_test() {
name = __FILE__ ":1";
}
virtual int run() {
bool result = true;
gp::memory_vdisk<128*1025> disk;
auto fs = gp::tagfs{&disk};
return !result;
}
};
append_test dummy_56d46qds(new tagfs_test{});
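
test_scaffold.h is not part of this commit, so only the usage side of the harness is visible: the test derives from test_scaffold, sets name, overrides run(), and a namespace-scope append_test object registers it before main() runs. The sketch below is a guess at the shape such a static-registration harness typically has, not the repository's actual header; every definition in it is hypothetical:

// Hypothetical sketch of the registration idiom implied by "append_test dummy(...)".
// None of these definitions are taken from test_scaffold.h.
#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct test_scaffold {
	std::string name;
	virtual int run() = 0;
	virtual ~test_scaffold() = default;
};

inline std::vector<std::unique_ptr<test_scaffold>>& test_registry() {
	static std::vector<std::unique_ptr<test_scaffold>> tests;
	return tests;
}

// Constructing an append_test at namespace scope pushes the test into the registry
// before main() runs, which is why each test file ends with a global dummy object.
struct append_test {
	append_test(test_scaffold* t) { test_registry().emplace_back(t); }
};

int main() {
	int failures = 0;
	for (auto& t : test_registry()) {
		int r = t->run();
		std::cout << t->name << (r ? " FAILED\n" : " passed\n");
		failures += (r != 0);
	}
	return failures;
}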
