@@ -36,7 +36,8 @@ namespace gp {
 			auto it = data.begin()+offset;
 			auto ret = buffer;
 			for(auto& c : buffer) {
-				*(it++) = c;
+				auto cpy = it++;
+				*(cpy) = c;
 				if(it == data.end()) {
 					ret = buffer.slice_start(it - (data.begin() + offset));
 					break;
@@ -64,8 +65,6 @@ namespace gp {
 		constexpr static size_t page_size = gp::remove_reference<decltype(*disk)>::type::page_size();
 		constexpr static gp::array<uint8_t, page_size> empty_page{gp::zero_t{}};
 
 		struct disk_root {
 			gp::endian_wrapper<uint64_t, gp::endian::little> magic;
 			gp::endian_wrapper<uint64_t, gp::endian::little> first_allocator_page;
@@ -75,13 +74,108 @@ namespace gp {
 			gp::endian_wrapper<uint64_t, gp::endian::little> page_count;
 		};
 
+		struct tree_node {
+			// page ids per node: a full page minus the two header fields below
+			constexpr static size_t node_pages = page_size/sizeof(uint64_t) - 2;
+
+			gp::endian_wrapper<uint64_t, gp::endian::little> node_level;
+			gp::endian_wrapper<uint64_t, gp::endian::little> node_size;
+			gp::endian_wrapper<uint64_t, gp::endian::little> data_pages[node_pages];
+
+			// Fetches the node page recorded at index, allocating a fresh page when none exists yet.
+			auto allocate_or_get_node_n(tagfs& fs, uint64_t index) {
+				struct ret_struct{
+					gp::array<uint8_t, page_size> page;
+					uint64_t page_id;
+					bool must_update;
+				};
+
+				ret_struct ret;
+
+				if(auto attempt = fail_or_get_node_n(fs, index)) {
+					ret.page = attempt.value();
+					ret.page_id = 0;
+					ret.must_update = false;
+				} else {
+					ret.page_id = data_pages[index] = fs.allocate_page();
+					ret.must_update = true;
+					fs.disk->read(ret.page.as_buffer(), ret.page_id*page_size);
+				}
+
+				return ret;
+			}
+
+			auto fail_or_get_node_n(tagfs& fs, uint64_t index) -> gp::optional<gp::array<uint8_t, page_size>> {
+
+			}
+
+			// Resolves one level of the tree: interior nodes forward to a child node,
+			// leaves yield the data page id directly.
+			auto get_page_at_rec(uint64_t page_offset) {
+				struct ret_struct {
+					bool still_ongoing;
+					uint64_t next_node;
+					uint64_t next_page;
+				};
+				if(node_level) {
+					auto transmit = page_offset % node_pages;
+					auto local = page_offset / node_pages;
+					gp_config::assertion(local < node_pages, "node can't be within the page");
+					gp_config::assertion(local < node_size, "node not within the page");
+					return ret_struct{true, data_pages[local], transmit};
+				} else {
+					gp_config::assertion(page_offset < node_pages, "node can't be within the page");
+					gp_config::assertion(page_offset < node_size, "node not within the page");
+					return ret_struct{false, 0, data_pages[page_offset]};
+				}
+			}
+
+			// Walks from this node down to the data page holding page_offset.
+			uint64_t get_page_at(vdisk_ptr& disk, uint64_t page_offset) {
+				auto [ongoing, node, page] = get_page_at_rec(page_offset);
+				if(!ongoing) return page;
+				gp::array<tree_node, 1> explore;
+				do {
+					disk->read(explore.template cast<char>(), node*page_size);
+					auto [t_ongoing, t_node, t_page] = explore.begin()->get_page_at_rec(page);
+					ongoing = t_ongoing;
+					node = t_node;
+					page = t_page;
+				} while(ongoing);
+				return page;
+			}
+
+			auto set_page_at_rec(uint64_t page_offset, gp::array<uint8_t, page_size>* page_data, gp::buffer<uint64_t> page_list) {
+				struct ret_struct {
+					bool still_ongoing;
+					uint64_t next_page;
+				};
+				if(node_level) {
+					auto transmit = page_offset % node_pages;
+					auto local = page_offset / node_pages;
+
+					return ret_struct{true, transmit};
+				} else {
+					return ret_struct{false, data_pages[page_offset]};
+				}
+			}
+
+			uint64_t set_page_at(vdisk_ptr& disk, uint64_t page_offset) {
+				auto [ongoing, node, page] = get_page_at_rec(page_offset);
+				if(!ongoing) return page;
+				gp::array<tree_node, 1> explore;
+				do {
+					disk->read(explore.template cast<char>(), node*page_size);
+					auto [t_ongoing, t_node, t_page] = explore.begin()->get_page_at_rec(page);
+					ongoing = t_ongoing;
+					node = t_node;
+					page = t_page;
+				} while(ongoing);
+				return page;
+			}
+		};
+
 		struct file_description {
 			gp::endian_wrapper<uint32_t, gp::endian::little> reference_counter;
 		};
 
 	public:
 		tagfs(vdisk_ptr&& _disk)
-		: disk(gp::forward<vdisk_ptr>(disk))
+		: disk(std::move(_disk))
 		{}
 
 	private:
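
For reference, the index arithmetic that tree_node::get_page_at_rec applies at an interior node splits a flat page offset into a child slot (the quotient) and a remainder forwarded one level down. Below is a minimal standalone sketch of that split, assuming a hypothetical NODE_PAGES constant standing in for tree_node::node_pages (page_size/sizeof(uint64_t) - 2, i.e. 510 for 4096-byte pages) and using only the standard library rather than the gp types:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for tree_node::node_pages with 4096-byte pages.
constexpr std::uint64_t NODE_PAGES = 4096 / sizeof(std::uint64_t) - 2; // 510

// Mirrors the split in get_page_at_rec: the quotient picks the child slot,
// the remainder is the page offset handed to that child node.
struct step { std::uint64_t child_slot; std::uint64_t forwarded_offset; };

static step split(std::uint64_t page_offset) {
	step s{page_offset / NODE_PAGES, page_offset % NODE_PAGES};
	assert(s.child_slot < NODE_PAGES); // same bound the code asserts on `local`
	return s;
}

int main() {
	// Offset 1234 resolves to child slot 2, offset 214 within that child.
	auto s = split(1234);
	std::printf("slot=%llu offset=%llu\n",
		static_cast<unsigned long long>(s.child_slot),
		static_cast<unsigned long long>(s.forwarded_offset));
	return 0;
}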