use super::*;
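/// Allocation and layout primitives shared by the two fixed-size page
/// layouts: `Leaf` pages store a contiguous, sorted array of `Tuple<K, V>`
/// right after the (aligned) header, while `Internal` pages keep an array of
/// 64-bit entry words after the header and allocate the tuples themselves
/// downwards from the end of the page.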
pub(crate) trait Alloc {
fn can_alloc<K: Storable, V: Storable>(hdr: &Header) -> bool;
fn can_compact<K: Storable, V: Storable>(hdr: &Header) -> bool;
fn truncate_left<K: Storable, V: Storable>(
page: &mut MutPage,
n: usize,
) -> Option<(*const K, *const V)>;
fn alloc_insert<K: Storable, V: Storable>(new: &mut MutPage, n: &mut isize, r: u64) -> usize;
fn set_right_child(new: &mut MutPage, n: isize, r: u64);
fn offset_slice<'a, T: LoadPage, K: Storable, V: Storable>(
page: crate::Page<'a>,
) -> Offsets<'a>;
fn first<'a, K, V>(off: &Offsets<'a>) -> u64;
fn kv<'a, K: Storable, V: Storable>(page: crate::Page, off: u64) -> (&'a K, &'a V, u64);
}
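/// Positions of a page's entries, as handed to the generic B-tree code.
/// Leaves use a `Range` of entry indices (their entries are contiguous);
/// internal pages use a `Slice` over the 64-bit entry words stored after the
/// header.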
#[derive(Debug, Clone)]
pub enum Offsets<'a> {
Slice(&'a [u64]),
Range(core::ops::Range<usize>),
}
impl<'a> Offsets<'a> {
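    /// Splits the offsets at entry index `mid`, keeping the first `mid`
    /// entries on the left; `mid` may equal the number of entries, in which
    /// case the right half is empty.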
pub fn split_at(&self, mid: usize) -> (Self, Self) {
match self {
Offsets::Slice(s) if mid < s.len() => {
let (a, b) = s.split_at(mid);
(Offsets::Slice(a), Offsets::Slice(b))
}
Offsets::Slice(s) => {
debug_assert_eq!(mid, s.len());
(Offsets::Slice(s), Offsets::Slice(&[][..]))
}
Offsets::Range(r) => (
Offsets::Range(r.start..r.start + mid),
Offsets::Range(r.start + mid..r.end),
),
}
}
}
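// Marker types: `Leaf` and `Internal` select between the two page layouts.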
pub struct Leaf {}
pub struct Internal {}
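// Leaf layout: the header, padded to the alignment of `Tuple<K, V>`, is
// followed by a contiguous array of `n` tuples.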
impl Alloc for Leaf {
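    // One more entry fits if the aligned header plus `n + 1` tuples still fit
    // in a page. Leaves never fragment, so compacting cannot free any space
    // and `can_compact` is the same check.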
fn can_alloc<K: Storable, V: Storable>(hdr: &Header) -> bool {
let f = core::mem::size_of::<Tuple<K, V>>();
let al = core::mem::align_of::<Tuple<K, V>>();
let header_size = (HDR + al - 1) & !(al - 1);
header_size + (hdr.n() as usize + 1) * f <= PAGE_SIZE
}
fn can_compact<K: Storable, V: Storable>(hdr: &Header) -> bool {
Self::can_alloc::<K, V>(hdr)
}
fn set_right_child(_: &mut MutPage, _: isize, _: u64) {}
fn offset_slice<'a, T: LoadPage, K: Storable, V: Storable>(
page: crate::Page<'a>,
) -> Offsets<'a> {
let hdr = header(page);
Offsets::Range(0..(hdr.n() as usize))
}
fn first<'a, K, V>(off: &Offsets<'a>) -> u64 {
match off {
Offsets::Slice(_) => unreachable!(),
Offsets::Range(r) => {
let size = core::mem::size_of::<Tuple<K, V>>();
let al = core::mem::align_of::<Tuple<K, V>>();
let hdr_size = (HDR + al - 1) & !(al - 1);
(hdr_size + r.start * size) as u64
}
}
}
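    // `off` is a byte offset from the start of the page; leaves have no
    // children, so the third component is always 0.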
fn kv<'a, K: Storable, V: Storable>(page: crate::Page, off: u64) -> (&'a K, &'a V, u64) {
unsafe {
let tup = &*(page.data.as_ptr().add(off as usize) as *const Tuple<K, V>);
(&tup.k, &tup.v, 0)
}
}
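    // Shift the entries at positions `*n..` one slot to the right and return
    // the byte offset of the freed slot; the caller writes the tuple there.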
fn alloc_insert<K: Storable, V: Storable>(new: &mut MutPage, n: &mut isize, _: u64) -> usize {
let f = core::mem::size_of::<Tuple<K, V>>();
let al = core::mem::align_of::<Tuple<K, V>>();
let hdr_size = (HDR + al - 1) & !(al - 1);
let hdr_n = header_mut(new).n();
unsafe {
core::ptr::copy(
new.0.data.add(hdr_size + (*n as usize) * f),
new.0.data.add(hdr_size + (*n as usize) * f + f),
(hdr_n as usize - (*n as usize)) * f,
);
}
let hdr = header_mut(new);
hdr.set_n(hdr.n() + 1);
hdr.incr(f);
hdr_size + (*n as usize) * f
}
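    // Drop the first `n` entries. The entry at position `n - 1` is saved and
    // written back just after the last surviving entry, so the returned
    // pointers remain valid inside this page (presumably so the caller can
    // reuse that entry as a separator during merges/rebalancing).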
fn truncate_left<K: Storable, V: Storable>(
page: &mut MutPage,
n: usize,
) -> Option<(*const K, *const V)> {
let f = core::mem::size_of::<Tuple<K, V>>();
let al = core::mem::align_of::<Tuple<K, V>>();
let hdr_size = (HDR + al - 1) & !(al - 1);
let hdr = header_mut(page);
let hdr_n = hdr.n();
hdr.set_n(hdr_n - n as u16);
hdr.decr(n * f);
unsafe {
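            // Save entry `n - 1` into a stack buffer before it is overwritten.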
let mut swap: core::mem::MaybeUninit<Tuple<K, V>> = core::mem::MaybeUninit::uninit();
core::ptr::copy(
page.0.data.add(hdr_size + (n - 1) * f),
swap.as_mut_ptr() as *mut u8,
f,
);
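            // Move the surviving entries `n..hdr_n` to the front of the page.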
core::ptr::copy(
page.0.data.add(hdr_size + n * f),
page.0.data.add(hdr_size),
(hdr_n as usize - n) * f,
);
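            // Write the saved entry back, just past the last surviving one.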
core::ptr::copy(
                swap.as_ptr() as *const u8,
page.0.data.add(hdr_size + (hdr_n as usize - n) * f),
f,
);
let tup =
&*(page.0.data.add(hdr_size + (hdr_n as usize - n) * f) as *const Tuple<K, V>);
Some((&tup.k, &tup.v))
}
}
}
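// Internal layout: after the header comes an array of `n` little-endian u64
// words, one per entry. The high bits of each word point to the entry's
// right-child page, and the low 12 bits give the offset of its
// `Tuple<K, V>` within this page; tuples are allocated downwards from
// `hdr.data()`.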
impl Alloc for Internal {
fn can_alloc<K: Storable, V: Storable>(hdr: &Header) -> bool {
(HDR as usize) + (hdr.n() as usize + 1) * 8 + core::mem::size_of::<Tuple<K, V>>()
< hdr.data() as usize
}
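    // The low 12 bits of `left_page` appear to store the number of occupied
    // bytes: compaction helps as long as the header, the live data and one
    // more entry fit in a page.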
fn can_compact<K: Storable, V: Storable>(hdr: &Header) -> bool {
(HDR as usize)
+ ((hdr.left_page() & 0xfff) as usize)
+ 8
+ core::mem::size_of::<Tuple<K, V>>()
<= PAGE_SIZE
}
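    // Overwrite the child-pointer bits of entry `n` (or of the left-child
    // word just before the entry array when `n == -1`), preserving the
    // 12-bit tuple offset in the low bits.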
fn set_right_child(page: &mut MutPage, n: isize, r: u64) {
unsafe {
debug_assert_eq!(r & 0xfff, 0);
let ptr = page.0.data.offset(HDR as isize + n * 8) as *mut u64;
let off = u64::from_le(*ptr) & 0xfff;
*ptr = (r | off as u64).to_le();
}
}
    fn offset_slice<'a, T: LoadPage, K: Storable, V: Storable>(
        page: crate::Page<'a>,
    ) -> Offsets<'a> {
let hdr = header(page);
unsafe {
Offsets::Slice(core::slice::from_raw_parts(
page.data.as_ptr().add(HDR) as *const u64,
hdr.n() as usize,
))
}
}
fn first<'a, K, V>(off: &Offsets<'a>) -> u64 {
match off {
Offsets::Slice(s) => s[0],
Offsets::Range(_) => unreachable!(),
}
}
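    // Decode one entry word: the low 12 bits locate the tuple in this page,
    // the remaining bits are the right-child page pointer.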
fn kv<'a, K: Storable, V: Storable>(page: crate::Page, off: u64) -> (&'a K, &'a V, u64) {
unsafe {
let tup = &*(page.data.as_ptr().add((off & 0xfff) as usize) as *const Tuple<K, V>);
(&tup.k, &tup.v, off & !0xfff)
}
}
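    // Drop the first `n` entries by shifting the entry words left, so that
    // entry `n - 1`'s word lands in the left-child slot at `HDR - 8`. The
    // tuples themselves stay where they are, so no key/value pointer is
    // returned; only the occupied-byte count is updated.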
fn truncate_left<K: Storable, V: Storable>(
page: &mut MutPage,
n: usize,
) -> Option<(*const K, *const V)> {
let hdr_n = header_mut(page).n();
unsafe {
core::ptr::copy(
page.0.data.add(HDR + (n - 1) * 8),
page.0.data.add(HDR - 8),
(hdr_n as usize - n + 1) * 8,
);
}
let size = (8 + core::mem::size_of::<Tuple<K, V>>()) * (hdr_n as usize - n);
let hdr = header_mut(page);
hdr.set_n(hdr_n - n as u16);
hdr.set_occupied(size as u64);
None
}
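    // Reserve room for the tuple by moving `data` down, shift the entry
    // words at positions `*n..` one slot to the right, and store the packed
    // word `r | off_new` for the new entry. Returns the tuple's byte offset.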
fn alloc_insert<K: Storable, V: Storable>(new: &mut MutPage, n: &mut isize, r: u64) -> usize {
let hdr = header_mut(new);
let data = hdr.data() as u16;
let off_new = data - core::mem::size_of::<Tuple<K, V>>() as u16;
hdr.set_data(off_new);
hdr.set_n(hdr.n() + 1);
hdr.incr(8 + core::mem::size_of::<Tuple<K, V>>());
let hdr_n = hdr.n();
unsafe {
core::ptr::copy(
new.0.data.add(HDR + (*n as usize) * 8),
new.0.data.add(HDR + (*n as usize) * 8 + 8),
(hdr_n as usize - (*n as usize)) * 8,
);
let ptr = new.0.data.offset(HDR as isize + *n * 8) as *mut u64;
*ptr = (r | off_new as u64).to_le();
}
off_new as usize
}
}
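// Adapters plugging the fixed-size layouts into the `AllocWrite` interface
// shared with the unsized-page implementation; both defer to `alloc_write`
// below.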
#[async_trait]
impl<K: Storable, V: Storable> crate::btree::page_unsized::AllocWrite<K, V> for Internal {
async fn alloc_write<T: AllocPage>(
_txn: &mut T,
new: &mut MutPage,
k0: &K,
v0: &V,
l: u64,
r: u64,
n: &mut isize,
) {
alloc_write::<K, V, Self>(new, k0, v0, l, r, n)
}
fn set_left_child(new: &mut MutPage, n: isize, l: u64) {
if l > 0 {
Internal::set_right_child(new, n - 1, l);
}
}
}
#[async_trait]
impl<K: Storable, V: Storable> crate::btree::page_unsized::AllocWrite<K, V> for Leaf {
async fn alloc_write<T: AllocPage>(
_txn: &mut T,
new: &mut MutPage,
k0: &K,
v0: &V,
l: u64,
r: u64,
n: &mut isize,
) {
alloc_write::<K, V, Self>(new, k0, v0, l, r, n)
}
fn set_left_child(_new: &mut MutPage, _n: isize, l: u64) {
debug_assert_eq!(l, 0);
}
}
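// Common insertion path: reserve a slot at position `*n`, bit-copy the key
// and value into it, attach the left child if there is one, and advance the
// cursor past the new entry.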
fn alloc_write<K: Storable, V: Storable, L: Alloc>(
new: &mut MutPage,
k0: &K,
v0: &V,
l: u64,
r: u64,
n: &mut isize,
) {
let off_new = L::alloc_insert::<K, V>(new, n, r);
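    // Bit-copy the key and value into the slot reserved by `alloc_insert`.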
unsafe {
        let new_ptr = &mut *(new.0.data.add(off_new) as *mut Tuple<K, V>);
core::ptr::copy_nonoverlapping(k0, &mut new_ptr.k, 1);
core::ptr::copy_nonoverlapping(v0, &mut new_ptr.v, 1);
}
if l > 0 {
L::set_right_child(new, *n - 1, l);
}
*n += 1;
}