H3FVSQIQGFCFKCPXVOSFHP4OSUOBBURJESCZNGQTNDAAD3WQSBEQC
OTWDDJE7TTE73D6BGF4ZN6BH2NFUFLPME2VJ3CPALH463UGWLEIQC
OP6SVMOD2GTQ7VNJ4E5KYFG4MIYA7HBMXJTADALMZH4PY7OQRMZQC
WS4ZQM4RMIHZ6XZKSDQJGHN5SSSWFL4H236USOPUA33S6RC53RFAC
6DMPXOAT5GQ3BQQOMUZN2GMBQPRA4IB7CCPHTQTIFGO3KWWAKF3QC
DV4A2LR7Q5LAEGAQHLO34PZCHGJUHPAMRZFGT7GUFNKVQKPJNOYQC
QEUTVAZ4F4EJXRDMWDMYXF6XEDMX7YPVG4IIXEKPIR3K54E5W5OAC
EAAYH6BQWDK52EC5RG3BEZQU3FJPN5RRRN4U5KDKDVPKXBVJMNDAC
ONES3V466GLO5CXKRF5ENK7VFOQPWM3YXLVRGWB56V5SH3W7XNBQC
X3QVVQIS7B7L3XYZAWL3OOBUXOJ6RMOKQ45YMLLGAHYPEEKZ45ZAC
S4V4QZ5CF5LUDYWNR2UMWH6CHJDJ5FPGAZCQYM5GY7FJMJV4NN4QC
UAQX27N4PI4LHEW6LSHJETIE5MV7JTEMPLTJFYUBMYVPC43H7VOAC
YWFYZNLZ5JHLIFVBRKZK4TSWVPROUPRG77ZB5M7UHT2OKPL4ZSRQC
/// On-page representation of variable-length byte strings: a
/// little-endian `u16` length prefix followed by the raw bytes.
impl Representable for [u8] {
    /// Byte slices never reference other pages.
    type PageOffsets = core::iter::Empty<u64>;
    fn page_offsets(&self) -> Self::PageOffsets {
        core::iter::empty()
    }
    /// Alignment of the `u16` length prefix.
    const ALIGN: usize = 2;
    /// Dynamically sized: no static size.
    const SIZE: Option<usize> = None;
    /// Plain lexicographic comparison; the transaction is unused.
    fn compare<T>(&self, _: &T, b: &Self) -> core::cmp::Ordering {
        self.cmp(b)
    }
    /// On-page footprint: 2-byte length prefix + payload.
    fn size(&self) -> usize {
        2 + self.len()
    }
    /// Rebuild a `&[u8]` from its on-page representation.
    ///
    /// # Safety
    /// `p` must point to a valid, 2-aligned, length-prefixed entry
    /// whose bytes outlive `'a`.
    unsafe fn from_raw_ptr<'a, T>(_: &T, p: *const u8) -> &'a Self {
        let len = u16::from_le(*(p as *const u16));
        // A zero length, or any of the top 4 bits set, indicates a
        // corrupted entry (lengths are at most 510, see `write_to_page`).
        assert_ne!(len, 0);
        assert_eq!(len & 0xf000, 0);
        core::slice::from_raw_parts(p.add(2), len as usize)
    }
    /// Size of the entry at `p` without materialising the slice.
    unsafe fn onpage_size(p: *const u8) -> usize {
        let len = u16::from_le(*(p as *const u16));
        2 + len as usize
    }
    /// Write the length prefix and payload at `p`.
    ///
    /// # Safety
    /// `p` must be 2-aligned with at least `2 + self.len()` writable
    /// bytes.
    unsafe fn write_to_page(&self, p: *mut u8) {
        // Maximum storable length on a page; presumably longer values
        // must be stored elsewhere — TODO confirm against callers.
        assert!(self.len() <= 510);
        *(p as *mut u16) = (self.len() as u16).to_le();
        core::ptr::copy_nonoverlapping(self.as_ptr(), p.add(2), self.len())
    }
}
unsafe fn entry_size<K: Representable, V: Representable>(k: *const u8) -> usize {
let ks = (&*(k as *const K)).size();
unsafe fn entry_size<K: Representable+?Sized, V: Representable+?Sized>(k: *const u8) -> usize {
let ks = K::onpage_size(k);
use super::*;
use core::cmp::Ordering;
use log::*;
mod put;
mod alloc;
use alloc::*;
mod header;
use header::*;
mod rebalance;
use rebalance::*;
// Size in bytes of one B-tree page.
const PAGE_SIZE: usize = 4096;
/// B-tree page type for variable-sized (`?Sized`) keys and values.
/// Carries no data itself — all state lives on the page; the
/// `PhantomData` fields only record the key/value types.
#[derive(Debug)]
pub struct Page<K: ?Sized, V: ?Sized> {
    k: core::marker::PhantomData<K>,
    v: core::marker::PhantomData<V>,
}
impl<
        K: Representable + ?Sized + core::fmt::Debug,
        V: Representable + ?Sized + core::fmt::Debug,
    > super::BTreeMutPage<K, V> for Page<K, V>
{
    /// Initialise a freshly allocated page (this also marks it dirty,
    /// see `Header::init`).
    fn init(page: &mut MutPage) {
        debug!("init {:?}", page);
        let h = header_mut(page);
        h.init();
        debug!("init: {:?}", h);
    }
    /// Clear the page's dirty bit.
    fn clean(page: &mut MutPage) {
        let hdr = header_mut(page);
        hdr.clean();
    }
    /// Number of bytes the page would occupy once the modification `m`
    /// is applied.
    fn size(m: &ModifiedPage<K, V, Self>) -> usize {
        // The low 12 bits of `left_page` store the occupied-bytes
        // counter (see `Header`).
        let mut occupied = {
            let hdr = header(m.page.as_page());
            (hdr.left_page() & 0xfff) as usize
        };
        occupied += HDR;
        if m.skip_first {
            occupied -= Self::current_size(m.page.as_page(), &m.c1) as usize;
        }
        if let Some((k, v)) = m.ins {
            occupied += crate::alloc_size(k, v) as usize;
            if let Some((k, v)) = m.ins2 {
                occupied += crate::alloc_size(k, v) as usize;
            }
            // Per-entry bookkeeping: 2 bytes per offset slot in a
            // leaf, 8 bytes per (child | offset) word in an internal
            // node.
            if m.c1.is_leaf {
                if m.ins2.is_some() {
                    occupied += 4;
                } else {
                    occupied += 2;
                }
            } else if m.ins2.is_some() {
                occupied += 16
            } else {
                occupied += 8
            }
        }
        occupied
    }
    /// Insert `(k0, v0)` (and optionally `k1v1`) at cursor `c`, with
    /// children `l` and `r`. `r == 0` signals a leaf insertion.
    fn put<'a, T: AllocPage>(
        txn: &mut T,
        page: CowPage,
        mutable: bool,
        c: &Cursor,
        k0: &'a K,
        v0: &'a V,
        k1v1: Option<(&'a K, &'a V)>,
        l: u64,
        r: u64,
    ) -> Result<Put<'a, K, V>, T::Error> {
        if r == 0 {
            put::put::<_, _, _, Leaf>(txn, page, mutable, c.cur, k0, v0, k1v1, 0, 0)
        } else {
            put::put::<_, _, _, Internal>(txn, page, mutable, c.cur, k0, v0, k1v1, l, r)
        }
    }
    /// Replace the entry at `c` with `(k0, v0)`: delete, then put.
    fn replace<'a, T: AllocPage>(
        txn: &mut T,
        page: CowPage,
        mutable: bool,
        c: &Self::Cursor,
        k0: &'a K,
        v0: &'a V,
        k1v1: Option<(&'a K, &'a V)>,
        l: u64,
        r: u64,
    ) -> Result<Put<'a, K, V>, T::Error> {
        // We're never in a leaf if we're doing this.
        let new = Self::del(txn, page, c, l)?;
        Self::put(txn, new.0, mutable, c, k0, v0, k1v1, l, r)
    }
    /// Set the left child of the entry at cursor `c` to page `r`,
    /// cloning the page first if it cannot be modified in place.
    fn update_left_child<T: AllocPage>(
        txn: &mut T,
        page: CowPage,
        mutable: bool,
        c: &Self::Cursor,
        r: u64,
    ) -> Result<Ok, T::Error> {
        assert!(!c.is_leaf);
        let freed;
        debug!(
            "update_left_child: {:?} {:?} {:?}",
            page,
            mutable,
            header(page.as_page())
        );
        let page = if mutable && Self::is_dirty(page.as_page()) {
            // Mutable and dirty: modify the page in place.
            freed = 0;
            unsafe { core::mem::transmute(page) }
        } else {
            // Copy-on-write: clone all entries into a fresh page.
            let mut new = txn.alloc_page()?;
            debug!("new = {:?}", new);
            <Page<K, V> as BTreeMutPage<K, V>>::init(&mut new);
            let l = header(page.as_page()).left_page() & !0xfff;
            let hdr = header_mut(&mut new);
            hdr.set_left_page(l);
            let s = Internal::offset_slice::<K, V>(page.as_page());
            let mut n = 0;
            clone::<K, V, Internal>(page.as_page(), &mut new, s, &mut n);
            // The old page is freed; bit 0 records whether it was
            // dirty.
            let b = if Self::is_dirty(page.as_page()) { 1 } else { 0 };
            freed = page.offset | b;
            new
        };
        assert!(c.cur < c.total + 1);
        unsafe {
            // Slot `c.cur - 1` holds the left child of entry `c.cur`;
            // for `c.cur == 0` this lands on the header's `left_page`
            // field (at byte HDR - 8). The low 12 bits (entry offset,
            // or occupied counter in the header case) are preserved.
            let off = (page.0.data.add(HDR) as *mut u64).offset(c.cur as isize - 1);
            *off = (r | (u64::from_le(*off) & 0xfff)).to_le();
        }
        Ok(Ok { page, freed })
    }
    /// Delete the entry at cursor `c`; in an internal node, `l` (if
    /// non-zero) replaces the left child of the deleted entry.
    fn del<T: AllocPage>(txn: &mut T, page: crate::CowPage, c: &Cursor, l: u64) -> Result<MutPage, T::Error> {
        debug!("del: {:?} {:?}", page, l);
        assert!(c.cur < c.total);
        if Self::is_dirty(page.as_page()) {
            // In-place deletion: shift the offset array over the
            // deleted slot and decrement the occupied-bytes counter.
            let p = page.data;
            let mut page: MutPage = unsafe { core::mem::transmute(page) };
            let hdr = header_mut(&mut page);
            unsafe {
                if c.is_leaf {
                    let n = hdr.n() as usize;
                    let ptr = p.add(HDR + c.cur * 2) as *mut u16;
                    let kv_ptr = p.add((*ptr) as usize);
                    let size = entry_size::<K, V>(kv_ptr);
                    // NOTE(review): `n - c.cur` copies one u16 more
                    // than the `n - c.cur - 1` surviving offsets; the
                    // extra word stays inside the page and `set_n`
                    // below makes it unreachable — confirm harmless.
                    core::ptr::copy(ptr.offset(1), ptr, n - c.cur);
                    hdr.decr(size);
                } else {
                    let ptr = p.add(HDR + c.cur * 8) as *mut u64;
                    let off = (u64::from_le(*ptr) & 0xfff) as usize;
                    let kv_ptr = p.add(off);
                    let size = entry_size::<K, V>(kv_ptr);
                    core::ptr::copy(ptr.offset(1), ptr, hdr.n() as usize - c.cur);
                    (&mut *hdr).decr(size);
                }
            };
            if l > 0 {
                assert!(!c.is_leaf);
                // Updating the left page if necessary.
                unsafe {
                    let off = (p.add(HDR) as *mut u64).offset(c.cur as isize - 1);
                    *off = (l | (u64::from_le(*off) & 0xfff)).to_le();
                }
            }
            hdr.set_n(hdr.n() - 1);
            Ok(page)
        } else {
            // Clean (shared) page: clone the entries into a fresh
            // page instead of modifying in place.
            unsafe {
                let mut new = txn.alloc_page()?;
                <Page<K, V> as BTreeMutPage<K, V>>::init(&mut new);
                if c.is_leaf {
                    // NOTE(review): `s1` still starts at the deleted
                    // entry (`c.cur`), so this appears to clone every
                    // entry including the deleted one — confirm
                    // against upstream (a `s1.split_at(1)` step looks
                    // missing).
                    let s = Leaf::offset_slice::<K, V>(page.as_page());
                    let (s0, s1) = s.split_at(c.cur);
                    let mut n = 0;
                    clone::<K, V, Leaf>(page.as_page(), &mut new, s0, &mut n);
                    clone::<K, V, Leaf>(page.as_page(), &mut new, s1, &mut n);
                } else {
                    let s = Internal::offset_slice::<K, V>(page.as_page());
                    let (s0, s1) = s.split_at(c.cur);
                    let mut n = 0;
                    clone::<K, V, Internal>(page.as_page(), &mut new, s0, &mut n);
                    // NOTE(review): this writes through the *source*
                    // page (`page.data`, possibly shared) and
                    // overwrites the slot's low 12 offset bits;
                    // writing into `new` and preserving `& 0xfff`
                    // looks intended — confirm against upstream.
                    let off = (page.data.add(HDR) as *mut u64).offset(n - 1);
                    *off = l.to_le();
                    clone::<K, V, Internal>(page.as_page(), &mut new, s1, &mut n);
                }
                Ok(new)
            }
        }
    }
    /// Either merge the two sides of `m` into one page, rebalance
    /// them, or simply apply the pending modification when neither is
    /// worthwhile.
    fn merge_or_rebalance<'a, T: AllocPage>(
        txn: &mut T,
        m: &mut Concat<'a, K, V, Self>,
    ) -> Result<Op<'a, T, K, V>, T::Error> {
        let hdr_size = HDR;
        // Size of the middle entry once moved onto a page, including
        // its offset slot (2 bytes in leaves, 8 in internal nodes).
        let mid_size = if m.modified.c0.is_leaf {
            2 + alloc_size::<K, V>(m.mid.0, m.mid.1)
        } else {
            8 + alloc_size::<K, V>(m.mid.0, m.mid.1)
        };
        let mod_size = Self::size(&m.modified);
        let occupied = {
            let hdr = header(m.other.as_page());
            (hdr.left_page() & 0xfff) as usize
        };
        let size = mod_size + mid_size + occupied - hdr_size;
        debug!(
            "size = {:?} {:?} {:?} {:?} {:?}",
            mod_size, mid_size, occupied, hdr_size, size
        );
        if size <= PAGE_SIZE {
            // merge
            let mut new = txn.alloc_page()?;
            <Page<K, V> as BTreeMutPage<K, V>>::init(&mut new);
            unsafe {
                if m.modified.c0.is_leaf {
                    merge::<_, _, _, Leaf>(txn, &mut new, m)
                } else {
                    merge::<_, _, _, Internal>(txn, &mut new, m)
                }
            }
            // Both input pages are freed; bit 0 records dirtiness.
            let b0 = if Self::is_dirty(m.modified.page.as_page()) {
                1
            } else {
                0
            };
            let b1 = if Self::is_dirty(m.other.as_page()) {
                1
            } else {
                0
            };
            return Ok(Op::Merged {
                page: new,
                freed: [m.modified.page.offset | b0, m.other.offset | b1],
                marker: core::marker::PhantomData,
            });
        }
        let rc = <Page<K, V>>::first_cursor(m.other.as_page());
        let first_size = <Page<K, V>>::current_size(m.other.as_page(), &rc);
        // If we can't rebalance, modify and return.
        if mod_size >= PAGE_SIZE / 2 || occupied - first_size < PAGE_SIZE / 2 {
            if let Some((k, v)) = m.modified.ins {
                return Ok(Op::Put(Self::replace(
                    txn,
                    m.modified.page,
                    m.modified.mutable,
                    &m.modified.c1,
                    k,
                    v,
                    m.modified.ins2,
                    m.modified.l,
                    m.modified.r,
                )?));
            } else {
                return Ok(Op::Put(Put::Ok(Ok {
                    page: Self::del(txn, m.modified.page, &m.modified.c1, m.modified.l)?,
                    freed: 0,
                })));
            }
        }
        // Rebalance: rotate one entry towards the modified side.
        if m.mod_is_left {
            if m.modified.c0.is_leaf {
                rebalance_left::<_, _, _, Leaf>(txn, m)
            } else {
                rebalance_left::<_, _, _, Internal>(txn, m)
            }
        } else {
            if m.modified.c0.is_leaf {
                rebalance_right::<_, _, _, Leaf>(txn, m)
            } else {
                rebalance_right::<_, _, _, Internal>(txn, m)
            }
        }
    }
}
impl<K: Representable + ?Sized, V: Representable + ?Sized> super::BTreePage<K, V>
    for Page<K, V>
{
    /// The dirty bit lives in the page header.
    fn is_dirty(page: crate::Page) -> bool {
        header(page).is_dirty()
    }
    /// A cursor past its last entry means the range is exhausted.
    fn is_empty(_: crate::Page, c: &Self::Cursor) -> bool {
        c.cur >= c.total
    }
    type Cursor = Cursor;
    /// Cursor positioned on the first entry of the page.
    fn first_cursor(p: crate::Page) -> Self::Cursor {
        let hdr = header(p);
        Cursor {
            cur: 0,
            total: hdr.n() as usize,
            is_leaf: hdr.is_leaf(),
        }
    }
    /// Cursor positioned on the last entry of the page.
    /// NOTE(review): `total - 1` underflows for an empty page —
    /// presumably callers never call this on an empty page; confirm.
    fn last_cursor(p: crate::Page) -> Self::Cursor {
        let hdr = header(p);
        let total = hdr.n() as usize;
        Cursor {
            cur: total - 1,
            total,
            is_leaf: hdr.is_leaf(),
        }
    }
    /// Read the entry under the cursor without any bounds check.
    /// Returns `(key, value, right child)`; the child is 0 in leaves.
    ///
    /// # Safety
    /// `c` must point at a valid entry of `page`.
    unsafe fn unchecked_current<'a, T: LoadPage>(
        txn: &T,
        page: crate::Page<'a>,
        c: &Self::Cursor,
    ) -> (&'a K, &'a V, u64) {
        if c.is_leaf {
            // Leaves: one little-endian u16 offset per entry.
            let off = {
                u16::from_le(*(page.data.as_ptr().add(HDR + c.cur * 2) as *const u16)) as usize
            };
            let (k, v) = read::<T, K, V>(txn, page.data.as_ptr().add(off as usize));
            (K::from_raw_ptr(txn, k as *const u8), V::from_raw_ptr(txn, v as *const u8), 0)
        } else {
            // Internal nodes: one u64 word per entry, child pointer
            // in the high (4096-aligned) bits, offset in the low 12.
            let off = u64::from_le(*(page.data.as_ptr().add(HDR) as *const u64).add(c.cur));
            let (k, v) = read::<T, K, V>(txn, page.data.as_ptr().add((off & 0xfff) as usize));
            (K::from_raw_ptr(txn, k as *const u8), V::from_raw_ptr(txn, v as *const u8), off & !0xfff)
        }
    }
    /// Raw pointer to the current entry's on-page bytes.
    ///
    /// # Safety
    /// `c` must point at a valid entry of `page`.
    unsafe fn unchecked_current_ptr(page: crate::Page, c: &Self::Cursor) -> *const u8 {
        page.data.as_ptr().add(if c.is_leaf {
            u16::from_le(*(page.data.as_ptr().add(HDR + c.cur * 2) as *const u16)) as usize
        } else {
            (u64::from_le(*(page.data.as_ptr().add(HDR + c.cur * 8) as *const u64)) & 0xfff)
                as usize
        })
    }
    /// On-page footprint of the current entry, including its offset
    /// slot (2 bytes in leaves, 8 bytes in internal nodes).
    fn current_size(page: crate::Page, c: &Self::Cursor) -> usize {
        unsafe {
            if c.is_leaf {
                2 + entry_size::<K, V>(page.data.as_ptr().add(u16::from_le(
                    *(page.data.as_ptr().add(HDR) as *const u16).add(c.cur),
                )
                as usize))
            } else {
                8 + entry_size::<K, V>(page.data.as_ptr().add(
                    (u64::from_le(*(page.data.as_ptr().add(HDR) as *const u64).add(c.cur)) & 0xfff)
                        as usize,
                ))
            }
        }
    }
    /// Advance the cursor; returns `false` once past the last entry.
    fn move_next(_page: crate::Page, c: &mut Self::Cursor) -> bool {
        if c.cur < c.total {
            c.cur += 1;
            true
        } else {
            false
        }
    }
    /// Step the cursor back; returns `false` at the first entry.
    fn move_prev(_page: crate::Page, c: &mut Self::Cursor) -> bool {
        if c.cur > 0 {
            c.cur -= 1;
            true
        } else {
            false
        }
    }
    /// Left child of the current entry (slot `c.cur - 1`; for
    /// `c.cur == 0` this reads the header's leftmost-child field).
    /// Always 0 in leaves.
    fn left_child(page: crate::Page, c: &Self::Cursor) -> u64 {
        if c.is_leaf {
            0
        } else {
            let off = unsafe { *(page.data.as_ptr().add((HDR + c.cur * 8) - 8) as *const u64) };
            u64::from_le(off) & !0xfff
        }
    }
    /// Right child of the current entry. Always 0 in leaves.
    fn right_child(page: crate::Page, c: &Self::Cursor) -> u64 {
        if c.is_leaf {
            0
        } else {
            let off = unsafe { *(page.data.as_ptr().add(HDR + c.cur * 8) as *const u64) };
            u64::from_le(off) & !0xfff
        }
    }
    /// Binary-search the page for `(k0, v0)` and position `c` there.
    /// `Ok` carries the entry found, `Err` the insertion position.
    fn set_cursor<'a, T: LoadPage>(
        txn: &T,
        page: crate::Page,
        c: &mut Cursor,
        k0: &K,
        v0: Option<&V>,
    ) -> Result<(&'a K, &'a V, u64), usize> {
        unsafe {
            // `lookup` also refreshes `c.total` and `c.is_leaf`.
            let lookup = lookup(txn, page, c, k0, v0);
            debug!("set_cursor, {:?} lookup = {:?}", page.offset, lookup);
            let result;
            c.cur = match lookup {
                Ok(n) => {
                    result = if c.is_leaf {
                        let off = {
                            let off =
                                u16::from_le(*(page.data.as_ptr().add(HDR + n * 2) as *const u16));
                            (0, off)
                        };
                        Ok(Leaf::kv(txn, page, off))
                    } else {
                        let off =
                            u64::from_le(*(page.data.as_ptr().add(HDR + n * 8) as *const u64));
                        Ok(Internal::kv(txn, page, (off & !0xfff, (off & 0xfff) as u16)))
                    };
                    n
                }
                Err(n) => {
                    result = Err(n);
                    n
                }
            };
            result
        }
    }
    /// Split the cursor's range at the current position: entries
    /// before the cursor, and entries from the cursor onward.
    fn split_at(_: crate::Page, c: &Self::Cursor) -> (Self::Cursor, Self::Cursor) {
        (
            Cursor {
                cur: 0,
                total: c.cur,
                is_leaf: c.is_leaf,
            },
            *c,
        )
    }
}
/// Binary-search one page for `k0` (and `v0` when given). Leaves
/// store one `u16` offset per entry, internal nodes one `u64`
/// (child | offset) word. Returns `Ok(i)` if an equal entry exists,
/// `Err(i)` with the insertion point otherwise. Also sets `c.total`
/// and `c.is_leaf` from the page header.
///
/// # Safety
/// `page` must be a valid, initialised B-tree page for `K`/`V`.
unsafe fn lookup<T, K: Representable + ?Sized, V: Representable + ?Sized>(
    txn: &T,
    page: crate::Page,
    c: &mut Cursor,
    k0: &K,
    v0: Option<&V>,
) -> Result<usize, usize> {
    let hdr = header(page);
    c.total = hdr.n() as usize;
    c.is_leaf = hdr.is_leaf();
    if c.is_leaf {
        let s = core::slice::from_raw_parts(
            page.data.as_ptr().add(HDR) as *const u16,
            hdr.n() as usize,
        );
        if let Some(v0) = v0 {
            // Compare by key first, then by value on equal keys.
            s.binary_search_by(|&off| {
                let off = u16::from_le(off);
                let (k, v) = read::<T, K, V>(txn, page.data.as_ptr().offset(off as isize));
                let k = K::from_raw_ptr(txn, k as *const u8);
                match k.compare(txn, k0) {
                    Ordering::Equal => {
                        let v = V::from_raw_ptr(txn, v as *const u8);
                        v.compare(txn, v0)
                    },
                    cmp => cmp,
                }
            })
        } else {
            s.binary_search_by(|&off| {
                let off = u16::from_le(off);
                let (k, _) = read::<T, K, V>(txn, page.data.as_ptr().offset(off as isize));
                let k = K::from_raw_ptr(txn, k);
                k.compare(txn, k0)
            })
        }
    } else {
        let s = core::slice::from_raw_parts(
            page.data.as_ptr().add(HDR) as *const u64,
            hdr.n() as usize,
        );
        if let Some(v0) = v0 {
            s.binary_search_by(|&off| {
                // Only the low 12 bits of the word are the offset.
                let off = u64::from_le(off) & 0xfff;
                let (k, v) = read::<T, K, V>(txn, page.data.as_ptr().offset(off as isize & 0xfff));
                let k = K::from_raw_ptr(txn, k);
                match k.compare(txn, k0) {
                    Ordering::Equal => {
                        let v = V::from_raw_ptr(txn, v);
                        v.compare(txn, v0)
                    },
                    cmp => cmp,
                }
            })
        } else {
            s.binary_search_by(|&off| {
                let off = u64::from_le(off) & 0xfff;
                let (k, _) = read::<T, K, V>(txn, page.data.as_ptr().offset(off as isize & 0xfff));
                let k = K::from_raw_ptr(txn, k);
                k.compare(txn, k0)
            })
        }
    }
}
/// Position inside one page.
#[derive(Debug, Clone, Copy)]
pub struct Cursor {
    // Index of the current entry; may equal `total` (one past the
    // end).
    cur: usize,
    // Number of entries on the page when the cursor was created.
    total: usize,
    // Whether the page is a leaf.
    is_leaf: bool,
}
/// Write the modified version of `m.page` into `new`: entries before
/// the cursor, then the optional insertion(s), then the entries from
/// the cursor on (skipping the first of those when `m.skip_first`).
///
/// # Safety
/// `new` must be an initialised page with enough free space for the
/// result (the caller checks this).
unsafe fn modify<
    T: LoadPage,
    K: Representable + ?Sized + core::fmt::Debug,
    V: Representable + ?Sized + core::fmt::Debug,
    L: Alloc,
>(
    txn: &T,
    new: &mut MutPage,
    m: &mut ModifiedPage<K, V, Page<K, V>>,
    n: &mut isize,
) {
    debug!("modify {:?}", m);
    // Entries strictly before the cursor; the first keeps the page's
    // original leftmost child.
    let mut l = <Page<K, V>>::left_child(m.page.as_page(), &m.c0);
    while let Some((k, v, r)) = <Page<K, V>>::next(txn, m.page.as_page(), &mut m.c0) {
        alloc::<_, _, L>(new, k, v, l, r, n);
        l = 0;
    }
    if let Some((k, v)) = m.ins {
        if let Some((k2, v2)) = m.ins2 {
            alloc::<_, _, L>(new, k, v, 0, 0, n);
            alloc::<_, _, L>(new, k2, v2, m.l, m.r, n);
        } else {
            alloc::<_, _, L>(new, k, v, m.l, m.r, n);
        }
    } else {
        // No insertion: `m.l` becomes the left child of the next
        // copied entry instead.
        l = m.l
    }
    // Entries at/after the cursor, optionally skipping the first
    // (the deleted or replaced entry).
    let mut is_first = m.skip_first;
    while let Some((k, v, r)) = <Page<K, V>>::next(txn, m.page.as_page(), &mut m.c1) {
        if is_first {
            is_first = false;
            continue;
        }
        alloc::<_, _, L>(new, k, v, l, r, n);
        l = 0;
    }
}
/// Merge both pages of `m`, with the middle entry `m.mid` between
/// them, into `new`, in key order.
///
/// # Safety
/// `new` must be initialised and large enough for the merged content
/// (checked by `merge_or_rebalance` before calling).
unsafe fn merge<
    T: LoadPage,
    K: Representable + ?Sized + core::fmt::Debug,
    V: Representable + ?Sized + core::fmt::Debug,
    L: Alloc,
>(
    txn: &T,
    new: &mut MutPage,
    m: &mut Concat<K, V, Page<K, V>>,
) {
    let mut n = 0;
    if m.mod_is_left {
        // Modified page first, then the middle entry (whose right
        // child is the right page's leftmost child), then the rest.
        modify::<_, _, _, L>(txn, new, &mut m.modified, &mut n);
        let mut rc = <Page<K, V>>::first_cursor(m.other.as_page());
        let l = <Page<K, V>>::left_child(m.other.as_page(), &rc);
        alloc::<_, _, L>(new, m.mid.0, m.mid.1, 0, l, &mut n);
        while let Some((k, v, r)) = <Page<K, V>>::next(txn, m.other.as_page(), &mut rc) {
            alloc::<_, _, L>(new, k, v, 0, r, &mut n);
        }
    } else {
        // Unmodified left page first, then the middle entry, then
        // the modified page.
        let mut rc = <Page<K, V>>::first_cursor(m.other.as_page());
        let mut l = <Page<K, V>>::left_child(m.other.as_page(), &rc);
        while let Some((k, v, r)) = <Page<K, V>>::next(txn, m.other.as_page(), &mut rc) {
            alloc::<_, _, L>(new, k, v, l, r, &mut n);
            l = 0;
        }
        alloc::<_, _, L>(new, m.mid.0, m.mid.1, 0, 0, &mut n);
        modify::<_, _, _, L>(txn, new, &mut m.modified, &mut n);
    }
}
/// Copy the entries designated by `s` from `page` into `new` as raw
/// bytes, advancing the slot counter `n` for each entry.
fn clone<K: Representable + ?Sized, V: Representable + ?Sized, L: Alloc>(
    page: crate::Page,
    new: &mut MutPage,
    s: Offsets<L::Offset>,
    n: &mut isize,
) {
    match s {
        Offsets::Slice(s) => {
            debug!("clone: {:?}", s);
            for off in s.iter() {
                // Decode the slot into (right child, entry offset).
                let (r, off): (u64, u16) = (*off).into();
                debug!("clone: {:?} {:?}", r, off);
                unsafe {
                    let ptr = page.data.as_ptr().add(off as usize);
                    debug!("ptr = {:?}", core::slice::from_raw_parts(ptr, 24));
                    let size = entry_size::<K, V>(ptr);
                    debug!("size: {:?}", size);
                    let hdr = header_mut(new);
                    let off_new = L::alloc::<K, V>(hdr, size, K::ALIGN.max(V::ALIGN));
                    debug!("off_new: {:?}", off_new);
                    // Raw byte copy of the whole entry (key + value).
                    core::ptr::copy_nonoverlapping(ptr, new.0.data.offset(off_new as isize), size);
                    L::set_offset(new, *n, r, off_new);
                }
                *n += 1;
            }
        }
    }
}
/// Allocate room for `(k0, v0)` on `new` at slot `*n`, write the key
/// then the value (aligned to `V::ALIGN`), store `l` as this entry's
/// left child when non-zero, and advance `*n`.
fn alloc<K: Representable + ?Sized, V: Representable + ?Sized, L: Alloc>(
    new: &mut MutPage,
    k0: &K,
    v0: &V,
    l: u64,
    r: u64,
    n: &mut isize,
) {
    let size = alloc_size(k0, v0);
    let off_new = L::alloc_insert::<K, V>(new, n, size, r);
    unsafe {
        let new_ptr = new.0.data.add(off_new as usize);
        k0.write_to_page(new_ptr);
        let ks = k0.size();
        // Round the value's position up to V's alignment.
        let v_ptr = new_ptr.add((ks + V::ALIGN - 1) & !(V::ALIGN - 1));
        v0.write_to_page(v_ptr);
    }
    if l > 0 {
        // An entry's left child is the right child of slot `n - 1`
        // (or the page's leftmost child, for slot -1).
        L::set_right_child(new, *n - 1, l);
    }
    *n += 1;
}
use super::*;
/// Rebalance towards the (modified) left page: move the middle entry
/// `m.mid` down into the left page, and rotate the first entry of the
/// right page (`m.other`) up to replace it in the parent.
pub(crate) fn rebalance_left<
    'a,
    T: AllocPage,
    K: Representable + ?Sized + core::fmt::Debug,
    V: Representable + ?Sized + core::fmt::Debug,
    L: Alloc,
>(
    txn: &mut T,
    m: &mut Concat<'a, K, V, Page<K, V>>,
) -> Result<Op<'a, T, K, V>, T::Error> {
    assert!(m.mod_is_left);
    // First element of the right page. We'll rotate it to the left
    // page.
    let rc = <Page<K, V>>::first_cursor(m.other.as_page());
    let rl = <Page<K, V>>::left_child(m.other.as_page(), &rc);
    let (k, v, r) = unsafe { <Page<K, V>>::unchecked_current(txn, m.other.as_page(), &rc) };
    let mut freed = [0, 0];
    // The modified page is always replaced below; record it as freed
    // (bit 0 = was dirty).
    let b = if header(m.modified.page.as_page()).is_dirty() {
        1
    } else {
        0
    };
    freed[0] = m.modified.page.offset | b;
    // Apply the pending modification (replacement or deletion) to
    // the left page.
    let mut new_left = if let Some((k, v)) = m.modified.ins {
        if let Put::Ok(Ok { page, .. }) = <Page<K, V>>::replace(
            txn,
            m.modified.page,
            m.modified.mutable,
            &m.modified.c1,
            k,
            v,
            m.modified.ins2,
            m.modified.l,
            m.modified.r,
        )? {
            page
        } else {
            unreachable!()
        }
    } else {
        <Page<K, V>>::del(txn, m.modified.page, &m.modified.c1, m.modified.l)?
    };
    // Append the middle entry to the left page; its right child is
    // the rotated entry's former left child.
    let mut n = header(new_left.0.as_page()).n() as isize;
    alloc::<K, V, L>(&mut new_left, m.mid.0, m.mid.1, 0, rl, &mut n);
    // Remove the rotated entry from the right page. The transmutes
    // extend k/v's lifetime to 'a — NOTE(review): sound only while
    // the entry's bytes in `m.other` stay live (freed pages must not
    // be reused within this step); confirm.
    let (new_right, k, v): (_, &K, &V) = unsafe {
        (
            <Page<K, V>>::del(txn, m.other, &rc, r)?,
            core::mem::transmute(k),
            core::mem::transmute(v),
        )
    };
    // If deletion cloned the right page, the old one is freed too.
    if new_right.0.offset != m.other.offset {
        let hdr = &*header(m.other.as_page());
        let b = if hdr.is_dirty() { 1 } else { 0 };
        freed[1] = m.other.offset | b
    }
    Ok(Op::Rebalanced {
        l: new_left.0.offset,
        r: new_right.0.offset,
        k: unsafe { core::mem::transmute(k) },
        v: unsafe { core::mem::transmute(v) },
        freed,
    })
}
/// Mirror image of `rebalance_left`: rotate the last entry of the
/// left page (`m.other`) up into the parent, and move the middle
/// entry `m.mid` down into the (modified) right page.
pub(crate) fn rebalance_right<
    'a,
    T: AllocPage,
    K: Representable + ?Sized + core::fmt::Debug,
    V: Representable + ?Sized + core::fmt::Debug,
    L: Alloc,
>(
    txn: &mut T,
    m: &mut Concat<'a, K, V, Page<K, V>>,
) -> Result<Op<'a, T, K, V>, T::Error> {
    assert!(!m.mod_is_left);
    // Take the last element of the left page.
    let lc = <Page<K, V>>::last_cursor(m.other.as_page());
    let (k0, v0, r0) = unsafe { <Page<K, V>>::unchecked_current(txn, m.other.as_page(), &lc) };
    // First element of the right page.
    let rc = <Page<K, V>>::first_cursor(m.modified.page.as_page());
    let rl = <Page<K, V>>::left_child(m.modified.page.as_page(), &rc);
    let mut freed = [0, 0];
    // The modified page is always replaced below; record it as freed
    // (bit 0 = was dirty).
    let b = if header(m.modified.page.as_page()).is_dirty() {
        1
    } else {
        0
    };
    freed[0] = m.modified.page.offset | b;
    // Apply the pending modification (replacement or deletion) to
    // the right page.
    let mut new_right = if let Some((k, v)) = m.modified.ins {
        if let Put::Ok(Ok { page, .. }) = <Page<K, V>>::replace(
            txn,
            m.modified.page,
            m.modified.mutable,
            &m.modified.c1,
            k,
            v,
            m.modified.ins2,
            m.modified.l,
            m.modified.r,
        )? {
            page
        } else {
            unreachable!()
        }
    } else {
        <Page<K, V>>::del(txn, m.modified.page, &m.modified.c1, m.modified.l)?
    };
    // Prepend the middle entry to the right page; its children are
    // the rotated entry's right child and the right page's former
    // leftmost child.
    if let Put::Ok(Ok { page, .. }) =
        <Page<K, V>>::put(txn, new_right.0, true, &rc, m.mid.0, m.mid.1, None, r0, rl)?
    {
        new_right = page
    } else {
        unreachable!()
    };
    // Remove the rotated entry from the left page.
    let new_left = <Page<K, V>>::del(txn, m.other, &lc, 0)?;
    if new_left.0.offset != m.other.offset {
        let hdr = &*header(m.other.as_page());
        let b = if hdr.is_dirty() { 1 } else { 0 };
        freed[1] = m.other.offset | b
    }
    Ok(Op::Rebalanced {
        l: new_left.0.offset,
        r: new_right.0.offset,
        // NOTE(review): these transmutes extend k0/v0's lifetime;
        // they point into `m.other`'s old page — confirm that page
        // is not reused before the caller is done with them.
        k: unsafe { core::mem::transmute(k0) },
        v: unsafe { core::mem::transmute(v0) },
        freed,
    })
}
use super::*;
/// Insert `(k0, v0)` (and optionally `k1v1`) at slot `u` of `page`,
/// choosing between: in-place allocation (page mutable, dirty, and
/// has room), compaction into a fresh page (live bytes fit), or
/// splitting the page in two.
pub(crate) fn put<
    'a,
    T: AllocPage,
    K: Representable + ?Sized + core::fmt::Debug,
    V: Representable + ?Sized + core::fmt::Debug,
    L: Alloc,
>(
    txn: &mut T,
    page: CowPage,
    mutable: bool,
    u: usize,
    k0: &'a K,
    v0: &'a V,
    k1v1: Option<(&'a K, &'a V)>,
    l: u64,
    r: u64,
) -> Result<Put<'a, K, V>, T::Error> {
    // Total data bytes needed for the insertion(s).
    let size = alloc_size(k0, v0)
        + if let Some((k1, v1)) = k1v1 {
            alloc_size(k1, v1)
        } else {
            0
        };
    let hdr = header(page.as_page());
    let is_dirty = hdr.is_dirty();
    debug!("put {:?} {:?} {:?}", u, mutable, is_dirty);
    if mutable && is_dirty && L::can_alloc::<K, V>(header(page.as_page()), size) {
        // Fast path: allocate directly on the page.
        debug!("can alloc");
        let mut page = unsafe { core::mem::transmute(page) };
        let mut n = u as isize;
        if let Some((k1, v1)) = k1v1 {
            alloc::<_, _, L>(&mut page, k0, v0, 0, 0, &mut n);
            alloc::<_, _, L>(&mut page, k1, v1, l, r, &mut n);
        } else {
            alloc::<_, _, L>(&mut page, k0, v0, l, r, &mut n);
        }
        Ok(Put::Ok(Ok { page, freed: 0 }))
    } else if L::can_compact::<K, V>(hdr, size) {
        // The live entries plus the insertion fit on one page: clone
        // into a fresh page, compacting away dead bytes.
        let mut new = txn.alloc_page()?;
        debug!("can compact: {:?}", new);
        <Page<K, V> as BTreeMutPage<K, V>>::init(&mut new);
        L::set_right_child(&mut new, -1, hdr.left_page() & !0xfff);
        let s = L::offset_slice::<K, V>(page.as_page());
        let (s0, s1) = s.split_at(u as usize);
        let mut n = 0;
        clone::<K, V, L>(page.as_page(), &mut new, s0, &mut n);
        if let Some((k1, v1)) = k1v1 {
            alloc::<_, _, L>(&mut new, k0, v0, 0, 0, &mut n);
            alloc::<_, _, L>(&mut new, k1, v1, l, r, &mut n);
        } else {
            alloc::<_, _, L>(&mut new, k0, v0, l, r, &mut n);
        }
        clone::<K, V, L>(page.as_page(), &mut new, s1, &mut n);
        // The old page is freed; bit 0 records whether it was dirty.
        let b0 = if is_dirty { 1 } else { 0 };
        return Ok(Put::Ok(Ok {
            page: new,
            freed: page.offset | b0,
        }));
    } else {
        // Doesn't fit even when compacted: split the page.
        debug!("split");
        return split_unsized::<_, _, _, L>(txn, page.as_page(), u, k0, v0, k1v1, l, r);
    }
}
/// Split `page` into `left` and `right` around a median entry,
/// inserting `(k0, v0)` (and optionally `k1v1`) at slot `u` along
/// the way. Returns `Put::Split` carrying the median key/value.
fn split_unsized<
    'a,
    T: AllocPage,
    K: Representable + ?Sized + core::fmt::Debug,
    V: Representable + ?Sized + core::fmt::Debug,
    L: Alloc,
>(
    txn: &mut T,
    page: crate::Page,
    u: usize,
    k0: &'a K,
    v0: &'a V,
    k1v1: Option<(&'a K, &'a V)>,
    l0: u64,
    r0: u64,
) -> Result<Put<'a, K, V>, T::Error> {
    let hdr = header(page);
    let n0 = hdr.n();
    let mut left = txn.alloc_page()?;
    <Page<K, V> as BTreeMutPage<K, V>>::init(&mut left);
    let mut right = txn.alloc_page()?;
    <Page<K, V> as BTreeMutPage<K, V>>::init(&mut right);
    let left_child = header(page).left_page() & !0xfff;
    L::set_right_child(&mut left, -1, left_child);
    // Key/value pointers of the median (split) entry.
    let mut split = (core::ptr::null(), core::ptr::null());
    let mut current_page = &mut left;
    let mut n = 0;
    // NOTE(review): the offset array is read as `LeafOffset` (u16)
    // slots even when `L` is `Internal` (u64 slots) — confirm this
    // path is only reached for leaves, or that this is a defect.
    let s = unsafe {
        core::slice::from_raw_parts(
            page.data.as_ptr().add(HDR) as *const LeafOffset,
            n0 as usize,
        )
    };
    let mut total = HDR;
    let mut is_left = true;
    for (uu, off) in s.iter().enumerate() {
        let (r, off): (u64, u16) = (*off).into();
        unsafe {
            let ptr = page.data.as_ptr().add(off as usize);
            let size = entry_size::<K, V>(ptr) + L::extra_size::<K, V>();
            total += size;
            if is_left && total >= PAGE_SIZE / 2 {
                // Halfway point: this entry becomes the median;
                // everything after goes to `right`.
                is_left = false;
                split = read::<T, K, V>(txn, ptr);
                L::set_right_child(&mut right, -1, r);
                current_page = &mut right;
                n = 0;
            } else {
                let off_new = L::alloc::<K, V>(header_mut(current_page), size, K::ALIGN.max(V::ALIGN));
                core::ptr::copy_nonoverlapping(
                    ptr,
                    current_page.0.data.offset(off_new as isize),
                    size,
                );
                L::set_offset(current_page, n, r, off_new);
                n += 1;
            }
        }
        if uu == u {
            // Insert the new entry/entries right after slot `u`.
            // NOTE(review): if `u == n0` (append past the last
            // entry) this never runs — presumably callers guarantee
            // `u < n0` on the split path; confirm.
            if let Some((k1, v1)) = k1v1 {
                alloc::<K, V, L>(current_page, k0, v0, 0, l0, &mut n);
                total += alloc_size(k0, v0) + L::extra_size::<K, V>();
                alloc::<K, V, L>(current_page, k1, v1, 0, r0, &mut n);
                total += alloc_size(k1, v1) + L::extra_size::<K, V>();
            } else {
                alloc::<K, V, L>(current_page, k0, v0, l0, r0, &mut n);
                total += alloc_size(k0, v0) + L::extra_size::<K, V>();
            }
        }
    }
    // A median must have been chosen, or the page didn't need
    // splitting in the first place.
    assert!(!split.0.is_null());
    Ok(Put::Split {
        split_key: unsafe { K::from_raw_ptr(txn, split.0) },
        split_value: unsafe { V::from_raw_ptr(txn, split.1) },
        left,
        right,
        // The old page is freed; bit 0 records whether it was dirty.
        freed: if hdr.is_dirty() {
            page.offset | 1
        } else {
            page.offset
        },
    })
}
/// On-page header of a B-tree page.
///
/// Field packing (all stored little-endian):
/// - `n`: entry count in bits 4 and up, dirty flag in bit 0.
/// - `data`: offset of the start of the data area, which grows
///   downwards from the end of the page (4096).
/// - `crc`: checksum slot (kept for layout; not updated here).
/// - `left_page`: leftmost child page in the high (4096-aligned)
///   bits, number of occupied data bytes in the low 12 bits. A page
///   whose `left_page` fits entirely in 12 bits has no child, i.e.
///   it is a leaf.
#[derive(Debug)]
#[repr(C)]
pub struct Header {
    n: u16,
    data: u16,
    crc: u32,
    left_page: u64,
}
impl Header {
    /// Reset the header for an empty page, marking it dirty.
    pub fn init(&mut self) {
        self.n = (1u16).to_le(); // dirty page
        self.data = 4096_u16.to_le();
        self.crc = 0;
        self.left_page = 0;
    }
    /// Number of entries on the page.
    pub fn n(&self) -> u16 {
        u16::from_le(self.n) >> 4
    }
    /// Set the number of entries, preserving the dirty bit.
    pub fn set_n(&mut self, n: u16) {
        let dirty = u16::from_le(self.n) & 1;
        self.n = ((n << 4) | dirty).to_le()
    }
    /// Whether the page has been modified since it was last cleaned.
    pub fn is_dirty(&self) -> bool {
        u16::from_le(self.n) & 1 != 0
    }
    pub fn left_page(&self) -> u64 {
        u64::from_le(self.left_page)
    }
    pub fn set_left_page(&mut self, l: u64) {
        self.left_page = l.to_le()
    }
    /// Offset of the (downward-growing) data area.
    pub fn data(&self) -> u16 {
        u16::from_le(self.data)
    }
    pub fn set_data(&mut self, d: u16) {
        self.data = d.to_le()
    }
    /// Decrease the occupied-bytes counter by `s`.
    pub fn decr(&mut self, s: usize) {
        self.left_page = (self.left_page() - s as u64).to_le();
    }
    /// Overwrite the occupied-bytes counter (low 12 bits), keeping
    /// the leftmost-child pointer intact.
    pub fn set_occupied(&mut self, size: u64) {
        self.left_page = ((self.left_page() & !0xfff) | size).to_le()
    }
    /// Increase the occupied-bytes counter by `s`.
    pub fn incr(&mut self, s: usize) {
        self.left_page = (self.left_page() + s as u64).to_le();
    }
    /// A page with no leftmost child (high bits all zero) is a leaf.
    pub fn is_leaf(&self) -> bool {
        u64::from_le(self.left_page) <= 0xfff
    }
    /// Clear the dirty bit (bit 0 of `n`).
    ///
    /// BUG FIX: this previously masked with `0xfff`, which keeps
    /// bit 0 (so the page stayed dirty) and truncates the entry
    /// count to 8 bits; `& !1` clears exactly the dirty flag.
    pub fn clean(&mut self) {
        self.n = (u16::from_le(self.n) & !1).to_le()
    }
}
// Size in bytes of the on-page header.
pub const HDR: usize = core::mem::size_of::<Header>();
/// View the header of a read-only page.
pub fn header<'a>(page: crate::Page<'a>) -> &'a Header {
    unsafe { &*(page.data.as_ptr() as *const Header) }
}
/// View the header of a mutable page.
pub fn header_mut(page: &mut crate::MutPage) -> &mut Header {
    unsafe { &mut *(page.0.data as *mut Header) }
}
use super::*;
/// Per-page-kind allocation strategy, implemented by `Leaf` (2-byte
/// offset slots, no children) and `Internal` (8-byte child | offset
/// words).
pub(crate) trait Alloc {
    /// Bookkeeping bytes added per entry (one offset-array slot).
    fn extra_size<K: Representable + ?Sized, V: Representable + ?Sized>() -> usize;
    /// Can one more slot plus `size` data bytes fit without
    /// compacting the page?
    fn can_alloc<K: Representable + ?Sized, V: Representable + ?Sized>(hdr: &Header, size: usize) -> bool;
    /// Would the live entries plus `size` extra bytes fit after
    /// compaction into a fresh page?
    fn can_compact<K: Representable + ?Sized, V: Representable + ?Sized>(hdr: &Header, size: usize) -> bool;
    /// Reserve `size` bytes (aligned to `align`) from the data area;
    /// returns the on-page offset.
    fn alloc<K: Representable+?Sized, V: Representable+?Sized>(hdr: &mut Header, size: usize, align: usize) -> u16;
    // n = number of items to remove
    fn truncate_left<T, K: Representable + ?Sized, V: Representable + ?Sized>(
        txn: &T,
        page: &mut MutPage,
        n: usize,
    ) -> Option<(*const K, *const V)>;
    /// Allocate `size` data bytes and open a hole for a new slot at
    /// position `n` of the offset array; returns the data offset.
    fn alloc_insert<K: Representable + ?Sized, V: Representable + ?Sized>(
        new: &mut MutPage,
        n: &mut isize,
        size: usize,
        r: u64,
    ) -> usize;
    /// Write slot `n` (offset, plus right child `r` for internal
    /// nodes).
    fn set_offset(new: &mut MutPage, n: isize, r: u64, off: u16);
    /// Write the right child of slot `n` (no-op in leaves; slot -1 is
    /// the page's leftmost child).
    fn set_right_child(new: &mut MutPage, n: isize, r: u64);
    /// Decoded form of one offset-array slot: `(child, offset)`.
    type Offset: Into<(u64, u16)> + Copy + core::fmt::Debug;
    /// The whole offset array of `page`.
    fn offset_slice<'a, K: Representable + ?Sized, V: Representable + ?Sized>(
        page: crate::Page<'a>,
    ) -> Offsets<'a, Self::Offset>;
    /// Dereference a decoded `(child, offset)` pair into key/value
    /// references (plus the child for internal nodes).
    fn kv<'a, T, K: Representable + ?Sized, V: Representable + ?Sized>(
        txn: &T,
        page: crate::Page,
        off: (u64, u16),
    ) -> (&'a K, &'a V, u64);
}
/// The offsets of the entries stored on a page; currently a single
/// variant wrapping a borrow of the page's offset array.
#[derive(Debug, Clone)]
pub enum Offsets<'a, A> {
    Slice(&'a [A]),
}
impl<'a, A: Into<(u64, u16)> + Copy> Offsets<'a, A> {
    /// Split the offsets in two at `mid`. `mid` may equal the length
    /// (checked in debug builds), in which case the second half is
    /// empty.
    pub fn split_at(&self, mid: usize) -> (Self, Self) {
        match self {
            Offsets::Slice(slice) => {
                if mid >= slice.len() {
                    debug_assert_eq!(mid, slice.len());
                    (Offsets::Slice(slice), Offsets::Slice(&[][..]))
                } else {
                    debug!("split_at: {:?} {:?}", slice.len(), mid);
                    let (head, tail) = slice.split_at(mid);
                    (Offsets::Slice(head), Offsets::Slice(tail))
                }
            }
        }
    }
}
// Marker type: allocation strategy for leaf pages (2-byte slots).
pub struct Leaf {}
// Marker type: allocation strategy for internal pages (8-byte slots).
pub struct Internal {}
impl Alloc for Leaf {
    /// Each leaf entry costs one u16 offset slot.
    fn extra_size<K: Representable + ?Sized, V: Representable + ?Sized>() -> usize {
        2
    }
    /// Is there room for one more offset slot plus `size` data bytes
    /// between the offset array and the (downward-growing) data area?
    fn can_alloc<K: Representable + ?Sized, V: Representable + ?Sized>(hdr: &Header, size: usize) -> bool {
        debug!("can_alloc: {:?} {:?} {:?}", hdr.n(), size, hdr.data());
        HDR + (hdr.n() as usize) * 2 + 2 + size <= hdr.data() as usize
    }
    /// Would the live bytes plus one slot and `size` data bytes fit
    /// on a freshly compacted page?
    fn can_compact<K: Representable + ?Sized, V: Representable + ?Sized>(hdr: &Header, size: usize) -> bool {
        debug!("can_compact: {:?} {:?}", hdr.left_page(), size);
        HDR + ((hdr.left_page() & 0xfff) as usize) + 2 + size <= 4096
    }
    /// Drop the first `n` entries of the page, shifting the surviving
    /// offsets to the front of the offset array.
    fn truncate_left<T, K: Representable + ?Sized, V: Representable + ?Sized>(
        _txn: &T,
        page: &mut MutPage,
        n: usize,
    ) -> Option<(*const K, *const V)> {
        debug!("truncate_left {:?} {:?}", page, n);
        let hdr_n = header_mut(page).n();
        // BUG FIX: the freed size must be computed from the offsets
        // of the *deleted* entries, i.e. before the copy below
        // overwrites the first `n` slots with the surviving offsets.
        let deleted_size: u64 = {
            let deleted_offsets = unsafe {
                core::slice::from_raw_parts(page.0.data.add(HDR) as *const u16, n)
            };
            deleted_offsets
                .iter()
                .map(|&off| {
                    2 + unsafe {
                        entry_size::<K, V>(page.0.data.add(u16::from_le(off) as usize))
                    } as u64
                })
                .sum()
        };
        // Shift the surviving offsets to the front of the array.
        unsafe {
            core::ptr::copy(
                page.0.data.add(HDR + n * 2),
                page.0.data.add(HDR),
                (hdr_n as usize - n) * 2,
            );
        }
        let hdr = header_mut(page);
        hdr.set_n(hdr_n - n as u16);
        hdr.decr(deleted_size as usize);
        None
    }
    /// Carve `size` bytes off the data area, rounded down to `align`
    /// (a power of two), and account for the new entry.
    fn alloc<K: Representable+?Sized, V: Representable+?Sized>(hdr: &mut Header, size: usize, align: usize) -> u16 {
        debug!("alloc = {:?} {:?}", hdr.data(), size);
        let mut data = hdr.data() - size as u16;
        data &= !((align - 1) as u16);
        hdr.set_data(data);
        hdr.set_n(hdr.n() + 1);
        hdr.incr(size + Self::extra_size::<K, V>());
        data
    }
    /// Allocate `size` data bytes and open a hole at slot `*n` of the
    /// offset array; returns the data offset.
    fn alloc_insert<K: Representable + ?Sized, V: Representable + ?Sized>(
        new: &mut MutPage,
        n: &mut isize,
        size: usize,
        _: u64,
    ) -> usize {
        let (hdr_n, off_new) = {
            let hdr = header_mut(new);
            (
                hdr.n(),
                Self::alloc::<K, V>(&mut *hdr, size, K::ALIGN.max(V::ALIGN)),
            )
        };
        // Making space for the new offset
        unsafe {
            core::ptr::copy(
                new.0.data.add(HDR + (*n as usize) * 2),
                new.0.data.add(HDR + (*n as usize) * 2 + 2),
                (hdr_n as usize - (*n as usize)) * 2,
            );
        }
        Self::set_offset(new, *n, 0, off_new);
        off_new as usize
    }
    /// Store `off` (little-endian) in slot `n`; leaves have no
    /// children, so the `u64` argument is ignored.
    fn set_offset(new: &mut MutPage, n: isize, _: u64, off: u16) {
        unsafe {
            let ptr = new.0.data.offset(HDR as isize + n * 2) as *mut u16;
            *ptr = off.to_le();
        }
    }
    /// Leaves have no children: nothing to do.
    fn set_right_child(_: &mut MutPage, _: isize, _: u64) {}
    type Offset = LeafOffset;
    /// The page's whole offset array, viewed as `LeafOffset` slots.
    fn offset_slice<'a, K: Representable + ?Sized, V: Representable + ?Sized>(
        page: crate::Page<'a>,
    ) -> Offsets<'a, Self::Offset> {
        let hdr = header(page);
        unsafe {
            Offsets::Slice(core::slice::from_raw_parts(
                page.data.as_ptr().add(HDR) as *const LeafOffset,
                hdr.n() as usize,
            ))
        }
    }
    /// Dereference an entry offset into key/value references; the
    /// child is always 0 in a leaf.
    fn kv<'a, T, K: Representable + ?Sized, V: Representable + ?Sized>(
        txn: &T,
        page: crate::Page,
        (_, off): (u64, u16),
    ) -> (&'a K, &'a V, u64) {
        unsafe {
            let (k, v) = read::<T, K, V>(txn, page.data.as_ptr().add(off as usize));
            (K::from_raw_ptr(txn, k), V::from_raw_ptr(txn, v), 0)
        }
    }
}
impl Alloc for Internal {
fn extra_size<K: Representable + ?Sized, V: Representable + ?Sized>() -> usize {
8
}
fn can_alloc<K: Representable + ?Sized, V: Representable + ?Sized>(hdr: &Header, size: usize) -> bool {
(HDR as usize) + (hdr.n() as usize) * 8 + 8 + size < hdr.data() as usize
}
fn can_compact<K: Representable + ?Sized, V: Representable + ?Sized>(hdr: &Header, size: usize) -> bool {
debug!("can_compact: {:?} {:?}", hdr.left_page(), size);
(HDR as usize) + ((hdr.left_page() & 0xfff) as usize) + 8 + size < 4096
}
fn truncate_left<T, K: Representable + ?Sized, V: Representable + ?Sized>(
_txn: &T,
page: &mut MutPage,
n: usize,
) -> Option<(*const K, *const V)> {
// The following line copies the left child of the last entry
// (hence the `- 8` and `- 1`)
let hdr_n = header_mut(page).n();
unsafe {
core::ptr::copy(
page.0.data.add(HDR + (n - 1) * 8),
page.0.data.add(HDR - 8),
(hdr_n as usize - n + 1) * 8,
);
}
let size = {
let offsets = unsafe {
core::slice::from_raw_parts(
page.0.data.add(HDR + n * 8) as *const u16,
hdr_n as usize - n as usize,
)
};
offsets
.iter()
.map(|&off| {
8 + unsafe { entry_size::<K, V>(page.0.data.add(off as usize)) } as u64
})
.sum()
};
let hdr = header_mut(page);
hdr.set_n(hdr_n - n as u16);
debug!("truncate_left {:?} {:?}", hdr.left_page(), size);
hdr.set_occupied(size);
None
}
fn alloc<K: Representable+?Sized, V: Representable+?Sized>(hdr: &mut Header, size: usize, align: usize) -> u16 {
let mut data = hdr.data() - size as u16;
data -= data % (align as u16);
hdr.set_data(data);
hdr.set_n(hdr.n() + 1);
hdr.incr(size + Self::extra_size::<K, V>());
data
}
fn alloc_insert<K: Representable + ?Sized, V: Representable + ?Sized>(
new: &mut MutPage,
n: &mut isize,
size: usize,
r: u64,
) -> usize {
let (hdr_n, off_new) = {
let hdr = header_mut(new);
(
hdr.n(),
Self::alloc::<K, V>(&mut *hdr, size, K::ALIGN.max(V::ALIGN)),
)
};
unsafe {
core::ptr::copy(
new.0.data.add(HDR + (*n as usize) * 8),
new.0.data.add(HDR + (*n as usize) * 8 + 8),
(hdr_n as usize - (*n as usize)) * 8,
);
}
Self::set_offset(new, *n, r, off_new);
off_new as usize
}
fn set_offset(new: &mut MutPage, n: isize, r: u64, off: u16) {
unsafe {
let ptr = new.0.data.offset(HDR as isize + n * 8) as *mut u64;
*ptr = (r | off as u64).to_le();
}
}
/// Replaces the child-page pointer of offset-table entry `n` (every bit
/// above the low 12) while keeping the entry's in-page offset intact.
/// `n == -1` targets the slot just before the table (the left child).
fn set_right_child(page: &mut MutPage, n: isize, r: u64) {
    unsafe {
        let slot = page.0.data.offset(HDR as isize + 8 * n) as *mut u64;
        let offset_bits = u64::from_le(*slot) & 0xfff;
        slot.write((r | offset_bits).to_le());
    }
}
// Internal nodes use 8-byte offset entries: the upper bits hold the
// right-child page pointer, the low 12 bits the in-page offset.
type Offset = InternalOffset;
/// Exposes the page's offset table as a slice of 8-byte
/// `InternalOffset` entries starting right after the header.
fn offset_slice<'a, K: Representable + ?Sized, V: Representable + ?Sized>(
    page: crate::Page<'a>,
) -> Offsets<'a, Self::Offset> {
    let n = header(page).n() as usize;
    unsafe {
        let first = page.data.as_ptr().add(HDR) as *const InternalOffset;
        Offsets::Slice(core::slice::from_raw_parts(first, n))
    }
}
/// Decodes the key/value pair stored at in-page offset `off` and
/// returns it together with the right-child page pointer `r`.
fn kv<'a, T, K: Representable + ?Sized, V: Representable + ?Sized>(
    txn: &T,
    page: crate::Page,
    (r, off): (u64, u16),
) -> (&'a K, &'a V, u64) {
    unsafe {
        let entry = page.data.as_ptr().add(off as usize);
        let (kp, vp) = read::<T, K, V>(txn, entry);
        let key = K::from_raw_ptr(txn, kp);
        let value = V::from_raw_ptr(txn, vp);
        (key, value, r)
    }
}
}
/// Offset of an entry in a leaf page.
///
/// Leaf entries have no child pointers, so only the 16-bit in-page
/// offset is stored.
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct LeafOffset(u16);

// Idiomatic `From` instead of a hand-written `Into`: the std blanket
// impl still provides `Into<(u64, u16)>` for existing callers.
impl From<LeafOffset> for (u64, u16) {
    fn from(o: LeafOffset) -> (u64, u16) {
        // Leaves have no right child, hence the 0 page pointer.
        (0, o.0)
    }
}

impl From<LeafOffset> for usize {
    fn from(o: LeafOffset) -> usize {
        o.0 as usize
    }
}
/// Offset of an entry in an internal page: the right-child page pointer
/// in the bits above the low 12, the in-page offset in the low 12 bits.
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct InternalOffset(u64);

// Idiomatic `From` instead of a hand-written `Into`: the std blanket
// impl still provides `Into<(u64, u16)>` for existing callers.
impl From<InternalOffset> for (u64, u16) {
    fn from(o: InternalOffset) -> (u64, u16) {
        // Split into (child page pointer, in-page offset).
        (o.0 & !0xfff, (o.0 & 0xfff) as u16)
    }
}

impl From<InternalOffset> for usize {
    fn from(o: InternalOffset) -> usize {
        o.0 as usize
    }
}
if m.c1.is_leaf {
if fixed_size::<K, V>().is_none() {
if m.ins2.is_some() {
occupied += 4;
} else {
occupied += 2;
}
if !m.c1.is_leaf {
if m.ins2.is_some() {
occupied += 16
} else {
occupied += 8
if let Some(f) = fixed_size::<K, V>() {
let al = K::ALIGN.max(V::ALIGN);
let hdr_size = (HDR + al - 1) & !(al - 1);
let off = c.cur * f;
let kv_ptr = p.add(hdr_size + off);
core::ptr::copy(kv_ptr.add(f), kv_ptr, f * (n - c.cur - 1));
hdr.decr(f);
} else {
let ptr = p.add(HDR + c.cur * 2) as *mut u16;
let kv_ptr = p.add((*ptr) as usize);
let size = entry_size::<K, V>(kv_ptr);
core::ptr::copy(ptr.offset(1), ptr, n - c.cur);
hdr.decr(size);
}
let f = core::mem::size_of::<Tuple<K, V>>();
let al = K::ALIGN.max(V::ALIGN);
let hdr_size = (HDR + al - 1) & !(al - 1);
let off = c.cur * f;
let kv_ptr = p.add(hdr_size + off);
core::ptr::copy(kv_ptr.add(f), kv_ptr, f * (n - c.cur - 1));
hdr.decr(f);
if let Some(f) = fixed_size::<K, V>() {
let al = K::ALIGN.max(V::ALIGN);
hdr_size = (HDR + al - 1) & !(al - 1);
f
} else {
2 + alloc_size::<K, V>(m.mid.0, m.mid.1)
}
let f = core::mem::size_of::<Tuple<K, V>>();
let al = K::ALIGN.max(V::ALIGN);
hdr_size = (HDR + al - 1) & !(al - 1);
f
if let Some(f) = fixed_size::<K, V>() {
let al = K::ALIGN.max(V::ALIGN);
let hdr = (HDR + al - 1) & !(al - 1);
hdr + c.cur * f
} else {
u16::from_le(*(page.data.as_ptr().add(HDR + c.cur * 2) as *const u16)) as usize
}
let f = core::mem::size_of::<Tuple<K, V>>();
let al = K::ALIGN.max(V::ALIGN);
let hdr = (HDR + al - 1) & !(al - 1);
hdr + c.cur * f
if let Some(f) = fixed_size::<K, V>() {
f
} else {
2 + entry_size::<K, V>(page.data.as_ptr().add(u16::from_le(
*(page.data.as_ptr().add(HDR) as *const u16).add(c.cur),
)
as usize))
}
core::mem::size_of::<Tuple<K, V>>()
}
}
fn fixed_size<K: Representable, V: Representable>() -> Option<usize> {
if let (Some(ks), Some(vs)) = (K::SIZE, V::SIZE) {
let s = ((ks + V::ALIGN - 1) & !(V::ALIGN - 1)) + vs;
let al = K::ALIGN.max(V::ALIGN);
Some((s + al - 1) & !(al - 1))
} else {
None
if fixed_size::<K, V>().is_some() {
let al = K::ALIGN.max(V::ALIGN);
let hdr_size = (HDR + al - 1) & !(al - 1);
let s = core::slice::from_raw_parts(
page.data.as_ptr().add(hdr_size) as *const Tuple<K, V>,
hdr.n() as usize,
);
if let Some(v0) = v0 {
s.binary_search_by(|tup| match tup.k.compare(txn, &k0) {
Ordering::Equal => tup.v.compare(txn, &v0),
c => c,
})
} else {
s.binary_search_by(|tup| tup.k.compare(txn, k0))
}
let al = K::ALIGN.max(V::ALIGN);
let hdr_size = (HDR + al - 1) & !(al - 1);
let s = core::slice::from_raw_parts(
page.data.as_ptr().add(hdr_size) as *const Tuple<K, V>,
hdr.n() as usize,
);
if let Some(v0) = v0 {
s.binary_search_by(|tup| match tup.k.compare(txn, &k0) {
Ordering::Equal => tup.v.compare(txn, &v0),
c => c,
})
let s = core::slice::from_raw_parts(
page.data.as_ptr().add(HDR) as *const u16,
hdr.n() as usize,
);
if let Some(v0) = v0 {
s.binary_search_by(|&off| {
let off = u16::from_le(off);
let (k, v) = read::<T, K, V>(txn, page.data.as_ptr().offset(off as isize));
let k = K::from_raw_ptr(txn, k as *const u8);
match k.compare(txn, k0) {
Ordering::Equal => {
let v = V::from_raw_ptr(txn, v as *const u8);
v.compare(txn, v0)
},
cmp => cmp,
}
})
} else {
s.binary_search_by(|&off| {
let off = u16::from_le(off);
let (k, _) = read::<T, K, V>(txn, page.data.as_ptr().offset(off as isize));
let k = K::from_raw_ptr(txn, k);
k.compare(txn, k0)
})
}
s.binary_search_by(|tup| tup.k.compare(txn, k0))
let (new_right, k, v) = match fixed_size::<K, V>() {
Some(f) if r == 0 && right_hdr.is_dirty() => {
// Rewrite the deleted element at the end of the page, so that
// the pointer is still valid.
let data = m.other.data;
let mut other = MutPage(m.other);
let right_hdr = header_mut(&mut other);
let al = K::ALIGN.max(V::ALIGN);
let hdr_size = (HDR + al - 1) & !(al - 1);
let n = right_hdr.n() as usize;
right_hdr.decr(f);
right_hdr.set_n((n - 1) as u16);
let mut t: MaybeUninit<Tuple<K, V>> = MaybeUninit::uninit();
unsafe {
core::ptr::copy_nonoverlapping(
data.add(hdr_size) as *mut Tuple<K, V>,
t.as_mut_ptr(),
1,
);
core::ptr::copy(
data.add(hdr_size + f) as *const Tuple<K, V>,
data.add(hdr_size) as *mut Tuple<K, V>,
n - 1,
);
let last = data.add(hdr_size + f * (n - 1)) as *mut Tuple<K, V>;
core::ptr::copy_nonoverlapping(t.as_ptr(), last, 1);
debug!("tuple = {:?}", t.assume_init());
(other, &(&*last).k, &(&*last).v)
}
let f = core::mem::size_of::<Tuple<K, V>>();
let (new_right, k, v) = if r == 0 && right_hdr.is_dirty() {
// Rewrite the deleted element at the end of the page, so that
// the pointer is still valid.
let data = m.other.data;
let mut other = MutPage(m.other);
let right_hdr = header_mut(&mut other);
let al = K::ALIGN.max(V::ALIGN);
let hdr_size = (HDR + al - 1) & !(al - 1);
let n = right_hdr.n() as usize;
right_hdr.decr(f);
right_hdr.set_n((n - 1) as u16);
let mut t: MaybeUninit<Tuple<K, V>> = MaybeUninit::uninit();
unsafe {
core::ptr::copy_nonoverlapping(
data.add(hdr_size) as *mut Tuple<K, V>,
t.as_mut_ptr(),
1,
);
core::ptr::copy(
data.add(hdr_size + f) as *const Tuple<K, V>,
data.add(hdr_size) as *mut Tuple<K, V>,
n - 1,
);
let last = data.add(hdr_size + f * (n - 1)) as *mut Tuple<K, V>;
core::ptr::copy_nonoverlapping(t.as_ptr(), last, 1);
debug!("tuple = {:?}", t.assume_init());
(other, &(&*last).k, &(&*last).v)
if let Some(s) = fixed_size::<K, V>() {
assert!(k1v1.is_none());
return split::<_, _, _, L>(txn, page, mutable, s, u, k0, v0, l, r);
} else {
return split_unsized::<_, _, _, L>(txn, page.as_page(), u, k0, v0, k1v1, l, r);
}
let s = core::mem::size_of::<Tuple<K, V>>();
assert!(k1v1.is_none());
return split::<_, _, _, L>(txn, page, mutable, s, u, k0, v0, l, r);
/// Splits a page whose entries have variable ("unsized") representations.
///
/// Existing entries are streamed in order into a fresh `left` page until
/// roughly half a page (`PAGE_SIZE / 2`) has been consumed; the first
/// entry past that threshold becomes the split entry and the remaining
/// ones go to a fresh `right` page. The new binding (`k0`, `v0`) — plus
/// `k1v1` when one entry is being replaced by two — is inserted when the
/// scan reaches position `u`. Returns `Put::Split` with the split
/// key/value, the two new pages, and the page to free.
fn split_unsized<
    'a,
    T: AllocPage,
    K: Representable + core::fmt::Debug,
    V: Representable + core::fmt::Debug,
    L: Alloc,
>(
    txn: &mut T,
    page: crate::Page,
    u: usize,
    k0: &'a K,
    v0: &'a V,
    k1v1: Option<(&'a K, &'a V)>,
    l0: u64,
    r0: u64,
) -> Result<Put<'a, K, V>, T::Error> {
    let hdr = header(page);
    let n0 = hdr.n();
    let mut left = txn.alloc_page()?;
    <Page<K, V> as BTreeMutPage<K, V>>::init(&mut left);
    let mut right = txn.alloc_page()?;
    // BUG FIX: initialise `right` — this previously re-initialised
    // `left`, leaving the right page's header and contents garbage.
    <Page<K, V> as BTreeMutPage<K, V>>::init(&mut right);
    // The old page's left child becomes the left child of the new left page.
    let left_child = header(page).left_page() & !0xfff;
    L::set_right_child(&mut left, -1, left_child);
    let mut split = (core::ptr::null(), core::ptr::null());
    let mut current_page = &mut left;
    let mut n = 0;
    let s = unsafe {
        core::slice::from_raw_parts(
            page.data.as_ptr().add(HDR) as *const LeafOffset,
            n0 as usize,
        )
    };
    let mut total = HDR;
    let mut is_left = true;
    for off in s.iter() {
        let (r, off): (u64, u16) = (*off).into();
        unsafe {
            let ptr = page.data.as_ptr().add(off as usize);
            let size = entry_size::<K, V>(ptr) + L::extra_size::<T, K, V>();
            total += size;
            if is_left && total >= PAGE_SIZE / 2 {
                // First entry past the midpoint: keep it as the split
                // entry and start filling the right-hand page.
                is_left = false;
                split = read::<T, K, V>(txn, ptr);
                L::set_right_child(&mut right, -1, r);
                current_page = &mut right;
            } else {
                let off_new = L::alloc(header_mut(current_page), size, K::ALIGN.max(V::ALIGN));
                core::ptr::copy_nonoverlapping(
                    ptr,
                    current_page.0.data.offset(off_new as isize),
                    size,
                );
                L::set_offset(current_page, n, r, off_new);
            }
        }
        n += 1;
        if n == u as isize {
            // Insert the new binding(s) at position `u`.
            // NOTE(review): `alloc` already receives `&mut n`; confirm the
            // extra `n += 1` / `n += 2` below does not double-count.
            if let Some((k1, v1)) = k1v1 {
                alloc::<K, V, L>(current_page, k0, v0, 0, l0, &mut n);
                total += alloc_size(k0, v0) + L::extra_size::<T, K, V>();
                alloc::<K, V, L>(current_page, k1, v1, 0, r0, &mut n);
                total += alloc_size(k1, v1) + L::extra_size::<T, K, V>();
                n += 2;
            } else {
                alloc::<K, V, L>(current_page, k0, v0, l0, r0, &mut n);
                total += alloc_size(k0, v0) + L::extra_size::<T, K, V>();
                n += 1;
            }
        }
    }
    // A split entry must have been chosen above.
    assert!(!split.0.is_null());
    Ok(Put::Split {
        split_key: unsafe { K::from_raw_ptr(txn, split.0) },
        split_value: unsafe { V::from_raw_ptr(txn, split.1) },
        left,
        right,
        // The low bit signals to the caller that the freed page was dirty.
        freed: if hdr.is_dirty() {
            page.offset | 1
        } else {
            page.offset
        },
    })
}
if let Some(f) = fixed_size::<K, V>() {
let al = K::ALIGN.max(V::ALIGN);
let header_size = (HDR + al - 1) & !(al - 1);
header_size + (hdr.n() as usize) * f + size < hdr.data() as usize
} else {
HDR + (hdr.n() as usize) * 2 + 2 + size < hdr.data() as usize
}
let f = core::mem::size_of::<Tuple<K, V>>();
let al = K::ALIGN.max(V::ALIGN);
let header_size = (HDR + al - 1) & !(al - 1);
header_size + (hdr.n() as usize) * f + size < hdr.data() as usize
if fixed_size::<K, V>().is_some() {
let al = K::ALIGN.max(V::ALIGN);
let header_size = (HDR + al - 1) & !(al - 1);
header_size + ((hdr.left_page() & 0xfff) as usize) + size
< hdr.data() as usize
} else {
HDR + ((hdr.left_page() & 0xfff) as usize) + 2 + size < 4096
}
let al = K::ALIGN.max(V::ALIGN);
let header_size = (HDR + al - 1) & !(al - 1);
header_size + ((hdr.left_page() & 0xfff) as usize) + size
< hdr.data() as usize
if let Some(f) = fixed_size::<K, V>() {
let al = K::ALIGN.max(V::ALIGN);
let hdr_size = (HDR + al - 1) & !(al - 1);
// debug!("{:?} {:?} {:?}", new, hdr.n(), *n);
let hdr_n = header_mut(page).n();
let f = core::mem::size_of::<Tuple<K, V>>();
let al = K::ALIGN.max(V::ALIGN);
let hdr_size = (HDR + al - 1) & !(al - 1);
// debug!("{:?} {:?} {:?}", new, hdr.n(), *n);
let hdr_n = header_mut(page).n();
unsafe {
let mut swap: core::mem::MaybeUninit<Tuple<K, V>> =
core::mem::MaybeUninit::uninit();
core::ptr::copy(
page.0.data.add(hdr_size + (n - 1) * f),
swap.as_mut_ptr() as *mut u8,
f,
);
core::ptr::copy(
page.0.data.add(hdr_size + n * f),
page.0.data.add(hdr_size),
(hdr_n as usize - n) * f,
);
core::ptr::copy(
swap.as_ptr() as *mut u8,
page.0.data.add(hdr_size + (hdr_n as usize - n) * f),
f,
);
}
debug!("{:?} - {:?}", hdr_n, n);
let hdr = header_mut(page);
hdr.set_n(hdr_n - n as u16);
hdr.decr(n * f);
unsafe {
let (k, v) = read::<T, K, V>(
txn,
page.0.data.add(hdr_size + (hdr_n as usize - n) * f),
);
Some((K::from_raw_ptr(txn, k), V::from_raw_ptr(txn, v)))
}
} else {
let hdr_n = header_mut(page).n();
unsafe {
core::ptr::copy(
page.0.data.add(HDR + n * 2),
page.0.data.add(HDR),
(hdr_n as usize - n) * 2,
);
}
let deleted_offsets = unsafe {
core::slice::from_raw_parts(page.0.data.add(HDR) as *const u16, n as usize)
};
let deleted_size: u64 = deleted_offsets
.iter()
.map(|&off| {
2 + unsafe { entry_size::<K, V>(page.0.data.add(off as usize)) } as u64
})
.sum();
let hdr = header_mut(page);
hdr.set_n(hdr_n - n as u16);
hdr.decr(deleted_size as usize);
None
unsafe {
let mut swap: core::mem::MaybeUninit<Tuple<K, V>> =
core::mem::MaybeUninit::uninit();
core::ptr::copy(
page.0.data.add(hdr_size + (n - 1) * f),
swap.as_mut_ptr() as *mut u8,
f,
);
core::ptr::copy(
page.0.data.add(hdr_size + n * f),
page.0.data.add(hdr_size),
(hdr_n as usize - n) * f,
);
core::ptr::copy(
swap.as_ptr() as *mut u8,
page.0.data.add(hdr_size + (hdr_n as usize - n) * f),
f,
);
}
debug!("{:?} - {:?}", hdr_n, n);
let hdr = header_mut(page);
hdr.set_n(hdr_n - n as u16);
hdr.decr(n * f);
unsafe {
let (k, v) = read::<T, K, V>(
txn,
page.0.data.add(hdr_size + (hdr_n as usize - n) * f),
);
Some((K::from_raw_ptr(txn, k), V::from_raw_ptr(txn, v)))
if let Some(f) = fixed_size::<K, V>() {
let al = K::ALIGN.max(V::ALIGN);
let hdr_size = (HDR + al - 1) & !(al - 1);
// debug!("{:?} {:?} {:?}", new, hdr.n(), *n);
let hdr_n = header_mut(new).n();
unsafe {
core::ptr::copy(
new.0.data.add(hdr_size + (*n as usize) * f),
new.0.data.add(hdr_size + (*n as usize) * f + f),
(hdr_n as usize - (*n as usize)) * f,
);
}
let hdr = header_mut(new);
hdr.set_n(hdr.n() + 1);
hdr.incr(f);
hdr_size + (*n as usize) * f
} else {
let (hdr_n, off_new) = {
let hdr = header_mut(new);
(
hdr.n(),
Self::alloc(&mut *hdr, 2 + size, K::ALIGN.max(V::ALIGN)),
)
};
unsafe {
core::ptr::copy(
new.0.data.add(HDR + (*n as usize) * 2),
new.0.data.add(HDR + (*n as usize) * 2 + 2),
(hdr_n as usize - (*n as usize)) * 2,
);
}
Self::set_offset(new, *n, 0, off_new);
off_new as usize
let f = core::mem::size_of::<Tuple<K, V>>();
let al = K::ALIGN.max(V::ALIGN);
let hdr_size = (HDR + al - 1) & !(al - 1);
// debug!("{:?} {:?} {:?}", new, hdr.n(), *n);
let hdr_n = header_mut(new).n();
unsafe {
core::ptr::copy(
new.0.data.add(hdr_size + (*n as usize) * f),
new.0.data.add(hdr_size + (*n as usize) * f + f),
(hdr_n as usize - (*n as usize)) * f,
);
if fixed_size::<K, V>().is_some() {
Offsets::Range(0..(hdr.n() as usize))
} else {
unsafe {
Offsets::Slice(core::slice::from_raw_parts(
page.data.as_ptr().add(HDR) as *const LeafOffset,
hdr.n() as usize,
))
}
}
Offsets::Range(0..(hdr.n() as usize))
} else {
let offsets = unsafe {
core::slice::from_raw_parts(
page.0.data.add(HDR + n * 8) as *const u16,
hdr_n as usize - n as usize,
)
};
offsets
.iter()
.map(|&off| {
8 + unsafe { entry_size::<K, V>(page.0.data.add(off as usize)) } as u64
})
.sum()
// Shorthand for the unsized-entry page implementation under test.
type UP<K, V> = sanakirja_core::btree::page_unsized::Page<K, V>;
#[test]
fn slice() {
    // Smoke test: insert `n` (u64 -> [u8]) bindings into a fresh
    // unsized-page btree inside an anonymous environment.
    env_logger::try_init().unwrap_or(());
    let env = Env::new_anon(409600000, 1).unwrap();
    let mut txn = Env::mut_txn_begin(&env).unwrap();
    let mut db = create_db_::<MutTxn<&Env, ()>, u64, [u8], UP<u64, [u8]>>(&mut txn).unwrap();
    let n = 1580u64;
    let m = 1000;
    let mut values = Vec::with_capacity(n as usize);
    for i in 0..n {
        debug!("=============== putting {:?}", i);
        // Key `i` maps to a pseudo-randomly sized prefix of the alphabet
        // (length cycles through 1..=25).
        let alpha = b"abcdefgihjklmnopqrstuvwxyz";
        let a = &alpha[..((i as usize * 7) % 25) + 1];
        if put(&mut txn, &mut db, &i, &a[..]).unwrap() {
            // NOTE(review): `(i * i) % m` is unrelated to the value just
            // inserted, and `values` is sorted below but never asserted
            // against anything — confirm whether this test lost its checks.
            values.push((i * i) % m);
        }
    }
    values.sort();
}