#![no_std]
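//! Core types and traits for an on-disk B-tree: the [`Storable`] and
//! [`UnsizedStorable`] traits describing how values are laid out on pages,
//! the page wrappers [`CowPage`], [`Page`] and [`MutPage`], and the
//! [`LoadPage`]/[`AllocPage`] interfaces that transactions implement for the
//! [`btree`] module.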
pub mod btree;
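/// Size of a page, in bytes.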
pub const PAGE_SIZE: usize = 4096;
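/// A value that can be stored in a B-tree, either as a key or as a value.
///
/// Implementations say how two stored values are compared and which pages
/// (if any) a value references, so that reference counts can be updated when
/// the value is dropped.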
pub trait Storable: core::fmt::Debug {
fn compare<T: LoadPage>(&self, _txn: &T, _b: &Self) -> core::cmp::Ordering {
unimplemented!()
}
fn page_references(&self) -> Self::PageReferences;
unsafe fn drop<T: AllocPage>(&self, txn: &mut T) -> Result<(), T::Error> {
for p in self.page_references() {
txn.decr_rc(p)?;
}
Ok(())
}
type PageReferences: Iterator<Item = u64>;
}
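/// A 32-byte identifier for a stored type (for example, a SHA-256 digest of
/// its name), available with the `typeids` feature.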
#[cfg(feature = "typeids")]
pub trait TypeId {
fn type_id() -> [u8; 32];
}
#[cfg(feature = "typeids")]
pub use sha2;
#[cfg(feature = "typeids")]
use sha2::Digest;
#[cfg(feature = "typeids")]
impl TypeId for () {
fn type_id() -> [u8; 32] {
[0; 32]
}
}
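/// Implements [`Storable`] and [`UnsizedStorable`] for a fixed-size type by
/// copying its in-memory representation directly onto the page. The type
/// must be `Ord` (comparisons fall back to [`Ord::cmp`]) and should have a
/// stable byte layout, since those bytes are written to storage verbatim.
///
/// A minimal sketch of intended use (the `Pair` type below is illustrative,
/// not part of the crate):
///
/// ```ignore
/// #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
/// #[repr(C)]
/// struct Pair {
///     a: u64,
///     b: u64,
/// }
/// direct_repr!(Pair);
/// ```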
#[macro_export]
macro_rules! direct_repr {
($t: ty) => {
impl $crate::Storable for $t {
type PageReferences = core::iter::Empty<u64>;
fn page_references(&self) -> Self::PageReferences {
core::iter::empty()
}
fn compare<T>(&self, _: &T, b: &Self) -> core::cmp::Ordering {
self.cmp(b)
}
}
impl $crate::UnsizedStorable for $t {
const ALIGN: usize = core::mem::align_of::<$t>();
fn size(&self) -> usize {
core::mem::size_of::<Self>()
}
unsafe fn onpage_size(_: *const u8) -> usize {
core::mem::size_of::<Self>()
}
unsafe fn write_to_page(&self, p: *mut u8) {
core::ptr::copy_nonoverlapping(self, p as *mut Self, 1)
}
unsafe fn from_raw_ptr<'a, T>(_: &T, p: *const u8) -> &'a Self {
&*(p as *const Self)
}
}
};
}
direct_repr!(());
direct_repr!(u8);
direct_repr!(i8);
direct_repr!(u16);
direct_repr!(i16);
direct_repr!(u32);
direct_repr!(i32);
direct_repr!(u64);
direct_repr!(i64);
direct_repr!([u8; 16]);
#[cfg(feature = "std")]
extern crate std;
#[cfg(feature = "std")]
direct_repr!(std::net::Ipv4Addr);
#[cfg(feature = "std")]
direct_repr!(std::net::Ipv6Addr);
#[cfg(feature = "std")]
direct_repr!(std::net::IpAddr);
#[cfg(feature = "std")]
direct_repr!(std::net::SocketAddr);
#[cfg(feature = "std")]
direct_repr!(std::time::SystemTime);
#[cfg(feature = "std")]
direct_repr!(std::time::Duration);
#[cfg(feature = "uuid")]
direct_repr!(uuid::Uuid);
#[cfg(feature = "ed25519")]
direct_repr!(ed25519_zebra::VerificationKeyBytes);
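/// A [`Storable`] type whose on-page size is only known at runtime, such as
/// byte slices: implementations report their alignment and size, and write
/// themselves to raw page memory.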
pub trait UnsizedStorable: Storable {
const ALIGN: usize;
fn size(&self) -> usize;
unsafe fn onpage_size(_: *const u8) -> usize;
unsafe fn write_to_page(&self, _: *mut u8) {
unimplemented!()
}
unsafe fn write_to_page_alloc<T: AllocPage>(&self, _: &mut T, p: *mut u8) {
self.write_to_page(p)
}
unsafe fn from_raw_ptr<'a, T>(_: &T, p: *const u8) -> &'a Self;
}
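/// On-page representation of a value stored out of line: the offset of the
/// first page holding the bytes (tagged in its low 12 bits) and the length
/// in bytes.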
#[derive(Debug, Clone, Copy)]
#[repr(C)]
struct Ref {
p: u64,
len: u64,
}
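/// A byte string in one of three states, distinguished by the `len` field as
/// used in this module: lengths up to 510 mean the bytes are stored inline
/// behind a 2-byte length prefix, the tag `512` marks a reference to
/// separately allocated pages (a `Ref`), and the tag `513` marks a slice
/// that still lives in memory.
///
/// ```ignore
/// let s = Slice::from(&b"hello"[..]);
/// ```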
pub union Slice<'b> {
len: u16,
page: Ref,
mem: Mem<'b>,
}
#[derive(Clone, Copy)]
#[repr(C)]
struct Mem<'b> {
_len: u16,
m: &'b [u8],
}
impl<'a> core::fmt::Debug for Slice<'a> {
fn fmt(&self, fmt: &mut core::fmt::Formatter) -> core::fmt::Result {
write!(fmt, "Slice({:?})", unsafe { self.len })
}
}
impl<'a> core::convert::From<&'a [u8]> for Slice<'a> {
fn from(m: &'a [u8]) -> Slice<'a> {
        // 513 tags a slice that still lives in memory, not yet on a page.
        Slice {
            mem: Mem {
                _len: 513u16.to_le(),
                m,
            },
        }
}
}
impl<'a> Slice<'a> {
pub fn as_bytes<T: LoadPage>(&self, txn: &T) -> Result<&[u8], T::Error> {
Ok(unsafe {
let len = u16::from_le(self.len) & 0xfff;
if len == 512 {
let p = txn.load_page(u64::from_le(self.page.p) & !0xfff)?;
core::slice::from_raw_parts(p.data, u64::from_le(self.page.len) as usize)
} else if len == 513 {
self.mem.m
} else {
core::slice::from_raw_parts(
(&self.len as *const u16 as *const u8).add(2),
len as usize,
)
}
})
}
}
#[cfg(feature = "typeids")]
impl<'a> TypeId for Slice<'a> {
fn type_id() -> [u8; 32] {
let mut h = sha2::Sha256::new();
h.update(b"sanakirja-core::Slice");
h.finalize().into()
}
}
impl<'a> Storable for Slice<'a> {
type PageReferences = Pages;
fn page_references(&self) -> Self::PageReferences {
unsafe {
let len = u16::from_le(self.len);
if len == 512 {
let plen = u64::from_le(self.page.len);
let len_up = ((plen + PAGE_SIZE as u64 - 1) / PAGE_SIZE as u64) * PAGE_SIZE as u64;
let offset = u64::from_le(self.page.p) & !0xfff;
Pages {
offset,
limit: offset + len_up,
}
} else {
Pages {
offset: 0,
limit: 0,
}
}
}
}
fn compare<T: LoadPage>(&self, t: &T, b: &Self) -> core::cmp::Ordering {
self.as_bytes(t).unwrap().cmp(b.as_bytes(t).unwrap())
}
}
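/// Iterator over the offsets of the pages spanned by a value stored out of
/// line, in steps of [`PAGE_SIZE`].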
pub struct Pages {
offset: u64,
limit: u64,
}
impl Iterator for Pages {
type Item = u64;
fn next(&mut self) -> Option<Self::Item> {
if self.offset >= self.limit {
None
} else {
let o = self.offset;
self.offset += PAGE_SIZE as u64;
Some(o)
}
}
}
impl<'b> UnsizedStorable for Slice<'b> {
const ALIGN: usize = 8;
    fn size(&self) -> usize {
        unsafe {
            if u16::from_le(self.len) == 512 {
                // Page reference: a 16-byte `Ref` goes on the page.
                16
            } else if u16::from_le(self.len) == 513 {
                if self.mem.m.len() > 510 {
                    // Too large to inline: stored as a 16-byte `Ref`.
                    16
                } else {
                    // Inlined: 2-byte length prefix followed by the bytes.
                    2 + self.mem.m.len()
                }
            } else {
                // Already on a page: 2-byte length prefix plus the bytes.
                2 + u16::from_le(self.len) as usize
            }
        }
    }
unsafe fn from_raw_ptr<'a, T>(_: &T, p: *const u8) -> &'a Self {
&*(p as *const Self)
}
    unsafe fn onpage_size(p: *const u8) -> usize {
        let p = &*(p as *const Self);
        if u16::from_le(p.len) == 512 {
            16
        } else if u16::from_le(p.len) == 513 {
            2 + p.mem.m.len()
        } else {
            // Inlined entry: 2-byte length prefix plus the bytes.
            2 + u16::from_le(p.len) as usize
        }
    }
unsafe fn write_to_page_alloc<T: AllocPage>(&self, t: &mut T, p: *mut u8) {
        if u16::from_le(self.len) == 512 {
            // Already a page reference: copy the 16-byte `Ref` as-is.
            core::ptr::copy(&self.page as *const Ref as *const u8, p, 16)
        } else if u16::from_le(self.len) == 513 {
if self.mem.m.len() > 510 {
let len = self.mem.m.len();
let page = t
.alloc_contiguous((((len + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE) as u64)
.unwrap();
assert!(page.0.offset > 0);
core::ptr::copy_nonoverlapping(self.mem.m.as_ptr(), page.0.data, len);
let p = &mut *(p as *mut Ref);
p.len = (self.mem.m.len() as u64).to_le();
p.p = (page.0.offset | 512).to_le();
} else {
let len = self.mem.m.len();
*(p as *mut u16) = (len as u16).to_le();
core::ptr::copy_nonoverlapping(self.mem.m.as_ptr(), p.add(2), len)
}
} else {
core::ptr::copy(
&self.len as *const u16 as *const u8,
p,
2 + u16::from_le(self.len) as usize,
)
}
}
}
impl Storable for [u8] {
type PageReferences = core::iter::Empty<u64>;
fn page_references(&self) -> Self::PageReferences {
core::iter::empty()
}
fn compare<T>(&self, _: &T, b: &Self) -> core::cmp::Ordering {
self.cmp(b)
}
}
impl UnsizedStorable for [u8] {
const ALIGN: usize = 2;
fn size(&self) -> usize {
2 + self.len()
}
unsafe fn from_raw_ptr<'a, T>(_: &T, p: *const u8) -> &'a Self {
let len = u16::from_le(*(p as *const u16));
assert_ne!(len, 0);
assert_eq!(len & 0xf000, 0);
core::slice::from_raw_parts(p.add(2), len as usize)
}
unsafe fn onpage_size(p: *const u8) -> usize {
let len = u16::from_le(*(p as *const u16));
2 + len as usize
}
unsafe fn write_to_page_alloc<T: AllocPage>(&self, _txn: &mut T, p: *mut u8) {
assert!(self.len() <= 510);
*(p as *mut u16) = (self.len() as u16).to_le();
core::ptr::copy_nonoverlapping(self.as_ptr(), p.add(2), self.len())
}
}
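/// Returns pointers to the key and to the value of the entry starting at
/// `k`, aligning the value pointer to `V::ALIGN`.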
unsafe fn read<T: LoadPage, K: UnsizedStorable + ?Sized, V: UnsizedStorable + ?Sized>(
_txn: &T,
k: *const u8,
) -> (*const u8, *const u8) {
let s = K::onpage_size(k);
let v = k.add(s);
let al = v.align_offset(V::ALIGN);
let v = v.add(al);
(k, v)
}
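/// On-page size of the entry starting at `k`: the key, padding to align the
/// value, the value itself, and trailing padding so that the next entry is
/// aligned for both `K` and `V`.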
unsafe fn entry_size<K: UnsizedStorable + ?Sized, V: UnsizedStorable + ?Sized>(
k: *const u8,
) -> usize {
assert_eq!(k.align_offset(K::ALIGN), 0);
let ks = K::onpage_size(k);
let v_off = (ks + V::ALIGN - 1) & !(V::ALIGN - 1);
let v_ptr = k.add(v_off);
let vs = V::onpage_size(v_ptr);
let ka = K::ALIGN.max(V::ALIGN);
let size = v_off + vs;
(size + ka - 1) & !(ka - 1)
}
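/// A loaded page that may still be shared with other readers: a raw pointer
/// to its bytes together with its offset in the underlying storage.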
#[derive(Debug)]
#[repr(C)]
pub struct CowPage {
pub data: *mut u8,
pub offset: u64,
}
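/// An immutable, borrowed view of a page.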
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct Page<'a> {
pub data: &'a [u8; PAGE_SIZE],
pub offset: u64,
}
impl CowPage {
pub fn as_page(&self) -> Page {
Page {
data: unsafe { &*(self.data as *const [u8; PAGE_SIZE]) },
offset: self.offset,
}
}
#[cfg(feature = "crc32")]
pub unsafe fn crc(&self, hasher: &crc32fast::Hasher) -> u32 {
crc(self.data, hasher)
}
#[cfg(feature = "crc32")]
pub unsafe fn crc_check(&self, hasher: &crc32fast::Hasher) -> bool {
crc_check(self.data, hasher)
}
}
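/// A page that the current transaction is allowed to mutate, as returned by
/// [`AllocPage::alloc_page`].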
#[derive(Debug)]
pub struct MutPage(pub CowPage);
impl MutPage {
#[cfg(not(feature = "crc32"))]
pub unsafe fn clear_dirty(&mut self) {
*self.0.data &= 0xfe
}
#[cfg(feature = "crc32")]
pub unsafe fn clear_dirty(&mut self, hasher: &crc32fast::Hasher) {
*self.0.data &= 0xfe;
let crc_ = (self.0.data as *mut u32).add(1);
*crc_ = crc(self.0.data, hasher)
}
}
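/// CRC32 of a page: the first byte is hashed with its dirty bit cleared,
/// bytes 4..8 (where the checksum itself is stored) are skipped, and the
/// rest of the page is hashed as-is.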
#[cfg(feature = "crc32")]
pub unsafe fn crc(data: *mut u8, hasher: &crc32fast::Hasher) -> u32 {
let mut hasher = hasher.clone();
hasher.reset();
unsafe {
let x = [(*data) & 0xfe];
hasher.update(&x[..]);
hasher.update(core::slice::from_raw_parts(data.add(1), 3));
hasher.update(core::slice::from_raw_parts(data.add(8), PAGE_SIZE - 8));
}
hasher.finalize()
}
#[cfg(feature = "crc32")]
pub unsafe fn crc_check(data: *mut u8, hasher: &crc32fast::Hasher) -> bool {
let crc_ = unsafe { u32::from_le(*(data as *const u32).add(1)) };
crc(data, hasher) == crc_
}
#[cfg(not(feature = "crc32"))]
pub fn clear_dirty(p: *mut u8) {
unsafe { *p &= 0xfe }
}
#[cfg(feature = "crc32")]
pub fn clear_dirty(p: *mut u8, hasher: &crc32fast::Hasher) {
unsafe {
*p &= 0xfe;
let crc_ = (p as *mut u32).add(1);
*crc_ = crc(p, hasher)
}
}
unsafe impl Sync for CowPage {}
unsafe impl Send for CowPage {}
impl CowPage {
pub fn is_dirty(&self) -> bool {
unsafe { (*self.data) & 1 != 0 }
}
}
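/// Read-only page access: the minimal interface a transaction must provide
/// to traverse the trees. `rc` reports a page's reference count (the default
/// implementation returns 0).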
pub trait LoadPage {
type Error: core::fmt::Debug;
unsafe fn load_page(&self, off: u64) -> Result<CowPage, Self::Error>;
unsafe fn load_page_contiguous(&self, _off: u64, _len: u64) -> Result<CowPage, Self::Error> {
unimplemented!()
}
fn rc(&self, _off: u64) -> Result<u64, Self::Error> {
Ok(0)
}
}
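/// Page allocation and reference counting on top of [`LoadPage`]: the
/// interface a mutable transaction provides so the trees can be modified.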
pub trait AllocPage: LoadPage {
unsafe fn alloc_page(&mut self) -> Result<MutPage, Self::Error>;
unsafe fn alloc_page_no_dirty(&mut self) -> Result<MutPage, Self::Error> {
unimplemented!()
}
unsafe fn alloc_contiguous(&mut self, length: u64) -> Result<MutPage, Self::Error>;
fn incr_rc(&mut self, off: u64) -> Result<usize, Self::Error>;
unsafe fn decr_rc(&mut self, off: u64) -> Result<usize, Self::Error>;
unsafe fn decr_rc_owned(&mut self, off: u64) -> Result<usize, Self::Error>;
}