/// Concurrent benchmark of the lazy skiplist: BENCH_ITEMS mixed operations
/// (2% remove, 9% insert, 89% search) dispatched in BATCH_SIZE batches on a std.Thread.Pool.
fn benchLazy() !void {
    const Functions = struct {
        fn insert(skiplist: SL, rnd: std.Random) void {
            for (0..BATCH_SIZE) |_| {
                const key = rnd.intRangeAtMostBiased(BENCH_KEY, std.math.minInt(BENCH_KEY), BENCH_ITEMS / BENCH_LIMIT_FACTOR);
                _ = @call(std.builtin.CallModifier.auto, SL.insert, .{ skiplist, key, {} }) catch unreachable;
            }
        }
        fn remove(skiplist: SL, rnd: std.Random) void {
            for (0..BATCH_SIZE) |_| {
                const key = rnd.intRangeAtMostBiased(BENCH_KEY, std.math.minInt(BENCH_KEY), BENCH_ITEMS / BENCH_LIMIT_FACTOR);
                _ = @call(std.builtin.CallModifier.auto, SL.remove, .{ skiplist, key });
            }
        }
        fn search(skiplist: SL, rnd: std.Random) void {
            for (0..BATCH_SIZE) |_| {
                const key = rnd.intRangeAtMostBiased(BENCH_KEY, std.math.minInt(BENCH_KEY), BENCH_ITEMS / BENCH_LIMIT_FACTOR);
                _ = @call(std.builtin.CallModifier.auto, SL.search, .{ skiplist, key });
            }
        }
    };

    var sl = try SL.init(std.heap.page_allocator);
    defer sl.deinit();
    var rand = std.rand.DefaultPrng.init(@intCast(std.time.milliTimestamp()));
    const rnd = rand.random();
    var pool: std.Thread.Pool = undefined;
    try pool.init(.{ .allocator = std.heap.page_allocator });
    defer pool.deinit();
    var wg = std.Thread.WaitGroup{};

    // Pre-populate the skiplist with BENCH_ITEMS random keys.
    for (0..BENCH_ITEMS) |_| {
        // Limiting prep keys to BENCH_ITEMS / 20 instead means roughly 50% of each
        // call (remove, insert, search) hits a target:
        // const key = rnd.intRangeAtMostBiased(BENCH_KEY, std.math.minInt(BENCH_KEY), BENCH_ITEMS / 20);
        const key = rnd.intRangeAtMostBiased(BENCH_KEY, std.math.minInt(BENCH_KEY), std.math.maxInt(BENCH_KEY));
        _ = try sl.insert(key, {});
    }
    std.debug.print("Prepared skiplist with {d} elements...\n", .{BENCH_ITEMS});

    // Dispatch BENCH_ITEMS operations in batches of BATCH_SIZE.
    // Note: all workers share the one PRNG; key generation is racy, which only
    // affects the key distribution.
    for (0..BENCH_ITEMS / BATCH_SIZE) |_| {
        const query_type = rnd.intRangeAtMostBiased(u8, 0, 99);
        switch (query_type) {
            0...1 => { // 2% remove
                pool.spawnWg(&wg, Functions.remove, .{ sl, rnd });
            },
            2...10 => { // 9% insert
                pool.spawnWg(&wg, Functions.insert, .{ sl, rnd });
            },
            else => { // 89% search
                pool.spawnWg(&wg, Functions.search, .{ sl, rnd });
            },
        }
    }
    var timer = try std.time.Timer.start();
    pool.waitAndWork(&wg);
    std.debug.print("Lazy {d} threads: {d} ms\n", .{
        try std.Thread.getCpuCount(),
        timer.read() / std.time.ns_per_ms,
    });
}
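/// Variant that schedules each search as a task on libxev's ThreadPool (instead of
/// std.Thread.Pool) and waits for completion with a std.Thread.WaitGroup.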
fn benchLazyXev() !void {
    const xev = @import("xev");
    // xev's ThreadPool callback only receives a *Task, so each scheduled search
    // carries its own context and recovers it via @fieldParentPtr.
    const Ctx = struct {
        task: xev.ThreadPool.Task = .{ .callback = callback },
        sl: *SL,
        key: BENCH_KEY,
        wg: *std.Thread.WaitGroup,

        fn callback(task: *xev.ThreadPool.Task) void {
            const ctx: *@This() = @fieldParentPtr("task", task);
            defer ctx.wg.finish();
            _ = ctx.sl.search(ctx.key);
        }
    };

    var sl = try SL.init(std.heap.page_allocator);
    defer sl.deinit();
    var rand = std.rand.DefaultPrng.init(@intCast(std.time.milliTimestamp()));
    const rnd = rand.random();
    var pool = xev.ThreadPool.init(xev.ThreadPool.Config{});
    defer {
        pool.shutdown();
        pool.deinit();
    }
    var wg = std.Thread.WaitGroup{};
    // Tasks must outlive the scheduling loop, so keep them in stable storage.
    var ctxs = try std.ArrayList(Ctx).initCapacity(std.heap.page_allocator, BENCH_ITEMS);
    defer ctxs.deinit();
    for (0..BENCH_ITEMS) |i| {
        const key = rnd.intRangeAtMostBiased(BENCH_KEY, std.math.minInt(BENCH_KEY), BENCH_ITEMS / 20);
        ctxs.appendAssumeCapacity(.{ .sl = &sl, .key = key, .wg = &wg });
        wg.start();
        pool.schedule(xev.ThreadPool.Batch.from(&ctxs.items[i].task));
    }
    wg.wait();
}
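/// Event-loop variant: searches are queued on an xev IO_Uring loop as zero-delay
/// timers whose completion callbacks perform the lookup.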
fn benchXev() !void {
    const xev = @import("xev").IO_Uring;
    // A timer callback only receives one typed userdata pointer, so the skiplist and
    // the key to look up are bundled into a single context per completion.
    const Ctx = struct {
        sl: *SL,
        key: BENCH_KEY,
    };
    const Callbacks = struct {
        fn search(
            ctx: ?*Ctx,
            _: *xev.Loop,
            _: *xev.Completion,
            result: xev.Timer.RunError!void,
        ) xev.CallbackAction {
            _ = result catch unreachable;
            _ = ctx.?.sl.search(ctx.?.key);
            std.log.debug("searched for {d}", .{ctx.?.key});
            return .disarm;
        }
    };

    var rand = std.rand.DefaultPrng.init(@intCast(std.time.milliTimestamp()));
    const rnd = rand.random();
    var sl = try SL.init(std.heap.page_allocator);
    defer sl.deinit();
    var loop = try xev.Loop.init(xev.Options{});
    defer loop.deinit();
    // Contexts and completions must stay at stable addresses until the loop has run.
    var ctxs = try std.ArrayList(Ctx).initCapacity(std.heap.page_allocator, BENCH_ITEMS);
    defer ctxs.deinit();
    var comps = try std.ArrayList(xev.Completion).initCapacity(std.heap.page_allocator, BENCH_ITEMS);
    defer comps.deinit();
    for (0..BENCH_ITEMS) |_| {
        const key = rnd.intRangeAtMostBiased(BENCH_KEY, std.math.minInt(BENCH_KEY), BENCH_ITEMS / 20);
        ctxs.appendAssumeCapacity(.{ .sl = &sl, .key = key });
        const query_type = rnd.intRangeAtMostBiased(u8, 0, 99);
        switch (query_type) {
            0 => { // 1% remove: not wired up on this backend yet
            },
            1...9 => { // 9% insert: not wired up on this backend yet
            },
            else => { // 90% search
                comps.appendAssumeCapacity(xev.Completion{});
                var timer = try xev.Timer.init();
                defer timer.deinit();
                timer.run(
                    &loop,
                    &comps.items[comps.items.len - 1],
                    0,
                    Ctx,
                    &ctxs.items[ctxs.items.len - 1],
                    Callbacks.search,
                );
            },
        }
    }
    try loop.run(.until_done);
}
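/// Single-threaded baseline on the Pugh skiplist: BENCH_ITEMS mixed operations
/// (1% remove, 9% insert, 90% search) over random keys, timed end to end.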
fn benchPugh() !void {
var sl = try SkipList.Pugh(BENCH_KEY, BENCH_VALUE, {}, comptime std.sort.asc(BENCH_KEY))
.init(std.heap.page_allocator, BENCH_SKIP_SIZE, BENCH_ITEMS);
defer sl.deinit();
var rand = std.rand.DefaultPrng.init(@intCast(std.time.milliTimestamp()));
const rnd = rand.random();
var timer = try std.time.Timer.start();
var i: usize = 0;
while (i < BENCH_ITEMS) : (i += 1) {
const key = rnd.intRangeAtMostBiased(BENCH_KEY, std.math.minInt(BENCH_KEY), BENCH_ITEMS / 2);
const query_type = rnd.intRangeAtMostBiased(u8, 0, 99);
switch (query_type) {
0 => { // 1% remove
_ = try sl.remove(key);
},
1...9 => { // 9% insert
_ = try sl.insert(key, {});
},
else => { // 90% search
_ = sl.search(key);
},
}
}
std.debug.print("Pugh: {d} ms\n", .{timer.read() / std.time.ns_per_ms});
}
if (builtin.mode == .Debug) {
    // Debug-only check: the predecessor recorded for `level` must be one of the
    // predecessors whose lock is currently held; otherwise panic.
    var lit = locks.iterator(.{});
    while (lit.next()) |lvl| {
        if (preds[lvl].? == preds[level].?) continue :NEXT;
    }
    @panic("level is not locked...");
}
}
if (builtin.mode == .Debug) {
    // Debug-only check: a node carries exactly rlevel + 1 forward pointers, and every
    // non-null successor must itself hold a key.
    std.debug.assert(node.nexts.len == rlevel + 1);
    for (node.nexts) |item| {
        if (item != null) {
            std.debug.assert(item.?.key != null);
        }
    }
}
.{
.name = "skiplist",
.version = "0.0.0",
.minimum_zig_version = "0.13.0",
// This field is optional.
// Each dependency must either provide a `url` and `hash`, or a `path`.
// `zig build --fetch` can be used to fetch all dependencies of a package, recursively.
// Once all dependencies are fetched, `zig build` no longer requires
// internet connectivity.
.dependencies = .{
// See `zig fetch --save <url>` for a command-line interface for adding dependencies.
//.example = .{
// // When updating this field to a new URL, be sure to delete the corresponding
// // `hash`, otherwise you are communicating that you expect to find the old hash at
// // the new URL.
// .url = "https://example.com/foo.tar.gz",
//
// // This is computed from the file contents of the directory of files that is
// // obtained after fetching `url` and applying the inclusion rules given by
// // `paths`.
// //
// // This field is the source of truth; packages do not come from a `url`; they
// // come from a `hash`. `url` is just one of many possible mirrors for how to
// // obtain a package matching this `hash`.
// //
// // Uses the [multihash](https://multiformats.io/multihash/) format.
// .hash = "...",
//
// // When this is provided, the package is found in a directory relative to the
// // build root. In this case the package's hash is irrelevant and therefore not
// // computed. This field and `url` are mutually exclusive.
// .path = "foo",
// // When this is set to `true`, a package is declared to be lazily
// // fetched. This makes the dependency only get fetched if it is
// // actually used.
// .lazy = false,
//},
.libxev = .{
.url = "https://github.com/mitchellh/libxev/archive/db6a52bafadf00360e675fefa7926e8e6c0e9931.tar.gz",
.hash = "12206029de146b685739f69b10a6f08baee86b3d0a5f9a659fa2b2b66c9602078bbf",
},
},
// Specifies the set of files and directories that are included in this package.
// Only files and directories listed here are included in the `hash` that
// is computed for this package. Only files listed here will remain on disk
// when using the zig package manager. As a rule of thumb, one should list
// files required for compilation plus any license(s).
// Paths are relative to the build root. Use the empty string (`""`) to refer to
// the build root itself.
// A directory listed here means that all files within, recursively, are included.
.paths = .{
"build.zig",
"build.zig.zon",
"src",
},
}
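// Wiring note: the `.libxev` entry above is what lets the benchmarks' `@import("xev")`
// resolve. A minimal sketch of the build.zig side, assuming the default `zig init`
// layout with an executable step named `exe` (names here are illustrative, not from
// this repo):
//
//     const xev_dep = b.dependency("libxev", .{ .target = target, .optimize = optimize });
//     exe.root_module.addImport("xev", xev_dep.module("xev"));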