const scr = blk: {
    // PVS
    if (found_pv) {
        // Once you've found a move with a score that is between alpha and beta,
        // the rest of the moves are searched with the goal of proving that they are all bad.
        // It's possible to do this a bit faster than a search that worries that one
        // of the remaining moves might be good.
        const s = -try negamax(gs, -alpha - 1, -alpha, depth - 1);

        // If the algorithm finds out that it was wrong, and that one of the
        // subsequent moves was better than the first PV move, it has to search again,
        // in the normal alpha-beta manner. This happens sometimes, and it's a waste of time,
        // but generally not often enough to counteract the savings gained from doing the
        // "bad move proof" search referred to earlier.
        if (s > alpha and s < beta) {
            break :blk -try negamax(gs, -beta, -alpha, depth - 1);
        }

        // Keep the fast search score
        break :blk s;
    }
    // Normal negamax without PVS
    break :blk -try negamax(gs, -beta, -alpha, depth - 1);
};

With LMR in the picture the score for a single move can be assigned up to three times (reduced search, narrow full-depth search, full re-search), which no longer fits the single break :blk shape, so scr becomes a plain mutable variable:

var scr: isize = undefined;
// PVS
if (found_pv) {
    // Once you've found a move with a score that is between alpha and beta,
    // the rest of the moves are searched with the goal of proving that they are all bad.
    // It's possible to do this a bit faster than a search that worries that one
    // of the remaining moves might be good.
    scr = -try negamax(gs, -alpha - 1, -alpha, depth - 1);

    // If the algorithm finds out that it was wrong, and that one of the
    // subsequent moves was better than the first PV move, it has to search again,
    // in the normal alpha-beta manner. This happens sometimes, and it's a waste of time,
    // but generally not often enough to counteract the savings gained from doing the
    // "bad move proof" search referred to earlier.
    if (scr > alpha and scr < beta) {
        scr = -try negamax(gs, -beta, -alpha, depth - 1);
    }
} else {
    // Normal negamax without PVS
    if (moves_searched == 0) {
        scr = -try negamax(gs, -beta, -alpha, depth - 1);
    } else {
        // Conditions to consider LMR
        if (moves_searched >= FULL_DEPTH_MOVES and
            depth >= REDUCTION_LIMIT and
            !mp.move.capture and mp.move.prom == .none and
            !gs.inCheck())
        {
            // reduced-depth, narrow-window search for lame moves
            scr = -try negamax(gs, -alpha - 1, -alpha, depth - 2);
        } else {
            // hack to ensure that the full-depth search below runs
            scr = alpha + 1;
        }

        // Found a good move during LMR (or the reduction was skipped)
        if (scr > alpha) {
            // full depth with a narrowed window
            scr = -try negamax(gs, -alpha - 1, -alpha, depth - 1);
            // if LMR fails do a full search
            if (scr > alpha and scr < beta) {
                scr = -try negamax(gs, -beta, -alpha, depth - 1);
            }
        }
    }
}
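To make the null-window/re-search dance easier to follow in isolation, here is a minimal, self-contained sketch of PVS over a toy game tree. Everything in it (the tree shape, the leaf scores, the pvs function and its fail-hard cutoff) is invented for the example; it is not the engine's negamax, and it skips move generation, LMR, and the PV table entirely.

const std = @import("std");

// Leaf scores of a complete binary tree of depth 3, seen from the side to move at the leaf.
const leaves = [_]isize{ 3, 17, 2, 12, 15, 25, 0, 4 };

var nodes_visited: usize = 0;

fn pvs(node: usize, depth: usize, alpha_in: isize, beta: isize) isize {
    nodes_visited += 1;
    if (depth == 0) return leaves[node];

    var alpha = alpha_in;
    var found_pv = false;
    var child: usize = 0;
    while (child < 2) : (child += 1) {
        const next = node * 2 + child;
        var scr: isize = undefined;
        if (found_pv) {
            // Null-window "proof" search: only try to show this move is bad.
            scr = -pvs(next, depth - 1, -alpha - 1, -alpha);
            if (scr > alpha and scr < beta) {
                // The proof failed; re-search with the full window.
                scr = -pvs(next, depth - 1, -beta, -alpha);
            }
        } else {
            scr = -pvs(next, depth - 1, -beta, -alpha);
        }
        if (scr >= beta) return beta; // fail-hard beta cutoff
        if (scr > alpha) {
            alpha = scr;
            found_pv = true;
        }
    }
    return alpha;
}

pub fn main() void {
    const best = pvs(0, 3, -1000, 1000);
    std.debug.print("best score = {d}, nodes visited = {d}\n", .{ best, nodes_visited });
}

The shape mirrors the engine code above: the first move that raises alpha flips found_pv, every later move gets the cheap (alpha, alpha + 1) window, and only the occasional fail inside the window triggers the expensive full re-search.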
const king_square = switch (self.side) {
    .white => @intToEnum(Square, @ctz(@bitCast(BoardType, self.bitboards[@enumToInt(Chess.PE.K)]))),
    .black => @intToEnum(Square, @ctz(@bitCast(BoardType, self.bitboards[@enumToInt(Chess.PE.k)]))),
};
if (self.isSquareAttacked(@enumToInt(king_square), self.side.enemy())) {
    // self.restore(bckup);
    return false;
}
With the king lookup pulled out into a helper, the legality check shrinks to a single line:

if (self.inCheck()) return false;

where inCheck does the same king-square extraction and attack test:
pub fn inCheck(self: *const @This()) bool {
    const king_square = switch (self.side) {
        .white => @intToEnum(Square, @ctz(@bitCast(BoardType, self.bitboards[@enumToInt(Chess.PE.K)]))),
        .black => @intToEnum(Square, @ctz(@bitCast(BoardType, self.bitboards[@enumToInt(Chess.PE.k)]))),
    };
    if (self.isSquareAttacked(@enumToInt(king_square), self.side.enemy())) return true;
    return false;
}
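The only slightly magic part of inCheck is the bitboard trick: the king bitboard has exactly one bit set, so @ctz (count trailing zeros) of it is the index of the king's square. Here is a tiny standalone demo of just that trick; the a1 = bit 0 ... h8 = bit 63 layout is the common little-endian rank-file mapping and is assumed for the example rather than read off the engine's Square enum.

const std = @import("std");

pub fn main() void {
    // A lone white king on e1. With a1 = bit 0 ... h8 = bit 63, e1 is square index 4.
    const king_bb: u64 = 1 << 4;

    // @ctz counts trailing zero bits, i.e. the index of the lowest set bit.
    // For a bitboard holding a single piece, that is the piece's square.
    const sq = @ctz(king_bb);
    const file: u8 = @as(u8, sq % 8);
    const rank: u8 = @as(u8, sq / 8);

    std.debug.print("king square = {d} -> {c}{d}\n", .{ sq, 'a' + file, rank + 1 });
}

In the engine that same index is wrapped back into a Square with @intToEnum and handed to isSquareAttacked together with the opposite side, which is exactly what both the inline version and inCheck above do.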