use rand::{seq::IteratorRandom, SeedableRng};
use rand_pcg::Pcg64Mcg;

use crate::game::Game;

/// A player that picks a uniformly random legal action, seeded for reproducibility.
pub struct RandomPlayer {
    rng: Pcg64Mcg,
}

impl RandomPlayer {
    pub fn new(seed: u64) -> RandomPlayer {
        RandomPlayer {
            rng: Pcg64Mcg::seed_from_u64(seed),
        }
    }

    pub fn act<G: Game>(&mut self, game: &G) -> <G as Game>::Action {
        game.actions()
            .as_ref()
            .iter()
            .choose(&mut self.rng)
            .unwrap()
            .clone()
    }
}
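// A minimal determinism sketch (the seed and test name are assumptions, not from
// the original suite): identical seeds should yield identical action choices on
// the same game state.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::matrix_game::{MatrixGame, RPS_PAYOUTS};

    #[test]
    fn same_seed_same_action() {
        let game = MatrixGame::new(RPS_PAYOUTS);
        // Two fresh players with identical seeds share identical RNG streams.
        let a = RandomPlayer::new(7).act(&game);
        let b = RandomPlayer::new(7).act(&game);
        assert_eq!(a, b);
    }
}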
use std::num::NonZeroU8;

use enum_map::Enum;

use crate::game::{Game, Player};

/// A 1-based action index (NonZeroU8 keeps `Option<MatrixAction>` a single byte).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct MatrixAction(NonZeroU8);

impl MatrixAction {
    pub fn new(a: u8) -> Self {
        Self(a.try_into().unwrap())
    }
}

#[derive(Debug, Clone)]
pub struct MatrixGame<const N: usize> {
    history: [Option<MatrixAction>; 2],
    payouts: [[f32; N]; N],
    actions: [MatrixAction; N],
}

/// Payoff to player 1: rows are player 1's action (Rock, Paper, Scissors),
/// columns are player 2's.
pub const RPS_PAYOUTS: [[f32; 3]; 3] = [[0., -1., 1.], [1., 0., -1.], [-1., 1., 0.]];

impl<const N: usize> MatrixGame<N> {
    pub fn new(payouts: [[f32; N]; N]) -> MatrixGame<N> {
        let mut actions = [MatrixAction(NonZeroU8::new(1).unwrap()); N];
        for i in 0..N {
            actions[i] = MatrixAction((i as u8 + 1).try_into().unwrap());
        }
        MatrixGame {
            history: Default::default(),
            payouts,
            actions,
        }
    }
}

impl<const N: usize> Game for MatrixGame<N> {
    type Action = MatrixAction;
    type Actions = [MatrixAction; N];
    type InfoSet = [Option<MatrixAction>; 2];

    const NUM_PLAYERS: usize = 2;
    const HAS_CHANCE: bool = false;

    fn actions(&self) -> &Self::Actions {
        &self.actions
    }

    fn info_set(&self, _p: Player) -> &Self::InfoSet {
        if self.is_terminal() {
            &self.history
        } else {
            &[None, None]
        }
    }

    fn player(&self) -> Player {
        match self.history[0] {
            None => Player::new_agent(1),
            Some(_) => Player::new_agent(2),
        }
    }

    fn play(&mut self, action: Self::Action) {
        self.history[self.player().into_idx()] = Some(action);
    }

    fn is_terminal(&self) -> bool {
        self.history[1].is_some()
    }

    fn value(&self, p: Player) -> f32 {
        let act_p0: usize = self.history[0].unwrap().0.get().into();
        let act_p1: usize = self.history[1].unwrap().0.get().into();
        let val_p0 = self.payouts[act_p0 - 1][act_p1 - 1];
        if p.into_usize() == 1 {
            val_p0
        } else {
            -val_p0
        }
    }
}

// #[cfg(test)]
// mod tests {
//     use super::*;
//
//     #[test]
//     fn it_works() {
//         let mut rps = MatrixGame::new(RPS_PAYOUTS);
//         assert!(!rps.is_terminal());
//         assert_eq!(rps.player(), 0);
//         rps.play(MatrixAction::new(1));
//         assert_eq!(rps.info_set(0), [Some(MatrixAction::new(1)), None]);
//         assert_eq!(rps.info_set(1), [None, None]);
//         assert!(!rps.is_terminal());
//         assert_eq!(rps.player(), 1);
//         rps.play(MatrixAction::new(2));
//         assert_eq!(
//             rps.info_set(0),
//             [Some(MatrixAction::new(1)), Some(MatrixAction::new(2))]
//         );
//         assert_eq!(
//             rps.info_set(1),
//             [Some(MatrixAction::new(1)), Some(MatrixAction::new(2))]
//         );
//         assert!(rps.is_terminal());
//         assert_eq!(rps.value(0), -1.0);
//         assert_eq!(rps.value(1), 1.0);
//         let mut rps = MatrixGame::new(RPS_PAYOUTS);
//         let mut rand_play = RandomPlayer::new(4);
//         rps.play(rand_play.act(&rps));
//         rps.play(rand_play.act(&rps));
//         assert!(-1. <= rps.value(0));
//         assert!(rps.value(0) <= 1.0);
//         assert!(-1. <= rps.value(1));
//         assert!(rps.value(1) <= 1.0);
//     }
// }
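// A small test against the current Player-based API (a sketch, not from the
// original suite; the commented-out module above still targets the old
// index-based API). Expected values follow RPS_PAYOUTS.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn rock_loses_to_paper() {
        let mut rps = MatrixGame::new(RPS_PAYOUTS);
        assert!(!rps.is_terminal());
        assert_eq!(rps.player(), Player::new_agent(1));
        rps.play(MatrixAction::new(1));
        assert_eq!(rps.player(), Player::new_agent(2));
        rps.play(MatrixAction::new(2));
        assert!(rps.is_terminal());
        // Rock (1) against Paper (2): player 1 loses one unit.
        assert_eq!(rps.value(Player::new_agent(1)), -1.0);
        assert_eq!(rps.value(Player::new_agent(2)), 1.0);
    }
}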
use dcfr::{
    blotto::ColonelBlotto,
    game::{Game, Player},
    lcfr::{expected_value, TabularLcfr},
    matrix_game::{MatrixGame, RPS_PAYOUTS},
};
use indicatif::ProgressIterator;
use itertools::Itertools;
use ordered_float::OrderedFloat;

fn main() {
    let game = MatrixGame::new(RPS_PAYOUTS);
    let mut cfr_trainer = TabularLcfr::new();
    for _ in (0..10000).progress() {
        cfr_trainer.single_iter(&game);
    }

    let game = ColonelBlotto::<10, 4>::new();
    let mut cfr_trainer = TabularLcfr::new();
    for _ in (0..100).progress() {
        cfr_trainer.single_iter(&game);
    }

    let start_strat = &cfr_trainer.strategy_sum[&(Player::new_agent(1), [None, None])];
    let s_sum = start_strat.iter().sum::<f32>();
    for (s, assignment) in start_strat
        .iter()
        .zip(game.actions().iter())
        .sorted_by_key(|(s, _a)| OrderedFloat(**s))
    {
        println!("{:?}: {:.5}", assignment, s / s_sum);
    }

    // let start_strat = &cfr_trainer.strategy[&(0, [None, None])];
    // for (s, assignment) in start_strat
    //     .iter()
    //     .zip(game.actions().iter())
    //     .sorted_by_key(|(s, _a)| OrderedFloat(**s))
    // {
    //     println!("{:?}: {:.5}", assignment, s);
    // }
    // let start_strat = &cfr_trainer.regret_sum[&(0, [None, None])];
    // for (s, assignment) in start_strat
    //     .iter()
    //     .zip(game.actions().iter())
    //     .sorted_by_key(|(s, _a)| OrderedFloat(**s))
    // {
    //     println!("{:?}: {:.5}", assignment, s);
    // }

    println!(
        "evs = {:?}",
        expected_value(game, &|player, infoset| {
            let strat = &cfr_trainer.strategy_sum[&(player, infoset.clone())];
            let s_sum: f32 = strat.iter().sum();
            strat.iter().map(|s| s / s_sum).collect()
        })
    );
}
use rand::{seq::IteratorRandom, SeedableRng};
use rand_pcg::Pcg64Mcg;
use std::{collections::BTreeMap, hash::Hash};

trait Game: Clone {
    type Action;
    type Actions: IntoIterator<Item = Self::Action>;
    type Player: Copy;
    type InfoSet: Clone + Hash + Eq;

    fn player(&self) -> Self::Player;
    fn info_set(&self, p: Self::Player) -> Self::InfoSet;
    fn actions(&self) -> Self::Actions;
    fn play(&mut self, action: Self::Action);
    fn is_terminal(&self) -> bool;
    fn value(&self, p: Self::Player) -> f32;
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct MatrixAction(usize);

#[derive(Debug, Clone)]
struct MatrixGame<const N: usize> {
    history: [Option<MatrixAction>; 2],
    payouts: [[f32; N]; N],
}

const RPS_PAYOUTS: [[f32; 3]; 3] = [[0., -1., 1.], [1., 0., -1.], [-1., 1., 0.]];

impl<const N: usize> MatrixGame<N> {
    fn new(payouts: [[f32; N]; N]) -> MatrixGame<N> {
        MatrixGame {
            history: Default::default(),
            payouts,
        }
    }
}

impl<const N: usize> Game for MatrixGame<N> {
    type Action = MatrixAction;
    type Actions = [Self::Action; N];
    type Player = usize;
    type InfoSet = [Option<MatrixAction>; 2];
#![feature(portable_simd, default_free_fn)]
    fn info_set(&self, p: Self::Player) -> Self::InfoSet {
        let mut infoset = self.history.clone();
        if !self.is_terminal() {
            infoset[1 - p] = None;
        }
        infoset
    }

    fn player(&self) -> Self::Player {
        match self.history[0] {
            None => 0,
            Some(_) => 1,
        }
    }

    fn play(&mut self, action: Self::Action) {
        self.history[self.player()] = Some(action);
    }

    fn is_terminal(&self) -> bool {
        self.history[1].is_some()
    }

    fn value(&self, p: Self::Player) -> f32 {
        let val_p0 = self.payouts[self.history[0].unwrap().0][self.history[1].unwrap().0];
        if p == 0 {
            val_p0
        } else {
            -val_p0
        }
    }
}

struct RandomPlayer {
    rng: Pcg64Mcg,
}

impl RandomPlayer {
    fn new(seed: u64) -> RandomPlayer {
        RandomPlayer {
            rng: Pcg64Mcg::seed_from_u64(seed),
        }
    }

    fn act<G: Game>(&mut self, game: &G) -> <G as Game>::Action {
        game.actions().into_iter().choose(&mut self.rng).unwrap()
    }
}

struct TabularLcfr<G: Game> {
    regret_sum: BTreeMap<(G::Player, G::InfoSet, G::Action), f32>,
    avg_strat: BTreeMap<(G::Player, G::InfoSet, G::Action), f32>,
}

impl<G: Game> TabularLcfr<G> {
    fn single_iter(&mut self, game: &mut G) {}

    fn walk(&mut self, game: &mut G) {}
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn it_works() {
        let mut rps = MatrixGame::new(RPS_PAYOUTS);
        assert!(!rps.is_terminal());
        assert_eq!(rps.player(), 0);
        rps.play(MatrixAction(0));
        assert_eq!(rps.info_set(0), [Some(MatrixAction(0)), None]);
        assert_eq!(rps.info_set(1), [None, None]);
        assert!(!rps.is_terminal());
        assert_eq!(rps.player(), 1);
        rps.play(MatrixAction(1));
        assert_eq!(
            rps.info_set(0),
            [Some(MatrixAction(0)), Some(MatrixAction(1))]
        );
        assert_eq!(
            rps.info_set(1),
            [Some(MatrixAction(0)), Some(MatrixAction(1))]
        );
        assert!(rps.is_terminal());
        assert_eq!(rps.value(0), -1.0);
        assert_eq!(rps.value(1), 1.0);

        let mut rps = MatrixGame::new(RPS_PAYOUTS);
        let mut rand_play = RandomPlayer::new(4);
        rps.play(rand_play.act(&rps));
        rps.play(rand_play.act(&rps));
        assert!(-1. <= rps.value(0));
        assert!(rps.value(0) <= 1.0);
        assert!(-1. <= rps.value(1));
        assert!(rps.value(1) <= 1.0);
    }
}
use game::Game;
use game::Player;
use std::collections::BTreeMap;

use tinyvec::TinyVec;

use crate::game::{Game, Player};

pub type ActionVec = TinyVec<[f32; 6]>;
pub type ReachVec = TinyVec<[f32; 6]>;

#[derive(Default)]
pub struct TabularLcfr<G: Game> {
    iteration: usize,
    pub regret_sum: BTreeMap<(Player, G::InfoSet), ActionVec>,
    pub strategy: BTreeMap<(Player, G::InfoSet), ActionVec>,
    pub strategy_sum: BTreeMap<(Player, G::InfoSet), ActionVec>,
}

impl<G: Game> TabularLcfr<G> {
    pub fn new() -> TabularLcfr<G> {
        TabularLcfr {
            iteration: 0,
            regret_sum: Default::default(),
            strategy: Default::default(),
            strategy_sum: Default::default(),
        }
    }

    pub fn single_iter(&mut self, game: &G)
    where
        G::Actions: Clone,
        G::InfoSet: Clone,
    {
        let reach: ReachVec = vec![1.0f32; G::NUM_PLAYERS].as_slice().into();
        for player in game.players().as_ref() {
            self.walk(game, *player, 1, reach.clone());
        }
        self.iteration += 1;
        // Linear CFR: discount accumulated regrets and strategy weights by t / (t + 1).
        let l_factor = self.iteration as f32 / (self.iteration as f32 + 1.);
        self.regret_sum
            .values_mut()
            .flat_map(|xs| xs.iter_mut())
            .for_each(|x| *x *= l_factor);
        self.strategy_sum
            .values_mut()
            .flat_map(|xs| xs.iter_mut())
            .for_each(|x| *x *= l_factor);
    }

    pub fn walk(&mut self, game: &G, player: Player, t: usize, reach: ReachVec) -> f32
    where
        G::Actions: Clone,
        G::InfoSet: Clone,
    {
        // dbg!((player, t, &reach));
        if game.is_terminal() {
            // return dbg!(game.value(player));
            return game.value(player);
        }
        let p = game.player();
        if p.is_chance() {
            todo!()
        }
        let info_set = game.info_set(p);
        let actions = game.actions();
        let zero_action_vec: ActionVec = actions.clone().as_ref().iter().map(|_| 0.0).collect();
        let sigma_t_i = self
            .strategy
            .entry((p, info_set.clone()))
            .or_insert_with(|| {
                actions
                    .clone()
                    .as_ref()
                    .iter()
                    .map(|_| 1. / actions.as_ref().len() as f32)
                    .collect()
            })
            .clone();

        // v_sigma is the value of the current strategy at this node;
        // v_sigma_i[a] is the value of deviating to action a.
        let mut v_sigma = 0.0;
        let mut v_sigma_i_vec = zero_action_vec.clone();
        let v_sigma_i = v_sigma_i_vec.as_mut_slice();
        for (a_idx, a) in actions.clone().as_ref().iter().enumerate() {
            let sigma_t_i_a = sigma_t_i[a_idx];
            let mut ha = game.clone();
            ha.play(a.clone());
            let mut reach = reach.clone();
            reach[p.into_idx()] *= sigma_t_i_a;
            let v_s_i_a = self.walk(&ha, player, t, reach);
            v_sigma_i[a_idx] = v_s_i_a;
            v_sigma += sigma_t_i_a * v_s_i_a;
        }

        let r_i = self
            .regret_sum
            .entry((p, info_set.clone()))
            .or_insert_with(|| zero_action_vec.clone())
            .as_mut_slice();
        let s_i = self
            .strategy_sum
            .entry((p, info_set.clone()))
            .or_insert_with(|| zero_action_vec.clone())
            .as_mut_slice();
        if p == player {
            // Weight regrets by the opponent's reach probability and the average
            // strategy by our own reach (two-player case).
            let pi_self = reach[p.into_idx()];
            let pi_other = reach[1 - p.into_idx()];
            for a_idx in 0..actions.as_ref().len() {
                r_i[a_idx] += pi_other * (v_sigma_i[a_idx] - v_sigma);
                s_i[a_idx] += pi_self * sigma_t_i[a_idx];
            }
        }

        // Regret matching: the next strategy is proportional to positive regret,
        // falling back to uniform when no action has positive regret.
        let sigma_t_i = self.strategy.get_mut(&(p, info_set.clone())).unwrap();
        let regret_sum = r_i.iter().cloned().map(|r| r.max(0.0)).sum::<f32>();
        if regret_sum == 0.0 {
            sigma_t_i
                .iter_mut()
                .for_each(|stia| *stia = 1.0 / actions.as_ref().len() as f32);
        } else {
            for (stia, ria) in sigma_t_i.iter_mut().zip(r_i.iter()) {
                *stia = ria.max(0.0) / regret_sum;
            }
        }
        // dbg!((v_sigma, info_set));
        v_sigma
    }
}

pub fn expected_value<G, FnStrat>(game: G, strategy: &FnStrat) -> ActionVec
where
    G: Game,
    FnStrat: Fn(Player, &G::InfoSet) -> ActionVec,
{
    if game.is_terminal() {
        return game
            .players()
            .as_ref()
            .iter()
            .map(|p| game.value(*p))
            .collect();
    }
    let p = game.player();
    let infoset = game.info_set(p);
    let mut ev: ActionVec = game.players().as_ref().iter().map(|_| 0.0).collect();
    game.actions()
        .as_ref()
        .iter()
        .map(|a| {
            let mut game = game.clone();
            game.play(a.clone());
            expected_value(game, strategy)
        })
        .zip(strategy(p, infoset))
        .for_each(|(v, s)| {
            ev.iter_mut()
                .zip(v.iter())
                .for_each(|(evp, vp)| *evp += s * vp)
        });
    ev
}
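// A convergence sketch (iteration count and tolerance are assumptions, not from
// the original suite): on rock-paper-scissors the normalized average strategy at
// the root should stay near uniform, the unique Nash equilibrium.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::matrix_game::{MatrixGame, RPS_PAYOUTS};

    #[test]
    fn rps_average_strategy_is_near_uniform() {
        let game = MatrixGame::new(RPS_PAYOUTS);
        let mut trainer = TabularLcfr::new();
        for _ in 0..10_000 {
            trainer.single_iter(&game);
        }
        // Player 1's root information set is [None, None] until the game ends.
        let root = &trainer.strategy_sum[&(Player::new_agent(1), [None, None])];
        let total: f32 = root.iter().sum();
        for s in root.iter() {
            assert!((s / total - 1.0 / 3.0).abs() < 0.05);
        }
    }
}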
use std::default::default;

use enum_map::Enum;
use rand::{seq::SliceRandom, SeedableRng};
use tinyvec::{tiny_vec, TinyVec};

use crate::{Game, Player};

#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Enum)]
pub enum Card {
    #[default]
    One,
    Two,
    Three,
    Four,
    Five,
    Six,
    Seven,
    Eight,
    Nine,
    Ten,
    Jack,
    Queen,
    King,
}

#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, PartialOrd, Ord, Enum)]
pub enum Action {
    Bet,
    Check,
    Fold,
    Call,
    #[default]
    Shuffle,
    Deal(Card),
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct History(Player, Action);

type InfoSet = TinyVec<[Option<History>; 15]>;
type ActionVec = TinyVec<[Action; 2]>;

#[derive(Debug, Clone)]
pub struct Kuhn<const N: usize = 2> {
    deck: TinyVec<[Card; 6]>,
    bets: TinyVec<[u8; 6]>,
    current_bet: u8,
    first_bet: Option<Player>,
    current_player: Player,
    history: InfoSet,
    info_sets: Vec<InfoSet>,
    winner: Option<Player>,
    rng: rand_pcg::Pcg64,
    shuffle: ActionVec,
    bet: ActionVec,
    call: ActionVec,
}

impl<const N: usize> Kuhn<N> {
    pub fn new(seed: u64) -> Self {
        Kuhn {
            // The N + 1 highest cards (Jack, Queen, King for the two-player game).
            deck: (Card::LENGTH - N - 1..Card::LENGTH)
                .map(Enum::from_usize)
                .collect(),
            bets: [0u8; N].into_iter().collect(),
            current_bet: 0,
            first_bet: None,
            current_player: Player::Chance,
            history: default(),
            info_sets: vec![default(); N],
            winner: None,
            rng: SeedableRng::seed_from_u64(seed),
            shuffle: [Action::Shuffle].into_iter().collect(),
            bet: tiny_vec!(Action::Bet, Action::Check),
            call: tiny_vec!(Action::Call, Action::Fold),
        }
    }

    // The hand ends when betting returns to the first bettor or the last player
    // checks: the highest card among players who matched the current bet wins.
    fn calculate_possible_winner(&mut self) {
        if self.first_bet == Some(self.current_player)
            || self.history[self.history.len() - 1]
                == Some(History(Player::from_usize(N), Action::Check))
        {
            self.winner = Some(
                self.deck
                    .iter()
                    .zip(self.players())
                    .skip(1)
                    .filter(|(_card, p)| self.bets[p.into_idx()] == self.current_bet)
                    .max()
                    .expect("couldn't find max card")
                    .1,
            )
        }
    }

    // Record the action in the public history, hiding other players' deals from
    // each player's information set.
    fn push_history(&mut self, h: History) {
        self.history.push(Some(h));
        for i in 0..N {
            self.info_sets[i].push(match h {
                History(p, Action::Deal(_)) if p.into_idx() != i => None,
                _ => Some(h),
            });
        }
    }
}

impl<const N: usize> Game for Kuhn<N> {
    type Action = Action;
    type Actions = TinyVec<[Action; 2]>;
    type InfoSet = InfoSet;

    const HAS_CHANCE: bool = true;
    const NUM_PLAYERS: usize = N;

    fn player(&self) -> Player {
        self.current_player
    }

    fn actions(&self) -> &Self::Actions {
        if self.current_player == Player::Chance {
            &self.shuffle
        } else if self.current_bet == 1 {
            &self.bet
        } else {
            &self.call
        }
    }

    fn play(&mut self, action: Self::Action) {
        match action {
            Action::Shuffle => {
                self.deck.shuffle(&mut self.rng);
                for i in 1..=N {
                    self.push_history(History(Player::from_usize(i), Action::Deal(self.deck[i])));
                    self.bets[i - 1] = 1;
                }
                self.current_bet = 1;
            }
            a @ Action::Bet => {
                self.first_bet = Some(self.current_player);
                self.current_bet += 1;
                self.bets[self.current_player.into_idx()] += 1;
                self.push_history(History(self.current_player, a));
            }
            a @ Action::Call => {
                self.bets[self.current_player.into_idx()] = self.current_bet;
                self.push_history(History(self.current_player, a));
            }
            a @ (Action::Check | Action::Fold) => {
                self.push_history(History(self.current_player, a));
            }
            Action::Deal(_) => unreachable!("players can't deal"),
        }
        self.current_player = Player::from_usize(self.current_player.into_usize() % N + 1);
        self.calculate_possible_winner();
    }

    fn value(&self, p: Player) -> f32 {
        if let Some(winner) = self.winner {
            if p == winner {
                f32::from(self.bets.iter().sum::<u8>() - self.bets[p.into_idx()])
            } else {
                -f32::from(self.bets[p.into_idx()])
            }
        } else {
            unreachable!("non-terminal game has no value")
        }
    }

    fn info_set(&self, p: Player) -> &Self::InfoSet {
        &self.info_sets[p.into_idx()]
    }

    fn is_terminal(&self) -> bool {
        self.winner.is_some()
    }
}

#[cfg(test)]
mod tests {
    use std::mem::{size_of, size_of_val};

    use more_asserts::assert_le;
    use rstest::*;

    use super::*;
    use Action::*;
    use Player::*;

    #[test]
    fn test_sizes() {
        assert_le!(size_of::<History>(), 2);
        assert_le!(size_of::<InfoSet>(), 32);
        let kuhn = Kuhn::<2>::new(4);
        assert_le!(size_of_val(&kuhn), 256);
        assert_le!(size_of_val(&kuhn.deck), 24);
        assert_le!(size_of_val(&kuhn.bets), 24);
    }

    #[test]
    fn test_all_check() {
        let mut kuhn = Kuhn::<2>::new(4);
        assert!(!kuhn.is_terminal());
        assert_eq!(kuhn.player(), Chance);
        assert_eq!(kuhn.actions().as_ref(), &[Shuffle]);
        kuhn.play(Shuffle);
        assert!(!kuhn.is_terminal());
        assert_eq!(kuhn.player(), Agent1);
        assert_eq!(kuhn.actions().as_ref(), &[Bet, Check]);
        kuhn.play(Check);
        assert!(!kuhn.is_terminal());
        assert_eq!(kuhn.player(), Agent2);
        assert_eq!(kuhn.actions().as_ref(), &[Bet, Check]);
        kuhn.play(Check);
        assert!(kuhn.is_terminal());
        if kuhn.deck[1] > kuhn.deck[2] {
            assert_eq!(kuhn.value(Agent1), 1.0);
            assert_eq!(kuhn.value(Agent2), -1.0);
        } else {
            assert_eq!(kuhn.value(Agent1), -1.0);
            assert_eq!(kuhn.value(Agent2), 1.0);
        }
    }

    #[rstest]
    #[case(&[Bet, Call], None, 2.0)]
    #[case(&[Bet, Fold], Some(Agent1), 1.0)]
    #[case(&[Check, Check], None, 1.0)]
    #[case(&[Check, Bet, Call], None, 2.0)]
    #[case(&[Check, Bet, Fold], Some(Agent2), 1.0)]
    fn test_game_history(
        #[case] actions: &[Action],
        #[case] winner: Option<Player>,
        #[case] value: f32,
    ) {
        let mut kuhn = Kuhn::<2>::new(4);
        kuhn.play(Shuffle);
        for a in actions {
            assert!(!kuhn.is_terminal());
            assert!(kuhn.actions().contains(a));
            kuhn.play(*a);
        }
        assert!(kuhn.is_terminal());
        let winner = winner.or(kuhn.winner).unwrap();
        assert_eq!(kuhn.value(winner), value, "{:?}", kuhn);
        assert_eq!(kuhn.value(Agent1), -kuhn.value(Agent2));
    }
}
use std::{fmt::Debug, hash::Hash};

use enum_map::Enum;
use tinyvec::TinyVec;

#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Enum)]
pub enum Player {
    #[default]
    Chance,
    Agent1,
    Agent2,
    Agent3,
    Agent4,
    Agent5,
    Agent6,
    Agent7,
    Agent8,
    Agent9,
}

impl Player {
    pub fn new_agent(n: u8) -> Player {
        assert_ne!(n, 0);
        Self::from_usize(n as usize)
    }

    pub fn is_chance(self) -> bool {
        match self {
            Self::Chance => true,
            _ => false,
        }
    }

    /// Zero-based index of an agent (chance has no index).
    pub fn into_idx(self) -> usize {
        self.into_usize() - 1
    }
}

pub trait Game: Clone {
    type Action: Clone;
    type Actions: AsRef<[Self::Action]>;
    type InfoSet: Clone + Hash + Eq + Ord + Debug;

    const NUM_PLAYERS: usize;
    const HAS_CHANCE: bool;

    fn player(&self) -> Player;
    fn info_set(&self, p: Player) -> &Self::InfoSet;
    fn actions(&self) -> &Self::Actions;
    fn play(&mut self, action: Self::Action);
    fn is_terminal(&self) -> bool;
    fn value(&self, p: Player) -> f32;

    fn players(&self) -> TinyVec<[Player; 8]> {
        let mut players = TinyVec::new();
        if Self::HAS_CHANCE {
            players.push(Player::Chance);
        }
        for i in 1..=Self::NUM_PLAYERS {
            players.push(Player::new_agent(i as u8));
        }
        players
    }
}
use std::cmp::Ordering;

use crate::game::{Game, Player};

/// An allocation of soldiers to battlefields.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct CbAssignment<const FIELDS: usize>([u8; FIELDS]);

#[derive(Debug, Clone)]
pub struct ColonelBlotto<const SOLDIERS: usize, const FIELDS: usize> {
    history: [Option<CbAssignment<FIELDS>>; 2],
    actions: Vec<CbAssignment<FIELDS>>,
}

impl<const SOLDIERS: usize, const FIELDS: usize> ColonelBlotto<SOLDIERS, FIELDS> {
    pub fn new() -> Self {
        let mut actions = vec![];
        let mut assignment = [0; FIELDS];
        Self::create_actions(0, &mut assignment, SOLDIERS as u8, &mut actions);
        Self {
            history: Default::default(),
            actions,
        }
    }

    // Recursively enumerate every way to split the remaining soldiers over the
    // remaining fields; the last field takes whatever is left.
    fn create_actions(
        i: usize,
        partial_action: &mut [u8; FIELDS],
        soldiers_left: u8,
        out: &mut Vec<CbAssignment<FIELDS>>,
    ) {
        if i == FIELDS - 1 {
            partial_action[i] = soldiers_left;
            out.push(CbAssignment(partial_action.clone()))
        } else {
            for a in 0..=soldiers_left {
                partial_action[i] = a;
                Self::create_actions(i + 1, partial_action, soldiers_left - a, out);
            }
        }
    }
}

impl<const SOLDIERS: usize, const FIELDS: usize> Game for ColonelBlotto<SOLDIERS, FIELDS> {
    type Action = CbAssignment<FIELDS>;
    type Actions = Vec<Self::Action>;
    type InfoSet = [Option<CbAssignment<FIELDS>>; 2];

    const NUM_PLAYERS: usize = 2;
    const HAS_CHANCE: bool = false;

    fn play(&mut self, action: Self::Action) {
        self.history[self.player().into_idx()] = Some(action);
    }

    // Compare field by field (weighted by field index), then clamp the total to a
    // win/draw/loss value in {-1, 0, 1}.
    fn value(&self, p: Player) -> f32 {
        self.history[p.into_idx()]
            .as_ref()
            .unwrap()
            .0
            .iter()
            .zip(self.history[1 - p.into_idx()].as_ref().unwrap().0)
            .enumerate()
            .map(|(i, (fp, fo))| match fp.cmp(&fo) {
                Ordering::Less => -(1 + i as i16),
                Ordering::Equal => 0,
                Ordering::Greater => 1 + i as i16,
            })
            .sum::<i16>()
            .clamp(-1, 1)
            .into()
    }

    fn player(&self) -> Player {
        match self.history[0] {
            None => Player::new_agent(1),
            Some(_) => Player::new_agent(2),
        }
    }

    fn actions(&self) -> &Self::Actions {
        &self.actions
    }

    fn info_set(&self, _p: Player) -> &Self::InfoSet {
        if self.is_terminal() {
            &self.history
        } else {
            &[None, None]
        }
    }

    fn is_terminal(&self) -> bool {
        self.history[1].is_some()
    }
}
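// A sanity-check sketch (not from the original suite): the action count follows
// the stars-and-bars formula C(SOLDIERS + FIELDS - 1, FIELDS - 1), and payoffs
// are zero-sum and clamped to [-1, 1].
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn action_count_and_zero_sum() {
        let mut game = ColonelBlotto::<10, 4>::new();
        // 10 soldiers over 4 fields: C(13, 3) = 286 assignments.
        assert_eq!(game.actions().len(), 286);

        let a = game.actions()[0].clone();
        let b = game.actions()[1].clone();
        game.play(a);
        assert!(!game.is_terminal());
        game.play(b);
        assert!(game.is_terminal());

        let v1 = game.value(Player::new_agent(1));
        let v2 = game.value(Player::new_agent(2));
        assert_eq!(v1, -v2);
        assert!(v1 >= -1.0 && v1 <= 1.0);
    }
}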
[toolchain]
channel = "nightly"
rstest = "0.17.0"
tinyvec = { version = "1.6.0", features = ["std", "alloc", "tinyvec_macros", "rustc_1_57"] }

[profile.release]
debug = true