pijul_org / pijul

New table to track partial repository outputs

By blabla on May 4, 2018
This patch is not signed.
9W9QmQBNHCA8yMzLUQxpVfUphZ4oxPCGgHxV8vzD4s2QyzqZA5W65Wrt1PtiqAKaRKDAipicemA7HbdiTPa43AqA
This patch is in the following branches:
latest
master
testing


1
2
3
4
5
6
7
8
9
10
11


14


17

19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
        partial_path: Option<&str>,
                self.output_repository::<&[&str]>(branch_name, &r, &[][..], &pending, &local_pending)?;
use backend::*;
use patch::*;
use rand;
use record::{InodeUpdate, RecordState};
use std::collections::HashSet;
use std::path::Path;
use {Error, Result};
mod apply;
pub mod find_alive;
mod repair_deleted_context;
use diff;

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
use fs_representation::RepoRoot;

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

================================
use fs_representation::{RepoRoot, in_repo_root};

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
use output;
use output::ConflictingFile;

impl<U: Transaction, R> GenericTxn<U, R> {
    /// Return the patch id corresponding to `e`, or `internal` if `e==None`.
    ///
    /// `Some(Hash::None)` maps to the root patch id. Any other hash is
    /// looked up in the `internal` table; this panics (`unwrap`) if the
    /// hash has not been registered.
    pub fn internal_hash(&self, e: &Option<Hash>, internal: PatchId) -> PatchId {
        match *e {
            Some(Hash::None) => ROOT_PATCH_ID.clone(),
            Some(ref h) => self.get_internal(h.as_ref()).unwrap().to_owned(),
            None => internal.clone(),
        }
    }

    /// Fetch the internal key for this external key (or `internal` if
    /// `key.patch` is `None`).
    pub fn internal_key(&self, key: &Key<Option<Hash>>, internal: PatchId) -> Key<PatchId> {
        // debug!("internal_key: {:?} {:?}", key, internal);
        Key {
            // Translate only the patch part; the line number is unchanged.
            patch: self.internal_hash(&key.patch, internal),
            line: key.line.clone(),
        }
    }

    /// Like `internal_key`, but with no fallback patch id: panics if
    /// `key.patch` is `None` or if the hash is not in the `internal` table.
    pub fn internal_key_unwrap(&self, key: &Key<Option<Hash>>) -> Key<PatchId> {
        Key {
            patch: self
                .get_internal(key.patch.as_ref().unwrap().as_ref())
                .unwrap()
                .to_owned(),
            line: key.line.clone(),
        }
    }
}

impl<'env, T: rand::Rng> MutTxn<'env, T> {
    /// Assumes all patches have been downloaded. The third argument
    /// `remote_patches` needs to contain at least all the patches we
    /// want to apply, and the fourth one `local_patches` at least all
    /// the patches the other repository doesn't have.
    pub fn apply_patches<F, P: output::ToPrefixes>(
        &mut self,
        diff_algorithm: diff::Algorithm,
        branch: &mut Branch,
        r: &RepoRoot<impl AsRef<Path>>,
        remote_patches: &[(Hash, Patch)],
        partial_paths: P,
        mut f: F,












1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601

602
603
604
605
606
607
608
609

610
611
612
613
614

615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
        self.txn
            .set_root(Root::Partials as usize, self.dbs.touched_files);
    pub fn iter_partials<'a>(&'a self, key: Option<&SmallStr>) -> PartialsIterator<'a, U> {
        PartialsIterator(
            self.txn
                .iter(&self.dbs.partials, key.map(|k| (k.to_unsafe(), None))),
        )
    pub fn put_partials(&mut self, name: SmallStr, path: Key<PatchId>) -> Result<bool> {
            name.to_unsafe(),
    pub fn del_partials(&mut self, name: &SmallStr) -> Result<bool> {
            name.to_unsafe(),
        )? {
use hex;
use rand;
use sanakirja;
use sanakirja::Representable;
pub use sanakirja::Transaction;
use std;
use std::path::Path;
use {Error, Result};

pub use self::patch_id::*;

/// Decode the hexadecimal string `hex` into `s`, two digits per byte,
/// returning `false` if `hex` is too long for `s` or contains a
/// non-hex character. An odd trailing digit ends up in the high nibble
/// of its byte. Bytes of `s` beyond the decoded prefix are left
/// untouched, and decoded bytes are OR-shifted into the existing
/// contents — callers are expected to pass a zeroed buffer.
fn from_hex(hex: &str, s: &mut [u8]) -> bool {
    let bytes = hex.as_bytes();
    if bytes.len() > 2 * s.len() {
        return false;
    }
    for (i, &c) in bytes.iter().enumerate() {
        let nibble = match c.to_ascii_lowercase() {
            h @ b'0'..=b'9' => h - b'0',
            h @ b'a'..=b'f' => h - b'a' + 10,
            _ => return false,
        };
        s[i / 2] = s[i / 2] << 4 | nibble;
    }
    // A lone trailing digit was written into the low nibble above;
    // move it up to the high nibble.
    if bytes.len() & 1 == 1 {
        s[bytes.len() / 2] <<= 4;
    }
    true
}
mod edge;
mod file_header;
mod file_id;
mod hash;
mod inode;
mod key;
mod patch_id;
mod small_string;

pub use self::edge::*;
pub use self::file_header::*;
pub use self::file_id::*;
pub use self::hash::*;
pub use self::inode::*;
pub use self::key::*;
pub use self::small_string::*;

pub type NodesDb = sanakirja::Db<self::key::Key<PatchId>, self::edge::Edge>;

/// The type of patch application numbers.
pub type ApplyTimestamp = u64;

/// The u64 is the epoch time in seconds when this patch was applied
/// to the repository.
type PatchSet = sanakirja::Db<self::patch_id::PatchId, ApplyTimestamp>;

type RevPatchSet = sanakirja::Db<ApplyTimestamp, self::patch_id::PatchId>;

// All the tables of a repository. Each field is loaded from (and, in
// `commit`, written back to) the matching slot of the `Root` enum.
pub struct Dbs {
    /// A map of the files in the working copy.
    tree: sanakirja::Db<self::file_id::UnsafeFileId, self::inode::Inode>,
    /// The reverse of tree.
    revtree: sanakirja::Db<self::inode::Inode, self::file_id::UnsafeFileId>,
    /// A map from inodes (in tree) to keys in branches.
    inodes: sanakirja::Db<self::inode::Inode, self::file_header::FileHeader>,
    /// The reverse of inodes, minus the header.
    revinodes: sanakirja::Db<self::key::Key<PatchId>, self::inode::Inode>,
    /// Text contents of keys.
    contents: sanakirja::Db<self::key::Key<PatchId>, sanakirja::value::UnsafeValue>,
    /// A map from external patch hashes to internal ids.
    internal: sanakirja::Db<self::hash::UnsafeHash, self::patch_id::PatchId>,
    /// The reverse of internal.
    external: sanakirja::Db<self::patch_id::PatchId, self::hash::UnsafeHash>,
    /// A reverse map of patch dependencies, i.e. (k,v) is in this map
    /// means that v depends on k.
    revdep: sanakirja::Db<self::patch_id::PatchId, self::patch_id::PatchId>,
    /// A map from branch names to graphs. Each value bundles the branch
    /// graph, its patch set, the reverse patch set, and the apply counter
    /// (see `Branch`).
    branches:
        sanakirja::Db<self::small_string::UnsafeSmallStr, (NodesDb, PatchSet, RevPatchSet, u64)>,
    /// A map of edges to patches that remove them.
    cemetery: sanakirja::Db<(self::key::Key<PatchId>, self::edge::Edge), self::patch_id::PatchId>,
    /// Dependencies
    dep: sanakirja::Db<self::patch_id::PatchId, self::patch_id::PatchId>,
    /// Files touched by patches.
    touched_files: sanakirja::Db<self::key::Key<PatchId>, self::patch_id::PatchId>,
    /// Partial checkouts: branch -> partial
    partials: sanakirja::Db<self::small_string::UnsafeSmallStr, self::key::Key<PatchId>>,
}

/// Common type for both mutable transactions (`MutTxn`) and immutable
/// transaction (`Txn`). All of `Txn`'s methods are also `MutTxn`'s
/// methods.
pub struct GenericTxn<T, R> {
    // The underlying sanakirja transaction.
    #[doc(hidden)]
    pub txn: T,
    // Random-number source handed to sanakirja's put/del operations;
    // `()` for immutable transactions (see the `Txn` alias below).
    #[doc(hidden)]
    pub rng: R,
    // Handles to all the tables, loaded once when the transaction starts.
    #[doc(hidden)]
    pub dbs: Dbs,
}

/// A mutable transaction on a repository.
pub type MutTxn<'env, R> = GenericTxn<sanakirja::MutTxn<'env, ()>, R>;
/// An immutable transaction on a repository.
pub type Txn<'env> = GenericTxn<sanakirja::Txn<'env>, ()>;

/// The default name of a branch, for users who start working before
/// choosing branch names (or like the default name, "master").
pub const DEFAULT_BRANCH: &'static str = "master";

/// A repository. All operations on repositories must be done via transactions.
pub struct Repository {
    pub env: sanakirja::Env,
}

// Root-page slots of the repository file: each variant's discriminant
// (`as usize`) indexes a root entry on disk (see `OpenDb::open_db` and
// `MutTxn::commit`). Only append new variants at the end — reordering
// or removing existing ones would silently change the meaning of
// existing repository files.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum Root {
    Tree,
    RevTree,
    Inodes,
    RevInodes,
    Contents,
    Internal,
    External,
    RevDep,
    Branches,
    Cemetery,
    TouchedFiles,
    Dep,
    RevTouchedFiles,
    Partials,
}

trait OpenDb: Transaction {
    fn open_db<K: Representable, V: Representable>(
        &mut self,
        num: Root,
    ) -> Result<sanakirja::Db<K, V>> {
        if let Some(db) = self.root(num as usize) {
            Ok(db)
        } else {
            Err(Error::NoDb(num))
        }
    }
}

impl<'a, T> OpenDb for sanakirja::MutTxn<'a, T> {
    /// Mutable transactions create a fresh, empty table when the root
    /// entry is missing, instead of failing.
    fn open_db<K: Representable, V: Representable>(
        &mut self,
        num: Root,
    ) -> Result<sanakirja::Db<K, V>> {
        match self.root(num as usize) {
            Some(db) => Ok(db),
            None => Ok(self.create_db()?),
        }
    }
}
// Read-only transactions keep the default `open_db`, which returns
// `Error::NoDb` for missing tables instead of creating them.
impl<'a> OpenDb for sanakirja::Txn<'a> {}

// Repositories need at least 2^5 = 32 pages, each of size 2^12.
const MIN_REPO_SIZE: u64 = 1 << 17;

impl Repository {
    #[doc(hidden)]
    pub fn size(&self) -> u64 {
        self.env.size()
    }

    #[doc(hidden)]
    pub fn repository_size<P: AsRef<Path>>(path: P) -> Result<u64> {
        let size = sanakirja::Env::file_size(path.as_ref())?;
        debug!("repository_size = {:?}", size);
        Ok(size)
    }

    /// Open a repository, possibly increasing the size of the
    /// underlying file if `size_increase` is `Some(…)`.
    pub fn open<P: AsRef<Path>>(path: P, size_increase: Option<u64>) -> Result<Self> {
        let size = if let Some(size) = size_increase {
            Repository::repository_size(path.as_ref()).unwrap_or(MIN_REPO_SIZE)
                + std::cmp::max(size, MIN_REPO_SIZE)
        } else {
            if let Ok(len) = Repository::repository_size(path.as_ref()) {
                std::cmp::max(len, MIN_REPO_SIZE)
            } else {
                MIN_REPO_SIZE
            }
        };
        Ok(Repository {
            env: sanakirja::Env::new(path, size)?,
        })
    }

    /// Open a repository, possibly increasing the size of the
    /// underlying file if `size_increase` is `Some(…)`.
    pub unsafe fn open_nolock<P: AsRef<Path>>(path: P, size_increase: Option<u64>) -> Result<Self> {
        let size = if let Some(size) = size_increase {
            Repository::repository_size(path.as_ref()).unwrap_or(MIN_REPO_SIZE)
                + std::cmp::max(size, MIN_REPO_SIZE)
        } else {
            if let Ok(len) = Repository::repository_size(path.as_ref()) {
                std::cmp::max(len, MIN_REPO_SIZE)
            } else {
                MIN_REPO_SIZE
            }
        };
        debug!("sanakirja::Env::new_nolock");
        Ok(Repository {
            env: sanakirja::Env::new_nolock(path, size)?,
        })
    }

    /// Close a repository. It is undefined behaviour to use it afterwards.
    pub unsafe fn close(&mut self) {
        self.env.close()
    }

    /// Start an immutable transaction. Immutable transactions can run
    /// concurrently.
    pub fn txn_begin(&self) -> Result<Txn> {
        let mut txn = self.env.txn_begin()?;
        let dbs = Dbs::new(&mut txn)?;
        let repo = GenericTxn {
            txn: txn,
            rng: (),
            dbs: dbs,
        };
        Ok(repo)
    }

    /// Start a mutable transaction. Mutable transactions exclude each
    /// other, but can in principle be run concurrently with immutable
    /// transactions. In that case, the immutable transaction only
    /// have access to the state of the repository immediately before
    /// the mutable transaction started.
    pub fn mut_txn_begin<R: rand::Rng>(&self, r: R) -> Result<MutTxn<R>> {
        let mut txn = self.env.mut_txn_begin()?;
        let dbs = Dbs::new(&mut txn)?;
        let repo = GenericTxn {
            txn: txn,
            rng: r,
            dbs: dbs,
        };
        Ok(repo)
    }
}

impl Dbs {
    /// Load every table from its `Root` slot; with a mutable transaction,
    /// missing tables are created empty (see `OpenDb`).
    ///
    /// NOTE: each `open_db` call must pass the `Root` variant matching the
    /// field its result is stored in — the compiler cannot check this.
    fn new<T: OpenDb>(txn: &mut T) -> Result<Self> {
        let external = txn.open_db(Root::External)?;
        let branches = txn.open_db(Root::Branches)?;
        let tree = txn.open_db(Root::Tree)?;
        let revtree = txn.open_db(Root::RevTree)?;
        let inodes = txn.open_db(Root::Inodes)?;
        let revinodes = txn.open_db(Root::RevInodes)?;
        let internal = txn.open_db(Root::Internal)?;
        let contents = txn.open_db(Root::Contents)?;
        let revdep = txn.open_db(Root::RevDep)?;
        let cemetery = txn.open_db(Root::Cemetery)?;
        let dep = txn.open_db(Root::Dep)?;
        let touched_files = txn.open_db(Root::TouchedFiles)?;
        let partials = txn.open_db(Root::Partials)?;

        Ok(Dbs {
            external,
            branches,
            inodes,
            tree,
            revtree,
            revinodes,
            internal,
            revdep,
            contents,
            cemetery,
            dep,
            touched_files,
            partials,
        })
    }
}

/// The representation of a branch. The "application number" of a
/// patch on a branch is the state of the application counter at the
/// time the patch has been applied to that branch.
#[derive(Debug)]
pub struct Branch {
    /// The table containing the branch graph.
    pub db: NodesDb,
    /// The map of all patches applied to that branch, ordered by patch hash.
    pub patches: PatchSet,
    /// The map of all patches applied to that branch, ordered by application number.
    pub revpatches: RevPatchSet,
    /// The number of patches that have been applied on that branch,
    /// including patches that are no longer on the branch (i.e. that
    /// have been unrecorded). Never decreases.
    pub apply_counter: u64,
    /// Branch name. Used as the key in the `branches` table when the
    /// branch is committed.
    pub name: small_string::SmallString,
}

use sanakirja::Commit;
/// Branches and commits.
impl<'env, R: rand::Rng> MutTxn<'env, R> {
    /// Open a branch by name, creating an empty branch with that name
    /// if the name doesn't exist.
    ///
    /// The returned `Branch` is a value: updates to it only become
    /// visible in the `branches` table once `commit_branch` is called.
    pub fn open_branch<'name>(&mut self, name: &str) -> Result<Branch> {
        let name = small_string::SmallString::from_str(name);
        let (branch, patches, revpatches, counter) = if let Some(x) =
            self.txn
                .get(&self.dbs.branches, name.as_small_str().to_unsafe(), None)
        {
            x
        } else {
            // Unknown name: start from three empty tables and a zero
            // application counter.
            (
                self.txn.create_db()?,
                self.txn.create_db()?,
                self.txn.create_db()?,
                0,
            )
        };
        Ok(Branch {
            db: branch,
            patches: patches,
            revpatches: revpatches,
            name: name,
            apply_counter: counter,
        })
    }

    /// Commit a branch. This is a extremely important thing to do on
    /// branches, and it is not done automatically when committing
    /// transactions.
    ///
    /// **I repeat: not calling this method before committing a
    /// transaction might cause database corruption.**
    pub fn commit_branch(&mut self, branch: Branch) -> Result<()> {
        debug!("Commit_branch. This is not too safe.");
        // Since we are replacing the value, we don't want to
        // decrement its reference counter (which del would do), hence
        // the transmute.
        //
        // This would normally be wrong. The only reason it works is
        // because we know that dbs_branches has never been forked
        // from another database, hence all the reference counts to
        // its elements are 1 (and therefore represented as "not
        // referenced" in Sanakirja.
        //
        // The `(u64, u64, u64, u64)` view has the same layout as the
        // real `(NodesDb, PatchSet, RevPatchSet, u64)` value but no
        // child-table semantics, so `del` below cannot recursively
        // drop the branch's tables.
        let mut dbs_branches: sanakirja::Db<UnsafeSmallStr, (u64, u64, u64, u64)> =
            unsafe { std::mem::transmute(self.dbs.branches) };

        debug!("Commit_branch, dbs_branches = {:?}", dbs_branches);
        self.txn.del(
            &mut self.rng,
            &mut dbs_branches,
            branch.name.as_small_str().to_unsafe(),
            None,
        )?;
        debug!("Commit_branch, dbs_branches = {:?}", dbs_branches);
        // Restore the properly-typed view before re-inserting the
        // branch with its up-to-date tables and counter.
        self.dbs.branches = unsafe { std::mem::transmute(dbs_branches) };
        self.txn.put(
            &mut self.rng,
            &mut self.dbs.branches,
            branch.name.as_small_str().to_unsafe(),
            (
                branch.db,
                branch.patches,
                branch.revpatches,
                branch.apply_counter,
            ),
        )?;
        debug!("Commit_branch, self.dbs.branches = {:?}", self.dbs.branches);
        Ok(())
    }

    /// Rename a branch. The branch still needs to be committed after
    /// this operation.
    ///
    /// Fails with `Error::BranchNameAlreadyExists` if a branch named
    /// `new_name` already exists.
    pub fn rename_branch(&mut self, branch: &mut Branch, new_name: &str) -> Result<()> {
        debug!("Commit_branch. This is not too safe.");
        // Since we are replacing the value, we don't want to
        // decrement its reference counter (which del would do), hence
        // the transmute.
        //
        // Read the note in `commit_branch` to understand why this
        // works.
        let name_exists = self.get_branch(new_name).is_some();
        if name_exists {
            Err(Error::BranchNameAlreadyExists(new_name.to_string()))
        } else {
            // Delete the entry under the old name only; the re-insert
            // under the new name happens at the next `commit_branch`.
            let mut dbs_branches: sanakirja::Db<UnsafeSmallStr, (u64, u64, u64, u64)> =
                unsafe { std::mem::transmute(self.dbs.branches) };
            self.txn.del(
                &mut self.rng,
                &mut dbs_branches,
                branch.name.as_small_str().to_unsafe(),
                None,
            )?;
            self.dbs.branches = unsafe { std::mem::transmute(dbs_branches) };
            branch.name.clone_from_str(new_name);
            Ok(())
        }
    }

    /// Commit a transaction. **Be careful to commit all open branches
    /// before**.
    ///
    /// Writes every table handle back to its `Root` slot (each field of
    /// `Dbs` must appear exactly once here), then commits the underlying
    /// sanakirja transaction.
    pub fn commit(mut self) -> Result<()> {
        self.txn.set_root(Root::Tree as usize, self.dbs.tree);
        self.txn.set_root(Root::RevTree as usize, self.dbs.revtree);
        self.txn.set_root(Root::Inodes as usize, self.dbs.inodes);
        self.txn
            .set_root(Root::RevInodes as usize, self.dbs.revinodes);
        self.txn
            .set_root(Root::Contents as usize, self.dbs.contents);
        self.txn
            .set_root(Root::Internal as usize, self.dbs.internal);
        self.txn
            .set_root(Root::External as usize, self.dbs.external);
        self.txn
            .set_root(Root::Branches as usize, self.dbs.branches);
        self.txn.set_root(Root::RevDep as usize, self.dbs.revdep);
        self.txn
            .set_root(Root::Cemetery as usize, self.dbs.cemetery);
        self.txn.set_root(Root::Dep as usize, self.dbs.dep);
        self.txn
            .set_root(Root::TouchedFiles as usize, self.dbs.touched_files);
        self.txn
            .set_root(Root::Partials as usize, self.dbs.partials);

        self.txn.commit()?;
        Ok(())
    }
}

use sanakirja::value::*;
use sanakirja::{Cursor, RevCursor};
/// Iterates the `tree` table (working-copy file ids → inodes).
pub struct TreeIterator<'a, T: Transaction + 'a>(Cursor<'a, T, UnsafeFileId, Inode>);

impl<'a, T: Transaction + 'a> Iterator for TreeIterator<'a, T> {
    type Item = (FileId<'a>, Inode);
    fn next(&mut self) -> Option<Self::Item> {
        debug!("tree iter");
        let (k, v) = self.0.next()?;
        debug!("tree iter: {:?} {:?}", k, v);
        // Keys are stored as raw file ids; hand out the safe borrowed view.
        Some((unsafe { FileId::from_unsafe(k) }, v))
    }
}

/// Iterates the `revtree` table (inodes → working-copy file ids).
pub struct RevtreeIterator<'a, T: Transaction + 'a>(Cursor<'a, T, Inode, UnsafeFileId>);

impl<'a, T: Transaction + 'a> Iterator for RevtreeIterator<'a, T> {
    type Item = (Inode, FileId<'a>);
    fn next(&mut self) -> Option<Self::Item> {
        // Values are stored as raw file ids; convert to the safe view.
        self.0
            .next()
            .map(|(k, v)| (k, unsafe { FileId::from_unsafe(v) }))
    }
}

/// Iterates a branch graph (keys → edges).
pub struct NodesIterator<'a, T: Transaction + 'a>(Cursor<'a, T, Key<PatchId>, Edge>);

impl<'a, T: Transaction> Iterator for NodesIterator<'a, T> {
    type Item = (Key<PatchId>, Edge);
    fn next(&mut self) -> Option<Self::Item> {
        // The previous `if let Some((k, v)) … Some((k, v))` was an
        // identity re-wrap; key and value types need no conversion, so
        // forward the cursor directly (like `PatchesIterator` et al.).
        self.0.next()
    }
}
/// Iterates the `branches` table, yielding each branch as an owned `Branch`.
pub struct BranchIterator<'a, T: Transaction + 'a>(
    Cursor<'a, T, UnsafeSmallStr, (NodesDb, PatchSet, RevPatchSet, u64)>,
);

impl<'a, T: Transaction + 'a> Iterator for BranchIterator<'a, T> {
    type Item = Branch;
    fn next(&mut self) -> Option<Self::Item> {
        let (name, (db, patches, revpatches, apply_counter)) = self.0.next()?;
        Some(Branch {
            // Keys of this table are written via `SmallStr::to_unsafe`
            // (see `commit_branch`), so the raw-to-safe conversion holds.
            name: unsafe { SmallStr::from_unsafe(name) }.to_owned(),
            db,
            patches,
            revpatches,
            apply_counter,
        })
    }
}

// Iterates a `PatchSet` table: patch id → application timestamp.
pub struct PatchesIterator<'a, T: Transaction + 'a>(Cursor<'a, T, PatchId, ApplyTimestamp>);

impl<'a, T: Transaction + 'a> Iterator for PatchesIterator<'a, T> {
    type Item = (PatchId, ApplyTimestamp);
    fn next(&mut self) -> Option<Self::Item> {
        // Key and value types are already safe; no conversion needed.
        self.0.next()
    }
}

// Iterates a `RevPatchSet` table in reverse (most recent application first):
// application timestamp → patch id.
pub struct RevAppliedIterator<'a, T: Transaction + 'a>(RevCursor<'a, T, ApplyTimestamp, PatchId>);

impl<'a, T: Transaction + 'a> Iterator for RevAppliedIterator<'a, T> {
    type Item = (ApplyTimestamp, PatchId);
    fn next(&mut self) -> Option<Self::Item> {
        self.0.next()
    }
}

// Iterates a `RevPatchSet` table in forward order:
// application timestamp → patch id.
pub struct AppliedIterator<'a, T: Transaction + 'a>(Cursor<'a, T, ApplyTimestamp, PatchId>);

impl<'a, T: Transaction + 'a> Iterator for AppliedIterator<'a, T> {
    type Item = (ApplyTimestamp, PatchId);
    fn next(&mut self) -> Option<Self::Item> {
        self.0.next()
    }
}

// Iterates the `inodes` table: inode → file header.
pub struct InodesIterator<'a, T: Transaction + 'a>(Cursor<'a, T, Inode, FileHeader>);

impl<'a, T: Transaction + 'a> Iterator for InodesIterator<'a, T> {
    type Item = (Inode, FileHeader);
    fn next(&mut self) -> Option<Self::Item> {
        self.0.next()
    }
}

/// Iterates the `internal` table: external hash → internal patch id.
pub struct InternalIterator<'a, T: Transaction + 'a>(Cursor<'a, T, UnsafeHash, PatchId>);

impl<'a, T: Transaction + 'a> Iterator for InternalIterator<'a, T> {
    type Item = (HashRef<'a>, PatchId);
    fn next(&mut self) -> Option<Self::Item> {
        // Wrap the raw hash key in its safe borrowed view before yielding.
        self.0
            .next()
            .map(|(k, v)| unsafe { (HashRef::from_unsafe(k), v) })
    }
}
/// Iterates the `external` table: internal patch id → external hash.
pub struct ExternalIterator<'a, T: Transaction + 'a>(Cursor<'a, T, PatchId, UnsafeHash>);

impl<'a, T: Transaction + 'a> Iterator for ExternalIterator<'a, T> {
    type Item = (PatchId, HashRef<'a>);
    fn next(&mut self) -> Option<Self::Item> {
        // Wrap the raw hash value in its safe borrowed view before yielding.
        self.0
            .next()
            .map(|(k, v)| unsafe { (k, HashRef::from_unsafe(v)) })
    }
}

// Iterates the `revdep` table: (k, v) means v depends on k.
pub struct RevdepIterator<'a, T: Transaction + 'a>(Cursor<'a, T, PatchId, PatchId>);

impl<'a, T: Transaction + 'a> Iterator for RevdepIterator<'a, T> {
    type Item = (PatchId, PatchId);
    fn next(&mut self) -> Option<Self::Item> {
        self.0.next()
    }
}

// Iterates the `dep` table (forward patch dependencies).
pub struct DepIterator<'a, T: Transaction + 'a>(Cursor<'a, T, PatchId, PatchId>);

impl<'a, T: Transaction + 'a> Iterator for DepIterator<'a, T> {
    type Item = (PatchId, PatchId);
    fn next(&mut self) -> Option<Self::Item> {
        self.0.next()
    }
}

/// Iterates the `contents` table: key → line contents. Carries the
/// transaction (field 0) because building a `Value` from an
/// `UnsafeValue` needs it.
pub struct ContentsIterator<'a, T: Transaction + 'a>(
    &'a T,
    Cursor<'a, T, Key<PatchId>, UnsafeValue>,
);

impl<'a, T: Transaction + 'a> Iterator for ContentsIterator<'a, T> {
    type Item = (Key<PatchId>, Value<'a, T>);
    fn next(&mut self) -> Option<Self::Item> {
        let (k, v) = self.1.next()?;
        unsafe { Some((k, Value::from_unsafe(&v, self.0))) }
    }
}

pub struct CemeteryIterator<'a, T: Transaction + 'a>(Cursor<'a, T, (Key<PatchId>, Edge), PatchId>);

impl<'a, T: Transaction + 'a> Iterator for CemeteryIterator<'a, T> {
    type Item = ((Key<PatchId>, Edge), PatchId);
    fn next(&mut self) -> Option<Self::Item> {
        if let Some(((k, v), w)) = self.1.next() {
        if let Some(((k, v), w)) = self.0.next() {
            Some(((k, v), w))
        } else {
            None
        }
    }
}

pub struct TouchedIterator<'a, T: Transaction + 'a>(&'a T, Cursor<'a, T, Key<PatchId>, PatchId>);
pub struct TouchedIterator<'a, T: Transaction + 'a>(Cursor<'a, T, Key<PatchId>, PatchId>);

impl<'a, T: Transaction + 'a> Iterator for TouchedIterator<'a, T> {
    type Item = (Key<PatchId>, PatchId);
    fn next(&mut self) -> Option<Self::Item> {
        if let Some((k, v)) = self.1.next() {
        if let Some((k, v)) = self.0.next() {
            Some((k, v))
        } else {
            None
        }
    }
}

/// Iterates the `partials` table: branch name → partial-checkout key.
pub struct PartialsIterator<'a, T: Transaction + 'a>(Cursor<'a, T, UnsafeSmallStr, Key<PatchId>>);

impl<'a, T: Transaction + 'a> Iterator for PartialsIterator<'a, T> {
    type Item = (SmallStr<'a>, Key<PatchId>);
    fn next(&mut self) -> Option<Self::Item> {
        // Keys are raw small strings; hand out the safe borrowed view.
        self.0
            .next()
            .map(|(k, v)| (unsafe { SmallStr::from_unsafe(k) }, v))
    }
}

mod dump {
    use super::*;
    use sanakirja;

    impl<U: Transaction, R> GenericTxn<U, R> {
        /// Log the entire contents of every table, for debugging.
        pub fn dump(&self) {
            debug!("============= dumping Tree");
            for (k, v) in self.iter_tree(None) {
                debug!("> {:?} {:?}", k, v)
            }
            debug!("============= dumping Inodes");
            for (k, v) in self.iter_inodes(None) {
                debug!("> {:?} {:?}", k, v)
            }
            debug!("============= dumping RevDep");
            for (k, v) in self.iter_revdep(None) {
                debug!("> {:?} {:?}", k, v)
            }
            debug!("============= dumping Internal");
            for (k, v) in self.iter_internal(None) {
                debug!("> {:?} {:?}", k, v)
            }
            debug!("============= dumping External");
            for (k, v) in self.iter_external(None) {
                debug!("> {:?} {:?} {:?}", k, v, v.to_base58());
            }
            debug!("============= dumping Contents");
            {
                sanakirja::debug(&self.txn, &[&self.dbs.contents], "dump_contents", true);
            }
            debug!("============= dumping Partials");
            // `iter_partials` takes `Option<&SmallStr>` like every other
            // `iter_*` method here; the previous bare `""` argument did
            // not match that signature.
            for (k, v) in self.iter_partials(None) {
                debug!("> {:?} {:?}", k, v);
            }
            debug!("============= dumping Branches");
            for (br, (db, patches, revpatches, counter)) in self.txn.iter(&self.dbs.branches, None)
            {
                debug!("patches: {:?} {:?}", patches, revpatches);
                debug!(
                    "============= dumping Patches in branch {:?}, counter = {:?}",
                    br, counter
                );
                for (k, v) in self.txn.iter(&patches, None) {
                    debug!("> {:?} {:?}", k, v)
                }
                debug!("============= dumping RevPatches in branch {:?}", br);
                for (k, v) in self.txn.iter(&revpatches, None) {
                    debug!("> {:?} {:?}", k, v)
                }
                debug!("============= dumping Nodes in branch {:?}", br);
                unsafe {
                    // sanakirja::debug(&self.txn, &[&db], path);
                    debug!("> {:?}", SmallStr::from_unsafe(br));
                    for (k, v) in self.txn.iter(&db, None) {
                        debug!(">> {:?} {:?}", k, v)
                    }
                }
            }
        }
    }
}

/// An iterator for nodes adjacent to `key` through an edge with flags smaller than `max_flag`.
pub struct AdjIterator<'a, U: Transaction + 'a> {
    // Underlying iterator over `(key, edge)` bindings of a branch graph,
    // positioned at the first edge of interest.
    it: NodesIterator<'a, U>,
    // The fixed source key; iteration stops as soon as a binding for a
    // different key is produced.
    key: Key<PatchId>,
    /// iter as long as the flag is smaller than this
    max_flag: EdgeFlags,
}

impl<'a, U: Transaction + 'a> Iterator for AdjIterator<'a, U> {
    type Item = Edge;

    /// Produce the next edge, stopping as soon as the underlying
    /// iterator leaves `self.key` or the edge flags exceed
    /// `self.max_flag`.
    fn next(&mut self) -> Option<Self::Item> {
        match self.it.next() {
            Some((source, edge)) if source == self.key && edge.flag <= self.max_flag => Some(edge),
            _ => None,
        }
    }
}

/*
macro_rules! iterate_parents {
    ($txn:expr, $branch:expr, $key:expr, $flag: expr) => { {
        let edge = Edge::zero($flag|PARENT_EDGE);
        $txn.iter_nodes(& $branch, Some(($key, Some(&edge))))
            .take_while(|&(k, parent)| {
                *k == *$key && parent.flag <= $flag|PARENT_EDGE|PSEUDO_EDGE
            })
            .map(|(_,b)| b)
    } }
}
*/
use std::collections::{BTreeMap, HashSet};
/// Number of database pages referenced by each table of the
/// repository database (one field per table, plus one entry per
/// branch graph).
#[derive(Debug)]
pub struct PageCounts {
    // Pages of the file-id-to-inode table.
    pub tree: usize,
    // Pages of the inode-to-file-id table.
    pub revtree: usize,
    // Pages of the inode-to-file-header table.
    pub inodes: usize,
    // Pages of the key-to-inode table.
    pub revinodes: usize,
    // Pages of the line-contents table.
    pub contents: usize,
    // Pages of the hash-to-patch-id table.
    pub internal: usize,
    // Pages of the patch-id-to-hash table.
    pub external: usize,
    // Pages of the reverse-dependency table.
    pub revdep: usize,
    // Pages of the branches table itself (not the per-branch graphs).
    pub branch_table: usize,
    // Pages of the deleted-edges (cemetery) table.
    pub cemetery: usize,
    // Pages of the dependency table.
    pub dep: usize,
    // Pages of the patch-touches-file table.
    pub touched_files: usize,
    // Pages of the partial-checkout table.
    pub partials: usize,
    // Pages of each branch's graph, keyed by branch name.
    pub branches: BTreeMap<String, usize>,
}

impl PageCounts {
    /// Total number of pages across all tables, including every
    /// branch's graph table.
    pub fn sum(&self) -> usize {
        self.tree
            + self.revtree
            + self.inodes
            + self.revinodes
            + self.contents
            + self.internal
            + self.external
            + self.revdep
            + self.branch_table
            + self.cemetery
            + self.dep
            + self.touched_files
            + self.partials
            // `values()` expresses the intent directly, instead of
            // iterating over pairs and projecting the second element.
            + self.branches.values().sum::<usize>()
    }
}

impl<'a> Txn<'a> {
    /// Count the database pages used by each table.
    ///
    /// For each table, the set of pages reachable from the table's
    /// root is collected into `references`, its size recorded, and the
    /// same scratch set is cleared and reused for the next table.
    pub fn page_counts(&self) -> PageCounts {
        let mut references = HashSet::new();
        self.txn.references(&mut references, self.dbs.tree);
        let tree = references.len();
        references.clear();
        self.txn.references(&mut references, self.dbs.revtree);
        let revtree = references.len();
        references.clear();
        self.txn.references(&mut references, self.dbs.inodes);
        let inodes = references.len();
        references.clear();
        self.txn.references(&mut references, self.dbs.revinodes);
        let revinodes = references.len();
        references.clear();
        self.txn.references(&mut references, self.dbs.contents);
        let contents = references.len();
        references.clear();
        self.txn.references(&mut references, self.dbs.internal);
        let internal = references.len();
        references.clear();
        self.txn.references(&mut references, self.dbs.external);
        let external = references.len();
        references.clear();
        self.txn.references(&mut references, self.dbs.revdep);
        let revdep = references.len();
        references.clear();
        self.txn.references(&mut references, self.dbs.branches);
        let branch_table = references.len();
        references.clear();
        self.txn.references(&mut references, self.dbs.cemetery);
        let cemetery = references.len();
        references.clear();
        self.txn.references(&mut references, self.dbs.dep);
        let dep = references.len();
        references.clear();
        self.txn.references(&mut references, self.dbs.touched_files);
        let touched_files = references.len();
        references.clear();
        self.txn.references(&mut references, self.dbs.partials);
        let partials = references.len();

        // One count per branch graph, keyed by branch name.
        let mut branches = BTreeMap::new();
        for br in self.iter_branches(None) {
            references.clear();
            self.txn.references(&mut references, br.db);
            branches.insert(br.name.as_str().to_string(), references.len());
        }

        PageCounts {
            tree,
            revtree,
            inodes,
            revinodes,
            contents,
            internal,
            external,
            revdep,
            branch_table,
            cemetery,
            dep,
            touched_files,
            partials,
            branches,
        }
    }
}

impl<U: Transaction, R> GenericTxn<U, R> {
    /// Does this repository have a branch called `name`?
    pub fn has_branch(&self, name: &str) -> bool {
        let key = small_string::SmallString::from_str(name);
        let found = self
            .txn
            .get(&self.dbs.branches, key.as_small_str().to_unsafe(), None);
        found.is_some()
    }

    /// Get the branch with the given name, if it exists.
    ///
    /// Note: the unused `'name` lifetime parameter was removed, the
    /// if-let was replaced by `Option::map`, and the fields now use
    /// init shorthand.
    pub fn get_branch(&self, name: &str) -> Option<Branch> {
        let name = small_string::SmallString::from_str(name);
        self.txn
            .get(&self.dbs.branches, name.as_small_str().to_unsafe(), None)
            .map(|(db, patches, revpatches, apply_counter)| Branch {
                db,
                patches,
                revpatches,
                apply_counter,
                name,
            })
    }

    /// Return the first edge of this `key` if `edge` is `None`, and
    /// a pointer to the edge in the database if `edge` is `Some`.
    pub fn get_nodes<'a>(
        &'a self,
        branch: &Branch,
        key: Key<PatchId>,
        edge: Option<Edge>,
    ) -> Option<Edge> {
        let graph = &branch.db;
        self.txn.get(graph, key, edge)
    }

    /// An iterator over keys and edges, in branch `branch`, starting
    /// from key and edge specified by `key`. If `key` is `None`, the
    /// iterations start from the first key and first edge. If `key`
    /// is of the form `Some(a, None)`, they start from the first edge
    /// of key `a`. If `key` is of the from `Some(a, Some(b))`, they
    /// start from the first key and edge that is at least `(a, b)`.
    pub fn iter_nodes<'a>(
        &'a self,
        branch: &'a Branch,
        key: Option<(Key<PatchId>, Option<Edge>)>,
    ) -> NodesIterator<'a, U> {
        // `key` already has exactly the shape `Transaction::iter`
        // expects; the former `key.map(|(k, v)| (k, v))` was an
        // identity map.
        NodesIterator(self.txn.iter(&branch.db, key))
    }

    /// Iterate over edges adjacent to `key`, starting from flags
    /// `min_flag` and stopping once the flags exceed `max_flag`.
    pub fn iter_adjacent<'a>(
        &'a self,
        branch: &'a Branch,
        key: Key<PatchId>,
        min_flag: EdgeFlags,
        max_flag: EdgeFlags,
    ) -> AdjIterator<'a, U> {
        let start = Some((key, Some(Edge::zero(min_flag))));
        AdjIterator {
            it: self.iter_nodes(branch, start),
            key,
            max_flag,
        }
    }

    /// Iterate over the parent edges of `key` (edges carrying
    /// `PARENT_EDGE` in addition to `flag`), pseudo-edges included.
    pub fn iter_parents<'a>(
        &'a self,
        branch: &'a Branch,
        key: Key<PatchId>,
        flag: EdgeFlags,
    ) -> AdjIterator<'a, U> {
        let min = flag | EdgeFlags::PARENT_EDGE;
        AdjIterator {
            it: self.iter_nodes(branch, Some((key, Some(Edge::zero(min))))),
            key,
            max_flag: min | EdgeFlags::PSEUDO_EDGE,
        }
    }

    /// Iterate over the folder edges of `key` (edges carrying
    /// `FOLDER_EDGE` in addition to `flag`), including pseudo- and
    /// epsilon-edges.
    pub fn iter_folder_children<'a>(
        &'a self,
        branch: &'a Branch,
        key: Key<PatchId>,
        flag: EdgeFlags,
    ) -> AdjIterator<'a, U> {
        let min = flag | EdgeFlags::FOLDER_EDGE;
        AdjIterator {
            it: self.iter_nodes(branch, Some((key, Some(Edge::zero(min))))),
            key,
            max_flag: min | EdgeFlags::PSEUDO_EDGE | EdgeFlags::EPSILON_EDGE,
        }
    }

    /// An iterator over branches in the database, starting from the
    /// given branch name.
    pub fn iter_branches<'a>(&'a self, key: Option<&SmallStr>) -> BranchIterator<'a, U> {
        let start = key.map(|name| (name.to_unsafe(), None));
        BranchIterator(self.txn.iter(&self.dbs.branches, start))
    }

    /// An iterator over the partial-checkout entries of the partials
    /// table, starting from the first entry whose key is at least
    /// `branch`. (The previous doc comment was copy-pasted from
    /// `iter_branches`.) Iteration continues past the given branch;
    /// callers are expected to stop when the branch name changes.
    pub fn iter_partials<'a>(&'a self, branch: &str) -> PartialsIterator<'a, U> {
        let key = SmallString::from_str(branch);
        PartialsIterator(self.txn.iter(
            &self.dbs.partials,
            Some((key.as_small_str().to_unsafe(), None)),
        ))
    }

    /// An iterator over patches in a branch, in the alphabetical
    /// order of their hash.
    pub fn iter_patches<'a>(
        &'a self,
        branch: &'a Branch,
        key: Option<PatchId>,
    ) -> PatchesIterator<'a, U> {
        let start = key.map(|patch| (patch, None));
        PatchesIterator(self.txn.iter(&branch.patches, start))
    }

    /// An iterator over patches in a branch, in the reverse order in
    /// which they were applied.
    pub fn rev_iter_applied<'a>(
        &'a self,
        branch: &'a Branch,
        key: Option<ApplyTimestamp>,
    ) -> RevAppliedIterator<'a, U> {
        let start = key.map(|timestamp| (timestamp, None));
        RevAppliedIterator(self.txn.rev_iter(&branch.revpatches, start))
    }

    /// An iterator over patches in a branch in the order in which
    /// they were applied.
    pub fn iter_applied<'a>(
        &'a self,
        branch: &'a Branch,
        key: Option<ApplyTimestamp>,
    ) -> AppliedIterator<'a, U> {
        let start = key.map(|timestamp| (timestamp, None));
        AppliedIterator(self.txn.iter(&branch.revpatches, start))
    }

    /// An iterator over files and directories currently tracked by
    /// Pijul, starting from the given `FileId`. The `Inode`s returned
    /// by the iterator can be used to form new `FileId`s and traverse
    /// the tree from top to bottom.
    ///
    /// The set of tracked files is changed by the following
    /// operations: outputting the repository, adding, deleting and
    /// moving files. It is not related to branches, but only to the
    /// files actually present on the file system.
    pub fn iter_tree<'a>(&'a self, key: Option<(&FileId, Option<Inode>)>) -> TreeIterator<'a, U> {
        debug!("iter_tree: {:?}", key);
        let start = key.map(|(file_id, inode)| (file_id.to_unsafe(), inode));
        TreeIterator(self.txn.iter(&self.dbs.tree, start))
    }

    /// An iterator over files and directories, following directories
    /// in the opposite direction.
    pub fn iter_revtree<'a>(
        &'a self,
        key: Option<(Inode, Option<&FileId>)>,
    ) -> RevtreeIterator<'a, U> {
        let start = key.map(|(inode, file_id)| (inode, file_id.map(|f| f.to_unsafe())));
        RevtreeIterator(self.txn.iter(&self.dbs.revtree, start))
    }

    /// An iterator over the "inodes" database, which contains
    /// correspondences between files on the filesystem and the files
    /// in the graph.
    pub fn iter_inodes<'a>(
        &'a self,
        key: Option<(Inode, Option<FileHeader>)>,
    ) -> InodesIterator<'a, U> {
        let iter = self.txn.iter(&self.dbs.inodes, key);
        InodesIterator(iter)
    }

    /// Iterator over the `PatchId` to `Hash` correspondence.
    pub fn iter_external<'a>(
        &'a self,
        key: Option<(PatchId, Option<HashRef>)>,
    ) -> ExternalIterator<'a, U> {
        let start = key.map(|(patch, hash)| (patch, hash.map(|h| h.to_unsafe())));
        ExternalIterator(self.txn.iter(&self.dbs.external, start))
    }

    /// Iterator over the `Hash` to `PatchId` correspondence.
    pub fn iter_internal<'a>(
        &'a self,
        key: Option<(HashRef, Option<PatchId>)>,
    ) -> InternalIterator<'a, U> {
        let start = key.map(|(hash, patch)| (hash.to_unsafe(), patch));
        InternalIterator(self.txn.iter(&self.dbs.internal, start))
    }

    /// Iterator over reverse dependencies (`(k, v)` is in the reverse
    /// dependency table if `v` depends on `k`, and both are in at
    /// least one branch).
    pub fn iter_revdep<'a>(
        &'a self,
        key: Option<(PatchId, Option<PatchId>)>,
    ) -> RevdepIterator<'a, U> {
        let iter = self.txn.iter(&self.dbs.revdep, key);
        RevdepIterator(iter)
    }

    /// Iterator over dependencies.
    pub fn iter_dep<'a>(&'a self, key: Option<(PatchId, Option<PatchId>)>) -> DepIterator<'a, U> {
        let iter = self.txn.iter(&self.dbs.dep, key);
        DepIterator(iter)
    }

    /// An iterator over line contents (common to all branches).
    pub fn iter_contents<'a>(&'a self, key: Option<Key<PatchId>>) -> ContentsIterator<'a, U> {
        let start = key.map(|k| (k, None));
        ContentsIterator(&self.txn, self.txn.iter(&self.dbs.contents, start))
    }

    /// An iterator over edges in the cemetery.
    pub fn iter_cemetery<'a>(&'a self, key: Key<PatchId>, edge: Edge) -> CemeteryIterator<'a, U> {
        let start = Some(((key, edge), None));
        CemeteryIterator(self.txn.iter(&self.dbs.cemetery, start))
    }

    /// An iterator over patches that touch a certain file.
    pub fn iter_touched<'a>(&'a self, key: Key<PatchId>) -> TouchedIterator<'a, U> {
        let start = Some((key, None));
        TouchedIterator(self.txn.iter(&self.dbs.touched_files, start))
    }

    /// Tell whether a patch touches a file
    pub fn get_touched<'a>(&'a self, key: Key<PatchId>, patch: PatchId) -> bool {
        let binding = self.txn.get(&self.dbs.touched_files, key, Some(patch));
        binding.is_some()
    }

    /// Get the `Inode` of a give `FileId`. A `FileId` is itself
    /// composed of an inode and a name, hence this can be used to
    /// traverse the tree of tracked files from top to bottom.
    pub fn get_tree<'a>(&'a self, key: &FileId) -> Option<Inode> {
        let unsafe_key = key.to_unsafe();
        self.txn.get(&self.dbs.tree, unsafe_key, None)
    }

    /// Get the parent `FileId` of a given `Inode`. A `FileId` is
    /// itself composed of an `Inode` and a name, so this can be used
    /// to traverse the tree of tracked files from bottom to top
    /// (starting from a leaf).
    pub fn get_revtree<'a>(&'a self, key: Inode) -> Option<FileId<'a>> {
        let raw = self.txn.get(&self.dbs.revtree, key, None)?;
        // SAFETY: values stored in the revtree table are produced from
        // valid `FileId`s, so converting back is sound.
        Some(unsafe { FileId::from_unsafe(raw) })
    }

    /// Get the key in branches for the given `Inode`, as well as
    /// meta-information on the file (permissions, and whether it has
    /// been moved or deleted compared to the branch).
    ///
    /// This table is updated every time the repository is output, and
    /// when files are moved or deleted. It is meant to be
    /// synchronised with the current branch (if any).
    pub fn get_inodes<'a>(&'a self, key: Inode) -> Option<FileHeader> {
        let db = &self.dbs.inodes;
        self.txn.get(db, key, None)
    }

    /// Get the `Inode` corresponding to `key` in branches (see the
    /// documentation for `get_inodes`).
    pub fn get_revinodes(&self, key: Key<PatchId>) -> Option<Inode> {
        let db = &self.dbs.revinodes;
        self.txn.get(db, key, None)
    }

    /// Get the contents of a line.
    pub fn get_contents<'a>(&'a self, key: Key<PatchId>) -> Option<Value<'a, U>> {
        self.txn
            .get(&self.dbs.contents, key, None)
            .map(|e| unsafe { Value::from_unsafe(&e, &self.txn) })
    }

    /// Get the `PatchId` (or internal patch identifier) of the
    /// provided patch hash. The "none" hash always maps to the root
    /// patch id.
    pub fn get_internal(&self, key: HashRef) -> Option<PatchId> {
        if let HashRef::None = key {
            Some(ROOT_PATCH_ID)
        } else {
            self.txn.get(&self.dbs.internal, key.to_unsafe(), None)
        }
    }

    /// Get the `HashRef` (external patch identifier) of the provided
    /// internal patch identifier.
    pub fn get_external<'a>(&'a self, key: PatchId) -> Option<HashRef<'a>> {
        let raw = self.txn.get(&self.dbs.external, key, None)?;
        // SAFETY: values in the external table are well-formed hashes.
        Some(unsafe { HashRef::from_unsafe(raw) })
    }

    /// Get the patch number in the branch. Patch numbers are
    /// guaranteed to always increase when a new patch is applied, but
    /// are not necessarily consecutive.
    pub fn get_patch(&self, patch_set: &PatchSet, patchid: PatchId) -> Option<ApplyTimestamp> {
        // Direct lookup in the branch's patch set.
        self.txn.get(patch_set, patchid, None)
    }

    /// Get the smallest patch id that depends on `patch` (and is at
    /// least `dep` in alphabetical order if `dep`, is `Some`).
    pub fn get_revdep(&self, patch: PatchId, dep: Option<PatchId>) -> Option<PatchId> {
        // Direct lookup in the reverse-dependency table.
        self.txn.get(&self.dbs.revdep, patch, dep)
    }

    /// Get the smallest patch id that `patch` depends on (and is at
    /// least `dep` in alphabetical order if `dep`, is `Some`).
    pub fn get_dep(&self, patch: PatchId, dep: Option<PatchId>) -> Option<PatchId> {
        // Direct lookup in the dependency table.
        self.txn.get(&self.dbs.dep, patch, dep)
    }

    /// Dump the graph of a branch into a writer, in dot format.
    ///
    /// Each node is labelled with its `(patch, line)` key and up to
    /// 50 bytes of its contents; each edge is coloured and styled
    /// according to its flags. If `exclude_parents` is true, parent
    /// edges are skipped.
    pub fn debug<W>(&self, branch_name: &str, w: &mut W, exclude_parents: bool)
    where
        W: std::io::Write,
    {
        debug!("debugging branch {:?}", branch_name);
        // One dot style per edge-flag combination. The loop below
        // pushes 32 entries, so reserve 32 (the former capacity of 16
        // forced a reallocation).
        let mut styles = Vec::with_capacity(32);
        for i in 0..32 {
            let flag = EdgeFlags::from_bits(i as u8).unwrap();
            styles.push(
                ("color=").to_string()
                    + ["red", "blue", "orange", "green", "black"][(i >> 1) & 3]
                    + if flag.contains(EdgeFlags::DELETED_EDGE) {
                        ", style=dashed"
                    } else {
                        ""
                    }
                    + if flag.contains(EdgeFlags::PSEUDO_EDGE) {
                        ", style=dotted"
                    } else {
                        ""
                    },
            )
        }
        // `write_all` retries until the whole buffer is written;
        // `write` may legally write only a prefix.
        w.write_all(b"digraph{\n").unwrap();
        let branch = self.get_branch(branch_name).unwrap();

        let mut cur: Key<PatchId> = ROOT_KEY.clone();
        for (k, v) in self.iter_nodes(&branch, None) {
            if k != cur {
                // First binding for this key: emit its node declaration.
                let cont = if let Some(cont) = self.get_contents(k) {
                    let cont = cont.into_cow();
                    let cont = &cont[..std::cmp::min(50, cont.len())];
                    format!(
                        "{:?}",
                        match std::str::from_utf8(cont) {
                            Ok(x) => x.to_string(),
                            Err(_) => hex::encode(cont),
                        }
                    )
                } else {
                    "\"\"".to_string()
                };
                // remove the leading and trailing '"'.
                let cont = &cont[1..(cont.len() - 1)];
                write!(
                    w,
                    "n_{}[label=\"{}.{}: {}\"];\n",
                    k.to_hex(),
                    k.patch.to_base58(),
                    k.line.to_hex(),
                    cont.replace("\n", "")
                )
                .unwrap();
                cur = k.clone();
            }
            debug!("debug: {:?}", v);
            let flag = v.flag.bits();
            if !(exclude_parents && v.flag.contains(EdgeFlags::PARENT_EDGE)) {
                write!(
                    w,
                    "n_{}->n_{}[{},label=\"{} {}\"];\n",
                    k.to_hex(),
                    &v.dest.to_hex(),
                    styles[(flag & 0xff) as usize],
                    flag,
                    v.introduced_by.to_base58()
                )
                .unwrap();
            }
        }
        w.write_all(b"}\n").unwrap();
    }

    /// Dump the folder graph of a branch into a writer, in dot format.
    ///
    /// Starting from the root key, only folder children (folder edges
    /// that are not parent edges) are followed.
    pub fn debug_folders<W>(&self, branch_name: &str, w: &mut W)
    where
        W: std::io::Write,
    {
        debug!("debugging branch {:?}", branch_name);
        // One dot style per edge-flag combination. The loop below
        // pushes 32 entries, so reserve 32 (the former capacity of 16
        // forced a reallocation).
        let mut styles = Vec::with_capacity(32);
        for i in 0..32 {
            let flag = EdgeFlags::from_bits(i as u8).unwrap();
            styles.push(
                ("color=").to_string()
                    + ["red", "blue", "orange", "green", "black"][(i >> 1) & 3]
                    + if flag.contains(EdgeFlags::DELETED_EDGE) {
                        ", style=dashed"
                    } else {
                        ""
                    }
                    + if flag.contains(EdgeFlags::PSEUDO_EDGE) {
                        ", style=dotted"
                    } else {
                        ""
                    },
            )
        }
        // `write_all` retries until the whole buffer is written;
        // `write` may legally write only a prefix.
        w.write_all(b"digraph{\n").unwrap();
        let branch = self.get_branch(branch_name).unwrap();

        let mut nodes = vec![ROOT_KEY];
        while let Some(k) = nodes.pop() {
            let cont = if let Some(cont) = self.get_contents(k) {
                let cont = cont.into_cow();
                let cont = &cont[..std::cmp::min(50, cont.len())];
                if cont.len() > 2 {
                    let (a, b) = cont.split_at(2);
                    let cont = format!("{:?}", std::str::from_utf8(b).unwrap());
                    let cont = &cont[1..(cont.len() - 1)];
                    format!("{} {}", hex::encode(a), cont)
                } else {
                    format!("{}", hex::encode(cont))
                }
            } else {
                "".to_string()
            };
            // remove the leading and trailing '"'.
            write!(
                w,
                "n_{}[label=\"{}.{}: {}\"];\n",
                k.to_hex(),
                k.patch.to_base58(),
                k.line.to_hex(),
                cont.replace("\n", "")
            )
            .unwrap();

            for child in self.iter_adjacent(&branch, k, EdgeFlags::empty(), EdgeFlags::all()) {
                let flag = child.flag.bits();
                write!(
                    w,
                    "n_{}->n_{}[{},label=\"{} {}\"];\n",
                    k.to_hex(),
                    &child.dest.to_hex(),
                    styles[(flag & 0xff) as usize],
                    flag,
                    child.introduced_by.to_base58()
                )
                .unwrap();
                // Recurse only into folder children (folder edges that
                // do not point back to a parent).
                if child.flag.contains(EdgeFlags::FOLDER_EDGE)
                    && !child.flag.contains(EdgeFlags::PARENT_EDGE)
                {
                    nodes.push(child.dest)
                }
            }
        }
        w.write_all(b"}\n").unwrap();
    }

    /// Is there an alive/pseudo edge from `a` to `b`.
    pub fn is_connected(&self, branch: &Branch, a: Key<PatchId>, b: Key<PatchId>) -> bool {
        let max = EdgeFlags::PSEUDO_EDGE | EdgeFlags::FOLDER_EDGE;
        self.test_edge(branch, a, b, EdgeFlags::empty(), max)
    }

    /// Is there an edge from `a` to `b` with flags between `min` and
    /// `max` (inclusive)?
    pub fn test_edge(
        &self,
        branch: &Branch,
        a: Key<PatchId>,
        b: Key<PatchId>,
        min: EdgeFlags,
        max: EdgeFlags,
    ) -> bool {
        // The log line used to read "is_connected", which was
        // misleading when this method was called directly.
        debug!("test_edge {:?} {:?}", a, b);
        let mut edge = Edge::zero(min);
        edge.dest = b;
        // Start iterating at `(a, edge)` and accept the first binding
        // that is still an edge from `a` to `b` within the flag range.
        self.iter_nodes(&branch, Some((a, Some(edge))))
            .take_while(|&(k, v)| k == a && v.dest == b && v.flag <= max)
            .next()
            .is_some()
    }
}

/// Low-level operations on mutable transactions.
impl<'env, R: rand::Rng> MutTxn<'env, R> {
    /// Delete a branch, destroying its associated graph and patch set.
    pub fn drop_branch(&mut self, branch: &str) -> Result<bool> {
        // Branch names are stored as small strings in the branches table.
        let name = SmallString::from_str(branch);
        let deleted = self.txn.del(
            &mut self.rng,
            &mut self.dbs.branches,
            name.as_small_str().to_unsafe(),
            None,
        )?;
        Ok(deleted)
    }

    /// Add a binding to the graph of a branch. Every edge must be
    /// inserted twice, once in each direction; this method inserts a
    /// single direction only.
    pub fn put_edge_one_dir(
        &mut self,
        branch: &mut Branch,
        key: Key<PatchId>,
        edge: Edge,
    ) -> Result<bool> {
        debug!("put_nodes: {:?} {:?}", key, edge);
        let inserted = self.txn.put(&mut self.rng, &mut branch.db, key, edge)?;
        Ok(inserted)
    }

    /// Same as `put_edge_one_dir`, but also adds the reverse edge
    /// (source and destination swapped, `PARENT_EDGE` flag flipped),
    /// so that both directions end up in the graph.
    pub fn put_edge_both_dirs(
        &mut self,
        branch: &mut Branch,
        mut key: Key<PatchId>,
        mut edge: Edge,
    ) -> Result<bool> {
        self.put_edge_one_dir(branch, key, edge)?;
        // Build the reverse edge in place: swap source and destination,
        // then flip PARENT_EDGE.
        std::mem::swap(&mut key, &mut edge.dest);
        edge.flag.toggle(EdgeFlags::PARENT_EDGE);
        self.put_edge_one_dir(branch, key, edge)
    }

    /// Delete one direction of an edge from the graph of a branch.
    pub fn del_edge_one_dir(
        &mut self,
        branch: &mut Branch,
        key: Key<PatchId>,
        edge: Edge,
    ) -> Result<bool> {
        let deleted = self
            .txn
            .del(&mut self.rng, &mut branch.db, key, Some(edge))?;
        Ok(deleted)
    }

    /// Same as `del_edge_one_dir`, but also deletes the reverse edge
    /// (source and destination swapped, `PARENT_EDGE` flag flipped).
    pub fn del_edge_both_dirs(
        &mut self,
        branch: &mut Branch,
        mut key: Key<PatchId>,
        mut edge: Edge,
    ) -> Result<bool> {
        self.del_edge_one_dir(branch, key, edge)?;
        // Build the reverse edge in place, as in `put_edge_both_dirs`.
        std::mem::swap(&mut key, &mut edge.dest);
        edge.flag.toggle(EdgeFlags::PARENT_EDGE);
        self.del_edge_one_dir(branch, key, edge)
    }

    /// Add a file or directory into the tree database, under parent
    /// `key.parent_inode`, with name `key.basename` and inode `edge`
    /// (inodes are usually randomly generated; they have no relation
    /// to patches or branches).
    ///
    /// Every binding inserted here must have its reverse inserted into
    /// the revtree database: if `(key, edge)` goes here, `(edge, key)`
    /// must go into revtree.
    pub fn put_tree(&mut self, key: &FileId, edge: Inode) -> Result<bool> {
        let inserted = self
            .txn
            .put(&mut self.rng, &mut self.dbs.tree, key.to_unsafe(), edge)?;
        Ok(inserted)
    }

    /// Delete a file or directory from the tree database. As described
    /// in `put_tree`, the reverse binding must also be deleted from
    /// the revtree database.
    pub fn del_tree(&mut self, key: &FileId, edge: Option<Inode>) -> Result<bool> {
        let deleted = self
            .txn
            .del(&mut self.rng, &mut self.dbs.tree, key.to_unsafe(), edge)?;
        Ok(deleted)
    }

    /// Insert a binding into the revtree database — the reverse of a
    /// tree binding (see `put_tree`).
    pub fn put_revtree(&mut self, key: Inode, value: &FileId) -> Result<bool> {
        let inserted = self
            .txn
            .put(&mut self.rng, &mut self.dbs.revtree, key, value.to_unsafe())?;
        Ok(inserted)
    }

    /// Delete a binding from the revtree database — the reverse of a
    /// tree binding (see `put_tree`).
    pub fn del_revtree(&mut self, key: Inode, value: Option<&FileId>) -> Result<bool> {
        let deleted = self.txn.del(
            &mut self.rng,
            &mut self.dbs.revtree,
            key,
            value.map(|e| e.to_unsafe()),
        )?;
        Ok(deleted)
    }

    /// Delete a binding from the `inodes` database, i.e. the
    /// correspondence between branch graphs and the file tree.
    ///
    /// Every binding in inodes has its reverse in revinodes (minus the
    /// `FileMetadata`), so `del_revinodes` must be called immediately
    /// before or immediately after this method.
    pub fn del_inodes(&mut self, key: Inode, value: Option<FileHeader>) -> Result<bool> {
        let deleted = self
            .txn
            .del(&mut self.rng, &mut self.dbs.inodes, key, value)?;
        Ok(deleted)
    }

    /// Replace the binding for `key` in the inodes database, inserting
    /// it if `key` is not present yet.
    ///
    /// The reverse binding must also be inserted into revinodes
    /// (without the `FileMetadata`).
    pub fn replace_inodes(&mut self, key: Inode, value: FileHeader) -> Result<bool> {
        // Remove any previous binding for `key` before inserting the
        // new one.
        self.txn
            .del(&mut self.rng, &mut self.dbs.inodes, key, None)?;
        let inserted = self
            .txn
            .put(&mut self.rng, &mut self.dbs.inodes, key, value)?;
        Ok(inserted)
    }

    /// Replace the binding for `key` in the revinodes database,
    /// inserting it if `key` is not present yet.
    ///
    /// The reverse binding must also be inserted into inodes (with an
    /// extra `FileMetadata`).
    pub fn replace_revinodes(&mut self, key: Key<PatchId>, value: Inode) -> Result<bool> {
        // Remove any previous binding for `key` before inserting the
        // new one.
        self.txn
            .del(&mut self.rng, &mut self.dbs.revinodes, key, None)?;
        let inserted = self
            .txn
            .put(&mut self.rng, &mut self.dbs.revinodes, key, value)?;
        Ok(inserted)
    }

    /// Delete a binding from the `revinodes` database, i.e. the
    /// correspondence between the file tree and branch graphs.
    ///
    /// Every binding in revinodes has its reverse in inodes (with an
    /// extra `FileMetadata`), so `del_inodes` must be called
    /// immediately before or immediately after this method.
    pub fn del_revinodes(&mut self, key: Key<PatchId>, value: Option<Inode>) -> Result<bool> {
        let deleted = self
            .txn
            .del(&mut self.rng, &mut self.dbs.revinodes, key, value)?;
        Ok(deleted)
    }

    /// Add the contents of a line. Note that the contents table is
    /// shared by all branches.
    pub fn put_contents(&mut self, key: Key<PatchId>, value: UnsafeValue) -> Result<bool> {
        let inserted = self
            .txn
            .put(&mut self.rng, &mut self.dbs.contents, key, value)?;
        Ok(inserted)
    }

    /// Remove the contents of a line (the contents table is shared by
    /// all branches).
    pub fn del_contents(&mut self, key: Key<PatchId>, value: Option<UnsafeValue>) -> Result<bool> {
        let deleted = self
            .txn
            .del(&mut self.rng, &mut self.dbs.contents, key, value)?;
        Ok(deleted)
    }

    /// Register the internal identifier of a patch. `put_external`
    /// must be called immediately before or immediately after this
    /// method.
    pub fn put_internal(&mut self, key: HashRef, value: PatchId) -> Result<bool> {
        let inserted = self.txn.put(
            &mut self.rng,
            &mut self.dbs.internal,
            key.to_unsafe(),
            value,
        )?;
        Ok(inserted)
    }

    /// Unregister the internal identifier of a patch. The external id
    /// must be unregistered as well (see `del_external`).
    pub fn del_internal(&mut self, key: HashRef) -> Result<bool> {
        let deleted = self
            .txn
            .del(&mut self.rng, &mut self.dbs.internal, key.to_unsafe(), None)?;
        Ok(deleted)
    }

    /// Register the external identifier of a patch. `put_internal`
    /// must be called immediately before or immediately after this
    /// method.
    pub fn put_external(&mut self, key: PatchId, value: HashRef) -> Result<bool> {
        let inserted = self.txn.put(
            &mut self.rng,
            &mut self.dbs.external,
            key,
            value.to_unsafe(),
        )?;
        Ok(inserted)
    }

    /// Unregister the external identifier of a patch. The internal id
    /// must be unregistered as well (see `del_internal`).
    pub fn del_external(&mut self, key: PatchId) -> Result<bool> {
        let deleted = self
            .txn
            .del(&mut self.rng, &mut self.dbs.external, key, None)?;
        Ok(deleted)
    }

    /// Register patch `value` in a branch's patch set at timestamp
    /// `time`. This doesn't apply the patch, it only records it as
    /// applied; `put_revpatches` must be called on the same branch
    /// immediately before or immediately after.
    pub fn put_patches(
        &mut self,
        branch: &mut PatchSet,
        value: PatchId,
        time: ApplyTimestamp,
    ) -> Result<bool> {
        let inserted = self.txn.put(&mut self.rng, branch, value, time)?;
        Ok(inserted)
    }

    /// Remove patch `value` from a branch's patch set. This doesn't
    /// unrecord the patch; `del_revpatches` must be called on the same
    /// branch immediately before or immediately after.
    pub fn del_patches(&mut self, branch: &mut PatchSet, value: PatchId) -> Result<bool> {
        let deleted = self.txn.del(&mut self.rng, branch, value, None)?;
        Ok(deleted)
    }

    /// Register patch `value` in a branch's reverse patch set, keyed
    /// by timestamp. This doesn't apply the patch; `put_patches` must
    /// be called on the same branch immediately before or immediately
    /// after.
    pub fn put_revpatches(
        &mut self,
        branch: &mut RevPatchSet,
        time: ApplyTimestamp,
        value: PatchId,
    ) -> Result<bool> {
        let inserted = self.txn.put(&mut self.rng, branch, time, value)?;
        Ok(inserted)
    }

    /// Remove patch `value` (at `timestamp`) from a branch's reverse
    /// patch set. This doesn't unrecord the patch; `del_patches` must
    /// be called on the same branch immediately before or immediately
    /// after.
    pub fn del_revpatches(
        &mut self,
        revbranch: &mut RevPatchSet,
        timestamp: ApplyTimestamp,
        value: PatchId,
    ) -> Result<bool> {
        let deleted = self
            .txn
            .del(&mut self.rng, revbranch, timestamp, Some(value))?;
        Ok(deleted)
    }

    /// Register a reverse dependency: if a depends on b, `(b, a)` must
    /// be inserted here. All dependencies of all patches applied on at
    /// least one branch must be registered in this database.
    pub fn put_revdep(&mut self, patch: PatchId, revdep: PatchId) -> Result<bool> {
        let inserted = self
            .txn
            .put(&mut self.rng, &mut self.dbs.revdep, patch, revdep)?;
        Ok(inserted)
    }

    /// Register a dependency. All dependencies of all patches applied
    /// on at least one branch must be registered in this database.
    /// NOTE(review): the previous comment said "`(b, a)` must be
    /// inserted here", which matches `put_revdep`, not this forward
    /// table — presumably this is called as `(a, b)` when a depends on
    /// b; confirm against callers.
    pub fn put_dep(&mut self, patch: PatchId, dep: PatchId) -> Result<bool> {
        Ok(self.txn.put(&mut self.rng, &mut self.dbs.dep, patch, dep)?)
    }

    /// Remove a reverse dependency. Only call this when the patch with
    /// identifier `patch` is not applied to any branch.
    pub fn del_revdep(&mut self, patch: PatchId, revdep: Option<PatchId>) -> Result<bool> {
        let deleted = self
            .txn
            .del(&mut self.rng, &mut self.dbs.revdep, patch, revdep)?;
        Ok(deleted)
    }

    /// Remove a dependency. Only call this when the patch with
    /// identifier `patch` is not applied to any branch.
    pub fn del_dep(&mut self, patch: PatchId, dep: Option<PatchId>) -> Result<bool> {
        let deleted = self.txn.del(&mut self.rng, &mut self.dbs.dep, patch, dep)?;
        Ok(deleted)
    }

    /// Add an edge to the cemetery, keyed by `(key, edge)` with value
    /// `patch`.
    pub fn put_cemetery(&mut self, key: Key<PatchId>, edge: Edge, patch: PatchId) -> Result<bool> {
        let inserted = self
            .txn
            .put(&mut self.rng, &mut self.dbs.cemetery, (key, edge), patch)?;
        Ok(inserted)
    }

    /// Delete an edge from the cemetery (the exact `(key, edge) →
    /// patch` binding).
    pub fn del_cemetery(&mut self, key: Key<PatchId>, edge: Edge, patch: PatchId) -> Result<bool> {
        let deleted = self.txn.del(
            &mut self.rng,
            &mut self.dbs.cemetery,
            (key, edge),
            Some(patch),
        )?;
        Ok(deleted)
    }

    /// Record the relation "patch `patch` touches file `file`".
    pub fn put_touched_file(&mut self, file: Key<PatchId>, patch: PatchId) -> Result<bool> {
        let inserted = self
            .txn
            .put(&mut self.rng, &mut self.dbs.touched_files, file, patch)?;
        Ok(inserted)
    }

    /// Delete the `(file, patch)` binding from the table of touched
    /// files. NOTE(review): the previous comment claimed this deletes
    /// *all* mentions of `patch`, but the code removes a single
    /// binding for this `file`.
    pub fn del_touched_file(&mut self, file: Key<PatchId>, patch: PatchId) -> Result<bool> {
        Ok(self.txn.del(
            &mut self.rng,
            &mut self.dbs.touched_files,
            file,
            Some(patch),
        )?)
    }

    /// Add a partial path to a branch.
    pub fn put_partials(&mut self, name: &str, path: Key<PatchId>) -> Result<bool> {
        let name = small_string::SmallString::from_str(name);
        Ok(self.txn.put(
            &mut self.rng,
            &mut self.dbs.partials,
            name.as_small_str().to_unsafe(),
            path,
        )?)
    }

    /// Remove a partial path from a branch.
    pub fn del_partials(&mut self, name: &str) -> Result<bool> {
        let name = small_string::SmallString::from_str(name);
        let mut deleted = false;
        while self.txn.del(
            &mut self.rng,
            &mut self.dbs.partials,
            name.as_small_str().to_unsafe(),
            None,
        )? {
            deleted = true
        }
        Ok(deleted)
    }



















1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
    pub fn prefix_keys(&self, branch: &Branch, path: &str) -> Result<Vec<Key<PatchId>>> {
        for comp in path.split(std::path::MAIN_SEPARATOR) {

            for (_, child) in self.iter_nodes(branch, Some((current_key, Some(&e))))
                .filter(|&(_, ref v)| v.flag.contains(EdgeFlags::FOLDER_EDGE)) {

                    let contents = self.get_contents(child.dest).unwrap();
                    if contents.into_cow().split_at(2).1 == comp.as_bytes() {

                        if !is_first {
                            return Err(ErrorKind::FileNameCount(current_key).into())
                        }

                        for (_, grandchild) in self.iter_nodes(branch, Some((child.dest, Some(&e))))
                            .take_while(|&(k, _)| k == child.dest)
                            .filter(|&(_, ref v)| v.flag.contains(EdgeFlags::FOLDER_EDGE)) {
                                result.push(grandchild.dest);
                                current_key = grandchild.dest;
                            }
//! Manipulating the internal representation of files and directories
//! tracked by Pijul (i.e. adding files, removing files, getting file
//! names…).

use backend;
use backend::*;
use {Error, Result};
use fs_representation::{RepoPath, in_repo_root};

use rand;
use std;
use std::collections::BTreeMap;
use std::iter::Iterator;
use std::path::{Path, PathBuf};

impl<'env, R: rand::Rng> MutTxn<'env, R> {
    /// Set `inode`'s recorded status to `Moved` in the inodes table,
    /// if the inode has a header at all.
    pub fn mark_inode_moved(&mut self, inode: Inode) {
        // Clone the header first, so the immutable borrow of `self`
        // ends before we mutate the inodes table.
        let header = self.get_inodes(inode).map(|h| h.clone());
        if let Some(mut header) = header {
            header.status = FileStatus::Moved;
            self.replace_inodes(inode, header).unwrap();
        }
    }

    /// Create an inode that doesn't exist in the repository, but
    /// doesn't put it into the repository.
    pub fn create_new_inode(&self) -> Inode {
        let mut inode: Inode = ROOT_INODE.clone();
        loop {
            // Randomize every byte of the candidate inode.
            for byte in inode.iter_mut() {
                *byte = rand::random()
            }
            // Retry on the (unlikely) collision with a tracked inode.
            if self.get_revtree(inode).is_none() {
                return inode;
            }
        }
    }

    /// Record the information that `parent_inode` is now a parent of
    /// file `filename`, and `filename` has inode `child_inode`.
    ///
    /// If `child_inode` is `None`, a fresh inode is generated.
    /// Returns the child's inode (existing or newly created), or
    /// `CannotAddDotPijul` / `AlreadyAdded` on error.
    fn make_new_child(
        &mut self,
        parent_inode: Inode,
        filename: &str,
        is_dir: bool,
        child_inode: Option<Inode>,
    ) -> Result<Inode> {
        let parent_id = OwnedFileId {
            parent_inode: parent_inode.clone(),
            basename: SmallString::from_str(filename),
        };
        // `.pijul` is the repository's own metadata directory; it can
        // never be tracked.
        if filename == ".pijul" {
            return Err(Error::CannotAddDotPijul);
        }
        if let Some(inode) = self.get_tree(&parent_id.as_file_id()) {
            // If we already have the file, make sure the file status
            // is Ok (i.e. not zombie, not deleted).
            let mut header = if let Some(header) = self.get_inodes(inode) {
                header.to_owned()
            } else {
                // In the tree but without a recorded header: already added.
                return Err(Error::AlreadyAdded);
            };
            if let FileStatus::Ok = header.status {
            } else {
                header.status = FileStatus::Ok;
                self.replace_inodes(inode, header)?;
            }
            Ok(inode)
        } else {
            // Else, add a new file.

            let child_inode = match child_inode {
                None => self.create_new_inode(),
                Some(i) => i.clone(),
            };
            // Keep tree and revtree in sync (see `put_tree`'s contract).
            self.put_tree(&parent_id.as_file_id(), child_inode)?;
            self.put_revtree(child_inode, &parent_id.as_file_id())?;

            if is_dir {
                // If this new file is a directory, add a name-less
                // file id without a reverse in revtree.
                let dir_id = OwnedFileId {
                    parent_inode: child_inode.clone(),
                    basename: SmallString::from_str(""),
                };
                self.put_tree(&dir_id.as_file_id(), child_inode)?;
            };
            Ok(child_inode)
        }
    }

    /// Register `path` in the tree, creating any unrecorded
    /// intermediate directories, and giving the final component inode
    /// `inode` (or a fresh one if `None`). A path without a parent is
    /// a no-op.
    pub fn add_inode<P: AsRef<Path>>(
        &mut self,
        inode: Option<Inode>,
        path: &RepoPath<P>,
        is_dir: bool,
    ) -> Result<()> {
        let parent = match path.parent() {
            Some(p) => p,
            None => return Ok(()),
        };
        // Walk down from the closest recorded ancestor, creating the
        // missing intermediate directories along the way.
        let (mut current, missing) = self.closest_in_repo_ancestor(&parent).unwrap();
        for component in missing {
            current =
                self.make_new_child(current, component.as_os_str().to_str().unwrap(), true, None)?
        }
        self.make_new_child(
            current,
            path.file_name().unwrap().to_str().unwrap(),
            is_dir,
            inode,
        )?;
        Ok(())
    }

    /// Returns whether `a` is `b` itself or an ancestor of `b` in the
    /// tree (walking up through revtree).
    pub fn inode_is_ancestor_of(&self, a: Inode, mut b: Inode) -> bool {
        while a != b {
            match self.get_revtree(b) {
                // Climb one level towards the root.
                Some(parent) => b = parent.parent_inode,
                // Reached an untracked root without meeting `a`.
                None => return false,
            }
        }
        true
    }

    /// Move `origin` to `destination` in the tree, marking the moved
    /// inode as `Moved`. Returns `FileNotInRepo` if `origin` is not
    /// tracked.
    pub fn move_file(
        &mut self,
        origin: &RepoPath<impl AsRef<Path>>,
        destination: &RepoPath<impl AsRef<Path>>,
        is_dir: bool,
    ) -> Result<()> {
        debug!("move_file: {},{}", origin.display(), destination.display());
        if let Some(parent) = origin.parent() {
            let fileref = OwnedFileId {
                parent_inode: self.find_inode(&parent)?,
                basename: SmallString::from_str(origin.file_name().unwrap().to_str().unwrap()),
            };

            if let Some(inode) = self.get_tree(&fileref.as_file_id()).map(|i| i.clone()) {
                // Now the last inode is in "*inode"
                debug!("txn.del fileref={:?}", fileref);
                // Remove the old (parent, name) → inode binding and its
                // reverse, then re-insert the same inode at the new path.
                self.del_tree(&fileref.as_file_id(), None)?;
                self.del_revtree(inode, None)?;

                debug!("inode={} destination={}", inode.to_hex(), destination.display());
                self.add_inode(Some(inode), destination, is_dir)?;
                self.mark_inode_moved(inode);

                return Ok(());
            }
        }
        Err(Error::FileNotInRepo(origin.to_path_buf()))
    }

    // Deletes a directory, given by its inode, recursively.
    //
    // Returns `true` if `key` or one of its descendants had been
    // recorded; recorded entries are marked `Deleted` instead of being
    // removed from the tree.
    pub fn rec_delete(&mut self, key: Inode) -> Result<bool> {
        debug!("rec_delete, key={:?}", key.to_hex());
        let file_id = OwnedFileId {
            parent_inode: key.clone(),
            basename: SmallString::from_str(""),
        };

        // Collect the children into an owned Vec first: we cannot
        // mutate the tree while iterating over it.
        let children: Vec<(_, Inode)> = self
            .iter_tree(Some((&file_id.as_file_id(), None)))
            .take_while(|&(ref k, _)| key == k.parent_inode)
            .filter(|&(ref k, _)| !k.basename.is_empty())
            .map(|(k, v)| (k.to_owned(), v.to_owned()))
            .collect();

        let mut has_recorded_descendants = false;
        for (_, b) in children {
            debug!("deleting from tree {:?}", b);
            has_recorded_descendants |= self.rec_delete(b)?;
        }

        // Now that the directory is empty, mark the corresponding node as deleted (flag '2').
        if let Some(mut header) = self.get_inodes(key).map(|h| h.clone()) {
            // If this is was recorded, mark deleted.
            debug!("key {:?}, header = {:?}", key, header);
            header.status = FileStatus::Deleted;
            self.replace_inodes(key, header)?;
            debug!("after = {:?}", self.get_inodes(key).map(|h| h.clone()));
        } else if !has_recorded_descendants {
            // Else, simply delete from the tree.
            let parent = self.get_revtree(key).unwrap().to_owned();
            debug!("key = {:?}, parent = {:?}", key, parent);
            self.del_tree(&parent.as_file_id(), None)?;
            self.del_revtree(key, None)?;
        }
        Ok(has_recorded_descendants)
    }

    /// Removes a file from the repository, by resolving `path` to its
    /// inode and deleting recursively.
    pub fn remove_file(&mut self, path: &RepoPath<impl AsRef<Path>>) -> Result<()> {
        debug!("remove_file");
        let inode = self.find_inode(path)?;
        debug!("rec_delete");
        // The boolean result (recorded descendants) is irrelevant here.
        let _ = self.rec_delete(inode)?;
        debug!("/rec_delete");
        Ok(())
    }
}

impl<A: Transaction, R> backend::GenericTxn<A, R> {
    /// Traverses the `tree` base recursively, collecting all descendants of `key`.
    ///
    /// Inodes whose recorded status is `Deleted` are skipped, together
    /// with their whole subtree. The empty basename is a directory's
    /// own entry and is not pushed to `files`.
    fn collect(
        &self,
        key: Inode,
        current_path: &RepoPath<impl AsRef<Path>>,
        basename: &str,
        files: &mut Vec<RepoPath<PathBuf>>,
    ) -> Result<()> {
        debug!("collecting {:?},{:?}", key, basename);
        // An inode without a recorded header is new, hence listed.
        let add = match self.get_inodes(key) {
            Some(inode) => {
                debug!("node = {:?}", inode);
                inode.status != FileStatus::Deleted
            }
            None => true,
        };
        if add {
            debug!("basename = {:?}", basename);

            let next_pb = current_path.join(Path::new(basename));
            if basename.len() > 0 {
                files.push(next_pb.clone())
            }

            debug!("starting iterator, key={:?}", key);
            let fileid = OwnedFileId {
                parent_inode: key.clone(),
                basename: SmallString::from_str(""),
            };
            for (k, v) in self
                .iter_tree(Some((&fileid.as_file_id(), None)))
                .take_while(|&(ref k, _)| k.parent_inode == key)
            {
                debug!("iter: {:?} {:?}", k, v);
                if k.basename.len() > 0 {
                    self.collect(v.to_owned(), &next_pb, k.basename.as_str(), files)?;
                }
            }
            // NOTE(review): this collects the rest of the tree from
            // `fileid` (no take_while bound) purely for the debug log.
            debug!("ending iterator {:?}", {
                let v: Vec<_> = self.iter_tree(Some((&fileid.as_file_id(), None))).collect();
                v
            });
        }
        Ok(())
    }

    /// Returns a vector containing all files in the repository.
    pub fn list_files(&self, inode: Inode) -> Result<Vec<RepoPath<PathBuf>>> {
        debug!("list_files {:?}", inode);
        let mut result = Vec::new();
        // Start the recursive traversal at the repository root path.
        self.collect(inode, &in_repo_root(), "", &mut result)?;
        Ok(result)
    }

    /// Returns a list of files under the given inode.
    pub fn list_files_under_inode(
        &self,
        inode: Inode,
    ) -> Vec<(SmallString, Option<Key<PatchId>>, Inode)> {
        let mut result = Vec::new();

        let file_id = OwnedFileId {
            parent_inode: inode,
            basename: SmallString::from_str(""),
        };
        for (k, v) in self
            .iter_tree(Some((&file_id.as_file_id(), None)))
            .take_while(|&(ref k, _)| k.parent_inode == inode)
        {
            let header = self.get_inodes(k.parent_inode).map(|x| x.clone());
            // add: checking that this file has neither been moved nor deleted.
            println!("============= {:?} {:?}", k, v);
            let add = match header {
                Some(ref h) => h.status == FileStatus::Ok,
                None => true,
            };
            if add && k.basename.len() > 0 {
                result.push((
                    k.basename.to_owned(),
                    header.map(|h| h.key.clone()),
                    v.clone(),
                ))
            }
        }

        result
    }

    /// Returns a list of files under the given graph node: a map from
    /// each child file node to its `(metadata, basename)` pairs.
    pub fn list_files_under_node(
        &self,
        branch: &Branch,
        key: Key<PatchId>,
    ) -> BTreeMap<Key<PatchId>, Vec<(FileMetadata, &str)>> {
        let mut result = BTreeMap::new();
        // Follow folder edges, allowing pseudo-folder edges as well.
        let max = EdgeFlags::FOLDER_EDGE | EdgeFlags::PSEUDO_EDGE;
        let e = Edge::zero(EdgeFlags::FOLDER_EDGE);
        for (_, name_node) in self
            .iter_nodes(branch, Some((key, Some(e))))
            .take_while(|&(k, ref v)| k == key && v.flag <= max)
        {
            // First hop reaches the "name" node, whose contents are two
            // metadata bytes followed by the basename.
            let name = self.get_contents(name_node.dest).unwrap();
            // This is supposed to be a small string anyway.
            let (perms, basename) = name.as_slice().split_at(2);
            let perms = FileMetadata::from_contents(perms);
            let basename = std::str::from_utf8(basename).unwrap();

            // Second hop reaches the file node itself.
            for (_, file_node) in self
                .iter_nodes(branch, Some((name_node.dest, Some(e))))
                .take_while(|&(k, ref v)| k == name_node.dest && v.flag <= max)
            {
                result
                    .entry(file_node.dest.to_owned())
                    .or_insert_with(Vec::new)
                    .push((perms, basename))
            }
        }
        result
    }

    /// A tracked inode is a directory iff it is the root, or it has a
    /// name-less self entry in the tree (see `make_new_child`).
    pub fn is_directory(&self, inode: &Inode) -> bool {
        if *inode == ROOT_INODE {
            return true;
        }
        let file_id = OwnedFileId {
            parent_inode: inode.clone(),
            basename: SmallString::from_str(""),
        };
        self.get_tree(&file_id.as_file_id()).is_some()
    }

    /// Splits a path into (1) the deepest inode from the root that is
    /// an ancestor of the path or the path itself and (2) the
    /// remainder of this path
    fn closest_in_repo_ancestor<'a>(
        &self,
        path: &'a RepoPath<impl AsRef<Path>>
    ) -> Result<(Inode, std::iter::Peekable<std::path::Components<'a>>)> {
        let mut components = path.components().peekable();
        // Start the walk at the root inode, with an empty basename.
        let mut fileid = OwnedFileId {
            parent_inode: ROOT_INODE,
            basename: SmallString::from_str(""),
        };

        loop {
            // Peek (rather than advance) so that the first unmatched
            // component is left in the returned iterator.
            if let Some(c) = components.peek() {
                fileid.basename = SmallString::from_str(c.as_os_str().to_str().unwrap());
                if let Some(v) = self.get_tree(&fileid.as_file_id()) {
                    // Component found: descend one level.
                    fileid.parent_inode = v.clone()
                } else {
                    break;
                }
            } else {
                break;
            }
            components.next();
        }
        Ok((fileid.parent_inode.clone(), components))
    }

    /// Find the inode corresponding to that path, or return an error if there's no such inode.
    pub fn find_inode(&self, path: &RepoPath<impl AsRef<Path>>)
                      -> Result<Inode> {
        let (inode, mut remaining_path_components) = self.closest_in_repo_ancestor(path)?;
        if remaining_path_components.next().is_none() {
            Ok(inode)
        } else {
            Err(Error::FileNotInRepo(path.to_path_buf()))
        }
    }

    /// Returns, for the file node `key`, the triples `(parent key,
    /// metadata, basename)` for each name this file has in `branch`.
    pub fn file_names(
        &self,
        branch: &Branch,
        key: Key<PatchId>,
    ) -> Vec<(Key<PatchId>, FileMetadata, &str)> {
        let mut result = Vec::new();
        let e = Edge::zero(EdgeFlags::FOLDER_EDGE | EdgeFlags::PARENT_EDGE);

        debug!("file_names, key {:?}", key);
        // First hop (up): from the file node to its "name" node(s).
        for (_, parent) in self
            .iter_nodes(branch, Some((key, Some(e))))
            .take_while(|&(k, _)| k == key)
            .filter(|&(_, ref v)| {
                v.flag
                    .contains(EdgeFlags::FOLDER_EDGE | EdgeFlags::PARENT_EDGE)
            })
        {
            debug!("file_names, parent {:?}", parent);
            match self.get_contents(parent.dest) {
                // A name node's contents are two metadata bytes
                // followed by the basename.
                Some(ref name) if name.len() >= 2 => {
                    // This is supposed to be a small string anyway.
                    let (perms, basename) = name.as_slice().split_at(2);
                    let perms = FileMetadata::from_contents(perms);
                    let basename = std::str::from_utf8(basename).unwrap();

                    // Second hop (up): from the name node to the parent
                    // directory node; only the first is kept (`break`).
                    for (_, grandparent) in self
                        .iter_nodes(branch, Some((parent.dest, Some(e))))
                        .take_while(|&(k, _)| k == parent.dest)
                        .filter(|&(_, ref v)| {
                            v.flag
                                .contains(EdgeFlags::FOLDER_EDGE | EdgeFlags::PARENT_EDGE)
                        })
                    {
                        result.push((grandparent.dest.to_owned(), perms, basename));
                        break;
                    }
                }
                _ => error!("Key: {:?}, file {}, line {}", key, file!(), line!()),
            }
        }
        result
    }

    /// Returns the graph keys reached by following `path` component by
    /// component through folder edges, starting at the root.
    ///
    /// Returns `Error::FileNameCount` if more than one child of the
    /// current node carries the same name component.
    pub fn prefix_keys(&self, branch: &Branch, path: &RepoPath<impl AsRef<Path>>)
                       -> Result<Vec<Key<PatchId>>> {
        let mut result = Vec::new();
        let e = Edge::zero(EdgeFlags::FOLDER_EDGE);

        let mut current_key = ROOT_KEY;

        for comp in path.components() {
            let mut is_first = true;
            let cur = current_key;
            for (_, child) in self
                .iter_nodes(branch, Some((cur, Some(e))))
                .take_while(|&(k, _)| k == cur)
                .filter(|&(_, ref v)| v.flag.contains(EdgeFlags::FOLDER_EDGE))
            {
                // A name node's contents: two metadata bytes, then the
                // basename.
                let contents = self.get_contents(child.dest).unwrap();
                if contents.into_cow().split_at(2).1
                    == comp.as_os_str().to_str().expect("file encoding problem").as_bytes() {
                    if !is_first {
                        return Err(Error::FileNameCount(current_key));
                    }
                    // Fixed: `is_first` was never cleared, so the
                    // duplicate-name check above could never fire.
                    is_first = false;

                    for (_, grandchild) in self
                        .iter_nodes(branch, Some((child.dest, Some(e))))
                        .take_while(|&(k, _)| k == child.dest)
                        .filter(|&(_, ref v)| v.flag.contains(EdgeFlags::FOLDER_EDGE))
                    {
                        result.push(grandchild.dest);
                        current_key = grandchild.dest;
                    }
                }
            }
        }
        Ok(result)
    }

    /// `name` must be the key for the name of the file.
    ///
    /// Returns every path (as a sequence of name keys) leading from the
    /// root to `name`, by a depth-first search over `FOLDER | PARENT`
    /// edges. Each stack entry is `(name, parents)`, where `parents` is
    /// `None` until the node's grandparents have been computed.
    pub fn file_paths(&self, branch: &Branch, name: Key<PatchId>) -> Vec<Vec<Key<PatchId>>> {
        let mut stack: Vec<(Key<PatchId>, Option<Vec<Key<PatchId>>>)> = vec![(name, None)];
        let mut result = Vec::new();
        while let Some((name, parents)) = stack.pop() {
            debug!("file_paths: {:?} {:?}", name, parents);
            if name.is_root() {
                // Reached the root: the keys currently on the stack
                // form one complete path; record it.
                result.push(stack.iter().map(|(name, _)| *name).collect());
            } else if let Some(mut parents) = parents {
                // already visited, push the next parent.
                if let Some(p) = parents.pop() {
                    stack.push((name, Some(parents)));
                    stack.push((p, None))
                }
            } else {
                // First visit: collect the grandparents (the name keys
                // of the directories containing this name).
                let mut parents = Vec::new();
                let e = Edge::zero(EdgeFlags::FOLDER_EDGE | EdgeFlags::PARENT_EDGE);
                for (_, parent) in self
                    .iter_nodes(branch, Some((name, Some(e))))
                    .take_while(|&(k, _)| k == name)
                    .filter(|&(_, ref v)| {
                        v.flag.contains(EdgeFlags::FOLDER_EDGE | EdgeFlags::PARENT_EDGE)
                    })
                {
                    for (_, grand_parent) in self
                        .iter_nodes(branch, Some((parent.dest, Some(e))))
                        .take_while(|&(k, _)| k == parent.dest)
                        .filter(|&(_, ref v)| {
                            v.flag.contains(EdgeFlags::FOLDER_EDGE | EdgeFlags::PARENT_EDGE)
                        })
                    {
                        parents.push(grand_parent.dest)
                    }
                }
                // Orphan names are treated as children of the root.
                if parents.is_empty() {
                    parents.push(ROOT_KEY)
                }
                stack.push((name, Some(parents)))
            }
        }
        result
    }
}

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235


238


241
242

244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524

525
526
    partial_path: Option<&str>,
//! This crate contains the core API to access Pijul repositories.
//!
//! The key object is a `Repository`, on which `Txn` (immutable
//! transactions) and `MutTxn` (mutable transactions) can be started,
//! to perform a variety of operations.
//!
//! Another important object is a `Patch`, which encodes two different pieces of information:
//!
//! - Information about deleted and inserted lines between two versions of a file.
//!
//! - Information about file moves, additions and deletions.
//!
//! The standard layout of a repository is defined in module
//! `fs_representation`, and mainly consists of a directory called
//! `.pijul` at the root of the repository, containing:
//!
//! - a directory called `pristine`, containing a Sanakirja database
//! storing most of the repository information.
//!
//! - a directory called `patches`, actually containing the patches,
//! where each patch is a gzipped compression of the bincode encoding
//! of the `patch::Patch` type.
//!
//! At the moment, users of this library, such as the Pijul
//! command-line tool, may use other files in the `.pijul` directory,
//! such as user preferences, or information about remote branches and
//! repositories.
#![recursion_limit = "128"]
#[macro_use]
extern crate bitflags;
extern crate chrono;
#[macro_use]
extern crate log;

extern crate base64;
extern crate bincode;
extern crate bs58;
extern crate byteorder;
extern crate flate2;
extern crate hex;
extern crate ignore;
extern crate openssl;
extern crate rand;
extern crate sanakirja;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate diffs;
extern crate failure;
extern crate sequoia_openpgp;
extern crate serde_json;
extern crate tempdir;
extern crate toml;

pub use sanakirja::Transaction;
use std::collections::HashSet;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;

/// The error type for all operations in this crate.
///
/// The first group of variants wraps errors from underlying libraries;
/// the rest are domain errors raised by this crate itself (see the
/// `Display` impl below for the user-facing messages).
#[derive(Debug)]
pub enum Error {
    /// I/O error from `std`.
    IO(std::io::Error),
    /// Error from the Sanakirja database backend.
    Sanakirja(sanakirja::Error),
    /// Patch (de)serialization error.
    Bincode(bincode::Error),
    /// Invalid UTF-8 while decoding text.
    Utf8(std::str::Utf8Error),
    /// JSON (de)serialization error.
    Serde(serde_json::Error),
    /// OpenSSL error.
    OpenSSL(openssl::error::Error),
    /// OpenSSL error stack.
    OpenSSLStack(openssl::error::ErrorStack),
    /// Invalid base58 input (e.g. a patch hash).
    Base58Decode(bs58::decode::DecodeError),
    /// Generic `failure`-based error.
    Failure(failure::Error),
    /// The file was already tracked.
    AlreadyAdded,
    /// The given file is not tracked by the repository.
    FileNotInRepo(PathBuf),
    /// One of the database tables is missing.
    NoDb(backend::Root),
    /// A patch hash did not match its contents.
    WrongHash,
    /// Unexpected end of input.
    EOF,
    /// A patch signature failed to verify.
    WrongPatchSignature,
    /// Attempted to create a branch whose name is already taken.
    BranchNameAlreadyExists(String),
    /// Wrong file header (possible branch corruption).
    WrongFileHeader(Key<PatchId>),
    /// A name node doesn't have exactly one child.
    FileNameCount(Key<PatchId>),
    /// A patch's dependency is not present.
    MissingDependency(Hash),
    /// The patch is not on the given branch.
    PatchNotOnBranch(PatchId),
    /// `.pijul` itself cannot be added to the repository.
    CannotAddDotPijul,
    /// The signing key is encrypted and cannot be used as-is.
    KeyIsEncrypted,
}

// Conversions from the wrapped error types, so `?` can be used directly
// on results from std I/O, failure, sanakirja, bincode, serde_json,
// UTF-8 decoding, OpenSSL and base58 decoding.
impl std::convert::From<std::io::Error> for Error {
    fn from(e: std::io::Error) -> Self {
        Error::IO(e)
    }
}

impl std::convert::From<failure::Error> for Error {
    fn from(e: failure::Error) -> Self {
        Error::Failure(e)
    }
}

impl std::convert::From<sanakirja::Error> for Error {
    fn from(e: sanakirja::Error) -> Self {
        Error::Sanakirja(e)
    }
}

impl std::convert::From<bincode::Error> for Error {
    fn from(e: bincode::Error) -> Self {
        Error::Bincode(e)
    }
}

impl std::convert::From<serde_json::Error> for Error {
    fn from(e: serde_json::Error) -> Self {
        Error::Serde(e)
    }
}

impl std::convert::From<std::str::Utf8Error> for Error {
    fn from(e: std::str::Utf8Error) -> Self {
        Error::Utf8(e)
    }
}

impl std::convert::From<openssl::error::ErrorStack> for Error {
    fn from(e: openssl::error::ErrorStack) -> Self {
        Error::OpenSSLStack(e)
    }
}

impl std::convert::From<bs58::decode::DecodeError> for Error {
    fn from(e: bs58::decode::DecodeError) -> Self {
        Error::Base58Decode(e)
    }
}

/// The result type used throughout this crate, with `Error` as the error type.
pub type Result<A> = std::result::Result<A, Error>;

// Human-readable messages: wrapper variants defer to the inner error's
// `Display`; domain variants get a fixed message.
impl std::fmt::Display for Error {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        match *self {
            Error::IO(ref e) => e.fmt(fmt),
            Error::Sanakirja(ref e) => e.fmt(fmt),
            Error::Bincode(ref e) => e.fmt(fmt),
            Error::Utf8(ref e) => e.fmt(fmt),
            Error::Serde(ref e) => e.fmt(fmt),
            Error::OpenSSL(ref e) => e.fmt(fmt),
            Error::OpenSSLStack(ref e) => e.fmt(fmt),
            Error::Base58Decode(ref e) => e.fmt(fmt),
            Error::Failure(ref e) => e.fmt(fmt),
            Error::AlreadyAdded => write!(fmt, "Already added"),
            Error::FileNotInRepo(ref file) => write!(fmt, "File {:?} not tracked", file),
            Error::NoDb(ref e) => write!(fmt, "Table missing: {:?}", e),
            Error::WrongHash => write!(fmt, "Wrong hash"),
            Error::EOF => write!(fmt, "EOF"),
            Error::WrongPatchSignature => write!(fmt, "Wrong patch signature"),
            Error::BranchNameAlreadyExists(ref name) => {
                write!(fmt, "Branch {:?} already exists", name)
            }
            Error::WrongFileHeader(ref h) => write!(
                fmt,
                "Wrong file header (possible branch corruption): {:?}",
                h
            ),
            Error::FileNameCount(ref f) => {
                write!(fmt, "Name {:?} doesn't have exactly one child", f)
            }
            Error::MissingDependency(ref f) => write!(fmt, "Missing dependency: {:?}", f),
            Error::PatchNotOnBranch(ref f) => {
                write!(fmt, "The patch is not on this branch {:?}", f)
            }
            Error::CannotAddDotPijul => write!(fmt, "Cannot add a file or directory name .pijul"),
            Error::KeyIsEncrypted => write!(fmt, "Key is encrypted"),
        }
    }
}

// NOTE(review): `description` is deprecated in later Rust releases in
// favour of `Display`; kept here for compatibility with the toolchain
// this crate targets.
impl std::error::Error for Error {
    fn description(&self) -> &str {
        match *self {
            Error::IO(ref e) => e.description(),
            Error::Sanakirja(ref e) => e.description(),
            Error::Bincode(ref e) => e.description(),
            Error::Utf8(ref e) => e.description(),
            Error::Serde(ref e) => e.description(),
            Error::OpenSSL(ref e) => e.description(),
            Error::OpenSSLStack(ref e) => e.description(),
            Error::Base58Decode(ref e) => e.description(),
            Error::Failure(ref e) => e.name().unwrap_or("Unknown Failure"),
            Error::AlreadyAdded => "Already added",
            Error::FileNotInRepo(_) => "File not tracked",
            Error::NoDb(_) => "One of the tables is missing",
            Error::WrongHash => "Wrong hash",
            Error::EOF => "EOF",
            Error::WrongPatchSignature => "Wrong patch signature",
            Error::BranchNameAlreadyExists(_) => "Branch name already exists",
            Error::WrongFileHeader(_) => "Wrong file header (possible branch corruption)",
            Error::FileNameCount(_) => "A file name doesn't have exactly one child",
            Error::MissingDependency(_) => "Missing dependency",
            Error::PatchNotOnBranch(_) => "The patch is not on this branch",
            Error::CannotAddDotPijul => "Cannot add a file or directory name .pijul",
            Error::KeyIsEncrypted => "Key is encrypted",
        }
    }
}

impl Error {
    /// Returns `true` when this error is Sanakirja's `NotEnoughSpace`,
    /// i.e. the pristine database ran out of room. Callers (see
    /// `apply_resize`) can then grow the repository and retry.
    pub fn lacks_space(&self) -> bool {
        match *self {
            Error::Sanakirja(sanakirja::Error::NotEnoughSpace) => true,
            _ => false,
        }
    }
}

#[macro_use]
mod backend;
mod file_operations;
pub mod fs_representation;

pub mod patch;
pub mod status;

pub mod apply;
mod conflict;
mod diff;
pub mod graph;
mod output;
mod record;
mod unrecord;

pub use backend::{
    ApplyTimestamp, Branch, Edge, EdgeFlags, FileId, FileMetadata, FileStatus, GenericTxn, Hash,
    HashRef, Inode, Key, LineId, MutTxn, OwnedFileId, PatchId, Repository, SmallStr, SmallString,
    Txn, DEFAULT_BRANCH, ROOT_INODE, ROOT_KEY,
};


>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
use fs_representation::{RepoRoot, ID_LENGTH};

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

================================
pub use fs_representation::{RepoRoot, RepoPath};
use fs_representation::{ID_LENGTH};

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
pub use output::{ConflictingFile, Prefixes, ToPrefixes};
pub use patch::{Patch, PatchHeader};
use rand::distributions::Alphanumeric;
use rand::Rng;
pub use record::{InodeUpdate, RecordState};
pub use sanakirja::value::Value;
use std::io::Read;

pub use diff::Algorithm as DiffAlgorithm;

impl<'env, T: rand::Rng> backend::MutTxn<'env, T> {
    /// Write the "changes" file for `branch` under `fs_repo`: a random
    /// branch id on the first line, then one `hash:timestamp` line per
    /// applied patch.
    ///
    /// The existing branch id is kept if the file already starts with
    /// one; otherwise a fresh `ID_LENGTH` alphanumeric id is generated.
    /// NOTE(review): if the existing file is shorter than
    /// `ID_LENGTH + 1` bytes, `read_exact` fails and the error is
    /// propagated instead of regenerating the id — confirm intended.
    pub fn output_changes_file<P: AsRef<Path>>(
        &mut self,
        branch: &Branch,
        fs_repo: &RepoRoot<P>,
    ) -> Result<()> {
        let changes_file = fs_repo.branch_changes_file(branch.name.as_str());
        // Pre-fill with '\n' so a missing file falls through to the
        // "generate a new id" branch below.
        let mut branch_id: Vec<u8> = vec![b'\n'; ID_LENGTH + 1];
        {
            if let Ok(mut file) = std::fs::File::open(&changes_file) {
                file.read_exact(&mut branch_id)?;
            }
        }
        let mut branch_id = if let Ok(s) = String::from_utf8(branch_id) {
            s
        } else {
            "\n".to_string()
        };
        if branch_id.as_bytes()[0] == b'\n' {
            branch_id.truncate(0);
            let mut rng = rand::thread_rng();
            branch_id.extend(rng.sample_iter(&Alphanumeric).take(ID_LENGTH));
            branch_id.push('\n');
        }

        let mut file = std::fs::File::create(&changes_file)?;
        file.write_all(&branch_id.as_bytes())?;
        for (s, hash) in self.iter_applied(&branch, None) {
            let hash_ext = self.get_external(hash).unwrap();
            writeln!(file, "{}:{}", hash_ext.to_base58(), s)?
        }
        Ok(())
    }

    /// Collect the `(external hash, timestamp)` pairs of every patch on
    /// `branch`.
    pub fn branch_patches(&mut self, branch: &Branch) -> HashSet<(backend::Hash, ApplyTimestamp)> {
        self.iter_patches(branch, None)
            .map(|(patch, time)| (self.external_hash(patch).to_owned(), time))
            .collect()
    }

    /// Fork `branch` into a new branch called `new_name`, sharing the
    /// underlying database pages copy-on-write via `txn.fork`.
    ///
    /// NOTE(review): only the *source* branch's name is compared to
    /// `new_name`; collisions with other existing branches are not
    /// detected here — confirm callers check for that.
    pub fn fork(&mut self, branch: &Branch, new_name: &str) -> Result<Branch> {
        if branch.name.as_str() == new_name {
            Err(Error::BranchNameAlreadyExists(new_name.to_string()))
        } else {
            Ok(Branch {
                db: self.txn.fork(&mut self.rng, &branch.db)?,
                patches: self.txn.fork(&mut self.rng, &branch.patches)?,
                revpatches: self.txn.fork(&mut self.rng, &branch.revpatches)?,
                name: SmallString::from_str(new_name),
                apply_counter: branch.apply_counter,
            })
        }
    }

    /// Start tracking `path` (a file or, if `is_dir`, a directory).
    pub fn add_file<P: AsRef<Path>>(&mut self, path: &RepoPath<P>, is_dir: bool) -> Result<()> {
        self.add_inode(None, path, is_dir)
    }

    /// Recursive worker for `file_nodes_fold`: walks outgoing (non-
    /// parent) folder edges from `root`, folding `f` over the file
    /// nodes. Folder levels alternate name nodes / file nodes, hence
    /// the `level & 1 == 0 && level > 0` test before calling `f`.
    fn file_nodes_fold_<A, F: FnMut(A, Key<PatchId>) -> A>(
        &self,
        branch: &Branch,
        root: Key<PatchId>,
        level: usize,
        mut init: A,
        f: &mut F,
    ) -> Result<A> {
        for v in self
            .iter_adjacent(&branch, root, EdgeFlags::empty(), EdgeFlags::all())
            .take_while(|v| {
                v.flag.contains(EdgeFlags::FOLDER_EDGE) && !v.flag.contains(EdgeFlags::PARENT_EDGE)
            })
        {
            debug!("file_nodes_fold_: {:?} {:?}", root, v);
            if level & 1 == 0 && level > 0 {
                init = f(init, root)
            }
            init = self.file_nodes_fold_(branch, v.dest, level + 1, init, f)?
        }
        Ok(init)
    }

    /// Fold `f` over all file nodes of `branch`, starting from the root.
    pub fn file_nodes_fold<A, F: FnMut(A, Key<PatchId>) -> A>(
        &self,
        branch: &Branch,
        init: A,
        mut f: F,
    ) -> Result<A> {
        self.file_nodes_fold_(branch, ROOT_KEY, 0, init, &mut f)
    }
}

impl<T: Transaction, R> backend::GenericTxn<T, R> {
    /// Tells whether a `key` is alive in `branch`, i.e. is either the
    /// root, or has at least one ingoing edge that is neither deleted
    /// nor a pseudo-edge.
    ///
    /// NOTE(review): the previous comment claimed "all its ingoing
    /// edges are alive", but the loop below ORs over the edges, i.e.
    /// checks "at least one" — confirm which semantics is intended.
    pub fn is_alive(&self, branch: &Branch, key: Key<PatchId>) -> bool {
        debug!("is_alive {:?}?", key);
        // The root is alive by definition.
        let mut alive = key.is_root();
        let e = Edge::zero(EdgeFlags::PARENT_EDGE);
        for (k, v) in self.iter_nodes(&branch, Some((key, Some(e)))) {
            if k != key {
                break;
            }
            alive = alive
                || (!v.flag.contains(EdgeFlags::DELETED_EDGE)
                    && !v.flag.contains(EdgeFlags::PSEUDO_EDGE))
        }
        alive
    }

    /// Tells whether `key` is alive or zombie in `branch`, i.e. is
    /// either the root, or has at least one non-deleted incoming
    /// parent edge.
    pub fn is_alive_or_zombie(&self, branch: &Branch, key: Key<PatchId>) -> bool {
        debug!("is_alive_or_zombie {:?}?", key);
        if key == ROOT_KEY {
            return true;
        }
        let e = Edge::zero(EdgeFlags::PARENT_EDGE);
        for (k, v) in self.iter_nodes(&branch, Some((key, Some(e)))) {
            if k != key {
                break;
            }
            debug!("{:?}", v);
            if v.flag.contains(EdgeFlags::PARENT_EDGE) && !v.flag.contains(EdgeFlags::DELETED_EDGE)
            {
                return true;
            }
        }
        false
    }

    /// Test whether `key` has a neighbor edge whose flags lie between
    /// `min` and `max`.
    ///
    /// NOTE(review): only the first edge at or after `(key, min)` is
    /// inspected (`.next()`), which relies on edges being ordered by
    /// flag — confirm the iteration order guarantees this.
    pub fn has_edge(
        &self,
        branch: &Branch,
        key: Key<PatchId>,
        min: EdgeFlags,
        max: EdgeFlags,
    ) -> bool {
        let e = Edge::zero(min);
        if let Some((k, v)) = self.iter_nodes(&branch, Some((key, Some(e)))).next() {
            debug!("has_edge {:?}", v.flag);
            k == key && (v.flag <= max)
        } else {
            false
        }
    }

    /// Tells which paths (of folder nodes) a key is in.
    ///
    /// Walks parent edges upwards from `key` (following pseudo-edges
    /// transparently), collecting the keys whose parent edge is a
    /// folder edge, i.e. the name nodes of files containing `key`.
    pub fn get_file<'a>(&'a self, branch: &Branch, key: Key<PatchId>) -> Vec<Key<PatchId>> {
        let mut stack = vec![key.to_owned()];
        // Guard against cycles introduced by pseudo-edges.
        let mut seen = HashSet::new();
        let mut names = Vec::new();
        loop {
            match stack.pop() {
                None => break,
                Some(key) if !seen.contains(&key) => {
                    debug!("key {:?}, None", key);
                    seen.insert(key.clone());

                    // Debug-only dump of every adjacent edge.
                    for v in self.iter_adjacent(branch, key, EdgeFlags::empty(), EdgeFlags::all()) {
                        debug!("all_edges: {:?}", v);
                    }
                    for v in
                        self.iter_adjacent(branch, key, EdgeFlags::PARENT_EDGE, EdgeFlags::all())
                    {
                        debug!("get_file {:?}", v);
                        // Plain parent edge (possibly pseudo): keep
                        // climbing.
                        if v.flag | EdgeFlags::PSEUDO_EDGE
                            == EdgeFlags::PARENT_EDGE | EdgeFlags::PSEUDO_EDGE
                        {
                            debug!("push!");
                            stack.push(v.dest.clone())
                        } else if v
                            .flag
                            .contains(EdgeFlags::PARENT_EDGE | EdgeFlags::FOLDER_EDGE)
                        {
                            // Folder parent edge: `key` is a file node.
                            names.push(key);
                        }
                    }
                }
                _ => {}
            }
        }
        debug!("get_file returning {:?}", names);
        names
    }

    /// For each file containing `key`, return its root node together
    /// with the (reversed-then-fixed) list of path components leading
    /// to it.
    pub fn get_file_names<'a>(
        &'a self,
        branch: &Branch,
        key: Key<PatchId>,
    ) -> Vec<(Key<PatchId>, Vec<&'a str>)> {
        let mut names = vec![(key, Vec::new())];
        debug!("inode: {:?}", names);
        // Go back to the root.
        let mut next_names = Vec::new();
        let mut only_roots = false;
        let mut inodes = HashSet::new();
        while !only_roots {
            next_names.clear();
            only_roots = true;
            for (inode, names) in names.drain(..) {
                if !inodes.contains(&inode) {
                    inodes.insert(inode.clone());

                    if inode != ROOT_KEY {
                        only_roots = false;
                    }
                    let names_ = self.file_names(branch, inode);
                    if names_.is_empty() {
                        next_names.push((inode, names));
                        // NOTE(review): breaking here drops the rest of
                        // the `drain(..)` iterator, discarding any
                        // remaining entries — confirm intended.
                        break;
                    } else {
                        debug!("names_ = {:?}", names_);
                        for (inode_, _, base) in names_ {
                            let mut names = names.clone();
                            names.push(base);
                            next_names.push((inode_, names))
                        }
                    }
                }
            }
            std::mem::swap(&mut names, &mut next_names)
        }
        debug!("end: {:?}", names);
        // Components were accumulated leaf-to-root; flip them.
        for &mut (_, ref mut name) in names.iter_mut() {
            name.reverse()
        }
        names
    }
}

fn make_remote<'a, I: Iterator<Item = &'a Hash>>(
    target: &fs_representation::RepoRoot<impl AsRef<Path>>,
    remote: I,
) -> Result<(Vec<(Hash, Patch)>, usize)> {
    use fs_representation::*;
    use std::fs::File;
    use std::io::BufReader;
    let mut patches = Vec::new();
    let mut patches_dir = target.patches_dir();
    let mut size_increase = 0;

    for h in remote {
        patches_dir.push(&patch_file_name(h.as_ref()));

        debug!("opening {:?}", patches_dir);
        let file = try!(File::open(&patches_dir));
        let mut file = BufReader::new(file);
        let (h, _, patch) = Patch::from_reader_compressed(&mut file)?;

        size_increase += patch.size_upper_bound();
        patches.push((h.clone(), patch));

        patches_dir.pop();
    }
    Ok((patches, size_increase))
}

/// Apply a number of patches, guessing the new repository size.  If
/// this fails, the repository size is guaranteed to have been
/// increased by at least some pages, and it is safe to call this
/// function again.
///
/// Also, this function takes a file lock on the repository.
pub fn apply_resize<'a, I, F, P: output::ToPrefixes>(
    diff_algorithm: diff::Algorithm,
    target: &fs_representation::RepoRoot<impl AsRef<Path>>,
    branch_name: &str,
    remote: I,
    partial_path: Option<&Path>,
    partial_paths: P,
    apply_cb: F,













































1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59


















60
61
62
63



















64
65
66
67
68
69
70
71
72
73
74
75

76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107

108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145

146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181

182
183
184
185
186
187
188
189
190
191
192
193
194

195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217

218
219
220
221
222
223

225
226
227


230


233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251

252
253
254

255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380

381
382
383
384
385
386

387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409

410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436

437
438

439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541

542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563

564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585

586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
fn is_related(prefixes: &[Vec<Key<PatchId>>], key: Key<PatchId>) -> Related {
    if prefixes.is_empty() {
        return Related::Exact
    for pref in prefixes {
                    return Related::Exact
                    return Related::Ancestor
        prefixes: &[Vec<Key<PatchId>>],
        prefixes: &[&str],

        let prefixes = prefixes.iter().flat_map(|pref| self.prefix_keys(&branch, pref)).collect::<Vec<_>>();

        prefixes: &[Vec<Key<PatchId>>],
                            if !prefixes.is_empty() {
                                self.put_partials(branch.name.as_small_str(), output_item.key)?;
                            }
                                &[],
                        if !prefixes.is_empty() {
                            self.put_partials(branch.name.as_small_str(), output_item.key)?;
                        }
        prefixes: &[Vec<Key<PatchId>>],
    pub fn output_repository<P:ToPrefixes>(
        let prefixes = prefixes.to_prefixes(&self, &branch);
        prefixes: &[&str],
        let prefixes = prefixes.iter().flat_map(|pref| self.prefix_keys(&branch, pref)).collect::<Vec<_>>();
    fn to_prefixes<T>(&self, txn: &MutTxn<T>, branch: &Branch) -> Vec<Vec<Key<PatchId>>>;
impl<'a> ToPrefixes for &'a [&'a str] {
    fn to_prefixes<T>(&self, txn: &MutTxn<T>, branch: &Branch) -> Vec<Vec<Key<PatchId>>> {
        self.iter().flat_map(|pref| txn.prefix_keys(&branch, pref)).collect()
    fn to_prefixes<T>(&self, txn: &MutTxn<T>, _: &Branch) -> Vec<Vec<Key<PatchId>>> {
        self.iter().map(|pref| {
            let mut result = Vec::new();
            let mut current = *pref;
            loop {
                if current == ROOT_INODE {
                    result.push(ROOT_KEY);
                    break;
                }
                result.push(txn.get_inodes(current).unwrap().key);
                match txn.get_revtree(current) {
                    Some(v) => current = v.parent_inode.clone(),
                    None => break,
                }
            }
            result
        }).collect()
use backend::*;
use graph;
use patch::*;
use rand;
use record::InodeUpdate;
use std;
use std::borrow::Cow;
use std::collections::{HashMap, HashSet};
use std::fs;
use std::path::{Path, PathBuf};
use tempdir;
use {Error, Result};

use super::fs_representation::{RepoRoot, RepoPath, in_repo_root};

#[cfg(not(windows))]
use std::os::unix::fs::PermissionsExt;

/// Set the Unix permission bits of `name` to `permissions` (e.g.
/// `0o755`), leaving the rest of the file's permission state as
/// reported by `metadata` untouched.
#[cfg(not(windows))]
fn set_permissions(name: &Path, permissions: u16) -> Result<()> {
    let mut perms = std::fs::metadata(name)?.permissions();
    debug!(
        "setting mode for {:?} to {:?} (currently {:?})",
        name, permissions, perms
    );
    perms.set_mode(permissions as u32);
    std::fs::set_permissions(name, perms)?;
    Ok(())
}

/// No-op on Windows: Unix permission bits have no direct equivalent
/// there, so the call always succeeds without touching the file.
#[cfg(windows)]
fn set_permissions(_name: &Path, _permissions: u16) -> Result<()> {
    Ok(())
}

/// One item (file or directory) queued for output to the working copy.
#[derive(Debug)]
struct OutputItem {
    // Inode of the parent directory this item is output under.
    parent: Inode,
    // Recorded file metadata (permissions, etc.).
    meta: FileMetadata,
    // Graph key of the node being output.
    key: Key<PatchId>,
    // Inode already associated with this key, if any — presumably None
    // for files not yet present in the working copy; confirm.
    inode: Option<Inode>,
    // Whether the node is a zombie — TODO confirm exact meaning here.
    is_zombie: bool,
    // Relation of this item to the requested output prefixes.
    related: Related,
}

/// Relation of a node to a set of requested output prefixes.
#[derive(Debug, PartialEq, Eq)]
pub enum Related {
    /// Unrelated to every prefix.
    No,
    /// A strict ancestor of some prefix — presumably traversed but not
    /// itself a requested output; confirm.
    Ancestor,
    /// Matches a prefix exactly (also used when the prefix set is
    /// empty, i.e. "output everything").
    Exact,
}

/// A file reported by output as containing conflicts.
pub struct ConflictingFile {
    /// Inode of the file in the working copy.
    pub inode: Inode,
    /// Number of conflicts — TODO confirm unit (markers vs. sites).
    pub n_conflicts: usize,
    /// Path of the file, relative to the repository root.
    pub path: RepoPath<PathBuf>,
}

pub trait Prefix {
    fn is_related(&self, inode: Option<Inode>, path: &Path) -> Related;
}

impl<'a> Prefix for &'a [&'a Path] {
    fn is_related(&self, _: Option<Inode>, path: &Path) -> Related {
        if self.is_empty() {
            Related::Exact
        } else {
            for &pref in self.iter() {
                if pref == path {
                    return Related::Exact;
                } else if pref.starts_with(path) {
                    return Related::Ancestor;
                }
            }
            Related::No
        }
fn is_related(prefixes: &Prefixes, key: Key<PatchId>) -> Related {
    if prefixes.0.is_empty() {
        return Related::Exact;
    }
}

impl<'a> Prefix for &'a [Vec<Inode>] {
    fn is_related(&self, inode: Option<Inode>, _: &Path) -> Related {
        if self.is_empty() {
            return Related::Exact
        } else if let Some(inode) = inode {
            debug!("is_related {:?} {:?}", self, inode);
            for pref in self.iter() {
                let mut first = true;
                for ancestor in pref.iter() {
                    if inode == *ancestor {
                        if first {
                            return Related::Exact;
                        } else {
                            return Related::Ancestor;
                        }
                    }
                    first = false
    for pref in prefixes.0.iter() {
        let mut is_first = true;
        for &p in pref {
            if p == key {
                if is_first {
                    return Related::Exact;
                } else {
                    return Related::Ancestor;
                }
            }
            is_first = false
        }
        Related::No
    }
    Related::No
}

impl<'env, T: rand::Rng> MutTxn<'env, T> {
    /// Climb `revtree` from `inode` up to the root and rebuild the
    /// file's path under `working_copy`. Returns `None` if some link of
    /// the chain is missing from the tree.
    fn filename_of_inode(&self, inode: Inode, working_copy: &Path) -> Option<PathBuf> {
        let mut names = Vec::new();
        let mut cur = inode;
        loop {
            if let Some(entry) = self.get_revtree(cur) {
                names.push(entry.basename.to_owned());
                cur = entry.parent_inode.clone();
                if cur == ROOT_INODE {
                    break;
                }
            } else {
                debug!("filename_of_inode: not in tree");
                return None;
            }
        }
        // Components were collected leaf-first; append them in reverse.
        let mut path = working_copy.to_path_buf();
        for name in names.iter().rev() {
            path.push(name.as_small_str().as_str());
        }
        Some(path)
    }

    /// Collect all the children of key `key` into `files`.
    // NOTE(review): patch-render garble — the next line is the stale
    // generic signature; the non-generic one below it is current.
    fn collect_children<P: Prefix + Copy>(
    fn collect_children(
        &mut self,
        branch: &Branch,
        // Path of the directory whose children we are collecting,
        // relative to the repository root.
        path: RepoPath<&Path>,
        // Graph key of the directory node.
        key: Key<PatchId>,
        // Working-copy inode of the directory.
        inode: Inode,
        // NOTE(review): only used for debug logging and `pop()` below;
        // presumably `&mut` in the real source — confirm.
        base_path: &RepoPath<impl AsRef<Path> + std::fmt::Debug>,
        // Output restriction; `is_related` decides which children to keep.
        prefixes: &Prefixes,
        // Accumulator: path -> (name key -> item to output).
        files: &mut HashMap<RepoPath<PathBuf>, HashMap<Key<PatchId>, OutputItem>>,
    ) -> Result<()> {
        debug!("collect_children {:?}", base_path);
        let f = EdgeFlags::FOLDER_EDGE | EdgeFlags::PSEUDO_EDGE | EdgeFlags::EPSILON_EDGE;
        // Each folder edge leads to a "name" node; follow one more folder
        // edge from it to reach the file node itself.
        for b in self.iter_adjacent(&branch, key, EdgeFlags::empty(), f) {
            debug!("b={:?}", b);
            let cont_b = self.get_contents(b.dest).unwrap();
            let (_, b_key) = self
                .iter_nodes(
                    &branch,
                    Some((b.dest, Some(Edge::zero(EdgeFlags::FOLDER_EDGE)))),
                )
                .next()
                .unwrap();
            let b_inode = self.get_revinodes(b_key.dest);

            // This is supposed to be a small string, so we can do
            // as_slice.
            if cont_b.as_slice().len() < 2 {
                error!("cont_b {:?} b.dest {:?}", cont_b, b.dest);
                return Err(Error::WrongFileHeader(b.dest));
            }
            // The name node's contents are two metadata bytes followed by
            // the UTF-8 basename.
            let (perms, basename) = cont_b.as_slice().split_at(2);

            let perms = FileMetadata::from_contents(perms);
            let basename = std::str::from_utf8(basename).unwrap();
            debug!("filename: {:?} {:?}", perms, basename);
            let name = path.join(Path::new(basename));
            let related = is_related(&prefixes, b_key.dest);
            debug!("related {:?} = {:?}", base_path, related);
            // NOTE(review): `pop()` without a matching `push()` in view —
            // likely a stale line from the rendered patch; confirm.
            base_path.pop();
            if related != Related::No {
                let v = files.entry(name).or_insert(HashMap::new());
                if v.get(&b.dest).is_none() {
                    // Zombie: the file node still has deleted parent
                    // folder edges.
                    let is_zombie = {
                        let f = EdgeFlags::FOLDER_EDGE
                            | EdgeFlags::PARENT_EDGE
                            | EdgeFlags::DELETED_EDGE;
                        self.iter_adjacent(&branch, b_key.dest, f, f)
                            .next()
                            .is_some()
                    };
                    debug!("is_zombie = {:?}", is_zombie);
                    v.insert(
                        b.dest,
                        OutputItem {
                            parent: inode,
                            meta: perms,
                            key: b_key.dest,
                            inode: b_inode,
                            is_zombie,
                            related,
                        },
                    );
                }
            }
        }
        Ok(())
    }

    /// Collect names of files with conflicts
    ///
    /// As conflicts have an internal representation, it can be determined
    /// exactly which files contain conflicts.
    pub fn list_conflict_files(
        &mut self,
        branch_name: &str,
        prefixes: &[&Path],
        prefixes: &[RepoPath<&Path>],
    ) -> Result<Vec<RepoPath<PathBuf>>> {
        let mut files = HashMap::new();
        let mut next_files = HashMap::new();
        let branch = self.open_branch(branch_name)?;
        let mut base_path = in_repo_root();
        let prefixes = prefixes.to_prefixes(self, &branch);
        self.collect_children(
            &branch,
            in_repo_root(),
            ROOT_KEY,
            ROOT_INODE,
            &mut base_path,
            prefixes,
            &prefixes,
            &mut files,
        )?;

        let mut ret = vec![];
        let mut forward = Vec::new();
        while !files.is_empty() {
            next_files.clear();
            for (a, b) in files.drain() {
                for (_, output_item) in b {
                    // (_, meta, inode_key, inode, is_zombie)
                    // Only bother with existing files
                    if let Some(inode) = output_item.inode {
                        if output_item.is_zombie {
                            ret.push(a.clone())
                        }
                        if output_item.meta.is_dir() {
                            self.collect_children(
                                &branch,
                                a.as_ref(),
                                output_item.key,
                                inode,
                                &mut base_path,
                                prefixes,
                                &prefixes,
                                &mut next_files,
                            )?;
                        } else {
                            let mut graph = self.retrieve(&branch, output_item.key);
                            let mut buf = graph::Writer::new(std::io::sink());

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
                            let n_conflicts =
                                self.output_file(&branch, &mut buf, &mut graph, &mut forward)?;
                            if n_conflicts > 0 {

================================

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
                            if self.output_file(&branch, &mut buf, &mut graph, &mut forward)? {

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
                                ret.push(a.clone())
                            }
                        }
                    }
                }
            }
            std::mem::swap(&mut files, &mut next_files);
        }
        Ok(ret)
    }

    /// Rename `name` to `name.<base58 patch id>`, used when several
    /// graph keys compete for the same path.
    fn make_conflicting_name(&self, name: &mut RepoPath<PathBuf>, name_key: Key<PatchId>) {
        let suffixed = {
            // Scope the borrow of `name` so it can be mutated afterwards.
            let base = name.file_name().unwrap().to_string_lossy();
            format!("{}.{}", base, &name_key.patch.to_base58())
        };
        name.set_file_name(std::ffi::OsStr::new(&suffixed));
    }

    fn output_alive_files<P: Prefix + Copy>(
    fn output_alive_files(
        &mut self,
        branch: &mut Branch,
        prefixes: P,
        prefixes: &Prefixes,
        working_copy: &Path,
        conflicts: &mut Vec<ConflictingFile>,
    ) -> Result<()> {
        debug!("working copy {:?}", working_copy);
        let mut files = HashMap::new();
        let mut next_files = HashMap::new();
        let mut base_path = RepoPath(PathBuf::new());
        self.collect_children(
            branch,
            in_repo_root(),
            ROOT_KEY,
            ROOT_INODE,
            &mut base_path,
            prefixes,
            &mut files,
        )?;

        let mut done = HashSet::new();
        while !files.is_empty() {
            debug!("files {:?}", files);
            next_files.clear();
            for (a, b) in files.drain() {
                let b_len = b.len();
                for (name_key, output_item) in b {
                    // (parent_inode, meta, inode_key, inode, is_zombie)
                    /*let has_several_names = {
                        let e = Edge::zero(EdgeFlags::PARENT_EDGE | EdgeFlags::FOLDER_EDGE);
                        let mut it = self.iter_nodes(branch, Some((inode_key, Some(&e))))
                            .take_while(|&(k, v)| {
                                k == inode_key && v.flag|EdgeFlags::PSEUDO_EDGE == e.flag|EdgeFlags::PSEUDO_EDGE
                            });
                        it.next();
                        it.next().is_some()
                    };*/
                    if !done.insert(output_item.key) {
                        debug!("already done {:?}", output_item.key);
                        continue;
                    }

                    let name = if b_len > 1
                    /*|| has_several_names*/
                    {
                        // debug!("b_len = {:?}, has_several_names {:?}", b_len, has_several_names);
                        let mut name = a.clone();
                        self.make_conflicting_name(&mut name, name_key);
                        Cow::Owned(name.0)
                    } else {
                        Cow::Borrowed(a.as_path())
                    };
                    let file_name = name.file_name().unwrap().to_string_lossy();
                    base_path.push(&file_name);
                    let file_id = OwnedFileId {
                        parent_inode: output_item.parent,
                        basename: SmallString::from_str(&file_name),
                    };
                    let working_copy_name = working_copy.join(name.as_ref());

                    let status = if output_item.is_zombie {
                        FileStatus::Zombie
                    } else {
                        FileStatus::Ok
                    };

                    let inode = if let Some(inode) = output_item.inode {
                        // If the file already exists, find its
                        // current name and rename it if that name
                        // is different.
                        if let Some(ref current_name) = self.filename_of_inode(inode, "".as_ref()) {
                            if current_name != name.as_ref() {
                                let current_name = working_copy.join(current_name);
                                debug!("renaming {:?} to {:?}", current_name, working_copy_name);
                                let parent = self.get_revtree(inode).unwrap().to_owned();
                                self.del_revtree(inode, None)?;
                                self.del_tree(&parent.as_file_id(), None)?;

                                debug!("file_id: {:?}", file_id);
                                if let Some(p) = working_copy_name.parent() {
                                    std::fs::create_dir_all(p)?
                                }
                                if let Err(e) = std::fs::rename(&current_name, &working_copy_name) {
                                    error!(
                                        "while renaming {:?} to {:?}: {:?}",
                                        current_name, working_copy_name, e
                                    )
                                }
                            }
                        }
                        self.put_tree(&file_id.as_file_id(), inode)?;
                        self.put_revtree(inode, &file_id.as_file_id())?;
                        // If the file had been marked for deletion, remove that mark.
                        if let Some(header) = self.get_inodes(inode) {
                            debug!("header {:?}", header);
                            let mut header = header.to_owned();
                            header.status = status;
                            self.replace_inodes(inode, header)?;
                        } else {
                            let header = FileHeader {
                                key: output_item.key,
                                metadata: output_item.meta,
                                status,
                            };
                            debug!("no header {:?}", header);
                            self.replace_inodes(inode, header)?;
                            self.replace_revinodes(output_item.key, inode)?;
                        }
                        inode
                    } else {
                        // Else, create new inode.
                        let inode = self.create_new_inode();
                        let file_header = FileHeader {
                            key: output_item.key,
                            metadata: output_item.meta,
                            status,
                        };
                        self.replace_inodes(inode, file_header)?;
                        self.replace_revinodes(output_item.key, inode)?;
                        debug!("file_id: {:?}", file_id);
                        self.put_tree(&file_id.as_file_id(), inode)?;
                        self.put_revtree(inode, &file_id.as_file_id())?;
                        inode
                    };
                    if output_item.meta.is_dir() {
                        // This is a directory, register it in inodes/trees.
                        std::fs::create_dir_all(&working_copy_name)?;
                        if let Related::Exact = output_item.related {
                            self.collect_children::<&[&Path]>(
                            self.collect_children(
                                branch,
                                RepoPath(name.as_ref()),
                                output_item.key,
                                inode,
                                &mut base_path,
                                &[][..],
                                &Prefixes(Vec::new()),
                                &mut next_files,
                            )?
                        } else {
                            self.collect_children(
                                branch,
                                RepoPath(name.as_ref()),
                                output_item.key,
                                inode,
                                &mut base_path,
                                prefixes,
                                &mut next_files,
                            )?
                        }
                    } else {
                        // Output file.
                        info!(
                            "creating file {:?}, key {:?} {:?}",
                            &name, output_item.key, working_copy_name
                        );
                        let mut f =
                            graph::Writer::new(std::fs::File::create(&working_copy_name).unwrap());
                        debug!("done");

                        let mut l = self.retrieve(branch, output_item.key);
                        if log_enabled!(log::Level::Debug) {
                            let mut w = working_copy_name.clone();
                            w.set_extension("pijul_debug");
                            let f = std::fs::File::create(&w)?;
                            l.debug(self, branch, false, false, f)?;
                        }
                        let mut forward = Vec::new();
                        let n_conflicts = self.output_file(branch, &mut f, &mut l, &mut forward)?;
                        if n_conflicts > 0 {
                            conflicts.push(ConflictingFile {
                                inode,
                                n_conflicts,
                                path: RepoPath(name.to_path_buf()),
                            })
                        }
                        self.remove_redundant_edges(branch, &forward)?
                    }
                    base_path.pop();
                    set_permissions(&working_copy_name, output_item.meta.permissions())?
                }
            }
            std::mem::swap(&mut files, &mut next_files);
        }
        Ok(())
    }

    fn output_repository_assuming_no_pending_patch<P: Prefix + Copy>(
    fn output_repository_assuming_no_pending_patch(
        &mut self,
        prefixes: P,
        prefixes: &Prefixes,
        branch: &mut Branch,
        working_copy: &RepoRoot<impl AsRef<Path>>,
        pending_patch_id: PatchId,
        conflicts: &mut Vec<ConflictingFile>,
    ) -> Result<()> {
        debug!(
            "inodes: {:?}",
            self.iter_inodes(None)
                .map(|(u, v)| (u.to_owned(), v.to_owned()))
                .collect::<Vec<_>>()
        );
        // Now, garbage collect dead inodes.
        let dead: Vec<_> = self
            .iter_tree(None)
            .filter_map(|(k, v)| {
                debug!("{:?} {:?}", k, v);
                if let Some(key) = self.get_inodes(v) {
                    if key.key.patch == pending_patch_id || self.is_alive_or_zombie(branch, key.key)
                    {
                        // Don't delete.
                        None
                    } else {
                        Some((
                            k.to_owned(),
                            v,
                            self.filename_of_inode(v, working_copy.repo_root.as_ref()),
                        ))
                    }
                } else {
                    debug!("not in inodes");
                    Some((k.to_owned(), v, None))
                }
            })
            .collect();
        debug!("dead: {:?}", dead);

        // Now, "kill the deads"
        for (ref parent, inode, ref name) in dead {
            self.remove_inode_rec(inode)?;
            debug!("removed");
            if let Some(ref name) = *name {
                debug!("deleting {:?}", name);
                if let Ok(meta) = fs::metadata(name) {
                    if let Err(e) = if meta.is_dir() {
                        fs::remove_dir_all(name)
                    } else {
                        fs::remove_file(name)
                    } {
                        error!("while deleting {:?}: {:?}", name, e);
                    }
                }
            } else {
                self.del_tree(&parent.as_file_id(), Some(inode))?;
                self.del_revtree(inode, Some(&parent.as_file_id()))?;
            }
        }
        debug!("done deleting dead files");
        // Then output alive files. This has to be done *after*
        // removing files, because we a file removed might have the
        // same name as a file added without there being a conflict
        // (depending on the relation between the two patches).
        self.output_alive_files(branch, prefixes, working_copy.repo_root.as_ref(), conflicts)?;
        debug!("done raw_output_repository");
        Ok(())
    }

    /// Delete `inode` — and, if it is a directory, everything below it —
    /// from the `inodes`/`revinodes` and `tree`/`revtree` tables.
    fn remove_inode_rec(&mut self, inode: Inode) -> Result<()> {
        // Explicit work list instead of recursion.
        let mut pending = vec![inode];
        while let Some(ino) = pending.pop() {
            debug!("kill dead {:?}", ino.to_hex());
            let header = self.get_inodes(ino).map(|h| h.to_owned());
            if let Some(header) = header {
                self.del_inodes(ino, None)?;
                self.del_revinodes(header.key, None)?;
                // Collect the tree/revtree bindings first: we cannot
                // delete from the tables while iterating over them.
                let mut bindings = Vec::new();
                for (k, v) in self
                    .iter_revtree(Some((ino, None)))
                    .take_while(|&(k, _)| k == ino)
                {
                    bindings.push((k.clone(), v.to_owned()))
                }
                for &(k, ref v) in bindings.iter() {
                    self.del_tree(&v.as_file_id(), Some(k))?;
                    self.del_revtree(k, Some(&v.as_file_id()))?;
                }
                // If the dead inode was a directory, schedule its children.
                let dir_id = OwnedFileId {
                    parent_inode: ino.clone(),
                    basename: SmallString::from_str(""),
                };
                pending.extend(
                    self.iter_tree(Some((&dir_id.as_file_id(), None)))
                        .take_while(|&(ref k, _)| k.parent_inode == ino)
                        .map(|(_, v)| v.to_owned()),
                )
            }
        }
        Ok(())
    }

    pub fn output_repository<P: Prefix + Copy>(
    pub fn output_repository(
        &mut self,
        branch: &mut Branch,
        working_copy: &RepoRoot<impl AsRef<Path>>,
        prefixes: &Prefixes,
        pending: &Patch,
        local_pending: &HashSet<InodeUpdate>,
    ) -> Result<Vec<ConflictingFile>> {
        debug!("begin output repository");

        debug!("applying pending patch");
        let tempdir = tempdir::TempDir::new("pijul")?;
        let hash = pending.save(tempdir.path(), None)?;
        let internal =
            self.apply_local_patch(branch, working_copy, &hash, pending, local_pending, true)?;

        debug!("applied as {:?}", internal.to_base58());

        // let prefixes = prefixes.to_prefixes(&self, &branch);
        debug!("prefixes {:?}", prefixes);
        let mut conflicts = Vec::new();
        self.output_repository_assuming_no_pending_patch(
            prefixes,
            &prefixes,
            branch,
            working_copy,
            internal,
            &mut conflicts,
        )?;

        debug!("unrecording pending patch");
        self.unrecord(branch, internal, pending)?;
        Ok(conflicts)
    }

    /// Output the repository without any pending patch; uses
    /// `ROOT_PATCH_ID` as the (nonexistent) pending patch id.
    /// Returns the files that still contain conflicts.
    ///
    /// (Patch-render garble resolved: stale `&prefixes` call argument
    /// removed — `prefixes` is already a `&Prefixes`.)
    pub fn output_repository_no_pending(
        &mut self,
        branch: &mut Branch,
        working_copy: &RepoRoot<impl AsRef<Path>>,
        prefixes: &Prefixes,
    ) -> Result<Vec<ConflictingFile>> {
        debug!("begin output repository {:?}", prefixes);
        debug!("prefixes {:?}", prefixes);
        let mut conflicts = Vec::new();
        self.output_repository_assuming_no_pending_patch(
            prefixes,
            branch,
            working_copy,
            ROOT_PATCH_ID,
            &mut conflicts,
        )?;
        Ok(conflicts)
    }

    /// Record, for `branch_name`, the key of each requested prefix in
    /// the partials table, so later outputs can be restricted to them.
    pub(crate) fn output_partials(&mut self, branch_name: &str, prefixes: &Prefixes) -> Result<()> {
        for p in prefixes.0.iter() {
            // The head of a chain is the prefix itself (ancestors follow);
            // skip degenerate empty chains instead of panicking on `p[0]`.
            if let Some(&partial) = p.first() {
                self.put_partials(branch_name, partial)?;
            }
        }
        Ok(())
    }
}

/// Output restriction: each inner `Vec` is one prefix, stored as the
/// key of the prefix itself followed by the keys of its ancestors
/// (built by `ToPrefixes`). Empty means "no restriction".
#[derive(Debug)]
pub struct Prefixes(Vec<Vec<Key<PatchId>>>);

impl Prefixes {
    /// The empty prefix set, i.e. no restriction on output.
    pub fn empty() -> Self {
        Prefixes(vec![])
    }
}

/// Conversion of user-supplied prefixes (paths or inodes) into the
/// graph-key chains used to restrict repository output.
pub trait ToPrefixes {
    fn to_prefixes<T>(&self, txn: &MutTxn<T>, branch: &Branch) -> Prefixes;
}

impl<'a, P> ToPrefixes for &'a [RepoPath<P>] where P: AsRef<Path>+'a {
    /// Resolve each path prefix to its key chains via `prefix_keys`.
    fn to_prefixes<T>(&self, txn: &MutTxn<T>, branch: &Branch) -> Prefixes {
        let mut chains = Vec::new();
        for pref in self.iter() {
            chains.extend(txn.prefix_keys(&branch, pref));
        }
        Prefixes(chains)
    }
}

/// Build one key chain per inode: the inode's own key first, then its
/// ancestors up to (and including) the root key.
// NOTE(review): the impl's closing brace is lost in this patch-rendered
// source; the body itself is complete.
impl<'a> ToPrefixes for &'a [Inode] {
    fn to_prefixes<T>(&self, txn: &MutTxn<T>, _: &Branch) -> Prefixes {
        Prefixes(
            self.iter()
                .map(|pref| {
                    let mut result = Vec::new();
                    let mut current = *pref;
                    loop {
                        if current == ROOT_INODE {
                            result.push(ROOT_KEY);
                            break;
                        }
                        // Climb the revtree towards the root, one key
                        // per ancestor; stop if the chain is broken.
                        result.push(txn.get_inodes(current).unwrap().key);
                        match txn.get_revtree(current) {
                            Some(v) => current = v.parent_inode.clone(),
                            None => break,
                        }
                    }
                    result
                })
                .collect(),
        )
    }

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95

96
97
line = "0.1"
[package]
name = "pijul"
description = "A patch-based distributed version control system, easy to use and fast. Command-line interface."
version = "0.12.0"
authors = ["Pierre-Étienne Meunier","Florent Becker"]
license = "GPL-2.0+"
homepage = "https://pijul.org/"
repository = "https://nest.pijul.com/pijul_org/pijul"

include = [
  "Cargo.toml",
  "Cargo.lock",
  "COPYING",
  "src/meta.rs",
  "src/error.rs",
  "src/commands/add.rs",
  "src/commands/apply.rs",
  "src/commands/ask.rs",
  "src/commands/branches.rs",
  "src/commands/checkout.rs",
  "src/commands/clone.rs",
  "src/commands/credit.rs",
  "src/commands/dependencies.rs",
  "src/commands/diff.rs",
  "src/commands/dist.rs",
  "src/commands/fold_until.rs",
  "src/commands/fork.rs",
  "src/commands/fs_operation.rs",
  "src/commands/generate_completions.rs",
  "src/commands/grep.rs",
  "src/commands/hooks.rs",
  "src/commands/info.rs",
  "src/commands/init.rs",
  "src/commands/key.rs",
  "src/commands/log.rs",
  "src/commands/ls.rs",
  "src/commands/mod.rs",
  "src/commands/mv.rs",
  "src/commands/patch.rs",
  "src/commands/prune.rs",
  "src/commands/pull.rs",
  "src/commands/push.rs",
  "src/commands/record.rs",
  "src/commands/remote.rs",
  "src/commands/remove.rs",
  "src/commands/revert.rs",
  "src/commands/rollback.rs",
  "src/commands/sign.rs",
  "src/commands/ssh_auth_attempts.rs",
  "src/commands/status.rs",
  "src/commands/tag.rs",
  "src/commands/unrecord.rs",
  "src/cli.rs",
  "src/main.rs"
]

[dependencies]
clap = "2.32"
regex = "1.1"
rand = "0.6"
log = "0.4"
chrono = "0.4"
env_logger = "0.6"

futures = "0.1"
tokio = "0.1"
thrussh = "0.20"
thrussh-config = "0.1"
thrussh-keys = "0.11.0"

reqwest = "0.9"

toml = "0.4"
serde = "1.0"
serde_derive = "1.0"
bs58 = "0.2"
hex = "0.3"
term = "0.5"

getch = "0.2"
username = "0.2"
shell-escape = "0.1"
libpijul = "0.12.0"

tar = "0.4"
flate2 = "1.0"
ignore = "0.4"
bincode = "1.1"
tempfile = "3.0"
atty = "0.2"
progrs = "0.1"

base64 = "0.10"
cryptovec = "0.4"

line = "0.1.3"
line = "0.1.15"
rpassword = "2.0"



1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110


111
112
113
114
115
116
117
118
119
120

            &[partial][..],
        txn.output_repository(
            &[][..] as &[&str],
use clap::{Arg, ArgMatches, SubCommand};

use super::{default_explain, BasicOptions, StaticSubcommand};
use error::Error;
use libpijul::fs_representation::{in_repo_root, RepoPath};
use libpijul::patch::UnsignedPatch;
use libpijul::{FileStatus, RecordState, ToPrefixes};
use rand;
use std::collections::HashSet;
use std::path::Path;

/// Build the clap subcommand definition for `pijul checkout`.
/// (Fixed: needless trailing `return …;` replaced by the expression.)
pub fn invocation() -> StaticSubcommand {
    SubCommand::with_name("checkout")
        .about("Change the current branch")
        .arg(
            Arg::with_name("repository")
                .long("repository")
                .help("Local repository.")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("branch")
                .help("Branch to switch to.")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("path")
                .long("path")
                .help("Partial path to check out.")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("force")
                .short("f")
                .long("force")
                .takes_value(false)
                .help("Only verify that there are no unrecorded files moves, deletions or additions (ignores unrecorded changes in files). Much faster on large repositories."),
        )
}

pub fn run(args: &ArgMatches) -> Result<(), Error> {
    let opts = BasicOptions::from_args(args)?;
    if let Some(branch) = args.value_of("branch") {
        checkout(
            &opts,
            branch,
            args.is_present("force"),
            args.value_of("path").map(|p| RepoPath(Path::new(p))),
        )
    } else {
        Err(Error::NoSuchBranch)
    }
}

/// Switch to `branch`, retrying with a doubled space provision (and
/// `force` mode) whenever the repository reports a lack of space.
pub fn checkout(
    opts: &BasicOptions,
    branch: &str,
    force: bool,
    partial_path: Option<RepoPath<&Path>>,
) -> Result<(), Error> {
    let mut force = force;
    let mut provision = 409600;
    loop {
        match try_checkout(opts, branch, force, provision, partial_path) {
            Err(ref e) if e.lacks_space() => {
                provision *= 2;
                force = true;
            }
            done => return done,
        }
    }
}

/// One checkout attempt with a fixed space `provision`; `checkout`
/// retries this with larger provisions on lack-of-space errors.
// NOTE(review): this function is truncated by the patch-rendered source
// and contains two stray lines (marked below); code left byte-identical.
pub fn try_checkout(
    opts: &BasicOptions,
    branch_name: &str,
    force: bool,
    provision: u64,
    partial_path: Option<RepoPath<&Path>>,
) -> Result<(), Error> {
    let repo = opts.open_and_grow_repo(provision)?;
    let mut txn = repo.mut_txn_begin(rand::thread_rng())?;
    let current_branch = opts.repo_root.get_current_branch()?;
    // We need to check at least that there are no file
    // moves/additions/deletions, because these would be
    // overwritten by the checkout, sometimes causing Pijul to
    // panic.
    if force {
        // Check whether there are file moves.
        if txn
            .iter_inodes(None)
            .any(|(_, ch)| ch.status != FileStatus::Ok)
        {
            return Err(Error::PendingChanges);
        }
    } else {
        // Check whether there are more general changes.
        let mut record = RecordState::new();
        let current_branch = txn.open_branch(&current_branch)?;
        txn.record(
            libpijul::DiffAlgorithm::default(),
            &mut record,
            &current_branch,
            &opts.repo_root,
            &in_repo_root(),
        )?;
        txn.commit_branch(current_branch)?;
        let (changes, _) = record.finish();

        // NOTE(review): the next two lines are stray render artifacts
        // from another call site — confirm against the real source.
                &branch,
                &UnsignedPatch::empty().leave_unsigned(),
        if !changes.is_empty() {
            return Err(Error::PendingChanges);
        }
    }

    debug!("output repository");

    let mut branch = if let Some(branch) = txn.get_branch(branch_name) {
        branch
    } else {
        // NOTE(review): truncated here by the rendered page; the
        // turbofish below belongs to the pre-patch API.
        txn.output_repository::<&[&Path]>(

1
2
3
4
5
6
7
8
9
10

                    session.pull(path, args.to_branch, &mut pullable, args.from_path, true)?;
use clap::{Arg, ArgMatches, SubCommand};
use commands::remote::{parse_remote, Remote};
use commands::{assert_no_containing_repo, create_repo, default_explain, StaticSubcommand};
use error::Error;
use libpijul::fs_representation::{RepoPath, RepoRoot};
use libpijul::{Hash, DEFAULT_BRANCH};
use regex::Regex;
use std::io::{stderr, Write};
use std::path::Path;
use std::process::exit;
use std::path::Path;

        Some(partial)



1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105


108


111


114

116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396

397
398
399
400
401

pub fn changes_from_prefixes<T: rand::Rng, P:AsRef<Path>>(
    prefix: Option<&Vec<P>>,
                Some(prefix.as_ref()),
use super::ask::{ask_changes, ChangesDirection};
use super::default_explain;
use chrono;
use clap::{Arg, ArgMatches, SubCommand};
use commands::hooks::run_hook;
use commands::{ask, BasicOptions, StaticSubcommand};
use error::Error;
use libpijul;
use libpijul::fs_representation::{in_repo_root, RepoPath, RepoRoot};
use libpijul::patch::{PatchFlags, Record};
use libpijul::{Hash, InodeUpdate, Key, MutTxn, Patch, PatchId, RecordState, Repository};
use meta::{load_signing_key, Global, Meta};
use rand;
use std::collections::HashSet;
use std::fs::canonicalize;
use std::fs::{metadata, OpenOptions};
use std::io::Write;
use std::mem::drop;
use std::path::{Path, PathBuf};
use std::str::FromStr;

/// Attach the command-line arguments shared by record-like commands to
/// `sub`: repository/branch selection, patch date, name and description,
/// editor suppression, authorship, and diff-algorithm choice.
///
/// Argument registration order is preserved exactly, so the generated
/// `--help` output is identical to the original.
pub fn record_args(sub: StaticSubcommand) -> StaticSubcommand {
    // Where to record.
    let sub = sub.arg(
        Arg::with_name("repository")
            .long("repository")
            .help("The repository where to record, defaults to the current directory.")
            .takes_value(true)
            .required(false),
    );
    let sub = sub.arg(
        Arg::with_name("branch")
            .long("branch")
            .help("The branch where to record, defaults to the current branch.")
            .takes_value(true)
            .required(false),
    );
    // Patch metadata.
    let sub = sub.arg(
        Arg::with_name("date")
            .long("date")
            .help("The date to use to record the patch, default is now.")
            .takes_value(true)
            .required(false),
    );
    let sub = sub.arg(
        Arg::with_name("message")
            .short("m")
            .long("message")
            .help("The name of the patch to record")
            .takes_value(true),
    );
    let sub = sub.arg(
        Arg::with_name("description")
            .short("d")
            .long("description")
            .help("The description of the patch to record")
            .takes_value(true),
    );
    let sub = sub.arg(
        Arg::with_name("no-editor")
            .long("no-editor")
            .help("Do not use an editor to write the patch name and description, even if the variable is set in the configuration file")
            .takes_value(false),
    );
    let sub = sub.arg(
        Arg::with_name("author")
            .short("A")
            .long("author")
            .help("Author of this patch (multiple occurrences allowed)")
            .takes_value(true),
    );
    // Diff algorithm selection: the two flags are mutually exclusive.
    let sub = sub.arg(
        Arg::with_name("patience")
            .long("patience")
            .help("Use patience diff instead of the default (Myers diff)")
            .conflicts_with("myers")
            .takes_value(false),
    );
    sub.arg(
        Arg::with_name("myers")
            .long("myers")
            .help("Use Myers diff")
            .conflicts_with("patience")
            .takes_value(false),
    )
}

/// Build the `record` subcommand: the shared record arguments (see
/// `record_args`) plus the record-specific flags `--all`,
/// `--add-new-files`, `--depends-on` and the positional `prefix`.
pub fn invocation() -> StaticSubcommand {
    // Tail expression instead of the non-idiomatic `return …;`.
    record_args(
        SubCommand::with_name("record")
            .about("Record changes in the repository")
            .arg(
                Arg::with_name("all")
                    .short("a")
                    .long("all")
                    .help("Answer 'y' to all questions")
                    .takes_value(false),
            )
            .arg(
                Arg::with_name("add-new-files")
                    .short("n")
                    .long("add-new-files")
                    .help("Offer to add files that have been created since the last record")
                    .takes_value(false),
            )
            .arg(
                Arg::with_name("depends-on")
                    .help("Add a dependency to this patch (internal id or hash accepted)")
                    .long("depends-on")
                    .takes_value(true)
                    .multiple(true),
            )
            .arg(
                Arg::with_name("prefix")
                    .help("Prefix to start from")
                    .takes_value(true)
                    .multiple(true),
            ),
    )
}

fn add_untracked_files<T: rand::Rng, P: AsRef<Path> + 'static>(
    txn: &mut MutTxn<T>,
    repo_root: &RepoRoot<P>,

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
        if let Err(e) = txn.add_file(&file, m.is_dir()) {

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
        }

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    }

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    Ok(untracked)
}

/// Append each entry of `lines` (one per line) to the repository's local
/// ignore file, creating that file if it does not yet exist.
///
/// The `lines` parameter is a slice rather than `&Vec<String>`; existing
/// callers passing `&Vec<String>` still compile via deref coercion.
fn append_to_ignore_file(
    repo_root: &RepoRoot<impl AsRef<Path>>,
    lines: &[String],
) -> Result<(), Error> {
    let ignore_file = repo_root.local_ignore_file();
    // Append mode so previously recorded ignore patterns are kept.
    let mut file = OpenOptions::new()
        .append(true)
        .create(true)
        .open(ignore_file)?;
    for line in lines {
        file.write_all(line.as_ref())?;
        file.write_all(b"\n")?
    }
    Ok(())
}

/// Compute the set of changes to record on `branch_name`, optionally
/// restricted to `prefix`, and let the user interactively select them
/// unless `yes_to_all` is set.
///
/// When `add_new_files` is true, untracked files are offered for
/// addition first; files the user declines during the interactive
/// selection are un-added again before the transaction commits.
/// Returns the selected records plus the inode updates (`syncs`) needed
/// to apply them. Lines the user chose to ignore are appended to the
/// local ignore file.
fn select_changes(
    algo: libpijul::DiffAlgorithm,
    opts: &BasicOptions,
    add_new_files: bool,
    branch_name: &str,
    yes_to_all: bool,
    prefix: Option<Vec<RepoPath<PathBuf>>>,
) -> Result<(Vec<Record<Vec<Key<Option<Hash>>>>>, HashSet<InodeUpdate>), Error> {
    // Increase by 100 pages. The most things record can write is one
    // write in the branches table, affecting at most O(log n) blocks.
    let repo = opts.open_and_grow_repo(409600)?;
    let mut txn = repo.mut_txn_begin(rand::thread_rng())?;
    // Files added in this call; entries removed from this set below are
    // the ones the user kept.
    let mut to_unadd = if add_new_files {
        add_untracked_files(&mut txn, &opts.repo_root)?
    } else {
        HashSet::new()
    };
    let (changes, syncs) = changes_from_prefixes(
        algo,
        &opts.repo_root,
        &mut txn,
        &branch_name,
        prefix.as_ref(),
    )?;
    // Convert internal patch ids to external hashes for display/storage.
    let changes: Vec<_> = changes
        .into_iter()
        .map(|x| txn.globalize_record(x))
        .collect();
    if !yes_to_all {
        // `c` maps change index -> keep?; `i` collects ignore patterns.
        let (c, i) = ask_changes(
            &txn,
            &opts.repo_root,
            &opts.cwd,
            &changes,
            ChangesDirection::Record,
            &mut to_unadd,
        )?;
        let selected = changes
            .into_iter()
            .enumerate()
            .filter(|&(i, _)| *(c.get(&i).unwrap_or(&false)))
            .map(|(_, x)| x)
            .collect();
        // Un-add files the user rejected during the interactive pass.
        for file in to_unadd {
            txn.remove_file(&file)?
        }
        txn.commit()?;
        append_to_ignore_file(&opts.repo_root, &i)?;
        Ok((selected, syncs))
    } else {
        txn.commit()?;
        Ok((changes, syncs))
    }
}

/// Entry point of the `record` command: select changes, build the patch
/// (name, description, authors, date, extra dependencies), sign and save
/// it, then apply it to the branch, growing the pristine as needed.
///
/// Returns `Ok(None)` when there is nothing to record, otherwise the
/// hash of the recorded patch. Runs the `pre-record`, `patch-name` and
/// `post-record` hooks at the corresponding stages.
pub fn run(args: &ArgMatches) -> Result<Option<Hash>, Error> {
    let opts = BasicOptions::from_args(args)?;
    let yes_to_all = args.is_present("all");
    let patch_name_arg = args.value_of("message");
    let patch_descr_arg = args.value_of("description");
    let authors_arg = args.values_of("author").map(|x| x.collect::<Vec<_>>());
    let branch_name = opts.branch();
    let add_new_files = args.is_present("add-new-files");

    // Patch timestamp: `--date` if given and parseable, otherwise now.
    let patch_date = args.value_of("date").map_or(Ok(chrono::Utc::now()), |x| {
        chrono::DateTime::from_str(x).map_err(|_| Error::InvalidDate {
            date: String::from(x),
        })
    })?;

    // Whether repo-local metadata must be written back at the end.
    let mut save_meta = false;

    // Global (per-user) config; a fresh one is created (and later saved)
    // if loading fails.
    let (mut global, save_global) = Global::load().map(|c| (c, false)).unwrap_or_else(|e| {
        info!("loading global key, error {:?}", e);
        (Global::new(), true)
    });

    let mut meta = match Meta::load(&opts.repo_root) {
        Ok(m) => m,
        Err(_) => {
            save_meta = true;
            Meta::new()
        }
    };

    run_hook(&opts.repo_root, "pre-record", None)?;

    debug!("prefix {:?}", args.value_of("prefix"));
    let prefix = prefix(args, &opts)?;

    let (changes, syncs) = select_changes(
        if args.is_present("patience") {
            libpijul::DiffAlgorithm::Patience
        } else {
            libpijul::DiffAlgorithm::Myers
        },
        &opts,
        add_new_files,
        &branch_name,
        yes_to_all,
        prefix,
    )?;

    if changes.is_empty() {
        println!("Nothing to record");
        Ok(None)
    } else {
        let template = prepare_changes_template(patch_name_arg.unwrap_or(""), &changes);

        let repo = opts.open_repo()?;
        let patch = {
            // Read-only transaction: only needed to resolve dependencies
            // and build the patch object.
            let txn = repo.txn_begin()?;
            debug!("meta:{:?}", meta);

            let authors = decide_authors(authors_arg, &meta, &global)?;

            if authors.is_empty() {
                return Err(Error::NoAuthor);
            }

            // Remember first-time authorship choices.
            if meta.authors.is_empty() {
                meta.authors = authors.clone();
                save_meta = true;
            }

            if global.author.is_none() {
                global.author = Some(authors[0].clone());
            }

            debug!("authors:{:?}", authors);

            let (patch_name, description) = decide_patch_message(
                patch_name_arg,
                patch_descr_arg,
                template,
                !args.is_present("no-editor"),
                &opts.repo_root,
                &meta,
                &global,
            )?;

            run_hook(&opts.repo_root, "patch-name", Some(&patch_name))?;

            debug!("patch_name:{:?}", patch_name);
            if save_meta {
                meta.save(&opts.repo_root)?
            }
            if save_global {
                // Best-effort: failure to save global config is ignored.
                global.save().unwrap_or(())
            }
            debug!("new");
            let changes = changes.into_iter().flat_map(|x| x.into_iter()).collect();
            let branch = txn.get_branch(&branch_name).unwrap();

            // Resolve `--depends-on` values: each may be an external hash
            // or an internal id, and must already be on the branch.
            let mut extra_deps = Vec::new();
            if let Some(deps) = args.values_of("depends-on") {
                for dep in deps {
                    if let Some(hash) = Hash::from_base58(dep) {
                        if let Some(internal) = txn.get_internal(hash.as_ref()) {
                            if txn.get_patch(&branch.patches, internal).is_some() {
                                extra_deps.push(hash)
                            } else {
                                return Err(Error::ExtraDepNotOnBranch { hash });
                            }
                        } else {
                            return Err(Error::PatchNotFound {
                                repo_root: opts.repo_root().to_string_lossy().into_owned(),
                                patch_hash: hash,
                            });
                        }
                    } else if let Some(internal) = PatchId::from_base58(dep) {
                        if let Some(hash) = txn.get_external(internal) {
                            if txn.get_patch(&branch.patches, internal).is_some() {
                                extra_deps.push(hash.to_owned())
                            } else {
                                return Err(Error::ExtraDepNotOnBranch {
                                    hash: hash.to_owned(),
                                });
                            }
                        }
                    } else {
                        return Err(Error::WrongHash);
                    }
                }
            }
            txn.new_patch(
                &branch,
                authors,
                patch_name,
                description,
                patch_date,
                changes,
                extra_deps.into_iter(),
                PatchFlags::empty(),
            )
        };
        // Release the repository before re-opening it in record_no_resize.
        drop(repo);

        let patches_dir = opts.repo_root.patches_dir();
        // Signing key: repo-local key wins over the global one.
        let mut key = meta
            .signing_key
            .or(global.signing_key)
            .and_then(|s| load_signing_key(s).ok());
        let hash = if let Some(ref mut key) = key {
            key.check_author(&patch.header().authors)?;
            patch.save(&patches_dir, key.keys.get_mut(0))?
        } else {
            patch.save(&patches_dir, None)?
        };

        let pristine_dir = opts.pristine_dir();
        // Apply the patch, doubling the pristine size increase until it
        // fits (the underlying store has a fixed maximum size per open).
        let mut increase = 409600;
        let res = loop {
            match record_no_resize(
                &pristine_dir,
                &opts.repo_root,
                &branch_name,
                &hash,
                &patch,
                &syncs,
                increase,
            ) {
                Err(ref e) if e.lacks_space() => increase *= 2,
                e => break e,
            }
        };

        run_hook(&opts.repo_root, "post-record", None)?;

        res
    }
}

pub fn record_no_resize(
    pristine_dir: &Path,
    r: &RepoRoot<impl AsRef<Path>>,
    branch_name: &str,
    hash: &Hash,
    patch: &Patch,
    syncs: &HashSet<InodeUpdate>,
    increase: u64,
) -> Result<Option<Hash>, Error> {
    let size_increase = increase + patch.size_upper_bound() as u64;
    let repo = match Repository::open(&pristine_dir, Some(size_increase)) {
        Ok(repo) => repo,
        Err(x) => return Err(Error::Repository(x)),
    };
    let mut txn = repo.mut_txn_begin(rand::thread_rng())?;
    // save patch
    debug!("syncs: {:?}", syncs);
    let mut branch = txn.open_branch(branch_name)?;
    txn.apply_local_patch(&mut branch, r, &hash, &patch, &syncs, false)?;
    txn.commit_branch(branch)?;
    txn.commit()?;
    println!("Recorded patch {}", hash.to_base58());
    Ok(Some(hash.clone()))
}

/// Print a user-facing explanation of the command's outcome and set the
/// process exit status (delegates to the shared `default_explain`).
pub fn explain(res: Result<Option<Hash>, Error>) {
    default_explain(res)
}

pub fn changes_from_prefixes<T: rand::Rng>(
pub fn changes_from_prefixes<T: rand::Rng, P: AsRef<Path>>(
    algo: libpijul::DiffAlgorithm,
    repo_root: &RepoRoot<impl AsRef<Path>>,
    txn: &mut MutTxn<T>,
    branch_name: &str,
    prefix: Option<&Vec<PathBuf>>,

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541


544


547


550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621

623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027

1028
1029
        partial_path: Option<&str>,
use libpijul::fs_representation::{
    branch_changes_base_path, patch_file_name, RepoRoot, PIJUL_DIR_NAME,
};
use libpijul::patch::read_changes;
use libpijul::{
    apply_resize, apply_resize_no_output, apply_resize_patches, apply_resize_patches_no_output,
    ApplyTimestamp, ConflictingFile, Hash, Patch, PatchId, RepoPath, Repository,
};
use regex::Regex;
use reqwest;
use reqwest::async as reqwest_async;

use error::Error;
use std;
use std::collections::hash_set::HashSet;
use std::collections::HashMap;
use std::fs::{copy, hard_link, metadata, rename, File};
use std::path::{Path, PathBuf};
use std::sync::Arc;

use commands::{ask, assert_no_containing_repo, create_repo};
use cryptovec;
use dirs;
use futures;
use futures::{Async, Future, Poll, Stream};
use meta;
use progrs;
use sequoia_openpgp::serialize::Serialize;
use shell_escape::unix::escape;
use std::borrow::Cow;
use std::io::prelude::*;
use std::io::BufReader;
use std::net::ToSocketAddrs;
use tempfile::tempdir_in;
use thrussh;
use thrussh_config;
use thrussh_keys;
use tokio;
use username;

#[derive(Debug)]
pub struct SshRemote<'a> {
    user: Option<&'a str>,
    host: &'a str,
    port: Option<u16>,
    path: &'a str,
    id: &'a str,
    local_repo_root: Option<&'a Path>,
    pijul_cmd: Cow<'static, str>,
}

/// A parsed remote location: SSH, HTTP(S) URI, or a local path.
#[derive(Debug)]
pub enum Remote<'a> {
    Ssh(SshRemote<'a>),
    Uri { uri: &'a str },
    Local { path: RepoRoot<PathBuf> },
}

/// An open session to a remote, one variant per transport kind.
pub enum Session<'a> {
    Ssh(SshSession<'a>),
    Uri(UriSession<'a>),
    Local(LocalSession<'a>),
}

/// A live SSH session: a tokio runtime driving a thrussh connection.
pub struct SshSession<'a> {
    // Runtime used to block on SSH futures.
    pub l: tokio::runtime::Runtime,
    // Remote repository path.
    path: &'a str,
    // Remote pijul command to execute.
    pijul_cmd: &'a str,
    // `None` only transiently, while a future owns the connection.
    pub session: Option<thrussh::client::Connection<thrussh_config::Stream, Client>>,
}

/// An HTTP(S) session: a tokio runtime driving an async reqwest client.
pub struct UriSession<'a> {
    l: tokio::runtime::Runtime,
    uri: &'a str,
    client: reqwest_async::Client,
}

/// A "session" on a repository reachable through the local filesystem.
pub struct LocalSession<'a> {
    root: RepoRoot<&'a Path>,
}

impl<'a> Drop for SshSession<'a> {
    /// Politely disconnect on drop: send an SSH disconnect message and
    /// drive the connection to completion on the runtime, logging (not
    /// propagating) any error since drop cannot fail.
    fn drop(&mut self) {
        if let Some(mut session) = self.session.take() {
            debug!("disconnecting");
            session.disconnect(thrussh::Disconnect::ByApplication, "finished", "EN");
            if let Err(e) = self.l.block_on(session) {
                error!("While dropping SSH Session: {:?}", e);
            }
        }
    }
}

#[cfg(unix)]
use thrussh_keys::agent::client::AgentClient;
#[cfg(unix)]
use tokio_uds::UnixStream;

/// SSH client handler state shared across thrussh callbacks.
pub struct Client {
    // Exit status reported by the remote command, per channel.
    pub exit_status: HashMap<thrussh::ChannelId, u32>,
    // What the `data` callback should do with incoming bytes.
    state: State,
    // Host/port, kept for known_hosts checks.
    host: String,
    port: u16,
    // The channel whose exit status we are interested in.
    channel: Option<thrussh::ChannelId>,
    // SSH agent connection (unix only; `()` placeholder on windows).
    #[cfg(unix)]
    pub agent: Option<AgentClient<UnixStream>>,
    #[cfg(windows)]
    pub agent: Option<()>,
}

impl Client {
    /// Build a new handler; on unix, tries to connect to the SSH agent
    /// named by `SSH_AUTH_SOCK` (failure just means no agent auth).
    #[cfg(unix)]
    fn new(port: Option<u16>, host: &str, l: &mut tokio::runtime::Runtime) -> Self {
        let agent = if let Ok(path) = std::env::var("SSH_AUTH_SOCK") {
            l.block_on(
                UnixStream::connect(path).map(thrussh_keys::agent::client::AgentClient::connect),
            )
            .ok()
        } else {
            None
        };
        debug!("Client::new(), agent: {:?}", agent.is_some());
        Client {
            exit_status: HashMap::new(),
            state: State::None,
            port: port.unwrap_or(22),
            host: host.to_string(),
            channel: None,
            agent,
        }
    }

    /// Windows variant: no agent support, everything else identical.
    #[cfg(windows)]
    fn new(port: Option<u16>, host: &str, _: &mut tokio::runtime::Runtime) -> Self {
        Client {
            exit_status: HashMap::new(),
            state: State::None,
            port: port.unwrap_or(22),
            host: host.to_string(),
            channel: None,
            agent: None,
        }
    }
}

/// How incoming SSH data should be interpreted (set before each remote
/// command is run).
enum State {
    // Pass remote output through to stdout.
    None,
    // Parse `hash:timestamp` lines from a remote `log --hash-only`.
    Changes {
        changes: Vec<(Hash, ApplyTimestamp)>,
    },
    // Stream a patch being downloaded into a local file.
    DownloadPatch {
        file: File,
    },
    /*SendKey {
        key_pair: meta::SigningKeys,
    },*/
}

/// State of the `SendFile` future: either ready to read the next chunk
/// from the file, or waiting for the previous chunk to be written.
enum SendFileState {
    Read(thrussh::client::Connection<thrussh_config::Stream, Client>),
    Wait(thrussh::client::Data<thrussh_config::Stream, Client, Vec<u8>>),
}

/// Future that streams a local file to an SSH channel, chunk by chunk.
struct SendFile {
    f: File,
    // Reusable read buffer; `None` only while a write future owns it.
    buf: Option<Vec<u8>>,
    chan: thrussh::ChannelId,
    // `None` only transiently inside `poll` (see `state.take()`).
    state: Option<SendFileState>,
}

impl Future for SendFile {
    /// Resolves to the connection and the (reusable) buffer once the
    /// whole file has been sent.
    type Item = (
        thrussh::client::Connection<thrussh_config::Stream, Client>,
        Vec<u8>,
    );
    type Error = Error;
    /// Hand-rolled state machine: alternate between reading a chunk from
    /// the file (`Read`) and awaiting its transmission (`Wait`) until a
    /// zero-length read signals EOF.
    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
        debug!("SendFile loop starting");
        loop {
            debug!("sendfile loop");
            match self.state.take() {
                Some(SendFileState::Read(c)) => {
                    debug!("read");
                    let mut buf = self.buf.take().unwrap();
                    buf.resize(BUFFER_SIZE, 0);
                    let len = self.f.read(&mut buf)?;
                    if len == 0 {
                        // If nothing has been read, return.
                        return Ok(Async::Ready((c, buf)));
                    }
                    buf.truncate(len);
                    debug!("sending {:?} bytes, {:?}", len, buf.len());
                    self.state = Some(SendFileState::Wait(c.data(self.chan, None, buf)));
                }
                Some(SendFileState::Wait(mut c)) => {
                    debug!("wait");
                    match c.poll()? {
                        Async::Ready((c, buf)) => {
                            // Write finished: reclaim the buffer and read more.
                            self.buf = Some(buf);
                            self.state = Some(SendFileState::Read(c))
                        }
                        Async::NotReady => {
                            // Put the state back before yielding.
                            self.state = Some(SendFileState::Wait(c));
                            return Ok(Async::NotReady);
                        }
                    }
                }
                // `state` is always restored before returning, so `None`
                // here would be a broken invariant.
                None => unreachable!(),
            }
        }
    }
}

impl thrussh::client::Handler for Client {
    type Error = Error;
    type FutureUnit = futures::Finished<Client, Error>;
    type SessionUnit = futures::Finished<(Client, thrussh::client::Session), Error>;
    type FutureBool = futures::future::FutureResult<(Client, bool), Error>;
    type FutureSign =
        Box<futures::Future<Item = (Self, cryptovec::CryptoVec), Error = Self::Error>>;

    /// Ask the SSH agent (if connected) to sign `to_sign` with `key`;
    /// agent errors are logged and the data returned unsigned, leaving
    /// authentication to fail later rather than aborting here.
    #[cfg(unix)]
    fn auth_publickey_sign(
        mut self,
        key: &thrussh_keys::key::PublicKey,
        mut to_sign: cryptovec::CryptoVec,
    ) -> Self::FutureSign {
        debug!("auth_publickey_sign");
        if let Some(agent) = self.agent.take() {
            use thrussh_keys::encoding::Encoding;
            debug!("using agent");
            Box::new(
                agent
                    .sign_request(key, &to_sign)
                    .then(move |result| match result {
                        Ok((client, sig)) => {
                            debug!("sig = {:?}", sig);
                            if let Some(sig) = sig {
                                // Append the signature as an SSH string.
                                to_sign.extend_ssh_string(&sig[..]);
                            }
                            // Hand the agent connection back for reuse.
                            self.agent = Some(client);
                            futures::finished::<_, Error>((self, to_sign))
                        }
                        Err(e) => {
                            error!("SSH agent error: {:?}", e);
                            futures::finished((self, to_sign))
                        }
                    })
                    .from_err(),
            )
        } else {
            debug!("no agent");
            Box::new(futures::finished((self, to_sign)))
        }
    }

    /// Dispatch incoming channel data: stream 1 is the remote stderr;
    /// the main stream (`None`) is routed according to `self.state`.
    fn data(
        mut self,
        channel: thrussh::ChannelId,
        stream: Option<u32>,
        data: &[u8],
        session: thrussh::client::Session,
    ) -> Self::SessionUnit {
        debug!(
            "data ({:?}): {:?}",
            channel,
            &data[..std::cmp::min(data.len(), 100)]
        );
        if stream == Some(1) {
            std::io::stderr().write(data).unwrap();
        } else if stream == None {
            match self.state {
                State::None => {
                    std::io::stdout().write(data).unwrap();
                }
                State::Changes { ref mut changes } => {
                    // Each line is "hash:timestamp"; unparsable lines are
                    // silently skipped.
                    let data = std::str::from_utf8(data).unwrap();
                    for l in data.lines() {
                        let mut spl = l.split(':');
                        if let (Some(h), Some(s)) = (spl.next(), spl.next()) {
                            if let (Some(h), Ok(s)) = (Hash::from_base58(h), s.parse()) {
                                changes.push((h, s));
                            }
                        }
                    }
                }
                State::DownloadPatch { ref mut file, .. } => {
                    file.write_all(data).unwrap();
                }
            }
        } else {
            debug!(
                "SSH data received on channel {:?}: {:?} {:?}",
                channel, stream, data
            );
        }
        futures::finished((self, session))
    }
    /// Record the remote command's exit status, but only for the channel
    /// we launched it on.
    fn exit_status(
        mut self,
        channel: thrussh::ChannelId,
        exit_status: u32,
        session: thrussh::client::Session,
    ) -> Self::SessionUnit {
        debug!(
            "exit_status received on channel {:?}: {:?}:",
            channel, exit_status
        );
        debug!("self.channel = {:?}", self.channel);
        if let Some(c) = self.channel {
            if channel == c {
                self.exit_status.insert(channel, exit_status);
            }
        }
        debug!("self.exit_status = {:?}", self.exit_status);
        futures::finished((self, session))
    }

    /// Verify the server key against `~/.ssh/known_hosts`; on an unknown
    /// key, ask the user whether to learn it; on a changed key, refuse
    /// and warn about possible eavesdropping.
    fn check_server_key(
        self,
        server_public_key: &thrussh_keys::key::PublicKey,
    ) -> Self::FutureBool {
        let path = dirs::home_dir().unwrap().join(".ssh").join("known_hosts");
        match thrussh_keys::check_known_hosts_path(&self.host, self.port, server_public_key, &path)
        {
            Ok(true) => futures::done(Ok((self, true))),
            Ok(false) => {
                if let Ok(false) = ask::ask_learn_ssh(&self.host, self.port, "") {
                    // TODO
                    // &server_public_key.fingerprint()) {

                    futures::done(Ok((self, false)))
                } else {
                    thrussh_keys::learn_known_hosts_path(
                        &self.host,
                        self.port,
                        server_public_key,
                        &path,
                    )
                    .unwrap();
                    futures::done(Ok((self, true)))
                }
            }
            Err(e) => {
                if let thrussh_keys::Error::KeyChanged(line) = e {
                    println!(
                        "Host key changed! Someone might be eavesdropping this communication, \
                         refusing to continue. Previous key found line {}",
                        line
                    );
                    futures::done(Ok((self, false)))
                } else {
                    futures::done(Err(From::from(e)))
                }
            }
        }
    }
}

// I/O buffer size for network transfers elsewhere in this file (16 KiB).
const BUFFER_SIZE: usize = 1 << 14; // 16 kb.

impl<'a> SshSession<'a> {
    /// Run `pijul log --hash-only` on the remote repository over SSH and
    /// collect the `(hash, timestamp)` pairs reported for `branch`.
    ///
    /// Each entry of `path` is forwarded as a `--path` argument so the
    /// remote log is restricted to patches touching those files. A
    /// non-zero remote exit status yields an empty list.
    pub fn changes(
        &mut self,
        branch: &str,
        path: &[RepoPath<impl AsRef<Path>>],
    ) -> Result<Vec<(Hash, ApplyTimestamp)>, Error> {
        let esc_path = escape(Cow::Borrowed(self.path));
        let mut cmd = format!(
            "{} log --repository {} --branch {:?} --hash-only",
            self.pijul_cmd, esc_path, branch
        );
        for p in path {
            cmd.push_str(&format!(" --path {}", p.display()))
        }

        // The session handler parses command output into `State::Changes`
        // as data arrives on the channel.
        if let Some(ref mut session) = self.session {
            session.handler_mut().state = State::Changes {
                changes: Vec::new(),
            }
        }
        let mut channel = None;
        // Open a session channel, execute the command, then block until
        // the exit status is known and the channel is fully closed.
        self.session = Some(
            self.l
                .block_on(
                    self.session
                        .take()
                        .unwrap()
                        .channel_open_session()
                        .and_then(move |(mut connection, chan)| {
                            debug!("exec: {:?}", cmd);
                            channel = Some(chan);
                            connection.handler_mut().exit_status.remove(&chan);
                            connection.handler_mut().channel = Some(chan);
                            connection.exec(chan, false, &cmd);
                            connection.channel_eof(chan);
                            // Wait until channel close.
                            debug!("waiting channel close");
                            connection
                                .wait(move |session| {
                                    session.handler().exit_status.get(&chan).is_some()
                                })
                                .and_then(move |mut session| {
                                    if session.is_channel_open(chan) {
                                        session.channel_close(chan);
                                    }
                                    session.wait(move |session| !session.is_channel_open(chan))
                                })
                        }),
                )
                .unwrap(),
        );

        // A non-zero exit status means the remote log failed (e.g. the
        // branch does not exist there): report no changes.
        if let Some(ref session) = self.session {
            if let Some(channel) = channel {
                if let Some(&exit_code) = session.handler().exit_status.get(&channel) {
                    debug!("exit_code = {:?}", exit_code);
                    if exit_code != 0 {
                        return Ok(Vec::new());
                    }
                }
            }
        }
        // Take the accumulated result out of the handler state, resetting
        // the state to `None` for the next command.
        if let Some(ref mut session) = self.session {
            match std::mem::replace(&mut session.handler_mut().state, State::None) {
                State::Changes { changes } => {
                    debug!("changes: {:?}", changes);
                    Ok(changes)
                }
                _ => unreachable!(),
            }
        } else {
            unreachable!()
        }
    }

    /// Register this user's signing key with the remote by piping the
    /// serialized public key (TPK) into `pijul key register` over a new
    /// SSH channel, then waiting for the remote command to exit.
    pub fn send_key(&mut self, key_pair: meta::SigningKeys) -> Result<(), Error> {
        if let Some(ref mut session) = self.session {
            // session.handler_mut().state = State::SendKey { };
            session.handler_mut().channel = None;
        }
        let challenge_cmd = format!("{} key register", self.pijul_cmd);
        // Serialize the public part of the signing key into `data`.
        let mut data = Vec::new();
        key_pair.tsk.tpk().serialize(&mut data)?;
        self.session = Some(
            self.l.block_on(
                self.session
                    .take()
                    .unwrap()
                    .channel_open_session()
                    .and_then(move |(mut session, channelid)| {
                        session.exec(channelid, false, &challenge_cmd);
                        session
                            .data(channelid, None, data)
                            .and_then(move |(mut session, _)| {
                                // All data sent: signal EOF and wait for
                                // the remote exit status.
                                session.channel_eof(channelid);
                                session.handler_mut().channel = Some(channelid);
                                session.wait(move |session| {
                                    session.handler().exit_status.get(&channelid).is_some()
                                })
                            })
                    }),
            )?,
        );
        Ok(())
    }

    /// Download one patch from the remote with `pijul patch --bin`.
    ///
    /// The patch data is streamed by the session handler into
    /// `local_tmp_file` (see `State::DownloadPatch`), then renamed to
    /// `local_file` once the remote command has exited. Returns
    /// `local_file`.
    pub fn fetch_patch(
        &mut self,
        patch_hash: &Hash,
        local_file: PathBuf,
        local_tmp_file: PathBuf,
    ) -> Result<PathBuf, Error> {
        let esc_path = escape(Cow::Borrowed(self.path));
        let cmd = format!(
            "{} patch --repository {} --bin {}",
            self.pijul_cmd,
            esc_path,
            patch_hash.to_base58()
        );
        debug!("cmd {:?} {:?}", cmd, local_file);
        if let Some(ref mut session) = self.session {
            // The handler writes incoming channel data to this file.
            session.handler_mut().state = State::DownloadPatch {
                file: File::create(&local_tmp_file)?,
            };
            session.handler_mut().channel = None;
        }
        // Run the command and block until the channel is closed.
        self.session = Some(
            self.l
                .block_on(
                    self.session
                        .take()
                        .unwrap()
                        .channel_open_session()
                        .and_then(move |(mut connection, chan)| {
                            connection.handler_mut().exit_status.remove(&chan);
                            connection.handler_mut().channel = Some(chan);
                            connection.exec(chan, false, &cmd);
                            connection.channel_eof(chan);
                            connection
                                .wait(move |session| {
                                    session.handler().exit_status.get(&chan).is_some()
                                })
                                .and_then(move |mut session| {
                                    if session.is_channel_open(chan) {
                                        session.channel_close(chan);
                                    }
                                    session.wait(move |session| !session.is_channel_open(chan))
                                })
                        }),
                )
                .unwrap(),
        );

        // Flush the temporary file and move it into its final place.
        if let Some(ref mut session) = self.session {
            if let State::DownloadPatch { mut file, .. } =
                std::mem::replace(&mut session.handler_mut().state, State::None)
            {
                file.flush()?;
                rename(&local_tmp_file, &local_file)?;
            }
        }
        Ok(local_file)
    }

    pub fn remote_apply(
        &mut self,
        repo_root: &RepoRoot<impl AsRef<Path>>,
        remote_branch: &str,
        patch_hashes: Vec<Hash>,
    ) -> Result<(), Error> {
        let pdir = repo_root.patches_dir();
        let mut exit_status = None;
        let esc_path = escape(Cow::Borrowed(&self.path));
        let apply_cmd = format!(
            "{} apply --repository {} --branch {:?}",
            self.pijul_cmd, esc_path, remote_branch
        );
        let sign_cmd = format!("{} sign --repository {}", self.pijul_cmd, esc_path);

        let session = self.session.take().unwrap();

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
                        .fold((session, Vec::new()), move |(session, buf), hash| {

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
                                futures::future::Either::A((SendFile {

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

================================
        self.session = Some(
            self.l
                .block_on(
                    session
                        .channel_open_session()
                        .and_then(move |(session, chan0)| {
                            session
                                .channel_open_session()
                                .and_then(move |(mut session, chan1)| {
                                    session.handler_mut().exit_status.remove(&chan0);
                                    session.handler_mut().channel = Some(chan0);
                                    debug!("exec {:?}", apply_cmd);
                                    session.exec(chan0, false, &apply_cmd);
                                    debug!("exec {:?}", sign_cmd);
                                    session.exec(chan1, false, &sign_cmd);
                                    futures::stream::iter_ok(patch_hashes.into_iter())
                                        .fold((session, Vec::new()), move |(session, buf), hash| {
                                            let mut pdir = pdir.clone();
                                            pdir.push(hash.to_base58());
                                            pdir.set_extension("gz");
                                            let f = std::fs::File::open(&pdir).unwrap();
                                            pdir.set_extension("sig");
                                            if let Ok(sig) = std::fs::File::open(&pdir) {
                                                futures::future::Either::A(
                                                    (SendFile {
                                                        f: f,
                                                        buf: Some(buf),
                                                        chan: chan0,
                                                        state: Some(SendFileState::Read(session)),
                                                    })
                                                    .and_then(move |(session, mut buf)| {
                                                        buf.clear();
                                                        SendFile {
                                                            f: sig,
                                                            buf: Some(buf),
                                                            chan: chan1,
                                                            state: Some(SendFileState::Read(
                                                                session,
                                                            )),
                                                        }
                                                    }),
                                                )
                                            } else {
                                                futures::future::Either::B(SendFile {
                                                    f: f,
                                                    buf: Some(buf),
                                                    chan: chan0,
                                                    state: Some(SendFileState::Read(session)),
                                                })
                                            }
                                        })
                                        .and_then(move |(mut session, _)| {
                                            session.channel_eof(chan0);
                                            session
                                                .wait(move |session| {
                                                    session
                                                        .handler()
                                                        .exit_status
                                                        .get(&chan0)
                                                        .is_some()
                                                })
                                                .map(move |mut session| {
                                                    exit_status = session
                                                        .handler()
                                                        .exit_status
                                                        .get(&chan0)
                                                        .map(|x| *x);
                                                    session.channel_close(chan0);
                                                    session
                                                })
                                        })
                                        .map_err(From::from)

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
                                })
                        }),
                )
                .unwrap(),
        );

        if let Some(ref session) = self.session {
            debug!("exit status = {:?}", session.handler().exit_status);
        }
        Ok(())
    }

    /// Run `pijul init` on the remote host to create a new repository at
    /// this session's path, blocking until the command has finished and
    /// its channel is closed.
    pub fn remote_init(&mut self) -> Result<(), Error> {
        let esc_path = escape(Cow::Borrowed(self.path));
        let cmd = format!("{} init {}", self.pijul_cmd, esc_path);
        debug!("command line:{:?}", cmd);

        self.session = Some(
            self.l
                .block_on(
                    self.session
                        .take()
                        .unwrap()
                        .channel_open_session()
                        .and_then(move |(mut session, chan)| {
                            debug!("chan = {:?}", chan);
                            session.handler_mut().exit_status.remove(&chan);
                            session.handler_mut().channel = Some(chan);
                            session.exec(chan, false, &cmd);
                            session.channel_eof(chan);
                            // Wait until channel close.
                            session
                                .wait(move |session| {
                                    session.handler().exit_status.get(&chan).is_some()
                                })
                                .and_then(move |mut session| {
                                    if session.is_channel_open(chan) {
                                        session.channel_close(chan);
                                    }
                                    session.wait(move |session| !session.is_channel_open(chan))
                                })
                        }),
                )
                .unwrap(),
        );
        Ok(())
    }
}

impl<'a> UriSession<'a> {
    /// Fetch the change list for `branch` over HTTP from the remote
    /// repository's `.pijul` directory.
    ///
    /// Path-filtered (partial) pulls are not supported over HTTP, so a
    /// non-empty `path` is an error. A missing or unreadable change file
    /// yields an empty list.
    pub fn changes(
        &mut self,
        branch: &str,
        path: &[RepoPath<impl AsRef<Path>>],
    ) -> Result<Vec<(Hash, ApplyTimestamp)>, Error> {
        if !path.is_empty() {
            return Err(Error::PartialPullOverHttp);
        }
        let mut uri = self.uri.to_string();
        uri = uri + "/" + PIJUL_DIR_NAME + "/" + &branch_changes_base_path(branch);
        let mut req = reqwest_async::Request::new(reqwest::Method::GET, uri.parse().unwrap());
        req.headers_mut().insert(
            reqwest::header::CONNECTION,
            reqwest::header::HeaderValue::from_static("close"),
        );
        // Accumulate the whole response body into one byte vector.
        let res: Vec<u8> = self.l.block_on(self.client.execute(req).and_then(
            |resp: reqwest_async::Response| {
                let res = Vec::new();
                let body = resp.into_body();
                body.fold(res, |mut res, x| {
                    res.extend(x.iter());
                    futures::finished::<_, reqwest::Error>(res)
                })
            },
        ))?;
        let changes = read_changes(&mut &res[..]).unwrap_or(Vec::new());
        debug!("http: {:?}", changes);
        Ok(changes)
    }

    /// Download a patch (and, best-effort, its detached signature) over
    /// HTTP.
    ///
    /// Both requests run concurrently (`join`). Each body is written to
    /// its temporary file and then renamed into place; a missing or
    /// failed signature download is ignored, but a missing patch is a
    /// `PatchNotFound` error. Returns the final patch path.
    pub fn fetch_patch(
        &mut self,
        patch_hash: &Hash,
        local_file: PathBuf,
        local_tmp_file: PathBuf,
    ) -> Result<PathBuf, Error> {
        let ref mut l = self.l;
        let ref mut client = self.client;
        let uri = self.uri.to_string()
            + "/"
            + PIJUL_DIR_NAME
            + "/patches/"
            + &patch_hash.to_base58()
            + ".gz";
        debug!("downloading uri {:?}", uri);

        let req = reqwest_async::Request::new(reqwest::Method::GET, uri.parse().unwrap());
        let uri_sig = self.uri.to_string()
            + "/"
            + PIJUL_DIR_NAME
            + "/patches/"
            + &patch_hash.to_base58()
            + ".sig";
        debug!("{:?}", uri_sig);
        let req_sig = reqwest_async::Request::new(reqwest::Method::GET, uri_sig.parse().unwrap());
        // Signature files live next to the patch, with a .sig extension.
        let mut local_sig_file = local_file.clone();
        let mut local_tmp_sig_file = local_tmp_file.clone();
        local_sig_file.set_extension("sig");
        local_tmp_sig_file.set_extension("sig");

        let res = l
            .block_on(
                client
                    .execute(req)
                    .and_then(move |resp| {
                        // Patch download: buffer the body, write it to the
                        // temporary file, and remember the rename to do.
                        if resp.status() == reqwest::StatusCode::OK {
                            let res = Vec::new();
                            futures::future::Either::A(
                                resp.into_body()
                                    .fold(res, |mut res, x| {
                                        res.extend(x.iter());
                                        futures::finished::<_, reqwest::Error>(res)
                                    })
                                    .map(|body| {
                                        // debug!("response={:?}", body);
                                        let mut f = File::create(&local_tmp_file).unwrap();
                                        f.write_all(&body).unwrap();
                                        // debug!("patch downloaded through http: {:?}", body);
                                        Some((local_tmp_file, local_file))
                                    }),
                            )
                        } else {
                            futures::future::Either::B(futures::finished(None))
                        }
                    })
                    .join(client.execute(req_sig).then(move |resp| {
                        // Signature download: same scheme, but any failure
                        // simply produces `None`.
                        let resp = if let Ok(resp) = resp {
                            resp
                        } else {
                            return futures::future::Either::B(futures::finished(None));
                        };
                        debug!("sig status {:?}", resp.status());
                        if resp.status() == reqwest::StatusCode::OK {
                            let res = Vec::new();
                            futures::future::Either::A(
                                resp.into_body()
                                    .fold(res, |mut res, x| {
                                        res.extend(x.iter());
                                        futures::finished::<_, reqwest::Error>(res)
                                    })
                                    .map(|body| {
                                        // debug!("response={:?}", body);
                                        let mut f = File::create(&local_tmp_sig_file).unwrap();
                                        f.write_all(&body).unwrap();
                                        // debug!("patch downloaded through http: {:?}", body);
                                        Some((local_tmp_sig_file, local_sig_file))
                                    }),
                            )
                        } else {
                            futures::future::Either::B(futures::finished(None))
                        }
                    })),
            )
            .unwrap();
        // Move the downloads into their final locations; the signature
        // rename is best-effort.
        if let Some((local_tmp_file, local_file)) = res.0 {
            debug!("renaming {:?} to {:?}", local_tmp_file, local_file);
            rename(&local_tmp_file, &local_file)?;
            if let Some((local_tmp_sig_file, local_sig_file)) = res.1 {
                debug!("renaming {:?} to {:?}", local_tmp_sig_file, local_sig_file);
                rename(&local_tmp_sig_file, &local_sig_file).unwrap_or(());
            }
            Ok(local_file)
        } else {
            Err(Error::PatchNotFound {
                repo_root: self.uri.into(),
                patch_hash: patch_hash.to_owned(),
            })
        }
    }
}

impl<'a> LocalSession<'a> {
    /// List the patches on `branch` of this local repository as
    /// `(external hash, timestamp)` pairs, in log order. When `path` is
    /// non-empty, only patches touching at least one of those paths are
    /// kept. An unknown branch yields an empty list.
    pub fn changes(
        &mut self,
        branch: &str,
        path: &[RepoPath<impl AsRef<Path>>],
    ) -> Result<Vec<(Hash, ApplyTimestamp)>, Error> {
        let repo = self.root.open_repo(None)?;
        let txn = repo.txn_begin()?;
        let branch = match txn.get_branch(&branch) {
            Some(b) => b,
            None => return Ok(Vec::new()),
        };
        if path.is_empty() {
            // No path filter: report every patch on the branch.
            return Ok(txn
                .iter_patches(&branch, None)
                .map(|(hash, s)| (txn.get_external(hash).unwrap().to_owned(), s))
                .collect());
        }
        let mut patches = Vec::new();
        for (hash, s) in txn.iter_patches(&branch, None) {
            // Keep the patch if it touched any of the requested paths
            // (short-circuits on the first match).
            let touched = path.iter().any(|p| {
                let inode = txn.find_inode(p).unwrap();
                let key = txn.get_inodes(inode).unwrap().key;
                txn.get_touched(key, hash)
            });
            if touched {
                patches.push((txn.get_external(hash).unwrap().to_owned(), s));
            }
        }
        Ok(patches)
    }

    /// Make `patch_hash` available at `local_file` by hard-linking it
    /// from this repository's patch directory, falling back to a plain
    /// copy when hard-linking fails (e.g. across filesystems).
    pub fn fetch_patch(
        &mut self,
        patch_hash: &Hash,
        local_file: PathBuf,
    ) -> Result<PathBuf, Error> {
        debug!("local downloading {:?}", patch_hash);
        let source = self
            .root
            .patches_dir()
            .join(&patch_file_name(patch_hash.as_ref()));
        debug!("hard linking {:?} to {:?}", source, local_file);
        match hard_link(&source, &local_file) {
            Ok(()) => {}
            Err(_) => {
                copy(&source, &local_file)?;
            }
        }
        Ok(local_file)
    }

    /// "Push" over the local filesystem: hard-link (or copy) every patch
    /// file — and its `.sig`, when present — from `repo_root` into this
    /// repository's patch directory, then apply the patches to
    /// `remote_branch`, retrying whenever the pristine database reports
    /// it lacks space.
    pub fn remote_apply(
        &mut self,
        repo_root: &RepoRoot<impl AsRef<Path>>,
        remote_branch: &str,
        patch_hashes: &Vec<Hash>,
    ) -> Result<Vec<ConflictingFile>, Error> {
        let mut remote_path = self.root.patches_dir();
        let mut local_path = repo_root.patches_dir();
        let remote_current_branch = self.root.get_current_branch()?;

        for hash in patch_hashes {
            // Both paths are mutated in place (push/set_extension/pop)
            // to avoid reallocating for each patch.
            remote_path.push(&hash.to_base58());
            remote_path.set_extension("gz");

            local_path.push(&hash.to_base58());
            local_path.set_extension("gz");

            debug!("hard linking {:?} to {:?}", local_path, remote_path);
            // Skip patches the target already has; fall back to a copy
            // when hard-linking fails (e.g. across filesystems).
            if metadata(&remote_path).is_err() {
                if hard_link(&local_path, &remote_path).is_err() {
                    copy(&local_path, &remote_path)?;
                }
            }

            // Same transfer for the detached signature, if we have one.
            remote_path.set_extension("sig");
            local_path.set_extension("sig");

            if metadata(&remote_path).is_err() && metadata(&local_path).is_ok() {
                if hard_link(&local_path, &remote_path).is_err() {
                    copy(&local_path, &remote_path)?;
                }
            }

            local_path.pop();
            remote_path.pop();
        }

        loop {
            // Only produce output (working-copy files) when applying to
            // the branch currently checked out on the target.
            let app = if remote_current_branch != remote_branch {
                apply_resize_no_output(&self.root, &remote_branch, patch_hashes.iter(), |_, _| {})
                    .map(|_| Vec::new())
            } else {
                apply_resize(
                    libpijul::DiffAlgorithm::default(),
                    &self.root,
                    &remote_branch,
                    patch_hashes.iter(),
                    &[] as &[RepoPath<&Path>],
                    |_, _| {},
                )
            };
            match app {
                // Out of space: the apply grows the database and retries.
                Err(ref e) if e.lacks_space() => debug!("lacks space"),
                Ok(v) => return Ok(v),
                Err(e) => return Err(From::from(e)),
            }
        }
    }
}

/// Result of computing which patches can be pushed to a remote.
#[derive(Debug, Clone)]
pub struct PushablePatches {
    /// Patches to push, with their internal id (when known) and
    /// timestamp.
    pub pushable: Vec<(Hash, Option<PatchId>, ApplyTimestamp)>,
    /// Remote patches not present locally — presumably meaning the push
    /// would not be a fast-forward; confirm against callers.
    pub non_fast_forward: Vec<Hash>,
}

impl<'a> Session<'a> {
    /// Dispatch `changes` to the underlying transport (SSH, local
    /// filesystem, or HTTP) session.
    pub fn changes(
        &mut self,
        branch: &str,
        remote_path: &[RepoPath<impl AsRef<Path>>],
    ) -> Result<Vec<(Hash, ApplyTimestamp)>, Error> {
        match self {
            &mut Session::Ssh(ref mut s) => s.changes(branch, remote_path),
            &mut Session::Local(ref mut s) => s.changes(branch, remote_path),
            &mut Session::Uri(ref mut s) => s.changes(branch, remote_path),
        }
    }
    /// Ensure the patch `patch_hash` exists in `repo_root`'s patch
    /// directory, fetching it from the remote when missing.
    ///
    /// Remote fetches go through a temporary directory inside the patch
    /// directory and are renamed into place by the transport-specific
    /// `fetch_patch`. Returns the local patch path.
    pub fn download_patch(
        &mut self,
        repo_root: &RepoRoot<impl AsRef<Path>>,
        patch_hash: &Hash,
    ) -> Result<PathBuf, Error> {
        let patches_dir_ = repo_root.patches_dir();
        let local_file = patches_dir_.join(&patch_file_name(patch_hash.as_ref()));

        // Already present locally: nothing to download.
        if metadata(&local_file).is_ok() {
            return Ok(local_file);
        }
        match *self {
            Session::Local(ref mut local_session) => {
                local_session.fetch_patch(patch_hash, local_file)
            }
            Session::Ssh(ref mut ssh_session) => {
                let tmp_dir = tempdir_in(&patches_dir_)?;
                let local_tmp_file = tmp_dir.path().join("patch");
                ssh_session.fetch_patch(patch_hash, local_file, local_tmp_file)
            }
            Session::Uri(ref mut uri_session) => {
                let tmp_dir = tempdir_in(&patches_dir_)?;
                let local_tmp_file = tmp_dir.path().join("patch");
                uri_session.fetch_patch(patch_hash, local_file, local_tmp_file)
            }
        }
    }

    /// Push `patch_hashes` to the remote and apply them to
    /// `remote_branch`. Only SSH and local targets can be pushed to; an
    /// HTTP remote panics.
    fn remote_apply(
        &mut self,
        repo_root: &RepoRoot<impl AsRef<Path>>,
        remote_branch: &str,
        patch_hashes: Vec<Hash>,
    ) -> Result<(), Error> {
        match self {
            &mut Session::Ssh(ref mut ssh) => {
                ssh.remote_apply(repo_root, remote_branch, patch_hashes)
            }
            &mut Session::Local(ref mut local) => {
                // The local variant reports conflicts; discard them here
                // to match the SSH return type.
                local
                    .remote_apply(repo_root, remote_branch, &patch_hashes)
                    .map(|_| ())
            }
            _ => panic!("upload to URI impossible"),
        }
    }

    /// Create a fresh repository on the remote: over SSH this runs
    /// `pijul init` remotely, locally it creates the directory layout in
    /// place. HTTP remotes cannot be initialized and panic.
    pub fn remote_init(&mut self) -> Result<(), Error> {
        match self {
            &mut Session::Ssh(ref mut ssh) => ssh.remote_init(),
            &mut Session::Local(ref mut local) => {
                // Refuse to nest a repository inside an existing one.
                assert_no_containing_repo(local.root.repo_root)?;
                create_repo(local.root.repo_root)
            }
            _ => panic!("remote init not possible"),
        }
    }

    /// Compute the patches present on the remote's `remote_branch` that
    /// may be pulled into `local_branch` of `target`.
    ///
    /// `remote_path` restricts the remote log to patches touching those
    /// paths (SSH/local remotes only). The remote list is sorted by
    /// timestamp so patches are applied in the order the remote recorded
    /// them.
    pub fn pullable_patches(
        &mut self,
        remote_branch: &str,
        local_branch: &str,
        target: &RepoRoot<impl AsRef<Path>>,
        remote_path: &[RepoPath<impl AsRef<Path>>],
    ) -> Result<Pullable, Error> {
        // `changes` already yields owned `(Hash, ApplyTimestamp)` pairs:
        // no per-element clone needed.
        let mut remote_patches: Vec<(Hash, ApplyTimestamp)> =
            self.changes(remote_branch, remote_path)?;
        remote_patches.sort_by(|&(_, ref a), &(_, ref b)| a.cmp(&b));
        let local_patches: HashMap<Hash, ApplyTimestamp> = {
            let repo_dir = target.pristine_dir();
            let repo = Repository::open(&repo_dir, None)?;
            let txn = repo.txn_begin()?;
            if let Some(branch) = txn.get_branch(&local_branch) {
                txn.iter_patches(&branch, None)
                    .map(|(hash, s)| (txn.get_external(hash).unwrap().to_owned(), s))
                    .collect()
            } else {
                HashMap::new()
            }
        };
        debug!("pullable done: {:?}", remote_patches);
        Ok(Pullable {
            // Only the hashes matter locally; consume the map to avoid
            // cloning each key.
            local: local_patches.into_iter().map(|(h, _)| h).collect(),
            remote: remote_patches.into_iter().collect(),
        })
    }

    pub fn pull(
        &mut self,
        target: &RepoRoot<impl AsRef<Path>>,
        to_branch: &str,
        pullable: &mut Vec<(Hash, ApplyTimestamp)>,
        partial_path: Option<&Path>,
        partial_paths: &[RepoPath<impl AsRef<Path>>],
        display_progress: bool,
154
155
156
157
158








159
160
    let mut txn = repo.mut_txn_begin(rand::thread_rng())?;

    let mut inode_prefixes = Vec::new();
    if let Some(prefixes) = prefixes {
        for pref in prefixes.iter() {
            let mut inode = txn.find_inode(pref).unwrap();
            let mut prefix = Vec::new();
            prefix.push(inode);
            while let Some(parent) = txn.get_revtree(inode) {
                prefix.push(parent.parent_inode);
                inode = parent.parent_inode;
            }
            inode_prefixes.push(prefix)
            inode_prefixes.push(txn.find_inode(pref).unwrap());
        }

1
2
3
4
5
6

7
8
    RUST_LOG="pijul=debug,libpijul::output=debug" pijul clone a b 2> /tmp/log
#!/usr/bin/env bats

load ../test_helper

@test "add/record/pull/delete/record/pull" {
    make_single_file_repo a toto
    RUST_LOG="pijul=debug" pijul clone a b 2> /tmp/log
    pijul clone a b
    pijul remove --repository b toto
18
19

20
21
    pijul mv foo.rs baz.rs

    RUST_BACKTRACE=1 RUST_LOG="pijul=debug,libpijul::output=debug,libpijul::graph=debug" pijul revert -a baz.rs 2> /tmp/log
    RUST_BACKTRACE=1 RUST_LOG="pijul=debug,libpijul::output=debug" pijul revert -a baz.rs 2> /tmp/log