pijul_org / pijul

First draft of partial clones (checkout and revert are still incorrect for patches that touch multiple parts of the repository)

By pmeunier on May 2, 2018
This patch is not signed.
A1K1zoKn9CwYEFgTCATeuU6TEqonC3gUq5VehtXr2hPr5V7SzxknQfDYxCgL7vavnxMHavZqZG58Krr7iBGo1Ryu
This patch is in the following branches:
latest
master
testing
use backend::*;
use patch::*;
use rand;
use record::{InodeUpdate, RecordState};
use std::collections::HashSet;


use std::path::Path;
use {Error, Result};
mod apply;
pub mod find_alive;
mod repair_deleted_context;
use diff;

use fs_representation::{RepoRoot, in_repo_root};
use output;
use output::ConflictingFile;

impl<U: Transaction, R> GenericTxn<U, R> {

    /// Return the patch id corresponding to `e`, or `internal` if `e==None`.
    pub fn internal_hash(&self, e: &Option<Hash>, internal: PatchId) -> PatchId {
        match *e {
            Some(Hash::None) => ROOT_PATCH_ID.clone(),
            Some(ref h) => self.get_internal(h.as_ref()).unwrap().to_owned(),
            None => internal.clone(),
        }
    }
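    // Illustration of the three cases above (a sketch, not part of the
    // original code; `txn` and `h` are hypothetical names):
    //
    //     txn.internal_hash(&Some(Hash::None), internal) == ROOT_PATCH_ID
    //     txn.internal_hash(&Some(h), internal)  // looked up in the internal db
    //     txn.internal_hash(&None, internal) == internal  // the patch refers to itself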

    /// Fetch the internal key for this external key (or `internal` if
    /// `key.patch` is `None`).
    pub fn internal_key(&self, key: &Key<Option<Hash>>, internal: PatchId) -> Key<PatchId> {
        // debug!("internal_key: {:?} {:?}", key, internal);
        Key {
            patch: self.internal_hash(&key.patch, internal),
            line: key.line.clone(),
        }
    }

    pub fn internal_key_unwrap(&self, key: &Key<Option<Hash>>) -> Key<PatchId> {
        Key {
            patch: self
                .get_internal(key.patch.as_ref().unwrap().as_ref())
                .unwrap()
                .to_owned(),
            line: key.line.clone(),
        }
    }
}

impl<'env, T: rand::Rng> MutTxn<'env, T> {

    /// Assumes all patches have been downloaded. The `remote_patches`
    /// argument needs to contain at least all the patches we want to
    /// apply to `branch`.
    pub fn apply_patches<F, P: output::ToPrefixes>(
        &mut self,
        diff_algorithm: diff::Algorithm,
        branch: &mut Branch,
        r: &RepoRoot<impl AsRef<Path>>,
        remote_patches: &[(Hash, Patch)],
        partial_paths: P,
        mut f: F,
    ) -> Result<Vec<ConflictingFile>>
    where
        F: FnMut(usize, &Hash),
    {
        let (pending, local_pending) = {
            let mut record = RecordState::new();
            self.record(diff_algorithm, &mut record, branch, r, &in_repo_root())?;
            let (changes, local) = record.finish();
            let mut p = UnsignedPatch::empty();
            p.changes = changes
                .into_iter()
                .flat_map(|x| x.into_iter())
                .map(|x| self.globalize_change(x))
                .collect();
            p.dependencies = self.dependencies(&branch, p.changes.iter());
            (p.leave_unsigned(), local)
        };

        let mut new_patches_count = 0;
        for &(ref p, ref patch) in remote_patches.iter() {
            debug!("apply_patches: {:?}", p);
            self.apply_patches_rec(branch, remote_patches, p, patch, &mut new_patches_count)?;
            f(new_patches_count, p);
        }
        debug!("{} patches applied", new_patches_count);

        if new_patches_count > 0 {
            let partial_paths = partial_paths.to_prefixes(self, &branch);
            self.output_changes_file(&branch, r)?;
            debug!("output_repository");
            self.output_partials(branch.name.as_str(), &partial_paths)?;
            self.output_repository(branch, r, &partial_paths, &pending, &local_pending)
        } else {
            debug!("finished apply_patches");
            Ok(Vec::new())
        }
    }
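    // A minimal sketch of how a caller might drive `apply_patches` during a
    // pull (not part of the original code; `repo`, `root`, `downloaded`,
    // `algo` and `prefixes` are hypothetical names, and patch download and
    // error handling are omitted):
    //
    //     let mut txn = repo.mut_txn_begin(rand::thread_rng())?;
    //     let mut branch = txn.open_branch("master")?;
    //     let conflicts = txn.apply_patches(
    //         algo,          // diff::Algorithm
    //         &mut branch,
    //         &root,         // RepoRoot of the working copy
    //         &downloaded,   // &[(Hash, Patch)], dependencies included
    //         prefixes,      // P: output::ToPrefixes, e.g. the paths of a partial check-out
    //         |n, h| debug!("{} patches applied, last {:?}", n, h),
    //     )?;
    //     txn.commit_branch(branch)?;
    //     txn.commit()?;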

    /// Lower-level applier. This function only applies patches as
    /// found in `patches_dir`, following dependencies recursively. It
    /// outputs neither the repository nor the branch's "changes file",
    /// which is needed to exchange patches locally or over HTTP.
    pub fn apply_patches_rec(
        &mut self,
        branch: &mut Branch,
        patches: &[(Hash, Patch)],
        patch_hash: &Hash,
        patch: &Patch,
        new_patches_count: &mut usize,
    ) -> Result<()> {
        let internal = {
            if let Some(internal) = self.get_internal(patch_hash.as_ref()) {
                if self.get_patch(&branch.patches, internal).is_some() {
                    debug!("get_patch returned {:?}", self.get_patch(&branch.patches, internal));
                    debug!(
                        "get_patch returned {:?}",
                        self.get_patch(&branch.patches, internal)
                    );
                    None
                } else {
                    // Doesn't have patch, but the patch is known in
                    // another branch
                    Some(internal.to_owned())
                }
            } else {
                // The patch is totally new to the repository.
                let internal = self.new_internal(patch_hash.as_ref());
                Some(internal)
            }
        };
        if let Some(internal) = internal {

            info!("Now applying patch {:?} {:?} to branch {:?}", patch.name, patch_hash, branch);
            info!(
                "Now applying patch {:?} {:?} to branch {:?}",
                patch.name, patch_hash, branch
            );
            if patch.dependencies().is_empty() {
                info!("Patch {:?} has no dependencies", patch_hash);
            }
            for dep in patch.dependencies().iter() {
                info!("Applying dependency {:?}", dep);
                info!("dep hash {:?}", dep.to_base58());
                let is_applied = {
                    if let Some(dep_internal) = self.get_internal(dep.as_ref()) {
                        self.get_patch(&branch.patches, dep_internal).is_some()
                    } else {
                        false
                    }
                };
                if !is_applied {
                    info!("Not applied");
                    // If `patches` is sorted in topological order,
                    // this shouldn't happen, because the dependencies
                    // have been applied before.
                    if let Some(&(_, ref patch)) = patches.iter().find(|&&(ref a, _)| a == dep) {
                        self.apply_patches_rec(branch, patches, &dep, patch, new_patches_count)?;
                    } else {
                        error!("Dependency not found");
                        return Err(Error::MissingDependency(dep.to_owned()));
                    }
                } else {
                    info!("Already applied");
                }
                let dep_internal = self.get_internal(dep.as_ref()).unwrap().to_owned();
                self.put_revdep(dep_internal, internal)?;
                self.put_dep(internal, dep_internal)?;
            }

            // Sanakirja doesn't let us insert the same pair twice.
            self.put_external(internal, patch_hash.as_ref())?;
            debug!("sanakirja put internal {:?} {:?}", patch_hash, internal);
            self.put_internal(patch_hash.as_ref(), internal)?;
            self.register_patch(internal, patch_hash, patch)?;

            let now = branch.apply_counter;
            branch.apply_counter += 1;
            self.apply(branch, &patch, internal, now)?;

            *new_patches_count += 1;

            Ok(())
        } else {
            info!("Patch {:?} has already been applied", patch_hash);
            Ok(())
        }
    }
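    // Ordering note (sketch, not part of the original code): if `patches` is
    // already sorted so that dependencies come first, the recursive call
    // above is never taken and a plain loop applies each patch exactly once
    // (`txn`, `branch` and `count` are hypothetical caller-side names):
    //
    //     for &(ref hash, ref patch) in patches {
    //         txn.apply_patches_rec(&mut branch, patches, hash, patch, &mut count)?;
    //     }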

    /// Apply a patch from a local record: register it, give it a hash, and then apply.
    pub fn apply_local_patch(
        &mut self,
        branch: &mut Branch,
        working_copy: &RepoRoot<impl AsRef<Path>>,
        hash: &Hash,
        patch: &Patch,
        inode_updates: &HashSet<InodeUpdate>,
        is_pending: bool,
    ) -> Result<PatchId> {
        info!("registering a patch with {} changes", patch.changes().len());
        info!("dependencies: {:?}", patch.dependencies());

        // let child_patch = patch.clone();

        let internal: PatchId = self.new_internal(hash.as_ref());

        for dep in patch.dependencies().iter() {
            let dep_internal = self.get_internal(dep.as_ref()).unwrap().to_owned();
            self.put_revdep(dep_internal, internal)?;
            self.put_dep(internal, dep_internal)?;
        }
        self.put_external(internal, hash.as_ref())?;
        self.put_internal(hash.as_ref(), internal)?;
        self.register_patch(internal, hash, patch)?;

        info!("Applying local patch");
        let now = branch.apply_counter;
        self.apply(branch, &patch, internal, now)?;
        debug!("synchronizing tree: {:?}", inode_updates);
        for update in inode_updates.iter() {

            self.update_inode(&branch, internal, update)?;
        }
        debug!("committing branch");
        if !is_pending {
            debug!("not pending, adding to changes");
            branch.apply_counter += 1;
            self.output_changes_file(&branch, working_copy)?;
        }
        trace!("done apply_local_patch");
        Ok(internal)
    }
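    // Sketch of the record-then-apply flow for a local change (hypothetical
    // caller, not part of the original code; building the `Patch` and
    // computing its `hash` happen outside this module and are elided):
    //
    //     let mut record = RecordState::new();
    //     txn.record(algo, &mut record, &mut branch, &root, &in_repo_root())?;
    //     let (changes, inode_updates) = record.finish();
    //     // ... turn `changes` into a `Patch`, obtain its `hash` ...
    //     txn.apply_local_patch(&mut branch, &root, &hash, &patch, &inode_updates, false)?;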

    fn register_patch(&mut self, internal: PatchId, hash: &Hash, patch: &Patch) -> Result<()> {
        self.put_external(internal, hash.as_ref())?;
        self.put_internal(hash.as_ref(), internal)?;
        for hunk in patch.changes() {
            let inode = match *hunk {
                Change::NewNodes { ref inode, .. } => inode,
                Change::NewEdges { ref inode, .. } => inode,
            };
            let inode = self.internal_key(inode, internal);
            self.put_touched_file(inode, internal)?;
        }
        Ok(())
    }

    /// Update the inodes/revinodes, tree/revtrees databases with the
    /// patch we just applied. This is because files don't really get
    /// moved or deleted before we apply the patch, they are just
    /// "marked as moved/deleted". This function does the actual
    /// update.
    fn update_inode(
        &mut self,
        branch: &Branch,
        internal: PatchId,
        update: &InodeUpdate,
    ) -> Result<()> {
        match *update {
            InodeUpdate::Add {
                ref line,
                ref meta,
                inode,
            } => {
                let key = FileHeader {
                    metadata: *meta,
                    status: FileStatus::Ok,
                    key: Key {
                        patch: internal.clone(),
                        line: line.clone(),
                    },
                };
                // If this file addition was actually recorded.
                if self.get_nodes(&branch, key.key, None).is_some() {
                    debug!("it's in here!: {:?} {:?}", key, inode);
                    self.replace_inodes(inode, key)?;
                    self.replace_revinodes(key.key, inode)?;
                }
            }
            InodeUpdate::Deleted { inode } => {
                // If this change was actually applied.
                debug!("deleted: {:?}", inode);
                let header = self.get_inodes(inode).unwrap().clone();
                debug!("deleted header: {:?}", header);
                let flag =
                    EdgeFlags::PARENT_EDGE | EdgeFlags::FOLDER_EDGE | EdgeFlags::DELETED_EDGE;
                if self
                    .iter_adjacent(&branch, header.key, flag, flag)
                    .any(|v| v.introduced_by == internal)
                {
                    self.del_inodes(inode, Some(header))?;
                    self.del_revinodes(header.key, Some(inode))?;

                    // We might have killed the parent in the same
                    // update.
                    if let Some(parent) = self.get_revtree(inode).map(|x| x.to_owned()) {
                        let parent = parent.as_file_id();
                        self.del_tree(&parent, None)?;
                        self.del_revtree(inode, None)?;
                    }
                }
            }
            InodeUpdate::Moved { inode, metadata } => {
                // If this change was actually applied.
                debug!("moved: {:?}", inode);
                let mut header = self.get_inodes(inode).unwrap().clone();
                debug!("moved header: {:?}", header);
                let flag = EdgeFlags::PARENT_EDGE | EdgeFlags::FOLDER_EDGE;
                if self
                    .iter_adjacent(&branch, header.key, flag, flag)
                    .any(|v| v.introduced_by == internal)
                {
                    header.status = FileStatus::Ok;
                    header.metadata = metadata;
                    self.replace_inodes(inode, header)?;
                    self.replace_revinodes(header.key, inode)?;
                }
            }
        }

use hex;
use rand;
use sanakirja;
use sanakirja::Representable;
pub use sanakirja::Transaction;
use std;
use std::path::Path;
use {Error, Result};

pub use self::patch_id::*;

fn from_hex(hex: &str, s: &mut [u8]) -> bool {
    let hex = hex.as_bytes();
    if hex.len() <= 2 * s.len() {
        let mut i = 0;
        while i < hex.len() {
            let h = hex[i].to_ascii_lowercase();
            if h >= b'0' && h <= b'9' {
                s[i / 2] = s[i / 2] << 4 | (h - b'0')
            } else if h >= b'a' && h <= b'f' {
                s[i / 2] = s[i / 2] << 4 | (h - b'a' + 10)
            } else {
                return false;
            }
            i += 1
        }
        if i & 1 == 1 {
            s[i / 2] = s[i / 2] << 4
        }
        true
    } else {
        false
    }
}
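// Example of the decoding above (a sketch, not part of the original code):
// "2a" fills a one-byte buffer with 0x2a and returns `true`, while a non-hex
// character makes the function return `false`.
//
//     let mut buf = [0u8; 1];
//     assert!(from_hex("2a", &mut buf));
//     assert_eq!(buf[0], 0x2a);
//     assert!(!from_hex("zz", &mut buf));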
mod edge;
mod file_header;
mod file_id;
mod hash;
mod inode;
mod key;
mod patch_id;
mod small_string;

pub use self::edge::*;
pub use self::file_header::*;
pub use self::file_id::*;
pub use self::hash::*;
pub use self::inode::*;
pub use self::key::*;
pub use self::small_string::*;

pub type NodesDb = sanakirja::Db<self::key::Key<PatchId>, self::edge::Edge>;

/// The type of patch application numbers.
pub type ApplyTimestamp = u64;

/// The u64 is the epoch time in seconds when this patch was applied
/// to the repository.
type PatchSet = sanakirja::Db<self::patch_id::PatchId, ApplyTimestamp>;

type RevPatchSet = sanakirja::Db<ApplyTimestamp, self::patch_id::PatchId>;

pub struct Dbs {
    /// A map of the files in the working copy.
    tree: sanakirja::Db<self::file_id::UnsafeFileId, self::inode::Inode>,
    /// The reverse of tree.
    revtree: sanakirja::Db<self::inode::Inode, self::file_id::UnsafeFileId>,
    /// A map from inodes (in tree) to keys in branches.
    inodes: sanakirja::Db<self::inode::Inode, self::file_header::FileHeader>,
    /// The reverse of inodes, minus the header.
    revinodes: sanakirja::Db<self::key::Key<PatchId>, self::inode::Inode>,
    /// Text contents of keys.
    contents: sanakirja::Db<self::key::Key<PatchId>, sanakirja::value::UnsafeValue>,
    /// A map from external patch hashes to internal ids.
    internal: sanakirja::Db<self::hash::UnsafeHash, self::patch_id::PatchId>,
    /// The reverse of internal.
    external: sanakirja::Db<self::patch_id::PatchId, self::hash::UnsafeHash>,
    /// A reverse map of patch dependencies, i.e. (k,v) is in this map
    /// means that v depends on k.
    revdep: sanakirja::Db<self::patch_id::PatchId, self::patch_id::PatchId>,
    /// A map from branch names to graphs.
    branches:
        sanakirja::Db<self::small_string::UnsafeSmallStr, (NodesDb, PatchSet, RevPatchSet, u64)>,
    /// A map of edges to patches that remove them.
    cemetery: sanakirja::Db<(self::key::Key<PatchId>, self::edge::Edge), self::patch_id::PatchId>,
    /// Dependencies
    dep: sanakirja::Db<self::patch_id::PatchId, self::patch_id::PatchId>,
    /// Files touched by patches.
    touched_files: sanakirja::Db<self::key::Key<PatchId>, self::patch_id::PatchId>,
    /// Partial checkouts: branch -> partial
    partials: sanakirja::Db<self::small_string::UnsafeSmallStr, self::key::Key<PatchId>>,
}

/// Common type for both mutable transactions (`MutTxn`) and immutable
/// transaction (`Txn`). All of `Txn`'s methods are also `MutTxn`'s
/// methods.
pub struct GenericTxn<T, R> {
    #[doc(hidden)]
    pub txn: T,
    #[doc(hidden)]
    pub rng: R,
    #[doc(hidden)]
    pub dbs: Dbs,
}

/// A mutable transaction on a repository.
pub type MutTxn<'env, R> = GenericTxn<sanakirja::MutTxn<'env, ()>, R>;
/// An immutable transaction on a repository.
pub type Txn<'env> = GenericTxn<sanakirja::Txn<'env>, ()>;

/// The default name of a branch, for users who start working before
/// choosing branch names (or like the default name, "master").
pub const DEFAULT_BRANCH: &'static str = "master";

/// A repository. All operations on repositories must be done via transactions.
pub struct Repository {
    pub env: sanakirja::Env,
}

#[derive(Debug, PartialEq, Clone, Copy)]
pub enum Root {
    Tree,
    RevTree,
    Inodes,
    RevInodes,
    Contents,
    Internal,
    External,
    RevDep,
    Branches,
    Cemetery,
    TouchedFiles,
    Dep,
    RevTouchedFiles,
    Partials,
}

trait OpenDb: Transaction {
    fn open_db<K: Representable, V: Representable>(
        &mut self,
        num: Root,
    ) -> Result<sanakirja::Db<K, V>> {
        if let Some(db) = self.root(num as usize) {
            Ok(db)
        } else {
            Err(Error::NoDb(num))
        }
    }
}

impl<'a, T> OpenDb for sanakirja::MutTxn<'a, T> {
    fn open_db<K: Representable, V: Representable>(
        &mut self,
        num: Root,
    ) -> Result<sanakirja::Db<K, V>> {
        if let Some(db) = self.root(num as usize) {
            Ok(db)
        } else {
            Ok(self.create_db()?)
        }
    }
}
impl<'a> OpenDb for sanakirja::Txn<'a> {}

// Repositories need at least 2^5 = 32 pages, each of size 2^12.
const MIN_REPO_SIZE: u64 = 1 << 17;

impl Repository {

    #[doc(hidden)]
    pub fn size(&self) -> u64 {
        self.env.size()
    }

    #[doc(hidden)]
    pub fn repository_size<P: AsRef<Path>>(path: P) -> Result<u64> {
        let size = sanakirja::Env::file_size(path.as_ref())?;
        debug!("repository_size = {:?}", size);
        Ok(size)
    }

    /// Open a repository, possibly increasing the size of the
    /// underlying file if `size_increase` is `Some(…)`.
    pub fn open<P: AsRef<Path>>(path: P, size_increase: Option<u64>) -> Result<Self> {
        let size = if let Some(size) = size_increase {
            Repository::repository_size(path.as_ref()).unwrap_or(MIN_REPO_SIZE)
                + std::cmp::max(size, MIN_REPO_SIZE)
        } else {
            if let Ok(len) = Repository::repository_size(path.as_ref()) {
                std::cmp::max(len, MIN_REPO_SIZE)
            } else {
                MIN_REPO_SIZE
            }
        };
        Ok(Repository {
            env: sanakirja::Env::new(path, size)?,
        })
    }

    /// Open a repository, possibly increasing the size of the
    /// underlying file if `size_increase` is `Some(…)`.
    pub unsafe fn open_nolock<P: AsRef<Path>>(path: P, size_increase: Option<u64>) -> Result<Self> {
        let size = if let Some(size) = size_increase {
            Repository::repository_size(path.as_ref()).unwrap_or(MIN_REPO_SIZE)
                + std::cmp::max(size, MIN_REPO_SIZE)
        } else {
            if let Ok(len) = Repository::repository_size(path.as_ref()) {
                std::cmp::max(len, MIN_REPO_SIZE)
            } else {
                MIN_REPO_SIZE
            }
        };
        debug!("sanakirja::Env::new_nolock");
        Ok(Repository {
            env: sanakirja::Env::new_nolock(path, size)?,
        })
    }

    /// Close a repository. It is undefined behaviour to use it afterwards.
    pub unsafe fn close(&mut self) {
        self.env.close()
    }

    /// Start an immutable transaction. Immutable transactions can run
    /// concurrently.
    pub fn txn_begin(&self) -> Result<Txn> {
        let mut txn = self.env.txn_begin()?;
        let dbs = Dbs::new(&mut txn)?;
        let repo = GenericTxn {
            txn: txn,
            rng: (),
            dbs: dbs,
        };
        Ok(repo)
    }

    /// Start a mutable transaction. Mutable transactions exclude each
    /// other, but can in principle be run concurrently with immutable
    /// transactions. In that case, the immutable transaction only
    /// have access to the state of the repository immediately before
    /// the mutable transaction started.
    pub fn mut_txn_begin<R: rand::Rng>(&self, r: R) -> Result<MutTxn<R>> {
        let mut txn = self.env.mut_txn_begin()?;
        let dbs = Dbs::new(&mut txn)?;
        let repo = GenericTxn {
            txn: txn,
            rng: r,
            dbs: dbs,
        };
        Ok(repo)
    }
}
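// A minimal usage sketch (not from the original code; the path is
// hypothetical): open the on-disk environment and start a read-only
// transaction, which may run concurrently with other readers.
//
//     let repo = Repository::open("/tmp/repo/.pijul/pristine", None)?;
//     let txn = repo.txn_begin()?;
//     txn.dump(); // debug-dump of the databases, see the `dump` module below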

impl Dbs {
    fn new<T: OpenDb>(txn: &mut T) -> Result<Self> {
        let external = txn.open_db(Root::External)?;
        let branches = txn.open_db(Root::Branches)?;
        let tree = txn.open_db(Root::Tree)?;
        let revtree = txn.open_db(Root::RevTree)?;
        let inodes = txn.open_db(Root::Inodes)?;
        let revinodes = txn.open_db(Root::RevInodes)?;
        let internal = txn.open_db(Root::Internal)?;
        let contents = txn.open_db(Root::Contents)?;
        let revdep = txn.open_db(Root::RevDep)?;
        let cemetery = txn.open_db(Root::Cemetery)?;
        let dep = txn.open_db(Root::Dep)?;
        let touched_files = txn.open_db(Root::TouchedFiles)?;
        let partials = txn.open_db(Root::Partials)?;

        Ok(Dbs {
            external,
            branches,
            inodes,
            tree,
            revtree,
            revinodes,
            internal,
            revdep,
            contents,
            cemetery,
            dep,
            touched_files,
            partials,
        })
    }
}

/// The representation of a branch. The "application number" of a
/// patch on a branch is the state of the application counter at the
/// time the patch has been applied to that branch.
#[derive(Debug)]
pub struct Branch {
    /// The table containing the branch graph.
    pub db: NodesDb,
    /// The map of all patches applied to that branch, ordered by patch hash.
    pub patches: PatchSet,
    /// The map of all patches applied to that branch, ordered by application number.
    pub revpatches: RevPatchSet,
    /// The number of patches that have been applied on that branch,
    /// including patches that are no longer on the branch (i.e. that
    /// have been unrecorded).
    pub apply_counter: u64,
    /// Branch name.
    pub name: small_string::SmallString,
}

use sanakirja::Commit;
/// Branches and commits.
impl<'env, R: rand::Rng> MutTxn<'env, R> {

    /// Open a branch by name, creating an empty branch with that name
    /// if the name doesn't exist.
    pub fn open_branch<'name>(&mut self, name: &str) -> Result<Branch> {
        let name = small_string::SmallString::from_str(name);
        let (branch, patches, revpatches, counter) = if let Some(x) =
            self.txn
                .get(&self.dbs.branches, name.as_small_str().to_unsafe(), None)
        {
            x
        } else {
            (
                self.txn.create_db()?,
                self.txn.create_db()?,
                self.txn.create_db()?,
                0,
            )
        };
        Ok(Branch {
            db: branch,
            patches: patches,
            revpatches: revpatches,
            name: name,
            apply_counter: counter,
        })
    }

    /// Commit a branch. This is an extremely important thing to do on
    /// branches, and it is not done automatically when committing
    /// transactions.
    ///
    /// **I repeat: not calling this method before committing a
    /// transaction might cause database corruption.**
    pub fn commit_branch(&mut self, branch: Branch) -> Result<()> {
        debug!("Commit_branch. This is not too safe.");
        // Since we are replacing the value, we don't want to
        // decrement its reference counter (which del would do), hence
        // the transmute.
        //
        // This would normally be wrong. The only reason it works is
        // because we know that dbs_branches has never been forked
        // from another database, hence all the reference counts to
        // its elements are 1 (and therefore represented as "not
        // referenced" in Sanakirja).
        let mut dbs_branches: sanakirja::Db<UnsafeSmallStr, (u64, u64, u64, u64)> =
            unsafe { std::mem::transmute(self.dbs.branches) };

        debug!("Commit_branch, dbs_branches = {:?}", dbs_branches);
        self.txn.del(
            &mut self.rng,
            &mut dbs_branches,
            branch.name.as_small_str().to_unsafe(),
            None,
        )?;
        debug!("Commit_branch, dbs_branches = {:?}", dbs_branches);
        self.dbs.branches = unsafe { std::mem::transmute(dbs_branches) };
        self.txn.put(
            &mut self.rng,
            &mut self.dbs.branches,
            branch.name.as_small_str().to_unsafe(),
            (
                branch.db,
                branch.patches,
                branch.revpatches,
                branch.apply_counter,
            ),
        )?;
        debug!("Commit_branch, self.dbs.branches = {:?}", self.dbs.branches);
        Ok(())
    }
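    // Sketch of the order this requires (hypothetical caller, not part of the
    // original code): every open branch is committed back before the
    // transaction itself is committed.
    //
    //     let mut txn = repo.mut_txn_begin(rand::thread_rng())?;
    //     let branch = txn.open_branch("master")?;
    //     // ... apply patches, output the repository, etc. ...
    //     txn.commit_branch(branch)?; // first: write the branch back
    //     txn.commit()?;              // then: commit the transaction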

    /// Rename a branch. The branch still needs to be committed after
    /// this operation.
    pub fn rename_branch(&mut self, branch: &mut Branch, new_name: &str) -> Result<()> {
        debug!("Commit_branch. This is not too safe.");
        // Since we are replacing the value, we don't want to
        // decrement its reference counter (which del would do), hence
        // the transmute.
        //
        // Read the note in `commit_branch` to understand why this
        // works.
        let name_exists = self.get_branch(new_name).is_some();
        if name_exists {
            Err(Error::BranchNameAlreadyExists(new_name.to_string()))
        } else {
            let mut dbs_branches: sanakirja::Db<UnsafeSmallStr, (u64, u64, u64, u64)> =
                unsafe { std::mem::transmute(self.dbs.branches) };
            self.txn.del(
                &mut self.rng,
                &mut dbs_branches,
                branch.name.as_small_str().to_unsafe(),
                None,
            )?;
            self.dbs.branches = unsafe { std::mem::transmute(dbs_branches) };
            branch.name.clone_from_str(new_name);
            Ok(())
        }
    }

    /// Commit a transaction. **Be careful to commit all open branches
    /// before**.
    pub fn commit(mut self) -> Result<()> {

        self.txn.set_root(Root::Tree as usize, self.dbs.tree);
        self.txn.set_root(Root::RevTree as usize, self.dbs.revtree);
        self.txn.set_root(Root::Inodes as usize, self.dbs.inodes);
        self.txn
            .set_root(Root::RevInodes as usize, self.dbs.revinodes);
        self.txn
            .set_root(Root::Contents as usize, self.dbs.contents);
        self.txn
            .set_root(Root::Internal as usize, self.dbs.internal);
        self.txn
            .set_root(Root::External as usize, self.dbs.external);
        self.txn
            .set_root(Root::Branches as usize, self.dbs.branches);
        self.txn.set_root(Root::RevDep as usize, self.dbs.revdep);
        self.txn
            .set_root(Root::Cemetery as usize, self.dbs.cemetery);
        self.txn.set_root(Root::Dep as usize, self.dbs.dep);
        self.txn
            .set_root(Root::TouchedFiles as usize, self.dbs.touched_files);
        self.txn
            .set_root(Root::Partials as usize, self.dbs.partials);

        self.txn.commit()?;
        Ok(())
    }
}

use sanakirja::value::*;
use sanakirja::{Cursor, RevCursor};
pub struct TreeIterator<'a, T: Transaction + 'a>(Cursor<'a, T, UnsafeFileId, Inode>);

impl<'a, T: Transaction + 'a> Iterator for TreeIterator<'a, T> {
    type Item = (FileId<'a>, Inode);
    fn next(&mut self) -> Option<Self::Item> {
        debug!("tree iter");
        if let Some((k, v)) = self.0.next() {
            debug!("tree iter: {:?} {:?}", k, v);
            unsafe { Some((FileId::from_unsafe(k), v)) }
        } else {
            None
        }
    }
}

pub struct RevtreeIterator<'a, T: Transaction + 'a>(Cursor<'a, T, Inode, UnsafeFileId>);

impl<'a, T: Transaction + 'a> Iterator for RevtreeIterator<'a, T> {
    type Item = (Inode, FileId<'a>);
    fn next(&mut self) -> Option<Self::Item> {
        if let Some((k, v)) = self.0.next() {
            unsafe { Some((k, FileId::from_unsafe(v))) }
        } else {
            None
        }
    }
}

pub struct NodesIterator<'a, T: Transaction + 'a>(Cursor<'a, T, Key<PatchId>, Edge>);

impl<'a, T: Transaction> Iterator for NodesIterator<'a, T> {
    type Item = (Key<PatchId>, Edge);
    fn next(&mut self) -> Option<Self::Item> {
        if let Some((k, v)) = self.0.next() {
            Some((k, v))
        } else {
            None
        }
    }
}
pub struct BranchIterator<'a, T: Transaction + 'a>(
    Cursor<'a, T, UnsafeSmallStr, (NodesDb, PatchSet, RevPatchSet, u64)>,
);

impl<'a, T: Transaction + 'a> Iterator for BranchIterator<'a, T> {
    type Item = Branch;
    fn next(&mut self) -> Option<Self::Item> {
        if let Some((k, v)) = self.0.next() {
            unsafe {
                Some(Branch {
                    name: SmallStr::from_unsafe(k).to_owned(),
                    db: v.0,
                    patches: v.1,
                    revpatches: v.2,
                    apply_counter: v.3,
                })
            }
        } else {
            None
        }
    }
}


pub struct PatchesIterator<'a, T: Transaction + 'a>(Cursor<'a, T, PatchId, ApplyTimestamp>);

impl<'a, T: Transaction + 'a> Iterator for PatchesIterator<'a, T> {
    type Item = (PatchId, ApplyTimestamp);
    fn next(&mut self) -> Option<Self::Item> {
        self.0.next()
    }
}

pub struct RevAppliedIterator<'a, T: Transaction + 'a>(RevCursor<'a, T, ApplyTimestamp, PatchId>);

impl<'a, T: Transaction + 'a> Iterator for RevAppliedIterator<'a, T> {
    type Item = (ApplyTimestamp, PatchId);
    fn next(&mut self) -> Option<Self::Item> {
        self.0.next()
    }
}

pub struct AppliedIterator<'a, T: Transaction + 'a>(Cursor<'a, T, ApplyTimestamp, PatchId>);

impl<'a, T: Transaction + 'a> Iterator for AppliedIterator<'a, T> {
    type Item = (ApplyTimestamp, PatchId);
    fn next(&mut self) -> Option<Self::Item> {
        self.0.next()
    }
}


pub struct InodesIterator<'a, T: Transaction + 'a>(Cursor<'a, T, Inode, FileHeader>);

impl<'a, T: Transaction + 'a> Iterator for InodesIterator<'a, T> {
    type Item = (Inode, FileHeader);
    fn next(&mut self) -> Option<Self::Item> {
        self.0.next()
    }
}

pub struct InternalIterator<'a, T: Transaction + 'a>(Cursor<'a, T, UnsafeHash, PatchId>);

impl<'a, T: Transaction + 'a> Iterator for InternalIterator<'a, T> {
    type Item = (HashRef<'a>, PatchId);
    fn next(&mut self) -> Option<Self::Item> {
        if let Some((k, v)) = self.0.next() {
            unsafe { Some((HashRef::from_unsafe(k), v)) }
        } else {
            None
        }
    }
}
pub struct ExternalIterator<'a, T: Transaction + 'a>(Cursor<'a, T, PatchId, UnsafeHash>);

impl<'a, T: Transaction + 'a> Iterator for ExternalIterator<'a, T> {
    type Item = (PatchId, HashRef<'a>);
    fn next(&mut self) -> Option<Self::Item> {
        if let Some((k, v)) = self.0.next() {
            unsafe { Some((k, HashRef::from_unsafe(v))) }
        } else {
            None
        }
    }
}

pub struct RevdepIterator<'a, T: Transaction + 'a>(Cursor<'a, T, PatchId, PatchId>);

impl<'a, T: Transaction + 'a> Iterator for RevdepIterator<'a, T> {
    type Item = (PatchId, PatchId);
    fn next(&mut self) -> Option<Self::Item> {
        self.0.next()
    }
}

pub struct DepIterator<'a, T: Transaction + 'a>(Cursor<'a, T, PatchId, PatchId>);

impl<'a, T: Transaction + 'a> Iterator for DepIterator<'a, T> {
    type Item = (PatchId, PatchId);
    fn next(&mut self) -> Option<Self::Item> {
        self.0.next()
    }
}

pub struct ContentsIterator<'a, T: Transaction + 'a>(
    &'a T,
    Cursor<'a, T, Key<PatchId>, UnsafeValue>,
);

impl<'a, T: Transaction + 'a> Iterator for ContentsIterator<'a, T> {
    type Item = (Key<PatchId>, Value<'a, T>);
    fn next(&mut self) -> Option<Self::Item> {
        if let Some((k, v)) = self.1.next() {
            unsafe { Some((k, Value::from_unsafe(&v, self.0))) }
        } else {
            None
        }
    }
}

pub struct CemeteryIterator<'a, T: Transaction + 'a>(Cursor<'a, T, (Key<PatchId>, Edge), PatchId>);

impl<'a, T: Transaction + 'a> Iterator for CemeteryIterator<'a, T> {
    type Item = ((Key<PatchId>, Edge), PatchId);
    fn next(&mut self) -> Option<Self::Item> {
        if let Some(((k, v), w)) = self.0.next() {
            Some(((k, v), w))
        } else {
            None
        }
    }
}

pub struct TouchedIterator<'a, T: Transaction + 'a>(Cursor<'a, T, Key<PatchId>, PatchId>);

impl<'a, T: Transaction + 'a> Iterator for TouchedIterator<'a, T> {
    type Item = (Key<PatchId>, PatchId);
    fn next(&mut self) -> Option<Self::Item> {
        if let Some((k, v)) = self.0.next() {
            Some((k, v))
        } else {
            None
        }
    }
}

pub struct PartialsIterator<'a, T: Transaction + 'a>(Cursor<'a, T, UnsafeSmallStr, Key<PatchId>>);

impl<'a, T: Transaction + 'a> Iterator for PartialsIterator<'a, T> {
    type Item = (SmallStr<'a>, Key<PatchId>);
    fn next(&mut self) -> Option<Self::Item> {
        if let Some((k, v)) = self.0.next() {
            unsafe { Some((SmallStr::from_unsafe(k), v)) }
        } else {
            None
        }
    }
}

mod dump {
    use super::*;
    use sanakirja;

    impl<U: Transaction, R> GenericTxn<U, R> {
        pub fn dump(&self) {
            debug!("============= dumping Tree");
            for (k, v) in self.iter_tree(None) {
                debug!("> {:?} {:?}", k, v)
            }
            debug!("============= dumping Inodes");
            for (k, v) in self.iter_inodes(None) {
                debug!("> {:?} {:?}", k, v)
            }
            debug!("============= dumping RevDep");
            for (k, v) in self.iter_revdep(None) {
                debug!("> {:?} {:?}", k, v)
            }
            debug!("============= dumping Internal");
            for (k, v) in self.iter_internal(None) {
                debug!("> {:?} {:?}", k, v)
            }
            debug!("============= dumping External");
            for (k, v) in self.iter_external(None) {
                debug!("> {:?} {:?} {:?}", k, v, v.to_base58());
            }
            debug!("============= dumping Contents");
            {
                sanakirja::debug(&self.txn, &[&self.dbs.contents], "dump_contents", true);
            }
            debug!("============= dumping Partials");
            for (k, v) in self.iter_partials("") {
                debug!("> {:?} {:?}", k, v);
            }
            debug!("============= dumping Branches");
            for (br, (db, patches, revpatches, counter)) in self.txn.iter(&self.dbs.branches, None)
            {
                debug!("patches: {:?} {:?}", patches, revpatches);
                debug!("============= dumping Patches in branch {:?}, counter = {:?}", br, counter);
                debug!(
                    "============= dumping Patches in branch {:?}, counter = {:?}",
                    br, counter
                );
                for (k, v) in self.txn.iter(&patches, None) {
                    debug!("> {:?} {:?}", k, v)
                }
                debug!("============= dumping RevPatches in branch {:?}", br);
                for (k, v) in self.txn.iter(&revpatches, None) {
                    debug!("> {:?} {:?}", k, v)
                }
                debug!("============= dumping Nodes in branch {:?}", br);
                unsafe {
                    // sanakirja::debug(&self.txn, &[&db], path);
                    debug!("> {:?}", SmallStr::from_unsafe(br));
                    for (k, v) in self.txn.iter(&db, None) {
                        debug!(">> {:?} {:?}", k, v)
                    }
                }
            }
        }
    }
}


/// An iterator for nodes adjacent to `key` through an edge with flags smaller than `max_flag`.
pub struct AdjIterator<'a, U: Transaction + 'a> {
    it: NodesIterator<'a, U>,
    key: Key<PatchId>,
    /// iter as long as the flag is smaller than this
    max_flag: EdgeFlags,
}

impl<'a, U: Transaction + 'a> Iterator for AdjIterator<'a, U> {
    type Item = Edge;
    fn next(&mut self) -> Option<Self::Item> {
        if let Some((v, e)) = self.it.next() {
            if v == self.key && e.flag <= self.max_flag {
                Some(e)
            } else {
                None
            }
        } else {
            None
        }
    }
}

/*
macro_rules! iterate_parents {
    ($txn:expr, $branch:expr, $key:expr, $flag: expr) => { {
        let edge = Edge::zero($flag|PARENT_EDGE);
        $txn.iter_nodes(& $branch, Some(($key, Some(&edge))))
            .take_while(|&(k, parent)| {
                *k == *$key && parent.flag <= $flag|PARENT_EDGE|PSEUDO_EDGE
            })
            .map(|(_,b)| b)
    } }
}
*/
use std::collections::{BTreeMap, HashSet};
#[derive(Debug)]
pub struct PageCounts {
    pub tree: usize,
    pub revtree: usize,
    pub inodes: usize,
    pub revinodes: usize,
    pub contents: usize,
    pub internal: usize,
    pub external: usize,
    pub revdep: usize,
    pub branch_table: usize,
    pub cemetery: usize,
    pub dep: usize,
    pub touched_files: usize,
    pub partials: usize,
    pub branches: BTreeMap<String, usize>,
}

impl PageCounts {
    pub fn sum(&self) -> usize {
        self.tree
            + self.revtree
            + self.inodes
            + self.revinodes
            + self.contents
            + self.internal
            + self.external
            + self.revdep
            + self.branch_table
            + self.cemetery
            + self.dep
            + self.touched_files
            + self.partials
            + self.branches.iter().map(|x| x.1).sum::<usize>()
    }
}

impl<'a> Txn<'a> {
    pub fn page_counts(&self) -> PageCounts {
        let mut references = HashSet::new();
        self.txn.references(&mut references, self.dbs.tree);
        let tree = references.len();
        references.clear();
        self.txn.references(&mut references, self.dbs.revtree);
        let revtree = references.len();
        references.clear();
        self.txn.references(&mut references, self.dbs.inodes);
        let inodes = references.len();
        references.clear();
        self.txn.references(&mut references, self.dbs.revinodes);
        let revinodes = references.len();
        references.clear();
        self.txn.references(&mut references, self.dbs.contents);
        let contents = references.len();
        references.clear();
        self.txn.references(&mut references, self.dbs.internal);
        let internal = references.len();
        references.clear();
        self.txn.references(&mut references, self.dbs.external);
        let external = references.len();
        references.clear();
        self.txn.references(&mut references, self.dbs.revdep);
        let revdep = references.len();
        references.clear();
        self.txn.references(&mut references, self.dbs.branches);
        let branch_table = references.len();
        references.clear();
        self.txn.references(&mut references, self.dbs.cemetery);
        let cemetery = references.len();
        references.clear();
        self.txn.references(&mut references, self.dbs.dep);
        let dep = references.len();
        references.clear();
        self.txn.references(&mut references, self.dbs.touched_files);
        let touched_files = references.len();
        references.clear();
        self.txn.references(&mut references, self.dbs.partials);
        let partials = references.len();

        let mut branches = BTreeMap::new();
        for br in self.iter_branches(None) {
            references.clear();
            self.txn.references(&mut references, br.db);
            branches.insert(br.name.as_str().to_string(), references.len());
        }

        PageCounts {
            tree,
            revtree,
            inodes,
            revinodes,
            contents,
            internal,
            external,
            revdep,
            branch_table,
            cemetery,
            dep,
            touched_files,
            partials,
            branches,
        }
    }
}
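
// A minimal sketch of reporting the page counts computed above, using only
// `page_counts` and `PageCounts::sum` as defined in this file; the function
// itself is illustrative, not part of the crate's API.
#[allow(dead_code)]
fn debug_page_counts(txn: &Txn) {
    let counts = txn.page_counts();
    for (branch, pages) in counts.branches.iter() {
        debug!("branch {:?} uses {} pages", branch, pages);
    }
    debug!("total pages referenced: {}", counts.sum());
}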

impl<U: Transaction, R> GenericTxn<U, R> {

    /// Does this repository have a branch called `name`?
    pub fn has_branch(&self, name: &str) -> bool {
        let name = small_string::SmallString::from_str(name);
        self.txn
            .get(&self.dbs.branches, name.as_small_str().to_unsafe(), None)
            .is_some()
    }

    /// Get the branch with the given name, if it exists.
    pub fn get_branch<'name>(&self, name: &str) -> Option<Branch> {
        let name = small_string::SmallString::from_str(name);
        if let Some((branch, patches, revpatches, counter)) =
            self.txn
                .get(&self.dbs.branches, name.as_small_str().to_unsafe(), None)
        {
            Some(Branch {
                db: branch,
                patches: patches,
                revpatches: revpatches,
                apply_counter: counter,
                name: name,
            })
        } else {
            None
        }
    }

    /// Return the first edge of this `key` if `edge` is `None`, and
    /// a pointer to the edge in the database if `edge` is `Some`.
    pub fn get_nodes<'a>(
        &'a self,
        branch: &Branch,
        key: Key<PatchId>,
        edge: Option<Edge>,
    ) -> Option<Edge> {
        self.txn.get(&branch.db, key, edge)
    }

    /// An iterator over keys and edges, in branch `branch`, starting
    /// from key and edge specified by `key`. If `key` is `None`, the
    /// iterations start from the first key and first edge. If `key`
    /// is of the form `Some(a, None)`, they start from the first edge
    /// of key `a`. If `key` is of the from `Some(a, Some(b))`, they
    /// start from the first key and edge that is at least `(a, b)`.
    pub fn iter_nodes<'a>(
        &'a self,
        branch: &'a Branch,
        key: Option<(Key<PatchId>, Option<Edge>)>,
    ) -> NodesIterator<'a, U> {
        NodesIterator(self.txn.iter(&branch.db, key.map(|(k, v)| (k, v))))
    }
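
    // A minimal sketch of using `iter_nodes` to collect every edge of one
    // node: starting at `Some((key, None))` begins at the first edge of
    // `key`, and the `take_while` stops as soon as the key changes.
    #[allow(dead_code)]
    fn edges_of(&self, branch: &Branch, key: Key<PatchId>) -> Vec<Edge> {
        self.iter_nodes(branch, Some((key, None)))
            .take_while(|&(k, _)| k == key)
            .map(|(_, e)| e)
            .collect()
    }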

    pub fn iter_adjacent<'a>(
        &'a self,
        branch: &'a Branch,
        key: Key<PatchId>,
        min_flag: EdgeFlags,
        max_flag: EdgeFlags,
    ) -> AdjIterator<'a, U> {
        let edge = Edge::zero(min_flag);
        AdjIterator {
            it: self.iter_nodes(branch, Some((key, Some(edge)))),
            key,
            max_flag: max_flag,
        }
    }

    pub fn iter_parents<'a>(
        &'a self,
        branch: &'a Branch,
        key: Key<PatchId>,
        flag: EdgeFlags,
    ) -> AdjIterator<'a, U> {
        let edge = Edge::zero(flag | EdgeFlags::PARENT_EDGE);
        AdjIterator {
            it: self.iter_nodes(branch, Some((key, Some(edge)))),
            key,
            max_flag: flag | EdgeFlags::PARENT_EDGE | EdgeFlags::PSEUDO_EDGE,
        }
    }
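
    // A minimal sketch of consuming `iter_parents`: with an empty extra
    // flag it yields the non-deleted parent (and pseudo-parent) edges of
    // `key`, so counting them is one way to check whether the node still
    // has alive parents.
    #[allow(dead_code)]
    fn count_live_parents(&self, branch: &Branch, key: Key<PatchId>) -> usize {
        self.iter_parents(branch, key, EdgeFlags::empty()).count()
    }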

    pub fn iter_folder_children<'a>(
        &'a self,
        branch: &'a Branch,
        key: Key<PatchId>,
        flag: EdgeFlags,
    ) -> AdjIterator<'a, U> {
        let edge = Edge::zero(flag | EdgeFlags::FOLDER_EDGE);
        AdjIterator {
            it: self.iter_nodes(branch, Some((key, Some(edge)))),
            key,
            max_flag: flag
                | EdgeFlags::FOLDER_EDGE
                | EdgeFlags::PSEUDO_EDGE
                | EdgeFlags::EPSILON_EDGE,
        }
    }

    /// An iterator over branches in the database, starting from the
    /// given branch name.
    pub fn iter_branches<'a>(&'a self, key: Option<&SmallStr>) -> BranchIterator<'a, U> {
        BranchIterator(
            self.txn
                .iter(&self.dbs.branches, key.map(|k| (k.to_unsafe(), None))),
        )
    }

    /// An iterator over the partial paths registered for branches,
    /// starting from the given branch name.
    pub fn iter_partials<'a>(&'a self, branch: &str) -> PartialsIterator<'a, U> {
        let key = SmallString::from_str(branch);
        PartialsIterator(self.txn.iter(
            &self.dbs.partials,
            Some((key.as_small_str().to_unsafe(), None)),
        ))
    }
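
    // A minimal sketch of consuming `iter_partials`. The cursor starts at
    // the given branch name but does not stop at it, so entries recorded
    // for later branch names follow; a real caller would stop once the
    // returned name no longer matches.
    #[allow(dead_code)]
    fn debug_partials(&self, branch: &str) {
        for (name, key) in self.iter_partials(branch) {
            debug!("partial path of {:?}: {:?}", name, key);
        }
    }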

    /// An iterator over patches in a branch, in the alphabetical
    /// order of their hash.
    pub fn iter_patches<'a>(
        &'a self,
        branch: &'a Branch,
        key: Option<PatchId>,
    ) -> PatchesIterator<'a, U> {
        PatchesIterator(self.txn.iter(&branch.patches, key.map(|k| (k, None))))
    }

    /// An iterator over patches in a branch, in the reverse order in
    /// which they were applied.
    pub fn rev_iter_applied<'a>(
        &'a self,
        branch: &'a Branch,
        key: Option<ApplyTimestamp>,
    ) -> RevAppliedIterator<'a, U> {
        RevAppliedIterator(
            self.txn
                .rev_iter(&branch.revpatches, key.map(|k| (k, None))),
        )
    }

    /// An iterator over patches in a branch in the order in which
    /// they were applied.
    pub fn iter_applied<'a>(
        &'a self,
        branch: &'a Branch,
        key: Option<ApplyTimestamp>,
    ) -> AppliedIterator<'a, U> {
        AppliedIterator(self.txn.iter(&branch.revpatches, key.map(|k| (k, None))))
    }
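
    // A minimal sketch pairing `iter_applied` with `get_external` (defined
    // below): walk a branch's patches in application order and look up the
    // external hash of each internal id.
    #[allow(dead_code)]
    fn debug_applied(&self, branch: &Branch) {
        for (timestamp, patch_id) in self.iter_applied(branch, None) {
            debug!(
                "{:?} applied at {:?} ({:?})",
                patch_id,
                timestamp,
                self.get_external(patch_id)
            );
        }
    }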

    /// An iterator over files and directories currently tracked by
    /// Pijul, starting from the given `FileId`. The `Inode`s returned
    /// by the iterator can be used to form new `FileId`s and traverse
    /// the tree from top to bottom.
    ///
    /// The set of tracked files is changed by the following
    /// operations: outputting the repository, adding, deleting and
    /// moving files. It is not related to branches, but only to the
    /// files actually present on the file system.
    pub fn iter_tree<'a>(&'a self, key: Option<(&FileId, Option<Inode>)>) -> TreeIterator<'a, U> {
        debug!("iter_tree: {:?}", key);
        TreeIterator(
            self.txn
                .iter(&self.dbs.tree, key.map(|(k, v)| (k.to_unsafe(), v))),
        )
    }

    /// An iterator over files and directories, following directories
    /// in the opposite direction.
    pub fn iter_revtree<'a>(
        &'a self,
        key: Option<(Inode, Option<&FileId>)>,
    ) -> RevtreeIterator<'a, U> {
        RevtreeIterator(self.txn.iter(
            &self.dbs.revtree,
            key.map(|(k, v)| (k, v.map(|v| v.to_unsafe()))),
        ))
    }

    /// An iterator over the "inodes" database, which contains
    /// correspondences between files on the filesystem and the files
    /// in the graph.
    pub fn iter_inodes<'a>(
        &'a self,
        key: Option<(Inode, Option<FileHeader>)>,
    ) -> InodesIterator<'a, U> {
        InodesIterator(self.txn.iter(&self.dbs.inodes, key))
    }

    /// Iterator over the `PatchId` to `Hash` correspondence.
    pub fn iter_external<'a>(
        &'a self,
        key: Option<(PatchId, Option<HashRef>)>,
    ) -> ExternalIterator<'a, U> {
        ExternalIterator(self.txn.iter(
            &self.dbs.external,
            key.map(|(k, v)| (k, v.map(|v| v.to_unsafe()))),
        ))
    }

    /// Iterator over the `Hash` to `PatchId` correspondence.
    pub fn iter_internal<'a>(
        &'a self,
        key: Option<(HashRef, Option<PatchId>)>,
    ) -> InternalIterator<'a, U> {
        InternalIterator(
            self.txn
                .iter(&self.dbs.internal, key.map(|(k, v)| (k.to_unsafe(), v))),
        )
    }

    /// Iterator over reverse dependencies (`(k, v)` is in the reverse
    /// dependency table if `v` depends on `k`, and both are in at
    /// least one branch).
    pub fn iter_revdep<'a>(
        &'a self,
        key: Option<(PatchId, Option<PatchId>)>,
    ) -> RevdepIterator<'a, U> {
        RevdepIterator(self.txn.iter(&self.dbs.revdep, key))
    }

    /// Iterator over dependencies.
    pub fn iter_dep<'a>(&'a self, key: Option<(PatchId, Option<PatchId>)>) -> DepIterator<'a, U> {
        DepIterator(self.txn.iter(&self.dbs.dep, key))
    }

    /// An iterator over line contents (common to all branches).
    pub fn iter_contents<'a>(&'a self, key: Option<Key<PatchId>>) -> ContentsIterator<'a, U> {
        ContentsIterator(
            &self.txn,
            self.txn.iter(&self.dbs.contents, key.map(|k| (k, None))),
        )
    }

    /// An iterator over edges in the cemetery.
    pub fn iter_cemetery<'a>(&'a self, key: Key<PatchId>, edge: Edge) -> CemeteryIterator<'a, U> {
        CemeteryIterator(self.txn.iter(&self.dbs.cemetery, Some(((key, edge), None))))
    }

    /// An iterator over patches that touch a certain file.
    pub fn iter_touched<'a>(&'a self, key: Key<PatchId>) -> TouchedIterator<'a, U> {
        TouchedIterator(self.txn.iter(&self.dbs.touched_files, Some((key, None))))
    }

    /// Tell whether a patch touches a file
    pub fn get_touched<'a>(&'a self, key: Key<PatchId>, patch: PatchId) -> bool {
        self.txn
            .get(&self.dbs.touched_files, key, Some(patch))
            .is_some()
    }

    /// Get the `Inode` of a given `FileId`. A `FileId` is itself
    /// composed of an inode and a name, hence this can be used to
    /// traverse the tree of tracked files from top to bottom.
    pub fn get_tree<'a>(&'a self, key: &FileId) -> Option<Inode> {
        self.txn.get(&self.dbs.tree, key.to_unsafe(), None)
    }

    /// Get the parent `FileId` of a given `Inode`. A `FileId` is
    /// itself composed of an `Inode` and a name, so this can be used
    /// to traverse the tree of tracked files from bottom to top
    /// (starting from a leaf).
    pub fn get_revtree<'a>(&'a self, key: Inode) -> Option<FileId<'a>> {
        self.txn
            .get(&self.dbs.revtree, key, None)
            .map(|e| unsafe { FileId::from_unsafe(e) })
    }

    /// Get the key in branches for the given `Inode`, as well as
    /// meta-information on the file (permissions, and whether it has
    /// been moved or deleted compared to the branch).
    ///
    /// This table is updated every time the repository is output, and
    /// when files are moved or deleted. It is meant to be
    /// synchronised with the current branch (if any).
    pub fn get_inodes<'a>(&'a self, key: Inode) -> Option<FileHeader> {
        self.txn.get(&self.dbs.inodes, key, None)
    }

    /// Get the `Inode` corresponding to `key` in branches (see the
    /// documentation for `get_inodes`).
    pub fn get_revinodes(&self, key: Key<PatchId>) -> Option<Inode> {
        self.txn.get(&self.dbs.revinodes, key, None)
    }

    /// Get the contents of a line.
    pub fn get_contents<'a>(&'a self, key: Key<PatchId>) -> Option<Value<'a, U>> {
        if let Some(e) = self.txn.get(&self.dbs.contents, key, None) {
            unsafe { Some(Value::from_unsafe(&e, &self.txn)) }
        } else {
            None
        }
    }

    /// Get the `PatchId` (or internal patch identifier) of the
    /// provided patch hash.
    pub fn get_internal(&self, key: HashRef) -> Option<PatchId> {
        match key {
            HashRef::None => Some(ROOT_PATCH_ID),
            h => self.txn.get(&self.dbs.internal, h.to_unsafe(), None),
        }
    }

    /// Get the `HashRef` (external patch identifier) of the provided
    /// internal patch identifier.
    pub fn get_external<'a>(&'a self, key: PatchId) -> Option<HashRef<'a>> {
        self.txn
            .get(&self.dbs.external, key, None)
            .map(|e| unsafe { HashRef::from_unsafe(e) })
    }
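
    // A minimal sketch of the round trip between the two patch-identifier
    // tables: a hash resolved with `get_internal` should map back to an
    // entry in the external table (the root patch id is a special case).
    #[allow(dead_code)]
    fn check_patch_id(&self, hash: HashRef) -> bool {
        match self.get_internal(hash) {
            Some(internal) => self.get_external(internal).is_some(),
            None => false,
        }
    }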

    /// Get the patch number in the branch. Patch numbers are
    /// guaranteed to always increase when a new patch is applied, but
    /// are not necessarily consecutive.
    pub fn get_patch(&self, patch_set: &PatchSet, patchid: PatchId) -> Option<ApplyTimestamp> {
        self.txn.get(patch_set, patchid, None)
    }

    /// Get the smallest patch id that depends on `patch` (and is at
    /// least `dep` in alphabetical order if `dep` is `Some`).
    pub fn get_revdep(&self, patch: PatchId, dep: Option<PatchId>) -> Option<PatchId> {
        self.txn.get(&self.dbs.revdep, patch, dep)
    }

    /// Get the smallest patch id that `patch` depends on (and is at
    /// least `dep` in alphabetical order if `dep` is `Some`).
    pub fn get_dep(&self, patch: PatchId, dep: Option<PatchId>) -> Option<PatchId> {
        self.txn.get(&self.dbs.dep, patch, dep)
    }

    /// Dump the graph of a branch into a writer, in dot format.
    pub fn debug<W>(&self, branch_name: &str, w: &mut W, exclude_parents: bool)
    where
        W: std::io::Write,
    {
        debug!("debugging branch {:?}", branch_name);
        let mut styles = Vec::with_capacity(16);
        for i in 0..32 {
            let flag = EdgeFlags::from_bits(i as u8).unwrap();
            styles.push(("color=").to_string() + ["red", "blue", "orange", "green", "black"][(i >> 1) & 3] +
                        if flag.contains(EdgeFlags::DELETED_EDGE) {
                ", style=dashed"
            } else {
                ""
            } +
                        if flag.contains(EdgeFlags::PSEUDO_EDGE) {
                ", style=dotted"
            } else {
                ""
            })
            styles.push(
                ("color=").to_string()
                    + ["red", "blue", "orange", "green", "black"][(i >> 1) & 3]
                    + if flag.contains(EdgeFlags::DELETED_EDGE) {
                        ", style=dashed"
                    } else {
                        ""
                    }
                    + if flag.contains(EdgeFlags::PSEUDO_EDGE) {
                        ", style=dotted"
                    } else {
                        ""
                    },
            )
        }
        w.write(b"digraph{\n").unwrap();
        let branch = self.get_branch(branch_name).unwrap();

        let mut cur: Key<PatchId> = ROOT_KEY.clone();
        for (k, v) in self.iter_nodes(&branch, None) {
            if k != cur {
                let cont = if let Some(cont) = self.get_contents(k) {
                    let cont = cont.into_cow();
                    let cont = &cont[..std::cmp::min(50, cont.len())];
                    format!(
                        "{:?}",
                        match std::str::from_utf8(cont) {
                            Ok(x) => x.to_string(),
                            Err(_) => hex::encode(cont),
                        }
                    )
                } else {
                    "\"\"".to_string()
                };
                // remove the leading and trailing '"'.
                let cont = &cont[1..(cont.len() - 1)];
                write!(
                    w,
                    "n_{}[label=\"{}.{}: {}\"];\n",
                    k.to_hex(),
                    k.patch.to_base58(),
                    k.line.to_hex(),
                    cont.replace("\n", "")
                )
                .unwrap();
                cur = k.clone();
            }
            debug!("debug: {:?}", v);
            let flag = v.flag.bits();
            if !(exclude_parents && v.flag.contains(EdgeFlags::PARENT_EDGE)) {
                write!(
                    w,
                    "n_{}->n_{}[{},label=\"{} {}\"];\n",
                    k.to_hex(),
                    &v.dest.to_hex(),
                    styles[(flag & 0xff) as usize],
                    flag,
                    v.introduced_by.to_base58()
                )
                .unwrap();
            }
        }
        w.write(b"}\n").unwrap();
    }

    /// Dump the graph of a branch into a writer, in dot format.
    pub fn debug_folders<W>(&self, branch_name: &str, w: &mut W)
    where
        W: std::io::Write,
    {
        debug!("debugging branch {:?}", branch_name);
        let mut styles = Vec::with_capacity(16);
        for i in 0..32 {
            let flag = EdgeFlags::from_bits(i as u8).unwrap();
            styles.push(("color=").to_string() + ["red", "blue", "orange", "green", "black"][(i >> 1) & 3] +
                        if flag.contains(EdgeFlags::DELETED_EDGE) {
                ", style=dashed"
            } else {
                ""
            } +
                        if flag.contains(EdgeFlags::PSEUDO_EDGE) {
                ", style=dotted"
            } else {
                ""
            })
            styles.push(
                ("color=").to_string()
                    + ["red", "blue", "orange", "green", "black"][(i >> 1) & 3]
                    + if flag.contains(EdgeFlags::DELETED_EDGE) {
                        ", style=dashed"
                    } else {
                        ""
                    }
                    + if flag.contains(EdgeFlags::PSEUDO_EDGE) {
                        ", style=dotted"
                    } else {
                        ""
                    },
            )
        }
        w.write(b"digraph{\n").unwrap();
        let branch = self.get_branch(branch_name).unwrap();

        let mut nodes = vec![ROOT_KEY];
        while let Some(k) = nodes.pop() {
            let cont = if let Some(cont) = self.get_contents(k) {
                let cont = cont.into_cow();
                let cont = &cont[..std::cmp::min(50, cont.len())];
                if cont.len() > 2 {
                    let (a, b) = cont.split_at(2);
                    let cont = format!("{:?}", std::str::from_utf8(b).unwrap());
                    let cont = &cont[1..(cont.len() - 1)];
                    format!("{} {}", hex::encode(a), cont)
                } else {
                    format!("{}", hex::encode(cont))
                }
            } else {
                "".to_string()
            };
            // remove the leading and trailing '"'.
            write!(
                w,
                "n_{}[label=\"{}.{}: {}\"];\n",
                k.to_hex(),
                k.patch.to_base58(),
                k.line.to_hex(),
                cont.replace("\n", "")
            )
            .unwrap();

            for child in self.iter_adjacent(&branch, k, EdgeFlags::empty(), EdgeFlags::all()) {
                let flag = child.flag.bits();
                write!(
                    w,
                    "n_{}->n_{}[{},label=\"{} {}\"];\n",
                    k.to_hex(),
                    &child.dest.to_hex(),
                    styles[(flag & 0xff) as usize],
                    flag,
                    child.introduced_by.to_base58()
                )
                .unwrap();
                if child.flag.contains(EdgeFlags::FOLDER_EDGE)
                    && !child.flag.contains(EdgeFlags::PARENT_EDGE)
                {
                    nodes.push(child.dest)
                }
            }
        }
        w.write(b"}\n").unwrap();
    }

    /// Is there an alive/pseudo edge from `a` to `b`.
    pub fn is_connected(&self, branch: &Branch, a: Key<PatchId>, b: Key<PatchId>) -> bool {
        self.test_edge(
            branch,
            a,
            b,
            EdgeFlags::empty(),
            EdgeFlags::PSEUDO_EDGE | EdgeFlags::FOLDER_EDGE,
        )
    }

    /// Is there an edge from `a` to `b` whose flags lie between `min` and `max`?
    pub fn test_edge(
        &self,
        branch: &Branch,
        a: Key<PatchId>,
        b: Key<PatchId>,
        min: EdgeFlags,
        max: EdgeFlags,
    ) -> bool {
        debug!("is_connected {:?} {:?}", a, b);
        let mut edge = Edge::zero(min);
        edge.dest = b;
        self.iter_nodes(&branch, Some((a, Some(edge))))
            .take_while(|&(k, v)| k == a && v.dest == b && v.flag <= max)
            .next()
            .is_some()
    }
}

/// Low-level operations on mutable transactions.
impl<'env, R: rand::Rng> MutTxn<'env, R> {

    /// Delete a branch, destroying its associated graph and patch set.
    pub fn drop_branch(&mut self, branch: &str) -> Result<bool> {
        let name = SmallString::from_str(branch);
        Ok(self.txn.del(
            &mut self.rng,
            &mut self.dbs.branches,
            name.as_small_str().to_unsafe(),
            None,
        )?)
    }

    /// Add a binding to the graph of a branch. All edges must be
    /// inserted twice, once in each direction, and this method only
    /// inserts one direction.
    pub fn put_edge_one_dir(
        &mut self,
        branch: &mut Branch,
        key: Key<PatchId>,
        edge: Edge,
    ) -> Result<bool> {
        debug!("put_nodes: {:?} {:?}", key, edge);
        Ok(self.txn.put(&mut self.rng, &mut branch.db, key, edge)?)
    }

    /// Same as `put_edge_one_dir`, but also adds the reverse edge.
    pub fn put_edge_both_dirs(
        &mut self,
        branch: &mut Branch,
        mut key: Key<PatchId>,
        mut edge: Edge,
    ) -> Result<bool> {
        self.put_edge_one_dir(branch, key, edge)?;
        std::mem::swap(&mut key, &mut edge.dest);
        edge.flag.toggle(EdgeFlags::PARENT_EDGE);
        self.put_edge_one_dir(branch, key, edge)
    }
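
    // A minimal sketch of `put_edge_both_dirs` in use: inserting a
    // pseudo-edge between two keys. The reverse direction is derived
    // inside that method by swapping the endpoints and toggling
    // `PARENT_EDGE`.
    #[allow(dead_code)]
    fn put_pseudo_edge(
        &mut self,
        branch: &mut Branch,
        from: Key<PatchId>,
        to: Key<PatchId>,
        introduced_by: PatchId,
    ) -> Result<bool> {
        let mut edge = Edge::zero(EdgeFlags::PSEUDO_EDGE);
        edge.dest = to;
        edge.introduced_by = introduced_by;
        self.put_edge_both_dirs(branch, from, edge)
    }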

    /// Delete an edge from a graph.
    pub fn del_edge_one_dir(
        &mut self,
        branch: &mut Branch,
        key: Key<PatchId>,
        edge: Edge,
    ) -> Result<bool> {
        Ok(self
            .txn
            .del(&mut self.rng, &mut branch.db, key, Some(edge))?)
    }

    /// Same as `del_edge_one_dir`, but also deletes the reverse edge.
    pub fn del_edge_both_dirs(
        &mut self,
        branch: &mut Branch,
        mut key: Key<PatchId>,
        mut edge: Edge,
    ) -> Result<bool> {
        self.del_edge_one_dir(branch, key, edge)?;
        std::mem::swap(&mut key, &mut edge.dest);
        edge.flag.toggle(EdgeFlags::PARENT_EDGE);
        self.del_edge_one_dir(branch, key, edge)
    }

    /// Add a file or directory into the tree database, with parent
    /// `key.parent_inode`, name `key.basename` and inode `Inode`
    /// (usually randomly generated, as `Inode`s have no relation
    /// with patches or branches).
    ///
    /// All bindings inserted here must have the reverse inserted into
    /// the revtree database. If `(key, edge)` is inserted here, then
    /// `(edge, key)` must be inserted into revtree.
    pub fn put_tree(&mut self, key: &FileId, edge: Inode) -> Result<bool> {
        Ok(self
            .txn
            .put(&mut self.rng, &mut self.dbs.tree, key.to_unsafe(), edge)?)
    }

    /// Delete a file or directory from the tree database. Similarly
    /// to the comments in the documentation of the `put_tree` method,
    /// the reverse binding must be deleted from the revtree database.
    pub fn del_tree(&mut self, key: &FileId, edge: Option<Inode>) -> Result<bool> {
        Ok(self
            .txn
            .del(&mut self.rng, &mut self.dbs.tree, key.to_unsafe(), edge)?)
    }

    /// Add a file into the revtree database (see the documentation of
    /// the `put_tree` method).
    pub fn put_revtree(&mut self, key: Inode, value: &FileId) -> Result<bool> {
        Ok(self
            .txn
            .put(&mut self.rng, &mut self.dbs.revtree, key, value.to_unsafe())?)
    }
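
    // A minimal sketch following the invariant stated above: `tree` and
    // `revtree` mirror each other, so an insertion normally touches both
    // tables (this assumes `Inode` is `Copy`, as its by-value use in this
    // file suggests).
    #[allow(dead_code)]
    fn put_tree_with_rev(&mut self, file_id: &FileId, inode: Inode) -> Result<()> {
        self.put_tree(file_id, inode)?;
        self.put_revtree(inode, file_id)?;
        Ok(())
    }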

    /// Delete a file from the revtree database (see the documentation
    /// of the `put_tree` method).
    pub fn del_revtree(&mut self, key: Inode, value: Option<&FileId>) -> Result<bool> {
        Ok(self.txn.del(
            &mut self.rng,
            &mut self.dbs.revtree,
            key,
            value.map(|e| e.to_unsafe()),
        )?)
    }

    /// Delete a binding from the `inodes` database, i.e. the
    /// correspondence between branch graphs and the file tree.
    ///
    /// All bindings in inodes must have their reverse in revinodes
    /// (without the `FileMetadata`). `del_revinodes` must be called
    /// immediately before or immediately after calling this method.
    pub fn del_inodes(&mut self, key: Inode, value: Option<FileHeader>) -> Result<bool> {
        Ok(self
            .txn
            .del(&mut self.rng, &mut self.dbs.inodes, key, value)?)
    }

    /// Replace a binding in the inodes database, or insert a new
    /// one if `key` doesn't exist yet in that database.
    ///
    /// All bindings in inodes must have their reverse inserted in
    /// revinodes (without the `FileMetadata`).
    pub fn replace_inodes(&mut self, key: Inode, value: FileHeader) -> Result<bool> {
        self.txn
            .del(&mut self.rng, &mut self.dbs.inodes, key, None)?;
        Ok(self
            .txn
            .put(&mut self.rng, &mut self.dbs.inodes, key, value)?)
    }

    /// Replace a binding in the revinodes database, or insert a new
    /// one if `key` doesn't exist yet in that database.
    ///
    /// All bindings in revinodes must have their reverse inserted
    /// inodes (with an extra `FileMetadata`).
    pub fn replace_revinodes(&mut self, key: Key<PatchId>, value: Inode) -> Result<bool> {
        self.txn
            .del(&mut self.rng, &mut self.dbs.revinodes, key, None)?;
        Ok(self
            .txn
            .put(&mut self.rng, &mut self.dbs.revinodes, key, value)?)
    }

    /// Delete a binding from the `revinodes` database, i.e. the
    /// correspondence between the file tree and branch graphs.
    ///
    /// All bindings in revinodes must have their reverse in inodes
    /// (with an extra `FileMetadata`). `del_inodes` must be called
    /// immediately before or immediately after calling this method.
    pub fn del_revinodes(&mut self, key: Key<PatchId>, value: Option<Inode>) -> Result<bool> {
        Ok(self
            .txn
            .del(&mut self.rng, &mut self.dbs.revinodes, key, value)?)
    }

    /// Add the contents of a line. Note that this table is common to
    /// all branches.
    pub fn put_contents(&mut self, key: Key<PatchId>, value: UnsafeValue) -> Result<bool> {
        Ok(self
            .txn
            .put(&mut self.rng, &mut self.dbs.contents, key, value)?)
    }

    /// Remove the contents of a line.
    pub fn del_contents(&mut self, key: Key<PatchId>, value: Option<UnsafeValue>) -> Result<bool> {
        Ok(self
            .txn
            .del(&mut self.rng, &mut self.dbs.contents, key, value)?)
    }



    /// Register the internal identifier of a patch. The
    /// `put_external` method must be called immediately after, or
    /// immediately before this method.
    pub fn put_internal(&mut self, key: HashRef, value: PatchId) -> Result<bool> {
        Ok(self.txn.put(
            &mut self.rng,
            &mut self.dbs.internal,
            key.to_unsafe(),
            value,
        )?)
    }

    /// Unregister the internal identifier of a patch. Remember to
    /// also unregister its external id.
    pub fn del_internal(&mut self, key: HashRef) -> Result<bool> {
        Ok(self
            .txn
            .del(&mut self.rng, &mut self.dbs.internal, key.to_unsafe(), None)?)
    }

    /// Register the external identifier of a patch. The `put_internal`
    /// method must be called immediately after, or immediately before
    /// this method.
    pub fn put_external(&mut self, key: PatchId, value: HashRef) -> Result<bool> {
        Ok(self.txn.put(
            &mut self.rng,
            &mut self.dbs.external,
            key,
            value.to_unsafe(),
        )?)
    }

    /// Unregister the external identifier of a patch. Remember to also
    /// unregister its internal id.
    pub fn del_external(&mut self, key: PatchId) -> Result<bool> {
        Ok(self
            .txn
            .del(&mut self.rng, &mut self.dbs.external, key, None)?)
    }
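
    // A minimal sketch of registering a new patch identifier: as the doc
    // comments above require, `put_internal` and `put_external` are called
    // back to back so the two tables stay in sync (this assumes `HashRef`
    // is `Copy`, as its by-value use in this file suggests).
    #[allow(dead_code)]
    fn register_patch_id(&mut self, hash: HashRef, internal: PatchId) -> Result<()> {
        self.put_internal(hash, internal)?;
        self.put_external(internal, hash)?;
        Ok(())
    }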

    /// Add a patch id to a branch. This doesn't apply the patch, it
    /// only registers it as applied. The `put_revpatches` method must be
    /// called on the same branch immediately before, or immediately
    /// after.
    pub fn put_patches(
        &mut self,
        branch: &mut PatchSet,
        value: PatchId,
        time: ApplyTimestamp,
    ) -> Result<bool> {
        Ok(self.txn.put(&mut self.rng, branch, value, time)?)
    }

    /// Delete a patch id from a branch. This doesn't unrecord the
    /// patch, it only removes it from the patch set. The
    /// `del_revpatches` method must be called on the same branch
    /// immediately before, or immediately after.
    pub fn del_patches(&mut self, branch: &mut PatchSet, value: PatchId) -> Result<bool> {
        Ok(self.txn.del(&mut self.rng, branch, value, None)?)
    }

    /// Add a patch id to a branch. This doesn't apply the patch, it
    /// only registers it as applied. The `put_patches` method must be
    /// called on the same branch immediately before, or immediately
    /// after.
    pub fn put_revpatches(
        &mut self,
        branch: &mut RevPatchSet,
        time: ApplyTimestamp,
        value: PatchId,
    ) -> Result<bool> {
        Ok(self.txn.put(&mut self.rng, branch, time, value)?)
    }
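
    // A minimal sketch of recording a patch as applied on a branch:
    // `put_patches` and `put_revpatches` are called together, once per
    // direction of the (patch, timestamp) binding, as their doc comments
    // require.
    #[allow(dead_code)]
    fn register_applied(
        &mut self,
        branch: &mut Branch,
        patch: PatchId,
        time: ApplyTimestamp,
    ) -> Result<()> {
        self.put_patches(&mut branch.patches, patch, time)?;
        self.put_revpatches(&mut branch.revpatches, time, patch)?;
        Ok(())
    }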

    /// Delete a patch id from a branch. This doesn't unrecord the
    /// patch, it only removes it from the patch set. The
    /// `del_patches` method must be called on the same branch
    /// immediately before, or immediately after.
    pub fn del_revpatches(
        &mut self,
        revbranch: &mut RevPatchSet,
        timestamp: ApplyTimestamp,
        value: PatchId,
    ) -> Result<bool> {
        Ok(self
            .txn
            .del(&mut self.rng, revbranch, timestamp, Some(value))?)
    }

    /// Register a reverse dependency. All dependencies of all patches
    /// applied on at least one branch must be registered in this
    /// database, i.e. if a depends on b, then `(b, a)` must be
    /// inserted here.
    pub fn put_revdep(&mut self, patch: PatchId, revdep: PatchId) -> Result<bool> {
        Ok(self
            .txn
            .put(&mut self.rng, &mut self.dbs.revdep, patch, revdep)?)
    }

    /// Register a dependency. All dependencies of all patches applied
    /// on at least one branch must be registered in this database,
    /// i.e. if a depends on b, then `(a, b)` must be inserted here.
    pub fn put_dep(&mut self, patch: PatchId, dep: PatchId) -> Result<bool> {
        Ok(self.txn.put(&mut self.rng, &mut self.dbs.dep, patch, dep)?)
    }

    /// Remove a reverse dependency. Only call this method when the
    /// patch with identifier `patch` is not applied to any branch.
    pub fn del_revdep(&mut self, patch: PatchId, revdep: Option<PatchId>) -> Result<bool> {
        Ok(self
            .txn
            .del(&mut self.rng, &mut self.dbs.revdep, patch, revdep)?)
    }

    /// Remove a dependency. Only call this method when the patch with
    /// identifier `patch` is not applied to any branch.
    pub fn del_dep(&mut self, patch: PatchId, dep: Option<PatchId>) -> Result<bool> {
        Ok(self.txn.del(&mut self.rng, &mut self.dbs.dep, patch, dep)?)
    }

    /// Add an edge to the cemetery.
    pub fn put_cemetery(&mut self, key: Key<PatchId>, edge: Edge, patch: PatchId) -> Result<bool> {
        Ok(self
            .txn
            .put(&mut self.rng, &mut self.dbs.cemetery, (key, edge), patch)?)
    }

    /// Delete an edge from the cemetery.
    pub fn del_cemetery(&mut self, key: Key<PatchId>, edge: Edge, patch: PatchId) -> Result<bool> {
        Ok(self.txn.del(
            &mut self.rng,
            &mut self.dbs.cemetery,
            (key, edge),
            Some(patch),
        )?)
    }

    /// Add the relation "patch `patch` touches file `file`".
    pub fn put_touched_file(&mut self, file: Key<PatchId>, patch: PatchId) -> Result<bool> {
        Ok(self
            .txn
            .put(&mut self.rng, &mut self.dbs.touched_files, file, patch)?)
    }

    /// Delete the binding recording that patch `patch` touches file `file`.
    pub fn del_touched_file(&mut self, file: Key<PatchId>, patch: PatchId) -> Result<bool> {
        Ok(self.txn.del(
            &mut self.rng,
            &mut self.dbs.touched_files,
            file,
            Some(patch),
        )?)
    }

    /// Add a partial path to a branch.
    pub fn put_partials(&mut self, name: &str, path: Key<PatchId>) -> Result<bool> {
        let name = small_string::SmallString::from_str(name);
        Ok(self.txn.put(
            &mut self.rng,
            &mut self.dbs.partials,
            name.as_small_str().to_unsafe(),
            path,
        )?)
    }

    /// Remove a partial path from a branch.
    pub fn del_partials(&mut self, name: &str) -> Result<bool> {
        let name = small_string::SmallString::from_str(name);
        let mut deleted = false;
        while self.txn.del(
            &mut self.rng,
            &mut self.dbs.partials,
            name.as_small_str().to_unsafe(),
            None,
        )? {
            deleted = true
        }
        Ok(deleted)
    }

    /// Allocate a string (to be inserted in the contents database).
    pub fn alloc_value(&mut self, slice: &[u8]) -> Result<UnsafeValue> {
        Ok(UnsafeValue::alloc_if_needed(&mut self.txn, slice)?)
    }




    partial_path: Option<&Path>,
    txn.apply_patches(branch_name, target, &patches, partial_path, apply_cb)?;
//! This crate contains the core API to access Pijul repositories.
//!
//! The key object is a `Repository`, on which `Txn` (immutable
//! transactions) and `MutTxn` (mutable transactions) can be started,
//! to perform a variety of operations.
//!
//! Another important object is a `Patch`, which encodes two different pieces of information:
//!
//! - Information about deleted and inserted lines between two versions of a file.
//!
//! - Information about file moves, additions and deletions.
//!
//! The standard layout of a repository is defined in module
//! `fs_representation`, and mainly consists of a directory called
//! `.pijul` at the root of the repository, containing:
//!
//! - a directory called `pristine`, containing a Sanakirja database
//! storing most of the repository information.
//!
//! - a directory called `patches`, containing the patches themselves,
//! where each patch is stored as a gzip-compressed bincode encoding of
//! the `patch::Patch` type.
//!
//! At the moment, users of this library, such as the Pijul
//! command-line tool, may use other files in the `.pijul` directory,
//! such as user preferences, or information about remote branches and
//! repositories.
#![recursion_limit = "128"]
#[macro_use]
extern crate bitflags;
extern crate chrono;
#[macro_use]
extern crate log;

extern crate base64;
extern crate bincode;
extern crate bs58;
extern crate byteorder;
extern crate flate2;
extern crate hex;
extern crate ignore;
extern crate openssl;
extern crate rand;
extern crate sanakirja;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate diffs;
extern crate failure;
extern crate sequoia_openpgp;
extern crate serde_json;
extern crate tempdir;
extern crate toml;

pub use sanakirja::Transaction;
use std::collections::HashSet;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;

#[derive(Debug)]
pub enum Error {
    IO(std::io::Error),
    Sanakirja(sanakirja::Error),
    Bincode(bincode::Error),
    Utf8(std::str::Utf8Error),
    Serde(serde_json::Error),
    OpenSSL(openssl::error::Error),
    OpenSSLStack(openssl::error::ErrorStack),
    Base58Decode(bs58::decode::DecodeError),
    Failure(failure::Error),
    AlreadyAdded,
    FileNotInRepo(PathBuf),
    NoDb(backend::Root),
    WrongHash,
    EOF,
    WrongPatchSignature,
    BranchNameAlreadyExists(String),
    WrongFileHeader(Key<PatchId>),
    FileNameCount(Key<PatchId>),
    MissingDependency(Hash),
    PatchNotOnBranch(PatchId),
    CannotAddDotPijul,
    KeyIsEncrypted,
}

impl std::convert::From<std::io::Error> for Error {
    fn from(e: std::io::Error) -> Self {
        Error::IO(e)
    }
}

impl std::convert::From<failure::Error> for Error {
    fn from(e: failure::Error) -> Self {
        Error::Failure(e)
    }
}

impl std::convert::From<sanakirja::Error> for Error {
    fn from(e: sanakirja::Error) -> Self {
        Error::Sanakirja(e)
    }
}

impl std::convert::From<bincode::Error> for Error {
    fn from(e: bincode::Error) -> Self {
        Error::Bincode(e)
    }
}

impl std::convert::From<serde_json::Error> for Error {
    fn from(e: serde_json::Error) -> Self {
        Error::Serde(e)
    }
}

impl std::convert::From<std::str::Utf8Error> for Error {
    fn from(e: std::str::Utf8Error) -> Self {
        Error::Utf8(e)
    }
}

impl std::convert::From<openssl::error::ErrorStack> for Error {
    fn from(e: openssl::error::ErrorStack) -> Self {
        Error::OpenSSLStack(e)
    }
}

impl std::convert::From<bs58::decode::DecodeError> for Error {
    fn from(e: bs58::decode::DecodeError) -> Self {
        Error::Base58Decode(e)
    }
}

pub type Result<A> = std::result::Result<A, Error>;

impl std::fmt::Display for Error {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        match *self {
            Error::IO(ref e) => e.fmt(fmt),
            Error::Sanakirja(ref e) => e.fmt(fmt),
            Error::Bincode(ref e) => e.fmt(fmt),
            Error::Utf8(ref e) => e.fmt(fmt),
            Error::Serde(ref e) => e.fmt(fmt),
            Error::OpenSSL(ref e) => e.fmt(fmt),
            Error::OpenSSLStack(ref e) => e.fmt(fmt),
            Error::Base58Decode(ref e) => e.fmt(fmt),
            Error::Failure(ref e) => e.fmt(fmt),
            Error::AlreadyAdded => write!(fmt, "Already added"),
            Error::FileNotInRepo(ref file) => write!(fmt, "File {:?} not tracked", file),
            Error::NoDb(ref e) => write!(fmt, "Table missing: {:?}", e),
            Error::WrongHash => write!(fmt, "Wrong hash"),
            Error::EOF => write!(fmt, "EOF"),
            Error::WrongPatchSignature => write!(fmt, "Wrong patch signature"),
            Error::BranchNameAlreadyExists(ref name) => {
                write!(fmt, "Branch {:?} already exists", name)
            }
            Error::WrongFileHeader(ref h) => write!(
                fmt,
                "Wrong file header (possible branch corruption): {:?}",
                h
            ),
            Error::FileNameCount(ref f) => {
                write!(fmt, "Name {:?} doesn't have exactly one child", f)
            }
            Error::MissingDependency(ref f) => write!(fmt, "Missing dependency: {:?}", f),
            Error::PatchNotOnBranch(ref f) => {
                write!(fmt, "The patch is not on this branch {:?}", f)
            }
            Error::CannotAddDotPijul => write!(fmt, "Cannot add a file or directory name .pijul"),
            Error::KeyIsEncrypted => write!(fmt, "Key is encrypted"),
        }
    }
}

impl std::error::Error for Error {
    fn description(&self) -> &str {
        match *self {
            Error::IO(ref e) => e.description(),
            Error::Sanakirja(ref e) => e.description(),
            Error::Bincode(ref e) => e.description(),
            Error::Utf8(ref e) => e.description(),
            Error::Serde(ref e) => e.description(),
            Error::OpenSSL(ref e) => e.description(),
            Error::OpenSSLStack(ref e) => e.description(),
            Error::Base58Decode(ref e) => e.description(),
            Error::Failure(ref e) => e.name().unwrap_or("Unknown Failure"),
            Error::AlreadyAdded => "Already added",
            Error::FileNotInRepo(_) => "File not tracked",
            Error::NoDb(_) => "One of the tables is missing",
            Error::WrongHash => "Wrong hash",
            Error::EOF => "EOF",
            Error::WrongPatchSignature => "Wrong patch signature",
            Error::BranchNameAlreadyExists(_) => "Branch name already exists",
            Error::WrongFileHeader(_) => "Wrong file header (possible branch corruption)",
            Error::FileNameCount(_) => "A file name doesn't have exactly one child",
            Error::MissingDependency(_) => "Missing dependency",
            Error::PatchNotOnBranch(_) => "The patch is not on this branch",
            Error::CannotAddDotPijul => "Cannot add a file or directory name .pijul",
            Error::KeyIsEncrypted => "Key is encrypted",
        }
    }
}

impl Error {
    pub fn lacks_space(&self) -> bool {
        match *self {
            Error::Sanakirja(sanakirja::Error::NotEnoughSpace) => true,
            _ => false,
        }
    }
}

#[macro_use]
mod backend;
mod file_operations;
pub mod fs_representation;

pub mod patch;
pub mod status;

pub mod apply;
mod conflict;
mod diff;
pub mod graph;
mod output;
mod record;
mod unrecord;

pub use backend::{
    ApplyTimestamp, Branch, Edge, EdgeFlags, FileId, FileMetadata, FileStatus, GenericTxn, Hash,
    HashRef, Inode, Key, LineId, MutTxn, OwnedFileId, PatchId, Repository, SmallStr, SmallString,
    Txn, DEFAULT_BRANCH, ROOT_INODE, ROOT_KEY,
};


>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
use fs_representation::{RepoRoot, ID_LENGTH};

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

================================
pub use fs_representation::{RepoRoot, RepoPath};
use fs_representation::{ID_LENGTH};

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
pub use output::{ConflictingFile, Prefixes, ToPrefixes};
pub use patch::{Patch, PatchHeader};
use rand::distributions::Alphanumeric;
use rand::Rng;
pub use record::{InodeUpdate, RecordState};
pub use sanakirja::value::Value;
use std::io::Read;

pub use diff::Algorithm as DiffAlgorithm;

impl<'env, T: rand::Rng> backend::MutTxn<'env, T> {
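    /// Write the `changes` file of `branch` in `fs_repo`: a random
    /// branch identifier on the first line, followed by one
    /// `hash:timestamp` line per patch applied to the branch.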
    pub fn output_changes_file<P: AsRef<Path>>(
        &mut self,
        branch: &Branch,
        fs_repo: &RepoRoot<P>,
    ) -> Result<()> {
        let changes_file = fs_repo.branch_changes_file(branch.name.as_str());
        let mut branch_id: Vec<u8> = vec![b'\n'; ID_LENGTH + 1];
        {
            if let Ok(mut file) = std::fs::File::open(&changes_file) {
                file.read_exact(&mut branch_id)?;
            }
        }
        let mut branch_id = if let Ok(s) = String::from_utf8(branch_id) {
            s
        } else {
            "\n".to_string()
        };
        if branch_id.as_bytes()[0] == b'\n' {
            branch_id.truncate(0);
            let mut rng = rand::thread_rng();
            branch_id.extend(rng.sample_iter(&Alphanumeric).take(ID_LENGTH));
            branch_id.push('\n');
        }

        let mut file = std::fs::File::create(&changes_file)?;
        file.write_all(&branch_id.as_bytes())?;
        for (s, hash) in self.iter_applied(&branch, None) {
            let hash_ext = self.get_external(hash).unwrap();
            writeln!(file, "{}:{}", hash_ext.to_base58(), s)?
        }
        Ok(())
    }

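    /// Return the set of `(hash, timestamp)` pairs of all patches applied to `branch`.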
    pub fn branch_patches(&mut self, branch: &Branch) -> HashSet<(backend::Hash, ApplyTimestamp)> {
        self.iter_patches(branch, None)
            .map(|(patch, time)| (self.external_hash(patch).to_owned(), time))
            .collect()
    }

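    /// Fork `branch` into a new branch called `new_name`, sharing its
    /// patches; fails if `new_name` is the name of `branch` itself.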
    pub fn fork(&mut self, branch: &Branch, new_name: &str) -> Result<Branch> {
        if branch.name.as_str() == new_name {
            Err(Error::BranchNameAlreadyExists(new_name.to_string()))
        } else {
            Ok(Branch {
                db: self.txn.fork(&mut self.rng, &branch.db)?,
                patches: self.txn.fork(&mut self.rng, &branch.patches)?,
                revpatches: self.txn.fork(&mut self.rng, &branch.revpatches)?,
                name: SmallString::from_str(new_name),
                apply_counter: branch.apply_counter,
            })
        }
    }
    
    pub fn add_file<P: AsRef<Path>>(&mut self, path: &RepoPath<P>, is_dir: bool) -> Result<()> {
        self.add_inode(None, path, is_dir)
    }

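    /// Fold `f` over the nodes reachable from `root` through outgoing
    /// folder edges, calling `f` only on the nodes found at even,
    /// non-zero depth of the traversal.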
    fn file_nodes_fold_<A, F: FnMut(A, Key<PatchId>) -> A>(
        &self,
        branch: &Branch,
        root: Key<PatchId>,
        level: usize,
        mut init: A,
        f: &mut F,
    ) -> Result<A> {
        for v in self
            .iter_adjacent(&branch, root, EdgeFlags::empty(), EdgeFlags::all())
            .take_while(|v| {
                v.flag.contains(EdgeFlags::FOLDER_EDGE) && !v.flag.contains(EdgeFlags::PARENT_EDGE)
            })
        {
            debug!("file_nodes_fold_: {:?} {:?}", root, v);
            if level & 1 == 0 && level > 0 {
                init = f(init, root)
            }
            init = self.file_nodes_fold_(branch, v.dest, level + 1, init, f)?
        }
        Ok(init)
    }

    pub fn file_nodes_fold<A, F: FnMut(A, Key<PatchId>) -> A>(
        &self,
        branch: &Branch,
        init: A,
        mut f: F,
    ) -> Result<A> {
        self.file_nodes_fold_(branch, ROOT_KEY, 0, init, &mut f)
    }
}

impl<T: Transaction, R> backend::GenericTxn<T, R> {
    /// Tells whether `key` is alive in `branch`, i.e. is either the
    /// root, or has at least one incoming edge that is neither deleted
    /// nor a pseudo-edge.
    pub fn is_alive(&self, branch: &Branch, key: Key<PatchId>) -> bool {
        debug!("is_alive {:?}?", key);
        let mut alive = key.is_root();
        let e = Edge::zero(EdgeFlags::PARENT_EDGE);
        for (k, v) in self.iter_nodes(&branch, Some((key, Some(e)))) {
            if k != key {
                break;
            }
            alive = alive
                || (!v.flag.contains(EdgeFlags::DELETED_EDGE)
                    && !v.flag.contains(EdgeFlags::PSEUDO_EDGE))
        }
        alive
    }

    /// Tells whether `key` is alive or a zombie in `branch`, i.e. is
    /// either the root, or has at least one incoming edge that is not
    /// deleted.
    pub fn is_alive_or_zombie(&self, branch: &Branch, key: Key<PatchId>) -> bool {
        debug!("is_alive_or_zombie {:?}?", key);
        if key == ROOT_KEY {
            return true;
        }
        let e = Edge::zero(EdgeFlags::PARENT_EDGE);
        for (k, v) in self.iter_nodes(&branch, Some((key, Some(e)))) {
            if k != key {
                break;
            }
            debug!("{:?}", v);
            if v.flag.contains(EdgeFlags::PARENT_EDGE) && !v.flag.contains(EdgeFlags::DELETED_EDGE)
            {
                return true;
            }
        }
        false
    }

    /// Test whether `key` has an adjacent edge whose flags are between
    /// `min` and `max`.
    pub fn has_edge(
        &self,
        branch: &Branch,
        key: Key<PatchId>,
        min: EdgeFlags,
        max: EdgeFlags,
    ) -> bool {
        let e = Edge::zero(min);
        if let Some((k, v)) = self.iter_nodes(&branch, Some((key, Some(e)))).next() {
            debug!("has_edge {:?}", v.flag);
            k == key && (v.flag <= max)
        } else {
            false
        }
    }

    /// Tells which paths (of folder nodes) a key is in.
    pub fn get_file<'a>(&'a self, branch: &Branch, key: Key<PatchId>) -> Vec<Key<PatchId>> {
        let mut stack = vec![key.to_owned()];
        let mut seen = HashSet::new();
        let mut names = Vec::new();
        loop {
            match stack.pop() {
                None => break,
                Some(key) if !seen.contains(&key) => {
                    debug!("key {:?}, None", key);
                    seen.insert(key.clone());

                    for v in self.iter_adjacent(branch, key, EdgeFlags::empty(), EdgeFlags::all()) {
                        debug!("all_edges: {:?}", v);
                    }
                    for v in
                        self.iter_adjacent(branch, key, EdgeFlags::PARENT_EDGE, EdgeFlags::all())
                    {
                        debug!("get_file {:?}", v);
                        if v.flag | EdgeFlags::PSEUDO_EDGE
                            == EdgeFlags::PARENT_EDGE | EdgeFlags::PSEUDO_EDGE
                        {
                            debug!("push!");
                            stack.push(v.dest.clone())
                        } else if v
                            .flag
                            .contains(EdgeFlags::PARENT_EDGE | EdgeFlags::FOLDER_EDGE)
                        {
                            names.push(key);
                        }
                    }
                }
                _ => {}
            }
        }
        debug!("get_file returning {:?}", names);
        names
    }

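    /// Compute the full name(s) under which the file node `key` is
    /// reachable, by climbing the folder graph up to the root and
    /// collecting the name components along the way.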
    pub fn get_file_names<'a>(
        &'a self,
        branch: &Branch,
        key: Key<PatchId>,
    ) -> Vec<(Key<PatchId>, Vec<&'a str>)> {
        let mut names = vec![(key, Vec::new())];
        debug!("inode: {:?}", names);
        // Go back to the root.
        let mut next_names = Vec::new();
        let mut only_roots = false;
        let mut inodes = HashSet::new();
        while !only_roots {
            next_names.clear();
            only_roots = true;
            for (inode, names) in names.drain(..) {
                if !inodes.contains(&inode) {
                    inodes.insert(inode.clone());

                    if inode != ROOT_KEY {
                        only_roots = false;
                    }
                    let names_ = self.file_names(branch, inode);
                    if names_.is_empty() {
                        next_names.push((inode, names));
                        break;
                    } else {
                        debug!("names_ = {:?}", names_);
                        for (inode_, _, base) in names_ {
                            let mut names = names.clone();
                            names.push(base);
                            next_names.push((inode_, names))
                        }
                    }
                }
            }
            std::mem::swap(&mut names, &mut next_names)
        }
        debug!("end: {:?}", names);
        for &mut (_, ref mut name) in names.iter_mut() {
            name.reverse()
        }
        names
    }
}

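/// Load the given patches from the `patches` directory of `target`,
/// returning them together with an upper bound on the pristine size
/// increase caused by applying them.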
fn make_remote<'a, I: Iterator<Item = &'a Hash>>(
    target: &fs_representation::RepoRoot<impl AsRef<Path>>,
    remote: I,
) -> Result<(Vec<(Hash, Patch)>, usize)> {
    use fs_representation::*;
    use std::fs::File;
    use std::io::BufReader;
    let mut patches = Vec::new();
    let mut patches_dir = target.patches_dir();
    let mut size_increase = 0;

    for h in remote {
        patches_dir.push(&patch_file_name(h.as_ref()));

        debug!("opening {:?}", patches_dir);
        let file = try!(File::open(&patches_dir));
        let mut file = BufReader::new(file);
        let (h, _, patch) = Patch::from_reader_compressed(&mut file)?;

        size_increase += patch.size_upper_bound();
        patches.push((h.clone(), patch));

        patches_dir.pop();
    }
    Ok((patches, size_increase))
}

/// Apply a number of patches, guessing the new repository size.  If
/// this fails, the repository size is guaranteed to have been
/// increased by at least some pages, and it is safe to call this
/// function again.
///
/// Also, this function takes a file lock on the repository.
pub fn apply_resize<'a, I, F, P: output::ToPrefixes>(
    diff_algorithm: diff::Algorithm,
    target: &fs_representation::RepoRoot<impl AsRef<Path>>,
    branch_name: &str,
    remote: I,
    partial_paths: P,
    apply_cb: F,
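
// Illustrative sketch, not part of this patch: the calling pattern
// described by the doc comment above. We assume here that `apply_resize`
// (wrapped in a closure) returns this crate's `Result`; when it fails
// for lack of space, the pristine has already grown, so calling it
// again makes progress.
fn retry_until_space<T>(mut apply: impl FnMut() -> Result<T>) -> Result<T> {
    loop {
        match apply() {
            // The map has already been grown by the failed call; retry.
            Err(ref e) if e.lacks_space() => continue,
            other => return other,
        }
    }
}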
use std::collections::{HashMap, HashSet};
pub trait Prefix {
    fn is_related(&self, inode: Option<Inode>, path: &Path) -> Related;
}

impl<'a> Prefix for &'a [&'a Path] {
    fn is_related(&self, _: Option<Inode>, path: &Path) -> Related {
        if self.is_empty() {
            Related::Exact
        } else {
            for &pref in self.iter() {
                if pref == path {
                    return Related::Exact;
                } else if pref.starts_with(path) {
                    return Related::Ancestor;
                }
            }
            Related::No
        }
    }
}
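
// Illustrative sketch, not part of this patch: how the path-based
// `Prefix` impl above classifies candidates. The function and the
// literal paths are made up; an empty prefix list makes everything
// `Exact`.
#[allow(dead_code)]
fn prefix_example() {
    use std::path::Path;
    let prefixes: &[&Path] = &[Path::new("a/b")];
    assert_eq!(prefixes.is_related(None, Path::new("a/b")), Related::Exact);
    assert_eq!(prefixes.is_related(None, Path::new("a")), Related::Ancestor);
    assert_eq!(prefixes.is_related(None, Path::new("c")), Related::No);
}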

impl<'a> Prefix for &'a [Vec<Inode>] {
    fn is_related(&self, inode: Option<Inode>, _: &Path) -> Related {
        if self.is_empty() {
            return Related::Exact
        } else if let Some(inode) = inode {
            debug!("is_related {:?} {:?}", self, inode);
            for pref in self.iter() {
                let mut first = true;
                for ancestor in pref.iter() {
                    if inode == *ancestor {
                        if first {
                            return Related::Exact;
                        } else {
                            return Related::Ancestor;
                        }
                    }
                    first = false
        Related::No
    fn collect_children<P: Prefix + Copy>(
        base_path: &mut PathBuf,
        prefixes: P,
        files: &mut HashMap<PathBuf, HashMap<Key<PatchId>, OutputItem>>,
            .take_while(|&(k, b)| {
                k == key
                    && b.flag
                        <= EdgeFlags::FOLDER_EDGE | EdgeFlags::PSEUDO_EDGE | EdgeFlags::EPSILON_EDGE
            }) {
            let (_, b_key) = self.iter_nodes(&branch, Some((b.dest, Some(&e))))
                return Err(ErrorKind::WrongFileHeader(b.dest).into());
            let name = path.join(basename);
            base_path.push(&basename);
            let related = prefixes.is_related(b_inode, &base_path);
            base_path.pop();
                        let e = Edge::zero(
                            EdgeFlags::FOLDER_EDGE | EdgeFlags::PARENT_EDGE
                                | EdgeFlags::DELETED_EDGE,
                        );
        prefixes: &[&Path],
    ) -> Result<Vec<PathBuf>> {
        let mut base_path = PathBuf::new();
            "".as_ref(),
            prefixes,
                                &a,
                                prefixes,
    fn output_alive_files<P: Prefix + Copy>(
        prefixes: P,
        let mut base_path = PathBuf::new();
            "".as_ref(),
                    let mut name = if b_len > 1
                    base_path.push(Path::new(file_name.as_ref()));
                            self.collect_children::<&[&Path]>(
                                &name,
                                &[][..],
                                &name,
                        debug!(
    fn output_repository_assuming_no_pending_patch<P: Prefix + Copy>(
        prefixes: P,
        working_copy: &Path,
    pub fn output_repository<P: Prefix + Copy>(
        prefixes: P,
        let internal = self.apply_local_patch(
            branch_name,
            working_copy,
            &hash,
            pending,
            local_pending,
            true,
        )?;
            prefixes,
            &mut branch,
        prefixes: &[&Path],
            prefixes,
            &mut branch,
use backend::*;
use graph;
use patch::*;
use rand;
use record::InodeUpdate;
use std;
use std::borrow::Cow;
use std::collections::{HashMap, HashSet};
use std::fs;
use std::path::{Path, PathBuf};
use tempdir;
use {Error, Result};

use super::fs_representation::{RepoRoot, RepoPath, in_repo_root};

#[cfg(not(windows))]
use std::os::unix::fs::PermissionsExt;

#[cfg(not(windows))]
fn set_permissions(name: &Path, permissions: u16) -> Result<()> {
    let metadata = std::fs::metadata(&name)?;
    let mut current = metadata.permissions();
    debug!("setting mode for {:?} to {:?} (currently {:?})",
           name, permissions, current);
    debug!(
        "setting mode for {:?} to {:?} (currently {:?})",
        name, permissions, current
    );
    current.set_mode(permissions as u32);
    std::fs::set_permissions(name, current)?;
    Ok(())
}

#[cfg(windows)]
fn set_permissions(_name: &Path, _permissions: u16) -> Result<()> {
    Ok(())
}

#[derive(Debug)]
struct OutputItem {
    parent: Inode,
    meta: FileMetadata,
    key: Key<PatchId>,
    inode: Option<Inode>,
    is_zombie: bool,
    related: Related,
}

#[derive(Debug, PartialEq, Eq)]
pub enum Related {
    No,
    Ancestor,
    Exact,
}

pub struct ConflictingFile {
    pub inode: Inode,
    pub n_conflicts: usize,
    pub path: RepoPath<PathBuf>,
}

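/// Relate `key` to `prefixes`: with no prefixes everything is `Exact`;
/// otherwise the result is `Exact` if `key` is the first element of
/// some prefix chain, `Ancestor` if it appears later in one, and `No`
/// if it appears in none.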
fn is_related(prefixes: &Prefixes, key: Key<PatchId>) -> Related {
    if prefixes.0.is_empty() {
        return Related::Exact;
    }
    for pref in prefixes.0.iter() {
        let mut is_first = true;
        for &p in pref {
            if p == key {
                if is_first {
                    return Related::Exact;
                } else {
                    return Related::Ancestor;
                }
            }
            is_first = false
        }
    }
    Related::No
}

impl<'env, T: rand::Rng> MutTxn<'env, T> {
    // Climp up the tree (using revtree).
    // Climb up the tree (using revtree).
    fn filename_of_inode(&self, inode: Inode, working_copy: &Path) -> Option<PathBuf> {
        let mut components = Vec::new();
        let mut current = inode;
        loop {
            match self.get_revtree(current) {
                Some(v) => {
                    components.push(v.basename.to_owned());
                    current = v.parent_inode.clone();
                    if current == ROOT_INODE {
                        break;
                    }
                }
                None => {
                    debug!("filename_of_inode: not in tree");
                    return None;
                }
            }
        }
        let mut working_copy = working_copy.to_path_buf();
        for c in components.iter().rev() {
            working_copy.push(c.as_small_str().as_str());
        }
        Some(working_copy)
    }


    /// Collect all the children of key `key` into `files`.
    pub fn collect_children(
    fn collect_children(
        &mut self,
        branch: &Branch,
        path: RepoPath<&Path>,
        key: Key<PatchId>,
        inode: Inode,
        prefixes: Option<&HashSet<PathBuf>>,
        files: &mut HashMap<PathBuf,
                            HashMap<Key<PatchId>,
                                    (Inode,
                                     FileMetadata,
                                     Key<PatchId>,
                                     Option<Inode>,
                                     bool)>
                            >) -> Result<()> {

        base_path: &RepoPath<impl AsRef<Path> + std::fmt::Debug>,
        prefixes: &Prefixes,
        files: &mut HashMap<RepoPath<PathBuf>, HashMap<Key<PatchId>, OutputItem>>,
    ) -> Result<()> {
        debug!("collect_children {:?}", base_path);
        let f = EdgeFlags::FOLDER_EDGE | EdgeFlags::PSEUDO_EDGE | EdgeFlags::EPSILON_EDGE;
        for b in self.iter_adjacent(&branch, key, EdgeFlags::empty(), f) {
            debug!("b={:?}", b);
            let cont_b = self.get_contents(b.dest).unwrap();
            let (_, b_key) = self
                .iter_nodes(
                    &branch,
                    Some((b.dest, Some(Edge::zero(EdgeFlags::FOLDER_EDGE)))),
                )
                .next()
                .unwrap();
            let b_inode = self.get_revinodes(b_key.dest);

                let perms = FileMetadata::from_contents(perms);
                let basename = std::str::from_utf8(basename).unwrap();
                debug!("filename: {:?} {:?}", perms, basename);
                let name = path.join(basename);
                if let Some(prefixes) = prefixes {
                    debug!("{:?} {:?}", prefixes, name);
                    if !prefixes.iter().any(|x| {
                        // are x and name related?
                        let b_inode = if let Some(b) = b_inode { b } else { return false };
                        let x_inode = if let Ok(inode) = self.find_inode(&x) { inode } else { return false };
                        self.inode_is_ancestor_of(x_inode, b_inode)
                            || self.inode_is_ancestor_of(b_inode, x_inode)
                    }) {
                        // None of the prefixes start with name, abandon this name.
                        continue
                    }
                }
            // This is supposed to be a small string, so we can do
            // as_slice.
            if cont_b.as_slice().len() < 2 {
                error!("cont_b {:?} b.dest {:?}", cont_b, b.dest);
                return Err(Error::WrongFileHeader(b.dest));
            }
            let (perms, basename) = cont_b.as_slice().split_at(2);

            let perms = FileMetadata::from_contents(perms);
            let basename = std::str::from_utf8(basename).unwrap();
            debug!("filename: {:?} {:?}", perms, basename);
            let name = path.join(Path::new(basename));
            let related = is_related(&prefixes, b_key.dest);
            debug!("related {:?} = {:?}", base_path, related);
            if related != Related::No {
                let v = files.entry(name).or_insert(HashMap::new());
                if v.get(&b.dest).is_none() {

                    let is_zombie = {
                        let e = Edge::zero(EdgeFlags::FOLDER_EDGE|EdgeFlags::PARENT_EDGE|EdgeFlags::DELETED_EDGE);
                        let f = EdgeFlags::FOLDER_EDGE
                            | EdgeFlags::PARENT_EDGE
                            | EdgeFlags::DELETED_EDGE;
                        self.iter_adjacent(&branch, b_key.dest, f, f)
                            .next()
                            .is_some()
                    };
                    debug!("is_zombie = {:?}", is_zombie);
                    v.insert(b.dest, (inode, perms, b_key.dest, b_inode, is_zombie));
                    v.insert(
                        b.dest,
                        OutputItem {
                            parent: inode,
                            meta: perms,
                            key: b_key.dest,
                            inode: b_inode,
                            is_zombie,
                            related,
                        },
                    );
                }
            }
        }
        Ok(())
    }

    /// Collect names of files with conflicts
    ///
    /// As conflicts have an internal representation, it can be determined
    /// exactly which files contain conflicts.
    pub fn list_conflict_files(&mut self,
                               branch_name: &str,
                               prefixes: Option<&HashSet<PathBuf>>) -> Result<Vec<PathBuf>> {
    pub fn list_conflict_files(
        &mut self,
        branch_name: &str,
        prefixes: &[RepoPath<&Path>],
    ) -> Result<Vec<RepoPath<PathBuf>>> {
        let mut files = HashMap::new();
        let mut next_files = HashMap::new();
        let branch = self.open_branch(branch_name)?;
        self.collect_children(&branch, "".as_ref(), ROOT_KEY, ROOT_INODE, prefixes, &mut files)?;
        let mut base_path = in_repo_root();
        let prefixes = prefixes.to_prefixes(self, &branch);
        self.collect_children(
            &branch,
            in_repo_root(),
            ROOT_KEY,
            ROOT_INODE,
            &mut base_path,
            &prefixes,
            &mut files,
        )?;

        let mut ret = vec!();
        let mut ret = vec![];
        let mut forward = Vec::new();
        while !files.is_empty() {
            next_files.clear();
            for (a, b) in files.drain() {
                for (_, (_, meta, inode_key, inode, is_zombie)) in b {
                for (_, output_item) in b {
                    // (_, meta, inode_key, inode, is_zombie)
                    // Only bother with existing files
                    if let Some(inode) = inode {
                        if is_zombie {
                    if let Some(inode) = output_item.inode {
                        if output_item.is_zombie {
                            ret.push(a.clone())
                        }
                        if meta.is_dir() {
                            self.collect_children(&branch, &a, inode_key, inode, prefixes, &mut next_files)?;
                        if output_item.meta.is_dir() {
                            self.collect_children(
                                &branch,
                                a.as_ref(),
                                output_item.key,
                                inode,
                                &mut base_path,
                                &prefixes,
                                &mut next_files,
                            )?;
                        } else {
                            let mut graph = self.retrieve(&branch, inode_key);
                            let mut graph = self.retrieve(&branch, output_item.key);
                            let mut buf = graph::Writer::new(std::io::sink());

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
                            let n_conflicts =
                                self.output_file(&branch, &mut buf, &mut graph, &mut forward)?;
                            if n_conflicts > 0 {

================================

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
                            if self.output_file(&branch, &mut buf, &mut graph, &mut forward)? {

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
                                ret.push(a.clone())
                            }
                        }
                    }
                }
            }
            std::mem::swap(&mut files, &mut next_files);
        }
        Ok(ret)
    }


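    /// Append the base58 id of the patch that introduced `name_key` to
    /// the file name, so that conflicting names can coexist in the
    /// working copy.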
    fn make_conflicting_name(&self, name: &mut RepoPath<PathBuf>, name_key: Key<PatchId>) {
        let basename = {
            let basename = name.file_name().unwrap().to_string_lossy();
            format!("{}.{}", basename, &name_key.patch.to_base58())
        };
        name.set_file_name(std::ffi::OsStr::new(&basename));
    }

    fn output_alive_files(&mut self, branch: &mut Branch, prefixes: Option<&HashSet<PathBuf>>, working_copy: &Path) -> Result<()> {
    fn output_alive_files(
        &mut self,
        branch: &mut Branch,
        prefixes: &Prefixes,
        working_copy: &Path,
        conflicts: &mut Vec<ConflictingFile>,
    ) -> Result<()> {
        debug!("working copy {:?}", working_copy);
        let mut files = HashMap::new();
        let mut next_files = HashMap::new();
        self.collect_children(branch, "".as_ref(), ROOT_KEY, ROOT_INODE, prefixes, &mut files)?;
        let mut base_path = RepoPath(PathBuf::new());
        self.collect_children(
            branch,
            in_repo_root(),
            ROOT_KEY,
            ROOT_INODE,
            &mut base_path,
            prefixes,
            &mut files,
        )?;

        let mut done = HashSet::new();
        while !files.is_empty() {
            debug!("files {:?}", files);
            next_files.clear();
            for (a, b) in files.drain() {
                let b_len = b.len();
                for (name_key, (parent_inode, meta, inode_key, inode, is_zombie)) in b {

                for (name_key, output_item) in b {
                    // (parent_inode, meta, inode_key, inode, is_zombie)
                    /*let has_several_names = {
                        let e = Edge::zero(EdgeFlags::PARENT_EDGE | EdgeFlags::FOLDER_EDGE);
                        let mut it = self.iter_nodes(branch, Some((inode_key, Some(&e))))
                            .take_while(|&(k, v)| {
                                k == inode_key && v.flag|EdgeFlags::PSEUDO_EDGE == e.flag|EdgeFlags::PSEUDO_EDGE
                            });
                        it.next();
                        it.next().is_some()
                    };*/
                    if !done.insert(inode_key) {
                        debug!("already done {:?}", inode_key);
                        continue
                        continue
                    if !done.insert(output_item.key) {
                        debug!("already done {:?}", output_item.key);
                        continue;
                    }

                    let mut name = if b_len > 1 /*|| has_several_names*/ {
                    let name = if b_len > 1
                    /*|| has_several_names*/
                    {
                        // debug!("b_len = {:?}, has_several_names {:?}", b_len, has_several_names);
                        let mut name = a.clone();
                        self.make_conflicting_name(&mut name, name_key);
                        Cow::Owned(name.0)
                    } else {
                        Cow::Borrowed(a.as_path())
                    };
                    let file_name = name.file_name().unwrap().to_string_lossy();
                    base_path.push(&file_name);
                    let file_id = OwnedFileId {
                        parent_inode: parent_inode,
                        basename: SmallString::from_str(&file_name)
                        parent_inode: output_item.parent,
                        basename: SmallString::from_str(&file_name),
                    };
                    let working_copy_name = working_copy.join(name.as_ref());

                    let status = if is_zombie {
                    let status = if output_item.is_zombie {
                        FileStatus::Zombie
                    } else {
                        FileStatus::Ok
                    };

                    let inode = if let Some(inode) = inode {
                    let inode = if let Some(inode) = inode {
                    let inode = if let Some(inode) = output_item.inode {
                        // If the file already exists, find its
                        // current name and rename it if that name
                        // is different.
                        if let Some(ref current_name) = self.filename_of_inode(inode, "".as_ref()) {
                            if current_name != name.as_ref() {
                                let current_name = working_copy.join(current_name);
                                debug!("renaming {:?} to {:?}", current_name, working_copy_name);
                                let parent = self.get_revtree(inode).unwrap().to_owned();
                                self.del_revtree(inode, None)?;
                                self.del_tree(&parent.as_file_id(), None)?;

                                debug!("file_id: {:?}", file_id);
                                if let Some(p) = working_copy_name.parent() {
                                    std::fs::create_dir_all(p)?
                                }
                                if let Err(e) = std::fs::rename(&current_name, &working_copy_name) {
                                    error!("while renaming {:?} to {:?}: {:?}", current_name, working_copy_name, e)
                                    error!(
                                        "while renaming {:?} to {:?}: {:?}",
                                        current_name, working_copy_name, e
                                    )
                                }
                            }
                        }
                        self.put_tree(&file_id.as_file_id(), inode)?;
                        self.put_revtree(inode, &file_id.as_file_id())?;
                        // If the file had been marked for deletion, remove that mark.
                        if let Some(header) = self.get_inodes(inode) {
                            debug!("header {:?}", header);
                            let mut header = header.to_owned();
                            header.status = status;
                            self.replace_inodes(inode, header)?;
                        } else {
                            let header = FileHeader {
                                key: inode_key,
                                metadata: meta,
                                key: output_item.key,
                                metadata: output_item.meta,
                                status,
                            };
                            debug!("no header {:?}", header);
                            self.replace_inodes(inode, header)?;
                            self.replace_revinodes(inode_key, inode)?;
                            self.replace_revinodes(output_item.key, inode)?;
                        }
                        inode
                    } else {
                        // Else, create new inode.
                        let inode = self.create_new_inode();
                        let file_header = FileHeader {
                            key: inode_key,
                            metadata: meta,
                            key: output_item.key,
                            metadata: output_item.meta,
                            status,
                        };
                        self.replace_inodes(inode, file_header)?;
                        self.replace_revinodes(inode_key, inode)?;
                        self.replace_revinodes(output_item.key, inode)?;
                        debug!("file_id: {:?}", file_id);
                        self.put_tree(&file_id.as_file_id(), inode)?;
                        self.put_revtree(inode, &file_id.as_file_id())?;
                        inode
                    };
                    if meta.is_dir() {
                    if output_item.meta.is_dir() {
                        // This is a directory, register it in inodes/trees.
                        std::fs::create_dir_all(&working_copy_name)?;
                        self.collect_children(branch, &name, inode_key, inode, prefixes, &mut next_files)?;
                        if let Related::Exact = output_item.related {
                            self.collect_children(
                                branch,
                                RepoPath(name.as_ref()),
                                output_item.key,
                                inode,
                                &mut base_path,
                                &Prefixes(Vec::new()),
                                &mut next_files,
                            )?
                        } else {
                            self.collect_children(
                                branch,
                                RepoPath(name.as_ref()),
                                output_item.key,
                                inode,
                                &mut base_path,
                                prefixes,
                                &mut next_files,
                            )?
                        }
                    } else {
                        // Output file.
                        debug!("creating file {:?}", &name);
                        info!(
                            "creating file {:?}, key {:?} {:?}",
                            &name, output_item.key, working_copy_name
                        );
                        let mut f =
                            graph::Writer::new(std::fs::File::create(&working_copy_name).unwrap());
                        debug!("done");
                        let mut l = self.retrieve(branch, output_item.key);
                        if log_enabled!(log::Level::Debug) {
                            let mut w = working_copy_name.clone();
                            w.set_extension("pijul_debug");
                            let f = std::fs::File::create(&w)?;
                            l.debug(self, branch, false, false, f)?;
                        }
                        let mut forward = Vec::new();
                        let n_conflicts = self.output_file(branch, &mut f, &mut l, &mut forward)?;
                        if n_conflicts > 0 {
                            conflicts.push(ConflictingFile {
                                inode,
                                n_conflicts,
                                path: RepoPath(name.to_path_buf()),
                            })
                        }
                        self.remove_redundant_edges(branch, &forward)?
                    }

                    set_permissions(&working_copy_name, meta.permissions())?
                    base_path.pop();
                    set_permissions(&working_copy_name, output_item.meta.permissions())?
                }
            }
            std::mem::swap(&mut files, &mut next_files);
        }
        Ok(())
    }


    pub fn output_repository_assuming_no_pending_patch(&mut self,
                                                       prefixes: Option<&HashSet<PathBuf>>,
                                                       branch: &mut Branch,
                                                       working_copy: &Path,
                                                       pending_patch_id: PatchId)
                                                       -> Result<()> {


        debug!("inodes: {:?}",
               self.iter_inodes(None)
               .map(|(u, v)| (u.to_owned(), v.to_owned()))
               .collect::<Vec<_>>()
    fn output_repository_assuming_no_pending_patch(
        &mut self,
        prefixes: &Prefixes,
        branch: &mut Branch,
        working_copy: &RepoRoot<impl AsRef<Path>>,
        pending_patch_id: PatchId,
        conflicts: &mut Vec<ConflictingFile>,
    ) -> Result<()> {
        debug!(
            "inodes: {:?}",
            self.iter_inodes(None)
                .map(|(u, v)| (u.to_owned(), v.to_owned()))
                .collect::<Vec<_>>()
        );
        // Now, garbage collect dead inodes.
        let dead: Vec<_> = self
            .iter_tree(None)
            .filter_map(|(k, v)| {
                debug!("{:?} {:?}", k, v);
                if let Some(key) = self.get_inodes(v) {
                    if key.key.patch == pending_patch_id || self.is_alive_or_zombie(branch, key.key) {
                    if key.key.patch == pending_patch_id || self.is_alive_or_zombie(branch, key.key)
                    {
                        // Don't delete.
                        None
                    } else {
                        Some((
                            k.to_owned(),
                            v,
                            self.filename_of_inode(v, working_copy.repo_root.as_ref()),
                        ))
                    }
                } else {
                    debug!("not in inodes");
                    Some((k.to_owned(), v, None))
                }
            })
            .collect();
        debug!("dead: {:?}", dead);


        // Now, "kill the deads"
        for (ref parent, inode, ref name) in dead {
            self.remove_inode_rec(inode)?;
            debug!("removed");
            if let Some(ref name) = *name {
                debug!("deleting {:?}", name);
                if let Ok(meta) = fs::metadata(name) {
                    if let Err(e) = if meta.is_dir() {
                        fs::remove_dir_all(name)
                    } else {
                        fs::remove_file(name)
                    } {
                        error!("while deleting {:?}: {:?}", name, e);
                    }
                }
            } else {
                self.del_tree(&parent.as_file_id(), Some(inode))?;
                self.del_revtree(inode, Some(&parent.as_file_id()))?;
            }
        }
        debug!("done deleting dead files");
        // Then output alive files. This has to be done *after*
        // removing files, because a removed file might have the
        // same name as an added file without there being a conflict
        // (depending on the relation between the two patches).
        self.output_alive_files(branch, prefixes, working_copy.repo_root.as_ref(), conflicts)?;
        debug!("done raw_output_repository");
        Ok(())
    }

    fn remove_inode_rec(&mut self, inode: Inode) -> Result<()> {
        // Remove the inode from inodes/revinodes.
        let mut to_kill = vec![inode];
        while let Some(inode) = to_kill.pop() {
            debug!("kill dead {:?}", inode.to_hex());
            let header = self.get_inodes(inode).map(|x| x.to_owned());
            if let Some(header) = header {
                self.del_inodes(inode, None)?;
                self.del_revinodes(header.key, None)?;
                let mut kills = Vec::new();
                // Remove the inode from tree/revtree.
                for (k, v) in self
                    .iter_revtree(Some((inode, None)))
                    .take_while(|&(k, _)| k == inode)
                {
                    kills.push((k.clone(), v.to_owned()))
                }
                for &(k, ref v) in kills.iter() {
                    self.del_tree(&v.as_file_id(), Some(k))?;
                    self.del_revtree(k, Some(&v.as_file_id()))?;
                }
                // If the dead is a directory, remove its descendants.
                let inode_fileid = OwnedFileId {
                    parent_inode: inode.clone(),
                    basename: SmallString::from_str(""),
                };
                to_kill.extend(
                    self.iter_tree(Some((&inode_fileid.as_file_id(), None)))
                        .take_while(|&(ref k, _)| k.parent_inode == inode)
                        .map(|(_, v)| v.to_owned())
                        .map(|(_, v)| v.to_owned()),
                )
            }
        }
        Ok(())
    }

    pub fn output_repository(
    pub fn output_repository(
        &mut self,
        branch: &mut Branch,
        working_copy: &RepoRoot<impl AsRef<Path>>,
        prefixes: &Prefixes,
        pending: &Patch,
        local_pending: &HashSet<InodeUpdate>
        local_pending: &HashSet<InodeUpdate>,
    ) -> Result<Vec<ConflictingFile>> {
        debug!("begin output repository");

        debug!("applying pending patch");
        let tempdir = tempdir::TempDir::new("pijul")?;
        let hash = pending.save(tempdir.path(), None)?;
        let internal = self.apply_local_patch(branch_name, working_copy, &hash, pending, local_pending, true)?;
        let internal =
            self.apply_local_patch(branch, working_copy, &hash, pending, local_pending, true)?;

        debug!("applied as {:?}", internal.to_base58());

        // let prefixes = prefixes.to_prefixes(&self, &branch);
        debug!("prefixes {:?}", prefixes);
        let mut conflicts = Vec::new();
        self.output_repository_assuming_no_pending_patch(
            &prefixes,
            branch,
            working_copy,
            internal,
            &mut conflicts,
        )?;

        debug!("unrecording pending patch");
        self.unrecord(branch, internal, pending)?;
        Ok(conflicts)
    }

    pub fn output_repository_no_pending(
        &mut self,
        branch: &mut Branch,
        working_copy: &RepoRoot<impl AsRef<Path>>,
        prefixes: &Prefixes,
    ) -> Result<Vec<ConflictingFile>> {
        debug!("begin output repository {:?}", prefixes);
        debug!("prefixes {:?}", prefixes);
        let mut conflicts = Vec::new();
        self.output_repository_assuming_no_pending_patch(
            &prefixes,
            branch,
            working_copy,
            ROOT_PATCH_ID,
            &mut conflicts,
        )?;

                return Err(ErrorKind::WrongHash.into());

                apply_resize(&opts.repo_root, &opts.branch(), remote.iter(), None, |_, _| {})


                        show_help,
            }
use getch;
use libpijul::patch::{Change, ChangeContext, Patch, PatchHeader, Record};
use std::io::prelude::*;

use std::collections::{HashMap, HashSet};
use std::ffi::OsString;
use std::io::stdout;
use std::path::{Path, PathBuf};
use std::rc::Rc;

use regex::Regex;

use commands::pretty_repo_path;
use libpijul::fs_representation::{RepoPath, RepoRoot, PIJUL_DIR_NAME};

use atty;
use error::Error;
use libpijul::{EdgeFlags, Hash, LineId, MutTxn, PatchId};
use rand;
use std;
use std::char::from_u32;
use std::fs::{remove_file, File};
use std::process;
use std::str;
use term;
use term::{Attr, StdoutTerminal};

use ignore::gitignore::GitignoreBuilder;
use line;

const BINARY_CONTENTS: &'static str = "<binary contents>";
#[derive(Clone, Copy)]
pub enum Command {
    Pull,
    Push,
    Unrecord,
}

impl Command {
    fn verb(&self) -> &'static str {
        match *self {
            Command::Push => "push",
            Command::Pull => "pull",
            Command::Unrecord => "unrecord",
        }
    }
}

fn print_section(term: &mut Option<Box<StdoutTerminal>>, title: &str, contents: &str) {
    if let Some(ref mut term) = *term {
        term.attr(Attr::Bold).unwrap_or(());
    }
    let mut stdout = std::io::stdout();
    write!(stdout, "{}", title).unwrap_or(());
    if let Some(ref mut term) = *term {
        term.reset().unwrap_or(());
    }
    writeln!(stdout, "{}", contents).unwrap_or(());
}

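/// Strip ASCII escape characters (0x1b) from `f`, so that patch
/// metadata cannot inject terminal escape sequences.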
fn remove_escape_codes(f: &str) -> std::borrow::Cow<str> {
    if f.as_bytes().contains(&27) {
        std::borrow::Cow::Owned(f.chars().filter(|&c| c != 27 as char).collect())
    } else {
        std::borrow::Cow::Borrowed(f)
    }
}

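/// Print a patch header to stdout: hash, optional internal id, authors,
/// timestamp, name and description, with section titles in bold when
/// stdout is a terminal.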
pub fn print_patch_descr(
    term: &mut Option<Box<StdoutTerminal>>,
    hash: &Hash,
    internal: Option<PatchId>,
    patch: &PatchHeader,
) {
    print_section(term, "Hash:", &format!(" {}", &hash.to_base58()));
    if let Some(internal) = internal {
        print_section(term, "Internal id:", &format!(" {}", &internal.to_base58()));
    }

    print_section(
        term,
        "Authors:",
        &format!(" {}", remove_escape_codes(&patch.authors.join(", "))),
    );
    print_section(term, "Timestamp:", &format!(" {}", patch.timestamp));

    let is_tag = if !patch.flag.is_empty() { "TAG: " } else { "" };

    let mut stdout = std::io::stdout();
    writeln!(
        stdout,
        "\n    {}{}",
        is_tag,
        remove_escape_codes(&patch.name)
    )
    .unwrap_or(());
    if let Some(ref d) = patch.description {
        writeln!(stdout, "").unwrap_or(());
        let d = remove_escape_codes(d);
        for descr_line in d.lines() {
            writeln!(stdout, "    {}", descr_line).unwrap_or(());
        }
    }
    writeln!(stdout, "").unwrap_or(());
}

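/// Return `Some(decision)` when the user's previous answers already
/// force a decision for patch `a`: for pull and push, selecting a
/// patch selects its dependencies and unselecting a dependency
/// unselects its dependents; unrecord works the other way around.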
fn check_forced_decision(
    command: Command,
    choices: &HashMap<&Hash, bool>,
    rev_dependencies: &HashMap<&Hash, Vec<&Hash>>,
    a: &Hash,
    b: &Patch,
) -> Option<bool> {
    let covariant = match command {
        Command::Pull | Command::Push => true,
        Command::Unrecord => false,
    };
    // If we've selected patches that depend on a, and this is a pull
    // or a push, select a.
    if let Some(x) = rev_dependencies.get(a) {
        for y in x {
            // Here, y depends on a.
            //
            // If this command is covariant, and we've selected y, select a.
            // If this command is covariant, and we've unselected y, don't do anything.
            //
            // If this command is contravariant, and we've selected y, don't do anything.
            // If this command is contravariant, and we've unselected y, unselect a.
            if let Some(&choice) = choices.get(y) {
                if choice == covariant {
                    return Some(covariant);
                }
            }
        }
    };

    // If we've unselected dependencies of a, unselect a.
    for y in b.dependencies().iter() {
        // Here, a depends on y.
        //
        // If this command is covariant, and we've selected y, don't do anything.
        // If this command is covariant, and we've unselected y, unselect a.
        //
        // If this command is contravariant, and we've selected y, select a.
        // If this command is contravariant, and we've unselected y, don't do anything.

        if let Some(&choice) = choices.get(&y) {
            if choice != covariant {
                return Some(!covariant);
            }
        }
    }

    None
}
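// Illustrative sketch (not part of the recorded patch): assuming a patch `y`
// that depends on a patch `a`, the table encoded by `check_forced_decision` is:
//
//   Pull / Push (covariant):
//     - selecting `y` forces `a` to be selected (a pulled patch needs its deps);
//     - unselecting `a` forces `y` to be unselected.
//   Unrecord (contravariant):
//     - selecting `a` for unrecord forces `y` to be unrecorded as well;
//     - keeping `y` recorded forces its dependency `a` to stay recorded.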

fn interactive_ask(
    getch: &getch::Getch,
    a: &Hash,
    patchid: Option<PatchId>,
    b: &Patch,
    command_name: Command,
    show_help: bool,
) -> Result<(char, Option<bool>), Error> {
    let mut term = if atty::is(atty::Stream::Stdout) {
        term::stdout()
    } else {
        None
    };
    print_patch_descr(&mut term, a, patchid, b);

    if show_help {
        display_help(command_name);
        print!("Shall I {} this patch? ", command_name.verb());
    } else {
        print!("Shall I {} this patch? [ynkad?] ", command_name.verb());
    }

    stdout().flush()?;
    match getch.getch().ok().and_then(|x| from_u32(x as u32)) {
        Some(e) => {
            println!("{}", e);
            let e = e.to_uppercase().next().unwrap_or('\0');
            match e {
                'A' => Ok(('Y', Some(true))),
                'D' => Ok(('N', Some(false))),
                e => Ok((e, None)),
            }
        }
        _ => Ok(('\0', None)),
    }
}

fn display_help(c: Command) {
    println!("Available options: ynkad?");
    println!("y: {} this patch", c.verb());
    println!("n: don't {} this patch", c.verb());
    println!("k: go bacK to the previous patch");
    println!("a: {} all remaining patches", c.verb());
    println!("d: finish, skipping all remaining patches");
    println!("")
}

/// Patches might have a dummy "changes" field here.
pub fn ask_patches(
    command: Command,
    patches: &[(Hash, Option<PatchId>, Patch)],
) -> Result<Vec<Hash>, Error> {
    let getch = getch::Getch::new();
    let mut i = 0;

    // Record of the user's choices.
    let mut choices: HashMap<&Hash, bool> = HashMap::new();

    // For each patch, the list of patches that depend on it.
    let mut rev_dependencies: HashMap<&Hash, Vec<&Hash>> = HashMap::new();

    // Decision for the remaining patches ('a' or 'd'), if any.
    let mut final_decision = None;
    let mut show_help = false;

    while i < patches.len() {
        let (ref a, patchid, ref b) = patches[i];
        let forced_decision = check_forced_decision(command, &choices, &rev_dependencies, a, b);

        // Is the decision already forced by a previous choice?
        let e = match final_decision.or(forced_decision) {
            Some(true) => 'Y',
            Some(false) => 'N',
            None => {
                debug!("decision not forced");
                let (current, remaining) =
                    interactive_ask(&getch, a, patchid, b, command, show_help)?;
                final_decision = remaining;
                current
            }
        };

        show_help = false;

        debug!("decision: {:?}", e);
        match e {
            'Y' => {
                choices.insert(a, true);
                match command {
                    Command::Pull | Command::Push => {
                        for ref dep in b.dependencies().iter() {
                            let d = rev_dependencies.entry(dep).or_insert(vec![]);
                            d.push(a)
                        }
                    }
                    Command::Unrecord => {}
                }
                i += 1
            }
            'N' => {
                choices.insert(a, false);
                match command {
                    Command::Unrecord => {
                        for ref dep in b.dependencies().iter() {
                            let d = rev_dependencies.entry(dep).or_insert(vec![]);
                            d.push(a)
                        }
                    }
                    Command::Pull | Command::Push => {}
                }
                i += 1
            }
            'K' if i > 0 => {
                let (ref a, _, _) = patches[i];
                choices.remove(a);
                i -= 1
            }
            '?' => {
                show_help = true;
            }
            _ => {}
        }
    }
    Ok(patches
        .into_iter()
        .filter_map(|&(ref hash, _, _)| {
            if let Some(true) = choices.get(hash) {
                Some(hash.to_owned())
            } else {
                None
            }
        })
        .collect())
}

/// Compute the dependencies of this change.
fn change_deps(
    id: usize,
    c: &Record<ChangeContext<Hash>>,
    provided_by: &mut HashMap<LineId, usize>,
) -> HashSet<LineId> {
    let mut s = HashSet::new();
    for c in c.iter() {
        match *c {
            Change::NewNodes {
                ref up_context,
                ref down_context,
                ref line_num,
                ref nodes,
                ..
            } => {
                for cont in up_context.iter().chain(down_context) {
                    if cont.patch.is_none() && !cont.line.is_root() {
                        s.insert(cont.line.clone());
                    }
                }
                for i in 0..nodes.len() {
                    provided_by.insert(*line_num + i, id);
                }
            }
            Change::NewEdges { ref edges, .. } => {
                for e in edges {
                    if e.from.patch.is_none() && !e.from.line.is_root() {
                        s.insert(e.from.line.clone());
                    }
                    if e.to.patch.is_none() && !e.to.line.is_root() {
                        s.insert(e.to.line.clone());
                    }
                }
            }
        }
    }
    s
}


>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
fn print_record<T: rand::Rng>(
    repo_root: &RepoRoot<impl AsRef<Path>>,

================================

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
fn print_change<T: rand::Rng>(

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    term: &mut Option<Box<StdoutTerminal>>,
    cwd: &Path,
    repo: &MutTxn<T>,
    current_file: &mut Option<Rc<RepoPath<PathBuf>>>,
    c: &Record<ChangeContext<Hash>>,
) -> Result<(), Error> {
    match *c {
        Record::FileAdd {
            ref name,
            ref contents,
            ..
        } => {
            if let Some(ref mut term) = *term {
                term.fg(term::color::CYAN).unwrap_or(());
            }
            print!("added file ");
            if let Some(ref mut term) = *term {
                term.reset().unwrap_or(());
            }
            println!("{}", pretty_repo_path(repo_root, name, cwd).display());
            if let Some(ref change) = contents {
                print_change(term, repo, 0, 0, change)?;
            }
            Ok(())
        }
        Record::FileDel {
            ref name,
            ref contents,
            ..
        } => {
            if let Some(ref mut term) = *term {
                term.fg(term::color::MAGENTA).unwrap_or(());
            }
            print!("deleted file: ");
            if let Some(ref mut term) = *term {
                term.reset().unwrap_or(());
            }
            println!("{}", pretty_repo_path(repo_root, name, cwd).display());
            if let Some(ref change) = contents {
                print_change(term, repo, 0, 0, change)?;
            }
            Ok(())
        }
        Record::FileMove { ref new_name, .. } => {
            if let Some(ref mut term) = *term {
                term.fg(term::color::YELLOW).unwrap_or(());
            }
            print!("file moved to: ");
            if let Some(ref mut term) = *term {
                term.reset().unwrap_or(());
            }
            println!("{}", pretty_repo_path(repo_root, new_name, cwd).display());
            Ok(())
        }
        Record::Change {
            ref change,
            ref replacement,
            ref file,
            old_line,
            new_line,
            ..
        } => {

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
                            }

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
                            if let Some(ref mut term) = *term {

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
            let mut file_changed = true;
            if let Some(ref cur_file) = *current_file {
                if file == cur_file {
                    file_changed = false;
                }
            }
            if file_changed {
                if let Some(ref mut term) = *term {
                    term.attr(Attr::Bold).unwrap_or(());
                    term.attr(Attr::Underline(true)).unwrap_or(());
                }
                println!("{}", pretty_repo_path(repo_root, file, cwd).display());
                if let Some(ref mut term) = *term {
                    term.reset().unwrap_or(());
                }
                *current_file = Some(file.clone())
            }

            print_change(term, repo, old_line, new_line, change)?;
            if let Some(ref c) = *replacement {
                print_change(term, repo, old_line, new_line, c)?
            }
            Ok(())
        }
    }
}

fn print_change<T: rand::Rng>(
    term: &mut Option<Box<StdoutTerminal>>,
    repo: &MutTxn<T>,
    old_line: usize,
    new_line: usize,
    change: &Change<ChangeContext<Hash>>,
) -> Result<(), Error> {
    match *change {
        Change::NewNodes {
            // ref up_context,ref down_context,ref line_num,
            ref flag,
            ref nodes,
            ..
        } => {
            if flag.contains(EdgeFlags::FOLDER_EDGE) {
                for n in nodes {
                    if n.len() >= 2 {
                        if let Some(ref mut term) = *term {
                            term.fg(term::color::CYAN).unwrap_or(());
                        }
                        print!("new file ");
                        if let Some(ref mut term) = *term {
                            term.reset().unwrap_or(());
                        }
                        println!("{}", str::from_utf8(&n[2..]).unwrap_or(""));
                    }
                }
            } else {
                if new_line > 0 {
                    println!("From line {}\n", new_line);
                }

                for n in nodes {
                    let s = str::from_utf8(n).unwrap_or(BINARY_CONTENTS);
                    if let Some(ref mut term) = *term {
                        term.fg(term::color::GREEN).unwrap_or(());
                    }
                    print!("+ ");
                    if let Some(ref mut term) = *term {
                        term.reset().unwrap_or(());
                    }
                    if s.ends_with("\n") {
                        print!("{}", s);
                    } else {
                        println!("{}", s);
                    }
                }
            }
            Ok(())
        }
        Change::NewEdges {
            ref edges, flag, ..
        } => {
            let mut h_targets = HashSet::with_capacity(edges.len());

            if old_line > 0 {
                println!("From line {}\n", old_line);
            }
            for e in edges {
                let (target, flag) = if !flag.contains(EdgeFlags::PARENT_EDGE) {
                    if h_targets.insert(&e.to) {
                        (Some(&e.to), flag)
                    } else {
                        (None, flag)
                    }
                } else {
                    if h_targets.insert(&e.from) {
                        (Some(&e.from), flag)
                    } else {
                        (None, flag)
                    }
                };
                if let Some(target) = target {
                    let internal = repo.internal_key_unwrap(target);
                    let l = repo.get_contents(internal).unwrap();
                    let l = l.into_cow();
                    let s = str::from_utf8(&l).unwrap_or(BINARY_CONTENTS);

                    if flag.contains(EdgeFlags::DELETED_EDGE) {
                        if let Some(ref mut term) = *term {
                            term.fg(term::color::RED).unwrap_or(());
                        }
                        print!("- ");
                    } else {
                        if let Some(ref mut term) = *term {
                            term.fg(term::color::GREEN).unwrap_or(());
                        }
                        print!("+ ");
                    }
                    if let Some(ref mut term) = *term {
                        term.reset().unwrap_or(());
                    }
                    if s.ends_with("\n") {
                        print!("{}", s)
                    } else {
                        println!("{}", s)
                    }
                }
            }
            Ok(())
        }
    }
}

#[derive(Clone, Copy, Debug)]
pub enum ChangesDirection {
    Record,
    Revert,
}

impl ChangesDirection {
    fn is_record(&self) -> bool {
        match *self {
            ChangesDirection::Record => true,
            _ => false,
        }
    }
    fn verb(&self) -> &str {
        match *self {
            ChangesDirection::Record => "record",
            ChangesDirection::Revert => "revert",
        }
    }
}

fn display_help_changes(
    potential_new_ignore: Option<&RepoPath<impl AsRef<Path>>>,
    direction: ChangesDirection,
) {
    println!("Available options:");
    println!("y: {} this change", direction.verb());
    println!("n: don't {} this change", direction.verb());
    println!(
        "f: {} the rest of the changes to this file",
        direction.verb()
    );
    println!(
        "s: don't {} the rest of the changes to this file",
        direction.verb()
    );
    println!("k: go back to the previous change");
    println!("a: {} all remaining changes", direction.verb());
    println!("d: skip all remaining changes");
    match potential_new_ignore {
        Some(filename) => println!("i: ignore file {}", filename.display()),
        None => (),
    }
    println!("")
}

fn prompt_one_change<T: rand::Rng>(
    repository: &MutTxn<T>,
    repo_root: &RepoRoot<impl AsRef<Path>>,
    change: &Record<ChangeContext<Hash>>,
    current_file: &mut Option<Rc<RepoPath<PathBuf>>>,
    n_changes: usize,
    i: usize,
    direction: ChangesDirection,
    potential_new_ignore: Option<&RepoPath<impl AsRef<Path>>>,
    terminal: &mut Option<Box<StdoutTerminal>>,
    getch: &getch::Getch,
    cwd: &Path,
    show_help: bool,
) -> Result<(char, Option<char>, Option<char>), Error> {
    debug!("changes: {:?}", change);
    print_record(repo_root, terminal, cwd, repository, current_file, &change)?;
    println!("");
    let choices = if potential_new_ignore.is_some() {
        "[ynsfkadi?]"
    } else {
        "[ynsfkad?]"
    };
    if show_help {
        display_help_changes(potential_new_ignore, direction);
        print!("Shall I {} this change? ({}/{}) ",
               direction.verb(),
               i + 1,
               n_changes);
        print!(
            "Shall I {} this change? ({}/{}) ",
            direction.verb(),
            i + 1,
            n_changes
        );
    } else {
        print!("Shall I {} this change? ({}/{}) {} ",
               direction.verb(),
               i + 1,
               n_changes,
               choices);
        print!(
            "Shall I {} this change? ({}/{}) {} ",
            direction.verb(),
            i + 1,
            n_changes,
            choices
        );
    }
    stdout().flush()?;
    match getch.getch().ok().and_then(|x| from_u32(x as u32)) {
        Some(e) => {
            println!("{}\n", e);
            let e = e.to_uppercase().next().unwrap_or('\0');
            match e {
                'A' => Ok(('Y', Some('Y'), None)),
                'D' => Ok(('N', Some('N'), None)),
                'F' => Ok(('Y', None, Some('Y'))),
                'S' => Ok(('N', None, Some('N'))),
                e => Ok((e, None, None)),
            }
        }
        _ => Ok(('\0', None, None)),
    }
}
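// Note on the return value (added for clarity, not part of the recorded patch):
// the tuple is (decision for the current change, optional decision applied to
// all remaining changes, optional decision applied to the remaining changes in
// the same file). For instance 'a' yields ('Y', Some('Y'), None) and
// 'f' yields ('Y', None, Some('Y')).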

fn add_to_ignore_file(
    file: &RepoPath<impl AsRef<Path>>,
    repo_root: &RepoRoot<impl AsRef<Path>>,
    new_ignored_patterns: &mut Vec<String>,
    new_ignore_builder: &mut GitignoreBuilder,
) {
    loop {
        let pat = read_line_with_suggestion(
            "Pattern to add to ignore file (relative to repository root, empty to add nothing)? ",
            &file.as_path().to_string_lossy(),
        );
        if pat.is_empty() {
            return;
        };

        let mut ignore_builder = GitignoreBuilder::new(&repo_root.repo_root);
        let add_ok = match ignore_builder.add_line(None, &pat) {
            Ok(i) => match i.build() {
                Ok(i) => i
                    .matched_path_or_any_parents(file.as_path(), false)
                    .is_ignore(),
                Err(e) => {
                    println!("could not match pattern {}: {}", &pat, e);
                    false
                }
            },
            Err(e) => {
                println!("did not understand pattern {}: {}", &pat, e);
                false
            }
        };
        if add_ok {
            new_ignore_builder.add_line(None, &pat).unwrap();
            new_ignored_patterns.push(pat);
            return;
        }
        println!(
            "pattern {} is incorrect or does not match {}",
            pat,
            &file.display()
        );
    }
}

pub fn ask_changes<T: rand::Rng>(
    repository: &MutTxn<T>,
    repo_root: &RepoRoot<impl AsRef<Path>>,
    cwd: &Path,
    changes: &[Record<ChangeContext<Hash>>],
    direction: ChangesDirection,
    to_unadd: &mut HashSet<RepoPath<PathBuf>>,
) -> Result<(HashMap<usize, bool>, Vec<String>), Error> {
    debug!("changes: {:?}", changes);
    let mut terminal = if atty::is(atty::Stream::Stdout) {
        term::stdout()
    } else {
        None
    };
    let getch = getch::Getch::new();
    let mut i = 0;
    let mut choices: HashMap<usize, bool> = HashMap::new();
    let mut new_ignored_patterns: Vec<String> = Vec::new();
    let mut new_ignore_builder = GitignoreBuilder::new(&repo_root.repo_root);
    let mut final_decision = None;
    // Determines what decision to make on the remaining changes in the same file.
    let mut file_decision: Option<char> = None;
    let mut provided_by = HashMap::new();
    let mut line_deps = Vec::with_capacity(changes.len());
    for i in 0..changes.len() {
        line_deps.push(change_deps(i, &changes[i], &mut provided_by));
    }
    let mut deps: HashMap<usize, Vec<usize>> = HashMap::new();
    let mut rev_deps: HashMap<usize, Vec<usize>> = HashMap::new();
    for i in 0..changes.len() {
        for dep in line_deps[i].iter() {
            debug!("provided: i {}, dep {:?}", i, dep);
            let p = provided_by.get(dep).unwrap();
            debug!("provided: p= {}", p);

            let e = deps.entry(i).or_insert(Vec::new());
            e.push(*p);

            let e = rev_deps.entry(*p).or_insert(Vec::new());
            e.push(i);
        }
    }

    let empty_deps = Vec::new();
    let mut current_file = None;
    let mut show_help = false;

    while i < changes.len() {
        let decision =
            // If one of our dependencies has been unselected (with "n")
            if deps.get(&i)
            .unwrap_or(&empty_deps)
            .iter()
            .any(|x| { ! *(choices.get(x).unwrap_or(&true)) }) {
                Some(false)
            } else if rev_deps.get(&i).unwrap_or(&empty_deps)
            .iter().any(|x| { *(choices.get(x).unwrap_or(&false)) }) {
                // If we are a dependency of someone selected (with "y").
                Some(true)
            } else {
                None
            };

        let decision = match changes[i] {
            Record::FileAdd { ref name, .. } => {
                let i = new_ignore_builder.build().unwrap();
                if i.matched_path_or_any_parents(name.as_path(), false)
                    .is_ignore()
                {
                    Some(false)
                } else {
                    None
                }
            }
            _ => decision,
        };
        let potential_new_ignore: Option<&RepoPath<PathBuf>> = match direction {
            ChangesDirection::Revert => None,
            ChangesDirection::Record => match changes[i] {
                Record::FileAdd { ref name, .. } => Some(&name),
                _ => None,
            },
        };
        let (e, f, file_d) = match decision {
            Some(true) => ('Y', final_decision, file_decision),
            Some(false) => ('N', final_decision, file_decision),
            None => {
                if let Some(d) = final_decision {
                    (d, Some(d), file_decision)
                } else {
                    let command_decisions = if let Some(ref f) = current_file {
                        file_decision.and_then(|d| match changes[i] {
                            Record::Change { ref file, .. } => {
                                if f == file {
                                    Some((d, final_decision, Some(d)))
                                } else {
                                    None
                                }
                            }
                            _ => None,
                        })
                    } else {
                        None
                    };

                    if let Some(res) = command_decisions {
                        res
                    } else {
                        prompt_one_change(
                            repository,
                            repo_root,
                            &changes[i],
                            &mut current_file,
                            changes.len(),
                            i,
                            direction,
                            potential_new_ignore,
                            &mut terminal,
                            &getch,
                            cwd,
                            show_help,
                        )?
                    }
                }
            }
        };

        show_help = false;

        final_decision = f;
        file_decision = file_d;
        match e {
            'Y' => {
                choices.insert(i, direction.is_record());
                match changes[i] {
                    Record::FileAdd { ref name, .. } => {
                        to_unadd.remove(&name);
                    }
                    _ => (),
                }
                i += 1
            }
            'N' => {
                choices.insert(i, !direction.is_record());
                i += 1
            }
            'K' if i > 0 => {
                choices.remove(&i);
                i -= 1
            }
            'I' => match potential_new_ignore {
                Some(file) => {
                    add_to_ignore_file(
                        file,
                        repo_root,
                        &mut new_ignored_patterns,
                        &mut new_ignore_builder,
                    );
                    choices.insert(i, !direction.is_record());
                    i += 1;
                }
                _ => {}
            },
            '?' => {
                show_help = true;
            }
            _ => {}

use std::path::Path;
        .arg(Arg::with_name("path")
             .long("path")
             .help("Partial path to check out.")
             .takes_value(true)
        )
        if let Some(partial) = args.value_of("path") {
            txn.output_repository(
                &branch,
                &opts.repo_root,
                &[Path::new(partial)][..],
                &UnsignedPatch::empty().leave_unsigned(),
                &HashSet::new()
            )?
        } else {
            txn.output_repository::<&[&Path]>(
                &branch,
                &opts.repo_root,
                &[][..],
                &UnsignedPatch::empty().leave_unsigned(),
                &HashSet::new()
            )?
        }
use clap::{Arg, ArgMatches, SubCommand};

use super::{default_explain, BasicOptions, StaticSubcommand};
use error::Error;
use libpijul::fs_representation::{in_repo_root, RepoPath};
use libpijul::patch::UnsignedPatch;
use libpijul::{FileStatus, RecordState, ToPrefixes};
use rand;
use std::collections::HashSet;
use std::path::Path;

pub fn invocation() -> StaticSubcommand {
    return SubCommand::with_name("checkout")
        .about("Change the current branch")
        .arg(
            Arg::with_name("repository")
                .long("repository")
                .help("Local repository.")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("branch")
                .help("Branch to switch to.")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("path")
                .long("path")
                .help("Partial path to check out.")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("force")
                .short("f")
                .long("force")
                .takes_value(false)
                .help("Only verify that there are no unrecorded files moves, deletions or additions (ignores unrecorded changes in files). Much faster on large repositories."),
        );
}
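// Example invocations (illustrative only, using the flags declared above;
// paths and branch names are made up):
//
//   pijul checkout testing                # switch to the "testing" branch
//   pijul checkout testing --path src     # partial checkout of the src/ subtree
//   pijul checkout -f master              # only check for unrecorded moves/adds/deletes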

pub fn run(args: &ArgMatches) -> Result<(), Error> {
    let opts = BasicOptions::from_args(args)?;
    if let Some(branch) = args.value_of("branch") {
        checkout(
            &opts,
            branch,
            args.is_present("force"),
            args.value_of("path").map(|p| RepoPath(Path::new(p))),
        )
    } else {
        Err(Error::NoSuchBranch)
    }
}

pub fn checkout(
    opts: &BasicOptions,
    branch: &str,
    force: bool,
    partial_path: Option<RepoPath<&Path>>,
) -> Result<(), Error> {
    let mut force = force;
    let mut provision = 409600;

    loop {
        match try_checkout(opts, branch, force, provision, partial_path) {
            Err(ref e) if e.lacks_space() => {
                provision = provision * 2;
                force = true;
            }
            x => return x,
        }
    }
}

pub fn try_checkout(
    opts: &BasicOptions,
    branch_name: &str,
    force: bool,
    provision: u64,
    partial_path: Option<RepoPath<&Path>>,
) -> Result<(), Error> {
    let repo = opts.open_and_grow_repo(provision)?;
    let mut txn = repo.mut_txn_begin(rand::thread_rng())?;
    let current_branch = opts.repo_root.get_current_branch()?;
    // We need to check at least that there are no file
    // moves/additions/deletions, because these would be
    // overwritten by the checkout, sometimes causing Pijul to
    // panic.
    if force {
        // Check whether there are file moves.
        if txn
            .iter_inodes(None)
            .any(|(_, ch)| ch.status != FileStatus::Ok)
        {
            return Err(Error::PendingChanges);
        }
    } else {
        // Check whether there are more general changes.
        let mut record = RecordState::new();
        let current_branch = txn.open_branch(&current_branch)?;
        txn.record(
            libpijul::DiffAlgorithm::default(),
            &mut record,
            &current_branch,
            &opts.repo_root,
            &in_repo_root(),
        )?;
        txn.commit_branch(current_branch)?;
        let (changes, _) = record.finish();

        txn.output_repository(
            &branch,
            &opts.repo_root,
            None,
            &UnsignedPatch::empty().leave_unsigned(),
            &HashSet::new()
        )?;

use clap::{SubCommand, ArgMatches, Arg};
use commands::{assert_no_containing_repo, create_repo, default_explain, set_current_branch,
               StaticSubcommand};
use error::{Error, ErrorKind};
use std::path::Path;
            Arg::with_name("remote_path")
                .help("Only pull patches relative to that patch.")
                .takes_value(true),
    pub from_path: Option<&'a str>,
    let from_path = args.value_of("from_path");
            let pushable = to_session.pushable_patches(
                args.from_branch,
                args.to_branch,
                path,
                args.from_path,
            )?;
                    let partial = args.from_path.map(Path::new);
                    let mut pullable:Vec<_> = session.pullable_patches(
                        path,
                        args.from_path
                    )?.iter().collect();
                    session.pull(path, args.to_branch, &mut pullable, partial, true)?;
            writeln!(
                stderr(),
                "error: Cannot clone onto / into existing repository {}",
                p.display()
            ).unwrap();
use clap::{Arg, ArgMatches, SubCommand};
use commands::remote::{parse_remote, Remote};
use commands::{assert_no_containing_repo, create_repo, default_explain, StaticSubcommand};
use error::Error;
use libpijul::fs_representation::{RepoPath, RepoRoot};
use libpijul::{Hash, DEFAULT_BRANCH};
use regex::Regex;
use std::io::{stderr, Write};
use std::path::Path;
use std::process::exit;
use tempfile::tempdir_in;

pub fn invocation() -> StaticSubcommand {
    return SubCommand::with_name("clone")
        .about("Clone a remote branch")
        .arg(
            Arg::with_name("from")
                .help("Repository to clone.")
                .required(true),
        )
        .arg(
            Arg::with_name("from_branch")
                .long("from-branch")
                .help("The branch to pull from")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("to_branch")
                .long("to-branch")
                .help("The branch to pull into")
                .takes_value(true),
        )
        .arg(Arg::with_name("to").help("Target."))
        .arg(Arg::with_name("port")
            .short("p")
            .long("port")
            .help("Port of the remote ssh server.")
            .takes_value(true)
            .validator(|val| {
                let x: Result<u16, _> = val.parse();
                match x {
                    Ok(_) => Ok(()),
                    Err(_) => Err(val),
                }
            }));
        .arg(
            Arg::with_name("from_path")
                .long("path")
                .help("Only pull patches relative to that path.")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("patch")
                .long("patch")
                .help("Pull a patch and its dependencies.")
                .takes_value(true)
                .multiple(true),
        )
        .arg(
            Arg::with_name("port")
                .short("p")
                .long("port")
                .help("Port of the remote ssh server.")
                .takes_value(true)
                .validator(|val| {
                    let x: Result<u16, _> = val.parse();
                    match x {
                        Ok(_) => Ok(()),
                        Err(_) => Err(val),
                    }
                }),
        );
}
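// Example invocations (illustrative only, using the flags declared above;
// the remote and paths are placeholders):
//
//   pijul clone <remote> my-copy
//   pijul clone --path src/commands <remote>      # partial clone of one subtree
//   pijul clone --patch <HASH> <remote>           # a patch and its dependencies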
#[derive(Debug)]
pub struct Params<'a> {
    pub from: Remote<'a>,
    pub from_branch: &'a str,
    pub from_path: Vec<RepoPath<&'a Path>>,
    pub to: Remote<'a>,
    pub to_branch: &'a str,
}

pub fn parse_args<'a>(args: &'a ArgMatches) -> Params<'a> {
    // At least one must not use its "port" argument
    let from = parse_remote(args.value_of("from").unwrap(),
                            args.value_of("port").and_then(|x| Some(x.parse().unwrap())),
                            None,
                            None);
    let from = parse_remote(
        args.value_of("from").unwrap(),
        args.value_of("port").and_then(|x| Some(x.parse().unwrap())),
        None,
        None,
    );
    let to = if let Some(to) = args.value_of("to") {
        parse_remote(
            to,
            args.value_of("port").and_then(|x| Some(x.parse().unwrap())),
            None,
            None,
        )
    } else {
        let basename = Regex::new(r"([^/:]+)").unwrap();
        let from = args.value_of("from").unwrap();
        if let Some(to) = basename.captures_iter(from).last().and_then(|to| to.get(1)) {
            parse_remote(
                to.as_str(),
                args.value_of("port").and_then(|x| Some(x.parse().unwrap())),
                None,
                None,
            )
        } else {
            panic!("Could not parse target")
        }
    };
    let from_branch = args.value_of("from_branch").unwrap_or(DEFAULT_BRANCH);
    let from_path = args
        .values_of("from_path")
        .map(|x| x.map(|p| RepoPath(Path::new(p))).collect())
        .unwrap_or(Vec::new());
    let to_branch = args.value_of("to_branch").unwrap_or(from_branch);
    Params {
        from,
        from_branch,
        from_path,
        to,
        to_branch,
    }
}



pub fn run(args_: &ArgMatches) -> Result<(), Error> {
    let args = parse_args(args_);
    debug!("{:?}", args);
    match args.to {
        Remote::Local { path: repo_root } => {
            assert_no_containing_repo(&repo_root.repo_root)?;

            let parent = repo_root.repo_root.parent().unwrap();
            let tmp_dir = tempdir_in(parent)?;
            {
                create_repo(tmp_dir.path())?;
                let tmp_root = RepoRoot {
                    repo_root: tmp_dir.path(),
                };
                let mut session = args.from.session()?;
                let mut pullable: Vec<_> = if let Some(patches) = args_.values_of("patch") {
                    let mut p = Vec::new();
                    for x in patches {
                        p.push((Hash::from_base58(x).unwrap(), 0))
                    }
                    p
                } else {
                    session.changes(args.from_branch, &args.from_path[..])?
                };
                session.pull(
                    &tmp_root,
                    args.to_branch,
                    &mut pullable,
                    &args.from_path,
                    true,
                )?;
                tmp_root.set_current_branch(args.to_branch)?;
            }
            let path = tmp_dir.into_path();
            std::fs::rename(&path, &repo_root.repo_root)?;
            Ok(())
        }
        _ => {
            // Clone between remote repositories.
            match args.from {
                Remote::Local { path } => {
                    let mut to_session = args.to.session()?;
                    debug!("remote init");
                    to_session.remote_init()?;
                    debug!("pushable?");
                    let pushable = to_session.pushable_patches(
                        args.from_branch,
                        args.to_branch,
                        path
                    )?.iter().collect();
                    session.pull(path, args.to_branch, &mut pullable, true)?;

use clap::{SubCommand, ArgMatches, Arg};
use commands::{ask, default_explain, BasicOptions, StaticSubcommand};
use libpijul::fs_representation::{id_file, read_patch_nochanges};
use libpijul::{HashRef, PatchId};
use std::path::Path;
    fn setup(&mut self) {
        Pager { is_setup: true }
    path: Option<&'a str>,
        let path = args.value_of("path");
fn display_patch(
    pager: &mut Pager,
    settings: &Settings,
    nth: usize,
    patchid: PatchId,
    hash_ext: HashRef,
) -> Result<(), Error> {
    let (matches_regex, o_patch) = if settings.regex.is_empty() {
        (true, None)
    } else {
        let patch = read_patch_nochanges(&settings.opts.repo_root, hash_ext)?;
        let does_match = {
            let descr = match patch.description {
                Some(ref d) => d,
                None => "",
            settings
                .regex
                .iter()
                .any(|ref r| r.is_match(&patch.name) || r.is_match(descr))
        (does_match, Some(patch))
    };
    if !matches_regex {
        return Ok(());
    };
    } else {
            Some(patch) => patch,
        None => return Err(ErrorKind::NoSuchBranch.into()),
    } else if let Some(path) = settings.path {
        let inode = txn.find_inode(Path::new(path))?;
        let key = txn.get_inodes(inode).unwrap().key;
        for (n, (applied, patchid)) in txn.rev_iter_applied(&branch, None).enumerate() {
            if txn.get_touched(key, patchid) {
                debug!("applied: {:?}", applied);
                let hash_ext = txn.get_external(patchid).unwrap();
                debug!("hash: {:?}", hash_ext.to_base58());
                display_patch(&mut pager, &settings, n, patchid, hash_ext)?;
use clap::{Arg, ArgMatches, SubCommand};
use commands::patch::print_patch;
use commands::{ask, default_explain, BasicOptions, StaticSubcommand};
use error::Error;
use libpijul::fs_representation::RepoPath;
use libpijul::patch::Patch;
use libpijul::{Branch, PatchId, Txn};
use regex::Regex;
use std::fs::File;
use std::io::{BufReader, Read};
use std::path::PathBuf;
use term;

pub fn invocation() -> StaticSubcommand {
    SubCommand::with_name("log")
        .about("List the patches applied to the given branch")
        .arg(Arg::with_name("repository")
             .long("repository")
             .help("Path to the repository to list.")
             .takes_value(true))
        .arg(Arg::with_name("branch")
             .long("branch")
             .help("The branch to list.")
             .takes_value(true))
        .arg(Arg::with_name("internal-id")
             .long("internal-id")
             .help("Display only patches with these internal identifiers.")
             .multiple(true)
             .takes_value(true))
        .arg(Arg::with_name("hash-only")
             .long("hash-only")
             .help("Only display the hash of each path."))
        .arg(Arg::with_name("grep")
             .long("grep")
             .multiple(true)
             .takes_value(true)
             .help("Search patch name and description with a regular expression."))
        .arg(
            Arg::with_name("repository")
                .long("repository")
                .help("Path to the repository to list.")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("branch")
                .long("branch")
                .help("The branch to list.")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("internal-id")
                .long("internal-id")
                .help("Display only patches with these internal identifiers.")
                .multiple(true)
                .takes_value(true),
        )
        .arg(
            Arg::with_name("hash-only")
                .long("hash-only")
                .help("Only display the hash of each path."),
        )
        .arg(
            Arg::with_name("repository-id")
                .long("repository-id")
                .help("display a header with the repository id")
        )
        .arg(
            Arg::with_name("path")
                .long("path")
                .multiple(true)
                .takes_value(true)
                .help("Only display patches that touch the given path."),
        )
        .arg(
            Arg::with_name("grep")
                .long("grep")
                .multiple(true)
                .takes_value(true)
                .help("Search patch name and description with a regular expression."),
        )
        .arg(
            Arg::with_name("last")
                .long("last")
                .takes_value(true)
                .help("Show only the last n patches. If `--first m` is also used, then (a) if the command normally outputs the last patches first, this means the last n patches of the first m ones. (b) Else, it means the first m patches of the last n ones."),
        )
        .arg(
            Arg::with_name("first")
                .long("first")
                .takes_value(true)
                .help("Show only the last n patches. If `--last m` is also used, then (a) if the command normally outputs the last patches first, this means the last m patches of the first n ones. (b) Else, it means the first n patches of the last m ones."),
        )
        .arg(
            Arg::with_name("patch")
                .long("patch")
                .short("p")
                .help("Show patches"),
        )
}
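// Example invocations (illustrative only, using the flags declared above;
// the identifier is a placeholder):
//
//   pijul log --hash-only --last 10
//   pijul log --path src/commands --grep "partial"
//   pijul log --internal-id <ID> -p       # also print the patch itself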

struct Settings<'a> {
    hash_only: bool,
    show_repoid: bool,
    show_patches: bool,
    regex: Vec<Regex>,
    opts: BasicOptions<'a>,
    path: Vec<RepoPath<PathBuf>>,
    first: Option<usize>,
    last: Option<usize>,
}

impl<'a> Settings<'a> {
    fn parse(args: &'a ArgMatches) -> Result<Self, Error> {
        let basic_opts = BasicOptions::from_args(args)?;
        let hash_only = args.is_present("hash-only");
        let first = args.value_of("first").and_then(|x| x.parse().ok());
        let last = args.value_of("last").and_then(|x| x.parse().ok());
        let show_patches = args.is_present("patch");
        let show_repoid = args.is_present("repository-id");
        let mut regex = Vec::new();
        if let Some(regex_args) = args.values_of("grep") {
            for r in regex_args {
                debug!("regex: {:?}", r);
                regex.push(Regex::new(r)?)
            }
        }
        let path = match args.values_of("path") {
            Some(arg_paths) => {
                let mut paths = Vec::new();
                for path in arg_paths {
                    let p = basic_opts.cwd.join(path);
                    let p = if let Ok(p) = std::fs::canonicalize(&p) {
                        p
                    } else {
                        p
                    };
                    paths.push(basic_opts.repo_root.relativize(&p)?.to_owned());
                }
                paths
            }
            None => Vec::new(),
        };
        Ok(Settings {
            hash_only,
            show_patches,
            show_repoid,
            regex,
            opts: basic_opts,
            path,
            first,
            last,
        })
    }
}

fn display_patch(pager: &mut Pager, settings: &Settings,
                 nth: usize, patchid: PatchId, hash_ext: HashRef) ->
    Result<(), Error>
{
    let (matches_regex, o_patch) =
        if settings.regex.is_empty() {
            (true, None)
        } else {
            let patch = read_patch_nochanges(&settings.opts.repo_root, hash_ext)?;
            let does_match = {
                let descr = match patch.description {
                    Some(ref d) => d,
                    None => ""
                };
                settings.regex.iter()
                    .any(|ref r| r.is_match(&patch.name) || r.is_match(descr))
impl<'a> Settings<'a> {
    fn display_patch_(
        &self,
        txn: &Txn,
        branch: &Branch,
        nth: u64,
        patchid: PatchId,
    ) -> Result<(), Error> {
        let hash_ext = txn.get_external(patchid).unwrap();
        debug!("hash: {:?}", hash_ext.to_base58());

        let (matches_regex, o_patch) = if self.regex.is_empty() {
            (true, None)
        } else {
            let patch = self.opts.repo_root.read_patch_nochanges(hash_ext)?;
            let does_match = {
                let descr = match patch.description {
                    Some(ref d) => d,
                    None => "",
                };
                self.regex
                    .iter()
                    .any(|ref r| r.is_match(&patch.name) || r.is_match(descr))
            };
            (does_match, Some(patch))
        };
        if !matches_regex {
            return Ok(());
        };

        if self.hash_only {
            println!("{}:{}", hash_ext.to_base58(), nth);
        } else {
            let patch = match o_patch {
                None => self.opts.repo_root.read_patch_nochanges(hash_ext)?,
                Some(patch) => patch,
            };
            let mut term = term::stdout();
            ask::print_patch_descr(&mut term, &hash_ext.to_owned(), Some(patchid), &patch);
        }

        if self.show_patches {
            let mut patch_path = self.opts.repo_root.patches_dir().join(hash_ext.to_base58());
            patch_path.set_extension("gz");
            let f = File::open(&patch_path)?;

            let mut f = BufReader::new(f);
            let (hash, _, patch) = Patch::from_reader_compressed(&mut f)?;

            print_patch(&hash, &patch, txn, branch)?;
            println!();
        }

        Ok(())
    }

    fn display_patch(
        &self,
        txn: &Txn,
        branch: &Branch,
        n: u64,
        patchid: PatchId,
    ) -> Result<(), Error> {
        if self.path.is_empty() {
            self.display_patch_(txn, branch, n, patchid)?;
        } else {
            for path in self.path.iter() {
                let inode = txn.find_inode(&path)?;
                let key = if let Some(key) = txn.get_inodes(inode) {
                    key.key
                } else {
                    continue;
                };
                if txn.get_touched(key, patchid) {
                    self.display_patch_(txn, branch, n, patchid)?;
                    break;
                }
            }
        }
        Ok(())
    }

    fn is_touched(&self, txn: &Txn, patchid: PatchId) -> bool {
        self.path.is_empty()
            || self.path.iter().any(|path| {
                if let Ok(inode) = txn.find_inode(&path) {
                    if let Some(key) = txn.get_inodes(inode) {
                        return txn.get_touched(key.key, patchid);
                    }
                }
                false
            })
    }
}

pub fn run(args: &ArgMatches) -> Result<(), Error> {
    let settings = Settings::parse(args)?;
    let repo = settings.opts.open_repo()?;
    let txn = repo.txn_begin()?;
    let branch = match txn.get_branch(&settings.opts.branch()) {
        Some(b) => b,
        None => return Err(Error::NoSuchBranch),
    };

    if settings.show_repoid {
        let id_file = settings.opts.repo_root.id_file();
        let mut f = File::open(&id_file)?;
        let mut s = String::new();
        f.read_to_string(&mut s)?;
        if settings.hash_only {
            println!("{}", s.trim());
        } else {
            println!("Repository id: {}", s.trim());
            println!();
        }
    };
    if settings.hash_only {
        // If only hashes are displayed, show the patches in chronological order.
        let start = settings.last.and_then(|last| {
            txn.rev_iter_applied(&branch, None)
                .filter(|(_, patchid)| {
                    // Only select patches that touch the input path
                    // (if that path exists).
                    settings.is_touched(&txn, *patchid)
                })
                .take(last)
                .last()
                .map(|(n, _)| n)
        });
        debug!("start {:?}", start);
        for (n, (applied, patchid)) in txn.iter_applied(&branch, start).enumerate() {
            if let Some(first) = settings.first {
                if n >= first {
                    break;
                }
            }
            settings.display_patch(&txn, &branch, applied, patchid)?
        }
        return Ok(());
    }

    let txn = repo.txn_begin()?;
    if let Some(v) = args.values_of("internal-id") {
        for (n, patchid) in v.filter_map(|x| PatchId::from_base58(x)).enumerate() {
            settings.display_patch(&txn, &branch, n as u64, patchid)?;
        }

    } else {
        let first = if let Some(first) = settings.first {
            txn.iter_applied(&branch, None)
                .filter(|(_, patchid)| settings.is_touched(&txn, *patchid))
                .take(first)
                .last()
                .map(|(n, _)| n)
        } else {
            None
        };
        for (n, (applied, patchid)) in txn.rev_iter_applied(&branch, first).enumerate() {
            if let Some(last) = settings.last {
                if n >= last {
                    break;
                }
            }
            settings.display_patch(&txn, &branch, applied, patchid)?;
        }

use clap::{SubCommand, ArgMatches, Arg};
    pub remote_path: Option<&'a str>,
        remote_path: args.value_of("remote_path"),
    r: &Path,
    r: &Path,
    remote_path: Option<&str>,
    let pullable = session.pullable_patches(remote_branch, local_branch, r, remote_path)?;
    let partial = if let Some(partial) = args.remote_path {
        Some(Path::new(partial))
    } else {
        None
    };
            args.remote_path,
            session.pull(&opts.repo_root, &local_branch, &mut pullable, partial, false)?;
    let conflicts =
        if let Some(partial) = partial {
            txn.list_conflict_files(
                &local_branch,
                &[partial],
            )?
        } else {
            txn.list_conflict_files(
                &local_branch,
                &[],
            )?
        };
use clap::{Arg, ArgMatches, SubCommand};

use commands::{BasicOptions, StaticSubcommand, default_explain};
use commands::{default_explain, BasicOptions, StaticSubcommand};
use error::Error;
use std::fs::File;
use std::path::Path;

use commands::ask::{ask_patches, Command};
use commands::remote;
use libpijul::fs_representation::{RepoPath, RepoRoot};
use libpijul::patch::Patch;
use libpijul::{ApplyTimestamp, Hash, PatchId, DEFAULT_BRANCH};
use meta::{Meta, Repository, DEFAULT_REMOTE};
use progrs;
use std::env::current_dir;
use std::io::BufReader;

pub fn invocation() -> StaticSubcommand {
    return SubCommand::with_name("pull")
        .about("Pull from a remote repository")
        .arg(
            Arg::with_name("repository")
                .long("repository")
                .help("Repository to list.")
                .takes_value(true),
        )
        .arg(Arg::with_name("remote").help("Repository from which to pull."))
        .arg(Arg::with_name("remote_branch")
            .long("from-branch")
            .help("The branch to pull from. Defaults to master.")
            .takes_value(true))
        .arg(Arg::with_name("local_branch")
            .long("to-branch")
            .help("The branch to pull into. Defaults to the current branch.")
            .takes_value(true))
        .arg(Arg::with_name("all")
             .short("a")
            .long("all")
            .help("Answer 'y' to all questions")
            .takes_value(false))
        .arg(Arg::with_name("set-default")
             .long("set-default")
             .help("Used with --set-remote, sets this remote as the default pull remote.")
        .arg(
            Arg::with_name("remote_branch")
                .long("from-branch")
                .help("The branch to pull from. Defaults to master.")
                .takes_value(true),
        )
        .arg(Arg::with_name("set-remote")
             .long("set-remote")
             .takes_value(true)
        .arg(
            Arg::with_name("local_branch")
                .long("to-branch")
                .help("The branch to pull into. Defaults to the current branch.")
                .takes_value(true),
        )
        .arg(Arg::with_name("port")
            .short("p")
            .long("port")
            .help("Port of the remote ssh server.")
            .takes_value(true)
            .validator(|val| {
                let x: Result<u16, _> = val.parse();
                match x {
                    Ok(_) => Ok(()),
                    Err(_) => Err(val),
                }
            }));
        .arg(
            Arg::with_name("all")
                .short("a")
                .long("all")
                .help("Answer 'y' to all questions")
                .takes_value(false),
        )
        .arg(
            Arg::with_name("set-default")
                .long("set-default")
                .help("Used with --set-remote, sets this remote as the default pull remote."),
        )
        .arg(
            Arg::with_name("set-remote")
                .long("set-remote")
                .help("Name this remote destination.")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("remote_path")
                .long("path")
                .help("Only pull patches affecting the part of the repo under that path (relative to the root of the repo).")
                .help("Only pull patches relative to that patch.")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("port")
                .short("p")
                .long("port")
                .help("Port of the remote ssh server.")
                .takes_value(true)
                .validator(|val| {
                    let x: Result<u16, _> = val.parse();
                    match x {
                        Ok(_) => Ok(()),
                        Err(_) => Err(val),
                    }
                }),
        );
}
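// Example invocations (illustrative only, using the flags declared above;
// the remote is a placeholder):
//
//   pijul pull <remote>
//   pijul pull --path src/commands <remote>       # only patches touching that path
//   pijul pull -a --from-branch testing <remote>  # pull everything, no questions asked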

#[derive(Debug)]
pub struct Params<'a> {
    pub remote_id: Option<&'a str>,
    pub set_remote: Option<&'a str>,
    pub yes_to_all: bool,
    pub set_default: bool,
    pub port: Option<u16>,
    pub local_branch: Option<&'a str>,
    pub remote_branch: &'a str,
    pub remote_paths: Vec<RepoPath<&'a Path>>,
}

fn parse_args<'a>(args: &'a ArgMatches) -> Params<'a> {
    Params {
        remote_id: args.value_of("remote"),
        set_remote: args.value_of("set-remote"),
        yes_to_all: args.is_present("all"),
        set_default: args.is_present("set-default"),
        port: args.value_of("port").and_then(|x| Some(x.parse().unwrap())),
        local_branch: args.value_of("local_branch"),
        remote_branch: args.value_of("remote_branch").unwrap_or(DEFAULT_BRANCH),
        remote_paths: if let Some(rem) = args.values_of("remote_path") {
            rem.map(|p| RepoPath(Path::new(p))).collect()
        } else {
            Vec::new()
        },
    }
}
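
For orientation, here is a minimal standalone sketch (not part of the patch) of what `parse_args` does with the new `--path` values: each occurrence becomes a `RepoPath`, a path interpreted relative to the repository root. The `RepoPath` stand-in below is an assumption modelled on its use above; the real type lives in libpijul.

use std::path::Path;

// Assumed stand-in for libpijul::RepoPath as used in parse_args above:
// a path interpreted relative to the repository root.
#[derive(Debug, PartialEq)]
struct RepoPath<P>(P);

// Each occurrence of --path becomes one RepoPath; no --path means "pull everything".
fn collect_paths<'a>(values: Option<Vec<&'a str>>) -> Vec<RepoPath<&'a Path>> {
    values
        .map(|vs| vs.into_iter().map(|p| RepoPath(Path::new(p))).collect())
        .unwrap_or_default()
}

fn main() {
    let paths = collect_paths(Some(vec!["src/commands", "doc"]));
    assert_eq!(paths.len(), 2);
    assert_eq!(paths[0], RepoPath(Path::new("src/commands")));
    assert!(collect_paths(None).is_empty());
}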

fn fetch_pullable_patches(session: &mut remote::Session,
                          pullable: &[(Hash, ApplyTimestamp)],
                          r: &Path)
                          -> Result<Vec<(Hash, Option<PatchId>, Patch)>, Error> {
fn fetch_pullable_patches(
    session: &mut remote::Session,
    pullable: &[(Hash, ApplyTimestamp)],
    r: &RepoRoot<impl AsRef<Path>>,
) -> Result<Vec<(Hash, Option<PatchId>, Patch)>, Error> {
    let mut patches = Vec::new();

    let (mut p, mut n) = (progrs::start("Pulling patches", pullable.len() as u64), 0);
    for &(ref i, _) in pullable {
        let (hash, _, patch) = {
            let filename = session.download_patch(r, i)?;
            debug!("filename {:?}", filename);
            let file = File::open(&filename)?;
            let mut file = BufReader::new(file);
            Patch::from_reader_compressed(&mut file)?
        };
        p.display({ n += 1; n });
        p.display({
            n += 1;
            n
        });
        assert_eq!(&hash, i);
        patches.push((hash, None, patch));
    }
    p.stop("done");
    Ok(patches)
}

pub fn select_patches(interactive: bool,
                      session: &mut remote::Session,
                      remote_branch: &str,
                      local_branch: &str,
                      r: &Path)
                      -> Result<Vec<(Hash, ApplyTimestamp)>, Error> {
    let pullable = session.pullable_patches(remote_branch, local_branch, r)?;
    let mut pullable:Vec<_> = pullable.iter().collect();
pub fn select_patches(
    interactive: bool,
    session: &mut remote::Session,
    remote_branch: &str,
    local_branch: &str,
    r: &RepoRoot<impl AsRef<Path>>,
    remote_paths: &[RepoPath<&Path>],
) -> Result<Vec<(Hash, ApplyTimestamp)>, Error> {
    let pullable = session.pullable_patches(remote_branch, local_branch, r, remote_paths)?;
    let mut pullable: Vec<_> = pullable.iter().collect();
    pullable.sort_by(|&(_, a), &(_, b)| a.cmp(&b));
    if interactive && !pullable.is_empty() {
        let selected = {
            let patches = fetch_pullable_patches(session, &pullable, r)?;
            ask_patches(Command::Pull, &patches[..])?
        };
        Ok(pullable.into_iter()
           .filter(|&(ref h, _)| selected.contains(h))
           .collect())
        Ok(pullable
            .into_iter()
            .filter(|&(ref h, _)| selected.contains(h))
            .collect())
    } else {
        Ok(pullable)
    }
}

pub fn run(arg_matches: &ArgMatches) -> Result<(), Error> {
    let opts = BasicOptions::from_args(arg_matches)?;
    let args = parse_args(arg_matches);
    debug!("pull args {:?}", args);
    let mut meta = Meta::load(&opts.repo_root).unwrap_or(Meta::new());
    let cwd = current_dir()?;
    let local_branch = if let Some(b) = args.local_branch {
        b.to_string()
    } else {
        opts.branch()
    };
    let repo_root = opts.repo_root();
    let conflicts = {
        let remote = meta.pull(args.remote_id, args.port, Some(&cwd), Some(&repo_root))?;
        let mut session = remote.session()?;
        let mut pullable =
            select_patches(!args.yes_to_all,
                           &mut session,
                           args.remote_branch,
                           &local_branch,
                           &opts.repo_root)?;
        let mut pullable = select_patches(
            !args.yes_to_all,
            &mut session,
            args.remote_branch,
            &local_branch,
            &opts.repo_root,
            &args.remote_paths,
        )?;

        // Pulling and applying
        info!("Pulling patch {:?}", pullable);
        if !pullable.is_empty() {
            session.pull(&opts.repo_root, &local_branch, &mut pullable, false)?;
            session.pull(
                &opts.repo_root,
                &local_branch,
                &mut pullable,
                &args.remote_paths,
                false,
            )?
        } else {

use clap::{SubCommand, ArgMatches, Arg};
                .help("Answer 'y' to all questions")
                .takes_value(true),
    pub remote_path: Option<&'a str>,
        remote_path: args.value_of("remote_path"),
        let pushable = session.pushable_patches(
            args.remote_branch,
            args.remote_path,
            let mut pushable: Vec<_> = pushable.into_iter().collect();
            pushable.sort_by(|&(_, _, a), &(_, _, b)| a.cmp(&b));
            for &(ref i, ref internal, _) in pushable.iter() {
                    read_patch(&opts.repo_root, i.as_ref())?,
            pushable.into_iter().map(|(h, _, _)| h).collect()
use clap::{Arg, ArgMatches, SubCommand};

use super::ask;
use commands::{default_explain, BasicOptions, StaticSubcommand};
use error::Error;
use libpijul::RepoPath;
use meta::{Meta, Repository, DEFAULT_REMOTE};
use std::env::current_dir;
use std::path::Path;

pub fn invocation() -> StaticSubcommand {
    return SubCommand::with_name("push")
        .about("Push to a remote repository")
        .arg(
            Arg::with_name("repository")
                .long("repository")
                .help("Repository to list.")
                .takes_value(true),
        )
        .arg(Arg::with_name("remote").help("Repository to push to."))
        .arg(Arg::with_name("local_branch")
            .long("from-branch")
            .help("The branch to push from")
            .takes_value(true))
        .arg(Arg::with_name("remote_branch")
            .long("to-branch")
            .help("The branch to push into. Defaults to the current branch.")
            .takes_value(true))
        .arg(Arg::with_name("all")
            .short("a")
            .long("all")
            .help("Answer 'y' to all questions")
            .takes_value(false))
        .arg(Arg::with_name("set-default")
             .long("set-default")
             .help("Used with --set-remote, sets this remote as the default push target.")
        .arg(
            Arg::with_name("local_branch")
                .long("from-branch")
                .help("The branch to push from")
                .takes_value(true),
        )
        .arg(Arg::with_name("set-remote")
             .long("set-remote")
             .help("Set the name of this remote")
             .takes_value(true)
        .arg(
            Arg::with_name("remote_branch")
                .long("to-branch")
                .help("The branch to push into. Defaults to the current branch.")
                .takes_value(true),
        )
        .arg(Arg::with_name("port")
            .short("p")
            .long("port")
            .help("Port of the remote ssh server.")
            .takes_value(true)
            .validator(|val| {
                let x: Result<u16, _> = val.parse();
                match x {
                    Ok(_) => Ok(()),
                    Err(_) => Err(val),
                }
            }));
        .arg(
            Arg::with_name("all")
                .short("a")
                .long("all")
                .help("Answer 'y' to all questions.")
                .takes_value(false),
        )
        .arg(
            Arg::with_name("force")
                .long("force")
                .help("Force the push even if non fast-forward."),
        )
        .arg(
            Arg::with_name("set-default")
                .long("set-default")
                .help("Used with --set-remote, sets this remote as the default push target."),
        )
        .arg(
            Arg::with_name("set-remote")
                .long("set-remote")
                .help("Set the name of this remote")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("remote_path")
                .long("path")
                .help("Only pull patches relative to that patch.")
                .takes_value(true)
                .multiple(true),
        )
        .arg(
            Arg::with_name("port")
                .short("p")
                .long("port")
                .help("Port of the remote ssh server.")
                .takes_value(true)
                .validator(|val| {
                    let x: Result<u16, _> = val.parse();
                    match x {
                        Ok(_) => Ok(()),
                        Err(_) => Err(val),
                    }
                }),
        );
}

#[derive(Debug)]
pub struct Params<'a> {
    pub remote_id: Option<&'a str>,
    pub set_remote: Option<&'a str>,
    pub yes_to_all: bool,
    pub set_default: bool,
    pub port: Option<u16>,
    pub local_branch: Option<&'a str>,

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    pub remote_path: Vec<&'a str>,

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

================================
    pub remote_path: Vec<RepoPath<&'a Path>>,
    pub remote_branch: Option<&'a str>,

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    pub force: bool,
}

pub fn parse_args<'a>(args: &'a ArgMatches) -> Params<'a> {
    Params {
        remote_id: args.value_of("remote"),
        set_remote: args.value_of("set-remote"),
        yes_to_all: args.is_present("all"),
        set_default: args.is_present("set-default"),
        port: args.value_of("port").and_then(|x| Some(x.parse().unwrap())),
        local_branch: args.value_of("local_branch"),
        remote_branch: args.value_of("remote_branch"),
        remote_path: args
            .values_of("remote_path")
            .map(|x| x.map(|p| RepoPath(Path::new(p))).collect())
            .unwrap_or(Vec::new()),
        force: args.is_present("force"),
    }
}

pub fn run(arg_matches: &ArgMatches) -> Result<(), Error> {
    let opts = BasicOptions::from_args(arg_matches)?;
    let args = parse_args(arg_matches);
    let mut meta = Meta::load(&opts.repo_root).unwrap_or(Meta::new());
    let local_branch = args
        .local_branch
        .map(|b| b.to_string())
        .unwrap_or(opts.branch());
    let remote_branch = args
        .remote_branch
        .map(|b| b.to_string())
        .unwrap_or(opts.branch());
    let cwd = current_dir()?;
    let repo_root = opts.repo_root();
    {
        let remote = meta.push(args.remote_id, args.port, Some(&cwd), Some(&repo_root))?;
        debug!("remote: {:?}", remote);
        println!("Pushing to branch {}", remote_branch);
        let mut session = remote.session()?;
        let pushable =
            session.pushable_patches(&local_branch, args.remote_branch, &opts.repo_root)?;
            let pushable = if !args.yes_to_all {
                let mut patches = Vec::new();
                let mut pushable:Vec<_> = pushable.into_iter().collect();
                pushable.sort_by(|&(_, _, a), &(_, _, b)| a.cmp(&b));
                for &(ref i, ref internal, _) in pushable.iter() {
                    patches.push((i.clone(), internal.clone(), read_patch(&opts.repo_root, i.as_ref())?))
                }
                ask::ask_patches(ask::Command::Push, &patches)?
            } else {
                pushable.into_iter().map(|(h, _, _)| h).collect()
            };
        let mut pushable = session.pushable_patches(
            &local_branch,
            &remote_branch,
            &opts.repo_root,
            &args.remote_path,
        )?;
        if !pushable.non_fast_forward.is_empty() && !args.force {
            return Err(Error::NonFastForwardPush);
        }
        let pushable = if !args.yes_to_all {
            let mut patches = Vec::new();
            pushable
                .pushable
                .sort_by(|&(_, _, a), &(_, _, b)| a.cmp(&b));
            for &(ref i, ref internal, _) in pushable.pushable.iter() {
                patches.push((
                    i.clone(),
                    internal.clone(),
                    opts.repo_root.read_patch(i.as_ref())?,
                ))
            }
            ask::ask_patches(ask::Command::Push, &patches)?
        } else {
            pushable.pushable.into_iter().map(|(h, _, _)| h).collect()
        };
        if pushable.is_empty() {
            println!("Nothing to push");
        } else {
            session.push(&opts.repo_root, &remote_branch, pushable)?;
        }
    }

    info!("Saving meta");
    let set_remote = if args.set_default && args.set_remote.is_none() {
        Some(DEFAULT_REMOTE)
    } else {
        args.set_remote
    };
    if let (Some(set_remote), Some(remote_id)) = (set_remote, args.remote_id) {
        let mut repo = Repository::default();
        repo.address = remote_id.to_string();
        repo.port = args.port;

        meta.remote.insert(set_remote.to_string(), repo);
        if args.set_default {
            meta.push = Some(set_remote.to_string());
        }
        meta.save(&opts.repo_root)?;
    }
    Ok(())
}

    prefix: Option<Vec<PathBuf>>,
    prefix: Option<&Vec<PathBuf>>,
pub fn prefix(args: &ArgMatches, opts: &BasicOptions) -> Result<Option<Vec<PathBuf>>> {
        let prefixes: Result<Vec<_>> = prefixes
use super::ask::{ask_changes, ChangesDirection};
use super::default_explain;
use chrono;
use clap::{Arg, ArgMatches, SubCommand};
use commands::hooks::run_hook;
use commands::{ask, BasicOptions, StaticSubcommand};
use error::Error;
use libpijul;
use libpijul::fs_representation::{in_repo_root, RepoPath, RepoRoot};
use libpijul::patch::{PatchFlags, Record};
use libpijul::{Hash, InodeUpdate, Key, MutTxn, Patch, PatchId, RecordState, Repository};
use meta::{load_signing_key, Global, Meta};
use rand;
use std::collections::HashSet;
use std::fs::canonicalize;
use std::fs::{metadata, OpenOptions};
use std::io::Write;
use std::mem::drop;
use std::path::{Path, PathBuf};
use std::str::FromStr;

pub fn record_args(sub: StaticSubcommand) -> StaticSubcommand {
    sub.arg(Arg::with_name("repository")
            .long("repository")
            .help("The repository where to record, defaults to the current directory.")
            .takes_value(true)
            .required(false))
        .arg(Arg::with_name("branch")
             .long("branch")
             .help("The branch where to record, defaults to the current branch.")
             .takes_value(true)
             .required(false))
        .arg(Arg::with_name("date")
             .long("date")
             .help("The date to use to record the patch, default is now.")
             .takes_value(true)
             .required(false))
        .arg(Arg::with_name("message")
             .short("m")
             .long("message")
             .help("The name of the patch to record")
             .takes_value(true))
        .arg(Arg::with_name("description")
             .short("d")
             .long("description")
             .help("The description of the patch to record")
             .takes_value(true))
        .arg(Arg::with_name("no-editor")
             .long("no-editor")
             .help("Do not use an editor to write the patch name and description, even if the variable is set in the configuration file")
             .takes_value(false))
        .arg(Arg::with_name("author")
             .short("A")
             .long("author")
             .help("Author of this patch (multiple occurrences allowed)")
             .takes_value(true))
        .arg(Arg::with_name("patience")
             .long("patience")
             .help("Use patience diff instead of the default (Myers diff)")
             .conflicts_with("myers")
             .takes_value(false))
        .arg(Arg::with_name("myers")
             .long("myers")
             .help("Use Myers diff")
             .conflicts_with("patience")
             .takes_value(false))
}

pub fn invocation() -> StaticSubcommand {
    return record_args(
        SubCommand::with_name("record")
            .about("Record changes in the repository")
            .arg(
                Arg::with_name("all")
                    .short("a")
                    .long("all")
                    .help("Answer 'y' to all questions")
                    .takes_value(false),
            )
            .arg(
                Arg::with_name("add-new-files")
                    .short("n")
                    .long("add-new-files")
                    .help("Offer to add files that have been created since the last record")
                    .takes_value(false),
            )
            .arg(
                Arg::with_name("depends-on")
                    .help("Add a dependency to this patch (internal id or hash accepted)")
                    .long("depends-on")
                    .takes_value(true)
                    .multiple(true),
            )
            .arg(
                Arg::with_name("prefix")
                    .help("Prefix to start from")
                    .takes_value(true)
                    .multiple(true),
            ),
    );
}

fn add_untracked_files<T: rand::Rng, P: AsRef<Path> + 'static>(
    txn: &mut MutTxn<T>,
    repo_root: &RepoRoot<P>,

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
        if let Err(e) = txn.add_file(&file, m.is_dir()) {

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
        }

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    }

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    Ok(untracked)
}

fn append_to_ignore_file(
    repo_root: &RepoRoot<impl AsRef<Path>>,
    lines: &Vec<String>,
) -> Result<(), Error> {
    let ignore_file = repo_root.local_ignore_file();
    let mut file = OpenOptions::new()
        .append(true)
        .create(true)
        .open(ignore_file)?;
    for line in lines {
        file.write_all(line.as_ref())?;
        file.write_all(b"\n")?
    }
    Ok(())
}

fn select_changes(
    algo: libpijul::DiffAlgorithm,
    opts: &BasicOptions,
    add_new_files: bool,
    branch_name: &str,
    yes_to_all: bool,
    prefix: Option<HashSet<PathBuf>>,
    prefix: Option<Vec<RepoPath<PathBuf>>>,
) -> Result<(Vec<Record<Vec<Key<Option<Hash>>>>>, HashSet<InodeUpdate>), Error> {
    // Increase by 100 pages. The most that record can write here is a
    // single entry in the branches table, affecting at most O(log n) blocks.
    let repo = opts.open_and_grow_repo(409600)?;
    let mut txn = repo.mut_txn_begin(rand::thread_rng())?;
    let mut to_unadd = if add_new_files {
        add_untracked_files(&mut txn, &opts.repo_root)?
    } else {
        HashSet::new()
    };
    let (changes, syncs) = changes_from_prefixes(
        algo,
        &opts.repo_root,
        &mut txn,
        &branch_name,
        prefix.as_ref(),
    )?;
    let changes: Vec<_> = changes
        .into_iter()
        .map(|x| txn.globalize_record(x))
        .collect();
    if !yes_to_all {
        let (c, i) = ask_changes(
            &txn,
            &opts.repo_root,
            &opts.cwd,
            &changes,
            ChangesDirection::Record,
            &mut to_unadd,
        )?;
        let selected = changes
            .into_iter()
            .enumerate()
            .filter(|&(i, _)| *(c.get(&i).unwrap_or(&false)))
            .map(|(_, x)| x)
            .collect();
        for file in to_unadd {
            txn.remove_file(&file)?
        }
        txn.commit()?;
        append_to_ignore_file(&opts.repo_root, &i)?;
        Ok((selected, syncs))
    } else {
        txn.commit()?;
        Ok((changes, syncs))
    }
}

pub fn run(args: &ArgMatches) -> Result<Option<Hash>, Error> {
    let opts = BasicOptions::from_args(args)?;
    let yes_to_all = args.is_present("all");
    let patch_name_arg = args.value_of("message");
    let patch_descr_arg = args.value_of("description");
    let authors_arg = args.values_of("author").map(|x| x.collect::<Vec<_>>());
    let branch_name = opts.branch();
    let add_new_files = args.is_present("add-new-files");

    let patch_date = args.value_of("date").map_or(Ok(chrono::Utc::now()), |x| {
        chrono::DateTime::from_str(x).map_err(|_| Error::InvalidDate {
            date: String::from(x),
        })
    })?;

    let mut save_meta = false;

    let (mut global, save_global) = Global::load().map(|c| (c, false)).unwrap_or_else(|e| {
        info!("loading global key, error {:?}", e);
        (Global::new(), true)
    });

    let mut meta = match Meta::load(&opts.repo_root) {
        Ok(m) => m,
        Err(_) => {
            save_meta = true;
            Meta::new()
        }
    };

    run_hook(&opts.repo_root, "pre-record", None)?;

    debug!("prefix {:?}", args.value_of("prefix"));
    let prefix = prefix(args, &opts)?;

    let (changes, syncs) = select_changes(
        if args.is_present("patience") {
            libpijul::DiffAlgorithm::Patience
        } else {
            libpijul::DiffAlgorithm::Myers
        },
        &opts,
        add_new_files,
        &branch_name,
        yes_to_all,
        prefix,
    )?;

    if changes.is_empty() {
        println!("Nothing to record");
        Ok(None)
    } else {
        let template = prepare_changes_template(patch_name_arg.unwrap_or(""), &changes);

        let repo = opts.open_repo()?;
        let patch = {
            let txn = repo.txn_begin()?;
            debug!("meta:{:?}", meta);

            let authors = decide_authors(authors_arg, &meta, &global)?;

            if authors.is_empty() {
                return Err(Error::NoAuthor);
            }

            if meta.authors.is_empty() {
                meta.authors = authors.clone();
                save_meta = true;
            }

            if global.author.is_none() {
                global.author = Some(authors[0].clone());
            }

            debug!("authors:{:?}", authors);

            let (patch_name, description) = decide_patch_message(
                patch_name_arg,
                patch_descr_arg,
                template,
                !args.is_present("no-editor"),
                &opts.repo_root,
                &meta,
                &global,
            )?;

            run_hook(&opts.repo_root, "patch-name", Some(&patch_name))?;

            debug!("patch_name:{:?}", patch_name);
            if save_meta {
                meta.save(&opts.repo_root)?
            }
            if save_global {
                global.save().unwrap_or(())
            }
            debug!("new");
            let changes = changes.into_iter().flat_map(|x| x.into_iter()).collect();
            let branch = txn.get_branch(&branch_name).unwrap();

            let mut extra_deps = Vec::new();
            if let Some(deps) = args.values_of("depends-on") {
                for dep in deps {
                    if let Some(hash) = Hash::from_base58(dep) {
                        if let Some(internal) = txn.get_internal(hash.as_ref()) {
                            if txn.get_patch(&branch.patches, internal).is_some() {
                                extra_deps.push(hash)
                            } else {
                                return Err(Error::ExtraDepNotOnBranch { hash });
                            }
                        } else {
                            return Err(Error::PatchNotFound {
                                repo_root: opts.repo_root().to_string_lossy().into_owned(),
                                patch_hash: hash,
                            });
                        }
                    } else if let Some(internal) = PatchId::from_base58(dep) {
                        if let Some(hash) = txn.get_external(internal) {
                            if txn.get_patch(&branch.patches, internal).is_some() {
                                extra_deps.push(hash.to_owned())
                            } else {
                                return Err(Error::ExtraDepNotOnBranch {
                                    hash: hash.to_owned(),
                                });
                            }
                        }
                    } else {
                        return Err(Error::WrongHash);
                    }
                }
            }
            txn.new_patch(
                &branch,
                authors,
                patch_name,
                description,
                patch_date,
                changes,
                extra_deps.into_iter(),
                PatchFlags::empty(),
            )
        };
        drop(repo);

        let patches_dir = opts.repo_root.patches_dir();
        let mut key = meta
            .signing_key
            .or(global.signing_key)
            .and_then(|s| load_signing_key(s).ok());
        let hash = if let Some(ref mut key) = key {
            key.check_author(&patch.header().authors)?;
            patch.save(&patches_dir, key.keys.get_mut(0))?
        } else {
            patch.save(&patches_dir, None)?
        };

        let pristine_dir = opts.pristine_dir();
        let mut increase = 409600;
        let res = loop {
            match record_no_resize(
                &pristine_dir,
                &opts.repo_root,
                &branch_name,
                &hash,
                &patch,
                &syncs,
                increase,
            ) {
                Err(ref e) if e.lacks_space() => increase *= 2,
                e => break e,
            }
        };

        run_hook(&opts.repo_root, "post-record", None)?;

        res
    }
}

pub fn record_no_resize(
    pristine_dir: &Path,
    r: &RepoRoot<impl AsRef<Path>>,
    branch_name: &str,
    hash: &Hash,
    patch: &Patch,
    syncs: &HashSet<InodeUpdate>,
    increase: u64,
) -> Result<Option<Hash>, Error> {
    let size_increase = increase + patch.size_upper_bound() as u64;
    let repo = match Repository::open(&pristine_dir, Some(size_increase)) {
        Ok(repo) => repo,
        Err(x) => return Err(Error::Repository(x)),
    };
    let mut txn = repo.mut_txn_begin(rand::thread_rng())?;
    // save patch
    debug!("syncs: {:?}", syncs);
    let mut branch = txn.open_branch(branch_name)?;
    txn.apply_local_patch(&mut branch, r, &hash, &patch, &syncs, false)?;
    txn.commit_branch(branch)?;
    txn.commit()?;
    println!("Recorded patch {}", hash.to_base58());
    Ok(Some(hash.clone()))
}
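
The loop in `run` above keeps retrying `record_no_resize` with a doubled size increase whenever the pristine database reports that it lacks space. A small standalone sketch of that retry pattern, with `lacks_space` passed in as a predicate (the real code calls `Error::lacks_space()`), and `attempt` standing in for `record_no_resize(..., increase)`:

// Standalone sketch of the retry pattern used by `run` above.
fn retry_with_increase<T, E>(
    mut increase: u64,
    lacks_space: impl Fn(&E) -> bool,
    mut attempt: impl FnMut(u64) -> Result<T, E>,
) -> Result<T, E> {
    loop {
        match attempt(increase) {
            // Not enough space in the pristine: double the allowance and retry.
            Err(ref e) if lacks_space(e) => increase *= 2,
            // Success or any other error: stop retrying.
            other => return other,
        }
    }
}

fn main() {
    // Toy attempt that only succeeds once the allowance reaches 4 * 409600 bytes.
    let res = retry_with_increase(
        409_600,
        |e: &&str| *e == "lacks space",
        |inc| if inc < 1_638_400 { Err("lacks space") } else { Ok(inc) },
    );
    assert_eq!(res, Ok(1_638_400));
}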

pub fn explain(res: Result<Option<Hash>, Error>) {
    default_explain(res)
}

pub fn changes_from_prefixes<T: rand::Rng, P: AsRef<Path>>(
    algo: libpijul::DiffAlgorithm,
    repo_root: &RepoRoot<impl AsRef<Path>>,
    txn: &mut MutTxn<T>,
    branch_name: &str,
    prefix: Option<&HashSet<PathBuf>>,
    prefix: Option<&Vec<RepoPath<P>>>,
) -> Result<
    (
        Vec<libpijul::patch::Record<libpijul::patch::ChangeContext<PatchId>>>,
        HashSet<libpijul::InodeUpdate>,
    ),
    Error,
> {
    let mut record = RecordState::new();
    let branch = txn.open_branch(branch_name)?;
    if let Some(prefixes) = prefix {
        for prefix in prefixes {
            txn.record(algo, &mut record, &branch, repo_root, prefix)?;
        }
    } else {
        txn.record(algo, &mut record, &branch, repo_root, &in_repo_root())?;
    }
    txn.commit_branch(branch)?;
    let (changes, updates) = record.finish();
    // let changes = changes.into_iter().map(|x| txn.globalize_change(x)).collect();
    Ok((changes, updates))
}
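
`changes_from_prefixes` records each requested prefix separately and falls back to the repository root when no prefix was given. A minimal sketch of that dispatch, with `record_one` standing in for the `txn.record(...)` call above:

// Sketch of the prefix dispatch in changes_from_prefixes above.
fn record_all<P, F>(prefixes: Option<&Vec<P>>, root: P, mut record_one: F)
where
    F: FnMut(&P),
{
    match prefixes {
        // Partial record: one pass per requested prefix.
        Some(ps) => {
            for p in ps {
                record_one(p);
            }
        }
        // No prefix: record starting from the repository root.
        None => record_one(&root),
    }
}

fn main() {
    let mut seen = Vec::new();
    record_all(Some(&vec!["src", "doc"]), "", |p| seen.push(p.to_string()));
    assert_eq!(seen, vec!["src", "doc"]);

    let mut seen = Vec::new();
    record_all(None, "<repo root>", |p| seen.push(p.to_string()));
    assert_eq!(seen, vec!["<repo root>"]);
}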

pub fn prefix(args: &ArgMatches, opts: &BasicOptions) -> Result<Option<HashSet<PathBuf>>> {
pub fn prefix(
    args: &ArgMatches,
    opts: &BasicOptions,
) -> Result<Option<Vec<RepoPath<PathBuf>>>, Error> {
    if let Some(prefixes) = args.values_of("prefix") {
        let prefixes: Result<HashSet<_>> = prefixes
        let prefixes: Result<Vec<_>, Error> = prefixes
            .map(|prefix| {

use libpijul::{Hash, ApplyTimestamp, apply_resize, Repository, PatchId};
use libpijul::{apply_resize, ApplyTimestamp, Hash, Patch, PatchId, Repository};
use libpijul::fs_representation::{branch_changes_base_path, patch_file_name, patches_dir,
                                  pristine_dir, PIJUL_DIR_NAME};
use rand;
use error::{Error, ErrorKind, Result};
use futures::{Async, Future, Poll, Stream};
    Uri {
        uri: &'a str,
    },
    Local {
        path: PathBuf,
    },
    path: &'a Path,
            UnixStream::connect(path, h)
                .ok()
                .map(thrussh_keys::agent::client::AgentClient::connect)
        changes: HashMap<Hash, ApplyTimestamp>,
    },
                    .from_err(),
        session: thrussh::client::Session,
                                changes.insert(h, s);
        let path = std::env::home_dir()
            .unwrap()
            .join(".ssh")
            .join("known_hosts");
                    ).unwrap();
        path: Option<&str>,
    ) -> Result<HashMap<Hash, ApplyTimestamp>> {
        let cmd = if let Some(path) = path {
            format!(
                "{} log --repository {} --branch {:?} --hash-only --path {}",
                self.pijul_cmd, esc_path, branch, path
            )
        } else {
            format!(
                "{} log --repository {} --branch {:?} --hash-only",
                self.pijul_cmd, esc_path, branch
            )
        };
                changes: HashMap::new(),
                .run(
                                })
                                .and_then(move |mut session| {
                )
                .unwrap(),
                        return Ok(HashMap::new());
        local_file: &Path,
        local_tmp_file: &Path,
    ) -> Result<()> {
                file: try!(File::create(&local_tmp_file)),
                .run(
                                })
                                .and_then(move |mut session| {
                )
                .unwrap(),
        repo_root: &Path,
        patch_hashes: &HashSet<Hash>,
    ) -> Result<()> {
        *session = Some(
            l.run(session.take().unwrap().channel_open_session().and_then(
                move |(mut session, chan)| {
                    session.handler_mut().exit_status.remove(&chan);
                    session.handler_mut().channel = Some(chan);
                    let esc_path = escape(Cow::Borrowed(path));
                    debug!(
                        "exec {:?}",
                        format!(
                            "{} apply --repository {} --branch {:?}",
                            pijul_cmd, esc_path, remote_branch
                        )
                    );
                    session.exec(
                        chan,
                        false,
                        &format!(
                            "{} apply --repository {} --branch {:?}",
                            pijul_cmd, esc_path, remote_branch
                        ),
                    );
                    futures::stream::iter_ok(patch_hashes.iter())
                        .fold((session, Vec::new()), move |(session, buf), hash| {
                            let mut pdir = pdir.clone();
                            pdir.push(hash.to_base58());
                            pdir.set_extension("gz");
                            let f = std::fs::File::open(&pdir).unwrap();
                            pdir.pop();
                            SendFile {
                                f: f,
                                buf: Some(buf),
                                chan: chan,
                                state: Some(SendFileState::Read(session)),
                            }
                        })
                        .and_then(move |(mut session, _)| {
                            session.channel_eof(chan);
                            session
                                .wait(move |session| {
                                    session.handler().exit_status.get(&chan).is_some()
                                })
                                .map(move |mut session| {
                                    exit_status =
                                        session.handler().exit_status.get(&chan).map(|x| *x);
                                    session.channel_close(chan);
                                    session
                                })
                        })
                        .map_err(From::from)
                },
            )).unwrap(),
        );
                .run(
                                })
                                .and_then(move |mut session| {
                )
                .unwrap(),
        path: Option<&str>,
    ) -> Result<HashMap<Hash, ApplyTimestamp>> {
        if path.is_some() {
            return Err(ErrorKind::PartialPullOverHttp.into());
        let mut req = reqwest_async::Request::new(reqwest::Method::Get, uri.parse().unwrap());
        let res: Vec<u8> = self.l.run(self.client.execute(req).and_then(
        local_file: &Path,
        local_tmp_file: &Path,
    ) -> Result<()> {
            uri.to_string() + "/" + PIJUL_DIR_NAME + "/patches/" + &patch_hash.to_base58() + ".gz";
        let mut req = reqwest_async::Request::new(reqwest::Method::Get, uri.parse().unwrap());
        let res = l.run(client.execute(req).and_then(|resp| {
            if resp.status() == reqwest::StatusCode::Ok {
                let res = Vec::new();
                futures::future::Either::A(
                    resp.into_body()
                        .fold(res, |mut res, x| {
                        })
                        .map(|body| {
                        }),
                )
            } else {
                futures::future::Either::B(futures::finished(None))
            }
        })).unwrap();
                patch_hash.to_owned(),
        path: Option<&str>,
    ) -> Result<HashMap<Hash, ApplyTimestamp>> {
            if let Some(path) = path {
                let inode = txn.find_inode(Path::new(path))?;
                let key = txn.get_inodes(inode).unwrap().key;
                Ok(txn.iter_patches(&branch, None)
                    .filter_map(|(hash, s)| {
                            Some((txn.get_external(hash).unwrap().to_owned(), s))
                        } else {
                            None
                    })
                    .collect())
                Ok(txn.iter_patches(&branch, None)
            Ok(HashMap::new())
        repo_root: &Path,
        patch_hashes: &HashSet<Hash>,
    ) -> Result<()> {
            match apply_resize(&self.path, &remote_branch, patch_hashes.iter(), None, |_, _| {}) {
                Err(ref e) if e.lacks_space() => {
                    debug!("lacks space")
                }
        remote_path: Option<&str>,
    ) -> Result<HashMap<Hash, ApplyTimestamp>> {
    pub fn download_patch(&mut self, repo_root: &Path, patch_hash: &Hash) -> Result<PathBuf> {
                    local_session.fetch_patch(patch_hash, &local_file)?
        repo_root: &Path,
        patch_hashes: &HashSet<Hash>,
    ) -> Result<()> {
            Session::Local(ref mut local_session) => {
                local_session.remote_apply(repo_root, remote_branch, patch_hashes)
            }
        target: &Path,
        remote_path: Option<&str>,
    ) -> Result<Pullable> {
            self.changes(remote_branch, remote_path)?
                .into_iter()
                .map(|(h, s)| (h.to_owned(), s))
                .collect();
        target: &Path,
        partial_path: Option<&Path>,
    ) -> Result<()> {
                continue
            let path = self.download_patch(&target, &hash)?;
            if partial_path.is_some() {
            pullable_plus_deps.push((hash.to_owned(), patch));
        let mut size_increase = 4096;
        let pristine_dir = pristine_dir(target).to_path_buf();
            let repo = Repository::open(&pristine_dir, Some(size_increase as u64))?;
            let mut txn = repo.mut_txn_begin(rand::thread_rng())?;
            match txn.apply_patches(to_branch, target, &pullable_plus_deps, partial_path, |c, _| {
                p.borrow_mut().display(c as u64)
            }) {
                Ok(()) => {
                    txn.commit()?;
                    break;
                }
                Err(ref e) if e.lacks_space() => size_increase *= 2,
                Err(e) => return Err(From::from(e)),
        source: &Path,
        remote_path: Option<&str>,
    ) -> Result<Vec<(Hash, Option<PatchId>, ApplyTimestamp)>> {
        let to_changes = try!(self.changes(to_branch, remote_path));
                    .map(|(hash, s)| {
                        (
                            txn.get_external(hash).unwrap().to_owned(),
                            Some(hash.to_owned()),
                            s,
                        )
                    })
        Ok(from_changes
            .into_iter()
            .filter(|&(ref h, _, _)| !to_changes.contains(h))
            .collect())
        source: &Path,
        pushable: &HashSet<Hash>,
    ) -> Result<()> {
            Remote::Local { ref path } => Ok(Session::Local(LocalSession {
                path: path.as_path(),
            Remote::Ssh {
                ref user,
                ref host,
                port,
                ref path,
                ref local_repo_root,
                ref pijul_cmd,
                ..
            } => {
                let addr = (*host, port.unwrap_or(22))
                    .to_socket_addrs()
                    .unwrap()
                    .next()
                    .unwrap();
                        .map_err(Error::from)
                        .and_then(|socket| {
                            let use_agent = handler.agent.is_some();
                            let connection = thrussh::client::Connection::new(
                                socket,
                                handler,
                                None,
                            debug!("connection done");
                            use super::ssh_auth_attempts::{AuthAttemptFuture, AuthAttempts};
                            let user = if let Some(ref user) = *user {
                                user.to_string()
                            } else {
                                username::get_user_name().unwrap()
                            };
                            let local_repo_root = local_repo_root.map(|x| x.to_path_buf());
                            Ok(AuthAttemptFuture::new(
                                connection,
                                AuthAttempts::new(user.clone(), local_repo_root, use_agent),
                                user,
                            ))
                        })
                        .flatten(),
                Ok(Session::Ssh(SshSession {
pub fn parse_remote<'a>(
    remote_id: &'a str,
    port: Option<u16>,
    base_path: Option<&'a Path>,
    local_repo_root: Option<&'a Path>,
) -> Remote<'a> {
                (
                    Some(cap.get(1).unwrap().as_str()),
                    cap.get(2).unwrap().as_str(),
                )
    local: &'a HashSet<Hash>,
            remote: self.remote.iter(),
                return Some((h.to_owned(), t));
use libpijul::fs_representation::{
    branch_changes_base_path, patch_file_name, RepoRoot, PIJUL_DIR_NAME,
};
use libpijul::patch::read_changes;
use libpijul::{
    apply_resize, apply_resize_no_output, apply_resize_patches, apply_resize_patches_no_output,
    ApplyTimestamp, ConflictingFile, Hash, Patch, PatchId, RepoPath, Repository,
};
use regex::Regex;
use reqwest;
use reqwest::async as reqwest_async;

use error::Error;
use std;
use std::collections::hash_set::HashSet;
use std::collections::HashMap;
use std::fs::{copy, hard_link, metadata, rename, File};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use error::{ErrorKind, Result, Error};

use commands::{ask, assert_no_containing_repo, create_repo};
use cryptovec;
use dirs;
use futures;
use futures::{Async, Future, Poll, Stream};
use meta;
use progrs;
use sequoia_openpgp::serialize::Serialize;
use shell_escape::unix::escape;
use std::borrow::Cow;
use std::io::prelude::*;
use std::io::BufReader;
use std::net::ToSocketAddrs;
use tempfile::tempdir_in;
use thrussh;
use thrussh_config;
use thrussh_keys;
use futures::{Future, Stream, Async, Poll};
use tokio;
use username;

#[derive(Debug)]
pub struct SshRemote<'a> {
    user: Option<&'a str>,
    host: &'a str,
    port: Option<u16>,
    path: &'a str,
    id: &'a str,
    local_repo_root: Option<&'a Path>,
    pijul_cmd: Cow<'static, str>,
}

#[derive(Debug)]
pub enum Remote<'a> {
    Ssh(SshRemote<'a>),
    Uri { uri: &'a str },
    Local { path: RepoRoot<PathBuf> },
}

pub enum Session<'a> {
    Ssh(SshSession<'a>),
    Uri(UriSession<'a>),
    Local(LocalSession<'a>)
    Local(LocalSession<'a>),
}

pub struct SshSession<'a> {
    pub l: tokio::runtime::Runtime,
    path: &'a str,
    pijul_cmd: &'a str,
    pub session: Option<thrussh::client::Connection<thrussh_config::Stream, Client>>,
}

pub struct UriSession<'a>
{
pub struct UriSession<'a> {
    l: tokio::runtime::Runtime,
    uri: &'a str,
    client: reqwest_async::Client
    client: reqwest_async::Client,
}

pub struct LocalSession<'a> { path: &'a Path }
pub struct LocalSession<'a> {
    root: RepoRoot<&'a Path>,
}

impl <'a> Drop for SshSession<'a> {
impl<'a> Drop for SshSession<'a> {
    fn drop(&mut self) {
        if let Some(mut session) = self.session.take() {
            debug!("disconnecting");
            session.disconnect(thrussh::Disconnect::ByApplication, "finished", "EN");
            if let Err(e) = self.l.block_on(session) {
                error!("While dropping SSH Session: {:?}", e);
            }
        }
    }
}

#[cfg(unix)]
use thrussh_keys::agent::client::AgentClient;
#[cfg(unix)]
use tokio_uds::UnixStream;

pub struct Client {
    pub exit_status: HashMap<thrussh::ChannelId, u32>,
    state: State,
    host: String,
    port: u16,
    channel: Option<thrussh::ChannelId>,
    #[cfg(unix)]
    pub agent: Option<AgentClient<UnixStream>>,
    #[cfg(windows)]
    pub agent: Option<()>,
}

impl Client {
    #[cfg(unix)]
    fn new(port: Option<u16>, host: &str, l: &mut tokio::runtime::Runtime) -> Self {
        let agent = if let Ok(path) = std::env::var("SSH_AUTH_SOCK") {
            UnixStream::connect(path, h).ok().map(thrussh_keys::agent::client::AgentClient::connect)
            l.block_on(
                UnixStream::connect(path).map(thrussh_keys::agent::client::AgentClient::connect),
            )
            .ok()
        } else {
            None
        };
        debug!("Client::new(), agent: {:?}", agent.is_some());
        Client {
            exit_status: HashMap::new(),
            state: State::None,
            port: port.unwrap_or(22),
            host: host.to_string(),
            channel: None,
            agent,
        }
    }

    #[cfg(windows)]
    fn new(port: Option<u16>, host: &str, _: &mut tokio::runtime::Runtime) -> Self {
        Client {
            exit_status: HashMap::new(),
            state: State::None,
            port: port.unwrap_or(22),
            host: host.to_string(),
            channel: None,
            agent: None,
        }
    }
}

enum State {
    None,
    Changes { changes: HashMap<Hash, ApplyTimestamp>, },
    DownloadPatch { file: File },
    Changes {
        changes: Vec<(Hash, ApplyTimestamp)>,
    },
    DownloadPatch {
        file: File,
    },
    /*SendKey {
        key_pair: meta::SigningKeys,
    },*/
}

enum SendFileState {
    Read(thrussh::client::Connection<thrussh_config::Stream, Client>),
    Wait(thrussh::client::Data<thrussh_config::Stream, Client, Vec<u8>>),
}


struct SendFile {
    f: File,
    buf: Option<Vec<u8>>,
    chan: thrussh::ChannelId,
    state: Option<SendFileState>,
}

impl Future for SendFile {
    type Item = (
        thrussh::client::Connection<thrussh_config::Stream, Client>,
        Vec<u8>,
    );
    type Error = Error;
    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
        debug!("SendFile loop starting");
        loop {
            debug!("sendfile loop");
            match self.state.take() {
                Some(SendFileState::Read(c)) => {
                    debug!("read");
                    let mut buf = self.buf.take().unwrap();
                    buf.resize(BUFFER_SIZE, 0);
                    let len = self.f.read(&mut buf)?;
                    if len == 0 {
                        // If nothing has been read, return.
                        return Ok(Async::Ready((c, buf)));
                    }
                    buf.truncate(len);
                    debug!("sending {:?} bytes, {:?}", len, buf.len());
                    self.state = Some(SendFileState::Wait(c.data(self.chan, None, buf)));
                }
                Some(SendFileState::Wait(mut c)) => {
                    debug!("wait");
                    match c.poll()? {
                        Async::Ready((c, buf)) => {
                            self.buf = Some(buf);
                            self.state = Some(SendFileState::Read(c))
                        }
                        Async::NotReady => {
                            self.state = Some(SendFileState::Wait(c));
                            return Ok(Async::NotReady);
                        }
                    }
                }
                None => unreachable!(),
            }
        }
    }
}
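
`SendFile` above is a hand-written future that streams a patch file to the remote over an SSH channel in `BUFFER_SIZE` chunks, alternating between reading from the file and waiting for the channel to accept the data. For clarity, a plain synchronous sketch of the same chunking logic (no thrussh or futures; the chunk size and the writer are placeholders):

use std::io::{Read, Result, Write};

// Synchronous sketch of the chunked copy performed by SendFile above:
// read up to CHUNK bytes, stop at EOF, forward each chunk to the writer.
const CHUNK: usize = 8192; // placeholder for BUFFER_SIZE

fn send_file(mut f: impl Read, mut out: impl Write) -> Result<u64> {
    let mut buf = vec![0u8; CHUNK];
    let mut total = 0u64;
    loop {
        let len = f.read(&mut buf)?;
        if len == 0 {
            // Nothing left to read: the transfer is complete.
            return Ok(total);
        }
        out.write_all(&buf[..len])?;
        total += len as u64;
    }
}

fn main() -> Result<()> {
    let data = &b"example patch bytes"[..];
    let mut sink = Vec::new();
    let sent = send_file(data, &mut sink)?;
    assert_eq!(sent as usize, sink.len());
    Ok(())
}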




impl thrussh::client::Handler for Client {
    type Error = Error;
    type FutureUnit = futures::Finished<Client, Error>;
    type SessionUnit = futures::Finished<(Client, thrussh::client::Session), Error>;
    type FutureBool = futures::future::FutureResult<(Client, bool), Error>;
    type FutureSign = Box<futures::Future<Item = (Self, cryptovec::CryptoVec), Error = Self::Error>>;
    type FutureSign =
        Box<futures::Future<Item = (Self, cryptovec::CryptoVec), Error = Self::Error>>;

    fn auth_publickey_sign(mut self, key: &thrussh_keys::key::PublicKey, mut to_sign: cryptovec::CryptoVec) -> Self::FutureSign {
    #[cfg(unix)]
    fn auth_publickey_sign(
        mut self,
        key: &thrussh_keys::key::PublicKey,
        mut to_sign: cryptovec::CryptoVec,
    ) -> Self::FutureSign {
        debug!("auth_publickey_sign");
        if let Some(agent) = self.agent.take() {
            use thrussh_keys::encoding::Encoding;
            debug!("using agent");
            Box::new(
                agent.sign_request(key, &to_sign)
                agent
                    .sign_request(key, &to_sign)
                    .then(move |result| match result {
                        Ok((client, sig)) => {
                            debug!("sig = {:?}", sig);
                            if let Some(sig) = sig {
                                to_sign.extend_ssh_string(&sig[..]);
                            }
                            self.agent = Some(client);
                            futures::finished::<_, Error>((self, to_sign))
                        }
                        Err(e) => {
                            error!("SSH agent error: {:?}", e);
                            futures::finished((self, to_sign))
                        }
                    })
                    .from_err(),
            )
        } else {
            debug!("no agent");
            Box::new(futures::finished((self, to_sign)))
        }
    }

    fn data(mut self,
            channel: thrussh::ChannelId,
            stream: Option<u32>,
            data: &[u8],
            session: thrussh::client::Session)
            -> Self::SessionUnit {

        debug!("data ({:?}): {:?}", channel, &data[..std::cmp::min(data.len(), 100)]);
    fn data(
        mut self,
        channel: thrussh::ChannelId,
        stream: Option<u32>,
        data: &[u8],
        session: thrussh::client::Session,
    ) -> Self::SessionUnit {
        debug!(
            "data ({:?}): {:?}",
            channel,
            &data[..std::cmp::min(data.len(), 100)]
        );
        if stream == Some(1) {
            std::io::stderr().write(data).unwrap();
        } else if stream == None {

            match self.state {
                State::None => {
                    std::io::stdout().write(data).unwrap();
                }
                State::Changes { ref mut changes } => {
                    let data = std::str::from_utf8(data).unwrap();
                    for l in data.lines() {
                        let mut spl = l.split(':');
                        if let (Some(h), Some(s)) = (spl.next(), spl.next()) {
                            if let (Some(h), Ok(s)) = (Hash::from_base58(h), s.parse()) {
                                changes.push((h, s));
                            }
                        }
                    }
                }
                State::DownloadPatch { ref mut file, .. } => {
                    file.write_all(data).unwrap();
                }
            }
        } else {
            debug!("SSH data received on channel {:?}: {:?} {:?}",
                   channel,
                   stream,
                   data);
            debug!(
                "SSH data received on channel {:?}: {:?} {:?}",
                channel, stream, data
            );
        }
        futures::finished((self, session))
    }
    fn exit_status(
        mut self,
        channel: thrussh::ChannelId,
        exit_status: u32,
        session: thrussh::client::Session,
    ) -> Self::SessionUnit {
        debug!(
            "exit_status received on channel {:?}: {:?}:",
            channel, exit_status
        );
        debug!("self.channel = {:?}", self.channel);
        if let Some(c) = self.channel {
            if channel == c {
                self.exit_status.insert(channel, exit_status);
            }
        }
        debug!("self.exit_status = {:?}", self.exit_status);
        futures::finished((self, session))
    }

    fn check_server_key(
        self,
        server_public_key: &thrussh_keys::key::PublicKey,
    ) -> Self::FutureBool {
        let path = dirs::home_dir().unwrap().join(".ssh").join("known_hosts");
        match thrussh_keys::check_known_hosts_path(&self.host, self.port, server_public_key, &path)
        {
            Ok(true) => futures::done(Ok((self, true))),
            Ok(false) => {
                if let Ok(false) = ask::ask_learn_ssh(&self.host, self.port, "") {
                    // TODO
                    // &server_public_key.fingerprint()) {

                    futures::done(Ok((self, false)))

                } else {
                    thrussh_keys::learn_known_hosts_path(
                        &self.host,
                        self.port,
                        server_public_key,
                        &path,
                    )
                    .unwrap();
                    futures::done(Ok((self, true)))
                }
            }
            Err(e) => {
                if let thrussh_keys::Error::KeyChanged(line) = e {
                    println!(
                        "Host key changed! Someone might be eavesdropping this communication, \
                         refusing to continue. Previous key found line {}",
                        line
                    );
                    futures::done(Ok((self, false)))
                } else {
                    futures::done(Err(From::from(e)))
                }
            }
        }
    }
}

const BUFFER_SIZE: usize = 1 << 14; // 16 KiB.

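// An SSH remote. `changes` executes the remote pijul `log --hash-only`
// command; the `hash:timestamp` lines it prints are parsed by the
// `State::Changes` branch of the `data` handler above.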
impl<'a> SshSession<'a> {
    pub fn changes(
        &mut self,
        branch: &str,
        path: &[RepoPath<impl AsRef<Path>>],
    ) -> Result<Vec<(Hash, ApplyTimestamp)>, Error> {
        let esc_path = escape(Cow::Borrowed(self.path));
        let cmd = format!("{} log --repository {} --branch {:?} --hash-only",
                          self.pijul_cmd,
                          esc_path,
                          branch);
        let mut cmd = format!(
            "{} log --repository {} --branch {:?} --hash-only",
            self.pijul_cmd, esc_path, branch
        );
        for p in path {
            cmd.push_str(&format!(" --path {}", p.display()))
        }

        if let Some(ref mut session) = self.session {
            session.handler_mut().state = State::Changes {
                changes: Vec::new(),
            }
        }
        let mut channel = None;
        self.session = Some(
            self.l
                .block_on(
                    self.session
                        .take()
                        .unwrap()
                        .channel_open_session()
                        .and_then(move |(mut connection, chan)| {
                            debug!("exec: {:?}", cmd);
                            channel = Some(chan);
                            connection.handler_mut().exit_status.remove(&chan);
                            connection.handler_mut().channel = Some(chan);
                            connection.exec(chan, false, &cmd);
                            connection.channel_eof(chan);
                            // Wait until channel close.
                            debug!("waiting channel close");
                            connection
                                .wait(move |session| {
                                    session.handler().exit_status.get(&chan).is_some()
                                })
                                .and_then(move |mut session| {
                                    if session.is_channel_open(chan) {
                                        session.channel_close(chan);
                                    }
                                    session.wait(move |session| !session.is_channel_open(chan))
                                })
                        }),
                )
                .unwrap(),
        );

        if let Some(ref session) = self.session {
            if let Some(channel) = channel {
                if let Some(&exit_code) = session.handler().exit_status.get(&channel) {
                    debug!("exit_code = {:?}", exit_code);
                    if exit_code != 0 {
                        return Ok(Vec::new());
                    }
                }
            }
        }
        if let Some(ref mut session) = self.session {
            match std::mem::replace(&mut session.handler_mut().state, State::None) {
                State::Changes { changes } => {
                    debug!("changes: {:?}", changes);
                    Ok(changes)
                }
                _ => unreachable!(),
            }
        } else {
            unreachable!()
        }
    }

    pub fn send_key(&mut self, key_pair: meta::SigningKeys) -> Result<(), Error> {
        if let Some(ref mut session) = self.session {
            // session.handler_mut().state = State::SendKey { };
            session.handler_mut().channel = None;
        }
        let challenge_cmd = format!("{} key register", self.pijul_cmd);
        let mut data = Vec::new();
        key_pair.tsk.tpk().serialize(&mut data)?;
        self.session = Some(
            self.l.block_on(
                self.session
                    .take()
                    .unwrap()
                    .channel_open_session()
                    .and_then(move |(mut session, channelid)| {
                        session.exec(channelid, false, &challenge_cmd);
                        session
                            .data(channelid, None, data)
                            .and_then(move |(mut session, _)| {
                                session.channel_eof(channelid);
                                session.handler_mut().channel = Some(channelid);
                                session.wait(move |session| {
                                    session.handler().exit_status.get(&channelid).is_some()
                                })
                            })
                    }),
            )?,
        );
        Ok(())
    }

    pub fn fetch_patch(
        &mut self,
        patch_hash: &Hash,
        local_file: PathBuf,
        local_tmp_file: PathBuf,
    ) -> Result<PathBuf, Error> {
        let esc_path = escape(Cow::Borrowed(self.path));
        let cmd = format!("{} patch --repository {} --bin {}",
                          self.pijul_cmd,
                          esc_path,
                          patch_hash.to_base58());
        let cmd = format!(
            "{} patch --repository {} --bin {}",
            self.pijul_cmd,
            esc_path,
            patch_hash.to_base58()
        );
        debug!("cmd {:?} {:?}", cmd, local_file);
        if let Some(ref mut session) = self.session {
            session.handler_mut().state = State::DownloadPatch {
                file: File::create(&local_tmp_file)?,
            };
            session.handler_mut().channel = None;
        }
        self.session = Some(
            self.l
                .block_on(
                    self.session
                        .take()
                        .unwrap()
                        .channel_open_session()
                        .and_then(move |(mut connection, chan)| {
                            connection.handler_mut().exit_status.remove(&chan);
                            connection.handler_mut().channel = Some(chan);
                            connection.exec(chan, false, &cmd);
                            connection.channel_eof(chan);
                            connection
                                .wait(move |session| {
                                    session.handler().exit_status.get(&chan).is_some()
                                })
                                .and_then(move |mut session| {
                                    if session.is_channel_open(chan) {
                                        session.channel_close(chan);
                                    }
                                    session.wait(move |session| !session.is_channel_open(chan))
                                })
                        }),
                )
                .unwrap(),
        );

        if let Some(ref mut session) = self.session {
            if let State::DownloadPatch { mut file, .. } =
                std::mem::replace(&mut session.handler_mut().state, State::None)
            {
                file.flush()?;
                rename(&local_tmp_file, &local_file)?;
            }
        }
        Ok(local_file)
    }

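    // Push over SSH: one channel runs `pijul apply`, a second one runs
    // `pijul sign`, and each local patch file (plus its `.sig`, when present)
    // is streamed to the appropriate channel with the `SendFile` future.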
    pub fn remote_apply(
        &mut self,
        repo_root: &RepoRoot<impl AsRef<Path>>,
        remote_branch: &str,
        patch_hashes: Vec<Hash>,
    ) -> Result<(), Error> {
        let pdir = repo_root.patches_dir();
        let mut exit_status = None;
        let esc_path = escape(Cow::Borrowed(&self.path));
        let apply_cmd = format!(
            "{} apply --repository {} --branch {:?}",
            self.pijul_cmd, esc_path, remote_branch
        );
        let sign_cmd = format!("{} sign --repository {}", self.pijul_cmd, esc_path);

        let session = self.session.take().unwrap();

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
                        .fold((session, Vec::new()), move |(session, buf), hash| {

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
                                futures::future::Either::A((SendFile {

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

================================
        self.session = Some(
            self.l
                .block_on(
                    session
                        .channel_open_session()
                        .and_then(move |(session, chan0)| {
                            session
                                .channel_open_session()
                                .and_then(move |(mut session, chan1)| {
                                    session.handler_mut().exit_status.remove(&chan0);
                                    session.handler_mut().channel = Some(chan0);
                                    debug!("exec {:?}", apply_cmd);
                                    session.exec(chan0, false, &apply_cmd);
                                    debug!("exec {:?}", sign_cmd);
                                    session.exec(chan1, false, &sign_cmd);
                                    futures::stream::iter_ok(patch_hashes.into_iter())
                                        .fold((session, Vec::new()), move |(session, buf), hash| {
                                            let mut pdir = pdir.clone();
                                            pdir.push(hash.to_base58());
                                            pdir.set_extension("gz");
                                            let f = std::fs::File::open(&pdir).unwrap();
                                            pdir.set_extension("sig");
                                            if let Ok(sig) = std::fs::File::open(&pdir) {
                                                futures::future::Either::A(
                                                    (SendFile {
                                                        f: f,
                                                        buf: Some(buf),
                                                        chan: chan0,
                                                        state: Some(SendFileState::Read(session)),
                                                    })
                                                    .and_then(move |(session, mut buf)| {
                                                        buf.clear();
                                                        SendFile {
                                                            f: sig,
                                                            buf: Some(buf),
                                                            chan: chan1,
                                                            state: Some(SendFileState::Read(
                                                                session,
                                                            )),
                                                        }
                                                    }),
                                                )
                                            } else {
                                                futures::future::Either::B(SendFile {
                                                    f: f,
                                                    buf: Some(buf),
                                                    chan: chan0,
                                                    state: Some(SendFileState::Read(session)),
                                                })
                                            }
                                        })
                                        .and_then(move |(mut session, _)| {
                                            session.channel_eof(chan0);
                                            session
                                                .wait(move |session| {
                                                    session
                                                        .handler()
                                                        .exit_status
                                                        .get(&chan0)
                                                        .is_some()
                                                })
                                                .map(move |mut session| {
                                                    exit_status = session
                                                        .handler()
                                                        .exit_status
                                                        .get(&chan0)
                                                        .map(|x| *x);
                                                    session.channel_close(chan0);
                                                    session
                                                })
                                        })
                                        .map_err(From::from)

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
                                })
                        }),
                )
                .unwrap(),
        );

        if let Some(ref session) = self.session {
            debug!("exit status = {:?}", session.handler().exit_status);
        }
        Ok(())
    }

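    // Create a repository on the remote host by running pijul's `init`
    // command there over SSH.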
    pub fn remote_init(&mut self) -> Result<(), Error> {
        let esc_path = escape(Cow::Borrowed(self.path));
        let cmd = format!("{} init {}", self.pijul_cmd, esc_path);
        debug!("command line:{:?}", cmd);

        self.session = Some(
            self.l
                .block_on(
                    self.session
                        .take()
                        .unwrap()
                        .channel_open_session()
                        .and_then(move |(mut session, chan)| {
                            debug!("chan = {:?}", chan);
                            session.handler_mut().exit_status.remove(&chan);
                            session.handler_mut().channel = Some(chan);
                            session.exec(chan, false, &cmd);
                            session.channel_eof(chan);
                            // Wait until channel close.
                            session
                                .wait(move |session| {
                                    session.handler().exit_status.get(&chan).is_some()
                                })
                                .and_then(move |mut session| {
                                    if session.is_channel_open(chan) {
                                        session.channel_close(chan);
                                    }
                                    session.wait(move |session| !session.is_channel_open(chan))
                                })
                        }),
                )
                .unwrap(),
        );
        Ok(())
    }
}

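// An HTTP(S) remote, read-only: `changes` downloads the branch's changes file
// from the repository's `PIJUL_DIR_NAME` directory. Partial pulls are refused
// here because a plain HTTP server cannot filter patches by path.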
impl<'a> UriSession<'a> {
    pub fn changes(
        &mut self,
        branch: &str,
        path: &[RepoPath<impl AsRef<Path>>],
    ) -> Result<Vec<(Hash, ApplyTimestamp)>, Error> {
        if !path.is_empty() {
            return Err(Error::PartialPullOverHttp);
        }
        let mut uri = self.uri.to_string();
        uri = uri + "/" + PIJUL_DIR_NAME + "/" + &branch_changes_base_path(branch);
        let mut req = reqwest_async::Request::new(reqwest::Method::GET, uri.parse().unwrap());
        req.headers_mut().insert(
            reqwest::header::CONNECTION,
            reqwest::header::HeaderValue::from_static("close"),
        );
        let res: Vec<u8> = self.l.block_on(self.client.execute(req).and_then(
            |resp: reqwest_async::Response| {
                let res = Vec::new();
                let body = resp.into_body();
                body.fold(res, |mut res, x| {
                    res.extend(x.iter());
                    futures::finished::<_, reqwest::Error>(res)
                })
            },
        ))?;
        let changes = read_changes(&mut &res[..]).unwrap_or(Vec::new());
        debug!("http: {:?}", changes);
        Ok(changes)
    }

    pub fn fetch_patch(
        &mut self,
        patch_hash: &Hash,
        local_file: PathBuf,
        local_tmp_file: PathBuf,
    ) -> Result<PathBuf, Error> {
        let ref mut l = self.l;
        let ref mut client = self.client;
        let uri = self.uri.to_string()
            + "/"
            + PIJUL_DIR_NAME
            + "/patches/"
            + &patch_hash.to_base58()
            + ".gz";
        debug!("downloading uri {:?}", uri);

        let req = reqwest_async::Request::new(reqwest::Method::GET, uri.parse().unwrap());
        let uri_sig = self.uri.to_string()
            + "/"
            + PIJUL_DIR_NAME
            + "/patches/"
            + &patch_hash.to_base58()
            + ".sig";
        debug!("{:?}", uri_sig);
        let req_sig = reqwest_async::Request::new(reqwest::Method::GET, uri_sig.parse().unwrap());
        let mut local_sig_file = local_file.clone();
        let mut local_tmp_sig_file = local_tmp_file.clone();
        local_sig_file.set_extension("sig");
        local_tmp_sig_file.set_extension("sig");

        let res = l
            .block_on(
                client
                    .execute(req)
                    .and_then(move |resp| {
                        if resp.status() == reqwest::StatusCode::OK {
                            let res = Vec::new();
                            futures::future::Either::A(
                                resp.into_body()
                                    .fold(res, |mut res, x| {
                                        res.extend(x.iter());
                                        futures::finished::<_, reqwest::Error>(res)
                                    })
                                    .map(|body| {
                                        // debug!("response={:?}", body);
                                        let mut f = File::create(&local_tmp_file).unwrap();
                                        f.write_all(&body).unwrap();
                                        // debug!("patch downloaded through http: {:?}", body);
                                        Some((local_tmp_file, local_file))
                                    }),
                            )
                        } else {
                            futures::future::Either::B(futures::finished(None))
                        }
                    })
                    .join(client.execute(req_sig).then(move |resp| {
                        let resp = if let Ok(resp) = resp {
                            resp
                        } else {
                            return futures::future::Either::B(futures::finished(None));
                        };
                        debug!("sig status {:?}", resp.status());
                        if resp.status() == reqwest::StatusCode::OK {
                            let res = Vec::new();
                            futures::future::Either::A(
                                resp.into_body()
                                    .fold(res, |mut res, x| {
                                        res.extend(x.iter());
                                        futures::finished::<_, reqwest::Error>(res)
                                    })
                                    .map(|body| {
                                        // debug!("response={:?}", body);
                                        let mut f = File::create(&local_tmp_sig_file).unwrap();
                                        f.write_all(&body).unwrap();
                                        // debug!("patch downloaded through http: {:?}", body);
                                        Some((local_tmp_sig_file, local_sig_file))
                                    }),
                            )
                        } else {
                            futures::future::Either::B(futures::finished(None))
                        }
                    })),
            )
            .unwrap();
        if let Some((local_tmp_file, local_file)) = res.0 {
            debug!("renaming {:?} to {:?}", local_tmp_file, local_file);
            rename(&local_tmp_file, &local_file)?;
            if let Some((local_tmp_sig_file, local_sig_file)) = res.1 {
                debug!("renaming {:?} to {:?}", local_tmp_sig_file, local_sig_file);
                rename(&local_tmp_sig_file, &local_sig_file).unwrap_or(());
            }
            Ok(local_file)
        } else {
            Err(Error::PatchNotFound {
                repo_root: self.uri.into(),
                patch_hash: patch_hash.to_owned(),
            })
        }
    }
}

impl<'a> LocalSession<'a> {
    pub fn changes(
        &mut self,
        branch: &str,
        path: &[RepoPath<impl AsRef<Path>>],
    ) -> Result<Vec<(Hash, ApplyTimestamp)>, Error> {
        let repo = self.root.open_repo(None)?;
        let txn = repo.txn_begin()?;
        if let Some(branch) = txn.get_branch(&branch) {
            if !path.is_empty() {
                let mut patches = Vec::new();
                for (hash, s) in txn.iter_patches(&branch, None) {
                    for path in path {
                        let inode = txn.find_inode(path).unwrap();
                        let key = txn.get_inodes(inode).unwrap().key;
                        if txn.get_touched(key, hash) {
                            patches.push((txn.get_external(hash).unwrap().to_owned(), s));
                            break;
                        }
                    }
                }
                Ok(patches)
            } else {
                Ok(txn
                    .iter_patches(&branch, None)
                    .map(|(hash, s)| (txn.get_external(hash).unwrap().to_owned(), s))
                    .collect())
            }
        } else {
            Ok(Vec::new())
        }
    }

    pub fn fetch_patch(
        &mut self,
        patch_hash: &Hash,
        local_file: PathBuf,
    ) -> Result<PathBuf, Error> {
        debug!("local downloading {:?}", patch_hash);
        let remote_file = self
            .root
            .patches_dir()
            .join(&patch_file_name(patch_hash.as_ref()));
        debug!("hard linking {:?} to {:?}", remote_file, local_file);
        if hard_link(&remote_file, &local_file).is_err() {
            copy(&remote_file, &local_file)?;
        }
        Ok(local_file)
    }

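    // "Remote" on the local filesystem: patches (and their signatures, when
    // present) are hard-linked, or copied as a fallback, into the target's
    // patches directory, then applied, retrying while the pristine reports
    // that it lacks space.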
    pub fn remote_apply(
        &mut self,
        repo_root: &RepoRoot<impl AsRef<Path>>,
        remote_branch: &str,
        patch_hashes: &Vec<Hash>,
    ) -> Result<Vec<ConflictingFile>, Error> {
        let mut remote_path = self.root.patches_dir();
        let mut local_path = repo_root.patches_dir();
        let remote_current_branch = self.root.get_current_branch()?;

        for hash in patch_hashes {
            remote_path.push(&hash.to_base58());
            remote_path.set_extension("gz");

            local_path.push(&hash.to_base58());
            local_path.set_extension("gz");

            debug!("hard linking {:?} to {:?}", local_path, remote_path);
            if metadata(&remote_path).is_err() {


                if hard_link(&local_path, &remote_path).is_err() {
                    copy(&local_path, &remote_path)?;
                }
            }

            remote_path.set_extension("sig");
            local_path.set_extension("sig");

            if metadata(&remote_path).is_err() && metadata(&local_path).is_ok() {
                if hard_link(&local_path, &remote_path).is_err() {
                    copy(&local_path, &remote_path)?;
                }
            }

            local_path.pop();
            remote_path.pop();
        }

        loop {
            let app = if remote_current_branch != remote_branch {
                apply_resize_no_output(&self.root, &remote_branch, patch_hashes.iter(), |_, _| {})
                    .map(|_| Vec::new())
            } else {
                apply_resize(
                    libpijul::DiffAlgorithm::default(),
                    &self.root,
                    &remote_branch,
                    patch_hashes.iter(),
                    &[] as &[RepoPath<&Path>],
                    |_, _| {},
                )
            };
            match app {
                Err(ref e) if e.lacks_space() => debug!("lacks space"),
                Ok(v) => return Ok(v),
                Err(e) => return Err(From::from(e)),
            }
        }
    }


}

#[derive(Debug, Clone)]
pub struct PushablePatches {
    pub pushable: Vec<(Hash, Option<PatchId>, ApplyTimestamp)>,
    pub non_fast_forward: Vec<Hash>,
}

impl<'a> Session<'a> {
    pub fn changes(
        &mut self,
        branch: &str,
        remote_path: &[RepoPath<impl AsRef<Path>>],
    ) -> Result<Vec<(Hash, ApplyTimestamp)>, Error> {
        match *self {
            Session::Ssh(ref mut ssh_session) => ssh_session.changes(branch, remote_path),
            Session::Local(ref mut local_session) => local_session.changes(branch, remote_path),
            Session::Uri(ref mut uri_session) => uri_session.changes(branch, remote_path),
        }
    }
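
    // Fetch a patch unless it is already present in the local patches
    // directory; SSH and HTTP remotes download into a temporary file first.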
    pub fn download_patch(
        &mut self,
        repo_root: &RepoRoot<impl AsRef<Path>>,
        patch_hash: &Hash,
    ) -> Result<PathBuf, Error> {
        let patches_dir_ = repo_root.patches_dir();
        let local_file = patches_dir_.join(&patch_file_name(patch_hash.as_ref()));

        if !metadata(&local_file).is_ok() {
            match *self {
                Session::Local(ref mut local_session) => {
                    local_session.fetch_patch(patch_hash, local_file)
                }
                Session::Ssh(ref mut ssh_session) => {
                    let tmp_dir = tempdir_in(&patches_dir_)?;
                    let local_tmp_file = tmp_dir.path().join("patch");
                    ssh_session.fetch_patch(patch_hash, local_file, local_tmp_file)
                }
                Session::Uri(ref mut uri_session) => {
                    let tmp_dir = tempdir_in(&patches_dir_)?;
                    let local_tmp_file = tmp_dir.path().join("patch");
                    uri_session.fetch_patch(patch_hash, local_file, local_tmp_file)
                }
            }
        } else {
            Ok(local_file)
        }
    }

    fn remote_apply(
        &mut self,
        repo_root: &RepoRoot<impl AsRef<Path>>,
        remote_branch: &str,
        patch_hashes: Vec<Hash>,
    ) -> Result<(), Error> {
        match *self {
            Session::Ssh(ref mut ssh_session) => {
                ssh_session.remote_apply(repo_root, remote_branch, patch_hashes)
            }

            Session::Local(ref mut local_session) => local_session
                .remote_apply(repo_root, remote_branch, &patch_hashes)
                .map(|_| ()),

            _ => panic!("upload to URI impossible"),
        }
    }


    pub fn remote_init(&mut self) -> Result<(), Error> {
        match *self {
            Session::Ssh(ref mut ssh_session) => ssh_session.remote_init(),
            Session::Local(ref mut local_session) => {
                assert_no_containing_repo(local_session.root.repo_root)?;
                create_repo(local_session.root.repo_root)
            }
            _ => panic!("remote init not possible"),
        }
    }

    pub fn pullable_patches(
        &mut self,
        remote_branch: &str,
        local_branch: &str,
        target: &RepoRoot<impl AsRef<Path>>,
        remote_path: &[RepoPath<impl AsRef<Path>>],
    ) -> Result<Pullable, Error> {
        let mut remote_patches: Vec<(Hash, ApplyTimestamp)> = self
            .changes(remote_branch, remote_path)?
            .into_iter()
            .map(|(h, s)| (h.to_owned(), s))
            .collect();
        remote_patches.sort_by(|&(_, ref a), &(_, ref b)| a.cmp(&b));
        let local_patches: HashMap<Hash, ApplyTimestamp> = {
            let repo_dir = target.pristine_dir();
            let repo = Repository::open(&repo_dir, None)?;
            let txn = repo.txn_begin()?;
            if let Some(branch) = txn.get_branch(&local_branch) {
                txn.iter_patches(&branch, None)
                    .map(|(hash, s)| (txn.get_external(hash).unwrap().to_owned(), s))
                    .collect()
            } else {
                HashMap::new()
            }
        };
        debug!("pullable done: {:?}", remote_patches);
        Ok(Pullable {
            local: local_patches.iter().map(|(h, _)| h.to_owned()).collect(),
            remote: remote_patches.into_iter().collect(),
        })
    }

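    // Pull: download every selected patch and, for partial pulls, any missing
    // dependencies, then apply everything to `to_branch`, doubling the
    // pristine's size increase whenever it runs out of space.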
    pub fn pull(
        &mut self,
        target: &RepoRoot<impl AsRef<Path>>,
        to_branch: &str,
        pullable: &mut Vec<(Hash, ApplyTimestamp)>,
        partial_paths: &[RepoPath<impl AsRef<Path>>],
        display_progress: bool,
    ) -> Result<Vec<ConflictingFile>, Error> {
        let mut p = if display_progress && !pullable.is_empty() {
            Some((progrs::start("Pulling patches", pullable.len() as u64), 0))
        } else {
            None
        };
        let mut pullable_plus_deps = Vec::new();
        let mut pulled = HashSet::new();

        while let Some((hash, _)) = pullable.pop() {
            if pulled.contains(&hash) {
                continue;
            }
            debug!("hash = {:?}", hash);
            let path = self.download_patch(target, &hash)?;

            let patch = {
                let file = File::open(&path)?;
                let mut file = BufReader::new(file);
                Patch::from_reader_compressed(&mut file)?.2
            };
            pulled.insert(hash.clone());

            // If the apply is partial, we might not have all the
            // dependencies. Add them to this list.
            if !partial_paths.is_empty() {
                for dep in patch.dependencies() {
                    if !pulled.contains(dep) {
                        pullable.push((dep.to_owned(), 0));
                    }
                }
            }

            pullable_plus_deps.push((hash.to_owned(), patch));

            p.as_mut().map(|&mut (ref mut p, ref mut n)| {
                p.display({
                    *n = *n + 1;
                    *n
                })
            });
        }

        // Because we've been popping the stack of pullable patches in
        // reverse order, we need to reverse the result.
        pullable_plus_deps.reverse();

        p.map(|(p, _)| p.stop("done"));
        debug!("patches downloaded");

        let p = std::cell::RefCell::new(progrs::start(
            "Applying patches",
            pullable_plus_deps.len() as u64,
        ));
        let mut size_increase = 4096;
        let current_branch = target.get_current_branch()?;
        let conflicts = loop {
            let app = if current_branch != to_branch {
                apply_resize_patches_no_output(
                    &target,
                    &to_branch,
                    &pullable_plus_deps,
                    size_increase,
                    |c, _| p.borrow_mut().display(c as u64),
                )
                .map(|_| Vec::new())
            } else {
                apply_resize_patches(
                    libpijul::DiffAlgorithm::default(),
                    &target,
                    &to_branch,
                    &pullable_plus_deps,
                    size_increase,
                    partial_paths,
                    |c, _| p.borrow_mut().display(c as u64),
                )
            };
            match app {
                Ok(conflicts) => break conflicts,
                Err(ref e) if e.lacks_space() => size_increase *= 2,
                Err(e) => return Err(e.into()),
            }
        };
        p.into_inner().stop("done");
        Ok(conflicts)
    }

    /// Returns a vector of pushable patches, and a vector of changes
    /// present on the remote branch but not on the local one (to
    /// identify fast-forward pushes).
    pub fn pushable_patches(
        &mut self,
        from_branch: &str,
        to_branch: &str,
        source: &RepoRoot<impl AsRef<Path> + std::fmt::Debug>,

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
        remote_paths: &[&str],

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    ) -> Result<Vec<(Hash, Option<PatchId>, ApplyTimestamp)>, Error> {

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

================================
        remote_paths: &[RepoPath<impl AsRef<Path>>],

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    ) -> Result<PushablePatches, Error> {
        debug!("source: {:?}", source);

        let mut non_fast_forward = Vec::new();

        let to_changes_ = self.changes(to_branch, remote_paths)?;

        let repo = source.open_repo(None)?;
        let txn = repo.txn_begin()?;

        let mut to_changes = HashSet::new();
        let branch = txn.get_branch(&from_branch);
        for (h, _) in to_changes_.iter() {
            if let Some(ref branch) = branch {
                if let Some(patchid) = txn.get_internal(h.as_ref()) {
                    if txn.get_patch(&branch.patches, patchid).is_none() {
                        non_fast_forward.push(h.clone())
                    }
                } else {
                    non_fast_forward.push(h.clone())
                }
            }
            to_changes.insert(h.as_ref());
        }
        debug!("to_changes: {:?}", to_changes);
        let from_changes: Vec<_> = {
            if let Some(branch) = txn.get_branch(&from_branch) {
                txn.iter_applied(&branch, None)
                    .filter_map(|(s, patchid)| {
                        if let Some(hash) = txn.get_external(patchid) {
                            if to_changes.contains(&hash) {
                                None
                            } else {
                                Some((hash.to_owned(), Some(patchid), s))
                            }
                        } else {
                            None
                        }
                    })
                    .collect()
            } else {
                Vec::new()
            }
        };
        debug!("pushing: {:?}", from_changes);
        Ok(PushablePatches {
            pushable: from_changes,
            non_fast_forward,
        })
    }

    pub fn push(
        &mut self,
        source: &RepoRoot<impl AsRef<Path>>,
        remote_branch: &str,
        pushable: Vec<Hash>,
    ) -> Result<(), Error> {
        debug!("push, remote_applying");
        debug!("pushable: {:?}", pushable);
        if pushable.len() > 0 {
            self.remote_apply(source, remote_branch, pushable)?;
        }
        Ok(())
    }
}


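// Resolve connection parameters from the user's OpenSSH configuration (via
// thrussh_config), falling back to the given host, port, and user, and open
// either a direct TCP stream or a ProxyCommand stream.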
pub fn ssh_connect(
    user: &Option<&str>,
    host: &str,
    port: Option<u16>,
) -> Result<(thrussh_config::Config, thrussh_config::ConnectFuture), Error> {
    let mut ssh_config =
        thrussh_config::parse_home(host).unwrap_or(thrussh_config::Config::default());
    debug!("ssh_config = {:?}", ssh_config);

    if ssh_config.host_name.is_none() {
        ssh_config.host_name = Some(host.to_string())
    }

    if let Some(port) = port {
        ssh_config.port = Some(port)
    } else if ssh_config.port.is_none() {
        ssh_config.port = Some(22)
    }

    if let Some(ref user) = *user {
        ssh_config.user = Some(user.to_string())
    } else if ssh_config.user.is_none() {
        ssh_config.user = Some(username::get_user_name().unwrap())
    }

    ssh_config.update_proxy_command();
    let stream = if let Some(ref proxycmd) = ssh_config.proxy_command {
        debug!("{:?}", proxycmd);
        thrussh_config::Stream::proxy_command("sh", &["-c", proxycmd.as_str()])
    } else {
        let addr = if let Some(addrs) = (
            ssh_config.host_name.as_ref().unwrap().as_str(),
            ssh_config.port.unwrap(),
        )
            .to_socket_addrs()?
            .next()
        {
            addrs
        } else {
            return Err(Error::UnknownHost {
                host: host.to_string(),
            });
        };
        debug!("addr = {:?}", addr);
        thrussh_config::Stream::tcp_connect(&addr)
    };
    Ok((ssh_config, stream))
}

impl<'a> Remote<'a> {
    pub fn session(&'a self) -> Result<Session<'a>, Error> {
        match *self {
            Remote::Local {
                path: RepoRoot {
                    repo_root: ref path,
                },
            } => Ok(Session::Local(LocalSession {
                root: RepoRoot { repo_root: path },
            })),
            Remote::Uri { uri } => {
                let l = tokio::runtime::Runtime::new().unwrap();
                let proxy_url = std::env::var("http_proxy");
                let c = match proxy_url {
                    Err(std::env::VarError::NotPresent) => reqwest_async::Client::new(),
                    Ok(p_url) => reqwest_async::Client::builder()
                        .proxy(reqwest::Proxy::all(reqwest::Url::parse(&p_url).unwrap())?)
                        .build()?,
                    Err(std::env::VarError::NotUnicode(s)) => {
                        panic!("invalid http_proxy value: {:?}", s)
                    }
                };
                Ok(Session::Uri(UriSession {
                    l,
                    uri: uri,
                    client: c,
                }))
            }
            Remote::Ssh { ref user, ref host, port, ref path, ref local_repo_root, ref pijul_cmd, .. } => {



                let addr = (*host, port.unwrap_or(22)).to_socket_addrs().unwrap().next().unwrap();

use clap::{Arg, ArgMatches, SubCommand};
use libpijul::{InodeUpdate, Repository};
use std::path::{Path, PathBuf};
use super::ask::{ask_changes, ChangesDirection};
        );
    r: &Path,
    prefixes: Option<&Vec<PathBuf>>,
) -> Result<()> {
            let mut inode = txn.find_inode(pref).unwrap();
            let mut prefix = Vec::new();
            prefix.push(inode);
            while let Some(parent) = txn.get_revtree(inode) {
                prefix.push(parent.parent_inode);
                inode = parent.parent_inode;
            }
            inode_prefixes.push(prefix)
    txn.output_repository(&branch, &r, &inode_prefixes[..], pending, pending_syncs)?;
use super::record;
use chrono;
use clap::{Arg, ArgMatches, SubCommand};
use commands::{default_explain, BasicOptions, StaticSubcommand};
use error::Error;
use libpijul::fs_representation::{RepoPath, RepoRoot};
use libpijul::patch::{Patch, PatchFlags, UnsignedPatch};
use libpijul::{Inode, InodeUpdate, Repository, ToPrefixes};
use rand;
use std;
use super::ask::{ChangesDirection, ask_changes};
use std::collections::HashSet;
use std::path::{Path, PathBuf};

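// Command-line definition of `pijul revert`. The `--patience` and `--myers`
// flags select the diff algorithm used to compute the pending changes before
// the working copy is rewritten.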
pub fn invocation() -> StaticSubcommand {
    return SubCommand::with_name("revert")
        .about("Rewrite the working copy from the pristine")
        .arg(Arg::with_name("repository")
             .long("repository")
             .takes_value(true)
             .help("Local repository."))
        .arg(Arg::with_name("all")
             .short("a")
             .long("all")
             .help("Answer 'y' to all questions")
             .takes_value(false))
        .arg(Arg::with_name("branch")
             .help("Branch to revert to.")
             .long("branch")
             .takes_value(true))
        .arg(Arg::with_name("prefix")
             .help("Prefix to start from")
             .takes_value(true)
             .multiple(true))
        .arg(
            Arg::with_name("repository")
                .long("repository")
                .takes_value(true)
                .help("Local repository."),
        )
        .arg(
            Arg::with_name("all")
                .short("a")
                .long("all")
                .help("Answer 'y' to all questions")
                .takes_value(false),
        )
        .arg(
            Arg::with_name("branch")
                .help("Branch to revert to.")
                .long("branch")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("prefix")
                .help("Prefix to start from")
                .takes_value(true)
                .multiple(true),
        )
        .arg(
            Arg::with_name("patience")
                .long("patience")
                .help("Use patience diff instead of the default (Myers diff)")
                .conflicts_with("myers")
                .takes_value(false),
        )
        .arg(
            Arg::with_name("myers")
                .long("myers")
                .help("Use Myers diff")
                .conflicts_with("patience")
                .takes_value(false),
        );
}
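
The --patience and --myers flags added above choose the diff algorithm used when computing the pending patch before the working copy is rewritten. As a rough illustration (not part of the patch itself; the repository layout is assumed), a prefix-restricted revert could be invoked like this:

    # revert only the a/b subtree, computing the pending diff with patience
    pijul revert -a --patience a/b
    # same, forcing Myers explicitly on another branch
    pijul revert -a --myers --branch master a/b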

pub fn run(args: &ArgMatches) -> Result<(), Error> {
    let opts = BasicOptions::from_args(args)?;
    let yes_to_all = args.is_present("all");
    let branch_name = opts.branch();
    let prefix = record::prefix(args, &opts)?;
    // Generate the pending patch.
    let (pending, pending_syncs):(_, HashSet<_>) =
        if !yes_to_all || prefix.is_some() {
            let repo = opts.open_and_grow_repo(409600)?;
            let mut txn = repo.mut_txn_begin(rand::thread_rng())?;
            let (changes, syncs) = {
                let (changes, syncs) = record::changes_from_prefixes(
                    &opts.repo_root, &mut txn, &branch_name,
                    prefix.as_ref()
    let (pending, pending_syncs): (_, HashSet<_>) = if !yes_to_all || prefix.is_some() {
        let repo = opts.open_and_grow_repo(409600)?;
        let mut txn = repo.mut_txn_begin(rand::thread_rng())?;
        let (changes, syncs) = {
            let (changes, syncs) = record::changes_from_prefixes(
                if args.is_present("patience") {
                    libpijul::DiffAlgorithm::Patience
                } else {
                    libpijul::DiffAlgorithm::Myers
                },
                &opts.repo_root,
                &mut txn,
                &branch_name,
                prefix.as_ref(),
            )?;
            let changes: Vec<_> = changes
                .into_iter()
                .map(|x| txn.globalize_record(x))
                .collect();
            if yes_to_all {
                (Vec::new(), HashSet::new())
            } else {
                let (c, _empty_vec) = ask_changes(
                    &txn,
                    &opts.repo_root,
                    &opts.cwd,
                    &changes,
                    ChangesDirection::Revert,
                    &mut HashSet::new(),
                )?;
                let changes:Vec<_> = changes.into_iter().map(|x| txn.globalize_record(x)).collect();
                if yes_to_all {
                    (Vec::new(), HashSet::new())
                } else {
                    let (c, _empty_vec) = ask_changes(&txn, &opts.repo_root, &opts.cwd, &changes,
                                                      ChangesDirection::Revert, &mut HashSet::new())?;
                    let selected = changes.into_iter()
                        .enumerate()
                        .filter(|&(i, _)| *(c.get(&i).unwrap_or(&false)))
                        .map(|(_, x)| x)
                        .collect();
                    (selected, syncs)
                }
            };
            debug!("changes {:?}", changes);
            debug!("syncs {:?}", syncs);
            let branch = txn.get_branch(&branch_name).unwrap();
            let changes = changes.into_iter().flat_map(|x| x.into_iter()).collect();
            let patch = txn.new_patch(&branch,
                                      Vec::new(),
                                      String::new(),
                                      None,
                                      chrono::Utc::now(),
                                      changes,
                                      std::iter::empty(),
                                      PatchFlags::empty());
            txn.commit()?;
            (patch, syncs)
        } else {
            (UnsignedPatch::empty().leave_unsigned(), HashSet::new())
                let selected = changes
                    .into_iter()
                    .enumerate()
                    .filter(|&(i, _)| *(c.get(&i).unwrap_or(&false)))
                    .map(|(_, x)| x)
                    .collect();
                (selected, syncs)
            }
        };
        debug!("changes {:?}", changes);
        debug!("syncs {:?}", syncs);
        let branch = txn.get_branch(&branch_name).unwrap();
        let changes = changes.into_iter().flat_map(|x| x.into_iter()).collect();
        let patch = txn.new_patch(
            &branch,
            Vec::new(),
            String::new(),
            None,
            chrono::Utc::now(),
            changes,
            std::iter::empty(),
            PatchFlags::empty(),
        );
        txn.commit()?;
        (patch, syncs)
    } else {
        (UnsignedPatch::empty().leave_unsigned(), HashSet::new())
    };

    let mut size_increase = None;
    let pristine = opts.pristine_dir();
    loop {
        match output_repository(&opts.repo_root, &pristine, &branch_name, size_increase, prefix.as_ref(), &pending, &pending_syncs) {
        match output_repository(
            &opts.repo_root,
            &pristine,
            &branch_name,
            size_increase,
            prefix.as_ref(),
            &pending,
            &pending_syncs,
        ) {
            Err(ref e) if e.lacks_space() => {
                size_increase = Some(Repository::repository_size(&pristine).unwrap())
            },
            e => return e
            }
            e => return e,
        }
    }
}

fn output_repository(r: &Path, pristine_dir: &Path, branch: &str, size_increase: Option<u64>, prefixes: Option<&HashSet<PathBuf>>, pending: &Patch, pending_syncs: &HashSet<InodeUpdate>) -> Result<()> {
fn output_repository(
    r: &RepoRoot<impl AsRef<Path>>,
    pristine_dir: &Path,
    branch: &str,
    size_increase: Option<u64>,
    prefixes: Option<&Vec<RepoPath<PathBuf>>>,
    pending: &Patch,
    pending_syncs: &HashSet<InodeUpdate>,
) -> Result<(), Error> {
    let repo = Repository::open(&pristine_dir, size_increase)?;
    let mut txn = repo.mut_txn_begin(rand::thread_rng())?;
    txn.output_repository(&branch, &r, prefixes, pending, pending_syncs)?;

    let mut inode_prefixes = Vec::new();
    if let Some(prefixes) = prefixes {
        for pref in prefixes.iter() {
            inode_prefixes.push(txn.find_inode(pref).unwrap());
        }
    }
    for (_, key) in txn
        .iter_partials(branch)
        .take_while(|&(k, _)| k.as_str() == branch)
    {
        debug!("extra inode prefixes: {:?}", key);
        inode_prefixes.push(txn.get_revinodes(key).unwrap())
    }

    let mut branch = txn.open_branch(branch)?;
    let pref = (&inode_prefixes as &[Inode]).to_prefixes(&txn, &branch);
    debug!("{:?}", pref);
    txn.output_repository(&mut branch, &r, &pref, pending, pending_syncs)?;
    txn.commit_branch(branch)?;
    txn.commit()?;
    Ok(())
}


        match apply_resize(&opts.repo_root, &opts.branch(), iter::once(&hash), None, |_, _| ()) {
use clap::{Arg, ArgMatches, SubCommand};

use super::{ask, default_explain, validate_base58, BasicOptions, StaticSubcommand};
use meta::{load_signing_key, Global, Meta};
use std::collections::HashSet;
use std::path::Path;

use chrono;
use libpijul::fs_representation::{patch_file_name, RepoPath, RepoRoot};
use libpijul::patch::{Patch, PatchFlags};
use libpijul::{apply_resize, apply_resize_no_output, Hash, HashRef, PatchId};
use std::collections::HashMap;
use std::fs::File;
use std::io::BufReader;
use std::iter;
use std::mem::drop;
use std::str::FromStr;

use commands::record::{decide_authors, decide_patch_message, record_args};
use error::Error;

pub fn invocation() -> StaticSubcommand {
    record_args(
        SubCommand::with_name("rollback").arg(
            Arg::with_name("patch")
                .help("Patch to roll back.")
                .takes_value(true)
                .multiple(true)
                .validator(validate_base58),
        ),
    )
}
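
Since rollback wraps record_args, the usual record flags (author, message, and so on) are accepted here as well. A rough sketch of an invocation, with a placeholder hash not taken from the patch:

    # record and apply the inverse of an existing patch
    pijul rollback -A me -m "roll back feature X" <PATCH_HASH_BASE58>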

pub fn run(args: &ArgMatches) -> Result<(), Error> {
    let opts = BasicOptions::from_args(args)?;
    let patches: Option<HashSet<Hash>> = args
        .values_of("patch")
        .map(|ps| ps.map(|x| Hash::from_base58(x).unwrap()).collect());

    let mut increase = 409600;
    let repo = opts.open_and_grow_repo(increase)?;
    let branch_name = opts.branch();

    let mut patches: HashMap<_, _> = if let Some(ref patches) = patches {
        let txn = repo.txn_begin()?;
        if let Some(branch) = txn.get_branch(&branch_name) {
            let mut patches_ = HashMap::new();
            for h in patches.iter() {
                debug!("unrecording {:?}", h);

                if let Some(internal) = txn.get_internal(h.as_ref()) {
                    if txn.get_patch(&branch.patches, internal).is_some() {
                        let patch = load_patch(&opts.repo_root, h.as_ref());
                        patches_.insert(h.to_owned(), patch);
                        continue;
                    }
                }
                return Err(Error::BranchDoesNotHavePatch {
                    branch_name: branch.name.as_str().to_string(),
                    patch: h.to_owned(),
                });
            }
            patches_
        } else {
            HashMap::new()
        }
    } else {
        let mut patches: Vec<_> = {
            let txn = repo.txn_begin()?;
            if let Some(branch) = txn.get_branch(&branch_name) {
                txn.rev_iter_applied(&branch, None)
                    .map(|(t, h)| {
                        let ext = txn.get_external(h).unwrap();
                        let patch = load_patch(&opts.repo_root, ext);
                        (ext.to_owned(), Some(h.to_owned()), patch, t)
                    })
                    .collect()
            } else {
                Vec::new()
            }
        };
        patches.sort_by(|&(_, _, _, a), &(_, _, _, b)| b.cmp(&a));
        let patches: Vec<(Hash, Option<PatchId>, Patch)> =
            patches.into_iter().map(|(a, b, c, _)| (a, b, c)).collect();
        // debug!("patches: {:?}", patches);
        let to_unrecord = ask::ask_patches(ask::Command::Unrecord, &patches).unwrap();
        debug!("to_unrecord: {:?}", to_unrecord);
        let patches: HashMap<_, _> = patches
            .into_iter()
            .filter(|&(ref k, _, _)| to_unrecord.contains(&k))
            .map(|(k, _, p)| (k, p))
            .collect();
        patches
    };

    let mut selected = Vec::new();
    loop {
        let hash = if let Some((hash, patch)) = patches.iter().next() {
            increase += patch.size_upper_bound() as u64;
            hash.to_owned()
        } else {
            break;
        };
        deps_dfs(&mut selected, &mut patches, &hash)
    }

    // Create the inverse changes.
    let mut changes = Vec::new();
    for &(ref hash, ref patch) in selected.iter() {
        debug!("inverting {:?}", patch);
        patch.inverse(hash, &mut changes)
    }

    let meta = Meta::load(&opts.repo_root).unwrap_or_else(|_| Meta::new());
    let (global, save_global) = Global::load()
        .map(|g| (g, false))
        .unwrap_or_else(|_| (Global::new(), true));

    if save_global {
        global.save().unwrap_or(())
    }

    // Create the inverse patch, and save it.
    let patch = {
        let authors_arg = args.values_of("author").map(|x| x.collect::<Vec<_>>());
        let patch_name_arg = args.value_of("message");
        let patch_descr_arg = args.value_of("description");

        let txn = repo.txn_begin()?;
        let authors = decide_authors(authors_arg, &meta, &global)?;

        let patch_date = args.value_of("date").map_or(Ok(chrono::Utc::now()), |x| {
            chrono::DateTime::from_str(x).map_err(|_| Error::InvalidDate {
                date: String::from(x),
            })
        })?;

        let (name, description) = decide_patch_message(
            patch_name_arg,
            patch_descr_arg,
            String::from(""),
            !args.is_present("no-editor"),
            &opts.repo_root,
            &meta,
            &global,
        )?;

        if let Some(branch) = txn.get_branch(&branch_name) {
            txn.new_patch(
                &branch,
                authors,
                name,
                description,
                patch_date,
                changes,
                iter::empty(),
                PatchFlags::empty(),
            )
        } else {
            unimplemented!()
        }
    };
    let patches_dir = opts.repo_root.patches_dir();
    let mut key = meta
        .signing_key
        .or(global.signing_key)
        .and_then(|s| load_signing_key(s).ok());
    let hash = if let Some(ref mut key) = key {
        key.check_author(&patch.header().authors)?;
        patch.save(&patches_dir, key.keys.get_mut(0))?
    } else {
        patch.save(&patches_dir, None)?
    };
    drop(repo);
    println!("Recorded patch {}", hash.to_base58());

    let is_current_branch = if let Ok(br) = opts.repo_root.get_current_branch() {
        br == opts.branch()
    } else {
        false
    };

    // Apply the inverse patch.
    loop {
        match apply_resize(&opts.repo_root, &opts.branch(), iter::once(&hash),|_, _| ()) {
        let app = if !is_current_branch {
            apply_resize_no_output(
                &opts.repo_root,
                &opts.branch(),
                iter::once(&hash),
                |_, _| (),
            )
            .map(|_| Vec::new())
        } else {
            apply_resize(
                libpijul::DiffAlgorithm::default(),
                &opts.repo_root,
                &opts.branch(),
                iter::once(&hash),
                &[] as &[RepoPath<&Path>],
                |_, _| (),
            )
        };
        match app {
            Err(ref e) if e.lacks_space() => {}

use clap::{SubCommand, ArgMatches, Arg};
use commands::{BasicOptions, StaticSubcommand, default_explain};
use libpijul::fs_representation::{untracked_files};
use libpijul::fs_representation::untracked_files;
        )
        .arg(
        let conflicts = txn.list_conflict_files(
            &current_branch,
            &[]
        )?;
            conflicts,
    repo_root: &PathBuf,
    cwd: &Path,
    changed: Vec<(Rc<PathBuf>, ChangeType)>,
    untracked: HashSet<PathBuf>,
    conflicts: Vec<PathBuf>,
            println!(
                "        {}",
                relativize(&cwd, &repo_root.as_path().join(f.as_path())).display()
            );
            println!(
                "        {:10} {}",
                t.long(),
                relativize(&cwd, f.as_path()).display()
            );
    repo_root: &PathBuf,
    changed: Vec<(Rc<PathBuf>, ChangeType)>,
    untracked: HashSet<PathBuf>,
    conflicts: Vec<PathBuf>,
            relativize(&cwd, &repo_root.as_path().join(f.as_path())).display()
    Move(Rc<PathBuf>),
fn unrecorded_changes<T: rand::Rng>(
    txn: &mut MutTxn<T>,
    repo_root: &PathBuf,
    branch: &String,
) -> Result<Vec<(Rc<PathBuf>, ChangeType)>, Error> {
use clap::{Arg, ArgMatches, SubCommand};
use commands::{default_explain, BasicOptions, StaticSubcommand};
use error::Error;

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
use libpijul::fs_representation::RepoRoot;

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

================================
use libpijul::fs_representation::{RepoPath, RepoRoot};

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
use libpijul::status::{unrecorded_changes, ChangeType};
use rand;

use std::collections::HashSet;
use std::path::{Path, PathBuf};
use std::rc::Rc;

const UNRECORDED_FILES: &'static str = r#"
Changes not yet recorded:
  (use "pijul record ..." to record a new patch)
"#;

const UNTRACKED_FILES: &'static str = r#"
Untracked files:
  (use "pijul add <file>..." to track them)
"#;

const CONFLICTED_FILES: &'static str = r#"
Unresolved conflicts:
  (fix conflicts and record the resolution with "pijul record ...")
"#;

pub fn invocation() -> StaticSubcommand {
    SubCommand::with_name("status")
        .about("Show working tree status")
        .arg(Arg::with_name("repository")
             .long("repository")
             .takes_value(true)
             .help("Local repository."))
        .arg(Arg::with_name("short")
             .long("short")
             .short("s")
             .help("Output in short format"))
        .arg(
            Arg::with_name("repository")
                .long("repository")
                .takes_value(true)
                .help("Local repository."),
        )
        .arg(
            Arg::with_name("short")
                .long("short")
                .short("s")
                .help("Output in short format"),
        )
}
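
For illustration (not part of the patch), the subcommand defined above would typically be run as:

    # long format on the current branch
    pijul status
    # short format, against an explicit repository
    pijul status --short --repository /path/to/repo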

pub fn explain(r: Result<(), Error>) {
    default_explain(r)
}

pub fn run(args: &ArgMatches) -> Result<(), Error> {
    let opts = BasicOptions::from_args(args)?;
    let current_branch = opts.branch();
    let repo = opts.open_and_grow_repo(409600)?;
    let short = args.is_present("short");

    let (unrecorded, untracked, conflicts) = {
        let mut txn = repo.mut_txn_begin(rand::thread_rng())?;
        let unrecorded = unrecorded_changes(&mut txn, &opts.repo_root, &current_branch)?;

        let untracked = opts
            .repo_root
            .untracked_files(&txn, &opts.repo_root.repo_root)
            .collect();
        let conflicts = txn.list_conflict_files(&current_branch, &[])?;
        (unrecorded, untracked, conflicts)
    };

    if short {
        print_shortstatus(&opts.cwd, &opts.repo_root, unrecorded, untracked, conflicts);
    } else {
        print_longstatus(&current_branch, unrecorded, untracked, conflicts);
    }
    Ok(())
}

fn print_longstatus(branch: &str,
                    repo_root: &PathBuf,
                    cwd: &Path,
                    changed: Vec<(Rc<PathBuf>, ChangeType)>,
                    untracked: HashSet<PathBuf>,
                    conflicts: Vec<PathBuf>) {
fn print_longstatus(
    branch: &str,
    changed: Vec<(Rc<RepoPath<PathBuf>>, ChangeType)>,
    untracked: HashSet<RepoPath<PathBuf>>,
    conflicts: Vec<RepoPath<PathBuf>>,
) {
    println!("On branch {}", branch);
    if changed.is_empty() && untracked.is_empty() && conflicts.is_empty() {
        println!("Nothing to record, working tree clean");
    }

    if !conflicts.is_empty() {
        println!("{}", CONFLICTED_FILES);
        for f in conflicts {
            println!("        {}", relativize(&cwd, &repo_root.as_path().join(f.as_path())).display());
            println!("        {}", f.display());
        }
    }

    if !changed.is_empty() {
        println!("{}", UNRECORDED_FILES);
        for (f, t) in changed {
            println!("        {:10} {}", t.long(), relativize(&cwd, f.as_path()).display());
            println!("        {:10} {}", t.long(), f.display());
        }
    }

    if !untracked.is_empty() {
        println!("{}", UNTRACKED_FILES);
        for f in untracked {
            println!("        {}", f.display());
        }
    }
}

fn print_shortstatus(cwd: &Path,
                     repo_root: &PathBuf,
                     changed: Vec<(Rc<PathBuf>, ChangeType)>,
                     untracked: HashSet<PathBuf>,
                     conflicts: Vec<PathBuf>) {
fn print_shortstatus(
    cwd: &Path,
    repo_root: &RepoRoot<impl AsRef<Path>>,
    changed: Vec<(Rc<RepoPath<PathBuf>>, ChangeType)>,
    untracked: HashSet<RepoPath<PathBuf>>,
    conflicts: Vec<RepoPath<PathBuf>>,
) {
    for f in conflicts {
        println!("C {}", relativize(&cwd, &repo_root.as_path().join(f.as_path())).display());
        debug!("{:?} {:?}", repo_root.repo_root.as_ref(), f.as_path());
        println!(
            "C {}",
            pathdiff::diff_paths(&repo_root.repo_root.as_ref().join(f.as_path()), &cwd)
                .unwrap()
                .display()
        );
    }
    for (f, t) in changed {
        debug!("{:?} {:?}", repo_root.repo_root.as_ref(), f.as_path());
        println!(
            "{} {}",
            t.short(),
            pathdiff::diff_paths(&repo_root.repo_root.as_ref().join(f.as_path()), &cwd)
                .unwrap()
                .display()
        )
    }
    for f in untracked {
        debug!("{:?} {:?}", f.as_path(), cwd);
        println!(
            "? {}",
            pathdiff::diff_paths(&repo_root.repo_root.as_ref().join(f.as_path()), &cwd)
                .unwrap()
                .display()
        )
    }
}

use {hex, libpijul, regex, reqwest, term, thrussh, thrussh_keys, toml, bs58};
        PartialPullOverHttp {
            description("Partial pull over HTTP is not (yet) supported.")
                display("Partial pull over HTTP is not (yet) supported.")

        }
            _ => false,
            thrussh::HandlerError::Error(e) => ErrorKind::SSH(e).into(),
use std;

    RUST_LOG="libpijul::output=debug" pijul_uncovered revert -a 2> /tmp/log

    RUST_LOG="libpijul=debug" pijul pull -a ../origin --path a/b/z 2> /tmp/out
#!/usr/bin/env bats

load ../test_helper

@test "partial checkout" {
    mkdir origin
    cd origin
    pijul init
    mkdir -p a/b a/c d
    make_random_file a/c/y
    make_random_file d/z

    pijul add a/b
    pijul record -a -m abx -A me

    pijul add a/c/y d/z
    pijul record -a -m acydz -A me

    pijul mv d/z a/b
    pijul record -a -m "dz->ab" -A me

    cd ..
    mkdir clone
    cd clone

    pijul init
    pijul pull -a ../origin --path a/b/z

    if [[ "$(ls | wc -l)" -ne "1" ]]; then
        return 1
    fi

    pijul revert -a
    if [[ "$(ls | wc -l)" -ne "1" ]]; then
        return 1
    fi

    pijul revert -a
    if [[ "$(ls | wc -l)" -ne "1" ]]; then
        return 1
    fi
}

@test "partial clone" {
  mkdir test
  cd test
  pijul init
  mkdir a b
  echo test > a/test
  echo foo > b/foo
  pijul record -A "me" -n -a -m "patch"
  cd ..
  pijul clone test test-partial --path a/

  [ "$(ls test-partial)" = "a" ]
}

me
EOF
    pijul revert -a --repository subdir
    RUST_LOG="libpijul=debug" pijul revert -a --repository subdir 2> /tmp/log
    test -e subdir/file.txt

    RUST_BACKTRACE=1 RUST_LOG="pijul=debug,libpijul::output=debug,libpijul::graph=debug" pijul revert -a baz.rs 2> /tmp/log
#!/usr/bin/env bats

load ../test_helper

# Try to revert a file scheduled to be moved
@test "revert move with a prefix" {
    make_repo toto
    cd toto
    echo 'fn main() { println!("Hello"); }' > foo.rs
    echo 'fn main() { println!("World"); }' > bar.rs
    pijul add foo.rs
    pijul add bar.rs

    pijul record -am "a" -A myself
    echo "a" >> bar.rs
    cp bar.rs bar.rs_backup

    pijul mv foo.rs baz.rs

    RUST_BACKTRACE=1 RUST_LOG="pijul=debug,libpijul::output=debug" pijul revert -a baz.rs 2> /tmp/log