pijul_org / pijul

chore: Apply rustfmt once and for all

By lthms on May 12, 2018
This patch is not signed.
A6gUw8uCpQG113c48hkXGYDX3xiXUe2gbv3SH6Snfh2f7JQbTTuZLyc1PehkbuZLyvi6ZSzZuEU8Zzjkg218B6S1
This patch is in the following branches:
latest
master
testing














1

2
3
4
5
6
7
8
9
10
11
12
13
14
15
16

17





18
19
20
21
22
23
24
25
26
27
28
29
30
31

32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49

50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77

78
79
80
81
82
83
84
85
86
87


88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115

116
117

118
119
120
121
122
123

124
125
126
127
128
129
130
131
132







133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169

170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201

202

203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219

220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236

237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264

use Result;
                .take_while(|&(k, v)| {
                    k == to && v.flag <= EdgeFlags::PARENT_EDGE | EdgeFlags::PSEUDO_EDGE
                .filter_map(|(_, v)| {
                    if self.is_alive_or_zombie(branch, v.dest) {
                        Some(v.dest)
                    } else {
                        None
                    }
                }),
                .take_while(|&(k, v)| {
                    k == to && v.flag <= EdgeFlags::PSEUDO_EDGE | EdgeFlags::FOLDER_EDGE
                })
                .map(|(_, e)| *e),
use backend::*;
use Result;
use patch::*;
use rand;
use std::collections::HashSet;
use Result;

impl<'env, T: rand::Rng> MutTxn<'env, T> {
    /// Applies a patch to a repository. "new_patches" are patches that
    /// just this repository has, and the remote repository doesn't have.
    pub fn apply(
        &mut self,
        branch: &mut Branch,
        patch: &Patch,
        patch_id: PatchId,
        timestamp: ApplyTimestamp,
    ) -> Result<()> {

        assert!(self.put_patches(&mut branch.patches, patch_id, timestamp)?);
        assert!(self.put_revpatches(
            &mut branch.revpatches,
            timestamp,
            patch_id,
        )?);
        assert!(self.put_revpatches(&mut branch.revpatches, timestamp, patch_id)?);
        debug!("apply_raw");
        // Here we need to first apply *all* the NewNodes, and then
        // the Edges, because some of the NewNodes might be the
        // children of newly deleted edges, and we need to add the
        // corresponding pseudo-edges.
        for ch in patch.changes().iter() {
            if let Change::NewNodes {
                ref up_context,
                ref down_context,
                ref line_num,
                flag,
                ref nodes,
                ..
            } = *ch {
            } = *ch
            {
                assert!(!nodes.is_empty());
                debug!("apply: newnodes");
                self.add_new_nodes(
                    branch,
                    patch_id,
                    up_context,
                    down_context,
                    line_num,
                    flag,
                    nodes,
                )?;
            }
        }
        let mut parents: HashSet<Key<PatchId>> = HashSet::new();
        let mut children: HashSet<Edge> = HashSet::new();
        for ch in patch.changes().iter() {
            if let Change::NewEdges { previous, flag, ref edges, .. } = *ch {
            if let Change::NewEdges {
                previous,
                flag,
                ref edges,
                ..
            } = *ch
            {
                self.add_new_edges(
                    branch,
                    patch_id,
                    Some(previous),
                    flag,
                    edges,
                    &mut parents,
                    &mut children,
                )?;
                debug!("apply_raw:edges.done");
            }
        }

        // If there is a missing context, add pseudo-edges along the
        // edges that deleted the conflict, until finding (in both
        // directions) an alive context.
        self.repair_deleted_contexts(branch, patch, patch_id)?;

        Ok(())
    }


    /// Delete old versions of `edges`.
    ///
    /// Records the deleted edge in the cemetery (so it can be
    /// resurrected by unrecord), then removes it from the live graph
    /// in both directions. If the exact edge is absent, the
    /// pseudo-edge variant is removed instead.
    fn delete_old_edge(
        &mut self,
        branch: &mut Branch,
        patch_id: PatchId,
        previous: EdgeFlags,
        from: Key<PatchId>,
        to: Key<PatchId>,
        introduced_by: PatchId,
    ) -> Result<()> {
        debug!(
            "delete {:?} -> {:?} ({:?}) {:?}",
            from, to, previous, introduced_by
        );
        let mut deleted_e = Edge {
            flag: previous,
            dest: to,
            introduced_by,
        };
        self.put_cemetery(from, deleted_e, patch_id)?;
        if !self.del_edge_both_dirs(branch, from, deleted_e)? {
            // The exact edge wasn't there: it may exist only as a
            // pseudo-edge, so try deleting that variant.
            debug!("killing pseudo instead {:?} {:?}", from, deleted_e);
            deleted_e.flag |= EdgeFlags::PSEUDO_EDGE;
            let result = self.del_edge_both_dirs(branch, from, deleted_e)?;
            debug!("killed ? {:?}", result);
        }
        Ok(())
    }

    /// Insert the edges of a `NewEdges` change into the graph.
    ///
    /// For each edge: reconnects the graph around deletions, deletes
    /// the `previous` version of the edge (if any) and obsolete
    /// pseudo-edges, then inserts the new edge in both directions.
    /// `parents` and `children` are caller-provided scratch buffers.
    fn add_new_edges(
        &mut self,
        branch: &mut Branch,
        patch_id: PatchId,
        previous: Option<EdgeFlags>,
        flag: EdgeFlags,
        edges: &[NewEdge],
        parents: &mut HashSet<Key<PatchId>>,
        children: &mut HashSet<Edge>,
    ) -> Result<()> {
        for e in edges {
            debug!("add_new_edges {:?}", e);
            // If the edge has not been forgotten about,
            // insert the new version.
            let e_from = self.internal_key(&e.from, patch_id);
            let e_to = self.internal_key(&e.to, patch_id);
            assert!(e_from != e_to);
            // PARENT edges are stored in the reverse direction, so the
            // graph-order target is `e_from` in that case.
            let to = if flag.contains(EdgeFlags::PARENT_EDGE) {
                e_from
            } else {
                e_to
            };

            // If this is a deletion edge and not a folder edge, reconnect parents and children.
            if flag.contains(EdgeFlags::DELETED_EDGE) && !flag.contains(EdgeFlags::FOLDER_EDGE) {
                self.reconnect_parents_children(branch, patch_id, to, parents, children)?;
            }

            let introduced_by = self.internal_hash(&e.introduced_by, patch_id);

            if let Some(previous) = previous {
                self.delete_old_edge(branch, patch_id, previous, e_from, e_to, introduced_by)?
            }

            if flag.contains(EdgeFlags::DELETED_EDGE) && !flag.contains(EdgeFlags::FOLDER_EDGE) {
                self.delete_old_pseudo_edges(branch, patch_id, to, children)?
            }

            // Let's build the edge we're about to insert.
            let e = Edge {
                flag,
                dest: e_to,
                introduced_by: patch_id.clone(),
            };

            // Finally, add the new version of the edge.
            self.put_edge_both_dirs(branch, e_from, e)?;
        }
        Ok(())
    }

    /// Add pseudo edges from all keys of `parents` to all `dest` of
    /// the edges in `children`, with the same edge flags as in
    /// `children`, plus `PSEUDO_EDGE`.
    ///
    /// `parents` and `children` are scratch buffers owned by the
    /// caller; they are cleared and refilled here. This keeps the
    /// graph connected across the deletion of vertex `to`.
    pub fn reconnect_parents_children(
        &mut self,
        branch: &mut Branch,
        patch_id: PatchId,
        to: Key<PatchId>,
        parents: &mut HashSet<Key<PatchId>>,
        children: &mut HashSet<Edge>,
    ) -> Result<()> {

        // Collect all the alive parents of the source of this edge.
        parents.clear();
        parents.extend(
            self.iter_adjacent(
                &branch,
                to,
                EdgeFlags::PARENT_EDGE,
                EdgeFlags::PARENT_EDGE | EdgeFlags::PSEUDO_EDGE,
            )
            // Epsilon edges don't count as real parents; the others
            // must themselves be alive (or zombies) to be reconnected.
            .filter_map(|v| {
                if !v.flag.contains(EdgeFlags::EPSILON_EDGE)
                    && self.is_alive_or_zombie(branch, v.dest)
                {
                    Some(v.dest)
                } else {
                    None
                }
            }),
        );

        // Now collect all the alive children of the target of this edge.
        children.clear();
        children.extend(self.iter_adjacent(
            &branch,
            to,
            EdgeFlags::empty(),
            EdgeFlags::PSEUDO_EDGE | EdgeFlags::FOLDER_EDGE,
        ));

        debug!("reconnecting {:?} {:?}", parents, children);

        // Cartesian product: link every collected parent to every
        // collected child, unless a (pseudo-)connection already exists.
        for &parent in parents.iter() {

            for e in children.iter() {

                // If these are not already connected
                // or pseudo-connected, add a
                // pseudo-edge.
                if parent != e.dest && !self.is_connected(branch, parent, e.dest) {
                    let pseudo_edge = Edge {
                        flag: e.flag | EdgeFlags::PSEUDO_EDGE,
                        dest: e.dest,
                        introduced_by: patch_id.clone(),
                    };
                    debug!("reconnect_parents_children: {:?} {:?}", parent, pseudo_edge);
                    self.put_edge_both_dirs(branch, parent, pseudo_edge)?;
                }
            }
        }
        Ok(())
    }

    fn delete_old_pseudo_edges(&mut self, branch: &mut Branch, patch_id: PatchId, to: Key<PatchId>, pseudo_edges: &mut HashSet<Edge>) -> Result<()> {
    fn delete_old_pseudo_edges(
        &mut self,
        branch: &mut Branch,
        patch_id: PatchId,
        to: Key<PatchId>,
        pseudo_edges: &mut HashSet<Edge>,
    ) -> Result<()> {
        // Now collect pseudo edges, and delete them.
        pseudo_edges.clear();
        for to_edge in self
            .iter_adjacent(branch, to, EdgeFlags::empty(), EdgeFlags::DELETED_EDGE)
            .filter(|v| v.flag.contains(EdgeFlags::PSEUDO_EDGE))
        {
            // Is this pseudo-edge a zombie marker? I.e. is there a
            // deleted edge in parallel of it? Since we haven't yet
            // introduced the new deleted edge, there is no possible
            // risk of confusion here.
            let mut e = Edge::zero(EdgeFlags::DELETED_EDGE|(to_edge.flag & (EdgeFlags::PARENT_EDGE|EdgeFlags::FOLDER_EDGE)));
            let mut e = Edge::zero(
                EdgeFlags::DELETED_EDGE
                    | (to_edge.flag & (EdgeFlags::PARENT_EDGE | EdgeFlags::FOLDER_EDGE)),
            );
            e.dest = to_edge.dest;
            let mut is_zombie_marker = to_edge.introduced_by != patch_id
                && (match self.iter_nodes(branch, Some((to, Some(e)))).next() {
                    Some((k, v)) if k == to && v.dest == e.dest && v.flag == e.flag => {
                        v.introduced_by != patch_id
                    }
                    _ => false,
                });
            debug!(
                "is_zombie_marker {:?}: {:?}",
                to_edge.dest, is_zombie_marker
            );
            if !is_zombie_marker {
                // This edge is not a zombie marker, we can delete it.
                pseudo_edges.insert(to_edge);
            }
        }
        debug!("killing pseudo-edges from {:?}: {:?}", to, pseudo_edges);
        for edge in pseudo_edges.drain() {
            // Delete both directions.
            self.del_edge_both_dirs(branch, to, edge)?;
        }
        Ok(())
    }















1
2
3
4
5
6
7
8
9
10
11
12
13

14
15
16
17
18
19
20
21
22
23
24
25
26
27

28
29
30
31
32
33
34
35
36
37
38
39
40
41






42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57

58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83

84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105




106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140







141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180









181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202









203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
                let e = Edge::zero(EdgeFlags::PARENT_EDGE | EdgeFlags::DELETED_EDGE);
                    .take_while(|&(k, _)| k == a)
                {
                    let e = Edge::zero(
                        EdgeFlags::PARENT_EDGE | EdgeFlags::DELETED_EDGE | EdgeFlags::FOLDER_EDGE,
                    );
                        .take_while(|&(k, _)| k == a)
                    {
                        files.push((a, *v));
                    .take_while(|&(k, v)| {
                        k == current
                            && v.flag | EdgeFlags::FOLDER_EDGE | EdgeFlags::PSEUDO_EDGE
                                == flag | EdgeFlags::FOLDER_EDGE | EdgeFlags::PSEUDO_EDGE
                    }) {
use backend::*;
use std::collections::HashSet;

/// Reusable DFS state for searching the patch graph: a work stack
/// plus a visited set to stay terminating on cyclic graphs.
#[derive(Debug)]
pub struct FindAlive {
    // Keys still waiting to be examined.
    stack: Vec<Key<PatchId>>,
    // Keys already returned by `pop`, never yielded twice.
    visited: HashSet<Key<PatchId>>,
}

impl FindAlive {
    pub fn new() -> Self {
        FindAlive {
            stack: Vec::new(),
            visited: HashSet::new()
            visited: HashSet::new(),
        }
    }
    pub fn clear(&mut self) {
        self.stack.clear();
        self.visited.clear();
    }
    pub fn push(&mut self, k: Key<PatchId>) {
        self.stack.push(k)
    }
    pub fn pop(&mut self) -> Option<Key<PatchId>> {
        while let Some(p) = self.stack.pop() {
            if !self.visited.contains(&p) {
                self.visited.insert(p.clone());
                return Some(p)
                return Some(p);
            }
        }
        None
    }
}

impl<U: Transaction, R> GenericTxn<U, R> {
    /// Recursively find all ancestors by doing a DFS, and collect all
    /// edges until finding an alive ancestor.
    ///
    /// Returns whether or not at least one traversed vertex was dead
    /// (or otherwise said, returns `false` if and only if there all
    /// vertices in `find_alive` are alive).
    pub fn find_alive_ancestors(&self,
                                find_alive: &mut FindAlive,
                                branch: &Branch,
                                alive: &mut Vec<Key<PatchId>>,
                                file_ancestor: &mut Option<Key<PatchId>>,
                                files: &mut Vec<(Key<PatchId>, Edge)>) -> bool {
    pub fn find_alive_ancestors(
        &self,
        find_alive: &mut FindAlive,
        branch: &Branch,
        alive: &mut Vec<Key<PatchId>>,
        file_ancestor: &mut Option<Key<PatchId>>,
        files: &mut Vec<(Key<PatchId>, Edge)>,
    ) -> bool {
        let mut first_is_alive = false;
        let mut file = None;
        while let Some(a) = find_alive.pop() {
            if self.is_alive(branch, a) {
                // This node is alive.
                alive.push(a);
            } else {
                first_is_alive = true;
                let e = Edge::zero(EdgeFlags::PARENT_EDGE|EdgeFlags::DELETED_EDGE);
                for v in self.iter_adjacent(
                    &branch,
                    a,
                    EdgeFlags::PARENT_EDGE | EdgeFlags::DELETED_EDGE,
                    EdgeFlags::all(),
                ) {
                    debug!("find_alive_ancestors: {:?}", v);
                    if v.flag.contains(EdgeFlags::FOLDER_EDGE) {
                        // deleted file.
                        file = Some(a);
                        *file_ancestor = Some(a)
                    } else {
                        find_alive.push(v.dest)
                    }
                }
            }
        }
        debug!("file {:?}", file);
        if let Some(file) = file {
            find_alive.clear();
            find_alive.push(file);
            while let Some(a) = find_alive.pop() {
                debug!("file {:?}", a);
                if !self.is_alive(branch, a) {
                    debug!("not alive");
                    first_is_alive = true;
                    let e = Edge::zero(EdgeFlags::PARENT_EDGE|EdgeFlags::DELETED_EDGE|EdgeFlags::FOLDER_EDGE);
                    let flag =
                        EdgeFlags::PARENT_EDGE | EdgeFlags::DELETED_EDGE | EdgeFlags::FOLDER_EDGE;
                    for v in self.iter_adjacent(&branch, a, flag, EdgeFlags::all()) {
                        debug!("file find_alive_ancestors: {:?}", v);
                        // deleted file, collect.
                        files.push((a, v));
                        find_alive.push(v.dest)
                    }
                }
            }
        }

        first_is_alive
    }

    /// Recursively find all descendants by doing a DFS on deleted
    /// edges (including folder edges), and collect all edges until
    /// finding an alive or zombie descendant.
    ///
    /// Returns whether or not at least one traversed vertex was dead
    /// (or otherwise said, returns `false` if and only if there all
    /// vertices in `find_alive` are alive).
    pub fn find_alive_descendants(&self,
                                  find_alive: &mut FindAlive,
                                  branch: &Branch,
                                  alive: &mut Vec<Key<PatchId>>) -> bool {
    pub fn find_alive_descendants(
        &self,
        find_alive: &mut FindAlive,
        branch: &Branch,
        alive: &mut Vec<Key<PatchId>>,
    ) -> bool {
        let mut first_is_alive = false;
        debug!("begin find_alive_descendants");
        while let Some(a) = find_alive.pop() {
            debug!("find_alive_descendants, a = {:?}", a);
            if self.is_alive(branch, a) {
                debug!("alive: {:?}", a);
                alive.push(a);
            } else {
                // Else, we need to explore its deleted descendants.
                first_is_alive = true;
                for v in self
                    .iter_adjacent(&branch, a, EdgeFlags::empty(), EdgeFlags::all())
                    .filter(|v| !v.flag.contains(EdgeFlags::PARENT_EDGE))
                {
                    debug!("v = {:?}", v);
                    if v.flag.contains(EdgeFlags::DELETED_EDGE) {
                        debug!("find_alive_descendants: {:?}", v);
                        find_alive.push(v.dest)
                    } else {
                        debug!("alive in for: {:?}", v.dest);
                        alive.push(v.dest)
                    }
                }
            }
        }
        debug!("end find_alive_descendants");
        first_is_alive
    }

    fn find_alive(&self,
                  branch: &Branch,
                  find_alive: &mut FindAlive,
                  alive: &mut HashSet<Key<PatchId>>,
                  file: &mut Option<Key<PatchId>>,
                  current: Key<PatchId>,
                  flag: EdgeFlags) {
    fn find_alive(
        &self,
        branch: &Branch,
        find_alive: &mut FindAlive,
        alive: &mut HashSet<Key<PatchId>>,
        file: &mut Option<Key<PatchId>>,
        current: Key<PatchId>,
        flag: EdgeFlags,
    ) {
        find_alive.clear();
        debug!("find_alive: {:?}", current);
        find_alive.push(current);
        while let Some(current) = find_alive.pop() {
            debug!("find_alive, current = {:?}", current);
            if self.is_alive(branch, current) {
                alive.insert(current.clone());
            } else {
                let flagmax = flag | EdgeFlags::FOLDER_EDGE | EdgeFlags::PSEUDO_EDGE;
                for e in self
                    .iter_adjacent(branch, current, flag, flagmax)
                    .take_while(|e| {
                        e.flag | EdgeFlags::FOLDER_EDGE | EdgeFlags::PSEUDO_EDGE == flagmax
                    })
                {
                    debug!("e = {:?}", e);
                    // e might be FOLDER_EDGE here.
                    if e.flag.contains(EdgeFlags::FOLDER_EDGE) && file.is_none() {
                        *file = Some(current.clone())
                    } else {
                        find_alive.push(e.dest.clone())
                    }
                }
            }
        }
    }

    /// Find the alive descendants of `current`. `cache` is here to
    /// avoid cycles, and `alive` is an accumulator of the
    /// result. Since this search stops at files, if the file
    /// containing these lines is ever hit, it will be put in `file`.
    pub fn find_alive_nonfolder_descendants(&self,
                                            branch: &Branch,
                                            find_alive: &mut FindAlive,
                                            alive: &mut HashSet<Key<PatchId>>,
                                            file: &mut Option<Key<PatchId>>,
                                            current: Key<PatchId>) {


        self.find_alive(branch, find_alive, alive, file, current, EdgeFlags::DELETED_EDGE)
    pub fn find_alive_nonfolder_descendants(
        &self,
        branch: &Branch,
        find_alive: &mut FindAlive,
        alive: &mut HashSet<Key<PatchId>>,
        file: &mut Option<Key<PatchId>>,
        current: Key<PatchId>,
    ) {
        self.find_alive(
            branch,
            find_alive,
            alive,
            file,
            current,
            EdgeFlags::DELETED_EDGE,
        )
    }

    /// Find the alive ancestors of `current`. `cache` is here to
    /// avoid cycles, and `alive` is an accumulator of the
    /// result. Since this search stops at files, if the file
    /// containing these lines is ever hit, it will be put in `file`.
    pub fn find_alive_nonfolder_ancestors(&self,
                                          branch: &Branch,
                                          find_alive: &mut FindAlive,
                                          alive: &mut HashSet<Key<PatchId>>,
                                          file: &mut Option<Key<PatchId>>,
                                          current: Key<PatchId>) {


        self.find_alive(branch, find_alive, alive, file, current, EdgeFlags::DELETED_EDGE|EdgeFlags::PARENT_EDGE)
    pub fn find_alive_nonfolder_ancestors(
        &self,
        branch: &Branch,
        find_alive: &mut FindAlive,
        alive: &mut HashSet<Key<PatchId>>,
        file: &mut Option<Key<PatchId>>,
        current: Key<PatchId>,
    ) {
        self.find_alive(
            branch,
            find_alive,
            alive,
            file,
            current,
            EdgeFlags::DELETED_EDGE | EdgeFlags::PARENT_EDGE,
        )
    }

1

2
3

4
5
6
7
8
9
10

11


14


17

19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57

58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86








87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158








159
160
use Result;
use backend::*;
use Result;
use patch::*;
use rand;
use std::path::Path;
use record::{InodeUpdate, RecordState};
use std::collections::HashSet;
use std::path::Path;
use {Error, Result};
mod apply;
pub mod find_alive;
mod repair_deleted_context;
mod apply;
use diff;

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
use fs_representation::RepoRoot;

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

================================
use fs_representation::{RepoRoot, in_repo_root};

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
use output;
use output::ConflictingFile;

impl<U: Transaction, R> GenericTxn<U, R> {
    /// Return the patch id corresponding to `e`, or `internal` if `e==None`.
    pub fn internal_hash(&self, e: &Option<Hash>, internal: PatchId) -> PatchId {
        // Arm order matters: `Hash::None` must be matched before the
        // general `Some(ref h)` case.
        match *e {
            Some(Hash::None) => ROOT_PATCH_ID.clone(),
            Some(ref h) => self.get_internal(h.as_ref()).unwrap().to_owned(),
            None => internal.clone(),
        }
    }

    /// Fetch the internal key for this external key (or `internal` if
    /// `key.patch` is `None`).
    pub fn internal_key(&self, key: &Key<Option<Hash>>, internal: PatchId) -> Key<PatchId> {
        let patch = self.internal_hash(&key.patch, internal);
        let line = key.line.clone();
        Key { patch, line }
    }

    /// Like `internal_key`, but panics if `key.patch` is `None` or
    /// unknown to the repository.
    pub fn internal_key_unwrap(&self, key: &Key<Option<Hash>>) -> Key<PatchId> {
        let external = key.patch.as_ref().unwrap();
        let patch = self.get_internal(external.as_ref()).unwrap().to_owned();
        let line = key.line.clone();
        Key { patch, line }
    }
}

impl<'env, T: rand::Rng> MutTxn<'env, T> {
    /// Assumes all patches have been downloaded. The third argument
    /// `remote_patches` needs to contain at least all the patches we
    /// want to apply, and the fourth one `local_patches` at least all
    /// the patches the other repository doesn't have.
    pub fn apply_patches<F, P:output::ToPrefixes>(
    pub fn apply_patches<F, P: output::ToPrefixes>(
        &mut self,
        diff_algorithm: diff::Algorithm,
        branch: &mut Branch,
        r: &RepoRoot<impl AsRef<Path>>,
        remote_patches: &[(Hash, Patch)],
        partial_paths: P,
        mut f: F,
    ) -> Result<Vec<ConflictingFile>>
    where
        F: FnMut(usize, &Hash),
    {
        let (pending, local_pending) = {
            let mut record = RecordState::new();
            self.record(diff_algorithm, &mut record, branch, r, &in_repo_root())?;
            let (changes, local) = record.finish();
            let mut p = UnsignedPatch::empty();
            p.changes = changes
                .into_iter()
                .flat_map(|x| x.into_iter())
                .map(|x| self.globalize_change(x))
                .collect();
            p.dependencies = self.dependencies(&branch, p.changes.iter());
            (p.leave_unsigned(), local)
        };

        let mut new_patches_count = 0;
        for &(ref p, ref patch) in remote_patches.iter() {
            debug!("apply_patches: {:?}", p);
            self.apply_patches_rec(
                branch,
                remote_patches,
                remote_patches,
                p,
                patch,
                &mut new_patches_count,
            )?;
            self.apply_patches_rec(branch, remote_patches, p, patch, &mut new_patches_count)?;
            f(new_patches_count, p);
        }
        debug!("{} patches applied", new_patches_count);

        if new_patches_count > 0 {
            let partial_paths = partial_paths.to_prefixes(self, &branch);
            self.output_changes_file(&branch, r)?;
            debug!("output_repository");
            self.output_partials(branch.name.as_str(), &partial_paths)?;
            self.output_repository(branch, r, &partial_paths, &pending, &local_pending)
        } else {
            debug!("finished apply_patches");
            Ok(Vec::new())
        }
    }

    /// Lower-level applier. This function only applies patches as
    /// found in `patches_dir`, following dependencies recursively. It
    /// outputs neither the repository nor the "changes file" of the
    /// branch, necessary to exchange patches locally or over HTTP.
    pub fn apply_patches_rec(
        &mut self,
        branch: &mut Branch,
        patches: &[(Hash, Patch)],
        patch_hash: &Hash,
        patch: &Patch,
        new_patches_count: &mut usize,
    ) -> Result<()> {
        let internal = {
            if let Some(internal) = self.get_internal(patch_hash.as_ref()) {
                if self.get_patch(&branch.patches, internal).is_some() {
                    debug!(
                        "get_patch returned {:?}",
                        self.get_patch(&branch.patches, internal)
                    );
                    None
                } else {
                    // Doesn't have patch, but the patch is known in
                    // another branch
                    Some(internal.to_owned())
                }
            } else {
                // The patch is totally new to the repository.
                let internal = self.new_internal(patch_hash.as_ref());
                Some(internal)
            }
        };
        if let Some(internal) = internal {
            info!(
                "Now applying patch {:?} {:?} to branch {:?}",
                patch.name, patch_hash, branch
            );
            if patch.dependencies().is_empty() {
                info!("Patch {:?} has no dependencies", patch_hash);
            }
            for dep in patch.dependencies().iter() {
                info!("Applying dependency {:?}", dep);
                info!("dep hash {:?}", dep.to_base58());
                let is_applied = {
                    if let Some(dep_internal) = self.get_internal(dep.as_ref()) {
                        self.get_patch(&branch.patches, dep_internal).is_some()
                    } else {
                        false
                    }
                };
                if !is_applied {
                    info!("Not applied");
                    // If `patches` is sorted in topological order,
                    // this shouldn't happen, because the dependencies
                    // have been applied before.
                    if let Some(&(_, ref patch)) = patches.iter().find(|&&(ref a, _)| a == dep) {
                        self.apply_patches_rec(
                            branch,
                            branch,
                            patches,
                            &dep,
                            patch,
                            new_patches_count
                        )?;
                        self.apply_patches_rec(branch, patches, &dep, patch, new_patches_count)?;
                    } else {




1
2

3
4

5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33

34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76

77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92

93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131

132
133
134
135
136
137
138
139
140
141
142

143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184



185
186
187
188
189
190
191
192
193
194
195
196
197

198
199
200
201
202
203
204
205
206

207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235

236
237
238
239
240
241
242
243
244
245
246

247
248
249
250

251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273

274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292

293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
use Result;
                k == context
                    && v.flag
                        .contains(EdgeFlags::PARENT_EDGE | EdgeFlags::DELETED_EDGE)
use apply::find_alive::*;
use backend::*;
use Result;
use patch::*;
use rand;
use apply::find_alive::*;
use std::collections::HashSet;
use Result;

impl<'env, T: rand::Rng> MutTxn<'env, T> {
    /// Deleted contexts are conflicts. Reconnect the graph by
    /// inserting pseudo-edges alongside deleted edges.
    pub(in apply) fn repair_deleted_contexts(
        &mut self,
        branch: &mut Branch,
        patch: &Patch,
        patch_id: PatchId,
    ) -> Result<()> {
        let mut alive = Vec::new();
        let mut files = Vec::new();
        let mut find_alive = FindAlive::new();
        // repair_missing_context adds all zombie edges needed.
        for ch in patch.changes().iter() {
            match *ch {
                Change::NewEdges {
                    flag, ref edges, ..
                } => {
                    if !flag.contains(EdgeFlags::DELETED_EDGE) {
                        self.repair_context_nondeleted(
                            branch,
                            patch_id,
                            edges,
                            flag,
                            &mut find_alive,
                            &mut alive,
                            &mut files,
                        )?
                    } else {
                        self.repair_context_deleted(
                            branch,
                            patch_id,
                            edges,
                            flag,
                            &mut find_alive,
                            &mut alive,
                            &mut files,
                            patch.dependencies(),
                        )?
                    }
                }
                Change::NewNodes {
                    ref up_context,
                    ref down_context,
                    flag,
                    ..
                } => {
                    debug!("repairing missing contexts for newnodes");
                    // If not all lines in `up_context` are alive, this
                    // is a conflict, repair.
                    for c in up_context {
                        let c = self.internal_key(c, patch_id);

                        // Is the up context deleted by another patch, and the
                        // deletion was not also confirmed by this patch?
                        let up_context_deleted = self.was_context_deleted(branch, patch_id, c);
                        debug!(
                            "up_context_deleted: patch_id = {:?} context = {:?} up_context_deleted = {:?}",
                            patch_id, c, up_context_deleted
                        );
                        if up_context_deleted {
                            self.repair_missing_up_context(
                                &mut find_alive,
                                branch,
                                c,
                                flag,
                                &mut alive,
                                &mut files,
                                &[patch_id],
                            )?
                        }
                    }
                    // If not all lines in `down_context` are alive,
                    // this is a conflict, repair.
                    for c in down_context {
                        let c = self.internal_key(c, patch_id);
                        let down_context_deleted = self.was_context_deleted(branch, patch_id, c);
                        debug!("down_context_deleted: {:?}", down_context_deleted);
                        if down_context_deleted {
                            self.repair_missing_down_context(
                                &mut find_alive,
                                branch,
                                c,
                                &mut alive,
                                &[patch_id],
                            )?
                        }
                    }
                    debug!("apply: newnodes, done");
                }
            }
        }
        Ok(())
    }

    /// This function handles the case where we're adding an alive
    /// edge, and the origin or destination (or both) of this edge is
    /// dead in the graph.
    fn repair_context_nondeleted(
        &mut self,
        branch: &mut Branch,
        patch_id: PatchId,
        edges: &[NewEdge],
        flag: EdgeFlags,
        find_alive: &mut FindAlive,
        alive: &mut Vec<Key<PatchId>>,
        files: &mut Vec<(Key<PatchId>, Edge)>,
    ) -> Result<()> {
        debug!("repairing missing contexts for non-deleted edges");
        for e in edges {
            // With PARENT_EDGE the edge is stored in the reverse
            // direction, so `to` is the up side and `from` the down side.
            let (up_context, down_context) = if flag.contains(EdgeFlags::PARENT_EDGE) {
                (
                    self.internal_key(&e.to, patch_id),
                    self.internal_key(&e.from, patch_id),
                )
            } else {
                (
                    self.internal_key(&e.from, patch_id),
                    self.internal_key(&e.to, patch_id),
                )
            };

            if self.was_context_deleted(branch, patch_id, up_context) {
                self.repair_missing_up_context(
                    find_alive,
                    branch,
                    up_context,
                    flag,
                    alive,
                    files,
                    &[patch_id],
                )?
            }
            if self.was_context_deleted(branch, patch_id, down_context) {
                self.repair_missing_down_context(
                    find_alive,
                    branch,
                    down_context,
                    alive,
                    &[patch_id],
                )?
            }
        }
        Ok(())
    }

    /// Handle the case where we're inserting a deleted edge, but the
    /// source or target (or both) does not know about (at least one
    /// of) its adjacent edges.
    fn repair_context_deleted(
        &mut self,
        branch: &mut Branch,
        patch_id: PatchId,
        edges: &[NewEdge],
        flag: EdgeFlags,
        find_alive: &mut FindAlive,
        alive: &mut Vec<Key<PatchId>>,
        files: &mut Vec<(Key<PatchId>, Edge)>,
        dependencies: &HashSet<Hash>,
    ) -> Result<()> {
        debug!("repairing missing contexts for deleted edges");
        debug_assert!(flag.contains(EdgeFlags::DELETED_EDGE));

        for e in edges {
            let dest = if flag.contains(EdgeFlags::PARENT_EDGE) {
                self.internal_key(&e.from, patch_id)
            } else {
                self.internal_key(&e.to, patch_id)
            };

            debug!("dest = {:?}", dest);

            // If there is at least one unknown child, repair the
            // context.
            let mut unknown_children = Vec::new();
            for (k, v) in self.iter_nodes(branch, Some((dest, None))) {
                if k != dest || v.flag | EdgeFlags::FOLDER_EDGE > EdgeFlags::PSEUDO_EDGE | EdgeFlags::EPSILON_EDGE | EdgeFlags::FOLDER_EDGE {
                    break
                    break
                if k != dest
                    || v.flag | EdgeFlags::FOLDER_EDGE
                        > EdgeFlags::PSEUDO_EDGE | EdgeFlags::EPSILON_EDGE | EdgeFlags::FOLDER_EDGE
                {
                    break;
                }
                if v.introduced_by != patch_id && {
                    let ext = self.external_hash(v.introduced_by).to_owned();
                    !dependencies.contains(&ext)
                } {
                    unknown_children.push(v.introduced_by)
                }

                debug!("child is_unknown({}): {:?} {:?}", line!(), v, unknown_children);
                debug!(
                    "child is_unknown({}): {:?} {:?}",
                    line!(),
                    v,
                    unknown_children
                );
            }

            if !unknown_children.is_empty() {
                self.repair_missing_up_context(find_alive, branch, dest, flag, alive, files, &unknown_children)?;
                self.repair_missing_up_context(
                    find_alive,
                    branch,
                    dest,
                    flag,
                    alive,
                    files,
                    &unknown_children,
                )?;
            }

            // If there is at least one alive parent we don't know
            // about, repair.
            let e = Edge::zero(EdgeFlags::PARENT_EDGE);
            unknown_children.clear();
            let mut unknown_parents = unknown_children;
            for (k, v) in self.iter_nodes(branch, Some((dest, Some(e)))) {
                if k != dest
                    || v.flag | EdgeFlags::FOLDER_EDGE
                        != EdgeFlags::PARENT_EDGE | EdgeFlags::FOLDER_EDGE
                {
                    break;
                }
                if v.introduced_by != patch_id && {
                    let ext = self.external_hash(v.introduced_by).to_owned();
                    !dependencies.contains(&ext)
                } {
                    unknown_parents.push(v.introduced_by)
                }
                debug!("parent is_unknown({}): {:?} {:?}", line!(), v, unknown_parents);
                debug!(
                    "parent is_unknown({}): {:?} {:?}",
                    line!(),
                    v,
                    unknown_parents
                );
            }

            if !unknown_parents.is_empty() {
                self.repair_missing_down_context(find_alive, branch, dest, alive, &unknown_parents)?
            }

        }
        Ok(())
    }


    /// Was `context` deleted by patches other than `patch_id`, and
    /// additionally not deleted by `patch_id`?
    fn was_context_deleted(
        &self,
        branch: &Branch,
        patch_id: PatchId,
        context: Key<PatchId>,
    ) -> bool {
        let mut context_deleted = false;
        // Scan the deleted parent edges of `context`.
        for v in self
            .iter_adjacent(
                branch,
                context,
                EdgeFlags::PARENT_EDGE | EdgeFlags::DELETED_EDGE,
                EdgeFlags::all(),
            )
            .take_while(|v| {
                v.flag
                    .contains(EdgeFlags::PARENT_EDGE | EdgeFlags::DELETED_EDGE)
            })
        {
            debug!("was_context_deleted {:?}", v);
            if v.introduced_by == patch_id {
                // `patch_id` itself confirmed the deletion: not missing.
                return false;
            } else {
                context_deleted = true
            }
        }
        context_deleted
    }

    /// Checks whether a line in the up context of a hunk is marked
    /// deleted, and if so, reconnect the alive parts of the graph,
    /// marking this situation as a conflict.
    pub(crate) fn repair_missing_up_context(
        &mut self,
        find_alive: &mut FindAlive,
        branch: &mut Branch,
        context: Key<PatchId>,
        flag: EdgeFlags,
        alive: &mut Vec<Key<PatchId>>,
        files: &mut Vec<(Key<PatchId>, Edge)>,
        unknown_patches: &[PatchId]
        unknown_patches: &[PatchId],
    ) -> Result<()> {
        // The up context needs a repair iff it's deleted.

        // The up context was deleted, so the alive
        // component of the graph might be disconnected, and needs
        // a repair.

        // Follow all paths upwards (in the direction of
        // DELETED_EDGE|PARENT_EDGE) until finding an alive
        // ancestor, and turn them all into zombie edges.
        find_alive.clear();
        find_alive.push(context);
        alive.clear();
        files.clear();
        let mut first_file = None;
        self.find_alive_ancestors(find_alive, branch, alive, &mut first_file, files);
        debug!("files {:?} alive {:?}", files, alive);
        if !flag.contains(EdgeFlags::FOLDER_EDGE) {
            for ancestor in alive.drain(..).chain(first_file.into_iter()) {
                let mut edge = Edge::zero(EdgeFlags::PSEUDO_EDGE | EdgeFlags::PARENT_EDGE);
                if ancestor != context {
                    edge.dest = ancestor;
                    for patch_id in unknown_patches {
                        edge.introduced_by = patch_id.clone();
                        debug!("repairing up context: {:?} {:?}", context, edge);
                        self.put_edge_both_dirs(branch, context, edge)?;
                    }
                }
            }
        }
        for (key, mut edge) in files.drain(..) {
            if !self.is_connected(branch, key, edge.dest) {
                edge.flag =
                    EdgeFlags::PSEUDO_EDGE | EdgeFlags::PARENT_EDGE | EdgeFlags::FOLDER_EDGE;
                for patch_id in unknown_patches {

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86

87
88
89
90
        unsafe { Edge::from_unsafe(*self).fmt(fmt) }
use super::key::*;
use super::patch_id::*;
use byteorder::{ByteOrder, LittleEndian};
use sanakirja::*;
use std;

bitflags! {
    /// Possible flags of edges.
    ///
    /// Possible values are `PSEUDO_EDGE`, `FOLDER_EDGE`,
    /// `PARENT_EDGE` and `DELETED_EDGE`.
    ///
    /// The raw bit values are persisted as the first byte of a
    /// serialized `Edge` (see `Representable for Edge`), so they
    /// must not be renumbered.
    #[derive(Serialize, Deserialize)]
    pub struct EdgeFlags: u8 {
        /// A pseudo-edge, computed when applying the patch to
        /// restore connectivity, and/or mark conflicts.
        const PSEUDO_EDGE = 1;
        /// An edge encoding file system hierarchy.
        const FOLDER_EDGE = 2;
        /// An epsilon-edge, i.e. a "non-transitive" edge used to
        /// solve conflicts.
        const EPSILON_EDGE = 4;
        /// A "reverse" edge (all edges in the graph have a reverse edge).
        const PARENT_EDGE = 8;
        /// An edge whose target (if not also `PARENT_EDGE`) or
        /// source (if also `PARENT_EDGE`) is marked as deleted.
        const DELETED_EDGE = 16;
    }
}

/// The target half of an edge in the repository graph.
///
/// Serialized to 25 bytes on a database page: one flag byte,
/// 16 bytes for `dest` and 8 for `introduced_by` (see the
/// `Representable` impl).
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Eq, Ord, Hash)]
pub struct Edge {
    /// Flags of this edge.
    pub flag: EdgeFlags,
    /// Target of this edge.
    pub dest: Key<PatchId>,
    /// Patch that introduced this edge (possibly as a
    /// pseudo-edge, i.e. not explicitly in the patch, but
    /// computed from it).
    pub introduced_by: PatchId,
}
impl Edge {
    /// Build an `Edge` carrying the flags `flag`, with the target
    /// set to the root key and the introducer set to the root patch
    /// (i.e. all non-flag fields zeroed).
    pub fn zero(flag: EdgeFlags) -> Edge {
        Edge {
            flag,
            dest: ROOT_KEY.clone(),
            introduced_by: ROOT_PATCH_ID.clone(),
        }
    }
}

impl Representable for Edge {
    fn alignment() -> Alignment {
        Alignment::B1
    }
    fn onpage_size(&self) -> u16 {
        std::mem::size_of::<Edge>() as u16
    }
    // On-page layout (25 bytes): 1 flag byte, 8 + 8 bytes for
    // `dest` (patch, line), 8 bytes for `introduced_by`, all
    // little-endian.
    unsafe fn write_value(&self, p: *mut u8) {
        trace!("write_value {:?}", p);
        let s = std::slice::from_raw_parts_mut(p, 25);
        s[0] = (*self).flag.bits();
        LittleEndian::write_u64(&mut s[1..], (*self).dest.patch.0);
        LittleEndian::write_u64(&mut s[9..], (*self).dest.line.0);
        LittleEndian::write_u64(&mut s[17..], (*self).introduced_by.0);
    }
    unsafe fn read_value(p: *const u8) -> Self {
        trace!("read_value {:?}", p);
        let s = std::slice::from_raw_parts(p, 25);
        Edge {
            // Panics on unknown flag bits, i.e. on a corrupted page.
            flag: EdgeFlags::from_bits(s[0]).unwrap(),
            dest: Key {
                patch: PatchId(LittleEndian::read_u64(&s[1..])),
                line: LineId(LittleEndian::read_u64(&s[9..])),
            },
            introduced_by: PatchId(LittleEndian::read_u64(&s[17..])),
        }
    }
    unsafe fn cmp_value<T>(&self, _: &T, x: Self) -> std::cmp::Ordering {
        let a: &Edge = self;
        let b: &Edge = &x;
        a.cmp(b)
    }
    type PageOffsets = std::iter::Empty<u64>;
    fn page_offsets(&self) -> Self::PageOffsets {
        std::iter::empty()
    }
}
65
66



67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87

88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114

115
116
117
118
119
120
121
122
123
124

125
126
127
128
// Blanket impl: every `std::io::Write` sink implements `WriteMetadata`
// (presumably via the trait's default methods — trait declared outside
// this view, confirm).
impl<W: std::io::Write> WriteMetadata for W {}


use sanakirja::{Representable, Alignment};
use std;
use super::key::*;
use super::patch_id::*;
use sanakirja::{Alignment, Representable};
use std;
/// Status of a tracked file, stored as a single byte in a
/// `FileHeader` (see `FileHeader::write_value`); do not renumber.
#[repr(u8)]
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum FileStatus {
    Ok = 0,
    Moved = 1,
    Deleted = 2,
    Zombie = 3,
}

// Warning: FileMetadata is 16 bit-aligned, don't change the order.
/// On-disk header of a tracked file: two metadata bytes, one status
/// byte, and the file's key in the graph (see the `Representable`
/// impl below for the exact layout).
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Eq, Ord)]
pub struct FileHeader {
    pub metadata: FileMetadata,
    pub status: FileStatus,
    pub key: Key<PatchId>,
}


// Guards the layout assumed by `Representable for FileHeader`:
// 2 bytes of metadata + 1 status byte + 16-byte key, no padding.
#[test]
fn test_fileheader_alignment() {
    assert_eq!(std::mem::size_of::<FileHeader>(), 2 + 1 + 16)
}

impl Representable for FileHeader {
    fn alignment() -> Alignment {
        Alignment::B1
    }
    fn onpage_size(&self) -> u16 {
        std::mem::size_of::<FileHeader>() as u16
    }
    // Layout: 2 metadata bytes (little-endian), 1 status byte, then
    // the key (see `test_fileheader_alignment`).
    unsafe fn write_value(&self, p: *mut u8) {
        let meta = self.metadata.0;
        *p = (meta & 255) as u8;
        *(p.offset(1)) = (meta >> 8) as u8;
        *(p.offset(2)) = self.status as u8;
        self.key.write_value(p.offset(3))
    }
    unsafe fn read_value(p: *const u8) -> Self {
        let metadata = {
            let x0 = (*p) as u16;
            let x1 = (*(p.offset(1))) as u16;
            std::mem::transmute((x1 << 8) | x0)
        };
        let status = std::mem::transmute(*(p.offset(2)));
        let key: Key<PatchId> = Representable::read_value(p.offset(3));
        FileHeader {
            metadata,
            status,
            key,
        }
    }
    unsafe fn cmp_value<T>(&self, _: &T, x: Self) -> std::cmp::Ordering {
        self.cmp(&x)
    }
    type PageOffsets = std::iter::Empty<u64>;
    fn page_offsets(&self) -> Self::PageOffsets {
        std::iter::empty()
    }
}


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80

81
82
83
84
use sanakirja::{Representable, Alignment};
use std;
use super::inode::*;
use super::small_string::*;
use sanakirja::{Alignment, Representable};
use std;

/// Owned variant of `FileId`: a parent directory inode plus the
/// file's name inside that directory.
#[derive(Debug, Hash, Eq, PartialEq, Clone)]
#[repr(packed)]
pub struct OwnedFileId {
    pub parent_inode: Inode,
    pub basename: SmallString,
}

impl OwnedFileId {
    /// Borrow this id as a `FileId` without copying the name.
    pub fn as_file_id(&self) -> FileId {
        FileId {
            parent_inode: self.parent_inode,
            basename: self.basename.as_small_str(),
        }
    }
}

/// Borrowed identifier of a file: parent inode plus a borrowed name.
#[derive(Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct FileId<'a> {
    pub parent_inode: Inode,
    pub basename: SmallStr<'a>,
}

/// Raw-pointer form of `FileId`, used for reading and writing
/// database pages (see the `Representable` impl below).
#[derive(Clone, Copy, Debug)]
pub struct UnsafeFileId {
    parent_inode: Inode,
    basename: UnsafeSmallStr,
}

impl<'a> FileId<'a> {
    /// Copy this borrowed id into an `OwnedFileId`.
    pub fn to_owned(&self) -> OwnedFileId {
        OwnedFileId {
            parent_inode: self.parent_inode.clone(),
            basename: self.basename.to_owned(),
        }
    }
    /// Convert to the raw-pointer form (no copy).
    pub fn to_unsafe(&self) -> UnsafeFileId {
        UnsafeFileId {
            parent_inode: self.parent_inode,
            basename: self.basename.to_unsafe(),
        }
    }
    /// Rebuild a borrowed id from its raw-pointer form. The caller
    /// must guarantee the data behind `p` outlives `'a`.
    pub unsafe fn from_unsafe(p: UnsafeFileId) -> FileId<'a> {
        FileId {
            parent_inode: p.parent_inode,
            basename: SmallStr::from_unsafe(p.basename),
        }
    }
}

impl Representable for UnsafeFileId {
    fn alignment() -> Alignment {
        Alignment::B1
    }
    // Layout: fixed-size inode followed by a variable-length name.
    fn onpage_size(&self) -> u16 {
        INODE_SIZE as u16 + self.basename.onpage_size()
    }
    unsafe fn write_value(&self, p: *mut u8) {
        trace!("write_value {:?}", p);
        self.parent_inode.write_value(p);
        self.basename.write_value(p.offset(INODE_SIZE as isize));
    }
    unsafe fn read_value(p: *const u8) -> Self {
        trace!("read_value {:?}", p);
        UnsafeFileId {
            parent_inode: Inode::read_value(p),
            basename: UnsafeSmallStr::read_value(p.offset(INODE_SIZE as isize)),
        }
    }
    unsafe fn cmp_value<T>(&self, _: &T, x: Self) -> std::cmp::Ordering {
        trace!("cmp_value file_id");
        let a: FileId = FileId::from_unsafe(*self);
        let b: FileId = FileId::from_unsafe(x);
        a.cmp(&b)
    }
    type PageOffsets = std::iter::Empty<u64>;
    fn page_offsets(&self) -> Self::PageOffsets {
        std::iter::empty()
    }
}








1

2
3
4
5
6

7
8
9
10
11
12
13
14
15
16
17
18

19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87

88
89
90
91
92
93
94

95
96
97
98
99
100

101
102
103
104
105

106
107
108

109
110
111
112
113
114

115
116
117
118
119

120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135

136
137
138
139
140
141
142
143
144

145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176





177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230



231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266

267
268
269
270
271
272
273
274
275
276
277
278
279
280



281
282
283
284
285
286
287
288

289
290
291
292
293
294





295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334

335
336
337
338
use sanakirja::{Representable, Alignment};
use std;
use Error;
            Hash::Sha512(ref e) => &e.0,
        1 + (match *self {
            UnsafeHash::Sha512(_) => 64,
            UnsafeHash::None => 0,
        })
use bs58;
use Error;
use byteorder::{ByteOrder, LittleEndian};
use sanakirja::{Alignment, Representable};
use serde;
use serde::de::{Deserialize, Deserializer, Visitor};
use serde::ser::{Serialize, Serializer};
use serde;
use std;
use {Error, LineId};

/// Size in bytes of a SHA-512 digest.
const SHA512_BYTES: usize = 512 / 8;

/// The external hash of patches.
#[derive(Serialize, Deserialize, Eq, PartialEq, Hash, PartialOrd, Ord)]
pub enum Hash {
    /// None is the hash of the "null patch", which introduced a
    /// single root vertex at the beginning of the repository.
    None,
    /// Patch hashed using the SHA2-512 algorithm.
    Sha512(Sha512)
    Sha512(Sha512),
    /// Recursive patch hashes (patches written as text in the repository).
    Recursive(Recursive),
}

/// A "recursive" hash: one `Algorithm` tag byte, the bytes of a
/// parent patch hash, and a trailing little-endian `LineId`
/// (see the accessors `patch()` and `line()`).
#[derive(Serialize, Deserialize, Eq, PartialEq, Hash, PartialOrd, Ord)]
pub struct Recursive(Vec<u8>);

impl Recursive {
    /// The patch hash embedded in this recursive hash.
    ///
    /// Byte 0 is an `Algorithm` tag (unchecked `transmute` — a value
    /// outside the enum is undefined behavior); the payload runs up
    /// to the 8 trailing line-id bytes.
    pub fn patch(&self) -> HashRef {
        match unsafe { std::mem::transmute(self.0[0]) } {
            Algorithm::None => HashRef::None,
            Algorithm::Sha512 => HashRef::Sha512(&self.0[1..self.0.len() - 8]),
            Algorithm::Recursive => HashRef::Recursive(&self.0[1..self.0.len() - 8]),
        }
    }
    /// The line id stored in the last 8 bytes (little-endian);
    /// 0 for the `None` algorithm.
    pub fn line(&self) -> LineId {
        match unsafe { std::mem::transmute(self.0[0]) } {
            Algorithm::None => LineId(0),
            Algorithm::Sha512 => LineId(LittleEndian::read_u64(&self.0[self.0.len() - 8..])),
            Algorithm::Recursive => LineId(LittleEndian::read_u64(&self.0[self.0.len() - 8..])),
        }
    }
}

impl Hash {
    /// Build the recursive hash `(self, line)`: an `Algorithm` tag
    /// byte, the payload of `self`, then `line` appended as a
    /// little-endian u64.
    pub fn recursive(&self, line: LineId) -> Self {
        match *self {
            Hash::None => Hash::Recursive(Recursive(vec![Algorithm::None as u8])),
            Hash::Sha512(ref p) => {
                let mut v = Vec::new();
                v.push(Algorithm::Sha512 as u8);
                v.extend(&p.0[..]);
                // Reserve 8 bytes for the line id, then overwrite them.
                v.extend(b"\0\0\0\0\0\0\0\0");
                // NOTE(review): the write starts at SHA512_BYTES, but the
                // tag byte shifts the digest to 1..=SHA512_BYTES, so this
                // appears to clobber the digest's last byte and disagree
                // with `Recursive::line()` (which reads the final 8
                // bytes). Looks like an off-by-one — confirm before
                // changing, since this is a persisted format.
                LittleEndian::write_u64(&mut v[SHA512_BYTES..], line.0);
                Hash::Recursive(Recursive(v))
            }
            Hash::Recursive(Recursive(ref p)) => {
                let mut v = Vec::new();
                v.push(Algorithm::Recursive as u8);
                v.extend(p);
                v.extend(b"\0\0\0\0\0\0\0\0");
                // NOTE(review): same apparent off-by-one as above
                // (`p.len()` vs `p.len() + 1`).
                LittleEndian::write_u64(&mut v[p.len()..], line.0);
                Hash::Recursive(Recursive(v))
            }
        }
    }
}

/// A SHA-512 digest: 64 raw bytes.
pub struct Sha512(pub [u8; SHA512_BYTES]);

// Manual impls comparing/ordering `Sha512` by its raw digest bytes
// (the inner array is compared as a slice).
impl PartialEq for Sha512 {
    fn eq(&self, h: &Sha512) -> bool {
        (&self.0[..]).eq(&h.0[..])
    }
}
impl Eq for Sha512 {}
impl PartialOrd for Sha512 {
    fn partial_cmp(&self, h: &Sha512) -> Option<std::cmp::Ordering> {
        (&self.0[..]).partial_cmp(&h.0[..])
    }
}
impl Ord for Sha512 {
    fn cmp(&self, h: &Sha512) -> std::cmp::Ordering {
        (&self.0[..]).cmp(&h.0[..])
    }
}

impl std::hash::Hash for Sha512 {
    fn hash<H:std::hash::Hasher>(&self, h: &mut H) {
    fn hash<H: std::hash::Hasher>(&self, h: &mut H) {
        (&self.0[..]).hash(h)
    }
}

struct Sha512Visitor;
impl<'a> Visitor<'a> for Sha512Visitor {

    type Value = Sha512;

    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(formatter, "A byte slice of length {}", SHA512_BYTES)
    }

    fn visit_bytes<E:serde::de::Error>(self, v: &[u8]) -> Result<Self::Value, E> {
    fn visit_bytes<E: serde::de::Error>(self, v: &[u8]) -> Result<Self::Value, E> {
        let mut x: [u8; SHA512_BYTES] = [0; SHA512_BYTES];
        x.copy_from_slice(v);
        Ok(Sha512(x))
    }

}

impl<'a> Deserialize<'a> for Sha512 {
    fn deserialize<D:Deserializer<'a>>(d: D) -> Result<Sha512, D::Error> {
    fn deserialize<D: Deserializer<'a>>(d: D) -> Result<Sha512, D::Error> {
        d.deserialize_bytes(Sha512Visitor)
    }
}

impl Serialize for Sha512 {
    fn serialize<S:Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
    fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
        s.serialize_bytes(&self.0[..])
    }
}


// Debug output: `Sha512` shows its raw bytes; `HashRef` prints the
// base58 form, and `Hash` delegates to `HashRef` via `as_ref()`.
impl std::fmt::Debug for Sha512 {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        (&self.0[..]).fmt(fmt)
    }
}
impl<'a> std::fmt::Debug for HashRef<'a> {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(fmt, "{}", self.to_base58())
    }
}
impl std::fmt::Debug for Hash {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        self.as_ref().fmt(fmt)
    }
}


/// A borrowed version of `Hash`.
///
/// `Sha512` borrows the 64 digest bytes; `Recursive` borrows the
/// tagged byte representation of a `Recursive` hash.
#[derive(Copy, Clone, Hash, Eq, Ord, PartialEq, PartialOrd, Serialize)]
pub enum HashRef<'a> {
    None,
    Sha512(&'a [u8]),
    Recursive(&'a [u8]),
}

impl Hash {

    /// Get a `Hash` from a binary slice. This function does not
    /// compute the digest of anything, it just converts types.
    pub fn from_binary(v: &[u8]) -> Option<Self> {
        if v.len() == 0 {
            None
        } else {
            if v[0] == Algorithm::Sha512 as u8 && v.len() == 1 + SHA512_BYTES {
                let mut hash = [0; SHA512_BYTES];
                hash.clone_from_slice(&v[1..]);
                Some(Hash::Sha512(Sha512(hash)))
            } else if v[0] == Algorithm::None as u8 && v.len() == 1 {
                Some(Hash::None)
            } else {
                None
            }
        }
    }

    /// Decode a hash from a base58-encoded `str`.
    pub fn from_base58(base58: &str) -> Option<Self> {
        if let Ok(v) = bs58::decode(base58).into_vec() {
            Self::from_binary(&v)
        } else {
            None
        }
    }

    /// A borrowed version of this `Hash`, used for instance to
    /// query the databases.
    pub fn as_hash_ref(&self) -> HashRef {
        match *self {
            Hash::None => HashRef::None,
            Hash::Sha512(ref e) => {
                HashRef::Sha512(unsafe {
                    std::slice::from_raw_parts(e.0.as_ptr() as *const u8, SHA512_BYTES)
                })
            }
            Hash::Sha512(ref e) => HashRef::Sha512(unsafe {
                std::slice::from_raw_parts(e.0.as_ptr() as *const u8, SHA512_BYTES)
            }),
            Hash::Recursive(Recursive(ref p)) => HashRef::Recursive(p.as_slice()),
        }
    }

    pub fn as_ref(&self) -> HashRef {
        self.as_hash_ref()
    }

    /// Create a `Hash` from the binary slice of the patch contents.
    pub fn of_slice(buf: &[u8]) -> Result<Hash, Error> {
        use openssl::hash::*;
        let hash = {
            let mut hasher = Hasher::new(MessageDigest::sha512())?;
            hasher.update(buf)?;
            hasher.finish()?
        };
        let mut digest: [u8; SHA512_BYTES] = [0; SHA512_BYTES];
        digest.clone_from_slice(hash.as_ref());
        Ok(Hash::Sha512(Sha512(digest)))
    }
}

impl<'a> HashRef<'a> {
    /// Encode this `HashRef` in binary, using the `Representable`
    /// serialization of `UnsafeHash` (one tag byte plus payload).
    pub fn to_binary(&self) -> Vec<u8> {
        let u = self.to_unsafe();
        let mut v = vec![0; u.onpage_size() as usize];
        // The buffer is sized with `onpage_size`, so the write fits.
        unsafe { u.write_value(v.as_mut_ptr()) }
        v
    }

    /// Encode this `HashRef` in base58.
    pub fn to_base58(&self) -> String {
        bs58::encode(&self.to_binary()).into_string()
    }
}
impl Hash {
    /// Encode this `Hash` in base58 (the comment previously said
    /// base64, but this delegates to `HashRef::to_base58`).
    pub fn to_base58(&self) -> String {
        self.as_ref().to_base58()
    }
}

impl<'a> HashRef<'a> {
    /// Build an owned version of a `HashRef` by copying the
    /// borrowed bytes.
    pub fn to_owned(&self) -> Hash {
        match *self {
            HashRef::None => Hash::None,
            HashRef::Sha512(e) => {
                let mut hash = [0; SHA512_BYTES];
                unsafe {
                    // `e` always has SHA512_BYTES bytes by construction,
                    // and `hash` is a fresh local, so the copy is in
                    // bounds and non-overlapping.
                    std::ptr::copy_nonoverlapping(
                        e.as_ptr() as *const u8,
                        hash.as_mut_ptr() as *mut u8,
                        SHA512_BYTES,
                    )
                }
                Hash::Sha512(Sha512(hash))
            }
            HashRef::Recursive(p) => Hash::Recursive(Recursive(p.to_vec())),
        }
    }
}

impl Clone for Hash {
    // Clone by round-tripping through the borrowed form.
    fn clone(&self) -> Self {
        self.as_ref().to_owned()
    }
}

/// The hash of the "null patch" introducing the repository's root vertex.
pub const ROOT_HASH: &'static Hash = &Hash::None;

/// Tag byte identifying the hash algorithm in serialized hashes.
/// Persisted as the first byte (see `Representable for UnsafeHash`);
/// do not renumber.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
#[repr(u8)]
pub enum Algorithm {
    None = 0,
    Sha512 = 1,
    Recursive = 2,
}

/// Raw-pointer form of `HashRef`, used for reading and writing
/// database pages.
#[derive(Clone, Copy, Debug)]
pub enum UnsafeHash {
    None,
    Sha512(*const u8),
    Recursive { ptr: *const u8, len: usize },
}


impl<'a> HashRef<'a> {
    /// Convert to the raw-pointer form (no copy).
    pub fn to_unsafe(&self) -> UnsafeHash {
        match *self {
            HashRef::None => UnsafeHash::None,
            HashRef::Sha512(e) => UnsafeHash::Sha512(e.as_ptr()),
            HashRef::Recursive(p) => UnsafeHash::Recursive {
                ptr: p.as_ptr(),
                len: p.len(),
            },
        }
    }
    /// Rebuild a borrowed hash from its raw-pointer form. The caller
    /// must guarantee the pointed-to bytes outlive `'a`.
    pub unsafe fn from_unsafe(p: UnsafeHash) -> HashRef<'a> {
        match p {
            UnsafeHash::None => HashRef::None,
            UnsafeHash::Sha512(p) => HashRef::Sha512(std::slice::from_raw_parts(p, SHA512_BYTES)),
            UnsafeHash::Recursive { ptr, len } => {
                HashRef::Recursive(std::slice::from_raw_parts(ptr, len))
            }
        }
    }
}


impl Representable for UnsafeHash {
    fn alignment() -> Alignment {
        Alignment::B1
    }

    /// On-disk size: one `Algorithm` tag byte plus the payload.
    /// (The source contained two bodies for this method — a diff
    /// artifact; the one missing the `Recursive` arm was dropped.)
    fn onpage_size(&self) -> u16 {
        1 + (match *self {
            UnsafeHash::Sha512(_) => 64,
            UnsafeHash::None => 0,
            UnsafeHash::Recursive { len, .. } => len as u16,
        })
    }
    unsafe fn write_value(&self, p: *mut u8) {
        trace!("write_value {:?} {:?}", self, p);
        match *self {
            UnsafeHash::Sha512(q) => {
                *p = Algorithm::Sha512 as u8;
                std::ptr::copy(q, p.offset(1), 64)
            }
            UnsafeHash::None => *p = Algorithm::None as u8,
            UnsafeHash::Recursive { ptr, len } => {
                *p = Algorithm::Recursive as u8;
                std::ptr::copy(ptr, p.offset(1), len)
            }
        }
    }
    unsafe fn read_value(p: *const u8) -> Self {
        trace!("read_value {:?} {:?}", p, *p);
        // NOTE(review): `transmute` trusts the tag byte to be a valid
        // `Algorithm` discriminant; a corrupted page would be UB here.
        match std::mem::transmute(*p) {
            Algorithm::Sha512 => UnsafeHash::Sha512(p.offset(1)),
            Algorithm::None => UnsafeHash::None,
            Algorithm::Recursive => {
                // The payload is itself a serialized hash; read it to
                // learn the payload length.
                let rec = Self::read_value(p.offset(1));
                UnsafeHash::Recursive {
                    ptr: p.offset(1),
                    len: rec.onpage_size() as usize,
                }
            }
        }
    }
    unsafe fn cmp_value<T>(&self, _: &T, x: Self) -> std::cmp::Ordering {
        let a = HashRef::from_unsafe(*self);
        let b = HashRef::from_unsafe(x);
        a.cmp(&b)
    }
    type PageOffsets = std::iter::Empty<u64>;
    fn page_offsets(&self) -> Self::PageOffsets {
        std::iter::empty()
    }
}


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72

73
74
75
76
use sanakirja::{Representable, Alignment};
use std;
use hex;
use sanakirja::{Alignment, Representable};
use std;
/// Width in bytes of an `Inode` identifier.
pub const INODE_SIZE: usize = 8;
/// A unique identifier for files or directories in the actual
/// file system, to map "files from the graph" to real files.
#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)]
pub struct Inode([u8; INODE_SIZE]);
/// The `Inode` representing the root of the repository (on the
/// actual file system). All bytes are zero.
pub const ROOT_INODE: Inode = Inode([0; INODE_SIZE]);
// Transparent access to the identifier's raw bytes.
impl std::ops::Deref for Inode {
    type Target = [u8];
    fn deref(&self) -> &[u8] {
        &self.0
    }
}
impl std::ops::DerefMut for Inode {
    fn deref_mut(&mut self) -> &mut [u8] {
        &mut self.0
    }
}

// Debug output shows the identifier as lowercase hex.
impl std::fmt::Debug for Inode {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(fmt, "Inode({})", hex::encode(&self.0))
    }
}
impl Inode {
    /// Hexadecimal rendering of the inode's bytes.
    pub fn to_hex(&self) -> String {
        use hex::ToHex;
        let mut s = String::new();
        self.0.write_hex(&mut s).unwrap();
        s
    }

    /// Is this the root inode? (the root inode is all zero bytes).
    pub fn is_root(&self) -> bool {
        *self == ROOT_INODE
    }

    /// Decode an inode from its hexadecimal representation.
    /// Returns `None` when `hex` is not valid hex or is too long.
    pub fn from_hex(hex: &str) -> Option<Inode> {
        let mut i = Inode([0; INODE_SIZE]);
        if super::from_hex(hex, &mut i) {
            Some(i)
        } else {
            None
        }
    }
}

impl Representable for Inode {
    fn alignment() -> Alignment {
        Alignment::B1
    }
    fn onpage_size(&self) -> u16 {
        std::mem::size_of::<Inode>() as u16
    }
    unsafe fn write_value(&self, p: *mut u8) {
        trace!("write_value {:?}", p);
        std::ptr::copy(self.0.as_ptr(), p, INODE_SIZE)
    }
    unsafe fn read_value(p: *const u8) -> Self {
        trace!("read_value {:?}", p);
        let mut i: Inode = std::mem::uninitialized();
        std::ptr::copy(p, i.0.as_mut_ptr(), INODE_SIZE);
        i
    }
    unsafe fn cmp_value<T>(&self, _: &T, x: Self) -> std::cmp::Ordering {
        self.cmp(&x)
    }
    type PageOffsets = std::iter::Empty<u64>;
    fn page_offsets(&self) -> Self::PageOffsets { std::iter::empty() }
    fn page_offsets(&self) -> Self::PageOffsets {
        std::iter::empty()
    }
}



1

2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18

19
20
21
22

23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54

55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82

83
84
85

86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198

199
200
201
202
use sanakirja::{Representable, Alignment};
use std;
use Hash;
use super::patch_id::*;
use Hash;
use bs58;
use sanakirja::{Alignment, Representable};
use std;
use Hash;

const LINE_ID_SIZE: usize = 8;
pub const KEY_SIZE: usize = PATCH_ID_SIZE + LINE_ID_SIZE;

/// The node at the root of the repository graph.
pub const ROOT_KEY: Key<PatchId> = Key {
    patch: ROOT_PATCH_ID,
    line: LineId(0),
};

use hex::ToHex;
use std::fmt::Write;
impl ToHex for Key<PatchId> {
    fn write_hex<W:Write>(&self, w: &mut W) -> std::fmt::Result {
    fn write_hex<W: Write>(&self, w: &mut W) -> std::fmt::Result {
        self.patch.write_hex(w)?;
        self.line.write_hex(w)
    }
    fn write_hex_upper<W:Write>(&self, w: &mut W) -> std::fmt::Result {
    fn write_hex_upper<W: Write>(&self, w: &mut W) -> std::fmt::Result {
        self.patch.write_hex(w)?;
        self.line.write_hex(w)
    }
}

impl Key<PatchId> {
    /// Base58 rendering: the patch id's base58 followed by the
    /// base58 of the little-endian line counter.
    pub fn to_base58(&self) -> String {
        let mut b = self.patch.to_base58();
        let mut x = [0; 8];
        LittleEndian::write_u64(&mut x, self.line.0);
        bs58::encode(&x).into(&mut b);
        b
    }
    /// Hexadecimal rendering (patch id, then line id).
    pub fn to_hex(&self) -> String {
        let mut s = String::new();
        self.write_hex(&mut s).unwrap();
        s
    }
}

impl Key<Hash> {
    /// Base58 rendering, analogous to `Key<PatchId>::to_base58`:
    /// the patch hash's base58 followed by the line counter's.
    pub fn to_base58(&self) -> String {
        let mut b = self.patch.to_base58();
        let mut x = [0; 8];
        LittleEndian::write_u64(&mut x, self.line.0);
        bs58::encode(&x).into(&mut b);
        b
    }
}

impl Key<PatchId> {

    /// Is this the root key? (the root key is all 0s).
    pub fn is_root(&self) -> bool {
        self == &ROOT_KEY
    }

    /// Decode this key from its hexadecimal representation.
    /// Returns `None` for invalid hex or input longer than
    /// `2 * KEY_SIZE` digits. The first 8 bytes are the patch id,
    /// the next 8 the line id, both little-endian.
    pub fn from_hex(hex: &str) -> Option<Self> {
        let mut s = [0; KEY_SIZE];
        if super::from_hex(hex, &mut s) {
            Some(Key {
                patch: PatchId(LittleEndian::read_u64(&s[..8])),
                line: LineId(LittleEndian::read_u64(&s[8..])),
            })
        } else {
            None
        }
    }
}

// A LineId contains a counter encoded little-endian, so that it
// can both be deterministically put into a Sanakirja database,
// and passed to standard serializers.

/// An index for file chunks within a patch.
/// Paired with a patch identifier in `Key` to name a graph node.
#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
pub struct LineId(pub u64);

impl ToHex for LineId {
    fn write_hex<W:Write>(&self, w: &mut W) -> std::fmt::Result {
    fn write_hex<W: Write>(&self, w: &mut W) -> std::fmt::Result {
        PatchId(self.0).write_hex(w)
    }
    fn write_hex_upper<W:Write>(&self, w: &mut W) -> std::fmt::Result {
    fn write_hex_upper<W: Write>(&self, w: &mut W) -> std::fmt::Result {
        PatchId(self.0).write_hex_upper(w)
    }
}

// Debug output shows the little-endian hex form, e.g. `LineId(0x…)`.
impl std::fmt::Debug for LineId {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(fmt, "LineId(0x{})", self.to_hex())
    }
}

impl LineId {
    /// Creates a new `LineId`, initialized to 0.
    pub fn new() -> LineId {
        LineId(0)
    }
    /// Is this line identifier all 0?
    pub fn is_root(&self) -> bool {
        self.0 == 0
    }
    /// Hexadecimal rendering (little-endian, via `PatchId`'s `ToHex`).
    pub fn to_hex(&self) -> String {
        let mut s = String::new();
        self.write_hex(&mut s).unwrap();
        s
    }
    /// Base58 rendering of the little-endian counter.
    pub fn to_base58(&self) -> String {
        let mut x = [0; 8];
        LittleEndian::write_u64(&mut x, self.0);
        let mut b = String::new();
        bs58::encode(&x).into(&mut b);
        b
    }
    /// Inverse of `to_base58`; `None` on invalid base58 input.
    pub fn from_base58(s: &str) -> Option<Self> {
        let mut p = [0; 8];
        if bs58::decode(s).into(&mut p).is_ok() {
            Some(LineId(LittleEndian::read_u64(&p)))
        } else {
            None
        }
    }
}
use byteorder::{ByteOrder, LittleEndian};
// Offsetting a line id by a number of lines.
impl std::ops::Add<usize> for LineId {
    type Output = LineId;
    fn add(self, x: usize) -> Self::Output {
        LineId(self.0 + x as u64)
    }
}
impl std::ops::AddAssign<usize> for LineId {
    fn add_assign(&mut self, x: usize) {
        // `LineId` is `Copy`; the former `self.clone()` was redundant.
        *self = *self + x
    }
}

/// A node in the repository graph, made of a patch internal
/// identifier, and a line identifier in that patch.
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
pub struct Key<H> {
    /// The patch that introduced this node. `H` is the patch
    /// identifier type (e.g. `PatchId` or `Hash`, per the impls in
    /// this module).
    pub patch: H,
    /// The line identifier of the node in that patch. Here,
    /// "line" does not imply anything on the contents of the
    /// chunk.
    pub line: LineId,
}

// Guards the layout assumption that `Key<PatchId>` is exactly its
// two u64 fields (16 bytes, no padding), matching the on-page size
// used by `Representable for Key<PatchId>`.
#[test]
fn test_key_alignment() {
    assert_eq!(std::mem::size_of::<Key<PatchId>>(), 16)
}

// Borrow the line component of a key.
impl<T> AsRef<LineId> for Key<T> {
    fn as_ref(&self) -> &LineId {
        &self.line
    }
}

impl<T: Clone> Key<Option<T>> {
    /// Unwraps the optional patch component.
    ///
    /// Panics if `self.patch` is `None`.
    pub fn unwrap_patch(&self) -> Key<T> {
        Key {
            patch: self.patch.as_ref().unwrap().clone(),
            line: self.line.clone(),
        }
    }
}

impl Representable for Key<PatchId> {
    fn alignment() -> Alignment {
        Alignment::B1
    }
    /// 16 bytes on-page: patch id then line id, both little-endian.
    fn onpage_size(&self) -> u16 {
        (PATCH_ID_SIZE + LINE_ID_SIZE) as u16
    }
    unsafe fn write_value(&self, p: *mut u8) {
        trace!("write_value {:?}", p);
        let p = std::slice::from_raw_parts_mut(p, KEY_SIZE);
        LittleEndian::write_u64(p, self.patch.0);
        LittleEndian::write_u64(&mut p[PATCH_ID_SIZE..], self.line.0);
    }
    unsafe fn read_value(p: *const u8) -> Self {
        trace!("read_value {:?}", p);
        let p = std::slice::from_raw_parts(p, KEY_SIZE);
        let patch = LittleEndian::read_u64(p);
        let line = LittleEndian::read_u64(&p[PATCH_ID_SIZE..]);
        Key {
            patch: PatchId(patch),
            line: LineId(line),
        }
    }
    unsafe fn cmp_value<T>(&self, _: &T, x: Self) -> std::cmp::Ordering {
        self.cmp(&x)
    }
    type PageOffsets = std::iter::Empty<u64>;
    // The source contained this method twice (diff artifact).
    fn page_offsets(&self) -> Self::PageOffsets {
        std::iter::empty()
    }
}
1
2
3

4


5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34


35


36
37
38
39
40
41
42
43

44


45
46
47
48
49
50
use hex;
use rand;
use sanakirja;
pub use sanakirja::Transaction;
use sanakirja::Representable;
use std::path::Path;
use rand;
pub use sanakirja::Transaction;
use std;
use std::path::Path;
use {Error, Result};

pub use self::patch_id::*;

/// Decodes `hex` into the byte buffer `s`, returning `false` when
/// `hex` contains a non-hexadecimal character or has more than
/// `2 * s.len()` digits.
///
/// Accepts upper- and lowercase digits. An odd number of digits is
/// allowed: the final nibble ends up in the high half of its byte.
/// Bytes of `s` beyond the decoded digits are left untouched.
fn from_hex(hex: &str, s: &mut [u8]) -> bool {
    let digits = hex.as_bytes();
    if digits.len() > 2 * s.len() {
        return false;
    }
    for (i, &d) in digits.iter().enumerate() {
        let c = d.to_ascii_lowercase();
        let nibble = if c >= b'0' && c <= b'9' {
            c - b'0'
        } else if c >= b'a' && c <= b'f' {
            c - b'a' + 10
        } else {
            return false;
        };
        s[i / 2] = s[i / 2] << 4 | nibble;
    }
    if digits.len() % 2 == 1 {
        s[digits.len() / 2] <<= 4;
    }
    true
}
mod patch_id;
mod key;
mod edge;
mod hash;
mod inode;
mod file_header;
mod file_id;
mod hash;
mod inode;
mod key;
mod patch_id;
mod small_string;

pub use self::key::*;
pub use self::edge::*;
pub use self::hash::*;
pub use self::inode::*;
pub use self::file_header::*;
pub use self::file_id::*;
pub use self::hash::*;
pub use self::inode::*;
pub use self::key::*;
pub use self::small_string::*;


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24

25
26
27
28
29

30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98

99
100
101
102
use sanakirja::{Representable, Alignment};
use std;
use bs58;
use byteorder::{ByteOrder, LittleEndian};
use sanakirja::{Alignment, Representable};
use std;

// Patch Identifiers.
/// Width in bytes of a `PatchId`.
pub const PATCH_ID_SIZE: usize = 8;
/// The identifier of the root patch (all zeros).
pub const ROOT_PATCH_ID: PatchId = PatchId(0);

/// An internal patch identifier, less random than external patch
/// identifiers, but more stable in time, and much smaller.
#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
pub struct PatchId(pub(crate) u64);

// Debug output uses the base58 form, matching `to_base58`.
impl std::fmt::Debug for PatchId {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(fmt, "PatchId({})", self.to_base58())
    }
}

use hex::ToHex;
use std::fmt::Write;

impl ToHex for PatchId {
    fn write_hex<W:Write>(&self, w: &mut W) -> std::fmt::Result {
    fn write_hex<W: Write>(&self, w: &mut W) -> std::fmt::Result {
        let mut x = [0; 8];
        LittleEndian::write_u64(&mut x, self.0);
        x.write_hex(w)
    }
    fn write_hex_upper<W:Write>(&self, w: &mut W) -> std::fmt::Result {

    fn write_hex_upper<W: Write>(&self, w: &mut W) -> std::fmt::Result {
        let mut x = [0; 8];
        LittleEndian::write_u64(&mut x, self.0);
        x.write_hex_upper(w)
    }
}

impl PatchId {
    /// New patch id (initialised to 0).
    pub fn new() -> Self {
        PatchId(0)
    }

    /// Reads a patch id from the first 8 bytes of `s`, little-endian.
    pub fn from_slice(s: &[u8]) -> Self {
        PatchId(LittleEndian::read_u64(s))
    }

    /// Encode this patch id in base58.
    pub fn to_base58(&self) -> String {
        let mut x = [0; 8];
        LittleEndian::write_u64(&mut x, self.0);
        bs58::encode(&x).into_string()
    }
    /// Decode this patch id from its base58 encoding.
    /// Returns `None` on invalid base58 input.
    pub fn from_base58(s: &str) -> Option<Self> {
        let mut p = [0; 8];
        if bs58::decode(s).into(&mut p).is_ok() {
            Some(PatchId(LittleEndian::read_u64(&p)))
        } else {
            None
        }
    }
    /// Is this the root patch id (all zeros)?
    pub fn is_root(&self) -> bool {
        *self == ROOT_PATCH_ID
    }
}

// Transparent access to the underlying counter.
impl std::ops::Deref for PatchId {
    type Target = u64;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl std::ops::DerefMut for PatchId {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

impl Representable for PatchId {
    fn alignment() -> Alignment {
        Alignment::B8
    }
    /// Fixed 8 bytes on-page, little-endian.
    fn onpage_size(&self) -> u16 {
        8
    }
    unsafe fn write_value(&self, p: *mut u8) {
        LittleEndian::write_u64(std::slice::from_raw_parts_mut(p, 8), self.0)
    }
    unsafe fn read_value(p: *const u8) -> Self {
        let x = PatchId(LittleEndian::read_u64(std::slice::from_raw_parts(p, 8)));
        trace!("read_value {:?} {:?}", p, x.to_base58());
        x
    }
    unsafe fn cmp_value<T>(&self, _: &T, x: Self) -> std::cmp::Ordering {
        self.0.cmp(&x.0)
    }
    type PageOffsets = std::iter::Empty<u64>;
    // The source contained this method twice (diff artifact).
    fn page_offsets(&self) -> Self::PageOffsets {
        std::iter::empty()
    }
}

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28

29
30
31
32
33
34
35
36
37
38
39
40
41
42
43

44
45
46
47
48
49

50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65


66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103


104
105
106
107
108
109
110
111
112
113
114
115

116
117
118
119
120
121
122
123
124
125


126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146



147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174

175
176
177
178
use sanakirja::{Representable, Alignment};
use sanakirja::{Alignment, Representable};
use std;
/// Maximum number of bytes a `SmallString` can hold.
pub const MAX_LENGTH: usize = 255;

/// A string of length at most 255, with a more compact on-disk
/// encoding.
#[repr(packed)]
pub struct SmallString {
    // Number of bytes of `str` actually in use.
    pub len: u8,
    pub str: [u8; MAX_LENGTH],
}

/// A borrowed version of `SmallString`: a pointer to a length byte
/// followed by that many bytes of UTF-8 (same layout as
/// `SmallString`).
#[derive(Clone, Copy)]
pub struct SmallStr<'a>(*const u8, std::marker::PhantomData<&'a ()>);

// Cloning round-trips through `&str`, copying only the used bytes.
impl Clone for SmallString {
    fn clone(&self) -> Self {
        Self::from_str(self.as_str())
    }
}

// Debug delegates to the borrowed view.
impl std::fmt::Debug for SmallString {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        self.as_small_str().fmt(fmt)
    }
}


// Equality compares string contents, not pointers.
impl<'a> PartialEq for SmallStr<'a> {
    fn eq(&self, x: &SmallStr) -> bool {
        self.as_str().eq(x.as_str())
    }
}
impl<'a> Eq for SmallStr<'a> {}

impl PartialEq for SmallString {
    fn eq(&self, x: &SmallString) -> bool {
        self.as_str().eq(x.as_str())
    }
}
impl Eq for SmallString {}

impl<'a> std::hash::Hash for SmallStr<'a> {
    fn hash<H:std::hash::Hasher>(&self, x: &mut H) {
    fn hash<H: std::hash::Hasher>(&self, x: &mut H) {
        self.as_str().hash(x)
    }
}

impl std::hash::Hash for SmallString {
    fn hash<H:std::hash::Hasher>(&self, x: &mut H) {
    fn hash<H: std::hash::Hasher>(&self, x: &mut H) {
        self.as_str().hash(x)
    }
}

// Ordering is the lexicographic order of the string contents.
impl<'a> PartialOrd for SmallStr<'a> {
    fn partial_cmp(&self, x: &SmallStr) -> Option<std::cmp::Ordering> {
        self.as_str().partial_cmp(x.as_str())
    }
}
impl<'a> Ord for SmallStr<'a> {
    fn cmp(&self, x: &SmallStr) -> std::cmp::Ordering {
        self.as_str().cmp(x.as_str())
    }
}



// Debug shows the borrowed string itself.
impl<'a> std::fmt::Debug for SmallStr<'a> {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        self.as_str().fmt(fmt)
    }
}

impl SmallString {
    pub fn len(&self) -> usize {
        self.len as usize
    }

    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Builds a `SmallString` from `s`. The copy below indexes
    /// `self.str[..s.len()]`, so `s` longer than `MAX_LENGTH` panics.
    pub fn from_str(s: &str) -> Self {
        let mut b = SmallString {
            len: s.len() as u8,
            str: [0; MAX_LENGTH],
        };
        b.clone_from_str(s);
        b
    }
    /// Overwrites the contents with `s`. Bytes beyond `s.len()` keep
    /// their previous contents (only the length byte shrinks).
    pub fn clone_from_str(&mut self, s: &str) {
        self.len = s.len() as u8;
        (&mut self.str[..s.len()]).copy_from_slice(s.as_bytes());
    }
    /// Truncates to the empty string (does not zero the buffer).
    pub fn clear(&mut self) {
        self.len = 0;
    }
    /// Appends `s`; panics if the result would exceed 255 bytes.
    pub fn push_str(&mut self, s: &str) {
        let l = self.len as usize;
        assert!(l + s.len() <= 0xff);
        (&mut self.str[l..l + s.len()]).copy_from_slice(s.as_bytes());
        self.len += s.len() as u8;
    }

    /// Borrowed view of the length byte + contents.
    /// (The source contained this constructor twice — a diff
    /// artifact; the duplicate was removed.)
    pub fn as_small_str(&self) -> SmallStr {
        SmallStr(
            self as *const SmallString as *const u8,
            std::marker::PhantomData,
        )
    }

    pub fn as_str(&self) -> &str {
        self.as_small_str().as_str()
    }
}

impl<'a> SmallStr<'a> {
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Length in bytes, read from the leading length byte.
    pub fn len(&self) -> usize {
        unsafe { (*self.0) as usize }
    }

    /// Borrows the string contents. Relies on the pointed-to bytes
    /// being valid UTF-8, as guaranteed by construction from
    /// `SmallString`. (The source contained this body twice — a diff
    /// artifact; the duplicate was removed.)
    pub fn as_str(&self) -> &'a str {
        unsafe {
            std::str::from_utf8_unchecked(std::slice::from_raw_parts(
                self.0.offset(1),
                *self.0 as usize,
            ))
        }
    }
    /// Raw-pointer form for database storage.
    pub fn to_unsafe(&self) -> UnsafeSmallStr {
        UnsafeSmallStr(self.0)
    }
    /// Rebuilds a borrowed string from its raw-pointer form.
    ///
    /// # Safety
    /// `u` must point to a length byte followed by that many valid
    /// UTF-8 bytes, alive for `'a`.
    pub unsafe fn from_unsafe(u: UnsafeSmallStr) -> Self {
        SmallStr(u.0, std::marker::PhantomData)
    }
    /// Copies the contents into an owned `SmallString`.
    pub fn to_owned(&self) -> SmallString {
        SmallString::from_str(self.as_str())
    }
}

#[derive(Clone, Copy)]
pub struct UnsafeSmallStr(*const u8);
impl std::fmt::Debug for UnsafeSmallStr {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        unsafe {
            SmallStr::from_unsafe(*self).fmt(fmt)
        }
        unsafe { SmallStr::from_unsafe(*self).fmt(fmt) }
    }
}

impl Representable for UnsafeSmallStr {
    fn alignment() -> Alignment {
        Alignment::B1
    }
    /// On-disk size: the length byte plus the string bytes.
    fn onpage_size(&self) -> u16 {
        unsafe {
            let len = (*self.0) as u16;
            1 + len
        }
    }
    unsafe fn write_value(&self, p: *mut u8) {
        trace!("write_value {:?}", p);
        std::ptr::copy(self.0, p, self.onpage_size() as usize)
    }
    unsafe fn read_value(p: *const u8) -> Self {
        trace!("read_value {:?}", p);
        UnsafeSmallStr(p)
    }
    unsafe fn cmp_value<T>(&self, _: &T, x: Self) -> std::cmp::Ordering {
        let a = SmallStr::from_unsafe(UnsafeSmallStr(self.0));
        let b = SmallStr::from_unsafe(x);
        a.as_str().cmp(b.as_str())
    }
    type PageOffsets = std::iter::Empty<u64>;
    // The source contained this method twice (diff artifact).
    fn page_offsets(&self) -> Self::PageOffsets {
        std::iter::empty()
    }
}

1
2

3
4
// Markers used when rendering conflicting regions (presumably —
// the `>>>>` shape matches conventional conflict markers; confirm
// against the rendering code).
//
// NOTE(review): the diff render contained three conflicting
// definitions of `SEPARATOR`; the newest form (leading newline,
// matching `START_MARKER`) was kept.
pub const START_MARKER: &'static str = "\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n";
pub const SEPARATOR: &'static str = "\n================================\n";






















1
2
3
4

5

6
7
8
9
10
11


12
13
14
15
16
17

18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41

42
43







44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63

64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94





95
96
97
98
99
100
101

102
103
104
105




106
107
108
109




110
111
112
113
114
115
116
117
118
119
120
121
122

123
124
125
126
127

128
129
130
131
132





133
134
135
136
137
138
139
140
141
142
143
144
145
146

147
148
149
150
151
152
153
154
155


156
157
158
159
160
161
162
163


164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213






214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249

250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267




268
269
270
271
272
273
274
275
276
277
278
279
280
281
282


283
284
285
286
287
288
289

290
291
292
293
294
295
296
297
298
299
300
301





302
303
304
305
306
307
308
309















310
311
312
313
314
315
316
317
318
319
320
321

322
323
324
325
326
327
328
329
330
331
332
333
334


335
336
337
338
339
340
341
342
343
344
345





346
347
348
349
350
351
352
353
354
355
356
357

358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382



383
384
385
386
387
388
389
390
391
392
393
394















395
396
397
398
399
400
401
402
403
404
405
406
407





408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438












439
440
441
442
443
444
445
446
447



448
449
450
451
452
453
454
455
456
457
458
use {ErrorKind, Result};
                return Err(ErrorKind::AlreadyAdded.into());
    pub fn add_inode(
        path: &std::path::Path,
        path: &std::path::Path,
        path_: &std::path::Path,
        pb: &Path,
        files: &mut Vec<PathBuf>,
                    self.collect(v.to_owned(), next_pb_.as_path(), k.basename.as_str(), files)?;
        for (_, child) in self.iter_nodes(branch, Some((key, Some(&e)))).take_while(
            |&(k, ref v)| k == key && v.flag <= EdgeFlags::FOLDER_EDGE | EdgeFlags::PSEUDO_EDGE,
        ) {
            for (_, grandchild) in self.iter_nodes(branch, Some((child.dest, Some(&e))))
                }) {
        path: &'a std::path::Path,
            }) {
                    for (_, grandparent) in self.iter_nodes(branch, Some((parent.dest, Some(&e))))
                        }) {
                if contents.into_cow().split_at(2).1 == comp.as_bytes() {
                        return Err(ErrorKind::FileNameCount(current_key).into());
                    for (_, grandchild) in self.iter_nodes(branch, Some((child.dest, Some(&e))))
//! Manipulating the internal representation of files and directories
//! tracked by Pijul (i.e. adding files, removing files, getting file
//! names…).

use backend::*;
use backend;
use {Result, ErrorKind};
use backend::*;
use {Error, Result};
use fs_representation::{RepoPath, in_repo_root};

use rand;
use std;
use std::path::{Path, PathBuf};
use std::iter::Iterator;
use std::collections::BTreeMap;
use std::iter::Iterator;
use std::path::{Path, PathBuf};

impl<'env, R: rand::Rng> MutTxn<'env, R> {
    /// Marks the given inode as moved, so that the next record sees it
    /// as a file move rather than an addition.
    pub fn mark_inode_moved(&mut self, inode: Inode) {
        let header = self.get_inodes(inode).map(|h| h.clone());
        if let Some(mut h) = header {
            h.status = FileStatus::Moved;
            self.replace_inodes(inode, h).unwrap();
        }
    }

    /// Create an inode that doesn't exist in the repository, but
    /// doesn't put it into the repository.
    pub fn create_new_inode(&self) -> Inode {
        let mut inode: Inode = ROOT_INODE.clone();
        loop {
            // Fill the inode with random bytes, retrying until we find
            // one that is not already registered in the revtree.
            for byte in inode.iter_mut() {
                *byte = rand::random()
            }
            if self.get_revtree(inode).is_none() {
                return inode;
            }
        }
    }


    /// Record the information that `parent_inode` is now a parent of
    /// file `filename`, and `filename` has inode `child_inode`.
    fn make_new_child(&mut self,
                      parent_inode: Inode,
                      filename: &str,
                      is_dir: bool,
                      child_inode: Option<Inode>)
                      -> Result<Inode> {

    fn make_new_child(
        &mut self,
        parent_inode: Inode,
        filename: &str,
        is_dir: bool,
        child_inode: Option<Inode>,
    ) -> Result<Inode> {
        let parent_id = OwnedFileId {
            parent_inode: parent_inode.clone(),
            basename: SmallString::from_str(filename),
        };
        if filename == ".pijul" {
            return Err(Error::CannotAddDotPijul);
        }
        if let Some(inode) = self.get_tree(&parent_id.as_file_id()) {
            // If we already have the file, make sure the file status
            // is Ok (i.e. not zombie, not deleted).
            let mut header = if let Some(header) = self.get_inodes(inode) {
                header.to_owned()
            } else {
                return Err(ErrorKind::AlreadyAdded.into())
                return Err(Error::AlreadyAdded);
            };
            if let FileStatus::Ok = header.status {
            } else {
                header.status = FileStatus::Ok;
                self.replace_inodes(inode, header)?;
            }
            Ok(inode)
        } else {
            // Else, add a new file.

            let child_inode = match child_inode {
                None => self.create_new_inode(),
                Some(i) => i.clone(),
            };
            self.put_tree(&parent_id.as_file_id(), child_inode)?;
            self.put_revtree(child_inode, &parent_id.as_file_id())?;

            if is_dir {
                // If this new file is a directory, add a name-less
                // file id without a reverse in revtree.
                let dir_id = OwnedFileId {
                    parent_inode: child_inode.clone(),
                    basename: SmallString::from_str(""),
                };
                self.put_tree(&dir_id.as_file_id(), child_inode)?;
            };
            Ok(child_inode)
        }
    }

    pub fn add_inode(&mut self,
                     inode: Option<Inode>,
                     path: &std::path::Path,
                     is_dir: bool)
                     -> Result<()> {
    pub fn add_inode<P: AsRef<Path>>(
        &mut self,
        inode: Option<Inode>,
        path: &RepoPath<P>,
        is_dir: bool,
    ) -> Result<()> {
        if let Some(parent) = path.parent() {
            let (mut current_inode, unrecorded_path) = self.closest_in_repo_ancestor(&parent).unwrap();
            let (mut current_inode, unrecorded_path) =
                self.closest_in_repo_ancestor(&parent).unwrap();

            for c in unrecorded_path {
                current_inode = self.make_new_child(current_inode,
                                                    c.as_os_str().to_str().unwrap(),
                                                    true,
                                                    None)?
                current_inode =
                    self.make_new_child(current_inode, c.as_os_str().to_str().unwrap(), true, None)?
            }

            self.make_new_child(current_inode,
                                path.file_name().unwrap().to_str().unwrap(),
                                is_dir,
                                inode)?;
            self.make_new_child(
                current_inode,
                path.file_name().unwrap().to_str().unwrap(),
                is_dir,
                inode,
            )?;
        }
        Ok(())
    }

    /// Returns `true` iff `a` is an ancestor of `b` in the working-copy
    /// tree (or `a == b`), by walking `revtree` upwards from `b`.
    pub fn inode_is_ancestor_of(&self, a: Inode, mut b: Inode) -> bool {
        loop {
            if a == b {
                return true;
            }
            if let Some(b_parent) = self.get_revtree(b) {
                b = b_parent.parent_inode
            } else {
                // Reached the root without meeting `a`.
                return false;
            }
        }
    }

    pub fn move_file(&mut self,
                     path: &std::path::Path,
                     path_: &std::path::Path,
                     is_dir: bool)
                     -> Result<()> {
    pub fn move_file(
        &mut self,
        origin: &RepoPath<impl AsRef<Path>>,
        destination: &RepoPath<impl AsRef<Path>>,
        is_dir: bool,
    ) -> Result<()> {
        debug!("move_file: {},{}", origin.display(), destination.display());
        if let Some(parent) = origin.parent() {
            let fileref = OwnedFileId {
                parent_inode: self.find_inode(&parent)?,
                basename: SmallString::from_str(origin.file_name().unwrap().to_str().unwrap()),
            };

            if let Some(inode) = self.get_tree(&fileref.as_file_id()).map(|i| i.clone()) {

                // Now the last inode is in "*inode"
                debug!("txn.del fileref={:?}", fileref);
                self.del_tree(&fileref.as_file_id(), None)?;
                self.del_revtree(inode, None)?;

                debug!("inode={} destination={}", inode.to_hex(), destination.display());
                self.add_inode(Some(inode), destination, is_dir)?;
                self.mark_inode_moved(inode);

                return Ok(())

                return Ok(());
            }
        }
        Err(Error::FileNotInRepo(origin.to_path_buf()))
    }

    // Deletes a directory, given by its inode, recursively.
    //
    // Returns `true` iff `key` or one of its descendants was recorded
    // (i.e. has an entry in `inodes`); recorded files are only marked
    // `Deleted` here, unrecorded ones are removed from the tree outright.
    pub fn rec_delete(&mut self, key: Inode) -> Result<bool> {


        debug!("rec_delete, key={:?}", key.to_hex());
        // The name-less file id of `key` lists its children in `tree`.
        let file_id = OwnedFileId {
            parent_inode: key.clone(),
            basename: SmallString::from_str(""),
        };

        // Collect the children first: we cannot delete from `tree`
        // while iterating over it.
        let children: Vec<(_, Inode)> = self
            .iter_tree(Some((&file_id.as_file_id(), None)))
            .take_while(|&(ref k, _)| key == k.parent_inode)
            .filter(|&(ref k, _)| !k.basename.is_empty())
            .map(|(k, v)| (k.to_owned(), v.to_owned()))
            .collect();

        let mut has_recorded_descendants = false;
        for (_, b) in children {
            debug!("deleting from tree {:?}", b);
            has_recorded_descendants |= self.rec_delete(b)?;
        }

        // Now that the directory is empty, mark the corresponding node as deleted (flag '2').
        if let Some(mut header) = self.get_inodes(key).map(|h| h.clone()) {
            // If this is was recorded, mark deleted.
            debug!("key {:?}, header = {:?}", key, header);
            header.status = FileStatus::Deleted;
            self.replace_inodes(key, header)?;
            debug!("after = {:?}", self.get_inodes(key).map(|h| h.clone()));
        } else if !has_recorded_descendants {
            // Else, simply delete from the tree.
            let parent = self.get_revtree(key).unwrap().to_owned();
            debug!("key = {:?}, parent = {:?}", key, parent);
            self.del_tree(&parent.as_file_id(), None)?;
            self.del_revtree(key, None)?;
        }
        Ok(has_recorded_descendants)
    }

    /// Removes a file from the repository, deleting its whole subtree
    /// if it is a directory.
    pub fn remove_file(&mut self, path: &RepoPath<impl AsRef<Path>>) -> Result<()> {
        debug!("remove_file");
        let inode = self.find_inode(path)?;
        debug!("rec_delete");
        self.rec_delete(inode)?;
        debug!("/rec_delete");
        Ok(())
    }
}

impl<A: Transaction, R> backend::GenericTxn<A, R> {
    /// Traverses the `tree` base recursively, collecting all descendants of `key`.
    fn collect(&self,
               key: Inode,
               pb: &Path,
               basename: &str,
               files: &mut Vec<PathBuf>)
               -> Result<()> {
    fn collect(
        &self,
        key: Inode,
        current_path: &RepoPath<impl AsRef<Path>>,
        basename: &str,
        files: &mut Vec<RepoPath<PathBuf>>,
    ) -> Result<()> {
        debug!("collecting {:?},{:?}", key, basename);
        let add = match self.get_inodes(key) {
            Some(inode) => {
                debug!("node = {:?}", inode);
                inode.status != FileStatus::Deleted
            }
            None => true,
        };
        if add {
            debug!("basename = {:?}", basename);
            
            let next_pb = current_path.join(Path::new(basename));
            if basename.len() > 0 {
                files.push(next_pb.clone())
            }

            debug!("starting iterator, key={:?}", key);
            let fileid = OwnedFileId {
                parent_inode: key.clone(),
                basename: SmallString::from_str(""),
            };
            for (k, v) in self
                .iter_tree(Some((&fileid.as_file_id(), None)))
                .take_while(|&(ref k, _)| k.parent_inode == key)
            {
                debug!("iter: {:?} {:?}", k, v);
                if k.basename.len() > 0 {
                    self.collect(v.to_owned(), &next_pb, k.basename.as_str(), files)?;
                }
            debug!("ending iterator {:?}", { let v: Vec<_> = self.iter_tree(Some((&fileid.as_file_id(), None))).collect(); v });
            }
            debug!("ending iterator {:?}", {
                let v: Vec<_> = self.iter_tree(Some((&fileid.as_file_id(), None))).collect();
                v
            });
        }
        Ok(())
    }

    /// Returns a vector containing all (non-deleted) files in the
    /// repository, as paths relative to the repository root.
    pub fn list_files(&self, inode: Inode) -> Result<Vec<RepoPath<PathBuf>>> {
        debug!("list_files {:?}", inode);
        let mut collected = Vec::new();
        self.collect(inode, &in_repo_root(), "", &mut collected)?;
        Ok(collected)
    }

    /// Returns a list of files under the given inode.
    pub fn list_files_under_inode(&self,
                                  inode: Inode)
                                  -> Vec<(SmallString, Option<Key<PatchId>>, Inode)> {

    pub fn list_files_under_inode(
        &self,
        inode: Inode,
    ) -> Vec<(SmallString, Option<Key<PatchId>>, Inode)> {
        let mut result = Vec::new();

        let file_id = OwnedFileId {
            parent_inode: inode,
            basename: SmallString::from_str(""),
        };
        for (k, v) in self
            .iter_tree(Some((&file_id.as_file_id(), None)))
            .take_while(|&(ref k, _)| k.parent_inode == inode)
        {
            let header = self.get_inodes(k.parent_inode).map(|x| x.clone());
                // add: checking that this file has neither been moved nor deleted.
                println!("============= {:?} {:?}", k, v);
            // add: checking that this file has neither been moved nor deleted.
            println!("============= {:?} {:?}", k, v);
            let add = match header {
                Some(ref h) => h.status == FileStatus::Ok,
                None => true,
            };
            if add && k.basename.len() > 0 {
                result.push((k.basename.to_owned(), header.map(|h| h.key.clone()), v.clone()))
                result.push((
                    k.basename.to_owned(),
                    header.map(|h| h.key.clone()),
                    v.clone(),
                ))
            }
        }

        result
    }

    /// Returns a list of files under the given inode.
    pub fn list_files_under_node(&self,
                                 branch: &Branch,
                                 key: Key<PatchId>)
                                 -> BTreeMap<Key<PatchId>, Vec<(FileMetadata, &str)>> {

    pub fn list_files_under_node(
        &self,
        branch: &Branch,
        key: Key<PatchId>,
    ) -> BTreeMap<Key<PatchId>, Vec<(FileMetadata, &str)>> {
        let mut result = BTreeMap::new();

        let e = Edge::zero(EdgeFlags::FOLDER_EDGE);
        for (_, child) in self.iter_nodes(branch, Some((key, Some(&e))))
            .take_while(|&(k, ref v)| k == key && v.flag <= EdgeFlags::FOLDER_EDGE|EdgeFlags::PSEUDO_EDGE) {

                let name = self.get_contents(child.dest).unwrap();
                // This is supposed to be a small string anyway.
                let (perms, basename) = name.as_slice().split_at(2);
                let perms = FileMetadata::from_contents(perms);
                let basename = std::str::from_utf8(basename).unwrap();

                for (_, grandchild) in self.iter_nodes(branch, Some((child.dest, Some(&e))))
                    .take_while(|&(k, ref v)| k == child.dest && v.flag <= EdgeFlags::FOLDER_EDGE|EdgeFlags::PSEUDO_EDGE) {


                        let names = result.entry(grandchild.dest.to_owned()).or_insert(vec![]);
                        names.push((perms, basename))
        for (_, child) in self
            .iter_nodes(branch, Some((key, Some(e))))
            .take_while(|&(k, ref v)| {
                k == key && v.flag <= EdgeFlags::FOLDER_EDGE | EdgeFlags::PSEUDO_EDGE
            })
        {
            let name = self.get_contents(child.dest).unwrap();
            // This is supposed to be a small string anyway.
            let (perms, basename) = name.as_slice().split_at(2);
            let perms = FileMetadata::from_contents(perms);
            let basename = std::str::from_utf8(basename).unwrap();

                    }
            for (_, grandchild) in self
                .iter_nodes(branch, Some((child.dest, Some(e))))
                .take_while(|&(k, ref v)| {
                    k == child.dest && v.flag <= EdgeFlags::FOLDER_EDGE | EdgeFlags::PSEUDO_EDGE
                })
            {
                let names = result.entry(grandchild.dest.to_owned()).or_insert(vec![]);
                names.push((perms, basename))
            }
        }
        result
    }



    /// `true` iff `inode` is the root or has a name-less entry in
    /// `tree` (which only directories get).
    pub fn is_directory(&self, inode: &Inode) -> bool {
        if inode == &ROOT_INODE {
            return true;
        }
        let file_id = OwnedFileId {
            parent_inode: inode.clone(),
            basename: SmallString::from_str(""),
        };
        self.get_tree(&file_id.as_file_id()).is_some()
    }

    /// Splits a path into (1) the deepest inode from the root that is
    /// an ancestor of the path or the path itself and (2) the
    /// remainder of this path
    fn closest_in_repo_ancestor<'a>
        (&self,
         path: &'a std::path::Path)
         -> Result<(Inode, std::iter::Peekable<std::path::Components<'a>>)> {

    fn closest_in_repo_ancestor<'a>(
        &self,
        path: &'a RepoPath<impl AsRef<Path>>
    ) -> Result<(Inode, std::iter::Peekable<std::path::Components<'a>>)> {
        let mut components = path.components().peekable();
        let mut fileid = OwnedFileId {
            parent_inode: ROOT_INODE,
            basename: SmallString::from_str(""),
        };

        loop {
            if let Some(c) = components.peek() {

                fileid.basename = SmallString::from_str(c.as_os_str().to_str().unwrap());
                if let Some(v) = self.get_tree(&fileid.as_file_id()) {
                    fileid.parent_inode = v.clone()
                } else {
                    break;
                }
            } else {
                break;
            }
            components.next();
        }
        Ok((fileid.parent_inode.clone(), components))
    }

    /// Find the inode corresponding to that path, or return an error if there's no such inode.
    pub fn find_inode(&self, path: &RepoPath<impl AsRef<Path>>)
                      -> Result<Inode> {
        let (inode, mut remaining_path_components) = self.closest_in_repo_ancestor(path)?;
        if remaining_path_components.next().is_none() {
            Ok(inode)
        } else {
            Err(Error::FileNotInRepo(path.to_path_buf()))
        }
    }


    pub fn file_names(&self, branch: &Branch, key: Key<PatchId>) -> Vec<(Key<PatchId>, FileMetadata, &str)> {

    pub fn file_names(
        &self,
        branch: &Branch,
        key: Key<PatchId>,
    ) -> Vec<(Key<PatchId>, FileMetadata, &str)> {
        let mut result = Vec::new();
        let e = Edge::zero(EdgeFlags::FOLDER_EDGE | EdgeFlags::PARENT_EDGE);

        debug!("file_names, key {:?}", key);
        for (_, parent) in self
            .iter_nodes(branch, Some((key, Some(e))))
            .take_while(|&(k, _)| k == key)
            .filter(|&(_, ref v)| v.flag.contains(EdgeFlags::FOLDER_EDGE | EdgeFlags::PARENT_EDGE)) {
                debug!("file_names, parent {:?}", parent);
                match self.get_contents(parent.dest) {
                    Some(ref name) if name.len() >= 2 => {
                        // This is supposed to be a small string anyway.
                        let (perms, basename) = name.as_slice().split_at(2);
                        let perms = FileMetadata::from_contents(perms);
                        let basename = std::str::from_utf8(basename).unwrap();

                        for (_, grandparent) in self.iter_nodes(branch, Some((parent.dest, Some(&e))))
                            .take_while(|&(k, _)| k == parent.dest)
                            .filter(|&(_, ref v)| v.flag.contains(EdgeFlags::FOLDER_EDGE | EdgeFlags::PARENT_EDGE)) {

                                result.push((grandparent.dest.to_owned(), perms, basename));
                                break
            .filter(|&(_, ref v)| {
                v.flag
                    .contains(EdgeFlags::FOLDER_EDGE | EdgeFlags::PARENT_EDGE)
            })
        {
            debug!("file_names, parent {:?}", parent);
            match self.get_contents(parent.dest) {
                Some(ref name) if name.len() >= 2 => {
                    // This is supposed to be a small string anyway.
                    let (perms, basename) = name.as_slice().split_at(2);
                    let perms = FileMetadata::from_contents(perms);
                    let basename = std::str::from_utf8(basename).unwrap();

                            }
                    },
                    _ => error!("Key: {:?}, file {}, line {}",
                                key,
                                file!(), line!())
                    for (_, grandparent) in self
                        .iter_nodes(branch, Some((parent.dest, Some(e))))
                        .take_while(|&(k, _)| k == parent.dest)
                        .filter(|&(_, ref v)| {
                            v.flag
                                .contains(EdgeFlags::FOLDER_EDGE | EdgeFlags::PARENT_EDGE)
                        })
                    {
                        result.push((grandparent.dest.to_owned(), perms, basename));
                        break;
                    }
                }
                _ => error!("Key: {:?}, file {}, line {}", key, file!(), line!()),
            }
        }
        result
    }

    pub fn prefix_keys(&self, branch: &Branch, path: &RepoPath<impl AsRef<Path>>)
                       -> Result<Vec<Key<PatchId>>> {
        let mut result = Vec::new();
        let e = Edge::zero(EdgeFlags::FOLDER_EDGE);

        let mut current_key = ROOT_KEY;

        for comp in path.components() {
            let mut is_first = true;
            let cur = current_key;
            for (_, child) in self
                .iter_nodes(branch, Some((current_key, Some(e))))
                .take_while(|&(k, _)| k == cur)
                .filter(|&(_, ref v)| v.flag.contains(EdgeFlags::FOLDER_EDGE)) {

                    let contents = self.get_contents(child.dest).unwrap();
                    if contents.into_cow().split_at(2).1 == comp.as_bytes() {

                        if !is_first {
                            return Err(ErrorKind::FileNameCount(current_key).into())
                        }

                        for (_, grandchild) in self.iter_nodes(branch, Some((child.dest, Some(&e))))
                            .take_while(|&(k, _)| k == child.dest)
                            .filter(|&(_, ref v)| v.flag.contains(EdgeFlags::FOLDER_EDGE)) {
                .filter(|&(_, ref v)| v.flag.contains(EdgeFlags::FOLDER_EDGE))
            {
                let contents = self.get_contents(child.dest).unwrap();
                if contents.into_cow().split_at(2).1
                    == comp.as_os_str().to_str().expect("file encoding problem").as_bytes() {
                    if !is_first {
                        return Err(Error::FileNameCount(current_key));
                    }

                                result.push(grandchild.dest);
                                current_key = grandchild.dest;
                            }
                    for (_, grandchild) in self
                        .iter_nodes(branch, Some((child.dest, Some(e))))
                        .take_while(|&(k, _)| k == child.dest)
                        .filter(|&(_, ref v)| v.flag.contains(EdgeFlags::FOLDER_EDGE))
                    {
                        result.push(grandchild.dest);
                        current_key = grandchild.dest;
                    }
                }
            }
        }












1
2
3
4
5
6
7
8
9
10
11
12
13
14
15

16
17
18
19


20
21
22

23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304

305
306
307
308
309
310
311
312
313
314

use ignore::WalkBuilder;
use rand::Rng;
use Result;
    return p.as_ref().join(PIJUL_DIR_NAME).join("patches");
pub fn create<R: Rng>(dir: &Path, mut rng: R) -> std::io::Result<()> {
pub fn untracked_files<T: rand::Rng>(txn: &MutTxn<T>, repo_root: &Path) -> HashSet<PathBuf> {
    let known_files = txn.list_files(ROOT_INODE).unwrap_or_else(|_| vec![]);
        .add("!.pijul")
        .unwrap()
        .build()
        .unwrap(); // we can be pretty confident these two calls will
                   // not fail as the glob is hard-coded
//! Layout of a repository (files in `.pijul`) on the disk. This
//! module exports both high-level functions that require no knowledge
//! of the repository, and lower-level constants documented on
//! [pijul.org/documentation/repository](https://pijul.org/documentation/repository),
//! used for instance for downloading files from remote repositories.

use super::Repository;
use backend::DEFAULT_BRANCH;
use backend::{Hash, HashRef};
use backend::{MutTxn, ROOT_INODE};
use bs58;
use flate2;
use ignore::overrides::OverrideBuilder;
use ignore::WalkBuilder;
use patch::{Patch, PatchHeader};
use rand::Rng;
use rand::distributions::Alphanumeric;
use rand::Rng;
use std;
use std::fs::canonicalize;
use std::fs::{metadata, create_dir_all, File};
use std::io::{Write, BufReader, Read};
use std::fs::{create_dir_all, metadata, File};
use std::io::{BufReader, Read, Write};
use std::path::{Path, PathBuf};
use std;
use {Result, Error};
use std::ffi::OsStr;

/// Given a Path-like type P, RepoPath<P> is a 'P' relative to some fs_representation::RepoRoot
#[derive(Debug, Clone, PartialEq, Eq, Hash, Copy)]
pub struct RepoPath<P: ?Sized>(pub P);

/// The repository root as a `RepoPath`, i.e. the empty relative path.
pub fn in_repo_root() -> RepoPath<&'static Path> {
    RepoPath(Path::new(""))
}

impl RepoPath<std::path::PathBuf> {
    /// An empty owned repository path (the repository root).
    pub fn new() -> Self {
        RepoPath(PathBuf::new())
    }

    /// Builds an owned repository path from a `String`.
    pub fn from_string(path: String) -> Self {
        RepoPath(PathBuf::from(path))
    }

    /// Borrows this path as a `RepoPath<&Path>`.
    pub fn as_ref(&self) -> RepoPath<&Path> {
        RepoPath(self.0.as_ref())
    }

    /// Appends a component to this path.
    pub fn push(&mut self, x: &str) {
        self.0.push(x)
    }

    /// Removes the last component; returns whatever `PathBuf::pop` does.
    pub fn pop(&mut self) -> bool {
        self.0.pop()
    }

    /// Replaces the file-name component.
    pub fn set_file_name(&mut self, filename: &OsStr) {
        self.0.set_file_name(filename)
    }
}

    
impl<P: AsRef<Path>> RepoPath<P> {
    /// Views this repository-relative path as a plain `Path`.
    pub fn as_path(&self) -> &std::path::Path {
        self.0.as_ref()
    }

    /// `true` iff this is the repository root (the empty path).
    pub fn empty(&self) -> bool {
        self.as_path() == Path::new("")
    }

    /// The parent path, or `None` at the root.
    pub fn parent(&self) -> Option<RepoPath<&std::path::Path>> {
        self.as_path().parent().map(RepoPath)
    }

    /// The final component of the path, if any.
    pub fn file_name(&self) -> Option<&OsStr> {
        self.as_path().file_name()
    }

    /// Splits into parent path and file name; `None` at the root.
    pub fn split(&self) -> Option<(RepoPath<&std::path::Path>, &OsStr)> {
        self.parent().map(|p| {
            (
                p,
                self.file_name()
                    .expect("file_name and parent should be consistent"),
            )
        })
    }

    /// Iterates over the path's components.
    pub fn components(&self) -> std::path::Components {
        self.as_path().components()
    }

    /// Copies this path into an owned `PathBuf`.
    pub fn to_path_buf(&self) -> PathBuf {
        self.as_path().to_path_buf()
    }

    /// A displayable form of the path.
    pub fn display(&self) -> std::path::Display {
        self.as_path().display()
    }

    /// Copies this path into an owned `RepoPath<PathBuf>`.
    pub fn to_owned(&self) -> RepoPath<PathBuf> {
        RepoPath(self.0.as_ref().to_path_buf())
    }

    /// Appends `path`, yielding an owned repository path.
    pub fn join(&self, path: &Path) -> RepoPath<PathBuf> {
        // TODO: check that the joined path is indeed inside the repo
        RepoPath(self.as_path().join(path))
    }
}

/// A directory at the root of a pijul repository.
#[derive(Clone, Copy, Debug)]
pub struct RepoRoot<P: AsRef<Path>> {
    // Absolute or relative path of the repository root on disk.
    pub repo_root: P,
}

/// Name of the root directory, i.e. `.pijul`.
pub const PIJUL_DIR_NAME: &'static str = ".pijul";

/// Basename of the changes file for branch `br`. This file is only
/// used when pulling/pushing over HTTP (where calling remote programs
/// to list patches is impossible).
///
/// The changes file contains the same information as the one returned by `pijul log --hash-only`.
pub fn branch_changes_base_path(b: &str) -> String {
    format!("changes.{}", bs58::encode(b.as_bytes()).into_string())
}

/// Basename of the patch corresponding to the given patch hash.
pub fn patch_file_name(hash: HashRef) -> String {
    format!("{}.gz", hash.to_base58())
}

impl<P: AsRef<Path>> RepoRoot<P> {
    /// The subdirectory of `self` with pijul's metadata
    pub fn repo_dir(&self) -> PathBuf {
        self.repo_root.as_ref().join(PIJUL_DIR_NAME)
    }

    /// Directory where the pristine of `self` is.
    /// For instance, if the repository is in `/a/b`,
    /// `self.pristine_dir()` is `/a/b/.pijul/pristine`.
    pub fn pristine_dir(&self) -> PathBuf {
        self.repo_dir().join("pristine")
    }

    /// Directory where the patches are. `patches_dir("/a/b") = "/a/b/.pijul/patches"`.
    pub fn patches_dir(&self) -> PathBuf {
        self.repo_dir().join("patches")
    }

    /// The location of the changes file for the branch `b`.
    ///
    /// The changes file contains the same information as the one returned by `pijul log --hash-only`.
    pub fn branch_changes_file(&self, b: &str) -> PathBuf {
        self.repo_dir().join(branch_changes_base_path(b))
    }

    /// The meta file, where user preferences are stored.
    pub fn meta_file(&self) -> PathBuf {
        self.repo_dir().join("meta.toml")
    }

    /// The id file is used for remote operations, to identify a
    /// repository and save bandwidth when the remote state is partially
    /// known.
    pub fn id_file(&self) -> PathBuf {
        self.repo_dir().join("id")
    }

    /// Read a complete patch.
    pub fn read_patch(&self, hash: HashRef) -> Result<Patch> {
        let patch_dir = self.patches_dir();
        let path = patch_dir.join(&patch_file_name(hash));
        let f = File::open(path)?;
        let mut f = BufReader::new(f);
        let (_, _, patch) = Patch::from_reader_compressed(&mut f)?;
        Ok(patch)
    }

    /// Read a patch, but without the "changes" part, i.e. the actual
    /// contents of the patch.
    pub fn read_patch_nochanges(&self, hash: HashRef) -> Result<PatchHeader> {
        let patch_dir = self.patches_dir();
        let path = patch_dir.join(&patch_file_name(hash));
        let f = File::open(path)?;
        let mut f = flate2::bufread::GzDecoder::new(BufReader::new(f));
        Ok(PatchHeader::from_reader_nochanges(&mut f)?)
    }

    /// Read only the dependencies of a patch, i.e. not the actual
    /// contents of the patch.
    pub fn read_dependencies(&self, hash: HashRef) -> Result<Vec<Hash>> {
        let patch_dir = self.patches_dir();
        let path = patch_dir.join(&patch_file_name(hash));
        let f = File::open(path)?;
        let mut f = flate2::bufread::GzDecoder::new(BufReader::new(f));
        Ok(Patch::read_dependencies(&mut f)?)
    }

    /// The ignore file that is _not_ tracked by pijul.
    pub fn local_ignore_file(&self) -> PathBuf {
        self.repo_dir().join("local").join("ignore")
    }

    /// Name of the currently checked-out branch, read from
    /// `.pijul/current_branch`; falls back to `DEFAULT_BRANCH` when the
    /// file cannot be opened (e.g. it doesn't exist yet).
    pub fn get_current_branch(&self) -> Result<String> {
        let mut path = self.repo_dir();
        path.push("current_branch");
        if let Ok(mut f) = File::open(&path) {
            let mut s = String::new();
            f.read_to_string(&mut s)?;
            Ok(s.trim().to_string())
        } else {
            Ok(DEFAULT_BRANCH.to_string())
        }
    }

    /// Records `branch` as the current branch, overwriting
    /// `.pijul/current_branch`.
    pub fn set_current_branch(&self, branch: &str) -> Result<()> {
        let mut path = self.repo_dir();
        path.push("current_branch");
        let mut f = File::create(&path)?;
        f.write_all(branch.trim().as_ref())?;
        f.write_all(b"\n")?;
        Ok(())
    }

    /// Opens this repository's pristine database, optionally growing it
    /// by `increase` bytes.
    pub fn open_repo(&self, increase: Option<u64>) -> Result<Repository> {
        Repository::open(self.pristine_dir(), increase)
    }

    /// Turns an on-disk path into a repository-relative `RepoPath`, or
    /// errors if `path` is not under the repository root.
    pub fn relativize<'a>(&self, path: &'a Path) -> Result<RepoPath<&'a Path>> {
        match path.strip_prefix(&self.repo_root) {
            Ok(p) => Ok(RepoPath(p)),
            Err(_) => Err(Error::FileNotInRepo(path.to_path_buf()))
        }
    }

    /// Turns a repository-relative path back into an on-disk path.
    pub fn absolutize<'a>(&self, path: &RepoPath<impl AsRef<Path>>) -> PathBuf {
        self.repo_root.as_ref().join(path.as_path())
    }
}

impl<P: AsRef<Path> + 'static> RepoRoot<P> {
    /// Walks `path`, yielding repository-relative paths of files that
    /// are neither tracked (listed by `txn.list_files`) nor ignored
    /// (`.pijulignore`, `.pijul/local/ignore`, or `.pijul` itself).
    pub fn untracked_files<T: rand::Rng, Q: AsRef<Path>>(
        &self,
        txn: &MutTxn<T>,
        path: Q,
    ) -> impl Iterator<Item = RepoPath<PathBuf>> + '_ {
        let known_files = txn.list_files(ROOT_INODE).unwrap_or_else(|_| vec![]);

        // Always skip the metadata directory, whatever the ignore files say.
        let o = OverrideBuilder::new(self.repo_root.as_ref())
            .add("!.pijul")
            .unwrap()
            .build()
            .unwrap(); // we can be pretty confident these two calls will
                       // not fail as the glob is hard-coded

        // Git-specific ignore sources are disabled; pijul has its own.
        let mut w = WalkBuilder::new(path.as_ref());
        w.git_ignore(false)
            .git_exclude(false)
            .git_global(false)
            .hidden(false)
            .add_custom_ignore_filename(".pijulignore");

        // add .pijul/local/ignore
        w.add_ignore(self.local_ignore_file());
        w.overrides(o);

        w.build().filter_map(move |f| {
            if let Ok(f) = f {
                let p = f.path();
                // The walk yields the root itself first; skip it.
                if p == self.repo_root.as_ref() {
                    return None;
                }

                let p_in_repo = self.relativize(&p).unwrap();
                // ^- cannot fail since p must be within repo_root.
                if known_files.iter().any(|t| t.as_ref() == p_in_repo) {
                    return None
                }
                Some(p_in_repo.to_owned())
            } else {
                // Walk errors (e.g. permission denied) are silently dropped.
                None
            }
        })
    }
}

/// Find the repository root from one of its descendant
/// directories. Return `None` iff `dir` is not in a repository.
///
/// The original contained a garbled duplicated `return None` (the
/// first occurrence missing its semicolon); this keeps a single
/// statement.
pub fn find_repo_root(dir: &Path) -> Option<RepoRoot<PathBuf>> {
    let mut p = dir.to_path_buf();
    loop {
        // Does `p/.pijul` exist and is it a directory?
        p.push(PIJUL_DIR_NAME);
        let found = metadata(&p).map(|attr| attr.is_dir()).unwrap_or(false);
        p.pop();
        if found {
            return Some(RepoRoot { repo_root: p });
        }

        // Move up one level; at the filesystem root, give up.
        if !p.pop() {
            return None;
        }
    }
}

#[doc(hidden)]
// Length, in characters, of generated identifiers (presumably random
// repository ids written by `create` — confirm with callers).
pub const ID_LENGTH: usize = 100;

/// Create a repository. `dir` must be the repository root (a
/// `".pijul"` directory will be created in `dir`).
pub fn create<R:Rng>(dir: &Path, mut rng: R) -> std::io::Result<()> {






1
2
3
4

5

6
7
8
9

10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627

628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651

652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678

use Result;
                                debug!("forward: {:?} {:?}", self[*cousin].key, edge);
                                forward.push((self[*cousin].key, edge))
            return Ok(false);
                    return true;
                    return true;
//! The data structure of the in-memory version of Pijul's main
//! datastructure, used to edit and organise it (for instance before a
//! record or before outputting a file).
use backend::*;
use Result;
use conflict;
use std::collections::{HashMap, HashSet};
use std::cmp::min;
use std::collections::{HashMap, HashSet};
use Result;

use std;
use rand;
use std;

mod dfs;
use self::dfs::{DFS, Path, PathElement};

bitflags! {
    /// Per-line state bits used while building and traversing a `Graph`.
    struct Flags: u8 {
        /// The line is deleted by some patch but still has alive
        /// parent edges (a "zombie"); set in `retrieve`.
        const LINE_HALF_DELETED = 4;
        /// Set by Tarjan's algorithm once the line has been visited.
        const LINE_VISITED = 2;
        /// Set while the line is on Tarjan's explicit stack.
        const LINE_ONSTACK = 1;
    }
}

/// The elementary datum in the representation of the repository state
/// at any given point in time. We need this structure (as opposed to
/// working directly on a branch) in order to add more data, such as
/// strongly connected component identifier, to each node.
#[derive(Debug)]
pub struct Line {
    /// The internal identifier of the line.
    pub key: Key<PatchId>,

    // The status of the line. `retrieve` initialises this to either
    // `Flags::empty()` or `LINE_HALF_DELETED`; Tarjan's algorithm
    // later adds `LINE_VISITED` / `LINE_ONSTACK`.
    flags: Flags,
    // Index of this line's first child in `Graph::children`.
    children: usize,
    // Number of children: the children of this line are
    // `graph.children[children..children + n_children]`.
    n_children: usize,
    // DFS visit number assigned by Tarjan's algorithm.
    index: usize,
    // Smallest `index` reachable from this line (Tarjan's lowlink).
    lowlink: usize,
    // Identifier of the strongly connected component containing this
    // line; an index into the vector returned by `tarjan`.
    scc: usize,
}

impl Line {
    /// `true` when this line carries the half-deleted ("zombie")
    /// flag, i.e. it is deleted by some patch but kept alive by
    /// another.
    pub fn is_zombie(&self) -> bool {
        self.flags.intersects(Flags::LINE_HALF_DELETED)
    }
}

/// A graph, representing the whole content of the repository state at
/// a point in time. The encoding is a "flat adjacency list", where
/// each vertex contains a index `children` and a number of children
/// `n_children`. The children of that vertex are then
/// `&g.children[children .. children + n_children]`.
///
/// Built from a branch by [`GenericTxn::retrieve`].
#[derive(Debug)]
pub struct Graph {
    /// Array of all alive lines in the graph. Line 0 is a dummy line
    /// at the end, so that all nodes have a common successor
    pub lines: Vec<Line>,
    /// Edge + index of the line in the "lines" array above. "None"
    /// means "dummy line at the end", and corresponds to line number
    /// 0.
    children: Vec<(Option<Edge>, VertexId)>,
}

/// Index of a `Line` inside `Graph::lines`.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Ord, PartialOrd)]
struct VertexId(usize);

const DUMMY_VERTEX: VertexId = VertexId(0);

impl std::ops::Index<VertexId> for Graph {
    type Output = Line;

    /// Looks up the line at vertex `idx`; panics when out of bounds,
    /// like slice indexing.
    fn index(&self, idx: VertexId) -> &Line {
        &self.lines[idx.0]
    }
}
impl std::ops::IndexMut<VertexId> for Graph {
    fn index_mut(&mut self, idx: VertexId) -> &mut Self::Output {
        self.lines.index_mut(idx.0)
    }
}

use std::io::Write;

impl Graph {
    /// Returns the slice of children of vertex `i`.
    fn children(&self, i: VertexId) -> &[(Option<Edge>, VertexId)] {
        let ref line = self[i];
        &self.children[line.children..line.children + line.n_children]
    }

    /// Returns the `j`-th child of vertex `i`.
    fn child(&self, i: VertexId, j: usize) -> &(Option<Edge>, VertexId) {
        &self.children[self[i].children + j]
    }

    /// Writes this graph to `w` in Graphviz "dot" format, for
    /// debugging.
    ///
    /// When `add_others` is true, edges present in `branch` but whose
    /// source vertex is in this graph are drawn in red as well. When
    /// `introduced_by` is true, every edge label also carries the
    /// base58 id of the patch that introduced it.
    pub fn debug<W: Write, R, A: Transaction>(
        &self,
        txn: &GenericTxn<A, R>,
        branch: &Branch,
        add_others: bool,
        introduced_by: bool,
        mut w: W,
    ) -> std::io::Result<()> {
        writeln!(w, "digraph {{")?;
        // Maps line keys to their index, so branch edges can be drawn
        // between existing nodes.
        let mut cache = HashMap::new();
        if add_others {
            for (line, i) in self.lines.iter().zip(0..) {
                cache.insert(line.key, i);
            }
        }
        // Keys already emitted as "external" (red) nodes.
        let mut others = HashSet::new();
        for (line, i) in self.lines.iter().zip(0..) {
            // Node label: up to 50 bytes of the line's contents, if
            // they are valid UTF-8.
            let contents = {
                if let Some(c) = txn.get_contents(line.key) {
                    let c = c.into_cow();
                    if let Ok(c) = std::str::from_utf8(&c) {
                        c.split_at(std::cmp::min(50, c.len())).0.to_string()
                    } else {
                        "<INVALID>".to_string()
                    }
                } else {
                    "".to_string()
                }
            };
            // Escape via Debug formatting, then strip the surrounding
            // quotes.
            let contents = format!("{:?}", contents);
            let contents = contents.split_at(contents.len() - 1).0.split_at(1).1;
            writeln!(
                w,
                "n_{}[label=\"{}: {}.{}: {}\"];",
                i,
                i,
                line.key.patch.to_base58(),
                line.key.line.to_hex(),
                contents
            )?;

            if add_others && !line.key.is_root() {
                // Draw, in red, every edge the branch knows about for
                // this key.
                for v in txn.iter_adjacent(branch, line.key, EdgeFlags::empty(), EdgeFlags::all()) {
                    if let Some(dest) = cache.get(&v.dest) {
                        writeln!(
                            w,
                            "n_{} -> n_{}[color=red,label=\"{:?}{}{}\"];",
                            i,
                            dest,
                            v.flag.bits(),
                            if introduced_by { " " } else { "" },
                            if introduced_by {
                                v.introduced_by.to_base58()
                            } else {
                                String::new()
                            }
                        )?;
                    } else {
                        // Destination not in this graph: emit it once
                        // as a red node, then the edge.
                        if !others.contains(&v.dest) {
                            others.insert(v.dest);
                            writeln!(
                                w,
                                "n_{}[label=\"{}.{}\",color=red];",
                                v.dest.to_base58(),
                                v.dest.patch.to_base58(),
                                v.dest.line.to_hex()
                            )?;
                        }
                        writeln!(
                            w,
                            "n_{} -> n_{}[color=red,label=\"{:?}{}{}\"];",
                            i,
                            v.dest.to_base58(),
                            v.flag.bits(),
                            if introduced_by { " " } else { "" },
                            if introduced_by {
                                v.introduced_by.to_base58()
                            } else {
                                String::new()
                            }
                        )?;
                    }
                }
            }
            // Draw this graph's own adjacency-list edges.
            for &(ref edge, VertexId(j)) in
                &self.children[line.children..line.children + line.n_children]
            {
                if let Some(ref edge) = *edge {
                    writeln!(
                        w,
                        "n_{}->n_{}[label=\"{:?}{}{}\"];",
                        i,
                        j,
                        edge.flag.bits(),
                        if introduced_by { " " } else { "" },
                        if introduced_by {
                            edge.introduced_by.to_base58()
                        } else {
                            String::new()
                        }
                    )?
                } else {
                    // `None` is the dummy child (vertex 0).
                    writeln!(w, "n_{}->n_{}[label=\"none\"];", i, j)?
                }
            }
        }
        writeln!(w, "}}")?;
        Ok(())
    }
}

use sanakirja::value::Value;
/// A "line outputter" trait.
pub trait LineBuffer<'a, T: 'a + Transaction> {
    /// Outputs one line: `key` identifies the line, `contents` is its
    /// raw bytes.
    fn output_line(&mut self, key: &Key<PatchId>, contents: Value<'a, T>) -> Result<()>;

    /// Outputs a conflict-marker string `s`.
    fn output_conflict_marker(&mut self, s: &'a str) -> Result<()>;
    /// Called when a conflict starts; defaults to writing the start
    /// marker.
    fn begin_conflict(&mut self) -> Result<()> {
        self.output_conflict_marker(conflict::START_MARKER)
    }
    /// Called when a zombie conflict starts; defaults to a plain
    /// conflict start.
    fn begin_zombie_conflict(&mut self) -> Result<()> {
        self.begin_conflict()
    }
    /// Called between two sides of a conflict; defaults to writing
    /// the separator marker.
    fn conflict_next(&mut self) -> Result<()> {
        self.output_conflict_marker(conflict::SEPARATOR)
    }
    /// Called when a conflict ends; defaults to writing the end
    /// marker.
    fn end_conflict(&mut self) -> Result<()> {
        self.output_conflict_marker(conflict::END_MARKER)
    }
}

/// A `LineBuffer` implementation writing to any `std::io::Write`,
/// tracking whether the output currently sits at the start of a line.
pub struct Writer<W: std::io::Write> {
    pub w: W,
    // True when the last chunk written ended with b"\n", or when
    // nothing has been written yet (see `new` and `output_line`).
    new_line: bool,
}

impl<W: std::io::Write> Writer<W> {
    /// Wraps `inner`, initially considered to be at the start of a
    /// line.
    pub fn new(inner: W) -> Self {
        Writer {
            w: inner,
            new_line: true,
        }
    }
}

impl<W: std::io::Write> std::ops::Deref for Writer<W> {
    type Target = W;

    /// Gives read access to the wrapped writer.
    fn deref(&self) -> &W {
        &self.w
    }
}

impl<W: std::io::Write> std::ops::DerefMut for Writer<W> {
    /// Gives mutable access to the wrapped writer.
    fn deref_mut(&mut self) -> &mut W {
        &mut self.w
    }
}

impl<'a, T: 'a + Transaction, W: std::io::Write> LineBuffer<'a, T> for Writer<W> {
    /// Writes every chunk of `c` to the underlying writer, updating
    /// `self.new_line` from the last chunk.
    fn output_line(&mut self, k: &Key<PatchId>, c: Value<T>) -> Result<()> {
        let mut ends_with_newline = false;
        let mut is_empty = true;
        for chunk in c {
            debug!("output line {:?} {:?}", k, std::str::from_utf8(chunk));
            is_empty = is_empty && chunk.is_empty();
            ends_with_newline = chunk.ends_with(b"\n");
            self.w.write_all(chunk)?
        }
        if !is_empty {
            // empty "lines" (such as in the beginning of a file)
            // don't change the status of self.new_line.
            self.new_line = ends_with_newline;
        }
        Ok(())
    }

    /// Writes a conflict marker. When the output is already at the
    /// beginning of a line, the marker's first byte (apparently a
    /// leading newline — see the slice below) is skipped.
    fn output_conflict_marker(&mut self, s: &'a str) -> Result<()> {
        debug!("output_conflict_marker {:?}", self.new_line);
        if !self.new_line {
            // Bug fix: the original used `write`, which may write
            // only part of the buffer; `write_all` guarantees the
            // whole marker is written.
            self.w.write_all(s.as_bytes())?;
        } else {
            debug!("{:?}", &s.as_bytes()[1..]);
            self.w.write_all(&s.as_bytes()[1..])?;
        }
        Ok(())
    }
}

impl Graph {
    /// Tarjan's strongly connected component algorithm, returning a
    /// vector of strongly connected components, where each SCC is a
    /// vector of vertex indices.
    ///
    /// The recursion is simulated with an explicit `call_stack` whose
    /// entries are `(vertex, next child index, is this the first
    /// visit?)`.
    fn tarjan(&mut self) -> Vec<Vec<VertexId>> {
        // Only the dummy vertex: one trivial SCC.
        if self.lines.len() <= 1 {
            return vec![vec![VertexId(0)]];
        }

        // Start the DFS at vertex 1 (vertex 0 is the dummy sink).
        let mut call_stack = vec![(VertexId(1), 0, true)];

        let mut index = 0;
        let mut stack = Vec::new();
        let mut scc = Vec::new();
        while let Some((n_l, i, first_visit)) = call_stack.pop() {
            if first_visit {
                // First time we visit this node.
                let l = &mut self[n_l];
                l.index = index;
                l.lowlink = index;
                l.flags |= Flags::LINE_ONSTACK | Flags::LINE_VISITED;
                debug!("tarjan {:?} {:?} chi", l.key, l.n_children);
                stack.push(n_l);
                index += 1;
            } else {
                // Returning from the simulated recursive call on
                // child `i`: propagate its lowlink.
                let &(_, n_child) = self.child(n_l, i);
                self[n_l].lowlink = std::cmp::min(self[n_l].lowlink, self[n_child].lowlink);
            }

            let call_stack_length = call_stack.len();
            for j in i..self[n_l].n_children {
                let &(_, n_child) = self.child(n_l, j);
                if !self[n_child].flags.contains(Flags::LINE_VISITED) {
                    // Simulate a recursive call on `n_child`,
                    // resuming at child `j` of `n_l` afterwards.
                    call_stack.push((n_l, j, false));
                    call_stack.push((n_child, 0, true));
                    break;
                } else if self[n_child].flags.contains(Flags::LINE_ONSTACK) {
                    self[n_l].lowlink = min(self[n_l].lowlink, self[n_child].index)
                }
            }
            if call_stack_length < call_stack.len() {
                // recursive call
                continue;
            }
            // Here, all children of n_l have been visited.

            if self[n_l].index == self[n_l].lowlink {
                // `n_l` is the root of an SCC: pop the stack down to
                // `n_l` to collect its members.
                let mut v = Vec::new();
                while let Some(n_p) = stack.pop() {
                    self[n_p].scc = scc.len();
                    // Clear LINE_ONSTACK (it is known to be set here).
                    self[n_p].flags ^= Flags::LINE_ONSTACK;
                    v.push(n_p);
                    if n_p == n_l {
                        break;
                    }
                }
                scc.push(v);
            }
        }
        scc
    }
}

impl<A: Transaction, R> GenericTxn<A, R> {
    /// This function constructs a graph by reading the branch from the
    /// input key. It guarantees that all nodes but the first one (index
    /// 0) have a common descendant, which is index 0.
    pub fn retrieve<'a>(&'a self, branch: &Branch, key0: Key<PatchId>) -> Graph {
        let mut graph = Graph {
            lines: Vec::new(),
            children: Vec::new(),
        };
        // Insert last "dummy" line (so that all lines have a common descendant).
        graph.lines.push(Line {
            key: ROOT_KEY,
            flags: Flags::empty(),
            children: 0,
            n_children: 0,
            index: 0,
            lowlink: 0,
            scc: 0,
        });

        // Avoid the root key.
        let mut cache: HashMap<Key<PatchId>, VertexId> = HashMap::new();
        cache.insert(ROOT_KEY.clone(), DUMMY_VERTEX);
        // Stack of keys still to be turned into graph vertices (DFS).
        let mut stack = Vec::new();
        if self.get_nodes(&branch, key0, None).is_some() {
            stack.push(key0)
        }
        while let Some(key) = stack.pop() {
            if cache.contains_key(&key) {
                // We're doing a DFS here, this can definitely happen.
                continue;
            }

            let idx = VertexId(graph.lines.len());
            cache.insert(key.clone(), idx);

            debug!("{:?}", key);
            let mut is_zombie = false;
            // Does this vertex have a DELETED/DELETED+FOLDER edge
            // pointing to it?
            let mut first_edge = Edge::zero(EdgeFlags::PARENT_EDGE | EdgeFlags::DELETED_EDGE);
            let mut nodes = self.iter_nodes(&branch, Some((key, Some(first_edge))));
            if let Some((k, v)) = nodes.next() {
                debug!("zombie? {:?} {:?}", k, v);
                if k == key
                    && (v.flag | EdgeFlags::FOLDER_EDGE == first_edge.flag | EdgeFlags::FOLDER_EDGE)
                {
                    // Does this vertex also have an alive edge
                    // pointing to it? (might not be the case for the
                    // first vertex)
                    if key == key0 {
                        first_edge.flag = EdgeFlags::PARENT_EDGE;
                        nodes = self.iter_nodes(&branch, Some((key, Some(first_edge))));
                        if let Some((_, v)) = nodes.next() {
                            // We know the key is `key`.
                            is_zombie = v.flag | EdgeFlags::FOLDER_EDGE
                                == first_edge.flag | EdgeFlags::FOLDER_EDGE
                        }
                    } else {
                        is_zombie = true
                    }
                }
            }
            debug!("is_zombie: {:?}", is_zombie);
            let mut l = Line {
                key: key.clone(),
                flags: if is_zombie {
                    Flags::LINE_HALF_DELETED
                } else {
                    Flags::empty()
                },
                children: graph.children.len(),
                n_children: 0,
                index: 0,
                lowlink: 0,
                scc: 0,
            };

            // Used to detect and drop doubled pseudo-edges below.
            let mut last_flag = EdgeFlags::empty();
            let mut last_dest = ROOT_KEY;

            // Iterate over all alive child edges of `key` (up to
            // PSEUDO | FOLDER | EPSILON).
            for (_, v) in self
                .iter_nodes(&branch, Some((key, None)))
                .take_while(|&(k, v)| {
                    k == key
                        && v.flag
                            <= EdgeFlags::PSEUDO_EDGE
                                | EdgeFlags::FOLDER_EDGE
                                | EdgeFlags::EPSILON_EDGE
                })
            {
                debug!("-> v = {:?}", v);
                if last_flag == EdgeFlags::PSEUDO_EDGE && last_dest == v.dest {
                    // This is a doubled edge, it should be removed.
                    /*} else if v.flag.contains(EdgeFlags::EPSILON_EDGE) {
                    // Epsilon lines are skipped.
                    for (_, v_) in self
                        .iter_nodes(&branch, Some((v.dest, None)))
                        .take_while(|&(k_, v_)| {
                            k_ == v.dest
                                && v_.flag
                                <= EdgeFlags::PSEUDO_EDGE
                                | EdgeFlags::FOLDER_EDGE
                                | EdgeFlags::EPSILON_EDGE
                        }) {
                            graph.children.push((Some(v_.clone()), DUMMY_VERTEX));
                            l.n_children += 1;
                            if !cache.contains_key(&v_.dest) {
                                stack.push(v_.dest.clone())
                            } else {
                                debug!("v already visited");
                            }
                            last_flag = v_.flag;
                            last_dest = v_.dest;
                        }
                     */
                } else {
                    // Record the edge; the child's VertexId is fixed
                    // up after the loop, once all lines are cached.
                    graph.children.push((Some(v.clone()), DUMMY_VERTEX));
                    l.n_children += 1;
                    if !cache.contains_key(&v.dest) {
                        stack.push(v.dest.clone())
                    } else {
                        debug!("v already visited");
                    }
                    last_flag = v.flag;
                    last_dest = v.dest;
                }
            }
            // If this key has no children, give it the dummy child.
            if l.n_children == 0 {
                debug!("no children for {:?}", l);
                graph.children.push((None, DUMMY_VERTEX));
                l.n_children = 1;
            }
            graph.lines.push(l)
        }
        // Resolve each recorded child key into its vertex index, now
        // that every reachable line is in `cache`.
        for &mut (ref child_key, ref mut child_idx) in graph.children.iter_mut() {
            if let Some(ref child_key) = *child_key {
                if let Some(idx) = cache.get(&child_key.dest) {
                    *child_idx = *idx
                }
            }
        }
        graph
    }
}

/// The conflict markers keep track of the number of conflicts, and is
/// used for outputting conflicts to a given LineBuffer.
///
/// "Zombie" conflicts are those conflicts introduced by zombie
/// vertices in the contained Graph.
struct ConflictMarkers {
    // Number of currently open (nested) conflicts; see
    // `begin_conflict` / `end_conflict`.
    current_conflicts: usize,
}

impl ConflictMarkers {
    // NOTE: a commented-out `output_zombie_markers_if_needed` method
    // was removed here; it referenced fields (`current_is_zombie`,
    // `graph`) that `ConflictMarkers` no longer has.

    /// Opens a conflict: writes a start marker to `buf` and records
    /// the nesting depth.
    fn begin_conflict<'a, A: Transaction + 'a, B: LineBuffer<'a, A>>(
        &mut self,
        buf: &mut B,
    ) -> Result<()> {
        buf.begin_conflict()?;
        self.current_conflicts += 1;
        Ok(())
    }

    /// Closes the innermost open conflict, if any: writes an end
    /// marker to `buf` and decrements the nesting depth. A no-op when
    /// no conflict is open.
    fn end_conflict<'a, A: Transaction + 'a, B: LineBuffer<'a, A>>(
        &mut self,
        buf: &mut B,
    ) -> Result<()> {
        if self.current_conflicts > 0 {
            buf.end_conflict()?;
            self.current_conflicts -= 1;
        }
        Ok(())
    }
}

impl PathElement {
    /// Returns the smallest line key reachable from this path
    /// element; used to order conflict sides deterministically.
    fn smallest_line(&self, graph: &Graph, sccs: &[Vec<VertexId>]) -> Key<PatchId> {
        match *self {
            PathElement::Scc { ref scc } => {
                // Smallest key among the SCC's members.
                sccs[*scc].iter().map(|v| graph[*v].key).min().unwrap()
            }
            PathElement::Conflict { ref sides } => {
                // Recurse into every side, take the overall minimum.
                sides
                    .iter()
                    .map(|side| {
                        side.path
                            .iter()
                            .map(|elt| elt.smallest_line(graph, sccs))
                            .min()
                            .unwrap()
                    })
                    .min()
                    .unwrap()
            }
        }
    }
}

impl<'a, A: Transaction + 'a, R> GenericTxn<A, R> {
    /// Recursively outputs one conflict (a set of sides) to `buf`,
    /// sorting sides by their smallest line so output is
    /// deterministic. Returns the number of conflicts output,
    /// including nested ones.
    fn output_conflict<B: LineBuffer<'a, A>>(
        &'a self,
        conflicts: &mut ConflictMarkers,
        buf: &mut B,
        graph: &Graph,
        sccs: &[Vec<VertexId>],
        conflict: &mut [Path],
    ) -> Result<usize> {
        let mut is_first = true;
        let n_sides = conflict.len();
        debug!(target:"libpijul::graph::output_conflict", "n_sides = {:?}", n_sides);
        let mut n_conflicts = 0;
        // Conflict markers are only needed when there is more than
        // one side.
        if n_sides > 1 {
            conflicts.begin_conflict(buf)?;
            n_conflicts += 1;
        }
        // Sort sides by their smallest line, for deterministic output.
        conflict.sort_by(|a, b| {
            let a = a.path.iter().map(|a| a.smallest_line(graph, sccs)).min().unwrap();
            let b = b.path.iter().map(|a| a.smallest_line(graph, sccs)).min().unwrap();
            a.cmp(&b)
        });
        for side in conflict {
            if !is_first {
                buf.conflict_next()?;
            }
            is_first = false;
            debug!(target:"libpijul::graph::output_conflict", "side = {:?}", side);
            for i in side.path.iter_mut() {
                match *i {
                    PathElement::Scc { scc } => {
                        debug!(target:"libpijul::graph::output_conflict", "output {:?}", scc);
                        self.output_scc(graph, &sccs[scc], buf)?
                    },
                    PathElement::Conflict { ref mut sides } => {
                        debug!(target:"libpijul::graph::output_conflict", "begin conflict {:?}", sides);
                        n_conflicts += self.output_conflict(conflicts, buf, graph, sccs, sides)?;
                        debug!(target:"libpijul::graph::output_conflict", "end conflict");
                    }
                }
            }
        }
        if n_sides > 1 {
            conflicts.end_conflict(buf)?;
        }
        Ok(n_conflicts)
    }

    /// Output the database contents of the file into the buffer
    /// `buf`. The return value is the number of conflicts in the file
    /// that was output (the original doc claimed a boolean; the
    /// function returns a count). If forward edges are encountered,
    /// they are collected into `forward`.
    pub fn output_file<B: LineBuffer<'a, A>>(
        &'a self,
        branch: &Branch,
        buf: &mut B,
        graph: &mut Graph,
        forward: &mut Vec<(Key<PatchId>, Edge)>,
    ) -> Result<usize> {
        debug!("output_file");
        // Only the dummy vertex: the file is empty, no conflicts.
        // (The original contained a garbled duplicate
        // `return Ok(false)`, ill-typed for `Result<usize>`.)
        if graph.lines.len() <= 1 {
            return Ok(0);
        }
        let scc = graph.tarjan(); // SCCs are given here in reverse order.
        debug!("There are {} SCC", scc.len());
        debug!("SCCs = {:?}", scc);

        let mut dfs = DFS::new(scc.len());
        let conflict_tree = graph.dfs(self, branch, &scc, &mut dfs, forward);

        debug!("dfs done");
        buf.output_line(&graph.lines[1].key, Value::from_slice(b""))?;
        debug!("conflict_tree = {:?}", conflict_tree);
        let mut conflicts = ConflictMarkers {
            current_conflicts: 0,
        };
        let n_conflicts =
            self.output_conflict(&mut conflicts, buf, graph, &scc, &mut [conflict_tree])?;
        debug!("/output_file");
        Ok(n_conflicts)
    }

    /// Outputs a single-vertex SCC to `buf`, wrapped in zombie
    /// conflict markers when the line is half-deleted.
    fn output_scc<B: LineBuffer<'a, A>>(
        &'a self,
        graph: &Graph,
        scc: &[VertexId],
        buf: &mut B,
    ) -> Result<()> {
        // Cycles must have been resolved before output.
        assert_eq!(scc.len(), 1);
        if graph[scc[0]].is_zombie() {
            debug!(target:"libpijul::graph::output_conflict", "zombie {:?}", graph[scc[0]].key);
            buf.begin_zombie_conflict()?;
        }
        let key = graph[scc[0]].key;
        if let Some(cont) = self.get_contents(key) {
            debug!(target:"libpijul::graph::output_conflict", "outputting {:?}", cont);
            buf.output_line(&key, cont)?;
        }
        if graph[scc[0]].is_zombie() {
            buf.end_conflict()?;
        }
        Ok(())
    }
}

/// Removes redundant forward edges, among those listed in `forward`.
impl<'env, R: rand::Rng> MutTxn<'env, R> {





















1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54

55
56
57
58
59

60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215

216
217
218
219
220
221
222
223
224
225
226
227
228
229



230
231
232
233
234
235



238


241
242

244
245
246
247
248
249



250

251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491

492
493
494
pub use sanakirja::Transaction;
use std::path::Path;
pub mod fs_representation;
pub mod apply;
mod record;
pub use backend::{
    ApplyTimestamp, Branch, Edge, EdgeFlags, FileId, FileMetadata, FileStatus, GenericTxn, Hash,
    HashRef, Inode, Key, LineId, MutTxn, OwnedFileId, PatchId, Repository, SmallStr, SmallString,
    Txn, DEFAULT_BRANCH, ROOT_INODE, ROOT_KEY,
};
use fs_representation::ID_LENGTH;
pub use output::{Prefixes, ToPrefixes};
use rand::Rng;
pub use record::{InodeUpdate, RecordState};
    use std::io::BufReader;
        apply_resize_no_output(
            target,
            branch_name,
            remote, /*, partial_paths, apply_cb */
        )
//! This crate contains the core API to access Pijul repositories.
//!
//! The key object is a `Repository`, on which `Txn` (immutable
//! transactions) and `MutTxn` (mutable transactions) can be started,
//! to perform a variety of operations.
//!
//! Another important object is a `Patch`, which encodes two different pieces of information:
//!
//! - Information about deleted and inserted lines between two versions of a file.
//!
//! - Information about file moves, additions and deletions.
//!
//! The standard layout of a repository is defined in module
//! `fs_representation`, and mainly consists of a directory called
//! `.pijul` at the root of the repository, containing:
//!
//! - a directory called `pristine`, containing a Sanakirja database
//! storing most of the repository information.
//!
//! - a directory called `patches`, actually containing the patches,
//! where each patch is a gzipped compression of the bincode encoding
//! of the `patch::Patch` type.
//!
//! At the moment, users of this library, such as the Pijul
//! command-line tool, may use other files in the `.pijul` directory,
//! such as user preferences, or information about remote branches and
//! repositories.
#![recursion_limit = "128"]
#[macro_use]
extern crate bitflags;
extern crate chrono;
#[macro_use]
extern crate log;

extern crate base64;
extern crate bincode;
extern crate bs58;
extern crate byteorder;
extern crate flate2;
extern crate hex;
extern crate ignore;
extern crate openssl;
extern crate rand;
extern crate sanakirja;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate diffs;
extern crate failure;
extern crate sequoia_openpgp;
extern crate serde_json;
extern crate tempdir;
extern crate toml;

use std::path::Path;
pub use sanakirja::Transaction;
use std::collections::HashSet;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
pub use sanakirja::Transaction;

#[derive(Debug)]
/// All the ways operations in this crate can fail, either by wrapping
/// an error from an underlying library or with a domain-specific
/// condition. Human-readable messages live in the `Display` impl.
pub enum Error {
    /// Wrapped `std::io::Error`.
    IO(std::io::Error),
    /// Error from the Sanakirja storage backend.
    Sanakirja(sanakirja::Error),
    /// Patch (de)serialization error from bincode.
    Bincode(bincode::Error),
    /// Invalid UTF-8 encountered while decoding text.
    Utf8(std::str::Utf8Error),
    /// JSON (de)serialization error.
    Serde(serde_json::Error),
    /// Single OpenSSL error.
    OpenSSL(openssl::error::Error),
    /// OpenSSL error stack.
    OpenSSLStack(openssl::error::ErrorStack),
    /// Invalid base58 input (e.g. a malformed hash).
    Base58Decode(bs58::decode::DecodeError),
    /// Generic error carried by the `failure` crate.
    Failure(failure::Error),
    /// The file is already tracked by the repository.
    AlreadyAdded,
    /// The given file is not tracked by the repository.
    FileNotInRepo(PathBuf),
    /// One of the pristine database tables is missing.
    NoDb(backend::Root),
    /// A hash check failed.
    WrongHash,
    /// Unexpected end of input.
    EOF,
    /// A patch signature did not verify.
    WrongPatchSignature,
    /// Attempted to create a branch whose name is already taken.
    BranchNameAlreadyExists(String),
    /// Malformed file header — possible branch corruption.
    WrongFileHeader(Key<PatchId>),
    /// A name node does not have exactly one child.
    FileNameCount(Key<PatchId>),
    /// A patch's dependency is not present.
    MissingDependency(Hash),
    /// The patch is not applied on this branch.
    PatchNotOnBranch(PatchId),
    /// Refusing to track a file or directory named `.pijul`.
    CannotAddDotPijul,
    /// The signing key is encrypted and cannot be used as-is.
    KeyIsEncrypted,
}

impl std::convert::From<std::io::Error> for Error {
    fn from(e: std::io::Error) -> Self {
        Error::IO(e)
    }
}

impl std::convert::From<failure::Error> for Error {
    fn from(e: failure::Error) -> Self {
        Error::Failure(e)
    }
}

impl std::convert::From<sanakirja::Error> for Error {
    fn from(e: sanakirja::Error) -> Self {
        Error::Sanakirja(e)
    }
}

impl std::convert::From<bincode::Error> for Error {
    fn from(e: bincode::Error) -> Self {
        Error::Bincode(e)
    }
}

impl std::convert::From<serde_json::Error> for Error {
    fn from(e: serde_json::Error) -> Self {
        Error::Serde(e)
    }
}

impl std::convert::From<std::str::Utf8Error> for Error {
    fn from(e: std::str::Utf8Error) -> Self {
        Error::Utf8(e)
    }
}

impl std::convert::From<openssl::error::ErrorStack> for Error {
    fn from(e: openssl::error::ErrorStack) -> Self {
        Error::OpenSSLStack(e)
    }
}

impl std::convert::From<bs58::decode::DecodeError> for Error {
    fn from(e: bs58::decode::DecodeError) -> Self {
        Error::Base58Decode(e)
    }
}

/// The result type used throughout this crate, with `Error` as the error variant.
pub type Result<A> = std::result::Result<A, Error>;

impl std::fmt::Display for Error {
    /// Human-readable messages; wrapped errors delegate to their own
    /// `Display`, domain variants get a fixed message (plus the
    /// offending value where the variant carries one).
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        match *self {
            Error::IO(ref e) => e.fmt(fmt),
            Error::Sanakirja(ref e) => e.fmt(fmt),
            Error::Bincode(ref e) => e.fmt(fmt),
            Error::Utf8(ref e) => e.fmt(fmt),
            Error::Serde(ref e) => e.fmt(fmt),
            Error::OpenSSL(ref e) => e.fmt(fmt),
            Error::OpenSSLStack(ref e) => e.fmt(fmt),
            Error::Base58Decode(ref e) => e.fmt(fmt),
            Error::Failure(ref e) => e.fmt(fmt),
            Error::AlreadyAdded => write!(fmt, "Already added"),
            Error::FileNotInRepo(ref file) => write!(fmt, "File {:?} not tracked", file),
            Error::NoDb(ref e) => write!(fmt, "Table missing: {:?}", e),
            Error::WrongHash => write!(fmt, "Wrong hash"),
            Error::EOF => write!(fmt, "EOF"),
            Error::WrongPatchSignature => write!(fmt, "Wrong patch signature"),
            Error::BranchNameAlreadyExists(ref name) => {
                write!(fmt, "Branch {:?} already exists", name)
            }
            Error::WrongFileHeader(ref h) => write!(
                fmt,
                "Wrong file header (possible branch corruption): {:?}",
                h
            ),
            Error::FileNameCount(ref f) => {
                write!(fmt, "Name {:?} doesn't have exactly one child", f)
            }
            Error::MissingDependency(ref f) => write!(fmt, "Missing dependency: {:?}", f),
            Error::PatchNotOnBranch(ref f) => {
                write!(fmt, "The patch is not on this branch {:?}", f)
            }
            Error::CannotAddDotPijul => write!(fmt, "Cannot add a file or directory name .pijul"),
            Error::KeyIsEncrypted => write!(fmt, "Key is encrypted"),
        }
    }
}

impl std::error::Error for Error {
    // NOTE(review): `description` is deprecated in modern Rust in favor
    // of `Display`; kept here for compatibility with the toolchain this
    // crate targets.
    fn description(&self) -> &str {
        match *self {
            Error::IO(ref e) => e.description(),
            Error::Sanakirja(ref e) => e.description(),
            Error::Bincode(ref e) => e.description(),
            Error::Utf8(ref e) => e.description(),
            Error::Serde(ref e) => e.description(),
            Error::OpenSSL(ref e) => e.description(),
            Error::OpenSSLStack(ref e) => e.description(),
            Error::Base58Decode(ref e) => e.description(),
            // `failure::Error` has no `description`; use the error's
            // name, falling back to a fixed string.
            Error::Failure(ref e) => e.name().unwrap_or("Unknown Failure"),
            Error::AlreadyAdded => "Already added",
            Error::FileNotInRepo(_) => "File not tracked",
            Error::NoDb(_) => "One of the tables is missing",
            Error::WrongHash => "Wrong hash",
            Error::EOF => "EOF",
            Error::WrongPatchSignature => "Wrong patch signature",
            Error::BranchNameAlreadyExists(_) => "Branch name already exists",
            Error::WrongFileHeader(_) => "Wrong file header (possible branch corruption)",
            Error::FileNameCount(_) => "A file name doesn't have exactly one child",
            Error::MissingDependency(_) => "Missing dependency",
            Error::PatchNotOnBranch(_) => "The patch is not on this branch",
            Error::CannotAddDotPijul => "Cannot add a file or directory name .pijul",
            Error::KeyIsEncrypted => "Key is encrypted",
        }
    }
}

impl Error {
    pub fn lacks_space(&self) -> bool {
        match *self {
            Error::Sanakirja(sanakirja::Error::NotEnoughSpace) => true,
            _ => false,
        }
    }
}

#[macro_use]
mod backend;
pub mod fs_representation;
mod file_operations;
pub mod fs_representation;

pub mod patch;
pub mod status;

pub mod apply;
mod conflict;
mod diff;
pub mod graph;
mod output;
mod record;
mod unrecord;

pub use backend::{ApplyTimestamp, Branch, Edge, EdgeFlags, FileId, FileMetadata, FileStatus,
                  GenericTxn, Hash, HashRef, Inode, Key, LineId, MutTxn, OwnedFileId, PatchId,
                  Repository, SmallStr, SmallString, Txn, DEFAULT_BRANCH, ROOT_INODE, ROOT_KEY};
pub use backend::{
    ApplyTimestamp, Branch, Edge, EdgeFlags, FileId, FileMetadata, FileStatus, GenericTxn, Hash,
    HashRef, Inode, Key, LineId, MutTxn, OwnedFileId, PatchId, Repository, SmallStr, SmallString,
    Txn, DEFAULT_BRANCH, ROOT_INODE, ROOT_KEY,
};

pub use record::{InodeUpdate, RecordState};

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
use fs_representation::{RepoRoot, ID_LENGTH};

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

================================
pub use fs_representation::{RepoRoot, RepoPath};
use fs_representation::{ID_LENGTH};

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
pub use output::{ConflictingFile, Prefixes, ToPrefixes};
pub use patch::{Patch, PatchHeader};
use rand::distributions::Alphanumeric;
use rand::Rng;
pub use record::{InodeUpdate, RecordState};
pub use sanakirja::value::Value;
pub use output::{Prefixes, ToPrefixes};
use fs_representation::ID_LENGTH;
use fs_representation::ID_LENGTH;
use std::io::Read;
use rand::Rng;

pub use diff::Algorithm as DiffAlgorithm;

impl<'env, T: rand::Rng> backend::MutTxn<'env, T> {
    /// Writes the branch's "changes" file under the repository root:
    /// a first line carrying a branch identifier, followed by one
    /// `<base58 hash>:<timestamp>` line per applied patch.
    ///
    /// If the existing file's first byte is `\n` (or the file is
    /// absent/not valid UTF-8), a fresh random alphanumeric identifier
    /// of length `ID_LENGTH` is generated.
    pub fn output_changes_file<P: AsRef<Path>>(
        &mut self,
        branch: &Branch,
        fs_repo: &RepoRoot<P>,
    ) -> Result<()> {
        let changes_file = fs_repo.branch_changes_file(branch.name.as_str());
        // Buffer pre-filled with '\n' so a missing file is detected as
        // "no identifier" by the first-byte test below.
        let mut branch_id: Vec<u8> = vec![b'\n'; ID_LENGTH + 1];
        {
            if let Ok(mut file) = std::fs::File::open(&changes_file) {
                file.read_exact(&mut branch_id)?;
            }
        }
        // Fall back to "\n" (i.e. "no identifier") on invalid UTF-8.
        let mut branch_id = if let Ok(s) = String::from_utf8(branch_id) {
            s
        } else {
            "\n".to_string()
        };
        if branch_id.as_bytes()[0] == b'\n' {
            // No identifier yet: generate a random alphanumeric one.
            branch_id.truncate(0);
            let mut rng = rand::thread_rng();
            branch_id.extend(rng.sample_iter(&Alphanumeric).take(ID_LENGTH));
            branch_id.push('\n');
        }

        // Rewrite the whole file: identifier line, then one line per
        // applied patch in application order.
        let mut file = std::fs::File::create(&changes_file)?;
        file.write_all(&branch_id.as_bytes())?;
        for (s, hash) in self.iter_applied(&branch, None) {
            let hash_ext = self.get_external(hash).unwrap();
            writeln!(file, "{}:{}", hash_ext.to_base58(), s)?
        }
        Ok(())
    }

    /// Collects the set of `(external hash, timestamp)` pairs for all
    /// patches on `branch`.
    pub fn branch_patches(&mut self, branch: &Branch) -> HashSet<(backend::Hash, ApplyTimestamp)> {
        self.iter_patches(branch, None)
            .map(|(patch, time)| (self.external_hash(patch).to_owned(), time))
            .collect()
    }

    /// Forks `branch` into a new branch named `new_name`, copying its
    /// three tables and its apply counter.
    ///
    /// Returns `Error::BranchNameAlreadyExists` if `new_name` equals
    /// the source branch's name.
    pub fn fork(&mut self, branch: &Branch, new_name: &str) -> Result<Branch> {
        if branch.name.as_str() == new_name {
            Err(Error::BranchNameAlreadyExists(new_name.to_string()))
        } else {
            Ok(Branch {
                db: self.txn.fork(&mut self.rng, &branch.db)?,
                patches: self.txn.fork(&mut self.rng, &branch.patches)?,
                revpatches: self.txn.fork(&mut self.rng, &branch.revpatches)?,
                name: SmallString::from_str(new_name),
                apply_counter: branch.apply_counter,
            })
        }
    }
    
    /// Starts tracking `path` (a file, or a directory if `is_dir`),
    /// attached at the repository root (no parent inode).
    pub fn add_file<P: AsRef<Path>>(&mut self, path: &RepoPath<P>, is_dir: bool) -> Result<()> {
        self.add_inode(None, path, is_dir)
    }

    /// Recursive worker for `file_nodes_fold`: walks outgoing
    /// folder edges from `root`, folding `f` over the visited keys.
    fn file_nodes_fold_<A, F: FnMut(A, Key<PatchId>) -> A>(
        &self,
        branch: &Branch,
        root: Key<PatchId>,
        level: usize,
        mut init: A,
        f: &mut F,
    ) -> Result<A> {
        // Only follow non-parent folder edges (i.e. downward in the
        // file tree).
        for v in self
            .iter_adjacent(&branch, root, EdgeFlags::empty(), EdgeFlags::all())
            .take_while(|v| {
                v.flag.contains(EdgeFlags::FOLDER_EDGE) && !v.flag.contains(EdgeFlags::PARENT_EDGE)
            })
        {
            debug!("file_nodes_fold_: {:?} {:?}", root, v);
            // Only even, non-root levels are fed to `f` — presumably
            // the file-node half of the alternating name/file node
            // layering; TODO confirm against the graph layout.
            if level & 1 == 0 && level > 0 {
                init = f(init, root)
            }
            init = self.file_nodes_fold_(branch, v.dest, level + 1, init, f)?
        }
        Ok(init)
    }

    /// Folds `f` over the file nodes of `branch`, starting from the
    /// root key.
    pub fn file_nodes_fold<A, F: FnMut(A, Key<PatchId>) -> A>(
        &self,
        branch: &Branch,
        init: A,
        mut f: F,
    ) -> Result<A> {
        self.file_nodes_fold_(branch, ROOT_KEY, 0, init, &mut f)
    }
}

impl<T: Transaction, R> backend::GenericTxn<T, R> {
    /// Tells whether a `key` is alive in `branch`, i.e. is either the
    /// root, or has at least one incoming edge that is neither a
    /// deleted edge nor a pseudo-edge.
    pub fn is_alive(&self, branch: &Branch, key: Key<PatchId>) -> bool {
        debug!("is_alive {:?}?", key);
        let mut alive = key.is_root();
        // Start scanning at the first PARENT_EDGE entry for `key`.
        let e = Edge::zero(EdgeFlags::PARENT_EDGE);
        for (k, v) in self.iter_nodes(&branch, Some((key, Some(e)))) {
            if k != key {
                break;
            }
            alive = alive
                || (!v.flag.contains(EdgeFlags::DELETED_EDGE)
                    && !v.flag.contains(EdgeFlags::PSEUDO_EDGE))
        }
        alive
    }

    /// Tells whether a `key` is alive or zombie in `branch`, i.e. is
    /// either the root, or has at least one incoming parent edge that
    /// is not deleted.
    pub fn is_alive_or_zombie(&self, branch: &Branch, key: Key<PatchId>) -> bool {
        debug!("is_alive_or_zombie {:?}?", key);
        if key == ROOT_KEY {
            return true;
        }
        let e = Edge::zero(EdgeFlags::PARENT_EDGE);
        for (k, v) in self.iter_nodes(&branch, Some((key, Some(e)))) {
            if k != key {
                break;
            }
            debug!("{:?}", v);
            if v.flag.contains(EdgeFlags::PARENT_EDGE) && !v.flag.contains(EdgeFlags::DELETED_EDGE)
            {
                return true;
            }
        }
        false
    }

    /// Tests whether `key` has at least one adjacent edge whose flags
    /// lie in the range `[min, max]`. Only the first edge at or after
    /// `min` is examined.
    pub fn has_edge(
        &self,
        branch: &Branch,
        key: Key<PatchId>,
        min: EdgeFlags,
        max: EdgeFlags,
    ) -> bool {
        let e = Edge::zero(min);
        if let Some((k, v)) = self.iter_nodes(&branch, Some((key, Some(e)))).next() {
            debug!("has_edge {:?}", v.flag);
            k == key && (v.flag <= max)
        } else {
            false
        }
    }

    /// Tells which paths (of folder nodes) a key is in.
    pub fn get_file<'a>(&'a self, branch: &Branch, key: Key<PatchId>) -> Vec<Key<PatchId>> {
        // Upward breadth-first walk from `key` along parent edges;
        // `seen` guards against cycles introduced by pseudo-edges.
        let mut stack = vec![key.to_owned()];
        let mut seen = HashSet::new();
        let mut names = Vec::new();
        loop {
            match stack.pop() {
                None => break,
                Some(key) if !seen.contains(&key) => {
                    debug!("key {:?}, None", key);
                    seen.insert(key.clone());

                    for v in self.iter_adjacent(branch, key, EdgeFlags::empty(), EdgeFlags::all()) {
                        debug!("all_edges: {:?}", v);
                    }
                    for v in
                        self.iter_adjacent(branch, key, EdgeFlags::PARENT_EDGE, EdgeFlags::all())
                    {
                        debug!("get_file {:?}", v);
                        // A plain (possibly pseudo) parent edge: keep
                        // climbing towards the root.
                        if v.flag | EdgeFlags::PSEUDO_EDGE
                            == EdgeFlags::PARENT_EDGE | EdgeFlags::PSEUDO_EDGE
                        {
                            debug!("push!");
                            stack.push(v.dest.clone())
                        } else if v
                            .flag
                            .contains(EdgeFlags::PARENT_EDGE | EdgeFlags::FOLDER_EDGE)
                        {
                            // A parent folder edge: `key` is a folder
                            // node of interest.
                            names.push(key);
                        }
                    }
                }
                _ => {}
            }
        }
        debug!("get_file returning {:?}", names);
        names
    }

    /// Resolves the full name paths of `key`: climbs from `key` to the
    /// root, collecting basenames, and returns each reachable root
    /// paired with its (root-to-leaf) component list.
    pub fn get_file_names<'a>(
        &'a self,
        branch: &Branch,
        key: Key<PatchId>,
    ) -> Vec<(Key<PatchId>, Vec<&'a str>)> {
        let mut names = vec![(key, Vec::new())];
        debug!("inode: {:?}", names);
        // Go back to the root.
        let mut next_names = Vec::new();
        let mut only_roots = false;
        // `inodes` prevents revisiting a node reachable through
        // multiple parents.
        let mut inodes = HashSet::new();
        while !only_roots {
            next_names.clear();
            only_roots = true;
            for (inode, names) in names.drain(..) {
                if !inodes.contains(&inode) {
                    inodes.insert(inode.clone());

                    if inode != ROOT_KEY {
                        only_roots = false;
                    }
                    let names_ = self.file_names(branch, inode);
                    if names_.is_empty() {
                        // Dead end: keep what we have for this node.
                        next_names.push((inode, names));
                        break;
                    } else {
                        debug!("names_ = {:?}", names_);
                        // Fork the accumulated path for each parent
                        // name of this node.
                        for (inode_, _, base) in names_ {
                            let mut names = names.clone();
                            names.push(base);
                            next_names.push((inode_, names))
                        }
                    }
                }
            }
            std::mem::swap(&mut names, &mut next_names)
        }
        debug!("end: {:?}", names);
        // Components were collected leaf-first; reverse each path.
        for &mut (_, ref mut name) in names.iter_mut() {
            name.reverse()
        }
        names
    }
}

fn make_remote<'a, I: Iterator<Item = &'a Hash>>(
    target: &fs_representation::RepoRoot<impl AsRef<Path>>,
    remote: I,
) -> Result<(Vec<(Hash, Patch)>, usize)> {
    use fs_representation::*;
    use std::io::BufReader;
    use std::fs::File;
    use std::io::BufReader;
    let mut patches = Vec::new();

















use backend::*;
use backend::*;
            vec![Key {
                patch: Some(up_context.patch),
                line: up_context.line,
            }]
                            let k = diff.lines_a[descendant + 1];
                            Rc::new(RefCell::new(vec![Key {
                                patch: Some(k.patch),
                                line: k.line,
                            }]))
                diff.conflicts_down_contexts
                    .insert(line_index, (line_id, down.clone()));
            Rc::new(RefCell::new(vec![Key {
                patch: Some(down_context.patch),
                line: down_context.line.clone(),
            }]))
















































use backend::*;
use backend::*;
    fn delete_edges(
        &self,
        branch: &Branch,
        edges: &mut Vec<patch::NewEdge>,
        key: Option<Key<PatchId>>,
        flag: EdgeFlags,
    ) {
                    self.get_patch(&branch.patches, key.patch).is_some() || key.is_root()
                    .take_while(|&(k, v)| {
                        k == key
                            && v.flag <= flag | EdgeFlags::PSEUDO_EDGE | EdgeFlags::EPSILON_EDGE
                    }) {
                        edge.flag = EdgeFlags::DELETED_EDGE | EdgeFlags::PARENT_EDGE;
                                continue;
            None => {}
    pub(in optimal_diff) fn delete_lines(
        &self,
        branch: &Branch,
        diff: &mut Diff<A>,
        i0: usize,
        i1: usize,
    ) -> Deletion {
                self.delete_edges(
                    branch,
                    &mut edges,
                    Some(diff.lines_a[i]),
                    EdgeFlags::PARENT_EDGE,
                );
                                                       // TODO: check that these two lines are not already linked.
                if !self.is_connected(branch, diff.lines_a[i0 - 1], diff.lines_a[i1]) {
                    debug!("conflict ordering between {:?} and {:?}", i0 - 1, i1);
                            line: diff.lines_a[i0 - 1].line.clone(),
    pub(in optimal_diff) fn confirm_zombie(
        &self,
        branch: &Branch,
        diff: &Diff<A>,
        actions: &mut Vec<Record<Rc<RefCell<ChangeContext<PatchId>>>>>,
        key: Key<PatchId>,
    ) {
        self.delete_edges(
            branch,
            &mut zombie_edges,
            Some(key),
            EdgeFlags::DELETED_EDGE | EdgeFlags::PARENT_EDGE,
        );
                    previous: EdgeFlags::PARENT_EDGE | EdgeFlags::DELETED_EDGE,






















































































































































use patch;
use graph;
use patch::{Change, Record, ChangeContext};
use {GenericTxn, Result};
use graph;
use patch;
use patch::{Change, ChangeContext, Record};
use {GenericTxn, Result};
use conflict;
use sanakirja::value::Value;
use std;
use std::cell::RefCell;
use std::collections::HashMap;
mod delete;
        self.conflicts_descendants
            .insert(*self.current_conflict_ancestor.last().unwrap(), len);
            let e = self.conflicts_sides
                .entry(*self.current_conflict_ancestor.last().unwrap())
                .or_insert(Vec::new());
        self.conflicts_ancestors
            .insert(l, *self.current_conflict_ancestor.last().unwrap());
    conflict_ordering: Vec<Change<Rc<RefCell<ChangeContext<PatchId>>>>>,
    Addition(Change<Rc<RefCell<ChangeContext<PatchId>>>>),
        if let Pending::None = *self {
            true
        } else {
            false
        }
fn amend_down_context<A: Transaction>(diff: &Diff<A>, i0: usize, i: usize) {
            line: key.line,
    fn lines_eq(
        &self,
        branch: &Branch,
        diff: &mut Diff<A>,
        b: &[&[u8]],
        cursors: &mut Cursors,
        actions: &mut Vec<Record<Rc<RefCell<ChangeContext<PatchId>>>>>,
    ) {
            let status = *diff.status.get(&(cursors.i - 1)).unwrap();
            let mut i0 = *diff.conflicts_ancestors.get(&(cursors.i - 1)).unwrap();
            debug!(
                "adding from {} to {} / {}, context {}",
                j0,
                cursors.j,
                b.len(),
                cursors.last_alive_context
            );
                let adds = self.add_lines(
                    cursors.last_alive_context,
                    Some(i),
                    diff,
                    &b[j0..j],
                    cursors.trailing_equals,
                );
        self.confirm_zombie(
            branch,
            diff,
            actions,
            diff.lines_a[cursors.leading_equals + cursors.i],
        );
            debug!(
                "adding from {} to {} / {}, context {}",
                j0,
                j,
                b.len(),
                cursors.last_alive_context
            );
                    cursors.trailing_equals,
    fn finish_i(
        &self,
        branch: &Branch,
        diff: &mut Diff<A>,
        b: &[&[u8]],
        cursors: &mut Cursors,
        actions: &mut Vec<Record<Rc<RefCell<ChangeContext<PatchId>>>>>,
    ) {
                debug!(
                    "line {}, adding remaining lines before the last deletion i={} j={} j0={}",
                    line!(),
                    i,
                    j0,
                    j
                );
                    cursors.trailing_equals,
                cursors.pending = Pending::Deletion(self.delete_lines(branch, diff, i0, i))
    fn finish_j(
        &self,
        branch: &Branch,
        diff: &mut Diff<A>,
        b: &[&[u8]],
        cursors: &mut Cursors,
        actions: &mut Vec<Record<Rc<RefCell<ChangeContext<PatchId>>>>>,
    ) {
                cursors.pending = Pending::Deletion(self.delete_lines(branch, diff, i0, i));
            debug!(
                "line {}, adding lines after trailing equals: {:?} {:?}",
                line!(),
                diff.lines_a.len(),
                cursors.trailing_equals
            );
            cursors.trailing_equals,
    fn local_diff<'a>(
        &'a self,
        branch: &Branch,
        actions: &mut Vec<Record<Rc<RefCell<ChangeContext<PatchId>>>>>,
        diff: &mut Diff<A>,
        b: &[&'a [u8]],
    ) {
            0,
        compute_costs(
            diff,
            b,
            cursors.leading_equals,
            cursors.trailing_equals,
            &mut opt,
        );
            let contents_b_j: Value<'a, A> =
                Value::from_slice(b[cursors.leading_equals + cursors.j]);
            self.finish_j(branch, diff, b, &mut cursors, actions)
    pub fn diff<'a>(
        &'a self,
        inode: Key<Option<Hash>>,
        branch: &'a Branch,
        file: Rc<PathBuf>,
        line_num: &mut LineId,
        actions: &mut Vec<Record<Rc<RefCell<ChangeContext<PatchId>>>>>,
        redundant: &mut Vec<(Key<PatchId>, Edge)>,
        a: &mut Graph,
        lines_b: &[&[u8]],
    ) -> Result<()> {
        self.local_diff(branch, actions, &mut d, &lines_b);
fn compute_costs<A: Transaction>(
    diff: &Diff<A>,
    b: &[&[u8]],
    leading_equals: usize,
    trailing_equals: usize,
    opt: &mut Matrix<usize>,
) {
fn stop_del(
    file: Rc<PathBuf>,
    cursors: &mut Cursors,
    dels: Deletion,
    actions: &mut Vec<Record<Rc<RefCell<ChangeContext<PatchId>>>>>,
) {
fn stop_add(
    file: Rc<PathBuf>,
    cursors: &mut Cursors,
    adds: Change<Rc<RefCell<ChangeContext<PatchId>>>>,
    actions: &mut Vec<Record<Rc<RefCell<ChangeContext<PatchId>>>>>,
) {

1
2
3
4
5
6
7
8
9

10
11

12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68

69
70

71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95


96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223

225
226
227


230


233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554









555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594

595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617

618
619
620
621
622
623
624
625
626
627

















628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
use rand;
use backend::*;
use graph;
use patch::*;
use rand;
use record::InodeUpdate;
use std;
use std::borrow::Cow;
use std::collections::{HashMap, HashSet};
use std::fs;
use rand;
use std::path::{Path, PathBuf};
use tempdir;
use std::borrow::Cow;
use {Error, Result};

use super::fs_representation::{RepoRoot, RepoPath, in_repo_root};

#[cfg(not(windows))]
use std::os::unix::fs::PermissionsExt;

#[cfg(not(windows))]
/// Sets the Unix mode bits of `name` to `permissions`, leaving the
/// rest of the file's permission state as-is.
fn set_permissions(name: &Path, permissions: u16) -> Result<()> {
    // Fetch the current permissions so only the mode is overwritten.
    let mut perms = std::fs::metadata(name)?.permissions();
    debug!(
        "setting mode for {:?} to {:?} (currently {:?})",
        name, permissions, perms
    );
    perms.set_mode(u32::from(permissions));
    std::fs::set_permissions(name, perms)?;
    Ok(())
}

#[cfg(windows)]
/// No-op on Windows: Unix mode bits have no direct equivalent there,
/// so permissions are silently ignored.
fn set_permissions(_name: &Path, _permissions: u16) -> Result<()> {
    Ok(())
}

#[derive(Debug)]
/// A file-tree node collected while outputting a branch to the
/// working copy.
struct OutputItem {
    // Inode of the containing directory.
    parent: Inode,
    // Permissions / file-type metadata recorded in the graph.
    meta: FileMetadata,
    // Graph key of this node.
    key: Key<PatchId>,
    // Existing working-copy inode, if this node is already tracked.
    inode: Option<Inode>,
    // Whether the node is a zombie (deleted but still referenced).
    is_zombie: bool,
    // How this node relates to the requested output prefixes.
    related: Related,
}

#[derive(Debug, PartialEq, Eq)]
/// Relation of a key to the requested output prefixes (see
/// `is_related`): not related, an ancestor of a prefix, or an exact
/// member of one.
pub enum Related {
    No,
    Ancestor,
    Exact,
}

/// A file left with unresolved conflicts after output, reported back
/// to the caller.
pub struct ConflictingFile {
    // Working-copy inode of the conflicting file.
    pub inode: Inode,
    // Number of conflicts found in the file.
    pub n_conflicts: usize,
    // Repository-relative path of the file.
    pub path: RepoPath<PathBuf>,
}

/// Classify `key` with respect to `prefixes`.
///
/// Each prefix is stored as a chain of keys whose first element is the
/// prefix itself and whose remaining elements are its ancestors, up to
/// the root. An empty prefix set means "no filtering", so everything is
/// `Related::Exact`.
fn is_related(prefixes: &Prefixes, key: Key<PatchId>) -> Related {
    if prefixes.0.is_empty() {
        return Related::Exact;
    }
    for pref in prefixes.0.iter() {
        let mut is_first = true;
        for &p in pref {
            if p == key {
                // The first key of a chain is the prefix itself; any
                // later key is one of its ancestors.
                if is_first {
                    return Related::Exact;
                } else {
                    return Related::Ancestor;
                }
            }
            is_first = false
        }
    }
    Related::No
}

impl<'env, T: rand::Rng> MutTxn<'env, T> {
    // Climb up the tree (using revtree).
    fn filename_of_inode(&self, inode: Inode, working_copy: &Path) -> Option<PathBuf> {
        let mut components = Vec::new();
        let mut current = inode;
        loop {
            match self.get_revtree(current) {
                Some(v) => {
                    components.push(v.basename.to_owned());
                    current = v.parent_inode.clone();
                    if current == ROOT_INODE {
                        break;
                    }
                }
                None => {
                    debug!("filename_of_inode: not in tree");
                    return None
                },
                    return None;
                }
            }
        }
        let mut working_copy = working_copy.to_path_buf();
        for c in components.iter().rev() {
            working_copy.push(c.as_small_str().as_str());
        }
        Some(working_copy)
    }

    /// Collect all the children of key `key` into `files`.
    ///
    /// `path` is the in-repository path of `key`'s directory: children
    /// are inserted keyed by `path` joined with their basename. `inode`
    /// is recorded as the children's parent inode. Only children whose
    /// name key relates to `prefixes` (see `is_related`) are kept.
    /// Returns `Error::WrongFileHeader` on a malformed name node.
    fn collect_children(
        &mut self,
        branch: &Branch,
        path: RepoPath<&Path>,
        key: Key<PatchId>,
        inode: Inode,
        base_path: &RepoPath<impl AsRef<Path> + std::fmt::Debug>,
        prefixes: &Prefixes,
        files: &mut HashMap<RepoPath<PathBuf>, HashMap<Key<PatchId>, OutputItem>>,
    ) -> Result<()> {
        debug!("collect_children {:?}", base_path);
        // Follow folder/pseudo/epsilon edges out of `key`: each leads to
        // a child's name node.
        let f = EdgeFlags::FOLDER_EDGE | EdgeFlags::PSEUDO_EDGE | EdgeFlags::EPSILON_EDGE;
        for b in self.iter_adjacent(&branch, key, EdgeFlags::empty(), f) {
            debug!("b={:?}", b);
            let cont_b = self.get_contents(b.dest).unwrap();
            // The node after the name node is the file's inode node.
            let (_, b_key) = self
                .iter_nodes(
                    &branch,
                    Some((b.dest, Some(Edge::zero(EdgeFlags::FOLDER_EDGE)))),
                )
                .next()
                .unwrap();
            let b_inode = self.get_revinodes(b_key.dest);

            // This is supposed to be a small string, so we can do
            // as_slice.
            if cont_b.as_slice().len() < 2 {
                error!("cont_b {:?} b.dest {:?}", cont_b, b.dest);
                return Err(Error::WrongFileHeader(b.dest));
            }
            // Name nodes store two bytes of metadata followed by the basename.
            let (perms, basename) = cont_b.as_slice().split_at(2);

            let perms = FileMetadata::from_contents(perms);
            let basename = std::str::from_utf8(basename).unwrap();
            debug!("filename: {:?} {:?}", perms, basename);
            let name = path.join(Path::new(basename));
            let related = is_related(&prefixes, b_key.dest);
            debug!("related {:?} = {:?}", base_path, related);
            if related != Related::No {
                let v = files.entry(name).or_insert(HashMap::new());
                // Keep only the first item seen for each name key.
                if v.get(&b.dest).is_none() {
                    // A live deleted parent folder edge marks a zombie
                    // (file both alive and deleted).
                    let is_zombie = {
                        let f = EdgeFlags::FOLDER_EDGE
                            | EdgeFlags::PARENT_EDGE
                            | EdgeFlags::DELETED_EDGE;
                        self.iter_adjacent(&branch, b_key.dest, f, f)
                            .next()
                            .is_some()
                    };
                    debug!("is_zombie = {:?}", is_zombie);
                    v.insert(
                        b.dest,
                        OutputItem {
                            parent: inode,
                            meta: perms,
                            key: b_key.dest,
                            inode: b_inode,
                            is_zombie,
                            related,
                        },
                    );
                }
            }
        }
        Ok(())
    }

    /// Collect names of files with conflicts
    ///
    /// As conflicts have an internal representation, it can be determined
    /// exactly which files contain conflicts.
    pub fn list_conflict_files(
        &mut self,
        branch_name: &str,
        prefixes: &[RepoPath<&Path>],
    ) -> Result<Vec<RepoPath<PathBuf>>> {
        let mut files = HashMap::new();
        let mut next_files = HashMap::new();
        let branch = self.open_branch(branch_name)?;
        let mut base_path = in_repo_root();
        let prefixes = prefixes.to_prefixes(self, &branch);
        self.collect_children(
            &branch,
            in_repo_root(),
            ROOT_KEY,
            ROOT_INODE,
            &mut base_path,
            &prefixes,
            &mut files,
        )?;

        let mut ret = vec![];
        let mut forward = Vec::new();
        while !files.is_empty() {
            next_files.clear();
            for (a, b) in files.drain() {
                for (_, output_item) in b {
                    // (_, meta, inode_key, inode, is_zombie)
                    // Only bother with existing files
                    if let Some(inode) = output_item.inode {
                        if output_item.is_zombie {
                            ret.push(a.clone())
                        }
                        if output_item.meta.is_dir() {
                            self.collect_children(
                                &branch,
                                a.as_ref(),
                                output_item.key,
                                inode,
                                &mut base_path,
                                &prefixes,
                                &mut next_files,
                            )?;
                        } else {
                            let mut graph = self.retrieve(&branch, output_item.key);
                            let mut buf = graph::Writer::new(std::io::sink());

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
                            let n_conflicts =
                                self.output_file(&branch, &mut buf, &mut graph, &mut forward)?;
                            if n_conflicts > 0 {

================================

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
                            if self.output_file(&branch, &mut buf, &mut graph, &mut forward)? {

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
                                ret.push(a.clone())
                            }
                        }
                    }
                }
            }
            std::mem::swap(&mut files, &mut next_files);
        }
        Ok(ret)
    }

    /// Decorate `name`'s file name with the base58 hash of `name_key`'s
    /// patch, so that files conflicting on the same path get distinct
    /// output names.
    fn make_conflicting_name(&self, name: &mut RepoPath<PathBuf>, name_key: Key<PatchId>) {
        // Compute the decorated name in an inner scope so the borrow of
        // `name` ends before we mutate it.
        let decorated = {
            let base = name.file_name().unwrap().to_string_lossy();
            format!("{}.{}", base, name_key.patch.to_base58())
        };
        name.set_file_name(std::ffi::OsStr::new(&decorated));
    }

    /// Write every alive file matching `prefixes` into `working_copy`,
    /// creating directories, rendering files, renaming moved files, and
    /// keeping the tree/revtree and inodes/revinodes tables in sync.
    /// Files output with unresolved conflicts are pushed onto `conflicts`.
    fn output_alive_files(
        &mut self,
        branch: &mut Branch,
        prefixes: &Prefixes,
        working_copy: &Path,
        conflicts: &mut Vec<ConflictingFile>,
    ) -> Result<()> {
        debug!("working copy {:?}", working_copy);
        // Breadth-first traversal: `files` holds the current directory
        // level, `next_files` accumulates the children for the next one.
        let mut files = HashMap::new();
        let mut next_files = HashMap::new();
        let mut base_path = RepoPath(PathBuf::new());
        self.collect_children(
            branch,
            in_repo_root(),
            ROOT_KEY,
            ROOT_INODE,
            &mut base_path,
            prefixes,
            &mut files,
        )?;

        // Keys already output, so each file is handled at most once.
        let mut done = HashSet::new();
        while !files.is_empty() {
            debug!("files {:?}", files);
            next_files.clear();
            for (a, b) in files.drain() {
                let b_len = b.len();
                for (name_key, output_item) in b {
                    // (parent_inode, meta, inode_key, inode, is_zombie)
                    /*let has_several_names = {
                        let e = Edge::zero(EdgeFlags::PARENT_EDGE | EdgeFlags::FOLDER_EDGE);
                        let mut it = self.iter_nodes(branch, Some((inode_key, Some(&e))))
                            .take_while(|&(k, v)| {
                                k == inode_key && v.flag|EdgeFlags::PSEUDO_EDGE == e.flag|EdgeFlags::PSEUDO_EDGE
                            });
                        it.next();
                        it.next().is_some()
                    };*/
                    if !done.insert(output_item.key) {
                        debug!("already done {:?}", output_item.key);
                        continue;
                    }

                    // Several items for one path is a name conflict:
                    // decorate the output name with the patch hash.
                    let name = if b_len > 1
                    /*|| has_several_names*/
                    {
                        // debug!("b_len = {:?}, has_several_names {:?}", b_len, has_several_names);
                        let mut name = a.clone();
                        self.make_conflicting_name(&mut name, name_key);
                        Cow::Owned(name.0)
                    } else {
                        Cow::Borrowed(a.as_path())
                    };
                    let file_name = name.file_name().unwrap().to_string_lossy();
                    base_path.push(&file_name);
                    let file_id = OwnedFileId {
                        parent_inode: output_item.parent,
                        basename: SmallString::from_str(&file_name),
                    };
                    let working_copy_name = working_copy.join(name.as_ref());

                    let status = if output_item.is_zombie {
                        FileStatus::Zombie
                    } else {
                        FileStatus::Ok
                    };

                    let inode = if let Some(inode) = output_item.inode {
                        // If the file already exists, find its
                        // current name and rename it if that name
                        // is different.
                        if let Some(ref current_name) = self.filename_of_inode(inode, "".as_ref()) {
                            if current_name != name.as_ref() {
                                let current_name = working_copy.join(current_name);
                                debug!("renaming {:?} to {:?}", current_name, working_copy_name);
                                let parent = self.get_revtree(inode).unwrap().to_owned();
                                self.del_revtree(inode, None)?;
                                self.del_tree(&parent.as_file_id(), None)?;

                                debug!("file_id: {:?}", file_id);
                                if let Some(p) = working_copy_name.parent() {
                                    std::fs::create_dir_all(p)?
                                }
                                // A failed rename is logged but not fatal.
                                if let Err(e) = std::fs::rename(&current_name, &working_copy_name) {
                                    error!(
                                        "while renaming {:?} to {:?}: {:?}",
                                        current_name, working_copy_name, e
                                    )
                                }
                            }
                        }
                        self.put_tree(&file_id.as_file_id(), inode)?;
                        self.put_revtree(inode, &file_id.as_file_id())?;
                        // If the file had been marked for deletion, remove that mark.
                        if let Some(header) = self.get_inodes(inode) {
                            debug!("header {:?}", header);
                            let mut header = header.to_owned();
                            header.status = status;
                            self.replace_inodes(inode, header)?;
                        } else {
                            let header = FileHeader {
                                key: output_item.key,
                                metadata: output_item.meta,
                                status,
                            };
                            debug!("no header {:?}", header);
                            self.replace_inodes(inode, header)?;
                            self.replace_revinodes(output_item.key, inode)?;
                        }
                        inode
                    } else {
                        // Else, create new inode.
                        let inode = self.create_new_inode();
                        let file_header = FileHeader {
                            key: output_item.key,
                            metadata: output_item.meta,
                            status,
                        };
                        self.replace_inodes(inode, file_header)?;
                        self.replace_revinodes(output_item.key, inode)?;
                        debug!("file_id: {:?}", file_id);
                        self.put_tree(&file_id.as_file_id(), inode)?;
                        self.put_revtree(inode, &file_id.as_file_id())?;
                        inode
                    };
                    if output_item.meta.is_dir() {
                        // This is a directory, register it in inodes/trees.
                        std::fs::create_dir_all(&working_copy_name)?;
                        // An exact prefix match means everything below it
                        // is output, so recurse without further filtering.
                        if let Related::Exact = output_item.related {
                            self.collect_children(
                                branch,
                                RepoPath(name.as_ref()),
                                output_item.key,
                                inode,
                                &mut base_path,
                                &Prefixes(Vec::new()),
                                &mut next_files,
                            )?
                        } else {
                            self.collect_children(
                                branch,
                                RepoPath(name.as_ref()),
                                output_item.key,
                                inode,
                                &mut base_path,
                                prefixes,
                                &mut next_files,
                            )?
                        }
                    } else {
                        // Output file.
                        info!(
                            "creating file {:?}, key {:?} {:?}",
                            &name, output_item.key, working_copy_name
                        );
                        let mut f =
                            graph::Writer::new(std::fs::File::create(&working_copy_name).unwrap());
                        debug!("done");
                        let mut l = self.retrieve(branch, output_item.key);
                        if log_enabled!(log::Level::Debug) {
                            // Dump the retrieved graph next to the file to
                            // ease debugging.
                            let mut w = working_copy_name.clone();
                            w.set_extension("pijul_debug");
                            let f = std::fs::File::create(&w)?;
                            l.debug(self, branch, false, false, f)?;
                        }
                        let mut forward = Vec::new();
                        let n_conflicts = self.output_file(branch, &mut f, &mut l, &mut forward)?;
                        if n_conflicts > 0 {
                            conflicts.push(ConflictingFile {
                                inode,
                                n_conflicts,
                                path: RepoPath(name.to_path_buf()),
                            })
                        }
                        self.remove_redundant_edges(branch, &forward)?
                    }
                    base_path.pop();
                    set_permissions(&working_copy_name, output_item.meta.permissions())?
                }
            }
            std::mem::swap(&mut files, &mut next_files);
        }
        Ok(())
    }

    /// Output the repository, assuming any pending patch has already been
    /// applied as `pending_patch_id`.
    ///
    /// First garbage-collects inodes that are neither alive nor part of
    /// the pending patch (deleting the corresponding working-copy files),
    /// then outputs the alive files.
    fn output_repository_assuming_no_pending_patch(
        &mut self,
        prefixes: &Prefixes,
        branch: &mut Branch,
        working_copy: &RepoRoot<impl AsRef<Path>>,
        pending_patch_id: PatchId,
        conflicts: &mut Vec<ConflictingFile>,
    ) -> Result<()> {
        debug!(
            "inodes: {:?}",
            self.iter_inodes(None)
                .map(|(u, v)| (u.to_owned(), v.to_owned()))
                .collect::<Vec<_>>()
        );
        // Now, garbage collect dead inodes.
        let dead: Vec<_> = self
            .iter_tree(None)
            .filter_map(|(k, v)| {
                debug!("{:?} {:?}", k, v);
                if let Some(key) = self.get_inodes(v) {
                    if key.key.patch == pending_patch_id || self.is_alive_or_zombie(branch, key.key)
                    {
                        // Don't delete.
                        None
                    } else {
                        // Dead: remember the parent entry, the inode and
                        // (if still present) its on-disk name.
                        Some((
                            k.to_owned(),
                            v,
                            self.filename_of_inode(v, working_copy.repo_root.as_ref()),
                        ))
                    }
                } else {
                    debug!("not in inodes");
                    Some((k.to_owned(), v, None))
                }
            })
            .collect();
        debug!("dead: {:?}", dead);

        // Now, "kill the deads"
        for (ref parent, inode, ref name) in dead {
            self.remove_inode_rec(inode)?;
            debug!("removed");
            if let Some(ref name) = *name {
                debug!("deleting {:?}", name);
                // Deletion failures are logged but not fatal.
                if let Ok(meta) = fs::metadata(name) {
                    if let Err(e) = if meta.is_dir() {
                        fs::remove_dir_all(name)
                    } else {
                        fs::remove_file(name)
                    } {
                        error!("while deleting {:?}: {:?}", name, e);
                    }
                }
            } else {
                // No on-disk name: just drop the tree entries.
                self.del_tree(&parent.as_file_id(), Some(inode))?;
                self.del_revtree(inode, Some(&parent.as_file_id()))?;
            }
        }
        debug!("done deleting dead files");
        // Then output alive files. This has to be done *after*
        // removing files, because a file removed might have the
        // same name as a file added without there being a conflict
        // (depending on the relation between the two patches).
        self.output_alive_files(branch, prefixes, working_copy.repo_root.as_ref(), conflicts)?;
        debug!("done raw_output_repository");
        Ok(())
    }

    /// Remove `inode` — and, if it is a directory, all of its descendants —
    /// from the inodes/revinodes and tree/revtree tables.
    fn remove_inode_rec(&mut self, inode: Inode) -> Result<()> {
        // Remove the inode from inodes/revinodes.
        // Worklist of inodes still to delete (avoids recursion).
        let mut to_kill = vec![inode];
        while let Some(inode) = to_kill.pop() {
            debug!("kill dead {:?}", inode.to_hex());
            let header = self.get_inodes(inode).map(|x| x.to_owned());
            if let Some(header) = header {
                self.del_inodes(inode, None)?;
                self.del_revinodes(header.key, None)?;
                let mut kills = Vec::new();
                // Remove the inode from tree/revtree.
                // Collect first, then delete, to avoid mutating while
                // iterating.
                for (k, v) in self
                    .iter_revtree(Some((inode, None)))
                    .take_while(|&(k, _)| k == inode)
                {
                    kills.push((k.clone(), v.to_owned()))
                }
                for &(k, ref v) in kills.iter() {
                    self.del_tree(&v.as_file_id(), Some(k))?;
                    self.del_revtree(k, Some(&v.as_file_id()))?;
                }
                // If the dead is a directory, remove its descendants.
                let inode_fileid = OwnedFileId {
                    parent_inode: inode.clone(),
                    basename: SmallString::from_str(""),
                };
                to_kill.extend(
                    self.iter_tree(Some((&inode_fileid.as_file_id(), None)))
                        .take_while(|&(ref k, _)| k.parent_inode == inode)
                        .map(|(_, v)| v.to_owned()),
                )
            }
        }
        Ok(())
    }

    /// Output the repository as if the `pending` patch (describing the
    /// current working-copy state) had been recorded.
    ///
    /// The pending patch is applied, the repository is output, and the
    /// patch is unrecorded again, leaving the branch unchanged. Returns
    /// the files output with conflicts.
    pub fn output_repository(
        &mut self,
        branch: &mut Branch,
        working_copy: &RepoRoot<impl AsRef<Path>>,
        prefixes: &Prefixes,
        pending: &Patch,
        local_pending: &HashSet<InodeUpdate>,
    ) -> Result<Vec<ConflictingFile>> {
        debug!("begin output repository");

        debug!("applying pending patch");
        // Save the pending patch to a temporary directory so it can be
        // applied like any other patch.
        let tempdir = tempdir::TempDir::new("pijul")?;
        let hash = pending.save(tempdir.path(), None)?;
        let internal =
            self.apply_local_patch(branch, working_copy, &hash, pending, local_pending, true)?;

        debug!("applied as {:?}", internal.to_base58());

        debug!("prefixes {:?}", prefixes);
        let mut conflicts = Vec::new();
        self.output_repository_assuming_no_pending_patch(
            &prefixes,
            branch,
            working_copy,
            internal,
            &mut conflicts,
        )?;

        debug!("unrecording pending patch");
        self.unrecord(branch, internal, pending)?;
        Ok(conflicts)
    }

    /// Output the repository without applying any pending patch
    /// (`ROOT_PATCH_ID` stands in for "no pending patch").
    /// Returns the files output with conflicts.
    pub fn output_repository_no_pending(
        &mut self,
        branch: &mut Branch,
        working_copy: &RepoRoot<impl AsRef<Path>>,
        prefixes: &Prefixes,
    ) -> Result<Vec<ConflictingFile>> {
        debug!("begin output repository {:?}", prefixes);
        debug!("prefixes {:?}", prefixes);
        let mut conflicts = Vec::new();
        self.output_repository_assuming_no_pending_patch(
            &prefixes,
            branch,
            working_copy,
            ROOT_PATCH_ID,
            &mut conflicts,
        )?;
        Ok(conflicts)
    }

    pub(crate)fn output_partials(&mut self, branch_name: &str, prefixes: &Prefixes) -> Result<()> {
    pub(crate) fn output_partials(&mut self, branch_name: &str, prefixes: &Prefixes) -> Result<()> {
        for p in prefixes.0.iter() {
            self.put_partials(branch_name, p[0])?;
        }
        Ok(())
    }
}

/// A set of output prefixes. Each inner vector is a chain of graph keys
/// whose first element is the prefix itself, followed by its ancestors
/// up to the root (see `ToPrefixes`).
#[derive(Debug)]
pub struct Prefixes(Vec<Vec<Key<PatchId>>>);

impl Prefixes {
    /// The empty prefix set; `is_related` treats it as "match everything".
    pub fn empty() -> Self {
        Prefixes(Vec::new())
    }
}

/// Conversion of user-supplied paths or inodes into graph-key `Prefixes`.
pub trait ToPrefixes {
    fn to_prefixes<T>(&self, txn: &MutTxn<T>, branch: &Branch) -> Prefixes;
}

/// Paths are turned into prefixes by collecting every key matching each
/// path on the branch.
impl<'a, P> ToPrefixes for &'a [RepoPath<P>]
where
    P: AsRef<Path> + 'a,
{
    fn to_prefixes<T>(&self, txn: &MutTxn<T>, branch: &Branch) -> Prefixes {
        Prefixes(
            self.iter()
                .flat_map(|pref| txn.prefix_keys(&branch, pref))
                .collect(),
        )
    }
}

impl<'a> ToPrefixes for &'a [Inode] {
    fn to_prefixes<T>(&self, txn: &MutTxn<T>, _: &Branch) -> Prefixes {
        Prefixes(self.iter().map(|pref| {
            let mut result = Vec::new();
            let mut result = Vec::new();
            let mut current = *pref;
            loop {
                if current == ROOT_INODE {
                    result.push(ROOT_KEY);
                    break;
                }
                result.push(txn.get_inodes(current).unwrap().key);
                match txn.get_revtree(current) {
                    Some(v) => current = v.parent_inode.clone(),
                    None => break,
                }
            }
            result
        }).collect())
        Prefixes(
            self.iter()
                .map(|pref| {
                    let mut result = Vec::new();
                    let mut current = *pref;
                    loop {
                        if current == ROOT_INODE {
                            result.push(ROOT_KEY);
                            break;
                        }
                        result.push(txn.get_inodes(current).unwrap().key);
                        match txn.get_revtree(current) {
                            Some(v) => current = v.parent_inode.clone(),
                            None => break,
                        }
                    }
                    result
                })
                .collect(),
        )
    }










use flate2;
use rand;
use std::fs::{metadata, File, OpenOptions};
use std::io::{BufRead, Read, Write};
use std::path::Path;
use std::str::from_utf8;
use thrussh_keys;
use thrussh_keys::key::KeyPair;
use thrussh_keys::PublicKeyBase64;
use {ErrorKind, Result};

























1


2
3
4
5


6
7
8
9
10
11

12
13
14

15
16


17
18
19
20
21
22
23
24
25
26
27
28

29
30
31
32
33
34
35
36
37
38
39
40

41

42
43
44
45
46
47
48




















49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67








68
69
70
71
72
73
74
75
76
77
78
79

80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177


178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316

317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363


364
365
366
367
368
369


370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425






426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489

490
491
492
493
494
495
496
497


498
499
500
501
502

503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568

569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605


606
607
608
609
610
611
612
613
614
615

616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634






635
636
637
638
639
640
641
642





643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667







668

669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705







706
707
708
709





710
711
712
713
714
715
716
717

718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737


738
739
740
741
742
743
744
745
746

747
748
749
750

751
752
753
754
755
756
757

758
759
760
761
762
763
764
765

766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786

787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811

812
813
814
815
816
817
818
819
820
821
822
823
824
825

826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861

use {ErrorKind, Result};
use std::cell::RefCell;
                up_context: Rc::try_unwrap(up_context)
                    .unwrap()
                    .into_inner()
                down_context: Rc::try_unwrap(down_context)
                    .unwrap()
                    .into_inner()
                    down_context: Rc::new(RefCell::new(vec![Key {
                        patch: Some(current_node.patch),
                        line: current_node.line.clone(),
                    }])),
                self.record_deleted_file(st, branch, realpath, file_header.key)?
                    self.diff_with_binary(inode, branch, st, &mut ret, Rc::new(realpath.clone()))?;
                    self.record_file_addition(st, current_inode, parent_node, realpath, basename)
        realpath: &Path,
        let e =
            Edge::zero(EdgeFlags::PARENT_EDGE | EdgeFlags::FOLDER_EDGE | EdgeFlags::DELETED_EDGE);
                    previous: EdgeFlags::FOLDER_EDGE | EdgeFlags::PARENT_EDGE
    pub fn finish(
        self,
    ) -> (
        Vec<Record<Rc<RefCell<ChangeContext<PatchId>>>>>,
        HashSet<InodeUpdate>,
    ) {
use backend::*;
use patch::*;
use {ErrorKind, Result};
use graph;
use patch::*;
use {Error, Result};

use std::path::{Path, PathBuf};
use std::fs::metadata;
use diff;
use rand;
use std;
use std::collections::HashSet;
use std::fs::metadata;
use std::io::BufRead;
use rand;
use std::io::Read;
#[cfg(not(windows))]
use std::os::unix::fs::PermissionsExt;
use std::io::Read;
use std::path::{Path, PathBuf};
use std::rc::Rc;
use std::cell::RefCell;
use std::collections::HashSet;

use fs_representation::{RepoRoot, RepoPath};

// Unix: expose the entry's full mode bits.
#[cfg(not(windows))]
fn permissions(attr: &std::fs::Metadata) -> Option<usize> {
    let mode = attr.permissions().mode();
    Some(mode as usize)
}
// Windows: Unix mode bits do not exist, so report no permissions.
#[cfg(windows)]
fn permissions(_: &std::fs::Metadata) -> Option<usize> {
    None
}


/// Read `path`'s on-disk metadata and pack it into a `FileMetadata`:
/// permission bits masked to `0o777` (defaulting to `0o755` where the
/// platform reports none), plus an is-directory flag.
fn file_metadata(path: &Path) -> Result<FileMetadata> {
    let attr = metadata(&path)?;
    let perms = permissions(&attr).unwrap_or(0o755);
    debug!("permissions = {:?}", perms);
    let is_dir = attr.is_dir();
    Ok(FileMetadata::new(perms & 0o777, is_dir))
}

impl<U: Transaction, R> GenericTxn<U, R> {
    pub fn globalize_change(
        &self,
        change: Change<ChangeContext<PatchId>>,
    ) -> Change<ChangeContext<Hash>> {

        match change {

            Change::NewNodes {
                up_context,
                down_context,
                flag,
                line_num,
                nodes,
                inode,
            } => {
                Change::NewNodes {
                    up_context: Rc::try_unwrap(up_context)
                        .unwrap()
                        .into_inner()
                        .iter()
                        .map(|&k| self.external_key_opt(k))
                        .collect(),
                    down_context: Rc::try_unwrap(down_context)
                        .unwrap()
                        .into_inner()
                        .iter()
                        .map(|&k| self.external_key_opt(k))
                        .collect(),
                    flag,
                    line_num,
                    nodes,
                    inode,
                }
            }
            } => Change::NewNodes {
                up_context: up_context
                    .iter()
                    .map(|&k| self.external_key_opt(k))
                    .collect(),
                down_context: down_context
                    .iter()
                    .map(|&k| self.external_key_opt(k))
                    .collect(),
                flag,
                line_num,
                nodes,
                inode,
            },
            Change::NewEdges {
                previous,
                flag,
                edges,
                inode,
            } => {
                Change::NewEdges {
                    previous,
                    flag,
                    edges,
                    inode,
                }
            }
            } => Change::NewEdges {
                previous,
                flag,
                edges,
                inode,
            },
        }
    }
    pub fn globalize_record(
        &self,
        change: Record<ChangeContext<PatchId>>,
    ) -> Record<ChangeContext<Hash>> {

        match change {
            Record::FileMove { new_name, del, add } => Record::FileMove {
                new_name,
                del: self.globalize_change(del),
                add: self.globalize_change(add),
            },
            Record::FileDel {
                name,
                del,
                contents,
            } => Record::FileDel {
                name,
                del: self.globalize_change(del),
                contents: contents.map(|del| self.globalize_change(del)),
            },
            Record::FileAdd {
                name,
                add,
                contents,
            } => Record::FileAdd {
                name,
                add: self.globalize_change(add),
                contents: contents.map(|add| self.globalize_change(add)),
            },
            Record::Change {
                file,
                change,
                replacement,
                old_line,
                new_line,
            } => Record::Change {
                file,
                change: self.globalize_change(change),
                replacement: replacement.map(|x| self.globalize_change(x)),
                old_line,
                new_line,
            },
        }
    }
}

/// Mutable state threaded through a whole `record` run.
pub struct RecordState {
    /// Next free line id for nodes created during this record.
    line_num: LineId,
    /// Inode updates to apply to the trees/inodes tables once the
    /// resulting patch has been applied.
    updatables: HashSet<InodeUpdate>,
    /// The changes recorded so far.
    actions: Vec<Record<ChangeContext<PatchId>>>,
    // NOTE(review): filled by `diff` (passed as `&mut st.redundant`);
    // presumably edges found to be redundant during retrieval — confirm.
    redundant: Vec<(Key<PatchId>, Edge)>,
}

/// An account of the files that have been added, moved or deleted, as
/// returned by record, and used by apply (when applying a patch
/// created locally) to update the trees and inodes databases.
#[derive(Debug, Hash, PartialEq, Eq)]
pub enum InodeUpdate {
    /// A file was added to the working copy.
    Add {
        /// `LineId` in the new patch.
        line: LineId,
        /// `FileMetadata` in the updated file.
        meta: FileMetadata,
        /// `Inode` added by this file addition.
        inode: Inode,
    },
    /// A file was moved and/or its metadata changed.
    Moved {
        /// `Inode` of the moved file.
        inode: Inode,
        /// New `FileMetadata` of the moved file.
        metadata: FileMetadata,
    },
    /// A file was deleted.
    Deleted {
        /// `Inode` of the deleted file.
        inode: Inode,
    },
}

/// Status of a working-copy file relative to the current branch.
#[derive(Debug)]
pub enum WorkingFileStatus {
    /// Name and/or metadata differ between the branch and the working copy.
    Moved {
        from: FileMetadata,
        to: FileMetadata,
    },
    /// Gone from the working copy, or marked deleted in the branch.
    Deleted,
    /// Unchanged.
    Ok,
    /// In a zombie (conflicted alive/dead) state; record leaves it alone.
    Zombie,
}

/// A file is treated as text when none of its first 8000 bytes is NUL.
pub(crate) fn is_text(x: &[u8]) -> bool {
    !x.iter().take(8000).any(|&byte| byte == 0)
}

impl<'env, R: rand::Rng> MutTxn<'env, R> {
    /// Create appropriate NewNodes for adding a file.
    ///
    /// Pushes a `Record::FileAdd` onto `st.actions` for the name node,
    /// then (for regular files) fills in its `contents` with either
    /// one node per line (text) or a single node (binary).
    ///
    /// Returns `Ok(Some(line))` with the blank-node line id when the
    /// added file is a directory (so the caller can recurse), and
    /// `Ok(None)` for regular files.
    fn record_file_addition(
        &self,
        repo_root: &RepoRoot<impl AsRef<Path>>,
        st: &mut RecordState,
        current_inode: Inode,
        parent_node: Key<Option<PatchId>>,
        realpath: &mut RepoPath<std::path::PathBuf>,
        basename: &str,
    ) -> Result<Option<LineId>> {
        // Reserve two fresh line ids: the name node and the blank node
        // that anchors the file's contents.
        let name_line_num = st.line_num.clone();
        let blank_line_num = st.line_num + 1;
        st.line_num += 2;

        debug!("metadata for {:?}", realpath);
        let path = &repo_root.absolutize(realpath);
        // `?` replaces the former match that only re-returned the error.
        let meta = file_metadata(&path)?;
        debug!("meta = {:?}", meta.is_dir());

        // A name node is 2 bytes of metadata followed by the basename.
        let mut name = Vec::with_capacity(basename.len() + 2);
        name.write_metadata(meta).unwrap(); // 2 bytes.
        name.extend(basename.as_bytes());

        st.updatables.insert(InodeUpdate::Add {
            line: blank_line_num.clone(),
            meta,
            inode: current_inode.clone(),
        });
        let up_context_ext = Key {
            patch: if parent_node.line.is_root() {
                Some(Hash::None)
            } else if let Some(patch_id) = parent_node.patch {
                Some(self.external_hash(patch_id).to_owned())
            } else {
                None
            },
            line: parent_node.line.clone(),
        };
        let up_context = Key {
            patch: if parent_node.line.is_root() {
                Some(ROOT_PATCH_ID)
            } else if let Some(patch_id) = parent_node.patch {
                Some(patch_id)
            } else {
                None
            },
            line: parent_node.line.clone(),
        };
        st.actions.push(Record::FileAdd {
            name: realpath.to_owned(),
            add: Change::NewNodes {
                up_context: vec![up_context],
                line_num: name_line_num,
                down_context: vec![],
                // The name node, then the (empty) blank node.
                nodes: vec![name, vec![]],
                flag: EdgeFlags::FOLDER_EDGE,
                inode: up_context_ext.clone(),
            },
            contents: None,
        });
        // Reading the file
        if !meta.is_dir() {
            let mut node = Vec::new();
            {
                let mut f = std::fs::File::open(path.as_path())?;
                f.read_to_end(&mut node)?;
            }

            let up_context = Key {
                patch: None,
                line: blank_line_num.clone(),
            };
            let up_context_ext = Key {
                patch: None,
                line: blank_line_num.clone(),
            };
            if is_text(&node) {
                // Text: one node per line (keeping the '\n' terminators).
                let mut nodes = Vec::new();
                let mut line = Vec::new();
                let mut f = &node[..];
                while let Ok(l) = f.read_until(b'\n', &mut line) {
                    if l == 0 {
                        break;
                    }
                    nodes.push(line.clone());
                    line.clear()
                }
                let len = nodes.len();
                if !nodes.is_empty() {
                    if let Some(Record::FileAdd {
                        ref mut contents, ..
                    }) = st.actions.last_mut()
                    {
                        *contents = Some(Change::NewNodes {
                            up_context: vec![up_context],
                            line_num: st.line_num,
                            down_context: vec![],
                            nodes,
                            flag: EdgeFlags::empty(),
                            inode: up_context_ext,
                        });
                    }
                }
                st.line_num += len;
            } else if let Some(Record::FileAdd {
                ref mut contents, ..
            }) = st.actions.last_mut()
            {
                // Binary: a single node holding the whole file.
                *contents = Some(Change::NewNodes {
                    up_context: vec![up_context],
                    line_num: st.line_num,
                    down_context: vec![],
                    nodes: vec![node],
                    flag: EdgeFlags::empty(),
                    inode: up_context_ext,
                });
                st.line_num += 1;
            }
            Ok(None)
        } else {
            Ok(Some(blank_line_num))
        }
    }

    /// Diff for binary files, doesn't bother splitting the file in
    /// lines. This is wasteful, but doesn't break the format, and
    /// doesn't create conflicts inside binary files.
    fn diff_with_binary(
        &self,
        repo_root: &RepoRoot<impl AsRef<Path>>,
        algorithm: diff::Algorithm,
        inode: Key<Option<Hash>>,
        branch: &Branch,
        st: &mut RecordState,
        ret: &mut graph::Graph,
        path: Rc<RepoPath<PathBuf>>,
    ) -> Result<()> {
        debug!("opening file for diff: {:?}", path);
        // Slurp the whole working-copy file into memory, then hand it
        // to the generic diff together with the recording state.
        let mut contents = Vec::new();
        {
            let mut file = std::fs::File::open(repo_root.absolutize(&path))?;
            file.read_to_end(&mut contents)?;
        }
        self.diff(
            algorithm,
            inode,
            branch,
            path,
            &mut st.line_num,
            &mut st.actions,
            &mut st.redundant,
            ret,
            &contents,
        )
    }

    fn record_moved_file(
        &self,
        repo_root: &RepoRoot<impl AsRef<Path>>,
        diff_algorithm: diff::Algorithm,
        branch: &Branch,
        realpath: &mut RepoPath<std::path::PathBuf>,
        st: &mut RecordState,
        parent_node: Key<Option<PatchId>>,
        current_node: Key<PatchId>,
        basename: &str,
        new_meta: FileMetadata,
        old_meta: FileMetadata,
    ) -> Result<()> {
        debug!("record_moved_file: parent_node={:?}", parent_node);
        // Delete all former names.
        let mut edges = Vec::new();
        // Now take all grandparents of l2, delete them.

        let mut name = Vec::with_capacity(basename.len() + 2);
        name.write_metadata(new_meta).unwrap();
        name.extend(basename.as_bytes());
        for parent in self.iter_parents(branch, current_node, EdgeFlags::FOLDER_EDGE) {
            debug!("iter_parents: {:?}", parent);
            let previous_name: &[u8] = match self.get_contents(parent.dest) {
                None => &[],
                Some(n) => n.as_slice(),
            };
            let name_changed = (&previous_name[2..] != &name[2..]) ||
                (new_meta != old_meta && cfg!(not(windows)));
            let name_changed =
                (&previous_name[2..] != &name[2..]) || (new_meta != old_meta && cfg!(not(windows)));

            for grandparent in self.iter_parents(branch, parent.dest, EdgeFlags::FOLDER_EDGE) {
                debug!("iter_parents: grandparent = {:?}", grandparent);
                let grandparent_changed = if let Some(ref parent_node_patch) = parent_node.patch {
                    *parent_node_patch != grandparent.dest.patch ||
                        parent_node.line != grandparent.dest.line
                    *parent_node_patch != grandparent.dest.patch
                        || parent_node.line != grandparent.dest.line
                } else {
                    true
                };
                if grandparent_changed || name_changed {
                    edges.push(NewEdge {
                        from: Key {
                            line: parent.dest.line.clone(),
                            patch: Some(self.external_hash(parent.dest.patch).to_owned()),
                        },
                        to: Key {
                            line: grandparent.dest.line.clone(),
                            patch: Some(self.external_hash(grandparent.dest.patch).to_owned()),
                        },
                        introduced_by: Some(
                            self.external_hash(grandparent.introduced_by).to_owned(),
                        ),
                    })
                }
            }
        }
        debug!("edges:{:?}", edges);
        let up_context_ext = Key {
            patch: if parent_node.line.is_root() {
                Some(Hash::None)
            } else if let Some(parent_patch) = parent_node.patch {
                Some(self.external_hash(parent_patch).to_owned())
            } else {
                None
            },
            line: parent_node.line.clone(),
        };
        let up_context = Key {
            patch: if parent_node.line.is_root() {
                Some(ROOT_PATCH_ID)
            } else if let Some(parent_patch) = parent_node.patch {
                Some(parent_patch)
            } else {
                None
            },
            line: parent_node.line.clone(),
        };
        if !edges.is_empty() {
            // If this file's name or meta info has changed.
            st.actions.push(Record::FileMove {
                new_name: realpath.to_owned(),
                del: Change::NewEdges {
                    edges: edges,
                    previous: EdgeFlags::FOLDER_EDGE | EdgeFlags::PARENT_EDGE,
                    flag: EdgeFlags::DELETED_EDGE | EdgeFlags::FOLDER_EDGE | EdgeFlags::PARENT_EDGE,
                    inode: up_context_ext.clone(),
                },
                add: Change::NewNodes {
                    up_context: vec![up_context],
                    line_num: st.line_num,
                    down_context: Rc::new(RefCell::new(vec![
                        Key {
                            patch: Some(current_node.patch),
                            line: current_node.line.clone(),
                        },
                    ])),
                    down_context: vec![Key {
                        patch: Some(current_node.patch),
                        line: current_node.line.clone(),
                    }],
                    nodes: vec![name],
                    flag: EdgeFlags::FOLDER_EDGE,
                    inode: up_context_ext.clone(),
                },
            });
            st.line_num += 1;
        }
        if !old_meta.is_dir() {
            info!("retrieving");
            let mut ret = self.retrieve(branch, current_node);
            debug!("diff");
            let patch_ext = self.get_external(current_node.patch).unwrap();
            self.diff_with_binary(
                repo_root,
                diff_algorithm,
                Key {
                    patch: Some(patch_ext.to_owned()),
                    line: current_node.line,
                },
                branch,
                st,
                &mut ret,
                Rc::new(realpath.clone()),
            )?;
        };
        Ok(())
    }

    fn record_deleted_file(
        &self,
        st: &mut RecordState,
        branch: &Branch,
        realpath: &RepoPath<impl AsRef<Path>>,
        current_node: Key<PatchId>,
    ) -> Result<()> {
        debug!("record_deleted_file");
        let mut edges = Vec::new();
        let mut previous = EdgeFlags::FOLDER_EDGE | EdgeFlags::PARENT_EDGE;
        // Now take all grandparents of the current node, delete them.
        for parent in self.iter_parents(branch, current_node, EdgeFlags::FOLDER_EDGE) {
            for grandparent in self.iter_parents(branch, parent.dest, EdgeFlags::FOLDER_EDGE) {
                edges.push(NewEdge {
                    from: self.external_key(&parent.dest).unwrap(),
                    to: self.external_key(&grandparent.dest).unwrap(),
                    introduced_by: Some(self.external_hash(grandparent.introduced_by).to_owned()),
                });
                previous = grandparent.flag;
            }
        }
        // If the file is a directory, delete recursively
        let mut file_edges = vec![];
        {
            debug!("del={:?}", current_node);
            let ret = self.retrieve(branch, current_node);
            debug!("ret {:?}", ret);
            for l in ret.lines.iter() {
                if l.key != ROOT_KEY {
                    let ext_key = self.external_key(&l.key).unwrap();
                    debug!("ext_key={:?}", ext_key);
                    for v in self.iter_parents(branch, l.key, EdgeFlags::empty()) {

                        debug!("v={:?}", v);
                        file_edges.push(NewEdge {
                            from: ext_key.clone(),
                            to: self.external_key(&v.dest).unwrap(),
                            introduced_by: Some(self.external_hash(v.introduced_by).to_owned()),
                        });
                        if let Some(inode) = self.get_revinodes(v.dest) {
                            st.updatables.insert(InodeUpdate::Deleted {
                                inode: inode.to_owned()
                                inode: inode.to_owned()
                                inode: inode.to_owned(),
                            });
                        }
                    }
                    for v in self.iter_parents(branch, l.key, EdgeFlags::FOLDER_EDGE) {

                        debug!("v={:?}", v);
                        edges.push(NewEdge {
                            from: ext_key.clone(),
                            to: self.external_key(&v.dest).unwrap(),
                            introduced_by: Some(self.external_hash(v.introduced_by).to_owned()),
                        });
                    }
                }
            }
        }

        if !edges.is_empty() {
            st.actions.push(Record::FileDel {
                name: realpath.to_owned(),
                del: Change::NewEdges {
                    edges: edges,
                    previous,
                    flag: EdgeFlags::FOLDER_EDGE | EdgeFlags::PARENT_EDGE | EdgeFlags::DELETED_EDGE,
                    inode: self.external_key(&current_node).unwrap(),
                },
                contents: if file_edges.is_empty() {
                    None
                } else {
                    Some(Change::NewEdges {
                        edges: file_edges,
                        previous: EdgeFlags::PARENT_EDGE,
                        flag: EdgeFlags::PARENT_EDGE | EdgeFlags::DELETED_EDGE,
                        inode: self.external_key(&current_node).unwrap(),
                    })
                },
            });
        }
        Ok(())
    }

    /// Recurse into every child of `current_inode` in the tree table,
    /// calling `record_inode` on each actual entry.
    fn record_children(
        &self,
        repo_root: &RepoRoot<impl AsRef<Path>>,
        diff_algorithm: diff::Algorithm,
        branch: &Branch,
        st: &mut RecordState,
        path: &mut RepoPath<std::path::PathBuf>,
        current_node: Key<Option<PatchId>>,
        current_inode: Inode,
        obsolete_inodes: &mut Vec<Inode>,
    ) -> Result<()> {
        debug!("children of current_inode {}", current_inode.to_hex());
        // Iterate the tree table starting from (current_inode, "").
        let file_id = OwnedFileId {
            parent_inode: current_inode.clone(),
            basename: SmallString::from_str(""),
        };
        debug!("iterating tree, starting from {:?}", file_id.as_file_id());
        for (k, v) in self
            .iter_tree(Some((&file_id.as_file_id(), None)))
            .take_while(|&(ref k, _)| k.parent_inode == current_inode)
        {
            debug!("calling record_all recursively, {}", line!());

            if k.basename.len() > 0 {
                // If this is an actual file and not just the "."
                self.record_inode(
                    repo_root,
                    diff_algorithm,
                    branch,
                    st,
                    current_node.clone(), // parent
                    v,                    // current_inode
                    path,
                    obsolete_inodes,
                    k.basename.as_str(),
                )?
            }
        }
        Ok(())
    }

    /// If `inode` is a file known to the current branch, return
    /// whether it's been moved, deleted, or its "status" (including
    /// permissions) has been changed.
    ///
    /// Returns `None` if `inode` is not known to the current branch.
    fn inode_status(&self,
                    repo_root: &RepoRoot<impl AsRef<Path>>,
                    inode: Inode,
                    path: &RepoPath<impl AsRef<Path>>)
                    -> (Option<(WorkingFileStatus, FileHeader)>) {
        match self.get_inodes(inode) {
            Some(file_header) => {
                let old_meta = file_header.metadata;
                let new_meta = file_metadata(&repo_root.absolutize(path)).ok();
                
                debug!("current_node={:?}", file_header);
                debug!("old_attr={:?},int_attr={:?}", old_meta, new_meta);

                let status = match (new_meta, file_header.status) {
                    (Some(new_meta), FileStatus::Moved) => WorkingFileStatus::Moved {
                        from: old_meta,
                        to: new_meta,
                    },
                    (Some(new_meta), _) if old_meta != new_meta => WorkingFileStatus::Moved {
                        from: old_meta,
                        to: new_meta,
                    },
                    (None, _) |
                    (_, FileStatus::Deleted) => WorkingFileStatus::Deleted,
                    (None, _) | (_, FileStatus::Deleted) => WorkingFileStatus::Deleted,
                    (Some(_), FileStatus::Ok) => WorkingFileStatus::Ok,
                    (Some(_), FileStatus::Zombie) => WorkingFileStatus::Zombie,
                };
                Some((status, file_header.clone()))
            }
            None => None,
        }
    }


    fn record_inode(
        &self,
        repo_root: &RepoRoot<impl AsRef<Path>>,
        diff_algorithm: diff::Algorithm,
        branch: &Branch,
        st: &mut RecordState,
        parent_node: Key<Option<PatchId>>,
        current_inode: Inode,
        realpath: &mut RepoPath<std::path::PathBuf>,
        obsolete_inodes: &mut Vec<Inode>,
        basename: &str,
    ) -> Result<()> {
        realpath.push(basename);
        debug!("realpath: {:?}", realpath);
        debug!("inode: {:?}", current_inode);
        debug!("header: {:?}", self.get_inodes(current_inode));
        let status_header = self.inode_status(repo_root, current_inode, realpath);
        debug!("status_header: {:?}", status_header);
        let mut current_key = match &status_header {
            &Some((_, ref file_header)) => {
                Some(Key {
                    patch: Some(file_header.key.patch.clone()),
                    line: file_header.key.line.clone(),
                })
            }
            &Some((_, ref file_header)) => Some(Key {
                patch: Some(file_header.key.patch.clone()),
                line: file_header.key.line.clone(),
            }),
            &None => None,
        };

        match status_header {
            Some((WorkingFileStatus::Moved {
                      from: old_meta,
                      to: new_meta,
                  },
                  file_header)) => {
            Some((
                WorkingFileStatus::Moved {
                    from: old_meta,
                    to: new_meta,
                },
                file_header,
            )) => {
                st.updatables.insert(InodeUpdate::Moved {
                    inode: current_inode.clone(),
                    metadata: new_meta,
                });
                self.record_moved_file(
                    repo_root,
                    diff_algorithm,
                    branch,
                    realpath,
                    st,
                    parent_node,
                    file_header.key,
                    basename,
                    new_meta,
                    old_meta,
                )?
            }
            Some((WorkingFileStatus::Deleted, file_header)) => {
                self.record_deleted_file(
                self.record_deleted_file(
                    st,
                    branch,
                    realpath,
                    file_header.key,
                )?
                st.updatables.insert(InodeUpdate::Deleted {
                    inode: current_inode.clone()
                    inode: current_inode.clone(),
                });
                self.record_deleted_file(st, branch, realpath, file_header.key)?;
                // If we are deleting a directory, don't recurse,
                // because record_deleted_file already did it.
                realpath.pop();
                return Ok(());
            }
            Some((WorkingFileStatus::Ok, file_header)) => {
                if !file_header.metadata.is_dir() {
                    let mut ret = self.retrieve(branch, file_header.key);
                    debug!("now calling diff {:?}", file_header.key);
                    let inode = Key {
                        patch: Some(self.external_hash(file_header.key.patch).to_owned()),
                        line: file_header.key.line,
                    };
                    self.confirm_path(st, branch, realpath, file_header.key)?;
                    self.diff_with_binary(
                        repo_root,
                        diff_algorithm,
                        inode,
                        branch,
                        st,
                        &mut ret,
                        Rc::new(realpath.clone()),
                    )?;
                } else {
                    // Confirm
                    self.confirm_path(st, branch, &realpath, file_header.key)?;
                }
            }
            Some((WorkingFileStatus::Zombie, _)) => {
                // This file is a zombie, but the user has not
                // specified anything to do with this file, so leave
                // it alone.
            }
            None => {
                if let Ok(new_key) = self.record_file_addition(
                    st,
                    current_inode,
                    parent_node,
                    realpath,
                    basename,
                )
                if let Ok(new_key) =
                    self.record_file_addition(repo_root, st, current_inode, parent_node,
                                              realpath, basename)
                {
                    current_key = new_key.map(|next| {
                        Key {
                            patch: None,
                            line: next,
                        }
                    current_key = new_key.map(|next| Key {
                        patch: None,
                        line: next,
                    })
                } else {
                    obsolete_inodes.push(current_inode)
                }
            }

        }

        let current_key = current_key;
        debug!("current_node={:?}", current_key);
        if let Some(current_node) = current_key {
            self.record_children(
                repo_root,
                diff_algorithm,
                branch,
                st,
                realpath,
                current_node,
                current_inode,
                obsolete_inodes,
            )?;
        };
        realpath.pop();
        Ok(())
    }


    fn external_newedge(&self, from: Key<PatchId>, to: Key<PatchId>, introduced_by: PatchId) -> NewEdge {
    fn external_newedge(
        &self,
        from: Key<PatchId>,
        to: Key<PatchId>,
        introduced_by: PatchId,
    ) -> NewEdge {
        NewEdge {
            from: Key {
                patch: Some(self.external_hash(from.patch).to_owned()),
                line: from.line
                line: from.line,
            },
            to: Key {
                patch: Some(self.external_hash(to.patch).to_owned()),
                line: to.line
                line: to.line,
            },
            introduced_by: Some(self.external_hash(introduced_by).to_owned()),
        }
    }

    /// `key` must be a non-root inode key.
    fn confirm_path(&self, st: &mut RecordState, branch: &Branch, realpath: &Path, key: Key<PatchId>) -> Result<()> {
    fn confirm_path(
        &self,
        st: &mut RecordState,
        branch: &Branch,
        realpath: &RepoPath<impl AsRef<Path>>,
        key: Key<PatchId>,
    ) -> Result<()> {
        debug!("confirm_path");
        let e = Edge::zero(EdgeFlags::PARENT_EDGE|EdgeFlags::FOLDER_EDGE|EdgeFlags::DELETED_EDGE);
        let f = EdgeFlags::PARENT_EDGE | EdgeFlags::FOLDER_EDGE | EdgeFlags::DELETED_EDGE;
        // Are there deleted parent edges?
        let mut edges = Vec::new();
        for v in self.iter_adjacent(branch, key, f, f) {
            debug!("confirm {:?}", v.dest);
            edges.push(self.external_newedge(key, v.dest, v.introduced_by));
            for v_ in self.iter_adjacent(branch, v.dest, f, f) {
                debug!("confirm 2 {:?}", v_.dest);
                edges.push(self.external_newedge(v.dest, v_.dest, v_.introduced_by));
            }
        }

        if !edges.is_empty() {
            let inode = Key {
                patch: Some(self.external_hash(key.patch).to_owned()),
                line: key.line.clone(),
            };
            st.actions.push(Record::FileAdd {
                name: realpath.to_owned(),
                add: Change::NewEdges {
                    edges,
                    previous: EdgeFlags::FOLDER_EDGE | EdgeFlags::PARENT_EDGE | EdgeFlags::DELETED_EDGE,
                    previous: EdgeFlags::FOLDER_EDGE
                        | EdgeFlags::PARENT_EDGE
                        | EdgeFlags::DELETED_EDGE,
                    flag: EdgeFlags::FOLDER_EDGE | EdgeFlags::PARENT_EDGE,
                    inode,
                },
                contents: None,
            });
        }
        debug!("/confirm_path");

        Ok(())
    }
}

impl RecordState {
    pub fn new() -> Self {
        RecordState {
            line_num: LineId::new() + 1,
            actions: Vec::new(),
            updatables: HashSet::new(),
            redundant: Vec::new(),
        }
    }

    pub fn finish(self) -> (Vec<Record<Rc<RefCell<ChangeContext<PatchId>>>>>, HashSet<InodeUpdate>) {
    pub fn finish(self) -> (Vec<Record<ChangeContext<PatchId>>>, HashSet<InodeUpdate>) {
        (self.actions, self.updatables)
    }
}

impl<'env, T: rand::Rng> MutTxn<'env, T> {
    pub fn record(
        &mut self,
        diff_algorithm: diff::Algorithm,
        state: &mut RecordState,
        branch: &Branch,
        working_copy: &RepoRoot<impl AsRef<Path>>,
        prefix: &RepoPath<impl AsRef<Path>>,
    ) -> Result<()> {

        let mut obsolete_inodes = Vec::new();

        match prefix.split() {
            Some ((parent_path, basename)) =>
            {
                let inode = self.find_inode(prefix)?;
                // Key needs to be the parent's node.
                let key: Key<PatchId> = {
                    // find this inode's parent.
                    if let Some(parent) = self.get_revtree(inode) {
                        if parent.parent_inode.is_root() {
                            ROOT_KEY
                        } else if let Some(key) = self.get_inodes(parent.parent_inode) {
                            key.key
                        } else {
                            return Err(Error::FileNotInRepo(prefix.to_path_buf()));
                        }
                    } else {
                        return Err(Error::FileNotInRepo(prefix.to_path_buf()));
                    }
                };
                let key = Key {
                    patch: Some(key.patch),
                    line: key.line,
                };
                self.record_inode(
                    working_copy,
                    diff_algorithm,
                    &branch,
                    state,
                    key,
                    inode,
                    &mut parent_path.to_owned(),
                    &mut obsolete_inodes,
                    &basename.to_str().unwrap(),
                )?













1

2
3
4
5
6







7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23

24
25
26






27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46


47
48
49

50
51







52
53
54
55
56
57
58
59
60

61
62
63
64
65
66
67
68







69
70
71
72
73
74
75
76
use rand;
use std::mem::swap;
                k == key
                    && v.flag
                        <= EdgeFlags::PARENT_EDGE | EdgeFlags::PSEUDO_EDGE | EdgeFlags::FOLDER_EDGE
                    && v.introduced_by == patch_id
            if !edges.contains_key(&k) {
                edges.insert(k.to_owned(), v.to_owned());
                k == key && v.flag <= EdgeFlags::PSEUDO_EDGE | EdgeFlags::FOLDER_EDGE
                    && v.introduced_by == patch_id
            if !edges.contains_key(&k) {
                edges.insert(k.to_owned(), v.to_owned());
use backend::*;
use std::mem::swap;
use rand;
use std::collections::HashMap;
use Result;

impl<'env, T: rand::Rng> MutTxn<'env, T> {

    fn collect_up_context_repair(&self,
                                 branch: &Branch,
                                 key: Key<PatchId>,
                                 patch_id: PatchId,
                                 edges: &mut HashMap<Key<PatchId>, Edge>) {

    fn collect_up_context_repair(
        &self,
        branch: &Branch,
        key: Key<PatchId>,
        patch_id: PatchId,
        edges: &mut HashMap<Key<PatchId>, Edge>,
    ) {
        debug!("collect up {:?}", key);
        let start_flag = EdgeFlags::PARENT_EDGE | EdgeFlags::PSEUDO_EDGE;
        for v in self
            .iter_adjacent(branch, key, start_flag, start_flag | EdgeFlags::FOLDER_EDGE)
            .take_while(|v| v.introduced_by == patch_id)
        {
            if !edges.contains_key(&key) {
                edges.insert(key.to_owned(), v.to_owned());
                self.collect_up_context_repair(branch, v.dest, patch_id, edges)
            }

        }
    }

    /// Downward counterpart of `collect_up_context_repair`: walks
    /// pseudo edges from `key`, collecting into `edges` the repairs
    /// introduced by `patch_id` and recursing through each edge's
    /// destination.
    fn collect_down_context_repair(
        &self,
        branch: &Branch,
        key: Key<PatchId>,
        patch_id: PatchId,
        edges: &mut HashMap<Key<PatchId>, Edge>,
    ) {
        debug!("collect down {:?}", key);
        for v in self
            .iter_adjacent(
                branch,
                key,
                EdgeFlags::PSEUDO_EDGE,
                EdgeFlags::PSEUDO_EDGE | EdgeFlags::FOLDER_EDGE,
            )
            .take_while(|v| v.introduced_by == patch_id)
        {
            // As in the upward walk, only the first matching edge per
            // key is recorded; this also stops the recursion on
            // already-visited keys.
            if !edges.contains_key(&key) {
                edges.insert(key.to_owned(), v.to_owned());
                self.collect_down_context_repair(branch, v.dest, patch_id, edges)
            }
        }
    }

    pub fn remove_up_context_repair(&mut self,
                                    branch: &mut Branch,
                                    key: Key<PatchId>,
                                    patch_id: PatchId,
                                    edges: &mut HashMap<Key<PatchId>, Edge>)
                                    -> Result<()> {

    pub fn remove_up_context_repair(
        &mut self,
        branch: &mut Branch,
        key: Key<PatchId>,
        patch_id: PatchId,
        edges: &mut HashMap<Key<PatchId>, Edge>,
    ) -> Result<()> {
        self.collect_up_context_repair(branch, key, patch_id, edges);
        for (mut k, mut v) in edges.drain() {

            debug!("remove {:?} {:?}", k, v);

            self.del_edge_both_dirs(branch, k, v)?;
        }

        Ok(())
    }

    pub fn remove_down_context_repair(&mut self,
                                      branch: &mut Branch,
                                      key: Key<PatchId>,
                                      patch_id: PatchId,
                                      edges: &mut HashMap<Key<PatchId>, Edge>)
                                      -> Result<()> {

    pub fn remove_down_context_repair(
        &mut self,
        branch: &mut Branch,
        key: Key<PatchId>,
        patch_id: PatchId,
        edges: &mut HashMap<Key<PatchId>, Edge>,
    ) -> Result<()> {
        self.collect_down_context_repair(branch, key, patch_id, edges);





1
2

3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48










49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75


76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220

221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282

283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306

307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351


352
353
354
355
356
357
358
359



360
361
362
363
364
365
366
367
368
369
370
371
372
373
374



375
376
use rand;
use backend::*;
use patch::*;
use Result;
use Result;
use super::Workspace;
use apply::find_alive::FindAlive;
use std::str::from_utf8;
use backend::*;
use patch::*;
use rand;
use std::collections::HashSet;
use std::str::from_utf8;
use Result;

impl<'env, T: rand::Rng> MutTxn<'env, T> {
    /// Unrecords a `Change::NewEdges` of `patch_id`: restores the
    /// previous edges, removes the pseudo-edges the patch introduced,
    /// repairs graph connectivity, and updates the file tables when
    /// folder edges are involved.
    pub(in unrecord) fn unrecord_edges(
        &mut self,
        find_alive: &mut FindAlive,
        branch: &mut Branch,
        patch_id: PatchId,
        dependencies: &HashSet<Hash>,
        previous: EdgeFlags,
        flags: EdgeFlags,
        edges: &[NewEdge],
        w: &mut Workspace,
        unused_in_other_branches: bool,
    ) -> Result<()> {
        debug!("unrecord_edges: {:?}", edges);

        // Revert the edges, i.e. add the previous edges.
        self.remove_edges(
            branch,
            patch_id,
            previous,
            flags,
            edges,
            unused_in_other_branches,
        )?;

        // If this NewEdges caused pseudo-edges to be inserted at the
        // time of applying this patch, remove them, because these
        // vertices don't need them anymore (we'll reconnect possibly
        // disconnected parts later).
        self.remove_patch_pseudo_edges(branch, patch_id, flags, edges, w)?;

        // We now take care of the connectivity of the alive graph,
        // which we must maintain.
        if previous.contains(EdgeFlags::DELETED_EDGE) {
            // This NewEdges turns a deleted edge into an alive one.
            // Therefore, unrecording this NewEdges introduced DELETED
            // edges to the graph, which might have disconnect the
            // graph. Add pseudo edges where necessary to keep the
            // alive component of the graph connected.
            let targets: Vec<_> = if flags.contains(EdgeFlags::PARENT_EDGE) {
                edges
                    .iter()
                    .map(|e| self.internal_key(&e.from, patch_id))
                    .collect()
            } else {
                edges
                    .iter()
                    .map(|e| self.internal_key(&e.to, patch_id))
                    .collect()
            };
            debug!("previous contains DELETED_EDGE, targets = {:?}", targets);
            self.reconnect_across_deleted_nodes(patch_id, branch, dependencies, &targets)?
        } else {
            // This NewEdge turns an alive edge into a deleted
            // one. Therefore, unapplying it reintroduces alive edges,
            // but these new alive edges might have their context
            // dead. If this is the case, find their closest alive
            // ancestors and descendants, and reconnect.
            assert!(flags.contains(EdgeFlags::DELETED_EDGE));

            // If we're reintroducing a non-deleted edge, there is
            // no reason why the deleted part is still connected
            // to the alive component of the graph, so we must
            // reconnect the deleted part to its alive ancestors
            // and descendants.
            self.reconnect_deletions(branch, patch_id, edges, flags, find_alive)?
        }

        // Now, we're done reconnecting the graph. However, if this
        // NewEdges changed "folder" edges, the inodes and trees
        // tables might have to be updated.
        if flags.contains(EdgeFlags::FOLDER_EDGE) {
            if flags.contains(EdgeFlags::DELETED_EDGE) {
                // This file was deleted by this `NewEdge`. Therefore,
                // unrecording this NewEdges adds it back to the
                // repository. There are two things to do here:
                //
                // - Put it back into trees and revtrees to start
                //   following it again.
                //
                // - Since this file was *not* added by this patch
                // (because no patch can both add and delete the same
                // file), put the file back into inodes and revinodes.
                self.restore_deleted_file(branch, patch_id, edges, flags)?
            } else {
                // This file was undeleted by this patch. One way (the
                // only way?) to create such a patch is by rolling
                // back a patch that deletes a file.
                self.undo_file_reinsertion(patch_id, edges, flags)?
            }
        }

        Ok(())
    }

    /// Handles the case where the patch we are unrecording deletes an
    /// "inode" node, i.e. deletes a file from the system.
    ///
    /// We need (1) to check that, which is done via
    /// `dest_is_an_inode`, and (2) to add the file back into the
    /// `tree`/`revtree` tables and into `inodes`/`revinodes` (with
    /// status `Deleted`).
    fn restore_deleted_file(
        &mut self,
        branch: &Branch,
        patch_id: PatchId,
        edges: &[NewEdge],
        flags: EdgeFlags,
    ) -> Result<()> {
        // Orient each edge so that `dest` is the downstream node.
        let is_upwards = flags.contains(EdgeFlags::PARENT_EDGE);
        for e in edges {
            let (source, dest) = if is_upwards {
                (&e.to, &e.from)
            } else {
                (&e.from, &e.to)
            };
            let source = self.internal_key(source, patch_id).to_owned();
            let dest = self.internal_key(dest, patch_id).to_owned();
            // An "inode node" is identified by empty contents (or no
            // contents entry at all).
            let dest_is_an_inode = if let Some(contents) = self.get_contents(dest) {
                contents.len() == 0
            } else {
                true
            };
            if dest_is_an_inode {
                // This is actually a file deletion, so it's not in
                // the tree anymore. Put it back into tree/revtrees,
                // and into inodes/revinodes.

                // Since patches *must* be recorded from top to
                // bottom, source's parent is an inode, and must be in
                // inodes/revinodes.
                let e = Edge::zero(EdgeFlags::PARENT_EDGE | EdgeFlags::FOLDER_EDGE);
                let source_parent = self
                    .iter_nodes(branch, Some((source, Some(e))))
                    .take_while(|&(k, _)| k == source)
                    .next()
                    .unwrap()
                    .1
                    .dest
                    .to_owned();
                debug!("source_parent = {:?}", source_parent);
                let parent_inode = if source_parent.is_root() {
                    ROOT_INODE
                } else {
                    // There is a complexity choice here: we don't
                    // want to resurrect all paths leading to this
                    // file. Resurrecting only the latest known path
                    // is not deterministic.

                    // So, if the parent doesn't exist, we attach this
                    // to the root of the repository.
                    self.get_revinodes(source_parent).unwrap_or(ROOT_INODE)
                };
                let inode = self.create_new_inode();

                // The name node's contents are 2 bytes of metadata
                // followed by the UTF-8 basename (hence the split_at(2)).
                let (metadata, basename) = {
                    let source_contents = self.get_contents(source).unwrap();
                    assert!(source_contents.len() >= 2);
                    let (a, b) = source_contents.as_slice().split_at(2);
                    let name = SmallString::from_str(from_utf8(b)?);
                    (FileMetadata::from_contents(a), name)
                };

                let file_id = OwnedFileId {
                    parent_inode,
                    basename,
                };
                // Register the restored file in both directions of the tree.
                self.put_tree(&file_id.as_file_id(), inode)?;
                self.put_revtree(inode, &file_id.as_file_id())?;

                // Mark it `Deleted` in inodes: the graph says the file is
                // back, but the working copy has not been updated yet.
                self.replace_inodes(
                    inode,
                    FileHeader {
                        status: FileStatus::Deleted,
                        metadata,
                        key: dest,
                    },
                )?;
                self.replace_revinodes(dest, inode)?;
            }
        }
        Ok(())
    }

    /// Undoes a file re-insertion made by `patch_id`: for every edge
    /// whose downstream node is an "inode" node (empty or missing
    /// contents), removes that file from the inodes tables.
    fn undo_file_reinsertion(
        &mut self,
        patch_id: PatchId,
        edges: &[NewEdge],
        flags: EdgeFlags,
    ) -> Result<()> {
        let upwards = flags.contains(EdgeFlags::PARENT_EDGE);
        for edge in edges {
            // Pick the downstream endpoint of the edge.
            let dest = if upwards { &edge.from } else { &edge.to };
            let internal = self.internal_key(dest, patch_id).to_owned();
            // We're checking here that this is not a move, but
            // really the inverse of a deletion, by checking that
            // `dest` is an "inode node".
            let dest_is_an_inode = match self.get_contents(internal) {
                Some(contents) => contents.len() == 0,
                None => true,
            };
            if dest_is_an_inode {
                self.remove_file_from_inodes(internal)?;
            }
        }
        Ok(())
    }


    fn reconnect_deletions(
        &mut self,
        branch: &mut Branch,
        patch_id: PatchId,
        edges: &[NewEdge],
        flags: EdgeFlags,
        find_alive: &mut FindAlive,
    ) -> Result<()> {
        // For all targets of this edges, finds its
        // alive ascendants, and add pseudo-edges.
        let is_upwards = flags.contains(EdgeFlags::PARENT_EDGE);
        let mut alive_relatives = Vec::new();
        for e in edges.iter() {
            debug!("is_upwards: {:?}", is_upwards);
            let (source, dest) = if is_upwards {
                (&e.to, &e.from)
            } else {
                (&e.from, &e.to)
            };

            let source = self.internal_key(source, patch_id);
            let dest = self.internal_key(dest, patch_id);

            if !self.is_alive(branch, dest) {
                continue;
            }

            // Collect the source's closest alive descendants, if
            // the immediate descendant is not alive.
            find_alive.clear();
            for dead_child in self.iter_adjacent(
                branch,
                dest,
                EdgeFlags::DELETED_EDGE,
                EdgeFlags::DELETED_EDGE,
            ) {
                find_alive.push(dead_child.dest);
            }
            debug!("find_alive {:?}", find_alive);
            alive_relatives.clear();
            let mut edge = Edge::zero(EdgeFlags::empty());
            if self.find_alive_descendants(find_alive, branch, &mut alive_relatives) {
                debug!("alive_descendants: {:?}", alive_relatives);
                for desc in alive_relatives.drain(..) {
                    if dest != desc {
                        edge.flag = EdgeFlags::PSEUDO_EDGE | (flags & EdgeFlags::FOLDER_EDGE);
                        edge.dest = desc;
                        edge.introduced_by = self.internal_hash(&e.introduced_by, patch_id);
                        debug!("put_nodes (line {:?}): {:?} {:?}", line!(), source, edge);
                        self.put_edge_both_dirs(branch, dest, edge)?;
                    }
                }
            }
            // now we'll use alive_relatives to
            // collect alive ancestors.
            debug!("source = {:?}, dest = {:?}", source, dest);
            debug!("alive_ancestors, source = {:?}", source);
            find_alive.clear();
            find_alive.push(source);
            alive_relatives.clear();
            let mut files = Vec::new();
            let mut first_file = None;
            if self.find_alive_ancestors(find_alive, branch, &mut alive_relatives, &mut first_file, &mut files) {
            if self.find_alive_ancestors(
                find_alive,
                branch,
                &mut alive_relatives,
                &mut first_file,
                &mut files,
            ) {
                debug!("alive_ancestors: {:?}", alive_relatives);
                for asc in alive_relatives.drain(..) {
                    if dest != asc {
                        edge.flag = EdgeFlags::PSEUDO_EDGE
                            | EdgeFlags::PARENT_EDGE
                            | (flags & EdgeFlags::FOLDER_EDGE);
                        edge.dest = asc;
                        edge.introduced_by = self.internal_hash(&e.introduced_by, patch_id);
                        debug!("put_edge (line {:?}): {:?} {:?}", line!(), dest, edge);
                        self.put_edge_both_dirs(branch, dest, edge)?;
                    }
                }
                for (mut k, mut v) in files.drain(..) {
                    assert!(v.flag.contains(EdgeFlags::DELETED_EDGE));
                    v.flag = (v.flag | EdgeFlags::PSEUDO_EDGE) ^ EdgeFlags::DELETED_EDGE;
                    self.put_edge_one_dir(branch, k, v)?;
                }

            }
        }
        Ok(())
    }

    fn remove_edges(
        &mut self,
        branch: &mut Branch,
        patch_id: PatchId,
        previous: EdgeFlags,
        flag: EdgeFlags,
        edges: &[NewEdge],
        unused_in_other_branches: bool,
    ) -> Result<()> {
        let mut del_edge = Edge::zero(EdgeFlags::empty());
        del_edge.introduced_by = patch_id;

        let mut edge = Edge::zero(EdgeFlags::empty());
        edge.introduced_by = patch_id;

        for e in edges {
            let int_from = self.internal_key(&e.from, patch_id);
            let int_to = self.internal_key(&e.to, patch_id);

            // Delete the edge introduced by this patch,
            // if this NewEdges is not forgetting its
            // edges.
            del_edge.flag = flag;
            del_edge.dest = int_to.clone();
            debug!("delete {:?} -> {:?}", int_from, del_edge);
            self.del_edge_both_dirs(branch, int_from, del_edge)?;

            // Add its previous version, if these edges
            // are Forget or Map (i.e. not brand new
            // edges).

            // If there are other edges with the
            // same source and target, check that
            // none of these edges knows about the
            // patch that introduced the edge we
            // want to put back in.

            edge.dest = int_to;
            debug!(
                "trying to put an edge from {:?} to {:?} back",
                int_from,
                int_to
                int_from, int_to
            );
            edge.flag = previous;
            edge.introduced_by = self.internal_hash(&e.introduced_by, patch_id);

            if unused_in_other_branches {
                debug!(
                    "unused_in_other_branches: {:?} {:?} {:?}",
                    int_from,
                    edge,
                    patch_id
                    int_from, edge, patch_id
                );
                self.del_cemetery(int_from, edge, patch_id)?;
            }

            // Is this edge deleted by another patch?
            // patch_id has already been removed from the table.
            let edge_is_still_absent = self
                .iter_cemetery(int_from, edge)
                .take_while(|&((k, v), _)| {
                    k == int_from
                        && v.dest == edge.dest
                        && v.flag | EdgeFlags::PSEUDO_EDGE == edge.flag | EdgeFlags::PSEUDO_EDGE
                        && v.introduced_by == edge.introduced_by
                })
                .any(|(_, patch)| {
                    self.get_patch(&branch.patches, patch).is_some()
                });
                .any(|(_, patch)| self.get_patch(&branch.patches, patch).is_some());


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57

58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161

162
163
164
165
166
167
168

169
170
171
172
173
174
175
176
177
178
179


180
181
use rand;
use apply::find_alive::FindAlive;
use backend::*;
use patch::*;
use rand;
use std::collections::{HashMap, HashSet};
use {Error, Result};
mod context_repair;
mod edges;
mod nodes;

/// Scratch state shared by the `unrecord_*` functions while a patch
/// is being unrecorded.
#[derive(Debug)]
struct Workspace {
    // Keys involved in file moves — presumably used to avoid treating
    // a move as a deletion; usage is in unrecord_edges/unrecord_nodes
    // (TODO(review): confirm against those functions).
    file_moves: HashSet<Key<PatchId>>,
    // Edges collected for context repair, keyed by source node
    // (NOTE(review): semantics inferred from the name; verify at use
    // sites).
    context_edges: HashMap<Key<PatchId>, Edge>,
}

impl<'env, T: rand::Rng> MutTxn<'env, T> {
    pub fn unrecord(
        &mut self,
        branch: &mut Branch,
        patch_id: PatchId,
        patch: &Patch,
    ) -> Result<bool> {
        let timestamp = if let Some(ts) = self.get_patch(&branch.patches, patch_id) {
            ts
        } else {
            return Err(Error::PatchNotOnBranch(patch_id));
        };
        self.del_patches(&mut branch.patches, patch_id)?;
        self.del_revpatches(&mut branch.revpatches, timestamp, patch_id)?;

        // Is the patch used in another branch?;
        let unused_in_other_branches = {
            let mut it = self.iter_branches(None).filter(|br| {
                br.name != branch.name && self.get_patch(&br.patches, patch_id).is_some()
            });
            it.next().is_none()
        };

        debug!("unrecord: {:?}", patch_id);

        self.unapply(branch, patch_id, patch, unused_in_other_branches)?;

        for dep in patch.dependencies().iter() {
            let internal_dep = self.get_internal(dep.as_ref()).unwrap().to_owned();
            // Test whether other branches have both this patch and `dep`.
            let other_branches_have_dep = self.iter_branches(None).any(|br| {
                br.name != branch.name
                    && self.get_patch(&br.patches, internal_dep).is_some()
                    && self.get_patch(&br.patches, patch_id).is_some()
            });

            if !other_branches_have_dep {
                self.del_revdep(internal_dep, Some(patch_id))?;
            }
        }


        // If no other branch uses this patch, delete from revdeps.
        if unused_in_other_branches {
            info!("deleting patch");
            // Delete all references to patch_id in revdep.
            while self.del_revdep(patch_id, None)? {}
            let ext = self.get_external(patch_id).unwrap().to_owned();
            self.del_external(patch_id)?;
            self.del_internal(ext.as_ref())?;
            Ok(false)
        } else {
            Ok(true)
        }
    }

    /// Unapplies all the changes of `patch` (registered as
    /// `patch_id`) from `branch`, dispatching each `Change` to
    /// `unrecord_edges` or `unrecord_nodes`.
    ///
    /// Panics (debug assertion) if any patch still on the branch
    /// depends on this one.
    pub fn unapply(
        &mut self,
        branch: &mut Branch,
        patch_id: PatchId,
        patch: &Patch,
        unused_in_other_branch: bool,
    ) -> Result<()> {
        debug!("revdep: {:?}", self.get_revdep(patch_id, None));

        // Check that the branch has no patch that depends on this one.
        assert!(self
            .iter_revdep(Some((patch_id, None)))
            .take_while(|&(p, _)| p == patch_id)
            .all(|(_, p)| self.get_patch(&branch.patches, p).is_none()));

        // Scratch state shared by all the unrecord_* calls below.
        let mut workspace = Workspace {
            file_moves: HashSet::new(),
            context_edges: HashMap::new(),
        };
        let mut find_alive = FindAlive::new();

        // Check applied, check dependencies.
        for change in patch.changes().iter() {
            match *change {
                Change::NewEdges {
                    ref edges,
                    previous,
                    flag,
                    ..
                } => self.unrecord_edges(
                    &mut find_alive,
                    branch,
                    patch_id,
                    patch.dependencies(),
                    previous,
                    flag,
                    edges,
                    &mut workspace,
                    unused_in_other_branch,
                )?,
                Change::NewNodes {
                    ref up_context,
                    ref down_context,
                    ref line_num,
                    ref flag,
                    ref nodes,
                    ..
                } => self.unrecord_nodes(
                    branch,
                    patch_id,
                    patch.dependencies(),
                    up_context,
                    down_context,
                    *line_num,
                    *flag,
                    nodes,
                    &mut workspace,
                    unused_in_other_branch,
                )?,
            }
        }
        Ok(())
    }

    fn reconnect_across_deleted_nodes(
        &mut self,
        patch_id: PatchId,
        branch: &mut Branch,
        dependencies: &HashSet<Hash>,
        deleted_nodes: &[Key<PatchId>],
    ) -> Result<()> {
        debug!("reconnect_across_deleted_nodes");
        let mut find_alive = FindAlive::new();
        let mut alive_ancestors = Vec::new();
        let mut files = Vec::new();

        // find alive descendants of the deleted nodes.
        let mut alive_descendants = Vec::new();
        for &c in deleted_nodes {
            debug!("down_context c = {:?}", c);
            if !self.is_alive(branch, c) {
                find_alive.clear();
                find_alive.push(c);
                self.find_alive_descendants(&mut find_alive, branch, &mut alive_descendants);
            }
        }

        if !alive_descendants.is_empty() {

            // find alive ancestors of the deleted nodes.
            for &c in deleted_nodes {
                debug!("down_context c = {:?}", c);
                if !self.is_alive(branch, c) {
                    find_alive.clear();
                    find_alive.push(c);
                    let mut first_file = None;
                    self.find_alive_ancestors(&mut find_alive, branch, &mut alive_ancestors, &mut first_file, &mut files);
                    self.find_alive_ancestors(
                        &mut find_alive,
                        branch,
                        &mut alive_ancestors,
                        &mut first_file,
                        &mut files,
                    );
                }
            }
            debug!(
                "ancestors = {:?}, descendants = {:?}",
                alive_ancestors,
                alive_descendants
                alive_ancestors, alive_descendants
            );

1
2

3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28

use rand;
use super::Workspace;
use backend::*;
use std::mem::swap;
use rand;
use std::collections::HashSet;
use std::mem::swap;
use Result;

impl<'env, T: rand::Rng> MutTxn<'env, T> {
    pub(in unrecord) fn unrecord_nodes(
        &mut self,
        branch: &mut Branch,
        patch_id: PatchId,
        dependencies: &HashSet<Hash>,
        up_context: &[Key<Option<Hash>>],
        down_context: &[Key<Option<Hash>>],
        line_num: LineId,
        flag: EdgeFlags,
        nodes: &[Vec<u8>],
        w: &mut Workspace,
        unused_in_other_branch: bool,
    ) -> Result<()> {
        debug!(
            "unrecord_nodes: {:?} {:?} {:?}",
            patch_id,
            line_num,
            nodes.len()
        );
        // Delete the new nodes.





1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21

22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43

44
45
46

47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67

68
69
70
71
72
73
74
75
76

77
78

79
80

81
82

83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128

129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209

210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283

284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335

336
337
338
339
340
341
342
343
344

345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487

488
489
use std::io::{Read, Write};
use std;
                        write!(o, "\x1b[{};{}H", self.posy - 1, w)?;
            write!(o, "\x1b[{}S", self.posy + extra_lines - h)?;
use libc;
use std;
use std::io::{Read, Write};
use utf8parse;

/// Query the size of the controlling terminal via `TIOCGWINSZ` on stdout.
///
/// Returns `(columns, rows)`, or `(0, 0)` when the ioctl fails (e.g. when
/// stdout is not a terminal).
fn size() -> (usize, usize) {
    unsafe {
        let mut winsz: libc::winsize = std::mem::zeroed();

        // Albeit it may look useless in the general case, the use of `into()`
        // here is justified for portability reason.  It looks like there is a
        // type mismatch in the `libc` API for FreeBSD, which prevents `line`
        // from compiling.
        match libc::ioctl(1, libc::TIOCGWINSZ.into(), &mut winsz) {
            0 => (winsz.ws_col as usize, winsz.ws_row as usize),
            _ => (0, 0),
        }
    }
}

pub struct Terminal{
pub struct Terminal {
    attr: libc::termios,
    posx: usize,
    posy: usize,
    posx0: usize,
    posy0: usize,
    cursor: usize,
    n_chars: usize,
    buf: String,
}

impl Terminal {
    pub fn new() -> Option<Terminal> {
        unsafe {
            if libc::isatty(0) != 0 {
                let mut attr = std::mem::zeroed();
                libc::tcgetattr(0, &mut attr);

                // attr type implements copy and therefore does not need to be
                // cloned
                let attr_orig = attr;

                attr.c_iflag &= !(libc::BRKINT|libc::ICRNL|libc::INPCK|libc::ISTRIP|libc::IXON);
                attr.c_iflag &=
                    !(libc::BRKINT | libc::ICRNL | libc::INPCK | libc::ISTRIP | libc::IXON);
                attr.c_oflag &= !libc::OPOST;
                attr.c_lflag &= !(libc::ECHO|libc::ICANON|libc::IEXTEN|libc::ISIG);
                attr.c_lflag &= !(libc::ECHO | libc::ICANON | libc::IEXTEN | libc::ISIG);
                libc::tcsetattr(0, libc::TCSAFLUSH, &attr);
                Some(Terminal {
                    attr: attr_orig,
                    posx: 0,
                    posy: 0,
                    posx0: 0,
                    posy0: 0,
                    cursor: 0,
                    n_chars: 0,
                    buf: String::new(),
                })
            } else {
                None
            }
        }
    }
}

impl Drop for Terminal {
    /// Restore the terminal attributes saved by `Terminal::new`.
    fn drop(&mut self) {
        unsafe {
            libc::tcsetattr(0, libc::TCSAFLUSH, &self.attr);
        }
    }
}

/// Return the byte index of the character that follows the one starting at
/// byte `i`, by reading the UTF-8 sequence length off the leading byte.
///
/// Assumes `i` is at a character boundary of valid UTF-8 (guaranteed by
/// `&str`).
fn next_char(s: &str, i: usize) -> usize {
    let s = s.as_bytes();
    if s[i] <= 0x7f {
        // ASCII: single byte.
        i + 1
    } else if s[i] >> 5 == 0b110 {
        // Leading byte 110xxxxx: two-byte sequence.
        i + 2
    } else if s[i] >> 4 == 0b1110 {
        // Leading byte 1110xxxx: three-byte sequence.
        i + 3
    } else {
        // Leading byte 11110xxx: four-byte sequence.
        i + 4
    }
}

/// Return the byte index of the first byte of the character preceding byte
/// index `i`, by walking backwards over UTF-8 continuation bytes
/// (`0b10xxxxxx`). Requires `i >= 1`.
fn prev_char(s: &str, i: usize) -> usize {
    let bytes = s.as_bytes();
    let mut pos = i;
    loop {
        pos -= 1;
        if bytes[pos] & 0b1100_0000 != 0b1000_0000 {
            return pos;
        }
    }
}

impl Terminal {
    fn move_left(&mut self) -> Result<(), std::io::Error> {
        let mut o = std::io::stdout();
        if self.cursor > 0 {
            self.cursor = prev_char(&self.buf, self.cursor);
            if self.posx > 1 {
                self.posx -= 1;
            } else {
                let (w, _) = size();
                self.posx = w;
                self.posy -= 1;
            }
            write!(o, "\x1b[{};{}H", self.posy, self.posx)?;
        }
        o.flush()?;
        Ok(())
    }

    fn word_left(&mut self) -> Result<(), std::io::Error> {
        if self.cursor > 0 {
            let bytes = self.buf.as_bytes();
            let mut is_first = true;
            while self.cursor > 0 {
                self.cursor -= 1;
                if self.posx > 1 {
                    self.posx -= 1;
                } else {
                    let (w, _) = size();
                    self.posx = w;
                    self.posy -= 1;
                }
                if bytes[self.cursor] == b' ' {
                    if !is_first {
                        break
                        break;
                    }
                } else {
                    is_first = false
                }
            }
        }
        if self.buf.as_bytes()[self.cursor] == b' ' {
            self.move_right()?
        } else {
            let mut o = std::io::stdout();
            write!(o, "\x1b[{};{}H", self.posy, self.posx)?;
            o.flush()?;
        }
        Ok(())
    }

    fn home(&mut self) -> Result<(), std::io::Error> {
        let mut o = std::io::stdout();
        let (w, _) = size();
        let delta_y = (self.cursor + self.posx0 - 1) / w;
        self.posy -= delta_y;
        self.posx = self.posx0;
        self.cursor = 0;
        write!(o, "\x1b[{};{}H", self.posy, self.posx)?;
        o.flush()?;
        Ok(())
    }

    fn end(&mut self) -> Result<(), std::io::Error> {
        let mut o = std::io::stdout();
        let remaining_chars = self.buf.split_at(self.cursor).1.chars().count();
        let (w, _) = size();
        self.cursor = self.buf.len();
        self.posy += (self.posx + remaining_chars) / w;
        self.posx = 1 + ((self.posx - 1 + remaining_chars) % w);
        write!(o, "\x1b[{};{}H", self.posy, self.posx)?;
        o.flush()?;
        Ok(())
    }

    fn move_right(&mut self) -> Result<(), std::io::Error> {
        let mut o = std::io::stdout();
        if self.cursor < self.buf.len() {
            self.cursor = next_char(&self.buf, self.cursor);

            let (w, h) = size();
            if self.posx < w {
                self.posx += 1;
            } else {
                if self.posy >= h {
                    write!(o, "\x1b[1S")?;
                }
                self.posx = 1;
                self.posy += 1;
            }
            write!(o, "\x1b[{};{}H", self.posy, self.posx)?;
        }
        o.flush()?;
        Ok(())
    }

    fn word_right(&mut self) -> Result<(), std::io::Error> {
        let mut o = std::io::stdout();
        if self.cursor < self.buf.len() {
            let bytes = self.buf.as_bytes();
            let (w, h) = size();
            let mut is_first = true;
            while self.cursor < self.buf.len() {
                self.cursor += 1;
                if self.posx < w {
                    self.posx += 1;
                } else {
                    if self.posy >= h {
                        write!(o, "\x1b[1S")?;
                    }
                    self.posx = 1;
                    self.posy += 1;
                }
                if self.cursor >= self.buf.len() || bytes[self.cursor] == b' ' {
                    if !is_first {
                        break
                        break;
                    }
                } else {
                    is_first = false
                }
            }
        }
        if self.cursor < self.buf.len() && self.buf.as_bytes()[self.cursor] == b' ' {
            self.move_right()?
        } else {
            let mut o = std::io::stdout();
            write!(o, "\x1b[{};{}H", self.posy, self.posx)?;
            o.flush()?;
        }
        Ok(())
    }

    fn backspace(&mut self) -> Result<(), std::io::Error> {
        let mut o = std::io::stdout();
        if self.cursor >= 1 {
            self.cursor = prev_char(&self.buf, self.cursor);
            self.buf.remove(self.cursor);
            self.n_chars -= 1;
            if self.posx > 1 {
                self.posx -= 1;
                write!(o, "\x1b[{};{}H", self.posy, self.posx)?;
            } else {
                let (w, _) = size();
                self.posx = w;
                if self.posy > 1 {
                    write!(o, "\x1b[{};{}H", self.posy - 1, w)?;
                    self.posy -= 1;
                } else {
                    // scroll down by one
                    write!(o, "\x1b[1T")?;
                    write!(o, "\x1b[1;{}H", w)?;
                    self.posy0 = 1;
                    self.posy = 1;
                }
            }
            let (_, end) = self.buf.split_at(self.cursor);
            o.write_all(end.as_bytes())?;
            write!(o, "\x1b[0J")?;
            write!(o, "\x1b[{};{}H", self.posy, self.posx)?;
            o.flush()?;
        }
        Ok(())
    }

    fn delete(&mut self) -> Result<(), std::io::Error> {
        let mut o = std::io::stdout();
        if self.cursor < self.buf.len() {
            self.buf.remove(self.cursor);
            self.n_chars -= 1;
            let (_, end) = self.buf.split_at(self.cursor);
            o.write_all(end.as_bytes())?;
            write!(o, "\x1b[0J")?;
            write!(o, "\x1b[{};{}H", self.posy, self.posx)?;
            o.flush()?;
        }
        Ok(())
    }

    fn erase_to_end(&mut self) -> Result<(), std::io::Error> {
        let mut o = std::io::stdout();
        if self.cursor < self.buf.len() {
            self.buf.truncate(self.cursor);
            write!(o, "\x1b[0J")?;
            write!(o, "\x1b[{};{}H", self.posy, self.posx)?;
            o.flush()?;
        }
        Ok(())
    }


    fn insert(&mut self, c: char) -> Result<(), std::io::Error> {
        let mut o = std::io::stdout();
        self.n_chars += 1;
        self.buf.insert(self.cursor, c);
        let (w, h) = size();

        let (_, end) = self.buf.split_at(self.cursor);
        o.write_all(end.as_bytes())?;

        if self.posx + 1 > w && self.posy + 1 > h && end.len() == 1 {
            // we're discovering a new line, scroll up
            write!(o, "\x1b[1S")?;
            write!(o, "\x1b[{};1H", self.posy)?;
            self.posx = 1;
        } else {
            // did we scroll down when we wrote `end`?
            let y_end = self.posy + ((self.posx + end.len() - 2) / w);
            if y_end > h {
                if self.posx + 1 > w {
                    write!(o, "\x1b[{};1H", self.posy)?;
                    self.posx = 1;
                } else {
                    write!(o, "\x1b[{};{}H", self.posy - 1, self.posx + 1)?;
                    self.posy -= 1;
                    self.posx += 1;
                }
            } else if self.posx + 1 > w {
                // Ok, we didn't scroll down, but we're at the right
                // edge of the screen, and `end` did not start a new
                // line.
                write!(o, "\x1b[{};1H", self.posy + 1)?;
                self.posx = 1;
                self.posy += 1;
            } else {
                // we're not at the right edge of the screen, and
                // `end` did not start a new line.
                write!(o, "\x1b[{};{}H", self.posy, self.posx + 1)?;
                self.posx += 1;
            }
        }
        self.cursor = next_char(&self.buf, self.cursor);
        o.flush()?;
        Ok(())
    }

    pub fn read_line(&mut self) -> Result<String, std::io::Error> {
        let mut i = std::io::stdin();
        let mut o = std::io::stdout();
        o.write_all(b"\x1b[6n")?;
        o.flush()?;
        let mut p = Parser {
            c: None,
            valid: true
            valid: true,
        };
        let mut pending = None;
        let mut utf8 = utf8parse::Parser::new();
        loop {
            let mut c = [0; 4];
            i.read_exact(&mut c[..1])?;
            if c[0] == 3 {
                // Ctrl+C
                return Ok(String::new())
                self.print_newline(o)?;
                return Ok(String::new());
            } else if c[0] == 26 {
                // Ctrl+Z
            } else if c[0] == 27 {
                i.read_exact(&mut c[..1])?;
                if c[0] == b'[' {
                    i.read_exact(&mut c[..1])?;
                    if c[0] == b'D' {
                        // self.move_left()?
                        o.write_all(b"\x1b[6n")?;
                        o.flush()?;
                        pending = Some(Pending::MoveLeft)
                    } else if c[0] == b'C' {
                        // self.move_right()?
                        o.write_all(b"\x1b[6n")?;
                        o.flush()?;
                        pending = Some(Pending::MoveRight)
                    } else {
                        let mut y = 0;
                        while c[0] >= b'0' && c[0] <= b'9' {
                            y = y * 10 + ((c[0] - b'0') as usize);
                            i.read_exact(&mut c[..1])?;
                        }
                        if c[0] == b';' {
                            i.read_exact(&mut c[..1])?;
                            let mut x = 0;
                            while c[0] >= b'0' && c[0] <= b'9' {
                                x = x * 10 + ((c[0] - b'0') as usize);
                                i.read_exact(&mut c[..1])?;
                            }
                            if c[0] == b'R' {
                                // The terminal is reporting its position.
                                self.posy = y;
                                self.posx = x;
                                if let Some(p) = pending.take() {
                                    self.do_pending(p)?
                                } else {
                                    self.posy0 = y;
                                    self.posx0 = x;
                                }
                            }
                        } else if c[0] == b'~' {
                            if y == 3 {
                                // self.delete()?
                                o.write_all(b"\x1b[6n")?;
                                o.flush()?;
                                pending = Some(Pending::Delete)
                            } else if y == 7 {
                                // home
                                // self.home()?
                                o.write_all(b"\x1b[6n")?;
                                o.flush()?;
                                pending = Some(Pending::Home)
                            } else if y == 8 {
                                // end
                                // self.end()?
                                o.write_all(b"\x1b[6n")?;
                                o.flush()?;
                                pending = Some(Pending::End)
                            }
                        }
                    }
                } else if c[0] == b'O' {
                    i.read_exact(&mut c[..1])?;
                    if c[0] == b'd' {
                        //crtl + <-
                        o.write_all(b"\x1b[6n")?;
                        o.flush()?;
                        pending = Some(Pending::WordLeft)
                    } else if c[0] == b'c' {
                        //crtl + ->
                        o.write_all(b"\x1b[6n")?;
                        o.flush()?;
                        pending = Some(Pending::WordRight)
                    }
                }
            } else if c[0] == 127 || c[0] == 8 {
                // backspace
                o.write_all(b"\x1b[6n")?;
                o.flush()?;
                pending = Some(Pending::Backspace)
            } else if c[0] == 10 || c[0] == 13 {
                self.print_newline(o)?;
                return Ok(std::mem::replace(&mut self.buf, String::new()));
            } else if c[0] == 11 {
                o.write_all(b"\x1b[6n")?;
                o.flush()?;
                pending = Some(Pending::EraseToEnd)
            } else if c[0] == 4 {
                // ctrl + d;
                o.write_all(b"\x1b[6n")?;
                o.flush()?;
                pending = Some(Pending::Delete)
            } else {
                utf8.advance(&mut p, c[0]);
                if let Some(c) = p.c.take() {
                    o.write_all(b"\x1b[6n")?;
                    o.flush()?;
                    pending = Some(Pending::Insert(c))
                }
            }
        }
    }

    fn print_newline<W: std::io::Write>(&mut self, mut o: W) -> Result<(), std::io::Error> {
        self.posy += 1;
        self.posx = 0;
        write!(o, "\n\x1b[{};1H", self.posy)?;
        o.flush()
    }

    fn do_pending(&mut self, p: Pending) -> Result<(), std::io::Error> {
        match p {
            Pending::Insert(c) => self.insert(c),
            Pending::Delete => self.delete(),
            Pending::Home => self.home(),
            Pending::End => self.end(),
            Pending::MoveRight => self.move_right(),
            Pending::MoveLeft => self.move_left(),
            Pending::Backspace => self.backspace(),
            Pending::EraseToEnd => self.erase_to_end(),
            Pending::WordLeft => self.word_left(),
            Pending::WordRight => self.word_right(),
        }
    }
}

/// An editing action deferred until the terminal answers a `\x1b[6n`
/// cursor-position query (see `read_line`/`do_pending`).
enum Pending {
    Insert(char),
    Delete,
    Home,
    End,
    MoveRight,
    MoveLeft,
    Backspace,
    EraseToEnd,
    WordLeft,
    WordRight,
}

/// Receiver state for the `utf8parse` decoder used in `read_line`.
struct Parser {
    // Most recently decoded character; taken (`Option::take`) by the
    // caller after each `advance`.
    c: Option<char>,
    // Presumably flags whether the byte stream decoded cleanly — confirm
    // against the `utf8parse::Receiver` impl (not visible in this chunk).
    valid: bool,
}
1
2


3
4
use std;

// NOTE(review): appears to be the stub used when no real terminal support
// is available (separate compilation unit from the termios version) —
// confirm against the surrounding cfg.
pub struct Terminal {}

1
2
3

4
5

6

7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
use clap::{Arg, ArgMatches, SubCommand};
use commands::fs_operation;
use commands::fs_operation::Operation;
use commands::{StaticSubcommand, default_explain};
use commands::{default_explain, StaticSubcommand};
use error::Error;
use clap::{SubCommand, ArgMatches, Arg};


/// Build the `add` subcommand, which registers files with the repository.
pub fn invocation() -> StaticSubcommand {
    let files = Arg::with_name("files")
        .multiple(true)
        .help("Files to add to the repository.")
        .required(true);
    let repository = Arg::with_name("repository")
        .takes_value(true)
        .long("repository")
        .help(
            "Add the files to this repository. Defaults to the repository containing \
             the current directory.",
        );
    let recursive = Arg::with_name("recursive")
        .long("recursive")
        .help("Add repositories recursively.");
    SubCommand::with_name("add")
        .about("Add a file to the repository")
        .arg(files)
        .arg(repository)
        .arg(recursive)
}



1
2
3




4
5
6
7
use super::{validate_base58, BasicOptions};
use libpijul::{apply_resize, apply_resize_no_output, Hash};
use std::fs::File;
use super::{validate_base58, BasicOptions};
use clap::{Arg, ArgMatches, SubCommand};
use commands::{default_explain, StaticSubcommand};
use libpijul::{apply_resize, apply_resize_no_output, Hash};
use libpijul::patch::Patch;
use libpijul::patch::Patch;
use std::fs::File;
use error::Error;
use libpijul::patch::Patch;
use libpijul::{apply_resize, apply_resize_no_output, Hash, RepoPath};
use std::collections::HashSet;



1
2
3
4
5
6
7


8
9
10
11
12
13
14
15
16
17
18
19
20
21

22
23



24
25

26
27
28

29
30
use std::io::prelude::*;
use isatty::stdout_isatty;
use relativize::relativize;
use getch;
use libpijul::patch::{Change, ChangeContext, Patch, PatchHeader, Record};
use std::io::prelude::*;

use std::collections::{HashMap, HashSet};
use std::ffi::OsString;
use std::io::stdout;
use std::collections::{HashMap, HashSet};
use std::rc::Rc;
use std::path::{Path, PathBuf};
use std::rc::Rc;

use regex::Regex;

use commands::pretty_repo_path;
use libpijul::fs_representation::{RepoPath, RepoRoot, PIJUL_DIR_NAME};

use atty;
use error::Error;
use libpijul::{EdgeFlags, Hash, LineId, MutTxn, PatchId};
use rand;
use std;
use std::char::from_u32;
use std::str;
use std::fs::{remove_file, File};
use std::process;
use std;
use rand;
use term::{Attr, StdoutTerminal};
use std::str;
use term;
use isatty::stdout_isatty;
use term::{Attr, StdoutTerminal};

use ignore::gitignore::GitignoreBuilder;
use relativize::relativize;
use line;



1
2

3
4
5
6
7
8






9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34

use clap::{SubCommand, ArgMatches, Arg};
use super::{default_explain, get_current_branch, BasicOptions, StaticSubcommand};
use clap::{Arg, ArgMatches, SubCommand};

use super::{BasicOptions, StaticSubcommand, get_current_branch, default_explain};
use super::{default_explain, BasicOptions, StaticSubcommand};
use error::Error;

/// Build the `branches` subcommand, which lists the branches of a
/// repository.
///
/// The pre-rustfmt duplicate of the `repository` argument has been
/// removed: registering the same argument twice is rejected by clap.
pub fn invocation() -> StaticSubcommand {
    SubCommand::with_name("branches")
        .about("List all branches")
        .arg(
            Arg::with_name("repository")
                .long("repository")
                .help(
                    "Path to a pijul repository. Defaults to the repository containing the \
                     current directory.",
                )
                .takes_value(true),
        )
}

/// List every branch of the repository, marking the current one with `*`.
pub fn run(args: &ArgMatches) -> Result<(), Error> {
    let opts = BasicOptions::from_args(args)?;
    let repo = opts.open_repo()?;
    let txn = repo.txn_begin()?;
    let current = opts.repo_root.get_current_branch()?;
    for branch in txn.iter_branches(None) {
        debug!("branch: {:?}", branch);
        let name = branch.name.as_str();
        let marker = if name == current { "*" } else { " " };
        println!("{} {}", marker, name);
    }
    Ok(())
}







use clap::{SubCommand, ArgMatches, AppSettings};
use commands::{StaticSubcommand, default_explain};
use clap::{AppSettings, ArgMatches, SubCommand};
use commands::{default_explain, StaticSubcommand};
use std::io::stdin;
        .about("Prove ownership of a signature key");

1
2
3
4
5
6
7




8
9
10
11
use libpijul::fs_representation::{get_current_branch, set_current_branch};
use clap::{Arg, ArgMatches, SubCommand};

use super::{default_explain, BasicOptions, StaticSubcommand};
use error::Error;
use libpijul::fs_representation::{in_repo_root, RepoPath};
use libpijul::patch::UnsignedPatch;
use libpijul::{FileStatus, RecordState, ToPrefixes};
use libpijul::fs_representation::{get_current_branch, set_current_branch};
use std::collections::HashSet;
use std::collections::HashSet;
use std::collections::HashSet;
use rand;
use std::collections::HashSet;
use std::path::Path;









1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100

101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171

172
173
use error::{Error, ErrorKind};
use libpijul::DEFAULT_BRANCH;
    let from_path = args.values_of("from_path")
        .map(|x| x.collect())
            let mut pullable: Vec<_> = session
                .pullable_patches(args.from_branch, args.to_branch, path, &args.from_path)?
                .iter()
                .collect();
use clap::{Arg, ArgMatches, SubCommand};
use commands::remote::{parse_remote, Remote};
use commands::{assert_no_containing_repo, create_repo, default_explain, StaticSubcommand};
use error::Error;
use libpijul::fs_representation::{RepoPath, RepoRoot};
use libpijul::{Hash, DEFAULT_BRANCH};
use regex::Regex;
use std::io::{stderr, Write};
use std::path::Path;
use std::process::exit;
use tempfile::tempdir_in;

/// Build the `clone` subcommand, which clones a remote branch into a new
/// repository.
pub fn invocation() -> StaticSubcommand {
    let from = Arg::with_name("from")
        .help("Repository to clone.")
        .required(true);
    let from_branch = Arg::with_name("from_branch")
        .long("from-branch")
        .help("The branch to pull from")
        .takes_value(true);
    let to_branch = Arg::with_name("to_branch")
        .long("to-branch")
        .help("The branch to pull into")
        .takes_value(true);
    let to = Arg::with_name("to").help("Target.");
    let from_path = Arg::with_name("from_path")
        .long("path")
        .help("Only pull patches relative to that path.")
        .takes_value(true);
    let patch = Arg::with_name("patch")
        .long("patch")
        .help("Pull a patch and its dependencies.")
        .takes_value(true)
        .multiple(true);
    // Reject any value that does not parse as a 16-bit port number.
    let port = Arg::with_name("port")
        .short("p")
        .long("port")
        .help("Port of the remote ssh server.")
        .takes_value(true)
        .validator(|val| match val.parse::<u16>() {
            Ok(_) => Ok(()),
            Err(_) => Err(val),
        });
    SubCommand::with_name("clone")
        .about("Clone a remote branch")
        .arg(from)
        .arg(from_branch)
        .arg(to_branch)
        .arg(to)
        .arg(from_path)
        .arg(patch)
        .arg(port)
}
/// Parsed command-line parameters for `clone`.
#[derive(Debug)]
pub struct Params<'a> {
    // Source remote to clone from.
    pub from: Remote<'a>,
    // Branch to pull from at the source.
    pub from_branch: &'a str,
    // Restrict the clone to patches touching these paths.
    pub from_path: Vec<RepoPath<&'a Path>>,
    // Destination (derived from `from` when not given explicitly).
    pub to: Remote<'a>,
    // Branch to pull into at the destination.
    pub to_branch: &'a str,
}

/// Parse the `clone` command line into a `Params`.
///
/// When no explicit target is given, the destination directory is derived
/// from the last `/`- or `:`-delimited component of "from". Panics if no
/// target can be derived.
///
/// Fixes: removed a leftover pre-rustfmt duplicate binding of `from_path`
/// (it shadowed the correct one with an untyped collect), and hoisted the
/// thrice-repeated port parsing.
pub fn parse_args<'a>(args: &'a ArgMatches) -> Params<'a> {
    // At least one of the two remotes must not use its "port" argument.
    // The validator in `invocation` guarantees the parse cannot fail.
    let port = args.value_of("port").map(|x| x.parse().unwrap());
    let from = parse_remote(args.value_of("from").unwrap(), port, None, None);
    let to = if let Some(to) = args.value_of("to") {
        parse_remote(to, port, None, None)
    } else {
        // Derive the target from the last component of "from".
        let basename = Regex::new(r"([^/:]+)").unwrap();
        let from = args.value_of("from").unwrap();
        if let Some(to) = basename.captures_iter(from).last().and_then(|to| to.get(1)) {
            parse_remote(to.as_str(), port, None, None)
        } else {
            panic!("Could not parse target")
        }
    };
    let from_branch = args.value_of("from_branch").unwrap_or(DEFAULT_BRANCH);
    let from_path = args
        .values_of("from_path")
        .map(|x| x.map(|p| RepoPath(Path::new(p))).collect())
        .unwrap_or_else(Vec::new);
    let to_branch = args.value_of("to_branch").unwrap_or(from_branch);
    Params {
        from,
        from_branch,
        from_path,
        to,
        to_branch,
    }
}

pub fn run(args_: &ArgMatches) -> Result<(), Error> {
    let args = parse_args(args_);
    debug!("{:?}", args);
    match args.to {
        Remote::Local { path: repo_root } => {
            assert_no_containing_repo(&repo_root.repo_root)?;

            let parent = repo_root.repo_root.parent().unwrap();
            let tmp_dir = tempdir_in(parent)?;
            {
                create_repo(tmp_dir.path())?;
                let tmp_root = RepoRoot {
                    repo_root: tmp_dir.path(),
                };
                let mut session = args.from.session()?;
                let mut pullable: Vec<_> = if let Some(patches) = args_.values_of("patch") {
                    let mut p = Vec::new();
                    for x in patches {
                        p.push((Hash::from_base58(x).unwrap(), 0))
                    }
                    p
                } else {
                    session.changes(args.from_branch, &args.from_path[..])?
                };
                session.pull(
                    &tmp_root,
                    args.to_branch,
                    &mut pullable,
                    &args.from_path,
                    true,
                )?;
                tmp_root.set_current_branch(args.to_branch)?;
            }
            let path = tmp_dir.into_path();
            std::fs::rename(&path, &repo_root.repo_root)?;
            Ok(())
        }
        _ => {
            // Clone between remote repositories.
            match args.from {
                Remote::Local { path } => {
                    let mut to_session = args.to.session()?;
                    debug!("remote init");
                    to_session.remote_init()?;
                    debug!("pushable?");
                    let pushable = to_session.pushable_patches(
                        args.from_branch,
                        args.to_branch,
                        &path,
                        &args.from_path,
                    )?;
                    debug!("pushable = {:?}", pushable);
                    let pushable = pushable.pushable.into_iter().map(|(h, _, _)| h).collect();
                    to_session.push(&path, args.to_branch, pushable)?;
                    path.set_current_branch(args.to_branch)
                        .map_err(|x| x.into())
                }
                _ => unimplemented!()
                _ => unimplemented!(),
            }










1
2
3
4
5
6
7
8

9
10
11
12
13













14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35

36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59

60
61
62
63
64
65

66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102

use clap::{SubCommand, ArgMatches, Arg};
use commands::{BasicOptions, StaticSubcommand, default_explain};
use libpijul::{PatchId, Key, Value, Txn};
use libpijul::graph::LineBuffer;
use bs58;
use error::{Error, ErrorKind};
                return Err(ErrorKind::IsDirectory.into());
struct OutBuffer<'a> {
    target: &'a Path,
            bs58::encode(key.patch.as_ref()).into_string()
use clap::{Arg, ArgMatches, SubCommand};
use commands::{default_explain, BasicOptions, StaticSubcommand};
use error::Error;
use libpijul::fs_representation::RepoRoot;
use libpijul::graph::LineBuffer;
use libpijul::{Key, PatchId, Txn, Value};
use std::fs::canonicalize;
use std::io::{stdout, Stdout};
use bs58;
use std::path::Path;

pub fn invocation() -> StaticSubcommand {
    return SubCommand::with_name("credit")
        .about("Show what patch introduced each line of a file.")
        .arg(Arg::with_name("repository")
             .long("repository")
             .takes_value(true)
             .help("Local repository."))
        .arg(Arg::with_name("branch")
             .long("branch")
             .help("The branch to annotate, defaults to the current branch.")
             .takes_value(true)
             .required(false))
        .arg(Arg::with_name("file")
             .help("File to annotate.")
             .required(true)
             .takes_value(true)
        .arg(
            Arg::with_name("repository")
                .long("repository")
                .takes_value(true)
                .help("Local repository."),
        )
        .arg(
            Arg::with_name("branch")
                .long("branch")
                .help("The branch to annotate, defaults to the current branch.")
                .takes_value(true)
                .required(false),
        )
        .arg(
            Arg::with_name("file")
                .help("File to annotate.")
                .required(true)
                .takes_value(true),
        );
}

/// Runs `pijul credit` on the file given on the command line: outputs the
/// file, each line prefixed with provenance information about the patch
/// that introduced it (see `OutBuffer` below).
pub fn run(args: &ArgMatches) -> Result<(), Error> {

    let opts = BasicOptions::from_args(args)?;
    let file = Path::new(args.value_of("file").unwrap());
    let p = canonicalize(opts.cwd.join(file))?;

    // Express the (canonicalized) path relative to the repository root.
    let file = opts.repo_root.relativize(&p)?;
    let repo = opts.open_repo()?;
    let txn = repo.txn_begin()?;
    if let Some(branch) = txn.get_branch(&opts.branch()) {
        let inode = txn.find_inode(&file)?;
        // Only regular files can be annotated line by line.
        if txn.is_directory(&inode) {
            return Err(Error::IsDirectory);
        }
        let node = txn.get_inodes(inode).unwrap();
        let mut graph = txn.retrieve(&branch, node.key);
        let mut buf = OutBuffer {
            stdout: stdout(),
            txn: &txn,
            target: opts.repo_root,
        };
        // OutBuffer's LineBuffer impl writes each line with its patch info.
        txn.output_file(&branch, &mut buf, &mut graph, &mut Vec::new())?;
    }
    Ok(())
}

struct OutBuffer<'a> { stdout: Stdout, txn: &'a Txn<'a>, target: &'a Path }
struct OutBuffer<'a, P: AsRef<Path> + 'a> {
    stdout: Stdout,
    txn: &'a Txn<'a>,
    target: RepoRoot<P>,
}

use libpijul;
use libpijul::Transaction;
use std::io::Write;

/// Writes each line of the reconstructed file prefixed with the author,
/// timestamp, and base58 hash of the patch that introduced it.
impl<'a, P: AsRef<Path>, T: 'a + Transaction> LineBuffer<'a, T> for OutBuffer<'a, P> {
    fn output_line(
        &mut self,
        key: &Key<PatchId>,
        contents: Value<'a, T>,
    ) -> Result<(), libpijul::Error> {
        // Resolve the line's internal patch id to its external hash, then
        // load the patch header (without its changes) for author/timestamp.
        let ext = self.txn.get_external(key.patch).unwrap();
        let patch = self.target.read_patch_nochanges(ext)?;
        write!(
            self.stdout,
            "{} {} {} > ",
            patch.authors[0],
            patch.timestamp.format("%F %R %Z"),
            key.patch.to_base58()
        )?;
        // `contents` is chunked; track whether the last chunk ends with a
        // newline so every annotated line ends exactly one line of output.
        let mut ends_with_eol = false;
        for chunk in contents {
            self.stdout.write_all(chunk)?;
            if let Some(&c) = chunk.last() {
                ends_with_eol = c == b'\n'
            }
        }
        if !ends_with_eol {
            writeln!(self.stdout, "")?;
        }
        Ok(())
    }

    fn output_conflict_marker(&mut self, s: &'a str) -> Result<(), libpijul::Error> {
        // Conflict markers are passed through verbatim, without annotation.
        write!(self.stdout, "{}", s)?;
        Ok(())
    }
}





1
2
3
4

5
6

7
8
9








10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29

30
31
32
33
34
35
36
37
38
39
40

41
42
43
44
45
46
47
48
49
50
51
52

use clap::{SubCommand, ArgMatches, Arg};
use super::{default_explain, get_current_branch, BasicOptions, StaticSubcommand};
        return Err(ErrorKind::CannotDeleteCurrentBranch.into());
            return Err(ErrorKind::NoSuchBranch.into());
use clap::{Arg, ArgMatches, SubCommand};
use error::Error;
use rand;

use super::{BasicOptions, StaticSubcommand, get_current_branch, default_explain};
use super::{default_explain, BasicOptions, StaticSubcommand};


pub fn invocation() -> StaticSubcommand {
    return SubCommand::with_name("prune")
        .about("Delete a branch in the local repository")
        .arg(Arg::with_name("repository")
            .long("repository")
            .help("Local repository.")
            .takes_value(true))
        .arg(Arg::with_name("branch")
            .help("Branch to delete.")
            .takes_value(true)
            .required(true));
        .arg(
            Arg::with_name("repository")
                .long("repository")
                .help("Local repository.")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("branch")
                .help("Branch to delete.")
                .takes_value(true)
                .required(true),
        );
}

/// Runs `pijul prune`: deletes the named branch, refusing to remove the
/// currently checked-out branch or the repository's last branch.
pub fn run(args: &ArgMatches) -> Result<(), Error> {
    debug!("args {:?}", args);
    let opts = BasicOptions::from_args(args)?;
    let branch = args.value_of("branch").unwrap();
    let current_branch = opts.repo_root.get_current_branch()?;
    // Never delete the branch that is currently checked out.
    if current_branch == branch {
        return Err(Error::CannotDeleteCurrentBranch);
    }
    let repo = opts.open_repo()?;
    let mut txn = repo.mut_txn_begin(rand::thread_rng())?;
    // Peek at the first two entries only; we just need to know whether
    // more than one branch exists.
    let at_least_two_branches = {
        let mut it = txn.iter_branches(None);
        it.next();
        it.next().is_some()
    };
    if at_least_two_branches {
        if !txn.drop_branch(&branch)? {
            return Err(Error::NoSuchBranch);
        };
        txn.commit()?;
        Ok(())
    } else {
        // A single branch remains: distinguish "no such branch" from
        // "cannot delete the last branch".
        if txn.get_branch(&branch).is_none() {
            Err(Error::NoSuchBranch)
        } else {
            Err(Error::CannotDeleteCurrentBranch)
        }
    }
}











1
2
3
4
5
6
7
8
9



10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74

75
76
77
78
use clap::{SubCommand, ArgMatches, Arg};
use libpijul::RecordState;
        )
        .arg(
        )
        .arg(
        );
    let prefix = if let Some(ref prefix) = prefix {
        Some(prefix.as_path())
        None
use clap::{Arg, ArgMatches, SubCommand};
use commands::{BasicOptions, StaticSubcommand};
use error::Error;
use libpijul::fs_representation::in_repo_root;
use libpijul::RecordState;
use rand;
use std::fs::canonicalize;
use std::io::{stderr, Write};
use std::process::exit;
use std::fs::{canonicalize};
use rand;
use libpijul::RecordState;

/// Builds the clap subcommand for `pijul diff`, including the mutually
/// exclusive `--patience` / `--myers` diff-algorithm flags.
pub fn invocation() -> StaticSubcommand {
    return SubCommand::with_name("diff")
        .about("Show what would be recorded if record were called")
        .arg(
            Arg::with_name("repository")
                .long("repository")
                .help("The repository to show, defaults to the current directory.")
                .takes_value(true)
                .required(false),
        )
        .arg(
            Arg::with_name("branch")
                .long("branch")
                .help("The branch to show, defaults to the current branch.")
                .takes_value(true)
                .required(false),
        )
        .arg(
            Arg::with_name("prefix")
                .help("Prefix to start from")
                .takes_value(true)
                .multiple(true),
        )
        .arg(
            Arg::with_name("patience")
                .long("patience")
                .help("Use patience diff instead of the default (Myers diff)")
                .conflicts_with("myers")
                .takes_value(false),
        )
        .arg(
            Arg::with_name("myers")
                .long("myers")
                .help("Use Myers diff")
                .conflicts_with("patience")
                .takes_value(false),
        );
}

pub fn run(args: &ArgMatches) -> Result<(), Error> {
    let opts = BasicOptions::from_args(args)?;
    let repo = opts.open_repo()?;
    let mut txn = repo.mut_txn_begin(rand::thread_rng())?;
    let prefix = if let Some(prefix) = args.value_of("prefix") {
        let p = canonicalize(opts.cwd.join(prefix))?;
        opts.repo_root.relativize(&p)?.to_owned()
    } else {
        in_repo_root().to_owned()
    };
    let mut record = RecordState::new();
    let branch = txn.open_branch(&opts.branch())?;
    txn.record(
        if args.is_present("patience") {
            libpijul::DiffAlgorithm::Patience
        } else {
            libpijul::DiffAlgorithm::Myers
        },
        &mut record,
        &branch,
        &opts.repo_root,
        &prefix,
    )?;
    txn.commit_branch(branch)?;
    let (changes, _) = record.finish();
    let changes:Vec<_> = changes.into_iter().map(|x| txn.globalize_record(x)).collect();
    let changes: Vec<_> = changes
        .into_iter()
        .map(|x| txn.globalize_record(x))
        .collect();








1
2
3
4
5
6
7
8
9
10
11
12
13
14























15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48






49
50
51
52
53
54
55
56
57
58
59
60
61
62
63




64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83




84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107











108
109
110
111
112
113
114
115
116
117
118
119








120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153

use commands::{BasicOptions, StaticSubcommand, default_explain, ScanScope};
use clap::{SubCommand, ArgMatches, Arg};
use clap::{SubCommand, ArgMatches, Arg};
use error::Result;
use libpijul::{Branch, Edge, Key, PatchId, Repository, Txn, ROOT_KEY};
) -> Result<()> {
    buffer: &mut Vec<u8>,
) -> Result<()> {
use clap::{Arg, ArgMatches, SubCommand};
use commands::{default_explain, BasicOptions, ScanScope, StaticSubcommand};
use error::Error;
use flate2::write::GzEncoder;
use flate2::Compression;
use libpijul::{graph, Branch, Edge, Key, PatchId, Repository, Txn, ROOT_KEY};
use std::fs::{remove_file, File};
use std::io::{stdout, Write};
use std::path::{Path, PathBuf};
use tar::{Builder, Header};

pub fn invocation() -> StaticSubcommand {
    return SubCommand::with_name("dist")
        .about("Produces a tar.gz archive of the repository")
        .arg(Arg::with_name("archive")
             .short("d")
             .takes_value(true)
             .required(true)
             .help("File name of the output archive."))
        .arg(Arg::with_name("branch")
             .long("branch")
             .help("The branch from which to make the archive, defaults to the current branch.")
             .takes_value(true)
             .required(false))
        .arg(Arg::with_name("repository")
             .long("repository")
             .help("Repository where to work.")
             .takes_value(true))
        .arg(Arg::with_name("stdout")
             .long("stdout")
             .short("s")
             .help("Prints the resulting archive to stdout")
             .takes_value(false))
        .arg(Arg::with_name("dir")
        .arg(Arg::with_name("dir")
             .help("Directory (or file) to archive, defaults to the whole repository.")
             .takes_value(true));
        .arg(
            Arg::with_name("archive")
                .short("d")
                .takes_value(true)
                .required(true)
                .help("File name of the output archive."),
        )
        .arg(
            Arg::with_name("branch")
                .long("branch")
                .help("The branch from which to make the archive, defaults to the current branch.")
                .takes_value(true)
                .required(false),
        )
        .arg(
            Arg::with_name("repository")
                .long("repository")
                .help("Repository where to work.")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("stdout")
                .long("stdout")
                .short("s")
                .help("Prints the resulting archive to stdout")
                .takes_value(false),
        )
        .arg(
            Arg::with_name("dir")
                .help("Directory (or file) to archive, defaults to the whole repository.")
                .takes_value(true),
        );
}

pub fn dist<W: Write>(repo: Repository,
                      branch_name: &str,
                      scope: ScanScope,
                      archive_name: &str,
                      encoder: GzEncoder<W>
                     ) -> Result<()> {
pub fn dist<W: Write>(
    repo: Repository,
    branch_name: &str,
    scope: ScanScope,
    archive_name: &str,
    encoder: GzEncoder<W>,
) -> Result<(), Error> {
    let txn = repo.txn_begin()?;
    let branch = txn.get_branch(branch_name).ok_or(Error::NoSuchBranch)?;
    let mut current_path = Path::new(archive_name).to_path_buf();
    let mut archive = Builder::new(encoder);
    let mut buffer = graph::Writer::new(Vec::new());
    let mut forward = Vec::new();

    let key = match scope {
        ScanScope::FromRoot => {
            ROOT_KEY
            ROOT_KEY
        },
        ScanScope::FromRoot => ROOT_KEY,
        ScanScope::WithPrefix(prefix, user_input) => {
            let inode = txn.find_inode(&prefix)?;
            txn.get_inodes(inode)
                .map(|key| key.key.to_owned())
                .ok_or(Error::InvalidPath {
                    path: PathBuf::from(user_input),
                })?
        }
    };
    archive_rec(
        &txn,
        &branch,
        key,
        &mut archive,
        &mut buffer,
        &mut forward,
        &mut current_path,
    )?;

    archive.into_inner()?
            .finish()?
            .flush()
            .map_err(|x| x.into())
    archive
        .into_inner()?
        .finish()?
        .flush()
        .map_err(|x| x.into())
}

/// Runs `pijul dist`: writes the archive to stdout with `--stdout`, or to
/// `<archive>.tar.gz` otherwise.
pub fn run(args: &ArgMatches) -> Result<(), Error> {
    let opts = BasicOptions::from_args(args)?;

    let archive_name = args.value_of("archive").unwrap();

    let repo = opts.open_repo()?;
    let scan = opts.scan_scope()?;

    if args.is_present("stdout") {
        let encoder = GzEncoder::new(stdout(), Compression::best());

        dist(repo, &opts.branch(), scan, archive_name, encoder)
    } else {
        let archive_path = PathBuf::from(archive_name.to_string() + ".tar.gz");

        let encoder = GzEncoder::new(File::create(&archive_path)?, Compression::best());

        dist(repo, &opts.branch(), scan, archive_name, encoder).map_err(|err| {
            // The creation of the archive has failed, we should try to
            // remove it, but we ignore the error if we cannot.
            // This should not happen, because either we could not create
            // the file, or we have enough permission to do it, as we are
            // its creator.
            let _ = remove_file(archive_path);
            err
        })
    }
}

fn archive_rec<W:Write> (txn: &Txn,
                         branch: &Branch,
                         key: Key<PatchId>,
                         builder: &mut Builder<W>,
                         buffer: &mut Vec<u8>,
                         forward: &mut Vec<(Key<PatchId>, Edge)>,
                         current_path: &mut PathBuf,
                        ) -> Result<()> {
fn archive_rec<W: Write>(
    txn: &Txn,
    branch: &Branch,
    key: Key<PatchId>,
    builder: &mut Builder<W>,
    buffer: &mut graph::Writer<Vec<u8>>,
    forward: &mut Vec<(Key<PatchId>, Edge)>,
    current_path: &mut PathBuf,
) -> Result<(), Error> {
    let files = txn.list_files_under_node(branch, key);

    for (key, names) in files {
        debug!("archive_rec: {:?} {:?}", key, names);
        if names.len() > 1 {
            error!("file has several names: {:?}", names);
        }
        current_path.push(names[0].1);
        if names[0].0.is_dir() {
            archive_rec(txn, branch, key, builder, buffer, forward, current_path)?;
        } else {
            buffer.clear();
            let mut graph = txn.retrieve(&branch, key);
            txn.output_file(branch, buffer, &mut graph, forward)?;
            let mut header = Header::new_gnu();
            header.set_path(&current_path)?;
            header.set_size(buffer.len() as u64);
            header.set_mode(names[0].0.permissions() as u32);
            header.set_cksum();
            builder.append(&header, &buffer[..])?;
        }
        current_path.pop();
    }
    Ok(())
}










1
2
3
4
5
6
7
8



9
10
11
12
13
14




15
16
17
18
19
20




21
22
23
24
25
26









27
28
29
30
31
32
33
34
35
36
37
38



39
40
41
42
43
44
45



46
47
48
49
50

51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88



89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111

112
113
114
115
116
117
use clap::{SubCommand, ArgMatches, Arg, ArgGroup};
use rand;
use rand;
use libpijul::{Hash, apply_resize_no_output};
use error::{Error, ErrorKind, Result};
fn patch_dependencies(hash_str: &str, repo_root: &PathBuf) -> Result<Vec<Hash>> {
    let mut current =
        vec![Hash::from_base58(hash_str).ok_or::<Error>(ErrorKind::WrongHash.into())?];
            let partials = txn.iter_partials(&br)
use clap::{Arg, ArgGroup, ArgMatches, SubCommand};
use commands::checkout::checkout;
use libpijul::fs_representation::RepoRoot;
use libpijul::{apply_resize_no_output, Hash};
use rand;
use std::mem;
use std::path::Path; // PathBuf;

use super::{BasicOptions, StaticSubcommand, default_explain};
use error::{Result, ErrorKind, Error};
use error::{Result, ErrorKind, Error};
use super::{default_explain, BasicOptions, StaticSubcommand};
use error::Error;

pub fn invocation() -> StaticSubcommand {
    return SubCommand::with_name("fork")
        .about("Create a new branch")
        .arg(Arg::with_name("repository")
             .long("repository")
             .help("Local repository.")
             .takes_value(true)
        .arg(
            Arg::with_name("repository")
                .long("repository")
                .help("Local repository.")
                .takes_value(true),
        )
        .arg(Arg::with_name("branch")
             .long("branch")
             .help("Branch.")
             .takes_value(true)
        .arg(
            Arg::with_name("branch")
                .long("branch")
                .help("Branch.")
                .takes_value(true),
        )
        .arg(Arg::with_name("patch")
             .long("patch")
             .help("A patch hash, preferably a tag.")
             .takes_value(true)
        .arg(Arg::with_name("to")
        .arg(Arg::with_name("to")
             .help("Name of the new branch.")
             .takes_value(true)
             .required(true)
        .arg(
            Arg::with_name("patch")
                .long("patch")
                .help("A patch hash, preferably a tag.")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("to")
                .help("Name of the new branch.")
                .takes_value(true)
                .required(true),
        )
        .group(ArgGroup::with_name("source")
               .required(false)
               .args(&["branch", "patch"]))
        .group(
            ArgGroup::with_name("source")
                .required(false)
                .args(&["branch", "patch"]),
        );
}

fn patch_dependencies(hash_str: &str,
                      repo_root: &PathBuf)
                      -> Result<Vec<Hash>> {
fn patch_dependencies(
    hash_str: &str,
    repo_root: &RepoRoot<impl AsRef<Path>>,
) -> Result<Vec<Hash>, Error> {
    let mut deps = Vec::new();
    let mut current = vec![Hash::from_base58(hash_str).ok_or::<Error>(ErrorKind::WrongHash.into())?];
    let mut current = vec![Hash::from_base58(hash_str).ok_or::<Error>(Error::WrongHash)?];
    let mut next = Vec::new();

    while !current.is_empty() {
        for hash in current.drain(..) {
            deps.push(hash.clone());
            let patch = repo_root.read_patch(hash.as_ref())?;

            for hash_dep in patch.dependencies().iter() {
                let h = hash_dep.to_owned();

                if !deps.contains(&h) {
                    next.push(h);
                }
            }
        }

        mem::swap(&mut next, &mut current);
    }

    deps.reverse();

    Ok(deps)
}

/// Checks whether a branch named `branch_name` already exists in the
/// repository, using a read-only transaction.
pub fn has_branch(opts: &BasicOptions, branch_name: &str) -> Result<bool, Error> {
    let repository = opts.open_repo()?;
    let transaction = repository.txn_begin()?;
    let exists = transaction.has_branch(branch_name);
    Ok(exists)
}

pub fn run(args: &ArgMatches) -> Result<(), Error> {
    let opts = BasicOptions::from_args(args)?;
    let to = args.value_of("to").unwrap();

    if !has_branch(&opts, to)? {
        if let Some(ref hash) = args.value_of("patch") {
            debug!("Creating a new branch {:?} with dependencies of {:?}",
                   to,
                   hash);
            debug!(
                "Creating a new branch {:?} with dependencies of {:?}",
                to, hash
            );

            let deps = patch_dependencies(hash, &opts.repo_root)?;

            apply_resize_no_output(&opts.repo_root, to, deps.iter(), |_, _| ())?;

            println!("Branch {:?} has been created.", to);

            checkout(&opts, to, false, None)
        } else {
            let repo = opts.open_repo()?;
            let mut txn = repo.mut_txn_begin(rand::thread_rng())?;

            let br = opts.branch();
            let branch = txn.open_branch(&br)?;
            let new_branch = txn.fork(&branch, to)?;

            txn.commit_branch(branch)?;
            txn.commit_branch(new_branch)?;

            let partials = txn.iter_partials(&br).take_while(|&(k, _)| k.as_str() == &br).map(|(_, v)| v).collect::<Vec<_>>();
            let partials = txn
                .iter_partials(&br)
                .take_while(|&(k, _)| k.as_str() == &br)
                .map(|(_, v)| v)
                .collect::<Vec<_>>();
            for &key in partials.iter() {







1
2
3
4
5
6

7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27


28
29
30
31
32
33
34
35
36

37
38
39
40
41


42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63


66


69
70

72
73
74
75
76
77
78
79
80
81
82
83
84
85
86

87
88
89


90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116

use error::{ErrorKind, Result};
use std::fs::{canonicalize, metadata, read_dir};
            &opts.pristine_dir(),
            &opts.cwd,
    repo_dir: &Path,
    r: &Path,
) -> Result<()> {
use clap::ArgMatches;
use commands::BasicOptions;
use error::Error;
use libpijul;
use libpijul::fs_representation::RepoRoot;
use rand;
use error::{Result, ErrorKind};
use std::fs::{canonicalize, metadata};
use std::path::{Path, PathBuf};

/// The tracking operation that `run`/`really_run` perform on the given
/// files.
#[derive(Debug, Clone, Copy)]
pub enum Operation {
    /// Start tracking files (`add_file`).
    Add,
    /// Stop tracking files (`remove_file`).
    Remove,
}

pub fn run(args: &ArgMatches, op: Operation) -> Result<(), Error> {
    debug!("fs_operation {:?}", op);
    let opts = BasicOptions::from_args(args)?;

    debug!("repo {:?}", &opts.repo_root);
    let mut extra_space = 409600;
    let recursive = args.is_present("recursive");
    loop {
        let touched_files = match args.values_of("files") {
            Some(l) => l.map(|p| Path::new(p).to_owned()).collect(),
            None => vec![],
        };
        match really_run(&opts.pristine_dir(), &opts.cwd, &opts.repo_root,
                         touched_files, recursive, op, extra_space) {
        match really_run(
            &opts.repo_root,
            &opts.cwd,
            touched_files,
            recursive,
            op,
            extra_space,
        ) {
            Err(ref e) if e.lacks_space() => extra_space *= 2,
            e => return e
            e => return e,
        }
    }
}

fn really_run(repo_dir: &Path, wd: &Path, r: &Path, mut files: Vec<PathBuf>,
              recursive: bool, op: Operation, extra_space: u64) -> Result<()> {
fn really_run(
    //    repo_dir: &RepoRoot<&'static Path>,
    repo_dir: &RepoRoot<PathBuf>,
    wd: &Path,
    mut files: Vec<PathBuf>,
    recursive: bool,
    op: Operation,
    extra_space: u64,
) -> Result<(), Error> {
    debug!("files {:?}", files);
    let mut rng = rand::thread_rng();
    let repo = repo_dir.open_repo(Some(extra_space))?;
    let mut txn = repo.mut_txn_begin(&mut rng)?;
    match op {
        Operation::Add => {
            for file_ in files.drain(..) {
                let p = canonicalize(wd.join(&file_))?;
                if recursive {
                    debug!("adding from {:?}", p);
                    let mut files = Vec::new();
                    for file in repo_dir.untracked_files(&txn, &p) {
                        debug!("untracked {:?}", file);

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
                        if let Ok(file) = file.strip_prefix(r) {

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

================================
                        let m = metadata(repo_dir.absolutize(&file))?;
                        files.push((file, m.is_dir()));

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
                    }
                    for (file, is_dir) in files {
                        match txn.add_file(&file, is_dir) {
                            Ok(()) => {}
                            Err(libpijul::Error::AlreadyAdded) => {
                                eprintln!("{:?} is already in the repository", file_)
                            }
                            Err(e) => return Err(e.into()),
                        }
                    }
                    continue;
                } else {
                    let m = metadata(&p)?;
                    if let Ok(file) = repo_dir.relativize(&p) {
                        match txn.add_file(&file, m.is_dir()) {
                            Ok(()) => {},
                            Ok(()) => {}
                            Err(libpijul::Error::AlreadyAdded) => {
                                eprintln!("{:?} is already in the repository", file_)
                            },
                            Err(e) => return Err(e.into())
                            }
                            Err(e) => return Err(e.into()),
                        }
                        continue;
                    }
                }
                return Err(Error::InvalidPath { path: file_ });
            }
        }
        Operation::Remove => {
            for file in &files[..] {
                debug!("file: {:?} {:?}", file, wd.join(file));
                let p = wd.join(file).canonicalize()?;
                debug!("p: {:?}", p);
                if let Ok(file) = repo_dir.relativize(&p) {
                    debug!("remove_file {:?}", file);
                    txn.remove_file(&file)?
                } else {
                    return Err(Error::InvalidPath {
                        path: file.to_path_buf(),
                    });
                }
            }
        }
    }
    txn.commit()?;
    Ok(())



1
2
3
4

5

6

7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43

44
45
46

47
48
49
50

51
use clap::{AppSettings, Arg, ArgGroup, ArgMatches, Shell, SubCommand};
use cli;
use commands::{default_explain, StaticSubcommand};
use error::Error;
use std::io;


/// Builds the (hidden) clap subcommand for `pijul generate-completions`.
/// Exactly one shell flag is required, enforced by the "shells" group.
pub fn invocation() -> StaticSubcommand {
    return SubCommand::with_name("generate-completions")
        .setting(AppSettings::Hidden)
        .about("Generate shell completions for pijul to stdout")
        .group(
            ArgGroup::with_name("shells")
                .args(&["bash", "fish", "zsh", "powershell"])
                .required(true),
        )
        .arg(
            Arg::with_name("bash")
                .long("bash")
                .help("Completions for Bash"),
        )
        .arg(
            Arg::with_name("zsh")
                .long("zsh")
                .help("Completions for Zsh"),
        )
        .arg(
            Arg::with_name("fish")
                .long("fish")
                .help("Completions for Fish"),
        )
        .arg(
            Arg::with_name("powershell")
                .long("powershell")
                .help("Completions for Powershell"),
        );
}

pub fn run(args: &ArgMatches) -> Result<(), Error> {
    if args.is_present("bash") {
        cli::build_cli().gen_completions_to("pijul", Shell::Bash, &mut io::stdout());
        Ok(())
    } else if args.is_present("zsh") {
        cli::build_cli().gen_completions_to("pijul", Shell::Zsh, &mut io::stdout());
        Ok(()) 
        Ok(())
    } else if args.is_present("fish") {
        cli::build_cli().gen_completions_to("pijul", Shell::Fish, &mut io::stdout());
        Ok(()) 
        Ok(())
    } else {
        assert!(args.is_present("powershell"));
        cli::build_cli().gen_completions_to("pijul", Shell::PowerShell, &mut io::stdout());
        Ok(()) 
        Ok(())







1
2
3
4
5

6
7
8
9
10
11
12
13
14
15
16

17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33

34
35
36
37
use std::path::{Path};
use std::process::{Command};
use libpijul::fs_representation::PIJUL_DIR_NAME;
use error::{ErrorKind, Result};
    repo_root: &Path,
) -> Result<()> {
            return Err(ErrorKind::HookFailed(String::from(hook)).into());
use error::Error;
use libpijul::fs_representation::{RepoRoot, PIJUL_DIR_NAME};
use std::path::Path;
use std::process::Command;

use error::{Result, ErrorKind};
pub fn run_hook(
    repo_root: &RepoRoot<impl AsRef<Path>>,
    hook: &'static str,
    additional_arg: Option<&String>,
) -> Result<(), Error> {
    let repo_root = &repo_root.repo_root;
    let mut cmd = repo_root.as_ref().to_path_buf();
    cmd.push(PIJUL_DIR_NAME);
    cmd.push("hooks");
    cmd.push(hook);

    if cmd.is_file()  {
    if cmd.is_file() {
        println!("Running hook: {}", hook);

        let arg = match additional_arg {
            Some(ref arg) => vec![*arg],
            None => vec![],
        };

        let output = Command::new(cmd.as_path())
            .args(arg)
            .current_dir(repo_root)
            .output()?;

        if !output.status.success() {
            if let Ok(err) = String::from_utf8(output.stderr) {
                print!("{}", err);
            }
            return Err(ErrorKind::HookFailed(String::from(hook)).into())
            return Err(Error::HookFailed {
                cmd: String::from(hook),
            });
        }

1
2
3
4
5
6

7
8
9
10
11


















12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60

61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78


79
80
81
82
83
84
use clap::{SubCommand, Arg, ArgMatches, AppSettings};
use clap::{AppSettings, Arg, ArgMatches, SubCommand};
use libpijul::Inode;
use std::fs::File;

use super::{default_explain, BasicOptions, StaticSubcommand};
use error::Error;
use super::{BasicOptions, StaticSubcommand, default_explain};

pub fn invocation() -> StaticSubcommand {
    return SubCommand::with_name("info")
        .setting(AppSettings::Hidden)
        .about("Get information about the current repository, if any")
        .arg(Arg::with_name("debug")
             .long("--debug")
             .help("Pijul info will be given about this directory.")
             .required(false))
        .arg(Arg::with_name("inode")
             .long("--from-inode")
             .help("Inode to start the graph from.")
             .takes_value(true)
             .required(false))
        .arg(Arg::with_name("all")
             .short("a"))
        .arg(Arg::with_name("exclude-parents")
             .long("exclude-parents"))
        .arg(Arg::with_name("folder")
             .short("f")
             .help("show only folder edges"))
        .arg(Arg::with_name("introduced_by")
             .long("introducedby"));
        .arg(
            Arg::with_name("debug")
                .long("--debug")
                .help("Pijul info will be given about this directory.")
                .required(false),
        )
        .arg(
            Arg::with_name("inode")
                .long("--from-inode")
                .help("Inode to start the graph from.")
                .takes_value(true)
                .required(false),
        )
        .arg(Arg::with_name("all").short("a"))
        .arg(Arg::with_name("exclude-parents").long("exclude-parents"))
        .arg(
            Arg::with_name("folder")
                .short("f")
                .help("show only folder edges"),
        )
        .arg(Arg::with_name("introduced_by").long("introducedby"));
}

pub fn run(args: &ArgMatches) -> Result<(), Error> {
    let opts = BasicOptions::from_args(args)?;
    let repo = opts.open_repo()?;

    let txn = repo.txn_begin()?;
    let stats = txn.txn.statistics();
    let pages = txn.page_counts();
    println!("{:?}", stats);
    println!("{:?}", pages);
    println!("page count sum: {:?}", pages.sum());
    println!(
        "total referenced: {:?}",
        pages.sum() + stats.free_pages.len() + stats.bookkeeping_pages.len() + 1
    );

    if args.is_present("debug") {
        txn.dump();
        if let Some(ref inode) = args.value_of("inode") {
            // Output just the graph under `inode`.
            if let Some(inode) = Inode::from_hex(inode) {
                if let Some(node) = txn.get_inodes(inode) {
                    let node = node.key;
                    debug!("node {:?}", node);
                    for branch in txn.iter_branches(None) {
                        let ret = txn.retrieve(&branch, node);
                        let mut f = File::create(format!("debug_{}", branch.name.as_str()))?;
                        ret.debug(&txn, &branch, args.is_present("all"), args.is_present("introduced_by"), &mut f)?
                        ret.debug(
                            &txn,
                            &branch,
                            args.is_present("all"),
                            args.is_present("introduced_by"),
                            &mut f,
                        )?
                    }
                }
            }
        } else {
            // Output everything.
            for branch in txn.iter_branches(None) {
                if args.is_present("folder") {
                    let mut f = File::create(format!("folders_{}", branch.name.as_str()))?;
                    txn.debug_folders(branch.name.as_str(), &mut f);
                } else {
                    let mut f = File::create(format!("debug_{}", branch.name.as_str()))?;
                    txn.debug(branch.name.as_str(), &mut f, args.is_present("exclude-parents"));
                    txn.debug(branch.name.as_str(), &mut f, args.is_present("exclude-parents"));
                    txn.debug(
                        branch.name.as_str(),
                        &mut f,
                        args.is_present("exclude-parents"),
                    );
                }


1
2
3
4
5
6


7
8
9
10
11




12
13
14
15
16
17
18
19
20
21
22
23
24
25
26

27
28
use clap::{SubCommand, Arg, ArgMatches};
use error::{Error, ErrorKind, Result};
use clap::{Arg, ArgMatches, SubCommand};
use commands::{create_repo, default_explain, StaticSubcommand};
use error::Error;
use std::env::current_dir;
use std::io::{stderr, Write};
use std::path::Path;
use error::{Result, ErrorKind, Error};
use commands::{StaticSubcommand, create_repo, default_explain};
use std::process::exit;

pub fn invocation() -> StaticSubcommand {
    return SubCommand::with_name("init")
        .about("Create a new repository")
        .arg(Arg::with_name("directory")
            .index(1)
            .help("Where to create the repository, defaults to the current directory.")
            .required(false));
        .arg(
            Arg::with_name("directory")
                .index(1)
                .help("Where to create the repository, defaults to the current directory.")
                .required(false),
        );
}

pub fn run(args: &ArgMatches) -> Result<(), Error> {
    // Since the location may not exist, we can't always canonicalize,
    // which doesn't really matter since we're going to explore the
    // whole path in `find_repo_root`.
    let wd = match args.value_of("directory").map(Path::new) {
        Some(r) if r.is_relative() => current_dir()?.join(r),
        Some(r) => r.to_path_buf(),
        None => current_dir()?
        None => current_dir()?,
    };










































































































































1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43

44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60






















61
62
63
64
65
66
67
68
69
70
71
72
73
74
75

















76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97


use clap::{SubCommand, Arg, ArgMatches};
use error::{ErrorKind, Error};
use commands::{BasicOptions, StaticSubcommand};
use std::io::{Write,stderr};
use std::process::exit;
use meta;
use meta::KeyType;
use thrussh;
use super::ask;
use clap::{Arg, ArgMatches, SubCommand};
use commands::{BasicOptions, StaticSubcommand};
use cryptovec;
use error::{Error, ErrorKind};
use meta::KeyType;
use std::path::PathBuf;
use std::sync::Arc;
use thrussh;
use thrussh::{client, ChannelId};
use thrussh_keys;
use thrussh_keys::key;
            port: args.value_of("port")
                .and_then(|x| x.parse().ok())
                .unwrap_or(22),
            repository: if args.is_present("repository") {
                Some(BasicOptions::from_args(args)?.repo_dir())
            } else {
                None
            },
            remote_cmd: super::remote_pijul_cmd(),
            signing: args.is_present("signing") || !args.is_present("ssh"),
            ssh: args.is_present("ssh") || !args.is_present("signing"),
            local: if args.is_present("repository") || args.is_present("local") {
                Some(BasicOptions::from_args(args)?.repo_dir())
            } else {
                None
            },
        Params::Upload {
            address,
            port,
            repository,
            remote_cmd,
        } => match meta::load_global_or_local_signing_key(repository.as_ref()) {
            Ok(key) => {
                let config = Arc::new(thrussh::client::Config::default());
                let ssh_user_host = Regex::new(r"^([^@]*)@(.*)$").unwrap();
                let (user, server) = if let Some(cap) = ssh_user_host.captures(&address) {
                    (cap[1].to_string(), cap[2].to_string())
                } else {
                    (username::get_user_name().unwrap(), address.to_string())
                };
                let mut l = tokio_core::reactor::Core::new().unwrap();
                let h = l.handle();
                let client = SshClient::new(port, &server, key, &h);
                use super::ssh_auth_attempts::{AuthAttemptFuture, AuthAttempts};
                let use_agent = client.agent.is_some();
                l.run(thrussh::client::connect_future(
                    h,
                    (server.as_str(), port),
                    config,
                    None,
                    client,
                    |connection| {
                        AuthAttemptFuture::new(
                            connection,
                            AuthAttempts::new(user.to_string(), repository, use_agent),
                            user,
                        ).from_err()
                            .and_then(|session| {
                                session.channel_open_session().and_then(
                                    |(mut session, channelid)| {
                                        session.exec(
                                            channelid,
                                            false,
                                            &format!("{} challenge", remote_cmd),
                                        );
                                    },
                                )
                            })
                    },
                )?).unwrap();
            Err(e) => return Err(e),
        },
        Params::Gen {
            signing,
            ssh,
            local,
        } => {
use tokio_core;
            UnixStream::connect(path, h)
                .ok()
                .map(thrussh_keys::agent::client::AgentClient::connect)
    type FutureSign =
        Box<futures::Future<Item = (Self, cryptovec::CryptoVec), Error = Self::Error>>;
    fn auth_publickey_sign(
        mut self,
        key: &thrussh_keys::key::PublicKey,
        mut to_sign: cryptovec::CryptoVec,
    ) -> Self::FutureSign {
                agent
                    .sign_request(key, &to_sign)
                    .from_err(),
        let path = std::env::home_dir()
            .unwrap()
            .join(".ssh")
            .join("known_hosts");
        match thrussh_keys::check_known_hosts_path(&self.host, self.port, &server_public_key, &path)
        {
                if let Ok(false) = ask::ask_learn_ssh(&self.host, self.port, "") {
                    thrussh_keys::learn_known_hosts_path(
                        &self.host,
                        self.port,
                        &server_public_key,
                        &path,
                    ).unwrap();
                println!(
                    "Host key changed! Someone might be eavesdropping this communication, \
                     refusing to continue. Previous key found line {}",
                    line
                );
            },
    fn data(
        self,
        channel: ChannelId,
        _: Option<u32>,
        data: &[u8],
        mut session: client::Session,
    ) -> Self::SessionUnit {
            self.key_pair.sign_detached(data).unwrap(),
    fn exit_status(
        mut self,
        channel: thrussh::ChannelId,
        exit_status: u32,
        session: thrussh::client::Session,
    ) -> Self::SessionUnit {
        debug!(
            "exit_status received on channel {:?}: {:?}:",
            channel, exit_status
        );
use super::BasicOptions;
use clap::{AppSettings, Arg, ArgMatches, SubCommand};
use commands::StaticSubcommand;
use error::Error;
use meta;
use meta::{load_signing_key, Meta};
use std::io::{stderr, Write};
use std::process::exit;

pub fn invocation() -> StaticSubcommand {
    return SubCommand::with_name("key")
        .about("Manage signing and SSH keys")
        .subcommand(
            SubCommand::with_name("upload")
                .about("Upload keys to a remote server")
                .arg(Arg::with_name("port")
                     .long("port")
                     .short("p")
                     .help("Port of the SSH server.")
                     .takes_value(true)
                     .required(false))
                .arg(Arg::with_name("local")
                     .long("local")
                     .help("Save keys for the local repository only")
                     .takes_value(false)
                     .required(false))
                .arg(Arg::with_name("address")
                     .help("Address to use, for instance pijul_org@nest.pijul.com.")
                     .takes_value(true)
                     .required(true))
        )
        .subcommand(
            SubCommand::with_name("gen")
                .about("Generate keys. This command generates an SSH key if --signing-id is not given")
                .arg(Arg::with_name("signing-id")
                     .long("signing-id")
                     .help("Generate a signing key for this user id (user ids are email addresses)")
                     .takes_value(true))
                .arg(Arg::with_name("repository")
                     .long("for-repository")
                     .help("Save keys for the given repository only")
                     .takes_value(true)
                     .required(false))
        )
        )
        .subcommand(
            SubCommand::with_name("register")
                .setting(AppSettings::Hidden)
                .about("Register a signature key given in binary on the standard input")
        );
}

/// Parsed form of the `key` subcommand's arguments, produced by
/// `parse_args` and consumed by `run`.
pub enum Params<'a> {
    /// `key upload`: send keys to `address`, optionally on a non-default
    /// SSH `port`.
    Upload { address: &'a str, port: Option<u16> },
    /// `key gen`: generate keys; `signing` carries the `--signing-id`
    /// value when a signing key (rather than an SSH key) is requested.
    Gen { signing: Option<&'a str> },
    /// `key register`: read a signature key in binary from stdin.
    Register,
    /// No recognized subcommand was given.
    None,
}

/// Parses the `key` subcommand's matches into a [`Params`] value.
///
/// Returns `Params::None` when no (or an unknown) subcommand was given.
pub fn parse_args<'a>(args: &'a ArgMatches) -> Result<Params<'a>, Error> {
    // NOTE: the merged diff had left the old match arms in place alongside
    // the new ones; the old arms referenced fields (`repository`,
    // `remote_cmd`, `ssh`, `local`) that no longer exist on `Params` and
    // duplicated the `upload`/`gen` patterns. Only the correct arms remain.
    match args.subcommand() {
        ("upload", Some(args)) => Ok(Params::Upload {
            // `address` is a required clap argument, so `unwrap` cannot fail
            // once the subcommand matched.
            address: args.value_of("address").unwrap(),
            // Invalid port strings are treated as "not given".
            port: args.value_of("port").and_then(|x| x.parse().ok()),
        }),
        ("gen", Some(args)) => Ok(Params::Gen {
            signing: args.value_of("signing-id"),
        }),
        ("register", _) => Ok(Params::Register),
        _ => Ok(Params::None),
    }
}

pub fn run(arg_matches: &ArgMatches) -> Result<(), Error> {
    let mut global = meta::Global::load().unwrap_or_else(|_| meta::Global::new());
    match parse_args(arg_matches)? {
        Params::Upload { address, port, repository, remote_cmd }=> {
            match meta::load_global_or_local_signing_key(repository.as_ref()) {
                Ok(key) => {
                    let config = Arc::new(thrussh::client::Config::default());
                    let ssh_user_host = Regex::new(r"^([^@]*)@(.*)$").unwrap();
                    let (user, server) =
                        if let Some(cap) = ssh_user_host.captures(&address) {
                        if let Some(cap) = ssh_user_host.captures(&address) {
                            (cap[1].to_string(), cap[2].to_string())
                        } else {
                        } else {
                            (username::get_user_name().unwrap(), address.to_string())
                        };
                        };

                    let mut l = tokio_core::reactor::Core::new().unwrap();
                    let h = l.handle();
        Params::Upload { address, port } => {
            let local_key = BasicOptions::from_args(arg_matches).ok().and_then(|opts| {
                Meta::load(&opts.repo_root)
                    .ok()
                    .and_then(|meta| meta.signing_key)
            });
            let key = local_key
                .or(global.signing_key)
                .map(|s| load_signing_key(s));
            match key {
                Some(Ok(mut keys)) => {
                    if keys.keys.is_empty() {
                        return Ok(());
                    }
                    if let Some(remote) = super::remote::parse_ssh_remote_nopath(address, port) {
                        debug!("sending key");
                        remote.session()?.send_key(keys)?
                    }
                }
                Some(Err(e)) => return Err(e),
                None => return Ok(()),
            }
        }
        Params::Gen { signing, ssh, local } => {





1
2
3
4
5
6
7
8
9

10
11
12

13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107

108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
use error::{Error, ErrorKind};
use std::path::Path;
        let path = args.values_of("path")
            .map(|x| x.collect())
            .unwrap_or(Vec::new());
use clap::{Arg, ArgMatches, SubCommand};
use commands::patch::print_patch;
use commands::{ask, default_explain, BasicOptions, StaticSubcommand};
use error::Error;
use libpijul::fs_representation::RepoPath;
use libpijul::patch::Patch;
use libpijul::{Branch, PatchId, Txn};
use regex::Regex;
use std::fs::File;
use std::path::Path;
use std::io::{BufReader, Read};
use std::path::PathBuf;
use term;
use regex::Regex;

/// Builds the `log` subcommand, which lists the patches applied to a
/// branch, with filters on path, name/description regex, and position
/// (`--first`/`--last`), plus output options (`--hash-only`,
/// `--repository-id`, `--patch`).
pub fn invocation() -> StaticSubcommand {
    SubCommand::with_name("log")
        .about("List the patches applied to the given branch")
        .arg(
            Arg::with_name("repository")
                .long("repository")
                .help("Path to the repository to list.")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("branch")
                .long("branch")
                .help("The branch to list.")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("internal-id")
                .long("internal-id")
                .help("Display only patches with these internal identifiers.")
                .multiple(true)
                .takes_value(true),
        )
        .arg(
            Arg::with_name("hash-only")
                .long("hash-only")
                // Fixed help text: this flag prints patch hashes ("patch",
                // not "path").
                .help("Only display the hash of each patch."),
        )
        .arg(
            Arg::with_name("repository-id")
                .long("repository-id")
                .help("display a header with the repository id")
        )
        .arg(
            Arg::with_name("path")
                .long("path")
                .multiple(true)
                .takes_value(true)
                .help("Only display patches that touch the given path."),
        )
        .arg(
            Arg::with_name("grep")
                .long("grep")
                .multiple(true)
                .takes_value(true)
                .help("Search patch name and description with a regular expression."),
        )
        .arg(
            Arg::with_name("last")
                .long("last")
                .takes_value(true)
                .help("Show only the last n patches. If `--first m` is also used, then (a) if the command normally outputs the last patches first, this means the last n patches of the first m ones. (b) Else, it means the first m patches of the last n ones."),
        )
        .arg(
            Arg::with_name("first")
                .long("first")
                .takes_value(true)
                .help("Show only the last n patches. If `--last m` is also used, then (a) if the command normally outputs the last patches first, this means the last m patches of the first n ones. (b) Else, it means the first n patches of the last m ones."),
        )
        .arg(
            Arg::with_name("patch")
                .long("patch")
                .short("p")
                .help("Show patches"),
        )
}

/// Parsed and normalized `log` options, built once by `Settings::parse`
/// and consulted throughout `run`.
struct Settings<'a> {
    // Print only `hash:position` lines (`--hash-only`).
    hash_only: bool,
    // Print a repository-id header read from the id file (`--repository-id`).
    show_repoid: bool,
    // Print each patch's full contents after its description (`--patch`).
    show_patches: bool,
    // Regexes from `--grep`, matched against patch name and description.
    regex: Vec<Regex>,
    // Repository/branch options shared by all commands.
    opts: BasicOptions<'a>,
    // Paths from `--path`, relativized to the repository root; a patch is
    // shown only if it touched one of them (empty = no filter).
    path: Vec<RepoPath<PathBuf>>,
    // `--first n`: limit from the front of the listing order.
    first: Option<usize>,
    // `--last n`: limit from the back of the listing order.
    last: Option<usize>,
}

impl<'a> Settings<'a> {
    /// Parses the `log` command-line matches into a `Settings` value.
    ///
    /// Compiles every `--grep` pattern (returning an error on an invalid
    /// regex) and relativizes every `--path` argument against the
    /// repository root (returning an error for paths outside it).
    fn parse(args: &'a ArgMatches) -> Result<Self, Error> {
        let basic_opts = BasicOptions::from_args(args)?;
        let hash_only = args.is_present("hash-only");
        // Non-numeric `--first`/`--last` values are silently ignored.
        let first = args.value_of("first").and_then(|x| x.parse().ok());
        let last = args.value_of("last").and_then(|x| x.parse().ok());
        let show_patches = args.is_present("patch");
        let show_repoid = args.is_present("repository-id");
        let mut regex = Vec::new();
        if let Some(regex_args) = args.values_of("grep") {
            for r in regex_args {
                debug!("regex: {:?}", r);
                regex.push(Regex::new(r)?)
            }
        }
        // NOTE: a dead pre-rustfmt leftover (`let path = args.values_of(…)
        // .map(|x| x.collect())…`) that was immediately shadowed by this
        // match has been removed.
        let path = match args.values_of("path") {
            Some(arg_paths) => {
                let mut paths = Vec::new();
                for path in arg_paths {
                    let p = basic_opts.cwd.join(path);
                    // Canonicalize when possible; the path may not exist on
                    // disk (e.g. it was deleted by a patch), in which case
                    // the joined path is used as-is.
                    let p = std::fs::canonicalize(&p).unwrap_or(p);
                    paths.push(basic_opts.repo_root.relativize(&p)?.to_owned());
                }
                paths
            }
            None => Vec::new(),
        };
        Ok(Settings {
            hash_only,
            show_patches,
            show_repoid,
            regex,
            opts: basic_opts,
            path,
            first,
            last,
        })
    }
}

impl<'a> Settings<'a> {
    /// Prints one patch (the `nth` applied on `branch`), honoring the
    /// regex filter and the `--hash-only`/`--patch` output options.
    ///
    /// Returns `Ok(())` without printing anything when the patch matches
    /// none of the `--grep` regexes.
    fn display_patch_(
        &self,
        txn: &Txn,
        branch: &Branch,
        nth: u64,
        patchid: PatchId,
    ) -> Result<(), Error> {
        // Internal patch ids always have an external hash; failure here is
        // a broken repository invariant.
        let hash_ext = txn.get_external(patchid).unwrap();
        debug!("hash: {:?}", hash_ext.to_base58());

        // Evaluate the regex filter. The patch header is only read from
        // disk when there are regexes; when it is read, it is kept in
        // `o_patch` so the non-hash-only branch below can reuse it instead
        // of reading the file a second time.
        let (matches_regex, o_patch) = if self.regex.is_empty() {
            (true, None)
        } else {
            let patch = self.opts.repo_root.read_patch_nochanges(hash_ext)?;
            let does_match = {
                // A missing description matches like an empty string.
                let descr = match patch.description {
                    Some(ref d) => d,
                    None => "",
                };
                // One regex matching either name or description suffices.
                self.regex
                    .iter()
                    .any(|ref r| r.is_match(&patch.name) || r.is_match(descr))
            };
            (does_match, Some(patch))
        };
        if !matches_regex {
            return Ok(());
        };

        if self.hash_only {
            // Machine-readable form: "<base58 hash>:<position>".
            println!("{}:{}", hash_ext.to_base58(), nth);
        } else {
            // Reuse the header loaded for the regex check, if any;
            // otherwise read it now.
            let patch = match o_patch {
                None => self.opts.repo_root.read_patch_nochanges(hash_ext)?,
                Some(patch) => patch,
            };
            let mut term = term::stdout();
            ask::print_patch_descr(&mut term, &hash_ext.to_owned(), Some(patchid), &patch);
        }

        if self.show_patches {
            // Full patch contents live in "<patches dir>/<hash>.gz".
            let mut patch_path = self.opts.repo_root.patches_dir().join(hash_ext.to_base58());
            patch_path.set_extension("gz");
            let f = File::open(&patch_path)?;

            let mut f = BufReader::new(f);
            let (hash, _, patch) = Patch::from_reader_compressed(&mut f)?;

            print_patch(&hash, &patch, txn, branch)?;
            println!();
        }

        Ok(())
    }

    /// Prints the patch via `display_patch_` unless a `--path` filter is
    /// active and the patch touched none of the requested paths.
    fn display_patch(
        &self,
        txn: &Txn,
        branch: &Branch,
        n: u64,
        patchid: PatchId,
    ) -> Result<(), Error> {
        // No path filter: every patch is displayed.
        if self.path.is_empty() {
            return self.display_patch_(txn, branch, n, patchid);
        }
        // Display the patch at most once: as soon as one of the requested
        // paths is found to be touched by it. Paths with no inode entry are
        // skipped silently.
        for path in &self.path {
            let inode = txn.find_inode(&path)?;
            let key = match txn.get_inodes(inode) {
                Some(k) => k.key,
                None => continue,
            };
            if txn.get_touched(key, patchid) {
                self.display_patch_(txn, branch, n, patchid)?;
                break;
            }
        }
        Ok(())
    }

    /// Returns whether the patch passes the `--path` filter: trivially
    /// true with no filter, otherwise true if the patch touched at least
    /// one of the requested paths.
    fn is_touched(&self, txn: &Txn, patchid: PatchId) -> bool {
        if self.path.is_empty() {
            return true;
        }
        for path in &self.path {
            // Paths that don't resolve to an inode (or inodes without a
            // node entry) simply don't match.
            if let Ok(inode) = txn.find_inode(&path) {
                if let Some(key) = txn.get_inodes(inode) {
                    if txn.get_touched(key.key, patchid) {
                        return true;
                    }
                }
            }
        }
        false
    }
}

pub fn run(args: &ArgMatches) -> Result<(), Error> {
    let settings = Settings::parse(args)?;
    let repo = settings.opts.open_repo()?;
    let txn = repo.txn_begin()?;
    let branch = match txn.get_branch(&settings.opts.branch()) {
        Some(b) => b,
        None => return Err(Error::NoSuchBranch),
    };

    if settings.show_repoid {
        let id_file = settings.opts.repo_root.id_file();
        let mut f = File::open(&id_file)?;
        let mut s = String::new();
        f.read_to_string(&mut s)?;
        if settings.hash_only {
            println!("{}", s.trim());
        } else {
            println!("Repository id: {}", s.trim());
            println!();
        }
    };
    if settings.hash_only {
        // If in binary form, show the patches in chronological order.
        let start = settings.last.and_then(|last| {
            txn.rev_iter_applied(&branch, None)
                .filter(|(_, patchid)| {
                    // Only select patches that touch the input path
                    // (if that path exists).
                    settings.is_touched(&txn, *patchid)
                })
                .take(last)
                .last()
                .map(|(n, _)| n)
        });
        debug!("start {:?}", start);
        for (n, (applied, patchid)) in txn.iter_applied(&branch, start).enumerate() {
            if let Some(first) = settings.first {
                if n >= first {
                    break;
                }
            }
            settings.display_patch(&txn, &branch, applied, patchid)?
        }
        return Ok(());
    }

    let txn = repo.txn_begin()?;
    if let Some(v) = args.values_of("internal-id") {
        for (n, patchid) in v.filter_map(|x| PatchId::from_base58(x)).enumerate() {
            settings.display_patch(&txn, &branch, n as u64, patchid)?;
        }
    } else {
        let first = if let Some(first) = settings.first {
            txn.iter_applied(&branch, None)
                .filter(|(_, patchid)| settings.is_touched(&txn, *patchid))
                .take(first)
                .last()
                .map(|(n, _)| n)
        } else {
            None
        };
        for (n, (applied, patchid)) in txn.rev_iter_applied(&branch, first).enumerate() {
            if let Some(last) = settings.last {
                if n >= last {
                    break;
                }


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
use commands::{BasicOptions, StaticSubcommand, default_explain};
use clap::{SubCommand, ArgMatches, Arg};
use clap::{Arg, ArgMatches, SubCommand};
use commands::{default_explain, BasicOptions, StaticSubcommand};
use error;

/// Builds the `ls` subcommand, which lists the files tracked by the
/// repository, optionally restricted to a prefix.
pub fn invocation() -> StaticSubcommand {
    // Optional prefix to restrict the listing.
    let dir = Arg::with_name("dir")
        .multiple(true)
        .help("Prefix of the list");
    // Repository to operate on (defaults elsewhere when absent).
    let repository = Arg::with_name("repository")
        .long("repository")
        .help("Repository to list.")
        .takes_value(true);
    return SubCommand::with_name("ls")
        .about("List tracked files")
        .arg(dir)
        .arg(repository);
}










1
2
3
4
5
6

7

8
9




10


11









12

13

14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36

37


38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57






























58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96

97
98
99
100
101
102
103
104

105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169

170
171
172
173
174
175

176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200

202


205


208
209
210


213


216
217
218

220
221
222
223
224
225
226
227
228
229
230
231



232
233
234
235
236
237


238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268

pub mod challenge;
pub mod delete_branch;
use error::{ErrorKind, Result};
        delete_branch::invocation(),
        show_dependencies::invocation(),
        challenge::invocation(),
            return Err(ErrorKind::InARepository(dir.to_owned()).into());
            return Err(ErrorKind::NotInARepository.into());
            let root = self.args
fn setup_pager() {}
use clap;
use clap::ArgMatches;
pub type StaticSubcommand = clap::App<'static, 'static>;

mod ask;
mod fs_operation;
mod ssh_auth_attempts;
pub mod remote;
mod ask;
mod ssh_auth_attempts;

pub mod hooks;
pub mod info;
pub mod init;
pub mod record;
pub mod add;
pub mod pull;
pub mod push;
pub mod apply;
pub mod clone;
pub mod remove;
pub mod mv;
pub mod ls;
pub mod revert;
pub mod unrecord;
pub mod log;
pub mod patch;
pub mod fork;
pub mod branches;
pub mod delete_branch;
pub mod checkout;
pub mod diff;
pub mod clone;
pub mod credit;
pub mod dependencies;
pub mod diff;
pub mod dist;
pub mod fork;
pub mod generate_completions;
pub mod grep;
pub mod hooks;
pub mod info;
pub mod init;
pub mod key;
pub mod log;
pub mod ls;
pub mod mv;
pub mod patch;
pub mod prune;
pub mod pull;
pub mod push;
pub mod record;
pub mod remove;
pub mod revert;
pub mod rollback;
pub mod status;
pub mod sign;
pub mod challenge;
pub mod generate_completions;
pub mod status;
pub mod tag;
pub mod unrecord;

mod fold_until;

use error::Error;
use libpijul::fs_representation::{RepoPath, RepoRoot};
use libpijul::Hash;
use libpijul::{fs_representation, Inode, Repository, Txn, DEFAULT_BRANCH};
use rand;
use std::borrow::Cow;
use std::env::current_dir;
use std::env::var;
use std::fs::{canonicalize, create_dir, metadata};
use std::io::{stderr, Write};
use std::path::{Path, PathBuf};
use std::process::exit;

pub fn all_command_invocations() -> Vec<StaticSubcommand> {
    return vec![log::invocation(),
                info::invocation(),
                init::invocation(),
                record::invocation(),
                unrecord::invocation(),
                add::invocation(),
                pull::invocation(),
                push::invocation(),
                apply::invocation(),
                clone::invocation(),
                remove::invocation(),
                mv::invocation(),
                ls::invocation(),
                revert::invocation(),
                patch::invocation(),
                fork::invocation(),
                branches::invocation(),
                delete_branch::invocation(),
                checkout::invocation(),
                diff::invocation(),
                credit::invocation(),
                dist::invocation(),
                key::invocation(),
                rollback::invocation(),
                status::invocation(),
                show_dependencies::invocation(),
                tag::invocation(),
                sign::invocation(),
                challenge::invocation(),
                generate_completions::invocation(),
    return vec![
        log::invocation(),
        info::invocation(),
        init::invocation(),
        record::invocation(),
        unrecord::invocation(),
        add::invocation(),
        pull::invocation(),
        push::invocation(),
        apply::invocation(),
        clone::invocation(),
        remove::invocation(),
        mv::invocation(),
        ls::invocation(),
        revert::invocation(),
        patch::invocation(),
        fork::invocation(),
        branches::invocation(),
        prune::invocation(),
        checkout::invocation(),
        diff::invocation(),
        credit::invocation(),
        dist::invocation(),
        key::invocation(),
        rollback::invocation(),
        status::invocation(),
        dependencies::invocation(),
        tag::invocation(),
        sign::invocation(),
        generate_completions::invocation(),
        grep::invocation(),
    ];
}

/// Resolves the working directory for a repository operation.
///
/// * `None` — canonicalizes the process's current directory.
/// * `Some(relative)` — joins the path onto the current directory, then
///   canonicalizes.
/// * `Some(absolute)` — canonicalizes the path as given.
///
/// Returns an error if the current directory cannot be read or if
/// canonicalization fails (e.g. the path does not exist).
pub fn get_wd(repository_path: Option<&Path>) -> Result<PathBuf, Error> {
    debug!("get_wd: {:?}", repository_path);
    match repository_path {
        None => Ok(canonicalize(current_dir()?)?),
        Some(a) if a.is_relative() => Ok(canonicalize(current_dir()?.join(a))?),
        // Original text carried this arm twice (once without the trailing
        // comma — a syntax error left over from a diff); a single arm is kept.
        Some(a) => Ok(canonicalize(a)?),
    }
}

/// Returns an error if the `dir` is contained in a repository.
pub fn assert_no_containing_repo(dir: &Path) -> Result<(), Error> {
    if metadata(dir).is_ok() {
        if fs_representation::find_repo_root(&canonicalize(dir)?).is_some() {
            return Err(ErrorKind::InARepository(dir.to_owned()).into())