pijul_org / pijul

Reformatting (cargo fmt)

By pmeunier on March 22, 2019
This patch is not signed.
6AhZMPYCSDUeUkz2kyoKvLVmxWYc8zaqXeWmkK6YPTMvzKg6x9Vz7e5kjQY2hDDfM5PFgn7FYjU8tDYfhZXUkJiC
This patch is in the following branches:
latest
master
testing
32
33

34
35
36
37
38

39
40
41
42
43

44
45
46
47
48
49
50

51
52
53
54
55
56
57
58
59
60
61

62
63
64
65
66
67
68
69
70
71

72
73
74
75

76
77
78
79

80
81
82
83
84
85
86
87
    /// `new` (in the new version) start an section equal in both
    /// versions, of length `len`.
    fn equal(&mut self, old: usize, new: usize, len: usize) -> Result<(), Self::Error> { Ok(()) }
    fn equal(&mut self, old: usize, new: usize, len: usize) -> Result<(), Self::Error> {
        Ok(())
    }
    /// Called when a section of length `len`, starting at `old`,
    /// needs to be deleted from the old version.
    fn delete(&mut self, old: usize, len: usize) -> Result<(), Self::Error> { Ok(()) }
    fn delete(&mut self, old: usize, len: usize) -> Result<(), Self::Error> {
        Ok(())
    }
    /// Called when a section of the new version, of length `new_len`
    /// and starting at `new`, needs to be inserted at position `old'.
    fn insert(&mut self, old: usize, new: usize, new_len: usize) -> Result<(), Self::Error> { Ok(()) }
    fn insert(&mut self, old: usize, new: usize, new_len: usize) -> Result<(), Self::Error> {
        Ok(())
    }
    /// Called when a section of the old version, starting at index
    /// `old` and of length `old_len`, needs to be replaced with a
    /// section of length `new_len`, starting at `new`, of the new
    /// version.
    fn replace(&mut self, old: usize, old_len: usize, new: usize, new_len: usize) -> Result<(), Self::Error> {
    fn replace(
        &mut self,
        old: usize,
        old_len: usize,
        new: usize,
        new_len: usize,
    ) -> Result<(), Self::Error> {
        self.delete(old, old_len)?;
        self.insert(old, new, new_len)
    }
    /// Always called at the end of the algorithm.
    fn finish(&mut self) -> Result<(), Self::Error> { Ok(()) }
    fn finish(&mut self) -> Result<(), Self::Error> {
        Ok(())
    }
}

impl<'a, D: Diff + 'a> Diff for &'a mut D {
    type Error = D::Error;
    fn equal(&mut self, old: usize, new: usize, len: usize) -> Result<(), Self::Error> {
        (*self).equal(old, new, len)
    }
    fn delete(&mut self, old: usize, len: usize) -> Result<(), Self::Error>  {
    fn delete(&mut self, old: usize, len: usize) -> Result<(), Self::Error> {
        (*self).delete(old, len)
    }

    fn insert(&mut self, old: usize, new: usize, new_len: usize) -> Result<(), Self::Error>  {
    fn insert(&mut self, old: usize, new: usize, new_len: usize) -> Result<(), Self::Error> {
        (*self).insert(old, new, new_len)
    }

    fn replace(&mut self, old: usize, old_len: usize, new: usize, new_len: usize) -> Result<(), Self::Error>  {
    fn replace(
        &mut self,
        old: usize,
        old_len: usize,
        new: usize,
        new_len: usize,
    ) -> Result<(), Self::Error> {
        (*self).replace(old, old_len, new, new_len)
2
3

4
5
6

7
8
9
10
11

12
13
14
15
16
17
18
19
20


21
22
23
24
25
26
27
28
29
30

31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62


63
64
65
66
67
68
69
70
71
72
73
74
75
76
77

78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97





98
99
100
101
102
103

104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126

127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210

211
212
213
214
215
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::hash::Hash;
use std::ops::Index;
use {myers, Diff, Replace};

/// A borrowed element of an indexable sequence: the container `p`
/// together with an index `i` into it. Comparing two `I`s compares
/// the elements they point at, not the indices.
struct I<'a, S: 'a + Index<usize> + ?Sized> {
    p: &'a S,
    i: usize,
}

impl<'a, A: Index<usize> + 'a> std::fmt::Debug for I<'a, A> where A::Output: std::fmt::Debug {
impl<'a, A: Index<usize> + 'a> std::fmt::Debug for I<'a, A>
where
    A::Output: std::fmt::Debug,
{
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(fmt, "{:?}", &self.p[self.i])
    }
}

impl<'a, 'b, A: Index<usize> + 'b+?Sized, B: Index<usize> + 'b+?Sized> PartialEq<I<'a, A>> for I<'b, B>
where  B::Output: PartialEq<A::Output> {
impl<'a, 'b, A: Index<usize> + 'b + ?Sized, B: Index<usize> + 'b + ?Sized> PartialEq<I<'a, A>>
    for I<'b, B>
where
    B::Output: PartialEq<A::Output>,
{
    fn eq(&self, b: &I<'a, A>) -> bool {
        self.p[self.i] == b.p[b.i]
    }
}

fn unique<A: Hash + Eq, S: Index<usize, Output = A> + ?Sized>(p: &S, e0: usize, e1: usize) -> Vec<I<S>> {
fn unique<A: Hash + Eq, S: Index<usize, Output = A> + ?Sized>(
    p: &S,
    e0: usize,
    e1: usize,
) -> Vec<I<S>> {
    let mut aa = HashMap::new();
    for i in e0..e1 {
        match aa.entry(&p[i]) {
            Entry::Vacant(e) => {
                e.insert(Some(i));
            }
            Entry::Occupied(mut e) => {
                let e = e.get_mut();
                if e.is_some() {
                    *e = None
                }
            }
        }
    }
    let mut v: Vec<_> = aa
        .into_iter()
        .filter_map(|(_, x)| x)
        .map(|i| I { p, i })
        .collect();
    v.sort_by(|a, b| a.i.cmp(&b.i));
    v
}

/// Patience diff algorithm.
pub fn diff<
    A: Hash + Eq,
    B: Hash + Eq + PartialEq<A>,
    S: Index<usize, Output = A>+?Sized,
    T: Index<usize, Output = B>+?Sized,
    S: Index<usize, Output = A> + ?Sized,
    T: Index<usize, Output = B> + ?Sized,
    D: Diff,
>(
    d: &mut D,
    e: &S,
    e0: usize,
    e1: usize,
    f: &T,
    f0: usize,
    f1: usize,
) -> Result<(), D::Error> {
    let au = unique(e, e0, e1);
    let bu = unique(f, f0, f1);

    struct Patience<'a, 'b, 'd, S: 'a + Index<usize>+?Sized, T: 'b + Index<usize>+?Sized, D: Diff + 'd> {
    struct Patience<
        'a,
        'b,
        'd,
        S: 'a + Index<usize> + ?Sized,
        T: 'b + Index<usize> + ?Sized,
        D: Diff + 'd,
    > {
        current_a: usize,
        current_b: usize,
        a1: usize,
        b1: usize,
        a: &'a S,
        b: &'b T,
        d: &'d mut D,
        au: &'a [I<'a, S>],
        bu: &'b [I<'b, T>],
    }
    impl<
            'a,
        'b,
        'd,
        S: 'a + Index<usize>+?Sized,
        T: 'b + Index<usize>+?Sized,
        D: Diff + 'd,
            'b,
            'd,
            S: 'a + Index<usize> + ?Sized,
            T: 'b + Index<usize> + ?Sized,
            D: Diff + 'd,
        > Diff for Patience<'a, 'b, 'd, S, T, D>
        where T::Output: PartialEq<S::Output>
    where
        T::Output: PartialEq<S::Output>,
    {
        type Error = D::Error;
        fn equal(&mut self, old: usize, new: usize, len: usize) -> Result<(), D::Error> {
            // eprintln!("equal {:?} {:?} {:?}", old, new, len);
            for (old, new) in (old..old + len).zip(new..new + len) {
                let a0 = self.current_a;
                let b0 = self.current_b;
                while self.current_a < self.au[old].i
                    && self.current_b < self.bu[new].i
                    && self.b[self.current_b] == self.a[self.current_a]
                {
                    self.current_a += 1;
                    self.current_b += 1;
                }
                if self.current_a > a0 {
                    self.d.equal(a0, b0, self.current_a - a0)?
                }
                // let a = &self.a[self.current_a..self.au[old].i];
                // let b = &self.b[self.current_b..self.bu[new].i];
                // eprintln!("matching a: {:?} {:?}", self.current_a, self.au[old].i);
                // eprintln!("matching b: {:?} {:?}", self.current_b, self.bu[new].i);
                myers::diff_offsets(self.d, self.a, self.current_a, self.au[old].i, self.b, self.current_b, self.bu[new].i)?;
                myers::diff_offsets(
                    self.d,
                    self.a,
                    self.current_a,
                    self.au[old].i,
                    self.b,
                    self.current_b,
                    self.bu[new].i,
                )?;
                self.current_a = self.au[old].i;
                self.current_b = self.bu[new].i;
            }
            Ok(())
        }
        /*
        fn insert(&mut self, old: usize, new: usize, len: usize) -> Result<(), D::Error> {
            eprintln!("insert {:?} {:?} {:?}", old, new, len);
            Ok(())
        }
        fn delete(&mut self, old: usize, len: usize) -> Result<(), D::Error> {
            eprintln!("delete {:?} {:?}", old, len);
            Ok(())
        }
        fn replace(
            &mut self,
            old: usize,
            len: usize,
            new: usize,
            new_len: usize,
        ) -> Result<(), D::Error> {
            eprintln!("replace {:?} {:?} {:?} {:?}", old, len, new, new_len);
            Ok(())
        }
        */
        fn finish(&mut self) -> Result<(), D::Error> {
            myers::diff(
                self.d,
                self.a,
                self.current_a,
                self.a1,
                self.b,
                self.current_b,
                self.b1,
            )
        }
    }
    let mut d = Replace::new(Patience {
        current_a: e0,
        current_b: f0,
        a: e,
        a1: e1,
        b: f,
        b1: f1,
        d,
        au: &au,
        bu: &bu,
    });
    myers::diff(&mut d, &au, 0, au.len(), &bu, 0, bu.len())?;
    Ok(())
}

#[test]
fn patience() {
    let a: &[usize] = &[11, 1, 2, 2, 3, 4, 4, 4, 5, 47, 19];
    let b: &[usize] = &[10, 1, 2, 2, 8, 9, 4, 4, 7, 47, 18];
    // Records every delete/insert/replace as (old, old_len, new, new_len),
    // with zeroed fields for the sides a plain delete/insert doesn't have.
    struct D(Vec<(usize, usize, usize, usize)>);
    impl Diff for D {
        type Error = ();
        fn delete(&mut self, o: usize, len: usize) -> Result<(), ()> {
            self.0.push((o, len, 0, 0));
            Ok(())
        }
        fn insert(&mut self, o: usize, n: usize, len: usize) -> Result<(), ()> {
            self.0.push((o, 0, n, len));
            Ok(())
        }
        fn replace(&mut self, o: usize, l: usize, n: usize, nl: usize) -> Result<(), ()> {
            self.0.push((o, l, n, nl));
            Ok(())
        }
    }
    let mut d = Replace::new(D(Vec::new()));
    diff(&mut d, a, 0, a.len(), b, 0, b.len()).unwrap();
    let d: D = d.into_inner();
    assert_eq!(
        d.0.as_slice(),
        &[(0, 1, 0, 1), (4, 2, 4, 2), (8, 1, 8, 1), (10, 1, 10, 1)]
    );
}
3
4

5
6
7
8
9
10
11

12
13
14
15
16
17
18
19
20
21
22
23
24
25

26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63

64
65
66
67
68
69
70
71
72
73
74
75

76
77
78
79
80
81
82
83

84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116






117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
/// A "differ" that combines deletions and insertions to give blocks
/// of maximal length, and replacements when appropriate.
pub struct Replace<D:Diff> {
pub struct Replace<D: Diff> {
    d: D,
    del: Option<(usize, usize)>,
    ins: Option<(usize, usize, usize)>,
    eq: Option<(usize, usize, usize)>,
}

impl<D:Diff> Replace<D> {
impl<D: Diff> Replace<D> {
    pub fn new(d: D) -> Self {
        Replace {
            d,
            del: None,
            ins: None,
            eq: None,
        }
    }
    pub fn into_inner(self) -> D {
        self.d
    }
}


impl<D: Diff> AsRef<D> for Replace<D> {
    fn as_ref(&self) -> &D {
        &self.d
    }
}

impl<D: Diff> AsMut<D> for Replace<D> {
    fn as_mut(&mut self) -> &mut D {
        &mut self.d
    }
}

impl<D: Diff> Diff for Replace<D> {
    type Error = D::Error;
    fn equal(&mut self, old: usize, new: usize, len: usize) -> Result<(), D::Error> {
        if let Some((old0, len0)) = self.del.take() {
            if let Some((_, new1, new_len1)) = self.ins.take() {
                self.d.replace(old0, len0, new1, new_len1)?
            } else {
                self.d.delete(old0, len0)?
            }
        } else if let Some((old0, new0, new_len0)) = self.ins.take() {
            self.d.insert(old0, new0, new_len0)?
        }

        if let Some((a, b, c)) = self.eq.take() {
            self.eq = Some((a, b, c + len))
        } else {
            self.eq = Some((old, new, len))
        }
        Ok(())
    }
    fn delete(&mut self, old: usize, len: usize) -> Result<(), D::Error> {
        if let Some((a, b, c)) = self.eq.take() {
            self.d.equal(a, b, c)?
        }
        if let Some((old0, len0)) = self.del.take() {
            assert_eq!(old, old0 + len0);
            self.del = Some((old0, len0+len))
            self.del = Some((old0, len0 + len))
        } else {
            self.del = Some((old, len))
        }
        Ok(())
    }

    fn insert(&mut self, old: usize, new: usize, new_len: usize) -> Result<(), D::Error> {
        if let Some((a, b, c)) = self.eq.take() {
            self.d.equal(a, b, c)?
        }
        if let Some((old1, new1, new_len1)) = self.ins.take() {
            assert_eq!(new1+new_len1, new);
            assert_eq!(new1 + new_len1, new);
            self.ins = Some((old1, new1, new_len + new_len1))
        } else {
            self.ins = Some((old, new, new_len))
        }
        Ok(())
    }

    fn replace(&mut self, old: usize, old_len: usize, new: usize, new_len: usize) -> Result<(), D::Error> {
    fn replace(
        &mut self,
        old: usize,
        old_len: usize,
        new: usize,
        new_len: usize,
    ) -> Result<(), D::Error> {
        if let Some((a, b, c)) = self.eq.take() {
            self.d.equal(a, b, c)?
        }
        self.d.replace(old, old_len, new, new_len)
    }

    fn finish(&mut self) -> Result<(), D::Error> {
        if let Some((a, b, c)) = self.eq.take() {
            self.d.equal(a, b, c)?
        }
        if let Some((old0, len0)) = self.del.take() {
            if let Some((_, new1, new_len1)) = self.ins.take() {
                self.d.replace(old0, len0, new1, new_len1)?
            } else {
                self.d.delete(old0, len0)?
            }
        } else if let Some((old0, new0, new_len0)) = self.ins.take() {
            self.d.insert(old0, new0, new_len0)?
        }
        self.d.finish()
    }
}

#[test]
fn myers() {
    use myers;
    let a: &[&str] = &[">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n", "a\n", "b\n", "c\n",
                       "================================\n", "d\n", "e\n", "f\n",
                       "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n"];
    let b: &[&str] = &[">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n", "x\n", "b\n", "c\n",
                       "================================\n", "y\n", "e\n", "f\n",
                       "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n"];
    let a: &[&str] = &[
        ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n",
        "a\n",
        "b\n",
        "c\n",
        "================================\n",
        "d\n",
        "e\n",
        "f\n",
        "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n",
    ];
    let b: &[&str] = &[
        ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n",
        "x\n",
        "b\n",
        "c\n",
        "================================\n",
        "y\n",
        "e\n",
        "f\n",
        "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n",
    ];


1
2
3
4


5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29


30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47

48
49
50
51
52
53


54
55
56
57
58
59
60
61
62
63
64
65
66
67
68

69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92

93
94
use {Diff, Replace, myers};
use {myers, Diff, Replace};

#[test]
fn test_diff() {
    let a: &[usize] = &[0, 1, 2, 3, 4];
    let b: &[usize] = &[0, 1, 2, 9, 4];
    // Expects exactly one deletion and one insertion at position 3.
    struct D {}
    impl Diff for D {
        type Error = ();
        fn delete(&mut self, o: usize, len: usize) -> Result<(), ()> {
            assert_eq!(o, 3);
            assert_eq!(len, 1);
            println!("delete");
            Ok(())
        }
        fn insert(&mut self, o: usize, n: usize, len: usize) -> Result<(), ()> {
            assert_eq!(o, 3);
            assert_eq!(n, 3);
            assert_eq!(len, 1);
            println!("insert");
            Ok(())
        }
    }
    let mut diff = Replace::new(D {});
    myers::diff(&mut diff, a, 0, a.len(), b, 0, b.len()).unwrap()
}

#[test]
fn test_contiguous() {
    let a: &[usize] = &[0, 1, 2, 3, 4, 4, 4, 5];
    let b: &[usize] = &[0, 1, 2, 8, 9, 4, 4, 7];
    // `Replace` must merge the contiguous delete/insert pairs into
    // replacements, so bare deletes/inserts are errors here.
    struct D {}
    impl Diff for D {
        type Error = ();
        fn delete(&mut self, _o: usize, _len: usize) -> Result<(), ()> {
            panic!("Should not delete")
        }
        fn insert(&mut self, _o: usize, _n: usize, _len: usize) -> Result<(), ()> {
            panic!("Should not insert")
        }
        fn replace(&mut self, o: usize, l: usize, n: usize, nl: usize) -> Result<(), ()> {
            assert!(o != 3 || (l == 2 && nl == 2));
            assert!(o != 7 || (l == 1 && nl == 1));
            println!("replace {:?} {:?} {:?} {:?}", o, l, n, nl);
            Ok(())
        }
    }
    let mut diff = Replace::new(D {});
    myers::diff(&mut diff, a, 0, a.len(), b, 0, b.len()).unwrap();
}

#[test]
fn test_replace() {
    let a: &[usize] = &[0, 1, 2, 3, 4];
    let b: &[usize] = &[0, 1, 2, 7, 8, 9];
    // The differing tails must surface as a single replacement.
    struct D {}
    impl Diff for D {
        type Error = ();
        fn delete(&mut self, _o: usize, _len: usize) -> Result<(), ()> {
            panic!("should not delete")
        }
        fn insert(&mut self, _o: usize, _n: usize, _len: usize) -> Result<(), ()> {
            panic!("should not insert")
        }
        fn replace(&mut self, _o: usize, _l: usize, _n: usize, _nl: usize) -> Result<(), ()> {
            Ok(())
        }
    }
    let mut diff = Replace::new(D {});
    myers::diff(&mut diff, a, 0, a.len(), b, 0, b.len()).unwrap();
}

#[test]
fn test_pat() {
    let a: &[usize] = &[0, 1, 3, 4, 5];
    let b: &[usize] = &[0, 1, 4, 5, 8, 9];
    struct D {}
    impl Diff for D {
        type Error = ();
        fn delete(&mut self, _o: usize, _len: usize) -> Result<(), ()> {
            println!("delete {:?} {:?}", _o, _len);
            Ok(())
        }
        fn insert(&mut self, _o: usize, _n: usize, _len: usize) -> Result<(), ()> {
            println!("insert {:?} {:?} {:?}", _o, _n, _len);
            Ok(())
        }
        fn replace(&mut self, _o: usize, _l: usize, _n: usize, _nl: usize) -> Result<(), ()> {
            println!("replace {:?} {:?} {:?} {:?}", _o, _l, _n, _nl);
            Ok(())
        }
    }
    let mut diff = Replace::new(D{});
    let mut diff = Replace::new(D {});
    myers::diff(&mut diff, a, 0, a.len(), b, 0, b.len()).unwrap();

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172












173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191






192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228

229
230
231
use Result;
use backend::*;
use patch::*;
use rand;
use std::collections::HashSet;
use Result;

impl<'env, T: rand::Rng> MutTxn<'env, T> {
    /// Applies a patch to a repository. "new_patches" are patches that
    /// just this repository has, and the remote repository doesn't have.
    pub fn apply(
        &mut self,
        branch: &mut Branch,
        patch: &Patch,
        patch_id: PatchId,
        timestamp: ApplyTimestamp,
    ) -> Result<()> {
        // Register the patch on the branch in both directions
        // (patch -> timestamp and timestamp -> patch); both inserts
        // must actually insert, i.e. the patch must not already be on
        // this branch, hence the assertions.
        assert!(self.put_patches(&mut branch.patches, patch_id, timestamp)?);
        assert!(self.put_revpatches(&mut branch.revpatches, timestamp, patch_id)?);
        debug!("apply_raw");
        // Here we need to first apply *all* the NewNodes, and then
        // the Edges, because some of the NewNodes might be the
        // children of newly deleted edges, and we need to add the
        // corresponding pseudo-edges.
        for ch in patch.changes().iter() {
            if let Change::NewNodes {
                ref up_context,
                ref down_context,
                ref line_num,
                flag,
                ref nodes,
                ..
            } = *ch
            {
                assert!(!nodes.is_empty());
                debug!("apply: newnodes");
                self.add_new_nodes(
                    branch,
                    patch_id,
                    up_context,
                    down_context,
                    line_num,
                    flag,
                    nodes,
                )?;
            }
        }
        // Scratch sets reused by add_new_edges across all NewEdges
        // changes, to avoid reallocating them per change.
        let mut parents: HashSet<Key<PatchId>> = HashSet::new();
        let mut children: HashSet<Edge> = HashSet::new();
        for ch in patch.changes().iter() {
            if let Change::NewEdges {
                previous,
                flag,
                ref edges,
                ..
            } = *ch
            {
                self.add_new_edges(
                    branch,
                    patch_id,
                    Some(previous),
                    flag,
                    edges,
                    &mut parents,
                    &mut children,
                )?;
                debug!("apply_raw:edges.done");
            }
        }

        // If there is a missing context, add pseudo-edges along the
        // edges that deleted the conflict, until finding (in both
        // directions) an alive context.
        self.repair_deleted_contexts(branch, patch, patch_id)?;

        Ok(())
    }

    /// Delete old versions of `edges`.
    fn delete_old_edge(
        &mut self,
        branch: &mut Branch,
        patch_id: PatchId,
        previous: EdgeFlags,
        from: Key<PatchId>,
        to: Key<PatchId>,
        introduced_by: PatchId,
    ) -> Result<()> {
        debug!(
            "delete {:?} -> {:?} ({:?}) {:?}",
            from, to, previous, introduced_by
        );
        // debug!("delete_old_edges: introduced_by = {:?}", e.introduced_by);
        // The exact edge (with its `previous` flags) that this patch
        // supersedes.
        let mut deleted_e = Edge {
            flag: previous,
            dest: to,
            introduced_by,
        };
        // Record the deleted edge in the cemetery, keyed by the patch
        // that deletes it. NOTE(review): presumably so the deletion
        // can be undone later — confirm against the cemetery's users.
        self.put_cemetery(from, deleted_e, patch_id)?;
        // Remove the edge in both directions; if that exact edge is
        // not present, retry with its pseudo-edge variant instead.
        if !self.del_edge_both_dirs(branch, from, deleted_e)? {
            debug!("killing pseudo instead {:?} {:?}", from, deleted_e);
            deleted_e.flag |= EdgeFlags::PSEUDO_EDGE;
            let result = self.del_edge_both_dirs(branch, from, deleted_e)?;
            debug!("killed ? {:?}", result);
        }
        Ok(())
    }

    /// Applies a `NewEdges` change: for each edge in `edges`, deletes
    /// its `previous` version (when given) and inserts the new
    /// version. `parents` and `children` are scratch sets used when
    /// repairing the graph around deletions.
    fn add_new_edges(
        &mut self,
        branch: &mut Branch,
        patch_id: PatchId,
        previous: Option<EdgeFlags>,
        flag: EdgeFlags,
        edges: &[NewEdge],
        parents: &mut HashSet<Key<PatchId>>,
        children: &mut HashSet<Edge>,
    ) -> Result<()> {
        for e in edges {
            debug!("add_new_edges {:?}", e);
            // If the edge has not been forgotten about,
            // insert the new version.
            let e_from = self.internal_key(&e.from, patch_id);
            let e_to = self.internal_key(&e.to, patch_id);
            assert!(e_from != e_to);
            // Vertex on which deletion-related repairs are performed:
            // `e_from` for parent edges, `e_to` otherwise.
            let to = if flag.contains(EdgeFlags::PARENT_EDGE) {
                e_from
            } else {
                e_to
            };

            // If this is a deletion edge and not a folder edge, reconnect parents and children.
            if flag.contains(EdgeFlags::DELETED_EDGE) && !flag.contains(EdgeFlags::FOLDER_EDGE) {
                self.reconnect_parents_children(branch, patch_id, to, parents, children)?;
            }

            let introduced_by = self.internal_hash(&e.introduced_by, patch_id);

            if let Some(previous) = previous {
                self.delete_old_edge(branch, patch_id, previous, e_from, e_to, introduced_by)?
            }

            // Deletions also invalidate the pseudo-edges that were
            // pointing at the deleted vertex.
            if flag.contains(EdgeFlags::DELETED_EDGE) && !flag.contains(EdgeFlags::FOLDER_EDGE) {
                self.delete_old_pseudo_edges(branch, patch_id, to, children)?
            }

            // Let's build the edge we're about to insert.
            let e = Edge {
                flag,
                dest: e_to,
                introduced_by: patch_id.clone(),
            };

            // Finally, add the new version of the edge.
            self.put_edge_both_dirs(branch, e_from, e)?;
        }
        Ok(())
    }

    /// Add pseudo edges from all keys of `parents` to all `dest` of
    /// the edges in `children`, with the same edge flags as in
    /// `children`, plus `PSEUDO_EDGE`.
    pub fn reconnect_parents_children(
        &mut self,
        branch: &mut Branch,
        patch_id: PatchId,
        to: Key<PatchId>,
        parents: &mut HashSet<Key<PatchId>>,
        children: &mut HashSet<Edge>,
    ) -> Result<()> {
        // Collect all the alive parents of the source of this edge.
        parents.clear();
        parents.extend(
            self.iter_adjacent(&branch, to, EdgeFlags::PARENT_EDGE, EdgeFlags::PARENT_EDGE | EdgeFlags::PSEUDO_EDGE)
                .filter_map(|v| {
                    if !v.flag.contains(EdgeFlags::EPSILON_EDGE)
                    if !v.flag.contains(EdgeFlags::EPSILON_EDGE)
                        && self.is_alive_or_zombie(branch, v.dest)
                    {
                        Some(v.dest)
                        Some(v.dest)
                    } else {
                        None
                    }
                }),
            self.iter_adjacent(
                &branch,
                to,
                EdgeFlags::PARENT_EDGE,
                EdgeFlags::PARENT_EDGE | EdgeFlags::PSEUDO_EDGE,
            )
            .filter_map(|v| {
                if !v.flag.contains(EdgeFlags::EPSILON_EDGE)
                    && self.is_alive_or_zombie(branch, v.dest)
                {
                    Some(v.dest)
                } else {
                    None
                }
            }),
        );

        // Now collect all the alive children of the target of this edge.
        children.clear();
        children.extend(
            self.iter_adjacent(&branch, to, EdgeFlags::empty(), EdgeFlags::PSEUDO_EDGE | EdgeFlags::FOLDER_EDGE)
        );
        );
        );
        );
        children.extend(self.iter_adjacent(
            &branch,
            to,
            EdgeFlags::empty(),
            EdgeFlags::PSEUDO_EDGE | EdgeFlags::FOLDER_EDGE,
        ));

        debug!("reconnecting {:?} {:?}", parents, children);

        for &parent in parents.iter() {
            for e in children.iter() {
                // If these are not already connected
                // or pseudo-connected, add a
                // pseudo-edge.
                if parent != e.dest && !self.is_connected(branch, parent, e.dest) {
                    let pseudo_edge = Edge {
                        flag: e.flag | EdgeFlags::PSEUDO_EDGE,
                        dest: e.dest,
                        introduced_by: patch_id.clone(),
                    };
                    debug!("reconnect_parents_children: {:?} {:?}", parent, pseudo_edge);
                    self.put_edge_both_dirs(branch, parent, pseudo_edge)?;
                }
            }
        }
        Ok(())
    }

    fn delete_old_pseudo_edges(
        &mut self,
        branch: &mut Branch,
        patch_id: PatchId,
        to: Key<PatchId>,
        pseudo_edges: &mut HashSet<Edge>,
    ) -> Result<()> {
        // Now collect pseudo edges, and delete them.
        pseudo_edges.clear();
        for to_edge in self.iter_adjacent(branch, to, EdgeFlags::empty(), EdgeFlags::DELETED_EDGE)
        for to_edge in self
            .iter_adjacent(branch, to, EdgeFlags::empty(), EdgeFlags::DELETED_EDGE)
            .filter(|v| v.flag.contains(EdgeFlags::PSEUDO_EDGE))
56
57





58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85



86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121

122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158

159
160
161


162
163
            } else {
                first_is_alive = true;
                for v in self.iter_adjacent(&branch, a,
                                            EdgeFlags::PARENT_EDGE | EdgeFlags::DELETED_EDGE,
                                            EdgeFlags::all())
                {
                {
                for v in self.iter_adjacent(
                    &branch,
                    a,
                    EdgeFlags::PARENT_EDGE | EdgeFlags::DELETED_EDGE,
                    EdgeFlags::all(),
                ) {
                    debug!("find_alive_ancestors: {:?}", v);
                    if v.flag.contains(EdgeFlags::FOLDER_EDGE) {
                        // deleted file.
                        file = Some(a);
                        *file_ancestor = Some(a)
                    } else {
                        find_alive.push(v.dest)
                    }
                }
            }
        }
        debug!("file {:?}", file);
        if let Some(file) = file {
            find_alive.clear();
            find_alive.push(file);
            while let Some(a) = find_alive.pop() {
                debug!("file {:?}", a);
                if !self.is_alive(branch, a) {
                    debug!("not alive");
                    first_is_alive = true;
                    let flag =
                        EdgeFlags::PARENT_EDGE | EdgeFlags::DELETED_EDGE | EdgeFlags::FOLDER_EDGE;
                    for v in self.iter_adjacent(&branch, a, flag, EdgeFlags::all())
                    {
                    {
                    for v in self.iter_adjacent(&branch, a, flag, EdgeFlags::all()) {
                        debug!("file find_alive_ancestors: {:?}", v);
                        // deleted file, collect.
                        files.push((a, v));
                        find_alive.push(v.dest)
                    }
                }
            }
        }

        first_is_alive
    }

    /// Recursively find all descendants by doing a DFS on deleted
    /// edges (including folder edges), and collect all edges until
    /// finding an alive or zombie descendant.
    ///
    /// Returns whether or not at least one traversed vertex was dead
    /// (or otherwise said, returns `false` if and only if there all
    /// vertices in `find_alive` are alive).
    pub fn find_alive_descendants(
        &self,
        find_alive: &mut FindAlive,
        branch: &Branch,
        alive: &mut Vec<Key<PatchId>>,
    ) -> bool {
        let mut first_is_alive = false;
        debug!("begin find_alive_descendants");
        while let Some(a) = find_alive.pop() {
            debug!("find_alive_descendants, a = {:?}", a);
            if self.is_alive(branch, a) {
                debug!("alive: {:?}", a);
                alive.push(a);
            } else {
                // Else, we need to explore its deleted descendants.
                first_is_alive = true;
                for v in self.iter_adjacent(&branch, a, EdgeFlags::empty(), EdgeFlags::all())
                for v in self
                    .iter_adjacent(&branch, a, EdgeFlags::empty(), EdgeFlags::all())
                    .filter(|v| !v.flag.contains(EdgeFlags::PARENT_EDGE))
                {
                    debug!("v = {:?}", v);
                    if v.flag.contains(EdgeFlags::DELETED_EDGE) {
                        debug!("find_alive_descendants: {:?}", v);
                        find_alive.push(v.dest)
                    } else {
                        debug!("alive in for: {:?}", v.dest);
                        alive.push(v.dest)
                    }
                }
            }
        }
        debug!("end find_alive_descendants");
        first_is_alive
    }

    fn find_alive(
        &self,
        branch: &Branch,
        find_alive: &mut FindAlive,
        alive: &mut HashSet<Key<PatchId>>,
        file: &mut Option<Key<PatchId>>,
        current: Key<PatchId>,
        flag: EdgeFlags,
    ) {
        find_alive.clear();
        debug!("find_alive: {:?}", current);
        find_alive.push(current);
        while let Some(current) = find_alive.pop() {
            debug!("find_alive, current = {:?}", current);
            if self.is_alive(branch, current) {
                alive.insert(current.clone());
            } else {
                let flagmax = flag | EdgeFlags::FOLDER_EDGE | EdgeFlags::PSEUDO_EDGE;
                for e in self.iter_adjacent(branch, current, flag, flagmax)
                for e in self
                    .iter_adjacent(branch, current, flag, flagmax)
                    .take_while(|e| {
                        e.flag | EdgeFlags::FOLDER_EDGE | EdgeFlags::PSEUDO_EDGE
                            == flagmax
                        e.flag | EdgeFlags::FOLDER_EDGE | EdgeFlags::PSEUDO_EDGE == flagmax
                    })

1
2
3
4
5
6
7
8
9
10
11


14


17

19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43

44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161

162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281

282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303

304
305
306
use {Result, Error};
use backend::*;
use patch::*;
use rand;
use record::{InodeUpdate, RecordState};
use std::collections::HashSet;
use std::path::Path;
use {Error, Result};
mod apply;
pub mod find_alive;
mod repair_deleted_context;
use diff;

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
use fs_representation::RepoRoot;

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

================================
use fs_representation::{RepoRoot, in_repo_root};

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
use output;
use output::ConflictingFile;

impl<U: Transaction, R> GenericTxn<U, R> {
    /// Return the patch id corresponding to `e`, or `internal` if `e==None`.
    pub fn internal_hash(&self, e: &Option<Hash>, internal: PatchId) -> PatchId {
        match *e {
            // `Hash::None` denotes the root patch.
            Some(Hash::None) => ROOT_PATCH_ID.clone(),
            Some(ref h) => self.get_internal(h.as_ref()).unwrap().to_owned(),
            None => internal.clone(),
        }
    }

    /// Fetch the internal key for this external key (or `internal` if
    /// `key.patch` is `None`).
    pub fn internal_key(&self, key: &Key<Option<Hash>>, internal: PatchId) -> Key<PatchId> {
        Key {
            patch: self.internal_hash(&key.patch, internal),
            line: key.line.clone(),
        }
    }

    /// Like `internal_key`, but panics if `key.patch` is `None` or is
    /// not known to the repository.
    pub fn internal_key_unwrap(&self, key: &Key<Option<Hash>>) -> Key<PatchId> {
        Key {
            patch: self
                .get_internal(key.patch.as_ref().unwrap().as_ref())
                .unwrap()
                .to_owned(),
            line: key.line.clone(),
        }
    }
}

impl<'env, T: rand::Rng> MutTxn<'env, T> {
    /// Assumes all patches have been downloaded. The third argument
    /// `remote_patches` needs to contain at least all the patches we
    /// want to apply, and the fourth one `local_patches` at least all
    /// the patches the other repository doesn't have.
    ///
    /// `f` is a progress callback, invoked after each top-level patch
    /// with the number of patches applied so far and the patch's hash.
    /// Returns the conflicting files reported by `output_repository`.
    pub fn apply_patches<F, P: output::ToPrefixes>(
        &mut self,
        diff_algorithm: diff::Algorithm,
        branch: &mut Branch,
        r: &RepoRoot<impl AsRef<Path>>,
        remote_patches: &[(Hash, Patch)],
        partial_paths: P,
        mut f: F,
    ) -> Result<Vec<ConflictingFile>>
    where
        F: FnMut(usize, &Hash),
    {
        // Record the uncommitted changes of the working copy as an
        // unsigned "pending" patch; it is handed to
        // `output_repository` below so those changes survive the
        // application of the remote patches.
        let (pending, local_pending) = {
            let mut record = RecordState::new();
            self.record(diff_algorithm, &mut record, branch, r, &in_repo_root())?;
            let (changes, local) = record.finish();
            let mut p = UnsignedPatch::empty();
            p.changes = changes
                .into_iter()
                .flat_map(|x| x.into_iter())
                .map(|x| self.globalize_change(x))
                .collect();
            p.dependencies = self.dependencies(&branch, p.changes.iter());
            (p.leave_unsigned(), local)
        };

        // Apply every remote patch (recursively following its
        // dependencies), reporting progress through `f`.
        let mut new_patches_count = 0;
        for &(ref p, ref patch) in remote_patches.iter() {
            debug!("apply_patches: {:?}", p);
            self.apply_patches_rec(branch, remote_patches, p, patch, &mut new_patches_count)?;
            f(new_patches_count, p);
        }
        debug!("{} patches applied", new_patches_count);

        if new_patches_count > 0 {
            // At least one patch was new: refresh the branch's
            // changes file and re-output the repository (restricted
            // to `partial_paths`).
            let partial_paths = partial_paths.to_prefixes(self, &branch);
            self.output_changes_file(&branch, r)?;
            debug!("output_repository");
            self.output_partials(branch.name.as_str(), &partial_paths)?;
            self.output_repository(branch, r, &partial_paths, &pending, &local_pending)
        } else {
            // Nothing new was applied; no conflicts to report.
            debug!("finished apply_patches");
            Ok(Vec::new())
        }
    }

    /// Lower-level applier. This function only applies patches as
    /// found in `patches_dir`, following dependencies recursively. It
    /// outputs neither the repository nor the "changes file" of the
    /// branch, necessary to exchange patches locally or over HTTP.
    ///
    /// Increments `*new_patches_count` once for every patch actually
    /// applied (including dependencies). Returns
    /// `Error::MissingDependency` if a dependency is neither applied
    /// nor present in `patches`.
    pub fn apply_patches_rec(
        &mut self,
        branch: &mut Branch,
        patches: &[(Hash, Patch)],
        patch_hash: &Hash,
        patch: &Patch,
        new_patches_count: &mut usize,
    ) -> Result<()> {
        // Decide whether the patch still needs to be applied to this
        // branch: `None` means "already on the branch", `Some(id)`
        // carries the internal id to apply under.
        let internal = {
            if let Some(internal) = self.get_internal(patch_hash.as_ref()) {
                if self.get_patch(&branch.patches, internal).is_some() {
                    debug!(
                        "get_patch returned {:?}",
                        self.get_patch(&branch.patches, internal)
                    );
                    None
                } else {
                    // Doesn't have patch, but the patch is known in
                    // another branch
                    Some(internal.to_owned())
                }
            } else {
                // The patch is totally new to the repository.
                let internal = self.new_internal(patch_hash.as_ref());
                Some(internal)
            }
        };
        if let Some(internal) = internal {
            info!(
                "Now applying patch {:?} {:?} to branch {:?}",
                patch.name, patch_hash, branch
            );
            if patch.dependencies().is_empty() {
                info!("Patch {:?} has no dependencies", patch_hash);
            }
            for dep in patch.dependencies().iter() {
                info!("Applying dependency {:?}", dep);
                info!("dep hash {:?}", dep.to_base58());
                let is_applied = {
                    if let Some(dep_internal) = self.get_internal(dep.as_ref()) {
                        self.get_patch(&branch.patches, dep_internal).is_some()
                    } else {
                        false
                    }
                };
                if !is_applied {
                    info!("Not applied");
                    // If `patches` is sorted in topological order,
                    // this shouldn't happen, because the dependencies
                    // have been applied before.
                    if let Some(&(_, ref patch)) = patches.iter().find(|&&(ref a, _)| a == dep) {
                        self.apply_patches_rec(branch, patches, &dep, patch, new_patches_count)?;
                    } else {
                        error!("Dependency not found");
                        return Err(Error::MissingDependency(dep.to_owned()));
                    }
                } else {
                    info!("Already applied");
                }
                // Record the dependency in both directions.
                let dep_internal = self.get_internal(dep.as_ref()).unwrap().to_owned();
                self.put_revdep(dep_internal, internal)?;
                self.put_dep(internal, dep_internal)?;
            }

            // Sanakirja doesn't let us insert the same pair twice.
            self.register_patch(internal, patch_hash, patch)?;

            let now = branch.apply_counter;
            branch.apply_counter += 1;
            self.apply(branch, &patch, internal, now)?;

            *new_patches_count += 1;

            Ok(())
        } else {
            info!("Patch {:?} has already been applied", patch_hash);
            Ok(())
        }
    }

    /// Apply a patch from a local record: register it, give it a hash, and then apply.
    ///
    /// `inode_updates` lists the tree/inodes database updates to
    /// perform once the patch is applied. Returns the internal id
    /// allocated for the patch.
    pub fn apply_local_patch(
        &mut self,
        branch: &mut Branch,
        working_copy: &RepoRoot<impl AsRef<Path>>,
        hash: &Hash,
        patch: &Patch,
        inode_updates: &HashSet<InodeUpdate>,
        is_pending: bool,
    ) -> Result<PatchId> {
        info!("registering a patch with {} changes", patch.changes().len());
        info!("dependencies: {:?}", patch.dependencies());

        // Allocate an internal id for the new patch.
        let internal: PatchId = self.new_internal(hash.as_ref());

        // Record the dependencies in both directions. All
        // dependencies are expected to be known already (hence the
        // `unwrap`).
        for dep in patch.dependencies().iter() {
            let dep_internal = self.get_internal(dep.as_ref()).unwrap().to_owned();
            self.put_revdep(dep_internal, internal)?;
            self.put_dep(internal, dep_internal)?;
        }
        self.register_patch(internal, hash, patch)?;

        info!("Applying local patch");
        let now = branch.apply_counter;
        self.apply(branch, &patch, internal, now)?;
        // Synchronize the inodes/tree databases with what the patch
        // just did to the graph.
        debug!("synchronizing tree: {:?}", inode_updates);
        for update in inode_updates.iter() {
            self.update_inode(&branch, internal, update)?;
        }
        debug!("committing branch");
        // NOTE(review): the apply counter is only incremented (and the
        // changes file only rewritten) for non-pending patches —
        // presumably pending patches must not advance the branch state;
        // confirm against callers.
        if !is_pending {
            debug!("not pending, adding to changes");
            branch.apply_counter += 1;
            self.output_changes_file(&branch, working_copy)?;
        }
        trace!("done apply_local_patch");
        Ok(internal)
    }

    /// Record the two-way mapping between a patch's external hash and
    /// its internal id, and mark every inode touched by one of the
    /// patch's hunks as touched by this patch.
    fn register_patch(&mut self, internal: PatchId, hash: &Hash, patch: &Patch) -> Result<()> {
        self.put_external(internal, hash.as_ref())?;
        self.put_internal(hash.as_ref(), internal)?;
        for change in patch.changes() {
            // Both hunk kinds carry the inode they apply to.
            let inode = match *change {
                Change::NewNodes { ref inode, .. }
                | Change::NewEdges { ref inode, .. } => inode,
            };
            let inode = self.internal_key(inode, internal);
            self.put_touched_file(inode, internal)?;
        }
        Ok(())
    }

    /// Update the inodes/revinodes, tree/revtrees databases with the
    /// patch we just applied. This is because files don't really get
    /// moved or deleted before we apply the patch, they are just
    /// "marked as moved/deleted". This function does the actual
    /// update.
    fn update_inode(
        &mut self,
        branch: &Branch,
        internal: PatchId,
        update: &InodeUpdate,
    ) -> Result<()> {
        match *update {
            InodeUpdate::Add {
                ref line,
                ref meta,
                inode,
            } => {
                let key = FileHeader {
                    metadata: *meta,
                    status: FileStatus::Ok,
                    key: Key {
                        patch: internal.clone(),
                        line: line.clone(),
                    },
                };
                // If this file addition was actually recorded.
                if self.get_nodes(&branch, key.key, None).is_some() {
                    debug!("it's in here!: {:?} {:?}", key, inode);
                    self.replace_inodes(inode, key)?;
                    self.replace_revinodes(key.key, inode)?;
                }
            }
            InodeUpdate::Deleted { inode } => {
                // If this change was actually applied.
                debug!("deleted: {:?}", inode);
                let header = self.get_inodes(inode).unwrap().clone();
                debug!("deleted header: {:?}", header);
                let flag =
                    EdgeFlags::PARENT_EDGE | EdgeFlags::FOLDER_EDGE | EdgeFlags::DELETED_EDGE;
                if self.iter_adjacent(&branch, header.key, flag, flag)
                if self
                    .iter_adjacent(&branch, header.key, flag, flag)
                    .any(|v| v.introduced_by == internal)
                {
                    self.del_inodes(inode, Some(header))?;
                    self.del_revinodes(header.key, Some(inode))?;

                    // We might have killed the parent in the same
                    // update.
                    if let Some(parent) = self.get_revtree(inode).map(|x| x.to_owned()) {
                        let parent = parent.as_file_id();
                        self.del_tree(&parent, None)?;
                        self.del_revtree(inode, None)?;
                    }
                }
            }
            InodeUpdate::Moved { inode, metadata } => {
                // If this change was actually applied.
                debug!("moved: {:?}", inode);
                let mut header = self.get_inodes(inode).unwrap().clone();
                debug!("moved header: {:?}", header);
                let flag = EdgeFlags::PARENT_EDGE | EdgeFlags::FOLDER_EDGE;
                if self.iter_adjacent(&branch, header.key, flag, flag)
                if self
                    .iter_adjacent(&branch, header.key, flag, flag)
                    .any(|v| v.introduced_by == internal)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259



260
261
262
263
264
265
266
267





268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353



354
355
356
357
358
359
360
// NOTE(review): `use Result;` appeared twice in this block (pre- and
// post-rustfmt copies); deduplicated and sorted.
use apply::find_alive::*;
use backend::*;
use patch::*;
use rand;
use std::collections::HashSet;
use Result;

impl<'env, T: rand::Rng> MutTxn<'env, T> {
    /// Deleted contexts are conflicts. Reconnect the graph by
    /// inserting pseudo-edges alongside deleted edges.
    ///
    /// Walks every hunk of `patch` and dispatches to the appropriate
    /// repair routine depending on whether the hunk adds edges
    /// (deleted or not) or new nodes.
    pub(in apply) fn repair_deleted_contexts(
        &mut self,
        branch: &mut Branch,
        patch: &Patch,
        patch_id: PatchId,
    ) -> Result<()> {
        // Scratch buffers shared by all the repair routines below.
        let mut alive = Vec::new();
        let mut files = Vec::new();
        let mut find_alive = FindAlive::new();
        // repair_missing_context adds all zombie edges needed.
        for ch in patch.changes().iter() {
            match *ch {
                Change::NewEdges {
                    flag, ref edges, ..
                } => {
                    if !flag.contains(EdgeFlags::DELETED_EDGE) {
                        self.repair_context_nondeleted(
                            branch,
                            patch_id,
                            edges,
                            flag,
                            &mut find_alive,
                            &mut alive,
                            &mut files,
                        )?
                    } else {
                        self.repair_context_deleted(
                            branch,
                            patch_id,
                            edges,
                            flag,
                            &mut find_alive,
                            &mut alive,
                            &mut files,
                            patch.dependencies(),
                        )?
                    }
                }
                Change::NewNodes {
                    ref up_context,
                    ref down_context,
                    flag,
                    ..
                } => {
                    debug!("repairing missing contexts for newnodes");
                    // If not all lines in `up_context` are alive, this
                    // is a conflict, repair.
                    for c in up_context {
                        let c = self.internal_key(c, patch_id);

                        // Is the up context deleted by another patch, and the
                        // deletion was not also confirmed by this patch?

                        let up_context_deleted = self.was_context_deleted(branch, patch_id, c);
                        debug!(
                            "up_context_deleted: patch_id = {:?} context = {:?} up_context_deleted = {:?}",
                            patch_id, c, up_context_deleted
                        );
                        if up_context_deleted {
                            self.repair_missing_up_context(
                                &mut find_alive,
                                branch,
                                c,
                                flag,
                                &mut alive,
                                &mut files,
                                &[patch_id],
                            )?
                        }
                    }
                    // If not all lines in `down_context` are alive,
                    // this is a conflict, repair.
                    for c in down_context {
                        let c = self.internal_key(c, patch_id);
                        let down_context_deleted = self.was_context_deleted(branch, patch_id, c);
                        debug!("down_context_deleted: {:?}", down_context_deleted);
                        if down_context_deleted {
                            self.repair_missing_down_context(
                                &mut find_alive,
                                branch,
                                c,
                                &mut alive,
                                &[patch_id],
                            )?
                        }
                    }
                    debug!("apply: newnodes, done");
                }
            }
        }
        Ok(())
    }

    /// This function handles the case where we're adding an alive
    /// edge, and the origin or destination (or both) of this edge is
    /// dead in the graph.
    fn repair_context_nondeleted(
        &mut self,
        branch: &mut Branch,
        patch_id: PatchId,
        edges: &[NewEdge],
        flag: EdgeFlags,
        find_alive: &mut FindAlive,
        alive: &mut Vec<Key<PatchId>>,
        files: &mut Vec<(Key<PatchId>, Edge)>,
    ) -> Result<()> {
        debug!("repairing missing contexts for non-deleted edges");
        for edge in edges {
            // A PARENT edge is stored in the reverse direction: its
            // `to` end is the up context and its `from` end the down
            // context.
            let from = self.internal_key(&edge.from, patch_id);
            let to = self.internal_key(&edge.to, patch_id);
            let (up_context, down_context) = if flag.contains(EdgeFlags::PARENT_EDGE) {
                (to, from)
            } else {
                (from, to)
            };

            // Repair each end of the edge that was deleted by some
            // other patch.
            if self.was_context_deleted(branch, patch_id, up_context) {
                self.repair_missing_up_context(
                    find_alive,
                    branch,
                    up_context,
                    flag,
                    alive,
                    files,
                    &[patch_id],
                )?
            }
            if self.was_context_deleted(branch, patch_id, down_context) {
                self.repair_missing_down_context(
                    find_alive,
                    branch,
                    down_context,
                    alive,
                    &[patch_id],
                )?
            }
        }
        Ok(())
    }

    /// Handle the case where we're inserting a deleted edge, but the
    /// source or target (or both) does not know about (at least one
    /// of) its adjacent edges.
    fn repair_context_deleted(
        &mut self,
        branch: &mut Branch,
        patch_id: PatchId,
        edges: &[NewEdge],
        flag: EdgeFlags,
        find_alive: &mut FindAlive,
        alive: &mut Vec<Key<PatchId>>,
        files: &mut Vec<(Key<PatchId>, Edge)>,
        dependencies: &HashSet<Hash>,
    ) -> Result<()> {
        debug!("repairing missing contexts for deleted edges");
        debug_assert!(flag.contains(EdgeFlags::DELETED_EDGE));

        for e in edges {
            // The key being deleted: PARENT edges are stored
            // reversed, so take the appropriate end.
            let dest = if flag.contains(EdgeFlags::PARENT_EDGE) {
                self.internal_key(&e.from, patch_id)
            } else {
                self.internal_key(&e.to, patch_id)
            };

            debug!("dest = {:?}", dest);

            // If there is at least one unknown child, repair the
            // context.
            let mut unknown_children = Vec::new();
            for (k, v) in self.iter_nodes(branch, Some((dest, None))) {
                // Stop once we leave `dest`'s children (the iterator
                // is ordered by key, then edge flag).
                if k != dest
                    || v.flag | EdgeFlags::FOLDER_EDGE
                        > EdgeFlags::PSEUDO_EDGE | EdgeFlags::EPSILON_EDGE | EdgeFlags::FOLDER_EDGE
                {
                    break;
                }
                // An edge is "unknown" to this patch if it was
                // introduced neither by the patch itself nor by one
                // of its dependencies.
                if v.introduced_by != patch_id && {
                    let ext = self.external_hash(v.introduced_by).to_owned();
                    !dependencies.contains(&ext)
                } {
                    unknown_children.push(v.introduced_by)
                }

                debug!(
                    "child is_unknown({}): {:?} {:?}",
                    line!(),
                    v,
                    unknown_children
                );
            }

            if !unknown_children.is_empty() {
                self.repair_missing_up_context(
                    find_alive,
                    branch,
                    dest,
                    flag,
                    alive,
                    files,
                    &unknown_children,
                )?;
            }

            // If there is at least one alive parent we don't know
            // about, repair.
            let e = Edge::zero(EdgeFlags::PARENT_EDGE);
            // Reuse the children buffer for the parents scan.
            unknown_children.clear();
            let mut unknown_parents = unknown_children;
            for (k, v) in self.iter_nodes(branch, Some((dest, Some(e)))) {
                // Stop once we leave `dest`'s alive parent edges.
                if k != dest
                    || v.flag | EdgeFlags::FOLDER_EDGE
                        != EdgeFlags::PARENT_EDGE | EdgeFlags::FOLDER_EDGE
                {
                    break;
                }
                if v.introduced_by != patch_id && {
                    let ext = self.external_hash(v.introduced_by).to_owned();
                    !dependencies.contains(&ext)
                } {
                    unknown_parents.push(v.introduced_by)
                }
                debug!(
                    "parent is_unknown({}): {:?} {:?}",
                    line!(),
                    v,
                    unknown_parents
                );
            }

            if !unknown_parents.is_empty() {
                self.repair_missing_down_context(find_alive, branch, dest, alive, &unknown_parents)?
            }
        }
        Ok(())
    }

    /// Was `context` deleted by patches other than `patch_id`, and
    /// additionally not deleted by `patch_id`?
    fn was_context_deleted(
        &self,
        branch: &Branch,
        patch_id: PatchId,
        context: Key<PatchId>,
    ) -> bool {
        let mut context_deleted = false;
        for v in self.iter_adjacent(branch, context,
                                    EdgeFlags::PARENT_EDGE | EdgeFlags::DELETED_EDGE,
                                    EdgeFlags::all())
        for v in self
            .iter_adjacent(
                branch,
                context,
                EdgeFlags::PARENT_EDGE | EdgeFlags::DELETED_EDGE,
                EdgeFlags::all(),
            )
            .take_while(|v| {
                v.flag.contains(EdgeFlags::PARENT_EDGE | EdgeFlags::DELETED_EDGE)
            }) {
            }) {
            }) {
            }) {
                v.flag
                    .contains(EdgeFlags::PARENT_EDGE | EdgeFlags::DELETED_EDGE)
            })
        {
            debug!("was_context_deleted {:?}", v);
            if v.introduced_by == patch_id {
                return false;
            } else {
                context_deleted = true
            }
        }
        context_deleted
    }

    /// Checks whether a line in the up context of a hunk is marked
    /// deleted, and if so, reconnect the alive parts of the graph,
    /// marking this situation as a conflict.
    pub(crate) fn repair_missing_up_context(
        &mut self,
        find_alive: &mut FindAlive,
        branch: &mut Branch,
        context: Key<PatchId>,
        flag: EdgeFlags,
        alive: &mut Vec<Key<PatchId>>,
        files: &mut Vec<(Key<PatchId>, Edge)>,
        unknown_patches: &[PatchId],
    ) -> Result<()> {
        // The up context needs a repair iff it's deleted.

        // The up context was deleted, so the alive
        // component of the graph might be disconnected, and needs
        // a repair.

        // Follow all paths upwards (in the direction of
        // DELETED_EDGE|PARENT_EDGE) until finding an alive
        // ancestor, and turn them all into zombie edges.
        find_alive.clear();
        find_alive.push(context);
        alive.clear();
        files.clear();
        let mut first_file = None;
        self.find_alive_ancestors(find_alive, branch, alive, &mut first_file, files);
        debug!("files {:?} alive {:?}", files, alive);
        if !flag.contains(EdgeFlags::FOLDER_EDGE) {
            // Reconnect `context` to every alive ancestor found (plus
            // the containing file, if any) with pseudo-parent edges,
            // one per unknown patch.
            for ancestor in alive.drain(..).chain(first_file.into_iter()) {
                let mut edge = Edge::zero(EdgeFlags::PSEUDO_EDGE | EdgeFlags::PARENT_EDGE);
                if ancestor != context {
                    edge.dest = ancestor;
                    for patch_id in unknown_patches {
                        edge.introduced_by = patch_id.clone();
                        debug!("repairing up context: {:?} {:?}", context, edge);
                        self.put_edge_both_dirs(branch, context, edge)?;
                    }
                }
            }
        }
        // Reconnect the file (folder) edges collected by
        // `find_alive_ancestors`, unless the two keys are already
        // connected.
        for (key, mut edge) in files.drain(..) {
            if !self.is_connected(branch, key, edge.dest) {
                edge.flag =
                    EdgeFlags::PSEUDO_EDGE | EdgeFlags::PARENT_EDGE | EdgeFlags::FOLDER_EDGE;
                for patch_id in unknown_patches {
                    edge.introduced_by = patch_id.clone();
                    debug!("file: repairing up context: {:?} {:?}", key, edge);
                    self.put_edge_both_dirs(branch, key, edge)?;
                }
            }
        }
        Ok(())
    }

    /// Checks whether a line in the down context of a hunk is marked
    /// deleted, and if so, reconnect the alive parts of the graph,
    /// marking this situation as a conflict.
    fn repair_missing_down_context(
        &mut self,
        find_alive: &mut FindAlive,
        branch: &mut Branch,
        context: Key<PatchId>,
        alive: &mut Vec<Key<PatchId>>,
        unknown_patches: &[PatchId],
    ) -> Result<()> {
        // Find all alive descendants, as well as the paths
        // leading to them, and double these edges with
        // pseudo-edges everywhere.
        find_alive.clear();
        alive.clear();
        for v in self.iter_adjacent(branch, context, EdgeFlags::DELETED_EDGE, EdgeFlags::DELETED_EDGE)
        {
        {
        for v in self.iter_adjacent(
            branch,
            context,
            EdgeFlags::DELETED_EDGE,
            EdgeFlags::DELETED_EDGE,
        ) {
            find_alive.push(v.dest)
2
3
4
5

// NOTE(review): the byteorder import appeared twice (pre- and
// post-rustfmt copies of the same line); deduplicated.
use super::patch_id::*;
use byteorder::{ByteOrder, LittleEndian};
use sanakirja::*;
use std;
18
19

20
21
22
23
24
25
        let mut m = FileMetadata(0);
        m.set_permissions(perm as u16);
        if is_dir { m.set_dir() } else { m.unset_dir() }
        if is_dir {
            m.set_dir()
        } else {
            m.unset_dir()
        }
        m
293
294






295
296
297
298
299
300

    /// On-page size of this hash in bytes: one tag byte plus the
    /// payload (64 bytes for SHA-512, `len` bytes for a recursive
    /// hash, nothing for `None`).
    ///
    /// NOTE(review): the source contained both the pre- and
    /// post-rustfmt bodies concatenated (a diff-rendering artifact,
    /// which does not compile); only the formatted body is kept.
    fn onpage_size(&self) -> u16 {
        1 + (match *self {
            UnsafeHash::Sha512(_) => 64,
            UnsafeHash::None => 0,
            UnsafeHash::Recursive { len, .. } => len as u16,
        })
    }
64
65

66
67
            Some(Key {
                patch: PatchId(LittleEndian::read_u64(&s[..8])),
                line: LineId(LittleEndian::read_u64(&s[8..]))
                line: LineId(LittleEndian::read_u64(&s[8..])),
            })
1220
1221

1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236

1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294

1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307

1308
1309
1310
                    k.line.to_hex(),
                    cont.replace("\n", "")
                ).unwrap();
                )
                .unwrap();
                cur = k.clone();
            }
            debug!("debug: {:?}", v);
            let flag = v.flag.bits();
            if !(exclude_parents && v.flag.contains(EdgeFlags::PARENT_EDGE)) {
                write!(
                    w,
                    "n_{}->n_{}[{},label=\"{} {}\"];\n",
                    k.to_hex(),
                    &v.dest.to_hex(),
                    styles[(flag & 0xff) as usize],
                    flag,
                    v.introduced_by.to_base58()
                ).unwrap();
                )
                .unwrap();
            }
        }
        w.write(b"}\n").unwrap();
    }

    /// Dump the graph of a branch into a writer, in dot format.
    pub fn debug_folders<W>(&self, branch_name: &str, w: &mut W)
    where
        W: std::io::Write,
    {
        debug!("debugging branch {:?}", branch_name);
        let mut styles = Vec::with_capacity(16);
        for i in 0..32 {
            let flag = EdgeFlags::from_bits(i as u8).unwrap();
            styles.push(
                ("color=").to_string()
                    + ["red", "blue", "orange", "green", "black"][(i >> 1) & 3]
                    + if flag.contains(EdgeFlags::DELETED_EDGE) {
                        ", style=dashed"
                    } else {
                        ""
                    }
                    + if flag.contains(EdgeFlags::PSEUDO_EDGE) {
                        ", style=dotted"
                    } else {
                        ""
                    },
            )
        }
        w.write(b"digraph{\n").unwrap();
        let branch = self.get_branch(branch_name).unwrap();

        let mut nodes = vec![ROOT_KEY];
        while let Some(k) = nodes.pop() {
            let cont = if let Some(cont) = self.get_contents(k) {
                let cont = cont.into_cow();
                let cont = &cont[..std::cmp::min(50, cont.len())];
                if cont.len() > 2 {
                    let (a, b) = cont.split_at(2);
                    let cont = format!("{:?}", std::str::from_utf8(b).unwrap());
                    let cont = &cont[1..(cont.len() - 1)];
                    format!("{} {}", hex::encode(a), cont)
                } else {
                    format!("{}", hex::encode(cont))
                }
            } else {
                "".to_string()
            };
            // remove the leading and trailing '"'.
            write!(
                w,
                "n_{}[label=\"{}.{}: {}\"];\n",
                k.to_hex(),
                k.patch.to_base58(),
                k.line.to_hex(),
                cont.replace("\n", "")
            ).unwrap();
            )
            .unwrap();

            for child in self.iter_adjacent(&branch, k, EdgeFlags::empty(), EdgeFlags::all()) {
                let flag = child.flag.bits();
                write!(
                    w,
                    "n_{}->n_{}[{},label=\"{} {}\"];\n",
                    k.to_hex(),
                    &child.dest.to_hex(),
                    styles[(flag & 0xff) as usize],
                    flag,
                    child.introduced_by.to_base58()
                ).unwrap();
                )
                .unwrap();
                if child.flag.contains(EdgeFlags::FOLDER_EDGE)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335







336
337
338
339







340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373


374
375
376
377

378

379
380
381
382
383
384
385
386

387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431

432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622

623
624
625
626
627
628
629
630
631

632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702

703
704
705
706
707
708
709
710
711

712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815

816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
                _ => unreachable!(),
use backend::*;
use graph;
use graph::Graph;
use patch;
use patch::{Change, ChangeContext, Record};
use {GenericTxn, Result};
use fs_representation::RepoPath;

use conflict;
use diffs;
use rand;
use record::is_text;
use sanakirja::value::Value;
use std;
use std::collections::{HashMap, HashSet};
use std::path::PathBuf;
use std::rc::Rc;

/// One line of the new (on-disk, "B") version of the file, borrowed
/// from the raw byte buffer. When the file is text, each line
/// includes its trailing `\n` (see the line-cutting loop in `diff`);
/// the last line may lack one, and a non-text file is one big `Line`.
#[derive(Debug, PartialEq, Eq, Hash)]
struct Line<'a>(&'a [u8]);

/// One line of the old (recorded, "A") version of the file, as read
/// back from the graph under transaction `T`.
struct Contents<'a, T: Transaction + 'a> {
    // Graph key of the line; ROOT_KEY for synthetic conflict-marker
    // lines (see `output_conflict_marker`).
    key: Key<PatchId>,
    // Raw bytes of the line.
    contents: Value<'a, T>,
    // True when this line immediately precedes a conflict marker;
    // `PartialEq<Contents> for Line` uses it to tolerate an
    // artificially added trailing newline.
    before_conflict_line: bool,
}

/// Manual `Debug` so the raw line bytes show up in logs: the
/// contents are rendered through `into_cow()`.
impl<'a, T: Transaction + 'a> std::fmt::Debug for Contents<'a, T> {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(
            f,
            "Contents {{ key: {:?}, before_conflict_line: {:?}, {:?} }}",
            self.key,
            self.before_conflict_line,
            self.contents.into_cow()
        )
    }
}

/// Role of a synthetic conflict-marker line, keyed in `Diff::status`
/// by the marker's index in `contents_a`.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
enum Status {
    // Conflict start marker.
    Begin,
    // Separator between two sides of a conflict.
    Next,
    // Conflict end marker.
    End,
}

/// Diff algorithm used to compare the recorded and on-disk versions
/// of a file.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Algorithm {
    // Classic Myers diff (the default, see `impl Default`).
    Myers,
    // Patience diff.
    Patience,
}

/// Myers is the default diff algorithm.
impl Default for Algorithm {
    fn default() -> Algorithm {
        Algorithm::Myers
    }
}

/// Working state for diffing one file: the A side as output from the
/// graph (including synthetic conflict-marker lines), the B side as
/// raw lines, and bookkeeping about conflicts seen along the way.
struct Diff<'a, 'b, T: Transaction + 'a> {
    // Per A-line `(conflict id, side, is_zombie)` — the top of
    // `conflict_stack` at the time the line was output.
    conflicts_a: Vec<(usize, usize, bool)>,
    // Graph key of each A line (ROOT_KEY for conflict markers).
    lines_a: Vec<Key<PatchId>>,
    // Contents of each A line, parallel to `lines_a`.
    contents_a: Vec<Contents<'a, T>>,
    // The B side, split into lines.
    lines_b: &'b [Line<'b>],
    // Conflict id -> index in `contents_a` of its start marker.
    conflict_starts: HashMap<usize, usize>,
    // Conflict id -> index in `contents_a` of its end marker.
    conflict_ends: HashMap<usize, usize>,
    // Marker index -> line ids inserted inside that conflict; read in
    // `get_up_context` when crossing an end marker.
    conflict_down_contexts: HashMap<usize, Vec<LineId>>,
    // Marker index in `contents_a` -> its role (Begin/Next/End).
    status: HashMap<usize, Status>,
    // Stack of currently open conflicts `(id, side, is_zombie)`;
    // starts with a sentinel `(0, 0, false)`.
    conflict_stack: Vec<(usize, usize, bool)>,
    // Generator for fresh conflict ids.
    conflict_counter: usize,
    // Conflicts whose start marker was deleted by the new version,
    // i.e. that the user solved.
    solved_conflicts: HashSet<usize>,
}

/// Receives the graph's output of the A side line by line, recording
/// conflict structure as it goes. The order of `status` /
/// `conflict_stack` updates relative to marker output is
/// significant; do not reorder.
impl<'a, 'b, T: Transaction + 'a> graph::LineBuffer<'a, T> for Diff<'a, 'b, T> {
    /// Record one regular line of A (empty values are skipped).
    fn output_line(&mut self, k: &Key<PatchId>, c: Value<'a, T>) -> Result<()> {
        if c.len() != 0 {
            // Tag the line with the innermost open conflict.
            self.conflicts_a.push(*self.conflict_stack.last().unwrap());
            self.lines_a.push(*k);
            self.contents_a.push(Contents {
                key: *k,
                contents: c,
                before_conflict_line: false,
            });
        }
        Ok(())
    }

    /// Open a new conflict and emit its start marker.
    fn begin_conflict(&mut self) -> Result<()> {
        let len = self.contents_a.len();
        self.conflict_counter += 1;
        self.conflict_stack.push((self.conflict_counter, 0, false));
        self.conflict_starts.insert(self.conflict_counter, len);
        // `len` is the index the marker line will occupy.
        self.status.insert(len, Status::Begin);
        self.output_conflict_marker(conflict::START_MARKER)
    }

    /// Same as `begin_conflict`, but for a zombie conflict (the
    /// `is_zombie` flag on the stack entry is set).
    fn begin_zombie_conflict(&mut self) -> Result<()> {
        let len = self.contents_a.len();
        self.conflict_counter += 1;
        self.conflict_stack.push((self.conflict_counter, 0, true));
        self.conflict_starts.insert(self.conflict_counter, len);
        self.status.insert(len, Status::Begin);
        self.output_conflict_marker(conflict::START_MARKER)
    }

    /// Emit the end marker and close the innermost conflict.
    fn end_conflict(&mut self) -> Result<()> {
        let len = self.contents_a.len();
        self.output_conflict_marker(conflict::END_MARKER)?;
        self.status.insert(len, Status::End);
        let (conflict, _, _) = self.conflict_stack.pop().unwrap();
        self.conflict_ends.insert(conflict, len);
        Ok(())
    }

    /// Emit a separator between two sides of the current conflict.
    fn conflict_next(&mut self) -> Result<()> {
        self.status.insert(self.contents_a.len(), Status::Next);
        if let Some((_, ref mut side, _)) = self.conflict_stack.last_mut() {
            *side += 1
        }
        self.output_conflict_marker(conflict::SEPARATOR)
    }

    /// Push a synthetic marker line (key ROOT_KEY) into the A side.
    fn output_conflict_marker(&mut self, marker: &'a str) -> Result<()> {
        // Check whether the previous line ends with a newline, by
        // scanning its (possibly chunked) value; only the last chunk
        // matters.
        let mut ends_with_newline = true;
        if self.contents_a.len() > 1 {
            for chunk in self.contents_a[self.contents_a.len() - 1].contents.clone() {
                debug!(
                    "chunk {:?} {:?}",
                    std::str::from_utf8(chunk),
                    chunk.ends_with(b"\n")
                );
                ends_with_newline = chunk.ends_with(b"\n")
            }
        };
        debug!("ends_with_newline {:?}", ends_with_newline);
        let (conflict, _, is_zombie) = *self.conflict_stack.last().unwrap();
        self.conflicts_a.push((conflict, 0, is_zombie));
        if let Some(l) = self.contents_a.last_mut() {
            l.before_conflict_line = true
        }
        if ends_with_newline {
            // Markers apparently start with '\n'; skip it when the
            // previous line already supplied one — TODO confirm the
            // marker constants' format in the `conflict` module.
            self.lines_a.push(ROOT_KEY);
            self.contents_a.push(Contents {
                key: ROOT_KEY,
                contents: Value::from_slice(&marker.as_bytes()[1..]),
                before_conflict_line: false,
            })
        } else {
            self.lines_a.push(ROOT_KEY);
            self.contents_a.push(Contents {
                key: ROOT_KEY,
                contents: Value::from_slice(marker.as_bytes()),
                before_conflict_line: false,
            });
        }
        Ok(())
    }
}

/// Compare a B line against an A line for the diff, tolerating the
/// newline quirks introduced when A was output: conflict markers may
/// carry a leading '\n', and the line before a marker may have had an
/// artificial newline appended.
impl<'a, 'b, T: Transaction + 'b> PartialEq<Contents<'b, T>> for Line<'a> {
    fn eq(&self, b: &Contents<T>) -> bool {
        debug!("comparing {:?} {:?}", self, b);
        if self.0.last() == Some(&0xa) {
            if b.key.is_root() {
                // `b` is a synthetic conflict-marker line; it may or
                // may not begin with '\n' (see
                // `output_conflict_marker`), so skip that byte before
                // comparing.
                let b = b.contents.into_cow();
                if b[0] == 0xa {
                    (&self.0[..]).eq(&b[1..])
                } else {
                    (&self.0[..]).eq(&b[..])
                }
            } else if b.before_conflict_line {
                // We might have added an artificial newline at the
                // end of this A line when outputting it; compare
                // without our trailing '\n' when the lengths differ
                // by exactly one.
                if self.0.len() as u64 - 1 == b.contents.len() {
                    (&self.0[..self.0.len() - 1]).eq(&b.contents)
                } else {
                    self.0.eq(&b.contents)
                }
            } else {
                self.0.eq(&b.contents)
            }
        } else {
            self.0.eq(&b.contents)
        }
    }
}

/// Two `Contents` are equal iff their raw bytes are equal; the key
/// and `before_conflict_line` flag are ignored (consistent with the
/// `Hash` impl below, as required by `HashMap`).
impl<'b, T: Transaction + 'b> PartialEq<Contents<'b, T>> for Contents<'b, T> {
    fn eq(&self, b: &Contents<T>) -> bool {
        self.contents == b.contents
    }
}
impl<'b, T: Transaction + 'b> Eq for Contents<'b, T> {}
// Hash only the bytes, matching `PartialEq` above.
impl<'b, T: Transaction + 'b> std::hash::Hash for Contents<'b, T> {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.contents.hash(state)
    }
}

/// One hunk of the edit script: replace `old_len` lines of A starting
/// at `old` with `new_len` lines of B starting at `new` (pure
/// deletions have `new_len == 0`, pure insertions `old_len == 0`).
struct Replace {
    old: usize,
    old_len: usize,
    new: usize,
    new_len: usize,
}
/// Sink for the `diffs` crate's callbacks: accumulates hunks in `r`
/// and, in `deleted`, maps each vanished A-line index to whether it
/// vanished as part of a replacement (`true`) or a plain deletion
/// (`false`).
struct D {
    r: Vec<Replace>,
    deleted: HashMap<usize, bool>,
}
/// Collects the edit script produced by the `diffs` crate into a flat
/// list of `Replace` records, remembering which old lines vanished.
impl diffs::Diff for D {
    type Error = ();

    /// Pure deletion: `old_len` lines starting at `old` are removed.
    fn delete(&mut self, old: usize, old_len: usize) -> std::result::Result<(), ()> {
        // `false`: these lines vanished by deletion, not replacement.
        self.deleted
            .extend((old..old + old_len).map(|line| (line, false)));
        self.r.push(Replace {
            old,
            old_len,
            new: 0,
            new_len: 0,
        });
        Ok(())
    }

    /// Pure insertion: `new_len` new lines appear at position `old`.
    fn insert(&mut self, old: usize, new: usize, new_len: usize) -> std::result::Result<(), ()> {
        self.r.push(Replace {
            old,
            old_len: 0,
            new,
            new_len,
        });
        Ok(())
    }

    /// Replacement: `old_len` old lines swapped for `new_len` new ones.
    fn replace(
        &mut self,
        old: usize,
        old_len: usize,
        new: usize,
        new_len: usize,
    ) -> std::result::Result<(), ()> {
        // `true`: these lines vanished as part of a replacement.
        self.deleted
            .extend((old..old + old_len).map(|line| (line, true)));
        self.r.push(Replace {
            old,
            old_len,
            new,
            new_len,
        });
        Ok(())
    }
}

/// State threaded through the record-producing pass that turns hunks
/// into `Record`s.
struct DiffProducer<'a> {
    // Counter for fresh line ids in the patch being built.
    line_num: &'a mut LineId,
    // External (hash) key of the file's inode.
    inode: Key<Option<Hash>>,
    // Internal (patch-id) key of the same inode.
    inode_internal: Key<Option<PatchId>>,
    // Path of the file being diffed.
    file: Rc<RepoPath<PathBuf>>,
    // Conflict id -> first line id inserted into that conflict; used
    // by `get_up_context` to chain insertions within a conflict.
    conflict_insertions: HashMap<usize, LineId>,
}

impl<A: Transaction, R: rand::Rng> GenericTxn<A, R> {
    /// Diff the file rooted at `inode` (graph `a`, on `branch`)
    /// against the on-disk bytes `lines_b`, appending the resulting
    /// change records to `actions`.
    ///
    /// Outputs the recorded (A) version via `output_file`, cuts B
    /// into lines (or a single blob if B is not text), runs the
    /// selected diff algorithm, then turns each hunk into deletions
    /// (`diff_delete`) or replacements/insertions (`diff_replace`).
    ///
    /// NOTE(review): the source contained both the pre- and
    /// post-rustfmt versions of the `diffs::patience::diff` and
    /// `diffs::myers::diff` calls concatenated (diff-rendering
    /// artifact, which does not compile); only the formatted calls
    /// are kept.
    pub fn diff<'a>(
        &'a self,
        algorithm: Algorithm,
        inode: Key<Option<Hash>>,
        branch: &'a Branch,
        file: Rc<RepoPath<PathBuf>>,
        line_num: &mut LineId,
        actions: &mut Vec<Record<ChangeContext<PatchId>>>,
        redundant: &mut Vec<(Key<PatchId>, Edge)>,
        a: &mut Graph,
        lines_b: &[u8],
    ) -> Result<()> {
        // First outputting A
        let inode_internal = Key {
            patch: Some(
                self.get_internal(inode.patch.as_ref().unwrap().as_ref())
                    .unwrap(),
            ),
            line: inode.line,
        };
        let mut lines_b_ = Vec::new();
        let mut d = Diff {
            lines_a: Vec::with_capacity(2 * lines_b.len()),
            contents_a: Vec::with_capacity(2 * lines_b.len()),
            conflicts_a: Vec::with_capacity(2 * lines_b.len()),
            lines_b: &[],
            conflict_starts: HashMap::new(),
            conflict_ends: HashMap::new(),
            conflict_down_contexts: HashMap::new(),
            status: HashMap::new(),
            conflict_counter: 0,
            // Sentinel "no conflict" entry at the bottom of the stack.
            conflict_stack: vec![(0, 0, false)],
            solved_conflicts: HashSet::new(),
        };
        self.output_file(branch, &mut d, a, redundant)?;

        // Now cutting B: one `Line` per '\n'-terminated line, plus a
        // trailing unterminated line if any.
        if is_text(&lines_b) {
            let mut i = 0;
            let mut j = 0;

            while j < lines_b.len() {
                if lines_b[j] == 0xa {
                    lines_b_.push(Line(&lines_b[i..j + 1]));
                    i = j + 1
                }
                j += 1;
            }
            if i < j {
                lines_b_.push(Line(&lines_b[i..]))
            }
        } else {
            // Binary file: treat the whole contents as one line.
            lines_b_.push(Line(&lines_b[..]))
        };
        d.lines_b = &lines_b_;

        // And finally diffing
        let contents_a = std::mem::replace(&mut d.contents_a, Vec::new());
        for line in d.lines_a.iter() {
            debug!("line: {:?}", line);
        }

        trace!(
            "contents_a {:?}",
            contents_a.iter().map(|x| &x.contents).collect::<Vec<_>>()
        );
        trace!("contents_b {:?}", lines_b_);
        // `diffs::Replace` merges adjacent delete+insert callbacks
        // into replace callbacks before they reach `D`.
        let mut dd = diffs::Replace::new(D {
            r: Vec::with_capacity(d.lines_a.len() + lines_b_.len()),
            deleted: HashMap::new(),
        });
        match algorithm {
            Algorithm::Patience => {
                let a = contents_a.as_slice();
                diffs::patience::diff(&mut dd, a, 0, a.len(), &lines_b_, 0, lines_b_.len()).unwrap()
            }
            Algorithm::Myers => {
                let a = contents_a.as_slice();
                diffs::myers::diff(&mut dd, a, 0, a.len(), &lines_b_, 0, lines_b_.len()).unwrap()
            }
        }
        let dd = dd.into_inner();
        let mut diffprod = DiffProducer {
            line_num,
            inode,
            inode_internal,
            file,
            conflict_insertions: HashMap::new(),
        };
        // Hunks with no new lines are deletions; everything else goes
        // through `diff_replace`.
        for r in 0..dd.r.len() {
            if dd.r[r].new_len == 0 {
                let old = dd.r[r].old;
                let len = dd.r[r].old_len;
                self.diff_delete(branch, &mut diffprod, actions, &mut d, old, len)
            } else {
                self.diff_replace(branch, &mut diffprod, actions, &mut d, &dd, r)
            }
        }
        debug!("Diff ended");
        Ok(())
    }

    /// Insert epsilon-edges if we're deleting conflict markers.
    fn order_conflict_sides(
        &self,
        branch: &Branch,
        dp: &mut DiffProducer,
        actions: &mut Vec<Record<ChangeContext<PatchId>>>,
        diff: &mut Diff<A>,
        old: usize,
        len: usize,
    ) {


        if !diff.lines_a[old - 1].is_root()
            && !diff.lines_a[old + len].is_root()
            && !self.is_connected(branch, diff.lines_a[old - 1], diff.lines_a[old + len])
        {

            let mut is_conflict = false;
            for i in old .. (old+len) {
            for i in old..(old + len) {
                match diff.status.get(&i) {
                    Some(&Status::Next) => is_conflict = true,
                    Some(_) => return, // we're not reordering a conflict.
                    _ => {}
                }
            }
            if !is_conflict {
                return
                return;
            }
            let from_patch = diff.lines_a[old - 1];
            let to_patch = diff.lines_a[old + len];
            debug!("Conflict reordering {:?} {:?}", from_patch, to_patch);
            actions.push(Record::Change {
                change: Change::NewNodes {
                    up_context: vec![Key {
                        patch: Some(from_patch.patch),
                        line: from_patch.line,
                    }],
                    down_context: vec![Key {
                        patch: Some(to_patch.patch),
                        line: to_patch.line,
                    }],
                    line_num: *dp.line_num,
                    flag: EdgeFlags::EPSILON_EDGE,
                    nodes: vec![Vec::new()],
                    inode: dp.inode.clone(),
                },
                replacement: None,
                old_line: old,
                new_line: 0,
                file: dp.file.clone(),
            });
            *dp.line_num += 1
        }
    }

    /// Actually delete the lines that need to be deleted.
    ///
    /// For each real (non-marker) line in `diff.lines_a[old..old+len]`,
    /// collect its parent edges and emit one `NewEdges` record turning
    /// them into deleted parent edges. Deleting a conflict start
    /// marker instead records that conflict as solved; separator and
    /// end markers are skipped.
    ///
    /// NOTE(review): a duplicated pre-rustfmt `debug!` line left by
    /// the diff rendering was removed, and the inner iterator was
    /// renamed so it no longer shadows the `edges` accumulator;
    /// behavior is unchanged.
    fn delete_lines(
        &self,
        branch: &Branch,
        dp: &mut DiffProducer,
        actions: &mut Vec<Record<ChangeContext<PatchId>>>,
        diff: &mut Diff<A>,
        old: usize,
        len: usize,
    ) {
        let mut edges = Vec::new();
        let flag0 = EdgeFlags::PARENT_EDGE;
        let flag1 = EdgeFlags::PARENT_EDGE | EdgeFlags::PSEUDO_EDGE | EdgeFlags::EPSILON_EDGE;

        debug!("Delete_lines {:?} {:?}", old, len);
        for (i, &key) in diff.lines_a[old..(old + len)].iter().enumerate() {
            debug!("status of {:?}: {:?}", old + i, diff.status.get(&(old + i)));
            match diff.status.get(&(old + i)) {
                Some(&Status::Begin) => {
                    // Deleting a conflict's start marker means the
                    // user solved that conflict.
                    diff.solved_conflicts.insert(diff.conflicts_a[old + i].0);
                    continue;
                }
                None => {}
                // Separator/end markers are not real lines.
                _ => continue,
            }
            let ext_hash = self.external_hash(key.patch);
            for v in self
                .iter_adjacent(&branch, key, flag0, flag1)
                .filter(|v| !v.flag.contains(EdgeFlags::EPSILON_EDGE))
            {
                if v.flag.contains(EdgeFlags::PSEUDO_EDGE) {
                    let edge = Edge {
                        flag: EdgeFlags::DELETED_EDGE | EdgeFlags::PARENT_EDGE,
                        introduced_by: ROOT_PATCH_ID,
                        dest: ROOT_KEY,
                    };
                    let mut existing = self.iter_nodes(&branch, Some((key, Some(edge))));
                    match existing.next() {
                        Some((k_, v_)) if k_ == key && v_.flag == edge.flag => {
                            // `key` has at least one deleted edge
                            // pointing to it, so key is a zombie. We
                            // need to confirm that it is actually
                            // deleted by deleting edge `v`.
                            debug!("zombie edge: {:?} {:?}", key, v);
                        }
                        x => {
                            // The target of this edge is not deleted,
                            // and not a zombie vertex.  We can ignore
                            // this pseudo-edge, as deleting all alive
                            // edges to the target will have the same
                            // effect.
                            debug!("not a zombie: {:?}", x);
                            continue;
                        }
                    }
                };
                debug!("delete: {:?} {:?}", key, v);
                debug!("actually deleting");
                edges.push(patch::NewEdge {
                    from: Key {
                        patch: Some(ext_hash.to_owned()),
                        line: key.line.clone(),
                    },
                    to: Key {
                        patch: Some(self.external_hash(v.dest.patch).to_owned()),
                        line: v.dest.line.clone(),
                    },
                    introduced_by: Some(self.external_hash(v.introduced_by).to_owned()),
                });
            }
        }

        if !edges.is_empty() {
            actions.push(Record::Change {
                change: Change::NewEdges {
                    edges,
                    previous: EdgeFlags::PARENT_EDGE,
                    flag: EdgeFlags::PARENT_EDGE | EdgeFlags::DELETED_EDGE,
                    inode: dp.inode.clone(),
                },
                replacement: None,
                old_line: old,
                new_line: 0,
                file: dp.file.clone(),
            })
        }
    }

    /// If the conflict markers of zombie lines have been deleted by
    /// this hunk, resurrect the zombies: starting just below the
    /// deleted span, walk down while the lines are zombies of a
    /// conflict that was just solved, removing their deleted parent
    /// edges and re-killing their (dead) parents so context repair
    /// doesn't revive them when the patch is applied.
    fn resurrect_zombies(
        &self,
        branch: &Branch,
        dp: &mut DiffProducer,
        actions: &mut Vec<Record<ChangeContext<PatchId>>>,
        diff: &mut Diff<A>,
        old: usize,
        len: usize,
    ) {
        // If the conflict markers of zombie lines have been deleted,
        // resurrect the zombies.
        let mut i = old + len;
        let mut resurrections = Vec::new();
        let mut dead_parents = Vec::new();
        debug!("Resurrect_zombies {:?} {:?}", old, len);
        while i < diff.lines_a.len() {
            let key = diff.lines_a[i];
            let (conflict, _, is_zombie) = diff.conflicts_a[i];
            debug!("resurrecting {:?} {:?}", key, is_zombie);
            debug!("solved ? {:?} {:?}", conflict, diff.solved_conflicts);
            if is_zombie && !key.is_root() && diff.solved_conflicts.contains(&conflict) {
                // Resurrect the zombie, i.e. remove its deleted edges.
                let flag0 = EdgeFlags::PARENT_EDGE | EdgeFlags::DELETED_EDGE;
                let ext_hash = self.external_hash(key.patch);
                for v in self.iter_adjacent(&branch, key, flag0, flag0) {
                    debug!("resurrecting {:?}: {:?}", key, v);
                    let v_hash = self.external_hash(v.dest.patch).to_owned();
                    resurrections.push(patch::NewEdge {
                        from: Key {
                            patch: Some(ext_hash.to_owned()),
                            line: key.line.clone(),
                        },
                        to: Key {
                            patch: Some(v_hash.clone()),
                            line: v.dest.line.clone(),
                        },
                        introduced_by: Some(self.external_hash(v.introduced_by).to_owned()),
                    });
                    // We must re-kill the parents to make sure they
                    // don't get context-fixed when we apply this
                    // patch.
                    for w in self.iter_adjacent(&branch, v.dest, flag0, flag0) {
                        dead_parents.push(patch::NewEdge {
                            from: Key {
                                patch: Some(v_hash.clone()),
                                line: v.dest.line.clone(),
                            },
                            to: Key {
                                patch: Some(self.external_hash(w.dest.patch).to_owned()),
                                line: w.dest.line.clone(),
                            },
                            introduced_by: Some(self.external_hash(w.introduced_by).to_owned()),
                        })
                    }
                }
                i += 1
            } else {
                // Stop at the first line that is not a just-solved zombie.
                break;
            }
        }
        // One record flipping deleted-parent edges back to alive...
        if !resurrections.is_empty() {
            actions.push(Record::Change {
                change: Change::NewEdges {
                    edges: resurrections,
                    previous: EdgeFlags::PARENT_EDGE | EdgeFlags::DELETED_EDGE,
                    flag: EdgeFlags::PARENT_EDGE,
                    inode: dp.inode.clone(),
                },
                replacement: None,
                file: dp.file.clone(),
                old_line: old,
                new_line: 0,
            });
        }
        // ...and one re-asserting the parents' deleted edges as-is.
        if !dead_parents.is_empty() {
            actions.push(Record::Change {
                change: Change::NewEdges {
                    edges: dead_parents,
                    previous: EdgeFlags::PARENT_EDGE | EdgeFlags::DELETED_EDGE,
                    flag: EdgeFlags::PARENT_EDGE | EdgeFlags::DELETED_EDGE,
                    inode: dp.inode.clone(),
                },
                replacement: None,
                file: dp.file.clone(),
                old_line: old,
                new_line: 0,
            });
        }
    }

    /// Handle a pure-deletion hunk: optionally record a conflict-side
    /// ordering (when the deleted span is interior to the file), then
    /// delete the lines and resurrect any zombies whose conflict this
    /// deletion solved.
    fn diff_delete(
        &self,
        branch: &Branch,
        dp: &mut DiffProducer,
        actions: &mut Vec<Record<ChangeContext<PatchId>>>,
        diff: &mut Diff<A>,
        old: usize,
        len: usize,
    ) {
        debug!("diff_delete {:?} {:?}", old, len);
        // Ordering only makes sense when there is a line both above
        // and below the deleted span.
        let interior = old > 0 && old + len < diff.lines_a.len();
        if interior {
            self.order_conflict_sides(branch, dp, actions, diff, old, len)
        }

        self.delete_lines(branch, dp, actions, diff, old, len);
        self.resurrect_zombies(branch, dp, actions, diff, old, len);
    }

    fn get_up_context(
        &self,
        dp: &mut DiffProducer,
        diff: &mut Diff<A>,
        old: usize,
    ) -> Vec<Key<Option<PatchId>>> {
        if old == 0 {
            vec![dp.inode_internal]
        } else {
            let mut up_context_idx = old - 1;
            loop {
                debug!("up_context_idx = {:?} ({:?})", up_context_idx, diff.lines_a[up_context_idx]);
                debug!(
                    "up_context_idx = {:?} ({:?})",
                    up_context_idx, diff.lines_a[up_context_idx]
                );
                match diff.status.get(&up_context_idx) {
                    None => {
                        break vec![Key {
                            patch: Some(diff.lines_a[up_context_idx].patch),
                            line: diff.lines_a[up_context_idx].line,
                        }]
                        }];
                    }
                    Some(&Status::Begin) => {
                        let conflict = diff.conflicts_a[up_context_idx].0;
                        if let Some(&line) = dp.conflict_insertions.get(&conflict) {
                            break vec![Key { patch: None, line }];
                        } else if up_context_idx == 0 {
                            break vec![dp.inode_internal];
                        } else {
                            up_context_idx -= 1
                        }
                    }
                    Some(&Status::Next) => {
                        let conflict = diff.conflicts_a[up_context_idx].0;
                        if let Some(&line) = dp.conflict_insertions.get(&conflict) {
                            break vec![Key { patch: None, line }];
                        } else {
                            let conflict = diff.conflicts_a[up_context_idx].0;
                            up_context_idx = *diff.conflict_starts.get(&conflict).unwrap();
                        }
                    }
                    Some(&Status::End) => {
                        // The up context is all the sides above this end marker
                        let conflict = diff.conflicts_a[up_context_idx].0;
                        let conflict_start = *diff.conflict_starts.get(&conflict).unwrap();
                        let mut up_context = Vec::new();

                        if let Some(ref up) = diff.conflict_down_contexts.get(&up_context_idx) {
                            up_context.extend(up.iter().map(|&x| Key {
                                patch: None,
                                line: x,
                            }));
                        }

                        let mut on = true;
                        while up_context_idx > conflict_start {
                            on |= diff.status.get(&up_context_idx).is_some()
                                && diff.conflicts_a[up_context_idx].0 == conflict;
                            if on && diff.status.get(&up_context_idx).is_none() {
                                // This is a real line, add it
                                up_context.push(Key {
                                    patch: Some(diff.lines_a[up_context_idx].patch),
                                    line: diff.lines_a[up_context_idx].line,
                                });
                                on = false
                            }
                            up_context_idx -= 1
                        }
                        assert!(!up_context.is_empty());
                        break up_context;
                    }
                }
            }
        }
    }

    fn get_down_context(
        &self,
        dp: &mut DiffProducer,
        diff: &mut Diff<A>,
        dd: &D,
        old: usize,
        old_len: usize,
        len: usize,
    ) -> Vec<Key<Option<PatchId>>> {
        let mut down_context_idx = old + old_len;
        loop {
            debug!("down_context_idx = {:?}", down_context_idx);
            if down_context_idx >= diff.lines_a.len() {
                break Vec::new();
            }
            debug!("down_context_idx = {:?} ({:?})", down_context_idx, diff.lines_a[down_context_idx]);
            debug!(
                "down_context_idx = {:?} ({:?})",
                down_context_idx, diff.lines_a[down_context_idx]
            );
            match diff.status.get(&down_context_idx) {
                None => {
                    break vec![Key {
                        patch: Some(diff.lines_a[down_context_idx].patch),
                        line: diff.lines_a[down_context_idx].line,
                    }]
                    }];
                }
                Some(&Status::End) => {
                    let mut e = diff
                        .conflict_down_contexts
                        .entry(down_context_idx)
                        .or_insert(Vec::new());
                    e.push(*dp.line_num + (len - 1));
                    down_context_idx += 1
                }
                Some(&Status::Next) => {
                    let conflict = diff.conflicts_a[down_context_idx].0;
                    down_context_idx = *diff.conflict_ends.get(&conflict).unwrap();
                }
                Some(&Status::Begin) => {
                    // The down context is all the sides below this beginning marker
                    let conflict = diff.conflicts_a[down_context_idx].0;
                    dp.conflict_insertions
                        .insert(conflict, *dp.line_num + (len - 1));
                    let conflict_end = *diff.conflict_ends.get(&conflict).unwrap();
                    let mut down_context = Vec::new();
                    let mut on = true;
                    while down_context_idx < conflict_end {
                        on |= diff.status.get(&down_context_idx).is_some()
                            && diff.conflicts_a[down_context_idx].0 == conflict;
                        if on && diff.status.get(&down_context_idx).is_none() {
                            match dd.deleted.get(&down_context_idx) {
                                Some(true) => {
                                    // If the down context line has
                                    // been replaced, we'll insert the
                                    // edge later (when dealing with
                                    // the replacement).
                                    on = false
                                }
                                Some(false) => {
                                    // If the line has been
                                    // deleted, wait for the next
                                    // appropriate down context
                                    // line.
                                }
                                None => {
                                    // If the down context has not
                                    // been deleted, just add it
                                    // as a down context.
                                    down_context.push(Key {
                                        patch: Some(diff.lines_a[down_context_idx].patch),
                                        line: diff.lines_a[down_context_idx].line,
                                    });
                                    on = false
                                }
                            }
                        }
                        down_context_idx += 1
                    }
                    assert!(!down_context.is_empty());
                    break down_context;
                }
            }
        }
    }

    fn diff_replace(
        &self,
        branch: &Branch,
        dp: &mut DiffProducer,
        actions: &mut Vec<Record<ChangeContext<PatchId>>>,
        diff: &mut Diff<A>,
        dd: &D,
        r: usize,
    ) {
        let old = dd.r[r].old;
        let old_len = dd.r[r].old_len;
        let from_new = dd.r[r].new;
        let len = dd.r[r].new_len;
        debug!("replace {:?} {:?} {:?} {:?}", old, old_len, from_new, len);
        if old_len > 0 {
            self.diff_delete(branch, dp, actions, diff, old, old_len);
        }
        // Inserting between old and old+1. Both might be conflict
        // markers.
        let up_context = self.get_up_context(dp, diff, old);
        let down_context = self.get_down_context(dp, diff, dd, old, old_len, len);

        for i in &diff.lines_b[from_new..(from_new + len)] {
            debug!("{:?}", std::str::from_utf8(&i.0));
        }

        let change = Change::NewNodes {
            up_context,
            down_context,
            line_num: *dp.line_num,
            flag: EdgeFlags::empty(),
            nodes: (&diff.lines_b[from_new..(from_new + len)])
                .iter()
                .map(|x| x.0.to_vec())
                .collect(),
            inode: dp.inode.clone(),
        };
        *dp.line_num += len;
        debug!("insert done");

        if old_len > 0 {
            // this is a replacement
            match actions.last_mut() {
                Some(Record::Change { old_line, ref mut replacement, .. }) => {
                Some(Record::Change {
                    old_line,
                    ref mut replacement,
                    ..
                }) => {
                    if *old_line == old {
                        *replacement = Some(change);
                        return;
                    }
                }
                None => {
                    debug!("We're in a replacement, but no deletion occurred. Conflict solved?");
                }
                e => {
                    error!("{:?}", e);
                    unreachable!()
                }
            }
54
55

56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168


169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241

242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277

278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309




310
311
312
313
314
315
316
317
318
319
320
321

322
323
324
325

326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391

392
393
394
395
396
397

398
399
400
401
402
403
404
405
406
407

408
409
410
411
412
413

414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435

436
437
438
439
440
441
442
443
444
445
446
447

448
449
450
        };
        if filename == ".pijul" {
            return Err(Error::CannotAddDotPijul)
            return Err(Error::CannotAddDotPijul);
        }
        if let Some(inode) = self.get_tree(&parent_id.as_file_id()) {
            // If we already have the file, make sure the file status
            // is Ok (i.e. not zombie, not deleted).
            let mut header = if let Some(header) = self.get_inodes(inode) {
                header.to_owned()
            } else {
                return Err(Error::AlreadyAdded);
            };
            if let FileStatus::Ok = header.status {
            } else {
                header.status = FileStatus::Ok;
                self.replace_inodes(inode, header)?;
            }
            Ok(inode)
        } else {
            // Else, add a new file.

            let child_inode = match child_inode {
                None => self.create_new_inode(),
                Some(i) => i.clone(),
            };
            self.put_tree(&parent_id.as_file_id(), child_inode)?;
            self.put_revtree(child_inode, &parent_id.as_file_id())?;

            if is_dir {
                // If this new file is a directory, add a name-less
                // file id without a reverse in revtree.
                let dir_id = OwnedFileId {
                    parent_inode: child_inode.clone(),
                    basename: SmallString::from_str(""),
                };
                self.put_tree(&dir_id.as_file_id(), child_inode)?;
            };
            Ok(child_inode)
        }
    }

    /// Registers `path` in the `tree`/`revtree` tables, creating any
    /// missing intermediate directories along the way.
    ///
    /// `inode` optionally forces the inode used for the final
    /// component; `is_dir` states whether that component is a
    /// directory. A path with no parent (the repo root) is a no-op.
    pub fn add_inode<P: AsRef<Path>>(
        &mut self,
        inode: Option<Inode>,
        path: &RepoPath<P>,
        is_dir: bool,
    ) -> Result<()> {
        let parent = match path.parent() {
            Some(p) => p,
            None => return Ok(()),
        };
        // Find the deepest already-tracked ancestor, then create one
        // child per remaining (untracked) path component.
        let (mut current, missing) = self.closest_in_repo_ancestor(&parent).unwrap();
        for component in missing {
            current =
                self.make_new_child(current, component.as_os_str().to_str().unwrap(), true, None)?
        }
        self.make_new_child(
            current,
            path.file_name().unwrap().to_str().unwrap(),
            is_dir,
            inode,
        )?;
        Ok(())
    }

    /// Returns `true` iff `a` is `b` itself or one of `b`'s ancestors
    /// in the `revtree` table.
    pub fn inode_is_ancestor_of(&self, a: Inode, mut b: Inode) -> bool {
        // Walk from `b` towards the root; `a` is an ancestor iff we
        // meet it on the way up.
        while a != b {
            match self.get_revtree(b) {
                Some(parent) => b = parent.parent_inode,
                None => return false,
            }
        }
        true
    }

    pub fn move_file(
        &mut self,
        origin: &RepoPath<impl AsRef<Path>>,
        destination: &RepoPath<impl AsRef<Path>>,
        is_dir: bool,
    ) -> Result<()> {
        debug!("move_file: {},{}", origin.display(), destination.display());
        if let Some(parent) = origin.parent() {
            let fileref = OwnedFileId {
                parent_inode: self.find_inode(&parent)?,
                basename: SmallString::from_str(origin.file_name().unwrap().to_str().unwrap()),
            };

            if let Some(inode) = self.get_tree(&fileref.as_file_id()).map(|i| i.clone()) {
                // Now the last inode is in "*inode"
                debug!("txn.del fileref={:?}", fileref);
                self.del_tree(&fileref.as_file_id(), None)?;
                self.del_revtree(inode, None)?;

                debug!("inode={} destination={}", inode.to_hex(), destination.display());
                self.add_inode(Some(inode), destination, is_dir)?;
                self.mark_inode_moved(inode);

                return Ok(());
            }
        }
        Err(Error::FileNotInRepo(origin.to_path_buf()))
    }

    /// Deletes a directory, given by its inode, recursively.
    ///
    /// Returns `true` if `key` or any of its descendants was recorded
    /// (in which case the `tree` entries are kept and the inodes are
    /// only marked `Deleted`, so the deletion can be recorded later).
    ///
    /// Fix: removed duplicated, unterminated merged-diff lines that
    /// repeated the start of the `children` iterator chain.
    pub fn rec_delete(&mut self, key: Inode) -> Result<bool> {
        debug!("rec_delete, key={:?}", key.to_hex());
        let file_id = OwnedFileId {
            parent_inode: key.clone(),
            basename: SmallString::from_str(""),
        };
        // Collect the direct children first: we cannot delete from the
        // tree while iterating over it.
        let children: Vec<(_, Inode)> = self
            .iter_tree(Some((&file_id.as_file_id(), None)))
            .take_while(|&(ref k, _)| key == k.parent_inode)
            .filter(|&(ref k, _)| !k.basename.is_empty())
            .map(|(k, v)| (k.to_owned(), v.to_owned()))
            .collect();

        let mut has_recorded_descendants = false;
        for (_, b) in children {
            debug!("deleting from tree {:?}", b);
            has_recorded_descendants |= self.rec_delete(b)?;
        }

        // Now that the directory is empty, mark the corresponding node as deleted (flag '2').
        if let Some(mut header) = self.get_inodes(key).map(|h| h.clone()) {
            // If this was recorded, mark it deleted.
            debug!("key {:?}, header = {:?}", key, header);
            header.status = FileStatus::Deleted;
            self.replace_inodes(key, header)?;
            debug!("after = {:?}", self.get_inodes(key).map(|h| h.clone()));
        } else if !has_recorded_descendants {
            // Else, simply delete from the tree.
            let parent = self.get_revtree(key).unwrap().to_owned();
            debug!("key = {:?}, parent = {:?}", key, parent);
            self.del_tree(&parent.as_file_id(), None)?;
            self.del_revtree(key, None)?;
        }
        Ok(has_recorded_descendants)
    }

    /// Removes a file from the repository.
    ///
    /// Resolves `path` to its inode and recursively deletes it (and,
    /// for a directory, everything below it).
    pub fn remove_file(&mut self, path: &RepoPath<impl AsRef<Path>>) -> Result<()> {
        debug!("remove_file");
        let inode = self.find_inode(path)?;
        debug!("rec_delete");
        self.rec_delete(inode)?;
        debug!("/rec_delete");
        Ok(())
    }
}

impl<A: Transaction, R> backend::GenericTxn<A, R> {
    /// Traverses the `tree` base recursively, collecting all descendants of `key`.
    fn collect(
        &self,
        key: Inode,
        current_path: &RepoPath<impl AsRef<Path>>,
        basename: &str,
        files: &mut Vec<RepoPath<PathBuf>>,
    ) -> Result<()> {
        debug!("collecting {:?},{:?}", key, basename);
        let add = match self.get_inodes(key) {
            Some(inode) => {
                debug!("node = {:?}", inode);
                inode.status != FileStatus::Deleted
            }
            None => true,
        };
        if add {
            debug!("basename = {:?}", basename);
            
            let next_pb = current_path.join(Path::new(basename));
            if basename.len() > 0 {
                files.push(next_pb.clone())
            }

            debug!("starting iterator, key={:?}", key);
            let fileid = OwnedFileId {
                parent_inode: key.clone(),
                basename: SmallString::from_str(""),
            };
            for (k, v) in self.iter_tree(Some((&fileid.as_file_id(), None)))
            for (k, v) in self
                .iter_tree(Some((&fileid.as_file_id(), None)))
                .take_while(|&(ref k, _)| k.parent_inode == key)
            {
                debug!("iter: {:?} {:?}", k, v);
                if k.basename.len() > 0 {
                    self.collect(v.to_owned(), &next_pb, k.basename.as_str(), files)?;
                }
            }
            debug!("ending iterator {:?}", {
                let v: Vec<_> = self.iter_tree(Some((&fileid.as_file_id(), None))).collect();
                v
            });
        }
        Ok(())
    }

    /// Returns a vector containing all files in the repository.
    pub fn list_files(&self, inode: Inode) -> Result<Vec<RepoPath<PathBuf>>> {
        debug!("list_files {:?}", inode);
        let mut collected = Vec::new();
        self.collect(inode, &in_repo_root(), "", &mut collected)?;
        Ok(collected)
    }

    /// Returns a list of files under the given inode.
    pub fn list_files_under_inode(
        &self,
        inode: Inode,
    ) -> Vec<(SmallString, Option<Key<PatchId>>, Inode)> {
        let mut result = Vec::new();

        let file_id = OwnedFileId {
            parent_inode: inode,
            basename: SmallString::from_str(""),
        };
        for (k, v) in self.iter_tree(Some((&file_id.as_file_id(), None)))
        for (k, v) in self
            .iter_tree(Some((&file_id.as_file_id(), None)))
            .take_while(|&(ref k, _)| k.parent_inode == inode)
        {
            let header = self.get_inodes(k.parent_inode).map(|x| x.clone());
            // add: checking that this file has neither been moved nor deleted.
            println!("============= {:?} {:?}", k, v);
            let add = match header {
                Some(ref h) => h.status == FileStatus::Ok,
                None => true,
            };
            if add && k.basename.len() > 0 {
                result.push((
                    k.basename.to_owned(),
                    header.map(|h| h.key.clone()),
                    v.clone(),
                ))
            }
        }

        result
    }

    /// Returns a list of files under the given inode.
    pub fn list_files_under_node(
        &self,
        branch: &Branch,
        key: Key<PatchId>,
    ) -> BTreeMap<Key<PatchId>, Vec<(FileMetadata, &str)>> {
        let mut result = BTreeMap::new();

        let e = Edge::zero(EdgeFlags::FOLDER_EDGE);
        for (_, child) in self.iter_nodes(branch, Some((key, Some(e)))).take_while(
            |&(k, ref v)| k == key && v.flag <= EdgeFlags::FOLDER_EDGE | EdgeFlags::PSEUDO_EDGE,
            |&(k, ref v)| k == key && v.flag <= EdgeFlags::FOLDER_EDGE | EdgeFlags::PSEUDO_EDGE,
        ) {
        for (_, child) in self
            .iter_nodes(branch, Some((key, Some(e))))
            .take_while(|&(k, ref v)| {
                k == key && v.flag <= EdgeFlags::FOLDER_EDGE | EdgeFlags::PSEUDO_EDGE
            })
        {
            let name = self.get_contents(child.dest).unwrap();
            // This is supposed to be a small string anyway.
            let (perms, basename) = name.as_slice().split_at(2);
            let perms = FileMetadata::from_contents(perms);
            let basename = std::str::from_utf8(basename).unwrap();

            for (_, grandchild) in self.iter_nodes(branch, Some((child.dest, Some(e))))
            for (_, grandchild) in self
                .iter_nodes(branch, Some((child.dest, Some(e))))
                .take_while(|&(k, ref v)| {
                    k == child.dest && v.flag <= EdgeFlags::FOLDER_EDGE | EdgeFlags::PSEUDO_EDGE
                }) {
                })
            {
                let names = result.entry(grandchild.dest.to_owned()).or_insert(vec![]);
                names.push((perms, basename))
            }
        }
        result
    }

    /// Returns `true` iff `inode` is a directory (the root always is;
    /// other directories own a name-less `tree` entry).
    pub fn is_directory(&self, inode: &Inode) -> bool {
        if *inode == ROOT_INODE {
            return true;
        }
        let dir_id = OwnedFileId {
            parent_inode: inode.clone(),
            basename: SmallString::from_str(""),
        };
        self.get_tree(&dir_id.as_file_id()).is_some()
    }

    /// Splits a path into (1) the deepest inode from the root that is
    /// an ancestor of the path or the path itself and (2) the
    /// remainder of this path
    fn closest_in_repo_ancestor<'a>(
        &self,
        path: &'a RepoPath<impl AsRef<Path>>,
    ) -> Result<(Inode, std::iter::Peekable<std::path::Components<'a>>)> {
        let mut components = path.components().peekable();
        let mut fileid = OwnedFileId {
            parent_inode: ROOT_INODE,
            basename: SmallString::from_str(""),
        };

        // Descend while each next component is present in the tree;
        // `Component` is `Copy`, so peeking does not hold a borrow.
        while let Some(&c) = components.peek() {
            fileid.basename = SmallString::from_str(c.as_os_str().to_str().unwrap());
            match self.get_tree(&fileid.as_file_id()) {
                Some(v) => fileid.parent_inode = v.clone(),
                None => break,
            }
            components.next();
        }
        Ok((fileid.parent_inode.clone(), components))
    }

    /// Find the inode corresponding to that path, or return an error if there's no such inode.
    pub fn find_inode(&self, path: &RepoPath<impl AsRef<Path>>)
                      -> Result<Inode> {
        let (inode, mut remaining_path_components) = self.closest_in_repo_ancestor(path)?;
        if remaining_path_components.next().is_none() {
            Ok(inode)
        } else {
            Err(Error::FileNotInRepo(path.to_path_buf()))
        }
    }

    pub fn file_names(
        &self,
        branch: &Branch,
        key: Key<PatchId>,
    ) -> Vec<(Key<PatchId>, FileMetadata, &str)> {
        let mut result = Vec::new();
        let e = Edge::zero(EdgeFlags::FOLDER_EDGE | EdgeFlags::PARENT_EDGE);

        debug!("file_names, key {:?}", key);
        for (_, parent) in self.iter_nodes(branch, Some((key, Some(e))))
        for (_, parent) in self
            .iter_nodes(branch, Some((key, Some(e))))
            .take_while(|&(k, _)| k == key)
            .filter(|&(_, ref v)| {
                v.flag
                    .contains(EdgeFlags::FOLDER_EDGE | EdgeFlags::PARENT_EDGE)
            }) {
            })
        {
            debug!("file_names, parent {:?}", parent);
            match self.get_contents(parent.dest) {
                Some(ref name) if name.len() >= 2 => {
                    // This is supposed to be a small string anyway.
                    let (perms, basename) = name.as_slice().split_at(2);
                    let perms = FileMetadata::from_contents(perms);
                    let basename = std::str::from_utf8(basename).unwrap();

                    for (_, grandparent) in self.iter_nodes(branch, Some((parent.dest, Some(e))))
                    for (_, grandparent) in self
                        .iter_nodes(branch, Some((parent.dest, Some(e))))
                        .take_while(|&(k, _)| k == parent.dest)
                        .filter(|&(_, ref v)| {
                            v.flag
                                .contains(EdgeFlags::FOLDER_EDGE | EdgeFlags::PARENT_EDGE)
                        }) {
                        })
                    {
                        result.push((grandparent.dest.to_owned(), perms, basename));
                        break;
                    }
                }
                _ => error!("Key: {:?}, file {}, line {}", key, file!(), line!()),
            }
        }
        result
    }

    pub fn prefix_keys(&self, branch: &Branch, path: &RepoPath<impl AsRef<Path>>)
                       -> Result<Vec<Key<PatchId>>> {
        let mut result = Vec::new();
        let e = Edge::zero(EdgeFlags::FOLDER_EDGE);

        let mut current_key = ROOT_KEY;

        for comp in path.components() {
            let mut is_first = true;
            let cur = current_key;
            for (_, child) in self.iter_nodes(branch, Some((current_key, Some(e))))
            for (_, child) in self
                .iter_nodes(branch, Some((current_key, Some(e))))
                .take_while(|&(k, _)| k == cur)
                .filter(|&(_, ref v)| v.flag.contains(EdgeFlags::FOLDER_EDGE))
            {
                let contents = self.get_contents(child.dest).unwrap();
                if contents.into_cow().split_at(2).1
                    == comp.as_os_str().to_str().expect("file encoding problem").as_bytes() {
                    if !is_first {
                        return Err(Error::FileNameCount(current_key));
                    }

                    for (_, grandchild) in self.iter_nodes(branch, Some((child.dest, Some(e))))
                    for (_, grandchild) in self
                        .iter_nodes(branch, Some((child.dest, Some(e))))
                        .take_while(|&(k, _)| k == child.dest)



1
2
3
4
5
6

7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22


23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111


112
113
114

115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137

138
139
140
141
142
143
144

145
146
147
148
149

150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178

179
180
181
182
183
184
185
186
187
188
189

190
191
192
193
194
195
196
197
198
199
200
201
202
203
204

205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228



229
230
231
232
233
234
235
236
237
238
239
240
241
242


243
244
245
246
247
248
249
250
251
252
253
254
255

256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272

273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288

289
290
291
292
293
294
295
296
297

298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316

317
318
use Result;
    ) -> impl Iterator<Item = PathBuf> + '_ {
                        return None;
//! Layout of a repository (files in `.pijul`) on the disk. This
//! module exports both high-level functions that require no knowledge
//! of the repository, and lower-level constants documented on
//! [pijul.org/documentation/repository](https://pijul.org/documentation/repository),
//! used for instance for downloading files from remote repositories.

use Result;
use super::Repository;
use backend::DEFAULT_BRANCH;
use backend::{Hash, HashRef};
use backend::{MutTxn, ROOT_INODE};
use bs58;
use flate2;
use ignore::overrides::OverrideBuilder;
use ignore::WalkBuilder;
use patch::{Patch, PatchHeader};
use rand::distributions::Alphanumeric;
use rand::Rng;
use std;
use std::fs::canonicalize;
use std::fs::{create_dir_all, metadata, File};
use std::io::{BufReader, Read, Write};
use std::path::{Path, PathBuf};
use super::Repository;
use backend::{MutTxn, ROOT_INODE};
use {Result, Error};
use std::ffi::OsStr;

/// Given a Path-like type P, RepoPath<P> is a 'P' relative to some fs_representation::RepoRoot
#[derive(Debug, Clone, PartialEq, Eq, Hash, Copy)]
pub struct RepoPath<P: ?Sized>(pub P); // newtype wrapper: the wrapped path is relative to the repo root

/// The repository root, i.e. the empty relative path.
pub fn in_repo_root() -> RepoPath<&'static Path> {
    RepoPath("".as_ref())
}

impl RepoPath<std::path::PathBuf> {
    /// Creates an empty (repo-root) owned path.
    pub fn new() -> Self {
        RepoPath(PathBuf::new())
    }

    /// Builds an owned `RepoPath` from a string.
    pub fn from_string(path: String) -> Self {
        RepoPath(PathBuf::from(path))
    }

    /// Borrows this path as a `RepoPath<&Path>`.
    pub fn as_ref(&self) -> RepoPath<&Path> {
        RepoPath(self.0.as_ref())
    }

    /// Appends a component to the path.
    pub fn push(&mut self, x: &str) {
        self.0.push(x)
    }

    /// Removes the last component; `false` if there was none.
    pub fn pop(&mut self) -> bool {
        self.0.pop()
    }

    /// Replaces the final component with `filename`.
    pub fn set_file_name(&mut self, filename: &OsStr) {
        self.0.set_file_name(filename)
    }
}

    
impl<P: AsRef<Path>> RepoPath<P> {
    /// Views the wrapped value as a standard `Path`.
    pub fn as_path(&self) -> &Path {
        self.0.as_ref()
    }
}

impl<P: AsRef<Path>> RepoPath<P> {
    /// The parent path, if any (`None` at the repository root).
    pub fn parent(&self) -> Option<RepoPath<&std::path::Path>> {
        self.as_path().parent().map(RepoPath)
    }

    /// The final component of the path, if any.
    pub fn file_name(&self) -> Option<&OsStr> {
        self.as_path().file_name()
    }

    /// Splits the path into its parent and its final component.
    pub fn split(&self) -> Option<(RepoPath<&std::path::Path>, &OsStr)> {
        self.parent().map(|p| {
            (
                p,
                self.file_name()
                    .expect("file_name and parent should be consistent"),
            )
        })
    }

    /// Iterates over the path's components.
    pub fn components(&self) -> std::path::Components {
        self.as_path().components()
    }

    /// An owned `PathBuf` copy of this path.
    pub fn to_path_buf(&self) -> PathBuf {
        self.as_path().to_path_buf()
    }

    /// Helper for user-facing display of the path.
    pub fn display(&self) -> std::path::Display {
        self.as_path().display()
    }

    /// An owned copy of this path.
    pub fn to_owned(&self) -> RepoPath<PathBuf> {
        RepoPath(self.0.as_ref().to_path_buf())
    }

    /// Joins `path` onto this path.
    pub fn join(&self, path: &Path) -> RepoPath<PathBuf> {
        // TODO: check that the joined path is indeed inside the repo
        let joined_path = self.as_path().join(path);
        RepoPath(joined_path)
    }

    /// `true` iff this is the repository root (the empty path).
    pub fn empty(&self) -> bool {
        self.as_path() == Path::new("")
    }
}

/// A directory at the root of a pijul repository.
///
/// Fix: the struct was declared twice (pre- and post-rustfmt merged-diff
/// residue); keep the single, formatted declaration.
#[derive(Clone, Copy, Debug)]
pub struct RepoRoot<P: AsRef<Path>> {
    /// Filesystem path of the repository's root directory.
    pub repo_root: P,
}


/// Name of the root directory, i.e. `.pijul`.
pub const PIJUL_DIR_NAME: &str = ".pijul";

/// Basename of the changes file for branch `b`. This file is only
/// used when pulling/pushing over HTTP (where calling remote programs
/// to list patches is impossible).
///
/// The changes file contains the same information as the one returned by `pijul log --hash-only`.
pub fn branch_changes_base_path(b: &str) -> String {
    format!("changes.{}", bs58::encode(b.as_bytes()).into_string())
}

/// Basename of the patch corresponding to the given patch hash.
pub fn patch_file_name(hash: HashRef) -> String {
    format!("{}.gz", hash.to_base58())
}

impl<P: AsRef<Path>> RepoRoot<P> {
    /// The subdirectory of `self` with pijul's metadata
    pub fn repo_dir(&self) -> PathBuf {
        self.repo_root.as_ref().join(PIJUL_DIR_NAME)
    }
    

    /// Directory where the pristine of `self` is.
    /// For instance, if the repository in in `/a/b`,
    /// `self.pristine_dir() is `/a/b/.pijul/pristine`.
    pub fn pristine_dir(&self) -> PathBuf {
        self.repo_dir().join("pristine")
    }
    

    /// Directory where the patches are. `patches_dir("/a/b") = "/a/b/.pijul/patches"`.
    pub fn patches_dir(&self) -> PathBuf {
        self.repo_dir().join("patches")
    }
    

    /// The location of the changes file for theb branch `b`.
    ///
    /// The changes file contains the same information as the one returned by `pijul log --hash-only`.
    pub fn branch_changes_file(&self, b: &str) -> PathBuf {
        self.repo_dir().join(branch_changes_base_path(b))
    }

    /// The meta file, where user preferences are stored.
    pub fn meta_file(&self) -> PathBuf {
        self.repo_dir().join("meta.toml")
    }

    /// The id file is used for remote operations, to identify a
    /// repository and save bandwidth when the remote state is partially
    /// known.
    pub fn id_file(&self) -> PathBuf {
        self.repo_dir().join("id")
    }

    /// Read a complete patch.
    pub fn read_patch(&self, hash: HashRef) -> Result<Patch> {
        let patch_dir = self.patches_dir();
        let path = patch_dir.join(&patch_file_name(hash));
        let f = File::open(path)?;
        let mut f = BufReader::new(f);
        let (_, _, patch) = Patch::from_reader_compressed(&mut f)?;
        Ok(patch)
    }
    

    /// Read a patch, but without the "changes" part, i.e. the actual
    /// contents of the patch.
    pub fn read_patch_nochanges(&self, hash: HashRef) -> Result<PatchHeader> {
        let patch_dir = self.patches_dir();
        let path = patch_dir.join(&patch_file_name(hash));
        let f = File::open(path)?;
        let mut f = flate2::bufread::GzDecoder::new(BufReader::new(f));
        Ok(PatchHeader::from_reader_nochanges(&mut f)?)
    }

    
    /// Read a patch, but without the "changes" part, i.e. the actual
    /// contents of the patch.
    pub fn read_dependencies(&self, hash: HashRef) -> Result<Vec<Hash>> {
        let patch_dir = self.patches_dir();
        let path = patch_dir.join(&patch_file_name(hash));
        let f = File::open(path)?;
        let mut f = flate2::bufread::GzDecoder::new(BufReader::new(f));
        Ok(Patch::read_dependencies(&mut f)?)
    }

    /// The ignore file that is _not_ tracked by pijul.
    pub fn local_ignore_file(&self) -> PathBuf {
        self.repo_dir().join("local").join("ignore")
    }

    
    pub fn get_current_branch(&self) -> Result<String> {
        let mut path = self.repo_dir();
        path.push("current_branch");
        if let Ok(mut f) = File::open(&path) {
            let mut s = String::new();
            f.read_to_string(&mut s)?;
            Ok(s.trim().to_string())
        } else {
            Ok(DEFAULT_BRANCH.to_string())
        }
    }

    pub fn set_current_branch(&self, branch: &str) -> Result<()> {
        let mut path = self.repo_dir();
        path.push("current_branch");
        let mut f = File::create(&path)?;
        f.write_all(branch.trim().as_ref())?;
        f.write_all(b"\n")?;
        Ok(())
    }

    pub fn open_repo(&self, increase: Option<u64>) -> Result<Repository> {
        Repository::open(self.pristine_dir(), increase)
    }




    
    pub fn relativize<'a>(&self, path: &'a Path) -> Result<RepoPath<&'a Path>> {
    	match path.strip_prefix(&self.repo_root) {
            Ok(p) => Ok(RepoPath(p)),
            Err(_) => Err(Error::FileNotInRepo(path.to_path_buf()))
        }
    }

    pub fn absolutize<'a>(&self, path: &RepoPath<impl AsRef<Path>>) -> PathBuf {
        self.repo_root.as_ref().join(path.as_path())
    }
}

impl<P: AsRef<Path>+'static> RepoRoot<P> {
    pub fn untracked_files<T: rand::Rng, Q:AsRef<Path>>(&self, txn: &MutTxn<T>, path: Q) -> impl Iterator<Item = PathBuf> + '_ {
impl<P: AsRef<Path> + 'static> RepoRoot<P> {
    pub fn untracked_files<T: rand::Rng, Q: AsRef<Path>>(
        &self,
        txn: &MutTxn<T>,
        path: Q,
    ) -> impl Iterator<Item = RepoPath<PathBuf>> + '_ {
        let known_files = txn.list_files(ROOT_INODE).unwrap_or_else(|_| vec![]);

        let o = OverrideBuilder::new(self.repo_root.as_ref())
            .add("!.pijul")
            .unwrap()
            .build()
            .unwrap(); // we can be pretty confident these two calls will
        // not fail as the glob is hard-coded
                       // not fail as the glob is hard-coded

        let mut w = WalkBuilder::new(path.as_ref());
        w.git_ignore(false)
            .git_exclude(false)
            .git_global(false)
            .hidden(false)
            .add_custom_ignore_filename(".pijulignore");

        // add .pijul/local/ignore
        w.add_ignore(self.local_ignore_file());
        w.overrides(o);

        w.build().filter_map(move |f| {
            if let Ok(f) = f {
                let p = f.path();
                if p == self.repo_root.as_ref() {
                    return None
                    return None;
                }

                let p_in_repo = self.relativize(&p).unwrap();
                // ^- cannot fail since p must be within repo_root.
                if known_files.iter().any(|t| t.as_ref() == p_in_repo) {
                    return None
                }
                Some(p_in_repo.to_owned())
            } else {
                None
            }
        })
    }
}


/// Find the repository root from one of its descendant
/// directories. Return `None` iff `dir` is not in a repository.
pub fn find_repo_root<'a>(dir: &'a Path) -> Option<RepoRoot<PathBuf>> {
    let mut p = dir.to_path_buf();
    loop {
        p.push(PIJUL_DIR_NAME);
        match metadata(&p) {
            Ok(ref attr) if attr.is_dir() => {
                p.pop();
                return Some(RepoRoot {repo_root: p});
                return Some(RepoRoot { repo_root: p });
            }
            _ => {}
        }
        p.pop();

        if !p.pop() {
            return None;
        }
    }
}

#[doc(hidden)]
pub const ID_LENGTH: usize = 100;

/// Create a repository. `dir` must be the repository root (a
/// `".pijul"` directory will be created in `dir`).
pub fn create<R: Rng>(dir: &Path, mut rng: R) -> std::io::Result<RepoRoot<PathBuf>> {
    let r = RepoRoot {
        repo_root: canonicalize(dir)?
        repo_root: canonicalize(dir)?,
    };




























































use super::{ConflictLine, VertexId, DFS, Graph};
use super::{ConflictLine, Graph, VertexId, DFS};
            stack: vec![(
                scc.len() - 1,
                State::EvalConflict {
                    start: scc.len() - 1,
                    end: 0,
                    cur: 1,
                    last_visit: 0,
                    sides: vec![Side {
                        next: scc.len() - 1,
                        side: vec![],
                    }],
                },
            )],
                    _ => true,
                    _ => true,
                    debug!(
                        "{:?} {:?}",
                        self.dfs.first_visit(next),
                        self.dfs.first_visit(i)
                    );
    fn visit_next_side(
        &mut self,
        i: usize,
        start: usize,
        end: usize,
        cur: usize,
        last_visit: usize,
        sides: Vec<Side>,
    ) {
            })
            .collect();
                    sides_.push(Side { next, side })
            sides_.push(Side { next, side })
pub(in graph) fn conflict_tree(
    graph: &Graph,
    scc: &[Vec<VertexId>],
    dfs: &mut DFS,
) -> Vec<ConflictLine> {
        debug!(
            ">>>>>>>>>>>>> {:?} ({:?}): {:?}, {:?}",
            i, scc[i], graph[scc[i][0]].key, conflict.dfs.visits[i]
        );
            State::Init {
                resume_after_conflict,
            } => {
                            continue;
                    let next = sides
                        .iter()
                        .filter(|x| !x.side.is_empty())
                        .next()
                        .unwrap()
                        .next;
                    let conflict_is_over =
                        { sides.iter().all(|x| x.side.is_empty() || x.next == next) };
                    debug!(
                        "Conflict_is_over: {:?}, next = {:?} ({:?}), end = {:?} ({:?})",
                        conflict_is_over, next, conflict.scc[next], end, conflict.scc[end]
                    );
2
3

4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439

440
441
442
443
444

445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643

644
645
646
//! datastructure, used to edit and organise it (for instance before a
//! record or before outputting a file).
use Result;
use backend::*;
use conflict;
use std::cmp::min;
use std::collections::{HashMap, HashSet};
use Result;

use rand;
use std;

mod dfs;
use self::dfs::{DFS, Path, PathElement};

bitflags! {
    /// Per-line state bits used by the graph algorithms below.
    struct Flags: u8 {
        /// The line is "half deleted" (a zombie): a DELETED parent
        /// edge points to it while it is still alive (see `retrieve`).
        const LINE_HALF_DELETED = 4;
        /// Tarjan's algorithm: the line has already been visited.
        const LINE_VISITED = 2;
        /// Tarjan's algorithm: the line is currently on the stack.
        const LINE_ONSTACK = 1;
    }
}

/// The elementary datum in the representation of the repository state
/// at any given point in time. We need this structure (as opposed to
/// working directly on a branch) in order to add more data, such as
/// strongly connected component identifier, to each node.
#[derive(Debug)]
pub struct Line {
    /// The internal identifier of the line.
    pub key: Key<PatchId>,

    // The status of the line with respect to a dfs of
    // a graph it appears in. This is 0 or
    // `LINE_HALF_DELETED` initially; `tarjan` later sets the
    // VISITED/ONSTACK bits.
    flags: Flags,
    // Index into `Graph::children` of this line's first child.
    children: usize,
    // Number of children, stored consecutively from `children`.
    n_children: usize,
    // Tarjan bookkeeping: discovery index of this line.
    index: usize,
    // Tarjan bookkeeping: smallest discovery index reachable from here.
    lowlink: usize,
    // Strongly connected component this line was assigned to.
    scc: usize,
}

impl Line {
    /// `true` when this line carries the `LINE_HALF_DELETED` flag.
    pub fn is_zombie(&self) -> bool {
        self.flags.intersects(Flags::LINE_HALF_DELETED)
    }
}

/// A graph, representing the whole content of the repository state at
/// a point in time. The encoding is a "flat adjacency list", where
/// each vertex contains a index `children` and a number of children
/// `n_children`. The children of that vertex are then
/// `&g.children[children .. children + n_children]`.
#[derive(Debug)]
pub struct Graph {
    /// Array of all alive lines in the graph. Line 0 is a dummy line
    /// at the end, so that all nodes have a common successor.
    pub lines: Vec<Line>,
    /// Edge + index of the line in the "lines" array above. "None"
    /// means "dummy line at the end", and corresponds to line number
    /// 0.
    children: Vec<(Option<Edge>, VertexId)>,
}

// Index of a `Line` in `Graph::lines`.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Ord, PartialOrd)]
struct VertexId(usize);

// The dummy end-of-graph vertex, `Graph::lines[0]`.
const DUMMY_VERTEX: VertexId = VertexId(0);

impl std::ops::Index<VertexId> for Graph {
    type Output = Line;
    /// Immutable access to the line at `idx` (panics if out of range,
    /// like slice indexing).
    fn index(&self, idx: VertexId) -> &Line {
        &self.lines[idx.0]
    }
}
impl std::ops::IndexMut<VertexId> for Graph {
    fn index_mut(&mut self, idx: VertexId) -> &mut Self::Output {
        self.lines.index_mut(idx.0)
    }
}

use std::io::Write;

impl Graph {
    /// The children of vertex `i`, as `(edge, child vertex)` pairs.
    fn children(&self, i: VertexId) -> &[(Option<Edge>, VertexId)] {
        let ref line = self[i];
        &self.children[line.children..line.children + line.n_children]
    }

    /// The `j`-th child of vertex `i`.
    fn child(&self, i: VertexId, j: usize) -> &(Option<Edge>, VertexId) {
        &self.children[self[i].children + j]
    }

    /// Write this graph to `w` in graphviz "dot" format, for
    /// debugging. When `add_others` is true, edges adjacent in the
    /// branch but absent from this graph are also drawn (in red);
    /// when `introduced_by` is true, each edge label includes the
    /// patch that introduced it.
    pub fn debug<W: Write, R, A: Transaction>(
        &self,
        txn: &GenericTxn<A, R>,
        branch: &Branch,
        add_others: bool,
        introduced_by: bool,
        mut w: W,
    ) -> std::io::Result<()> {
        writeln!(w, "digraph {{")?;
        let mut cache = HashMap::new();
        if add_others {
            for (line, i) in self.lines.iter().zip(0..) {
                cache.insert(line.key, i);
            }
        }
        let mut others = HashSet::new();
        for (line, i) in self.lines.iter().zip(0..) {
            // Node label: at most the first 50 bytes of the line's
            // contents (or "<INVALID>" for non-UTF-8 contents).
            let contents = {
                if let Some(c) = txn.get_contents(line.key) {
                    let c = c.into_cow();
                    if let Ok(c) = std::str::from_utf8(&c) {
                        c.split_at(std::cmp::min(50, c.len())).0.to_string()
                    } else {
                        "<INVALID>".to_string()
                    }
                } else {
                    "".to_string()
                }
            };
            let contents = format!("{:?}", contents);
            // Strip the surrounding quotes added by the Debug format.
            let contents = contents.split_at(contents.len() - 1).0.split_at(1).1;
            writeln!(
                w,
                "n_{}[label=\"{}: {}.{}: {}\"];",
                i,
                i,
                line.key.patch.to_base58(),
                line.key.line.to_hex(),
                contents
            )?;

            if add_others && !line.key.is_root() {
                // Draw, in red, branch edges adjacent to this key that
                // are not part of this graph.
                for v in txn.iter_adjacent(branch, line.key, EdgeFlags::empty(), EdgeFlags::all()) {
                    if let Some(dest) = cache.get(&v.dest) {
                        writeln!(
                            w,
                            "n_{} -> n_{}[color=red,label=\"{:?}{}{}\"];",
                            i,
                            dest,
                            v.flag.bits(),
                            if introduced_by { " " } else { "" },
                            if introduced_by {
                                v.introduced_by.to_base58()
                            } else {
                                String::new()
                            }
                        )?;
                    } else {
                        if !others.contains(&v.dest) {
                            others.insert(v.dest);
                            writeln!(
                                w,
                                "n_{}[label=\"{}.{}\",color=red];",
                                v.dest.to_base58(),
                                v.dest.patch.to_base58(),
                                v.dest.line.to_hex()
                            )?;
                        }
                        writeln!(
                            w,
                            "n_{} -> n_{}[color=red,label=\"{:?}{}{}\"];",
                            i,
                            v.dest.to_base58(),
                            v.flag.bits(),
                            if introduced_by { " " } else { "" },
                            if introduced_by {
                                v.introduced_by.to_base58()
                            } else {
                                String::new()
                            }
                        )?;
                    }
                }
            }
            // Draw this graph's own edges.
            for &(ref edge, VertexId(j)) in
                &self.children[line.children..line.children + line.n_children]
            {
                if let Some(ref edge) = *edge {
                    writeln!(
                        w,
                        "n_{}->n_{}[label=\"{:?}{}{}\"];",
                        i,
                        j,
                        edge.flag.bits(),
                        if introduced_by { " " } else { "" },
                        if introduced_by {
                            edge.introduced_by.to_base58()
                        } else {
                            String::new()
                        }
                    )?
                } else {
                    writeln!(w, "n_{}->n_{}[label=\"none\"];", i, j)?
                }
            }
        }
        writeln!(w, "}}")?;
        Ok(())
    }
}

use sanakirja::value::Value;
/// A "line outputter" trait.
pub trait LineBuffer<'a, T: 'a + Transaction> {
    /// Output the contents of one line, identified by `key`.
    fn output_line(&mut self, key: &Key<PatchId>, contents: Value<'a, T>) -> Result<()>;

    /// Output the raw conflict-marker string `s`.
    fn output_conflict_marker(&mut self, s: &'a str) -> Result<()>;
    /// Called at the beginning of a conflict; by default, prints the
    /// start marker.
    fn begin_conflict(&mut self) -> Result<()> {
        self.output_conflict_marker(conflict::START_MARKER)
    }
    /// Called at the beginning of a zombie conflict; by default, same
    /// as a regular conflict start.
    fn begin_zombie_conflict(&mut self) -> Result<()> {
        self.begin_conflict()
    }
    /// Called between two sides of a conflict.
    fn conflict_next(&mut self) -> Result<()> {
        self.output_conflict_marker(conflict::SEPARATOR)
    }
    /// Called at the end of a conflict.
    fn end_conflict(&mut self) -> Result<()> {
        self.output_conflict_marker(conflict::END_MARKER)
    }
}

/// A `LineBuffer` that writes lines to an underlying `std::io::Write`.
pub struct Writer<W: std::io::Write> {
    pub w: W,
    // Whether the output currently ends with a newline; used to decide
    // how much of a conflict marker to print.
    new_line: bool,
}

impl<W: std::io::Write> Writer<W> {
    pub fn new(w: W) -> Self {
        Writer { w, new_line: true }
    }
}

impl<W: std::io::Write> std::ops::Deref for Writer<W> {
    type Target = W;
    /// Borrow the wrapped writer.
    fn deref(&self) -> &W {
        &self.w
    }
}

impl<W: std::io::Write> std::ops::DerefMut for Writer<W> {
    /// Mutably borrow the wrapped writer.
    fn deref_mut(&mut self) -> &mut W {
        &mut self.w
    }
}

impl<'a, T: 'a + Transaction, W: std::io::Write> LineBuffer<'a, T> for Writer<W> {
    /// Write every chunk of line `k`, tracking whether the output
    /// currently ends with a newline.
    fn output_line(&mut self, k: &Key<PatchId>, c: Value<T>) -> Result<()> {
        let mut ends_with_newline = false;
        let mut is_empty = true;
        for chunk in c {
            debug!("output line {:?} {:?}", k, std::str::from_utf8(chunk));
            is_empty = is_empty && chunk.is_empty();
            ends_with_newline = chunk.ends_with(b"\n");
            self.w.write_all(chunk)?
        }
        if !is_empty {
            // empty "lines" (such as in the beginning of a file)
            // don't change the status of self.new_line.
            self.new_line = ends_with_newline;
        }
        Ok(())
    }

    /// Write a conflict marker. Markers start with a newline
    /// character; when the output is already at the beginning of a
    /// line, that leading newline is skipped.
    fn output_conflict_marker(&mut self, s: &'a str) -> Result<()> {
        debug!("output_conflict_marker {:?}", self.new_line);
        if !self.new_line {
            // `write_all`, not `write`: a short write must not
            // silently drop part of the marker.
            self.w.write_all(s.as_bytes())?;
        } else {
            debug!("{:?}", &s.as_bytes()[1..]);
            self.w.write_all(&s.as_bytes()[1..])?;
        }
        Ok(())
    }
}

impl Graph {
    /// Tarjan's strongly connected component algorithm, returning a
    /// vector of strongly connected components, where each SCC is a
    /// vector of vertex indices.
    fn tarjan(&mut self) -> Vec<Vec<VertexId>> {
        if self.lines.len() <= 1 {
            return vec![vec![VertexId(0)]];
        }

        // Explicit call stack of (vertex, child index, first visit?),
        // replacing the usual recursive formulation.
        let mut call_stack = vec![(VertexId(1), 0, true)];

        let mut index = 0;
        let mut stack = Vec::new();
        let mut scc = Vec::new();
        while let Some((n_l, i, first_visit)) = call_stack.pop() {
            if first_visit {
                // First time we visit this node.
                let ref mut l = self[n_l];
                l.index = index;
                l.lowlink = index;
                l.flags = l.flags | Flags::LINE_ONSTACK | Flags::LINE_VISITED;
                debug!("tarjan {:?} {:?} chi", l.key, l.n_children);
                stack.push(n_l);
                index = index + 1;
            } else {
                // Returning from the "recursive call" on child `i`.
                let &(_, n_child) = self.child(n_l, i);
                self[n_l].lowlink = std::cmp::min(self[n_l].lowlink, self[n_child].lowlink);
            }

            let call_stack_length = call_stack.len();
            for j in i..self[n_l].n_children {
                let &(_, n_child) = self.child(n_l, j);
                if !self[n_child].flags.contains(Flags::LINE_VISITED) {
                    // Simulate recursing into n_child, resuming at
                    // child j of n_l afterwards.
                    call_stack.push((n_l, j, false));
                    call_stack.push((n_child, 0, true));
                    break;
                // self.tarjan_dfs(scc, stack, index, n_child);
                } else {
                    if self[n_child].flags.contains(Flags::LINE_ONSTACK) {
                        self[n_l].lowlink = min(self[n_l].lowlink, self[n_child].index)
                    }
                }
            }
            if call_stack_length < call_stack.len() {
                // recursive call
                continue;
            }
            // Here, all children of n_l have been visited.

            if self[n_l].index == self[n_l].lowlink {
                // n_l is the root of an SCC: pop the whole component
                // off the stack.
                let mut v = Vec::new();
                loop {
                    match stack.pop() {
                        None => break,
                        Some(n_p) => {
                            self[n_p].scc = scc.len();
                            self[n_p].flags = self[n_p].flags ^ Flags::LINE_ONSTACK;
                            v.push(n_p);
                            if n_p == n_l {
                                break;
                            }
                        }
                    }
                }
                scc.push(v);
            }
        }
        scc
    }
}

impl<A: Transaction, R> GenericTxn<A, R> {
    /// This function constructs a graph by reading the branch from the
    /// input key. It guarantees that all nodes but the first one (index
    /// 0) have a common descendant, which is index 0.
    pub fn retrieve<'a>(&'a self, branch: &Branch, key0: Key<PatchId>) -> Graph {
        let mut graph = Graph {
            lines: Vec::new(),
            children: Vec::new(),
        };
        // Insert last "dummy" line (so that all lines have a common descendant).
        graph.lines.push(Line {
            key: ROOT_KEY,
            flags: Flags::empty(),
            children: 0,
            n_children: 0,
            index: 0,
            lowlink: 0,
            scc: 0,
        });

        // Avoid the root key.
        let mut cache: HashMap<Key<PatchId>, VertexId> = HashMap::new();
        cache.insert(ROOT_KEY.clone(), DUMMY_VERTEX);
        let mut stack = Vec::new();
        if self.get_nodes(&branch, key0, None).is_some() {
            stack.push(key0)
        }
        while let Some(key) = stack.pop() {
            if cache.contains_key(&key) {
                // We're doing a DFS here, this can definitely happen.
                continue;
            }

            let idx = VertexId(graph.lines.len());
            cache.insert(key.clone(), idx);

            debug!("{:?}", key);
            let mut is_zombie = false;
            // Does this vertex have a DELETED/DELETED+FOLDER edge
            // pointing to it?
            let mut first_edge = Edge::zero(EdgeFlags::PARENT_EDGE | EdgeFlags::DELETED_EDGE);
            let mut nodes = self.iter_nodes(&branch, Some((key, Some(first_edge))));
            if let Some((k, v)) = nodes.next() {
                debug!("zombie? {:?} {:?}", k, v);
                if k == key
                    && (v.flag | EdgeFlags::FOLDER_EDGE == first_edge.flag | EdgeFlags::FOLDER_EDGE)
                {
                    // Does this vertex also have an alive edge
                    // pointing to it? (might not be the case for the
                    // first vertex)
                    if key == key0 {
                        first_edge.flag = EdgeFlags::PARENT_EDGE;
                        nodes = self.iter_nodes(&branch, Some((key, Some(first_edge))));
                        if let Some((_, v)) = nodes.next() {
                            // We know the key is `key`.
                            is_zombie = v.flag | EdgeFlags::FOLDER_EDGE
                                == first_edge.flag | EdgeFlags::FOLDER_EDGE
                        }
                    } else {
                        is_zombie = true
                    }
                }
            }
            debug!("is_zombie: {:?}", is_zombie);
            let mut l = Line {
                key: key.clone(),
                flags: if is_zombie {
                    Flags::LINE_HALF_DELETED
                } else {
                    Flags::empty()
                },
                children: graph.children.len(),
                n_children: 0,
                index: 0,
                lowlink: 0,
                scc: 0,
            };

            let mut last_flag = EdgeFlags::empty();
            let mut last_dest = ROOT_KEY;

            for (_, v) in self
                .iter_nodes(&branch, Some((key, None)))
                .take_while(|&(k, v)| {
                    k == key
                        && v.flag
                            <= EdgeFlags::PSEUDO_EDGE
                                | EdgeFlags::FOLDER_EDGE
                                | EdgeFlags::EPSILON_EDGE
                }) {
                })
            {
                debug!("-> v = {:?}", v);
                if last_flag == EdgeFlags::PSEUDO_EDGE && last_dest == v.dest {
                    // This is a doubled edge, it should be removed.
                /*} else if v.flag.contains(EdgeFlags::EPSILON_EDGE) {
                    /*} else if v.flag.contains(EdgeFlags::EPSILON_EDGE) {
                    // Epsilon lines are skipped.
                    for (_, v_) in self
                        .iter_nodes(&branch, Some((v.dest, None)))
                        .take_while(|&(k_, v_)| {
                            k_ == v.dest
                                && v_.flag
                                <= EdgeFlags::PSEUDO_EDGE
                                | EdgeFlags::FOLDER_EDGE
                                | EdgeFlags::EPSILON_EDGE
                        }) {
                            graph.children.push((Some(v_.clone()), DUMMY_VERTEX));
                            l.n_children += 1;
                            if !cache.contains_key(&v_.dest) {
                                stack.push(v_.dest.clone())
                            } else {
                                debug!("v already visited");
                            }
                            last_flag = v_.flag;
                            last_dest = v_.dest;
                        }
                     */
                } else {
                    graph.children.push((Some(v.clone()), DUMMY_VERTEX));
                    l.n_children += 1;
                    if !cache.contains_key(&v.dest) {
                        stack.push(v.dest.clone())
                    } else {
                        debug!("v already visited");
                    }
                    last_flag = v.flag;
                    last_dest = v.dest;
                }
            }
            // If this key has no children, give it the dummy child.
            if l.n_children == 0 {
                debug!("no children for {:?}", l);
                graph.children.push((None, DUMMY_VERTEX));
                l.n_children = 1;
            }
            graph.lines.push(l)
        }
        for &mut (ref child_key, ref mut child_idx) in graph.children.iter_mut() {
            if let Some(ref child_key) = *child_key {
                if let Some(idx) = cache.get(&child_key.dest) {
                    *child_idx = *idx
                }
            }
        }
        graph
    }
}

/// The conflict markers keep track of the number of conflicts, and is
/// used for outputting conflicts to a given LineBuffer.
///
/// "Zombie" conflicts are those conflicts introduced by zombie
/// vertices in the contained Graph.
struct ConflictMarkers {
    // Number of conflicts currently open (begun but not yet ended).
    current_conflicts: usize,
}

impl ConflictMarkers {
    /*
    fn output_zombie_markers_if_needed<'a, A: Transaction + 'a, B: LineBuffer<'a, A>>(
        &mut self,
        buf: &mut B,
        vertex: VertexId,
    ) -> Result<()> {
        if self.graph[vertex].is_zombie() {
            if !self.current_is_zombie {
                debug!("begin zombie conflict: vertex = {:?}", self.graph[vertex]);
                self.current_is_zombie = true;
                buf.begin_zombie_conflict()?;
            }
        } else if self.current_is_zombie {
            // Zombie segment has ended
            if !self.current_is_zombie {
                debug!("end zombie conflict: vertex = {:?}", self.graph[vertex]);
            }
            self.current_is_zombie = false;
            buf.end_conflict()?;
        }
        Ok(())
    }
    */
    /// Open a conflict: print the start marker and count it as open.
    fn begin_conflict<'a, A: Transaction + 'a, B: LineBuffer<'a, A>>(
        &mut self,
        buf: &mut B,
    ) -> Result<()> {
        buf.begin_conflict()?;
        self.current_conflicts += 1;
        Ok(())
    }
    /// Close the most recently opened conflict, if any; printing the
    /// end marker only when a conflict is actually open.
    fn end_conflict<'a, A: Transaction + 'a, B: LineBuffer<'a, A>>(
        &mut self,
        buf: &mut B,
    ) -> Result<()> {
        if self.current_conflicts > 0 {
            buf.end_conflict()?;
            self.current_conflicts -= 1;
        }
        Ok(())
    }
}

impl PathElement {
    /// Returns the minimal line key reachable from this path element;
    /// used to order the sides of a conflict deterministically.
    fn smallest_line(&self, graph: &Graph, sccs: &[Vec<VertexId>]) -> Key<PatchId> {
        match *self {
            PathElement::Scc { ref scc } => {
                sccs[*scc].iter().map(|&v| graph[v].key).min().unwrap()
            }
            PathElement::Conflict { ref sides } => sides
                .iter()
                .map(|side| {
                    side.path
                        .iter()
                        .map(|elt| elt.smallest_line(graph, sccs))
                        .min()
                        .unwrap()
                })
                .min()
                .unwrap(),
        }
    }
}

impl<'a, A: Transaction + 'a, R> GenericTxn<A, R> {
    /// Recursively writes a conflict tree to `buf`, wrapping multi-sided
    /// conflicts in begin/end markers. Returns the total number of
    /// conflicts output (including nested ones).
    fn output_conflict<B: LineBuffer<'a, A>>(
        &'a self,
        conflicts: &mut ConflictMarkers,
        buf: &mut B,
        graph: &Graph,
        sccs: &[Vec<VertexId>],
        conflict: &mut [Path],
    ) -> Result<usize> {
        let mut is_first = true;
        let n_sides = conflict.len();
        debug!(target:"libpijul::graph::output_conflict", "n_sides = {:?}", n_sides);
        let mut n_conflicts = 0;
        // A single side is not a conflict: markers only for >= 2 sides.
        if n_sides > 1 {
            conflicts.begin_conflict(buf)?;
            n_conflicts += 1;
        }
        // Sort sides by their smallest line key so output is deterministic.
        conflict.sort_by(|a, b| {
            let a = a.path.iter().map(|a| a.smallest_line(graph, sccs)).min().unwrap();
            let b = b.path.iter().map(|a| a.smallest_line(graph, sccs)).min().unwrap();
            a.cmp(&b)
        });
        for side in conflict {
            if !is_first {
                // Separator between consecutive sides of the same conflict.
                buf.conflict_next()?;
            }
            is_first = false;
            debug!(target:"libpijul::graph::output_conflict", "side = {:?}", side);
            for i in side.path.iter_mut() {
                match *i {
                    PathElement::Scc { scc } => {
                        debug!(target:"libpijul::graph::output_conflict", "output {:?}", scc);
                        self.output_scc(graph, &sccs[scc], buf)?
                    },
                    PathElement::Conflict { ref mut sides } => {
                        debug!(target:"libpijul::graph::output_conflict", "begin conflict {:?}", sides);
                        // Nested conflict: recurse and accumulate its count.
                        n_conflicts += self.output_conflict(conflicts, buf, graph, sccs, sides)?;
                        debug!(target:"libpijul::graph::output_conflict", "end conflict");
                    }
                }
            }
        }
        if n_sides > 1 {
            conflicts.end_conflict(buf)?;
        }
        Ok(n_conflicts)
    }

    /// Output the database contents of the file into the buffer
    /// `buf`. The return value indicates whether there are any
    /// conflicts in the file that was output. If forward edges are
    /// encountered, they are collected into `forward`.
    ///
    pub fn output_file<B: LineBuffer<'a, A>>(
        &'a self,
        branch: &Branch,
        buf: &mut B,
        graph: &mut Graph,
        forward: &mut Vec<(Key<PatchId>, Edge)>,
    ) -> Result<usize> {
        debug!("output_file");
        if graph.lines.len() <= 1 {
            return Ok(0);
        }
        let scc = graph.tarjan(); // SCCs are given here in reverse order.
        debug!("There are {} SCC", scc.len());
        debug!("SCCs = {:?}", scc);

        let mut dfs = DFS::new(scc.len());
        let conflict_tree = graph.dfs(self, branch, &scc, &mut dfs, forward);

        debug!("dfs done");
        buf.output_line(&graph.lines[1].key, Value::from_slice(b""))?;
        // let conflict_tree = conflict_tree(graph, &scc, &mut dfs);
        debug!("conflict_tree = {:?}", conflict_tree);
        let mut conflicts = ConflictMarkers {
            current_conflicts: 0,
        };
        let n_conflicts = self.output_conflict(&mut conflicts, buf, graph, &scc, &mut [conflict_tree])?;
        let n_conflicts =
            self.output_conflict(&mut conflicts, buf, graph, &scc, &mut [conflict_tree])?;
        // Close any remaining zombie part (if needed).
47
48
49
50
51
52
53

54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154



155
156
157
158
159
160
161
162
163
164
165
166

167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225

226
227
228
229



230
231
232
233
234
235


238



241
242

244
245
246
247

248
249
250
251
252
253
254




255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319





320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414



415
416
417

418
419
420
421
422
423
424
425
426

427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487

488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560

561
562
563
564
565
566
567
568
569
extern crate serde_derive;
extern crate diffs;
extern crate failure;
extern crate sequoia_openpgp;
extern crate serde_json;
extern crate tempdir;
extern crate toml;

pub use sanakirja::Transaction;
use std::collections::HashSet;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;

/// All error cases libpijul can report: wrapped errors from the
/// underlying libraries, plus pijul-specific conditions.
#[derive(Debug)]
pub enum Error {
    // Wrapped errors from std and dependencies.
    IO(std::io::Error),
    Sanakirja(sanakirja::Error),
    Bincode(bincode::Error),
    Utf8(std::str::Utf8Error),
    Serde(serde_json::Error),
    OpenSSL(openssl::error::Error),
    OpenSSLStack(openssl::error::ErrorStack),
    Base58Decode(bs58::decode::DecodeError),
    Failure(failure::Error),
    // Pijul-specific conditions.
    /// The file is already tracked.
    AlreadyAdded,
    /// The given path is not tracked in the repository.
    FileNotInRepo(PathBuf),
    /// A database table is missing.
    NoDb(backend::Root),
    /// A patch's contents do not match its hash.
    WrongHash,
    /// Unexpected end of input.
    EOF,
    /// A patch signature failed to verify.
    WrongPatchSignature,
    /// A branch with this name already exists.
    BranchNameAlreadyExists(String),
    /// Wrong file header (possible branch corruption).
    WrongFileHeader(Key<PatchId>),
    /// A file name doesn't have exactly one child.
    FileNameCount(Key<PatchId>),
    /// A dependency of a patch is missing.
    MissingDependency(Hash),
    /// The patch is not on the given branch.
    PatchNotOnBranch(PatchId),
    /// Adding a file or directory named ".pijul" is forbidden.
    CannotAddDotPijul,
    /// The signing key is encrypted.
    KeyIsEncrypted,
}

/// Enables `?` conversion from `std::io::Error`.
impl std::convert::From<std::io::Error> for Error {
    fn from(e: std::io::Error) -> Self {
        Error::IO(e)
    }
}

/// Enables `?` conversion from `failure::Error`.
impl std::convert::From<failure::Error> for Error {
    fn from(e: failure::Error) -> Self {
        Error::Failure(e)
    }
}

/// Enables `?` conversion from `sanakirja::Error`.
impl std::convert::From<sanakirja::Error> for Error {
    fn from(e: sanakirja::Error) -> Self {
        Error::Sanakirja(e)
    }
}

/// Enables `?` conversion from `bincode::Error`.
impl std::convert::From<bincode::Error> for Error {
    fn from(e: bincode::Error) -> Self {
        Error::Bincode(e)
    }
}

/// Enables `?` conversion from `serde_json::Error`.
impl std::convert::From<serde_json::Error> for Error {
    fn from(e: serde_json::Error) -> Self {
        Error::Serde(e)
    }
}

/// Enables `?` conversion from `std::str::Utf8Error`.
impl std::convert::From<std::str::Utf8Error> for Error {
    fn from(e: std::str::Utf8Error) -> Self {
        Error::Utf8(e)
    }
}

/// Enables `?` conversion from `openssl::error::ErrorStack`.
impl std::convert::From<openssl::error::ErrorStack> for Error {
    fn from(e: openssl::error::ErrorStack) -> Self {
        Error::OpenSSLStack(e)
    }
}

/// Enables `?` conversion from `bs58::decode::DecodeError`.
impl std::convert::From<bs58::decode::DecodeError> for Error {
    fn from(e: bs58::decode::DecodeError) -> Self {
        Error::Base58Decode(e)
    }
}

/// Crate-wide result type: the error is always this crate's [`Error`].
pub type Result<A> = std::result::Result<A, Error>;

impl std::fmt::Display for Error {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        match *self {
            Error::IO(ref e) => e.fmt(fmt),
            Error::Sanakirja(ref e) => e.fmt(fmt),
            Error::Bincode(ref e) => e.fmt(fmt),
            Error::Utf8(ref e) => e.fmt(fmt),
            Error::Serde(ref e) => e.fmt(fmt),
            Error::OpenSSL(ref e) => e.fmt(fmt),
            Error::OpenSSLStack(ref e) => e.fmt(fmt),
            Error::Base58Decode(ref e) => e.fmt(fmt),
            Error::Failure(ref e) => e.fmt(fmt),
            Error::AlreadyAdded => write!(fmt, "Already added"),
            Error::FileNotInRepo(ref file) => write!(fmt, "File {:?} not tracked", file),
            Error::NoDb(ref e) => write!(fmt, "Table missing: {:?}", e),
            Error::WrongHash => write!(fmt, "Wrong hash"),
            Error::EOF => write!(fmt, "EOF"),
            Error::WrongPatchSignature => write!(fmt, "Wrong patch signature"),
            Error::BranchNameAlreadyExists(ref name) => write!(fmt, "Branch {:?} already exists", name),
            Error::WrongFileHeader(ref h) => write!(fmt, "Wrong file header (possible branch corruption): {:?}", h),
            Error::FileNameCount(ref f) => write!(fmt, "Name {:?} doesn't have exactly one child", f),
            Error::BranchNameAlreadyExists(ref name) => {
                write!(fmt, "Branch {:?} already exists", name)
            }
            Error::WrongFileHeader(ref h) => write!(
                fmt,
                "Wrong file header (possible branch corruption): {:?}",
                h
            ),
            Error::FileNameCount(ref f) => {
                write!(fmt, "Name {:?} doesn't have exactly one child", f)
            }
            Error::MissingDependency(ref f) => write!(fmt, "Missing dependency: {:?}", f),
            Error::PatchNotOnBranch(ref f) => write!(fmt, "The patch is not on this branch {:?}", f),
            Error::PatchNotOnBranch(ref f) => {
                write!(fmt, "The patch is not on this branch {:?}", f)
            }
            Error::CannotAddDotPijul => write!(fmt, "Cannot add a file or directory name .pijul"),
            Error::KeyIsEncrypted => write!(fmt, "Key is encrypted"),
        }
    }
}

impl std::error::Error for Error {
    /// Short static description per variant; wrapped errors defer to
    /// their own `description()` (legacy `std::error::Error` API).
    fn description(&self) -> &str {
        match *self {
            Error::IO(ref e) => e.description(),
            Error::Sanakirja(ref e) => e.description(),
            Error::Bincode(ref e) => e.description(),
            Error::Utf8(ref e) => e.description(),
            Error::Serde(ref e) => e.description(),
            Error::OpenSSL(ref e) => e.description(),
            Error::OpenSSLStack(ref e) => e.description(),
            Error::Base58Decode(ref e) => e.description(),
            Error::Failure(ref e) => e.name().unwrap_or("Unknown Failure"),
            Error::AlreadyAdded => "Already added",
            Error::FileNotInRepo(_) => "File not tracked",
            Error::NoDb(_) => "One of the tables is missing",
            Error::WrongHash => "Wrong hash",
            Error::EOF => "EOF",
            Error::WrongPatchSignature => "Wrong patch signature",
            Error::BranchNameAlreadyExists(_) => "Branch name already exists",
            Error::WrongFileHeader(_) => "Wrong file header (possible branch corruption)",
            Error::FileNameCount(_) => "A file name doesn't have exactly one child",
            Error::MissingDependency(_) => "Missing dependency",
            Error::PatchNotOnBranch(_) => "The patch is not on this branch",
            Error::CannotAddDotPijul => "Cannot add a file or directory name .pijul",
            Error::KeyIsEncrypted => "Key is encrypted",
        }
    }
}

impl Error {
    /// True when the underlying Sanakirja error is `NotEnoughSpace`,
    /// i.e. the database file ran out of room; callers may grow the
    /// repository and retry (see `apply_resize`).
    pub fn lacks_space(&self) -> bool {
        match *self {
            Error::Sanakirja(sanakirja::Error::NotEnoughSpace) => true,
            _ => false,
        }
    }
}

#[macro_use]
mod backend;
mod file_operations;
pub mod fs_representation;

pub mod patch;
pub mod status;

pub mod apply;
mod conflict;
mod diff;
pub mod graph;
mod diff;
mod output;
mod record;
mod unrecord;

pub use backend::{ApplyTimestamp, Branch, Edge, EdgeFlags, FileId, FileMetadata, FileStatus,
                  GenericTxn, Hash, HashRef, Inode, Key, LineId, MutTxn, OwnedFileId, PatchId,
                  Repository, SmallStr, SmallString, Txn, DEFAULT_BRANCH, ROOT_INODE, ROOT_KEY};
pub use backend::{
    ApplyTimestamp, Branch, Edge, EdgeFlags, FileId, FileMetadata, FileStatus, GenericTxn, Hash,
    HashRef, Inode, Key, LineId, MutTxn, OwnedFileId, PatchId, Repository, SmallStr, SmallString,
    Txn, DEFAULT_BRANCH, ROOT_INODE, ROOT_KEY,
};


>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
use fs_representation::{RepoRoot, ID_LENGTH};
pub use output::{Prefixes, ToPrefixes, ConflictingFile};

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

================================
pub use fs_representation::{RepoRoot, RepoPath};
use fs_representation::{ID_LENGTH};

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
pub use output::{ConflictingFile, Prefixes, ToPrefixes};
pub use patch::{Patch, PatchHeader};
use rand::distributions::Alphanumeric;
use rand::Rng;
use rand::distributions::Alphanumeric;
pub use record::{InodeUpdate, RecordState};
pub use sanakirja::value::Value;
use std::io::Read;

pub use diff::Algorithm as DiffAlgorithm;

impl<'env, T: rand::Rng> backend::MutTxn<'env, T> {
    pub fn output_changes_file<P: AsRef<Path>>(&mut self, branch: &Branch, fs_repo: &RepoRoot<P>) -> Result<()> {
        let changes_file =
        let changes_file =
            fs_repo.branch_changes_file(branch.name.as_str());
    pub fn output_changes_file<P: AsRef<Path>>(
        &mut self,
        branch: &Branch,
        fs_repo: &RepoRoot<P>,
    ) -> Result<()> {
        let changes_file = fs_repo.branch_changes_file(branch.name.as_str());
        let mut branch_id: Vec<u8> = vec![b'\n'; ID_LENGTH + 1];
        {
            if let Ok(mut file) = std::fs::File::open(&changes_file) {
                file.read_exact(&mut branch_id)?;
            }
        }
        let mut branch_id = if let Ok(s) = String::from_utf8(branch_id) {
            s
        } else {
            "\n".to_string()
        };
        if branch_id.as_bytes()[0] == b'\n' {
            branch_id.truncate(0);
            let mut rng = rand::thread_rng();
            branch_id.extend(rng.sample_iter(&Alphanumeric).take(ID_LENGTH));
            branch_id.push('\n');
        }

        let mut file = std::fs::File::create(&changes_file)?;
        file.write_all(&branch_id.as_bytes())?;
        for (s, hash) in self.iter_applied(&branch, None) {
            let hash_ext = self.get_external(hash).unwrap();
            writeln!(file, "{}:{}", hash_ext.to_base58(), s)?
        }
        Ok(())
    }

    pub fn branch_patches(&mut self, branch: &Branch) -> HashSet<(backend::Hash, ApplyTimestamp)> {
        self.iter_patches(branch, None)
            .map(|(patch, time)| (self.external_hash(patch).to_owned(), time))
            .collect()
    }

    pub fn fork(&mut self, branch: &Branch, new_name: &str) -> Result<Branch> {
        if branch.name.as_str() == new_name {
            Err(Error::BranchNameAlreadyExists(new_name.to_string()))
        } else {
            Ok(Branch {
                db: self.txn.fork(&mut self.rng, &branch.db)?,
                patches: self.txn.fork(&mut self.rng, &branch.patches)?,
                revpatches: self.txn.fork(&mut self.rng, &branch.revpatches)?,
                name: SmallString::from_str(new_name),
                apply_counter: branch.apply_counter,
            })
        }
    }
    
    pub fn add_file<P: AsRef<Path>>(&mut self, path: &RepoPath<P>, is_dir: bool) -> Result<()> {
        self.add_inode(None, path, is_dir)
    }

    fn file_nodes_fold_<A, F: FnMut(A, Key<PatchId>) -> A>(
        &self,
        branch: &Branch,
        root: Key<PatchId>,
        level: usize,
        mut init: A,
        f: &mut F,
    ) -> Result<A> {
        for v in self.iter_adjacent(&branch, root, EdgeFlags::empty(), EdgeFlags::all())
            .take_while(|v| { v.flag.contains(EdgeFlags::FOLDER_EDGE)
                              && !v.flag.contains(EdgeFlags::PARENT_EDGE)
            }) {
            }) {
        for v in self
            .iter_adjacent(&branch, root, EdgeFlags::empty(), EdgeFlags::all())
            .take_while(|v| {
                v.flag.contains(EdgeFlags::FOLDER_EDGE) && !v.flag.contains(EdgeFlags::PARENT_EDGE)
            })
        {
            debug!("file_nodes_fold_: {:?} {:?}", root, v);
            if level & 1 == 0 && level > 0 {
                init = f(init, root)
            }
            init = self.file_nodes_fold_(branch, v.dest, level + 1, init, f)?
        }
        Ok(init)
    }

    pub fn file_nodes_fold<A, F: FnMut(A, Key<PatchId>) -> A>(
        &self,
        branch: &Branch,
        init: A,
        mut f: F,
    ) -> Result<A> {
        self.file_nodes_fold_(branch, ROOT_KEY, 0, init, &mut f)
    }
}

impl<T: Transaction, R> backend::GenericTxn<T, R> {
    /// Tells whether a `key` is alive in `branch`, i.e. is either the
    /// root, or all its ingoing edges are alive.
    pub fn is_alive(&self, branch: &Branch, key: Key<PatchId>) -> bool {
        debug!("is_alive {:?}?", key);
        let mut alive = key.is_root();
        let e = Edge::zero(EdgeFlags::PARENT_EDGE);
        for (k, v) in self.iter_nodes(&branch, Some((key, Some(e)))) {
            if k != key {
                break;
            }
            alive = alive
                || (!v.flag.contains(EdgeFlags::DELETED_EDGE)
                    && !v.flag.contains(EdgeFlags::PSEUDO_EDGE))
        }
        alive
    }

    /// Tells whether a `key` is alive or zombie in `branch`, i.e. is
    /// either the root, or has at least one of its incoming alive
    /// edge is alive.
    pub fn is_alive_or_zombie(&self, branch: &Branch, key: Key<PatchId>) -> bool {
        debug!("is_alive_or_zombie {:?}?", key);
        if key == ROOT_KEY {
            return true;
        }
        let e = Edge::zero(EdgeFlags::PARENT_EDGE);
        for (k, v) in self.iter_nodes(&branch, Some((key, Some(e)))) {
            if k != key {
                break;
            }
            debug!("{:?}", v);
            if v.flag.contains(EdgeFlags::PARENT_EDGE) && !v.flag.contains(EdgeFlags::DELETED_EDGE)
            {
                return true;
            }
        }
        false
    }

    /// Test whether `key` has a neighbor with flag `flag0`. If
    /// `include_pseudo`, this includes pseudo-neighbors.
    pub fn has_edge(
        &self,
        branch: &Branch,
        key: Key<PatchId>,
        min: EdgeFlags,
        max: EdgeFlags,
    ) -> bool {
        let e = Edge::zero(min);
        if let Some((k, v)) = self.iter_nodes(&branch, Some((key, Some(e)))).next() {
            debug!("has_edge {:?}", v.flag);
            k == key && (v.flag <= max)
        } else {
            false
        }
    }

    /// Tells which paths (of folder nodes) a key is in.
    pub fn get_file<'a>(&'a self, branch: &Branch, key: Key<PatchId>) -> Vec<Key<PatchId>> {
        let mut stack = vec![key.to_owned()];
        let mut seen = HashSet::new();
        let mut names = Vec::new();
        loop {
            match stack.pop() {
                None => break,
                Some(key) if !seen.contains(&key) => {
                    debug!("key {:?}, None", key);
                    seen.insert(key.clone());

                    for v in self.iter_adjacent(branch, key, EdgeFlags::empty(), EdgeFlags::all())
                    {
                    {
                    for v in self.iter_adjacent(branch, key, EdgeFlags::empty(), EdgeFlags::all()) {
                        debug!("all_edges: {:?}", v);
                    }
                    for v in self.iter_adjacent(branch, key, EdgeFlags::PARENT_EDGE, EdgeFlags::all())
                    for v in
                        self.iter_adjacent(branch, key, EdgeFlags::PARENT_EDGE, EdgeFlags::all())
                    {
                        debug!("get_file {:?}", v);
                        if v.flag | EdgeFlags::PSEUDO_EDGE
                            == EdgeFlags::PARENT_EDGE | EdgeFlags::PSEUDO_EDGE
                        {
                            debug!("push!");
                            stack.push(v.dest.clone())
                        } else if v.flag
                        } else if v
                            .flag
                            .contains(EdgeFlags::PARENT_EDGE | EdgeFlags::FOLDER_EDGE)
                        {
                            names.push(key);
                        }
                    }
                }
                _ => {}
            }
        }
        debug!("get_file returning {:?}", names);
        names
    }

    pub fn get_file_names<'a>(
        &'a self,
        branch: &Branch,
        key: Key<PatchId>,
    ) -> Vec<(Key<PatchId>, Vec<&'a str>)> {
        let mut names = vec![(key, Vec::new())];
        debug!("inode: {:?}", names);
        // Go back to the root.
        let mut next_names = Vec::new();
        let mut only_roots = false;
        let mut inodes = HashSet::new();
        while !only_roots {
            next_names.clear();
            only_roots = true;
            for (inode, names) in names.drain(..) {
                if !inodes.contains(&inode) {
                    inodes.insert(inode.clone());

                    if inode != ROOT_KEY {
                        only_roots = false;
                    }
                    let names_ = self.file_names(branch, inode);
                    if names_.is_empty() {
                        next_names.push((inode, names));
                        break;
                    } else {
                        debug!("names_ = {:?}", names_);
                        for (inode_, _, base) in names_ {
                            let mut names = names.clone();
                            names.push(base);
                            next_names.push((inode_, names))
                        }
                    }
                }
            }
            std::mem::swap(&mut names, &mut next_names)
        }
        debug!("end: {:?}", names);
        for &mut (_, ref mut name) in names.iter_mut() {
            name.reverse()
        }
        names
    }
}

fn make_remote<'a, I: Iterator<Item = &'a Hash>>(
    target: & fs_representation::RepoRoot<impl AsRef<Path>>,
    target: &fs_representation::RepoRoot<impl AsRef<Path>>,
    remote: I,
) -> Result<(Vec<(Hash, Patch)>, usize)> {
    use fs_representation::*;
    use std::fs::File;
    use std::io::BufReader;
    let mut patches = Vec::new();
    let mut patches_dir = target.patches_dir();
    let mut size_increase = 0;

    for h in remote {
        patches_dir.push(&patch_file_name(h.as_ref()));

        debug!("opening {:?}", patches_dir);
        let file = try!(File::open(&patches_dir));
        let mut file = BufReader::new(file);
        let (h, _, patch) = Patch::from_reader_compressed(&mut file)?;

        size_increase += patch.size_upper_bound();
        patches.push((h.clone(), patch));

        patches_dir.pop();
    }
    Ok((patches, size_increase))
}

/// Apply a number of patches, guessing the new repository size.  If
/// this fails, the repository size is guaranteed to have been
/// increased by at least some pages, and it is safe to call this
/// function again.
///
/// Also, this function takes a file lock on the repository.
///
/// `apply_cb` is invoked for each applied patch; the returned vector
/// lists the files left in conflict after application.
pub fn apply_resize<'a, I, F, P: output::ToPrefixes>(
    diff_algorithm: diff::Algorithm,
    target: &fs_representation::RepoRoot<impl AsRef<Path>>,
    branch_name: &str,
    remote: I,
    partial_paths: P,
    apply_cb: F,
) -> Result<Vec<ConflictingFile>>
where
    I: Iterator<Item = &'a Hash>,
    F: FnMut(usize, &Hash),
{
    // Load the patches from disk, then delegate to the preloaded variant.
    let (patches, size_increase) = make_remote(target, remote)?;
    apply_resize_patches(
        diff_algorithm,
        target,
        branch_name,
        &patches,
        size_increase,
        partial_paths,
        apply_cb,
    )
}

/// A version of `apply_resize` with the patches list already loaded.
pub fn apply_resize_patches<'a, F, P: output::ToPrefixes>(
    diff_algorithm: diff::Algorithm,
    target: &fs_representation::RepoRoot<impl AsRef<Path>>,
    branch_name: &str,
    patches: &[(Hash, Patch)],
    size_increase: usize,
    partial_paths: P,
    apply_cb: F,
) -> Result<Vec<ConflictingFile>>
where
    F: FnMut(usize, &Hash),
{
    info!("applying patches with size_increase {:?}", size_increase);
    let repo = target.open_repo(Some(size_increase as u64))?;
    let mut txn = repo.mut_txn_begin(rand::thread_rng())?;
    let mut branch = txn.open_branch(branch_name)?;
    let conflicts = txn.apply_patches(diff_algorithm, &mut branch, target, &patches, partial_paths, apply_cb)?;
    let conflicts = txn.apply_patches(
        diff_algorithm,
        &mut branch,
        target,
        &patches,
        partial_paths,
        apply_cb,
    )?;
    txn.commit_branch(branch)?;
1
2
3
4
5




6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119


120
121
122


123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149

150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223


225
226
227


230


233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406

407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442

443
444
445
446
447
448
449
450
451

452
453
454
455
456
457
458
459
460
461

462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516

517
518
519
// Fix: the merged patch view left duplicated imports (`use graph;`
// twice, `use rand;` three times, `use {Error, Result};` twice),
// which are compile errors (E0252/E0254). One sorted copy of each is
// kept; no import is removed.
use backend::*;
use graph;
use patch::*;
use rand;
use record::InodeUpdate;
use std;
use std::borrow::Cow;
use std::collections::{HashMap, HashSet};
use std::fs;
use std::path::{Path, PathBuf};
use tempdir;
use {Error, Result};

use super::fs_representation::{RepoRoot, RepoPath, in_repo_root};

#[cfg(not(windows))]
use std::os::unix::fs::PermissionsExt;

#[cfg(not(windows))]
// Overwrite the mode bits of `name` with `permissions`, keeping the
// rest of the file's permission state as read from the filesystem.
fn set_permissions(name: &Path, permissions: u16) -> Result<()> {
    let mut perms = std::fs::metadata(&name)?.permissions();
    debug!(
        "setting mode for {:?} to {:?} (currently {:?})",
        name, permissions, perms
    );
    perms.set_mode(permissions as u32);
    std::fs::set_permissions(name, perms)?;
    Ok(())
}

// Unix permission bits have no direct equivalent on Windows: no-op.
#[cfg(windows)]
fn set_permissions(_name: &Path, _permissions: u16) -> Result<()> {
    Ok(())
}

/// One item collected while outputting a branch, carrying the graph
/// key and tree metadata needed to materialize it in the working copy.
#[derive(Debug)]
struct OutputItem {
    // Inode of the parent directory.
    parent: Inode,
    // File metadata (see `FileMetadata`).
    meta: FileMetadata,
    // Key of this item's node in the branch graph.
    key: Key<PatchId>,
    // Existing inode for this item, if already tracked — TODO confirm.
    inode: Option<Inode>,
    // Whether the node is a zombie vertex (see the graph module docs).
    is_zombie: bool,
    // Relation of this item to the requested output prefixes.
    related: Related,
}

/// How a key relates to the requested output prefixes, as computed by
/// `is_related`.
#[derive(Debug, PartialEq, Eq)]
pub enum Related {
    /// The key appears in no requested prefix path.
    No,
    /// The key appears in a prefix path, but not as its first element.
    Ancestor,
    /// The key is the first element of a prefix path (or no prefixes
    /// were requested at all).
    Exact,
}

/// A file left with conflicts after output: its inode, the number of
/// conflicts it contains, and its path inside the repository.
pub struct ConflictingFile {
    pub inode: Inode,
    pub n_conflicts: usize,
    pub path: RepoPath<PathBuf>,
}

/// Determine how `key` relates to the pruning `prefixes`.
///
/// An empty prefix set matches everything (`Exact`). Otherwise, the
/// first chain containing `key` decides: `Exact` if `key` heads the
/// chain, `Ancestor` if it appears later; `No` if it appears nowhere.
fn is_related(prefixes: &Prefixes, key: Key<PatchId>) -> Related {
    if prefixes.0.is_empty() {
        return Related::Exact;
    }
    for pref in prefixes.0.iter() {
        if let Some(position) = pref.iter().position(|&p| p == key) {
            return if position == 0 {
                Related::Exact
            } else {
                Related::Ancestor
            };
        }
    }
    Related::No
}

impl<'env, T: rand::Rng> MutTxn<'env, T> {
    /// Reconstruct the path of `inode` under `working_copy` by climbing
    /// the revtree up to `ROOT_INODE`. Returns `None` if some ancestor
    /// is not registered in the tree.
    fn filename_of_inode(&self, inode: Inode, working_copy: &Path) -> Option<PathBuf> {
        let mut names = Vec::new();
        let mut cursor = inode;
        loop {
            // Look up the parent entry of the current inode.
            let entry = match self.get_revtree(cursor) {
                Some(e) => e,
                None => {
                    debug!("filename_of_inode: not in tree");
                    return None;
                }
            };
            names.push(entry.basename.to_owned());
            cursor = entry.parent_inode.clone();
            if cursor == ROOT_INODE {
                break;
            }
        }
        // Components were collected leaf-first; append them root-first.
        let mut path = working_copy.to_path_buf();
        for name in names.iter().rev() {
            path.push(name.as_small_str().as_str());
        }
        Some(path)
    }

    /// Collect all the children of key `key` into `files`.
    fn collect_children(
        &mut self,
        branch: &Branch,
        path: RepoPath<&Path>,
        key: Key<PatchId>,
        inode: Inode,
        base_path: &RepoPath<impl AsRef<Path> + std::fmt::Debug>,
        prefixes: &Prefixes,
        files: &mut HashMap<RepoPath<PathBuf>, HashMap<Key<PatchId>, OutputItem>>,
    ) -> Result<()> {
        debug!("collect_children {:?}", base_path);
        let f = EdgeFlags::FOLDER_EDGE | EdgeFlags::PSEUDO_EDGE | EdgeFlags::EPSILON_EDGE;
        for b in self.iter_adjacent(&branch, key, EdgeFlags::empty(), f)
        {
        for b in self.iter_adjacent(&branch, key, EdgeFlags::empty(), f) {
            debug!("b={:?}", b);
            let cont_b = self.get_contents(b.dest).unwrap();
            let (_, b_key) = self.iter_nodes(&branch,
                                             Some((b.dest, Some(Edge::zero(EdgeFlags::FOLDER_EDGE)))))
            let (_, b_key) = self
                .iter_nodes(
                    &branch,
                    Some((b.dest, Some(Edge::zero(EdgeFlags::FOLDER_EDGE)))),
                )
                .next()
                .unwrap();
            let b_inode = self.get_revinodes(b_key.dest);

            // This is supposed to be a small string, so we can do
            // as_slice.
            if cont_b.as_slice().len() < 2 {
                error!("cont_b {:?} b.dest {:?}", cont_b, b.dest);
                return Err(Error::WrongFileHeader(b.dest));
            }
            let (perms, basename) = cont_b.as_slice().split_at(2);

            let perms = FileMetadata::from_contents(perms);
            let basename = std::str::from_utf8(basename).unwrap();
            debug!("filename: {:?} {:?}", perms, basename);
            let name = path.join(Path::new(basename));
            let related = is_related(&prefixes, b_key.dest);
            debug!("related {:?} = {:?}", base_path, related);
            if related != Related::No {
                let v = files.entry(name).or_insert(HashMap::new());
                if v.get(&b.dest).is_none() {
                    let is_zombie = {
                        let f = EdgeFlags::FOLDER_EDGE | EdgeFlags::PARENT_EDGE | EdgeFlags::DELETED_EDGE;
                        let f = EdgeFlags::FOLDER_EDGE
                            | EdgeFlags::PARENT_EDGE
                            | EdgeFlags::DELETED_EDGE;
                        self.iter_adjacent(&branch, b_key.dest, f, f)
                            .next()
                            .is_some()
                    };
                    debug!("is_zombie = {:?}", is_zombie);
                    v.insert(
                        b.dest,
                        OutputItem {
                            parent: inode,
                            meta: perms,
                            key: b_key.dest,
                            inode: b_inode,
                            is_zombie,
                            related,
                        },
                    );
                }
            }
        }
        Ok(())
    }

    /// Collect names of files with conflicts
    ///
    /// As conflicts have an internal representation, it can be determined
    /// exactly which files contain conflicts.
    pub fn list_conflict_files(
        &mut self,
        branch_name: &str,
        prefixes: &[RepoPath<&Path>],
    ) -> Result<Vec<RepoPath<PathBuf>>> {
        let mut files = HashMap::new();
        let mut next_files = HashMap::new();
        let branch = self.open_branch(branch_name)?;
        let mut base_path = in_repo_root();
        let prefixes = prefixes.to_prefixes(self, &branch);
        self.collect_children(
            &branch,
            in_repo_root(),
            ROOT_KEY,
            ROOT_INODE,
            &mut base_path,
            &prefixes,
            &mut files,
        )?;

        let mut ret = vec![];
        let mut forward = Vec::new();
        while !files.is_empty() {
            next_files.clear();
            for (a, b) in files.drain() {
                for (_, output_item) in b {
                    // (_, meta, inode_key, inode, is_zombie)
                    // Only bother with existing files
                    if let Some(inode) = output_item.inode {
                        if output_item.is_zombie {
                            ret.push(a.clone())
                        }
                        if output_item.meta.is_dir() {
                            self.collect_children(
                                &branch,
                                a.as_ref(),
                                output_item.key,
                                inode,
                                &mut base_path,
                                &prefixes,
                                &mut next_files,
                            )?;
                        } else {
                            let mut graph = self.retrieve(&branch, output_item.key);
                            let mut buf = graph::Writer::new(std::io::sink());
                            let n_conflicts = self.output_file(&branch, &mut buf, &mut graph, &mut forward)?;

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
                            let n_conflicts =
                                self.output_file(&branch, &mut buf, &mut graph, &mut forward)?;
                            if n_conflicts > 0 {

================================

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
                            if self.output_file(&branch, &mut buf, &mut graph, &mut forward)? {

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
                                ret.push(a.clone())
                            }
                        }
                    }
                }
            }
            std::mem::swap(&mut files, &mut next_files);
        }
        Ok(ret)
    }

    /// Disambiguate `name` in a name conflict by appending the base58
    /// hash of the patch that introduced it: `foo` becomes `foo.<hash>`.
    fn make_conflicting_name(&self, name: &mut RepoPath<PathBuf>, name_key: Key<PatchId>) {
        // Take an owned copy of the basename so the borrow of `name`
        // ends before we mutate it.
        let old = name.file_name().unwrap().to_string_lossy().into_owned();
        let disambiguated = format!("{}.{}", old, &name_key.patch.to_base58());
        name.set_file_name(std::ffi::OsStr::new(&disambiguated));
    }

    fn output_alive_files(
        &mut self,
        branch: &mut Branch,
        prefixes: &Prefixes,
        working_copy: &Path,
        conflicts: &mut Vec<ConflictingFile>,
    ) -> Result<()> {
        debug!("working copy {:?}", working_copy);
        let mut files = HashMap::new();
        let mut next_files = HashMap::new();
        let mut base_path = RepoPath(PathBuf::new());
        self.collect_children(
            branch,
            in_repo_root(),
            ROOT_KEY,
            ROOT_INODE,
            &mut base_path,
            prefixes,
            &mut files,
        )?;

        let mut done = HashSet::new();
        while !files.is_empty() {
            debug!("files {:?}", files);
            next_files.clear();
            for (a, b) in files.drain() {
                let b_len = b.len();
                for (name_key, output_item) in b {
                    // (parent_inode, meta, inode_key, inode, is_zombie)
                    /*let has_several_names = {
                        let e = Edge::zero(EdgeFlags::PARENT_EDGE | EdgeFlags::FOLDER_EDGE);
                        let mut it = self.iter_nodes(branch, Some((inode_key, Some(&e))))
                            .take_while(|&(k, v)| {
                                k == inode_key && v.flag|EdgeFlags::PSEUDO_EDGE == e.flag|EdgeFlags::PSEUDO_EDGE
                            });
                        it.next();
                        it.next().is_some()
                    };*/
                    if !done.insert(output_item.key) {
                        debug!("already done {:?}", output_item.key);
                        continue;
                    }

                    let name = if b_len > 1
                    /*|| has_several_names*/
                    {
                        // debug!("b_len = {:?}, has_several_names {:?}", b_len, has_several_names);
                        let mut name = a.clone();
                        self.make_conflicting_name(&mut name, name_key);
                        Cow::Owned(name.0)
                    } else {
                        Cow::Borrowed(a.as_path())
                    };
                    let file_name = name.file_name().unwrap().to_string_lossy();
                    base_path.push(&file_name);
                    let file_id = OwnedFileId {
                        parent_inode: output_item.parent,
                        basename: SmallString::from_str(&file_name),
                    };
                    let working_copy_name = working_copy.join(name.as_ref());

                    let status = if output_item.is_zombie {
                        FileStatus::Zombie
                    } else {
                        FileStatus::Ok
                    };

                    let inode = if let Some(inode) = output_item.inode {
                        // If the file already exists, find its
                        // current name and rename it if that name
                        // is different.
                        if let Some(ref current_name) = self.filename_of_inode(inode, "".as_ref()) {
                            if current_name != name.as_ref() {
                                let current_name = working_copy.join(current_name);
                                debug!("renaming {:?} to {:?}", current_name, working_copy_name);
                                let parent = self.get_revtree(inode).unwrap().to_owned();
                                self.del_revtree(inode, None)?;
                                self.del_tree(&parent.as_file_id(), None)?;

                                debug!("file_id: {:?}", file_id);
                                if let Some(p) = working_copy_name.parent() {
                                    std::fs::create_dir_all(p)?
                                }
                                if let Err(e) = std::fs::rename(&current_name, &working_copy_name) {
                                    error!(
                                        "while renaming {:?} to {:?}: {:?}",
                                        current_name, working_copy_name, e
                                    )
                                }
                            }
                        }
                        self.put_tree(&file_id.as_file_id(), inode)?;
                        self.put_revtree(inode, &file_id.as_file_id())?;
                        // If the file had been marked for deletion, remove that mark.
                        if let Some(header) = self.get_inodes(inode) {
                            debug!("header {:?}", header);
                            let mut header = header.to_owned();
                            header.status = status;
                            self.replace_inodes(inode, header)?;
                        } else {
                            let header = FileHeader {
                                key: output_item.key,
                                metadata: output_item.meta,
                                status,
                            };
                            debug!("no header {:?}", header);
                            self.replace_inodes(inode, header)?;
                            self.replace_revinodes(output_item.key, inode)?;
                        }
                        inode
                    } else {
                        // Else, create new inode.
                        let inode = self.create_new_inode();
                        let file_header = FileHeader {
                            key: output_item.key,
                            metadata: output_item.meta,
                            status,
                        };
                        self.replace_inodes(inode, file_header)?;
                        self.replace_revinodes(output_item.key, inode)?;
                        debug!("file_id: {:?}", file_id);
                        self.put_tree(&file_id.as_file_id(), inode)?;
                        self.put_revtree(inode, &file_id.as_file_id())?;
                        inode
                    };
                    if output_item.meta.is_dir() {
                        // This is a directory, register it in inodes/trees.
                        std::fs::create_dir_all(&working_copy_name)?;
                        if let Related::Exact = output_item.related {
                            self.collect_children(
                                branch,
                                RepoPath(name.as_ref()),
                                output_item.key,
                                inode,
                                &mut base_path,
                                &Prefixes(Vec::new()),
                                &mut next_files,
                            )?
                        } else {
                            self.collect_children(
                                branch,
                                RepoPath(name.as_ref()),
                                output_item.key,
                                inode,
                                &mut base_path,
                                prefixes,
                                &mut next_files,
                            )?
                        }
                    } else {
                        // Output file.
                        info!(
                            "creating file {:?}, key {:?} {:?}",
                            &name, output_item.key, working_copy_name
                        );
                        let mut f = graph::Writer::new(std::fs::File::create(&working_copy_name).unwrap());
                        let mut f =
                            graph::Writer::new(std::fs::File::create(&working_copy_name).unwrap());
                        debug!("done");
                        let mut l = self.retrieve(branch, output_item.key);
                        if log_enabled!(log::Level::Debug) {
                            let mut w = working_copy_name.clone();
                            w.set_extension("pijul_debug");
                            let f = std::fs::File::create(&w)?;
                            l.debug(self, branch, false, false, f)?;
                        }
                        let mut forward = Vec::new();
                        let n_conflicts = self.output_file(branch, &mut f, &mut l, &mut forward)?;
                        if n_conflicts > 0 {
                            conflicts.push(ConflictingFile {
                                inode,
                                n_conflicts,
                                path: RepoPath(name.to_path_buf()),
                            })
                        }
                        self.remove_redundant_edges(branch, &forward)?
                    }
                    base_path.pop();
                    set_permissions(&working_copy_name, output_item.meta.permissions())?
                }
            }
            std::mem::swap(&mut files, &mut next_files);
        }
        Ok(())
    }

    /// Synchronize the working copy with `branch`, assuming any pending
    /// patch has already been applied.
    ///
    /// First garbage-collects inodes that are neither alive nor zombie
    /// (deleting their files), then outputs the alive files.
    ///
    /// Fix: removed leftover pre-rustfmt duplicate lines (a doubled
    /// `conflicts` parameter, a doubled `let dead` line and a doubled
    /// `Some((…))` expression) that made this function invalid.
    fn output_repository_assuming_no_pending_patch(
        &mut self,
        prefixes: &Prefixes,
        branch: &mut Branch,
        working_copy: &RepoRoot<impl AsRef<Path>>,
        pending_patch_id: PatchId,
        conflicts: &mut Vec<ConflictingFile>,
    ) -> Result<()> {
        debug!(
            "inodes: {:?}",
            self.iter_inodes(None)
                .map(|(u, v)| (u.to_owned(), v.to_owned()))
                .collect::<Vec<_>>()
        );
        // Now, garbage collect dead inodes.
        let dead: Vec<_> = self
            .iter_tree(None)
            .filter_map(|(k, v)| {
                debug!("{:?} {:?}", k, v);
                if let Some(key) = self.get_inodes(v) {
                    if key.key.patch == pending_patch_id || self.is_alive_or_zombie(branch, key.key)
                    {
                        // Don't delete.
                        None
                    } else {
                        Some((
                            k.to_owned(),
                            v,
                            self.filename_of_inode(v, working_copy.repo_root.as_ref()),
                        ))
                    }
                } else {
                    debug!("not in inodes");
                    Some((k.to_owned(), v, None))
                }
            })
            .collect();
        debug!("dead: {:?}", dead);

        // Now, "kill the deads"
        for (ref parent, inode, ref name) in dead {
            self.remove_inode_rec(inode)?;
            debug!("removed");
            if let Some(ref name) = *name {
                debug!("deleting {:?}", name);
                if let Ok(meta) = fs::metadata(name) {
                    // Best-effort deletion: a failure is logged, not fatal.
                    if let Err(e) = if meta.is_dir() {
                        fs::remove_dir_all(name)
                    } else {
                        fs::remove_file(name)
                    } {
                        error!("while deleting {:?}: {:?}", name, e);
                    }
                }
            } else {
                self.del_tree(&parent.as_file_id(), Some(inode))?;
                self.del_revtree(inode, Some(&parent.as_file_id()))?;
            }
        }
        debug!("done deleting dead files");
        // Then output alive files. This has to be done *after*
        // removing files, because a file removed might have the
        // same name as a file added without there being a conflict
        // (depending on the relation between the two patches).
        self.output_alive_files(branch, prefixes, working_copy.repo_root.as_ref(), conflicts)?;
        debug!("done raw_output_repository");
        Ok(())
    }

    fn remove_inode_rec(&mut self, inode: Inode) -> Result<()> {
        // Remove the inode from inodes/revinodes.
        let mut to_kill = vec![inode];
        while let Some(inode) = to_kill.pop() {
            debug!("kill dead {:?}", inode.to_hex());
            let header = self.get_inodes(inode).map(|x| x.to_owned());
            if let Some(header) = header {
                self.del_inodes(inode, None)?;
                self.del_revinodes(header.key, None)?;
                let mut kills = Vec::new();
                // Remove the inode from tree/revtree.
                for (k, v) in self.iter_revtree(Some((inode, None)))
                for (k, v) in self
                    .iter_revtree(Some((inode, None)))
                    .take_while(|&(k, _)| k == inode)



1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241



242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257

258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279



280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303

304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361

362
363
364
365
366
367
368
369
370
371
372

373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396

397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596






597
598
599
600
601
602
603
604
605
606

607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725

726
727
728
                Record::FileDel { del: c, .. } | Record::FileAdd { add: c, .. } => return Some(c),
                Record::FileDel { del: ref c, .. } | Record::FileAdd { add: ref c, .. } => {
                    return Some(c);
//! Definition of patches, and a number of methods.
use chrono;
use chrono::{DateTime, Utc};
use flate2;
use std::collections::HashSet;
use std::fs::{File, OpenOptions};
use std::io::{BufRead, Read, Write};
use std::path::Path;
use std::path::PathBuf;
use std::rc::Rc;
use std::str::from_utf8;
pub type Flag = u8;
use bincode::{deserialize, deserialize_from, serialize};
use sequoia_openpgp::constants::DataFormat;
use sequoia_openpgp::crypto;
use sequoia_openpgp::serialize::stream::{LiteralWriter, Message, Signer};

use {Error, Result};
use super::fs_representation::RepoPath;

mod pretty;

bitflags! {
    /// Boolean flags stored in a patch header.
    #[derive(Serialize, Deserialize)]
    pub struct PatchFlags: u32 {
        /// Marks the patch as a tag (see `UnsignedPatch::is_tag`).
        const TAG = 1;
    }
}

/// A patch without its signature suffix.
///
/// Use [`UnsignedPatch::leave_unsigned`] to wrap it into a `Patch`.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct UnsignedPatch {
    /// Header part, containing the metadata.
    pub header: PatchHeader,
    /// The dependencies of this patch.
    pub dependencies: HashSet<Hash>,
    /// The actual contents of the patch.
    pub changes: Vec<Change<ChangeContext<Hash>>>,
}

/// The definition of a patch.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub enum Patch {
    /// Legacy unsigned format; all accessors panic on this variant.
    Unsigned0,
    /// Legacy signed format; all accessors panic on this variant.
    Signed0,
    /// The current, unsigned patch representation.
    Unsigned(UnsignedPatch),
}

impl Patch {
    /// The contents of this patch.
    pub fn changes(&self) -> &[Change<ChangeContext<Hash>>] {
        match *self {
            Patch::Signed0 | Patch::Unsigned0 => {
                panic!("refusing to interact with old patch version")
            }
            Patch::Unsigned(ref patch) => &patch.changes,
        }
    }

    pub fn changes_mut(&mut self) -> &mut Vec<Change<ChangeContext<Hash>>> {
        match *self {
            Patch::Signed0 | Patch::Unsigned0 => {
                panic!("refusing to interact with old patch version")
            }
            Patch::Unsigned(ref mut patch) => &mut patch.changes,
        }
    }
    /// The dependencies of this patch.
    pub fn dependencies(&self) -> &HashSet<Hash> {
        match *self {
            Patch::Signed0 | Patch::Unsigned0 => {
                panic!("refusing to interact with old patch version")
            }
            Patch::Unsigned(ref patch) => &patch.dependencies,
        }
    }
    pub fn dependencies_mut(&mut self) -> &mut HashSet<Hash> {
        match *self {
            Patch::Signed0 | Patch::Unsigned0 => {
                panic!("refusing to interact with old patch version")
            }
            Patch::Unsigned(ref mut patch) => &mut patch.dependencies,
        }
    }
    /// The header of this patch.
    pub fn header(&self) -> &PatchHeader {
        match *self {
            Patch::Signed0 | Patch::Unsigned0 => {
                panic!("refusing to interact with old patch version")
            }
            Patch::Unsigned(ref patch) => &patch.header,
        }
    }
    pub fn header_mut(&mut self) -> &mut PatchHeader {
        match *self {
            Patch::Signed0 | Patch::Unsigned0 => {
                panic!("refusing to interact with old patch version")
            }
            Patch::Unsigned(ref mut patch) => &mut patch.header,
        }
    }

    /// Reads everything in this patch, but the actual contents.
    pub fn read_dependencies<R: Read>(mut r: R) -> Result<Vec<Hash>> {
        let version: u32 = deserialize_from(&mut r)?;
        assert_eq!(version, 2);
        let _header: PatchHeader = deserialize_from(&mut r)?;
        debug!("version: {:?}", version);
        Ok(deserialize_from(&mut r)?)
    }
}

/// The header of a patch contains all the metadata about a patch
/// (but not the actual contents of a patch).
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct PatchHeader {
    /// Authors of the patch.
    pub authors: Vec<String>,
    /// One-line name of the patch.
    pub name: String,
    /// Optional longer description.
    pub description: Option<String>,
    /// Creation time, in UTC.
    pub timestamp: DateTime<Utc>,
    /// Flags, e.g. `PatchFlags::TAG`.
    pub flag: PatchFlags,
}

use std::ops::{Deref, DerefMut};
// Let a `Patch` be used directly as its `PatchHeader`. Like `header()`,
// this panics on the legacy `Signed0`/`Unsigned0` variants.
impl Deref for Patch {
    type Target = PatchHeader;
    fn deref(&self) -> &Self::Target {
        self.header()
    }
}
impl DerefMut for Patch {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.header_mut()
    }
}

/// Options are for when this edge is between vertices introduced by
/// the current patch.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NewEdge {
    /// Source vertex; a `None` patch refers to the current patch.
    pub from: Key<Option<Hash>>,
    /// Target vertex; a `None` patch refers to the current patch.
    pub to: Key<Option<Hash>>,
    /// Patch that introduced this edge; `None` means the current patch.
    pub introduced_by: Option<Hash>,
}

/// The context (surrounding vertices) of a change, parameterized by the hash type.
pub type ChangeContext<H> = Vec<Key<Option<H>>>;

/// An elementary change in a patch: either new vertices in the
/// repository graph, or new edges between existing vertices.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum Change<Context> {
    /// Insert new nodes (`nodes`) between `up_context` and `down_context`.
    NewNodes {
        up_context: Context,
        down_context: Context,
        flag: EdgeFlags,
        line_num: LineId,
        // Raw contents of each inserted node.
        nodes: Vec<Vec<u8>>,
        // File this change belongs to.
        inode: Key<Option<Hash>>,
    },
    /// Add edges carrying `flag`.
    NewEdges {
        // NOTE(review): presumably the flag the edges carried before
        // this change — confirm against apply/unrecord code.
        previous: EdgeFlags,
        flag: EdgeFlags,
        edges: Vec<NewEdge>,
        // File this change belongs to.
        inode: Key<Option<Hash>>,
    },
}

impl PatchHeader {
    /// Reads everything in this patch, but the actual contents.
    pub fn from_reader_nochanges<R: Read>(mut r: R) -> Result<PatchHeader> {
        let version: u32 = deserialize_from(&mut r)?;
        debug!("version: {:?}", version);
        Ok(deserialize_from(&mut r)?)
    }
}

/// A record is a group of changes pertaining to a file or region of a
/// file.  They are going to be offered together, since displaying one
/// of the changes may be affected by the other (eg, if a file is
/// being moved, we need to display the correct name when asking about
/// modifications within that file).
#[derive(Debug)]
pub enum Record<Context> {
    /// A rename: deletion of the old name plus addition of the new one.
    FileMove {
        new_name: RepoPath<PathBuf>,
        del: Change<Context>,
        add: Change<Context>,
    },
    /// A file deletion, optionally with a change deleting its contents.
    FileDel {
        name: RepoPath<PathBuf>,
        del: Change<Context>,
        contents: Option<Change<Context>>,
    },
    /// A file addition, optionally with a change adding its contents.
    FileAdd {
        name: RepoPath<PathBuf>,
        add: Change<Context>,
        contents: Option<Change<Context>>,
    },
    /// An edit inside `file`, possibly paired with a replacement.
    Change {
        file: Rc<RepoPath<PathBuf>>,
        change: Change<Context>,
        replacement: Option<Change<Context>>,
        old_line: usize,
        new_line: usize,
    },
}

/// Iterator over the (at most two) changes contained in a `Record`.
pub struct RecordIter<R, C> {
    // The record not yet consumed, if any.
    rec: Option<R>,
    // A second change to yield after the first one.
    extra: Option<C>,
}

impl<Context> IntoIterator for Record<Context> {
    type IntoIter = RecordIter<Record<Context>, Change<Context>>;
    type Item = Change<Context>;
    /// Iterate over this record's changes by value.
    fn into_iter(self) -> Self::IntoIter {
        RecordIter {
            rec: Some(self),
            extra: None,
        }
    }
}

impl<Context> Record<Context> {
    /// Iterate over this record's changes by reference.
    pub fn iter(&self) -> RecordIter<&Record<Context>, &Change<Context>> {
        RecordIter {
            rec: Some(self),
            extra: None,
        }
    }
}

impl<Context> Iterator for RecordIter<Record<Context>, Change<Context>> {
    type Item = Change<Context>;
    fn next(&mut self) -> Option<Self::Item> {
        if let Some(extra) = self.extra.take() {
            return Some(extra);
        } else if let Some(rec) = self.rec.take() {
            match rec {
                Record::FileMove { del, add, .. } => {
                    self.extra = Some(add);
                    return Some(del);
                }
                Record::FileDel { del: c, .. }
                | Record::FileAdd { add: c, .. } => return Some(c),
                | Record::Change { change, replacement, .. } => {
                Record::FileDel { del, contents, .. } => {
                    self.extra = contents;
                    return Some(del);
                }
                Record::FileAdd { add, contents, .. } => {
                    self.extra = contents;
                    return Some(add);
                }
                Record::Change {
                    change,
                    replacement,
                    ..
                } => {
                    if let Some(r) = replacement {
                        self.extra = Some(r)
                    }
                    return Some(change)
                    return Some(change);
                }
            }
        } else {
            return None;
        }
    }
}

impl<'a, Context> Iterator for RecordIter<&'a Record<Context>, &'a Change<Context>> {
    type Item = &'a Change<Context>;
    fn next(&mut self) -> Option<Self::Item> {
        if let Some(extra) = self.extra.take() {
            return Some(extra);
        } else if let Some(rec) = self.rec.take() {
            match *rec {
                Record::FileMove {
                    ref del, ref add, ..
                } => {
                    self.extra = Some(add);
                    return Some(del);
                }
                Record::FileDel { del: ref c, .. }
                | Record::FileAdd { add: ref c, .. } => return Some(c),
                | Record::Change { replacement: ref r, change: ref c, .. } => {
                Record::FileDel {
                    ref del,
                    ref contents,
                    ..
                } => {
                    self.extra = contents.as_ref();
                    return Some(del);
                }
                Record::FileAdd {
                    ref add,
                    ref contents,
                    ..
                } => {
                    self.extra = contents.as_ref();
                    return Some(add);
                }
                Record::Change {
                    replacement: ref r,
                    change: ref c,
                    ..
                } => {
                    if let Some(ref r) = r {
                        self.extra = Some(r)
                    }
                    return Some(c)
                    return Some(c);
                }
            }
        } else {
            return None;
        }
    }
}

impl UnsignedPatch {
    pub fn empty() -> Self {
        UnsignedPatch {
            header: PatchHeader {
                authors: vec![],
                name: "".to_string(),
                description: None,
                timestamp: chrono::Utc::now(),
                flag: PatchFlags::empty(),
            },
            dependencies: HashSet::new(),
            changes: vec![],
        }
    }
    pub fn leave_unsigned(self) -> Patch {
        Patch::Unsigned(self)
    }

    fn is_tag(&self) -> bool {
        self.header.flag.contains(PatchFlags::TAG)
    }

    pub fn inverse(&self, hash: &Hash, changes: &mut Vec<Change<ChangeContext<Hash>>>) {
        for ch in self.changes.iter() {
            debug!("inverse {:?}", ch);
            match *ch {
                Change::NewNodes {
                    ref up_context,
                    flag,
                    line_num,
                    ref nodes,
                    ref inode,
                    ..
                } => {
                    let edges = up_context
                        .iter()
                        .map(|up| NewEdge {
                            from: Key {
                                patch: match up.patch {
                                    Some(ref h) => Some(h.clone()),
                                    None => Some(hash.clone()),
                                },
                                line: up.line,
                            },
                            to: Key {
                                patch: Some(hash.clone()),
                                line: line_num,
                            },
                            introduced_by: Some(hash.clone()),
                        }).chain((1..nodes.len()).map(|i| NewEdge {
                        })
                        .chain((1..nodes.len()).map(|i| NewEdge {
                            from: Key {
                                patch: Some(hash.clone()),
                                line: line_num + (i - 1),
                            },
                            to: Key {
                                patch: Some(hash.clone()),
                                line: line_num + i,
                            },
                            introduced_by: Some(hash.clone()),
                        })).collect();
                        }))
                        .collect();
                    changes.push(Change::NewEdges {
                        edges,
                        inode: inode.clone(),
                        previous: flag,
                        flag: flag ^ EdgeFlags::DELETED_EDGE,
                    })
                }
                Change::NewEdges {
                    previous,
                    flag,
                    ref edges,
                    ref inode,
                } => changes.push(Change::NewEdges {
                    previous: flag,
                    flag: previous,
                    inode: inode.clone(),
                    edges: edges
                        .iter()
                        .map(|e| NewEdge {
                            from: e.from.clone(),
                            to: e.to.clone(),
                            introduced_by: Some(hash.clone()),
                        }).collect(),
                        })
                        .collect(),
                }),
            }
        }
    }
}

impl Patch {
    /// An approximate upper bound of the number of extra bytes in the
    /// database this patch might require. This depends a lot on the
    /// patch and the database, so it might be wrong.
    pub fn size_upper_bound(&self) -> usize {
        // General overhead for applying a patch; 8 pages.
        let mut size: usize = 1 << 15;
        for c in self.changes().iter() {
            match *c {
                Change::NewNodes { ref nodes, .. } => {
                    size += nodes.iter().map(|x| x.len()).sum::<usize>();
                    size += nodes.len() * 2048 // + half a page
                }
                Change::NewEdges { ref edges, .. } => size += edges.len() * 2048,
            }
        }
        size
    }

    /// Whether this patch is a tag. Only `Patch::Unsigned` patches can
    /// be tags; the legacy variants always report `false`.
    pub fn is_tag(&self) -> bool {
        match *self {
            Patch::Unsigned(ref patch) => patch.is_tag(),
            _ => false,
        }
    }

    /// Read one patch from a gzip-compressed `BufRead`. If several
    /// patches are available in the same `BufRead`, this method can
    /// be called again.
    pub fn from_reader_compressed<R: BufRead>(r: &mut R) -> Result<(Hash, Vec<u8>, Patch)> {
        let mut rr = flate2::bufread::GzDecoder::new(r);
        // The patch's hash is stored base58-encoded in the gzip
        // header's filename field; a missing header or filename is
        // treated as end-of-stream.
        let filename = {
            let filename = if let Some(header) = rr.header() {
                if let Some(filename) = header.filename() {
                    from_utf8(filename)?
                } else {
                    return Err(Error::EOF);
                }
            } else {
                return Err(Error::EOF);
            };
            if let Some(h) = Hash::from_base58(filename) {
                h
            } else {
                return Err(Error::WrongHash);
            }
        };

        let mut buf = Vec::new();
        rr.read_to_end(&mut buf)?;

        // Checking the hash.
        let patch: Patch = deserialize(&buf[..])?;
        patch.check_hash(&buf, &filename)?;

        Ok((filename, buf, patch))
    }

    /// Check that the hash of the serialized patch `buf` matches
    /// `filename` (the hash announced in the gzip header). Panics on
    /// legacy patch versions.
    fn check_hash(&self, buf: &[u8], filename: &Hash) -> Result<()> {
        let buf = match *self {
            Patch::Signed0 | Patch::Unsigned0 => {
                panic!("refusing to interact with old patch version")
            }
            Patch::Unsigned(_) => buf,
        };
        let hash = Hash::of_slice(buf)?;
        // Only SHA-512 hashes are compared here; any other combination
        // is rejected.
        match (filename, &hash) {
            (&Hash::Sha512(ref filename), &Hash::Sha512(ref hash))
                if &filename.0[..] == &hash.0[..] =>
            {
                Ok(())
            }
            _ => Err(Error::WrongHash),
        }
    }

    /// Serialize this patch and compute the hash of the serialized
    /// bytes. Returns both.
    pub fn to_buf(&self) -> Result<(Vec<u8>, Hash)> {
        // Encoding to a buffer.
        let buf = serialize(&self)?;
        // Hashing the buffer.
        let hash = Hash::of_slice(&buf)?;
        Ok((buf, hash))
    }

    /// Save the patch, computing the hash.
    ///
    /// Writes the gzip-compressed patch to `dir/<hash base58>.gz`,
    /// with the hash stored in the gzip filename header. If `key` is
    /// provided, also writes a detached signature to
    /// `dir/<hash base58>.sig`.
    pub fn save<P: AsRef<Path>>(&self, dir: P, key: Option<&mut crypto::KeyPair>) -> Result<Hash> {
        let (buf, hash) = self.to_buf()?;
        // Writing to the file.
        let h = hash.to_base58();
        let mut path = dir.as_ref().join(&h);
        path.set_extension("gz");
        debug!("save, path {:?}", path);
        let f = File::create(&path)?;
        debug!("created");
        let mut w = flate2::GzBuilder::new()
            .filename(h.as_bytes())
            .write(f, flate2::Compression::best());
        w.write_all(&buf)?;
        w.finish()?;
        debug!("saved");

        if let Some(key) = key {
            // Same file name with a ".sig" extension.
            path.set_extension("sig");
            let file = OpenOptions::new()
                .read(true)
                .write(true)
                .create(true)
                .open(&path)?;
            let message = Message::new(file);
            let mut signer = Signer::detached(message, vec![key as &mut crypto::Signer])?;
            let mut w = LiteralWriter::new(signer, DataFormat::Binary, Some(h.as_bytes()), None)?;
            w.write_all(&buf)?;
            w.finalize()?
        }
        Ok(hash)
    }

    /// Push the changes undoing this patch onto `changes`, where
    /// `hash` is this patch's hash. Panics on legacy patch versions.
    pub fn inverse(&self, hash: &Hash, changes: &mut Vec<Change<ChangeContext<Hash>>>) {
        match *self {
            Patch::Unsigned0 => panic!("Can't reverse old patches"),
            Patch::Signed0 => panic!("Can't reverse old patches"),
            Patch::Unsigned(ref u) => u.inverse(hash, changes),
        }
    }
}

/// Parse `hash:timestamp` lines from `r` into `(Hash,
/// ApplyTimestamp)` pairs. Lines whose hash or timestamp does not
/// parse are skipped silently.
pub fn read_changes(r: &mut Read) -> Result<Vec<(Hash, ApplyTimestamp)>> {
    let mut contents = String::new();
    r.read_to_string(&mut contents)?;
    let mut result = Vec::new();
    for line in contents.lines() {
        let mut fields = line.split(':');
        let hash = fields.next().and_then(Hash::from_base58);
        let timestamp = fields.next().and_then(|f| f.parse().ok());
        if let (Some(h), Some(t)) = (hash, timestamp) {
            result.push((h, t));
        }
    }
    Ok(result)
}

pub fn read_changes_from_file<P: AsRef<Path>>(
    changes_file: P,
) -> Result<Vec<(Hash, ApplyTimestamp)>> {
    let mut file = File::open(changes_file)?;
    read_changes(&mut file)
}

impl<U: Transaction, R> GenericTxn<U, R> {
    pub fn new_patch<I: Iterator<Item = Hash>>(
        &self,
        branch: &Branch,
        authors: Vec<String>,
        name: String,
        description: Option<String>,
        timestamp: DateTime<Utc>,
        changes: Vec<Change<ChangeContext<Hash>>>,
        extra_dependencies: I,
        flag: PatchFlags,
    ) -> Patch {
        let mut dependencies = self.dependencies(branch, changes.iter());
        dependencies.extend(extra_dependencies);
        Patch::Unsigned(UnsignedPatch {
            header: PatchHeader {
                authors,
                name,
                description,
                timestamp,
                flag,
            },
            dependencies,
            changes,
        })
    }

    pub fn dependencies<'a, I: Iterator<Item = &'a Change<ChangeContext<Hash>>>>(
        &self,
        branch: &Branch,
        changes: I,
    ) -> HashSet<Hash> {
        let mut deps = HashSet::new();
        let mut zombie_deps = HashSet::new();
        for ch in changes {
            match *ch {
                Change::NewNodes {
                    ref up_context,
                    ref down_context,
                    ..
                } => for c in up_context.iter().chain(down_context.iter()) {
                    match c.patch {
                        None | Some(Hash::None) => {}
                        Some(ref dep) => {
                            debug!("dependencies (line {}) += {:?}", line!(), dep);
                            deps.insert(dep.clone());
                } => {
                    for c in up_context.iter().chain(down_context.iter()) {
                        match c.patch {
                            None | Some(Hash::None) => {}
                            Some(ref dep) => {
                                debug!("dependencies (line {}) += {:?}", line!(), dep);
                                deps.insert(dep.clone());
                            }
                        }
                    }
                },
                }
                Change::NewEdges {
                    flag, ref edges, ..
                } => {
                    for e in edges {
                        let (from, to) = if flag.contains(EdgeFlags::PARENT_EDGE) {
                            (&e.to, &e.from)
                        } else {
                            (&e.from, &e.to)
                        };

                        match from.patch {
                            None | Some(Hash::None) => {}
                            Some(ref h) => {
                                debug!("dependencies (line {}) += {:?}", line!(), h);
                                deps.insert(h.clone());
                                if flag.contains(EdgeFlags::DELETED_EDGE) {
                                    // Add "known patches" to
                                    // allow identifying missing
                                    // contexts.
                                    let k = Key {
                                        patch: self.get_internal(h.as_ref()).unwrap().to_owned(),
                                        line: from.line.clone(),
                                    };
                                    self.edge_context_deps(branch, k, &mut zombie_deps)
                                }
                            }
                        }
                        match to.patch {
                            None | Some(Hash::None) => {}
                            Some(ref h) => {
                                debug!("dependencies (line {}) += {:?}", line!(), h);
                                deps.insert(h.clone());
                                if flag.contains(EdgeFlags::DELETED_EDGE) {
                                    // Add "known patches" to
                                    // allow identifying
                                    // missing contexts.
                                    let k = Key {
                                        patch: self.get_internal(h.as_ref()).unwrap().to_owned(),
                                        line: to.line.clone(),
                                    };
                                    self.edge_context_deps(branch, k, &mut zombie_deps)
                                }
                            }
                        }
                        match e.introduced_by {
                            None | Some(Hash::None) => {}
                            Some(ref h) => {
                                debug!("dependencies (line {}) += {:?}", line!(), h);
                                zombie_deps.insert(h.clone());
                            }
                        }
                    }
                }
            }
        }
        let mut h = self.minimize_deps(&deps);
        for z in zombie_deps.drain() {
            h.insert(z);
        }
        h
    }

    pub fn minimize_deps(&self, deps: &HashSet<Hash>) -> HashSet<Hash> {
        debug!("minimize_deps {:?}", deps);
        let mut covered = HashSet::new();
        let mut stack = Vec::new();
        let mut seen = HashSet::new();
        for dep_ext in deps.iter() {
            // For each dependency, do a DFS.
            let dep = self.get_internal(dep_ext.as_ref()).unwrap();
            debug!("dep = {:?}", dep);
            stack.clear();
            stack.push((dep, false));
            while let Some((current, on_path)) = stack.pop() {
                // Is current already covered? (either transitively in
                // covered, or directly in deps).
                let already_covered = covered.get(&current).is_some()
                    || (current != dep && {
                        let current_ext = self.get_external(current).unwrap();
                        deps.get(&current_ext.to_owned()).is_some()
                    });
                if already_covered {
                    // We look at all patches on the current path, and
                    // mark them as covered.
                    for &(h, h_on_path) in stack.iter() {
                        if h_on_path {
                            debug!("covered: h {:?}", h);
                            covered.insert(h);
                        }
                    }
                    break;
                }
                // If we've already seen `current`, and dep is not
                // covered, we don't need to explore `current`'s
                // children.  Or, if we're coming here for the second
                // time (i.e. after exploring all children), no need to
                // explore the children again either.
                if seen.insert(current) && !on_path {
                    stack.push((current, true));

                    for (_, parent) in self
                        .iter_revdep(Some((current, None)))
                        .take_while(|k| k.0 == current)
                    {
                        stack.push((parent, false))
                    }
                }
            }
        }

        deps.iter()
            .filter_map(|dep_ext| {
                let dep = self.get_internal(dep_ext.as_ref()).unwrap();
                if covered.get(&dep).is_none() {
                    Some(dep_ext.to_owned())
                } else {
                    None
                }
            }).collect()
            })
            .collect()
    }
2
3

4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27

28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43

44
45
46
47
48
49
50
51

52
53
54
55
56

57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135

136
137
138
139
140
141
142
143
144
145





146
147
148
149
150
151
152
153
154
155


156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183

184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199

200
201
202
use base64;
use serde_json;
use std::collections::HashMap;
use std::str::from_utf8;
use {EdgeFlags, Hash, Key, LineId};

/// JSON-friendly representation of a patch, used for (de)serializing
/// patches through `serde_json`.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct PrettyPatch {
    /// Same header as the binary patch format.
    pub header: PatchHeader,
    /// Dependencies, as base58-encoded hashes.
    pub dependencies: Vec<String>,
    /// Changes, with keys referring to dependencies by index.
    pub changes: Vec<PrettyChange>,
}

impl Patch {
    /// Convert this patch to its JSON-friendly representation.
    ///
    /// # Panics
    /// Panics on the legacy `Signed0`/`Unsigned0` variants.
    pub fn to_pretty(&self) -> PrettyPatch {
        match *self {
            Patch::Unsigned(ref patch) => patch.to_pretty(),
            Patch::Signed0 | Patch::Unsigned0 => {
                panic!("refusing to interact with old patch version")
            }
        }
    }
}

impl UnsignedPatch {
    /// Convert this patch to its JSON-friendly representation,
    /// numbering the dependencies from 1 so keys and edges can refer
    /// to them compactly.
    pub fn to_pretty(&self) -> PrettyPatch {
        // Map each dependency hash to its 1-based position.
        let deps = self
            .dependencies
            .iter()
            .cloned()
            .zip(1..)
            .collect();
        PrettyPatch {
            header: self.header.clone(),
            dependencies: self.dependencies.iter().map(|x| x.to_base58()).collect(),
            changes: self.changes.iter().map(|x| x.to_pretty(&deps)).collect(),
        }
    }
}

impl PrettyPatch {
    /// Convert the JSON-friendly representation back into an
    /// `UnsignedPatch`, decoding base58 dependency hashes.
    ///
    /// # Panics
    /// Panics if any dependency string is not valid base58.
    pub fn to_patch(&self) -> UnsignedPatch {
        // Dependencies in declaration order, used to resolve indices
        // appearing in the changes.
        let deps_vec: Vec<_> = self
            .dependencies
            .iter()
            .map(|x| Hash::from_base58(x).unwrap())
            .collect();

        UnsignedPatch {
            header: self.header.clone(),
            dependencies: self
                .dependencies
                .iter()
                .map(|x| Hash::from_base58(x).unwrap())
                .collect(),
            changes: self
                .changes
                .iter()
                .map(|x| x.to_change(&deps_vec))
                .collect(),
        }
    }
}

/// JSON-friendly representation of a single change.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum PrettyChange {
    NewNodes {
        /// Keys printed in the `dep.line` textual form.
        up_context: Vec<String>,
        down_context: Vec<String>,
        /// Raw `EdgeFlags` bits.
        flag: u8,
        line_num: u64,
        /// Each node is either a JSON string (UTF-8 contents), a
        /// `Filename` object (folder edges) or a `Binary` object
        /// (base64, for non-UTF-8 contents).
        nodes: Vec<serde_json::Value>,
        inode: String,
    },
    NewEdges {
        /// Previous `EdgeFlags` bits.
        previous: u8,
        /// New `EdgeFlags` bits.
        flag: u8,
        edges: Vec<PrettyNewEdge>,
        inode: String,
    },
}

/// JSON-friendly representation of a `NewEdge`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PrettyNewEdge {
    /// Key in textual `dep.line` form (see `print_key`/`read_key`).
    pub from: String,
    pub to: String,
    /// Index into the dependency list; 0 when the edge has no
    /// `introduced_by` hash.
    /// NOTE(review): the printing side numbers dependencies from 1
    /// while `to_new_edge` indexes a 0-based `Vec` — confirm the
    /// intended convention.
    pub introduced_by: usize,
}

impl PrettyNewEdge {
    /// Resolve this textual edge back into a `NewEdge`, looking up
    /// hashes in `deps`.
    ///
    /// # Panics
    /// Panics if `self.introduced_by` is out of bounds for `deps`.
    fn to_new_edge(&self, deps: &[Hash]) -> NewEdge {
        let from = read_key(deps, &self.from);
        let to = read_key(deps, &self.to);
        let introduced_by = Some(deps[self.introduced_by].clone());
        NewEdge {
            from,
            to,
            introduced_by,
        }
    }
}

impl NewEdge {
    /// Convert this edge to its textual form, replacing hashes by
    /// their index in `deps` (0 when `introduced_by` is absent).
    fn to_pretty(&self, deps: &HashMap<Hash, usize>) -> PrettyNewEdge {
        let introduced_by = match self.introduced_by {
            Some(ref i) => *deps.get(i).unwrap(),
            None => 0,
        };
        PrettyNewEdge {
            from: print_key(deps, &self.from),
            to: print_key(deps, &self.to),
            introduced_by,
        }
    }
}

/// JSON form of a folder-edge node: a file name plus its metadata
/// flags, packed from the node's first two bytes.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct Filename {
    name: String,
    flag: u16,
}

/// JSON form of a node whose contents are not valid UTF-8: the bytes
/// are base64-encoded.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct Binary {
    binary: String,
}

fn print_key(deps: &HashMap<Hash, usize>, key: &Key<Option<Hash>>) -> String {
    if let Some(ref k) = key.patch {
        if *k == Hash::None {
            String::new()
        } else {
            debug!("print key: {:?} {:?}", k, deps);
            if let Some(dep) = deps.get(&k) {
                format!("{}.{}", dep, key.line.to_base58())
            } else {
                format!("{}", key.line.to_base58())
            }            
            }
        }
    } else {
        format!("{}", key.line.to_base58())
    }
}

fn read_key(deps: &[Hash], key: &str) -> Key<Option<Hash>> {
    let mut it = key.split('.');
    match (it.next(), it.next()) {
        (Some(a), Some(b)) =>
            Key {
                patch: Some(deps[a.parse::<usize>().unwrap()].clone()),
                line: LineId::from_base58(b).unwrap(),
            },
        (Some(a), Some(b)) => Key {
            patch: Some(deps[a.parse::<usize>().unwrap()].clone()),
            line: LineId::from_base58(b).unwrap(),
        },
        (Some(a), None) => Key {
            patch: None,
            line: LineId::from_base58(a).unwrap(),
        },
        _ => Key {
            patch: Some(Hash::None),
            line: LineId(0)
        }
            line: LineId(0),
        },
    }
}

impl Change<ChangeContext<Hash>> {
    fn to_pretty(&self, deps: &HashMap<Hash, usize>) -> PrettyChange {
        match *self {
            Change::NewNodes {
                ref up_context,
                ref down_context,
                flag,
                line_num,
                ref nodes,
                ref inode,
            } => PrettyChange::NewNodes {
                up_context: up_context.iter().map(|x| print_key(&deps, x)).collect(),
                down_context: down_context.iter().map(|x| print_key(&deps, x)).collect(),
                flag: flag.bits(),
                line_num: line_num.0,
                nodes: if flag.contains(EdgeFlags::FOLDER_EDGE) {
                    nodes
                        .iter()
                        .map(|x| {
                            if x.len() >= 2 {
                                serde_json::to_value(Filename {
                                    name: from_utf8(&x[2..]).unwrap().to_string(),
                                    flag: ((x[0] as u16) << 8) | (x[1] as u16),
                                }).unwrap()
                                })
                                .unwrap()
                            } else {
                                serde_json::to_value("").unwrap()
                            }
                        })
                        .collect()
                } else {
                    nodes
                        .iter()
                        .map(|x| {
                            if let Ok(x) = from_utf8(x) {
                                serde_json::to_value(x).unwrap()
                            } else {
                                serde_json::to_value(Binary {
                                    binary: base64::encode(x),
                                }).unwrap()
                                })
                                .unwrap()
                            }



2
3
4
5
6
7
8
9
10


13






16
use fs_representation::RepoRoot;
use patch::Record;

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
use super::{EdgeFlags, MutTxn};
use backend::{ROOT_PATCH_ID, FileMetadata};
use fs_representation::{in_repo_root, RepoPath, RepoRoot};
use patch::{Change, Record};
use record::RecordState;
use std::path::{Path, PathBuf};
use std::rc::Rc;
use Result;
use std::borrow::Cow;

================================

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
use std::path::{PathBuf};
use record::{RecordState};
use record::{RecordState};
use patch::{Record};
use fs_representation::{RepoRoot};

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15


16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34


35
36
37
38
39
40
41
42
43
44
use Result;
use backend::*;
use rand;
use std::collections::HashMap;
use Result;

impl<'env, T: rand::Rng> MutTxn<'env, T> {
    fn collect_up_context_repair(
        &self,
        branch: &Branch,
        key: Key<PatchId>,
        patch_id: PatchId,
        edges: &mut HashMap<Key<PatchId>, Edge>,
    ) {
        debug!("collect up {:?}", key);
        let start_flag = EdgeFlags::PARENT_EDGE | EdgeFlags::PSEUDO_EDGE;
        for v in self.iter_adjacent(branch, key, start_flag, start_flag | EdgeFlags::FOLDER_EDGE)
            .take_while(|v| { v.introduced_by == patch_id }) {
        for v in self
            .iter_adjacent(branch, key, start_flag, start_flag | EdgeFlags::FOLDER_EDGE)
            .take_while(|v| v.introduced_by == patch_id)
        {
            if !edges.contains_key(&key) {
                edges.insert(key.to_owned(), v.to_owned());
                self.collect_up_context_repair(branch, v.dest, patch_id, edges)
            }
        }
    }

    fn collect_down_context_repair(
        &self,
        branch: &Branch,
        key: Key<PatchId>,
        patch_id: PatchId,
        edges: &mut HashMap<Key<PatchId>, Edge>,
    ) {
        debug!("collect down {:?}", key);
        for v in self.iter_adjacent(branch, key, EdgeFlags::PSEUDO_EDGE, EdgeFlags::PSEUDO_EDGE | EdgeFlags::FOLDER_EDGE)
            .take_while(|v| { v.introduced_by == patch_id }) {
        for v in self
            .iter_adjacent(
                branch,
                key,
                EdgeFlags::PSEUDO_EDGE,
                EdgeFlags::PSEUDO_EDGE | EdgeFlags::FOLDER_EDGE,
            )
            .take_while(|v| v.introduced_by == patch_id)
        {
            if !edges.contains_key(&key) {
1

2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140

141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250




251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292

293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366

367
368
369

370
371
372
use super::Workspace;
use Result;
use apply::find_alive::FindAlive;
use backend::*;
use patch::*;
use rand;
use std::collections::HashSet;
use std::str::from_utf8;
use Result;

impl<'env, T: rand::Rng> MutTxn<'env, T> {
    /// Undoes one `NewEdges` change of patch `patch_id` on `branch`.
    ///
    /// This (1) reverts the edges to their `previous` flags, (2) removes
    /// the pseudo-edges this patch introduced, (3) repairs the
    /// connectivity of the alive graph (re-adding pseudo-edges where the
    /// unrecord disconnects it), and (4) when folder edges are involved,
    /// updates the `inodes`/`trees` tables accordingly.
    ///
    /// `unused_in_other_branches` is true when no other branch still
    /// contains this patch.
    pub(in unrecord) fn unrecord_edges(
        &mut self,
        find_alive: &mut FindAlive,
        branch: &mut Branch,
        patch_id: PatchId,
        dependencies: &HashSet<Hash>,
        previous: EdgeFlags,
        flags: EdgeFlags,
        edges: &[NewEdge],
        w: &mut Workspace,
        unused_in_other_branches: bool,
    ) -> Result<()> {
        debug!("unrecord_edges: {:?}", edges);

        // Revert the edges, i.e. add the previous edges.
        self.remove_edges(
            branch,
            patch_id,
            previous,
            flags,
            edges,
            unused_in_other_branches,
        )?;

        // If this NewEdges caused pseudo-edges to be inserted at the
        // time of applying this patch, remove them, because these
        // vertices don't need them anymore (we'll reconnect possibly
        // disconnected parts later).
        self.remove_patch_pseudo_edges(branch, patch_id, flags, edges, w)?;

        // We now take care of the connectivity of the alive graph,
        // which we must maintain.
        if previous.contains(EdgeFlags::DELETED_EDGE) {
            // This NewEdges turns a deleted edge into an alive one.
            // Therefore, unrecording this NewEdges introduced DELETED
            // edges to the graph, which might have disconnect the
            // graph. Add pseudo edges where necessary to keep the
            // alive component of the graph connected.
            let targets: Vec<_> = if flags.contains(EdgeFlags::PARENT_EDGE) {
                edges
                    .iter()
                    .map(|e| self.internal_key(&e.from, patch_id))
                    .collect()
            } else {
                edges
                    .iter()
                    .map(|e| self.internal_key(&e.to, patch_id))
                    .collect()
            };
            debug!("previous contains DELETED_EDGE, targets = {:?}", targets);
            self.reconnect_across_deleted_nodes(patch_id, branch, dependencies, &targets)?
        } else {
            // This NewEdge turns an alive edge into a deleted
            // one. Therefore, unapplying it reintroduces alive edges,
            // but these new alive edges might have their context
            // dead. If this is the case, find their closest alive
            // ancestors and descendants, and reconnect.
            assert!(flags.contains(EdgeFlags::DELETED_EDGE));

            // If we're reintroducing a non-deleted edge, there is
            // no reason why the deleted part is still connected
            // to the alive component of the graph, so we must
            // reconnect the deleted part to its alive ancestors
            // and descendants.
            self.reconnect_deletions(branch, patch_id, edges, flags, find_alive)?
        }

        // Now, we're done reconnecting the graph. However, if this
        // NewEdges changed "folder" edges, the inodes and trees
        // tables might have to be updated.
        if flags.contains(EdgeFlags::FOLDER_EDGE) {
            if flags.contains(EdgeFlags::DELETED_EDGE) {
                // This file was deleted by this `NewEdge`. Therefore,
                // unrecording this NewEdges adds it back to the
                // repository. There are two things to do here:
                //
                // - Put it back into trees and revtrees to start
                //   following it again.
                //
                // - Since this file was *not* added by this patch
                // (because no patch can both add and delete the same
                // file), put the file back into inodes and revinodes.
                self.restore_deleted_file(branch, patch_id, edges, flags)?
            } else {
                // This file was undeleted by this patch. One way (the
                // only way?) to create such a patch is by rolling
                // back a patch that deletes a file.
                self.undo_file_reinsertion(patch_id, edges, flags)?
            }
        }

        Ok(())
    }

    /// Handles the case where the patch we are unrecording deletes an
    /// "inode" node, i.e. deletes a file from the system.
    ///
    /// We need (1) to check that, which is done in
    /// `dest_is_an_inode`, and (2) to add the file back into the
    /// `tree` and `revtree` tables (but not in the `inodes` tables).
    fn restore_deleted_file(
        &mut self,
        branch: &Branch,
        patch_id: PatchId,
        edges: &[NewEdge],
        flags: EdgeFlags,
    ) -> Result<()> {
        let is_upwards = flags.contains(EdgeFlags::PARENT_EDGE);
        for e in edges {
            // Orient the edge so that `source` is the name node and
            // `dest` the file ("inode") node, whatever the direction
            // stored in the patch.
            let (source, dest) = if is_upwards {
                (&e.to, &e.from)
            } else {
                (&e.from, &e.to)
            };
            let source = self.internal_key(source, patch_id).to_owned();
            let dest = self.internal_key(dest, patch_id).to_owned();
            // An "inode node" has empty (or absent) contents.
            let dest_is_an_inode = if let Some(contents) = self.get_contents(dest) {
                contents.len() == 0
            } else {
                true
            };
            if dest_is_an_inode {
                // This is actually a file deletion, so it's not in
                // the tree anymore. Put it back into tree/revtrees,
                // and into inodes/revinodes.

                // Since patches *must* be recorded from top to
                // bottom, source's parent is an inode, and must be in
                // inodes/revinodes.
                let e = Edge::zero(EdgeFlags::PARENT_EDGE | EdgeFlags::FOLDER_EDGE);
                let source_parent = self
                    .iter_nodes(branch, Some((source, Some(e))))
                    .take_while(|&(k, _)| k == source)
                    .next()
                    .unwrap()
                    .1
                    .dest
                    .to_owned();
                debug!("source_parent = {:?}", source_parent);
                let parent_inode = if source_parent.is_root() {
                    ROOT_INODE
                } else {
                    // There is a complexity choice here: we don't
                    // want to resurrect all paths leading to this
                    // file. Resurrecting only the latest known path
                    // is not deterministic.

                    // So, if the parent doesn't exist, we attach this
                    // to the root of the repository.
                    self.get_revinodes(source_parent).unwrap_or(ROOT_INODE)
                };
                let inode = self.create_new_inode();

                // The contents of the name node are two metadata bytes
                // followed by the UTF-8 basename.
                let (metadata, basename) = {
                    let source_contents = self.get_contents(source).unwrap();
                    assert!(source_contents.len() >= 2);
                    let (a, b) = source_contents.as_slice().split_at(2);
                    let name = SmallString::from_str(from_utf8(b)?);
                    (FileMetadata::from_contents(a), name)
                };

                let file_id = OwnedFileId {
                    parent_inode,
                    basename,
                };
                self.put_tree(&file_id.as_file_id(), inode)?;
                self.put_revtree(inode, &file_id.as_file_id())?;

                self.replace_inodes(
                    inode,
                    FileHeader {
                        status: FileStatus::Deleted,
                        metadata,
                        key: dest,
                    },
                )?;
                self.replace_revinodes(dest, inode)?;
            }
        }
        Ok(())
    }

    /// Reverses a file re-insertion: for every edge whose target is an
    /// "inode" node (empty or absent contents), drops that file from the
    /// inodes/revinodes tables again.
    fn undo_file_reinsertion(
        &mut self,
        patch_id: PatchId,
        edges: &[NewEdge],
        flags: EdgeFlags,
    ) -> Result<()> {
        let upwards = flags.contains(EdgeFlags::PARENT_EDGE);
        for e in edges {
            // The file node is on the `from` side for parent edges,
            // on the `to` side otherwise.
            let key = if upwards { &e.from } else { &e.to };
            let internal = self.internal_key(key, patch_id).to_owned();
            // We're checking here that this is not a move, but
            // really the inverse of a deletion, by checking that
            // the target is an "inode node".
            let is_inode_node = self
                .get_contents(internal)
                .map_or(true, |contents| contents.len() == 0);
            if is_inode_node {
                self.remove_file_from_inodes(internal)?;
            }
        }
        Ok(())
    }

    fn reconnect_deletions(
        &mut self,
        branch: &mut Branch,
        patch_id: PatchId,
        edges: &[NewEdge],
        flags: EdgeFlags,
        find_alive: &mut FindAlive,
    ) -> Result<()> {
        // For all targets of this edges, finds its
        // alive ascendants, and add pseudo-edges.
        let is_upwards = flags.contains(EdgeFlags::PARENT_EDGE);
        let mut alive_relatives = Vec::new();
        for e in edges.iter() {
            debug!("is_upwards: {:?}", is_upwards);
            let (source, dest) = if is_upwards {
                (&e.to, &e.from)
            } else {
                (&e.from, &e.to)
            };

            let source = self.internal_key(source, patch_id);
            let dest = self.internal_key(dest, patch_id);

            if !self.is_alive(branch, dest) {
                continue;
            }

            // Collect the source's closest alive descendants, if
            // the immediate descendant is not alive.
            find_alive.clear();
            for dead_child in
                self.iter_adjacent(branch, dest, EdgeFlags::DELETED_EDGE, EdgeFlags::DELETED_EDGE)
            {
            {
            for dead_child in self.iter_adjacent(
                branch,
                dest,
                EdgeFlags::DELETED_EDGE,
                EdgeFlags::DELETED_EDGE,
            ) {
                find_alive.push(dead_child.dest);
            }
            debug!("find_alive {:?}", find_alive);
            alive_relatives.clear();
            let mut edge = Edge::zero(EdgeFlags::empty());
            if self.find_alive_descendants(find_alive, branch, &mut alive_relatives) {
                debug!("alive_descendants: {:?}", alive_relatives);
                for desc in alive_relatives.drain(..) {
                    if dest != desc {
                        edge.flag = EdgeFlags::PSEUDO_EDGE | (flags & EdgeFlags::FOLDER_EDGE);
                        edge.dest = desc;
                        edge.introduced_by = self.internal_hash(&e.introduced_by, patch_id);
                        debug!("put_nodes (line {:?}): {:?} {:?}", line!(), source, edge);
                        self.put_edge_both_dirs(branch, dest, edge)?;
                    }
                }
            }
            // now we'll use alive_relatives to
            // collect alive ancestors.
            debug!("source = {:?}, dest = {:?}", source, dest);
            debug!("alive_ancestors, source = {:?}", source);
            find_alive.clear();
            find_alive.push(source);
            alive_relatives.clear();
            let mut files = Vec::new();
            let mut first_file = None;
            if self.find_alive_ancestors(
                find_alive,
                branch,
                &mut alive_relatives,
                &mut first_file,
                &mut files,
            ) {
                debug!("alive_ancestors: {:?}", alive_relatives);
                for asc in alive_relatives.drain(..) {
                    if dest != asc {
                        edge.flag = EdgeFlags::PSEUDO_EDGE | EdgeFlags::PARENT_EDGE
                        edge.flag = EdgeFlags::PSEUDO_EDGE
                            | EdgeFlags::PARENT_EDGE
                            | (flags & EdgeFlags::FOLDER_EDGE);
                        edge.dest = asc;
                        edge.introduced_by = self.internal_hash(&e.introduced_by, patch_id);
                        debug!("put_edge (line {:?}): {:?} {:?}", line!(), dest, edge);
                        self.put_edge_both_dirs(branch, dest, edge)?;
                    }
                }
                for (mut k, mut v) in files.drain(..) {
                    assert!(v.flag.contains(EdgeFlags::DELETED_EDGE));
                    v.flag = (v.flag | EdgeFlags::PSEUDO_EDGE) ^ EdgeFlags::DELETED_EDGE;
                    self.put_edge_one_dir(branch, k, v)?;
                }
            }
        }
        Ok(())
    }

    fn remove_edges(
        &mut self,
        branch: &mut Branch,
        patch_id: PatchId,
        previous: EdgeFlags,
        flag: EdgeFlags,
        edges: &[NewEdge],
        unused_in_other_branches: bool,
    ) -> Result<()> {
        let mut del_edge = Edge::zero(EdgeFlags::empty());
        del_edge.introduced_by = patch_id;

        let mut edge = Edge::zero(EdgeFlags::empty());
        edge.introduced_by = patch_id;

        for e in edges {
            let int_from = self.internal_key(&e.from, patch_id);
            let int_to = self.internal_key(&e.to, patch_id);

            // Delete the edge introduced by this patch,
            // if this NewEdges is not forgetting its
            // edges.
            del_edge.flag = flag;
            del_edge.dest = int_to.clone();
            debug!("delete {:?} -> {:?}", int_from, del_edge);
            self.del_edge_both_dirs(branch, int_from, del_edge)?;

            // Add its previous version, if these edges
            // are Forget or Map (i.e. not brand new
            // edges).

            // If there are other edges with the
            // same source and target, check that
            // none of these edges knows about the
            // patch that introduced the edge we
            // want to put back in.

            edge.dest = int_to;
            debug!(
                "trying to put an edge from {:?} to {:?} back",
                int_from, int_to
            );
            edge.flag = previous;
            edge.introduced_by = self.internal_hash(&e.introduced_by, patch_id);

            if unused_in_other_branches {
                debug!(
                    "unused_in_other_branches: {:?} {:?} {:?}",
                    int_from, edge, patch_id
                );
                self.del_cemetery(int_from, edge, patch_id)?;
            }

            // Is this edge deleted by another patch?
            // patch_id has already been removed from the table.
            let edge_is_still_absent = self.iter_cemetery(int_from, edge)
            let edge_is_still_absent = self
                .iter_cemetery(int_from, edge)
                .take_while(|&((k, v), _)| {
                    k == int_from && v.dest == edge.dest
                    k == int_from
                        && v.dest == edge.dest
                        && v.flag | EdgeFlags::PSEUDO_EDGE == edge.flag | EdgeFlags::PSEUDO_EDGE

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26

27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47

48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83





84
85
86
87
88
use {Result, Error};
use apply::find_alive::FindAlive;
use backend::*;
use patch::*;
use rand;
use std::collections::{HashMap, HashSet};
use {Error, Result};
mod context_repair;
mod edges;
mod nodes;

/// Scratch state threaded through the unrecord operations.
#[derive(Debug)]
struct Workspace {
    // Keys of files moved by the patch being unrecorded
    // — presumably used to avoid treating a move as a delete; verify
    // against the users of this field.
    file_moves: HashSet<Key<PatchId>>,
    // Map from a vertex key to a context edge gathered while
    // unrecording — TODO confirm exact semantics at the use sites.
    context_edges: HashMap<Key<PatchId>, Edge>,
}

impl<'env, T: rand::Rng> MutTxn<'env, T> {
    /// Removes `patch` (internal id `patch_id`) from `branch`.
    ///
    /// Returns `Ok(true)` when the patch is still used by at least one
    /// other branch, `Ok(false)` when this was its last use (in which
    /// case its reverse-dependency entries and internal/external id
    /// mappings are deleted too).
    ///
    /// # Errors
    /// Returns `Error::PatchNotOnBranch` if the patch is not on `branch`.
    pub fn unrecord(
        &mut self,
        branch: &mut Branch,
        patch_id: PatchId,
        patch: &Patch,
    ) -> Result<bool> {
        let timestamp = if let Some(ts) = self.get_patch(&branch.patches, patch_id) {
            ts
        } else {
            return Err(Error::PatchNotOnBranch(patch_id));
        };
        self.del_patches(&mut branch.patches, patch_id)?;
        self.del_revpatches(&mut branch.revpatches, timestamp, patch_id)?;

        // Is the patch used in another branch?
        let unused_in_other_branches = {
            let mut it = self.iter_branches(None).filter(|br| {
                br.name != branch.name && self.get_patch(&br.patches, patch_id).is_some()
            });
            it.next().is_none()
        };

        debug!("unrecord: {:?}", patch_id);

        self.unapply(branch, patch_id, patch, unused_in_other_branches)?;

        for dep in patch.dependencies().iter() {
            let internal_dep = self.get_internal(dep.as_ref()).unwrap().to_owned();
            // Test whether other branches have both this patch and `dep`.
            let other_branches_have_dep = self.iter_branches(None).any(|br| {
                br.name != branch.name
                    && self.get_patch(&br.patches, internal_dep).is_some()
                    && self.get_patch(&br.patches, patch_id).is_some()
            });

            if !other_branches_have_dep {
                self.del_revdep(internal_dep, Some(patch_id))?;
            }
        }

        // If no other branch uses this patch, delete it from revdeps
        // and drop its id mappings entirely.
        if unused_in_other_branches {
            info!("deleting patch");
            // Delete all references to patch_id in revdep.
            while self.del_revdep(patch_id, None)? {}
            let ext = self.get_external(patch_id).unwrap().to_owned();
            self.del_external(patch_id)?;
            self.del_internal(ext.as_ref())?;
            Ok(false)
        } else {
            Ok(true)
        }
    }

    /// Unrecord the patch, returning true if and only if another
    /// branch still uses this patch.
    pub fn unapply(
        &mut self,
        branch: &mut Branch,
        patch_id: PatchId,
        patch: &Patch,
        unused_in_other_branch: bool,
    ) -> Result<()> {
        debug!("revdep: {:?}", self.get_revdep(patch_id, None));

        // Check that the branch has no patch that depends on this one.
        assert!(
            self.iter_revdep(Some((patch_id, None)))
                .take_while(|&(p, _)| p == patch_id)
                .all(|(_, p)| self.get_patch(&branch.patches, p).is_none())
        );
        assert!(self
            .iter_revdep(Some((patch_id, None)))
            .take_while(|&(p, _)| p == patch_id)
            .all(|(_, p)| self.get_patch(&branch.patches, p).is_none()));

1

2
3
4
5
6
7
use super::Workspace;
use Result;
use backend::*;
use rand;
use std::collections::HashSet;
use std::mem::swap;
use Result;


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96

97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313

314
315
                write!(o, "\x1b[{};{}H", self.posy + 1, 1)?;
use libc;
use std;
use std::io::{Read, Write};
use utf8parse;

/// Asks the kernel for the terminal size of fd 1, as `(columns, rows)`.
/// Returns `(0, 0)` when the ioctl fails (e.g. stdout is not a tty).
fn size() -> (usize, usize) {
    unsafe {
        let mut ws: libc::winsize = std::mem::zeroed();

        // Albeit it may look useless in the general case, the use of `into()`
        // here is justified for portability reason.  It looks like there is a
        // type mismatch in the `libc` API for FreeBSD, which prevents `line`
        // from compiling.
        match libc::ioctl(1, libc::TIOCGWINSZ.into(), &mut ws) {
            0 => (ws.ws_col as usize, ws.ws_row as usize),
            _ => (0, 0),
        }
    }
}

/// A raw-mode line editor over the controlling terminal.
pub struct Terminal {
    // termios attributes saved at construction, restored on drop.
    attr: libc::termios,
    // Current cursor column (1-based: column 1 is the left edge,
    // as used by the `\x1b[...H` cursor-positioning writes).
    posx: usize,
    // Current cursor row — presumably 1-based like `posx`; verify.
    posy: usize,
    // Column where the edited line starts (used by `home`).
    posx0: usize,
    // Row where the edited line starts — reset to 1 after scrolling
    // in `backspace`.
    posy0: usize,
    // Byte offset of the cursor inside `buf` (moved with
    // `next_char`/`prev_char`).
    cursor: usize,
    // Number of characters currently in the line (maintained by
    // insert/backspace/delete).
    n_chars: usize,
    // The line being edited.
    buf: String,
}

impl Terminal {
    /// Puts fd 0 into raw mode and returns a handle whose `Drop`
    /// restores the saved attributes. Returns `None` when fd 0 is
    /// not a tty.
    pub fn new() -> Option<Terminal> {
        unsafe {
            if libc::isatty(0) != 0 {
                let mut attr = std::mem::zeroed();
                libc::tcgetattr(0, &mut attr);

                // attr type implements copy and therefore does not need to be
                // cloned
                let attr_orig = attr;

                // Disable input processing: break signalling, CR->NL
                // translation, parity checking, 8th-bit stripping and
                // XON/XOFF flow control.
                attr.c_iflag &=
                    !(libc::BRKINT | libc::ICRNL | libc::INPCK | libc::ISTRIP | libc::IXON);
                // Disable output post-processing.
                attr.c_oflag &= !libc::OPOST;
                // Disable echo, canonical (line-buffered) mode,
                // implementation extensions and signal characters.
                attr.c_lflag &= !(libc::ECHO | libc::ICANON | libc::IEXTEN | libc::ISIG);
                libc::tcsetattr(0, libc::TCSAFLUSH, &attr);
                Some(Terminal {
                    attr: attr_orig,
                    posx: 0,
                    posy: 0,
                    posx0: 0,
                    posy0: 0,
                    cursor: 0,
                    n_chars: 0,
                    buf: String::new(),
                })
            } else {
                None
            }
        }
    }
}

/// Restores the termios attributes saved in `Terminal::new`, putting
/// the terminal back into its original mode.
impl Drop for Terminal {
    fn drop(&mut self) {
        unsafe {
            libc::tcsetattr(0, libc::TCSAFLUSH, &self.attr);
        }
    }
}

/// Returns the byte index of the character following the one starting
/// at byte `i`, by decoding the width from the UTF-8 leading byte.
fn next_char(s: &str, i: usize) -> usize {
    let lead = s.as_bytes()[i];
    let width = if lead <= 0x7f {
        1
    } else if lead >> 5 == 0b110 {
        2
    } else if lead >> 4 == 0b1110 {
        3
    } else {
        4
    };
    i + width
}

/// Returns the byte index of the character preceding the one starting
/// at byte `i`, by stepping back over UTF-8 continuation bytes
/// (`0b10xxxxxx`).
fn prev_char(s: &str, i: usize) -> usize {
    let bytes = s.as_bytes();
    let mut j = i;
    loop {
        j -= 1;
        if bytes[j] & 0b1100_0000 != 0b1000_0000 {
            return j;
        }
    }
}

impl Terminal {

    /// Moves the cursor one character left, wrapping to the last
    /// column of the previous screen row when already in column 1.
    fn move_left(&mut self) -> Result<(), std::io::Error> {
        let mut out = std::io::stdout();
        if self.cursor > 0 {
            self.cursor = prev_char(&self.buf, self.cursor);
            if self.posx <= 1 {
                // At the left edge: wrap to the end of the row above.
                let (w, _) = size();
                self.posx = w;
                self.posy -= 1;
            } else {
                self.posx -= 1;
            }
            write!(out, "\x1b[{};{}H", self.posy, self.posx)?;
        }
        out.flush()?;
        Ok(())
    }

    /// Moves the cursor left to the previous word boundary: skips any
    /// spaces immediately to the left, then the word itself, updating
    /// the screen position as it goes.
    ///
    /// NOTE(review): the scan moves `cursor` byte-by-byte (unlike
    /// `move_left`, which uses `prev_char`), so the screen position
    /// advances once per byte on multi-byte UTF-8 — confirm intended.
    fn word_left(&mut self) -> Result<(), std::io::Error> {
        if self.cursor > 0 {
            let bytes = self.buf.as_bytes();
            let mut is_first = true;
            while self.cursor > 0 {
                self.cursor -= 1;
                if self.posx > 1 {
                    self.posx -= 1;
                } else {
                    // Wrap to the end of the previous screen row.
                    let (w, _) = size();
                    self.posx = w;
                    self.posy -= 1;
                }
                if bytes[self.cursor] == b' ' {
                    if !is_first {
                        break;
                    }
                } else {
                    is_first = false
                }
            }
        }
        // Bounds check added: when the buffer is empty (cursor == 0 ==
        // buf.len()), the unguarded index here panicked. `word_right`
        // already guards the same way.
        if self.cursor < self.buf.len() && self.buf.as_bytes()[self.cursor] == b' ' {
            // We stopped on the separating space: step onto the word.
            self.move_right()?
        } else {
            let mut o = std::io::stdout();
            write!(o, "\x1b[{};{}H", self.posy, self.posx)?;
            o.flush()?;
        }
        Ok(())
    }

    /// Moves the cursor to the start of the edited line (column
    /// `posx0`, on the row where the line begins).
    fn home(&mut self) -> Result<(), std::io::Error> {
        let mut o = std::io::stdout();
        let (w, _) = size();
        // Number of screen rows between the line start and the cursor.
        // NOTE(review): `cursor` is a byte offset, so this is exact only
        // for single-byte characters — confirm for multi-byte input.
        // Also divides by `w`, which `size()` returns as 0 on failure.
        let delta_y = (self.cursor + self.posx0 - 1) / w;
        self.posy -= delta_y;
        self.posx = self.posx0;
        self.cursor = 0;
        write!(o, "\x1b[{};{}H", self.posy, self.posx)?;
        o.flush()?;
        Ok(())
    }

    /// Moves the cursor past the last character of the edited line,
    /// advancing across screen rows as needed.
    fn end(&mut self) -> Result<(), std::io::Error> {
        let mut o = std::io::stdout();
        // Characters (not bytes) to the right of the cursor.
        let remaining_chars = self.buf.split_at(self.cursor).1.chars().count();
        let (w, _) = size();
        self.cursor = self.buf.len();
        // NOTE(review): divides/mods by `w`, which `size()` returns as 0
        // on failure — would panic then.
        self.posy += (self.posx + remaining_chars) / w;
        self.posx = 1 + ((self.posx - 1 + remaining_chars) % w);
        write!(o, "\x1b[{};{}H", self.posy, self.posx)?;
        o.flush()?;
        Ok(())
    }

    /// Moves the cursor one character right, wrapping to column 1 of
    /// the next row (scrolling up when at the bottom of the screen).
    fn move_right(&mut self) -> Result<(), std::io::Error> {
        let mut out = std::io::stdout();
        if self.cursor < self.buf.len() {
            self.cursor = next_char(&self.buf, self.cursor);

            let (w, h) = size();
            if self.posx >= w {
                if self.posy >= h {
                    // Bottom row: scroll the screen up one line first.
                    write!(out, "\x1b[1S")?;
                }
                self.posx = 1;
                self.posy += 1;
            } else {
                self.posx += 1;
            }
            write!(out, "\x1b[{};{}H", self.posy, self.posx)?;
        }
        out.flush()?;
        Ok(())
    }

    /// Moves the cursor right to the next word boundary: skips the rest
    /// of the current word, stops at the following space (or the end of
    /// the line), then steps over the space if there is one.
    fn word_right(&mut self) -> Result<(), std::io::Error> {
        let mut o = std::io::stdout();
        if self.cursor < self.buf.len() {
            let bytes = self.buf.as_bytes();
            let (w, h) = size();
            let mut is_first = true;
            while self.cursor < self.buf.len() {
                // NOTE(review): advances byte-by-byte (cf. `move_right`,
                // which uses `next_char`), so the screen position moves
                // once per byte on multi-byte UTF-8 — confirm intended.
                self.cursor += 1;
                if self.posx < w {
                    self.posx += 1;
                } else {
                    if self.posy >= h {
                        // Bottom row: scroll the screen up one line.
                        write!(o, "\x1b[1S")?;
                    }
                    self.posx = 1;
                    self.posy += 1;
                }
                if self.cursor >= self.buf.len() || bytes[self.cursor] == b' ' {
                    if !is_first {
                        break;
                    }
                } else {
                    is_first = false
                }
            }
        }
        // Landed on a space: step onto the next word; otherwise just
        // reposition the cursor on screen.
        if self.cursor < self.buf.len() && self.buf.as_bytes()[self.cursor] == b' ' {
            self.move_right()?
        } else {
            let mut o = std::io::stdout();
            write!(o, "\x1b[{};{}H", self.posy, self.posx)?;
            o.flush()?;
        }
        Ok(())
    }

    /// Deletes the character before the cursor, moves the cursor back
    /// one position (wrapping/scrolling if needed), and redraws the
    /// tail of the line.
    fn backspace(&mut self) -> Result<(), std::io::Error> {
        let mut o = std::io::stdout();
        if self.cursor >= 1 {
            self.cursor = prev_char(&self.buf, self.cursor);
            self.buf.remove(self.cursor);
            self.n_chars -= 1;
            if self.posx > 1 {
                self.posx -= 1;
                write!(o, "\x1b[{};{}H", self.posy, self.posx)?;
            } else {
                // Left edge: wrap to the last column of the row above.
                let (w, _) = size();
                self.posx = w;
                if self.posy > 1 {
                    write!(o, "\x1b[{};{}H", self.posy - 1, w)?;
                    self.posy -= 1;
                } else {
                    // scroll down by one
                    write!(o, "\x1b[1T")?;
                    write!(o, "\x1b[1;{}H", w)?;
                    self.posy0 = 1;
                    self.posy = 1;
                }
            }
            // Redraw everything after the cursor, clear the leftover
            // tail, then restore the cursor position.
            let (_, end) = self.buf.split_at(self.cursor);
            o.write_all(end.as_bytes())?;
            write!(o, "\x1b[0J")?;
            write!(o, "\x1b[{};{}H", self.posy, self.posx)?;
            o.flush()?;
        }
        Ok(())
    }

    /// Removes the character under the cursor, redraws the tail of the
    /// line, and restores the cursor position on screen.
    fn delete(&mut self) -> Result<(), std::io::Error> {
        if self.cursor >= self.buf.len() {
            return Ok(());
        }
        let mut out = std::io::stdout();
        self.buf.remove(self.cursor);
        self.n_chars -= 1;
        // Redraw from the cursor, clear whatever is left beyond the
        // new end, then move the cursor back where it was.
        let (_, tail) = self.buf.split_at(self.cursor);
        out.write_all(tail.as_bytes())?;
        write!(out, "\x1b[0J")?;
        write!(out, "\x1b[{};{}H", self.posy, self.posx)?;
        out.flush()?;
        Ok(())
    }

    /// Erases everything from the cursor to the end of the line, both
    /// in the buffer and on screen.
    fn erase_to_end(&mut self) -> Result<(), std::io::Error> {
        let mut o = std::io::stdout();
        if self.cursor < self.buf.len() {
            // Keep `n_chars` in sync with the buffer: `backspace` and
            // `delete` decrement it on removal, but this path forgot
            // to, leaving the character count stale after a kill.
            self.n_chars -= self.buf[self.cursor..].chars().count();
            self.buf.truncate(self.cursor);
            write!(o, "\x1b[0J")?;
            write!(o, "\x1b[{};{}H", self.posy, self.posx)?;
            o.flush()?;
        }
        Ok(())
    }

    fn insert(&mut self, c: char) -> Result<(), std::io::Error> {
        let mut o = std::io::stdout();
        self.n_chars += 1;
        self.buf.insert(self.cursor, c);
        let (w, h) = size();

        let (_, end) = self.buf.split_at(self.cursor);
        o.write_all(end.as_bytes())?;

        if self.posx + 1 > w && self.posy + 1 > h && end.len() == 1 {
            // we're discovering a new line, scroll up
            write!(o, "\x1b[1S")?;
            write!(o, "\x1b[{};1H", self.posy)?;
            self.posx = 1;
        } else {
            // did we scroll down when we wrote `end`?
            let y_end = self.posy + ((self.posx + end.len() - 2) / w);
            if y_end > h {
                if self.posx + 1 > w {
                    write!(o, "\x1b[{};1H", self.posy)?;
                    self.posx = 1;
                } else {
                    write!(o, "\x1b[{};{}H", self.posy - 1, self.posx + 1)?;
                    self.posy -= 1;
                    self.posx += 1;
                }
            } else if self.posx + 1 > w {
                // Ok, we didn't scroll down, but we're at the right
                // edge of the screen, and `end` did not start a new
                // line.
                write!(o, "\x1b[{};{}H", self.posy+1, 1)?;
                write!(o, "\x1b[{};1H", self.posy + 1)?;
                self.posx = 1;



1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92

93
94
95
use serde_json;
                .help("Accept patch in JSON format (for debugging only)."),
            let patch: Patch = serde_json::from_reader(&buf[..]).unwrap();
use super::{validate_base58, BasicOptions};
use clap::{Arg, ArgMatches, SubCommand};
use commands::{default_explain, StaticSubcommand};
use error::Error;
use libpijul::patch::Patch;
use libpijul::{apply_resize, apply_resize_no_output, Hash, RepoPath};
use std::collections::HashSet;
use std::fs::File;
use std::io::{stdin, Read, Write};
use std::path::Path;

/// Builds the clap subcommand definition for `pijul apply`.
///
/// Arguments: optional patch hashes (base58-validated), an optional
/// repository path and branch, and a `--no-output` flag.
pub fn invocation() -> StaticSubcommand {
    let patch = Arg::with_name("patch")
        .help(
            "Hash of the patch to apply, in base58. If no patch is given, patches are \
             read from the standard input.",
        )
        .takes_value(true)
        .multiple(true)
        .validator(validate_base58);
    let repository = Arg::with_name("repository")
        .long("repository")
        .help(
            "Path to the repository where the patches will be applied. Defaults to the \
             repository containing the current directory.",
        )
        .takes_value(true);
    let branch = Arg::with_name("branch")
        .long("branch")
        .help(
            "The branch to which the patches will be applied. Defaults to the current \
             branch.",
        )
        .takes_value(true);
    let no_output = Arg::with_name("no-output")
        .long("no-output")
        .help("Only apply the patch, don't output it to the repository.");
    SubCommand::with_name("apply")
        .about("Apply a patch")
        .arg(patch)
        .arg(repository)
        .arg(branch)
        .arg(no_output)
}

pub fn run(args: &ArgMatches) -> Result<(), Error> {
    let opts = BasicOptions::from_args(args)?;
    debug!("applying");
    let mut remote = HashSet::new();

    // let remote: HashSet<Hash> =
    let mut has_patches = false;
    if let Some(hashes) = args.values_of("patch") {
        remote.extend(hashes.map(|h| Hash::from_base58(&h).unwrap()));
        has_patches = true
    }

    if !has_patches {
        // Read patches in gz format from stdin.
        let mut buf = Vec::new();
        stdin().read_to_end(&mut buf)?;

        let mut buf_ = &buf[..];
        let mut i = 0;
        while let Ok((h, _, patch)) = Patch::from_reader_compressed(&mut buf_) {
            debug!("{:?}", patch);

            {
                let mut path = opts.patches_dir();
                path.push(h.to_base58());
                path.set_extension("gz");
                let mut f = File::create(&path)?;
                f.write_all(&buf[i..(buf.len() - buf_.len())])?;
                i = buf.len() - buf_.len();
            }

            remote.insert(h);
        }
    }

    debug!("remote={:?}", remote);
    let is_current_branch = if let Ok(br) = opts.repo_root.get_current_branch() {
        br == opts.branch()
    } else {
        false
    };
    loop {
        let result = if args.is_present("no-output") || !is_current_branch {
            apply_resize_no_output(&opts.repo_root, &opts.branch(), remote.iter(), |_, _| ()).map(|_| Vec::new())
            apply_resize_no_output(&opts.repo_root, &opts.branch(), remote.iter(), |_, _| ())
                .map(|_| Vec::new())
        } else {
14
15

16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331

333
334


337


340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401

403


406

408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538


use libpijul::fs_representation::{RepoPath, RepoRoot, PIJUL_DIR_NAME};

use error::Error;
use atty;
use error::Error;
use libpijul::{EdgeFlags, Hash, LineId, MutTxn, PatchId};
use rand;
use std;
use std::char::from_u32;
use std::fs::{remove_file, File};
use std::process;
use std::str;
use term;
use term::{Attr, StdoutTerminal};

use ignore::gitignore::GitignoreBuilder;
use line;

/// Placeholder shown instead of file contents that are not valid UTF-8.
/// `'static` is implied on a `const` and therefore elided.
const BINARY_CONTENTS: &str = "<binary contents>";
/// Which interactive operation the user is selecting patches for.
#[derive(Clone, Copy)]
pub enum Command {
    Pull,
    Push,
    Unrecord,
}

impl Command {
    /// Verb inserted into the interactive prompts (e.g. "Shall I pull
    /// this patch?").
    fn verb(&self) -> &'static str {
        match self {
            Command::Pull => "pull",
            Command::Push => "push",
            Command::Unrecord => "unrecord",
        }
    }
}

/// Writes `title` in bold (when a terminal is available) followed by
/// `contents` and a newline. Terminal and I/O errors are deliberately
/// ignored: this is best-effort display code.
fn print_section(term: &mut Option<Box<StdoutTerminal>>, title: &str, contents: &str) {
    let mut stdout = std::io::stdout();
    if let Some(ref mut t) = *term {
        t.attr(Attr::Bold).unwrap_or(());
    }
    write!(stdout, "{}", title).unwrap_or(());
    if let Some(ref mut t) = *term {
        t.reset().unwrap_or(());
    }
    writeln!(stdout, "{}", contents).unwrap_or(());
}

/// Strips ASCII ESC (0x1b) characters from `f`, defusing terminal
/// escape sequences embedded in patch metadata.
///
/// Returns the input borrowed and untouched when no ESC byte is
/// present; allocates a filtered copy only when one is found.
fn remove_escape_codes(f: &str) -> std::borrow::Cow<str> {
    use std::borrow::Cow;
    const ESC: u8 = 27;
    if !f.as_bytes().contains(&ESC) {
        Cow::Borrowed(f)
    } else {
        Cow::Owned(f.chars().filter(|&c| c != ESC as char).collect())
    }
}

/// Pretty-prints a patch header: hash, optional internal id, authors,
/// timestamp, then the indented name and description. Escape codes are
/// stripped from user-controlled fields before printing.
pub fn print_patch_descr(
    term: &mut Option<Box<StdoutTerminal>>,
    hash: &Hash,
    internal: Option<PatchId>,
    patch: &PatchHeader,
) {
    print_section(term, "Hash:", &format!(" {}", &hash.to_base58()));
    if let Some(id) = internal {
        print_section(term, "Internal id:", &format!(" {}", &id.to_base58()));
    }

    let authors = remove_escape_codes(&patch.authors.join(", "));
    print_section(term, "Authors:", &format!(" {}", authors));
    print_section(term, "Timestamp:", &format!(" {}", patch.timestamp));

    // A non-empty flag marks the patch as a tag.
    let is_tag = if patch.flag.is_empty() { "" } else { "TAG: " };

    let mut stdout = std::io::stdout();
    writeln!(
        stdout,
        "\n    {}{}",
        is_tag,
        remove_escape_codes(&patch.name)
    )
    .unwrap_or(());
    if let Some(ref d) = patch.description {
        writeln!(stdout, "").unwrap_or(());
        let d = remove_escape_codes(d);
        for line in d.lines() {
            writeln!(stdout, "    {}", line).unwrap_or(());
        }
    }
    writeln!(stdout, "").unwrap_or(());
}

/// Decides whether the choice for patch `a` is already implied by
/// earlier choices, given the dependency relation.
///
/// Pull and push are "covariant": selecting a patch forces selecting
/// what it depends on (and being depended on by a selected patch forces
/// selection). Unrecord is contravariant: the implications run the
/// other way.
fn check_forced_decision(
    command: Command,
    choices: &HashMap<&Hash, bool>,
    rev_dependencies: &HashMap<&Hash, Vec<&Hash>>,
    a: &Hash,
    b: &Patch,
) -> Option<bool> {
    let covariant = match command {
        Command::Pull | Command::Push => true,
        Command::Unrecord => false,
    };

    // Patches `y` that depend on `a`: if any already-decided `y` got
    // the covariant choice, that choice propagates to `a`.
    if let Some(dependents) = rev_dependencies.get(a) {
        let forced = dependents
            .iter()
            .any(|y| choices.get(y) == Some(&covariant));
        if forced {
            return Some(covariant);
        }
    }

    // Patches `y` that `a` depends on: if any already-decided `y` got
    // the contravariant choice, `a` is forced the opposite way.
    for y in b.dependencies().iter() {
        if let Some(&choice) = choices.get(&y) {
            if choice != covariant {
                return Some(!covariant);
            }
        }
    }

    // Nothing decided so far constrains `a`.
    None
}

/// Shows patch `a`'s header and prompts the user for a single-key
/// decision.
///
/// Returns the uppercased key together with an optional blanket
/// decision for all remaining patches: 'a' maps to `('Y', Some(true))`,
/// 'd' to `('N', Some(false))`, any other key to `(key, None)`. On a
/// failed or non-unicode key read, returns `('\0', None)`.
fn interactive_ask(
    getch: &getch::Getch,
    a: &Hash,
    patchid: Option<PatchId>,
    b: &Patch,
    command_name: Command,
    show_help: bool,
) -> Result<(char, Option<bool>), Error> {
    // Only use terminal attributes when stdout is actually a tty.
    let mut term = if atty::is(atty::Stream::Stdout) {
        term::stdout()
    } else {
        None
    };
    print_patch_descr(&mut term, a, patchid, b);

    if show_help {
        // After '?' the options were just listed, so keep the prompt short.
        display_help(command_name);
        print!("Shall I {} this patch? ", command_name.verb());
    } else {
        print!("Shall I {} this patch? [ynkad?] ", command_name.verb());
    }

    // `print!` does not flush; make sure the prompt is visible before
    // blocking on a keypress.
    stdout().flush()?;
    match getch.getch().ok().and_then(|x| from_u32(x as u32)) {
        Some(e) => {
            // Echo the key (raw-mode reads don't echo by themselves).
            println!("{}", e);
            let e = e.to_uppercase().next().unwrap_or('\0');
            match e {
                'A' => Ok(('Y', Some(true))),
                'D' => Ok(('N', Some(false))),
                e => Ok((e, None)),
            }
        }
        _ => Ok(('\0', None)),
    }
}

/// Prints the list of single-key choices understood by the interactive
/// prompt, phrased with command `c`'s verb, followed by a blank line.
fn display_help(c: Command) {
    let verb = c.verb();
    let lines = [
        "Available options: ynkad?".to_string(),
        format!("y: {} this patch", verb),
        format!("n: don't {} this patch", verb),
        "k: go bacK to the previous patch".to_string(),
        format!("a: {} all remaining patches", verb),
        "d: finish, skipping all remaining patches".to_string(),
        String::new(),
    ];
    for line in lines.iter() {
        println!("{}", line);
    }
}

/// Patches might have a dummy "changes" field here.
///
/// Interactively walks `patches`, asking the user to accept or reject
/// each one, propagating forced decisions through the dependency
/// relation, and returns the hashes of the accepted patches in their
/// original order.
pub fn ask_patches(
    command: Command,
    patches: &[(Hash, Option<PatchId>, Patch)],
) -> Result<Vec<Hash>, Error> {
    let getch = getch::Getch::new();
    // Index of the patch currently being asked about.
    let mut i = 0;

    // Record of the user's choices.
    let mut choices: HashMap<&Hash, bool> = HashMap::new();

    // For each patch, the list of patches that depend on it.
    let mut rev_dependencies: HashMap<&Hash, Vec<&Hash>> = HashMap::new();

    // Decision for the remaining patches ('a' or 'd'), if any.
    let mut final_decision = None;
    let mut show_help = false;

    while i < patches.len() {
        let (ref a, patchid, ref b) = patches[i];
        let forced_decision = check_forced_decision(command, &choices, &rev_dependencies, a, b);

        // Is the decision already forced by a previous choice?
        let e = match final_decision.or(forced_decision) {
            Some(true) => 'Y',
            Some(false) => 'N',
            None => {
                debug!("decision not forced");
                let (current, remaining) =
                    interactive_ask(&getch, a, patchid, b, command, show_help)?;
                // 'a'/'d' answers decide everything that follows.
                final_decision = remaining;
                current
            }
        };

        // Help is shown for one prompt only.
        show_help = false;

        debug!("decision: {:?}", e);
        match e {
            'Y' => {
                choices.insert(a, true);
                match command {
                    // Covariant commands: record that `a`'s dependencies
                    // are depended on by `a`, so selecting `a` can force
                    // selecting them later.
                    Command::Pull | Command::Push => {
                        for ref dep in b.dependencies().iter() {
                            let d = rev_dependencies.entry(dep).or_insert(vec![]);
                            d.push(a)
                        }
                    }
                    Command::Unrecord => {}
                }
                i += 1
            }
            'N' => {
                choices.insert(a, false);
                match command {
                    // Contravariant command: rejection propagates through
                    // the same reverse-dependency table.
                    Command::Unrecord => {
                        for ref dep in b.dependencies().iter() {
                            let d = rev_dependencies.entry(dep).or_insert(vec![]);
                            d.push(a)
                        }
                    }
                    Command::Pull | Command::Push => {}
                }
                i += 1
            }
            // Go back one patch.
            // NOTE(review): this clears the (not yet decided) choice for
            // patches[i]; it looks like it should clear patches[i - 1]'s
            // recorded choice before re-asking it — confirm intent.
            'K' if i > 0 => {
                let (ref a, _, _) = patches[i];
                choices.remove(a);
                i -= 1
            }
            '?' => {
                show_help = true;
            }
            _ => {}
        }
    }
    // Keep only the hashes the user (or propagation) accepted.
    Ok(patches
        .into_iter()
        .filter_map(|&(ref hash, _, _)| {
            if let Some(true) = choices.get(hash) {
                Some(hash.to_owned())
            } else {
                None
            }
        })
        .collect())
}

/// Compute the dependencies of this change.
///
/// Returns the set of `LineId`s from the current patch
/// (`patch.is_none()`) that change `c` refers to, and records in
/// `provided_by` which change (`id`) introduces each new line, so the
/// caller can resolve intra-patch dependencies.
fn change_deps(
    id: usize,
    c: &Record<ChangeContext<Hash>>,
    provided_by: &mut HashMap<LineId, usize>,
) -> HashSet<LineId> {
    let mut s = HashSet::new();
    for c in c.iter() {
        match *c {
            Change::NewNodes {
                ref up_context,
                ref down_context,
                ref line_num,
                ref nodes,
                ..
            } => {
                // Context lines inside the current patch are dependencies
                // (the root line never is).
                for cont in up_context.iter().chain(down_context) {
                    if cont.patch.is_none() && !cont.line.is_root() {
                        s.insert(cont.line.clone());
                    }
                }
                // The lines created here are provided by change `id`.
                for i in 0..nodes.len() {
                    provided_by.insert(*line_num + i, id);
                }
            }
            Change::NewEdges { ref edges, .. } => {
                for e in edges {
                    if e.from.patch.is_none() && !e.from.line.is_root() {
                        s.insert(e.from.line.clone());
                    }
                    // BUG FIX: this guard previously tested
                    // `e.from.line.is_root()`, so a root `e.to.line`
                    // could be inserted as a dependency. Test the `to`
                    // endpoint, mirroring the `from` case above.
                    if e.to.patch.is_none() && !e.to.line.is_root() {
                        s.insert(e.to.line.clone());
                    }
                }
            }
        }
    }
    s
}


>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
fn print_record<T: rand::Rng>(
    repo_root: &RepoRoot<impl AsRef<Path>>,

================================

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
fn print_change<T: rand::Rng>(

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    term: &mut Option<Box<StdoutTerminal>>,
    cwd: &Path,
    repo: &MutTxn<T>,
    current_file: &mut Option<Rc<RepoPath<PathBuf>>>,
    c: &Record<ChangeContext<Hash>>,
) -> Result<(), Error> {
    match *c {
        Record::FileAdd {
            ref name,
            ref contents,
            ..
        } => {
            if let Some(ref mut term) = *term {
                term.fg(term::color::CYAN).unwrap_or(());
            }
            print!("added file ");
            if let Some(ref mut term) = *term {
                term.reset().unwrap_or(());
            }
            println!("{}", pretty_repo_path(repo_root, name, cwd).display());
            if let Some(ref change) = contents {
                print_change(term, repo, 0, 0, change)?;
            }
            Ok(())
        }
        Record::FileDel {
            ref name,
            ref contents,
            ..
        } => {
            if let Some(ref mut term) = *term {
                term.fg(term::color::MAGENTA).unwrap_or(());
            }
            print!("deleted file: ");
            if let Some(ref mut term) = *term {
                term.reset().unwrap_or(());
            }
            println!("{}", pretty_repo_path(repo_root, name, cwd).display());
            if let Some(ref change) = contents {
                print_change(term, repo, 0, 0, change)?;
            }
            Ok(())
        }
        Record::FileMove { ref new_name, .. } => {
            if let Some(ref mut term) = *term {
                term.fg(term::color::YELLOW).unwrap_or(());
            }
            print!("file moved to: ");
            if let Some(ref mut term) = *term {
                term.reset().unwrap_or(());
            }
            println!("{}", pretty_repo_path(repo_root, new_name, cwd).display());
            Ok(())
        }
        Record::Change {
            ref change,
            ref replacement,
            ref file,
            old_line,
            new_line,
            ..
        } => {

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
                            }

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
                            if let Some(ref mut term) = *term {

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
            let mut file_changed = true;
            if let Some(ref cur_file) = *current_file {
                if file == cur_file {
                    file_changed = false;
                }
            }
            if file_changed {
                if let Some(ref mut term) = *term {
                    term.attr(Attr::Bold).unwrap_or(());
                    term.attr(Attr::Underline(true)).unwrap_or(());
                }
                println!("{}", pretty_repo_path(repo_root, file, cwd).display());
                if let Some(ref mut term) = *term {
                    term.reset().unwrap_or(());
                }
                *current_file = Some(file.clone())
            }

            print_change(term, repo, old_line, new_line, change)?;
            if let Some(ref c) = *replacement {
                print_change(term, repo, old_line, new_line, c)?
            }
            Ok(())
        }
    }
}

/// Pretty-prints a single `Change` as diff-style lines ("+ " for
/// additions, "- " for deletions), colorized when a terminal is
/// available. `old_line`/`new_line` (when non-zero) are printed as the
/// starting line of the hunk.
fn print_change<T: rand::Rng>(
    term: &mut Option<Box<StdoutTerminal>>,
    repo: &MutTxn<T>,
    old_line: usize,
    new_line: usize,
    change: &Change<ChangeContext<Hash>>,
) -> Result<(), Error> {
    match *change {
        Change::NewNodes {
            // ref up_context,ref down_context,ref line_num,
            ref flag,
            ref nodes,
            ..
        } => {
            if flag.contains(EdgeFlags::FOLDER_EDGE) {
                // Folder edges are reported as new files rather than as
                // content lines.
                for n in nodes {
                    if n.len() >= 2 {
                        if let Some(ref mut term) = *term {
                            term.fg(term::color::CYAN).unwrap_or(());
                        }
                        print!("new file ");
                        if let Some(ref mut term) = *term {
                            term.reset().unwrap_or(());
                        }
                        // The first two bytes are skipped; the remainder
                        // is printed as the file name (presumably a
                        // header precedes the name — confirm in libpijul).
                        println!("{}", str::from_utf8(&n[2..]).unwrap_or(""));
                    }
                }
            } else {
                if new_line > 0 {
                    println!("From line {}\n", new_line);
                }

                // Each node is one added line; non-UTF-8 content gets a
                // placeholder.
                for n in nodes {
                    let s = str::from_utf8(n).unwrap_or(BINARY_CONTENTS);
                    if let Some(ref mut term) = *term {
                        term.fg(term::color::GREEN).unwrap_or(());
                    }
                    print!("+ ");
                    if let Some(ref mut term) = *term {
                        term.reset().unwrap_or(());
                    }
                    // Avoid printing a double newline when the content
                    // already ends with one.
                    if s.ends_with("\n") {
                        print!("{}", s);
                    } else {
                        println!("{}", s);
                    }
                }
            }
            Ok(())
        }
        Change::NewEdges {
            ref edges, flag, ..
        } => {
            // Deduplicates targets so each affected line is printed once.
            let mut h_targets = HashSet::with_capacity(edges.len());

            if old_line > 0 {
                println!("From line {}\n", old_line);
            }
            for e in edges {
                // For parent edges the relevant line is the source (`from`),
                // otherwise the destination (`to`); only first occurrences
                // are kept.
                let (target, flag) = if !flag.contains(EdgeFlags::PARENT_EDGE) {
                    if h_targets.insert(&e.to) {
                        (Some(&e.to), flag)
                    } else {
                        (None, flag)
                    }
                } else {
                    if h_targets.insert(&e.from) {
                        (Some(&e.from), flag)
                    } else {
                        (None, flag)
                    }
                };
                if let Some(target) = target {
                    // Fetch the line's contents from the repository.
                    let internal = repo.internal_key_unwrap(target);
                    let l = repo.get_contents(internal).unwrap();
                    let l = l.into_cow();
                    let s = str::from_utf8(&l).unwrap_or(BINARY_CONTENTS);

                    // Deleted edges print as "- " (red), others as "+ "
                    // (green).
                    if flag.contains(EdgeFlags::DELETED_EDGE) {
                        if let Some(ref mut term) = *term {
                            term.fg(term::color::RED).unwrap_or(());
                        }
                        print!("- ");
                    } else {
                        if let Some(ref mut term) = *term {
                            term.fg(term::color::GREEN).unwrap_or(());
                        }
                        print!("+ ");
                    }
                    if let Some(ref mut term) = *term {
                        term.reset().unwrap_or(());
                    }
                    if s.ends_with("\n") {
                        print!("{}", s)
                    } else {
                        println!("{}", s)
                    }
                }
            }
            Ok(())
        }
    }
}





use rand::Rng;
use std::io::Read;
18
19

20
21
22
23
24

25
26
27
28
29
30

31
32
33
                .help("Local repository.")
                .takes_value(true),
        ).arg(
        )
        .arg(
            Arg::with_name("branch")
                .help("Branch to switch to.")
                .takes_value(true),
        ).arg(
        )
        .arg(
            Arg::with_name("path")
                .long("path")
                .help("Partial path to check out.")
                .takes_value(true),
        ).arg(
        )
        .arg(
            Arg::with_name("force")














1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168

169
170
171
172
173
174
175
176
177
178
179
180
181
182
183

184
185
186
                .pullable_patches(
                    args.from_branch,
                    &repo_root,
                )?
            session.pull(
                &repo_root,
                args.to_branch,
                &mut pullable,
                &args.from_path,
                true,
            )?;
            repo_root
                .set_current_branch(args.to_branch)
                .map_err(|x| x.into())
use clap::{Arg, ArgMatches, SubCommand};
use commands::remote::{parse_remote, Remote};
use commands::{assert_no_containing_repo, create_repo, default_explain, StaticSubcommand};
use error::Error;
use libpijul::fs_representation::{RepoPath, RepoRoot};
use libpijul::{Hash, DEFAULT_BRANCH};
use regex::Regex;
use std::io::{stderr, Write};
use std::path::Path;
use std::process::exit;
use tempfile::tempdir_in;

/// Builds the clap subcommand definition for `pijul clone`.
pub fn invocation() -> StaticSubcommand {
    let from = Arg::with_name("from")
        .help("Repository to clone.")
        .required(true);
    let from_branch = Arg::with_name("from_branch")
        .long("from-branch")
        .help("The branch to pull from")
        .takes_value(true);
    let to_branch = Arg::with_name("to_branch")
        .long("to-branch")
        .help("The branch to pull into")
        .takes_value(true);
    let to = Arg::with_name("to").help("Target.");
    let from_path = Arg::with_name("from_path")
        .long("path")
        .help("Only pull patches relative to that path.")
        .takes_value(true);
    let patch = Arg::with_name("patch")
        .long("patch")
        .help("Pull a patch and its dependencies.")
        .takes_value(true)
        .multiple(true);
    // Reject any --port value that does not parse as a u16.
    let port = Arg::with_name("port")
        .short("p")
        .long("port")
        .help("Port of the remote ssh server.")
        .takes_value(true)
        .validator(|val| val.parse::<u16>().map(|_| ()).map_err(|_| val));
    SubCommand::with_name("clone")
        .about("Clone a remote branch")
        .arg(from)
        .arg(from_branch)
        .arg(to_branch)
        .arg(to)
        .arg(from_path)
        .arg(patch)
        .arg(port)
}
/// Parsed command-line arguments for `pijul clone`.
#[derive(Debug)]
pub struct Params<'a> {
    /// Source of the clone (local path or remote).
    pub from: Remote<'a>,
    /// Branch to pull from on the source.
    pub from_branch: &'a str,
    /// Paths the clone is restricted to (empty means everything).
    pub from_path: Vec<RepoPath<&'a Path>>,
    /// Destination; derived from `from` when not given explicitly.
    pub to: Remote<'a>,
    /// Branch to pull into on the destination.
    pub to_branch: &'a str,
}

/// Parses `clone`'s command-line arguments into a `Params`.
///
/// The `--port` value is validated by clap as a `u16` (see
/// `invocation`), so the `parse().unwrap()` calls cannot fail here.
/// When no target is given, the last `/`- or `:`-free component of
/// `from` is used as the destination name.
///
/// # Panics
/// Panics if no destination name can be derived from `from`.
pub fn parse_args<'a>(args: &'a ArgMatches) -> Params<'a> {
    // At least one must not use its "port" argument
    let from = parse_remote(
        args.value_of("from").unwrap(),
        // `.map` replaces the non-idiomatic `.and_then(|x| Some(..))`.
        args.value_of("port").map(|x| x.parse().unwrap()),
        None,
        None,
    );
    let to = if let Some(to) = args.value_of("to") {
        parse_remote(
            to,
            args.value_of("port").map(|x| x.parse().unwrap()),
            None,
            None,
        )
    } else {
        // No explicit target: derive one from the last component of `from`.
        let basename = Regex::new(r"([^/:]+)").unwrap();
        let from = args.value_of("from").unwrap();
        if let Some(to) = basename.captures_iter(from).last().and_then(|to| to.get(1)) {
            parse_remote(
                to.as_str(),
                args.value_of("port").map(|x| x.parse().unwrap()),
                None,
                None,
            )
        } else {
            panic!("Could not parse target")
        }
    };
    let from_branch = args.value_of("from_branch").unwrap_or(DEFAULT_BRANCH);
    let from_path = args
        .values_of("from_path")
        .map(|x| x.map(|p| RepoPath(Path::new(p))).collect())
        .unwrap_or_default();
    let to_branch = args.value_of("to_branch").unwrap_or(from_branch);
    Params {
        from,
        from_branch,
        from_path,
        to,
        to_branch,
    }
}

/// Runs `pijul clone`.
///
/// For a local target, the clone is performed into a temporary
/// directory next to the destination and then renamed into place, so a
/// failed clone does not leave a half-initialized repository behind.
/// Remote-to-remote clones are only implemented for a local source.
pub fn run(args_: &ArgMatches) -> Result<(), Error> {
    let args = parse_args(args_);
    debug!("{:?}", args);
    match args.to {
        Remote::Local { path: repo_root } => {
            assert_no_containing_repo(&repo_root.repo_root)?;

            // Temp dir on the same filesystem so the final rename is atomic.
            let parent = repo_root.repo_root.parent().unwrap();
            let tmp_dir = tempdir_in(parent)?;
            {
                create_repo(tmp_dir.path())?;
                let tmp_root = RepoRoot {
                    repo_root: tmp_dir.path(),
                };
                let mut session = args.from.session()?;
                // Either the explicitly requested patches, or everything
                // reachable on the source branch under the given paths.
                let mut pullable: Vec<_> = if let Some(patches) = args_.values_of("patch") {
                    let mut p = Vec::new();
                    for x in patches {
                        p.push((Hash::from_base58(x).unwrap(), 0))
                    }
                    p
                } else {
                    session.changes(args.from_branch, &args.from_path[..])?
                };
                session.pull(
                    &tmp_root,
                    args.to_branch,
                    &mut pullable,
                    &args.from_path,
                    true,
                )?;
                tmp_root.set_current_branch(args.to_branch)?;
            }
            let path = tmp_dir.into_path();
            std::fs::rename(&path, &repo_root.repo_root)?;
            Ok(())
        }
        _ => {
            // Clone between remote repositories.
            match args.from {
                Remote::Local { path } => {
                    let mut to_session = args.to.session()?;
                    debug!("remote init");
                    to_session.remote_init()?;
                    debug!("pushable?");
                    let pushable = to_session.pushable_patches(
                        args.from_branch,
                        args.to_branch,
                        &path,
                        &args.from_path,
                    )?;
                    debug!("pushable = {:?}", pushable);
                    let pushable = pushable.pushable.into_iter().map(|(h, _, _)| h).collect();
                    to_session.push(&path, args.to_branch, pushable)?;
                    // Fix: a leftover pre-rustfmt duplicate of this tail
                    // expression was removed (it appeared twice).
                    path.set_current_branch(args.to_branch)
                        .map_err(|x| x.into())
                }
                _ => unimplemented!(),
            }
        }
    }
}

pub fn explain(res: Result<(), Error>) {
    if let Err(Error::InARepository { ref path }) = res {
        writeln!(
            stderr(),
            "error: Cannot clone onto / into existing repository {:?}",
            path
        ).unwrap();
        )
        .unwrap();
        exit(1)
1
2
3
4
5
6
7
8
9





10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65

66
67
68
use super::{default_explain, get_wd, validate_base58, StaticSubcommand};
use clap::{Arg, ArgMatches, SubCommand};
use error::Error;
use libpijul::fs_representation::find_repo_root;
use libpijul::{Hash, DEFAULT_BRANCH};
use std::collections::HashSet;
use std::mem;
use std::path::Path;
use std::string::String;
use libpijul::fs_representation::{find_repo_root};
use libpijul::{Hash, DEFAULT_BRANCH};
use error::Error;
use error::Error;
use super::{default_explain, get_wd, StaticSubcommand, validate_base58};

/// Builds the clap subcommand definition for `pijul dependencies`
/// (alias `deps`).
pub fn invocation() -> StaticSubcommand {
    let hash = Arg::with_name("hash")
        .help("Hash of a patch.")
        .takes_value(true)
        .required(false)
        .multiple(true)
        .validator(validate_base58);
    // The depth must be a positive integer.
    let depth = Arg::with_name("depth")
        .long("depth")
        .help("The depth of the dependencies graph")
        .takes_value(true)
        .required(false)
        .validator(|x| match x.parse::<usize>() {
            Ok(d) if d >= 1 => Ok(()),
            _ => Err("The depth argument must be an integer, and at least 1".to_owned()),
        });
    let repository = Arg::with_name("repository")
        .long("repository")
        .help("Local repository.")
        .multiple(true)
        .takes_value(true);
    let branch = Arg::with_name("branch")
        .long("branch")
        .help("Branch.")
        .takes_value(true)
        .required(false);
    SubCommand::with_name("dependencies")
        .alias("deps")
        .about("Print the patch dependencies using the DOT syntax in stdout")
        .arg(hash)
        .arg(depth)
        .arg(repository)
        .arg(branch)
}

enum Target<'a> {
    Branch(Option<&'a str>),
    Hash(Vec<&'a str>, usize),
}

pub struct Params<'a> {
    pub repository: Option<&'a Path>,
    target: Target<'a>,
}

pub fn parse_args<'a>(args: &'a ArgMatches) -> Result<Params<'a>, Error> {
    let target = if let Some(hash) = args.values_of("hash") {
        let depth = args.value_of("depth")
        let depth = args
            .value_of("depth")
            .unwrap_or("1")
18
19

20
21
22
23
24
25
26

27
28
29
30
31
32
33










34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
                .takes_value(true)
                .required(false),
        ).arg(
        )
        .arg(
            Arg::with_name("branch")
                .long("branch")
                .help("The branch to show, defaults to the current branch.")
                .takes_value(true)
                .required(false),
        ).arg(
        )
        .arg(
            Arg::with_name("prefix")
                .help("Prefix to start from")
                .takes_value(true)
                .multiple(true),
        )
        .arg(Arg::with_name("patience")
             .long("patience")
             .help("Use patience diff instead of the default (Myers diff)")
             .conflicts_with("myers")
             .takes_value(false))
        .arg(Arg::with_name("myers")
             .long("myers")
             .help("Use Myers diff")
             .conflicts_with("patience")
             .takes_value(false))
        .arg(
            Arg::with_name("patience")
                .long("patience")
                .help("Use patience diff instead of the default (Myers diff)")
                .conflicts_with("myers")
                .takes_value(false),
        )
        .arg(
            Arg::with_name("myers")
                .long("myers")
                .help("Use Myers diff")
                .conflicts_with("patience")
                .takes_value(false),
        );
}
3
4

5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56


57
58
use error::Error;
use flate2::write::GzEncoder;
use libpijul::{Branch, Edge, Key, PatchId, Repository, Txn, ROOT_KEY, graph};
use flate2::Compression;
use libpijul::{graph, Branch, Edge, Key, PatchId, Repository, Txn, ROOT_KEY};
use std::fs::{remove_file, File};
use std::io::{stdout, Write};
use std::path::{Path, PathBuf};
use tar::{Builder, Header};

pub fn invocation() -> StaticSubcommand {
    return SubCommand::with_name("dist")
        .about("Produces a tar.gz archive of the repository")
        .arg(
            Arg::with_name("archive")
                .short("d")
                .takes_value(true)
                .required(true)
                .help("File name of the output archive."),
        )
        .arg(
            Arg::with_name("branch")
                .long("branch")
                .help("The branch from which to make the archive, defaults to the current branch.")
                .takes_value(true)
                .required(false),
        )
        .arg(
            Arg::with_name("repository")
                .long("repository")
                .help("Repository where to work.")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("stdout")
                .long("stdout")
                .short("s")
                .help("Prints the resulting archive to stdout")
                .takes_value(false),
        )
        .arg(
            Arg::with_name("dir")
                .help("Directory (or file) to archive, defaults to the whole repository.")
                .takes_value(true),
        );
}

pub fn dist<W: Write>(
    repo: Repository,
    branch_name: &str,
    scope: ScanScope,
    archive_name: &str,
    encoder: GzEncoder<W>,
) -> Result<(), Error> {
    let txn = repo.txn_begin()?;
    let branch = txn.get_branch(branch_name)
        .ok_or(Error::NoSuchBranch)?;
    let branch = txn.get_branch(branch_name).ok_or(Error::NoSuchBranch)?;
    let mut current_path = Path::new(archive_name).to_path_buf();

1
2
3
4
5
6
7
8

9
10
11
12
13
14
15
16
17
18
19
20

21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39






40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56






57
58
59
60
61
62
63
64
65
66
67
68
69
70
71












72
73
74
75
76
77








78
79
80
81
82









83
84
85
86
87

88
89
90
91
92
93
94
95
96
97
98
99
100
101
use futures::{Future, Stream, Poll, IntoFuture, Async};
use futures::{Async, Future, IntoFuture, Poll, Stream};
use std::mem;

/// A future used to collect all the results of a stream into one generic type.
///
/// This future is returned by the `Stream::fold` method.
#[derive(Debug)]
#[must_use = "streams do nothing unless polled"]
pub struct FoldWhile<S, F, G, Fut, Gut, T> where Fut: IntoFuture, Gut: IntoFuture {
pub struct FoldWhile<S, F, G, Fut, Gut, T>
where
    Fut: IntoFuture,
    Gut: IntoFuture,
{
    stream: S,
    f: F,
    state: State<T, Fut::Future, Gut::Future>,
    g: G,
}

#[derive(Debug)]
enum State<T, F, G> where F: Future, G: Future {
enum State<T, F, G>
where
    F: Future,
    G: Future,
{
    /// Placeholder state when doing work
    Empty,

    /// Ready to process the next stream item; current accumulator is the `T`
    Ready(T),

    /// Working on a future the process the previous stream item
    Processing(F),

    /// Checking whether we can continue.
    Checking(G),
}

pub fn new<S, F, G, Fut, Gut, T>(s: S, t: T, f: F, g: G) -> FoldWhile<S, F, G, Fut, Gut, T>
    where S: Stream,
          F: FnMut(T, S::Item) -> Fut,
          G: FnMut(T) -> Gut,
          Fut: IntoFuture<Item = T>,
          Gut: IntoFuture<Item = (bool, T)>,
          S::Error: From<Fut::Error> + From<Gut::Error>,
where
    S: Stream,
    F: FnMut(T, S::Item) -> Fut,
    G: FnMut(T) -> Gut,
    Fut: IntoFuture<Item = T>,
    Gut: IntoFuture<Item = (bool, T)>,
    S::Error: From<Fut::Error> + From<Gut::Error>,
{
    FoldWhile {
        stream: s,
        f: f,
        state: State::Ready(t),
        g: g,
    }
}

impl<S, F, G, Fut, Gut, T> Future for FoldWhile<S, F, G, Fut, Gut, T>
    where S: Stream,
          F: FnMut(T, S::Item) -> Fut,
          G: FnMut(T) -> Gut,
          Fut: IntoFuture<Item = T>,
          Gut: IntoFuture<Item = (bool, T)>,
          S::Error: From<Fut::Error> + From<Gut::Error>,
where
    S: Stream,
    F: FnMut(T, S::Item) -> Fut,
    G: FnMut(T) -> Gut,
    Fut: IntoFuture<Item = T>,
    Gut: IntoFuture<Item = (bool, T)>,
    S::Error: From<Fut::Error> + From<Gut::Error>,
{
    type Item = T;
    type Error = S::Error;

    fn poll(&mut self) -> Poll<T, S::Error> {
        loop {
            match mem::replace(&mut self.state, State::Empty) {
                State::Empty => panic!("cannot poll Fold twice"),
                State::Ready(state) => {
                    match self.stream.poll()? {
                        Async::Ready(Some(e)) => {
                            let future = (self.f)(state, e);
                            let future = future.into_future();
                            self.state = State::Processing(future);
                        }
                        Async::Ready(None) => return Ok(Async::Ready(state)),
                        Async::NotReady => {
                            self.state = State::Ready(state);
                            return Ok(Async::NotReady)
                        }
                State::Ready(state) => match self.stream.poll()? {
                    Async::Ready(Some(e)) => {
                        let future = (self.f)(state, e);
                        let future = future.into_future();
                        self.state = State::Processing(future);
                    }
                }
                State::Processing(mut fut) => {
                    match fut.poll()? {
                        Async::Ready(state) => self.state = State::Checking((self.g)(state).into_future()),
                        Async::NotReady => {
                            self.state = State::Processing(fut);
                            return Ok(Async::NotReady)
                        }
                    Async::Ready(None) => return Ok(Async::Ready(state)),
                    Async::NotReady => {
                        self.state = State::Ready(state);
                        return Ok(Async::NotReady);
                    }
                }
                State::Checking(mut gut) => {
                    match gut.poll()? {
                        Async::Ready((true, state)) => self.state = State::Ready(state),
                        Async::Ready((false, state)) => return Ok(Async::Ready(state)),
                        Async::NotReady => {
                            self.state = State::Checking(gut);
                            return Ok(Async::NotReady)
                        }
                },
                State::Processing(mut fut) => match fut.poll()? {
                    Async::Ready(state) => {
                        self.state = State::Checking((self.g)(state).into_future())
                    }
                }
                    Async::NotReady => {
                        self.state = State::Processing(fut);
                        return Ok(Async::NotReady);
                    }
                },
                State::Checking(mut gut) => match gut.poll()? {
                    Async::Ready((true, state)) => self.state = State::Ready(state),
                    Async::Ready((false, state)) => return Ok(Async::Ready(state)),
                    Async::NotReady => {
                        self.state = State::Checking(gut);
                        return Ok(Async::NotReady);
                    }
                },
            }
44
45

46
47
48
49
50




51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111

112
113
114
}

fn patch_dependencies(hash_str: &str, repo_root: &RepoRoot<impl AsRef<Path>>) -> Result<Vec<Hash>, Error> {
fn patch_dependencies(
    hash_str: &str,
    repo_root: &RepoRoot<impl AsRef<Path>>,
) -> Result<Vec<Hash>, Error> {
    let mut deps = Vec::new();
    let mut current = vec![
        Hash::from_base58(hash_str).ok_or::<Error>(Error::WrongHash)?,
    ];
    ];
    let mut current = vec![Hash::from_base58(hash_str).ok_or::<Error>(Error::WrongHash)?];
    let mut next = Vec::new();

    while !current.is_empty() {
        for hash in current.drain(..) {
            deps.push(hash.clone());
            let patch = repo_root.read_patch(hash.as_ref())?;

            for hash_dep in patch.dependencies().iter() {
                let h = hash_dep.to_owned();

                if !deps.contains(&h) {
                    next.push(h);
                }
            }
        }

        mem::swap(&mut next, &mut current);
    }

    deps.reverse();

    Ok(deps)
}

pub fn has_branch(opts: &BasicOptions, branch_name: &str) -> Result<bool, Error> {
    let repo = opts.open_repo()?;
    let txn = repo.txn_begin()?;

    Ok(txn.has_branch(branch_name))
}

pub fn run(args: &ArgMatches) -> Result<(), Error> {
    let opts = BasicOptions::from_args(args)?;
    let to = args.value_of("to").unwrap();

    if !has_branch(&opts, to)? {
        if let Some(ref hash) = args.value_of("patch") {
            debug!(
                "Creating a new branch {:?} with dependencies of {:?}",
                to, hash
            );

            let deps = patch_dependencies(hash, &opts.repo_root)?;

            apply_resize_no_output(&opts.repo_root, to, deps.iter(), |_, _| ())?;

            println!("Branch {:?} has been created.", to);

            checkout(&opts, to, false, None)
        } else {
            let repo = opts.open_repo()?;
            let mut txn = repo.mut_txn_begin(rand::thread_rng())?;

            let br = opts.branch();
            let branch = txn.open_branch(&br)?;
            let new_branch = txn.fork(&branch, to)?;

            txn.commit_branch(branch)?;
            txn.commit_branch(new_branch)?;

            let partials = txn.iter_partials(&br)
            let partials = txn
                .iter_partials(&br)
                .take_while(|&(k, _)| k.as_str() == &br)
2
3
4
5
6
7
8


9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63


66


69
70

72
73
74
75
76
77
78
79
80
81

82
83
84
85
86
87
88
89
90
91
92

93
94
use commands::BasicOptions;
use error::Error;
use libpijul;
use libpijul::fs_representation::RepoRoot;
use rand;
use std::fs::{canonicalize, metadata};
use std::path::{Path, PathBuf};
use error::Error;
use libpijul::fs_representation::RepoRoot;

#[derive(Debug, Clone, Copy)]
pub enum Operation {
    Add,
    Remove,
}

pub fn run(args: &ArgMatches, op: Operation) -> Result<(), Error> {
    debug!("fs_operation {:?}", op);
    let opts = BasicOptions::from_args(args)?;

    debug!("repo {:?}", &opts.repo_root);
    let mut extra_space = 409600;
    let recursive = args.is_present("recursive");
    loop {
        let touched_files = match args.values_of("files") {
            Some(l) => l.map(|p| Path::new(p).to_owned()).collect(),
            None => vec![],
        };
        match really_run(
            &opts.repo_root,
            &opts.cwd,
            touched_files,
            recursive,
            op,
            extra_space,
        ) {
            Err(ref e) if e.lacks_space() => extra_space *= 2,
            e => return e,
        }
    }
}

fn really_run(
    //    repo_dir: &RepoRoot<&'static Path>,
    repo_dir: &RepoRoot<PathBuf>,
    wd: &Path,
    mut files: Vec<PathBuf>,
    recursive: bool,
    op: Operation,
    extra_space: u64,
) -> Result<(), Error> {
    debug!("files {:?}", files);
    let mut rng = rand::thread_rng();
    let repo = repo_dir.open_repo(Some(extra_space))?;
    let mut txn = repo.mut_txn_begin(&mut rng)?;
    match op {
        Operation::Add => {
            for file_ in files.drain(..) {
                let p = canonicalize(wd.join(&file_))?;
                if recursive {
                    debug!("adding from {:?}", p);
                    let mut files = Vec::new();
                    for file in repo_dir.untracked_files(&txn, &p) {
                        debug!("untracked {:?}", file);

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
                        if let Ok(file) = file.strip_prefix(r) {

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

================================
                        let m = metadata(repo_dir.absolutize(&file))?;
                        files.push((file, m.is_dir()));

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
                    }
                    for (file, is_dir) in files {
                        match txn.add_file(&file, is_dir) {
                            Ok(()) => {}
                            Err(libpijul::Error::AlreadyAdded) => {
                                eprintln!("{:?} is already in the repository", file_)
                            }
                            Err(e) => return Err(e.into()),
                        }
                    }
                    continue
                    continue;
                } else {
                    let m = metadata(&p)?;
                    if let Ok(file) = repo_dir.relativize(&p) {
                        match txn.add_file(&file, m.is_dir()) {
                            Ok(()) => {}
                            Err(libpijul::Error::AlreadyAdded) => {
                                eprintln!("{:?} is already in the repository", file_)
                            }
                            Err(e) => return Err(e.into()),
                        }
                        continue
                        continue;
                    }

1
2
use clap::{Arg, ArgGroup, ArgMatches, Shell, SubCommand, AppSettings};
use clap::{AppSettings, Arg, ArgGroup, ArgMatches, Shell, SubCommand};
use cli;
21
22

23
24
25
                .help("Regex pattern to search for")
                .required(true),
        ).arg(
        )
        .arg(
            Arg::with_name("branch")
1
2
3
4


5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33

34
35
36
37
use error::Error;
use libpijul::fs_representation::{RepoRoot, PIJUL_DIR_NAME};
use std::path::Path;
use std::process::Command;
use error::Error;
use libpijul::fs_representation::{PIJUL_DIR_NAME, RepoRoot};

pub fn run_hook(
    repo_root: &RepoRoot<impl AsRef<Path>>,
    hook: &'static str,
    additional_arg: Option<&String>,
) -> Result<(), Error> {
    let repo_root = &repo_root.repo_root;
    let mut cmd = repo_root.as_ref().to_path_buf();
    cmd.push(PIJUL_DIR_NAME);
    cmd.push("hooks");
    cmd.push(hook);

    if cmd.is_file() {
        println!("Running hook: {}", hook);

        let arg = match additional_arg {
            Some(ref arg) => vec![*arg],
            None => vec![],
        };

        let output = Command::new(cmd.as_path())
            .args(arg)
            .current_dir(repo_root)
            .output()?;

        if !output.status.success() {
            if let Ok(err) = String::from_utf8(output.stderr) {
                print!("{}", err);
            }
            return Err(Error::HookFailed { cmd: String::from(hook) });
            return Err(Error::HookFailed {
                cmd: String::from(hook),
            });
        }
43
44


45
46
47
48
49
    println!("{:?}", pages);
    println!("page count sum: {:?}", pages.sum());
    println!("total referenced: {:?}", pages.sum() + stats.free_pages.len() + stats.bookkeeping_pages.len() + 1);

    println!(
        "total referenced: {:?}",
        pages.sum() + stats.free_pages.len() + stats.bookkeeping_pages.len() + 1
    );

61
62


63
        ("upload", Some(args)) => Ok(Params::Upload {
            address: args.value_of("address").unwrap(),
            port: args.value_of("port")
                .and_then(|x| x.parse().ok()),
            port: args.value_of("port").and_then(|x| x.parse().ok()),


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107

        let path = args
            .values_of("path")
use clap::{Arg, ArgMatches, SubCommand};
use commands::patch::print_patch;
use commands::{ask, default_explain, BasicOptions, StaticSubcommand};
use error::Error;
use libpijul::fs_representation::RepoPath;
use libpijul::patch::Patch;
use libpijul::{Branch, PatchId, Txn};
use regex::Regex;
use std::fs::File;
use std::io::{BufReader, Read};
use std::path::PathBuf;
use term;

pub fn invocation() -> StaticSubcommand {
    SubCommand::with_name("log")
        .about("List the patches applied to the given branch")
        .arg(
            Arg::with_name("repository")
                .long("repository")
                .help("Path to the repository to list.")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("branch")
                .long("branch")
                .help("The branch to list.")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("internal-id")
                .long("internal-id")
                .help("Display only patches with these internal identifiers.")
                .multiple(true)
                .takes_value(true),
        )
        .arg(
            Arg::with_name("hash-only")
                .long("hash-only")
                .help("Only display the hash of each path."),
        )
        .arg(
            Arg::with_name("repository-id")
                .long("repository-id")
                .help("display a header with the repository id")
        )
        .arg(
            Arg::with_name("path")
                .long("path")
                .multiple(true)
                .takes_value(true)
                .help("Only display patches that touch the given path."),
        )
        .arg(
            Arg::with_name("grep")
                .long("grep")
                .multiple(true)
                .takes_value(true)
                .help("Search patch name and description with a regular expression."),
        )
        .arg(
            Arg::with_name("last")
                .long("last")
                .takes_value(true)
                .help("Show only the last n patches. If `--first m` is also used, then (a) if the command normally outputs the last patches first, this means the last n patches of the first m ones. (b) Else, it means the first m patches of the last n ones."),
        )
        .arg(
            Arg::with_name("first")
                .long("first")
                .takes_value(true)
                .help("Show only the last n patches. If `--last m` is also used, then (a) if the command normally outputs the last patches first, this means the last m patches of the first n ones. (b) Else, it means the first n patches of the last m ones."),
        )
        .arg(
            Arg::with_name("patch")
                .long("patch")
                .short("p")
                .help("Show patches"),
        )
}

struct Settings<'a> {
    hash_only: bool,
    show_repoid: bool,
    show_patches: bool,
    regex: Vec<Regex>,
    opts: BasicOptions<'a>,
    path: Vec<RepoPath<PathBuf>>,
    first: Option<usize>,
    last: Option<usize>,
}

impl<'a> Settings<'a> {
    fn parse(args: &'a ArgMatches) -> Result<Self, Error> {
        let basic_opts = BasicOptions::from_args(args)?;
        let hash_only = args.is_present("hash-only");
        let first = args.value_of("first").and_then(|x| x.parse().ok());
        let last = args.value_of("last").and_then(|x| x.parse().ok());
        let show_patches = args.is_present("patch");
        let show_repoid = args.is_present("repository-id");
        let mut regex = Vec::new();
        if let Some(regex_args) = args.values_of("grep") {
            for r in regex_args {
                debug!("regex: {:?}", r);
                regex.push(Regex::new(r)?)
            }
        }
        let path = args.values_of("path")

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15

16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36

37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104

105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120

121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200

202


205


208
209
210



213


216
217
218

220
221
222
223
224
225
226
227
228
229
230
231

232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
) -> Result<PathBuf, Error> {
use clap;
use clap::ArgMatches;
pub type StaticSubcommand = clap::App<'static, 'static>;

mod ask;
mod fs_operation;
pub mod remote;
mod ssh_auth_attempts;

pub mod add;
pub mod apply;
pub mod branches;
pub mod checkout;
pub mod clone;
pub mod credit;
pub mod prune;
pub mod dependencies;
pub mod diff;
pub mod dist;
pub mod fork;
pub mod generate_completions;
pub mod grep;
pub mod hooks;
pub mod info;
pub mod init;
pub mod key;
pub mod log;
pub mod ls;
pub mod mv;
pub mod patch;
pub mod prune;
pub mod pull;
pub mod push;
pub mod record;
pub mod remove;
pub mod revert;
pub mod rollback;
pub mod dependencies;
pub mod sign;
pub mod status;
pub mod tag;
pub mod unrecord;

mod fold_until;

use error::Error;
use libpijul::fs_representation::{RepoPath, RepoRoot};
use libpijul::Hash;
use libpijul::{fs_representation, Inode, Repository, Txn, DEFAULT_BRANCH};
use rand;
use std::borrow::Cow;
use std::env::current_dir;
use std::env::var;
use std::fs::{canonicalize, create_dir, metadata};
use std::io::{stderr, Write};
use std::path::{Path, PathBuf};
use std::process::exit;

pub fn all_command_invocations() -> Vec<StaticSubcommand> {
    return vec![
        log::invocation(),
        info::invocation(),
        init::invocation(),
        record::invocation(),
        unrecord::invocation(),
        add::invocation(),
        pull::invocation(),
        push::invocation(),
        apply::invocation(),
        clone::invocation(),
        remove::invocation(),
        mv::invocation(),
        ls::invocation(),
        revert::invocation(),
        patch::invocation(),
        fork::invocation(),
        branches::invocation(),
        prune::invocation(),
        checkout::invocation(),
        diff::invocation(),
        credit::invocation(),
        dist::invocation(),
        key::invocation(),
        rollback::invocation(),
        status::invocation(),
        dependencies::invocation(),
        tag::invocation(),
        sign::invocation(),
        generate_completions::invocation(),
        grep::invocation(),
    ];
}

pub fn get_wd(repository_path: Option<&Path>) -> Result<PathBuf, Error> {
    debug!("get_wd: {:?}", repository_path);
    match repository_path {
        None => Ok(canonicalize(current_dir()?)?),
        Some(a) if a.is_relative() => Ok(canonicalize(current_dir()?.join(a))?),
        Some(a) => Ok(canonicalize(a)?),
    }
}

/// Returns an error if the `dir` is contained in a repository.
pub fn assert_no_containing_repo(dir: &Path) -> Result<(), Error> {
    if metadata(dir).is_ok() {
        if fs_representation::find_repo_root(&canonicalize(dir)?).is_some() {
            return Err(Error::InARepository { path: dir.to_owned() });
            return Err(Error::InARepository {
                path: dir.to_owned(),
            });
        }
    }
    Ok(())
}

/// Creates an empty pijul repository in the given directory.
pub fn create_repo(dir: &Path) -> Result<(), Error> {
    // Check that a repository does not already exist.
    if metadata(dir).is_err() {
        create_dir(dir)?;
    }
    let dir = canonicalize(dir)?;
    if fs_representation::find_repo_root(&dir).is_some() {
        return Err(Error::InARepository { path: dir.to_owned() });
        return Err(Error::InARepository {
            path: dir.to_owned(),
        });
    }

    let repo_root = fs_representation::create(&dir, rand::thread_rng())?;
    let repo = repo_root.open_repo(None)?;
    repo.mut_txn_begin(rand::thread_rng())?.commit()?;
    Ok(())
}

fn default_explain<R>(command_result: Result<R, Error>) {
    debug!("default_explain");
    match command_result {
        Ok(_) => (),
        Err(e) => {
            writeln!(stderr(), "error: {}", e).unwrap();
            exit(1)
        }
    }
}

fn validate_base58(x: String) -> ::std::result::Result<(), String> {
    if Hash::from_base58(&x).is_some() {
        Ok(())
    } else {
        Err(format!("\"{}\" is invalid base58", x))
    }
}

/// Almost all commands want to know the current directory and the repository root.  This struct
/// fills that need, and also provides methods for other commonly-used tasks.
pub struct BasicOptions<'a> {
    /// This isn't 100% the same as the actual current working directory, so pay attention: this
    /// will be the current directory, unless the user specifies `--repository`, in which case
    /// `cwd` will actually be the path of the repository root. In other words, specifying
    /// `--repository` has the same effect as changing directory to the repository root before
    /// running `pijul`.
    pub cwd: PathBuf,
    pub repo_root: RepoRoot<PathBuf>,
    args: &'a ArgMatches<'a>,
}

pub enum ScanScope {
    FromRoot,
    WithPrefix(RepoPath<PathBuf>, String),
}

impl<'a> BasicOptions<'a> {
    /// Reads the options from command line arguments.
    pub fn from_args(args: &'a ArgMatches<'a>) -> Result<BasicOptions<'a>, Error> {
        let wd = get_wd(args.value_of("repository").map(Path::new))?;
        let repo_root = if let Some(r) = fs_representation::find_repo_root(&canonicalize(&wd)?) {
            r
        } else {
            return Err(Error::NotInARepository);
        };
        Ok(BasicOptions {
            cwd: wd,
            repo_root: repo_root,
            args: args,
        })
    }

    /// Gets the name of the desired branch.
    pub fn branch(&self) -> String {
        if let Some(b) = self.args.value_of("branch") {
            b.to_string()
        } else if let Ok(b) = self.repo_root.get_current_branch() {
            b
        } else {
            DEFAULT_BRANCH.to_string()
        }
    }

    pub fn repo_root(&self) -> PathBuf {
        self.repo_root.repo_root.clone()
    }

    pub fn open_repo(&self) -> Result<Repository, Error> {

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
        self.repo_root.open_repo(None).map_err(|e| e.into())

================================

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
        fs_representation::open_repo(&self.repo_root, None).map_err(|e| e.into())

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    }

    pub fn open_and_grow_repo(&self, increase: u64) -> Result<Repository, Error> {
        self.repo_root.open_repo(Some(increase)).map_err(|e| e.into())

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
        fs_representation::open_repo(&self.repo_root, Some(increase)).map_err(|e| e.into())

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

================================
        self.repo_root
            .open_repo(Some(increase))
            .map_err(|e| e.into())

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    }

    pub fn pristine_dir(&self) -> PathBuf {
        self.repo_root.pristine_dir()
    }

    pub fn patches_dir(&self) -> PathBuf {
        self.repo_root.patches_dir()
    }

    pub fn scan_scope(&self) -> Result<ScanScope, Error> {
        if let Some(prefix) = self.args.value_of("dir") {
            let root = self.args
            let root = self
                .args
                .value_of("repository")
                .map(|root| Path::new(root).to_path_buf())
                .unwrap_or(current_dir()?);

            Ok(ScanScope::WithPrefix(
                relative_repo_path(&self.repo_root, &root, prefix)?,
                prefix.into(),
            ))
        } else {
            Ok(ScanScope::FromRoot)
        }
    }

    fn dir_inode(&self, txn: &Txn) -> Result<Inode, Error> {
        use libpijul::ROOT_INODE;
        if let Some(dir) = self.args.value_of("dir") {
            let dir = if Path::new(dir).is_relative() {
                let root = if let Some(root) = self.args.value_of("repository") {
                    Path::new(root).to_path_buf()
                } else {
                    current_dir()?
                };
                root.join(&dir).canonicalize()?
            } else {
                Path::new(dir).canonicalize()?
            };
            let dir = self.repo_root.relativize(&dir)?;
            debug!("{:?}", dir);
            let inode = txn.find_inode(&dir)?;
            debug!("{:?}", inode);
            Ok(inode)
        } else {
            Ok(ROOT_INODE)
        }
    }
}

/// Name of the pijul executable to invoke on the remote side:
/// the REMOTE_PIJUL environment variable when set, "pijul" otherwise.
fn remote_pijul_cmd() -> Cow<'static, str> {
    match var("REMOTE_PIJUL") {
        Ok(cmd) => Cow::Owned(cmd),
        Err(_) => Cow::Borrowed("pijul"),
    }
}

pub fn relative_repo_path(
    repo_root: &RepoRoot<PathBuf>,
    base: &PathBuf,
    dir: &str,
) -> Result<RepoPath<PathBuf>, Error> {
    let dir = if Path::new(dir).is_relative() {






1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121

122
123
124
125
126
127
128
129
130
131
132
133
134
135

136
137
138
139
                        return Err(Error::InvalidPath {
                            path: f.to_string_lossy().into_owned(),
                        });
                        return Err(Error::InvalidPath {
                            path: f.to_string_lossy().into_owned(),
                        });
use clap::{Arg, ArgMatches, SubCommand};
use commands::{default_explain, BasicOptions, StaticSubcommand};
use error::Error;
use libpijul::fs_representation::RepoPath;
use std::fs::{metadata, rename};
use std::path::PathBuf;

use rand;
use std;
/// Builds the clap subcommand definition for `pijul mv`.
///
/// Fix: the "repository" argument carried a duplicated
/// `.takes_value(true)` left over from the reformatting; one is kept.
pub fn invocation() -> StaticSubcommand {
    SubCommand::with_name("mv")
        .about("Change file names")
        .arg(
            Arg::with_name("files")
                .multiple(true)
                .help("Files to move.")
                .required(true)
                // At least a source and a destination are required.
                .min_values(2),
        )
        .arg(
            Arg::with_name("repository")
                .long("repository")
                .help("Repository where the files are.")
                .takes_value(true),
        )
}

/// The two shapes a `mv` invocation can take: moving several paths
/// into an existing directory, or renaming a single file.
#[derive(Debug)]
pub enum Movement {
    /// Move every path in `from` into the directory `to`.
    IntoDir {
        from: Vec<RepoPath<PathBuf>>,
        to: RepoPath<PathBuf>,
    },
    /// Rename the single path `from` to `to`.
    FileToFile {
        from: RepoPath<PathBuf>,
        to: RepoPath<PathBuf>,
    },
}

/// Parses the `mv` file arguments into a `Movement`.
///
/// The last argument is the destination; if it is an existing directory
/// the remaining paths are moved into it, otherwise exactly one source
/// is expected and it is a plain rename. Panics on an invalid
/// multi-source destination (same as the original behavior).
fn get_movement(opts: &BasicOptions, args: &ArgMatches) -> Movement {
    debug!("wd = {:?}", opts.cwd);
    debug!("repo_root = {:?}", opts.repo_root);
    let mut repo_paths = Vec::new();
    for fname in args.values_of("files").unwrap() {
        debug!("fname: {:?}", fname);
        // if fname is absolute, erases current_dir.
        let mut path = std::env::current_dir().unwrap();
        path.push(fname);
        debug!("path = {:?}", path);
        let path = match std::fs::canonicalize(&path) {
            Ok(canonical) => canonical,
            // The file may not exist yet: canonicalize its parent and
            // re-attach the file name.
            Err(_) => std::fs::canonicalize(&path.parent().unwrap())
                .unwrap()
                .join(&path.file_name().unwrap()),
        };
        debug!("path = {:?}", path);
        let path = opts.repo_root.relativize(&path).unwrap();
        debug!("path = {:?}", path);

        repo_paths.push(path.to_owned());
    }
    debug!("parse_args: done");
    let (dest, origs) = repo_paths.split_last().unwrap();
    let target_path = opts.repo_root.absolutize(&dest);
    let to_dir = target_path.exists() && target_path.is_dir();

    if to_dir {
        Movement::IntoDir {
            from: Vec::from(origs),
            to: dest.clone(),
        }
    } else if origs.len() == 1 {
        Movement::FileToFile {
            from: origs[0].clone(),
            to: dest.clone(),
        }
    } else {
        panic!(
            "Cannot move files into {}: it is not a valid directory",
            dest.display()
        );
    }
}

pub fn run(args: &ArgMatches) -> Result<(), Error> {
    let opts = BasicOptions::from_args(args)?;
    let movement = get_movement(&opts, args);
    let repo = opts.open_repo()?;
    let mut txn = repo.mut_txn_begin(rand::thread_rng())?;
    match movement {
        Movement::FileToFile {
            from: ref orig_path,
            to: ref dest_path,
        } => {
            txn.move_file(orig_path, dest_path, false)?;
            debug!(
                "1 renaming {:?} into {:?}",
                opts.repo_root.repo_root.join(orig_path.as_path()),
                opts.repo_root.repo_root.join(dest_path.as_path())
            );
            rename(
                opts.repo_root.repo_root.join(orig_path.as_path()),
                opts.repo_root.repo_root.join(dest_path.as_path()),
            )?;
            txn.commit()?;
            Ok(())
        }
        Movement::IntoDir {
            from: ref orig_paths,
            to: ref dest_dir,
        } => {
            for file in orig_paths {
                let repo_target_name = {
                    let target_basename = if let Some(f) = file.file_name() {
                        f
                    } else {
                        return Err(Error::InvalidPath { path: f.to_string_lossy().into_owned() });
                        return Err(Error::InvalidPath {
                            path: file.to_path_buf(),
                        });
                    };
                    dest_dir.join(std::path::Path::new(target_basename))
                };
                let is_dir = metadata(&opts.repo_root.absolutize(file))?.is_dir();
                txn.move_file(&file, &repo_target_name, is_dir)?;
            }
            for file in orig_paths {
                let full_target_name = {
                    let target_basename = if let Some(f) = file.file_name() {
                        f
                    } else {
                        return Err(Error::InvalidPath { path: f.to_string_lossy().into_owned() });
                        return Err(Error::InvalidPath {
                            path: file.to_path_buf(),
                        });
                    };



1
2
3
4

5
6
7
8
9
10
11
12
13
14
15
16

use error::Error;
                let internal = txn
                    .get_internal(hash.as_ref())
use super::validate_base58;
use atty;
use clap::{Arg, ArgGroup, ArgMatches, SubCommand};
use commands::{default_explain, BasicOptions, StaticSubcommand};
use atty;
use error::Error;
use libpijul;
use libpijul::graph::LineBuffer;
use libpijul::patch::{Change, NewEdge, Patch};
use libpijul::{Branch, EdgeFlags, Hash, Key, LineId, PatchId, Transaction, Txn, Value, ROOT_KEY};
use std::cmp::max;
use std::collections::{HashMap, HashSet};
use std::fs::File;
use std::io::{copy, stdout, BufReader};
use std::str::from_utf8;
use term;
use term::StdoutTerminal;
use error::Error;
2
3
4
use error::Error;
use rand;


1
2
3
4
5
6
7
8
9
10
11
12

use libpijul::fs_representation::RepoRoot;
use clap::{Arg, ArgMatches, SubCommand};

use commands::{default_explain, BasicOptions, StaticSubcommand};
use error::Error;
use std::fs::File;
use std::path::Path;

use commands::ask::{ask_patches, Command};
use commands::remote;
use libpijul::fs_representation::{RepoPath, RepoRoot};
use libpijul::patch::Patch;
use libpijul::{ApplyTimestamp, Hash, PatchId, DEFAULT_BRANCH};
use libpijul::fs_representation::RepoRoot;


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42

43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86


89


92
93

95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140

141
142
143
144

145
146
147
148
                .help("Force the push even if non fast-forward."),
            return Err(Error::NonFastForwardPush);
use clap::{Arg, ArgMatches, SubCommand};

use super::ask;
use commands::{default_explain, BasicOptions, StaticSubcommand};
use error::Error;
use libpijul::RepoPath;
use meta::{Meta, Repository, DEFAULT_REMOTE};
use std::env::current_dir;
use std::path::Path;

/// Builds the clap subcommand definition for `pijul push`.
///
/// Fix: the "force" argument carried a duplicated `.help(...)` call
/// left over from the reformatting; one is kept.
pub fn invocation() -> StaticSubcommand {
    SubCommand::with_name("push")
        .about("Push to a remote repository")
        .arg(
            Arg::with_name("repository")
                .long("repository")
                .help("Repository to list.")
                .takes_value(true),
        )
        .arg(Arg::with_name("remote").help("Repository to push to."))
        .arg(
            Arg::with_name("local_branch")
                .long("from-branch")
                .help("The branch to push from")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("remote_branch")
                .long("to-branch")
                .help("The branch to push into. Defaults to the current branch.")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("all")
                .short("a")
                .long("all")
                .help("Answer 'y' to all questions.")
                .takes_value(false),
        )
        .arg(
            Arg::with_name("force")
                .long("force")
                .help("Force the push even if non fast-forward."),
        )
        .arg(
            Arg::with_name("set-default")
                .long("set-default")
                .help("Used with --set-remote, sets this remote as the default push target."),
        )
        .arg(
            Arg::with_name("set-remote")
                .long("set-remote")
                .help("Set the name of this remote")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("remote_path")
                .long("path")
                .help("Only pull patches relative to that patch.")
                .takes_value(true)
                .multiple(true),
        )
        .arg(
            Arg::with_name("port")
                .short("p")
                .long("port")
                .help("Port of the remote ssh server.")
                .takes_value(true)
                // Reject anything that does not parse as a u16 port number.
                .validator(|val| {
                    let x: Result<u16, _> = val.parse();
                    match x {
                        Ok(_) => Ok(()),
                        Err(_) => Err(val),
                    }
                }),
        )
}

#[derive(Debug)]
pub struct Params<'a> {
    pub remote_id: Option<&'a str>,
    pub set_remote: Option<&'a str>,
    pub yes_to_all: bool,
    pub set_default: bool,
    pub port: Option<u16>,
    pub local_branch: Option<&'a str>,

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    pub remote_path: Vec<&'a str>,

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

================================
    pub remote_path: Vec<RepoPath<&'a Path>>,
    pub remote_branch: Option<&'a str>,

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    pub force: bool,
}

/// Extracts the push parameters from the parsed command line.
/// The port has already been validated by clap, hence the unwrap.
pub fn parse_args<'a>(args: &'a ArgMatches) -> Params<'a> {
    let remote_path = args
        .values_of("remote_path")
        .map(|paths| paths.map(|p| RepoPath(Path::new(p))).collect())
        .unwrap_or_else(Vec::new);
    Params {
        remote_id: args.value_of("remote"),
        set_remote: args.value_of("set-remote"),
        yes_to_all: args.is_present("all"),
        set_default: args.is_present("set-default"),
        port: args.value_of("port").map(|x| x.parse().unwrap()),
        local_branch: args.value_of("local_branch"),
        remote_branch: args.value_of("remote_branch"),
        remote_path,
        force: args.is_present("force"),
    }
}

pub fn run(arg_matches: &ArgMatches) -> Result<(), Error> {
    let opts = BasicOptions::from_args(arg_matches)?;
    let args = parse_args(arg_matches);
    let mut meta = Meta::load(&opts.repo_root).unwrap_or(Meta::new());
    let local_branch = args
        .local_branch
        .map(|b| b.to_string())
        .unwrap_or(opts.branch());
    let remote_branch = args
        .remote_branch
        .map(|b| b.to_string())
        .unwrap_or(opts.branch());
    let cwd = current_dir()?;
    let repo_root = opts.repo_root();
    {
        let remote = meta.push(args.remote_id, args.port, Some(&cwd), Some(&repo_root))?;
        debug!("remote: {:?}", remote);
        println!("Pushing to branch {}", remote_branch);
        let mut session = remote.session()?;
        let mut pushable = session.pushable_patches(
            &local_branch,
            &remote_branch,
            &opts.repo_root,
            &args.remote_path,
        )?;
        if !pushable.non_fast_forward.is_empty() && !args.force {
            return Err(Error::NonFastForwardPush)
            return Err(Error::NonFastForwardPush);
        }
        let pushable = if !args.yes_to_all {
            let mut patches = Vec::new();
            pushable.pushable.sort_by(|&(_, _, a), &(_, _, b)| a.cmp(&b));
            pushable
                .pushable
                .sort_by(|&(_, _, a), &(_, _, b)| a.cmp(&b));
            for &(ref i, ref internal, _) in pushable.pushable.iter() {
15
16
17
18
19
20


21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78

79
80
81
82
83
84
85

86
87
88
89
90
91
92

93
94
95
96
97
98
99
100
101
102
103
104
105


108


111


114

116
117
118

119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524

526
527
528
529
530
531
532
533
534
535
536
537


538
539
540
use std::fs::canonicalize;
use std::fs::{metadata, OpenOptions};
use std::io::Write;
use std::mem::drop;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::fs::{metadata, OpenOptions};
use std::io::Write;

/// Adds the arguments shared by record-like commands to `sub`.
pub fn record_args(sub: StaticSubcommand) -> StaticSubcommand {
    sub.arg(
        Arg::with_name("repository")
            .long("repository")
            .help("The repository where to record, defaults to the current directory.")
            .takes_value(true)
            .required(false),
    )
    .arg(
        Arg::with_name("branch")
            .long("branch")
            .help("The branch where to record, defaults to the current branch.")
            .takes_value(true)
            .required(false),
    )
    .arg(
        Arg::with_name("date")
            .long("date")
            .help("The date to use to record the patch, default is now.")
            .takes_value(true)
            .required(false),
    )
    .arg(
        Arg::with_name("message")
            .short("m")
            .long("message")
            .help("The name of the patch to record")
            .takes_value(true),
    )
    .arg(
        Arg::with_name("description")
            .short("d")
            .long("description")
            .help("The description of the patch to record")
            .takes_value(true),
    )
    .arg(
        Arg::with_name("no-editor")
            .long("no-editor")
            .help("Do not use an editor to write the patch name and description, even if the variable is set in the configuration file")
            .takes_value(false),
    )
    .arg(
        Arg::with_name("author")
            .short("A")
            .long("author")
            .help("Author of this patch (multiple occurrences allowed)")
            .takes_value(true),
    )
    .arg(
        Arg::with_name("patience")
            .long("patience")
            .help("Use patience diff instead of the default (Myers diff)")
            .conflicts_with("myers")
            .takes_value(false),
    )
    .arg(
        Arg::with_name("myers")
            .long("myers")
            .help("Use Myers diff")
            .conflicts_with("patience")
            .takes_value(false),
    )
}

/// Builds the clap subcommand definition for `pijul record`.
///
/// Fix: removed the `).arg(` / `)` old/new line pairs left over from the
/// reformatting, which produced empty `.arg()` calls and broke the build.
pub fn invocation() -> StaticSubcommand {
    record_args(
        SubCommand::with_name("record")
            .about("Record changes in the repository")
            .arg(
                Arg::with_name("all")
                    .short("a")
                    .long("all")
                    .help("Answer 'y' to all questions")
                    .takes_value(false),
            )
            .arg(
                Arg::with_name("add-new-files")
                    .short("n")
                    .long("add-new-files")
                    .help("Offer to add files that have been created since the last record")
                    .takes_value(false),
            )
            .arg(
                Arg::with_name("depends-on")
                    .help("Add a dependency to this patch (internal id or hash accepted)")
                    .long("depends-on")
                    .takes_value(true)
                    .multiple(true),
            )
            .arg(
                Arg::with_name("prefix")
                    .help("Prefix to start from")
                    .takes_value(true)
                    .multiple(true),
            ),
    )
}

fn add_untracked_files<T: rand::Rng, P: AsRef<Path> + 'static>(
    txn: &mut MutTxn<T>,
    repo_root: &RepoRoot<P>,

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
        if let Err(e) = txn.add_file(&file, m.is_dir()) {

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
        }

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    }

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    Ok(untracked)
}

fn append_to_ignore_file(repo_root: &RepoRoot<impl AsRef<Path>>, lines: &Vec<String>) -> Result<(), Error> {
fn append_to_ignore_file(
    repo_root: &RepoRoot<impl AsRef<Path>>,
    lines: &Vec<String>,
) -> Result<(), Error> {
    let ignore_file = repo_root.local_ignore_file();
    let mut file = OpenOptions::new()
        .append(true)
        .create(true)
        .open(ignore_file)?;
    for line in lines {
        file.write_all(line.as_ref())?;
        file.write_all(b"\n")?
    }
    Ok(())
}

/// Computes the changes to record on `branch_name` (optionally limited to
/// `prefix`) and, unless `yes_to_all` is set, lets the user pick among
/// them interactively. Returns the selected records plus the inode
/// updates needed to keep the working copy in sync.
fn select_changes(
    algo: libpijul::DiffAlgorithm,
    opts: &BasicOptions,
    add_new_files: bool,
    branch_name: &str,
    yes_to_all: bool,
    prefix: Option<Vec<RepoPath<PathBuf>>>,
) -> Result<(Vec<Record<Vec<Key<Option<Hash>>>>>, HashSet<InodeUpdate>), Error> {
    // Increase by 100 pages. The most things record can write is one
    // write in the branches table, affecting at most O(log n) blocks.
    let repo = opts.open_and_grow_repo(409600)?;
    let mut txn = repo.mut_txn_begin(rand::thread_rng())?;
    // Files added speculatively for this record; any the user declines
    // are removed again below, before the transaction commits.
    let mut to_unadd = if add_new_files {
        add_untracked_files(&mut txn, &opts.repo_root)?
    } else {
        HashSet::new()
    };
    let (changes, syncs) = changes_from_prefixes(
        algo,
        &opts.repo_root,
        &mut txn,
        &branch_name,
        prefix.as_ref(),
    )?;
    let changes: Vec<_> = changes
        .into_iter()
        .map(|x| txn.globalize_record(x))
        .collect();
    if !yes_to_all {
        // `c` maps change indices to the user's yes/no answers;
        // `i` collects new ignore-file entries the user asked for.
        let (c, i) = ask_changes(
            &txn,
            &opts.repo_root,
            &opts.cwd,
            &changes,
            ChangesDirection::Record,
            &mut to_unadd,
        )?;
        // Keep only the changes the user accepted (default: rejected).
        let selected = changes
            .into_iter()
            .enumerate()
            .filter(|&(i, _)| *(c.get(&i).unwrap_or(&false)))
            .map(|(_, x)| x)
            .collect();
        for file in to_unadd {
            txn.remove_file(&file)?
        }
        txn.commit()?;
        append_to_ignore_file(&opts.repo_root, &i)?;
        Ok((selected, syncs))
    } else {
        txn.commit()?;
        Ok((changes, syncs))
    }
}

/// Entry point of `pijul record`: selects changes, collects the patch
/// metadata (name, description, authors, date, extra dependencies),
/// creates and saves the patch (signed when a signing key is available),
/// then applies it to the local branch. Returns the new patch hash, or
/// `None` when there was nothing to record.
pub fn run(args: &ArgMatches) -> Result<Option<Hash>, Error> {
    let opts = BasicOptions::from_args(args)?;
    let yes_to_all = args.is_present("all");
    let patch_name_arg = args.value_of("message");
    let patch_descr_arg = args.value_of("description");
    let authors_arg = args.values_of("author").map(|x| x.collect::<Vec<_>>());
    let branch_name = opts.branch();
    let add_new_files = args.is_present("add-new-files");

    // --date must parse as a chrono datetime; defaults to "now".
    let patch_date = args.value_of("date").map_or(Ok(chrono::Utc::now()), |x| {
        chrono::DateTime::from_str(x).map_err(|_| Error::InvalidDate {
            date: String::from(x),
        })
    })?;

    let mut save_meta = false;

    // A failed load of the global config yields a fresh one that is
    // saved back at the end (save_global = true).
    let (mut global, save_global) = Global::load().map(|c| (c, false)).unwrap_or_else(|e| {
        info!("loading global key, error {:?}", e);
        (Global::new(), true)
    });

    let mut meta = match Meta::load(&opts.repo_root) {
        Ok(m) => m,
        Err(_) => {
            save_meta = true;
            Meta::new()
        }
    };

    run_hook(&opts.repo_root, "pre-record", None)?;

    debug!("prefix {:?}", args.value_of("prefix"));
    let prefix = prefix(args, &opts)?;

    let (changes, syncs) = select_changes(
        if args.is_present("patience") {
            libpijul::DiffAlgorithm::Patience
        } else {
            libpijul::DiffAlgorithm::Myers
        },
        &opts,
        add_new_files,
        &branch_name,
        yes_to_all,
        prefix,
    )?;

    if changes.is_empty() {
        println!("Nothing to record");
        Ok(None)
    } else {
        let template = prepare_changes_template(patch_name_arg.unwrap_or(""), &changes);

        let repo = opts.open_repo()?;
        let patch = {
            let txn = repo.txn_begin()?;
            debug!("meta:{:?}", meta);

            let authors = decide_authors(authors_arg, &meta, &global)?;

            if authors.is_empty() {
                return Err(Error::NoAuthor);
            }

            // Remember the authors in the repository metadata the first
            // time around, and seed the global default author.
            if meta.authors.is_empty() {
                meta.authors = authors.clone();
                save_meta = true;
            }

            if global.author.is_none() {
                global.author = Some(authors[0].clone());
            }

            debug!("authors:{:?}", authors);

            let (patch_name, description) = decide_patch_message(
                patch_name_arg,
                patch_descr_arg,
                template,
                !args.is_present("no-editor"),
                &opts.repo_root,
                &meta,
                &global,
            )?;

            run_hook(&opts.repo_root, "patch-name", Some(&patch_name))?;

            debug!("patch_name:{:?}", patch_name);
            if save_meta {
                meta.save(&opts.repo_root)?
            }
            if save_global {
                global.save().unwrap_or(())
            }
            debug!("new");
            let changes = changes.into_iter().flat_map(|x| x.into_iter()).collect();
            let branch = txn.get_branch(&branch_name).unwrap();

            // Resolve --depends-on values: each may be a full patch hash
            // or an internal (base58) patch id; either way the patch must
            // already be on the branch.
            let mut extra_deps = Vec::new();
            if let Some(deps) = args.values_of("depends-on") {
                for dep in deps {
                    if let Some(hash) = Hash::from_base58(dep) {
                        if let Some(internal) = txn.get_internal(hash.as_ref()) {
                            if txn.get_patch(&branch.patches, internal).is_some() {
                                extra_deps.push(hash)
                            } else {
                                return Err(Error::ExtraDepNotOnBranch { hash });
                            }
                        } else {
                            return Err(Error::PatchNotFound {
                                repo_root: opts.repo_root().to_string_lossy().into_owned(),
                                patch_hash: hash,
                            });
                        }
                    } else if let Some(internal) = PatchId::from_base58(dep) {
                        if let Some(hash) = txn.get_external(internal) {
                            if txn.get_patch(&branch.patches, internal).is_some() {
                                extra_deps.push(hash.to_owned())
                            } else {
                                return Err(Error::ExtraDepNotOnBranch {
                                    hash: hash.to_owned(),
                                });
                            }
                        }
                    } else {
                        return Err(Error::WrongHash);
                    }
                }
            }
            txn.new_patch(
                &branch,
                authors,
                patch_name,
                description,
                patch_date,
                changes,
                extra_deps.into_iter(),
                PatchFlags::empty(),
            )
        };
        drop(repo);

        // Sign the patch when a signing key is configured (repository
        // metadata wins over the global config), then save it to disk.
        let patches_dir = opts.repo_root.patches_dir();
        let mut key = meta
            .signing_key
            .or(global.signing_key)
            .and_then(|s| load_signing_key(s).ok());
        let hash = if let Some(ref mut key) = key {
            key.check_author(&patch.header().authors)?;
            patch.save(&patches_dir, key.keys.get_mut(0))?
        } else {
            patch.save(&patches_dir, None)?
        };

        // Apply the patch, doubling the requested space on every
        // "out of space" failure until it fits.
        let pristine_dir = opts.pristine_dir();
        let mut increase = 409600;
        let res = loop {
            match record_no_resize(
                &pristine_dir,
                &opts.repo_root,
                &branch_name,
                &hash,
                &patch,
                &syncs,
                increase,
            ) {
                Err(ref e) if e.lacks_space() => increase *= 2,
                e => break e,
            }
        };

        run_hook(&opts.repo_root, "post-record", None)?;

        res
    }
}

/// Applies `patch` (already saved on disk under `hash`) to `branch_name`,
/// opening the pristine with `increase` extra bytes of space. The caller
/// retries with a larger `increase` when this fails for lack of space.
pub fn record_no_resize(
    pristine_dir: &Path,
    r: &RepoRoot<impl AsRef<Path>>,
    branch_name: &str,
    hash: &Hash,
    patch: &Patch,
    syncs: &HashSet<InodeUpdate>,
    increase: u64,
) -> Result<Option<Hash>, Error> {
    // Reserve room for the patch itself on top of the requested increase.
    let size_increase = increase + patch.size_upper_bound() as u64;
    let repo = match Repository::open(&pristine_dir, Some(size_increase)) {
        Ok(repo) => repo,
        Err(x) => return Err(Error::Repository(x)),
    };
    let mut txn = repo.mut_txn_begin(rand::thread_rng())?;
    // save patch
    debug!("syncs: {:?}", syncs);
    let mut branch = txn.open_branch(branch_name)?;
    txn.apply_local_patch(&mut branch, r, &hash, &patch, &syncs, false)?;
    txn.commit_branch(branch)?;
    txn.commit()?;
    println!("Recorded patch {}", hash.to_base58());
    Ok(Some(hash.clone()))
}

/// Reports the outcome of `run` to the user via the default explainer.
pub fn explain(res: Result<Option<Hash>, Error>) {
    default_explain(res)
}

/// Records the working-copy changes on `branch_name`, restricted to the
/// given prefixes when `prefix` is `Some`, and returns the resulting
/// records together with the pending inode updates.
pub fn changes_from_prefixes<T: rand::Rng, P: AsRef<Path>>(
    algo: libpijul::DiffAlgorithm,
    repo_root: &RepoRoot<impl AsRef<Path>>,
    txn: &mut MutTxn<T>,
    branch_name: &str,
    prefix: Option<&Vec<RepoPath<P>>>,
) -> Result<
    (
        Vec<libpijul::patch::Record<libpijul::patch::ChangeContext<PatchId>>>,
        HashSet<libpijul::InodeUpdate>,
    ),
    Error,
> {
    let mut state = RecordState::new();
    let branch = txn.open_branch(branch_name)?;
    match prefix {
        Some(prefixes) => {
            for p in prefixes {
                txn.record(algo, &mut state, &branch, repo_root, p)?;
            }
        }
        None => txn.record(algo, &mut state, &branch, repo_root, &in_repo_root())?,
    }
    txn.commit_branch(branch)?;
    let (changes, updates) = state.finish();
    Ok((changes, updates))
}

/// Turns the "prefix" arguments into repository-relative paths, or
/// `None` when no prefix was given.
pub fn prefix(
    args: &ArgMatches,
    opts: &BasicOptions,
) -> Result<Option<Vec<RepoPath<PathBuf>>>, Error> {
    let values = match args.values_of("prefix") {
        Some(v) => v,
        None => return Ok(None),
    };
    let mut prefixes = Vec::new();
    for prefix in values {
        let joined = opts.cwd.join(prefix);
        // Canonicalize when possible; otherwise keep the joined path as-is.
        let resolved = canonicalize(&joined).unwrap_or(joined);
        let file = opts.repo_root.relativize(&resolved)?;
        prefixes.push(file.to_owned());
    }
    Ok(Some(prefixes))
}

/// Chooses the patch authors: explicit `--author` arguments win, then
/// the repository metadata, then the global config's default author,
/// and finally the user is asked interactively.
///
/// Idiom fix: `meta.authors.len() > 0` → `!meta.authors.is_empty()`.
pub fn decide_authors(
    authors_args: Option<Vec<&str>>,
    meta: &Meta,
    global: &Global,
) -> Result<Vec<String>, Error> {
    Ok(match authors_args {
        Some(authors) => authors.iter().map(|x| x.to_string()).collect(),
        _ => {
            if !meta.authors.is_empty() {
                meta.authors.clone()
            } else if let Some(ref auth) = global.author {
                vec![auth.clone()]
            } else {
                ask::ask_authors()?
            }
        }
    })
}

/// Determines the patch name and description: explicit arguments win,
/// otherwise the user is asked, possibly through an editor seeded with
/// `template`.
pub fn decide_patch_message(
    name_arg: Option<&str>,
    descr_arg: Option<&str>,
    template: String,
    use_editor: bool,
    repo_root: &RepoRoot<impl AsRef<Path>>,
    meta: &Meta,
    global: &Global,
) -> Result<(String, Option<String>), Error> {
    match name_arg {
        Some(name) => Ok((name.to_string(), descr_arg.map(|d| String::from(d.trim())))),
        _ => {
            // The repository-local editor setting takes precedence over
            // the global one; no editor at all when use_editor is false.
            let maybe_editor = if use_editor {
                meta.editor.as_ref().or(global.editor.as_ref())
            } else {
                None
            };

            Ok(ask::ask_patch_name(repo_root, maybe_editor, template)?)
        }
    }
}

/// Build the editor template shown when asking for a patch message:
/// the (possibly empty) description `descr`, followed by a commented
/// summary of the recorded changes, git-style.
fn prepare_changes_template(descr: &str, changes: &[Record<Vec<Key<Option<Hash>>>>]) -> String {
    // Needed for `write!` on a `String`; writing to a `String` cannot
    // fail, so the results below are unwrapped.
    use std::fmt::Write;
    let mut res = format!(
        r#"
{}
# Please enter a patch title, and consider writing a description too. Lines
# starting with '#' will be ignored. Besides, an empty patch title aborts the
# patch recording.
#
# Here is a summary of the changes you are about to record:
#"#,
        descr
    );
    let mut known_files = Vec::new();
    for change in changes.iter() {
        match *change {
            Record::Change { ref file, .. } => {
                // Several hunks may touch the same file; list it once.
                if !known_files.contains(&file) {
                    write!(res, "\n#\tmodified:  {}", file.display()).unwrap();
                    known_files.push(file);
                }
            }
            Record::FileAdd { ref name, .. } => {
                write!(res, "\n#\tnew file:  {}", name.display()).unwrap();
            }
            Record::FileDel { ref name, .. } => {
                write!(res, "\n#\tdeleted:  {}", name.display()).unwrap();
            }
            Record::FileMove { ref new_name, .. } => {
                write!(res, "\n#\t   moved:  to {}", new_name.display()).unwrap();
            }
        }
    }

    res
}

================================
) -> Result<HashSet<RepoPath<PathBuf>>, Error> {
    let mut untracked = HashSet::new();
    for file in repo_root.untracked_files(txn, &repo_root.repo_root) {
        debug!("untracked: {:?}", file);
        untracked.insert(file);
    }
    for file in untracked.iter() {
        let metadata = metadata(repo_root.absolutize(&file))?;
        // Because we're using a HashSet and not a Vec, our untracked
        // paths are in a random order, and hence add_inode of a long
        // path might add the components of prefixes of that path.
        match txn.add_inode(None, file, metadata.is_dir()) {
            Ok(()) | Err(libpijul::Error::AlreadyAdded) => {},
            Err(e) => return Err(e.into())
            Ok(()) | Err(libpijul::Error::AlreadyAdded) => {}
            Err(e) => return Err(e.into()),
        }
1311
1312





1313
1314
                    auth_attempt_future(
                        connection,
                        AuthAttempts::new(
                            host,
                            local_repo_root,
                            use_agent,
                        ),
                        AuthAttempts::new(host, local_repo_root, use_agent),
                        user,

1
2
3
4
5
6
7
8
9

10
11
12
13

14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49










50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161

162
163
164
use libpijul::fs_representation::RepoRoot;
use super::ask::{ask_changes, ChangesDirection};
use super::record;
use chrono;
use clap::{Arg, ArgMatches, SubCommand};
use commands::{default_explain, BasicOptions, StaticSubcommand};
use error::Error;
use libpijul::fs_representation::{RepoPath, RepoRoot};
use libpijul::patch::{Patch, PatchFlags, UnsignedPatch};
use libpijul::{Inode, InodeUpdate, Repository, ToPrefixes};
use libpijul::fs_representation::RepoRoot;
use rand;
use std;
use std::collections::HashSet;
use std::path::{Path, PathBuf};
use error::Error;

/// Build the `revert` subcommand: rewrite the working copy from the
/// pristine, optionally restricted to a branch and/or path prefixes,
/// with a choice of diff algorithm.
pub fn invocation() -> StaticSubcommand {
    // Note: each argument must be registered exactly once; clap
    // rejects duplicate argument names.
    SubCommand::with_name("revert")
        .about("Rewrite the working copy from the pristine")
        .arg(
            Arg::with_name("repository")
                .long("repository")
                .takes_value(true)
                .help("Local repository."),
        )
        .arg(
            Arg::with_name("all")
                .short("a")
                .long("all")
                .help("Answer 'y' to all questions")
                .takes_value(false),
        )
        .arg(
            Arg::with_name("branch")
                .help("Branch to revert to.")
                .long("branch")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("prefix")
                .help("Prefix to start from")
                .takes_value(true)
                .multiple(true),
        )
        .arg(
            Arg::with_name("patience")
                .long("patience")
                .help("Use patience diff instead of the default (Myers diff)")
                .conflicts_with("myers")
                .takes_value(false),
        )
        .arg(
            Arg::with_name("myers")
                .long("myers")
                .help("Use Myers diff")
                .conflicts_with("patience")
                .takes_value(false),
        )
}

pub fn run(args: &ArgMatches) -> Result<(), Error> {
    let opts = BasicOptions::from_args(args)?;
    let yes_to_all = args.is_present("all");
    let branch_name = opts.branch();
    let prefix = record::prefix(args, &opts)?;
    // Generate the pending patch.
    let (pending, pending_syncs): (_, HashSet<_>) = if !yes_to_all || prefix.is_some() {
        let repo = opts.open_and_grow_repo(409600)?;
        let mut txn = repo.mut_txn_begin(rand::thread_rng())?;
        let (changes, syncs) = {
            let (changes, syncs) = record::changes_from_prefixes(
                if args.is_present("patience") {
                    libpijul::DiffAlgorithm::Patience
                } else {
                    libpijul::DiffAlgorithm::Myers
                },
                &opts.repo_root,
                &mut txn,
                &branch_name,
                prefix.as_ref(),
            )?;
            let changes: Vec<_> = changes
                .into_iter()
                .map(|x| txn.globalize_record(x))
                .collect();
            if yes_to_all {
                (Vec::new(), HashSet::new())
            } else {
                let (c, _empty_vec) = ask_changes(
                    &txn,
                    &opts.repo_root,