// Extracting this saves a little bit of duplication.
fn get_local_inodes(
txn: &mut MutTxn<()>,
channel: &ChannelRef<MutTxn<()>>,
repo: &Repository,
path: &[String],
) -> Result<HashSet<Position<ChangeId>>, anyhow::Error> {
let mut paths = HashSet::new();
for path in path.iter() {
let (p, ambiguous) = txn.follow_oldest_path(&repo.changes, &channel, path)?;
if ambiguous {
bail!("Ambiguous path: {:?}", path)
}
paths.insert(p);
paths.extend(
libpijul::fs::iter_graph_descendants(txn, &channel.read().graph, p)?
.map(|x| x.unwrap()),
);
}
Ok(paths)
}
/// Embellished [`RemoteDelta`] that has information specific
/// to a push operation. We want to know what our options are
/// for changes to upload, whether the remote has unrecorded any relevant changes,
/// and whether the remote has changes we don't know about, since those might
/// affect whether or not we actually want to go through with the push.
pub(crate) struct PushDelta<T: MutTxnTExt + TxnTExt> {
pub to_upload: Vec<Hash>,
pub remote_ref: Option<RemoteRef<T>>,
pub remote_unrecs: Vec<(u64, Hash)>,
pub unknown_changes: Vec<Hash>,
}
/// For a [`RemoteRepo`] that's Local, Ssh, or Http
/// (anything other than a LocalChannel),
/// [`RemoteDelta`] contains data about the difference between
/// the "actual" state of the remote ('theirs') and the last version of it
/// that we cached ('ours'). The dichotomy is the last point at which the two
/// were the same. `remote_unrecs` is a list of changes that used to be
/// present in the remote AND are present in the current channel we're
/// pulling to or pushing from. The restriction to the current channel matters
/// because if we knew about a change but never pulled it, the user doesn't
/// need to be notified when it's unrecorded in the remote.
///
/// If the remote we're pulling from or pushing to is a LocalChannel,
/// (meaning it's just a different channel of the repo we're already in), then
/// `ours_ge_dichotomy`, `theirs_ge_dichotomy`, and `remote_unrecs` will be empty
/// since they have no meaning. If we're pulling from a LocalChannel,
/// there's no cache to have diverged from, and if we're pushing to a LocalChannel,
/// we're not going to suddenly be surprised by the presence of unknown changes.
///
/// This struct will be created by both a push and pull operation since both
/// need to update the changelist and will at least try to update the local
/// remote cache. For a push, this later gets turned into [`PushDelta`].
pub(crate) struct RemoteDelta<T: MutTxnTExt + TxnTExt> {
pub inodes: HashSet<Position<Hash>>,
pub to_download: Vec<Hash>,
pub remote_ref: Option<RemoteRef<T>>,
pub ours_ge_dichotomy_set: HashSet<Hash>,
pub theirs_ge_dichotomy_set: HashSet<Hash>,
// Keep the Vec representation around as well so that notification
// for unknown changes during a push shows the hashes in order.
pub theirs_ge_dichotomy: Vec<(u64, Hash, Merkle)>,
pub remote_unrecs: Vec<(u64, Hash)>,
}
impl RemoteDelta<MutTxn<()>> {
/// Make a [`PushDelta`] from a [`RemoteDelta`]
/// when the remote is a [`RemoteRepo::LocalChannel`].
pub(crate) fn to_local_channel_push(
self,
remote_channel: &str,
txn: &mut MutTxn<()>,
path: &[String],
channel: &ChannelRef<MutTxn<()>>,
repo: &Repository,
) -> Result<PushDelta<MutTxn<()>>, anyhow::Error> {
let mut to_upload = Vec::<Hash>::new();
let inodes = get_local_inodes(txn, channel, repo, path)?;
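// Walk the current channel's log from newest to oldest; any change the remote
// channel doesn't already have (and that touches one of the requested paths,
// if any were given) becomes a push candidate.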
for x in txn.reverse_log(&*channel.read(), None)? {
let (_, (h, _)) = x?;
if let Some(channel) = txn.load_channel(remote_channel)? {
let channel = channel.read();
let h_int = txn.get_internal(h)?.unwrap();
if txn.get_changeset(txn.changes(&channel), h_int)?.is_none() {
if inodes.is_empty() {
to_upload.push(h.into())
} else {
for p in inodes.iter() {
if txn.get_touched_files(p, Some(h_int))?.is_some() {
to_upload.push(h.into());
break;
}
}
}
}
}
}
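// A LocalChannel remote has no cached changelist, so neither side can have
// diverged from a cache; both dichotomy sets must be empty.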
assert!(self.ours_ge_dichotomy_set.is_empty());
assert!(self.theirs_ge_dichotomy_set.is_empty());
let d = PushDelta {
to_upload: to_upload.into_iter().rev().collect(),
remote_ref: self.remote_ref,
remote_unrecs: self.remote_unrecs,
unknown_changes: Vec::new(),
};
assert!(d.remote_unrecs.is_empty());
Ok(d)
}
/// Make a [`PushDelta`] from a [`RemoteDelta`] when the remote
/// is not a LocalChannel.
pub(crate) fn to_remote_push(
self,
txn: &mut MutTxn<()>,
path: &[String],
channel: &ChannelRef<MutTxn<()>>,
repo: &Repository,
) -> Result<PushDelta<MutTxn<()>>, anyhow::Error> {
let mut to_upload = Vec::<Hash>::new();
let inodes = get_local_inodes(txn, channel, repo, path)?;
if let Some(ref remote_ref) = self.remote_ref {
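// Walk our log from newest to oldest and stop as soon as the remote already
// has this state: everything older is already on the remote.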
for x in txn.reverse_log(&*channel.read(), None)? {
let (_, (h, m)) = x?;
if txn.remote_has_state(remote_ref, &m)? {
break;
}
let h_int = txn.get_internal(h)?.unwrap();
let h_deser = Hash::from(h);
// For elements that are in the uncached remote changes (theirs_ge_dichotomy),
// don't put those in to_upload since the remote we're pushing to
// already has those changes.
if !txn.remote_has_change(remote_ref, &h)?
&& !self.theirs_ge_dichotomy_set.contains(&h_deser)
{
if inodes.is_empty() {
to_upload.push(h_deser)
} else {
for p in inodes.iter() {
if txn.get_touched_files(p, Some(h_int))?.is_some() {
to_upload.push(h_deser);
break;
}
}
}
}
}
}
// { h | h \in theirs_ge_dichotomy /\ ~(h \in ours_ge_dichotomy) }
// The set of their changes >= dichotomy that aren't
// already known to our set of changes after the dichotomy.
let unknown_changes = self
.theirs_ge_dichotomy
.iter()
.filter_map(|(_, h, _)| {
if self.ours_ge_dichotomy_set.contains(h) {
None
} else {
Some(*h)
}
})
.collect::<Vec<Hash>>();
Ok(PushDelta {
to_upload: to_upload.into_iter().rev().collect(),
remote_ref: self.remote_ref,
remote_unrecs: self.remote_unrecs,
unknown_changes,
})
}
}
/// Create a [`RemoteDelta`] for a [`RemoteRepo::LocalChannel`].
/// Since this case doesn't have a local remote cache to worry about,
/// it mainly just calculates the `to_download` list of changes.
pub(crate) fn update_changelist_local_channel(
remote_channel: &str,
txn: &mut MutTxn<()>,
path: &[String],
current_channel: &ChannelRef<MutTxn<()>>,
repo: &Repository,
specific_changes: &[String],
) -> Result<RemoteDelta<MutTxn<()>>, anyhow::Error> {
if !specific_changes.is_empty() {
let to_download: Result<Vec<libpijul::Hash>, anyhow::Error> = specific_changes
.iter()
.map(|h| Ok(txn.hash_from_prefix(h)?.0))
.collect();
Ok(RemoteDelta {
inodes: HashSet::new(),
to_download: to_download?,
remote_ref: None,
ours_ge_dichotomy_set: HashSet::new(),
theirs_ge_dichotomy: Vec::new(),
theirs_ge_dichotomy_set: HashSet::new(),
remote_unrecs: Vec::new(),
})
} else {
let mut inodes = HashSet::new();
let inodes_ = get_local_inodes(txn, current_channel, repo, path)?;
let mut to_download = Vec::new();
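// Convert the internal `ChangeId` positions into external `Hash` positions
// for the caller.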
inodes.extend(inodes_.iter().map(|x| libpijul::pristine::Position {
change: txn.get_external(&x.change).unwrap().unwrap().into(),
pos: x.pos,
}));
if let Some(remote_channel) = txn.load_channel(remote_channel)? {
let remote_channel = remote_channel.read();
for x in txn.reverse_log(&remote_channel, None)? {
let (h, m) = x?.1;
if txn
.channel_has_state(txn.states(&*current_channel.read()), &m)?
.is_some()
{
break;
}
let h_int = txn.get_internal(h)?.unwrap();
if txn
.get_changeset(txn.changes(&*current_channel.read()), h_int)?
.is_none()
{
if inodes_.is_empty()
|| inodes_.iter().any(|&inode| {
txn.get_rev_touched_files(h_int, Some(&inode))
.unwrap()
.is_some()
})
{
to_download.push(h.into())
}
}
}
}
Ok(RemoteDelta {
inodes,
to_download,
remote_ref: None,
ours_ge_dichotomy_set: HashSet::new(),
theirs_ge_dichotomy: Vec::new(),
theirs_ge_dichotomy_set: HashSet::new(),
remote_unrecs: Vec::new(),
})
}
}
/// Creates a [`RemoteDelta`].
///
/// IF:
/// the RemoteRepo is a [`RemoteRepo::LocalChannel`], delegate to
/// the simpler method [`update_changelist_local_channel`], returning the
/// `to_download` list of changes.
///
/// ELSE:
/// calculate the `to_download` list of changes. Additionally, if there are
/// no remote unrecords, update the local remote cache. If there are remote unrecords,
/// calculate and return information about the difference between our cached version
/// of the remote, and their version of the remote.
pub(crate) async fn update_changelist_pushpull(
&mut self,
txn: &mut MutTxn<()>,
path: &[String],
current_channel: &ChannelRef<MutTxn<()>>,
force_cache: Option<bool>,
repo: &Repository,
specific_changes: &[String],
) -> Result<RemoteDelta<MutTxn<()>>, anyhow::Error> {
debug!("update_changelist");
if let RemoteRepo::LocalChannel(c) = self {
return update_changelist_local_channel(
c,
txn,
path,
current_channel,
repo,
specific_changes,
);
}
let id = self.get_id(txn).await?.unwrap();
let mut remote_ref = if let Some(name) = self.name() {
txn.open_or_create_remote(id, name).unwrap()
} else {
unreachable!()
};
let dichotomy_n = self
.dichotomy_changelist(txn, &remote_ref.lock().remote)
.await?;
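// `dichotomy_n` marks how far our cached changelist is known to agree with the
// remote; entries from that point on are re-checked and possibly re-fetched.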
let ours_ge_dichotomy: Vec<(u64, Hash)> = txn
.iter_remote(&remote_ref.lock().remote, dichotomy_n)?
.filter_map(|k| {
debug!("filter_map {:?}", k);
match k.unwrap() {
(k, libpijul::pristine::Pair { a: hash, .. }) => {
let (k, hash) = (u64::from(*k), Hash::from(*hash));
if k >= dichotomy_n {
Some((k, hash))
} else {
None
}
}
}
})
.collect();
let (inodes, theirs_ge_dichotomy) =
self.download_changelist_nocache(dichotomy_n, path).await?;
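// Build set versions of both views (ours from the cache, theirs freshly
// downloaded above) for the membership tests below.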
let ours_ge_dichotomy_set = ours_ge_dichotomy
.iter()
.map(|(_, h)| h)
.copied()
.collect::<HashSet<Hash>>();
let theirs_ge_dichotomy_set = theirs_ge_dichotomy
.iter()
.map(|(_, h, _)| h)
.copied()
.collect::<HashSet<Hash>>();
// remote_unrecs = {x: (u64, Hash) | x \in ours_ge_dichot /\ ~(x \in theirs_ge_dichot) /\ x \in current_channel }
let mut remote_unrecs = Vec::new();
for (n, hash) in &ours_ge_dichotomy {
if theirs_ge_dichotomy_set.contains(hash) {
// If this change is still present in the remote, skip
continue;
} else if txn.get_revchanges(&current_channel, &hash)?.is_none() {
// If this unrecord wasn't in our current channel, skip
continue;
} else {
remote_unrecs.push((*n, *hash))
}
}
let should_cache = force_cache.unwrap_or_else(|| remote_unrecs.is_empty());
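// When caching is safe (no remote unrecords, or the user forced it), replace
// our cached entries from the dichotomy point onward with the remote's.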
if should_cache {
for (k, _) in ours_ge_dichotomy.iter().copied() {
txn.del_remote(&mut remote_ref, k)?;
}
for (n, h, m) in theirs_ge_dichotomy.iter().copied() {
txn.put_remote(&mut remote_ref, n, (h, m))?;
}
}
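// Helper closures: does the current channel already have this remote state,
// and is this change absent from the current channel?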
let state_cond = |txn: &MutTxn<()>, merkle: &libpijul::pristine::SerializedMerkle| {
txn.channel_has_state(txn.states(&*current_channel.read()), merkle)
.map(|x| x.is_some())
};
let change_cond = |txn: &MutTxn<()>, hash: &Hash| {
txn.get_revchanges(&current_channel, hash)
.unwrap()
.is_none()
};
// IF:
// The user only wanted to push/pull specific changes
// ELIF:
// The user specified no changes and there were no remote unrecords
// affecting the current channel, so we can auto-update
// the local remote cache
// ELSE:
// The user specified no changes but there were remote unrecords
// affecting the current channel, so we can't auto-update
// the local remote cache.
if !specific_changes.is_empty() {
let to_download = specific_changes
.iter()
.map(|h| Ok(txn.hash_from_prefix(h)?.0))
.collect::<Result<Vec<_>, anyhow::Error>>();
Ok(RemoteDelta {
inodes,
remote_ref: Some(remote_ref),
to_download: to_download?,
ours_ge_dichotomy_set,
theirs_ge_dichotomy,
theirs_ge_dichotomy_set,
remote_unrecs,
})
} else if should_cache {
let mut to_download: Vec<Hash> = Vec::new();
for thing in txn.iter_remote(&remote_ref.lock().remote, 0)? {
let (_, libpijul::pristine::Pair { a: hash, b: merkle }) = thing?;
if state_cond(txn, &merkle)? {
break;
} else if change_cond(txn, &hash.into()) {
to_download.push(Hash::from(hash));
}
}
Ok(RemoteDelta {
inodes,
remote_ref: Some(remote_ref),
to_download,
ours_ge_dichotomy_set,
theirs_ge_dichotomy,
theirs_ge_dichotomy_set,
remote_unrecs,
})
} else {
let mut to_download: Vec<Hash> = Vec::new();
for thing in txn.iter_remote(&remote_ref.lock().remote, 0)? {
let (n, libpijul::pristine::Pair { a: hash, b: merkle }) = thing?;
if u64::from(*n) < dichotomy_n {
if state_cond(txn, &merkle)? {
continue;
} else if change_cond(txn, &hash.into()) {
to_download.push(Hash::from(hash));
}
}
}
for (_, hash, merkle) in &theirs_ge_dichotomy {
if state_cond(txn, &merkle.into())? {
continue;
} else if change_cond(txn, &hash) {
to_download.push(Hash::from(*hash));
}
}
Ok(RemoteDelta {
inodes,
remote_ref: Some(remote_ref),
to_download,
ours_ge_dichotomy_set,
theirs_ge_dichotomy,
theirs_ge_dichotomy_set,
remote_unrecs,
})
}
}
/// Get the list of the remote's changes that come after `from: u64`.
/// Instead of immediately updating the local cache of the remote, return
/// the change info without changing the cache.
pub async fn download_changelist_nocache(
&mut self,
from: u64,
paths: &[String],
) -> Result<(HashSet<Position<Hash>>, Vec<(u64, Hash, Merkle)>), anyhow::Error> {
let mut v = Vec::new();
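// Each backend invokes `f` once per changelist entry; here it simply collects
// the `(position, hash, state)` triples into `v`.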
let f = |v: &mut Vec<(u64, Hash, Merkle)>, n, h, m| {
debug!("no cache: {:?}", h);
Ok(v.push((n, h, m)))
};
let r = match *self {
RemoteRepo::Local(ref mut l) => l.download_changelist(f, &mut v, from, paths)?,
RemoteRepo::Ssh(ref mut s) => s.download_changelist(f, &mut v, from, paths).await?,
RemoteRepo::Http(ref h) => h.download_changelist(f, &mut v, from, paths).await?,
RemoteRepo::LocalChannel(_) => HashSet::new(),
RemoteRepo::None => unreachable!(),
};
Ok((r, v))
}
/// Like [`download_changelist_nocache`] above, but appends the downloaded
/// hashes to the caller-supplied `v` and returns only the set of inodes
/// corresponding to `paths`.
pub async fn download_changelist_nocache(
&mut self,
from: u64,
paths: &[String],
v: &mut Vec<Hash>,
) -> Result<HashSet<Position<Hash>>, anyhow::Error> {
let f = |v: &mut Vec<Hash>, _n, h, _m| {
debug!("no cache: {:?}", h);
Ok(v.push(h))
};
let r = match *self {
RemoteRepo::Local(ref mut l) => l.download_changelist(f, v, from, paths)?,
RemoteRepo::Ssh(ref mut s) => s.download_changelist(f, v, from, paths).await?,
RemoteRepo::Http(ref h) => h.download_changelist(f, v, from, paths).await?,
RemoteRepo::LocalChannel(_) => HashSet::new(),
RemoteRepo::None => unreachable!(),
};
Ok(r)
}
/// Gets the `to_upload` vector while trying to auto-update
/// the local cache if possible. Also calculates whether the remote
/// has any changes we don't know about.
async fn to_upload(
&self,
txn: &mut MutTxn<()>,
channel: &mut ChannelRef<MutTxn<()>>,
repo: &Repository,
remote: &mut RemoteRepo,
) -> Result<PushDelta<MutTxn<()>>, anyhow::Error> {
let remote_delta = remote
.update_changelist_pushpull(
txn,
&self.path,
channel,
Some(self.force_cache),
repo,
self.changes.as_slice(),
)
.await?;
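// A LocalChannel remote has no cache and no unrecord bookkeeping, so it uses
// the simpler conversion; everything else goes through the full remote path.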
if let RemoteRepo::LocalChannel(ref remote_channel) = remote {
remote_delta.to_local_channel_push(
remote_channel,
txn,
self.path.as_slice(),
channel,
repo,
)
} else {
remote_delta.to_remote_push(txn, self.path.as_slice(), channel, repo)
}
}
let remote_changes = remote
.update_changelist(&mut *txn.write(), &self.path)
.await?;
let mut channel = txn.write().open_or_create_channel(&channel_name)?;
let PushDelta {
remote_ref,
to_upload,
remote_unrecs,
unknown_changes,
} = self
.to_upload(&mut *txn.write(), &mut channel, &repo, &mut remote)
.await?;
let mut paths = HashSet::new();
for path in self.path.iter() {
let (p, ambiguous) = txn
.read()
.follow_oldest_path(&repo.changes, &channel, path)?;
if ambiguous {
bail!("Ambiguous path: {:?}", path)
}
paths.insert(p);
paths.extend(
libpijul::fs::iter_graph_descendants(&*txn.read(), &channel.read().graph, p)?
.map(|x| x.unwrap()),
);
}
let mut to_upload: Vec<Hash> = Vec::new();
{
let txn = txn.read();
for x in txn.reverse_log(&*channel.read(), None)? {
let (_, (h, m)) = x?;
if let Some((_, ref remote_changes)) = remote_changes {
if txn.remote_has_state(remote_changes, &m)? {
break;
}
let h_int = txn.get_internal(h)?.unwrap();
if !txn.remote_has_change(&remote_changes, &h)? {
if paths.is_empty() {
to_upload.push(h.into())
} else {
for p in paths.iter() {
if txn.get_touched_files(p, Some(h_int))?.is_some() {
to_upload.push(h.into());
break;
}
}
}
}
} else if let crate::remote::RemoteRepo::LocalChannel(ref remote_channel) = remote {
if let Some(channel) = txn.load_channel(remote_channel)? {
let channel = channel.read();
let h_int = txn.get_internal(h)?.unwrap();
if txn.get_changeset(txn.changes(&channel), h_int)?.is_none() {
if paths.is_empty() {
to_upload.push(h.into())
} else {
for p in paths.iter() {
if txn.get_touched_files(p, Some(h_int))?.is_some() {
to_upload.push(h.into());
break;
}
}
}
}
}
}
}
}
/// Gets the `to_download` vec and calculates any remote unrecords.
/// If the local remote cache can be auto-updated, it will be.
async fn to_download(
&self,
txn: &mut MutTxn<()>,
channel: &mut ChannelRef<MutTxn<()>>,
repo: &mut Repository,
remote: &mut RemoteRepo,
) -> Result<RemoteDelta<MutTxn<()>>, anyhow::Error> {
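// Only request forced caching when the user asked for it; otherwise pass None
// and let `update_changelist_pushpull` decide based on remote unrecords.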
let force_cache = if self.force_cache {
Some(self.force_cache)
} else {
None
};
let delta = remote
.update_changelist_pushpull(
txn,
&self.path,
channel,
force_cache,
repo,
self.changes.as_slice(),
)
.await?;
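// Download the changes in `delta.to_download` (restricted to the requested
// inodes); the list returned by `pull` replaces `to_download` in the delta.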
let to_download = remote
.pull(
repo,
txn,
channel,
delta.to_download.as_slice(),
&delta.inodes,
false,
)
.await?;
Ok(RemoteDelta {
to_download,
..delta
})
}
let mut inodes: HashSet<libpijul::pristine::Position<libpijul::Hash>> = HashSet::new();
let remote_changes = remote
.update_changelist(&mut *txn.write(), &self.path)
.await?;
let RemoteDelta {
inodes,
remote_ref,
mut to_download,
remote_unrecs,
..
} = self
.to_download(&mut *txn.write(), &mut channel, &mut repo, &mut remote)
.await?;
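// If no specific changes were requested, compute `to_download` from the cached
// remote (or, for a LocalChannel, from that channel's log); otherwise resolve
// the given change prefixes directly.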
let mut to_download = if self.changes.is_empty() {
debug!("changelist done");
let mut to_download: Vec<Hash> = Vec::new();
if let Some((inodes_, remote_changes)) = remote_changes.as_ref() {
inodes.extend(inodes_.into_iter());
let txn = txn.read();
for x in txn.iter_remote(&remote_changes.lock().remote, 0)? {
let p = x?.1; // (h, m)
if txn
.channel_has_state(txn.states(&*channel.read()), &p.b)?
.is_some()
{
break;
} else if txn.get_revchanges(&channel, &p.a.into())?.is_none() {
to_download.push(p.a.into())
}
}
} else if let crate::remote::RemoteRepo::LocalChannel(ref remote_channel) = remote {
let mut inodes_ = HashSet::new();
let txn = txn.read();
for path in self.path.iter() {
let (p, ambiguous) = txn.follow_oldest_path(&repo.changes, &channel, path)?;
if ambiguous {
bail!("Ambiguous path: {:?}", path)
}
inodes_.insert(p);
inodes_.extend(
libpijul::fs::iter_graph_descendants(&*txn, &channel.read().graph, p)?
.map(|x| x.unwrap()),
);
}
inodes.extend(inodes_.iter().map(|x| libpijul::pristine::Position {
change: txn.get_external(&x.change).unwrap().unwrap().into(),
pos: x.pos,
}));
if let Some(remote_channel) = txn.load_channel(remote_channel)? {
let remote_channel = remote_channel.read();
for x in txn.reverse_log(&remote_channel, None)? {
let (h, m) = x?.1;
if txn
.channel_has_state(txn.states(&*channel.read()), &m)?
.is_some()
{
break;
}
let h_int = txn.get_internal(h)?.unwrap();
if txn
.get_changeset(txn.changes(&*channel.read()), h_int)?
.is_none()
{
if inodes_.is_empty()
|| inodes_.iter().any(|&inode| {
txn.get_rev_touched_files(h_int, Some(&inode))
.unwrap()
.is_some()
})
{
to_download.push(h.into())
}
}
}
}
} else {
inodes = remote
.download_changelist_nocache(0, &self.path, &mut to_download)
.await?
}
to_download
} else {
let txn = txn.read();
let r: Result<Vec<libpijul::Hash>, anyhow::Error> = self
.changes
.iter()
.map(|h| Ok(txn.hash_from_prefix(h)?.0))
.collect();
r?
};
debug!("recording");
let mut to_download = remote
.pull(
&mut repo,
&mut *txn.write(),
&mut channel,
&mut to_download,
&inodes,
self.all,
)
.await?;
if let Some((_, ref r)) = remote_changes {
if let Some(ref r) = remote_ref {
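/// Opens the user's editor with a summary of the changes that were unrecorded
/// in the remote but are still present locally; the push resumes once the
/// buffer is closed.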
fn notify_remote_unrecords(repo: &Repository, remote_unrecs: &[(u64, Hash)]) {
use std::fmt::Write;
if !remote_unrecs.is_empty() {
let mut s = format!(
"# The following changes have been unrecorded in the remote.\n\
# This buffer is only being used to inform you of the remote change;\n\
# your push will continue when it is closed.\n"
);
for (_, hash) in remote_unrecs {
let header = &repo.changes.get_change(hash).unwrap().header;
s.push_str("#\n");
writeln!(&mut s, "# {}", header.message).expect("Infallible write to String");
writeln!(&mut s, "# {}", header.timestamp).expect("Infallible write to String");
writeln!(&mut s, "# {}", hash.to_base32()).expect("Infallible write to String");
}
if let Err(e) = edit::edit(s.as_str()) {
log::error!(
"Notification of remote unrecords experienced an error: {}",
e
);
}
}
}
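/// Opens the user's editor listing up to five changes that are new in the
/// remote and not yet known locally, plus a count of any remaining ones.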
fn notify_unknown_changes(unknown_changes: &[Hash]) {
use std::fmt::Write;
if unknown_changes.is_empty() {
return;
} else {
let mut s = format!(
"# The following changes are new in the remote\n# (and are not yet known to your local copy):\n#\n"
);
let rest_len = unknown_changes.len().saturating_sub(5);
for hash in unknown_changes.iter().take(5) {
writeln!(&mut s, "# {}", hash.to_base32()).expect("Infallible write to String");
}
if rest_len > 0 {
let plural = if rest_len == 1 { "" } else { "s" };
writeln!(&mut s, "# ... plus {} more change{}", rest_len, plural)
.expect("Infallible write to String");
}
if let Err(e) = edit::edit(s.as_str()) {
log::error!(
"Notification of unknown changes experienced an error: {}",
e
);
}
}
}