5PJAXSZ7AV7WEGMHTKWFOYWN5YUFRFZDZUXNIMLRPRTADZQEN4JAC
} else {
panic!()
}
}
}
impl Endian<Pht> {
    /// Encode `u` into the byte order requested by `id`.
    ///
    /// Panics on an unrecognized endianness value.
    fn from(id: Endianness, u: Pht) -> Self {
        let raw = if id == Endianness::LITTLE {
            u.0.to_le()
        } else if id == Endianness::BIG {
            u.0.to_be()
        } else {
            panic!()
        };
        Endian(Pht(raw))
    }
}
impl Endian<DynTag64> {
    /// Encode `u` into the byte order requested by `id`.
    ///
    /// Panics on an unrecognized endianness value.
    fn from(id: Endianness, u: DynTag64) -> Self {
        let raw = if id == Endianness::LITTLE {
            u.0.to_le()
        } else if id == Endianness::BIG {
            u.0.to_be()
        } else {
            panic!()
        };
        Endian(DynTag64(raw))
    }
}
impl Endian<DynTag32> {
    /// Encode `u` into the byte order requested by `id`.
    ///
    /// Panics on an unrecognized endianness value, matching the sibling
    /// `Endian<Pht>` / `Endian<DynTag64>` impls.
    fn from(id: Endianness, u: DynTag32) -> Self {
        if id == Endianness::LITTLE {
            Endian(DynTag32(u.0.to_le()))
        } else if id == Endianness::BIG {
            Endian(DynTag32(u.0.to_be()))
        } else {
            // BUG FIX: this branch was missing, so the if/else-if could fall
            // through producing `()` where `Self` is required (and silently
            // diverged from the other Endian impls).
            panic!()
        }
    }
/// Write the section's address field, encoded with endianness `h`.
///
/// NOTE(review): the 32-bit arm truncates `u` with `as u32` — assumes the
/// address fits in 32 bits for ELF32 files; confirm callers guarantee this.
pub fn set_addr(&mut self, h: Endianness, u: usize) {
    match self {
        SectionHeaderMut_::B32(s) => s.addr = <Endian<u32>>::from(h, u as u32),
        SectionHeaderMut_::B64(s) => s.addr = <Endian<u64>>::from(h, u as u64),
    }
}
/// Return the section's file offset, decoded with endianness `h`; the
/// 32-bit value is widened to `u64`.
pub fn offset(&self, h: Endianness) -> u64 {
    // BUG FIX: the match previously also contained arms for a different
    // enum (`ProgramHeaderMut_`), duplicate patterns, and assignments to
    // `filesz`/`vaddr` referencing an out-of-scope `u` — lines spliced in
    // from unrelated setters. Only the two `SectionHeaderMut_` arms belong
    // in this getter.
    match self {
        SectionHeaderMut_::B32(s) => EndianT::endian(s.offset, h) as u64,
        SectionHeaderMut_::B64(s) => EndianT::endian(s.offset, h),
    }
}
/// Write the segment type field, encoded with endianness `h`.
pub fn set_type(&mut self, h: Endianness, u: Pht) {
    // Both variants store the same encoded type; compute it once.
    let encoded = <Endian<Pht>>::from(h, u);
    match self {
        ProgramHeaderMut_::B32(s) => s.type_ = encoded,
        ProgramHeaderMut_::B64(s) => s.type_ = encoded,
    }
}
/// Write the segment flags, encoded with endianness `h`.
///
/// The 32- and 64-bit headers keep the flags in differently named fields
/// (`flags32` / `flags64`), but both are `u32`-sized.
pub fn set_flags(&mut self, h: Endianness, u: u32) {
    let encoded = <Endian<u32>>::from(h, u);
    match self {
        ProgramHeaderMut_::B32(s) => s.flags32 = encoded,
        ProgramHeaderMut_::B64(s) => s.flags64 = encoded,
    }
}
/// Write the segment alignment field, encoded with endianness `h`.
///
/// NOTE(review): the 32-bit arm truncates with `as u32`; assumes ELF32
/// alignments fit in 32 bits — confirm at call sites.
pub fn set_align(&mut self, h: Endianness, u: u64) {
    match self {
        ProgramHeaderMut_::B32(s) => s.align = <Endian<u32>>::from(h, u as u32),
        ProgramHeaderMut_::B64(s) => s.align = <Endian<u64>>::from(h, u),
    }
}
/// Decode and return the segment type field using endianness `id`.
fn type_(&self, id: Endianness) -> Pht {
    match self {
        ProgramHeaderMut_::B32(h) => EndianT::endian(h.type_, id),
        ProgramHeaderMut_::B64(h) => EndianT::endian(h.type_, id),
    }
}
/// Decode the segment's file size, widening the 32-bit value to `u64`.
pub fn filesz(&self, h: Endianness) -> u64 {
    match self {
        ProgramHeaderMut_::B32(s) => EndianT::endian(s.filesz, h) as u64,
        ProgramHeaderMut_::B64(s) => EndianT::endian(s.filesz, h),
    }
}
pub fn memsz(&self, h: Endianness) -> u64 {
match self {
ProgramHeaderMut_::B32(s) => EndianT::endian(s.memsz, h) as u64,
ProgramHeaderMut_::B64(s) => EndianT::endian(s.memsz, h),
dynamic = match id.class {
Bits::B32 => {
assert_eq!(offset % std::mem::align_of::<Elf32Dyn>(), 0);
assert!(offset + size <= self.map.len());
Dynamic::B32(unsafe {
std::slice::from_raw_parts(
self.map.as_ptr().add(offset) as *const u8 as *const Elf32Dyn,
size / std::mem::size_of::<Elf32Dyn>(),
)
})
}
Bits::B64 => Dynamic::B64(unsafe {
assert_eq!(offset % std::mem::align_of::<Elf64Dyn>(), 0);
assert!(offset + size <= self.map.len());
std::slice::from_raw_parts(
self.map.as_ptr().add(offset) as *const u8 as *const Elf64Dyn,
size / std::mem::size_of::<Elf64Dyn>(),
)
}),
_ => panic!(),
};
assert!(offset + size <= self.map.len());
dynamic =
unsafe { dynamic_from_raw(id.class, self.map.as_ptr().add(offset), size) };
let mut next_off = header.shoff(id);
for sec in self.sections::<U>() {
let s: u64 = EndianT::endian(sec.offset, id).into();
let mut next_off = self.f.metadata().unwrap().len();
for s in self
.sections::<U>()
.iter()
.map(|sec| EndianT::endian(sec.offset, id).into())
.chain(std::iter::once(header.shoff(id)))
{
// First find a large enough deleted section = gap
// between sections.
let mut sections: Vec<_> = (0..h.shnum(id))
.map(|p_| {
let sh = self.section(p_ as usize);
(sh.offset(id) as usize, sh.size(id) as usize, p_ as isize)
})
.collect();
sections.push((
h.shoff(id) as usize,
h.shentsize(id) as usize * h.shnum(id) as usize,
-1,
));
sections.sort_by_key(|(o, _, _)| *o);
let mut sections: Vec<_> = if class == Bits::B32 {
self.sections::<u32>()
.iter()
.map(|s| {
(
EndianT::endian(s.offset, id) as usize,
EndianT::endian(s.size, id) as usize,
)
})
.collect()
} else {
self.sections::<u64>()
.iter()
.map(|s| {
(
EndianT::endian(s.offset, id) as usize,
EndianT::endian(s.size, id) as usize,
)
})
.collect()
};
sections.sort();
let s = sections
.iter()
.position(|(_, _, i)| *i == interp_i as isize)
.unwrap();
let mut last_vaddr = 0;
let mut page_size = 0;
let mut programs: Vec<_> = (0..h.phnum(id))
.map(|p_| {
let sh = self.program(p_ as usize);
last_vaddr = last_vaddr.max((sh.vaddr(id) + sh.memsz(id)) as usize);
let type_ = sh.type_(id);
if type_ == Pht::LOAD {
page_size = sh.align(id) as usize;
}
(
sh.offset(id) as usize,
sh.filesz(id) as usize,
type_,
sh.flags(id),
p_ as isize,
)
})
.collect();
let rounded_last_vaddr = align_next(last_vaddr as usize, page_size);
let mut end = 0;
let mut new_offset = h.shoff(id) as usize;
let mut gap = usize::MAX;
let s_off = sections[s].0;
let p = programs
.iter()
.position(|(o, e, t, _, _)| *o <= s_off && *o + *e > s_off && *t == Pht::LOAD)
.expect("Section isn't loaded, no need to do anything");
assert!(extra_size + total <= self.map.len());
for (off, len, _) in sections.iter().skip(s + 1) {
if *off >= end + new_len {
// Is this the same segment ?
if end <= programs[p].0 + programs[p].1 {
new_offset = end;
break;
}
}
end = end.max(*off + *len)
}
self.map.as_ptr().add(shoff as usize),
self.map.as_mut_ptr().add(shoff as usize + extra_size),
section_table_len,
new.as_ptr(),
self.map.as_mut_ptr().add(new_offset),
new.len(),
)
}
let mut sec = self.section_mut(interp_i);
let addr = new_offset + sec.addr(id) as usize - sec.offset(id) as usize;
sec.set_addr(id, addr);
sec.set_offset(id, new_offset);
sec.set_size(id, new.len());
if new_offset == programs[p].0 + programs[p].1 {
// Extend segment
let mut p = self.program_mut(programs[p].4 as usize);
p.set_filesz(id, p.filesz(id) as usize + new.len());
p.set_memsz(id, p.memsz(id) as usize + new.len());
}
// If the section has its own segment, update that too.
for (o, l, _, _, p) in &programs {
if *o == s_off && *l == s_len {
let mut p = self.program_mut(*p as usize);
p.set_offset(id, new_offset);
p.set_filesz(id, new.len());
p.set_memsz(id, new.len());
p.set_vaddr(id, addr);
}
}
} else {
// Add a segment at the end, possibly moving the first
// section after the section header table.
let pht_end = h.phoff(id) as usize + h.phentsize(id) as usize * h.phnum(id) as usize;
// How many sections do we have to move in order to add our
// segment?
let first_section_after_pht = sections
.iter()
.filter_map(|(o, _, _)| {
if *o >= pht_end as usize {
Some(*o)
} else {
None
}
})
.min()
.unwrap_or(0);
let new_offset;
if first_section_after_pht < pht_end + h.phentsize(id) as usize {
// In the absolute worst case, we need 3 new LOAD
// segments: our new one, and then one for each end of
// the sequence of sections we need to move to make
// room for the program headers.
let new_segments = 3;
let limit = pht_end + new_segments * h.phentsize(id) as usize;
let mut init = 0;
let mut end = 0;
let mut init_off = 0;
let mut end_off = 0;
for (n, (o, _, _)) in sections.iter().enumerate() {
if *o >= pht_end && init == 0 {
init = n;
init_off = *o;
}
if *o >= limit && end == 0 {
end = n;
end_off = *o;
}
}
let current_len = self.f.metadata().unwrap().len() as usize;
let rounded_len = align_next(current_len, page_size);
let rounded_moved =
align_next(end_off - init_off, page_size as usize);
new_offset = rounded_len as usize + rounded_moved;
// New pages for the moved + new pages for new.
self.f
.set_len((rounded_len + rounded_moved + new.len()) as u64)
.unwrap();
self.map = unsafe { memmap::MmapOptions::new().map_mut(self.f).unwrap() };
unsafe {
std::ptr::copy(
self.map.as_ptr().add(init_off),
self.map.as_mut_ptr().add(rounded_len as usize),
end_off - init_off,
);
}
let mut pi = 0;
for s in init..end {
let sec_n = sections[s].2;
let mut sec = self.section_mut(sec_n as usize);
let new_offset = rounded_len as usize + sec.offset(id) as usize - init_off;
sec.set_offset(id, new_offset);
sec.set_addr(
id,
new_offset + rounded_last_vaddr - rounded_len as usize,
);
while pi < programs.len() && programs[pi].0 <= sections[s].0 {
if programs[pi].0 == sections[s].0 && programs[pi].2 != Pht::LOAD {
let mut p = self.program_mut(programs[pi].4 as usize);
p.set_offset(id, new_offset);
p.set_vaddr(
id,
new_offset + rounded_last_vaddr - rounded_len as usize,
);
}
pi += 1;
}
}
self.add_load_segment(
id,
rounded_len as usize,
end_off - init_off,
rounded_last_vaddr,
4,
page_size,
self.header_mut().set_shoff(id, shoff as usize + extra_size);
last_vaddr = rounded_last_vaddr + rounded_moved as usize;
} else {
let current_len = self.f.metadata().unwrap().len() as usize;
let rounded_len = align_next(current_len, page_size);
new_offset = rounded_len;
// New pages for new.
self.f.set_len((rounded_len + new.len()) as u64).unwrap();
self.map = unsafe { memmap::MmapOptions::new().map_mut(self.f).unwrap() };
assert!(new_offset as usize + new.len() <= self.map.len());
unsafe {
// Write the new value
self.map
.as_mut_ptr()
.add(new_offset as usize)
.write_bytes(0, extra_size);
std::ptr::copy(
new.as_ptr() as *const i8,
self.map.as_mut_ptr().add(new_offset as usize) as *mut i8,
new.len(),
);
let mut sec = self.section_mut(interp_i);
sec.set_offset(id, new_offset);
sec.set_size(id, new.len());
sec.set_addr(id, last_vaddr as usize);
unsafe {
std::ptr::copy(
new.as_ptr(),
self.map.as_mut_ptr().add(new_offset),
new.len(),
)
}
self.add_load_segment(id, new_offset, new.len(), last_vaddr as usize, 4, page_size);
// Update the section
let mut s = self.section_mut(interp_i as usize);
s.set_offset(id, new_offset);
s.set_size(id, new_len);
// Do the same in the program header table
let phnum = self.header().phnum(id);
for i in 0..phnum {
let mut p = self.program_mut(i as usize);
let p_off = p.offset(id);
if p_off == off {
// the new offset is the old shoff
p.set_offset(id, new_offset as usize);
p.set_filesz(id, new_len);
p.set_memsz(id, new_len);
fn add_load_segment(
&mut self,
id: Endianness,
offset: usize,
len: usize,
vaddr: usize,
flags: u32,
align: usize,
) {
let mut hh = self.header_mut();
let phentsize = hh.phentsize(id) as usize;
let phnum = hh.phnum(id);
hh.set_phnum(id, phnum + 1);
let mut p = self.program_mut(phnum as usize);
p.set_offset(id, offset);
p.set_memsz(id, len);
p.set_filesz(id, len);
p.set_vaddr(id, vaddr);
p.set_type(id, Pht::LOAD);
p.set_flags(id, flags);
p.set_align(id, align as u64);
for p in 0..phnum {
let mut p = self.program_mut(p as usize);
if p.type_(id) == Pht::PHDR {
p.set_filesz(id, p.filesz(id) as usize + phentsize);
p.set_memsz(id, p.memsz(id) as usize + phentsize);
break;
std::mem::drop(self);
let current_len = f.metadata()?.len();
f.set_len(current_len + extra_room as u64)?;
let mut elf = Elf::open(f)?;
let id = self.id().data;
if !new_runpath.is_empty() {
set_runpath(id, &mut dynstr_owned, &mut dynamic_owned, &new_runpath)
}
if !new_runpath.is_empty() {
set_runpath(&mut dynstr_owned, &mut dynamic_owned, &new_runpath)
if new_needed.is_some() || !new_runpath.is_empty() {
self.update_section(b".dynstr", &dynstr_owned).unwrap();
self
.update_section(b".dynamic", dynamic_owned.as_bytes())
.unwrap();
if new_needed.is_some() || !new_runpath.is_empty() {
extra += elf.update_section(b".dynstr", &dynstr_owned).unwrap()
+ elf
.update_section(b".dynamic", dynamic_owned.as_bytes())
.unwrap();
let id = self.id().data;
match dynamic_owned {
OwnedDynamic::B32(ref mut v) => {
for t in v.iter_mut() {
self.update_dynamic::<u32>(id, t)
}
}
OwnedDynamic::B64(ref mut v) => {
for t in v.iter_mut() {
self.update_dynamic::<u64>(id, t)
}
}
fn update_dynamic<F: Flags>(&self, id: Endianness, f: &mut F::Dyn) {
if F::tag(id, *f) == F::dt_strtab() {
let s = self.find_section::<F>(b".dynstr").unwrap().unwrap();
let sec = self.section(s);
F::set_d_un(id, f, sec.addr(id) as usize)
} else if F::tag(id, *f) == F::dt_strsz() {
let s = self.find_section::<F>(b".dynstr").unwrap().unwrap();
let sec = self.section(s);
F::set_d_un(id, f, sec.size(id) as usize)
}
}
OwnedDynamic::B32(b) => remove_trailing_needed_runpath_::<u32>(dynstr, b),
OwnedDynamic::B64(b) => remove_trailing_needed_runpath_::<u64>(dynstr, b),
OwnedDynamic::B32(b) => remove_trailing_needed_runpath_::<u32>(id, dynstr, b),
OwnedDynamic::B64(b) => remove_trailing_needed_runpath_::<u64>(id, dynstr, b),
OwnedDynamic::B32(b) => retain_needed_::<u32>(dynstr, b, needed),
OwnedDynamic::B64(b) => retain_needed_::<u64>(dynstr, b, needed),
OwnedDynamic::B32(b) => retain_needed_::<u32>(id, dynstr, b, needed),
OwnedDynamic::B64(b) => retain_needed_::<u64>(id, dynstr, b, needed),
OwnedDynamic::B32(b) => set_runpath_::<u32>(dynstr, b, rpath),
OwnedDynamic::B64(b) => set_runpath_::<u64>(dynstr, b, rpath),
OwnedDynamic::B32(b) => set_runpath_::<u32>(id, dynstr, b, rpath),
OwnedDynamic::B64(b) => set_runpath_::<u64>(id, dynstr, b, rpath),
impl<'a> Iterator for Dynamic<'a> {
    type Item = Elf64Dyn;

    /// Pop the next dynamic entry, widening 32-bit entries to the 64-bit
    /// representation so both classes yield a uniform item type.
    fn next(&mut self) -> Option<Self::Item> {
        match self {
            Dynamic::B32(entries) => {
                let (head, rest) = entries.split_first()?;
                let head = *head;
                *entries = rest;
                Some(Elf64Dyn {
                    tag: DynTag64(head.tag.0 as u64),
                    d_un: head.d_un as u64,
                })
            }
            Dynamic::B64(entries) => {
                let (head, rest) = entries.split_first()?;
                *entries = rest;
                Some(*head)
            }
        }
    }
}
fn d_un(_: Self::Dyn) -> usize;
fn set_d_un(_: &mut Self::Dyn, _: usize);
fn tag(_: Self::Dyn) -> Self::DynTag;
fn d_un(id: Endianness, _: Self::Dyn) -> usize;
fn set_d_un(id: Endianness, _: &mut Self::Dyn, _: usize);
fn tag(id: Endianness, _: Self::Dyn) -> Self::DynTag;
fn set_shoff(&mut self, id: Endianness, off: usize) {
/// Overwrite the program-header count field, encoded with endianness `id`.
pub fn set_phnum(&mut self, id: Endianness, off: u16) {
    // Both header variants store the count identically; encode once.
    let encoded = <Endian<u16>>::from(id, off);
    match self {
        HeaderMut_::B32(h) => h.phnum = encoded,
        HeaderMut_::B64(h) => h.phnum = encoded,
    }
}
/// Decode and return the program-header count field using endianness `id`.
pub fn phnum(&self, id: Endianness) -> u16 {
    match self {
        HeaderMut_::B32(h) => EndianT::endian(h.phnum, id),
        HeaderMut_::B64(h) => EndianT::endian(h.phnum, id),
    }
}
pub fn phentsize(&self, id: Endianness) -> u16 {
/// Reinterpret `size` bytes at `ptr` as a slice of dynamic-section entries
/// of the given ELF class. Any trailing bytes smaller than one entry are
/// dropped by the integer division.
///
/// # Safety
/// `ptr` must be valid for reads of `size` bytes for the whole lifetime
/// `'a`, and must not be mutated through another alias during `'a`.
/// Alignment is checked with an assert below; validity is on the caller.
unsafe fn dynamic_from_raw<'a>(class: Bits, ptr: *const u8, size: usize) -> Dynamic<'a> {
    match class {
        Bits::B32 => {
            assert_eq!((ptr as usize) % std::mem::align_of::<Elf32Dyn>(), 0);
            // SAFETY: alignment asserted above; the caller guarantees
            // `ptr..ptr+size` is readable for `'a`.
            let entries = unsafe {
                std::slice::from_raw_parts(
                    ptr as *const Elf32Dyn,
                    size / std::mem::size_of::<Elf32Dyn>(),
                )
            };
            Dynamic::B32(entries)
        }
        Bits::B64 => {
            assert_eq!((ptr as usize) % std::mem::align_of::<Elf64Dyn>(), 0);
            // SAFETY: same contract as the 32-bit arm, for 64-bit entries.
            let entries = unsafe {
                std::slice::from_raw_parts(
                    ptr as *const Elf64Dyn,
                    size / std::mem::size_of::<Elf64Dyn>(),
                )
            };
            Dynamic::B64(entries)
        }
        _ => panic!(),
    }
}