#include "MCTargetDesc/X86BaseInfo.h"
#include "X86.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/CodeGen/LazyMachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSizeOpts.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <iterator>
using namespace llvm;
#define DEBUG_TYPE "x86-optimize-LEAs"

// Command-line escape hatch: allows disabling this pass entirely
// (-disable-x86-lea-opt).
static cl::opt<bool>
    DisableX86LEAOpt("disable-x86-lea-opt", cl::Hidden,
                     cl::desc("X86: Disable LEA optimizations."),
                     cl::init(false));

// Pass statistics, reported with -stats.
STATISTIC(NumSubstLEAs, "Number of LEA instruction substitutions");
STATISTIC(NumRedundantLEAs, "Number of redundant LEA instructions removed");
// Forward declarations for helpers used by MemOpKey before their definitions
// below.
static inline bool isIdenticalOp(const MachineOperand &MO1,
                                 const MachineOperand &MO2);
static bool isSimilarDispOp(const MachineOperand &MO1,
                            const MachineOperand &MO2);
static inline bool isLEA(const MachineInstr &MI);
namespace {

/// A key describing a memory address: the base, scale, index and segment
/// operands plus the displacement. Two keys compare equal when the addresses
/// they describe differ at most in the displacement value, which lets the
/// pass group interchangeable address computations together.
class MemOpKey {
public:
  MemOpKey(const MachineOperand *Base, const MachineOperand *Scale,
           const MachineOperand *Index, const MachineOperand *Segment,
           const MachineOperand *Disp)
      : Disp(Disp) {
    Operands[0] = Base;
    Operands[1] = Scale;
    Operands[2] = Index;
    Operands[3] = Segment;
  }

  bool operator==(const MemOpKey &Other) const {
    // Base, scale, index and segment must match exactly...
    for (unsigned Idx = 0; Idx != 4; ++Idx)
      if (!isIdenticalOp(*Operands[Idx], *Other.Operands[Idx]))
        return false;

    // ...while displacements only need to be of a compatible kind.
    return isSimilarDispOp(*Disp, *Other.Disp);
  }

  // Address' base, scale, index and segment operands.
  const MachineOperand *Operands[4];

  // Address' displacement operand.
  const MachineOperand *Disp;
};

} // end anonymous namespace
namespace llvm {

/// DenseMapInfo specialization so MemOpKey can be used as a DenseMap key.
/// Empty/tombstone keys are encoded through the sentinel MachineOperand
/// pointers provided by DenseMapInfo<const MachineOperand *>.
template <> struct DenseMapInfo<MemOpKey> {
  using PtrInfo = DenseMapInfo<const MachineOperand *>;

  static inline MemOpKey getEmptyKey() {
    return MemOpKey(PtrInfo::getEmptyKey(), PtrInfo::getEmptyKey(),
                    PtrInfo::getEmptyKey(), PtrInfo::getEmptyKey(),
                    PtrInfo::getEmptyKey());
  }

  static inline MemOpKey getTombstoneKey() {
    return MemOpKey(PtrInfo::getTombstoneKey(), PtrInfo::getTombstoneKey(),
                    PtrInfo::getTombstoneKey(), PtrInfo::getTombstoneKey(),
                    PtrInfo::getTombstoneKey());
  }

  static unsigned getHashValue(const MemOpKey &Val) {
    // Checking any field of MemOpKey is enough to determine if the key is
    // empty or tombstone.
    assert(Val.Disp != PtrInfo::getEmptyKey() && "Cannot hash the empty key");
    assert(Val.Disp != PtrInfo::getTombstoneKey() &&
           "Cannot hash the tombstone key");

    hash_code Hash = hash_combine(*Val.Operands[0], *Val.Operands[1],
                                  *Val.Operands[2], *Val.Operands[3]);

    // If the address displacement is an immediate, it should not be added to
    // the hash so that memory operands which differ only by displacement
    // still hash to the same bucket (see MemOpKey::operator==: any two
    // immediates are "similar"). For the other kinds the identity of the
    // referenced entity is folded in.
    switch (Val.Disp->getType()) {
    case MachineOperand::MO_Immediate:
      break;
    case MachineOperand::MO_ConstantPoolIndex:
    case MachineOperand::MO_JumpTableIndex:
      Hash = hash_combine(Hash, Val.Disp->getIndex());
      break;
    case MachineOperand::MO_ExternalSymbol:
      Hash = hash_combine(Hash, Val.Disp->getSymbolName());
      break;
    case MachineOperand::MO_GlobalAddress:
      Hash = hash_combine(Hash, Val.Disp->getGlobal());
      break;
    case MachineOperand::MO_BlockAddress:
      Hash = hash_combine(Hash, Val.Disp->getBlockAddress());
      break;
    case MachineOperand::MO_MCSymbol:
      Hash = hash_combine(Hash, Val.Disp->getMCSymbol());
      break;
    case MachineOperand::MO_MachineBasicBlock:
      Hash = hash_combine(Hash, Val.Disp->getMBB());
      break;
    default:
      llvm_unreachable("Invalid address displacement operand");
    }

    return (unsigned)Hash;
  }

  static bool isEqual(const MemOpKey &LHS, const MemOpKey &RHS) {
    // Sentinel keys must be compared by pointer identity only; dereferencing
    // them (as MemOpKey::operator== would) is invalid.
    if (RHS.Disp == PtrInfo::getEmptyKey())
      return LHS.Disp == PtrInfo::getEmptyKey();
    if (RHS.Disp == PtrInfo::getTombstoneKey())
      return LHS.Disp == PtrInfo::getTombstoneKey();
    return LHS == RHS;
  }
};

} // end namespace llvm
/// Build a MemOpKey from the memory address of \p MI, whose address operand
/// sequence begins at operand index \p N.
static inline MemOpKey getMemOpKey(const MachineInstr &MI, unsigned N) {
  assert((isLEA(MI) || MI.mayLoadOrStore()) &&
         "The instruction must be a LEA, a load or a store");
  const MachineOperand &Base = MI.getOperand(N + X86::AddrBaseReg);
  const MachineOperand &Scale = MI.getOperand(N + X86::AddrScaleAmt);
  const MachineOperand &Index = MI.getOperand(N + X86::AddrIndexReg);
  const MachineOperand &Segment = MI.getOperand(N + X86::AddrSegmentReg);
  const MachineOperand &Disp = MI.getOperand(N + X86::AddrDisp);
  return MemOpKey(&Base, &Scale, &Index, &Segment, &Disp);
}
/// Two operands are considered identical when isIdenticalTo holds and the
/// operand is not a physical register (physical registers are conservatively
/// treated as never identical, since their values may change between the two
/// occurrences).
static inline bool isIdenticalOp(const MachineOperand &MO1,
                                 const MachineOperand &MO2) {
  if (!MO1.isIdenticalTo(MO2))
    return false;
  return !MO1.isReg() || !Register::isPhysicalRegister(MO1.getReg());
}
#ifndef NDEBUG
static bool isValidDispOp(const MachineOperand &MO) {
return MO.isImm() || MO.isCPI() || MO.isJTI() || MO.isSymbol() ||
MO.isGlobal() || MO.isBlockAddress() || MO.isMCSymbol() || MO.isMBB();
}
#endif
static bool isSimilarDispOp(const MachineOperand &MO1,
const MachineOperand &MO2) {
assert(isValidDispOp(MO1) && isValidDispOp(MO2) &&
"Address displacement operand is not valid");
return (MO1.isImm() && MO2.isImm()) ||
(MO1.isCPI() && MO2.isCPI() && MO1.getIndex() == MO2.getIndex()) ||
(MO1.isJTI() && MO2.isJTI() && MO1.getIndex() == MO2.getIndex()) ||
(MO1.isSymbol() && MO2.isSymbol() &&
MO1.getSymbolName() == MO2.getSymbolName()) ||
(MO1.isGlobal() && MO2.isGlobal() &&
MO1.getGlobal() == MO2.getGlobal()) ||
(MO1.isBlockAddress() && MO2.isBlockAddress() &&
MO1.getBlockAddress() == MO2.getBlockAddress()) ||
(MO1.isMCSymbol() && MO2.isMCSymbol() &&
MO1.getMCSymbol() == MO2.getMCSymbol()) ||
(MO1.isMBB() && MO2.isMBB() && MO1.getMBB() == MO2.getMBB());
}
/// Returns true if \p MI is any of the four x86 LEA forms.
static inline bool isLEA(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case X86::LEA16r:
  case X86::LEA32r:
  case X86::LEA64r:
  case X86::LEA64_32r:
    return true;
  default:
    return false;
  }
}
namespace {

/// Machine-function pass that, per basic block, (a) removes LEA instructions
/// whose address was already computed by an earlier, compatible LEA, and
/// (b) when optimizing for size, rewrites load/store address operands to
/// reuse an existing LEA result.
class X86OptimizeLEAPass : public MachineFunctionPass {
public:
  X86OptimizeLEAPass() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override { return "X86 LEA Optimize"; }

  /// Loop over all of the basic blocks, removing redundant LEAs and, under
  /// opt-for-size, replacing address calculations in loads and stores.
  bool runOnMachineFunction(MachineFunction &MF) override;

  static char ID;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // Profile summary + lazy block frequency info feed the
    // shouldOptimizeForSize query in runOnMachineFunction.
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
    AU.addRequired<LazyMachineBlockFrequencyInfoPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

private:
  /// Map from an address key to all LEAs in the block computing addresses
  /// "similar" to that key.
  using MemOpMap = DenseMap<MemOpKey, SmallVector<MachineInstr *, 16>>;

  /// Signed distance between \p First and \p Last in the serial numbering
  /// recorded in InstrPos (positive when Last occurs after First).
  int calcInstrDist(const MachineInstr &First, const MachineInstr &Last);

  /// From \p List pick the LEA best suited to supply the address used by
  /// \p MI; outputs the chosen LEA, the displacement delta and the distance.
  bool chooseBestLEA(const SmallVectorImpl<MachineInstr *> &List,
                     const MachineInstr &MI, MachineInstr *&BestLEA,
                     int64_t &AddrDispShift, int &Dist);

  /// Difference between the displacement of MI1's address (operand sequence
  /// at \p N1) and MI2's (at \p N2).
  int64_t getAddrDispShift(const MachineInstr &MI1, unsigned N1,
                           const MachineInstr &MI2, unsigned N2) const;

  /// True if \p Last can be deleted with all its uses redirected to
  /// \p First; \p AddrDispShift receives the required displacement delta.
  bool isReplaceable(const MachineInstr &First, const MachineInstr &Last,
                     int64_t &AddrDispShift) const;

  /// Number the instructions of \p MBB and collect its LEAs into \p LEAs.
  void findLEAs(const MachineBasicBlock &MBB, MemOpMap &LEAs);

  /// Rewrite load/store address operands to reuse LEA results.
  bool removeRedundantAddrCalc(MemOpMap &LEAs);

  /// Rebuild a DBG_VALUE(_LIST) to refer to \p NewReg instead of \p OldReg,
  /// compensating for \p AddrDispShift in its DIExpression.
  MachineInstr *replaceDebugValue(MachineInstr &MI, unsigned OldReg,
                                  unsigned NewReg, int64_t AddrDispShift);

  /// Delete LEAs duplicating work of earlier LEAs in the same block.
  bool removeRedundantLEAs(MemOpMap &LEAs);

  /// Serial position of each instruction in the current block (filled by
  /// findLEAs, stepping by 2 so a moved LEA can be renumbered in between).
  DenseMap<const MachineInstr *, unsigned> InstrPos;

  MachineRegisterInfo *MRI = nullptr;
  const X86InstrInfo *TII = nullptr;
  const X86RegisterInfo *TRI = nullptr;
};

} // end anonymous namespace
char X86OptimizeLEAPass::ID = 0;

/// Factory used by the X86 target to create this pass.
FunctionPass *llvm::createX86OptimizeLEAs() { return new X86OptimizeLEAPass(); }

INITIALIZE_PASS(X86OptimizeLEAPass, DEBUG_TYPE, "X86 optimize LEA pass", false,
                false)
/// Signed distance between \p First and \p Last in the serial numbering
/// recorded by findLEAs; positive when \p Last occurs later in the block.
/// Both instructions must be in the same block and already numbered.
int X86OptimizeLEAPass::calcInstrDist(const MachineInstr &First,
                                      const MachineInstr &Last) {
  assert(Last.getParent() == First.getParent() &&
         "Instructions are in different basic blocks");
  assert(InstrPos.find(&First) != InstrPos.end() &&
         InstrPos.find(&Last) != InstrPos.end() &&
         "Instructions' positions are undefined");

  const unsigned FirstPos = InstrPos[&First];
  const unsigned LastPos = InstrPos[&Last];
  return LastPos - FirstPos;
}
// From the candidate LEAs in \p List (kept in order of occurrence in the
// block), pick the one best suited to supply the address used by the memory
// instruction \p MI. On success returns true and sets \p BestLEA, the
// displacement delta \p AddrDispShift between MI's address and the LEA's,
// and the distance \p Dist between the two instructions.
bool X86OptimizeLEAPass::chooseBestLEA(
    const SmallVectorImpl<MachineInstr *> &List, const MachineInstr &MI,
    MachineInstr *&BestLEA, int64_t &AddrDispShift, int &Dist) {
  const MachineFunction *MF = MI.getParent()->getParent();
  const MCInstrDesc &Desc = MI.getDesc();
  // Index of MI's first address operand.
  int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags) +
                X86II::getOperandBias(Desc);

  BestLEA = nullptr;

  // Loop over all LEA instructions.
  for (auto DefMI : List) {
    // Get new address displacement.
    int64_t AddrDispShiftTemp = getAddrDispShift(MI, MemOpNo, *DefMI, 1);

    // The resulting displacement must be encodable in 4 bytes.
    if (!isInt<32>(AddrDispShiftTemp))
      continue;

    // The LEA's result register class must match the class expected for
    // MI's address base register, otherwise it cannot be substituted in.
    if (TII->getRegClass(Desc, MemOpNo + X86::AddrBaseReg, TRI, *MF) !=
        MRI->getRegClass(DefMI->getOperand(0).getReg()))
      continue;

    int DistTemp = calcInstrDist(*DefMI, MI);
    assert(DistTemp != 0 &&
           "The distance between two different instructions cannot be zero");

    // Take this candidate if it precedes MI (DistTemp > 0), or if we have
    // nothing yet (a LEA occurring after MI is acceptable only as a last
    // resort — removeRedundantAddrCalc will have to hoist it before MI).
    if (DistTemp > 0 || BestLEA == nullptr) {
      // Do not replace an existing best whose displacement fits in 8 bits
      // with one whose displacement does not.
      if (BestLEA != nullptr && !isInt<8>(AddrDispShiftTemp) &&
          isInt<8>(AddrDispShift))
        continue;

      BestLEA = DefMI;
      AddrDispShift = AddrDispShiftTemp;
      Dist = DistTemp;
    }

    // The list is in order of occurrence, so the first LEA found after MI
    // is the nearest one — no later candidate can be closer; stop here.
    if (DistTemp < 0)
      break;
  }

  return BestLEA != nullptr;
}
/// Difference between the displacements of the addresses in \p MI1 (address
/// operand sequence at \p N1) and \p MI2 (sequence at \p N2). The two
/// displacement operands must already be "similar" (see isSimilarDispOp).
int64_t X86OptimizeLEAPass::getAddrDispShift(const MachineInstr &MI1,
                                             unsigned N1,
                                             const MachineInstr &MI2,
                                             unsigned N2) const {
  const MachineOperand &Op1 = MI1.getOperand(N1 + X86::AddrDisp);
  const MachineOperand &Op2 = MI2.getOperand(N2 + X86::AddrDisp);

  assert(isSimilarDispOp(Op1, Op2) &&
         "Address displacement operands are not compatible");

  // Jump-table references carry no offset to compare.
  if (Op1.isJTI())
    return 0;

  if (Op1.isImm())
    return Op1.getImm() - Op2.getImm();
  return Op1.getOffset() - Op2.getOffset();
}
// Returns true if the \p Last LEA can be deleted and all of its uses
// redirected to the \p First LEA (which computes a "similar" address). On
// success \p AddrDispShift holds the displacement delta that must be added
// to each rewritten use.
bool X86OptimizeLEAPass::isReplaceable(const MachineInstr &First,
                                       const MachineInstr &Last,
                                       int64_t &AddrDispShift) const {
  assert(isLEA(First) && isLEA(Last) &&
         "The function works only with LEA instructions");

  // The result registers must belong to the same class, otherwise Last's
  // users cannot accept First's register.
  if (MRI->getRegClass(First.getOperand(0).getReg()) !=
      MRI->getRegClass(Last.getOperand(0).getReg()))
    return false;

  // Displacement delta between the two LEAs' addresses.
  AddrDispShift = getAddrDispShift(Last, 1, First, 1);

  // Examine every (non-debug) use of Last's result; each one must be able
  // to absorb the displacement delta.
  for (auto &MO : MRI->use_nodbg_operands(Last.getOperand(0).getReg())) {
    MachineInstr &MI = *MO.getParent();

    const MCInstrDesc &Desc = MI.getDesc();
    // Only instructions with a memory operand can fold the delta into their
    // displacement.
    int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags);
    if (MemOpNo < 0)
      return false;

    MemOpNo += X86II::getOperandBias(Desc);

    // The register must be used as the address base — that is the only
    // place where the displacement delta can be compensated.
    if (!isIdenticalOp(MI.getOperand(MemOpNo + X86::AddrBaseReg), MO))
      return false;

    // The register must not appear anywhere else in the instruction: those
    // other uses would observe a different value after the rewrite.
    for (unsigned i = 0; i < MI.getNumOperands(); i++)
      if (i != (unsigned)(MemOpNo + X86::AddrBaseReg) &&
          isIdenticalOp(MI.getOperand(i), MO))
        return false;

    // An immediate displacement must still fit in 32 bits after adjustment.
    if (MI.getOperand(MemOpNo + X86::AddrDisp).isImm() &&
        !isInt<32>(MI.getOperand(MemOpNo + X86::AddrDisp).getImm() +
                   AddrDispShift))
      return false;
  }
  return true;
}
/// Number every instruction of \p MBB in InstrPos and collect the block's
/// LEAs into \p LEAs, grouped by address key in order of occurrence.
void X86OptimizeLEAPass::findLEAs(const MachineBasicBlock &MBB,
                                  MemOpMap &LEAs) {
  unsigned Pos = 0;
  for (auto &MI : MBB) {
    // Positions advance by two, leaving an odd slot before each instruction
    // so that a LEA hoisted later (removeRedundantAddrCalc) can be
    // renumbered without renumbering the whole block.
    Pos += 2;
    InstrPos[&MI] = Pos;

    if (!isLEA(MI))
      continue;
    LEAs[getMemOpKey(MI, 1)].push_back(const_cast<MachineInstr *>(&MI));
  }
}
// For every load/store in the block whose address matches one of the LEAs in
// \p LEAs, rewrite the memory operands to "(LEA result) + delta" instead of
// recomputing the full address. Returns true if anything changed.
bool X86OptimizeLEAPass::removeRedundantAddrCalc(MemOpMap &LEAs) {
  bool Changed = false;

  assert(!LEAs.empty());
  // All LEAs in the map belong to a single block; recover it from any entry.
  MachineBasicBlock *MBB = (*LEAs.begin()->second.begin())->getParent();

  // Process all instructions in basic block (early-inc: MI's operands are
  // mutated during the walk).
  for (MachineInstr &MI : llvm::make_early_inc_range(*MBB)) {
    // Instruction must be load or store.
    if (!MI.mayLoadOrStore())
      continue;

    // Get the number of the first memory operand.
    const MCInstrDesc &Desc = MI.getDesc();
    int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags);

    // If instruction has no memory operand - skip it.
    if (MemOpNo < 0)
      continue;

    MemOpNo += X86II::getOperandBias(Desc);

    // Find LEAs computing an address similar to this instruction's.
    auto Insns = LEAs.find(getMemOpKey(MI, MemOpNo));
    if (Insns == LEAs.end())
      continue;

    // Choose the best candidate among them.
    MachineInstr *DefMI;
    int64_t AddrDispShift;
    int Dist;
    if (!chooseBestLEA(Insns->second, MI, DefMI, AddrDispShift, Dist))
      continue;

    // If the chosen LEA occurs after MI in the block, hoist it to just
    // before MI so its result is available.
    if (Dist < 0) {
      DefMI->removeFromParent();
      MBB->insert(MachineBasicBlock::iterator(&MI), DefMI);
      // Renumber into the odd slot left free by findLEAs' step-by-2 scheme.
      InstrPos[DefMI] = InstrPos[&MI] - 1;

      // Make sure the instructions' positions are still strictly increasing.
      assert(((InstrPos[DefMI] == 1 &&
               MachineBasicBlock::iterator(DefMI) == MBB->begin()) ||
              InstrPos[DefMI] >
                  InstrPos[&*std::prev(MachineBasicBlock::iterator(DefMI))]) &&
             "Instruction positioning is broken");
    }

    // The LEA result now reaches MI; any stale kill flags must go.
    MRI->clearKillFlags(DefMI->getOperand(0).getReg());

    ++NumSubstLEAs;
    LLVM_DEBUG(dbgs() << "OptimizeLEAs: Candidate to replace: "; MI.dump(););

    // Rewrite MI's address to: base = LEA result, scale = 1, no index,
    // disp = delta, no segment.
    MI.getOperand(MemOpNo + X86::AddrBaseReg)
        .ChangeToRegister(DefMI->getOperand(0).getReg(), false);
    MI.getOperand(MemOpNo + X86::AddrScaleAmt).ChangeToImmediate(1);
    MI.getOperand(MemOpNo + X86::AddrIndexReg)
        .ChangeToRegister(X86::NoRegister, false);
    MI.getOperand(MemOpNo + X86::AddrDisp).ChangeToImmediate(AddrDispShift);
    MI.getOperand(MemOpNo + X86::AddrSegmentReg)
        .ChangeToRegister(X86::NoRegister, false);

    LLVM_DEBUG(dbgs() << "OptimizeLEAs: Replaced by: "; MI.dump(););

    Changed = true;
  }

  return Changed;
}
// Rebuild the DBG_VALUE / DBG_VALUE_LIST instruction \p MI so it refers to
// \p NewReg instead of \p OldReg, adding \p AddrDispShift to the debug
// expression to keep the described value unchanged. The original instruction
// is erased; the replacement is returned.
MachineInstr *X86OptimizeLEAPass::replaceDebugValue(MachineInstr &MI,
                                                    unsigned OldReg,
                                                    unsigned NewReg,
                                                    int64_t AddrDispShift) {
  const DIExpression *Expr = MI.getDebugExpression();
  if (AddrDispShift != 0) {
    if (MI.isNonListDebugValue()) {
      // Single-location DBG_VALUE: prepend the offset and mark the
      // expression as a stack value.
      Expr =
          DIExpression::prepend(Expr, DIExpression::StackValue, AddrDispShift);
    } else {
      // Variadic DBG_VALUE_LIST: append the offset to each expression
      // argument that refers to OldReg.
      SmallVector<uint64_t, 3> Ops;
      DIExpression::appendOffset(Ops, AddrDispShift);
      for (MachineOperand &Op : MI.getDebugOperandsForReg(OldReg)) {
        unsigned OpIdx = MI.getDebugOperandIndex(&Op);
        Expr = DIExpression::appendOpsToArg(Expr, Ops, OpIdx);
      }
    }
  }

  // Replace DBG_VALUE instruction with modified version.
  MachineBasicBlock *MBB = MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  bool IsIndirect = MI.isIndirectDebugValue();
  const MDNode *Var = MI.getDebugVariable();
  unsigned Opcode = MI.isNonListDebugValue() ? TargetOpcode::DBG_VALUE
                                             : TargetOpcode::DBG_VALUE_LIST;
  if (IsIndirect)
    assert(MI.getDebugOffset().getImm() == 0 &&
           "DBG_VALUE with nonzero offset");
  SmallVector<MachineOperand, 4> NewOps;
  // Substitute NewReg for every debug operand that referred to OldReg; all
  // other operands are copied unchanged.
  auto replaceOldReg = [OldReg, NewReg](const MachineOperand &Op) {
    if (Op.isReg() && Op.getReg() == OldReg)
      return MachineOperand::CreateReg(NewReg, false, false, false, false,
                                       false, false, false, false, false,
                                       true);
    return Op;
  };
  for (const MachineOperand &Op : MI.debug_operands())
    NewOps.push_back(replaceOldReg(Op));
  // Erase MI and build the replacement at its former position (MBB->erase
  // returns the iterator after MI).
  return BuildMI(*MBB, MBB->erase(&MI), DL, TII->get(Opcode), IsIndirect,
                 NewOps, Var, Expr);
}
// For each group of LEAs computing "similar" addresses, delete every LEA
// whose work duplicates an earlier one, redirecting its uses (including
// debug values) to the surviving LEA. Returns true if anything was removed.
bool X86OptimizeLEAPass::removeRedundantLEAs(MemOpMap &LEAs) {
  bool Changed = false;

  // Loop over all entries in the table.
  for (auto &E : LEAs) {
    auto &List = E.second;

    // Compare every pair (First, Last) with First preceding Last; List is
    // in occurrence order, so I2 always points past I1.
    auto I1 = List.begin();
    while (I1 != List.end()) {
      MachineInstr &First = **I1;
      auto I2 = std::next(I1);
      while (I2 != List.end()) {
        MachineInstr &Last = **I2;
        int64_t AddrDispShift;

        // LEAs should be in occurrence order in the list, so we can freely
        // replace later LEAs with earlier ones.
        assert(calcInstrDist(First, Last) > 0 &&
               "LEAs must be in occurrence order in the list");

        // Check that the Last LEA instruction can be replaced by the First.
        if (!isReplaceable(First, Last, AddrDispShift)) {
          ++I2;
          continue;
        }

        // Loop over all uses of the Last LEA and update their operands. Note
        // that the correctness of this has already been checked by
        // isReplaceable. Early-inc range: operands are rewritten (and debug
        // instructions erased) as we go.
        Register FirstVReg = First.getOperand(0).getReg();
        Register LastVReg = Last.getOperand(0).getReg();
        for (MachineOperand &MO :
             llvm::make_early_inc_range(MRI->use_operands(LastVReg))) {
          MachineInstr &MI = *MO.getParent();

          if (MI.isDebugValue()) {
            // Redirect the debug value to the First LEA, compensating for
            // the displacement delta in its expression.
            replaceDebugValue(MI, LastVReg, FirstVReg, AddrDispShift);
            continue;
          }

          // Get the number of the first memory operand.
          const MCInstrDesc &Desc = MI.getDesc();
          int MemOpNo =
              X86II::getMemoryOperandNo(Desc.TSFlags) +
              X86II::getOperandBias(Desc);

          // Update address base.
          MO.setReg(FirstVReg);

          // Update address disp (jump-table displacements carry no offset).
          MachineOperand &Op = MI.getOperand(MemOpNo + X86::AddrDisp);
          if (Op.isImm())
            Op.setImm(Op.getImm() + AddrDispShift);
          else if (!Op.isJTI())
            Op.setOffset(Op.getOffset() + AddrDispShift);
        }

        // The First LEA's result now reaches further uses; drop stale kill
        // flags.
        MRI->clearKillFlags(FirstVReg);

        ++NumRedundantLEAs;
        LLVM_DEBUG(dbgs() << "OptimizeLEAs: Remove redundant LEA: ";
                   Last.dump(););

        // By this moment, all of the Last LEA's uses must be replaced. So we
        // can freely remove it.
        assert(MRI->use_empty(LastVReg) &&
               "The LEA's def register must have no uses");
        Last.eraseFromParent();

        // Erase the removed LEA from the list (erase returns the next
        // element).
        I2 = List.erase(I2);

        Changed = true;
      }
      ++I1;
    }
  }

  return Changed;
}
/// Per-block driver: numbers the instructions and collects LEAs, removes
/// redundant LEAs, and — only when optimizing the block for size — reuses
/// LEA results for load/store address calculations.
bool X86OptimizeLEAPass::runOnMachineFunction(MachineFunction &MF) {
  if (DisableX86LEAOpt || skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
  TRI = MF.getSubtarget<X86Subtarget>().getRegisterInfo();

  auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  // Block frequency info is only needed (and only computed) when a profile
  // summary is available.
  auto *MBFI = (PSI && PSI->hasProfileSummary())
                   ? &getAnalysis<LazyMachineBlockFrequencyInfoPass>().getBFI()
                   : nullptr;

  bool Changed = false;
  for (auto &MBB : MF) {
    MemOpMap LEAs;
    InstrPos.clear();

    // Find and group the block's LEA instructions.
    findLEAs(MBB, LEAs);
    if (LEAs.empty())
      continue;

    // Remove redundant LEA instructions.
    Changed |= removeRedundantLEAs(LEAs);

    // Address-calculation substitution is gated on size optimization.
    bool OptForSize = MF.getFunction().hasOptSize() ||
                      llvm::shouldOptimizeForSize(&MBB, PSI, MBFI);
    if (OptForSize)
      Changed |= removeRedundantAddrCalc(LEAs);
  }

  return Changed;
}