#include "llvm/CodeGen/FastISel.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>
using namespace llvm;
using namespace PatternMatch;
#define DEBUG_TYPE "isel"
STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
"target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
"target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");
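// Prepare to emit code into a new basic block. Any instructions already in
// the block (e.g. from argument lowering) are treated as the start point for
// the local value area.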
void FastISel::startNewBlock() {
assert(LocalValueMap.empty() &&
"local values should be cleared after finishing a BB");
EmitStartPt = nullptr;
if (!FuncInfo.MBB->empty())
EmitStartPt = &FuncInfo.MBB->back();
LastLocalValue = EmitStartPt;
}
void FastISel::finishBasicBlock() { flushLocalValueMap(); }
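// Lower the function's formal arguments via the target's fastLowerArguments
// hook and copy the resulting registers into the function-wide value map.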
bool FastISel::lowerArguments() {
if (!FuncInfo.CanLowerReturn)
return false;
if (!fastLowerArguments())
return false;
for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
E = FuncInfo.Fn->arg_end();
I != E; ++I) {
DenseMap<const Value *, Register>::iterator VI = LocalValueMap.find(&*I);
assert(VI != LocalValueMap.end() && "Missed an argument?");
FuncInfo.ValueMap[&*I] = VI->second;
}
return true;
}
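// Return the defined register if this instruction defines exactly one
// virtual register and uses no other virtual registers. Otherwise return an
// invalid register.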
static Register findLocalRegDef(MachineInstr &MI) {
Register RegDef;
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg())
continue;
if (MO.isDef()) {
if (RegDef)
return Register();
RegDef = MO.getReg();
} else if (MO.getReg().isVirtual()) {
return Register();
}
}
return RegDef;
}
static bool isRegUsedByPhiNodes(Register DefReg,
FunctionLoweringInfo &FuncInfo) {
for (auto &P : FuncInfo.PHINodesToUpdate)
if (P.second == DefReg)
return true;
return false;
}
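// Flush the local value map: remove dead local value materializations
// (unless their register feeds a PHI node or a pending fixup), propagate a
// debug location onto the first surviving local value, and reset the
// local-value insertion state.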
void FastISel::flushLocalValueMap() {
if (LastLocalValue != EmitStartPt) {
MachineBasicBlock::iterator FirstNonValue(LastLocalValue);
++FirstNonValue;
MachineBasicBlock::reverse_iterator RE =
EmitStartPt ? MachineBasicBlock::reverse_iterator(EmitStartPt)
: FuncInfo.MBB->rend();
MachineBasicBlock::reverse_iterator RI(LastLocalValue);
for (MachineInstr &LocalMI :
llvm::make_early_inc_range(llvm::make_range(RI, RE))) {
Register DefReg = findLocalRegDef(LocalMI);
if (!DefReg)
continue;
if (FuncInfo.RegsWithFixups.count(DefReg))
continue;
bool UsedByPHI = isRegUsedByPhiNodes(DefReg, FuncInfo);
if (!UsedByPHI && MRI.use_nodbg_empty(DefReg)) {
if (EmitStartPt == &LocalMI)
EmitStartPt = EmitStartPt->getPrevNode();
LLVM_DEBUG(dbgs() << "removing dead local value materialization"
<< LocalMI);
LocalMI.eraseFromParent();
}
}
if (FirstNonValue != FuncInfo.MBB->end()) {
MachineBasicBlock::iterator FirstLocalValue =
EmitStartPt ? ++MachineBasicBlock::iterator(EmitStartPt)
: FuncInfo.MBB->begin();
if (FirstLocalValue != FirstNonValue && !FirstLocalValue->getDebugLoc())
FirstLocalValue->setDebugLoc(FirstNonValue->getDebugLoc());
}
}
LocalValueMap.clear();
LastLocalValue = EmitStartPt;
recomputeInsertPt();
SavedInsertPt = FuncInfo.InsertPt;
}
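// Return the register already assigned to V, or create one. Constants and
// other non-instruction values are materialized in the local value area at
// the top of the current block so they can be reused and sunk later.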
Register FastISel::getRegForValue(const Value *V) {
EVT RealVT = TLI.getValueType(DL, V->getType(), true);
if (!RealVT.isSimple())
return Register();
MVT VT = RealVT.getSimpleVT();
if (!TLI.isTypeLegal(VT)) {
if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
else
return Register();
}
Register Reg = lookUpRegForValue(V);
if (Reg)
return Reg;
if (isa<Instruction>(V) &&
(!isa<AllocaInst>(V) ||
!FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
return FuncInfo.InitializeRegForValue(V);
SavePoint SaveInsertPt = enterLocalValueArea();
Reg = materializeRegForValue(V, VT);
leaveLocalValueArea(SaveInsertPt);
return Reg;
}
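// Helper for materializeRegForValue: materialize a constant in a
// target-independent way. FP constants the target cannot produce directly
// are materialized as integers and converted with SINT_TO_FP when the
// conversion is exact.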
Register FastISel::materializeConstant(const Value *V, MVT VT) {
Register Reg;
if (const auto *CI = dyn_cast<ConstantInt>(V)) {
if (CI->getValue().getActiveBits() <= 64)
Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
} else if (isa<AllocaInst>(V))
Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
else if (isa<ConstantPointerNull>(V))
Reg =
getRegForValue(Constant::getNullValue(DL.getIntPtrType(V->getType())));
else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
if (CF->isNullValue())
Reg = fastMaterializeFloatZero(CF);
else
Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);
if (!Reg) {
const APFloat &Flt = CF->getValueAPF();
EVT IntVT = TLI.getPointerTy(DL);
uint32_t IntBitWidth = IntVT.getSizeInBits();
APSInt SIntVal(IntBitWidth, false);
bool isExact;
(void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
if (isExact) {
Register IntegerReg =
getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
if (IntegerReg)
Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
IntegerReg);
}
}
} else if (const auto *Op = dyn_cast<Operator>(V)) {
if (!selectOperator(Op, Op->getOpcode()))
if (!isa<Instruction>(Op) ||
!fastSelectInstruction(cast<Instruction>(Op)))
return 0;
Reg = lookUpRegForValue(Op);
} else if (isa<UndefValue>(V)) {
Reg = createResultReg(TLI.getRegClassFor(VT));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
}
return Reg;
}
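// Helper for getRegForValue: materialize V with new instructions, trying the
// target-specific fastMaterializeConstant hook before the generic path, and
// record the result in the local value map.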
Register FastISel::materializeRegForValue(const Value *V, MVT VT) {
Register Reg;
if (isa<Constant>(V))
Reg = fastMaterializeConstant(cast<Constant>(V));
if (!Reg)
Reg = materializeConstant(V, VT);
if (Reg) {
LocalValueMap[V] = Reg;
LastLocalValue = MRI.getVRegDef(Reg);
}
return Reg;
}
Register FastISel::lookUpRegForValue(const Value *V) {
DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(V);
if (I != FuncInfo.ValueMap.end())
return I->second;
return LocalValueMap[V];
}
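// Record Reg as the register(s) holding I's value. If I was already assigned
// a register (e.g. by a cross-block use), remember the remapping in
// RegFixups so later uses can be rewritten.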
void FastISel::updateValueMap(const Value *I, Register Reg, unsigned NumRegs) {
if (!isa<Instruction>(I)) {
LocalValueMap[I] = Reg;
return;
}
Register &AssignedReg = FuncInfo.ValueMap[I];
if (!AssignedReg)
AssignedReg = Reg;
else if (Reg != AssignedReg) {
for (unsigned i = 0; i < NumRegs; i++) {
FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
FuncInfo.RegsWithFixups.insert(Reg + i);
}
AssignedReg = Reg;
}
}
Register FastISel::getRegForGEPIndex(const Value *Idx) {
Register IdxN = getRegForValue(Idx);
if (!IdxN)
return Register();
MVT PtrVT = TLI.getPointerTy(DL);
EVT IdxVT = EVT::getEVT(Idx->getType(), false);
if (IdxVT.bitsLT(PtrVT)) {
IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN);
} else if (IdxVT.bitsGT(PtrVT)) {
IdxN =
fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN);
}
return IdxN;
}
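// Reset the insert point to just after the last local value, skipping any
// EH_LABEL instructions that must stay at the start of the block.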
void FastISel::recomputeInsertPt() {
if (getLastLocalValue()) {
FuncInfo.InsertPt = getLastLocalValue();
FuncInfo.MBB = FuncInfo.InsertPt->getParent();
++FuncInfo.InsertPt;
} else
FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();
while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
++FuncInfo.InsertPt;
}
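// Remove all dead instructions in the half-open range [I, E), keeping the
// saved insert point and local-value bookkeeping consistent when they point
// into the erased range.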
void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
MachineBasicBlock::iterator E) {
assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
"Invalid iterator!");
while (I != E) {
if (SavedInsertPt == I)
SavedInsertPt = E;
if (EmitStartPt == I)
EmitStartPt = E.isValid() ? &*E : nullptr;
if (LastLocalValue == I)
LastLocalValue = E.isValid() ? &*E : nullptr;
MachineInstr *Dead = &*I;
++I;
Dead->eraseFromParent();
++NumFastIselDead;
}
recomputeInsertPt();
}
FastISel::SavePoint FastISel::enterLocalValueArea() {
SavePoint OldInsertPt = FuncInfo.InsertPt;
recomputeInsertPt();
return OldInsertPt;
}
void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
LastLocalValue = &*std::prev(FuncInfo.InsertPt);
FuncInfo.InsertPt = OldInsertPt;
}
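// Select a binary operator, preferring immediate forms: a constant LHS of a
// commutative operation is treated as the RHS, an exact sdiv by a power of
// two becomes sra, and a urem by a power of two becomes and.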
bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
EVT VT = EVT::getEVT(I->getType(), true);
if (VT == MVT::Other || !VT.isSimple())
return false;
if (!TLI.isTypeLegal(VT)) {
if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
ISDOpcode == ISD::XOR))
VT = TLI.getTypeToTransformTo(I->getContext(), VT);
else
return false;
}
if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
Register Op1 = getRegForValue(I->getOperand(1));
if (!Op1)
return false;
Register ResultReg =
fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, CI->getZExtValue(),
VT.getSimpleVT());
if (!ResultReg)
return false;
updateValueMap(I, ResultReg);
return true;
}
Register Op0 = getRegForValue(I->getOperand(0));
if (!Op0) return false;
if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
uint64_t Imm = CI->getSExtValue();
if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
Imm = Log2_64(Imm);
ISDOpcode = ISD::SRA;
}
if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
isPowerOf2_64(Imm)) {
--Imm;
ISDOpcode = ISD::AND;
}
Register ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0, Imm,
VT.getSimpleVT());
if (!ResultReg)
return false;
updateValueMap(I, ResultReg);
return true;
}
Register Op1 = getRegForValue(I->getOperand(1));
if (!Op1) return false;
Register ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
ISDOpcode, Op0, Op1);
if (!ResultReg)
return false;
updateValueMap(I, ResultReg);
return true;
}
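// Select a GEP by accumulating constant offsets and flushing them into an
// ADD whenever the running total reaches MaxOffs; variable indices are
// sign-extended or truncated to the pointer width and combined with MUL/ADD.
// Vector GEPs are not handled.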
bool FastISel::selectGetElementPtr(const User *I) {
Register N = getRegForValue(I->getOperand(0));
if (!N) return false;
if (isa<VectorType>(I->getType()))
return false;
uint64_t TotalOffs = 0;
uint64_t MaxOffs = 2048;
MVT VT = TLI.getPointerTy(DL);
for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
GTI != E; ++GTI) {
const Value *Idx = GTI.getOperand();
if (StructType *StTy = GTI.getStructTypeOrNull()) {
uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
if (Field) {
TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
if (TotalOffs >= MaxOffs) {
N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
if (!N) return false;
TotalOffs = 0;
}
}
} else {
Type *Ty = GTI.getIndexedType();
if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
if (CI->isZero())
continue;
uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
if (TotalOffs >= MaxOffs) {
N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
if (!N) return false;
TotalOffs = 0;
}
continue;
}
if (TotalOffs) {
N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
if (!N) return false;
TotalOffs = 0;
}
uint64_t ElementSize = DL.getTypeAllocSize(Ty);
Register IdxN = getRegForGEPIndex(Idx);
if (!IdxN) return false;
if (ElementSize != 1) {
IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
if (!IdxN) return false;
}
N = fastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
if (!N) return false;
}
}
if (TotalOffs) {
N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
if (!N) return false;
}
updateValueMap(I, N);
return true;
}
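// Translate the live-variable operands of a stackmap/patchpoint call into
// MachineOperands: constant ints and null pointers become ConstantOp-tagged
// immediates, static allocas become frame indices, and everything else is
// materialized into a register.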
bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
const CallInst *CI, unsigned StartIdx) {
for (unsigned i = StartIdx, e = CI->arg_size(); i != e; ++i) {
Value *Val = CI->getArgOperand(i);
if (const auto *C = dyn_cast<ConstantInt>(Val)) {
Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
} else if (isa<ConstantPointerNull>(Val)) {
Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
Ops.push_back(MachineOperand::CreateImm(0));
} else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
auto SI = FuncInfo.StaticAllocaMap.find(AI);
if (SI != FuncInfo.StaticAllocaMap.end())
Ops.push_back(MachineOperand::CreateFI(SI->second));
else
return false;
} else {
Register Reg = getRegForValue(Val);
if (!Reg)
return false;
Ops.push_back(MachineOperand::CreateReg(Reg, false));
}
}
return true;
}
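// Lower a call to the llvm.experimental.stackmap intrinsic:
//   void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
//                                    [live variables...])
// The STACKMAP pseudo is emitted inside a call-frame setup/destroy pair and
// lists the target's scratch registers as implicit defs.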
bool FastISel::selectStackmap(const CallInst *I) {
assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
"Stackmap cannot return a value.");
SmallVector<MachineOperand, 32> Ops;
assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
"Expected a constant integer.");
const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
"Expected a constant integer.");
const auto *NumBytes =
cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
if (!addStackMapLiveVars(Ops, I, 2))
return false;
CallingConv::ID CC = I->getCallingConv();
const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
for (unsigned i = 0; ScratchRegs[i]; ++i)
Ops.push_back(MachineOperand::CreateReg(
ScratchRegs[i], true, true, false,
false, false, true));
unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
auto Builder =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown));
const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
Builder.addImm(0);
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::STACKMAP));
for (auto const &MO : Ops)
MIB.add(MO);
unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
.addImm(0)
.addImm(0);
FuncInfo.MF->getFrameInfo().setHasStackMap();
return true;
}
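// Lower NumArgs operands of CI starting at ArgIdx according to the calling
// convention. ForceRetVoidTy suppresses the return type, which anyregcc
// patchpoints use because their result registers are assigned explicitly.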
bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
unsigned NumArgs, const Value *Callee,
bool ForceRetVoidTy, CallLoweringInfo &CLI) {
ArgListTy Args;
Args.reserve(NumArgs);
for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
Value *V = CI->getOperand(ArgI);
assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
ArgListEntry Entry;
Entry.Val = V;
Entry.Ty = V->getType();
Entry.setAttributes(CI, ArgI);
Args.push_back(Entry);
}
Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
: CI->getType();
CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);
return lowerCallTo(CLI);
}
FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
SmallString<32> MangledName;
Mangler::getNameWithPrefix(MangledName, Target, DL);
MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
}
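// Lower a call to the llvm.experimental.patchpoint.* intrinsic:
//   @llvm.experimental.patchpoint.*(i64 <id>, i32 <numBytes>,
//                                   i8* <target>, i32 <numArgs>,
//                                   [Args...], [live variables...])
// The lowered call is replaced by a PATCHPOINT pseudo carrying the meta
// operands, call arguments, live variables, and register-mask info.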
bool FastISel::selectPatchpoint(const CallInst *I) {
CallingConv::ID CC = I->getCallingConv();
bool IsAnyRegCC = CC == CallingConv::AnyReg;
bool HasDef = !I->getType()->isVoidTy();
Value *Callee = I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();
assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
"Expected a constant integer.");
const auto *NumArgsVal =
cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
unsigned NumArgs = NumArgsVal->getZExtValue();
unsigned NumMetaOpers = PatchPointOpers::CCPos;
assert(I->arg_size() >= NumMetaOpers + NumArgs &&
"Not enough arguments provided to the patchpoint intrinsic");
unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
CallLoweringInfo CLI;
CLI.setIsPatchPoint();
if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
return false;
assert(CLI.Call && "No call instruction specified.");
SmallVector<MachineOperand, 32> Ops;
if (IsAnyRegCC && HasDef) {
assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
CLI.NumResultRegs = 1;
Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, true));
}
assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
"Expected a constant integer.");
const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
"Expected a constant integer.");
const auto *NumBytes =
cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
uint64_t CalleeConstAddr =
cast<ConstantInt>(C->getOperand(0))->getZExtValue();
Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
} else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
if (C->getOpcode() == Instruction::IntToPtr) {
uint64_t CalleeConstAddr =
cast<ConstantInt>(C->getOperand(0))->getZExtValue();
Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
} else
llvm_unreachable("Unsupported ConstantExpr.");
} else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
Ops.push_back(MachineOperand::CreateGA(GV, 0));
} else if (isa<ConstantPointerNull>(Callee))
Ops.push_back(MachineOperand::CreateImm(0));
else
llvm_unreachable("Unsupported callee address.");
unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));
Ops.push_back(MachineOperand::CreateImm((unsigned)CC));
if (IsAnyRegCC) {
for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
Register Reg = getRegForValue(I->getArgOperand(i));
if (!Reg)
return false;
Ops.push_back(MachineOperand::CreateReg(Reg, false));
}
}
for (auto Reg : CLI.OutRegs)
Ops.push_back(MachineOperand::CreateReg(Reg, false));
if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
return false;
Ops.push_back(MachineOperand::CreateRegMask(
TRI.getCallPreservedMask(*FuncInfo.MF, CC)));
const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
for (unsigned i = 0; ScratchRegs[i]; ++i)
Ops.push_back(MachineOperand::CreateReg(
ScratchRegs[i], true, true, false,
false, false, true));
for (auto Reg : CLI.InRegs)
Ops.push_back(MachineOperand::CreateReg(Reg, true,
true));
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
TII.get(TargetOpcode::PATCHPOINT));
for (auto &MO : Ops)
MIB.add(MO);
MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);
CLI.Call->eraseFromParent();
FuncInfo.MF->getFrameInfo().setHasPatchPoint();
if (CLI.NumResultRegs)
updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
return true;
}
bool FastISel::selectXRayCustomEvent(const CallInst *I) {
const auto &Triple = TM.getTargetTriple();
  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
false));
Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
false));
MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
for (auto &MO : Ops)
MIB.add(MO);
return true;
}
bool FastISel::selectXRayTypedEvent(const CallInst *I) {
const auto &Triple = TM.getTargetTriple();
  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
false));
Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
false));
Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(2)),
false));
MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::PATCHABLE_TYPED_EVENT_CALL));
for (auto &MO : Ops)
MIB.add(MO);
return true;
}
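// Build an AttributeList describing the attributes applied to the call's
// return value (sext/zext/inreg).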
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
SmallVector<Attribute::AttrKind, 2> Attrs;
if (CLI.RetSExt)
Attrs.push_back(Attribute::SExt);
if (CLI.RetZExt)
Attrs.push_back(Attribute::ZExt);
if (CLI.IsInReg)
Attrs.push_back(Attribute::InReg);
return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
Attrs);
}
bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
unsigned NumArgs) {
MCContext &Ctx = MF->getContext();
SmallString<32> MangledName;
Mangler::getNameWithPrefix(MangledName, SymName, DL);
MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
return lowerCallTo(CI, Sym, NumArgs);
}
bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
unsigned NumArgs) {
FunctionType *FTy = CI->getFunctionType();
Type *RetTy = CI->getType();
ArgListTy Args;
Args.reserve(NumArgs);
for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
Value *V = CI->getOperand(ArgI);
assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
ArgListEntry Entry;
Entry.Val = V;
Entry.Ty = V->getType();
Entry.setAttributes(CI, ArgI);
Args.push_back(Entry);
}
TLI.markLibCallAttributes(MF, CI->getCallingConv(), Args);
CallLoweringInfo CLI;
CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), *CI, NumArgs);
return lowerCallTo(CLI);
}
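// Target-independent call lowering: compute the return-value registers and
// per-argument flags, then delegate to the target's fastLowerCall hook.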
bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
CLI.clearIns();
SmallVector<EVT, 4> RetTys;
ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);
SmallVector<ISD::OutputArg, 4> Outs;
GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);
bool CanLowerReturn = TLI.CanLowerReturn(
CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());
if (!CanLowerReturn)
return false;
for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
EVT VT = RetTys[I];
MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
for (unsigned i = 0; i != NumRegs; ++i) {
ISD::InputArg MyFlags;
MyFlags.VT = RegisterVT;
MyFlags.ArgVT = VT;
MyFlags.Used = CLI.IsReturnValueUsed;
if (CLI.RetSExt)
MyFlags.Flags.setSExt();
if (CLI.RetZExt)
MyFlags.Flags.setZExt();
if (CLI.IsInReg)
MyFlags.Flags.setInReg();
CLI.Ins.push_back(MyFlags);
}
}
CLI.clearOuts();
for (auto &Arg : CLI.getArgs()) {
Type *FinalType = Arg.Ty;
if (Arg.IsByVal)
FinalType = Arg.IndirectType;
bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
FinalType, CLI.CallConv, CLI.IsVarArg, DL);
ISD::ArgFlagsTy Flags;
if (Arg.IsZExt)
Flags.setZExt();
if (Arg.IsSExt)
Flags.setSExt();
if (Arg.IsInReg)
Flags.setInReg();
if (Arg.IsSRet)
Flags.setSRet();
if (Arg.IsSwiftSelf)
Flags.setSwiftSelf();
if (Arg.IsSwiftAsync)
Flags.setSwiftAsync();
if (Arg.IsSwiftError)
Flags.setSwiftError();
if (Arg.IsCFGuardTarget)
Flags.setCFGuardTarget();
if (Arg.IsByVal)
Flags.setByVal();
if (Arg.IsInAlloca) {
Flags.setInAlloca();
Flags.setByVal();
}
if (Arg.IsPreallocated) {
Flags.setPreallocated();
Flags.setByVal();
}
MaybeAlign MemAlign = Arg.Alignment;
if (Arg.IsByVal || Arg.IsInAlloca || Arg.IsPreallocated) {
unsigned FrameSize = DL.getTypeAllocSize(Arg.IndirectType);
if (!MemAlign)
MemAlign = Align(TLI.getByValTypeAlignment(Arg.IndirectType, DL));
Flags.setByValSize(FrameSize);
} else if (!MemAlign) {
MemAlign = DL.getABITypeAlign(Arg.Ty);
}
Flags.setMemAlign(*MemAlign);
if (Arg.IsNest)
Flags.setNest();
if (NeedsRegBlock)
Flags.setInConsecutiveRegs();
Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
CLI.OutVals.push_back(Arg.Val);
CLI.OutFlags.push_back(Flags);
}
if (!fastLowerCall(CLI))
return false;
assert(CLI.Call && "No call instruction specified.");
CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);
if (CLI.NumResultRegs && CLI.CB)
updateValueMap(CLI.CB, CLI.ResultReg, CLI.NumResultRegs);
if (CLI.CB)
if (MDNode *MD = CLI.CB->getMetadata("heapallocsite"))
CLI.Call->setHeapAllocMarker(*MF, MD);
return true;
}
bool FastISel::lowerCall(const CallInst *CI) {
FunctionType *FuncTy = CI->getFunctionType();
Type *RetTy = CI->getType();
ArgListTy Args;
ArgListEntry Entry;
Args.reserve(CI->arg_size());
for (auto i = CI->arg_begin(), e = CI->arg_end(); i != e; ++i) {
Value *V = *i;
if (V->getType()->isEmptyTy())
continue;
Entry.Val = V;
Entry.Ty = V->getType();
Entry.setAttributes(CI, i - CI->arg_begin());
Args.push_back(Entry);
}
bool IsTailCall = CI->isTailCall();
if (IsTailCall && !isInTailCallPosition(*CI, TM))
IsTailCall = false;
if (IsTailCall && MF->getFunction()
.getFnAttribute("disable-tail-calls")
.getValueAsBool())
IsTailCall = false;
CallLoweringInfo CLI;
CLI.setCallee(RetTy, FuncTy, CI->getCalledOperand(), std::move(Args), *CI)
.setTailCall(IsTailCall);
diagnoseDontCall(*CI);
return lowerCallTo(CLI);
}
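// Select a call: empty inline asm (kept only for its side effects) is
// emitted directly as an INLINEASM, intrinsics go to selectIntrinsicCall,
// and everything else goes through lowerCall.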
bool FastISel::selectCall(const User *I) {
const CallInst *Call = cast<CallInst>(I);
if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledOperand())) {
if (!IA->getConstraintString().empty())
return false;
unsigned ExtraInfo = 0;
if (IA->hasSideEffects())
ExtraInfo |= InlineAsm::Extra_HasSideEffects;
if (IA->isAlignStack())
ExtraInfo |= InlineAsm::Extra_IsAlignStack;
if (Call->isConvergent())
ExtraInfo |= InlineAsm::Extra_IsConvergent;
ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::INLINEASM));
MIB.addExternalSymbol(IA->getAsmString().c_str());
MIB.addImm(ExtraInfo);
const MDNode *SrcLoc = Call->getMetadata("srcloc");
if (SrcLoc)
MIB.addMetadata(SrcLoc);
return true;
}
if (const auto *II = dyn_cast<IntrinsicInst>(Call))
return selectIntrinsicCall(II);
return lowerCall(Call);
}
bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
switch (II->getIntrinsicID()) {
default:
break;
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
case Intrinsic::donothing:
case Intrinsic::sideeffect:
case Intrinsic::assume:
case Intrinsic::experimental_noalias_scope_decl:
return true;
case Intrinsic::dbg_declare: {
const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
assert(DI->getVariable() && "Missing variable");
if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
<< " (!hasDebugInfo)\n");
return true;
}
const Value *Address = DI->getAddress();
if (!Address || isa<UndefValue>(Address)) {
LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
<< " (bad/undef address)\n");
return true;
}
const auto *Arg =
dyn_cast<Argument>(Address->stripInBoundsConstantOffsets());
if (Arg && FuncInfo.getArgumentFrameIndex(Arg) != INT_MAX)
return true;
Optional<MachineOperand> Op;
if (Register Reg = lookUpRegForValue(Address))
Op = MachineOperand::CreateReg(Reg, false);
if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
(!isa<AllocaInst>(Address) ||
!FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
false);
if (Op) {
assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
"Expected inlined-at fields to agree");
auto Builder =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::DBG_VALUE), true, *Op,
DI->getVariable(), DI->getExpression());
if (UseInstrRefDebugInfo && Op->isReg()) {
Builder->setDesc(TII.get(TargetOpcode::DBG_INSTR_REF));
Builder->getOperand(1).ChangeToImmediate(0);
auto *NewExpr =
DIExpression::prepend(DI->getExpression(), DIExpression::DerefBefore);
Builder->getOperand(3).setMetadata(NewExpr);
}
} else {
LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
<< " (no materialized reg for address)\n");
}
return true;
}
case Intrinsic::dbg_value: {
const DbgValueInst *DI = cast<DbgValueInst>(II);
const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
const Value *V = DI->getValue();
assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
"Expected inlined-at fields to agree");
if (!V || isa<UndefValue>(V) || DI->hasArgList()) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, false, 0U,
DI->getVariable(), DI->getExpression());
} else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
DIExpression *Expr = DI->getExpression();
if (Expr)
std::tie(Expr, CI) = Expr->constantFold(CI);
if (CI->getBitWidth() > 64)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addCImm(CI)
.addImm(0U)
.addMetadata(DI->getVariable())
.addMetadata(Expr);
else
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addImm(CI->getZExtValue())
.addImm(0U)
.addMetadata(DI->getVariable())
.addMetadata(Expr);
} else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addFPImm(CF)
.addImm(0U)
.addMetadata(DI->getVariable())
.addMetadata(DI->getExpression());
} else if (Register Reg = lookUpRegForValue(V)) {
bool IsIndirect = false;
auto Builder =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
DI->getVariable(), DI->getExpression());
if (UseInstrRefDebugInfo) {
Builder->setDesc(TII.get(TargetOpcode::DBG_INSTR_REF));
Builder->getOperand(1).ChangeToImmediate(0);
}
} else {
LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
}
return true;
}
case Intrinsic::dbg_label: {
const DbgLabelInst *DI = cast<DbgLabelInst>(II);
assert(DI->getLabel() && "Missing label");
if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
return true;
}
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::DBG_LABEL)).addMetadata(DI->getLabel());
return true;
}
case Intrinsic::objectsize:
llvm_unreachable("llvm.objectsize.* should have been lowered already");
case Intrinsic::is_constant:
llvm_unreachable("llvm.is.constant.* should have been lowered already");
case Intrinsic::launder_invariant_group:
case Intrinsic::strip_invariant_group:
case Intrinsic::expect: {
Register ResultReg = getRegForValue(II->getArgOperand(0));
if (!ResultReg)
return false;
updateValueMap(II, ResultReg);
return true;
}
case Intrinsic::experimental_stackmap:
return selectStackmap(II);
case Intrinsic::experimental_patchpoint_void:
case Intrinsic::experimental_patchpoint_i64:
return selectPatchpoint(II);
case Intrinsic::xray_customevent:
return selectXRayCustomEvent(II);
case Intrinsic::xray_typedevent:
return selectXRayTypedEvent(II);
}
return fastLowerIntrinsicCall(II);
}
bool FastISel::selectCast(const User *I, unsigned Opcode) {
EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
EVT DstVT = TLI.getValueType(DL, I->getType());
if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
!DstVT.isSimple())
return false;
if (!TLI.isTypeLegal(DstVT))
return false;
if (!TLI.isTypeLegal(SrcVT))
return false;
Register InputReg = getRegForValue(I->getOperand(0));
if (!InputReg)
return false;
Register ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
Opcode, InputReg);
if (!ResultReg)
return false;
updateValueMap(I, ResultReg);
return true;
}
bool FastISel::selectBitCast(const User *I) {
EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
EVT DstEVT = TLI.getValueType(DL, I->getType());
if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
!TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
return false;
MVT SrcVT = SrcEVT.getSimpleVT();
MVT DstVT = DstEVT.getSimpleVT();
Register Op0 = getRegForValue(I->getOperand(0));
if (!Op0) return false;
if (SrcVT == DstVT) {
updateValueMap(I, Op0);
return true;
}
Register ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0);
if (!ResultReg)
return false;
updateValueMap(I, ResultReg);
return true;
}
bool FastISel::selectFreeze(const User *I) {
Register Reg = getRegForValue(I->getOperand(0));
if (!Reg)
return false;
EVT ETy = TLI.getValueType(DL, I->getOperand(0)->getType());
if (ETy == MVT::Other || !TLI.isTypeLegal(ETy))
return false;
MVT Ty = ETy.getSimpleVT();
const TargetRegisterClass *TyRegClass = TLI.getRegClassFor(Ty);
Register ResultReg = createResultReg(TyRegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg);
updateValueMap(I, ResultReg);
return true;
}
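// Remove local value instructions emitted after SavedLastLocalValue; used to
// roll back local values materialized for an instruction that failed to
// select.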
void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue) {
MachineInstr *CurLastLocalValue = getLastLocalValue();
if (CurLastLocalValue != SavedLastLocalValue) {
MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
if (SavedLastLocalValue)
++FirstDeadInst;
else
FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
setLastLocalValue(SavedLastLocalValue);
removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
}
}
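// Select a single instruction: flush pending local values, handle successor
// PHIs for terminators, then try the target-independent selector followed by
// the target-specific one, erasing any dead code left by a failed attempt.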
bool FastISel::selectInstruction(const Instruction *I) {
flushLocalValueMap();
MachineInstr *SavedLastLocalValue = getLastLocalValue();
if (I->isTerminator()) {
if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
removeDeadLocalValueCode(SavedLastLocalValue);
return false;
}
}
if (auto *Call = dyn_cast<CallBase>(I))
for (unsigned i = 0, e = Call->getNumOperandBundles(); i != e; ++i)
if (Call->getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
return false;
DbgLoc = I->getDebugLoc();
SavedInsertPt = FuncInfo.InsertPt;
if (const auto *Call = dyn_cast<CallInst>(I)) {
const Function *F = Call->getCalledFunction();
LibFunc Func;
if (F && !F->hasLocalLinkage() && F->hasName() &&
LibInfo->getLibFunc(F->getName(), Func) &&
LibInfo->hasOptimizedCodeGen(Func))
return false;
if (F && F->getIntrinsicID() == Intrinsic::trap &&
Call->hasFnAttr("trap-func-name"))
return false;
}
if (!SkipTargetIndependentISel) {
if (selectOperator(I, I->getOpcode())) {
++NumFastIselSuccessIndependent;
DbgLoc = DebugLoc();
return true;
}
recomputeInsertPt();
if (SavedInsertPt != FuncInfo.InsertPt)
removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
SavedInsertPt = FuncInfo.InsertPt;
}
if (fastSelectInstruction(I)) {
++NumFastIselSuccessTarget;
DbgLoc = DebugLoc();
return true;
}
recomputeInsertPt();
if (SavedInsertPt != FuncInfo.InsertPt)
removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
DbgLoc = DebugLoc();
if (I->isTerminator()) {
removeDeadLocalValueCode(SavedLastLocalValue);
FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
}
return false;
}
void FastISel::fastEmitBranch(MachineBasicBlock *MSucc,
const DebugLoc &DbgLoc) {
  if (FuncInfo.MBB->getBasicBlock()->sizeWithoutDebug() > 1 &&
      FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // For more accurate line information if this is the only non-debug
    // instruction in the block then emit it, otherwise we have the
    // unconditional fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
                     SmallVector<MachineOperand, 0>(), DbgLoc);
  }
if (FuncInfo.BPI) {
auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
FuncInfo.MBB->addSuccessor(MSucc, BranchProbability);
} else
FuncInfo.MBB->addSuccessorWithoutProb(MSucc);
}
void FastISel::finishCondBranch(const BasicBlock *BranchBB,
MachineBasicBlock *TrueMBB,
MachineBasicBlock *FalseMBB) {
if (TrueMBB != FalseMBB) {
if (FuncInfo.BPI) {
auto BranchProbability =
FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
FuncInfo.MBB->addSuccessor(TrueMBB, BranchProbability);
} else
FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
}
fastEmitBranch(FalseMBB, DbgLoc);
}
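// Select FNeg. If the target has no native FNEG for this type, fall back to
// bitcasting to an integer, XORing the sign bit, and bitcasting back.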
bool FastISel::selectFNeg(const User *I, const Value *In) {
Register OpReg = getRegForValue(In);
if (!OpReg)
return false;
EVT VT = TLI.getValueType(DL, I->getType());
Register ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
OpReg);
if (ResultReg) {
updateValueMap(I, ResultReg);
return true;
}
if (VT.getSizeInBits() > 64)
return false;
EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
if (!TLI.isTypeLegal(IntVT))
return false;
Register IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
ISD::BITCAST, OpReg);
if (!IntReg)
return false;
Register IntResultReg = fastEmit_ri_(
IntVT.getSimpleVT(), ISD::XOR, IntReg,
UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
if (!IntResultReg)
return false;
ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
IntResultReg);
if (!ResultReg)
return false;
updateValueMap(I, ResultReg);
return true;
}
bool FastISel::selectExtractValue(const User *U) {
const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
if (!EVI)
return false;
EVT RealVT = TLI.getValueType(DL, EVI->getType(), true);
if (!RealVT.isSimple())
return false;
MVT VT = RealVT.getSimpleVT();
if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
return false;
const Value *Op0 = EVI->getOperand(0);
Type *AggTy = Op0->getType();
unsigned ResultReg;
DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(Op0);
if (I != FuncInfo.ValueMap.end())
ResultReg = I->second;
else if (isa<Instruction>(Op0))
ResultReg = FuncInfo.InitializeRegForValue(Op0);
else
return false;
unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());
SmallVector<EVT, 4> AggValueVTs;
ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);
for (unsigned i = 0; i < VTIndex; i++)
ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);
updateValueMap(EVI, ResultReg);
return true;
}
bool FastISel::selectOperator(const User *I, unsigned Opcode) {
switch (Opcode) {
case Instruction::Add:
return selectBinaryOp(I, ISD::ADD);
case Instruction::FAdd:
return selectBinaryOp(I, ISD::FADD);
case Instruction::Sub:
return selectBinaryOp(I, ISD::SUB);
case Instruction::FSub:
return selectBinaryOp(I, ISD::FSUB);
case Instruction::Mul:
return selectBinaryOp(I, ISD::MUL);
case Instruction::FMul:
return selectBinaryOp(I, ISD::FMUL);
case Instruction::SDiv:
return selectBinaryOp(I, ISD::SDIV);
case Instruction::UDiv:
return selectBinaryOp(I, ISD::UDIV);
case Instruction::FDiv:
return selectBinaryOp(I, ISD::FDIV);
case Instruction::SRem:
return selectBinaryOp(I, ISD::SREM);
case Instruction::URem:
return selectBinaryOp(I, ISD::UREM);
case Instruction::FRem:
return selectBinaryOp(I, ISD::FREM);
case Instruction::Shl:
return selectBinaryOp(I, ISD::SHL);
case Instruction::LShr:
return selectBinaryOp(I, ISD::SRL);
case Instruction::AShr:
return selectBinaryOp(I, ISD::SRA);
case Instruction::And:
return selectBinaryOp(I, ISD::AND);
case Instruction::Or:
return selectBinaryOp(I, ISD::OR);
case Instruction::Xor:
return selectBinaryOp(I, ISD::XOR);
case Instruction::FNeg:
return selectFNeg(I, I->getOperand(0));
case Instruction::GetElementPtr:
return selectGetElementPtr(I);
case Instruction::Br: {
const BranchInst *BI = cast<BranchInst>(I);
if (BI->isUnconditional()) {
const BasicBlock *LLVMSucc = BI->getSuccessor(0);
MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
fastEmitBranch(MSucc, BI->getDebugLoc());
return true;
}
return false;
}
case Instruction::Unreachable:
if (TM.Options.TrapUnreachable)
return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
else
return true;
case Instruction::Alloca:
if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
return true;
return false;
case Instruction::Call:
if (TM.getTargetTriple().isOSAIX() && !isa<IntrinsicInst>(I))
return false;
return selectCall(I);
case Instruction::BitCast:
return selectBitCast(I);
case Instruction::FPToSI:
return selectCast(I, ISD::FP_TO_SINT);
case Instruction::ZExt:
return selectCast(I, ISD::ZERO_EXTEND);
case Instruction::SExt:
return selectCast(I, ISD::SIGN_EXTEND);
case Instruction::Trunc:
return selectCast(I, ISD::TRUNCATE);
case Instruction::SIToFP:
return selectCast(I, ISD::SINT_TO_FP);
  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
EVT DstVT = TLI.getValueType(DL, I->getType());
if (DstVT.bitsGT(SrcVT))
return selectCast(I, ISD::ZERO_EXTEND);
if (DstVT.bitsLT(SrcVT))
return selectCast(I, ISD::TRUNCATE);
Register Reg = getRegForValue(I->getOperand(0));
if (!Reg)
return false;
updateValueMap(I, Reg);
return true;
}
case Instruction::ExtractValue:
return selectExtractValue(I);
case Instruction::Freeze:
return selectFreeze(I);
case Instruction::PHI:
llvm_unreachable("FastISel shouldn't visit PHI nodes!");
default:
return false;
}
}
FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
const TargetLibraryInfo *LibInfo,
bool SkipTargetIndependentISel)
: FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
TII(*MF->getSubtarget().getInstrInfo()),
TLI(*MF->getSubtarget().getTargetLowering()),
TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
SkipTargetIndependentISel(SkipTargetIndependentISel) {}
FastISel::~FastISel() = default;
bool FastISel::fastLowerArguments() { return false; }
bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }
bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
  return false;
}
unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }
unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/) {
  return 0;
}
unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
                               unsigned /*Op1*/) {
  return 0;
}
unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}
unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
                              const ConstantFP * /*FPImm*/) {
  return 0;
}
unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
                               uint64_t /*Imm*/) {
  return 0;
}
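// This method is a wrapper of fastEmit_ri. It first tries to emit an
// instruction with an immediate operand using fastEmit_ri. If that fails, it
// materializes the immediate into a register and tries fastEmit_rr instead.
// Multiplies and unsigned divides by a power of two are first strength-
// reduced to shifts.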
Register FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
uint64_t Imm, MVT ImmType) {
if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
Opcode = ISD::SHL;
Imm = Log2_64(Imm);
} else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
Opcode = ISD::SRL;
Imm = Log2_64(Imm);
}
if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
Imm >= VT.getSizeInBits())
return 0;
Register ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Imm);
if (ResultReg)
return ResultReg;
Register MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
if (!MaterialReg) {
IntegerType *ITy =
IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
if (!MaterialReg)
return 0;
}
return fastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
}
Register FastISel::createResultReg(const TargetRegisterClass *RC) {
return MRI.createVirtualRegister(RC);
}
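// Constrain Op to the register class required for operand OpNum of II,
// inserting a COPY into a fresh register when the existing virtual register
// cannot be constrained directly.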
Register FastISel::constrainOperandRegClass(const MCInstrDesc &II, Register Op,
unsigned OpNum) {
if (Op.isVirtual()) {
const TargetRegisterClass *RegClass =
TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
if (!MRI.constrainRegClass(Op, RegClass)) {
Register NewOp = createResultReg(RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
return NewOp;
}
}
return Op;
}
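// The fastEmitInst_* helpers below build a machine instruction with the
// given opcode and operands. If the instruction has no explicit defs, the
// result is copied out of its first implicit def instead.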
Register FastISel::fastEmitInst_(unsigned MachineInstOpcode,
const TargetRegisterClass *RC) {
Register ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
return ResultReg;
}
Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0) {
const MCInstrDesc &II = TII.get(MachineInstOpcode);
Register ResultReg = createResultReg(RC);
Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0);
else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
unsigned Op1) {
const MCInstrDesc &II = TII.get(MachineInstOpcode);
Register ResultReg = createResultReg(RC);
Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0)
.addReg(Op1);
else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0)
.addReg(Op1);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
unsigned Op1, unsigned Op2) {
const MCInstrDesc &II = TII.get(MachineInstOpcode);
Register ResultReg = createResultReg(RC);
Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0)
.addReg(Op1)
.addReg(Op2);
else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0)
.addReg(Op1)
.addReg(Op2);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
uint64_t Imm) {
const MCInstrDesc &II = TII.get(MachineInstOpcode);
Register ResultReg = createResultReg(RC);
Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0)
.addImm(Imm);
else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0)
.addImm(Imm);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
Register FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
uint64_t Imm1, uint64_t Imm2) {
const MCInstrDesc &II = TII.get(MachineInstOpcode);
Register ResultReg = createResultReg(RC);
Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0)
.addImm(Imm1)
.addImm(Imm2);
else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0)
.addImm(Imm1)
.addImm(Imm2);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
Register FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
const ConstantFP *FPImm) {
const MCInstrDesc &II = TII.get(MachineInstOpcode);
Register ResultReg = createResultReg(RC);
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addFPImm(FPImm);
else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addFPImm(FPImm);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
Register FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
unsigned Op1, uint64_t Imm) {
const MCInstrDesc &II = TII.get(MachineInstOpcode);
Register ResultReg = createResultReg(RC);
Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0)
.addReg(Op1)
.addImm(Imm);
else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0)
.addReg(Op1)
.addImm(Imm);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
Register FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, uint64_t Imm) {
Register ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addImm(Imm);
else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
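// Emit a subregister extract as a COPY from Op0:Idx, constraining Op0 to a
// register class that supports the subregister index.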
Register FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
uint32_t Idx) {
Register ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
assert(Register::isVirtualRegister(Op0) &&
"Cannot yet extract from physregs");
const TargetRegisterClass *RC = MRI.getRegClass(Op0);
MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
ResultReg).addReg(Op0, 0, Idx);
return ResultReg;
}
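// Emit a zero-extension from i1 by ANDing with 1.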
Register FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0) {
return fastEmit_ri(VT, VT, ISD::AND, Op0, 1);
}
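// For each PHI node in the successor blocks of LLVMBB, record the register
// holding the incoming value from this block in PHINodesToUpdate; the actual
// PHI operands are filled in after instruction selection. On failure the
// list is rolled back to its original size.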
bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
const Instruction *TI = LLVMBB->getTerminator();
SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();
for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
const BasicBlock *SuccBB = TI->getSuccessor(succ);
if (!isa<PHINode>(SuccBB->begin()))
continue;
MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
if (!SuccsHandled.insert(SuccMBB).second)
continue;
MachineBasicBlock::iterator MBBI = SuccMBB->begin();
for (const PHINode &PN : SuccBB->phis()) {
if (PN.use_empty())
continue;
EVT VT = TLI.getValueType(DL, PN.getType(), true);
if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
return false;
}
}
const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
DbgLoc = DebugLoc();
if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
DbgLoc = Inst->getDebugLoc();
Register Reg = getRegForValue(PHIOp);
if (!Reg) {
FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
return false;
}
FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
DbgLoc = DebugLoc();
}
}
return true;
}
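// Try to sink LI to its use in FoldInst and fold it into that instruction,
// walking a short single-use chain between the two. Volatile loads, loads
// with multiple machine uses, and registers with pending fixups are not
// folded.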
bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
assert(LI->hasOneUse() &&
"tryToFoldLoad expected a LoadInst with a single use");
unsigned MaxUsers = 6;
const Instruction *TheUser = LI->user_back();
  while (TheUser != FoldInst && // Scan up until we find the def.
         TheUser->getParent() == FoldInst->getParent() &&
         --MaxUsers) { // Don't scan too far.
    // If there are multiple or no uses of this instruction, then bail out.
    if (!TheUser->hasOneUse())
      return false;
    TheUser = TheUser->user_back();
  }
if (TheUser != FoldInst)
return false;
if (LI->isVolatile())
return false;
Register LoadReg = getRegForValue(LI);
if (!LoadReg)
return false;
if (!MRI.hasOneUse(LoadReg))
return false;
if (FuncInfo.RegsWithFixups.contains(LoadReg))
return false;
MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
MachineInstr *User = RI->getParent();
FuncInfo.InsertPt = User;
FuncInfo.MBB = User->getParent();
return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
}
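// Return true if Add is a constant-offset add in the current block whose
// result has the same width as the GEP, so it can be folded into the GEP's
// addressing computation.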
bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
if (!isa<AddOperator>(Add))
return false;
if (DL.getTypeSizeInBits(GEP->getType()) !=
DL.getTypeSizeInBits(Add->getType()))
return false;
if (isa<Instruction>(Add) &&
FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
return false;
return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
}
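// Build a MachineMemOperand for a load or store, deriving flags from the
// instruction's metadata (nontemporal, invariant.load, dereferenceable,
// range) and defaulting to the ABI alignment of the accessed type.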
MachineMemOperand *
FastISel::createMachineMemOperandFor(const Instruction *I) const {
const Value *Ptr;
Type *ValTy;
MaybeAlign Alignment;
MachineMemOperand::Flags Flags;
bool IsVolatile;
if (const auto *LI = dyn_cast<LoadInst>(I)) {
Alignment = LI->getAlign();
IsVolatile = LI->isVolatile();
Flags = MachineMemOperand::MOLoad;
Ptr = LI->getPointerOperand();
ValTy = LI->getType();
} else if (const auto *SI = dyn_cast<StoreInst>(I)) {
Alignment = SI->getAlign();
IsVolatile = SI->isVolatile();
Flags = MachineMemOperand::MOStore;
Ptr = SI->getPointerOperand();
ValTy = SI->getValueOperand()->getType();
} else
return nullptr;
bool IsNonTemporal = I->hasMetadata(LLVMContext::MD_nontemporal);
bool IsInvariant = I->hasMetadata(LLVMContext::MD_invariant_load);
bool IsDereferenceable = I->hasMetadata(LLVMContext::MD_dereferenceable);
const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);
AAMDNodes AAInfo = I->getAAMetadata();
if (!Alignment) Alignment = DL.getABITypeAlign(ValTy);
unsigned Size = DL.getTypeStoreSize(ValTy);
if (IsVolatile)
Flags |= MachineMemOperand::MOVolatile;
if (IsNonTemporal)
Flags |= MachineMemOperand::MONonTemporal;
if (IsDereferenceable)
Flags |= MachineMemOperand::MODereferenceable;
if (IsInvariant)
Flags |= MachineMemOperand::MOInvariant;
return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
*Alignment, AAInfo, Ranges);
}
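// If both operands of the compare are identical, fold the predicate to one
// that only depends on whether the operand is NaN, or to a constant result
// for integer compares.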
CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
CmpInst::Predicate Predicate = CI->getPredicate();
if (CI->getOperand(0) != CI->getOperand(1))
return Predicate;
switch (Predicate) {
default: llvm_unreachable("Invalid predicate!");
case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
case CmpInst::FCMP_OEQ: Predicate = CmpInst::FCMP_ORD; break;
case CmpInst::FCMP_OGT: Predicate = CmpInst::FCMP_FALSE; break;
case CmpInst::FCMP_OGE: Predicate = CmpInst::FCMP_ORD; break;
case CmpInst::FCMP_OLT: Predicate = CmpInst::FCMP_FALSE; break;
case CmpInst::FCMP_OLE: Predicate = CmpInst::FCMP_ORD; break;
case CmpInst::FCMP_ONE: Predicate = CmpInst::FCMP_FALSE; break;
case CmpInst::FCMP_ORD: Predicate = CmpInst::FCMP_ORD; break;
case CmpInst::FCMP_UNO: Predicate = CmpInst::FCMP_UNO; break;
case CmpInst::FCMP_UEQ: Predicate = CmpInst::FCMP_TRUE; break;
case CmpInst::FCMP_UGT: Predicate = CmpInst::FCMP_UNO; break;
case CmpInst::FCMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
case CmpInst::FCMP_ULT: Predicate = CmpInst::FCMP_UNO; break;
case CmpInst::FCMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
case CmpInst::FCMP_UNE: Predicate = CmpInst::FCMP_UNO; break;
case CmpInst::FCMP_TRUE: Predicate = CmpInst::FCMP_TRUE; break;
case CmpInst::ICMP_EQ: Predicate = CmpInst::FCMP_TRUE; break;
case CmpInst::ICMP_NE: Predicate = CmpInst::FCMP_FALSE; break;
case CmpInst::ICMP_UGT: Predicate = CmpInst::FCMP_FALSE; break;
case CmpInst::ICMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
case CmpInst::ICMP_ULT: Predicate = CmpInst::FCMP_FALSE; break;
case CmpInst::ICMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
case CmpInst::ICMP_SGT: Predicate = CmpInst::FCMP_FALSE; break;
case CmpInst::ICMP_SGE: Predicate = CmpInst::FCMP_TRUE; break;
case CmpInst::ICMP_SLT: Predicate = CmpInst::FCMP_FALSE; break;
case CmpInst::ICMP_SLE: Predicate = CmpInst::FCMP_TRUE; break;
}
return Predicate;
}