#include "VEInstrInfo.h"
#include "VE.h"
#include "VEMachineFunctionInfo.h"
#include "VESubtarget.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#define DEBUG_TYPE "ve-instr-info"
using namespace llvm;
#define GET_INSTRINFO_CTOR_DTOR
#include "VEGenInstrInfo.inc"
void VEInstrInfo::anchor() {}
VEInstrInfo::VEInstrInfo(VESubtarget &ST)
: VEGenInstrInfo(VE::ADJCALLSTACKDOWN, VE::ADJCALLSTACKUP), RI() {}
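
// VE condition codes place the integer comparisons first in the enumeration;
// everything from CC_AF onward is an always/floating-point condition, so a
// simple ordering check suffices here.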
static bool IsIntegerCC(unsigned CC) { return (CC < VECC::CC_AF); }
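// Map a condition code to the code that takes the opposite branch, e.g. CC_IG
// (integer greater) becomes CC_ILE (integer less-or-equal).  Floating-point
// codes flip to their NAN-aware counterparts so that unordered comparisons end
// up on the correct side of the inverted branch.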
static VECC::CondCode GetOppositeBranchCondition(VECC::CondCode CC) {
switch (CC) {
case VECC::CC_IG:
return VECC::CC_ILE;
case VECC::CC_IL:
return VECC::CC_IGE;
case VECC::CC_INE:
return VECC::CC_IEQ;
case VECC::CC_IEQ:
return VECC::CC_INE;
case VECC::CC_IGE:
return VECC::CC_IL;
case VECC::CC_ILE:
return VECC::CC_IG;
case VECC::CC_AF:
return VECC::CC_AT;
case VECC::CC_G:
return VECC::CC_LENAN;
case VECC::CC_L:
return VECC::CC_GENAN;
case VECC::CC_NE:
return VECC::CC_EQNAN;
case VECC::CC_EQ:
return VECC::CC_NENAN;
case VECC::CC_GE:
return VECC::CC_LNAN;
case VECC::CC_LE:
return VECC::CC_GNAN;
case VECC::CC_NUM:
return VECC::CC_NAN;
case VECC::CC_NAN:
return VECC::CC_NUM;
case VECC::CC_GNAN:
return VECC::CC_LE;
case VECC::CC_LNAN:
return VECC::CC_GE;
case VECC::CC_NENAN:
return VECC::CC_EQ;
case VECC::CC_EQNAN:
return VECC::CC_NE;
case VECC::CC_GENAN:
return VECC::CC_L;
case VECC::CC_LENAN:
return VECC::CC_G;
case VECC::CC_AT:
return VECC::CC_AF;
case VECC::UNKNOWN:
return VECC::UNKNOWN;
}
llvm_unreachable("Invalid cond code");
}
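
// Opcode classification helpers used by branch analysis below.  Only the
// "long" relative forms (BRCFL*/BCFL*) are expected for unconditional and
// indirect branches; the asserts document that the word/double/float "always"
// variants are never emitted by this backend.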
static bool isUncondBranchOpcode(int Opc) {
using namespace llvm::VE;
#define BRKIND(NAME) (Opc == NAME##a || Opc == NAME##a_nt || Opc == NAME##a_t)
assert(!BRKIND(BRCFW) && !BRKIND(BRCFD) && !BRKIND(BRCFS) &&
"Branch relative word/double/float always instructions should not be "
"used!");
return BRKIND(BRCFL);
#undef BRKIND
}
static bool isCondBranchOpcode(int Opc) {
using namespace llvm::VE;
#define BRKIND(NAME) \
(Opc == NAME##rr || Opc == NAME##rr_nt || Opc == NAME##rr_t || \
Opc == NAME##ir || Opc == NAME##ir_nt || Opc == NAME##ir_t)
return BRKIND(BRCFL) || BRKIND(BRCFW) || BRKIND(BRCFD) || BRKIND(BRCFS);
#undef BRKIND
}
static bool isIndirectBranchOpcode(int Opc) {
using namespace llvm::VE;
#define BRKIND(NAME) \
(Opc == NAME##ari || Opc == NAME##ari_nt || Opc == NAME##ari_t)
assert(!BRKIND(BCFW) && !BRKIND(BCFD) && !BRKIND(BCFS) &&
"Branch word/double/float always instructions should not be used!");
return BRKIND(BCFL);
#undef BRKIND
}
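
// Split a conditional branch instruction into its condition operands
// (condition code, compare operands) and its target block.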
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
SmallVectorImpl<MachineOperand> &Cond) {
Cond.push_back(MachineOperand::CreateImm(LastInst->getOperand(0).getImm()));
Cond.push_back(LastInst->getOperand(1));
Cond.push_back(LastInst->getOperand(2));
Target = LastInst->getOperand(3).getMBB();
}
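
// analyzeBranch follows the usual TargetInstrInfo contract: return false and
// fill in TBB/FBB/Cond when the block's terminators are understood, return
// true when they are not.  With AllowModify set, unconditional branches that
// can never be taken (a second branch after another unconditional branch, or
// one following an indirect branch) are deleted on the fly.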
bool VEInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const {
MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
if (I == MBB.end())
return false;
if (!isUnpredicatedTerminator(*I))
return false;
MachineInstr *LastInst = &*I;
unsigned LastOpc = LastInst->getOpcode();
if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
if (isUncondBranchOpcode(LastOpc)) {
TBB = LastInst->getOperand(0).getMBB();
return false;
}
if (isCondBranchOpcode(LastOpc)) {
parseCondBranch(LastInst, TBB, Cond);
return false;
}
    return true;
  }
MachineInstr *SecondLastInst = &*I;
unsigned SecondLastOpc = SecondLastInst->getOpcode();
if (AllowModify && isUncondBranchOpcode(LastOpc)) {
while (isUncondBranchOpcode(SecondLastOpc)) {
LastInst->eraseFromParent();
LastInst = SecondLastInst;
LastOpc = LastInst->getOpcode();
if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
TBB = LastInst->getOperand(0).getMBB();
return false;
}
SecondLastInst = &*I;
SecondLastOpc = SecondLastInst->getOpcode();
}
}
if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
return true;
if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
parseCondBranch(SecondLastInst, TBB, Cond);
FBB = LastInst->getOperand(0).getMBB();
return false;
}
if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
TBB = SecondLastInst->getOperand(0).getMBB();
return false;
}
if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
I = LastInst;
if (AllowModify)
I->eraseFromParent();
return true;
}
return true;
}
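
// insertBranch emits either a single unconditional BRCFLa_t or a conditional
// compare-and-branch chosen from the condition kind and operand width (BRCFW/
// BRCFL for 32/64-bit integer, BRCFS/BRCFD for float/double), optionally
// followed by an unconditional branch to the false block.  The return value is
// the number of branch instructions inserted.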
unsigned VEInstrInfo::insertBranch(MachineBasicBlock &MBB,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
ArrayRef<MachineOperand> Cond,
const DebugLoc &DL, int *BytesAdded) const {
assert(TBB && "insertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 3 || Cond.size() == 0) &&
"VE branch conditions should have three component!");
assert(!BytesAdded && "code size not handled");
if (Cond.empty()) {
assert(!FBB && "Unconditional branch with multiple successors!");
BuildMI(&MBB, DL, get(VE::BRCFLa_t))
.addMBB(TBB);
return 1;
}
assert(Cond[0].isImm() && Cond[2].isReg() && "not implemented");
unsigned opc[2];
const TargetRegisterInfo *TRI = &getRegisterInfo();
MachineFunction *MF = MBB.getParent();
const MachineRegisterInfo &MRI = MF->getRegInfo();
Register Reg = Cond[2].getReg();
if (IsIntegerCC(Cond[0].getImm())) {
if (TRI->getRegSizeInBits(Reg, MRI) == 32) {
opc[0] = VE::BRCFWir;
opc[1] = VE::BRCFWrr;
} else {
opc[0] = VE::BRCFLir;
opc[1] = VE::BRCFLrr;
}
} else {
if (TRI->getRegSizeInBits(Reg, MRI) == 32) {
opc[0] = VE::BRCFSir;
opc[1] = VE::BRCFSrr;
} else {
opc[0] = VE::BRCFDir;
opc[1] = VE::BRCFDrr;
}
}
if (Cond[1].isImm()) {
BuildMI(&MBB, DL, get(opc[0]))
        .add(Cond[0])
        .add(Cond[1])
        .add(Cond[2])
        .addMBB(TBB);
} else {
BuildMI(&MBB, DL, get(opc[1]))
.add(Cond[0])
.add(Cond[1])
.add(Cond[2])
.addMBB(TBB);
}
if (!FBB)
return 1;
BuildMI(&MBB, DL, get(VE::BRCFLa_t))
.addMBB(FBB);
return 2;
}
unsigned VEInstrInfo::removeBranch(MachineBasicBlock &MBB,
int *BytesRemoved) const {
assert(!BytesRemoved && "code size not handled");
MachineBasicBlock::iterator I = MBB.end();
unsigned Count = 0;
while (I != MBB.begin()) {
--I;
if (I->isDebugValue())
continue;
if (!isUncondBranchOpcode(I->getOpcode()) &&
!isCondBranchOpcode(I->getOpcode()))
break;
I->eraseFromParent();
I = MBB.end();
++Count;
}
return Count;
}
bool VEInstrInfo::reverseBranchCondition(
SmallVectorImpl<MachineOperand> &Cond) const {
VECC::CondCode CC = static_cast<VECC::CondCode>(Cond[0].getImm());
Cond[0].setImm(GetOppositeBranchCondition(CC));
return false;
}
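
// Register-copy helpers.  IsAliasOfSX matches the register classes that are
// views of the 64-bit scalar SX registers (I32, I64 and F32 all alias them).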
static bool IsAliasOfSX(Register Reg) {
return VE::I32RegClass.contains(Reg) || VE::I64RegClass.contains(Reg) ||
VE::F32RegClass.contains(Reg);
}
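// Copy a register that is only addressable through its sub-registers (VM512
// pairs, F128 even/odd halves) by copying each sub-register in turn, then
// marking the whole destination as defined (and the whole source as killed)
// on the final instruction so liveness stays accurate.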
static void copyPhysSubRegs(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, const DebugLoc &DL,
MCRegister DestReg, MCRegister SrcReg, bool KillSrc,
const MCInstrDesc &MCID, unsigned int NumSubRegs,
const unsigned *SubRegIdx,
const TargetRegisterInfo *TRI) {
MachineInstr *MovMI = nullptr;
for (unsigned Idx = 0; Idx != NumSubRegs; ++Idx) {
Register SubDest = TRI->getSubReg(DestReg, SubRegIdx[Idx]);
Register SubSrc = TRI->getSubReg(SrcReg, SubRegIdx[Idx]);
assert(SubDest && SubSrc && "Bad sub-register");
if (MCID.getOpcode() == VE::ORri) {
MachineInstrBuilder MIB =
BuildMI(MBB, I, DL, MCID, SubDest).addReg(SubSrc).addImm(0);
MovMI = MIB.getInstr();
} else if (MCID.getOpcode() == VE::ANDMmm) {
MachineInstrBuilder MIB =
BuildMI(MBB, I, DL, MCID, SubDest).addReg(VE::VM0).addReg(SubSrc);
MovMI = MIB.getInstr();
} else {
llvm_unreachable("Unexpected reg-to-reg copy instruction");
}
}
MovMI->addRegisterDefined(DestReg, TRI);
if (KillSrc)
MovMI->addRegisterKilled(SrcReg, TRI, true);
}
void VEInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, const DebugLoc &DL,
MCRegister DestReg, MCRegister SrcReg,
bool KillSrc) const {
if (IsAliasOfSX(SrcReg) && IsAliasOfSX(DestReg)) {
BuildMI(MBB, I, DL, get(VE::ORri), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc))
.addImm(0);
} else if (VE::V64RegClass.contains(DestReg, SrcReg)) {
const TargetRegisterInfo *TRI = &getRegisterInfo();
Register TmpReg = VE::SX16;
Register SubTmp = TRI->getSubReg(TmpReg, VE::sub_i32);
BuildMI(MBB, I, DL, get(VE::LEAzii), TmpReg)
.addImm(0)
.addImm(0)
.addImm(256);
MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(VE::VORmvl), DestReg)
                                  .addImm(M1(0))
                                  .addReg(SrcReg, getKillRegState(KillSrc))
.addReg(SubTmp, getKillRegState(true));
MIB.getInstr()->addRegisterKilled(TmpReg, TRI, true);
} else if (VE::VMRegClass.contains(DestReg, SrcReg)) {
BuildMI(MBB, I, DL, get(VE::ANDMmm), DestReg)
.addReg(VE::VM0)
.addReg(SrcReg, getKillRegState(KillSrc));
} else if (VE::VM512RegClass.contains(DestReg, SrcReg)) {
const unsigned SubRegIdx[] = {VE::sub_vm_even, VE::sub_vm_odd};
unsigned int NumSubRegs = 2;
copyPhysSubRegs(MBB, I, DL, DestReg, SrcReg, KillSrc, get(VE::ANDMmm),
NumSubRegs, SubRegIdx, &getRegisterInfo());
} else if (VE::F128RegClass.contains(DestReg, SrcReg)) {
const unsigned SubRegIdx[] = {VE::sub_even, VE::sub_odd};
unsigned int NumSubRegs = 2;
copyPhysSubRegs(MBB, I, DL, DestReg, SrcReg, KillSrc, get(VE::ORri),
NumSubRegs, SubRegIdx, &getRegisterInfo());
} else {
const TargetRegisterInfo *TRI = &getRegisterInfo();
dbgs() << "Impossible reg-to-reg copy from " << printReg(SrcReg, TRI)
<< " to " << printReg(DestReg, TRI) << "\n";
llvm_unreachable("Impossible reg-to-reg copy");
}
}
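
// If MI is a direct load from a stack slot (zero index and offset), return the
// destination register and set FrameIndex to the slot; otherwise return 0.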
unsigned VEInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
  if (MI.getOpcode() == VE::LDrii ||    // I64
      MI.getOpcode() == VE::LDLSXrii || // I32
      MI.getOpcode() == VE::LDUrii ||   // F32
      MI.getOpcode() == VE::LDQrii ||   // F128 (pseudo)
      MI.getOpcode() == VE::LDVMrii ||  // VM (pseudo)
      MI.getOpcode() == VE::LDVM512rii  // VM512 (pseudo)
  ) {
if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
MI.getOperand(2).getImm() == 0 && MI.getOperand(3).isImm() &&
MI.getOperand(3).getImm() == 0) {
FrameIndex = MI.getOperand(1).getIndex();
return MI.getOperand(0).getReg();
}
}
return 0;
}
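
// If MI is a direct store to a stack slot (zero index and offset), return the
// stored register and set FrameIndex to the slot; otherwise return 0.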
unsigned VEInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
  if (MI.getOpcode() == VE::STrii ||    // I64
      MI.getOpcode() == VE::STLrii ||   // I32
      MI.getOpcode() == VE::STUrii ||   // F32
      MI.getOpcode() == VE::STQrii ||   // F128 (pseudo)
      MI.getOpcode() == VE::STVMrii ||  // VM (pseudo)
      MI.getOpcode() == VE::STVM512rii  // VM512 (pseudo)
  ) {
if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
MI.getOperand(1).getImm() == 0 && MI.getOperand(2).isImm() &&
MI.getOperand(2).getImm() == 0) {
FrameIndex = MI.getOperand(0).getIndex();
return MI.getOperand(3).getReg();
}
}
return 0;
}
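
// Spill/reload helpers: choose the store or load opcode that matches the
// register class and attach a fixed-stack MachineMemOperand so later passes
// can reason about the memory access.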
void VEInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
DebugLoc DL;
if (I != MBB.end())
DL = I->getDebugLoc();
MachineFunction *MF = MBB.getParent();
const MachineFrameInfo &MFI = MF->getFrameInfo();
MachineMemOperand *MMO = MF->getMachineMemOperand(
MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
if (RC == &VE::I64RegClass) {
BuildMI(MBB, I, DL, get(VE::STrii))
.addFrameIndex(FI)
.addImm(0)
.addImm(0)
.addReg(SrcReg, getKillRegState(isKill))
.addMemOperand(MMO);
} else if (RC == &VE::I32RegClass) {
BuildMI(MBB, I, DL, get(VE::STLrii))
.addFrameIndex(FI)
.addImm(0)
.addImm(0)
.addReg(SrcReg, getKillRegState(isKill))
.addMemOperand(MMO);
} else if (RC == &VE::F32RegClass) {
BuildMI(MBB, I, DL, get(VE::STUrii))
.addFrameIndex(FI)
.addImm(0)
.addImm(0)
.addReg(SrcReg, getKillRegState(isKill))
.addMemOperand(MMO);
} else if (VE::F128RegClass.hasSubClassEq(RC)) {
BuildMI(MBB, I, DL, get(VE::STQrii))
.addFrameIndex(FI)
.addImm(0)
.addImm(0)
.addReg(SrcReg, getKillRegState(isKill))
.addMemOperand(MMO);
} else if (RC == &VE::VMRegClass) {
BuildMI(MBB, I, DL, get(VE::STVMrii))
.addFrameIndex(FI)
.addImm(0)
.addImm(0)
.addReg(SrcReg, getKillRegState(isKill))
.addMemOperand(MMO);
} else if (VE::VM512RegClass.hasSubClassEq(RC)) {
BuildMI(MBB, I, DL, get(VE::STVM512rii))
.addFrameIndex(FI)
.addImm(0)
.addImm(0)
.addReg(SrcReg, getKillRegState(isKill))
.addMemOperand(MMO);
} else
report_fatal_error("Can't store this register to stack slot");
}
void VEInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register DestReg, int FI,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
DebugLoc DL;
if (I != MBB.end())
DL = I->getDebugLoc();
MachineFunction *MF = MBB.getParent();
const MachineFrameInfo &MFI = MF->getFrameInfo();
MachineMemOperand *MMO = MF->getMachineMemOperand(
MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
if (RC == &VE::I64RegClass) {
BuildMI(MBB, I, DL, get(VE::LDrii), DestReg)
.addFrameIndex(FI)
.addImm(0)
.addImm(0)
.addMemOperand(MMO);
} else if (RC == &VE::I32RegClass) {
BuildMI(MBB, I, DL, get(VE::LDLSXrii), DestReg)
.addFrameIndex(FI)
.addImm(0)
.addImm(0)
.addMemOperand(MMO);
} else if (RC == &VE::F32RegClass) {
BuildMI(MBB, I, DL, get(VE::LDUrii), DestReg)
.addFrameIndex(FI)
.addImm(0)
.addImm(0)
.addMemOperand(MMO);
} else if (VE::F128RegClass.hasSubClassEq(RC)) {
BuildMI(MBB, I, DL, get(VE::LDQrii), DestReg)
.addFrameIndex(FI)
.addImm(0)
.addImm(0)
.addMemOperand(MMO);
} else if (RC == &VE::VMRegClass) {
BuildMI(MBB, I, DL, get(VE::LDVMrii), DestReg)
.addFrameIndex(FI)
.addImm(0)
.addImm(0)
.addMemOperand(MMO);
} else if (VE::VM512RegClass.hasSubClassEq(RC)) {
BuildMI(MBB, I, DL, get(VE::LDVM512rii), DestReg)
.addFrameIndex(FI)
.addImm(0)
.addImm(0)
.addMemOperand(MMO);
} else
report_fatal_error("Can't load this register from stack slot");
}
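
// Try to fold the constant materialized by DefMI (an ORim or LEAzii) directly
// into UseMI, switching UseMI to the immediate form of its opcode.  For
// commutative instructions the operands may be swapped so the immediate lands
// in the right slot; non-commutative ones use the dedicated "ir" form when the
// constant replaces the first operand.  DefMI is erased if this was its only
// non-debug use.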
bool VEInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
Register Reg, MachineRegisterInfo *MRI) const {
LLVM_DEBUG(dbgs() << "FoldImmediate\n");
LLVM_DEBUG(dbgs() << "checking DefMI\n");
int64_t ImmVal;
switch (DefMI.getOpcode()) {
default:
return false;
case VE::ORim:
LLVM_DEBUG(dbgs() << "checking ORim\n");
LLVM_DEBUG(DefMI.dump());
assert(DefMI.getOperand(1).isImm());
assert(DefMI.getOperand(2).isImm());
ImmVal =
DefMI.getOperand(1).getImm() + mimm2Val(DefMI.getOperand(2).getImm());
LLVM_DEBUG(dbgs() << "ImmVal is " << ImmVal << "\n");
break;
case VE::LEAzii:
LLVM_DEBUG(dbgs() << "checking LEAzii\n");
LLVM_DEBUG(DefMI.dump());
assert(DefMI.getOperand(2).isImm());
if (!DefMI.getOperand(3).isImm())
return false;
ImmVal = DefMI.getOperand(2).getImm() + DefMI.getOperand(3).getImm();
LLVM_DEBUG(dbgs() << "ImmVal is " << ImmVal << "\n");
break;
}
LLVM_DEBUG(dbgs() << "checking UseMI\n");
LLVM_DEBUG(UseMI.dump());
unsigned NewUseOpcSImm7;
unsigned NewUseOpcMImm;
enum InstType {
    rr2ri_rm, // rr -> ri or rm
    rr2ir_rm  // rr -> ir or rm
  } InstType;
using namespace llvm::VE;
#define INSTRKIND(NAME) \
case NAME##rr: \
NewUseOpcSImm7 = NAME##ri; \
NewUseOpcMImm = NAME##rm; \
InstType = rr2ri_rm; \
break
#define NCINSTRKIND(NAME) \
case NAME##rr: \
NewUseOpcSImm7 = NAME##ir; \
NewUseOpcMImm = NAME##rm; \
InstType = rr2ir_rm; \
break
switch (UseMI.getOpcode()) {
default:
return false;
INSTRKIND(ADDUL);
INSTRKIND(ADDSWSX);
INSTRKIND(ADDSWZX);
INSTRKIND(ADDSL);
NCINSTRKIND(SUBUL);
NCINSTRKIND(SUBSWSX);
NCINSTRKIND(SUBSWZX);
NCINSTRKIND(SUBSL);
INSTRKIND(MULUL);
INSTRKIND(MULSWSX);
INSTRKIND(MULSWZX);
INSTRKIND(MULSL);
NCINSTRKIND(DIVUL);
NCINSTRKIND(DIVSWSX);
NCINSTRKIND(DIVSWZX);
NCINSTRKIND(DIVSL);
NCINSTRKIND(CMPUL);
NCINSTRKIND(CMPSWSX);
NCINSTRKIND(CMPSWZX);
NCINSTRKIND(CMPSL);
INSTRKIND(MAXSWSX);
INSTRKIND(MAXSWZX);
INSTRKIND(MAXSL);
INSTRKIND(MINSWSX);
INSTRKIND(MINSWZX);
INSTRKIND(MINSL);
INSTRKIND(AND);
INSTRKIND(OR);
INSTRKIND(XOR);
INSTRKIND(EQV);
NCINSTRKIND(NND);
NCINSTRKIND(MRG);
}
#undef INSTRKIND
#undef NCINSTRKIND
unsigned NewUseOpc;
unsigned UseIdx;
bool Commute = false;
LLVM_DEBUG(dbgs() << "checking UseMI operands\n");
switch (InstType) {
case rr2ri_rm:
UseIdx = 2;
if (UseMI.getOperand(1).getReg() == Reg) {
Commute = true;
} else {
assert(UseMI.getOperand(2).getReg() == Reg);
}
if (isInt<7>(ImmVal)) {
NewUseOpc = NewUseOpcSImm7;
} else if (isMImmVal(ImmVal)) {
NewUseOpc = NewUseOpcMImm;
ImmVal = val2MImm(ImmVal);
} else
return false;
break;
case rr2ir_rm:
if (UseMI.getOperand(1).getReg() == Reg) {
if (!isInt<7>(ImmVal))
return false;
NewUseOpc = NewUseOpcSImm7;
UseIdx = 1;
} else {
assert(UseMI.getOperand(2).getReg() == Reg);
if (!isMImmVal(ImmVal))
return false;
NewUseOpc = NewUseOpcMImm;
ImmVal = val2MImm(ImmVal);
UseIdx = 2;
}
break;
}
LLVM_DEBUG(dbgs() << "modifying UseMI\n");
bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
UseMI.setDesc(get(NewUseOpc));
if (Commute) {
UseMI.getOperand(1).setReg(UseMI.getOperand(UseIdx).getReg());
}
UseMI.getOperand(UseIdx).ChangeToImmediate(ImmVal);
if (DeleteDef)
DefMI.eraseFromParent();
return true;
}
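
// Return the register holding the GOT base, creating it on first use: SX15 is
// used as the global base register and a GETGOT is inserted at the top of the
// function's entry block.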
Register VEInstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
VEMachineFunctionInfo *VEFI = MF->getInfo<VEMachineFunctionInfo>();
Register GlobalBaseReg = VEFI->getGlobalBaseReg();
if (GlobalBaseReg != 0)
return GlobalBaseReg;
GlobalBaseReg = VE::SX15;
MachineBasicBlock &FirstMBB = MF->front();
MachineBasicBlock::iterator MBBI = FirstMBB.begin();
DebugLoc dl;
BuildMI(FirstMBB, MBBI, dl, get(VE::GETGOT), GlobalBaseReg);
VEFI->setGlobalBaseReg(GlobalBaseReg);
return GlobalBaseReg;
}
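
// The 512-bit VMP mask registers are modeled as pairs of 256-bit VM registers;
// these helpers return the upper and lower VM half of a VMP register.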
static Register getVM512Upper(Register reg) {
return (reg - VE::VMP0) * 2 + VE::VM0;
}
static Register getVM512Lower(Register reg) { return getVM512Upper(reg) + 1; }
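// Expand a logical pseudo operating on a 512-bit mask register (ANDMyy, ORMyy,
// XORMyy, EQVMyy, NNDMyy, NEGMy) into the same operation applied to each
// 256-bit half.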
static void expandPseudoLogM(MachineInstr &MI, const MCInstrDesc &MCID) {
MachineBasicBlock *MBB = MI.getParent();
DebugLoc DL = MI.getDebugLoc();
Register VMXu = getVM512Upper(MI.getOperand(0).getReg());
Register VMXl = getVM512Lower(MI.getOperand(0).getReg());
Register VMYu = getVM512Upper(MI.getOperand(1).getReg());
Register VMYl = getVM512Lower(MI.getOperand(1).getReg());
switch (MI.getOpcode()) {
default: {
Register VMZu = getVM512Upper(MI.getOperand(2).getReg());
Register VMZl = getVM512Lower(MI.getOperand(2).getReg());
BuildMI(*MBB, MI, DL, MCID).addDef(VMXu).addUse(VMYu).addUse(VMZu);
BuildMI(*MBB, MI, DL, MCID).addDef(VMXl).addUse(VMYl).addUse(VMZl);
break;
}
case VE::NEGMy:
BuildMI(*MBB, MI, DL, MCID).addDef(VMXu).addUse(VMYu);
BuildMI(*MBB, MI, DL, MCID).addDef(VMXl).addUse(VMYl);
break;
}
MI.eraseFromParent();
}
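
// Copy the operands of a 512-bit VFMK pseudo onto the real upper- or
// lower-half instruction, rewriting the mask destination (and the optional
// mask source) to the matching 256-bit VM register.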
static void addOperandsForVFMK(MachineInstrBuilder &MIB, MachineInstr &MI,
bool Upper) {
MIB.addReg(Upper ? getVM512Upper(MI.getOperand(0).getReg())
: getVM512Lower(MI.getOperand(0).getReg()));
switch (MI.getNumExplicitOperands()) {
default:
report_fatal_error("unexpected number of operands for pvfmk");
  case 2: // dest, vl
    MIB.addReg(MI.getOperand(1).getReg());
    break;
  case 4: // dest, cond, vector, vl
    MIB.addImm(MI.getOperand(1).getImm());
    MIB.addReg(MI.getOperand(2).getReg());
    MIB.addReg(MI.getOperand(3).getReg());
    break;
  case 5: // dest, cond, vector, mask, vl
    MIB.addImm(MI.getOperand(1).getImm());
    MIB.addReg(MI.getOperand(2).getReg());
    MIB.addReg(Upper ? getVM512Upper(MI.getOperand(3).getReg())
                     : getVM512Lower(MI.getOperand(3).getReg()));
    MIB.addReg(MI.getOperand(4).getReg());
    break;
}
}
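// Expand a 512-bit VFMK pseudo into separate instructions for the upper and
// lower halves of the mask.  The table maps each pseudo to its packed
// upper/lower opcodes.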
static void expandPseudoVFMK(const TargetInstrInfo &TI, MachineInstr &MI) {
static const std::pair<unsigned, std::pair<unsigned, unsigned>> VFMKMap[] = {
{VE::VFMKyal, {VE::VFMKLal, VE::VFMKLal}},
{VE::VFMKynal, {VE::VFMKLnal, VE::VFMKLnal}},
{VE::VFMKWyvl, {VE::PVFMKWUPvl, VE::PVFMKWLOvl}},
{VE::VFMKWyvyl, {VE::PVFMKWUPvml, VE::PVFMKWLOvml}},
{VE::VFMKSyvl, {VE::PVFMKSUPvl, VE::PVFMKSLOvl}},
{VE::VFMKSyvyl, {VE::PVFMKSUPvml, VE::PVFMKSLOvml}},
};
unsigned Opcode = MI.getOpcode();
const auto *Found =
llvm::find_if(VFMKMap, [&](auto P) { return P.first == Opcode; });
if (Found == std::end(VFMKMap))
report_fatal_error("unexpected opcode for pseudo vfmk");
unsigned OpcodeUpper = (*Found).second.first;
unsigned OpcodeLower = (*Found).second.second;
MachineBasicBlock *MBB = MI.getParent();
DebugLoc DL = MI.getDebugLoc();
MachineInstrBuilder Bu = BuildMI(*MBB, MI, DL, TI.get(OpcodeUpper));
addOperandsForVFMK(Bu, MI, true);
MachineInstrBuilder Bl = BuildMI(*MBB, MI, DL, TI.get(OpcodeLower));
addOperandsForVFMK(Bl, MI, false);
MI.eraseFromParent();
}
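
// Expand the remaining post-RA pseudos: the stack-extension pseudos and the
// 512-bit mask pseudos that have no single machine encoding.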
bool VEInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
switch (MI.getOpcode()) {
case VE::EXTEND_STACK: {
return expandExtendStackPseudo(MI);
}
case VE::EXTEND_STACK_GUARD: {
    MI.eraseFromParent();
    return true;
}
case VE::GETSTACKTOP: {
return expandGetStackTopPseudo(MI);
}
case VE::ANDMyy:
expandPseudoLogM(MI, get(VE::ANDMmm));
return true;
case VE::ORMyy:
expandPseudoLogM(MI, get(VE::ORMmm));
return true;
case VE::XORMyy:
expandPseudoLogM(MI, get(VE::XORMmm));
return true;
case VE::EQVMyy:
expandPseudoLogM(MI, get(VE::EQVMmm));
return true;
case VE::NNDMyy:
expandPseudoLogM(MI, get(VE::NNDMmm));
return true;
case VE::NEGMy:
expandPseudoLogM(MI, get(VE::NEGMm));
return true;
case VE::LVMyir:
case VE::LVMyim:
case VE::LVMyir_y:
case VE::LVMyim_y: {
Register VMXu = getVM512Upper(MI.getOperand(0).getReg());
Register VMXl = getVM512Lower(MI.getOperand(0).getReg());
int64_t Imm = MI.getOperand(1).getImm();
bool IsSrcReg =
MI.getOpcode() == VE::LVMyir || MI.getOpcode() == VE::LVMyir_y;
Register Src = IsSrcReg ? MI.getOperand(2).getReg() : VE::NoRegister;
int64_t MImm = IsSrcReg ? 0 : MI.getOperand(2).getImm();
bool KillSrc = IsSrcReg ? MI.getOperand(2).isKill() : false;
Register VMX = VMXl;
if (Imm >= 4) {
VMX = VMXu;
Imm -= 4;
}
MachineBasicBlock *MBB = MI.getParent();
DebugLoc DL = MI.getDebugLoc();
switch (MI.getOpcode()) {
case VE::LVMyir:
BuildMI(*MBB, MI, DL, get(VE::LVMir))
.addDef(VMX)
.addImm(Imm)
.addReg(Src, getKillRegState(KillSrc));
break;
case VE::LVMyim:
BuildMI(*MBB, MI, DL, get(VE::LVMim))
.addDef(VMX)
.addImm(Imm)
.addImm(MImm);
break;
case VE::LVMyir_y:
assert(MI.getOperand(0).getReg() == MI.getOperand(3).getReg() &&
"LVMyir_y has different register in 3rd operand");
BuildMI(*MBB, MI, DL, get(VE::LVMir_m))
.addDef(VMX)
.addImm(Imm)
.addReg(Src, getKillRegState(KillSrc))
.addReg(VMX);
break;
case VE::LVMyim_y:
assert(MI.getOperand(0).getReg() == MI.getOperand(3).getReg() &&
"LVMyim_y has different register in 3rd operand");
BuildMI(*MBB, MI, DL, get(VE::LVMim_m))
.addDef(VMX)
.addImm(Imm)
.addImm(MImm)
.addReg(VMX);
break;
}
MI.eraseFromParent();
return true;
}
case VE::SVMyi: {
Register Dest = MI.getOperand(0).getReg();
Register VMZu = getVM512Upper(MI.getOperand(1).getReg());
Register VMZl = getVM512Lower(MI.getOperand(1).getReg());
bool KillSrc = MI.getOperand(1).isKill();
int64_t Imm = MI.getOperand(2).getImm();
Register VMZ = VMZl;
if (Imm >= 4) {
VMZ = VMZu;
Imm -= 4;
}
MachineBasicBlock *MBB = MI.getParent();
DebugLoc DL = MI.getDebugLoc();
MachineInstrBuilder MIB =
BuildMI(*MBB, MI, DL, get(VE::SVMmi), Dest).addReg(VMZ).addImm(Imm);
MachineInstr *Inst = MIB.getInstr();
if (KillSrc) {
const TargetRegisterInfo *TRI = &getRegisterInfo();
Inst->addRegisterKilled(MI.getOperand(1).getReg(), TRI, true);
}
MI.eraseFromParent();
return true;
}
case VE::VFMKyal:
case VE::VFMKynal:
case VE::VFMKWyvl:
case VE::VFMKWyvyl:
case VE::VFMKSyvl:
case VE::VFMKSyvyl:
expandPseudoVFMK(*this, MI);
return true;
}
return false;
}
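
// EXTEND_STACK is expanded into a guard plus a monitor call: if the stack
// pointer (SX11) is still at or above the stack limit (SX8) we branch straight
// to the continuation block; otherwise the request number 0x13b, the limit and
// the new stack pointer are stored through the area pointed to by the thread
// pointer (SX14) via SHM.L, and MONC traps to the OS so it can grow the stack.
// SX0 is saved in SX62 and restored after the call.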
bool VEInstrInfo::expandExtendStackPseudo(MachineInstr &MI) const {
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
const VESubtarget &STI = MF.getSubtarget<VESubtarget>();
const VEInstrInfo &TII = *STI.getInstrInfo();
DebugLoc dl = MBB.findDebugLoc(MI);
MachineBasicBlock *BB = &MBB;
const BasicBlock *LLVM_BB = BB->getBasicBlock();
MachineBasicBlock *syscallMBB = MF.CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *sinkMBB = MF.CreateMachineBasicBlock(LLVM_BB);
MachineFunction::iterator It = ++(BB->getIterator());
MF.insert(It, syscallMBB);
MF.insert(It, sinkMBB);
sinkMBB->splice(sinkMBB->begin(), BB,
std::next(std::next(MachineBasicBlock::iterator(MI))),
BB->end());
sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
BB->addSuccessor(syscallMBB);
BB->addSuccessor(sinkMBB);
BuildMI(BB, dl, TII.get(VE::BRCFLrr_t))
.addImm(VECC::CC_IGE)
      .addReg(VE::SX11)
      .addReg(VE::SX8)
      .addMBB(sinkMBB);
BB = syscallMBB;
BB->addSuccessor(sinkMBB);
BuildMI(BB, dl, TII.get(VE::LDrii), VE::SX61)
.addReg(VE::SX14)
.addImm(0)
.addImm(0x18);
BuildMI(BB, dl, TII.get(VE::ORri), VE::SX62)
.addReg(VE::SX0)
.addImm(0);
BuildMI(BB, dl, TII.get(VE::LEAzii), VE::SX63)
.addImm(0)
.addImm(0)
.addImm(0x13b);
BuildMI(BB, dl, TII.get(VE::SHMLri))
.addReg(VE::SX61)
.addImm(0)
.addReg(VE::SX63);
BuildMI(BB, dl, TII.get(VE::SHMLri))
.addReg(VE::SX61)
.addImm(8)
.addReg(VE::SX8);
BuildMI(BB, dl, TII.get(VE::SHMLri))
.addReg(VE::SX61)
.addImm(16)
.addReg(VE::SX11);
BuildMI(BB, dl, TII.get(VE::MONC));
BuildMI(BB, dl, TII.get(VE::ORri), VE::SX0)
.addReg(VE::SX62)
.addImm(0);
  MI.eraseFromParent();
  return true;
}
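
// GETSTACKTOP is lowered to an LEA that adds the adjusted frame size (plus the
// maximum call-frame size when call frames are reserved) to the stack pointer
// SX11, yielding the first address above the fixed portion of the frame.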
bool VEInstrInfo::expandGetStackTopPseudo(MachineInstr &MI) const {
MachineBasicBlock *MBB = MI.getParent();
MachineFunction &MF = *MBB->getParent();
const VESubtarget &STI = MF.getSubtarget<VESubtarget>();
const VEInstrInfo &TII = *STI.getInstrInfo();
DebugLoc DL = MBB->findDebugLoc(MI);
const MachineFrameInfo &MFI = MF.getFrameInfo();
const VEFrameLowering &TFL = *STI.getFrameLowering();
unsigned NumBytes = STI.getAdjustedFrameSize(0);
if (MFI.adjustsStack() && TFL.hasReservedCallFrame(MF))
NumBytes += MFI.getMaxCallFrameSize();
BuildMI(*MBB, MI, DL, TII.get(VE::LEArii))
.addDef(MI.getOperand(0).getReg())
.addReg(VE::SX11)
.addImm(0)
.addImm(NumBytes);
  MI.eraseFromParent();
  return true;
}