#include "AMDGPURegisterBankInfo.h"
#include "AMDGPU.h"
#include "AMDGPUGlobalISelUtils.h"
#include "AMDGPUInstrInfo.h"
#include "GCNSubtarget.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/RegisterBank.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#define GET_TARGET_REGBANK_IMPL
#include "AMDGPUGenRegisterBank.inc"
#include "AMDGPUGenRegisterBankInfo.def"
using namespace llvm;
using namespace MIPatternMatch;
namespace {

// Observer used while rewriting an instruction's register banks. Instructions
// created through an observed MachineIRBuilder are recorded, and when the
// observer is destroyed each recorded instruction has the chosen bank applied
// to any virtual register operand that does not yet have a class or bank.
class ApplyRegBankMapping final : public GISelChangeObserver {
private:
  const AMDGPURegisterBankInfo &RBI;
  MachineRegisterInfo &MRI;
  // Bank assigned to newly created, still-unassigned registers.
  const RegisterBank *NewBank;
  // Instructions recorded by createdInstr(), processed in the destructor.
  SmallVector<MachineInstr *, 4> NewInsts;

public:
  ApplyRegBankMapping(const AMDGPURegisterBankInfo &RBI_,
                      MachineRegisterInfo &MRI_, const RegisterBank *RB)
      : RBI(RBI_), MRI(MRI_), NewBank(RB) {}

  ~ApplyRegBankMapping() {
    // Apply the bank to everything created while this observer was active.
    for (MachineInstr *MI : NewInsts)
      applyBank(*MI);
  }

  // Assign NewBank (or VCC for s1 values) to every unassigned virtual
  // register operand of \p MI. Extensions whose s1 source is on the VCC bank
  // are rewritten into a select of 32-bit constants.
  void applyBank(MachineInstr &MI) {
    const unsigned Opc = MI.getOpcode();
    if (Opc == AMDGPU::G_ANYEXT || Opc == AMDGPU::G_ZEXT ||
        Opc == AMDGPU::G_SEXT) {
      Register DstReg = MI.getOperand(0).getReg();
      Register SrcReg = MI.getOperand(1).getReg();
      const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, MRI, *RBI.TRI);
      if (SrcBank == &AMDGPU::VCCRegBank) {
        const LLT S32 = LLT::scalar(32);
        assert(MRI.getType(SrcReg) == LLT::scalar(1));
        assert(MRI.getType(DstReg) == S32);
        assert(NewBank == &AMDGPU::VGPRRegBank);

        // Replace the extension with a select, which really uses the boolean
        // source: sext materializes 0/-1, zext/anyext materialize 0/1.
        MachineIRBuilder B(MI);
        auto True = B.buildConstant(S32, Opc == AMDGPU::G_SEXT ? -1 : 1);
        auto False = B.buildConstant(S32, 0);
        B.buildSelect(DstReg, SrcReg, True, False);
        MRI.setRegBank(True.getReg(0), *NewBank);
        MRI.setRegBank(False.getReg(0), *NewBank);
        MI.eraseFromParent();
      }

      // Note: DstReg still gets a bank even when MI was erased above — the
      // select now defines it.
      assert(!MRI.getRegClassOrRegBank(DstReg));
      MRI.setRegBank(DstReg, *NewBank);
      return;
    }

#ifndef NDEBUG
    // A G_TRUNC result must not be mapped to VCC at this point.
    if (Opc == AMDGPU::G_TRUNC) {
      Register DstReg = MI.getOperand(0).getReg();
      const RegisterBank *DstBank = RBI.getRegBank(DstReg, MRI, *RBI.TRI);
      assert(DstBank != &AMDGPU::VCCRegBank);
    }
#endif

    for (MachineOperand &Op : MI.operands()) {
      if (!Op.isReg())
        continue;

      // Skip physical registers and anything that already has a class/bank.
      Register Reg = Op.getReg();
      if (Reg.isPhysical() || MRI.getRegClassOrRegBank(Reg))
        continue;

      const RegisterBank *RB = NewBank;
      if (MRI.getType(Reg) == LLT::scalar(1)) {
        // s1 values on the VGPR path are vector booleans and live in VCC.
        assert(NewBank == &AMDGPU::VGPRRegBank &&
               "s1 operands should only be used for vector bools");
        assert((MI.getOpcode() != AMDGPU::G_TRUNC &&
                MI.getOpcode() != AMDGPU::G_ANYEXT) &&
               "not expecting legalization artifacts here");
        RB = &AMDGPU::VCCRegBank;
      }

      MRI.setRegBank(Reg, *RB);
    }
  }

  void erasingInstr(MachineInstr &MI) override {}

  // Defer bank assignment: record now, apply in the destructor once the
  // whole rewrite has finished.
  void createdInstr(MachineInstr &MI) override {
    NewInsts.push_back(&MI);
  }

  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {
  }
};

}
AMDGPURegisterBankInfo::AMDGPURegisterBankInfo(const GCNSubtarget &ST)
    : Subtarget(ST), TRI(Subtarget.getRegisterInfo()),
      TII(Subtarget.getInstrInfo()) {
  // One-time sanity check that the generated register-bank table lines up
  // with the bank singletons this file relies on.
  static llvm::once_flag InitializeRegisterBankFlag;
  llvm::call_once(InitializeRegisterBankFlag, [this]() {
    assert(&getRegBank(AMDGPU::SGPRRegBankID) == &AMDGPU::SGPRRegBank &&
           &getRegBank(AMDGPU::VGPRRegBankID) == &AMDGPU::VGPRRegBank &&
           &getRegBank(AMDGPU::AGPRRegBankID) == &AMDGPU::AGPRRegBank);
    (void)this;
  });
}
// Both VGPRs and AGPRs hold per-lane (vector) values.
static bool isVectorRegisterBank(const RegisterBank &Bank) {
  switch (Bank.getID()) {
  case AMDGPU::VGPRRegBankID:
  case AMDGPU::AGPRRegBankID:
    return true;
  default:
    return false;
  }
}
// Cost of copying a value of \p Size bits from bank \p Src to bank \p Dst.
// Divergent-to-uniform copies are reported as impossible (max cost).
unsigned AMDGPURegisterBankInfo::copyCost(const RegisterBank &Dst,
                                          const RegisterBank &Src,
                                          unsigned Size) const {
  const unsigned SrcID = Src.getID();

  if (Dst.getID() == AMDGPU::SGPRRegBankID) {
    // A divergent value (VGPR/AGPR/VCC) cannot simply be copied into SGPRs.
    if (isVectorRegisterBank(Src) || SrcID == AMDGPU::VCCRegBankID)
      return std::numeric_limits<unsigned>::max();

    // Bool values are tricky because the meaning is context dependent; an s1
    // may have been a truncate from an arbitrary value, requiring an
    // expansion rather than a plain copy, so forbid s1 SGPR->SGPR here too.
    if (Size == 1 && SrcID == AMDGPU::SGPRRegBankID)
      return std::numeric_limits<unsigned>::max();
  }

  // AGPR<->AGPR traffic is more expensive than the default copy.
  if (Dst.getID() == AMDGPU::AGPRRegBankID && SrcID == AMDGPU::AGPRRegBankID)
    return 4;

  return RegisterBankInfo::copyCost(Dst, Src, Size);
}
unsigned AMDGPURegisterBankInfo::getBreakDownCost(
    const ValueMapping &ValMapping,
    const RegisterBank *CurBank) const {
  // Check if this is a breakdown for G_LOAD to move the pointer from SGPR to
  // VGPR: any multi-part breakdown, or a single part of 64+ bits, is costed
  // as expensive.
  if (ValMapping.NumBreakDowns >= 2 || ValMapping.BreakDown[0].Length >= 64)
    return 10;

  // NOTE(review): the early return above already handled NumBreakDowns >= 2,
  // so in a debug build this assert fires whenever control reaches here.
  // Presumably only mappings matching the condition above are ever queried —
  // confirm before relying on the `return 1` path.
  assert(ValMapping.NumBreakDowns == 2 &&
         ValMapping.BreakDown[0].Length == 32 &&
         ValMapping.BreakDown[0].StartIdx == 0 &&
         ValMapping.BreakDown[1].Length == 32 &&
         ValMapping.BreakDown[1].StartIdx == 32 &&
         ValMapping.BreakDown[0].RegBank == ValMapping.BreakDown[1].RegBank);
  // A simple split into two 32-bit halves on the same bank is cheap.
  return 1;
}
// Map a physical register class (plus, optionally, the value's LLT) to the
// register bank the value belongs to.
const RegisterBank &
AMDGPURegisterBankInfo::getRegBankFromRegClass(const TargetRegisterClass &RC,
                                               LLT Ty) const {
  // SReg_1 is the boolean class; it always maps to VCC.
  if (&RC == &AMDGPU::SReg_1RegClass)
    return AMDGPU::VCCRegBank;

  if (TRI->isSGPRClass(&RC)) {
    // An s1 value held in a scalar class is still a condition -> VCC.
    const bool IsBool = Ty.isValid() && Ty == LLT::scalar(1);
    return IsBool ? AMDGPU::VCCRegBank : AMDGPU::SGPRRegBank;
  }

  if (TRI->isAGPRClass(&RC))
    return AMDGPU::AGPRRegBank;
  return AMDGPU::VGPRRegBank;
}
// Build one InstructionMapping per table row. Each row lists the bank for the
// NumOps register operands named by \p RegSrcOpIdx, plus a cost. Explicit
// defs are always mapped to VGPRs.
template <unsigned NumOps>
RegisterBankInfo::InstructionMappings
AMDGPURegisterBankInfo::addMappingFromTable(
    const MachineInstr &MI, const MachineRegisterInfo &MRI,
    const std::array<unsigned, NumOps> RegSrcOpIdx,
    ArrayRef<OpRegBankEntry<NumOps>> Table) const {
  InstructionMappings AltMappings;

  SmallVector<const ValueMapping *, 10> Operands(MI.getNumOperands());

  // Cache the bit sizes of the remapped operands once.
  unsigned Sizes[NumOps];
  for (unsigned Idx = 0; Idx != NumOps; ++Idx) {
    Register OpReg = MI.getOperand(RegSrcOpIdx[Idx]).getReg();
    Sizes[Idx] = getSizeInBits(OpReg, MRI, *TRI);
  }

  // Results always go to VGPRs.
  for (unsigned DefIdx = 0, NumDefs = MI.getNumExplicitDefs();
       DefIdx != NumDefs; ++DefIdx) {
    const unsigned DefSize = getSizeInBits(MI.getOperand(DefIdx).getReg(),
                                           MRI, *TRI);
    Operands[DefIdx] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, DefSize);
  }

  // Alternative mapping IDs start at 2.
  unsigned MappingID = 2;
  for (const auto &Entry : Table) {
    // Overwrite the remapped slots for this row; the def slots persist.
    for (unsigned Idx = 0; Idx != NumOps; ++Idx) {
      const unsigned OpIdx = RegSrcOpIdx[Idx];
      Operands[OpIdx] =
          AMDGPU::getValueMapping(Entry.RegBanks[Idx], Sizes[Idx]);
    }

    AltMappings.push_back(&getInstructionMapping(MappingID++, Entry.Cost,
                                                 getOperandsMapping(Operands),
                                                 Operands.size()));
  }

  return AltMappings;
}
// Alternative bank assignments for side-effect-free intrinsics with
// non-trivial operand constraints.
RegisterBankInfo::InstructionMappings
AMDGPURegisterBankInfo::getInstrAlternativeMappingsIntrinsic(
    const MachineInstr &MI, const MachineRegisterInfo &MRI) const {
  switch (MI.getIntrinsicID()) {
  case Intrinsic::amdgcn_readlane: {
    static const OpRegBankEntry<3> Table[2] = {
      // SGPR lane index: directly legal.
      { { AMDGPU::SGPRRegBankID, AMDGPU::VGPRRegBankID, AMDGPU::SGPRRegBankID }, 1 },

      // VGPR lane index: costs more (index must be made uniform).
      { { AMDGPU::SGPRRegBankID, AMDGPU::VGPRRegBankID, AMDGPU::VGPRRegBankID }, 2 }
    };

    // Operands: 0 = dst, 2 = source value, 3 = lane index (1 is the
    // intrinsic ID and is skipped).
    const std::array<unsigned, 3> RegSrcOpIdx = { { 0, 2, 3 } };
    return addMappingFromTable<3>(MI, MRI, RegSrcOpIdx, makeArrayRef(Table));
  }
  case Intrinsic::amdgcn_writelane: {
    static const OpRegBankEntry<4> Table[4] = {
      // Both scalar operands uniform: directly legal.
      { { AMDGPU::VGPRRegBankID, AMDGPU::SGPRRegBankID, AMDGPU::SGPRRegBankID, AMDGPU::VGPRRegBankID }, 1 },

      // One of the two scalar operands divergent: cost 2.
      { { AMDGPU::VGPRRegBankID, AMDGPU::VGPRRegBankID, AMDGPU::SGPRRegBankID, AMDGPU::VGPRRegBankID }, 2 },
      { { AMDGPU::VGPRRegBankID, AMDGPU::SGPRRegBankID, AMDGPU::VGPRRegBankID, AMDGPU::VGPRRegBankID }, 2 },

      // Both divergent: most expensive.
      { { AMDGPU::VGPRRegBankID, AMDGPU::VGPRRegBankID, AMDGPU::VGPRRegBankID, AMDGPU::VGPRRegBankID }, 3 }
    };

    // Operands: 0 = dst, 2/3 = scalar inputs, 4 = vector input.
    const std::array<unsigned, 4> RegSrcOpIdx = { { 0, 2, 3, 4 } };
    return addMappingFromTable<4>(MI, MRI, RegSrcOpIdx, makeArrayRef(Table));
  }
  default:
    return RegisterBankInfo::getInstrAlternativeMappings(MI);
  }
}
// Alternative bank assignments for intrinsics with side effects.
RegisterBankInfo::InstructionMappings
AMDGPURegisterBankInfo::getInstrAlternativeMappingsIntrinsicWSideEffects(
    const MachineInstr &MI, const MachineRegisterInfo &MRI) const {
  switch (MI.getIntrinsicID()) {
  case Intrinsic::amdgcn_s_buffer_load: {
    static const OpRegBankEntry<2> Table[4] = {
      // Uniform rsrc and offset: cheapest.
      { { AMDGPU::SGPRRegBankID, AMDGPU::SGPRRegBankID }, 1 },

      // Divergent offset.
      { { AMDGPU::SGPRRegBankID, AMDGPU::VGPRRegBankID }, 300 },

      // Divergent rsrc: much more expensive.
      { { AMDGPU::VGPRRegBankID, AMDGPU::SGPRRegBankID }, 1000 },

      // Both divergent: worst case.
      { { AMDGPU::VGPRRegBankID, AMDGPU::VGPRRegBankID }, 1500 }
    };

    // Operands: 2 = rsrc, 3 = offset.
    const std::array<unsigned, 2> RegSrcOpIdx = { { 2, 3 } };
    return addMappingFromTable<2>(MI, MRI, RegSrcOpIdx, makeArrayRef(Table));
  }
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap: {
    static const OpRegBankEntry<3> Table[2] = {
      // Uniform m0 operand: directly legal.
      { { AMDGPU::VGPRRegBankID, AMDGPU::SGPRRegBankID, AMDGPU::VGPRRegBankID }, 1 },

      // Divergent m0 operand: costs more.
      { { AMDGPU::VGPRRegBankID, AMDGPU::VGPRRegBankID, AMDGPU::VGPRRegBankID }, 2 }
    };

    // Operands: 0 = dst, 2/3 = sources.
    const std::array<unsigned, 3> RegSrcOpIdx = { { 0, 2, 3 } };
    return addMappingFromTable<3>(MI, MRI, RegSrcOpIdx, makeArrayRef(Table));
  }
  case Intrinsic::amdgcn_s_sendmsg:
  case Intrinsic::amdgcn_s_sendmsghalt: {
    static const OpRegBankEntry<1> Table[2] = {
      // Uniform message payload.
      { { AMDGPU::SGPRRegBankID }, 1 },

      // Divergent payload: needs extra work.
      { { AMDGPU::VGPRRegBankID }, 3 }
    };

    // Operand 2 is the message payload.
    const std::array<unsigned, 1> RegSrcOpIdx = { { 2 } };
    return addMappingFromTable<1>(MI, MRI, RegSrcOpIdx, makeArrayRef(Table));
  }
  default:
    return RegisterBankInfo::getInstrAlternativeMappings(MI);
  }
}
// Whether \p MI may be selected as a scalar (SMEM) load: single memory
// operand, dword aligned, non-atomic, uniform, and — outside the constant
// address spaces — non-volatile and known not to be clobbered.
static bool isScalarLoadLegal(const MachineInstr &MI) {
  if (!MI.hasOneMemOperand())
    return false;

  const MachineMemOperand *MMO = *MI.memoperands_begin();
  const unsigned AS = MMO->getAddrSpace();
  const bool IsConst = AS == AMDGPUAS::CONSTANT_ADDRESS ||
                       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT;

  // Require natural dword alignment and a non-atomic access.
  if (MMO->getAlign() < Align(4) || MMO->isAtomic())
    return false;

  if (!IsConst) {
    // Outside the constant address spaces the access must additionally be
    // non-volatile and invariant (or explicitly marked not clobbered).
    if (MMO->isVolatile())
      return false;
    if (!MMO->isInvariant() && !(MMO->getFlags() & MONoClobber))
      return false;
  }

  return AMDGPUInstrInfo::isUniformMMO(MMO);
}
// Enumerate the alternative operand-bank assignments RegBankSelect may
// choose between for \p MI.
RegisterBankInfo::InstructionMappings
AMDGPURegisterBankInfo::getInstrAlternativeMappings(
    const MachineInstr &MI) const {

  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  InstructionMappings AltMappings;
  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT: {
    unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
    // Boolean constants may live on any of the three banks.
    if (Size == 1) {
      static const OpRegBankEntry<1> Table[3] = {
        { { AMDGPU::VGPRRegBankID }, 1 },
        { { AMDGPU::SGPRRegBankID }, 1 },
        { { AMDGPU::VCCRegBankID }, 1 }
      };

      return addMappingFromTable<1>(MI, MRI, {{ 0 }}, Table);
    }

    LLVM_FALLTHROUGH;
  }
  case TargetOpcode::G_FCONSTANT:
  case TargetOpcode::G_FRAME_INDEX:
  case TargetOpcode::G_GLOBAL_VALUE: {
    // Constant-like values can be materialized in either VGPRs or SGPRs.
    static const OpRegBankEntry<1> Table[2] = {
      { { AMDGPU::VGPRRegBankID }, 1 },
      { { AMDGPU::SGPRRegBankID }, 1 }
    };

    return addMappingFromTable<1>(MI, MRI, {{ 0 }}, Table);
  }
  case TargetOpcode::G_AND:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_XOR: {
    unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);

    if (Size == 1) {
      // Boolean logic: either as 32-bit SGPR values, or entirely in VCC.
      const InstructionMapping &SCCMapping = getInstructionMapping(
        1, 1, getOperandsMapping(
          {AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 32),
           AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 32),
           AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 32)}),
        3); // Num Operands
      AltMappings.push_back(&SCCMapping);

      const InstructionMapping &VCCMapping0 = getInstructionMapping(
        2, 1, getOperandsMapping(
          {AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, Size),
           AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, Size),
           AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, Size)}),
        3); // Num Operands
      AltMappings.push_back(&VCCMapping0);
      return AltMappings;
    }

    if (Size != 64)
      break;

    // 64-bit logic: whole value on SGPRs (cost 1), or split into 32-bit
    // halves on VGPRs (cost 2).
    const InstructionMapping &SSMapping = getInstructionMapping(
      1, 1, getOperandsMapping(
        {AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size),
         AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size),
         AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size)}),
      3); // Num Operands
    AltMappings.push_back(&SSMapping);

    const InstructionMapping &VVMapping = getInstructionMapping(
      2, 2, getOperandsMapping(
        {AMDGPU::getValueMappingSGPR64Only(AMDGPU::VGPRRegBankID, Size),
         AMDGPU::getValueMappingSGPR64Only(AMDGPU::VGPRRegBankID, Size),
         AMDGPU::getValueMappingSGPR64Only(AMDGPU::VGPRRegBankID, Size)}),
      3); // Num Operands
    AltMappings.push_back(&VVMapping);
    break;
  }
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_ZEXTLOAD:
  case TargetOpcode::G_SEXTLOAD: {
    unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
    LLT PtrTy = MRI.getType(MI.getOperand(1).getReg());
    unsigned PtrSize = PtrTy.getSizeInBits();
    unsigned AS = PtrTy.getAddressSpace();

    // A scalar (SGPR) load mapping is only offered outside LDS/region/private
    // address spaces, and only when the access qualifies as a legal scalar
    // load (see isScalarLoadLegal).
    if ((AS != AMDGPUAS::LOCAL_ADDRESS && AS != AMDGPUAS::REGION_ADDRESS &&
         AS != AMDGPUAS::PRIVATE_ADDRESS) &&
        isScalarLoadLegal(MI)) {
      const InstructionMapping &SSMapping = getInstructionMapping(
        1, 1, getOperandsMapping(
          {AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size),
           AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, PtrSize)}),
        2); // Num Operands
      AltMappings.push_back(&SSMapping);
    }

    // A fully-VGPR load always works.
    const InstructionMapping &VVMapping = getInstructionMapping(
      2, 1,
      getOperandsMapping(
        {AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
         AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, PtrSize)}),
      2); // Num Operands
    AltMappings.push_back(&VVMapping);

    return AltMappings;
  }
  case TargetOpcode::G_SELECT: {
    unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
    // Fully uniform (SGPR cond) or fully divergent (VCC cond) select.
    const InstructionMapping &SSMapping = getInstructionMapping(1, 1,
      getOperandsMapping({AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size),
                          AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 1),
                          AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size),
                          AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size)}),
      4); // Num Operands
    AltMappings.push_back(&SSMapping);

    const InstructionMapping &VVMapping = getInstructionMapping(2, 1,
      getOperandsMapping({AMDGPU::getValueMappingSGPR64Only(AMDGPU::VGPRRegBankID, Size),
                          AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1),
                          AMDGPU::getValueMappingSGPR64Only(AMDGPU::VGPRRegBankID, Size),
                          AMDGPU::getValueMappingSGPR64Only(AMDGPU::VGPRRegBankID, Size)}),
      4); // Num Operands
    AltMappings.push_back(&VVMapping);

    return AltMappings;
  }
  case TargetOpcode::G_UADDE:
  case TargetOpcode::G_USUBE:
  case TargetOpcode::G_SADDE:
  case TargetOpcode::G_SSUBE: {
    unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
    // Carry-using ops: the carry in/out is an s1 on SGPR or VCC depending on
    // which side the value operands live on.
    const InstructionMapping &SSMapping = getInstructionMapping(1, 1,
      getOperandsMapping(
        {AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size),
         AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 1),
         AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size),
         AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size),
         AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 1)}),
      5); // Num Operands
    AltMappings.push_back(&SSMapping);

    const InstructionMapping &VVMapping = getInstructionMapping(2, 1,
      getOperandsMapping({AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
                          AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1),
                          AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
                          AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
                          AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1)}),
      5); // Num Operands
    AltMappings.push_back(&VVMapping);
    return AltMappings;
  }
  case AMDGPU::G_BRCOND: {
    assert(MRI.getType(MI.getOperand(0).getReg()).getSizeInBits() == 1);

    // NOTE(review): both alternatives use mapping ID 1 — confirm IDs need
    // not be unique within one list.
    const InstructionMapping &SMapping = getInstructionMapping(
      1, 1, getOperandsMapping(
        {AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 1), nullptr}),
      2); // Num Operands
    AltMappings.push_back(&SMapping);

    const InstructionMapping &VMapping = getInstructionMapping(
      1, 1, getOperandsMapping(
        {AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1), nullptr }),
      2); // Num Operands
    AltMappings.push_back(&VMapping);
    return AltMappings;
  }
  case AMDGPU::G_INTRINSIC:
    return getInstrAlternativeMappingsIntrinsic(MI, MRI);
  case AMDGPU::G_INTRINSIC_W_SIDE_EFFECTS:
    return getInstrAlternativeMappingsIntrinsicWSideEffects(MI, MRI);
  default:
    break;
  }
  return RegisterBankInfo::getInstrAlternativeMappings(MI);
}
// Split the 64-bit value in \p Reg into two 32-bit halves via
// G_UNMERGE_VALUES, appending both half-registers to \p Regs. The halves
// inherit \p Reg's bank.
void AMDGPURegisterBankInfo::split64BitValueForMapping(
    MachineIRBuilder &B, SmallVector<Register, 2> &Regs, LLT HalfTy,
    Register Reg) const {
  assert(HalfTy.getSizeInBits() == 32);

  MachineRegisterInfo *MRI = B.getMRI();
  const RegisterBank *Bank = getRegBank(Reg, *MRI, *TRI);

  Register LoHalf = MRI->createGenericVirtualRegister(HalfTy);
  Register HiHalf = MRI->createGenericVirtualRegister(HalfTy);
  MRI->setRegBank(LoHalf, *Bank);
  MRI->setRegBank(HiHalf, *Bank);

  Regs.push_back(LoHalf);
  Regs.push_back(HiHalf);

  B.buildInstr(AMDGPU::G_UNMERGE_VALUES)
      .addDef(LoHalf)
      .addDef(HiHalf)
      .addUse(Reg);
}
// Re-type each register in \p Regs to \p NewTy in place. Only same-size
// retyping is permitted.
static void setRegsToType(MachineRegisterInfo &MRI, ArrayRef<Register> Regs,
                          LLT NewTy) {
  const unsigned NewSize = NewTy.getSizeInBits();
  for (Register R : Regs) {
    assert(MRI.getType(R).getSizeInBits() == NewSize);
    MRI.setType(R, NewTy);
  }
}
// Return a type with half the total bits of \p Ty: half the element count
// for vectors, half the scalar width otherwise.
static LLT getHalfSizedType(LLT Ty) {
  if (!Ty.isVector()) {
    assert(Ty.getScalarSizeInBits() % 2 == 0);
    return LLT::scalar(Ty.getScalarSizeInBits() / 2);
  }

  assert(Ty.getElementCount().isKnownMultipleOf(2));
  return LLT::scalarOrVector(Ty.getElementCount().divideCoefficientBy(2),
                             Ty.getElementType());
}
// Copy \p Src into SGPRs using V_READFIRSTLANE_B32, 32 bits at a time.
// Returns \p Src unchanged if it is already on the SGPR bank; otherwise
// returns a new SGPR-bank register holding the first active lane's value.
Register AMDGPURegisterBankInfo::buildReadFirstLane(MachineIRBuilder &B,
                                                    MachineRegisterInfo &MRI,
                                                    Register Src) const {
  LLT Ty = MRI.getType(Src);
  const RegisterBank *Bank = getRegBank(Src, MRI, *TRI);

  if (Bank == &AMDGPU::SGPRRegBank)
    return Src;

  unsigned Bits = Ty.getSizeInBits();
  assert(Bits % 32 == 0);

  // Bring the value onto the VGPR bank first so each 32-bit piece can be
  // constrained to VGPR_32 below.
  if (Bank != &AMDGPU::VGPRRegBank) {
    Src = B.buildCopy(Ty, Src).getReg(0);
    MRI.setRegBank(Src, AMDGPU::VGPRRegBank);
  }

  LLT S32 = LLT::scalar(32);
  unsigned NumParts = Bits / 32;
  SmallVector<Register, 8> SrcParts;
  SmallVector<Register, 8> DstParts;

  // Split into 32-bit pieces (no unmerge needed for a single part).
  if (Bits == 32) {
    SrcParts.push_back(Src);
  } else {
    auto Unmerge = B.buildUnmerge(S32, Src);
    for (unsigned i = 0; i < NumParts; ++i)
      SrcParts.push_back(Unmerge.getReg(i));
  }

  // Emit one readfirstlane per 32-bit piece, into SReg_32 results.
  for (unsigned i = 0; i < NumParts; ++i) {
    Register SrcPart = SrcParts[i];
    Register DstPart = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
    MRI.setType(DstPart, NumParts == 1 ? Ty : S32);

    const TargetRegisterClass *Constrained =
        constrainGenericRegister(SrcPart, AMDGPU::VGPR_32RegClass, MRI);
    (void)Constrained;
    assert(Constrained && "Failed to constrain readfirstlane src reg");

    B.buildInstr(AMDGPU::V_READFIRSTLANE_B32, {DstPart}, {SrcPart});

    DstParts.push_back(DstPart);
  }

  // Re-assemble the wide SGPR result from the 32-bit pieces.
  if (Bits == 32)
    return DstParts[0];

  Register Dst = B.buildMerge(Ty, DstParts).getReg(0);
  MRI.setRegBank(Dst, AMDGPU::SGPRRegBank);
  return Dst;
}
// Wrap the instructions in \p Range in a waterfall loop. Each operand listed
// in \p SGPROperandRegs is replaced by a readfirstlane'd uniform value; the
// loop repeats with a narrowed EXEC mask until every active lane's operand
// values have been processed. Always returns true once invoked.
bool AMDGPURegisterBankInfo::executeInWaterfallLoop(
    MachineIRBuilder &B,
    iterator_range<MachineBasicBlock::iterator> Range,
    SmallSet<Register, 4> &SGPROperandRegs,
    MachineRegisterInfo &MRI) const {

  // Cache of readfirstlane results so a register used more than once in the
  // range is only waterfalled a single time.
  DenseMap<Register, Register> WaterfalledRegMap;

  MachineBasicBlock &MBB = B.getMBB();
  MachineFunction *MF = &B.getMF();

  const TargetRegisterClass *WaveRC = TRI->getWaveMaskRegClass();

  // Wave-size dependent exec-manipulation opcodes and the exec register.
  const unsigned MovExecOpc =
      Subtarget.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
  const unsigned MovExecTermOpc =
      Subtarget.isWave32() ? AMDGPU::S_MOV_B32_term : AMDGPU::S_MOV_B64_term;
  const unsigned XorTermOpc = Subtarget.isWave32() ?
    AMDGPU::S_XOR_B32_term : AMDGPU::S_XOR_B64_term;
  const unsigned AndSaveExecOpc = Subtarget.isWave32() ?
    AMDGPU::S_AND_SAVEEXEC_B32 : AMDGPU::S_AND_SAVEEXEC_B64;
  const unsigned ExecReg = Subtarget.isWave32() ?
    AMDGPU::EXEC_LO : AMDGPU::EXEC;

#ifndef NDEBUG
  const int OrigRangeSize = std::distance(Range.begin(), Range.end());
#endif

  Register SaveExecReg = MRI.createVirtualRegister(WaveRC);
  Register InitSaveExecReg = MRI.createVirtualRegister(WaveRC);

  // Initial PHI input for the first loop iteration.
  B.buildInstr(TargetOpcode::IMPLICIT_DEF)
    .addDef(InitSaveExecReg);

  Register PhiExec = MRI.createVirtualRegister(WaveRC);
  Register NewExec = MRI.createVirtualRegister(WaveRC);

  // Block layout:
  //   MBB -> LoopBB -> BodyBB -> (back to LoopBB | RestoreExecBB)
  //   RestoreExecBB -> RemainderBB (which inherits MBB's old successors).
  MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
  MachineBasicBlock *BodyBB = MF->CreateMachineBasicBlock();
  MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
  MachineBasicBlock *RestoreExecBB = MF->CreateMachineBasicBlock();

  MachineFunction::iterator MBBI(MBB);
  ++MBBI;
  MF->insert(MBBI, LoopBB);
  MF->insert(MBBI, BodyBB);
  MF->insert(MBBI, RestoreExecBB);
  MF->insert(MBBI, RemainderBB);

  LoopBB->addSuccessor(BodyBB);
  BodyBB->addSuccessor(RestoreExecBB);
  BodyBB->addSuccessor(LoopBB);

  // Move everything after the range into RemainderBB, which takes over MBB's
  // successors and PHIs.
  RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
  RemainderBB->splice(RemainderBB->begin(), &MBB, Range.end(), MBB.end());

  MBB.addSuccessor(LoopBB);
  RestoreExecBB->addSuccessor(RemainderBB);

  B.setInsertPt(*LoopBB, LoopBB->end());

  // Loop header PHI: initial save mask on entry, updated mask from BodyBB on
  // each back edge.
  B.buildInstr(TargetOpcode::PHI)
      .addDef(PhiExec)
      .addReg(InitSaveExecReg)
      .addMBB(&MBB)
      .addReg(NewExec)
      .addMBB(BodyBB);

  const DebugLoc &DL = B.getDL();

  MachineInstr &FirstInst = *Range.begin();

  // Move the range into the loop body. Note: everything after Range.end()
  // was already spliced away above, so splicing to MBB.end() moves exactly
  // the range.
  BodyBB->splice(BodyBB->end(), &MBB, Range.begin(), MBB.end());

  // Recompute the iterator range after splicing.
  MachineBasicBlock::iterator NewBegin = FirstInst.getIterator();
  auto NewEnd = BodyBB->end();

  B.setMBB(*LoopBB);

  LLT S1 = LLT::scalar(1);
  Register CondReg;

  assert(std::distance(NewBegin, NewEnd) == OrigRangeSize);

  for (MachineInstr &MI : make_range(NewBegin, NewEnd)) {
    for (MachineOperand &Op : MI.uses()) {
      if (!Op.isReg() || Op.isDef())
        continue;

      Register OldReg = Op.getReg();
      if (!SGPROperandRegs.count(OldReg))
        continue;

      // Reuse a previously computed readfirstlane result for this register.
      auto OldVal = WaterfalledRegMap.find(OldReg);
      if (OldVal != WaterfalledRegMap.end()) {
        Op.setReg(OldVal->second);
        continue;
      }

      Register OpReg = Op.getReg();
      LLT OpTy = MRI.getType(OpReg);

      const RegisterBank *OpBank = getRegBank(OpReg, MRI, *TRI);
      if (OpBank != &AMDGPU::VGPRRegBank) {
        // Copy onto the VGPR bank before the loop (in MBB), then return the
        // insert point to the loop header.
        B.setMBB(MBB);
        OpReg = B.buildCopy(OpTy, OpReg).getReg(0);
        MRI.setRegBank(OpReg, AMDGPU::VGPRRegBank);
        B.setMBB(*LoopBB);
      }

      Register CurrentLaneReg = buildReadFirstLane(B, MRI, OpReg);

      // Compare the chosen uniform value against the per-lane value, in
      // 64-bit chunks when the size allows, otherwise 32-bit chunks.
      unsigned OpSize = OpTy.getSizeInBits();
      bool Is64 = OpSize % 64 == 0;
      unsigned PartSize = Is64 ? 64 : 32;
      LLT PartTy = LLT::scalar(PartSize);
      unsigned NumParts = OpSize / PartSize;
      SmallVector<Register, 8> OpParts;
      SmallVector<Register, 8> CurrentLaneParts;

      if (NumParts == 1) {
        OpParts.push_back(OpReg);
        CurrentLaneParts.push_back(CurrentLaneReg);
      } else {
        auto UnmergeOp = B.buildUnmerge(PartTy, OpReg);
        auto UnmergeCurrentLane = B.buildUnmerge(PartTy, CurrentLaneReg);
        for (unsigned i = 0; i < NumParts; ++i) {
          OpParts.push_back(UnmergeOp.getReg(i));
          CurrentLaneParts.push_back(UnmergeCurrentLane.getReg(i));
          MRI.setRegBank(OpParts[i], AMDGPU::VGPRRegBank);
          MRI.setRegBank(CurrentLaneParts[i], AMDGPU::SGPRRegBank);
        }
      }

      // AND together the per-part equality conditions (also ANDs across all
      // waterfalled operands via the shared CondReg).
      for (unsigned i = 0; i < NumParts; ++i) {
        auto CmpReg = B.buildICmp(CmpInst::ICMP_EQ, S1, CurrentLaneParts[i],
                                  OpParts[i]).getReg(0);
        MRI.setRegBank(CmpReg, AMDGPU::VCCRegBank);

        if (!CondReg) {
          CondReg = CmpReg;
        } else {
          CondReg = B.buildAnd(S1, CondReg, CmpReg).getReg(0);
          MRI.setRegBank(CondReg, AMDGPU::VCCRegBank);
        }
      }

      Op.setReg(CurrentLaneReg);

      // Remember the replacement so later uses of OldReg reuse it.
      WaterfalledRegMap.insert(std::make_pair(OldReg, Op.getReg()));
    }
  }

  // Turn the combined s1 condition into a wave-sized lane mask.
  CondReg = B.buildIntrinsic(Intrinsic::amdgcn_ballot,
                             {LLT::scalar(Subtarget.isWave32() ? 32 : 64)},
                             false)
                .addReg(CondReg)
                .getReg(0);
  MRI.setRegClass(CondReg, WaveRC);

  // Narrow EXEC to the matching lanes; NewExec feeds the header PHI and the
  // terminator XOR below.
  B.buildInstr(AndSaveExecOpc)
      .addDef(NewExec)
      .addReg(CondReg, RegState::Kill);

  MRI.setSimpleHint(NewExec, CondReg);

  B.setInsertPt(*BodyBB, BodyBB->end());

  // Update EXEC for the next iteration (terminator XOR with NewExec).
  B.buildInstr(XorTermOpc)
      .addDef(ExecReg)
      .addReg(ExecReg)
      .addReg(NewExec);

  // Pseudo branch back to the loop header while lanes remain.
  B.buildInstr(AMDGPU::SI_WATERFALL_LOOP).addMBB(LoopBB);

  // Save the original EXEC mask just before entering the loop (in MBB).
  BuildMI(MBB, MBB.end(), DL, TII->get(MovExecOpc), SaveExecReg)
      .addReg(ExecReg);

  // Restore the full EXEC mask after the loop.
  B.setMBB(*RestoreExecBB);
  B.buildInstr(MovExecTermOpc)
      .addDef(ExecReg)
      .addReg(SaveExecReg);

  // Leave the builder positioned in the remainder block for any follow-up
  // instructions.
  B.setInsertPt(*RemainderBB, RemainderBB->begin());

  return true;
}
// Collect, from the operand indices in \p OpIndices, the registers that are
// not already on the SGPR bank. Returns true when a waterfall loop is needed
// (i.e. at least one divergent operand was found).
bool AMDGPURegisterBankInfo::collectWaterfallOperands(
    SmallSet<Register, 4> &SGPROperandRegs, MachineInstr &MI,
    MachineRegisterInfo &MRI, ArrayRef<unsigned> OpIndices) const {
  for (unsigned OpIdx : OpIndices) {
    const MachineOperand &MO = MI.getOperand(OpIdx);
    assert(MO.isUse());
    Register Reg = MO.getReg();
    const RegisterBank *Bank = getRegBank(Reg, MRI, *TRI);
    if (Bank->getID() != AMDGPU::SGPRRegBankID)
      SGPROperandRegs.insert(Reg);
  }

  return !SGPROperandRegs.empty();
}
// Single-instruction convenience overload: waterfall \p MI over the operands
// named by \p OpIndices. Returns false when no operand needs it.
bool AMDGPURegisterBankInfo::executeInWaterfallLoop(
    MachineIRBuilder &B, MachineInstr &MI, MachineRegisterInfo &MRI,
    ArrayRef<unsigned> OpIndices) const {
  SmallSet<Register, 4> SGPROperandRegs;

  // Nothing to do when every listed operand is already uniform.
  if (!collectWaterfallOperands(SGPROperandRegs, MI, MRI, OpIndices))
    return false;

  MachineBasicBlock::iterator It = MI.getIterator();
  auto SingleInstRange = make_range(It, std::next(It));
  return executeInWaterfallLoop(B, SingleInstRange, SGPROperandRegs, MRI);
}
// Builderless convenience overload: constructs a MachineIRBuilder at \p MI
// and forwards to the builder-taking overload.
bool AMDGPURegisterBankInfo::executeInWaterfallLoop(
    MachineInstr &MI, MachineRegisterInfo &MRI,
    ArrayRef<unsigned> OpIndices) const {
  MachineIRBuilder B(MI);
  return executeInWaterfallLoop(B, MI, MRI, OpIndices);
}
// Force operand \p OpIdx of \p MI onto the SGPR bank by inserting a
// readfirstlane sequence when it is not already uniform.
void AMDGPURegisterBankInfo::constrainOpWithReadfirstlane(
    MachineInstr &MI, MachineRegisterInfo &MRI, unsigned OpIdx) const {
  Register Reg = MI.getOperand(OpIdx).getReg();
  if (getRegBank(Reg, MRI, *TRI) == &AMDGPU::SGPRRegBank)
    return; // Already uniform; nothing to do.

  MachineIRBuilder B(MI);
  Register UniformReg = buildReadFirstLane(B, MRI, Reg);
  MI.getOperand(OpIdx).setReg(UniformReg);
}
// Split \p Ty into a leading part of \p FirstSize bits and a trailing part
// with the remaining bits. Vectors split on element boundaries; FirstSize
// must be a multiple of the element size.
static std::pair<LLT, LLT> splitUnequalType(LLT Ty, unsigned FirstSize) {
  const unsigned TotalSize = Ty.getSizeInBits();

  if (Ty.isVector()) {
    const LLT EltTy = Ty.getElementType();
    const unsigned EltSize = EltTy.getSizeInBits();
    assert(FirstSize % EltSize == 0);

    const unsigned LeadingElts = FirstSize / EltSize;
    const unsigned TrailingElts = (TotalSize - FirstSize) / EltSize;
    return {LLT::scalarOrVector(ElementCount::getFixed(LeadingElts), EltTy),
            LLT::scalarOrVector(ElementCount::getFixed(TrailingElts), EltTy)};
  }

  // Scalars simply split into two smaller scalars.
  return {LLT::scalar(FirstSize), LLT::scalar(TotalSize - FirstSize)};
}
// Widen a 96-bit type to the 128-bit type holding it: a wider vector of the
// same element type, or a plain s128 for scalars.
static LLT widen96To128(LLT Ty) {
  if (Ty.isVector()) {
    LLT EltTy = Ty.getElementType();
    assert(128 % EltTy.getSizeInBits() == 0);
    return LLT::fixed_vector(128 / EltTy.getSizeInBits(), EltTy);
  }

  return LLT::scalar(128);
}
// Rewrite a load whose chosen mapping requires widening or splitting:
// widen 8/16-bit scalar extloads to 32 bits, handle 96-bit scalar loads, and
// split oversized VGPR loads into 128-bit pieces. Returns true if \p MI was
// rewritten.
bool AMDGPURegisterBankInfo::applyMappingLoad(MachineInstr &MI,
                        const AMDGPURegisterBankInfo::OperandsMapper &OpdMapper,
                        MachineRegisterInfo &MRI) const {
  Register DstReg = MI.getOperand(0).getReg();
  const LLT LoadTy = MRI.getType(DstReg);
  unsigned LoadSize = LoadTy.getSizeInBits();
  const unsigned MaxNonSmrdLoadSize = 128;

  const RegisterBank *DstBank =
    OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank;
  if (DstBank == &AMDGPU::SGPRRegBank) {
    // Only 32-bit (extload widening) and 96-bit scalar loads need work here.
    if (LoadSize != 32 && LoadSize != 96)
      return false;

    MachineMemOperand *MMO = *MI.memoperands_begin();
    const unsigned MemSize = 8 * MMO->getSize();
    // A 32-bit scalar load only needs rewriting when it is an 8/16-bit
    // access that can be widened to a legal full-dword scalar load.
    if (LoadSize == 32 &&
        (MemSize == 32 || LoadTy.isVector() || !isScalarLoadLegal(MI)))
      return false;

    Register PtrReg = MI.getOperand(1).getReg();

    ApplyRegBankMapping O(*this, MRI, &AMDGPU::SGPRRegBank);
    MachineIRBuilder B(MI, O);

    if (LoadSize == 32) {
      // Widen the access to 32 bits, then re-apply the sign/zero extension
      // the original extload performed.
      const LLT S32 = LLT::scalar(32);
      if (MI.getOpcode() == AMDGPU::G_SEXTLOAD) {
        auto WideLoad = B.buildLoadFromOffset(S32, PtrReg, *MMO, 0);
        B.buildSExtInReg(MI.getOperand(0), WideLoad, MemSize);
      } else if (MI.getOpcode() == AMDGPU::G_ZEXTLOAD) {
        auto WideLoad = B.buildLoadFromOffset(S32, PtrReg, *MMO, 0);
        B.buildZExtInReg(MI.getOperand(0), WideLoad, MemSize);
      } else
        // Plain G_LOAD: reissue directly at the destination.
        B.buildLoadFromOffset(MI.getOperand(0), PtrReg, *MMO, 0);
    } else {
      // 96-bit scalar load.
      if (MMO->getAlign() < Align(16)) {
        // Under-aligned: split into a 64-bit and a 32-bit load via the
        // legalizer helper.
        MachineFunction *MF = MI.getParent()->getParent();
        ApplyRegBankMapping ApplyBank(*this, MRI, DstBank);
        MachineIRBuilder B(MI, ApplyBank);
        LegalizerHelper Helper(*MF, ApplyBank, B);
        LLT Part64, Part32;
        std::tie(Part64, Part32) = splitUnequalType(LoadTy, 64);
        if (Helper.reduceLoadStoreWidth(cast<GAnyLoad>(MI), 0, Part64) !=
            LegalizerHelper::Legalized)
          return false;
        return true;
      } else {
        // 16-byte aligned: widen to a 128-bit load and discard the extra
        // element / bits.
        LLT WiderTy = widen96To128(LoadTy);
        auto WideLoad = B.buildLoadFromOffset(WiderTy, PtrReg, *MMO, 0);
        if (WiderTy.isScalar())
          B.buildTrunc(MI.getOperand(0), WideLoad);
        else {
          B.buildDeleteTrailingVectorElements(MI.getOperand(0).getReg(),
                                              WideLoad);
        }
      }
    }

    MI.eraseFromParent();
    return true;
  }

  // VGPR path: only loads wider than the 128-bit maximum need splitting.
  if (LoadSize <= MaxNonSmrdLoadSize)
    return false;

  SmallVector<Register, 16> DefRegs(OpdMapper.getVRegs(0));
  SmallVector<Register, 1> SrcRegs(OpdMapper.getVRegs(1));

  if (SrcRegs.empty())
    SrcRegs.push_back(MI.getOperand(1).getReg());

  assert(LoadSize % MaxNonSmrdLoadSize == 0);

  // Restore the pointer type on the (possibly repartitioned) base register so
  // the legalizer sees a pointer operand.
  Register BasePtrReg = SrcRegs[0];
  LLT PtrTy = MRI.getType(MI.getOperand(1).getReg());
  MRI.setType(BasePtrReg, PtrTy);

  unsigned NumSplitParts = LoadTy.getSizeInBits() / MaxNonSmrdLoadSize;
  const LLT LoadSplitTy = LoadTy.divide(NumSplitParts);
  ApplyRegBankMapping Observer(*this, MRI, &AMDGPU::VGPRRegBank);
  MachineIRBuilder B(MI, Observer);
  LegalizerHelper Helper(B.getMF(), Observer, B);

  // Vectors split by element count, scalars by narrowing.
  if (LoadTy.isVector()) {
    if (Helper.fewerElementsVector(MI, 0, LoadSplitTy) != LegalizerHelper::Legalized)
      return false;
  } else {
    if (Helper.narrowScalar(MI, 0, LoadSplitTy) != LegalizerHelper::Legalized)
      return false;
  }

  MRI.setRegBank(DstReg, AMDGPU::VGPRRegBank);
  return true;
}
// Lower G_DYN_STACKALLOC when the size is uniform: the size is shifted left
// by log2(wavefront size) and added to the stack pointer; over-aligned
// allocations additionally mask the low pointer bits. Returns false for the
// cases left to other lowering (grows-down stacks, divergent sizes).
bool AMDGPURegisterBankInfo::applyMappingDynStackAlloc(
  MachineInstr &MI,
  const AMDGPURegisterBankInfo::OperandsMapper &OpdMapper,
  MachineRegisterInfo &MRI) const {
  const MachineFunction &MF = *MI.getMF();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const auto &TFI = *ST.getFrameLowering();

  // This lowering only applies when the stack grows up.
  if (TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown)
    return false;

  Register Dst = MI.getOperand(0).getReg();
  Register AllocSize = MI.getOperand(1).getReg();
  Align Alignment = assumeAligned(MI.getOperand(2).getImm());

  const RegisterBank *SizeBank = getRegBank(AllocSize, MRI, *TRI);

  // A divergent allocation size is not handled here.
  if (SizeBank != &AMDGPU::SGPRRegBank)
    return false;

  LLT PtrTy = MRI.getType(Dst);
  LLT IntPtrTy = LLT::scalar(PtrTy.getSizeInBits());

  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  Register SPReg = Info->getStackPtrOffsetReg();
  ApplyRegBankMapping ApplyBank(*this, MRI, &AMDGPU::SGPRRegBank);
  MachineIRBuilder B(MI, ApplyBank);

  // Scale the per-lane size by the wavefront size (shift by its log2).
  auto WaveSize = B.buildConstant(LLT::scalar(32), ST.getWavefrontSizeLog2());
  auto ScaledSize = B.buildShl(IntPtrTy, AllocSize, WaveSize);

  auto SPCopy = B.buildCopy(PtrTy, SPReg);
  if (Alignment > TFI.getStackAlign()) {
    // Over-aligned: advance past the allocation, then clear the low bits to
    // reach the requested (wave-scaled) alignment.
    auto PtrAdd = B.buildPtrAdd(PtrTy, SPCopy, ScaledSize);
    B.buildMaskLowPtrBits(Dst, PtrAdd,
                          Log2(Alignment) + ST.getWavefrontSizeLog2());
  } else {
    B.buildPtrAdd(Dst, SPCopy, ScaledSize);
  }

  MI.eraseFromParent();
  return true;
}
// Apply the default mapping to an image intrinsic, then waterfall over the
// resource descriptor operand (and the operand immediately following it,
// when present).
bool AMDGPURegisterBankInfo::applyMappingImage(
    MachineInstr &MI, const AMDGPURegisterBankInfo::OperandsMapper &OpdMapper,
    MachineRegisterInfo &MRI, int RsrcIdx) const {
  const int NumDefs = MI.getNumExplicitDefs();

  // Translate RsrcIdx to an absolute operand index; the +1 presumably skips
  // the intrinsic ID operand — confirm against the intrinsic operand layout.
  RsrcIdx += NumDefs + 1;

  applyDefaultMapping(OpdMapper);

  // Gather the register operands that must be made uniform.
  SmallVector<unsigned, 4> SGPRIndexes;
  const int NumOps = MI.getNumOperands();
  for (int OpIdx = NumDefs; OpIdx != NumOps; ++OpIdx) {
    if (!MI.getOperand(OpIdx).isReg())
      continue;
    if (OpIdx == RsrcIdx || OpIdx == RsrcIdx + 1)
      SGPRIndexes.push_back(OpIdx);
  }

  executeInWaterfallLoop(MI, MRI, SGPRIndexes);
  return true;
}
// Look through copies to the defining instruction's result register; fall
// back to \p Reg itself when no def is visible.
static Register getSrcRegIgnoringCopies(const MachineRegisterInfo &MRI,
                                        Register Reg) {
  if (MachineInstr *Def = getDefIgnoringCopies(Reg, MRI))
    return Def->getOperand(0).getReg();
  return Reg;
}
// Decompose \p CombinedOffset for a buffer access into a VGPR voffset, an
// SGPR soffset, and an immediate instruction offset (\p InstOffsetVal).
// Returns the total constant offset folded out in the fully-constant case,
// and 0 otherwise.
static unsigned setBufferOffsets(MachineIRBuilder &B,
                                 const AMDGPURegisterBankInfo &RBI,
                                 Register CombinedOffset, Register &VOffsetReg,
                                 Register &SOffsetReg, int64_t &InstOffsetVal,
                                 Align Alignment) {
  const LLT S32 = LLT::scalar(32);
  MachineRegisterInfo *MRI = B.getMRI();

  // Case 1: the whole offset is a constant — split it between soffset and
  // the immediate field, with a zero voffset.
  if (Optional<int64_t> Imm = getIConstantVRegSExtVal(CombinedOffset, *MRI)) {
    uint32_t SOffset, ImmOffset;
    if (AMDGPU::splitMUBUFOffset(*Imm, SOffset, ImmOffset, &RBI.Subtarget,
                                 Alignment)) {
      VOffsetReg = B.buildConstant(S32, 0).getReg(0);
      SOffsetReg = B.buildConstant(S32, SOffset).getReg(0);
      InstOffsetVal = ImmOffset;

      B.getMRI()->setRegBank(VOffsetReg, AMDGPU::VGPRRegBank);
      B.getMRI()->setRegBank(SOffsetReg, AMDGPU::SGPRRegBank);
      return SOffset + ImmOffset;
    }
  }

  // Case 2: base register plus a foldable constant offset.
  Register Base;
  unsigned Offset;
  std::tie(Base, Offset) =
      AMDGPU::getBaseWithConstantOffset(*MRI, CombinedOffset);

  uint32_t SOffset, ImmOffset;
  if ((int)Offset > 0 && AMDGPU::splitMUBUFOffset(Offset, SOffset, ImmOffset,
                                                  &RBI.Subtarget, Alignment)) {
    if (RBI.getRegBank(Base, *MRI, *RBI.TRI) == &AMDGPU::VGPRRegBank) {
      // Divergent base goes in voffset; constant part in soffset.
      VOffsetReg = Base;
      SOffsetReg = B.buildConstant(S32, SOffset).getReg(0);
      B.getMRI()->setRegBank(SOffsetReg, AMDGPU::SGPRRegBank);
      InstOffsetVal = ImmOffset;
      // NOTE(review): returns 0 despite folding ImmOffset — confirm callers
      // only use the nonzero return for the fully-constant case above.
      return 0;
    }

    // Uniform base can serve directly as soffset when no other soffset part
    // is needed.
    if (SOffset == 0) {
      VOffsetReg = B.buildConstant(S32, 0).getReg(0);
      B.getMRI()->setRegBank(VOffsetReg, AMDGPU::VGPRRegBank);
      SOffsetReg = Base;
      InstOffsetVal = ImmOffset;
      return 0;
    }
  }

  // Case 3: variable sgpr + vgpr — look through a G_ADD of a divergent and a
  // uniform part and route each to the matching operand.
  MachineInstr *Add = getOpcodeDef(AMDGPU::G_ADD, CombinedOffset, *MRI);
  if (Add && (int)Offset >= 0) {
    Register Src0 = getSrcRegIgnoringCopies(*MRI, Add->getOperand(1).getReg());
    Register Src1 = getSrcRegIgnoringCopies(*MRI, Add->getOperand(2).getReg());

    const RegisterBank *Src0Bank = RBI.getRegBank(Src0, *MRI, *RBI.TRI);
    const RegisterBank *Src1Bank = RBI.getRegBank(Src1, *MRI, *RBI.TRI);

    if (Src0Bank == &AMDGPU::VGPRRegBank && Src1Bank == &AMDGPU::SGPRRegBank) {
      VOffsetReg = Src0;
      SOffsetReg = Src1;
      return 0;
    }

    if (Src0Bank == &AMDGPU::SGPRRegBank && Src1Bank == &AMDGPU::VGPRRegBank) {
      VOffsetReg = Src1;
      SOffsetReg = Src0;
      return 0;
    }
  }

  // Fallback: the entire combined offset becomes voffset (copied to a VGPR
  // if needed) with a zero soffset.
  if (RBI.getRegBank(CombinedOffset, *MRI, *RBI.TRI) == &AMDGPU::VGPRRegBank) {
    VOffsetReg = CombinedOffset;
  } else {
    VOffsetReg = B.buildCopy(S32, CombinedOffset).getReg(0);
    B.getMRI()->setRegBank(VOffsetReg, AMDGPU::VGPRRegBank);
  }

  SOffsetReg = B.buildConstant(S32, 0).getReg(0);
  B.getMRI()->setRegBank(SOffsetReg, AMDGPU::SGPRRegBank);
  return 0;
}
/// Lower a scalar buffer load whose operands are not all uniform.
///
/// A fully-uniform rsrc + offset pair can stay a scalar load and is left
/// untouched. Otherwise the load is rewritten as one or more (for 256/512-bit
/// results, 128-bit-per-part) G_AMDGPU_BUFFER_LOAD instructions, with the
/// offset split into voffset/soffset/immediate fields, and wrapped in a
/// waterfall loop when the rsrc itself is divergent.
bool AMDGPURegisterBankInfo::applyMappingSBufferLoad(
    const OperandsMapper &OpdMapper) const {
  MachineInstr &MI = OpdMapper.getMI();
  MachineRegisterInfo &MRI = OpdMapper.getMRI();

  const LLT S32 = LLT::scalar(32);
  Register Dst = MI.getOperand(0).getReg();
  LLT Ty = MRI.getType(Dst);

  const RegisterBank *RSrcBank =
      OpdMapper.getInstrMapping().getOperandMapping(1).BreakDown[0].RegBank;
  const RegisterBank *OffsetBank =
      OpdMapper.getInstrMapping().getOperandMapping(2).BreakDown[0].RegBank;

  // Fully uniform operands: the scalar form is already selectable.
  if (RSrcBank == &AMDGPU::SGPRRegBank &&
      OffsetBank == &AMDGPU::SGPRRegBank)
    return true;

  // Wide results are split into 128-bit pieces, each loaded separately.
  unsigned LoadSize = Ty.getSizeInBits();
  int NumLoads = 1;
  if (LoadSize == 256 || LoadSize == 512) {
    NumLoads = LoadSize / 128;
    Ty = Ty.divide(NumLoads);
  }

  // Multi-part loads assume the per-part 16-byte stride stays in bounds.
  const Align Alignment = NumLoads > 1 ? Align(16 * NumLoads) : Align(1);

  MachineIRBuilder B(MI);
  MachineFunction &MF = B.getMF();

  Register SOffset;
  Register VOffset;
  int64_t ImmOffset = 0;

  // Decompose the combined offset; MMOOffset is any constant byte offset
  // that was folded entirely into the soffset/immediate fields.
  unsigned MMOOffset = setBufferOffsets(B, *this, MI.getOperand(2).getReg(),
                                        VOffset, SOffset, ImmOffset, Alignment);

  const unsigned MemSize = (Ty.getSizeInBits() + 7) / 8;
  const Align MemAlign(4); // FIXME: ABI type alignment?
  MachineMemOperand *BaseMMO = MF.getMachineMemOperand(
      MachinePointerInfo(),
      MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
          MachineMemOperand::MOInvariant,
      MemSize, MemAlign);
  // Fold the statically-known offset into the memory operand once; BaseMMO
  // now describes the first 128-bit part.
  if (MMOOffset != 0)
    BaseMMO = MF.getMachineMemOperand(BaseMMO, MMOOffset, MemSize);

  Register RSrc = MI.getOperand(1).getReg();
  Register VIndex = B.buildConstant(S32, 0).getReg(0);
  B.getMRI()->setRegBank(VIndex, AMDGPU::VGPRRegBank);

  SmallVector<Register, 4> LoadParts(NumLoads);

  MachineBasicBlock::iterator MII = MI.getIterator();
  MachineInstrSpan Span(MII, &B.getMBB());

  for (int i = 0; i < NumLoads; ++i) {
    if (NumLoads == 1) {
      LoadParts[i] = Dst;
    } else {
      LoadParts[i] = MRI.createGenericVirtualRegister(Ty);
      MRI.setRegBank(LoadParts[i], AMDGPU::VGPRRegBank);
    }

    // Part i accesses 16 * i bytes past the base. BaseMMO already includes
    // MMOOffset, so apply only the per-part delta here (the previous code
    // advanced BaseMMO itself *after* capturing MMO, which gave part 1 the
    // operand of part 0 and double-counted MMOOffset for later parts).
    MachineMemOperand *MMO = BaseMMO;
    if (i != 0)
      MMO = MF.getMachineMemOperand(BaseMMO, 16 * i, MemSize);

    B.buildInstr(AMDGPU::G_AMDGPU_BUFFER_LOAD)
        .addDef(LoadParts[i])       // vdata
        .addUse(RSrc)               // rsrc
        .addUse(VIndex)             // vindex
        .addUse(VOffset)            // voffset
        .addUse(SOffset)            // soffset
        .addImm(ImmOffset + 16 * i) // offset(imm)
        .addImm(0)                  // cachepolicy, swizzled buffer(imm)
        .addImm(0)                  // idxen(imm)
        .addMemOperand(MMO);
  }

  // A divergent rsrc must be made wave-uniform by executing the loads inside
  // a waterfall loop over the distinct rsrc values.
  if (RSrcBank != &AMDGPU::SGPRRegBank) {
    // Remove the original instruction to avoid potentially confusing the
    // waterfall loop logic.
    B.setInstr(*Span.begin());
    MI.eraseFromParent();

    SmallSet<Register, 4> OpsToWaterfall;
    OpsToWaterfall.insert(RSrc);
    executeInWaterfallLoop(B, make_range(Span.begin(), Span.end()),
                           OpsToWaterfall, MRI);
  }

  // Recombine the 128-bit parts into the original wide result.
  if (NumLoads != 1) {
    if (Ty.isVector())
      B.buildConcatVectors(Dst, LoadParts);
    else
      B.buildMerge(Dst, LoadParts);
  }

  // In the uniform-rsrc case the original instruction is still present and
  // must be removed here.
  if (RSrcBank == &AMDGPU::SGPRRegBank)
    MI.eraseFromParent();

  return true;
}
/// Lower a bitfield-extract (G_SBFX/G_UBFX or the corresponding intrinsic
/// form) for the bank assignment chosen by the mapper. \p Signed selects
/// the sign-extending variant.
bool AMDGPURegisterBankInfo::applyMappingBFE(const OperandsMapper &OpdMapper,
                                             bool Signed) const {
  MachineInstr &MI = OpdMapper.getMI();
  MachineRegisterInfo &MRI = OpdMapper.getMRI();

  applyDefaultMapping(OpdMapper);

  Register DstReg = MI.getOperand(0).getReg();
  LLT Ty = MRI.getType(DstReg);

  const LLT S32 = LLT::scalar(32);

  // Intrinsic form carries the intrinsic ID in operand 1, shifting the value
  // operands up by one.
  unsigned FirstOpnd = MI.getOpcode() == AMDGPU::G_INTRINSIC ? 2 : 1;
  Register SrcReg = MI.getOperand(FirstOpnd).getReg();
  Register OffsetReg = MI.getOperand(FirstOpnd + 1).getReg();
  Register WidthReg = MI.getOperand(FirstOpnd + 2).getReg();

  const RegisterBank *DstBank =
      OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank;
  if (DstBank == &AMDGPU::VGPRRegBank) {
    // 32-bit VALU BFE is directly selectable.
    if (Ty == S32)
      return true;

    // 64-bit VALU path: no 64-bit BFE instruction exists, so shift the field
    // down to bit 0 and then fix up the high 32 bits.
    ApplyRegBankMapping ApplyBank(*this, MRI, &AMDGPU::VGPRRegBank);
    MachineIRBuilder B(MI, ApplyBank);

    const LLT S64 = LLT::scalar(64);
    // Shift the source so the field starts at bit 0 (arithmetic shift for the
    // signed variant so the sign already propagates into the high half).
    auto ShiftOffset = Signed ? B.buildAShr(S64, SrcReg, OffsetReg)
                              : B.buildLShr(S64, SrcReg, OffsetReg);
    auto UnmergeSOffset = B.buildUnmerge({S32, S32}, ShiftOffset);

    // With a constant width the extract can be done with one 32-bit BFE on
    // the relevant half.
    if (auto ConstWidth = getIConstantVRegValWithLookThrough(WidthReg, MRI)) {
      auto Zero = B.buildConstant(S32, 0);
      auto WidthImm = ConstWidth->Value.getZExtValue();
      if (WidthImm <= 32) {
        // Field fits in the low half: BFE the low 32 bits, then either
        // replicate the sign bit (signed) or zero (unsigned) into the high
        // half.
        auto Extract =
            Signed ? B.buildSbfx(S32, UnmergeSOffset.getReg(0), Zero, WidthReg)
                   : B.buildUbfx(S32, UnmergeSOffset.getReg(0), Zero, WidthReg);
        auto Extend =
            Signed ? B.buildAShr(S32, Extract, B.buildConstant(S32, 31)) : Zero;
        B.buildMerge(DstReg, {Extract, Extend});
      } else {
        // Field spans both halves: the low 32 bits come through unchanged;
        // only the (WidthImm - 32) bits that land in the high half need a
        // BFE.
        auto UpperWidth = B.buildConstant(S32, WidthImm - 32);
        auto Extract =
            Signed
                ? B.buildSbfx(S32, UnmergeSOffset.getReg(1), Zero, UpperWidth)
                : B.buildUbfx(S32, UnmergeSOffset.getReg(1), Zero, UpperWidth);
        B.buildMerge(DstReg, {UnmergeSOffset.getReg(0), Extract});
      }
      MI.eraseFromParent();
      return true;
    }

    // Variable width: emulate the extract with a shift-left / shift-right
    // pair by (64 - width), which drops the bits above the field and then
    // extends (arithmetically or logically) back down.
    auto ExtShift = B.buildSub(S32, B.buildConstant(S32, 64), WidthReg);
    auto SignBit = B.buildShl(S64, ShiftOffset, ExtShift);
    if (Signed)
      B.buildAShr(S64, SignBit, ExtShift);
    else
      B.buildLShr(S64, SignBit, ExtShift);
    MI.eraseFromParent();
    return true;
  }

  // SALU path: S_BFE takes a single packed operand with the offset in bits
  // [5:0] and the width in bits [22:16], so clamp the offset to 6 bits and
  // OR in the shifted width.
  ApplyRegBankMapping ApplyBank(*this, MRI, &AMDGPU::SGPRRegBank);
  MachineIRBuilder B(MI, ApplyBank);

  auto OffsetMask = B.buildConstant(S32, maskTrailingOnes<unsigned>(6));
  auto ClampOffset = B.buildAnd(S32, OffsetReg, OffsetMask);
  auto ShiftWidth = B.buildShl(S32, WidthReg, B.buildConstant(S32, 16));
  auto MergedInputs = B.buildOr(S32, ClampOffset, ShiftWidth);

  unsigned Opc = Ty == S32 ? (Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32) :
                             (Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64);

  auto MIB = B.buildInstr(Opc, {DstReg}, {SrcReg, MergedInputs});
  if (!constrainSelectedInstRegOperands(*MIB, *TII, *TRI, *this))
    llvm_unreachable("failed to constrain BFE");

  MI.eraseFromParent();
  return true;
}
/// Lower G_AMDGPU_MAD_U64_U32 / G_AMDGPU_MAD_I64_I32 when the multiply
/// sources were assigned to SGPRs: expand into 32-bit scalar multiplies
/// (low and high halves) plus an optional 64-bit accumulate, since no scalar
/// MAD instruction exists.
bool AMDGPURegisterBankInfo::applyMappingMAD_64_32(
    const OperandsMapper &OpdMapper) const {
  MachineInstr &MI = OpdMapper.getMI();
  MachineRegisterInfo &MRI = OpdMapper.getMRI();

  applyDefaultMapping(OpdMapper);

  // Operands: (result s64, carry, src0 s32, src1 s32, accumulator s64).
  Register Dst0 = MI.getOperand(0).getReg();
  Register Dst1 = MI.getOperand(1).getReg();
  Register Src0 = MI.getOperand(2).getReg();
  Register Src1 = MI.getOperand(3).getReg();
  Register Src2 = MI.getOperand(4).getReg();

  // VALU sources: the hardware MAD instruction handles this case directly.
  if (MRI.getRegBankOrNull(Src0) == &AMDGPU::VGPRRegBank)
    return true;

  bool IsUnsigned = MI.getOpcode() == AMDGPU::G_AMDGPU_MAD_U64_U32;
  LLT S1 = LLT::scalar(1);
  LLT S32 = LLT::scalar(32);

  // The accumulator's bank decides where the final result lives.
  bool DstOnValu = MRI.getRegBankOrNull(Src2) == &AMDGPU::VGPRRegBank;
  bool Accumulate = true;

  // A known-zero uniform accumulator lets us skip the add entirely.
  if (!DstOnValu) {
    if (mi_match(Src2, MRI, m_ZeroInt()))
      Accumulate = false;
  }

  // Keep the multiplication in SGPRs if possible.
  MachineIRBuilder B(MI);

  Register DstHi;
  Register DstLo = B.buildMul(S32, Src0, Src1).getReg(0);
  bool MulHiInVgpr = false;

  MRI.setRegBank(DstLo, AMDGPU::SGPRRegBank);

  if (Subtarget.hasSMulHi()) {
    // Scalar high-multiply available: whole product stays on SALU.
    DstHi = IsUnsigned ? B.buildUMulH(S32, Src0, Src1).getReg(0)
                       : B.buildSMulH(S32, Src0, Src1).getReg(0);
    MRI.setRegBank(DstHi, AMDGPU::SGPRRegBank);
  } else {
    // No scalar mul-hi: copy the sources to VGPRs and do the high half on
    // the VALU, optionally reading the uniform result back with
    // readfirstlane.
    Register VSrc0 = B.buildCopy(S32, Src0).getReg(0);
    Register VSrc1 = B.buildCopy(S32, Src1).getReg(0);

    MRI.setRegBank(VSrc0, AMDGPU::VGPRRegBank);
    MRI.setRegBank(VSrc1, AMDGPU::VGPRRegBank);

    DstHi = IsUnsigned ? B.buildUMulH(S32, VSrc0, VSrc1).getReg(0)
                       : B.buildSMulH(S32, VSrc0, VSrc1).getReg(0);
    MRI.setRegBank(DstHi, AMDGPU::VGPRRegBank);

    if (!DstOnValu) {
      DstHi = buildReadFirstLane(B, MRI, DstHi);
    } else {
      MulHiInVgpr = true;
    }
  }

  // Carries are s1 in VCC when the result is divergent, s32 on SALU when it
  // is uniform.
  LLT CarryType = DstOnValu ? S1 : S32;
  const RegisterBank &CarryBank =
      DstOnValu ? AMDGPU::VCCRegBank : AMDGPU::SGPRRegBank;
  const RegisterBank &DstBank =
      DstOnValu ? AMDGPU::VGPRRegBank : AMDGPU::SGPRRegBank;
  Register Carry;
  Register Zero;

  if (!IsUnsigned) {
    // Signed case: start the carry-out as the sign of the product's high
    // half; it is XOR-combined with the accumulate carries below.
    Zero = B.buildConstant(S32, 0).getReg(0);
    MRI.setRegBank(Zero,
                   MulHiInVgpr ? AMDGPU::VGPRRegBank : AMDGPU::SGPRRegBank);

    Carry = B.buildICmp(CmpInst::ICMP_SLT, MulHiInVgpr ? S1 : S32, DstHi, Zero)
                .getReg(0);
    MRI.setRegBank(Carry, MulHiInVgpr ? AMDGPU::VCCRegBank
                                      : AMDGPU::SGPRRegBank);

    // Reconcile the carry representation (s32 SALU -> s1 VCC) when the
    // result is divergent but the compare was done on SALU.
    if (DstOnValu && !MulHiInVgpr) {
      Carry = B.buildTrunc(S1, Carry).getReg(0);
      MRI.setRegBank(Carry, AMDGPU::VCCRegBank);
    }
  }

  if (Accumulate) {
    // Move the product to VGPRs first when the add must happen on the VALU.
    if (DstOnValu) {
      DstLo = B.buildCopy(S32, DstLo).getReg(0);
      DstHi = B.buildCopy(S32, DstHi).getReg(0);
      MRI.setRegBank(DstLo, AMDGPU::VGPRRegBank);
      MRI.setRegBank(DstHi, AMDGPU::VGPRRegBank);
    }

    auto Unmerge = B.buildUnmerge(S32, Src2);
    Register Src2Lo = Unmerge.getReg(0);
    Register Src2Hi = Unmerge.getReg(1);
    MRI.setRegBank(Src2Lo, DstBank);
    MRI.setRegBank(Src2Hi, DstBank);

    if (!IsUnsigned) {
      // Fold the accumulator's sign into the running carry via XOR.
      auto Src2Sign = B.buildICmp(CmpInst::ICMP_SLT, CarryType, Src2Hi, Zero);
      MRI.setRegBank(Src2Sign.getReg(0), CarryBank);

      Carry = B.buildXor(CarryType, Carry, Src2Sign).getReg(0);
      MRI.setRegBank(Carry, CarryBank);
    }

    // 64-bit add as two 32-bit adds with carry chaining.
    auto AddLo = B.buildUAddo(S32, CarryType, DstLo, Src2Lo);
    DstLo = AddLo.getReg(0);
    Register CarryLo = AddLo.getReg(1);
    MRI.setRegBank(DstLo, DstBank);
    MRI.setRegBank(CarryLo, CarryBank);

    auto AddHi = B.buildUAdde(S32, CarryType, DstHi, Src2Hi, CarryLo);
    DstHi = AddHi.getReg(0);
    MRI.setRegBank(DstHi, DstBank);

    Register CarryHi = AddHi.getReg(1);
    MRI.setRegBank(CarryHi, CarryBank);

    if (IsUnsigned) {
      Carry = CarryHi;
    } else {
      // Signed carry is the XOR of the sign-derived carry and the unsigned
      // carry-out of the high add.
      Carry = B.buildXor(CarryType, Carry, CarryHi).getReg(0);
      MRI.setRegBank(Carry, CarryBank);
    }
  } else {
    // No accumulate: an unsigned MAD with zero addend can never carry.
    if (IsUnsigned) {
      Carry = B.buildConstant(CarryType, 0).getReg(0);
      MRI.setRegBank(Carry, CarryBank);
    }
  }

  B.buildMerge(Dst0, {DstLo, DstHi});

  if (DstOnValu) {
    B.buildCopy(Dst1, Carry);
  } else {
    B.buildTrunc(Dst1, Carry);
  }

  MI.eraseFromParent();
  return true;
}
/// Select the extension opcode that keeps the semantics of \p Opc intact when
/// its 16-bit operands are widened to 32 bits: sign-extend for signed
/// shifts/min/max, zero-extend for their unsigned counterparts, and anyext
/// for operations that ignore the upper bits.
static unsigned getExtendOp(unsigned Opc) {
  if (Opc == TargetOpcode::G_ASHR || Opc == TargetOpcode::G_SMIN ||
      Opc == TargetOpcode::G_SMAX)
    return TargetOpcode::G_SEXT;
  if (Opc == TargetOpcode::G_LSHR || Opc == TargetOpcode::G_UMIN ||
      Opc == TargetOpcode::G_UMAX)
    return TargetOpcode::G_ZEXT;
  return TargetOpcode::G_ANYEXT;
}
/// Split a <2 x s16> value into two s32 registers holding the low and high
/// halves, each widened with \p ExtOpcode semantics. Returns {lo, hi}.
static std::pair<Register, Register>
unpackV2S16ToS32(MachineIRBuilder &B, Register Src, unsigned ExtOpcode) {
  const LLT S32 = LLT::scalar(32);
  auto Cast = B.buildBitcast(S32, Src);

  switch (ExtOpcode) {
  case TargetOpcode::G_SEXT: {
    // Low half: sign-extend in place; high half: arithmetic shift down.
    auto LoExt = B.buildSExtInReg(S32, Cast, 16);
    auto HiExt = B.buildAShr(S32, Cast, B.buildConstant(S32, 16));
    return {LoExt.getReg(0), HiExt.getReg(0)};
  }
  case TargetOpcode::G_ZEXT: {
    // Low half: mask to 16 bits; high half: logical shift down.
    auto HiExt = B.buildLShr(S32, Cast, B.buildConstant(S32, 16));
    auto LoExt = B.buildAnd(S32, Cast, B.buildConstant(S32, 0xffff));
    return {LoExt.getReg(0), HiExt.getReg(0)};
  }
  default: {
    assert(ExtOpcode == TargetOpcode::G_ANYEXT);
    // Low half's upper bits are don't-care, so the raw bitcast suffices.
    auto HiExt = B.buildLShr(S32, Cast, B.buildConstant(S32, 16));
    return {Cast.getReg(0), HiExt.getReg(0)};
  }
  }
}
/// If the mapper produced a single replacement vreg for operand \p OpIdx,
/// rewrite the operand to use it. Returns true when a substitution was made.
static bool substituteSimpleCopyRegs(
    const AMDGPURegisterBankInfo::OperandsMapper &OpdMapper, unsigned OpIdx) {
  SmallVector<unsigned, 1> Repacked(OpdMapper.getVRegs(OpIdx));
  if (Repacked.empty())
    return false;

  assert(Repacked.size() == 1);
  OpdMapper.getMI().getOperand(OpIdx).setReg(Repacked[0]);
  return true;
}
/// On subtargets with unpacked D16 memory instructions, rewrite a <N x s16>
/// payload so each 16-bit element occupies its own 32-bit lane. Other values
/// (and packed-D16 subtargets) pass through unchanged.
Register AMDGPURegisterBankInfo::handleD16VData(MachineIRBuilder &B,
                                                MachineRegisterInfo &MRI,
                                                Register Reg) const {
  // Packed D16 hardware stores the value as-is.
  if (!Subtarget.hasUnpackedD16VMem())
    return Reg;

  const LLT S16 = LLT::scalar(16);
  LLT PayloadTy = MRI.getType(Reg);
  if (!PayloadTy.isVector() || PayloadTy.getElementType() != S16)
    return Reg;

  // Pull the vector apart into its s16 pieces...
  auto Pieces = B.buildUnmerge(S16, Reg);
  SmallVector<Register, 4> Lanes;
  const int NumDefs = Pieces->getNumOperands() - 1;
  for (int I = 0; I != NumDefs; ++I)
    Lanes.push_back(Pieces.getReg(I));

  // ...and rebuild them with one element per 32-bit lane.
  const LLT S32 = LLT::scalar(32);
  return B.buildMerge(LLT::fixed_vector(PayloadTy.getNumElements(), S32),
                      Lanes)
      .getReg(0);
}
/// Decompose \p Reg as (base, constant). A pure constant yields a null base;
/// a G_ADD with a constant RHS yields its base and offset; anything else is
/// returned unchanged with offset 0.
static std::pair<Register, unsigned>
getBaseWithConstantOffset(MachineRegisterInfo &MRI, Register Reg) {
  int64_t Imm;
  if (mi_match(Reg, MRI, m_ICst(Imm)))
    return std::make_pair(Register(), Imm);

  Register BaseReg;
  if (mi_match(Reg, MRI, m_GAdd(m_Reg(BaseReg), m_ICst(Imm))))
    return std::make_pair(BaseReg, Imm);

  return std::make_pair(Reg, 0);
}
/// Split \p OrigOffset into a base register and an immediate that fits the
/// 12-bit MUBUF offset field (<= 4095). Any excess constant is folded back
/// into the base register with an add.
std::pair<Register, unsigned>
AMDGPURegisterBankInfo::splitBufferOffsets(MachineIRBuilder &B,
                                           Register OrigOffset) const {
  const unsigned MaxImm = 4095; // Largest encodable immediate offset.
  Register BaseReg;
  unsigned ImmOffset;
  const LLT S32 = LLT::scalar(32);

  std::tie(BaseReg, ImmOffset) = getBaseWithConstantOffset(*B.getMRI(),
                                                           OrigOffset);

  unsigned C1 = 0;
  if (ImmOffset != 0) {
    // Overflow holds the part of the constant above the encodable range;
    // what remains in ImmOffset can go directly into the immediate field.
    unsigned Overflow = ImmOffset & ~MaxImm;
    ImmOffset -= Overflow;
    // NOTE(review): when the masked-off part is negative as a signed 32-bit
    // value, nothing can be encoded in the immediate field; the full
    // constant is pushed into the base register instead — verify against
    // the MUBUF offset encoding rules.
    if ((int32_t)Overflow < 0) {
      Overflow += ImmOffset;
      ImmOffset = 0;
    }

    C1 = ImmOffset;
    if (Overflow != 0) {
      // Fold the excess back into the base: either materialize it directly
      // (no base yet) or add it to the existing base.
      if (!BaseReg)
        BaseReg = B.buildConstant(S32, Overflow).getReg(0);
      else {
        auto OverflowVal = B.buildConstant(S32, Overflow);
        BaseReg = B.buildAdd(S32, BaseReg, OverflowVal).getReg(0);
      }
    }
  }

  // Callers expect a materialized base register even for a zero offset.
  if (!BaseReg)
    BaseReg = B.buildConstant(S32, 0).getReg(0);

  return {BaseReg, C1};
}
/// Emit an SGPR -> VGPR copy of \p SrcReg into \p DstReg using V_MOV_B32.
/// 64-bit values are copied as two 32-bit moves recombined with a
/// REG_SEQUENCE, since V_MOV_B32 only moves 32 bits. Returns whether all
/// registers could be constrained to the expected classes.
bool AMDGPURegisterBankInfo::buildVCopy(MachineIRBuilder &B, Register DstReg,
                                        Register SrcReg) const {
  MachineRegisterInfo &MRI = *B.getMRI();
  LLT SrcTy = MRI.getType(SrcReg);
  if (SrcTy.getSizeInBits() == 32) {
    // Single 32-bit move suffices.
    B.buildInstr(AMDGPU::V_MOV_B32_e32)
        .addDef(DstReg)
        .addUse(SrcReg);
    return constrainGenericRegister(DstReg, AMDGPU::VGPR_32RegClass, MRI) &&
           constrainGenericRegister(SrcReg, AMDGPU::SReg_32RegClass, MRI);
  }

  // 64-bit: move each subregister half separately...
  Register TmpReg0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register TmpReg1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  B.buildInstr(AMDGPU::V_MOV_B32_e32)
      .addDef(TmpReg0)
      .addUse(SrcReg, 0, AMDGPU::sub0);
  B.buildInstr(AMDGPU::V_MOV_B32_e32)
      .addDef(TmpReg1)
      .addUse(SrcReg, 0, AMDGPU::sub1);

  // ...then stitch the halves back into a 64-bit VGPR pair.
  B.buildInstr(AMDGPU::REG_SEQUENCE)
      .addDef(DstReg)
      .addUse(TmpReg0)
      .addImm(AMDGPU::sub0)
      .addUse(TmpReg1)
      .addImm(AMDGPU::sub1);

  return constrainGenericRegister(SrcReg, AMDGPU::SReg_64RegClass, MRI) &&
         constrainGenericRegister(DstReg, AMDGPU::VReg_64RegClass, MRI);
}
/// Materialize (index + ConstOffset) in SGPRs directly before
/// \p IdxUseInstr and rewrite its operand \p OpIdx to use the sum.
static void reinsertVectorIndexAdd(MachineIRBuilder &B,
                                   MachineInstr &IdxUseInstr, unsigned OpIdx,
                                   unsigned ConstOffset) {
  MachineRegisterInfo &MRI = *B.getMRI();
  const LLT S32 = LLT::scalar(32);

  MachineOperand &IdxOp = IdxUseInstr.getOperand(OpIdx);
  Register OldIdx = IdxOp.getReg();

  // Build the constant and the add immediately in front of the user.
  B.setInsertPt(*IdxUseInstr.getParent(), IdxUseInstr.getIterator());
  auto OffsetK = B.buildConstant(S32, ConstOffset);
  auto NewIdx = B.buildAdd(S32, OldIdx, OffsetK);
  MRI.setRegBank(OffsetK.getReg(0), AMDGPU::SGPRRegBank);
  MRI.setRegBank(NewIdx.getReg(0), AMDGPU::SGPRRegBank);

  IdxOp.setReg(NewIdx.getReg(0));
}
/// Define \p Hi32Reg as the upper half of a 64-bit extension of \p Lo32Reg,
/// according to \p ExtOpc. \p IsBooleanSrc indicates the low half is a
/// select result already replicating the sign (0 or -1), so a plain copy
/// suffices for sign extension.
static void extendLow32IntoHigh32(MachineIRBuilder &B,
                                  Register Hi32Reg, Register Lo32Reg,
                                  unsigned ExtOpc,
                                  const RegisterBank &RegBank,
                                  bool IsBooleanSrc = false) {
  switch (ExtOpc) {
  case AMDGPU::G_ZEXT:
    // Zero extension: upper half is always 0.
    B.buildConstant(Hi32Reg, 0);
    break;
  case AMDGPU::G_SEXT:
    if (IsBooleanSrc) {
      // Low half is assumed to already be 0 or -1, so it doubles as the
      // sign-extended upper half.
      B.buildCopy(Hi32Reg, Lo32Reg);
    } else {
      // Replicate the sign bit across the upper half.
      auto ShiftAmt = B.buildConstant(LLT::scalar(32), 31);
      B.getMRI()->setRegBank(ShiftAmt.getReg(0), RegBank);
      B.buildAShr(Hi32Reg, Lo32Reg, ShiftAmt);
    }
    break;
  default:
    assert(ExtOpc == AMDGPU::G_ANYEXT && "not an integer extension");
    // Anyext: upper bits are unspecified.
    B.buildUndef(Hi32Reg);
    break;
  }
}
/// Expand a dynamic-index G_EXTRACT_VECTOR_ELT into a linear chain of
/// compare+select over all vector elements, when the target heuristic says
/// that beats the generic indexing lowering. Returns false to fall back to
/// the default expansion.
bool AMDGPURegisterBankInfo::foldExtractEltToCmpSelect(
    MachineInstr &MI, MachineRegisterInfo &MRI,
    const OperandsMapper &OpdMapper) const {

  Register VecReg = MI.getOperand(1).getReg();
  Register Idx = MI.getOperand(2).getReg();

  const RegisterBank &IdxBank =
      *OpdMapper.getInstrMapping().getOperandMapping(2).BreakDown[0].RegBank;

  bool IsDivergentIdx = IdxBank != AMDGPU::SGPRRegBank;

  LLT VecTy = MRI.getType(VecReg);
  unsigned EltSize = VecTy.getScalarSizeInBits();
  unsigned NumElem = VecTy.getNumElements();

  // Profitability check shared with the DAG path.
  if (!SITargetLowering::shouldExpandVectorDynExt(EltSize, NumElem,
                                                  IsDivergentIdx, &Subtarget))
    return false;

  MachineIRBuilder B(MI);
  LLT S32 = LLT::scalar(32);

  const RegisterBank &DstBank =
      *OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank;
  const RegisterBank &SrcBank =
      *OpdMapper.getInstrMapping().getOperandMapping(1).BreakDown[0].RegBank;

  // The compares can stay scalar only if every operand is uniform.
  const RegisterBank &CCBank =
      (DstBank == AMDGPU::SGPRRegBank &&
       SrcBank == AMDGPU::SGPRRegBank &&
       IdxBank == AMDGPU::SGPRRegBank) ? AMDGPU::SGPRRegBank
                                       : AMDGPU::VCCRegBank;
  LLT CCTy = (CCBank == AMDGPU::SGPRRegBank) ? S32 : LLT::scalar(1);

  // A uniform index compared against VCC conditions must first be moved to a
  // VGPR.
  if (CCBank == AMDGPU::VCCRegBank && IdxBank == AMDGPU::SGPRRegBank) {
    Idx = B.buildCopy(S32, Idx)->getOperand(0).getReg();
    MRI.setRegBank(Idx, AMDGPU::VGPRRegBank);
  }

  LLT EltTy = VecTy.getScalarType();
  // When the mapper split the 64-bit result into lanes, select each 32-bit
  // lane independently.
  SmallVector<Register, 2> DstRegs(OpdMapper.getVRegs(0));
  unsigned NumLanes = DstRegs.size();
  if (!NumLanes)
    NumLanes = 1;
  else
    EltTy = MRI.getType(DstRegs[0]);

  auto UnmergeToEltTy = B.buildUnmerge(EltTy, VecReg);
  SmallVector<Register, 2> Res(NumLanes);

  // Start with element 0 and fold in each later element behind an
  // (Idx == I) select.
  for (unsigned L = 0; L < NumLanes; ++L)
    Res[L] = UnmergeToEltTy.getReg(L);

  for (unsigned I = 1; I < NumElem; ++I) {
    auto IC = B.buildConstant(S32, I);
    MRI.setRegBank(IC->getOperand(0).getReg(), AMDGPU::SGPRRegBank);
    auto Cmp = B.buildICmp(CmpInst::ICMP_EQ, CCTy, Idx, IC);
    MRI.setRegBank(Cmp->getOperand(0).getReg(), CCBank);

    for (unsigned L = 0; L < NumLanes; ++L) {
      auto S = B.buildSelect(EltTy, Cmp,
                             UnmergeToEltTy.getReg(I * NumLanes + L), Res[L]);

      // Operands 0 (dst), 2 and 3 (values) get the destination bank; the
      // condition (operand 1) keeps CCBank.
      for (unsigned N : { 0, 2, 3 })
        MRI.setRegBank(S->getOperand(N).getReg(), DstBank);

      Res[L] = S->getOperand(0).getReg();
    }
  }

  for (unsigned L = 0; L < NumLanes; ++L) {
    Register DstReg = (NumLanes == 1) ? MI.getOperand(0).getReg() : DstRegs[L];
    B.buildCopy(DstReg, Res[L]);
    MRI.setRegBank(DstReg, DstBank);
  }

  MRI.setRegBank(MI.getOperand(0).getReg(), DstBank);
  MI.eraseFromParent();

  return true;
}
/// Ensure a register usable from \p Bank: if \p Reg is unassigned or already
/// on \p Bank it is claimed directly; a conflicting assignment is bridged
/// with a copy placed on \p Bank. Returns the register to use.
static Register constrainRegToBank(MachineRegisterInfo &MRI,
                                   MachineIRBuilder &B, Register &Reg,
                                   const RegisterBank &Bank) {
  const RegisterBank *Assigned = MRI.getRegBankOrNull(Reg);
  if (!Assigned || *Assigned == Bank) {
    MRI.setRegBank(Reg, Bank);
    return Reg;
  }

  Register Bridge = B.buildCopy(MRI.getType(Reg), Reg).getReg(0);
  MRI.setRegBank(Bridge, Bank);
  return Bridge;
}
/// Expand a dynamic-index G_INSERT_VECTOR_ELT into per-element
/// compare+select, rebuilding the whole vector, when the target heuristic
/// says that beats the generic indexing lowering. Returns false to fall back
/// to the default expansion.
bool AMDGPURegisterBankInfo::foldInsertEltToCmpSelect(
    MachineInstr &MI, MachineRegisterInfo &MRI,
    const OperandsMapper &OpdMapper) const {

  Register VecReg = MI.getOperand(1).getReg();
  Register Idx = MI.getOperand(3).getReg();

  const RegisterBank &IdxBank =
      *OpdMapper.getInstrMapping().getOperandMapping(3).BreakDown[0].RegBank;

  bool IsDivergentIdx = IdxBank != AMDGPU::SGPRRegBank;

  LLT VecTy = MRI.getType(VecReg);
  unsigned EltSize = VecTy.getScalarSizeInBits();
  unsigned NumElem = VecTy.getNumElements();

  // Profitability check shared with the DAG path.
  if (!SITargetLowering::shouldExpandVectorDynExt(EltSize, NumElem,
                                                  IsDivergentIdx, &Subtarget))
    return false;

  MachineIRBuilder B(MI);
  LLT S32 = LLT::scalar(32);

  const RegisterBank &DstBank =
      *OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank;
  const RegisterBank &SrcBank =
      *OpdMapper.getInstrMapping().getOperandMapping(1).BreakDown[0].RegBank;
  const RegisterBank &InsBank =
      *OpdMapper.getInstrMapping().getOperandMapping(2).BreakDown[0].RegBank;

  // The compares can stay scalar only if every operand is uniform.
  const RegisterBank &CCBank =
      (DstBank == AMDGPU::SGPRRegBank &&
       SrcBank == AMDGPU::SGPRRegBank &&
       InsBank == AMDGPU::SGPRRegBank &&
       IdxBank == AMDGPU::SGPRRegBank) ? AMDGPU::SGPRRegBank
                                       : AMDGPU::VCCRegBank;
  LLT CCTy = (CCBank == AMDGPU::SGPRRegBank) ? S32 : LLT::scalar(1);

  // A uniform index compared against VCC conditions must first be moved to a
  // VGPR.
  if (CCBank == AMDGPU::VCCRegBank && IdxBank == AMDGPU::SGPRRegBank) {
    Idx = B.buildCopy(S32, Idx)->getOperand(0).getReg();
    MRI.setRegBank(Idx, AMDGPU::VGPRRegBank);
  }

  LLT EltTy = VecTy.getScalarType();
  // When the mapper split the 64-bit inserted value into lanes, process each
  // 32-bit lane independently.
  SmallVector<Register, 2> InsRegs(OpdMapper.getVRegs(2));
  unsigned NumLanes = InsRegs.size();
  if (!NumLanes) {
    NumLanes = 1;
    InsRegs.push_back(MI.getOperand(2).getReg());
  } else {
    EltTy = MRI.getType(InsRegs[0]);
  }

  auto UnmergeToEltTy = B.buildUnmerge(EltTy, VecReg);
  SmallVector<Register, 16> Ops(NumElem * NumLanes);

  // Element I of the result is (Idx == I) ? InsVal : OldElt.
  for (unsigned I = 0; I < NumElem; ++I) {
    auto IC = B.buildConstant(S32, I);
    MRI.setRegBank(IC->getOperand(0).getReg(), AMDGPU::SGPRRegBank);
    auto Cmp = B.buildICmp(CmpInst::ICMP_EQ, CCTy, Idx, IC);
    MRI.setRegBank(Cmp->getOperand(0).getReg(), CCBank);

    for (unsigned L = 0; L < NumLanes; ++L) {
      // Both select inputs must agree with the destination bank.
      Register Op0 = constrainRegToBank(MRI, B, InsRegs[L], DstBank);
      Register Op1 = UnmergeToEltTy.getReg(I * NumLanes + L);
      Op1 = constrainRegToBank(MRI, B, Op1, DstBank);

      Register Select = B.buildSelect(EltTy, Cmp, Op0, Op1).getReg(0);
      MRI.setRegBank(Select, DstBank);

      Ops[I * NumLanes + L] = Select;
    }
  }

  // Rebuild the vector; a lane-split result needs a bitcast back to the
  // original vector type.
  LLT MergeTy = LLT::fixed_vector(Ops.size(), EltTy);
  if (MergeTy == MRI.getType(MI.getOperand(0).getReg())) {
    B.buildBuildVector(MI.getOperand(0), Ops);
  } else {
    auto Vec = B.buildBuildVector(MergeTy, Ops);
    MRI.setRegBank(Vec->getOperand(0).getReg(), DstBank);
    B.buildBitcast(MI.getOperand(0).getReg(), Vec);
  }

  MRI.setRegBank(MI.getOperand(0).getReg(), DstBank);
  MI.eraseFromParent();

  return true;
}
void AMDGPURegisterBankInfo::applyMappingImpl(
const OperandsMapper &OpdMapper) const {
MachineInstr &MI = OpdMapper.getMI();
unsigned Opc = MI.getOpcode();
MachineRegisterInfo &MRI = OpdMapper.getMRI();
switch (Opc) {
case AMDGPU::G_PHI: {
Register DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
if (DstTy != LLT::scalar(1))
break;
const LLT S32 = LLT::scalar(32);
const RegisterBank *DstBank =
OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank;
if (DstBank == &AMDGPU::VCCRegBank) {
applyDefaultMapping(OpdMapper);
MachineIRBuilder B(*MI.getParent()->getParent());
for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
Register SrcReg = MI.getOperand(I).getReg();
const RegisterBank *SrcBank = getRegBank(SrcReg, MRI, *TRI);
if (SrcBank != &AMDGPU::VCCRegBank) {
MachineBasicBlock *SrcMBB = MI.getOperand(I + 1).getMBB();
B.setInsertPt(*SrcMBB, SrcMBB->getFirstTerminator());
auto Copy = B.buildCopy(LLT::scalar(1), SrcReg);
MRI.setRegBank(Copy.getReg(0), AMDGPU::VCCRegBank);
MI.getOperand(I).setReg(Copy.getReg(0));
}
}
return;
}
substituteSimpleCopyRegs(OpdMapper, 0);
MachineFunction *MF = MI.getParent()->getParent();
ApplyRegBankMapping ApplyBank(*this, MRI, DstBank);
MachineIRBuilder B(MI, ApplyBank);
LegalizerHelper Helper(*MF, ApplyBank, B);
if (Helper.widenScalar(MI, 0, S32) != LegalizerHelper::Legalized)
llvm_unreachable("widen scalar should have succeeded");
return;
}
case AMDGPU::G_ICMP:
case AMDGPU::G_UADDO:
case AMDGPU::G_USUBO:
case AMDGPU::G_UADDE:
case AMDGPU::G_SADDE:
case AMDGPU::G_USUBE:
case AMDGPU::G_SSUBE: {
unsigned BoolDstOp = Opc == AMDGPU::G_ICMP ? 0 : 1;
Register DstReg = MI.getOperand(BoolDstOp).getReg();
const RegisterBank *DstBank =
OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank;
if (DstBank != &AMDGPU::SGPRRegBank)
break;
const bool HasCarryIn = MI.getNumOperands() == 5;
const LLT S32 = LLT::scalar(32);
Register NewDstReg = MRI.createGenericVirtualRegister(S32);
MRI.setRegBank(NewDstReg, AMDGPU::SGPRRegBank);
MI.getOperand(BoolDstOp).setReg(NewDstReg);
MachineIRBuilder B(MI);
if (HasCarryIn) {
Register NewSrcReg = MRI.createGenericVirtualRegister(S32);
MRI.setRegBank(NewSrcReg, AMDGPU::SGPRRegBank);
B.buildZExt(NewSrcReg, MI.getOperand(4).getReg());
MI.getOperand(4).setReg(NewSrcReg);
}
MachineBasicBlock *MBB = MI.getParent();
B.setInsertPt(*MBB, std::next(MI.getIterator()));
SmallVector<Register, 1> DefRegs(OpdMapper.getVRegs(0));
if (DefRegs.empty())
DefRegs.push_back(DstReg);
B.buildTrunc(DefRegs[0], NewDstReg);
return;
}
case AMDGPU::G_SELECT: {
Register DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
SmallVector<Register, 1> CondRegs(OpdMapper.getVRegs(1));
if (CondRegs.empty())
CondRegs.push_back(MI.getOperand(1).getReg());
else {
assert(CondRegs.size() == 1);
}
const RegisterBank *CondBank = getRegBank(CondRegs[0], MRI, *TRI);
if (CondBank == &AMDGPU::SGPRRegBank) {
MachineIRBuilder B(MI);
const LLT S32 = LLT::scalar(32);
Register NewCondReg = MRI.createGenericVirtualRegister(S32);
MRI.setRegBank(NewCondReg, AMDGPU::SGPRRegBank);
MI.getOperand(1).setReg(NewCondReg);
B.buildZExt(NewCondReg, CondRegs[0]);
}
if (DstTy.getSizeInBits() != 64)
break;
MachineIRBuilder B(MI);
LLT HalfTy = getHalfSizedType(DstTy);
SmallVector<Register, 2> DefRegs(OpdMapper.getVRegs(0));
SmallVector<Register, 2> Src1Regs(OpdMapper.getVRegs(2));
SmallVector<Register, 2> Src2Regs(OpdMapper.getVRegs(3));
if (DefRegs.empty()) {
assert(Src1Regs.empty() && Src2Regs.empty());
break;
}
if (Src1Regs.empty())
split64BitValueForMapping(B, Src1Regs, HalfTy, MI.getOperand(2).getReg());
else {
setRegsToType(MRI, Src1Regs, HalfTy);
}
if (Src2Regs.empty())
split64BitValueForMapping(B, Src2Regs, HalfTy, MI.getOperand(3).getReg());
else
setRegsToType(MRI, Src2Regs, HalfTy);
setRegsToType(MRI, DefRegs, HalfTy);
B.buildSelect(DefRegs[0], CondRegs[0], Src1Regs[0], Src2Regs[0]);
B.buildSelect(DefRegs[1], CondRegs[0], Src1Regs[1], Src2Regs[1]);
MRI.setRegBank(DstReg, AMDGPU::VGPRRegBank);
MI.eraseFromParent();
return;
}
case AMDGPU::G_BRCOND: {
Register CondReg = MI.getOperand(0).getReg();
const RegisterBank *CondBank =
OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank;
if (CondBank == &AMDGPU::SGPRRegBank) {
MachineIRBuilder B(MI);
const LLT S32 = LLT::scalar(32);
Register NewCondReg = MRI.createGenericVirtualRegister(S32);
MRI.setRegBank(NewCondReg, AMDGPU::SGPRRegBank);
MI.getOperand(0).setReg(NewCondReg);
B.buildZExt(NewCondReg, CondReg);
return;
}
break;
}
case AMDGPU::G_AND:
case AMDGPU::G_OR:
case AMDGPU::G_XOR: {
Register DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
if (DstTy.getSizeInBits() == 1) {
const RegisterBank *DstBank =
OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank;
if (DstBank == &AMDGPU::VCCRegBank)
break;
MachineFunction *MF = MI.getParent()->getParent();
ApplyRegBankMapping ApplyBank(*this, MRI, DstBank);
MachineIRBuilder B(MI, ApplyBank);
LegalizerHelper Helper(*MF, ApplyBank, B);
if (Helper.widenScalar(MI, 0, LLT::scalar(32)) !=
LegalizerHelper::Legalized)
llvm_unreachable("widen scalar should have succeeded");
return;
}
if (DstTy.getSizeInBits() != 64)
break;
LLT HalfTy = getHalfSizedType(DstTy);
SmallVector<Register, 2> DefRegs(OpdMapper.getVRegs(0));
SmallVector<Register, 2> Src0Regs(OpdMapper.getVRegs(1));
SmallVector<Register, 2> Src1Regs(OpdMapper.getVRegs(2));
if (DefRegs.empty()) {
assert(Src0Regs.empty() && Src1Regs.empty());
break;
}
assert(DefRegs.size() == 2);
assert(Src0Regs.size() == Src1Regs.size() &&
(Src0Regs.empty() || Src0Regs.size() == 2));
MachineIRBuilder B(MI);
if (Src0Regs.empty())
split64BitValueForMapping(B, Src0Regs, HalfTy, MI.getOperand(1).getReg());
else
setRegsToType(MRI, Src0Regs, HalfTy);
if (Src1Regs.empty())
split64BitValueForMapping(B, Src1Regs, HalfTy, MI.getOperand(2).getReg());
else
setRegsToType(MRI, Src1Regs, HalfTy);
setRegsToType(MRI, DefRegs, HalfTy);
B.buildInstr(Opc, {DefRegs[0]}, {Src0Regs[0], Src1Regs[0]});
B.buildInstr(Opc, {DefRegs[1]}, {Src0Regs[1], Src1Regs[1]});
MRI.setRegBank(DstReg, AMDGPU::VGPRRegBank);
MI.eraseFromParent();
return;
}
case AMDGPU::G_ABS: {
Register SrcReg = MI.getOperand(1).getReg();
const RegisterBank *SrcBank = MRI.getRegBankOrNull(SrcReg);
if (SrcBank && SrcBank == &AMDGPU::VGPRRegBank) {
MachineFunction *MF = MI.getParent()->getParent();
ApplyRegBankMapping Apply(*this, MRI, &AMDGPU::VGPRRegBank);
MachineIRBuilder B(MI, Apply);
LegalizerHelper Helper(*MF, Apply, B);
if (Helper.lowerAbsToMaxNeg(MI) != LegalizerHelper::Legalized)
llvm_unreachable("lowerAbsToMaxNeg should have succeeded");
return;
}
LLVM_FALLTHROUGH;
}
case AMDGPU::G_ADD:
case AMDGPU::G_SUB:
case AMDGPU::G_MUL:
case AMDGPU::G_SHL:
case AMDGPU::G_LSHR:
case AMDGPU::G_ASHR:
case AMDGPU::G_SMIN:
case AMDGPU::G_SMAX:
case AMDGPU::G_UMIN:
case AMDGPU::G_UMAX: {
Register DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
if (DstTy != LLT::scalar(16) && DstTy != LLT::fixed_vector(2, 16))
break;
const RegisterBank *DstBank =
OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank;
if (DstBank == &AMDGPU::VGPRRegBank)
break;
const LLT S32 = LLT::scalar(32);
MachineBasicBlock *MBB = MI.getParent();
MachineFunction *MF = MBB->getParent();
ApplyRegBankMapping ApplySALU(*this, MRI, &AMDGPU::SGPRRegBank);
MachineIRBuilder B(MI, ApplySALU);
if (DstTy.isVector()) {
Register WideSrc0Lo, WideSrc0Hi;
Register WideSrc1Lo, WideSrc1Hi;
unsigned ExtendOp = getExtendOp(MI.getOpcode());
std::tie(WideSrc0Lo, WideSrc0Hi)
= unpackV2S16ToS32(B, MI.getOperand(1).getReg(), ExtendOp);
std::tie(WideSrc1Lo, WideSrc1Hi)
= unpackV2S16ToS32(B, MI.getOperand(2).getReg(), ExtendOp);
auto Lo = B.buildInstr(MI.getOpcode(), {S32}, {WideSrc0Lo, WideSrc1Lo});
auto Hi = B.buildInstr(MI.getOpcode(), {S32}, {WideSrc0Hi, WideSrc1Hi});
B.buildBuildVectorTrunc(DstReg, {Lo.getReg(0), Hi.getReg(0)});
MI.eraseFromParent();
} else {
LegalizerHelper Helper(*MF, ApplySALU, B);
if (Helper.widenScalar(MI, 0, S32) != LegalizerHelper::Legalized)
llvm_unreachable("widen scalar should have succeeded");
if (Opc == AMDGPU::G_SHL || Opc == AMDGPU::G_LSHR ||
Opc == AMDGPU::G_ASHR) {
B.setInsertPt(*MBB, MI.getIterator());
if (Helper.widenScalar(MI, 1, S32) != LegalizerHelper::Legalized)
llvm_unreachable("widen scalar should have succeeded");
}
}
return;
}
case AMDGPU::G_SEXT_INREG: {
SmallVector<Register, 2> SrcRegs(OpdMapper.getVRegs(1));
if (SrcRegs.empty())
break;
const LLT S32 = LLT::scalar(32);
MachineIRBuilder B(MI);
ApplyRegBankMapping O(*this, MRI, &AMDGPU::VGPRRegBank);
GISelObserverWrapper Observer(&O);
B.setChangeObserver(Observer);
SmallVector<Register, 2> DstRegs(OpdMapper.getVRegs(0));
int Amt = MI.getOperand(2).getImm();
if (Amt <= 32) {
if (Amt == 32) {
B.buildCopy(DstRegs[0], SrcRegs[0]);
} else {
B.buildSExtInReg(DstRegs[0], SrcRegs[0], Amt);
}
B.buildAShr(DstRegs[1], DstRegs[0], B.buildConstant(S32, 31));
} else {
B.buildCopy(DstRegs[0], SrcRegs[0]);
B.buildSExtInReg(DstRegs[1], DstRegs[0], Amt - 32);
}
Register DstReg = MI.getOperand(0).getReg();
MRI.setRegBank(DstReg, AMDGPU::VGPRRegBank);
MI.eraseFromParent();
return;
}
case AMDGPU::G_CTPOP:
case AMDGPU::G_BITREVERSE: {
const RegisterBank *DstBank =
OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank;
if (DstBank == &AMDGPU::SGPRRegBank)
break;
Register SrcReg = MI.getOperand(1).getReg();
const LLT S32 = LLT::scalar(32);
LLT Ty = MRI.getType(SrcReg);
if (Ty == S32)
break;
ApplyRegBankMapping ApplyVALU(*this, MRI, &AMDGPU::VGPRRegBank);
MachineIRBuilder B(MI, ApplyVALU);
MachineFunction &MF = B.getMF();
LegalizerHelper Helper(MF, ApplyVALU, B);
if (Helper.narrowScalar(MI, 1, S32) != LegalizerHelper::Legalized)
llvm_unreachable("narrowScalar should have succeeded");
return;
}
case AMDGPU::G_AMDGPU_FFBH_U32:
case AMDGPU::G_AMDGPU_FFBL_B32:
case AMDGPU::G_CTLZ_ZERO_UNDEF:
case AMDGPU::G_CTTZ_ZERO_UNDEF: {
const RegisterBank *DstBank =
OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank;
if (DstBank == &AMDGPU::SGPRRegBank)
break;
Register SrcReg = MI.getOperand(1).getReg();
const LLT S32 = LLT::scalar(32);
LLT Ty = MRI.getType(SrcReg);
if (Ty == S32)
break;
ApplyRegBankMapping ApplyVALU(*this, MRI, &AMDGPU::VGPRRegBank);
MachineIRBuilder B(MI, ApplyVALU);
SmallVector<Register, 2> SrcRegs(OpdMapper.getVRegs(1));
unsigned NewOpc = Opc == AMDGPU::G_CTLZ_ZERO_UNDEF
? (unsigned)AMDGPU::G_AMDGPU_FFBH_U32
: Opc == AMDGPU::G_CTTZ_ZERO_UNDEF
? (unsigned)AMDGPU::G_AMDGPU_FFBL_B32
: Opc;
unsigned Idx = NewOpc == AMDGPU::G_AMDGPU_FFBH_U32;
auto X = B.buildInstr(NewOpc, {S32}, {SrcRegs[Idx]});
auto Y = B.buildInstr(NewOpc, {S32}, {SrcRegs[Idx ^ 1]});
unsigned AddOpc =
Opc == AMDGPU::G_CTLZ_ZERO_UNDEF || Opc == AMDGPU::G_CTTZ_ZERO_UNDEF
? AMDGPU::G_ADD
: AMDGPU::G_UADDSAT;
Y = B.buildInstr(AddOpc, {S32}, {Y, B.buildConstant(S32, 32)});
Register DstReg = MI.getOperand(0).getReg();
B.buildUMin(DstReg, X, Y);
MI.eraseFromParent();
return;
}
case AMDGPU::G_SEXT:
case AMDGPU::G_ZEXT:
case AMDGPU::G_ANYEXT: {
Register SrcReg = MI.getOperand(1).getReg();
LLT SrcTy = MRI.getType(SrcReg);
const bool Signed = Opc == AMDGPU::G_SEXT;
assert(empty(OpdMapper.getVRegs(1)));
MachineIRBuilder B(MI);
const RegisterBank *SrcBank =
OpdMapper.getInstrMapping().getOperandMapping(1).BreakDown[0].RegBank;
Register DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
if (DstTy.isScalar() &&
SrcBank != &AMDGPU::SGPRRegBank &&
SrcBank != &AMDGPU::VCCRegBank &&
DstTy.getSizeInBits() == 64 &&
SrcTy.getSizeInBits() <= 32) {
SmallVector<Register, 2> DefRegs(OpdMapper.getVRegs(0));
if (Signed) {
B.buildSExtOrTrunc(DefRegs[0], SrcReg);
} else if (Opc == AMDGPU::G_ZEXT) {
B.buildZExtOrTrunc(DefRegs[0], SrcReg);
} else {
B.buildAnyExtOrTrunc(DefRegs[0], SrcReg);
}
extendLow32IntoHigh32(B, DefRegs[1], DefRegs[0], Opc, *SrcBank);
MRI.setRegBank(DstReg, *SrcBank);
MI.eraseFromParent();
return;
}
if (SrcTy != LLT::scalar(1))
return;
if (SrcBank == &AMDGPU::VCCRegBank) {
SmallVector<Register, 2> DefRegs(OpdMapper.getVRegs(0));
const RegisterBank *DstBank = &AMDGPU::VGPRRegBank;
unsigned DstSize = DstTy.getSizeInBits();
const bool UseSel64 = DstSize > 32 &&
SrcBank->getID() == AMDGPU::SGPRRegBankID;
LLT SelType = UseSel64 ? LLT::scalar(64) : LLT::scalar(32);
auto True = B.buildConstant(SelType, Signed ? -1 : 1);
auto False = B.buildConstant(SelType, 0);
MRI.setRegBank(True.getReg(0), *DstBank);
MRI.setRegBank(False.getReg(0), *DstBank);
MRI.setRegBank(DstReg, *DstBank);
if (DstSize > 32) {
B.buildSelect(DefRegs[0], SrcReg, True, False);
extendLow32IntoHigh32(B, DefRegs[1], DefRegs[0], Opc, *SrcBank, true);
} else if (DstSize < 32) {
auto Sel = B.buildSelect(SelType, SrcReg, True, False);
MRI.setRegBank(Sel.getReg(0), *DstBank);
B.buildTrunc(DstReg, Sel);
} else {
B.buildSelect(DstReg, SrcReg, True, False);
}
MI.eraseFromParent();
return;
}
break;
}
case AMDGPU::G_BUILD_VECTOR:
case AMDGPU::G_BUILD_VECTOR_TRUNC: {
Register DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
if (DstTy != LLT::fixed_vector(2, 16))
break;
assert(MI.getNumOperands() == 3 && OpdMapper.getVRegs(0).empty());
substituteSimpleCopyRegs(OpdMapper, 1);
substituteSimpleCopyRegs(OpdMapper, 2);
const RegisterBank *DstBank =
OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank;
if (DstBank == &AMDGPU::SGPRRegBank)
break;
MachineIRBuilder B(MI);
Register Lo = MI.getOperand(1).getReg();
Register Hi = MI.getOperand(2).getReg();
const LLT S32 = LLT::scalar(32);
const RegisterBank *BankLo =
OpdMapper.getInstrMapping().getOperandMapping(1).BreakDown[0].RegBank;
const RegisterBank *BankHi =
OpdMapper.getInstrMapping().getOperandMapping(2).BreakDown[0].RegBank;
Register ZextLo;
Register ShiftHi;
if (Opc == AMDGPU::G_BUILD_VECTOR) {
ZextLo = B.buildZExt(S32, Lo).getReg(0);
MRI.setRegBank(ZextLo, *BankLo);
Register ZextHi = B.buildZExt(S32, Hi).getReg(0);
MRI.setRegBank(ZextHi, *BankHi);
auto ShiftAmt = B.buildConstant(S32, 16);
MRI.setRegBank(ShiftAmt.getReg(0), *BankHi);
ShiftHi = B.buildShl(S32, ZextHi, ShiftAmt).getReg(0);
MRI.setRegBank(ShiftHi, *BankHi);
} else {
Register MaskLo = B.buildConstant(S32, 0xffff).getReg(0);
MRI.setRegBank(MaskLo, *BankLo);
auto ShiftAmt = B.buildConstant(S32, 16);
MRI.setRegBank(ShiftAmt.getReg(0), *BankHi);
ShiftHi = B.buildShl(S32, Hi, ShiftAmt).getReg(0);
MRI.setRegBank(ShiftHi, *BankHi);
ZextLo = B.buildAnd(S32, Lo, MaskLo).getReg(0);
MRI.setRegBank(ZextLo, *BankLo);
}
auto Or = B.buildOr(S32, ZextLo, ShiftHi);
MRI.setRegBank(Or.getReg(0), *DstBank);
B.buildBitcast(DstReg, Or);
MI.eraseFromParent();
return;
}
case AMDGPU::G_EXTRACT_VECTOR_ELT: {
SmallVector<Register, 2> DstRegs(OpdMapper.getVRegs(0));
assert(OpdMapper.getVRegs(1).empty() && OpdMapper.getVRegs(2).empty());
Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
const LLT S32 = LLT::scalar(32);
LLT DstTy = MRI.getType(DstReg);
LLT SrcTy = MRI.getType(SrcReg);
if (foldExtractEltToCmpSelect(MI, MRI, OpdMapper))
return;
MachineIRBuilder B(MI);
const ValueMapping &DstMapping
= OpdMapper.getInstrMapping().getOperandMapping(0);
const RegisterBank *DstBank = DstMapping.BreakDown[0].RegBank;
const RegisterBank *SrcBank =
OpdMapper.getInstrMapping().getOperandMapping(1).BreakDown[0].RegBank;
const RegisterBank *IdxBank =
OpdMapper.getInstrMapping().getOperandMapping(2).BreakDown[0].RegBank;
Register BaseIdxReg;
unsigned ConstOffset;
std::tie(BaseIdxReg, ConstOffset) =
AMDGPU::getBaseWithConstantOffset(MRI, MI.getOperand(2).getReg());
bool ShouldMoveIndexIntoLoop = IdxBank != &AMDGPU::SGPRRegBank &&
ConstOffset > 0 &&
ConstOffset < SrcTy.getNumElements();
if (ShouldMoveIndexIntoLoop)
MI.getOperand(2).setReg(BaseIdxReg);
const bool NeedCopyToVGPR = DstBank == &AMDGPU::VGPRRegBank &&
SrcBank == &AMDGPU::SGPRRegBank;
if (DstRegs.empty()) {
applyDefaultMapping(OpdMapper);
executeInWaterfallLoop(MI, MRI, { 2 });
if (NeedCopyToVGPR) {
Register TmpReg = MRI.createGenericVirtualRegister(DstTy);
MRI.setRegBank(TmpReg, AMDGPU::SGPRRegBank);
MI.getOperand(0).setReg(TmpReg);
B.setInsertPt(*MI.getParent(), ++MI.getIterator());
buildVCopy(B, DstReg, TmpReg);
}
if (ShouldMoveIndexIntoLoop)
reinsertVectorIndexAdd(B, MI, 2, ConstOffset);
return;
}
assert(DstTy.getSizeInBits() == 64);
LLT Vec32 = LLT::fixed_vector(2 * SrcTy.getNumElements(), 32);
auto CastSrc = B.buildBitcast(Vec32, SrcReg);
auto One = B.buildConstant(S32, 1);
MachineBasicBlock::iterator MII = MI.getIterator();
MachineInstrSpan Span(MII, &B.getMBB());
auto IdxLo = B.buildShl(S32, BaseIdxReg, One);
auto IdxHi = B.buildAdd(S32, IdxLo, One);
auto Extract0 = B.buildExtractVectorElement(DstRegs[0], CastSrc, IdxLo);
auto Extract1 = B.buildExtractVectorElement(DstRegs[1], CastSrc, IdxHi);
MRI.setRegBank(DstReg, *DstBank);
MRI.setRegBank(CastSrc.getReg(0), *SrcBank);
MRI.setRegBank(One.getReg(0), AMDGPU::SGPRRegBank);
MRI.setRegBank(IdxLo.getReg(0), AMDGPU::SGPRRegBank);
MRI.setRegBank(IdxHi.getReg(0), AMDGPU::SGPRRegBank);
SmallSet<Register, 4> OpsToWaterfall;
if (!collectWaterfallOperands(OpsToWaterfall, MI, MRI, { 2 })) {
MI.eraseFromParent();
return;
}
B.setInstr(*Span.begin());
MI.eraseFromParent();
executeInWaterfallLoop(B, make_range(Span.begin(), Span.end()),
OpsToWaterfall, MRI);
if (NeedCopyToVGPR) {
MachineBasicBlock *LoopBB = Extract1->getParent();
Register TmpReg0 = MRI.createGenericVirtualRegister(S32);
Register TmpReg1 = MRI.createGenericVirtualRegister(S32);
MRI.setRegBank(TmpReg0, AMDGPU::SGPRRegBank);
MRI.setRegBank(TmpReg1, AMDGPU::SGPRRegBank);
Extract0->getOperand(0).setReg(TmpReg0);
Extract1->getOperand(0).setReg(TmpReg1);
B.setInsertPt(*LoopBB, ++Extract1->getIterator());
buildVCopy(B, DstRegs[0], TmpReg0);
buildVCopy(B, DstRegs[1], TmpReg1);
}
if (ShouldMoveIndexIntoLoop)
reinsertVectorIndexAdd(B, *IdxLo, 1, ConstOffset);
return;
}
case AMDGPU::G_INSERT_VECTOR_ELT: {
SmallVector<Register, 2> InsRegs(OpdMapper.getVRegs(2));
Register DstReg = MI.getOperand(0).getReg();
LLT VecTy = MRI.getType(DstReg);
assert(OpdMapper.getVRegs(0).empty());
assert(OpdMapper.getVRegs(3).empty());
if (substituteSimpleCopyRegs(OpdMapper, 1))
MRI.setType(MI.getOperand(1).getReg(), VecTy);
if (foldInsertEltToCmpSelect(MI, MRI, OpdMapper))
return;
const RegisterBank *IdxBank =
OpdMapper.getInstrMapping().getOperandMapping(3).BreakDown[0].RegBank;
Register SrcReg = MI.getOperand(1).getReg();
Register InsReg = MI.getOperand(2).getReg();
LLT InsTy = MRI.getType(InsReg);
(void)InsTy;
Register BaseIdxReg;
unsigned ConstOffset;
std::tie(BaseIdxReg, ConstOffset) =
AMDGPU::getBaseWithConstantOffset(MRI, MI.getOperand(3).getReg());
bool ShouldMoveIndexIntoLoop = IdxBank != &AMDGPU::SGPRRegBank &&
ConstOffset > 0 &&
ConstOffset < VecTy.getNumElements();
if (ShouldMoveIndexIntoLoop)
MI.getOperand(3).setReg(BaseIdxReg);
if (InsRegs.empty()) {
executeInWaterfallLoop(MI, MRI, { 3 });
if (ShouldMoveIndexIntoLoop) {
MachineIRBuilder B(MI);
reinsertVectorIndexAdd(B, MI, 3, ConstOffset);
}
return;
}
assert(InsTy.getSizeInBits() == 64);
const LLT S32 = LLT::scalar(32);
LLT Vec32 = LLT::fixed_vector(2 * VecTy.getNumElements(), 32);
MachineIRBuilder B(MI);
auto CastSrc = B.buildBitcast(Vec32, SrcReg);
auto One = B.buildConstant(S32, 1);
MachineInstrSpan Span(MachineBasicBlock::iterator(&MI), &B.getMBB());
auto IdxLo = B.buildShl(S32, BaseIdxReg, One);
auto IdxHi = B.buildAdd(S32, IdxLo, One);
auto InsLo = B.buildInsertVectorElement(Vec32, CastSrc, InsRegs[0], IdxLo);
auto InsHi = B.buildInsertVectorElement(Vec32, InsLo, InsRegs[1], IdxHi);
const RegisterBank *DstBank =
OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank;
const RegisterBank *SrcBank =
OpdMapper.getInstrMapping().getOperandMapping(1).BreakDown[0].RegBank;
const RegisterBank *InsSrcBank =
OpdMapper.getInstrMapping().getOperandMapping(2).BreakDown[0].RegBank;
MRI.setRegBank(InsReg, *InsSrcBank);
MRI.setRegBank(CastSrc.getReg(0), *SrcBank);
MRI.setRegBank(InsLo.getReg(0), *DstBank);
MRI.setRegBank(InsHi.getReg(0), *DstBank);
MRI.setRegBank(One.getReg(0), AMDGPU::SGPRRegBank);
MRI.setRegBank(IdxLo.getReg(0), AMDGPU::SGPRRegBank);
MRI.setRegBank(IdxHi.getReg(0), AMDGPU::SGPRRegBank);
SmallSet<Register, 4> OpsToWaterfall;
if (!collectWaterfallOperands(OpsToWaterfall, MI, MRI, { 3 })) {
B.setInsertPt(B.getMBB(), MI);
B.buildBitcast(DstReg, InsHi);
MI.eraseFromParent();
return;
}
B.setInstr(*Span.begin());
MI.eraseFromParent();
executeInWaterfallLoop(B, make_range(Span.begin(), Span.end()),
OpsToWaterfall, MRI);
B.buildBitcast(DstReg, InsHi);
if (ShouldMoveIndexIntoLoop)
reinsertVectorIndexAdd(B, *IdxLo, 1, ConstOffset);
return;
}
case AMDGPU::G_AMDGPU_BUFFER_LOAD:
case AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT:
case AMDGPU::G_AMDGPU_BUFFER_LOAD_SSHORT:
case AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE:
case AMDGPU::G_AMDGPU_BUFFER_LOAD_SBYTE:
case AMDGPU::G_AMDGPU_BUFFER_LOAD_FORMAT:
case AMDGPU::G_AMDGPU_BUFFER_LOAD_FORMAT_D16:
case AMDGPU::G_AMDGPU_TBUFFER_LOAD_FORMAT:
case AMDGPU::G_AMDGPU_TBUFFER_LOAD_FORMAT_D16:
case AMDGPU::G_AMDGPU_BUFFER_STORE:
case AMDGPU::G_AMDGPU_BUFFER_STORE_BYTE:
case AMDGPU::G_AMDGPU_BUFFER_STORE_SHORT:
case AMDGPU::G_AMDGPU_BUFFER_STORE_FORMAT:
case AMDGPU::G_AMDGPU_BUFFER_STORE_FORMAT_D16:
case AMDGPU::G_AMDGPU_TBUFFER_STORE_FORMAT:
case AMDGPU::G_AMDGPU_TBUFFER_STORE_FORMAT_D16: {
applyDefaultMapping(OpdMapper);
executeInWaterfallLoop(MI, MRI, {1, 4});
return;
}
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SWAP:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_ADD:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SUB:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMIN:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMIN:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMAX:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMAX:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_AND:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_OR:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_XOR:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_INC:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_DEC: {
applyDefaultMapping(OpdMapper);
executeInWaterfallLoop(MI, MRI, {2, 5});
return;
}
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMIN:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMAX: {
applyDefaultMapping(OpdMapper);
executeInWaterfallLoop(MI, MRI, {2, 5});
return;
}
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_CMPSWAP: {
applyDefaultMapping(OpdMapper);
executeInWaterfallLoop(MI, MRI, {3, 6});
return;
}
case AMDGPU::G_AMDGPU_S_BUFFER_LOAD: {
applyMappingSBufferLoad(OpdMapper);
return;
}
case AMDGPU::G_INTRINSIC: {
switch (MI.getIntrinsicID()) {
case Intrinsic::amdgcn_readlane: {
substituteSimpleCopyRegs(OpdMapper, 2);
assert(OpdMapper.getVRegs(0).empty());
assert(OpdMapper.getVRegs(3).empty());
constrainOpWithReadfirstlane(MI, MRI, 3); return;
}
case Intrinsic::amdgcn_writelane: {
assert(OpdMapper.getVRegs(0).empty());
assert(OpdMapper.getVRegs(2).empty());
assert(OpdMapper.getVRegs(3).empty());
substituteSimpleCopyRegs(OpdMapper, 4); constrainOpWithReadfirstlane(MI, MRI, 2); constrainOpWithReadfirstlane(MI, MRI, 3); return;
}
case Intrinsic::amdgcn_interp_p1:
case Intrinsic::amdgcn_interp_p2:
case Intrinsic::amdgcn_interp_mov:
case Intrinsic::amdgcn_interp_p1_f16:
case Intrinsic::amdgcn_interp_p2_f16:
case Intrinsic::amdgcn_lds_param_load: {
applyDefaultMapping(OpdMapper);
constrainOpWithReadfirstlane(MI, MRI, MI.getNumOperands() - 1); return;
}
case Intrinsic::amdgcn_interp_inreg_p10:
case Intrinsic::amdgcn_interp_inreg_p2:
case Intrinsic::amdgcn_interp_inreg_p10_f16:
case Intrinsic::amdgcn_interp_inreg_p2_f16:
applyDefaultMapping(OpdMapper);
return;
case Intrinsic::amdgcn_permlane16:
case Intrinsic::amdgcn_permlanex16: {
substituteSimpleCopyRegs(OpdMapper, 2);
substituteSimpleCopyRegs(OpdMapper, 3);
constrainOpWithReadfirstlane(MI, MRI, 4);
constrainOpWithReadfirstlane(MI, MRI, 5);
return;
}
case Intrinsic::amdgcn_sbfe:
applyMappingBFE(OpdMapper, true);
return;
case Intrinsic::amdgcn_ubfe:
applyMappingBFE(OpdMapper, false);
return;
case Intrinsic::amdgcn_ballot:
break;
}
break;
}
case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16:
case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE:
case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: {
const AMDGPU::RsrcIntrinsic *RSrcIntrin
= AMDGPU::lookupRsrcIntrinsic(MI.getIntrinsicID());
assert(RSrcIntrin && RSrcIntrin->IsImage);
applyMappingImage(MI, OpdMapper, MRI, RSrcIntrin->RsrcArg);
return;
}
case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY: {
unsigned N = MI.getNumExplicitOperands() - 2;
applyDefaultMapping(OpdMapper);
executeInWaterfallLoop(MI, MRI, { N });
return;
}
case AMDGPU::G_INTRINSIC_W_SIDE_EFFECTS: {
auto IntrID = MI.getIntrinsicID();
switch (IntrID) {
case Intrinsic::amdgcn_ds_ordered_add:
case Intrinsic::amdgcn_ds_ordered_swap: {
assert(OpdMapper.getVRegs(0).empty());
substituteSimpleCopyRegs(OpdMapper, 3);
constrainOpWithReadfirstlane(MI, MRI, 2); return;
}
case Intrinsic::amdgcn_ds_gws_init:
case Intrinsic::amdgcn_ds_gws_barrier:
case Intrinsic::amdgcn_ds_gws_sema_br: {
substituteSimpleCopyRegs(OpdMapper, 1);
constrainOpWithReadfirstlane(MI, MRI, 2); return;
}
case Intrinsic::amdgcn_ds_gws_sema_v:
case Intrinsic::amdgcn_ds_gws_sema_p:
case Intrinsic::amdgcn_ds_gws_sema_release_all: {
constrainOpWithReadfirstlane(MI, MRI, 1); return;
}
case Intrinsic::amdgcn_ds_append:
case Intrinsic::amdgcn_ds_consume: {
constrainOpWithReadfirstlane(MI, MRI, 2); return;
}
case Intrinsic::amdgcn_s_sendmsg:
case Intrinsic::amdgcn_s_sendmsghalt: {
constrainOpWithReadfirstlane(MI, MRI, 2); return;
}
case Intrinsic::amdgcn_s_setreg: {
constrainOpWithReadfirstlane(MI, MRI, 2);
return;
}
case Intrinsic::amdgcn_raw_buffer_load_lds: {
applyDefaultMapping(OpdMapper);
constrainOpWithReadfirstlane(MI, MRI, 1); constrainOpWithReadfirstlane(MI, MRI, 2); constrainOpWithReadfirstlane(MI, MRI, 5); return;
}
case Intrinsic::amdgcn_struct_buffer_load_lds: {
applyDefaultMapping(OpdMapper);
constrainOpWithReadfirstlane(MI, MRI, 1); constrainOpWithReadfirstlane(MI, MRI, 2); constrainOpWithReadfirstlane(MI, MRI, 6); return;
}
case Intrinsic::amdgcn_global_load_lds: {
applyDefaultMapping(OpdMapper);
constrainOpWithReadfirstlane(MI, MRI, 2);
return;
}
case Intrinsic::amdgcn_lds_direct_load: {
applyDefaultMapping(OpdMapper);
constrainOpWithReadfirstlane(MI, MRI, MI.getNumOperands() - 1); return;
}
case Intrinsic::amdgcn_exp_row:
applyDefaultMapping(OpdMapper);
constrainOpWithReadfirstlane(MI, MRI, 8); return;
default: {
if (const AMDGPU::RsrcIntrinsic *RSrcIntrin =
AMDGPU::lookupRsrcIntrinsic(IntrID)) {
if (RSrcIntrin->IsImage) {
applyMappingImage(MI, OpdMapper, MRI, RSrcIntrin->RsrcArg);
return;
}
}
break;
}
}
break;
}
case AMDGPU::G_SI_CALL: {
SmallSet<Register, 4> SGPROperandRegs;
if (!collectWaterfallOperands(SGPROperandRegs, MI, MRI, {1}))
break;
unsigned FrameSetupOpcode = AMDGPU::ADJCALLSTACKUP;
unsigned FrameDestroyOpcode = AMDGPU::ADJCALLSTACKDOWN;
SmallVector<MachineInstr *, 4> NonCopyInstrs;
unsigned NonCopyInstrsLen = 0;
MachineBasicBlock::iterator Start(&MI);
MachineBasicBlock::iterator LastCopy = Start;
MachineBasicBlock *MBB = MI.getParent();
const SIMachineFunctionInfo *Info =
MBB->getParent()->getInfo<SIMachineFunctionInfo>();
while (Start->getOpcode() != FrameSetupOpcode) {
--Start;
bool IsCopy = false;
if (Start->getOpcode() == AMDGPU::COPY) {
auto &Dst = Start->getOperand(0);
if (Dst.isReg()) {
Register Reg = Dst.getReg();
if (Reg.isPhysical() && MI.readsRegister(Reg, TRI)) {
IsCopy = true;
} else {
auto &Src = Start->getOperand(1);
if (Src.isReg()) {
Reg = Src.getReg();
IsCopy = Info->getScratchRSrcReg() == Reg;
}
}
}
}
if (IsCopy) {
LastCopy = Start;
NonCopyInstrsLen = NonCopyInstrs.size();
} else {
NonCopyInstrs.push_back(&*Start);
}
}
NonCopyInstrs.resize(NonCopyInstrsLen);
for (auto *NonCopy : reverse(NonCopyInstrs)) {
MBB->splice(LastCopy, MBB, NonCopy->getIterator());
}
Start = LastCopy;
NonCopyInstrs.clear();
NonCopyInstrsLen = 0;
MachineBasicBlock::iterator End(&MI);
LastCopy = End;
while (End->getOpcode() != FrameDestroyOpcode) {
++End;
bool IsCopy = false;
if (End->getOpcode() == AMDGPU::COPY) {
auto &Src = End->getOperand(1);
if (Src.isReg()) {
Register Reg = Src.getReg();
IsCopy = Reg.isPhysical() && MI.modifiesRegister(Reg, TRI);
}
}
if (IsCopy) {
LastCopy = End;
NonCopyInstrsLen = NonCopyInstrs.size();
} else {
NonCopyInstrs.push_back(&*End);
}
}
NonCopyInstrs.resize(NonCopyInstrsLen);
End = LastCopy;
++LastCopy;
for (auto *NonCopy : reverse(NonCopyInstrs)) {
MBB->splice(LastCopy, MBB, NonCopy->getIterator());
}
++End;
MachineIRBuilder B(*Start);
executeInWaterfallLoop(B, make_range(Start, End), SGPROperandRegs, MRI);
break;
}
case AMDGPU::G_LOAD:
case AMDGPU::G_ZEXTLOAD:
case AMDGPU::G_SEXTLOAD: {
if (applyMappingLoad(MI, OpdMapper, MRI))
return;
break;
}
case AMDGPU::G_DYN_STACKALLOC:
applyMappingDynStackAlloc(MI, OpdMapper, MRI);
return;
case AMDGPU::G_SBFX:
applyMappingBFE(OpdMapper, true);
return;
case AMDGPU::G_UBFX:
applyMappingBFE(OpdMapper, false);
return;
case AMDGPU::G_AMDGPU_MAD_U64_U32:
case AMDGPU::G_AMDGPU_MAD_I64_I32:
applyMappingMAD_64_32(OpdMapper);
return;
default:
break;
}
return applyDefaultMapping(OpdMapper);
}
/// Combine the register banks of two values into the bank a common
/// operation on both would require. InvalidRegBankID acts as an identity
/// element; matching uniform banks (SGPR or AGPR) are preserved, and any
/// other combination must be executed on the VALU.
static unsigned regBankUnion(unsigned RB0, unsigned RB1) {
  // An invalid bank imposes no constraint; take the other side's bank.
  if (RB0 == AMDGPU::InvalidRegBankID)
    return RB1;
  if (RB1 == AMDGPU::InvalidRegBankID)
    return RB0;

  // Identical SGPR or AGPR banks may stay where they are.
  if (RB0 == RB1 &&
      (RB0 == AMDGPU::SGPRRegBankID || RB0 == AMDGPU::AGPRRegBankID))
    return RB0;

  // Anything else (including mixed combinations) degrades to VGPR.
  return AMDGPU::VGPRRegBankID;
}
/// Like regBankUnion, but specialized for boolean values: a VCC operand on
/// either side forces the result to be a VCC bit.
static unsigned regBankBoolUnion(unsigned RB0, unsigned RB1) {
  if (RB1 == AMDGPU::InvalidRegBankID)
    return RB0;
  if (RB0 == AMDGPU::InvalidRegBankID)
    return RB1;

  // VCC is sticky: combining it with any valid bank still yields VCC.
  if (RB0 == AMDGPU::VCCRegBankID || RB1 == AMDGPU::VCCRegBankID)
    return AMDGPU::VCCRegBankID;

  return regBankUnion(RB0, RB1);
}
/// Compute the union of the banks of all register operands of \p MI that
/// already have an assigned bank. Returns InvalidRegBankID when no operand
/// has been assigned one yet.
unsigned AMDGPURegisterBankInfo::getMappingType(const MachineRegisterInfo &MRI,
                                               const MachineInstr &MI) const {
  unsigned ResultBank = AMDGPU::InvalidRegBankID;

  for (const MachineOperand &Op : MI.operands()) {
    if (!Op.isReg())
      continue;

    const RegisterBank *OpBank = getRegBank(Op.getReg(), MRI, *TRI);
    if (!OpBank)
      continue;

    ResultBank = regBankUnion(ResultBank, OpBank->getID());
    // VGPR absorbs everything; no later operand can change the answer.
    if (ResultBank == AMDGPU::VGPRRegBankID)
      break;
  }

  return ResultBank;
}
/// Return true if every register operand of \p MI that has an assigned bank
/// is in the SGPR bank, i.e. the instruction can be mapped entirely to the
/// scalar unit. Operands with no bank yet do not disqualify the mapping.
bool AMDGPURegisterBankInfo::isSALUMapping(const MachineInstr &MI) const {
  const MachineRegisterInfo &MRI =
      MI.getParent()->getParent()->getRegInfo();

  for (const MachineOperand &Op : MI.operands()) {
    if (!Op.isReg())
      continue;

    const RegisterBank *Bank = getRegBank(Op.getReg(), MRI, *TRI);
    if (Bank && Bank->getID() != AMDGPU::SGPRRegBankID)
      return false;
  }

  return true;
}
/// Build a mapping that places every register operand of \p MI in the SGPR
/// bank at its full size (the all-scalar / SALU mapping).
const RegisterBankInfo::InstructionMapping &
AMDGPURegisterBankInfo::getDefaultMappingSOP(const MachineInstr &MI) const {
  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  const unsigned NumOps = MI.getNumOperands();
  SmallVector<const ValueMapping *, 8> OpdsMapping(NumOps);

  for (unsigned I = 0; I != NumOps; ++I) {
    const MachineOperand &Op = MI.getOperand(I);
    if (!Op.isReg())
      continue;

    unsigned Size = getSizeInBits(Op.getReg(), MRI, *TRI);
    OpdsMapping[I] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size);
  }

  return getInstructionMapping(1, 1, getOperandsMapping(OpdsMapping), NumOps);
}
/// Build a mapping that places every register operand of \p MI on the vector
/// unit: 1-bit values go to the VCC bank, everything else to VGPRs.
const RegisterBankInfo::InstructionMapping &
AMDGPURegisterBankInfo::getDefaultMappingVOP(const MachineInstr &MI) const {
  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  const unsigned NumOps = MI.getNumOperands();
  SmallVector<const ValueMapping *, 8> OpdsMapping(NumOps);

  for (unsigned I = 0; I != NumOps; ++I) {
    const MachineOperand &Op = MI.getOperand(I);
    if (!Op.isReg())
      continue;

    unsigned Size = getSizeInBits(Op.getReg(), MRI, *TRI);
    // 1-bit values are condition bits and live in VCC.
    unsigned BankID = Size == 1 ? AMDGPU::VCCRegBankID : AMDGPU::VGPRRegBankID;
    OpdsMapping[I] = AMDGPU::getValueMapping(BankID, Size);
  }

  return getInstructionMapping(1, 1, getOperandsMapping(OpdsMapping), NumOps);
}
/// Build a mapping that unconditionally places every register operand of
/// \p MI in the VGPR bank at its full size (including 1-bit values, unlike
/// getDefaultMappingVOP).
const RegisterBankInfo::InstructionMapping &
AMDGPURegisterBankInfo::getDefaultMappingAllVGPR(const MachineInstr &MI) const {
  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  const unsigned NumOps = MI.getNumOperands();
  SmallVector<const ValueMapping *, 8> OpdsMapping(NumOps);

  for (unsigned OpIdx = 0; OpIdx != NumOps; ++OpIdx) {
    const MachineOperand &Op = MI.getOperand(OpIdx);
    if (!Op.isReg())
      continue;

    unsigned Size = getSizeInBits(Op.getReg(), MRI, *TRI);
    OpdsMapping[OpIdx] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size);
  }

  return getInstructionMapping(1, 1, getOperandsMapping(OpdsMapping), NumOps);
}
/// Build the mapping for an image intrinsic instruction. The resource
/// descriptor operand at \p RsrcIdx (and the operand immediately after it,
/// presumably the sampler — confirm against the intrinsic layout) must stay
/// uniform, so they keep their current bank (defaulting to SGPR); all other
/// register operands are mapped to VGPRs.
const RegisterBankInfo::InstructionMapping &
AMDGPURegisterBankInfo::getImageMapping(const MachineRegisterInfo &MRI,
                                        const MachineInstr &MI,
                                        int RsrcIdx) const {
  // RsrcIdx is relative to the intrinsic's arguments; shift past the
  // explicit defs and the intrinsic ID operand.
  RsrcIdx += MI.getNumExplicitDefs() + 1;

  const int NumOps = MI.getNumOperands();
  SmallVector<const ValueMapping *, 8> OpdsMapping(NumOps);

  for (int I = 0; I != NumOps; ++I) {
    const MachineOperand &Op = MI.getOperand(I);
    if (!Op.isReg())
      continue;

    Register OpReg = Op.getReg();
    if (!OpReg)
      continue;

    unsigned Size = getSizeInBits(OpReg, MRI, *TRI);

    if (I == RsrcIdx || I == RsrcIdx + 1) {
      // Descriptor operands must be SGPRs; keep an existing bank if one
      // was already assigned.
      unsigned Bank = getRegBankID(OpReg, MRI, AMDGPU::SGPRRegBankID);
      OpdsMapping[I] = AMDGPU::getValueMapping(Bank, Size);
    } else {
      OpdsMapping[I] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size);
    }
  }

  return getInstructionMapping(1, 1, getOperandsMapping(OpdsMapping), NumOps);
}
/// Compute the value mapping for a pointer operand. Pointers outside the
/// flat/global address spaces — or any pointer when globals are accessed
/// through flat instructions — are forced to VGPRs; otherwise the pointer
/// keeps its currently-assigned bank.
const RegisterBankInfo::ValueMapping *
AMDGPURegisterBankInfo::getValueMappingForPtr(const MachineRegisterInfo &MRI,
                                              Register PtrReg) const {
  const LLT PtrTy = MRI.getType(PtrReg);
  const unsigned Size = PtrTy.getSizeInBits();

  if (Subtarget.useFlatForGlobal() ||
      !AMDGPU::isFlatGlobalAddrSpace(PtrTy.getAddressSpace()))
    return AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size);

  // Preserve whatever bank the pointer already has.
  const RegisterBank *PtrBank = getRegBank(PtrReg, MRI, *TRI);
  return AMDGPU::getValueMapping(PtrBank->getID(), Size);
}
/// Build the instruction mapping for a load. A uniform (SGPR) pointer into a
/// flat/global address space may use a scalar load when legal; otherwise the
/// loaded value must be a VGPR, with the pointer staying scalar unless
/// globals are lowered to flat accesses. All other loads are fully VGPR.
const RegisterBankInfo::InstructionMapping &
AMDGPURegisterBankInfo::getInstrMappingForLoad(const MachineInstr &MI) const {
  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();

  Register PtrReg = MI.getOperand(1).getReg();
  const LLT PtrTy = MRI.getType(PtrReg);
  const unsigned AS = PtrTy.getAddressSpace();
  const unsigned PtrSize = PtrTy.getSizeInBits();
  const unsigned ValSize = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);

  const ValueMapping *ValMapping;
  const ValueMapping *PtrMapping;

  const RegisterBank *PtrBank = getRegBank(PtrReg, MRI, *TRI);
  if (PtrBank == &AMDGPU::SGPRRegBank && AMDGPU::isFlatGlobalAddrSpace(AS)) {
    if (isScalarLoadLegal(MI)) {
      // Uniform load the scalar unit can handle: value and pointer both
      // stay in SGPRs.
      ValMapping = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, ValSize);
      PtrMapping = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, PtrSize);
    } else {
      // The load must go through the vector memory path, but the pointer
      // may remain scalar unless globals use flat instructions.
      ValMapping = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, ValSize);
      unsigned PtrBankID = Subtarget.useFlatForGlobal()
                               ? AMDGPU::VGPRRegBankID
                               : AMDGPU::SGPRRegBankID;
      PtrMapping = AMDGPU::getValueMapping(PtrBankID, PtrSize);
    }
  } else {
    // Divergent pointer or non-global address space: everything is VGPR.
    ValMapping = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, ValSize);
    PtrMapping = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, PtrSize);
  }

  SmallVector<const ValueMapping *, 2> OpdsMapping{ValMapping, PtrMapping};
  return getInstructionMapping(1, 1, getOperandsMapping(OpdsMapping),
                               MI.getNumOperands());
}
/// Return the ID of the bank currently assigned to \p Reg, or \p Default if
/// no bank has been assigned yet.
unsigned
AMDGPURegisterBankInfo::getRegBankID(Register Reg,
                                     const MachineRegisterInfo &MRI,
                                     unsigned Default) const {
  if (const RegisterBank *Bank = getRegBank(Reg, MRI, *TRI))
    return Bank->getID();
  return Default;
}
/// Get a value mapping for \p Reg that keeps its existing bank if one was
/// already assigned, defaulting to SGPR otherwise.
const RegisterBankInfo::ValueMapping *
AMDGPURegisterBankInfo::getSGPROpMapping(Register Reg,
                                         const MachineRegisterInfo &MRI,
                                         const TargetRegisterInfo &TRI) const {
  const unsigned Size = getSizeInBits(Reg, MRI, TRI);
  const unsigned BankID = getRegBankID(Reg, MRI, AMDGPU::SGPRRegBankID);
  return AMDGPU::getValueMapping(BankID, Size);
}
/// Get a value mapping that unconditionally places \p Reg in the VGPR bank
/// at its full size.
const RegisterBankInfo::ValueMapping *
AMDGPURegisterBankInfo::getVGPROpMapping(Register Reg,
                                         const MachineRegisterInfo &MRI,
                                         const TargetRegisterInfo &TRI) const {
  return AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID,
                                 getSizeInBits(Reg, MRI, TRI));
}
/// Get a value mapping that unconditionally places \p Reg in the AGPR bank
/// at its full size.
const RegisterBankInfo::ValueMapping *
AMDGPURegisterBankInfo::getAGPROpMapping(Register Reg,
                                         const MachineRegisterInfo &MRI,
                                         const TargetRegisterInfo &TRI) const {
  return AMDGPU::getValueMapping(AMDGPU::AGPRRegBankID,
                                 getSizeInBits(Reg, MRI, TRI));
}
const RegisterBankInfo::InstructionMapping &
AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
if (MI.isCopy() || MI.getOpcode() == AMDGPU::G_FREEZE) {
const RegisterBank *DstBank = getRegBank(MI.getOperand(0).getReg(), MRI,
*TRI);
const RegisterBank *SrcBank = getRegBank(MI.getOperand(1).getReg(), MRI,
*TRI);
assert(SrcBank && "src bank should have been assigned already");
if (!DstBank)
DstBank = SrcBank;
unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
if (cannotCopy(*DstBank, *SrcBank, Size))
return getInvalidInstructionMapping();
const ValueMapping &ValMap = getValueMapping(0, Size, *DstBank);
unsigned OpdsMappingSize = MI.isCopy() ? 1 : 2;
SmallVector<const ValueMapping *, 1> OpdsMapping(OpdsMappingSize);
OpdsMapping[0] = &ValMap;
if (MI.getOpcode() == AMDGPU::G_FREEZE)
OpdsMapping[1] = &ValMap;
return getInstructionMapping(
1, 1,
getOperandsMapping(OpdsMapping), OpdsMappingSize);
}
if (MI.isRegSequence()) {
unsigned BankID = AMDGPU::SGPRRegBankID;
for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
auto OpBank = getRegBankID(MI.getOperand(I).getReg(), MRI);
if (OpBank != AMDGPU::SGPRRegBankID) {
BankID = AMDGPU::VGPRRegBankID;
break;
}
}
unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
const ValueMapping &ValMap = getValueMapping(0, Size, getRegBank(BankID));
return getInstructionMapping(
1, 1,
getOperandsMapping({&ValMap}), 1);
}
if (MI.getOpcode() == TargetOpcode::G_PHI) {
unsigned ResultBank = AMDGPU::InvalidRegBankID;
Register DstReg = MI.getOperand(0).getReg();
if (const RegisterBank *DstBank = getRegBank(DstReg, MRI, *TRI))
ResultBank = DstBank->getID();
for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
Register Reg = MI.getOperand(I).getReg();
const RegisterBank *Bank = getRegBank(Reg, MRI, *TRI);
if (!Bank || Bank->getID() == AMDGPU::VGPRRegBankID) {
ResultBank = AMDGPU::VGPRRegBankID;
break;
}
unsigned OpBank = Bank->getID();
ResultBank = regBankBoolUnion(ResultBank, OpBank);
}
assert(ResultBank != AMDGPU::InvalidRegBankID);
unsigned Size = MRI.getType(DstReg).getSizeInBits();
const ValueMapping &ValMap =
getValueMapping(0, Size, getRegBank(ResultBank));
return getInstructionMapping(
1, 1,
getOperandsMapping({&ValMap}), 1);
}
const RegisterBankInfo::InstructionMapping &Mapping = getInstrMappingImpl(MI);
if (Mapping.isValid())
return Mapping;
SmallVector<const ValueMapping*, 8> OpdsMapping(MI.getNumOperands());
switch (MI.getOpcode()) {
default:
return getInvalidInstructionMapping();
case AMDGPU::G_AND:
case AMDGPU::G_OR:
case AMDGPU::G_XOR: {
unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
if (Size == 1) {
const RegisterBank *DstBank
= getRegBank(MI.getOperand(0).getReg(), MRI, *TRI);
unsigned TargetBankID = AMDGPU::InvalidRegBankID;
unsigned BankLHS = AMDGPU::InvalidRegBankID;
unsigned BankRHS = AMDGPU::InvalidRegBankID;
if (DstBank) {
TargetBankID = DstBank->getID();
if (DstBank == &AMDGPU::VCCRegBank) {
TargetBankID = AMDGPU::VCCRegBankID;
BankLHS = AMDGPU::VCCRegBankID;
BankRHS = AMDGPU::VCCRegBankID;
} else {
BankLHS = getRegBankID(MI.getOperand(1).getReg(), MRI,
AMDGPU::SGPRRegBankID);
BankRHS = getRegBankID(MI.getOperand(2).getReg(), MRI,
AMDGPU::SGPRRegBankID);
}
} else {
BankLHS = getRegBankID(MI.getOperand(1).getReg(), MRI,
AMDGPU::VCCRegBankID);
BankRHS = getRegBankID(MI.getOperand(2).getReg(), MRI,
AMDGPU::VCCRegBankID);
if (BankLHS == AMDGPU::VGPRRegBankID || BankRHS == AMDGPU::VGPRRegBankID) {
TargetBankID = AMDGPU::VGPRRegBankID;
} else if (BankLHS == AMDGPU::VCCRegBankID || BankRHS == AMDGPU::VCCRegBankID) {
TargetBankID = AMDGPU::VCCRegBankID;
BankLHS = AMDGPU::VCCRegBankID;
BankRHS = AMDGPU::VCCRegBankID;
} else if (BankLHS == AMDGPU::SGPRRegBankID && BankRHS == AMDGPU::SGPRRegBankID) {
TargetBankID = AMDGPU::SGPRRegBankID;
}
}
OpdsMapping[0] = AMDGPU::getValueMapping(TargetBankID, Size);
OpdsMapping[1] = AMDGPU::getValueMapping(BankLHS, Size);
OpdsMapping[2] = AMDGPU::getValueMapping(BankRHS, Size);
break;
}
if (Size == 64) {
if (isSALUMapping(MI)) {
OpdsMapping[0] = getValueMappingSGPR64Only(AMDGPU::SGPRRegBankID, Size);
OpdsMapping[1] = OpdsMapping[2] = OpdsMapping[0];
} else {
OpdsMapping[0] = getValueMappingSGPR64Only(AMDGPU::VGPRRegBankID, Size);
unsigned Bank1 = getRegBankID(MI.getOperand(1).getReg(), MRI );
OpdsMapping[1] = AMDGPU::getValueMapping(Bank1, Size);
unsigned Bank2 = getRegBankID(MI.getOperand(2).getReg(), MRI );
OpdsMapping[2] = AMDGPU::getValueMapping(Bank2, Size);
}
break;
}
LLVM_FALLTHROUGH;
}
case AMDGPU::G_PTR_ADD:
case AMDGPU::G_PTRMASK:
case AMDGPU::G_ADD:
case AMDGPU::G_SUB:
case AMDGPU::G_MUL:
case AMDGPU::G_SHL:
case AMDGPU::G_LSHR:
case AMDGPU::G_ASHR:
case AMDGPU::G_UADDO:
case AMDGPU::G_USUBO:
case AMDGPU::G_UADDE:
case AMDGPU::G_SADDE:
case AMDGPU::G_USUBE:
case AMDGPU::G_SSUBE:
case AMDGPU::G_SMIN:
case AMDGPU::G_SMAX:
case AMDGPU::G_UMIN:
case AMDGPU::G_UMAX:
case AMDGPU::G_ABS:
case AMDGPU::G_SHUFFLE_VECTOR:
case AMDGPU::G_SBFX:
case AMDGPU::G_UBFX:
if (isSALUMapping(MI))
return getDefaultMappingSOP(MI);
LLVM_FALLTHROUGH;
case AMDGPU::G_SADDSAT: case AMDGPU::G_SSUBSAT:
case AMDGPU::G_UADDSAT:
case AMDGPU::G_USUBSAT:
case AMDGPU::G_FADD:
case AMDGPU::G_FSUB:
case AMDGPU::G_FPTOSI:
case AMDGPU::G_FPTOUI:
case AMDGPU::G_FMUL:
case AMDGPU::G_FMA:
case AMDGPU::G_FMAD:
case AMDGPU::G_FSQRT:
case AMDGPU::G_FFLOOR:
case AMDGPU::G_FCEIL:
case AMDGPU::G_FRINT:
case AMDGPU::G_SITOFP:
case AMDGPU::G_UITOFP:
case AMDGPU::G_FPTRUNC:
case AMDGPU::G_FPEXT:
case AMDGPU::G_FEXP2:
case AMDGPU::G_FLOG2:
case AMDGPU::G_FMINNUM:
case AMDGPU::G_FMAXNUM:
case AMDGPU::G_FMINNUM_IEEE:
case AMDGPU::G_FMAXNUM_IEEE:
case AMDGPU::G_FCANONICALIZE:
case AMDGPU::G_INTRINSIC_TRUNC:
case AMDGPU::G_BSWAP: case AMDGPU::G_FSHR: case AMDGPU::G_AMDGPU_FMIN_LEGACY:
case AMDGPU::G_AMDGPU_FMAX_LEGACY:
case AMDGPU::G_AMDGPU_RCP_IFLAG:
case AMDGPU::G_AMDGPU_CVT_F32_UBYTE0:
case AMDGPU::G_AMDGPU_CVT_F32_UBYTE1:
case AMDGPU::G_AMDGPU_CVT_F32_UBYTE2:
case AMDGPU::G_AMDGPU_CVT_F32_UBYTE3:
case AMDGPU::G_AMDGPU_CVT_PK_I16_I32:
case AMDGPU::G_AMDGPU_SMED3:
return getDefaultMappingVOP(MI);
case AMDGPU::G_UMULH:
case AMDGPU::G_SMULH: {
if (Subtarget.hasScalarMulHiInsts() && isSALUMapping(MI))
return getDefaultMappingSOP(MI);
return getDefaultMappingVOP(MI);
}
case AMDGPU::G_AMDGPU_MAD_U64_U32:
case AMDGPU::G_AMDGPU_MAD_I64_I32: {
  // Three possible mappings, decided by which operands already sit on SGPRs:
  //   AllSalu — every register operand (two defs + three uses) is scalar, so
  //   the whole instruction can stay on the SALU.
  //   MulSalu — at least the two multiply inputs (operands 2 and 3) are
  //   scalar, enabling a scalar multiply feeding a vector add.
  bool AllSalu = true;
  bool MulSalu = true;
  for (unsigned i = 0; i < 5; ++i) {
    Register Reg = MI.getOperand(i).getReg();
    if (const RegisterBank *Bank = getRegBank(Reg, MRI, *TRI)) {
      if (Bank->getID() != AMDGPU::SGPRRegBankID) {
        AllSalu = false;
        if (i == 2 || i == 3) {
          // A vector multiply input rules out the mixed mapping entirely.
          MulSalu = false;
          break;
        }
      }
    }
  }
  if (AllSalu)
    return getDefaultMappingSOP(MI);
  // If the multiply may not be done on the SALU, or the target has full-rate
  // 64-bit VALU ops anyway, just use the all-VGPR mapping.
  if (!MulSalu || Subtarget.hasFullRate64Ops())
    return getDefaultMappingVOP(MI);
  // Mixed mapping: scalar 32-bit multiply inputs (ops 2/3), vector 64-bit
  // result/addend (ops 0/4), carry-out in VCC (op 1).
  OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 64);
  OpdsMapping[1] = AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1);
  OpdsMapping[2] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 32);
  OpdsMapping[3] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 32);
  OpdsMapping[4] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 64);
  break;
}
case AMDGPU::G_IMPLICIT_DEF: {
  // An undef value is cheapest as a scalar; users can copy to VGPR if needed.
  unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
  OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size);
  break;
}
case AMDGPU::G_FCONSTANT:
case AMDGPU::G_CONSTANT:
case AMDGPU::G_GLOBAL_VALUE:
case AMDGPU::G_BLOCK_ADDR:
case AMDGPU::G_READCYCLECOUNTER: {
  // Uniform producers: materialize on SGPRs.
  unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
  OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size);
  break;
}
case AMDGPU::G_FRAME_INDEX: {
  // Mapped to VGPR rather than SGPR like other constants; presumably frame
  // index elimination expects VALU uses — TODO confirm against
  // eliminateFrameIndex.
  unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
  OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size);
  break;
}
case AMDGPU::G_DYN_STACKALLOC: {
  // Result is the stack pointer, which is scalar; the requested size keeps
  // whatever bank it was computed on.
  OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 32);
  unsigned SrcBankID = getRegBankID(MI.getOperand(1).getReg(), MRI);
  OpdsMapping[1] = AMDGPU::getValueMapping(SrcBankID, 32);
  break;
}
case AMDGPU::G_AMDGPU_WAVE_ADDRESS: {
  // Scalar wave address in, per-lane (VGPR) address out.
  OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32);
  OpdsMapping[1] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 32);
  break;
}
case AMDGPU::G_INSERT: {
  // All register operands share one bank, derived from the whole instruction.
  unsigned BankID = getMappingType(MRI, MI);
  unsigned DstSize = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
  unsigned SrcSize = getSizeInBits(MI.getOperand(1).getReg(), MRI, *TRI);
  unsigned EltSize = getSizeInBits(MI.getOperand(2).getReg(), MRI, *TRI);
  OpdsMapping[0] = AMDGPU::getValueMapping(BankID, DstSize);
  OpdsMapping[1] = AMDGPU::getValueMapping(BankID, SrcSize);
  OpdsMapping[2] = AMDGPU::getValueMapping(BankID, EltSize);
  // Operand 3 is the bit offset immediate; no value mapping needed.
  OpdsMapping[3] = nullptr;
  break;
}
case AMDGPU::G_EXTRACT: {
  // Result stays on the same bank as the value being extracted from.
  unsigned BankID = getRegBankID(MI.getOperand(1).getReg(), MRI);
  unsigned DstSize = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
  unsigned SrcSize = getSizeInBits(MI.getOperand(1).getReg(), MRI, *TRI);
  OpdsMapping[0] = AMDGPU::getValueMapping(BankID, DstSize);
  OpdsMapping[1] = AMDGPU::getValueMapping(BankID, SrcSize);
  // Operand 2 is the bit offset immediate; no value mapping needed.
  OpdsMapping[2] = nullptr;
  break;
}
case AMDGPU::G_BUILD_VECTOR:
case AMDGPU::G_BUILD_VECTOR_TRUNC: {
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
  if (DstTy == LLT::fixed_vector(2, 16)) {
    // v2s16 build_vector is handled specially: keep each source on its own
    // bank and make the result the union of the two source banks.
    unsigned DstSize = DstTy.getSizeInBits();
    unsigned SrcSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
    unsigned Src0BankID = getRegBankID(MI.getOperand(1).getReg(), MRI);
    unsigned Src1BankID = getRegBankID(MI.getOperand(2).getReg(), MRI);
    unsigned DstBankID = regBankUnion(Src0BankID, Src1BankID);
    OpdsMapping[0] = AMDGPU::getValueMapping(DstBankID, DstSize);
    OpdsMapping[1] = AMDGPU::getValueMapping(Src0BankID, SrcSize);
    OpdsMapping[2] = AMDGPU::getValueMapping(Src1BankID, SrcSize);
    break;
  }
  // Other build_vectors use the generic merge handling below.
  LLVM_FALLTHROUGH;
}
case AMDGPU::G_MERGE_VALUES:
case AMDGPU::G_CONCAT_VECTORS: {
  // One common bank for the result and every (uniformly-sized) source.
  unsigned Bank = getMappingType(MRI, MI);
  unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
  unsigned SrcSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
  OpdsMapping[0] = AMDGPU::getValueMapping(Bank, DstSize);
  // Op 1 onwards are all sources of identical size.
  for (unsigned i = 1, e = MI.getNumOperands(); i != e; ++i)
    OpdsMapping[i] = AMDGPU::getValueMapping(Bank, SrcSize);
  break;
}
// Pure data-movement / sign-manipulation unary ops: result uses the same
// bank and size as the source.
case AMDGPU::G_BITREVERSE:
case AMDGPU::G_BITCAST:
case AMDGPU::G_INTTOPTR:
case AMDGPU::G_PTRTOINT:
case AMDGPU::G_FABS:
case AMDGPU::G_FNEG: {
  unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
  unsigned BankID = getRegBankID(MI.getOperand(1).getReg(), MRI);
  OpdsMapping[0] = OpdsMapping[1] = AMDGPU::getValueMapping(BankID, Size);
  break;
}
case AMDGPU::G_AMDGPU_FFBH_U32:
case AMDGPU::G_AMDGPU_FFBL_B32:
case AMDGPU::G_CTLZ_ZERO_UNDEF:
case AMDGPU::G_CTTZ_ZERO_UNDEF: {
  // Bit-scan result is always 32-bit; the source may be 64-bit, in which
  // case only the SGPR form keeps it as a single 64-bit value.
  unsigned Size = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
  unsigned BankID = getRegBankID(MI.getOperand(1).getReg(), MRI);
  OpdsMapping[0] = AMDGPU::getValueMapping(BankID, 32);
  OpdsMapping[1] = AMDGPU::getValueMappingSGPR64Only(BankID, Size);
  break;
}
case AMDGPU::G_CTPOP: {
  // Popcount: 32-bit result regardless of source width; same bank as source.
  unsigned Size = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
  unsigned BankID = getRegBankID(MI.getOperand(1).getReg(), MRI);
  OpdsMapping[0] = AMDGPU::getValueMapping(BankID, 32);
  OpdsMapping[1] = AMDGPU::getValueMapping(BankID, Size);
  break;
}
case AMDGPU::G_TRUNC: {
  // Truncate keeps the source's bank on both sides.
  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();
  unsigned Bank = getRegBankID(Src, MRI);
  unsigned DstSize = getSizeInBits(Dst, MRI, *TRI);
  unsigned SrcSize = getSizeInBits(Src, MRI, *TRI);
  OpdsMapping[0] = AMDGPU::getValueMapping(Bank, DstSize);
  OpdsMapping[1] = AMDGPU::getValueMapping(Bank, SrcSize);
  break;
}
case AMDGPU::G_ZEXT:
case AMDGPU::G_SEXT:
case AMDGPU::G_ANYEXT:
case AMDGPU::G_SEXT_INREG: {
  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();
  unsigned DstSize = getSizeInBits(Dst, MRI, *TRI);
  unsigned SrcSize = getSizeInBits(Src, MRI, *TRI);
  unsigned DstBank;
  const RegisterBank *SrcBank = getRegBank(Src, MRI, *TRI);
  assert(SrcBank);
  // The result is scalar only if the source is already scalar; any other
  // source bank (VGPR, VCC, ...) forces a vector result.
  switch (SrcBank->getID()) {
  case AMDGPU::SGPRRegBankID:
    DstBank = AMDGPU::SGPRRegBankID;
    break;
  default:
    DstBank = AMDGPU::VGPRRegBankID;
    break;
  }
  // 64-bit values stay whole only in the SGPR form of this mapping.
  OpdsMapping[0] = AMDGPU::getValueMappingSGPR64Only(DstBank, DstSize);
  OpdsMapping[1] = AMDGPU::getValueMappingSGPR64Only(SrcBank->getID(),
                                                     SrcSize);
  break;
}
case AMDGPU::G_FCMP: {
  // FP compares always execute on the VALU; the i1 result lives in VCC.
  unsigned Size = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits();
  OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1);
  // Operand 1 is the predicate (not a register); no mapping.
  OpdsMapping[1] = nullptr;
  OpdsMapping[2] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size);
  OpdsMapping[3] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size);
  break;
}
case AMDGPU::G_STORE: {
  assert(MI.getOperand(0).isReg());
  unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
  // The stored value must be in a VGPR; the pointer mapping depends on the
  // address space / bank (see getValueMappingForPtr).
  // FIXME: We need to specify a different reg bank once scalar stores are
  // supported — for now the value is unconditionally VGPR.
  const ValueMapping *ValMapping =
      AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size);
  OpdsMapping[0] = ValMapping;
  OpdsMapping[1] = getValueMappingForPtr(MRI, MI.getOperand(1).getReg());
  break;
}
case AMDGPU::G_ICMP: {
  auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
  unsigned Size = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits();
  // See if the result can live in SCC (scalar compare): requires all inputs
  // and the existing result bank to be SGPR, and a 32-bit compare — or a
  // 64-bit eq/ne when the subtarget has scalar 64-bit compare-eq.
  unsigned DstBank = getRegBankID(MI.getOperand(0).getReg(), MRI,
                                  AMDGPU::SGPRRegBankID);
  unsigned Op2Bank = getRegBankID(MI.getOperand(2).getReg(), MRI);
  unsigned Op3Bank = getRegBankID(MI.getOperand(3).getReg(), MRI);
  bool CanUseSCC = DstBank == AMDGPU::SGPRRegBankID &&
                   Op2Bank == AMDGPU::SGPRRegBankID &&
                   Op3Bank == AMDGPU::SGPRRegBankID &&
                   (Size == 32 || (Size == 64 &&
                    (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) &&
                    Subtarget.hasScalarCompareEq64()));
  DstBank = CanUseSCC ? AMDGPU::SGPRRegBankID : AMDGPU::VCCRegBankID;
  unsigned SrcBank = CanUseSCC ? AMDGPU::SGPRRegBankID : AMDGPU::VGPRRegBankID;
  // The i1 result is 1 bit in either bank (SCC bit or VCC lane mask).
  const unsigned ResultSize = 1;
  OpdsMapping[0] = AMDGPU::getValueMapping(DstBank, ResultSize);
  OpdsMapping[2] = AMDGPU::getValueMapping(SrcBank, Size);
  OpdsMapping[3] = AMDGPU::getValueMapping(SrcBank, Size);
  break;
}
case AMDGPU::G_EXTRACT_VECTOR_ELT: {
  // Result bank is the union of the vector's and the index's banks; a
  // divergent index makes the result divergent even from a uniform vector.
  unsigned SrcBankID = getRegBankID(MI.getOperand(1).getReg(), MRI);
  unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
  unsigned SrcSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
  unsigned IdxSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits();
  unsigned IdxBank = getRegBankID(MI.getOperand(2).getReg(), MRI);
  unsigned OutputBankID = regBankUnion(SrcBankID, IdxBank);
  OpdsMapping[0] = AMDGPU::getValueMappingSGPR64Only(OutputBankID, DstSize);
  OpdsMapping[1] = AMDGPU::getValueMapping(SrcBankID, SrcSize);
  // The index, whether scalar or vector, keeps its own bank.
  OpdsMapping[2] = AMDGPU::getValueMapping(IdxBank, IdxSize);
  break;
}
case AMDGPU::G_INSERT_VECTOR_ELT: {
  // The whole vector (input and output) is scalar only if every operand is.
  unsigned OutputBankID = isSALUMapping(MI) ?
    AMDGPU::SGPRRegBankID : AMDGPU::VGPRRegBankID;
  unsigned VecSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
  unsigned InsertSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits();
  unsigned IdxSize = MRI.getType(MI.getOperand(3).getReg()).getSizeInBits();
  unsigned InsertEltBankID = getRegBankID(MI.getOperand(2).getReg(), MRI);
  unsigned IdxBankID = getRegBankID(MI.getOperand(3).getReg(), MRI);
  OpdsMapping[0] = AMDGPU::getValueMapping(OutputBankID, VecSize);
  OpdsMapping[1] = AMDGPU::getValueMapping(OutputBankID, VecSize);
  // A 64-bit element inserted into a VGPR vector must be split into two
  // 32-bit pieces; getValueMappingSplit64 produces that breakdown.
  if (InsertSize == 64 && OutputBankID == AMDGPU::VGPRRegBankID) {
    OpdsMapping[2] = AMDGPU::getValueMappingSplit64(InsertEltBankID,
                                                    InsertSize);
  } else {
    assert(InsertSize == 32 || InsertSize == 64);
    OpdsMapping[2] = AMDGPU::getValueMapping(InsertEltBankID, InsertSize);
  }
  OpdsMapping[3] = AMDGPU::getValueMapping(IdxBankID, IdxSize);
  break;
}
case AMDGPU::G_UNMERGE_VALUES: {
  // All results and the source share one bank; sizes can differ per operand.
  unsigned Bank = getMappingType(MRI, MI);
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    unsigned Size = getSizeInBits(MI.getOperand(i).getReg(), MRI, *TRI);
    OpdsMapping[i] = AMDGPU::getValueMapping(Bank, Size);
  }
  break;
}
// Buffer loads/stores: data and the voffset/vindex are VGPRs; the resource
// descriptor and soffset are SGPRs (waterfalled later if they are not).
case AMDGPU::G_AMDGPU_BUFFER_LOAD:
case AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE:
case AMDGPU::G_AMDGPU_BUFFER_LOAD_SBYTE:
case AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT:
case AMDGPU::G_AMDGPU_BUFFER_LOAD_SSHORT:
case AMDGPU::G_AMDGPU_BUFFER_LOAD_FORMAT:
case AMDGPU::G_AMDGPU_BUFFER_LOAD_FORMAT_D16:
case AMDGPU::G_AMDGPU_TBUFFER_LOAD_FORMAT:
case AMDGPU::G_AMDGPU_TBUFFER_LOAD_FORMAT_D16:
case AMDGPU::G_AMDGPU_TBUFFER_STORE_FORMAT:
case AMDGPU::G_AMDGPU_TBUFFER_STORE_FORMAT_D16:
case AMDGPU::G_AMDGPU_BUFFER_STORE:
case AMDGPU::G_AMDGPU_BUFFER_STORE_BYTE:
case AMDGPU::G_AMDGPU_BUFFER_STORE_SHORT:
case AMDGPU::G_AMDGPU_BUFFER_STORE_FORMAT:
case AMDGPU::G_AMDGPU_BUFFER_STORE_FORMAT_D16: {
  OpdsMapping[0] = getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI);
  // rsrc
  OpdsMapping[1] = getSGPROpMapping(MI.getOperand(1).getReg(), MRI, *TRI);
  // vindex / voffset
  OpdsMapping[2] = getVGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
  OpdsMapping[3] = getVGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI);
  // soffset
  OpdsMapping[4] = getSGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI);
  break;
}
// Buffer atomics: like buffer stores but with an extra data/result operand.
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SWAP:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_ADD:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SUB:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMIN:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMIN:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMAX:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMAX:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_AND:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_OR:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_XOR:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_INC:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_DEC:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMIN:
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMAX: {
  // dst / vdata
  OpdsMapping[0] = getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI);
  OpdsMapping[1] = getVGPROpMapping(MI.getOperand(1).getReg(), MRI, *TRI);
  // rsrc
  OpdsMapping[2] = getSGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
  // vindex / voffset
  OpdsMapping[3] = getVGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI);
  OpdsMapping[4] = getVGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI);
  // soffset
  OpdsMapping[5] = getSGPROpMapping(MI.getOperand(5).getReg(), MRI, *TRI);
  break;
}
case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_CMPSWAP: {
  // dst, cmp value and new value are VGPRs; rsrc and soffset are SGPRs.
  OpdsMapping[0] = getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI);
  OpdsMapping[1] = getVGPROpMapping(MI.getOperand(1).getReg(), MRI, *TRI);
  OpdsMapping[2] = getVGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
  OpdsMapping[3] = getSGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI);
  OpdsMapping[4] = getVGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI);
  OpdsMapping[5] = getVGPROpMapping(MI.getOperand(5).getReg(), MRI, *TRI);
  OpdsMapping[6] = getSGPROpMapping(MI.getOperand(6).getReg(), MRI, *TRI);
  break;
}
case AMDGPU::G_AMDGPU_S_BUFFER_LOAD: {
  // Scalar buffer load: rsrc and offset want SGPRs; if either ended up
  // elsewhere the result must be the union of the two banks.
  OpdsMapping[1] = getSGPROpMapping(MI.getOperand(1).getReg(), MRI, *TRI);
  OpdsMapping[2] = getSGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
  unsigned RSrcBank = OpdsMapping[1]->BreakDown[0].RegBank->getID();
  unsigned OffsetBank = OpdsMapping[2]->BreakDown[0].RegBank->getID();
  unsigned ResultBank = regBankUnion(RSrcBank, OffsetBank);
  unsigned Size0 = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
  OpdsMapping[0] = AMDGPU::getValueMapping(ResultBank, Size0);
  break;
}
case AMDGPU::G_INTRINSIC: {
  // Per-intrinsic mappings for side-effect-free intrinsics.
  switch (MI.getIntrinsicID()) {
  default:
    return getInvalidInstructionMapping();
  // Plain VALU math intrinsics: everything on VGPRs.
  case Intrinsic::amdgcn_div_fmas:
  case Intrinsic::amdgcn_div_fixup:
  case Intrinsic::amdgcn_trig_preop:
  case Intrinsic::amdgcn_sin:
  case Intrinsic::amdgcn_cos:
  case Intrinsic::amdgcn_log_clamp:
  case Intrinsic::amdgcn_rcp:
  case Intrinsic::amdgcn_rcp_legacy:
  case Intrinsic::amdgcn_sqrt:
  case Intrinsic::amdgcn_rsq:
  case Intrinsic::amdgcn_rsq_legacy:
  case Intrinsic::amdgcn_rsq_clamp:
  case Intrinsic::amdgcn_fmul_legacy:
  case Intrinsic::amdgcn_fma_legacy:
  case Intrinsic::amdgcn_ldexp:
  case Intrinsic::amdgcn_frexp_mant:
  case Intrinsic::amdgcn_frexp_exp:
  case Intrinsic::amdgcn_fract:
  case Intrinsic::amdgcn_cvt_pkrtz:
  case Intrinsic::amdgcn_cvt_pknorm_i16:
  case Intrinsic::amdgcn_cvt_pknorm_u16:
  case Intrinsic::amdgcn_cvt_pk_i16:
  case Intrinsic::amdgcn_cvt_pk_u16:
  case Intrinsic::amdgcn_fmed3:
  case Intrinsic::amdgcn_cubeid:
  case Intrinsic::amdgcn_cubema:
  case Intrinsic::amdgcn_cubesc:
  case Intrinsic::amdgcn_cubetc:
  case Intrinsic::amdgcn_sffbh:
  case Intrinsic::amdgcn_fmad_ftz:
  case Intrinsic::amdgcn_mbcnt_lo:
  case Intrinsic::amdgcn_mbcnt_hi:
  case Intrinsic::amdgcn_mul_u24:
  case Intrinsic::amdgcn_mul_i24:
  case Intrinsic::amdgcn_mulhi_u24:
  case Intrinsic::amdgcn_mulhi_i24:
  case Intrinsic::amdgcn_lerp:
  case Intrinsic::amdgcn_sad_u8:
  case Intrinsic::amdgcn_msad_u8:
  case Intrinsic::amdgcn_sad_hi_u8:
  case Intrinsic::amdgcn_sad_u16:
  case Intrinsic::amdgcn_qsad_pk_u16_u8:
  case Intrinsic::amdgcn_mqsad_pk_u16_u8:
  case Intrinsic::amdgcn_mqsad_u32_u8:
  case Intrinsic::amdgcn_cvt_pk_u8_f32:
  case Intrinsic::amdgcn_alignbyte:
  case Intrinsic::amdgcn_perm:
  case Intrinsic::amdgcn_fdot2:
  case Intrinsic::amdgcn_sdot2:
  case Intrinsic::amdgcn_udot2:
  case Intrinsic::amdgcn_sdot4:
  case Intrinsic::amdgcn_udot4:
  case Intrinsic::amdgcn_sdot8:
  case Intrinsic::amdgcn_udot8:
  case Intrinsic::amdgcn_fdot2_bf16_bf16:
  case Intrinsic::amdgcn_fdot2_f16_f16:
  case Intrinsic::amdgcn_fdot2_f32_bf16:
  case Intrinsic::amdgcn_sudot4:
  case Intrinsic::amdgcn_sudot8:
  case Intrinsic::amdgcn_wmma_bf16_16x16x16_bf16:
  case Intrinsic::amdgcn_wmma_f16_16x16x16_f16:
  case Intrinsic::amdgcn_wmma_f32_16x16x16_bf16:
  case Intrinsic::amdgcn_wmma_f32_16x16x16_f16:
  case Intrinsic::amdgcn_wmma_i32_16x16x16_iu4:
  case Intrinsic::amdgcn_wmma_i32_16x16x16_iu8:
    return getDefaultMappingVOP(MI);
  // Bitfield extracts have scalar encodings; use them when possible.
  case Intrinsic::amdgcn_sbfe:
  case Intrinsic::amdgcn_ubfe:
    if (isSALUMapping(MI))
      return getDefaultMappingSOP(MI);
    return getDefaultMappingVOP(MI);
  // Lane-shuffle / DPP / whole-wave-mode intrinsics: inherently per-lane,
  // every operand on VGPRs.
  case Intrinsic::amdgcn_ds_swizzle:
  case Intrinsic::amdgcn_ds_permute:
  case Intrinsic::amdgcn_ds_bpermute:
  case Intrinsic::amdgcn_update_dpp:
  case Intrinsic::amdgcn_mov_dpp8:
  case Intrinsic::amdgcn_mov_dpp:
  case Intrinsic::amdgcn_strict_wwm:
  case Intrinsic::amdgcn_wwm:
  case Intrinsic::amdgcn_strict_wqm:
  case Intrinsic::amdgcn_wqm:
  case Intrinsic::amdgcn_softwqm:
  case Intrinsic::amdgcn_set_inactive:
  case Intrinsic::amdgcn_permlane64:
    return getDefaultMappingAllVGPR(MI);
  // Uniform value producers: scalar result.
  case Intrinsic::amdgcn_kernarg_segment_ptr:
  case Intrinsic::amdgcn_s_getpc:
  case Intrinsic::amdgcn_groupstaticsize:
  case Intrinsic::amdgcn_reloc_constant:
  case Intrinsic::returnaddress: {
    unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
    OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size);
    break;
  }
  case Intrinsic::amdgcn_wqm_vote: {
    // i1 in, i1 out — both as lane masks in VCC.
    unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
    OpdsMapping[0] = OpdsMapping[2]
      = AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, Size);
    break;
  }
  case Intrinsic::amdgcn_ps_live: {
    OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1);
    break;
  }
  case Intrinsic::amdgcn_div_scale: {
    // Two results: the scaled value (VGPR) and an i1 flag (VCC).
    unsigned Dst0Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
    unsigned Dst1Size = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
    OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Dst0Size);
    OpdsMapping[1] = AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, Dst1Size);
    unsigned SrcSize = MRI.getType(MI.getOperand(3).getReg()).getSizeInBits();
    OpdsMapping[3] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, SrcSize);
    OpdsMapping[4] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, SrcSize);
    break;
  }
  case Intrinsic::amdgcn_class: {
    // FP class test: VALU inputs, lane-mask (VCC) result.
    Register Src0Reg = MI.getOperand(2).getReg();
    Register Src1Reg = MI.getOperand(3).getReg();
    unsigned Src0Size = MRI.getType(Src0Reg).getSizeInBits();
    unsigned Src1Size = MRI.getType(Src1Reg).getSizeInBits();
    unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
    OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, DstSize);
    OpdsMapping[2] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Src0Size);
    OpdsMapping[3] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Src1Size);
    break;
  }
  case Intrinsic::amdgcn_icmp:
  case Intrinsic::amdgcn_fcmp: {
    // Ballot-style compares: VALU inputs, but the wave-wide mask result is a
    // uniform scalar value.
    unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
    OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, DstSize);
    unsigned OpSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits();
    OpdsMapping[2] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, OpSize);
    OpdsMapping[3] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, OpSize);
    break;
  }
  case Intrinsic::amdgcn_readlane: {
    // Lane index prefers SGPR, then shares the readfirstlane mapping for the
    // result (SGPR) and source (VGPR) — intentional fallthrough.
    Register IdxReg = MI.getOperand(3).getReg();
    unsigned IdxSize = MRI.getType(IdxReg).getSizeInBits();
    unsigned IdxBank = getRegBankID(IdxReg, MRI, AMDGPU::SGPRRegBankID);
    OpdsMapping[3] = AMDGPU::getValueMapping(IdxBank, IdxSize);
    LLVM_FALLTHROUGH;
  }
  case Intrinsic::amdgcn_readfirstlane: {
    unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
    unsigned SrcSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits();
    OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, DstSize);
    OpdsMapping[2] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, SrcSize);
    break;
  }
  case Intrinsic::amdgcn_writelane: {
    // VGPR result; the written value and lane index prefer SGPRs; the old
    // vector value (operand 4) stays in a VGPR.
    unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
    Register SrcReg = MI.getOperand(2).getReg();
    unsigned SrcSize = MRI.getType(SrcReg).getSizeInBits();
    unsigned SrcBank = getRegBankID(SrcReg, MRI, AMDGPU::SGPRRegBankID);
    Register IdxReg = MI.getOperand(3).getReg();
    unsigned IdxSize = MRI.getType(IdxReg).getSizeInBits();
    unsigned IdxBank = getRegBankID(IdxReg, MRI, AMDGPU::SGPRRegBankID);
    OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, DstSize);
    OpdsMapping[2] = AMDGPU::getValueMapping(SrcBank, SrcSize);
    OpdsMapping[3] = AMDGPU::getValueMapping(IdxBank, IdxSize);
    OpdsMapping[4] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, SrcSize);
    break;
  }
  case Intrinsic::amdgcn_if_break: {
    // Control-flow mask arithmetic: scalar wave masks plus an i1 condition.
    unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
    OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size);
    OpdsMapping[2] = AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1);
    OpdsMapping[3] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size);
    break;
  }
  case Intrinsic::amdgcn_permlane16:
  case Intrinsic::amdgcn_permlanex16: {
    // Data operands are VGPRs; the two select masks must be scalar.
    unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
    OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size);
    OpdsMapping[2] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size);
    OpdsMapping[3] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size);
    OpdsMapping[4] = getSGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI);
    OpdsMapping[5] = getSGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI);
    break;
  }
  // Matrix FMA (MFMA) intrinsics: A/B operands in VGPRs; the accumulator and
  // result use AGPRs when the function may need them, VGPRs otherwise.
  case Intrinsic::amdgcn_mfma_f32_4x4x1f32:
  case Intrinsic::amdgcn_mfma_f32_4x4x4f16:
  case Intrinsic::amdgcn_mfma_i32_4x4x4i8:
  case Intrinsic::amdgcn_mfma_f32_4x4x2bf16:
  case Intrinsic::amdgcn_mfma_f32_16x16x1f32:
  case Intrinsic::amdgcn_mfma_f32_16x16x4f32:
  case Intrinsic::amdgcn_mfma_f32_16x16x4f16:
  case Intrinsic::amdgcn_mfma_f32_16x16x16f16:
  case Intrinsic::amdgcn_mfma_i32_16x16x4i8:
  case Intrinsic::amdgcn_mfma_i32_16x16x16i8:
  case Intrinsic::amdgcn_mfma_f32_16x16x2bf16:
  case Intrinsic::amdgcn_mfma_f32_16x16x8bf16:
  case Intrinsic::amdgcn_mfma_f32_32x32x1f32:
  case Intrinsic::amdgcn_mfma_f32_32x32x2f32:
  case Intrinsic::amdgcn_mfma_f32_32x32x4f16:
  case Intrinsic::amdgcn_mfma_f32_32x32x8f16:
  case Intrinsic::amdgcn_mfma_i32_32x32x4i8:
  case Intrinsic::amdgcn_mfma_i32_32x32x8i8:
  case Intrinsic::amdgcn_mfma_f32_32x32x2bf16:
  case Intrinsic::amdgcn_mfma_f32_32x32x4bf16:
  case Intrinsic::amdgcn_mfma_f32_32x32x4bf16_1k:
  case Intrinsic::amdgcn_mfma_f32_16x16x4bf16_1k:
  case Intrinsic::amdgcn_mfma_f32_4x4x4bf16_1k:
  case Intrinsic::amdgcn_mfma_f32_32x32x8bf16_1k:
  case Intrinsic::amdgcn_mfma_f32_16x16x16bf16_1k:
  case Intrinsic::amdgcn_mfma_f64_16x16x4f64:
  case Intrinsic::amdgcn_mfma_f64_4x4x4f64:
  case Intrinsic::amdgcn_mfma_i32_16x16x32_i8:
  case Intrinsic::amdgcn_mfma_i32_32x32x16_i8:
  case Intrinsic::amdgcn_mfma_f32_16x16x8_xf32:
  case Intrinsic::amdgcn_mfma_f32_32x32x4_xf32:
  case Intrinsic::amdgcn_mfma_f32_16x16x32_bf8_bf8:
  case Intrinsic::amdgcn_mfma_f32_16x16x32_bf8_fp8:
  case Intrinsic::amdgcn_mfma_f32_16x16x32_fp8_bf8:
  case Intrinsic::amdgcn_mfma_f32_16x16x32_fp8_fp8:
  case Intrinsic::amdgcn_mfma_f32_32x32x16_bf8_bf8:
  case Intrinsic::amdgcn_mfma_f32_32x32x16_bf8_fp8:
  case Intrinsic::amdgcn_mfma_f32_32x32x16_fp8_bf8:
  case Intrinsic::amdgcn_mfma_f32_32x32x16_fp8_fp8: {
    const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
    OpdsMapping[0] =
        Info->mayNeedAGPRs()
            ? getAGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI)
            : getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI);
    OpdsMapping[2] = getVGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
    OpdsMapping[3] = getVGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI);
    OpdsMapping[4] =
        Info->mayNeedAGPRs()
            ? getAGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI)
            : getVGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI);
    break;
  }
  // Sparse MFMA (SMFMAC): result/accumulator always in AGPRs; inputs and the
  // sparsity index in VGPRs.
  case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
  case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16:
  case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16:
  case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16:
  case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8:
  case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8:
  case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_bf8:
  case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_fp8:
  case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_bf8:
  case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_fp8:
  case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_bf8:
  case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_fp8:
  case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_bf8:
  case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_fp8: {
    OpdsMapping[0] = getAGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI);
    OpdsMapping[2] = getVGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
    OpdsMapping[3] = getVGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI);
    OpdsMapping[4] = getAGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI);
    OpdsMapping[5] = getVGPROpMapping(MI.getOperand(5).getReg(), MRI, *TRI);
    break;
  }
  case Intrinsic::amdgcn_interp_p1:
  case Intrinsic::amdgcn_interp_p2:
  case Intrinsic::amdgcn_interp_mov:
  case Intrinsic::amdgcn_interp_p1_f16:
  case Intrinsic::amdgcn_interp_p2_f16:
  case Intrinsic::amdgcn_lds_param_load: {
    // Variable register-operand count; the last operand is the M0 value,
    // which should be scalar.
    const int M0Idx = MI.getNumOperands() - 1;
    Register M0Reg = MI.getOperand(M0Idx).getReg();
    unsigned M0Bank = getRegBankID(M0Reg, MRI, AMDGPU::SGPRRegBankID);
    unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
    OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, DstSize);
    for (int I = 2; I != M0Idx && MI.getOperand(I).isReg(); ++I)
      OpdsMapping[I] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32);
    // Must be SGPR, but we must take whatever the original bank is and fix it
    // later.
    OpdsMapping[M0Idx] = AMDGPU::getValueMapping(M0Bank, 32);
    break;
  }
  case Intrinsic::amdgcn_interp_inreg_p10:
  case Intrinsic::amdgcn_interp_inreg_p2:
  case Intrinsic::amdgcn_interp_inreg_p10_f16:
  case Intrinsic::amdgcn_interp_inreg_p2_f16: {
    // Fixed three 32-bit VGPR inputs, VGPR result.
    unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
    OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, DstSize);
    OpdsMapping[2] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32);
    OpdsMapping[3] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32);
    OpdsMapping[4] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32);
    break;
  }
  case Intrinsic::amdgcn_ballot: {
    // i1 condition (VCC) in, uniform wave mask (SGPR) out.
    unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
    unsigned SrcSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits();
    OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, DstSize);
    OpdsMapping[2] = AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, SrcSize);
    break;
  }
  }
  break;
}
case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16:
case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE:
case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: {
  // Image intrinsics carry a resource-argument table entry; delegate to the
  // dedicated image mapping helper.
  auto IntrID = MI.getIntrinsicID();
  const AMDGPU::RsrcIntrinsic *RSrcIntrin = AMDGPU::lookupRsrcIntrinsic(IntrID);
  assert(RSrcIntrin && "missing RsrcIntrinsic for image intrinsic");
  // Non-images can have complications from operands that can't be mapped to a
  // default bank (e.g. the sampler), so they are dispatched elsewhere.
  assert(RSrcIntrin->IsImage);
  return getImageMapping(MRI, MI, RSrcIntrin->RsrcArg);
}
case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY: {
  // N is the index of the last register operand before the trailing
  // non-register operands; that operand is the node pointer and wants SGPR.
  unsigned N = MI.getNumExplicitOperands() - 2;
  // 128-bit result (vec4 intersection data) in VGPRs.
  OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 128);
  OpdsMapping[N] = getSGPROpMapping(MI.getOperand(N).getReg(), MRI, *TRI);
  if (N == 3) {
    // Sequential (merged-vector) form: one big VGPR operand, rounded up to
    // the next supported register size past 256 bits.
    unsigned Size = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits();
    if (Size > 256)
      Size = 512;
    OpdsMapping[2] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size);
  } else {
    // NSA form: each ray component is its own VGPR operand.
    for (unsigned I = 2; I < N; ++I) {
      unsigned Size = MRI.getType(MI.getOperand(I).getReg()).getSizeInBits();
      OpdsMapping[I] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size);
    }
  }
  break;
}
case AMDGPU::G_INTRINSIC_W_SIDE_EFFECTS: {
auto IntrID = MI.getIntrinsicID();
switch (IntrID) {
case Intrinsic::amdgcn_s_getreg:
case Intrinsic::amdgcn_s_memtime:
case Intrinsic::amdgcn_s_memrealtime:
case Intrinsic::amdgcn_s_get_waveid_in_workgroup:
case Intrinsic::amdgcn_s_sendmsg_rtn: {
unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size);
break;
}
case Intrinsic::amdgcn_global_atomic_fadd:
case Intrinsic::amdgcn_global_atomic_csub:
case Intrinsic::amdgcn_global_atomic_fmin:
case Intrinsic::amdgcn_global_atomic_fmax:
case Intrinsic::amdgcn_flat_atomic_fadd:
case Intrinsic::amdgcn_flat_atomic_fmin:
case Intrinsic::amdgcn_flat_atomic_fmax:
case Intrinsic::amdgcn_global_atomic_fadd_v2bf16:
case Intrinsic::amdgcn_flat_atomic_fadd_v2bf16:
return getDefaultMappingAllVGPR(MI);
case Intrinsic::amdgcn_ds_ordered_add:
case Intrinsic::amdgcn_ds_ordered_swap: {
unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, DstSize);
unsigned M0Bank = getRegBankID(MI.getOperand(2).getReg(), MRI,
AMDGPU::SGPRRegBankID);
OpdsMapping[2] = AMDGPU::getValueMapping(M0Bank, 32);
OpdsMapping[3] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32);
break;
}
case Intrinsic::amdgcn_ds_append:
case Intrinsic::amdgcn_ds_consume: {
unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, DstSize);
OpdsMapping[2] = getSGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
break;
}
case Intrinsic::amdgcn_exp_compr:
OpdsMapping[3] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32);
OpdsMapping[4] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32);
break;
case Intrinsic::amdgcn_exp:
OpdsMapping[3] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32);
OpdsMapping[4] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32);
OpdsMapping[5] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32);
OpdsMapping[6] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32);
break;
case Intrinsic::amdgcn_exp_row:
OpdsMapping[3] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32);
OpdsMapping[4] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32);
OpdsMapping[5] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32);
OpdsMapping[6] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32);
OpdsMapping[8] = getSGPROpMapping(MI.getOperand(8).getReg(), MRI, *TRI);
break;
case Intrinsic::amdgcn_s_sendmsg:
case Intrinsic::amdgcn_s_sendmsghalt: {
unsigned Bank = getRegBankID(MI.getOperand(2).getReg(), MRI,
AMDGPU::SGPRRegBankID);
OpdsMapping[2] = AMDGPU::getValueMapping(Bank, 32);
break;
}
case Intrinsic::amdgcn_s_setreg: {
unsigned Bank = getRegBankID(MI.getOperand(2).getReg(), MRI,
AMDGPU::SGPRRegBankID);
OpdsMapping[2] = AMDGPU::getValueMapping(Bank, 32);
break;
}
case Intrinsic::amdgcn_end_cf: {
unsigned Size = getSizeInBits(MI.getOperand(1).getReg(), MRI, *TRI);
OpdsMapping[1] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size);
break;
}
case Intrinsic::amdgcn_else: {
unsigned WaveSize = getSizeInBits(MI.getOperand(1).getReg(), MRI, *TRI);
OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1);
OpdsMapping[1] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, WaveSize);
OpdsMapping[3] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, WaveSize);
break;
}
case Intrinsic::amdgcn_live_mask: {
OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1);
break;
}
case Intrinsic::amdgcn_wqm_demote:
case Intrinsic::amdgcn_kill: {
OpdsMapping[1] = AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1);
break;
}
case Intrinsic::amdgcn_raw_buffer_load:
case Intrinsic::amdgcn_raw_tbuffer_load: {
OpdsMapping[0] = getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI);
OpdsMapping[2] = getSGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
OpdsMapping[3] = getVGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI);
OpdsMapping[4] = getSGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI);
break;
}
case Intrinsic::amdgcn_raw_buffer_load_lds: {
OpdsMapping[1] = getSGPROpMapping(MI.getOperand(1).getReg(), MRI, *TRI);
OpdsMapping[2] = getSGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
OpdsMapping[4] = getVGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI);
OpdsMapping[5] = getSGPROpMapping(MI.getOperand(5).getReg(), MRI, *TRI);
break;
}
case Intrinsic::amdgcn_raw_buffer_store:
case Intrinsic::amdgcn_raw_buffer_store_format:
case Intrinsic::amdgcn_raw_tbuffer_store: {
OpdsMapping[1] = getVGPROpMapping(MI.getOperand(1).getReg(), MRI, *TRI);
OpdsMapping[2] = getSGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
OpdsMapping[3] = getVGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI);
OpdsMapping[4] = getSGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI);
break;
}
case Intrinsic::amdgcn_struct_buffer_load:
case Intrinsic::amdgcn_struct_tbuffer_load: {
OpdsMapping[0] = getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI);
OpdsMapping[2] = getSGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
OpdsMapping[3] = getVGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI);
OpdsMapping[4] = getVGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI);
OpdsMapping[5] = getSGPROpMapping(MI.getOperand(5).getReg(), MRI, *TRI);
break;
}
case Intrinsic::amdgcn_struct_buffer_load_lds: {
OpdsMapping[1] = getSGPROpMapping(MI.getOperand(1).getReg(), MRI, *TRI);
OpdsMapping[2] = getSGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
OpdsMapping[4] = getVGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI);
OpdsMapping[5] = getVGPROpMapping(MI.getOperand(5).getReg(), MRI, *TRI);
OpdsMapping[6] = getSGPROpMapping(MI.getOperand(6).getReg(), MRI, *TRI);
break;
}
case Intrinsic::amdgcn_struct_buffer_store:
case Intrinsic::amdgcn_struct_tbuffer_store: {
OpdsMapping[1] = getVGPROpMapping(MI.getOperand(1).getReg(), MRI, *TRI);
OpdsMapping[2] = getSGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
OpdsMapping[3] = getVGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI);
OpdsMapping[4] = getVGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI);
OpdsMapping[5] = getSGPROpMapping(MI.getOperand(5).getReg(), MRI, *TRI);
break;
}
case Intrinsic::amdgcn_init_exec_from_input: {
// The input writes EXEC, which is a scalar resource: force the single
// register operand to the SGPR bank at its natural size.
unsigned Size = getSizeInBits(MI.getOperand(1).getReg(), MRI, *TRI);
OpdsMapping[1] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size);
break;
}
case Intrinsic::amdgcn_ds_gws_init:
case Intrinsic::amdgcn_ds_gws_barrier:
case Intrinsic::amdgcn_ds_gws_sema_br: {
// Data operand (1) is always VGPR; the GWS offset (2) keeps whatever bank
// it already has, defaulting to SGPR when still unassigned.
OpdsMapping[1] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32);
// This must be an SGPR, but accept a VGPR.
unsigned Bank = getRegBankID(MI.getOperand(2).getReg(), MRI,
AMDGPU::SGPRRegBankID);
OpdsMapping[2] = AMDGPU::getValueMapping(Bank, 32);
break;
}
case Intrinsic::amdgcn_ds_gws_sema_v:
case Intrinsic::amdgcn_ds_gws_sema_p:
case Intrinsic::amdgcn_ds_gws_sema_release_all: {
// These take only the GWS offset operand; same bank policy as above.
unsigned Bank = getRegBankID(MI.getOperand(1).getReg(), MRI,
AMDGPU::SGPRRegBankID);
OpdsMapping[1] = AMDGPU::getValueMapping(Bank, 32);
break;
}
case Intrinsic::amdgcn_global_load_lds: {
// Global address (1) may be divergent -> VGPR; the LDS destination
// pointer (2) must be scalar -> SGPR.
OpdsMapping[1] = getVGPROpMapping(MI.getOperand(1).getReg(), MRI, *TRI);
OpdsMapping[2] = getSGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
break;
}
case Intrinsic::amdgcn_lds_direct_load: {
// The last operand feeds M0 and so prefers SGPR (kept if already
// assigned a bank); the result and all intermediate register operands
// are mapped to VGPRs.
const int M0Idx = MI.getNumOperands() - 1;
Register M0Reg = MI.getOperand(M0Idx).getReg();
unsigned M0Bank = getRegBankID(M0Reg, MRI, AMDGPU::SGPRRegBankID);
unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, DstSize);
// Operand 1 is the intrinsic ID immediate, so start at 2 and stop at the
// first non-register operand (or M0 itself).
for (int I = 2; I != M0Idx && MI.getOperand(I).isReg(); ++I)
OpdsMapping[I] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32);
OpdsMapping[M0Idx] = AMDGPU::getValueMapping(M0Bank, 32);
break;
}
case Intrinsic::amdgcn_ds_add_gs_reg_rtn:
case Intrinsic::amdgcn_ds_sub_gs_reg_rtn:
// Result (0) and data operand (2) both live in VGPRs.
OpdsMapping[0] = getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI);
OpdsMapping[2] = getVGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
break;
default:
// Unrecognized side-effecting intrinsic: report that no mapping exists.
return getInvalidInstructionMapping();
}
break;
}
case AMDGPU::G_SELECT: {
// A select can stay scalar only when both value inputs are SGPRs and the
// condition ends up as an SGPR s1; any VGPR involvement pushes the
// condition to VCC and the result to VGPRs.
unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
unsigned Op2Bank = getRegBankID(MI.getOperand(2).getReg(), MRI,
AMDGPU::SGPRRegBankID);
unsigned Op3Bank = getRegBankID(MI.getOperand(3).getReg(), MRI,
AMDGPU::SGPRRegBankID);
bool SGPRSrcs = Op2Bank == AMDGPU::SGPRRegBankID &&
Op3Bank == AMDGPU::SGPRRegBankID;
// Default the condition bank from the sources when it has none yet.
unsigned CondBankDefault = SGPRSrcs ?
AMDGPU::SGPRRegBankID : AMDGPU::VCCRegBankID;
unsigned CondBank = getRegBankID(MI.getOperand(1).getReg(), MRI,
CondBankDefault);
// An SGPR condition only stays scalar if both sources are scalar too;
// a VGPR condition is always normalized to VCC.
if (CondBank == AMDGPU::SGPRRegBankID)
CondBank = SGPRSrcs ? AMDGPU::SGPRRegBankID : AMDGPU::VCCRegBankID;
else if (CondBank == AMDGPU::VGPRRegBankID)
CondBank = AMDGPU::VCCRegBankID;
unsigned Bank = SGPRSrcs && CondBank == AMDGPU::SGPRRegBankID ?
AMDGPU::SGPRRegBankID : AMDGPU::VGPRRegBankID;
assert(CondBank == AMDGPU::VCCRegBankID || CondBank == AMDGPU::SGPRRegBankID);
// 64-bit selects on SGPRs need the split SGPR64-only value mapping.
if (Size == 64) {
OpdsMapping[0] = AMDGPU::getValueMappingSGPR64Only(Bank, Size);
OpdsMapping[1] = AMDGPU::getValueMapping(CondBank, 1);
OpdsMapping[2] = AMDGPU::getValueMappingSGPR64Only(Bank, Size);
OpdsMapping[3] = AMDGPU::getValueMappingSGPR64Only(Bank, Size);
} else {
OpdsMapping[0] = AMDGPU::getValueMapping(Bank, Size);
OpdsMapping[1] = AMDGPU::getValueMapping(CondBank, 1);
OpdsMapping[2] = AMDGPU::getValueMapping(Bank, Size);
OpdsMapping[3] = AMDGPU::getValueMapping(Bank, Size);
}
break;
}
case AMDGPU::G_SI_CALL: {
// Operand 0 is a fixed 64-bit SGPR pair; operand 1 (the callee) must
// also be scalar.
OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 64);
OpdsMapping[1] = getSGPROpMapping(MI.getOperand(1).getReg(), MRI, *TRI);
// Remaining register operands (call arguments / implicit uses, starting
// at index 4) keep whatever bank they already have.
for (unsigned I = 4; I < MI.getNumOperands(); ++I) {
if (MI.getOperand(I).isReg()) {
Register Reg = MI.getOperand(I).getReg();
auto OpBank = getRegBankID(Reg, MRI);
unsigned Size = getSizeInBits(Reg, MRI, *TRI);
OpdsMapping[I] = AMDGPU::getValueMapping(OpBank, Size);
}
}
break;
}
case AMDGPU::G_LOAD:
case AMDGPU::G_ZEXTLOAD:
case AMDGPU::G_SEXTLOAD:
// Loads have dedicated mapping logic (scalar vs. vector memory paths).
return getInstrMappingForLoad(MI);
case AMDGPU::G_ATOMICRMW_XCHG:
case AMDGPU::G_ATOMICRMW_ADD:
case AMDGPU::G_ATOMICRMW_SUB:
case AMDGPU::G_ATOMICRMW_AND:
case AMDGPU::G_ATOMICRMW_OR:
case AMDGPU::G_ATOMICRMW_XOR:
case AMDGPU::G_ATOMICRMW_MAX:
case AMDGPU::G_ATOMICRMW_MIN:
case AMDGPU::G_ATOMICRMW_UMAX:
case AMDGPU::G_ATOMICRMW_UMIN:
case AMDGPU::G_ATOMICRMW_FADD:
case AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG:
case AMDGPU::G_AMDGPU_ATOMIC_INC:
case AMDGPU::G_AMDGPU_ATOMIC_DEC:
case AMDGPU::G_AMDGPU_ATOMIC_FMIN:
case AMDGPU::G_AMDGPU_ATOMIC_FMAX: {
// Atomics execute in the vector memory path: result and data value are
// VGPRs; the pointer mapping depends on its address space/bank.
OpdsMapping[0] = getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI);
OpdsMapping[1] = getValueMappingForPtr(MRI, MI.getOperand(1).getReg());
OpdsMapping[2] = getVGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
break;
}
case AMDGPU::G_ATOMIC_CMPXCHG: {
// Same as the RMW atomics, with an extra VGPR operand for the new value
// (2 = compare value, 3 = new value).
OpdsMapping[0] = getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI);
OpdsMapping[1] = getValueMappingForPtr(MRI, MI.getOperand(1).getReg());
OpdsMapping[2] = getVGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
OpdsMapping[3] = getVGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI);
break;
}
case AMDGPU::G_BRCOND: {
// The 1-bit branch condition either stays a scalar SGPR bool or is
// normalized to VCC; a plain VGPR s1 is not a valid condition bank.
unsigned Bank = getRegBankID(MI.getOperand(0).getReg(), MRI,
AMDGPU::SGPRRegBankID);
assert(MRI.getType(MI.getOperand(0).getReg()).getSizeInBits() == 1);
if (Bank != AMDGPU::SGPRRegBankID)
Bank = AMDGPU::VCCRegBankID;
OpdsMapping[0] = AMDGPU::getValueMapping(Bank, 1);
break;
}
case AMDGPU::G_FPTRUNC_ROUND_UPWARD:
case AMDGPU::G_FPTRUNC_ROUND_DOWNWARD:
// Plain VALU operations: all operands go to VGPRs.
return getDefaultMappingVOP(MI);
}
// Fall-through result: build a single-alternative mapping from whatever
// the switch filled into OpdsMapping.
return getInstructionMapping(1, 1,
getOperandsMapping(OpdsMapping),
MI.getNumOperands());
}