#include "RISCVISelDAGToDAG.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVISelLowering.h"
#include "RISCVMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
#define DEBUG_TYPE "riscv-isel"
namespace llvm {
namespace RISCV {
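// Tablegen'd lookup tables for the RVV load/store and masked pseudo
// instructions.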
#define GET_RISCVVSSEGTable_IMPL
#define GET_RISCVVLSEGTable_IMPL
#define GET_RISCVVLXSEGTable_IMPL
#define GET_RISCVVSXSEGTable_IMPL
#define GET_RISCVVLETable_IMPL
#define GET_RISCVVSETable_IMPL
#define GET_RISCVVLXTable_IMPL
#define GET_RISCVVSXTable_IMPL
#define GET_RISCVMaskedPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"
} // namespace RISCV
} // namespace llvm
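// Rewrite certain target-specific nodes before instruction selection so
// fewer isel patterns are needed; nodes made dead by the rewrite are
// cleaned up afterwards.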
void RISCVDAGToDAGISel::PreprocessISelDAG() {
SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
bool MadeChange = false;
while (Position != CurDAG->allnodes_begin()) {
SDNode *N = &*--Position;
if (N->use_empty())
continue;
SDValue Result;
switch (N->getOpcode()) {
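// Convert integer SPLAT_VECTOR to VMV_V_X_VL and floating-point
// SPLAT_VECTOR to VFMV_V_F_VL to reduce isel pattern count.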
case ISD::SPLAT_VECTOR: {
MVT VT = N->getSimpleValueType(0);
unsigned Opc =
VT.isInteger() ? RISCVISD::VMV_V_X_VL : RISCVISD::VFMV_V_F_VL;
SDLoc DL(N);
SDValue VL = CurDAG->getRegister(RISCV::X0, Subtarget->getXLenVT());
Result = CurDAG->getNode(Opc, DL, VT, CurDAG->getUNDEF(VT),
N->getOperand(0), VL);
break;
}
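// Lower SPLAT_VECTOR_SPLIT_I64 to two scalar stores and a stride-0 vector
// load; this is used when an i64 splat value arrives as two i32 halves.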
case RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL: {
assert(N->getNumOperands() == 4 && "Unexpected number of operands");
MVT VT = N->getSimpleValueType(0);
SDValue Passthru = N->getOperand(0);
SDValue Lo = N->getOperand(1);
SDValue Hi = N->getOperand(2);
SDValue VL = N->getOperand(3);
assert(VT.getVectorElementType() == MVT::i64 && VT.isScalableVector() &&
Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
"Unexpected VTs!");
MachineFunction &MF = CurDAG->getMachineFunction();
RISCVMachineFunctionInfo *FuncInfo =
MF.getInfo<RISCVMachineFunctionInfo>();
SDLoc DL(N);
int FI = FuncInfo->getMoveF64FrameIndex(MF);
MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
SDValue StackSlot =
CurDAG->getFrameIndex(FI, TLI.getPointerTy(CurDAG->getDataLayout()));
SDValue Chain = CurDAG->getEntryNode();
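// Spill the two halves to adjacent stack slots, then reload them as a
// single i64 element per lane with a zero-stride vlse.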
Lo = CurDAG->getStore(Chain, DL, Lo, StackSlot, MPI, Align(8));
SDValue OffsetSlot =
CurDAG->getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), DL);
Hi = CurDAG->getStore(Chain, DL, Hi, OffsetSlot, MPI.getWithOffset(4),
Align(8));
Chain = CurDAG->getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
SDVTList VTs = CurDAG->getVTList({VT, MVT::Other});
SDValue IntID =
CurDAG->getTargetConstant(Intrinsic::riscv_vlse, DL, MVT::i64);
SDValue Ops[] = {Chain,
IntID,
Passthru,
StackSlot,
CurDAG->getRegister(RISCV::X0, MVT::i64),
VL};
Result = CurDAG->getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
MVT::i64, MPI, Align(8),
MachineMemOperand::MOLoad);
break;
}
}
if (Result) {
LLVM_DEBUG(dbgs() << "RISCV DAG preprocessing replacing:\nOld: ");
LLVM_DEBUG(N->dump(CurDAG));
LLVM_DEBUG(dbgs() << "\nNew: ");
LLVM_DEBUG(Result->dump(CurDAG));
LLVM_DEBUG(dbgs() << "\n");
CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
MadeChange = true;
}
}
if (MadeChange)
CurDAG->RemoveDeadNodes();
}
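// Run RISC-V specific peepholes over the machine nodes produced by
// instruction selection.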
void RISCVDAGToDAGISel::PostprocessISelDAG() {
HandleSDNode Dummy(CurDAG->getRoot());
SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
bool MadeChange = false;
while (Position != CurDAG->allnodes_begin()) {
SDNode *N = &*--Position;
if (N->use_empty() || !N->isMachineOpcode())
continue;
MadeChange |= doPeepholeSExtW(N);
MadeChange |= doPeepholeMaskedRVV(N);
}
CurDAG->setRoot(Dummy.getValue());
if (MadeChange)
CurDAG->RemoveDeadNodes();
}
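// Emit the instruction sequence computed by RISCVMatInt for an immediate,
// threading each instruction's result into the next as the source register.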
static SDNode *selectImmSeq(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
RISCVMatInt::InstSeq &Seq) {
SDNode *Result = nullptr;
SDValue SrcReg = CurDAG->getRegister(RISCV::X0, VT);
for (RISCVMatInt::Inst &Inst : Seq) {
SDValue SDImm = CurDAG->getTargetConstant(Inst.Imm, DL, VT);
switch (Inst.getOpndKind()) {
case RISCVMatInt::Imm:
Result = CurDAG->getMachineNode(Inst.Opc, DL, VT, SDImm);
break;
case RISCVMatInt::RegX0:
Result = CurDAG->getMachineNode(Inst.Opc, DL, VT, SrcReg,
CurDAG->getRegister(RISCV::X0, VT));
break;
case RISCVMatInt::RegReg:
Result = CurDAG->getMachineNode(Inst.Opc, DL, VT, SrcReg, SrcReg);
break;
case RISCVMatInt::RegImm:
Result = CurDAG->getMachineNode(Inst.Opc, DL, VT, SrcReg, SDImm);
break;
}
SrcReg = SDValue(Result, 0);
}
return Result;
}
static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
int64_t Imm, const RISCVSubtarget &Subtarget) {
RISCVMatInt::InstSeq Seq =
RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());
return selectImmSeq(CurDAG, DL, VT, Seq);
}
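// Group NF vector registers into a REG_SEQUENCE tuple operand for the
// segment load/store pseudos, picking the register class from NF and LMUL.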
static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
unsigned NF, RISCVII::VLMUL LMUL) {
static const unsigned M1TupleRegClassIDs[] = {
RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
RISCV::VRN8M1RegClassID};
static const unsigned M2TupleRegClassIDs[] = {RISCV::VRN2M2RegClassID,
RISCV::VRN3M2RegClassID,
RISCV::VRN4M2RegClassID};
assert(Regs.size() >= 2 && Regs.size() <= 8);
unsigned RegClassID;
unsigned SubReg0;
switch (LMUL) {
default:
llvm_unreachable("Invalid LMUL.");
case RISCVII::VLMUL::LMUL_F8:
case RISCVII::VLMUL::LMUL_F4:
case RISCVII::VLMUL::LMUL_F2:
case RISCVII::VLMUL::LMUL_1:
static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
"Unexpected subreg numbering");
SubReg0 = RISCV::sub_vrm1_0;
RegClassID = M1TupleRegClassIDs[NF - 2];
break;
case RISCVII::VLMUL::LMUL_2:
static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
"Unexpected subreg numbering");
SubReg0 = RISCV::sub_vrm2_0;
RegClassID = M2TupleRegClassIDs[NF - 2];
break;
case RISCVII::VLMUL::LMUL_4:
static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
"Unexpected subreg numbering");
SubReg0 = RISCV::sub_vrm4_0;
RegClassID = RISCV::VRN2M4RegClassID;
break;
}
SDLoc DL(Regs[0]);
SmallVector<SDValue, 8> Ops;
Ops.push_back(CurDAG.getTargetConstant(RegClassID, DL, MVT::i32));
for (unsigned I = 0; I < Regs.size(); ++I) {
Ops.push_back(Regs[I]);
Ops.push_back(CurDAG.getTargetConstant(SubReg0 + I, DL, MVT::i32));
}
SDNode *N =
CurDAG.getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
return SDValue(N, 0);
}
void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
bool IsLoad, MVT *IndexVT) {
SDValue Chain = Node->getOperand(0);
SDValue Glue;
Operands.push_back(Node->getOperand(CurOp++));
if (IsStridedOrIndexed) {
Operands.push_back(Node->getOperand(CurOp++));
if (IndexVT)
*IndexVT = Operands.back()->getSimpleValueType(0);
}
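// A masked operation reads its mask from V0, so copy the mask there and
// glue the copy to the memory operation.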
if (IsMasked) {
SDValue Mask = Node->getOperand(CurOp++);
Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
Glue = Chain.getValue(1);
Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
}
SDValue VL;
selectVLOp(Node->getOperand(CurOp++), VL);
Operands.push_back(VL);
MVT XLenVT = Subtarget->getXLenVT();
SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
Operands.push_back(SEWOp);
if (IsMasked && IsLoad) {
uint64_t Policy = Node->getConstantOperandVal(CurOp++);
SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT);
Operands.push_back(PolicyOp);
}
Operands.push_back(Chain);
if (Glue)
Operands.push_back(Glue);
}
static bool isAllUndef(ArrayRef<SDValue> Values) {
return llvm::all_of(Values, [](SDValue V) { return V->isUndef(); });
}
void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
bool IsStrided) {
SDLoc DL(Node);
unsigned NF = Node->getNumValues() - 1;
MVT VT = Node->getSimpleValueType(0);
unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
unsigned CurOp = 2;
SmallVector<SDValue, 8> Operands;
SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
Node->op_begin() + CurOp + NF);
bool IsTU = IsMasked || !isAllUndef(Regs);
if (IsTU) {
SDValue Merge = createTuple(*CurDAG, Regs, NF, LMUL);
Operands.push_back(Merge);
}
CurOp += NF;
addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
Operands, true);
const RISCV::VLSEGPseudo *P =
RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW,
static_cast<unsigned>(LMUL));
MachineSDNode *Load =
CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
SDValue SuperReg = SDValue(Load, 0);
for (unsigned I = 0; I < NF; ++I) {
unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
ReplaceUses(SDValue(Node, I),
CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
}
ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
CurDAG->RemoveDeadNode(Node);
}
void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
SDLoc DL(Node);
unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
MVT VT = Node->getSimpleValueType(0);
MVT XLenVT = Subtarget->getXLenVT();
unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
unsigned CurOp = 2;
SmallVector<SDValue, 7> Operands;
SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
Node->op_begin() + CurOp + NF);
bool IsTU = IsMasked || !isAllUndef(Regs);
if (IsTU) {
SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
Operands.push_back(MaskedOff);
}
CurOp += NF;
addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
/*IsStridedOrIndexed*/ false, Operands,
/*IsLoad=*/true);
const RISCV::VLSEGPseudo *P =
RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
Log2SEW, static_cast<unsigned>(LMUL));
MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped,
XLenVT, MVT::Other, Operands);
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
SDValue SuperReg = SDValue(Load, 0);
for (unsigned I = 0; I < NF; ++I) {
unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
ReplaceUses(SDValue(Node, I),
CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
}
ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));     // VL
ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 2)); // Chain
CurDAG->RemoveDeadNode(Node);
}
void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
bool IsOrdered) {
SDLoc DL(Node);
unsigned NF = Node->getNumValues() - 1;
MVT VT = Node->getSimpleValueType(0);
unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
unsigned CurOp = 2;
SmallVector<SDValue, 8> Operands;
SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
Node->op_begin() + CurOp + NF);
bool IsTU = IsMasked || !isAllUndef(Regs);
if (IsTU) {
SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
Operands.push_back(MaskedOff);
}
CurOp += NF;
MVT IndexVT;
addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
/*IsStridedOrIndexed*/ true, Operands,
/*IsLoad=*/true, &IndexVT);
assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
"Element count mismatch");
RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
report_fatal_error("The V extension does not support EEW=64 for index "
"values when XLEN=32");
}
const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
NF, IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
MachineSDNode *Load =
CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
SDValue SuperReg = SDValue(Load, 0);
for (unsigned I = 0; I < NF; ++I) {
unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
ReplaceUses(SDValue(Node, I),
CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
}
ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
CurDAG->RemoveDeadNode(Node);
}
void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
bool IsStrided) {
SDLoc DL(Node);
unsigned NF = Node->getNumOperands() - 4;
if (IsStrided)
NF--;
if (IsMasked)
NF--;
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
SmallVector<SDValue, 8> Operands;
Operands.push_back(StoreVal);
unsigned CurOp = 2 + NF;
addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
Operands);
const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
MachineSDNode *Store =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
ReplaceNode(Node, Store);
}
void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
bool IsOrdered) {
SDLoc DL(Node);
unsigned NF = Node->getNumOperands() - 5;
if (IsMasked)
--NF;
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
SmallVector<SDValue, 8> Operands;
Operands.push_back(StoreVal);
unsigned CurOp = 2 + NF;
MVT IndexVT;
addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
/*IsStridedOrIndexed*/ true, Operands,
/*IsLoad=*/false, &IndexVT);
assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
"Element count mismatch");
RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
report_fatal_error("The V extension does not support EEW=64 for index "
"values when XLEN=32");
}
const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
MachineSDNode *Store =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
ReplaceNode(Node, Store);
}
void RISCVDAGToDAGISel::selectVSETVLI(SDNode *Node) {
if (!Subtarget->hasVInstructions())
return;
assert((Node->getOpcode() == ISD::INTRINSIC_W_CHAIN ||
Node->getOpcode() == ISD::INTRINSIC_WO_CHAIN) &&
"Unexpected opcode");
SDLoc DL(Node);
MVT XLenVT = Subtarget->getXLenVT();
bool HasChain = Node->getOpcode() == ISD::INTRINSIC_W_CHAIN;
unsigned IntNoOffset = HasChain ? 1 : 0;
unsigned IntNo = Node->getConstantOperandVal(IntNoOffset);
assert((IntNo == Intrinsic::riscv_vsetvli ||
IntNo == Intrinsic::riscv_vsetvlimax ||
IntNo == Intrinsic::riscv_vsetvli_opt ||
IntNo == Intrinsic::riscv_vsetvlimax_opt) &&
"Unexpected vsetvli intrinsic");
bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax ||
IntNo == Intrinsic::riscv_vsetvlimax_opt;
unsigned Offset = IntNoOffset + (VLMax ? 1 : 2);
assert(Node->getNumOperands() == Offset + 2 &&
"Unexpected number of operands");
unsigned SEW =
RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
Node->getConstantOperandVal(Offset + 1) & 0x7);
unsigned VTypeI = RISCVVType::encodeVTYPE(VLMul, SEW, /*TailAgnostic*/ true,
/*MaskAgnostic*/ false);
SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);
SmallVector<EVT, 2> VTs = {XLenVT};
if (HasChain)
VTs.push_back(MVT::Other);
SDValue VLOperand;
unsigned Opcode = RISCV::PseudoVSETVLI;
if (VLMax) {
VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
Opcode = RISCV::PseudoVSETVLIX0;
} else {
VLOperand = Node->getOperand(IntNoOffset + 1);
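// If the AVL is a small constant, use the immediate form vsetivli.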
if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
uint64_t AVL = C->getZExtValue();
if (isUInt<5>(AVL)) {
SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
SmallVector<SDValue, 3> Ops = {VLImm, VTypeIOp};
if (HasChain)
Ops.push_back(Node->getOperand(0));
ReplaceNode(
Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, VTs, Ops));
return;
}
}
}
SmallVector<SDValue, 3> Ops = {VLOperand, VTypeIOp};
if (HasChain)
Ops.push_back(Node->getOperand(0));
ReplaceNode(Node, CurDAG->getMachineNode(Opcode, DL, VTs, Ops));
}
void RISCVDAGToDAGISel::Select(SDNode *Node) {
if (Node->isMachineOpcode()) {
LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
Node->setNodeId(-1);
return;
}
unsigned Opcode = Node->getOpcode();
MVT XLenVT = Subtarget->getXLenVT();
SDLoc DL(Node);
MVT VT = Node->getSimpleValueType(0);
switch (Opcode) {
case ISD::Constant: {
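// Zero is materialized as a copy from X0. Otherwise, narrow the immediate
// first when every user only reads the low 16 or 32 bits, then expand it
// with a RISCVMatInt sequence.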
auto *ConstNode = cast<ConstantSDNode>(Node);
if (VT == XLenVT && ConstNode->isZero()) {
SDValue New =
CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, XLenVT);
ReplaceNode(Node, New.getNode());
return;
}
int64_t Imm = ConstNode->getSExtValue();
if (isUInt<16>(Imm) && isInt<12>(SignExtend64<16>(Imm)) &&
hasAllHUsers(Node))
Imm = SignExtend64<16>(Imm);
if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
Imm = SignExtend64<32>(Imm);
ReplaceNode(Node, selectImm(CurDAG, DL, VT, Imm, *Subtarget));
return;
}
case ISD::SHL: {
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
if (!N1C)
break;
SDValue N0 = Node->getOperand(0);
if (N0.getOpcode() != ISD::AND || !N0.hasOneUse() ||
!isa<ConstantSDNode>(N0.getOperand(1)))
break;
unsigned ShAmt = N1C->getZExtValue();
uint64_t Mask = N0.getConstantOperandVal(1);
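// Optimize (shl (and X, C2), C) -> (slli (srliw X, C3), C3+C)
// where C2 has 32 leading zeros and C3 trailing zeros.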
if (ShAmt <= 32 && isShiftedMask_64(Mask)) {
unsigned XLen = Subtarget->getXLen();
unsigned LeadingZeros = XLen - (64 - countLeadingZeros(Mask));
unsigned TrailingZeros = countTrailingZeros(Mask);
if (TrailingZeros > 0 && LeadingZeros == 32) {
SDNode *SRLIW = CurDAG->getMachineNode(
RISCV::SRLIW, DL, VT, N0->getOperand(0),
CurDAG->getTargetConstant(TrailingZeros, DL, VT));
SDNode *SLLI = CurDAG->getMachineNode(
RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
CurDAG->getTargetConstant(TrailingZeros + ShAmt, DL, VT));
ReplaceNode(Node, SLLI);
return;
}
}
break;
}
case ISD::SRL: {
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
if (!N1C)
break;
SDValue N0 = Node->getOperand(0);
if (N0.getOpcode() != ISD::AND || !N0.hasOneUse() ||
!isa<ConstantSDNode>(N0.getOperand(1)))
break;
unsigned ShAmt = N1C->getZExtValue();
uint64_t Mask = N0.getConstantOperandVal(1);
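// Optimize (srl (and X, C2), C) -> (slli (srliw X, C3), C3-C)
// where C2 has 32 leading zeros and C3 trailing zeros.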
if (isShiftedMask_64(Mask)) {
unsigned XLen = Subtarget->getXLen();
unsigned LeadingZeros = XLen - (64 - countLeadingZeros(Mask));
unsigned TrailingZeros = countTrailingZeros(Mask);
if (LeadingZeros == 32 && TrailingZeros > ShAmt) {
SDNode *SRLIW = CurDAG->getMachineNode(
RISCV::SRLIW, DL, VT, N0->getOperand(0),
CurDAG->getTargetConstant(TrailingZeros, DL, VT));
SDNode *SLLI = CurDAG->getMachineNode(
RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
CurDAG->getTargetConstant(TrailingZeros - ShAmt, DL, VT));
ReplaceNode(Node, SLLI);
return;
}
}
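// Optimize (srl (and X, C2), C) -> (srli (slli X, XLen-C3), XLen-C3+C)
// where C2 is a mask with C3 trailing ones, accounting for low bits that
// SimplifyDemandedBits may have cleared. With Zbs, extracting a single
// bit becomes BEXTI instead.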
Mask |= maskTrailingOnes<uint64_t>(ShAmt);
if (!isMask_64(Mask))
break;
unsigned TrailingOnes = countTrailingOnes(Mask);
if (TrailingOnes == 32 || ShAmt >= TrailingOnes)
break;
if (Subtarget->hasStdExtZbs() && ShAmt + 1 == TrailingOnes) {
SDNode *BEXTI =
CurDAG->getMachineNode(RISCV::BEXTI, DL, VT, N0->getOperand(0),
CurDAG->getTargetConstant(ShAmt, DL, VT));
ReplaceNode(Node, BEXTI);
return;
}
unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
SDNode *SLLI =
CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
CurDAG->getTargetConstant(LShAmt, DL, VT));
SDNode *SRLI = CurDAG->getMachineNode(
RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
ReplaceNode(Node, SRLI);
return;
}
case ISD::SRA: {
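// Optimize (sra (sext_inreg X, iN), C) -> (srai (slli X, XLen-N), XLen-N+C)
// for N < 32. This can occur when Zbb is enabled, which makes
// sext_inreg i16/i8 legal.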
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
if (!N1C)
break;
SDValue N0 = Node->getOperand(0);
if (N0.getOpcode() != ISD::SIGN_EXTEND_INREG || !N0.hasOneUse())
break;
unsigned ShAmt = N1C->getZExtValue();
unsigned ExtSize =
cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
if (ExtSize >= 32 || ShAmt >= ExtSize)
break;
unsigned LShAmt = Subtarget->getXLen() - ExtSize;
SDNode *SLLI =
CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
CurDAG->getTargetConstant(LShAmt, DL, VT));
SDNode *SRAI = CurDAG->getMachineNode(
RISCV::SRAI, DL, VT, SDValue(SLLI, 0),
CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
ReplaceNode(Node, SRAI);
return;
}
case ISD::AND: {
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
if (!N1C)
break;
SDValue N0 = Node->getOperand(0);
bool LeftShift = N0.getOpcode() == ISD::SHL;
if (!LeftShift && N0.getOpcode() != ISD::SRL)
break;
auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
if (!C)
break;
unsigned C2 = C->getZExtValue();
unsigned XLen = Subtarget->getXLen();
assert((C2 > 0 && C2 < XLen) && "Unexpected shift amount!");
uint64_t C1 = N1C->getZExtValue();
bool IsCANDI = isInt<6>(N1C->getSExtValue());
if (LeftShift)
C1 &= maskTrailingZeros<uint64_t>(C2);
else
C1 &= maskTrailingOnes<uint64_t>(XLen - C2);
bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);
SDValue X = N0.getOperand(0);
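// Turn (and (srl x, c2), c1) -> (srli (slli x, c3-c2), c3) if c1 is a
// mask with c3 leading zeros.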
if (!LeftShift && isMask_64(C1)) {
unsigned Leading = XLen - (64 - countLeadingZeros(C1));
if (C2 < Leading) {
if (C2 + 32 == Leading) {
SDNode *SRLIW = CurDAG->getMachineNode(
RISCV::SRLIW, DL, VT, X, CurDAG->getTargetConstant(C2, DL, VT));
ReplaceNode(Node, SRLIW);
return;
}
if (C2 >= 32 && (Leading - C2) == 1 && N0.hasOneUse() &&
X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32) {
SDNode *SRAIW =
CurDAG->getMachineNode(RISCV::SRAIW, DL, VT, X.getOperand(0),
CurDAG->getTargetConstant(31, DL, VT));
SDNode *SRLIW = CurDAG->getMachineNode(
RISCV::SRLIW, DL, VT, SDValue(SRAIW, 0),
CurDAG->getTargetConstant(Leading - 32, DL, VT));
ReplaceNode(Node, SRLIW);
return;
}
bool Skip = Subtarget->hasStdExtZba() && Leading == 32 &&
X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32;
Skip |= Subtarget->hasStdExtZbs() && Leading == XLen - 1;
if (OneUseOrZExtW && !Skip) {
SDNode *SLLI = CurDAG->getMachineNode(
RISCV::SLLI, DL, VT, X,
CurDAG->getTargetConstant(Leading - C2, DL, VT));
SDNode *SRLI = CurDAG->getMachineNode(
RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
CurDAG->getTargetConstant(Leading, DL, VT));
ReplaceNode(Node, SRLI);
return;
}
}
}
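// Turn (and (shl x, c2), c1) -> (srli (slli x, c2+c3), c3) if c1 is a
// mask shifted by c2 bits with c3 leading zeros; with Zba a zext of the
// low 32 bits becomes slli.uw.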
if (LeftShift && isShiftedMask_64(C1)) {
unsigned Leading = XLen - (64 - countLeadingZeros(C1));
if (C2 + Leading < XLen &&
C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + Leading)) << C2)) {
if ((XLen - (C2 + Leading)) == 32 && Subtarget->hasStdExtZba()) {
SDNode *SLLI_UW = CurDAG->getMachineNode(
RISCV::SLLI_UW, DL, VT, X, CurDAG->getTargetConstant(C2, DL, VT));
ReplaceNode(Node, SLLI_UW);
return;
}
if (OneUseOrZExtW && !IsCANDI) {
SDNode *SLLI = CurDAG->getMachineNode(
RISCV::SLLI, DL, VT, X,
CurDAG->getTargetConstant(C2 + Leading, DL, VT));
SDNode *SRLI = CurDAG->getMachineNode(
RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
CurDAG->getTargetConstant(Leading, DL, VT));
ReplaceNode(Node, SRLI);
return;
}
}
}
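// Turn (and (shr x, c2), c1) -> (slli (srli x, c2+c3), c3) if c1 is a
// shifted mask with c2 leading zeros and c3 trailing zeros.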
if (!LeftShift && isShiftedMask_64(C1)) {
unsigned Leading = XLen - (64 - countLeadingZeros(C1));
unsigned Trailing = countTrailingZeros(C1);
if (Leading == C2 && C2 + Trailing < XLen && OneUseOrZExtW && !IsCANDI) {
unsigned SrliOpc = RISCV::SRLI;
if (X.getOpcode() == ISD::AND && isa<ConstantSDNode>(X.getOperand(1)) &&
X.getConstantOperandVal(1) == UINT64_C(0xFFFFFFFF)) {
SrliOpc = RISCV::SRLIW;
X = X.getOperand(0);
}
SDNode *SRLI = CurDAG->getMachineNode(
SrliOpc, DL, VT, X,
CurDAG->getTargetConstant(C2 + Trailing, DL, VT));
SDNode *SLLI =
CurDAG->getMachineNode(RISCV::SLLI, DL, VT, SDValue(SRLI, 0),
CurDAG->getTargetConstant(Trailing, DL, VT));
ReplaceNode(Node, SLLI);
return;
}
if (Leading > 32 && (Leading - 32) == C2 && C2 + Trailing < 32 &&
OneUseOrZExtW && !IsCANDI) {
SDNode *SRLIW = CurDAG->getMachineNode(
RISCV::SRLIW, DL, VT, X,
CurDAG->getTargetConstant(C2 + Trailing, DL, VT));
SDNode *SLLI =
CurDAG->getMachineNode(RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
CurDAG->getTargetConstant(Trailing, DL, VT));
ReplaceNode(Node, SLLI);
return;
}
}
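// Turn (and (shl x, c2), c1) -> (slli (srli x, c3-c2), c3) if c1 is a
// shifted mask with no leading zeros and c3 trailing zeros.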
if (LeftShift && isShiftedMask_64(C1)) {
unsigned Leading = XLen - (64 - countLeadingZeros(C1));
unsigned Trailing = countTrailingZeros(C1);
if (Leading == 0 && C2 < Trailing && OneUseOrZExtW && !IsCANDI) {
SDNode *SRLI = CurDAG->getMachineNode(
RISCV::SRLI, DL, VT, X,
CurDAG->getTargetConstant(Trailing - C2, DL, VT));
SDNode *SLLI =
CurDAG->getMachineNode(RISCV::SLLI, DL, VT, SDValue(SRLI, 0),
CurDAG->getTargetConstant(Trailing, DL, VT));
ReplaceNode(Node, SLLI);
return;
}
if (C2 < Trailing && Leading + C2 == 32 && OneUseOrZExtW && !IsCANDI) {
SDNode *SRLIW = CurDAG->getMachineNode(
RISCV::SRLIW, DL, VT, X,
CurDAG->getTargetConstant(Trailing - C2, DL, VT));
SDNode *SLLI =
CurDAG->getMachineNode(RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
CurDAG->getTargetConstant(Trailing, DL, VT));
ReplaceNode(Node, SLLI);
return;
}
}
break;
}
case ISD::MUL: {
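// Special case for computing (mul (and X, C2), C1) when the full product
// fits in XLen bits: shift X left by the leading zero count of C2 and
// shift C1 up to match, so the product appears in the MULHU output. This
// avoids materializing C2 in a register.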
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
if (!N1C || !N1C->hasOneUse())
break;
SDValue N0 = Node->getOperand(0);
if (N0.getOpcode() != ISD::AND || !isa<ConstantSDNode>(N0.getOperand(1)))
break;
uint64_t C2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
if (!isMask_64(C2))
break;
bool IsANDIOrZExt =
isInt<12>(C2) ||
(C2 == UINT64_C(0xFFFF) &&
(Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp())) ||
(C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba());
if (IsANDIOrZExt && (isInt<12>(N1C->getSExtValue()) || !N0.hasOneUse()))
break;
unsigned XLen = Subtarget->getXLen();
unsigned LeadingZeros = XLen - (64 - countLeadingZeros(C2));
uint64_t C1 = N1C->getZExtValue();
unsigned ConstantShift = XLen - LeadingZeros;
if (ConstantShift > (XLen - (64 - countLeadingZeros(C1))))
break;
uint64_t ShiftedC1 = C1 << ConstantShift;
if (XLen == 32)
ShiftedC1 = SignExtend64<32>(ShiftedC1);
SDNode *Imm = selectImm(CurDAG, DL, VT, ShiftedC1, *Subtarget);
SDNode *SLLI =
CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0.getOperand(0),
CurDAG->getTargetConstant(LeadingZeros, DL, VT));
SDNode *MULHU = CurDAG->getMachineNode(RISCV::MULHU, DL, VT,
SDValue(SLLI, 0), SDValue(Imm, 0));
ReplaceNode(Node, MULHU);
return;
}
case ISD::INTRINSIC_WO_CHAIN: {
unsigned IntNo = Node->getConstantOperandVal(0);
switch (IntNo) {
default:
break;
case Intrinsic::riscv_vmsgeu:
case Intrinsic::riscv_vmsge: {
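// There is no vmsge{u}.vx instruction, so expand to vmslt{u}.vx followed
// by vmnand.mm; comparing unsigned against zero is always true and
// becomes vmset.m.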
SDValue Src1 = Node->getOperand(1);
SDValue Src2 = Node->getOperand(2);
bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
bool IsCmpUnsignedZero = false;
if (Src2.getValueType() != XLenVT)
break;
if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
int64_t CVal = C->getSExtValue();
if (CVal >= -15 && CVal <= 16) {
if (!IsUnsigned || CVal != 0)
break;
IsCmpUnsignedZero = true;
}
}
MVT Src1VT = Src1.getSimpleValueType();
unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode;
switch (RISCVTargetLowering::getLMUL(Src1VT)) {
default:
llvm_unreachable("Unexpected LMUL!");
#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b) \
case RISCVII::VLMUL::lmulenum: \
VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
: RISCV::PseudoVMSLT_VX_##suffix; \
VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix; \
VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b; \
break;
CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F8, MF8, B1)
CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F4, MF4, B2)
CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F2, MF2, B4)
CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_1, M1, B8)
CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_2, M2, B16)
CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_4, M4, B32)
CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_8, M8, B64)
#undef CASE_VMSLT_VMNAND_VMSET_OPCODES
}
SDValue SEW = CurDAG->getTargetConstant(
Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
SDValue VL;
selectVLOp(Node->getOperand(3), VL);
if (IsCmpUnsignedZero) {
ReplaceNode(Node, CurDAG->getMachineNode(VMSetOpcode, DL, VT, VL, SEW));
return;
}
SDValue Cmp = SDValue(
CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
0);
ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
{Cmp, Cmp, VL, SEW}));
return;
}
case Intrinsic::riscv_vmsgeu_mask:
case Intrinsic::riscv_vmsge_mask: {
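// Masked form: expand to a masked vmslt{u}.vx followed by vmxor.mm with
// the mask; when the mask equals the merge operand, vmslt{u}.vx plus
// vmandn.mm suffices. Unsigned >= 0 reduces to vmor.mm or the mask itself.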
SDValue Src1 = Node->getOperand(2);
SDValue Src2 = Node->getOperand(3);
bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
bool IsCmpUnsignedZero = false;
if (Src2.getValueType() != XLenVT)
break;
if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
int64_t CVal = C->getSExtValue();
if (CVal >= -15 && CVal <= 16) {
if (!IsUnsigned || CVal != 0)
break;
IsCmpUnsignedZero = true;
}
}
MVT Src1VT = Src1.getSimpleValueType();
unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
VMOROpcode;
switch (RISCVTargetLowering::getLMUL(Src1VT)) {
default:
llvm_unreachable("Unexpected LMUL!");
#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b) \
case RISCVII::VLMUL::lmulenum: \
VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
: RISCV::PseudoVMSLT_VX_##suffix; \
VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK \
: RISCV::PseudoVMSLT_VX_##suffix##_MASK; \
break;
CASE_VMSLT_OPCODES(LMUL_F8, MF8, B1)
CASE_VMSLT_OPCODES(LMUL_F4, MF4, B2)
CASE_VMSLT_OPCODES(LMUL_F2, MF2, B4)
CASE_VMSLT_OPCODES(LMUL_1, M1, B8)
CASE_VMSLT_OPCODES(LMUL_2, M2, B16)
CASE_VMSLT_OPCODES(LMUL_4, M4, B32)
CASE_VMSLT_OPCODES(LMUL_8, M8, B64)
#undef CASE_VMSLT_OPCODES
}
switch (RISCVTargetLowering::getLMUL(VT)) {
default:
llvm_unreachable("Unexpected LMUL!");
#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix) \
case RISCVII::VLMUL::lmulenum: \
VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix; \
VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix; \
VMOROpcode = RISCV::PseudoVMOR_MM_##suffix; \
break;
CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F8, MF8)
CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F4, MF4)
CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F2, MF2)
CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_1, M1)
CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_2, M2)
CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_4, M4)
CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_8, M8)
#undef CASE_VMXOR_VMANDN_VMOR_OPCODES
}
SDValue SEW = CurDAG->getTargetConstant(
Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
SDValue VL;
selectVLOp(Node->getOperand(5), VL);
SDValue MaskedOff = Node->getOperand(1);
SDValue Mask = Node->getOperand(4);
if (IsCmpUnsignedZero) {
if (Mask == MaskedOff) {
ReplaceUses(Node, Mask.getNode());
return;
}
ReplaceNode(Node,
CurDAG->getMachineNode(VMOROpcode, DL, VT,
{Mask, MaskedOff, VL, MaskSEW}));
return;
}
if (Mask == MaskedOff) {
SDValue Cmp = SDValue(
CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
0);
ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOpcode, DL, VT,
{Mask, Cmp, VL, MaskSEW}));
return;
}
SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL,
RISCV::V0, Mask, SDValue());
SDValue Glue = Chain.getValue(1);
SDValue V0 = CurDAG->getRegister(RISCV::V0, VT);
SDValue Cmp = SDValue(
CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
{MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
0);
ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
{Cmp, Mask, VL, MaskSEW}));
return;
}
case Intrinsic::riscv_vsetvli_opt:
case Intrinsic::riscv_vsetvlimax_opt:
return selectVSETVLI(Node);
}
break;
}
case ISD::INTRINSIC_W_CHAIN: {
unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
switch (IntNo) {
default:
break;
case Intrinsic::riscv_vsetvli:
case Intrinsic::riscv_vsetvlimax:
return selectVSETVLI(Node);
case Intrinsic::riscv_vlseg2:
case Intrinsic::riscv_vlseg3:
case Intrinsic::riscv_vlseg4:
case Intrinsic::riscv_vlseg5:
case Intrinsic::riscv_vlseg6:
case Intrinsic::riscv_vlseg7:
case Intrinsic::riscv_vlseg8: {
selectVLSEG(Node, false, false);
return;
}
case Intrinsic::riscv_vlseg2_mask:
case Intrinsic::riscv_vlseg3_mask:
case Intrinsic::riscv_vlseg4_mask:
case Intrinsic::riscv_vlseg5_mask:
case Intrinsic::riscv_vlseg6_mask:
case Intrinsic::riscv_vlseg7_mask:
case Intrinsic::riscv_vlseg8_mask: {
selectVLSEG(Node, true, false);
return;
}
case Intrinsic::riscv_vlsseg2:
case Intrinsic::riscv_vlsseg3:
case Intrinsic::riscv_vlsseg4:
case Intrinsic::riscv_vlsseg5:
case Intrinsic::riscv_vlsseg6:
case Intrinsic::riscv_vlsseg7:
case Intrinsic::riscv_vlsseg8: {
selectVLSEG(Node, false, true);
return;
}
case Intrinsic::riscv_vlsseg2_mask:
case Intrinsic::riscv_vlsseg3_mask:
case Intrinsic::riscv_vlsseg4_mask:
case Intrinsic::riscv_vlsseg5_mask:
case Intrinsic::riscv_vlsseg6_mask:
case Intrinsic::riscv_vlsseg7_mask:
case Intrinsic::riscv_vlsseg8_mask: {
selectVLSEG(Node, true, true);
return;
}
case Intrinsic::riscv_vloxseg2:
case Intrinsic::riscv_vloxseg3:
case Intrinsic::riscv_vloxseg4:
case Intrinsic::riscv_vloxseg5:
case Intrinsic::riscv_vloxseg6:
case Intrinsic::riscv_vloxseg7:
case Intrinsic::riscv_vloxseg8:
selectVLXSEG(Node, false, true);
return;
case Intrinsic::riscv_vluxseg2:
case Intrinsic::riscv_vluxseg3:
case Intrinsic::riscv_vluxseg4:
case Intrinsic::riscv_vluxseg5:
case Intrinsic::riscv_vluxseg6:
case Intrinsic::riscv_vluxseg7:
case Intrinsic::riscv_vluxseg8:
selectVLXSEG(Node, false, false);
return;
case Intrinsic::riscv_vloxseg2_mask:
case Intrinsic::riscv_vloxseg3_mask:
case Intrinsic::riscv_vloxseg4_mask:
case Intrinsic::riscv_vloxseg5_mask:
case Intrinsic::riscv_vloxseg6_mask:
case Intrinsic::riscv_vloxseg7_mask:
case Intrinsic::riscv_vloxseg8_mask:
selectVLXSEG(Node, true, true);
return;
case Intrinsic::riscv_vluxseg2_mask:
case Intrinsic::riscv_vluxseg3_mask:
case Intrinsic::riscv_vluxseg4_mask:
case Intrinsic::riscv_vluxseg5_mask:
case Intrinsic::riscv_vluxseg6_mask:
case Intrinsic::riscv_vluxseg7_mask:
case Intrinsic::riscv_vluxseg8_mask:
selectVLXSEG(Node, true, false);
return;
case Intrinsic::riscv_vlseg8ff:
case Intrinsic::riscv_vlseg7ff:
case Intrinsic::riscv_vlseg6ff:
case Intrinsic::riscv_vlseg5ff:
case Intrinsic::riscv_vlseg4ff:
case Intrinsic::riscv_vlseg3ff:
case Intrinsic::riscv_vlseg2ff: {
selectVLSEGFF(Node, false);
return;
}
case Intrinsic::riscv_vlseg8ff_mask:
case Intrinsic::riscv_vlseg7ff_mask:
case Intrinsic::riscv_vlseg6ff_mask:
case Intrinsic::riscv_vlseg5ff_mask:
case Intrinsic::riscv_vlseg4ff_mask:
case Intrinsic::riscv_vlseg3ff_mask:
case Intrinsic::riscv_vlseg2ff_mask: {
selectVLSEGFF(Node, true);
return;
}
case Intrinsic::riscv_vloxei:
case Intrinsic::riscv_vloxei_mask:
case Intrinsic::riscv_vluxei:
case Intrinsic::riscv_vluxei_mask: {
bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
IntNo == Intrinsic::riscv_vluxei_mask;
bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
IntNo == Intrinsic::riscv_vloxei_mask;
MVT VT = Node->getSimpleValueType(0);
unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
unsigned CurOp = 2;
bool IsTU = IsMasked || !Node->getOperand(CurOp).isUndef();
SmallVector<SDValue, 8> Operands;
if (IsTU)
Operands.push_back(Node->getOperand(CurOp++));
else
CurOp++;
MVT IndexVT;
addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
/*IsStridedOrIndexed*/ true, Operands,
/*IsLoad=*/true, &IndexVT);
assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
"Element count mismatch");
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
report_fatal_error("The V extension does not support EEW=64 for index "
"values when XLEN=32");
}
const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
MachineSDNode *Load =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
ReplaceNode(Node, Load);
return;
}
case Intrinsic::riscv_vlm:
case Intrinsic::riscv_vle:
case Intrinsic::riscv_vle_mask:
case Intrinsic::riscv_vlse:
case Intrinsic::riscv_vlse_mask: {
bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
IntNo == Intrinsic::riscv_vlse_mask;
bool IsStrided =
IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
MVT VT = Node->getSimpleValueType(0);
unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
unsigned CurOp = 2;
bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;
bool IsTU = HasPassthruOperand &&
(IsMasked || !Node->getOperand(CurOp).isUndef());
SmallVector<SDValue, 8> Operands;
if (IsTU)
Operands.push_back(Node->getOperand(CurOp++));
else if (HasPassthruOperand)
CurOp++;
addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
Operands, true);
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
const RISCV::VLEPseudo *P =
RISCV::getVLEPseudo(IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW,
static_cast<unsigned>(LMUL));
MachineSDNode *Load =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
ReplaceNode(Node, Load);
return;
}
case Intrinsic::riscv_vleff:
case Intrinsic::riscv_vleff_mask: {
bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
MVT VT = Node->getSimpleValueType(0);
unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
unsigned CurOp = 2;
bool IsTU = IsMasked || !Node->getOperand(CurOp).isUndef();
SmallVector<SDValue, 7> Operands;
if (IsTU)
Operands.push_back(Node->getOperand(CurOp++));
else
CurOp++;
addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
/*IsStridedOrIndexed*/ false, Operands,
/*IsLoad=*/true);
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
const RISCV::VLEPseudo *P =
RISCV::getVLEPseudo(IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
Log2SEW, static_cast<unsigned>(LMUL));
MachineSDNode *Load = CurDAG->getMachineNode(
P->Pseudo, DL, Node->getVTList(), Operands);
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
ReplaceNode(Node, Load);
return;
}
}
break;
}
case ISD::INTRINSIC_VOID: {
unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
switch (IntNo) {
case Intrinsic::riscv_vsseg2:
case Intrinsic::riscv_vsseg3:
case Intrinsic::riscv_vsseg4:
case Intrinsic::riscv_vsseg5:
case Intrinsic::riscv_vsseg6:
case Intrinsic::riscv_vsseg7:
case Intrinsic::riscv_vsseg8: {
selectVSSEG(Node, false, false);
return;
}
case Intrinsic::riscv_vsseg2_mask:
case Intrinsic::riscv_vsseg3_mask:
case Intrinsic::riscv_vsseg4_mask:
case Intrinsic::riscv_vsseg5_mask:
case Intrinsic::riscv_vsseg6_mask:
case Intrinsic::riscv_vsseg7_mask:
case Intrinsic::riscv_vsseg8_mask: {
selectVSSEG(Node, true, false);
return;
}
case Intrinsic::riscv_vssseg2:
case Intrinsic::riscv_vssseg3:
case Intrinsic::riscv_vssseg4:
case Intrinsic::riscv_vssseg5:
case Intrinsic::riscv_vssseg6:
case Intrinsic::riscv_vssseg7:
case Intrinsic::riscv_vssseg8: {
selectVSSEG(Node, false, true);
return;
}
case Intrinsic::riscv_vssseg2_mask:
case Intrinsic::riscv_vssseg3_mask:
case Intrinsic::riscv_vssseg4_mask:
case Intrinsic::riscv_vssseg5_mask:
case Intrinsic::riscv_vssseg6_mask:
case Intrinsic::riscv_vssseg7_mask:
case Intrinsic::riscv_vssseg8_mask: {
selectVSSEG(Node, true, true);
return;
}
case Intrinsic::riscv_vsoxseg2:
case Intrinsic::riscv_vsoxseg3:
case Intrinsic::riscv_vsoxseg4:
case Intrinsic::riscv_vsoxseg5:
case Intrinsic::riscv_vsoxseg6:
case Intrinsic::riscv_vsoxseg7:
case Intrinsic::riscv_vsoxseg8:
selectVSXSEG(Node, false, true);
return;
case Intrinsic::riscv_vsuxseg2:
case Intrinsic::riscv_vsuxseg3:
case Intrinsic::riscv_vsuxseg4:
case Intrinsic::riscv_vsuxseg5:
case Intrinsic::riscv_vsuxseg6:
case Intrinsic::riscv_vsuxseg7:
case Intrinsic::riscv_vsuxseg8:
selectVSXSEG(Node, false, false);
return;
case Intrinsic::riscv_vsoxseg2_mask:
case Intrinsic::riscv_vsoxseg3_mask:
case Intrinsic::riscv_vsoxseg4_mask:
case Intrinsic::riscv_vsoxseg5_mask:
case Intrinsic::riscv_vsoxseg6_mask:
case Intrinsic::riscv_vsoxseg7_mask:
case Intrinsic::riscv_vsoxseg8_mask:
selectVSXSEG(Node, true, true);
return;
case Intrinsic::riscv_vsuxseg2_mask:
case Intrinsic::riscv_vsuxseg3_mask:
case Intrinsic::riscv_vsuxseg4_mask:
case Intrinsic::riscv_vsuxseg5_mask:
case Intrinsic::riscv_vsuxseg6_mask:
case Intrinsic::riscv_vsuxseg7_mask:
case Intrinsic::riscv_vsuxseg8_mask:
selectVSXSEG(Node, true, false);
return;
case Intrinsic::riscv_vsoxei:
case Intrinsic::riscv_vsoxei_mask:
case Intrinsic::riscv_vsuxei:
case Intrinsic::riscv_vsuxei_mask: {
bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
IntNo == Intrinsic::riscv_vsuxei_mask;
bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
IntNo == Intrinsic::riscv_vsoxei_mask;
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
unsigned CurOp = 2;
SmallVector<SDValue, 8> Operands;
Operands.push_back(Node->getOperand(CurOp++));
MVT IndexVT;
addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
/*IsStridedOrIndexed*/ true, Operands,
/*IsLoad=*/false, &IndexVT);
assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
"Element count mismatch");
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
report_fatal_error("The V extension does not support EEW=64 for index "
"values when XLEN=32");
}
const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
IsMasked, /*TU*/ false, IsOrdered, IndexLog2EEW,
static_cast<unsigned>(LMUL), static_cast<unsigned>(IndexLMUL));
MachineSDNode *Store =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
ReplaceNode(Node, Store);
return;
}
case Intrinsic::riscv_vsm:
case Intrinsic::riscv_vse:
case Intrinsic::riscv_vse_mask:
case Intrinsic::riscv_vsse:
case Intrinsic::riscv_vsse_mask: {
bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
IntNo == Intrinsic::riscv_vsse_mask;
bool IsStrided =
IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
unsigned CurOp = 2;
SmallVector<SDValue, 8> Operands;
Operands.push_back(Node->getOperand(CurOp++));
addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
Operands);
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
MachineSDNode *Store =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
ReplaceNode(Node, Store);
return;
}
}
break;
}
case ISD::BITCAST: {
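// Just drop bitcasts between vectors if both are fixed or both are
// scalable.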
MVT SrcVT = Node->getOperand(0).getSimpleValueType();
if ((VT.isScalableVector() && SrcVT.isScalableVector()) ||
(VT.isFixedLengthVector() && SrcVT.isFixedLengthVector())) {
ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
CurDAG->RemoveDeadNode(Node);
return;
}
break;
}
case ISD::INSERT_SUBVECTOR: {
SDValue V = Node->getOperand(0);
SDValue SubV = Node->getOperand(1);
SDLoc DL(SubV);
auto Idx = Node->getConstantOperandVal(2);
MVT SubVecVT = SubV.getSimpleValueType();
const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
MVT SubVecContainerVT = SubVecVT;
if (SubVecVT.isFixedLengthVector())
SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
if (VT.isFixedLengthVector())
VT = TLI.getContainerForFixedLengthVector(VT);
const auto *TRI = Subtarget->getRegisterInfo();
unsigned SubRegIdx;
std::tie(SubRegIdx, Idx) =
RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
VT, SubVecContainerVT, Idx, TRI);
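// If the Idx hasn't been completely eliminated, then this is a subvector
// insert that doesn't naturally align to a vector register. These must be
// handled by manipulating the vector elements.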
if (Idx != 0)
break;
RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
(void)IsSubVecPartReg; // Silence unused variable warning without asserts.
assert((!IsSubVecPartReg || V.isUndef()) &&
"Expecting lowering to have created legal INSERT_SUBVECTORs when "
"the subvector is smaller than a full-sized register");
if (SubRegIdx == RISCV::NoSubRegister) {
unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT);
assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
InRegClassID &&
"Unexpected subvector extraction");
SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
SDNode *NewNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
DL, VT, SubV, RC);
ReplaceNode(Node, NewNode);
return;
}
SDValue Insert = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, VT, V, SubV);
ReplaceNode(Node, Insert.getNode());
return;
}
case ISD::EXTRACT_SUBVECTOR: {
SDValue V = Node->getOperand(0);
auto Idx = Node->getConstantOperandVal(1);
MVT InVT = V.getSimpleValueType();
SDLoc DL(V);
const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
MVT SubVecContainerVT = VT;
if (VT.isFixedLengthVector())
SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
if (InVT.isFixedLengthVector())
InVT = TLI.getContainerForFixedLengthVector(InVT);
const auto *TRI = Subtarget->getRegisterInfo();
unsigned SubRegIdx;
std::tie(SubRegIdx, Idx) =
RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
InVT, SubVecContainerVT, Idx, TRI);
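// If the Idx hasn't been completely eliminated, then this is a subvector
// extract that doesn't naturally align to a vector register and must be
// handled by manipulating the vector elements.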
if (Idx != 0)
break;
if (SubRegIdx == RISCV::NoSubRegister) {
unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(InVT);
assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
InRegClassID &&
"Unexpected subvector extraction");
SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
SDNode *NewNode =
CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
ReplaceNode(Node, NewNode);
return;
}
SDValue Extract = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, V);
ReplaceNode(Node, Extract.getNode());
return;
}
case ISD::SPLAT_VECTOR:
case RISCVISD::VMV_S_X_VL:
case RISCVISD::VFMV_S_F_VL:
case RISCVISD::VMV_V_X_VL:
case RISCVISD::VFMV_V_F_VL: {
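// Try to match a splat of a scalar load into a zero-strided vector load
// (stride operand x0), which broadcasts the loaded element to all lanes.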
bool IsScalarMove = Node->getOpcode() == RISCVISD::VMV_S_X_VL ||
Node->getOpcode() == RISCVISD::VFMV_S_F_VL;
bool HasPassthruOperand = Node->getOpcode() != ISD::SPLAT_VECTOR;
if (HasPassthruOperand && !Node->getOperand(0).isUndef())
break;
SDValue Src = HasPassthruOperand ? Node->getOperand(1) : Node->getOperand(0);
auto *Ld = dyn_cast<LoadSDNode>(Src);
if (!Ld)
break;
EVT MemVT = Ld->getMemoryVT();
if (MemVT.getStoreSize() != VT.getVectorElementType().getStoreSize())
break;
if (!IsProfitableToFold(Src, Node, Node) ||
!IsLegalToFold(Src, Node, Node, TM.getOptLevel()))
break;
SDValue VL;
if (Node->getOpcode() == ISD::SPLAT_VECTOR)
VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
else if (IsScalarMove) {
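// We could handle more VL values if the VSETVLI insertion pass learned to
// avoid introducing extra VSETVLIs.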
if (!isOneConstant(Node->getOperand(2)))
break;
selectVLOp(Node->getOperand(2), VL);
} else
selectVLOp(Node->getOperand(2), VL);
unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
SDValue Operands[] = {Ld->getBasePtr(),
CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
Ld->getChain()};
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
/*IsMasked*/ false, /*IsTU*/ false, /*IsStrided*/ true, /*FF*/ false,
Log2SEW, static_cast<unsigned>(LMUL));
MachineSDNode *Load =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
CurDAG->setNodeMemRefs(Load, {Ld->getMemOperand()});
ReplaceNode(Node, Load);
return;
}
}
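// Select the default instruction.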
SelectCode(Node);
}
bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
switch (ConstraintID) {
case InlineAsm::Constraint_m:
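// We just support simple memory operands that have a single address
// operand and need no special handling.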
OutOps.push_back(Op);
return false;
case InlineAsm::Constraint_A:
OutOps.push_back(Op);
return false;
default:
break;
}
return true;
}
bool RISCVDAGToDAGISel::SelectAddrFrameIndex(SDValue Addr, SDValue &Base,
SDValue &Offset) {
if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), Subtarget->getXLenVT());
return true;
}
return false;
}
bool RISCVDAGToDAGISel::SelectFrameAddrRegImm(SDValue Addr, SDValue &Base,
SDValue &Offset) {
if (SelectAddrFrameIndex(Addr, Base, Offset))
return true;
if (!CurDAG->isBaseWithConstantOffset(Addr))
return false;
if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
if (isInt<12>(CVal)) {
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(),
Subtarget->getXLenVT());
Offset = CurDAG->getTargetConstant(CVal, SDLoc(Addr),
Subtarget->getXLenVT());
return true;
}
}
return false;
}
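// Match a constant address: the low 12 bits become the offset and the
// remainder is built with LUI when it fits in 32 bits, or with a
// materialization sequence whose trailing ADDI is peeled into the offset.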
static bool selectConstantAddr(SelectionDAG *CurDAG, const SDLoc &DL,
const MVT VT, const RISCVSubtarget *Subtarget,
SDValue Addr, SDValue &Base, SDValue &Offset) {
if (!isa<ConstantSDNode>(Addr))
return false;
int64_t CVal = cast<ConstantSDNode>(Addr)->getSExtValue();
int64_t Lo12 = SignExtend64<12>(CVal);
int64_t Hi = (uint64_t)CVal - (uint64_t)Lo12;
if (!Subtarget->is64Bit() || isInt<32>(Hi)) {
if (Hi) {
int64_t Hi20 = (Hi >> 12) & 0xfffff;
Base = SDValue(
CurDAG->getMachineNode(RISCV::LUI, DL, VT,
CurDAG->getTargetConstant(Hi20, DL, VT)),
0);
} else {
Base = CurDAG->getRegister(RISCV::X0, VT);
}
Offset = CurDAG->getTargetConstant(Lo12, DL, VT);
return true;
}
RISCVMatInt::InstSeq Seq =
RISCVMatInt::generateInstSeq(CVal, Subtarget->getFeatureBits());
if (Seq.back().Opc != RISCV::ADDI)
return false;
Lo12 = Seq.back().Imm;
Seq.pop_back();
assert(!Seq.empty() && "Expected more instructions in sequence");
Base = SDValue(selectImmSeq(CurDAG, DL, VT, Seq), 0);
Offset = CurDAG->getTargetConstant(Lo12, DL, VT);
return true;
}
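// Is this ADD instruction only used as the base pointer of scalar loads
// and stores?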
static bool isWorthFoldingAdd(SDValue Add) {
for (auto Use : Add->uses()) {
if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
Use->getOpcode() != ISD::ATOMIC_LOAD &&
Use->getOpcode() != ISD::ATOMIC_STORE)
return false;
EVT VT = cast<MemSDNode>(Use)->getMemoryVT();
if (!VT.isScalarInteger() && VT != MVT::f16 && VT != MVT::f32 &&
VT != MVT::f64)
return false;
if (Use->getOpcode() == ISD::STORE &&
cast<StoreSDNode>(Use)->getValue() == Add)
return false;
if (Use->getOpcode() == ISD::ATOMIC_STORE &&
cast<AtomicSDNode>(Use)->getVal() == Add)
return false;
}
return true;
}
bool RISCVDAGToDAGISel::SelectAddrRegImm(SDValue Addr, SDValue &Base,
SDValue &Offset) {
if (SelectAddrFrameIndex(Addr, Base, Offset))
return true;
SDLoc DL(Addr);
MVT VT = Addr.getSimpleValueType();
if (Addr.getOpcode() == RISCVISD::ADD_LO) {
Base = Addr.getOperand(0);
Offset = Addr.getOperand(1);
return true;
}
if (CurDAG->isBaseWithConstantOffset(Addr)) {
int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
if (isInt<12>(CVal)) {
Base = Addr.getOperand(0);
if (Base.getOpcode() == RISCVISD::ADD_LO) {
SDValue LoOperand = Base.getOperand(1);
if (auto *GA = dyn_cast<GlobalAddressSDNode>(LoOperand)) {
const DataLayout &DL = CurDAG->getDataLayout();
Align Alignment = commonAlignment(
GA->getGlobal()->getPointerAlignment(DL), GA->getOffset());
if (CVal == 0 || Alignment > CVal) {
int64_t CombinedOffset = CVal + GA->getOffset();
Base = Base.getOperand(0);
Offset = CurDAG->getTargetGlobalAddress(
GA->getGlobal(), SDLoc(LoOperand), LoOperand.getValueType(),
CombinedOffset, GA->getTargetFlags());
return true;
}
}
}
if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), VT);
Offset = CurDAG->getTargetConstant(CVal, DL, VT);
return true;
}
}
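// Handle ADD with large immediates: offsets in [-4096, -2049] or
// [2048, 4094] split into an ADDI plus a simm12 memory offset; other
// constants are materialized separately when the ADD is worth folding.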
if (Addr.getOpcode() == ISD::ADD && isa<ConstantSDNode>(Addr.getOperand(1))) {
int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
assert(!isInt<12>(CVal) && "simm12 not already handled?");
if (isInt<12>(CVal / 2) && isInt<12>(CVal - CVal / 2)) {
int64_t Adj = CVal < 0 ? -2048 : 2047;
Base = SDValue(
CurDAG->getMachineNode(RISCV::ADDI, DL, VT, Addr.getOperand(0),
CurDAG->getTargetConstant(Adj, DL, VT)),
0);
Offset = CurDAG->getTargetConstant(CVal - Adj, DL, VT);
return true;
}
if (isWorthFoldingAdd(Addr) &&
selectConstantAddr(CurDAG, DL, VT, Subtarget, Addr.getOperand(1), Base,
Offset)) {
Base = SDValue(
CurDAG->getMachineNode(RISCV::ADD, DL, VT, Addr.getOperand(0), Base),
0);
return true;
}
}
if (selectConstantAddr(CurDAG, DL, VT, Subtarget, Addr, Base, Offset))
return true;
Base = Addr;
Offset = CurDAG->getTargetConstant(0, DL, VT);
return true;
}
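// RISC-V shift instructions only read the low log2(XLen) bits of the
// shift amount, so a masking AND (or a negation by a multiple of the
// shift width) can be looked through.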
bool RISCVDAGToDAGISel::selectShiftMask(SDValue N, unsigned ShiftWidth,
SDValue &ShAmt) {
if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
const APInt &AndMask = N->getConstantOperandAPInt(1);
assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
if (ShMask.isSubsetOf(AndMask)) {
ShAmt = N.getOperand(0);
return true;
}
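// SimplifyDemandedBits may have optimized the mask, so try restoring any
// bits that are known zero.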
KnownBits Known = CurDAG->computeKnownBits(N->getOperand(0));
if (ShMask.isSubsetOf(AndMask | Known.Zero)) {
ShAmt = N.getOperand(0);
return true;
}
} else if (N.getOpcode() == ISD::SUB &&
isa<ConstantSDNode>(N.getOperand(0))) {
uint64_t Imm = N.getConstantOperandVal(0);
if (Imm != 0 && Imm % ShiftWidth == 0) {
SDLoc DL(N);
EVT VT = N.getValueType();
SDValue Zero = CurDAG->getRegister(RISCV::X0, VT);
unsigned NegOpc = VT == MVT::i64 ? RISCV::SUBW : RISCV::SUB;
MachineSDNode *Neg = CurDAG->getMachineNode(NegOpc, DL, VT, Zero,
N.getOperand(1));
ShAmt = SDValue(Neg, 0);
return true;
}
}
ShAmt = N;
return true;
}
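// Match a value known to be sign-extended from i32, either via an
// explicit sext_inreg or by computed sign bits.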
bool RISCVDAGToDAGISel::selectSExti32(SDValue N, SDValue &Val) {
if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
Val = N.getOperand(0);
return true;
}
MVT VT = N.getSimpleValueType();
if (CurDAG->ComputeNumSignBits(N) > (VT.getSizeInBits() - 32)) {
Val = N;
return true;
}
return false;
}
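// Match a value whose upper XLen-32 bits are known zero, looking through
// an explicit 0xFFFFFFFF mask when present.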
bool RISCVDAGToDAGISel::selectZExti32(SDValue N, SDValue &Val) {
if (N.getOpcode() == ISD::AND) {
auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
if (C && C->getZExtValue() == UINT64_C(0xFFFFFFFF)) {
Val = N.getOperand(0);
return true;
}
}
MVT VT = N.getSimpleValueType();
APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), 32);
if (CurDAG->MaskedValueIsZero(N, Mask)) {
Val = N;
return true;
}
return false;
}
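// Look for various shift-and-mask patterns that can be folded into a
// SHXADD. ShAmt contains 1, 2, or 3 depending on which SHXADD is being
// matched.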
bool RISCVDAGToDAGISel::selectSHXADDOp(SDValue N, unsigned ShAmt,
SDValue &Val) {
if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
SDValue N0 = N.getOperand(0);
bool LeftShift = N0.getOpcode() == ISD::SHL;
if ((LeftShift || N0.getOpcode() == ISD::SRL) &&
isa<ConstantSDNode>(N0.getOperand(1))) {
uint64_t Mask = N.getConstantOperandVal(1);
unsigned C2 = N0.getConstantOperandVal(1);
unsigned XLen = Subtarget->getXLen();
if (LeftShift)
Mask &= maskTrailingZeros<uint64_t>(C2);
else
Mask &= maskTrailingOnes<uint64_t>(XLen - C2);
if (isShiftedMask_64(Mask)) {
unsigned Leading = XLen - (64 - countLeadingZeros(Mask));
unsigned Trailing = countTrailingZeros(Mask);
if (LeftShift && Leading == 0 && C2 < Trailing && Trailing == ShAmt) {
SDLoc DL(N);
EVT VT = N.getValueType();
Val = SDValue(CurDAG->getMachineNode(
RISCV::SRLI, DL, VT, N0.getOperand(0),
CurDAG->getTargetConstant(Trailing - C2, DL, VT)),
0);
return true;
}
if (!LeftShift && Leading == C2 && Trailing == ShAmt) {
SDLoc DL(N);
EVT VT = N.getValueType();
Val = SDValue(
CurDAG->getMachineNode(
RISCV::SRLI, DL, VT, N0.getOperand(0),
CurDAG->getTargetConstant(Leading + Trailing, DL, VT)),
0);
return true;
}
}
}
}
bool LeftShift = N.getOpcode() == ISD::SHL;
if ((LeftShift || N.getOpcode() == ISD::SRL) &&
isa<ConstantSDNode>(N.getOperand(1))) {
SDValue N0 = N.getOperand(0);
if (N0.getOpcode() == ISD::AND && N0.hasOneUse() &&
isa<ConstantSDNode>(N0.getOperand(1))) {
uint64_t Mask = N0.getConstantOperandVal(1);
if (isShiftedMask_64(Mask)) {
unsigned C1 = N.getConstantOperandVal(1);
unsigned XLen = Subtarget->getXLen();
unsigned Leading = XLen - (64 - countLeadingZeros(Mask));
unsigned Trailing = countTrailingZeros(Mask);
if (LeftShift && Leading == 32 && Trailing > 0 &&
(Trailing + C1) == ShAmt) {
SDLoc DL(N);
EVT VT = N.getValueType();
Val = SDValue(CurDAG->getMachineNode(
RISCV::SRLIW, DL, VT, N0.getOperand(0),
CurDAG->getTargetConstant(Trailing, DL, VT)),
0);
return true;
}
if (!LeftShift && Leading == 32 && Trailing > C1 &&
(Trailing - C1) == ShAmt) {
SDLoc DL(N);
EVT VT = N.getValueType();
Val = SDValue(CurDAG->getMachineNode(
RISCV::SRLIW, DL, VT, N0.getOperand(0),
CurDAG->getTargetConstant(Trailing, DL, VT)),
0);
return true;
}
}
}
}
return false;
}
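// Return true if all users of this SDNode only consume the lower Bits of
// its result. This allows W instructions to be formed for add/sub/mul/shl
// even when the root isn't a sext_inreg.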
bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits) const {
assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB ||
Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL ||
Node->getOpcode() == ISD::SRL ||
Node->getOpcode() == ISD::SIGN_EXTEND_INREG ||
Node->getOpcode() == RISCVISD::GREV ||
Node->getOpcode() == RISCVISD::GORC ||
isa<ConstantSDNode>(Node)) &&
"Unexpected opcode");
for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
SDNode *User = *UI;
if (!User->isMachineOpcode())
return false;
switch (User->getMachineOpcode()) {
default:
return false;
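// These W-form instructions and FP conversions from a 32-bit GPR only read
// the lower 32 bits of their input register.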
case RISCV::ADDW:
case RISCV::ADDIW:
case RISCV::SUBW:
case RISCV::MULW:
case RISCV::SLLW:
case RISCV::SLLIW:
case RISCV::SRAW:
case RISCV::SRAIW:
case RISCV::SRLW:
case RISCV::SRLIW:
case RISCV::DIVW:
case RISCV::DIVUW:
case RISCV::REMW:
case RISCV::REMUW:
case RISCV::ROLW:
case RISCV::RORW:
case RISCV::RORIW:
case RISCV::CLZW:
case RISCV::CTZW:
case RISCV::CPOPW:
case RISCV::SLLI_UW:
case RISCV::FMV_W_X:
case RISCV::FCVT_H_W:
case RISCV::FCVT_H_WU:
case RISCV::FCVT_S_W:
case RISCV::FCVT_S_WU:
case RISCV::FCVT_D_W:
case RISCV::FCVT_D_WU:
if (Bits < 32)
return false;
break;
case RISCV::SLLI:
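// SLLI only reads the lower (XLen - shift amount) bits.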
if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))
return false;
break;
case RISCV::ANDI:
if (Bits < (64 - countLeadingZeros(User->getConstantOperandVal(1))))
return false;
break;
case RISCV::SEXT_B:
if (Bits < 8)
return false;
break;
case RISCV::SEXT_H:
case RISCV::FMV_H_X:
case RISCV::ZEXT_H_RV32:
case RISCV::ZEXT_H_RV64:
if (Bits < 16)
return false;
break;
case RISCV::ADD_UW:
case RISCV::SH1ADD_UW:
case RISCV::SH2ADD_UW:
case RISCV::SH3ADD_UW:
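// Operand 0 of add.uw/shXadd.uw is implicitly zero extended from 32 bits.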
if (UI.getOperandNo() != 0 || Bits < 32)
return false;
break;
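// Stores only read the bits being stored from the value operand.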
case RISCV::SB:
if (UI.getOperandNo() != 0 || Bits < 8)
return false;
break;
case RISCV::SH:
if (UI.getOperandNo() != 0 || Bits < 16)
return false;
break;
case RISCV::SW:
if (UI.getOperandNo() != 0 || Bits < 32)
return false;
break;
}
}
return true;
}
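// Select a VL operand: a small constant becomes a 5-bit immediate, and an
// all-ones constant or the X0 register is canonicalized to the VLMAX
// sentinel; anything else is passed through unchanged.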
bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
auto *C = dyn_cast<ConstantSDNode>(N);
if (C && isUInt<5>(C->getZExtValue())) {
VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
N->getValueType(0));
} else if (C && C->isAllOnesValue()) {
VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
N->getValueType(0));
} else if (isa<RegisterSDNode>(N) &&
cast<RegisterSDNode>(N)->getReg() == RISCV::X0) {
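// An X0 AVL register means VLMAX; use the sentinel constant for it.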
VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
N->getValueType(0));
} else {
VL = N;
}
return true;
}
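// Match a splat of any scalar: a VMV_V_X_VL with an undef passthru.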
bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef())
return false;
SplatVal = N.getOperand(1);
return true;
}
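// Helper for matching a constant splat whose value, interpreted at the
// element width, satisfies \p ValidateImm.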
using ValidateFn = bool (*)(int64_t);
static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
SelectionDAG &DAG,
const RISCVSubtarget &Subtarget,
ValidateFn ValidateImm) {
if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
!isa<ConstantSDNode>(N.getOperand(1)))
return false;
int64_t SplatImm =
cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
MVT XLenVT = Subtarget.getXLenVT();
assert(XLenVT == N.getOperand(1).getSimpleValueType() &&
"Unexpected splat operand type");
MVT EltVT = N.getSimpleValueType().getVectorElementType();
if (EltVT.bitsLT(XLenVT))
SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());
if (!ValidateImm(SplatImm))
return false;
SplatVal = DAG.getTargetConstant(SplatImm, SDLoc(N), XLenVT);
return true;
}
bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
[](int64_t Imm) { return isInt<5>(Imm); });
}
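// Match a constant splat in the range [-15, 16], i.e. a simm5 plus 1.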
bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal) {
return selectVSplatSimmHelper(
N, SplatVal, *CurDAG, *Subtarget,
[](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
}
bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero(SDValue N,
SDValue &SplatVal) {
return selectVSplatSimmHelper(
N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
});
}
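// Match a constant splat that fits in an unsigned 5-bit immediate.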
bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
!isa<ConstantSDNode>(N.getOperand(1)))
return false;
int64_t SplatImm =
cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
if (!isUInt<5>(SplatImm))
return false;
SplatVal =
CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());
return true;
}
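// Match a constant that, interpreted at \p Width bits, fits in a signed
// 5-bit immediate.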
bool RISCVDAGToDAGISel::selectRVVSimm5(SDValue N, unsigned Width,
SDValue &Imm) {
if (auto *C = dyn_cast<ConstantSDNode>(N)) {
int64_t ImmVal = SignExtend64(C->getSExtValue(), Width);
if (!isInt<5>(ImmVal))
return false;
Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), Subtarget->getXLenVT());
return true;
}
return false;
}
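// Try to remove a redundant sign extension. (ADDIW rd, rs, 0) is the
// canonical sext.w; if its input is already sign extended from 32 bits, or
// can be replaced by a W-form instruction, the ADDIW can be folded away.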
bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
if (N->getMachineOpcode() != RISCV::ADDIW ||
!isNullConstant(N->getOperand(1)))
return false;
SDValue N0 = N->getOperand(0);
if (!N0.isMachineOpcode())
return false;
switch (N0.getMachineOpcode()) {
default:
break;
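// These can be converted to their W variant, which sign extends the result
// and makes the outer ADDIW redundant.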
case RISCV::ADD:
case RISCV::ADDI:
case RISCV::SUB:
case RISCV::MUL:
case RISCV::SLLI: {
unsigned Opc;
switch (N0.getMachineOpcode()) {
default:
llvm_unreachable("Unexpected opcode!");
case RISCV::ADD: Opc = RISCV::ADDW; break;
case RISCV::ADDI: Opc = RISCV::ADDIW; break;
case RISCV::SUB: Opc = RISCV::SUBW; break;
case RISCV::MUL: Opc = RISCV::MULW; break;
case RISCV::SLLI: Opc = RISCV::SLLIW; break;
}
SDValue N00 = N0.getOperand(0);
SDValue N01 = N0.getOperand(1);
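// The shift amount must fit in SLLIW's 5-bit immediate.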
if (N0.getMachineOpcode() == RISCV::SLLI &&
!isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
break;
SDNode *Result =
CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0),
N00, N01);
ReplaceUses(N, Result);
return true;
}
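// These already sign extend their result from 32 bits; the sext.w can
// simply be removed.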
case RISCV::ADDW:
case RISCV::ADDIW:
case RISCV::SUBW:
case RISCV::MULW:
case RISCV::SLLIW:
case RISCV::GREVIW:
case RISCV::GORCIW:
ReplaceUses(N, N0.getNode());
return true;
}
return false;
}
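// Optimize masked RVV pseudo instructions whose mask is known to be all
// ones into the corresponding unmasked pseudo, dropping the mask (and, when
// tail agnostic, the merge) operands.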
bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(SDNode *N) {
const RISCV::RISCVMaskedPseudoInfo *I =
RISCV::getMaskedPseudoInfo(N->getMachineOpcode());
if (!I)
return false;
unsigned MaskOpIdx = I->MaskOpIdx;
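// The mask operand must be the V0 physical register.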
if (!isa<RegisterSDNode>(N->getOperand(MaskOpIdx)) ||
cast<RegisterSDNode>(N->getOperand(MaskOpIdx))->getReg() != RISCV::V0)
return false;
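// V0 must be defined by a glued CopyToReg feeding this node.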
const auto *Glued = N->getGluedNode();
if (!Glued || Glued->getOpcode() != ISD::CopyToReg)
return false;
if (!isa<RegisterSDNode>(Glued->getOperand(1)) ||
cast<RegisterSDNode>(Glued->getOperand(1))->getReg() != RISCV::V0)
return false;
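// The value copied into V0 must come from a VMSET pseudo, i.e. an all-ones
// mask.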
SDValue MaskSetter = Glued->getOperand(2);
const auto IsVMSet = [](unsigned Opc) {
return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
Opc == RISCV::PseudoVMSET_M_B8;
};
if (!MaskSetter->isMachineOpcode() || !IsVMSet(MaskSetter.getMachineOpcode()))
return false;
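// If the pseudo has a vector policy operand, it is the last operand before
// any trailing chain/glue operands. A tail-undisturbed policy with a
// non-undef merge operand forces the tail-undisturbed unmasked form; bail
// out if none exists (the table encodes that by pointing UnmaskedTUPseudo
// back at the masked pseudo).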
Optional<unsigned> TailPolicyOpIdx;
const RISCVInstrInfo &TII = *Subtarget->getInstrInfo();
const MCInstrDesc &MaskedMCID = TII.get(N->getMachineOpcode());
bool IsTA = true;
if (RISCVII::hasVecPolicyOp(MaskedMCID.TSFlags)) {
TailPolicyOpIdx = N->getNumOperands() - 1;
if (N->getOperand(*TailPolicyOpIdx).getValueType() == MVT::Glue)
(*TailPolicyOpIdx)--;
if (N->getOperand(*TailPolicyOpIdx).getValueType() == MVT::Other)
(*TailPolicyOpIdx)--;
if (!(N->getConstantOperandVal(*TailPolicyOpIdx) &
RISCVII::TAIL_AGNOSTIC)) {
if (I->UnmaskedTUPseudo == I->MaskedPseudo && !N->getOperand(0).isUndef())
return false;
if (!N->getOperand(0).isUndef())
IsTA = false;
}
}
unsigned Opc = IsTA ? I->UnmaskedPseudo : I->UnmaskedTUPseudo;
uint64_t TSFlags = TII.get(Opc).TSFlags;
assert((IsTA != RISCVII::hasMergeOp(TSFlags)) &&
RISCVII::hasDummyMaskOp(TSFlags) &&
!RISCVII::hasVecPolicyOp(TSFlags) &&
"Unexpected pseudo to transform to");
(void)TSFlags;
SmallVector<SDValue, 8> Ops;
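// Build the operand list for the unmasked pseudo: drop the merge operand
// (index 0) when tail agnostic, and skip the mask, policy, and glue
// operands.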
for (unsigned OpIdx = IsTA, E = N->getNumOperands(); OpIdx != E; OpIdx++) {
SDValue Op = N->getOperand(OpIdx);
if (OpIdx == MaskOpIdx || OpIdx == TailPolicyOpIdx ||
Op.getValueType() == MVT::Glue)
continue;
Ops.push_back(Op);
}
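// The CopyToReg into V0 is being dropped, so re-glue the new node to
// whatever was glued before it.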
if (auto *TGlued = Glued->getGluedNode())
Ops.push_back(SDValue(TGlued, TGlued->getNumValues() - 1));
SDNode *Result = CurDAG->getMachineNode(Opc, SDLoc(N), N->getVTList(), Ops);
ReplaceUses(N, Result);
return true;
}
FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM,
CodeGenOpt::Level OptLevel) {
return new RISCVDAGToDAGISel(TM, OptLevel);
}