#include "RISCVTargetTransformInfo.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/TargetLowering.h"
#include <cmath>
using namespace llvm;
#define DEBUG_TYPE "riscvtti"
static cl::opt<unsigned> RVVRegisterWidthLMUL(
"riscv-v-register-bit-width-lmul",
cl::desc(
"The LMUL to use for getRegisterBitWidth queries. Affects LMUL used "
"by autovectorized code. Fractional LMULs are not supported."),
cl::init(1), cl::Hidden);
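
// Estimate the cost of materialising an integer constant as the length of
// the lui/addi/slli/... sequence computed by the RISCVMatInt cost model.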
InstructionCost RISCVTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
TTI::TargetCostKind CostKind) {
assert(Ty->isIntegerTy() &&
"getIntImmCost can only estimate cost of materialising integers");
if (Imm == 0)
return TTI::TCC_Free;
const DataLayout &DL = getDataLayout();
return RISCVMatInt::getIntMatCost(Imm, DL.getTypeSizeInBits(Ty),
getST()->getFeatureBits());
}
InstructionCost RISCVTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
const APInt &Imm, Type *Ty,
TTI::TargetCostKind CostKind,
Instruction *Inst) {
assert(Ty->isIntegerTy() &&
"getIntImmCost can only estimate cost of materialising integers");
if (Imm == 0)
return TTI::TCC_Free;
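  // Some instructions in RISC-V can encode a 12-bit signed immediate. Some of
  // them are commutative; for the others the immediate must be a specific
  // operand.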
bool Takes12BitImm = false;
unsigned ImmArgIdx = ~0U;
switch (Opcode) {
case Instruction::GetElementPtr:
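    // Never hoist any arguments to a GetElementPtr. CodeGenPrepare will split
    // up large offsets in a GEP into better parts than ConstantHoisting can.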
return TTI::TCC_Free;
case Instruction::And:
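    // Zbb provides zext.h, so masking with 0xffff is free.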
if (Imm == UINT64_C(0xffff) && ST->hasStdExtZbb())
return TTI::TCC_Free;
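    // Zba provides zext.w (add.uw rd, rs, x0), so masking with 0xffffffff is
    // free.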
if (Imm == UINT64_C(0xffffffff) && ST->hasStdExtZba())
return TTI::TCC_Free;
LLVM_FALLTHROUGH;
case Instruction::Add:
case Instruction::Or:
case Instruction::Xor:
case Instruction::Mul:
Takes12BitImm = true;
break;
case Instruction::Sub:
case Instruction::Shl:
case Instruction::LShr:
case Instruction::AShr:
Takes12BitImm = true;
ImmArgIdx = 1;
break;
default:
break;
}
if (Takes12BitImm) {
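    // Check that the immediate is in an operand position that can encode it.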
if (Instruction::isCommutative(Opcode) || Idx == ImmArgIdx) {
if (Imm.getMinSignedBits() <= 64 &&
getTLI()->isLegalAddImmediate(Imm.getSExtValue())) {
return TTI::TCC_Free;
}
}
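    // Otherwise, use the full materialisation cost.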
return getIntImmCost(Imm, Ty, CostKind);
}
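  // By default, prevent hoisting.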
return TTI::TCC_Free;
}
InstructionCost
RISCVTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
const APInt &Imm, Type *Ty,
TTI::TargetCostKind CostKind) {
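  // No intrinsic-specific immediate handling yet; treat intrinsic immediates
  // as free so they are not hoisted.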
return TTI::TCC_Free;
}
TargetTransformInfo::PopcntSupportKind
RISCVTTIImpl::getPopcntSupport(unsigned TyWidth) {
assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
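  // Zbb provides cpop/cpopw, a single-instruction population count.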
return ST->hasStdExtZbb() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}
bool RISCVTTIImpl::shouldExpandReduction(const IntrinsicInst *II) const {
switch (II->getIntrinsicID()) {
default:
return false;
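  // These reductions have no direct equivalent in RVV; expand them in IR.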
case Intrinsic::vector_reduce_mul:
case Intrinsic::vector_reduce_fmul:
return true;
}
}
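
// vscale is VLEN / RISCV::RVVBitsPerBlock (64 bits), so the subtarget's
// guaranteed minimum and maximum VLEN bound the vscale range.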
Optional<unsigned> RISCVTTIImpl::getMaxVScale() const {
if (ST->hasVInstructions())
return ST->getRealMaxVLen() / RISCV::RVVBitsPerBlock;
return BaseT::getMaxVScale();
}
Optional<unsigned> RISCVTTIImpl::getVScaleForTuning() const {
if (ST->hasVInstructions())
return ST->getRealMinVLen() / RISCV::RVVBitsPerBlock;
return BaseT::getVScaleForTuning();
}
TypeSize
RISCVTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
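  // Clamp the requested LMUL to [1, 8] and round down to a power of two;
  // fractional LMULs are not supported by this query.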
unsigned LMUL = PowerOf2Floor(
std::max<unsigned>(std::min<unsigned>(RVVRegisterWidthLMUL, 8), 1));
switch (K) {
case TargetTransformInfo::RGK_Scalar:
return TypeSize::getFixed(ST->getXLen());
case TargetTransformInfo::RGK_FixedWidthVector:
return TypeSize::getFixed(
ST->useRVVForFixedLengthVectors() ? LMUL * ST->getRealMinVLen() : 0);
case TargetTransformInfo::RGK_ScalableVector:
return TypeSize::getScalable(
ST->hasVInstructions() ? LMUL * RISCV::RVVBitsPerBlock : 0);
}
llvm_unreachable("Unsupported register kind");
}
InstructionCost RISCVTTIImpl::getSpliceCost(VectorType *Tp, int Index) {
std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
  unsigned Cost = 2; // vslidedown+vslideup.
  return Cost * LT.first;
}
InstructionCost RISCVTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
VectorType *Tp, ArrayRef<int> Mask,
int Index, VectorType *SubTp,
ArrayRef<const Value *> Args) {
if (isa<ScalableVectorType>(Tp)) {
std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
switch (Kind) {
default:
break;
    case TTI::SK_Broadcast:
      // A broadcast is a single vmv.v.x / vfmv.v.f per legalized part.
      return LT.first;
case TTI::SK_Splice:
return getSpliceCost(Tp, Index);
    case TTI::SK_Reverse:
      // Most of the cost is producing the vrgather index register: a vid.v
      // plus a vrsub.vx to build the reversed indices, then the vrgather.vv
      // itself.
      if (Tp->getElementType()->isIntegerTy(1))
        // Mask vectors additionally need an extend before the vrgather and a
        // truncate back to a mask afterwards.
        return LT.first * 9;
      return LT.first * 6;
}
}
return BaseT::getShuffleCost(Kind, Tp, Mask, Index, SubTp);
}
InstructionCost
RISCVTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
unsigned AddressSpace,
TTI::TargetCostKind CostKind) {
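  // RVV supports masking directly on vle/vse, so a legal scalable masked
  // access is costed like an ordinary memory op.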
if (!isa<ScalableVectorType>(Src))
return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
CostKind);
return getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, CostKind);
}
InstructionCost RISCVTTIImpl::getGatherScatterOpCost(
unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
if (CostKind != TTI::TCK_RecipThroughput)
return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
Alignment, CostKind, I);
  if ((Opcode == Instruction::Load &&
       !isLegalMaskedGather(DataTy, Alignment)) ||
      (Opcode == Instruction::Store &&
       !isLegalMaskedScatter(DataTy, Alignment)))
return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
Alignment, CostKind, I);
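  // Model a legal gather/scatter as one scalar memory op per element: VLMAX
  // elements for scalable types, the exact element count for fixed types.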
auto &VTy = *cast<VectorType>(DataTy);
InstructionCost MemOpCost = getMemoryOpCost(Opcode, VTy.getElementType(),
Alignment, 0, CostKind, I);
unsigned NumLoads = getMaxVLFor(&VTy);
return NumLoads * MemOpCost;
}
InstructionCost
RISCVTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
TTI::TargetCostKind CostKind) {
auto *RetTy = ICA.getReturnType();
switch (ICA.getID()) {
case Intrinsic::experimental_stepvector: {
    unsigned Cost = 1; // vid.v
    auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
return Cost + (LT.first - 1);
}
default:
break;
}
return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}
InstructionCost RISCVTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
Type *Src,
TTI::CastContextHint CCH,
TTI::TargetCostKind CostKind,
const Instruction *I) {
if (isa<VectorType>(Dst) && isa<VectorType>(Src)) {
if (!isTypeLegal(Src) || !isTypeLegal(Dst))
return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
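    // Skip element types wider than ELEN; they are not legal for RVV.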
if (Src->getScalarSizeInBits() > ST->getELEN() ||
Dst->getScalarSizeInBits() > ST->getELEN())
return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
int PowDiff = (int)Log2_32(Dst->getScalarSizeInBits()) -
(int)Log2_32(Src->getScalarSizeInBits());
switch (ISD) {
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
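      // A single vsext.vfN / vzext.vfN covers any legal widening factor.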
return 1;
case ISD::TRUNCATE:
case ISD::FP_EXTEND:
case ISD::FP_ROUND:
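      // Truncates and FP width changes halve/double the element width one
      // step at a time (vnsrl/vfncvt/vfwcvt), so charge one per step.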
return std::abs(PowDiff);
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT:
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP:
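      // Same-width or adjacent-width int/FP conversions are a single
      // vfcvt/vfwcvt/vfncvt; wider gaps need extra widening or narrowing
      // steps.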
if (std::abs(PowDiff) <= 1)
return 1;
if (Src->isIntOrIntVectorTy())
return 2;
return std::abs(PowDiff);
}
}
return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
}
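
// Return an upper bound on the number of elements one operation processes:
// VLMAX for scalable types, the exact element count for fixed types.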
unsigned RISCVTTIImpl::getMaxVLFor(VectorType *Ty) {
if (isa<ScalableVectorType>(Ty)) {
const unsigned EltSize = DL.getTypeSizeInBits(Ty->getElementType());
const unsigned MinSize = DL.getTypeSizeInBits(Ty).getKnownMinValue();
const unsigned VectorBitsMax = ST->getRealMaxVLen();
return RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
}
return cast<FixedVectorType>(Ty)->getNumElements();
}
InstructionCost
RISCVTTIImpl::getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
bool IsUnsigned,
TTI::TargetCostKind CostKind) {
if (isa<FixedVectorType>(Ty) && !ST->useRVVForFixedLengthVectors())
return BaseT::getMinMaxReductionCost(Ty, CondTy, IsUnsigned, CostKind);
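  // Skip element types wider than ELEN; they are not legal for RVV.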
if (Ty->getScalarSizeInBits() > ST->getELEN())
return BaseT::getMinMaxReductionCost(Ty, CondTy, IsUnsigned, CostKind);
std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
if (Ty->getElementType()->isIntegerTy(1))
return (LT.first - 1) + 3;
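  // A reduction is two scalar moves (vmv.s.x / vmv.x.s) around the vred* op;
  // charge log2(VL) for the reduction tree inside it.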
InstructionCost BaseCost = 2;
unsigned VL = getMaxVLFor(Ty);
return (LT.first - 1) + BaseCost + Log2_32_Ceil(VL);
}
InstructionCost
RISCVTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
Optional<FastMathFlags> FMF,
TTI::TargetCostKind CostKind) {
if (isa<FixedVectorType>(Ty) && !ST->useRVVForFixedLengthVectors())
return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
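  // Skip element types wider than ELEN; they are not legal for RVV.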
if (Ty->getScalarSizeInBits() > ST->getELEN())
return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
if (ISD != ISD::ADD && ISD != ISD::OR && ISD != ISD::XOR && ISD != ISD::AND &&
ISD != ISD::FADD)
return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
if (Ty->getElementType()->isIntegerTy(1))
return (LT.first - 1) + (ISD == ISD::AND ? 3 : 2);
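  // Two scalar moves (vmv.s.x / vmv.x.s) plus the reduction op itself.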
InstructionCost BaseCost = 2;
unsigned VL = getMaxVLFor(Ty);
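  // Ordered FP reductions (vfredosum) are serial in VL; unordered reductions
  // use the log2 tree estimate.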
if (TTI::requiresOrderedReduction(FMF))
return (LT.first - 1) + BaseCost + VL;
return (LT.first - 1) + BaseCost + Log2_32_Ceil(VL);
}
void RISCVTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
TTI::UnrollingPreferences &UP,
OptimizationRemarkEmitter *ORE) {
if (ST->enableDefaultUnroll())
return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP, ORE);
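  // Enable upper-bound unrolling universally, independent of the checks
  // below.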
UP.UpperBound = true;
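  // Disable loop unrolling for Oz and Os.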
UP.OptSizeThreshold = 0;
UP.PartialOptSizeThreshold = 0;
if (L->getHeader()->getParent()->hasOptSize())
return;
SmallVector<BasicBlock *, 4> ExitingBlocks;
L->getExitingBlocks(ExitingBlocks);
LLVM_DEBUG(dbgs() << "Loop has:\n"
<< "Blocks: " << L->getNumBlocks() << "\n"
<< "Exit blocks: " << ExitingBlocks.size() << "\n");
if (ExitingBlocks.size() > 2)
return;
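  // Keep the loop body CFG small; four blocks still permits an if-then-else
  // diamond.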
if (L->getNumBlocks() > 4)
return;
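  // Don't unroll vectorized loops, including the remainder loop.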
if (getBooleanLoopAttribute(L, "llvm.loop.isvectorized"))
return;
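  // Scan the loop: don't unroll loops with calls, since that could prevent
  // inlining.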
InstructionCost Cost = 0;
for (auto *BB : L->getBlocks()) {
for (auto &I : *BB) {
if (I.getType()->isVectorTy())
return;
if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
if (!isLoweredToCall(F))
continue;
}
return;
}
SmallVector<const Value *> Operands(I.operand_values());
Cost +=
getUserCost(&I, Operands, TargetTransformInfo::TCK_SizeAndLatency);
}
}
LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");
UP.Partial = true;
UP.Runtime = true;
UP.UnrollRemainder = true;
UP.UnrollAndJam = true;
UP.UnrollAndJamInnerLoopThreshold = 60;
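  // Force unrolling of small loops; the taken backedge branch dominates their
  // cost.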
if (Cost < 12)
UP.Force = true;
}
void RISCVTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
TTI::PeelingPreferences &PP) {
BaseT::getPeelingPreferences(L, SE, PP);
}
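
// Estimate the number of vector registers a value of this type occupies:
// the LMUL implied by its size for scalable vectors, one register per
// (minimum) VLEN bits for fixed-length vectors lowered to RVV.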
unsigned RISCVTTIImpl::getRegUsageForType(Type *Ty) {
TypeSize Size = Ty->getPrimitiveSizeInBits();
if (Ty->isVectorTy()) {
if (Size.isScalable() && ST->hasVInstructions())
return divideCeil(Size.getKnownMinValue(), RISCV::RVVBitsPerBlock);
if (ST->useRVVForFixedLengthVectors())
return divideCeil(Size, ST->getRealMinVLen());
}
return BaseT::getRegUsageForType(Ty);
}