#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#ifdef EXPENSIVE_CHECKS
#include "llvm/IR/Verifier.h"
#endif
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <memory>
#include <set>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
using namespace llvm;
using namespace llvm::PatternMatch;
using namespace slpvectorizer;
#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"
STATISTIC(NumVectorInstructions, "Number of vector instructions generated");
cl::opt<bool> RunSLPVectorization("vectorize-slp", cl::init(true), cl::Hidden,
cl::desc("Run the SLP vectorization passes"));
static cl::opt<int>
SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
cl::desc("Only vectorize if you gain more than this "
"number "));
static cl::opt<bool>
ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
cl::desc("Attempt to vectorize horizontal reductions"));
static cl::opt<bool> ShouldStartVectorizeHorAtStore(
"slp-vectorize-hor-store", cl::init(false), cl::Hidden,
cl::desc(
"Attempt to vectorize horizontal reductions feeding into a store"));
static cl::opt<int>
MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
cl::desc("Attempt to vectorize for this register size in bits"));
static cl::opt<unsigned>
MaxVFOption("slp-max-vf", cl::init(0), cl::Hidden,
cl::desc("Maximum SLP vectorization factor (0=unlimited)"));
static cl::opt<int>
MaxStoreLookup("slp-max-store-lookup", cl::init(32), cl::Hidden,
cl::desc("Maximum depth of the lookup for consecutive stores."));
static cl::opt<int>
ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
cl::desc("Limit the size of the SLP scheduling region per block"));
static cl::opt<int> MinVectorRegSizeOption(
"slp-min-reg-size", cl::init(128), cl::Hidden,
cl::desc("Attempt to vectorize for this register size in bits"));
static cl::opt<unsigned> RecursionMaxDepth(
"slp-recursion-max-depth", cl::init(12), cl::Hidden,
cl::desc("Limit the recursion depth when building a vectorizable tree"));
static cl::opt<unsigned> MinTreeSize(
"slp-min-tree-size", cl::init(3), cl::Hidden,
cl::desc("Only vectorize small trees if they are fully vectorizable"));
static cl::opt<int> LookAheadMaxDepth(
"slp-max-look-ahead-depth", cl::init(2), cl::Hidden,
cl::desc("The maximum look-ahead depth for operand reordering scores"));
static cl::opt<int> RootLookAheadMaxDepth(
"slp-max-root-look-ahead-depth", cl::init(2), cl::Hidden,
cl::desc("The maximum look-ahead depth for searching best rooting option"));
static cl::opt<bool>
ViewSLPTree("view-slp-tree", cl::Hidden,
cl::desc("Display the SLP trees with Graphviz"));
// Limit the number of alias checks per scheduling region.
static const unsigned AliasedCheckLimit = 10;
// The maximum distance between load/store instructions where alias checks
// are done. This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;
/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
/// regions to be handled.
static const int MinScheduleRegionSize = 16;
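/// Predicate for the element types that the SLP vectorizer supports.
///
/// The most important thing to filter here are types which are invalid in LLVM
/// vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path (such as x86_fp80 and ppc_fp128).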
static bool isValidElementType(Type *Ty) {
return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
!Ty->isPPC_FP128Ty();
}
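/// \returns True if the value is a constant (but not globals/constant
/// expressions).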
static bool isConstant(Value *V) {
return isa<Constant>(V) && !isa<ConstantExpr>(V) && !isa<GlobalValue>(V);
}
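/// Checks if \p V is one of vector-like instructions, i.e. undef,
/// insertelement/extractelement with constant indices for fixed vector type or
/// extractvalue instruction.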
static bool isVectorLikeInstWithConstOps(Value *V) {
if (!isa<InsertElementInst, ExtractElementInst>(V) &&
!isa<ExtractValueInst, UndefValue>(V))
return false;
auto *I = dyn_cast<Instruction>(V);
if (!I || isa<ExtractValueInst>(I))
return true;
if (!isa<FixedVectorType>(I->getOperand(0)->getType()))
return false;
if (isa<ExtractElementInst>(I))
return isConstant(I->getOperand(1));
assert(isa<InsertElementInst>(V) && "Expected only insertelement.");
return isConstant(I->getOperand(2));
}
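/// \returns true if all of the instructions in \p VL are in the same block or
/// false otherwise.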
static bool allSameBlock(ArrayRef<Value *> VL) {
Instruction *I0 = dyn_cast<Instruction>(VL[0]);
if (!I0)
return false;
if (all_of(VL, isVectorLikeInstWithConstOps))
return true;
BasicBlock *BB = I0->getParent();
for (int I = 1, E = VL.size(); I < E; I++) {
auto *II = dyn_cast<Instruction>(VL[I]);
if (!II)
return false;
if (BB != II->getParent())
return false;
}
return true;
}
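/// \returns True if all of the values in \p VL are constants (but not
/// globals/constant expressions).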
static bool allConstant(ArrayRef<Value *> VL) {
return all_of(VL, isConstant);
}
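/// \returns True if all non-undef values in \p VL are identical and at least
/// one value is not undef, i.e. \p VL describes a splat.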
static bool isSplat(ArrayRef<Value *> VL) {
Value *FirstNonUndef = nullptr;
for (Value *V : VL) {
if (isa<UndefValue>(V))
continue;
if (!FirstNonUndef) {
FirstNonUndef = V;
continue;
}
if (V != FirstNonUndef)
return false;
}
return FirstNonUndef != nullptr;
}
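/// \returns True if \p I is commutative; handles CmpInst and BinaryOperator.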
static bool isCommutative(Instruction *I) {
if (auto *Cmp = dyn_cast<CmpInst>(I))
return Cmp->isCommutative();
if (auto *BO = dyn_cast<BinaryOperator>(I))
return BO->isCommutative();
return false;
}
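/// \returns True if the value is an UndefValue or a fixed-width vector
/// constant all of whose elements are undef.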
static bool isUndefVector(const Value *V) {
if (isa<UndefValue>(V))
return true;
auto *C = dyn_cast<Constant>(V);
if (!C)
return false;
if (!C->containsUndefOrPoisonElement())
return false;
auto *VecTy = dyn_cast<FixedVectorType>(C->getType());
if (!VecTy)
return false;
for (unsigned I = 0, E = VecTy->getNumElements(); I != E; ++I) {
if (Constant *Elem = C->getAggregateElement(I))
if (!isa<UndefValue>(Elem))
return false;
}
return true;
}
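/// Checks if the vector of instructions can be represented as a shuffle, like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %x0x0 = mul i8 %x0, %x0
/// %x3x3 = mul i8 %x3, %x3
/// %y1y1 = mul i8 %y1, %y1
/// %y2y2 = mul i8 %y2, %y2
/// %ins1 = insertelement <4 x i8> poison, i8 %x0x0, i32 0
/// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
/// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
/// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
/// ret <4 x i8> %ins4
/// InstCombiner transforms this into a shuffle and vector mul.
/// \p Mask is set to the shuffle mask equivalent to the extracted elements.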
static Optional<TargetTransformInfo::ShuffleKind>
isFixedVectorShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) {
const auto *It =
find_if(VL, [](Value *V) { return isa<ExtractElementInst>(V); });
if (It == VL.end())
return None;
auto *EI0 = cast<ExtractElementInst>(*It);
if (isa<ScalableVectorType>(EI0->getVectorOperandType()))
return None;
unsigned Size =
cast<FixedVectorType>(EI0->getVectorOperandType())->getNumElements();
Value *Vec1 = nullptr;
Value *Vec2 = nullptr;
enum ShuffleMode { Unknown, Select, Permute };
ShuffleMode CommonShuffleMode = Unknown;
Mask.assign(VL.size(), UndefMaskElem);
for (unsigned I = 0, E = VL.size(); I < E; ++I) {
if (isa<UndefValue>(VL[I]))
continue;
auto *EI = cast<ExtractElementInst>(VL[I]);
if (isa<ScalableVectorType>(EI->getVectorOperandType()))
return None;
auto *Vec = EI->getVectorOperand();
if (isUndefVector(Vec))
continue;
if (cast<FixedVectorType>(Vec->getType())->getNumElements() != Size)
return None;
if (isa<UndefValue>(EI->getIndexOperand()))
continue;
auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
if (!Idx)
return None;
if (Idx->getValue().uge(Size))
continue;
unsigned IntIdx = Idx->getValue().getZExtValue();
Mask[I] = IntIdx;
if (!Vec1 || Vec1 == Vec) {
Vec1 = Vec;
} else if (!Vec2 || Vec2 == Vec) {
Vec2 = Vec;
Mask[I] += Size;
} else {
return None;
}
if (CommonShuffleMode == Permute)
continue;
if (IntIdx != I) {
CommonShuffleMode = Permute;
continue;
}
CommonShuffleMode = Select;
}
if (CommonShuffleMode == Select && Vec2)
return TargetTransformInfo::SK_Select;
return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
: TargetTransformInfo::SK_PermuteSingleSrc;
}
namespace {
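/// Main data required for vectorization of instructions.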
struct InstructionsState {
Value *OpValue = nullptr;
Instruction *MainOp = nullptr;
Instruction *AltOp = nullptr;
unsigned getOpcode() const {
return MainOp ? MainOp->getOpcode() : 0;
}
unsigned getAltOpcode() const {
return AltOp ? AltOp->getOpcode() : 0;
}
bool isAltShuffle() const { return AltOp != MainOp; }
bool isOpcodeOrAlt(Instruction *I) const {
unsigned CheckedOpcode = I->getOpcode();
return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode;
}
InstructionsState() = delete;
InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp)
: OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {}
};
} // end anonymous namespace
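/// Chooses the correct key for scheduling data. If \p Op has the same (or
/// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is
/// \p OpValue.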
static Value *isOneOf(const InstructionsState &S, Value *Op) {
auto *I = dyn_cast<Instruction>(Op);
if (I && S.isOpcodeOrAlt(I))
return Op;
return S.OpValue;
}
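/// \returns true if \p Opcode is allowed as part of the main/alternate
/// instruction for SLP vectorization.
///
/// Example of an unsupported opcode is an integer division that can
/// potentially cause UB if the "shuffled out" lane would result in division
/// by zero.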
static bool isValidForAlternation(unsigned Opcode) {
if (Instruction::isIntDivRem(Opcode))
return false;
return true;
}
static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
unsigned BaseIndex = 0);
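/// Checks if the provided operands of two cmp instructions are compatible,
/// i.e. compatible instructions or constants, or just some other regular
/// values.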
static bool areCompatibleCmpOps(Value *BaseOp0, Value *BaseOp1, Value *Op0,
Value *Op1) {
return (isConstant(BaseOp0) && isConstant(Op0)) ||
(isConstant(BaseOp1) && isConstant(Op1)) ||
(!isa<Instruction>(BaseOp0) && !isa<Instruction>(Op0) &&
!isa<Instruction>(BaseOp1) && !isa<Instruction>(Op1)) ||
getSameOpcode({BaseOp0, Op0}).getOpcode() ||
getSameOpcode({BaseOp1, Op1}).getOpcode();
}
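/// \returns analysis of the Instructions in \p VL described in
/// InstructionsState: the opcode (and alternate opcode, if any) under which
/// the whole list could be vectorized even if its structure is diverse.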
static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
unsigned BaseIndex) {
if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); }))
return InstructionsState(VL[BaseIndex], nullptr, nullptr);
bool IsCastOp = isa<CastInst>(VL[BaseIndex]);
bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]);
bool IsCmpOp = isa<CmpInst>(VL[BaseIndex]);
CmpInst::Predicate BasePred =
IsCmpOp ? cast<CmpInst>(VL[BaseIndex])->getPredicate()
: CmpInst::BAD_ICMP_PREDICATE;
unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode();
unsigned AltOpcode = Opcode;
unsigned AltIndex = BaseIndex;
for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) {
unsigned InstOpcode = cast<Instruction>(VL[Cnt])->getOpcode();
if (IsBinOp && isa<BinaryOperator>(VL[Cnt])) {
if (InstOpcode == Opcode || InstOpcode == AltOpcode)
continue;
if (Opcode == AltOpcode && isValidForAlternation(InstOpcode) &&
isValidForAlternation(Opcode)) {
AltOpcode = InstOpcode;
AltIndex = Cnt;
continue;
}
} else if (IsCastOp && isa<CastInst>(VL[Cnt])) {
Type *Ty0 = cast<Instruction>(VL[BaseIndex])->getOperand(0)->getType();
Type *Ty1 = cast<Instruction>(VL[Cnt])->getOperand(0)->getType();
if (Ty0 == Ty1) {
if (InstOpcode == Opcode || InstOpcode == AltOpcode)
continue;
if (Opcode == AltOpcode) {
assert(isValidForAlternation(Opcode) &&
isValidForAlternation(InstOpcode) &&
"Cast isn't safe for alternation, logic needs to be updated!");
AltOpcode = InstOpcode;
AltIndex = Cnt;
continue;
}
}
} else if (IsCmpOp && isa<CmpInst>(VL[Cnt])) {
auto *BaseInst = cast<Instruction>(VL[BaseIndex]);
auto *Inst = cast<Instruction>(VL[Cnt]);
Type *Ty0 = BaseInst->getOperand(0)->getType();
Type *Ty1 = Inst->getOperand(0)->getType();
if (Ty0 == Ty1) {
Value *BaseOp0 = BaseInst->getOperand(0);
Value *BaseOp1 = BaseInst->getOperand(1);
Value *Op0 = Inst->getOperand(0);
Value *Op1 = Inst->getOperand(1);
CmpInst::Predicate CurrentPred =
cast<CmpInst>(VL[Cnt])->getPredicate();
CmpInst::Predicate SwappedCurrentPred =
CmpInst::getSwappedPredicate(CurrentPred);
if (InstOpcode == Opcode) {
if (BasePred == CurrentPred &&
areCompatibleCmpOps(BaseOp0, BaseOp1, Op0, Op1))
continue;
if (BasePred == SwappedCurrentPred &&
areCompatibleCmpOps(BaseOp0, BaseOp1, Op1, Op0))
continue;
if (E == 2 &&
(BasePred == CurrentPred || BasePred == SwappedCurrentPred))
continue;
auto *AltInst = cast<CmpInst>(VL[AltIndex]);
CmpInst::Predicate AltPred = AltInst->getPredicate();
Value *AltOp0 = AltInst->getOperand(0);
Value *AltOp1 = AltInst->getOperand(1);
if (AltPred == CurrentPred &&
areCompatibleCmpOps(AltOp0, AltOp1, Op0, Op1))
continue;
if (AltPred == SwappedCurrentPred &&
areCompatibleCmpOps(AltOp0, AltOp1, Op1, Op0))
continue;
}
if (BaseIndex == AltIndex && BasePred != CurrentPred) {
assert(isValidForAlternation(Opcode) &&
isValidForAlternation(InstOpcode) &&
"Cast isn't safe for alternation, logic needs to be updated!");
AltIndex = Cnt;
continue;
}
auto *AltInst = cast<CmpInst>(VL[AltIndex]);
CmpInst::Predicate AltPred = AltInst->getPredicate();
if (BasePred == CurrentPred || BasePred == SwappedCurrentPred ||
AltPred == CurrentPred || AltPred == SwappedCurrentPred)
continue;
}
} else if (InstOpcode == Opcode || InstOpcode == AltOpcode)
continue;
return InstructionsState(VL[BaseIndex], nullptr, nullptr);
}
return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]),
cast<Instruction>(VL[AltIndex]));
}
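/// \returns true if all of the values in \p VL have the same type or false
/// otherwise.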
static bool allSameType(ArrayRef<Value *> VL) {
Type *Ty = VL[0]->getType();
for (int i = 1, e = VL.size(); i < e; i++)
if (VL[i]->getType() != Ty)
return false;
return true;
}
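/// \returns the extracted index of an extractelement or extractvalue
/// instruction \p E, or None if the index is not a constant integer (or, for
/// extractvalue, not a single index).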
static Optional<unsigned> getExtractIndex(Instruction *E) {
unsigned Opcode = E->getOpcode();
assert((Opcode == Instruction::ExtractElement ||
Opcode == Instruction::ExtractValue) &&
"Expected extractelement or extractvalue instruction.");
if (Opcode == Instruction::ExtractElement) {
auto *CI = dyn_cast<ConstantInt>(E->getOperand(1));
if (!CI)
return None;
return CI->getZExtValue();
}
ExtractValueInst *EI = cast<ExtractValueInst>(E);
if (EI->getNumIndices() != 1)
return None;
return *EI->idx_begin();
}
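/// \returns True if the in-tree use of the scalar also needs an extract.
/// This refers to a possible scalar operand in a vectorized instruction,
/// e.g. the pointer operand of a vectorized load or store.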
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
TargetLibraryInfo *TLI) {
unsigned Opcode = UserInst->getOpcode();
switch (Opcode) {
case Instruction::Load: {
LoadInst *LI = cast<LoadInst>(UserInst);
return (LI->getPointerOperand() == Scalar);
}
case Instruction::Store: {
StoreInst *SI = cast<StoreInst>(UserInst);
return (SI->getPointerOperand() == Scalar);
}
case Instruction::Call: {
CallInst *CI = cast<CallInst>(UserInst);
Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
for (unsigned i = 0, e = CI->arg_size(); i != e; ++i) {
if (isVectorIntrinsicWithScalarOpAtArg(ID, i))
return (CI->getArgOperand(i) == Scalar);
}
LLVM_FALLTHROUGH;
}
default:
return false;
}
}
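/// \returns the AA location that is being accessed by the instruction.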
static MemoryLocation getLocation(Instruction *I) {
if (StoreInst *SI = dyn_cast<StoreInst>(I))
return MemoryLocation::get(SI);
if (LoadInst *LI = dyn_cast<LoadInst>(I))
return MemoryLocation::get(LI);
return MemoryLocation();
}
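/// \returns True if the instruction is not a volatile or atomic load/store.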
static bool isSimple(Instruction *I) {
if (LoadInst *LI = dyn_cast<LoadInst>(I))
return LI->isSimple();
if (StoreInst *SI = dyn_cast<StoreInst>(I))
return SI->isSimple();
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
return !MI->isVolatile();
return true;
}
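/// Shuffles \p Mask in accordance with the given \p SubMask, composing the
/// two shuffles: the result applies \p Mask first and \p SubMask on top of
/// it, i.e. NewMask[I] == Mask[SubMask[I]]. E.g. composing Mask = [1, 0]
/// with SubMask = [1, 0] yields the identity mask [0, 1].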
static void addMask(SmallVectorImpl<int> &Mask, ArrayRef<int> SubMask) {
if (SubMask.empty())
return;
if (Mask.empty()) {
Mask.append(SubMask.begin(), SubMask.end());
return;
}
SmallVector<int> NewMask(SubMask.size(), UndefMaskElem);
int TermValue = std::min(Mask.size(), SubMask.size());
for (int I = 0, E = SubMask.size(); I < E; ++I) {
if (SubMask[I] >= TermValue || SubMask[I] == UndefMaskElem ||
Mask[SubMask[I]] >= TermValue)
continue;
NewMask[I] = Mask[SubMask[I]];
}
Mask.swap(NewMask);
}
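/// \p Order may contain entries whose value is greater than or equal to its
/// size; such entries mark positions that correspond to undef values. This
/// function replaces each such position with one of the still unused
/// in-bounds indices so that \p Order becomes a proper permutation.
/// As an example, Order below has two masked positions (marked with 9):
/// before:  6 9 5 4 9 2 1 0
/// after:   6 3 5 4 7 2 1 0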
static void fixupOrderingIndices(SmallVectorImpl<unsigned> &Order) {
const unsigned Sz = Order.size();
SmallBitVector UnusedIndices(Sz, true);
SmallBitVector MaskedIndices(Sz);
for (unsigned I = 0; I < Sz; ++I) {
if (Order[I] < Sz)
UnusedIndices.reset(Order[I]);
else
MaskedIndices.set(I);
}
if (MaskedIndices.none())
return;
assert(UnusedIndices.count() == MaskedIndices.count() &&
"Non-synced masked/available indices.");
int Idx = UnusedIndices.find_first();
int MIdx = MaskedIndices.find_first();
while (MIdx >= 0) {
assert(Idx >= 0 && "Indices must be synced.");
Order[MIdx] = Idx;
Idx = UnusedIndices.find_next(Idx);
MIdx = MaskedIndices.find_next(MIdx);
}
}
namespace llvm {
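/// Converts the \p Indices permutation to a shuffle mask performing the
/// inverse permutation, i.e. Mask[Indices[I]] == I. E.g. for
/// Indices = [2, 0, 1] the resulting Mask is [1, 2, 0].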
static void inversePermutation(ArrayRef<unsigned> Indices,
SmallVectorImpl<int> &Mask) {
Mask.clear();
const unsigned E = Indices.size();
Mask.resize(E, UndefMaskElem);
for (unsigned I = 0; I < E; ++I)
Mask[Indices[I]] = I;
}
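/// \returns the insertion index of an InsertElement or InsertValue
/// instruction, using \p Offset as the base offset, or None if the index
/// cannot be determined as a constant.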
static Optional<unsigned> getInsertIndex(const Value *InsertInst,
unsigned Offset = 0) {
int Index = Offset;
if (const auto *IE = dyn_cast<InsertElementInst>(InsertInst)) {
if (const auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2))) {
auto *VT = cast<FixedVectorType>(IE->getType());
if (CI->getValue().uge(VT->getNumElements()))
return None;
Index *= VT->getNumElements();
Index += CI->getZExtValue();
return Index;
}
return None;
}
const auto *IV = cast<InsertValueInst>(InsertInst);
Type *CurrentType = IV->getType();
for (unsigned I : IV->indices()) {
if (const auto *ST = dyn_cast<StructType>(CurrentType)) {
Index *= ST->getNumElements();
CurrentType = ST->getElementType(I);
} else if (const auto *AT = dyn_cast<ArrayType>(CurrentType)) {
Index *= AT->getNumElements();
CurrentType = AT->getElementType();
} else {
return None;
}
Index += I;
}
return Index;
}
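/// Reorders the list of scalars in accordance with the given \p Mask.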
static void reorderScalars(SmallVectorImpl<Value *> &Scalars,
ArrayRef<int> Mask) {
assert(!Mask.empty() && "Expected non-empty mask.");
SmallVector<Value *> Prev(Scalars.size(),
UndefValue::get(Scalars.front()->getType()));
Prev.swap(Scalars);
for (unsigned I = 0, E = Prev.size(); I < E; ++I)
if (Mask[I] != UndefMaskElem)
Scalars[Mask[I]] = Prev[I];
}
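/// \returns true if \p V is not an instruction, or it is an instruction
/// without non-def-use dependencies whose operands are all non-instructions,
/// phi nodes, or instructions from a different basic block. Such values do
/// not constrain scheduling on the operand side.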
static bool areAllOperandsNonInsts(Value *V) {
auto *I = dyn_cast<Instruction>(V);
if (!I)
return true;
return !mayHaveNonDefUseDependency(*I) &&
all_of(I->operands(), [I](Value *V) {
auto *IO = dyn_cast<Instruction>(V);
if (!IO)
return true;
return isa<PHINode>(IO) || IO->getParent() != I->getParent();
});
}
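/// \returns true if \p V is not an instruction, or it is an instruction that
/// does not read or write memory, has fewer than 8 uses, and all of its
/// instruction users are either phi nodes or placed in a different basic
/// block. Such values do not constrain scheduling on the user side.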
static bool isUsedOutsideBlock(Value *V) {
auto *I = dyn_cast<Instruction>(V);
if (!I)
return true;
constexpr int UsesLimit = 8;
return !I->mayReadOrWriteMemory() && !I->hasNUsesOrMore(UsesLimit) &&
all_of(I->users(), [I](User *U) {
auto *IU = dyn_cast<Instruction>(U);
if (!IU)
return true;
return IU->getParent() != I->getParent() || isa<PHINode>(IU);
});
}
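/// Checks if the specified value does not require scheduling: neither its
/// operands nor its users pin it inside the current basic block.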
static bool doesNotNeedToBeScheduled(Value *V) {
return areAllOperandsNonInsts(V) && isUsedOutsideBlock(V);
}
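/// Checks if the specified array of instructions does not require scheduling.
/// It is so if either all values are free on the user side, or all values are
/// free on the operand side.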
static bool doesNotNeedToSchedule(ArrayRef<Value *> VL) {
return !VL.empty() &&
(all_of(VL, isUsedOutsideBlock) || all_of(VL, areAllOperandsNonInsts));
}
namespace slpvectorizer {
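/// Bottom Up SLP Vectorizer.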
class BoUpSLP {
struct TreeEntry;
struct ScheduleData;
public:
using ValueList = SmallVector<Value *, 8>;
using InstrList = SmallVector<Instruction *, 16>;
using ValueSet = SmallPtrSet<Value *, 16>;
using StoreList = SmallVector<StoreInst *, 8>;
using ExtraValueToDebugLocsMap =
MapVector<Value *, SmallVector<Instruction *, 2>>;
using OrdersType = SmallVector<unsigned, 4>;
BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
TargetLibraryInfo *TLi, AAResults *Aa, LoopInfo *Li,
DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
const DataLayout *DL, OptimizationRemarkEmitter *ORE)
: BatchAA(*Aa), F(Func), SE(Se), TTI(Tti), TLI(TLi), LI(Li),
DT(Dt), AC(AC), DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) {
CodeMetrics::collectEphemeralValues(F, AC, EphValues);
if (MaxVectorRegSizeOption.getNumOccurrences())
MaxVecRegSize = MaxVectorRegSizeOption;
else
MaxVecRegSize =
TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
.getFixedSize();
if (MinVectorRegSizeOption.getNumOccurrences())
MinVecRegSize = MinVectorRegSizeOption;
else
MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
}
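/// Vectorize the tree computed by buildTree(). Returns the vectorized root
/// value.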
Value *vectorizeTree();
Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues);
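/// \returns the cost incurred by unwanted spills and fills, caused by
/// holding live values over call sites.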
InstructionCost getSpillCost() const;
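/// \returns the vectorization cost of the current tree. A negative number
/// means that vectorization is profitable.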
InstructionCost getTreeCost(ArrayRef<Value *> VectorizedVals = None);
void buildTree(ArrayRef<Value *> Roots,
const SmallDenseSet<Value *> &UserIgnoreLst);
void buildTree(ArrayRef<Value *> Roots);
void
buildExternalUses(const ExtraValueToDebugLocsMap &ExternallyUsedValues = {});
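/// Clear the internal data structures that are created by 'buildTree'.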
void deleteTree() {
VectorizableTree.clear();
ScalarToTreeEntry.clear();
MustGather.clear();
ExternalUses.clear();
for (auto &Iter : BlocksSchedules) {
BlockScheduling *BS = Iter.second.get();
BS->clear();
}
MinBWs.clear();
InstrElementSize.clear();
UserIgnoreList = nullptr;
}
unsigned getTreeSize() const { return VectorizableTree.size(); }
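/// Perform LICM and CSE on the newly generated gather sequences.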
void optimizeGatherSequence();
Optional<OrdersType> findReusedOrderedScalars(const TreeEntry &TE);
Optional<OrdersType> findPartiallyOrderedLoads(const TreeEntry &TE);
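/// Gets reordering data for the given tree entry. If the entry is vectorized,
/// just return ReorderIndices, otherwise check if the scalars can be
/// reordered and return the most optimal order.
/// \param TopToBottom If true, include the order of vectorized stores and
/// insertelement nodes, otherwise only gather and extractelement nodes.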
Optional<OrdersType> getReorderingData(const TreeEntry &TE, bool TopToBottom);
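/// Reorders the current graph to the most profitable order starting from the
/// root node to the leaf nodes.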
void reorderTopToBottom();
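/// Reorders the current graph to the most profitable order starting from
/// leaves to the root.
/// \param IgnoreReorder true if the order of the root node may be ignored.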
void reorderBottomToTop(bool IgnoreReorder = false);
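/// \returns the vector element size in bits to use when vectorizing the
/// expression tree ending at \p V. If V is a store, the size is the width of
/// the stored value. Otherwise, the size is the width of the largest loaded
/// value reaching V.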
unsigned getVectorElementSize(Value *V);
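/// Compute the minimum type sizes required to represent the entries in a
/// vectorizable tree.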
void computeMinimumValueSizes();
unsigned getMaxVecRegSize() const {
return MaxVecRegSize;
}
unsigned getMinVecRegSize() const {
return MinVecRegSize;
}
unsigned getMinVF(unsigned Sz) const {
return std::max(2U, getMinVecRegSize() / Sz);
}
unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
unsigned MaxVF = MaxVFOption.getNumOccurrences() ?
MaxVFOption : TTI->getMaximumVF(ElemWidth, Opcode);
return MaxVF ? MaxVF : UINT_MAX;
}
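/// Check if a homogeneous aggregate is isomorphic to some VectorType.
/// \returns number of elements in vector if isomorphism exists, 0 otherwise.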
unsigned canMapToVector(Type *T, const DataLayout &DL) const;
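/// \returns True if the VectorizableTree is both tiny and not fully
/// vectorizable. We do not vectorize such trees.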
bool isTreeTinyAndNotFullyVectorizable(bool ForReduction = false) const;
bool isLoadCombineReductionCandidate(RecurKind RdxKind) const;
bool isLoadCombineCandidate() const;
OptimizationRemarkEmitter *getORE() { return ORE; }
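/// This structure holds any data we need about the edges being traversed
/// during buildTree_rec(). We keep track of:
/// (i) the user TreeEntry index, and (ii) the index of the use.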
struct EdgeInfo {
EdgeInfo() = default;
EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx)
: UserTE(UserTE), EdgeIdx(EdgeIdx) {}
TreeEntry *UserTE = nullptr;
unsigned EdgeIdx = UINT_MAX;
#ifndef NDEBUG
friend inline raw_ostream &operator<<(raw_ostream &OS,
const BoUpSLP::EdgeInfo &EI) {
EI.dump(OS);
return OS;
}
void dump(raw_ostream &OS) const {
OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null")
<< " EdgeIdx:" << EdgeIdx << "}";
}
LLVM_DUMP_METHOD void dump() const { dump(dbgs()); }
#endif
};
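/// A helper class used for scoring candidates for two consecutive lanes.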
class LookAheadHeuristics {
const DataLayout &DL;
ScalarEvolution &SE;
const BoUpSLP &R;
/// Number of lanes (i.e. the width of the operand bundle).
int NumLanes;
/// Maximum recursion depth for the look-ahead score.
int MaxLevel;
public:
LookAheadHeuristics(const DataLayout &DL, ScalarEvolution &SE,
const BoUpSLP &R, int NumLanes, int MaxLevel)
: DL(DL), SE(SE), R(R), NumLanes(NumLanes), MaxLevel(MaxLevel) {}
/// Loads from consecutive memory addresses, e.g. load(A[i]), load(A[i+1]).
static const int ScoreConsecutiveLoads = 4;
/// The same load multiple times: cheaper than several independent loads
/// since it can be lowered as a single broadcast load.
static const int ScoreSplatLoads = 3;
/// Loads from reversed memory addresses, e.g. load(A[i+1]), load(A[i]).
static const int ScoreReversedLoads = 3;
/// ExtractElementInst from the same vector and consecutive indices.
static const int ScoreConsecutiveExtracts = 4;
/// ExtractElementInst from the same vector and reversed indices.
static const int ScoreReversedExtracts = 3;
/// Constants.
static const int ScoreConstants = 2;
/// Instructions with the same opcode.
static const int ScoreSameOpcode = 2;
/// Instructions with alt opcodes (e.g, add + sub).
static const int ScoreAltOpcodes = 1;
/// Identical instructions (a splat).
static const int ScoreSplat = 1;
/// Matching with an undef is preferable to failing.
static const int ScoreUndef = 1;
/// Score for failing to find a decent match.
static const int ScoreFail = 0;
/// Score if all users are vectorized.
static const int ScoreAllUserVectorized = 1;
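/// \returns the score of placing \p V1 and \p V2 in consecutive lanes.
/// \p U1 and \p U2 are the users of \p V1 and \p V2, and \p MainAltOps holds
/// the main/alternate instructions the pair is checked against.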
int getShallowScore(Value *V1, Value *V2, Instruction *U1, Instruction *U2,
ArrayRef<Value *> MainAltOps) const {
if (V1 == V2) {
if (isa<LoadInst>(V1)) {
auto AllUsersAreInternal = [U1, U2, this](Value *V1, Value *V2) {
static constexpr unsigned Limit = 8;
if (V1->hasNUsesOrMore(Limit) || V2->hasNUsesOrMore(Limit))
return false;
auto AllUsersVectorized = [U1, U2, this](Value *V) {
return llvm::all_of(V->users(), [U1, U2, this](Value *U) {
return U == U1 || U == U2 || R.getTreeEntry(U) != nullptr;
});
};
return AllUsersVectorized(V1) && AllUsersVectorized(V2);
};
if (R.TTI->isLegalBroadcastLoad(V1->getType(),
ElementCount::getFixed(NumLanes)) &&
((int)V1->getNumUses() == NumLanes ||
AllUsersAreInternal(V1, V2)))
return LookAheadHeuristics::ScoreSplatLoads;
}
return LookAheadHeuristics::ScoreSplat;
}
auto *LI1 = dyn_cast<LoadInst>(V1);
auto *LI2 = dyn_cast<LoadInst>(V2);
if (LI1 && LI2) {
if (LI1->getParent() != LI2->getParent())
return LookAheadHeuristics::ScoreFail;
Optional<int> Dist = getPointersDiff(
LI1->getType(), LI1->getPointerOperand(), LI2->getType(),
LI2->getPointerOperand(), DL, SE, true);
if (!Dist || *Dist == 0)
return LookAheadHeuristics::ScoreFail;
if (std::abs(*Dist) > NumLanes / 2)
return LookAheadHeuristics::ScoreAltOpcodes;
return (*Dist > 0) ? LookAheadHeuristics::ScoreConsecutiveLoads
: LookAheadHeuristics::ScoreReversedLoads;
}
auto *C1 = dyn_cast<Constant>(V1);
auto *C2 = dyn_cast<Constant>(V2);
if (C1 && C2)
return LookAheadHeuristics::ScoreConstants;
Value *EV1;
ConstantInt *Ex1Idx;
if (match(V1, m_ExtractElt(m_Value(EV1), m_ConstantInt(Ex1Idx)))) {
if (isa<UndefValue>(V2))
return LookAheadHeuristics::ScoreConsecutiveExtracts;
Value *EV2 = nullptr;
ConstantInt *Ex2Idx = nullptr;
if (match(V2,
m_ExtractElt(m_Value(EV2), m_CombineOr(m_ConstantInt(Ex2Idx),
m_Undef())))) {
if (!Ex2Idx)
return LookAheadHeuristics::ScoreConsecutiveExtracts;
if (isUndefVector(EV2) && EV2->getType() == EV1->getType())
return LookAheadHeuristics::ScoreConsecutiveExtracts;
if (EV2 == EV1) {
int Idx1 = Ex1Idx->getZExtValue();
int Idx2 = Ex2Idx->getZExtValue();
int Dist = Idx2 - Idx1;
if (std::abs(Dist) == 0)
return LookAheadHeuristics::ScoreSplat;
if (std::abs(Dist) > NumLanes / 2)
return LookAheadHeuristics::ScoreSameOpcode;
return (Dist > 0) ? LookAheadHeuristics::ScoreConsecutiveExtracts
: LookAheadHeuristics::ScoreReversedExtracts;
}
return LookAheadHeuristics::ScoreAltOpcodes;
}
return LookAheadHeuristics::ScoreFail;
}
auto *I1 = dyn_cast<Instruction>(V1);
auto *I2 = dyn_cast<Instruction>(V2);
if (I1 && I2) {
if (I1->getParent() != I2->getParent())
return LookAheadHeuristics::ScoreFail;
SmallVector<Value *, 4> Ops(MainAltOps.begin(), MainAltOps.end());
Ops.push_back(I1);
Ops.push_back(I2);
InstructionsState S = getSameOpcode(Ops);
if (S.getOpcode() &&
(S.MainOp->getNumOperands() <= 2 || !MainAltOps.empty() ||
!S.isAltShuffle()) &&
all_of(Ops, [&S](Value *V) {
return cast<Instruction>(V)->getNumOperands() ==
S.MainOp->getNumOperands();
}))
return S.isAltShuffle() ? LookAheadHeuristics::ScoreAltOpcodes
: LookAheadHeuristics::ScoreSameOpcode;
}
if (isa<UndefValue>(V2))
return LookAheadHeuristics::ScoreUndef;
return LookAheadHeuristics::ScoreFail;
}
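/// Go through the operands of \p LHS and \p RHS recursively until MaxLevel,
/// and return the cumulative score. \p U1 and \p U2 are the users of \p LHS
/// and \p RHS (that is \p LHS and \p RHS are operands of \p U1 and \p U2),
/// except at the beginning of the recursion where these are set to nullptr.
///
/// For example:
/// \verbatim
///  A[0]  B[0]  A[1]  B[1]  C[0] D[0]  B[1] A[1]
///     \ /         \ /         \ /        \ /
///      +           +           +          +
///      G1          G2          G3         G4
/// \endverbatim
/// getScoreAtLevelRec(G1, G2) tries to match the nodes at each level
/// recursively, accumulating the score. It starts from matching the additions
/// at level 0, then moves on to the loads (level 1). The score of G1 and G2
/// is higher than that of G1 and G3, because {A[0],A[1]} and {B[0],B[1]}
/// match with ScoreConsecutiveLoads, while {A[0],C[0]} scores ScoreFail.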
int getScoreAtLevelRec(Value *LHS, Value *RHS, Instruction *U1,
Instruction *U2, int CurrLevel,
ArrayRef<Value *> MainAltOps) const {
int ShallowScoreAtThisLevel =
getShallowScore(LHS, RHS, U1, U2, MainAltOps);
auto *I1 = dyn_cast<Instruction>(LHS);
auto *I2 = dyn_cast<Instruction>(RHS);
if (CurrLevel == MaxLevel || !(I1 && I2) || I1 == I2 ||
ShallowScoreAtThisLevel == LookAheadHeuristics::ScoreFail ||
(((isa<LoadInst>(I1) && isa<LoadInst>(I2)) ||
(I1->getNumOperands() > 2 && I2->getNumOperands() > 2) ||
(isa<ExtractElementInst>(I1) && isa<ExtractElementInst>(I2))) &&
ShallowScoreAtThisLevel))
return ShallowScoreAtThisLevel;
assert(I1 && I2 && "Should have early exited.");
SmallSet<unsigned, 4> Op2Used;
for (unsigned OpIdx1 = 0, NumOperands1 = I1->getNumOperands();
OpIdx1 != NumOperands1; ++OpIdx1) {
int MaxTmpScore = 0;
unsigned MaxOpIdx2 = 0;
bool FoundBest = false;
unsigned FromIdx = isCommutative(I2) ? 0 : OpIdx1;
unsigned ToIdx = isCommutative(I2)
? I2->getNumOperands()
: std::min(I2->getNumOperands(), OpIdx1 + 1);
assert(FromIdx <= ToIdx && "Bad index");
for (unsigned OpIdx2 = FromIdx; OpIdx2 != ToIdx; ++OpIdx2) {
if (Op2Used.count(OpIdx2))
continue;
int TmpScore =
getScoreAtLevelRec(I1->getOperand(OpIdx1), I2->getOperand(OpIdx2),
I1, I2, CurrLevel + 1, None);
if (TmpScore > LookAheadHeuristics::ScoreFail &&
TmpScore > MaxTmpScore) {
MaxTmpScore = TmpScore;
MaxOpIdx2 = OpIdx2;
FoundBest = true;
}
}
if (FoundBest) {
Op2Used.insert(MaxOpIdx2);
ShallowScoreAtThisLevel += MaxTmpScore;
}
}
return ShallowScoreAtThisLevel;
}
};
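/// A helper data structure to hold the operands of a vector of instructions.
/// This supports a fixed vector length for all operand vectors.
///
/// The operands are reordered lane by lane so that isomorphic operands land
/// in the same operand vector, making the operand groups themselves
/// vectorizable. E.g. for the two lanes {a0 + b0, b1 + a1} the commutative
/// operands of the second lane can be swapped to form the vectorizable
/// groups {a0, a1} and {b0, b1}.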
class VLOperands {
/// The operand data for one lane of one operand.
struct OperandData {
OperandData() = default;
OperandData(Value *V, bool APO, bool IsUsed)
: V(V), APO(APO), IsUsed(IsUsed) {}
/// The operand value.
Value *V = nullptr;
/// The APO flag: set in appendOperandsOfVL() for any non-first operand of a
/// non-commutative instruction. Operands may only be swapped across lanes
/// with operands whose APO flag matches.
bool APO = false;
/// Helper flag marking an operand as already matched during reordering.
bool IsUsed = false;
};
/// During operand reordering, each operand is assigned a strategy.
enum class ReorderingMode {
Load,     ///< Matching loads to consecutive memory addresses
Opcode,   ///< Matching instructions with the same opcode
Constant, ///< Matching constants
Splat,    ///< Matching the same instruction multiple times (broadcast)
Failed,   ///< We failed to create a vectorizable group
};
using OperandDataVec = SmallVector<OperandData, 2>;
/// A vector of operand vectors, indexed as OpsVec[OpIdx][Lane].
SmallVector<OperandDataVec, 4> OpsVec;
const DataLayout &DL;
ScalarEvolution &SE;
const BoUpSLP &R;
OperandData &getData(unsigned OpIdx, unsigned Lane) {
return OpsVec[OpIdx][Lane];
}
const OperandData &getData(unsigned OpIdx, unsigned Lane) const {
return OpsVec[OpIdx][Lane];
}
void clearUsed() {
for (unsigned OpIdx = 0, NumOperands = getNumOperands();
OpIdx != NumOperands; ++OpIdx)
for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
++Lane)
OpsVec[OpIdx][Lane].IsUsed = false;
}
void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {
std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);
}
int getSplatScore(unsigned Lane, unsigned OpIdx, unsigned Idx) const {
Value *IdxLaneV = getData(Idx, Lane).V;
if (!isa<Instruction>(IdxLaneV) || IdxLaneV == getData(OpIdx, Lane).V)
return 0;
SmallPtrSet<Value *, 4> Uniques;
for (unsigned Ln = 0, E = getNumLanes(); Ln < E; ++Ln) {
if (Ln == Lane)
continue;
Value *OpIdxLnV = getData(OpIdx, Ln).V;
if (!isa<Instruction>(OpIdxLnV))
return 0;
Uniques.insert(OpIdxLnV);
}
int UniquesCount = Uniques.size();
int UniquesCntWithIdxLaneV =
Uniques.contains(IdxLaneV) ? UniquesCount : UniquesCount + 1;
Value *OpIdxLaneV = getData(OpIdx, Lane).V;
int UniquesCntWithOpIdxLaneV =
Uniques.contains(OpIdxLaneV) ? UniquesCount : UniquesCount + 1;
if (UniquesCntWithIdxLaneV == UniquesCntWithOpIdxLaneV)
return 0;
return (PowerOf2Ceil(UniquesCntWithOpIdxLaneV) -
UniquesCntWithOpIdxLaneV) -
(PowerOf2Ceil(UniquesCntWithIdxLaneV) - UniquesCntWithIdxLaneV);
}
int getExternalUseScore(unsigned Lane, unsigned OpIdx, unsigned Idx) const {
Value *IdxLaneV = getData(Idx, Lane).V;
Value *OpIdxLaneV = getData(OpIdx, Lane).V;
if (isVectorLikeInstWithConstOps(IdxLaneV) &&
isVectorLikeInstWithConstOps(OpIdxLaneV))
return LookAheadHeuristics::ScoreAllUserVectorized;
auto *IdxLaneI = dyn_cast<Instruction>(IdxLaneV);
if (!IdxLaneI || !isa<Instruction>(OpIdxLaneV))
return 0;
return R.areAllUsersVectorized(IdxLaneI, None)
? LookAheadHeuristics::ScoreAllUserVectorized
: 0;
}
static const int ScoreScaleFactor = 10;
int getLookAheadScore(Value *LHS, Value *RHS, ArrayRef<Value *> MainAltOps,
int Lane, unsigned OpIdx, unsigned Idx,
bool &IsUsed) {
LookAheadHeuristics LookAhead(DL, SE, R, getNumLanes(),
LookAheadMaxDepth);
int Score =
LookAhead.getScoreAtLevelRec(LHS, RHS, nullptr, nullptr,
1, MainAltOps);
if (Score) {
int SplatScore = getSplatScore(Lane, OpIdx, Idx);
if (Score <= -SplatScore) {
Score = 1;
} else {
Score += SplatScore;
Score *= ScoreScaleFactor;
Score += getExternalUseScore(Lane, OpIdx, Idx);
IsUsed = true;
}
}
return Score;
}
SmallDenseMap<std::pair<unsigned, unsigned>, unsigned, 8>
BestScoresPerLanes;
Optional<unsigned> getBestOperand(unsigned OpIdx, int Lane, int LastLane,
ArrayRef<ReorderingMode> ReorderingModes,
ArrayRef<Value *> MainAltOps) {
unsigned NumOperands = getNumOperands();
Value *OpLastLane = getData(OpIdx, LastLane).V;
ReorderingMode RMode = ReorderingModes[OpIdx];
if (RMode == ReorderingMode::Failed)
return None;
bool OpIdxAPO = getData(OpIdx, Lane).APO;
struct BestOpData {
Optional<unsigned> Idx = None;
unsigned Score = 0;
} BestOp;
BestOp.Score =
BestScoresPerLanes.try_emplace(std::make_pair(OpIdx, Lane), 0)
.first->second;
bool IsUsed =
RMode == ReorderingMode::Splat || RMode == ReorderingMode::Constant;
for (unsigned Idx = 0; Idx != NumOperands; ++Idx) {
OperandData &OpData = getData(Idx, Lane);
Value *Op = OpData.V;
bool OpAPO = OpData.APO;
if (OpData.IsUsed)
continue;
if (OpAPO != OpIdxAPO)
continue;
switch (RMode) {
case ReorderingMode::Load:
case ReorderingMode::Constant:
case ReorderingMode::Opcode: {
bool LeftToRight = Lane > LastLane;
Value *OpLeft = (LeftToRight) ? OpLastLane : Op;
Value *OpRight = (LeftToRight) ? Op : OpLastLane;
int Score = getLookAheadScore(OpLeft, OpRight, MainAltOps, Lane,
OpIdx, Idx, IsUsed);
if (Score > static_cast<int>(BestOp.Score)) {
BestOp.Idx = Idx;
BestOp.Score = Score;
BestScoresPerLanes[std::make_pair(OpIdx, Lane)] = Score;
}
break;
}
case ReorderingMode::Splat:
if (Op == OpLastLane)
BestOp.Idx = Idx;
break;
case ReorderingMode::Failed:
llvm_unreachable("Not expected Failed reordering mode.");
}
}
if (BestOp.Idx) {
getData(*BestOp.Idx, Lane).IsUsed = IsUsed;
return BestOp.Idx;
}
return None;
}
unsigned getBestLaneToStartReordering() const {
unsigned Min = UINT_MAX;
unsigned SameOpNumber = 0;
MapVector<unsigned, std::pair<unsigned, unsigned>> HashMap;
for (int I = getNumLanes(); I > 0; --I) {
unsigned Lane = I - 1;
OperandsOrderData NumFreeOpsHash =
getMaxNumOperandsThatCanBeReordered(Lane);
if (NumFreeOpsHash.NumOfAPOs < Min) {
Min = NumFreeOpsHash.NumOfAPOs;
SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent;
HashMap.clear();
HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane);
} else if (NumFreeOpsHash.NumOfAPOs == Min &&
NumFreeOpsHash.NumOpsWithSameOpcodeParent < SameOpNumber) {
SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent;
HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane);
} else if (NumFreeOpsHash.NumOfAPOs == Min &&
NumFreeOpsHash.NumOpsWithSameOpcodeParent == SameOpNumber) {
auto It = HashMap.find(NumFreeOpsHash.Hash);
if (It == HashMap.end())
HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane);
else
++It->second.first;
}
}
unsigned BestLane = 0;
unsigned CntMin = UINT_MAX;
for (const auto &Data : reverse(HashMap)) {
if (Data.second.first < CntMin) {
CntMin = Data.second.first;
BestLane = Data.second.second;
}
}
return BestLane;
}
struct OperandsOrderData {
unsigned NumOfAPOs = UINT_MAX;
unsigned NumOpsWithSameOpcodeParent = 0;
unsigned Hash = 0;
};
OperandsOrderData getMaxNumOperandsThatCanBeReordered(unsigned Lane) const {
unsigned CntTrue = 0;
unsigned NumOperands = getNumOperands();
bool AllUndefs = true;
unsigned NumOpsWithSameOpcodeParent = 0;
Instruction *OpcodeI = nullptr;
BasicBlock *Parent = nullptr;
unsigned Hash = 0;
for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
const OperandData &OpData = getData(OpIdx, Lane);
if (OpData.APO)
++CntTrue;
if (auto *I = dyn_cast<Instruction>(OpData.V)) {
if (!OpcodeI || !getSameOpcode({OpcodeI, I}).getOpcode() ||
I->getParent() != Parent) {
if (NumOpsWithSameOpcodeParent == 0) {
NumOpsWithSameOpcodeParent = 1;
OpcodeI = I;
Parent = I->getParent();
} else {
--NumOpsWithSameOpcodeParent;
}
} else {
++NumOpsWithSameOpcodeParent;
}
}
Hash = hash_combine(
Hash, hash_value((OpIdx + 1) * (OpData.V->getValueID() + 1)));
AllUndefs = AllUndefs && isa<UndefValue>(OpData.V);
}
if (AllUndefs)
return {};
OperandsOrderData Data;
Data.NumOfAPOs = std::max(CntTrue, NumOperands - CntTrue);
Data.NumOpsWithSameOpcodeParent = NumOpsWithSameOpcodeParent;
Data.Hash = Hash;
return Data;
}
void appendOperandsOfVL(ArrayRef<Value *> VL) {
assert(!VL.empty() && "Bad VL");
assert((empty() || VL.size() == getNumLanes()) &&
"Expected same number of lanes");
assert(isa<Instruction>(VL[0]) && "Expected instruction");
unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands();
OpsVec.resize(NumOperands);
unsigned NumLanes = VL.size();
for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
OpsVec[OpIdx].resize(NumLanes);
for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
assert(isa<Instruction>(VL[Lane]) && "Expected instruction");
bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane]));
bool APO = (OpIdx == 0) ? false : IsInverseOperation;
OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx),
APO, false};
}
}
}
unsigned getNumOperands() const { return OpsVec.size(); }
unsigned getNumLanes() const { return OpsVec[0].size(); }
Value *getValue(unsigned OpIdx, unsigned Lane) const {
return getData(OpIdx, Lane).V;
}
bool empty() const { return OpsVec.empty(); }
void clear() { OpsVec.clear(); }
bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) {
bool OpAPO = getData(OpIdx, Lane).APO;
for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) {
if (Ln == Lane)
continue;
bool FoundCandidate = false;
for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) {
OperandData &Data = getData(OpI, Ln);
if (Data.APO != OpAPO || Data.IsUsed)
continue;
if (Data.V == Op) {
FoundCandidate = true;
Data.IsUsed = true;
break;
}
}
if (!FoundCandidate)
return false;
}
return true;
}
public:
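/// Initialize with all the operands of the instruction vector \p RootVL.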
VLOperands(ArrayRef<Value *> RootVL, const DataLayout &DL,
ScalarEvolution &SE, const BoUpSLP &R)
: DL(DL), SE(SE), R(R) {
appendOperandsOfVL(RootVL);
}
ValueList getVL(unsigned OpIdx) const {
ValueList OpVL(OpsVec[OpIdx].size());
assert(OpsVec[OpIdx].size() == getNumLanes() &&
"Expected same num of lanes across all operands");
for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane)
OpVL[Lane] = OpsVec[OpIdx][Lane].V;
return OpVL;
}
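/// Reorders the operands lane by lane, greedily picking for each next lane
/// the candidate with the best look-ahead score, so that isomorphic operands
/// end up in the same operand vector. A second pass is attempted if the
/// first one fails for some operand.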
void reorder() {
unsigned NumOperands = getNumOperands();
unsigned NumLanes = getNumLanes();
SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands);
unsigned FirstLane = getBestLaneToStartReordering();
for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
Value *OpLane0 = getValue(OpIdx, FirstLane);
if (isa<LoadInst>(OpLane0))
ReorderingModes[OpIdx] = ReorderingMode::Load;
else if (isa<Instruction>(OpLane0)) {
if (shouldBroadcast(OpLane0, OpIdx, FirstLane))
ReorderingModes[OpIdx] = ReorderingMode::Splat;
else
ReorderingModes[OpIdx] = ReorderingMode::Opcode;
}
else if (isa<Constant>(OpLane0))
ReorderingModes[OpIdx] = ReorderingMode::Constant;
else if (isa<Argument>(OpLane0))
ReorderingModes[OpIdx] = ReorderingMode::Splat;
else
ReorderingModes[OpIdx] = ReorderingMode::Failed;
}
auto &&SkipReordering = [this]() {
SmallPtrSet<Value *, 4> UniqueValues;
ArrayRef<OperandData> Op0 = OpsVec.front();
for (const OperandData &Data : Op0)
UniqueValues.insert(Data.V);
for (ArrayRef<OperandData> Op : drop_begin(OpsVec, 1)) {
if (any_of(Op, [&UniqueValues](const OperandData &Data) {
return !UniqueValues.contains(Data.V);
}))
return false;
}
return UniqueValues.size() != 2 && isPowerOf2_32(UniqueValues.size());
};
for (int Pass = 0; Pass != 2; ++Pass) {
if (SkipReordering())
break;
bool StrategyFailed = false;
clearUsed();
SmallVector<SmallVector<Value *, 2>> MainAltOps(NumOperands);
for (unsigned I = 0; I < NumOperands; ++I)
MainAltOps[I].push_back(getData(I, FirstLane).V);
for (unsigned Distance = 1; Distance != NumLanes; ++Distance) {
for (int Direction : {+1, -1}) {
int Lane = FirstLane + Direction * Distance;
if (Lane < 0 || Lane >= (int)NumLanes)
continue;
int LastLane = Lane - Direction;
assert(LastLane >= 0 && LastLane < (int)NumLanes &&
"Out of bounds");
for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
Optional<unsigned> BestIdx = getBestOperand(
OpIdx, Lane, LastLane, ReorderingModes, MainAltOps[OpIdx]);
if (BestIdx) {
swap(OpIdx, *BestIdx, Lane);
} else {
ReorderingModes[OpIdx] = ReorderingMode::Failed;
StrategyFailed = true;
}
if (MainAltOps[OpIdx].size() != 2) {
OperandData &AltOp = getData(OpIdx, Lane);
InstructionsState OpS =
getSameOpcode({MainAltOps[OpIdx].front(), AltOp.V});
if (OpS.getOpcode() && OpS.isAltShuffle())
MainAltOps[OpIdx].push_back(AltOp.V);
}
}
}
}
if (!StrategyFailed)
break;
}
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) {
switch (RMode) {
case ReorderingMode::Load:
return "Load";
case ReorderingMode::Opcode:
return "Opcode";
case ReorderingMode::Constant:
return "Constant";
case ReorderingMode::Splat:
return "Splat";
case ReorderingMode::Failed:
return "Failed";
}
llvm_unreachable("Unimplemented Reordering Type");
}
LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode,
raw_ostream &OS) {
return OS << getModeStr(RMode);
}
LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) {
printMode(RMode, dbgs());
}
friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) {
return printMode(RMode, OS);
}
LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const {
const unsigned Indent = 2;
unsigned Cnt = 0;
for (const OperandDataVec &OpDataVec : OpsVec) {
OS << "Operand " << Cnt++ << "\n";
for (const OperandData &OpData : OpDataVec) {
OS.indent(Indent) << "{";
if (Value *V = OpData.V)
OS << *V;
else
OS << "null";
OS << ", APO:" << OpData.APO << "}\n";
}
OS << "\n";
}
return OS;
}
LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
#endif
};
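/// Evaluate each pair in \p Candidates and return the index into
/// \p Candidates of the pair with the highest score, i.e. the pair deemed to
/// have the best chance of forming the root of a profitable tree. Returns
/// None if no candidate scored above \p Limit.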
Optional<int>
findBestRootPair(ArrayRef<std::pair<Value *, Value *>> Candidates,
int Limit = LookAheadHeuristics::ScoreFail) {
LookAheadHeuristics LookAhead(*DL, *SE, *this, 2,
RootLookAheadMaxDepth);
int BestScore = Limit;
Optional<int> Index = None;
for (int I : seq<int>(0, Candidates.size())) {
int Score = LookAhead.getScoreAtLevelRec(Candidates[I].first,
Candidates[I].second,
nullptr, nullptr,
1, None);
if (Score > BestScore) {
BestScore = Score;
Index = I;
}
}
return Index;
}
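/// Checks if the instruction is marked for deletion.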
bool isDeleted(Instruction *I) const { return DeletedInstructions.count(I); }
void eraseInstruction(Instruction *I) {
DeletedInstructions.insert(I);
}
bool isAnalyzedReductionRoot(Instruction *I) const {
return AnalyzedReductionsRoots.count(I);
}
void analyzedReductionRoot(Instruction *I) {
AnalyzedReductionsRoots.insert(I);
}
bool areAnalyzedReductionVals(ArrayRef<Value *> VL) {
return AnalyzedReductionVals.contains(hash_value(VL));
}
void analyzedReductionVals(ArrayRef<Value *> VL) {
AnalyzedReductionVals.insert(hash_value(VL));
}
void clearReductionData() {
AnalyzedReductionsRoots.clear();
AnalyzedReductionVals.clear();
}
bool isAnyGathered(const SmallDenseSet<Value *> &Vals) const {
return any_of(MustGather, [&](Value *V) { return Vals.contains(V); });
}
~BoUpSLP();
private:
bool
canReorderOperands(TreeEntry *UserTE,
SmallVectorImpl<std::pair<unsigned, TreeEntry *>> &Edges,
ArrayRef<TreeEntry *> ReorderableGathers,
SmallVectorImpl<TreeEntry *> &GatherOps);
TreeEntry *getVectorizedOperand(TreeEntry *UserTE, unsigned OpIdx) {
ArrayRef<Value *> VL = UserTE->getOperand(OpIdx);
TreeEntry *TE = nullptr;
const auto *It = find_if(VL, [this, &TE](Value *V) {
TE = getTreeEntry(V);
return TE;
});
if (It != VL.end() && TE->isSame(VL))
return TE;
return nullptr;
}
const TreeEntry *getVectorizedOperand(const TreeEntry *UserTE,
unsigned OpIdx) const {
return const_cast<BoUpSLP *>(this)->getVectorizedOperand(
const_cast<TreeEntry *>(UserTE), OpIdx);
}
bool areAllUsersVectorized(Instruction *I,
ArrayRef<Value *> VectorizedVals) const;
InstructionCost getEntryCost(const TreeEntry *E,
ArrayRef<Value *> VectorizedVals);
void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth,
const EdgeInfo &EI);
bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
SmallVectorImpl<unsigned> &CurrentOrder) const;
Value *vectorizeTree(TreeEntry *E);
Value *vectorizeTree(ArrayRef<Value *> VL);
Value *createBuildVector(ArrayRef<Value *> VL);
InstructionCost getGatherCost(FixedVectorType *Ty,
const APInt &ShuffledIndices,
bool NeedToShuffle) const;
Optional<TargetTransformInfo::ShuffleKind>
isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask,
SmallVectorImpl<const TreeEntry *> &Entries);
InstructionCost getGatherCost(ArrayRef<Value *> VL) const;
void setInsertPointAfterBundle(const TreeEntry *E);
Value *gather(ArrayRef<Value *> VL);
bool isFullyVectorizableTinyTree(bool ForReduction) const;
static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
SmallVectorImpl<Value *> &Left,
SmallVectorImpl<Value *> &Right,
const DataLayout &DL,
ScalarEvolution &SE,
const BoUpSLP &R);
DenseMap<Value *, SmallVector<StoreInst *, 4>>
collectUserStores(const BoUpSLP::TreeEntry *TE) const;
bool CanFormVector(const SmallVector<StoreInst *, 4> &StoresVec,
OrdersType &ReorderIndices) const;
SmallVector<OrdersType, 1>
findExternalStoreUsersReorderIndices(TreeEntry *TE) const;
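/// Represents one node of the vectorizable tree: a bundle of scalars that is
/// either vectorized, scatter-vectorized (masked gather/scatter), or must be
/// gathered into a vector from its scalars.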
struct TreeEntry {
using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>;
TreeEntry(VecTreeTy &Container) : Container(Container) {}
bool isSame(ArrayRef<Value *> VL) const {
auto &&IsSame = [VL](ArrayRef<Value *> Scalars, ArrayRef<int> Mask) {
if (Mask.size() != VL.size() && VL.size() == Scalars.size())
return std::equal(VL.begin(), VL.end(), Scalars.begin());
return VL.size() == Mask.size() &&
std::equal(VL.begin(), VL.end(), Mask.begin(),
[Scalars](Value *V, int Idx) {
return (isa<UndefValue>(V) &&
Idx == UndefMaskElem) ||
(Idx != UndefMaskElem && V == Scalars[Idx]);
});
};
if (!ReorderIndices.empty()) {
SmallVector<int> Mask;
inversePermutation(ReorderIndices, Mask);
if (VL.size() == Scalars.size())
return IsSame(Scalars, Mask);
if (VL.size() == ReuseShuffleIndices.size()) {
::addMask(Mask, ReuseShuffleIndices);
return IsSame(Scalars, Mask);
}
return false;
}
return IsSame(Scalars, ReuseShuffleIndices);
}
bool hasEqualOperands(const TreeEntry &TE) const {
if (TE.getNumOperands() != getNumOperands())
return false;
SmallBitVector Used(getNumOperands());
for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
unsigned PrevCount = Used.count();
for (unsigned K = 0; K < E; ++K) {
if (Used.test(K))
continue;
if (getOperand(K) == TE.getOperand(I)) {
Used.set(K);
break;
}
}
if (PrevCount == Used.count())
return false;
}
return true;
}
unsigned getVectorFactor() const {
if (!ReuseShuffleIndices.empty())
return ReuseShuffleIndices.size();
return Scalars.size();
}
/// A vector of scalars.
ValueList Scalars;
/// The Scalars are vectorized into this value. It is initialized to Null.
Value *VectorizedValue = nullptr;
/// Do we need to gather this sequence or vectorize it (either with vector
/// instruction or with scatter/gather intrinsics for store/load)?
enum EntryState { Vectorize, ScatterVectorize, NeedToGather };
EntryState State;
/// Does this sequence require some shuffling?
SmallVector<int, 4> ReuseShuffleIndices;
/// Does this entry require reordering?
SmallVector<unsigned, 4> ReorderIndices;
/// Points back to the VectorizableTree.
VecTreeTy &Container;
/// The TreeEntry index containing the user of this entry. We can actually
/// have multiple users so the data structure is not truly a tree.
SmallVector<EdgeInfo, 1> UserTreeIndices;
/// The index of this treeEntry in VectorizableTree.
int Idx = -1;
private:
SmallVector<ValueList, 2> Operands;
Instruction *MainOp = nullptr;
Instruction *AltOp = nullptr;
public:
void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) {
if (Operands.size() < OpIdx + 1)
Operands.resize(OpIdx + 1);
assert(Operands[OpIdx].empty() && "Already resized?");
assert(OpVL.size() <= Scalars.size() &&
"Number of operands is greater than the number of scalars.");
Operands[OpIdx].resize(OpVL.size());
copy(OpVL, Operands[OpIdx].begin());
}
void setOperandsInOrder() {
assert(Operands.empty() && "Already initialized?");
auto *I0 = cast<Instruction>(Scalars[0]);
Operands.resize(I0->getNumOperands());
unsigned NumLanes = Scalars.size();
for (unsigned OpIdx = 0, NumOperands = I0->getNumOperands();
OpIdx != NumOperands; ++OpIdx) {
Operands[OpIdx].resize(NumLanes);
for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
auto *I = cast<Instruction>(Scalars[Lane]);
assert(I->getNumOperands() == NumOperands &&
"Expected same number of operands");
Operands[OpIdx][Lane] = I->getOperand(OpIdx);
}
}
}
void reorderOperands(ArrayRef<int> Mask) {
for (ValueList &Operand : Operands)
reorderScalars(Operand, Mask);
}
ValueList &getOperand(unsigned OpIdx) {
assert(OpIdx < Operands.size() && "Off bounds");
return Operands[OpIdx];
}
ArrayRef<Value *> getOperand(unsigned OpIdx) const {
assert(OpIdx < Operands.size() && "Off bounds");
return Operands[OpIdx];
}
unsigned getNumOperands() const { return Operands.size(); }
Value *getSingleOperand(unsigned OpIdx) const {
assert(OpIdx < Operands.size() && "Off bounds");
assert(!Operands[OpIdx].empty() && "No operand available");
return Operands[OpIdx][0];
}
bool isAltShuffle() const { return MainOp != AltOp; }
bool isOpcodeOrAlt(Instruction *I) const {
unsigned CheckedOpcode = I->getOpcode();
return (getOpcode() == CheckedOpcode ||
getAltOpcode() == CheckedOpcode);
}
Value *isOneOf(Value *Op) const {
auto *I = dyn_cast<Instruction>(Op);
if (I && isOpcodeOrAlt(I))
return Op;
return MainOp;
}
void setOperations(const InstructionsState &S) {
MainOp = S.MainOp;
AltOp = S.AltOp;
}
Instruction *getMainOp() const {
return MainOp;
}
Instruction *getAltOp() const {
return AltOp;
}
unsigned getOpcode() const {
return MainOp ? MainOp->getOpcode() : 0;
}
unsigned getAltOpcode() const {
return AltOp ? AltOp->getOpcode() : 0;
}
int findLaneForValue(Value *V) const {
unsigned FoundLane = std::distance(Scalars.begin(), find(Scalars, V));
assert(FoundLane < Scalars.size() && "Couldn't find extract lane");
if (!ReorderIndices.empty())
FoundLane = ReorderIndices[FoundLane];
assert(FoundLane < Scalars.size() && "Couldn't find extract lane");
if (!ReuseShuffleIndices.empty()) {
FoundLane = std::distance(ReuseShuffleIndices.begin(),
find(ReuseShuffleIndices, FoundLane));
}
return FoundLane;
}
#ifndef NDEBUG
LLVM_DUMP_METHOD void dump() const {
dbgs() << Idx << ".\n";
for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) {
dbgs() << "Operand " << OpI << ":\n";
for (const Value *V : Operands[OpI])
dbgs().indent(2) << *V << "\n";
}
dbgs() << "Scalars: \n";
for (Value *V : Scalars)
dbgs().indent(2) << *V << "\n";
dbgs() << "State: ";
switch (State) {
case Vectorize:
dbgs() << "Vectorize\n";
break;
case ScatterVectorize:
dbgs() << "ScatterVectorize\n";
break;
case NeedToGather:
dbgs() << "NeedToGather\n";
break;
}
dbgs() << "MainOp: ";
if (MainOp)
dbgs() << *MainOp << "\n";
else
dbgs() << "NULL\n";
dbgs() << "AltOp: ";
if (AltOp)
dbgs() << *AltOp << "\n";
else
dbgs() << "NULL\n";
dbgs() << "VectorizedValue: ";
if (VectorizedValue)
dbgs() << *VectorizedValue << "\n";
else
dbgs() << "NULL\n";
dbgs() << "ReuseShuffleIndices: ";
if (ReuseShuffleIndices.empty())
dbgs() << "Empty";
else
for (int ReuseIdx : ReuseShuffleIndices)
dbgs() << ReuseIdx << ", ";
dbgs() << "\n";
dbgs() << "ReorderIndices: ";
for (unsigned ReorderIdx : ReorderIndices)
dbgs() << ReorderIdx << ", ";
dbgs() << "\n";
dbgs() << "UserTreeIndices: ";
for (const auto &EInfo : UserTreeIndices)
dbgs() << EInfo << ", ";
dbgs() << "\n";
}
#endif
};
#ifndef NDEBUG
void dumpTreeCosts(const TreeEntry *E, InstructionCost ReuseShuffleCost,
InstructionCost VecCost,
InstructionCost ScalarCost) const {
dbgs() << "SLP: Calculated costs for Tree:\n"; E->dump();
dbgs() << "SLP: Costs:\n";
dbgs() << "SLP: ReuseShuffleCost = " << ReuseShuffleCost << "\n";
dbgs() << "SLP: VectorCost = " << VecCost << "\n";
dbgs() << "SLP: ScalarCost = " << ScalarCost << "\n";
dbgs() << "SLP: ReuseShuffleCost + VecCost - ScalarCost = " <<
ReuseShuffleCost + VecCost - ScalarCost << "\n";
}
#endif
TreeEntry *newTreeEntry(ArrayRef<Value *> VL, Optional<ScheduleData *> Bundle,
const InstructionsState &S,
const EdgeInfo &UserTreeIdx,
ArrayRef<int> ReuseShuffleIndices = None,
ArrayRef<unsigned> ReorderIndices = None) {
TreeEntry::EntryState EntryState =
Bundle ? TreeEntry::Vectorize : TreeEntry::NeedToGather;
return newTreeEntry(VL, EntryState, Bundle, S, UserTreeIdx,
ReuseShuffleIndices, ReorderIndices);
}
TreeEntry *newTreeEntry(ArrayRef<Value *> VL,
TreeEntry::EntryState EntryState,
Optional<ScheduleData *> Bundle,
const InstructionsState &S,
const EdgeInfo &UserTreeIdx,
ArrayRef<int> ReuseShuffleIndices = None,
ArrayRef<unsigned> ReorderIndices = None) {
assert(((!Bundle && EntryState == TreeEntry::NeedToGather) ||
(Bundle && EntryState != TreeEntry::NeedToGather)) &&
"Need to vectorize gather entry?");
VectorizableTree.push_back(std::make_unique<TreeEntry>(VectorizableTree));
TreeEntry *Last = VectorizableTree.back().get();
Last->Idx = VectorizableTree.size() - 1;
Last->State = EntryState;
Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(),
ReuseShuffleIndices.end());
if (ReorderIndices.empty()) {
Last->Scalars.assign(VL.begin(), VL.end());
Last->setOperations(S);
} else {
Last->Scalars.assign(VL.size(), nullptr);
transform(ReorderIndices, Last->Scalars.begin(),
[VL](unsigned Idx) -> Value * {
if (Idx >= VL.size())
return UndefValue::get(VL.front()->getType());
return VL[Idx];
});
InstructionsState S = getSameOpcode(Last->Scalars);
Last->setOperations(S);
Last->ReorderIndices.append(ReorderIndices.begin(), ReorderIndices.end());
}
if (Last->State != TreeEntry::NeedToGather) {
for (Value *V : VL) {
assert(!getTreeEntry(V) && "Scalar already in tree!");
ScalarToTreeEntry[V] = Last;
}
ScheduleData *BundleMember = *Bundle;
assert((BundleMember || isa<PHINode>(S.MainOp) ||
isVectorLikeInstWithConstOps(S.MainOp) ||
doesNotNeedToSchedule(VL)) &&
"Bundle and VL out of sync");
if (BundleMember) {
for (Value *V : VL) {
if (doesNotNeedToBeScheduled(V))
continue;
assert(BundleMember && "Unexpected end of bundle.");
BundleMember->TE = Last;
BundleMember = BundleMember->NextInBundle;
}
}
assert(!BundleMember && "Bundle and VL out of sync");
} else {
MustGather.insert(VL.begin(), VL.end());
}
if (UserTreeIdx.UserTE)
Last->UserTreeIndices.push_back(UserTreeIdx);
return Last;
}
/// -- Vectorization State --
/// Holds all of the tree entries.
TreeEntry::VecTreeTy VectorizableTree;
#ifndef NDEBUG
LLVM_DUMP_METHOD void dumpVectorizableTree() const {
for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) {
VectorizableTree[Id]->dump();
dbgs() << "\n";
}
}
#endif
TreeEntry *getTreeEntry(Value *V) { return ScalarToTreeEntry.lookup(V); }
const TreeEntry *getTreeEntry(Value *V) const {
return ScalarToTreeEntry.lookup(V);
}
/// Maps a specific scalar to its tree entry.
SmallDenseMap<Value*, TreeEntry *> ScalarToTreeEntry;
/// Maps a value to the proposed vectorizable size.
SmallDenseMap<Value *, unsigned> InstrElementSize;
/// A list of scalars that we found that we need to keep as scalars.
ValueSet MustGather;
struct ExternalUser {
ExternalUser(Value *S, llvm::User *U, int L)
: Scalar(S), User(U), Lane(L) {}
Value *Scalar;
llvm::User *User;
int Lane;
};
using UserList = SmallVector<ExternalUser, 16>;
bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
Instruction *Inst2) {
AliasCacheKey key = std::make_pair(Inst1, Inst2);
Optional<bool> &result = AliasCache[key];
if (result) {
return result.value();
}
bool aliased = true;
if (Loc1.Ptr && isSimple(Inst1))
aliased = isModOrRefSet(BatchAA.getModRefInfo(Inst2, Loc1));
result = aliased;
return aliased;
}
using AliasCacheKey = std::pair<Instruction *, Instruction *>;
DenseMap<AliasCacheKey, Optional<bool>> AliasCache;
/// Batched alias analysis results, cached across queries within one tree.
BatchAAResults BatchAA;
/// Temporary store for deleted instructions. Instructions are actually
/// removed when the BoUpSLP object is destructed.
DenseSet<Instruction *> DeletedInstructions;
/// Set of the instructions already analyzed as reduction roots.
SmallPtrSet<Instruction *, 16> AnalyzedReductionsRoots;
/// Set of hashes for the lists of reduction values already analyzed.
DenseSet<size_t> AnalyzedReductionVals;
/// A list of values that need to be extracted out of the tree.
/// This list holds pairs of (Internal Scalar : External User). External User
/// can be nullptr, meaning that this Internal Scalar will be used later,
/// after vectorization.
UserList ExternalUses;
/// Values used only by @llvm.assume calls.
SmallPtrSet<const Value *, 32> EphValues;
/// Holds all of the instructions that we gathered: shuffle instructions and
/// extractelements.
SetVector<Instruction *> GatherShuffleSeq;
/// A list of blocks that we are going to CSE.
SetVector<BasicBlock *> CSEBlocks;
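/// Contains all scheduling relevant data for an instruction.
/// A ScheduleData either represents a single instruction or a member of an
/// instruction bundle (= a group of instructions which is combined into a
/// vector instruction).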
struct ScheduleData {
enum { InvalidDeps = -1 };
ScheduleData() = default;
void init(int BlockSchedulingRegionID, Value *OpVal) {
FirstInBundle = this;
NextInBundle = nullptr;
NextLoadStore = nullptr;
IsScheduled = false;
SchedulingRegionID = BlockSchedulingRegionID;
clearDependencies();
OpValue = OpVal;
TE = nullptr;
}
void verify() {
if (hasValidDependencies()) {
assert(UnscheduledDeps <= Dependencies && "invariant");
} else {
assert(UnscheduledDeps == Dependencies && "invariant");
}
if (IsScheduled) {
assert(isSchedulingEntity() &&
"unexpected scheduled state");
for (const ScheduleData *BundleMember = this; BundleMember;
BundleMember = BundleMember->NextInBundle) {
assert(BundleMember->hasValidDependencies() &&
BundleMember->UnscheduledDeps == 0 &&
"unexpected scheduled state");
assert((BundleMember == this || !BundleMember->IsScheduled) &&
"only bundle is marked scheduled");
}
}
assert(Inst->getParent() == FirstInBundle->Inst->getParent() &&
"all bundle members must be in same basic block");
}
bool hasValidDependencies() const { return Dependencies != InvalidDeps; }
bool isSchedulingEntity() const { return FirstInBundle == this; }
bool isPartOfBundle() const {
return NextInBundle != nullptr || FirstInBundle != this || TE;
}
bool isReady() const {
assert(isSchedulingEntity() &&
"can't consider non-scheduling entity for ready list");
return unscheduledDepsInBundle() == 0 && !IsScheduled;
}
int incrementUnscheduledDeps(int Incr) {
assert(hasValidDependencies() &&
"increment of unscheduled deps would be meaningless");
UnscheduledDeps += Incr;
return FirstInBundle->unscheduledDepsInBundle();
}
void resetUnscheduledDeps() {
UnscheduledDeps = Dependencies;
}
void clearDependencies() {
Dependencies = InvalidDeps;
resetUnscheduledDeps();
MemoryDependencies.clear();
ControlDependencies.clear();
}
int unscheduledDepsInBundle() const {
assert(isSchedulingEntity() && "only meaningful on the bundle");
int Sum = 0;
for (const ScheduleData *BundleMember = this; BundleMember;
BundleMember = BundleMember->NextInBundle) {
if (BundleMember->UnscheduledDeps == InvalidDeps)
return InvalidDeps;
Sum += BundleMember->UnscheduledDeps;
}
return Sum;
}
void dump(raw_ostream &os) const {
if (!isSchedulingEntity()) {
os << "/ " << *Inst;
} else if (NextInBundle) {
os << '[' << *Inst;
ScheduleData *SD = NextInBundle;
while (SD) {
os << ';' << *SD->Inst;
SD = SD->NextInBundle;
}
os << ']';
} else {
os << *Inst;
}
}
/// The instruction this ScheduleData represents.
Instruction *Inst = nullptr;
/// Opcode of the current instruction in the schedule data.
Value *OpValue = nullptr;
/// The TreeEntry that this instruction corresponds to.
TreeEntry *TE = nullptr;
/// Points to the head in an instruction bundle (and always to this for
/// single instructions).
ScheduleData *FirstInBundle = nullptr;
/// Single linked list of all instructions in a bundle. Null if it is a
/// single instruction.
ScheduleData *NextInBundle = nullptr;
/// Single linked list of all memory instructions (e.g. load, store, call)
/// in the block, until the end of the scheduling region.
ScheduleData *NextLoadStore = nullptr;
/// The dependent memory instructions. This list is derived on demand when
/// dependencies are calculated.
SmallVector<ScheduleData *, 4> MemoryDependencies;
/// List of instructions which this instruction could be control dependent
/// on. Allowing such nodes to be scheduled below this one could introduce
/// a runtime fault which did not exist in the original program.
SmallVector<ScheduleData *, 4> ControlDependencies;
/// This ScheduleData is in the current scheduling region if this matches
/// the current SchedulingRegionID of BlockScheduling.
int SchedulingRegionID = 0;
/// Used for getting a "good" final ordering of instructions.
int SchedulingPriority = 0;
/// The number of dependencies: the number of users of the instruction plus
/// the number of dependent memory instructions (if any). Calculated on
/// demand; InvalidDeps means not calculated yet.
int Dependencies = InvalidDeps;
/// The number of dependencies minus the number of dependencies of scheduled
/// instructions. While scheduling, it is decreased for each "unlocking"
/// instruction. Note that it is negative as long as Dependencies is not
/// calculated.
int UnscheduledDeps = InvalidDeps;
/// True if this instruction is scheduled (or considered as scheduled in the
/// dry-run).
bool IsScheduled = false;
};
#ifndef NDEBUG
friend inline raw_ostream &operator<<(raw_ostream &os,
const BoUpSLP::ScheduleData &SD) {
SD.dump(os);
return os;
}
#endif
friend struct GraphTraits<BoUpSLP *>;
friend struct DOTGraphTraits<BoUpSLP *>;
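/// Contains all scheduling data for a basic block. Instructions that do not
/// read/write memory and whose operands are constants, arguments, phis, or
/// values from other blocks (and whose users are phis or in other blocks)
/// do not need to be scheduled at all.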
struct BlockScheduling {
BlockScheduling(BasicBlock *BB)
: BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {}
void clear() {
ReadyInsts.clear();
ScheduleStart = nullptr;
ScheduleEnd = nullptr;
FirstLoadStoreInRegion = nullptr;
LastLoadStoreInRegion = nullptr;
RegionHasStackSave = false;
ScheduleRegionSizeLimit -= ScheduleRegionSize;
if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
ScheduleRegionSizeLimit = MinScheduleRegionSize;
ScheduleRegionSize = 0;
++SchedulingRegionID;
}
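/// Returns the ScheduleData for \p I, or null if \p I is not inside this
/// block or is outside of the current scheduling region.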
ScheduleData *getScheduleData(Instruction *I) {
if (BB != I->getParent())
return nullptr;
ScheduleData *SD = ScheduleDataMap.lookup(I);
if (SD && isInSchedulingRegion(SD))
return SD;
return nullptr;
}
ScheduleData *getScheduleData(Value *V) {
if (auto *I = dyn_cast<Instruction>(V))
return getScheduleData(I);
return nullptr;
}
ScheduleData *getScheduleData(Value *V, Value *Key) {
if (V == Key)
return getScheduleData(V);
auto I = ExtraScheduleDataMap.find(V);
if (I != ExtraScheduleDataMap.end()) {
ScheduleData *SD = I->second.lookup(Key);
if (SD && isInSchedulingRegion(SD))
return SD;
}
return nullptr;
}
bool isInSchedulingRegion(ScheduleData *SD) const {
return SD->SchedulingRegionID == SchedulingRegionID;
}
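/// Marks an instruction as scheduled and puts all dependent ready
/// instructions into the ready-list.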
template <typename ReadyListType>
void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
SD->IsScheduled = true;
LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n");
for (ScheduleData *BundleMember = SD; BundleMember;
BundleMember = BundleMember->NextInBundle) {
if (BundleMember->Inst != BundleMember->OpValue)
continue;
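// Decrement the unscheduled counter of each definition and insert the
// containing bundle into the ready list once it has no unscheduled deps.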
auto &&DecrUnsched = [this, &ReadyList](Instruction *I) {
doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) {
if (OpDef && OpDef->hasValidDependencies() &&
OpDef->incrementUnscheduledDeps(-1) == 0) {
ScheduleData *DepBundle = OpDef->FirstInBundle;
assert(!DepBundle->IsScheduled &&
"already scheduled bundle gets ready");
ReadyList.insert(DepBundle);
LLVM_DEBUG(dbgs()
<< "SLP: gets ready (def): " << *DepBundle << "\n");
}
});
};
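// If BundleMember is a vector bundle, its operands may have been reordered
// during buildTree(). We therefore need to get its operands through the
// TreeEntry.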
if (TreeEntry *TE = BundleMember->TE) {
int Lane = std::distance(TE->Scalars.begin(),
find(TE->Scalars, BundleMember->Inst));
assert(Lane >= 0 && "Lane not set");
auto *In = BundleMember->Inst;
assert(In &&
(isa<ExtractValueInst>(In) || isa<ExtractElementInst>(In) ||
In->getNumOperands() == TE->getNumOperands()) &&
"Missed TreeEntry operands?");
(void)In;
for (unsigned OpIdx = 0, NumOperands = TE->getNumOperands();
OpIdx != NumOperands; ++OpIdx)
if (auto *I = dyn_cast<Instruction>(TE->getOperand(OpIdx)[Lane]))
DecrUnsched(I);
} else {
for (Use &U : BundleMember->Inst->operands())
if (auto *I = dyn_cast<Instruction>(U.get()))
DecrUnsched(I);
}
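// Handle the memory dependencies.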
for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
if (MemoryDepSD->hasValidDependencies() &&
MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
assert(!DepBundle->IsScheduled &&
"already scheduled bundle gets ready");
ReadyList.insert(DepBundle);
LLVM_DEBUG(dbgs()
<< "SLP: gets ready (mem): " << *DepBundle << "\n");
}
}
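// Handle the control dependencies.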
for (ScheduleData *DepSD : BundleMember->ControlDependencies) {
if (DepSD->incrementUnscheduledDeps(-1) == 0) {
ScheduleData *DepBundle = DepSD->FirstInBundle;
assert(!DepBundle->IsScheduled &&
"already scheduled bundle gets ready");
ReadyList.insert(DepBundle);
LLVM_DEBUG(dbgs()
<< "SLP: gets ready (ctl): " << *DepBundle << "\n");
}
}
}
}
void verify() {
if (!ScheduleStart)
return;
assert(ScheduleStart->getParent() == ScheduleEnd->getParent() &&
ScheduleStart->comesBefore(ScheduleEnd) &&
"Not a valid scheduling region?");
for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
auto *SD = getScheduleData(I);
if (!SD)
continue;
assert(isInSchedulingRegion(SD) &&
"primary schedule data not in window?");
assert(isInSchedulingRegion(SD->FirstInBundle) &&
"entire bundle in window!");
(void)SD;
doForAllOpcodes(I, [](ScheduleData *SD) { SD->verify(); });
}
for (auto *SD : ReadyInsts) {
assert(SD->isSchedulingEntity() && SD->isReady() &&
"item in ready list not ready?");
(void)SD;
}
}
void doForAllOpcodes(Value *V,
function_ref<void(ScheduleData *SD)> Action) {
if (ScheduleData *SD = getScheduleData(V))
Action(SD);
auto I = ExtraScheduleDataMap.find(V);
if (I != ExtraScheduleDataMap.end())
for (auto &P : I->second)
if (isInSchedulingRegion(P.second))
Action(P.second);
}
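/// Put all instructions into the ReadyList which are ready for scheduling.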
template <typename ReadyListType>
void initialFillReadyList(ReadyListType &ReadyList) {
for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
doForAllOpcodes(I, [&](ScheduleData *SD) {
if (SD->isSchedulingEntity() && SD->hasValidDependencies() &&
SD->isReady()) {
ReadyList.insert(SD);
LLVM_DEBUG(dbgs()
<< "SLP: initially in ready list: " << *SD << "\n");
}
});
}
}
/// Build a bundle from the ScheduleData nodes corresponding to the scalar
/// instruction for each lane.
ScheduleData *buildBundle(ArrayRef<Value *> VL);
/// Checks if a bundle of instructions can be scheduled, i.e. has no cyclic
/// dependencies. This is only a dry-run, no instructions are actually moved.
Optional<ScheduleData *>
tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
const InstructionsState &S);
/// Un-bundles a group of instructions.
void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue);
/// Allocates schedule data chunks.
ScheduleData *allocateScheduleDataChunks();
/// Extends the scheduling region so that V is inside the region.
bool extendSchedulingRegion(Value *V, const InstructionsState &S);
/// Initialize the ScheduleData structures for new instructions in the
/// scheduling region.
void initScheduleData(Instruction *FromI, Instruction *ToI,
ScheduleData *PrevLoadStore,
ScheduleData *NextLoadStore);
/// Updates the dependency information of a bundle and of all instructions/
/// bundles which depend on the original bundle.
void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
BoUpSLP *SLP);
/// Sets all instructions in the scheduling region to un-scheduled.
void resetSchedule();
BasicBlock *BB;
/// Simple memory allocation for ScheduleData.
std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;
/// The size of a ScheduleData array in ScheduleDataChunks.
int ChunkSize;
/// The allocator position in the current chunk (the last chunks entry).
int ChunkPos;
/// Attaches ScheduleData to Instruction.
DenseMap<Instruction *, ScheduleData *> ScheduleDataMap;
/// Attaches ScheduleData to Instruction with the leading key.
DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>>
ExtraScheduleDataMap;
/// The ready-list for scheduling (only used for the dry-run).
SetVector<ScheduleData *> ReadyInsts;
/// The first instruction of the scheduling region.
Instruction *ScheduleStart = nullptr;
/// The first instruction _after_ the scheduling region.
Instruction *ScheduleEnd = nullptr;
/// The first memory accessing instruction in the region (can be null).
ScheduleData *FirstLoadStoreInRegion = nullptr;
/// The last memory accessing instruction in the region (can be null).
ScheduleData *LastLoadStoreInRegion = nullptr;
/// Is there an llvm.stacksave or llvm.stackrestore in the region?
bool RegionHasStackSave = false;
/// The current size of the scheduling region.
int ScheduleRegionSize = 0;
/// The maximum size allowed for the scheduling region.
int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget;
/// The ID of the current scheduling region; incrementing it "removes" all
/// ScheduleData from the region.
int SchedulingRegionID = 1;
};
/// Maps basic blocks to their scheduling state.
MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;
/// Performs the "real" scheduling. Done before vectorization is actually
/// performed in a basic block.
void scheduleBlock(BlockScheduling *BS);
/// List of users to ignore during scheduling and that don't need extracting.
const SmallDenseSet<Value *> *UserIgnoreList = nullptr;
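/// A DenseMapInfo implementation for holding DenseMaps and DenseSets of
/// sorted SmallVectors of unsigned.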
struct OrdersTypeDenseMapInfo {
static OrdersType getEmptyKey() {
OrdersType V;
V.push_back(~1U);
return V;
}
static OrdersType getTombstoneKey() {
OrdersType V;
V.push_back(~2U);
return V;
}
static unsigned getHashValue(const OrdersType &V) {
return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
}
static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) {
return LHS == RHS;
}
};
Function *F;
ScalarEvolution *SE;
TargetTransformInfo *TTI;
TargetLibraryInfo *TLI;
LoopInfo *LI;
DominatorTree *DT;
AssumptionCache *AC;
DemandedBits *DB;
const DataLayout *DL;
OptimizationRemarkEmitter *ORE;
unsigned MaxVecRegSize; // Set by TTI or overridden by cl::opt.
unsigned MinVecRegSize; // Set by cl::opt.
/// Instruction builder to construct the vectorized tree.
IRBuilder<> Builder;
/// Maps a scalar to the smallest bit width it can be demoted to, and
/// whether it must be sign-extended from that width.
MapVector<Value *, std::pair<uint64_t, bool>> MinBWs;
};
} // end namespace slpvectorizer
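/// GraphTraits specialization so that the SLP graph can be traversed and
/// rendered with the generic LLVM graph utilities (e.g. GraphWriter).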
template <> struct GraphTraits<BoUpSLP *> {
using TreeEntry = BoUpSLP::TreeEntry;
using NodeRef = TreeEntry *;
using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy;
struct ChildIteratorType
: public iterator_adaptor_base<
ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> {
ContainerTy &VectorizableTree;
ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W,
ContainerTy &VT)
: ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {}
NodeRef operator*() { return I->UserTE; }
};
static NodeRef getEntryNode(BoUpSLP &R) {
return R.VectorizableTree[0].get();
}
static ChildIteratorType child_begin(NodeRef N) {
return {N->UserTreeIndices.begin(), N->Container};
}
static ChildIteratorType child_end(NodeRef N) {
return {N->UserTreeIndices.end(), N->Container};
}
class nodes_iterator {
using ItTy = ContainerTy::iterator;
ItTy It;
public:
nodes_iterator(const ItTy &It2) : It(It2) {}
NodeRef operator*() { return It->get(); }
nodes_iterator operator++() {
++It;
return *this;
}
bool operator!=(const nodes_iterator &N2) const { return N2.It != It; }
};
static nodes_iterator nodes_begin(BoUpSLP *R) {
return nodes_iterator(R->VectorizableTree.begin());
}
static nodes_iterator nodes_end(BoUpSLP *R) {
return nodes_iterator(R->VectorizableTree.end());
}
static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); }
};
template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits {
using TreeEntry = BoUpSLP::TreeEntry;
DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) {
std::string Str;
raw_string_ostream OS(Str);
if (isSplat(Entry->Scalars))
OS << "<splat> ";
for (auto *V : Entry->Scalars) {
OS << *V;
if (llvm::any_of(R->ExternalUses, [&](const BoUpSLP::ExternalUser &EU) {
return EU.Scalar == V;
}))
OS << " <extract>";
OS << "\n";
}
return Str;
}
static std::string getNodeAttributes(const TreeEntry *Entry,
const BoUpSLP *) {
if (Entry->State == TreeEntry::NeedToGather)
return "color=red";
return "";
}
};
} // end namespace llvm
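// The destructor finally erases all instructions that were delayed for
// deletion during vectorization and cleans up any operands that became
// trivially dead as a result.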
BoUpSLP::~BoUpSLP() {
SmallVector<WeakTrackingVH> DeadInsts;
for (auto *I : DeletedInstructions) {
for (Use &U : I->operands()) {
auto *Op = dyn_cast<Instruction>(U.get());
if (Op && !DeletedInstructions.count(Op) && Op->hasOneUser() &&
wouldInstructionBeTriviallyDead(Op, TLI))
DeadInsts.emplace_back(Op);
}
I->dropAllReferences();
}
for (auto *I : DeletedInstructions) {
assert(I->use_empty() &&
"trying to erase instruction with users.");
I->eraseFromParent();
}
RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI);
#ifdef EXPENSIVE_CHECKS
assert(!verifyFunction(*F, &dbgs()));
#endif
}
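/// Reorders the given \p Reuses mask according to the given \p Mask.
/// \p Reuses contains the original mask for the scalars reused in the node.
/// The procedure transforms this mask in accordance with the given \p Mask.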
static void reorderReuses(SmallVectorImpl<int> &Reuses, ArrayRef<int> Mask) {
assert(!Mask.empty() && Reuses.size() == Mask.size() &&
"Expected non-empty mask.");
SmallVector<int> Prev(Reuses.begin(), Reuses.end());
Prev.swap(Reuses);
for (unsigned I = 0, E = Prev.size(); I < E; ++I)
if (Mask[I] != UndefMaskElem)
Reuses[Mask[I]] = Prev[I];
}
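/// Reorders the given \p Order according to the given \p Mask. \p Order is
/// the original order of the scalars. The procedure transforms the provided
/// order in accordance with the given \p Mask. If the resulting \p Order is
/// just an identity order, \p Order is cleared.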
static void reorderOrder(SmallVectorImpl<unsigned> &Order, ArrayRef<int> Mask) {
assert(!Mask.empty() && "Expected non-empty mask.");
SmallVector<int> MaskOrder;
if (Order.empty()) {
MaskOrder.resize(Mask.size());
std::iota(MaskOrder.begin(), MaskOrder.end(), 0);
} else {
inversePermutation(Order, MaskOrder);
}
reorderReuses(MaskOrder, Mask);
if (ShuffleVectorInst::isIdentityMask(MaskOrder)) {
Order.clear();
return;
}
Order.assign(Mask.size(), Mask.size());
for (unsigned I = 0, E = Mask.size(); I < E; ++I)
if (MaskOrder[I] != UndefMaskElem)
Order[MaskOrder[I]] = I;
fixupOrderingIndices(Order);
}
Optional<BoUpSLP::OrdersType>
BoUpSLP::findReusedOrderedScalars(const BoUpSLP::TreeEntry &TE) {
assert(TE.State == TreeEntry::NeedToGather && "Expected gather node only.");
unsigned NumScalars = TE.Scalars.size();
OrdersType CurrentOrder(NumScalars, NumScalars);
SmallVector<int> Positions;
SmallBitVector UsedPositions(NumScalars);
const TreeEntry *STE = nullptr;
for (unsigned I = 0; I < NumScalars; ++I) {
Value *V = TE.Scalars[I];
if (!isa<LoadInst, ExtractElementInst, ExtractValueInst>(V))
continue;
if (const auto *LocalSTE = getTreeEntry(V)) {
if (!STE)
STE = LocalSTE;
else if (STE != LocalSTE)
return None;
unsigned Lane =
std::distance(STE->Scalars.begin(), find(STE->Scalars, V));
if (Lane >= NumScalars)
return None;
if (CurrentOrder[Lane] != NumScalars) {
if (Lane != I)
continue;
UsedPositions.reset(CurrentOrder[Lane]);
}
CurrentOrder[Lane] = I;
UsedPositions.set(I);
}
}
if (STE && (UsedPositions.count() > 1 || STE->Scalars.size() == 2)) {
auto &&IsIdentityOrder = [NumScalars](ArrayRef<unsigned> CurrentOrder) {
for (unsigned I = 0; I < NumScalars; ++I)
if (CurrentOrder[I] != I && CurrentOrder[I] != NumScalars)
return false;
return true;
};
if (IsIdentityOrder(CurrentOrder)) {
CurrentOrder.clear();
return CurrentOrder;
}
auto *It = CurrentOrder.begin();
for (unsigned I = 0; I < NumScalars;) {
if (UsedPositions.test(I)) {
++I;
continue;
}
if (*It == NumScalars) {
*It = I;
++I;
}
++It;
}
return CurrentOrder;
}
return None;
}
namespace {
/// Tracks the state we can represent the loads in the given sequence.
enum class LoadsState { Gather, Vectorize, ScatterVectorize };
} // anonymous namespace
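/// Checks if the given array of loads can be represented as a vectorized,
/// scatter or just simple gather. For example, consecutive loads from a[0],
/// a[1], a[2], a[3] can use a single wide load (Vectorize), while loads from
/// unrelated addresses may still be profitable as a masked gather
/// (ScatterVectorize).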
static LoadsState canVectorizeLoads(ArrayRef<Value *> VL, const Value *VL0,
const TargetTransformInfo &TTI,
const DataLayout &DL, ScalarEvolution &SE,
LoopInfo &LI,
SmallVectorImpl<unsigned> &Order,
SmallVectorImpl<Value *> &PointerOps) {
Type *ScalarTy = VL0->getType();
if (DL.getTypeSizeInBits(ScalarTy) != DL.getTypeAllocSizeInBits(ScalarTy))
return LoadsState::Gather;
PointerOps.clear();
PointerOps.resize(VL.size());
auto *POIter = PointerOps.begin();
for (Value *V : VL) {
auto *L = cast<LoadInst>(V);
if (!L->isSimple())
return LoadsState::Gather;
*POIter = L->getPointerOperand();
++POIter;
}
Order.clear();
bool IsSorted = sortPtrAccesses(PointerOps, ScalarTy, DL, SE, Order);
if (IsSorted || all_of(PointerOps, [&PointerOps](Value *P) {
if (getUnderlyingObject(P) != getUnderlyingObject(PointerOps.front()))
return false;
auto *GEP = dyn_cast<GetElementPtrInst>(P);
if (!GEP)
return false;
auto *GEP0 = cast<GetElementPtrInst>(PointerOps.front());
return GEP->getNumOperands() == 2 &&
((isConstant(GEP->getOperand(1)) &&
isConstant(GEP0->getOperand(1))) ||
getSameOpcode({GEP->getOperand(1), GEP0->getOperand(1)})
.getOpcode());
})) {
if (IsSorted) {
Value *Ptr0;
Value *PtrN;
if (Order.empty()) {
Ptr0 = PointerOps.front();
PtrN = PointerOps.back();
} else {
Ptr0 = PointerOps[Order.front()];
PtrN = PointerOps[Order.back()];
}
Optional<int> Diff =
getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, DL, SE);
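// Check that the sorted loads are consecutive.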
if (static_cast<unsigned>(*Diff) == VL.size() - 1)
return LoadsState::Vectorize;
}
Loop *L = LI.getLoopFor(cast<LoadInst>(VL0)->getParent());
bool ProfitableGatherPointers =
static_cast<unsigned>(count_if(PointerOps, [L](Value *V) {
return L && L->isLoopInvariant(V);
})) <= VL.size() / 2 && VL.size() > 2;
if (ProfitableGatherPointers || all_of(PointerOps, [IsSorted](Value *P) {
auto *GEP = dyn_cast<GetElementPtrInst>(P);
return (IsSorted && !GEP && doesNotNeedToBeScheduled(P)) ||
(GEP && GEP->getNumOperands() == 2);
})) {
Align CommonAlignment = cast<LoadInst>(VL0)->getAlign();
for (Value *V : VL)
CommonAlignment =
std::min(CommonAlignment, cast<LoadInst>(V)->getAlign());
auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
if (TTI.isLegalMaskedGather(VecTy, CommonAlignment) &&
!TTI.forceScalarizeMaskedGather(VecTy, CommonAlignment))
return LoadsState::ScatterVectorize;
}
}
return LoadsState::Gather;
}
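/// Clusters the pointers in \p VL by their underlying object and sorts each
/// cluster by distance from its base; on success \p SortedIndices receives
/// the permutation that groups consecutive accesses together.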
bool clusterSortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
const DataLayout &DL, ScalarEvolution &SE,
SmallVectorImpl<unsigned> &SortedIndices) {
assert(llvm::all_of(
VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
"Expected list of pointer operands.");
MapVector<Value *, SmallVector<std::tuple<Value *, int, unsigned>>> Bases;
Bases[VL[0]].push_back(std::make_tuple(VL[0], 0U, 0U));
unsigned Cnt = 1;
for (Value *Ptr : VL.drop_front()) {
bool Found = any_of(Bases, [&](auto &Base) {
Optional<int> Diff =
getPointersDiff(ElemTy, Base.first, ElemTy, Ptr, DL, SE,
/*StrictCheck=*/true);
if (!Diff)
return false;
Base.second.emplace_back(Ptr, *Diff, Cnt++);
return true;
});
if (!Found) {
if (Bases.size() > VL.size() / 2 - 1)
return false;
Bases[Ptr].emplace_back(Ptr, 0, Cnt++);
}
}
bool AnyConsecutive = false;
for (auto &Base : Bases) {
auto &Vec = Base.second;
if (Vec.size() > 1) {
llvm::stable_sort(Vec, [](const std::tuple<Value *, int, unsigned> &X,
const std::tuple<Value *, int, unsigned> &Y) {
return std::get<1>(X) < std::get<1>(Y);
});
int InitialOffset = std::get<1>(Vec[0]);
AnyConsecutive |= all_of(enumerate(Vec), [InitialOffset](auto &P) {
return std::get<1>(P.value()) == int(P.index()) + InitialOffset;
});
}
}
SortedIndices.clear();
if (!AnyConsecutive)
return false;
for (auto &Base : Bases) {
for (auto &T : Base.second)
SortedIndices.push_back(std::get<2>(T));
}
assert(SortedIndices.size() == VL.size() &&
"Expected SortedIndices to be the size of VL");
return true;
}
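/// Sort loads into increasing pointer offsets to allow greater clustering.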
Optional<BoUpSLP::OrdersType>
BoUpSLP::findPartiallyOrderedLoads(const BoUpSLP::TreeEntry &TE) {
assert(TE.State == TreeEntry::NeedToGather && "Expected gather node only.");
Type *ScalarTy = TE.Scalars[0]->getType();
SmallVector<Value *> Ptrs;
Ptrs.reserve(TE.Scalars.size());
for (Value *V : TE.Scalars) {
auto *L = dyn_cast<LoadInst>(V);
if (!L || !L->isSimple())
return None;
Ptrs.push_back(L->getPointerOperand());
}
BoUpSLP::OrdersType Order;
if (clusterSortPtrAccesses(Ptrs, ScalarTy, *DL, *SE, Order))
return Order;
return None;
}
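/// Gets reordering data for the given tree entry. If the entry is
/// vectorized, just return ReorderIndices; otherwise check if the scalars
/// can be reordered and return the most optimal order.
/// \param TopToBottom If true, include the orders of vectorized stores and
/// insertelement nodes, otherwise skip them.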
Optional<BoUpSLP::OrdersType> BoUpSLP::getReorderingData(const TreeEntry &TE,
bool TopToBottom) {
if (!TE.ReuseShuffleIndices.empty())
return None;
if (TE.State == TreeEntry::Vectorize &&
(isa<LoadInst, ExtractElementInst, ExtractValueInst>(TE.getMainOp()) ||
(TopToBottom && isa<StoreInst, InsertElementInst>(TE.getMainOp()))) &&
!TE.isAltShuffle())
return TE.ReorderIndices;
if (TE.State == TreeEntry::NeedToGather) {
if (((TE.getOpcode() == Instruction::ExtractElement &&
!TE.isAltShuffle()) ||
(all_of(TE.Scalars,
[](Value *V) {
return isa<UndefValue, ExtractElementInst>(V);
}) &&
any_of(TE.Scalars,
[](Value *V) { return isa<ExtractElementInst>(V); }))) &&
all_of(TE.Scalars,
[](Value *V) {
auto *EE = dyn_cast<ExtractElementInst>(V);
return !EE || isa<FixedVectorType>(EE->getVectorOperandType());
}) &&
allSameType(TE.Scalars)) {
OrdersType CurrentOrder;
bool Reuse = canReuseExtract(TE.Scalars, TE.getMainOp(), CurrentOrder);
if (Reuse || !CurrentOrder.empty()) {
if (!CurrentOrder.empty())
fixupOrderingIndices(CurrentOrder);
return CurrentOrder;
}
}
if (Optional<OrdersType> CurrentOrder = findReusedOrderedScalars(TE))
return CurrentOrder;
if (TE.Scalars.size() >= 4)
if (Optional<OrdersType> Order = findPartiallyOrderedLoads(TE))
return Order;
}
return None;
}
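/// Reorders the current graph to the most profitable order starting from
/// the root node to the leaf nodes. The best order is chosen only from the
/// nodes of the same size (vectorization factor); smaller nodes are
/// considered parts of subgraphs with a smaller VF and are reordered
/// independently.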
void BoUpSLP::reorderTopToBottom() {
DenseMap<unsigned, SetVector<TreeEntry *>> VFToOrderedEntries;
DenseMap<const TreeEntry *, OrdersType> GathersToOrders;
DenseMap<const TreeEntry *, OrdersType> AltShufflesToOrders;
DenseMap<const TreeEntry *, SmallVector<OrdersType, 1>>
ExternalUserReorderMap;
TargetTransformInfo &TTIRef = *TTI;
for_each(VectorizableTree, [this, &TTIRef, &VFToOrderedEntries,
&GathersToOrders, &ExternalUserReorderMap,
&AltShufflesToOrders](
const std::unique_ptr<TreeEntry> &TE) {
SmallVector<OrdersType, 1> ExternalUserReorderIndices =
findExternalStoreUsersReorderIndices(TE.get());
if (!ExternalUserReorderIndices.empty()) {
VFToOrderedEntries[TE->Scalars.size()].insert(TE.get());
ExternalUserReorderMap.try_emplace(TE.get(),
std::move(ExternalUserReorderIndices));
}
if (TE->isAltShuffle()) {
VectorType *VecTy =
FixedVectorType::get(TE->Scalars[0]->getType(), TE->Scalars.size());
unsigned Opcode0 = TE->getOpcode();
unsigned Opcode1 = TE->getAltOpcode();
SmallBitVector OpcodeMask(TE->Scalars.size(), false);
for (unsigned Lane : seq<unsigned>(0, TE->Scalars.size()))
if (cast<Instruction>(TE->Scalars[Lane])->getOpcode() == Opcode1)
OpcodeMask.set(Lane);
if (TTIRef.isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask)) {
VFToOrderedEntries[TE->Scalars.size()].insert(TE.get());
AltShufflesToOrders.try_emplace(TE.get(), OrdersType());
}
}
if (Optional<OrdersType> CurrentOrder =
getReorderingData(*TE, true)) {
unsigned Cnt = 0;
const TreeEntry *UserTE = TE.get();
while (UserTE && Cnt < RecursionMaxDepth) {
if (UserTE->UserTreeIndices.size() != 1)
break;
if (all_of(UserTE->UserTreeIndices, [](const EdgeInfo &EI) {
return EI.UserTE->State == TreeEntry::Vectorize &&
EI.UserTE->isAltShuffle() && EI.UserTE->Idx != 0;
}))
return;
UserTE = UserTE->UserTreeIndices.back().UserTE;
++Cnt;
}
VFToOrderedEntries[TE->Scalars.size()].insert(TE.get());
if (TE->State != TreeEntry::Vectorize)
GathersToOrders.try_emplace(TE.get(), *CurrentOrder);
}
});
for (unsigned VF = VectorizableTree.front()->Scalars.size(); VF > 1;
VF /= 2) {
auto It = VFToOrderedEntries.find(VF);
if (It == VFToOrderedEntries.end())
continue;
ArrayRef<TreeEntry *> OrderedEntries = It->second.getArrayRef();
MapVector<OrdersType, unsigned,
DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>>
OrdersUses;
SmallPtrSet<const TreeEntry *, 4> VisitedOps;
for (const TreeEntry *OpTE : OrderedEntries) {
if (!OpTE->ReuseShuffleIndices.empty())
continue;
const auto &Order = [OpTE, &GathersToOrders,
&AltShufflesToOrders]() -> const OrdersType & {
if (OpTE->State == TreeEntry::NeedToGather) {
auto It = GathersToOrders.find(OpTE);
if (It != GathersToOrders.end())
return It->second;
}
if (OpTE->isAltShuffle()) {
auto It = AltShufflesToOrders.find(OpTE);
if (It != AltShufflesToOrders.end())
return It->second;
}
return OpTE->ReorderIndices;
}();
auto It = ExternalUserReorderMap.find(OpTE);
if (It != ExternalUserReorderMap.end()) {
const auto &ExternalUserReorderIndices = It->second;
for (const OrdersType &ExtOrder : ExternalUserReorderIndices)
++OrdersUses.insert(std::make_pair(ExtOrder, 0)).first->second;
if (Order.empty())
continue;
}
if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() &&
OpTE->getOpcode() == Instruction::Store && !Order.empty()) {
SmallVector<int> Mask;
inversePermutation(Order, Mask);
unsigned E = Order.size();
OrdersType CurrentOrder(E, E);
transform(Mask, CurrentOrder.begin(), [E](int Idx) {
return Idx == UndefMaskElem ? E : static_cast<unsigned>(Idx);
});
fixupOrderingIndices(CurrentOrder);
++OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second;
} else {
++OrdersUses.insert(std::make_pair(Order, 0)).first->second;
}
}
if (OrdersUses.empty())
continue;
ArrayRef<unsigned> BestOrder = OrdersUses.front().first;
unsigned Cnt = OrdersUses.front().second;
for (const auto &Pair : drop_begin(OrdersUses)) {
if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) {
BestOrder = Pair.first;
Cnt = Pair.second;
}
}
if (BestOrder.empty())
continue;
SmallVector<int> Mask;
inversePermutation(BestOrder, Mask);
SmallVector<int> MaskOrder(BestOrder.size(), UndefMaskElem);
unsigned E = BestOrder.size();
transform(BestOrder, MaskOrder.begin(), [E](unsigned I) {
return I < E ? static_cast<int>(I) : UndefMaskElem;
});
for (std::unique_ptr<TreeEntry> &TE : VectorizableTree) {
if (TE->Scalars.size() != VF) {
if (TE->ReuseShuffleIndices.size() == VF) {
assert(all_of(TE->UserTreeIndices,
[VF, &TE](const EdgeInfo &EI) {
return EI.UserTE->Scalars.size() == VF ||
EI.UserTE->Scalars.size() ==
TE->Scalars.size();
}) &&
"All users must be of VF size.");
reorderReuses(TE->ReuseShuffleIndices, Mask);
}
continue;
}
if (TE->State == TreeEntry::Vectorize &&
isa<ExtractElementInst, ExtractValueInst, LoadInst, StoreInst,
InsertElementInst>(TE->getMainOp()) &&
!TE->isAltShuffle()) {
reorderOrder(TE->ReorderIndices, Mask);
if (isa<InsertElementInst, StoreInst>(TE->getMainOp()))
TE->reorderOperands(Mask);
} else {
TE->reorderOperands(Mask);
assert(TE->ReorderIndices.empty() &&
"Expected empty reorder sequence.");
reorderScalars(TE->Scalars, Mask);
}
if (!TE->ReuseShuffleIndices.empty()) {
OrdersType CurrentOrder;
reorderOrder(CurrentOrder, MaskOrder);
SmallVector<int> NewReuses;
inversePermutation(CurrentOrder, NewReuses);
addMask(NewReuses, TE->ReuseShuffleIndices);
TE->ReuseShuffleIndices.swap(NewReuses);
}
}
}
}
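/// Checks if the operands on the edges \p Edges of the \p UserTE allow
/// reordering, i.e. the operands can be reordered because they have only
/// one user and are reorderable.
/// \param ReorderableGathers List of all gather nodes that require
/// reordering (e.g., gathers of extractelements or partially vectorizable
/// loads).
/// \param GatherOps List of gather operand nodes for \p UserTE that require
/// reordering.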
bool BoUpSLP::canReorderOperands(
TreeEntry *UserTE, SmallVectorImpl<std::pair<unsigned, TreeEntry *>> &Edges,
ArrayRef<TreeEntry *> ReorderableGathers,
SmallVectorImpl<TreeEntry *> &GatherOps) {
for (unsigned I = 0, E = UserTE->getNumOperands(); I < E; ++I) {
if (any_of(Edges, [I](const std::pair<unsigned, TreeEntry *> &OpData) {
return OpData.first == I &&
OpData.second->State == TreeEntry::Vectorize;
}))
continue;
if (TreeEntry *TE = getVectorizedOperand(UserTE, I)) {
if (any_of(TE->UserTreeIndices,
[UserTE](const EdgeInfo &EI) { return EI.UserTE != UserTE; }))
return false;
Edges.emplace_back(I, TE);
if (TE->State != TreeEntry::Vectorize && TE->ReuseShuffleIndices.empty())
GatherOps.push_back(TE);
continue;
}
TreeEntry *Gather = nullptr;
if (count_if(ReorderableGathers,
[&Gather, UserTE, I](TreeEntry *TE) {
assert(TE->State != TreeEntry::Vectorize &&
"Only non-vectorized nodes are expected.");
if (any_of(TE->UserTreeIndices,
[UserTE, I](const EdgeInfo &EI) {
return EI.UserTE == UserTE && EI.EdgeIdx == I;
})) {
assert(TE->isSame(UserTE->getOperand(I)) &&
"Operand entry does not match operands.");
Gather = TE;
return true;
}
return false;
}) > 1 &&
!all_of(UserTE->getOperand(I), isConstant))
return false;
if (Gather)
GatherOps.push_back(Gather);
}
return true;
}
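/// Reorders the current graph to the most profitable order starting from
/// the leaves to the root. It allows rotating small subgraphs and reduces
/// the number of reshuffles if the leaf nodes use the same order, since the
/// orders can then be merged and only the user node needs to be shuffled.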
void BoUpSLP::reorderBottomToTop(bool IgnoreReorder) {
SetVector<TreeEntry *> OrderedEntries;
DenseMap<const TreeEntry *, OrdersType> GathersToOrders;
SmallVector<TreeEntry *> NonVectorized;
for_each(VectorizableTree, [this, &OrderedEntries, &GathersToOrders,
&NonVectorized](
const std::unique_ptr<TreeEntry> &TE) {
if (TE->State != TreeEntry::Vectorize)
NonVectorized.push_back(TE.get());
if (Optional<OrdersType> CurrentOrder =
getReorderingData(*TE, false)) {
OrderedEntries.insert(TE.get());
if (TE->State != TreeEntry::Vectorize)
GathersToOrders.try_emplace(TE.get(), *CurrentOrder);
}
});
SmallPtrSet<const TreeEntry *, 4> Visited;
while (!OrderedEntries.empty()) {
DenseMap<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>> Users;
SmallVector<TreeEntry *> Filtered;
for (TreeEntry *TE : OrderedEntries) {
if (!(TE->State == TreeEntry::Vectorize ||
(TE->State == TreeEntry::NeedToGather &&
GathersToOrders.count(TE))) ||
TE->UserTreeIndices.empty() || !TE->ReuseShuffleIndices.empty() ||
!all_of(drop_begin(TE->UserTreeIndices),
[TE](const EdgeInfo &EI) {
return EI.UserTE == TE->UserTreeIndices.front().UserTE;
}) ||
!Visited.insert(TE).second) {
Filtered.push_back(TE);
continue;
}
for (EdgeInfo &EI : TE->UserTreeIndices) {
TreeEntry *UserTE = EI.UserTE;
auto It = Users.find(UserTE);
if (It == Users.end())
It = Users.insert({UserTE, {}}).first;
It->second.emplace_back(EI.EdgeIdx, TE);
}
}
for_each(Filtered,
[&OrderedEntries](TreeEntry *TE) { OrderedEntries.remove(TE); });
SmallVector<
std::pair<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>>>
UsersVec(Users.begin(), Users.end());
sort(UsersVec, [](const auto &Data1, const auto &Data2) {
return Data1.first->Idx > Data2.first->Idx;
});
for (auto &Data : UsersVec) {
SmallVector<TreeEntry *> GatherOps;
if (!canReorderOperands(Data.first, Data.second, NonVectorized,
GatherOps)) {
for_each(Data.second,
[&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) {
OrderedEntries.remove(Op.second);
});
continue;
}
MapVector<OrdersType, unsigned,
DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>>
OrdersUses;
SmallPtrSet<const TreeEntry *, 4> VisitedOps;
SmallPtrSet<const TreeEntry *, 4> VisitedUsers;
for (const auto &Op : Data.second) {
TreeEntry *OpTE = Op.second;
if (!VisitedOps.insert(OpTE).second)
continue;
if (!OpTE->ReuseShuffleIndices.empty())
continue;
const auto &Order = [OpTE, &GathersToOrders]() -> const OrdersType & {
if (OpTE->State == TreeEntry::NeedToGather)
return GathersToOrders.find(OpTE)->second;
return OpTE->ReorderIndices;
}();
unsigned NumOps = count_if(
Data.second, [OpTE](const std::pair<unsigned, TreeEntry *> &P) {
return P.second == OpTE;
});
if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() &&
OpTE->getOpcode() == Instruction::Store && !Order.empty()) {
SmallVector<int> Mask;
inversePermutation(Order, Mask);
unsigned E = Order.size();
OrdersType CurrentOrder(E, E);
transform(Mask, CurrentOrder.begin(), [E](int Idx) {
return Idx == UndefMaskElem ? E : static_cast<unsigned>(Idx);
});
fixupOrderingIndices(CurrentOrder);
OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second +=
NumOps;
} else {
OrdersUses.insert(std::make_pair(Order, 0)).first->second += NumOps;
}
auto Res = OrdersUses.insert(std::make_pair(OrdersType(), 0));
const auto &&AllowsReordering = [IgnoreReorder, &GathersToOrders](
const TreeEntry *TE) {
if (!TE->ReorderIndices.empty() || !TE->ReuseShuffleIndices.empty() ||
(TE->State == TreeEntry::Vectorize && TE->isAltShuffle()) ||
(IgnoreReorder && TE->Idx == 0))
return true;
if (TE->State == TreeEntry::NeedToGather) {
auto It = GathersToOrders.find(TE);
if (It != GathersToOrders.end())
return !It->second.empty();
return true;
}
return false;
};
for (const EdgeInfo &EI : OpTE->UserTreeIndices) {
TreeEntry *UserTE = EI.UserTE;
if (!VisitedUsers.insert(UserTE).second)
continue;
if (AllowsReordering(UserTE))
continue;
ArrayRef<std::pair<unsigned, TreeEntry *>> Ops = Users[UserTE];
if (static_cast<unsigned>(count_if(
Ops, [UserTE, &AllowsReordering](
const std::pair<unsigned, TreeEntry *> &Op) {
return AllowsReordering(Op.second) &&
all_of(Op.second->UserTreeIndices,
[UserTE](const EdgeInfo &EI) {
return EI.UserTE == UserTE;
});
})) <= Ops.size() / 2)
++Res.first->second;
}
}
if (OrdersUses.empty()) {
for_each(Data.second,
[&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) {
OrderedEntries.remove(Op.second);
});
continue;
}
ArrayRef<unsigned> BestOrder = OrdersUses.front().first;
unsigned Cnt = OrdersUses.front().second;
for (const auto &Pair : drop_begin(OrdersUses)) {
if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) {
BestOrder = Pair.first;
Cnt = Pair.second;
}
}
if (BestOrder.empty()) {
for_each(Data.second,
[&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) {
OrderedEntries.remove(Op.second);
});
continue;
}
VisitedOps.clear();
SmallVector<int> Mask;
inversePermutation(BestOrder, Mask);
SmallVector<int> MaskOrder(BestOrder.size(), UndefMaskElem);
unsigned E = BestOrder.size();
transform(BestOrder, MaskOrder.begin(), [E](unsigned I) {
return I < E ? static_cast<int>(I) : UndefMaskElem;
});
for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) {
TreeEntry *TE = Op.second;
OrderedEntries.remove(TE);
if (!VisitedOps.insert(TE).second)
continue;
if (TE->ReuseShuffleIndices.size() == BestOrder.size()) {
reorderReuses(TE->ReuseShuffleIndices, Mask);
continue;
}
if (TE->State != TreeEntry::Vectorize)
continue;
assert((BestOrder.size() == TE->ReorderIndices.size() ||
TE->ReorderIndices.empty()) &&
"Non-matching sizes of user/operand entries.");
reorderOrder(TE->ReorderIndices, Mask);
if (IgnoreReorder && TE == VectorizableTree.front().get())
IgnoreReorder = false;
}
for (TreeEntry *Gather : GatherOps) {
assert(Gather->ReorderIndices.empty() &&
"Unexpected reordering of gathers.");
if (!Gather->ReuseShuffleIndices.empty()) {
reorderReuses(Gather->ReuseShuffleIndices, Mask);
continue;
}
reorderScalars(Gather->Scalars, Mask);
OrderedEntries.remove(Gather);
}
if (Data.first->State != TreeEntry::Vectorize ||
!isa<ExtractElementInst, ExtractValueInst, LoadInst>(
Data.first->getMainOp()) ||
Data.first->isAltShuffle())
Data.first->reorderOperands(Mask);
if (!isa<InsertElementInst, StoreInst>(Data.first->getMainOp()) ||
Data.first->isAltShuffle()) {
reorderScalars(Data.first->Scalars, Mask);
reorderOrder(Data.first->ReorderIndices, MaskOrder);
if (Data.first->ReuseShuffleIndices.empty() &&
!Data.first->ReorderIndices.empty() &&
!Data.first->isAltShuffle()) {
OrderedEntries.insert(Data.first);
}
} else {
reorderOrder(Data.first->ReorderIndices, Mask);
}
}
}
if (IgnoreReorder && !VectorizableTree.front()->ReorderIndices.empty() &&
VectorizableTree.front()->ReuseShuffleIndices.empty())
VectorizableTree.front()->ReorderIndices.clear();
}
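/// Builds the list of external uses of the vectorized scalars, i.e. the
/// vectorized scalars to be extracted, their lanes and their scalar users.
/// \p ExternallyUsedValues contains additional external uses to handle the
/// vectorization of reductions.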
void BoUpSLP::buildExternalUses(
const ExtraValueToDebugLocsMap &ExternallyUsedValues) {
for (auto &TEPtr : VectorizableTree) {
TreeEntry *Entry = TEPtr.get();
if (Entry->State == TreeEntry::NeedToGather)
continue;
for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
Value *Scalar = Entry->Scalars[Lane];
int FoundLane = Entry->findLaneForValue(Scalar);
auto ExtI = ExternallyUsedValues.find(Scalar);
if (ExtI != ExternallyUsedValues.end()) {
LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane "
<< Lane << " from " << *Scalar << ".\n");
ExternalUses.emplace_back(Scalar, nullptr, FoundLane);
}
for (User *U : Scalar->users()) {
LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");
Instruction *UserInst = dyn_cast<Instruction>(U);
if (!UserInst)
continue;
if (isDeleted(UserInst))
continue;
if (TreeEntry *UseEntry = getTreeEntry(U)) {
Value *UseScalar = UseEntry->Scalars[0];
if (UseScalar != U ||
UseEntry->State == TreeEntry::ScatterVectorize ||
!InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
<< ".\n");
assert(UseEntry->State != TreeEntry::NeedToGather && "Bad state");
continue;
}
}
if (UserIgnoreList && UserIgnoreList->contains(UserInst))
continue;
LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane "
<< Lane << " from " << *Scalar << ".\n");
ExternalUses.push_back(ExternalUser(Scalar, U, FoundLane));
}
}
}
}
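/// Helper for findExternalStoreUsersReorderIndices(): iterates over the
/// users of \p TE's scalars and collects the stores, returning a map from
/// the underlying store pointers to the collected stores.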
DenseMap<Value *, SmallVector<StoreInst *, 4>>
BoUpSLP::collectUserStores(const BoUpSLP::TreeEntry *TE) const {
DenseMap<Value *, SmallVector<StoreInst *, 4>> PtrToStoresMap;
for (unsigned Lane : seq<unsigned>(0, TE->Scalars.size())) {
Value *V = TE->Scalars[Lane];
static constexpr unsigned UsersLimit = 4;
if (V->hasNUsesOrMore(UsersLimit))
break;
for (User *U : V->users()) {
auto *SI = dyn_cast<StoreInst>(U);
if (SI == nullptr || !SI->isSimple() ||
!isValidElementType(SI->getValueOperand()->getType()))
continue;
if (getTreeEntry(U))
continue;
Value *Ptr = getUnderlyingObject(SI->getPointerOperand());
auto &StoresVec = PtrToStoresMap[Ptr];
if (StoresVec.size() > Lane)
continue;
if (!StoresVec.empty() &&
SI->getParent() != StoresVec.back()->getParent())
continue;
if (!StoresVec.empty() &&
SI->getValueOperand()->getType() !=
StoresVec.back()->getValueOperand()->getType())
continue;
StoresVec.push_back(SI);
}
}
return PtrToStoresMap;
}
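/// Helper for findExternalStoreUsersReorderIndices(): checks if the stores
/// in \p StoresVec can form a vector instruction. If so, it returns true
/// and populates \p ReorderIndices with the shuffle indices of the stores
/// when compared to the sorted vector.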
bool BoUpSLP::CanFormVector(const SmallVector<StoreInst *, 4> &StoresVec,
OrdersType &ReorderIndices) const {
SmallVector<std::pair<StoreInst *, int>, 4> StoreOffsetVec(StoresVec.size());
StoreInst *S0 = StoresVec[0];
StoreOffsetVec[0] = {S0, 0};
Type *S0Ty = S0->getValueOperand()->getType();
Value *S0Ptr = S0->getPointerOperand();
for (unsigned Idx : seq<unsigned>(1, StoresVec.size())) {
StoreInst *SI = StoresVec[Idx];
Optional<int> Diff =
getPointersDiff(S0Ty, S0Ptr, SI->getValueOperand()->getType(),
SI->getPointerOperand(), *DL, *SE,
/*StrictCheck=*/true);
if (!Diff)
return false;
StoreOffsetVec[Idx] = {StoresVec[Idx], *Diff};
}
stable_sort(StoreOffsetVec, [](const std::pair<StoreInst *, int> &Pair1,
const std::pair<StoreInst *, int> &Pair2) {
int Offset1 = Pair1.second;
int Offset2 = Pair2.second;
return Offset1 < Offset2;
});
for (unsigned Idx : seq<unsigned>(1, StoreOffsetVec.size()))
if (StoreOffsetVec[Idx].second != StoreOffsetVec[Idx-1].second + 1)
return false;
ReorderIndices.reserve(StoresVec.size());
for (StoreInst *SI : StoresVec) {
unsigned Idx = find_if(StoreOffsetVec,
[SI](const std::pair<StoreInst *, int> &Pair) {
return Pair.first == SI;
}) -
StoreOffsetVec.begin();
ReorderIndices.push_back(Idx);
}
auto IsIdentityOrder = [](const OrdersType &Order) {
for (unsigned Idx : seq<unsigned>(0, Order.size()))
if (Idx != Order[Idx])
return false;
return true;
};
if (IsIdentityOrder(ReorderIndices))
ReorderIndices.clear();
return true;
}
#ifndef NDEBUG
LLVM_DUMP_METHOD static void dumpOrder(const BoUpSLP::OrdersType &Order) {
for (unsigned Idx : Order)
dbgs() << Idx << ", ";
dbgs() << "\n";
}
#endif
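/// Iterates through the users of \p TE, looking for scalar stores that can
/// potentially be vectorized in a future SLP tree. If found, it keeps track
/// of their order and builds an order index vector for each store bundle,
/// returning all of the order vectors found. This runs after the tree has
/// formed, so users that are not yet in the tree can still be found.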
SmallVector<BoUpSLP::OrdersType, 1>
BoUpSLP::findExternalStoreUsersReorderIndices(TreeEntry *TE) const {
unsigned NumLanes = TE->Scalars.size();
DenseMap<Value *, SmallVector<StoreInst *, 4>> PtrToStoresMap =
collectUserStores(TE);
SmallVector<OrdersType, 1> ExternalReorderIndices;
for (const auto &Pair : PtrToStoresMap) {
auto &StoresVec = Pair.second;
if (StoresVec.size() != NumLanes)
continue;
OrdersType ReorderIndices;
if (!CanFormVector(StoresVec, ReorderIndices))
continue;
ExternalReorderIndices.push_back(ReorderIndices);
}
return ExternalReorderIndices;
}
void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
const SmallDenseSet<Value *> &UserIgnoreLst) {
deleteTree();
UserIgnoreList = &UserIgnoreLst;
if (!allSameType(Roots))
return;
buildTree_rec(Roots, 0, EdgeInfo());
}
void BoUpSLP::buildTree(ArrayRef<Value *> Roots) {
deleteTree();
if (!allSameType(Roots))
return;
buildTree_rec(Roots, 0, EdgeInfo());
}
#ifndef NDEBUG
static bool needToScheduleSingleInstruction(ArrayRef<Value *> VL) {
Value *NeedsScheduling = nullptr;
for (Value *V : VL) {
if (doesNotNeedToBeScheduled(V))
continue;
if (!NeedsScheduling) {
NeedsScheduling = V;
continue;
}
return false;
}
return NeedsScheduling;
}
#endif
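/// Generates a key/subkey pair for the given value to provide effective
/// sorting of the values and better detection of vectorizable value
/// sequences.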
static std::pair<size_t, size_t> generateKeySubkey(
Value *V, const TargetLibraryInfo *TLI,
function_ref<hash_code(size_t, LoadInst *)> LoadsSubkeyGenerator,
bool AllowAlternate) {
hash_code Key = hash_value(V->getValueID() + 2);
hash_code SubKey = hash_value(0);
if (auto *LI = dyn_cast<LoadInst>(V)) {
Key = hash_combine(hash_value(Instruction::Load), Key);
if (LI->isSimple())
SubKey = hash_value(LoadsSubkeyGenerator(Key, LI));
else
SubKey = hash_value(LI);
} else if (isVectorLikeInstWithConstOps(V)) {
if (isa<ExtractElementInst, UndefValue>(V))
Key = hash_value(Value::UndefValueVal + 1);
if (auto *EI = dyn_cast<ExtractElementInst>(V)) {
if (!isUndefVector(EI->getVectorOperand()) &&
!isa<UndefValue>(EI->getIndexOperand()))
SubKey = hash_value(EI->getVectorOperand());
}
} else if (auto *I = dyn_cast<Instruction>(V)) {
if ((isa<BinaryOperator>(I) || isa<CastInst>(I)) &&
isValidForAlternation(I->getOpcode())) {
if (AllowAlternate)
Key = hash_value(isa<BinaryOperator>(I) ? 1 : 0);
else
Key = hash_combine(hash_value(I->getOpcode()), Key);
SubKey = hash_combine(
hash_value(I->getOpcode()), hash_value(I->getType()),
hash_value(isa<BinaryOperator>(I)
? I->getType()
: cast<CastInst>(I)->getOperand(0)->getType()));
if (isa<CastInst>(I)) {
std::pair<size_t, size_t> OpVals =
generateKeySubkey(I->getOperand(0), TLI, LoadsSubkeyGenerator,
/*AllowAlternate=*/true);
Key = hash_combine(OpVals.first, Key);
SubKey = hash_combine(OpVals.first, SubKey);
}
} else if (auto *CI = dyn_cast<CmpInst>(I)) {
CmpInst::Predicate Pred = CI->getPredicate();
if (CI->isCommutative())
Pred = std::min(Pred, CmpInst::getInversePredicate(Pred));
CmpInst::Predicate SwapPred = CmpInst::getSwappedPredicate(Pred);
SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(Pred),
hash_value(SwapPred),
hash_value(CI->getOperand(0)->getType()));
} else if (auto *Call = dyn_cast<CallInst>(I)) {
Intrinsic::ID ID = getVectorIntrinsicIDForCall(Call, TLI);
if (isTriviallyVectorizable(ID)) {
SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(ID));
} else if (!VFDatabase(*Call).getMappings(*Call).empty()) {
SubKey = hash_combine(hash_value(I->getOpcode()),
hash_value(Call->getCalledFunction()));
} else {
Key = hash_combine(hash_value(Call), Key);
SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(Call));
}
for (const CallBase::BundleOpInfo &Op : Call->bundle_op_infos())
SubKey = hash_combine(hash_value(Op.Begin), hash_value(Op.End),
hash_value(Op.Tag), SubKey);
} else if (auto *Gep = dyn_cast<GetElementPtrInst>(I)) {
if (Gep->getNumOperands() == 2 && isa<ConstantInt>(Gep->getOperand(1)))
SubKey = hash_value(Gep->getPointerOperand());
else
SubKey = hash_value(Gep);
} else if (BinaryOperator::isIntDivRem(I->getOpcode()) &&
!isa<ConstantInt>(I->getOperand(1))) {
SubKey = hash_value(I);
} else {
SubKey = hash_value(I->getOpcode());
}
Key = hash_combine(hash_value(I->getParent()), Key);
}
return std::make_pair(Key, SubKey);
}
void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
const EdgeInfo &UserTreeIdx) {
assert((allConstant(VL) || allSameType(VL)) && "Invalid types!");
SmallVector<int> ReuseShuffleIndicies;
SmallVector<Value *> UniqueValues;
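// Check that every instruction appears once in this bundle.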
auto &&TryToFindDuplicates = [&VL, &ReuseShuffleIndicies, &UniqueValues,
&UserTreeIdx,
this](const InstructionsState &S) {
DenseMap<Value *, unsigned> UniquePositions;
for (Value *V : VL) {
if (isConstant(V)) {
ReuseShuffleIndicies.emplace_back(
isa<UndefValue>(V) ? UndefMaskElem : UniqueValues.size());
UniqueValues.emplace_back(V);
continue;
}
auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
ReuseShuffleIndicies.emplace_back(Res.first->second);
if (Res.second)
UniqueValues.emplace_back(V);
}
size_t NumUniqueScalarValues = UniqueValues.size();
if (NumUniqueScalarValues == VL.size()) {
ReuseShuffleIndicies.clear();
} else {
LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n");
if (NumUniqueScalarValues <= 1 ||
(UniquePositions.size() == 1 && all_of(UniqueValues,
[](Value *V) {
return isa<UndefValue>(V) ||
!isConstant(V);
})) ||
!llvm::isPowerOf2_32(NumUniqueScalarValues)) {
LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
return false;
}
VL = UniqueValues;
}
return true;
};
InstructionsState S = getSameOpcode(VL);
if (Depth >= RecursionMaxDepth &&
!(S.MainOp && isa<Instruction>(S.MainOp) && S.MainOp == S.AltOp &&
VL.size() >= 4 &&
(match(S.MainOp, m_Load(m_Value())) || all_of(VL, [&S](const Value *I) {
return match(I,
m_OneUse(m_ZExtOrSExt(m_OneUse(m_Load(m_Value()))))) &&
cast<Instruction>(I)->getOpcode() ==
cast<Instruction>(S.MainOp)->getOpcode();
})))) {
LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
if (TryToFindDuplicates(S))
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
ReuseShuffleIndicies);
return;
}
if (S.getOpcode() == Instruction::ExtractElement &&
isa<ScalableVectorType>(
cast<ExtractElementInst>(S.OpValue)->getVectorOperandType())) {
LLVM_DEBUG(dbgs() << "SLP: Gathering due to scalable vector type.\n");
if (TryToFindDuplicates(S))
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
ReuseShuffleIndicies);
return;
}
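// Don't handle vectors.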
if (S.OpValue->getType()->isVectorTy() &&
!isa<InsertElementInst>(S.OpValue)) {
LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
return;
}
if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue))
if (SI->getValueOperand()->getType()->isVectorTy()) {
LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
return;
}
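// An alternate-opcode node with only 2 elements whose operands would
// themselves have to be gathered is unlikely to be profitable to vectorize.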
auto &&NotProfitableForVectorization = [&S, this,
Depth](ArrayRef<Value *> VL) {
if (!S.getOpcode() || !S.isAltShuffle() || VL.size() > 2)
return false;
if (VectorizableTree.size() < MinTreeSize)
return false;
if (Depth >= RecursionMaxDepth - 1)
return true;
SmallVector<unsigned, 2> InstsCount(VL.size(), 0);
for (Value *V : VL) {
auto *I = cast<Instruction>(V);
InstsCount.push_back(count_if(I->operand_values(), [](Value *Op) {
return isa<Instruction>(Op) || isVectorLikeInstWithConstOps(Op);
}));
}
bool IsCommutative = isCommutative(S.MainOp) || isCommutative(S.AltOp);
if ((IsCommutative &&
std::accumulate(InstsCount.begin(), InstsCount.end(), 0) < 2) ||
(!IsCommutative &&
all_of(InstsCount, [](unsigned ICnt) { return ICnt < 2; })))
return true;
assert(VL.size() == 2 && "Expected only 2 alternate op instructions.");
SmallVector<SmallVector<std::pair<Value *, Value *>>> Candidates;
auto *I1 = cast<Instruction>(VL.front());
auto *I2 = cast<Instruction>(VL.back());
for (int Op = 0, E = S.MainOp->getNumOperands(); Op < E; ++Op)
Candidates.emplace_back().emplace_back(I1->getOperand(Op),
I2->getOperand(Op));
if (static_cast<unsigned>(count_if(
Candidates, [this](ArrayRef<std::pair<Value *, Value *>> Cand) {
return findBestRootPair(Cand, LookAheadHeuristics::ScoreSplat);
})) >= S.MainOp->getNumOperands() / 2)
return false;
if (S.MainOp->getNumOperands() > 2)
return true;
if (IsCommutative) {
Candidates.clear();
for (int Op = 0, E = S.MainOp->getNumOperands(); Op < E; ++Op)
Candidates.emplace_back().emplace_back(I1->getOperand(Op),
I2->getOperand((Op + 1) % E));
if (any_of(
Candidates, [this](ArrayRef<std::pair<Value *, Value *>> Cand) {
return findBestRootPair(Cand, LookAheadHeuristics::ScoreSplat);
}))
return false;
}
return true;
};
SmallVector<unsigned> SortedIndices;
BasicBlock *BB = nullptr;
bool IsScatterVectorizeUserTE =
UserTreeIdx.UserTE &&
UserTreeIdx.UserTE->State == TreeEntry::ScatterVectorize;
bool AreAllSameInsts =
(S.getOpcode() && allSameBlock(VL)) ||
(S.OpValue->getType()->isPointerTy() && IsScatterVectorizeUserTE &&
VL.size() > 2 &&
all_of(VL,
[&BB](Value *V) {
auto *I = dyn_cast<GetElementPtrInst>(V);
if (!I)
return doesNotNeedToBeScheduled(V);
if (!BB)
BB = I->getParent();
return BB == I->getParent() && I->getNumOperands() == 2;
}) &&
BB &&
sortPtrAccesses(VL, UserTreeIdx.UserTE->getMainOp()->getType(), *DL, *SE,
SortedIndices));
if (allConstant(VL) || isSplat(VL) || !AreAllSameInsts ||
(isa<InsertElementInst, ExtractValueInst, ExtractElementInst>(
S.OpValue) &&
!all_of(VL, isVectorLikeInstWithConstOps)) ||
NotProfitableForVectorization(VL)) {
LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O, small shuffle. \n");
if (TryToFindDuplicates(S))
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
ReuseShuffleIndicies);
return;
}
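// Don't vectorize ephemeral values.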
if (!EphValues.empty()) {
for (Value *V : VL) {
if (EphValues.count(V)) {
LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
<< ") is ephemeral.\n");
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
return;
}
}
}
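// Check if this is a duplicate of another entry.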
if (TreeEntry *E = getTreeEntry(S.OpValue)) {
LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n");
if (!E->isSame(VL)) {
LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
if (TryToFindDuplicates(S))
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
ReuseShuffleIndicies);
return;
}
E->UserTreeIndices.push_back(UserTreeIdx);
LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue
<< ".\n");
return;
}
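// Check that none of the instructions in the bundle are already in the tree.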
for (Value *V : VL) {
if (!IsScatterVectorizeUserTE && !isa<Instruction>(V))
continue;
if (getTreeEntry(V)) {
LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
<< ") is already in tree.\n");
if (TryToFindDuplicates(S))
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
ReuseShuffleIndicies);
return;
}
}
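// The reduction nodes (stored in UserIgnoreList) also should stay scalar.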
if (UserIgnoreList && !UserIgnoreList->empty()) {
for (Value *V : VL) {
if (UserIgnoreList && UserIgnoreList->contains(V)) {
LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
if (TryToFindDuplicates(S))
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
ReuseShuffleIndicies);
return;
}
}
}
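// Special processing for sorted pointers for ScatterVectorize node with
// constant indices only.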
if (AreAllSameInsts && !(S.getOpcode() && allSameBlock(VL)) &&
UserTreeIdx.UserTE &&
UserTreeIdx.UserTE->State == TreeEntry::ScatterVectorize) {
assert(S.OpValue->getType()->isPointerTy() &&
count_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); }) >=
2 &&
"Expected pointers only.");
const auto *It = find_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); });
assert(It != VL.end() && "Expected at least one GEP.");
S = getSameOpcode(*It);
}
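// We now know that this is a vector of instructions of the same type from
// the same block.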
auto *VL0 = cast<Instruction>(S.OpValue);
BB = VL0->getParent();
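// Don't go into unreachable blocks. They may contain instructions with
// dependency cycles which confuse the final scheduling.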
if (!DT->isReachableFromEntry(BB)) {
LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
return;
}
if (isa<CatchSwitchInst>(BB->getTerminator())) {
LLVM_DEBUG(dbgs() << "SLP: bundle in catchswitch block.\n");
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
return;
}
if (!TryToFindDuplicates(S))
return;
auto &BSRef = BlocksSchedules[BB];
if (!BSRef)
BSRef = std::make_unique<BlockScheduling>(BB);
BlockScheduling &BS = *BSRef;
Optional<ScheduleData *> Bundle = BS.tryScheduleBundle(VL, this, S);
#ifdef EXPENSIVE_CHECKS
BS.verify();
#endif
if (!Bundle) {
LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
assert((!BS.getScheduleData(VL0) ||
!BS.getScheduleData(VL0)->isPartOfBundle()) &&
"tryScheduleBundle should cancelScheduling on failure");
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
ReuseShuffleIndicies);
return;
}
LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");
unsigned ShuffleOrOp = S.isAltShuffle() ?
(unsigned) Instruction::ShuffleVector : S.getOpcode();
switch (ShuffleOrOp) {
case Instruction::PHI: {
auto *PH = cast<PHINode>(VL0);
for (Value *V : VL)
for (Value *Incoming : cast<PHINode>(V)->incoming_values()) {
Instruction *Term = dyn_cast<Instruction>(Incoming);
if (Term && Term->isTerminator()) {
LLVM_DEBUG(dbgs()
<< "SLP: Need to swizzle PHINodes (terminator use).\n");
BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
ReuseShuffleIndicies);
return;
}
}
TreeEntry *TE =
newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndicies);
LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");
SmallVector<ValueList, 2> OperandsVec;
for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) {
if (!DT->isReachableFromEntry(PH->getIncomingBlock(I))) {
ValueList Operands(VL.size(), PoisonValue::get(PH->getType()));
TE->setOperand(I, Operands);
OperandsVec.push_back(Operands);
continue;
}
ValueList Operands;
for (Value *V : VL)
Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock(
PH->getIncomingBlock(I)));
TE->setOperand(I, Operands);
OperandsVec.push_back(Operands);
}
for (unsigned OpIdx = 0, OpE = OperandsVec.size(); OpIdx != OpE; ++OpIdx)
buildTree_rec(OperandsVec[OpIdx], Depth + 1, {TE, OpIdx});
return;
}
case Instruction::ExtractValue:
case Instruction::ExtractElement: {
OrdersType CurrentOrder;
bool Reuse = canReuseExtract(VL, VL0, CurrentOrder);
if (Reuse) {
LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n");
newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
ReuseShuffleIndicies);
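// This is a special case, as it does not gather, but at the same time we
// are not extending buildTree_rec() towards the operands.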
ValueList Op0;
Op0.assign(VL.size(), VL0->getOperand(0));
VectorizableTree.back()->setOperand(0, Op0);
return;
}
if (!CurrentOrder.empty()) {
LLVM_DEBUG({
dbgs() << "SLP: Reusing or shuffling of reordered extract sequence "
"with order";
for (unsigned Idx : CurrentOrder)
dbgs() << " " << Idx;
dbgs() << "\n";
});
fixupOrderingIndices(CurrentOrder);
newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
ReuseShuffleIndicies, CurrentOrder);
ValueList Op0;
Op0.assign(VL.size(), VL0->getOperand(0));
VectorizableTree.back()->setOperand(0, Op0);
return;
}
LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n");
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
ReuseShuffleIndicies);
BS.cancelScheduling(VL, VL0);
return;
}
case Instruction::InsertElement: {
assert(ReuseShuffleIndicies.empty() && "All inserts should be unique");
ValueSet SourceVectors;
for (Value *V : VL) {
SourceVectors.insert(cast<Instruction>(V)->getOperand(0));
assert(getInsertIndex(V) != None && "Non-constant or undef index?");
}
if (count_if(VL, [&SourceVectors](Value *V) {
return !SourceVectors.contains(V);
}) >= 2) {
LLVM_DEBUG(dbgs() << "SLP: Gather of insertelement vectors with "
"different source vectors.\n");
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
BS.cancelScheduling(VL, VL0);
return;
}
auto OrdCompare = [](const std::pair<int, int> &P1,
const std::pair<int, int> &P2) {
return P1.first > P2.first;
};
PriorityQueue<std::pair<int, int>, SmallVector<std::pair<int, int>>,
decltype(OrdCompare)>
Indices(OrdCompare);
for (int I = 0, E = VL.size(); I < E; ++I) {
unsigned Idx = *getInsertIndex(VL[I]);
Indices.emplace(Idx, I);
}
OrdersType CurrentOrder(VL.size(), VL.size());
bool IsIdentity = true;
for (int I = 0, E = VL.size(); I < E; ++I) {
CurrentOrder[Indices.top().second] = I;
IsIdentity &= Indices.top().second == I;
Indices.pop();
}
if (IsIdentity)
CurrentOrder.clear();
TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
None, CurrentOrder);
LLVM_DEBUG(dbgs() << "SLP: added inserts bundle.\n");
constexpr int NumOps = 2;
ValueList VectorOperands[NumOps];
for (int I = 0; I < NumOps; ++I) {
for (Value *V : VL)
VectorOperands[I].push_back(cast<Instruction>(V)->getOperand(I));
TE->setOperand(I, VectorOperands[I]);
}
buildTree_rec(VectorOperands[NumOps - 1], Depth + 1, {TE, NumOps - 1});
return;
}
case Instruction::Load: {
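// Check that a vectorized load would load the same memory as a scalar
// load. For example, we don't want to vectorize loads that are smaller
// than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM
// treats loading/storing it as an i8 struct. If we vectorize loads/stores
// from such a struct, we read/write packed bits disagreeing with the
// unvectorized version.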
SmallVector<Value *> PointerOps;
OrdersType CurrentOrder;
TreeEntry *TE = nullptr;
switch (canVectorizeLoads(VL, VL0, *TTI, *DL, *SE, *LI, CurrentOrder,
PointerOps)) {
case LoadsState::Vectorize:
if (CurrentOrder.empty()) {
TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
ReuseShuffleIndicies);
LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n");
} else {
fixupOrderingIndices(CurrentOrder);
TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
ReuseShuffleIndicies, CurrentOrder);
LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n");
}
TE->setOperandsInOrder();
break;
case LoadsState::ScatterVectorize:
TE = newTreeEntry(VL, TreeEntry::ScatterVectorize, Bundle, S,
UserTreeIdx, ReuseShuffleIndicies);
TE->setOperandsInOrder();
buildTree_rec(PointerOps, Depth + 1, {TE, 0});
LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n");
break;
case LoadsState::Gather:
BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
ReuseShuffleIndicies);
#ifndef NDEBUG
Type *ScalarTy = VL0->getType();
if (DL->getTypeSizeInBits(ScalarTy) !=
DL->getTypeAllocSizeInBits(ScalarTy))
LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
else if (any_of(VL, [](Value *V) {
return !cast<LoadInst>(V)->isSimple();
}))
LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
else
LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
#endif // NDEBUG
break;
}
return;
}
case Instruction::ZExt:
case Instruction::SExt:
case Instruction::FPToUI:
case Instruction::FPToSI:
case Instruction::FPExt:
case Instruction::PtrToInt:
case Instruction::IntToPtr:
case Instruction::SIToFP:
case Instruction::UIToFP:
case Instruction::Trunc:
case Instruction::FPTrunc:
case Instruction::BitCast: {
Type *SrcTy = VL0->getOperand(0)->getType();
for (Value *V : VL) {
Type *Ty = cast<Instruction>(V)->getOperand(0)->getType();
if (Ty != SrcTy || !isValidElementType(Ty)) {
BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
ReuseShuffleIndicies);
LLVM_DEBUG(dbgs()
<< "SLP: Gathering casts with different src types.\n");
return;
}
}
TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
ReuseShuffleIndicies);
LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n");
TE->setOperandsInOrder();
for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
ValueList Operands;
for (Value *V : VL)
Operands.push_back(cast<Instruction>(V)->getOperand(i));
buildTree_rec(Operands, Depth + 1, {TE, i});
}
return;
}
case Instruction::ICmp:
case Instruction::FCmp: {
CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0);
Type *ComparedTy = VL0->getOperand(0)->getType();
for (Value *V : VL) {
CmpInst *Cmp = cast<CmpInst>(V);
if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) ||
Cmp->getOperand(0)->getType() != ComparedTy) {
BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
ReuseShuffleIndicies);
LLVM_DEBUG(dbgs()
<< "SLP: Gathering cmp with different predicate.\n");
return;
}
}
TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
ReuseShuffleIndicies);
LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n");
ValueList Left, Right;
if (cast<CmpInst>(VL0)->isCommutative()) {
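// Commutative predicate - collect + sort operands of the instructions so
// that each side is more likely to have the same opcode.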
assert(P0 == SwapP0 && "Commutative Predicate mismatch");
reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this);
} else {
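// Collect operands - commute if it uses the swapped predicate.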
for (Value *V : VL) {
auto *Cmp = cast<CmpInst>(V);
Value *LHS = Cmp->getOperand(0);
Value *RHS = Cmp->getOperand(1);
if (Cmp->getPredicate() != P0)
std::swap(LHS, RHS);
Left.push_back(LHS);
Right.push_back(RHS);
}
}
TE->setOperand(0, Left);
TE->setOperand(1, Right);
buildTree_rec(Left, Depth + 1, {TE, 0});
buildTree_rec(Right, Depth + 1, {TE, 1});
return;
}
case Instruction::Select:
case Instruction::FNeg:
case Instruction::Add:
case Instruction::FAdd:
case Instruction::Sub:
case Instruction::FSub:
case Instruction::Mul:
case Instruction::FMul:
case Instruction::UDiv:
case Instruction::SDiv:
case Instruction::FDiv:
case Instruction::URem:
case Instruction::SRem:
case Instruction::FRem:
case Instruction::Shl:
case Instruction::LShr:
case Instruction::AShr:
case Instruction::And:
case Instruction::Or:
case Instruction::Xor: {
TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
ReuseShuffleIndicies);
LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n");
if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
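// Sort operands of the instructions so that each side is more likely to
// have the same opcode.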
ValueList Left, Right;
reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this);
TE->setOperand(0, Left);
TE->setOperand(1, Right);
buildTree_rec(Left, Depth + 1, {TE, 0});
buildTree_rec(Right, Depth + 1, {TE, 1});
return;
}
TE->setOperandsInOrder();
for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
ValueList Operands;
for (Value *V : VL)
Operands.push_back(cast<Instruction>(V)->getOperand(i));
buildTree_rec(Operands, Depth + 1, {TE, i});
}
return;
}
case Instruction::GetElementPtr: {
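// We don't combine GEPs with complicated (nested) indexing.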
for (Value *V : VL) {
auto *I = dyn_cast<GetElementPtrInst>(V);
if (!I)
continue;
if (I->getNumOperands() != 2) {
LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                       ReuseShuffleIndicies);
return;
}
}
Type *Ty0 = cast<GEPOperator>(VL0)->getSourceElementType();
for (Value *V : VL) {
auto *GEP = dyn_cast<GEPOperator>(V);
if (!GEP)
continue;
Type *CurTy = GEP->getSourceElementType();
if (Ty0 != CurTy) {
LLVM_DEBUG(dbgs()
<< "SLP: not-vectorizable GEP (different types).\n");
BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                       ReuseShuffleIndicies);
return;
}
}
Type *Ty1 = VL0->getOperand(1)->getType();
for (Value *V : VL) {
auto *I = dyn_cast<GetElementPtrInst>(V);
if (!I)
continue;
auto *Op = I->getOperand(1);
if ((!IsScatterVectorizeUserTE && !isa<ConstantInt>(Op)) ||
(Op->getType() != Ty1 &&
((IsScatterVectorizeUserTE && !isa<ConstantInt>(Op)) ||
Op->getType()->getScalarSizeInBits() >
DL->getIndexSizeInBits(
V->getType()->getPointerAddressSpace())))) {
LLVM_DEBUG(dbgs()
<< "SLP: not-vectorizable GEP (non-constant indexes).\n");
BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                       ReuseShuffleIndicies);
return;
}
}
      TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
                                   ReuseShuffleIndicies);
LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
SmallVector<ValueList, 2> Operands(2);
for (Value *V : VL) {
auto *GEP = dyn_cast<GetElementPtrInst>(V);
if (!GEP) {
Operands.front().push_back(V);
continue;
}
Operands.front().push_back(GEP->getPointerOperand());
}
TE->setOperand(0, Operands.front());
int IndexIdx = 1;
Type *VL0Ty = VL0->getOperand(IndexIdx)->getType();
Type *Ty = all_of(VL,
[VL0Ty, IndexIdx](Value *V) {
auto *GEP = dyn_cast<GetElementPtrInst>(V);
if (!GEP)
return true;
return VL0Ty == GEP->getOperand(IndexIdx)->getType();
})
? VL0Ty
: DL->getIndexType(cast<GetElementPtrInst>(VL0)
->getPointerOperandType()
->getScalarType());
for (Value *V : VL) {
auto *I = dyn_cast<GetElementPtrInst>(V);
if (!I) {
Operands.back().push_back(
            ConstantInt::get(Ty, 0, /*isSigned=*/false));
continue;
}
auto *Op = I->getOperand(IndexIdx);
auto *CI = dyn_cast<ConstantInt>(Op);
if (!CI)
Operands.back().push_back(Op);
else
Operands.back().push_back(ConstantExpr::getIntegerCast(
CI, Ty, CI->getValue().isSignBitSet()));
}
TE->setOperand(IndexIdx, Operands.back());
for (unsigned I = 0, Ops = Operands.size(); I < Ops; ++I)
buildTree_rec(Operands[I], Depth + 1, {TE, I});
return;
}
case Instruction::Store: {
llvm::Type *ScalarTy = cast<StoreInst>(VL0)->getValueOperand()->getType();
if (DL->getTypeSizeInBits(ScalarTy) !=
DL->getTypeAllocSizeInBits(ScalarTy)) {
BS.cancelScheduling(VL, VL0);
        newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                     ReuseShuffleIndicies);
LLVM_DEBUG(dbgs() << "SLP: Gathering stores of non-packed type.\n");
return;
}
SmallVector<Value *, 4> PointerOps(VL.size());
ValueList Operands(VL.size());
auto POIter = PointerOps.begin();
auto OIter = Operands.begin();
for (Value *V : VL) {
auto *SI = cast<StoreInst>(V);
if (!SI->isSimple()) {
BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                       ReuseShuffleIndicies);
LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple stores.\n");
return;
}
*POIter = SI->getPointerOperand();
*OIter = SI->getValueOperand();
++POIter;
++OIter;
}
OrdersType CurrentOrder;
if (llvm::sortPtrAccesses(PointerOps, ScalarTy, *DL, *SE, CurrentOrder)) {
Value *Ptr0;
Value *PtrN;
if (CurrentOrder.empty()) {
Ptr0 = PointerOps.front();
PtrN = PointerOps.back();
} else {
Ptr0 = PointerOps[CurrentOrder.front()];
PtrN = PointerOps[CurrentOrder.back()];
}
Optional<int> Dist =
getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, *DL, *SE);
if (static_cast<unsigned>(*Dist) == VL.size() - 1) {
if (CurrentOrder.empty()) {
            TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S,
                                         UserTreeIdx, ReuseShuffleIndicies);
TE->setOperandsInOrder();
buildTree_rec(Operands, Depth + 1, {TE, 0});
LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n");
} else {
fixupOrderingIndices(CurrentOrder);
            TreeEntry *TE =
                newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
                             ReuseShuffleIndicies, CurrentOrder);
TE->setOperandsInOrder();
buildTree_rec(Operands, Depth + 1, {TE, 0});
LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n");
}
return;
}
}
BS.cancelScheduling(VL, VL0);
      newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                   ReuseShuffleIndicies);
LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
return;
}
case Instruction::Call: {
CallInst *CI = cast<CallInst>(VL0);
Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
VFShape Shape = VFShape::get(
*CI, ElementCount::getFixed(static_cast<unsigned int>(VL.size())),
          false /*HasGlobalPred*/);
Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
if (!VecFunc && !isTriviallyVectorizable(ID)) {
BS.cancelScheduling(VL, VL0);
        newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                     ReuseShuffleIndicies);
LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
return;
}
Function *F = CI->getCalledFunction();
unsigned NumArgs = CI->arg_size();
SmallVector<Value*, 4> ScalarArgs(NumArgs, nullptr);
for (unsigned j = 0; j != NumArgs; ++j)
if (isVectorIntrinsicWithScalarOpAtArg(ID, j))
ScalarArgs[j] = CI->getArgOperand(j);
for (Value *V : VL) {
CallInst *CI2 = dyn_cast<CallInst>(V);
if (!CI2 || CI2->getCalledFunction() != F ||
getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
(VecFunc &&
VecFunc != VFDatabase(*CI2).getVectorizedFunction(Shape)) ||
!CI->hasIdenticalOperandBundleSchema(*CI2)) {
BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                       ReuseShuffleIndicies);
LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *V
<< "\n");
return;
}
for (unsigned j = 0; j != NumArgs; ++j) {
if (isVectorIntrinsicWithScalarOpAtArg(ID, j)) {
Value *A1J = CI2->getArgOperand(j);
if (ScalarArgs[j] != A1J) {
BS.cancelScheduling(VL, VL0);
              newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                           ReuseShuffleIndicies);
LLVM_DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
<< " argument " << ScalarArgs[j] << "!=" << A1J
<< "\n");
return;
}
}
}
if (CI->hasOperandBundles() &&
!std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
CI->op_begin() + CI->getBundleOperandsEndIndex(),
CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                       ReuseShuffleIndicies);
LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:"
<< *CI << "!=" << *V << '\n');
return;
}
}
      TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
                                   ReuseShuffleIndicies);
TE->setOperandsInOrder();
for (unsigned i = 0, e = CI->arg_size(); i != e; ++i) {
if (isVectorIntrinsicWithScalarOpAtArg(ID, i))
continue;
ValueList Operands;
for (Value *V : VL) {
auto *CI2 = cast<CallInst>(V);
Operands.push_back(CI2->getArgOperand(i));
}
buildTree_rec(Operands, Depth + 1, {TE, i});
}
return;
}
case Instruction::ShuffleVector: {
if (!S.isAltShuffle()) {
BS.cancelScheduling(VL, VL0);
        newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                     ReuseShuffleIndicies);
        LLVM_DEBUG(dbgs() << "SLP: ShuffleVectors are not vectorized.\n");
return;
}
      TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
                                   ReuseShuffleIndicies);
LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
auto *CI = dyn_cast<CmpInst>(VL0);
if (isa<BinaryOperator>(VL0) || CI) {
ValueList Left, Right;
if (!CI || all_of(VL, [](Value *V) {
return cast<CmpInst>(V)->isCommutative();
})) {
reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this);
} else {
CmpInst::Predicate P0 = CI->getPredicate();
CmpInst::Predicate AltP0 = cast<CmpInst>(S.AltOp)->getPredicate();
assert(P0 != AltP0 &&
"Expected different main/alternate predicates.");
CmpInst::Predicate AltP0Swapped = CmpInst::getSwappedPredicate(AltP0);
Value *BaseOp0 = VL0->getOperand(0);
Value *BaseOp1 = VL0->getOperand(1);
for (Value *V : VL) {
auto *Cmp = cast<CmpInst>(V);
Value *LHS = Cmp->getOperand(0);
Value *RHS = Cmp->getOperand(1);
CmpInst::Predicate CurrentPred = Cmp->getPredicate();
if (P0 == AltP0Swapped) {
if (CI != Cmp && S.AltOp != Cmp &&
((P0 == CurrentPred &&
!areCompatibleCmpOps(BaseOp0, BaseOp1, LHS, RHS)) ||
(AltP0 == CurrentPred &&
areCompatibleCmpOps(BaseOp0, BaseOp1, LHS, RHS))))
std::swap(LHS, RHS);
} else if (P0 != CurrentPred && AltP0 != CurrentPred) {
std::swap(LHS, RHS);
}
Left.push_back(LHS);
Right.push_back(RHS);
}
}
TE->setOperand(0, Left);
TE->setOperand(1, Right);
buildTree_rec(Left, Depth + 1, {TE, 0});
buildTree_rec(Right, Depth + 1, {TE, 1});
return;
}
TE->setOperandsInOrder();
for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
ValueList Operands;
for (Value *V : VL)
Operands.push_back(cast<Instruction>(V)->getOperand(i));
buildTree_rec(Operands, Depth + 1, {TE, i});
}
return;
}
default:
BS.cancelScheduling(VL, VL0);
    newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                 ReuseShuffleIndicies);
LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
return;
}
}
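// Checks whether T is a homogeneous aggregate (nested structs/arrays/vectors
// of one element type) that can be flattened into a vector. Returns the total
// number of scalar elements, or 0 if the element type is invalid or the
// flattened store size falls outside [MinVecRegSize, MaxVecRegSize] or
// disagrees with the store size of T itself.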
unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const {
unsigned N = 1;
Type *EltTy = T;
while (isa<StructType>(EltTy) || isa<ArrayType>(EltTy) ||
isa<VectorType>(EltTy)) {
if (auto *ST = dyn_cast<StructType>(EltTy)) {
for (const auto *Ty : ST->elements())
if (Ty != *ST->element_begin())
return 0;
N *= ST->getNumElements();
EltTy = *ST->element_begin();
} else if (auto *AT = dyn_cast<ArrayType>(EltTy)) {
N *= AT->getNumElements();
EltTy = AT->getElementType();
} else {
auto *VT = cast<FixedVectorType>(EltTy);
N *= VT->getNumElements();
EltTy = VT->getElementType();
}
}
if (!isValidElementType(EltTy))
return 0;
uint64_t VTSize = DL.getTypeStoreSizeInBits(FixedVectorType::get(EltTy, N));
  if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize ||
      VTSize != DL.getTypeStoreSizeInBits(T))
return 0;
return N;
}
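// Checks whether the extractelement/extractvalue instructions in VL all read
// distinct lanes of a single source vector of matching width. Returns true
// when the extracts appear in natural lane order (CurrentOrder is left
// empty); returns false with CurrentOrder holding the permutation when a
// reshuffle would be needed, or with CurrentOrder cleared when the extracts
// cannot be reused at all.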
bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
SmallVectorImpl<unsigned> &CurrentOrder) const {
const auto *It = find_if(VL, [](Value *V) {
return isa<ExtractElementInst, ExtractValueInst>(V);
});
assert(It != VL.end() && "Expected at least one extract instruction.");
auto *E0 = cast<Instruction>(*It);
assert(all_of(VL,
[](Value *V) {
return isa<UndefValue, ExtractElementInst, ExtractValueInst>(
V);
}) &&
"Invalid opcode");
Value *Vec = E0->getOperand(0);
CurrentOrder.clear();
unsigned NElts;
if (E0->getOpcode() == Instruction::ExtractValue) {
const DataLayout &DL = E0->getModule()->getDataLayout();
NElts = canMapToVector(Vec->getType(), DL);
if (!NElts)
return false;
LoadInst *LI = dyn_cast<LoadInst>(Vec);
if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size()))
return false;
} else {
NElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
}
if (NElts != VL.size())
return false;
bool ShouldKeepOrder = true;
unsigned E = VL.size();
CurrentOrder.assign(E, E);
unsigned I = 0;
for (; I < E; ++I) {
auto *Inst = dyn_cast<Instruction>(VL[I]);
if (!Inst)
continue;
if (Inst->getOperand(0) != Vec)
break;
if (auto *EE = dyn_cast<ExtractElementInst>(Inst))
if (isa<UndefValue>(EE->getIndexOperand()))
continue;
Optional<unsigned> Idx = getExtractIndex(Inst);
if (!Idx)
break;
const unsigned ExtIdx = *Idx;
if (ExtIdx != I) {
if (ExtIdx >= E || CurrentOrder[ExtIdx] != E)
break;
ShouldKeepOrder = false;
CurrentOrder[ExtIdx] = I;
} else {
if (CurrentOrder[I] != E)
break;
CurrentOrder[I] = I;
}
}
if (I < E) {
CurrentOrder.clear();
return false;
}
if (ShouldKeepOrder)
CurrentOrder.clear();
return ShouldKeepOrder;
}
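// Returns true if every user of I is accounted for by the vectorized tree:
// already a tree entry, a vector-like instruction with constant operands, or
// a gathered extractelement. A single-use scalar listed in VectorizedVals
// also counts as fully vectorized.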
bool BoUpSLP::areAllUsersVectorized(Instruction *I,
ArrayRef<Value *> VectorizedVals) const {
return (I->hasOneUse() && is_contained(VectorizedVals, I)) ||
all_of(I->users(), [this](User *U) {
return ScalarToTreeEntry.count(U) > 0 ||
isVectorLikeInstWithConstOps(U) ||
(isa<ExtractElementInst>(U) && MustGather.contains(U));
});
}
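/// Computes two costs of vectorizing the call \p CI with the vector factor of
/// \p VecTy: as a target intrinsic and as a call to a vector library function
/// (when the VFDatabase knows a mapping). Returns {intrinsic cost, libcall
/// cost}; the libcall cost defaults to the intrinsic cost when no vector
/// library version exists.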
static std::pair<InstructionCost, InstructionCost>
getVectorCallCosts(CallInst *CI, FixedVectorType *VecTy,
TargetTransformInfo *TTI, TargetLibraryInfo *TLI) {
Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
SmallVector<Type *, 4> VecTys;
for (Use &Arg : CI->args())
VecTys.push_back(
FixedVectorType::get(Arg->getType(), VecTy->getNumElements()));
FastMathFlags FMF;
if (auto *FPCI = dyn_cast<FPMathOperator>(CI))
FMF = FPCI->getFastMathFlags();
SmallVector<const Value *> Arguments(CI->args());
IntrinsicCostAttributes CostAttrs(ID, VecTy, Arguments, VecTys, FMF,
dyn_cast<IntrinsicInst>(CI));
auto IntrinsicCost =
TTI->getIntrinsicInstrCost(CostAttrs, TTI::TCK_RecipThroughput);
auto Shape = VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>(
VecTy->getNumElements())),
                            false /*HasGlobalPred*/);
Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
auto LibCost = IntrinsicCost;
if (!CI->isNoBuiltin() && VecFunc) {
LibCost = TTI->getCallInstrCost(nullptr, VecTy, VecTys,
TTI::TCK_RecipThroughput);
}
return {IntrinsicCost, LibCost};
}
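/// Computes the cost of creating a vector of type \p VecTy from the
/// extractelements in \p VL. When the vector is wider than one hardware
/// register, extracts that stay consecutive within a register are free and
/// only the per-register permutations are charged.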
static InstructionCost
computeExtractCost(ArrayRef<Value *> VL, FixedVectorType *VecTy,
TargetTransformInfo::ShuffleKind ShuffleKind,
ArrayRef<int> Mask, TargetTransformInfo &TTI) {
unsigned NumOfParts = TTI.getNumberOfParts(VecTy);
if (ShuffleKind != TargetTransformInfo::SK_PermuteSingleSrc || !NumOfParts ||
VecTy->getNumElements() < NumOfParts)
return TTI.getShuffleCost(ShuffleKind, VecTy, Mask);
bool AllConsecutive = true;
unsigned EltsPerVector = VecTy->getNumElements() / NumOfParts;
unsigned Idx = -1;
InstructionCost Cost = 0;
SmallVector<int> RegMask(EltsPerVector, UndefMaskElem);
for (auto *V : VL) {
++Idx;
if (Idx % EltsPerVector == 0) {
RegMask.assign(EltsPerVector, UndefMaskElem);
AllConsecutive = true;
continue;
}
if (isa<UndefValue>(V) || Mask[Idx] == UndefMaskElem)
continue;
unsigned CurrentIdx = *getExtractIndex(cast<Instruction>(V));
if (!isa<UndefValue>(VL[Idx - 1]) && Mask[Idx - 1] != UndefMaskElem) {
unsigned PrevIdx = *getExtractIndex(cast<Instruction>(VL[Idx - 1]));
AllConsecutive &= PrevIdx + 1 == CurrentIdx &&
CurrentIdx % EltsPerVector == Idx % EltsPerVector;
RegMask[Idx % EltsPerVector] = CurrentIdx % EltsPerVector;
}
if (AllConsecutive)
continue;
if ((Idx + 1) % EltsPerVector != 0 && Idx + 1 != VL.size())
continue;
Cost += TTI.getShuffleCost(
TargetTransformInfo::SK_PermuteSingleSrc,
FixedVectorType::get(VecTy->getElementType(), EltsPerVector), RegMask);
}
return Cost;
}
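/// Builds the shuffle mask for an alternate-opcode node: lanes executing the
/// main opcode select from the first vector ([0, Sz)), lanes executing the
/// alternate opcode select from the second ([Sz, 2 * Sz)), honoring both the
/// reorder and the reuse indices. Optionally collects the scalars of each
/// kind into \p OpScalars and \p AltScalars.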
static void
buildShuffleEntryMask(ArrayRef<Value *> VL, ArrayRef<unsigned> ReorderIndices,
ArrayRef<int> ReusesIndices,
const function_ref<bool(Instruction *)> IsAltOp,
SmallVectorImpl<int> &Mask,
SmallVectorImpl<Value *> *OpScalars = nullptr,
SmallVectorImpl<Value *> *AltScalars = nullptr) {
unsigned Sz = VL.size();
Mask.assign(Sz, UndefMaskElem);
SmallVector<int> OrderMask;
if (!ReorderIndices.empty())
inversePermutation(ReorderIndices, OrderMask);
for (unsigned I = 0; I < Sz; ++I) {
unsigned Idx = I;
if (!ReorderIndices.empty())
Idx = OrderMask[I];
auto *OpInst = cast<Instruction>(VL[Idx]);
if (IsAltOp(OpInst)) {
Mask[I] = Sz + Idx;
if (AltScalars)
AltScalars->push_back(OpInst);
} else {
Mask[I] = Idx;
if (OpScalars)
OpScalars->push_back(OpInst);
}
}
if (!ReusesIndices.empty()) {
SmallVector<int> NewMask(ReusesIndices.size(), UndefMaskElem);
transform(ReusesIndices, NewMask.begin(), [&Mask](int Idx) {
return Idx != UndefMaskElem ? Mask[Idx] : UndefMaskElem;
});
Mask.swap(NewMask);
}
}
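/// Returns true if \p I should be treated as the alternate operation of the
/// node described by \p MainOp / \p AltOp. Compares are matched by predicate,
/// accepting the swapped form of the alternate predicate; all other
/// instructions are matched by opcode alone.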
static bool isAlternateInstruction(const Instruction *I,
const Instruction *MainOp,
const Instruction *AltOp) {
if (auto *CI0 = dyn_cast<CmpInst>(MainOp)) {
auto *AltCI0 = cast<CmpInst>(AltOp);
auto *CI = cast<CmpInst>(I);
CmpInst::Predicate P0 = CI0->getPredicate();
CmpInst::Predicate AltP0 = AltCI0->getPredicate();
assert(P0 != AltP0 && "Expected different main/alternate predicates.");
CmpInst::Predicate AltP0Swapped = CmpInst::getSwappedPredicate(AltP0);
CmpInst::Predicate CurrentPred = CI->getPredicate();
if (P0 == AltP0Swapped)
return I == AltCI0 ||
(I != MainOp &&
!areCompatibleCmpOps(CI0->getOperand(0), CI0->getOperand(1),
CI->getOperand(0), CI->getOperand(1)));
return AltP0 == CurrentPred || AltP0Swapped == CurrentPred;
}
return I->getOpcode() == AltOp->getOpcode();
}
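// Returns the cost of tree entry E relative to its scalar form: the vector
// (or gather) cost plus any common shuffle cost, minus the cost of the scalar
// instructions the entry replaces. Negative values mean vectorizing this
// entry is profitable.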
InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
ArrayRef<Value *> VectorizedVals) {
ArrayRef<Value*> VL = E->Scalars;
Type *ScalarTy = VL[0]->getType();
if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
ScalarTy = SI->getValueOperand()->getType();
else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0]))
ScalarTy = CI->getOperand(0)->getType();
else if (auto *IE = dyn_cast<InsertElementInst>(VL[0]))
ScalarTy = IE->getOperand(1)->getType();
auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
if (MinBWs.count(VL[0]))
VecTy = FixedVectorType::get(
IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size());
unsigned EntryVF = E->getVectorFactor();
auto *FinalVecTy = FixedVectorType::get(VecTy->getElementType(), EntryVF);
bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
TargetTransformInfo &TTIRef = *TTI;
auto &&AdjustExtractsCost = [this, &TTIRef, CostKind, VL, VecTy,
VectorizedVals, E](InstructionCost &Cost) {
DenseMap<Value *, int> ExtractVectorsTys;
SmallPtrSet<Value *, 4> CheckedExtracts;
for (auto *V : VL) {
if (isa<UndefValue>(V))
continue;
const TreeEntry *VE = getTreeEntry(V);
if (!CheckedExtracts.insert(V).second ||
!areAllUsersVectorized(cast<Instruction>(V), VectorizedVals) ||
(VE && VE != E))
continue;
auto *EE = cast<ExtractElementInst>(V);
Optional<unsigned> EEIdx = getExtractIndex(EE);
if (!EEIdx)
continue;
unsigned Idx = *EEIdx;
if (TTIRef.getNumberOfParts(VecTy) !=
TTIRef.getNumberOfParts(EE->getVectorOperandType())) {
auto It =
ExtractVectorsTys.try_emplace(EE->getVectorOperand(), Idx).first;
It->getSecond() = std::min<int>(It->second, Idx);
}
if (EE->hasOneUse()) {
Instruction *Ext = EE->user_back();
if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
all_of(Ext->users(),
[](User *U) { return isa<GetElementPtrInst>(U); })) {
Cost -=
TTIRef.getExtractWithExtendCost(Ext->getOpcode(), Ext->getType(),
EE->getVectorOperandType(), Idx);
Cost += TTIRef.getCastInstrCost(
Ext->getOpcode(), Ext->getType(), EE->getType(),
TTI::getCastContextHint(Ext), CostKind, Ext);
continue;
}
}
Cost -= TTIRef.getVectorInstrCost(Instruction::ExtractElement,
EE->getVectorOperandType(), Idx);
}
for (const auto &Data : ExtractVectorsTys) {
auto *EEVTy = cast<FixedVectorType>(Data.first->getType());
unsigned NumElts = VecTy->getNumElements();
if (Data.second % NumElts == 0)
continue;
if (TTIRef.getNumberOfParts(EEVTy) > TTIRef.getNumberOfParts(VecTy)) {
unsigned Idx = (Data.second / NumElts) * NumElts;
unsigned EENumElts = EEVTy->getNumElements();
if (Idx + NumElts <= EENumElts) {
Cost +=
TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
EEVTy, None, Idx, VecTy);
} else {
auto *SubVT =
FixedVectorType::get(VecTy->getElementType(), EENumElts - Idx);
Cost +=
TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
EEVTy, None, Idx, SubVT);
}
} else {
Cost += TTIRef.getShuffleCost(TargetTransformInfo::SK_InsertSubvector,
VecTy, None, 0, EEVTy);
}
}
};
if (E->State == TreeEntry::NeedToGather) {
if (allConstant(VL))
return 0;
if (isa<InsertElementInst>(VL[0]))
return InstructionCost::getInvalid();
SmallVector<int> Mask;
SmallVector<const TreeEntry *> Entries;
Optional<TargetTransformInfo::ShuffleKind> Shuffle =
isGatherShuffledEntry(E, Mask, Entries);
if (Shuffle) {
InstructionCost GatherCost = 0;
if (ShuffleVectorInst::isIdentityMask(Mask)) {
LLVM_DEBUG(
dbgs()
<< "SLP: perfect diamond match for gather bundle that starts with "
<< *VL.front() << ".\n");
if (NeedToShuffleReuses)
GatherCost =
TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc,
FinalVecTy, E->ReuseShuffleIndices);
} else {
LLVM_DEBUG(dbgs() << "SLP: shuffled " << Entries.size()
<< " entries for bundle that starts with "
<< *VL.front() << ".\n");
::addMask(Mask, E->ReuseShuffleIndices);
GatherCost = TTI->getShuffleCost(*Shuffle, FinalVecTy, Mask);
}
return GatherCost;
}
if ((E->getOpcode() == Instruction::ExtractElement ||
all_of(E->Scalars,
[](Value *V) {
return isa<ExtractElementInst, UndefValue>(V);
})) &&
allSameType(VL)) {
SmallVector<int> Mask;
Optional<TargetTransformInfo::ShuffleKind> ShuffleKind =
isFixedVectorShuffle(VL, Mask);
if (ShuffleKind) {
InstructionCost Cost =
computeExtractCost(VL, VecTy, *ShuffleKind, Mask, *TTI);
AdjustExtractsCost(Cost);
if (NeedToShuffleReuses)
Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc,
FinalVecTy, E->ReuseShuffleIndices);
return Cost;
}
}
if (isSplat(VL)) {
assert(VecTy == FinalVecTy &&
"No reused scalars expected for broadcast.");
return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy,
                                 /*Mask=*/None, /*Index=*/0,
                                 /*SubTp=*/nullptr, /*Args=*/VL[0]);
}
InstructionCost ReuseShuffleCost = 0;
if (NeedToShuffleReuses)
ReuseShuffleCost = TTI->getShuffleCost(
TTI::SK_PermuteSingleSrc, FinalVecTy, E->ReuseShuffleIndices);
if (VL.size() > 2 && E->getOpcode() == Instruction::Load &&
!E->isAltShuffle()) {
BoUpSLP::ValueSet VectorizedLoads;
unsigned StartIdx = 0;
unsigned VF = VL.size() / 2;
unsigned VectorizedCnt = 0;
unsigned ScatterVectorizeCnt = 0;
const unsigned Sz = DL->getTypeSizeInBits(E->getMainOp()->getType());
for (unsigned MinVF = getMinVF(2 * Sz); VF >= MinVF; VF /= 2) {
for (unsigned Cnt = StartIdx, End = VL.size(); Cnt + VF <= End;
Cnt += VF) {
ArrayRef<Value *> Slice = VL.slice(Cnt, VF);
if (!VectorizedLoads.count(Slice.front()) &&
!VectorizedLoads.count(Slice.back()) && allSameBlock(Slice)) {
SmallVector<Value *> PointerOps;
OrdersType CurrentOrder;
LoadsState LS =
canVectorizeLoads(Slice, Slice.front(), *TTI, *DL, *SE, *LI,
CurrentOrder, PointerOps);
switch (LS) {
case LoadsState::Vectorize:
case LoadsState::ScatterVectorize:
if (LS == LoadsState::Vectorize)
++VectorizedCnt;
else
++ScatterVectorizeCnt;
VectorizedLoads.insert(Slice.begin(), Slice.end());
if (Cnt == StartIdx)
StartIdx += VF;
break;
case LoadsState::Gather:
break;
}
}
}
if (StartIdx >= VL.size())
break;
if (!VectorizedLoads.empty())
break;
}
if (!VectorizedLoads.empty()) {
InstructionCost GatherCost = 0;
unsigned NumParts = TTI->getNumberOfParts(VecTy);
bool NeedInsertSubvectorAnalysis =
!NumParts || (VL.size() / VF) > NumParts;
for (unsigned I = 0, End = VL.size(); I < End; I += VF) {
if (VectorizedLoads.contains(VL[I]))
continue;
GatherCost += getGatherCost(VL.slice(I, VF));
}
InstructionCost ScalarsCost = 0;
for (Value *V : VectorizedLoads) {
auto *LI = cast<LoadInst>(V);
ScalarsCost += TTI->getMemoryOpCost(
Instruction::Load, LI->getType(), LI->getAlign(),
LI->getPointerAddressSpace(), CostKind, LI);
}
auto *LI = cast<LoadInst>(E->getMainOp());
auto *LoadTy = FixedVectorType::get(LI->getType(), VF);
Align Alignment = LI->getAlign();
GatherCost +=
VectorizedCnt *
TTI->getMemoryOpCost(Instruction::Load, LoadTy, Alignment,
LI->getPointerAddressSpace(), CostKind, LI);
GatherCost += ScatterVectorizeCnt *
TTI->getGatherScatterOpCost(
Instruction::Load, LoadTy, LI->getPointerOperand(),
                        /*VariableMask=*/false, Alignment, CostKind, LI);
if (NeedInsertSubvectorAnalysis) {
for (int I = VF, E = VL.size(); I < E; I += VF)
GatherCost += TTI->getShuffleCost(TTI::SK_InsertSubvector, VecTy,
None, I, LoadTy);
}
return ReuseShuffleCost + GatherCost - ScalarsCost;
}
}
return ReuseShuffleCost + getGatherCost(VL);
}
InstructionCost CommonCost = 0;
SmallVector<int> Mask;
if (!E->ReorderIndices.empty()) {
SmallVector<int> NewMask;
if (E->getOpcode() == Instruction::Store) {
NewMask.resize(E->ReorderIndices.size());
copy(E->ReorderIndices, NewMask.begin());
} else {
inversePermutation(E->ReorderIndices, NewMask);
}
::addMask(Mask, NewMask);
}
if (NeedToShuffleReuses)
::addMask(Mask, E->ReuseShuffleIndices);
if (!Mask.empty() && !ShuffleVectorInst::isIdentityMask(Mask))
CommonCost =
TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FinalVecTy, Mask);
assert((E->State == TreeEntry::Vectorize ||
E->State == TreeEntry::ScatterVectorize) &&
"Unhandled state");
assert(E->getOpcode() &&
((allSameType(VL) && allSameBlock(VL)) ||
(E->getOpcode() == Instruction::GetElementPtr &&
E->getMainOp()->getType()->isPointerTy())) &&
"Invalid VL");
Instruction *VL0 = E->getMainOp();
unsigned ShuffleOrOp =
E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
switch (ShuffleOrOp) {
case Instruction::PHI:
return 0;
case Instruction::ExtractValue:
case Instruction::ExtractElement: {
if (NeedToShuffleReuses) {
unsigned Idx = 0;
for (unsigned I : E->ReuseShuffleIndices) {
if (ShuffleOrOp == Instruction::ExtractElement) {
auto *EE = cast<ExtractElementInst>(VL[I]);
CommonCost -= TTI->getVectorInstrCost(Instruction::ExtractElement,
EE->getVectorOperandType(),
*getExtractIndex(EE));
} else {
CommonCost -= TTI->getVectorInstrCost(Instruction::ExtractElement,
VecTy, Idx);
++Idx;
}
}
Idx = EntryVF;
for (Value *V : VL) {
if (ShuffleOrOp == Instruction::ExtractElement) {
auto *EE = cast<ExtractElementInst>(V);
CommonCost += TTI->getVectorInstrCost(Instruction::ExtractElement,
EE->getVectorOperandType(),
*getExtractIndex(EE));
} else {
--Idx;
CommonCost += TTI->getVectorInstrCost(Instruction::ExtractElement,
VecTy, Idx);
}
}
}
if (ShuffleOrOp == Instruction::ExtractValue) {
for (unsigned I = 0, E = VL.size(); I < E; ++I) {
auto *EI = cast<Instruction>(VL[I]);
if (EI->hasOneUse()) {
Instruction *Ext = EI->user_back();
if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
all_of(Ext->users(),
[](User *U) { return isa<GetElementPtrInst>(U); })) {
CommonCost -= TTI->getExtractWithExtendCost(
Ext->getOpcode(), Ext->getType(), VecTy, I);
CommonCost += TTI->getCastInstrCost(
Ext->getOpcode(), Ext->getType(), EI->getType(),
TTI::getCastContextHint(Ext), CostKind, Ext);
continue;
}
}
CommonCost -=
TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, I);
}
} else {
AdjustExtractsCost(CommonCost);
}
return CommonCost;
}
case Instruction::InsertElement: {
assert(E->ReuseShuffleIndices.empty() &&
"Unique insertelements only are expected.");
auto *SrcVecTy = cast<FixedVectorType>(VL0->getType());
unsigned const NumElts = SrcVecTy->getNumElements();
unsigned const NumScalars = VL.size();
unsigned NumOfParts = TTI->getNumberOfParts(SrcVecTy);
unsigned OffsetBeg = *getInsertIndex(VL.front());
unsigned OffsetEnd = OffsetBeg;
for (Value *V : VL.drop_front()) {
unsigned Idx = *getInsertIndex(V);
if (OffsetBeg > Idx)
OffsetBeg = Idx;
else if (OffsetEnd < Idx)
OffsetEnd = Idx;
}
unsigned VecScalarsSz = PowerOf2Ceil(NumElts);
if (NumOfParts > 0)
VecScalarsSz = PowerOf2Ceil((NumElts + NumOfParts - 1) / NumOfParts);
unsigned VecSz =
(1 + OffsetEnd / VecScalarsSz - OffsetBeg / VecScalarsSz) *
VecScalarsSz;
unsigned Offset = VecScalarsSz * (OffsetBeg / VecScalarsSz);
unsigned InsertVecSz = std::min<unsigned>(
PowerOf2Ceil(OffsetEnd - OffsetBeg + 1),
((OffsetEnd - OffsetBeg + VecScalarsSz) / VecScalarsSz) *
VecScalarsSz);
bool IsWholeSubvector =
OffsetBeg == Offset && ((OffsetEnd + 1) % VecScalarsSz == 0);
if (OffsetBeg + InsertVecSz > VecSz) {
OffsetBeg = alignDown(OffsetBeg, VecSz, Offset);
InsertVecSz = VecSz;
}
APInt DemandedElts = APInt::getZero(NumElts);
SmallVector<int> Mask;
if (!E->ReorderIndices.empty()) {
inversePermutation(E->ReorderIndices, Mask);
Mask.append(InsertVecSz - Mask.size(), UndefMaskElem);
} else {
Mask.assign(VecSz, UndefMaskElem);
std::iota(Mask.begin(), std::next(Mask.begin(), InsertVecSz), 0);
}
bool IsIdentity = true;
SmallVector<int> PrevMask(InsertVecSz, UndefMaskElem);
Mask.swap(PrevMask);
for (unsigned I = 0; I < NumScalars; ++I) {
unsigned InsertIdx = *getInsertIndex(VL[PrevMask[I]]);
DemandedElts.setBit(InsertIdx);
IsIdentity &= InsertIdx - OffsetBeg == I;
Mask[InsertIdx - OffsetBeg] = I;
}
assert(Offset < NumElts && "Failed to find vector index offset");
InstructionCost Cost = 0;
Cost -= TTI->getScalarizationOverhead(SrcVecTy, DemandedElts,
                                          /*Insert*/ true, /*Extract*/ false);
auto *InsertVecTy =
FixedVectorType::get(SrcVecTy->getElementType(), InsertVecSz);
if (!IsIdentity)
Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc,
InsertVecTy, Mask);
auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) {
return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0));
}));
if (!isUndefVector(FirstInsert->getOperand(0)) && NumScalars != NumElts &&
!IsWholeSubvector) {
if (InsertVecSz != VecSz) {
auto *ActualVecTy =
FixedVectorType::get(SrcVecTy->getElementType(), VecSz);
Cost += TTI->getShuffleCost(TTI::SK_InsertSubvector, ActualVecTy,
None, OffsetBeg - Offset, InsertVecTy);
} else {
for (unsigned I = 0, End = OffsetBeg - Offset; I < End; ++I)
Mask[I] = I;
for (unsigned I = OffsetBeg - Offset, End = OffsetEnd - Offset;
I <= End; ++I)
if (Mask[I] != UndefMaskElem)
Mask[I] = I + VecSz;
for (unsigned I = OffsetEnd + 1 - Offset; I < VecSz; ++I)
Mask[I] = I;
Cost += TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, InsertVecTy, Mask);
}
}
return Cost;
}
case Instruction::ZExt:
case Instruction::SExt:
case Instruction::FPToUI:
case Instruction::FPToSI:
case Instruction::FPExt:
case Instruction::PtrToInt:
case Instruction::IntToPtr:
case Instruction::SIToFP:
case Instruction::UIToFP:
case Instruction::Trunc:
case Instruction::FPTrunc:
case Instruction::BitCast: {
Type *SrcTy = VL0->getOperand(0)->getType();
InstructionCost ScalarEltCost =
TTI->getCastInstrCost(E->getOpcode(), ScalarTy, SrcTy,
TTI::getCastContextHint(VL0), CostKind, VL0);
if (NeedToShuffleReuses) {
CommonCost -= (EntryVF - VL.size()) * ScalarEltCost;
}
InstructionCost ScalarCost = VL.size() * ScalarEltCost;
auto *SrcVecTy = FixedVectorType::get(SrcTy, VL.size());
InstructionCost VecCost = 0;
if (!MinBWs.count(VL0) || VecTy != SrcVecTy) {
VecCost = CommonCost + TTI->getCastInstrCost(
E->getOpcode(), VecTy, SrcVecTy,
TTI::getCastContextHint(VL0), CostKind, VL0);
}
LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
return VecCost - ScalarCost;
}
case Instruction::FCmp:
case Instruction::ICmp:
case Instruction::Select: {
InstructionCost ScalarEltCost =
TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy, Builder.getInt1Ty(),
CmpInst::BAD_ICMP_PREDICATE, CostKind, VL0);
if (NeedToShuffleReuses) {
CommonCost -= (EntryVF - VL.size()) * ScalarEltCost;
}
auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(), VL.size());
InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost;
CmpInst::Predicate VecPred = CmpInst::BAD_ICMP_PREDICATE;
bool First = true;
for (auto *V : VL) {
CmpInst::Predicate CurrentPred;
auto MatchCmp = m_Cmp(CurrentPred, m_Value(), m_Value());
if ((!match(V, m_Select(MatchCmp, m_Value(), m_Value())) &&
!match(V, MatchCmp)) ||
(!First && VecPred != CurrentPred)) {
VecPred = CmpInst::BAD_ICMP_PREDICATE;
break;
}
First = false;
VecPred = CurrentPred;
}
InstructionCost VecCost = TTI->getCmpSelInstrCost(
E->getOpcode(), VecTy, MaskTy, VecPred, CostKind, VL0);
auto IntrinsicAndUse = canConvertToMinOrMaxIntrinsic(VL);
if (IntrinsicAndUse.first != Intrinsic::not_intrinsic) {
IntrinsicCostAttributes CostAttrs(IntrinsicAndUse.first, VecTy,
{VecTy, VecTy});
InstructionCost IntrinsicCost =
TTI->getIntrinsicInstrCost(CostAttrs, CostKind);
if (IntrinsicAndUse.second)
IntrinsicCost -= TTI->getCmpSelInstrCost(Instruction::ICmp, VecTy,
MaskTy, VecPred, CostKind);
VecCost = std::min(VecCost, IntrinsicCost);
}
LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
return CommonCost + VecCost - ScalarCost;
}
case Instruction::FNeg:
case Instruction::Add:
case Instruction::FAdd:
case Instruction::Sub:
case Instruction::FSub:
case Instruction::Mul:
case Instruction::FMul:
case Instruction::UDiv:
case Instruction::SDiv:
case Instruction::FDiv:
case Instruction::URem:
case Instruction::SRem:
case Instruction::FRem:
case Instruction::Shl:
case Instruction::LShr:
case Instruction::AShr:
case Instruction::And:
case Instruction::Or:
case Instruction::Xor: {
TargetTransformInfo::OperandValueKind Op1VK =
TargetTransformInfo::OK_AnyValue;
TargetTransformInfo::OperandValueKind Op2VK =
TargetTransformInfo::OK_UniformConstantValue;
TargetTransformInfo::OperandValueProperties Op1VP =
TargetTransformInfo::OP_None;
TargetTransformInfo::OperandValueProperties Op2VP =
TargetTransformInfo::OP_PowerOf2;
ConstantInt *CInt0 = nullptr;
for (unsigned i = 0, e = VL.size(); i < e; ++i) {
const Instruction *I = cast<Instruction>(VL[i]);
unsigned OpIdx = isa<BinaryOperator>(I) ? 1 : 0;
ConstantInt *CInt = dyn_cast<ConstantInt>(I->getOperand(OpIdx));
if (!CInt) {
Op2VK = TargetTransformInfo::OK_AnyValue;
Op2VP = TargetTransformInfo::OP_None;
break;
}
if (Op2VP == TargetTransformInfo::OP_PowerOf2 &&
!CInt->getValue().isPowerOf2())
Op2VP = TargetTransformInfo::OP_None;
if (i == 0) {
CInt0 = CInt;
continue;
}
if (CInt0 != CInt)
Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
}
SmallVector<const Value *, 4> Operands(VL0->operand_values());
InstructionCost ScalarEltCost =
TTI->getArithmeticInstrCost(E->getOpcode(), ScalarTy, CostKind, Op1VK,
Op2VK, Op1VP, Op2VP, Operands, VL0);
if (NeedToShuffleReuses) {
CommonCost -= (EntryVF - VL.size()) * ScalarEltCost;
}
InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost;
InstructionCost VecCost =
TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind, Op1VK,
Op2VK, Op1VP, Op2VP, Operands, VL0);
LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
return CommonCost + VecCost - ScalarCost;
}
case Instruction::GetElementPtr: {
TargetTransformInfo::OperandValueKind Op1VK =
TargetTransformInfo::OK_AnyValue;
TargetTransformInfo::OperandValueKind Op2VK =
any_of(VL,
[](Value *V) {
return isa<GetElementPtrInst>(V) &&
!isConstant(
cast<GetElementPtrInst>(V)->getOperand(1));
})
? TargetTransformInfo::OK_AnyValue
: TargetTransformInfo::OK_UniformConstantValue;
InstructionCost ScalarEltCost = TTI->getArithmeticInstrCost(
Instruction::Add, ScalarTy, CostKind, Op1VK, Op2VK);
if (NeedToShuffleReuses) {
CommonCost -= (EntryVF - VL.size()) * ScalarEltCost;
}
InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost;
InstructionCost VecCost = TTI->getArithmeticInstrCost(
Instruction::Add, VecTy, CostKind, Op1VK, Op2VK);
LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
return CommonCost + VecCost - ScalarCost;
}
case Instruction::Load: {
Align Alignment = cast<LoadInst>(VL0)->getAlign();
InstructionCost ScalarEltCost = TTI->getMemoryOpCost(
Instruction::Load, ScalarTy, Alignment, 0, CostKind, VL0);
if (NeedToShuffleReuses) {
CommonCost -= (EntryVF - VL.size()) * ScalarEltCost;
}
InstructionCost ScalarLdCost = VecTy->getNumElements() * ScalarEltCost;
InstructionCost VecLdCost;
if (E->State == TreeEntry::Vectorize) {
VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, Alignment, 0,
CostKind, VL0);
} else {
assert(E->State == TreeEntry::ScatterVectorize && "Unknown EntryState");
Align CommonAlignment = Alignment;
for (Value *V : VL)
CommonAlignment =
std::min(CommonAlignment, cast<LoadInst>(V)->getAlign());
VecLdCost = TTI->getGatherScatterOpCost(
Instruction::Load, VecTy, cast<LoadInst>(VL0)->getPointerOperand(),
          /*VariableMask=*/false, CommonAlignment, CostKind, VL0);
}
LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecLdCost, ScalarLdCost));
return CommonCost + VecLdCost - ScalarLdCost;
}
case Instruction::Store: {
bool IsReorder = !E->ReorderIndices.empty();
auto *SI =
cast<StoreInst>(IsReorder ? VL[E->ReorderIndices.front()] : VL0);
Align Alignment = SI->getAlign();
InstructionCost ScalarEltCost = TTI->getMemoryOpCost(
Instruction::Store, ScalarTy, Alignment, 0, CostKind, VL0);
InstructionCost ScalarStCost = VecTy->getNumElements() * ScalarEltCost;
InstructionCost VecStCost = TTI->getMemoryOpCost(
Instruction::Store, VecTy, Alignment, 0, CostKind, VL0);
LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecStCost, ScalarStCost));
return CommonCost + VecStCost - ScalarStCost;
}
case Instruction::Call: {
CallInst *CI = cast<CallInst>(VL0);
Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
IntrinsicCostAttributes CostAttrs(ID, *CI, 1);
InstructionCost ScalarEltCost =
TTI->getIntrinsicInstrCost(CostAttrs, CostKind);
if (NeedToShuffleReuses) {
CommonCost -= (EntryVF - VL.size()) * ScalarEltCost;
}
InstructionCost ScalarCallCost = VecTy->getNumElements() * ScalarEltCost;
auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI);
InstructionCost VecCallCost =
std::min(VecCallCosts.first, VecCallCosts.second);
LLVM_DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
<< " (" << VecCallCost << "-" << ScalarCallCost << ")"
<< " for " << *CI << "\n");
return CommonCost + VecCallCost - ScalarCallCost;
}
case Instruction::ShuffleVector: {
assert(E->isAltShuffle() &&
((Instruction::isBinaryOp(E->getOpcode()) &&
Instruction::isBinaryOp(E->getAltOpcode())) ||
(Instruction::isCast(E->getOpcode()) &&
Instruction::isCast(E->getAltOpcode())) ||
(isa<CmpInst>(VL0) && isa<CmpInst>(E->getAltOp()))) &&
"Invalid Shuffle Vector Operand");
InstructionCost ScalarCost = 0;
if (NeedToShuffleReuses) {
for (unsigned Idx : E->ReuseShuffleIndices) {
Instruction *I = cast<Instruction>(VL[Idx]);
CommonCost -= TTI->getInstructionCost(I, CostKind);
}
for (Value *V : VL) {
Instruction *I = cast<Instruction>(V);
CommonCost += TTI->getInstructionCost(I, CostKind);
}
}
for (Value *V : VL) {
Instruction *I = cast<Instruction>(V);
assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
ScalarCost += TTI->getInstructionCost(I, CostKind);
}
InstructionCost VecCost = 0;
auto &&TryFindNodeWithEqualOperands = [this, E]() {
for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree) {
if (TE.get() == E)
break;
if (TE->isAltShuffle() &&
((TE->getOpcode() == E->getOpcode() &&
TE->getAltOpcode() == E->getAltOpcode()) ||
(TE->getOpcode() == E->getAltOpcode() &&
TE->getAltOpcode() == E->getOpcode())) &&
TE->hasEqualOperands(*E))
return true;
}
return false;
};
if (TryFindNodeWithEqualOperands()) {
LLVM_DEBUG({
dbgs() << "SLP: diamond match for alternate node found.\n";
E->dump();
});
} else if (Instruction::isBinaryOp(E->getOpcode())) {
VecCost = TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind);
VecCost += TTI->getArithmeticInstrCost(E->getAltOpcode(), VecTy,
CostKind);
} else if (auto *CI0 = dyn_cast<CmpInst>(VL0)) {
VecCost = TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy,
Builder.getInt1Ty(),
CI0->getPredicate(), CostKind, VL0);
VecCost += TTI->getCmpSelInstrCost(
E->getOpcode(), ScalarTy, Builder.getInt1Ty(),
cast<CmpInst>(E->getAltOp())->getPredicate(), CostKind,
E->getAltOp());
} else {
Type *Src0SclTy = E->getMainOp()->getOperand(0)->getType();
Type *Src1SclTy = E->getAltOp()->getOperand(0)->getType();
auto *Src0Ty = FixedVectorType::get(Src0SclTy, VL.size());
auto *Src1Ty = FixedVectorType::get(Src1SclTy, VL.size());
VecCost = TTI->getCastInstrCost(E->getOpcode(), VecTy, Src0Ty,
TTI::CastContextHint::None, CostKind);
VecCost += TTI->getCastInstrCost(E->getAltOpcode(), VecTy, Src1Ty,
TTI::CastContextHint::None, CostKind);
}
if (E->ReuseShuffleIndices.empty()) {
CommonCost =
TTI->getShuffleCost(TargetTransformInfo::SK_Select, FinalVecTy);
} else {
SmallVector<int> Mask;
buildShuffleEntryMask(
E->Scalars, E->ReorderIndices, E->ReuseShuffleIndices,
[E](Instruction *I) {
assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
return I->getOpcode() == E->getAltOpcode();
},
Mask);
CommonCost = TTI->getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc,
FinalVecTy, Mask);
}
LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
return CommonCost + VecCost - ScalarCost;
}
default:
llvm_unreachable("Unknown instruction");
}
}
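// A tree of one or two nodes is worth vectorizing only in special cases: a
// single vectorizable node (or, for reductions, a sufficiently wide gather),
// or a vectorizable root whose second node is a cheap gather (constants,
// splats, reusable extracts, or gathered loads).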
bool BoUpSLP::isFullyVectorizableTinyTree(bool ForReduction) const {
LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height "
                    << VectorizableTree.size() << " is fully vectorizable.\n");
auto &&AreVectorizableGathers = [this](const TreeEntry *TE, unsigned Limit) {
SmallVector<int> Mask;
return TE->State == TreeEntry::NeedToGather &&
!any_of(TE->Scalars,
[this](Value *V) { return EphValues.contains(V); }) &&
(allConstant(TE->Scalars) || isSplat(TE->Scalars) ||
TE->Scalars.size() < Limit ||
((TE->getOpcode() == Instruction::ExtractElement ||
all_of(TE->Scalars,
[](Value *V) {
return isa<ExtractElementInst, UndefValue>(V);
})) &&
isFixedVectorShuffle(TE->Scalars, Mask)) ||
(TE->State == TreeEntry::NeedToGather &&
TE->getOpcode() == Instruction::Load && !TE->isAltShuffle()));
};
if (VectorizableTree.size() == 1 &&
(VectorizableTree[0]->State == TreeEntry::Vectorize ||
(ForReduction &&
AreVectorizableGathers(VectorizableTree[0].get(),
VectorizableTree[0]->Scalars.size()) &&
VectorizableTree[0]->getVectorFactor() > 2)))
return true;
if (VectorizableTree.size() != 2)
return false;
SmallVector<int> Mask;
if (VectorizableTree[0]->State == TreeEntry::Vectorize &&
AreVectorizableGathers(VectorizableTree[1].get(),
VectorizableTree[0]->Scalars.size()))
return true;
if (VectorizableTree[0]->State == TreeEntry::NeedToGather ||
(VectorizableTree[1]->State == TreeEntry::NeedToGather &&
VectorizableTree[0]->State != TreeEntry::ScatterVectorize))
return false;
return true;
}
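/// Walks up from \p Root through 'or' chains and shifts left by multiples of
/// 8 bits, expecting to reach a zext of a load; returns true if the combined
/// width (element width times \p NumElts) is a legal integer type, i.e. the
/// whole pattern is likely to be folded into one wide load. With
/// \p MustMatchOrInst, at least one 'or' must appear on the path. An
/// illustrative (hypothetical) match for two i8 loads combining into i16:
///   %z0 = zext i8 %l0 to i16
///   %z1 = zext i8 %l1 to i16
///   %s1 = shl i16 %z1, 8
///   %or = or i16 %s1, %z0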
static bool isLoadCombineCandidateImpl(Value *Root, unsigned NumElts,
TargetTransformInfo *TTI,
bool MustMatchOrInst) {
Value *ZextLoad = Root;
const APInt *ShAmtC;
bool FoundOr = false;
while (!isa<ConstantExpr>(ZextLoad) &&
(match(ZextLoad, m_Or(m_Value(), m_Value())) ||
(match(ZextLoad, m_Shl(m_Value(), m_APInt(ShAmtC))) &&
ShAmtC->urem(8) == 0))) {
auto *BinOp = cast<BinaryOperator>(ZextLoad);
ZextLoad = BinOp->getOperand(0);
if (BinOp->getOpcode() == Instruction::Or)
FoundOr = true;
}
Value *Load;
if ((MustMatchOrInst && !FoundOr) || ZextLoad == Root ||
!match(ZextLoad, m_ZExt(m_Value(Load))) || !isa<LoadInst>(Load))
return false;
Type *SrcTy = Load->getType();
unsigned LoadBitWidth = SrcTy->getIntegerBitWidth() * NumElts;
if (!TTI->isTypeLegal(IntegerType::get(Root->getContext(), LoadBitWidth)))
return false;
LLVM_DEBUG(dbgs() << "SLP: Assume load combining for tree starting at "
<< *(cast<Instruction>(Root)) << "\n");
return true;
}
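// An 'or' reduction whose leaves follow the load-combine pattern above is
// better left to the scalar load-combining folds, so SLP declines to
// vectorize it.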
bool BoUpSLP::isLoadCombineReductionCandidate(RecurKind RdxKind) const {
if (RdxKind != RecurKind::Or)
return false;
unsigned NumElts = VectorizableTree[0]->Scalars.size();
Value *FirstReduced = VectorizableTree[0]->Scalars[0];
return isLoadCombineCandidateImpl(FirstReduced, NumElts, TTI,
                                    /* MatchOr */ false);
}
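// The same check for a tree of stores: every stored value must itself be a
// load-combine pattern (here an 'or' is mandatory) for the tree to be
// skipped.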
bool BoUpSLP::isLoadCombineCandidate() const {
unsigned NumElts = VectorizableTree[0]->Scalars.size();
for (Value *Scalar : VectorizableTree[0]->Scalars) {
Value *X;
if (!match(Scalar, m_Store(m_Value(X), m_Value())) ||
!isLoadCombineCandidateImpl(X, NumElts, TTI, true))
return false;
}
return true;
}
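// Declines trees that are too small to pay off: a two-node tree that merely
// rebuilds an insertelement sequence from a trivial gather, or any tree below
// MinTreeSize that is not fully vectorizable.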
bool BoUpSLP::isTreeTinyAndNotFullyVectorizable(bool ForReduction) const {
if (VectorizableTree.size() == 2 &&
isa<InsertElementInst>(VectorizableTree[0]->Scalars[0]) &&
VectorizableTree[1]->State == TreeEntry::NeedToGather &&
(VectorizableTree[1]->getVectorFactor() <= 2 ||
!(isSplat(VectorizableTree[1]->Scalars) ||
allConstant(VectorizableTree[1]->Scalars))))
return true;
if (VectorizableTree.size() >= MinTreeSize)
return false;
if (isFullyVectorizableTinyTree(ForReduction))
return false;
assert(VectorizableTree.empty()
? ExternalUses.empty()
: true && "We shouldn't have any external users");
return true;
}
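// Estimates spill cost: walks the tree scalars in scheduling order, tracks
// which tree values are live between consecutive scalars, and charges
// TTI::getCostOfKeepingLiveOverCall for every intervening call, since the
// corresponding vectors may have to be spilled and refilled around it.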
InstructionCost BoUpSLP::getSpillCost() const {
unsigned BundleWidth = VectorizableTree.front()->Scalars.size();
InstructionCost Cost = 0;
SmallPtrSet<Instruction*, 4> LiveValues;
Instruction *PrevInst = nullptr;
SmallVector<Instruction *, 16> OrderedScalars;
for (const auto &TEPtr : VectorizableTree) {
Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]);
if (!Inst)
continue;
OrderedScalars.push_back(Inst);
}
llvm::sort(OrderedScalars, [&](Instruction *A, Instruction *B) {
auto *NodeA = DT->getNode(A->getParent());
auto *NodeB = DT->getNode(B->getParent());
assert(NodeA && "Should only process reachable instructions");
assert(NodeB && "Should only process reachable instructions");
assert((NodeA == NodeB) == (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) &&
"Different nodes should have different DFS numbers");
if (NodeA != NodeB)
return NodeA->getDFSNumIn() < NodeB->getDFSNumIn();
return B->comesBefore(A);
});
for (Instruction *Inst : OrderedScalars) {
if (!PrevInst) {
PrevInst = Inst;
continue;
}
LiveValues.erase(PrevInst);
for (auto &J : PrevInst->operands()) {
if (isa<Instruction>(&*J) && getTreeEntry(&*J))
LiveValues.insert(cast<Instruction>(&*J));
}
LLVM_DEBUG({
dbgs() << "SLP: #LV: " << LiveValues.size();
for (auto *X : LiveValues)
dbgs() << " " << X->getName();
dbgs() << ", Looking at ";
Inst->dump();
});
unsigned NumCalls = 0;
BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(),
PrevInstIt =
PrevInst->getIterator().getReverse();
while (InstIt != PrevInstIt) {
if (PrevInstIt == PrevInst->getParent()->rend()) {
PrevInstIt = Inst->getParent()->rbegin();
continue;
}
if ((isa<CallInst>(&*PrevInstIt) &&
!isa<DbgInfoIntrinsic>(&*PrevInstIt)) &&
&*PrevInstIt != PrevInst)
NumCalls++;
++PrevInstIt;
}
if (NumCalls) {
SmallVector<Type*, 4> V;
for (auto *II : LiveValues) {
auto *ScalarTy = II->getType();
if (auto *VectorTy = dyn_cast<FixedVectorType>(ScalarTy))
ScalarTy = VectorTy->getElementType();
V.push_back(FixedVectorType::get(ScalarTy, BundleWidth));
}
Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V);
}
PrevInst = Inst;
}
return Cost;
}
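/// Checks whether the insertelements \p VU and \p V belong to one buildvector
/// sequence, i.e. one is reachable from the other along the chain of
/// operand-0 inserts with pairwise-distinct indices. An illustrative chain:
///   %v0 = insertelement <2 x float> poison, float %a, i32 0
///   %v1 = insertelement <2 x float> %v0, float %b, i32 1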
static bool areTwoInsertFromSameBuildVector(InsertElementInst *VU,
InsertElementInst *V) {
if (VU->getParent() != V->getParent())
return false;
if (VU->getType() != V->getType())
return false;
if (!VU->hasOneUse() && !V->hasOneUse())
return false;
auto *IE1 = VU;
auto *IE2 = V;
unsigned Idx1 = *getInsertIndex(IE1);
unsigned Idx2 = *getInsertIndex(IE2);
do {
if (IE2 == VU)
return VU->hasOneUse();
if (IE1 == V)
return V->hasOneUse();
if (IE1) {
if ((IE1 != VU && !IE1->hasOneUse()) ||
getInsertIndex(IE1).value_or(Idx2) == Idx2)
IE1 = nullptr;
else
IE1 = dyn_cast<InsertElementInst>(IE1->getOperand(0));
}
if (IE2) {
if ((IE2 != V && !IE2->hasOneUse()) ||
getInsertIndex(IE2).value_or(Idx1) == Idx1)
IE2 = nullptr;
else
IE2 = dyn_cast<InsertElementInst>(IE2->getOperand(0));
}
} while (IE1 || IE2);
return false;
}
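/// Given two insertelements of the same buildvector, returns true if \p IE1
/// appears earlier in the buildvector chain than \p IE2, i.e. \p IE1 is
/// reachable from \p IE2 through the operand-0 chain of inserts.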
static bool isFirstInsertElement(const InsertElementInst *IE1,
const InsertElementInst *IE2) {
if (IE1 == IE2)
return false;
const auto *I1 = IE1;
const auto *I2 = IE2;
const InsertElementInst *PrevI1;
const InsertElementInst *PrevI2;
unsigned Idx1 = *getInsertIndex(IE1);
unsigned Idx2 = *getInsertIndex(IE2);
do {
if (I2 == IE1)
return true;
if (I1 == IE2)
return false;
PrevI1 = I1;
PrevI2 = I2;
if (I1 && (I1 == IE1 || I1->hasOneUse()) &&
getInsertIndex(I1).value_or(Idx2) != Idx2)
I1 = dyn_cast<InsertElementInst>(I1->getOperand(0));
if (I2 && ((I2 == IE2 || I2->hasOneUse())) &&
getInsertIndex(I2).value_or(Idx1) != Idx1)
I2 = dyn_cast<InsertElementInst>(I2->getOperand(0));
} while ((I1 && PrevI1 != I1) || (I2 && PrevI2 != I2));
llvm_unreachable("Two different buildvectors not expected.");
}
namespace {
struct ValueSelect {
template <typename U>
static typename std::enable_if<std::is_same<Value *, U>::value, Value *>::type
get(Value *V) {
return V;
}
template <typename U>
static typename std::enable_if<!std::is_same<Value *, U>::value, U>::type
get(Value *) {
return U();
}
};
} // namespace
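/// Combines the per-source shuffle masks in \p ShuffleMask into a chain of
/// one- and two-source shuffles on top of a (possibly non-undef) \p Base.
/// \p ResizeAction widens a source to the destination VF when needed and
/// \p Action materializes each shuffle over the running result. Templated so
/// the same walk can either emit real IR values or, with T = const TreeEntry,
/// only estimate costs.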
template <typename T>
static T *performExtractsShuffleAction(
MutableArrayRef<std::pair<T *, SmallVector<int>>> ShuffleMask, Value *Base,
function_ref<unsigned(T *)> GetVF,
function_ref<std::pair<T *, bool>(T *, ArrayRef<int>)> ResizeAction,
function_ref<T *(ArrayRef<int>, ArrayRef<T *>)> Action) {
assert(!ShuffleMask.empty() && "Empty list of shuffles for inserts.");
SmallVector<int> Mask(ShuffleMask.begin()->second);
auto VMIt = std::next(ShuffleMask.begin());
T *Prev = nullptr;
bool IsBaseNotUndef = !isUndefVector(Base);
if (IsBaseNotUndef) {
std::pair<T *, bool> Res = ResizeAction(ShuffleMask.begin()->first, Mask);
for (unsigned Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) {
if (Mask[Idx] == UndefMaskElem)
Mask[Idx] = Idx;
else
Mask[Idx] = (Res.second ? Idx : Mask[Idx]) + VF;
}
auto *V = ValueSelect::get<T *>(Base);
(void)V;
assert((!V || GetVF(V) == Mask.size()) &&
"Expected base vector of VF number of elements.");
Prev = Action(Mask, {nullptr, Res.first});
} else if (ShuffleMask.size() == 1) {
std::pair<T *, bool> Res = ResizeAction(ShuffleMask.begin()->first, Mask);
if (Res.second)
Prev = Res.first;
else
Prev = Action(Mask, {ShuffleMask.begin()->first});
} else {
unsigned Vec1VF = GetVF(ShuffleMask.begin()->first);
unsigned Vec2VF = GetVF(VMIt->first);
if (Vec1VF == Vec2VF) {
ArrayRef<int> SecMask = VMIt->second;
for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) {
if (SecMask[I] != UndefMaskElem) {
assert(Mask[I] == UndefMaskElem && "Multiple uses of scalars.");
Mask[I] = SecMask[I] + Vec1VF;
}
}
Prev = Action(Mask, {ShuffleMask.begin()->first, VMIt->first});
} else {
std::pair<T *, bool> Res1 =
ResizeAction(ShuffleMask.begin()->first, Mask);
std::pair<T *, bool> Res2 = ResizeAction(VMIt->first, VMIt->second);
ArrayRef<int> SecMask = VMIt->second;
for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) {
if (Mask[I] != UndefMaskElem) {
assert(SecMask[I] == UndefMaskElem && "Multiple uses of scalars.");
if (Res1.second)
Mask[I] = I;
} else if (SecMask[I] != UndefMaskElem) {
assert(Mask[I] == UndefMaskElem && "Multiple uses of scalars.");
Mask[I] = (Res2.second ? I : SecMask[I]) + VF;
}
}
Prev = Action(Mask, {Res1.first, Res2.first});
}
VMIt = std::next(VMIt);
}
for (auto E = ShuffleMask.end(); VMIt != E; ++VMIt) {
std::pair<T *, bool> Res = ResizeAction(VMIt->first, VMIt->second);
ArrayRef<int> SecMask = VMIt->second;
for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) {
if (SecMask[I] != UndefMaskElem) {
assert((Mask[I] == UndefMaskElem || IsBaseNotUndef) &&
"Multiple uses of scalars.");
Mask[I] = (Res.second ? I : SecMask[I]) + VF;
} else if (Mask[I] != UndefMaskElem) {
Mask[I] = I;
}
}
Prev = Action(Mask, {Prev, Res.first});
}
return Prev;
}
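// Total tree cost: the sum of all entry costs, plus the cost of extracting
// externally used scalars and of potential spills across calls, minus the
// scalarization overhead saved on insertelement users fed directly by the
// vectorized tree (after pricing their final shuffles).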
InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
InstructionCost Cost = 0;
LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size "
<< VectorizableTree.size() << ".\n");
unsigned BundleWidth = VectorizableTree[0]->Scalars.size();
for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) {
TreeEntry &TE = *VectorizableTree[I];
InstructionCost C = getEntryCost(&TE, VectorizedVals);
Cost += C;
LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
<< " for bundle that starts with " << *TE.Scalars[0]
<< ".\n"
<< "SLP: Current total cost = " << Cost << "\n");
}
SmallPtrSet<Value *, 16> ExtractCostCalculated;
InstructionCost ExtractCost = 0;
SmallVector<MapVector<const TreeEntry *, SmallVector<int>>> ShuffleMasks;
SmallVector<std::pair<Value *, const TreeEntry *>> FirstUsers;
SmallVector<APInt> DemandedElts;
for (ExternalUser &EU : ExternalUses) {
if (!isa_and_nonnull<InsertElementInst>(EU.User) &&
!ExtractCostCalculated.insert(EU.Scalar).second)
continue;
if (EphValues.count(EU.User))
continue;
if (isa<FixedVectorType>(EU.Scalar->getType()))
continue;
if (isa<ExtractElementInst>(EU.Scalar))
continue;
if (auto *VU = dyn_cast_or_null<InsertElementInst>(EU.User)) {
if (auto *FTy = dyn_cast<FixedVectorType>(VU->getType())) {
Optional<unsigned> InsertIdx = getInsertIndex(VU);
if (InsertIdx) {
const TreeEntry *ScalarTE = getTreeEntry(EU.Scalar);
auto *It =
find_if(FirstUsers,
[VU](const std::pair<Value *, const TreeEntry *> &Pair) {
return areTwoInsertFromSameBuildVector(
VU, cast<InsertElementInst>(Pair.first));
});
int VecId = -1;
if (It == FirstUsers.end()) {
(void)ShuffleMasks.emplace_back();
SmallVectorImpl<int> &Mask = ShuffleMasks.back()[ScalarTE];
if (Mask.empty())
Mask.assign(FTy->getNumElements(), UndefMaskElem);
Value *Base = VU;
while (auto *IEBase = dyn_cast<InsertElementInst>(Base)) {
if (IEBase != EU.User &&
(!IEBase->hasOneUse() ||
getInsertIndex(IEBase).value_or(*InsertIdx) == *InsertIdx))
break;
if (const TreeEntry *E = getTreeEntry(IEBase)) {
VU = IEBase;
do {
IEBase = cast<InsertElementInst>(Base);
int Idx = *getInsertIndex(IEBase);
assert(Mask[Idx] == UndefMaskElem &&
"InsertElementInstruction used already.");
Mask[Idx] = Idx;
Base = IEBase->getOperand(0);
} while (E == getTreeEntry(Base));
break;
}
Base = cast<InsertElementInst>(Base)->getOperand(0);
}
FirstUsers.emplace_back(VU, ScalarTE);
DemandedElts.push_back(APInt::getZero(FTy->getNumElements()));
VecId = FirstUsers.size() - 1;
} else {
if (isFirstInsertElement(VU, cast<InsertElementInst>(It->first)))
It->first = VU;
VecId = std::distance(FirstUsers.begin(), It);
}
int InIdx = *InsertIdx;
SmallVectorImpl<int> &Mask = ShuffleMasks[VecId][ScalarTE];
if (Mask.empty())
Mask.assign(FTy->getNumElements(), UndefMaskElem);
Mask[InIdx] = EU.Lane;
DemandedElts[VecId].setBit(InIdx);
continue;
}
}
}
auto *VecTy = FixedVectorType::get(EU.Scalar->getType(), BundleWidth);
auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
if (MinBWs.count(ScalarRoot)) {
auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
auto Extend =
MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt;
VecTy = FixedVectorType::get(MinTy, BundleWidth);
ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(),
VecTy, EU.Lane);
} else {
ExtractCost +=
TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane);
}
}
InstructionCost SpillCost = getSpillCost();
Cost += SpillCost + ExtractCost;
auto &&ResizeToVF = [this, &Cost](const TreeEntry *TE, ArrayRef<int> Mask) {
InstructionCost C = 0;
unsigned VF = Mask.size();
unsigned VecVF = TE->getVectorFactor();
if (VF != VecVF &&
(any_of(Mask, [VF](int Idx) { return Idx >= static_cast<int>(VF); }) ||
(all_of(Mask,
[VF](int Idx) { return Idx < 2 * static_cast<int>(VF); }) &&
!ShuffleVectorInst::isIdentityMask(Mask)))) {
SmallVector<int> OrigMask(VecVF, UndefMaskElem);
std::copy(Mask.begin(), std::next(Mask.begin(), std::min(VF, VecVF)),
OrigMask.begin());
C = TTI->getShuffleCost(
TTI::SK_PermuteSingleSrc,
FixedVectorType::get(TE->getMainOp()->getType(), VecVF), OrigMask);
LLVM_DEBUG(
dbgs() << "SLP: Adding cost " << C
<< " for final shuffle of insertelement external users.\n";
TE->dump(); dbgs() << "SLP: Current total cost = " << Cost << "\n");
Cost += C;
return std::make_pair(TE, true);
}
return std::make_pair(TE, false);
};
for (int I = 0, E = FirstUsers.size(); I < E; ++I) {
Value *Base = cast<Instruction>(FirstUsers[I].first)->getOperand(0);
unsigned VF = ShuffleMasks[I].begin()->second.size();
auto *FTy = FixedVectorType::get(
cast<VectorType>(FirstUsers[I].first->getType())->getElementType(), VF);
auto Vector = ShuffleMasks[I].takeVector();
auto &&EstimateShufflesCost = [this, FTy,
&Cost](ArrayRef<int> Mask,
ArrayRef<const TreeEntry *> TEs) {
assert((TEs.size() == 1 || TEs.size() == 2) &&
"Expected exactly 1 or 2 tree entries.");
if (TEs.size() == 1) {
int Limit = 2 * Mask.size();
if (!all_of(Mask, [Limit](int Idx) { return Idx < Limit; }) ||
!ShuffleVectorInst::isIdentityMask(Mask)) {
InstructionCost C =
TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FTy, Mask);
LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
<< " for final shuffle of insertelement "
"external users.\n";
TEs.front()->dump();
dbgs() << "SLP: Current total cost = " << Cost << "\n");
Cost += C;
}
} else {
InstructionCost C =
TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, FTy, Mask);
LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
<< " for final shuffle of vector node and external "
"insertelement users.\n";
if (TEs.front()) { TEs.front()->dump(); } TEs.back()->dump();
dbgs() << "SLP: Current total cost = " << Cost << "\n");
Cost += C;
}
return TEs.back();
};
(void)performExtractsShuffleAction<const TreeEntry>(
makeMutableArrayRef(Vector.data(), Vector.size()), Base,
[](const TreeEntry *E) { return E->getVectorFactor(); }, ResizeToVF,
EstimateShufflesCost);
InstructionCost InsertCost = TTI->getScalarizationOverhead(
cast<FixedVectorType>(FirstUsers[I].first->getType()), DemandedElts[I],
        /*Insert*/ true, /*Extract*/ false);
Cost -= InsertCost;
}
#ifndef NDEBUG
SmallString<256> Str;
{
raw_svector_ostream OS(Str);
OS << "SLP: Spill Cost = " << SpillCost << ".\n"
<< "SLP: Extract Cost = " << ExtractCost << ".\n"
<< "SLP: Total Cost = " << Cost << ".\n";
}
LLVM_DEBUG(dbgs() << Str);
if (ViewSLPTree)
ViewGraph(this, "SLP" + F->getName(), false, Str);
#endif
return Cost;
}
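// Checks whether the gathered scalars of TE can instead be shuffled out of at
// most two previously built tree entries. On success, fills Entries with the
// source entries and Mask with the lane mapping and returns the matching
// shuffle kind; returns None when some scalar is not available elsewhere.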
Optional<TargetTransformInfo::ShuffleKind>
BoUpSLP::isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask,
SmallVectorImpl<const TreeEntry *> &Entries) {
Mask.assign(TE->Scalars.size(), UndefMaskElem);
Entries.clear();
DenseMap<Value *, SmallPtrSet<const TreeEntry *, 4>> ValueToTEs;
for (const std::unique_ptr<TreeEntry> &EntryPtr : VectorizableTree) {
if (EntryPtr.get() == TE)
break;
if (EntryPtr->State != TreeEntry::NeedToGather)
continue;
for (Value *V : EntryPtr->Scalars)
ValueToTEs.try_emplace(V).first->getSecond().insert(EntryPtr.get());
}
SmallVector<SmallPtrSet<const TreeEntry *, 4>> UsedTEs;
DenseMap<Value *, int> UsedValuesEntry;
for (Value *V : TE->Scalars) {
if (isa<UndefValue>(V))
continue;
SmallPtrSet<const TreeEntry *, 4> VToTEs;
auto It = ValueToTEs.find(V);
if (It != ValueToTEs.end())
VToTEs = It->second;
if (const TreeEntry *VTE = getTreeEntry(V))
VToTEs.insert(VTE);
if (VToTEs.empty())
return None;
if (UsedTEs.empty()) {
UsedTEs.push_back(VToTEs);
} else {
SmallPtrSet<const TreeEntry *, 4> SavedVToTEs(VToTEs);
unsigned Idx = 0;
for (SmallPtrSet<const TreeEntry *, 4> &Set : UsedTEs) {
set_intersect(VToTEs, Set);
if (!VToTEs.empty()) {
Set.swap(VToTEs);
break;
}
VToTEs = SavedVToTEs;
++Idx;
}
if (Idx == UsedTEs.size()) {
if (UsedTEs.size() == 2)
return None;
UsedTEs.push_back(SavedVToTEs);
Idx = UsedTEs.size() - 1;
}
UsedValuesEntry.try_emplace(V, Idx);
}
}
if (UsedTEs.empty()) {
assert(all_of(TE->Scalars, UndefValue::classof) &&
"Expected vector of undefs only.");
return None;
}
unsigned VF = 0;
if (UsedTEs.size() == 1) {
auto It = find_if(UsedTEs.front(), [TE](const TreeEntry *EntryPtr) {
return EntryPtr->isSame(TE->Scalars);
});
if (It != UsedTEs.front().end()) {
Entries.push_back(*It);
std::iota(Mask.begin(), Mask.end(), 0);
return TargetTransformInfo::SK_PermuteSingleSrc;
}
Entries.push_back(*UsedTEs.front().begin());
} else {
assert(UsedTEs.size() == 2 && "Expected at most 2 permuted entries.");
DenseMap<int, const TreeEntry *> VFToTE;
for (const TreeEntry *TE : UsedTEs.front())
VFToTE.try_emplace(TE->getVectorFactor(), TE);
for (const TreeEntry *TE : UsedTEs.back()) {
auto It = VFToTE.find(TE->getVectorFactor());
if (It != VFToTE.end()) {
VF = It->first;
Entries.push_back(It->second);
Entries.push_back(TE);
break;
}
}
if (Entries.empty())
return None;
}
for (int I = 0, E = TE->Scalars.size(); I < E; ++I) {
Value *V = TE->Scalars[I];
if (isa<UndefValue>(V))
continue;
unsigned Idx = UsedValuesEntry.lookup(V);
const TreeEntry *VTE = Entries[Idx];
int FoundLane = VTE->findLaneForValue(V);
Mask[I] = Idx * VF + FoundLane;
if (Mask[I] >= 2 * E)
return None;
}
switch (Entries.size()) {
case 1:
return TargetTransformInfo::SK_PermuteSingleSrc;
case 2:
return TargetTransformInfo::SK_PermuteTwoSrc;
default:
break;
}
return None;
}
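/// Returns the cost of gathering a vector of type \p Ty: the scalarization
/// overhead of inserting every element not covered by \p ShuffledIndices,
/// plus one single-source permute if \p NeedToShuffle is set (i.e. duplicated
/// elements must be placed by a final shuffle).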
InstructionCost BoUpSLP::getGatherCost(FixedVectorType *Ty,
const APInt &ShuffledIndices,
bool NeedToShuffle) const {
InstructionCost Cost = TTI->getScalarizationOverhead(
Ty, ~ShuffledIndices, /*Insert=*/true, /*Extract=*/false);
if (NeedToShuffle)
Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, Ty);
return Cost;
}
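/// Computes the gather cost for the scalar list \p VL. Constants need no
/// insertelement at all, and repeated non-constant values are inserted only
/// once and then duplicated by a final shuffle, so both are marked as
/// "shuffled" rather than counted toward the scalarization overhead.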
InstructionCost BoUpSLP::getGatherCost(ArrayRef<Value *> VL) const {
Type *ScalarTy = VL[0]->getType();
if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
ScalarTy = SI->getValueOperand()->getType();
auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
bool DuplicateNonConst = false;
APInt ShuffledElements = APInt::getZero(VL.size());
DenseSet<Value *> UniqueElements;
for (unsigned I = VL.size(); I > 0; --I) {
unsigned Idx = I - 1;
if (isConstant(VL[Idx])) {
ShuffledElements.setBit(Idx);
continue;
}
if (!UniqueElements.insert(VL[Idx]).second) {
DuplicateNonConst = true;
ShuffledElements.setBit(Idx);
}
}
return getGatherCost(VecTy, ShuffledElements, DuplicateNonConst);
}
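/// Reorders the operands of the instructions in \p VL into \p Left and
/// \p Right so that matching lanes pick operands that are most profitable to
/// vectorize together (delegates to VLOperands::reorder()).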
void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
SmallVectorImpl<Value *> &Left,
SmallVectorImpl<Value *> &Right,
const DataLayout &DL,
ScalarEvolution &SE,
const BoUpSLP &R) {
if (VL.empty())
return;
VLOperands Ops(VL, DL, SE, R);
Ops.reorder();
Left = Ops.getVL(0);
Right = Ops.getVL(1);
}
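/// Sets the IRBuilder insertion point for the bundle \p E, normally right
/// after the last scheduled scalar of the bundle. Bundles that do not need
/// scheduling are placed after the last member if all scalars are only used
/// outside the block, and before the first member otherwise; an insertion
/// point landing on a PHI is moved past the PHI section of the block.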
void BoUpSLP::setInsertPointAfterBundle(const TreeEntry *E) {
auto *Front = E->getMainOp();
auto *BB = Front->getParent();
assert(llvm::all_of(E->Scalars, [=](Value *V) -> bool {
if (E->getOpcode() == Instruction::GetElementPtr &&
!isa<GetElementPtrInst>(V))
return true;
auto *I = cast<Instruction>(V);
return !E->isOpcodeOrAlt(I) || I->getParent() == BB ||
isVectorLikeInstWithConstOps(I);
}));
auto &&FindLastInst = [E, Front, this, &BB]() {
Instruction *LastInst = Front;
for (Value *V : E->Scalars) {
auto *I = dyn_cast<Instruction>(V);
if (!I)
continue;
if (LastInst->getParent() == I->getParent()) {
if (LastInst->comesBefore(I))
LastInst = I;
continue;
}
assert(isVectorLikeInstWithConstOps(LastInst) &&
isVectorLikeInstWithConstOps(I) &&
"Expected vector-like insts only.");
if (!DT->isReachableFromEntry(LastInst->getParent())) {
LastInst = I;
continue;
}
if (!DT->isReachableFromEntry(I->getParent()))
continue;
auto *NodeA = DT->getNode(LastInst->getParent());
auto *NodeB = DT->getNode(I->getParent());
assert(NodeA && "Should only process reachable instructions");
assert(NodeB && "Should only process reachable instructions");
assert((NodeA == NodeB) ==
(NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) &&
"Different nodes should have different DFS numbers");
if (NodeA->getDFSNumIn() < NodeB->getDFSNumIn())
LastInst = I;
}
BB = LastInst->getParent();
return LastInst;
};
auto &&FindFirstInst = [E, Front]() {
Instruction *FirstInst = Front;
for (Value *V : E->Scalars) {
auto *I = dyn_cast<Instruction>(V);
if (!I)
continue;
if (I->comesBefore(FirstInst))
FirstInst = I;
}
return FirstInst;
};
if (E->State != TreeEntry::NeedToGather &&
doesNotNeedToSchedule(E->Scalars)) {
Instruction *InsertInst;
if (all_of(E->Scalars, isUsedOutsideBlock))
InsertInst = FindLastInst();
else
InsertInst = FindFirstInst();
if (isa<PHINode>(InsertInst))
InsertInst = BB->getFirstNonPHI();
BasicBlock::iterator InsertPt = InsertInst->getIterator();
Builder.SetInsertPoint(BB, InsertPt);
Builder.SetCurrentDebugLocation(Front->getDebugLoc());
return;
}
Instruction *LastInst = nullptr;
if (BlocksSchedules.count(BB)) {
Value *V = E->isOneOf(E->Scalars.back());
if (doesNotNeedToBeScheduled(V))
V = *find_if_not(E->Scalars, doesNotNeedToBeScheduled);
auto *Bundle = BlocksSchedules[BB]->getScheduleData(V);
if (Bundle && Bundle->isPartOfBundle())
for (; Bundle; Bundle = Bundle->NextInBundle)
if (Bundle->OpValue == Bundle->Inst)
LastInst = Bundle->Inst;
}
if (!LastInst) {
LastInst = FindLastInst();
if (isa<PHINode>(LastInst))
LastInst = BB->getFirstNonPHI()->getPrevNode();
}
assert(LastInst && "Failed to find last instruction in bundle");
Builder.SetInsertPoint(BB, std::next(LastInst->getIterator()));
Builder.SetCurrentDebugLocation(Front->getDebugLoc());
}
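/// Builds a vector for the scalars in \p VL with a chain of insertelement
/// instructions. Constants are inserted first; values whose definition may
/// sit close to or after the insertion point (values reachable through a
/// single-predecessor chain, in-tree scalars, values defined inside the
/// current loop) are postponed to the end of the chain. Each insert of an
/// in-tree scalar is recorded as an external use.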
Value *BoUpSLP::gather(ArrayRef<Value *> VL) {
SmallVector<std::pair<Value *, unsigned>, 4> PostponedInsts;
SmallSet<int, 4> PostponedIndices;
Loop *L = LI->getLoopFor(Builder.GetInsertBlock());
auto &&CheckPredecessor = [](BasicBlock *InstBB, BasicBlock *InsertBB) {
SmallPtrSet<BasicBlock *, 4> Visited;
while (InsertBB && InsertBB != InstBB && Visited.insert(InsertBB).second)
InsertBB = InsertBB->getSinglePredecessor();
return InsertBB && InsertBB == InstBB;
};
for (int I = 0, E = VL.size(); I < E; ++I) {
if (auto *Inst = dyn_cast<Instruction>(VL[I]))
if ((CheckPredecessor(Inst->getParent(), Builder.GetInsertBlock()) ||
getTreeEntry(Inst) || (L && (L->contains(Inst)))) &&
PostponedIndices.insert(I).second)
PostponedInsts.emplace_back(Inst, I);
}
auto &&CreateInsertElement = [this](Value *Vec, Value *V, unsigned Pos) {
Vec = Builder.CreateInsertElement(Vec, V, Builder.getInt32(Pos));
auto *InsElt = dyn_cast<InsertElementInst>(Vec);
if (!InsElt)
return Vec;
GatherShuffleSeq.insert(InsElt);
CSEBlocks.insert(InsElt->getParent());
if (TreeEntry *Entry = getTreeEntry(V)) {
unsigned FoundLane = Entry->findLaneForValue(V);
ExternalUses.emplace_back(V, InsElt, FoundLane);
}
return Vec;
};
Value *Val0 =
isa<StoreInst>(VL[0]) ? cast<StoreInst>(VL[0])->getValueOperand() : VL[0];
FixedVectorType *VecTy = FixedVectorType::get(Val0->getType(), VL.size());
Value *Vec = PoisonValue::get(VecTy);
SmallVector<int> NonConsts;
for (int I = 0, E = VL.size(); I < E; ++I) {
if (PostponedIndices.contains(I))
continue;
if (!isConstant(VL[I])) {
NonConsts.push_back(I);
continue;
}
Vec = CreateInsertElement(Vec, VL[I], I);
}
for (int I : NonConsts)
Vec = CreateInsertElement(Vec, VL[I], I);
for (const std::pair<Value *, unsigned> &Pair : PostponedInsts)
Vec = CreateInsertElement(Vec, Pair.first, Pair.second);
return Vec;
}
namespace {
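/// Helper that accumulates shuffle masks (optionally inverting reorder masks
/// first) and emits at most one shufflevector when finalize() is called. The
/// shuffle is skipped entirely when the combined mask is an identity of the
/// requested width.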
class ShuffleInstructionBuilder {
IRBuilderBase &Builder;
const unsigned VF = 0;
bool IsFinalized = false;
SmallVector<int, 4> Mask;
SetVector<Instruction *> &GatherShuffleSeq;
SetVector<BasicBlock *> &CSEBlocks;
public:
ShuffleInstructionBuilder(IRBuilderBase &Builder, unsigned VF,
SetVector<Instruction *> &GatherShuffleSeq,
SetVector<BasicBlock *> &CSEBlocks)
: Builder(Builder), VF(VF), GatherShuffleSeq(GatherShuffleSeq),
CSEBlocks(CSEBlocks) {}
void addInversedMask(ArrayRef<unsigned> SubMask) {
if (SubMask.empty())
return;
SmallVector<int, 4> NewMask;
inversePermutation(SubMask, NewMask);
addMask(NewMask);
}
void addMask(ArrayRef<unsigned> SubMask) {
SmallVector<int, 4> NewMask(SubMask.begin(), SubMask.end());
addMask(NewMask);
}
void addMask(ArrayRef<int> SubMask) { ::addMask(Mask, SubMask); }
Value *finalize(Value *V) {
IsFinalized = true;
unsigned ValueVF = cast<FixedVectorType>(V->getType())->getNumElements();
if (VF == ValueVF && Mask.empty())
return V;
SmallVector<int, 4> NormalizedMask(VF, UndefMaskElem);
std::iota(NormalizedMask.begin(), NormalizedMask.end(), 0);
addMask(NormalizedMask);
if (VF == ValueVF && ShuffleVectorInst::isIdentityMask(Mask))
return V;
Value *Vec = Builder.CreateShuffleVector(V, Mask, "shuffle");
if (auto *I = dyn_cast<Instruction>(Vec)) {
GatherShuffleSeq.insert(I);
CSEBlocks.insert(I->getParent());
}
return Vec;
}
~ShuffleInstructionBuilder() {
assert((IsFinalized || Mask.empty()) &&
"Shuffle construction must be finalized.");
}
};
} // namespace
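/// Vectorizes the operand list \p VL. If \p VL matches an already vectorized
/// tree entry, the cached vector value is reused, shrunk with an extra
/// shuffle when the requested vectorization factor is smaller; otherwise a
/// gathered build vector is emitted.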
Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
const unsigned VF = VL.size();
InstructionsState S = getSameOpcode(VL);
if (!S.getOpcode() && VL.front()->getType()->isPointerTy()) {
const auto *It =
find_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); });
if (It != VL.end())
S = getSameOpcode(*It);
}
if (S.getOpcode()) {
if (TreeEntry *E = getTreeEntry(S.OpValue))
if (E->isSame(VL)) {
Value *V = vectorizeTree(E);
if (VF != cast<FixedVectorType>(V->getType())->getNumElements()) {
if (!E->ReuseShuffleIndices.empty()) {
SmallVector<int> UniqueIdxs(VF, UndefMaskElem);
SmallSet<int, 4> UsedIdxs;
int Pos = 0;
int Sz = VL.size();
for (int Idx : E->ReuseShuffleIndices) {
if (Idx != Sz && Idx != UndefMaskElem &&
UsedIdxs.insert(Idx).second)
UniqueIdxs[Idx] = Pos;
++Pos;
}
assert(VF >= UsedIdxs.size() &&
"Expected vectorization factor to cover all reused indices.");
UniqueIdxs.append(VF - UsedIdxs.size(), UndefMaskElem);
V = Builder.CreateShuffleVector(V, UniqueIdxs, "shrink.shuffle");
} else {
assert(VF < cast<FixedVectorType>(V->getType())->getNumElements() &&
"Expected vectorization factor less "
"than original vector size.");
SmallVector<int> UniformMask(VF, 0);
std::iota(UniformMask.begin(), UniformMask.end(), 0);
V = Builder.CreateShuffleVector(V, UniformMask, "shrink.shuffle");
}
if (auto *I = dyn_cast<Instruction>(V)) {
GatherShuffleSeq.insert(I);
CSEBlocks.insert(I->getParent());
}
}
return V;
}
}
return createBuildVector(VL);
}
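/// Emits a build vector for the gather list \p VL. For longer lists the
/// scalars are deduplicated first (undefs dropped, repeated values inserted
/// once) and the original lane order is reconstructed through a
/// reuse-shuffle mask.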
Value *BoUpSLP::createBuildVector(ArrayRef<Value *> VL) {
assert(any_of(VectorizableTree,
[VL](const std::unique_ptr<TreeEntry> &TE) {
return TE->State == TreeEntry::NeedToGather && TE->isSame(VL);
}) &&
"Non-matching gather node.");
unsigned VF = VL.size();
SmallVector<int> ReuseShuffleIndicies;
SmallVector<Value *> UniqueValues;
if (VL.size() > 2) {
DenseMap<Value *, unsigned> UniquePositions;
unsigned NumValues =
std::distance(VL.begin(), find_if(reverse(VL), [](Value *V) {
return !isa<UndefValue>(V);
}).base());
VF = std::max<unsigned>(VF, PowerOf2Ceil(NumValues));
int UniqueVals = 0;
for (Value *V : VL.drop_back(VL.size() - VF)) {
if (isa<UndefValue>(V)) {
ReuseShuffleIndicies.emplace_back(UndefMaskElem);
continue;
}
if (isConstant(V)) {
ReuseShuffleIndicies.emplace_back(UniqueValues.size());
UniqueValues.emplace_back(V);
continue;
}
auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
ReuseShuffleIndicies.emplace_back(Res.first->second);
if (Res.second) {
UniqueValues.emplace_back(V);
++UniqueVals;
}
}
if (UniqueVals == 1 && UniqueValues.size() == 1) {
ReuseShuffleIndicies.append(VF - ReuseShuffleIndicies.size(),
UndefMaskElem);
} else if (UniqueValues.size() >= VF - 1 || UniqueValues.size() <= 1) {
if (UniqueValues.empty()) {
assert(all_of(VL, UndefValue::classof) && "Expected list of undefs.");
NumValues = VF;
}
ReuseShuffleIndicies.clear();
UniqueValues.clear();
UniqueValues.append(VL.begin(), std::next(VL.begin(), NumValues));
}
UniqueValues.append(VF - UniqueValues.size(),
PoisonValue::get(VL[0]->getType()));
VL = UniqueValues;
}
ShuffleInstructionBuilder ShuffleBuilder(Builder, VF, GatherShuffleSeq,
CSEBlocks);
Value *Vec = gather(VL);
if (!ReuseShuffleIndicies.empty()) {
ShuffleBuilder.addMask(ReuseShuffleIndicies);
Vec = ShuffleBuilder.finalize(Vec);
}
return Vec;
}
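/// Emits vector code for the tree entry \p E and caches it in
/// E->VectorizedValue. Gather entries are either shuffled out of previously
/// vectorized entries (see isGatherShuffledEntry) or materialized with
/// gather(); vectorizable entries dispatch on the entry's main or alternate
/// opcode.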
Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
IRBuilder<>::InsertPointGuard Guard(Builder);
if (E->VectorizedValue) {
LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
return E->VectorizedValue;
}
bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
unsigned VF = E->getVectorFactor();
ShuffleInstructionBuilder ShuffleBuilder(Builder, VF, GatherShuffleSeq,
CSEBlocks);
if (E->State == TreeEntry::NeedToGather) {
if (E->getMainOp())
setInsertPointAfterBundle(E);
Value *Vec;
SmallVector<int> Mask;
SmallVector<const TreeEntry *> Entries;
Optional<TargetTransformInfo::ShuffleKind> Shuffle =
isGatherShuffledEntry(E, Mask, Entries);
if (Shuffle) {
assert((Entries.size() == 1 || Entries.size() == 2) &&
"Expected shuffle of 1 or 2 entries.");
Vec = Builder.CreateShuffleVector(Entries.front()->VectorizedValue,
Entries.back()->VectorizedValue, Mask);
if (auto *I = dyn_cast<Instruction>(Vec)) {
GatherShuffleSeq.insert(I);
CSEBlocks.insert(I->getParent());
}
} else {
Vec = gather(E->Scalars);
}
if (NeedToShuffleReuses) {
ShuffleBuilder.addMask(E->ReuseShuffleIndices);
Vec = ShuffleBuilder.finalize(Vec);
}
E->VectorizedValue = Vec;
return Vec;
}
assert((E->State == TreeEntry::Vectorize ||
E->State == TreeEntry::ScatterVectorize) &&
"Unhandled state");
unsigned ShuffleOrOp =
E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
Instruction *VL0 = E->getMainOp();
Type *ScalarTy = VL0->getType();
if (auto *Store = dyn_cast<StoreInst>(VL0))
ScalarTy = Store->getValueOperand()->getType();
else if (auto *IE = dyn_cast<InsertElementInst>(VL0))
ScalarTy = IE->getOperand(1)->getType();
auto *VecTy = FixedVectorType::get(ScalarTy, E->Scalars.size());
switch (ShuffleOrOp) {
case Instruction::PHI: {
assert((E->ReorderIndices.empty() ||
E != VectorizableTree.front().get() ||
!E->UserTreeIndices.empty()) &&
"PHI reordering is free.");
auto *PH = cast<PHINode>(VL0);
Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
Builder.SetCurrentDebugLocation(PH->getDebugLoc());
PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
Value *V = NewPhi;
Builder.SetInsertPoint(&*PH->getParent()->getFirstInsertionPt());
Builder.SetCurrentDebugLocation(PH->getDebugLoc());
ShuffleBuilder.addInversedMask(E->ReorderIndices);
ShuffleBuilder.addMask(E->ReuseShuffleIndices);
V = ShuffleBuilder.finalize(V);
E->VectorizedValue = V;
SmallPtrSet<BasicBlock*, 4> VisitedBBs;
for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
ValueList Operands;
BasicBlock *IBB = PH->getIncomingBlock(i);
if (!VisitedBBs.insert(IBB).second) {
NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
continue;
}
Builder.SetInsertPoint(IBB->getTerminator());
Builder.SetCurrentDebugLocation(PH->getDebugLoc());
Value *Vec = vectorizeTree(E->getOperand(i));
NewPhi->addIncoming(Vec, IBB);
}
assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
"Invalid number of incoming values");
return V;
}
case Instruction::ExtractElement: {
Value *V = E->getSingleOperand(0);
Builder.SetInsertPoint(VL0);
ShuffleBuilder.addInversedMask(E->ReorderIndices);
ShuffleBuilder.addMask(E->ReuseShuffleIndices);
V = ShuffleBuilder.finalize(V);
E->VectorizedValue = V;
return V;
}
case Instruction::ExtractValue: {
auto *LI = cast<LoadInst>(E->getSingleOperand(0));
Builder.SetInsertPoint(LI);
auto *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace());
Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy);
LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlign());
Value *NewV = propagateMetadata(V, E->Scalars);
ShuffleBuilder.addInversedMask(E->ReorderIndices);
ShuffleBuilder.addMask(E->ReuseShuffleIndices);
NewV = ShuffleBuilder.finalize(NewV);
E->VectorizedValue = NewV;
return NewV;
}
case Instruction::InsertElement: {
assert(E->ReuseShuffleIndices.empty() && "All inserts should be unique");
Builder.SetInsertPoint(cast<Instruction>(E->Scalars.back()));
Value *V = vectorizeTree(E->getOperand(1));
auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) {
return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0));
}));
const unsigned NumElts =
cast<FixedVectorType>(FirstInsert->getType())->getNumElements();
const unsigned NumScalars = E->Scalars.size();
unsigned Offset = *getInsertIndex(VL0);
assert(Offset < NumElts && "Failed to find vector index offset");
SmallVector<int> Mask;
if (!E->ReorderIndices.empty()) {
inversePermutation(E->ReorderIndices, Mask);
Mask.append(NumElts - NumScalars, UndefMaskElem);
} else {
Mask.assign(NumElts, UndefMaskElem);
std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0);
}
bool IsIdentity = true;
SmallVector<int> PrevMask(NumElts, UndefMaskElem);
Mask.swap(PrevMask);
for (unsigned I = 0; I < NumScalars; ++I) {
Value *Scalar = E->Scalars[PrevMask[I]];
unsigned InsertIdx = *getInsertIndex(Scalar);
IsIdentity &= InsertIdx - Offset == I;
Mask[InsertIdx - Offset] = I;
}
if (!IsIdentity || NumElts != NumScalars) {
V = Builder.CreateShuffleVector(V, Mask);
if (auto *I = dyn_cast<Instruction>(V)) {
GatherShuffleSeq.insert(I);
CSEBlocks.insert(I->getParent());
}
}
if ((!IsIdentity || Offset != 0 ||
!isUndefVector(FirstInsert->getOperand(0))) &&
NumElts != NumScalars) {
SmallVector<int> InsertMask(NumElts);
std::iota(InsertMask.begin(), InsertMask.end(), 0);
for (unsigned I = 0; I < NumElts; I++) {
if (Mask[I] != UndefMaskElem)
InsertMask[Offset + I] = NumElts + I;
}
V = Builder.CreateShuffleVector(
FirstInsert->getOperand(0), V, InsertMask,
cast<Instruction>(E->Scalars.back())->getName());
if (auto *I = dyn_cast<Instruction>(V)) {
GatherShuffleSeq.insert(I);
CSEBlocks.insert(I->getParent());
}
}
++NumVectorInstructions;
E->VectorizedValue = V;
return V;
}
case Instruction::ZExt:
case Instruction::SExt:
case Instruction::FPToUI:
case Instruction::FPToSI:
case Instruction::FPExt:
case Instruction::PtrToInt:
case Instruction::IntToPtr:
case Instruction::SIToFP:
case Instruction::UIToFP:
case Instruction::Trunc:
case Instruction::FPTrunc:
case Instruction::BitCast: {
setInsertPointAfterBundle(E);
Value *InVec = vectorizeTree(E->getOperand(0));
if (E->VectorizedValue) {
LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
return E->VectorizedValue;
}
auto *CI = cast<CastInst>(VL0);
Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
ShuffleBuilder.addInversedMask(E->ReorderIndices);
ShuffleBuilder.addMask(E->ReuseShuffleIndices);
V = ShuffleBuilder.finalize(V);
E->VectorizedValue = V;
++NumVectorInstructions;
return V;
}
case Instruction::FCmp:
case Instruction::ICmp: {
setInsertPointAfterBundle(E);
Value *L = vectorizeTree(E->getOperand(0));
Value *R = vectorizeTree(E->getOperand(1));
if (E->VectorizedValue) {
LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
return E->VectorizedValue;
}
CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
Value *V = Builder.CreateCmp(P0, L, R);
propagateIRFlags(V, E->Scalars, VL0);
ShuffleBuilder.addInversedMask(E->ReorderIndices);
ShuffleBuilder.addMask(E->ReuseShuffleIndices);
V = ShuffleBuilder.finalize(V);
E->VectorizedValue = V;
++NumVectorInstructions;
return V;
}
case Instruction::Select: {
setInsertPointAfterBundle(E);
Value *Cond = vectorizeTree(E->getOperand(0));
Value *True = vectorizeTree(E->getOperand(1));
Value *False = vectorizeTree(E->getOperand(2));
if (E->VectorizedValue) {
LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
return E->VectorizedValue;
}
Value *V = Builder.CreateSelect(Cond, True, False);
ShuffleBuilder.addInversedMask(E->ReorderIndices);
ShuffleBuilder.addMask(E->ReuseShuffleIndices);
V = ShuffleBuilder.finalize(V);
E->VectorizedValue = V;
++NumVectorInstructions;
return V;
}
case Instruction::FNeg: {
setInsertPointAfterBundle(E);
Value *Op = vectorizeTree(E->getOperand(0));
if (E->VectorizedValue) {
LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
return E->VectorizedValue;
}
Value *V = Builder.CreateUnOp(
static_cast<Instruction::UnaryOps>(E->getOpcode()), Op);
propagateIRFlags(V, E->Scalars, VL0);
if (auto *I = dyn_cast<Instruction>(V))
V = propagateMetadata(I, E->Scalars);
ShuffleBuilder.addInversedMask(E->ReorderIndices);
ShuffleBuilder.addMask(E->ReuseShuffleIndices);
V = ShuffleBuilder.finalize(V);
E->VectorizedValue = V;
++NumVectorInstructions;
return V;
}
case Instruction::Add:
case Instruction::FAdd:
case Instruction::Sub:
case Instruction::FSub:
case Instruction::Mul:
case Instruction::FMul:
case Instruction::UDiv:
case Instruction::SDiv:
case Instruction::FDiv:
case Instruction::URem:
case Instruction::SRem:
case Instruction::FRem:
case Instruction::Shl:
case Instruction::LShr:
case Instruction::AShr:
case Instruction::And:
case Instruction::Or:
case Instruction::Xor: {
setInsertPointAfterBundle(E);
Value *LHS = vectorizeTree(E->getOperand(0));
Value *RHS = vectorizeTree(E->getOperand(1));
if (E->VectorizedValue) {
LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
return E->VectorizedValue;
}
Value *V = Builder.CreateBinOp(
static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS);
propagateIRFlags(V, E->Scalars, VL0);
if (auto *I = dyn_cast<Instruction>(V))
V = propagateMetadata(I, E->Scalars);
ShuffleBuilder.addInversedMask(E->ReorderIndices);
ShuffleBuilder.addMask(E->ReuseShuffleIndices);
V = ShuffleBuilder.finalize(V);
E->VectorizedValue = V;
++NumVectorInstructions;
return V;
}
case Instruction::Load: {
setInsertPointAfterBundle(E);
LoadInst *LI = cast<LoadInst>(VL0);
Instruction *NewLI;
unsigned AS = LI->getPointerAddressSpace();
Value *PO = LI->getPointerOperand();
if (E->State == TreeEntry::Vectorize) {
Value *VecPtr = Builder.CreateBitCast(PO, VecTy->getPointerTo(AS));
NewLI = Builder.CreateAlignedLoad(VecTy, VecPtr, LI->getAlign());
if (TreeEntry *Entry = getTreeEntry(PO)) {
unsigned FoundLane = Entry->findLaneForValue(PO);
ExternalUses.emplace_back(
PO, PO != VecPtr ? cast<User>(VecPtr) : NewLI, FoundLane);
}
} else {
assert(E->State == TreeEntry::ScatterVectorize && "Unhandled state");
Value *VecPtr = vectorizeTree(E->getOperand(0));
Align CommonAlignment = LI->getAlign();
for (Value *V : E->Scalars)
CommonAlignment =
std::min(CommonAlignment, cast<LoadInst>(V)->getAlign());
NewLI = Builder.CreateMaskedGather(VecTy, VecPtr, CommonAlignment);
}
Value *V = propagateMetadata(NewLI, E->Scalars);
ShuffleBuilder.addInversedMask(E->ReorderIndices);
ShuffleBuilder.addMask(E->ReuseShuffleIndices);
V = ShuffleBuilder.finalize(V);
E->VectorizedValue = V;
++NumVectorInstructions;
return V;
}
case Instruction::Store: {
auto *SI = cast<StoreInst>(VL0);
unsigned AS = SI->getPointerAddressSpace();
setInsertPointAfterBundle(E);
Value *VecValue = vectorizeTree(E->getOperand(0));
ShuffleBuilder.addMask(E->ReorderIndices);
VecValue = ShuffleBuilder.finalize(VecValue);
Value *ScalarPtr = SI->getPointerOperand();
Value *VecPtr = Builder.CreateBitCast(
ScalarPtr, VecValue->getType()->getPointerTo(AS));
StoreInst *ST =
Builder.CreateAlignedStore(VecValue, VecPtr, SI->getAlign());
if (TreeEntry *Entry = getTreeEntry(ScalarPtr)) {
unsigned FoundLane = Entry->findLaneForValue(ScalarPtr);
ExternalUses.push_back(ExternalUser(
ScalarPtr, ScalarPtr != VecPtr ? cast<User>(VecPtr) : ST,
FoundLane));
}
Value *V = propagateMetadata(ST, E->Scalars);
E->VectorizedValue = V;
++NumVectorInstructions;
return V;
}
case Instruction::GetElementPtr: {
auto *GEP0 = cast<GetElementPtrInst>(VL0);
setInsertPointAfterBundle(E);
Value *Op0 = vectorizeTree(E->getOperand(0));
SmallVector<Value *> OpVecs;
for (int J = 1, N = GEP0->getNumOperands(); J < N; ++J) {
Value *OpVec = vectorizeTree(E->getOperand(J));
OpVecs.push_back(OpVec);
}
Value *V = Builder.CreateGEP(GEP0->getSourceElementType(), Op0, OpVecs);
if (Instruction *I = dyn_cast<GetElementPtrInst>(V)) {
SmallVector<Value *> GEPs;
for (Value *V : E->Scalars) {
if (isa<GetElementPtrInst>(V))
GEPs.push_back(V);
}
V = propagateMetadata(I, GEPs);
}
ShuffleBuilder.addInversedMask(E->ReorderIndices);
ShuffleBuilder.addMask(E->ReuseShuffleIndices);
V = ShuffleBuilder.finalize(V);
E->VectorizedValue = V;
++NumVectorInstructions;
return V;
}
case Instruction::Call: {
CallInst *CI = cast<CallInst>(VL0);
setInsertPointAfterBundle(E);
Intrinsic::ID IID = Intrinsic::not_intrinsic;
if (Function *FI = CI->getCalledFunction())
IID = FI->getIntrinsicID();
Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI);
bool UseIntrinsic = ID != Intrinsic::not_intrinsic &&
VecCallCosts.first <= VecCallCosts.second;
Value *ScalarArg = nullptr;
std::vector<Value *> OpVecs;
SmallVector<Type *, 2> TysForDecl =
{FixedVectorType::get(CI->getType(), E->Scalars.size())};
for (int j = 0, e = CI->arg_size(); j < e; ++j) {
ValueList OpVL;
if (UseIntrinsic && isVectorIntrinsicWithScalarOpAtArg(IID, j)) {
CallInst *CEI = cast<CallInst>(VL0);
ScalarArg = CEI->getArgOperand(j);
OpVecs.push_back(CEI->getArgOperand(j));
if (isVectorIntrinsicWithOverloadTypeAtArg(IID, j))
TysForDecl.push_back(ScalarArg->getType());
continue;
}
Value *OpVec = vectorizeTree(E->getOperand(j));
LLVM_DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
OpVecs.push_back(OpVec);
if (isVectorIntrinsicWithOverloadTypeAtArg(IID, j))
TysForDecl.push_back(OpVec->getType());
}
Function *CF;
if (!UseIntrinsic) {
VFShape Shape = VFShape::get(
*CI,
ElementCount::getFixed(static_cast<unsigned>(VecTy->getNumElements())),
/*HasGlobalPred=*/false);
CF = VFDatabase(*CI).getVectorizedFunction(Shape);
} else {
CF = Intrinsic::getDeclaration(F->getParent(), ID, TysForDecl);
}
SmallVector<OperandBundleDef, 1> OpBundles;
CI->getOperandBundlesAsDefs(OpBundles);
Value *V = Builder.CreateCall(CF, OpVecs, OpBundles);
if (ScalarArg) {
if (TreeEntry *Entry = getTreeEntry(ScalarArg)) {
unsigned FoundLane = Entry->findLaneForValue(ScalarArg);
ExternalUses.push_back(
ExternalUser(ScalarArg, cast<User>(V), FoundLane));
}
}
propagateIRFlags(V, E->Scalars, VL0);
ShuffleBuilder.addInversedMask(E->ReorderIndices);
ShuffleBuilder.addMask(E->ReuseShuffleIndices);
V = ShuffleBuilder.finalize(V);
E->VectorizedValue = V;
++NumVectorInstructions;
return V;
}
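// Alternate-opcode bundles (e.g. lanes alternating between fadd and fsub)
// are vectorized by emitting both full-width vector instructions and
// blending the per-lane results with one shufflevector; for a 4-lane
// <add, sub, add, sub> bundle the blend mask would be <0, 5, 2, 7>.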
case Instruction::ShuffleVector: {
assert(E->isAltShuffle() &&
((Instruction::isBinaryOp(E->getOpcode()) &&
Instruction::isBinaryOp(E->getAltOpcode())) ||
(Instruction::isCast(E->getOpcode()) &&
Instruction::isCast(E->getAltOpcode())) ||
(isa<CmpInst>(VL0) && isa<CmpInst>(E->getAltOp()))) &&
"Invalid Shuffle Vector Operand");
Value *LHS = nullptr, *RHS = nullptr;
if (Instruction::isBinaryOp(E->getOpcode()) || isa<CmpInst>(VL0)) {
setInsertPointAfterBundle(E);
LHS = vectorizeTree(E->getOperand(0));
RHS = vectorizeTree(E->getOperand(1));
} else {
setInsertPointAfterBundle(E);
LHS = vectorizeTree(E->getOperand(0));
}
if (E->VectorizedValue) {
LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
return E->VectorizedValue;
}
Value *V0, *V1;
if (Instruction::isBinaryOp(E->getOpcode())) {
V0 = Builder.CreateBinOp(
static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS);
V1 = Builder.CreateBinOp(
static_cast<Instruction::BinaryOps>(E->getAltOpcode()), LHS, RHS);
} else if (auto *CI0 = dyn_cast<CmpInst>(VL0)) {
V0 = Builder.CreateCmp(CI0->getPredicate(), LHS, RHS);
auto *AltCI = cast<CmpInst>(E->getAltOp());
CmpInst::Predicate AltPred = AltCI->getPredicate();
V1 = Builder.CreateCmp(AltPred, LHS, RHS);
} else {
V0 = Builder.CreateCast(
static_cast<Instruction::CastOps>(E->getOpcode()), LHS, VecTy);
V1 = Builder.CreateCast(
static_cast<Instruction::CastOps>(E->getAltOpcode()), LHS, VecTy);
}
for (Value *V : {V0, V1}) {
if (auto *I = dyn_cast<Instruction>(V)) {
GatherShuffleSeq.insert(I);
CSEBlocks.insert(I->getParent());
}
}
ValueList OpScalars, AltScalars;
SmallVector<int> Mask;
buildShuffleEntryMask(
E->Scalars, E->ReorderIndices, E->ReuseShuffleIndices,
[E](Instruction *I) {
assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
return isAlternateInstruction(I, E->getMainOp(), E->getAltOp());
},
Mask, &OpScalars, &AltScalars);
propagateIRFlags(V0, OpScalars);
propagateIRFlags(V1, AltScalars);
Value *V = Builder.CreateShuffleVector(V0, V1, Mask);
if (auto *I = dyn_cast<Instruction>(V)) {
V = propagateMetadata(I, E->Scalars);
GatherShuffleSeq.insert(I);
CSEBlocks.insert(I->getParent());
}
V = ShuffleBuilder.finalize(V);
E->VectorizedValue = V;
++NumVectorInstructions;
return V;
}
default:
llvm_unreachable("unknown inst");
}
return nullptr;
}
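/// Entry point that vectorizes the whole tree with no extra externally used
/// values.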
Value *BoUpSLP::vectorizeTree() {
ExtraValueToDebugLocsMap ExternallyUsedValues;
return vectorizeTree(ExternallyUsedValues);
}
namespace {
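/// Describes a group of insertelement instructions that together build one
/// wide vector from vectorized values, with one shuffle mask per source
/// vector.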
struct ShuffledInsertData {
SmallVector<InsertElementInst *> InsertElements;
MapVector<Value *, SmallVector<int>> ValueMasks;
};
} // namespace
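/// Schedules all blocks, emits vector code for the whole tree, extracts
/// every externally used scalar from its lane, rewires insertelement-based
/// buildvectors to shuffles and finally erases the now-dead scalar
/// instructions. Returns the vectorized root value.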
Value *
BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) {
for (auto &BSIter : BlocksSchedules) {
scheduleBlock(BSIter.second.get());
}
Builder.SetInsertPoint(&F->getEntryBlock().front());
auto *VectorRoot = vectorizeTree(VectorizableTree[0].get());
auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
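// If the root was computed in a narrower (minimal bitwidth) type, truncate
// the vector to it here; extracted lanes are sign-/zero-extended back to the
// original scalar type at their external uses.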
if (MinBWs.count(ScalarRoot)) {
if (auto *I = dyn_cast<Instruction>(VectorRoot)) {
if (isa<PHINode>(I))
Builder.SetInsertPoint(&*I->getParent()->getFirstInsertionPt());
else
Builder.SetInsertPoint(&*++BasicBlock::iterator(I));
}
auto BundleWidth = VectorizableTree[0]->Scalars.size();
auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
auto *VecTy = FixedVectorType::get(MinTy, BundleWidth);
auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy);
VectorizableTree[0]->VectorizedValue = Trunc;
}
LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size()
<< " values .\n");
SmallVector<ShuffledInsertData> ShuffledInserts;
DenseMap<Value *, InsertElementInst *> VectorToInsertElement;
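// Extract each externally used scalar from its lane of the vectorized value,
// placing the extract so that it dominates the external user (after the
// vector definition when there is no concrete user, at the incoming block's
// terminator for PHI uses, or directly in front of an ordinary user).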
for (const auto &ExternalUse : ExternalUses) {
Value *Scalar = ExternalUse.Scalar;
llvm::User *User = ExternalUse.User;
if (User && !is_contained(Scalar->users(), User))
continue;
TreeEntry *E = getTreeEntry(Scalar);
assert(E && "Invalid scalar");
assert(E->State != TreeEntry::NeedToGather &&
"Extracting from a gather list");
if (E->getOpcode() == Instruction::GetElementPtr &&
!isa<GetElementPtrInst>(Scalar))
continue;
Value *Vec = E->VectorizedValue;
assert(Vec && "Can't find vectorizable value");
Value *Lane = Builder.getInt32(ExternalUse.Lane);
auto ExtractAndExtendIfNeeded = [&](Value *Vec) {
if (Scalar->getType() != Vec->getType()) {
Value *Ex;
if (auto *ES = dyn_cast<ExtractElementInst>(Scalar)) {
Ex = Builder.CreateExtractElement(ES->getOperand(0),
ES->getOperand(1));
} else {
Ex = Builder.CreateExtractElement(Vec, Lane);
}
if (!MinBWs.count(ScalarRoot))
return Ex;
if (MinBWs[ScalarRoot].second)
return Builder.CreateSExt(Ex, Scalar->getType());
return Builder.CreateZExt(Ex, Scalar->getType());
}
assert(isa<FixedVectorType>(Scalar->getType()) &&
isa<InsertElementInst>(Scalar) &&
"In-tree scalar of vector type is not insertelement?");
auto *IE = cast<InsertElementInst>(Scalar);
VectorToInsertElement.try_emplace(Vec, IE);
return Vec;
};
if (!User) {
assert(ExternallyUsedValues.count(Scalar) &&
"Scalar with nullptr as an external user must be registered in "
"ExternallyUsedValues map");
if (auto *VecI = dyn_cast<Instruction>(Vec)) {
Builder.SetInsertPoint(VecI->getParent(),
std::next(VecI->getIterator()));
} else {
Builder.SetInsertPoint(&F->getEntryBlock().front());
}
Value *NewInst = ExtractAndExtendIfNeeded(Vec);
CSEBlocks.insert(cast<Instruction>(Scalar)->getParent());
auto &NewInstLocs = ExternallyUsedValues[NewInst];
auto It = ExternallyUsedValues.find(Scalar);
assert(It != ExternallyUsedValues.end() &&
"Externally used scalar is not found in ExternallyUsedValues");
NewInstLocs.append(It->second);
ExternallyUsedValues.erase(Scalar);
Scalar->replaceAllUsesWith(NewInst);
continue;
}
if (auto *VU = dyn_cast<InsertElementInst>(User)) {
if (!Scalar->getType()->isVectorTy() && isa<Instruction>(Vec)) {
if (auto *FTy = dyn_cast<FixedVectorType>(User->getType())) {
Optional<unsigned> InsertIdx = getInsertIndex(VU);
if (InsertIdx) {
if (MinBWs.count(Scalar) &&
VectorizableTree[0]->VectorizedValue == Vec)
Vec = VectorRoot;
auto *It =
find_if(ShuffledInserts, [VU](const ShuffledInsertData &Data) {
InsertElementInst *VecInsert = Data.InsertElements.front();
return areTwoInsertFromSameBuildVector(VU, VecInsert);
});
unsigned Idx = *InsertIdx;
if (It == ShuffledInserts.end()) {
(void)ShuffledInserts.emplace_back();
It = std::next(ShuffledInserts.begin(),
ShuffledInserts.size() - 1);
SmallVectorImpl<int> &Mask = It->ValueMasks[Vec];
if (Mask.empty())
Mask.assign(FTy->getNumElements(), UndefMaskElem);
Value *Base = VU;
while (auto *IEBase = dyn_cast<InsertElementInst>(Base)) {
if (IEBase != User &&
(!IEBase->hasOneUse() ||
getInsertIndex(IEBase).value_or(Idx) == Idx))
break;
if (const TreeEntry *E = getTreeEntry(IEBase)) {
do {
IEBase = cast<InsertElementInst>(Base);
int IEIdx = *getInsertIndex(IEBase);
assert(Mask[Idx] == UndefMaskElem &&
"InsertElementInstruction used already.");
Mask[IEIdx] = IEIdx;
Base = IEBase->getOperand(0);
} while (E == getTreeEntry(Base));
break;
}
Base = cast<InsertElementInst>(Base)->getOperand(0);
auto It = VectorToInsertElement.find(Base);
if (It != VectorToInsertElement.end())
Base = It->second;
}
}
SmallVectorImpl<int> &Mask = It->ValueMasks[Vec];
if (Mask.empty())
Mask.assign(FTy->getNumElements(), UndefMaskElem);
Mask[Idx] = ExternalUse.Lane;
It->InsertElements.push_back(cast<InsertElementInst>(User));
continue;
}
}
}
}
if (auto *VecI = dyn_cast<Instruction>(Vec)) {
if (PHINode *PH = dyn_cast<PHINode>(User)) {
for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
if (PH->getIncomingValue(i) == Scalar) {
Instruction *IncomingTerminator =
PH->getIncomingBlock(i)->getTerminator();
if (isa<CatchSwitchInst>(IncomingTerminator)) {
Builder.SetInsertPoint(VecI->getParent(),
std::next(VecI->getIterator()));
} else {
Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
}
Value *NewInst = ExtractAndExtendIfNeeded(Vec);
CSEBlocks.insert(PH->getIncomingBlock(i));
PH->setOperand(i, NewInst);
}
}
} else {
Builder.SetInsertPoint(cast<Instruction>(User));
Value *NewInst = ExtractAndExtendIfNeeded(Vec);
CSEBlocks.insert(cast<Instruction>(User)->getParent());
User->replaceUsesOfWith(Scalar, NewInst);
}
} else {
Builder.SetInsertPoint(&F->getEntryBlock().front());
Value *NewInst = ExtractAndExtendIfNeeded(Vec);
CSEBlocks.insert(&F->getEntryBlock());
User->replaceUsesOfWith(Scalar, NewInst);
}
LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
}
auto &&IsIdentityMask = [](ArrayRef<int> Mask, FixedVectorType *VecTy) {
int Limit = Mask.size();
return VecTy->getNumElements() == Mask.size() &&
all_of(Mask, [Limit](int Idx) { return Idx < Limit; }) &&
ShuffleVectorInst::isIdentityMask(Mask);
};
auto &&CombineMasks = [](SmallVectorImpl<int> &Mask, ArrayRef<int> ExtMask) {
SmallVector<int> NewMask(ExtMask.size(), UndefMaskElem);
for (int I = 0, Sz = ExtMask.size(); I < Sz; ++I) {
if (ExtMask[I] == UndefMaskElem)
continue;
NewMask[I] = Mask[ExtMask[I]];
}
Mask.swap(NewMask);
};
auto &&PeekThroughShuffles =
[&IsIdentityMask, &CombineMasks](Value *&V, SmallVectorImpl<int> &Mask,
bool CheckForLengthChange = false) {
while (auto *SV = dyn_cast<ShuffleVectorInst>(V)) {
if (!isa<FixedVectorType>(SV->getType()) ||
(CheckForLengthChange && SV->changesLength()))
break;
if (IsIdentityMask(Mask, cast<FixedVectorType>(SV->getType())) ||
SV->isZeroEltSplat())
break;
bool IsOp1Undef = isUndefVector(SV->getOperand(0));
bool IsOp2Undef = isUndefVector(SV->getOperand(1));
if (!IsOp1Undef && !IsOp2Undef)
break;
SmallVector<int> ShuffleMask(SV->getShuffleMask().begin(),
SV->getShuffleMask().end());
CombineMasks(ShuffleMask, Mask);
Mask.swap(ShuffleMask);
if (IsOp2Undef)
V = SV->getOperand(0);
else
V = SV->getOperand(1);
}
};
auto &&CreateShuffle = [this, &IsIdentityMask, &PeekThroughShuffles,
&CombineMasks](Value *V1, Value *V2,
ArrayRef<int> Mask) -> Value * {
assert(V1 && "Expected at least one vector value.");
if (V2 && !isUndefVector(V2)) {
Value *Op1 = V1;
Value *Op2 = V2;
int VF =
cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
SmallVector<int> CombinedMask1(Mask.size(), UndefMaskElem);
SmallVector<int> CombinedMask2(Mask.size(), UndefMaskElem);
for (int I = 0, E = Mask.size(); I < E; ++I) {
if (Mask[I] < VF)
CombinedMask1[I] = Mask[I];
else
CombinedMask2[I] = Mask[I] - VF;
}
Value *PrevOp1;
Value *PrevOp2;
do {
PrevOp1 = Op1;
PrevOp2 = Op2;
PeekThroughShuffles(Op1, CombinedMask1, true);
PeekThroughShuffles(Op2, CombinedMask2, true);
if (auto *SV1 = dyn_cast<ShuffleVectorInst>(Op1))
if (auto *SV2 = dyn_cast<ShuffleVectorInst>(Op2))
if (SV1->getOperand(0)->getType() ==
SV2->getOperand(0)->getType() &&
SV1->getOperand(0)->getType() != SV1->getType() &&
isUndefVector(SV1->getOperand(1)) &&
isUndefVector(SV2->getOperand(1))) {
Op1 = SV1->getOperand(0);
Op2 = SV2->getOperand(0);
SmallVector<int> ShuffleMask1(SV1->getShuffleMask().begin(),
SV1->getShuffleMask().end());
CombineMasks(ShuffleMask1, CombinedMask1);
CombinedMask1.swap(ShuffleMask1);
SmallVector<int> ShuffleMask2(SV2->getShuffleMask().begin(),
SV2->getShuffleMask().end());
CombineMasks(ShuffleMask2, CombinedMask2);
CombinedMask2.swap(ShuffleMask2);
}
} while (PrevOp1 != Op1 || PrevOp2 != Op2);
VF = cast<VectorType>(Op1->getType())
->getElementCount()
.getKnownMinValue();
for (int I = 0, E = Mask.size(); I < E; ++I) {
if (CombinedMask2[I] != UndefMaskElem) {
assert(CombinedMask1[I] == UndefMaskElem &&
"Expected undefined mask element");
CombinedMask1[I] = CombinedMask2[I] + (Op1 == Op2 ? 0 : VF);
}
}
Value *Vec = Builder.CreateShuffleVector(
Op1, Op1 == Op2 ? PoisonValue::get(Op1->getType()) : Op2,
CombinedMask1);
if (auto *I = dyn_cast<Instruction>(Vec)) {
GatherShuffleSeq.insert(I);
CSEBlocks.insert(I->getParent());
}
return Vec;
}
if (isa<PoisonValue>(V1))
return PoisonValue::get(FixedVectorType::get(
cast<VectorType>(V1->getType())->getElementType(), Mask.size()));
Value *Op = V1;
SmallVector<int> CombinedMask(Mask.begin(), Mask.end());
PeekThroughShuffles(Op, CombinedMask);
if (!isa<FixedVectorType>(Op->getType()) ||
!IsIdentityMask(CombinedMask, cast<FixedVectorType>(Op->getType()))) {
Value *Vec = Builder.CreateShuffleVector(Op, CombinedMask);
if (auto *I = dyn_cast<Instruction>(Vec)) {
GatherShuffleSeq.insert(I);
CSEBlocks.insert(I->getParent());
}
return Vec;
}
return Op;
};
auto &&ResizeToVF = [&CreateShuffle](Value *Vec, ArrayRef<int> Mask) {
unsigned VF = Mask.size();
unsigned VecVF = cast<FixedVectorType>(Vec->getType())->getNumElements();
if (VF != VecVF) {
if (any_of(Mask, [VF](int Idx) { return Idx >= static_cast<int>(VF); })) {
Vec = CreateShuffle(Vec, nullptr, Mask);
return std::make_pair(Vec, true);
}
SmallVector<int> ResizeMask(VF, UndefMaskElem);
for (unsigned I = 0; I < VF; ++I) {
if (Mask[I] != UndefMaskElem)
ResizeMask[Mask[I]] = Mask[I];
}
Vec = CreateShuffle(Vec, nullptr, ResizeMask);
}
return std::make_pair(Vec, false);
};
for (int I = 0, E = ShuffledInserts.size(); I < E; ++I) {
sort(ShuffledInserts[I].InsertElements, isFirstInsertElement);
InsertElementInst *FirstInsert = ShuffledInserts[I].InsertElements.front();
InsertElementInst *LastInsert = ShuffledInserts[I].InsertElements.back();
Builder.SetInsertPoint(LastInsert);
auto Vector = ShuffledInserts[I].ValueMasks.takeVector();
Value *NewInst = performExtractsShuffleAction<Value>(
makeMutableArrayRef(Vector.data(), Vector.size()),
FirstInsert->getOperand(0),
[](Value *Vec) {
return cast<VectorType>(Vec->getType())
->getElementCount()
.getKnownMinValue();
},
ResizeToVF,
[FirstInsert, &CreateShuffle](ArrayRef<int> Mask,
ArrayRef<Value *> Vals) {
assert((Vals.size() == 1 || Vals.size() == 2) &&
"Expected exactly 1 or 2 input values.");
if (Vals.size() == 1) {
if (Mask.size() != cast<FixedVectorType>(Vals.front()->getType())
->getNumElements() ||
!ShuffleVectorInst::isIdentityMask(Mask))
return CreateShuffle(Vals.front(), nullptr, Mask);
return Vals.front();
}
return CreateShuffle(Vals.front() ? Vals.front()
: FirstInsert->getOperand(0),
Vals.back(), Mask);
});
auto It = ShuffledInserts[I].InsertElements.rbegin();
InsertElementInst *II = nullptr;
if (It != ShuffledInserts[I].InsertElements.rend())
II = *It;
SmallVector<Instruction *> Inserts;
while (It != ShuffledInserts[I].InsertElements.rend()) {
assert(II && "Must be an insertelement instruction.");
if (*It == II)
++It;
else
Inserts.push_back(cast<Instruction>(II));
II = dyn_cast<InsertElementInst>(II->getOperand(0));
}
for (Instruction *II : reverse(Inserts)) {
II->replaceUsesOfWith(II->getOperand(0), NewInst);
if (auto *NewI = dyn_cast<Instruction>(NewInst))
if (II->getParent() == NewI->getParent() && II->comesBefore(NewI))
II->moveAfter(NewI);
NewInst = II;
}
LastInsert->replaceAllUsesWith(NewInst);
for (InsertElementInst *IE : reverse(ShuffledInserts[I].InsertElements)) {
IE->replaceUsesOfWith(IE->getOperand(0),
PoisonValue::get(IE->getOperand(0)->getType()));
IE->replaceUsesOfWith(IE->getOperand(1),
PoisonValue::get(IE->getOperand(1)->getType()));
eraseInstruction(IE);
}
CSEBlocks.insert(LastInsert->getParent());
}
for (auto &TEPtr : VectorizableTree) {
TreeEntry *Entry = TEPtr.get();
if (Entry->State == TreeEntry::NeedToGather)
continue;
assert(Entry->VectorizedValue && "Can't find vectorizable value");
for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
Value *Scalar = Entry->Scalars[Lane];
if (Entry->getOpcode() == Instruction::GetElementPtr &&
!isa<GetElementPtrInst>(Scalar))
continue;
#ifndef NDEBUG
Type *Ty = Scalar->getType();
if (!Ty->isVoidTy()) {
for (User *U : Scalar->users()) {
LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");
assert((getTreeEntry(U) ||
(UserIgnoreList && UserIgnoreList->contains(U)) ||
(isa_and_nonnull<Instruction>(U) &&
isDeleted(cast<Instruction>(U)))) &&
"Deleting out-of-tree value");
}
}
#endif
LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
eraseInstruction(cast<Instruction>(Scalar));
}
}
Builder.ClearInsertionPoint();
InstrElementSize.clear();
return VectorizableTree[0]->VectorizedValue;
}
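/// Post-processes the emitted gather/shuffle sequences: loop-invariant
/// gathers are hoisted into the loop preheader, and identical (or
/// compatible, less-defined) insert/extract/shuffle instructions are CSE'd
/// across dominated blocks.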
void BoUpSLP::optimizeGatherSequence() {
LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherShuffleSeq.size()
<< " gather sequences instructions.\n");
for (Instruction *I : GatherShuffleSeq) {
if (isDeleted(I))
continue;
Loop *L = LI->getLoopFor(I->getParent());
if (!L)
continue;
BasicBlock *PreHeader = L->getLoopPreheader();
if (!PreHeader)
continue;
if (any_of(I->operands(), [L](Value *V) {
auto *OpI = dyn_cast<Instruction>(V);
return OpI && L->contains(OpI);
}))
continue;
I->moveBefore(PreHeader->getTerminator());
}
SmallVector<const DomTreeNode *, 8> CSEWorkList;
CSEWorkList.reserve(CSEBlocks.size());
for (BasicBlock *BB : CSEBlocks)
if (DomTreeNode *N = DT->getNode(BB)) {
assert(DT->isReachableFromEntry(N));
CSEWorkList.push_back(N);
}
llvm::sort(CSEWorkList, [](const DomTreeNode *A, const DomTreeNode *B) {
assert((A == B) == (A->getDFSNumIn() == B->getDFSNumIn()) &&
"Different nodes should have different DFS numbers");
return A->getDFSNumIn() < B->getDFSNumIn();
});
auto &&IsIdenticalOrLessDefined = [this](Instruction *I1, Instruction *I2,
SmallVectorImpl<int> &NewMask) {
if (I1->getType() != I2->getType())
return false;
auto *SI1 = dyn_cast<ShuffleVectorInst>(I1);
auto *SI2 = dyn_cast<ShuffleVectorInst>(I2);
if (!SI1 || !SI2)
return I1->isIdenticalTo(I2);
if (SI1->isIdenticalTo(SI2))
return true;
for (int I = 0, E = SI1->getNumOperands(); I < E; ++I)
if (SI1->getOperand(I) != SI2->getOperand(I))
return false;
NewMask.assign(SI2->getShuffleMask().begin(), SI2->getShuffleMask().end());
ArrayRef<int> SM1 = SI1->getShuffleMask();
unsigned LastUndefsCnt = 0;
for (int I = 0, E = NewMask.size(); I < E; ++I) {
if (SM1[I] == UndefMaskElem)
++LastUndefsCnt;
else
LastUndefsCnt = 0;
if (NewMask[I] != UndefMaskElem && SM1[I] != UndefMaskElem &&
NewMask[I] != SM1[I])
return false;
if (NewMask[I] == UndefMaskElem)
NewMask[I] = SM1[I];
}
return SM1.size() - LastUndefsCnt > 1 &&
TTI->getNumberOfParts(SI1->getType()) ==
TTI->getNumberOfParts(
FixedVectorType::get(SI1->getType()->getElementType(),
SM1.size() - LastUndefsCnt));
};
SmallVector<Instruction *, 16> Visited;
for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
assert(*I &&
(I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
"Worklist not sorted properly!");
BasicBlock *BB = (*I)->getBlock();
for (Instruction &In : llvm::make_early_inc_range(*BB)) {
if (isDeleted(&In))
continue;
if (!isa<InsertElementInst>(&In) && !isa<ExtractElementInst>(&In) &&
!isa<ShuffleVectorInst>(&In) && !GatherShuffleSeq.contains(&In))
continue;
bool Replaced = false;
for (Instruction *&V : Visited) {
SmallVector<int> NewMask;
if (IsIdenticalOrLessDefined(&In, V, NewMask) &&
DT->dominates(V->getParent(), In.getParent())) {
In.replaceAllUsesWith(V);
eraseInstruction(&In);
if (auto *SI = dyn_cast<ShuffleVectorInst>(V))
if (!NewMask.empty())
SI->setShuffleMask(NewMask);
Replaced = true;
break;
}
if (isa<ShuffleVectorInst>(In) && isa<ShuffleVectorInst>(V) &&
GatherShuffleSeq.contains(V) &&
IsIdenticalOrLessDefined(V, &In, NewMask) &&
DT->dominates(In.getParent(), V->getParent())) {
In.moveAfter(V);
V->replaceAllUsesWith(&In);
eraseInstruction(V);
if (auto *SI = dyn_cast<ShuffleVectorInst>(&In))
if (!NewMask.empty())
SI->setShuffleMask(NewMask);
V = &In;
Replaced = true;
break;
}
}
if (!Replaced) {
assert(!is_contained(Visited, &In));
Visited.push_back(&In);
}
}
}
CSEBlocks.clear();
GatherShuffleSeq.clear();
}
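/// Links the ScheduleData of all values in \p VL that need scheduling into
/// one bundle and returns the bundle head.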
BoUpSLP::ScheduleData *
BoUpSLP::BlockScheduling::buildBundle(ArrayRef<Value *> VL) {
ScheduleData *Bundle = nullptr;
ScheduleData *PrevInBundle = nullptr;
for (Value *V : VL) {
if (doesNotNeedToBeScheduled(V))
continue;
ScheduleData *BundleMember = getScheduleData(V);
assert(BundleMember &&
"no ScheduleData for bundle member "
"(maybe not in same basic block)");
assert(BundleMember->isSchedulingEntity() &&
"bundle member already part of other bundle");
if (PrevInBundle) {
PrevInBundle->NextInBundle = BundleMember;
} else {
Bundle = BundleMember;
}
BundleMember->FirstInBundle = Bundle;
PrevInBundle = BundleMember;
}
assert(Bundle && "Failed to find schedule bundle");
return Bundle;
}
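/// Checks whether the bundle \p VL can be scheduled, i.e. has no cyclic
/// dependencies. This is a dry run: no instructions are reordered yet,
/// though the scheduling region may grow and ready instructions may be
/// (virtually) scheduled to unblock the bundle. Returns None on failure.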
Optional<BoUpSLP::ScheduleData *>
BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
const InstructionsState &S) {
if (isa<PHINode>(S.OpValue) || isVectorLikeInstWithConstOps(S.OpValue) ||
doesNotNeedToSchedule(VL))
return nullptr;
Instruction *OldScheduleEnd = ScheduleEnd;
LLVM_DEBUG(dbgs() << "SLP: bundle: " << *S.OpValue << "\n");
auto TryScheduleBundleImpl = [this, OldScheduleEnd, SLP](bool ReSchedule,
ScheduleData *Bundle) {
if (ScheduleEnd != OldScheduleEnd) {
for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode())
doForAllOpcodes(I, [](ScheduleData *SD) { SD->clearDependencies(); });
ReSchedule = true;
}
if (Bundle) {
LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle
<< " in block " << BB->getName() << "\n");
calculateDependencies(Bundle, true, SLP);
}
if (ReSchedule) {
resetSchedule();
initialFillReadyList(ReadyInsts);
}
while (((!Bundle && ReSchedule) || (Bundle && !Bundle->isReady())) &&
!ReadyInsts.empty()) {
ScheduleData *Picked = ReadyInsts.pop_back_val();
assert(Picked->isSchedulingEntity() && Picked->isReady() &&
"must be ready to schedule");
schedule(Picked, ReadyInsts);
}
};
for (Value *V : VL) {
if (doesNotNeedToBeScheduled(V))
continue;
if (!extendSchedulingRegion(V, S)) {
TryScheduleBundleImpl(false, nullptr);
return None;
}
}
bool ReSchedule = false;
for (Value *V : VL) {
if (doesNotNeedToBeScheduled(V))
continue;
ScheduleData *BundleMember = getScheduleData(V);
assert(BundleMember &&
"no ScheduleData for bundle member (maybe not in same basic block)");
ReadyInsts.remove(BundleMember);
if (!BundleMember->IsScheduled)
continue;
LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember
<< " was already scheduled\n");
ReSchedule = true;
}
auto *Bundle = buildBundle(VL);
TryScheduleBundleImpl(ReSchedule, Bundle);
if (!Bundle->isReady()) {
cancelScheduling(VL, S.OpValue);
return None;
}
return Bundle;
}
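/// Undoes a previous tryScheduleBundle(): breaks the bundle apart, makes
/// every member its own scheduling entity again and re-inserts members with
/// no unscheduled in-bundle dependencies into the ready list.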
void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL,
Value *OpValue) {
if (isa<PHINode>(OpValue) || isVectorLikeInstWithConstOps(OpValue) ||
doesNotNeedToSchedule(VL))
return;
if (doesNotNeedToBeScheduled(OpValue))
OpValue = *find_if_not(VL, doesNotNeedToBeScheduled);
ScheduleData *Bundle = getScheduleData(OpValue);
LLVM_DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n");
assert(!Bundle->IsScheduled &&
"Can't cancel bundle which is already scheduled");
assert(Bundle->isSchedulingEntity() &&
(Bundle->isPartOfBundle() || needToScheduleSingleInstruction(VL)) &&
"tried to unbundle something which is not a bundle");
if (Bundle->isReady())
ReadyInsts.remove(Bundle);
ScheduleData *BundleMember = Bundle;
while (BundleMember) {
assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
BundleMember->FirstInBundle = BundleMember;
ScheduleData *Next = BundleMember->NextInBundle;
BundleMember->NextInBundle = nullptr;
BundleMember->TE = nullptr;
if (BundleMember->unscheduledDepsInBundle() == 0) {
ReadyInsts.insert(BundleMember);
}
BundleMember = Next;
}
}
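/// Hands out ScheduleData objects from fixed-size chunks so that their
/// addresses stay stable while the scheduling region grows.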
BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() {
if (ChunkPos >= ChunkSize) {
ScheduleDataChunks.push_back(std::make_unique<ScheduleData[]>(ChunkSize));
ChunkPos = 0;
}
return &(ScheduleDataChunks.back()[ChunkPos++]);
}
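/// Extends the scheduling region so that \p V is covered, searching upwards
/// and downwards from the current region simultaneously to keep the
/// extension minimal. Returns false if ScheduleRegionSizeLimit would be
/// exceeded.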
bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V,
const InstructionsState &S) {
if (getScheduleData(V, isOneOf(S, V)))
return true;
Instruction *I = dyn_cast<Instruction>(V);
assert(I && "bundle member must be an instruction");
assert(!isa<PHINode>(I) && !isVectorLikeInstWithConstOps(I) &&
!doesNotNeedToBeScheduled(I) &&
"phi nodes/insertelements/extractelements/extractvalues don't need to "
"be scheduled");
auto &&CheckScheduleForI = [this, &S](Instruction *I) -> bool {
ScheduleData *ISD = getScheduleData(I);
if (!ISD)
return false;
assert(isInSchedulingRegion(ISD) &&
"ScheduleData not in scheduling region");
ScheduleData *SD = allocateScheduleDataChunks();
SD->Inst = I;
SD->init(SchedulingRegionID, S.OpValue);
ExtraScheduleDataMap[I][S.OpValue] = SD;
return true;
};
if (CheckScheduleForI(I))
return true;
if (!ScheduleStart) {
initScheduleData(I, I->getNextNode(), nullptr, nullptr);
ScheduleStart = I;
ScheduleEnd = I->getNextNode();
if (isOneOf(S, I) != I)
CheckScheduleForI(I);
assert(ScheduleEnd && "tried to vectorize a terminator?");
LLVM_DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n");
return true;
}
BasicBlock::reverse_iterator UpIter =
++ScheduleStart->getIterator().getReverse();
BasicBlock::reverse_iterator UpperEnd = BB->rend();
BasicBlock::iterator DownIter = ScheduleEnd->getIterator();
BasicBlock::iterator LowerEnd = BB->end();
while (UpIter != UpperEnd && DownIter != LowerEnd && &*UpIter != I &&
&*DownIter != I) {
if (++ScheduleRegionSize > ScheduleRegionSizeLimit) {
LLVM_DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n");
return false;
}
++UpIter;
++DownIter;
}
if (DownIter == LowerEnd || (UpIter != UpperEnd && &*UpIter == I)) {
assert(I->getParent() == ScheduleStart->getParent() &&
"Instruction is in wrong basic block.");
initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion);
ScheduleStart = I;
if (isOneOf(S, I) != I)
CheckScheduleForI(I);
LLVM_DEBUG(dbgs() << "SLP: extend schedule region start to " << *I
<< "\n");
return true;
}
assert((UpIter == UpperEnd || (DownIter != LowerEnd && &*DownIter == I)) &&
"Expected to reach either the top of the basic block or the "
"instruction at the lower end of the region.");
assert(I->getParent() == ScheduleEnd->getParent() &&
"Instruction is in wrong basic block.");
initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion,
nullptr);
ScheduleEnd = I->getNextNode();
if (isOneOf(S, I) != I)
CheckScheduleForI(I);
assert(ScheduleEnd && "tried to vectorize a terminator?");
LLVM_DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n");
return true;
}
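/// Creates ScheduleData for every instruction in the range [FromI, ToI),
/// chaining memory-accessing instructions (ignoring sideeffect/pseudoprobe
/// intrinsics) into the NextLoadStore list and noting whether the region
/// contains stacksave/stackrestore calls.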
void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI,
Instruction *ToI,
ScheduleData *PrevLoadStore,
ScheduleData *NextLoadStore) {
ScheduleData *CurrentLoadStore = PrevLoadStore;
for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) {
if (doesNotNeedToBeScheduled(I))
continue;
ScheduleData *SD = ScheduleDataMap.lookup(I);
if (!SD) {
SD = allocateScheduleDataChunks();
ScheduleDataMap[I] = SD;
SD->Inst = I;
}
assert(!isInSchedulingRegion(SD) &&
"new ScheduleData already in scheduling region");
SD->init(SchedulingRegionID, I);
if (I->mayReadOrWriteMemory() &&
(!isa<IntrinsicInst>(I) ||
(cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect &&
cast<IntrinsicInst>(I)->getIntrinsicID() !=
Intrinsic::pseudoprobe))) {
if (CurrentLoadStore) {
CurrentLoadStore->NextLoadStore = SD;
} else {
FirstLoadStoreInRegion = SD;
}
CurrentLoadStore = SD;
}
if (match(I, m_Intrinsic<Intrinsic::stacksave>()) ||
match(I, m_Intrinsic<Intrinsic::stackrestore>()))
RegionHasStackSave = true;
}
if (NextLoadStore) {
if (CurrentLoadStore)
CurrentLoadStore->NextLoadStore = NextLoadStore;
} else {
LastLoadStoreInRegion = CurrentLoadStore;
}
}
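/// Computes the def-use, control and memory dependencies of the bundle
/// \p SD, and transitively of every destination bundle whose dependencies
/// are not valid yet. Optionally inserts \p SD into the ready list once it
/// becomes ready.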
void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
bool InsertInReadyList,
BoUpSLP *SLP) {
assert(SD->isSchedulingEntity());
SmallVector<ScheduleData *, 10> WorkList;
WorkList.push_back(SD);
while (!WorkList.empty()) {
ScheduleData *SD = WorkList.pop_back_val();
for (ScheduleData *BundleMember = SD; BundleMember;
BundleMember = BundleMember->NextInBundle) {
assert(isInSchedulingRegion(BundleMember));
if (BundleMember->hasValidDependencies())
continue;
LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember
<< "\n");
BundleMember->Dependencies = 0;
BundleMember->resetUnscheduledDeps();
if (BundleMember->OpValue != BundleMember->Inst) {
if (ScheduleData *UseSD = getScheduleData(BundleMember->Inst)) {
BundleMember->Dependencies++;
ScheduleData *DestBundle = UseSD->FirstInBundle;
if (!DestBundle->IsScheduled)
BundleMember->incrementUnscheduledDeps(1);
if (!DestBundle->hasValidDependencies())
WorkList.push_back(DestBundle);
}
} else {
for (User *U : BundleMember->Inst->users()) {
if (ScheduleData *UseSD = getScheduleData(cast<Instruction>(U))) {
BundleMember->Dependencies++;
ScheduleData *DestBundle = UseSD->FirstInBundle;
if (!DestBundle->IsScheduled)
BundleMember->incrementUnscheduledDeps(1);
if (!DestBundle->hasValidDependencies())
WorkList.push_back(DestBundle);
}
}
}
auto makeControlDependent = [&](Instruction *I) {
auto *DepDest = getScheduleData(I);
assert(DepDest && "must be in schedule window");
DepDest->ControlDependencies.push_back(BundleMember);
BundleMember->Dependencies++;
ScheduleData *DestBundle = DepDest->FirstInBundle;
if (!DestBundle->IsScheduled)
BundleMember->incrementUnscheduledDeps(1);
if (!DestBundle->hasValidDependencies())
WorkList.push_back(DestBundle);
};
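// If this instruction may not transfer execution to its successor (e.g. a
// throwing or non-returning call), record a control dependency against every
// following instruction that is not safe to speculate, so their relative
// order is preserved; the scan stops at the next such barrier.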
if (!isGuaranteedToTransferExecutionToSuccessor(BundleMember->Inst)) {
for (Instruction *I = BundleMember->Inst->getNextNode();
I != ScheduleEnd; I = I->getNextNode()) {
if (isSafeToSpeculativelyExecute(I, &*BB->begin()))
continue;
makeControlDependent(I);
if (!isGuaranteedToTransferExecutionToSuccessor(I))
break;
}
}
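// Keep allocas ordered relative to stacksave/stackrestore calls (and vice
// versa): letting an alloca drift across either intrinsic could change which
// stack memory it occupies.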
if (RegionHasStackSave) {
if (match(BundleMember->Inst, m_Intrinsic<Intrinsic::stacksave>()) ||
match(BundleMember->Inst, m_Intrinsic<Intrinsic::stackrestore>())) {
for (Instruction *I = BundleMember->Inst->getNextNode();
I != ScheduleEnd; I = I->getNextNode()) {
if (match(I, m_Intrinsic<Intrinsic::stacksave>()) ||
match(I, m_Intrinsic<Intrinsic::stackrestore>()))
break;
if (!isa<AllocaInst>(I))
continue;
makeControlDependent(I);
}
}
if (isa<AllocaInst>(BundleMember->Inst)) {
for (Instruction *I = BundleMember->Inst->getNextNode();
I != ScheduleEnd; I = I->getNextNode()) {
if (!match(I, m_Intrinsic<Intrinsic::stacksave>()) &&
!match(I, m_Intrinsic<Intrinsic::stackrestore>()))
continue;
makeControlDependent(I);
break;
}
}
}
ScheduleData *DepDest = BundleMember->NextLoadStore;
if (!DepDest)
continue;
Instruction *SrcInst = BundleMember->Inst;
assert(SrcInst->mayReadOrWriteMemory() &&
"NextLoadStore list for a non-memory-accessing bundle?");
MemoryLocation SrcLoc = getLocation(SrcInst);
bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
unsigned numAliased = 0;
unsigned DistToSrc = 1;
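// Two limits keep the dependency scan cheap: once numAliased reaches
// AliasedCheckLimit, further write pairs are conservatively treated as
// aliasing without querying isAliased(); past MaxMemDepDistance a dependency
// is added regardless, and the walk stops entirely at 2 * MaxMemDepDistance.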
for (; DepDest; DepDest = DepDest->NextLoadStore) {
assert(isInSchedulingRegion(DepDest));
if (DistToSrc >= MaxMemDepDistance ||
((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
(numAliased >= AliasedCheckLimit ||
SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {
numAliased++;
DepDest->MemoryDependencies.push_back(BundleMember);
BundleMember->Dependencies++;
ScheduleData *DestBundle = DepDest->FirstInBundle;
if (!DestBundle->IsScheduled) {
BundleMember->incrementUnscheduledDeps(1);
}
if (!DestBundle->hasValidDependencies()) {
WorkList.push_back(DestBundle);
}
}
if (DistToSrc >= 2 * MaxMemDepDistance)
break;
DistToSrc++;
}
}
if (InsertInReadyList && SD->isReady()) {
ReadyInsts.insert(SD);
LLVM_DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst
<< "\n");
}
}
}
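/// Re-initializes the scheduling state so that the region can be scheduled
/// again: marks all bundles as not scheduled and clears the ready list.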
void BoUpSLP::BlockScheduling::resetSchedule() {
assert(ScheduleStart &&
"tried to reset schedule on block which has not been scheduled");
for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
doForAllOpcodes(I, [&](ScheduleData *SD) {
assert(isInSchedulingRegion(SD) &&
"ScheduleData not in scheduling region");
SD->IsScheduled = false;
SD->resetUnscheduledDeps();
});
}
ReadyInsts.clear();
}
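/// Performs the actual scheduling of the region: repeatedly picks a ready
/// bundle and moves its instructions into their final positions, building
/// the region from the bottom up.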
void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
if (!BS->ScheduleStart)
return;
LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");
BS->resetSchedule();
struct ScheduleDataCompare {
bool operator()(ScheduleData *SD1, ScheduleData *SD2) const {
return SD2->SchedulingPriority < SD1->SchedulingPriority;
}
};
std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts;
int Idx = 0;
for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd;
I = I->getNextNode()) {
BS->doForAllOpcodes(I, [this, &Idx, BS](ScheduleData *SD) {
TreeEntry *SDTE = getTreeEntry(SD->Inst);
(void)SDTE;
assert((isVectorLikeInstWithConstOps(SD->Inst) ||
SD->isPartOfBundle() ==
(SDTE && !doesNotNeedToSchedule(SDTE->Scalars))) &&
"scheduler and vectorizer bundle mismatch");
SD->FirstInBundle->SchedulingPriority = Idx++;
if (SD->isSchedulingEntity() && SD->isPartOfBundle())
BS->calculateDependencies(SD, false, this);
});
}
BS->initialFillReadyList(ReadyInsts);
Instruction *LastScheduledInst = BS->ScheduleEnd;
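// Do the "real" scheduling.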
while (!ReadyInsts.empty()) {
ScheduleData *picked = *ReadyInsts.begin();
ReadyInsts.erase(ReadyInsts.begin());
for (ScheduleData *BundleMember = picked; BundleMember;
BundleMember = BundleMember->NextInBundle) {
Instruction *pickedInst = BundleMember->Inst;
if (pickedInst->getNextNode() != LastScheduledInst)
pickedInst->moveBefore(LastScheduledInst);
LastScheduledInst = pickedInst;
}
BS->schedule(picked, ReadyInsts);
}
#ifdef EXPENSIVE_CHECKS
BS->verify();
#endif
#if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS)
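// Check that all schedulable entities got scheduled.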
for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; I = I->getNextNode()) {
BS->doForAllOpcodes(I, [&](ScheduleData *SD) {
if (SD->isSchedulingEntity() && SD->hasValidDependencies()) {
assert(SD->IsScheduled && "must be scheduled at this point");
}
});
}
#endif
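// Avoid duplicate scheduling of the block.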
BS->ScheduleStart = nullptr;
}
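/// \returns the vector element size in bits to use when vectorizing the
/// expression tree ending at \p V: the width of the stored value for stores,
/// otherwise the width of the widest loaded/extracted value feeding \p V.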
unsigned BoUpSLP::getVectorElementSize(Value *V) {
if (auto *Store = dyn_cast<StoreInst>(V))
return DL->getTypeSizeInBits(Store->getValueOperand()->getType());
if (auto *IEI = dyn_cast<InsertElementInst>(V))
return getVectorElementSize(IEI->getOperand(1));
auto E = InstrElementSize.find(V);
if (E != InstrElementSize.end())
return E->second;
SmallVector<std::pair<Instruction *, BasicBlock *>, 16> Worklist;
SmallPtrSet<Instruction *, 16> Visited;
if (auto *I = dyn_cast<Instruction>(V)) {
Worklist.emplace_back(I, I->getParent());
Visited.insert(I);
}
auto Width = 0u;
while (!Worklist.empty()) {
Instruction *I;
BasicBlock *Parent;
std::tie(I, Parent) = Worklist.pop_back_val();
auto *Ty = I->getType();
if (isa<VectorType>(Ty))
continue;
if (isa<LoadInst>(I) || isa<ExtractElementInst>(I) ||
isa<ExtractValueInst>(I))
Width = std::max<unsigned>(Width, DL->getTypeSizeInBits(Ty));
else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I) ||
isa<UnaryOperator>(I)) {
for (Use &U : I->operands())
if (auto *J = dyn_cast<Instruction>(U.get()))
if (Visited.insert(J).second &&
(isa<PHINode>(I) || J->getParent() == Parent))
Worklist.emplace_back(J, J->getParent());
} else {
break;
}
}
if (!Width) {
if (auto *CI = dyn_cast<CmpInst>(V))
V = CI->getOperand(0);
Width = DL->getTypeSizeInBits(V->getType());
}
for (Instruction *I : Visited)
InstrElementSize[I] = Width;
return Width;
}
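// Determine if a value V in a vectorizable expression Expr can be demoted to a
// smaller type with a truncation. We collect the values that will be demoted
// in ToDemote and additional roots that require investigating in Roots.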
static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr,
SmallVectorImpl<Value *> &ToDemote,
SmallVectorImpl<Value *> &Roots) {
if (isa<Constant>(V)) {
ToDemote.push_back(V);
return true;
}
auto *I = dyn_cast<Instruction>(V);
if (!I || !I->hasOneUse() || !Expr.count(I))
return false;
switch (I->getOpcode()) {
case Instruction::Trunc:
Roots.push_back(I->getOperand(0));
break;
case Instruction::ZExt:
case Instruction::SExt:
if (isa<ExtractElementInst>(I->getOperand(0)) ||
isa<InsertElementInst>(I->getOperand(0)))
return false;
break;
case Instruction::Add:
case Instruction::Sub:
case Instruction::Mul:
case Instruction::And:
case Instruction::Or:
case Instruction::Xor:
if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) ||
!collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots))
return false;
break;
case Instruction::Select: {
SelectInst *SI = cast<SelectInst>(I);
if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) ||
!collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots))
return false;
break;
}
case Instruction::PHI: {
PHINode *PN = cast<PHINode>(I);
for (Value *IncValue : PN->incoming_values())
if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots))
return false;
break;
}
default:
return false;
}
ToDemote.push_back(V);
return true;
}
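/// Computes the minimum bit width to which the scalars of the tree can be
/// demoted, recording the result for each demotable scalar in MinBWs.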
void BoUpSLP::computeMinimumValueSizes() {
if (ExternalUses.empty())
return;
auto &TreeRoot = VectorizableTree[0]->Scalars;
auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType());
if (!TreeRootIT)
return;
SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end());
for (auto &EU : ExternalUses)
if (!Expr.erase(EU.Scalar))
return;
if (!Expr.empty())
return;
for (auto &EntryPtr : VectorizableTree)
Expr.insert(EntryPtr->Scalars.begin(), EntryPtr->Scalars.end());
for (auto *Root : TreeRoot)
if (!Root->hasOneUse() || Expr.count(*Root->user_begin()))
return;
SmallVector<Value *, 32> ToDemote;
SmallVector<Value *, 4> Roots;
for (auto *Root : TreeRoot)
if (!collectValuesToDemote(Root, Expr, ToDemote, Roots))
return;
auto MaxBitWidth = 8u;
for (auto *Root : TreeRoot) {
auto Mask = DB->getDemandedBits(cast<Instruction>(Root));
MaxBitWidth = std::max<unsigned>(
Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth);
}
bool IsKnownPositive = true;
if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) &&
llvm::all_of(TreeRoot, [](Value *R) {
assert(R->hasOneUse() && "Root should have only one use!");
return isa<GetElementPtrInst>(R->user_back());
})) {
MaxBitWidth = 8u;
IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) {
KnownBits Known = computeKnownBits(R, *DL);
return Known.isNonNegative();
});
for (auto *Scalar : ToDemote) {
auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT);
auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
}
if (!IsKnownPositive)
++MaxBitWidth;
}
if (!isPowerOf2_64(MaxBitWidth))
MaxBitWidth = NextPowerOf2(MaxBitWidth);
if (MaxBitWidth >= TreeRootIT->getBitWidth())
return;
while (!Roots.empty())
collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);
for (auto *Scalar : ToDemote)
MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
}
namespace {
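/// The legacy pass manager wrapper for the SLP vectorizer.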
struct SLPVectorizer : public FunctionPass {
SLPVectorizerPass Impl;
static char ID;
explicit SLPVectorizer() : FunctionPass(ID) {
initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
}
bool doInitialization(Module &M) override { return false; }
bool runOnFunction(Function &F) override {
if (skipFunction(F))
return false;
auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
FunctionPass::getAnalysisUsage(AU);
AU.addRequired<AssumptionCacheTracker>();
AU.addRequired<ScalarEvolutionWrapperPass>();
AU.addRequired<AAResultsWrapperPass>();
AU.addRequired<TargetTransformInfoWrapperPass>();
AU.addRequired<LoopInfoWrapperPass>();
AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<DemandedBitsWrapperPass>();
AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
AU.addRequired<InjectTLIMappingsLegacy>();
AU.addPreserved<LoopInfoWrapperPass>();
AU.addPreserved<DominatorTreeWrapperPass>();
AU.addPreserved<AAResultsWrapperPass>();
AU.addPreserved<GlobalsAAWrapperPass>();
AU.setPreservesCFG();
}
};
} // end anonymous namespace
PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) {
auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
auto *TTI = &AM.getResult<TargetIRAnalysis>(F);
auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
auto *AA = &AM.getResult<AAManager>(F);
auto *LI = &AM.getResult<LoopAnalysis>(F);
auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
auto *AC = &AM.getResult<AssumptionAnalysis>(F);
auto *DB = &AM.getResult<DemandedBitsAnalysis>(F);
auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
if (!Changed)
return PreservedAnalyses::all();
PreservedAnalyses PA;
PA.preserveSet<CFGAnalyses>();
return PA;
}
bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
TargetTransformInfo *TTI_,
TargetLibraryInfo *TLI_, AAResults *AA_,
LoopInfo *LI_, DominatorTree *DT_,
AssumptionCache *AC_, DemandedBits *DB_,
OptimizationRemarkEmitter *ORE_) {
if (!RunSLPVectorization)
return false;
SE = SE_;
TTI = TTI_;
TLI = TLI_;
AA = AA_;
LI = LI_;
DT = DT_;
AC = AC_;
DB = DB_;
DL = &F.getParent()->getDataLayout();
Stores.clear();
GEPs.clear();
bool Changed = false;
if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true))) {
LLVM_DEBUG(
dbgs() << "SLP: Didn't find any vector registers for target, abort.\n");
return false;
}
if (F.hasFnAttribute(Attribute::NoImplicitFloat))
return false;
LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");
BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_);
DT->updateDFSNumbers();
for (auto BB : post_order(&F.getEntryBlock())) {
R.clearReductionData();
collectSeedInstructions(BB);
if (!Stores.empty()) {
LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
<< " underlying objects.\n");
Changed |= vectorizeStoreChains(R);
}
Changed |= vectorizeChainsInBlock(BB, R);
if (!GEPs.empty()) {
LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()
<< " underlying objects.\n");
Changed |= vectorizeGEPIndices(BB, R);
}
}
if (Changed) {
R.optimizeGatherSequence();
LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
}
return Changed;
}
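/// Tries to vectorize a chain of potentially consecutive stores starting at
/// offset \p Idx; the chain is only vectorized if its cost beats
/// SLPCostThreshold.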
bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
unsigned Idx, unsigned MinVF) {
LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << Chain.size()
<< "\n");
const unsigned Sz = R.getVectorElementSize(Chain[0]);
unsigned VF = Chain.size();
if (!isPowerOf2_32(Sz) || !isPowerOf2_32(VF) || VF < 2 || VF < MinVF)
return false;
LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << Idx
<< "\n");
R.buildTree(Chain);
if (R.isTreeTinyAndNotFullyVectorizable())
return false;
if (R.isLoadCombineCandidate())
return false;
R.reorderTopToBottom();
R.reorderBottomToTop();
R.buildExternalUses();
R.computeMinimumValueSizes();
InstructionCost Cost = R.getTreeCost();
LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost << " for VF =" << VF << "\n");
if (Cost < -SLPCostThreshold) {
LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost = " << Cost << "\n");
using namespace ore;
R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized",
cast<StoreInst>(Chain[0]))
<< "Stores SLP vectorized with cost " << NV("Cost", Cost)
<< " and with tree size "
<< NV("TreeSize", R.getTreeSize()));
R.vectorizeTree();
return true;
}
return false;
}
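/// Groups the stores in \p Stores into chains of consecutive accesses and
/// tries to vectorize slices of each chain with decreasing sizes.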
bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
BoUpSLP &R) {
BoUpSLP::ValueSet VectorizedStores;
bool Changed = false;
int E = Stores.size();
SmallBitVector Tails(E, false);
int MaxIter = MaxStoreLookup.getValue();
SmallVector<std::pair<int, int>, 16> ConsecutiveChain(
E, std::make_pair(E, INT_MAX));
SmallVector<SmallBitVector, 4> CheckedPairs(E, SmallBitVector(E, false));
int IterCnt;
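// Checks whether the stores at indices K and Idx are at a constant pointer
// distance from each other, recording the nearest neighbor found so far in
// ConsecutiveChain. Returns true to stop the search, either because the two
// stores are directly consecutive or because the lookup budget (MaxIter)
// is exhausted.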
auto &&FindConsecutiveAccess = [this, &Stores, &Tails, &IterCnt, MaxIter,
&CheckedPairs,
&ConsecutiveChain](int K, int Idx) {
if (IterCnt >= MaxIter)
return true;
if (CheckedPairs[Idx].test(K))
return ConsecutiveChain[K].second == 1 &&
ConsecutiveChain[K].first == Idx;
++IterCnt;
CheckedPairs[Idx].set(K);
CheckedPairs[K].set(Idx);
Optional<int> Diff = getPointersDiff(
Stores[K]->getValueOperand()->getType(), Stores[K]->getPointerOperand(),
Stores[Idx]->getValueOperand()->getType(),
Stores[Idx]->getPointerOperand(), *DL, *SE, true);
if (!Diff || *Diff == 0)
return false;
int Val = *Diff;
if (Val < 0) {
if (ConsecutiveChain[Idx].second > -Val) {
Tails.set(K);
ConsecutiveChain[Idx] = std::make_pair(K, -Val);
}
return false;
}
if (ConsecutiveChain[K].second <= Val)
return false;
Tails.set(Idx);
ConsecutiveChain[K] = std::make_pair(Idx, Val);
return Val == 1;
};
for (int Idx = E - 1; Idx >= 0; --Idx) {
const int MaxLookDepth = std::max(E - Idx, Idx + 1);
IterCnt = 0;
for (int Offset = 1, F = MaxLookDepth; Offset < F; ++Offset)
if ((Idx >= Offset && FindConsecutiveAccess(Idx - Offset, Idx)) ||
(Idx + Offset < E && FindConsecutiveAccess(Idx + Offset, Idx)))
break;
}
SmallBitVector TriedTails(E, false);
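// Walk each chain of consecutive stores, collect its members, and try to
// vectorize power-of-two slices of the collected operands.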
for (int Cnt = E; Cnt > 0; --Cnt) {
int I = Cnt - 1;
if (ConsecutiveChain[I].first == E || Tails.test(I))
continue;
BoUpSLP::ValueList Operands;
while (I != E && !VectorizedStores.count(Stores[I])) {
Operands.push_back(Stores[I]);
Tails.set(I);
if (ConsecutiveChain[I].second != 1) {
if (ConsecutiveChain[I].first != E &&
Tails.test(ConsecutiveChain[I].first) && !TriedTails.test(I) &&
!VectorizedStores.count(Stores[ConsecutiveChain[I].first])) {
TriedTails.set(I);
Tails.reset(ConsecutiveChain[I].first);
if (Cnt < ConsecutiveChain[I].first + 2)
Cnt = ConsecutiveChain[I].first + 2;
}
break;
}
I = ConsecutiveChain[I].first;
}
assert(!Operands.empty() && "Expected non-empty list of stores.");
unsigned MaxVecRegSize = R.getMaxVecRegSize();
unsigned EltSize = R.getVectorElementSize(Operands[0]);
unsigned MaxElts = llvm::PowerOf2Floor(MaxVecRegSize / EltSize);
unsigned MaxVF = std::min(R.getMaximumVF(EltSize, Instruction::Store),
MaxElts);
auto *Store = cast<StoreInst>(Operands[0]);
Type *StoreTy = Store->getValueOperand()->getType();
Type *ValueTy = StoreTy;
if (auto *Trunc = dyn_cast<TruncInst>(Store->getValueOperand()))
ValueTy = Trunc->getSrcTy();
unsigned MinVF = TTI->getStoreMinimumVF(
R.getMinVF(DL->getTypeSizeInBits(ValueTy)), StoreTy, ValueTy);
unsigned StartIdx = 0;
for (unsigned Size = MaxVF; Size >= MinVF; Size /= 2) {
for (unsigned Cnt = StartIdx, E = Operands.size(); Cnt + Size <= E;) {
ArrayRef<Value *> Slice = makeArrayRef(Operands).slice(Cnt, Size);
if (!VectorizedStores.count(Slice.front()) &&
!VectorizedStores.count(Slice.back()) &&
vectorizeStoreChain(Slice, R, Cnt, MinVF)) {
VectorizedStores.insert(Slice.begin(), Slice.end());
Changed = true;
if (Cnt == StartIdx)
StartIdx += Size;
Cnt += Size;
continue;
}
++Cnt;
}
if (StartIdx >= Operands.size())
break;
}
}
return Changed;
}
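/// Collects the seed instructions in \p BB: simple stores grouped by the
/// underlying object of their pointer operand, and single-index, non-constant
/// GEPs grouped by their pointer operand.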
void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) {
Stores.clear();
GEPs.clear();
for (Instruction &I : *BB) {
if (auto *SI = dyn_cast<StoreInst>(&I)) {
if (!SI->isSimple())
continue;
if (!isValidElementType(SI->getValueOperand()->getType()))
continue;
Stores[getUnderlyingObject(SI->getPointerOperand())].push_back(SI);
} else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
auto Idx = GEP->idx_begin()->get();
if (GEP->getNumIndices() > 1 || isa<Constant>(Idx))
continue;
if (!isValidElementType(Idx->getType()))
continue;
if (GEP->getType()->isVectorTy())
continue;
GEPs[GEP->getPointerOperand()].push_back(GEP);
}
}
}
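/// Tries to vectorize the values \p A and \p B as a two-element list.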
bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
if (!A || !B)
return false;
if (isa<InsertElementInst>(A) || isa<InsertElementInst>(B))
return false;
Value *VL[] = {A, B};
return tryToVectorizeList(VL, R);
}
bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
bool LimitForRegisterSize) {
if (VL.size() < 2)
return false;
LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = "
<< VL.size() << ".\n");
InstructionsState S = getSameOpcode(VL);
if (!S.getOpcode())
return false;
Instruction *I0 = cast<Instruction>(S.OpValue);
for (Value *V : VL) {
Type *Ty = V->getType();
if (!isa<InsertElementInst>(V) && !isValidElementType(Ty)) {
R.getORE()->emit([&]() {
std::string type_str;
llvm::raw_string_ostream rso(type_str);
Ty->print(rso);
return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0)
<< "Cannot SLP vectorize list: type " << rso.str()
<< " is unsupported by vectorizer";
});
return false;
}
}
unsigned Sz = R.getVectorElementSize(I0);
unsigned MinVF = R.getMinVF(Sz);
unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF);
MaxVF = std::min(R.getMaximumVF(Sz, S.getOpcode()), MaxVF);
if (MaxVF < 2) {
R.getORE()->emit([&]() {
return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0)
<< "Cannot SLP vectorize list: vectorization factor "
<< "less than 2 is not supported";
});
return false;
}
bool Changed = false;
bool CandidateFound = false;
InstructionCost MinCost = SLPCostThreshold.getValue();
Type *ScalarTy = VL[0]->getType();
if (auto *IE = dyn_cast<InsertElementInst>(VL[0]))
ScalarTy = IE->getOperand(1)->getType();
unsigned NextInst = 0, MaxInst = VL.size();
for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; VF /= 2) {
auto *VecTy = FixedVectorType::get(ScalarTy, VF);
if (TTI->getNumberOfParts(VecTy) == VF)
continue;
for (unsigned I = NextInst; I < MaxInst; ++I) {
unsigned OpsWidth = 0;
if (I + VF > MaxInst)
OpsWidth = MaxInst - I;
else
OpsWidth = VF;
if (!isPowerOf2_32(OpsWidth))
continue;
if ((LimitForRegisterSize && OpsWidth < MaxVF) ||
(VF > MinVF && OpsWidth <= VF / 2) || (VF == MinVF && OpsWidth < 2))
break;
ArrayRef<Value *> Ops = VL.slice(I, OpsWidth);
if (llvm::any_of(Ops, [&R](Value *V) {
auto *I = dyn_cast<Instruction>(V);
return I && R.isDeleted(I);
}))
continue;
LLVM_DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
<< "\n");
R.buildTree(Ops);
if (R.isTreeTinyAndNotFullyVectorizable())
continue;
R.reorderTopToBottom();
R.reorderBottomToTop(!isa<InsertElementInst>(Ops.front()));
R.buildExternalUses();
R.computeMinimumValueSizes();
InstructionCost Cost = R.getTreeCost();
CandidateFound = true;
MinCost = std::min(MinCost, Cost);
if (Cost < -SLPCostThreshold) {
LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList",
cast<Instruction>(Ops[0]))
<< "SLP vectorized with cost " << ore::NV("Cost", Cost)
<< " and with tree size "
<< ore::NV("TreeSize", R.getTreeSize()));
R.vectorizeTree();
I += VF - 1;
NextInst = I + 1;
Changed = true;
}
}
}
if (!Changed && CandidateFound) {
R.getORE()->emit([&]() {
return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0)
<< "List vectorization was possible but not beneficial with cost "
<< ore::NV("Cost", MinCost) << " >= "
<< ore::NV("Treshold", -SLPCostThreshold);
});
} else if (!Changed) {
R.getORE()->emit([&]() {
return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0)
<< "Cannot SLP vectorize list: vectorization was impossible"
<< " with available vectorization factors";
});
}
return Changed;
}
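/// Tries to vectorize \p I (a binary operation or compare) by pairing its
/// operands, or an operand with one of the other operand's operands, and
/// picking the best candidate pair via findBestRootPair.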
bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) {
if (!I)
return false;
if ((!isa<BinaryOperator>(I) && !isa<CmpInst>(I)) ||
isa<VectorType>(I->getType()))
return false;
Value *P = I->getParent();
auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P)
return false;
SmallVector<std::pair<Value *, Value *>, 4> Candidates;
Candidates.emplace_back(Op0, Op1);
auto *A = dyn_cast<BinaryOperator>(Op0);
auto *B = dyn_cast<BinaryOperator>(Op1);
if (A && B && B->hasOneUse()) {
auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
if (B0 && B0->getParent() == P)
Candidates.emplace_back(A, B0);
if (B1 && B1->getParent() == P)
Candidates.emplace_back(A, B1);
}
if (B && A && A->hasOneUse()) {
auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
if (A0 && A0->getParent() == P)
Candidates.emplace_back(A0, B);
if (A1 && A1->getParent() == P)
Candidates.emplace_back(A1, B);
}
if (Candidates.size() == 1)
return tryToVectorizePair(Op0, Op1, R);
Optional<int> BestCandidate = R.findBestRootPair(Candidates);
if (!BestCandidate)
return false;
return tryToVectorizePair(Candidates[*BestCandidate].first,
Candidates[*BestCandidate].second, R);
}
namespace {
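/// Model horizontal reductions.
///
/// A horizontal reduction is a tree of reduction instructions that has scalar
/// values which can be put into a vector as its leaves, e.g. a chain of adds
/// fed by loads. The reduction may feed into a store or a binary operation
/// feeding a phi.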
class HorizontalReduction {
using ReductionOpsType = SmallVector<Value *, 16>;
using ReductionOpsListType = SmallVector<ReductionOpsType, 2>;
ReductionOpsListType ReductionOps;
SmallVector<SmallVector<Value *>> ReducedVals;
DenseMap<Value *, SmallVector<Instruction *>> ReducedValsToOps;
MapVector<Instruction *, Value *> ExtraArgs;
WeakTrackingVH ReductionRoot;
RecurKind RdxKind;
static bool isCmpSelMinMax(Instruction *I) {
return match(I, m_Select(m_Cmp(), m_Value(), m_Value())) &&
RecurrenceDescriptor::isMinMaxRecurrenceKind(getRdxKind(I));
}
static bool isBoolLogicOp(Instruction *I) {
return match(I, m_LogicalAnd(m_Value(), m_Value())) ||
match(I, m_LogicalOr(m_Value(), m_Value()));
}
static bool isVectorizable(RecurKind Kind, Instruction *I) {
if (Kind == RecurKind::None)
return false;
if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(Kind) ||
isBoolLogicOp(I))
return true;
if (Kind == RecurKind::FMax || Kind == RecurKind::FMin) {
return I->getFastMathFlags().noNaNs();
}
return I->isAssociative();
}
static Value *getRdxOperand(Instruction *I, unsigned Index) {
if (getRdxKind(I) == RecurKind::Or && isa<SelectInst>(I) && Index == 1)
return I->getOperand(2);
return I->getOperand(Index);
}
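/// Creates a reduction operation of kind \p Kind; min/max reductions are
/// emitted either as cmp + select pairs or as the matching intrinsic,
/// depending on \p UseSelect.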
static Value *createOp(IRBuilder<> &Builder, RecurKind Kind, Value *LHS,
Value *RHS, const Twine &Name, bool UseSelect) {
unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(Kind);
switch (Kind) {
case RecurKind::Or:
if (UseSelect &&
LHS->getType() == CmpInst::makeCmpResultType(LHS->getType()))
return Builder.CreateSelect(LHS, Builder.getTrue(), RHS, Name);
return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS,
Name);
case RecurKind::And:
if (UseSelect &&
LHS->getType() == CmpInst::makeCmpResultType(LHS->getType()))
return Builder.CreateSelect(LHS, RHS, Builder.getFalse(), Name);
return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS,
Name);
case RecurKind::Add:
case RecurKind::Mul:
case RecurKind::Xor:
case RecurKind::FAdd:
case RecurKind::FMul:
return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS,
Name);
case RecurKind::FMax:
return Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS);
case RecurKind::FMin:
return Builder.CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS);
case RecurKind::SMax:
if (UseSelect) {
Value *Cmp = Builder.CreateICmpSGT(LHS, RHS, Name);
return Builder.CreateSelect(Cmp, LHS, RHS, Name);
}
return Builder.CreateBinaryIntrinsic(Intrinsic::smax, LHS, RHS);
case RecurKind::SMin:
if (UseSelect) {
Value *Cmp = Builder.CreateICmpSLT(LHS, RHS, Name);
return Builder.CreateSelect(Cmp, LHS, RHS, Name);
}
return Builder.CreateBinaryIntrinsic(Intrinsic::smin, LHS, RHS);
case RecurKind::UMax:
if (UseSelect) {
Value *Cmp = Builder.CreateICmpUGT(LHS, RHS, Name);
return Builder.CreateSelect(Cmp, LHS, RHS, Name);
}
return Builder.CreateBinaryIntrinsic(Intrinsic::umax, LHS, RHS);
case RecurKind::UMin:
if (UseSelect) {
Value *Cmp = Builder.CreateICmpULT(LHS, RHS, Name);
return Builder.CreateSelect(Cmp, LHS, RHS, Name);
}
return Builder.CreateBinaryIntrinsic(Intrinsic::umin, LHS, RHS);
default:
llvm_unreachable("Unknown reduction operation.");
}
}
static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS,
Value *RHS, const Twine &Name,
const ReductionOpsListType &ReductionOps) {
bool UseSelect = ReductionOps.size() == 2 ||
(ReductionOps.size() == 1 &&
isa<SelectInst>(ReductionOps.front().front()));
assert((!UseSelect || ReductionOps.size() != 2 ||
isa<SelectInst>(ReductionOps[1][0])) &&
"Expected cmp + select pairs for reduction");
Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, UseSelect);
if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) {
if (auto *Sel = dyn_cast<SelectInst>(Op)) {
propagateIRFlags(Sel->getCondition(), ReductionOps[0], nullptr,
false);
propagateIRFlags(Op, ReductionOps[1], nullptr,
false);
return Op;
}
}
propagateIRFlags(Op, ReductionOps[0], nullptr, false);
return Op;
}
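/// \returns the kind of reduction performed by instruction \p V, or
/// RecurKind::None if \p V is not a supported reduction operation.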
static RecurKind getRdxKind(Value *V) {
auto *I = dyn_cast<Instruction>(V);
if (!I)
return RecurKind::None;
if (match(I, m_Add(m_Value(), m_Value())))
return RecurKind::Add;
if (match(I, m_Mul(m_Value(), m_Value())))
return RecurKind::Mul;
if (match(I, m_And(m_Value(), m_Value())) ||
match(I, m_LogicalAnd(m_Value(), m_Value())))
return RecurKind::And;
if (match(I, m_Or(m_Value(), m_Value())) ||
match(I, m_LogicalOr(m_Value(), m_Value())))
return RecurKind::Or;
if (match(I, m_Xor(m_Value(), m_Value())))
return RecurKind::Xor;
if (match(I, m_FAdd(m_Value(), m_Value())))
return RecurKind::FAdd;
if (match(I, m_FMul(m_Value(), m_Value())))
return RecurKind::FMul;
if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value())))
return RecurKind::FMax;
if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value())))
return RecurKind::FMin;
if (match(I, m_SMax(m_Value(), m_Value())))
return RecurKind::SMax;
if (match(I, m_SMin(m_Value(), m_Value())))
return RecurKind::SMin;
if (match(I, m_UMax(m_Value(), m_Value())))
return RecurKind::UMax;
if (match(I, m_UMin(m_Value(), m_Value())))
return RecurKind::UMin;
if (auto *Select = dyn_cast<SelectInst>(I)) {
CmpInst::Predicate Pred;
Instruction *L1;
Instruction *L2;
Value *LHS = Select->getTrueValue();
Value *RHS = Select->getFalseValue();
Value *Cond = Select->getCondition();
if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) {
if (!isa<ExtractElementInst>(RHS) ||
!L2->isIdenticalTo(cast<Instruction>(RHS)))
return RecurKind::None;
} else if (match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Specific(RHS)))) {
if (!isa<ExtractElementInst>(LHS) ||
!L1->isIdenticalTo(cast<Instruction>(LHS)))
return RecurKind::None;
} else {
if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS))
return RecurKind::None;
if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) ||
!L1->isIdenticalTo(cast<Instruction>(LHS)) ||
!L2->isIdenticalTo(cast<Instruction>(RHS)))
return RecurKind::None;
}
switch (Pred) {
default:
return RecurKind::None;
case CmpInst::ICMP_SGT:
case CmpInst::ICMP_SGE:
return RecurKind::SMax;
case CmpInst::ICMP_SLT:
case CmpInst::ICMP_SLE:
return RecurKind::SMin;
case CmpInst::ICMP_UGT:
case CmpInst::ICMP_UGE:
return RecurKind::UMax;
case CmpInst::ICMP_ULT:
case CmpInst::ICMP_ULE:
return RecurKind::UMin;
}
}
return RecurKind::None;
}
static unsigned getFirstOperandIndex(Instruction *I) {
return isCmpSelMinMax(I) ? 1 : 0;
}
static unsigned getNumberOfOperands(Instruction *I) {
return isCmpSelMinMax(I) ? 3 : 2;
}
static bool hasSameParent(Instruction *I, BasicBlock *BB) {
if (isCmpSelMinMax(I) || (isBoolLogicOp(I) && isa<SelectInst>(I))) {
auto *Sel = cast<SelectInst>(I);
auto *Cmp = dyn_cast<Instruction>(Sel->getCondition());
return Sel->getParent() == BB && Cmp && Cmp->getParent() == BB;
}
return I->getParent() == BB;
}
static bool hasRequiredNumberOfUses(bool IsCmpSelMinMax, Instruction *I) {
if (IsCmpSelMinMax) {
if (auto *Sel = dyn_cast<SelectInst>(I))
return Sel->hasNUses(2) && Sel->getCondition()->hasOneUse();
return I->hasNUses(2);
}
return I->hasOneUse();
}
void initReductionOps(Instruction *I) {
if (isCmpSelMinMax(I))
ReductionOps.assign(2, ReductionOpsType());
else
ReductionOps.assign(1, ReductionOpsType());
}
void addReductionOps(Instruction *I) {
if (isCmpSelMinMax(I)) {
ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition());
ReductionOps[1].emplace_back(I);
} else {
ReductionOps[0].emplace_back(I);
}
}
static Value *getLHS(RecurKind Kind, Instruction *I) {
if (Kind == RecurKind::None)
return nullptr;
return I->getOperand(getFirstOperandIndex(I));
}
static Value *getRHS(RecurKind Kind, Instruction *I) {
if (Kind == RecurKind::None)
return nullptr;
return I->getOperand(getFirstOperandIndex(I) + 1);
}
public:
HorizontalReduction() = default;
bool matchAssociativeReduction(PHINode *Phi, Instruction *Inst,
ScalarEvolution &SE, const DataLayout &DL,
const TargetLibraryInfo &TLI) {
assert((!Phi || is_contained(Phi->operands(), Inst)) &&
"Phi needs to use the binary operator");
assert((isa<BinaryOperator>(Inst) || isa<SelectInst>(Inst) ||
isa<IntrinsicInst>(Inst)) &&
"Expected binop, select, or intrinsic for reduction matching");
RdxKind = getRdxKind(Inst);
if (Phi) {
if (getLHS(RdxKind, Inst) == Phi) {
Phi = nullptr;
Inst = dyn_cast<Instruction>(getRHS(RdxKind, Inst));
if (!Inst)
return false;
RdxKind = getRdxKind(Inst);
} else if (getRHS(RdxKind, Inst) == Phi) {
Phi = nullptr;
Inst = dyn_cast<Instruction>(getLHS(RdxKind, Inst));
if (!Inst)
return false;
RdxKind = getRdxKind(Inst);
}
}
if (!isVectorizable(RdxKind, Inst))
return false;
Type *Ty = Inst->getType();
if (!isValidElementType(Ty) || Ty->isPointerTy())
return false;
if (auto *Sel = dyn_cast<SelectInst>(Inst))
if (!Sel->getCondition()->hasOneUse())
return false;
ReductionRoot = Inst;
BasicBlock *BB = Inst->getParent();
bool IsCmpSelMinMax = isCmpSelMinMax(Inst);
SmallVector<Instruction *> Worklist(1, Inst);
auto &&CheckOperands = [this, IsCmpSelMinMax,
BB](Instruction *TreeN,
SmallVectorImpl<Value *> &ExtraArgs,
SmallVectorImpl<Value *> &PossibleReducedVals,
SmallVectorImpl<Instruction *> &ReductionOps) {
for (int I = getFirstOperandIndex(TreeN),
End = getNumberOfOperands(TreeN);
I < End; ++I) {
Value *EdgeVal = getRdxOperand(TreeN, I);
ReducedValsToOps[EdgeVal].push_back(TreeN);
auto *EdgeInst = dyn_cast<Instruction>(EdgeVal);
if (EdgeInst && !isVectorLikeInstWithConstOps(EdgeInst) &&
!hasSameParent(EdgeInst, BB)) {
ExtraArgs.push_back(EdgeVal);
continue;
}
if (!EdgeInst || getRdxKind(EdgeInst) != RdxKind ||
IsCmpSelMinMax != isCmpSelMinMax(EdgeInst) ||
!hasRequiredNumberOfUses(IsCmpSelMinMax, EdgeInst) ||
!isVectorizable(getRdxKind(EdgeInst), EdgeInst)) {
PossibleReducedVals.push_back(EdgeVal);
continue;
}
ReductionOps.push_back(EdgeInst);
}
};
MapVector<size_t, MapVector<size_t, MapVector<Value *, unsigned>>>
PossibleReducedVals;
initReductionOps(Inst);
while (!Worklist.empty()) {
Instruction *TreeN = Worklist.pop_back_val();
SmallVector<Value *> Args;
SmallVector<Value *> PossibleRedVals;
SmallVector<Instruction *> PossibleReductionOps;
CheckOperands(TreeN, Args, PossibleRedVals, PossibleReductionOps);
if (Args.size() < 2) {
addReductionOps(TreeN);
if (!Args.empty()) {
assert(Args.size() == 1 && "Expected only single argument.");
ExtraArgs[TreeN] = Args.front();
}
for (Value *V : PossibleRedVals) {
size_t Key, Idx;
std::tie(Key, Idx) = generateKeySubkey(
V, &TLI,
[&PossibleReducedVals, &DL, &SE](size_t Key, LoadInst *LI) {
auto It = PossibleReducedVals.find(Key);
if (It != PossibleReducedVals.end()) {
for (const auto &LoadData : It->second) {
auto *RLI = cast<LoadInst>(LoadData.second.front().first);
if (getPointersDiff(RLI->getType(),
RLI->getPointerOperand(), LI->getType(),
LI->getPointerOperand(), DL, SE,
true))
return hash_value(RLI->getPointerOperand());
}
}
return hash_value(LI->getPointerOperand());
},
false);
++PossibleReducedVals[Key][Idx]
.insert(std::make_pair(V, 0))
.first->second;
}
Worklist.append(PossibleReductionOps.rbegin(),
PossibleReductionOps.rend());
} else {
size_t Key, Idx;
std::tie(Key, Idx) = generateKeySubkey(
TreeN, &TLI,
[&PossibleReducedVals, &DL, &SE](size_t Key, LoadInst *LI) {
auto It = PossibleReducedVals.find(Key);
if (It != PossibleReducedVals.end()) {
for (const auto &LoadData : It->second) {
auto *RLI = cast<LoadInst>(LoadData.second.front().first);
if (getPointersDiff(RLI->getType(), RLI->getPointerOperand(),
LI->getType(), LI->getPointerOperand(),
DL, SE, true))
return hash_value(RLI->getPointerOperand());
}
}
return hash_value(LI->getPointerOperand());
},
false);
++PossibleReducedVals[Key][Idx]
.insert(std::make_pair(TreeN, 0))
.first->second;
}
}
auto PossibleReducedValsVect = PossibleReducedVals.takeVector();
for (auto &PossibleReducedVals : PossibleReducedValsVect) {
auto PossibleRedVals = PossibleReducedVals.second.takeVector();
SmallVector<SmallVector<Value *>> PossibleRedValsVect;
for (auto It = PossibleRedVals.begin(), E = PossibleRedVals.end();
It != E; ++It) {
PossibleRedValsVect.emplace_back();
auto RedValsVect = It->second.takeVector();
stable_sort(RedValsVect, llvm::less_second());
for (const std::pair<Value *, unsigned> &Data : RedValsVect)
PossibleRedValsVect.back().append(Data.second, Data.first);
}
stable_sort(PossibleRedValsVect, [](const auto &P1, const auto &P2) {
return P1.size() > P2.size();
});
ReducedVals.emplace_back();
for (ArrayRef<Value *> Data : PossibleRedValsVect)
ReducedVals.back().append(Data.rbegin(), Data.rend());
}
stable_sort(ReducedVals, [](ArrayRef<Value *> P1, ArrayRef<Value *> P2) {
return P1.size() > P2.size();
});
return true;
}
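/// Attempts to vectorize the reduction found by matchAssociativeReduction;
/// \returns the new reduced value, or nullptr if no profitable vectorization
/// was possible.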
Value *tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
constexpr int ReductionLimit = 4;
constexpr unsigned RegMaxNumber = 4;
constexpr unsigned RedValsMaxNumber = 128;
unsigned NumReducedVals = std::accumulate(
ReducedVals.begin(), ReducedVals.end(), 0,
[](int Num, ArrayRef<Value *> Vals) { return Num + Vals.size(); });
if (NumReducedVals < ReductionLimit)
return nullptr;
IRBuilder<> Builder(cast<Instruction>(ReductionRoot));
DenseMap<Value *, WeakTrackingVH> TrackedVals;
BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues;
for (const std::pair<Instruction *, Value *> &Pair : ExtraArgs) {
assert(Pair.first && "DebugLoc must be set.");
ExternallyUsedValues[Pair.second].push_back(Pair.first);
TrackedVals.try_emplace(Pair.second, Pair.second);
}
auto &&GetCmpForMinMaxReduction = [](Instruction *RdxRootInst) {
assert(isa<SelectInst>(RdxRootInst) &&
"Expected min/max reduction to have select root instruction");
Value *ScalarCond = cast<SelectInst>(RdxRootInst)->getCondition();
assert(isa<Instruction>(ScalarCond) &&
"Expected min/max reduction to have compare condition");
return cast<Instruction>(ScalarCond);
};
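// The reduction root is used as the insertion point for new instructions,
// so keep it as an externally used value to prevent it from being deleted.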
ExternallyUsedValues[ReductionRoot];
SmallDenseSet<Value *> IgnoreList;
for (ReductionOpsType &RdxOps : ReductionOps)
for (Value *RdxOp : RdxOps) {
if (!RdxOp)
continue;
IgnoreList.insert(RdxOp);
}
bool IsCmpSelMinMax = isCmpSelMinMax(cast<Instruction>(ReductionRoot));
for (ArrayRef<Value *> Candidates : ReducedVals)
for (Value *V : Candidates)
TrackedVals.try_emplace(V, V);
DenseMap<Value *, unsigned> VectorizedVals;
Value *VectorizedTree = nullptr;
bool CheckForReusedReductionOps = false;
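// Iterate over the groups of reduced values and try to vectorize each
// group, shrinking the reduction width when a slice fails.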
for (unsigned I = 0, E = ReducedVals.size(); I < E; ++I) {
ArrayRef<Value *> OrigReducedVals = ReducedVals[I];
InstructionsState S = getSameOpcode(OrigReducedVals);
SmallVector<Value *> Candidates;
DenseMap<Value *, Value *> TrackedToOrig;
for (unsigned Cnt = 0, Sz = OrigReducedVals.size(); Cnt < Sz; ++Cnt) {
Value *RdxVal = TrackedVals.find(OrigReducedVals[Cnt])->second;
if (auto *Inst = dyn_cast<Instruction>(RdxVal))
if (isVectorLikeInstWithConstOps(Inst) &&
(!S.getOpcode() || !S.isOpcodeOrAlt(Inst)))
continue;
Candidates.push_back(RdxVal);
TrackedToOrig.try_emplace(RdxVal, OrigReducedVals[Cnt]);
}
bool ShuffledExtracts = false;
if (S.getOpcode() == Instruction::ExtractElement && !S.isAltShuffle() &&
I + 1 < E) {
InstructionsState NextS = getSameOpcode(ReducedVals[I + 1]);
if (NextS.getOpcode() == Instruction::ExtractElement &&
!NextS.isAltShuffle()) {
SmallVector<Value *> CommonCandidates(Candidates);
for (Value *RV : ReducedVals[I + 1]) {
Value *RdxVal = TrackedVals.find(RV)->second;
if (auto *Inst = dyn_cast<Instruction>(RdxVal))
if (!NextS.getOpcode() || !NextS.isOpcodeOrAlt(Inst))
continue;
CommonCandidates.push_back(RdxVal);
TrackedToOrig.try_emplace(RdxVal, RV);
}
SmallVector<int> Mask;
if (isFixedVectorShuffle(CommonCandidates, Mask)) {
++I;
Candidates.swap(CommonCandidates);
ShuffledExtracts = true;
}
}
}
unsigned NumReducedVals = Candidates.size();
if (NumReducedVals < ReductionLimit)
continue;
unsigned MaxVecRegSize = V.getMaxVecRegSize();
unsigned EltSize = V.getVectorElementSize(Candidates[0]);
unsigned MaxElts = RegMaxNumber * PowerOf2Floor(MaxVecRegSize / EltSize);
unsigned ReduxWidth = std::min<unsigned>(
PowerOf2Floor(NumReducedVals), std::max(RedValsMaxNumber, MaxElts));
unsigned Start = 0;
unsigned Pos = Start;
unsigned PrevReduxWidth = ReduxWidth;
bool CheckForReusedReductionOpsLocal = false;
auto &&AdjustReducedVals = [&Pos, &Start, &ReduxWidth, NumReducedVals,
&CheckForReusedReductionOpsLocal,
&PrevReduxWidth, &V,
&IgnoreList](bool IgnoreVL = false) {
bool IsAnyRedOpGathered = !IgnoreVL && V.isAnyGathered(IgnoreList);
if (!CheckForReusedReductionOpsLocal && PrevReduxWidth == ReduxWidth) {
CheckForReusedReductionOpsLocal |= IsAnyRedOpGathered;
}
++Pos;
if (Pos < NumReducedVals - ReduxWidth + 1)
return IsAnyRedOpGathered;
Pos = Start;
ReduxWidth /= 2;
return IsAnyRedOpGathered;
};
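// Scan slices of the candidate list, halving the reduction width whenever
// no position yields a vectorizable slice.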
while (Pos < NumReducedVals - ReduxWidth + 1 &&
ReduxWidth >= ReductionLimit) {
if (CheckForReusedReductionOpsLocal && PrevReduxWidth != ReduxWidth &&
Start == 0) {
CheckForReusedReductionOps = true;
break;
}
PrevReduxWidth = ReduxWidth;
ArrayRef<Value *> VL(std::next(Candidates.begin(), Pos), ReduxWidth);
if (V.areAnalyzedReductionVals(VL)) {
(void)AdjustReducedVals(true);
continue;
}
if (any_of(VL, [&V](Value *RedVal) {
auto *RedValI = dyn_cast<Instruction>(RedVal);
if (!RedValI)
return false;
return V.isDeleted(RedValI);
}))
break;
V.buildTree(VL, IgnoreList);
if (V.isTreeTinyAndNotFullyVectorizable(true)) {
if (!AdjustReducedVals())
V.analyzedReductionVals(VL);
continue;
}
if (V.isLoadCombineReductionCandidate(RdxKind)) {
if (!AdjustReducedVals())
V.analyzedReductionVals(VL);
continue;
}
V.reorderTopToBottom();
V.reorderBottomToTop(true);
BoUpSLP::ExtraValueToDebugLocsMap LocalExternallyUsedValues(
ExternallyUsedValues);
for (unsigned Cnt = 0, Sz = ReducedVals.size(); Cnt < Sz; ++Cnt) {
if (Cnt == I || (ShuffledExtracts && Cnt == I - 1))
continue;
for_each(ReducedVals[Cnt],
[&LocalExternallyUsedValues, &TrackedVals](Value *V) {
if (isa<Instruction>(V))
LocalExternallyUsedValues[TrackedVals[V]];
});
}
SmallDenseMap<Value *, unsigned> NumUses;
for (unsigned Cnt = 0; Cnt < Pos; ++Cnt) {
Value *V = Candidates[Cnt];
if (NumUses.count(V) > 0)
continue;
NumUses[V] = std::count(VL.begin(), VL.end(), V);
}
for (unsigned Cnt = Pos + ReduxWidth; Cnt < NumReducedVals; ++Cnt) {
Value *V = Candidates[Cnt];
if (NumUses.count(V) > 0)
continue;
NumUses[V] = std::count(VL.begin(), VL.end(), V);
}
SmallPtrSet<Value *, 4> Visited;
for (unsigned Cnt = 0; Cnt < Pos; ++Cnt) {
Value *V = Candidates[Cnt];
if (!Visited.insert(V).second)
continue;
unsigned NumOps = VectorizedVals.lookup(V) + NumUses[V];
if (NumOps != ReducedValsToOps.find(V)->second.size())
LocalExternallyUsedValues[V];
}
for (unsigned Cnt = Pos + ReduxWidth; Cnt < NumReducedVals; ++Cnt) {
Value *V = Candidates[Cnt];
if (!Visited.insert(V).second)
continue;
unsigned NumOps = VectorizedVals.lookup(V) + NumUses[V];
if (NumOps != ReducedValsToOps.find(V)->second.size())
LocalExternallyUsedValues[V];
}
V.buildExternalUses(LocalExternallyUsedValues);
V.computeMinimumValueSizes();
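// Intersect the fast-math flags from all reduction operations.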
FastMathFlags RdxFMF;
RdxFMF.set();
for (Value *U : IgnoreList)
if (auto *FPMO = dyn_cast<FPMathOperator>(U))
RdxFMF &= FPMO->getFastMathFlags();
InstructionCost TreeCost = V.getTreeCost(VL);
InstructionCost ReductionCost =
getReductionCost(TTI, VL, ReduxWidth, RdxFMF);
InstructionCost Cost = TreeCost + ReductionCost;
if (!Cost.isValid()) {
LLVM_DEBUG(dbgs() << "Encountered invalid baseline cost.\n");
return nullptr;
}
if (Cost >= -SLPCostThreshold) {
V.getORE()->emit([&]() {
return OptimizationRemarkMissed(
SV_NAME, "HorSLPNotBeneficial",
ReducedValsToOps.find(VL[0])->second.front())
<< "Vectorizing horizontal reduction is possible "
<< "but not beneficial with cost " << ore::NV("Cost", Cost)
<< " and threshold "
<< ore::NV("Threshold", -SLPCostThreshold);
});
if (!AdjustReducedVals())
V.analyzedReductionVals(VL);
continue;
}
LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:"
<< Cost << ". (HorRdx)\n");
V.getORE()->emit([&]() {
return OptimizationRemark(
SV_NAME, "VectorizedHorizontalReduction",
ReducedValsToOps.find(VL[0])->second.front())
<< "Vectorized horizontal reduction with cost "
<< ore::NV("Cost", Cost) << " and with tree size "
<< ore::NV("TreeSize", V.getTreeSize());
});
Builder.setFastMathFlags(RdxFMF);
Value *VectorizedRoot = V.vectorizeTree(LocalExternallyUsedValues);
Instruction *RdxRootInst = cast<Instruction>(ReductionRoot);
if (IsCmpSelMinMax)
Builder.SetInsertPoint(GetCmpForMinMaxReduction(RdxRootInst));
else
Builder.SetInsertPoint(RdxRootInst);
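// To prevent poison from leaking across what used to be sequential, safe,
// scalar boolean logic operations, the reduction operand must be frozen.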
if (isa<SelectInst>(RdxRootInst) && isBoolLogicOp(RdxRootInst))
VectorizedRoot = Builder.CreateFreeze(VectorizedRoot);
Value *ReducedSubTree =
emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI);
if (!VectorizedTree) {
VectorizedTree = ReducedSubTree;
} else {
Builder.SetCurrentDebugLocation(
cast<Instruction>(ReductionOps.front().front())->getDebugLoc());
VectorizedTree = createOp(Builder, RdxKind, VectorizedTree,
ReducedSubTree, "op.rdx", ReductionOps);
}
for (Value *V : VL)
++VectorizedVals.try_emplace(TrackedToOrig.find(V)->second, 0)
.first->getSecond();
Pos += ReduxWidth;
Start = Pos;
ReduxWidth = PowerOf2Floor(NumReducedVals - Pos);
}
}
if (VectorizedTree) {
auto &&FinalGen =
[this, &Builder,
&TrackedVals](ArrayRef<std::pair<Instruction *, Value *>> InstVals) {
unsigned Sz = InstVals.size();
SmallVector<std::pair<Instruction *, Value *>> ExtraReds(Sz / 2 +
Sz % 2);
for (unsigned I = 0, E = (Sz / 2) * 2; I < E; I += 2) {
Instruction *RedOp = InstVals[I + 1].first;
Builder.SetCurrentDebugLocation(RedOp->getDebugLoc());
Value *RdxVal1 = InstVals[I].second;
Value *StableRdxVal1 = RdxVal1;
auto It1 = TrackedVals.find(RdxVal1);
if (It1 != TrackedVals.end())
StableRdxVal1 = It1->second;
Value *RdxVal2 = InstVals[I + 1].second;
Value *StableRdxVal2 = RdxVal2;
auto It2 = TrackedVals.find(RdxVal2);
if (It2 != TrackedVals.end())
StableRdxVal2 = It2->second;
Value *ExtraRed = createOp(Builder, RdxKind, StableRdxVal1,
StableRdxVal2, "op.rdx", ReductionOps);
ExtraReds[I / 2] = std::make_pair(InstVals[I].first, ExtraRed);
}
if (Sz % 2 == 1)
ExtraReds[Sz / 2] = InstVals.back();
return ExtraReds;
};
SmallVector<std::pair<Instruction *, Value *>> ExtraReductions;
SmallPtrSet<Value *, 8> Visited;
for (ArrayRef<Value *> Candidates : ReducedVals) {
for (Value *RdxVal : Candidates) {
if (!Visited.insert(RdxVal).second)
continue;
unsigned NumOps = VectorizedVals.lookup(RdxVal);
for (Instruction *RedOp :
makeArrayRef(ReducedValsToOps.find(RdxVal)->second)
.drop_back(NumOps))
ExtraReductions.emplace_back(RedOp, RdxVal);
}
}
for (auto &Pair : ExternallyUsedValues) {
for (auto *I : Pair.second)
ExtraReductions.emplace_back(I, Pair.first);
}
while (ExtraReductions.size() > 1) {
SmallVector<std::pair<Instruction *, Value *>> NewReds =
FinalGen(ExtraReductions);
ExtraReductions.swap(NewReds);
}
if (ExtraReductions.size() == 1) {
Instruction *RedOp = ExtraReductions.back().first;
Builder.SetCurrentDebugLocation(RedOp->getDebugLoc());
Value *RdxVal = ExtraReductions.back().second;
Value *StableRdxVal = RdxVal;
auto It = TrackedVals.find(RdxVal);
if (It != TrackedVals.end())
StableRdxVal = It->second;
VectorizedTree = createOp(Builder, RdxKind, VectorizedTree,
StableRdxVal, "op.rdx", ReductionOps);
}
ReductionRoot->replaceAllUsesWith(VectorizedTree);
#ifndef NDEBUG
SmallSet<Value *, 4> IgnoreSet;
for (ArrayRef<Value *> RdxOps : ReductionOps)
IgnoreSet.insert(RdxOps.begin(), RdxOps.end());
#endif
for (ArrayRef<Value *> RdxOps : ReductionOps) {
for (Value *Ignore : RdxOps) {
if (!Ignore)
continue;
#ifndef NDEBUG
for (auto *U : Ignore->users()) {
assert(IgnoreSet.count(U) &&
"All users must be in the reduction ops list.");
}
#endif
if (!Ignore->use_empty()) {
Value *Undef = UndefValue::get(Ignore->getType());
Ignore->replaceAllUsesWith(Undef);
}
V.eraseInstruction(cast<Instruction>(Ignore));
}
}
} else if (!CheckForReusedReductionOps) {
for (ReductionOpsType &RdxOps : ReductionOps)
for (Value *RdxOp : RdxOps)
V.analyzedReductionRoot(cast<Instruction>(RdxOp));
}
return VectorizedTree;
}
private:
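/// Calculates the cost of a reduction: the vector reduction cost minus the
/// cost of the equivalent scalar operations.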
InstructionCost getReductionCost(TargetTransformInfo *TTI,
ArrayRef<Value *> ReducedVals,
unsigned ReduxWidth, FastMathFlags FMF) {
TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
Value *FirstReducedVal = ReducedVals.front();
Type *ScalarTy = FirstReducedVal->getType();
FixedVectorType *VectorTy = FixedVectorType::get(ScalarTy, ReduxWidth);
InstructionCost VectorCost = 0, ScalarCost;
bool AllConsts = all_of(ReducedVals, isConstant);
switch (RdxKind) {
case RecurKind::Add:
case RecurKind::Mul:
case RecurKind::Or:
case RecurKind::And:
case RecurKind::Xor:
case RecurKind::FAdd:
case RecurKind::FMul: {
unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(RdxKind);
if (!AllConsts)
VectorCost =
TTI->getArithmeticReductionCost(RdxOpcode, VectorTy, FMF, CostKind);
ScalarCost = TTI->getArithmeticInstrCost(RdxOpcode, ScalarTy, CostKind);
break;
}
case RecurKind::FMax:
case RecurKind::FMin: {
auto *SclCondTy = CmpInst::makeCmpResultType(ScalarTy);
if (!AllConsts) {
auto *VecCondTy =
cast<VectorType>(CmpInst::makeCmpResultType(VectorTy));
VectorCost =
TTI->getMinMaxReductionCost(VectorTy, VecCondTy,
false, CostKind);
}
CmpInst::Predicate RdxPred = getMinMaxReductionPredicate(RdxKind);
ScalarCost = TTI->getCmpSelInstrCost(Instruction::FCmp, ScalarTy,
SclCondTy, RdxPred, CostKind) +
TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy,
SclCondTy, RdxPred, CostKind);
break;
}
case RecurKind::SMax:
case RecurKind::SMin:
case RecurKind::UMax:
case RecurKind::UMin: {
auto *SclCondTy = CmpInst::makeCmpResultType(ScalarTy);
if (!AllConsts) {
auto *VecCondTy =
cast<VectorType>(CmpInst::makeCmpResultType(VectorTy));
bool IsUnsigned =
RdxKind == RecurKind::UMax || RdxKind == RecurKind::UMin;
VectorCost = TTI->getMinMaxReductionCost(VectorTy, VecCondTy,
IsUnsigned, CostKind);
}
CmpInst::Predicate RdxPred = getMinMaxReductionPredicate(RdxKind);
ScalarCost = TTI->getCmpSelInstrCost(Instruction::ICmp, ScalarTy,
SclCondTy, RdxPred, CostKind) +
TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy,
SclCondTy, RdxPred, CostKind);
break;
}
default:
llvm_unreachable("Expected arithmetic or min/max reduction operation");
}
ScalarCost *= (ReduxWidth - 1);
LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VectorCost - ScalarCost
<< " for reduction that starts with " << *FirstReducedVal
<< " (It is a splitting reduction)\n");
return VectorCost - ScalarCost;
}
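/// Emits a single horizontal reduction of the vectorized value via
/// createSimpleTargetReduction.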
Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder,
unsigned ReduxWidth, const TargetTransformInfo *TTI) {
assert(VectorizedValue && "Need to have a vectorized tree node");
assert(isPowerOf2_32(ReduxWidth) &&
"We only handle power-of-two reductions for now");
assert(RdxKind != RecurKind::FMulAdd &&
"A call to the llvm.fmuladd intrinsic is not handled yet");
++NumVectorInstructions;
return createSimpleTargetReduction(Builder, TTI, VectorizedValue, RdxKind);
}
};
} // end anonymous namespace
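/// \returns the total number of scalar elements in the aggregate that
/// \p InsertInst builds, or None when the aggregate is not homogeneous.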
static Optional<unsigned> getAggregateSize(Instruction *InsertInst) {
if (auto *IE = dyn_cast<InsertElementInst>(InsertInst))
return cast<FixedVectorType>(IE->getType())->getNumElements();
unsigned AggregateSize = 1;
auto *IV = cast<InsertValueInst>(InsertInst);
Type *CurrentType = IV->getType();
do {
if (auto *ST = dyn_cast<StructType>(CurrentType)) {
for (auto *Elt : ST->elements())
if (Elt != ST->getElementType(0))
return None;
AggregateSize *= ST->getNumElements();
CurrentType = ST->getElementType(0);
} else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) {
AggregateSize *= AT->getNumElements();
CurrentType = AT->getElementType();
} else if (auto *VT = dyn_cast<FixedVectorType>(CurrentType)) {
AggregateSize *= VT->getNumElements();
return AggregateSize;
} else if (CurrentType->isSingleValueType()) {
return AggregateSize;
} else {
return None;
}
} while (true);
}
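/// Recursively walks a chain of insertelement/insertvalue instructions,
/// recording each inserted operand and its insert instruction at the
/// corresponding aggregate index.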
static void findBuildAggregate_rec(Instruction *LastInsertInst,
TargetTransformInfo *TTI,
SmallVectorImpl<Value *> &BuildVectorOpds,
SmallVectorImpl<Value *> &InsertElts,
unsigned OperandOffset) {
do {
Value *InsertedOperand = LastInsertInst->getOperand(1);
Optional<unsigned> OperandIndex =
getInsertIndex(LastInsertInst, OperandOffset);
if (!OperandIndex)
return;
if (isa<InsertElementInst>(InsertedOperand) ||
isa<InsertValueInst>(InsertedOperand)) {
findBuildAggregate_rec(cast<Instruction>(InsertedOperand), TTI,
BuildVectorOpds, InsertElts, *OperandIndex);
} else {
BuildVectorOpds[*OperandIndex] = InsertedOperand;
InsertElts[*OperandIndex] = LastInsertInst;
}
LastInsertInst = dyn_cast<Instruction>(LastInsertInst->getOperand(0));
} while (LastInsertInst != nullptr &&
(isa<InsertValueInst>(LastInsertInst) ||
isa<InsertElementInst>(LastInsertInst)) &&
LastInsertInst->hasOneUse());
}
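/// Recognizes an aggregate build sequence ending at \p LastInsertInst;
/// \returns true and fills \p BuildVectorOpds and \p InsertElts when at
/// least two inserted operands are found.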
static bool findBuildAggregate(Instruction *LastInsertInst,
TargetTransformInfo *TTI,
SmallVectorImpl<Value *> &BuildVectorOpds,
SmallVectorImpl<Value *> &InsertElts) {
assert((isa<InsertElementInst>(LastInsertInst) ||
isa<InsertValueInst>(LastInsertInst)) &&
"Expected insertelement or insertvalue instruction!");
assert((BuildVectorOpds.empty() && InsertElts.empty()) &&
"Expected empty result vectors!");
Optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst);
if (!AggregateSize)
return false;
BuildVectorOpds.resize(*AggregateSize);
InsertElts.resize(*AggregateSize);
findBuildAggregate_rec(LastInsertInst, TTI, BuildVectorOpds, InsertElts, 0);
llvm::erase_value(BuildVectorOpds, nullptr);
llvm::erase_value(InsertElts, nullptr);
if (BuildVectorOpds.size() >= 2)
return true;
return false;
}
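/// Tries to find the value reduced into the phi node \p P, looking first at
/// the incoming value from \p ParentBB and then at the one from the
/// enclosing loop's latch block.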
static Value *getReductionValue(const DominatorTree *DT, PHINode *P,
BasicBlock *ParentBB, LoopInfo *LI) {
auto DominatedReduxValue = [&](Value *R) {
return isa<Instruction>(R) &&
DT->dominates(P->getParent(), cast<Instruction>(R)->getParent());
};
Value *Rdx = nullptr;
if (P->getIncomingBlock(0) == ParentBB) {
Rdx = P->getIncomingValue(0);
} else if (P->getIncomingBlock(1) == ParentBB) {
Rdx = P->getIncomingValue(1);
}
if (Rdx && DominatedReduxValue(Rdx))
return Rdx;
Loop *BBL = LI->getLoopFor(ParentBB);
if (!BBL)
return nullptr;
BasicBlock *BBLatch = BBL->getLoopLatch();
if (!BBLatch)
return nullptr;
if (P->getIncomingBlock(0) == BBLatch) {
Rdx = P->getIncomingValue(0);
} else if (P->getIncomingBlock(1) == BBLatch) {
Rdx = P->getIncomingValue(1);
}
if (Rdx && DominatedReduxValue(Rdx))
return Rdx;
return nullptr;
}
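/// Matches a binary operator or a min/max intrinsic, extracting its two
/// operands into \p V0 and \p V1.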
static bool matchRdxBop(Instruction *I, Value *&V0, Value *&V1) {
if (match(I, m_BinOp(m_Value(V0), m_Value(V1))))
return true;
if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(V0), m_Value(V1))))
return true;
if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(V0), m_Value(V1))))
return true;
if (match(I, m_Intrinsic<Intrinsic::smax>(m_Value(V0), m_Value(V1))))
return true;
if (match(I, m_Intrinsic<Intrinsic::smin>(m_Value(V0), m_Value(V1))))
return true;
if (match(I, m_Intrinsic<Intrinsic::umax>(m_Value(V0), m_Value(V1))))
return true;
if (match(I, m_Intrinsic<Intrinsic::umin>(m_Value(V0), m_Value(V1))))
return true;
return false;
}
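/// Attempts to match and vectorize a horizontal reduction rooted at \p Root
/// (possibly feeding the phi node \p P). If no reduction is matched, the
/// operands of the visited instructions are queued (breadth-first, up to
/// RecursionMaxDepth) and vectorization of the postponed instructions is
/// attempted via \p Vectorize.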
static bool tryToVectorizeHorReductionOrInstOperands(
PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R,
TargetTransformInfo *TTI, ScalarEvolution &SE, const DataLayout &DL,
const TargetLibraryInfo &TLI,
const function_ref<bool(Instruction *, BoUpSLP &)> Vectorize) {
if (!ShouldVectorizeHor)
return false;
if (!Root)
return false;
if (Root->getParent() != BB || isa<PHINode>(Root))
return false;
std::queue<std::pair<Instruction *, unsigned>> Stack;
Stack.emplace(Root, 0);
SmallPtrSet<Value *, 8> VisitedInstrs;
SmallVector<WeakTrackingVH> PostponedInsts;
bool Res = false;
auto &&TryToReduce = [TTI, &SE, &DL, &P, &R, &TLI](Instruction *Inst,
Value *&B0,
Value *&B1) -> Value * {
if (R.isAnalyzedReductionRoot(Inst))
return nullptr;
bool IsBinop = matchRdxBop(Inst, B0, B1);
bool IsSelect = match(Inst, m_Select(m_Value(), m_Value(), m_Value()));
if (IsBinop || IsSelect) {
HorizontalReduction HorRdx;
if (HorRdx.matchAssociativeReduction(P, Inst, SE, DL, TLI))
return HorRdx.tryToReduce(R, TTI);
}
return nullptr;
};
while (!Stack.empty()) {
Instruction *Inst;
unsigned Level;
std::tie(Inst, Level) = Stack.front();
Stack.pop();
if (R.isDeleted(Inst))
continue;
Value *B0 = nullptr, *B1 = nullptr;
if (Value *V = TryToReduce(Inst, B0, B1)) {
Res = true;
P = nullptr;
if (auto *I = dyn_cast<Instruction>(V)) {
Stack.emplace(I, Level);
continue;
}
} else {
bool IsBinop = B0 && B1;
if (P && IsBinop) {
Inst = dyn_cast<Instruction>(B0);
if (Inst == P)
Inst = dyn_cast<Instruction>(B1);
if (!Inst) {
P = nullptr;
continue;
}
}
P = nullptr;
if (!isa<CmpInst, InsertElementInst, InsertValueInst>(Inst))
PostponedInsts.push_back(Inst);
}
if (++Level < RecursionMaxDepth)
for (auto *Op : Inst->operand_values())
if (VisitedInstrs.insert(Op).second)
if (auto *I = dyn_cast<Instruction>(Op))
if (!isa<PHINode, CmpInst, InsertElementInst, InsertValueInst>(I) &&
!R.isDeleted(I) && I->getParent() == BB)
Stack.emplace(I, Level);
}
for (Value *V : PostponedInsts)
if (auto *Inst = dyn_cast<Instruction>(V))
if (!R.isDeleted(Inst))
Res |= Vectorize(Inst, R);
return Res;
}
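/// Entry point for reduction vectorization starting at value \p V, possibly
/// feeding the phi node \p P.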
bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V,
BasicBlock *BB, BoUpSLP &R,
TargetTransformInfo *TTI) {
auto *I = dyn_cast_or_null<Instruction>(V);
if (!I)
return false;
if (!isa<BinaryOperator>(I))
P = nullptr;
auto &&ExtraVectorization = [this](Instruction *I, BoUpSLP &R) -> bool {
return tryToVectorize(I, R);
};
return tryToVectorizeHorReductionOrInstOperands(P, I, BB, R, TTI, *SE, *DL,
*TLI, ExtraVectorization);
}
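/// Tries to vectorize the aggregate build sequence ending at insertvalue
/// instruction \p IVI.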
bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI,
BasicBlock *BB, BoUpSLP &R) {
const DataLayout &DL = BB->getModule()->getDataLayout();
if (!R.canMapToVector(IVI->getType(), DL))
return false;
SmallVector<Value *, 16> BuildVectorOpds;
SmallVector<Value *, 16> BuildVectorInsts;
if (!findBuildAggregate(IVI, TTI, BuildVectorOpds, BuildVectorInsts))
return false;
LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n");
return tryToVectorizeList(BuildVectorOpds, R);
}
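/// Tries to vectorize the vector build sequence ending at insertelement
/// instruction \p IEI.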
bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
BasicBlock *BB, BoUpSLP &R) {
SmallVector<Value *, 16> BuildVectorInsts;
SmallVector<Value *, 16> BuildVectorOpds;
SmallVector<int> Mask;
if (!findBuildAggregate(IEI, TTI, BuildVectorOpds, BuildVectorInsts) ||
(llvm::all_of(
BuildVectorOpds,
[](Value *V) { return isa<ExtractElementInst, UndefValue>(V); }) &&
isFixedVectorShuffle(BuildVectorOpds, Mask)))
return false;
LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IEI << "\n");
return tryToVectorizeList(BuildVectorInsts, R);
}
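/// Sorts \p Incoming with \p Comparator, groups runs of compatible
/// instructions with \p AreCompatible, and invokes \p TryToVectorizeHelper on
/// each group, falling back to leftover candidates of the same type.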
template <typename T>
static bool
tryToVectorizeSequence(SmallVectorImpl<T *> &Incoming,
function_ref<unsigned(T *)> Limit,
function_ref<bool(T *, T *)> Comparator,
function_ref<bool(T *, T *)> AreCompatible,
function_ref<bool(ArrayRef<T *>, bool)> TryToVectorizeHelper,
bool LimitForRegisterSize) {
bool Changed = false;
stable_sort(Incoming, Comparator);
SmallVector<T *> Candidates;
for (auto *IncIt = Incoming.begin(), *E = Incoming.end(); IncIt != E;) {
auto *SameTypeIt = IncIt;
while (SameTypeIt != E && AreCompatible(*SameTypeIt, *IncIt))
++SameTypeIt;
unsigned NumElts = (SameTypeIt - IncIt);
LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at nodes ("
<< NumElts << ")\n");
if (NumElts > 1 &&
TryToVectorizeHelper(makeArrayRef(IncIt, NumElts), LimitForRegisterSize)) {
Changed = true;
} else if (NumElts < Limit(*IncIt) &&
(Candidates.empty() ||
Candidates.front()->getType() == (*IncIt)->getType())) {
Candidates.append(IncIt, std::next(IncIt, NumElts));
}
    // Flush the accumulated candidates once the run of same-typed nodes
    // ends: try the whole list first; if that fails and the register-size
    // limit was in effect, retry it split into compatible sub-groups.
    if (Candidates.size() > 1 &&
        (SameTypeIt == E || (*SameTypeIt)->getType() != (*IncIt)->getType())) {
if (TryToVectorizeHelper(Candidates, false)) {
Changed = true;
} else if (LimitForRegisterSize) {
for (auto *It = Candidates.begin(), *End = Candidates.end();
It != End;) {
auto *SameTypeIt = It;
while (SameTypeIt != End && AreCompatible(*SameTypeIt, *It))
++SameTypeIt;
unsigned NumElts = (SameTypeIt - It);
if (NumElts > 1 && TryToVectorizeHelper(makeArrayRef(It, NumElts),
false))
Changed = true;
It = SameTypeIt;
}
}
Candidates.clear();
}
IncIt = SameTypeIt;
}
return Changed;
}
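/// Compares two compare instructions. With \p IsCompatibility set this acts
/// as an equivalence check ("can these be vectorized together?"); without it
/// the same walk yields a strict-weak "less than" used for sorting, which is
/// why the ordered exits return !IsCompatibility while the equal/greater
/// exits return false.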
template <bool IsCompatibility>
static bool compareCmp(Value *V, Value *V2,
function_ref<bool(Instruction *)> IsDeleted) {
auto *CI1 = cast<CmpInst>(V);
auto *CI2 = cast<CmpInst>(V2);
if (IsDeleted(CI2) || !isValidElementType(CI2->getType()))
return false;
if (CI1->getOperand(0)->getType()->getTypeID() <
CI2->getOperand(0)->getType()->getTypeID())
return !IsCompatibility;
if (CI1->getOperand(0)->getType()->getTypeID() >
CI2->getOperand(0)->getType()->getTypeID())
return false;
  // Normalize each predicate against its swapped form so that a compare and
  // its commuted equivalent (e.g. "icmp sgt %a, %b" and "icmp slt %b, %a")
  // sort next to each other.
  CmpInst::Predicate Pred1 = CI1->getPredicate();
  CmpInst::Predicate Pred2 = CI2->getPredicate();
  CmpInst::Predicate SwapPred1 = CmpInst::getSwappedPredicate(Pred1);
  CmpInst::Predicate SwapPred2 = CmpInst::getSwappedPredicate(Pred2);
  CmpInst::Predicate BasePred1 = std::min(Pred1, SwapPred1);
  CmpInst::Predicate BasePred2 = std::min(Pred2, SwapPred2);
  if (BasePred1 < BasePred2)
    return !IsCompatibility;
  if (BasePred1 > BasePred2)
    return false;
  // Compare operands. If one predicate is the swapped form of the other,
  // walk the operands of the instruction with the greater predicate value
  // in reverse so that semantically corresponding operands line up.
  bool LEPreds = Pred1 <= Pred2;
  bool GEPreds = Pred1 >= Pred2;
  for (int I = 0, E = CI1->getNumOperands(); I < E; ++I) {
    auto *Op1 = CI1->getOperand(LEPreds ? I : E - I - 1);
    auto *Op2 = CI2->getOperand(GEPreds ? I : E - I - 1);
if (Op1->getValueID() < Op2->getValueID())
return !IsCompatibility;
if (Op1->getValueID() > Op2->getValueID())
return false;
if (auto *I1 = dyn_cast<Instruction>(Op1))
if (auto *I2 = dyn_cast<Instruction>(Op2)) {
if (I1->getParent() != I2->getParent())
return false;
InstructionsState S = getSameOpcode({I1, I2});
if (S.getOpcode())
continue;
return false;
}
}
return IsCompatibility;
}
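/// Vectorizes insertvalue/insertelement sequences immediately and tries each
/// instruction as a reduction root; compares are postponed until the block
/// terminator is reached (or carried over in \p Instructions for the next
/// call), so that reduction matching gets the first shot at them.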
bool SLPVectorizerPass::vectorizeSimpleInstructions(
SmallVectorImpl<Instruction *> &Instructions, BasicBlock *BB, BoUpSLP &R,
bool AtTerminator) {
bool OpsChanged = false;
SmallVector<Instruction *, 4> PostponedCmps;
for (auto *I : reverse(Instructions)) {
if (R.isDeleted(I))
continue;
if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I)) {
OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R);
} else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I)) {
OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R);
} else if (isa<CmpInst>(I)) {
PostponedCmps.push_back(I);
continue;
}
OpsChanged |= vectorizeRootInstruction(nullptr, I, BB, R, TTI);
}
  if (AtTerminator) {
    // The postponed compares are finally processed at the terminator: first
    // try to grow reductions from their operands, then vectorize each
    // compare tree, and lastly try the survivors as sorted compatible lists.
    for (Instruction *I : PostponedCmps) {
if (R.isDeleted(I))
continue;
for (Value *Op : I->operands())
OpsChanged |= vectorizeRootInstruction(nullptr, Op, BB, R, TTI);
}
for (Instruction *I : PostponedCmps) {
if (R.isDeleted(I))
continue;
OpsChanged |= tryToVectorize(I, R);
}
auto &&CompareSorter = [&R](Value *V, Value *V2) {
return compareCmp<false>(V, V2,
[&R](Instruction *I) { return R.isDeleted(I); });
};
auto &&AreCompatibleCompares = [&R](Value *V1, Value *V2) {
if (V1 == V2)
return true;
return compareCmp<true>(V1, V2,
[&R](Instruction *I) { return R.isDeleted(I); });
};
auto Limit = [&R](Value *V) {
unsigned EltSize = R.getVectorElementSize(V);
return std::max(2U, R.getMaxVecRegSize() / EltSize);
};
SmallVector<Value *> Vals(PostponedCmps.begin(), PostponedCmps.end());
OpsChanged |= tryToVectorizeSequence<Value>(
Vals, Limit, CompareSorter, AreCompatibleCompares,
[this, &R](ArrayRef<Value *> Candidates, bool LimitForRegisterSize) {
bool ArePossiblyReducedInOtherBlock =
any_of(Candidates, [](Value *V) {
return any_of(V->users(), [V](User *U) {
return isa<SelectInst>(U) &&
cast<SelectInst>(U)->getParent() !=
cast<Instruction>(V)->getParent();
});
});
if (ArePossiblyReducedInOtherBlock)
return false;
return tryToVectorizeList(Candidates, R, LimitForRegisterSize);
},
true);
Instructions.clear();
} else {
Instructions.assign(PostponedCmps.rbegin(), PostponedCmps.rend());
}
return OpsChanged;
}
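/// Vectorizes a basic block: PHI nodes first, iterated to a fixed point
/// since vectorizing one group can expose another, then reduction roots and
/// the postponed insert/compare sequences.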
bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
bool Changed = false;
SmallVector<Value *, 4> Incoming;
SmallPtrSet<Value *, 16> VisitedInstrs;
DenseMap<Value *, SmallVector<Value *, 4>> PHIToOpcodes;
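  // Strict-weak ordering for PHIs: by type ID, then by the number of
  // collected non-phi leaves, then leaf-by-leaf (dominance order and opcode
  // for instructions, value ID otherwise), so that compatible PHIs become
  // adjacent after sorting. Constant/undef differences only break ties.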
auto PHICompare = [this, &PHIToOpcodes](Value *V1, Value *V2) {
assert(isValidElementType(V1->getType()) &&
isValidElementType(V2->getType()) &&
"Expected vectorizable types only.");
if (V1->getType()->getTypeID() < V2->getType()->getTypeID())
return true;
if (V1->getType()->getTypeID() > V2->getType()->getTypeID())
return false;
ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1];
ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2];
if (Opcodes1.size() < Opcodes2.size())
return true;
if (Opcodes1.size() > Opcodes2.size())
return false;
    // Differences among constants and undefs are remembered as a tie-breaker
    // only; the scan keeps looking for a stronger, instruction-based order.
    Optional<bool> ConstOrder;
    for (int I = 0, E = Opcodes1.size(); I < E; ++I) {
      if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I])) {
        // Defined values sort before undefs.
        if (!ConstOrder)
          ConstOrder =
              !isa<UndefValue>(Opcodes1[I]) && isa<UndefValue>(Opcodes2[I]);
        continue;
}
if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I]))
if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) {
DomTreeNodeBase<BasicBlock> *NodeI1 = DT->getNode(I1->getParent());
DomTreeNodeBase<BasicBlock> *NodeI2 = DT->getNode(I2->getParent());
if (!NodeI1)
return NodeI2 != nullptr;
if (!NodeI2)
return false;
assert((NodeI1 == NodeI2) ==
(NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
"Different nodes should have different DFS numbers");
if (NodeI1 != NodeI2)
return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
InstructionsState S = getSameOpcode({I1, I2});
if (S.getOpcode())
continue;
return I1->getOpcode() < I2->getOpcode();
}
if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I])) {
if (!ConstOrder)
ConstOrder = Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID();
continue;
}
if (Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID())
return true;
if (Opcodes1[I]->getValueID() > Opcodes2[I]->getValueID())
return false;
}
return ConstOrder && *ConstOrder;
};
auto AreCompatiblePHIs = [&PHIToOpcodes](Value *V1, Value *V2) {
if (V1 == V2)
return true;
if (V1->getType() != V2->getType())
return false;
ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1];
ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2];
if (Opcodes1.size() != Opcodes2.size())
return false;
for (int I = 0, E = Opcodes1.size(); I < E; ++I) {
if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I]))
continue;
if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I]))
if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) {
if (I1->getParent() != I2->getParent())
return false;
InstructionsState S = getSameOpcode({I1, I2});
if (S.getOpcode())
continue;
return false;
}
if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I]))
continue;
if (Opcodes1[I]->getValueID() != Opcodes2[I]->getValueID())
return false;
}
return true;
};
auto Limit = [&R](Value *V) {
unsigned EltSize = R.getVectorElementSize(V);
return std::max(2U, R.getMaxVecRegSize() / EltSize);
};
bool HaveVectorizedPhiNodes = false;
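  // Collect the candidate PHIs and, for each, the non-phi leaves reachable
  // through chains of incoming phis; the leaves feed the comparators above.
  // Repeat until no further phi groups can be vectorized.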
do {
Incoming.clear();
for (Instruction &I : *BB) {
PHINode *P = dyn_cast<PHINode>(&I);
if (!P)
break;
if (!VisitedInstrs.count(P) && !R.isDeleted(P) &&
isValidElementType(P->getType()))
Incoming.push_back(P);
}
for (Value *V : Incoming) {
SmallVectorImpl<Value *> &Opcodes =
PHIToOpcodes.try_emplace(V).first->getSecond();
if (!Opcodes.empty())
continue;
SmallVector<Value *, 4> Nodes(1, V);
SmallPtrSet<Value *, 4> Visited;
while (!Nodes.empty()) {
auto *PHI = cast<PHINode>(Nodes.pop_back_val());
if (!Visited.insert(PHI).second)
continue;
for (Value *V : PHI->incoming_values()) {
if (auto *PHI1 = dyn_cast<PHINode>((V))) {
Nodes.push_back(PHI1);
continue;
}
Opcodes.emplace_back(V);
}
}
}
HaveVectorizedPhiNodes = tryToVectorizeSequence<Value>(
Incoming, Limit, PHICompare, AreCompatiblePHIs,
[this, &R](ArrayRef<Value *> Candidates, bool LimitForRegisterSize) {
return tryToVectorizeList(Candidates, R, LimitForRegisterSize);
},
true);
Changed |= HaveVectorizedPhiNodes;
VisitedInstrs.insert(Incoming.begin(), Incoming.end());
} while (HaveVectorizedPhiNodes);
VisitedInstrs.clear();
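  // Main scan over the block: try phis with two incoming values as reduction
  // roots, treat unused void-typed instructions and calls as key sinks whose
  // operand trees are worth vectorizing, and postpone inserts and compares
  // for vectorizeSimpleInstructions.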
SmallVector<Instruction *, 8> PostProcessInstructions;
SmallDenseSet<Instruction *, 4> KeyNodes;
for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
if (isa<ScalableVectorType>(it->getType()))
continue;
if (R.isDeleted(&*it))
continue;
    // Already visited on an earlier scan of this block. Key nodes still get
    // a chance to flush the postponed instructions; if that changes
    // anything, restart the scan from the top of the block.
    if (!VisitedInstrs.insert(&*it).second) {
      if (it->use_empty() && KeyNodes.contains(&*it) &&
          vectorizeSimpleInstructions(PostProcessInstructions, BB, R,
                                      it->isTerminator())) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }
      continue;
    }
if (isa<DbgInfoIntrinsic>(it))
continue;
if (PHINode *P = dyn_cast<PHINode>(it)) {
if (P->getNumIncomingValues() == 2) {
if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB, R,
TTI)) {
Changed = true;
it = BB->begin();
e = BB->end();
continue;
}
}
for (unsigned I = 0, E = P->getNumIncomingValues(); I != E; I++) {
if (BB == P->getIncomingBlock(I) ||
!DT->isReachableFromEntry(P->getIncomingBlock(I)))
continue;
Changed |= vectorizeRootInstruction(nullptr, P->getIncomingValue(I),
P->getIncomingBlock(I), R, TTI);
}
continue;
}
    // An instruction without users (a terminator, a store, or a call with an
    // ignored result) is a natural sink: remember it as a key node and try
    // to vectorize the trees feeding its operands.
    if (it->use_empty() && (it->getType()->isVoidTy() || isa<CallInst>(it) ||
                            isa<InvokeInst>(it))) {
      KeyNodes.insert(&*it);
bool OpsChanged = false;
if (ShouldStartVectorizeHorAtStore || !isa<StoreInst>(it)) {
for (auto *V : it->operand_values()) {
OpsChanged |= vectorizeRootInstruction(nullptr, V, BB, R, TTI);
}
}
OpsChanged |= vectorizeSimpleInstructions(PostProcessInstructions, BB, R,
it->isTerminator());
if (OpsChanged) {
Changed = true;
it = BB->begin();
e = BB->end();
continue;
}
}
if (isa<InsertElementInst>(it) || isa<CmpInst>(it) ||
isa<InsertValueInst>(it))
PostProcessInstructions.push_back(&*it);
}
return Changed;
}
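/// Tries to vectorize the index computations of the collected
/// getelementptrs, bucketed by their underlying base pointer. E.g.
/// (roughly):
///   %g0 = getelementptr inbounds i32, i32* %base, i64 %i0
///   %g1 = getelementptr inbounds i32, i32* %base, i64 %i1
/// may have {%i0, %i1} bundled and vectorized.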
bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
auto Changed = false;
for (auto &Entry : GEPs) {
if (Entry.second.size() < 2)
continue;
LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
<< Entry.second.size() << ".\n");
unsigned MaxVecRegSize = R.getMaxVecRegSize();
unsigned EltSize = R.getVectorElementSize(*Entry.second[0]->idx_begin());
if (MaxVecRegSize < EltSize)
continue;
unsigned MaxElts = MaxVecRegSize / EltSize;
for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += MaxElts) {
auto Len = std::min<unsigned>(BE - BI, MaxElts);
ArrayRef<GetElementPtrInst *> GEPList(&Entry.second[BI], Len);
SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());
      // Some candidates may already have been vectorized (and deleted) while
      // processing an earlier bundle; drop them.
      Candidates.remove_if(
          [&R](Value *I) { return R.isDeleted(cast<Instruction>(I)); });
      // Remove pairs of getelementptrs whose addresses differ by a constant
      // (one address is cheaply computable from the other), and duplicates
      // of the same index value, which would make a useless bundle.
      for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) {
        auto *GEPI = GEPList[I];
        if (!Candidates.count(GEPI))
          continue;
        auto *SCEVI = SE->getSCEV(GEPList[I]);
        for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
          auto *GEPJ = GEPList[J];
          auto *SCEVJ = SE->getSCEV(GEPList[J]);
          if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
            Candidates.remove(GEPI);
            Candidates.remove(GEPJ);
          } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
            Candidates.remove(GEPJ);
          }
}
}
if (Candidates.size() < 2)
continue;
SmallVector<Value *, 16> Bundle(Candidates.size());
auto BundleIndex = 0u;
for (auto *V : Candidates) {
auto *GEP = cast<GetElementPtrInst>(V);
auto *GEPIdx = GEP->idx_begin()->get();
assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
Bundle[BundleIndex++] = GEPIdx;
}
Changed |= tryToVectorizeList(Bundle, R);
}
}
return Changed;
}
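/// Sorts the collected stores by pointer-operand type, stored value and
/// dominance order, groups compatible ones, and hands each group to
/// vectorizeStores as a potential chain.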
bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
bool Changed = false;
auto &&StoreSorter = [this](StoreInst *V, StoreInst *V2) {
if (V->getPointerOperandType()->getTypeID() <
V2->getPointerOperandType()->getTypeID())
return true;
if (V->getPointerOperandType()->getTypeID() >
V2->getPointerOperandType()->getTypeID())
return false;
    // Undef stored values carry no useful ordering information.
    if (isa<UndefValue>(V->getValueOperand()) ||
        isa<UndefValue>(V2->getValueOperand()))
      return false;
if (auto *I1 = dyn_cast<Instruction>(V->getValueOperand()))
if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
DomTreeNodeBase<llvm::BasicBlock> *NodeI1 =
DT->getNode(I1->getParent());
DomTreeNodeBase<llvm::BasicBlock> *NodeI2 =
DT->getNode(I2->getParent());
assert(NodeI1 && "Should only process reachable instructions");
assert(NodeI2 && "Should only process reachable instructions");
assert((NodeI1 == NodeI2) ==
(NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
"Different nodes should have different DFS numbers");
if (NodeI1 != NodeI2)
return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
InstructionsState S = getSameOpcode({I1, I2});
if (S.getOpcode())
return false;
return I1->getOpcode() < I2->getOpcode();
}
if (isa<Constant>(V->getValueOperand()) &&
isa<Constant>(V2->getValueOperand()))
return false;
return V->getValueOperand()->getValueID() <
V2->getValueOperand()->getValueID();
};
auto &&AreCompatibleStores = [](StoreInst *V1, StoreInst *V2) {
if (V1 == V2)
return true;
if (V1->getPointerOperandType() != V2->getPointerOperandType())
return false;
if (isa<UndefValue>(V1->getValueOperand()) ||
isa<UndefValue>(V2->getValueOperand()))
return true;
if (auto *I1 = dyn_cast<Instruction>(V1->getValueOperand()))
if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
if (I1->getParent() != I2->getParent())
return false;
InstructionsState S = getSameOpcode({I1, I2});
return S.getOpcode() > 0;
}
if (isa<Constant>(V1->getValueOperand()) &&
isa<Constant>(V2->getValueOperand()))
return true;
return V1->getValueOperand()->getValueID() ==
V2->getValueOperand()->getValueID();
};
auto Limit = [&R, this](StoreInst *SI) {
unsigned EltSize = DL->getTypeSizeInBits(SI->getValueOperand()->getType());
return R.getMinVF(EltSize);
};
for (auto &Pair : Stores) {
if (Pair.second.size() < 2)
continue;
LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
<< Pair.second.size() << ".\n");
if (!isValidElementType(Pair.second.front()->getValueOperand()->getType()))
continue;
Changed |= tryToVectorizeSequence<StoreInst>(
Pair.second, Limit, StoreSorter, AreCompatibleStores,
[this, &R](ArrayRef<StoreInst *> Candidates, bool) {
return vectorizeStores(Candidates, R);
},
false);
}
return Changed;
}
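// Legacy pass-manager registration of the SLP vectorizer pass and the
// analyses it depends on.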
char SLPVectorizer::ID = 0;
static const char lv_name[] = "SLP Vectorizer";
INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)
Pass *llvm::createSLPVectorizerPass() { return new SLPVectorizer(); }