#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ArrayRecycler.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Scalar/GVNExpression.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <utility>
using namespace llvm;
#define DEBUG_TYPE "gvn-sink"
STATISTIC(NumRemoved, "Number of instructions removed");
namespace llvm {
namespace GVNExpression {
// Debug helper: print this expression to the debug stream, newline-terminated.
LLVM_DUMP_METHOD void Expression::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
} }
namespace {
static bool isMemoryInst(const Instruction *I) {
return isa<LoadInst>(I) || isa<StoreInst>(I) ||
(isa<InvokeInst>(I) && !cast<InvokeInst>(I)->doesNotAccessMemory()) ||
(isa<CallInst>(I) && !cast<CallInst>(I)->doesNotAccessMemory());
}
class LockstepReverseIterator {
ArrayRef<BasicBlock *> Blocks;
SmallSetVector<BasicBlock *, 4> ActiveBlocks;
SmallVector<Instruction *, 4> Insts;
bool Fail;
public:
LockstepReverseIterator(ArrayRef<BasicBlock *> Blocks) : Blocks(Blocks) {
reset();
}
void reset() {
Fail = false;
ActiveBlocks.clear();
for (BasicBlock *BB : Blocks)
ActiveBlocks.insert(BB);
Insts.clear();
for (BasicBlock *BB : Blocks) {
if (BB->size() <= 1) {
ActiveBlocks.remove(BB);
continue;
}
Insts.push_back(BB->getTerminator()->getPrevNode());
}
if (Insts.empty())
Fail = true;
}
bool isValid() const { return !Fail; }
ArrayRef<Instruction *> operator*() const { return Insts; }
SmallSetVector<BasicBlock *, 4> &getActiveBlocks() { return ActiveBlocks; }
void restrictToBlocks(SmallSetVector<BasicBlock *, 4> &Blocks) {
for (auto II = Insts.begin(); II != Insts.end();) {
if (!llvm::is_contained(Blocks, (*II)->getParent())) {
ActiveBlocks.remove((*II)->getParent());
II = Insts.erase(II);
} else {
++II;
}
}
}
void operator--() {
if (Fail)
return;
SmallVector<Instruction *, 4> NewInsts;
for (auto *Inst : Insts) {
if (Inst == &Inst->getParent()->front())
ActiveBlocks.remove(Inst->getParent());
else
NewInsts.push_back(Inst->getPrevNode());
}
if (NewInsts.empty()) {
Fail = true;
return;
}
Insts = NewInsts;
}
};
/// One possible sinking solution: how many instructions to sink, from how
/// many predecessor blocks, and at the price of how many new PHIs.
struct SinkingInstructionCandidate {
  unsigned NumBlocks;
  unsigned NumInstructions;
  unsigned NumPHIs;
  unsigned NumMemoryInsts;
  int Cost = -1;
  SmallVector<BasicBlock *, 4> Blocks;

  /// Score the candidate. Each sunk instruction saves (NumBlocks - 1)
  /// copies; every PHI beyond those already present costs quadratically,
  /// and needing to split edges costs a flat 2.
  void calculateCost(unsigned NumOrigPHIs, unsigned NumOrigBlocks) {
    unsigned NumExtraPHIs = NumPHIs - NumOrigPHIs;
    unsigned SplitEdgeCost = (NumOrigBlocks > NumBlocks) ? 2 : 0;
    // Compute in unsigned (matching the members) and convert at the end.
    unsigned Benefit = NumInstructions * (NumBlocks - 1);
    unsigned Penalty = NumExtraPHIs * NumExtraPHIs + SplitEdgeCost;
    Cost = static_cast<int>(Benefit - Penalty);
  }

  bool operator>(const SinkingInstructionCandidate &Other) const {
    return Cost > Other.Cost;
  }
};
#ifndef NDEBUG
/// Debug pretty-printer for a sinking candidate.
raw_ostream &operator<<(raw_ostream &OS, const SinkingInstructionCandidate &C) {
  OS << "<Candidate Cost=" << C.Cost;
  OS << " #Blocks=" << C.NumBlocks;
  OS << " #Insts=" << C.NumInstructions;
  OS << " #PHIs=" << C.NumPHIs << ">";
  return OS;
}
#endif
/// Describes a PHI node that may or may not exist: the PHI that would have
/// to be created if a set of instructions were sunk. Stores parallel arrays
/// of incoming blocks and values, canonically ordered so equivalent PHIs
/// compare (and hash) equal.
class ModelledPHI {
  SmallVector<Value *, 4> Values;
  SmallVector<BasicBlock *, 4> Blocks;

public:
  ModelledPHI() = default;

  /// Model an existing PHI. Operands are sorted by (incoming block,
  /// incoming value) pointer order to canonicalize.
  ModelledPHI(const PHINode *PN) {
    SmallVector<std::pair<BasicBlock *, Value *>, 4> Ops;
    for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I)
      Ops.push_back({PN->getIncomingBlock(I), PN->getIncomingValue(I)});
    llvm::sort(Ops);
    for (auto &P : Ops) {
      Blocks.push_back(P.first);
      Values.push_back(P.second);
    }
  }

  /// Build a sentinel that compares unequal to any real ModelledPHI;
  /// used as DenseMap empty/tombstone keys (IDs 0 and 1).
  static ModelledPHI createDummy(size_t ID) {
    ModelledPHI M;
    M.Values.push_back(reinterpret_cast<Value *>(ID));
    return M;
  }

  /// Build from explicit value and block sequences (already ordered).
  template <typename VArray, typename BArray>
  ModelledPHI(const VArray &V, const BArray &B) {
    llvm::copy(V, std::back_inserter(Values));
    llvm::copy(B, std::back_inserter(Blocks));
  }

  /// Build from operand \p OpNum of each instruction in \p Insts, paired
  /// with the blocks in \p B.
  template <typename BArray>
  ModelledPHI(ArrayRef<Instruction *> Insts, unsigned OpNum, const BArray &B) {
    llvm::copy(B, std::back_inserter(Blocks));
    for (auto *I : Insts)
      Values.push_back(I->getOperand(OpNum));
  }

  /// Keep only the entries whose block is in \p NewBlocks; \p NewBlocks
  /// must be a subset of this PHI's blocks.
  void restrictToBlocks(const SmallSetVector<BasicBlock *, 4> &NewBlocks) {
    unsigned Idx = 0;
    while (Idx != Blocks.size()) {
      assert(Idx < Values.size());
      if (NewBlocks.count(Blocks[Idx])) {
        ++Idx;
      } else {
        // Erase block and value in lockstep to keep the arrays parallel.
        Blocks.erase(Blocks.begin() + Idx);
        Values.erase(Values.begin() + Idx);
      }
    }
    assert(Blocks.size() == NewBlocks.size());
  }

  ArrayRef<Value *> getValues() const { return Values; }

  bool areAllIncomingValuesSame() const {
    return llvm::all_of(Values, [&](Value *V) { return V == Values[0]; });
  }

  bool areAllIncomingValuesSameType() const {
    return llvm::all_of(
        Values, [&](Value *V) { return V->getType() == Values[0]->getType(); });
  }

  bool areAnyIncomingValuesConstant() const {
    return llvm::any_of(Values, [&](Value *V) { return isa<Constant>(V); });
  }

  /// Hash over the incoming values only (blocks are not hashed; equality
  /// still checks both).
  unsigned hash() const {
    return (unsigned)hash_combine_range(Values.begin(), Values.end());
  }

  bool operator==(const ModelledPHI &Other) const {
    return Values == Other.Values && Blocks == Other.Blocks;
  }
};
// Traits for storing ModelledPHI in a DenseSet. This anonymous-namespace
// template shadows llvm::DenseMapInfo and is selected by the explicit
// template argument on ModelledPHISet below. DenseSet needs distinguished
// "empty" and "tombstone" keys; they are built from dummy sentinel pointer
// values (0 and 1) that can never be real Values, and returned by reference
// to function-local statics so each key has a single canonical object.
template <typename ModelledPHI> struct DenseMapInfo {
static inline ModelledPHI &getEmptyKey() {
static ModelledPHI Dummy = ModelledPHI::createDummy(0);
return Dummy;
}
static inline ModelledPHI &getTombstoneKey() {
static ModelledPHI Dummy = ModelledPHI::createDummy(1);
return Dummy;
}
static unsigned getHashValue(const ModelledPHI &V) { return V.hash(); }
static bool isEqual(const ModelledPHI &LHS, const ModelledPHI &RHS) {
return LHS == RHS;
}
};
using ModelledPHISet = DenseSet<ModelledPHI, DenseMapInfo<ModelledPHI>>;
// The expression GVNSink numbers instructions by. Unlike GVN, the operands
// recorded here are the instruction's *users* (see the uses() loop in the
// constructor), not its operands: two instructions are congruent for sinking
// when the code consuming them matches, looking down the program rather
// than up. Memory instructions additionally carry a "memory use order" and
// volatility so they only merge when the intervening memory state matches.
class InstructionUseExpr : public GVNExpression::BasicExpression {
unsigned MemoryUseOrder = -1;
bool Volatile = false;
ArrayRef<int> ShuffleMask;
public:
InstructionUseExpr(Instruction *I, ArrayRecycler<Value *> &R,
BumpPtrAllocator &A)
: GVNExpression::BasicExpression(I->getNumUses()) {
allocateOperands(R, A);
setOpcode(I->getOpcode());
setType(I->getType());
// Copy the shuffle mask into the expression's allocator so it lives as
// long as the expression does.
if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I))
ShuffleMask = SVI->getShuffleMask().copy(A);
// Record the users, not the operands, and sort for a canonical order.
for (auto &U : I->uses())
op_push_back(U.getUser());
llvm::sort(op_begin(), op_end());
}
void setMemoryUseOrder(unsigned MUO) { MemoryUseOrder = MUO; }
void setVolatile(bool V) { Volatile = V; }
hash_code getHashValue() const override {
return hash_combine(GVNExpression::BasicExpression::getHashValue(),
MemoryUseOrder, Volatile, ShuffleMask);
}
// Hash with each operand first mapped through MapFn — used by ValueTable
// to hash on operand *value numbers* instead of raw pointers.
template <typename Function> hash_code getHashValue(Function MapFn) {
hash_code H = hash_combine(getOpcode(), getType(), MemoryUseOrder, Volatile,
ShuffleMask);
for (auto *V : operands())
H = hash_combine(H, MapFn(V));
return H;
}
};
using BasicBlocksSet = SmallPtrSet<const BasicBlock *, 32>;
// Maps Values to a value number. Two instructions get the same number when
// their InstructionUseExprs (use-based, see above) hash equal — the
// congruence condition GVNSink uses to decide instructions can be sunk.
class ValueTable {
DenseMap<Value *, uint32_t> ValueNumbering;
DenseMap<GVNExpression::Expression *, uint32_t> ExpressionNumbering;
// Maps an expression's hash_code to its number, so structurally identical
// expressions allocated separately still share one number.
DenseMap<size_t, uint32_t> HashNumbering;
BumpPtrAllocator Allocator;
ArrayRecycler<Value *> Recycler;
uint32_t nextValueNumber = 1;
BasicBlocksSet ReachableBBs;
// Create an expression for I; memory instructions get a memory-use order,
// and compares fold their predicate into the opcode so compares with
// different predicates never merge.
InstructionUseExpr *createExpr(Instruction *I) {
InstructionUseExpr *E =
new (Allocator) InstructionUseExpr(I, Recycler, Allocator);
if (isMemoryInst(I))
E->setMemoryUseOrder(getMemoryUseOrder(I));
if (CmpInst *C = dyn_cast<CmpInst>(I)) {
CmpInst::Predicate Predicate = C->getPredicate();
E->setOpcode((C->getOpcode() << 8) | Predicate);
}
return E;
}
// Create an expression for a load/store. Returns nullptr — and thus keeps
// the instruction unnumbered/unmergeable — for atomic or ordered accesses.
template <class Inst> InstructionUseExpr *createMemoryExpr(Inst *I) {
if (isStrongerThanUnordered(I->getOrdering()) || I->isAtomic())
return nullptr;
InstructionUseExpr *E = createExpr(I);
E->setVolatile(I->isVolatile());
return E;
}
public:
ValueTable() = default;
// Set the blocks reachable from the entry; instructions in unreachable
// blocks are refused a number (see lookupOrAdd).
void setReachableBBs(const BasicBlocksSet &ReachableBBs) {
this->ReachableBBs = ReachableBBs;
}
// Returns the value number for V, assigning a new one if it did not have
// one before. Returns ~0U for instructions in unreachable blocks.
uint32_t lookupOrAdd(Value *V) {
auto VI = ValueNumbering.find(V);
if (VI != ValueNumbering.end())
return VI->second;
if (!isa<Instruction>(V)) {
// Non-instructions (arguments, constants, ...) each get a fresh number.
ValueNumbering[V] = nextValueNumber;
return nextValueNumber++;
}
Instruction *I = cast<Instruction>(V);
if (!ReachableBBs.contains(I->getParent()))
return ~0U;
InstructionUseExpr *exp = nullptr;
switch (I->getOpcode()) {
case Instruction::Load:
exp = createMemoryExpr(cast<LoadInst>(I));
break;
case Instruction::Store:
exp = createMemoryExpr(cast<StoreInst>(I));
break;
// All opcodes below are modelled by a plain use-based expression.
case Instruction::Call:
case Instruction::Invoke:
case Instruction::FNeg:
case Instruction::Add:
case Instruction::FAdd:
case Instruction::Sub:
case Instruction::FSub:
case Instruction::Mul:
case Instruction::FMul:
case Instruction::UDiv:
case Instruction::SDiv:
case Instruction::FDiv:
case Instruction::URem:
case Instruction::SRem:
case Instruction::FRem:
case Instruction::Shl:
case Instruction::LShr:
case Instruction::AShr:
case Instruction::And:
case Instruction::Or:
case Instruction::Xor:
case Instruction::ICmp:
case Instruction::FCmp:
case Instruction::Trunc:
case Instruction::ZExt:
case Instruction::SExt:
case Instruction::FPToUI:
case Instruction::FPToSI:
case Instruction::UIToFP:
case Instruction::SIToFP:
case Instruction::FPTrunc:
case Instruction::FPExt:
case Instruction::PtrToInt:
case Instruction::IntToPtr:
case Instruction::BitCast:
case Instruction::AddrSpaceCast:
case Instruction::Select:
case Instruction::ExtractElement:
case Instruction::InsertElement:
case Instruction::ShuffleVector:
case Instruction::InsertValue:
case Instruction::GetElementPtr:
exp = createExpr(I);
break;
default:
break;
}
if (!exp) {
// Unmodelled opcode (or refused memory access): unique number, never
// merged with anything else.
ValueNumbering[V] = nextValueNumber;
return nextValueNumber++;
}
uint32_t e = ExpressionNumbering[exp];
if (!e) {
// Hash on the value numbers of the operands (numbering them
// recursively) so expressions over congruent operands collide.
hash_code H = exp->getHashValue([=](Value *V) { return lookupOrAdd(V); });
auto I = HashNumbering.find(H);
if (I != HashNumbering.end()) {
e = I->second;
} else {
e = nextValueNumber++;
HashNumbering[H] = e;
ExpressionNumbering[exp] = e;
}
}
ValueNumbering[V] = e;
return e;
}
// Returns the value number of V. Asserts if V has not been numbered yet.
uint32_t lookup(Value *V) const {
auto VI = ValueNumbering.find(V);
assert(VI != ValueNumbering.end() && "Value not numbered?");
return VI->second;
}
// Removes all value numberings and resets the table for reuse.
void clear() {
ValueNumbering.clear();
ExpressionNumbering.clear();
HashNumbering.clear();
Recycler.clear(Allocator);
nextValueNumber = 1;
}
// Describes the memory state at Inst: the value number of the next
// memory-writing instruction below Inst in its block (0 if none before the
// terminator). Two instructions with equal memory-use order see the same
// downstream memory effects, which is what sinking cares about.
uint32_t getMemoryUseOrder(Instruction *Inst) {
auto *BB = Inst->getParent();
for (auto I = std::next(Inst->getIterator()), E = BB->end();
I != E && !I->isTerminator(); ++I) {
if (!isMemoryInst(&*I))
continue;
// Loads and read-only calls/invokes don't change the memory state.
if (isa<LoadInst>(&*I))
continue;
CallInst *CI = dyn_cast<CallInst>(&*I);
if (CI && CI->onlyReadsMemory())
continue;
InvokeInst *II = dyn_cast<InvokeInst>(&*I);
if (II && II->onlyReadsMemory())
continue;
return lookupOrAdd(&*I);
}
return 0;
}
};
class GVNSink {
public:
GVNSink() = default;
bool run(Function &F) {
LLVM_DEBUG(dbgs() << "GVNSink: running on function @" << F.getName()
<< "\n");
unsigned NumSunk = 0;
ReversePostOrderTraversal<Function*> RPOT(&F);
VN.setReachableBBs(BasicBlocksSet(RPOT.begin(), RPOT.end()));
for (auto *N : RPOT)
NumSunk += sinkBB(N);
return NumSunk > 0;
}
private:
ValueTable VN;
bool shouldAvoidSinkingInstruction(Instruction *I) {
if (isa<PHINode>(I) || I->isEHPad() || isa<AllocaInst>(I) ||
I->getType()->isTokenTy())
return true;
return false;
}
Optional<SinkingInstructionCandidate> analyzeInstructionForSinking(
LockstepReverseIterator &LRI, unsigned &InstNum, unsigned &MemoryInstNum,
ModelledPHISet &NeededPHIs, SmallPtrSetImpl<Value *> &PHIContents);
void analyzeInitialPHIs(BasicBlock *BB, ModelledPHISet &PHIs,
SmallPtrSetImpl<Value *> &PHIContents) {
for (PHINode &PN : BB->phis()) {
auto MPHI = ModelledPHI(&PN);
PHIs.insert(MPHI);
for (auto *V : MPHI.getValues())
PHIContents.insert(V);
}
}
unsigned sinkBB(BasicBlock *BBEnd);
void sinkLastInstruction(ArrayRef<BasicBlock *> Blocks, BasicBlock *BBEnd);
void foldPointlessPHINodes(BasicBlock *BB) {
auto I = BB->begin();
while (PHINode *PN = dyn_cast<PHINode>(I++)) {
if (!llvm::all_of(PN->incoming_values(), [&](const Value *V) {
return V == PN->getIncomingValue(0);
}))
continue;
if (PN->getIncomingValue(0) != PN)
PN->replaceAllUsesWith(PN->getIncomingValue(0));
else
PN->replaceAllUsesWith(UndefValue::get(PN->getType()));
PN->eraseFromParent();
}
}
};
// Analyze the set of instructions currently under the lockstep iterator (one
// per active predecessor) and decide whether the most common value number
// among them can be sunk one more step. Returns None if not; otherwise a
// candidate summarizing everything sunk so far. NeededPHIs / PHIContents
// accumulate the PHIs sinking would require, across successive calls.
Optional<SinkingInstructionCandidate> GVNSink::analyzeInstructionForSinking(
LockstepReverseIterator &LRI, unsigned &InstNum, unsigned &MemoryInstNum,
ModelledPHISet &NeededPHIs, SmallPtrSetImpl<Value *> &PHIContents) {
auto Insts = *LRI;
LLVM_DEBUG(dbgs() << " -- Analyzing instruction set: [\n"; for (auto *I
: Insts) {
I->dump();
} dbgs() << " ]\n";);
// Count how many instructions share each value number.
DenseMap<uint32_t, unsigned> VNums;
for (auto *I : Insts) {
uint32_t N = VN.lookupOrAdd(I);
LLVM_DEBUG(dbgs() << " VN=" << Twine::utohexstr(N) << " for" << *I << "\n");
// ~0U means unnumberable (unreachable parent) - bail out entirely.
if (N == ~0U)
return None;
VNums[N]++;
}
// Pick the value number with the most members; a singleton can't be sunk.
unsigned VNumToSink =
std::max_element(VNums.begin(), VNums.end(), llvm::less_second())->first;
if (VNums[VNumToSink] == 1)
return None;
auto &ActivePreds = LRI.getActiveBlocks();
unsigned InitialActivePredSize = ActivePreds.size();
// Keep only the instructions matching the chosen number; predecessors
// contributing a non-matching instruction are deactivated.
SmallVector<Instruction *, 4> NewInsts;
for (auto *I : Insts) {
if (VN.lookup(I) != VNumToSink)
ActivePreds.remove(I->getParent());
else
NewInsts.push_back(I);
}
for (auto *I : NewInsts)
if (shouldAvoidSinkingInstruction(I))
return None;
// If the predecessor set shrank, the PHIs we're tracking (and the iterator)
// must be restricted to the surviving blocks.
bool RecomputePHIContents = false;
if (ActivePreds.size() != InitialActivePredSize) {
ModelledPHISet NewNeededPHIs;
for (auto P : NeededPHIs) {
P.restrictToBlocks(ActivePreds);
NewNeededPHIs.insert(P);
}
NeededPHIs = NewNeededPHIs;
LRI.restrictToBlocks(ActivePreds);
RecomputePHIContents = true;
}
// Sinking these instructions creates a PHI of their results; if that PHI
// was already needed, it becomes redundant.
ModelledPHI NewPHI(NewInsts, ActivePreds);
if (NeededPHIs.erase(NewPHI))
RecomputePHIContents = true;
if (RecomputePHIContents) {
PHIContents.clear();
for (auto &PHI : NeededPHIs)
PHIContents.insert(PHI.getValues().begin(), PHI.getValues().end());
}
// The sunk values must not feed any PHI we still need to create.
for (auto *V : NewPHI.getValues())
if (PHIContents.count(V))
return None;
// All instructions must have the same arity to be merged.
Instruction *I0 = NewInsts[0];
auto hasDifferentNumOperands = [&I0](Instruction *I) {
return I->getNumOperands() != I0->getNumOperands();
};
if (any_of(NewInsts, hasDifferentNumOperands))
return None;
// For each operand position, either all instructions agree or a new PHI
// is needed (and must be legal to create).
for (unsigned OpNum = 0, E = I0->getNumOperands(); OpNum != E; ++OpNum) {
ModelledPHI PHI(NewInsts, OpNum, ActivePreds);
if (PHI.areAllIncomingValuesSame())
continue;
if (!canReplaceOperandWithVariable(I0, OpNum))
return None;
if (NeededPHIs.count(PHI))
continue;
if (!PHI.areAllIncomingValuesSameType())
return None;
// Don't create indirect calls! The called value is the final operand.
if ((isa<CallInst>(I0) || isa<InvokeInst>(I0)) && OpNum == E - 1 &&
PHI.areAnyIncomingValuesConstant())
return None;
// NOTE(review): reserve(size()) looks like a no-op growth hint before
// insert — confirm the intent against upstream history.
NeededPHIs.reserve(NeededPHIs.size());
NeededPHIs.insert(PHI);
PHIContents.insert(PHI.getValues().begin(), PHI.getValues().end());
}
if (isMemoryInst(NewInsts[0]))
++MemoryInstNum;
// Summarize the cumulative state as a candidate; cost is filled in later
// by the caller via calculateCost().
SinkingInstructionCandidate Cand;
Cand.NumInstructions = ++InstNum;
Cand.NumMemoryInsts = MemoryInstNum;
Cand.NumBlocks = ActivePreds.size();
Cand.NumPHIs = NeededPHIs.size();
append_range(Cand.Blocks, ActivePreds);
return Cand;
}
// Try to sink common instructions from the predecessors of BBEnd into BBEnd.
// Returns the number of instructions sunk.
unsigned GVNSink::sinkBB(BasicBlock *BBEnd) {
LLVM_DEBUG(dbgs() << "GVNSink: running on basic block ";
BBEnd->printAsOperand(dbgs()); dbgs() << "\n");
// All predecessors must end in a branch or switch; any other terminator
// (invoke, callbr, ...) makes the whole block ineligible.
SmallVector<BasicBlock *, 4> Preds;
for (auto *B : predecessors(BBEnd)) {
auto *T = B->getTerminator();
if (isa<BranchInst>(T) || isa<SwitchInst>(T))
Preds.push_back(B);
else
return 0;
}
if (Preds.size() < 2)
return 0;
// Sort for deterministic iteration order.
llvm::sort(Preds);
unsigned NumOrigPreds = Preds.size();
// We can only sink along unconditional edges (exactly one successor).
llvm::erase_if(Preds, [](BasicBlock *BB) {
return BB->getTerminator()->getNumSuccessors() != 1;
});
// Walk the predecessors backwards in lockstep, recording one cumulative
// candidate per step (sink 1 instruction, 2 instructions, ...).
LockstepReverseIterator LRI(Preds);
SmallVector<SinkingInstructionCandidate, 4> Candidates;
unsigned InstNum = 0, MemoryInstNum = 0;
ModelledPHISet NeededPHIs;
SmallPtrSet<Value *, 4> PHIContents;
analyzeInitialPHIs(BBEnd, NeededPHIs, PHIContents);
unsigned NumOrigPHIs = NeededPHIs.size();
while (LRI.isValid()) {
auto Cand = analyzeInstructionForSinking(LRI, InstNum, MemoryInstNum,
NeededPHIs, PHIContents);
if (!Cand)
break;
Cand->calculateCost(NumOrigPHIs, Preds.size());
Candidates.emplace_back(*Cand);
--LRI;
}
// Pick the highest-scoring candidate; stable sort keeps determinism for
// equal costs. Nothing profitable means nothing to do.
llvm::stable_sort(Candidates, std::greater<SinkingInstructionCandidate>());
LLVM_DEBUG(dbgs() << " -- Sinking candidates:\n"; for (auto &C
: Candidates) dbgs()
<< " " << C << "\n";);
if (Candidates.empty() || Candidates.front().Cost <= 0)
return 0;
auto C = Candidates.front();
LLVM_DEBUG(dbgs() << " -- Sinking: " << C << "\n");
BasicBlock *InsertBB = BBEnd;
// If the winning candidate uses only a subset of the predecessors, split
// their edges off into a new block and sink into that instead.
if (C.Blocks.size() < NumOrigPreds) {
LLVM_DEBUG(dbgs() << " -- Splitting edge to ";
BBEnd->printAsOperand(dbgs()); dbgs() << "\n");
InsertBB = SplitBlockPredecessors(BBEnd, C.Blocks, ".gvnsink.split");
if (!InsertBB) {
LLVM_DEBUG(dbgs() << " -- FAILED to split edge!\n");
return 0;
}
}
// Sink bottom-up: each call moves the current last instruction of every
// chosen block.
for (unsigned I = 0; I < C.NumInstructions; ++I)
sinkLastInstruction(C.Blocks, InsertBB);
return C.NumInstructions;
}
// Merge the last non-terminator instruction of every block in Blocks into a
// single instruction placed at the top of BBEnd, creating PHIs for any
// operands the instructions disagree on.
void GVNSink::sinkLastInstruction(ArrayRef<BasicBlock *> Blocks,
BasicBlock *BBEnd) {
SmallVector<Instruction *, 4> Insts;
for (BasicBlock *BB : Blocks)
Insts.push_back(BB->getTerminator()->getPrevNode());
// Arbitrarily keep I0 as the surviving instruction.
Instruction *I0 = Insts.front();
SmallVector<Value *, 4> NewOperands;
for (unsigned O = 0, E = I0->getNumOperands(); O != E; ++O) {
bool NeedPHI = llvm::any_of(Insts, [&I0, O](const Instruction *I) {
return I->getOperand(O) != I0->getOperand(O);
});
if (!NeedPHI) {
NewOperands.push_back(I0->getOperand(O));
continue;
}
// Operands differ: create a PHI at the top of BBEnd merging them.
auto *Op = I0->getOperand(O);
assert(!Op->getType()->isTokenTy() && "Can't PHI tokens!");
auto *PN = PHINode::Create(Op->getType(), Insts.size(),
Op->getName() + ".sink", &BBEnd->front());
for (auto *I : Insts)
PN->addIncoming(I->getOperand(O), I->getParent());
NewOperands.push_back(PN);
}
// Rewrite I0's operands before moving it, then place it at BBEnd's first
// insertion point (after any PHIs).
for (unsigned O = 0, E = I0->getNumOperands(); O != E; ++O)
I0->getOperandUse(O).set(NewOperands[O]);
I0->moveBefore(&*BBEnd->getFirstInsertionPt());
// I0 now stands for all the merged instructions: intersect metadata and
// IR flags so it makes no claim the others didn't.
for (auto *I : Insts)
if (I != I0) {
combineMetadataForCSE(I0, I, true);
I0->andIRFlags(I);
}
for (auto *I : Insts)
if (I != I0)
I->replaceAllUsesWith(I0);
// The RAUW above can leave PHIs in BBEnd with all-identical incoming
// values; fold them away before erasing the dead originals.
foldPointlessPHINodes(BBEnd);
for (auto *I : Insts)
if (I != I0)
I->eraseFromParent();
NumRemoved += Insts.size() - 1;
}
class GVNSinkLegacyPass : public FunctionPass {
public:
static char ID;
GVNSinkLegacyPass() : FunctionPass(ID) {
initializeGVNSinkLegacyPassPass(*PassRegistry::getPassRegistry());
}
bool runOnFunction(Function &F) override {
if (skipFunction(F))
return false;
GVNSink G;
return G.run(F);
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addPreserved<GlobalsAAWrapperPass>();
}
};
}
/// New pass-manager entry point: run sinking and report whether anything
/// changed via the preserved-analyses set.
PreservedAnalyses GVNSinkPass::run(Function &F, FunctionAnalysisManager &AM) {
  GVNSink Sinker;
  return Sinker.run(F) ? PreservedAnalyses::none() : PreservedAnalyses::all();
}
char GVNSinkLegacyPass::ID = 0;
// Legacy pass registration under the "gvn-sink" command-line name. The
// dominator/post-dominator entries declare pass dependencies for the
// registry; the pass itself only marks GlobalsAA preserved.
INITIALIZE_PASS_BEGIN(GVNSinkLegacyPass, "gvn-sink",
"Early GVN sinking of Expressions", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
INITIALIZE_PASS_END(GVNSinkLegacyPass, "gvn-sink",
"Early GVN sinking of Expressions", false, false)
FunctionPass *llvm::createGVNSinkPass() { return new GVNSinkLegacyPass(); }