#include "CGOpenMPRuntime.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceManager.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <numeric>
using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;
namespace {
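/// Base class for handling code generation inside OpenMP regions. Keeps track
/// of the kind of the enclosing directive, the callback that emits the region
/// body, and whether the region may be cancelled.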
class CGOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo {
public:
enum CGOpenMPRegionKind {
ParallelOutlinedRegion,
TaskOutlinedRegion,
InlinedRegion,
TargetRegion,
};
CGOpenMPRegionInfo(const CapturedStmt &CS,
const CGOpenMPRegionKind RegionKind,
const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
bool HasCancel)
: CGCapturedStmtInfo(CS, CR_OpenMP), RegionKind(RegionKind),
CodeGen(CodeGen), Kind(Kind), HasCancel(HasCancel) {}
CGOpenMPRegionInfo(const CGOpenMPRegionKind RegionKind,
const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
bool HasCancel)
: CGCapturedStmtInfo(CR_OpenMP), RegionKind(RegionKind), CodeGen(CodeGen),
Kind(Kind), HasCancel(HasCancel) {}
virtual const VarDecl *getThreadIDVariable() const = 0;
void EmitBody(CodeGenFunction &CGF, const Stmt *S) override;
virtual LValue getThreadIDVariableLValue(CodeGenFunction &CGF);
virtual void emitUntiedSwitch(CodeGenFunction & /*CGF*/) {}
CGOpenMPRegionKind getRegionKind() const { return RegionKind; }
OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
bool hasCancel() const { return HasCancel; }
static bool classof(const CGCapturedStmtInfo *Info) {
return Info->getKind() == CR_OpenMP;
}
~CGOpenMPRegionInfo() override = default;
protected:
CGOpenMPRegionKind RegionKind;
RegionCodeGenTy CodeGen;
OpenMPDirectiveKind Kind;
bool HasCancel;
};
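/// API for captured statement code generation in OpenMP constructs that are
/// outlined into a separate helper function (parallel, teams).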
class CGOpenMPOutlinedRegionInfo final : public CGOpenMPRegionInfo {
public:
CGOpenMPOutlinedRegionInfo(const CapturedStmt &CS, const VarDecl *ThreadIDVar,
const RegionCodeGenTy &CodeGen,
OpenMPDirectiveKind Kind, bool HasCancel,
StringRef HelperName)
: CGOpenMPRegionInfo(CS, ParallelOutlinedRegion, CodeGen, Kind,
HasCancel),
ThreadIDVar(ThreadIDVar), HelperName(HelperName) {
assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
}
const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
StringRef getHelperName() const override { return HelperName; }
static bool classof(const CGCapturedStmtInfo *Info) {
return CGOpenMPRegionInfo::classof(Info) &&
cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
ParallelOutlinedRegion;
}
private:
const VarDecl *ThreadIDVar;
StringRef HelperName;
};
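/// API for captured statement code generation in OpenMP task-based constructs.
/// Also manages the switch-based resumption points required for untied tasks.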
class CGOpenMPTaskOutlinedRegionInfo final : public CGOpenMPRegionInfo {
public:
class UntiedTaskActionTy final : public PrePostActionTy {
bool Untied;
const VarDecl *PartIDVar;
const RegionCodeGenTy UntiedCodeGen;
llvm::SwitchInst *UntiedSwitch = nullptr;
public:
UntiedTaskActionTy(bool Tied, const VarDecl *PartIDVar,
const RegionCodeGenTy &UntiedCodeGen)
: Untied(!Tied), PartIDVar(PartIDVar), UntiedCodeGen(UntiedCodeGen) {}
void Enter(CodeGenFunction &CGF) override {
if (Untied) {
LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
CGF.GetAddrOfLocalVar(PartIDVar),
PartIDVar->getType()->castAs<PointerType>());
llvm::Value *Res =
CGF.EmitLoadOfScalar(PartIdLVal, PartIDVar->getLocation());
llvm::BasicBlock *DoneBB = CGF.createBasicBlock(".untied.done.");
UntiedSwitch = CGF.Builder.CreateSwitch(Res, DoneBB);
CGF.EmitBlock(DoneBB);
CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
UntiedSwitch->addCase(CGF.Builder.getInt32(0),
CGF.Builder.GetInsertBlock());
emitUntiedSwitch(CGF);
}
}
void emitUntiedSwitch(CodeGenFunction &CGF) const {
if (Untied) {
LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
CGF.GetAddrOfLocalVar(PartIDVar),
PartIDVar->getType()->castAs<PointerType>());
CGF.EmitStoreOfScalar(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
PartIdLVal);
UntiedCodeGen(CGF);
CodeGenFunction::JumpDest CurPoint =
CGF.getJumpDestInCurrentScope(".untied.next.");
CGF.EmitBranch(CGF.ReturnBlock.getBlock());
CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
UntiedSwitch->addCase(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
CGF.Builder.GetInsertBlock());
CGF.EmitBranchThroughCleanup(CurPoint);
CGF.EmitBlock(CurPoint.getBlock());
}
}
unsigned getNumberOfParts() const { return UntiedSwitch->getNumCases(); }
};
CGOpenMPTaskOutlinedRegionInfo(const CapturedStmt &CS,
const VarDecl *ThreadIDVar,
const RegionCodeGenTy &CodeGen,
OpenMPDirectiveKind Kind, bool HasCancel,
const UntiedTaskActionTy &Action)
: CGOpenMPRegionInfo(CS, TaskOutlinedRegion, CodeGen, Kind, HasCancel),
ThreadIDVar(ThreadIDVar), Action(Action) {
assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
}
const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override;
StringRef getHelperName() const override { return ".omp_outlined."; }
void emitUntiedSwitch(CodeGenFunction &CGF) override {
Action.emitUntiedSwitch(CGF);
}
static bool classof(const CGCapturedStmtInfo *Info) {
return CGOpenMPRegionInfo::classof(Info) &&
cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
TaskOutlinedRegion;
}
private:
const VarDecl *ThreadIDVar;
const UntiedTaskActionTy &Action;
};
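/// API for inlined captured statement code generation in OpenMP constructs.
/// Most queries are forwarded to the enclosing region info, if any.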
class CGOpenMPInlinedRegionInfo : public CGOpenMPRegionInfo {
public:
CGOpenMPInlinedRegionInfo(CodeGenFunction::CGCapturedStmtInfo *OldCSI,
const RegionCodeGenTy &CodeGen,
OpenMPDirectiveKind Kind, bool HasCancel)
: CGOpenMPRegionInfo(InlinedRegion, CodeGen, Kind, HasCancel),
OldCSI(OldCSI),
OuterRegionInfo(dyn_cast_or_null<CGOpenMPRegionInfo>(OldCSI)) {}
llvm::Value *getContextValue() const override {
if (OuterRegionInfo)
return OuterRegionInfo->getContextValue();
llvm_unreachable("No context value for inlined OpenMP region");
}
void setContextValue(llvm::Value *V) override {
if (OuterRegionInfo) {
OuterRegionInfo->setContextValue(V);
return;
}
llvm_unreachable("No context value for inlined OpenMP region");
}
const FieldDecl *lookup(const VarDecl *VD) const override {
if (OuterRegionInfo)
return OuterRegionInfo->lookup(VD);
return nullptr;
}
FieldDecl *getThisFieldDecl() const override {
if (OuterRegionInfo)
return OuterRegionInfo->getThisFieldDecl();
return nullptr;
}
const VarDecl *getThreadIDVariable() const override {
if (OuterRegionInfo)
return OuterRegionInfo->getThreadIDVariable();
return nullptr;
}
LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override {
if (OuterRegionInfo)
return OuterRegionInfo->getThreadIDVariableLValue(CGF);
llvm_unreachable("No LValue for inlined OpenMP construct");
}
StringRef getHelperName() const override {
if (auto *OuterRegionInfo = getOldCSI())
return OuterRegionInfo->getHelperName();
llvm_unreachable("No helper name for inlined OpenMP construct");
}
void emitUntiedSwitch(CodeGenFunction &CGF) override {
if (OuterRegionInfo)
OuterRegionInfo->emitUntiedSwitch(CGF);
}
CodeGenFunction::CGCapturedStmtInfo *getOldCSI() const { return OldCSI; }
static bool classof(const CGCapturedStmtInfo *Info) {
return CGOpenMPRegionInfo::classof(Info) &&
cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == InlinedRegion;
}
~CGOpenMPInlinedRegionInfo() override = default;
private:
CodeGenFunction::CGCapturedStmtInfo *OldCSI;
CGOpenMPRegionInfo *OuterRegionInfo;
};
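/// API for captured statement code generation in OpenMP target constructs.
/// The helper name must be unique within the program, so it is provided by the
/// caller.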
class CGOpenMPTargetRegionInfo final : public CGOpenMPRegionInfo {
public:
CGOpenMPTargetRegionInfo(const CapturedStmt &CS,
const RegionCodeGenTy &CodeGen, StringRef HelperName)
: CGOpenMPRegionInfo(CS, TargetRegion, CodeGen, OMPD_target,
/*HasCancel=*/false),
HelperName(HelperName) {}
const VarDecl *getThreadIDVariable() const override { return nullptr; }
StringRef getHelperName() const override { return HelperName; }
static bool classof(const CGCapturedStmtInfo *Info) {
return CGOpenMPRegionInfo::classof(Info) &&
cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == TargetRegion;
}
private:
StringRef HelperName;
};
static void EmptyCodeGen(CodeGenFunction &, PrePostActionTy &) {
llvm_unreachable("No codegen for expressions");
}
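/// API for generation of expressions captured in an innermost OpenMP region.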
class CGOpenMPInnerExprInfo final : public CGOpenMPInlinedRegionInfo {
public:
CGOpenMPInnerExprInfo(CodeGenFunction &CGF, const CapturedStmt &CS)
: CGOpenMPInlinedRegionInfo(CGF.CapturedStmtInfo, EmptyCodeGen,
OMPD_unknown,
/*HasCancel=*/false),
PrivScope(CGF) {
for (const auto &C : CS.captures()) {
if (!C.capturesVariable() && !C.capturesVariableByCopy())
continue;
const VarDecl *VD = C.getCapturedVar();
if (VD->isLocalVarDeclOrParm())
continue;
DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
/*RefersToEnclosingVariableOrCapture=*/false,
VD->getType().getNonReferenceType(), VK_LValue,
C.getLocation());
PrivScope.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress(CGF));
}
(void)PrivScope.Privatize();
}
const FieldDecl *lookup(const VarDecl *VD) const override {
if (const FieldDecl *FD = CGOpenMPInlinedRegionInfo::lookup(VD))
return FD;
return nullptr;
}
void EmitBody(CodeGenFunction &CGF, const Stmt *S) override {
llvm_unreachable("No body for expressions");
}
const VarDecl *getThreadIDVariable() const override {
llvm_unreachable("No thread id for expressions");
}
StringRef getHelperName() const override {
llvm_unreachable("No helper name for expressions");
}
static bool classof(const CGCapturedStmtInfo *Info) { return false; }
private:
CodeGenFunction::OMPPrivateScope PrivScope;
};
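/// RAII for emitting code of OpenMP constructs in the same function as the
/// enclosing region. Installs a CGOpenMPInlinedRegionInfo and, when
/// NoInheritance is requested, temporarily clears lambda/block capture state
/// so it is not inherited by the inlined region.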
class InlinedOpenMPRegionRAII {
CodeGenFunction &CGF;
llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
FieldDecl *LambdaThisCaptureField = nullptr;
const CodeGen::CGBlockInfo *BlockInfo = nullptr;
bool NoInheritance = false;
public:
InlinedOpenMPRegionRAII(CodeGenFunction &CGF, const RegionCodeGenTy &CodeGen,
OpenMPDirectiveKind Kind, bool HasCancel,
bool NoInheritance = true)
: CGF(CGF), NoInheritance(NoInheritance) {
CGF.CapturedStmtInfo = new CGOpenMPInlinedRegionInfo(
CGF.CapturedStmtInfo, CodeGen, Kind, HasCancel);
if (NoInheritance) {
std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
LambdaThisCaptureField = CGF.LambdaThisCaptureField;
CGF.LambdaThisCaptureField = nullptr;
BlockInfo = CGF.BlockInfo;
CGF.BlockInfo = nullptr;
}
}
~InlinedOpenMPRegionRAII() {
auto *OldCSI =
cast<CGOpenMPInlinedRegionInfo>(CGF.CapturedStmtInfo)->getOldCSI();
delete CGF.CapturedStmtInfo;
CGF.CapturedStmtInfo = OldCSI;
if (NoInheritance) {
std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
CGF.LambdaThisCaptureField = LambdaThisCaptureField;
CGF.BlockInfo = BlockInfo;
}
}
};
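/// Values for bit flags used in the ident_t structure to describe its fields;
/// the names and values mirror those used by the OpenMP runtime (kmp.h).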
enum OpenMPLocationFlags : unsigned {
OMP_IDENT_IMD = 0x01,
OMP_IDENT_KMPC = 0x02,
OMP_ATOMIC_REDUCE = 0x10,
OMP_IDENT_BARRIER_EXPL = 0x20,
OMP_IDENT_BARRIER_IMPL = 0x40,
OMP_IDENT_BARRIER_IMPL_FOR = 0x40,
OMP_IDENT_BARRIER_IMPL_SECTIONS = 0xC0,
OMP_IDENT_BARRIER_IMPL_SINGLE = 0x140,
OMP_IDENT_WORK_LOOP = 0x200,
OMP_IDENT_WORK_SECTIONS = 0x400,
OMP_IDENT_WORK_DISTRIBUTE = 0x800,
LLVM_MARK_AS_BITMASK_ENUM(OMP_IDENT_WORK_DISTRIBUTE)
};
namespace {
LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
enum OpenMPOffloadingRequiresDirFlags : int64_t {
OMP_REQ_UNDEFINED = 0x000,
OMP_REQ_NONE = 0x001,
OMP_REQ_REVERSE_OFFLOAD = 0x002,
OMP_REQ_UNIFIED_ADDRESS = 0x004,
OMP_REQ_UNIFIED_SHARED_MEMORY = 0x008,
OMP_REQ_DYNAMIC_ALLOCATORS = 0x010,
LLVM_MARK_AS_BITMASK_ENUM(OMP_REQ_DYNAMIC_ALLOCATORS)
};
enum OpenMPOffloadingReservedDeviceIDs {
OMP_DEVICEID_UNDEF = -1,
};
} // anonymous namespace
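/// Indexes of the fields of the ident_t structure defined by the OpenMP
/// runtime library.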
enum IdentFieldIndex {
IdentField_Reserved_1,
IdentField_Flags,
IdentField_Reserved_2,
IdentField_Reserved_3,
IdentField_PSource
};
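/// Schedule kinds understood by the OpenMP runtime; the values match the
/// sched_type enumeration in kmp.h.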
enum OpenMPSchedType {
OMP_sch_lower = 32,
OMP_sch_static_chunked = 33,
OMP_sch_static = 34,
OMP_sch_dynamic_chunked = 35,
OMP_sch_guided_chunked = 36,
OMP_sch_runtime = 37,
OMP_sch_auto = 38,
OMP_sch_static_balanced_chunked = 45,
OMP_ord_lower = 64,
OMP_ord_static_chunked = 65,
OMP_ord_static = 66,
OMP_ord_dynamic_chunked = 67,
OMP_ord_guided_chunked = 68,
OMP_ord_runtime = 69,
OMP_ord_auto = 70,
OMP_sch_default = OMP_sch_static,
OMP_dist_sch_static_chunked = 91,
OMP_dist_sch_static = 92,
OMP_sch_modifier_monotonic = (1 << 29),
OMP_sch_modifier_nonmonotonic = (1 << 30),
};
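/// Cleanup that runs the exit part of a pre/post action when the enclosing
/// cleanup scope is left.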
class CleanupTy final : public EHScopeStack::Cleanup {
PrePostActionTy *Action;
public:
explicit CleanupTy(PrePostActionTy *Action) : Action(Action) {}
void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
if (!CGF.HaveInsertPoint())
return;
Action->Exit(CGF);
}
};
} // anonymous namespace
void RegionCodeGenTy::operator()(CodeGenFunction &CGF) const {
CodeGenFunction::RunCleanupsScope Scope(CGF);
if (PrePostAction) {
CGF.EHStack.pushCleanup<CleanupTy>(NormalAndEHCleanup, PrePostAction);
Callback(CodeGen, CGF, *PrePostAction);
} else {
PrePostActionTy Action;
Callback(CodeGen, CGF, Action);
}
}
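/// If \p ReductionOp is a call to a user-defined reduction combiner, return
/// the corresponding OMPDeclareReductionDecl, otherwise return nullptr.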
static const OMPDeclareReductionDecl *
getReductionInit(const Expr *ReductionOp) {
if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
if (const auto *DRE =
dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
if (const auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl()))
return DRD;
return nullptr;
}
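/// Emit initialization of a private reduction copy: either invoke the
/// initializer of the declare reduction construct \p DRD or, if it has none,
/// store a zero-initialized value into \p Private.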
static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
const OMPDeclareReductionDecl *DRD,
const Expr *InitOp,
Address Private, Address Original,
QualType Ty) {
if (DRD->getInitializer()) {
std::pair<llvm::Function *, llvm::Function *> Reduction =
CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
const auto *CE = cast<CallExpr>(InitOp);
const auto *OVE = cast<OpaqueValueExpr>(CE->getCallee());
const Expr *LHS = CE->getArg(0)->IgnoreParenImpCasts();
const Expr *RHS = CE->getArg(1)->IgnoreParenImpCasts();
const auto *LHSDRE =
cast<DeclRefExpr>(cast<UnaryOperator>(LHS)->getSubExpr());
const auto *RHSDRE =
cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()), Private);
PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()), Original);
(void)PrivateScope.Privatize();
RValue Func = RValue::get(Reduction.second);
CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
CGF.EmitIgnoredExpr(InitOp);
} else {
llvm::Constant *Init = CGF.CGM.EmitNullConstant(Ty);
std::string Name = CGF.CGM.getOpenMPRuntime().getName({"init"});
auto *GV = new llvm::GlobalVariable(
CGF.CGM.getModule(), Init->getType(), true,
llvm::GlobalValue::PrivateLinkage, Init, Name);
LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty);
RValue InitRVal;
switch (CGF.getEvaluationKind(Ty)) {
case TEK_Scalar:
InitRVal = CGF.EmitLoadOfLValue(LV, DRD->getLocation());
break;
case TEK_Complex:
InitRVal =
RValue::getComplex(CGF.EmitLoadOfComplex(LV, DRD->getLocation()));
break;
case TEK_Aggregate: {
OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_LValue);
CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, LV);
CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
/*IsInitializer=*/false);
return;
}
}
OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_PRValue);
CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal);
CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
/*IsInitializer=*/false);
}
}
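/// Emit initialization of an array reduction item: loop over the elements of
/// \p DestAddr and initialize each one from the declare reduction initializer
/// or from \p Init.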
static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
QualType Type, bool EmitDeclareReductionInit,
const Expr *Init,
const OMPDeclareReductionDecl *DRD,
Address SrcAddr = Address::invalid()) {
QualType ElementTy;
const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
if (DRD)
SrcAddr =
CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
llvm::Value *SrcBegin = nullptr;
if (DRD)
SrcBegin = SrcAddr.getPointer();
llvm::Value *DestBegin = DestAddr.getPointer();
llvm::Value *DestEnd =
CGF.Builder.CreateGEP(DestAddr.getElementType(), DestBegin, NumElements);
llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
llvm::Value *IsEmpty =
CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
CGF.EmitBlock(BodyBB);
CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
llvm::PHINode *SrcElementPHI = nullptr;
Address SrcElementCurrent = Address::invalid();
if (DRD) {
SrcElementPHI = CGF.Builder.CreatePHI(SrcBegin->getType(), 2,
"omp.arraycpy.srcElementPast");
SrcElementPHI->addIncoming(SrcBegin, EntryBB);
SrcElementCurrent =
Address(SrcElementPHI, SrcAddr.getElementType(),
SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
}
llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
DestElementPHI->addIncoming(DestBegin, EntryBB);
Address DestElementCurrent =
Address(DestElementPHI, DestAddr.getElementType(),
DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
{
CodeGenFunction::RunCleanupsScope InitScope(CGF);
if (EmitDeclareReductionInit) {
emitInitWithReductionInitializer(CGF, DRD, Init, DestElementCurrent,
SrcElementCurrent, ElementTy);
} else
CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
/*IsInitializer=*/false);
}
if (DRD) {
llvm::Value *SrcElementNext = CGF.Builder.CreateConstGEP1_32(
SrcAddr.getElementType(), SrcElementPHI, 1,
"omp.arraycpy.dest.element");
SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock());
}
llvm::Value *DestElementNext = CGF.Builder.CreateConstGEP1_32(
DestAddr.getElementType(), DestElementPHI, 1,
"omp.arraycpy.dest.element");
llvm::Value *Done =
CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());
CGF.EmitBlock(DoneBB, true);
}
LValue ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, const Expr *E) {
return CGF.EmitOMPSharedLValue(E);
}
LValue ReductionCodeGen::emitSharedLValueUB(CodeGenFunction &CGF,
const Expr *E) {
if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(E))
return CGF.EmitOMPArraySectionExpr(OASE, false);
return LValue();
}
void ReductionCodeGen::emitAggregateInitialization(
CodeGenFunction &CGF, unsigned N, Address PrivateAddr, Address SharedAddr,
const OMPDeclareReductionDecl *DRD) {
const auto *PrivateVD =
cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
bool EmitDeclareReductionInit =
DRD && (DRD->getInitializer() || !PrivateVD->hasInit());
EmitOMPAggregateInit(CGF, PrivateAddr, PrivateVD->getType(),
EmitDeclareReductionInit,
EmitDeclareReductionInit ? ClausesData[N].ReductionOp
: PrivateVD->getInit(),
DRD, SharedAddr);
}
ReductionCodeGen::ReductionCodeGen(ArrayRef<const Expr *> Shareds,
ArrayRef<const Expr *> Origs,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> ReductionOps) {
ClausesData.reserve(Shareds.size());
SharedAddresses.reserve(Shareds.size());
Sizes.reserve(Shareds.size());
BaseDecls.reserve(Shareds.size());
const auto *IOrig = Origs.begin();
const auto *IPriv = Privates.begin();
const auto *IRed = ReductionOps.begin();
for (const Expr *Ref : Shareds) {
ClausesData.emplace_back(Ref, *IOrig, *IPriv, *IRed);
std::advance(IOrig, 1);
std::advance(IPriv, 1);
std::advance(IRed, 1);
}
}
void ReductionCodeGen::emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N) {
assert(SharedAddresses.size() == N && OrigAddresses.size() == N &&
"Number of generated lvalues must be exactly N.");
LValue First = emitSharedLValue(CGF, ClausesData[N].Shared);
LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Shared);
SharedAddresses.emplace_back(First, Second);
if (ClausesData[N].Shared == ClausesData[N].Ref) {
OrigAddresses.emplace_back(First, Second);
} else {
LValue First = emitSharedLValue(CGF, ClausesData[N].Ref);
LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Ref);
OrigAddresses.emplace_back(First, Second);
}
}
void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
QualType PrivateType = getPrivateType(N);
bool AsArraySection = isa<OMPArraySectionExpr>(ClausesData[N].Ref);
if (!PrivateType->isVariablyModifiedType()) {
Sizes.emplace_back(
CGF.getTypeSize(OrigAddresses[N].first.getType().getNonReferenceType()),
nullptr);
return;
}
llvm::Value *Size;
llvm::Value *SizeInChars;
auto *ElemType = OrigAddresses[N].first.getAddress(CGF).getElementType();
auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
if (AsArraySection) {
Size = CGF.Builder.CreatePtrDiff(ElemType,
OrigAddresses[N].second.getPointer(CGF),
OrigAddresses[N].first.getPointer(CGF));
Size = CGF.Builder.CreateNUWAdd(
Size, llvm::ConstantInt::get(Size->getType(), 1));
SizeInChars = CGF.Builder.CreateNUWMul(Size, ElemSizeOf);
} else {
SizeInChars =
CGF.getTypeSize(OrigAddresses[N].first.getType().getNonReferenceType());
Size = CGF.Builder.CreateExactUDiv(SizeInChars, ElemSizeOf);
}
Sizes.emplace_back(SizeInChars, Size);
CodeGenFunction::OpaqueValueMapping OpaqueMap(
CGF,
cast<OpaqueValueExpr>(
CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
RValue::get(Size));
CGF.EmitVariablyModifiedType(PrivateType);
}
void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N,
llvm::Value *Size) {
QualType PrivateType = getPrivateType(N);
if (!PrivateType->isVariablyModifiedType()) {
assert(!Size && !Sizes[N].second &&
"Size should be nullptr for non-variably modified reduction "
"items.");
return;
}
CodeGenFunction::OpaqueValueMapping OpaqueMap(
CGF,
cast<OpaqueValueExpr>(
CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
RValue::get(Size));
CGF.EmitVariablyModifiedType(PrivateType);
}
void ReductionCodeGen::emitInitialization(
CodeGenFunction &CGF, unsigned N, Address PrivateAddr, Address SharedAddr,
llvm::function_ref<bool(CodeGenFunction &)> DefaultInit) {
assert(SharedAddresses.size() > N && "No variable was generated");
const auto *PrivateVD =
cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
const OMPDeclareReductionDecl *DRD =
getReductionInit(ClausesData[N].ReductionOp);
if (CGF.getContext().getAsArrayType(PrivateVD->getType())) {
if (DRD && DRD->getInitializer())
(void)DefaultInit(CGF);
emitAggregateInitialization(CGF, N, PrivateAddr, SharedAddr, DRD);
} else if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
(void)DefaultInit(CGF);
QualType SharedType = SharedAddresses[N].first.getType();
emitInitWithReductionInitializer(CGF, DRD, ClausesData[N].ReductionOp,
PrivateAddr, SharedAddr, SharedType);
} else if (!DefaultInit(CGF) && PrivateVD->hasInit() &&
!CGF.isTrivialInitializer(PrivateVD->getInit())) {
CGF.EmitAnyExprToMem(PrivateVD->getInit(), PrivateAddr,
PrivateVD->getType().getQualifiers(),
/*IsInitializer=*/false);
}
}
bool ReductionCodeGen::needCleanups(unsigned N) {
QualType PrivateType = getPrivateType(N);
QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
return DTorKind != QualType::DK_none;
}
void ReductionCodeGen::emitCleanups(CodeGenFunction &CGF, unsigned N,
Address PrivateAddr) {
QualType PrivateType = getPrivateType(N);
QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
if (needCleanups(N)) {
PrivateAddr = CGF.Builder.CreateElementBitCast(
PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
CGF.pushDestroy(DTorKind, PrivateAddr, PrivateType);
}
}
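/// Strip pointer/reference levels off \p BaseTy, loading through each level,
/// until the element type \p ElTy is reached, and return the resulting lvalue
/// cast to that element type.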
static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
LValue BaseLV) {
BaseTy = BaseTy.getNonReferenceType();
while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
!CGF.getContext().hasSameType(BaseTy, ElTy)) {
if (const auto *PtrTy = BaseTy->getAs<PointerType>()) {
BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(CGF), PtrTy);
} else {
LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(CGF), BaseTy);
BaseLV = CGF.EmitLoadOfReferenceLValue(RefLVal);
}
BaseTy = BaseTy->getPointeeType();
}
return CGF.MakeAddrLValue(
CGF.Builder.CreateElementBitCast(BaseLV.getAddress(CGF),
CGF.ConvertTypeForMem(ElTy)),
BaseLV.getType(), BaseLV.getBaseInfo(),
CGF.CGM.getTBAAInfoForSubobject(BaseLV, BaseLV.getType()));
}
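/// Rebuild the chain of pointer indirections expected by the original base
/// type so that the adjusted private pointer \p Addr can be used in its place.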
static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
Address OriginalBaseAddress, llvm::Value *Addr) {
Address Tmp = Address::invalid();
Address TopTmp = Address::invalid();
Address MostTopTmp = Address::invalid();
BaseTy = BaseTy.getNonReferenceType();
while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
!CGF.getContext().hasSameType(BaseTy, ElTy)) {
Tmp = CGF.CreateMemTemp(BaseTy);
if (TopTmp.isValid())
CGF.Builder.CreateStore(Tmp.getPointer(), TopTmp);
else
MostTopTmp = Tmp;
TopTmp = Tmp;
BaseTy = BaseTy->getPointeeType();
}
if (Tmp.isValid()) {
Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
Addr, Tmp.getElementType());
CGF.Builder.CreateStore(Addr, Tmp);
return MostTopTmp;
}
Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
Addr, OriginalBaseAddress.getType());
return OriginalBaseAddress.withPointer(Addr);
}
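/// Return the base variable of an array section or array subscript expression
/// \p Ref, reporting the underlying DeclRefExpr through \p DE; returns nullptr
/// for other expressions.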
static const VarDecl *getBaseDecl(const Expr *Ref, const DeclRefExpr *&DE) {
const VarDecl *OrigVD = nullptr;
if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Ref)) {
const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
Base = TempOASE->getBase()->IgnoreParenImpCasts();
while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
Base = TempASE->getBase()->IgnoreParenImpCasts();
DE = cast<DeclRefExpr>(Base);
OrigVD = cast<VarDecl>(DE->getDecl());
} else if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Ref)) {
const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
Base = TempASE->getBase()->IgnoreParenImpCasts();
DE = cast<DeclRefExpr>(Base);
OrigVD = cast<VarDecl>(DE->getDecl());
}
return OrigVD;
}
Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
Address PrivateAddr) {
const DeclRefExpr *DE;
if (const VarDecl *OrigVD = ::getBaseDecl(ClausesData[N].Ref, DE)) {
BaseDecls.emplace_back(OrigVD);
LValue OriginalBaseLValue = CGF.EmitLValue(DE);
LValue BaseLValue =
loadToBegin(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(),
OriginalBaseLValue);
Address SharedAddr = SharedAddresses[N].first.getAddress(CGF);
llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
SharedAddr.getElementType(), BaseLValue.getPointer(CGF),
SharedAddr.getPointer());
llvm::Value *PrivatePointer =
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
PrivateAddr.getPointer(), SharedAddr.getType());
llvm::Value *Ptr = CGF.Builder.CreateGEP(
SharedAddr.getElementType(), PrivatePointer, Adjustment);
return castToBase(CGF, OrigVD->getType(),
SharedAddresses[N].first.getType(),
OriginalBaseLValue.getAddress(CGF), Ptr);
}
BaseDecls.emplace_back(
cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Ref)->getDecl()));
return PrivateAddr;
}
bool ReductionCodeGen::usesReductionInitializer(unsigned N) const {
const OMPDeclareReductionDecl *DRD =
getReductionInit(ClausesData[N].ReductionOp);
return DRD && DRD->getInitializer();
}
LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
return CGF.EmitLoadOfPointerLValue(
CGF.GetAddrOfLocalVar(getThreadIDVariable()),
getThreadIDVariable()->getType()->castAs<PointerType>());
}
void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt *S) {
if (!CGF.HaveInsertPoint())
return;
CGF.EHStack.pushTerminate();
if (S)
CGF.incrementProfileCounter(S);
CodeGen(CGF);
CGF.EHStack.popTerminate();
}
LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue(
CodeGenFunction &CGF) {
return CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(getThreadIDVariable()),
getThreadIDVariable()->getType(),
AlignmentSource::Decl);
}
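/// Create a public field of type \p FieldTy and append it to the record \p DC.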
static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
QualType FieldTy) {
auto *Field = FieldDecl::Create(
C, DC, SourceLocation(), SourceLocation(), nullptr, FieldTy,
C.getTrivialTypeSourceInfo(FieldTy, SourceLocation()),
nullptr, false, ICIS_NoInit);
Field->setAccess(AS_public);
DC->addDecl(Field);
return Field;
}
CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
StringRef Separator)
: CGM(CGM), FirstSeparator(FirstSeparator), Separator(Separator),
OMPBuilder(CGM.getModule()), OffloadEntriesInfoManager(CGM) {
KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, 8);
OMPBuilder.initialize();
loadOffloadInfoMetadata();
}
void CGOpenMPRuntime::clear() {
InternalVars.clear();
for (const auto &Data : EmittedNonTargetVariables) {
if (!Data.getValue().pointsToAliveValue())
continue;
auto *GV = dyn_cast<llvm::GlobalVariable>(Data.getValue());
if (!GV)
continue;
if (!GV->isDeclaration() || GV->getNumUses() > 0)
continue;
GV->eraseFromParent();
}
}
std::string CGOpenMPRuntime::getName(ArrayRef<StringRef> Parts) const {
SmallString<128> Buffer;
llvm::raw_svector_ostream OS(Buffer);
StringRef Sep = FirstSeparator;
for (StringRef Part : Parts) {
OS << Sep << Part;
Sep = Separator;
}
return std::string(OS.str());
}
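/// Emit the combiner or initializer helper of a user-defined reduction as a
/// void function taking restrict-qualified pointers to the output and input
/// items.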
static llvm::Function *
emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
const Expr *CombinerInitializer, const VarDecl *In,
const VarDecl *Out, bool IsCombiner) {
ASTContext &C = CGM.getContext();
QualType PtrTy = C.getPointerType(Ty).withRestrict();
FunctionArgList Args;
ImplicitParamDecl OmpOutParm(C, nullptr, Out->getLocation(),
nullptr, PtrTy, ImplicitParamDecl::Other);
ImplicitParamDecl OmpInParm(C, nullptr, In->getLocation(),
nullptr, PtrTy, ImplicitParamDecl::Other);
Args.push_back(&OmpOutParm);
Args.push_back(&OmpInParm);
const CGFunctionInfo &FnInfo =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
std::string Name = CGM.getOpenMPRuntime().getName(
{IsCombiner ? "omp_combiner" : "omp_initializer", ""});
auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
Name, &CGM.getModule());
CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
if (CGM.getLangOpts().Optimize) {
Fn->removeFnAttr(llvm::Attribute::NoInline);
Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
Fn->addFnAttr(llvm::Attribute::AlwaysInline);
}
CodeGenFunction CGF(CGM);
CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, In->getLocation(),
Out->getLocation());
CodeGenFunction::OMPPrivateScope Scope(CGF);
Address AddrIn = CGF.GetAddrOfLocalVar(&OmpInParm);
Scope.addPrivate(
In, CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
.getAddress(CGF));
Address AddrOut = CGF.GetAddrOfLocalVar(&OmpOutParm);
Scope.addPrivate(
Out, CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
.getAddress(CGF));
(void)Scope.Privatize();
if (!IsCombiner && Out->hasInit() &&
!CGF.isTrivialInitializer(Out->getInit())) {
CGF.EmitAnyExprToMem(Out->getInit(), CGF.GetAddrOfLocalVar(Out),
Out->getType().getQualifiers(),
/*IsInitializer=*/true);
}
if (CombinerInitializer)
CGF.EmitIgnoredExpr(CombinerInitializer);
Scope.ForceCleanup();
CGF.FinishFunction();
return Fn;
}
void CGOpenMPRuntime::emitUserDefinedReduction(
CodeGenFunction *CGF, const OMPDeclareReductionDecl *D) {
if (UDRMap.count(D) > 0)
return;
llvm::Function *Combiner = emitCombinerOrInitializer(
CGM, D->getType(), D->getCombiner(),
cast<VarDecl>(cast<DeclRefExpr>(D->getCombinerIn())->getDecl()),
cast<VarDecl>(cast<DeclRefExpr>(D->getCombinerOut())->getDecl()),
/*IsCombiner=*/true);
llvm::Function *Initializer = nullptr;
if (const Expr *Init = D->getInitializer()) {
Initializer = emitCombinerOrInitializer(
CGM, D->getType(),
D->getInitializerKind() == OMPDeclareReductionDecl::CallInit ? Init
: nullptr,
cast<VarDecl>(cast<DeclRefExpr>(D->getInitOrig())->getDecl()),
cast<VarDecl>(cast<DeclRefExpr>(D->getInitPriv())->getDecl()),
/*IsCombiner=*/false);
}
UDRMap.try_emplace(D, Combiner, Initializer);
if (CGF) {
auto &Decls = FunctionUDRMap.FindAndConstruct(CGF->CurFn);
Decls.second.push_back(D);
}
}
std::pair<llvm::Function *, llvm::Function *>
CGOpenMPRuntime::getUserDefinedReduction(const OMPDeclareReductionDecl *D) {
auto I = UDRMap.find(D);
if (I != UDRMap.end())
return I->second;
emitUserDefinedReduction(nullptr, D);
return UDRMap.lookup(D);
}
namespace {
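/// RAII that pushes a finalization callback onto the OpenMPIRBuilder stack so
/// cancellation inside the region branches to the correct cleanup destination,
/// and pops it again on destruction.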
struct PushAndPopStackRAII {
PushAndPopStackRAII(llvm::OpenMPIRBuilder *OMPBuilder, CodeGenFunction &CGF,
bool HasCancel, llvm::omp::Directive Kind)
: OMPBuilder(OMPBuilder) {
if (!OMPBuilder)
return;
auto FiniCB = [&CGF](llvm::OpenMPIRBuilder::InsertPointTy IP) {
assert(IP.getBlock()->end() == IP.getPoint() &&
"Clang CG should cause non-terminated block!");
CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
CGF.Builder.restoreIP(IP);
CodeGenFunction::JumpDest Dest =
CGF.getOMPCancelDestination(OMPD_parallel);
CGF.EmitBranchThroughCleanup(Dest);
};
llvm::OpenMPIRBuilder::FinalizationInfo FI({FiniCB, Kind, HasCancel});
OMPBuilder->pushFinalizationCB(std::move(FI));
}
~PushAndPopStackRAII() {
if (OMPBuilder)
OMPBuilder->popFinalizationCB();
}
llvm::OpenMPIRBuilder *OMPBuilder;
};
} // anonymous namespace
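/// Emit the outlined function for a parallel or teams region, detecting
/// whether the region may be cancelled and setting up the helper name for the
/// generated function.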
static llvm::Function *emitParallelOrTeamsOutlinedFunction(
CodeGenModule &CGM, const OMPExecutableDirective &D, const CapturedStmt *CS,
const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
const StringRef OutlinedHelperName, const RegionCodeGenTy &CodeGen) {
assert(ThreadIDVar->getType()->isPointerType() &&
"thread id variable must be of type kmp_int32 *");
CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
bool HasCancel = false;
if (const auto *OPD = dyn_cast<OMPParallelDirective>(&D))
HasCancel = OPD->hasCancel();
else if (const auto *OPD = dyn_cast<OMPTargetParallelDirective>(&D))
HasCancel = OPD->hasCancel();
else if (const auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&D))
HasCancel = OPSD->hasCancel();
else if (const auto *OPFD = dyn_cast<OMPParallelForDirective>(&D))
HasCancel = OPFD->hasCancel();
else if (const auto *OPFD = dyn_cast<OMPTargetParallelForDirective>(&D))
HasCancel = OPFD->hasCancel();
else if (const auto *OPFD = dyn_cast<OMPDistributeParallelForDirective>(&D))
HasCancel = OPFD->hasCancel();
else if (const auto *OPFD =
dyn_cast<OMPTeamsDistributeParallelForDirective>(&D))
HasCancel = OPFD->hasCancel();
else if (const auto *OPFD =
dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&D))
HasCancel = OPFD->hasCancel();
llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
PushAndPopStackRAII PSR(&OMPBuilder, CGF, HasCancel, InnermostKind);
CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind,
HasCancel, OutlinedHelperName);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
return CGF.GenerateOpenMPCapturedStmtFunction(*CS, D.getBeginLoc());
}
llvm::Function *CGOpenMPRuntime::emitParallelOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
const CapturedStmt *CS = D.getCapturedStmt(OMPD_parallel);
return emitParallelOrTeamsOutlinedFunction(
CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
}
llvm::Function *CGOpenMPRuntime::emitTeamsOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
const CapturedStmt *CS = D.getCapturedStmt(OMPD_teams);
return emitParallelOrTeamsOutlinedFunction(
CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
}
llvm::Function *CGOpenMPRuntime::emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
bool Tied, unsigned &NumberOfParts) {
auto &&UntiedCodeGen = [this, &D, TaskTVar](CodeGenFunction &CGF,
PrePostActionTy &) {
llvm::Value *ThreadID = getThreadID(CGF, D.getBeginLoc());
llvm::Value *UpLoc = emitUpdateLocation(CGF, D.getBeginLoc());
llvm::Value *TaskArgs[] = {
UpLoc, ThreadID,
CGF.EmitLoadOfPointerLValue(CGF.GetAddrOfLocalVar(TaskTVar),
TaskTVar->getType()->castAs<PointerType>())
.getPointer(CGF)};
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_omp_task),
TaskArgs);
};
CGOpenMPTaskOutlinedRegionInfo::UntiedTaskActionTy Action(Tied, PartIDVar,
UntiedCodeGen);
CodeGen.setAction(Action);
assert(!ThreadIDVar->getType()->isPointerType() &&
"thread id variable must be of type kmp_int32 for tasks");
const OpenMPDirectiveKind Region =
isOpenMPTaskLoopDirective(D.getDirectiveKind()) ? OMPD_taskloop
: OMPD_task;
const CapturedStmt *CS = D.getCapturedStmt(Region);
bool HasCancel = false;
if (const auto *TD = dyn_cast<OMPTaskDirective>(&D))
HasCancel = TD->hasCancel();
else if (const auto *TD = dyn_cast<OMPTaskLoopDirective>(&D))
HasCancel = TD->hasCancel();
else if (const auto *TD = dyn_cast<OMPMasterTaskLoopDirective>(&D))
HasCancel = TD->hasCancel();
else if (const auto *TD = dyn_cast<OMPParallelMasterTaskLoopDirective>(&D))
HasCancel = TD->hasCancel();
CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen,
InnermostKind, HasCancel, Action);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
llvm::Function *Res = CGF.GenerateCapturedStmtFunction(*CS);
if (!Tied)
NumberOfParts = Action.getNumberOfParts();
return Res;
}
void CGOpenMPRuntime::setLocThreadIdInsertPt(CodeGenFunction &CGF,
bool AtCurrentPoint) {
auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
assert(!Elem.second.ServiceInsertPt && "Insert point is set already.");
llvm::Value *Undef = llvm::UndefValue::get(CGF.Int32Ty);
if (AtCurrentPoint) {
Elem.second.ServiceInsertPt = new llvm::BitCastInst(
Undef, CGF.Int32Ty, "svcpt", CGF.Builder.GetInsertBlock());
} else {
Elem.second.ServiceInsertPt =
new llvm::BitCastInst(Undef, CGF.Int32Ty, "svcpt");
Elem.second.ServiceInsertPt->insertAfter(CGF.AllocaInsertPt);
}
}
void CGOpenMPRuntime::clearLocThreadIdInsertPt(CodeGenFunction &CGF) {
auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
if (Elem.second.ServiceInsertPt) {
llvm::Instruction *Ptr = Elem.second.ServiceInsertPt;
Elem.second.ServiceInsertPt = nullptr;
Ptr->eraseFromParent();
}
}
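/// Build the ";file;function;line;column;;" string used to identify a source
/// location in the ident_t structure.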
static StringRef getIdentStringFromSourceLocation(CodeGenFunction &CGF,
SourceLocation Loc,
SmallString<128> &Buffer) {
llvm::raw_svector_ostream OS(Buffer);
PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
OS << ";" << PLoc.getFilename() << ";";
if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
OS << FD->getQualifiedNameAsString();
OS << ";" << PLoc.getLine() << ";" << PLoc.getColumn() << ";;";
return OS.str();
}
llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
SourceLocation Loc,
unsigned Flags) {
uint32_t SrcLocStrSize;
llvm::Constant *SrcLocStr;
if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo ||
Loc.isInvalid()) {
SrcLocStr = OMPBuilder.getOrCreateDefaultSrcLocStr(SrcLocStrSize);
} else {
std::string FunctionName;
if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
FunctionName = FD->getQualifiedNameAsString();
PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
const char *FileName = PLoc.getFilename();
unsigned Line = PLoc.getLine();
unsigned Column = PLoc.getColumn();
SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(FunctionName, FileName, Line,
Column, SrcLocStrSize);
}
unsigned Reserved2Flags = getDefaultLocationReserved2Flags();
return OMPBuilder.getOrCreateIdent(
SrcLocStr, SrcLocStrSize, llvm::omp::IdentFlag(Flags), Reserved2Flags);
}
llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
SourceLocation Loc) {
assert(CGF.CurFn && "No function in current CodeGenFunction.");
if (CGM.getLangOpts().OpenMPIRBuilder) {
SmallString<128> Buffer;
OMPBuilder.updateToLocation(CGF.Builder.saveIP());
uint32_t SrcLocStrSize;
auto *SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(
getIdentStringFromSourceLocation(CGF, Loc, Buffer), SrcLocStrSize);
return OMPBuilder.getOrCreateThreadID(
OMPBuilder.getOrCreateIdent(SrcLocStr, SrcLocStrSize));
}
llvm::Value *ThreadID = nullptr;
auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
if (I != OpenMPLocThreadIDMap.end()) {
ThreadID = I->second.ThreadID;
if (ThreadID != nullptr)
return ThreadID;
}
if (auto *OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
if (OMPRegionInfo->getThreadIDVariable()) {
LValue LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
llvm::BasicBlock *TopBlock = CGF.AllocaInsertPt->getParent();
if (!CGF.EHStack.requiresLandingPad() || !CGF.getLangOpts().Exceptions ||
!CGF.getLangOpts().CXXExceptions ||
CGF.Builder.GetInsertBlock() == TopBlock ||
!isa<llvm::Instruction>(LVal.getPointer(CGF)) ||
cast<llvm::Instruction>(LVal.getPointer(CGF))->getParent() ==
TopBlock ||
cast<llvm::Instruction>(LVal.getPointer(CGF))->getParent() ==
CGF.Builder.GetInsertBlock()) {
ThreadID = CGF.EmitLoadOfScalar(LVal, Loc);
if (CGF.Builder.GetInsertBlock() == TopBlock) {
auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
Elem.second.ThreadID = ThreadID;
}
return ThreadID;
}
}
}
auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
if (!Elem.second.ServiceInsertPt)
setLocThreadIdInsertPt(CGF);
CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
CGF.Builder.SetInsertPoint(Elem.second.ServiceInsertPt);
llvm::CallInst *Call = CGF.Builder.CreateCall(
OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
OMPRTL___kmpc_global_thread_num),
emitUpdateLocation(CGF, Loc));
Call->setCallingConv(CGF.getRuntimeCC());
Elem.second.ThreadID = Call;
return Call;
}
void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) {
assert(CGF.CurFn && "No function in current CodeGenFunction.");
if (OpenMPLocThreadIDMap.count(CGF.CurFn)) {
clearLocThreadIdInsertPt(CGF);
OpenMPLocThreadIDMap.erase(CGF.CurFn);
}
if (FunctionUDRMap.count(CGF.CurFn) > 0) {
for (const auto *D : FunctionUDRMap[CGF.CurFn])
UDRMap.erase(D);
FunctionUDRMap.erase(CGF.CurFn);
}
auto I = FunctionUDMMap.find(CGF.CurFn);
if (I != FunctionUDMMap.end()) {
for (const auto *D : I->second)
UDMMap.erase(D);
FunctionUDMMap.erase(I);
}
LastprivateConditionalToTypes.erase(CGF.CurFn);
FunctionToUntiedTaskStackMap.erase(CGF.CurFn);
}
llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() {
return OMPBuilder.IdentPtr;
}
llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() {
if (!Kmpc_MicroTy) {
llvm::Type *MicroParams[] = {llvm::PointerType::getUnqual(CGM.Int32Ty),
llvm::PointerType::getUnqual(CGM.Int32Ty)};
Kmpc_MicroTy = llvm::FunctionType::get(CGM.VoidTy, MicroParams, true);
}
return llvm::PointerType::getUnqual(Kmpc_MicroTy);
}
llvm::FunctionCallee
CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize, bool IVSigned,
bool IsGPUDistribute) {
assert((IVSize == 32 || IVSize == 64) &&
"IV size is not compatible with the omp runtime");
StringRef Name;
if (IsGPUDistribute)
Name = IVSize == 32 ? (IVSigned ? "__kmpc_distribute_static_init_4"
: "__kmpc_distribute_static_init_4u")
: (IVSigned ? "__kmpc_distribute_static_init_8"
: "__kmpc_distribute_static_init_8u");
else
Name = IVSize == 32 ? (IVSigned ? "__kmpc_for_static_init_4"
: "__kmpc_for_static_init_4u")
: (IVSigned ? "__kmpc_for_static_init_8"
: "__kmpc_for_static_init_8u");
llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
auto *PtrTy = llvm::PointerType::getUnqual(ITy);
llvm::Type *TypeParams[] = {
    getIdentTyPointerTy(),                     // loc
    CGM.Int32Ty,                               // tid
    CGM.Int32Ty,                               // schedtype
    llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
    PtrTy,                                     // p_lower
    PtrTy,                                     // p_upper
    PtrTy,                                     // p_stride
    ITy,                                       // incr
    ITy                                        // chunk
};
auto *FnTy =
    llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(FnTy, Name);
}
llvm::FunctionCallee
CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize, bool IVSigned) {
assert((IVSize == 32 || IVSize == 64) &&
"IV size is not compatible with the omp runtime");
StringRef Name =
IVSize == 32
? (IVSigned ? "__kmpc_dispatch_init_4" : "__kmpc_dispatch_init_4u")
: (IVSigned ? "__kmpc_dispatch_init_8" : "__kmpc_dispatch_init_8u");
llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
llvm::Type *TypeParams[] = {
    getIdentTyPointerTy(), // loc
    CGM.Int32Ty,           // tid
    CGM.Int32Ty,           // schedtype
    ITy,                   // lower
    ITy,                   // upper
    ITy,                   // stride
    ITy                    // chunk
};
auto *FnTy =
    llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(FnTy, Name);
}
llvm::FunctionCallee
CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize, bool IVSigned) {
assert((IVSize == 32 || IVSize == 64) &&
"IV size is not compatible with the omp runtime");
StringRef Name =
IVSize == 32
? (IVSigned ? "__kmpc_dispatch_fini_4" : "__kmpc_dispatch_fini_4u")
: (IVSigned ? "__kmpc_dispatch_fini_8" : "__kmpc_dispatch_fini_8u");
llvm::Type *TypeParams[] = {
    getIdentTyPointerTy(), // loc
    CGM.Int32Ty            // tid
};
auto *FnTy =
    llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(FnTy, Name);
}
llvm::FunctionCallee
CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize, bool IVSigned) {
assert((IVSize == 32 || IVSize == 64) &&
"IV size is not compatible with the omp runtime");
StringRef Name =
IVSize == 32
? (IVSigned ? "__kmpc_dispatch_next_4" : "__kmpc_dispatch_next_4u")
: (IVSigned ? "__kmpc_dispatch_next_8" : "__kmpc_dispatch_next_8u");
llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
auto *PtrTy = llvm::PointerType::getUnqual(ITy);
llvm::Type *TypeParams[] = {
    getIdentTyPointerTy(),                     // loc
    CGM.Int32Ty,                               // tid
    llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
    PtrTy,                                     // p_lower
    PtrTy,                                     // p_upper
    PtrTy                                      // p_stride
};
auto *FnTy =
    llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(FnTy, Name);
}
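/// Compute the device ID, file ID and line number that uniquely identify a
/// target region entry for the source location \p Loc.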
static void getTargetEntryUniqueInfo(ASTContext &C, SourceLocation Loc,
unsigned &DeviceID, unsigned &FileID,
unsigned &LineNum) {
SourceManager &SM = C.getSourceManager();
assert(Loc.isValid() && "Source location is expected to be always valid.");
PresumedLoc PLoc = SM.getPresumedLoc(Loc);
assert(PLoc.isValid() && "Source location is expected to be always valid.");
llvm::sys::fs::UniqueID ID;
if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID)) {
PLoc = SM.getPresumedLoc(Loc, false);
assert(PLoc.isValid() && "Source location is expected to be always valid.");
if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
SM.getDiagnostics().Report(diag::err_cannot_open_file)
<< PLoc.getFilename() << EC.message();
}
DeviceID = ID.getDevice();
FileID = ID.getFile();
LineNum = PLoc.getLine();
}
Address CGOpenMPRuntime::getAddrOfDeclareTargetVar(const VarDecl *VD) {
if (CGM.getLangOpts().OpenMPSimd)
return Address::invalid();
llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
if (Res && (*Res == OMPDeclareTargetDeclAttr::MT_Link ||
(*Res == OMPDeclareTargetDeclAttr::MT_To &&
HasRequiresUnifiedSharedMemory))) {
SmallString<64> PtrName;
{
llvm::raw_svector_ostream OS(PtrName);
OS << CGM.getMangledName(GlobalDecl(VD));
if (!VD->isExternallyVisible()) {
unsigned DeviceID, FileID, Line;
getTargetEntryUniqueInfo(CGM.getContext(),
VD->getCanonicalDecl()->getBeginLoc(),
DeviceID, FileID, Line);
OS << llvm::format("_%x", FileID);
}
OS << "_decl_tgt_ref_ptr";
}
llvm::Value *Ptr = CGM.getModule().getNamedValue(PtrName);
QualType PtrTy = CGM.getContext().getPointerType(VD->getType());
llvm::Type *LlvmPtrTy = CGM.getTypes().ConvertTypeForMem(PtrTy);
if (!Ptr) {
Ptr = getOrCreateInternalVariable(LlvmPtrTy, PtrName);
auto *GV = cast<llvm::GlobalVariable>(Ptr);
GV->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
if (!CGM.getLangOpts().OpenMPIsDevice)
GV->setInitializer(CGM.GetAddrOfGlobal(VD));
registerTargetGlobalVariable(VD, cast<llvm::Constant>(Ptr));
}
return Address(Ptr, LlvmPtrTy, CGM.getContext().getDeclAlign(VD));
}
return Address::invalid();
}
llvm::Constant *
CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) {
assert(!CGM.getLangOpts().OpenMPUseTLS ||
!CGM.getContext().getTargetInfo().isTLSSupported());
std::string Suffix = getName({"cache", ""});
return getOrCreateInternalVariable(
CGM.Int8PtrPtrTy, Twine(CGM.getMangledName(VD)).concat(Suffix));
}
Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
const VarDecl *VD,
Address VDAddr,
SourceLocation Loc) {
if (CGM.getLangOpts().OpenMPUseTLS &&
CGM.getContext().getTargetInfo().isTLSSupported())
return VDAddr;
llvm::Type *VarTy = VDAddr.getElementType();
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.Int8PtrTy),
CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
getOrCreateThreadPrivateCache(VD)};
return Address(
CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_threadprivate_cached),
Args),
CGF.Int8Ty, VDAddr.getAlignment());
}
void CGOpenMPRuntime::emitThreadPrivateVarInit(
CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor,
llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) {
llvm::Value *OMPLoc = emitUpdateLocation(CGF, Loc);
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_global_thread_num),
OMPLoc);
llvm::Value *Args[] = {
OMPLoc, CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.VoidPtrTy),
Ctor, CopyCtor, Dtor};
CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_threadprivate_register),
Args);
}
llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
const VarDecl *VD, Address VDAddr, SourceLocation Loc,
bool PerformInit, CodeGenFunction *CGF) {
if (CGM.getLangOpts().OpenMPUseTLS &&
CGM.getContext().getTargetInfo().isTLSSupported())
return nullptr;
VD = VD->getDefinition(CGM.getContext());
if (VD && ThreadPrivateWithDefinition.insert(CGM.getMangledName(VD)).second) {
QualType ASTTy = VD->getType();
llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr;
const Expr *Init = VD->getAnyInitializer();
if (CGM.getLangOpts().CPlusPlus && PerformInit) {
CodeGenFunction CtorCGF(CGM);
FunctionArgList Args;
ImplicitParamDecl Dst(CGM.getContext(), nullptr, Loc,
nullptr, CGM.getContext().VoidPtrTy,
ImplicitParamDecl::Other);
Args.push_back(&Dst);
const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
CGM.getContext().VoidPtrTy, Args);
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
std::string Name = getName({"__kmpc_global_ctor_", ""});
llvm::Function *Fn =
CGM.CreateGlobalInitOrCleanUpFunction(FTy, Name, FI, Loc);
CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
Args, Loc, Loc);
llvm::Value *ArgVal = CtorCGF.EmitLoadOfScalar(
CtorCGF.GetAddrOfLocalVar(&Dst), false,
CGM.getContext().VoidPtrTy, Dst.getLocation());
Address Arg(ArgVal, CtorCGF.Int8Ty, VDAddr.getAlignment());
Arg = CtorCGF.Builder.CreateElementBitCast(
Arg, CtorCGF.ConvertTypeForMem(ASTTy));
CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
/*IsInitializer=*/true);
ArgVal = CtorCGF.EmitLoadOfScalar(
CtorCGF.GetAddrOfLocalVar(&Dst), false,
CGM.getContext().VoidPtrTy, Dst.getLocation());
CtorCGF.Builder.CreateStore(ArgVal, CtorCGF.ReturnValue);
CtorCGF.FinishFunction();
Ctor = Fn;
}
if (VD->getType().isDestructedType() != QualType::DK_none) {
CodeGenFunction DtorCGF(CGM);
FunctionArgList Args;
ImplicitParamDecl Dst(CGM.getContext(), nullptr, Loc,
nullptr, CGM.getContext().VoidPtrTy,
ImplicitParamDecl::Other);
Args.push_back(&Dst);
const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
CGM.getContext().VoidTy, Args);
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
std::string Name = getName({"__kmpc_global_dtor_", ""});
llvm::Function *Fn =
CGM.CreateGlobalInitOrCleanUpFunction(FTy, Name, FI, Loc);
auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args,
Loc, Loc);
auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
llvm::Value *ArgVal = DtorCGF.EmitLoadOfScalar(
DtorCGF.GetAddrOfLocalVar(&Dst),
false, CGM.getContext().VoidPtrTy, Dst.getLocation());
DtorCGF.emitDestroy(
Address(ArgVal, DtorCGF.Int8Ty, VDAddr.getAlignment()), ASTTy,
DtorCGF.getDestroyer(ASTTy.isDestructedType()),
DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
DtorCGF.FinishFunction();
Dtor = Fn;
}
if (!Ctor && !Dtor)
return nullptr;
llvm::Type *CopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
auto *CopyCtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CopyCtorTyArgs,
/*isVarArg=*/false)
->getPointerTo();
CopyCtor = llvm::Constant::getNullValue(CopyCtorTy);
if (Ctor == nullptr) {
auto *CtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
/*isVarArg=*/false)
->getPointerTo();
Ctor = llvm::Constant::getNullValue(CtorTy);
}
if (Dtor == nullptr) {
auto *DtorTy = llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy,
/*isVarArg=*/false)
->getPointerTo();
Dtor = llvm::Constant::getNullValue(DtorTy);
}
if (!CGF) {
auto *InitFunctionTy = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
std::string Name = getName({"__omp_threadprivate_init_", ""});
llvm::Function *InitFunction = CGM.CreateGlobalInitOrCleanUpFunction(
InitFunctionTy, Name, CGM.getTypes().arrangeNullaryFunction());
CodeGenFunction InitCGF(CGM);
FunctionArgList ArgList;
InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction,
CGM.getTypes().arrangeNullaryFunction(), ArgList,
Loc, Loc);
emitThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
InitCGF.FinishFunction();
return InitFunction;
}
emitThreadPrivateVarInit(*CGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
}
return nullptr;
}
bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
llvm::GlobalVariable *Addr,
bool PerformInit) {
if (CGM.getLangOpts().OMPTargetTriples.empty() &&
!CGM.getLangOpts().OpenMPIsDevice)
return false;
Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link ||
(*Res == OMPDeclareTargetDeclAttr::MT_To &&
HasRequiresUnifiedSharedMemory))
return CGM.getLangOpts().OpenMPIsDevice;
VD = VD->getDefinition(CGM.getContext());
assert(VD && "Unknown VarDecl");
if (!DeclareTargetWithDefinition.insert(CGM.getMangledName(VD)).second)
return CGM.getLangOpts().OpenMPIsDevice;
QualType ASTTy = VD->getType();
SourceLocation Loc = VD->getCanonicalDecl()->getBeginLoc();
unsigned DeviceID;
unsigned FileID;
unsigned Line;
getTargetEntryUniqueInfo(CGM.getContext(), Loc, DeviceID, FileID, Line);
SmallString<128> Buffer, Out;
{
llvm::raw_svector_ostream OS(Buffer);
OS << "__omp_offloading_" << llvm::format("_%x", DeviceID)
<< llvm::format("_%x_", FileID) << VD->getName() << "_l" << Line;
}
const Expr *Init = VD->getAnyInitializer();
if (CGM.getLangOpts().CPlusPlus && PerformInit) {
llvm::Constant *Ctor;
llvm::Constant *ID;
if (CGM.getLangOpts().OpenMPIsDevice) {
CodeGenFunction CtorCGF(CGM);
const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
llvm::Function *Fn = CGM.CreateGlobalInitOrCleanUpFunction(
FTy, Twine(Buffer, "_ctor"), FI, Loc, /*TLS=*/false,
llvm::GlobalValue::WeakODRLinkage);
if (CGM.getTriple().isAMDGCN())
Fn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
auto NL = ApplyDebugLocation::CreateEmpty(CtorCGF);
CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
FunctionArgList(), Loc, Loc);
auto AL = ApplyDebugLocation::CreateArtificial(CtorCGF);
llvm::Constant *AddrInAS0 = Addr;
if (Addr->getAddressSpace() != 0)
AddrInAS0 = llvm::ConstantExpr::getAddrSpaceCast(
Addr, llvm::PointerType::getWithSamePointeeType(
cast<llvm::PointerType>(Addr->getType()), 0));
CtorCGF.EmitAnyExprToMem(Init,
Address(AddrInAS0, Addr->getValueType(),
CGM.getContext().getDeclAlign(VD)),
Init->getType().getQualifiers(),
/*IsInitializer=*/true);
CtorCGF.FinishFunction();
Ctor = Fn;
ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
} else {
Ctor = new llvm::GlobalVariable(
CGM.getModule(), CGM.Int8Ty, true,
llvm::GlobalValue::PrivateLinkage,
llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_ctor"));
ID = Ctor;
}
Out.clear();
OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
DeviceID, FileID, Twine(Buffer, "_ctor").toStringRef(Out), Line, Ctor,
ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryCtor);
}
if (VD->getType().isDestructedType() != QualType::DK_none) {
llvm::Constant *Dtor;
llvm::Constant *ID;
if (CGM.getLangOpts().OpenMPIsDevice) {
CodeGenFunction DtorCGF(CGM);
const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
llvm::Function *Fn = CGM.CreateGlobalInitOrCleanUpFunction(
FTy, Twine(Buffer, "_dtor"), FI, Loc, /*TLS=*/false,
llvm::GlobalValue::WeakODRLinkage);
if (CGM.getTriple().isAMDGCN())
Fn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
FunctionArgList(), Loc, Loc);
auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
llvm::Constant *AddrInAS0 = Addr;
if (Addr->getAddressSpace() != 0)
AddrInAS0 = llvm::ConstantExpr::getAddrSpaceCast(
Addr, llvm::PointerType::getWithSamePointeeType(
cast<llvm::PointerType>(Addr->getType()), 0));
DtorCGF.emitDestroy(Address(AddrInAS0, Addr->getValueType(),
CGM.getContext().getDeclAlign(VD)),
ASTTy, DtorCGF.getDestroyer(ASTTy.isDestructedType()),
DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
DtorCGF.FinishFunction();
Dtor = Fn;
ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
} else {
Dtor = new llvm::GlobalVariable(
CGM.getModule(), CGM.Int8Ty, true,
llvm::GlobalValue::PrivateLinkage,
llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_dtor"));
ID = Dtor;
}
Out.clear();
OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
DeviceID, FileID, Twine(Buffer, "_dtor").toStringRef(Out), Line, Dtor,
ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryDtor);
}
return CGM.getLangOpts().OpenMPIsDevice;
}
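// Returns the address of an artificial (compiler-generated) threadprivate
// variable. When the target supports TLS the backing global is simply marked
// thread_local; otherwise the per-thread copy is obtained through the runtime,
// roughly:
//   __kmpc_threadprivate_cached(&loc, gtid, &<var>, sizeof(<var>), &<cache>);
// (Schematic call; the actual arguments are built below.)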
Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
QualType VarType,
StringRef Name) {
std::string Suffix = getName({"artificial", ""});
llvm::Type *VarLVType = CGF.ConvertTypeForMem(VarType);
llvm::GlobalVariable *GAddr =
getOrCreateInternalVariable(VarLVType, Twine(Name).concat(Suffix));
if (CGM.getLangOpts().OpenMP && CGM.getLangOpts().OpenMPUseTLS &&
CGM.getTarget().isTLSSupported()) {
GAddr->setThreadLocal(true);
return Address(GAddr, GAddr->getValueType(),
CGM.getContext().getTypeAlignInChars(VarType));
}
std::string CacheSuffix = getName({"cache", ""});
llvm::Value *Args[] = {
emitUpdateLocation(CGF, SourceLocation()),
getThreadID(CGF, SourceLocation()),
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(GAddr, CGM.VoidPtrTy),
CGF.Builder.CreateIntCast(CGF.getTypeSize(VarType), CGM.SizeTy,
false),
getOrCreateInternalVariable(
CGM.VoidPtrPtrTy, Twine(Name).concat(Suffix).concat(CacheSuffix))};
return Address(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_threadprivate_cached),
Args),
VarLVType->getPointerTo(0)),
VarLVType, CGM.getContext().getTypeAlignInChars(VarType));
}
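// Emits code for an OpenMP 'if' clause: if the condition folds to a constant,
// only the matching branch is generated; otherwise both ThenGen and ElseGen
// are emitted under an explicit omp_if.then / omp_if.else / omp_if.end
// diamond.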
void CGOpenMPRuntime::emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
const RegionCodeGenTy &ThenGen,
const RegionCodeGenTy &ElseGen) {
CodeGenFunction::LexicalScope ConditionScope(CGF, Cond->getSourceRange());
bool CondConstant;
if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
if (CondConstant)
ThenGen(CGF);
else
ElseGen(CGF);
return;
}
llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("omp_if.then");
llvm::BasicBlock *ElseBlock = CGF.createBasicBlock("omp_if.else");
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("omp_if.end");
CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, 0);
CGF.EmitBlock(ThenBlock);
ThenGen(CGF);
CGF.EmitBranch(ContBlock);
// Scope the empty debug location so it is still active while the else block
// and the trailing unconditional branch are emitted; a discarded temporary
// would restore the previous location immediately.
{
auto NL = ApplyDebugLocation::CreateEmpty(CGF);
CGF.EmitBlock(ElseBlock);
}
ElseGen(CGF);
{
auto NL = ApplyDebugLocation::CreateEmpty(CGF);
CGF.EmitBranch(ContBlock);
}
CGF.EmitBlock(ContBlock, /*IsFinished=*/true);
}
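// Emits a '#pragma omp parallel' call. When the region actually runs in
// parallel, the captures are forwarded to the runtime, roughly:
//   __kmpc_fork_call(&loc, <num captures>, (kmpc_micro)OutlinedFn,
//                    <captures...>);
// Under a failing 'if' clause the region is serialized instead:
//   __kmpc_serialized_parallel(&loc, gtid);
//   OutlinedFn(&gtid, &zero_bound, <captures...>);
//   __kmpc_end_serialized_parallel(&loc, gtid);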
void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond,
llvm::Value *NumThreads) {
if (!CGF.HaveInsertPoint())
return;
llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
auto &M = CGM.getModule();
auto &&ThenGen = [&M, OutlinedFn, CapturedVars, RTLoc,
this](CodeGenFunction &CGF, PrePostActionTy &) {
CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
llvm::Value *Args[] = {
RTLoc,
CGF.Builder.getInt32(CapturedVars.size()),
CGF.Builder.CreateBitCast(OutlinedFn, RT.getKmpc_MicroPointerTy())};
llvm::SmallVector<llvm::Value *, 16> RealArgs;
RealArgs.append(std::begin(Args), std::end(Args));
RealArgs.append(CapturedVars.begin(), CapturedVars.end());
llvm::FunctionCallee RTLFn =
OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_fork_call);
CGF.EmitRuntimeCall(RTLFn, RealArgs);
};
auto &&ElseGen = [&M, OutlinedFn, CapturedVars, RTLoc, Loc,
this](CodeGenFunction &CGF, PrePostActionTy &) {
CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
llvm::Value *ThreadID = RT.getThreadID(CGF, Loc);
llvm::Value *Args[] = {RTLoc, ThreadID};
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
M, OMPRTL___kmpc_serialized_parallel),
Args);
Address ThreadIDAddr = RT.emitThreadIDAddress(CGF, Loc);
Address ZeroAddrBound =
CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty, ".bound.zero.addr");
CGF.Builder.CreateStore(CGF.Builder.getInt32(0), ZeroAddrBound);
llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
OutlinedFnArgs.push_back(ZeroAddrBound.getPointer());
OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
OutlinedFn->removeFnAttr(llvm::Attribute::AlwaysInline);
OutlinedFn->addFnAttr(llvm::Attribute::NoInline);
RT.emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
llvm::Value *EndArgs[] = {RT.emitUpdateLocation(CGF, Loc), ThreadID};
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
M, OMPRTL___kmpc_end_serialized_parallel),
EndArgs);
};
if (IfCond) {
emitIfClause(CGF, IfCond, ThenGen, ElseGen);
} else {
RegionCodeGenTy ThenRCG(ThenGen);
ThenRCG(CGF);
}
}
Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
SourceLocation Loc) {
if (auto *OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
if (OMPRegionInfo->getThreadIDVariable())
return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress(CGF);
llvm::Value *ThreadID = getThreadID(CGF, Loc);
QualType Int32Ty =
CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true);
Address ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, ".threadid_temp.");
CGF.EmitStoreOfScalar(ThreadID,
CGF.MakeAddrLValue(ThreadIDTemp, Int32Ty));
return ThreadIDTemp;
}
llvm::GlobalVariable *CGOpenMPRuntime::getOrCreateInternalVariable(
llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace) {
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
Out << Name;
StringRef RuntimeName = Out.str();
auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first;
if (Elem.second) {
assert(Elem.second->getType()->isOpaqueOrPointeeTypeMatches(Ty) &&
"OMP internal variable has different type than requested");
return &*Elem.second;
}
return Elem.second = new llvm::GlobalVariable(
CGM.getModule(), Ty, false,
llvm::GlobalValue::CommonLinkage, llvm::Constant::getNullValue(Ty),
Elem.first(), nullptr,
llvm::GlobalValue::NotThreadLocal, AddressSpace);
}
llvm::Value *CGOpenMPRuntime::getCriticalRegionLock(StringRef CriticalName) {
std::string Prefix = Twine("gomp_critical_user_", CriticalName).str();
std::string Name = getName({Prefix, "var"});
return getOrCreateInternalVariable(KmpCriticalNameTy, Name);
}
namespace {
class CommonActionTy final : public PrePostActionTy {
llvm::FunctionCallee EnterCallee;
ArrayRef<llvm::Value *> EnterArgs;
llvm::FunctionCallee ExitCallee;
ArrayRef<llvm::Value *> ExitArgs;
bool Conditional;
llvm::BasicBlock *ContBlock = nullptr;
public:
CommonActionTy(llvm::FunctionCallee EnterCallee,
ArrayRef<llvm::Value *> EnterArgs,
llvm::FunctionCallee ExitCallee,
ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
: EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
ExitArgs(ExitArgs), Conditional(Conditional) {}
void Enter(CodeGenFunction &CGF) override {
llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
if (Conditional) {
llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
ContBlock = CGF.createBasicBlock("omp_if.end");
CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
CGF.EmitBlock(ThenBlock);
}
}
void Done(CodeGenFunction &CGF) {
CGF.EmitBranch(ContBlock);
CGF.EmitBlock(ContBlock, true);
}
void Exit(CodeGenFunction &CGF) override {
CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
}
};
}
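// Emits an OpenMP 'critical' region. Schematically the generated code is:
//   __kmpc_critical(&loc, gtid, &<lock>);              // or, with a hint,
//   __kmpc_critical_with_hint(&loc, gtid, &<lock>, hint);
//   <CriticalOpGen>;
//   __kmpc_end_critical(&loc, gtid, &<lock>);
// where <lock> is the named internal kmp_critical_name variable returned by
// getCriticalRegionLock above.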
void CGOpenMPRuntime::emitCriticalRegion(CodeGenFunction &CGF,
StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen,
SourceLocation Loc, const Expr *Hint) {
if (!CGF.HaveInsertPoint())
return;
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
getCriticalRegionLock(CriticalName)};
llvm::SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args),
std::end(Args));
if (Hint) {
EnterArgs.push_back(CGF.Builder.CreateIntCast(
CGF.EmitScalarExpr(Hint), CGM.Int32Ty, false));
}
CommonActionTy Action(
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(),
Hint ? OMPRTL___kmpc_critical_with_hint : OMPRTL___kmpc_critical),
EnterArgs,
OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
OMPRTL___kmpc_end_critical),
Args);
CriticalOpGen.setAction(Action);
emitInlinedDirective(CGF, OMPD_critical, CriticalOpGen);
}
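// Emits an OpenMP 'master' region. Because __kmpc_master returns non-zero only
// on the master thread, the body is emitted conditionally:
//   if (__kmpc_master(&loc, gtid)) {
//     <MasterOpGen>;
//     __kmpc_end_master(&loc, gtid);
//   }
// The surrounding branch comes from CommonActionTy with Conditional=true.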
void CGOpenMPRuntime::emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc) {
if (!CGF.HaveInsertPoint())
return;
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_master),
Args,
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_end_master),
Args,
true);
MasterOpGen.setAction(Action);
emitInlinedDirective(CGF, OMPD_master, MasterOpGen);
Action.Done(CGF);
}
void CGOpenMPRuntime::emitMaskedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MaskedOpGen,
SourceLocation Loc, const Expr *Filter) {
if (!CGF.HaveInsertPoint())
return;
llvm::Value *FilterVal = Filter
? CGF.EmitScalarExpr(Filter, CGF.Int32Ty)
: llvm::ConstantInt::get(CGM.Int32Ty, 0);
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
FilterVal};
llvm::Value *ArgsEnd[] = {emitUpdateLocation(CGF, Loc),
getThreadID(CGF, Loc)};
CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_masked),
Args,
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_end_masked),
ArgsEnd,
true);
MaskedOpGen.setAction(Action);
emitInlinedDirective(CGF, OMPD_masked, MaskedOpGen);
Action.Done(CGF);
}
void CGOpenMPRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
SourceLocation Loc) {
if (!CGF.HaveInsertPoint())
return;
if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
OMPBuilder.createTaskyield(CGF.Builder);
} else {
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
llvm::ConstantInt::get(CGM.IntTy, 0, true)};
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_omp_taskyield),
Args);
}
if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
Region->emitUntiedSwitch(CGF);
}
void CGOpenMPRuntime::emitTaskgroupRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc) {
if (!CGF.HaveInsertPoint())
return;
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_taskgroup),
Args,
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_end_taskgroup),
Args);
TaskgroupOpGen.setAction(Action);
emitInlinedDirective(CGF, OMPD_taskgroup, TaskgroupOpGen);
}
static Address emitAddrOfVarFromArray(CodeGenFunction &CGF, Address Array,
unsigned Index, const VarDecl *Var) {
Address PtrAddr = CGF.Builder.CreateConstArrayGEP(Array, Index);
llvm::Value *Ptr = CGF.Builder.CreateLoad(PtrAddr);
llvm::Type *ElemTy = CGF.ConvertTypeForMem(Var->getType());
return Address(
CGF.Builder.CreateBitCast(
Ptr, ElemTy->getPointerTo(Ptr->getType()->getPointerAddressSpace())),
ElemTy, CGF.getContext().getDeclAlign(Var));
}
static llvm::Value *emitCopyprivateCopyFunction(
CodeGenModule &CGM, llvm::Type *ArgsElemType,
ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs,
ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps,
SourceLocation Loc) {
ASTContext &C = CGM.getContext();
FunctionArgList Args;
ImplicitParamDecl LHSArg(C, nullptr, Loc, nullptr, C.VoidPtrTy,
ImplicitParamDecl::Other);
ImplicitParamDecl RHSArg(C, nullptr, Loc, nullptr, C.VoidPtrTy,
ImplicitParamDecl::Other);
Args.push_back(&LHSArg);
Args.push_back(&RHSArg);
const auto &CGFI =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
std::string Name =
CGM.getOpenMPRuntime().getName({"omp", "copyprivate", "copy_func"});
auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
llvm::GlobalValue::InternalLinkage, Name,
&CGM.getModule());
CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
Fn->setDoesNotRecurse();
CodeGenFunction CGF(CGM);
CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
ArgsElemType->getPointerTo()),
ArgsElemType, CGF.getPointerAlign());
Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
ArgsElemType->getPointerTo()),
ArgsElemType, CGF.getPointerAlign());
for (unsigned I = 0, E = AssignmentOps.size(); I < E; ++I) {
const auto *DestVar =
cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl());
Address DestAddr = emitAddrOfVarFromArray(CGF, LHS, I, DestVar);
const auto *SrcVar =
cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl());
Address SrcAddr = emitAddrOfVarFromArray(CGF, RHS, I, SrcVar);
const auto *VD = cast<DeclRefExpr>(CopyprivateVars[I])->getDecl();
QualType Type = VD->getType();
CGF.EmitOMPCopy(Type, DestAddr, SrcAddr, DestVar, SrcVar, AssignmentOps[I]);
}
CGF.FinishFunction();
return Fn;
}
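// Emits an OpenMP 'single' region, including copyprivate support.
// Schematically:
//   if (__kmpc_single(&loc, gtid)) {
//     <SingleOpGen>;
//     __kmpc_end_single(&loc, gtid);
//     did_it = 1;
//   }
//   __kmpc_copyprivate(&loc, gtid, <buf_size>, <copyprivate list>,
//                      <copy_func>, did_it);   // only with copyprivate vars
// <copy_func> is the helper emitted by emitCopyprivateCopyFunction above.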
void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &SingleOpGen,
SourceLocation Loc,
ArrayRef<const Expr *> CopyprivateVars,
ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> DstExprs,
ArrayRef<const Expr *> AssignmentOps) {
if (!CGF.HaveInsertPoint())
return;
assert(CopyprivateVars.size() == SrcExprs.size() &&
CopyprivateVars.size() == DstExprs.size() &&
CopyprivateVars.size() == AssignmentOps.size());
ASTContext &C = CGM.getContext();
Address DidIt = Address::invalid();
if (!CopyprivateVars.empty()) {
QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
DidIt = CGF.CreateMemTemp(KmpInt32Ty, ".omp.copyprivate.did_it");
CGF.Builder.CreateStore(CGF.Builder.getInt32(0), DidIt);
}
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_single),
Args,
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_end_single),
Args,
true);
SingleOpGen.setAction(Action);
emitInlinedDirective(CGF, OMPD_single, SingleOpGen);
if (DidIt.isValid()) {
CGF.Builder.CreateStore(CGF.Builder.getInt32(1), DidIt);
}
Action.Done(CGF);
if (DidIt.isValid()) {
llvm::APInt ArraySize(32, CopyprivateVars.size());
QualType CopyprivateArrayTy = C.getConstantArrayType(
C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
/*IndexTypeQuals=*/0);
Address CopyprivateList =
CGF.CreateMemTemp(CopyprivateArrayTy, ".omp.copyprivate.cpr_list");
for (unsigned I = 0, E = CopyprivateVars.size(); I < E; ++I) {
Address Elem = CGF.Builder.CreateConstArrayGEP(CopyprivateList, I);
CGF.Builder.CreateStore(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.EmitLValue(CopyprivateVars[I]).getPointer(CGF),
CGF.VoidPtrTy),
Elem);
}
llvm::Value *CpyFn = emitCopyprivateCopyFunction(
CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy), CopyprivateVars,
SrcExprs, DstExprs, AssignmentOps, Loc);
llvm::Value *BufSize = CGF.getTypeSize(CopyprivateArrayTy);
Address CL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CopyprivateList, CGF.VoidPtrTy, CGF.Int8Ty);
llvm::Value *DidItVal = CGF.Builder.CreateLoad(DidIt);
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), // ident_t *<loc>
getThreadID(CGF, Loc),        // i32 <gtid>
BufSize,                      // size_t <buf_size>
CL.getPointer(),              // void *<copyprivate list>
CpyFn,                        // void (*)(void *, void *) <copy_func>
DidItVal                      // i32 did_it
};
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_copyprivate),
Args);
}
}
void CGOpenMPRuntime::emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
SourceLocation Loc, bool IsThreads) {
if (!CGF.HaveInsertPoint())
return;
if (IsThreads) {
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_ordered),
Args,
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_end_ordered),
Args);
OrderedOpGen.setAction(Action);
emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
return;
}
emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
}
unsigned CGOpenMPRuntime::getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind) {
unsigned Flags;
if (Kind == OMPD_for)
Flags = OMP_IDENT_BARRIER_IMPL_FOR;
else if (Kind == OMPD_sections)
Flags = OMP_IDENT_BARRIER_IMPL_SECTIONS;
else if (Kind == OMPD_single)
Flags = OMP_IDENT_BARRIER_IMPL_SINGLE;
else if (Kind == OMPD_barrier)
Flags = OMP_IDENT_BARRIER_EXPL;
else
Flags = OMP_IDENT_BARRIER_IMPL;
return Flags;
}
void CGOpenMPRuntime::getDefaultScheduleAndChunk(
CodeGenFunction &CGF, const OMPLoopDirective &S,
OpenMPScheduleClauseKind &ScheduleKind, const Expr *&ChunkExpr) const {
if (llvm::any_of(
S.getClausesOfKind<OMPOrderedClause>(),
[](const OMPOrderedClause *C) { return C->getNumForLoops(); })) {
ScheduleKind = OMPC_SCHEDULE_static;
llvm::APInt ChunkSize(32, 1);
ChunkExpr = IntegerLiteral::Create(
CGF.getContext(), ChunkSize,
CGF.getContext().getIntTypeForBitwidth(32, 0),
SourceLocation());
}
}
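// Emits an explicit or implicit barrier. Schematically:
//   __kmpc_barrier(&loc, gtid);
// or, inside a cancellable region when checks are requested:
//   if (__kmpc_cancel_barrier(&loc, gtid))
//     <branch through cleanups to the cancellation destination>;
// When the OpenMPIRBuilder is enabled, the builder emits the barrier instead.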
void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind Kind, bool EmitChecks,
bool ForceSimpleCall) {
auto *OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo);
if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
CGF.Builder.restoreIP(OMPBuilder.createBarrier(
CGF.Builder, Kind, ForceSimpleCall, EmitChecks));
return;
}
if (!CGF.HaveInsertPoint())
return;
unsigned Flags = getDefaultFlagsForBarriers(Kind);
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
getThreadID(CGF, Loc)};
if (OMPRegionInfo) {
if (!ForceSimpleCall && OMPRegionInfo->hasCancel()) {
llvm::Value *Result = CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
OMPRTL___kmpc_cancel_barrier),
Args);
if (EmitChecks) {
llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
CGF.EmitBlock(ExitBB);
CodeGenFunction::JumpDest CancelDestination =
CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
CGF.EmitBranchThroughCleanup(CancelDestination);
CGF.EmitBlock(ContBB, true);
}
return;
}
}
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_barrier),
Args);
}
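// Maps a 'schedule' clause kind (plus chunk/ordered information) onto the
// runtime schedule enumeration, e.g. schedule(static) -> OMP_sch_static,
// schedule(static, N) -> OMP_sch_static_chunked, with the OMP_ord_* variants
// used when the loop is ordered. An unknown kind defaults to static.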
static OpenMPSchedType getRuntimeSchedule(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked, bool Ordered) {
switch (ScheduleKind) {
case OMPC_SCHEDULE_static:
return Chunked ? (Ordered ? OMP_ord_static_chunked : OMP_sch_static_chunked)
: (Ordered ? OMP_ord_static : OMP_sch_static);
case OMPC_SCHEDULE_dynamic:
return Ordered ? OMP_ord_dynamic_chunked : OMP_sch_dynamic_chunked;
case OMPC_SCHEDULE_guided:
return Ordered ? OMP_ord_guided_chunked : OMP_sch_guided_chunked;
case OMPC_SCHEDULE_runtime:
return Ordered ? OMP_ord_runtime : OMP_sch_runtime;
case OMPC_SCHEDULE_auto:
return Ordered ? OMP_ord_auto : OMP_sch_auto;
case OMPC_SCHEDULE_unknown:
assert(!Chunked && "chunk was specified but schedule kind not known");
return Ordered ? OMP_ord_static : OMP_sch_static;
}
llvm_unreachable("Unexpected runtime schedule");
}
static OpenMPSchedType
getRuntimeSchedule(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) {
return Chunked ? OMP_dist_sch_static_chunked : OMP_dist_sch_static;
}
bool CGOpenMPRuntime::isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const {
OpenMPSchedType Schedule =
getRuntimeSchedule(ScheduleKind, Chunked, false);
return Schedule == OMP_sch_static;
}
bool CGOpenMPRuntime::isStaticNonchunked(
OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
return Schedule == OMP_dist_sch_static;
}
bool CGOpenMPRuntime::isStaticChunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const {
OpenMPSchedType Schedule =
getRuntimeSchedule(ScheduleKind, Chunked, false);
return Schedule == OMP_sch_static_chunked;
}
bool CGOpenMPRuntime::isStaticChunked(
OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
return Schedule == OMP_dist_sch_static_chunked;
}
bool CGOpenMPRuntime::isDynamic(OpenMPScheduleClauseKind ScheduleKind) const {
OpenMPSchedType Schedule =
getRuntimeSchedule(ScheduleKind, false, false);
assert(Schedule != OMP_sch_static_chunked && "cannot be chunked here");
return Schedule != OMP_sch_static;
}
static int addMonoNonMonoModifier(CodeGenModule &CGM, OpenMPSchedType Schedule,
OpenMPScheduleClauseModifier M1,
OpenMPScheduleClauseModifier M2) {
int Modifier = 0;
switch (M1) {
case OMPC_SCHEDULE_MODIFIER_monotonic:
Modifier = OMP_sch_modifier_monotonic;
break;
case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
Modifier = OMP_sch_modifier_nonmonotonic;
break;
case OMPC_SCHEDULE_MODIFIER_simd:
if (Schedule == OMP_sch_static_chunked)
Schedule = OMP_sch_static_balanced_chunked;
break;
case OMPC_SCHEDULE_MODIFIER_last:
case OMPC_SCHEDULE_MODIFIER_unknown:
break;
}
switch (M2) {
case OMPC_SCHEDULE_MODIFIER_monotonic:
Modifier = OMP_sch_modifier_monotonic;
break;
case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
Modifier = OMP_sch_modifier_nonmonotonic;
break;
case OMPC_SCHEDULE_MODIFIER_simd:
if (Schedule == OMP_sch_static_chunked)
Schedule = OMP_sch_static_balanced_chunked;
break;
case OMPC_SCHEDULE_MODIFIER_last:
case OMPC_SCHEDULE_MODIFIER_unknown:
break;
}
if (CGM.getLangOpts().OpenMP >= 50 && Modifier == 0) {
if (!(Schedule == OMP_sch_static_chunked || Schedule == OMP_sch_static ||
Schedule == OMP_sch_static_balanced_chunked ||
Schedule == OMP_ord_static_chunked || Schedule == OMP_ord_static ||
Schedule == OMP_dist_sch_static_chunked ||
Schedule == OMP_dist_sch_static))
Modifier = OMP_sch_modifier_nonmonotonic;
}
return Schedule | Modifier;
}
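// Emits the dispatch initialization for dynamically scheduled worksharing
// loops. Roughly (the exact runtime entry depends on IVSize/IVSigned):
//   __kmpc_dispatch_init_{4,4u,8,8u}(&loc, gtid, schedule, lb, ub, stride,
//                                    chunk);
// where 'schedule' already carries the monotonic/nonmonotonic modifier bits
// added by addMonoNonMonoModifier.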
void CGOpenMPRuntime::emitForDispatchInit(
CodeGenFunction &CGF, SourceLocation Loc,
const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
bool Ordered, const DispatchRTInput &DispatchValues) {
if (!CGF.HaveInsertPoint())
return;
OpenMPSchedType Schedule = getRuntimeSchedule(
ScheduleKind.Schedule, DispatchValues.Chunk != nullptr, Ordered);
assert(Ordered ||
(Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked &&
Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked &&
Schedule != OMP_sch_static_balanced_chunked));
llvm::Value *Chunk = DispatchValues.Chunk ? DispatchValues.Chunk
: CGF.Builder.getIntN(IVSize, 1);
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc),
getThreadID(CGF, Loc),
CGF.Builder.getInt32(addMonoNonMonoModifier(
CGM, Schedule, ScheduleKind.M1, ScheduleKind.M2)), // Schedule type
DispatchValues.LB,                                  // Lower
DispatchValues.UB,                                  // Upper
CGF.Builder.getIntN(IVSize, 1),                     // Stride
Chunk                                               // Chunk
};
CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args);
}
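// Shared helper that emits the static worksharing initialization call.
// Roughly (the entry name again depends on the induction variable width/sign):
//   __kmpc_for_static_init_{4,4u,8,8u}(&loc, gtid, schedtype, &is_last,
//                                      &lb, &ub, &stride, incr, chunk);
// For non-chunked static schedules the chunk argument defaults to 1.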
static void emitForStaticInitCall(
CodeGenFunction &CGF, llvm::Value *UpdateLocation, llvm::Value *ThreadId,
llvm::FunctionCallee ForStaticInitFunction, OpenMPSchedType Schedule,
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
const CGOpenMPRuntime::StaticRTInput &Values) {
if (!CGF.HaveInsertPoint())
return;
assert(!Values.Ordered);
assert(Schedule == OMP_sch_static || Schedule == OMP_sch_static_chunked ||
Schedule == OMP_sch_static_balanced_chunked ||
Schedule == OMP_ord_static || Schedule == OMP_ord_static_chunked ||
Schedule == OMP_dist_sch_static ||
Schedule == OMP_dist_sch_static_chunked);
llvm::Value *Chunk = Values.Chunk;
if (Chunk == nullptr) {
assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static ||
Schedule == OMP_dist_sch_static) &&
"expected static non-chunked schedule");
Chunk = CGF.Builder.getIntN(Values.IVSize, 1);
} else {
assert((Schedule == OMP_sch_static_chunked ||
Schedule == OMP_sch_static_balanced_chunked ||
Schedule == OMP_ord_static_chunked ||
Schedule == OMP_dist_sch_static_chunked) &&
"expected static chunked schedule");
}
llvm::Value *Args[] = {
UpdateLocation,
ThreadId,
CGF.Builder.getInt32(addMonoNonMonoModifier(CGF.CGM, Schedule, M1,
M2)),                                  // Schedule type
Values.IL.getPointer(),                // &isLastIter
Values.LB.getPointer(),                // &LB
Values.UB.getPointer(),                // &UB
Values.ST.getPointer(),                // &Stride
CGF.Builder.getIntN(Values.IVSize, 1), // Incr
Chunk                                  // Chunk
};
CGF.EmitRuntimeCall(ForStaticInitFunction, Args);
}
void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDirectiveKind DKind,
const OpenMPScheduleTy &ScheduleKind,
const StaticRTInput &Values) {
OpenMPSchedType ScheduleNum = getRuntimeSchedule(
ScheduleKind.Schedule, Values.Chunk != nullptr, Values.Ordered);
assert(isOpenMPWorksharingDirective(DKind) &&
"Expected loop-based or sections-based directive.");
llvm::Value *UpdatedLocation = emitUpdateLocation(CGF, Loc,
isOpenMPLoopDirective(DKind)
? OMP_IDENT_WORK_LOOP
: OMP_IDENT_WORK_SECTIONS);
llvm::Value *ThreadId = getThreadID(CGF, Loc);
llvm::FunctionCallee StaticInitFunction =
createForStaticInitFunction(Values.IVSize, Values.IVSigned, false);
auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
ScheduleNum, ScheduleKind.M1, ScheduleKind.M2, Values);
}
void CGOpenMPRuntime::emitDistributeStaticInit(
CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDistScheduleClauseKind SchedKind,
const CGOpenMPRuntime::StaticRTInput &Values) {
OpenMPSchedType ScheduleNum =
getRuntimeSchedule(SchedKind, Values.Chunk != nullptr);
llvm::Value *UpdatedLocation =
emitUpdateLocation(CGF, Loc, OMP_IDENT_WORK_DISTRIBUTE);
llvm::Value *ThreadId = getThreadID(CGF, Loc);
llvm::FunctionCallee StaticInitFunction;
bool isGPUDistribute =
CGM.getLangOpts().OpenMPIsDevice &&
(CGM.getTriple().isAMDGCN() || CGM.getTriple().isNVPTX());
StaticInitFunction = createForStaticInitFunction(
Values.IVSize, Values.IVSigned, isGPUDistribute);
emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
ScheduleNum, OMPC_SCHEDULE_MODIFIER_unknown,
OMPC_SCHEDULE_MODIFIER_unknown, Values);
}
void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDirectiveKind DKind) {
if (!CGF.HaveInsertPoint())
return;
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc,
isOpenMPDistributeDirective(DKind)
? OMP_IDENT_WORK_DISTRIBUTE
: isOpenMPLoopDirective(DKind)
? OMP_IDENT_WORK_LOOP
: OMP_IDENT_WORK_SECTIONS),
getThreadID(CGF, Loc)};
auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
if (isOpenMPDistributeDirective(DKind) && CGM.getLangOpts().OpenMPIsDevice &&
(CGM.getTriple().isAMDGCN() || CGM.getTriple().isNVPTX()))
CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_distribute_static_fini),
Args);
else
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_for_static_fini),
Args);
}
void CGOpenMPRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
SourceLocation Loc,
unsigned IVSize,
bool IVSigned) {
if (!CGF.HaveInsertPoint())
return;
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
CGF.EmitRuntimeCall(createDispatchFiniFunction(IVSize, IVSigned), Args);
}
llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF,
SourceLocation Loc, unsigned IVSize,
bool IVSigned, Address IL,
Address LB, Address UB,
Address ST) {
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc),
getThreadID(CGF, Loc),
IL.getPointer(), // &isLastIter
LB.getPointer(), // &Lower
UB.getPointer(), // &Upper
ST.getPointer()  // &Stride
};
llvm::Value *Call =
CGF.EmitRuntimeCall(createDispatchNextFunction(IVSize, IVSigned), Args);
return CGF.EmitScalarConversion(
Call, CGF.getContext().getIntTypeForBitwidth(32, 1),
CGF.getContext().BoolTy, Loc);
}
void CGOpenMPRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
llvm::Value *NumThreads,
SourceLocation Loc) {
if (!CGF.HaveInsertPoint())
return;
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, true)};
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_push_num_threads),
Args);
}
void CGOpenMPRuntime::emitProcBindClause(CodeGenFunction &CGF,
ProcBindKind ProcBind,
SourceLocation Loc) {
if (!CGF.HaveInsertPoint())
return;
assert(ProcBind != OMP_PROC_BIND_unknown && "Unsupported proc_bind value.");
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
llvm::ConstantInt::get(CGM.IntTy, unsigned(ProcBind), true)};
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_push_proc_bind),
Args);
}
void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
SourceLocation Loc, llvm::AtomicOrdering AO) {
if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
OMPBuilder.createFlush(CGF.Builder);
} else {
if (!CGF.HaveInsertPoint())
return;
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_flush),
emitUpdateLocation(CGF, Loc));
}
}
namespace {
enum KmpTaskTFields {
KmpTaskTShareds,
KmpTaskTRoutine,
KmpTaskTPartId,
Data1,
Data2,
KmpTaskTLowerBound,
KmpTaskTUpperBound,
KmpTaskTStride,
KmpTaskTLastIter,
KmpTaskTReductions,
};
}
bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::empty() const {
return OffloadEntriesTargetRegion.empty() &&
OffloadEntriesDeviceGlobalVar.empty();
}
void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum,
unsigned Order) {
assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
"only required for the device "
"code generation.");
OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] =
OffloadEntryInfoTargetRegion(Order, nullptr, nullptr,
OMPTargetRegionEntryTargetRegion);
++OffloadingEntriesNum;
}
void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum,
llvm::Constant *Addr, llvm::Constant *ID,
OMPTargetRegionEntryKind Flags) {
if (CGM.getLangOpts().OpenMPIsDevice) {
if (!hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum))
return;
auto &Entry =
OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum];
Entry.setAddress(Addr);
Entry.setID(ID);
Entry.setFlags(Flags);
} else {
if (Flags ==
OffloadEntriesInfoManagerTy::OMPTargetRegionEntryTargetRegion &&
hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum,
true))
return;
assert(!hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum) &&
"Target region entry already registered!");
OffloadEntryInfoTargetRegion Entry(OffloadingEntriesNum, Addr, ID, Flags);
OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] = Entry;
++OffloadingEntriesNum;
}
}
bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::hasTargetRegionEntryInfo(
unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum,
bool IgnoreAddressId) const {
auto PerDevice = OffloadEntriesTargetRegion.find(DeviceID);
if (PerDevice == OffloadEntriesTargetRegion.end())
return false;
auto PerFile = PerDevice->second.find(FileID);
if (PerFile == PerDevice->second.end())
return false;
auto PerParentName = PerFile->second.find(ParentName);
if (PerParentName == PerFile->second.end())
return false;
auto PerLine = PerParentName->second.find(LineNum);
if (PerLine == PerParentName->second.end())
return false;
if (!IgnoreAddressId &&
(PerLine->second.getAddress() || PerLine->second.getID()))
return false;
return true;
}
void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::actOnTargetRegionEntriesInfo(
const OffloadTargetRegionEntryInfoActTy &Action) {
for (const auto &D : OffloadEntriesTargetRegion)
for (const auto &F : D.second)
for (const auto &P : F.second)
for (const auto &L : P.second)
Action(D.first, F.first, P.first(), L.first, L.second);
}
void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
initializeDeviceGlobalVarEntryInfo(StringRef Name,
OMPTargetGlobalVarEntryKind Flags,
unsigned Order) {
assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
"only required for the device "
"code generation.");
OffloadEntriesDeviceGlobalVar.try_emplace(Name, Order, Flags);
++OffloadingEntriesNum;
}
void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
CharUnits VarSize,
OMPTargetGlobalVarEntryKind Flags,
llvm::GlobalValue::LinkageTypes Linkage) {
if (CGM.getLangOpts().OpenMPIsDevice) {
if (!hasDeviceGlobalVarEntryInfo(VarName))
return;
auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
if (Entry.getAddress() && hasDeviceGlobalVarEntryInfo(VarName)) {
if (Entry.getVarSize().isZero()) {
Entry.setVarSize(VarSize);
Entry.setLinkage(Linkage);
}
return;
}
Entry.setVarSize(VarSize);
Entry.setLinkage(Linkage);
Entry.setAddress(Addr);
} else {
if (hasDeviceGlobalVarEntryInfo(VarName)) {
auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
assert(Entry.isValid() && Entry.getFlags() == Flags &&
"Entry not initialized!");
if (Entry.getVarSize().isZero()) {
Entry.setVarSize(VarSize);
Entry.setLinkage(Linkage);
}
return;
}
OffloadEntriesDeviceGlobalVar.try_emplace(
VarName, OffloadingEntriesNum, Addr, VarSize, Flags, Linkage);
++OffloadingEntriesNum;
}
}
void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
actOnDeviceGlobalVarEntriesInfo(
const OffloadDeviceGlobalVarEntryInfoActTy &Action) {
for (const auto &E : OffloadEntriesDeviceGlobalVar)
Action(E.getKey(), E.getValue());
}
void CGOpenMPRuntime::createOffloadEntry(
llvm::Constant *ID, llvm::Constant *Addr, uint64_t Size, int32_t Flags,
llvm::GlobalValue::LinkageTypes Linkage) {
OMPBuilder.emitOffloadingEntry(ID, Addr->getName(), Size, Flags);
}
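// Emits the !omp_offload.info named metadata that the host and device
// compilations use to agree on offload entry ordering. Each target region
// operand is roughly {kind, device-id, file-id, parent-name, line, order} and
// each declare-target global operand is {kind, mangled-name, flags, order},
// matching what loadOffloadInfoMetadata parses below. Entries with a missing
// address or ID are diagnosed here.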
void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
if (CGM.getLangOpts().OpenMPSimd || OffloadEntriesInfoManager.empty())
return;
llvm::Module &M = CGM.getModule();
llvm::LLVMContext &C = M.getContext();
SmallVector<std::tuple<const OffloadEntriesInfoManagerTy::OffloadEntryInfo *,
SourceLocation, StringRef>,
16>
OrderedEntries(OffloadEntriesInfoManager.size());
llvm::SmallVector<StringRef, 16> ParentFunctions(
OffloadEntriesInfoManager.size());
auto &&GetMDInt = [this](unsigned V) {
return llvm::ConstantAsMetadata::get(
llvm::ConstantInt::get(CGM.Int32Ty, V));
};
auto &&GetMDString = [&C](StringRef V) { return llvm::MDString::get(C, V); };
llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("omp_offload.info");
auto &&TargetRegionMetadataEmitter =
[this, &C, MD, &OrderedEntries, &ParentFunctions, &GetMDInt,
&GetMDString](
unsigned DeviceID, unsigned FileID, StringRef ParentName,
unsigned Line,
const OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion &E) {
llvm::Metadata *Ops[] = {GetMDInt(E.getKind()), GetMDInt(DeviceID),
GetMDInt(FileID), GetMDString(ParentName),
GetMDInt(Line), GetMDInt(E.getOrder())};
SourceLocation Loc;
for (auto I = CGM.getContext().getSourceManager().fileinfo_begin(),
E = CGM.getContext().getSourceManager().fileinfo_end();
I != E; ++I) {
if (I->getFirst()->getUniqueID().getDevice() == DeviceID &&
I->getFirst()->getUniqueID().getFile() == FileID) {
Loc = CGM.getContext().getSourceManager().translateFileLineCol(
I->getFirst(), Line, 1);
break;
}
}
OrderedEntries[E.getOrder()] = std::make_tuple(&E, Loc, ParentName);
ParentFunctions[E.getOrder()] = ParentName;
MD->addOperand(llvm::MDNode::get(C, Ops));
};
OffloadEntriesInfoManager.actOnTargetRegionEntriesInfo(
TargetRegionMetadataEmitter);
auto &&DeviceGlobalVarMetadataEmitter =
[&C, &OrderedEntries, &GetMDInt, &GetMDString,
MD](StringRef MangledName,
const OffloadEntriesInfoManagerTy::OffloadEntryInfoDeviceGlobalVar
&E) {
llvm::Metadata *Ops[] = {
GetMDInt(E.getKind()), GetMDString(MangledName),
GetMDInt(E.getFlags()), GetMDInt(E.getOrder())};
OrderedEntries[E.getOrder()] =
std::make_tuple(&E, SourceLocation(), MangledName);
MD->addOperand(llvm::MDNode::get(C, Ops));
};
OffloadEntriesInfoManager.actOnDeviceGlobalVarEntriesInfo(
DeviceGlobalVarMetadataEmitter);
for (const auto &E : OrderedEntries) {
assert(std::get<0>(E) && "All ordered entries must exist!");
if (const auto *CE =
dyn_cast<OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion>(
std::get<0>(E))) {
if (!CE->getID() || !CE->getAddress()) {
StringRef FnName = ParentFunctions[CE->getOrder()];
if (!CGM.GetGlobalValue(FnName))
continue;
unsigned DiagID = CGM.getDiags().getCustomDiagID(
DiagnosticsEngine::Error,
"Offloading entry for target region in %0 is incorrect: either the "
"address or the ID is invalid.");
CGM.getDiags().Report(std::get<1>(E), DiagID) << FnName;
continue;
}
createOffloadEntry(CE->getID(), CE->getAddress(), 0,
CE->getFlags(), llvm::GlobalValue::WeakAnyLinkage);
} else if (const auto *CE = dyn_cast<OffloadEntriesInfoManagerTy::
OffloadEntryInfoDeviceGlobalVar>(
std::get<0>(E))) {
OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags =
static_cast<OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind>(
CE->getFlags());
switch (Flags) {
case OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo: {
if (CGM.getLangOpts().OpenMPIsDevice &&
CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())
continue;
if (!CE->getAddress()) {
unsigned DiagID = CGM.getDiags().getCustomDiagID(
DiagnosticsEngine::Error, "Offloading entry for declare target "
"variable %0 is incorrect: the "
"address is invalid.");
CGM.getDiags().Report(std::get<1>(E), DiagID) << std::get<2>(E);
continue;
}
if (CE->getVarSize().isZero())
continue;
break;
}
case OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink:
assert(((CGM.getLangOpts().OpenMPIsDevice && !CE->getAddress()) ||
(!CGM.getLangOpts().OpenMPIsDevice && CE->getAddress())) &&
"Declaret target link address is set.");
if (CGM.getLangOpts().OpenMPIsDevice)
continue;
if (!CE->getAddress()) {
unsigned DiagID = CGM.getDiags().getCustomDiagID(
DiagnosticsEngine::Error,
"Offloading entry for declare target variable is incorrect: the "
"address is invalid.");
CGM.getDiags().Report(DiagID);
continue;
}
break;
}
if (auto *GV = dyn_cast<llvm::GlobalValue>(CE->getAddress()))
if (GV->hasLocalLinkage() || GV->hasHiddenVisibility())
continue;
createOffloadEntry(CE->getAddress(), CE->getAddress(),
CE->getVarSize().getQuantity(), Flags,
CE->getLinkage());
} else {
llvm_unreachable("Unsupported entry kind.");
}
}
}
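// On the device side, parses the host IR file (-fopenmp-host-ir-file-path) and
// replays its !omp_offload.info metadata so that target region and
// declare-target entries are initialized in the same order as on the host.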
void CGOpenMPRuntime::loadOffloadInfoMetadata() {
if (!CGM.getLangOpts().OpenMPIsDevice)
return;
if (CGM.getLangOpts().OMPHostIRFile.empty())
return;
auto Buf = llvm::MemoryBuffer::getFile(CGM.getLangOpts().OMPHostIRFile);
if (auto EC = Buf.getError()) {
CGM.getDiags().Report(diag::err_cannot_open_file)
<< CGM.getLangOpts().OMPHostIRFile << EC.message();
return;
}
llvm::LLVMContext C;
auto ME = expectedToErrorOrAndEmitErrors(
C, llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C));
if (auto EC = ME.getError()) {
unsigned DiagID = CGM.getDiags().getCustomDiagID(
DiagnosticsEngine::Error, "Unable to parse host IR file '%0':'%1'");
CGM.getDiags().Report(DiagID)
<< CGM.getLangOpts().OMPHostIRFile << EC.message();
return;
}
llvm::NamedMDNode *MD = ME.get()->getNamedMetadata("omp_offload.info");
if (!MD)
return;
for (llvm::MDNode *MN : MD->operands()) {
auto &&GetMDInt = [MN](unsigned Idx) {
auto *V = cast<llvm::ConstantAsMetadata>(MN->getOperand(Idx));
return cast<llvm::ConstantInt>(V->getValue())->getZExtValue();
};
auto &&GetMDString = [MN](unsigned Idx) {
auto *V = cast<llvm::MDString>(MN->getOperand(Idx));
return V->getString();
};
switch (GetMDInt(0)) {
default:
llvm_unreachable("Unexpected metadata!");
break;
case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
OffloadingEntryInfoTargetRegion:
OffloadEntriesInfoManager.initializeTargetRegionEntryInfo(
GetMDInt(1), GetMDInt(2),
GetMDString(3), GetMDInt(4),
GetMDInt(5));
break;
case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
OffloadingEntryInfoDeviceGlobalVar:
OffloadEntriesInfoManager.initializeDeviceGlobalVarEntryInfo(
GetMDString(1),
static_cast<OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind>(
GetMDInt(2)),
GetMDInt(3));
break;
}
}
}
void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
if (!KmpRoutineEntryPtrTy) {
ASTContext &C = CGM.getContext();
QualType KmpRoutineEntryTyArgs[] = {KmpInt32Ty, C.VoidPtrTy};
FunctionProtoType::ExtProtoInfo EPI;
KmpRoutineEntryPtrQTy = C.getPointerType(
C.getFunctionType(KmpInt32Ty, KmpRoutineEntryTyArgs, EPI));
KmpRoutineEntryPtrTy = CGM.getTypes().ConvertType(KmpRoutineEntryPtrQTy);
}
}
namespace {
struct PrivateHelpersTy {
PrivateHelpersTy(const Expr *OriginalRef, const VarDecl *Original,
const VarDecl *PrivateCopy, const VarDecl *PrivateElemInit)
: OriginalRef(OriginalRef), Original(Original), PrivateCopy(PrivateCopy),
PrivateElemInit(PrivateElemInit) {}
PrivateHelpersTy(const VarDecl *Original) : Original(Original) {}
const Expr *OriginalRef = nullptr;
const VarDecl *Original = nullptr;
const VarDecl *PrivateCopy = nullptr;
const VarDecl *PrivateElemInit = nullptr;
bool isLocalPrivate() const {
return !OriginalRef && !PrivateCopy && !PrivateElemInit;
}
};
typedef std::pair<CharUnits, PrivateHelpersTy> PrivateDataTy;
}
static bool isAllocatableDecl(const VarDecl *VD) {
const VarDecl *CVD = VD->getCanonicalDecl();
if (!CVD->hasAttr<OMPAllocateDeclAttr>())
return false;
const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
return !(AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc &&
!AA->getAllocator());
}
static RecordDecl *
createPrivatesRecordDecl(CodeGenModule &CGM, ArrayRef<PrivateDataTy> Privates) {
if (!Privates.empty()) {
ASTContext &C = CGM.getContext();
RecordDecl *RD = C.buildImplicitRecord(".kmp_privates.t");
RD->startDefinition();
for (const auto &Pair : Privates) {
const VarDecl *VD = Pair.second.Original;
QualType Type = VD->getType().getNonReferenceType();
if (Pair.second.isLocalPrivate()) {
if (VD->getType()->isLValueReferenceType())
Type = C.getPointerType(Type);
if (isAllocatableDecl(VD))
Type = C.getPointerType(Type);
}
FieldDecl *FD = addFieldToRecordDecl(C, RD, Type);
if (VD->hasAttrs()) {
for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
E(VD->getAttrs().end());
I != E; ++I)
FD->addAttr(*I);
}
}
RD->completeDefinition();
return RD;
}
return nullptr;
}
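// Builds the implicit record mirroring the runtime's kmp_task_t (field names
// shown for orientation only; only the layout below matters):
//   struct kmp_task_t {
//     void *shareds;
//     kmp_routine_entry_t routine;
//     kmp_int32 part_id;
//     kmp_cmplrdata_t data1;
//     kmp_cmplrdata_t data2;
//     // taskloop directives only:
//     kmp_uint64 lb; kmp_uint64 ub; kmp_int64 st;
//     kmp_int32 liter; void *reductions;
//   };
// Field order must stay in sync with the KmpTaskTFields enum above.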
static RecordDecl *
createKmpTaskTRecordDecl(CodeGenModule &CGM, OpenMPDirectiveKind Kind,
QualType KmpInt32Ty,
QualType KmpRoutineEntryPointerQTy) {
ASTContext &C = CGM.getContext();
RecordDecl *UD = C.buildImplicitRecord("kmp_cmplrdata_t", TTK_Union);
UD->startDefinition();
addFieldToRecordDecl(C, UD, KmpInt32Ty);
addFieldToRecordDecl(C, UD, KmpRoutineEntryPointerQTy);
UD->completeDefinition();
QualType KmpCmplrdataTy = C.getRecordType(UD);
RecordDecl *RD = C.buildImplicitRecord("kmp_task_t");
RD->startDefinition();
addFieldToRecordDecl(C, RD, C.VoidPtrTy);
addFieldToRecordDecl(C, RD, KmpRoutineEntryPointerQTy);
addFieldToRecordDecl(C, RD, KmpInt32Ty);
addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
if (isOpenMPTaskLoopDirective(Kind)) {
QualType KmpUInt64Ty =
CGM.getContext().getIntTypeForBitwidth(64, 0);
QualType KmpInt64Ty =
CGM.getContext().getIntTypeForBitwidth(64, 1);
addFieldToRecordDecl(C, RD, KmpUInt64Ty);
addFieldToRecordDecl(C, RD, KmpUInt64Ty);
addFieldToRecordDecl(C, RD, KmpInt64Ty);
addFieldToRecordDecl(C, RD, KmpInt32Ty);
addFieldToRecordDecl(C, RD, C.VoidPtrTy);
}
RD->completeDefinition();
return RD;
}
static RecordDecl *
createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy,
ArrayRef<PrivateDataTy> Privates) {
ASTContext &C = CGM.getContext();
RecordDecl *RD = C.buildImplicitRecord("kmp_task_t_with_privates");
RD->startDefinition();
addFieldToRecordDecl(C, RD, KmpTaskTQTy);
if (const RecordDecl *PrivateRD = createPrivatesRecordDecl(CGM, Privates))
addFieldToRecordDecl(C, RD, C.getRecordType(PrivateRD));
RD->completeDefinition();
return RD;
}
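// Emits the proxy task entry invoked by the runtime. Schematically:
//   kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t_with_privates *tt) {
//     TaskFunction(gtid, tt->task.part_id, &tt->privates, task_privates_map,
//                  tt,
//                  [taskloop: tt->task.lb, tt->task.ub, tt->task.st,
//                   tt->task.liter, tt->task.reductions,]
//                  tt->task.shareds);
//     return 0;
//   }
// Illustrative names; the actual argument order is built in CallArgs below and
// the symbol comes from getName({"omp_task_entry", ""}).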
static llvm::Function *
emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
OpenMPDirectiveKind Kind, QualType KmpInt32Ty,
QualType KmpTaskTWithPrivatesPtrQTy,
QualType KmpTaskTWithPrivatesQTy, QualType KmpTaskTQTy,
QualType SharedsPtrTy, llvm::Function *TaskFunction,
llvm::Value *TaskPrivatesMap) {
ASTContext &C = CGM.getContext();
FunctionArgList Args;
ImplicitParamDecl GtidArg(C, nullptr, Loc, nullptr, KmpInt32Ty,
ImplicitParamDecl::Other);
ImplicitParamDecl TaskTypeArg(C, nullptr, Loc, nullptr,
KmpTaskTWithPrivatesPtrQTy.withRestrict(),
ImplicitParamDecl::Other);
Args.push_back(&GtidArg);
Args.push_back(&TaskTypeArg);
const auto &TaskEntryFnInfo =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
llvm::FunctionType *TaskEntryTy =
CGM.getTypes().GetFunctionType(TaskEntryFnInfo);
std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_entry", ""});
auto *TaskEntry = llvm::Function::Create(
TaskEntryTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskEntry, TaskEntryFnInfo);
TaskEntry->setDoesNotRecurse();
CodeGenFunction CGF(CGM);
CGF.StartFunction(GlobalDecl(), KmpInt32Ty, TaskEntry, TaskEntryFnInfo, Args,
Loc, Loc);
llvm::Value *GtidParam = CGF.EmitLoadOfScalar(
CGF.GetAddrOfLocalVar(&GtidArg), false, KmpInt32Ty, Loc);
LValue TDBase = CGF.EmitLoadOfPointerLValue(
CGF.GetAddrOfLocalVar(&TaskTypeArg),
KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
const auto *KmpTaskTWithPrivatesQTyRD =
cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
LValue Base =
CGF.EmitLValueForField(TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
LValue PartIdLVal = CGF.EmitLValueForField(Base, *PartIdFI);
llvm::Value *PartidParam = PartIdLVal.getPointer(CGF);
auto SharedsFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds);
LValue SharedsLVal = CGF.EmitLValueForField(Base, *SharedsFI);
llvm::Value *SharedsParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.EmitLoadOfScalar(SharedsLVal, Loc),
CGF.ConvertTypeForMem(SharedsPtrTy));
auto PrivatesFI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
llvm::Value *PrivatesParam;
if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) {
LValue PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI);
PrivatesParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
PrivatesLVal.getPointer(CGF), CGF.VoidPtrTy);
} else {
PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
}
llvm::Value *CommonArgs[] = {
GtidParam, PartidParam, PrivatesParam, TaskPrivatesMap,
CGF.Builder
.CreatePointerBitCastOrAddrSpaceCast(TDBase.getAddress(CGF),
CGF.VoidPtrTy, CGF.Int8Ty)
.getPointer()};
SmallVector<llvm::Value *, 16> CallArgs(std::begin(CommonArgs),
std::end(CommonArgs));
if (isOpenMPTaskLoopDirective(Kind)) {
auto LBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound);
LValue LBLVal = CGF.EmitLValueForField(Base, *LBFI);
llvm::Value *LBParam = CGF.EmitLoadOfScalar(LBLVal, Loc);
auto UBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound);
LValue UBLVal = CGF.EmitLValueForField(Base, *UBFI);
llvm::Value *UBParam = CGF.EmitLoadOfScalar(UBLVal, Loc);
auto StFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTStride);
LValue StLVal = CGF.EmitLValueForField(Base, *StFI);
llvm::Value *StParam = CGF.EmitLoadOfScalar(StLVal, Loc);
auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
llvm::Value *LIParam = CGF.EmitLoadOfScalar(LILVal, Loc);
auto RFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTReductions);
LValue RLVal = CGF.EmitLValueForField(Base, *RFI);
llvm::Value *RParam = CGF.EmitLoadOfScalar(RLVal, Loc);
CallArgs.push_back(LBParam);
CallArgs.push_back(UBParam);
CallArgs.push_back(StParam);
CallArgs.push_back(LIParam);
CallArgs.push_back(RParam);
}
CallArgs.push_back(SharedsParam);
CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskFunction,
CallArgs);
CGF.EmitStoreThroughLValue(RValue::get(CGF.Builder.getInt32(0)),
CGF.MakeAddrLValue(CGF.ReturnValue, KmpInt32Ty));
CGF.FinishFunction();
return TaskEntry;
}
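// Emits the task destructor thunk, which runs the destructor of every
// non-trivially-destructible field of the task's privates block.
// Schematically:
//   kmp_int32 .omp_task_destructor.(kmp_int32 gtid,
//                                   kmp_task_t_with_privates *tt);
// Illustrative signature; it mirrors the proxy task entry above.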
static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
SourceLocation Loc,
QualType KmpInt32Ty,
QualType KmpTaskTWithPrivatesPtrQTy,
QualType KmpTaskTWithPrivatesQTy) {
ASTContext &C = CGM.getContext();
FunctionArgList Args;
ImplicitParamDecl GtidArg(C, nullptr, Loc, nullptr, KmpInt32Ty,
ImplicitParamDecl::Other);
ImplicitParamDecl TaskTypeArg(C, nullptr, Loc, nullptr,
KmpTaskTWithPrivatesPtrQTy.withRestrict(),
ImplicitParamDecl::Other);
Args.push_back(&GtidArg);
Args.push_back(&TaskTypeArg);
const auto &DestructorFnInfo =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
llvm::FunctionType *DestructorFnTy =
CGM.getTypes().GetFunctionType(DestructorFnInfo);
std::string Name =
CGM.getOpenMPRuntime().getName({"omp_task_destructor", ""});
auto *DestructorFn =
llvm::Function::Create(DestructorFnTy, llvm::GlobalValue::InternalLinkage,
Name, &CGM.getModule());
CGM.SetInternalFunctionAttributes(GlobalDecl(), DestructorFn,
DestructorFnInfo);
DestructorFn->setDoesNotRecurse();
CodeGenFunction CGF(CGM);
CGF.StartFunction(GlobalDecl(), KmpInt32Ty, DestructorFn, DestructorFnInfo,
Args, Loc, Loc);
LValue Base = CGF.EmitLoadOfPointerLValue(
CGF.GetAddrOfLocalVar(&TaskTypeArg),
KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
const auto *KmpTaskTWithPrivatesQTyRD =
cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
Base = CGF.EmitLValueForField(Base, *FI);
for (const auto *Field :
cast<RecordDecl>(FI->getType()->getAsTagDecl())->fields()) {
if (QualType::DestructionKind DtorKind =
Field->getType().isDestructedType()) {
LValue FieldLValue = CGF.EmitLValueForField(Base, Field);
CGF.pushDestroy(DtorKind, FieldLValue.getAddress(CGF), Field->getType());
}
}
CGF.FinishFunction();
return DestructorFn;
}
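// Emits the 'privates map' helper passed to the runtime alongside the task
// entry. Given a pointer to the privates block and one output pointer per
// private/firstprivate/lastprivate/local variable, it stores the address of
// each private field through the corresponding output pointer, roughly:
//   void .omp_task_privates_map.(.kmp_privates.t *privs,
//                                <ty1> **priv1, <ty2> **priv2, ...) {
//     *priv1 = &privs->field1; *priv2 = &privs->field2; ...
//   }
// Illustrative names; the parameter order follows Data.PrivateVars,
// FirstprivateVars, LastprivateVars and PrivateLocals.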
static llvm::Value *
emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
const OMPTaskDataTy &Data, QualType PrivatesQTy,
ArrayRef<PrivateDataTy> Privates) {
ASTContext &C = CGM.getContext();
FunctionArgList Args;
ImplicitParamDecl TaskPrivatesArg(
C, nullptr, Loc, nullptr,
C.getPointerType(PrivatesQTy).withConst().withRestrict(),
ImplicitParamDecl::Other);
Args.push_back(&TaskPrivatesArg);
llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, unsigned> PrivateVarsPos;
unsigned Counter = 1;
for (const Expr *E : Data.PrivateVars) {
Args.push_back(ImplicitParamDecl::Create(
C, nullptr, Loc, nullptr,
C.getPointerType(C.getPointerType(E->getType()))
.withConst()
.withRestrict(),
ImplicitParamDecl::Other));
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
PrivateVarsPos[VD] = Counter;
++Counter;
}
for (const Expr *E : Data.FirstprivateVars) {
Args.push_back(ImplicitParamDecl::Create(
C, nullptr, Loc, nullptr,
C.getPointerType(C.getPointerType(E->getType()))
.withConst()
.withRestrict(),
ImplicitParamDecl::Other));
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
PrivateVarsPos[VD] = Counter;
++Counter;
}
for (const Expr *E : Data.LastprivateVars) {
Args.push_back(ImplicitParamDecl::Create(
C, nullptr, Loc, nullptr,
C.getPointerType(C.getPointerType(E->getType()))
.withConst()
.withRestrict(),
ImplicitParamDecl::Other));
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
PrivateVarsPos[VD] = Counter;
++Counter;
}
for (const VarDecl *VD : Data.PrivateLocals) {
QualType Ty = VD->getType().getNonReferenceType();
if (VD->getType()->isLValueReferenceType())
Ty = C.getPointerType(Ty);
if (isAllocatableDecl(VD))
Ty = C.getPointerType(Ty);
Args.push_back(ImplicitParamDecl::Create(
C, nullptr, Loc, nullptr,
C.getPointerType(C.getPointerType(Ty)).withConst().withRestrict(),
ImplicitParamDecl::Other));
PrivateVarsPos[VD] = Counter;
++Counter;
}
const auto &TaskPrivatesMapFnInfo =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
llvm::FunctionType *TaskPrivatesMapTy =
CGM.getTypes().GetFunctionType(TaskPrivatesMapFnInfo);
std::string Name =
CGM.getOpenMPRuntime().getName({"omp_task_privates_map", ""});
auto *TaskPrivatesMap = llvm::Function::Create(
TaskPrivatesMapTy, llvm::GlobalValue::InternalLinkage, Name,
&CGM.getModule());
CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskPrivatesMap,
TaskPrivatesMapFnInfo);
if (CGM.getLangOpts().Optimize) {
TaskPrivatesMap->removeFnAttr(llvm::Attribute::NoInline);
TaskPrivatesMap->removeFnAttr(llvm::Attribute::OptimizeNone);
TaskPrivatesMap->addFnAttr(llvm::Attribute::AlwaysInline);
}
CodeGenFunction CGF(CGM);
CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskPrivatesMap,
TaskPrivatesMapFnInfo, Args, Loc, Loc);
LValue Base = CGF.EmitLoadOfPointerLValue(
CGF.GetAddrOfLocalVar(&TaskPrivatesArg),
TaskPrivatesArg.getType()->castAs<PointerType>());
const auto *PrivatesQTyRD = cast<RecordDecl>(PrivatesQTy->getAsTagDecl());
Counter = 0;
for (const FieldDecl *Field : PrivatesQTyRD->fields()) {
LValue FieldLVal = CGF.EmitLValueForField(Base, Field);
const VarDecl *VD = Args[PrivateVarsPos[Privates[Counter].second.Original]];
LValue RefLVal =
CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
LValue RefLoadLVal = CGF.EmitLoadOfPointerLValue(
RefLVal.getAddress(CGF), RefLVal.getType()->castAs<PointerType>());
CGF.EmitStoreOfScalar(FieldLVal.getPointer(CGF), RefLoadLVal);
++Counter;
}
CGF.FinishFunction();
return TaskPrivatesMap;
}
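// Initializes the privates block of a freshly created task object (or, when
// ForDup is true, of a duplicated task, where only non-trivial constructions
// are replayed): each private copy with a non-trivial initializer is
// constructed from the corresponding shared/firstprivate value, using
// element-wise copies for array types.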
static void emitPrivatesInit(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
Address KmpTaskSharedsPtr, LValue TDBase,
const RecordDecl *KmpTaskTWithPrivatesQTyRD,
QualType SharedsTy, QualType SharedsPtrTy,
const OMPTaskDataTy &Data,
ArrayRef<PrivateDataTy> Privates, bool ForDup) {
ASTContext &C = CGF.getContext();
auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
LValue PrivatesBase = CGF.EmitLValueForField(TDBase, *FI);
OpenMPDirectiveKind Kind = isOpenMPTaskLoopDirective(D.getDirectiveKind())
? OMPD_taskloop
: OMPD_task;
const CapturedStmt &CS = *D.getCapturedStmt(Kind);
CodeGenFunction::CGCapturedStmtInfo CapturesInfo(CS);
LValue SrcBase;
bool IsTargetTask =
isOpenMPTargetDataManagementDirective(D.getDirectiveKind()) ||
isOpenMPTargetExecutionDirective(D.getDirectiveKind());
if ((!IsTargetTask && !Data.FirstprivateVars.empty() && ForDup) ||
(IsTargetTask && KmpTaskSharedsPtr.isValid())) {
SrcBase = CGF.MakeAddrLValue(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
KmpTaskSharedsPtr, CGF.ConvertTypeForMem(SharedsPtrTy),
CGF.ConvertTypeForMem(SharedsTy)),
SharedsTy);
}
FI = cast<RecordDecl>(FI->getType()->getAsTagDecl())->field_begin();
for (const PrivateDataTy &Pair : Privates) {
if (Pair.second.isLocalPrivate()) {
++FI;
continue;
}
const VarDecl *VD = Pair.second.PrivateCopy;
const Expr *Init = VD->getAnyInitializer();
if (Init && (!ForDup || (isa<CXXConstructExpr>(Init) &&
!CGF.isTrivialInitializer(Init)))) {
LValue PrivateLValue = CGF.EmitLValueForField(PrivatesBase, *FI);
if (const VarDecl *Elem = Pair.second.PrivateElemInit) {
const VarDecl *OriginalVD = Pair.second.Original;
LValue SharedRefLValue;
QualType Type = PrivateLValue.getType();
const FieldDecl *SharedField = CapturesInfo.lookup(OriginalVD);
if (IsTargetTask && !SharedField) {
assert(isa<ImplicitParamDecl>(OriginalVD) &&
isa<CapturedDecl>(OriginalVD->getDeclContext()) &&
cast<CapturedDecl>(OriginalVD->getDeclContext())
->getNumParams() == 0 &&
isa<TranslationUnitDecl>(
cast<CapturedDecl>(OriginalVD->getDeclContext())
->getDeclContext()) &&
"Expected artificial target data variable.");
SharedRefLValue =
CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(OriginalVD), Type);
} else if (ForDup) {
SharedRefLValue = CGF.EmitLValueForField(SrcBase, SharedField);
SharedRefLValue = CGF.MakeAddrLValue(
SharedRefLValue.getAddress(CGF).withAlignment(
C.getDeclAlign(OriginalVD)),
SharedRefLValue.getType(), LValueBaseInfo(AlignmentSource::Decl),
SharedRefLValue.getTBAAInfo());
} else if (CGF.LambdaCaptureFields.count(
Pair.second.Original->getCanonicalDecl()) > 0 ||
isa_and_nonnull<BlockDecl>(CGF.CurCodeDecl)) {
SharedRefLValue = CGF.EmitLValue(Pair.second.OriginalRef);
} else {
InlinedOpenMPRegionRAII Region(
CGF, [](CodeGenFunction &, PrePostActionTy &) {}, OMPD_unknown,
false, true);
SharedRefLValue = CGF.EmitLValue(Pair.second.OriginalRef);
}
if (Type->isArrayType()) {
if (!isa<CXXConstructExpr>(Init) || CGF.isTrivialInitializer(Init)) {
CGF.EmitAggregateAssign(PrivateLValue, SharedRefLValue, Type);
} else {
CGF.EmitOMPAggregateAssign(
PrivateLValue.getAddress(CGF), SharedRefLValue.getAddress(CGF),
Type,
[&CGF, Elem, Init, &CapturesInfo](Address DestElement,
Address SrcElement) {
CodeGenFunction::OMPPrivateScope InitScope(CGF);
InitScope.addPrivate(Elem, SrcElement);
(void)InitScope.Privatize();
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(
CGF, &CapturesInfo);
CGF.EmitAnyExprToMem(Init, DestElement,
Init->getType().getQualifiers(),
false);
});
}
} else {
CodeGenFunction::OMPPrivateScope InitScope(CGF);
InitScope.addPrivate(Elem, SharedRefLValue.getAddress(CGF));
(void)InitScope.Privatize();
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CapturesInfo);
CGF.EmitExprAsInit(Init, VD, PrivateLValue,
/*capturedByInit=*/false);
}
} else {
CGF.EmitExprAsInit(Init, VD, PrivateLValue, /*capturedByInit=*/false);
}
}
++FI;
}
}
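/// Check whether the task duplication function has any initialization to do:
/// returns true if at least one private copy has a non-trivial
/// constructor-based initializer.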
static bool checkInitIsRequired(CodeGenFunction &CGF,
ArrayRef<PrivateDataTy> Privates) {
bool InitRequired = false;
for (const PrivateDataTy &Pair : Privates) {
if (Pair.second.isLocalPrivate())
continue;
const VarDecl *VD = Pair.second.PrivateCopy;
const Expr *Init = VD->getAnyInitializer();
InitRequired = InitRequired || (isa_and_nonnull<CXXConstructExpr>(Init) &&
!CGF.isTrivialInitializer(Init));
if (InitRequired)
break;
}
return InitRequired;
}
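/// Emit the task duplication function used by taskloops to copy-construct the
/// (first|last)private copies and propagate the last-iteration flag. The
/// generated helper has roughly this shape (names are illustrative):
/// \code
/// void .omp_task_dup.(kmp_task_t *task_dst, kmp_task_t *task_src,
///                     int lastpriv) {
///   task_dst->last_iter = lastpriv; // only if WithLastIter
///   // constructor calls for the private copies, reading the firstprivate
///   // originals from task_src->shareds where required
/// }
/// \endcode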
static llvm::Value *
emitTaskDupFunction(CodeGenModule &CGM, SourceLocation Loc,
const OMPExecutableDirective &D,
QualType KmpTaskTWithPrivatesPtrQTy,
const RecordDecl *KmpTaskTWithPrivatesQTyRD,
const RecordDecl *KmpTaskTQTyRD, QualType SharedsTy,
QualType SharedsPtrTy, const OMPTaskDataTy &Data,
ArrayRef<PrivateDataTy> Privates, bool WithLastIter) {
ASTContext &C = CGM.getContext();
FunctionArgList Args;
ImplicitParamDecl DstArg(C, nullptr, Loc, nullptr,
KmpTaskTWithPrivatesPtrQTy,
ImplicitParamDecl::Other);
ImplicitParamDecl SrcArg(C, nullptr, Loc, nullptr,
KmpTaskTWithPrivatesPtrQTy,
ImplicitParamDecl::Other);
ImplicitParamDecl LastprivArg(C, nullptr, Loc, nullptr, C.IntTy,
ImplicitParamDecl::Other);
Args.push_back(&DstArg);
Args.push_back(&SrcArg);
Args.push_back(&LastprivArg);
const auto &TaskDupFnInfo =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
llvm::FunctionType *TaskDupTy = CGM.getTypes().GetFunctionType(TaskDupFnInfo);
std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_dup", ""});
auto *TaskDup = llvm::Function::Create(
TaskDupTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskDup, TaskDupFnInfo);
TaskDup->setDoesNotRecurse();
CodeGenFunction CGF(CGM);
CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskDup, TaskDupFnInfo, Args, Loc,
Loc);
LValue TDBase = CGF.EmitLoadOfPointerLValue(
CGF.GetAddrOfLocalVar(&DstArg),
KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
if (WithLastIter) {
auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
LValue Base = CGF.EmitLValueForField(
TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
llvm::Value *Lastpriv = CGF.EmitLoadOfScalar(
CGF.GetAddrOfLocalVar(&LastprivArg), false, C.IntTy, Loc);
CGF.EmitStoreOfScalar(Lastpriv, LILVal);
}
assert(!Privates.empty());
Address KmpTaskSharedsPtr = Address::invalid();
if (!Data.FirstprivateVars.empty()) {
LValue TDBase = CGF.EmitLoadOfPointerLValue(
CGF.GetAddrOfLocalVar(&SrcArg),
KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
LValue Base = CGF.EmitLValueForField(
TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
KmpTaskSharedsPtr = Address(
CGF.EmitLoadOfScalar(CGF.EmitLValueForField(
Base, *std::next(KmpTaskTQTyRD->field_begin(),
KmpTaskTShareds)),
Loc),
CGF.Int8Ty, CGM.getNaturalTypeAlignment(SharedsTy));
}
emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, TDBase, KmpTaskTWithPrivatesQTyRD,
SharedsTy, SharedsPtrTy, Data, Privates, /*ForDup=*/true);
CGF.FinishFunction();
return TaskDup;
}
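/// Returns true if any non-local private copy has a type that requires
/// destruction, i.e. a destructors thunk must be emitted for the task.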
static bool
checkDestructorsRequired(const RecordDecl *KmpTaskTWithPrivatesQTyRD,
ArrayRef<PrivateDataTy> Privates) {
for (const PrivateDataTy &P : Privates) {
if (P.second.isLocalPrivate())
continue;
QualType Ty = P.second.Original->getType().getNonReferenceType();
if (Ty.isDestructedType())
return true;
}
return false;
}
namespace {
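/// RAII helper that materializes the loop nest described by an
/// OMPIteratorExpr: the constructor privatizes the iterator and counter
/// variables and opens one counting loop per iterator, the destructor emits
/// the counter updates and closes the loops in reverse order.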
class OMPIteratorGeneratorScope final
: public CodeGenFunction::OMPPrivateScope {
CodeGenFunction &CGF;
const OMPIteratorExpr *E = nullptr;
SmallVector<CodeGenFunction::JumpDest, 4> ContDests;
SmallVector<CodeGenFunction::JumpDest, 4> ExitDests;
OMPIteratorGeneratorScope() = delete;
OMPIteratorGeneratorScope(OMPIteratorGeneratorScope &) = delete;
public:
OMPIteratorGeneratorScope(CodeGenFunction &CGF, const OMPIteratorExpr *E)
: CodeGenFunction::OMPPrivateScope(CGF), CGF(CGF), E(E) {
if (!E)
return;
SmallVector<llvm::Value *, 4> Uppers;
for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) {
Uppers.push_back(CGF.EmitScalarExpr(E->getHelper(I).Upper));
const auto *VD = cast<VarDecl>(E->getIteratorDecl(I));
addPrivate(VD, CGF.CreateMemTemp(VD->getType(), VD->getName()));
const OMPIteratorHelperData &HelperData = E->getHelper(I);
addPrivate(
HelperData.CounterVD,
CGF.CreateMemTemp(HelperData.CounterVD->getType(), "counter.addr"));
}
Privatize();
for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) {
const OMPIteratorHelperData &HelperData = E->getHelper(I);
LValue CLVal =
CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(HelperData.CounterVD),
HelperData.CounterVD->getType());
CGF.EmitStoreOfScalar(
llvm::ConstantInt::get(CLVal.getAddress(CGF).getElementType(), 0),
CLVal);
CodeGenFunction::JumpDest &ContDest =
ContDests.emplace_back(CGF.getJumpDestInCurrentScope("iter.cont"));
CodeGenFunction::JumpDest &ExitDest =
ExitDests.emplace_back(CGF.getJumpDestInCurrentScope("iter.exit"));
llvm::Value *N = Uppers[I];
CGF.EmitBlock(ContDest.getBlock());
auto *CVal =
CGF.EmitLoadOfScalar(CLVal, HelperData.CounterVD->getLocation());
llvm::Value *Cmp =
HelperData.CounterVD->getType()->isSignedIntegerOrEnumerationType()
? CGF.Builder.CreateICmpSLT(CVal, N)
: CGF.Builder.CreateICmpULT(CVal, N);
llvm::BasicBlock *BodyBB = CGF.createBasicBlock("iter.body");
CGF.Builder.CreateCondBr(Cmp, BodyBB, ExitDest.getBlock());
CGF.EmitBlock(BodyBB);
CGF.EmitIgnoredExpr(HelperData.Update);
}
}
~OMPIteratorGeneratorScope() {
if (!E)
return;
for (unsigned I = E->numOfIterators(); I > 0; --I) {
const OMPIteratorHelperData &HelperData = E->getHelper(I - 1);
CGF.EmitIgnoredExpr(HelperData.CounterUpdate);
CGF.EmitBranchThroughCleanup(ContDests[I - 1]);
CGF.EmitBlock(ExitDests[I - 1].getBlock(), I == 1);
}
}
};
} // namespace
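/// Return the base pointer and the size in bytes of the storage referenced by
/// \p E. Array-shaping expressions multiply the element size by each
/// dimension, array sections compute (one-past-the-end - begin), and plain
/// lvalues fall back to the size of the expression's type.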
static std::pair<llvm::Value *, llvm::Value *>
getPointerAndSize(CodeGenFunction &CGF, const Expr *E) {
const auto *OASE = dyn_cast<OMPArrayShapingExpr>(E);
llvm::Value *Addr;
if (OASE) {
const Expr *Base = OASE->getBase();
Addr = CGF.EmitScalarExpr(Base);
} else {
Addr = CGF.EmitLValue(E).getPointer(CGF);
}
llvm::Value *SizeVal;
QualType Ty = E->getType();
if (OASE) {
SizeVal = CGF.getTypeSize(OASE->getBase()->getType()->getPointeeType());
for (const Expr *SE : OASE->getDimensions()) {
llvm::Value *Sz = CGF.EmitScalarExpr(SE);
Sz = CGF.EmitScalarConversion(
Sz, SE->getType(), CGF.getContext().getSizeType(), SE->getExprLoc());
SizeVal = CGF.Builder.CreateNUWMul(SizeVal, Sz);
}
} else if (const auto *ASE =
dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
LValue UpAddrLVal =
CGF.EmitOMPArraySectionExpr(ASE, false);
Address UpAddrAddress = UpAddrLVal.getAddress(CGF);
llvm::Value *UpAddr = CGF.Builder.CreateConstGEP1_32(
UpAddrAddress.getElementType(), UpAddrAddress.getPointer(), 1);
llvm::Value *LowIntPtr = CGF.Builder.CreatePtrToInt(Addr, CGF.SizeTy);
llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGF.SizeTy);
SizeVal = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
} else {
SizeVal = CGF.getTypeSize(Ty);
}
return std::make_pair(Addr, SizeVal);
}
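/// Build (once) the implicit record used for affinity clause entries passed
/// to __kmpc_omp_reg_task_with_affinity. Its layout mirrors the runtime's
/// kmp_task_affinity_info_t:
/// \code
/// struct kmp_task_affinity_info_t {
///   intptr_t base_addr;
///   size_t len;
///   unsigned flags; // 32-bit unsigned flags field
/// };
/// \endcode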
static void getKmpAffinityType(ASTContext &C, QualType &KmpTaskAffinityInfoTy) {
QualType FlagsTy = C.getIntTypeForBitwidth(32, false);
if (KmpTaskAffinityInfoTy.isNull()) {
RecordDecl *KmpAffinityInfoRD =
C.buildImplicitRecord("kmp_task_affinity_info_t");
KmpAffinityInfoRD->startDefinition();
addFieldToRecordDecl(C, KmpAffinityInfoRD, C.getIntPtrType());
addFieldToRecordDecl(C, KmpAffinityInfoRD, C.getSizeType());
addFieldToRecordDecl(C, KmpAffinityInfoRD, FlagsTy);
KmpAffinityInfoRD->completeDefinition();
KmpTaskAffinityInfoTy = C.getRecordType(KmpAffinityInfoRD);
}
}
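/// Emit the bookkeeping shared by all task-generating constructs: sort the
/// private copies by alignment, build the kmp_task_t-with-privates record,
/// emit the proxy task entry and the privates mapping function, allocate the
/// task via __kmpc_omp_task_alloc (or the target variant), copy the shareds,
/// initialize the privates and fill in the destructor/priority fields. The
/// pieces the caller still needs are returned in a TaskResultTy.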
CGOpenMPRuntime::TaskResultTy
CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const OMPTaskDataTy &Data) {
ASTContext &C = CGM.getContext();
llvm::SmallVector<PrivateDataTy, 4> Privates;
const auto *I = Data.PrivateCopies.begin();
for (const Expr *E : Data.PrivateVars) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
Privates.emplace_back(
C.getDeclAlign(VD),
PrivateHelpersTy(E, VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
nullptr));
++I;
}
I = Data.FirstprivateCopies.begin();
const auto *IElemInitRef = Data.FirstprivateInits.begin();
for (const Expr *E : Data.FirstprivateVars) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
Privates.emplace_back(
C.getDeclAlign(VD),
PrivateHelpersTy(
E, VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
cast<VarDecl>(cast<DeclRefExpr>(*IElemInitRef)->getDecl())));
++I;
++IElemInitRef;
}
I = Data.LastprivateCopies.begin();
for (const Expr *E : Data.LastprivateVars) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
Privates.emplace_back(
C.getDeclAlign(VD),
PrivateHelpersTy(E, VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
nullptr));
++I;
}
for (const VarDecl *VD : Data.PrivateLocals) {
if (isAllocatableDecl(VD))
Privates.emplace_back(CGM.getPointerAlign(), PrivateHelpersTy(VD));
else
Privates.emplace_back(C.getDeclAlign(VD), PrivateHelpersTy(VD));
}
llvm::stable_sort(Privates,
[](const PrivateDataTy &L, const PrivateDataTy &R) {
return L.first > R.first;
});
QualType KmpInt32Ty = C.getIntTypeForBitwidth(32, 1);
emitKmpRoutineEntryT(KmpInt32Ty);
if (isOpenMPTaskLoopDirective(D.getDirectiveKind())) {
if (SavedKmpTaskloopTQTy.isNull()) {
SavedKmpTaskloopTQTy = C.getRecordType(createKmpTaskTRecordDecl(
CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
}
KmpTaskTQTy = SavedKmpTaskloopTQTy;
} else {
assert((D.getDirectiveKind() == OMPD_task ||
isOpenMPTargetExecutionDirective(D.getDirectiveKind()) ||
isOpenMPTargetDataManagementDirective(D.getDirectiveKind())) &&
"Expected taskloop, task or target directive");
if (SavedKmpTaskTQTy.isNull()) {
SavedKmpTaskTQTy = C.getRecordType(createKmpTaskTRecordDecl(
CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
}
KmpTaskTQTy = SavedKmpTaskTQTy;
}
const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
const RecordDecl *KmpTaskTWithPrivatesQTyRD =
createKmpTaskTWithPrivatesRecordDecl(CGM, KmpTaskTQTy, Privates);
QualType KmpTaskTWithPrivatesQTy = C.getRecordType(KmpTaskTWithPrivatesQTyRD);
QualType KmpTaskTWithPrivatesPtrQTy =
C.getPointerType(KmpTaskTWithPrivatesQTy);
llvm::Type *KmpTaskTWithPrivatesTy = CGF.ConvertType(KmpTaskTWithPrivatesQTy);
llvm::Type *KmpTaskTWithPrivatesPtrTy =
KmpTaskTWithPrivatesTy->getPointerTo();
llvm::Value *KmpTaskTWithPrivatesTySize =
CGF.getTypeSize(KmpTaskTWithPrivatesQTy);
QualType SharedsPtrTy = C.getPointerType(SharedsTy);
llvm::Value *TaskPrivatesMap = nullptr;
llvm::Type *TaskPrivatesMapTy =
std::next(TaskFunction->arg_begin(), 3)->getType();
if (!Privates.empty()) {
auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
TaskPrivatesMap =
emitTaskPrivateMappingFunction(CGM, Loc, Data, FI->getType(), Privates);
TaskPrivatesMap = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
TaskPrivatesMap, TaskPrivatesMapTy);
} else {
TaskPrivatesMap = llvm::ConstantPointerNull::get(
cast<llvm::PointerType>(TaskPrivatesMapTy));
}
llvm::Function *TaskEntry = emitProxyTaskFunction(
CGM, Loc, D.getDirectiveKind(), KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
KmpTaskTWithPrivatesQTy, KmpTaskTQTy, SharedsPtrTy, TaskFunction,
TaskPrivatesMap);
enum {
TiedFlag = 0x1,
FinalFlag = 0x2,
DestructorsFlag = 0x8,
PriorityFlag = 0x20,
DetachableFlag = 0x40,
};
unsigned Flags = Data.Tied ? TiedFlag : 0;
bool NeedsCleanup = false;
if (!Privates.empty()) {
NeedsCleanup =
checkDestructorsRequired(KmpTaskTWithPrivatesQTyRD, Privates);
if (NeedsCleanup)
Flags = Flags | DestructorsFlag;
}
if (Data.Priority.getInt())
Flags = Flags | PriorityFlag;
if (D.hasClausesOfKind<OMPDetachClause>())
Flags = Flags | DetachableFlag;
llvm::Value *TaskFlags =
Data.Final.getPointer()
? CGF.Builder.CreateSelect(Data.Final.getPointer(),
CGF.Builder.getInt32(FinalFlag),
CGF.Builder.getInt32(0))
: CGF.Builder.getInt32(Data.Final.getInt() ? FinalFlag : 0);
TaskFlags = CGF.Builder.CreateOr(TaskFlags, CGF.Builder.getInt32(Flags));
llvm::Value *SharedsSize = CGM.getSize(C.getTypeSizeInChars(SharedsTy));
SmallVector<llvm::Value *, 8> AllocArgs = {emitUpdateLocation(CGF, Loc),
getThreadID(CGF, Loc), TaskFlags, KmpTaskTWithPrivatesTySize,
SharedsSize, CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
TaskEntry, KmpRoutineEntryPtrTy)};
llvm::Value *NewTask;
if (D.hasClausesOfKind<OMPNowaitClause>()) {
const Expr *Device = nullptr;
if (auto *C = D.getSingleClause<OMPDeviceClause>())
Device = C->getDevice();
llvm::Value *DeviceID;
if (Device)
DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
CGF.Int64Ty, true);
else
DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
AllocArgs.push_back(DeviceID);
NewTask = CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_omp_target_task_alloc),
AllocArgs);
} else {
NewTask =
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_omp_task_alloc),
AllocArgs);
}
if (const auto *DC = D.getSingleClause<OMPDetachClause>()) {
const Expr *Evt = DC->getEventHandler()->IgnoreParenImpCasts();
LValue EvtLVal = CGF.EmitLValue(Evt);
llvm::Value *Loc = emitUpdateLocation(CGF, DC->getBeginLoc());
llvm::Value *Tid = getThreadID(CGF, DC->getBeginLoc());
Tid = CGF.Builder.CreateIntCast(Tid, CGF.IntTy, false);
llvm::Value *EvtVal = CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_task_allow_completion_event),
{Loc, Tid, NewTask});
EvtVal = CGF.EmitScalarConversion(EvtVal, C.VoidPtrTy, Evt->getType(),
Evt->getExprLoc());
CGF.EmitStoreOfScalar(EvtVal, EvtLVal);
}
if (D.hasClausesOfKind<OMPAffinityClause>()) {
ASTContext &C = CGM.getContext();
Address AffinitiesArray = Address::invalid();
llvm::Value *NumOfElements = nullptr;
unsigned NumAffinities = 0;
for (const auto *C : D.getClausesOfKind<OMPAffinityClause>()) {
if (const Expr *Modifier = C->getModifier()) {
const auto *IE = cast<OMPIteratorExpr>(Modifier->IgnoreParenImpCasts());
for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
Sz = CGF.Builder.CreateIntCast(Sz, CGF.SizeTy, false);
NumOfElements =
NumOfElements ? CGF.Builder.CreateNUWMul(NumOfElements, Sz) : Sz;
}
} else {
NumAffinities += C->varlist_size();
}
}
getKmpAffinityType(CGM.getContext(), KmpTaskAffinityInfoTy);
enum RTLAffinityInfoFieldsTy { BaseAddr, Len, Flags };
QualType KmpTaskAffinityInfoArrayTy;
if (NumOfElements) {
NumOfElements = CGF.Builder.CreateNUWAdd(
llvm::ConstantInt::get(CGF.SizeTy, NumAffinities), NumOfElements);
auto *OVE = new (C) OpaqueValueExpr(
Loc,
C.getIntTypeForBitwidth(C.getTypeSize(C.getSizeType()), 0),
VK_PRValue);
CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, OVE,
RValue::get(NumOfElements));
KmpTaskAffinityInfoArrayTy =
C.getVariableArrayType(KmpTaskAffinityInfoTy, OVE, ArrayType::Normal,
0, SourceRange(Loc, Loc));
auto *PD = ImplicitParamDecl::Create(C, KmpTaskAffinityInfoArrayTy,
ImplicitParamDecl::Other);
CGF.EmitVarDecl(*PD);
AffinitiesArray = CGF.GetAddrOfLocalVar(PD);
NumOfElements = CGF.Builder.CreateIntCast(NumOfElements, CGF.Int32Ty,
false);
} else {
KmpTaskAffinityInfoArrayTy = C.getConstantArrayType(
KmpTaskAffinityInfoTy,
llvm::APInt(C.getTypeSize(C.getSizeType()), NumAffinities), nullptr,
ArrayType::Normal, 0);
AffinitiesArray =
CGF.CreateMemTemp(KmpTaskAffinityInfoArrayTy, ".affs.arr.addr");
AffinitiesArray = CGF.Builder.CreateConstArrayGEP(AffinitiesArray, 0);
NumOfElements = llvm::ConstantInt::get(CGM.Int32Ty, NumAffinities,
false);
}
const auto *KmpAffinityInfoRD = KmpTaskAffinityInfoTy->getAsRecordDecl();
unsigned Pos = 0;
bool HasIterator = false;
for (const auto *C : D.getClausesOfKind<OMPAffinityClause>()) {
if (C->getModifier()) {
HasIterator = true;
continue;
}
for (const Expr *E : C->varlists()) {
llvm::Value *Addr;
llvm::Value *Size;
std::tie(Addr, Size) = getPointerAndSize(CGF, E);
LValue Base =
CGF.MakeAddrLValue(CGF.Builder.CreateConstGEP(AffinitiesArray, Pos),
KmpTaskAffinityInfoTy);
LValue BaseAddrLVal = CGF.EmitLValueForField(
Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr));
CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
BaseAddrLVal);
LValue LenLVal = CGF.EmitLValueForField(
Base, *std::next(KmpAffinityInfoRD->field_begin(), Len));
CGF.EmitStoreOfScalar(Size, LenLVal);
++Pos;
}
}
LValue PosLVal;
if (HasIterator) {
PosLVal = CGF.MakeAddrLValue(
CGF.CreateMemTemp(C.getSizeType(), "affs.counter.addr"),
C.getSizeType());
CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Pos), PosLVal);
}
for (const auto *C : D.getClausesOfKind<OMPAffinityClause>()) {
const Expr *Modifier = C->getModifier();
if (!Modifier)
continue;
OMPIteratorGeneratorScope IteratorScope(
CGF, cast_or_null<OMPIteratorExpr>(Modifier->IgnoreParenImpCasts()));
for (const Expr *E : C->varlists()) {
llvm::Value *Addr;
llvm::Value *Size;
std::tie(Addr, Size) = getPointerAndSize(CGF, E);
llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
LValue Base = CGF.MakeAddrLValue(
CGF.Builder.CreateGEP(AffinitiesArray, Idx), KmpTaskAffinityInfoTy);
LValue BaseAddrLVal = CGF.EmitLValueForField(
Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr));
CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
BaseAddrLVal);
LValue LenLVal = CGF.EmitLValueForField(
Base, *std::next(KmpAffinityInfoRD->field_begin(), Len));
CGF.EmitStoreOfScalar(Size, LenLVal);
Idx = CGF.Builder.CreateNUWAdd(
Idx, llvm::ConstantInt::get(Idx->getType(), 1));
CGF.EmitStoreOfScalar(Idx, PosLVal);
}
}
llvm::Value *LocRef = emitUpdateLocation(CGF, Loc);
llvm::Value *GTid = getThreadID(CGF, Loc);
llvm::Value *AffinListPtr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
AffinitiesArray.getPointer(), CGM.VoidPtrTy);
(void)CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_omp_reg_task_with_affinity),
{LocRef, GTid, NewTask, NumOfElements, AffinListPtr});
}
llvm::Value *NewTaskNewTaskTTy =
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
NewTask, KmpTaskTWithPrivatesPtrTy);
LValue Base = CGF.MakeNaturalAlignAddrLValue(NewTaskNewTaskTTy,
KmpTaskTWithPrivatesQTy);
LValue TDBase =
CGF.EmitLValueForField(Base, *KmpTaskTWithPrivatesQTyRD->field_begin());
Address KmpTaskSharedsPtr = Address::invalid();
if (!SharedsTy->getAsStructureType()->getDecl()->field_empty()) {
KmpTaskSharedsPtr = Address(
CGF.EmitLoadOfScalar(
CGF.EmitLValueForField(
TDBase,
*std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds)),
Loc),
CGF.Int8Ty, CGM.getNaturalTypeAlignment(SharedsTy));
LValue Dest = CGF.MakeAddrLValue(KmpTaskSharedsPtr, SharedsTy);
LValue Src = CGF.MakeAddrLValue(Shareds, SharedsTy);
CGF.EmitAggregateCopy(Dest, Src, SharedsTy, AggValueSlot::DoesNotOverlap);
}
TaskResultTy Result;
if (!Privates.empty()) {
emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, Base, KmpTaskTWithPrivatesQTyRD,
SharedsTy, SharedsPtrTy, Data, Privates,
/*ForDup=*/false);
if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
(!Data.LastprivateVars.empty() || checkInitIsRequired(CGF, Privates))) {
Result.TaskDupFn = emitTaskDupFunction(
CGM, Loc, D, KmpTaskTWithPrivatesPtrQTy, KmpTaskTWithPrivatesQTyRD,
KmpTaskTQTyRD, SharedsTy, SharedsPtrTy, Data, Privates,
!Data.LastprivateVars.empty());
}
}
enum { Priority = 0, Destructors = 1 };
auto FI = std::next(KmpTaskTQTyRD->field_begin(), Data1);
const RecordDecl *KmpCmplrdataUD =
(*FI)->getType()->getAsUnionType()->getDecl();
if (NeedsCleanup) {
llvm::Value *DestructorFn = emitDestructorsFunction(
CGM, Loc, KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
KmpTaskTWithPrivatesQTy);
LValue Data1LV = CGF.EmitLValueForField(TDBase, *FI);
LValue DestructorsLV = CGF.EmitLValueForField(
Data1LV, *std::next(KmpCmplrdataUD->field_begin(), Destructors));
CGF.EmitStoreOfScalar(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
DestructorFn, KmpRoutineEntryPtrTy),
DestructorsLV);
}
if (Data.Priority.getInt()) {
LValue Data2LV = CGF.EmitLValueForField(
TDBase, *std::next(KmpTaskTQTyRD->field_begin(), Data2));
LValue PriorityLV = CGF.EmitLValueForField(
Data2LV, *std::next(KmpCmplrdataUD->field_begin(), Priority));
CGF.EmitStoreOfScalar(Data.Priority.getPointer(), PriorityLV);
}
Result.NewTask = NewTask;
Result.TaskEntry = TaskEntry;
Result.NewTaskNewTaskTTy = NewTaskNewTaskTTy;
Result.TDBase = TDBase;
Result.KmpTaskTQTyRD = KmpTaskTQTyRD;
return Result;
}
namespace {
enum RTLDependenceKindTy {
DepIn = 0x01,
DepInOut = 0x3,
DepMutexInOutSet = 0x4,
DepInOutSet = 0x8,
DepOmpAllMem = 0x80,
};
enum RTLDependInfoFieldsTy { BaseAddr, Len, Flags };
} // namespace
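/// Translate the clause-level dependence kind into the bit encoding the
/// runtime expects in the flags field of kmp_depend_info.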
static RTLDependenceKindTy translateDependencyKind(OpenMPDependClauseKind K) {
RTLDependenceKindTy DepKind;
switch (K) {
case OMPC_DEPEND_in:
DepKind = DepIn;
break;
case OMPC_DEPEND_out:
case OMPC_DEPEND_inout:
DepKind = DepInOut;
break;
case OMPC_DEPEND_mutexinoutset:
DepKind = DepMutexInOutSet;
break;
case OMPC_DEPEND_inoutset:
DepKind = DepInOutSet;
break;
case OMPC_DEPEND_outallmemory:
DepKind = DepOmpAllMem;
break;
case OMPC_DEPEND_source:
case OMPC_DEPEND_sink:
case OMPC_DEPEND_depobj:
case OMPC_DEPEND_inoutallmemory:
case OMPC_DEPEND_unknown:
llvm_unreachable("Unknown task dependence type");
}
return DepKind;
}
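/// Build (once) the implicit kmp_depend_info record and the type used for its
/// flags field. The layout is:
/// \code
/// struct kmp_depend_info {
///   intptr_t base_addr;
///   size_t len;
///   <unsigned integer as wide as bool> flags;
/// };
/// \endcode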
static void getDependTypes(ASTContext &C, QualType &KmpDependInfoTy,
QualType &FlagsTy) {
FlagsTy = C.getIntTypeForBitwidth(C.getTypeSize(C.BoolTy), false);
if (KmpDependInfoTy.isNull()) {
RecordDecl *KmpDependInfoRD = C.buildImplicitRecord("kmp_depend_info");
KmpDependInfoRD->startDefinition();
addFieldToRecordDecl(C, KmpDependInfoRD, C.getIntPtrType());
addFieldToRecordDecl(C, KmpDependInfoRD, C.getSizeType());
addFieldToRecordDecl(C, KmpDependInfoRD, FlagsTy);
KmpDependInfoRD->completeDefinition();
KmpDependInfoTy = C.getRecordType(KmpDependInfoRD);
}
}
std::pair<llvm::Value *, LValue>
CGOpenMPRuntime::getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal,
SourceLocation Loc) {
ASTContext &C = CGM.getContext();
QualType FlagsTy;
getDependTypes(C, KmpDependInfoTy, FlagsTy);
RecordDecl *KmpDependInfoRD =
cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
LValue Base = CGF.EmitLoadOfPointerLValue(
CGF.Builder.CreateElementBitCast(
DepobjLVal.getAddress(CGF),
CGF.ConvertTypeForMem(KmpDependInfoPtrTy)),
KmpDependInfoPtrTy->castAs<PointerType>());
Address DepObjAddr = CGF.Builder.CreateGEP(
Base.getAddress(CGF),
llvm::ConstantInt::get(CGF.IntPtrTy, -1, true));
LValue NumDepsBase = CGF.MakeAddrLValue(
DepObjAddr, KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo());
LValue BaseAddrLVal = CGF.EmitLValueForField(
NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
llvm::Value *NumDeps = CGF.EmitLoadOfScalar(BaseAddrLVal, Loc);
return std::make_pair(NumDeps, Base);
}
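/// Fill one entry of the dependence array for every expression in \p Data:
/// store the base address, the length and the translated dependence flags,
/// advancing either the compile-time counter or the in-memory position held
/// in \p Pos.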
static void emitDependData(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
llvm::PointerUnion<unsigned *, LValue *> Pos,
const OMPTaskDataTy::DependData &Data,
Address DependenciesArray) {
CodeGenModule &CGM = CGF.CGM;
ASTContext &C = CGM.getContext();
QualType FlagsTy;
getDependTypes(C, KmpDependInfoTy, FlagsTy);
RecordDecl *KmpDependInfoRD =
cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
OMPIteratorGeneratorScope IteratorScope(
CGF, cast_or_null<OMPIteratorExpr>(
Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
: nullptr));
for (const Expr *E : Data.DepExprs) {
llvm::Value *Addr;
llvm::Value *Size;
if (E) {
std::tie(Addr, Size) = getPointerAndSize(CGF, E);
Addr = CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy);
} else {
Addr = llvm::ConstantInt::get(CGF.IntPtrTy, 0);
Size = llvm::ConstantInt::get(CGF.SizeTy, 0);
}
LValue Base;
if (unsigned *P = Pos.dyn_cast<unsigned *>()) {
Base = CGF.MakeAddrLValue(
CGF.Builder.CreateConstGEP(DependenciesArray, *P), KmpDependInfoTy);
} else {
assert(E && "Expected a non-null expression");
LValue &PosLVal = *Pos.get<LValue *>();
llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
Base = CGF.MakeAddrLValue(
CGF.Builder.CreateGEP(DependenciesArray, Idx), KmpDependInfoTy);
}
LValue BaseAddrLVal = CGF.EmitLValueForField(
Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
CGF.EmitStoreOfScalar(Addr, BaseAddrLVal);
LValue LenLVal = CGF.EmitLValueForField(
Base, *std::next(KmpDependInfoRD->field_begin(), Len));
CGF.EmitStoreOfScalar(Size, LenLVal);
RTLDependenceKindTy DepKind = translateDependencyKind(Data.DepKind);
LValue FlagsLVal = CGF.EmitLValueForField(
Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
FlagsLVal);
if (unsigned *P = Pos.dyn_cast<unsigned *>()) {
++(*P);
} else {
LValue &PosLVal = *Pos.get<LValue *>();
llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
Idx = CGF.Builder.CreateNUWAdd(Idx,
llvm::ConstantInt::get(Idx->getType(), 1));
CGF.EmitStoreOfScalar(Idx, PosLVal);
}
}
}
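/// For each depobj expression in \p Data, compute the number of
/// kmp_depend_info entries it carries, evaluating the expressions inside the
/// iterator scope when the clause has an iterator modifier.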
SmallVector<llvm::Value *, 4> CGOpenMPRuntime::emitDepobjElementsSizes(
CodeGenFunction &CGF, QualType &KmpDependInfoTy,
const OMPTaskDataTy::DependData &Data) {
assert(Data.DepKind == OMPC_DEPEND_depobj &&
"Expected depobj dependency kind.");
SmallVector<llvm::Value *, 4> Sizes;
SmallVector<LValue, 4> SizeLVals;
ASTContext &C = CGF.getContext();
{
OMPIteratorGeneratorScope IteratorScope(
CGF, cast_or_null<OMPIteratorExpr>(
Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
: nullptr));
for (const Expr *E : Data.DepExprs) {
llvm::Value *NumDeps;
LValue Base;
LValue DepobjLVal = CGF.EmitLValue(E->IgnoreParenImpCasts());
std::tie(NumDeps, Base) =
getDepobjElements(CGF, DepobjLVal, E->getExprLoc());
LValue NumLVal = CGF.MakeAddrLValue(
CGF.CreateMemTemp(C.getUIntPtrType(), "depobj.size.addr"),
C.getUIntPtrType());
CGF.Builder.CreateStore(llvm::ConstantInt::get(CGF.IntPtrTy, 0),
NumLVal.getAddress(CGF));
llvm::Value *PrevVal = CGF.EmitLoadOfScalar(NumLVal, E->getExprLoc());
llvm::Value *Add = CGF.Builder.CreateNUWAdd(PrevVal, NumDeps);
CGF.EmitStoreOfScalar(Add, NumLVal);
SizeLVals.push_back(NumLVal);
}
}
for (unsigned I = 0, E = SizeLVals.size(); I < E; ++I) {
llvm::Value *Size =
CGF.EmitLoadOfScalar(SizeLVals[I], Data.DepExprs[I]->getExprLoc());
Sizes.push_back(Size);
}
return Sizes;
}
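/// Copy the kmp_depend_info entries referenced by the depobj expressions into
/// \p DependenciesArray at the running position \p PosLVal, advancing the
/// position by the number of entries copied.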
void CGOpenMPRuntime::emitDepobjElements(CodeGenFunction &CGF,
QualType &KmpDependInfoTy,
LValue PosLVal,
const OMPTaskDataTy::DependData &Data,
Address DependenciesArray) {
assert(Data.DepKind == OMPC_DEPEND_depobj &&
"Expected depobj dependency kind.");
llvm::Value *ElSize = CGF.getTypeSize(KmpDependInfoTy);
{
OMPIteratorGeneratorScope IteratorScope(
CGF, cast_or_null<OMPIteratorExpr>(
Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
: nullptr));
for (unsigned I = 0, End = Data.DepExprs.size(); I < End; ++I) {
const Expr *E = Data.DepExprs[I];
llvm::Value *NumDeps;
LValue Base;
LValue DepobjLVal = CGF.EmitLValue(E->IgnoreParenImpCasts());
std::tie(NumDeps, Base) =
getDepobjElements(CGF, DepobjLVal, E->getExprLoc());
llvm::Value *Size = CGF.Builder.CreateNUWMul(
ElSize,
CGF.Builder.CreateIntCast(NumDeps, CGF.SizeTy, false));
llvm::Value *Pos = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
Address DepAddr = CGF.Builder.CreateGEP(DependenciesArray, Pos);
CGF.Builder.CreateMemCpy(DepAddr, Base.getAddress(CGF), Size);
llvm::Value *Add = CGF.Builder.CreateNUWAdd(Pos, NumDeps);
CGF.EmitStoreOfScalar(Add, PosLVal);
}
}
}
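/// Emit the dependence array for a task-like construct: count the regular,
/// iterator-expanded and depobj dependences, allocate a (possibly
/// variable-length) array of kmp_depend_info, fill it in, and return the
/// number of entries together with the array address cast to i8*.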
std::pair<llvm::Value *, Address> CGOpenMPRuntime::emitDependClause(
CodeGenFunction &CGF, ArrayRef<OMPTaskDataTy::DependData> Dependencies,
SourceLocation Loc) {
if (llvm::all_of(Dependencies, [](const OMPTaskDataTy::DependData &D) {
return D.DepExprs.empty();
}))
return std::make_pair(nullptr, Address::invalid());
ASTContext &C = CGM.getContext();
Address DependenciesArray = Address::invalid();
llvm::Value *NumOfElements = nullptr;
unsigned NumDependencies = std::accumulate(
Dependencies.begin(), Dependencies.end(), 0,
[](unsigned V, const OMPTaskDataTy::DependData &D) {
return D.DepKind == OMPC_DEPEND_depobj
? V
: (V + (D.IteratorExpr ? 0 : D.DepExprs.size()));
});
QualType FlagsTy;
getDependTypes(C, KmpDependInfoTy, FlagsTy);
bool HasDepobjDeps = false;
bool HasRegularWithIterators = false;
llvm::Value *NumOfDepobjElements = llvm::ConstantInt::get(CGF.IntPtrTy, 0);
llvm::Value *NumOfRegularWithIterators =
llvm::ConstantInt::get(CGF.IntPtrTy, 0);
for (const OMPTaskDataTy::DependData &D : Dependencies) {
if (D.DepKind == OMPC_DEPEND_depobj) {
SmallVector<llvm::Value *, 4> Sizes =
emitDepobjElementsSizes(CGF, KmpDependInfoTy, D);
for (llvm::Value *Size : Sizes) {
NumOfDepobjElements =
CGF.Builder.CreateNUWAdd(NumOfDepobjElements, Size);
}
HasDepobjDeps = true;
continue;
}
if (const auto *IE = cast_or_null<OMPIteratorExpr>(D.IteratorExpr)) {
for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
Sz = CGF.Builder.CreateIntCast(Sz, CGF.IntPtrTy, false);
llvm::Value *NumClauseDeps = CGF.Builder.CreateNUWMul(
Sz, llvm::ConstantInt::get(CGF.IntPtrTy, D.DepExprs.size()));
NumOfRegularWithIterators =
CGF.Builder.CreateNUWAdd(NumOfRegularWithIterators, NumClauseDeps);
}
HasRegularWithIterators = true;
continue;
}
}
QualType KmpDependInfoArrayTy;
if (HasDepobjDeps || HasRegularWithIterators) {
NumOfElements = llvm::ConstantInt::get(CGM.IntPtrTy, NumDependencies,
false);
if (HasDepobjDeps) {
NumOfElements =
CGF.Builder.CreateNUWAdd(NumOfDepobjElements, NumOfElements);
}
if (HasRegularWithIterators) {
NumOfElements =
CGF.Builder.CreateNUWAdd(NumOfRegularWithIterators, NumOfElements);
}
auto *OVE = new (C) OpaqueValueExpr(
Loc, C.getIntTypeForBitwidth(64, 0),
VK_PRValue);
CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, OVE,
RValue::get(NumOfElements));
KmpDependInfoArrayTy =
C.getVariableArrayType(KmpDependInfoTy, OVE, ArrayType::Normal,
0, SourceRange(Loc, Loc));
auto *PD = ImplicitParamDecl::Create(C, KmpDependInfoArrayTy,
ImplicitParamDecl::Other);
CGF.EmitVarDecl(*PD);
DependenciesArray = CGF.GetAddrOfLocalVar(PD);
NumOfElements = CGF.Builder.CreateIntCast(NumOfElements, CGF.Int32Ty,
false);
} else {
KmpDependInfoArrayTy = C.getConstantArrayType(
KmpDependInfoTy, llvm::APInt(64, NumDependencies), nullptr,
ArrayType::Normal, 0);
DependenciesArray =
CGF.CreateMemTemp(KmpDependInfoArrayTy, ".dep.arr.addr");
DependenciesArray = CGF.Builder.CreateConstArrayGEP(DependenciesArray, 0);
NumOfElements = llvm::ConstantInt::get(CGM.Int32Ty, NumDependencies,
false);
}
unsigned Pos = 0;
for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) {
if (Dependencies[I].DepKind == OMPC_DEPEND_depobj ||
Dependencies[I].IteratorExpr)
continue;
emitDependData(CGF, KmpDependInfoTy, &Pos, Dependencies[I],
DependenciesArray);
}
LValue PosLVal = CGF.MakeAddrLValue(
CGF.CreateMemTemp(C.getSizeType(), "dep.counter.addr"), C.getSizeType());
CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Pos), PosLVal);
for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) {
if (Dependencies[I].DepKind == OMPC_DEPEND_depobj ||
!Dependencies[I].IteratorExpr)
continue;
emitDependData(CGF, KmpDependInfoTy, &PosLVal, Dependencies[I],
DependenciesArray);
}
if (HasDepobjDeps) {
for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) {
if (Dependencies[I].DepKind != OMPC_DEPEND_depobj)
continue;
emitDepobjElements(CGF, KmpDependInfoTy, PosLVal, Dependencies[I],
DependenciesArray);
}
}
DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
DependenciesArray, CGF.VoidPtrTy, CGF.Int8Ty);
return std::make_pair(NumOfElements, DependenciesArray);
}
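/// Emit the storage for an 'omp depobj' construct: allocate (via __kmpc_alloc)
/// NumDependencies + 1 kmp_depend_info slots, store the element count in the
/// base_addr field of slot 0, fill in the remaining entries and return the
/// address of the first real entry.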
Address CGOpenMPRuntime::emitDepobjDependClause(
CodeGenFunction &CGF, const OMPTaskDataTy::DependData &Dependencies,
SourceLocation Loc) {
if (Dependencies.DepExprs.empty())
return Address::invalid();
ASTContext &C = CGM.getContext();
Address DependenciesArray = Address::invalid();
unsigned NumDependencies = Dependencies.DepExprs.size();
QualType FlagsTy;
getDependTypes(C, KmpDependInfoTy, FlagsTy);
RecordDecl *KmpDependInfoRD =
cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
llvm::Value *Size;
llvm::Value *NumDepsVal;
CharUnits Align = C.getTypeAlignInChars(KmpDependInfoTy);
if (const auto *IE =
cast_or_null<OMPIteratorExpr>(Dependencies.IteratorExpr)) {
NumDepsVal = llvm::ConstantInt::get(CGF.SizeTy, 1);
for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
Sz = CGF.Builder.CreateIntCast(Sz, CGF.SizeTy, false);
NumDepsVal = CGF.Builder.CreateNUWMul(NumDepsVal, Sz);
}
Size = CGF.Builder.CreateNUWAdd(llvm::ConstantInt::get(CGF.SizeTy, 1),
NumDepsVal);
CharUnits SizeInBytes =
C.getTypeSizeInChars(KmpDependInfoTy).alignTo(Align);
llvm::Value *RecSize = CGM.getSize(SizeInBytes);
Size = CGF.Builder.CreateNUWMul(Size, RecSize);
NumDepsVal =
CGF.Builder.CreateIntCast(NumDepsVal, CGF.IntPtrTy, false);
} else {
QualType KmpDependInfoArrayTy = C.getConstantArrayType(
KmpDependInfoTy, llvm::APInt(64, NumDependencies + 1),
nullptr, ArrayType::Normal, 0);
CharUnits Sz = C.getTypeSizeInChars(KmpDependInfoArrayTy);
Size = CGM.getSize(Sz.alignTo(Align));
NumDepsVal = llvm::ConstantInt::get(CGF.IntPtrTy, NumDependencies);
}
llvm::Value *ThreadID = getThreadID(CGF, Loc);
llvm::Value *Allocator = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
llvm::Value *Args[] = {ThreadID, Size, Allocator};
llvm::Value *Addr =
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_alloc),
Args, ".dep.arr.addr");
llvm::Type *KmpDependInfoLlvmTy = CGF.ConvertTypeForMem(KmpDependInfoTy);
Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
Addr, KmpDependInfoLlvmTy->getPointerTo());
DependenciesArray = Address(Addr, KmpDependInfoLlvmTy, Align);
LValue Base = CGF.MakeAddrLValue(DependenciesArray, KmpDependInfoTy);
LValue BaseAddrLVal = CGF.EmitLValueForField(
Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
CGF.EmitStoreOfScalar(NumDepsVal, BaseAddrLVal);
llvm::PointerUnion<unsigned *, LValue *> Pos;
unsigned Idx = 1;
LValue PosLVal;
if (Dependencies.IteratorExpr) {
PosLVal = CGF.MakeAddrLValue(
CGF.CreateMemTemp(C.getSizeType(), "iterator.counter.addr"),
C.getSizeType());
CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Idx), PosLVal,
true);
Pos = &PosLVal;
} else {
Pos = &Idx;
}
emitDependData(CGF, KmpDependInfoTy, Pos, Dependencies, DependenciesArray);
DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.Builder.CreateConstGEP(DependenciesArray, 1), CGF.VoidPtrTy,
CGF.Int8Ty);
return DependenciesArray;
}
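/// Emit the 'destroy' form of the depobj directive: compute the address of
/// the allocation header (one element before the stored pointer) and release
/// it through __kmpc_free.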
void CGOpenMPRuntime::emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
SourceLocation Loc) {
ASTContext &C = CGM.getContext();
QualType FlagsTy;
getDependTypes(C, KmpDependInfoTy, FlagsTy);
LValue Base = CGF.EmitLoadOfPointerLValue(
DepobjLVal.getAddress(CGF), C.VoidPtrTy.castAs<PointerType>());
QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy),
CGF.ConvertTypeForMem(KmpDependInfoTy));
llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
Addr.getElementType(), Addr.getPointer(),
llvm::ConstantInt::get(CGF.IntPtrTy, -1, true));
DepObjAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(DepObjAddr,
CGF.VoidPtrTy);
llvm::Value *ThreadID = getThreadID(CGF, Loc);
llvm::Value *Allocator = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
llvm::Value *Args[] = {ThreadID, DepObjAddr, Allocator};
(void)CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_free),
Args);
}
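/// Emit the 'update' form of the depobj directive: loop over the stored
/// kmp_depend_info entries and rewrite their flags field with the newly
/// requested dependence kind.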
void CGOpenMPRuntime::emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
OpenMPDependClauseKind NewDepKind,
SourceLocation Loc) {
ASTContext &C = CGM.getContext();
QualType FlagsTy;
getDependTypes(C, KmpDependInfoTy, FlagsTy);
RecordDecl *KmpDependInfoRD =
cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
llvm::Value *NumDeps;
LValue Base;
std::tie(NumDeps, Base) = getDepobjElements(CGF, DepobjLVal, Loc);
Address Begin = Base.getAddress(CGF);
llvm::Value *End = CGF.Builder.CreateGEP(
Begin.getElementType(), Begin.getPointer(), NumDeps);
llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.body");
llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.done");
llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
CGF.EmitBlock(BodyBB);
llvm::PHINode *ElementPHI =
CGF.Builder.CreatePHI(Begin.getType(), 2, "omp.elementPast");
ElementPHI->addIncoming(Begin.getPointer(), EntryBB);
Begin = Begin.withPointer(ElementPHI);
Base = CGF.MakeAddrLValue(Begin, KmpDependInfoTy, Base.getBaseInfo(),
Base.getTBAAInfo());
RTLDependenceKindTy DepKind = translateDependencyKind(NewDepKind);
LValue FlagsLVal = CGF.EmitLValueForField(
Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
FlagsLVal);
Address ElementNext =
CGF.Builder.CreateConstGEP(Begin, 1, "omp.elementNext");
ElementPHI->addIncoming(ElementNext.getPointer(),
CGF.Builder.GetInsertBlock());
llvm::Value *IsEmpty =
CGF.Builder.CreateICmpEQ(ElementNext.getPointer(), End, "omp.isempty");
CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
CGF.EmitBlock(DoneBB, true);
}
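/// Emit the actual task launch. After emitTaskInit, either enqueue the task
/// with __kmpc_omp_task(_with_deps) or, when the 'if' clause evaluates to
/// false, wait on the dependences and run the task entry serially between
/// __kmpc_omp_task_begin_if0 and __kmpc_omp_task_complete_if0.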
void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds,
const Expr *IfCond,
const OMPTaskDataTy &Data) {
if (!CGF.HaveInsertPoint())
return;
TaskResultTy Result =
emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
llvm::Value *NewTask = Result.NewTask;
llvm::Function *TaskEntry = Result.TaskEntry;
llvm::Value *NewTaskNewTaskTTy = Result.NewTaskNewTaskTTy;
LValue TDBase = Result.TDBase;
const RecordDecl *KmpTaskTQTyRD = Result.KmpTaskTQTyRD;
Address DependenciesArray = Address::invalid();
llvm::Value *NumOfElements;
std::tie(NumOfElements, DependenciesArray) =
emitDependClause(CGF, Data.Dependences, Loc);
llvm::Value *ThreadID = getThreadID(CGF, Loc);
llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
llvm::Value *TaskArgs[] = { UpLoc, ThreadID, NewTask };
llvm::Value *DepTaskArgs[7];
if (!Data.Dependences.empty()) {
DepTaskArgs[0] = UpLoc;
DepTaskArgs[1] = ThreadID;
DepTaskArgs[2] = NewTask;
DepTaskArgs[3] = NumOfElements;
DepTaskArgs[4] = DependenciesArray.getPointer();
DepTaskArgs[5] = CGF.Builder.getInt32(0);
DepTaskArgs[6] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
}
auto &&ThenCodeGen = [this, &Data, TDBase, KmpTaskTQTyRD, &TaskArgs,
&DepTaskArgs](CodeGenFunction &CGF, PrePostActionTy &) {
if (!Data.Tied) {
auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
LValue PartIdLVal = CGF.EmitLValueForField(TDBase, *PartIdFI);
CGF.EmitStoreOfScalar(CGF.Builder.getInt32(0), PartIdLVal);
}
if (!Data.Dependences.empty()) {
CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_omp_task_with_deps),
DepTaskArgs);
} else {
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_omp_task),
TaskArgs);
}
if (auto *Region =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
Region->emitUntiedSwitch(CGF);
};
llvm::Value *DepWaitTaskArgs[6];
if (!Data.Dependences.empty()) {
DepWaitTaskArgs[0] = UpLoc;
DepWaitTaskArgs[1] = ThreadID;
DepWaitTaskArgs[2] = NumOfElements;
DepWaitTaskArgs[3] = DependenciesArray.getPointer();
DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
}
auto &M = CGM.getModule();
auto &&ElseCodeGen = [this, &M, &TaskArgs, ThreadID, NewTaskNewTaskTTy,
TaskEntry, &Data, &DepWaitTaskArgs,
Loc](CodeGenFunction &CGF, PrePostActionTy &) {
CodeGenFunction::RunCleanupsScope LocalScope(CGF);
if (!Data.Dependences.empty())
CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_omp_wait_deps),
DepWaitTaskArgs);
auto &&CodeGen = [TaskEntry, ThreadID, NewTaskNewTaskTTy,
Loc](CodeGenFunction &CGF, PrePostActionTy &Action) {
Action.Enter(CGF);
llvm::Value *OutlinedFnArgs[] = {ThreadID, NewTaskNewTaskTTy};
CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskEntry,
OutlinedFnArgs);
};
RegionCodeGenTy RCG(CodeGen);
CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
M, OMPRTL___kmpc_omp_task_begin_if0),
TaskArgs,
OMPBuilder.getOrCreateRuntimeFunction(
M, OMPRTL___kmpc_omp_task_complete_if0),
TaskArgs);
RCG.setAction(Action);
RCG(CGF);
};
if (IfCond) {
emitIfClause(CGF, IfCond, ThenCodeGen, ElseCodeGen);
} else {
RegionCodeGenTy ThenRCG(ThenCodeGen);
ThenRCG(CGF);
}
}
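/// Emit a taskloop: initialize the task as for a regular task, store the
/// lower bound, upper bound, stride and reduction data into the kmp_task_t
/// record, and call __kmpc_taskloop with the grainsize/num_tasks schedule
/// operands.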
void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPLoopDirective &D,
llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds,
const Expr *IfCond,
const OMPTaskDataTy &Data) {
if (!CGF.HaveInsertPoint())
return;
TaskResultTy Result =
emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
llvm::Value *ThreadID = getThreadID(CGF, Loc);
llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
llvm::Value *IfVal;
if (IfCond) {
IfVal = CGF.Builder.CreateIntCast(CGF.EvaluateExprAsBool(IfCond), CGF.IntTy,
true);
} else {
IfVal = llvm::ConstantInt::getSigned(CGF.IntTy, 1);
}
LValue LBLVal = CGF.EmitLValueForField(
Result.TDBase,
*std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound));
const auto *LBVar =
cast<VarDecl>(cast<DeclRefExpr>(D.getLowerBoundVariable())->getDecl());
CGF.EmitAnyExprToMem(LBVar->getInit(), LBLVal.getAddress(CGF),
LBLVal.getQuals(),
true);
LValue UBLVal = CGF.EmitLValueForField(
Result.TDBase,
*std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound));
const auto *UBVar =
cast<VarDecl>(cast<DeclRefExpr>(D.getUpperBoundVariable())->getDecl());
CGF.EmitAnyExprToMem(UBVar->getInit(), UBLVal.getAddress(CGF),
UBLVal.getQuals(),
true);
LValue StLVal = CGF.EmitLValueForField(
Result.TDBase,
*std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTStride));
const auto *StVar =
cast<VarDecl>(cast<DeclRefExpr>(D.getStrideVariable())->getDecl());
CGF.EmitAnyExprToMem(StVar->getInit(), StLVal.getAddress(CGF),
StLVal.getQuals(),
true);
LValue RedLVal = CGF.EmitLValueForField(
Result.TDBase,
*std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTReductions));
if (Data.Reductions) {
CGF.EmitStoreOfScalar(Data.Reductions, RedLVal);
} else {
CGF.EmitNullInitialization(RedLVal.getAddress(CGF),
CGF.getContext().VoidPtrTy);
}
enum { NoSchedule = 0, Grainsize = 1, NumTasks = 2 };
llvm::Value *TaskArgs[] = {
UpLoc,
ThreadID,
Result.NewTask,
IfVal,
LBLVal.getPointer(CGF),
UBLVal.getPointer(CGF),
CGF.EmitLoadOfScalar(StLVal, Loc),
llvm::ConstantInt::getSigned(CGF.IntTy, 1),
llvm::ConstantInt::getSigned(
CGF.IntTy, Data.Schedule.getPointer()
? (Data.Schedule.getInt() ? NumTasks : Grainsize)
: NoSchedule),
Data.Schedule.getPointer()
? CGF.Builder.CreateIntCast(Data.Schedule.getPointer(), CGF.Int64Ty,
false)
: llvm::ConstantInt::get(CGF.Int64Ty, 0),
Result.TaskDupFn ? CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
Result.TaskDupFn, CGF.VoidPtrTy)
: llvm::ConstantPointerNull::get(CGF.VoidPtrTy)};
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_taskloop),
TaskArgs);
}
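/// Emit an element-wise reduction over array-typed reduction variables: walk
/// the LHS and RHS arrays in lockstep and invoke \p RedOpGen on privatized
/// element addresses for every element.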
static void EmitOMPAggregateReduction(
CodeGenFunction &CGF, QualType Type, const VarDecl *LHSVar,
const VarDecl *RHSVar,
const llvm::function_ref<void(CodeGenFunction &CGF, const Expr *,
const Expr *, const Expr *)> &RedOpGen,
const Expr *XExpr = nullptr, const Expr *EExpr = nullptr,
const Expr *UpExpr = nullptr) {
QualType ElementTy;
Address LHSAddr = CGF.GetAddrOfLocalVar(LHSVar);
Address RHSAddr = CGF.GetAddrOfLocalVar(RHSVar);
const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, LHSAddr);
llvm::Value *RHSBegin = RHSAddr.getPointer();
llvm::Value *LHSBegin = LHSAddr.getPointer();
llvm::Value *LHSEnd =
CGF.Builder.CreateGEP(LHSAddr.getElementType(), LHSBegin, NumElements);
llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arraycpy.body");
llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arraycpy.done");
llvm::Value *IsEmpty =
CGF.Builder.CreateICmpEQ(LHSBegin, LHSEnd, "omp.arraycpy.isempty");
CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
CGF.EmitBlock(BodyBB);
CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
llvm::PHINode *RHSElementPHI = CGF.Builder.CreatePHI(
RHSBegin->getType(), 2, "omp.arraycpy.srcElementPast");
RHSElementPHI->addIncoming(RHSBegin, EntryBB);
Address RHSElementCurrent(
RHSElementPHI, RHSAddr.getElementType(),
RHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
llvm::PHINode *LHSElementPHI = CGF.Builder.CreatePHI(
LHSBegin->getType(), 2, "omp.arraycpy.destElementPast");
LHSElementPHI->addIncoming(LHSBegin, EntryBB);
Address LHSElementCurrent(
LHSElementPHI, LHSAddr.getElementType(),
LHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
CodeGenFunction::OMPPrivateScope Scope(CGF);
Scope.addPrivate(LHSVar, LHSElementCurrent);
Scope.addPrivate(RHSVar, RHSElementCurrent);
Scope.Privatize();
RedOpGen(CGF, XExpr, EExpr, UpExpr);
Scope.ForceCleanup();
llvm::Value *LHSElementNext = CGF.Builder.CreateConstGEP1_32(
LHSAddr.getElementType(), LHSElementPHI, 1,
"omp.arraycpy.dest.element");
llvm::Value *RHSElementNext = CGF.Builder.CreateConstGEP1_32(
RHSAddr.getElementType(), RHSElementPHI, 1,
"omp.arraycpy.src.element");
llvm::Value *Done =
CGF.Builder.CreateICmpEQ(LHSElementNext, LHSEnd, "omp.arraycpy.done");
CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
LHSElementPHI->addIncoming(LHSElementNext, CGF.Builder.GetInsertBlock());
RHSElementPHI->addIncoming(RHSElementNext, CGF.Builder.GetInsertBlock());
CGF.EmitBlock(DoneBB, true);
}
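/// Emit a single combiner expression. Calls to a user-defined 'declare
/// reduction' are rewritten to invoke the previously emitted combiner
/// function.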
static void emitReductionCombiner(CodeGenFunction &CGF,
const Expr *ReductionOp) {
if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
if (const auto *DRE =
dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
if (const auto *DRD =
dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl())) {
std::pair<llvm::Function *, llvm::Function *> Reduction =
CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
RValue Func = RValue::get(Reduction.first);
CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
CGF.EmitIgnoredExpr(ReductionOp);
return;
}
CGF.EmitIgnoredExpr(ReductionOp);
}
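/// Emit the reduction function passed to __kmpc_reduce. In rough outline
/// (names are illustrative) it is:
/// \code
/// void .omp.reduction.reduction_func(void *LHSArg, void *RHSArg) {
///   // LHSArg/RHSArg point to the two RedList arrays of element addresses;
///   // for every reduction variable, apply the combiner:
///   *lhs[i] = combine(*lhs[i], *rhs[i]);
/// }
/// \endcode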
llvm::Function *CGOpenMPRuntime::emitReductionFunction(
SourceLocation Loc, llvm::Type *ArgsElemType,
ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps) {
ASTContext &C = CGM.getContext();
FunctionArgList Args;
ImplicitParamDecl LHSArg(C, nullptr, Loc, nullptr, C.VoidPtrTy,
ImplicitParamDecl::Other);
ImplicitParamDecl RHSArg(C, nullptr, Loc, nullptr, C.VoidPtrTy,
ImplicitParamDecl::Other);
Args.push_back(&LHSArg);
Args.push_back(&RHSArg);
const auto &CGFI =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
std::string Name = getName({"omp", "reduction", "reduction_func"});
auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
llvm::GlobalValue::InternalLinkage, Name,
&CGM.getModule());
CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
Fn->setDoesNotRecurse();
CodeGenFunction CGF(CGM);
CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
ArgsElemType->getPointerTo()),
ArgsElemType, CGF.getPointerAlign());
Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
ArgsElemType->getPointerTo()),
ArgsElemType, CGF.getPointerAlign());
CodeGenFunction::OMPPrivateScope Scope(CGF);
const auto *IPriv = Privates.begin();
unsigned Idx = 0;
for (unsigned I = 0, E = ReductionOps.size(); I < E; ++I, ++IPriv, ++Idx) {
const auto *RHSVar =
cast<VarDecl>(cast<DeclRefExpr>(RHSExprs[I])->getDecl());
Scope.addPrivate(RHSVar, emitAddrOfVarFromArray(CGF, RHS, Idx, RHSVar));
const auto *LHSVar =
cast<VarDecl>(cast<DeclRefExpr>(LHSExprs[I])->getDecl());
Scope.addPrivate(LHSVar, emitAddrOfVarFromArray(CGF, LHS, Idx, LHSVar));
QualType PrivTy = (*IPriv)->getType();
if (PrivTy->isVariablyModifiedType()) {
++Idx;
Address Elem = CGF.Builder.CreateConstArrayGEP(LHS, Idx);
llvm::Value *Ptr = CGF.Builder.CreateLoad(Elem);
const VariableArrayType *VLA =
CGF.getContext().getAsVariableArrayType(PrivTy);
const auto *OVE = cast<OpaqueValueExpr>(VLA->getSizeExpr());
CodeGenFunction::OpaqueValueMapping OpaqueMap(
CGF, OVE, RValue::get(CGF.Builder.CreatePtrToInt(Ptr, CGF.SizeTy)));
CGF.EmitVariablyModifiedType(PrivTy);
}
}
Scope.Privatize();
IPriv = Privates.begin();
const auto *ILHS = LHSExprs.begin();
const auto *IRHS = RHSExprs.begin();
for (const Expr *E : ReductionOps) {
if ((*IPriv)->getType()->isArrayType()) {
const auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
const auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
EmitOMPAggregateReduction(
CGF, (*IPriv)->getType(), LHSVar, RHSVar,
[=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
emitReductionCombiner(CGF, E);
});
} else {
emitReductionCombiner(CGF, E);
}
++IPriv;
++ILHS;
++IRHS;
}
Scope.ForceCleanup();
CGF.FinishFunction();
return Fn;
}
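/// Emit a single reduction combiner, dispatching to the aggregate loop for
/// array-typed reduction items.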
void CGOpenMPRuntime::emitSingleReductionCombiner(CodeGenFunction &CGF,
const Expr *ReductionOp,
const Expr *PrivateRef,
const DeclRefExpr *LHS,
const DeclRefExpr *RHS) {
if (PrivateRef->getType()->isArrayType()) {
const auto *LHSVar = cast<VarDecl>(LHS->getDecl());
const auto *RHSVar = cast<VarDecl>(RHS->getDecl());
EmitOMPAggregateReduction(
CGF, PrivateRef->getType(), LHSVar, RHSVar,
[=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
emitReductionCombiner(CGF, ReductionOp);
});
} else {
emitReductionCombiner(CGF, ReductionOp);
}
}
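/// Emit the reduction epilogue for a construct with reduction clauses.
/// Roughly (a sketch with illustrative names):
/// \code
/// void *RedList[<n>] = {&<rhs>_1, ..., &<rhs>_n};
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
///                                RedList, reduce_func, &<lock>)) {
/// case 1: <combine into the originals>; __kmpc_end_reduce{_nowait}(...);
///   break;
/// case 2: <atomic or critical combination per variable>; break;
/// default: break;
/// }
/// \endcode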
void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps,
ReductionOptionsTy Options) {
if (!CGF.HaveInsertPoint())
return;
bool WithNowait = Options.WithNowait;
bool SimpleReduction = Options.SimpleReduction;
ASTContext &C = CGM.getContext();
if (SimpleReduction) {
CodeGenFunction::RunCleanupsScope Scope(CGF);
const auto *IPriv = Privates.begin();
const auto *ILHS = LHSExprs.begin();
const auto *IRHS = RHSExprs.begin();
for (const Expr *E : ReductionOps) {
emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
cast<DeclRefExpr>(*IRHS));
++IPriv;
++ILHS;
++IRHS;
}
return;
}
auto Size = RHSExprs.size();
for (const Expr *E : Privates) {
if (E->getType()->isVariablyModifiedType())
++Size;
}
llvm::APInt ArraySize(32, Size);
QualType ReductionArrayTy =
C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
0);
Address ReductionList =
CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
const auto *IPriv = Privates.begin();
unsigned Idx = 0;
for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
CGF.Builder.CreateStore(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy),
Elem);
if ((*IPriv)->getType()->isVariablyModifiedType()) {
++Idx;
Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
llvm::Value *Size = CGF.Builder.CreateIntCast(
CGF.getVLASize(
CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
.NumElts,
CGF.SizeTy, false);
CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
Elem);
}
}
llvm::Function *ReductionFn =
emitReductionFunction(Loc, CGF.ConvertTypeForMem(ReductionArrayTy),
Privates, LHSExprs, RHSExprs, ReductionOps);
std::string Name = getName({"reduction"});
llvm::Value *Lock = getCriticalRegionLock(Name);
llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc, OMP_ATOMIC_REDUCE);
llvm::Value *ThreadId = getThreadID(CGF, Loc);
llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
ReductionList.getPointer(), CGF.VoidPtrTy);
llvm::Value *Args[] = {
IdentTLoc,                             // ident_t *<loc>
ThreadId,                              // i32 <gtid>
CGF.Builder.getInt32(RHSExprs.size()), // i32 <n> (number of variables)
ReductionArrayTySize,                  // size_t sizeof(RedList)
RL,                                    // void *RedList
ReductionFn,                           // reduction function
Lock};                                 // kmp_critical_name *&<lock>
llvm::Value *Res = CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(),
WithNowait ? OMPRTL___kmpc_reduce_nowait : OMPRTL___kmpc_reduce),
Args);
llvm::BasicBlock *DefaultBB = CGF.createBasicBlock(".omp.reduction.default");
llvm::SwitchInst *SwInst =
CGF.Builder.CreateSwitch(Res, DefaultBB, 2);
llvm::BasicBlock *Case1BB = CGF.createBasicBlock(".omp.reduction.case1");
SwInst->addCase(CGF.Builder.getInt32(1), Case1BB);
CGF.EmitBlock(Case1BB);
llvm::Value *EndArgs[] = {IdentTLoc, ThreadId, Lock};
auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps](
CodeGenFunction &CGF, PrePostActionTy &Action) {
CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
const auto *IPriv = Privates.begin();
const auto *ILHS = LHSExprs.begin();
const auto *IRHS = RHSExprs.begin();
for (const Expr *E : ReductionOps) {
RT.emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
cast<DeclRefExpr>(*IRHS));
++IPriv;
++ILHS;
++IRHS;
}
};
RegionCodeGenTy RCG(CodeGen);
CommonActionTy Action(
nullptr, llvm::None,
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), WithNowait ? OMPRTL___kmpc_end_reduce_nowait
: OMPRTL___kmpc_end_reduce),
EndArgs);
RCG.setAction(Action);
RCG(CGF);
CGF.EmitBranch(DefaultBB);
llvm::BasicBlock *Case2BB = CGF.createBasicBlock(".omp.reduction.case2");
SwInst->addCase(CGF.Builder.getInt32(2), Case2BB);
CGF.EmitBlock(Case2BB);
auto &&AtomicCodeGen = [Loc, Privates, LHSExprs, RHSExprs, ReductionOps](
CodeGenFunction &CGF, PrePostActionTy &Action) {
const auto *ILHS = LHSExprs.begin();
const auto *IRHS = RHSExprs.begin();
const auto *IPriv = Privates.begin();
for (const Expr *E : ReductionOps) {
const Expr *XExpr = nullptr;
const Expr *EExpr = nullptr;
const Expr *UpExpr = nullptr;
BinaryOperatorKind BO = BO_Comma;
if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
if (BO->getOpcode() == BO_Assign) {
XExpr = BO->getLHS();
UpExpr = BO->getRHS();
}
}
const Expr *RHSExpr = UpExpr;
if (RHSExpr) {
if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(
RHSExpr->IgnoreParenImpCasts())) {
RHSExpr = ACO->getCond();
}
if (const auto *BORHS =
dyn_cast<BinaryOperator>(RHSExpr->IgnoreParenImpCasts())) {
EExpr = BORHS->getRHS();
BO = BORHS->getOpcode();
}
}
if (XExpr) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
auto &&AtomicRedGen = [BO, VD,
Loc](CodeGenFunction &CGF, const Expr *XExpr,
const Expr *EExpr, const Expr *UpExpr) {
LValue X = CGF.EmitLValue(XExpr);
RValue E;
if (EExpr)
E = CGF.EmitAnyExpr(EExpr);
CGF.EmitOMPAtomicSimpleUpdateExpr(
X, E, BO, true,
llvm::AtomicOrdering::Monotonic, Loc,
[&CGF, UpExpr, VD, Loc](RValue XRValue) {
CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
Address LHSTemp = CGF.CreateMemTemp(VD->getType());
CGF.emitOMPSimpleStore(
CGF.MakeAddrLValue(LHSTemp, VD->getType()), XRValue,
VD->getType().getNonReferenceType(), Loc);
PrivateScope.addPrivate(VD, LHSTemp);
(void)PrivateScope.Privatize();
return CGF.EmitAnyExpr(UpExpr);
});
};
if ((*IPriv)->getType()->isArrayType()) {
const auto *RHSVar =
cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), VD, RHSVar,
AtomicRedGen, XExpr, EExpr, UpExpr);
} else {
AtomicRedGen(CGF, XExpr, EExpr, UpExpr);
}
} else {
auto &&CritRedGen = [E, Loc](CodeGenFunction &CGF, const Expr *,
const Expr *, const Expr *) {
CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
std::string Name = RT.getName({"atomic_reduction"});
RT.emitCriticalRegion(
CGF, Name,
[=](CodeGenFunction &CGF, PrePostActionTy &Action) {
Action.Enter(CGF);
emitReductionCombiner(CGF, E);
},
Loc);
};
if ((*IPriv)->getType()->isArrayType()) {
const auto *LHSVar =
cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
const auto *RHSVar =
cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), LHSVar, RHSVar,
CritRedGen);
} else {
CritRedGen(CGF, nullptr, nullptr, nullptr);
}
}
++ILHS;
++IRHS;
++IPriv;
}
};
RegionCodeGenTy AtomicRCG(AtomicCodeGen);
if (!WithNowait) {
llvm::Value *EndArgs[] = {
IdentTLoc, // ident_t *<loc>
ThreadId,  // i32 <gtid>
Lock       // kmp_critical_name *<lock>
};
CommonActionTy Action(nullptr, llvm::None,
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_end_reduce),
EndArgs);
AtomicRCG.setAction(Action);
AtomicRCG(CGF);
} else {
AtomicRCG(CGF);
}
CGF.EmitBranch(DefaultBB);
CGF.EmitBlock(DefaultBB, true);
}
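// Generates a unique name for an artificial threadprivate variable from the
// given prefix, the (mangled) name of the referenced declaration and the raw
// encoding of its source location.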
static std::string generateUniqueName(CodeGenModule &CGM, StringRef Prefix,
const Expr *Ref) {
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
const clang::DeclRefExpr *DE;
const VarDecl *D = ::getBaseDecl(Ref, DE);
if (!D)
D = cast<VarDecl>(cast<DeclRefExpr>(Ref)->getDecl());
D = D->getCanonicalDecl();
std::string Name = CGM.getOpenMPRuntime().getName(
{D->isLocalVarDeclOrParm() ? D->getName() : CGM.getMangledName(D)});
Out << Prefix << Name << "_"
<< D->getCanonicalDecl()->getBeginLoc().getRawEncoding();
return std::string(Out.str());
}
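// Emits the reduction initializer function used for task reductions. The
// function takes (void *priv, void *orig) and initializes the private
// reduction item, loading the original (shared) item only when the
// initializer actually uses it.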
static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM,
SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N) {
ASTContext &C = CGM.getContext();
QualType VoidPtrTy = C.VoidPtrTy;
VoidPtrTy.addRestrict();
FunctionArgList Args;
ImplicitParamDecl Param(C, nullptr, Loc, nullptr, VoidPtrTy,
ImplicitParamDecl::Other);
ImplicitParamDecl ParamOrig(C, nullptr, Loc, nullptr, VoidPtrTy,
ImplicitParamDecl::Other);
Args.emplace_back(&Param);
Args.emplace_back(&ParamOrig);
const auto &FnInfo =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
std::string Name = CGM.getOpenMPRuntime().getName({"red_init", ""});
auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
Name, &CGM.getModule());
CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
Fn->setDoesNotRecurse();
CodeGenFunction CGF(CGM);
CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
QualType PrivateType = RCG.getPrivateType(N);
Address PrivateAddr = CGF.EmitLoadOfPointer(
CGF.Builder.CreateElementBitCast(
CGF.GetAddrOfLocalVar(&Param),
CGF.ConvertTypeForMem(PrivateType)->getPointerTo()),
C.getPointerType(PrivateType)->castAs<PointerType>());
llvm::Value *Size = nullptr;
if (RCG.getSizes(N).second) {
Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
CGF, CGM.getContext().getSizeType(),
generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
Size = CGF.EmitLoadOfScalar(SizeAddr, false,
CGM.getContext().getSizeType(), Loc);
}
RCG.emitAggregateType(CGF, N, Size);
Address OrigAddr = Address::invalid();
if (RCG.usesReductionInitializer(N)) {
Address SharedAddr = CGF.GetAddrOfLocalVar(&ParamOrig);
OrigAddr = CGF.EmitLoadOfPointer(
SharedAddr,
CGM.getContext().VoidPtrTy.castAs<PointerType>()->getTypePtr());
}
RCG.emitInitialization(CGF, N, PrivateAddr, OrigAddr,
[](CodeGenFunction &) { return false; });
CGF.FinishFunction();
return Fn;
}
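// Emits the reduction combiner function used for task reductions. The
// function takes (void *inout, void *in) and applies the reduction operation,
// combining the "in" item into the "inout" item.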
static llvm::Value *emitReduceCombFunction(CodeGenModule &CGM,
SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N,
const Expr *ReductionOp,
const Expr *LHS, const Expr *RHS,
const Expr *PrivateRef) {
ASTContext &C = CGM.getContext();
const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(LHS)->getDecl());
const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(RHS)->getDecl());
FunctionArgList Args;
ImplicitParamDecl ParamInOut(C, nullptr, Loc, nullptr,
C.VoidPtrTy, ImplicitParamDecl::Other);
ImplicitParamDecl ParamIn(C, nullptr, Loc, nullptr, C.VoidPtrTy,
ImplicitParamDecl::Other);
Args.emplace_back(&ParamInOut);
Args.emplace_back(&ParamIn);
const auto &FnInfo =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
std::string Name = CGM.getOpenMPRuntime().getName({"red_comb", ""});
auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
Name, &CGM.getModule());
CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
Fn->setDoesNotRecurse();
CodeGenFunction CGF(CGM);
CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
llvm::Value *Size = nullptr;
if (RCG.getSizes(N).second) {
Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
CGF, CGM.getContext().getSizeType(),
generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
Size = CGF.EmitLoadOfScalar(SizeAddr, false,
CGM.getContext().getSizeType(), Loc);
}
RCG.emitAggregateType(CGF, N, Size);
CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
PrivateScope.addPrivate(
LHSVD,
CGF.EmitLoadOfPointer(
CGF.Builder.CreateElementBitCast(
CGF.GetAddrOfLocalVar(&ParamInOut),
CGF.ConvertTypeForMem(LHSVD->getType())->getPointerTo()),
C.getPointerType(LHSVD->getType())->castAs<PointerType>()));
PrivateScope.addPrivate(
RHSVD,
CGF.EmitLoadOfPointer(
CGF.Builder.CreateElementBitCast(
CGF.GetAddrOfLocalVar(&ParamIn),
CGF.ConvertTypeForMem(RHSVD->getType())->getPointerTo()),
C.getPointerType(RHSVD->getType())->castAs<PointerType>()));
PrivateScope.Privatize();
CGM.getOpenMPRuntime().emitSingleReductionCombiner(
CGF, ReductionOp, PrivateRef, cast<DeclRefExpr>(LHS),
cast<DeclRefExpr>(RHS));
CGF.FinishFunction();
return Fn;
}
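// Emits the reduction finalizer function used for task reductions. The
// function takes (void *priv) and runs the cleanups (destructors) for the
// private reduction item; returns nullptr when no cleanups are needed.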
static llvm::Value *emitReduceFiniFunction(CodeGenModule &CGM,
SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N) {
if (!RCG.needCleanups(N))
return nullptr;
ASTContext &C = CGM.getContext();
FunctionArgList Args;
ImplicitParamDecl Param(C, nullptr, Loc, nullptr, C.VoidPtrTy,
ImplicitParamDecl::Other);
Args.emplace_back(&Param);
const auto &FnInfo =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
std::string Name = CGM.getOpenMPRuntime().getName({"red_fini", ""});
auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
Name, &CGM.getModule());
CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
Fn->setDoesNotRecurse();
CodeGenFunction CGF(CGM);
CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
Address PrivateAddr = CGF.EmitLoadOfPointer(
CGF.GetAddrOfLocalVar(&Param), C.VoidPtrTy.castAs<PointerType>());
llvm::Value *Size = nullptr;
if (RCG.getSizes(N).second) {
Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
CGF, CGM.getContext().getSizeType(),
generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
Size = CGF.EmitLoadOfScalar(SizeAddr, false,
CGM.getContext().getSizeType(), Loc);
}
RCG.emitAggregateType(CGF, N, Size);
RCG.emitCleanups(CGF, N, PrivateAddr);
CGF.FinishFunction(Loc);
return Fn;
}
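// Emits initialization code for task reductions: builds an array of
// kmp_taskred_input_t descriptors (shared/orig pointers, size, init, fini and
// comb functions, flags) and passes it to __kmpc_taskred_modifier_init or
// __kmpc_taskred_init.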
llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) {
if (!CGF.HaveInsertPoint() || Data.ReductionVars.empty())
return nullptr;
ASTContext &C = CGM.getContext();
RecordDecl *RD = C.buildImplicitRecord("kmp_taskred_input_t");
RD->startDefinition();
const FieldDecl *SharedFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
const FieldDecl *OrigFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
const FieldDecl *SizeFD = addFieldToRecordDecl(C, RD, C.getSizeType());
const FieldDecl *InitFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
const FieldDecl *FiniFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
const FieldDecl *CombFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
const FieldDecl *FlagsFD = addFieldToRecordDecl(
C, RD, C.getIntTypeForBitwidth(32, false));
RD->completeDefinition();
QualType RDType = C.getRecordType(RD);
unsigned Size = Data.ReductionVars.size();
llvm::APInt ArraySize(64, Size);
QualType ArrayRDType = C.getConstantArrayType(
RDType, ArraySize, nullptr, ArrayType::Normal, 0);
Address TaskRedInput = CGF.CreateMemTemp(ArrayRDType, ".rd_input.");
ReductionCodeGen RCG(Data.ReductionVars, Data.ReductionOrigs,
Data.ReductionCopies, Data.ReductionOps);
for (unsigned Cnt = 0; Cnt < Size; ++Cnt) {
llvm::Value *Idxs[] = {llvm::ConstantInt::get(CGM.SizeTy, 0),
llvm::ConstantInt::get(CGM.SizeTy, Cnt)};
llvm::Value *GEP = CGF.EmitCheckedInBoundsGEP(
TaskRedInput.getElementType(), TaskRedInput.getPointer(), Idxs,
false, false, Loc,
".rd_input.gep.");
LValue ElemLVal = CGF.MakeNaturalAlignAddrLValue(GEP, RDType);
LValue SharedLVal = CGF.EmitLValueForField(ElemLVal, SharedFD);
RCG.emitSharedOrigLValue(CGF, Cnt);
llvm::Value *CastedShared =
CGF.EmitCastToVoidPtr(RCG.getSharedLValue(Cnt).getPointer(CGF));
CGF.EmitStoreOfScalar(CastedShared, SharedLVal);
LValue OrigLVal = CGF.EmitLValueForField(ElemLVal, OrigFD);
llvm::Value *CastedOrig =
CGF.EmitCastToVoidPtr(RCG.getOrigLValue(Cnt).getPointer(CGF));
CGF.EmitStoreOfScalar(CastedOrig, OrigLVal);
RCG.emitAggregateType(CGF, Cnt);
llvm::Value *SizeValInChars;
llvm::Value *SizeVal;
std::tie(SizeValInChars, SizeVal) = RCG.getSizes(Cnt);
bool DelayedCreation = !!SizeVal;
SizeValInChars = CGF.Builder.CreateIntCast(SizeValInChars, CGM.SizeTy, /*isSigned=*/false);
LValue SizeLVal = CGF.EmitLValueForField(ElemLVal, SizeFD);
CGF.EmitStoreOfScalar(SizeValInChars, SizeLVal);
LValue InitLVal = CGF.EmitLValueForField(ElemLVal, InitFD);
llvm::Value *InitAddr =
CGF.EmitCastToVoidPtr(emitReduceInitFunction(CGM, Loc, RCG, Cnt));
CGF.EmitStoreOfScalar(InitAddr, InitLVal);
LValue FiniLVal = CGF.EmitLValueForField(ElemLVal, FiniFD);
llvm::Value *Fini = emitReduceFiniFunction(CGM, Loc, RCG, Cnt);
llvm::Value *FiniAddr = Fini
? CGF.EmitCastToVoidPtr(Fini)
: llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
CGF.EmitStoreOfScalar(FiniAddr, FiniLVal);
LValue CombLVal = CGF.EmitLValueForField(ElemLVal, CombFD);
llvm::Value *CombAddr = CGF.EmitCastToVoidPtr(emitReduceCombFunction(
CGM, Loc, RCG, Cnt, Data.ReductionOps[Cnt], LHSExprs[Cnt],
RHSExprs[Cnt], Data.ReductionCopies[Cnt]));
CGF.EmitStoreOfScalar(CombAddr, CombLVal);
LValue FlagsLVal = CGF.EmitLValueForField(ElemLVal, FlagsFD);
if (DelayedCreation) {
CGF.EmitStoreOfScalar(
llvm::ConstantInt::get(CGM.Int32Ty, 1, true),
FlagsLVal);
} else
CGF.EmitNullInitialization(FlagsLVal.getAddress(CGF),
FlagsLVal.getType());
}
if (Data.IsReductionWithTaskMod) {
llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc);
llvm::Value *GTid = CGF.Builder.CreateIntCast(getThreadID(CGF, Loc),
CGM.IntTy, true);
llvm::Value *Args[] = {
IdentTLoc, GTid,
llvm::ConstantInt::get(CGM.IntTy, Data.IsWorksharingReduction ? 1 : 0,
true),
llvm::ConstantInt::get(CGM.IntTy, Size, true),
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
TaskRedInput.getPointer(), CGM.VoidPtrTy)};
return CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_taskred_modifier_init),
Args);
}
llvm::Value *Args[] = {
CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy,
true),
llvm::ConstantInt::get(CGM.IntTy, Size, true),
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TaskRedInput.getPointer(),
CGM.VoidPtrTy)};
return CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_taskred_init),
Args);
}
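// Emits a call to __kmpc_task_reduction_modifier_fini to finish a reduction
// with the task modifier.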
void CGOpenMPRuntime::emitTaskReductionFini(CodeGenFunction &CGF,
SourceLocation Loc,
bool IsWorksharingReduction) {
llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc);
llvm::Value *GTid = CGF.Builder.CreateIntCast(getThreadID(CGF, Loc),
CGM.IntTy, true);
llvm::Value *Args[] = {IdentTLoc, GTid,
llvm::ConstantInt::get(CGM.IntTy,
IsWorksharingReduction ? 1 : 0,
true)};
(void)CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_task_reduction_modifier_fini),
Args);
}
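// Stores the dynamically computed size of a variably-sized reduction item in
// an artificial threadprivate variable so the generated init/comb/fini
// functions can read it back.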
void CGOpenMPRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
SourceLocation Loc,
ReductionCodeGen &RCG,
unsigned N) {
auto Sizes = RCG.getSizes(N);
if (Sizes.second) {
llvm::Value *SizeVal = CGF.Builder.CreateIntCast(Sizes.second, CGM.SizeTy, /*isSigned=*/false);
Address SizeAddr = getAddrOfArtificialThreadPrivate(
CGF, CGM.getContext().getSizeType(),
generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
CGF.Builder.CreateStore(SizeVal, SizeAddr, false);
}
}
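// Returns the address of the task reduction item that corresponds to the
// given shared variable by calling __kmpc_task_reduction_get_th_data.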
Address CGOpenMPRuntime::getTaskReductionItem(CodeGenFunction &CGF,
SourceLocation Loc,
llvm::Value *ReductionsPtr,
LValue SharedLVal) {
llvm::Value *Args[] = {CGF.Builder.CreateIntCast(getThreadID(CGF, Loc),
CGM.IntTy,
true),
ReductionsPtr,
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
SharedLVal.getPointer(CGF), CGM.VoidPtrTy)};
return Address(
CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_task_reduction_get_th_data),
Args),
CGF.Int8Ty, SharedLVal.getAlignment());
}
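// Emits code for the taskwait directive: uses the OpenMPIRBuilder when it is
// enabled and there are no dependences, otherwise calls __kmpc_omp_wait_deps
// (taskwait with depend clauses) or __kmpc_omp_taskwait.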
void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPTaskDataTy &Data) {
if (!CGF.HaveInsertPoint())
return;
if (CGF.CGM.getLangOpts().OpenMPIRBuilder && Data.Dependences.empty()) {
OMPBuilder.createTaskwait(CGF.Builder);
} else {
llvm::Value *ThreadID = getThreadID(CGF, Loc);
llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
auto &M = CGM.getModule();
Address DependenciesArray = Address::invalid();
llvm::Value *NumOfElements;
std::tie(NumOfElements, DependenciesArray) =
emitDependClause(CGF, Data.Dependences, Loc);
llvm::Value *DepWaitTaskArgs[6];
if (!Data.Dependences.empty()) {
DepWaitTaskArgs[0] = UpLoc;
DepWaitTaskArgs[1] = ThreadID;
DepWaitTaskArgs[2] = NumOfElements;
DepWaitTaskArgs[3] = DependenciesArray.getPointer();
DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
CodeGenFunction::RunCleanupsScope LocalScope(CGF);
CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_omp_wait_deps),
DepWaitTaskArgs);
} else {
llvm::Value *Args[] = {UpLoc, ThreadID};
CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_omp_taskwait),
Args);
}
}
if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
Region->emitUntiedSwitch(CGF);
}
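// Emits the body of a directive that is generated inline (without outlining),
// wrapping it in an inlined OpenMP region so nested runtime calls see the
// correct region context.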
void CGOpenMPRuntime::emitInlinedDirective(CodeGenFunction &CGF,
OpenMPDirectiveKind InnerKind,
const RegionCodeGenTy &CodeGen,
bool HasCancel) {
if (!CGF.HaveInsertPoint())
return;
InlinedOpenMPRegionRAII Region(CGF, CodeGen, InnerKind, HasCancel,
InnerKind != OMPD_critical &&
InnerKind != OMPD_master &&
InnerKind != OMPD_masked);
CGF.CapturedStmtInfo->EmitBody(CGF, nullptr);
}
namespace {
enum RTCancelKind {
CancelNoreq = 0,
CancelParallel = 1,
CancelLoop = 2,
CancelSections = 3,
CancelTaskgroup = 4
};
}
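// Maps the construct named in a cancel/cancellation point directive to the
// numeric cancellation kind passed to the __kmpc_cancel* runtime entries.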
static RTCancelKind getCancellationKind(OpenMPDirectiveKind CancelRegion) {
RTCancelKind CancelKind = CancelNoreq;
if (CancelRegion == OMPD_parallel)
CancelKind = CancelParallel;
else if (CancelRegion == OMPD_for)
CancelKind = CancelLoop;
else if (CancelRegion == OMPD_sections)
CancelKind = CancelSections;
else {
assert(CancelRegion == OMPD_taskgroup);
CancelKind = CancelTaskgroup;
}
return CancelKind;
}
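// Emits a call to __kmpc_cancellationpoint and, when cancellation has been
// activated, branches through cleanups to the cancellation exit of the
// enclosing region (emitting a barrier first for parallel regions).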
void CGOpenMPRuntime::emitCancellationPointCall(
CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind CancelRegion) {
if (!CGF.HaveInsertPoint())
return;
if (auto *OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
if (CancelRegion == OMPD_taskgroup || OMPRegionInfo->hasCancel()) {
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
llvm::Value *Result = CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_cancellationpoint),
Args);
llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
CGF.EmitBlock(ExitBB);
if (CancelRegion == OMPD_parallel)
emitBarrierCall(CGF, Loc, OMPD_unknown, false);
CodeGenFunction::JumpDest CancelDest =
CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
CGF.EmitBranchThroughCleanup(CancelDest);
CGF.EmitBlock(ContBB, true);
}
}
}
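// Emits a call to __kmpc_cancel, guarded by the if clause condition when one
// is present; a non-zero result branches through cleanups to the cancel
// destination of the enclosing region.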
void CGOpenMPRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
const Expr *IfCond,
OpenMPDirectiveKind CancelRegion) {
if (!CGF.HaveInsertPoint())
return;
auto &M = CGM.getModule();
if (auto *OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
auto &&ThenGen = [this, &M, Loc, CancelRegion,
OMPRegionInfo](CodeGenFunction &CGF, PrePostActionTy &) {
CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
llvm::Value *Args[] = {
RT.emitUpdateLocation(CGF, Loc), RT.getThreadID(CGF, Loc),
CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
llvm::Value *Result = CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_cancel), Args);
llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
CGF.EmitBlock(ExitBB);
if (CancelRegion == OMPD_parallel)
RT.emitBarrierCall(CGF, Loc, OMPD_unknown, false);
CodeGenFunction::JumpDest CancelDest =
CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
CGF.EmitBranchThroughCleanup(CancelDest);
CGF.EmitBlock(ContBB, true);
};
if (IfCond) {
emitIfClause(CGF, IfCond, ThenGen,
[](CodeGenFunction &, PrePostActionTy &) {});
} else {
RegionCodeGenTy ThenRCG(ThenGen);
ThenRCG(CGF);
}
}
}
namespace {
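// Pre/post action that initializes (on region entry) and destroys (on region
// exit) the user-defined allocators listed in uses_allocators clauses.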
class OMPUsesAllocatorsActionTy final : public PrePostActionTy {
ArrayRef<std::pair<const Expr *, const Expr *>> Allocators;
public:
OMPUsesAllocatorsActionTy(
ArrayRef<std::pair<const Expr *, const Expr *>> Allocators)
: Allocators(Allocators) {}
void Enter(CodeGenFunction &CGF) override {
if (!CGF.HaveInsertPoint())
return;
for (const auto &AllocatorData : Allocators) {
CGF.CGM.getOpenMPRuntime().emitUsesAllocatorsInit(
CGF, AllocatorData.first, AllocatorData.second);
}
}
void Exit(CodeGenFunction &CGF) override {
if (!CGF.HaveInsertPoint())
return;
for (const auto &AllocatorData : Allocators) {
CGF.CGM.getOpenMPRuntime().emitUsesAllocatorsFini(CGF,
AllocatorData.first);
}
}
};
}
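// Emits the outlined function for a target region, attaching the
// uses_allocators action collected from the directive's clauses.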
void CGOpenMPRuntime::emitTargetOutlinedFunction(
const OMPExecutableDirective &D, StringRef ParentName,
llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
assert(!ParentName.empty() && "Invalid target region parent name!");
HasEmittedTargetRegion = true;
SmallVector<std::pair<const Expr *, const Expr *>, 4> Allocators;
for (const auto *C : D.getClausesOfKind<OMPUsesAllocatorsClause>()) {
for (unsigned I = 0, E = C->getNumberOfAllocators(); I < E; ++I) {
const OMPUsesAllocatorsClause::Data D = C->getAllocatorData(I);
if (!D.AllocatorTraits)
continue;
Allocators.emplace_back(D.Allocator, D.AllocatorTraits);
}
}
OMPUsesAllocatorsActionTy UsesAllocatorAction(Allocators);
CodeGen.setAction(UsesAllocatorAction);
emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
IsOffloadEntry, CodeGen);
}
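// Initializes a user-defined allocator: loads the traits array, calls
// __kmpc_init_allocator and stores the returned handle into the allocator
// variable.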
void CGOpenMPRuntime::emitUsesAllocatorsInit(CodeGenFunction &CGF,
const Expr *Allocator,
const Expr *AllocatorTraits) {
llvm::Value *ThreadId = getThreadID(CGF, Allocator->getExprLoc());
ThreadId = CGF.Builder.CreateIntCast(ThreadId, CGF.IntTy, true);
llvm::Value *MemSpaceHandle = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
llvm::Value *NumTraits = llvm::ConstantInt::get(
CGF.IntTy, cast<ConstantArrayType>(
AllocatorTraits->getType()->getAsArrayTypeUnsafe())
->getSize()
.getLimitedValue());
LValue AllocatorTraitsLVal = CGF.EmitLValue(AllocatorTraits);
Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
AllocatorTraitsLVal.getAddress(CGF), CGF.VoidPtrPtrTy, CGF.VoidPtrTy);
AllocatorTraitsLVal = CGF.MakeAddrLValue(Addr, CGF.getContext().VoidPtrTy,
AllocatorTraitsLVal.getBaseInfo(),
AllocatorTraitsLVal.getTBAAInfo());
llvm::Value *Traits =
CGF.EmitLoadOfScalar(AllocatorTraitsLVal, AllocatorTraits->getExprLoc());
llvm::Value *AllocatorVal =
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_init_allocator),
{ThreadId, MemSpaceHandle, NumTraits, Traits});
CGF.EmitVarDecl(*cast<VarDecl>(
cast<DeclRefExpr>(Allocator->IgnoreParenImpCasts())->getDecl()));
LValue AllocatorLVal = CGF.EmitLValue(Allocator->IgnoreParenImpCasts());
AllocatorVal =
CGF.EmitScalarConversion(AllocatorVal, CGF.getContext().VoidPtrTy,
Allocator->getType(), Allocator->getExprLoc());
CGF.EmitStoreOfScalar(AllocatorVal, AllocatorLVal);
}
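// Destroys a user-defined allocator via __kmpc_destroy_allocator.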
void CGOpenMPRuntime::emitUsesAllocatorsFini(CodeGenFunction &CGF,
const Expr *Allocator) {
llvm::Value *ThreadId = getThreadID(CGF, Allocator->getExprLoc());
ThreadId = CGF.Builder.CreateIntCast(ThreadId, CGF.IntTy, true);
LValue AllocatorLVal = CGF.EmitLValue(Allocator->IgnoreParenImpCasts());
llvm::Value *AllocatorVal =
CGF.EmitLoadOfScalar(AllocatorLVal, Allocator->getExprLoc());
AllocatorVal = CGF.EmitScalarConversion(AllocatorVal, Allocator->getType(),
CGF.getContext().VoidPtrTy,
Allocator->getExprLoc());
(void)CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
OMPRTL___kmpc_destroy_allocator),
{ThreadId, AllocatorVal});
}
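// Creates the target region entry: builds the outlined function (skipped on
// the host when offload is mandatory), produces the region ID used by the
// offloading runtime, registers the entry with the offload entries manager
// and attaches default num_teams/thread_limit attributes when they are known.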
void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
const OMPExecutableDirective &D, StringRef ParentName,
llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
const bool BuildOutlinedFn = CGM.getLangOpts().OpenMPIsDevice ||
!CGM.getLangOpts().OpenMPOffloadMandatory;
unsigned DeviceID;
unsigned FileID;
unsigned Line;
getTargetEntryUniqueInfo(CGM.getContext(), D.getBeginLoc(), DeviceID, FileID,
Line);
SmallString<64> EntryFnName;
{
llvm::raw_svector_ostream OS(EntryFnName);
OS << "__omp_offloading" << llvm::format("_%x", DeviceID)
<< llvm::format("_%x_", FileID) << ParentName << "_l" << Line;
}
const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
CodeGenFunction CGF(CGM, true);
CGOpenMPTargetRegionInfo CGInfo(CS, CodeGen, EntryFnName);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
if (BuildOutlinedFn)
OutlinedFn = CGF.GenerateOpenMPCapturedStmtFunction(CS, D.getBeginLoc());
if (!IsOffloadEntry)
return;
if (CGM.getLangOpts().OpenMPIsDevice) {
OutlinedFnID = llvm::ConstantExpr::getBitCast(OutlinedFn, CGM.Int8PtrTy);
OutlinedFn->setLinkage(llvm::GlobalValue::WeakODRLinkage);
OutlinedFn->setDSOLocal(false);
if (CGM.getTriple().isAMDGCN())
OutlinedFn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
} else {
std::string Name = getName({EntryFnName, "region_id"});
OutlinedFnID = new llvm::GlobalVariable(
CGM.getModule(), CGM.Int8Ty, true,
llvm::GlobalValue::WeakAnyLinkage,
llvm::Constant::getNullValue(CGM.Int8Ty), Name);
}
llvm::Constant *TargetRegionEntryAddr = OutlinedFn;
if (!BuildOutlinedFn) {
assert(!CGM.getModule().getGlobalVariable(EntryFnName, true) &&
"Named kernel already exists?");
TargetRegionEntryAddr = new llvm::GlobalVariable(
CGM.getModule(), CGM.Int8Ty, true,
llvm::GlobalValue::InternalLinkage,
llvm::Constant::getNullValue(CGM.Int8Ty), EntryFnName);
}
OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
DeviceID, FileID, ParentName, Line, TargetRegionEntryAddr, OutlinedFnID,
OffloadEntriesInfoManagerTy::OMPTargetRegionEntryTargetRegion);
int32_t DefaultValTeams = -1;
getNumTeamsExprForTargetDirective(CGF, D, DefaultValTeams);
if (DefaultValTeams > 0 && OutlinedFn) {
OutlinedFn->addFnAttr("omp_target_num_teams",
std::to_string(DefaultValTeams));
}
int32_t DefaultValThreads = -1;
getNumThreadsExprForTargetDirective(CGF, D, DefaultValThreads);
if (DefaultValThreads > 0 && OutlinedFn) {
OutlinedFn->addFnAttr("omp_target_thread_limit",
std::to_string(DefaultValThreads));
}
if (BuildOutlinedFn)
CGM.getTargetCodeGenInfo().setTargetAttributes(nullptr, OutlinedFn, CGM);
}
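// Checks if the expression is constant or does not have non-trivial function
// calls.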
static bool isTrivial(ASTContext &Ctx, const Expr *E) {
return (E->isEvaluatable(Ctx, Expr::SE_AllowUndefinedBehavior) ||
!E->hasNonTrivialCall(Ctx)) &&
!E->HasSideEffects(Ctx, true);
}
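// Returns the single statement nested in Body, looking through compound
// statements and skipping statements that cannot affect code generation
// (trivial expressions, asm/null statements, standalone flush, barrier and
// taskyield directives, and declarations without side effects). Returns
// nullptr if more than one such statement is found.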
const Stmt *CGOpenMPRuntime::getSingleCompoundChild(ASTContext &Ctx,
const Stmt *Body) {
const Stmt *Child = Body->IgnoreContainers();
while (const auto *C = dyn_cast_or_null<CompoundStmt>(Child)) {
Child = nullptr;
for (const Stmt *S : C->body()) {
if (const auto *E = dyn_cast<Expr>(S)) {
if (isTrivial(Ctx, E))
continue;
}
if (isa<AsmStmt>(S) || isa<NullStmt>(S) || isa<OMPFlushDirective>(S) ||
isa<OMPBarrierDirective>(S) || isa<OMPTaskyieldDirective>(S))
continue;
if (const auto *DS = dyn_cast<DeclStmt>(S)) {
if (llvm::all_of(DS->decls(), [](const Decl *D) {
if (isa<EmptyDecl>(D) || isa<DeclContext>(D) ||
isa<TypeDecl>(D) || isa<PragmaCommentDecl>(D) ||
isa<PragmaDetectMismatchDecl>(D) || isa<UsingDecl>(D) ||
isa<UsingDirectiveDecl>(D) ||
isa<OMPDeclareReductionDecl>(D) ||
isa<OMPThreadPrivateDecl>(D) || isa<OMPAllocateDecl>(D))
return true;
const auto *VD = dyn_cast<VarDecl>(D);
if (!VD)
return false;
return VD->hasGlobalStorage() || !VD->isUsed();
}))
continue;
}
if (Child)
return nullptr;
Child = S;
}
if (Child)
Child = Child->IgnoreContainers();
}
return Child;
}
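// Returns the expression that defines the number of teams for the given
// target directive, if any, and sets DefaultVal to its constant value or to a
// directive-specific default (1 for single-team forms, 0 when the runtime
// decides, -1 when it cannot be determined here).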
const Expr *CGOpenMPRuntime::getNumTeamsExprForTargetDirective(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
int32_t &DefaultVal) {
OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
assert(isOpenMPTargetExecutionDirective(DirectiveKind) &&
"Expected target-based executable directive.");
switch (DirectiveKind) {
case OMPD_target: {
const auto *CS = D.getInnermostCapturedStmt();
const auto *Body =
CS->getCapturedStmt()->IgnoreContainers(true);
const Stmt *ChildStmt =
CGOpenMPRuntime::getSingleCompoundChild(CGF.getContext(), Body);
if (const auto *NestedDir =
dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
if (isOpenMPTeamsDirective(NestedDir->getDirectiveKind())) {
if (NestedDir->hasClausesOfKind<OMPNumTeamsClause>()) {
const Expr *NumTeams =
NestedDir->getSingleClause<OMPNumTeamsClause>()->getNumTeams();
if (NumTeams->isIntegerConstantExpr(CGF.getContext()))
if (auto Constant =
NumTeams->getIntegerConstantExpr(CGF.getContext()))
DefaultVal = Constant->getExtValue();
return NumTeams;
}
DefaultVal = 0;
return nullptr;
}
if (isOpenMPParallelDirective(NestedDir->getDirectiveKind()) ||
isOpenMPSimdDirective(NestedDir->getDirectiveKind())) {
DefaultVal = 1;
return nullptr;
}
DefaultVal = 1;
return nullptr;
}
DefaultVal = -1;
return nullptr;
}
case OMPD_target_teams:
case OMPD_target_teams_distribute:
case OMPD_target_teams_distribute_simd:
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd: {
if (D.hasClausesOfKind<OMPNumTeamsClause>()) {
const Expr *NumTeams =
D.getSingleClause<OMPNumTeamsClause>()->getNumTeams();
if (NumTeams->isIntegerConstantExpr(CGF.getContext()))
if (auto Constant = NumTeams->getIntegerConstantExpr(CGF.getContext()))
DefaultVal = Constant->getExtValue();
return NumTeams;
}
DefaultVal = 0;
return nullptr;
}
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
case OMPD_target_simd:
DefaultVal = 1;
return nullptr;
case OMPD_parallel:
case OMPD_for:
case OMPD_parallel_for:
case OMPD_parallel_master:
case OMPD_parallel_sections:
case OMPD_for_simd:
case OMPD_parallel_for_simd:
case OMPD_cancel:
case OMPD_cancellation_point:
case OMPD_ordered:
case OMPD_threadprivate:
case OMPD_allocate:
case OMPD_task:
case OMPD_simd:
case OMPD_tile:
case OMPD_unroll:
case OMPD_sections:
case OMPD_section:
case OMPD_single:
case OMPD_master:
case OMPD_critical:
case OMPD_taskyield:
case OMPD_barrier:
case OMPD_taskwait:
case OMPD_taskgroup:
case OMPD_atomic:
case OMPD_flush:
case OMPD_depobj:
case OMPD_scan:
case OMPD_teams:
case OMPD_target_data:
case OMPD_target_exit_data:
case OMPD_target_enter_data:
case OMPD_distribute:
case OMPD_distribute_simd:
case OMPD_distribute_parallel_for:
case OMPD_distribute_parallel_for_simd:
case OMPD_teams_distribute:
case OMPD_teams_distribute_simd:
case OMPD_teams_distribute_parallel_for:
case OMPD_teams_distribute_parallel_for_simd:
case OMPD_target_update:
case OMPD_declare_simd:
case OMPD_declare_variant:
case OMPD_begin_declare_variant:
case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
case OMPD_declare_mapper:
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_master_taskloop:
case OMPD_master_taskloop_simd:
case OMPD_parallel_master_taskloop:
case OMPD_parallel_master_taskloop_simd:
case OMPD_requires:
case OMPD_metadirective:
case OMPD_unknown:
break;
default:
break;
}
llvm_unreachable("Unexpected directive kind.");
}
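// Emits the number of teams for a target directive on the host: evaluates the
// num_teams expression when one applies, otherwise returns the default
// determined above as an i32 constant.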
llvm::Value *CGOpenMPRuntime::emitNumTeamsForTargetDirective(
CodeGenFunction &CGF, const OMPExecutableDirective &D) {
assert(!CGF.getLangOpts().OpenMPIsDevice &&
"Clauses associated with the teams directive expected to be emitted "
"only for the host!");
CGBuilderTy &Bld = CGF.Builder;
int32_t DefaultNT = -1;
const Expr *NumTeams = getNumTeamsExprForTargetDirective(CGF, D, DefaultNT);
if (NumTeams != nullptr) {
OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
switch (DirectiveKind) {
case OMPD_target: {
const auto *CS = D.getInnermostCapturedStmt();
CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
llvm::Value *NumTeamsVal = CGF.EmitScalarExpr(NumTeams, /*IgnoreResultAssign=*/true);
return Bld.CreateIntCast(NumTeamsVal, CGF.Int32Ty, /*isSigned=*/true);
}
case OMPD_target_teams:
case OMPD_target_teams_distribute:
case OMPD_target_teams_distribute_simd:
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd: {
CodeGenFunction::RunCleanupsScope NumTeamsScope(CGF);
llvm::Value *NumTeamsVal = CGF.EmitScalarExpr(NumTeams, /*IgnoreResultAssign=*/true);
return Bld.CreateIntCast(NumTeamsVal, CGF.Int32Ty, /*isSigned=*/true);
}
default:
break;
}
}
return llvm::ConstantInt::get(CGF.Int32Ty, DefaultNT);
}
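// Computes the number of threads for a parallel region nested directly in the
// captured statement CS, honoring its if and num_threads clauses and clamping
// the result by DefaultThreadLimitVal when that is provided.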
static llvm::Value *getNumThreads(CodeGenFunction &CGF, const CapturedStmt *CS,
llvm::Value *DefaultThreadLimitVal) {
const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
CGF.getContext(), CS->getCapturedStmt());
if (const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child)) {
if (isOpenMPParallelDirective(Dir->getDirectiveKind())) {
llvm::Value *NumThreads = nullptr;
llvm::Value *CondVal = nullptr;
if (Dir->hasClausesOfKind<OMPIfClause>()) {
CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
const OMPIfClause *IfClause = nullptr;
for (const auto *C : Dir->getClausesOfKind<OMPIfClause>()) {
if (C->getNameModifier() == OMPD_unknown ||
C->getNameModifier() == OMPD_parallel) {
IfClause = C;
break;
}
}
if (IfClause) {
const Expr *Cond = IfClause->getCondition();
bool Result;
if (Cond->EvaluateAsBooleanCondition(Result, CGF.getContext())) {
if (!Result)
return CGF.Builder.getInt32(1);
} else {
CodeGenFunction::LexicalScope Scope(CGF, Cond->getSourceRange());
if (const auto *PreInit =
cast_or_null<DeclStmt>(IfClause->getPreInitStmt())) {
for (const auto *I : PreInit->decls()) {
if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
CGF.EmitVarDecl(cast<VarDecl>(*I));
} else {
CodeGenFunction::AutoVarEmission Emission =
CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
CGF.EmitAutoVarCleanups(Emission);
}
}
}
CondVal = CGF.EvaluateExprAsBool(Cond);
}
}
}
if (Dir->hasClausesOfKind<OMPNumThreadsClause>()) {
CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
const auto *NumThreadsClause =
Dir->getSingleClause<OMPNumThreadsClause>();
CodeGenFunction::LexicalScope Scope(
CGF, NumThreadsClause->getNumThreads()->getSourceRange());
if (const auto *PreInit =
cast_or_null<DeclStmt>(NumThreadsClause->getPreInitStmt())) {
for (const auto *I : PreInit->decls()) {
if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
CGF.EmitVarDecl(cast<VarDecl>(*I));
} else {
CodeGenFunction::AutoVarEmission Emission =
CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
CGF.EmitAutoVarCleanups(Emission);
}
}
}
NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads());
NumThreads = CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned=*/false);
if (DefaultThreadLimitVal)
NumThreads = CGF.Builder.CreateSelect(
CGF.Builder.CreateICmpULT(DefaultThreadLimitVal, NumThreads),
DefaultThreadLimitVal, NumThreads);
} else {
NumThreads = DefaultThreadLimitVal ? DefaultThreadLimitVal
: CGF.Builder.getInt32(0);
}
if (CondVal) {
NumThreads = CGF.Builder.CreateSelect(CondVal, NumThreads,
CGF.Builder.getInt32(1));
}
return NumThreads;
}
if (isOpenMPSimdDirective(Dir->getDirectiveKind()))
return CGF.Builder.getInt32(1);
return DefaultThreadLimitVal;
}
return DefaultThreadLimitVal ? DefaultThreadLimitVal
: CGF.Builder.getInt32(0);
}
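// Returns the expression that bounds the number of threads for the given
// target directive (from thread_limit and/or num_threads clauses), if any,
// and sets DefaultVal to the constant bound when it can be evaluated.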
const Expr *CGOpenMPRuntime::getNumThreadsExprForTargetDirective(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
int32_t &DefaultVal) {
OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
assert(isOpenMPTargetExecutionDirective(DirectiveKind) &&
"Expected target-based executable directive.");
switch (DirectiveKind) {
case OMPD_target:
return nullptr;
case OMPD_target_teams:
case OMPD_target_teams_distribute:
if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
const Expr *ThreadLimit = ThreadLimitClause->getThreadLimit();
if (ThreadLimit->isIntegerConstantExpr(CGF.getContext()))
if (auto Constant =
ThreadLimit->getIntegerConstantExpr(CGF.getContext()))
DefaultVal = Constant->getExtValue();
return ThreadLimit;
}
return nullptr;
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd: {
Expr *ThreadLimit = nullptr;
Expr *NumThreads = nullptr;
if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
ThreadLimit = ThreadLimitClause->getThreadLimit();
if (ThreadLimit->isIntegerConstantExpr(CGF.getContext()))
if (auto Constant =
ThreadLimit->getIntegerConstantExpr(CGF.getContext()))
DefaultVal = Constant->getExtValue();
}
if (D.hasClausesOfKind<OMPNumThreadsClause>()) {
const auto *NumThreadsClause = D.getSingleClause<OMPNumThreadsClause>();
NumThreads = NumThreadsClause->getNumThreads();
if (NumThreads->isIntegerConstantExpr(CGF.getContext())) {
if (auto Constant =
NumThreads->getIntegerConstantExpr(CGF.getContext())) {
if (Constant->getExtValue() < DefaultVal) {
DefaultVal = Constant->getExtValue();
ThreadLimit = NumThreads;
}
}
}
}
return ThreadLimit;
}
case OMPD_target_teams_distribute_simd:
case OMPD_target_simd:
DefaultVal = 1;
return nullptr;
case OMPD_parallel:
case OMPD_for:
case OMPD_parallel_for:
case OMPD_parallel_master:
case OMPD_parallel_sections:
case OMPD_for_simd:
case OMPD_parallel_for_simd:
case OMPD_cancel:
case OMPD_cancellation_point:
case OMPD_ordered:
case OMPD_threadprivate:
case OMPD_allocate:
case OMPD_task:
case OMPD_simd:
case OMPD_tile:
case OMPD_unroll:
case OMPD_sections:
case OMPD_section:
case OMPD_single:
case OMPD_master:
case OMPD_critical:
case OMPD_taskyield:
case OMPD_barrier:
case OMPD_taskwait:
case OMPD_taskgroup:
case OMPD_atomic:
case OMPD_flush:
case OMPD_depobj:
case OMPD_scan:
case OMPD_teams:
case OMPD_target_data:
case OMPD_target_exit_data:
case OMPD_target_enter_data:
case OMPD_distribute:
case OMPD_distribute_simd:
case OMPD_distribute_parallel_for:
case OMPD_distribute_parallel_for_simd:
case OMPD_teams_distribute:
case OMPD_teams_distribute_simd:
case OMPD_teams_distribute_parallel_for:
case OMPD_teams_distribute_parallel_for_simd:
case OMPD_target_update:
case OMPD_declare_simd:
case OMPD_declare_variant:
case OMPD_begin_declare_variant:
case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
case OMPD_declare_mapper:
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_master_taskloop:
case OMPD_master_taskloop_simd:
case OMPD_parallel_master_taskloop:
case OMPD_parallel_master_taskloop_simd:
case OMPD_requires:
case OMPD_unknown:
break;
default:
break;
}
llvm_unreachable("Unsupported directive kind.");
}
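// Emits the thread count for a target directive on the host, combining the
// thread_limit, num_threads and if clauses of the directive and of constructs
// nested inside it; returns i32 0 when the runtime should pick the value.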
llvm::Value *CGOpenMPRuntime::emitNumThreadsForTargetDirective(
CodeGenFunction &CGF, const OMPExecutableDirective &D) {
assert(!CGF.getLangOpts().OpenMPIsDevice &&
"Clauses associated with the teams directive expected to be emitted "
"only for the host!");
OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
assert(isOpenMPTargetExecutionDirective(DirectiveKind) &&
"Expected target-based executable directive.");
CGBuilderTy &Bld = CGF.Builder;
llvm::Value *ThreadLimitVal = nullptr;
llvm::Value *NumThreadsVal = nullptr;
switch (DirectiveKind) {
case OMPD_target: {
const CapturedStmt *CS = D.getInnermostCapturedStmt();
if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
return NumThreads;
const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
CGF.getContext(), CS->getCapturedStmt());
if (const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child)) {
if (Dir->hasClausesOfKind<OMPThreadLimitClause>()) {
CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
const auto *ThreadLimitClause =
Dir->getSingleClause<OMPThreadLimitClause>();
CodeGenFunction::LexicalScope Scope(
CGF, ThreadLimitClause->getThreadLimit()->getSourceRange());
if (const auto *PreInit =
cast_or_null<DeclStmt>(ThreadLimitClause->getPreInitStmt())) {
for (const auto *I : PreInit->decls()) {
if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
CGF.EmitVarDecl(cast<VarDecl>(*I));
} else {
CodeGenFunction::AutoVarEmission Emission =
CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
CGF.EmitAutoVarCleanups(Emission);
}
}
}
llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
ThreadLimitClause->getThreadLimit(), true);
ThreadLimitVal =
Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, false);
}
if (isOpenMPTeamsDirective(Dir->getDirectiveKind()) &&
!isOpenMPDistributeDirective(Dir->getDirectiveKind())) {
CS = Dir->getInnermostCapturedStmt();
const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
CGF.getContext(), CS->getCapturedStmt());
Dir = dyn_cast_or_null<OMPExecutableDirective>(Child);
}
if (Dir && isOpenMPDistributeDirective(Dir->getDirectiveKind()) &&
!isOpenMPSimdDirective(Dir->getDirectiveKind())) {
CS = Dir->getInnermostCapturedStmt();
if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
return NumThreads;
}
if (Dir && isOpenMPSimdDirective(Dir->getDirectiveKind()))
return Bld.getInt32(1);
}
return ThreadLimitVal ? ThreadLimitVal : Bld.getInt32(0);
}
case OMPD_target_teams: {
if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
ThreadLimitClause->getThreadLimit(), true);
ThreadLimitVal =
Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, false);
}
const CapturedStmt *CS = D.getInnermostCapturedStmt();
if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
return NumThreads;
const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
CGF.getContext(), CS->getCapturedStmt());
if (const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child)) {
if (Dir->getDirectiveKind() == OMPD_distribute) {
CS = Dir->getInnermostCapturedStmt();
if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
return NumThreads;
}
}
return ThreadLimitVal ? ThreadLimitVal : Bld.getInt32(0);
}
case OMPD_target_teams_distribute:
if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
ThreadLimitClause->getThreadLimit(), true);
ThreadLimitVal =
Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, false);
}
return getNumThreads(CGF, D.getInnermostCapturedStmt(), ThreadLimitVal);
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd: {
llvm::Value *CondVal = nullptr;
if (D.hasClausesOfKind<OMPIfClause>()) {
const OMPIfClause *IfClause = nullptr;
for (const auto *C : D.getClausesOfKind<OMPIfClause>()) {
if (C->getNameModifier() == OMPD_unknown ||
C->getNameModifier() == OMPD_parallel) {
IfClause = C;
break;
}
}
if (IfClause) {
const Expr *Cond = IfClause->getCondition();
bool Result;
if (Cond->EvaluateAsBooleanCondition(Result, CGF.getContext())) {
if (!Result)
return Bld.getInt32(1);
} else {
CodeGenFunction::RunCleanupsScope Scope(CGF);
CondVal = CGF.EvaluateExprAsBool(Cond);
}
}
}
if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
ThreadLimitClause->getThreadLimit(), true);
ThreadLimitVal =
Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, false);
}
if (D.hasClausesOfKind<OMPNumThreadsClause>()) {
CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
const auto *NumThreadsClause = D.getSingleClause<OMPNumThreadsClause>();
llvm::Value *NumThreads = CGF.EmitScalarExpr(
NumThreadsClause->getNumThreads(), true);
NumThreadsVal =
Bld.CreateIntCast(NumThreads, CGF.Int32Ty, false);
ThreadLimitVal = ThreadLimitVal
? Bld.CreateSelect(Bld.CreateICmpULT(NumThreadsVal,
ThreadLimitVal),
NumThreadsVal, ThreadLimitVal)
: NumThreadsVal;
}
if (!ThreadLimitVal)
ThreadLimitVal = Bld.getInt32(0);
if (CondVal)
return Bld.CreateSelect(CondVal, ThreadLimitVal, Bld.getInt32(1));
return ThreadLimitVal;
}
case OMPD_target_teams_distribute_simd:
case OMPD_target_simd:
return Bld.getInt32(1);
case OMPD_parallel:
case OMPD_for:
case OMPD_parallel_for:
case OMPD_parallel_master:
case OMPD_parallel_sections:
case OMPD_for_simd:
case OMPD_parallel_for_simd:
case OMPD_cancel:
case OMPD_cancellation_point:
case OMPD_ordered:
case OMPD_threadprivate:
case OMPD_allocate:
case OMPD_task:
case OMPD_simd:
case OMPD_tile:
case OMPD_unroll:
case OMPD_sections:
case OMPD_section:
case OMPD_single:
case OMPD_master:
case OMPD_critical:
case OMPD_taskyield:
case OMPD_barrier:
case OMPD_taskwait:
case OMPD_taskgroup:
case OMPD_atomic:
case OMPD_flush:
case OMPD_depobj:
case OMPD_scan:
case OMPD_teams:
case OMPD_target_data:
case OMPD_target_exit_data:
case OMPD_target_enter_data:
case OMPD_distribute:
case OMPD_distribute_simd:
case OMPD_distribute_parallel_for:
case OMPD_distribute_parallel_for_simd:
case OMPD_teams_distribute:
case OMPD_teams_distribute_simd:
case OMPD_teams_distribute_parallel_for:
case OMPD_teams_distribute_parallel_for_simd:
case OMPD_target_update:
case OMPD_declare_simd:
case OMPD_declare_variant:
case OMPD_begin_declare_variant:
case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
case OMPD_declare_mapper:
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_master_taskloop:
case OMPD_master_taskloop_simd:
case OMPD_parallel_master_taskloop:
case OMPD_parallel_master_taskloop_simd:
case OMPD_requires:
case OMPD_metadirective:
case OMPD_unknown:
break;
default:
break;
}
llvm_unreachable("Unsupported directive kind.");
}
namespace {
LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
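// Utility to handle information from clauses associated with a given
// construct that use mappable expressions (e.g. 'map' clause, 'to' clause):
// collects the information and generates the arrays consumed by the
// offloading runtime.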
class MappableExprsHandler {
public:
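// Values for bit flags used to specify the mapping type for offloading; they
// mirror the map-type bits understood by the offloading runtime.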
enum OpenMPOffloadMappingFlags : uint64_t {
OMP_MAP_NONE = 0x0,
OMP_MAP_TO = 0x01,
OMP_MAP_FROM = 0x02,
OMP_MAP_ALWAYS = 0x04,
OMP_MAP_DELETE = 0x08,
OMP_MAP_PTR_AND_OBJ = 0x10,
OMP_MAP_TARGET_PARAM = 0x20,
OMP_MAP_RETURN_PARAM = 0x40,
OMP_MAP_PRIVATE = 0x80,
OMP_MAP_LITERAL = 0x100,
OMP_MAP_IMPLICIT = 0x200,
OMP_MAP_CLOSE = 0x400,
OMP_MAP_PRESENT = 0x1000,
OMP_MAP_OMPX_HOLD = 0x2000,
OMP_MAP_NON_CONTIG = 0x100000000000,
OMP_MAP_MEMBER_OF = 0xffff000000000000,
LLVM_MARK_AS_BITMASK_ENUM(OMP_MAP_MEMBER_OF),
};
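// Returns the bit position of the least significant bit of the MEMBER_OF
// field, computed from the trailing zero bits of OMP_MAP_MEMBER_OF.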
static unsigned getFlagMemberOffset() {
unsigned Offset = 0;
for (uint64_t Remain = OMP_MAP_MEMBER_OF; !(Remain & 1);
Remain = Remain >> 1)
Offset++;
return Offset;
}
class MappingExprInfo {
const ValueDecl *MapDecl = nullptr;
const Expr *MapExpr = nullptr;
public:
MappingExprInfo(const ValueDecl *MapDecl, const Expr *MapExpr = nullptr)
: MapDecl(MapDecl), MapExpr(MapExpr) {}
const ValueDecl *getMapDecl() const { return MapDecl; }
const Expr *getMapExpr() const { return MapExpr; }
};
class BasePointerInfo {
llvm::Value *Ptr = nullptr;
const ValueDecl *DevPtrDecl = nullptr;
public:
BasePointerInfo(llvm::Value *Ptr, const ValueDecl *DevPtrDecl = nullptr)
: Ptr(Ptr), DevPtrDecl(DevPtrDecl) {}
llvm::Value *operator*() const { return Ptr; }
const ValueDecl *getDevicePtrDecl() const { return DevPtrDecl; }
void setDevicePtrDecl(const ValueDecl *D) { DevPtrDecl = D; }
};
using MapExprsArrayTy = SmallVector<MappingExprInfo, 4>;
using MapBaseValuesArrayTy = SmallVector<BasePointerInfo, 4>;
using MapValuesArrayTy = SmallVector<llvm::Value *, 4>;
using MapFlagsArrayTy = SmallVector<OpenMPOffloadMappingFlags, 4>;
using MapMappersArrayTy = SmallVector<const ValueDecl *, 4>;
using MapDimArrayTy = SmallVector<uint64_t, 4>;
using MapNonContiguousArrayTy = SmallVector<MapValuesArrayTy, 4>;
struct MapCombinedInfoTy {
struct StructNonContiguousInfo {
bool IsNonContiguous = false;
MapDimArrayTy Dims;
MapNonContiguousArrayTy Offsets;
MapNonContiguousArrayTy Counts;
MapNonContiguousArrayTy Strides;
};
MapExprsArrayTy Exprs;
MapBaseValuesArrayTy BasePointers;
MapValuesArrayTy Pointers;
MapValuesArrayTy Sizes;
MapFlagsArrayTy Types;
MapMappersArrayTy Mappers;
StructNonContiguousInfo NonContigInfo;
void append(MapCombinedInfoTy &CurInfo) {
Exprs.append(CurInfo.Exprs.begin(), CurInfo.Exprs.end());
BasePointers.append(CurInfo.BasePointers.begin(),
CurInfo.BasePointers.end());
Pointers.append(CurInfo.Pointers.begin(), CurInfo.Pointers.end());
Sizes.append(CurInfo.Sizes.begin(), CurInfo.Sizes.end());
Types.append(CurInfo.Types.begin(), CurInfo.Types.end());
Mappers.append(CurInfo.Mappers.begin(), CurInfo.Mappers.end());
NonContigInfo.Dims.append(CurInfo.NonContigInfo.Dims.begin(),
CurInfo.NonContigInfo.Dims.end());
NonContigInfo.Offsets.append(CurInfo.NonContigInfo.Offsets.begin(),
CurInfo.NonContigInfo.Offsets.end());
NonContigInfo.Counts.append(CurInfo.NonContigInfo.Counts.begin(),
CurInfo.NonContigInfo.Counts.end());
NonContigInfo.Strides.append(CurInfo.NonContigInfo.Strides.begin(),
CurInfo.NonContigInfo.Strides.end());
}
};
struct StructRangeInfoTy {
MapCombinedInfoTy PreliminaryMapData;
std::pair<unsigned /*FieldIndex*/, Address /*Pointer*/> LowestElem = {
0, Address::invalid()};
std::pair<unsigned /*FieldIndex*/, Address /*Pointer*/> HighestElem = {
0, Address::invalid()};
Address Base = Address::invalid();
Address LB = Address::invalid();
bool IsArraySection = false;
bool HasCompleteRecord = false;
};
private:
struct MapInfo {
OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
ArrayRef<OpenMPMapModifierKind> MapModifiers;
ArrayRef<OpenMPMotionModifierKind> MotionModifiers;
bool ReturnDevicePointer = false;
bool IsImplicit = false;
const ValueDecl *Mapper = nullptr;
const Expr *VarRef = nullptr;
bool ForDeviceAddr = false;
MapInfo() = default;
MapInfo(
OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
OpenMPMapClauseKind MapType,
ArrayRef<OpenMPMapModifierKind> MapModifiers,
ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
bool ReturnDevicePointer, bool IsImplicit,
const ValueDecl *Mapper = nullptr, const Expr *VarRef = nullptr,
bool ForDeviceAddr = false)
: Components(Components), MapType(MapType), MapModifiers(MapModifiers),
MotionModifiers(MotionModifiers),
ReturnDevicePointer(ReturnDevicePointer), IsImplicit(IsImplicit),
Mapper(Mapper), VarRef(VarRef), ForDeviceAddr(ForDeviceAddr) {}
};
struct DeferredDevicePtrEntryTy {
const Expr *IE = nullptr;
const ValueDecl *VD = nullptr;
bool ForDeviceAddr = false;
DeferredDevicePtrEntryTy(const Expr *IE, const ValueDecl *VD,
bool ForDeviceAddr)
: IE(IE), VD(VD), ForDeviceAddr(ForDeviceAddr) {}
};
llvm::PointerUnion<const OMPExecutableDirective *,
const OMPDeclareMapperDecl *>
CurDir;
CodeGenFunction &CGF;
llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, bool> FirstPrivateDecls;
llvm::DenseMap<
const ValueDecl *,
SmallVector<OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>>
DevPointersMap;
llvm::DenseMap<const ValueDecl *, const OMPMapClause *> LambdasMap;
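// Evaluates the size in bytes of the given mappable expression: multiplies
// the dimensions of array-shaping expressions, computes length * element size
// for array sections, and otherwise returns the size of the expression type.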
llvm::Value *getExprTypeSize(const Expr *E) const {
QualType ExprTy = E->getType().getCanonicalType();
if (const auto *OAE = dyn_cast<OMPArrayShapingExpr>(E)) {
llvm::Value *Size =
CGF.getTypeSize(OAE->getBase()->getType()->getPointeeType());
for (const Expr *SE : OAE->getDimensions()) {
llvm::Value *Sz = CGF.EmitScalarExpr(SE);
Sz = CGF.EmitScalarConversion(Sz, SE->getType(),
CGF.getContext().getSizeType(),
SE->getExprLoc());
Size = CGF.Builder.CreateNUWMul(Size, Sz);
}
return Size;
}
if (const auto *RefTy = ExprTy->getAs<ReferenceType>())
ExprTy = RefTy->getPointeeType().getCanonicalType();
if (const auto *OAE = dyn_cast<OMPArraySectionExpr>(E)) {
QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(
OAE->getBase()->IgnoreParenImpCasts())
.getCanonicalType();
if (!OAE->getLength() && OAE->getColonLocFirst().isValid() &&
!OAE->getLowerBound())
return CGF.getTypeSize(BaseTy);
llvm::Value *ElemSize;
if (const auto *PTy = BaseTy->getAs<PointerType>()) {
ElemSize = CGF.getTypeSize(PTy->getPointeeType().getCanonicalType());
} else {
const auto *ATy = cast<ArrayType>(BaseTy.getTypePtr());
assert(ATy && "Expecting array type if not a pointer type.");
ElemSize = CGF.getTypeSize(ATy->getElementType().getCanonicalType());
}
if (!OAE->getLength() && OAE->getColonLocFirst().isInvalid())
return ElemSize;
if (const Expr *LenExpr = OAE->getLength()) {
llvm::Value *LengthVal = CGF.EmitScalarExpr(LenExpr);
LengthVal = CGF.EmitScalarConversion(LengthVal, LenExpr->getType(),
CGF.getContext().getSizeType(),
LenExpr->getExprLoc());
return CGF.Builder.CreateNUWMul(LengthVal, ElemSize);
}
assert(!OAE->getLength() && OAE->getColonLocFirst().isValid() &&
OAE->getLowerBound() && "expected array_section[lb:].");
llvm::Value *LengthVal = CGF.getTypeSize(BaseTy);
llvm::Value *LBVal = CGF.EmitScalarExpr(OAE->getLowerBound());
LBVal = CGF.EmitScalarConversion(LBVal, OAE->getLowerBound()->getType(),
CGF.getContext().getSizeType(),
OAE->getLowerBound()->getExprLoc());
LBVal = CGF.Builder.CreateNUWMul(LBVal, ElemSize);
llvm::Value *Cmp = CGF.Builder.CreateICmpUGT(LengthVal, LBVal);
llvm::Value *TrueVal = CGF.Builder.CreateNUWSub(LengthVal, LBVal);
LengthVal = CGF.Builder.CreateSelect(
Cmp, TrueVal, llvm::ConstantInt::get(CGF.SizeTy, 0));
return LengthVal;
}
return CGF.getTypeSize(ExprTy);
}
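// Returns the map-type bit flags corresponding to the map clause kind and its
// map/motion modifiers, optionally adding the PTR_AND_OBJ, TARGET_PARAM and
// NON_CONTIG flags.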
OpenMPOffloadMappingFlags getMapTypeBits(
OpenMPMapClauseKind MapType, ArrayRef<OpenMPMapModifierKind> MapModifiers,
ArrayRef<OpenMPMotionModifierKind> MotionModifiers, bool IsImplicit,
bool AddPtrFlag, bool AddIsTargetParamFlag, bool IsNonContiguous) const {
OpenMPOffloadMappingFlags Bits =
IsImplicit ? OMP_MAP_IMPLICIT : OMP_MAP_NONE;
switch (MapType) {
case OMPC_MAP_alloc:
case OMPC_MAP_release:
break;
case OMPC_MAP_to:
Bits |= OMP_MAP_TO;
break;
case OMPC_MAP_from:
Bits |= OMP_MAP_FROM;
break;
case OMPC_MAP_tofrom:
Bits |= OMP_MAP_TO | OMP_MAP_FROM;
break;
case OMPC_MAP_delete:
Bits |= OMP_MAP_DELETE;
break;
case OMPC_MAP_unknown:
llvm_unreachable("Unexpected map type!");
}
if (AddPtrFlag)
Bits |= OMP_MAP_PTR_AND_OBJ;
if (AddIsTargetParamFlag)
Bits |= OMP_MAP_TARGET_PARAM;
if (llvm::is_contained(MapModifiers, OMPC_MAP_MODIFIER_always))
Bits |= OMP_MAP_ALWAYS;
if (llvm::is_contained(MapModifiers, OMPC_MAP_MODIFIER_close))
Bits |= OMP_MAP_CLOSE;
if (llvm::is_contained(MapModifiers, OMPC_MAP_MODIFIER_present) ||
llvm::is_contained(MotionModifiers, OMPC_MOTION_MODIFIER_present))
Bits |= OMP_MAP_PRESENT;
if (llvm::is_contained(MapModifiers, OMPC_MAP_MODIFIER_ompx_hold))
Bits |= OMP_MAP_OMPX_HOLD;
if (IsNonContiguous)
Bits |= OMP_MAP_NON_CONTIG;
return Bits;
}
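// Returns true if the given expression is a "final" array section, i.e. one
// whose length cannot be proven to be one.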
bool isFinalArraySectionExpression(const Expr *E) const {
const auto *OASE = dyn_cast<OMPArraySectionExpr>(E);
if (!OASE)
return false;
if (OASE->getColonLocFirst().isInvalid())
return false;
const Expr *Length = OASE->getLength();
if (!Length) {
QualType BaseQTy = OMPArraySectionExpr::getBaseOriginalType(
OASE->getBase()->IgnoreParenImpCasts())
.getCanonicalType();
if (const auto *ATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr()))
return ATy->getSize().getSExtValue() != 1;
return true;
}
Expr::EvalResult Result;
if (!Length->EvaluateAsInt(Result, CGF.getContext()))
return true;
llvm::APSInt ConstLength = Result.Val.getInt();
return ConstLength.getSExtValue() != 1;
}
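// Generates the base pointers, section pointers, sizes and map-type flags for
// the given list of map clause components, appending them to CombinedInfo and
// recording partially mapped structs in PartialStruct.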
void generateInfoForComponentList(
OpenMPMapClauseKind MapType, ArrayRef<OpenMPMapModifierKind> MapModifiers,
ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
MapCombinedInfoTy &CombinedInfo, StructRangeInfoTy &PartialStruct,
bool IsFirstComponentList, bool IsImplicit,
const ValueDecl *Mapper = nullptr, bool ForDeviceAddr = false,
const ValueDecl *BaseDecl = nullptr, const Expr *MapExpr = nullptr,
ArrayRef<OMPClauseMappableExprCommon::MappableExprComponentListRef>
OverlappedElements = llvm::None) const {
bool IsCaptureFirstInfo = IsFirstComponentList;
bool RequiresReference = false;
auto CI = Components.rbegin();
auto CE = Components.rend();
auto I = CI;
bool IsExpressionFirstInfo = true;
bool FirstPointerInComplexData = false;
Address BP = Address::invalid();
const Expr *AssocExpr = I->getAssociatedExpression();
const auto *AE = dyn_cast<ArraySubscriptExpr>(AssocExpr);
const auto *OASE = dyn_cast<OMPArraySectionExpr>(AssocExpr);
const auto *OAShE = dyn_cast<OMPArrayShapingExpr>(AssocExpr);
if (isa<MemberExpr>(AssocExpr)) {
BP = CGF.LoadCXXThisAddress();
} else if ((AE && isa<CXXThisExpr>(AE->getBase()->IgnoreParenImpCasts())) ||
(OASE &&
isa<CXXThisExpr>(OASE->getBase()->IgnoreParenImpCasts()))) {
BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress(CGF);
} else if (OAShE &&
isa<CXXThisExpr>(OAShE->getBase()->IgnoreParenCasts())) {
BP = Address(
CGF.EmitScalarExpr(OAShE->getBase()),
CGF.ConvertTypeForMem(OAShE->getBase()->getType()->getPointeeType()),
CGF.getContext().getTypeAlignInChars(OAShE->getBase()->getType()));
} else {
BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress(CGF);
if (const auto *VD =
dyn_cast_or_null<VarDecl>(I->getAssociatedDeclaration())) {
if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {
if ((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
(*Res == OMPDeclareTargetDeclAttr::MT_To &&
CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) {
RequiresReference = true;
BP = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
}
}
}
QualType Ty =
I->getAssociatedDeclaration()->getType().getNonReferenceType();
if (Ty->isAnyPointerType() && std::next(I) != CE) {
const auto *VD = dyn_cast<VarDecl>(I->getAssociatedDeclaration());
if (CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory() ||
!VD || VD->hasLocalStorage())
BP = CGF.EmitLoadOfPointer(BP, Ty->castAs<PointerType>());
else
FirstPointerInComplexData = true;
++I;
}
}
bool ShouldBeMemberOf = false;
const MemberExpr *EncounteredME = nullptr;
uint64_t DimSize = 1;
bool IsNonContiguous = CombinedInfo.NonContigInfo.IsNonContiguous;
bool IsPrevMemberReference = false;
for (; I != CE; ++I) {
if (!EncounteredME) {
EncounteredME = dyn_cast<MemberExpr>(I->getAssociatedExpression());
if (EncounteredME) {
ShouldBeMemberOf = true;
if (FirstPointerInComplexData) {
QualType Ty = std::prev(I)
->getAssociatedDeclaration()
->getType()
.getNonReferenceType();
BP = CGF.EmitLoadOfPointer(BP, Ty->castAs<PointerType>());
FirstPointerInComplexData = false;
}
}
}
auto Next = std::next(I);
bool IsFinalArraySection =
!IsNonContiguous &&
isFinalArraySectionExpression(I->getAssociatedExpression());
const ValueDecl *MapDecl = (I->getAssociatedDeclaration())
? I->getAssociatedDeclaration()
: BaseDecl;
MapExpr = (I->getAssociatedExpression()) ? I->getAssociatedExpression()
: MapExpr;
const auto *OASE =
dyn_cast<OMPArraySectionExpr>(I->getAssociatedExpression());
const auto *OAShE =
dyn_cast<OMPArrayShapingExpr>(I->getAssociatedExpression());
const auto *UO = dyn_cast<UnaryOperator>(I->getAssociatedExpression());
const auto *BO = dyn_cast<BinaryOperator>(I->getAssociatedExpression());
bool IsPointer =
OAShE ||
(OASE && OMPArraySectionExpr::getBaseOriginalType(OASE)
.getCanonicalType()
->isAnyPointerType()) ||
I->getAssociatedExpression()->getType()->isAnyPointerType();
bool IsMemberReference = isa<MemberExpr>(I->getAssociatedExpression()) &&
MapDecl &&
MapDecl->getType()->isLValueReferenceType();
bool IsNonDerefPointer = IsPointer && !UO && !BO && !IsNonContiguous;
if (OASE)
++DimSize;
if (Next == CE || IsMemberReference || IsNonDerefPointer ||
IsFinalArraySection) {
assert((Next == CE ||
isa<MemberExpr>(Next->getAssociatedExpression()) ||
isa<ArraySubscriptExpr>(Next->getAssociatedExpression()) ||
isa<OMPArraySectionExpr>(Next->getAssociatedExpression()) ||
isa<OMPArrayShapingExpr>(Next->getAssociatedExpression()) ||
isa<UnaryOperator>(Next->getAssociatedExpression()) ||
isa<BinaryOperator>(Next->getAssociatedExpression())) &&
"Unexpected expression");
Address LB = Address::invalid();
Address LowestElem = Address::invalid();
auto &&EmitMemberExprBase = [](CodeGenFunction &CGF,
const MemberExpr *E) {
const Expr *BaseExpr = E->getBase();
LValue BaseLV;
if (E->isArrow()) {
LValueBaseInfo BaseInfo;
TBAAAccessInfo TBAAInfo;
Address Addr =
CGF.EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
QualType PtrTy = BaseExpr->getType()->getPointeeType();
BaseLV = CGF.MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
} else {
BaseLV = CGF.EmitOMPSharedLValue(BaseExpr);
}
return BaseLV;
};
if (OAShE) {
LowestElem = LB =
Address(CGF.EmitScalarExpr(OAShE->getBase()),
CGF.ConvertTypeForMem(
OAShE->getBase()->getType()->getPointeeType()),
CGF.getContext().getTypeAlignInChars(
OAShE->getBase()->getType()));
} else if (IsMemberReference) {
const auto *ME = cast<MemberExpr>(I->getAssociatedExpression());
LValue BaseLVal = EmitMemberExprBase(CGF, ME);
LowestElem = CGF.EmitLValueForFieldInitialization(
BaseLVal, cast<FieldDecl>(MapDecl))
.getAddress(CGF);
LB = CGF.EmitLoadOfReferenceLValue(LowestElem, MapDecl->getType())
.getAddress(CGF);
} else {
LowestElem = LB =
CGF.EmitOMPSharedLValue(I->getAssociatedExpression())
.getAddress(CGF);
}
bool IsMemberPointerOrAddr =
EncounteredME &&
(((IsPointer || ForDeviceAddr) &&
I->getAssociatedExpression() == EncounteredME) ||
(IsPrevMemberReference && !IsPointer) ||
(IsMemberReference && Next != CE &&
!Next->getAssociatedExpression()->getType()->isPointerType()));
if (!OverlappedElements.empty() && Next == CE) {
assert(!PartialStruct.Base.isValid() && "The base element is set.");
assert(!IsPointer &&
"Unexpected base element with the pointer type.");
PartialStruct.LowestElem = {0, LowestElem};
CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(
I->getAssociatedExpression()->getType());
Address HB = CGF.Builder.CreateConstGEP(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
LowestElem, CGF.VoidPtrTy, CGF.Int8Ty),
TypeSize.getQuantity() - 1);
PartialStruct.HighestElem = {
std::numeric_limits<decltype(
PartialStruct.HighestElem.first)>::max(),
HB};
PartialStruct.Base = BP;
PartialStruct.LB = LB;
assert(
PartialStruct.PreliminaryMapData.BasePointers.empty() &&
"Overlapped elements must be used only once for the variable.");
std::swap(PartialStruct.PreliminaryMapData, CombinedInfo);
OpenMPOffloadMappingFlags Flags =
OMP_MAP_MEMBER_OF |
getMapTypeBits(MapType, MapModifiers, MotionModifiers, IsImplicit,
/*AddPtrFlag=*/false,
/*AddIsTargetParamFlag=*/false, IsNonContiguous);
llvm::Value *Size = nullptr;
for (OMPClauseMappableExprCommon::MappableExprComponentListRef
Component : OverlappedElements) {
Address ComponentLB = Address::invalid();
for (const OMPClauseMappableExprCommon::MappableComponent &MC :
Component) {
if (const ValueDecl *VD = MC.getAssociatedDeclaration()) {
const auto *FD = dyn_cast<FieldDecl>(VD);
if (FD && FD->getType()->isLValueReferenceType()) {
const auto *ME =
cast<MemberExpr>(MC.getAssociatedExpression());
LValue BaseLVal = EmitMemberExprBase(CGF, ME);
ComponentLB =
CGF.EmitLValueForFieldInitialization(BaseLVal, FD)
.getAddress(CGF);
} else {
ComponentLB =
CGF.EmitOMPSharedLValue(MC.getAssociatedExpression())
.getAddress(CGF);
}
Size = CGF.Builder.CreatePtrDiff(
CGF.Int8Ty, CGF.EmitCastToVoidPtr(ComponentLB.getPointer()),
CGF.EmitCastToVoidPtr(LB.getPointer()));
break;
}
}
assert(Size && "Failed to determine structure size");
CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
CombinedInfo.BasePointers.push_back(BP.getPointer());
CombinedInfo.Pointers.push_back(LB.getPointer());
CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
Size, CGF.Int64Ty, true));
CombinedInfo.Types.push_back(Flags);
CombinedInfo.Mappers.push_back(nullptr);
CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize
: 1);
LB = CGF.Builder.CreateConstGEP(ComponentLB, 1);
}
CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
CombinedInfo.BasePointers.push_back(BP.getPointer());
CombinedInfo.Pointers.push_back(LB.getPointer());
Size = CGF.Builder.CreatePtrDiff(
CGF.Int8Ty, CGF.Builder.CreateConstGEP(HB, 1).getPointer(),
CGF.EmitCastToVoidPtr(LB.getPointer()));
CombinedInfo.Sizes.push_back(
CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, true));
CombinedInfo.Types.push_back(Flags);
CombinedInfo.Mappers.push_back(nullptr);
CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize
: 1);
break;
}
llvm::Value *Size = getExprTypeSize(I->getAssociatedExpression());
if (!IsMemberPointerOrAddr ||
(Next == CE && MapType != OMPC_MAP_unknown)) {
CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
CombinedInfo.BasePointers.push_back(BP.getPointer());
CombinedInfo.Pointers.push_back(LB.getPointer());
CombinedInfo.Sizes.push_back(
CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, true));
CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize
: 1);
bool HasMapper = Mapper && Next == CE;
CombinedInfo.Mappers.push_back(HasMapper ? Mapper : nullptr);
OpenMPOffloadMappingFlags Flags = getMapTypeBits(
MapType, MapModifiers, MotionModifiers, IsImplicit,
!IsExpressionFirstInfo || RequiresReference ||
FirstPointerInComplexData || IsMemberReference,
IsCaptureFirstInfo && !RequiresReference, IsNonContiguous);
if (!IsExpressionFirstInfo || IsMemberReference) {
if (IsPointer || (IsMemberReference && Next != CE))
Flags &= ~(OMP_MAP_TO | OMP_MAP_FROM | OMP_MAP_ALWAYS |
OMP_MAP_DELETE | OMP_MAP_CLOSE);
if (ShouldBeMemberOf) {
Flags |= OMP_MAP_MEMBER_OF;
ShouldBeMemberOf = false;
}
}
CombinedInfo.Types.push_back(Flags);
}
if (EncounteredME) {
const auto *FD = cast<FieldDecl>(EncounteredME->getMemberDecl());
unsigned FieldIndex = FD->getFieldIndex();
if (!PartialStruct.Base.isValid()) {
PartialStruct.LowestElem = {FieldIndex, LowestElem};
if (IsFinalArraySection) {
Address HB =
CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false)
.getAddress(CGF);
PartialStruct.HighestElem = {FieldIndex, HB};
} else {
PartialStruct.HighestElem = {FieldIndex, LowestElem};
}
PartialStruct.Base = BP;
PartialStruct.LB = BP;
} else if (FieldIndex < PartialStruct.LowestElem.first) {
PartialStruct.LowestElem = {FieldIndex, LowestElem};
} else if (FieldIndex > PartialStruct.HighestElem.first) {
PartialStruct.HighestElem = {FieldIndex, LowestElem};
}
}
if (IsFinalArraySection || IsNonContiguous)
PartialStruct.IsArraySection = true;
if (IsFinalArraySection)
break;
if (Next != CE)
BP = IsMemberReference ? LowestElem : LB;
IsExpressionFirstInfo = false;
IsCaptureFirstInfo = false;
FirstPointerInComplexData = false;
IsPrevMemberReference = IsMemberReference;
} else if (FirstPointerInComplexData) {
QualType Ty = Components.rbegin()
->getAssociatedDeclaration()
->getType()
.getNonReferenceType();
BP = CGF.EmitLoadOfPointer(BP, Ty->castAs<PointerType>());
FirstPointerInComplexData = false;
}
}
if (!EncounteredME)
PartialStruct.HasCompleteRecord = true;
if (!IsNonContiguous)
return;
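// For non-contiguous sections, compute the per-dimension offsets, counts and
// strides that emitNonContiguousDescriptor materializes later.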
const ASTContext &Context = CGF.getContext();
MapValuesArrayTy CurOffsets = {llvm::ConstantInt::get(CGF.CGM.Int64Ty, 0)};
MapValuesArrayTy CurCounts = {llvm::ConstantInt::get(CGF.CGM.Int64Ty, 1)};
MapValuesArrayTy CurStrides;
MapValuesArrayTy DimSizes{llvm::ConstantInt::get(CGF.CGM.Int64Ty, 1)};
uint64_t ElementTypeSize;
for (const OMPClauseMappableExprCommon::MappableComponent &Component :
Components) {
const Expr *AssocExpr = Component.getAssociatedExpression();
const auto *OASE = dyn_cast<OMPArraySectionExpr>(AssocExpr);
if (!OASE)
continue;
QualType Ty = OMPArraySectionExpr::getBaseOriginalType(OASE->getBase());
auto *CAT = Context.getAsConstantArrayType(Ty);
auto *VAT = Context.getAsVariableArrayType(Ty);
assert((VAT || CAT || &Component == &*Components.begin()) &&
"Should be either ConstantArray or VariableArray if not the "
"first Component");
if (CurStrides.empty()) {
const Type *ElementType = nullptr;
if (CAT)
ElementType = CAT->getElementType().getTypePtr();
else if (VAT)
ElementType = VAT->getElementType().getTypePtr();
else
assert(&Component == &*Components.begin() &&
"Only expect pointer (non CAT or VAT) when this is the "
"first Component");
if (ElementType) {
if (&Component != &*Components.begin())
ElementType = ElementType->getPointeeOrArrayElementType();
ElementTypeSize =
Context.getTypeSizeInChars(ElementType).getQuantity();
CurStrides.push_back(
llvm::ConstantInt::get(CGF.Int64Ty, ElementTypeSize));
}
}
if (DimSizes.size() < Components.size() - 1) {
if (CAT)
DimSizes.push_back(llvm::ConstantInt::get(
CGF.Int64Ty, CAT->getSize().getZExtValue()));
else if (VAT)
DimSizes.push_back(CGF.Builder.CreateIntCast(
CGF.EmitScalarExpr(VAT->getSizeExpr()), CGF.Int64Ty,
/*IsSigned=*/false));
}
}
auto *DI = DimSizes.begin() + 1;
llvm::Value *DimProd =
llvm::ConstantInt::get(CGF.CGM.Int64Ty, ElementTypeSize);
for (const OMPClauseMappableExprCommon::MappableComponent &Component :
Components) {
const Expr *AssocExpr = Component.getAssociatedExpression();
if (const auto *AE = dyn_cast<ArraySubscriptExpr>(AssocExpr)) {
llvm::Value *Offset = CGF.Builder.CreateIntCast(
CGF.EmitScalarExpr(AE->getIdx()), CGF.Int64Ty,
/*IsSigned=*/false);
CurOffsets.push_back(Offset);
CurCounts.push_back(llvm::ConstantInt::get(CGF.Int64Ty, 1));
CurStrides.push_back(CurStrides.back());
continue;
}
const auto *OASE = dyn_cast<OMPArraySectionExpr>(AssocExpr);
if (!OASE)
continue;
const Expr *OffsetExpr = OASE->getLowerBound();
llvm::Value *Offset = nullptr;
if (!OffsetExpr) {
Offset = llvm::ConstantInt::get(CGF.Int64Ty, 0);
} else {
Offset = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(OffsetExpr),
CGF.Int64Ty,
/*IsSigned=*/false);
}
CurOffsets.push_back(Offset);
const Expr *CountExpr = OASE->getLength();
llvm::Value *Count = nullptr;
if (!CountExpr) {
if (!OASE->getColonLocFirst().isValid() &&
!OASE->getColonLocSecond().isValid()) {
Count = llvm::ConstantInt::get(CGF.Int64Ty, 1);
} else {
const Expr *StrideExpr = OASE->getStride();
llvm::Value *Stride =
StrideExpr
? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(StrideExpr),
CGF.Int64Ty, false)
: nullptr;
if (Stride)
Count = CGF.Builder.CreateUDiv(
CGF.Builder.CreateNUWSub(*DI, Offset), Stride);
else
Count = CGF.Builder.CreateNUWSub(*DI, Offset);
}
} else {
Count = CGF.EmitScalarExpr(CountExpr);
}
Count = CGF.Builder.CreateIntCast(Count, CGF.Int64Ty, false);
CurCounts.push_back(Count);
const Expr *StrideExpr = OASE->getStride();
llvm::Value *Stride =
StrideExpr
? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(StrideExpr),
CGF.Int64Ty, false)
: nullptr;
DimProd = CGF.Builder.CreateNUWMul(DimProd, *(DI - 1));
if (Stride)
CurStrides.push_back(CGF.Builder.CreateNUWMul(DimProd, Stride));
else
CurStrides.push_back(DimProd);
if (DI != DimSizes.end())
++DI;
}
CombinedInfo.NonContigInfo.Offsets.push_back(CurOffsets);
CombinedInfo.NonContigInfo.Counts.push_back(CurCounts);
CombinedInfo.NonContigInfo.Strides.push_back(CurStrides);
}
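// Return the adjusted map modifiers if the declaration a capture refers to
// appears in a first-private clause. This is expected to be used only with
// directives that start with 'target'.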
MappableExprsHandler::OpenMPOffloadMappingFlags
getMapModifiersForPrivateClauses(const CapturedStmt::Capture &Cap) const {
assert(Cap.capturesVariable() && "Expected capture by reference only!");
if (FirstPrivateDecls.count(Cap.getCapturedVar())) {
if (Cap.getCapturedVar()->getType()->isAnyPointerType())
return MappableExprsHandler::OMP_MAP_TO |
MappableExprsHandler::OMP_MAP_PTR_AND_OBJ;
return MappableExprsHandler::OMP_MAP_PRIVATE |
MappableExprsHandler::OMP_MAP_TO;
}
auto I = LambdasMap.find(Cap.getCapturedVar()->getCanonicalDecl());
if (I != LambdasMap.end())
return getMapTypeBits(
I->getSecond()->getMapType(), I->getSecond()->getMapTypeModifiers(),
llvm::None, I->getSecond()->isImplicit(),
/*AddPtrFlag=*/false,
/*AddIsTargetParamFlag=*/false,
/*IsNonContiguous=*/false);
return MappableExprsHandler::OMP_MAP_TO |
MappableExprsHandler::OMP_MAP_FROM;
}
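// Generate the MEMBER_OF flags value for the given position in the list of
// base pointers (positions are encoded 1-based in the high bits of the flag).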
static OpenMPOffloadMappingFlags getMemberOfFlag(unsigned Position) {
return static_cast<OpenMPOffloadMappingFlags>(((uint64_t)Position + 1)
<< getFlagMemberOffset());
}
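// Set the MEMBER_OF field of Flags to MemberOfFlag, unless the entry is
// PTR_AND_OBJ and its MEMBER_OF field is not the all-ones placeholder, in
// which case it must stay untouched.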
static void setCorrectMemberOfFlag(OpenMPOffloadMappingFlags &Flags,
OpenMPOffloadMappingFlags MemberOfFlag) {
if ((Flags & OMP_MAP_PTR_AND_OBJ) &&
((Flags & OMP_MAP_MEMBER_OF) != OMP_MAP_MEMBER_OF))
return;
Flags &= ~OMP_MAP_MEMBER_OF;
Flags |= MemberOfFlag;
}
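// Fill Layout with the fields of RD (recursing into its non-virtual and
// virtual bases) in the order given by the LLVM record layout.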
void getPlainLayout(const CXXRecordDecl *RD,
llvm::SmallVectorImpl<const FieldDecl *> &Layout,
bool AsBase) const {
const CGRecordLayout &RL = CGF.getTypes().getCGRecordLayout(RD);
llvm::StructType *St =
AsBase ? RL.getBaseSubobjectLLVMType() : RL.getLLVMType();
unsigned NumElements = St->getNumElements();
llvm::SmallVector<
llvm::PointerUnion<const CXXRecordDecl *, const FieldDecl *>, 4>
RecordLayout(NumElements);
for (const auto &I : RD->bases()) {
if (I.isVirtual())
continue;
const auto *Base = I.getType()->getAsCXXRecordDecl();
if (Base->isEmpty() || CGF.getContext()
.getASTRecordLayout(Base)
.getNonVirtualSize()
.isZero())
continue;
unsigned FieldIndex = RL.getNonVirtualBaseLLVMFieldNo(Base);
RecordLayout[FieldIndex] = Base;
}
for (const auto &I : RD->vbases()) {
const auto *Base = I.getType()->getAsCXXRecordDecl();
if (Base->isEmpty())
continue;
unsigned FieldIndex = RL.getVirtualBaseIndex(Base);
if (RecordLayout[FieldIndex])
continue;
RecordLayout[FieldIndex] = Base;
}
assert(!RD->isUnion() && "Unexpected union.");
for (const auto *Field : RD->fields()) {
if (!Field->isBitField() && !Field->isZeroSize(CGF.getContext())) {
unsigned FieldIndex = RL.getLLVMFieldNo(Field);
RecordLayout[FieldIndex] = Field;
}
}
for (const llvm::PointerUnion<const CXXRecordDecl *, const FieldDecl *>
&Data : RecordLayout) {
if (Data.isNull())
continue;
if (const auto *Base = Data.dyn_cast<const CXXRecordDecl *>())
getPlainLayout(Base, Layout, true);
else
Layout.push_back(Data.get<const FieldDecl *>());
}
}
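// Generate all the base pointers, section pointers, sizes, map types, and
// mappers for the mappable expressions of the given clauses (all appended to
// CombinedInfo), marking entries that must be returned as device pointers.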
void generateAllInfoForClauses(
ArrayRef<const OMPClause *> Clauses, MapCombinedInfoTy &CombinedInfo,
const llvm::DenseSet<CanonicalDeclPtr<const Decl>> &SkipVarSet =
llvm::DenseSet<CanonicalDeclPtr<const Decl>>()) const {
enum MapKind { Present, Allocs, Other, Total };
llvm::MapVector<CanonicalDeclPtr<const Decl>,
SmallVector<SmallVector<MapInfo, 8>, 4>>
Info;
auto &&InfoGen =
[&Info, &SkipVarSet](
const ValueDecl *D, MapKind Kind,
OMPClauseMappableExprCommon::MappableExprComponentListRef L,
OpenMPMapClauseKind MapType,
ArrayRef<OpenMPMapModifierKind> MapModifiers,
ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
bool ReturnDevicePointer, bool IsImplicit, const ValueDecl *Mapper,
const Expr *VarRef = nullptr, bool ForDeviceAddr = false) {
if (SkipVarSet.contains(D))
return;
auto It = Info.find(D);
if (It == Info.end())
It = Info
.insert(std::make_pair(
D, SmallVector<SmallVector<MapInfo, 8>, 4>(Total)))
.first;
It->second[Kind].emplace_back(
L, MapType, MapModifiers, MotionModifiers, ReturnDevicePointer,
IsImplicit, Mapper, VarRef, ForDeviceAddr);
};
for (const auto *Cl : Clauses) {
const auto *C = dyn_cast<OMPMapClause>(Cl);
if (!C)
continue;
MapKind Kind = Other;
if (llvm::is_contained(C->getMapTypeModifiers(),
OMPC_MAP_MODIFIER_present))
Kind = Present;
else if (C->getMapType() == OMPC_MAP_alloc)
Kind = Allocs;
const auto *EI = C->getVarRefs().begin();
for (const auto L : C->component_lists()) {
const Expr *E = (C->getMapLoc().isValid()) ? *EI : nullptr;
InfoGen(std::get<0>(L), Kind, std::get<1>(L), C->getMapType(),
C->getMapTypeModifiers(), llvm::None,
/*ReturnDevicePointer=*/false, C->isImplicit(), std::get<2>(L),
E);
++EI;
}
}
for (const auto *Cl : Clauses) {
const auto *C = dyn_cast<OMPToClause>(Cl);
if (!C)
continue;
MapKind Kind = Other;
if (llvm::is_contained(C->getMotionModifiers(),
OMPC_MOTION_MODIFIER_present))
Kind = Present;
const auto *EI = C->getVarRefs().begin();
for (const auto L : C->component_lists()) {
InfoGen(std::get<0>(L), Kind, std::get<1>(L), OMPC_MAP_to, llvm::None,
C->getMotionModifiers(), /*ReturnDevicePointer=*/false,
C->isImplicit(), std::get<2>(L), *EI);
++EI;
}
}
for (const auto *Cl : Clauses) {
const auto *C = dyn_cast<OMPFromClause>(Cl);
if (!C)
continue;
MapKind Kind = Other;
if (llvm::is_contained(C->getMotionModifiers(),
OMPC_MOTION_MODIFIER_present))
Kind = Present;
const auto *EI = C->getVarRefs().begin();
for (const auto L : C->component_lists()) {
InfoGen(std::get<0>(L), Kind, std::get<1>(L), OMPC_MAP_from, llvm::None,
C->getMotionModifiers(), /*ReturnDevicePointer=*/false,
C->isImplicit(), std::get<2>(L), *EI);
++EI;
}
}
llvm::MapVector<CanonicalDeclPtr<const Decl>,
SmallVector<DeferredDevicePtrEntryTy, 4>>
DeferredInfo;
MapCombinedInfoTy UseDevicePtrCombinedInfo;
for (const auto *Cl : Clauses) {
const auto *C = dyn_cast<OMPUseDevicePtrClause>(Cl);
if (!C)
continue;
for (const auto L : C->component_lists()) {
OMPClauseMappableExprCommon::MappableExprComponentListRef Components =
std::get<1>(L);
assert(!Components.empty() &&
"Not expecting empty list of components!");
const ValueDecl *VD = Components.back().getAssociatedDeclaration();
VD = cast<ValueDecl>(VD->getCanonicalDecl());
const Expr *IE = Components.back().getAssociatedExpression();
auto It = Info.find(isa<MemberExpr>(IE) ? nullptr : VD);
if (It != Info.end()) {
bool Found = false;
for (auto &Data : It->second) {
auto *CI = llvm::find_if(Data, [VD](const MapInfo &MI) {
return MI.Components.back().getAssociatedDeclaration() == VD;
});
if (CI != Data.end()) {
auto PrevCI = std::next(CI->Components.rbegin());
const auto *VarD = dyn_cast<VarDecl>(VD);
if (CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory() ||
isa<MemberExpr>(IE) ||
!VD->getType().getNonReferenceType()->isPointerType() ||
PrevCI == CI->Components.rend() ||
isa<MemberExpr>(PrevCI->getAssociatedExpression()) || !VarD ||
VarD->hasLocalStorage()) {
CI->ReturnDevicePointer = true;
Found = true;
break;
}
}
}
if (Found)
continue;
}
if (isa<MemberExpr>(IE)) {
InfoGen(nullptr, Other, Components, OMPC_MAP_unknown, llvm::None,
llvm::None, /*ReturnDevicePointer=*/false, C->isImplicit(),
/*Mapper=*/nullptr);
DeferredInfo[nullptr].emplace_back(IE, VD, false);
} else {
llvm::Value *Ptr =
CGF.EmitLoadOfScalar(CGF.EmitLValue(IE), IE->getExprLoc());
UseDevicePtrCombinedInfo.Exprs.push_back(VD);
UseDevicePtrCombinedInfo.BasePointers.emplace_back(Ptr, VD);
UseDevicePtrCombinedInfo.Pointers.push_back(Ptr);
UseDevicePtrCombinedInfo.Sizes.push_back(
llvm::Constant::getNullValue(CGF.Int64Ty));
UseDevicePtrCombinedInfo.Types.push_back(OMP_MAP_RETURN_PARAM);
UseDevicePtrCombinedInfo.Mappers.push_back(nullptr);
}
}
}
llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed;
for (const auto *Cl : Clauses) {
const auto *C = dyn_cast<OMPUseDeviceAddrClause>(Cl);
if (!C)
continue;
for (const auto L : C->component_lists()) {
assert(!std::get<1>(L).empty() &&
"Not expecting empty list of components!");
const ValueDecl *VD = std::get<1>(L).back().getAssociatedDeclaration();
if (!Processed.insert(VD).second)
continue;
VD = cast<ValueDecl>(VD->getCanonicalDecl());
const Expr *IE = std::get<1>(L).back().getAssociatedExpression();
auto It = Info.find(isa<MemberExpr>(IE) ? nullptr : VD);
if (It != Info.end()) {
bool Found = false;
for (auto &Data : It->second) {
auto *CI = llvm::find_if(Data, [VD](const MapInfo &MI) {
return MI.Components.back().getAssociatedDeclaration() == VD;
});
if (CI != Data.end()) {
CI->ReturnDevicePointer = true;
Found = true;
break;
}
}
if (Found)
continue;
}
if (isa<MemberExpr>(IE)) {
InfoGen(nullptr, Other, std::get<1>(L), OMPC_MAP_unknown, llvm::None,
llvm::None, /*ReturnDevicePointer=*/false, C->isImplicit(),
/*Mapper=*/nullptr, /*VarRef=*/nullptr, /*ForDeviceAddr=*/true);
DeferredInfo[nullptr].emplace_back(IE, VD, true);
} else {
llvm::Value *Ptr;
if (IE->isGLValue())
Ptr = CGF.EmitLValue(IE).getPointer(CGF);
else
Ptr = CGF.EmitScalarExpr(IE);
CombinedInfo.Exprs.push_back(VD);
CombinedInfo.BasePointers.emplace_back(Ptr, VD);
CombinedInfo.Pointers.push_back(Ptr);
CombinedInfo.Sizes.push_back(
llvm::Constant::getNullValue(CGF.Int64Ty));
CombinedInfo.Types.push_back(OMP_MAP_RETURN_PARAM);
CombinedInfo.Mappers.push_back(nullptr);
}
}
}
for (const auto &Data : Info) {
StructRangeInfoTy PartialStruct;
MapCombinedInfoTy CurInfo;
const Decl *D = Data.first;
const ValueDecl *VD = cast_or_null<ValueDecl>(D);
for (const auto &M : Data.second) {
for (const MapInfo &L : M) {
assert(!L.Components.empty() &&
"Not expecting declaration with no component lists.");
unsigned CurrentBasePointersIdx = CurInfo.BasePointers.size();
CurInfo.NonContigInfo.IsNonContiguous =
L.Components.back().isNonContiguous();
generateInfoForComponentList(
L.MapType, L.MapModifiers, L.MotionModifiers, L.Components,
CurInfo, PartialStruct, /*IsFirstComponentList=*/false,
L.IsImplicit, L.Mapper, L.ForDeviceAddr, VD, L.VarRef);
if (L.ReturnDevicePointer) {
assert(CurInfo.BasePointers.size() > CurrentBasePointersIdx &&
"Unexpected number of mapped base pointers.");
const ValueDecl *RelevantVD =
L.Components.back().getAssociatedDeclaration();
assert(RelevantVD &&
"No relevant declaration related with device pointer??");
CurInfo.BasePointers[CurrentBasePointersIdx].setDevicePtrDecl(
RelevantVD);
CurInfo.Types[CurrentBasePointersIdx] |= OMP_MAP_RETURN_PARAM;
}
}
}
auto CI = DeferredInfo.find(Data.first);
if (CI != DeferredInfo.end()) {
for (const DeferredDevicePtrEntryTy &L : CI->second) {
llvm::Value *BasePtr;
llvm::Value *Ptr;
if (L.ForDeviceAddr) {
if (L.IE->isGLValue())
Ptr = this->CGF.EmitLValue(L.IE).getPointer(CGF);
else
Ptr = this->CGF.EmitScalarExpr(L.IE);
BasePtr = Ptr;
CurInfo.Types.push_back(OMP_MAP_RETURN_PARAM | OMP_MAP_MEMBER_OF);
} else {
BasePtr = this->CGF.EmitLValue(L.IE).getPointer(CGF);
Ptr = this->CGF.EmitLoadOfScalar(this->CGF.EmitLValue(L.IE),
L.IE->getExprLoc());
CurInfo.Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_RETURN_PARAM |
OMP_MAP_MEMBER_OF);
}
CurInfo.Exprs.push_back(L.VD);
CurInfo.BasePointers.emplace_back(BasePtr, L.VD);
CurInfo.Pointers.push_back(Ptr);
CurInfo.Sizes.push_back(
llvm::Constant::getNullValue(this->CGF.Int64Ty));
CurInfo.Mappers.push_back(nullptr);
}
}
if (PartialStruct.Base.isValid()) {
CurInfo.NonContigInfo.Dims.push_back(0);
emitCombinedEntry(CombinedInfo, CurInfo.Types, PartialStruct, VD);
}
CombinedInfo.append(CurInfo);
}
CombinedInfo.append(UseDevicePtrCombinedInfo);
}
public:
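// Constructor for an executable directive: pre-collect firstprivate
// declarations, uses_allocators data, is_device_ptr components, and lambdas
// mapped with 'map(to:)' so later map-info generation can consult them.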
MappableExprsHandler(const OMPExecutableDirective &Dir, CodeGenFunction &CGF)
: CurDir(&Dir), CGF(CGF) {
for (const auto *C : Dir.getClausesOfKind<OMPFirstprivateClause>())
for (const auto *D : C->varlists())
FirstPrivateDecls.try_emplace(
cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl()), C->isImplicit());
for (const auto *C : Dir.getClausesOfKind<OMPUsesAllocatorsClause>()) {
for (unsigned I = 0, E = C->getNumberOfAllocators(); I < E; ++I) {
OMPUsesAllocatorsClause::Data D = C->getAllocatorData(I);
if (const auto *DRE = dyn_cast_or_null<DeclRefExpr>(D.AllocatorTraits))
FirstPrivateDecls.try_emplace(cast<VarDecl>(DRE->getDecl()),
/*Implicit=*/true);
else if (const auto *VD = dyn_cast<VarDecl>(
cast<DeclRefExpr>(D.Allocator->IgnoreParenImpCasts())
->getDecl()))
FirstPrivateDecls.try_emplace(VD, true);
}
}
for (const auto *C : Dir.getClausesOfKind<OMPIsDevicePtrClause>())
for (auto L : C->component_lists())
DevPointersMap[std::get<0>(L)].push_back(std::get<1>(L));
for (const auto *C : Dir.getClausesOfKind<OMPMapClause>()) {
if (C->getMapType() != OMPC_MAP_to)
continue;
for (auto L : C->component_lists()) {
const ValueDecl *VD = std::get<0>(L);
const auto *RD = VD ? VD->getType()
.getCanonicalType()
.getNonReferenceType()
->getAsCXXRecordDecl()
: nullptr;
if (RD && RD->isLambda())
LambdasMap.try_emplace(std::get<0>(L), C);
}
}
}
MappableExprsHandler(const OMPDeclareMapperDecl &Dir, CodeGenFunction &CGF)
: CurDir(&Dir), CGF(CGF) {}
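// Generate the combined entry that covers a whole partially mapped struct:
// base pointer, pointer to the lowest mapped element, and a size spanning
// from the lowest to one past the highest element. Member entries then get
// their MEMBER_OF field pointed at this combined entry.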
void emitCombinedEntry(MapCombinedInfoTy &CombinedInfo,
MapFlagsArrayTy &CurTypes,
const StructRangeInfoTy &PartialStruct,
const ValueDecl *VD = nullptr,
bool NotTargetParams = true) const {
if (CurTypes.size() == 1 &&
((CurTypes.back() & OMP_MAP_MEMBER_OF) != OMP_MAP_MEMBER_OF) &&
!PartialStruct.IsArraySection)
return;
Address LBAddr = PartialStruct.LowestElem.second;
Address HBAddr = PartialStruct.HighestElem.second;
if (PartialStruct.HasCompleteRecord) {
LBAddr = PartialStruct.LB;
HBAddr = PartialStruct.LB;
}
CombinedInfo.Exprs.push_back(VD);
CombinedInfo.BasePointers.push_back(PartialStruct.Base.getPointer());
llvm::Value *LB = LBAddr.getPointer();
CombinedInfo.Pointers.push_back(LB);
CombinedInfo.Mappers.push_back(nullptr);
llvm::Value *HB = HBAddr.getPointer();
llvm::Value *HAddr =
CGF.Builder.CreateConstGEP1_32(HBAddr.getElementType(), HB, 1);
llvm::Value *CLAddr = CGF.Builder.CreatePointerCast(LB, CGF.VoidPtrTy);
llvm::Value *CHAddr = CGF.Builder.CreatePointerCast(HAddr, CGF.VoidPtrTy);
llvm::Value *Diff = CGF.Builder.CreatePtrDiff(CGF.Int8Ty, CHAddr, CLAddr);
llvm::Value *Size = CGF.Builder.CreateIntCast(Diff, CGF.Int64Ty,
/*IsSigned=*/false);
CombinedInfo.Sizes.push_back(Size);
CombinedInfo.Types.push_back(NotTargetParams ? OMP_MAP_NONE
: OMP_MAP_TARGET_PARAM);
if (CurTypes.end() !=
llvm::find_if(CurTypes, [](OpenMPOffloadMappingFlags Type) {
return Type & OMP_MAP_PRESENT;
}))
CombinedInfo.Types.back() |= OMP_MAP_PRESENT;
(*CurTypes.begin()) &= ~OMP_MAP_TARGET_PARAM;
if (CurTypes.end() !=
llvm::find_if(CurTypes, [](OpenMPOffloadMappingFlags Type) {
return Type & OMP_MAP_OMPX_HOLD;
})) {
CombinedInfo.Types.back() |= OMP_MAP_OMPX_HOLD;
for (auto &M : CurTypes)
M |= OMP_MAP_OMPX_HOLD;
}
OpenMPOffloadMappingFlags MemberOfFlag =
getMemberOfFlag(CombinedInfo.BasePointers.size() - 1);
for (auto &M : CurTypes)
setCorrectMemberOfFlag(M, MemberOfFlag);
}
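// Generate all map-related information for the clauses of the current
// executable directive.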
void generateAllInfo(
MapCombinedInfoTy &CombinedInfo,
const llvm::DenseSet<CanonicalDeclPtr<const Decl>> &SkipVarSet =
llvm::DenseSet<CanonicalDeclPtr<const Decl>>()) const {
assert(CurDir.is<const OMPExecutableDirective *>() &&
"Expect a executable directive");
const auto *CurExecDir = CurDir.get<const OMPExecutableDirective *>();
generateAllInfoForClauses(CurExecDir->clauses(), CombinedInfo, SkipVarSet);
}
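// Generate all map-related information for the clauses of the current
// declare mapper directive.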
void generateAllInfoForMapper(MapCombinedInfoTy &CombinedInfo) const {
assert(CurDir.is<const OMPDeclareMapperDecl *>() &&
"Expect a declare mapper directive");
const auto *CurMapperDir = CurDir.get<const OMPDeclareMapperDecl *>();
generateAllInfoForClauses(CurMapperDir->clauses(), CombinedInfo);
}
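// Emit capture information for lambdas: map the lambda object's 'this'
// capture and its by-reference (or pointer) captures as PTR_AND_OBJ members
// of the lambda.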
void generateInfoForLambdaCaptures(
const ValueDecl *VD, llvm::Value *Arg, MapCombinedInfoTy &CombinedInfo,
llvm::DenseMap<llvm::Value *, llvm::Value *> &LambdaPointers) const {
QualType VDType = VD->getType().getCanonicalType().getNonReferenceType();
const auto *RD = VDType->getAsCXXRecordDecl();
if (!RD || !RD->isLambda())
return;
Address VDAddr(Arg, CGF.ConvertTypeForMem(VDType),
CGF.getContext().getDeclAlign(VD));
LValue VDLVal = CGF.MakeAddrLValue(VDAddr, VDType);
llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
FieldDecl *ThisCapture = nullptr;
RD->getCaptureFields(Captures, ThisCapture);
if (ThisCapture) {
LValue ThisLVal =
CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
LValue ThisLValVal = CGF.EmitLValueForField(VDLVal, ThisCapture);
LambdaPointers.try_emplace(ThisLVal.getPointer(CGF),
VDLVal.getPointer(CGF));
CombinedInfo.Exprs.push_back(VD);
CombinedInfo.BasePointers.push_back(ThisLVal.getPointer(CGF));
CombinedInfo.Pointers.push_back(ThisLValVal.getPointer(CGF));
CombinedInfo.Sizes.push_back(
CGF.Builder.CreateIntCast(CGF.getTypeSize(CGF.getContext().VoidPtrTy),
CGF.Int64Ty, true));
CombinedInfo.Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT);
CombinedInfo.Mappers.push_back(nullptr);
}
for (const LambdaCapture &LC : RD->captures()) {
if (!LC.capturesVariable())
continue;
const VarDecl *VD = LC.getCapturedVar();
if (LC.getCaptureKind() != LCK_ByRef && !VD->getType()->isPointerType())
continue;
auto It = Captures.find(VD);
assert(It != Captures.end() && "Found lambda capture without field.");
LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
if (LC.getCaptureKind() == LCK_ByRef) {
LValue VarLValVal = CGF.EmitLValueForField(VDLVal, It->second);
LambdaPointers.try_emplace(VarLVal.getPointer(CGF),
VDLVal.getPointer(CGF));
CombinedInfo.Exprs.push_back(VD);
CombinedInfo.BasePointers.push_back(VarLVal.getPointer(CGF));
CombinedInfo.Pointers.push_back(VarLValVal.getPointer(CGF));
CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
CGF.getTypeSize(
VD->getType().getCanonicalType().getNonReferenceType()),
CGF.Int64Ty, true));
} else {
RValue VarRVal = CGF.EmitLoadOfLValue(VarLVal, RD->getLocation());
LambdaPointers.try_emplace(VarLVal.getPointer(CGF),
VDLVal.getPointer(CGF));
CombinedInfo.Exprs.push_back(VD);
CombinedInfo.BasePointers.push_back(VarLVal.getPointer(CGF));
CombinedInfo.Pointers.push_back(VarRVal.getScalarVal());
CombinedInfo.Sizes.push_back(llvm::ConstantInt::get(CGF.Int64Ty, 0));
}
CombinedInfo.Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT);
CombinedInfo.Mappers.push_back(nullptr);
}
}
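// Fix the MEMBER_OF field of the entries emitted for lambda captures so it
// points at the entry of the enclosing lambda object.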
void adjustMemberOfForLambdaCaptures(
const llvm::DenseMap<llvm::Value *, llvm::Value *> &LambdaPointers,
MapBaseValuesArrayTy &BasePointers, MapValuesArrayTy &Pointers,
MapFlagsArrayTy &Types) const {
for (unsigned I = 0, E = Types.size(); I < E; ++I) {
if (Types[I] != (OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT))
continue;
llvm::Value *BasePtr = LambdaPointers.lookup(*BasePointers[I]);
assert(BasePtr && "Unable to find base lambda address.");
int TgtIdx = -1;
for (unsigned J = I; J > 0; --J) {
unsigned Idx = J - 1;
if (Pointers[Idx] != BasePtr)
continue;
TgtIdx = Idx;
break;
}
assert(TgtIdx != -1 && "Unable to find parent lambda.");
OpenMPOffloadMappingFlags MemberOfFlag = getMemberOfFlag(TgtIdx);
setCorrectMemberOfFlag(Types[I], MemberOfFlag);
}
}
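// Generate the base pointers, section pointers, sizes, map types, and
// mappers associated with the given capture (all appended to CombinedInfo),
// recording the partial-struct range in PartialStruct. Component lists that
// overlap for the same declaration are collected and passed to
// generateInfoForComponentList as OverlappedComponents.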
void generateInfoForCapture(const CapturedStmt::Capture *Cap,
llvm::Value *Arg, MapCombinedInfoTy &CombinedInfo,
StructRangeInfoTy &PartialStruct) const {
assert(!Cap->capturesVariableArrayType() &&
"Not expecting to generate map info for a variable array type!");
const ValueDecl *VD = Cap->capturesThis()
? nullptr
: Cap->getCapturedVar()->getCanonicalDecl();
if (LambdasMap.count(VD))
return;
if (DevPointersMap.count(VD)) {
CombinedInfo.Exprs.push_back(VD);
CombinedInfo.BasePointers.emplace_back(Arg, VD);
CombinedInfo.Pointers.push_back(Arg);
CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
CGF.getTypeSize(CGF.getContext().VoidPtrTy), CGF.Int64Ty,
/*IsSigned=*/true));
CombinedInfo.Types.push_back(
(Cap->capturesVariable() ? OMP_MAP_TO : OMP_MAP_LITERAL) |
OMP_MAP_TARGET_PARAM);
CombinedInfo.Mappers.push_back(nullptr);
return;
}
using MapData =
std::tuple<OMPClauseMappableExprCommon::MappableExprComponentListRef,
OpenMPMapClauseKind, ArrayRef<OpenMPMapModifierKind>, bool,
const ValueDecl *, const Expr *>;
SmallVector<MapData, 4> DeclComponentLists;
assert(CurDir.is<const OMPExecutableDirective *>() &&
"Expect a executable directive");
const auto *CurExecDir = CurDir.get<const OMPExecutableDirective *>();
for (const auto *C : CurExecDir->getClausesOfKind<OMPMapClause>()) {
const auto *EI = C->getVarRefs().begin();
for (const auto L : C->decl_component_lists(VD)) {
const ValueDecl *VDecl, *Mapper;
const Expr *E = (C->getMapLoc().isValid()) ? *EI : nullptr;
OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
std::tie(VDecl, Components, Mapper) = L;
assert(VDecl == VD && "We got information for the wrong declaration??");
assert(!Components.empty() &&
"Not expecting declaration with no component lists.");
DeclComponentLists.emplace_back(Components, C->getMapType(),
C->getMapTypeModifiers(),
C->isImplicit(), Mapper, E);
++EI;
}
}
llvm::stable_sort(DeclComponentLists, [](const MapData &LHS,
const MapData &RHS) {
ArrayRef<OpenMPMapModifierKind> MapModifiers = std::get<2>(LHS);
OpenMPMapClauseKind MapType = std::get<1>(RHS);
bool HasPresent =
llvm::is_contained(MapModifiers, clang::OMPC_MAP_MODIFIER_present);
bool HasAllocs = MapType == OMPC_MAP_alloc;
MapModifiers = std::get<2>(RHS);
MapType = std::get<1>(LHS);
bool HasPresentR =
llvm::is_contained(MapModifiers, clang::OMPC_MAP_MODIFIER_present);
bool HasAllocsR = MapType == OMPC_MAP_alloc;
return (HasPresent && !HasPresentR) || (HasAllocs && !HasAllocsR);
});
llvm::SmallDenseMap<
const MapData *,
llvm::SmallVector<
OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>,
4>
OverlappedData;
size_t Count = 0;
for (const MapData &L : DeclComponentLists) {
OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
OpenMPMapClauseKind MapType;
ArrayRef<OpenMPMapModifierKind> MapModifiers;
bool IsImplicit;
const ValueDecl *Mapper;
const Expr *VarRef;
std::tie(Components, MapType, MapModifiers, IsImplicit, Mapper, VarRef) =
L;
++Count;
for (const MapData &L1 : makeArrayRef(DeclComponentLists).slice(Count)) {
OMPClauseMappableExprCommon::MappableExprComponentListRef Components1;
std::tie(Components1, MapType, MapModifiers, IsImplicit, Mapper,
VarRef) = L1;
auto CI = Components.rbegin();
auto CE = Components.rend();
auto SI = Components1.rbegin();
auto SE = Components1.rend();
for (; CI != CE && SI != SE; ++CI, ++SI) {
if (CI->getAssociatedExpression()->getStmtClass() !=
SI->getAssociatedExpression()->getStmtClass())
break;
if (CI->getAssociatedDeclaration() != SI->getAssociatedDeclaration())
break;
}
if (CI == CE || SI == SE) {
if (CI == CE && SI == SE)
continue;
const auto It = (SI == SE) ? CI : SI;
if (!isa<MemberExpr>(It->getAssociatedExpression()) ||
(std::prev(It)->getAssociatedDeclaration() &&
std::prev(It)
->getAssociatedDeclaration()
->getType()
->isPointerType()) ||
(It->getAssociatedDeclaration() &&
It->getAssociatedDeclaration()->getType()->isPointerType() &&
std::next(It) != CE && std::next(It) != SE))
continue;
const MapData &BaseData = CI == CE ? L : L1;
OMPClauseMappableExprCommon::MappableExprComponentListRef SubData =
SI == SE ? Components : Components1;
auto &OverlappedElements = OverlappedData.FindAndConstruct(&BaseData);
OverlappedElements.getSecond().push_back(SubData);
}
}
}
llvm::SmallVector<const FieldDecl *, 4> Layout;
if (!OverlappedData.empty()) {
const Type *BaseType = VD->getType().getCanonicalType().getTypePtr();
const Type *OrigType = BaseType->getPointeeOrArrayElementType();
while (BaseType != OrigType) {
BaseType = OrigType->getCanonicalTypeInternal().getTypePtr();
OrigType = BaseType->getPointeeOrArrayElementType();
}
if (const auto *CRD = BaseType->getAsCXXRecordDecl())
getPlainLayout(CRD, Layout, false);
else {
const auto *RD = BaseType->getAsRecordDecl();
Layout.append(RD->field_begin(), RD->field_end());
}
}
for (auto &Pair : OverlappedData) {
llvm::stable_sort(
Pair.getSecond(),
[&Layout](
OMPClauseMappableExprCommon::MappableExprComponentListRef First,
OMPClauseMappableExprCommon::MappableExprComponentListRef
Second) {
auto CI = First.rbegin();
auto CE = First.rend();
auto SI = Second.rbegin();
auto SE = Second.rend();
for (; CI != CE && SI != SE; ++CI, ++SI) {
if (CI->getAssociatedExpression()->getStmtClass() !=
SI->getAssociatedExpression()->getStmtClass())
break;
if (CI->getAssociatedDeclaration() !=
SI->getAssociatedDeclaration())
break;
}
if (CI == CE && SI == SE)
return false;
if (CI == CE || SI == SE)
return CI == CE;
const auto *FD1 = cast<FieldDecl>(CI->getAssociatedDeclaration());
const auto *FD2 = cast<FieldDecl>(SI->getAssociatedDeclaration());
if (FD1->getParent() == FD2->getParent())
return FD1->getFieldIndex() < FD2->getFieldIndex();
const auto *It =
llvm::find_if(Layout, [FD1, FD2](const FieldDecl *FD) {
return FD == FD1 || FD == FD2;
});
return *It == FD1;
});
}
bool IsFirstComponentList = true;
for (const auto &Pair : OverlappedData) {
const MapData &L = *Pair.getFirst();
OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
OpenMPMapClauseKind MapType;
ArrayRef<OpenMPMapModifierKind> MapModifiers;
bool IsImplicit;
const ValueDecl *Mapper;
const Expr *VarRef;
std::tie(Components, MapType, MapModifiers, IsImplicit, Mapper, VarRef) =
L;
ArrayRef<OMPClauseMappableExprCommon::MappableExprComponentListRef>
OverlappedComponents = Pair.getSecond();
generateInfoForComponentList(
MapType, MapModifiers, llvm::None, Components, CombinedInfo,
PartialStruct, IsFirstComponentList, IsImplicit, Mapper,
/*ForDeviceAddr=*/false, VD, VarRef, OverlappedComponents);
IsFirstComponentList = false;
}
for (const MapData &L : DeclComponentLists) {
OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
OpenMPMapClauseKind MapType;
ArrayRef<OpenMPMapModifierKind> MapModifiers;
bool IsImplicit;
const ValueDecl *Mapper;
const Expr *VarRef;
std::tie(Components, MapType, MapModifiers, IsImplicit, Mapper, VarRef) =
L;
auto It = OverlappedData.find(&L);
if (It == OverlappedData.end())
generateInfoForComponentList(MapType, MapModifiers, llvm::None,
Components, CombinedInfo, PartialStruct,
IsFirstComponentList, IsImplicit, Mapper,
/*ForDeviceAddr=*/false, VD, VarRef);
IsFirstComponentList = false;
}
}
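// Generate the default map information for a given capture: 'this' is mapped
// to/from, captures by copy are passed by value (LITERAL), and captures by
// reference get modifiers derived from firstprivate/lambda information.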
void generateDefaultMapInfo(const CapturedStmt::Capture &CI,
const FieldDecl &RI, llvm::Value *CV,
MapCombinedInfoTy &CombinedInfo) const {
bool IsImplicit = true;
if (CI.capturesThis()) {
CombinedInfo.Exprs.push_back(nullptr);
CombinedInfo.BasePointers.push_back(CV);
CombinedInfo.Pointers.push_back(CV);
const auto *PtrTy = cast<PointerType>(RI.getType().getTypePtr());
CombinedInfo.Sizes.push_back(
CGF.Builder.CreateIntCast(CGF.getTypeSize(PtrTy->getPointeeType()),
CGF.Int64Ty, true));
CombinedInfo.Types.push_back(OMP_MAP_TO | OMP_MAP_FROM);
} else if (CI.capturesVariableByCopy()) {
const VarDecl *VD = CI.getCapturedVar();
CombinedInfo.Exprs.push_back(VD->getCanonicalDecl());
CombinedInfo.BasePointers.push_back(CV);
CombinedInfo.Pointers.push_back(CV);
if (!RI.getType()->isAnyPointerType()) {
CombinedInfo.Types.push_back(OMP_MAP_LITERAL);
CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
CGF.getTypeSize(RI.getType()), CGF.Int64Ty, true));
} else {
CombinedInfo.Types.push_back(OMP_MAP_NONE);
CombinedInfo.Sizes.push_back(llvm::Constant::getNullValue(CGF.Int64Ty));
}
auto I = FirstPrivateDecls.find(VD);
if (I != FirstPrivateDecls.end())
IsImplicit = I->getSecond();
} else {
assert(CI.capturesVariable() && "Expected captured reference.");
const auto *PtrTy = cast<ReferenceType>(RI.getType().getTypePtr());
QualType ElementType = PtrTy->getPointeeType();
CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
CGF.getTypeSize(ElementType), CGF.Int64Ty, true));
CombinedInfo.Types.push_back(getMapModifiersForPrivateClauses(CI));
const VarDecl *VD = CI.getCapturedVar();
auto I = FirstPrivateDecls.find(VD);
CombinedInfo.Exprs.push_back(VD->getCanonicalDecl());
CombinedInfo.BasePointers.push_back(CV);
if (I != FirstPrivateDecls.end() && ElementType->isAnyPointerType()) {
Address PtrAddr = CGF.EmitLoadOfReference(CGF.MakeAddrLValue(
CV, ElementType, CGF.getContext().getDeclAlign(VD),
AlignmentSource::Decl));
CombinedInfo.Pointers.push_back(PtrAddr.getPointer());
} else {
CombinedInfo.Pointers.push_back(CV);
}
if (I != FirstPrivateDecls.end())
IsImplicit = I->getSecond();
}
CombinedInfo.Types.back() |= OMP_MAP_TARGET_PARAM;
if (IsImplicit)
CombinedInfo.Types.back() |= OMP_MAP_IMPLICIT;
CombinedInfo.Mappers.push_back(nullptr);
}
};
} // namespace
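// Emit, for each non-contiguous list item, an array of
// struct descriptor_dim { uint64_t offset, count, stride; } describing every
// dimension, and store its address into the corresponding slot of the
// offloading pointers array.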
static void emitNonContiguousDescriptor(
CodeGenFunction &CGF, MappableExprsHandler::MapCombinedInfoTy &CombinedInfo,
CGOpenMPRuntime::TargetDataInfo &Info) {
CodeGenModule &CGM = CGF.CGM;
MappableExprsHandler::MapCombinedInfoTy::StructNonContiguousInfo
&NonContigInfo = CombinedInfo.NonContigInfo;
ASTContext &C = CGF.getContext();
QualType Int64Ty = C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
RecordDecl *RD;
RD = C.buildImplicitRecord("descriptor_dim");
RD->startDefinition();
addFieldToRecordDecl(C, RD, Int64Ty);
addFieldToRecordDecl(C, RD, Int64Ty);
addFieldToRecordDecl(C, RD, Int64Ty);
RD->completeDefinition();
QualType DimTy = C.getRecordType(RD);
enum { OffsetFD = 0, CountFD, StrideFD };
for (unsigned I = 0, L = 0, E = NonContigInfo.Dims.size(); I < E; ++I) {
if (NonContigInfo.Dims[I] == 1)
continue;
llvm::APInt Size(32, NonContigInfo.Dims[I]);
QualType ArrayTy =
C.getConstantArrayType(DimTy, Size, nullptr, ArrayType::Normal, 0);
Address DimsAddr = CGF.CreateMemTemp(ArrayTy, "dims");
for (unsigned II = 0, EE = NonContigInfo.Dims[I]; II < EE; ++II) {
unsigned RevIdx = EE - II - 1;
LValue DimsLVal = CGF.MakeAddrLValue(
CGF.Builder.CreateConstArrayGEP(DimsAddr, II), DimTy);
LValue OffsetLVal = CGF.EmitLValueForField(
DimsLVal, *std::next(RD->field_begin(), OffsetFD));
CGF.EmitStoreOfScalar(NonContigInfo.Offsets[L][RevIdx], OffsetLVal);
LValue CountLVal = CGF.EmitLValueForField(
DimsLVal, *std::next(RD->field_begin(), CountFD));
CGF.EmitStoreOfScalar(NonContigInfo.Counts[L][RevIdx], CountLVal);
LValue StrideLVal = CGF.EmitLValueForField(
DimsLVal, *std::next(RD->field_begin(), StrideFD));
CGF.EmitStoreOfScalar(NonContigInfo.Strides[L][RevIdx], StrideLVal);
}
Address DAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
DimsAddr, CGM.Int8PtrTy, CGM.Int8Ty);
llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
Info.PointersArray, 0, I);
Address PAddr(P, CGM.VoidPtrTy, CGF.getPointerAlign());
CGF.Builder.CreateStore(DAddr.getPointer(), PAddr);
++L;
}
}
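// If the expression is an array section whose base is a member access,
// return the referenced member declaration; otherwise return null.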
static ValueDecl *getDeclFromThisExpr(const Expr *E) {
if (!E)
return nullptr;
if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(E->IgnoreParenCasts()))
if (const MemberExpr *ME =
dyn_cast<MemberExpr>(OASE->getBase()->IgnoreParenImpCasts()))
return ME->getMemberDecl();
return nullptr;
}
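// Build the source-location string (file, line, column and a pretty-printed
// expression or declaration name) that the runtime uses to report mapping
// information.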
llvm::Constant *
emitMappingInformation(CodeGenFunction &CGF, llvm::OpenMPIRBuilder &OMPBuilder,
MappableExprsHandler::MappingExprInfo &MapExprs) {
uint32_t SrcLocStrSize;
if (!MapExprs.getMapDecl() && !MapExprs.getMapExpr())
return OMPBuilder.getOrCreateDefaultSrcLocStr(SrcLocStrSize);
SourceLocation Loc;
if (!MapExprs.getMapDecl() && MapExprs.getMapExpr()) {
if (const ValueDecl *VD = getDeclFromThisExpr(MapExprs.getMapExpr()))
Loc = VD->getLocation();
else
Loc = MapExprs.getMapExpr()->getExprLoc();
} else {
Loc = MapExprs.getMapDecl()->getLocation();
}
std::string ExprName;
if (MapExprs.getMapExpr()) {
PrintingPolicy P(CGF.getContext().getLangOpts());
llvm::raw_string_ostream OS(ExprName);
MapExprs.getMapExpr()->printPretty(OS, nullptr, P);
OS.flush();
} else {
ExprName = MapExprs.getMapDecl()->getNameAsString();
}
PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
return OMPBuilder.getOrCreateSrcLocStr(PLoc.getFilename(), ExprName,
PLoc.getLine(), PLoc.getColumn(),
SrcLocStrSize);
}
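// Emit the offloading arrays (base pointers, pointers, sizes, map types, map
// names, and mappers) and fill them with the values collected in
// CombinedInfo.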
static void emitOffloadingArrays(
CodeGenFunction &CGF, MappableExprsHandler::MapCombinedInfoTy &CombinedInfo,
CGOpenMPRuntime::TargetDataInfo &Info, llvm::OpenMPIRBuilder &OMPBuilder,
bool IsNonContiguous = false) {
CodeGenModule &CGM = CGF.CGM;
ASTContext &Ctx = CGF.getContext();
Info.clearArrayInfo();
Info.NumberOfPtrs = CombinedInfo.BasePointers.size();
if (Info.NumberOfPtrs) {
llvm::APInt PointerNumAP(32, Info.NumberOfPtrs, /*isSigned=*/true);
QualType PointerArrayType = Ctx.getConstantArrayType(
Ctx.VoidPtrTy, PointerNumAP, nullptr, ArrayType::Normal,
/*IndexTypeQuals=*/0);
Info.BasePointersArray =
CGF.CreateMemTemp(PointerArrayType, ".offload_baseptrs").getPointer();
Info.PointersArray =
CGF.CreateMemTemp(PointerArrayType, ".offload_ptrs").getPointer();
Address MappersArray =
CGF.CreateMemTemp(PointerArrayType, ".offload_mappers");
Info.MappersArray = MappersArray.getPointer();
QualType Int64Ty = Ctx.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
SmallVector<llvm::Constant *> ConstSizes(
CombinedInfo.Sizes.size(), llvm::ConstantInt::get(CGF.Int64Ty, 0));
llvm::SmallBitVector RuntimeSizes(CombinedInfo.Sizes.size());
for (unsigned I = 0, E = CombinedInfo.Sizes.size(); I < E; ++I) {
if (auto *CI = dyn_cast<llvm::Constant>(CombinedInfo.Sizes[I])) {
if (!isa<llvm::ConstantExpr>(CI) && !isa<llvm::GlobalValue>(CI)) {
if (IsNonContiguous && (CombinedInfo.Types[I] &
MappableExprsHandler::OMP_MAP_NON_CONTIG))
ConstSizes[I] = llvm::ConstantInt::get(
CGF.Int64Ty, CombinedInfo.NonContigInfo.Dims[I]);
else
ConstSizes[I] = CI;
continue;
}
}
RuntimeSizes.set(I);
}
if (RuntimeSizes.all()) {
QualType SizeArrayType = Ctx.getConstantArrayType(
Int64Ty, PointerNumAP, nullptr, ArrayType::Normal,
/*IndexTypeQuals=*/0);
Info.SizesArray =
CGF.CreateMemTemp(SizeArrayType, ".offload_sizes").getPointer();
} else {
auto *SizesArrayInit = llvm::ConstantArray::get(
llvm::ArrayType::get(CGM.Int64Ty, ConstSizes.size()), ConstSizes);
std::string Name = CGM.getOpenMPRuntime().getName({"offload_sizes"});
auto *SizesArrayGbl = new llvm::GlobalVariable(
CGM.getModule(), SizesArrayInit->getType(), true,
llvm::GlobalValue::PrivateLinkage, SizesArrayInit, Name);
SizesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
if (RuntimeSizes.any()) {
QualType SizeArrayType = Ctx.getConstantArrayType(
Int64Ty, PointerNumAP, nullptr, ArrayType::Normal,
/*IndexTypeQuals=*/0);
Address Buffer = CGF.CreateMemTemp(SizeArrayType, ".offload_sizes");
llvm::Value *GblConstPtr =
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
SizesArrayGbl, CGM.Int64Ty->getPointerTo());
CGF.Builder.CreateMemCpy(
Buffer,
Address(GblConstPtr, CGM.Int64Ty,
CGM.getNaturalTypeAlignment(Ctx.getIntTypeForBitwidth(
/*DestWidth=*/64, /*Signed=*/false))),
CGF.getTypeSize(SizeArrayType));
Info.SizesArray = Buffer.getPointer();
} else {
Info.SizesArray = SizesArrayGbl;
}
}
SmallVector<uint64_t, 4> Mapping(CombinedInfo.Types.size(), 0);
llvm::copy(CombinedInfo.Types, Mapping.begin());
std::string MaptypesName =
CGM.getOpenMPRuntime().getName({"offload_maptypes"});
auto *MapTypesArrayGbl =
OMPBuilder.createOffloadMaptypes(Mapping, MaptypesName);
Info.MapTypesArray = MapTypesArrayGbl;
if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo) {
Info.MapNamesArray = llvm::Constant::getNullValue(
llvm::Type::getInt8Ty(CGF.Builder.getContext())->getPointerTo());
} else {
auto fillInfoMap = [&](MappableExprsHandler::MappingExprInfo &MapExpr) {
return emitMappingInformation(CGF, OMPBuilder, MapExpr);
};
SmallVector<llvm::Constant *, 4> InfoMap(CombinedInfo.Exprs.size());
llvm::transform(CombinedInfo.Exprs, InfoMap.begin(), fillInfoMap);
std::string MapnamesName =
CGM.getOpenMPRuntime().getName({"offload_mapnames"});
auto *MapNamesArrayGbl =
OMPBuilder.createOffloadMapnames(InfoMap, MapnamesName);
Info.MapNamesArray = MapNamesArrayGbl;
}
if (Info.separateBeginEndCalls()) {
bool EndMapTypesDiffer = false;
for (uint64_t &Type : Mapping) {
if (Type & MappableExprsHandler::OMP_MAP_PRESENT) {
Type &= ~MappableExprsHandler::OMP_MAP_PRESENT;
EndMapTypesDiffer = true;
}
}
if (EndMapTypesDiffer) {
MapTypesArrayGbl =
OMPBuilder.createOffloadMaptypes(Mapping, MaptypesName);
Info.MapTypesArrayEnd = MapTypesArrayGbl;
}
}
for (unsigned I = 0; I < Info.NumberOfPtrs; ++I) {
llvm::Value *BPVal = *CombinedInfo.BasePointers[I];
llvm::Value *BP = CGF.Builder.CreateConstInBoundsGEP2_32(
llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
Info.BasePointersArray, 0, I);
BP = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
BP, BPVal->getType()->getPointerTo(0));
Address BPAddr(BP, BPVal->getType(),
Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
CGF.Builder.CreateStore(BPVal, BPAddr);
if (Info.requiresDevicePointerInfo())
if (const ValueDecl *DevVD =
CombinedInfo.BasePointers[I].getDevicePtrDecl())
Info.CaptureDeviceAddrMap.try_emplace(DevVD, BPAddr);
llvm::Value *PVal = CombinedInfo.Pointers[I];
llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
Info.PointersArray, 0, I);
P = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
P, PVal->getType()->getPointerTo(0));
Address PAddr(P, PVal->getType(), Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
CGF.Builder.CreateStore(PVal, PAddr);
if (RuntimeSizes.test(I)) {
llvm::Value *S = CGF.Builder.CreateConstInBoundsGEP2_32(
llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs),
Info.SizesArray,
/*Idx0=*/0, /*Idx1=*/I);
Address SAddr(S, CGM.Int64Ty, Ctx.getTypeAlignInChars(Int64Ty));
CGF.Builder.CreateStore(CGF.Builder.CreateIntCast(CombinedInfo.Sizes[I],
CGM.Int64Ty,
/*IsSigned=*/true),
SAddr);
}
llvm::Value *MFunc = llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
if (CombinedInfo.Mappers[I]) {
MFunc = CGM.getOpenMPRuntime().getOrCreateUserDefinedMapperFunc(
cast<OMPDeclareMapperDecl>(CombinedInfo.Mappers[I]));
MFunc = CGF.Builder.CreatePointerCast(MFunc, CGM.VoidPtrTy);
Info.HasMapper = true;
}
Address MAddr = CGF.Builder.CreateConstArrayGEP(MappersArray, I);
CGF.Builder.CreateStore(MFunc, MAddr);
}
}
if (!IsNonContiguous || CombinedInfo.NonContigInfo.Offsets.empty() ||
Info.NumberOfPtrs == 0)
return;
emitNonContiguousDescriptor(CGF, CombinedInfo, Info);
}
namespace {
struct ArgumentsOptions {
bool ForEndCall = false;
ArgumentsOptions() = default;
ArgumentsOptions(bool ForEndCall) : ForEndCall(ForEndCall) {}
};
} // namespace
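// Compute the pointer arguments that are passed to the offloading runtime
// from the arrays emitted by emitOffloadingArrays, or null constants when
// there is nothing to map.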
static void emitOffloadingArraysArgument(
CodeGenFunction &CGF, llvm::Value *&BasePointersArrayArg,
llvm::Value *&PointersArrayArg, llvm::Value *&SizesArrayArg,
llvm::Value *&MapTypesArrayArg, llvm::Value *&MapNamesArrayArg,
llvm::Value *&MappersArrayArg, CGOpenMPRuntime::TargetDataInfo &Info,
const ArgumentsOptions &Options = ArgumentsOptions()) {
assert((!Options.ForEndCall || Info.separateBeginEndCalls()) &&
"expected region end call to runtime only when end call is separate");
CodeGenModule &CGM = CGF.CGM;
if (Info.NumberOfPtrs) {
BasePointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
Info.BasePointersArray,
/*Idx0=*/0, /*Idx1=*/0);
PointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
Info.PointersArray,
/*Idx0=*/0, /*Idx1=*/0);
SizesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs), Info.SizesArray,
/*Idx0=*/0, /*Idx1=*/0);
MapTypesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs),
Options.ForEndCall && Info.MapTypesArrayEnd ? Info.MapTypesArrayEnd
: Info.MapTypesArray,
/*Idx0=*/0, /*Idx1=*/0);
if (CGF.CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo)
MapNamesArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
else
MapNamesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
Info.MapNamesArray,
/*Idx0=*/0, /*Idx1=*/0);
if (!Info.HasMapper)
MappersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
else
MappersArrayArg =
CGF.Builder.CreatePointerCast(Info.MappersArray, CGM.VoidPtrPtrTy);
} else {
BasePointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
PointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
SizesArrayArg = llvm::ConstantPointerNull::get(CGM.Int64Ty->getPointerTo());
MapTypesArrayArg =
llvm::ConstantPointerNull::get(CGM.Int64Ty->getPointerTo());
MapNamesArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
MappersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
}
}
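// Check for an inner (nested) distribute directive inside a target or target
// teams region and return it if present.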
static const OMPExecutableDirective *
getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
const auto *CS = D.getInnermostCapturedStmt();
const auto *Body =
CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
const Stmt *ChildStmt =
CGOpenMPSIMDRuntime::getSingleCompoundChild(Ctx, Body);
if (const auto *NestedDir =
dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
switch (D.getDirectiveKind()) {
case OMPD_target:
if (isOpenMPDistributeDirective(DKind))
return NestedDir;
if (DKind == OMPD_teams) {
Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
/*IgnoreCaptured=*/true);
if (!Body)
return nullptr;
ChildStmt = CGOpenMPSIMDRuntime::getSingleCompoundChild(Ctx, Body);
if (const auto *NND =
dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
DKind = NND->getDirectiveKind();
if (isOpenMPDistributeDirective(DKind))
return NND;
}
}
return nullptr;
case OMPD_target_teams:
if (isOpenMPDistributeDirective(DKind))
return NestedDir;
return nullptr;
case OMPD_target_parallel:
case OMPD_target_simd:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
return nullptr;
case OMPD_target_teams_distribute:
case OMPD_target_teams_distribute_simd:
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd:
case OMPD_parallel:
case OMPD_for:
case OMPD_parallel_for:
case OMPD_parallel_master:
case OMPD_parallel_sections:
case OMPD_for_simd:
case OMPD_parallel_for_simd:
case OMPD_cancel:
case OMPD_cancellation_point:
case OMPD_ordered:
case OMPD_threadprivate:
case OMPD_allocate:
case OMPD_task:
case OMPD_simd:
case OMPD_tile:
case OMPD_unroll:
case OMPD_sections:
case OMPD_section:
case OMPD_single:
case OMPD_master:
case OMPD_critical:
case OMPD_taskyield:
case OMPD_barrier:
case OMPD_taskwait:
case OMPD_taskgroup:
case OMPD_atomic:
case OMPD_flush:
case OMPD_depobj:
case OMPD_scan:
case OMPD_teams:
case OMPD_target_data:
case OMPD_target_exit_data:
case OMPD_target_enter_data:
case OMPD_distribute:
case OMPD_distribute_simd:
case OMPD_distribute_parallel_for:
case OMPD_distribute_parallel_for_simd:
case OMPD_teams_distribute:
case OMPD_teams_distribute_simd:
case OMPD_teams_distribute_parallel_for:
case OMPD_teams_distribute_parallel_for_simd:
case OMPD_target_update:
case OMPD_declare_simd:
case OMPD_declare_variant:
case OMPD_begin_declare_variant:
case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
case OMPD_declare_mapper:
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_master_taskloop:
case OMPD_master_taskloop_simd:
case OMPD_parallel_master_taskloop:
case OMPD_parallel_master_taskloop_simd:
case OMPD_requires:
case OMPD_metadirective:
case OMPD_unknown:
default:
llvm_unreachable("Unexpected directive.");
}
}
return nullptr;
}
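// Emit the user-defined mapper function for a 'declare mapper' construct.
// The generated function iterates over the mapped array section and, for
// each element, passes the component maps produced from the mapper's map
// clauses to the runtime, adjusting the TO/FROM bits according to the map
// type the mapper was invoked with.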
void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
CodeGenFunction *CGF) {
if (UDMMap.count(D) > 0)
return;
ASTContext &C = CGM.getContext();
QualType Ty = D->getType();
QualType PtrTy = C.getPointerType(Ty).withRestrict();
QualType Int64Ty = C.getIntTypeForBitwidth(64, true);
auto *MapperVarDecl =
cast<VarDecl>(cast<DeclRefExpr>(D->getMapperVarRef())->getDecl());
SourceLocation Loc = D->getLocation();
CharUnits ElementSize = C.getTypeSizeInChars(Ty);
llvm::Type *ElemTy = CGM.getTypes().ConvertTypeForMem(Ty);
ImplicitParamDecl HandleArg(C, nullptr, Loc, nullptr,
C.VoidPtrTy, ImplicitParamDecl::Other);
ImplicitParamDecl BaseArg(C, nullptr, Loc, nullptr, C.VoidPtrTy,
ImplicitParamDecl::Other);
ImplicitParamDecl BeginArg(C, nullptr, Loc, nullptr,
C.VoidPtrTy, ImplicitParamDecl::Other);
ImplicitParamDecl SizeArg(C, nullptr, Loc, nullptr, Int64Ty,
ImplicitParamDecl::Other);
ImplicitParamDecl TypeArg(C, nullptr, Loc, nullptr, Int64Ty,
ImplicitParamDecl::Other);
ImplicitParamDecl NameArg(C, nullptr, Loc, nullptr, C.VoidPtrTy,
ImplicitParamDecl::Other);
FunctionArgList Args;
Args.push_back(&HandleArg);
Args.push_back(&BaseArg);
Args.push_back(&BeginArg);
Args.push_back(&SizeArg);
Args.push_back(&TypeArg);
Args.push_back(&NameArg);
const CGFunctionInfo &FnInfo =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
SmallString<64> TyStr;
llvm::raw_svector_ostream Out(TyStr);
CGM.getCXXABI().getMangleContext().mangleTypeName(Ty, Out);
std::string Name = getName({"omp_mapper", TyStr, D->getName()});
auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
Name, &CGM.getModule());
CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
CodeGenFunction MapperCGF(CGM);
MapperCGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
llvm::Value *Size = MapperCGF.EmitLoadOfScalar(
MapperCGF.GetAddrOfLocalVar(&SizeArg), /*Volatile=*/false,
C.getPointerType(Int64Ty), Loc);
llvm::Value *Handle = MapperCGF.EmitLoadOfScalar(
MapperCGF.GetAddrOfLocalVar(&HandleArg),
/*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
llvm::Value *BaseIn = MapperCGF.EmitLoadOfScalar(
MapperCGF.GetAddrOfLocalVar(&BaseArg),
/*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
llvm::Value *BeginIn = MapperCGF.EmitLoadOfScalar(
MapperCGF.GetAddrOfLocalVar(&BeginArg),
/*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
Size = MapperCGF.Builder.CreateExactUDiv(
Size, MapperCGF.Builder.getInt64(ElementSize.getQuantity()));
llvm::Value *PtrBegin = MapperCGF.Builder.CreateBitCast(
BeginIn, CGM.getTypes().ConvertTypeForMem(PtrTy));
llvm::Value *PtrEnd = MapperCGF.Builder.CreateGEP(ElemTy, PtrBegin, Size);
llvm::Value *MapType = MapperCGF.EmitLoadOfScalar(
MapperCGF.GetAddrOfLocalVar(&TypeArg), /*Volatile=*/false,
C.getPointerType(Int64Ty), Loc);
llvm::Value *MapName = MapperCGF.EmitLoadOfScalar(
MapperCGF.GetAddrOfLocalVar(&NameArg),
/*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
llvm::BasicBlock *HeadBB = MapperCGF.createBasicBlock("omp.arraymap.head");
emitUDMapperArrayInitOrDel(MapperCGF, Handle, BaseIn, BeginIn, Size, MapType,
MapName, ElementSize, HeadBB, /*IsInit=*/true);
MapperCGF.EmitBlock(HeadBB);
llvm::BasicBlock *BodyBB = MapperCGF.createBasicBlock("omp.arraymap.body");
llvm::BasicBlock *DoneBB = MapperCGF.createBasicBlock("omp.done");
llvm::Value *IsEmpty =
MapperCGF.Builder.CreateICmpEQ(PtrBegin, PtrEnd, "omp.arraymap.isempty");
MapperCGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
llvm::BasicBlock *EntryBB = MapperCGF.Builder.GetInsertBlock();
MapperCGF.EmitBlock(BodyBB);
llvm::BasicBlock *LastBB = BodyBB;
llvm::PHINode *PtrPHI = MapperCGF.Builder.CreatePHI(
PtrBegin->getType(), 2, "omp.arraymap.ptrcurrent");
PtrPHI->addIncoming(PtrBegin, EntryBB);
Address PtrCurrent(PtrPHI, ElemTy,
MapperCGF.GetAddrOfLocalVar(&BeginArg)
.getAlignment()
.alignmentOfArrayElement(ElementSize));
CodeGenFunction::OMPPrivateScope Scope(MapperCGF);
Scope.addPrivate(MapperVarDecl, PtrCurrent);
(void)Scope.Privatize();
MappableExprsHandler::MapCombinedInfoTy Info;
MappableExprsHandler MEHandler(*D, MapperCGF);
MEHandler.generateAllInfoForMapper(Info);
llvm::Value *OffloadingArgs[] = {Handle};
llvm::Value *PreviousSize = MapperCGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
OMPRTL___tgt_mapper_num_components),
OffloadingArgs);
llvm::Value *ShiftedPreviousSize = MapperCGF.Builder.CreateShl(
PreviousSize,
MapperCGF.Builder.getInt64(MappableExprsHandler::getFlagMemberOffset()));
for (unsigned I = 0; I < Info.BasePointers.size(); ++I) {
llvm::Value *CurBaseArg = MapperCGF.Builder.CreateBitCast(
*Info.BasePointers[I], CGM.getTypes().ConvertTypeForMem(C.VoidPtrTy));
llvm::Value *CurBeginArg = MapperCGF.Builder.CreateBitCast(
Info.Pointers[I], CGM.getTypes().ConvertTypeForMem(C.VoidPtrTy));
llvm::Value *CurSizeArg = Info.Sizes[I];
llvm::Value *CurNameArg =
(CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo)
? llvm::ConstantPointerNull::get(CGM.VoidPtrTy)
: emitMappingInformation(MapperCGF, OMPBuilder, Info.Exprs[I]);
llvm::Value *OriMapType = MapperCGF.Builder.getInt64(Info.Types[I]);
llvm::Value *MemberMapType =
MapperCGF.Builder.CreateNUWAdd(OriMapType, ShiftedPreviousSize);
llvm::Value *LeftToFrom = MapperCGF.Builder.CreateAnd(
MapType,
MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_TO |
MappableExprsHandler::OMP_MAP_FROM));
llvm::BasicBlock *AllocBB = MapperCGF.createBasicBlock("omp.type.alloc");
llvm::BasicBlock *AllocElseBB =
MapperCGF.createBasicBlock("omp.type.alloc.else");
llvm::BasicBlock *ToBB = MapperCGF.createBasicBlock("omp.type.to");
llvm::BasicBlock *ToElseBB = MapperCGF.createBasicBlock("omp.type.to.else");
llvm::BasicBlock *FromBB = MapperCGF.createBasicBlock("omp.type.from");
llvm::BasicBlock *EndBB = MapperCGF.createBasicBlock("omp.type.end");
llvm::Value *IsAlloc = MapperCGF.Builder.CreateIsNull(LeftToFrom);
MapperCGF.Builder.CreateCondBr(IsAlloc, AllocBB, AllocElseBB);
MapperCGF.EmitBlock(AllocBB);
llvm::Value *AllocMapType = MapperCGF.Builder.CreateAnd(
MemberMapType,
MapperCGF.Builder.getInt64(~(MappableExprsHandler::OMP_MAP_TO |
MappableExprsHandler::OMP_MAP_FROM)));
MapperCGF.Builder.CreateBr(EndBB);
MapperCGF.EmitBlock(AllocElseBB);
llvm::Value *IsTo = MapperCGF.Builder.CreateICmpEQ(
LeftToFrom,
MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_TO));
MapperCGF.Builder.CreateCondBr(IsTo, ToBB, ToElseBB);
MapperCGF.EmitBlock(ToBB);
llvm::Value *ToMapType = MapperCGF.Builder.CreateAnd(
MemberMapType,
MapperCGF.Builder.getInt64(~MappableExprsHandler::OMP_MAP_FROM));
MapperCGF.Builder.CreateBr(EndBB);
MapperCGF.EmitBlock(ToElseBB);
llvm::Value *IsFrom = MapperCGF.Builder.CreateICmpEQ(
LeftToFrom,
MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_FROM));
MapperCGF.Builder.CreateCondBr(IsFrom, FromBB, EndBB);
MapperCGF.EmitBlock(FromBB);
llvm::Value *FromMapType = MapperCGF.Builder.CreateAnd(
MemberMapType,
MapperCGF.Builder.getInt64(~MappableExprsHandler::OMP_MAP_TO));
MapperCGF.EmitBlock(EndBB);
LastBB = EndBB;
llvm::PHINode *CurMapType =
MapperCGF.Builder.CreatePHI(CGM.Int64Ty, 4, "omp.maptype");
CurMapType->addIncoming(AllocMapType, AllocBB);
CurMapType->addIncoming(ToMapType, ToBB);
CurMapType->addIncoming(FromMapType, FromBB);
CurMapType->addIncoming(MemberMapType, ToElseBB);
llvm::Value *OffloadingArgs[] = {Handle, CurBaseArg, CurBeginArg,
CurSizeArg, CurMapType, CurNameArg};
if (Info.Mappers[I]) {
llvm::Function *MapperFunc = getOrCreateUserDefinedMapperFunc(
cast<OMPDeclareMapperDecl>(Info.Mappers[I]));
assert(MapperFunc && "Expect a valid mapper function is available.");
MapperCGF.EmitNounwindRuntimeCall(MapperFunc, OffloadingArgs);
} else {
MapperCGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___tgt_push_mapper_component),
OffloadingArgs);
}
}
llvm::Value *PtrNext = MapperCGF.Builder.CreateConstGEP1_32(
ElemTy, PtrPHI, 1, "omp.arraymap.next");
PtrPHI->addIncoming(PtrNext, LastBB);
llvm::Value *IsDone =
MapperCGF.Builder.CreateICmpEQ(PtrNext, PtrEnd, "omp.arraymap.isdone");
llvm::BasicBlock *ExitBB = MapperCGF.createBasicBlock("omp.arraymap.exit");
MapperCGF.Builder.CreateCondBr(IsDone, ExitBB, BodyBB);
MapperCGF.EmitBlock(ExitBB);
emitUDMapperArrayInitOrDel(MapperCGF, Handle, BaseIn, BeginIn, Size, MapType,
MapName, ElementSize, DoneBB, false);
MapperCGF.EmitBlock(DoneBB, true);
MapperCGF.FinishFunction();
UDMMap.try_emplace(D, Fn);
if (CGF) {
auto &Decls = FunctionUDMMap.FindAndConstruct(CGF->CurFn);
Decls.second.push_back(D);
}
}
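// Emit the array initialization (IsInit == true) or deletion portion of a
// user-defined mapper. The runtime call is guarded on whether a real array
// section is mapped (size > 1, or base != begin for a pointer-and-object
// entry on init) and on the state of the OMP_MAP_DELETE bit; when taken, a
// single component covering the whole array is pushed with the TO/FROM bits
// cleared and OMP_MAP_IMPLICIT set.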
void CGOpenMPRuntime::emitUDMapperArrayInitOrDel(
CodeGenFunction &MapperCGF, llvm::Value *Handle, llvm::Value *Base,
llvm::Value *Begin, llvm::Value *Size, llvm::Value *MapType,
llvm::Value *MapName, CharUnits ElementSize, llvm::BasicBlock *ExitBB,
bool IsInit) {
StringRef Prefix = IsInit ? ".init" : ".del";
llvm::BasicBlock *BodyBB =
MapperCGF.createBasicBlock(getName({"omp.array", Prefix}));
llvm::Value *IsArray = MapperCGF.Builder.CreateICmpSGT(
Size, MapperCGF.Builder.getInt64(1), "omp.arrayinit.isarray");
llvm::Value *DeleteBit = MapperCGF.Builder.CreateAnd(
MapType,
MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_DELETE));
llvm::Value *DeleteCond;
llvm::Value *Cond;
if (IsInit) {
llvm::Value *BaseIsBegin = MapperCGF.Builder.CreateICmpNE(Base, Begin);
llvm::Value *PtrAndObjBit = MapperCGF.Builder.CreateAnd(
MapType,
MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_PTR_AND_OBJ));
PtrAndObjBit = MapperCGF.Builder.CreateIsNotNull(PtrAndObjBit);
BaseIsBegin = MapperCGF.Builder.CreateAnd(BaseIsBegin, PtrAndObjBit);
Cond = MapperCGF.Builder.CreateOr(IsArray, BaseIsBegin);
DeleteCond = MapperCGF.Builder.CreateIsNull(
DeleteBit, getName({"omp.array", Prefix, ".delete"}));
} else {
Cond = IsArray;
DeleteCond = MapperCGF.Builder.CreateIsNotNull(
DeleteBit, getName({"omp.array", Prefix, ".delete"}));
}
Cond = MapperCGF.Builder.CreateAnd(Cond, DeleteCond);
MapperCGF.Builder.CreateCondBr(Cond, BodyBB, ExitBB);
MapperCGF.EmitBlock(BodyBB);
llvm::Value *ArraySize = MapperCGF.Builder.CreateNUWMul(
Size, MapperCGF.Builder.getInt64(ElementSize.getQuantity()));
llvm::Value *MapTypeArg = MapperCGF.Builder.CreateAnd(
MapType,
MapperCGF.Builder.getInt64(~(MappableExprsHandler::OMP_MAP_TO |
MappableExprsHandler::OMP_MAP_FROM)));
MapTypeArg = MapperCGF.Builder.CreateOr(
MapTypeArg,
MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_IMPLICIT));
llvm::Value *OffloadingArgs[] = {Handle, Base, Begin,
ArraySize, MapTypeArg, MapName};
MapperCGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
OMPRTL___tgt_push_mapper_component),
OffloadingArgs);
}
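// Return the mapper function for the given declare mapper declaration,
// emitting it first if it has not been emitted yet.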
llvm::Function *CGOpenMPRuntime::getOrCreateUserDefinedMapperFunc(
const OMPDeclareMapperDecl *D) {
auto I = UDMMap.find(D);
if (I != UDMMap.end())
return I->second;
emitUserDefinedMapper(D);
return UDMMap.lookup(D);
}
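// Compute the trip count of the loop nest associated with a nested teams
// distribute construct (if any) so it can be passed to the offloading
// runtime as the kernel's iteration count; returns 0 when no such loop or
// trip count is available.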
llvm::Value *CGOpenMPRuntime::emitTargetNumIterationsCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter) {
OpenMPDirectiveKind Kind = D.getDirectiveKind();
const OMPExecutableDirective *TD = &D;
if (!isOpenMPDistributeDirective(Kind) || !isOpenMPTeamsDirective(Kind))
TD = getNestedDistributeDirective(CGM.getContext(), D);
if (!TD)
return llvm::ConstantInt::get(CGF.Int64Ty, 0);
const auto *LD = cast<OMPLoopDirective>(TD);
if (llvm::Value *NumIterations = SizeEmitter(CGF, *LD))
return NumIterations;
return llvm::ConstantInt::get(CGF.Int64Ty, 0);
}
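// Emit the offloading call for a target directive: collect the map
// information for the captured variables, materialize the offloading
// argument arrays, and launch the kernel through the OpenMPIRBuilder.
// When the offload fails (or the device clause names an ancestor device),
// fall back to running the outlined host version, or emit 'unreachable'
// if offloading is mandatory.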
void CGOpenMPRuntime::emitTargetCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter) {
if (!CGF.HaveInsertPoint())
return;
const bool OffloadingMandatory = !CGM.getLangOpts().OpenMPIsDevice &&
CGM.getLangOpts().OpenMPOffloadMandatory;
assert((OffloadingMandatory || OutlinedFn) && "Invalid outlined function!");
const bool RequiresOuterTask = D.hasClausesOfKind<OMPDependClause>() ||
D.hasClausesOfKind<OMPNowaitClause>() ||
D.hasClausesOfKind<OMPInReductionClause>();
llvm::SmallVector<llvm::Value *, 16> CapturedVars;
const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
auto &&ArgsCodegen = [&CS, &CapturedVars](CodeGenFunction &CGF,
PrePostActionTy &) {
CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
};
emitInlinedDirective(CGF, OMPD_unknown, ArgsCodegen);
CodeGenFunction::OMPTargetDataInfo InputInfo;
llvm::Value *MapTypesArray = nullptr;
llvm::Value *MapNamesArray = nullptr;
auto &&FallbackGen = [this, OutlinedFn, &D, &CapturedVars, RequiresOuterTask,
&CS, OffloadingMandatory](CodeGenFunction &CGF) {
if (OffloadingMandatory) {
CGF.Builder.CreateUnreachable();
} else {
if (RequiresOuterTask) {
CapturedVars.clear();
CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
}
emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn, CapturedVars);
}
};
auto &&ThenGen = [this, Device, OutlinedFnID, &D, &InputInfo, &MapTypesArray,
&MapNamesArray, SizeEmitter,
FallbackGen](CodeGenFunction &CGF, PrePostActionTy &) {
if (Device.getInt() == OMPC_DEVICE_ancestor) {
FallbackGen(CGF);
return;
}
assert(OutlinedFnID && "Invalid outlined function ID!");
(void)OutlinedFnID;
llvm::Value *DeviceID;
if (Device.getPointer()) {
assert((Device.getInt() == OMPC_DEVICE_unknown ||
Device.getInt() == OMPC_DEVICE_device_num) &&
"Expected device_num modifier.");
llvm::Value *DevVal = CGF.EmitScalarExpr(Device.getPointer());
DeviceID =
CGF.Builder.CreateIntCast(DevVal, CGF.Int64Ty, true);
} else {
DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
}
llvm::Value *PointerNum =
CGF.Builder.getInt32(InputInfo.NumberOfTargetItems);
llvm::Value *Return;
llvm::Value *NumTeams = emitNumTeamsForTargetDirective(CGF, D);
llvm::Value *NumThreads = emitNumThreadsForTargetDirective(CGF, D);
llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
llvm::Value *NumIterations =
emitTargetNumIterationsCall(CGF, D, SizeEmitter);
SmallVector<llvm::Value *> KernelArgs{
        CGF.Builder.getInt32(/*Version=*/1),
PointerNum,
InputInfo.BasePointersArray.getPointer(),
InputInfo.PointersArray.getPointer(),
InputInfo.SizesArray.getPointer(),
MapTypesArray,
MapNamesArray,
InputInfo.MappersArray.getPointer(),
NumIterations};
SmallVector<llvm::Value *> NoWaitKernelArgs{
CGF.Builder.getInt32(0),
llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
CGF.Builder.getInt32(0),
llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
};
bool HasNoWait = D.hasClausesOfKind<OMPNowaitClause>();
CGF.Builder.restoreIP(
HasNoWait ? OMPBuilder.emitTargetKernel(
CGF.Builder, Return, RTLoc, DeviceID, NumTeams,
NumThreads, OutlinedFnID, KernelArgs, NoWaitKernelArgs)
: OMPBuilder.emitTargetKernel(CGF.Builder, Return, RTLoc,
DeviceID, NumTeams, NumThreads,
OutlinedFnID, KernelArgs));
llvm::BasicBlock *OffloadFailedBlock =
CGF.createBasicBlock("omp_offload.failed");
llvm::BasicBlock *OffloadContBlock =
CGF.createBasicBlock("omp_offload.cont");
llvm::Value *Failed = CGF.Builder.CreateIsNotNull(Return);
CGF.Builder.CreateCondBr(Failed, OffloadFailedBlock, OffloadContBlock);
CGF.EmitBlock(OffloadFailedBlock);
FallbackGen(CGF);
CGF.EmitBranch(OffloadContBlock);
CGF.EmitBlock(OffloadContBlock, true);
};
auto &&ElseGen = [FallbackGen](CodeGenFunction &CGF, PrePostActionTy &) {
FallbackGen(CGF);
};
auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray,
&MapNamesArray, &CapturedVars, RequiresOuterTask,
&CS](CodeGenFunction &CGF, PrePostActionTy &) {
MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
MappableExprsHandler MEHandler(D, CGF);
llvm::DenseMap<llvm::Value *, llvm::Value *> LambdaPointers;
llvm::DenseSet<CanonicalDeclPtr<const Decl>> MappedVarSet;
auto RI = CS.getCapturedRecordDecl()->field_begin();
auto *CV = CapturedVars.begin();
for (CapturedStmt::const_capture_iterator CI = CS.capture_begin(),
CE = CS.capture_end();
CI != CE; ++CI, ++RI, ++CV) {
MappableExprsHandler::MapCombinedInfoTy CurInfo;
MappableExprsHandler::StructRangeInfoTy PartialStruct;
if (CI->capturesVariableArrayType()) {
CurInfo.Exprs.push_back(nullptr);
CurInfo.BasePointers.push_back(*CV);
CurInfo.Pointers.push_back(*CV);
CurInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
CGF.getTypeSize(RI->getType()), CGF.Int64Ty, true));
CurInfo.Types.push_back(MappableExprsHandler::OMP_MAP_LITERAL |
MappableExprsHandler::OMP_MAP_TARGET_PARAM |
MappableExprsHandler::OMP_MAP_IMPLICIT);
CurInfo.Mappers.push_back(nullptr);
} else {
MEHandler.generateInfoForCapture(CI, *CV, CurInfo, PartialStruct);
if (!CI->capturesThis())
MappedVarSet.insert(CI->getCapturedVar());
else
MappedVarSet.insert(nullptr);
if (CurInfo.BasePointers.empty() && !PartialStruct.Base.isValid())
MEHandler.generateDefaultMapInfo(*CI, **RI, *CV, CurInfo);
if (CI->capturesVariable())
MEHandler.generateInfoForLambdaCaptures(CI->getCapturedVar(), *CV,
CurInfo, LambdaPointers);
}
assert((!CurInfo.BasePointers.empty() || PartialStruct.Base.isValid()) &&
"Non-existing map pointer for capture!");
assert(CurInfo.BasePointers.size() == CurInfo.Pointers.size() &&
CurInfo.BasePointers.size() == CurInfo.Sizes.size() &&
CurInfo.BasePointers.size() == CurInfo.Types.size() &&
CurInfo.BasePointers.size() == CurInfo.Mappers.size() &&
"Inconsistent map information sizes!");
if (PartialStruct.Base.isValid()) {
CombinedInfo.append(PartialStruct.PreliminaryMapData);
MEHandler.emitCombinedEntry(
CombinedInfo, CurInfo.Types, PartialStruct, nullptr,
!PartialStruct.PreliminaryMapData.BasePointers.empty());
}
CombinedInfo.append(CurInfo);
}
MEHandler.adjustMemberOfForLambdaCaptures(
LambdaPointers, CombinedInfo.BasePointers, CombinedInfo.Pointers,
CombinedInfo.Types);
MEHandler.generateAllInfo(CombinedInfo, MappedVarSet);
TargetDataInfo Info;
emitOffloadingArrays(CGF, CombinedInfo, Info, OMPBuilder);
emitOffloadingArraysArgument(
CGF, Info.BasePointersArray, Info.PointersArray, Info.SizesArray,
Info.MapTypesArray, Info.MapNamesArray, Info.MappersArray, Info,
        {/*ForEndCall=*/false});
InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
InputInfo.BasePointersArray =
Address(Info.BasePointersArray, CGF.VoidPtrTy, CGM.getPointerAlign());
InputInfo.PointersArray =
Address(Info.PointersArray, CGF.VoidPtrTy, CGM.getPointerAlign());
InputInfo.SizesArray =
Address(Info.SizesArray, CGF.Int64Ty, CGM.getPointerAlign());
InputInfo.MappersArray =
Address(Info.MappersArray, CGF.VoidPtrTy, CGM.getPointerAlign());
MapTypesArray = Info.MapTypesArray;
MapNamesArray = Info.MapNamesArray;
if (RequiresOuterTask)
CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
else
emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
};
auto &&TargetElseGen = [this, &ElseGen, &D, RequiresOuterTask](
CodeGenFunction &CGF, PrePostActionTy &) {
if (RequiresOuterTask) {
CodeGenFunction::OMPTargetDataInfo InputInfo;
CGF.EmitOMPTargetTaskBasedDirective(D, ElseGen, InputInfo);
} else {
emitInlinedDirective(CGF, D.getDirectiveKind(), ElseGen);
}
};
if (OutlinedFnID) {
if (IfCond) {
emitIfClause(CGF, IfCond, TargetThenGen, TargetElseGen);
} else {
RegionCodeGenTy ThenRCG(TargetThenGen);
ThenRCG(CGF);
}
} else {
RegionCodeGenTy ElseRCG(TargetElseGen);
ElseRCG(CGF);
}
}
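// Recursively scan a statement for target region directives and emit the
// device functions for those regions that have a registered offload entry.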
void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
StringRef ParentName) {
if (!S)
return;
bool RequiresDeviceCodegen =
isa<OMPExecutableDirective>(S) &&
isOpenMPTargetExecutionDirective(
cast<OMPExecutableDirective>(S)->getDirectiveKind());
if (RequiresDeviceCodegen) {
const auto &E = *cast<OMPExecutableDirective>(S);
unsigned DeviceID;
unsigned FileID;
unsigned Line;
getTargetEntryUniqueInfo(CGM.getContext(), E.getBeginLoc(), DeviceID,
FileID, Line);
if (!OffloadEntriesInfoManager.hasTargetRegionEntryInfo(DeviceID, FileID,
ParentName, Line))
return;
switch (E.getDirectiveKind()) {
case OMPD_target:
CodeGenFunction::EmitOMPTargetDeviceFunction(CGM, ParentName,
cast<OMPTargetDirective>(E));
break;
case OMPD_target_parallel:
CodeGenFunction::EmitOMPTargetParallelDeviceFunction(
CGM, ParentName, cast<OMPTargetParallelDirective>(E));
break;
case OMPD_target_teams:
CodeGenFunction::EmitOMPTargetTeamsDeviceFunction(
CGM, ParentName, cast<OMPTargetTeamsDirective>(E));
break;
case OMPD_target_teams_distribute:
CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction(
CGM, ParentName, cast<OMPTargetTeamsDistributeDirective>(E));
break;
case OMPD_target_teams_distribute_simd:
CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction(
CGM, ParentName, cast<OMPTargetTeamsDistributeSimdDirective>(E));
break;
case OMPD_target_parallel_for:
CodeGenFunction::EmitOMPTargetParallelForDeviceFunction(
CGM, ParentName, cast<OMPTargetParallelForDirective>(E));
break;
case OMPD_target_parallel_for_simd:
CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction(
CGM, ParentName, cast<OMPTargetParallelForSimdDirective>(E));
break;
case OMPD_target_simd:
CodeGenFunction::EmitOMPTargetSimdDeviceFunction(
CGM, ParentName, cast<OMPTargetSimdDirective>(E));
break;
case OMPD_target_teams_distribute_parallel_for:
CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
CGM, ParentName,
cast<OMPTargetTeamsDistributeParallelForDirective>(E));
break;
case OMPD_target_teams_distribute_parallel_for_simd:
CodeGenFunction::
EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
CGM, ParentName,
cast<OMPTargetTeamsDistributeParallelForSimdDirective>(E));
break;
case OMPD_parallel:
case OMPD_for:
case OMPD_parallel_for:
case OMPD_parallel_master:
case OMPD_parallel_sections:
case OMPD_for_simd:
case OMPD_parallel_for_simd:
case OMPD_cancel:
case OMPD_cancellation_point:
case OMPD_ordered:
case OMPD_threadprivate:
case OMPD_allocate:
case OMPD_task:
case OMPD_simd:
case OMPD_tile:
case OMPD_unroll:
case OMPD_sections:
case OMPD_section:
case OMPD_single:
case OMPD_master:
case OMPD_critical:
case OMPD_taskyield:
case OMPD_barrier:
case OMPD_taskwait:
case OMPD_taskgroup:
case OMPD_atomic:
case OMPD_flush:
case OMPD_depobj:
case OMPD_scan:
case OMPD_teams:
case OMPD_target_data:
case OMPD_target_exit_data:
case OMPD_target_enter_data:
case OMPD_distribute:
case OMPD_distribute_simd:
case OMPD_distribute_parallel_for:
case OMPD_distribute_parallel_for_simd:
case OMPD_teams_distribute:
case OMPD_teams_distribute_simd:
case OMPD_teams_distribute_parallel_for:
case OMPD_teams_distribute_parallel_for_simd:
case OMPD_target_update:
case OMPD_declare_simd:
case OMPD_declare_variant:
case OMPD_begin_declare_variant:
case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
case OMPD_declare_mapper:
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_master_taskloop:
case OMPD_master_taskloop_simd:
case OMPD_parallel_master_taskloop:
case OMPD_parallel_master_taskloop_simd:
case OMPD_requires:
case OMPD_metadirective:
case OMPD_unknown:
default:
llvm_unreachable("Unknown target directive for OpenMP device codegen.");
}
return;
}
if (const auto *E = dyn_cast<OMPExecutableDirective>(S)) {
if (!E->hasAssociatedStmt() || !E->getAssociatedStmt())
return;
scanForTargetRegionsFunctions(E->getRawStmt(), ParentName);
return;
}
if (const auto *L = dyn_cast<LambdaExpr>(S))
S = L->getBody();
for (const Stmt *II : S->children())
scanForTargetRegionsFunctions(II, ParentName);
}
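/// Return true if the declaration is marked with a declare target
/// device_type that restricts it to the other side of the compilation
/// (nohost when compiling for the host, host when compiling for a device),
/// so it must not be emitted here.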
static bool isAssumedToBeNotEmitted(const ValueDecl *VD, bool IsDevice) {
Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
OMPDeclareTargetDeclAttr::getDeviceType(VD);
if (!DevTy)
return false;
if (!IsDevice && DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
return true;
if (IsDevice && DevTy == OMPDeclareTargetDeclAttr::DT_Host)
return true;
return false;
}
bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
if (!CGM.getLangOpts().OpenMPIsDevice) {
if (const auto *FD = dyn_cast<FunctionDecl>(GD.getDecl()))
if (isAssumedToBeNotEmitted(cast<ValueDecl>(FD),
CGM.getLangOpts().OpenMPIsDevice))
return true;
return false;
}
const ValueDecl *VD = cast<ValueDecl>(GD.getDecl());
if (const auto *FD = dyn_cast<FunctionDecl>(VD)) {
StringRef Name = CGM.getMangledName(GD);
scanForTargetRegionsFunctions(FD->getBody(), Name);
if (isAssumedToBeNotEmitted(cast<ValueDecl>(FD),
CGM.getLangOpts().OpenMPIsDevice))
return true;
}
return !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD) &&
AlreadyEmittedTargetDecls.count(VD) == 0;
}
bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
if (isAssumedToBeNotEmitted(cast<ValueDecl>(GD.getDecl()),
CGM.getLangOpts().OpenMPIsDevice))
return true;
if (!CGM.getLangOpts().OpenMPIsDevice)
return false;
QualType RDTy = cast<VarDecl>(GD.getDecl())->getType();
if (const auto *RD = RDTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl()) {
for (const CXXConstructorDecl *Ctor : RD->ctors()) {
StringRef ParentName =
CGM.getMangledName(GlobalDecl(Ctor, Ctor_Complete));
scanForTargetRegionsFunctions(Ctor->getBody(), ParentName);
}
if (const CXXDestructorDecl *Dtor = RD->getDestructor()) {
StringRef ParentName =
CGM.getMangledName(GlobalDecl(Dtor, Dtor_Complete));
scanForTargetRegionsFunctions(Dtor->getBody(), ParentName);
}
}
llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
cast<VarDecl>(GD.getDecl()));
if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link ||
(*Res == OMPDeclareTargetDeclAttr::MT_To &&
HasRequiresUnifiedSharedMemory)) {
DeferredGlobalVariables.insert(cast<VarDecl>(GD.getDecl()));
return true;
}
return false;
}
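// Register a declare target global variable with the offload entry manager
// as a 'to' or 'link' entry ('to' under unified shared memory is treated
// like 'link'), and, on the device, emit an internal constant holding the
// variable's address for non-externally-visible 'to' variables.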
void CGOpenMPRuntime::registerTargetGlobalVariable(const VarDecl *VD,
llvm::Constant *Addr) {
if (CGM.getLangOpts().OMPTargetTriples.empty() &&
!CGM.getLangOpts().OpenMPIsDevice)
return;
Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
OMPDeclareTargetDeclAttr::getDeviceType(VD);
if (DevTy && *DevTy != OMPDeclareTargetDeclAttr::DT_Any)
return;
llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
if (!Res) {
if (CGM.getLangOpts().OpenMPIsDevice) {
StringRef VarName = CGM.getMangledName(VD);
EmittedNonTargetVariables.try_emplace(VarName, Addr);
}
return;
}
OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags;
StringRef VarName;
CharUnits VarSize;
llvm::GlobalValue::LinkageTypes Linkage;
if (*Res == OMPDeclareTargetDeclAttr::MT_To &&
!HasRequiresUnifiedSharedMemory) {
Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo;
VarName = CGM.getMangledName(VD);
if (VD->hasDefinition(CGM.getContext()) != VarDecl::DeclarationOnly) {
VarSize = CGM.getContext().getTypeSizeInChars(VD->getType());
assert(!VarSize.isZero() && "Expected non-zero size of the variable");
} else {
VarSize = CharUnits::Zero();
}
Linkage = CGM.getLLVMLinkageVarDefinition(VD, false);
if (CGM.getLangOpts().OpenMPIsDevice && !VD->isExternallyVisible()) {
if (!OffloadEntriesInfoManager.hasDeviceGlobalVarEntryInfo(VarName))
return;
std::string RefName = getName({VarName, "ref"});
if (!CGM.GetGlobalValue(RefName)) {
llvm::Constant *AddrRef =
getOrCreateInternalVariable(Addr->getType(), RefName);
auto *GVAddrRef = cast<llvm::GlobalVariable>(AddrRef);
GVAddrRef->setConstant(true);
GVAddrRef->setLinkage(llvm::GlobalValue::InternalLinkage);
GVAddrRef->setInitializer(Addr);
CGM.addCompilerUsedGlobal(GVAddrRef);
}
}
} else {
assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
(*Res == OMPDeclareTargetDeclAttr::MT_To &&
HasRequiresUnifiedSharedMemory)) &&
"Declare target attribute must link or to with unified memory.");
if (*Res == OMPDeclareTargetDeclAttr::MT_Link)
Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink;
else
Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo;
if (CGM.getLangOpts().OpenMPIsDevice) {
VarName = Addr->getName();
Addr = nullptr;
} else {
VarName = getAddrOfDeclareTargetVar(VD).getName();
Addr = cast<llvm::Constant>(getAddrOfDeclareTargetVar(VD).getPointer());
}
VarSize = CGM.getPointerSize();
Linkage = llvm::GlobalValue::WeakAnyLinkage;
}
OffloadEntriesInfoManager.registerDeviceGlobalVarEntryInfo(
VarName, Addr, VarSize, Flags, Linkage);
}
bool CGOpenMPRuntime::emitTargetGlobal(GlobalDecl GD) {
if (isa<FunctionDecl>(GD.getDecl()) ||
isa<OMPDeclareReductionDecl>(GD.getDecl()))
return emitTargetFunctions(GD);
return emitTargetGlobalVariable(GD);
}
void CGOpenMPRuntime::emitDeferredTargetDecls() const {
for (const VarDecl *VD : DeferredGlobalVariables) {
llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
if (!Res)
continue;
if (*Res == OMPDeclareTargetDeclAttr::MT_To &&
!HasRequiresUnifiedSharedMemory) {
CGM.EmitGlobal(VD);
} else {
assert((*Res == OMPDeclareTargetDeclAttr::MT_Link ||
(*Res == OMPDeclareTargetDeclAttr::MT_To &&
HasRequiresUnifiedSharedMemory)) &&
"Expected link clause or to clause with unified memory.");
(void)CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
}
}
}
void CGOpenMPRuntime::adjustTargetSpecificDataForLambdas(
CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
" Expected target-based directive.");
}
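// Record the effects of a 'requires' directive: whether unified shared
// memory is required and the default memory ordering to use for atomic
// constructs.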
void CGOpenMPRuntime::processRequiresDirective(const OMPRequiresDecl *D) {
for (const OMPClause *Clause : D->clauselists()) {
if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
HasRequiresUnifiedSharedMemory = true;
} else if (const auto *AC =
dyn_cast<OMPAtomicDefaultMemOrderClause>(Clause)) {
switch (AC->getAtomicDefaultMemOrderKind()) {
case OMPC_ATOMIC_DEFAULT_MEM_ORDER_acq_rel:
RequiresAtomicOrdering = llvm::AtomicOrdering::AcquireRelease;
break;
case OMPC_ATOMIC_DEFAULT_MEM_ORDER_seq_cst:
RequiresAtomicOrdering = llvm::AtomicOrdering::SequentiallyConsistent;
break;
case OMPC_ATOMIC_DEFAULT_MEM_ORDER_relaxed:
RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic;
break;
case OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown:
break;
}
}
}
}
llvm::AtomicOrdering CGOpenMPRuntime::getDefaultMemoryOrdering() const {
return RequiresAtomicOrdering;
}
bool CGOpenMPRuntime::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
LangAS &AS) {
if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())
return false;
const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
  switch (A->getAllocatorType()) {
case OMPAllocateDeclAttr::OMPNullMemAlloc:
case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
case OMPAllocateDeclAttr::OMPThreadMemAlloc:
case OMPAllocateDeclAttr::OMPConstMemAlloc:
case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
AS = LangAS::Default;
return true;
case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
llvm_unreachable("Expected predefined allocator for the variables with the "
"static storage.");
}
return false;
}
bool CGOpenMPRuntime::hasRequiresUnifiedSharedMemory() const {
return HasRequiresUnifiedSharedMemory;
}
CGOpenMPRuntime::DisableAutoDeclareTargetRAII::DisableAutoDeclareTargetRAII(
CodeGenModule &CGM)
: CGM(CGM) {
if (CGM.getLangOpts().OpenMPIsDevice) {
SavedShouldMarkAsGlobal = CGM.getOpenMPRuntime().ShouldMarkAsGlobal;
CGM.getOpenMPRuntime().ShouldMarkAsGlobal = false;
}
}
CGOpenMPRuntime::DisableAutoDeclareTargetRAII::~DisableAutoDeclareTargetRAII() {
if (CGM.getLangOpts().OpenMPIsDevice)
CGM.getOpenMPRuntime().ShouldMarkAsGlobal = SavedShouldMarkAsGlobal;
}
bool CGOpenMPRuntime::markAsGlobalTarget(GlobalDecl GD) {
if (!CGM.getLangOpts().OpenMPIsDevice || !ShouldMarkAsGlobal)
return true;
const auto *D = cast<FunctionDecl>(GD.getDecl());
if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(D)) {
if (D->hasBody() && AlreadyEmittedTargetDecls.count(D) == 0) {
if (auto *F = dyn_cast_or_null<llvm::Function>(
CGM.GetGlobalValue(CGM.getMangledName(GD))))
return !F->isDeclaration();
return false;
}
return true;
}
return !AlreadyEmittedTargetDecls.insert(D).second;
}
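// Create the constructor-like function that reports the 'requires' flags
// to the offloading runtime via __tgt_register_requires. Returns nullptr
// when there is nothing to register (no offload targets, simd-only mode,
// device compilation, or no emitted entries).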
llvm::Function *CGOpenMPRuntime::emitRequiresDirectiveRegFun() {
if (CGM.getLangOpts().OMPTargetTriples.empty() ||
CGM.getLangOpts().OpenMPSimd || CGM.getLangOpts().OpenMPIsDevice ||
(OffloadEntriesInfoManager.empty() &&
!HasEmittedDeclareTargetRegion &&
!HasEmittedTargetRegion))
return nullptr;
ASTContext &C = CGM.getContext();
llvm::Function *RequiresRegFn;
{
CodeGenFunction CGF(CGM);
const auto &FI = CGM.getTypes().arrangeNullaryFunction();
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
std::string ReqName = getName({"omp_offloading", "requires_reg"});
RequiresRegFn = CGM.CreateGlobalInitOrCleanUpFunction(FTy, ReqName, FI);
CGF.StartFunction(GlobalDecl(), C.VoidTy, RequiresRegFn, FI, {});
OpenMPOffloadingRequiresDirFlags Flags = OMP_REQ_NONE;
assert((HasEmittedTargetRegion ||
HasEmittedDeclareTargetRegion ||
!OffloadEntriesInfoManager.empty()) &&
"Target or declare target region expected.");
if (HasRequiresUnifiedSharedMemory)
Flags = OMP_REQ_UNIFIED_SHARED_MEMORY;
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___tgt_register_requires),
llvm::ConstantInt::get(CGM.Int64Ty, Flags));
CGF.FinishFunction();
}
return RequiresRegFn;
}
void CGOpenMPRuntime::emitTeamsCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars) {
if (!CGF.HaveInsertPoint())
return;
llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
CodeGenFunction::RunCleanupsScope Scope(CGF);
  llvm::Value *Args[] = {
      RTLoc,
      CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
      CGF.Builder.CreateBitCast(OutlinedFn, getKmpc_MicroPointerTy())};
llvm::SmallVector<llvm::Value *, 16> RealArgs;
RealArgs.append(std::begin(Args), std::end(Args));
RealArgs.append(CapturedVars.begin(), CapturedVars.end());
llvm::FunctionCallee RTLFn = OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_fork_teams);
CGF.EmitRuntimeCall(RTLFn, RealArgs);
}
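// Emit a call to __kmpc_push_num_teams with the values of the num_teams
// and thread_limit clauses (0 when a clause is absent).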
void CGOpenMPRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
const Expr *NumTeams,
const Expr *ThreadLimit,
SourceLocation Loc) {
if (!CGF.HaveInsertPoint())
return;
llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
llvm::Value *NumTeamsVal =
NumTeams
? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(NumTeams),
CGF.CGM.Int32Ty, true)
: CGF.Builder.getInt32(0);
llvm::Value *ThreadLimitVal =
ThreadLimit
? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(ThreadLimit),
CGF.CGM.Int32Ty, true)
: CGF.Builder.getInt32(0);
llvm::Value *PushNumTeamsArgs[] = {RTLoc, getThreadID(CGF, Loc), NumTeamsVal,
ThreadLimitVal};
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_push_num_teams),
PushNumTeamsArgs);
}
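// Emit the __tgt_target_data_{begin,end}_mapper calls that bracket a
// 'target data' region, honoring the if and device clauses. The region
// body is emitted inside the then/else branches when captured device
// addresses must be privatized, and once unconditionally otherwise.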
void CGOpenMPRuntime::emitTargetDataCalls(
CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) {
if (!CGF.HaveInsertPoint())
return;
PrePostActionTy NoPrivAction;
auto &&BeginThenGen = [this, &D, Device, &Info,
&CodeGen](CodeGenFunction &CGF, PrePostActionTy &) {
MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
MappableExprsHandler MEHandler(D, CGF);
MEHandler.generateAllInfo(CombinedInfo);
emitOffloadingArrays(CGF, CombinedInfo, Info, OMPBuilder,
                         /*IsNonContiguous=*/true);
llvm::Value *BasePointersArrayArg = nullptr;
llvm::Value *PointersArrayArg = nullptr;
llvm::Value *SizesArrayArg = nullptr;
llvm::Value *MapTypesArrayArg = nullptr;
llvm::Value *MapNamesArrayArg = nullptr;
llvm::Value *MappersArrayArg = nullptr;
emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
SizesArrayArg, MapTypesArrayArg,
MapNamesArrayArg, MappersArrayArg, Info);
llvm::Value *DeviceID = nullptr;
if (Device) {
DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
CGF.Int64Ty, true);
} else {
DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
}
llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
llvm::Value *OffloadingArgs[] = {RTLoc,
DeviceID,
PointerNum,
BasePointersArrayArg,
PointersArrayArg,
SizesArrayArg,
MapTypesArrayArg,
MapNamesArrayArg,
MappersArrayArg};
CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___tgt_target_data_begin_mapper),
OffloadingArgs);
if (!Info.CaptureDeviceAddrMap.empty())
CodeGen(CGF);
};
auto &&EndThenGen = [this, Device, &Info, &D](CodeGenFunction &CGF,
PrePostActionTy &) {
assert(Info.isValid() && "Invalid data environment closing arguments.");
llvm::Value *BasePointersArrayArg = nullptr;
llvm::Value *PointersArrayArg = nullptr;
llvm::Value *SizesArrayArg = nullptr;
llvm::Value *MapTypesArrayArg = nullptr;
llvm::Value *MapNamesArrayArg = nullptr;
llvm::Value *MappersArrayArg = nullptr;
emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
SizesArrayArg, MapTypesArrayArg,
MapNamesArrayArg, MappersArrayArg, Info,
                                 {/*ForEndCall=*/true});
llvm::Value *DeviceID = nullptr;
if (Device) {
DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
CGF.Int64Ty, true);
} else {
DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
}
llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
llvm::Value *OffloadingArgs[] = {RTLoc,
DeviceID,
PointerNum,
BasePointersArrayArg,
PointersArrayArg,
SizesArrayArg,
MapTypesArrayArg,
MapNamesArrayArg,
MappersArrayArg};
CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___tgt_target_data_end_mapper),
OffloadingArgs);
};
auto &&BeginElseGen = [&Info, &CodeGen, &NoPrivAction](CodeGenFunction &CGF,
PrePostActionTy &) {
if (!Info.CaptureDeviceAddrMap.empty()) {
CodeGen.setAction(NoPrivAction);
CodeGen(CGF);
}
};
auto &&EndElseGen = [](CodeGenFunction &CGF, PrePostActionTy &) {};
if (IfCond) {
emitIfClause(CGF, IfCond, BeginThenGen, BeginElseGen);
} else {
RegionCodeGenTy RCG(BeginThenGen);
RCG(CGF);
}
if (Info.CaptureDeviceAddrMap.empty()) {
CodeGen.setAction(NoPrivAction);
CodeGen(CGF);
}
if (IfCond) {
emitIfClause(CGF, IfCond, EndThenGen, EndElseGen);
} else {
RegionCodeGenTy RCG(EndThenGen);
RCG(CGF);
}
}
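// Emit the single runtime call for a standalone 'target enter data',
// 'target exit data', or 'target update' directive, selecting the
// (nowait) mapper entry point from the directive kind and wrapping the
// call in a task when depend or nowait clauses are present.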
void CGOpenMPRuntime::emitTargetDataStandAloneCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
const Expr *Device) {
if (!CGF.HaveInsertPoint())
return;
assert((isa<OMPTargetEnterDataDirective>(D) ||
isa<OMPTargetExitDataDirective>(D) ||
isa<OMPTargetUpdateDirective>(D)) &&
"Expecting either target enter, exit data, or update directives.");
CodeGenFunction::OMPTargetDataInfo InputInfo;
llvm::Value *MapTypesArray = nullptr;
llvm::Value *MapNamesArray = nullptr;
auto &&ThenGen = [this, &D, Device, &InputInfo, &MapTypesArray,
&MapNamesArray](CodeGenFunction &CGF, PrePostActionTy &) {
llvm::Value *DeviceID = nullptr;
if (Device) {
DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
CGF.Int64Ty, true);
} else {
DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
}
llvm::Constant *PointerNum =
CGF.Builder.getInt32(InputInfo.NumberOfTargetItems);
llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
llvm::Value *OffloadingArgs[] = {RTLoc,
DeviceID,
PointerNum,
InputInfo.BasePointersArray.getPointer(),
InputInfo.PointersArray.getPointer(),
InputInfo.SizesArray.getPointer(),
MapTypesArray,
MapNamesArray,
InputInfo.MappersArray.getPointer()};
const bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>();
RuntimeFunction RTLFn;
switch (D.getDirectiveKind()) {
case OMPD_target_enter_data:
RTLFn = HasNowait ? OMPRTL___tgt_target_data_begin_nowait_mapper
: OMPRTL___tgt_target_data_begin_mapper;
break;
case OMPD_target_exit_data:
RTLFn = HasNowait ? OMPRTL___tgt_target_data_end_nowait_mapper
: OMPRTL___tgt_target_data_end_mapper;
break;
case OMPD_target_update:
RTLFn = HasNowait ? OMPRTL___tgt_target_data_update_nowait_mapper
: OMPRTL___tgt_target_data_update_mapper;
break;
case OMPD_parallel:
case OMPD_for:
case OMPD_parallel_for:
case OMPD_parallel_master:
case OMPD_parallel_sections:
case OMPD_for_simd:
case OMPD_parallel_for_simd:
case OMPD_cancel:
case OMPD_cancellation_point:
case OMPD_ordered:
case OMPD_threadprivate:
case OMPD_allocate:
case OMPD_task:
case OMPD_simd:
case OMPD_tile:
case OMPD_unroll:
case OMPD_sections:
case OMPD_section:
case OMPD_single:
case OMPD_master:
case OMPD_critical:
case OMPD_taskyield:
case OMPD_barrier:
case OMPD_taskwait:
case OMPD_taskgroup:
case OMPD_atomic:
case OMPD_flush:
case OMPD_depobj:
case OMPD_scan:
case OMPD_teams:
case OMPD_target_data:
case OMPD_distribute:
case OMPD_distribute_simd:
case OMPD_distribute_parallel_for:
case OMPD_distribute_parallel_for_simd:
case OMPD_teams_distribute:
case OMPD_teams_distribute_simd:
case OMPD_teams_distribute_parallel_for:
case OMPD_teams_distribute_parallel_for_simd:
case OMPD_declare_simd:
case OMPD_declare_variant:
case OMPD_begin_declare_variant:
case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
case OMPD_declare_mapper:
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_master_taskloop:
case OMPD_master_taskloop_simd:
case OMPD_parallel_master_taskloop:
case OMPD_parallel_master_taskloop_simd:
case OMPD_target:
case OMPD_target_simd:
case OMPD_target_teams_distribute:
case OMPD_target_teams_distribute_simd:
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd:
case OMPD_target_teams:
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
case OMPD_requires:
case OMPD_metadirective:
case OMPD_unknown:
default:
llvm_unreachable("Unexpected standalone target data directive.");
break;
}
CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), RTLFn),
OffloadingArgs);
};
auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray,
&MapNamesArray](CodeGenFunction &CGF,
PrePostActionTy &) {
MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
MappableExprsHandler MEHandler(D, CGF);
MEHandler.generateAllInfo(CombinedInfo);
TargetDataInfo Info;
emitOffloadingArrays(CGF, CombinedInfo, Info, OMPBuilder,
                         /*IsNonContiguous=*/true);
bool RequiresOuterTask = D.hasClausesOfKind<OMPDependClause>() ||
D.hasClausesOfKind<OMPNowaitClause>();
emitOffloadingArraysArgument(
CGF, Info.BasePointersArray, Info.PointersArray, Info.SizesArray,
Info.MapTypesArray, Info.MapNamesArray, Info.MappersArray, Info,
        {/*ForEndCall=*/false});
InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
InputInfo.BasePointersArray =
Address(Info.BasePointersArray, CGF.VoidPtrTy, CGM.getPointerAlign());
InputInfo.PointersArray =
Address(Info.PointersArray, CGF.VoidPtrTy, CGM.getPointerAlign());
InputInfo.SizesArray =
Address(Info.SizesArray, CGF.Int64Ty, CGM.getPointerAlign());
InputInfo.MappersArray =
Address(Info.MappersArray, CGF.VoidPtrTy, CGM.getPointerAlign());
MapTypesArray = Info.MapTypesArray;
MapNamesArray = Info.MapNamesArray;
if (RequiresOuterTask)
CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
else
emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
};
if (IfCond) {
emitIfClause(CGF, IfCond, TargetThenGen,
[](CodeGenFunction &CGF, PrePostActionTy &) {});
} else {
RegionCodeGenTy ThenRCG(TargetThenGen);
ThenRCG(CGF);
}
}
namespace {
enum ParamKindTy {
Linear,
LinearRef,
LinearUVal,
LinearVal,
Uniform,
Vector,
};
struct ParamAttrTy {
ParamKindTy Kind = Vector;
llvm::APSInt StrideOrArg;
llvm::APSInt Alignment;
bool HasVarStride = false;
};
} // namespace
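/// Compute the size in bits of the characteristic data type used to derive
/// the default vector length for a 'declare simd' function: the return
/// type if it is not void, otherwise the first parameter (or 'this') with
/// vector kind; record/union and missing types fall back to int.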
static unsigned evaluateCDTSize(const FunctionDecl *FD,
ArrayRef<ParamAttrTy> ParamAttrs) {
QualType RetType = FD->getReturnType();
if (RetType.isNull())
return 0;
ASTContext &C = FD->getASTContext();
QualType CDT;
if (!RetType.isNull() && !RetType->isVoidType()) {
CDT = RetType;
} else {
unsigned Offset = 0;
if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
if (ParamAttrs[Offset].Kind == Vector)
CDT = C.getPointerType(C.getRecordType(MD->getParent()));
++Offset;
}
if (CDT.isNull()) {
for (unsigned I = 0, E = FD->getNumParams(); I < E; ++I) {
if (ParamAttrs[I + Offset].Kind == Vector) {
CDT = FD->getParamDecl(I)->getType();
break;
}
}
}
}
if (CDT.isNull())
CDT = C.IntTy;
CDT = CDT->getCanonicalTypeUnqualified();
if (CDT->isRecordType() || CDT->isUnionType())
CDT = C.IntTy;
return C.getTypeSize(CDT);
}
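/// Mangle the parameter kinds, strides and alignments into the parameter
/// part of a vector function ABI name ('l', 'R', 'U', 'L', 'u', 'v',
/// optionally followed by a stride and an 'a'<alignment> suffix).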
static std::string mangleVectorParameters(ArrayRef<ParamAttrTy> ParamAttrs) {
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
for (const auto &ParamAttr : ParamAttrs) {
switch (ParamAttr.Kind) {
case Linear:
Out << 'l';
break;
case LinearRef:
Out << 'R';
break;
case LinearUVal:
Out << 'U';
break;
case LinearVal:
Out << 'L';
break;
case Uniform:
Out << 'u';
break;
case Vector:
Out << 'v';
break;
}
if (ParamAttr.HasVarStride)
Out << "s" << ParamAttr.StrideOrArg;
else if (ParamAttr.Kind == Linear || ParamAttr.Kind == LinearRef ||
ParamAttr.Kind == LinearUVal || ParamAttr.Kind == LinearVal) {
if (ParamAttr.StrideOrArg < 0)
Out << 'n' << -ParamAttr.StrideOrArg;
else if (ParamAttr.StrideOrArg != 1)
Out << ParamAttr.StrideOrArg;
}
if (!!ParamAttr.Alignment)
Out << 'a' << ParamAttr.Alignment;
}
return std::string(Out.str());
}
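/// Add the x86 "_ZGV<isa><mask><vlen><parameters>_<name>" vector variant
/// attributes for a 'declare simd' function, one per masked/unmasked form
/// and per ISA level ('b', 'c', 'd', 'e' with 128/256/256/512-bit vector
/// registers).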
static void
emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn,
const llvm::APSInt &VLENVal,
ArrayRef<ParamAttrTy> ParamAttrs,
OMPDeclareSimdDeclAttr::BranchStateTy State) {
struct ISADataTy {
char ISA;
unsigned VecRegSize;
};
  ISADataTy ISAData[] = {
      {'b', 128}, // SSE
      {'c', 256}, // AVX
      {'d', 256}, // AVX2
      {'e', 512}, // AVX512
  };
llvm::SmallVector<char, 2> Masked;
switch (State) {
case OMPDeclareSimdDeclAttr::BS_Undefined:
Masked.push_back('N');
Masked.push_back('M');
break;
case OMPDeclareSimdDeclAttr::BS_Notinbranch:
Masked.push_back('N');
break;
case OMPDeclareSimdDeclAttr::BS_Inbranch:
Masked.push_back('M');
break;
}
for (char Mask : Masked) {
for (const ISADataTy &Data : ISAData) {
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
Out << "_ZGV" << Data.ISA << Mask;
if (!VLENVal) {
unsigned NumElts = evaluateCDTSize(FD, ParamAttrs);
assert(NumElts && "Non-zero simdlen/cdtsize expected");
Out << llvm::APSInt::getUnsigned(Data.VecRegSize / NumElts);
} else {
Out << VLENVal;
}
Out << mangleVectorParameters(ParamAttrs);
Out << '_' << Fn->getName();
Fn->addFnAttr(Out.str());
}
}
}
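/// Return true if a parameter of the given type and kind "maps to vector"
/// (MTV) for the AArch64 vector function ABI: uniform and linear uval/ref
/// parameters do not, and plain linear parameters only do when they have
/// reference type.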
static bool getAArch64MTV(QualType QT, ParamKindTy Kind) {
QT = QT.getCanonicalType();
if (QT->isVoidType())
return false;
if (Kind == ParamKindTy::Uniform)
return false;
  if (Kind == ParamKindTy::LinearUVal || Kind == ParamKindTy::LinearRef)
return false;
if ((Kind == ParamKindTy::Linear || Kind == ParamKindTy::LinearVal) &&
!QT->isReferenceType())
return false;
return true;
}
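/// Return true if the type is "pass by value" (PBV) for the AArch64 vector
/// function ABI: an integer, floating-point, or pointer type of 8, 16, 32,
/// 64, or 128 bits.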
static bool getAArch64PBV(QualType QT, ASTContext &C) {
QT = QT.getCanonicalType();
unsigned Size = C.getTypeSize(QT);
if (Size != 8 && Size != 16 && Size != 32 && Size != 64 && Size != 128)
return false;
if (QT->isFloatingType())
return true;
if (QT->isIntegerType())
return true;
if (QT->isPointerType())
return true;
return false;
}
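/// Compute the lane size (LS) of a parameter in bits: the pointee size for
/// non-MTV pointers to PBV types, the type size for PBV types, and the
/// pointer width otherwise.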
static unsigned getAArch64LS(QualType QT, ParamKindTy Kind, ASTContext &C) {
if (!getAArch64MTV(QT, Kind) && QT.getCanonicalType()->isPointerType()) {
QualType PTy = QT.getCanonicalType()->getPointeeType();
if (getAArch64PBV(PTy, C))
return C.getTypeSize(PTy);
}
if (getAArch64PBV(QT, C))
return C.getTypeSize(QT);
return C.getTypeSize(C.getUIntPtrType());
}
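/// Compute the narrowest data size (NDS) and widest data size (WDS) over
/// the return type and parameters, and whether the output becomes an input
/// (a return type that is MTV but not PBV).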
static std::tuple<unsigned, unsigned, bool>
getNDSWDS(const FunctionDecl *FD, ArrayRef<ParamAttrTy> ParamAttrs) {
QualType RetType = FD->getReturnType().getCanonicalType();
ASTContext &C = FD->getASTContext();
bool OutputBecomesInput = false;
llvm::SmallVector<unsigned, 8> Sizes;
if (!RetType->isVoidType()) {
Sizes.push_back(getAArch64LS(RetType, ParamKindTy::Vector, C));
if (!getAArch64PBV(RetType, C) && getAArch64MTV(RetType, {}))
OutputBecomesInput = true;
}
for (unsigned I = 0, E = FD->getNumParams(); I < E; ++I) {
QualType QT = FD->getParamDecl(I)->getType().getCanonicalType();
Sizes.push_back(getAArch64LS(QT, ParamAttrs[I].Kind, C));
}
assert(!Sizes.empty() && "Unable to determine NDS and WDS.");
assert(llvm::all_of(Sizes,
[](unsigned Size) {
return Size == 8 || Size == 16 || Size == 32 ||
Size == 64 || Size == 128;
}) &&
"Invalid size");
return std::make_tuple(*std::min_element(std::begin(Sizes), std::end(Sizes)),
*std::max_element(std::begin(Sizes), std::end(Sizes)),
OutputBecomesInput);
}
template <typename T>
static void addAArch64VectorName(T VLEN, StringRef LMask, StringRef Prefix,
char ISA, StringRef ParSeq,
StringRef MangledName, bool OutputBecomesInput,
llvm::Function *Fn) {
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
Out << Prefix << ISA << LMask << VLEN;
if (OutputBecomesInput)
Out << "v";
Out << ParSeq << "_" << MangledName;
Fn->addFnAttr(Out.str());
}
static void addAArch64AdvSIMDNDSNames(unsigned NDS, StringRef Mask,
StringRef Prefix, char ISA,
StringRef ParSeq, StringRef MangledName,
bool OutputBecomesInput,
llvm::Function *Fn) {
switch (NDS) {
case 8:
addAArch64VectorName(8, Mask, Prefix, ISA, ParSeq, MangledName,
OutputBecomesInput, Fn);
addAArch64VectorName(16, Mask, Prefix, ISA, ParSeq, MangledName,
OutputBecomesInput, Fn);
break;
case 16:
addAArch64VectorName(4, Mask, Prefix, ISA, ParSeq, MangledName,
OutputBecomesInput, Fn);
addAArch64VectorName(8, Mask, Prefix, ISA, ParSeq, MangledName,
OutputBecomesInput, Fn);
break;
case 32:
addAArch64VectorName(2, Mask, Prefix, ISA, ParSeq, MangledName,
OutputBecomesInput, Fn);
addAArch64VectorName(4, Mask, Prefix, ISA, ParSeq, MangledName,
OutputBecomesInput, Fn);
break;
case 64:
case 128:
addAArch64VectorName(2, Mask, Prefix, ISA, ParSeq, MangledName,
OutputBecomesInput, Fn);
break;
default:
llvm_unreachable("Scalar type is too wide.");
}
}
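/// Emit the AArch64 "_ZGV..." vector variant attributes for a 'declare
/// simd' function, for either SVE ('s') or Advanced SIMD ('n'), and
/// diagnose simdlen values that cannot be honored by the selected ISA.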
static void emitAArch64DeclareSimdFunction(
CodeGenModule &CGM, const FunctionDecl *FD, unsigned UserVLEN,
ArrayRef<ParamAttrTy> ParamAttrs,
OMPDeclareSimdDeclAttr::BranchStateTy State, StringRef MangledName,
char ISA, unsigned VecRegSize, llvm::Function *Fn, SourceLocation SLoc) {
const auto Data = getNDSWDS(FD, ParamAttrs);
const unsigned NDS = std::get<0>(Data);
const unsigned WDS = std::get<1>(Data);
const bool OutputBecomesInput = std::get<2>(Data);
if (UserVLEN == 1) {
unsigned DiagID = CGM.getDiags().getCustomDiagID(
DiagnosticsEngine::Warning,
"The clause simdlen(1) has no effect when targeting aarch64.");
CGM.getDiags().Report(SLoc, DiagID);
return;
}
if (ISA == 'n' && UserVLEN && !llvm::isPowerOf2_32(UserVLEN)) {
unsigned DiagID = CGM.getDiags().getCustomDiagID(
DiagnosticsEngine::Warning, "The value specified in simdlen must be a "
"power of 2 when targeting Advanced SIMD.");
CGM.getDiags().Report(SLoc, DiagID);
return;
}
if (ISA == 's' && UserVLEN != 0) {
if ((UserVLEN * WDS > 2048) || (UserVLEN * WDS % 128 != 0)) {
unsigned DiagID = CGM.getDiags().getCustomDiagID(
DiagnosticsEngine::Warning, "The clause simdlen must fit the %0-bit "
"lanes in the architectural constraints "
"for SVE (min is 128-bit, max is "
"2048-bit, by steps of 128-bit)");
CGM.getDiags().Report(SLoc, DiagID) << WDS;
return;
}
}
const std::string ParSeq = mangleVectorParameters(ParamAttrs);
StringRef Prefix = "_ZGV";
if (UserVLEN) {
if (ISA == 's') {
addAArch64VectorName(UserVLEN, "M", Prefix, ISA, ParSeq, MangledName,
OutputBecomesInput, Fn);
} else {
assert(ISA == 'n' && "Expected ISA either 's' or 'n'.");
switch (State) {
case OMPDeclareSimdDeclAttr::BS_Undefined:
addAArch64VectorName(UserVLEN, "N", Prefix, ISA, ParSeq, MangledName,
OutputBecomesInput, Fn);
addAArch64VectorName(UserVLEN, "M", Prefix, ISA, ParSeq, MangledName,
OutputBecomesInput, Fn);
break;
case OMPDeclareSimdDeclAttr::BS_Notinbranch:
addAArch64VectorName(UserVLEN, "N", Prefix, ISA, ParSeq, MangledName,
OutputBecomesInput, Fn);
break;
case OMPDeclareSimdDeclAttr::BS_Inbranch:
addAArch64VectorName(UserVLEN, "M", Prefix, ISA, ParSeq, MangledName,
OutputBecomesInput, Fn);
break;
}
}
} else {
if (ISA == 's') {
addAArch64VectorName("x", "M", Prefix, ISA, ParSeq, MangledName,
OutputBecomesInput, Fn);
} else {
assert(ISA == 'n' && "Expected ISA either 's' or 'n'.");
switch (State) {
case OMPDeclareSimdDeclAttr::BS_Undefined:
addAArch64AdvSIMDNDSNames(NDS, "N", Prefix, ISA, ParSeq, MangledName,
OutputBecomesInput, Fn);
addAArch64AdvSIMDNDSNames(NDS, "M", Prefix, ISA, ParSeq, MangledName,
OutputBecomesInput, Fn);
break;
case OMPDeclareSimdDeclAttr::BS_Notinbranch:
addAArch64AdvSIMDNDSNames(NDS, "N", Prefix, ISA, ParSeq, MangledName,
OutputBecomesInput, Fn);
break;
case OMPDeclareSimdDeclAttr::BS_Inbranch:
addAArch64AdvSIMDNDSNames(NDS, "M", Prefix, ISA, ParSeq, MangledName,
OutputBecomesInput, Fn);
break;
}
}
}
}
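// For every 'declare simd' attribute on the function (and its previous
// declarations), collect the uniform/aligned/linear parameter attributes
// and emit the corresponding vector variant attributes using the x86 or
// AArch64 mangling scheme.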
void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
llvm::Function *Fn) {
ASTContext &C = CGM.getContext();
FD = FD->getMostRecentDecl();
while (FD) {
llvm::DenseMap<const Decl *, unsigned> ParamPositions;
if (isa<CXXMethodDecl>(FD))
ParamPositions.try_emplace(FD, 0);
unsigned ParamPos = ParamPositions.size();
for (const ParmVarDecl *P : FD->parameters()) {
ParamPositions.try_emplace(P->getCanonicalDecl(), ParamPos);
++ParamPos;
}
for (const auto *Attr : FD->specific_attrs<OMPDeclareSimdDeclAttr>()) {
llvm::SmallVector<ParamAttrTy, 8> ParamAttrs(ParamPositions.size());
for (const Expr *E : Attr->uniforms()) {
E = E->IgnoreParenImpCasts();
unsigned Pos;
if (isa<CXXThisExpr>(E)) {
Pos = ParamPositions[FD];
} else {
const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
->getCanonicalDecl();
auto It = ParamPositions.find(PVD);
assert(It != ParamPositions.end() && "Function parameter not found");
Pos = It->second;
}
ParamAttrs[Pos].Kind = Uniform;
}
auto *NI = Attr->alignments_begin();
for (const Expr *E : Attr->aligneds()) {
E = E->IgnoreParenImpCasts();
unsigned Pos;
QualType ParmTy;
if (isa<CXXThisExpr>(E)) {
Pos = ParamPositions[FD];
ParmTy = E->getType();
} else {
const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
->getCanonicalDecl();
auto It = ParamPositions.find(PVD);
assert(It != ParamPositions.end() && "Function parameter not found");
Pos = It->second;
ParmTy = PVD->getType();
}
ParamAttrs[Pos].Alignment =
(*NI)
? (*NI)->EvaluateKnownConstInt(C)
: llvm::APSInt::getUnsigned(
C.toCharUnitsFromBits(C.getOpenMPDefaultSimdAlign(ParmTy))
.getQuantity());
++NI;
}
auto *SI = Attr->steps_begin();
auto *MI = Attr->modifiers_begin();
for (const Expr *E : Attr->linears()) {
E = E->IgnoreParenImpCasts();
unsigned Pos;
bool IsReferenceType = false;
unsigned PtrRescalingFactor = 1;
if (isa<CXXThisExpr>(E)) {
Pos = ParamPositions[FD];
auto *P = cast<PointerType>(E->getType());
PtrRescalingFactor = CGM.getContext()
.getTypeSizeInChars(P->getPointeeType())
.getQuantity();
} else {
const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
->getCanonicalDecl();
auto It = ParamPositions.find(PVD);
assert(It != ParamPositions.end() && "Function parameter not found");
Pos = It->second;
if (auto *P = dyn_cast<PointerType>(PVD->getType()))
PtrRescalingFactor = CGM.getContext()
.getTypeSizeInChars(P->getPointeeType())
.getQuantity();
else if (PVD->getType()->isReferenceType()) {
IsReferenceType = true;
PtrRescalingFactor =
CGM.getContext()
.getTypeSizeInChars(PVD->getType().getNonReferenceType())
.getQuantity();
}
}
ParamAttrTy &ParamAttr = ParamAttrs[Pos];
if (*MI == OMPC_LINEAR_ref)
ParamAttr.Kind = LinearRef;
else if (*MI == OMPC_LINEAR_uval)
ParamAttr.Kind = LinearUVal;
else if (IsReferenceType)
ParamAttr.Kind = LinearVal;
else
ParamAttr.Kind = Linear;
ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(1);
if (*SI) {
Expr::EvalResult Result;
if (!(*SI)->EvaluateAsInt(Result, C, Expr::SE_AllowSideEffects)) {
if (const auto *DRE =
cast<DeclRefExpr>((*SI)->IgnoreParenImpCasts())) {
if (const auto *StridePVD =
dyn_cast<ParmVarDecl>(DRE->getDecl())) {
ParamAttr.HasVarStride = true;
auto It = ParamPositions.find(StridePVD->getCanonicalDecl());
assert(It != ParamPositions.end() &&
"Function parameter not found");
ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(It->second);
}
}
} else {
ParamAttr.StrideOrArg = Result.Val.getInt();
}
}
if (!ParamAttr.HasVarStride &&
(ParamAttr.Kind == Linear || ParamAttr.Kind == LinearRef))
ParamAttr.StrideOrArg = ParamAttr.StrideOrArg * PtrRescalingFactor;
++SI;
++MI;
}
llvm::APSInt VLENVal;
SourceLocation ExprLoc;
const Expr *VLENExpr = Attr->getSimdlen();
if (VLENExpr) {
VLENVal = VLENExpr->EvaluateKnownConstInt(C);
ExprLoc = VLENExpr->getExprLoc();
}
OMPDeclareSimdDeclAttr::BranchStateTy State = Attr->getBranchState();
if (CGM.getTriple().isX86()) {
emitX86DeclareSimdFunction(FD, Fn, VLENVal, ParamAttrs, State);
} else if (CGM.getTriple().getArch() == llvm::Triple::aarch64) {
unsigned VLEN = VLENVal.getExtValue();
StringRef MangledName = Fn->getName();
if (CGM.getTarget().hasFeature("sve"))
emitAArch64DeclareSimdFunction(CGM, FD, VLEN, ParamAttrs, State,
MangledName, 's', 128, Fn, ExprLoc);
if (CGM.getTarget().hasFeature("neon"))
emitAArch64DeclareSimdFunction(CGM, FD, VLEN, ParamAttrs, State,
MangledName, 'n', 128, Fn, ExprLoc);
}
}
FD = FD->getPreviousDecl();
}
}
namespace {
class DoacrossCleanupTy final : public EHScopeStack::Cleanup {
public:
static const int DoacrossFinArgs = 2;
private:
llvm::FunctionCallee RTLFn;
llvm::Value *Args[DoacrossFinArgs];
public:
DoacrossCleanupTy(llvm::FunctionCallee RTLFn,
ArrayRef<llvm::Value *> CallArgs)
: RTLFn(RTLFn) {
assert(CallArgs.size() == DoacrossFinArgs);
std::copy(CallArgs.begin(), CallArgs.end(), std::begin(Args));
}
  void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
if (!CGF.HaveInsertPoint())
return;
CGF.EmitRuntimeCall(RTLFn, Args);
}
};
} // namespace
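// Emit the __kmpc_doacross_init call for an ordered(n) loop nest: build an
// array of kmp_dim descriptors (lower bound, upper bound, stride) for each
// loop dimension and push a cleanup that calls __kmpc_doacross_fini.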
void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations) {
if (!CGF.HaveInsertPoint())
return;
ASTContext &C = CGM.getContext();
QualType Int64Ty = C.getIntTypeForBitwidth(64, true);
RecordDecl *RD;
if (KmpDimTy.isNull()) {
RD = C.buildImplicitRecord("kmp_dim");
RD->startDefinition();
addFieldToRecordDecl(C, RD, Int64Ty);
addFieldToRecordDecl(C, RD, Int64Ty);
addFieldToRecordDecl(C, RD, Int64Ty);
RD->completeDefinition();
KmpDimTy = C.getRecordType(RD);
} else {
RD = cast<RecordDecl>(KmpDimTy->getAsTagDecl());
}
llvm::APInt Size(32, NumIterations.size());
QualType ArrayTy =
C.getConstantArrayType(KmpDimTy, Size, nullptr, ArrayType::Normal, 0);
Address DimsAddr = CGF.CreateMemTemp(ArrayTy, "dims");
CGF.EmitNullInitialization(DimsAddr, ArrayTy);
enum { LowerFD = 0, UpperFD, StrideFD };
for (unsigned I = 0, E = NumIterations.size(); I < E; ++I) {
LValue DimsLVal = CGF.MakeAddrLValue(
CGF.Builder.CreateConstArrayGEP(DimsAddr, I), KmpDimTy);
LValue UpperLVal = CGF.EmitLValueForField(
DimsLVal, *std::next(RD->field_begin(), UpperFD));
llvm::Value *NumIterVal = CGF.EmitScalarConversion(
CGF.EmitScalarExpr(NumIterations[I]), NumIterations[I]->getType(),
Int64Ty, NumIterations[I]->getExprLoc());
CGF.EmitStoreOfScalar(NumIterVal, UpperLVal);
LValue StrideLVal = CGF.EmitLValueForField(
DimsLVal, *std::next(RD->field_begin(), StrideFD));
CGF.EmitStoreOfScalar(llvm::ConstantInt::getSigned(CGM.Int64Ty, 1),
StrideLVal);
}
llvm::Value *Args[] = {
emitUpdateLocation(CGF, D.getBeginLoc()),
getThreadID(CGF, D.getBeginLoc()),
llvm::ConstantInt::getSigned(CGM.Int32Ty, NumIterations.size()),
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.Builder.CreateConstArrayGEP(DimsAddr, 0).getPointer(),
CGM.VoidPtrTy)};
llvm::FunctionCallee RTLFn = OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_doacross_init);
CGF.EmitRuntimeCall(RTLFn, Args);
llvm::Value *FiniArgs[DoacrossCleanupTy::DoacrossFinArgs] = {
emitUpdateLocation(CGF, D.getEndLoc()), getThreadID(CGF, D.getEndLoc())};
llvm::FunctionCallee FiniRTLFn = OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_doacross_fini);
CGF.EHStack.pushCleanup<DoacrossCleanupTy>(NormalAndEHCleanup, FiniRTLFn,
llvm::makeArrayRef(FiniArgs));
}
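// Emit __kmpc_doacross_post for depend(source) or __kmpc_doacross_wait for
// depend(sink), passing the loop iteration vector from the depend clause.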
void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C) {
QualType Int64Ty =
CGM.getContext().getIntTypeForBitwidth(64, 1);
llvm::APInt Size(32, C->getNumLoops());
QualType ArrayTy = CGM.getContext().getConstantArrayType(
Int64Ty, Size, nullptr, ArrayType::Normal, 0);
Address CntAddr = CGF.CreateMemTemp(ArrayTy, ".cnt.addr");
for (unsigned I = 0, E = C->getNumLoops(); I < E; ++I) {
const Expr *CounterVal = C->getLoopData(I);
assert(CounterVal);
llvm::Value *CntVal = CGF.EmitScalarConversion(
CGF.EmitScalarExpr(CounterVal), CounterVal->getType(), Int64Ty,
CounterVal->getExprLoc());
    CGF.EmitStoreOfScalar(CntVal, CGF.Builder.CreateConstArrayGEP(CntAddr, I),
                          /*Volatile=*/false, Int64Ty);
}
llvm::Value *Args[] = {
emitUpdateLocation(CGF, C->getBeginLoc()),
getThreadID(CGF, C->getBeginLoc()),
CGF.Builder.CreateConstArrayGEP(CntAddr, 0).getPointer()};
llvm::FunctionCallee RTLFn;
if (C->getDependencyKind() == OMPC_DEPEND_source) {
RTLFn = OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
OMPRTL___kmpc_doacross_post);
} else {
assert(C->getDependencyKind() == OMPC_DEPEND_sink);
RTLFn = OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
OMPRTL___kmpc_doacross_wait);
}
CGF.EmitRuntimeCall(RTLFn, Args);
}
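// Common helper for calling outlined functions and runtime entries: it
// attaches an artificial debug location for Loc and, when the callee is a
// known function already marked nounwind, uses the cheaper nounwind call
// form instead of the generic runtime-call path.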
void CGOpenMPRuntime::emitCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::FunctionCallee Callee,
ArrayRef<llvm::Value *> Args) const {
assert(Loc.isValid() && "Outlined function call location must be valid.");
auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
if (auto *Fn = dyn_cast<llvm::Function>(Callee.getCallee())) {
if (Fn->doesNotThrow()) {
CGF.EmitNounwindRuntimeCall(Fn, Args);
return;
}
}
CGF.EmitRuntimeCall(Callee, Args);
}
void CGOpenMPRuntime::emitOutlinedFunctionCall(
CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
ArrayRef<llvm::Value *> Args) const {
emitCall(CGF, Loc, OutlinedFn, Args);
}
void CGOpenMPRuntime::emitFunctionProlog(CodeGenFunction &CGF, const Decl *D) {
if (const auto *FD = dyn_cast<FunctionDecl>(D))
if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(FD))
HasEmittedDeclareTargetRegion = true;
}
Address CGOpenMPRuntime::getParameterAddress(CodeGenFunction &CGF,
const VarDecl *NativeParam,
const VarDecl *TargetParam) const {
return CGF.GetAddrOfLocalVar(NativeParam);
}
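// Returns the allocator value for an 'allocator' expression converted to
// void*, or a null void* when no allocator expression is given (i.e. the
// default allocator is used).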
static llvm::Value *getAllocatorVal(CodeGenFunction &CGF,
const Expr *Allocator) {
llvm::Value *AllocVal;
if (Allocator) {
AllocVal = CGF.EmitScalarExpr(Allocator);
AllocVal = CGF.EmitScalarConversion(AllocVal, Allocator->getType(),
CGF.getContext().VoidPtrTy,
Allocator->getExprLoc());
} else {
AllocVal = llvm::Constant::getNullValue(
CGF.CGM.getTypes().ConvertType(CGF.getContext().VoidPtrTy));
}
return AllocVal;
}
static llvm::Value *getAlignmentValue(CodeGenModule &CGM, const VarDecl *VD) {
llvm::Optional<CharUnits> AllocateAlignment = CGM.getOMPAllocateAlignment(VD);
if (!AllocateAlignment)
return nullptr;
return llvm::ConstantInt::get(CGM.SizeTy, AllocateAlignment->getQuantity());
}
Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD) {
if (!VD)
return Address::invalid();
Address UntiedAddr = Address::invalid();
Address UntiedRealAddr = Address::invalid();
auto It = FunctionToUntiedTaskStackMap.find(CGF.CurFn);
if (It != FunctionToUntiedTaskStackMap.end()) {
const UntiedLocalVarsAddressesMap &UntiedData =
UntiedLocalVarsStack[It->second];
auto I = UntiedData.find(VD);
if (I != UntiedData.end()) {
UntiedAddr = I->second.first;
UntiedRealAddr = I->second.second;
}
}
const VarDecl *CVD = VD->getCanonicalDecl();
if (CVD->hasAttr<OMPAllocateDeclAttr>()) {
if (!isAllocatableDecl(VD))
return UntiedAddr;
llvm::Value *Size;
CharUnits Align = CGM.getContext().getDeclAlign(CVD);
    if (CVD->getType()->isVariablyModifiedType()) {
      // The size of a VLA is only known at runtime; round it up to a multiple
      // of the declared alignment.
      Size = CGF.getTypeSize(CVD->getType());
      Size = CGF.Builder.CreateNUWAdd(
          Size, CGM.getSize(Align - CharUnits::fromQuantity(1)));
      Size = CGF.Builder.CreateUDiv(Size, CGM.getSize(Align));
      Size = CGF.Builder.CreateNUWMul(Size, CGM.getSize(Align));
    } else {
      CharUnits Sz = CGM.getContext().getTypeSizeInChars(CVD->getType());
      Size = CGM.getSize(Sz.alignTo(Align));
    }
llvm::Value *ThreadID = getThreadID(CGF, CVD->getBeginLoc());
const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
const Expr *Allocator = AA->getAllocator();
llvm::Value *AllocVal = getAllocatorVal(CGF, Allocator);
llvm::Value *Alignment = getAlignmentValue(CGM, CVD);
SmallVector<llvm::Value *, 4> Args;
Args.push_back(ThreadID);
if (Alignment)
Args.push_back(Alignment);
Args.push_back(Size);
Args.push_back(AllocVal);
llvm::omp::RuntimeFunction FnID =
Alignment ? OMPRTL___kmpc_aligned_alloc : OMPRTL___kmpc_alloc;
llvm::Value *Addr = CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), FnID), Args,
getName({CVD->getName(), ".void.addr"}));
llvm::FunctionCallee FiniRTLFn = OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_free);
QualType Ty = CGM.getContext().getPointerType(CVD->getType());
Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
Addr, CGF.ConvertTypeForMem(Ty), getName({CVD->getName(), ".addr"}));
if (UntiedAddr.isValid())
      CGF.EmitStoreOfScalar(Addr, UntiedAddr, /*Volatile=*/false, Ty);
class OMPAllocateCleanupTy final : public EHScopeStack::Cleanup {
llvm::FunctionCallee RTLFn;
SourceLocation::UIntTy LocEncoding;
Address Addr;
const Expr *AllocExpr;
public:
OMPAllocateCleanupTy(llvm::FunctionCallee RTLFn,
SourceLocation::UIntTy LocEncoding, Address Addr,
const Expr *AllocExpr)
: RTLFn(RTLFn), LocEncoding(LocEncoding), Addr(Addr),
AllocExpr(AllocExpr) {}
      void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
if (!CGF.HaveInsertPoint())
return;
llvm::Value *Args[3];
Args[0] = CGF.CGM.getOpenMPRuntime().getThreadID(
CGF, SourceLocation::getFromRawEncoding(LocEncoding));
Args[1] = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
Addr.getPointer(), CGF.VoidPtrTy);
llvm::Value *AllocVal = getAllocatorVal(CGF, AllocExpr);
Args[2] = AllocVal;
CGF.EmitRuntimeCall(RTLFn, Args);
}
};
Address VDAddr =
UntiedRealAddr.isValid()
? UntiedRealAddr
: Address(Addr, CGF.ConvertTypeForMem(CVD->getType()), Align);
CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(
NormalAndEHCleanup, FiniRTLFn, CVD->getLocation().getRawEncoding(),
VDAddr, Allocator);
if (UntiedRealAddr.isValid())
if (auto *Region =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
Region->emitUntiedSwitch(CGF);
return VDAddr;
}
return UntiedAddr;
}
bool CGOpenMPRuntime::isLocalVarInUntiedTask(CodeGenFunction &CGF,
const VarDecl *VD) const {
auto It = FunctionToUntiedTaskStackMap.find(CGF.CurFn);
if (It == FunctionToUntiedTaskStackMap.end())
return false;
return UntiedLocalVarsStack[It->second].count(VD) > 0;
}
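// RAII that, for the duration of a loop directive, records the declarations
// named in its 'nontemporal' clauses so that isNontemporalDecl() can be
// queried while the loop body is emitted.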
CGOpenMPRuntime::NontemporalDeclsRAII::NontemporalDeclsRAII(
CodeGenModule &CGM, const OMPLoopDirective &S)
: CGM(CGM), NeedToPush(S.hasClausesOfKind<OMPNontemporalClause>()) {
assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode.");
if (!NeedToPush)
return;
NontemporalDeclsSet &DS =
CGM.getOpenMPRuntime().NontemporalDeclsStack.emplace_back();
for (const auto *C : S.getClausesOfKind<OMPNontemporalClause>()) {
for (const Stmt *Ref : C->private_refs()) {
const auto *SimpleRefExpr = cast<Expr>(Ref)->IgnoreParenImpCasts();
const ValueDecl *VD;
if (const auto *DRE = dyn_cast<DeclRefExpr>(SimpleRefExpr)) {
VD = DRE->getDecl();
} else {
const auto *ME = cast<MemberExpr>(SimpleRefExpr);
assert((ME->isImplicitCXXThis() ||
isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts())) &&
"Expected member of current class.");
VD = ME->getMemberDecl();
}
DS.insert(VD);
}
}
}
CGOpenMPRuntime::NontemporalDeclsRAII::~NontemporalDeclsRAII() {
if (!NeedToPush)
return;
CGM.getOpenMPRuntime().NontemporalDeclsStack.pop_back();
}
CGOpenMPRuntime::UntiedTaskLocalDeclsRAII::UntiedTaskLocalDeclsRAII(
CodeGenFunction &CGF,
const llvm::MapVector<CanonicalDeclPtr<const VarDecl>,
std::pair<Address, Address>> &LocalVars)
: CGM(CGF.CGM), NeedToPush(!LocalVars.empty()) {
if (!NeedToPush)
return;
CGM.getOpenMPRuntime().FunctionToUntiedTaskStackMap.try_emplace(
CGF.CurFn, CGM.getOpenMPRuntime().UntiedLocalVarsStack.size());
CGM.getOpenMPRuntime().UntiedLocalVarsStack.push_back(LocalVars);
}
CGOpenMPRuntime::UntiedTaskLocalDeclsRAII::~UntiedTaskLocalDeclsRAII() {
if (!NeedToPush)
return;
CGM.getOpenMPRuntime().UntiedLocalVarsStack.pop_back();
}
bool CGOpenMPRuntime::isNontemporalDecl(const ValueDecl *VD) const {
assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode.");
return llvm::any_of(
CGM.getOpenMPRuntime().NontemporalDeclsStack,
[VD](const NontemporalDeclsSet &Set) { return Set.contains(VD); });
}
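// Computes the set of declarations for which the lastprivate conditional
// analysis must be disabled inside the given directive: variables captured by
// a target or tasking region, or explicitly privatized there (private,
// firstprivate, lastprivate, reduction, linear), must not have their
// assignments treated as updates of an enclosing lastprivate conditional.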
void CGOpenMPRuntime::LastprivateConditionalRAII::tryToDisableInnerAnalysis(
const OMPExecutableDirective &S,
llvm::DenseSet<CanonicalDeclPtr<const Decl>> &NeedToAddForLPCsAsDisabled)
const {
llvm::DenseSet<CanonicalDeclPtr<const Decl>> NeedToCheckForLPCs;
if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()) ||
isOpenMPTaskingDirective(S.getDirectiveKind())) {
SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
getOpenMPCaptureRegions(CaptureRegions, S.getDirectiveKind());
const CapturedStmt *CS = S.getCapturedStmt(CaptureRegions.front());
for (const CapturedStmt::Capture &Cap : CS->captures()) {
if (Cap.capturesVariable() || Cap.capturesVariableByCopy())
NeedToCheckForLPCs.insert(Cap.getCapturedVar());
}
}
  // Collect scalar variables referenced by the privatization clauses of the
  // directive.
  auto &&CollectScalarDecls = [&NeedToCheckForLPCs](auto &&Clauses) {
    for (const auto *C : Clauses) {
      for (const Expr *Ref : C->varlists()) {
        if (!Ref->getType()->isScalarType())
          continue;
        const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
        if (!DRE)
          continue;
        NeedToCheckForLPCs.insert(DRE->getDecl());
      }
    }
  };
  CollectScalarDecls(S.getClausesOfKind<OMPPrivateClause>());
  CollectScalarDecls(S.getClausesOfKind<OMPFirstprivateClause>());
  CollectScalarDecls(S.getClausesOfKind<OMPLastprivateClause>());
  CollectScalarDecls(S.getClausesOfKind<OMPReductionClause>());
  CollectScalarDecls(S.getClausesOfKind<OMPLinearClause>());
for (const Decl *VD : NeedToCheckForLPCs) {
for (const LastprivateConditionalData &Data :
llvm::reverse(CGM.getOpenMPRuntime().LastprivateConditionalStack)) {
if (Data.DeclToUniqueName.count(VD) > 0) {
if (!Data.Disabled)
NeedToAddForLPCsAsDisabled.insert(VD);
break;
}
}
}
}
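// Pushes lastprivate(conditional:) bookkeeping for the directive when
// compiling with OpenMP >= 5.0: each listed variable gets a unique global
// name (used later for its "last value" and "last IV" internal globals), and
// the current loop iteration variable lvalue and function are remembered.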
CGOpenMPRuntime::LastprivateConditionalRAII::LastprivateConditionalRAII(
CodeGenFunction &CGF, const OMPExecutableDirective &S, LValue IVLVal)
: CGM(CGF.CGM),
Action((CGM.getLangOpts().OpenMP >= 50 &&
llvm::any_of(S.getClausesOfKind<OMPLastprivateClause>(),
[](const OMPLastprivateClause *C) {
return C->getKind() ==
OMPC_LASTPRIVATE_conditional;
}))
? ActionToDo::PushAsLastprivateConditional
: ActionToDo::DoNotPush) {
assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode.");
if (CGM.getLangOpts().OpenMP < 50 || Action == ActionToDo::DoNotPush)
return;
assert(Action == ActionToDo::PushAsLastprivateConditional &&
"Expected a push action.");
LastprivateConditionalData &Data =
CGM.getOpenMPRuntime().LastprivateConditionalStack.emplace_back();
for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
if (C->getKind() != OMPC_LASTPRIVATE_conditional)
continue;
for (const Expr *Ref : C->varlists()) {
Data.DeclToUniqueName.insert(std::make_pair(
cast<DeclRefExpr>(Ref->IgnoreParenImpCasts())->getDecl(),
SmallString<16>(generateUniqueName(CGM, "pl_cond", Ref))));
}
}
Data.IVLVal = IVLVal;
Data.Fn = CGF.CurFn;
}
CGOpenMPRuntime::LastprivateConditionalRAII::LastprivateConditionalRAII(
CodeGenFunction &CGF, const OMPExecutableDirective &S)
: CGM(CGF.CGM), Action(ActionToDo::DoNotPush) {
assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode.");
if (CGM.getLangOpts().OpenMP < 50)
return;
llvm::DenseSet<CanonicalDeclPtr<const Decl>> NeedToAddForLPCsAsDisabled;
tryToDisableInnerAnalysis(S, NeedToAddForLPCsAsDisabled);
if (!NeedToAddForLPCsAsDisabled.empty()) {
Action = ActionToDo::DisableLastprivateConditional;
LastprivateConditionalData &Data =
CGM.getOpenMPRuntime().LastprivateConditionalStack.emplace_back();
for (const Decl *VD : NeedToAddForLPCsAsDisabled)
Data.DeclToUniqueName.insert(std::make_pair(VD, SmallString<16>()));
Data.Fn = CGF.CurFn;
Data.Disabled = true;
}
}
CGOpenMPRuntime::LastprivateConditionalRAII
CGOpenMPRuntime::LastprivateConditionalRAII::disable(
CodeGenFunction &CGF, const OMPExecutableDirective &S) {
return LastprivateConditionalRAII(CGF, S);
}
CGOpenMPRuntime::LastprivateConditionalRAII::~LastprivateConditionalRAII() {
if (CGM.getLangOpts().OpenMP < 50)
return;
if (Action == ActionToDo::DisableLastprivateConditional) {
assert(CGM.getOpenMPRuntime().LastprivateConditionalStack.back().Disabled &&
"Expected list of disabled private vars.");
CGM.getOpenMPRuntime().LastprivateConditionalStack.pop_back();
}
if (Action == ActionToDo::PushAsLastprivateConditional) {
assert(
!CGM.getOpenMPRuntime().LastprivateConditionalStack.back().Disabled &&
"Expected list of lastprivate conditional vars.");
CGM.getOpenMPRuntime().LastprivateConditionalStack.pop_back();
}
}
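// Creates the private copy used for a lastprivate conditional variable. The
// copy is wrapped into an implicit record together with a char "fired" flag
// that records whether the copy was actually assigned in the region, roughly:
//
//   struct {
//     <type of x> x; // private value
//     char fired;    // set on assignment, checked at region exit
//   };
//
// The flag is zero-initialized here and the address of the value field is
// returned as the private address of the variable.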
Address CGOpenMPRuntime::emitLastprivateConditionalInit(CodeGenFunction &CGF,
const VarDecl *VD) {
ASTContext &C = CGM.getContext();
auto I = LastprivateConditionalToTypes.find(CGF.CurFn);
if (I == LastprivateConditionalToTypes.end())
I = LastprivateConditionalToTypes.try_emplace(CGF.CurFn).first;
QualType NewType;
const FieldDecl *VDField;
const FieldDecl *FiredField;
LValue BaseLVal;
auto VI = I->getSecond().find(VD);
if (VI == I->getSecond().end()) {
    RecordDecl *RD = C.buildImplicitRecord("lastprivate.conditional");
RD->startDefinition();
VDField = addFieldToRecordDecl(C, RD, VD->getType().getNonReferenceType());
FiredField = addFieldToRecordDecl(C, RD, C.CharTy);
RD->completeDefinition();
NewType = C.getRecordType(RD);
    Address Addr =
        CGF.CreateMemTemp(NewType, C.getDeclAlign(VD), VD->getName());
BaseLVal = CGF.MakeAddrLValue(Addr, NewType, AlignmentSource::Decl);
I->getSecond().try_emplace(VD, NewType, VDField, FiredField, BaseLVal);
} else {
NewType = std::get<0>(VI->getSecond());
VDField = std::get<1>(VI->getSecond());
FiredField = std::get<2>(VI->getSecond());
BaseLVal = std::get<3>(VI->getSecond());
}
LValue FiredLVal =
CGF.EmitLValueForField(BaseLVal, FiredField);
CGF.EmitStoreOfScalar(
llvm::ConstantInt::getNullValue(CGF.ConvertTypeForMem(C.CharTy)),
FiredLVal);
return CGF.EmitLValueForField(BaseLVal, VDField).getAddress(CGF);
}
namespace {
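/// Statement visitor that checks whether an lvalue expression refers to a
/// variable currently registered as lastprivate conditional. On a match it
/// records the expression, the canonical declaration, the unique global name,
/// the loop IV lvalue, and the function that registered the variable.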
class LastprivateConditionalRefChecker final
: public ConstStmtVisitor<LastprivateConditionalRefChecker, bool> {
ArrayRef<CGOpenMPRuntime::LastprivateConditionalData> LPM;
const Expr *FoundE = nullptr;
const Decl *FoundD = nullptr;
StringRef UniqueDeclName;
LValue IVLVal;
llvm::Function *FoundFn = nullptr;
SourceLocation Loc;
public:
bool VisitDeclRefExpr(const DeclRefExpr *E) {
for (const CGOpenMPRuntime::LastprivateConditionalData &D :
llvm::reverse(LPM)) {
auto It = D.DeclToUniqueName.find(E->getDecl());
if (It == D.DeclToUniqueName.end())
continue;
if (D.Disabled)
return false;
FoundE = E;
FoundD = E->getDecl()->getCanonicalDecl();
UniqueDeclName = It->second;
IVLVal = D.IVLVal;
FoundFn = D.Fn;
break;
}
return FoundE == E;
}
bool VisitMemberExpr(const MemberExpr *E) {
if (!CodeGenFunction::IsWrappedCXXThis(E->getBase()))
return false;
for (const CGOpenMPRuntime::LastprivateConditionalData &D :
llvm::reverse(LPM)) {
auto It = D.DeclToUniqueName.find(E->getMemberDecl());
if (It == D.DeclToUniqueName.end())
continue;
if (D.Disabled)
return false;
FoundE = E;
FoundD = E->getMemberDecl()->getCanonicalDecl();
UniqueDeclName = It->second;
IVLVal = D.IVLVal;
FoundFn = D.Fn;
break;
}
return FoundE == E;
}
bool VisitStmt(const Stmt *S) {
for (const Stmt *Child : S->children()) {
if (!Child)
continue;
if (const auto *E = dyn_cast<Expr>(Child))
if (!E->isGLValue())
continue;
if (Visit(Child))
return true;
}
return false;
}
explicit LastprivateConditionalRefChecker(
ArrayRef<CGOpenMPRuntime::LastprivateConditionalData> LPM)
: LPM(LPM) {}
std::tuple<const Expr *, const Decl *, StringRef, LValue, llvm::Function *>
getFoundData() const {
return std::make_tuple(FoundE, FoundD, UniqueDeclName, IVLVal, FoundFn);
}
};
}
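// Emits the "maybe update the last value" sequence for a lastprivate
// conditional variable. Two internal globals are used per variable: one
// holding the last stored value (named UniqueDeclName) and one holding the
// iteration-variable value at which it was stored ("<UniqueDeclName>.iv").
// The generated logic is, roughly:
//
//   if (last_iv <= iv) { // signed or unsigned compare, per the IV type
//     last_iv = iv;
//     last_x  = private_x;
//   }
//
// and is wrapped in a critical region named after the variable unless
// compiling in simd-only mode, where no other threads can race on the
// globals.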
void CGOpenMPRuntime::emitLastprivateConditionalUpdate(CodeGenFunction &CGF,
LValue IVLVal,
StringRef UniqueDeclName,
LValue LVal,
SourceLocation Loc) {
llvm::Type *LLIVTy = CGF.ConvertTypeForMem(IVLVal.getType());
llvm::Constant *LastIV =
getOrCreateInternalVariable(LLIVTy, getName({UniqueDeclName, "iv"}));
cast<llvm::GlobalVariable>(LastIV)->setAlignment(
IVLVal.getAlignment().getAsAlign());
LValue LastIVLVal = CGF.MakeNaturalAlignAddrLValue(LastIV, IVLVal.getType());
llvm::GlobalVariable *Last = getOrCreateInternalVariable(
CGF.ConvertTypeForMem(LVal.getType()), UniqueDeclName);
Last->setAlignment(LVal.getAlignment().getAsAlign());
LValue LastLVal = CGF.MakeAddrLValue(
Address(Last, Last->getValueType(), LVal.getAlignment()), LVal.getType());
llvm::Value *IVVal = CGF.EmitLoadOfScalar(IVLVal, Loc);
auto &&CodeGen = [&LastIVLVal, &IVLVal, IVVal, &LVal, &LastLVal,
Loc](CodeGenFunction &CGF, PrePostActionTy &Action) {
Action.Enter(CGF);
llvm::Value *LastIVVal = CGF.EmitLoadOfScalar(LastIVLVal, Loc);
llvm::Value *CmpRes;
if (IVLVal.getType()->isSignedIntegerType()) {
CmpRes = CGF.Builder.CreateICmpSLE(LastIVVal, IVVal);
} else {
assert(IVLVal.getType()->isUnsignedIntegerType() &&
"Loop iteration variable must be integer.");
CmpRes = CGF.Builder.CreateICmpULE(LastIVVal, IVVal);
}
llvm::BasicBlock *ThenBB = CGF.createBasicBlock("lp_cond_then");
llvm::BasicBlock *ExitBB = CGF.createBasicBlock("lp_cond_exit");
CGF.Builder.CreateCondBr(CmpRes, ThenBB, ExitBB);
CGF.EmitBlock(ThenBB);
CGF.EmitStoreOfScalar(IVVal, LastIVLVal);
switch (CGF.getEvaluationKind(LVal.getType())) {
case TEK_Scalar: {
llvm::Value *PrivVal = CGF.EmitLoadOfScalar(LVal, Loc);
CGF.EmitStoreOfScalar(PrivVal, LastLVal);
break;
}
case TEK_Complex: {
CodeGenFunction::ComplexPairTy PrivVal = CGF.EmitLoadOfComplex(LVal, Loc);
      CGF.EmitStoreOfComplex(PrivVal, LastLVal, /*isInit=*/false);
break;
}
case TEK_Aggregate:
llvm_unreachable(
"Aggregates are not supported in lastprivate conditional.");
}
    CGF.EmitBranch(ExitBB);
    // No debug location is needed for the unconditional branch into the exit
    // block.
    (void)ApplyDebugLocation::CreateEmpty(CGF);
    CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
};
  if (CGM.getLangOpts().OpenMPSimd) {
    // In simd-only mode no parallel regions are emitted, so the update does
    // not need to be guarded by a critical region.
    RegionCodeGenTy ThenRCG(CodeGen);
    ThenRCG(CGF);
} else {
emitCriticalRegion(CGF, UniqueDeclName, CodeGen, Loc);
}
}
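// Called when a store through LHS is emitted: if LHS refers to a lastprivate
// conditional variable registered by the current function, the tracked "last
// value" global is updated; if the variable was registered by an enclosing
// function (e.g. the assignment happens inside an inner outlined region),
// only the "fired" flag of the wrapper struct is set, atomically.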
void CGOpenMPRuntime::checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
const Expr *LHS) {
if (CGF.getLangOpts().OpenMP < 50 || LastprivateConditionalStack.empty())
return;
LastprivateConditionalRefChecker Checker(LastprivateConditionalStack);
if (!Checker.Visit(LHS))
return;
const Expr *FoundE;
const Decl *FoundD;
StringRef UniqueDeclName;
LValue IVLVal;
llvm::Function *FoundFn;
std::tie(FoundE, FoundD, UniqueDeclName, IVLVal, FoundFn) =
Checker.getFoundData();
if (FoundFn != CGF.CurFn) {
auto It = LastprivateConditionalToTypes[FoundFn].find(FoundD);
assert(It != LastprivateConditionalToTypes[FoundFn].end() &&
"Lastprivate conditional is not found in outer region.");
QualType StructTy = std::get<0>(It->getSecond());
    const FieldDecl *FiredDecl = std::get<2>(It->getSecond());
LValue PrivLVal = CGF.EmitLValue(FoundE);
Address StructAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
PrivLVal.getAddress(CGF),
CGF.ConvertTypeForMem(CGF.getContext().getPointerType(StructTy)),
CGF.ConvertTypeForMem(StructTy));
LValue BaseLVal =
CGF.MakeAddrLValue(StructAddr, StructTy, AlignmentSource::Decl);
LValue FiredLVal = CGF.EmitLValueForField(BaseLVal, FiredDecl);
    CGF.EmitAtomicStore(RValue::get(llvm::ConstantInt::get(
                            CGF.ConvertTypeForMem(FiredDecl->getType()), 1)),
                        FiredLVal, llvm::AtomicOrdering::Unordered,
                        /*IsVolatile=*/true, /*isInit=*/false);
return;
}
LValue LVal = CGF.EmitLValue(FoundE);
emitLastprivateConditionalUpdate(CGF, IVLVal, UniqueDeclName, LVal,
FoundE->getExprLoc());
}
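// After emitting a directive nested inside a lastprivate conditional region,
// checks the "fired" flag of every conditional variable the directive
// captured (except those in IgnoredDecls) and, where the flag is set,
// performs the deferred last-value update for the enclosing loop.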
void CGOpenMPRuntime::checkAndEmitSharedLastprivateConditional(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls) {
if (CGF.getLangOpts().OpenMP < 50 || LastprivateConditionalStack.empty())
return;
auto Range = llvm::reverse(LastprivateConditionalStack);
auto It = llvm::find_if(
Range, [](const LastprivateConditionalData &D) { return !D.Disabled; });
if (It == Range.end() || It->Fn != CGF.CurFn)
return;
auto LPCI = LastprivateConditionalToTypes.find(It->Fn);
assert(LPCI != LastprivateConditionalToTypes.end() &&
"Lastprivates must be registered already.");
SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind());
const CapturedStmt *CS = D.getCapturedStmt(CaptureRegions.back());
for (const auto &Pair : It->DeclToUniqueName) {
const auto *VD = cast<VarDecl>(Pair.first->getCanonicalDecl());
if (!CS->capturesVariable(VD) || IgnoredDecls.contains(VD))
continue;
auto I = LPCI->getSecond().find(Pair.first);
    assert(I != LPCI->getSecond().end() &&
           "Lastprivate must be registered already.");
LValue BaseLVal = std::get<3>(I->getSecond());
LValue FiredLVal =
CGF.EmitLValueForField(BaseLVal, std::get<2>(I->getSecond()));
llvm::Value *Res = CGF.EmitLoadOfScalar(FiredLVal, D.getBeginLoc());
llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Res);
llvm::BasicBlock *ThenBB = CGF.createBasicBlock("lpc.then");
llvm::BasicBlock *DoneBB = CGF.createBasicBlock("lpc.done");
CGF.Builder.CreateCondBr(Cmp, ThenBB, DoneBB);
CGF.EmitBlock(ThenBB);
Address Addr = CGF.GetAddrOfLocalVar(VD);
LValue LVal;
if (VD->getType()->isReferenceType())
LVal = CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
AlignmentSource::Decl);
else
LVal = CGF.MakeAddrLValue(Addr, VD->getType().getNonReferenceType(),
AlignmentSource::Decl);
emitLastprivateConditionalUpdate(CGF, It->IVLVal, Pair.second, LVal,
D.getBeginLoc());
auto AL = ApplyDebugLocation::CreateArtificial(CGF);
    CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}
}
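// Final step at the end of the construct: if a "last value" global was
// created for the variable (i.e. some iteration actually assigned it), copy
// that value back into the given private lvalue.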
void CGOpenMPRuntime::emitLastprivateConditionalFinalUpdate(
CodeGenFunction &CGF, LValue PrivLVal, const VarDecl *VD,
SourceLocation Loc) {
if (CGF.getLangOpts().OpenMP < 50)
return;
auto It = LastprivateConditionalStack.back().DeclToUniqueName.find(VD);
assert(It != LastprivateConditionalStack.back().DeclToUniqueName.end() &&
"Unknown lastprivate conditional variable.");
StringRef UniqueName = It->second;
llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(UniqueName);
if (!GV)
return;
LValue LPLVal = CGF.MakeAddrLValue(
Address(GV, GV->getValueType(), PrivLVal.getAlignment()),
PrivLVal.getType().getNonReferenceType());
llvm::Value *Res = CGF.EmitLoadOfScalar(LPLVal, Loc);
CGF.EmitStoreOfScalar(Res, PrivLVal);
}
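// CGOpenMPSIMDRuntime is the runtime used for simd-only (-fopenmp-simd)
// compilation. Only constructs that make sense without the host runtime are
// supported; the remaining entry points below are marked unreachable because
// the corresponding directives are ignored in this mode and should never
// reach code generation.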
llvm::Function *CGOpenMPSIMDRuntime::emitParallelOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
llvm_unreachable("Not supported in SIMD-only mode");
}
llvm::Function *CGOpenMPSIMDRuntime::emitTeamsOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
llvm_unreachable("Not supported in SIMD-only mode");
}
llvm::Function *CGOpenMPSIMDRuntime::emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
bool Tied, unsigned &NumberOfParts) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitParallelCall(CodeGenFunction &CGF,
SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond,
llvm::Value *NumThreads) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitCriticalRegion(
CodeGenFunction &CGF, StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
const Expr *Hint) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitMaskedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc,
const Expr *Filter) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
SourceLocation Loc) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitTaskgroupRegion(
CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitSingleRegion(
CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen,
SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars,
ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> AssignmentOps) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
SourceLocation Loc,
bool IsThreads) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitBarrierCall(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDirectiveKind Kind,
bool EmitChecks,
bool ForceSimpleCall) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitForDispatchInit(
CodeGenFunction &CGF, SourceLocation Loc,
const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
bool Ordered, const DispatchRTInput &DispatchValues) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitForStaticInit(
CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind,
const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitDistributeStaticInit(
CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
SourceLocation Loc,
unsigned IVSize,
bool IVSigned) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitForStaticFinish(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDirectiveKind DKind) {
llvm_unreachable("Not supported in SIMD-only mode");
}
llvm::Value *CGOpenMPSIMDRuntime::emitForNext(CodeGenFunction &CGF,
SourceLocation Loc,
unsigned IVSize, bool IVSigned,
Address IL, Address LB,
Address UB, Address ST) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
llvm::Value *NumThreads,
SourceLocation Loc) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitProcBindClause(CodeGenFunction &CGF,
ProcBindKind ProcBind,
SourceLocation Loc) {
llvm_unreachable("Not supported in SIMD-only mode");
}
Address CGOpenMPSIMDRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
const VarDecl *VD,
Address VDAddr,
SourceLocation Loc) {
llvm_unreachable("Not supported in SIMD-only mode");
}
llvm::Function *CGOpenMPSIMDRuntime::emitThreadPrivateVarDefinition(
const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF) {
llvm_unreachable("Not supported in SIMD-only mode");
}
Address CGOpenMPSIMDRuntime::getAddrOfArtificialThreadPrivate(
CodeGenFunction &CGF, QualType VarType, StringRef Name) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitFlush(CodeGenFunction &CGF,
ArrayRef<const Expr *> Vars,
SourceLocation Loc,
llvm::AtomicOrdering AO) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds,
const Expr *IfCond,
const OMPTaskDataTy &Data) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitTaskLoopCall(
CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds,
const Expr *IfCond, const OMPTaskDataTy &Data) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitReduction(
CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
assert(Options.SimpleReduction && "Only simple reduction is expected.");
CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
ReductionOps, Options);
}
llvm::Value *CGOpenMPSIMDRuntime::emitTaskReductionInit(
CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitTaskReductionFini(CodeGenFunction &CGF,
SourceLocation Loc,
bool IsWorksharingReduction) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
SourceLocation Loc,
ReductionCodeGen &RCG,
unsigned N) {
llvm_unreachable("Not supported in SIMD-only mode");
}
Address CGOpenMPSIMDRuntime::getTaskReductionItem(CodeGenFunction &CGF,
SourceLocation Loc,
llvm::Value *ReductionsPtr,
LValue SharedLVal) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
SourceLocation Loc,
const OMPTaskDataTy &Data) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitCancellationPointCall(
CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind CancelRegion) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitCancelCall(CodeGenFunction &CGF,
SourceLocation Loc, const Expr *IfCond,
OpenMPDirectiveKind CancelRegion) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitTargetOutlinedFunction(
const OMPExecutableDirective &D, StringRef ParentName,
llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitTargetCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter) {
llvm_unreachable("Not supported in SIMD-only mode");
}
bool CGOpenMPSIMDRuntime::emitTargetFunctions(GlobalDecl GD) {
llvm_unreachable("Not supported in SIMD-only mode");
}
bool CGOpenMPSIMDRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
llvm_unreachable("Not supported in SIMD-only mode");
}
bool CGOpenMPSIMDRuntime::emitTargetGlobal(GlobalDecl GD) {
return false;
}
void CGOpenMPSIMDRuntime::emitTeamsCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
const Expr *NumTeams,
const Expr *ThreadLimit,
SourceLocation Loc) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitTargetDataCalls(
CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitTargetDataStandAloneCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
const Expr *Device) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitDoacrossInit(CodeGenFunction &CGF,
const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations) {
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C) {
llvm_unreachable("Not supported in SIMD-only mode");
}
const VarDecl *
CGOpenMPSIMDRuntime::translateParameter(const FieldDecl *FD,
const VarDecl *NativeParam) const {
llvm_unreachable("Not supported in SIMD-only mode");
}
Address
CGOpenMPSIMDRuntime::getParameterAddress(CodeGenFunction &CGF,
const VarDecl *NativeParam,
const VarDecl *TargetParam) const {
llvm_unreachable("Not supported in SIMD-only mode");
}