#include "CGCall.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
using namespace clang;
using namespace CodeGen;
namespace {
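// AtomicInfo gathers the size, alignment, and representation details of an
// atomic l-value (simple, bit-field, vector element, or ext-vector element)
// and provides helpers for emitting the operation inline or as a libcall.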
class AtomicInfo {
CodeGenFunction &CGF;
QualType AtomicTy;
QualType ValueTy;
uint64_t AtomicSizeInBits;
uint64_t ValueSizeInBits;
CharUnits AtomicAlign;
CharUnits ValueAlign;
TypeEvaluationKind EvaluationKind;
bool UseLibcall;
LValue LVal;
CGBitFieldInfo BFI;
public:
AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
: CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
EvaluationKind(TEK_Scalar), UseLibcall(true) {
assert(!lvalue.isGlobalReg());
ASTContext &C = CGF.getContext();
if (lvalue.isSimple()) {
AtomicTy = lvalue.getType();
if (auto *ATy = AtomicTy->getAs<AtomicType>())
ValueTy = ATy->getValueType();
else
ValueTy = AtomicTy;
EvaluationKind = CGF.getEvaluationKind(ValueTy);
uint64_t ValueAlignInBits;
uint64_t AtomicAlignInBits;
TypeInfo ValueTI = C.getTypeInfo(ValueTy);
ValueSizeInBits = ValueTI.Width;
ValueAlignInBits = ValueTI.Align;
TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
AtomicSizeInBits = AtomicTI.Width;
AtomicAlignInBits = AtomicTI.Align;
assert(ValueSizeInBits <= AtomicSizeInBits);
assert(ValueAlignInBits <= AtomicAlignInBits);
AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
if (lvalue.getAlignment().isZero())
lvalue.setAlignment(AtomicAlign);
LVal = lvalue;
} else if (lvalue.isBitField()) {
ValueTy = lvalue.getType();
ValueSizeInBits = C.getTypeSize(ValueTy);
auto &OrigBFI = lvalue.getBitFieldInfo();
auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
AtomicSizeInBits = C.toBits(
C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
.alignTo(lvalue.getAlignment()));
auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
auto OffsetInChars =
(C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
lvalue.getAlignment();
VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
CGF.Int8Ty, VoidPtrAddr, OffsetInChars.getQuantity());
llvm::Type *IntTy = CGF.Builder.getIntNTy(AtomicSizeInBits);
auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
VoidPtrAddr, IntTy->getPointerTo(), "atomic_bitfield_base");
BFI = OrigBFI;
BFI.Offset = Offset;
BFI.StorageSize = AtomicSizeInBits;
BFI.StorageOffset += OffsetInChars;
LVal = LValue::MakeBitfield(Address(Addr, IntTy, lvalue.getAlignment()),
BFI, lvalue.getType(), lvalue.getBaseInfo(),
lvalue.getTBAAInfo());
AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
if (AtomicTy.isNull()) {
        llvm::APInt Size(
            /*numBits=*/32,
            C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
        AtomicTy =
            C.getConstantArrayType(C.CharTy, Size, nullptr, ArrayType::Normal,
                                   /*IndexTypeQuals=*/0);
}
AtomicAlign = ValueAlign = lvalue.getAlignment();
} else if (lvalue.isVectorElt()) {
ValueTy = lvalue.getType()->castAs<VectorType>()->getElementType();
ValueSizeInBits = C.getTypeSize(ValueTy);
AtomicTy = lvalue.getType();
AtomicSizeInBits = C.getTypeSize(AtomicTy);
AtomicAlign = ValueAlign = lvalue.getAlignment();
LVal = lvalue;
} else {
assert(lvalue.isExtVectorElt());
ValueTy = lvalue.getType();
ValueSizeInBits = C.getTypeSize(ValueTy);
AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
lvalue.getType(), cast<llvm::FixedVectorType>(
lvalue.getExtVectorAddress().getElementType())
->getNumElements());
AtomicSizeInBits = C.getTypeSize(AtomicTy);
AtomicAlign = ValueAlign = lvalue.getAlignment();
LVal = lvalue;
}
UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
}
QualType getAtomicType() const { return AtomicTy; }
QualType getValueType() const { return ValueTy; }
CharUnits getAtomicAlignment() const { return AtomicAlign; }
uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
bool shouldUseLibcall() const { return UseLibcall; }
const LValue &getAtomicLValue() const { return LVal; }
llvm::Value *getAtomicPointer() const {
if (LVal.isSimple())
return LVal.getPointer(CGF);
else if (LVal.isBitField())
return LVal.getBitFieldPointer();
else if (LVal.isVectorElt())
return LVal.getVectorPointer();
assert(LVal.isExtVectorElt());
return LVal.getExtVectorPointer();
}
Address getAtomicAddress() const {
llvm::Type *ElTy;
if (LVal.isSimple())
ElTy = LVal.getAddress(CGF).getElementType();
else if (LVal.isBitField())
ElTy = LVal.getBitFieldAddress().getElementType();
else if (LVal.isVectorElt())
ElTy = LVal.getVectorAddress().getElementType();
else
ElTy = LVal.getExtVectorAddress().getElementType();
return Address(getAtomicPointer(), ElTy, getAtomicAlignment());
}
Address getAtomicAddressAsAtomicIntPointer() const {
return emitCastToAtomicIntPointer(getAtomicAddress());
}
bool hasPadding() const {
return (ValueSizeInBits != AtomicSizeInBits);
}
bool emitMemSetZeroIfNecessary() const;
llvm::Value *getAtomicSizeValue() const {
CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
return CGF.CGM.getSize(size);
}
Address emitCastToAtomicIntPointer(Address Addr) const;
Address convertToAtomicIntPointer(Address Addr) const;
RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
SourceLocation loc, bool AsValue) const;
llvm::Value *convertRValueToInt(RValue RVal) const;
RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
AggValueSlot ResultSlot,
SourceLocation Loc, bool AsValue) const;
void emitCopyIntoMemory(RValue rvalue) const;
LValue projectValue() const {
assert(LVal.isSimple());
Address addr = getAtomicAddress();
if (hasPadding())
addr = CGF.Builder.CreateStructGEP(addr, 0);
return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
LVal.getBaseInfo(), LVal.getTBAAInfo());
}
RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
bool AsValue, llvm::AtomicOrdering AO,
bool IsVolatile);
std::pair<RValue, llvm::Value *>
EmitAtomicCompareExchange(RValue Expected, RValue Desired,
llvm::AtomicOrdering Success =
llvm::AtomicOrdering::SequentiallyConsistent,
llvm::AtomicOrdering Failure =
llvm::AtomicOrdering::SequentiallyConsistent,
bool IsWeak = false);
void EmitAtomicUpdate(llvm::AtomicOrdering AO,
const llvm::function_ref<RValue(RValue)> &UpdateOp,
bool IsVolatile);
void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
bool IsVolatile);
Address materializeRValue(RValue rvalue) const;
Address CreateTempAlloca() const;
private:
bool requiresMemSetZero(llvm::Type *type) const;
void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
llvm::AtomicOrdering AO, bool IsVolatile);
llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
llvm::Value *EmitAtomicCompareExchangeLibcall(
llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
llvm::AtomicOrdering Success =
llvm::AtomicOrdering::SequentiallyConsistent,
llvm::AtomicOrdering Failure =
llvm::AtomicOrdering::SequentiallyConsistent);
std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
llvm::AtomicOrdering Success =
llvm::AtomicOrdering::SequentiallyConsistent,
llvm::AtomicOrdering Failure =
llvm::AtomicOrdering::SequentiallyConsistent,
bool IsWeak = false);
void
EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
const llvm::function_ref<RValue(RValue)> &UpdateOp,
bool IsVolatile);
void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
const llvm::function_ref<RValue(RValue)> &UpdateOp,
bool IsVolatile);
void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
bool IsVolatile);
  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                          bool IsVolatile);
};
}
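/// Create a temporary alloca wide enough to hold the atomic value; for a
/// bit-field whose value storage is wider than its atomic storage, the
/// temporary uses the value type instead.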
Address AtomicInfo::CreateTempAlloca() const {
Address TempAlloca = CGF.CreateMemTemp(
(LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
: AtomicTy,
getAtomicAlignment(),
"atomic-temp");
if (LVal.isBitField())
return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
TempAlloca, getAtomicAddress().getType(),
getAtomicAddress().getElementType());
return TempAlloca;
}
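/// Emit a call into the runtime atomic support library (e.g. the
/// "__atomic_load" and "__atomic_compare_exchange" calls used below). The
/// callee is marked nounwind and willreturn.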
static RValue emitAtomicLibcall(CodeGenFunction &CGF,
StringRef fnName,
QualType resultType,
CallArgList &args) {
const CGFunctionInfo &fnInfo =
CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
llvm::AttrBuilder fnAttrB(CGF.getLLVMContext());
fnAttrB.addAttribute(llvm::Attribute::NoUnwind);
fnAttrB.addAttribute(llvm::Attribute::WillReturn);
llvm::AttributeList fnAttrs = llvm::AttributeList::get(
CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, fnAttrB);
llvm::FunctionCallee fn =
CGF.CGM.CreateRuntimeFunction(fnTy, fnName, fnAttrs);
auto callee = CGCallee::forDirect(fn);
return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
}
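/// Does a store of the given IR type modify the full expected width?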
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
uint64_t expectedSize) {
return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}
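/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.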
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
if (hasPadding()) return true;
switch (getEvaluationKind()) {
case TEK_Scalar:
return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
case TEK_Complex:
return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
AtomicSizeInBits / 2);
case TEK_Aggregate:
return false;
}
llvm_unreachable("bad evaluation kind");
}
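/// If the atomic object requires zero-initialization of padding or of bits
/// beyond the stored type, memset it to zero first; returns whether a
/// memset was emitted.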
bool AtomicInfo::emitMemSetZeroIfNecessary() const {
assert(LVal.isSimple());
Address addr = LVal.getAddress(CGF);
if (!requiresMemSetZero(addr.getElementType()))
return false;
CGF.Builder.CreateMemSet(
addr.getPointer(), llvm::ConstantInt::get(CGF.Int8Ty, 0),
CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
LVal.getAlignment().getAsAlign());
return true;
}
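/// Emit an inline cmpxchg and, on failure, store the observed value back
/// into the expected-value buffer (Val1), matching the C11
/// atomic_compare_exchange contract.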
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
Address Dest, Address Ptr,
Address Val1, Address Val2,
uint64_t Size,
llvm::AtomicOrdering SuccessOrder,
llvm::AtomicOrdering FailureOrder,
llvm::SyncScope::ID Scope) {
llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);
llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder,
Scope);
Pair->setVolatile(E->isVolatile());
Pair->setWeak(IsWeak);
llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);
llvm::BasicBlock *StoreExpectedBB =
CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
llvm::BasicBlock *ContinueBB =
CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
CGF.Builder.SetInsertPoint(StoreExpectedBB);
CGF.Builder.CreateStore(Old, Val1);
CGF.Builder.CreateBr(ContinueBB);
CGF.Builder.SetInsertPoint(ContinueBB);
CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
}
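/// Given the ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically
/// known) failure ordering. Orderings that are invalid on failure
/// (release, acq_rel) are clamped to monotonic.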
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
bool IsWeak, Address Dest, Address Ptr,
Address Val1, Address Val2,
llvm::Value *FailureOrderVal,
uint64_t Size,
llvm::AtomicOrdering SuccessOrder,
llvm::SyncScope::ID Scope) {
llvm::AtomicOrdering FailureOrder;
if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
auto FOS = FO->getSExtValue();
if (!llvm::isValidAtomicOrderingCABI(FOS))
FailureOrder = llvm::AtomicOrdering::Monotonic;
else
switch ((llvm::AtomicOrderingCABI)FOS) {
case llvm::AtomicOrderingCABI::relaxed:
case llvm::AtomicOrderingCABI::release:
case llvm::AtomicOrderingCABI::acq_rel:
FailureOrder = llvm::AtomicOrdering::Monotonic;
break;
case llvm::AtomicOrderingCABI::consume:
case llvm::AtomicOrderingCABI::acquire:
FailureOrder = llvm::AtomicOrdering::Acquire;
break;
case llvm::AtomicOrderingCABI::seq_cst:
FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
break;
}
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
FailureOrder, Scope);
return;
}
auto *MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
auto *AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
auto *SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
auto *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
AcquireBB);
SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
AcquireBB);
SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
SeqCstBB);
CGF.Builder.SetInsertPoint(MonotonicBB);
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(AcquireBB);
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
llvm::AtomicOrdering::Acquire, Scope);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(SeqCstBB);
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
llvm::AtomicOrdering::SequentiallyConsistent, Scope);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(ContBB);
}
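/// Duplicate the atomic min/max operation in conventional IR for the builtin
/// variants that return the new value rather than the original one.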
static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
AtomicExpr::AtomicOp Op,
bool IsSigned,
llvm::Value *OldVal,
llvm::Value *RHS) {
llvm::CmpInst::Predicate Pred;
switch (Op) {
default:
llvm_unreachable("Unexpected min/max operation");
case AtomicExpr::AO__atomic_max_fetch:
Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
break;
case AtomicExpr::AO__atomic_min_fetch:
Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
break;
}
llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
}
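/// Emit a single atomic operation (load, store, exchange, rmw, or cmpxchg)
/// once the ordering and sync scope have been resolved to constants.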
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
Address Ptr, Address Val1, Address Val2,
llvm::Value *IsWeak, llvm::Value *FailureOrder,
uint64_t Size, llvm::AtomicOrdering Order,
llvm::SyncScope::ID Scope) {
llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
bool PostOpMinMax = false;
unsigned PostOp = 0;
switch (E->getOp()) {
case AtomicExpr::AO__c11_atomic_init:
case AtomicExpr::AO__opencl_atomic_init:
llvm_unreachable("Already handled!");
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
FailureOrder, Size, Order, Scope);
return;
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
FailureOrder, Size, Order, Scope);
return;
case AtomicExpr::AO__atomic_compare_exchange:
case AtomicExpr::AO__atomic_compare_exchange_n: {
if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
Val1, Val2, FailureOrder, Size, Order, Scope);
} else {
llvm::BasicBlock *StrongBB =
CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmxchg.weak", CGF.CurFn);
llvm::BasicBlock *ContBB =
CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
SI->addCase(CGF.Builder.getInt1(false), StrongBB);
CGF.Builder.SetInsertPoint(StrongBB);
emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
FailureOrder, Size, Order, Scope);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(WeakBB);
emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
FailureOrder, Size, Order, Scope);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(ContBB);
}
return;
}
case AtomicExpr::AO__c11_atomic_load:
case AtomicExpr::AO__opencl_atomic_load:
case AtomicExpr::AO__hip_atomic_load:
case AtomicExpr::AO__atomic_load_n:
case AtomicExpr::AO__atomic_load: {
llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
Load->setAtomic(Order, Scope);
Load->setVolatile(E->isVolatile());
CGF.Builder.CreateStore(Load, Dest);
return;
}
case AtomicExpr::AO__c11_atomic_store:
case AtomicExpr::AO__opencl_atomic_store:
case AtomicExpr::AO__hip_atomic_store:
case AtomicExpr::AO__atomic_store:
case AtomicExpr::AO__atomic_store_n: {
llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
Store->setAtomic(Order, Scope);
Store->setVolatile(E->isVolatile());
return;
}
case AtomicExpr::AO__c11_atomic_exchange:
case AtomicExpr::AO__hip_atomic_exchange:
case AtomicExpr::AO__opencl_atomic_exchange:
case AtomicExpr::AO__atomic_exchange_n:
case AtomicExpr::AO__atomic_exchange:
Op = llvm::AtomicRMWInst::Xchg;
break;
case AtomicExpr::AO__atomic_add_fetch:
PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FAdd
: llvm::Instruction::Add;
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__hip_atomic_fetch_add:
case AtomicExpr::AO__opencl_atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_add:
Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FAdd
: llvm::AtomicRMWInst::Add;
break;
case AtomicExpr::AO__atomic_sub_fetch:
PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FSub
: llvm::Instruction::Sub;
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__opencl_atomic_fetch_sub:
case AtomicExpr::AO__atomic_fetch_sub:
Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FSub
: llvm::AtomicRMWInst::Sub;
break;
case AtomicExpr::AO__atomic_min_fetch:
PostOpMinMax = true;
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_min:
case AtomicExpr::AO__hip_atomic_fetch_min:
case AtomicExpr::AO__opencl_atomic_fetch_min:
case AtomicExpr::AO__atomic_fetch_min:
Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
: llvm::AtomicRMWInst::UMin;
break;
case AtomicExpr::AO__atomic_max_fetch:
PostOpMinMax = true;
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_max:
case AtomicExpr::AO__hip_atomic_fetch_max:
case AtomicExpr::AO__opencl_atomic_fetch_max:
case AtomicExpr::AO__atomic_fetch_max:
Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
: llvm::AtomicRMWInst::UMax;
break;
case AtomicExpr::AO__atomic_and_fetch:
PostOp = llvm::Instruction::And;
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__hip_atomic_fetch_and:
case AtomicExpr::AO__opencl_atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_and:
Op = llvm::AtomicRMWInst::And;
break;
case AtomicExpr::AO__atomic_or_fetch:
PostOp = llvm::Instruction::Or;
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__hip_atomic_fetch_or:
case AtomicExpr::AO__opencl_atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_or:
Op = llvm::AtomicRMWInst::Or;
break;
case AtomicExpr::AO__atomic_xor_fetch:
PostOp = llvm::Instruction::Xor;
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__hip_atomic_fetch_xor:
case AtomicExpr::AO__opencl_atomic_fetch_xor:
case AtomicExpr::AO__atomic_fetch_xor:
Op = llvm::AtomicRMWInst::Xor;
break;
case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And; // the NOT is special cased below
    LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_nand:
case AtomicExpr::AO__atomic_fetch_nand:
Op = llvm::AtomicRMWInst::Nand;
break;
}
llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
llvm::AtomicRMWInst *RMWI =
CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order, Scope);
RMWI->setVolatile(E->isVolatile());
llvm::Value *Result = RMWI;
if (PostOpMinMax)
Result = EmitPostAtomicMinMax(CGF.Builder, E->getOp(),
E->getValueType()->isSignedIntegerType(),
RMWI, LoadVal1);
else if (PostOp)
Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
LoadVal1);
if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
Result = CGF.Builder.CreateNot(Result);
CGF.Builder.CreateStore(Result, Dest);
}
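/// Evaluate an expression into a fresh temporary so that it can be passed
/// to an atomic instruction or libcall by address.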
static Address
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
true);
return DeclPtr;
}
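/// Resolve the synchronization scope operand and dispatch to the scoped
/// EmitAtomicOp above: a statically known scope (e.g. an OpenCL
/// memory_scope_* constant) is mapped directly to an LLVM sync scope ID,
/// while a runtime value is lowered to a switch over all supported scopes.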
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
Address Ptr, Address Val1, Address Val2,
llvm::Value *IsWeak, llvm::Value *FailureOrder,
uint64_t Size, llvm::AtomicOrdering Order,
llvm::Value *Scope) {
auto ScopeModel = Expr->getScopeModel();
if (!ScopeModel) {
EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
Order, CGF.CGM.getLLVMContext().getOrInsertSyncScopeID(""));
return;
}
if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
Order, CGF.CGM.getLLVMContext());
EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
Order, SCID);
return;
}
auto &Builder = CGF.Builder;
auto Scopes = ScopeModel->getRuntimeValues();
llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
for (auto S : Scopes)
BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);
llvm::BasicBlock *ContBB =
CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);
auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
auto FallBack = ScopeModel->getFallBackValue();
llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
for (auto S : Scopes) {
auto *B = BB[S];
if (S != FallBack)
SI->addCase(Builder.getInt32(S), B);
Builder.SetInsertPoint(B);
EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
Order,
CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
ScopeModel->map(S),
Order,
CGF.getLLVMContext()));
Builder.CreateBr(ContBB);
}
Builder.SetInsertPoint(ContBB);
}
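/// Add an argument for an atomic libcall: optimized (sized) libcalls take
/// the value directly as an integer of the same width, while the generic
/// ones take it indirectly through a void pointer.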
static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
SourceLocation Loc, CharUnits SizeInChars) {
if (UseOptimizedLibcall) {
CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
    ValTy =
        CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
llvm::Type *ITy = llvm::IntegerType::get(CGF.getLLVMContext(), SizeInBits);
Address Ptr = Address(CGF.Builder.CreateBitCast(Val, ITy->getPointerTo()),
ITy, Align);
Val = CGF.EmitLoadOfScalar(Ptr, false,
CGF.getContext().getPointerType(ValTy),
Loc);
Args.add(RValue::get(Val), ValTy);
} else {
Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
CGF.getContext().VoidPtrTy);
}
}
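/// Emit IR for an atomic expression. Depending on the operand's size and
/// alignment relative to the target's inline-atomic limits, this lowers
/// either to native atomic instructions or to __atomic_* libcalls. As an
/// illustration (not emitted verbatim), on a typical 64-bit target
///   _Atomic(int) x;
///   __c11_atomic_fetch_add(&x, 1, __ATOMIC_SEQ_CST);
/// becomes a single "atomicrmw add ... seq_cst", whereas a misaligned or
/// oversized operand falls back to a runtime __atomic_* call.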
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
QualType MemTy = AtomicTy;
if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
MemTy = AT->getValueType();
llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;
Address Val1 = Address::invalid();
Address Val2 = Address::invalid();
Address Dest = Address::invalid();
Address Ptr = EmitPointerWithAlignment(E->getPtr());
if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
EmitAtomicInit(E->getVal1(), lvalue);
return RValue::get(nullptr);
}
auto TInfo = getContext().getTypeInfoInChars(AtomicTy);
uint64_t Size = TInfo.Width.getQuantity();
unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;
bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0;
bool UseLibcall = Misaligned | Oversized;
bool ShouldCastToIntPtrTy = true;
CharUnits MaxInlineWidth =
getContext().toCharUnitsFromBits(MaxInlineWidthInBits);
DiagnosticsEngine &Diags = CGM.getDiags();
if (Misaligned) {
Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
<< (int)TInfo.Width.getQuantity()
<< (int)Ptr.getAlignment().getQuantity();
}
if (Oversized) {
Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_oversized)
<< (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity();
}
llvm::Value *Order = EmitScalarExpr(E->getOrder());
llvm::Value *Scope =
E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;
switch (E->getOp()) {
case AtomicExpr::AO__c11_atomic_init:
case AtomicExpr::AO__opencl_atomic_init:
llvm_unreachable("Already handled above with EmitAtomicInit!");
case AtomicExpr::AO__c11_atomic_load:
case AtomicExpr::AO__opencl_atomic_load:
case AtomicExpr::AO__hip_atomic_load:
case AtomicExpr::AO__atomic_load_n:
break;
case AtomicExpr::AO__atomic_load:
Dest = EmitPointerWithAlignment(E->getVal1());
break;
case AtomicExpr::AO__atomic_store:
Val1 = EmitPointerWithAlignment(E->getVal1());
break;
case AtomicExpr::AO__atomic_exchange:
Val1 = EmitPointerWithAlignment(E->getVal1());
Dest = EmitPointerWithAlignment(E->getVal2());
break;
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
case AtomicExpr::AO__atomic_compare_exchange_n:
case AtomicExpr::AO__atomic_compare_exchange:
Val1 = EmitPointerWithAlignment(E->getVal1());
if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
Val2 = EmitPointerWithAlignment(E->getVal2());
else
Val2 = EmitValToTemp(*this, E->getVal2());
OrderFail = EmitScalarExpr(E->getOrderFail());
if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
IsWeak = EmitScalarExpr(E->getWeak());
break;
case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__hip_atomic_fetch_add:
case AtomicExpr::AO__opencl_atomic_fetch_add:
case AtomicExpr::AO__opencl_atomic_fetch_sub:
if (MemTy->isPointerType()) {
QualType Val1Ty = E->getVal1()->getType();
llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
CharUnits PointeeIncAmt =
getContext().getTypeSizeInChars(MemTy->getPointeeType());
Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
Val1 = Temp;
EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
break;
}
LLVM_FALLTHROUGH;
case AtomicExpr::AO__atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_sub:
case AtomicExpr::AO__atomic_add_fetch:
case AtomicExpr::AO__atomic_sub_fetch:
ShouldCastToIntPtrTy = !MemTy->isFloatingType();
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_store:
case AtomicExpr::AO__c11_atomic_exchange:
case AtomicExpr::AO__opencl_atomic_store:
case AtomicExpr::AO__hip_atomic_store:
case AtomicExpr::AO__opencl_atomic_exchange:
case AtomicExpr::AO__hip_atomic_exchange:
case AtomicExpr::AO__atomic_store_n:
case AtomicExpr::AO__atomic_exchange_n:
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__c11_atomic_fetch_nand:
case AtomicExpr::AO__c11_atomic_fetch_max:
case AtomicExpr::AO__c11_atomic_fetch_min:
case AtomicExpr::AO__opencl_atomic_fetch_and:
case AtomicExpr::AO__opencl_atomic_fetch_or:
case AtomicExpr::AO__opencl_atomic_fetch_xor:
case AtomicExpr::AO__opencl_atomic_fetch_min:
case AtomicExpr::AO__opencl_atomic_fetch_max:
case AtomicExpr::AO__atomic_fetch_and:
case AtomicExpr::AO__hip_atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_or:
case AtomicExpr::AO__hip_atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_xor:
case AtomicExpr::AO__hip_atomic_fetch_xor:
case AtomicExpr::AO__atomic_fetch_nand:
case AtomicExpr::AO__atomic_and_fetch:
case AtomicExpr::AO__atomic_or_fetch:
case AtomicExpr::AO__atomic_xor_fetch:
case AtomicExpr::AO__atomic_nand_fetch:
case AtomicExpr::AO__atomic_max_fetch:
case AtomicExpr::AO__atomic_min_fetch:
case AtomicExpr::AO__atomic_fetch_max:
case AtomicExpr::AO__hip_atomic_fetch_max:
case AtomicExpr::AO__atomic_fetch_min:
case AtomicExpr::AO__hip_atomic_fetch_min:
Val1 = EmitValToTemp(*this, E->getVal1());
break;
}
QualType RValTy = E->getType().getUnqualifiedType();
LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
AtomicInfo Atomics(*this, AtomicVal);
if (ShouldCastToIntPtrTy) {
Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
if (Val1.isValid())
Val1 = Atomics.convertToAtomicIntPointer(Val1);
if (Val2.isValid())
Val2 = Atomics.convertToAtomicIntPointer(Val2);
}
if (Dest.isValid()) {
if (ShouldCastToIntPtrTy)
Dest = Atomics.emitCastToAtomicIntPointer(Dest);
} else if (E->isCmpXChg())
Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
else if (!RValTy->isVoidType()) {
Dest = Atomics.CreateTempAlloca();
if (ShouldCastToIntPtrTy)
Dest = Atomics.emitCastToAtomicIntPointer(Dest);
}
if (UseLibcall) {
bool UseOptimizedLibcall = false;
switch (E->getOp()) {
case AtomicExpr::AO__c11_atomic_init:
case AtomicExpr::AO__opencl_atomic_init:
llvm_unreachable("Already handled above with EmitAtomicInit!");
case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__opencl_atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_add:
case AtomicExpr::AO__hip_atomic_fetch_add:
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__opencl_atomic_fetch_and:
case AtomicExpr::AO__hip_atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_and:
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__opencl_atomic_fetch_or:
case AtomicExpr::AO__hip_atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_or:
case AtomicExpr::AO__c11_atomic_fetch_nand:
case AtomicExpr::AO__atomic_fetch_nand:
case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__opencl_atomic_fetch_sub:
case AtomicExpr::AO__atomic_fetch_sub:
case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__opencl_atomic_fetch_xor:
case AtomicExpr::AO__opencl_atomic_fetch_min:
case AtomicExpr::AO__opencl_atomic_fetch_max:
case AtomicExpr::AO__atomic_fetch_xor:
case AtomicExpr::AO__hip_atomic_fetch_xor:
case AtomicExpr::AO__c11_atomic_fetch_max:
case AtomicExpr::AO__c11_atomic_fetch_min:
case AtomicExpr::AO__atomic_add_fetch:
case AtomicExpr::AO__atomic_and_fetch:
case AtomicExpr::AO__atomic_nand_fetch:
case AtomicExpr::AO__atomic_or_fetch:
case AtomicExpr::AO__atomic_sub_fetch:
case AtomicExpr::AO__atomic_xor_fetch:
case AtomicExpr::AO__atomic_fetch_max:
case AtomicExpr::AO__hip_atomic_fetch_max:
case AtomicExpr::AO__atomic_fetch_min:
case AtomicExpr::AO__hip_atomic_fetch_min:
case AtomicExpr::AO__atomic_max_fetch:
case AtomicExpr::AO__atomic_min_fetch:
UseOptimizedLibcall = true;
break;
case AtomicExpr::AO__atomic_load:
case AtomicExpr::AO__atomic_store:
case AtomicExpr::AO__atomic_exchange:
case AtomicExpr::AO__atomic_compare_exchange:
if (Misaligned)
break;
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_load:
case AtomicExpr::AO__c11_atomic_store:
case AtomicExpr::AO__c11_atomic_exchange:
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
case AtomicExpr::AO__opencl_atomic_load:
case AtomicExpr::AO__hip_atomic_load:
case AtomicExpr::AO__opencl_atomic_store:
case AtomicExpr::AO__hip_atomic_store:
case AtomicExpr::AO__opencl_atomic_exchange:
case AtomicExpr::AO__hip_atomic_exchange:
case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
case AtomicExpr::AO__atomic_load_n:
case AtomicExpr::AO__atomic_store_n:
case AtomicExpr::AO__atomic_exchange_n:
case AtomicExpr::AO__atomic_compare_exchange_n:
if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
UseOptimizedLibcall = true;
break;
}
CallArgList Args;
if (!UseOptimizedLibcall) {
Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
getContext().getSizeType());
}
auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
if (!E->isOpenCL())
return V;
auto AS = PT->castAs<PointerType>()->getPointeeType().getAddressSpace();
if (AS == LangAS::opencl_generic)
return V;
auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
auto T = llvm::cast<llvm::PointerType>(V->getType());
auto *DestType = llvm::PointerType::getWithSamePointeeType(T, DestAS);
return getTargetHooks().performAddrSpaceCast(
*this, V, AS, LangAS::opencl_generic, DestType, false);
};
Args.add(RValue::get(CastToGenericAddrSpace(
EmitCastToVoidPtr(Ptr.getPointer()), E->getPtr()->getType())),
getContext().VoidPtrTy);
std::string LibCallName;
QualType LoweredMemTy =
MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
QualType RetTy;
bool HaveRetTy = false;
llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
bool PostOpMinMax = false;
switch (E->getOp()) {
case AtomicExpr::AO__c11_atomic_init:
case AtomicExpr::AO__opencl_atomic_init:
llvm_unreachable("Already handled!");
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
case AtomicExpr::AO__atomic_compare_exchange:
case AtomicExpr::AO__atomic_compare_exchange_n:
LibCallName = "__atomic_compare_exchange";
RetTy = getContext().BoolTy;
HaveRetTy = true;
Args.add(
RValue::get(CastToGenericAddrSpace(
EmitCastToVoidPtr(Val1.getPointer()), E->getVal1()->getType())),
getContext().VoidPtrTy);
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
MemTy, E->getExprLoc(), TInfo.Width);
Args.add(RValue::get(Order), getContext().IntTy);
Order = OrderFail;
break;
case AtomicExpr::AO__c11_atomic_exchange:
case AtomicExpr::AO__opencl_atomic_exchange:
case AtomicExpr::AO__atomic_exchange_n:
case AtomicExpr::AO__atomic_exchange:
case AtomicExpr::AO__hip_atomic_exchange:
LibCallName = "__atomic_exchange";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
MemTy, E->getExprLoc(), TInfo.Width);
break;
case AtomicExpr::AO__c11_atomic_store:
case AtomicExpr::AO__opencl_atomic_store:
case AtomicExpr::AO__hip_atomic_store:
case AtomicExpr::AO__atomic_store:
case AtomicExpr::AO__atomic_store_n:
LibCallName = "__atomic_store";
RetTy = getContext().VoidTy;
HaveRetTy = true;
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
MemTy, E->getExprLoc(), TInfo.Width);
break;
case AtomicExpr::AO__c11_atomic_load:
case AtomicExpr::AO__opencl_atomic_load:
case AtomicExpr::AO__hip_atomic_load:
case AtomicExpr::AO__atomic_load:
case AtomicExpr::AO__atomic_load_n:
LibCallName = "__atomic_load";
break;
case AtomicExpr::AO__atomic_add_fetch:
PostOp = llvm::Instruction::Add;
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__opencl_atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_add:
case AtomicExpr::AO__hip_atomic_fetch_add:
LibCallName = "__atomic_fetch_add";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
LoweredMemTy, E->getExprLoc(), TInfo.Width);
break;
case AtomicExpr::AO__atomic_and_fetch:
PostOp = llvm::Instruction::And;
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__opencl_atomic_fetch_and:
case AtomicExpr::AO__hip_atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_and:
LibCallName = "__atomic_fetch_and";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
MemTy, E->getExprLoc(), TInfo.Width);
break;
case AtomicExpr::AO__atomic_or_fetch:
PostOp = llvm::Instruction::Or;
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__opencl_atomic_fetch_or:
case AtomicExpr::AO__hip_atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_or:
LibCallName = "__atomic_fetch_or";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
MemTy, E->getExprLoc(), TInfo.Width);
break;
case AtomicExpr::AO__atomic_sub_fetch:
PostOp = llvm::Instruction::Sub;
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__opencl_atomic_fetch_sub:
case AtomicExpr::AO__atomic_fetch_sub:
LibCallName = "__atomic_fetch_sub";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
LoweredMemTy, E->getExprLoc(), TInfo.Width);
break;
case AtomicExpr::AO__atomic_xor_fetch:
PostOp = llvm::Instruction::Xor;
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__opencl_atomic_fetch_xor:
case AtomicExpr::AO__hip_atomic_fetch_xor:
case AtomicExpr::AO__atomic_fetch_xor:
LibCallName = "__atomic_fetch_xor";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
MemTy, E->getExprLoc(), TInfo.Width);
break;
case AtomicExpr::AO__atomic_min_fetch:
PostOpMinMax = true;
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_min:
case AtomicExpr::AO__atomic_fetch_min:
case AtomicExpr::AO__hip_atomic_fetch_min:
case AtomicExpr::AO__opencl_atomic_fetch_min:
LibCallName = E->getValueType()->isSignedIntegerType()
? "__atomic_fetch_min"
: "__atomic_fetch_umin";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
LoweredMemTy, E->getExprLoc(), TInfo.Width);
break;
case AtomicExpr::AO__atomic_max_fetch:
PostOpMinMax = true;
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_max:
case AtomicExpr::AO__atomic_fetch_max:
case AtomicExpr::AO__hip_atomic_fetch_max:
case AtomicExpr::AO__opencl_atomic_fetch_max:
LibCallName = E->getValueType()->isSignedIntegerType()
? "__atomic_fetch_max"
: "__atomic_fetch_umax";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
LoweredMemTy, E->getExprLoc(), TInfo.Width);
break;
case AtomicExpr::AO__atomic_nand_fetch:
      PostOp = llvm::Instruction::And; // the NOT is special cased below
      LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_nand:
case AtomicExpr::AO__atomic_fetch_nand:
LibCallName = "__atomic_fetch_nand";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
MemTy, E->getExprLoc(), TInfo.Width);
break;
}
if (E->isOpenCL()) {
LibCallName = std::string("__opencl") +
StringRef(LibCallName).drop_front(1).str();
}
if (UseOptimizedLibcall)
LibCallName += "_" + llvm::utostr(Size);
if (!HaveRetTy) {
if (UseOptimizedLibcall) {
RetTy = getContext().getIntTypeForBitwidth(
getContext().toBits(TInfo.Width), false);
} else {
RetTy = getContext().VoidTy;
Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
getContext().VoidPtrTy);
}
}
Args.add(RValue::get(Order),
getContext().IntTy);
if (E->isOpenCL())
Args.add(RValue::get(Scope), getContext().IntTy);
assert(UseOptimizedLibcall || (!PostOp && !PostOpMinMax));
RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
if (E->isCmpXChg())
return Res;
if (UseOptimizedLibcall && Res.getScalarVal()) {
llvm::Value *ResVal = Res.getScalarVal();
if (PostOpMinMax) {
llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
ResVal = EmitPostAtomicMinMax(Builder, E->getOp(),
E->getValueType()->isSignedIntegerType(),
ResVal, LoadVal1);
} else if (PostOp) {
llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
}
if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
ResVal = Builder.CreateNot(ResVal);
Builder.CreateStore(
ResVal, Builder.CreateElementBitCast(Dest, ResVal->getType()));
}
if (RValTy->isVoidType())
return RValue::get(nullptr);
return convertTempToRValue(
Builder.CreateElementBitCast(Dest, ConvertTypeForMem(RValTy)),
RValTy, E->getExprLoc());
}
bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
E->getOp() == AtomicExpr::AO__hip_atomic_store ||
E->getOp() == AtomicExpr::AO__atomic_store ||
E->getOp() == AtomicExpr::AO__atomic_store_n;
bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
E->getOp() == AtomicExpr::AO__hip_atomic_load ||
E->getOp() == AtomicExpr::AO__atomic_load ||
E->getOp() == AtomicExpr::AO__atomic_load_n;
if (isa<llvm::ConstantInt>(Order)) {
auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
if (llvm::isValidAtomicOrderingCABI(ord))
switch ((llvm::AtomicOrderingCABI)ord) {
case llvm::AtomicOrderingCABI::relaxed:
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
llvm::AtomicOrdering::Monotonic, Scope);
break;
case llvm::AtomicOrderingCABI::consume:
case llvm::AtomicOrderingCABI::acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior.
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                   llvm::AtomicOrdering::Acquire, Scope);
      break;
case llvm::AtomicOrderingCABI::release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior.
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                   llvm::AtomicOrdering::Release, Scope);
      break;
case llvm::AtomicOrderingCABI::acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior.
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                   llvm::AtomicOrdering::AcquireRelease, Scope);
      break;
case llvm::AtomicOrderingCABI::seq_cst:
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
llvm::AtomicOrdering::SequentiallyConsistent, Scope);
break;
}
if (RValTy->isVoidType())
return RValue::get(nullptr);
return convertTempToRValue(
Builder.CreateElementBitCast(Dest, ConvertTypeForMem(RValTy)),
RValTy, E->getExprLoc());
}
llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
*ReleaseBB = nullptr, *AcqRelBB = nullptr,
*SeqCstBB = nullptr;
MonotonicBB = createBasicBlock("monotonic", CurFn);
if (!IsStore)
AcquireBB = createBasicBlock("acquire", CurFn);
if (!IsLoad)
ReleaseBB = createBasicBlock("release", CurFn);
if (!IsLoad && !IsStore)
AcqRelBB = createBasicBlock("acqrel", CurFn);
SeqCstBB = createBasicBlock("seqcst", CurFn);
llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
Builder.SetInsertPoint(MonotonicBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
llvm::AtomicOrdering::Monotonic, Scope);
Builder.CreateBr(ContBB);
if (!IsStore) {
Builder.SetInsertPoint(AcquireBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
llvm::AtomicOrdering::Acquire, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
AcquireBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
AcquireBB);
}
if (!IsLoad) {
Builder.SetInsertPoint(ReleaseBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
llvm::AtomicOrdering::Release, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
ReleaseBB);
}
if (!IsLoad && !IsStore) {
Builder.SetInsertPoint(AcqRelBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
llvm::AtomicOrdering::AcquireRelease, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
AcqRelBB);
}
Builder.SetInsertPoint(SeqCstBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
llvm::AtomicOrdering::SequentiallyConsistent, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
SeqCstBB);
Builder.SetInsertPoint(ContBB);
if (RValTy->isVoidType())
return RValue::get(nullptr);
assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
return convertTempToRValue(
Builder.CreateElementBitCast(Dest, ConvertTypeForMem(RValTy)),
RValTy, E->getExprLoc());
}
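/// Cast an address to an integer pointer exactly as wide as the atomic
/// storage, so that the value can be loaded and stored as an integer.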
Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
llvm::IntegerType *ty =
llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
return CGF.Builder.CreateElementBitCast(addr, ty);
}
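/// Like emitCastToAtomicIntPointer, but copies the source into a
/// full-width temporary first if its size differs from the atomic width.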
Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
llvm::Type *Ty = Addr.getElementType();
uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
if (SourceSizeInBits != AtomicSizeInBits) {
Address Tmp = CreateTempAlloca();
CGF.Builder.CreateMemCpy(Tmp, Addr,
std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
Addr = Tmp;
}
return emitCastToAtomicIntPointer(Addr);
}
RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
AggValueSlot resultSlot,
SourceLocation loc,
bool asValue) const {
if (LVal.isSimple()) {
if (EvaluationKind == TEK_Aggregate)
return resultSlot.asRValue();
if (hasPadding())
addr = CGF.Builder.CreateStructGEP(addr, 0);
return CGF.convertTempToRValue(addr, getValueType(), loc);
}
if (!asValue)
return RValue::get(CGF.Builder.CreateLoad(addr));
if (LVal.isBitField())
return CGF.EmitLoadOfBitfieldLValue(
LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
LVal.getBaseInfo(), TBAAAccessInfo()), loc);
if (LVal.isVectorElt())
return CGF.EmitLoadOfLValue(
LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
LVal.getBaseInfo(), TBAAAccessInfo()), loc);
assert(LVal.isExtVectorElt());
return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
addr, LVal.getExtVectorElts(), LVal.getType(),
LVal.getBaseInfo(), TBAAAccessInfo()));
}
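/// Convert an integer produced by an atomic operation back into the
/// original value representation, either directly in the scalar case or by
/// spilling through a temporary.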
RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
AggValueSlot ResultSlot,
SourceLocation Loc,
bool AsValue) const {
assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
if (getEvaluationKind() == TEK_Scalar &&
(((!LVal.isBitField() ||
LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
!hasPadding()) ||
!AsValue)) {
auto *ValTy = AsValue
? CGF.ConvertTypeForMem(ValueTy)
: getAtomicAddress().getElementType();
if (ValTy->isIntegerTy()) {
assert(IntVal->getType() == ValTy && "Different integer types.");
return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
} else if (ValTy->isPointerTy())
return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
}
Address Temp = Address::invalid();
bool TempIsVolatile = false;
if (AsValue && getEvaluationKind() == TEK_Aggregate) {
assert(!ResultSlot.isIgnored());
Temp = ResultSlot.getAddress();
TempIsVolatile = ResultSlot.isVolatile();
} else {
Temp = CreateTempAlloca();
}
Address CastTemp = emitCastToAtomicIntPointer(Temp);
CGF.Builder.CreateStore(IntVal, CastTemp)
->setVolatile(TempIsVolatile);
return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
}
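/// Emit the generic form of the atomic load libcall:
///   void __atomic_load(size_t size, void *ptr, void *ret, int ordering);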
void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
llvm::AtomicOrdering AO, bool) {
CallArgList Args;
Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
CGF.getContext().VoidPtrTy);
Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)),
CGF.getContext().VoidPtrTy);
Args.add(
RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
CGF.getContext().IntTy);
emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
}
llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
bool IsVolatile) {
Address Addr = getAtomicAddressAsAtomicIntPointer();
llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
Load->setAtomic(AO);
if (IsVolatile)
Load->setVolatile(true);
CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
return Load;
}
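/// An LValue is a candidate for having its loads and stores be made atomic
/// if we are operating under /volatile:ms *and* the LValue itself is
/// volatile and performing such an operation can be performed without a
/// libcall.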
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
if (!CGM.getCodeGenOpts().MSVolatile) return false;
AtomicInfo AI(*this, LV);
bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
bool AtomicIsInline = !AI.shouldUseLibcall();
if (getContext().getTypeSize(LV.getType()) >
getContext().getTypeSize(getContext().getIntPtrType()))
return false;
return IsVolatile && AtomicIsInline;
}
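/// Emit an atomic load with default ordering: seq_cst for a genuine atomic
/// type, or acquire-plus-volatile for the MS-volatile case admitted by
/// LValueIsSuitableForInlineAtomic above.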
RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
AggValueSlot Slot) {
llvm::AtomicOrdering AO;
bool IsVolatile = LV.isVolatileQualified();
if (LV.getType()->isAtomicType()) {
AO = llvm::AtomicOrdering::SequentiallyConsistent;
} else {
AO = llvm::AtomicOrdering::Acquire;
IsVolatile = true;
}
return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
}
RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
bool AsValue, llvm::AtomicOrdering AO,
bool IsVolatile) {
if (shouldUseLibcall()) {
Address TempAddr = Address::invalid();
if (LVal.isSimple() && !ResultSlot.isIgnored()) {
assert(getEvaluationKind() == TEK_Aggregate);
TempAddr = ResultSlot.getAddress();
} else
TempAddr = CreateTempAlloca();
EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);
return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
}
auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
return RValue::getAggregate(Address::invalid(), false);
return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
}
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
llvm::AtomicOrdering AO, bool IsVolatile,
AggValueSlot resultSlot) {
AtomicInfo Atomics(*this, src);
  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
                                IsVolatile);
}
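/// Copy an r-value into memory as part of storing to an atomic type,
/// zeroing any padding first so the unused bits are in a known state.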
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
assert(LVal.isSimple());
if (rvalue.isAggregate()) {
LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
getAtomicType());
bool IsVolatile = rvalue.isVolatileQualified() ||
LVal.isVolatileQualified();
CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
AggValueSlot::DoesNotOverlap, IsVolatile);
return;
}
emitMemSetZeroIfNecessary();
LValue TempLVal = projectValue();
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
  }
}
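/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.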
Address AtomicInfo::materializeRValue(RValue rvalue) const {
if (rvalue.isAggregate())
return rvalue.getAggregateAddress();
LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
AtomicInfo Atomics(CGF, TempLV);
Atomics.emitCopyIntoMemory(rvalue);
return TempLV.getAddress(CGF);
}
llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
llvm::Value *Value = RVal.getScalarVal();
if (isa<llvm::IntegerType>(Value->getType()))
return CGF.EmitToMemory(Value, ValueTy);
else {
llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
CGF.getLLVMContext(),
LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
if (isa<llvm::PointerType>(Value->getType()))
return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
return CGF.Builder.CreateBitCast(Value, InputIntTy);
}
}
Address Addr = materializeRValue(RVal);
Addr = emitCastToAtomicIntPointer(Addr);
return CGF.Builder.CreateLoad(Addr);
}
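/// Emit an inline cmpxchg on the atomic integer representation and return
/// the (previous value, success flag) pair.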
std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
Address Addr = getAtomicAddressAsAtomicIntPointer();
auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
ExpectedVal, DesiredVal,
Success, Failure);
Inst->setVolatile(LVal.isVolatileQualified());
Inst->setWeak(IsWeak);
auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, 0);
auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, 1);
return std::make_pair(PreviousVal, SuccessFailureVal);
}
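/// Emit the generic compare-exchange libcall:
///   bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
///                                  void *desired, int success_order,
///                                  int failure_order);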
llvm::Value *
AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
llvm::Value *DesiredAddr,
llvm::AtomicOrdering Success,
llvm::AtomicOrdering Failure) {
CallArgList Args;
Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
CGF.getContext().VoidPtrTy);
Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
CGF.getContext().VoidPtrTy);
Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
CGF.getContext().VoidPtrTy);
Args.add(RValue::get(
llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
CGF.getContext().IntTy);
Args.add(RValue::get(
llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
CGF.getContext().IntTy);
auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
CGF.getContext().BoolTy, Args);
return SuccessFailureRVal.getScalarVal();
}
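/// Emit a compare-and-exchange, returning the observed value and the
/// success flag; falls back to the libcall when the operation cannot be
/// done inline.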
std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
llvm::AtomicOrdering Failure, bool IsWeak) {
if (shouldUseLibcall()) {
Address ExpectedAddr = materializeRValue(Expected);
Address DesiredAddr = materializeRValue(Desired);
auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
DesiredAddr.getPointer(),
Success, Failure);
return std::make_pair(
convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
SourceLocation(), false),
Res);
}
auto *ExpectedVal = convertRValueToInt(Expected);
auto *DesiredVal = convertRValueToInt(Desired);
auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
Failure, IsWeak);
return std::make_pair(
ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
SourceLocation(), false),
Res.second);
}
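/// Apply UpdateOp to the current value and write the result to DesiredAddr,
/// going through the appropriate bit-field or vector-element projection of
/// the atomic l-value when necessary.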
static void
EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
const llvm::function_ref<RValue(RValue)> &UpdateOp,
Address DesiredAddr) {
RValue UpRVal;
LValue AtomicLVal = Atomics.getAtomicLValue();
LValue DesiredLVal;
if (AtomicLVal.isSimple()) {
UpRVal = OldRVal;
DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
} else {
Address Ptr = Atomics.materializeRValue(OldRVal);
LValue UpdateLVal;
if (AtomicLVal.isBitField()) {
UpdateLVal =
LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
AtomicLVal.getType(),
AtomicLVal.getBaseInfo(),
AtomicLVal.getTBAAInfo());
DesiredLVal =
LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
AtomicLVal.getTBAAInfo());
} else if (AtomicLVal.isVectorElt()) {
UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
AtomicLVal.getType(),
AtomicLVal.getBaseInfo(),
AtomicLVal.getTBAAInfo());
DesiredLVal = LValue::MakeVectorElt(
DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
} else {
assert(AtomicLVal.isExtVectorElt());
UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
AtomicLVal.getType(),
AtomicLVal.getBaseInfo(),
AtomicLVal.getTBAAInfo());
DesiredLVal = LValue::MakeExtVectorElt(
DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
}
UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
}
RValue NewRVal = UpdateOp(UpRVal);
if (NewRVal.isScalar()) {
CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
} else {
assert(NewRVal.isComplex());
    CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
                           /*isInit=*/false);
}
}
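/// Perform an atomic read-modify-update via libcalls; in pseudocode:
///   expected = __atomic_load(mem);
///   do {
///     desired = UpdateOp(expected);
///   } while (!__atomic_compare_exchange(mem, &expected, &desired, ...));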
void AtomicInfo::EmitAtomicUpdateLibcall(
llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
bool IsVolatile) {
auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
Address ExpectedAddr = CreateTempAlloca();
EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
auto *ContBB = CGF.createBasicBlock("atomic_cont");
auto *ExitBB = CGF.createBasicBlock("atomic_exit");
CGF.EmitBlock(ContBB);
Address DesiredAddr = CreateTempAlloca();
if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
requiresMemSetZero(getAtomicAddress().getElementType())) {
auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
CGF.Builder.CreateStore(OldVal, DesiredAddr);
}
auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
AggValueSlot::ignored(),
SourceLocation(), false);
EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
auto *Res =
EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
DesiredAddr.getPointer(),
AO, Failure);
CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
CGF.EmitBlock(ExitBB, true);
}
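/// The inline variant of the update loop above: a phi over the previously
/// observed value feeds a cmpxchg that retries until it succeeds.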
void AtomicInfo::EmitAtomicUpdateOp(
llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
bool IsVolatile) {
auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
auto *ContBB = CGF.createBasicBlock("atomic_cont");
auto *ExitBB = CGF.createBasicBlock("atomic_exit");
auto *CurBB = CGF.Builder.GetInsertBlock();
CGF.EmitBlock(ContBB);
llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
2);
PHI->addIncoming(OldVal, CurBB);
Address NewAtomicAddr = CreateTempAlloca();
Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  // As in the libcall path: carry over the old bytes when only part of the
  // storage will be rewritten.
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
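/// Variant of EmitAtomicUpdateValue for an update value that does not depend
/// on the old value: store the precomputed scalar into \p DesiredAddr
/// through a bit-field or vector-element l-value of the appropriate shape.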
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
RValue UpdateRVal, Address DesiredAddr) {
LValue AtomicLVal = Atomics.getAtomicLValue();
LValue DesiredLVal;
if (AtomicLVal.isBitField()) {
DesiredLVal =
LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
AtomicLVal.getTBAAInfo());
} else if (AtomicLVal.isVectorElt()) {
DesiredLVal =
LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
AtomicLVal.getTBAAInfo());
} else {
assert(AtomicLVal.isExtVectorElt());
DesiredLVal = LValue::MakeExtVectorElt(
DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
}
assert(UpdateRVal.isScalar());
CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
}
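/// Libcall-based compare-exchange loop for installing a precomputed value
/// into a sub-object (bit-field or vector element) of an atomic.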
void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
RValue UpdateRVal, bool IsVolatile) {
auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
Address ExpectedAddr = CreateTempAlloca();
EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
auto *ContBB = CGF.createBasicBlock("atomic_cont");
auto *ExitBB = CGF.createBasicBlock("atomic_exit");
CGF.EmitBlock(ContBB);
Address DesiredAddr = CreateTempAlloca();
if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
requiresMemSetZero(getAtomicAddress().getElementType())) {
auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
CGF.Builder.CreateStore(OldVal, DesiredAddr);
}
EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
auto *Res =
EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
DesiredAddr.getPointer(),
AO, Failure);
CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
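/// Inline cmpxchg loop for installing a precomputed value into a sub-object
/// of an atomic.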
void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
bool IsVolatile) {
auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
auto *ContBB = CGF.createBasicBlock("atomic_cont");
auto *ExitBB = CGF.createBasicBlock("atomic_exit");
auto *CurBB = CGF.Builder.GetInsertBlock();
CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
PHI->addIncoming(OldVal, CurBB);
Address NewAtomicAddr = CreateTempAlloca();
Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
requiresMemSetZero(getAtomicAddress().getElementType())) {
CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
}
EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
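/// Dispatch an atomic update to the libcall or the inline-cmpxchg
/// implementation, depending on whether this atomic has to go through the
/// atomic library.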
void AtomicInfo::EmitAtomicUpdate(
llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
bool IsVolatile) {
if (shouldUseLibcall()) {
EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
} else {
EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
}
}
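/// As above, for an update value that does not depend on the old value.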
void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
bool IsVolatile) {
if (shouldUseLibcall()) {
EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
} else {
EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
}
}
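/// Emit a store to an l-value of atomic type using the default ordering:
/// sequentially consistent for genuine atomic types, otherwise a release
/// store that is additionally treated as volatile.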
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
bool isInit) {
bool IsVolatile = lvalue.isVolatileQualified();
llvm::AtomicOrdering AO;
if (lvalue.getType()->isAtomicType()) {
AO = llvm::AtomicOrdering::SequentiallyConsistent;
} else {
AO = llvm::AtomicOrdering::Release;
IsVolatile = true;
}
return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
}
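/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic type*;
/// this means that for aggregate r-values, it should include storage for any
/// padding that was necessary.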
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
llvm::AtomicOrdering AO, bool IsVolatile,
bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddress().getElementType() ==
             dest.getAddress(*this).getElementType());
AtomicInfo atomics(*this, dest);
LValue LVal = atomics.getAtomicLValue();
  if (LVal.isSimple()) {
    // If this is an initialization, just put the value there normally.
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }

    // Check whether we should use a library call.
    if (atomics.shouldUseLibcall()) {
      // Produce a source address.
      Address srcAddr = atomics.materializeRValue(rvalue);

      // void __atomic_store(size_t size, void *mem, void *val, int order)
      CallArgList args;
args.add(RValue::get(atomics.getAtomicSizeValue()),
getContext().getSizeType());
args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
getContext().VoidPtrTy);
args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
getContext().VoidPtrTy);
args.add(
RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
getContext().IntTy);
emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
return;
}
    // Okay, we're doing this natively.
    llvm::Value *intValue = atomics.convertRValueToInt(rvalue);

    // Do the atomic store: cast the address to the padded integer type and
    // store the value as that integer.
    Address addr =
        atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
    intValue = Builder.CreateIntCast(
        intValue, addr.getElementType(), /*isSigned=*/false);
    llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
    // A store can't have acquire semantics; strip the acquire half of the
    // ordering.
    if (AO == llvm::AtomicOrdering::Acquire)
      AO = llvm::AtomicOrdering::Monotonic;
    else if (AO == llvm::AtomicOrdering::AcquireRelease)
      AO = llvm::AtomicOrdering::Release;
    // Initializations don't need to be atomic.
    if (!isInit)
      store->setAtomic(AO);

    // Other decoration.
    if (IsVolatile)
      store->setVolatile(true);
    CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
    return;
}
  // Otherwise, the l-value is a bit-field or vector element: emit the store
  // as an atomic update of that sub-object.
  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
}
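/// Emit a compare-and-exchange operation for an atomic type. Returns the old
/// value loaded from \p Obj and an i1 indicating whether the exchange
/// succeeded.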
std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
AggValueSlot Slot) {
assert(!Expected.isAggregate() ||
Expected.getAggregateAddress().getElementType() ==
Obj.getAddress(*this).getElementType());
assert(!Desired.isAggregate() ||
Desired.getAggregateAddress().getElementType() ==
Obj.getAddress(*this).getElementType());
AtomicInfo Atomics(*this, Obj);
return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
IsWeak);
}
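/// Atomically apply \p UpdateOp to the value stored at \p LVal.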
void CodeGenFunction::EmitAtomicUpdate(
LValue LVal, llvm::AtomicOrdering AO,
const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
AtomicInfo Atomics(*this, LVal);
Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
}
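/// Emit the initialization of an l-value of atomic type. The store itself
/// need not be atomic, but padding may have to be zeroed and aggregate
/// initializers are evaluated directly into place.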
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
AtomicInfo atomics(*this, dest);
switch (atomics.getEvaluationKind()) {
case TEK_Scalar: {
llvm::Value *value = EmitScalarExpr(init);
atomics.emitCopyIntoMemory(RValue::get(value));
return;
}
case TEK_Complex: {
ComplexPairTy value = EmitComplexExpr(init);
atomics.emitCopyIntoMemory(RValue::getComplex(value));
return;
}
  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression of
    // atomic type: zero out any padding and project out the value sub-object.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(
        dest, *this, AggValueSlot::IsNotDestructed,
        AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
        AggValueSlot::DoesNotOverlap,
        Zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);
    EmitAggExpr(init, slot);
    return;
  }
}
llvm_unreachable("bad evaluation kind");
}