#include "SIISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;
#define DEBUG_TYPE "si-lower"
STATISTIC(NumTailCalls, "Number of tail calls");
static cl::opt<bool> DisableLoopAlignment(
"amdgpu-disable-loop-alignment",
cl::desc("Do not align and prefetch loops"),
cl::init(false));
static cl::opt<bool> UseDivergentRegisterIndexing(
"amdgpu-use-divergent-register-indexing",
cl::Hidden,
cl::desc("Use indirect register addressing for divergent indexes"),
cl::init(false));
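// Query whether the function's floating-point mode keeps FP32 denormals
// enabled.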
static bool hasFP32Denormals(const MachineFunction &MF) {
const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
return Info->getMode().allFP32Denormals();
}
static bool hasFP64FP16Denormals(const MachineFunction &MF) {
const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
return Info->getMode().allFP64FP16Denormals();
}
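// Return the first SGPR32 that the calling convention state has not yet
// allocated; aborts if none remain.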
static unsigned findFirstFreeSGPR(CCState &CCInfo) {
unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
return AMDGPU::SGPR0 + Reg;
}
}
llvm_unreachable("Cannot allocate sgpr");
}
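// SITargetLowering constructor: register the legal value types with their
// register classes, then configure the operation legality, promotion and
// custom lowering tables used by SelectionDAG.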
SITargetLowering::SITargetLowering(const TargetMachine &TM,
const GCNSubtarget &STI)
: AMDGPUTargetLowering(TM, STI),
Subtarget(&STI) {
addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);
addRegisterClass(MVT::i32, &AMDGPU::SReg_32RegClass);
addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);
addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
const SIRegisterInfo *TRI = STI.getRegisterInfo();
const TargetRegisterClass *V64RegClass = TRI->getVGPR64Class();
addRegisterClass(MVT::f64, V64RegClass);
addRegisterClass(MVT::v2f32, V64RegClass);
addRegisterClass(MVT::v3i32, &AMDGPU::SGPR_96RegClass);
addRegisterClass(MVT::v3f32, TRI->getVGPRClassForBitWidth(96));
addRegisterClass(MVT::v2i64, &AMDGPU::SGPR_128RegClass);
addRegisterClass(MVT::v2f64, &AMDGPU::SGPR_128RegClass);
addRegisterClass(MVT::v4i32, &AMDGPU::SGPR_128RegClass);
addRegisterClass(MVT::v4f32, TRI->getVGPRClassForBitWidth(128));
addRegisterClass(MVT::v5i32, &AMDGPU::SGPR_160RegClass);
addRegisterClass(MVT::v5f32, TRI->getVGPRClassForBitWidth(160));
addRegisterClass(MVT::v6i32, &AMDGPU::SGPR_192RegClass);
addRegisterClass(MVT::v6f32, TRI->getVGPRClassForBitWidth(192));
addRegisterClass(MVT::v3i64, &AMDGPU::SGPR_192RegClass);
addRegisterClass(MVT::v3f64, TRI->getVGPRClassForBitWidth(192));
addRegisterClass(MVT::v7i32, &AMDGPU::SGPR_224RegClass);
addRegisterClass(MVT::v7f32, TRI->getVGPRClassForBitWidth(224));
addRegisterClass(MVT::v8i32, &AMDGPU::SGPR_256RegClass);
addRegisterClass(MVT::v8f32, TRI->getVGPRClassForBitWidth(256));
addRegisterClass(MVT::v4i64, &AMDGPU::SGPR_256RegClass);
addRegisterClass(MVT::v4f64, TRI->getVGPRClassForBitWidth(256));
addRegisterClass(MVT::v16i32, &AMDGPU::SGPR_512RegClass);
addRegisterClass(MVT::v16f32, TRI->getVGPRClassForBitWidth(512));
addRegisterClass(MVT::v8i64, &AMDGPU::SGPR_512RegClass);
addRegisterClass(MVT::v8f64, TRI->getVGPRClassForBitWidth(512));
addRegisterClass(MVT::v16i64, &AMDGPU::SGPR_1024RegClass);
addRegisterClass(MVT::v16f64, TRI->getVGPRClassForBitWidth(1024));
if (Subtarget->has16BitInsts()) {
addRegisterClass(MVT::i16, &AMDGPU::SReg_32RegClass);
addRegisterClass(MVT::f16, &AMDGPU::SReg_32RegClass);
addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32RegClass);
addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32RegClass);
addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass);
addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass);
addRegisterClass(MVT::v8i16, &AMDGPU::SGPR_128RegClass);
addRegisterClass(MVT::v8f16, &AMDGPU::SGPR_128RegClass);
addRegisterClass(MVT::v16i16, &AMDGPU::SGPR_256RegClass);
addRegisterClass(MVT::v16f16, &AMDGPU::SGPR_256RegClass);
}
addRegisterClass(MVT::v32i32, &AMDGPU::VReg_1024RegClass);
addRegisterClass(MVT::v32f32, TRI->getVGPRClassForBitWidth(1024));
computeRegisterProperties(Subtarget->getRegisterInfo());
setBooleanContents(ZeroOrOneBooleanContent);
setBooleanVectorContents(ZeroOrOneBooleanContent);
setOperationAction(ISD::LOAD,
{MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32,
MVT::v6i32, MVT::v7i32, MVT::v8i32, MVT::v16i32, MVT::i1,
MVT::v32i32},
Custom);
setOperationAction(ISD::STORE,
{MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32,
MVT::v6i32, MVT::v7i32, MVT::v8i32, MVT::v16i32, MVT::i1,
MVT::v32i32},
Custom);
setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
setTruncStoreAction(MVT::v3i32, MVT::v3i16, Expand);
setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);
setTruncStoreAction(MVT::v2i16, MVT::v2i8, Expand);
setTruncStoreAction(MVT::v4i16, MVT::v4i8, Expand);
setTruncStoreAction(MVT::v8i16, MVT::v8i8, Expand);
setTruncStoreAction(MVT::v16i16, MVT::v16i8, Expand);
setTruncStoreAction(MVT::v32i16, MVT::v32i8, Expand);
setTruncStoreAction(MVT::v3i64, MVT::v3i16, Expand);
setTruncStoreAction(MVT::v3i64, MVT::v3i32, Expand);
setTruncStoreAction(MVT::v4i64, MVT::v4i8, Expand);
setTruncStoreAction(MVT::v8i64, MVT::v8i8, Expand);
setTruncStoreAction(MVT::v8i64, MVT::v8i16, Expand);
setTruncStoreAction(MVT::v8i64, MVT::v8i32, Expand);
setTruncStoreAction(MVT::v16i64, MVT::v16i32, Expand);
setOperationAction(ISD::GlobalAddress, {MVT::i32, MVT::i64}, Custom);
setOperationAction(ISD::SELECT, MVT::i1, Promote);
setOperationAction(ISD::SELECT, MVT::i64, Custom);
setOperationAction(ISD::SELECT, MVT::f64, Promote);
AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);
setOperationAction(ISD::SELECT_CC,
{MVT::f32, MVT::i32, MVT::i64, MVT::f64, MVT::i1}, Expand);
setOperationAction(ISD::SETCC, MVT::i1, Promote);
setOperationAction(ISD::SETCC, {MVT::v2i1, MVT::v4i1}, Expand);
AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);
setOperationAction(ISD::TRUNCATE,
{MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32,
MVT::v6i32, MVT::v7i32, MVT::v8i32, MVT::v16i32},
Expand);
setOperationAction(ISD::FP_ROUND,
{MVT::v2f32, MVT::v3f32, MVT::v4f32, MVT::v5f32,
MVT::v6f32, MVT::v7f32, MVT::v8f32, MVT::v16f32},
Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG,
{MVT::v2i1, MVT::v4i1, MVT::v2i8, MVT::v4i8, MVT::v2i16,
MVT::v3i16, MVT::v4i16, MVT::Other},
Custom);
setOperationAction(ISD::BRCOND, MVT::Other, Custom);
setOperationAction(ISD::BR_CC,
{MVT::i1, MVT::i32, MVT::i64, MVT::f32, MVT::f64}, Expand);
setOperationAction({ISD::UADDO, ISD::USUBO}, MVT::i32, Legal);
setOperationAction({ISD::ADDCARRY, ISD::SUBCARRY}, MVT::i32, Legal);
setOperationAction({ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS}, MVT::i64,
Expand);
#if 0
#endif
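// For wide vector types only load/store and basic vector manipulation
// operations are kept; INSERT_SUBVECTOR and CONCAT_VECTORS are custom
// lowered and every remaining operation is expanded.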
for (MVT VT :
{MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32, MVT::v2i64,
MVT::v2f64, MVT::v4i16, MVT::v4f16, MVT::v3i64, MVT::v3f64,
MVT::v6i32, MVT::v6f32, MVT::v4i64, MVT::v4f64, MVT::v8i64,
MVT::v8f64, MVT::v8i16, MVT::v8f16, MVT::v16i16, MVT::v16f16,
MVT::v16i64, MVT::v16f64, MVT::v32i32, MVT::v32f32}) {
for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
switch (Op) {
case ISD::LOAD:
case ISD::STORE:
case ISD::BUILD_VECTOR:
case ISD::BITCAST:
case ISD::UNDEF:
case ISD::EXTRACT_VECTOR_ELT:
case ISD::INSERT_VECTOR_ELT:
case ISD::EXTRACT_SUBVECTOR:
case ISD::SCALAR_TO_VECTOR:
break;
case ISD::INSERT_SUBVECTOR:
case ISD::CONCAT_VECTORS:
setOperationAction(Op, VT, Custom);
break;
default:
setOperationAction(Op, VT, Expand);
break;
}
}
}
setOperationAction(ISD::FP_EXTEND, MVT::v4f32, Expand);
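// Vectors with 64-bit elements are handled by promoting their build,
// insert, extract and scalar-to-vector operations to the 32-bit element
// vector type with twice as many elements.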
for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);
setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);
setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
}
for (MVT Vec64 : { MVT::v3i64, MVT::v3f64 }) {
setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v6i32);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v6i32);
setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v6i32);
setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v6i32);
}
for (MVT Vec64 : { MVT::v4i64, MVT::v4f64 }) {
setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v8i32);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v8i32);
setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v8i32);
setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v8i32);
}
for (MVT Vec64 : { MVT::v8i64, MVT::v8f64 }) {
setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v16i32);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v16i32);
setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v16i32);
setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v16i32);
}
for (MVT Vec64 : { MVT::v16i64, MVT::v16f64 }) {
setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v32i32);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v32i32);
setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v32i32);
setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v32i32);
}
setOperationAction(ISD::VECTOR_SHUFFLE,
{MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32},
Expand);
setOperationAction(ISD::BUILD_VECTOR, {MVT::v4f16, MVT::v4i16}, Custom);
setOperationAction({ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT},
{MVT::v2i16, MVT::v2f16, MVT::v2i8, MVT::v4i8, MVT::v8i8,
MVT::v4i16, MVT::v4f16},
Custom);
setOperationAction(ISD::INSERT_SUBVECTOR,
{MVT::v3i32, MVT::v3f32, MVT::v4i32, MVT::v4f32}, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR,
{MVT::v5i32, MVT::v5f32, MVT::v6i32, MVT::v6f32,
MVT::v7i32, MVT::v7f32, MVT::v8i32, MVT::v8f32},
Custom);
setOperationAction(ISD::ATOMIC_CMP_SWAP, {MVT::i32, MVT::i64}, Custom);
setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, {MVT::i32, MVT::i64},
Expand);
if (Subtarget->hasFlatAddressSpace())
setOperationAction(ISD::ADDRSPACECAST, {MVT::i32, MVT::i64}, Custom);
setOperationAction(ISD::BITREVERSE, {MVT::i32, MVT::i64}, Legal);
setOperationAction(ISD::BSWAP, {MVT::i64, MVT::i32}, Legal);
setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
setOperationAction({ISD::TRAP, ISD::DEBUGTRAP}, MVT::Other, Custom);
if (Subtarget->has16BitInsts()) {
setOperationAction({ISD::FPOW, ISD::FPOWI}, MVT::f16, Promote);
setOperationAction({ISD::FLOG, ISD::FEXP, ISD::FLOG10}, MVT::f16, Custom);
}
if (Subtarget->hasMadMacF32Insts())
setOperationAction(ISD::FMAD, MVT::f32, Legal);
if (!Subtarget->hasBFI())
setOperationAction(ISD::FCOPYSIGN, {MVT::f32, MVT::f64}, Expand);
if (!Subtarget->hasBCNT(32))
setOperationAction(ISD::CTPOP, MVT::i32, Expand);
if (!Subtarget->hasBCNT(64))
setOperationAction(ISD::CTPOP, MVT::i64, Expand);
if (Subtarget->hasFFBH())
setOperationAction({ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF}, MVT::i32, Custom);
if (Subtarget->hasFFBL())
setOperationAction({ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF}, MVT::i32, Custom);
if (Subtarget->hasBFE())
setHasExtractBitsInsn(true);
if (Subtarget->hasIntClamp())
setOperationAction({ISD::UADDSAT, ISD::USUBSAT}, MVT::i32, Legal);
if (Subtarget->hasAddNoCarry())
setOperationAction({ISD::SADDSAT, ISD::SSUBSAT}, {MVT::i16, MVT::i32},
Legal);
setOperationAction({ISD::FMINNUM, ISD::FMAXNUM}, {MVT::f32, MVT::f64},
Custom);
setOperationAction({ISD::FMINNUM_IEEE, ISD::FMAXNUM_IEEE},
{MVT::f32, MVT::f64}, Legal);
if (Subtarget->haveRoundOpsF64())
setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FRINT}, MVT::f64, Legal);
else
setOperationAction({ISD::FCEIL, ISD::FTRUNC, ISD::FRINT, ISD::FFLOOR},
MVT::f64, Custom);
setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
setOperationAction({ISD::FSIN, ISD::FCOS, ISD::FDIV}, MVT::f32, Custom);
setOperationAction(ISD::FDIV, MVT::f64, Custom);
if (Subtarget->has16BitInsts()) {
setOperationAction({ISD::Constant, ISD::SMIN, ISD::SMAX, ISD::UMIN,
ISD::UMAX, ISD::UADDSAT, ISD::USUBSAT},
MVT::i16, Legal);
AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);
setOperationAction({ISD::ROTR, ISD::ROTL, ISD::SELECT_CC, ISD::BR_CC},
MVT::i16, Expand);
setOperationAction({ISD::SIGN_EXTEND, ISD::SDIV, ISD::UDIV, ISD::SREM,
ISD::UREM, ISD::BITREVERSE, ISD::CTTZ,
ISD::CTTZ_ZERO_UNDEF, ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF,
ISD::CTPOP},
MVT::i16, Promote);
setOperationAction(ISD::LOAD, MVT::i16, Custom);
setTruncStoreAction(MVT::i64, MVT::i16, Expand);
setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);
setOperationAction({ISD::FP_TO_SINT, ISD::FP_TO_UINT}, MVT::i16, Custom);
setOperationAction(ISD::ConstantFP, MVT::f16, Legal);
setOperationAction(ISD::LOAD, MVT::f16, Promote);
AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
setOperationAction(ISD::STORE, MVT::f16, Promote);
AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);
setOperationAction(
{ISD::FP_ROUND, ISD::FCOS, ISD::FSIN, ISD::FROUND, ISD::FPTRUNC_ROUND},
MVT::f16, Custom);
setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP}, MVT::i16, Custom);
setOperationAction(
{ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::SINT_TO_FP, ISD::UINT_TO_FP},
MVT::f16, Promote);
setOperationAction({ISD::BR_CC, ISD::SELECT_CC}, MVT::f16, Expand);
setOperationAction(ISD::FDIV, MVT::f16, Custom);
setOperationAction(ISD::FMA, MVT::f16, Legal);
if (STI.hasMadF16())
setOperationAction(ISD::FMAD, MVT::f16, Legal);
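// For the packed 16-bit vector types, keep only load/store and vector
// rearrangement operations; CONCAT_VECTORS is custom lowered and all other
// operations are expanded.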
for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16, MVT::v8i16,
MVT::v8f16, MVT::v16i16, MVT::v16f16}) {
for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
switch (Op) {
case ISD::LOAD:
case ISD::STORE:
case ISD::BUILD_VECTOR:
case ISD::BITCAST:
case ISD::UNDEF:
case ISD::EXTRACT_VECTOR_ELT:
case ISD::INSERT_VECTOR_ELT:
case ISD::INSERT_SUBVECTOR:
case ISD::EXTRACT_SUBVECTOR:
case ISD::SCALAR_TO_VECTOR:
break;
case ISD::CONCAT_VECTORS:
setOperationAction(Op, VT, Custom);
break;
default:
setOperationAction(Op, VT, Expand);
break;
}
}
}
setOperationAction(ISD::BSWAP, {MVT::i16, MVT::v2i16}, Legal);
setOperationAction(ISD::BSWAP, MVT::v4i16, Custom);
setOperationAction(ISD::Constant, {MVT::v2i16, MVT::v2f16}, Legal);
setOperationAction(ISD::UNDEF, {MVT::v2i16, MVT::v2f16}, Legal);
setOperationAction(ISD::STORE, MVT::v2i16, Promote);
AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
setOperationAction(ISD::STORE, MVT::v2f16, Promote);
AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);
setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);
setOperationAction(ISD::AND, MVT::v2i16, Promote);
AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
setOperationAction(ISD::OR, MVT::v2i16, Promote);
AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
setOperationAction(ISD::XOR, MVT::v2i16, Promote);
AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);
setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v2i32);
setOperationAction(ISD::LOAD, MVT::v4f16, Promote);
AddPromotedToType(ISD::LOAD, MVT::v4f16, MVT::v2i32);
setOperationAction(ISD::STORE, MVT::v4i16, Promote);
AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32);
setOperationAction(ISD::STORE, MVT::v4f16, Promote);
AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32);
setOperationAction(ISD::LOAD, MVT::v8i16, Promote);
AddPromotedToType(ISD::LOAD, MVT::v8i16, MVT::v4i32);
setOperationAction(ISD::LOAD, MVT::v8f16, Promote);
AddPromotedToType(ISD::LOAD, MVT::v8f16, MVT::v4i32);
setOperationAction(ISD::STORE, MVT::v4i16, Promote);
AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32);
setOperationAction(ISD::STORE, MVT::v4f16, Promote);
AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32);
setOperationAction(ISD::STORE, MVT::v8i16, Promote);
AddPromotedToType(ISD::STORE, MVT::v8i16, MVT::v4i32);
setOperationAction(ISD::STORE, MVT::v8f16, Promote);
AddPromotedToType(ISD::STORE, MVT::v8f16, MVT::v4i32);
setOperationAction(ISD::LOAD, MVT::v16i16, Promote);
AddPromotedToType(ISD::LOAD, MVT::v16i16, MVT::v8i32);
setOperationAction(ISD::LOAD, MVT::v16f16, Promote);
AddPromotedToType(ISD::LOAD, MVT::v16f16, MVT::v8i32);
setOperationAction(ISD::STORE, MVT::v16i16, Promote);
AddPromotedToType(ISD::STORE, MVT::v16i16, MVT::v8i32);
setOperationAction(ISD::STORE, MVT::v16f16, Promote);
AddPromotedToType(ISD::STORE, MVT::v16f16, MVT::v8i32);
setOperationAction({ISD::ANY_EXTEND, ISD::ZERO_EXTEND, ISD::SIGN_EXTEND},
MVT::v2i32, Expand);
setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);
setOperationAction({ISD::ANY_EXTEND, ISD::ZERO_EXTEND, ISD::SIGN_EXTEND},
MVT::v4i32, Expand);
setOperationAction({ISD::ANY_EXTEND, ISD::ZERO_EXTEND, ISD::SIGN_EXTEND},
MVT::v8i32, Expand);
if (!Subtarget->hasVOP3PInsts())
setOperationAction(ISD::BUILD_VECTOR, {MVT::v2i16, MVT::v2f16}, Custom);
setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
setOperationAction(ISD::FABS, MVT::v2f16, Legal);
setOperationAction({ISD::FMAXNUM, ISD::FMINNUM}, MVT::f16, Custom);
setOperationAction({ISD::FMAXNUM_IEEE, ISD::FMINNUM_IEEE}, MVT::f16, Legal);
setOperationAction({ISD::FMINNUM_IEEE, ISD::FMAXNUM_IEEE},
{MVT::v4f16, MVT::v8f16, MVT::v16f16}, Custom);
setOperationAction({ISD::FMINNUM, ISD::FMAXNUM},
{MVT::v4f16, MVT::v8f16, MVT::v16f16}, Expand);
for (MVT Vec16 : {MVT::v8i16, MVT::v8f16, MVT::v16i16, MVT::v16f16}) {
setOperationAction(
{ISD::BUILD_VECTOR, ISD::EXTRACT_VECTOR_ELT, ISD::SCALAR_TO_VECTOR},
Vec16, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, Vec16, Expand);
}
}
if (Subtarget->hasVOP3PInsts()) {
setOperationAction({ISD::ADD, ISD::SUB, ISD::MUL, ISD::SHL, ISD::SRL,
ISD::SRA, ISD::SMIN, ISD::UMIN, ISD::SMAX, ISD::UMAX,
ISD::UADDSAT, ISD::USUBSAT, ISD::SADDSAT, ISD::SSUBSAT},
MVT::v2i16, Legal);
setOperationAction({ISD::FADD, ISD::FMUL, ISD::FMA, ISD::FMINNUM_IEEE,
ISD::FMAXNUM_IEEE, ISD::FCANONICALIZE},
MVT::v2f16, Legal);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, {MVT::v2i16, MVT::v2f16},
Custom);
setOperationAction(ISD::VECTOR_SHUFFLE,
{MVT::v4f16, MVT::v4i16, MVT::v8f16, MVT::v8i16,
MVT::v16f16, MVT::v16i16},
Custom);
for (MVT VT : {MVT::v4i16, MVT::v8i16, MVT::v16i16})
setOperationAction({ISD::SHL, ISD::SRA, ISD::SRL, ISD::ADD, ISD::SUB,
ISD::MUL, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX,
ISD::UADDSAT, ISD::SADDSAT, ISD::USUBSAT,
ISD::SSUBSAT},
VT, Custom);
for (MVT VT : {MVT::v4f16, MVT::v8f16, MVT::v16f16})
setOperationAction({ISD::FADD, ISD::FMUL, ISD::FMA, ISD::FCANONICALIZE},
VT, Custom);
setOperationAction({ISD::FMAXNUM, ISD::FMINNUM}, {MVT::v2f16, MVT::v4f16},
Custom);
setOperationAction(ISD::FEXP, MVT::v2f16, Custom);
setOperationAction(ISD::SELECT, {MVT::v4i16, MVT::v4f16}, Custom);
if (Subtarget->hasPackedFP32Ops()) {
setOperationAction({ISD::FADD, ISD::FMUL, ISD::FMA, ISD::FNEG},
MVT::v2f32, Legal);
setOperationAction({ISD::FADD, ISD::FMUL, ISD::FMA},
{MVT::v4f32, MVT::v8f32, MVT::v16f32, MVT::v32f32},
Custom);
}
}
setOperationAction({ISD::FNEG, ISD::FABS}, MVT::v4f16, Custom);
if (Subtarget->has16BitInsts()) {
setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);
} else {
setOperationAction(ISD::SELECT, {MVT::v2i16, MVT::v2f16}, Custom);
setOperationAction({ISD::FNEG, ISD::FABS}, MVT::v2f16, Custom);
}
setOperationAction(ISD::SELECT,
{MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8,
MVT::v8i16, MVT::v8f16, MVT::v16i16, MVT::v16f16},
Custom);
setOperationAction({ISD::SMULO, ISD::UMULO}, MVT::i64, Custom);
if (Subtarget->hasMad64_32())
setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, MVT::i32, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN,
{MVT::Other, MVT::f32, MVT::v4f32, MVT::i16, MVT::f16,
MVT::v2i16, MVT::v2f16},
Custom);
setOperationAction(ISD::INTRINSIC_W_CHAIN,
{MVT::v2f16, MVT::v2i16, MVT::v3f16, MVT::v3i16,
MVT::v4f16, MVT::v4i16, MVT::v8f16, MVT::Other, MVT::f16,
MVT::i16, MVT::i8},
Custom);
setOperationAction(ISD::INTRINSIC_VOID,
{MVT::Other, MVT::v2i16, MVT::v2f16, MVT::v3i16,
MVT::v3f16, MVT::v4f16, MVT::v4i16, MVT::f16, MVT::i16,
MVT::i8},
Custom);
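// Node kinds routed to the target DAG combiner.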
setTargetDAGCombine({ISD::ADD,
ISD::ADDCARRY,
ISD::SUB,
ISD::SUBCARRY,
ISD::FADD,
ISD::FSUB,
ISD::FMINNUM,
ISD::FMAXNUM,
ISD::FMINNUM_IEEE,
ISD::FMAXNUM_IEEE,
ISD::FMA,
ISD::SMIN,
ISD::SMAX,
ISD::UMIN,
ISD::UMAX,
ISD::SETCC,
ISD::AND,
ISD::OR,
ISD::XOR,
ISD::SINT_TO_FP,
ISD::UINT_TO_FP,
ISD::FCANONICALIZE,
ISD::SCALAR_TO_VECTOR,
ISD::ZERO_EXTEND,
ISD::SIGN_EXTEND_INREG,
ISD::EXTRACT_VECTOR_ELT,
ISD::INSERT_VECTOR_ELT});
setTargetDAGCombine({ISD::LOAD,
ISD::STORE,
ISD::ATOMIC_LOAD,
ISD::ATOMIC_STORE,
ISD::ATOMIC_CMP_SWAP,
ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
ISD::ATOMIC_SWAP,
ISD::ATOMIC_LOAD_ADD,
ISD::ATOMIC_LOAD_SUB,
ISD::ATOMIC_LOAD_AND,
ISD::ATOMIC_LOAD_OR,
ISD::ATOMIC_LOAD_XOR,
ISD::ATOMIC_LOAD_NAND,
ISD::ATOMIC_LOAD_MIN,
ISD::ATOMIC_LOAD_MAX,
ISD::ATOMIC_LOAD_UMIN,
ISD::ATOMIC_LOAD_UMAX,
ISD::ATOMIC_LOAD_FADD,
ISD::INTRINSIC_VOID,
ISD::INTRINSIC_W_CHAIN});
setStackPointerRegisterToSaveRestore(AMDGPU::SGPR32);
setSchedulingPreference(Sched::RegPressure);
}
const GCNSubtarget *SITargetLowering::getSubtarget() const {
return Subtarget;
}
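// An f16-to-f32 extension feeding an FMAD/FMA can be folded into the
// mixed-precision mad/fma instructions, but only when FP32 denormals are
// disabled.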
bool SITargetLowering::isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode,
EVT DestVT, EVT SrcVT) const {
return ((Opcode == ISD::FMAD && Subtarget->hasMadMixInsts()) ||
(Opcode == ISD::FMA && Subtarget->hasFmaMixInsts())) &&
DestVT.getScalarType() == MVT::f32 &&
SrcVT.getScalarType() == MVT::f16 &&
!hasFP32Denormals(DAG.getMachineFunction());
}
bool SITargetLowering::isFPExtFoldable(const MachineInstr &MI, unsigned Opcode,
LLT DestTy, LLT SrcTy) const {
return ((Opcode == TargetOpcode::G_FMAD && Subtarget->hasMadMixInsts()) ||
(Opcode == TargetOpcode::G_FMA && Subtarget->hasFmaMixInsts())) &&
DestTy.getScalarSizeInBits() == 32 &&
SrcTy.getScalarSizeInBits() == 16 &&
!hasFP32Denormals(*MI.getMF());
}
bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const {
return false;
}
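// For non-kernel calling conventions, 16-bit vector elements are passed
// packed two per 32-bit register when 16-bit instructions are available,
// and anything wider than 32 bits is split into i32 pieces.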
MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
CallingConv::ID CC,
EVT VT) const {
if (CC == CallingConv::AMDGPU_KERNEL)
return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
if (VT.isVector()) {
EVT ScalarVT = VT.getScalarType();
unsigned Size = ScalarVT.getSizeInBits();
if (Size == 16) {
if (Subtarget->has16BitInsts())
return VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
return VT.isInteger() ? MVT::i32 : MVT::f32;
}
if (Size < 16)
return Subtarget->has16BitInsts() ? MVT::i16 : MVT::i32;
return Size == 32 ? ScalarVT.getSimpleVT() : MVT::i32;
}
if (VT.getSizeInBits() > 32)
return MVT::i32;
return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
}
unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
CallingConv::ID CC,
EVT VT) const {
if (CC == CallingConv::AMDGPU_KERNEL)
return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
if (VT.isVector()) {
unsigned NumElts = VT.getVectorNumElements();
EVT ScalarVT = VT.getScalarType();
unsigned Size = ScalarVT.getSizeInBits();
if (Size == 16 && Subtarget->has16BitInsts())
return (NumElts + 1) / 2;
if (Size <= 32)
return NumElts;
if (Size > 32)
return NumElts * ((Size + 31) / 32);
} else if (VT.getSizeInBits() > 32)
return (VT.getSizeInBits() + 31) / 32;
return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
}
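// Break a vector argument down into the intermediate value type and the
// number of registers used for non-kernel calling conventions.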
unsigned SITargetLowering::getVectorTypeBreakdownForCallingConv(
LLVMContext &Context, CallingConv::ID CC,
EVT VT, EVT &IntermediateVT,
unsigned &NumIntermediates, MVT &RegisterVT) const {
if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
unsigned NumElts = VT.getVectorNumElements();
EVT ScalarVT = VT.getScalarType();
unsigned Size = ScalarVT.getSizeInBits();
if (Size == 16 && Subtarget->has16BitInsts()) {
RegisterVT = VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
IntermediateVT = RegisterVT;
NumIntermediates = (NumElts + 1) / 2;
return NumIntermediates;
}
if (Size == 32) {
RegisterVT = ScalarVT.getSimpleVT();
IntermediateVT = RegisterVT;
NumIntermediates = NumElts;
return NumIntermediates;
}
if (Size < 16 && Subtarget->has16BitInsts()) {
RegisterVT = MVT::i16;
IntermediateVT = ScalarVT;
NumIntermediates = NumElts;
return NumIntermediates;
}
if (Size != 16 && Size <= 32) {
RegisterVT = MVT::i32;
IntermediateVT = ScalarVT;
NumIntermediates = NumElts;
return NumIntermediates;
}
if (Size > 32) {
RegisterVT = MVT::i32;
IntermediateVT = RegisterVT;
NumIntermediates = NumElts * ((Size + 31) / 32);
return NumIntermediates;
}
}
return TargetLowering::getVectorTypeBreakdownForCallingConv(
Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
}
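// Compute the memory type of an image intrinsic's data, truncating the
// vector width to the number of dmask lanes actually accessed.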
static EVT memVTFromImageData(Type *Ty, unsigned DMaskLanes) {
assert(DMaskLanes != 0);
if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
unsigned NumElts = std::min(DMaskLanes, VT->getNumElements());
return EVT::getVectorVT(Ty->getContext(),
EVT::getEVT(VT->getElementType()),
NumElts);
}
return EVT::getEVT(Ty);
}
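// Same for an image intrinsic's return type, which may additionally be
// wrapped in a two-element struct whose second member is an i32.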
static EVT memVTFromImageReturn(Type *Ty, unsigned DMaskLanes) {
auto *ST = dyn_cast<StructType>(Ty);
if (!ST)
return memVTFromImageData(Ty, DMaskLanes);
if (ST->getNumContainedTypes() != 2 ||
!ST->getContainedType(1)->isIntegerTy(32))
return EVT();
return memVTFromImageData(ST->getContainedType(0), DMaskLanes);
}
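// Describe the memory access performed by a memory-touching intrinsic so
// the DAG builder can attach an appropriate MachineMemOperand.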
bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
const CallInst &CI,
MachineFunction &MF,
unsigned IntrID) const {
Info.flags = MachineMemOperand::MONone;
if (CI.hasMetadata(LLVMContext::MD_invariant_load))
Info.flags |= MachineMemOperand::MOInvariant;
if (const AMDGPU::RsrcIntrinsic *RsrcIntr =
AMDGPU::lookupRsrcIntrinsic(IntrID)) {
AttributeList Attr = Intrinsic::getAttributes(CI.getContext(),
(Intrinsic::ID)IntrID);
if (Attr.hasFnAttr(Attribute::ReadNone))
return false;
SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
const GCNTargetMachine &TM =
static_cast<const GCNTargetMachine &>(getTargetMachine());
if (RsrcIntr->IsImage) {
Info.ptrVal = MFI->getImagePSV(TM);
Info.align.reset();
} else {
Info.ptrVal = MFI->getBufferPSV(TM);
}
Info.flags |= MachineMemOperand::MODereferenceable;
if (Attr.hasFnAttr(Attribute::ReadOnly)) {
unsigned DMaskLanes = 4;
if (RsrcIntr->IsImage) {
const AMDGPU::ImageDimIntrinsicInfo *Intr
= AMDGPU::getImageDimIntrinsicInfo(IntrID);
const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
if (!BaseOpcode->Gather4) {
unsigned DMask
= cast<ConstantInt>(CI.getArgOperand(0))->getZExtValue();
DMaskLanes = DMask == 0 ? 1 : countPopulation(DMask);
}
Info.memVT = memVTFromImageReturn(CI.getType(), DMaskLanes);
} else
Info.memVT = EVT::getEVT(CI.getType());
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.flags |= MachineMemOperand::MOLoad;
} else if (Attr.hasFnAttr(Attribute::WriteOnly)) {
Info.opc = ISD::INTRINSIC_VOID;
Type *DataTy = CI.getArgOperand(0)->getType();
if (RsrcIntr->IsImage) {
unsigned DMask = cast<ConstantInt>(CI.getArgOperand(1))->getZExtValue();
unsigned DMaskLanes = DMask == 0 ? 1 : countPopulation(DMask);
Info.memVT = memVTFromImageData(DataTy, DMaskLanes);
} else
Info.memVT = EVT::getEVT(DataTy);
Info.flags |= MachineMemOperand::MOStore;
} else {
Info.opc = CI.getType()->isVoidTy() ? ISD::INTRINSIC_VOID :
ISD::INTRINSIC_W_CHAIN;
Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
Info.flags |= MachineMemOperand::MOLoad |
MachineMemOperand::MOStore |
MachineMemOperand::MODereferenceable;
Info.flags |= MachineMemOperand::MOVolatile;
switch (IntrID) {
default:
break;
case Intrinsic::amdgcn_raw_buffer_load_lds:
case Intrinsic::amdgcn_struct_buffer_load_lds: {
unsigned Width = cast<ConstantInt>(CI.getArgOperand(2))->getZExtValue();
Info.memVT = EVT::getIntegerVT(CI.getContext(), Width * 8);
return true;
}
}
}
return true;
}
switch (IntrID) {
case Intrinsic::amdgcn_atomic_inc:
case Intrinsic::amdgcn_atomic_dec:
case Intrinsic::amdgcn_ds_ordered_add:
case Intrinsic::amdgcn_ds_ordered_swap:
case Intrinsic::amdgcn_ds_fadd:
case Intrinsic::amdgcn_ds_fmin:
case Intrinsic::amdgcn_ds_fmax: {
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.memVT = MVT::getVT(CI.getType());
Info.ptrVal = CI.getOperand(0);
Info.align.reset();
Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(4));
if (!Vol->isZero())
Info.flags |= MachineMemOperand::MOVolatile;
return true;
}
case Intrinsic::amdgcn_buffer_atomic_fadd: {
SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
const GCNTargetMachine &TM =
static_cast<const GCNTargetMachine &>(getTargetMachine());
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.memVT = MVT::getVT(CI.getOperand(0)->getType());
Info.ptrVal = MFI->getBufferPSV(TM);
Info.align.reset();
Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4));
if (!Vol || !Vol->isZero())
Info.flags |= MachineMemOperand::MOVolatile;
return true;
}
case Intrinsic::amdgcn_ds_append:
case Intrinsic::amdgcn_ds_consume: {
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.memVT = MVT::getVT(CI.getType());
Info.ptrVal = CI.getOperand(0);
Info.align.reset();
Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(1));
if (!Vol->isZero())
Info.flags |= MachineMemOperand::MOVolatile;
return true;
}
case Intrinsic::amdgcn_global_atomic_csub: {
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.memVT = MVT::getVT(CI.getType());
Info.ptrVal = CI.getOperand(0);
Info.align.reset();
Info.flags |= MachineMemOperand::MOLoad |
MachineMemOperand::MOStore |
MachineMemOperand::MOVolatile;
return true;
}
case Intrinsic::amdgcn_image_bvh_intersect_ray: {
SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.memVT = MVT::getVT(CI.getType());
const GCNTargetMachine &TM =
static_cast<const GCNTargetMachine &>(getTargetMachine());
Info.ptrVal = MFI->getImagePSV(TM);
Info.align.reset();
Info.flags |= MachineMemOperand::MOLoad |
MachineMemOperand::MODereferenceable;
return true;
}
case Intrinsic::amdgcn_global_atomic_fadd:
case Intrinsic::amdgcn_global_atomic_fmin:
case Intrinsic::amdgcn_global_atomic_fmax:
case Intrinsic::amdgcn_flat_atomic_fadd:
case Intrinsic::amdgcn_flat_atomic_fmin:
case Intrinsic::amdgcn_flat_atomic_fmax:
case Intrinsic::amdgcn_global_atomic_fadd_v2bf16:
case Intrinsic::amdgcn_flat_atomic_fadd_v2bf16: {
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.memVT = MVT::getVT(CI.getType());
Info.ptrVal = CI.getOperand(0);
Info.align.reset();
Info.flags |= MachineMemOperand::MOLoad |
MachineMemOperand::MOStore |
MachineMemOperand::MODereferenceable |
MachineMemOperand::MOVolatile;
return true;
}
case Intrinsic::amdgcn_ds_gws_init:
case Intrinsic::amdgcn_ds_gws_barrier:
case Intrinsic::amdgcn_ds_gws_sema_v:
case Intrinsic::amdgcn_ds_gws_sema_br:
case Intrinsic::amdgcn_ds_gws_sema_p:
case Intrinsic::amdgcn_ds_gws_sema_release_all: {
Info.opc = ISD::INTRINSIC_VOID;
const GCNTargetMachine &TM =
static_cast<const GCNTargetMachine &>(getTargetMachine());
SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
Info.ptrVal = MFI->getGWSPSV(TM);
Info.memVT = MVT::i32;
Info.size = 4;
Info.align = Align(4);
if (IntrID == Intrinsic::amdgcn_ds_gws_barrier)
Info.flags |= MachineMemOperand::MOLoad;
else
Info.flags |= MachineMemOperand::MOStore;
return true;
}
case Intrinsic::amdgcn_global_load_lds: {
Info.opc = ISD::INTRINSIC_VOID;
unsigned Width = cast<ConstantInt>(CI.getArgOperand(2))->getZExtValue();
Info.memVT = EVT::getIntegerVT(CI.getContext(), Width * 8);
Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
MachineMemOperand::MOVolatile;
return true;
}
default:
return false;
}
}
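// For intrinsics that access memory through a pointer operand, report that
// operand and the accessed type so addressing-mode folding can consider it.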
bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
SmallVectorImpl<Value*> &Ops,
Type *&AccessTy) const {
switch (II->getIntrinsicID()) {
case Intrinsic::amdgcn_atomic_inc:
case Intrinsic::amdgcn_atomic_dec:
case Intrinsic::amdgcn_ds_ordered_add:
case Intrinsic::amdgcn_ds_ordered_swap:
case Intrinsic::amdgcn_ds_append:
case Intrinsic::amdgcn_ds_consume:
case Intrinsic::amdgcn_ds_fadd:
case Intrinsic::amdgcn_ds_fmin:
case Intrinsic::amdgcn_ds_fmax:
case Intrinsic::amdgcn_global_atomic_fadd:
case Intrinsic::amdgcn_flat_atomic_fadd:
case Intrinsic::amdgcn_flat_atomic_fmin:
case Intrinsic::amdgcn_flat_atomic_fmax:
case Intrinsic::amdgcn_global_atomic_fadd_v2bf16:
case Intrinsic::amdgcn_flat_atomic_fadd_v2bf16:
case Intrinsic::amdgcn_global_atomic_csub: {
Value *Ptr = II->getArgOperand(0);
AccessTy = II->getType();
Ops.push_back(Ptr);
return true;
}
default:
return false;
}
}
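// FLAT addressing has no scaled-index form; an immediate offset is only
// usable on subtargets with flat instruction offsets and must be in the
// legal range.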
bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
if (!Subtarget->hasFlatInstOffsets()) {
return AM.BaseOffs == 0 && AM.Scale == 0;
}
return AM.Scale == 0 &&
(AM.BaseOffs == 0 ||
Subtarget->getInstrInfo()->isLegalFLATOffset(
AM.BaseOffs, AMDGPUAS::FLAT_ADDRESS, SIInstrFlags::FLAT));
}
bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const {
if (Subtarget->hasFlatGlobalInsts())
return AM.Scale == 0 &&
(AM.BaseOffs == 0 || Subtarget->getInstrInfo()->isLegalFLATOffset(
AM.BaseOffs, AMDGPUAS::GLOBAL_ADDRESS,
SIInstrFlags::FlatGlobal));
if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) {
return isLegalFlatAddressingMode(AM);
}
return isLegalMUBUFAddressingMode(AM);
}
bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
if (!SIInstrInfo::isLegalMUBUFImmOffset(AM.BaseOffs))
return false;
switch (AM.Scale) {
case 0: return true;
case 1:
return true;
case 2:
if (AM.HasBaseReg) {
return false;
}
return true;
default: return false;
}
}
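// Dispatch to the per-address-space addressing mode checks; the constant
// address spaces additionally depend on the scalar-memory immediate offset
// range of the subtarget generation.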
bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
const AddrMode &AM, Type *Ty,
unsigned AS, Instruction *I) const {
if (AM.BaseGV)
return false;
if (AS == AMDGPUAS::GLOBAL_ADDRESS)
return isLegalGlobalAddressingMode(AM);
if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
AS == AMDGPUAS::BUFFER_FAT_POINTER) {
if (AM.BaseOffs % 4 != 0)
return isLegalMUBUFAddressingMode(AM);
if (Ty->isSized() && DL.getTypeStoreSize(Ty) < 4)
return isLegalGlobalAddressingMode(AM);
if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
if (!isUInt<8>(AM.BaseOffs / 4))
return false;
} else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) {
if (!isUInt<32>(AM.BaseOffs / 4))
return false;
} else if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
if (!isUInt<20>(AM.BaseOffs))
return false;
} else
llvm_unreachable("unhandled generation");
if (AM.Scale == 0) return true;
if (AM.Scale == 1 && AM.HasBaseReg)
return true;
return false;
} else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
return isLegalMUBUFAddressingMode(AM);
} else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
AS == AMDGPUAS::REGION_ADDRESS) {
if (!isUInt<16>(AM.BaseOffs))
return false;
if (AM.Scale == 0) return true;
if (AM.Scale == 1 && AM.HasBaseReg)
return true;
return false;
} else if (AS == AMDGPUAS::FLAT_ADDRESS ||
AS == AMDGPUAS::UNKNOWN_ADDRESS_SPACE) {
return isLegalFlatAddressingMode(AM);
}
return isLegalGlobalAddressingMode(AM);
}
bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
const MachineFunction &MF) const {
if (AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) {
return (MemVT.getSizeInBits() <= 4 * 32);
} else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
return (MemVT.getSizeInBits() <= MaxPrivateBits);
} else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
return (MemVT.getSizeInBits() <= 2 * 32);
}
return true;
}
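// Determine whether an unaligned access of the given size and address space
// is legal, and whether it should be considered fast.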
bool SITargetLowering::allowsMisalignedMemoryAccessesImpl(
unsigned Size, unsigned AddrSpace, Align Alignment,
MachineMemOperand::Flags Flags, bool *IsFast) const {
if (IsFast)
*IsFast = false;
if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
AddrSpace == AMDGPUAS::REGION_ADDRESS) {
if (!Subtarget->hasUnalignedDSAccessEnabled() && Alignment < Align(4))
return false;
Align RequiredAlignment(PowerOf2Ceil(Size / 8));
if (Subtarget->hasLDSMisalignedBug() && Size > 32 &&
Alignment < RequiredAlignment)
return false;
switch (Size) {
case 64:
if (!Subtarget->hasUsableDSOffset() && Alignment < Align(8))
return false;
RequiredAlignment = Align(4);
if (Subtarget->hasUnalignedDSAccessEnabled()) {
if (IsFast)
*IsFast = true;
return true;
}
break;
case 96:
if (!Subtarget->hasDS96AndDS128())
return false;
if (Subtarget->hasUnalignedDSAccessEnabled()) {
if (IsFast)
*IsFast = Alignment >= RequiredAlignment || Alignment < Align(4);
return true;
}
break;
case 128:
if (!Subtarget->hasDS96AndDS128() || !Subtarget->useDS128())
return false;
RequiredAlignment = Align(8);
if (Subtarget->hasUnalignedDSAccessEnabled()) {
if (IsFast)
*IsFast = Alignment >= RequiredAlignment || Alignment < Align(4);
return true;
}
break;
default:
if (Size > 32)
return false;
break;
}
if (IsFast)
*IsFast = Alignment >= RequiredAlignment;
return Alignment >= RequiredAlignment ||
Subtarget->hasUnalignedDSAccessEnabled();
}
if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS) {
bool AlignedBy4 = Alignment >= Align(4);
if (IsFast)
*IsFast = AlignedBy4;
return AlignedBy4 ||
Subtarget->enableFlatScratch() ||
Subtarget->hasUnalignedScratchAccess();
}
if (AddrSpace == AMDGPUAS::FLAT_ADDRESS &&
!Subtarget->hasUnalignedScratchAccess()) {
bool AlignedBy4 = Alignment >= Align(4);
if (IsFast)
*IsFast = AlignedBy4;
return AlignedBy4;
}
if (Subtarget->hasUnalignedBufferAccessEnabled()) {
if (IsFast) {
*IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ?
Alignment >= Align(4) : Alignment != Align(2);
}
return true;
}
if (Size < 32)
return false;
if (IsFast)
*IsFast = true;
return Size >= 32 && Alignment >= Align(4);
}
bool SITargetLowering::allowsMisalignedMemoryAccesses(
EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
bool *IsFast) const {
bool Allow = allowsMisalignedMemoryAccessesImpl(VT.getSizeInBits(), AddrSpace,
Alignment, Flags, IsFast);
if (Allow && IsFast && Subtarget->hasUnalignedDSAccessEnabled() &&
(AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
AddrSpace == AMDGPUAS::REGION_ADDRESS)) {
*IsFast = true;
}
return Allow;
}
EVT SITargetLowering::getOptimalMemOpType(
const MemOp &Op, const AttributeList &FuncAttributes) const {
if (Op.size() >= 16 && Op.isDstAligned(Align(4)))
return MVT::v4i32;
if (Op.size() >= 8 && Op.isDstAligned(Align(4)))
return MVT::v2i32;
return MVT::Other;
}
bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
const MemSDNode *MemNode = cast<MemSDNode>(N);
return MemNode->getMemOperand()->getFlags() & MONoClobber;
}
bool SITargetLowering::isNonGlobalAddrSpace(unsigned AS) {
return AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS ||
AS == AMDGPUAS::PRIVATE_ADDRESS;
}
bool SITargetLowering::isFreeAddrSpaceCast(unsigned SrcAS,
unsigned DestAS) const {
if (SrcAS == AMDGPUAS::FLAT_ADDRESS)
return true;
const GCNTargetMachine &TM =
static_cast<const GCNTargetMachine &>(getTargetMachine());
return TM.isNoopAddrSpaceCast(SrcAS, DestAS);
}
bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
const MemSDNode *MemNode = cast<MemSDNode>(N);
return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand());
}
TargetLoweringBase::LegalizeTypeAction
SITargetLowering::getPreferredVectorAction(MVT VT) const {
if (!VT.isScalableVector() && VT.getVectorNumElements() != 1 &&
VT.getScalarType().bitsLE(MVT::i16))
return VT.isPow2VectorType() ? TypeSplitVector : TypeWidenVector;
return TargetLoweringBase::getPreferredVectorAction(VT);
}
bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
Type *Ty) const {
return true;
}
bool SITargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
unsigned Index) const {
if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
return false;
return Index == 0;
}
bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
if (Subtarget->has16BitInsts() && VT == MVT::i16) {
switch (Op) {
case ISD::LOAD:
case ISD::STORE:
case ISD::AND:
case ISD::OR:
case ISD::XOR:
case ISD::SELECT:
return true;
default:
return false;
}
}
if (VT == MVT::i1 && Op == ISD::SETCC)
return false;
return TargetLowering::isTypeDesirableForOp(Op, VT);
}
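// Build a constant-address-space pointer to a byte offset within the kernel
// argument segment, based off the preloaded kernarg segment pointer.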
SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
const SDLoc &SL,
SDValue Chain,
uint64_t Offset) const {
const DataLayout &DL = DAG.getDataLayout();
MachineFunction &MF = DAG.getMachineFunction();
const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
const ArgDescriptor *InputPtrReg;
const TargetRegisterClass *RC;
LLT ArgTy;
MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);
std::tie(InputPtrReg, RC, ArgTy) =
Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
if (!InputPtrReg)
return DAG.getConstant(0, SL, PtrVT);
MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT);
return DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::Fixed(Offset));
}
SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG,
const SDLoc &SL) const {
uint64_t Offset = getImplicitParameterOffset(DAG.getMachineFunction(),
FIRST_IMPLICIT);
return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset);
}
SDValue SITargetLowering::getLDSKernelId(SelectionDAG &DAG,
const SDLoc &SL) const {
Function &F = DAG.getMachineFunction().getFunction();
Optional<uint32_t> KnownSize =
AMDGPUMachineFunction::getLDSKernelIdMetadata(F);
if (KnownSize.has_value())
return DAG.getConstant(KnownSize.value(), SL, MVT::i32);
return SDValue();
}
SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
const SDLoc &SL, SDValue Val,
bool Signed,
const ISD::InputArg *Arg) const {
if (VT.isVector() &&
VT.getVectorNumElements() != MemVT.getVectorNumElements()) {
EVT NarrowedVT =
EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(),
VT.getVectorNumElements());
Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, NarrowedVT, Val,
DAG.getConstant(0, SL, MVT::i32));
}
if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
VT.bitsLT(MemVT)) {
unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
}
if (MemVT.isFloatingPoint())
Val = getFPExtOrFPRound(DAG, Val, SL, VT);
else if (Signed)
Val = DAG.getSExtOrTrunc(Val, SL, VT);
else
Val = DAG.getZExtOrTrunc(Val, SL, VT);
return Val;
}
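// Load a kernel argument from the kernarg segment. Sub-dword arguments with
// insufficient alignment are loaded as the containing dword and shifted into
// place.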
SDValue SITargetLowering::lowerKernargMemParameter(
SelectionDAG &DAG, EVT VT, EVT MemVT, const SDLoc &SL, SDValue Chain,
uint64_t Offset, Align Alignment, bool Signed,
const ISD::InputArg *Arg) const {
MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
if (MemVT.getStoreSize() < 4 && Alignment < 4) {
int64_t AlignDownOffset = alignDown(Offset, 4);
int64_t OffsetDiff = Offset - AlignDownOffset;
EVT IntVT = MemVT.changeTypeToInteger();
SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, AlignDownOffset);
SDValue Load = DAG.getLoad(MVT::i32, SL, Chain, Ptr, PtrInfo, Align(4),
MachineMemOperand::MODereferenceable |
MachineMemOperand::MOInvariant);
SDValue ShiftAmt = DAG.getConstant(OffsetDiff * 8, SL, MVT::i32);
SDValue Extract = DAG.getNode(ISD::SRL, SL, MVT::i32, Load, ShiftAmt);
SDValue ArgVal = DAG.getNode(ISD::TRUNCATE, SL, IntVT, Extract);
ArgVal = DAG.getNode(ISD::BITCAST, SL, MemVT, ArgVal);
ArgVal = convertArgType(DAG, VT, MemVT, SL, ArgVal, Signed, Arg);
return DAG.getMergeValues({ ArgVal, Load.getValue(1) }, SL);
}
SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Alignment,
MachineMemOperand::MODereferenceable |
MachineMemOperand::MOInvariant);
SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
}
SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
const SDLoc &SL, SDValue Chain,
const ISD::InputArg &Arg) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo &MFI = MF.getFrameInfo();
if (Arg.Flags.isByVal()) {
unsigned Size = Arg.Flags.getByValSize();
int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
return DAG.getFrameIndex(FrameIdx, MVT::i32);
}
unsigned ArgOffset = VA.getLocMemOffset();
unsigned ArgSize = VA.getValVT().getStoreSize();
int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);
SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
SDValue ArgValue;
ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
MVT MemVT = VA.getValVT();
switch (VA.getLocInfo()) {
default:
break;
case CCValAssign::BCvt:
MemVT = VA.getLocVT();
break;
case CCValAssign::SExt:
ExtType = ISD::SEXTLOAD;
break;
case CCValAssign::ZExt:
ExtType = ISD::ZEXTLOAD;
break;
case CCValAssign::AExt:
ExtType = ISD::EXTLOAD;
break;
}
ArgValue = DAG.getExtLoad(
ExtType, SL, VA.getLocVT(), Chain, FIN,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
MemVT);
return ArgValue;
}
SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG,
const SIMachineFunctionInfo &MFI,
EVT VT,
AMDGPUFunctionArgInfo::PreloadedValue PVID) const {
const ArgDescriptor *Reg;
const TargetRegisterClass *RC;
LLT Ty;
std::tie(Reg, RC, Ty) = MFI.getPreloadedValue(PVID);
if (!Reg) {
if (PVID == AMDGPUFunctionArgInfo::PreloadedValue::KERNARG_SEGMENT_PTR) {
return DAG.getConstant(0, SDLoc(), VT);
}
return DAG.getUNDEF(VT);
}
return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT);
}
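// For AMDGPU_PS, skip interpolant inputs that are neither used nor already
// marked as allocated, and record which PS inputs are allocated and enabled.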
static void processPSInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
CallingConv::ID CallConv,
ArrayRef<ISD::InputArg> Ins, BitVector &Skipped,
FunctionType *FType,
SIMachineFunctionInfo *Info) {
for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
const ISD::InputArg *Arg = &Ins[I];
assert((!Arg->VT.isVector() || Arg->VT.getScalarSizeInBits() == 16) &&
"vector type argument should have been split");
if (CallConv == CallingConv::AMDGPU_PS &&
!Arg->Flags.isInReg() && PSInputNum <= 15) {
bool SkipArg = !Arg->Used && !Info->isPSInputAllocated(PSInputNum);
if (Arg->Flags.isSplit()) {
while (!Arg->Flags.isSplitEnd()) {
assert((!Arg->VT.isVector() ||
Arg->VT.getScalarSizeInBits() == 16) &&
"unexpected vector split in ps argument type");
if (!SkipArg)
Splits.push_back(*Arg);
Arg = &Ins[++I];
}
}
if (SkipArg) {
Skipped.set(Arg->getOrigArgIndex());
++PSInputNum;
continue;
}
Info->markPSInputAllocated(PSInputNum);
if (Arg->Used)
Info->markPSInputEnabled(PSInputNum);
++PSInputNum;
}
Splits.push_back(*Arg);
}
}
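// Allocate the workitem ID VGPRs for an entry function: VGPR0..VGPR2 hold
// X, Y and Z, or all three are packed into VGPR0 on subtargets with a
// packed TID.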
void SITargetLowering::allocateSpecialEntryInputVGPRs(CCState &CCInfo,
MachineFunction &MF,
const SIRegisterInfo &TRI,
SIMachineFunctionInfo &Info) const {
const LLT S32 = LLT::scalar(32);
MachineRegisterInfo &MRI = MF.getRegInfo();
if (Info.hasWorkItemIDX()) {
Register Reg = AMDGPU::VGPR0;
MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);
CCInfo.AllocateReg(Reg);
unsigned Mask = (Subtarget->hasPackedTID() &&
Info.hasWorkItemIDY()) ? 0x3ff : ~0u;
Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg, Mask));
}
if (Info.hasWorkItemIDY()) {
assert(Info.hasWorkItemIDX());
if (Subtarget->hasPackedTID()) {
Info.setWorkItemIDY(ArgDescriptor::createRegister(AMDGPU::VGPR0,
0x3ff << 10));
} else {
unsigned Reg = AMDGPU::VGPR1;
MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);
CCInfo.AllocateReg(Reg);
Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg));
}
}
if (Info.hasWorkItemIDZ()) {
assert(Info.hasWorkItemIDX() && Info.hasWorkItemIDY());
if (Subtarget->hasPackedTID()) {
Info.setWorkItemIDZ(ArgDescriptor::createRegister(AMDGPU::VGPR0,
0x3ff << 20));
} else {
unsigned Reg = AMDGPU::VGPR2;
MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);
CCInfo.AllocateReg(Reg);
Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg));
}
}
}
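// Allocate one of the first 32 argument VGPRs for a special input, falling
// back to a stack slot once they are exhausted.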
static ArgDescriptor allocateVGPR32Input(CCState &CCInfo, unsigned Mask = ~0u,
ArgDescriptor Arg = ArgDescriptor()) {
if (Arg.isSet())
return ArgDescriptor::createArg(Arg, Mask);
ArrayRef<MCPhysReg> ArgVGPRs
= makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32);
unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs);
if (RegIdx == ArgVGPRs.size()) {
int64_t Offset = CCInfo.AllocateStack(4, Align(4));
return ArgDescriptor::createStack(Offset, Mask);
}
unsigned Reg = ArgVGPRs[RegIdx];
Reg = CCInfo.AllocateReg(Reg);
assert(Reg != AMDGPU::NoRegister);
MachineFunction &MF = CCInfo.getMachineFunction();
Register LiveInVReg = MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
MF.getRegInfo().setType(LiveInVReg, LLT::scalar(32));
return ArgDescriptor::createRegister(Reg, Mask);
}
static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo,
const TargetRegisterClass *RC,
unsigned NumArgRegs) {
ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), 32);
unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs);
if (RegIdx == ArgSGPRs.size())
report_fatal_error("ran out of SGPRs for arguments");
unsigned Reg = ArgSGPRs[RegIdx];
Reg = CCInfo.AllocateReg(Reg);
assert(Reg != AMDGPU::NoRegister);
MachineFunction &MF = CCInfo.getMachineFunction();
MF.addLiveIn(Reg, RC);
return ArgDescriptor::createRegister(Reg);
}
static void allocateFixedSGPRInputImpl(CCState &CCInfo,
const TargetRegisterClass *RC,
MCRegister Reg) {
Reg = CCInfo.AllocateReg(Reg);
assert(Reg != AMDGPU::NoRegister);
MachineFunction &MF = CCInfo.getMachineFunction();
MF.addLiveIn(Reg, RC);
}
static void allocateSGPR32Input(CCState &CCInfo, ArgDescriptor &Arg) {
if (Arg) {
allocateFixedSGPRInputImpl(CCInfo, &AMDGPU::SGPR_32RegClass,
Arg.getRegister());
} else
Arg = allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32);
}
static void allocateSGPR64Input(CCState &CCInfo, ArgDescriptor &Arg) {
if (Arg) {
allocateFixedSGPRInputImpl(CCInfo, &AMDGPU::SGPR_64RegClass,
Arg.getRegister());
} else
Arg = allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16);
}
void SITargetLowering::allocateSpecialInputVGPRs(
CCState &CCInfo, MachineFunction &MF,
const SIRegisterInfo &TRI, SIMachineFunctionInfo &Info) const {
const unsigned Mask = 0x3ff;
ArgDescriptor Arg;
if (Info.hasWorkItemIDX()) {
Arg = allocateVGPR32Input(CCInfo, Mask);
Info.setWorkItemIDX(Arg);
}
if (Info.hasWorkItemIDY()) {
Arg = allocateVGPR32Input(CCInfo, Mask << 10, Arg);
Info.setWorkItemIDY(Arg);
}
if (Info.hasWorkItemIDZ())
Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo, Mask << 20, Arg));
}
void SITargetLowering::allocateSpecialInputVGPRsFixed(
CCState &CCInfo, MachineFunction &MF,
const SIRegisterInfo &TRI, SIMachineFunctionInfo &Info) const {
Register Reg = CCInfo.AllocateReg(AMDGPU::VGPR31);
if (!Reg)
report_fatal_error("failed to allocated VGPR for implicit arguments");
const unsigned Mask = 0x3ff;
Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg, Mask));
Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg, Mask << 10));
Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg, Mask << 20));
}
void SITargetLowering::allocateSpecialInputSGPRs(
CCState &CCInfo,
MachineFunction &MF,
const SIRegisterInfo &TRI,
SIMachineFunctionInfo &Info) const {
auto &ArgInfo = Info.getArgInfo();
if (Info.hasDispatchPtr())
allocateSGPR64Input(CCInfo, ArgInfo.DispatchPtr);
if (Info.hasQueuePtr() && AMDGPU::getAmdhsaCodeObjectVersion() < 5)
allocateSGPR64Input(CCInfo, ArgInfo.QueuePtr);
if (Info.hasImplicitArgPtr())
allocateSGPR64Input(CCInfo, ArgInfo.ImplicitArgPtr);
if (Info.hasDispatchID())
allocateSGPR64Input(CCInfo, ArgInfo.DispatchID);
if (Info.hasWorkGroupIDX())
allocateSGPR32Input(CCInfo, ArgInfo.WorkGroupIDX);
if (Info.hasWorkGroupIDY())
allocateSGPR32Input(CCInfo, ArgInfo.WorkGroupIDY);
if (Info.hasWorkGroupIDZ())
allocateSGPR32Input(CCInfo, ArgInfo.WorkGroupIDZ);
if (Info.hasLDSKernelId())
allocateSGPR32Input(CCInfo, ArgInfo.LDSKernelId);
}
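// Allocate the user SGPRs carrying the kernel's special inputs in ABI order.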
void SITargetLowering::allocateHSAUserSGPRs(CCState &CCInfo,
MachineFunction &MF,
const SIRegisterInfo &TRI,
SIMachineFunctionInfo &Info) const {
if (Info.hasImplicitBufferPtr()) {
Register ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
CCInfo.AllocateReg(ImplicitBufferPtrReg);
}
if (Info.hasPrivateSegmentBuffer()) {
Register PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
CCInfo.AllocateReg(PrivateSegmentBufferReg);
}
if (Info.hasDispatchPtr()) {
Register DispatchPtrReg = Info.addDispatchPtr(TRI);
MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
CCInfo.AllocateReg(DispatchPtrReg);
}
if (Info.hasQueuePtr() && AMDGPU::getAmdhsaCodeObjectVersion() < 5) {
Register QueuePtrReg = Info.addQueuePtr(TRI);
MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
CCInfo.AllocateReg(QueuePtrReg);
}
if (Info.hasKernargSegmentPtr()) {
MachineRegisterInfo &MRI = MF.getRegInfo();
Register InputPtrReg = Info.addKernargSegmentPtr(TRI);
CCInfo.AllocateReg(InputPtrReg);
Register VReg = MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
MRI.setType(VReg, LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
}
if (Info.hasDispatchID()) {
Register DispatchIDReg = Info.addDispatchID(TRI);
MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
CCInfo.AllocateReg(DispatchIDReg);
}
if (Info.hasFlatScratchInit() && !getSubtarget()->isAmdPalOS()) {
Register FlatScratchInitReg = Info.addFlatScratchInit(TRI);
MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
CCInfo.AllocateReg(FlatScratchInitReg);
}
if (Info.hasLDSKernelId()) {
Register Reg = Info.addLDSKernelId();
MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass);
CCInfo.AllocateReg(Reg);
}
}
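// Allocate the system SGPRs (workgroup IDs, workgroup info, private segment
// wave byte offset) that follow the user SGPRs, first padding to 16 SGPRs on
// subtargets with the user-SGPR-init bug.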
void SITargetLowering::allocateSystemSGPRs(CCState &CCInfo,
MachineFunction &MF,
SIMachineFunctionInfo &Info,
CallingConv::ID CallConv,
bool IsShader) const {
if (Subtarget->hasUserSGPRInit16Bug() && !IsShader) {
unsigned CurrentUserSGPRs = Info.getNumUserSGPRs();
unsigned NumRequiredSystemSGPRs = Info.hasWorkGroupIDX() +
Info.hasWorkGroupIDY() +
Info.hasWorkGroupIDZ() +
Info.hasWorkGroupInfo();
for (unsigned i = NumRequiredSystemSGPRs + CurrentUserSGPRs; i < 16; ++i) {
Register Reg = Info.addReservedUserSGPR();
MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass);
CCInfo.AllocateReg(Reg);
}
}
if (Info.hasWorkGroupIDX()) {
Register Reg = Info.addWorkGroupIDX();
MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass);
CCInfo.AllocateReg(Reg);
}
if (Info.hasWorkGroupIDY()) {
Register Reg = Info.addWorkGroupIDY();
MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass);
CCInfo.AllocateReg(Reg);
}
if (Info.hasWorkGroupIDZ()) {
Register Reg = Info.addWorkGroupIDZ();
MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass);
CCInfo.AllocateReg(Reg);
}
if (Info.hasWorkGroupInfo()) {
Register Reg = Info.addWorkGroupInfo();
MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass);
CCInfo.AllocateReg(Reg);
}
if (Info.hasPrivateSegmentWaveByteOffset()) {
unsigned PrivateSegmentWaveByteOffsetReg;
if (IsShader) {
PrivateSegmentWaveByteOffsetReg =
Info.getPrivateSegmentWaveByteOffsetSystemSGPR();
if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
}
} else
PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();
MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
}
assert(!Subtarget->hasUserSGPRInit16Bug() || IsShader ||
Info.getNumPreloadedSGPRs() >= 16);
}
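// Reserve the registers needed for private (scratch) memory access: the
// scratch resource descriptor, the stack pointer and, if a frame pointer is
// required, the frame offset register.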
static void reservePrivateMemoryRegs(const TargetMachine &TM,
MachineFunction &MF,
const SIRegisterInfo &TRI,
SIMachineFunctionInfo &Info) {
MachineFrameInfo &MFI = MF.getFrameInfo();
bool HasStackObjects = MFI.hasStackObjects();
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
if (HasStackObjects)
Info.setHasNonSpillStackObjects(true);
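// At -O0 almost everything live across blocks is spilled, so conservatively
// assume stack objects will be needed.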
if (TM.getOptLevel() == CodeGenOpt::None)
HasStackObjects = true;
bool RequiresStackAccess = HasStackObjects || MFI.hasCalls();
if (!ST.enableFlatScratch()) {
if (RequiresStackAccess && ST.isAmdHsaOrMesa(MF.getFunction())) {
Register PrivateSegmentBufferReg =
Info.getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
Info.setScratchRSrcReg(PrivateSegmentBufferReg);
} else {
unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF);
Info.setScratchRSrcReg(ReservedBufferReg);
}
}
MachineRegisterInfo &MRI = MF.getRegInfo();
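// Use SGPR32 as the stack pointer unless it is already taken by an argument;
// in that case (a graphics shader with many inputs) pick the first SGPR that
// is not a live-in.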
if (!MRI.isLiveIn(AMDGPU::SGPR32)) {
Info.setStackPtrOffsetReg(AMDGPU::SGPR32);
} else {
assert(AMDGPU::isShader(MF.getFunction().getCallingConv()));
if (MFI.hasCalls())
report_fatal_error("call in graphics shader with too many input SGPRs");
for (unsigned Reg : AMDGPU::SGPR_32RegClass) {
if (!MRI.isLiveIn(Reg)) {
Info.setStackPtrOffsetReg(Reg);
break;
}
}
if (Info.getStackPtrOffsetReg() == AMDGPU::SP_REG)
report_fatal_error("failed to find register for SP");
}
if (ST.getFrameLowering()->hasFP(MF)) {
Info.setFrameOffsetReg(AMDGPU::SGPR33);
}
}
bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const {
const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
return !Info->isEntryFunction();
}
void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
}
void SITargetLowering::insertCopiesSplitCSR(
MachineBasicBlock *Entry,
const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
if (!IStart)
return;
const TargetInstrInfo *TII = Subtarget->getInstrInfo();
MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
MachineBasicBlock::iterator MBBI = Entry->begin();
for (const MCPhysReg *I = IStart; *I; ++I) {
const TargetRegisterClass *RC = nullptr;
if (AMDGPU::SReg_64RegClass.contains(*I))
RC = &AMDGPU::SGPR_64RegClass;
else if (AMDGPU::SReg_32RegClass.contains(*I))
RC = &AMDGPU::SGPR_32RegClass;
else
llvm_unreachable("Unexpected register class in CSRsViaCopy!");
Register NewVR = MRI->createVirtualRegister(RC);
Entry->addLiveIn(*I);
BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
.addReg(*I);
for (auto *Exit : Exits)
BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
TII->get(TargetOpcode::COPY), *I)
.addReg(NewVR);
}
}
SDValue SITargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
MachineFunction &MF = DAG.getMachineFunction();
const Function &Fn = MF.getFunction();
FunctionType *FType = MF.getFunction().getFunctionType();
SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
if (Subtarget->isAmdHsaOS() && AMDGPU::isGraphics(CallConv)) {
DiagnosticInfoUnsupported NoGraphicsHSA(
Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
DAG.getContext()->diagnose(NoGraphicsHSA);
return DAG.getEntryNode();
}
Info->allocateModuleLDSGlobal(Fn);
SmallVector<ISD::InputArg, 16> Splits;
SmallVector<CCValAssign, 16> ArgLocs;
BitVector Skipped(Ins.size());
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
*DAG.getContext());
bool IsGraphics = AMDGPU::isGraphics(CallConv);
bool IsKernel = AMDGPU::isKernel(CallConv);
bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);
if (IsGraphics) {
assert(!Info->hasDispatchPtr() && !Info->hasKernargSegmentPtr() &&
(!Info->hasFlatScratchInit() || Subtarget->enableFlatScratch()) &&
!Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
!Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
!Info->hasLDSKernelId() && !Info->hasWorkItemIDX() &&
!Info->hasWorkItemIDY() && !Info->hasWorkItemIDZ());
}
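// A pixel shader needs at least one input enabled or the hardware can hang;
// if none of the low inputs are live (or only input 11 is), reserve
// VGPR0/VGPR1 and force PS input 0 on.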
if (CallConv == CallingConv::AMDGPU_PS) {
processPSInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);
if ((Info->getPSInputAddr() & 0x7F) == 0 ||
((Info->getPSInputAddr() & 0xF) == 0 && Info->isPSInputAllocated(11))) {
CCInfo.AllocateReg(AMDGPU::VGPR0);
CCInfo.AllocateReg(AMDGPU::VGPR1);
Info->markPSInputAllocated(0);
Info->markPSInputEnabled(0);
}
if (Subtarget->isAmdPalOS()) {
unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
if ((PsInputBits & 0x7F) == 0 ||
((PsInputBits & 0xF) == 0 && (PsInputBits >> 11 & 1)))
Info->markPSInputEnabled(
countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
}
} else if (IsKernel) {
assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
} else {
Splits.append(Ins.begin(), Ins.end());
}
if (IsEntryFunc) {
allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
} else if (!IsGraphics) {
allocateSpecialInputVGPRsFixed(CCInfo, MF, *TRI, *Info);
}
if (IsKernel) {
analyzeFormalArgumentsCompute(CCInfo, Ins);
} else {
CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
}
SmallVector<SDValue, 16> Chains;
const Align KernelArgBaseAlign = Align(16);
for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
const ISD::InputArg &Arg = Ins[i];
if (Arg.isOrigArg() && Skipped[Arg.getOrigArgIndex()]) {
InVals.push_back(DAG.getUNDEF(Arg.VT));
continue;
}
CCValAssign &VA = ArgLocs[ArgIdx++];
MVT VT = VA.getLocVT();
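// Kernel arguments are not passed in registers; load them from the kernarg
// segment at the assigned offset and alignment.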
if (IsEntryFunc && VA.isMemLoc()) {
VT = Ins[i].VT;
EVT MemVT = VA.getLocVT();
const uint64_t Offset = VA.getLocMemOffset();
Align Alignment = commonAlignment(KernelArgBaseAlign, Offset);
if (Arg.Flags.isByRef()) {
SDValue Ptr = lowerKernArgParameterPtr(DAG, DL, Chain, Offset);
const GCNTargetMachine &TM =
static_cast<const GCNTargetMachine &>(getTargetMachine());
if (!TM.isNoopAddrSpaceCast(AMDGPUAS::CONSTANT_ADDRESS,
Arg.Flags.getPointerAddrSpace())) {
Ptr = DAG.getAddrSpaceCast(DL, VT, Ptr, AMDGPUAS::CONSTANT_ADDRESS,
Arg.Flags.getPointerAddrSpace());
}
InVals.push_back(Ptr);
continue;
}
SDValue Arg = lowerKernargMemParameter(
DAG, VT, MemVT, DL, Chain, Offset, Alignment, Ins[i].Flags.isSExt(), &Ins[i]);
Chains.push_back(Arg.getValue(1));
auto *ParamTy =
dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
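// On Southern Islands, local and region pointers are just 16-bit offsets into
// LDS/GDS, so the high bits are known to be zero.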
if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
ParamTy && (ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
ParamTy->getAddressSpace() == AMDGPUAS::REGION_ADDRESS)) {
Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
DAG.getValueType(MVT::i16));
}
InVals.push_back(Arg);
continue;
} else if (!IsEntryFunc && VA.isMemLoc()) {
SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg);
InVals.push_back(Val);
if (!Arg.Flags.isByVal())
Chains.push_back(Val.getValue(1));
continue;
}
assert(VA.isRegLoc() && "Parameter must be in a register!");
Register Reg = VA.getLocReg();
const TargetRegisterClass *RC = nullptr;
if (AMDGPU::VGPR_32RegClass.contains(Reg))
RC = &AMDGPU::VGPR_32RegClass;
else if (AMDGPU::SGPR_32RegClass.contains(Reg))
RC = &AMDGPU::SGPR_32RegClass;
else
llvm_unreachable("Unexpected register class in LowerFormalArguments!");
EVT ValVT = VA.getValVT();
Reg = MF.addLiveIn(Reg, RC);
SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);
if (Arg.Flags.isSRet()) {
unsigned NumBits
= 32 - getSubtarget()->getKnownHighZeroBitsForFrameIndex();
Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits)));
}
switch (VA.getLocInfo()) {
case CCValAssign::Full:
break;
case CCValAssign::BCvt:
Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
break;
case CCValAssign::SExt:
Val = DAG.getNode(ISD::AssertSext, DL, VT, Val,
DAG.getValueType(ValVT));
Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
break;
case CCValAssign::ZExt:
Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
DAG.getValueType(ValVT));
Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
break;
case CCValAssign::AExt:
Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
break;
default:
llvm_unreachable("Unknown loc info!");
}
InVals.push_back(Val);
}
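// Entry functions get their system SGPRs now; non-entry callees instead
// reserve the scratch resource register and any special input SGPRs.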
if (IsEntryFunc) {
allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsGraphics);
} else {
CCInfo.AllocateReg(Info->getScratchRSrcReg());
if (!IsGraphics)
allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
}
auto &ArgUsageInfo =
DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
ArgUsageInfo.setFuncArgInfo(Fn, Info->getArgInfo());
unsigned StackArgSize = CCInfo.getNextStackOffset();
Info->setBytesInStackArgArea(StackArgSize);
return Chains.empty() ? Chain :
DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
}
bool SITargetLowering::CanLowerReturn(
CallingConv::ID CallConv,
MachineFunction &MF, bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext &Context) const {
if (AMDGPU::isEntryFunctionCC(CallConv))
return true;
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));
}
SDValue
SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SDLoc &DL, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
if (AMDGPU::isKernel(CallConv)) {
return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
OutVals, DL, DAG);
}
bool IsShader = AMDGPU::isShader(CallConv);
Info->setIfReturnsVoid(Outs.empty());
bool IsWaveEnd = Info->returnsVoid() && IsShader;
SmallVector<CCValAssign, 48> RVLocs;
SmallVector<ISD::OutputArg, 48> Splits;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
*DAG.getContext());
CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
SDValue Flag;
SmallVector<SDValue, 48> RetOps;
RetOps.push_back(Chain);
for (unsigned I = 0, RealRVLocIdx = 0, E = RVLocs.size(); I != E;
++I, ++RealRVLocIdx) {
CCValAssign &VA = RVLocs[I];
assert(VA.isRegLoc() && "Can only return in registers!");
SDValue Arg = OutVals[RealRVLocIdx];
switch (VA.getLocInfo()) {
case CCValAssign::Full:
break;
case CCValAssign::BCvt:
Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
break;
case CCValAssign::SExt:
Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
break;
case CCValAssign::ZExt:
Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
break;
case CCValAssign::AExt:
Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
break;
default:
llvm_unreachable("Unknown loc info!");
}
Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
Flag = Chain.getValue(1);
RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
}
if (!Info->isEntryFunction()) {
const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
const MCPhysReg *I =
TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
if (I) {
for (; *I; ++I) {
if (AMDGPU::SReg_64RegClass.contains(*I))
RetOps.push_back(DAG.getRegister(*I, MVT::i64));
else if (AMDGPU::SReg_32RegClass.contains(*I))
RetOps.push_back(DAG.getRegister(*I, MVT::i32));
else
llvm_unreachable("Unexpected register class in CSRsViaCopy!");
}
}
}
RetOps[0] = Chain;
if (Flag.getNode())
RetOps.push_back(Flag);
unsigned Opc = AMDGPUISD::ENDPGM;
if (!IsWaveEnd)
Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG;
return DAG.getNode(Opc, DL, MVT::Other, RetOps);
}
SDValue SITargetLowering::LowerCallResult(
SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn,
SDValue ThisVal) const {
CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg);
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
*DAG.getContext());
CCInfo.AnalyzeCallResult(Ins, RetCC);
for (unsigned i = 0; i != RVLocs.size(); ++i) {
CCValAssign VA = RVLocs[i];
SDValue Val;
if (VA.isRegLoc()) {
Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
Chain = Val.getValue(1);
InFlag = Val.getValue(2);
} else if (VA.isMemLoc()) {
report_fatal_error("TODO: return values in memory");
} else
llvm_unreachable("unknown argument location type");
switch (VA.getLocInfo()) {
case CCValAssign::Full:
break;
case CCValAssign::BCvt:
Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
break;
case CCValAssign::ZExt:
Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
DAG.getValueType(VA.getValVT()));
Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
break;
case CCValAssign::SExt:
Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
DAG.getValueType(VA.getValVT()));
Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
break;
case CCValAssign::AExt:
Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
break;
default:
llvm_unreachable("Unknown loc info!");
}
InVals.push_back(Val);
}
return Chain;
}
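// Pass the implicit special inputs (dispatch pointer, queue pointer,
// workgroup and workitem IDs, ...) that the callee needs, separately from the
// explicit IR arguments.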
void SITargetLowering::passSpecialInputs(
CallLoweringInfo &CLI,
CCState &CCInfo,
const SIMachineFunctionInfo &Info,
SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
SmallVectorImpl<SDValue> &MemOpChains,
SDValue Chain) const {
if (!CLI.CB)
return;
SelectionDAG &DAG = CLI.DAG;
const SDLoc &DL = CLI.DL;
const Function &F = DAG.getMachineFunction().getFunction();
const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo();
const AMDGPUFunctionArgInfo *CalleeArgInfo
= &AMDGPUArgumentUsageInfo::FixedABIFunctionInfo;
if (const Function *CalleeFunc = CLI.CB->getCalledFunction()) {
auto &ArgUsageInfo =
DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
CalleeArgInfo = &ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc);
}
static constexpr std::pair<AMDGPUFunctionArgInfo::PreloadedValue,
StringLiteral> ImplicitAttrs[] = {
{AMDGPUFunctionArgInfo::DISPATCH_PTR, "amdgpu-no-dispatch-ptr"},
{AMDGPUFunctionArgInfo::QUEUE_PTR, "amdgpu-no-queue-ptr"},
{AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR, "amdgpu-no-implicitarg-ptr"},
{AMDGPUFunctionArgInfo::DISPATCH_ID, "amdgpu-no-dispatch-id"},
{AMDGPUFunctionArgInfo::WORKGROUP_ID_X, "amdgpu-no-workgroup-id-x"},
{AMDGPUFunctionArgInfo::WORKGROUP_ID_Y, "amdgpu-no-workgroup-id-y"},
{AMDGPUFunctionArgInfo::WORKGROUP_ID_Z, "amdgpu-no-workgroup-id-z"},
{AMDGPUFunctionArgInfo::LDS_KERNEL_ID, "amdgpu-no-lds-kernel-id"},
};
for (auto Attr : ImplicitAttrs) {
const ArgDescriptor *OutgoingArg;
const TargetRegisterClass *ArgRC;
LLT ArgTy;
AMDGPUFunctionArgInfo::PreloadedValue InputID = Attr.first;
if (CLI.CB->hasFnAttr(Attr.second))
continue;
std::tie(OutgoingArg, ArgRC, ArgTy) =
CalleeArgInfo->getPreloadedValue(InputID);
if (!OutgoingArg)
continue;
const ArgDescriptor *IncomingArg;
const TargetRegisterClass *IncomingArgRC;
LLT Ty;
std::tie(IncomingArg, IncomingArgRC, Ty) =
CallerArgInfo.getPreloadedValue(InputID);
assert(IncomingArgRC == ArgRC);
EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32;
SDValue InputReg;
if (IncomingArg) {
InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg);
} else if (InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR) {
InputReg = getImplicitArgPtr(DAG, DL);
} else if (InputID == AMDGPUFunctionArgInfo::LDS_KERNEL_ID) {
Optional<uint32_t> Id = AMDGPUMachineFunction::getLDSKernelIdMetadata(F);
if (Id.has_value()) {
InputReg = DAG.getConstant(Id.value(), DL, ArgVT);
} else {
InputReg = DAG.getUNDEF(ArgVT);
}
} else {
InputReg = DAG.getUNDEF(ArgVT);
}
if (OutgoingArg->isRegister()) {
RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
if (!CCInfo.AllocateReg(OutgoingArg->getRegister()))
report_fatal_error("failed to allocate implicit input argument");
} else {
unsigned SpecialArgOffset =
CCInfo.AllocateStack(ArgVT.getStoreSize(), Align(4));
SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
SpecialArgOffset);
MemOpChains.push_back(ArgStore);
}
}
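// Workitem IDs are handled separately below because all three may be packed
// into a single outgoing register.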
const ArgDescriptor *OutgoingArg;
const TargetRegisterClass *ArgRC;
LLT Ty;
std::tie(OutgoingArg, ArgRC, Ty) =
CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
if (!OutgoingArg)
std::tie(OutgoingArg, ArgRC, Ty) =
CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
if (!OutgoingArg)
std::tie(OutgoingArg, ArgRC, Ty) =
CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
if (!OutgoingArg)
return;
const ArgDescriptor *IncomingArgX = std::get<0>(
CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X));
const ArgDescriptor *IncomingArgY = std::get<0>(
CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y));
const ArgDescriptor *IncomingArgZ = std::get<0>(
CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z));
SDValue InputReg;
SDLoc SL;
const bool NeedWorkItemIDX = !CLI.CB->hasFnAttr("amdgpu-no-workitem-id-x");
const bool NeedWorkItemIDY = !CLI.CB->hasFnAttr("amdgpu-no-workitem-id-y");
const bool NeedWorkItemIDZ = !CLI.CB->hasFnAttr("amdgpu-no-workitem-id-z");
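// Pack the workitem IDs into one 32-bit value: X in bits 0-9, Y in bits
// 10-19, Z in bits 20-29.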
if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo->WorkItemIDX &&
NeedWorkItemIDX) {
if (Subtarget->getMaxWorkitemID(F, 0) != 0) {
InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgX);
} else {
InputReg = DAG.getConstant(0, DL, MVT::i32);
}
}
if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo->WorkItemIDY &&
NeedWorkItemIDY && Subtarget->getMaxWorkitemID(F, 1) != 0) {
SDValue Y = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgY);
Y = DAG.getNode(ISD::SHL, SL, MVT::i32, Y,
DAG.getShiftAmountConstant(10, MVT::i32, SL));
InputReg = InputReg.getNode() ?
DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Y) : Y;
}
if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo->WorkItemIDZ &&
NeedWorkItemIDZ && Subtarget->getMaxWorkitemID(F, 2) != 0) {
SDValue Z = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgZ);
Z = DAG.getNode(ISD::SHL, SL, MVT::i32, Z,
DAG.getShiftAmountConstant(20, MVT::i32, SL));
InputReg = InputReg.getNode() ?
DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Z) : Z;
}
if (!InputReg && (NeedWorkItemIDX || NeedWorkItemIDY || NeedWorkItemIDZ)) {
if (!IncomingArgX && !IncomingArgY && !IncomingArgZ) {
InputReg = DAG.getUNDEF(MVT::i32);
} else {
ArgDescriptor IncomingArg = ArgDescriptor::createArg(
IncomingArgX ? *IncomingArgX :
IncomingArgY ? *IncomingArgY :
*IncomingArgZ, ~0u);
InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, IncomingArg);
}
}
if (OutgoingArg->isRegister()) {
if (InputReg)
RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
CCInfo.AllocateReg(OutgoingArg->getRegister());
} else {
unsigned SpecialArgOffset = CCInfo.AllocateStack(4, Align(4));
if (InputReg) {
SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
SpecialArgOffset);
MemOpChains.push_back(ArgStore);
}
}
}
static bool canGuaranteeTCO(CallingConv::ID CC) {
return CC == CallingConv::Fast;
}
static bool mayTailCallThisCC(CallingConv::ID CC) {
switch (CC) {
case CallingConv::C:
case CallingConv::AMDGPU_Gfx:
return true;
default:
return canGuaranteeTCO(CC);
}
}
bool SITargetLowering::isEligibleForTailCallOptimization(
SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
if (!mayTailCallThisCC(CalleeCC))
return false;
if (Callee->isDivergent())
return false;
MachineFunction &MF = DAG.getMachineFunction();
const Function &CallerF = MF.getFunction();
CallingConv::ID CallerCC = CallerF.getCallingConv();
const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
if (!CallerPreserved)
return false;
bool CCMatch = CallerCC == CalleeCC;
if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
if (canGuaranteeTCO(CalleeCC) && CCMatch)
return true;
return false;
}
if (IsVarArg)
return false;
for (const Argument &Arg : CallerF.args()) {
if (Arg.hasByValAttr())
return false;
}
LLVMContext &Ctx = *DAG.getContext();
if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins,
CCAssignFnForCall(CalleeCC, IsVarArg),
CCAssignFnForCall(CallerCC, IsVarArg)))
return false;
if (!CCMatch) {
const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
return false;
}
if (Outs.empty())
return true;
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx);
CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg));
const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
return false;
const MachineRegisterInfo &MRI = MF.getRegInfo();
return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals);
}
bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
if (!CI->isTailCall())
return false;
const Function *ParentFn = CI->getParent()->getParent();
if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv()))
return false;
return true;
}
SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
const SDLoc &DL = CLI.DL;
SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
bool &IsTailCall = CLI.IsTailCall;
CallingConv::ID CallConv = CLI.CallConv;
bool IsVarArg = CLI.IsVarArg;
bool IsSibCall = false;
bool IsThisReturn = false;
MachineFunction &MF = DAG.getMachineFunction();
if (Callee.isUndef() || isNullConstant(Callee)) {
if (!CLI.IsTailCall) {
for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I)
InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT));
}
return Chain;
}
if (IsVarArg) {
return lowerUnhandledCall(CLI, InVals,
"unsupported call to variadic function ");
}
if (!CLI.CB)
report_fatal_error("unsupported libcall legalization");
if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) {
return lowerUnhandledCall(CLI, InVals,
"unsupported required tail call to function ");
}
if (AMDGPU::isShader(CallConv)) {
return lowerUnhandledCall(CLI, InVals,
"unsupported call to a shader function ");
}
if (AMDGPU::isShader(MF.getFunction().getCallingConv()) &&
CallConv != CallingConv::AMDGPU_Gfx) {
return lowerUnhandledCall(CLI, InVals,
"unsupported calling convention for call from "
"graphics shader of function ");
}
if (IsTailCall) {
IsTailCall = isEligibleForTailCallOptimization(
Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall()) {
report_fatal_error("failed to perform tail call elimination on a call "
"site marked musttail");
}
bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
if (!TailCallOpt && IsTailCall)
IsSibCall = true;
if (IsTailCall)
++NumTailCalls;
}
const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
SmallVector<SDValue, 8> MemOpChains;
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg);
if (CallConv != CallingConv::AMDGPU_Gfx) {
passSpecialInputs(CLI, CCInfo, *Info, RegsToPass, MemOpChains, Chain);
}
CCInfo.AnalyzeCallOperands(Outs, AssignFn);
unsigned NumBytes = CCInfo.getNextStackOffset();
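// A sibling call reuses the caller's incoming stack argument area, so it
// needs no stack space of its own.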
if (IsSibCall) {
NumBytes = 0;
}
int32_t FPDiff = 0;
MachineFrameInfo &MFI = MF.getFrameInfo();
if (!IsSibCall) {
Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
if (!Subtarget->enableFlatScratch()) {
SmallVector<SDValue, 4> CopyFromChains;
SDValue ScratchRSrcReg
= DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32);
RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
CopyFromChains.push_back(ScratchRSrcReg.getValue(1));
Chain = DAG.getTokenFactor(DL, CopyFromChains);
}
}
MVT PtrVT = MVT::i32;
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
SDValue Arg = OutVals[i];
switch (VA.getLocInfo()) {
case CCValAssign::Full:
break;
case CCValAssign::BCvt:
Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
break;
case CCValAssign::ZExt:
Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
break;
case CCValAssign::SExt:
Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
break;
case CCValAssign::AExt:
Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
break;
case CCValAssign::FPExt:
Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
break;
default:
llvm_unreachable("Unknown loc info!");
}
if (VA.isRegLoc()) {
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
} else {
assert(VA.isMemLoc());
SDValue DstAddr;
MachinePointerInfo DstInfo;
unsigned LocMemOffset = VA.getLocMemOffset();
int32_t Offset = LocMemOffset;
SDValue PtrOff = DAG.getConstant(Offset, DL, PtrVT);
MaybeAlign Alignment;
if (IsTailCall) {
ISD::ArgFlagsTy Flags = Outs[i].Flags;
unsigned OpSize = Flags.isByVal() ?
Flags.getByValSize() : VA.getValVT().getStoreSize();
Alignment =
Flags.isByVal()
? Flags.getNonZeroByValAlign()
: commonAlignment(Subtarget->getStackAlignment(), Offset);
Offset = Offset + FPDiff;
int FI = MFI.CreateFixedObject(OpSize, Offset, true);
DstAddr = DAG.getFrameIndex(FI, PtrVT);
DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
Chain = addTokenForArgument(Chain, DAG, MFI, FI);
} else {
SDValue SP = DAG.getCopyFromReg(Chain, DL, Info->getStackPtrOffsetReg(),
MVT::i32);
DstAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, SP, PtrOff);
DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset);
Alignment =
commonAlignment(Subtarget->getStackAlignment(), LocMemOffset);
}
if (Outs[i].Flags.isByVal()) {
SDValue SizeNode =
DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32);
SDValue Cpy =
DAG.getMemcpy(Chain, DL, DstAddr, Arg, SizeNode,
Outs[i].Flags.getNonZeroByValAlign(),
false, true,
false, DstInfo,
MachinePointerInfo(AMDGPUAS::PRIVATE_ADDRESS));
MemOpChains.push_back(Cpy);
} else {
SDValue Store =
DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo, Alignment);
MemOpChains.push_back(Store);
}
}
}
if (!MemOpChains.empty())
Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
SDValue InFlag;
for (auto &RegToPass : RegsToPass) {
Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
RegToPass.second, InFlag);
InFlag = Chain.getValue(1);
}
if (IsTailCall && !IsSibCall) {
Chain = DAG.getCALLSEQ_END(Chain,
DAG.getTargetConstant(NumBytes, DL, MVT::i32),
DAG.getTargetConstant(0, DL, MVT::i32),
InFlag, DL);
InFlag = Chain.getValue(1);
}
std::vector<SDValue> Ops;
Ops.push_back(Chain);
Ops.push_back(Callee);
if (GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(Callee)) {
const GlobalValue *GV = GSD->getGlobal();
Ops.push_back(DAG.getTargetGlobalAddress(GV, DL, MVT::i64));
} else {
Ops.push_back(DAG.getTargetConstant(0, DL, MVT::i64));
}
if (IsTailCall) {
Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
}
for (auto &RegToPass : RegsToPass) {
Ops.push_back(DAG.getRegister(RegToPass.first,
RegToPass.second.getValueType()));
}
auto *TRI = static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo());
const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
assert(Mask && "Missing call preserved mask for calling convention");
Ops.push_back(DAG.getRegisterMask(Mask));
if (InFlag.getNode())
Ops.push_back(InFlag);
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
if (IsTailCall) {
MFI.setHasTailCall();
return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops);
}
SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops);
Chain = Call.getValue(0);
InFlag = Call.getValue(1);
uint64_t CalleePopBytes = NumBytes;
Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32),
DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32),
InFlag, DL);
if (!Ins.empty())
InFlag = Chain.getValue(1);
return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
InVals, IsThisReturn,
IsThisReturn ? OutVals[0] : SDValue());
}
SDValue SITargetLowering::lowerDYNAMIC_STACKALLOCImpl(
SDValue Op, SelectionDAG &DAG) const {
const MachineFunction &MF = DAG.getMachineFunction();
const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
SDLoc dl(Op);
EVT VT = Op.getValueType();
SDValue Tmp1 = Op;
SDValue Tmp2 = Op.getValue(1);
SDValue Tmp3 = Op.getOperand(2);
SDValue Chain = Tmp1.getOperand(0);
Register SPReg = Info->getStackPtrOffsetReg();
Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
SDValue Size = Tmp2.getOperand(1);
SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
Chain = SP.getValue(1);
MaybeAlign Alignment = cast<ConstantSDNode>(Tmp3)->getMaybeAlignValue();
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
const TargetFrameLowering *TFL = ST.getFrameLowering();
unsigned Opc =
TFL->getStackGrowthDirection() == TargetFrameLowering::StackGrowsUp ?
ISD::ADD : ISD::SUB;
SDValue ScaledSize = DAG.getNode(
ISD::SHL, dl, VT, Size,
DAG.getConstant(ST.getWavefrontSizeLog2(), dl, MVT::i32));
Align StackAlign = TFL->getStackAlign();
Tmp1 = DAG.getNode(Opc, dl, VT, SP, ScaledSize);
if (Alignment && *Alignment > StackAlign) {
Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
DAG.getConstant(-(uint64_t)Alignment->value()
<< ST.getWavefrontSizeLog2(),
dl, VT));
}
Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1);
Tmp2 = DAG.getCALLSEQ_END(
Chain, DAG.getIntPtrConstant(0, dl, true),
DAG.getIntPtrConstant(0, dl, true), SDValue(), dl);
return DAG.getMergeValues({Tmp1, Tmp2}, dl);
}
SDValue SITargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SelectionDAG &DAG) const {
SDValue Size = Op.getOperand(1);
if (isa<ConstantSDNode>(Size))
return lowerDYNAMIC_STACKALLOCImpl(Op, DAG);
return AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(Op, DAG);
}
Register SITargetLowering::getRegisterByName(const char* RegName, LLT VT,
const MachineFunction &MF) const {
Register Reg = StringSwitch<Register>(RegName)
.Case("m0", AMDGPU::M0)
.Case("exec", AMDGPU::EXEC)
.Case("exec_lo", AMDGPU::EXEC_LO)
.Case("exec_hi", AMDGPU::EXEC_HI)
.Case("flat_scratch", AMDGPU::FLAT_SCR)
.Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
.Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
.Default(Register());
if (Reg == AMDGPU::NoRegister) {
report_fatal_error(Twine("invalid register name \""
+ StringRef(RegName) + "\"."));
}
if (!Subtarget->hasFlatScrRegister() &&
Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
report_fatal_error(Twine("invalid register \""
+ StringRef(RegName) + "\" for subtarget."));
}
switch (Reg) {
case AMDGPU::M0:
case AMDGPU::EXEC_LO:
case AMDGPU::EXEC_HI:
case AMDGPU::FLAT_SCR_LO:
case AMDGPU::FLAT_SCR_HI:
if (VT.getSizeInBits() == 32)
return Reg;
break;
case AMDGPU::EXEC:
case AMDGPU::FLAT_SCR:
if (VT.getSizeInBits() == 64)
return Reg;
break;
default:
llvm_unreachable("missing register type checking");
}
report_fatal_error(Twine("invalid type for register \""
+ StringRef(RegName) + "\"."));
}
MachineBasicBlock *
SITargetLowering::splitKillBlock(MachineInstr &MI,
MachineBasicBlock *BB) const {
MachineBasicBlock *SplitBB = BB->splitAt(MI, /*UpdateLiveIns=*/false);
const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
return SplitBB;
}
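// Split MBB at MI into a loop block and a remainder block. If InstInLoop is
// true, MI becomes the only instruction in the loop body; otherwise it starts
// the remainder block. Returns {LoopBB, RemainderBB}.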
static std::pair<MachineBasicBlock *, MachineBasicBlock *>
splitBlockForLoop(MachineInstr &MI, MachineBasicBlock &MBB, bool InstInLoop) {
MachineFunction *MF = MBB.getParent();
MachineBasicBlock::iterator I(&MI);
MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
MachineFunction::iterator MBBI(MBB);
++MBBI;
MF->insert(MBBI, LoopBB);
MF->insert(MBBI, RemainderBB);
LoopBB->addSuccessor(LoopBB);
LoopBB->addSuccessor(RemainderBB);
RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
if (InstInLoop) {
auto Next = std::next(I);
LoopBB->splice(LoopBB->begin(), &MBB, I, Next);
RemainderBB->splice(RemainderBB->begin(), &MBB, Next, MBB.end());
} else {
RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
}
MBB.addSuccessor(LoopBB);
return std::make_pair(LoopBB, RemainderBB);
}
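// Append an S_WAITCNT 0 after MI and bundle the two instructions so nothing
// can be scheduled between them.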
void SITargetLowering::bundleInstWithWaitcnt(MachineInstr &MI) const {
MachineBasicBlock *MBB = MI.getParent();
const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
auto I = MI.getIterator();
auto E = std::next(I);
BuildMI(*MBB, E, MI.getDebugLoc(), TII->get(AMDGPU::S_WAITCNT))
.addImm(0);
MIBundleBuilder Bundler(*MBB, I, E);
finalizeBundle(*MBB, Bundler.begin());
}
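// Wrap a GWS instruction in a loop that clears the MEM_VIOL trap status bit,
// waits for the operation, and retries while MEM_VIOL is set again (used on
// subtargets without GWS auto-replay).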
MachineBasicBlock *
SITargetLowering::emitGWSMemViolTestLoop(MachineInstr &MI,
MachineBasicBlock *BB) const {
const DebugLoc &DL = MI.getDebugLoc();
MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
MachineBasicBlock *LoopBB;
MachineBasicBlock *RemainderBB;
const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
if (MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::data0))
Src->setIsKill(false);
std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, *BB, true);
MachineBasicBlock::iterator I = LoopBB->end();
const unsigned EncodedReg = AMDGPU::Hwreg::encodeHwreg(
AMDGPU::Hwreg::ID_TRAPSTS, AMDGPU::Hwreg::OFFSET_MEM_VIOL, 1);
BuildMI(*LoopBB, LoopBB->begin(), DL, TII->get(AMDGPU::S_SETREG_IMM32_B32))
.addImm(0)
.addImm(EncodedReg);
bundleInstWithWaitcnt(MI);
Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_GETREG_B32), Reg)
.addImm(EncodedReg);
BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CMP_LG_U32))
.addReg(Reg, RegState::Kill)
.addImm(0);
BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
.addMBB(LoopBB);
return RemainderBB;
}
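// Emit the body of the waterfall loop: readfirstlane the index, build an exec
// mask of the lanes with that index value, set M0 (or produce an SGPR index),
// and loop until every unique index value has been processed.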
static MachineBasicBlock::iterator
emitLoadM0FromVGPRLoop(const SIInstrInfo *TII, MachineRegisterInfo &MRI,
MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB,
const DebugLoc &DL, const MachineOperand &Idx,
unsigned InitReg, unsigned ResultReg, unsigned PhiReg,
unsigned InitSaveExecReg, int Offset, bool UseGPRIdxMode,
Register &SGPRIdxReg) {
MachineFunction *MF = OrigBB.getParent();
const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
const SIRegisterInfo *TRI = ST.getRegisterInfo();
MachineBasicBlock::iterator I = LoopBB.begin();
const TargetRegisterClass *BoolRC = TRI->getBoolRC();
Register PhiExec = MRI.createVirtualRegister(BoolRC);
Register NewExec = MRI.createVirtualRegister(BoolRC);
Register CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
Register CondReg = MRI.createVirtualRegister(BoolRC);
BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
.addReg(InitReg)
.addMBB(&OrigBB)
.addReg(ResultReg)
.addMBB(&LoopBB);
BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec)
.addReg(InitSaveExecReg)
.addMBB(&OrigBB)
.addReg(NewExec)
.addMBB(&LoopBB);
BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg)
.addReg(Idx.getReg(), getUndefRegState(Idx.isUndef()));
BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg)
.addReg(CurrentIdxReg)
.addReg(Idx.getReg(), 0, Idx.getSubReg());
BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32
: AMDGPU::S_AND_SAVEEXEC_B64),
NewExec)
.addReg(CondReg, RegState::Kill);
MRI.setSimpleHint(NewExec, CondReg);
if (UseGPRIdxMode) {
if (Offset == 0) {
SGPRIdxReg = CurrentIdxReg;
} else {
SGPRIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), SGPRIdxReg)
.addReg(CurrentIdxReg, RegState::Kill)
.addImm(Offset);
}
} else {
if (Offset == 0) {
BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
.addReg(CurrentIdxReg, RegState::Kill);
} else {
BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
.addReg(CurrentIdxReg, RegState::Kill)
.addImm(Offset);
}
}
unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
MachineInstr *InsertPt =
BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_XOR_B32_term
: AMDGPU::S_XOR_B64_term), Exec)
.addReg(Exec)
.addReg(NewExec);
BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
.addMBB(&LoopBB);
return InsertPt->getIterator();
}
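// Set M0 (or an SGPR index) from a possibly divergent VGPR index by emitting
// a waterfall loop around MI. EXEC is saved before the loop and restored in a
// landing-pad block; returns the insertion point for the indexed operation.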
static MachineBasicBlock::iterator
loadM0FromVGPR(const SIInstrInfo *TII, MachineBasicBlock &MBB, MachineInstr &MI,
unsigned InitResultReg, unsigned PhiReg, int Offset,
bool UseGPRIdxMode, Register &SGPRIdxReg) {
MachineFunction *MF = MBB.getParent();
const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
const SIRegisterInfo *TRI = ST.getRegisterInfo();
MachineRegisterInfo &MRI = MF->getRegInfo();
const DebugLoc &DL = MI.getDebugLoc();
MachineBasicBlock::iterator I(&MI);
const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
Register DstReg = MI.getOperand(0).getReg();
Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
Register TmpExec = MRI.createVirtualRegister(BoolXExecRC);
unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);
BuildMI(MBB, I, DL, TII->get(MovExecOpc), SaveExec)
.addReg(Exec);
MachineBasicBlock *LoopBB;
MachineBasicBlock *RemainderBB;
std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, MBB, false);
const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
InitResultReg, DstReg, PhiReg, TmpExec,
Offset, UseGPRIdxMode, SGPRIdxReg);
MachineBasicBlock* LandingPad = MF->CreateMachineBasicBlock();
MachineFunction::iterator MBBI(LoopBB);
++MBBI;
MF->insert(MBBI, LandingPad);
LoopBB->removeSuccessor(RemainderBB);
LandingPad->addSuccessor(RemainderBB);
LoopBB->addSuccessor(LandingPad);
MachineBasicBlock::iterator First = LandingPad->begin();
BuildMI(*LandingPad, First, DL, TII->get(MovExecOpc), Exec)
.addReg(SaveExec);
return InsPt;
}
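// Fold a constant index into a subregister if it is in range; otherwise
// return sub0 and leave the offset to be applied to the index register.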
static std::pair<unsigned, int>
computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
const TargetRegisterClass *SuperRC,
unsigned VecReg,
int Offset) {
int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32;
if (Offset >= NumElts || Offset < 0)
return std::make_pair(AMDGPU::sub0, Offset);
return std::make_pair(SIRegisterInfo::getSubRegFromChannel(Offset), 0);
}
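// Set M0 to the index operand plus Offset.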
static void setM0ToIndexFromSGPR(const SIInstrInfo *TII,
MachineRegisterInfo &MRI, MachineInstr &MI,
int Offset) {
MachineBasicBlock *MBB = MI.getParent();
const DebugLoc &DL = MI.getDebugLoc();
MachineBasicBlock::iterator I(&MI);
const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
assert(Idx->getReg() != AMDGPU::NoRegister);
if (Offset == 0) {
BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0).add(*Idx);
} else {
BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
.add(*Idx)
.addImm(Offset);
}
}
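// Return an SGPR containing the index operand plus Offset, reusing the index
// register itself when Offset is zero.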
static Register getIndirectSGPRIdx(const SIInstrInfo *TII,
MachineRegisterInfo &MRI, MachineInstr &MI,
int Offset) {
MachineBasicBlock *MBB = MI.getParent();
const DebugLoc &DL = MI.getDebugLoc();
MachineBasicBlock::iterator I(&MI);
const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
if (Offset == 0)
return Idx->getReg();
Register Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
.add(*Idx)
.addImm(Offset);
return Tmp;
}
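// Lower an SI_INDIRECT_SRC pseudo: extract a dynamically indexed 32-bit
// element from a vector register. A uniform (SGPR) index is handled directly
// via M0 or GPR-index mode; a divergent (VGPR) index needs a waterfall loop.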
static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
MachineBasicBlock &MBB,
const GCNSubtarget &ST) {
const SIInstrInfo *TII = ST.getInstrInfo();
const SIRegisterInfo &TRI = TII->getRegisterInfo();
MachineFunction *MF = MBB.getParent();
MachineRegisterInfo &MRI = MF->getRegInfo();
Register Dst = MI.getOperand(0).getReg();
const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
Register SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);
const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());
unsigned SubReg;
std::tie(SubReg, Offset)
= computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);
const bool UseGPRIdxMode = ST.useVGPRIndexMode();
if (TII->getRegisterInfo().isSGPRClass(IdxRC)) {
MachineBasicBlock::iterator I(&MI);
const DebugLoc &DL = MI.getDebugLoc();
if (UseGPRIdxMode) {
Register Idx = getIndirectSGPRIdx(TII, MRI, MI, Offset);
const MCInstrDesc &GPRIDXDesc =
TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), true);
BuildMI(MBB, I, DL, GPRIDXDesc, Dst)
.addReg(SrcReg)
.addReg(Idx)
.addImm(SubReg);
} else {
setM0ToIndexFromSGPR(TII, MRI, MI, Offset);
BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
.addReg(SrcReg, 0, SubReg)
.addReg(SrcReg, RegState::Implicit);
}
MI.eraseFromParent();
return &MBB;
}
const DebugLoc &DL = MI.getDebugLoc();
MachineBasicBlock::iterator I(&MI);
Register PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);
Register SGPRIdxReg;
auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg, Offset,
UseGPRIdxMode, SGPRIdxReg);
MachineBasicBlock *LoopBB = InsPt->getParent();
if (UseGPRIdxMode) {
const MCInstrDesc &GPRIDXDesc =
TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), true);
BuildMI(*LoopBB, InsPt, DL, GPRIDXDesc, Dst)
.addReg(SrcReg)
.addReg(SGPRIdxReg)
.addImm(SubReg);
} else {
BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
.addReg(SrcReg, 0, SubReg)
.addReg(SrcReg, RegState::Implicit);
}
MI.eraseFromParent();
return LoopBB;
}
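// Lower an SI_INDIRECT_DST pseudo: insert a 32-bit value into a dynamically
// indexed element of a vector register, using M0/GPR-index mode for uniform
// indices and a waterfall loop otherwise.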
static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
MachineBasicBlock &MBB,
const GCNSubtarget &ST) {
const SIInstrInfo *TII = ST.getInstrInfo();
const SIRegisterInfo &TRI = TII->getRegisterInfo();
MachineFunction *MF = MBB.getParent();
MachineRegisterInfo &MRI = MF->getRegInfo();
Register Dst = MI.getOperand(0).getReg();
const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg());
const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());
assert(Val->getReg());
unsigned SubReg;
std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
SrcVec->getReg(),
Offset);
const bool UseGPRIdxMode = ST.useVGPRIndexMode();
if (Idx->getReg() == AMDGPU::NoRegister) {
MachineBasicBlock::iterator I(&MI);
const DebugLoc &DL = MI.getDebugLoc();
assert(Offset == 0);
BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
.add(*SrcVec)
.add(*Val)
.addImm(SubReg);
MI.eraseFromParent();
return &MBB;
}
if (TII->getRegisterInfo().isSGPRClass(IdxRC)) {
MachineBasicBlock::iterator I(&MI);
const DebugLoc &DL = MI.getDebugLoc();
if (UseGPRIdxMode) {
Register Idx = getIndirectSGPRIdx(TII, MRI, MI, Offset);
const MCInstrDesc &GPRIDXDesc =
TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false);
BuildMI(MBB, I, DL, GPRIDXDesc, Dst)
.addReg(SrcVec->getReg())
.add(*Val)
.addReg(Idx)
.addImm(SubReg);
} else {
setM0ToIndexFromSGPR(TII, MRI, MI, Offset);
const MCInstrDesc &MovRelDesc = TII->getIndirectRegWriteMovRelPseudo(
TRI.getRegSizeInBits(*VecRC), 32, false);
BuildMI(MBB, I, DL, MovRelDesc, Dst)
.addReg(SrcVec->getReg())
.add(*Val)
.addImm(SubReg);
}
MI.eraseFromParent();
return &MBB;
}
if (Val->isReg())
MRI.clearKillFlags(Val->getReg());
const DebugLoc &DL = MI.getDebugLoc();
Register PhiReg = MRI.createVirtualRegister(VecRC);
Register SGPRIdxReg;
auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg, Offset,
UseGPRIdxMode, SGPRIdxReg);
MachineBasicBlock *LoopBB = InsPt->getParent();
if (UseGPRIdxMode) {
const MCInstrDesc &GPRIDXDesc =
TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false);
BuildMI(*LoopBB, InsPt, DL, GPRIDXDesc, Dst)
.addReg(PhiReg)
.add(*Val)
.addReg(SGPRIdxReg)
.addImm(AMDGPU::sub0);
} else {
const MCInstrDesc &MovRelDesc = TII->getIndirectRegWriteMovRelPseudo(
TRI.getRegSizeInBits(*VecRC), 32, false);
BuildMI(*LoopBB, InsPt, DL, MovRelDesc, Dst)
.addReg(PhiReg)
.add(*Val)
.addImm(AMDGPU::sub0);
}
MI.eraseFromParent();
return LoopBB;
}
MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
MachineInstr &MI, MachineBasicBlock *BB) const {
const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
MachineFunction *MF = BB->getParent();
SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
switch (MI.getOpcode()) {
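// Expand scalar unsigned add/sub with overflow: compute the result with
// S_ADD_I32/S_SUB_I32 and materialize the overflow bit from SCC via
// S_CSELECT_B64.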
case AMDGPU::S_UADDO_PSEUDO:
case AMDGPU::S_USUBO_PSEUDO: {
const DebugLoc &DL = MI.getDebugLoc();
MachineOperand &Dest0 = MI.getOperand(0);
MachineOperand &Dest1 = MI.getOperand(1);
MachineOperand &Src0 = MI.getOperand(2);
MachineOperand &Src1 = MI.getOperand(3);
unsigned Opc = (MI.getOpcode() == AMDGPU::S_UADDO_PSEUDO)
? AMDGPU::S_ADD_I32
: AMDGPU::S_SUB_I32;
BuildMI(*BB, MI, DL, TII->get(Opc), Dest0.getReg()).add(Src0).add(Src1);
BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CSELECT_B64), Dest1.getReg())
.addImm(1)
.addImm(0);
MI.eraseFromParent();
return BB;
}
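// Expand a 64-bit scalar add/sub into 32-bit halves chained through SCC
// (S_ADD_U32 + S_ADDC_U32, or the corresponding SUB pair).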
case AMDGPU::S_ADD_U64_PSEUDO:
case AMDGPU::S_SUB_U64_PSEUDO: {
MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
const SIRegisterInfo *TRI = ST.getRegisterInfo();
const TargetRegisterClass *BoolRC = TRI->getBoolRC();
const DebugLoc &DL = MI.getDebugLoc();
MachineOperand &Dest = MI.getOperand(0);
MachineOperand &Src0 = MI.getOperand(1);
MachineOperand &Src1 = MI.getOperand(2);
Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(
MI, MRI, Src0, BoolRC, AMDGPU::sub0, &AMDGPU::SReg_32RegClass);
MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(
MI, MRI, Src0, BoolRC, AMDGPU::sub1, &AMDGPU::SReg_32RegClass);
MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(
MI, MRI, Src1, BoolRC, AMDGPU::sub0, &AMDGPU::SReg_32RegClass);
MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(
MI, MRI, Src1, BoolRC, AMDGPU::sub1, &AMDGPU::SReg_32RegClass);
bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);
unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0).add(Src0Sub0).add(Src1Sub0);
BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1).add(Src0Sub1).add(Src1Sub1);
BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
.addReg(DestSub0)
.addImm(AMDGPU::sub0)
.addReg(DestSub1)
.addImm(AMDGPU::sub1);
MI.eraseFromParent();
return BB;
}
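// Expand a 64-bit VALU add/sub. Use V_LSHL_ADD_U64 when available; otherwise
// split into lo/hi 32-bit halves chained through a carry register.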
case AMDGPU::V_ADD_U64_PSEUDO:
case AMDGPU::V_SUB_U64_PSEUDO: {
MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
const SIRegisterInfo *TRI = ST.getRegisterInfo();
const DebugLoc &DL = MI.getDebugLoc();
bool IsAdd = (MI.getOpcode() == AMDGPU::V_ADD_U64_PSEUDO);
MachineOperand &Dest = MI.getOperand(0);
MachineOperand &Src0 = MI.getOperand(1);
MachineOperand &Src1 = MI.getOperand(2);
if (IsAdd && ST.hasLshlAddB64()) {
auto Add = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_LSHL_ADD_U64_e64),
Dest.getReg())
.add(Src0)
.addImm(0)
.add(Src1);
TII->legalizeOperands(*Add);
MI.eraseFromParent();
return BB;
}
const auto *CarryRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register CarryReg = MRI.createVirtualRegister(CarryRC);
Register DeadCarryReg = MRI.createVirtualRegister(CarryRC);
const TargetRegisterClass *Src0RC = Src0.isReg()
? MRI.getRegClass(Src0.getReg())
: &AMDGPU::VReg_64RegClass;
const TargetRegisterClass *Src1RC = Src1.isReg()
? MRI.getRegClass(Src1.getReg())
: &AMDGPU::VReg_64RegClass;
const TargetRegisterClass *Src0SubRC =
TRI->getSubRegClass(Src0RC, AMDGPU::sub0);
const TargetRegisterClass *Src1SubRC =
TRI->getSubRegClass(Src1RC, AMDGPU::sub1);
MachineOperand SrcReg0Sub0 = TII->buildExtractSubRegOrImm(
MI, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC);
MachineOperand SrcReg1Sub0 = TII->buildExtractSubRegOrImm(
MI, MRI, Src1, Src1RC, AMDGPU::sub0, Src1SubRC);
MachineOperand SrcReg0Sub1 = TII->buildExtractSubRegOrImm(
MI, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC);
MachineOperand SrcReg1Sub1 = TII->buildExtractSubRegOrImm(
MI, MRI, Src1, Src1RC, AMDGPU::sub1, Src1SubRC);
unsigned LoOpc = IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
MachineInstr *LoHalf = BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0)
.addReg(CarryReg, RegState::Define)
.add(SrcReg0Sub0)
.add(SrcReg1Sub0)
.addImm(0);
unsigned HiOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
MachineInstr *HiHalf =
BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1)
.addReg(DeadCarryReg, RegState::Define | RegState::Dead)
.add(SrcReg0Sub1)
.add(SrcReg1Sub1)
.addReg(CarryReg, RegState::Kill)
.addImm(0);
BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
.addReg(DestSub0)
.addImm(AMDGPU::sub0)
.addReg(DestSub1)
.addImm(AMDGPU::sub1);
TII->legalizeOperands(*LoHalf);
TII->legalizeOperands(*HiHalf);
MI.eraseFromParent();
return BB;
}
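// Scalar add/sub with carry-in: force any VGPR operands into SGPRs with
// V_READFIRSTLANE, set SCC from the incoming carry, perform S_ADDC/S_SUBB,
// and recreate the carry-out with S_CSELECT.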
case AMDGPU::S_ADD_CO_PSEUDO:
case AMDGPU::S_SUB_CO_PSEUDO: {
MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
const SIRegisterInfo *TRI = ST.getRegisterInfo();
MachineBasicBlock::iterator MII = MI;
const DebugLoc &DL = MI.getDebugLoc();
MachineOperand &Dest = MI.getOperand(0);
MachineOperand &CarryDest = MI.getOperand(1);
MachineOperand &Src0 = MI.getOperand(2);
MachineOperand &Src1 = MI.getOperand(3);
MachineOperand &Src2 = MI.getOperand(4);
unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO)
? AMDGPU::S_ADDC_U32
: AMDGPU::S_SUBB_U32;
if (Src0.isReg() && TRI->isVectorRegister(MRI, Src0.getReg())) {
Register RegOp0 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
BuildMI(*BB, MII, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), RegOp0)
.addReg(Src0.getReg());
Src0.setReg(RegOp0);
}
if (Src1.isReg() && TRI->isVectorRegister(MRI, Src1.getReg())) {
Register RegOp1 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
BuildMI(*BB, MII, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), RegOp1)
.addReg(Src1.getReg());
Src1.setReg(RegOp1);
}
Register RegOp2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
if (TRI->isVectorRegister(MRI, Src2.getReg())) {
BuildMI(*BB, MII, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), RegOp2)
.addReg(Src2.getReg());
Src2.setReg(RegOp2);
}
const TargetRegisterClass *Src2RC = MRI.getRegClass(Src2.getReg());
unsigned WaveSize = TRI->getRegSizeInBits(*Src2RC);
assert(WaveSize == 64 || WaveSize == 32);
if (WaveSize == 64) {
if (ST.hasScalarCompareEq64()) {
BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U64))
.addReg(Src2.getReg())
.addImm(0);
} else {
const TargetRegisterClass *SubRC =
TRI->getSubRegClass(Src2RC, AMDGPU::sub0);
MachineOperand Src2Sub0 = TII->buildExtractSubRegOrImm(
MII, MRI, Src2, Src2RC, AMDGPU::sub0, SubRC);
MachineOperand Src2Sub1 = TII->buildExtractSubRegOrImm(
MII, MRI, Src2, Src2RC, AMDGPU::sub1, SubRC);
Register Src2_32 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_OR_B32), Src2_32)
.add(Src2Sub0)
.add(Src2Sub1);
BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U32))
.addReg(Src2_32, RegState::Kill)
.addImm(0);
}
} else {
BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMPK_LG_U32))
.addReg(Src2.getReg())
.addImm(0);
}
BuildMI(*BB, MII, DL, TII->get(Opc), Dest.getReg()).add(Src0).add(Src1);
unsigned SelOpc =
(WaveSize == 64) ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32;
BuildMI(*BB, MII, DL, TII->get(SelOpc), CarryDest.getReg())
.addImm(-1)
.addImm(0);
MI.eraseFromParent();
return BB;
}
case AMDGPU::SI_INIT_M0: {
BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
.add(MI.getOperand(0));
MI.eraseFromParent();
return BB;
}
case AMDGPU::GET_GROUPSTATICSIZE: {
assert(getTargetMachine().getTargetTriple().getOS() == Triple::AMDHSA ||
getTargetMachine().getTargetTriple().getOS() == Triple::AMDPAL);
DebugLoc DL = MI.getDebugLoc();
BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
.add(MI.getOperand(0))
.addImm(MFI->getLDSSize());
MI.eraseFromParent();
return BB;
}
case AMDGPU::SI_INDIRECT_SRC_V1:
case AMDGPU::SI_INDIRECT_SRC_V2:
case AMDGPU::SI_INDIRECT_SRC_V4:
case AMDGPU::SI_INDIRECT_SRC_V8:
case AMDGPU::SI_INDIRECT_SRC_V16:
case AMDGPU::SI_INDIRECT_SRC_V32:
return emitIndirectSrc(MI, *BB, *getSubtarget());
case AMDGPU::SI_INDIRECT_DST_V1:
case AMDGPU::SI_INDIRECT_DST_V2:
case AMDGPU::SI_INDIRECT_DST_V4:
case AMDGPU::SI_INDIRECT_DST_V8:
case AMDGPU::SI_INDIRECT_DST_V16:
case AMDGPU::SI_INDIRECT_DST_V32:
return emitIndirectDst(MI, *BB, *getSubtarget());
case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
case AMDGPU::SI_KILL_I1_PSEUDO:
return splitKillBlock(MI, BB);
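// Expand a 64-bit select into two 32-bit V_CNDMASK_B32 operations on the
// sub-registers.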
case AMDGPU::V_CNDMASK_B64_PSEUDO: {
MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
const SIRegisterInfo *TRI = ST.getRegisterInfo();
Register Dst = MI.getOperand(0).getReg();
Register Src0 = MI.getOperand(1).getReg();
Register Src1 = MI.getOperand(2).getReg();
const DebugLoc &DL = MI.getDebugLoc();
Register SrcCond = MI.getOperand(3).getReg();
Register DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
const auto *CondRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
Register SrcCondCopy = MRI.createVirtualRegister(CondRC);
BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy)
.addReg(SrcCond);
BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
.addImm(0)
.addReg(Src0, 0, AMDGPU::sub0)
.addImm(0)
.addReg(Src1, 0, AMDGPU::sub0)
.addReg(SrcCondCopy);
BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
.addImm(0)
.addReg(Src0, 0, AMDGPU::sub1)
.addImm(0)
.addReg(Src1, 0, AMDGPU::sub1)
.addReg(SrcCondCopy);
BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
.addReg(DstLo)
.addImm(AMDGPU::sub0)
.addReg(DstHi)
.addImm(AMDGPU::sub1);
MI.eraseFromParent();
return BB;
}
case AMDGPU::SI_BR_UNDEF: {
const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
const DebugLoc &DL = MI.getDebugLoc();
MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
.add(MI.getOperand(0));
Br->getOperand(1).setIsUndef(true);
MI.eraseFromParent();
return BB;
}
case AMDGPU::ADJCALLSTACKUP:
case AMDGPU::ADJCALLSTACKDOWN: {
const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
MachineInstrBuilder MIB(*MF, &MI);
MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine)
.addReg(Info->getStackPtrOffsetReg(), RegState::Implicit);
return BB;
}
case AMDGPU::SI_CALL_ISEL: {
const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
const DebugLoc &DL = MI.getDebugLoc();
unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF);
MachineInstrBuilder MIB;
MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg);
for (const MachineOperand &MO : MI.operands())
MIB.add(MO);
MIB.cloneMemRefs(MI);
MI.eraseFromParent();
return BB;
}
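// If the VOP2 carry-out add/sub has no valid encoding on this subtarget,
// switch to the VOP3 form, which needs an explicit VCC def and a clamp
// operand.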
case AMDGPU::V_ADD_CO_U32_e32:
case AMDGPU::V_SUB_CO_U32_e32:
case AMDGPU::V_SUBREV_CO_U32_e32: {
const DebugLoc &DL = MI.getDebugLoc();
unsigned Opc = MI.getOpcode();
bool NeedClampOperand = false;
if (TII->pseudoToMCOpcode(Opc) == -1) {
Opc = AMDGPU::getVOPe64(Opc);
NeedClampOperand = true;
}
auto I = BuildMI(*BB, MI, DL, TII->get(Opc), MI.getOperand(0).getReg());
if (TII->isVOP3(*I)) {
const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
const SIRegisterInfo *TRI = ST.getRegisterInfo();
I.addReg(TRI->getVCC(), RegState::Define);
}
I.add(MI.getOperand(1))
.add(MI.getOperand(2));
if (NeedClampOperand)
I.addImm(0);
TII->legalizeOperands(*I);
MI.eraseFromParent();
return BB;
}
case AMDGPU::V_ADDC_U32_e32:
case AMDGPU::V_SUBB_U32_e32:
case AMDGPU::V_SUBBREV_U32_e32:
TII->legalizeOperands(MI);
return BB;
case AMDGPU::DS_GWS_INIT:
case AMDGPU::DS_GWS_SEMA_BR:
case AMDGPU::DS_GWS_BARRIER:
TII->enforceOperandRCAlignment(MI, AMDGPU::OpName::data0);
LLVM_FALLTHROUGH;
case AMDGPU::DS_GWS_SEMA_V:
case AMDGPU::DS_GWS_SEMA_P:
case AMDGPU::DS_GWS_SEMA_RELEASE_ALL:
if (getSubtarget()->hasGWSAutoReplay()) {
bundleInstWithWaitcnt(MI);
return BB;
}
return emitGWSMemViolTestLoop(MI, BB);
case AMDGPU::S_SETREG_B32: {
unsigned ID, Offset, Width;
AMDGPU::Hwreg::decodeHwreg(MI.getOperand(1).getImm(), ID, Offset, Width);
if (ID != AMDGPU::Hwreg::ID_MODE)
return BB;
const unsigned WidthMask = maskTrailingOnes<unsigned>(Width);
const unsigned SetMask = WidthMask << Offset;
if (getSubtarget()->hasDenormModeInst()) {
unsigned SetDenormOp = 0;
unsigned SetRoundOp = 0;
if (SetMask ==
(AMDGPU::Hwreg::FP_ROUND_MASK | AMDGPU::Hwreg::FP_DENORM_MASK)) {
SetRoundOp = AMDGPU::S_ROUND_MODE;
SetDenormOp = AMDGPU::S_DENORM_MODE;
} else if (SetMask == AMDGPU::Hwreg::FP_ROUND_MASK) {
SetRoundOp = AMDGPU::S_ROUND_MODE;
} else if (SetMask == AMDGPU::Hwreg::FP_DENORM_MASK) {
SetDenormOp = AMDGPU::S_DENORM_MODE;
}
if (SetRoundOp || SetDenormOp) {
MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
MachineInstr *Def = MRI.getVRegDef(MI.getOperand(0).getReg());
if (Def && Def->isMoveImmediate() && Def->getOperand(1).isImm()) {
unsigned ImmVal = Def->getOperand(1).getImm();
if (SetRoundOp) {
BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(SetRoundOp))
.addImm(ImmVal & 0xf);
ImmVal >>= 4;
}
if (SetDenormOp) {
BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(SetDenormOp))
.addImm(ImmVal & 0xf);
}
MI.eraseFromParent();
return BB;
}
}
}
if ((SetMask & (AMDGPU::Hwreg::FP_ROUND_MASK |
AMDGPU::Hwreg::FP_DENORM_MASK)) == SetMask)
MI.setDesc(TII->get(AMDGPU::S_SETREG_B32_mode));
return BB;
}
default:
return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
}
}
bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const {
return isTypeLegal(VT.getScalarType());
}
bool SITargetLowering::hasAtomicFaddRtnForTy(SDValue &Op) const {
switch (Op.getValue(0).getSimpleValueType().SimpleTy) {
case MVT::f32:
return Subtarget->hasAtomicFaddRtnInsts();
case MVT::v2f16:
case MVT::f64:
return Subtarget->hasGFX90AInsts();
default:
return false;
}
}
bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
return true;
}
bool SITargetLowering::enableAggressiveFMAFusion(LLT Ty) const { return true; }
EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
EVT VT) const {
if (!VT.isVector()) {
return MVT::i1;
}
return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
}
MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
}
LLT SITargetLowering::getPreferredShiftAmountTy(LLT Ty) const {
return (Ty.getScalarSizeInBits() <= 16 && Subtarget->has16BitInsts())
? Ty.changeElementSize(16)
: Ty.changeElementSize(32);
}
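// Prefer a fused multiply-add over separate fmul/fadd where it is at least as
// fast: always for f64, for f32 depending on MAD/MAC availability, fast-FMA
// support and the FP32 denormal mode, and for f16 only when 16-bit
// instructions are available and FP16 denormals are enabled.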
bool SITargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
EVT VT) const {
VT = VT.getScalarType();
switch (VT.getSimpleVT().SimpleTy) {
case MVT::f32: {
if (!Subtarget->hasMadMacF32Insts())
return Subtarget->hasFastFMAF32();
if (hasFP32Denormals(MF))
return Subtarget->hasFastFMAF32() || Subtarget->hasDLInsts();
return Subtarget->hasFastFMAF32() && Subtarget->hasDLInsts();
}
case MVT::f64:
return true;
case MVT::f16:
return Subtarget->has16BitInsts() && hasFP64FP16Denormals(MF);
default:
break;
}
return false;
}
bool SITargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
LLT Ty) const {
switch (Ty.getScalarSizeInBits()) {
case 16:
return isFMAFasterThanFMulAndFAdd(MF, MVT::f16);
case 32:
return isFMAFasterThanFMulAndFAdd(MF, MVT::f32);
case 64:
return isFMAFasterThanFMulAndFAdd(MF, MVT::f64);
default:
break;
}
return false;
}
bool SITargetLowering::isFMADLegal(const MachineInstr &MI, LLT Ty) const {
if (!Ty.isScalar())
return false;
if (Ty.getScalarSizeInBits() == 16)
return Subtarget->hasMadF16() && !hasFP64FP16Denormals(*MI.getMF());
if (Ty.getScalarSizeInBits() == 32)
return Subtarget->hasMadMacF32Insts() && !hasFP32Denormals(*MI.getMF());
return false;
}
bool SITargetLowering::isFMADLegal(const SelectionDAG &DAG,
const SDNode *N) const {
EVT VT = N->getValueType(0);
if (VT == MVT::f32)
return Subtarget->hasMadMacF32Insts() &&
!hasFP32Denormals(DAG.getMachineFunction());
if (VT == MVT::f16) {
return Subtarget->hasMadF16() &&
!hasFP64FP16Denormals(DAG.getMachineFunction());
}
return false;
}
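// Split an illegal wide vector operation into two operations on the low and
// high halves and rejoin the result with CONCAT_VECTORS. The same pattern is
// used below for binary and ternary operations.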
SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op,
SelectionDAG &DAG) const {
unsigned Opc = Op.getOpcode();
EVT VT = Op.getValueType();
assert(VT == MVT::v4f16 || VT == MVT::v4i16);
SDValue Lo, Hi;
std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
SDLoc SL(Op);
SDValue OpLo = DAG.getNode(Opc, SL, Lo.getValueType(), Lo,
Op->getFlags());
SDValue OpHi = DAG.getNode(Opc, SL, Hi.getValueType(), Hi,
Op->getFlags());
return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
}
SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op,
SelectionDAG &DAG) const {
unsigned Opc = Op.getOpcode();
EVT VT = Op.getValueType();
assert(VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4f32 ||
VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i16 ||
VT == MVT::v16f16 || VT == MVT::v8f32 || VT == MVT::v16f32 ||
VT == MVT::v32f32);
SDValue Lo0, Hi0;
std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
SDValue Lo1, Hi1;
std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
SDLoc SL(Op);
SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1,
Op->getFlags());
SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1,
Op->getFlags());
return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
}
SDValue SITargetLowering::splitTernaryVectorOp(SDValue Op,
SelectionDAG &DAG) const {
unsigned Opc = Op.getOpcode();
EVT VT = Op.getValueType();
assert(VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v8i16 ||
VT == MVT::v8f16 || VT == MVT::v4f32 || VT == MVT::v16i16 ||
VT == MVT::v16f16 || VT == MVT::v8f32 || VT == MVT::v16f32 ||
VT == MVT::v32f32);
SDValue Lo0, Hi0;
SDValue Op0 = Op.getOperand(0);
std::tie(Lo0, Hi0) = Op0.getValueType().isVector()
? DAG.SplitVectorOperand(Op.getNode(), 0)
: std::make_pair(Op0, Op0);
SDValue Lo1, Hi1;
std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
SDValue Lo2, Hi2;
std::tie(Lo2, Hi2) = DAG.SplitVectorOperand(Op.getNode(), 2);
SDLoc SL(Op);
auto ResVT = DAG.GetSplitDestVTs(VT);
SDValue OpLo = DAG.getNode(Opc, SL, ResVT.first, Lo0, Lo1, Lo2,
Op->getFlags());
SDValue OpHi = DAG.getNode(Opc, SL, ResVT.second, Hi0, Hi1, Hi2,
Op->getFlags());
return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
}
SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
switch (Op.getOpcode()) {
default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
case ISD::BRCOND: return LowerBRCOND(Op, DAG);
case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
case ISD::LOAD: {
SDValue Result = LowerLOAD(Op, DAG);
assert((!Result.getNode() ||
Result.getNode()->getNumValues() == 2) &&
"Load should return a value and a chain");
return Result;
}
case ISD::FSIN:
case ISD::FCOS:
return LowerTrig(Op, DAG);
case ISD::SELECT: return LowerSELECT(Op, DAG);
case ISD::FDIV: return LowerFDIV(Op, DAG);
case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
case ISD::STORE: return LowerSTORE(Op, DAG);
case ISD::GlobalAddress: {
MachineFunction &MF = DAG.getMachineFunction();
SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
return LowerGlobalAddress(MFI, Op, DAG);
}
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
case ISD::INSERT_SUBVECTOR:
return lowerINSERT_SUBVECTOR(Op, DAG);
case ISD::INSERT_VECTOR_ELT:
return lowerINSERT_VECTOR_ELT(Op, DAG);
case ISD::EXTRACT_VECTOR_ELT:
return lowerEXTRACT_VECTOR_ELT(Op, DAG);
case ISD::VECTOR_SHUFFLE:
return lowerVECTOR_SHUFFLE(Op, DAG);
case ISD::SCALAR_TO_VECTOR:
return lowerSCALAR_TO_VECTOR(Op, DAG);
case ISD::BUILD_VECTOR:
return lowerBUILD_VECTOR(Op, DAG);
case ISD::FP_ROUND:
return lowerFP_ROUND(Op, DAG);
case ISD::FPTRUNC_ROUND: {
unsigned Opc;
SDLoc DL(Op);
if (Op.getOperand(0)->getValueType(0) != MVT::f32)
return SDValue();
int RoundMode = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
if (RoundMode == (int)RoundingMode::TowardPositive)
Opc = AMDGPUISD::FPTRUNC_ROUND_UPWARD;
else if (RoundMode == (int)RoundingMode::TowardNegative)
Opc = AMDGPUISD::FPTRUNC_ROUND_DOWNWARD;
else
return SDValue();
return DAG.getNode(Opc, DL, Op.getNode()->getVTList(), Op->getOperand(0));
}
case ISD::TRAP:
return lowerTRAP(Op, DAG);
case ISD::DEBUGTRAP:
return lowerDEBUGTRAP(Op, DAG);
case ISD::FABS:
case ISD::FNEG:
case ISD::FCANONICALIZE:
case ISD::BSWAP:
return splitUnaryVectorOp(Op, DAG);
case ISD::FMINNUM:
case ISD::FMAXNUM:
return lowerFMINNUM_FMAXNUM(Op, DAG);
case ISD::FMA:
return splitTernaryVectorOp(Op, DAG);
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT:
return LowerFP_TO_INT(Op, DAG);
case ISD::SHL:
case ISD::SRA:
case ISD::SRL:
case ISD::ADD:
case ISD::SUB:
case ISD::MUL:
case ISD::SMIN:
case ISD::SMAX:
case ISD::UMIN:
case ISD::UMAX:
case ISD::FADD:
case ISD::FMUL:
case ISD::FMINNUM_IEEE:
case ISD::FMAXNUM_IEEE:
case ISD::UADDSAT:
case ISD::USUBSAT:
case ISD::SADDSAT:
case ISD::SSUBSAT:
return splitBinaryVectorOp(Op, DAG);
case ISD::SMULO:
case ISD::UMULO:
return lowerXMULO(Op, DAG);
case ISD::SMUL_LOHI:
case ISD::UMUL_LOHI:
return lowerXMUL_LOHI(Op, DAG);
case ISD::DYNAMIC_STACKALLOC:
return LowerDYNAMIC_STACKALLOC(Op, DAG);
}
return SDValue();
}
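// Repack the result of a D16 memory operation into the originally requested
// vector type. With unpacked D16 each 16-bit element arrives in its own dword
// and is truncated back to i16; vectors with an odd element count are widened
// by one undef element so the final bitcast type is legal.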
static SDValue adjustLoadValueTypeImpl(SDValue Result, EVT LoadVT,
const SDLoc &DL,
SelectionDAG &DAG, bool Unpacked) {
if (!LoadVT.isVector())
return Result;
EVT FittingLoadVT = LoadVT;
if ((LoadVT.getVectorNumElements() % 2) == 1) {
FittingLoadVT =
EVT::getVectorVT(*DAG.getContext(), LoadVT.getVectorElementType(),
LoadVT.getVectorNumElements() + 1);
}
if (Unpacked) {
EVT IntLoadVT = FittingLoadVT.changeTypeToInteger();
SmallVector<SDValue, 4> Elts;
DAG.ExtractVectorElements(Result, Elts);
for (SDValue &Elt : Elts)
Elt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Elt);
if ((LoadVT.getVectorNumElements() % 2) == 1)
Elts.push_back(DAG.getUNDEF(MVT::i16));
Result = DAG.getBuildVector(IntLoadVT, DL, Elts);
return DAG.getNode(ISD::BITCAST, DL, FittingLoadVT, Result);
}
return DAG.getNode(ISD::BITCAST, DL, FittingLoadVT, Result);
}
SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode,
MemSDNode *M,
SelectionDAG &DAG,
ArrayRef<SDValue> Ops,
bool IsIntrinsic) const {
SDLoc DL(M);
bool Unpacked = Subtarget->hasUnpackedD16VMem();
EVT LoadVT = M->getValueType(0);
EVT EquivLoadVT = LoadVT;
if (LoadVT.isVector()) {
if (Unpacked) {
EquivLoadVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
LoadVT.getVectorNumElements());
} else if ((LoadVT.getVectorNumElements() % 2) == 1) {
EquivLoadVT =
EVT::getVectorVT(*DAG.getContext(), LoadVT.getVectorElementType(),
LoadVT.getVectorNumElements() + 1);
}
}
SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other);
SDValue Load
= DAG.getMemIntrinsicNode(
IsIntrinsic ? (unsigned)ISD::INTRINSIC_W_CHAIN : Opcode, DL,
VTList, Ops, M->getMemoryVT(),
M->getMemOperand());
SDValue Adjusted = adjustLoadValueTypeImpl(Load, LoadVT, DL, DAG, Unpacked);
return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL);
}
SDValue SITargetLowering::lowerIntrinsicLoad(MemSDNode *M, bool IsFormat,
SelectionDAG &DAG,
ArrayRef<SDValue> Ops) const {
SDLoc DL(M);
EVT LoadVT = M->getValueType(0);
EVT EltType = LoadVT.getScalarType();
EVT IntVT = LoadVT.changeTypeToInteger();
bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16);
unsigned Opc =
IsFormat ? AMDGPUISD::BUFFER_LOAD_FORMAT : AMDGPUISD::BUFFER_LOAD;
if (IsD16) {
return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16, M, DAG, Ops);
}
if (!IsD16 && !LoadVT.isVector() && EltType.getSizeInBits() < 32)
return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);
if (isTypeLegal(LoadVT)) {
return getMemIntrinsicNode(Opc, DL, M->getVTList(), Ops, IntVT,
M->getMemOperand(), DAG);
}
EVT CastVT = getEquivalentMemType(*DAG.getContext(), LoadVT);
SDVTList VTList = DAG.getVTList(CastVT, MVT::Other);
SDValue MemNode = getMemIntrinsicNode(Opc, DL, VTList, Ops, CastVT,
M->getMemOperand(), DAG);
return DAG.getMergeValues(
{DAG.getNode(ISD::BITCAST, DL, LoadVT, MemNode), MemNode.getValue(1)},
DL);
}
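// Lower llvm.amdgcn.icmp to an AMDGPUISD::SETCC producing a wavefront-sized
// lane mask. Illegal i16 operands are promoted to i32 first, and the mask is
// zero-extended or truncated to the requested result type.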
static SDValue lowerICMPIntrinsic(const SITargetLowering &TLI,
SDNode *N, SelectionDAG &DAG) {
EVT VT = N->getValueType(0);
const auto *CD = cast<ConstantSDNode>(N->getOperand(3));
unsigned CondCode = CD->getZExtValue();
if (!ICmpInst::isIntPredicate(static_cast<ICmpInst::Predicate>(CondCode)))
return DAG.getUNDEF(VT);
ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
SDValue LHS = N->getOperand(1);
SDValue RHS = N->getOperand(2);
SDLoc DL(N);
EVT CmpVT = LHS.getValueType();
if (CmpVT == MVT::i16 && !TLI.isTypeLegal(MVT::i16)) {
unsigned PromoteOp = ICmpInst::isSigned(IcInput) ?
ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
LHS = DAG.getNode(PromoteOp, DL, MVT::i32, LHS);
RHS = DAG.getNode(PromoteOp, DL, MVT::i32, RHS);
}
ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize();
EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize);
SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, DL, CCVT, LHS, RHS,
DAG.getCondCode(CCOpcode));
if (VT.bitsEq(CCVT))
return SetCC;
return DAG.getZExtOrTrunc(SetCC, DL, VT);
}
static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI,
SDNode *N, SelectionDAG &DAG) {
EVT VT = N->getValueType(0);
const auto *CD = cast<ConstantSDNode>(N->getOperand(3));
unsigned CondCode = CD->getZExtValue();
if (!FCmpInst::isFPPredicate(static_cast<FCmpInst::Predicate>(CondCode)))
return DAG.getUNDEF(VT);
SDValue Src0 = N->getOperand(1);
SDValue Src1 = N->getOperand(2);
EVT CmpVT = Src0.getValueType();
SDLoc SL(N);
if (CmpVT == MVT::f16 && !TLI.isTypeLegal(CmpVT)) {
Src0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
Src1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
}
FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize();
EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize);
SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, SL, CCVT, Src0,
Src1, DAG.getCondCode(CCOpcode));
if (VT.bitsEq(CCVT))
return SetCC;
return DAG.getZExtOrTrunc(SetCC, SL, VT);
}
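// Lower llvm.amdgcn.ballot. A setcc source folds directly into the lane-mask
// SETCC node, a constant false becomes 0, a constant true becomes a copy of
// EXEC, and any other source is compared against zero.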
static SDValue lowerBALLOTIntrinsic(const SITargetLowering &TLI, SDNode *N,
SelectionDAG &DAG) {
EVT VT = N->getValueType(0);
SDValue Src = N->getOperand(1);
SDLoc SL(N);
if (Src.getOpcode() == ISD::SETCC) {
return DAG.getNode(AMDGPUISD::SETCC, SL, VT, Src.getOperand(0),
Src.getOperand(1), Src.getOperand(2));
}
if (const ConstantSDNode *Arg = dyn_cast<ConstantSDNode>(Src)) {
if (Arg->isZero())
return DAG.getConstant(0, SL, VT);
if (Arg->isOne()) {
Register Exec;
if (VT.getScalarSizeInBits() == 32)
Exec = AMDGPU::EXEC_LO;
else if (VT.getScalarSizeInBits() == 64)
Exec = AMDGPU::EXEC;
else
return SDValue();
return DAG.getCopyFromReg(DAG.getEntryNode(), SL, Exec, VT);
}
}
return DAG.getNode(
AMDGPUISD::SETCC, SL, VT, DAG.getZExtOrTrunc(Src, SL, MVT::i32),
DAG.getConstant(0, SL, MVT::i32), DAG.getCondCode(ISD::SETNE));
}
void SITargetLowering::ReplaceNodeResults(SDNode *N,
SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const {
switch (N->getOpcode()) {
case ISD::INSERT_VECTOR_ELT: {
if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG))
Results.push_back(Res);
return;
}
case ISD::EXTRACT_VECTOR_ELT: {
if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG))
Results.push_back(Res);
return;
}
case ISD::INTRINSIC_WO_CHAIN: {
unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
switch (IID) {
case Intrinsic::amdgcn_cvt_pkrtz: {
SDValue Src0 = N->getOperand(1);
SDValue Src1 = N->getOperand(2);
SDLoc SL(N);
SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32,
Src0, Src1);
Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
return;
}
case Intrinsic::amdgcn_cvt_pknorm_i16:
case Intrinsic::amdgcn_cvt_pknorm_u16:
case Intrinsic::amdgcn_cvt_pk_i16:
case Intrinsic::amdgcn_cvt_pk_u16: {
SDValue Src0 = N->getOperand(1);
SDValue Src1 = N->getOperand(2);
SDLoc SL(N);
unsigned Opcode;
if (IID == Intrinsic::amdgcn_cvt_pknorm_i16)
Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16)
Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
else if (IID == Intrinsic::amdgcn_cvt_pk_i16)
Opcode = AMDGPUISD::CVT_PK_I16_I32;
else
Opcode = AMDGPUISD::CVT_PK_U16_U32;
EVT VT = N->getValueType(0);
if (isTypeLegal(VT))
Results.push_back(DAG.getNode(Opcode, SL, VT, Src0, Src1));
else {
SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1);
Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt));
}
return;
}
}
break;
}
case ISD::INTRINSIC_W_CHAIN: {
if (SDValue Res = LowerINTRINSIC_W_CHAIN(SDValue(N, 0), DAG)) {
if (Res.getOpcode() == ISD::MERGE_VALUES) {
for (unsigned I = 0; I < Res.getNumOperands(); I++) {
Results.push_back(Res.getOperand(I));
}
} else {
Results.push_back(Res);
Results.push_back(Res.getValue(1));
}
return;
}
break;
}
case ISD::SELECT: {
SDLoc SL(N);
EVT VT = N->getValueType(0);
EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1));
SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2));
EVT SelectVT = NewVT;
if (NewVT.bitsLT(MVT::i32)) {
LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS);
RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS);
SelectVT = MVT::i32;
}
SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT,
N->getOperand(0), LHS, RHS);
if (NewVT != SelectVT)
NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect);
Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect));
return;
}
case ISD::FNEG: {
if (N->getValueType(0) != MVT::v2f16)
break;
SDLoc SL(N);
SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
SDValue Op = DAG.getNode(ISD::XOR, SL, MVT::i32,
BC,
DAG.getConstant(0x80008000, SL, MVT::i32));
Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
return;
}
case ISD::FABS: {
if (N->getValueType(0) != MVT::v2f16)
break;
SDLoc SL(N);
SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
SDValue Op = DAG.getNode(ISD::AND, SL, MVT::i32,
BC,
DAG.getConstant(0x7fff7fff, SL, MVT::i32));
Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
return;
}
default:
break;
}
}
static SDNode *findUser(SDValue Value, unsigned Opcode) {
SDNode *Parent = Value.getNode();
for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
I != E; ++I) {
if (I.getUse().get() != Value)
continue;
if (I->getOpcode() == Opcode)
return *I;
}
return nullptr;
}
unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
case Intrinsic::amdgcn_if:
return AMDGPUISD::IF;
case Intrinsic::amdgcn_else:
return AMDGPUISD::ELSE;
case Intrinsic::amdgcn_loop:
return AMDGPUISD::LOOP;
case Intrinsic::amdgcn_end_cf:
llvm_unreachable("should not occur");
default:
return 0;
}
}
return 0;
}
bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
const Triple &TT = getTargetMachine().getTargetTriple();
return (GV->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
GV->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
AMDGPU::shouldEmitConstantsToTextSection(TT);
}
bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
return (GV->getValueType()->isFunctionTy() ||
!isNonGlobalAddrSpace(GV->getAddressSpace())) &&
!shouldEmitFixup(GV) &&
!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
}
bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
}
bool SITargetLowering::shouldUseLDSConstAddress(const GlobalValue *GV) const {
if (!GV->hasExternalLinkage())
return true;
const auto OS = getTargetMachine().getTargetTriple().getOS();
return OS == Triple::AMDHSA || OS == Triple::AMDPAL;
}
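// Rewrite a BRCOND whose condition comes from one of the control-flow
// intrinsics (amdgcn.if/else/loop) into the corresponding AMDGPUISD pseudo
// branch node, rethreading the chain and any CopyToReg users of the
// intrinsic's extra results.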
SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
SelectionDAG &DAG) const {
SDLoc DL(BRCOND);
SDNode *Intr = BRCOND.getOperand(1).getNode();
SDValue Target = BRCOND.getOperand(2);
SDNode *BR = nullptr;
SDNode *SetCC = nullptr;
if (Intr->getOpcode() == ISD::SETCC) {
SetCC = Intr;
Intr = SetCC->getOperand(0).getNode();
} else {
BR = findUser(BRCOND, ISD::BR);
assert(BR && "brcond missing unconditional branch user");
Target = BR->getOperand(1);
}
unsigned CFNode = isCFIntrinsic(Intr);
if (CFNode == 0) {
return BRCOND;
}
bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;
assert(!SetCC ||
(SetCC->getConstantOperandVal(1) == 1 &&
cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
ISD::SETNE));
SmallVector<SDValue, 4> Ops;
if (HaveChain)
Ops.push_back(BRCOND.getOperand(0));
Ops.append(Intr->op_begin() + (HaveChain ? 2 : 1), Intr->op_end());
Ops.push_back(Target);
ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());
SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode();
if (!HaveChain) {
SDValue Ops[] = {
SDValue(Result, 0),
BRCOND.getOperand(0)
};
Result = DAG.getMergeValues(Ops, DL).getNode();
}
if (BR) {
SDValue Ops[] = {
BR->getOperand(0),
BRCOND.getOperand(2)
};
SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
}
SDValue Chain = SDValue(Result, Result->getNumValues() - 1);
for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
if (!CopyToReg)
continue;
Chain = DAG.getCopyToReg(
Chain, DL,
CopyToReg->getOperand(1),
SDValue(Result, i - 1),
SDValue());
DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
}
DAG.ReplaceAllUsesOfValueWith(
SDValue(Intr, Intr->getNumValues() - 1),
Intr->getOperand(0));
return Chain;
}
SDValue SITargetLowering::LowerRETURNADDR(SDValue Op,
SelectionDAG &DAG) const {
MVT VT = Op.getSimpleValueType();
SDLoc DL(Op);
if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0)
return DAG.getConstant(0, DL, VT);
MachineFunction &MF = DAG.getMachineFunction();
const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
if (Info->isEntryFunction())
return DAG.getConstant(0, DL, VT);
MachineFrameInfo &MFI = MF.getFrameInfo();
MFI.setReturnAddressIsTaken(true);
const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
Register Reg = MF.addLiveIn(TRI->getReturnAddressReg(MF),
                            getRegClassFor(VT, Op.getNode()->isDivergent()));
return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT);
}
SDValue SITargetLowering::getFPExtOrFPRound(SelectionDAG &DAG,
SDValue Op,
const SDLoc &DL,
EVT VT) const {
return Op.getValueType().bitsLE(VT) ?
DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) :
DAG.getNode(ISD::FP_ROUND, DL, VT, Op,
DAG.getTargetConstant(0, DL, MVT::i32));
}
SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
assert(Op.getValueType() == MVT::f16 &&
"Do not know how to custom lower FP_ROUND for non-f16 type");
SDValue Src = Op.getOperand(0);
EVT SrcVT = Src.getValueType();
if (SrcVT != MVT::f64)
return Op;
SDLoc DL(Op);
SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
}
SDValue SITargetLowering::lowerFMINNUM_FMAXNUM(SDValue Op,
SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
const MachineFunction &MF = DAG.getMachineFunction();
const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
bool IsIEEEMode = Info->getMode().IEEE;
if (IsIEEEMode)
return expandFMINNUM_FMAXNUM(Op.getNode(), DAG);
if (VT == MVT::v4f16 || VT == MVT::v8f16 || VT == MVT::v16f16)
return splitBinaryVectorOp(Op, DAG);
return Op;
}
SDValue SITargetLowering::lowerXMULO(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
SDLoc SL(Op);
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
bool isSigned = Op.getOpcode() == ISD::SMULO;
if (ConstantSDNode *RHSC = isConstOrConstSplat(RHS)) {
const APInt &C = RHSC->getAPIntValue();
if (C.isPowerOf2()) {
bool UseArithShift = isSigned && !C.isMinSignedValue();
SDValue ShiftAmt = DAG.getConstant(C.logBase2(), SL, MVT::i32);
SDValue Result = DAG.getNode(ISD::SHL, SL, VT, LHS, ShiftAmt);
SDValue Overflow = DAG.getSetCC(SL, MVT::i1,
DAG.getNode(UseArithShift ? ISD::SRA : ISD::SRL,
SL, VT, Result, ShiftAmt),
LHS, ISD::SETNE);
return DAG.getMergeValues({ Result, Overflow }, SL);
}
}
SDValue Result = DAG.getNode(ISD::MUL, SL, VT, LHS, RHS);
SDValue Top = DAG.getNode(isSigned ? ISD::MULHS : ISD::MULHU,
SL, VT, LHS, RHS);
SDValue Sign = isSigned
? DAG.getNode(ISD::SRA, SL, VT, Result,
DAG.getConstant(VT.getScalarSizeInBits() - 1, SL, MVT::i32))
: DAG.getConstant(0, SL, VT);
SDValue Overflow = DAG.getSetCC(SL, MVT::i1, Top, Sign, ISD::SETNE);
return DAG.getMergeValues({ Result, Overflow }, SL);
}
SDValue SITargetLowering::lowerXMUL_LOHI(SDValue Op, SelectionDAG &DAG) const {
if (Op->isDivergent()) {
return Op;
}
if (Subtarget->hasSMulHi()) {
return SDValue();
}
return Op;
}
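// Select the trap lowering based on the trap-handler ABI: without an enabled
// AMDHSA trap handler the trap degenerates to ENDPGM; otherwise either the
// queue-pointer form or the plain s_trap form is used, depending on the HSA
// ABI version and doorbell support.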
SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
if (!Subtarget->isTrapHandlerEnabled() ||
Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbi::AMDHSA)
return lowerTrapEndpgm(Op, DAG);
if (Optional<uint8_t> HsaAbiVer = AMDGPU::getHsaAbiVersion(Subtarget)) {
switch (*HsaAbiVer) {
case ELF::ELFABIVERSION_AMDGPU_HSA_V2:
case ELF::ELFABIVERSION_AMDGPU_HSA_V3:
return lowerTrapHsaQueuePtr(Op, DAG);
case ELF::ELFABIVERSION_AMDGPU_HSA_V4:
case ELF::ELFABIVERSION_AMDGPU_HSA_V5:
return Subtarget->supportsGetDoorbellID() ?
lowerTrapHsa(Op, DAG) : lowerTrapHsaQueuePtr(Op, DAG);
}
}
llvm_unreachable("Unknown trap handler");
}
SDValue SITargetLowering::lowerTrapEndpgm(
SDValue Op, SelectionDAG &DAG) const {
SDLoc SL(Op);
SDValue Chain = Op.getOperand(0);
return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
}
SDValue SITargetLowering::loadImplicitKernelArgument(SelectionDAG &DAG, MVT VT,
const SDLoc &DL, Align Alignment, ImplicitParameter Param) const {
MachineFunction &MF = DAG.getMachineFunction();
uint64_t Offset = getImplicitParameterOffset(MF, Param);
SDValue Ptr = lowerKernArgParameterPtr(DAG, DL, DAG.getEntryNode(), Offset);
MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
return DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, PtrInfo, Alignment,
MachineMemOperand::MODereferenceable |
MachineMemOperand::MOInvariant);
}
SDValue SITargetLowering::lowerTrapHsaQueuePtr(
SDValue Op, SelectionDAG &DAG) const {
SDLoc SL(Op);
SDValue Chain = Op.getOperand(0);
SDValue QueuePtr;
if (AMDGPU::getAmdhsaCodeObjectVersion() == 5) {
QueuePtr =
loadImplicitKernelArgument(DAG, MVT::i64, SL, Align(8), QUEUE_PTR);
} else {
MachineFunction &MF = DAG.getMachineFunction();
SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
Register UserSGPR = Info->getQueuePtrUserSGPR();
if (UserSGPR == AMDGPU::NoRegister) {
QueuePtr = DAG.getConstant(0, SL, MVT::i64);
} else {
QueuePtr = CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, UserSGPR,
MVT::i64);
}
}
SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64);
SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01,
QueuePtr, SDValue());
uint64_t TrapID = static_cast<uint64_t>(GCNSubtarget::TrapID::LLVMAMDHSATrap);
SDValue Ops[] = {
ToReg,
DAG.getTargetConstant(TrapID, SL, MVT::i16),
SGPR01,
ToReg.getValue(1)
};
return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
}
SDValue SITargetLowering::lowerTrapHsa(
SDValue Op, SelectionDAG &DAG) const {
SDLoc SL(Op);
SDValue Chain = Op.getOperand(0);
uint64_t TrapID = static_cast<uint64_t>(GCNSubtarget::TrapID::LLVMAMDHSATrap);
SDValue Ops[] = {
Chain,
DAG.getTargetConstant(TrapID, SL, MVT::i16)
};
return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
}
SDValue SITargetLowering::lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const {
SDLoc SL(Op);
SDValue Chain = Op.getOperand(0);
MachineFunction &MF = DAG.getMachineFunction();
if (!Subtarget->isTrapHandlerEnabled() ||
Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbi::AMDHSA) {
DiagnosticInfoUnsupported NoTrap(MF.getFunction(),
"debugtrap handler not supported",
Op.getDebugLoc(),
DS_Warning);
LLVMContext &Ctx = MF.getFunction().getContext();
Ctx.diagnose(NoTrap);
return Chain;
}
uint64_t TrapID = static_cast<uint64_t>(GCNSubtarget::TrapID::LLVMAMDHSADebugTrap);
SDValue Ops[] = {
Chain,
DAG.getTargetConstant(TrapID, SL, MVT::i16)
};
return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
}
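// Materialize the aperture base for the LOCAL or PRIVATE address space: read
// it from the hardware aperture registers when available, load it from the
// implicit kernel arguments on code object v5, or otherwise load it from the
// queue pointer at offset 0x40/0x44.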
SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
SelectionDAG &DAG) const {
if (Subtarget->hasApertureRegs()) {
unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ?
AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ?
AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
unsigned Encoding =
AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;
SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16);
SDValue ApertureReg = SDValue(
DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0);
SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32);
return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount);
}
if (AMDGPU::getAmdhsaCodeObjectVersion() == 5) {
ImplicitParameter Param =
(AS == AMDGPUAS::LOCAL_ADDRESS) ? SHARED_BASE : PRIVATE_BASE;
return loadImplicitKernelArgument(DAG, MVT::i32, DL, Align(4), Param);
}
MachineFunction &MF = DAG.getMachineFunction();
SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
Register UserSGPR = Info->getQueuePtrUserSGPR();
if (UserSGPR == AMDGPU::NoRegister) {
return DAG.getUNDEF(MVT::i32);
}
SDValue QueuePtr = CreateLiveInRegister(
DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;
SDValue Ptr =
DAG.getObjectPtrOffset(DL, QueuePtr, TypeSize::Fixed(StructOffset));
MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo,
commonAlignment(Align(64), StructOffset),
MachineMemOperand::MODereferenceable |
MachineMemOperand::MOInvariant);
}
static bool isKnownNonNull(SDValue Val, SelectionDAG &DAG,
const AMDGPUTargetMachine &TM, unsigned AddrSpace) {
if (isa<FrameIndexSDNode>(Val) || isa<GlobalAddressSDNode>(Val) ||
isa<BasicBlockSDNode>(Val))
return true;
if (auto *ConstVal = dyn_cast<ConstantSDNode>(Val))
return ConstVal->getSExtValue() != TM.getNullPointerValue(AddrSpace);
return false;
}
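// Lower addrspacecast between flat and the LOCAL/PRIVATE segments. A null
// check is inserted unless the source is known non-null: flat->segment
// truncates to 32 bits, segment->flat combines the 32-bit pointer with the
// segment aperture. Unhandled casts are diagnosed and become undef.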
SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
SelectionDAG &DAG) const {
SDLoc SL(Op);
const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);
SDValue Src = ASC->getOperand(0);
SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
unsigned SrcAS = ASC->getSrcAddressSpace();
const AMDGPUTargetMachine &TM =
static_cast<const AMDGPUTargetMachine &>(getTargetMachine());
if (SrcAS == AMDGPUAS::FLAT_ADDRESS) {
unsigned DestAS = ASC->getDestAddressSpace();
if (DestAS == AMDGPUAS::LOCAL_ADDRESS ||
DestAS == AMDGPUAS::PRIVATE_ADDRESS) {
SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
if (isKnownNonNull(Src, DAG, TM, SrcAS))
return Ptr;
unsigned NullVal = TM.getNullPointerValue(DestAS);
SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
return DAG.getNode(ISD::SELECT, SL, MVT::i32, NonNull, Ptr,
SegmentNullPtr);
}
}
if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
if (SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
SrcAS == AMDGPUAS::PRIVATE_ADDRESS) {
SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
SDValue CvtPtr =
DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
CvtPtr = DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr);
if (isKnownNonNull(Src, DAG, TM, SrcAS))
return CvtPtr;
unsigned NullVal = TM.getNullPointerValue(SrcAS);
SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
SDValue NonNull
= DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);
return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull, CvtPtr,
FlatNullPtr);
}
}
if (SrcAS == AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
Op.getValueType() == MVT::i64) {
const SIMachineFunctionInfo *Info =
DAG.getMachineFunction().getInfo<SIMachineFunctionInfo>();
SDValue Hi = DAG.getConstant(Info->get32BitAddressHighBits(), SL, MVT::i32);
SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Hi);
return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
}
if (ASC->getDestAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
Src.getValueType() == MVT::i64)
return DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
const MachineFunction &MF = DAG.getMachineFunction();
DiagnosticInfoUnsupported InvalidAddrSpaceCast(
MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
DAG.getContext()->diagnose(InvalidAddrSpaceCast);
return DAG.getUNDEF(ASC->getValueType(0));
}
SDValue SITargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
SelectionDAG &DAG) const {
SDValue Vec = Op.getOperand(0);
SDValue Ins = Op.getOperand(1);
SDValue Idx = Op.getOperand(2);
EVT VecVT = Vec.getValueType();
EVT InsVT = Ins.getValueType();
EVT EltVT = VecVT.getVectorElementType();
unsigned InsNumElts = InsVT.getVectorNumElements();
unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
SDLoc SL(Op);
for (unsigned I = 0; I != InsNumElts; ++I) {
SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Ins,
DAG.getConstant(I, SL, MVT::i32));
Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, VecVT, Vec, Elt,
DAG.getConstant(IdxVal + I, SL, MVT::i32));
}
return Vec;
}
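// Insert into a vector of at most 64 bits. A v4 vector of 16-bit elements
// with a constant index is handled by editing the affected v2i16 half; other
// constant indices fall back to the default expansion, and a dynamic index is
// handled with a bitfield-insert style mask (0xffff shifted by the scaled
// index) over the integer-typed vector.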
SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
SelectionDAG &DAG) const {
SDValue Vec = Op.getOperand(0);
SDValue InsVal = Op.getOperand(1);
SDValue Idx = Op.getOperand(2);
EVT VecVT = Vec.getValueType();
EVT EltVT = VecVT.getVectorElementType();
unsigned VecSize = VecVT.getSizeInBits();
unsigned EltSize = EltVT.getSizeInBits();
SDLoc SL(Op);
unsigned NumElts = VecVT.getVectorNumElements();
auto KIdx = dyn_cast<ConstantSDNode>(Idx);
if (NumElts == 4 && EltSize == 16 && KIdx) {
SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Vec);
SDValue LoHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
DAG.getConstant(0, SL, MVT::i32));
SDValue HiHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
DAG.getConstant(1, SL, MVT::i32));
SDValue LoVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, LoHalf);
SDValue HiVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, HiHalf);
unsigned Idx = KIdx->getZExtValue();
bool InsertLo = Idx < 2;
SDValue InsHalf = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, MVT::v2i16,
InsertLo ? LoVec : HiVec,
DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal),
DAG.getConstant(InsertLo ? Idx : (Idx - 2), SL, MVT::i32));
InsHalf = DAG.getNode(ISD::BITCAST, SL, MVT::i32, InsHalf);
SDValue Concat = InsertLo ?
DAG.getBuildVector(MVT::v2i32, SL, { InsHalf, HiHalf }) :
DAG.getBuildVector(MVT::v2i32, SL, { LoHalf, InsHalf });
return DAG.getNode(ISD::BITCAST, SL, VecVT, Concat);
}
if (isa<ConstantSDNode>(Idx))
return SDValue();
assert(VecSize <= 64 && "Expected target vector size to be <= 64 bits");
MVT IntVT = MVT::getIntegerVT(VecSize);
assert(isPowerOf2_32(EltSize));
SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
SDValue BFM = DAG.getNode(ISD::SHL, SL, IntVT,
DAG.getConstant(0xffff, SL, IntVT),
ScaledIdx);
SDValue ExtVal = DAG.getNode(ISD::BITCAST, SL, IntVT,
DAG.getSplatBuildVector(VecVT, SL, InsVal));
SDValue LHS = DAG.getNode(ISD::AND, SL, IntVT, BFM, ExtVal);
SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
SDValue RHS = DAG.getNode(ISD::AND, SL, IntVT,
DAG.getNOT(SL, BFM, IntVT), BCVec);
SDValue BFI = DAG.getNode(ISD::OR, SL, IntVT, LHS, RHS);
return DAG.getNode(ISD::BITCAST, SL, VecVT, BFI);
}
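// Extract from a vector with a possibly dynamic index. 128- and 256-bit
// vectors are first split in half, selecting the high half when the index
// exceeds the low half; vectors of at most 64 bits are bitcast to an integer
// and the element is shifted out by the scaled index.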
SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
SelectionDAG &DAG) const {
SDLoc SL(Op);
EVT ResultVT = Op.getValueType();
SDValue Vec = Op.getOperand(0);
SDValue Idx = Op.getOperand(1);
EVT VecVT = Vec.getValueType();
unsigned VecSize = VecVT.getSizeInBits();
EVT EltVT = VecVT.getVectorElementType();
DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);
if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI))
return Combined;
if (VecSize == 128 || VecSize == 256) {
SDValue Lo, Hi;
EVT LoVT, HiVT;
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
if (VecSize == 128) {
SDValue V2 = DAG.getBitcast(MVT::v2i64, Vec);
Lo = DAG.getBitcast(LoVT,
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i64, V2,
DAG.getConstant(0, SL, MVT::i32)));
Hi = DAG.getBitcast(HiVT,
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i64, V2,
DAG.getConstant(1, SL, MVT::i32)));
} else {
assert(VecSize == 256);
SDValue V2 = DAG.getBitcast(MVT::v4i64, Vec);
SDValue Parts[4];
for (unsigned P = 0; P < 4; ++P) {
Parts[P] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i64, V2,
DAG.getConstant(P, SL, MVT::i32));
}
Lo = DAG.getBitcast(LoVT, DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i64,
Parts[0], Parts[1]));
Hi = DAG.getBitcast(HiVT, DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i64,
Parts[2], Parts[3]));
}
EVT IdxVT = Idx.getValueType();
unsigned NElem = VecVT.getVectorNumElements();
assert(isPowerOf2_32(NElem));
SDValue IdxMask = DAG.getConstant(NElem / 2 - 1, SL, IdxVT);
SDValue NewIdx = DAG.getNode(ISD::AND, SL, IdxVT, Idx, IdxMask);
SDValue Half = DAG.getSelectCC(SL, Idx, IdxMask, Hi, Lo, ISD::SETUGT);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Half, NewIdx);
}
assert(VecSize <= 64);
MVT IntVT = MVT::getIntegerVT(VecSize);
SDValue VecBC = peekThroughBitcasts(Vec);
if (VecBC.getOpcode() == ISD::SCALAR_TO_VECTOR) {
SDValue Src = VecBC.getOperand(0);
Src = DAG.getBitcast(Src.getValueType().changeTypeToInteger(), Src);
Vec = DAG.getAnyExtOrTrunc(Src, SL, IntVT);
}
unsigned EltSize = EltVT.getSizeInBits();
assert(isPowerOf2_32(EltSize));
SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
SDValue BC = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
SDValue Elt = DAG.getNode(ISD::SRL, SL, IntVT, BC, ScaledIdx);
if (ResultVT == MVT::f16) {
SDValue Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Elt);
return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
}
return DAG.getAnyExtOrTrunc(Elt, SL, ResultVT);
}
static bool elementPairIsContiguous(ArrayRef<int> Mask, int Elt) {
assert(Elt % 2 == 0);
return Mask[Elt + 1] == Mask[Elt] + 1 && (Mask[Elt] % 2 == 0);
}
SDValue SITargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
SelectionDAG &DAG) const {
SDLoc SL(Op);
EVT ResultVT = Op.getValueType();
ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
EVT PackVT = ResultVT.isInteger() ? MVT::v2i16 : MVT::v2f16;
EVT EltVT = PackVT.getVectorElementType();
int SrcNumElts = Op.getOperand(0).getValueType().getVectorNumElements();
SmallVector<SDValue, 4> Pieces;
for (int I = 0, N = ResultVT.getVectorNumElements(); I != N; I += 2) {
if (elementPairIsContiguous(SVN->getMask(), I)) {
const int Idx = SVN->getMaskElt(I);
int VecIdx = Idx < SrcNumElts ? 0 : 1;
int EltIdx = Idx < SrcNumElts ? Idx : Idx - SrcNumElts;
SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL,
PackVT, SVN->getOperand(VecIdx),
DAG.getConstant(EltIdx, SL, MVT::i32));
Pieces.push_back(SubVec);
} else {
const int Idx0 = SVN->getMaskElt(I);
const int Idx1 = SVN->getMaskElt(I + 1);
int VecIdx0 = Idx0 < SrcNumElts ? 0 : 1;
int VecIdx1 = Idx1 < SrcNumElts ? 0 : 1;
int EltIdx0 = Idx0 < SrcNumElts ? Idx0 : Idx0 - SrcNumElts;
int EltIdx1 = Idx1 < SrcNumElts ? Idx1 : Idx1 - SrcNumElts;
SDValue Vec0 = SVN->getOperand(VecIdx0);
SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
Vec0, DAG.getConstant(EltIdx0, SL, MVT::i32));
SDValue Vec1 = SVN->getOperand(VecIdx1);
SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
Vec1, DAG.getConstant(EltIdx1, SL, MVT::i32));
Pieces.push_back(DAG.getBuildVector(PackVT, SL, { Elt0, Elt1 }));
}
}
return DAG.getNode(ISD::CONCAT_VECTORS, SL, ResultVT, Pieces);
}
SDValue SITargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op,
SelectionDAG &DAG) const {
SDValue SVal = Op.getOperand(0);
EVT ResultVT = Op.getValueType();
EVT SValVT = SVal.getValueType();
SDValue UndefVal = DAG.getUNDEF(SValVT);
SDLoc SL(Op);
SmallVector<SDValue, 8> VElts;
VElts.push_back(SVal);
for (int I = 1, E = ResultVT.getVectorNumElements(); I < E; ++I)
VElts.push_back(UndefVal);
return DAG.getBuildVector(ResultVT, SL, VElts);
}
SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op,
SelectionDAG &DAG) const {
SDLoc SL(Op);
EVT VT = Op.getValueType();
if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
VT == MVT::v8i16 || VT == MVT::v8f16) {
EVT HalfVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(),
VT.getVectorNumElements() / 2);
MVT HalfIntVT = MVT::getIntegerVT(HalfVT.getSizeInBits());
SmallVector<SDValue, 4> LoOps, HiOps;
for (unsigned I = 0, E = VT.getVectorNumElements() / 2; I != E; ++I) {
LoOps.push_back(Op.getOperand(I));
HiOps.push_back(Op.getOperand(I + E));
}
SDValue Lo = DAG.getBuildVector(HalfVT, SL, LoOps);
SDValue Hi = DAG.getBuildVector(HalfVT, SL, HiOps);
SDValue CastLo = DAG.getNode(ISD::BITCAST, SL, HalfIntVT, Lo);
SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, HalfIntVT, Hi);
SDValue Blend = DAG.getBuildVector(MVT::getVectorVT(HalfIntVT, 2), SL,
{ CastLo, CastHi });
return DAG.getNode(ISD::BITCAST, SL, VT, Blend);
}
if (VT == MVT::v16i16 || VT == MVT::v16f16) {
EVT QuarterVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(),
VT.getVectorNumElements() / 4);
MVT QuarterIntVT = MVT::getIntegerVT(QuarterVT.getSizeInBits());
SmallVector<SDValue, 4> Parts[4];
for (unsigned I = 0, E = VT.getVectorNumElements() / 4; I != E; ++I) {
for (unsigned P = 0; P < 4; ++P)
Parts[P].push_back(Op.getOperand(I + P * E));
}
SDValue Casts[4];
for (unsigned P = 0; P < 4; ++P) {
SDValue Vec = DAG.getBuildVector(QuarterVT, SL, Parts[P]);
Casts[P] = DAG.getNode(ISD::BITCAST, SL, QuarterIntVT, Vec);
}
SDValue Blend =
DAG.getBuildVector(MVT::getVectorVT(QuarterIntVT, 4), SL, Casts);
return DAG.getNode(ISD::BITCAST, SL, VT, Blend);
}
assert(VT == MVT::v2f16 || VT == MVT::v2i16);
assert(!Subtarget->hasVOP3PInsts() && "this should be legal");
SDValue Lo = Op.getOperand(0);
SDValue Hi = Op.getOperand(1);
if (Hi.isUndef()) {
Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
SDValue ExtLo = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Lo);
return DAG.getNode(ISD::BITCAST, SL, VT, ExtLo);
}
Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Hi);
Hi = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Hi);
SDValue ShlHi = DAG.getNode(ISD::SHL, SL, MVT::i32, Hi,
DAG.getConstant(16, SL, MVT::i32));
if (Lo.isUndef())
return DAG.getNode(ISD::BITCAST, SL, VT, ShlHi);
Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
Lo = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Lo);
SDValue Or = DAG.getNode(ISD::OR, SL, MVT::i32, Lo, ShlHi);
return DAG.getNode(ISD::BITCAST, SL, VT, Or);
}
bool
SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
return (GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
!shouldEmitGOTReloc(GA->getGlobal());
}
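// Build a PC-relative address via AMDGPUISD::PC_ADD_REL_OFFSET. The +4 and
// +12 byte adjustments account for where the lo/hi relocation immediates sit
// relative to the PC value returned by s_getpc_b64.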
static SDValue
buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
const SDLoc &DL, int64_t Offset, EVT PtrVT,
unsigned GAFlags = SIInstrInfo::MO_NONE) {
assert(isInt<32>(Offset + 4) && "32-bit offset is expected!");
SDValue PtrLo =
DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, GAFlags);
SDValue PtrHi;
if (GAFlags == SIInstrInfo::MO_NONE) {
PtrHi = DAG.getTargetConstant(0, DL, MVT::i32);
} else {
PtrHi =
DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 12, GAFlags + 1);
}
return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi);
}
SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
SDValue Op,
SelectionDAG &DAG) const {
GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
SDLoc DL(GSD);
EVT PtrVT = Op.getValueType();
const GlobalValue *GV = GSD->getGlobal();
if ((GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS &&
shouldUseLDSConstAddress(GV)) ||
GSD->getAddressSpace() == AMDGPUAS::REGION_ADDRESS ||
GSD->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) {
if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS &&
GV->hasExternalLinkage()) {
Type *Ty = GV->getValueType();
if (DAG.getDataLayout().getTypeAllocSize(Ty).isZero()) {
assert(PtrVT == MVT::i32 && "32-bit pointer is expected.");
MFI->setDynLDSAlign(DAG.getDataLayout(), *cast<GlobalVariable>(GV));
return SDValue(
DAG.getMachineNode(AMDGPU::GET_GROUPSTATICSIZE, DL, PtrVT), 0);
}
}
return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);
}
if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, GSD->getOffset(),
SIInstrInfo::MO_ABS32_LO);
return DAG.getNode(AMDGPUISD::LDS, DL, MVT::i32, GA);
}
if (shouldEmitFixup(GV))
return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT);
else if (shouldEmitPCReloc(GV))
return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT,
SIInstrInfo::MO_REL32);
SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT,
SIInstrInfo::MO_GOTPCREL32);
Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
const DataLayout &DataLayout = DAG.getDataLayout();
Align Alignment = DataLayout.getABITypeAlign(PtrTy);
MachinePointerInfo PtrInfo
= MachinePointerInfo::getGOT(DAG.getMachineFunction());
return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Alignment,
MachineMemOperand::MODereferenceable |
MachineMemOperand::MOInvariant);
}
SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
const SDLoc &DL, SDValue V) const {
SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue,
V, Chain);
return SDValue(M0, 0);
}
SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
SDValue Op,
MVT VT,
unsigned Offset) const {
SDLoc SL(Op);
SDValue Param = lowerKernargMemParameter(
DAG, MVT::i32, MVT::i32, SL, DAG.getEntryNode(), Offset, Align(4), false);
return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
DAG.getValueType(VT));
}
static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
EVT VT) {
DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
"non-hsa intrinsic with hsa target",
DL.getDebugLoc());
DAG.getContext()->diagnose(BadIntrin);
return DAG.getUNDEF(VT);
}
static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
EVT VT) {
DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
"intrinsic not supported on subtarget",
DL.getDebugLoc());
DAG.getContext()->diagnose(BadIntrin);
return DAG.getUNDEF(VT);
}
static SDValue getBuildDwordsVector(SelectionDAG &DAG, SDLoc DL,
ArrayRef<SDValue> Elts) {
assert(!Elts.empty());
MVT Type;
unsigned NumElts = Elts.size();
if (NumElts <= 8) {
Type = MVT::getVectorVT(MVT::f32, NumElts);
} else {
assert(Elts.size() <= 16);
Type = MVT::v16f32;
NumElts = 16;
}
SmallVector<SDValue, 16> VecElts(NumElts);
for (unsigned i = 0; i < Elts.size(); ++i) {
SDValue Elt = Elts[i];
if (Elt.getValueType() != MVT::f32)
Elt = DAG.getBitcast(MVT::f32, Elt);
VecElts[i] = Elt;
}
for (unsigned i = Elts.size(); i < NumElts; ++i)
VecElts[i] = DAG.getUNDEF(MVT::f32);
if (NumElts == 1)
return VecElts[0];
return DAG.getBuildVector(Type, DL, VecElts);
}
static SDValue padEltsToUndef(SelectionDAG &DAG, const SDLoc &DL, EVT CastVT,
SDValue Src, int ExtraElts) {
EVT SrcVT = Src.getValueType();
SmallVector<SDValue, 8> Elts;
if (SrcVT.isVector())
DAG.ExtractVectorElements(Src, Elts);
else
Elts.push_back(Src);
SDValue Undef = DAG.getUNDEF(SrcVT.getScalarType());
while (ExtraElts--)
Elts.push_back(Undef);
return DAG.getBuildVector(CastVT, DL, Elts);
}
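// Assemble the return value of an image load/sample. Trims the raw dword
// result down to the dwords populated by the dmask, pads back up with undef
// where needed, repacks D16 data, and appends the TFE status dword when a
// texture-fail result was requested.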
static SDValue constructRetValue(SelectionDAG &DAG,
MachineSDNode *Result,
ArrayRef<EVT> ResultTypes,
bool IsTexFail, bool Unpacked, bool IsD16,
int DMaskPop, int NumVDataDwords,
const SDLoc &DL) {
EVT ReqRetVT = ResultTypes[0];
int ReqRetNumElts = ReqRetVT.isVector() ? ReqRetVT.getVectorNumElements() : 1;
int NumDataDwords = (!IsD16 || (IsD16 && Unpacked)) ?
ReqRetNumElts : (ReqRetNumElts + 1) / 2;
int MaskPopDwords = (!IsD16 || (IsD16 && Unpacked)) ?
DMaskPop : (DMaskPop + 1) / 2;
MVT DataDwordVT = NumDataDwords == 1 ?
MVT::i32 : MVT::getVectorVT(MVT::i32, NumDataDwords);
MVT MaskPopVT = MaskPopDwords == 1 ?
MVT::i32 : MVT::getVectorVT(MVT::i32, MaskPopDwords);
SDValue Data(Result, 0);
SDValue TexFail;
if (DMaskPop > 0 && Data.getValueType() != MaskPopVT) {
SDValue ZeroIdx = DAG.getConstant(0, DL, MVT::i32);
if (MaskPopVT.isVector()) {
Data = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MaskPopVT,
SDValue(Result, 0), ZeroIdx);
} else {
Data = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MaskPopVT,
SDValue(Result, 0), ZeroIdx);
}
}
if (DataDwordVT.isVector())
Data = padEltsToUndef(DAG, DL, DataDwordVT, Data,
NumDataDwords - MaskPopDwords);
if (IsD16)
Data = adjustLoadValueTypeImpl(Data, ReqRetVT, DL, DAG, Unpacked);
EVT LegalReqRetVT = ReqRetVT;
if (!ReqRetVT.isVector()) {
if (!Data.getValueType().isInteger())
Data = DAG.getNode(ISD::BITCAST, DL,
Data.getValueType().changeTypeToInteger(), Data);
Data = DAG.getNode(ISD::TRUNCATE, DL, ReqRetVT.changeTypeToInteger(), Data);
} else {
if ((ReqRetVT.getVectorNumElements() % 2) == 1 &&
ReqRetVT.getVectorElementType().getSizeInBits() == 16) {
LegalReqRetVT =
EVT::getVectorVT(*DAG.getContext(), ReqRetVT.getVectorElementType(),
ReqRetVT.getVectorNumElements() + 1);
}
}
Data = DAG.getNode(ISD::BITCAST, DL, LegalReqRetVT, Data);
if (IsTexFail) {
TexFail =
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, SDValue(Result, 0),
DAG.getConstant(MaskPopDwords, DL, MVT::i32));
return DAG.getMergeValues({Data, TexFail, SDValue(Result, 1)}, DL);
}
if (Result->getNumValues() == 1)
return Data;
return DAG.getMergeValues({Data, SDValue(Result, 1)}, DL);
}
static bool parseTexFail(SDValue TexFailCtrl, SelectionDAG &DAG, SDValue *TFE,
SDValue *LWE, bool &IsTexFail) {
auto TexFailCtrlConst = cast<ConstantSDNode>(TexFailCtrl.getNode());
uint64_t Value = TexFailCtrlConst->getZExtValue();
if (Value) {
IsTexFail = true;
}
SDLoc DL(TexFailCtrlConst);
*TFE = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
Value &= ~(uint64_t)0x1;
*LWE = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
Value &= ~(uint64_t)0x2;
return Value == 0;
}
static void packImage16bitOpsToDwords(SelectionDAG &DAG, SDValue Op,
MVT PackVectorVT,
SmallVectorImpl<SDValue> &PackedAddrs,
unsigned DimIdx, unsigned EndIdx,
unsigned NumGradients) {
SDLoc DL(Op);
for (unsigned I = DimIdx; I < EndIdx; I++) {
SDValue Addr = Op.getOperand(I);
if (((I + 1) >= EndIdx) ||
((NumGradients / 2) % 2 == 1 && (I == DimIdx + (NumGradients / 2) - 1 ||
I == DimIdx + NumGradients - 1))) {
if (Addr.getValueType() != MVT::i16)
Addr = DAG.getBitcast(MVT::i16, Addr);
Addr = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Addr);
} else {
Addr = DAG.getBuildVector(PackVectorVT, DL, {Addr, Op.getOperand(I + 1)});
I++;
}
Addr = DAG.getBitcast(MVT::f32, Addr);
PackedAddrs.push_back(Addr);
}
}
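// Common lowering for image dim intrinsics: collects and (for A16/G16) packs
// the address operands, validates the dmask/d16/texfail configuration, and
// selects the MIMG opcode for the target encoding (GFX8, GFX90A, GFX10 or
// GFX11, NSA or contiguous VADDR).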
SDValue SITargetLowering::lowerImage(SDValue Op,
const AMDGPU::ImageDimIntrinsicInfo *Intr,
SelectionDAG &DAG, bool WithChain) const {
SDLoc DL(Op);
MachineFunction &MF = DAG.getMachineFunction();
const GCNSubtarget* ST = &MF.getSubtarget<GCNSubtarget>();
const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
unsigned IntrOpcode = Intr->BaseOpcode;
bool IsGFX10Plus = AMDGPU::isGFX10Plus(*Subtarget);
bool IsGFX11Plus = AMDGPU::isGFX11Plus(*Subtarget);
SmallVector<EVT, 3> ResultTypes(Op->values());
SmallVector<EVT, 3> OrigResultTypes(Op->values());
bool IsD16 = false;
bool IsG16 = false;
bool IsA16 = false;
SDValue VData;
int NumVDataDwords;
bool AdjustRetType = false;
const unsigned ArgOffset = WithChain ? 2 : 1;
unsigned DMask;
unsigned DMaskLanes = 0;
if (BaseOpcode->Atomic) {
VData = Op.getOperand(2);
bool Is64Bit = VData.getValueType() == MVT::i64;
if (BaseOpcode->AtomicX2) {
SDValue VData2 = Op.getOperand(3);
VData = DAG.getBuildVector(Is64Bit ? MVT::v2i64 : MVT::v2i32, DL,
{VData, VData2});
if (Is64Bit)
VData = DAG.getBitcast(MVT::v4i32, VData);
ResultTypes[0] = Is64Bit ? MVT::v2i64 : MVT::v2i32;
DMask = Is64Bit ? 0xf : 0x3;
NumVDataDwords = Is64Bit ? 4 : 2;
} else {
DMask = Is64Bit ? 0x3 : 0x1;
NumVDataDwords = Is64Bit ? 2 : 1;
}
} else {
auto *DMaskConst =
cast<ConstantSDNode>(Op.getOperand(ArgOffset + Intr->DMaskIndex));
DMask = DMaskConst->getZExtValue();
DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
if (BaseOpcode->Store) {
VData = Op.getOperand(2);
MVT StoreVT = VData.getSimpleValueType();
if (StoreVT.getScalarType() == MVT::f16) {
if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16)
return Op;
IsD16 = true;
VData = handleD16VData(VData, DAG, true);
}
NumVDataDwords = (VData.getValueType().getSizeInBits() + 31) / 32;
} else {
MVT LoadVT = ResultTypes[0].getSimpleVT();
if (LoadVT.getScalarType() == MVT::f16) {
if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16)
return Op;
IsD16 = true;
}
if ((LoadVT.isVector() && LoadVT.getVectorNumElements() < DMaskLanes) ||
(!LoadVT.isVector() && DMaskLanes > 1))
return Op;
if (IsD16 && !Subtarget->hasUnpackedD16VMem() &&
!(BaseOpcode->Gather4 && Subtarget->hasImageGather4D16Bug()))
NumVDataDwords = (DMaskLanes + 1) / 2;
else
NumVDataDwords = DMaskLanes;
AdjustRetType = true;
}
}
unsigned VAddrEnd = ArgOffset + Intr->VAddrEnd;
SmallVector<SDValue, 4> VAddrs;
MVT VAddrVT =
Op.getOperand(ArgOffset + Intr->GradientStart).getSimpleValueType();
MVT VAddrScalarVT = VAddrVT.getScalarType();
MVT GradPackVectorVT = VAddrScalarVT == MVT::f16 ? MVT::v2f16 : MVT::v2i16;
IsG16 = VAddrScalarVT == MVT::f16 || VAddrScalarVT == MVT::i16;
VAddrVT = Op.getOperand(ArgOffset + Intr->CoordStart).getSimpleValueType();
VAddrScalarVT = VAddrVT.getScalarType();
MVT AddrPackVectorVT = VAddrScalarVT == MVT::f16 ? MVT::v2f16 : MVT::v2i16;
IsA16 = VAddrScalarVT == MVT::f16 || VAddrScalarVT == MVT::i16;
for (unsigned I = Intr->VAddrStart; I < Intr->GradientStart; I++) {
if (IsA16 && (Op.getOperand(ArgOffset + I).getValueType() == MVT::f16)) {
assert(I == Intr->BiasIndex && "Got unexpected 16-bit extra argument");
SDValue Bias = DAG.getBuildVector(
MVT::v2f16, DL,
{Op.getOperand(ArgOffset + I), DAG.getUNDEF(MVT::f16)});
VAddrs.push_back(Bias);
} else {
assert((!IsA16 || Intr->NumBiasArgs == 0 || I != Intr->BiasIndex) &&
"Bias needs to be converted to 16 bit in A16 mode");
VAddrs.push_back(Op.getOperand(ArgOffset + I));
}
}
if (BaseOpcode->Gradients && !ST->hasG16() && (IsA16 != IsG16)) {
LLVM_DEBUG(
dbgs() << "Failed to lower image intrinsic: 16 bit addresses "
"require 16 bit args for both gradients and addresses");
return Op;
}
if (IsA16) {
if (!ST->hasA16()) {
LLVM_DEBUG(dbgs() << "Failed to lower image intrinsic: Target does not "
"support 16 bit addresses\n");
return Op;
}
}
if (BaseOpcode->Gradients && IsG16 && ST->hasG16()) {
const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
IntrOpcode = G16MappingInfo->G16;
}
if (IsG16) {
packImage16bitOpsToDwords(DAG, Op, GradPackVectorVT, VAddrs,
ArgOffset + Intr->GradientStart,
ArgOffset + Intr->CoordStart, Intr->NumGradients);
} else {
for (unsigned I = ArgOffset + Intr->GradientStart;
I < ArgOffset + Intr->CoordStart; I++)
VAddrs.push_back(Op.getOperand(I));
}
if (IsA16) {
packImage16bitOpsToDwords(DAG, Op, AddrPackVectorVT, VAddrs,
ArgOffset + Intr->CoordStart, VAddrEnd,
0 /* No gradients */);
} else {
for (unsigned I = ArgOffset + Intr->CoordStart; I < VAddrEnd; I++)
VAddrs.push_back(Op.getOperand(I));
}
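// Use the NSA encoding when the address operands fit within the target's NSA
// limit; otherwise the addresses are merged into a single contiguous vector.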
bool UseNSA = ST->hasFeature(AMDGPU::FeatureNSAEncoding) &&
VAddrs.size() >= 3 &&
VAddrs.size() <= (unsigned)ST->getNSAMaxSize();
SDValue VAddr;
if (!UseNSA)
VAddr = getBuildDwordsVector(DAG, DL, VAddrs);
SDValue True = DAG.getTargetConstant(1, DL, MVT::i1);
SDValue False = DAG.getTargetConstant(0, DL, MVT::i1);
SDValue Unorm;
if (!BaseOpcode->Sampler) {
Unorm = True;
} else {
auto UnormConst =
cast<ConstantSDNode>(Op.getOperand(ArgOffset + Intr->UnormIndex));
Unorm = UnormConst->getZExtValue() ? True : False;
}
SDValue TFE;
SDValue LWE;
SDValue TexFail = Op.getOperand(ArgOffset + Intr->TexFailCtrlIndex);
bool IsTexFail = false;
if (!parseTexFail(TexFail, DAG, &TFE, &LWE, IsTexFail))
return Op;
if (IsTexFail) {
if (!DMaskLanes) {
DMask = 0x1;
DMaskLanes = 1;
NumVDataDwords = 1;
}
NumVDataDwords += 1;
AdjustRetType = true;
}
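// An earlier step (a plain load, or TFE/LWE handling) requested that the
// result type be rewritten as a dword vector covering NumVDataDwords.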
if (AdjustRetType) {
if (DMaskLanes == 0 && !BaseOpcode->Store) {
SDValue Undef = DAG.getUNDEF(Op.getValueType());
if (isa<MemSDNode>(Op))
return DAG.getMergeValues({Undef, Op.getOperand(0)}, DL);
return Undef;
}
EVT NewVT = NumVDataDwords > 1 ?
EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumVDataDwords)
: MVT::i32;
ResultTypes[0] = NewVT;
if (ResultTypes.size() == 3) {
ResultTypes.erase(&ResultTypes[1]);
}
}
unsigned CPol = cast<ConstantSDNode>(
Op.getOperand(ArgOffset + Intr->CachePolicyIndex))->getZExtValue();
if (BaseOpcode->Atomic)
CPol |= AMDGPU::CPol::GLC;
if (CPol & ~AMDGPU::CPol::ALL)
return Op;
SmallVector<SDValue, 26> Ops;
if (BaseOpcode->Store || BaseOpcode->Atomic)
Ops.push_back(VData);
if (UseNSA)
append_range(Ops, VAddrs);
else
Ops.push_back(VAddr);
Ops.push_back(Op.getOperand(ArgOffset + Intr->RsrcIndex));
if (BaseOpcode->Sampler)
Ops.push_back(Op.getOperand(ArgOffset + Intr->SampIndex));
Ops.push_back(DAG.getTargetConstant(DMask, DL, MVT::i32));
if (IsGFX10Plus)
Ops.push_back(DAG.getTargetConstant(DimInfo->Encoding, DL, MVT::i32));
Ops.push_back(Unorm);
Ops.push_back(DAG.getTargetConstant(CPol, DL, MVT::i32));
Ops.push_back(IsA16 && ST->hasFeature(AMDGPU::FeatureR128A16) ? True : False);
if (IsGFX10Plus)
Ops.push_back(IsA16 ? True : False);
if (!Subtarget->hasGFX90AInsts()) {
Ops.push_back(TFE);
} else if (cast<ConstantSDNode>(TFE)->getZExtValue()) {
report_fatal_error("TFE is not supported on this GPU");
}
Ops.push_back(LWE);
if (!IsGFX10Plus)
Ops.push_back(DimInfo->DA ? True : False);
if (BaseOpcode->HasD16)
Ops.push_back(IsD16 ? True : False);
if (isa<MemSDNode>(Op))
Ops.push_back(Op.getOperand(0));
int NumVAddrDwords =
UseNSA ? VAddrs.size() : VAddr.getValueType().getSizeInBits() / 32;
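// Select the concrete MIMG opcode for the target's encoding generation.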
int Opcode = -1;
if (IsGFX11Plus) {
Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
UseNSA ? AMDGPU::MIMGEncGfx11NSA
: AMDGPU::MIMGEncGfx11Default,
NumVDataDwords, NumVAddrDwords);
} else if (IsGFX10Plus) {
Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
UseNSA ? AMDGPU::MIMGEncGfx10NSA
: AMDGPU::MIMGEncGfx10Default,
NumVDataDwords, NumVAddrDwords);
} else {
if (Subtarget->hasGFX90AInsts()) {
Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx90a,
NumVDataDwords, NumVAddrDwords);
if (Opcode == -1)
report_fatal_error(
"requested image instruction is not supported on this GPU");
}
if (Opcode == -1 &&
Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
NumVDataDwords, NumVAddrDwords);
if (Opcode == -1)
Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
NumVDataDwords, NumVAddrDwords);
}
assert(Opcode != -1);
MachineSDNode *NewNode = DAG.getMachineNode(Opcode, DL, ResultTypes, Ops);
if (auto MemOp = dyn_cast<MemSDNode>(Op)) {
MachineMemOperand *MemRef = MemOp->getMemOperand();
DAG.setNodeMemRefs(NewNode, {MemRef});
}
if (BaseOpcode->AtomicX2) {
SmallVector<SDValue, 1> Elt;
DAG.ExtractVectorElements(SDValue(NewNode, 0), Elt, 0, 1);
return DAG.getMergeValues({Elt[0], SDValue(NewNode, 1)}, DL);
}
if (BaseOpcode->Store)
return SDValue(NewNode, 0);
return constructRetValue(DAG, NewNode,
OrigResultTypes, IsTexFail,
Subtarget->hasUnpackedD16VMem(), IsD16,
DMaskLanes, NumVDataDwords, DL);
}
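// Lower an s_buffer_load. A uniform offset selects directly to SBUFFER_LOAD;
// a divergent offset is emitted as one or more MUBUF buffer loads of up to
// four dwords each.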
SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc,
SDValue Offset, SDValue CachePolicy,
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
const DataLayout &DataLayout = DAG.getDataLayout();
Align Alignment =
DataLayout.getABITypeAlign(VT.getTypeForEVT(*DAG.getContext()));
MachineMemOperand *MMO = MF.getMachineMemOperand(
MachinePointerInfo(),
MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
MachineMemOperand::MOInvariant,
VT.getStoreSize(), Alignment);
if (!Offset->isDivergent()) {
SDValue Ops[] = {
Rsrc,
Offset, CachePolicy
};
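// Widen vec3 loads to vec4 and extract the original elements afterwards.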
if (VT.isVector() && VT.getVectorNumElements() == 3) {
EVT WidenedVT =
EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 4);
auto WidenedOp = DAG.getMemIntrinsicNode(
AMDGPUISD::SBUFFER_LOAD, DL, DAG.getVTList(WidenedVT), Ops, WidenedVT,
MF.getMachineMemOperand(MMO, 0, WidenedVT.getStoreSize()));
auto Subvector = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, WidenedOp,
DAG.getVectorIdxConstant(0, DL));
return Subvector;
}
return DAG.getMemIntrinsicNode(AMDGPUISD::SBUFFER_LOAD, DL,
DAG.getVTList(VT), Ops, VT, MMO);
}
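// The offset is divergent, so a scalar load cannot be used; emit one or more
// vector buffer loads instead.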
SmallVector<SDValue, 4> Loads;
unsigned NumLoads = 1;
MVT LoadVT = VT.getSimpleVT();
unsigned NumElts = LoadVT.isVector() ? LoadVT.getVectorNumElements() : 1;
assert((LoadVT.getScalarType() == MVT::i32 ||
LoadVT.getScalarType() == MVT::f32));
if (NumElts == 8 || NumElts == 16) {
NumLoads = NumElts / 4;
LoadVT = MVT::getVectorVT(LoadVT.getScalarType(), 4);
}
SDVTList VTList = DAG.getVTList({LoadVT, MVT::Glue});
SDValue Ops[] = {
  DAG.getEntryNode(),                    // Chain
  Rsrc,                                  // rsrc
  DAG.getConstant(0, DL, MVT::i32),      // vindex
  {},                                    // voffset -- will be set by setBufferOffsets
  {},                                    // soffset -- will be set by setBufferOffsets
  {},                                    // offset -- will be set by setBufferOffsets
  CachePolicy,                           // cachepolicy
  DAG.getTargetConstant(0, DL, MVT::i1), // idxen
};
setBufferOffsets(Offset, DAG, &Ops[3],
NumLoads > 1 ? Align(16 * NumLoads) : Align(4));
uint64_t InstOffset = cast<ConstantSDNode>(Ops[5])->getZExtValue();
for (unsigned i = 0; i < NumLoads; ++i) {
Ops[5] = DAG.getTargetConstant(InstOffset + 16 * i, DL, MVT::i32);
Loads.push_back(getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList, Ops,
LoadVT, MMO, DAG));
}
if (NumElts == 8 || NumElts == 16)
return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Loads);
return Loads[0];
}
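// Lower a workitem ID query: fold to a constant zero when the maximum ID in
// that dimension is known to be zero, and assert the known zero high bits
// when the loaded value is unmasked.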
SDValue SITargetLowering::lowerWorkitemID(SelectionDAG &DAG, SDValue Op,
unsigned Dim,
const ArgDescriptor &Arg) const {
SDLoc SL(Op);
MachineFunction &MF = DAG.getMachineFunction();
unsigned MaxID = Subtarget->getMaxWorkitemID(MF.getFunction(), Dim);
if (MaxID == 0)
return DAG.getConstant(0, SL, MVT::i32);
SDValue Val = loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
SDLoc(DAG.getEntryNode()), Arg);
if (Arg.isMasked())
return Val;
EVT SmallVT =
EVT::getIntegerVT(*DAG.getContext(), 32 - countLeadingZeros(MaxID));
return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Val,
DAG.getValueType(SmallVT));
}
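// Lower target intrinsics that have no chain operand.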
SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
auto MFI = MF.getInfo<SIMachineFunctionInfo>();
EVT VT = Op.getValueType();
SDLoc DL(Op);
unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
switch (IntrinsicID) {
case Intrinsic::amdgcn_implicit_buffer_ptr: {
if (getSubtarget()->isAmdHsaOrMesa(MF.getFunction()))
return emitNonHSAIntrinsicError(DAG, DL, VT);
return getPreloadedValue(DAG, *MFI, VT,
AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR);
}
case Intrinsic::amdgcn_dispatch_ptr:
case Intrinsic::amdgcn_queue_ptr: {
if (!Subtarget->isAmdHsaOrMesa(MF.getFunction())) {
DiagnosticInfoUnsupported BadIntrin(
MF.getFunction(), "unsupported hsa intrinsic without hsa target",
DL.getDebugLoc());
DAG.getContext()->diagnose(BadIntrin);
return DAG.getUNDEF(VT);
}
auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ?
AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR;
return getPreloadedValue(DAG, *MFI, VT, RegID);
}
case Intrinsic::amdgcn_implicitarg_ptr: {
if (MFI->isEntryFunction())
return getImplicitArgPtr(DAG, DL);
return getPreloadedValue(DAG, *MFI, VT,
AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
}
case Intrinsic::amdgcn_kernarg_segment_ptr: {
if (!AMDGPU::isKernel(MF.getFunction().getCallingConv())) {
return DAG.getConstant(0, DL, VT);
}
return getPreloadedValue(DAG, *MFI, VT,
AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
}
case Intrinsic::amdgcn_dispatch_id: {
return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID);
}
case Intrinsic::amdgcn_rcp:
return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
case Intrinsic::amdgcn_rsq:
return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
case Intrinsic::amdgcn_rsq_legacy:
if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
return emitRemovedIntrinsicError(DAG, DL, VT);
return SDValue();
case Intrinsic::amdgcn_rcp_legacy:
if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
return emitRemovedIntrinsicError(DAG, DL, VT);
return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1));
case Intrinsic::amdgcn_rsq_clamp: {
if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));
Type *Type = VT.getTypeForEVT(*DAG.getContext());
APFloat Max = APFloat::getLargest(Type->getFltSemantics());
APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);
SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
DAG.getConstantFP(Max, DL, VT));
return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
DAG.getConstantFP(Min, DL, VT));
}
case Intrinsic::r600_read_ngroups_x:
if (Subtarget->isAmdHsaOS())
return emitNonHSAIntrinsicError(DAG, DL, VT);
return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
SI::KernelInputOffsets::NGROUPS_X, Align(4),
false);
case Intrinsic::r600_read_ngroups_y:
if (Subtarget->isAmdHsaOS())
return emitNonHSAIntrinsicError(DAG, DL, VT);
return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
SI::KernelInputOffsets::NGROUPS_Y, Align(4),
false);
case Intrinsic::r600_read_ngroups_z:
if (Subtarget->isAmdHsaOS())
return emitNonHSAIntrinsicError(DAG, DL, VT);
return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
SI::KernelInputOffsets::NGROUPS_Z, Align(4),
false);
case Intrinsic::r600_read_global_size_x:
if (Subtarget->isAmdHsaOS())
return emitNonHSAIntrinsicError(DAG, DL, VT);
return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
SI::KernelInputOffsets::GLOBAL_SIZE_X,
Align(4), false);
case Intrinsic::r600_read_global_size_y:
if (Subtarget->isAmdHsaOS())
return emitNonHSAIntrinsicError(DAG, DL, VT);
return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
SI::KernelInputOffsets::GLOBAL_SIZE_Y,
Align(4), false);
case Intrinsic::r600_read_global_size_z:
if (Subtarget->isAmdHsaOS())
return emitNonHSAIntrinsicError(DAG, DL, VT);
return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
SI::KernelInputOffsets::GLOBAL_SIZE_Z,
Align(4), false);
case Intrinsic::r600_read_local_size_x:
if (Subtarget->isAmdHsaOS())
return emitNonHSAIntrinsicError(DAG, DL, VT);
return lowerImplicitZextParam(DAG, Op, MVT::i16,
SI::KernelInputOffsets::LOCAL_SIZE_X);
case Intrinsic::r600_read_local_size_y:
if (Subtarget->isAmdHsaOS())
return emitNonHSAIntrinsicError(DAG, DL, VT);
return lowerImplicitZextParam(DAG, Op, MVT::i16,
SI::KernelInputOffsets::LOCAL_SIZE_Y);
case Intrinsic::r600_read_local_size_z:
if (Subtarget->isAmdHsaOS())
return emitNonHSAIntrinsicError(DAG, DL, VT);
return lowerImplicitZextParam(DAG, Op, MVT::i16,
SI::KernelInputOffsets::LOCAL_SIZE_Z);
case Intrinsic::amdgcn_workgroup_id_x:
return getPreloadedValue(DAG, *MFI, VT,
AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
case Intrinsic::amdgcn_workgroup_id_y:
return getPreloadedValue(DAG, *MFI, VT,
AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
case Intrinsic::amdgcn_workgroup_id_z:
return getPreloadedValue(DAG, *MFI, VT,
AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
case Intrinsic::amdgcn_lds_kernel_id: {
if (MFI->isEntryFunction())
return getLDSKernelId(DAG, DL);
return getPreloadedValue(DAG, *MFI, VT,
AMDGPUFunctionArgInfo::LDS_KERNEL_ID);
}
case Intrinsic::amdgcn_workitem_id_x:
return lowerWorkitemID(DAG, Op, 0, MFI->getArgInfo().WorkItemIDX);
case Intrinsic::amdgcn_workitem_id_y:
return lowerWorkitemID(DAG, Op, 1, MFI->getArgInfo().WorkItemIDY);
case Intrinsic::amdgcn_workitem_id_z:
return lowerWorkitemID(DAG, Op, 2, MFI->getArgInfo().WorkItemIDZ);
case Intrinsic::amdgcn_wavefrontsize:
return DAG.getConstant(MF.getSubtarget<GCNSubtarget>().getWavefrontSize(),
SDLoc(Op), MVT::i32);
case Intrinsic::amdgcn_s_buffer_load: {
unsigned CPol = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
if (CPol & ~AMDGPU::CPol::ALL)
return Op;
return lowerSBuffer(VT, DL, Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
DAG);
}
case Intrinsic::amdgcn_fdiv_fast:
return lowerFDIV_FAST(Op, DAG);
case Intrinsic::amdgcn_sin:
return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));
case Intrinsic::amdgcn_cos:
return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));
case Intrinsic::amdgcn_mul_u24:
return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT, Op.getOperand(1), Op.getOperand(2));
case Intrinsic::amdgcn_mul_i24:
return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT, Op.getOperand(1), Op.getOperand(2));
case Intrinsic::amdgcn_log_clamp: {
if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
return SDValue();
return emitRemovedIntrinsicError(DAG, DL, VT);
}
case Intrinsic::amdgcn_ldexp:
return DAG.getNode(AMDGPUISD::LDEXP, DL, VT,
Op.getOperand(1), Op.getOperand(2));
case Intrinsic::amdgcn_fract:
return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
case Intrinsic::amdgcn_class:
return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
Op.getOperand(1), Op.getOperand(2));
case Intrinsic::amdgcn_div_fmas:
return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
Op.getOperand(4));
case Intrinsic::amdgcn_div_fixup:
return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
case Intrinsic::amdgcn_div_scale: {
const ConstantSDNode *Param = cast<ConstantSDNode>(Op.getOperand(3));
SDValue Numerator = Op.getOperand(1);
SDValue Denominator = Op.getOperand(2);
SDValue Src0 = Param->isAllOnes() ? Numerator : Denominator;
return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
Denominator, Numerator);
}
case Intrinsic::amdgcn_icmp: {
if (Op.getOperand(1).getValueType() == MVT::i1 &&
Op.getConstantOperandVal(2) == 0 &&
Op.getConstantOperandVal(3) == ICmpInst::Predicate::ICMP_NE)
return Op;
return lowerICMPIntrinsic(*this, Op.getNode(), DAG);
}
case Intrinsic::amdgcn_fcmp: {
return lowerFCMPIntrinsic(*this, Op.getNode(), DAG);
}
case Intrinsic::amdgcn_ballot:
return lowerBALLOTIntrinsic(*this, Op.getNode(), DAG);
case Intrinsic::amdgcn_fmed3:
return DAG.getNode(AMDGPUISD::FMED3, DL, VT,
Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
case Intrinsic::amdgcn_fdot2:
return DAG.getNode(AMDGPUISD::FDOT2, DL, VT,
Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
Op.getOperand(4));
case Intrinsic::amdgcn_fmul_legacy:
return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT,
Op.getOperand(1), Op.getOperand(2));
case Intrinsic::amdgcn_sffbh:
return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1));
case Intrinsic::amdgcn_sbfe:
return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
case Intrinsic::amdgcn_ubfe:
return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
case Intrinsic::amdgcn_cvt_pkrtz:
case Intrinsic::amdgcn_cvt_pknorm_i16:
case Intrinsic::amdgcn_cvt_pknorm_u16:
case Intrinsic::amdgcn_cvt_pk_i16:
case Intrinsic::amdgcn_cvt_pk_u16: {
EVT VT = Op.getValueType();
unsigned Opcode;
if (IntrinsicID == Intrinsic::amdgcn_cvt_pkrtz)
Opcode = AMDGPUISD::CVT_PKRTZ_F16_F32;
else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_i16)
Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_u16)
Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
else if (IntrinsicID == Intrinsic::amdgcn_cvt_pk_i16)
Opcode = AMDGPUISD::CVT_PK_I16_I32;
else
Opcode = AMDGPUISD::CVT_PK_U16_U32;
if (isTypeLegal(VT))
return DAG.getNode(Opcode, DL, VT, Op.getOperand(1), Op.getOperand(2));
SDValue Node = DAG.getNode(Opcode, DL, MVT::i32,
Op.getOperand(1), Op.getOperand(2));
return DAG.getNode(ISD::BITCAST, DL, VT, Node);
}
case Intrinsic::amdgcn_fmad_ftz:
return DAG.getNode(AMDGPUISD::FMAD_FTZ, DL, VT, Op.getOperand(1),
Op.getOperand(2), Op.getOperand(3));
case Intrinsic::amdgcn_if_break:
return SDValue(DAG.getMachineNode(AMDGPU::SI_IF_BREAK, DL, VT,
Op->getOperand(1), Op->getOperand(2)), 0);
case Intrinsic::amdgcn_groupstaticsize: {
Triple::OSType OS = getTargetMachine().getTargetTriple().getOS();
if (OS == Triple::AMDHSA || OS == Triple::AMDPAL)
return Op;
const Module *M = MF.getFunction().getParent();
const GlobalValue *GV =
M->getNamedValue(Intrinsic::getName(Intrinsic::amdgcn_groupstaticsize));
SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, 0,
SIInstrInfo::MO_ABS32_LO);
return {DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, GA), 0};
}
case Intrinsic::amdgcn_is_shared:
case Intrinsic::amdgcn_is_private: {
SDLoc SL(Op);
unsigned AS = (IntrinsicID == Intrinsic::amdgcn_is_shared) ?
AMDGPUAS::LOCAL_ADDRESS : AMDGPUAS::PRIVATE_ADDRESS;
SDValue Aperture = getSegmentAperture(AS, SL, DAG);
SDValue SrcVec = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32,
Op.getOperand(1));
SDValue SrcHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, SrcVec,
DAG.getConstant(1, SL, MVT::i32));
return DAG.getSetCC(SL, MVT::i1, SrcHi, Aperture, ISD::SETEQ);
}
case Intrinsic::amdgcn_perm:
return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, Op.getOperand(1),
Op.getOperand(2), Op.getOperand(3));
case Intrinsic::amdgcn_reloc_constant: {
Module *M = const_cast<Module *>(MF.getFunction().getParent());
const MDNode *Metadata = cast<MDNodeSDNode>(Op.getOperand(1))->getMD();
auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
auto RelocSymbol = cast<GlobalVariable>(
M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));
SDValue GA = DAG.getTargetGlobalAddress(RelocSymbol, DL, MVT::i32, 0,
SIInstrInfo::MO_ABS32_LO);
return {DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, GA), 0};
}
default:
if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
return lowerImage(Op, ImageDimIntr, DAG, false);
return Op;
}
}
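// Refine the memory operand of a buffer intrinsic: a precise offset is only
// kept when every offset operand (and the optional vindex) is a constant;
// otherwise the value is cleared so the MMO stays conservative.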
static void updateBufferMMO(MachineMemOperand *MMO, SDValue VOffset,
SDValue SOffset, SDValue Offset,
SDValue VIndex = SDValue()) {
if (!isa<ConstantSDNode>(VOffset) || !isa<ConstantSDNode>(SOffset) ||
!isa<ConstantSDNode>(Offset)) {
MMO->setValue((Value *)nullptr);
return;
}
if (VIndex && (!isa<ConstantSDNode>(VIndex) ||
!cast<ConstantSDNode>(VIndex)->isZero())) {
MMO->setValue((Value *)nullptr);
return;
}
MMO->setOffset(cast<ConstantSDNode>(VOffset)->getSExtValue() +
cast<ConstantSDNode>(SOffset)->getSExtValue() +
cast<ConstantSDNode>(Offset)->getSExtValue());
}
SDValue SITargetLowering::lowerRawBufferAtomicIntrin(SDValue Op,
SelectionDAG &DAG,
unsigned NewOpcode) const {
SDLoc DL(Op);
SDValue VData = Op.getOperand(2);
auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
SDValue Ops[] = {
  Op.getOperand(0),                      // Chain
  VData,                                 // vdata
  Op.getOperand(3),                      // rsrc
  DAG.getConstant(0, DL, MVT::i32),      // vindex
  Offsets.first,                         // voffset
  Op.getOperand(5),                      // soffset
  Offsets.second,                        // offset
  Op.getOperand(6),                      // cachepolicy
  DAG.getTargetConstant(0, DL, MVT::i1), // idxen
};
auto *M = cast<MemSDNode>(Op);
updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6]);
EVT MemVT = VData.getValueType();
return DAG.getMemIntrinsicNode(NewOpcode, DL, Op->getVTList(), Ops, MemVT,
M->getMemOperand());
}
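// The idxen bit may only be cleared when vindex is a known zero constant.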
static unsigned getIdxEn(SDValue VIndex) {
if (auto VIndexC = dyn_cast<ConstantSDNode>(VIndex))
return VIndexC->getZExtValue() != 0;
return 1;
}
SDValue
SITargetLowering::lowerStructBufferAtomicIntrin(SDValue Op, SelectionDAG &DAG,
unsigned NewOpcode) const {
SDLoc DL(Op);
SDValue VData = Op.getOperand(2);
auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
SDValue Ops[] = {
  Op.getOperand(0),                      // Chain
  VData,                                 // vdata
  Op.getOperand(3),                      // rsrc
  Op.getOperand(4),                      // vindex
  Offsets.first,                         // voffset
  Op.getOperand(6),                      // soffset
  Offsets.second,                        // offset
  Op.getOperand(7),                      // cachepolicy
  DAG.getTargetConstant(1, DL, MVT::i1), // idxen
};
auto *M = cast<MemSDNode>(Op);
updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]);
EVT MemVT = VData.getValueType();
return DAG.getMemIntrinsicNode(NewOpcode, DL, Op->getVTList(), Ops, MemVT,
M->getMemOperand());
}
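// Lower target intrinsics that carry a chain and may access memory.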
SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
SelectionDAG &DAG) const {
unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
SDLoc DL(Op);
switch (IntrID) {
case Intrinsic::amdgcn_ds_ordered_add:
case Intrinsic::amdgcn_ds_ordered_swap: {
MemSDNode *M = cast<MemSDNode>(Op);
SDValue Chain = M->getOperand(0);
SDValue M0 = M->getOperand(2);
SDValue Value = M->getOperand(3);
unsigned IndexOperand = M->getConstantOperandVal(7);
unsigned WaveRelease = M->getConstantOperandVal(8);
unsigned WaveDone = M->getConstantOperandVal(9);
unsigned OrderedCountIndex = IndexOperand & 0x3f;
IndexOperand &= ~0x3f;
unsigned CountDw = 0;
if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10) {
CountDw = (IndexOperand >> 24) & 0xf;
IndexOperand &= ~(0xf << 24);
if (CountDw < 1 || CountDw > 4) {
report_fatal_error(
"ds_ordered_count: dword count must be between 1 and 4");
}
}
if (IndexOperand)
report_fatal_error("ds_ordered_count: bad index operand");
if (WaveDone && !WaveRelease)
report_fatal_error("ds_ordered_count: wave_done requires wave_release");
unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
unsigned ShaderType =
SIInstrInfo::getDSShaderTypeValue(DAG.getMachineFunction());
unsigned Offset0 = OrderedCountIndex << 2;
unsigned Offset1 = WaveRelease | (WaveDone << 1) | (Instruction << 4);
if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10)
Offset1 |= (CountDw - 1) << 6;
if (Subtarget->getGeneration() < AMDGPUSubtarget::GFX11)
Offset1 |= ShaderType << 2;
unsigned Offset = Offset0 | (Offset1 << 8);
SDValue Ops[] = {
Chain,
Value,
DAG.getTargetConstant(Offset, DL, MVT::i16),
copyToM0(DAG, Chain, DL, M0).getValue(1), // Glue
};
return DAG.getMemIntrinsicNode(AMDGPUISD::DS_ORDERED_COUNT, DL,
M->getVTList(), Ops, M->getMemoryVT(),
M->getMemOperand());
}
case Intrinsic::amdgcn_ds_fadd: {
MemSDNode *M = cast<MemSDNode>(Op);
unsigned Opc;
switch (IntrID) {
case Intrinsic::amdgcn_ds_fadd:
Opc = ISD::ATOMIC_LOAD_FADD;
break;
}
return DAG.getAtomic(Opc, SDLoc(Op), M->getMemoryVT(),
M->getOperand(0), M->getOperand(2), M->getOperand(3),
M->getMemOperand());
}
case Intrinsic::amdgcn_atomic_inc:
case Intrinsic::amdgcn_atomic_dec:
case Intrinsic::amdgcn_ds_fmin:
case Intrinsic::amdgcn_ds_fmax: {
MemSDNode *M = cast<MemSDNode>(Op);
unsigned Opc;
switch (IntrID) {
case Intrinsic::amdgcn_atomic_inc:
Opc = AMDGPUISD::ATOMIC_INC;
break;
case Intrinsic::amdgcn_atomic_dec:
Opc = AMDGPUISD::ATOMIC_DEC;
break;
case Intrinsic::amdgcn_ds_fmin:
Opc = AMDGPUISD::ATOMIC_LOAD_FMIN;
break;
case Intrinsic::amdgcn_ds_fmax:
Opc = AMDGPUISD::ATOMIC_LOAD_FMAX;
break;
default:
llvm_unreachable("Unknown intrinsic!");
}
SDValue Ops[] = {
  M->getOperand(0), // Chain
  M->getOperand(2), // Ptr
  M->getOperand(3)  // Value
};
return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops,
M->getMemoryVT(), M->getMemOperand());
}
case Intrinsic::amdgcn_buffer_load:
case Intrinsic::amdgcn_buffer_load_format: {
unsigned Glc = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
unsigned IdxEn = getIdxEn(Op.getOperand(3));
SDValue Ops[] = {
  Op.getOperand(0),                                      // Chain
  Op.getOperand(2),                                      // rsrc
  Op.getOperand(3),                                      // vindex
  SDValue(),                                             // voffset -- will be set by setBufferOffsets
  SDValue(),                                             // soffset -- will be set by setBufferOffsets
  SDValue(),                                             // offset -- will be set by setBufferOffsets
  DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
  DAG.getTargetConstant(IdxEn, DL, MVT::i1),             // idxen
};
setBufferOffsets(Op.getOperand(4), DAG, &Ops[3]);
unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ?
AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
EVT VT = Op.getValueType();
EVT IntVT = VT.changeTypeToInteger();
auto *M = cast<MemSDNode>(Op);
updateBufferMMO(M->getMemOperand(), Ops[3], Ops[4], Ops[5], Ops[2]);
EVT LoadVT = Op.getValueType();
if (LoadVT.getScalarType() == MVT::f16)
return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
M, DAG, Ops);
if (LoadVT.getScalarType() == MVT::i8 ||
LoadVT.getScalarType() == MVT::i16)
return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);
return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
M->getMemOperand(), DAG);
}
case Intrinsic::amdgcn_raw_buffer_load:
case Intrinsic::amdgcn_raw_buffer_load_format: {
const bool IsFormat = IntrID == Intrinsic::amdgcn_raw_buffer_load_format;
auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);
SDValue Ops[] = {
  Op.getOperand(0),                      // Chain
  Op.getOperand(2),                      // rsrc
  DAG.getConstant(0, DL, MVT::i32),      // vindex
  Offsets.first,                         // voffset
  Op.getOperand(4),                      // soffset
  Offsets.second,                        // offset
  Op.getOperand(5),                      // cachepolicy
  DAG.getTargetConstant(0, DL, MVT::i1), // idxen
};
auto *M = cast<MemSDNode>(Op);
updateBufferMMO(M->getMemOperand(), Ops[3], Ops[4], Ops[5]);
return lowerIntrinsicLoad(M, IsFormat, DAG, Ops);
}
case Intrinsic::amdgcn_struct_buffer_load:
case Intrinsic::amdgcn_struct_buffer_load_format: {
const bool IsFormat = IntrID == Intrinsic::amdgcn_struct_buffer_load_format;
auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
SDValue Ops[] = {
  Op.getOperand(0),                      // Chain
  Op.getOperand(2),                      // rsrc
  Op.getOperand(3),                      // vindex
  Offsets.first,                         // voffset
  Op.getOperand(5),                      // soffset
  Offsets.second,                        // offset
  Op.getOperand(6),                      // cachepolicy
  DAG.getTargetConstant(1, DL, MVT::i1), // idxen
};
auto *M = cast<MemSDNode>(Op);
updateBufferMMO(M->getMemOperand(), Ops[3], Ops[4], Ops[5], Ops[2]);
return lowerIntrinsicLoad(cast<MemSDNode>(Op), IsFormat, DAG, Ops);
}
case Intrinsic::amdgcn_tbuffer_load: {
MemSDNode *M = cast<MemSDNode>(Op);
EVT LoadVT = Op.getValueType();
unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
unsigned Glc = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
unsigned Slc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
unsigned IdxEn = getIdxEn(Op.getOperand(3));
SDValue Ops[] = {
  Op.getOperand(0),                                        // Chain
  Op.getOperand(2),                                        // rsrc
  Op.getOperand(3),                                        // vindex
  Op.getOperand(4),                                        // voffset
  Op.getOperand(5),                                        // soffset
  Op.getOperand(6),                                        // offset
  DAG.getTargetConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format
  DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32),   // cachepolicy
  DAG.getTargetConstant(IdxEn, DL, MVT::i1)                // idxen
};
if (LoadVT.getScalarType() == MVT::f16)
return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
M, DAG, Ops);
return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
DAG);
}
case Intrinsic::amdgcn_raw_tbuffer_load: {
MemSDNode *M = cast<MemSDNode>(Op);
EVT LoadVT = Op.getValueType();
auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);
SDValue Ops[] = {
  Op.getOperand(0),                      // Chain
  Op.getOperand(2),                      // rsrc
  DAG.getConstant(0, DL, MVT::i32),      // vindex
  Offsets.first,                         // voffset
  Op.getOperand(4),                      // soffset
  Offsets.second,                        // offset
  Op.getOperand(5),                      // format
  Op.getOperand(6),                      // cachepolicy
  DAG.getTargetConstant(0, DL, MVT::i1), // idxen
};
if (LoadVT.getScalarType() == MVT::f16)
return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
M, DAG, Ops);
return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
DAG);
}
case Intrinsic::amdgcn_struct_tbuffer_load: {
MemSDNode *M = cast<MemSDNode>(Op);
EVT LoadVT = Op.getValueType();
auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
SDValue Ops[] = {
  Op.getOperand(0),                      // Chain
  Op.getOperand(2),                      // rsrc
  Op.getOperand(3),                      // vindex
  Offsets.first,                         // voffset
  Op.getOperand(5),                      // soffset
  Offsets.second,                        // offset
  Op.getOperand(6),                      // format
  Op.getOperand(7),                      // cachepolicy
  DAG.getTargetConstant(1, DL, MVT::i1), // idxen
};
if (LoadVT.getScalarType() == MVT::f16)
return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
M, DAG, Ops);
return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
DAG);
}
case Intrinsic::amdgcn_buffer_atomic_swap:
case Intrinsic::amdgcn_buffer_atomic_add:
case Intrinsic::amdgcn_buffer_atomic_sub:
case Intrinsic::amdgcn_buffer_atomic_csub:
case Intrinsic::amdgcn_buffer_atomic_smin:
case Intrinsic::amdgcn_buffer_atomic_umin:
case Intrinsic::amdgcn_buffer_atomic_smax:
case Intrinsic::amdgcn_buffer_atomic_umax:
case Intrinsic::amdgcn_buffer_atomic_and:
case Intrinsic::amdgcn_buffer_atomic_or:
case Intrinsic::amdgcn_buffer_atomic_xor:
case Intrinsic::amdgcn_buffer_atomic_fadd: {
unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
unsigned IdxEn = getIdxEn(Op.getOperand(4));
SDValue Ops[] = {
  Op.getOperand(0),                              // Chain
  Op.getOperand(2),                              // vdata
  Op.getOperand(3),                              // rsrc
  Op.getOperand(4),                              // vindex
  SDValue(),                                     // voffset -- will be set by setBufferOffsets
  SDValue(),                                     // soffset -- will be set by setBufferOffsets
  SDValue(),                                     // offset -- will be set by setBufferOffsets
  DAG.getTargetConstant(Slc << 1, DL, MVT::i32), // cachepolicy
  DAG.getTargetConstant(IdxEn, DL, MVT::i1),     // idxen
};
setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
EVT VT = Op.getValueType();
auto *M = cast<MemSDNode>(Op);
updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]);
unsigned Opcode = 0;
switch (IntrID) {
case Intrinsic::amdgcn_buffer_atomic_swap:
Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
break;
case Intrinsic::amdgcn_buffer_atomic_add:
Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
break;
case Intrinsic::amdgcn_buffer_atomic_sub:
Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
break;
case Intrinsic::amdgcn_buffer_atomic_csub:
Opcode = AMDGPUISD::BUFFER_ATOMIC_CSUB;
break;
case Intrinsic::amdgcn_buffer_atomic_smin:
Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
break;
case Intrinsic::amdgcn_buffer_atomic_umin:
Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
break;
case Intrinsic::amdgcn_buffer_atomic_smax:
Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
break;
case Intrinsic::amdgcn_buffer_atomic_umax:
Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
break;
case Intrinsic::amdgcn_buffer_atomic_and:
Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
break;
case Intrinsic::amdgcn_buffer_atomic_or:
Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
break;
case Intrinsic::amdgcn_buffer_atomic_xor:
Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
break;
case Intrinsic::amdgcn_buffer_atomic_fadd:
if (!Op.getValue(0).use_empty() && !hasAtomicFaddRtnForTy(Op)) {
DiagnosticInfoUnsupported
NoFpRet(DAG.getMachineFunction().getFunction(),
"return versions of fp atomics not supported",
DL.getDebugLoc(), DS_Error);
DAG.getContext()->diagnose(NoFpRet);
return SDValue();
}
Opcode = AMDGPUISD::BUFFER_ATOMIC_FADD;
break;
default:
llvm_unreachable("unhandled atomic opcode");
}
return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
M->getMemOperand());
}
case Intrinsic::amdgcn_raw_buffer_atomic_fadd:
return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FADD);
case Intrinsic::amdgcn_struct_buffer_atomic_fadd:
return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FADD);
case Intrinsic::amdgcn_raw_buffer_atomic_fmin:
return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FMIN);
case Intrinsic::amdgcn_struct_buffer_atomic_fmin:
return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FMIN);
case Intrinsic::amdgcn_raw_buffer_atomic_fmax:
return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FMAX);
case Intrinsic::amdgcn_struct_buffer_atomic_fmax:
return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FMAX);
case Intrinsic::amdgcn_raw_buffer_atomic_swap:
return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SWAP);
case Intrinsic::amdgcn_raw_buffer_atomic_add:
return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_ADD);
case Intrinsic::amdgcn_raw_buffer_atomic_sub:
return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SUB);
case Intrinsic::amdgcn_raw_buffer_atomic_smin:
return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SMIN);
case Intrinsic::amdgcn_raw_buffer_atomic_umin:
return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_UMIN);
case Intrinsic::amdgcn_raw_buffer_atomic_smax:
return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SMAX);
case Intrinsic::amdgcn_raw_buffer_atomic_umax:
return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_UMAX);
case Intrinsic::amdgcn_raw_buffer_atomic_and:
return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_AND);
case Intrinsic::amdgcn_raw_buffer_atomic_or:
return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_OR);
case Intrinsic::amdgcn_raw_buffer_atomic_xor:
return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_XOR);
case Intrinsic::amdgcn_raw_buffer_atomic_inc:
return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_INC);
case Intrinsic::amdgcn_raw_buffer_atomic_dec:
return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_DEC);
case Intrinsic::amdgcn_struct_buffer_atomic_swap:
return lowerStructBufferAtomicIntrin(Op, DAG,
AMDGPUISD::BUFFER_ATOMIC_SWAP);
case Intrinsic::amdgcn_struct_buffer_atomic_add:
return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_ADD);
case Intrinsic::amdgcn_struct_buffer_atomic_sub:
return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SUB);
case Intrinsic::amdgcn_struct_buffer_atomic_smin:
return lowerStructBufferAtomicIntrin(Op, DAG,
AMDGPUISD::BUFFER_ATOMIC_SMIN);
case Intrinsic::amdgcn_struct_buffer_atomic_umin:
return lowerStructBufferAtomicIntrin(Op, DAG,
AMDGPUISD::BUFFER_ATOMIC_UMIN);
case Intrinsic::amdgcn_struct_buffer_atomic_smax:
return lowerStructBufferAtomicIntrin(Op, DAG,
AMDGPUISD::BUFFER_ATOMIC_SMAX);
case Intrinsic::amdgcn_struct_buffer_atomic_umax:
return lowerStructBufferAtomicIntrin(Op, DAG,
AMDGPUISD::BUFFER_ATOMIC_UMAX);
case Intrinsic::amdgcn_struct_buffer_atomic_and:
return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_AND);
case Intrinsic::amdgcn_struct_buffer_atomic_or:
return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_OR);
case Intrinsic::amdgcn_struct_buffer_atomic_xor:
return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_XOR);
case Intrinsic::amdgcn_struct_buffer_atomic_inc:
return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_INC);
case Intrinsic::amdgcn_struct_buffer_atomic_dec:
return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_DEC);
case Intrinsic::amdgcn_buffer_atomic_cmpswap: {
unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
unsigned IdxEn = getIdxEn(Op.getOperand(5));
SDValue Ops[] = {
  Op.getOperand(0),                              // Chain
  Op.getOperand(2),                              // src
  Op.getOperand(3),                              // cmp
  Op.getOperand(4),                              // rsrc
  Op.getOperand(5),                              // vindex
  SDValue(),                                     // voffset -- will be set by setBufferOffsets
  SDValue(),                                     // soffset -- will be set by setBufferOffsets
  SDValue(),                                     // offset -- will be set by setBufferOffsets
  DAG.getTargetConstant(Slc << 1, DL, MVT::i32), // cachepolicy
  DAG.getTargetConstant(IdxEn, DL, MVT::i1),     // idxen
};
setBufferOffsets(Op.getOperand(6), DAG, &Ops[5]);
EVT VT = Op.getValueType();
auto *M = cast<MemSDNode>(Op);
updateBufferMMO(M->getMemOperand(), Ops[5], Ops[6], Ops[7], Ops[4]);
return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
Op->getVTList(), Ops, VT, M->getMemOperand());
}
case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap: {
auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
SDValue Ops[] = {
  Op.getOperand(0),                      // Chain
  Op.getOperand(2),                      // src
  Op.getOperand(3),                      // cmp
  Op.getOperand(4),                      // rsrc
  DAG.getConstant(0, DL, MVT::i32),      // vindex
  Offsets.first,                         // voffset
  Op.getOperand(6),                      // soffset
  Offsets.second,                        // offset
  Op.getOperand(7),                      // cachepolicy
  DAG.getTargetConstant(0, DL, MVT::i1), // idxen
};
EVT VT = Op.getValueType();
auto *M = cast<MemSDNode>(Op);
updateBufferMMO(M->getMemOperand(), Ops[5], Ops[6], Ops[7]);
return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
Op->getVTList(), Ops, VT, M->getMemOperand());
}
case Intrinsic::amdgcn_struct_buffer_atomic_cmpswap: {
auto Offsets = splitBufferOffsets(Op.getOperand(6), DAG);
SDValue Ops[] = {
  Op.getOperand(0),                      // Chain
  Op.getOperand(2),                      // src
  Op.getOperand(3),                      // cmp
  Op.getOperand(4),                      // rsrc
  Op.getOperand(5),                      // vindex
  Offsets.first,                         // voffset
  Op.getOperand(7),                      // soffset
  Offsets.second,                        // offset
  Op.getOperand(8),                      // cachepolicy
  DAG.getTargetConstant(1, DL, MVT::i1), // idxen
};
EVT VT = Op.getValueType();
auto *M = cast<MemSDNode>(Op);
updateBufferMMO(M->getMemOperand(), Ops[5], Ops[6], Ops[7], Ops[4]);
return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
Op->getVTList(), Ops, VT, M->getMemOperand());
}
case Intrinsic::amdgcn_image_bvh_intersect_ray: {
MemSDNode *M = cast<MemSDNode>(Op);
SDValue NodePtr = M->getOperand(2);
SDValue RayExtent = M->getOperand(3);
SDValue RayOrigin = M->getOperand(4);
SDValue RayDir = M->getOperand(5);
SDValue RayInvDir = M->getOperand(6);
SDValue TDescr = M->getOperand(7);
assert(NodePtr.getValueType() == MVT::i32 ||
NodePtr.getValueType() == MVT::i64);
assert(RayDir.getValueType() == MVT::v3f16 ||
RayDir.getValueType() == MVT::v3f32);
if (!Subtarget->hasGFX10_AEncoding()) {
emitRemovedIntrinsicError(DAG, DL, Op.getValueType());
return SDValue();
}
const bool IsGFX11Plus = AMDGPU::isGFX11Plus(*Subtarget);
const bool IsA16 = RayDir.getValueType().getVectorElementType() == MVT::f16;
const bool Is64 = NodePtr.getValueType() == MVT::i64;
const unsigned NumVDataDwords = 4;
const unsigned NumVAddrDwords = IsA16 ? (Is64 ? 9 : 8) : (Is64 ? 12 : 11);
const unsigned NumVAddrs = IsGFX11Plus ? (IsA16 ? 4 : 5) : NumVAddrDwords;
const bool UseNSA =
Subtarget->hasNSAEncoding() && NumVAddrs <= Subtarget->getNSAMaxSize();
const unsigned BaseOpcodes[2][2] = {
{AMDGPU::IMAGE_BVH_INTERSECT_RAY, AMDGPU::IMAGE_BVH_INTERSECT_RAY_a16},
{AMDGPU::IMAGE_BVH64_INTERSECT_RAY,
AMDGPU::IMAGE_BVH64_INTERSECT_RAY_a16}};
int Opcode;
if (UseNSA) {
Opcode = AMDGPU::getMIMGOpcode(BaseOpcodes[Is64][IsA16],
IsGFX11Plus ? AMDGPU::MIMGEncGfx11NSA
: AMDGPU::MIMGEncGfx10NSA,
NumVDataDwords, NumVAddrDwords);
} else {
Opcode =
AMDGPU::getMIMGOpcode(BaseOpcodes[Is64][IsA16],
IsGFX11Plus ? AMDGPU::MIMGEncGfx11Default
: AMDGPU::MIMGEncGfx10Default,
NumVDataDwords, PowerOf2Ceil(NumVAddrDwords));
}
assert(Opcode != -1);
SmallVector<SDValue, 16> Ops;
auto packLanes = [&DAG, &Ops, &DL] (SDValue Op, bool IsAligned) {
SmallVector<SDValue, 3> Lanes;
DAG.ExtractVectorElements(Op, Lanes, 0, 3);
if (Lanes[0].getValueSizeInBits() == 32) {
for (unsigned I = 0; I < 3; ++I)
Ops.push_back(DAG.getBitcast(MVT::i32, Lanes[I]));
} else {
if (IsAligned) {
Ops.push_back(
DAG.getBitcast(MVT::i32,
DAG.getBuildVector(MVT::v2f16, DL,
{ Lanes[0], Lanes[1] })));
Ops.push_back(Lanes[2]);
} else {
SDValue Elt0 = Ops.pop_back_val();
Ops.push_back(
DAG.getBitcast(MVT::i32,
DAG.getBuildVector(MVT::v2f16, DL,
{ Elt0, Lanes[0] })));
Ops.push_back(
DAG.getBitcast(MVT::i32,
DAG.getBuildVector(MVT::v2f16, DL,
{ Lanes[1], Lanes[2] })));
}
}
};
if (UseNSA && IsGFX11Plus) {
Ops.push_back(NodePtr);
Ops.push_back(DAG.getBitcast(MVT::i32, RayExtent));
Ops.push_back(RayOrigin);
if (IsA16) {
SmallVector<SDValue, 3> DirLanes, InvDirLanes, MergedLanes;
DAG.ExtractVectorElements(RayDir, DirLanes, 0, 3);
DAG.ExtractVectorElements(RayInvDir, InvDirLanes, 0, 3);
for (unsigned I = 0; I < 3; ++I) {
MergedLanes.push_back(DAG.getBitcast(
MVT::i32, DAG.getBuildVector(MVT::v2f16, DL,
{DirLanes[I], InvDirLanes[I]})));
}
Ops.push_back(DAG.getBuildVector(MVT::v3i32, DL, MergedLanes));
} else {
Ops.push_back(RayDir);
Ops.push_back(RayInvDir);
}
} else {
if (Is64)
DAG.ExtractVectorElements(DAG.getBitcast(MVT::v2i32, NodePtr), Ops, 0,
2);
else
Ops.push_back(NodePtr);
Ops.push_back(DAG.getBitcast(MVT::i32, RayExtent));
packLanes(RayOrigin, true);
packLanes(RayDir, true);
packLanes(RayInvDir, false);
}
if (!UseNSA) {
if (NumVAddrDwords > 8) {
SDValue Undef = DAG.getUNDEF(MVT::i32);
Ops.append(16 - Ops.size(), Undef);
}
assert(Ops.size() == 8 || Ops.size() == 16);
SDValue MergedOps = DAG.getBuildVector(
Ops.size() == 16 ? MVT::v16i32 : MVT::v8i32, DL, Ops);
Ops.clear();
Ops.push_back(MergedOps);
}
Ops.push_back(TDescr);
if (IsA16)
Ops.push_back(DAG.getTargetConstant(1, DL, MVT::i1));
Ops.push_back(M->getChain());
auto *NewNode = DAG.getMachineNode(Opcode, DL, M->getVTList(), Ops);
MachineMemOperand *MemRef = M->getMemOperand();
DAG.setNodeMemRefs(NewNode, {MemRef});
return SDValue(NewNode, 0);
}
case Intrinsic::amdgcn_global_atomic_fadd:
if (!Op.getValue(0).use_empty() && !Subtarget->hasGFX90AInsts()) {
DiagnosticInfoUnsupported
NoFpRet(DAG.getMachineFunction().getFunction(),
"return versions of fp atomics not supported",
DL.getDebugLoc(), DS_Error);
DAG.getContext()->diagnose(NoFpRet);
return SDValue();
}
LLVM_FALLTHROUGH;
case Intrinsic::amdgcn_global_atomic_fmin:
case Intrinsic::amdgcn_global_atomic_fmax:
case Intrinsic::amdgcn_flat_atomic_fadd:
case Intrinsic::amdgcn_flat_atomic_fmin:
case Intrinsic::amdgcn_flat_atomic_fmax: {
MemSDNode *M = cast<MemSDNode>(Op);
SDValue Ops[] = {
  M->getOperand(0), // Chain
  M->getOperand(2), // Ptr
  M->getOperand(3)  // Value
};
unsigned Opcode = 0;
switch (IntrID) {
case Intrinsic::amdgcn_global_atomic_fadd:
case Intrinsic::amdgcn_flat_atomic_fadd: {
EVT VT = Op.getOperand(3).getValueType();
return DAG.getAtomic(ISD::ATOMIC_LOAD_FADD, DL, VT,
DAG.getVTList(VT, MVT::Other), Ops,
M->getMemOperand());
}
case Intrinsic::amdgcn_global_atomic_fmin:
case Intrinsic::amdgcn_flat_atomic_fmin: {
Opcode = AMDGPUISD::ATOMIC_LOAD_FMIN;
break;
}
case Intrinsic::amdgcn_global_atomic_fmax:
case Intrinsic::amdgcn_flat_atomic_fmax: {
Opcode = AMDGPUISD::ATOMIC_LOAD_FMAX;
break;
}
default:
llvm_unreachable("unhandled atomic opcode");
}
return DAG.getMemIntrinsicNode(Opcode, SDLoc(Op),
M->getVTList(), Ops, M->getMemoryVT(),
M->getMemOperand());
}
default:
if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
AMDGPU::getImageDimIntrinsicInfo(IntrID))
return lowerImage(Op, ImageDimIntr, DAG, true);
return SDValue();
}
}
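// Emit a memory intrinsic node, widening illegal dwordx3 types to dwordx4 on
// subtargets without dwordx3 load/store support and extracting the original
// width from the result.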
SDValue SITargetLowering::getMemIntrinsicNode(unsigned Opcode, const SDLoc &DL,
SDVTList VTList,
ArrayRef<SDValue> Ops, EVT MemVT,
MachineMemOperand *MMO,
SelectionDAG &DAG) const {
EVT VT = VTList.VTs[0];
EVT WidenedVT = VT;
EVT WidenedMemVT = MemVT;
if (!Subtarget->hasDwordx3LoadStores() &&
(WidenedVT == MVT::v3i32 || WidenedVT == MVT::v3f32)) {
WidenedVT = EVT::getVectorVT(*DAG.getContext(),
WidenedVT.getVectorElementType(), 4);
WidenedMemVT = EVT::getVectorVT(*DAG.getContext(),
WidenedMemVT.getVectorElementType(), 4);
MMO = DAG.getMachineFunction().getMachineMemOperand(MMO, 0, 16);
}
assert(VTList.NumVTs == 2);
SDVTList WidenedVTList = DAG.getVTList(WidenedVT, VTList.VTs[1]);
auto NewOp = DAG.getMemIntrinsicNode(Opcode, DL, WidenedVTList, Ops,
WidenedMemVT, MMO);
if (WidenedVT != VT) {
auto Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, NewOp,
DAG.getVectorIdxConstant(0, DL));
NewOp = DAG.getMergeValues({ Extract, SDValue(NewOp.getNode(), 1) }, DL);
}
return NewOp;
}
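// Prepare 16-bit vector data for a D16 store: unpack to dwords on targets
// with unpacked D16 memory ops, repack around the image-store D16 bug, or
// widen an illegal vec3 to vec4.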
SDValue SITargetLowering::handleD16VData(SDValue VData, SelectionDAG &DAG,
bool ImageStore) const {
EVT StoreVT = VData.getValueType();
if (!StoreVT.isVector())
return VData;
SDLoc DL(VData);
unsigned NumElements = StoreVT.getVectorNumElements();
if (Subtarget->hasUnpackedD16VMem()) {
EVT IntStoreVT = StoreVT.changeTypeToInteger();
SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData);
EVT EquivStoreVT =
EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElements);
SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, EquivStoreVT, IntVData);
return DAG.UnrollVectorOp(ZExt.getNode());
}
if (ImageStore && Subtarget->hasImageStoreD16Bug()) {
EVT IntStoreVT = StoreVT.changeTypeToInteger();
SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData);
SmallVector<SDValue, 4> Elts;
DAG.ExtractVectorElements(IntVData, Elts);
SmallVector<SDValue, 4> PackedElts;
for (unsigned I = 0; I < Elts.size() / 2; I += 1) {
SDValue Pair =
DAG.getBuildVector(MVT::v2i16, DL, {Elts[I * 2], Elts[I * 2 + 1]});
SDValue IntPair = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Pair);
PackedElts.push_back(IntPair);
}
if ((NumElements % 2) == 1) {
unsigned I = Elts.size() / 2;
SDValue Pair = DAG.getBuildVector(MVT::v2i16, DL,
{Elts[I * 2], DAG.getUNDEF(MVT::i16)});
SDValue IntPair = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Pair);
PackedElts.push_back(IntPair);
}
PackedElts.resize(Elts.size(), DAG.getUNDEF(MVT::i32));
EVT VecVT =
EVT::getVectorVT(*DAG.getContext(), MVT::i32, PackedElts.size());
return DAG.getBuildVector(VecVT, DL, PackedElts);
}
if (NumElements == 3) {
EVT IntStoreVT =
EVT::getIntegerVT(*DAG.getContext(), StoreVT.getStoreSizeInBits());
SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData);
EVT WidenedStoreVT = EVT::getVectorVT(
*DAG.getContext(), StoreVT.getVectorElementType(), NumElements + 1);
EVT WidenedIntVT = EVT::getIntegerVT(*DAG.getContext(),
WidenedStoreVT.getStoreSizeInBits());
SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, WidenedIntVT, IntVData);
return DAG.getNode(ISD::BITCAST, DL, WidenedStoreVT, ZExt);
}
assert(isTypeLegal(StoreVT));
return VData;
}
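// Lower target intrinsics that carry a chain and produce no value.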
SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
SDValue Chain = Op.getOperand(0);
unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
MachineFunction &MF = DAG.getMachineFunction();
switch (IntrinsicID) {
case Intrinsic::amdgcn_exp_compr: {
if (!Subtarget->hasCompressedExport()) {
DiagnosticInfoUnsupported BadIntrin(
DAG.getMachineFunction().getFunction(),
"intrinsic not supported on subtarget", DL.getDebugLoc());
DAG.getContext()->diagnose(BadIntrin);
}
SDValue Src0 = Op.getOperand(4);
SDValue Src1 = Op.getOperand(5);
if (isTypeLegal(Src0.getValueType()))
return SDValue();
const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6));
SDValue Undef = DAG.getUNDEF(MVT::f32);
const SDValue Ops[] = {
  Op.getOperand(2),                              // tgt
  DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0), // src0
  DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1), // src1
  Undef,                                         // src2
  Undef,                                         // src3
  Op.getOperand(7),                              // vm
  DAG.getTargetConstant(1, DL, MVT::i1),         // compr
  Op.getOperand(3),                              // en
  Op.getOperand(0)                               // Chain
};
unsigned Opc = Done->isZero() ? AMDGPU::EXP : AMDGPU::EXP_DONE;
return SDValue(DAG.getMachineNode(Opc, DL, Op->getVTList(), Ops), 0);
}
case Intrinsic::amdgcn_s_barrier: {
if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second;
if (WGSize <= ST.getWavefrontSize())
return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
Op.getOperand(0)), 0);
}
return SDValue();
};
case Intrinsic::amdgcn_tbuffer_store: {
SDValue VData = Op.getOperand(2);
bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
if (IsD16)
VData = handleD16VData(VData, DAG);
unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
unsigned Glc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
unsigned Slc = cast<ConstantSDNode>(Op.getOperand(11))->getZExtValue();
unsigned IdxEn = getIdxEn(Op.getOperand(4));
SDValue Ops[] = {
Chain,
  VData,                                                    // vdata
  Op.getOperand(3),                                         // rsrc
  Op.getOperand(4),                                         // vindex
  Op.getOperand(5),                                         // voffset
  Op.getOperand(6),                                         // soffset
  Op.getOperand(7),                                         // offset
  DAG.getTargetConstant(Dfmt | (Nfmt << 4), DL, MVT::i32),  // format
  DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32),    // cachepolicy
  DAG.getTargetConstant(IdxEn, DL, MVT::i1),                // idxen
};
unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
AMDGPUISD::TBUFFER_STORE_FORMAT;
MemSDNode *M = cast<MemSDNode>(Op);
return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
M->getMemoryVT(), M->getMemOperand());
}
case Intrinsic::amdgcn_struct_tbuffer_store: {
SDValue VData = Op.getOperand(2);
bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
if (IsD16)
VData = handleD16VData(VData, DAG);
auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
SDValue Ops[] = {
Chain,
  VData,                                 // vdata
  Op.getOperand(3),                      // rsrc
  Op.getOperand(4),                      // vindex
  Offsets.first,                         // voffset
  Op.getOperand(6),                      // soffset
  Offsets.second,                        // offset
  Op.getOperand(7),                      // format
  Op.getOperand(8),                      // cachepolicy
  DAG.getTargetConstant(1, DL, MVT::i1), // idxen
};
unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
AMDGPUISD::TBUFFER_STORE_FORMAT;
MemSDNode *M = cast<MemSDNode>(Op);
return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
M->getMemoryVT(), M->getMemOperand());
}
case Intrinsic::amdgcn_raw_tbuffer_store: {
SDValue VData = Op.getOperand(2);
bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
if (IsD16)
VData = handleD16VData(VData, DAG);
auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
SDValue Ops[] = {
Chain,
  VData,                                 // vdata
  Op.getOperand(3),                      // rsrc
  DAG.getConstant(0, DL, MVT::i32),      // vindex
  Offsets.first,                         // voffset
  Op.getOperand(5),                      // soffset
  Offsets.second,                        // offset
  Op.getOperand(6),                      // format
  Op.getOperand(7),                      // cachepolicy
  DAG.getTargetConstant(0, DL, MVT::i1), // idxen
};
unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
AMDGPUISD::TBUFFER_STORE_FORMAT;
MemSDNode *M = cast<MemSDNode>(Op);
return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
M->getMemoryVT(), M->getMemOperand());
}
case Intrinsic::amdgcn_buffer_store:
case Intrinsic::amdgcn_buffer_store_format: {
SDValue VData = Op.getOperand(2);
bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
if (IsD16)
VData = handleD16VData(VData, DAG);
unsigned Glc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
unsigned IdxEn = getIdxEn(Op.getOperand(4));
SDValue Ops[] = {
Chain,
VData,
  Op.getOperand(3),                                       // rsrc
  Op.getOperand(4),                                       // vindex
  SDValue(),                                              // voffset -- will be set by setBufferOffsets
  SDValue(),                                              // soffset -- will be set by setBufferOffsets
  SDValue(),                                              // offset -- will be set by setBufferOffsets
  DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32),  // cachepolicy
  DAG.getTargetConstant(IdxEn, DL, MVT::i1),              // idxen
};
setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ?
AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
MemSDNode *M = cast<MemSDNode>(Op);
updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]);
EVT VDataType = VData.getValueType().getScalarType();
if (VDataType == MVT::i8 || VDataType == MVT::i16)
return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M);
return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
M->getMemoryVT(), M->getMemOperand());
}
case Intrinsic::amdgcn_raw_buffer_store:
case Intrinsic::amdgcn_raw_buffer_store_format: {
const bool IsFormat =
IntrinsicID == Intrinsic::amdgcn_raw_buffer_store_format;
SDValue VData = Op.getOperand(2);
EVT VDataVT = VData.getValueType();
EVT EltType = VDataVT.getScalarType();
bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16);
if (IsD16) {
VData = handleD16VData(VData, DAG);
VDataVT = VData.getValueType();
}
if (!isTypeLegal(VDataVT)) {
VData =
DAG.getNode(ISD::BITCAST, DL,
getEquivalentMemType(*DAG.getContext(), VDataVT), VData);
}
auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
SDValue Ops[] = {
Chain,
VData,
  Op.getOperand(3),                      // rsrc
  DAG.getConstant(0, DL, MVT::i32),      // vindex
  Offsets.first,                         // voffset
  Op.getOperand(5),                      // soffset
  Offsets.second,                        // offset
  Op.getOperand(6),                      // cachepolicy
  DAG.getTargetConstant(0, DL, MVT::i1), // idxen
};
unsigned Opc =
IsFormat ? AMDGPUISD::BUFFER_STORE_FORMAT : AMDGPUISD::BUFFER_STORE;
Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
MemSDNode *M = cast<MemSDNode>(Op);
updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6]);
if (!IsD16 && !VDataVT.isVector() && EltType.getSizeInBits() < 32)
return handleByteShortBufferStores(DAG, VDataVT, DL, Ops, M);
return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
M->getMemoryVT(), M->getMemOperand());
}
case Intrinsic::amdgcn_struct_buffer_store:
case Intrinsic::amdgcn_struct_buffer_store_format: {
const bool IsFormat =
IntrinsicID == Intrinsic::amdgcn_struct_buffer_store_format;
SDValue VData = Op.getOperand(2);
EVT VDataVT = VData.getValueType();
EVT EltType = VDataVT.getScalarType();
bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16);
if (IsD16) {
VData = handleD16VData(VData, DAG);
VDataVT = VData.getValueType();
}
if (!isTypeLegal(VDataVT)) {
VData =
DAG.getNode(ISD::BITCAST, DL,
getEquivalentMemType(*DAG.getContext(), VDataVT), VData);
}
auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
SDValue Ops[] = {
Chain,
VData,
  Op.getOperand(3),                      // rsrc
  Op.getOperand(4),                      // vindex
  Offsets.first,                         // voffset
  Op.getOperand(6),                      // soffset
  Offsets.second,                        // offset
  Op.getOperand(7),                      // cachepolicy
  DAG.getTargetConstant(1, DL, MVT::i1), // idxen
};
unsigned Opc = IntrinsicID == Intrinsic::amdgcn_struct_buffer_store ?
AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
MemSDNode *M = cast<MemSDNode>(Op);
updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]);
EVT VDataType = VData.getValueType().getScalarType();
if (!IsD16 && !VDataVT.isVector() && EltType.getSizeInBits() < 32)
return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M);
return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
M->getMemoryVT(), M->getMemOperand());
}
case Intrinsic::amdgcn_raw_buffer_load_lds:
case Intrinsic::amdgcn_struct_buffer_load_lds: {
unsigned Opc;
bool HasVIndex = IntrinsicID == Intrinsic::amdgcn_struct_buffer_load_lds;
unsigned OpOffset = HasVIndex ? 1 : 0;
SDValue VOffset = Op.getOperand(5 + OpOffset);
auto CVOffset = dyn_cast<ConstantSDNode>(VOffset);
bool HasVOffset = !CVOffset || !CVOffset->isZero();
unsigned Size = Op->getConstantOperandVal(4);
switch (Size) {
default:
return SDValue();
case 1:
Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_BOTHEN
: AMDGPU::BUFFER_LOAD_UBYTE_LDS_IDXEN
: HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFEN
: AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFSET;
break;
case 2:
Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_BOTHEN
: AMDGPU::BUFFER_LOAD_USHORT_LDS_IDXEN
: HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFEN
: AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFSET;
break;
case 4:
Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_BOTHEN
: AMDGPU::BUFFER_LOAD_DWORD_LDS_IDXEN
: HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFEN
: AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFSET;
break;
}
SDValue M0Val = copyToM0(DAG, Chain, DL, Op.getOperand(3));
SmallVector<SDValue, 8> Ops;
if (HasVIndex && HasVOffset)
Ops.push_back(DAG.getBuildVector(MVT::v2i32, DL,
{ Op.getOperand(5), VOffset }));
else if (HasVIndex)
Ops.push_back(Op.getOperand(5));
else if (HasVOffset)
Ops.push_back(VOffset);
Ops.push_back(Op.getOperand(2));            // rsrc
Ops.push_back(Op.getOperand(6 + OpOffset)); // soffset
Ops.push_back(Op.getOperand(7 + OpOffset)); // imm offset
unsigned Aux = Op.getConstantOperandVal(8 + OpOffset);
Ops.push_back(
    DAG.getTargetConstant(Aux & AMDGPU::CPol::ALL, DL, MVT::i8)); // cpol
Ops.push_back(
    DAG.getTargetConstant((Aux >> 3) & 1, DL, MVT::i8));          // swz
Ops.push_back(M0Val.getValue(0)); // Chain
Ops.push_back(M0Val.getValue(1)); // Glue
auto *M = cast<MemSDNode>(Op);
MachineMemOperand *LoadMMO = M->getMemOperand();
MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
LoadPtrI.Offset = Op->getConstantOperandVal(7 + OpOffset);
MachinePointerInfo StorePtrI = LoadPtrI;
StorePtrI.V = nullptr;
StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
auto F = LoadMMO->getFlags() &
~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
LoadMMO = MF.getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
Size, LoadMMO->getBaseAlign());
MachineMemOperand *StoreMMO =
MF.getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
sizeof(int32_t), LoadMMO->getBaseAlign());
auto Load = DAG.getMachineNode(Opc, DL, M->getVTList(), Ops);
DAG.setNodeMemRefs(Load, {LoadMMO, StoreMMO});
return SDValue(Load, 0);
}
case Intrinsic::amdgcn_global_load_lds: {
unsigned Opc;
unsigned Size = Op->getConstantOperandVal(4);
switch (Size) {
default:
return SDValue();
case 1:
Opc = AMDGPU::GLOBAL_LOAD_LDS_UBYTE;
break;
case 2:
Opc = AMDGPU::GLOBAL_LOAD_LDS_USHORT;
break;
case 4:
Opc = AMDGPU::GLOBAL_LOAD_LDS_DWORD;
break;
}
auto *M = cast<MemSDNode>(Op);
SDValue M0Val = copyToM0(DAG, Chain, DL, Op.getOperand(3));
SmallVector<SDValue, 6> Ops;
SDValue Addr = Op.getOperand(2);
SDValue VOffset;
// Try to split the address into a uniform base (saddr) and a 32-bit
// zero-extended divergent offset (voffset).
if (Addr->isDivergent() && Addr.getOpcode() == ISD::ADD) {
SDValue LHS = Addr.getOperand(0);
SDValue RHS = Addr.getOperand(1);
if (LHS->isDivergent())
std::swap(LHS, RHS);
if (!LHS->isDivergent() && RHS.getOpcode() == ISD::ZERO_EXTEND &&
RHS.getOperand(0).getValueType() == MVT::i32) {
Addr = LHS;
VOffset = RHS.getOperand(0);
}
}
Ops.push_back(Addr);
if (!Addr->isDivergent()) {
Opc = AMDGPU::getGlobalSaddrOp(Opc);
if (!VOffset)
VOffset = SDValue(
DAG.getMachineNode(AMDGPU::V_MOV_B32_e32, DL, MVT::i32,
DAG.getTargetConstant(0, DL, MVT::i32)), 0);
Ops.push_back(VOffset);
}
Ops.push_back(Op.getOperand(5));  // Offset
Ops.push_back(Op.getOperand(6));  // CPol
Ops.push_back(M0Val.getValue(0)); // Chain
Ops.push_back(M0Val.getValue(1)); // Glue
MachineMemOperand *LoadMMO = M->getMemOperand();
MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
LoadPtrI.Offset = Op->getConstantOperandVal(5);
MachinePointerInfo StorePtrI = LoadPtrI;
LoadPtrI.AddrSpace = AMDGPUAS::GLOBAL_ADDRESS;
StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
auto F = LoadMMO->getFlags() &
~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
LoadMMO = MF.getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
Size, LoadMMO->getBaseAlign());
MachineMemOperand *StoreMMO =
MF.getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
sizeof(int32_t), Align(4));
auto Load = DAG.getMachineNode(Opc, DL, Op->getVTList(), Ops);
DAG.setNodeMemRefs(Load, {LoadMMO, StoreMMO});
return SDValue(Load, 0);
}
case Intrinsic::amdgcn_end_cf:
return SDValue(DAG.getMachineNode(AMDGPU::SI_END_CF, DL, MVT::Other,
Op->getOperand(2), Chain), 0);
default: {
if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
return lowerImage(Op, ImageDimIntr, DAG, true);
return Op;
}
}
}
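// Split a buffer offset into a part that can live in a VGPR and a constant
// part that fits in the MUBUF instruction's immediate offset field (at most
// MaxImm = 4095). Any overflow of the immediate is added back onto the
// variable part. Returns {voffset, immoffset}.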
std::pair<SDValue, SDValue> SITargetLowering::splitBufferOffsets(
SDValue Offset, SelectionDAG &DAG) const {
SDLoc DL(Offset);
const unsigned MaxImm = 4095;
SDValue N0 = Offset;
ConstantSDNode *C1 = nullptr;
if ((C1 = dyn_cast<ConstantSDNode>(N0)))
N0 = SDValue();
else if (DAG.isBaseWithConstantOffset(N0)) {
C1 = cast<ConstantSDNode>(N0.getOperand(1));
N0 = N0.getOperand(0);
}
if (C1) {
unsigned ImmOffset = C1->getZExtValue();
unsigned Overflow = ImmOffset & ~MaxImm;
ImmOffset -= Overflow;
if ((int32_t)Overflow < 0) {
Overflow += ImmOffset;
ImmOffset = 0;
}
C1 = cast<ConstantSDNode>(DAG.getTargetConstant(ImmOffset, DL, MVT::i32));
if (Overflow) {
auto OverflowVal = DAG.getConstant(Overflow, DL, MVT::i32);
if (!N0)
N0 = OverflowVal;
else {
SDValue Ops[] = { N0, OverflowVal };
N0 = DAG.getNode(ISD::ADD, DL, MVT::i32, Ops);
}
}
}
if (!N0)
N0 = DAG.getConstant(0, DL, MVT::i32);
if (!C1)
C1 = cast<ConstantSDNode>(DAG.getTargetConstant(0, DL, MVT::i32));
return {N0, SDValue(C1, 0)};
}
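// Analyze a combined offset from a buffer intrinsic and split it into the
// three components expected by the MUBUF operands: Offsets[0] = voffset,
// Offsets[1] = soffset, Offsets[2] = immediate offset.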
void SITargetLowering::setBufferOffsets(SDValue CombinedOffset,
SelectionDAG &DAG, SDValue *Offsets,
Align Alignment) const {
SDLoc DL(CombinedOffset);
if (auto C = dyn_cast<ConstantSDNode>(CombinedOffset)) {
uint32_t Imm = C->getZExtValue();
uint32_t SOffset, ImmOffset;
if (AMDGPU::splitMUBUFOffset(Imm, SOffset, ImmOffset, Subtarget,
Alignment)) {
Offsets[0] = DAG.getConstant(0, DL, MVT::i32);
Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
Offsets[2] = DAG.getTargetConstant(ImmOffset, DL, MVT::i32);
return;
}
}
if (DAG.isBaseWithConstantOffset(CombinedOffset)) {
SDValue N0 = CombinedOffset.getOperand(0);
SDValue N1 = CombinedOffset.getOperand(1);
uint32_t SOffset, ImmOffset;
int Offset = cast<ConstantSDNode>(N1)->getSExtValue();
if (Offset >= 0 && AMDGPU::splitMUBUFOffset(Offset, SOffset, ImmOffset,
Subtarget, Alignment)) {
Offsets[0] = N0;
Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
Offsets[2] = DAG.getTargetConstant(ImmOffset, DL, MVT::i32);
return;
}
}
Offsets[0] = CombinedOffset;
Offsets[1] = DAG.getConstant(0, DL, MVT::i32);
Offsets[2] = DAG.getTargetConstant(0, DL, MVT::i32);
}
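// Handle 8-bit and 16-bit buffer loads: load as i32 via the UBYTE/USHORT
// buffer-load node, then truncate and bitcast to the requested type.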
SDValue SITargetLowering::handleByteShortBufferLoads(SelectionDAG &DAG,
EVT LoadVT, SDLoc DL,
ArrayRef<SDValue> Ops,
MemSDNode *M) const {
EVT IntVT = LoadVT.changeTypeToInteger();
unsigned Opc = (LoadVT.getScalarType() == MVT::i8) ?
AMDGPUISD::BUFFER_LOAD_UBYTE : AMDGPUISD::BUFFER_LOAD_USHORT;
SDVTList ResList = DAG.getVTList(MVT::i32, MVT::Other);
SDValue BufferLoad = DAG.getMemIntrinsicNode(Opc, DL, ResList,
Ops, IntVT,
M->getMemOperand());
SDValue LoadVal = DAG.getNode(ISD::TRUNCATE, DL, IntVT, BufferLoad);
LoadVal = DAG.getNode(ISD::BITCAST, DL, LoadVT, LoadVal);
return DAG.getMergeValues({LoadVal, BufferLoad.getValue(1)}, DL);
}
SDValue SITargetLowering::handleByteShortBufferStores(SelectionDAG &DAG,
EVT VDataType, SDLoc DL,
SDValue Ops[],
MemSDNode *M) const {
if (VDataType == MVT::f16)
Ops[1] = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Ops[1]);
SDValue BufferStoreExt = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Ops[1]);
Ops[1] = BufferStoreExt;
unsigned Opc = (VDataType == MVT::i8) ? AMDGPUISD::BUFFER_STORE_BYTE :
AMDGPUISD::BUFFER_STORE_SHORT;
ArrayRef<SDValue> OpsRef = makeArrayRef(&Ops[0], 9);
return DAG.getMemIntrinsicNode(Opc, DL, M->getVTList(), OpsRef, VDataType,
M->getMemOperand());
}
static SDValue getLoadExtOrTrunc(SelectionDAG &DAG,
ISD::LoadExtType ExtType, SDValue Op,
const SDLoc &SL, EVT VT) {
if (VT.bitsLT(Op.getValueType()))
return DAG.getNode(ISD::TRUNCATE, SL, VT, Op);
switch (ExtType) {
case ISD::SEXTLOAD:
return DAG.getNode(ISD::SIGN_EXTEND, SL, VT, Op);
case ISD::ZEXTLOAD:
return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, Op);
case ISD::EXTLOAD:
return DAG.getNode(ISD::ANY_EXTEND, SL, VT, Op);
case ISD::NON_EXTLOAD:
return Op;
}
llvm_unreachable("invalid ext type");
}
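// Widen a sub-dword, sufficiently aligned, uniform load from a constant-like
// address space into a full 32-bit load, then shift/mask and extend the
// result back to the original type so the access can still use the scalar
// memory path.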
SDValue SITargetLowering::widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
if (Ld->getAlign() < Align(4) || Ld->isDivergent())
return SDValue();
unsigned AS = Ld->getAddressSpace();
if (AS != AMDGPUAS::CONSTANT_ADDRESS &&
AS != AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
(AS != AMDGPUAS::GLOBAL_ADDRESS || !Ld->isInvariant()))
return SDValue();
EVT MemVT = Ld->getMemoryVT();
if ((MemVT.isSimple() && !DCI.isAfterLegalizeDAG()) ||
MemVT.getSizeInBits() >= 32)
return SDValue();
SDLoc SL(Ld);
assert((!MemVT.isVector() || Ld->getExtensionType() == ISD::NON_EXTLOAD) &&
"unexpected vector extload");
SDValue Ptr = Ld->getBasePtr();
SDValue NewLoad = DAG.getLoad(
ISD::UNINDEXED, ISD::NON_EXTLOAD, MVT::i32, SL, Ld->getChain(), Ptr,
Ld->getOffset(), Ld->getPointerInfo(), MVT::i32, Ld->getAlign(),
Ld->getMemOperand()->getFlags(), Ld->getAAInfo(),
nullptr);
EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
if (MemVT.isFloatingPoint()) {
assert(Ld->getExtensionType() == ISD::NON_EXTLOAD &&
"unexpected fp extload");
TruncVT = MemVT.changeTypeToInteger();
}
SDValue Cvt = NewLoad;
if (Ld->getExtensionType() == ISD::SEXTLOAD) {
Cvt = DAG.getNode(ISD::SIGN_EXTEND_INREG, SL, MVT::i32, NewLoad,
DAG.getValueType(TruncVT));
} else if (Ld->getExtensionType() == ISD::ZEXTLOAD ||
Ld->getExtensionType() == ISD::NON_EXTLOAD) {
Cvt = DAG.getZeroExtendInReg(NewLoad, SL, TruncVT);
} else {
assert(Ld->getExtensionType() == ISD::EXTLOAD);
}
EVT VT = Ld->getValueType(0);
EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
DCI.AddToWorklist(Cvt.getNode());
Cvt = getLoadExtOrTrunc(DAG, Ld->getExtensionType(), Cvt, SL, IntVT);
DCI.AddToWorklist(Cvt.getNode());
Cvt = DAG.getNode(ISD::BITCAST, SL, VT, Cvt);
return DAG.getMergeValues({ Cvt, NewLoad.getValue(1) }, SL);
}
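// Custom lowering for loads. i1/i8/i16 non-extending loads are widened to
// 32-bit extloads; vector loads are split or widened depending on address
// space, alignment and subtarget limits; unaligned accesses the target cannot
// handle are expanded.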
SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
SDLoc DL(Op);
LoadSDNode *Load = cast<LoadSDNode>(Op);
ISD::LoadExtType ExtType = Load->getExtensionType();
EVT MemVT = Load->getMemoryVT();
if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
if (MemVT == MVT::i16 && isTypeLegal(MVT::i16))
return SDValue();
SDValue Chain = Load->getChain();
SDValue BasePtr = Load->getBasePtr();
MachineMemOperand *MMO = Load->getMemOperand();
EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;
SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
BasePtr, RealMemVT, MMO);
if (!MemVT.isVector()) {
SDValue Ops[] = {
DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
NewLD.getValue(1)
};
return DAG.getMergeValues(Ops, DL);
}
SmallVector<SDValue, 3> Elts;
for (unsigned I = 0, N = MemVT.getVectorNumElements(); I != N; ++I) {
SDValue Elt = DAG.getNode(ISD::SRL, DL, MVT::i32, NewLD,
DAG.getConstant(I, DL, MVT::i32));
Elts.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Elt));
}
SDValue Ops[] = {
DAG.getBuildVector(MemVT, DL, Elts),
NewLD.getValue(1)
};
return DAG.getMergeValues(Ops, DL);
}
if (!MemVT.isVector())
return SDValue();
assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
"Custom lowering for non-i32 vectors hasn't been implemented.");
Align Alignment = Load->getAlign();
unsigned AS = Load->getAddressSpace();
if (Subtarget->hasLDSMisalignedBug() && AS == AMDGPUAS::FLAT_ADDRESS &&
Alignment.value() < MemVT.getStoreSize() && MemVT.getSizeInBits() > 32) {
return SplitVectorLoad(Op, DAG);
}
MachineFunction &MF = DAG.getMachineFunction();
SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
if (AS == AMDGPUAS::FLAT_ADDRESS &&
!Subtarget->hasMultiDwordFlatScratchAddressing())
AS = MFI->hasFlatScratchInit() ?
AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;
unsigned NumElements = MemVT.getVectorNumElements();
if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
if (!Op->isDivergent() && Alignment >= Align(4) && NumElements < 32) {
if (MemVT.isPow2VectorType())
return SDValue();
return WidenOrSplitVectorLoad(Op, DAG);
}
}
if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
AS == AMDGPUAS::GLOBAL_ADDRESS) {
if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() &&
Load->isSimple() && isMemOpHasNoClobberedMemOperand(Load) &&
Alignment >= Align(4) && NumElements < 32) {
if (MemVT.isPow2VectorType())
return SDValue();
return WidenOrSplitVectorLoad(Op, DAG);
}
}
if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
AS == AMDGPUAS::GLOBAL_ADDRESS ||
AS == AMDGPUAS::FLAT_ADDRESS) {
if (NumElements > 4)
return SplitVectorLoad(Op, DAG);
if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
return WidenOrSplitVectorLoad(Op, DAG);
return SDValue();
}
if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
switch (Subtarget->getMaxPrivateElementSize()) {
case 4: {
SDValue Ops[2];
std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(Load, DAG);
return DAG.getMergeValues(Ops, DL);
}
case 8:
if (NumElements > 2)
return SplitVectorLoad(Op, DAG);
return SDValue();
case 16:
if (NumElements > 4)
return SplitVectorLoad(Op, DAG);
if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
return WidenOrSplitVectorLoad(Op, DAG);
return SDValue();
default:
llvm_unreachable("unsupported private_element_size");
}
} else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
bool Fast = false;
auto Flags = Load->getMemOperand()->getFlags();
if (allowsMisalignedMemoryAccessesImpl(MemVT.getSizeInBits(), AS,
Load->getAlign(), Flags, &Fast) &&
Fast)
return SDValue();
if (MemVT.isVector())
return SplitVectorLoad(Op, DAG);
}
if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
MemVT, *Load->getMemOperand())) {
SDValue Ops[2];
std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
return DAG.getMergeValues(Ops, DL);
}
return SDValue();
}
SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
if (VT.getSizeInBits() == 128 || VT.getSizeInBits() == 256)
return splitTernaryVectorOp(Op, DAG);
assert(VT.getSizeInBits() == 64);
SDLoc DL(Op);
SDValue Cond = Op.getOperand(0);
SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
SDValue One = DAG.getConstant(1, DL, MVT::i32);
SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));
SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero);
SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero);
SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1);
SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One);
SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One);
SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);
SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi});
return DAG.getNode(ISD::BITCAST, DL, VT, Res);
}
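// Fast lowering of fdiv when approximate-function flags are present:
// 1.0 / sqrt(x) -> rsq(x), 1.0 / x -> rcp(x), -1.0 / x -> rcp(-x), and the
// general case x / y -> x * rcp(y).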
SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
SelectionDAG &DAG) const {
SDLoc SL(Op);
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
EVT VT = Op.getValueType();
const SDNodeFlags Flags = Op->getFlags();
bool AllowInaccurateRcp = Flags.hasApproximateFuncs();
if (!AllowInaccurateRcp)
return SDValue();
if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
if (CLHS->isExactlyValue(1.0)) {
if (RHS.getOpcode() == ISD::FSQRT)
return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));
return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
}
if (CLHS->isExactlyValue(-1.0)) {
SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS);
}
}
SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags);
}
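// Approximate f64 division: start from rcp(Y), refine the reciprocal with two
// Newton-Raphson fma steps, form the quotient, and apply one final fma
// refinement of the quotient.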
SDValue SITargetLowering::lowerFastUnsafeFDIV64(SDValue Op,
SelectionDAG &DAG) const {
SDLoc SL(Op);
SDValue X = Op.getOperand(0);
SDValue Y = Op.getOperand(1);
EVT VT = Op.getValueType();
const SDNodeFlags Flags = Op->getFlags();
bool AllowInaccurateDiv = Flags.hasApproximateFuncs() ||
DAG.getTarget().Options.UnsafeFPMath;
if (!AllowInaccurateDiv)
return SDValue();
SDValue NegY = DAG.getNode(ISD::FNEG, SL, VT, Y);
SDValue One = DAG.getConstantFP(1.0, SL, VT);
SDValue R = DAG.getNode(AMDGPUISD::RCP, SL, VT, Y);
SDValue Tmp0 = DAG.getNode(ISD::FMA, SL, VT, NegY, R, One);
R = DAG.getNode(ISD::FMA, SL, VT, Tmp0, R, R);
SDValue Tmp1 = DAG.getNode(ISD::FMA, SL, VT, NegY, R, One);
R = DAG.getNode(ISD::FMA, SL, VT, Tmp1, R, R);
SDValue Ret = DAG.getNode(ISD::FMUL, SL, VT, X, R);
SDValue Tmp2 = DAG.getNode(ISD::FMA, SL, VT, NegY, Ret, X);
return DAG.getNode(ISD::FMA, SL, VT, Tmp2, R, Ret);
}
static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
EVT VT, SDValue A, SDValue B, SDValue GlueChain,
SDNodeFlags Flags) {
if (GlueChain->getNumValues() <= 1) {
return DAG.getNode(Opcode, SL, VT, A, B, Flags);
}
assert(GlueChain->getNumValues() == 3);
SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
switch (Opcode) {
default: llvm_unreachable("no chain equivalent for opcode");
case ISD::FMUL:
Opcode = AMDGPUISD::FMUL_W_CHAIN;
break;
}
return DAG.getNode(Opcode, SL, VTList,
{GlueChain.getValue(1), A, B, GlueChain.getValue(2)},
Flags);
}
static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
EVT VT, SDValue A, SDValue B, SDValue C,
SDValue GlueChain, SDNodeFlags Flags) {
if (GlueChain->getNumValues() <= 1) {
return DAG.getNode(Opcode, SL, VT, {A, B, C}, Flags);
}
assert(GlueChain->getNumValues() == 3);
SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
switch (Opcode) {
default: llvm_unreachable("no chain equivalent for opcode");
case ISD::FMA:
Opcode = AMDGPUISD::FMA_W_CHAIN;
break;
}
return DAG.getNode(Opcode, SL, VTList,
{GlueChain.getValue(1), A, B, C, GlueChain.getValue(2)},
Flags);
}
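// f16 division is performed in f32: extend both operands, take rcp of the
// denominator, multiply, round back to f16 and fix up the result with
// DIV_FIXUP.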
SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const {
if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
return FastLowered;
SDLoc SL(Op);
SDValue Src0 = Op.getOperand(0);
SDValue Src1 = Op.getOperand(1);
SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1);
SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1);
SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32);
SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag);
return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0);
}
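// Fast f32 division used by the fdiv.fast path: if |den| exceeds 2^+96 the
// denominator is pre-scaled by 2^-32 to keep rcp in range, and the same scale
// factor is applied to the final product to compensate.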
SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const {
SDLoc SL(Op);
SDValue LHS = Op.getOperand(1);
SDValue RHS = Op.getOperand(2);
SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);
const APFloat K0Val(BitsToFloat(0x6f800000));
const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);
const APFloat K1Val(BitsToFloat(0x2f800000));
const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);
const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
EVT SetCCVT =
getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);
SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);
SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);
r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);
SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);
SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);
return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
}
static SDValue getSPDenormModeValue(int SPDenormMode, SelectionDAG &DAG,
const SDLoc &SL, const GCNSubtarget *ST) {
assert(ST->hasDenormModeInst() && "Requires S_DENORM_MODE");
int DPDenormModeDefault = hasFP64FP16Denormals(DAG.getMachineFunction())
? FP_DENORM_FLUSH_NONE
: FP_DENORM_FLUSH_IN_FLUSH_OUT;
int Mode = SPDenormMode | (DPDenormModeDefault << 2);
return DAG.getTargetConstant(Mode, SL, MVT::i32);
}
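// Full-precision f32 division: scale numerator and denominator with
// DIV_SCALE, refine an initial rcp estimate with a chain of fmas, and combine
// the result with DIV_FMAS/DIV_FIXUP. If FP32 denormals are flushed, denormal
// handling is temporarily enabled around the fma chain via S_DENORM_MODE or
// S_SETREG.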
SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
return FastLowered;
SDNodeFlags Flags = Op->getFlags();
Flags.setNoFPExcept(true);
SDLoc SL(Op);
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1);
SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
{RHS, RHS, LHS}, Flags);
SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
{LHS, RHS, LHS}, Flags);
SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32,
DenominatorScaled, Flags);
SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32,
DenominatorScaled, Flags);
const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE |
(4 << AMDGPU::Hwreg::OFFSET_SHIFT_) |
(1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_);
const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i32);
const bool HasFP32Denormals = hasFP32Denormals(DAG.getMachineFunction());
if (!HasFP32Denormals) {
SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDNode *EnableDenorm;
if (Subtarget->hasDenormModeInst()) {
const SDValue EnableDenormValue =
getSPDenormModeValue(FP_DENORM_FLUSH_NONE, DAG, SL, Subtarget);
EnableDenorm = DAG.getNode(AMDGPUISD::DENORM_MODE, SL, BindParamVTs,
DAG.getEntryNode(), EnableDenormValue).getNode();
} else {
const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE,
SL, MVT::i32);
EnableDenorm =
DAG.getMachineNode(AMDGPU::S_SETREG_B32, SL, BindParamVTs,
{EnableDenormValue, BitField, DAG.getEntryNode()});
}
SDValue Ops[3] = {
NegDivScale0,
SDValue(EnableDenorm, 0),
SDValue(EnableDenorm, 1)
};
NegDivScale0 = DAG.getMergeValues(Ops, SL);
}
SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0,
ApproxRcp, One, NegDivScale0, Flags);
SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp,
ApproxRcp, Fma0, Flags);
SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled,
Fma1, Fma1, Flags);
SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul,
NumeratorScaled, Mul, Flags);
SDValue Fma3 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32,
Fma2, Fma1, Mul, Fma2, Flags);
SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3,
NumeratorScaled, Fma3, Flags);
if (!HasFP32Denormals) {
SDNode *DisableDenorm;
if (Subtarget->hasDenormModeInst()) {
const SDValue DisableDenormValue =
getSPDenormModeValue(FP_DENORM_FLUSH_IN_FLUSH_OUT, DAG, SL, Subtarget);
DisableDenorm = DAG.getNode(AMDGPUISD::DENORM_MODE, SL, MVT::Other,
Fma4.getValue(1), DisableDenormValue,
Fma4.getValue(2)).getNode();
} else {
const SDValue DisableDenormValue =
DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32);
DisableDenorm = DAG.getMachineNode(
AMDGPU::S_SETREG_B32, SL, MVT::Other,
{DisableDenormValue, BitField, Fma4.getValue(1), Fma4.getValue(2)});
}
SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
SDValue(DisableDenorm, 0), DAG.getRoot());
DAG.setRoot(OutputChain);
}
SDValue Scale = NumeratorScaled.getValue(1);
SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32,
{Fma4, Fma1, Fma3, Scale}, Flags);
return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS, Flags);
}
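// Full-precision f64 division, using the same DIV_SCALE/rcp/fma refinement
// scheme as the f32 path. On subtargets without a usable DIV_SCALE condition
// output, the scale condition is reconstructed by comparing the high dwords
// of the scaled values against the original operands.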
SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
if (SDValue FastLowered = lowerFastUnsafeFDIV64(Op, DAG))
return FastLowered;
SDLoc SL(Op);
SDValue X = Op.getOperand(0);
SDValue Y = Op.getOperand(1);
const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);
SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);
SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);
SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);
SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);
SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);
SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);
SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);
SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);
SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
NegDivScale0, Mul, DivScale1);
SDValue Scale;
if (!Subtarget->hasUsableDivScaleConditionOutput()) {
const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);
SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);
SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);
SDValue Scale0Hi
= DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
SDValue Scale1Hi
= DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);
SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
} else {
Scale = DivScale1.getValue(1);
}
SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
Fma4, Fma3, Mul, Scale);
return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
}
SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
if (VT == MVT::f32)
return LowerFDIV32(Op, DAG);
if (VT == MVT::f64)
return LowerFDIV64(Op, DAG);
if (VT == MVT::f16)
return LowerFDIV16(Op, DAG);
llvm_unreachable("Unexpected type for fdiv");
}
SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
SDLoc DL(Op);
StoreSDNode *Store = cast<StoreSDNode>(Op);
EVT VT = Store->getMemoryVT();
if (VT == MVT::i1) {
return DAG.getTruncStore(Store->getChain(), DL,
DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
Store->getBasePtr(), MVT::i1, Store->getMemOperand());
}
assert(VT.isVector() &&
Store->getValue().getValueType().getScalarType() == MVT::i32);
unsigned AS = Store->getAddressSpace();
if (Subtarget->hasLDSMisalignedBug() &&
AS == AMDGPUAS::FLAT_ADDRESS &&
Store->getAlign().value() < VT.getStoreSize() && VT.getSizeInBits() > 32) {
return SplitVectorStore(Op, DAG);
}
MachineFunction &MF = DAG.getMachineFunction();
SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
if (AS == AMDGPUAS::FLAT_ADDRESS &&
!Subtarget->hasMultiDwordFlatScratchAddressing())
AS = MFI->hasFlatScratchInit() ?
AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;
unsigned NumElements = VT.getVectorNumElements();
if (AS == AMDGPUAS::GLOBAL_ADDRESS ||
AS == AMDGPUAS::FLAT_ADDRESS) {
if (NumElements > 4)
return SplitVectorStore(Op, DAG);
if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
return SplitVectorStore(Op, DAG);
if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
VT, *Store->getMemOperand()))
return expandUnalignedStore(Store, DAG);
return SDValue();
} else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
switch (Subtarget->getMaxPrivateElementSize()) {
case 4:
return scalarizeVectorStore(Store, DAG);
case 8:
if (NumElements > 2)
return SplitVectorStore(Op, DAG);
return SDValue();
case 16:
if (NumElements > 4 ||
(NumElements == 3 && !Subtarget->enableFlatScratch()))
return SplitVectorStore(Op, DAG);
return SDValue();
default:
llvm_unreachable("unsupported private_element_size");
}
} else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
bool Fast = false;
auto Flags = Store->getMemOperand()->getFlags();
if (allowsMisalignedMemoryAccessesImpl(VT.getSizeInBits(), AS,
Store->getAlign(), Flags, &Fast) &&
Fast)
return SDValue();
if (VT.isVector())
return SplitVectorStore(Op, DAG);
return expandUnalignedStore(Store, DAG);
}
return SDValue();
}
SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
SDLoc DL(Op);
EVT VT = Op.getValueType();
SDValue Arg = Op.getOperand(0);
SDValue TrigVal;
auto Flags = Op->getFlags();
SDValue OneOver2Pi = DAG.getConstantFP(0.5 * numbers::inv_pi, DL, VT);
if (Subtarget->hasTrigReducedRange()) {
SDValue MulVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi, Flags);
TrigVal = DAG.getNode(AMDGPUISD::FRACT, DL, VT, MulVal, Flags);
} else {
TrigVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi, Flags);
}
switch (Op.getOpcode()) {
case ISD::FCOS:
return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, TrigVal, Flags);
case ISD::FSIN:
return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, TrigVal, Flags);
default:
llvm_unreachable("Wrong trig opcode");
}
}
SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op);
assert(AtomicNode->isCompareAndSwap());
unsigned AS = AtomicNode->getAddressSpace();
if (!AMDGPU::isFlatGlobalAddrSpace(AS))
return Op;
SDLoc DL(Op);
SDValue ChainIn = Op.getOperand(0);
SDValue Addr = Op.getOperand(1);
SDValue Old = Op.getOperand(2);
SDValue New = Op.getOperand(3);
EVT VT = Op.getValueType();
MVT SimpleVT = VT.getSimpleVT();
MVT VecType = MVT::getVectorVT(SimpleVT, 2);
SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
SDValue Ops[] = { ChainIn, Addr, NewOld };
return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(),
Ops, VT, AtomicNode->getMemOperand());
}
SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
EVT VT = N->getValueType(0);
EVT ScalarVT = VT.getScalarType();
if (ScalarVT != MVT::f32 && ScalarVT != MVT::f16)
return SDValue();
SelectionDAG &DAG = DCI.DAG;
SDLoc DL(N);
SDValue Src = N->getOperand(0);
EVT SrcVT = Src.getValueType();
if (DCI.isAfterLegalizeDAG() && SrcVT == MVT::i32) {
if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, MVT::f32, Src);
DCI.AddToWorklist(Cvt.getNode());
if (ScalarVT != MVT::f32) {
Cvt = DAG.getNode(ISD::FP_ROUND, DL, VT, Cvt,
DAG.getTargetConstant(0, DL, MVT::i32));
}
return Cvt;
}
}
return SDValue();
}
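// (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
// The generic combiner only does this when the add has a single use; doing it
// here for multi-use adds exposes a constant offset that can be folded into a
// memory instruction's addressing mode, provided the resulting offset is
// legal for the address space.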
SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
unsigned AddrSpace,
EVT MemVT,
DAGCombinerInfo &DCI) const {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) ||
N0->hasOneUse())
return SDValue();
const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
if (!CN1)
return SDValue();
const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
if (!CAdd)
return SDValue();
APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext());
AddrMode AM;
AM.HasBaseReg = true;
AM.BaseOffs = Offset.getSExtValue();
if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace))
return SDValue();
SelectionDAG &DAG = DCI.DAG;
SDLoc SL(N);
EVT VT = N->getValueType(0);
SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
SDValue COffset = DAG.getConstant(Offset, SL, VT);
SDNodeFlags Flags;
Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() &&
(N0.getOpcode() == ISD::OR ||
N0->getFlags().hasNoUnsignedWrap()));
return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags);
}
static unsigned getBasePtrIndex(const MemSDNode *N) {
switch (N->getOpcode()) {
case ISD::STORE:
case ISD::INTRINSIC_W_CHAIN:
case ISD::INTRINSIC_VOID:
return 2;
default:
return 1;
}
}
SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
SDLoc SL(N);
unsigned PtrIdx = getBasePtrIndex(N);
SDValue Ptr = N->getOperand(PtrIdx);
if (Ptr.getOpcode() == ISD::SHL) {
SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), N->getAddressSpace(),
N->getMemoryVT(), DCI);
if (NewPtr) {
SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end());
NewOps[PtrIdx] = NewPtr;
return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
}
}
return SDValue();
}
static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) {
return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) ||
(Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) ||
(Opc == ISD::XOR && Val == 0);
}
SDValue SITargetLowering::splitBinaryBitConstantOp(
DAGCombinerInfo &DCI,
const SDLoc &SL,
unsigned Opc, SDValue LHS,
const ConstantSDNode *CRHS) const {
uint64_t Val = CRHS->getZExtValue();
uint32_t ValLo = Lo_32(Val);
uint32_t ValHi = Hi_32(Val);
const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
bitOpWithConstantIsReducible(Opc, ValHi)) ||
(CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
}
return SDValue();
}
static bool isBoolSGPR(SDValue V) {
if (V.getValueType() != MVT::i1)
return false;
switch (V.getOpcode()) {
default:
break;
case ISD::SETCC:
case AMDGPUISD::FP_CLASS:
return true;
case ISD::AND:
case ISD::OR:
case ISD::XOR:
return isBoolSGPR(V.getOperand(0)) && isBoolSGPR(V.getOperand(1));
}
return false;
}
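// Return C if every byte of C is either 0x00 or 0xff (i.e. the mask selects
// whole bytes); otherwise return 0.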
static uint32_t getConstantPermuteMask(uint32_t C) {
uint32_t ZeroByteMask = 0;
if (!(C & 0x000000ff)) ZeroByteMask |= 0x000000ff;
if (!(C & 0x0000ff00)) ZeroByteMask |= 0x0000ff00;
if (!(C & 0x00ff0000)) ZeroByteMask |= 0x00ff0000;
if (!(C & 0xff000000)) ZeroByteMask |= 0xff000000;
uint32_t NonZeroByteMask = ~ZeroByteMask;
if ((NonZeroByteMask & C) != NonZeroByteMask)
return 0;
return C;
}
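// Compute the v_perm_b32 byte-select mask describing how a 32-bit value is
// produced from operand 0 of V: 0x0c entries denote zeroed bytes, 0x00-0x03
// pick source bytes. Returns ~0 if V cannot be described as a byte permute
// (only AND/OR with whole-byte constants and byte-aligned shifts qualify).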
static uint32_t getPermuteMask(SelectionDAG &DAG, SDValue V) {
assert(V.getValueSizeInBits() == 32);
if (V.getNumOperands() != 2)
return ~0;
ConstantSDNode *N1 = dyn_cast<ConstantSDNode>(V.getOperand(1));
if (!N1)
return ~0;
uint32_t C = N1->getZExtValue();
switch (V.getOpcode()) {
default:
break;
case ISD::AND:
if (uint32_t ConstMask = getConstantPermuteMask(C)) {
return (0x03020100 & ConstMask) | (0x0c0c0c0c & ~ConstMask);
}
break;
case ISD::OR:
if (uint32_t ConstMask = getConstantPermuteMask(C)) {
return (0x03020100 & ~ConstMask) | ConstMask;
}
break;
case ISD::SHL:
if (C % 8)
return ~0;
return uint32_t((0x030201000c0c0c0cull << C) >> 32);
case ISD::SRL:
if (C % 8)
return ~0;
return uint32_t(0x0c0c0c0c03020100ull >> C);
}
return ~0;
}
SDValue SITargetLowering::performAndCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
if (DCI.isBeforeLegalize())
return SDValue();
SelectionDAG &DAG = DCI.DAG;
EVT VT = N->getValueType(0);
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
if (VT == MVT::i64 && CRHS) {
if (SDValue Split
= splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS))
return Split;
}
if (CRHS && VT == MVT::i32) {
uint64_t Mask = CRHS->getZExtValue();
unsigned Bits = countPopulation(Mask);
if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL &&
(Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) {
if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
unsigned Shift = CShift->getZExtValue();
unsigned NB = CRHS->getAPIntValue().countTrailingZeros();
unsigned Offset = NB + Shift;
if ((Offset & (Bits - 1)) == 0) {
SDLoc SL(N);
SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
LHS->getOperand(0),
DAG.getConstant(Offset, SL, MVT::i32),
DAG.getConstant(Bits, SL, MVT::i32));
EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE,
DAG.getValueType(NarrowVT));
SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext,
DAG.getConstant(NB, SDLoc(CRHS), MVT::i32));
return Shl;
}
}
}
if (LHS.hasOneUse() && LHS.getOpcode() == AMDGPUISD::PERM &&
isa<ConstantSDNode>(LHS.getOperand(2))) {
uint32_t Sel = getConstantPermuteMask(Mask);
if (!Sel)
return SDValue();
Sel = (LHS.getConstantOperandVal(2) & Sel) | (~Sel & 0x0c0c0c0c);
SDLoc DL(N);
return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
}
}
if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) {
ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get();
SDValue X = LHS.getOperand(0);
SDValue Y = RHS.getOperand(0);
if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X)
return SDValue();
if (LCC == ISD::SETO) {
if (X != LHS.getOperand(1))
return SDValue();
if (RCC == ISD::SETUNE) {
const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1));
if (!C1 || !C1->isInfinity() || C1->isNegative())
return SDValue();
const uint32_t Mask = SIInstrFlags::N_NORMAL |
SIInstrFlags::N_SUBNORMAL |
SIInstrFlags::N_ZERO |
SIInstrFlags::P_ZERO |
SIInstrFlags::P_SUBNORMAL |
SIInstrFlags::P_NORMAL;
static_assert(((~(SIInstrFlags::S_NAN |
SIInstrFlags::Q_NAN |
SIInstrFlags::N_INFINITY |
SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask,
"mask not equal");
SDLoc DL(N);
return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
X, DAG.getConstant(Mask, DL, MVT::i32));
}
}
}
if (RHS.getOpcode() == ISD::SETCC && LHS.getOpcode() == AMDGPUISD::FP_CLASS)
std::swap(LHS, RHS);
if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == AMDGPUISD::FP_CLASS &&
RHS.hasOneUse()) {
ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
const ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
if ((LCC == ISD::SETO || LCC == ISD::SETUO) && Mask &&
(RHS.getOperand(0) == LHS.getOperand(0) &&
LHS.getOperand(0) == LHS.getOperand(1))) {
const unsigned OrdMask = SIInstrFlags::S_NAN | SIInstrFlags::Q_NAN;
unsigned NewMask = LCC == ISD::SETO ?
Mask->getZExtValue() & ~OrdMask :
Mask->getZExtValue() & OrdMask;
SDLoc DL(N);
return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, RHS.getOperand(0),
DAG.getConstant(NewMask, DL, MVT::i32));
}
}
if (VT == MVT::i32 &&
(RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) {
if (RHS.getOpcode() != ISD::SIGN_EXTEND)
std::swap(LHS, RHS);
if (isBoolSGPR(RHS.getOperand(0)))
return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0),
LHS, DAG.getConstant(0, SDLoc(N), MVT::i32));
}
const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32_e64) != -1) {
uint32_t LHSMask = getPermuteMask(DAG, LHS);
uint32_t RHSMask = getPermuteMask(DAG, RHS);
if (LHSMask != ~0u && RHSMask != ~0u) {
if (LHSMask > RHSMask) {
std::swap(LHSMask, RHSMask);
std::swap(LHS, RHS);
}
uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
if (!(LHSUsedLanes & RHSUsedLanes) &&
!(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
uint32_t Mask = LHSMask & RHSMask;
for (unsigned I = 0; I < 32; I += 8) {
uint32_t ByteSel = 0xff << I;
if ((LHSMask & ByteSel) == 0x0c || (RHSMask & ByteSel) == 0x0c)
Mask &= (0x0c << I) & 0xffffffff;
}
uint32_t Sel = Mask | (LHSUsedLanes & 0x04040404);
SDLoc DL(N);
return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
LHS.getOperand(0), RHS.getOperand(0),
DAG.getConstant(Sel, DL, MVT::i32));
}
}
}
return SDValue();
}
SDValue SITargetLowering::performOrCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
EVT VT = N->getValueType(0);
if (VT == MVT::i1) {
if (LHS.getOpcode() == AMDGPUISD::FP_CLASS &&
RHS.getOpcode() == AMDGPUISD::FP_CLASS) {
SDValue Src = LHS.getOperand(0);
if (Src != RHS.getOperand(0))
return SDValue();
const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
if (!CLHS || !CRHS)
return SDValue();
static const uint32_t MaxMask = 0x3ff;
uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
SDLoc DL(N);
return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
Src, DAG.getConstant(NewMask, DL, MVT::i32));
}
return SDValue();
}
if (isa<ConstantSDNode>(RHS) && LHS.hasOneUse() &&
LHS.getOpcode() == AMDGPUISD::PERM &&
isa<ConstantSDNode>(LHS.getOperand(2))) {
uint32_t Sel = getConstantPermuteMask(N->getConstantOperandVal(1));
if (!Sel)
return SDValue();
Sel |= LHS.getConstantOperandVal(2);
SDLoc DL(N);
return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
}
const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32_e64) != -1) {
uint32_t LHSMask = getPermuteMask(DAG, LHS);
uint32_t RHSMask = getPermuteMask(DAG, RHS);
if (LHSMask != ~0u && RHSMask != ~0u) {
if (LHSMask > RHSMask) {
std::swap(LHSMask, RHSMask);
std::swap(LHS, RHS);
}
uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
if (!(LHSUsedLanes & RHSUsedLanes) &&
!(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
LHSMask &= ~RHSUsedLanes;
RHSMask &= ~LHSUsedLanes;
LHSMask |= LHSUsedLanes & 0x04040404;
uint32_t Sel = LHSMask | RHSMask;
SDLoc DL(N);
return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
LHS.getOperand(0), RHS.getOperand(0),
DAG.getConstant(Sel, DL, MVT::i32));
}
}
}
if (VT != MVT::i64 || DCI.isBeforeLegalizeOps())
return SDValue();
if (LHS.getOpcode() == ISD::ZERO_EXTEND &&
RHS.getOpcode() != ISD::ZERO_EXTEND)
std::swap(LHS, RHS);
if (RHS.getOpcode() == ISD::ZERO_EXTEND) {
SDValue ExtSrc = RHS.getOperand(0);
EVT SrcVT = ExtSrc.getValueType();
if (SrcVT == MVT::i32) {
SDLoc SL(N);
SDValue LowLHS, HiBits;
std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG);
SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc);
DCI.AddToWorklist(LowOr.getNode());
DCI.AddToWorklist(HiBits.getNode());
SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
LowOr, HiBits);
return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
}
}
const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (CRHS) {
if (SDValue Split
= splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR,
N->getOperand(0), CRHS))
return Split;
}
return SDValue();
}
SDValue SITargetLowering::performXorCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
if (SDValue RV = reassociateScalarOps(N, DCI.DAG))
return RV;
EVT VT = N->getValueType(0);
if (VT != MVT::i64)
return SDValue();
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
if (CRHS) {
if (SDValue Split
= splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS))
return Split;
}
return SDValue();
}
SDValue SITargetLowering::performZeroExtendCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
if (!Subtarget->has16BitInsts() ||
DCI.getDAGCombineLevel() < AfterLegalizeDAG)
return SDValue();
EVT VT = N->getValueType(0);
if (VT != MVT::i32)
return SDValue();
SDValue Src = N->getOperand(0);
if (Src.getValueType() != MVT::i16)
return SDValue();
return SDValue();
}
SDValue SITargetLowering::performSignExtendInRegCombine(SDNode *N,
DAGCombinerInfo &DCI)
const {
SDValue Src = N->getOperand(0);
auto *VTSign = cast<VTSDNode>(N->getOperand(1));
if (((Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE &&
VTSign->getVT() == MVT::i8) ||
(Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_USHORT &&
VTSign->getVT() == MVT::i16)) &&
Src.hasOneUse()) {
auto *M = cast<MemSDNode>(Src);
SDValue Ops[] = {
Src.getOperand(0),
Src.getOperand(1),
Src.getOperand(2),
Src.getOperand(3),
Src.getOperand(4),
Src.getOperand(5),
Src.getOperand(6),
Src.getOperand(7)
};
SDVTList ResList = DCI.DAG.getVTList(MVT::i32,
Src.getOperand(0).getValueType());
unsigned Opc = (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE) ?
AMDGPUISD::BUFFER_LOAD_BYTE : AMDGPUISD::BUFFER_LOAD_SHORT;
SDValue BufferLoadSignExt = DCI.DAG.getMemIntrinsicNode(Opc, SDLoc(N),
ResList,
Ops, M->getMemoryVT(),
M->getMemOperand());
return DCI.DAG.getMergeValues({BufferLoadSignExt,
BufferLoadSignExt.getValue(1)}, SDLoc(N));
}
return SDValue();
}
SDValue SITargetLowering::performClassCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
SDValue Mask = N->getOperand(1);
if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
if (CMask->isZero())
return DAG.getConstant(0, SDLoc(N), MVT::i1);
}
if (N->getOperand(0).isUndef())
return DAG.getUNDEF(MVT::i1);
return SDValue();
}
SDValue SITargetLowering::performRcpCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
EVT VT = N->getValueType(0);
SDValue N0 = N->getOperand(0);
if (N0.isUndef())
return N0;
if (VT == MVT::f32 && (N0.getOpcode() == ISD::UINT_TO_FP ||
N0.getOpcode() == ISD::SINT_TO_FP)) {
return DCI.DAG.getNode(AMDGPUISD::RCP_IFLAG, SDLoc(N), VT, N0,
N->getFlags());
}
if ((VT == MVT::f32 || VT == MVT::f16) && N0.getOpcode() == ISD::FSQRT) {
return DCI.DAG.getNode(AMDGPUISD::RSQ, SDLoc(N), VT,
N0.getOperand(0), N->getFlags());
}
return AMDGPUTargetLowering::performRcpCombine(N, DCI);
}
bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op,
unsigned MaxDepth) const {
unsigned Opcode = Op.getOpcode();
if (Opcode == ISD::FCANONICALIZE)
return true;
if (auto *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
auto F = CFP->getValueAPF();
if (F.isNaN() && F.isSignaling())
return false;
return !F.isDenormal() || denormalsEnabledForType(DAG, Op.getValueType());
}
if (MaxDepth == 0)
return false;
switch (Opcode) {
case ISD::FADD:
case ISD::FSUB:
case ISD::FMUL:
case ISD::FCEIL:
case ISD::FFLOOR:
case ISD::FMA:
case ISD::FMAD:
case ISD::FSQRT:
case ISD::FDIV:
case ISD::FREM:
case ISD::FP_ROUND:
case ISD::FP_EXTEND:
case AMDGPUISD::FMUL_LEGACY:
case AMDGPUISD::FMAD_FTZ:
case AMDGPUISD::RCP:
case AMDGPUISD::RSQ:
case AMDGPUISD::RSQ_CLAMP:
case AMDGPUISD::RCP_LEGACY:
case AMDGPUISD::RCP_IFLAG:
case AMDGPUISD::DIV_SCALE:
case AMDGPUISD::DIV_FMAS:
case AMDGPUISD::DIV_FIXUP:
case AMDGPUISD::FRACT:
case AMDGPUISD::LDEXP:
case AMDGPUISD::CVT_PKRTZ_F16_F32:
case AMDGPUISD::CVT_F32_UBYTE0:
case AMDGPUISD::CVT_F32_UBYTE1:
case AMDGPUISD::CVT_F32_UBYTE2:
case AMDGPUISD::CVT_F32_UBYTE3:
return true;
case ISD::FNEG:
case ISD::FABS:
case ISD::FCOPYSIGN:
return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
case ISD::FSIN:
case ISD::FCOS:
case ISD::FSINCOS:
return Op.getValueType().getScalarType() != MVT::f16;
case ISD::FMINNUM:
case ISD::FMAXNUM:
case ISD::FMINNUM_IEEE:
case ISD::FMAXNUM_IEEE:
case AMDGPUISD::CLAMP:
case AMDGPUISD::FMED3:
case AMDGPUISD::FMAX3:
case AMDGPUISD::FMIN3: {
if (Subtarget->supportsMinMaxDenormModes() ||
denormalsEnabledForType(DAG, Op.getValueType()))
return true;
for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I) {
if (!isCanonicalized(DAG, Op.getOperand(I), MaxDepth - 1))
return false;
}
return true;
}
case ISD::SELECT: {
return isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1) &&
isCanonicalized(DAG, Op.getOperand(2), MaxDepth - 1);
}
case ISD::BUILD_VECTOR: {
for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
SDValue SrcOp = Op.getOperand(i);
if (!isCanonicalized(DAG, SrcOp, MaxDepth - 1))
return false;
}
return true;
}
case ISD::EXTRACT_VECTOR_ELT:
case ISD::EXTRACT_SUBVECTOR: {
return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
}
case ISD::INSERT_VECTOR_ELT: {
return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1) &&
isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1);
}
case ISD::UNDEF:
return false;
case ISD::BITCAST:
return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
case ISD::TRUNCATE: {
if (Op.getValueType() == MVT::i16) {
SDValue TruncSrc = Op.getOperand(0);
if (TruncSrc.getValueType() == MVT::i32 &&
TruncSrc.getOpcode() == ISD::BITCAST &&
TruncSrc.getOperand(0).getValueType() == MVT::v2f16) {
return isCanonicalized(DAG, TruncSrc.getOperand(0), MaxDepth - 1);
}
}
return false;
}
case ISD::INTRINSIC_WO_CHAIN: {
unsigned IntrinsicID
= cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
switch (IntrinsicID) {
case Intrinsic::amdgcn_cvt_pkrtz:
case Intrinsic::amdgcn_cubeid:
case Intrinsic::amdgcn_frexp_mant:
case Intrinsic::amdgcn_fdot2:
case Intrinsic::amdgcn_rcp:
case Intrinsic::amdgcn_rsq:
case Intrinsic::amdgcn_rsq_clamp:
case Intrinsic::amdgcn_rcp_legacy:
case Intrinsic::amdgcn_rsq_legacy:
case Intrinsic::amdgcn_trig_preop:
return true;
default:
break;
}
LLVM_FALLTHROUGH;
}
default:
return denormalsEnabledForType(DAG, Op.getValueType()) &&
DAG.isKnownNeverSNaN(Op);
}
llvm_unreachable("invalid operation");
}
bool SITargetLowering::isCanonicalized(Register Reg, MachineFunction &MF,
unsigned MaxDepth) const {
MachineRegisterInfo &MRI = MF.getRegInfo();
MachineInstr *MI = MRI.getVRegDef(Reg);
unsigned Opcode = MI->getOpcode();
if (Opcode == AMDGPU::G_FCANONICALIZE)
return true;
Optional<FPValueAndVReg> FCR;
if (mi_match(Reg, MRI, MIPatternMatch::m_GFCstOrSplat(FCR))) {
if (FCR->Value.isSignaling())
return false;
return !FCR->Value.isDenormal() ||
denormalsEnabledForType(MRI.getType(FCR->VReg), MF);
}
if (MaxDepth == 0)
return false;
switch (Opcode) {
case AMDGPU::G_FMINNUM_IEEE:
case AMDGPU::G_FMAXNUM_IEEE: {
if (Subtarget->supportsMinMaxDenormModes() ||
denormalsEnabledForType(MRI.getType(Reg), MF))
return true;
for (const MachineOperand &MO : llvm::drop_begin(MI->operands()))
if (!isCanonicalized(MO.getReg(), MF, MaxDepth - 1))
return false;
return true;
}
default:
return denormalsEnabledForType(MRI.getType(Reg), MF) &&
isKnownNeverSNaN(Reg, MRI);
}
llvm_unreachable("invalid operation");
}
SDValue SITargetLowering::getCanonicalConstantFP(
SelectionDAG &DAG, const SDLoc &SL, EVT VT, const APFloat &C) const {
if (C.isDenormal() && !denormalsEnabledForType(DAG, VT))
return DAG.getConstantFP(0.0, SL, VT);
if (C.isNaN()) {
APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics());
if (C.isSignaling()) {
return DAG.getConstantFP(CanonicalQNaN, SL, VT);
}
if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt())
return DAG.getConstantFP(CanonicalQNaN, SL, VT);
}
return DAG.getConstantFP(C, SL, VT);
}
static bool vectorEltWillFoldAway(SDValue Op) {
return Op.isUndef() || isa<ConstantFPSDNode>(Op);
}
SDValue SITargetLowering::performFCanonicalizeCombine(
SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
if (N0.isUndef()) {
APFloat QNaN = APFloat::getQNaN(SelectionDAG::EVTToAPFloatSemantics(VT));
return DAG.getConstantFP(QNaN, SDLoc(N), VT);
}
if (ConstantFPSDNode *CFP = isConstOrConstSplatFP(N0)) {
EVT VT = N->getValueType(0);
return getCanonicalConstantFP(DAG, SDLoc(N), VT, CFP->getValueAPF());
}
if (N0.getOpcode() == ISD::BUILD_VECTOR && VT == MVT::v2f16 &&
isTypeLegal(MVT::v2f16)) {
SDLoc SL(N);
SDValue NewElts[2];
SDValue Lo = N0.getOperand(0);
SDValue Hi = N0.getOperand(1);
EVT EltVT = Lo.getValueType();
if (vectorEltWillFoldAway(Lo) || vectorEltWillFoldAway(Hi)) {
for (unsigned I = 0; I != 2; ++I) {
SDValue Op = N0.getOperand(I);
if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
NewElts[I] = getCanonicalConstantFP(DAG, SL, EltVT,
CFP->getValueAPF());
} else if (Op.isUndef()) {
NewElts[I] = Op;
} else {
NewElts[I] = DAG.getNode(ISD::FCANONICALIZE, SL, EltVT, Op);
}
}
if (NewElts[0].isUndef()) {
if (isa<ConstantFPSDNode>(NewElts[1]))
NewElts[0] = isa<ConstantFPSDNode>(NewElts[1]) ?
NewElts[1] : DAG.getConstantFP(0.0f, SL, EltVT);
}
if (NewElts[1].isUndef()) {
NewElts[1] = isa<ConstantFPSDNode>(NewElts[0]) ?
NewElts[0] : DAG.getConstantFP(0.0f, SL, EltVT);
}
return DAG.getBuildVector(VT, SL, NewElts);
}
}
unsigned SrcOpc = N0.getOpcode();
if (SrcOpc == ISD::FMINNUM || SrcOpc == ISD::FMAXNUM) {
auto *CRHS = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));
if (CRHS && N0.hasOneUse()) {
SDLoc SL(N);
SDValue Canon0 = DAG.getNode(ISD::FCANONICALIZE, SL, VT,
N0.getOperand(0));
SDValue Canon1 = getCanonicalConstantFP(DAG, SL, VT, CRHS->getValueAPF());
DCI.AddToWorklist(Canon0.getNode());
return DAG.getNode(N0.getOpcode(), SL, VT, Canon0, Canon1);
}
}
return isCanonicalized(DAG, N0) ? N0 : SDValue();
}
static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
switch (Opc) {
case ISD::FMAXNUM:
case ISD::FMAXNUM_IEEE:
return AMDGPUISD::FMAX3;
case ISD::SMAX:
return AMDGPUISD::SMAX3;
case ISD::UMAX:
return AMDGPUISD::UMAX3;
case ISD::FMINNUM:
case ISD::FMINNUM_IEEE:
return AMDGPUISD::FMIN3;
case ISD::SMIN:
return AMDGPUISD::SMIN3;
case ISD::UMIN:
return AMDGPUISD::UMIN3;
default:
llvm_unreachable("Not a min/max opcode");
}
}
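// min(max(x, K0), K1) with K0 <= K1 folds to med3(x, K0, K1). i16 is handled
// directly only on subtargets with med3_16; otherwise the operands are
// extended to i32, med3 is applied, and the result is truncated back.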
SDValue SITargetLowering::performIntMed3ImmCombine(
SelectionDAG &DAG, const SDLoc &SL,
SDValue Op0, SDValue Op1, bool Signed) const {
ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
if (!K1)
return SDValue();
ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
if (!K0)
return SDValue();
if (Signed) {
if (K0->getAPIntValue().sge(K1->getAPIntValue()))
return SDValue();
} else {
if (K0->getAPIntValue().uge(K1->getAPIntValue()))
return SDValue();
}
EVT VT = K0->getValueType(0);
unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3;
if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) {
return DAG.getNode(Med3Opc, SL, VT,
Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
}
if (VT == MVT::i16) {
MVT NVT = MVT::i32;
unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0));
SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1));
SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1);
SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3);
return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3);
}
return SDValue();
}
static ConstantFPSDNode *getSplatConstantFP(SDValue Op) {
if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
return C;
if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) {
if (ConstantFPSDNode *C = BV->getConstantFPSplatNode())
return C;
}
return nullptr;
}
SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG,
const SDLoc &SL,
SDValue Op0,
SDValue Op1) const {
ConstantFPSDNode *K1 = getSplatConstantFP(Op1);
if (!K1)
return SDValue();
ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1));
if (!K0)
return SDValue();
if (K0->getValueAPF() > K1->getValueAPF())
return SDValue();
const MachineFunction &MF = DAG.getMachineFunction();
const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
EVT VT = Op0.getValueType();
if (Info->getMode().DX10Clamp) {
if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0))
return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0));
}
if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) {
SDValue Var = Op0.getOperand(0);
if (!DAG.isKnownNeverSNaN(Var))
return SDValue();
const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
if ((!K0->hasOneUse() ||
TII->isInlineConstant(K0->getValueAPF().bitcastToAPInt())) &&
(!K1->hasOneUse() ||
TII->isInlineConstant(K1->getValueAPF().bitcastToAPInt()))) {
return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0),
Var, SDValue(K0, 0), SDValue(K1, 0));
}
}
return SDValue();
}
SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
EVT VT = N->getValueType(0);
unsigned Opc = N->getOpcode();
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY &&
!VT.isVector() &&
(VT == MVT::i32 || VT == MVT::f32 ||
((VT == MVT::f16 || VT == MVT::i16) && Subtarget->hasMin3Max3_16()))) {
if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
SDLoc DL(N);
return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
DL,
N->getValueType(0),
Op0.getOperand(0),
Op0.getOperand(1),
Op1);
}
if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
SDLoc DL(N);
return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
DL,
N->getValueType(0),
Op0,
Op1.getOperand(0),
Op1.getOperand(1));
}
}
if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) {
if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true))
return Med3;
}
if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) {
if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false))
return Med3;
}
if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) ||
(Opc == ISD::FMINNUM_IEEE && Op0.getOpcode() == ISD::FMAXNUM_IEEE) ||
(Opc == AMDGPUISD::FMIN_LEGACY &&
Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) &&
(VT == MVT::f32 || VT == MVT::f64 ||
(VT == MVT::f16 && Subtarget->has16BitInsts()) ||
(VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) &&
Op0.hasOneUse()) {
if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1))
return Res;
}
return SDValue();
}
static bool isClampZeroToOne(SDValue A, SDValue B) {
if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) {
if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) {
return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) ||
(CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0));
}
}
return false;
}
SDValue SITargetLowering::performFMed3Combine(SDNode *N,
DAGCombinerInfo &DCI) const {
EVT VT = N->getValueType(0);
SelectionDAG &DAG = DCI.DAG;
SDLoc SL(N);
SDValue Src0 = N->getOperand(0);
SDValue Src1 = N->getOperand(1);
SDValue Src2 = N->getOperand(2);
if (isClampZeroToOne(Src0, Src1)) {
return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2);
}
const MachineFunction &MF = DAG.getMachineFunction();
const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
if (Info->getMode().DX10Clamp) {
if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
std::swap(Src0, Src1);
if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2))
std::swap(Src1, Src2);
if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
std::swap(Src0, Src1);
if (isClampZeroToOne(Src1, Src2))
return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0);
}
return SDValue();
}
SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SDValue Src0 = N->getOperand(0);
SDValue Src1 = N->getOperand(1);
if (Src0.isUndef() && Src1.isUndef())
return DCI.DAG.getUNDEF(N->getValueType(0));
return SDValue();
}
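// Decide whether a dynamically indexed vector extract/insert should be
// expanded into a compare/select chain rather than lowered with indirect
// register indexing: always for sub-dword elements once the vector exceeds
// two dwords and for divergent indices, otherwise only when the estimated
// instruction count stays small.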
bool SITargetLowering::shouldExpandVectorDynExt(unsigned EltSize,
unsigned NumElem,
bool IsDivergentIdx,
const GCNSubtarget *Subtarget) {
if (UseDivergentRegisterIndexing)
return false;
unsigned VecSize = EltSize * NumElem;
if (VecSize <= 64 && EltSize < 32)
return false;
if (EltSize < 32)
return true;
if (IsDivergentIdx)
return true;
unsigned NumInsts = NumElem /* Number of compares */ +
((EltSize + 31) / 32) * NumElem /* Number of cndmasks */;
if (!Subtarget->hasMovrel())
return NumInsts <= 16;
return NumInsts <= 15;
}
bool SITargetLowering::shouldExpandVectorDynExt(SDNode *N) const {
SDValue Idx = N->getOperand(N->getNumOperands() - 1);
if (isa<ConstantSDNode>(Idx))
return false;
SDValue Vec = N->getOperand(0);
EVT VecVT = Vec.getValueType();
EVT EltVT = VecVT.getVectorElementType();
unsigned EltSize = EltVT.getSizeInBits();
unsigned NumElem = VecVT.getVectorNumElements();
return SITargetLowering::shouldExpandVectorDynExt(
EltSize, NumElem, Idx->isDivergent(), getSubtarget());
}
SDValue SITargetLowering::performExtractVectorEltCombine(
SDNode *N, DAGCombinerInfo &DCI) const {
SDValue Vec = N->getOperand(0);
SelectionDAG &DAG = DCI.DAG;
EVT VecVT = Vec.getValueType();
EVT EltVT = VecVT.getVectorElementType();
if ((Vec.getOpcode() == ISD::FNEG ||
Vec.getOpcode() == ISD::FABS) && allUsesHaveSourceMods(N)) {
SDLoc SL(N);
EVT EltVT = N->getValueType(0);
SDValue Idx = N->getOperand(1);
SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
Vec.getOperand(0), Idx);
return DAG.getNode(Vec.getOpcode(), SL, EltVT, Elt);
}
if (Vec.hasOneUse() && DCI.isBeforeLegalize()) {
SDLoc SL(N);
EVT EltVT = N->getValueType(0);
SDValue Idx = N->getOperand(1);
unsigned Opc = Vec.getOpcode();
switch(Opc) {
default:
break;
case ISD::FADD:
case ISD::FSUB:
case ISD::FMUL:
case ISD::ADD:
case ISD::UMIN:
case ISD::UMAX:
case ISD::SMIN:
case ISD::SMAX:
case ISD::FMAXNUM:
case ISD::FMINNUM:
case ISD::FMAXNUM_IEEE:
case ISD::FMINNUM_IEEE: {
SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
Vec.getOperand(0), Idx);
SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
Vec.getOperand(1), Idx);
DCI.AddToWorklist(Elt0.getNode());
DCI.AddToWorklist(Elt1.getNode());
return DAG.getNode(Opc, SL, EltVT, Elt0, Elt1, Vec->getFlags());
}
}
}
unsigned VecSize = VecVT.getSizeInBits();
unsigned EltSize = EltVT.getSizeInBits();
if (shouldExpandVectorDynExt(N)) {
SDLoc SL(N);
SDValue Idx = N->getOperand(1);
SDValue V;
for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) {
SDValue IC = DAG.getVectorIdxConstant(I, SL);
SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC);
if (I == 0)
V = Elt;
else
V = DAG.getSelectCC(SL, Idx, IC, Elt, V, ISD::SETEQ);
}
return V;
}
if (!DCI.isBeforeLegalize())
return SDValue();
auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (isa<MemSDNode>(Vec) &&
EltSize <= 16 &&
EltVT.isByteSized() &&
VecSize > 32 &&
VecSize % 32 == 0 &&
Idx) {
EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT);
unsigned BitIndex = Idx->getZExtValue() * EltSize;
unsigned EltIdx = BitIndex / 32;
unsigned LeftoverBitIdx = BitIndex % 32;
SDLoc SL(N);
SDValue Cast = DAG.getNode(ISD::BITCAST, SL, NewVT, Vec);
DCI.AddToWorklist(Cast.getNode());
SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Cast,
DAG.getConstant(EltIdx, SL, MVT::i32));
DCI.AddToWorklist(Elt.getNode());
SDValue Srl = DAG.getNode(ISD::SRL, SL, MVT::i32, Elt,
DAG.getConstant(LeftoverBitIdx, SL, MVT::i32));
DCI.AddToWorklist(Srl.getNode());
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, EltVT.changeTypeToInteger(), Srl);
DCI.AddToWorklist(Trunc.getNode());
return DAG.getNode(ISD::BITCAST, SL, EltVT, Trunc);
}
return SDValue();
}
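// Expand a dynamic INSERT_VECTOR_ELT into a BUILD_VECTOR of per-lane selects
// when shouldExpandVectorDynExt() says the select sequence is cheaper than
// indirect indexing.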
SDValue
SITargetLowering::performInsertVectorEltCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SDValue Vec = N->getOperand(0);
SDValue Idx = N->getOperand(2);
EVT VecVT = Vec.getValueType();
EVT EltVT = VecVT.getVectorElementType();
if (!shouldExpandVectorDynExt(N))
return SDValue();
SelectionDAG &DAG = DCI.DAG;
SDLoc SL(N);
SDValue Ins = N->getOperand(1);
EVT IdxVT = Idx.getValueType();
SmallVector<SDValue, 16> Ops;
for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) {
SDValue IC = DAG.getConstant(I, SL, IdxVT);
SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC);
SDValue V = DAG.getSelectCC(SL, Idx, IC, Ins, Elt, ISD::SETEQ);
Ops.push_back(V);
}
return DAG.getBuildVector(VecVT, SL, Ops);
}
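// Choose the opcode for contracting a mul/add pair: FMAD when the type
// flushes denormals and FMAD is legal, FMA when contraction is allowed and
// profitable on this subtarget, or 0 if no fusion should be done.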
unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
const SDNode *N0,
const SDNode *N1) const {
EVT VT = N0->getValueType(0);
if (((VT == MVT::f32 && !hasFP32Denormals(DAG.getMachineFunction())) ||
(VT == MVT::f16 && !hasFP64FP16Denormals(DAG.getMachineFunction()) &&
getSubtarget()->hasMadF16())) &&
isOperationLegal(ISD::FMAD, VT))
return ISD::FMAD;
const TargetOptions &Options = DAG.getTarget().Options;
if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
(N0->getFlags().hasAllowContract() &&
N1->getFlags().hasAllowContract())) &&
isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
return ISD::FMA;
}
return 0;
}
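// Reassociate (op (op a, b), c) for 32/64-bit integer ops so that the two
// uniform operands are combined first and can stay in scalar instructions,
// leaving only the final op to consume the divergent value.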
SDValue SITargetLowering::reassociateScalarOps(SDNode *N,
SelectionDAG &DAG) const {
EVT VT = N->getValueType(0);
if (VT != MVT::i32 && VT != MVT::i64)
return SDValue();
if (DAG.isBaseWithConstantOffset(SDValue(N, 0)))
return SDValue();
unsigned Opc = N->getOpcode();
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
if (!(Op0->isDivergent() ^ Op1->isDivergent()))
return SDValue();
if (Op0->isDivergent())
std::swap(Op0, Op1);
if (Op1.getOpcode() != Opc || !Op1.hasOneUse())
return SDValue();
SDValue Op2 = Op1.getOperand(1);
Op1 = Op1.getOperand(0);
if (!(Op1->isDivergent() ^ Op2->isDivergent()))
return SDValue();
if (Op1->isDivergent())
std::swap(Op1, Op2);
SDLoc SL(N);
SDValue Add1 = DAG.getNode(Opc, SL, VT, Op0, Op1);
return DAG.getNode(Opc, SL, VT, Add1, Op2);
}
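// Emit a MAD_I64_I32 / MAD_U64_U32 node computing N0 * N1 + N2 in 64 bits
// (plus an i1 carry-out) and truncate the result to the requested type.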
static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL,
EVT VT,
SDValue N0, SDValue N1, SDValue N2,
bool Signed) {
unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32;
SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1);
SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2);
return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad);
}
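// Fold (add (mul x, y), z) into v_mad_i64_i32 / v_mad_u64_u32 when the
// multiply's operands are known to fit in 32 bits; for operands that do not,
// the high 32 bits of the result are patched up with extra 32-bit multiplies
// and adds.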
SDValue SITargetLowering::tryFoldToMad64_32(SDNode *N,
DAGCombinerInfo &DCI) const {
assert(N->getOpcode() == ISD::ADD);
SelectionDAG &DAG = DCI.DAG;
EVT VT = N->getValueType(0);
SDLoc SL(N);
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
if (VT.isVector())
return SDValue();
if (!N->isDivergent() && Subtarget->hasSMulHi())
return SDValue();
unsigned NumBits = VT.getScalarSizeInBits();
if (NumBits <= 32 || NumBits > 64)
return SDValue();
if (LHS.getOpcode() != ISD::MUL) {
assert(RHS.getOpcode() == ISD::MUL);
std::swap(LHS, RHS);
}
if (!Subtarget->hasFullRate64Ops()) {
unsigned NumUsers = 0;
for (SDNode *Use : LHS->uses()) {
if (Use->getOpcode() != ISD::ADD)
return SDValue();
++NumUsers;
if (NumUsers >= 3)
return SDValue();
}
}
SDValue MulLHS = LHS.getOperand(0);
SDValue MulRHS = LHS.getOperand(1);
SDValue AddRHS = RHS;
bool MulLHSUnsigned32 = numBitsUnsigned(MulLHS, DAG) <= 32;
bool MulRHSUnsigned32 = numBitsUnsigned(MulRHS, DAG) <= 32;
bool MulSignedLo = false;
if (!MulLHSUnsigned32 || !MulRHSUnsigned32) {
MulSignedLo = numBitsSigned(MulLHS, DAG) <= 32 &&
numBitsSigned(MulRHS, DAG) <= 32;
}
if (VT != MVT::i64) {
MulLHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i64, MulLHS);
MulRHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i64, MulRHS);
AddRHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i64, AddRHS);
}
SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
SDValue One = DAG.getConstant(1, SL, MVT::i32);
auto MulLHSLo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, MulLHS);
auto MulRHSLo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, MulRHS);
SDValue Accum =
getMad64_32(DAG, SL, MVT::i64, MulLHSLo, MulRHSLo, AddRHS, MulSignedLo);
if (!MulSignedLo && (!MulLHSUnsigned32 || !MulRHSUnsigned32)) {
auto AccumLo = DAG.getNode(ISD::EXTRACT_ELEMENT, SL, MVT::i32, Accum, Zero);
auto AccumHi = DAG.getNode(ISD::EXTRACT_ELEMENT, SL, MVT::i32, Accum, One);
if (!MulLHSUnsigned32) {
auto MulLHSHi =
DAG.getNode(ISD::EXTRACT_ELEMENT, SL, MVT::i32, MulLHS, One);
SDValue MulHi = DAG.getNode(ISD::MUL, SL, MVT::i32, MulLHSHi, MulRHSLo);
AccumHi = DAG.getNode(ISD::ADD, SL, MVT::i32, MulHi, AccumHi);
}
if (!MulRHSUnsigned32) {
auto MulRHSHi =
DAG.getNode(ISD::EXTRACT_ELEMENT, SL, MVT::i32, MulRHS, One);
SDValue MulHi = DAG.getNode(ISD::MUL, SL, MVT::i32, MulLHSLo, MulRHSHi);
AccumHi = DAG.getNode(ISD::ADD, SL, MVT::i32, MulHi, AccumHi);
}
Accum = DAG.getBuildVector(MVT::v2i32, SL, {AccumLo, AccumHi});
Accum = DAG.getBitcast(MVT::i64, Accum);
}
if (VT != MVT::i64)
Accum = DAG.getNode(ISD::TRUNCATE, SL, VT, Accum);
return Accum;
}
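// ADD combines: attempt the mad64_32 fold for adds of multiplies, try to
// reassociate mixed uniform/divergent scalar operands, and fold adds of
// zero/sign-extended SGPR conditions or of an addcarry-with-zero into
// carry-consuming ADDCARRY/SUBCARRY nodes.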
SDValue SITargetLowering::performAddCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
EVT VT = N->getValueType(0);
SDLoc SL(N);
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
if (LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL) {
if (Subtarget->hasMad64_32()) {
if (SDValue Folded = tryFoldToMad64_32(N, DCI))
return Folded;
}
return SDValue();
}
if (SDValue V = reassociateScalarOps(N, DAG)) {
return V;
}
if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG())
return SDValue();
unsigned Opc = LHS.getOpcode();
if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY)
std::swap(RHS, LHS);
Opc = RHS.getOpcode();
switch (Opc) {
default: break;
case ISD::ZERO_EXTEND:
case ISD::SIGN_EXTEND:
case ISD::ANY_EXTEND: {
auto Cond = RHS.getOperand(0);
if (!isBoolSGPR(Cond))
break;
SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY;
return DAG.getNode(Opc, SL, VTList, Args);
}
case ISD::ADDCARRY: {
auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
if (!C || C->getZExtValue() != 0) break;
SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) };
return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args);
}
}
return SDValue();
}
SDValue SITargetLowering::performSubCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
EVT VT = N->getValueType(0);
if (VT != MVT::i32)
return SDValue();
SDLoc SL(N);
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
unsigned Opc = RHS.getOpcode();
switch (Opc) {
default: break;
case ISD::ZERO_EXTEND:
case ISD::SIGN_EXTEND:
case ISD::ANY_EXTEND: {
auto Cond = RHS.getOperand(0);
if (!isBoolSGPR(Cond))
break;
SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::ADDCARRY : ISD::SUBCARRY;
return DAG.getNode(Opc, SL, VTList, Args);
}
}
if (LHS.getOpcode() == ISD::SUBCARRY) {
auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
if (!C || !C->isZero())
return SDValue();
SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) };
return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args);
}
return SDValue();
}
SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
if (N->getValueType(0) != MVT::i32)
return SDValue();
auto C = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (!C || C->getZExtValue() != 0)
return SDValue();
SelectionDAG &DAG = DCI.DAG;
SDValue LHS = N->getOperand(0);
unsigned LHSOpc = LHS.getOpcode();
unsigned Opc = N->getOpcode();
if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) ||
(LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) {
SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) };
return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args);
}
return SDValue();
}
SDValue SITargetLowering::performFAddCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
return SDValue();
SelectionDAG &DAG = DCI.DAG;
EVT VT = N->getValueType(0);
SDLoc SL(N);
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
if (LHS.getOpcode() == ISD::FADD) {
SDValue A = LHS.getOperand(0);
if (A == LHS.getOperand(1)) {
unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
if (FusedOp != 0) {
const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
return DAG.getNode(FusedOp, SL, VT, A, Two, RHS);
}
}
}
if (RHS.getOpcode() == ISD::FADD) {
SDValue A = RHS.getOperand(0);
if (A == RHS.getOperand(1)) {
unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
if (FusedOp != 0) {
const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
return DAG.getNode(FusedOp, SL, VT, A, Two, LHS);
}
}
}
return SDValue();
}
SDValue SITargetLowering::performFSubCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
return SDValue();
SelectionDAG &DAG = DCI.DAG;
SDLoc SL(N);
EVT VT = N->getValueType(0);
assert(!VT.isVector());
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
if (LHS.getOpcode() == ISD::FADD) {
SDValue A = LHS.getOperand(0);
if (A == LHS.getOperand(1)) {
unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
if (FusedOp != 0) {
const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS);
}
}
}
if (RHS.getOpcode() == ISD::FADD) {
SDValue A = RHS.getOperand(0);
if (A == RHS.getOperand(1)) {
unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
if (FusedOp != 0) {
const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT);
return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS);
}
}
}
return SDValue();
}
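// Match fma(fpext(a), fpext(b), fma(fpext(c), fpext(d), acc)) where the f16
// operands are extracted from the same pair of v2f16 vectors at the two
// different lanes, and emit a single FDOT2 when the dot-product instructions
// are available.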
SDValue SITargetLowering::performFMACombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
EVT VT = N->getValueType(0);
SDLoc SL(N);
if (!Subtarget->hasDot7Insts() || VT != MVT::f32)
return SDValue();
SDValue Op1 = N->getOperand(0);
SDValue Op2 = N->getOperand(1);
SDValue FMA = N->getOperand(2);
if (FMA.getOpcode() != ISD::FMA ||
Op1.getOpcode() != ISD::FP_EXTEND ||
Op2.getOpcode() != ISD::FP_EXTEND)
return SDValue();
const TargetOptions &Options = DAG.getTarget().Options;
if (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
(N->getFlags().hasAllowContract() &&
FMA->getFlags().hasAllowContract())) {
Op1 = Op1.getOperand(0);
Op2 = Op2.getOperand(0);
if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
Op2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
return SDValue();
SDValue Vec1 = Op1.getOperand(0);
SDValue Idx1 = Op1.getOperand(1);
SDValue Vec2 = Op2.getOperand(0);
SDValue FMAOp1 = FMA.getOperand(0);
SDValue FMAOp2 = FMA.getOperand(1);
SDValue FMAAcc = FMA.getOperand(2);
if (FMAOp1.getOpcode() != ISD::FP_EXTEND ||
FMAOp2.getOpcode() != ISD::FP_EXTEND)
return SDValue();
FMAOp1 = FMAOp1.getOperand(0);
FMAOp2 = FMAOp2.getOperand(0);
if (FMAOp1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
FMAOp2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
return SDValue();
SDValue Vec3 = FMAOp1.getOperand(0);
SDValue Vec4 = FMAOp2.getOperand(0);
SDValue Idx2 = FMAOp1.getOperand(1);
if (Idx1 != Op2.getOperand(1) || Idx2 != FMAOp2.getOperand(1) ||
Idx1 == Idx2)
return SDValue();
if (Vec1 == Vec2 || Vec3 == Vec4)
return SDValue();
if (Vec1.getValueType() != MVT::v2f16 || Vec2.getValueType() != MVT::v2f16)
return SDValue();
if ((Vec1 == Vec3 && Vec2 == Vec4) ||
(Vec1 == Vec4 && Vec2 == Vec3)) {
return DAG.getNode(AMDGPUISD::FDOT2, SL, MVT::f32, Vec1, Vec2, FMAAcc,
DAG.getTargetConstant(0, SL, MVT::i1));
}
}
return SDValue();
}
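// SETCC combines: move a constant operand to the RHS, fold comparisons of
// sign-extended or select-of-constants SGPR booleans back to the boolean
// (inverted where needed), and turn fabs(x) ==/!= +infinity into an
// FP_CLASS (v_cmp_class style) test.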
SDValue SITargetLowering::performSetCCCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
SDLoc SL(N);
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
EVT VT = LHS.getValueType();
ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
auto CRHS = dyn_cast<ConstantSDNode>(RHS);
if (!CRHS) {
CRHS = dyn_cast<ConstantSDNode>(LHS);
if (CRHS) {
std::swap(LHS, RHS);
CC = getSetCCSwappedOperands(CC);
}
}
if (CRHS) {
if (VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND &&
isBoolSGPR(LHS.getOperand(0))) {
if ((CRHS->isAllOnes() &&
(CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) ||
(CRHS->isZero() &&
(CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE)))
return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
DAG.getConstant(-1, SL, MVT::i1));
if ((CRHS->isAllOnes() &&
(CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) ||
(CRHS->isZero() &&
(CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT)))
return LHS.getOperand(0);
}
const APInt &CRHSVal = CRHS->getAPIntValue();
if ((CC == ISD::SETEQ || CC == ISD::SETNE) &&
LHS.getOpcode() == ISD::SELECT &&
isa<ConstantSDNode>(LHS.getOperand(1)) &&
isa<ConstantSDNode>(LHS.getOperand(2)) &&
LHS.getConstantOperandVal(1) != LHS.getConstantOperandVal(2) &&
isBoolSGPR(LHS.getOperand(0))) {
const APInt &CT = LHS.getConstantOperandAPInt(1);
const APInt &CF = LHS.getConstantOperandAPInt(2);
if ((CF == CRHSVal && CC == ISD::SETEQ) ||
(CT == CRHSVal && CC == ISD::SETNE))
return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
DAG.getConstant(-1, SL, MVT::i1));
if ((CF == CRHSVal && CC == ISD::SETNE) ||
(CT == CRHSVal && CC == ISD::SETEQ))
return LHS.getOperand(0);
}
}
if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() &&
VT != MVT::f16))
return SDValue();
if ((CC == ISD::SETOEQ || CC == ISD::SETONE) && LHS.getOpcode() == ISD::FABS) {
const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
if (!CRHS)
return SDValue();
const APFloat &APF = CRHS->getValueAPF();
if (APF.isInfinity() && !APF.isNegative()) {
const unsigned IsInfMask = SIInstrFlags::P_INFINITY |
SIInstrFlags::N_INFINITY;
const unsigned IsFiniteMask = SIInstrFlags::N_ZERO |
SIInstrFlags::P_ZERO |
SIInstrFlags::N_NORMAL |
SIInstrFlags::P_NORMAL |
SIInstrFlags::N_SUBNORMAL |
SIInstrFlags::P_SUBNORMAL;
unsigned Mask = CC == ISD::SETOEQ ? IsInfMask : IsFiniteMask;
return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
DAG.getConstant(Mask, SL, MVT::i32));
}
}
return SDValue();
}
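// For cvt_f32_ubyteN whose source comes through a constant shift, fold the
// shift amount into the byte index; otherwise shrink the source using
// demanded-bits analysis on the selected byte.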
SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
SDLoc SL(N);
unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;
SDValue Src = N->getOperand(0);
SDValue Shift = N->getOperand(0);
if (Shift.getOpcode() == ISD::ZERO_EXTEND)
Shift = Shift.getOperand(0);
if (Shift.getOpcode() == ISD::SRL || Shift.getOpcode() == ISD::SHL) {
if (auto *C = dyn_cast<ConstantSDNode>(Shift.getOperand(1))) {
SDValue Shifted = DAG.getZExtOrTrunc(Shift.getOperand(0),
SDLoc(Shift.getOperand(0)), MVT::i32);
unsigned ShiftOffset = 8 * Offset;
if (Shift.getOpcode() == ISD::SHL)
ShiftOffset -= C->getZExtValue();
else
ShiftOffset += C->getZExtValue();
if (ShiftOffset < 32 && (ShiftOffset % 8) == 0) {
return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + ShiftOffset / 8, SL,
MVT::f32, Shifted);
}
}
}
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
APInt DemandedBits = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);
if (TLI.SimplifyDemandedBits(Src, DemandedBits, DCI)) {
if (N->getOpcode() != ISD::DELETED_NODE)
DCI.AddToWorklist(N);
return SDValue(N, 0);
}
if (SDValue DemandedSrc =
TLI.SimplifyMultipleUseDemandedBits(Src, DemandedBits, DAG))
return DAG.getNode(N->getOpcode(), SL, MVT::f32, DemandedSrc);
return SDValue();
}
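// Constant-fold CLAMP of an FP constant: negative inputs (and NaN when the
// DX10 clamp mode is enabled) fold to +0.0, inputs greater than 1.0 fold to
// 1.0, and anything else folds to the constant itself.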
SDValue SITargetLowering::performClampCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
ConstantFPSDNode *CSrc = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
if (!CSrc)
return SDValue();
const MachineFunction &MF = DCI.DAG.getMachineFunction();
const APFloat &F = CSrc->getValueAPF();
APFloat Zero = APFloat::getZero(F.getSemantics());
if (F < Zero ||
(F.isNaN() && MF.getInfo<SIMachineFunctionInfo>()->getMode().DX10Clamp)) {
return DCI.DAG.getConstantFP(Zero, SDLoc(N), N->getValueType(0));
}
APFloat One(F.getSemantics(), "1.0");
if (F > One)
return DCI.DAG.getConstantFP(One, SDLoc(N), N->getValueType(0));
return SDValue(CSrc, 0);
}
SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
return SDValue();
switch (N->getOpcode()) {
case ISD::ADD:
return performAddCombine(N, DCI);
case ISD::SUB:
return performSubCombine(N, DCI);
case ISD::ADDCARRY:
case ISD::SUBCARRY:
return performAddCarrySubCarryCombine(N, DCI);
case ISD::FADD:
return performFAddCombine(N, DCI);
case ISD::FSUB:
return performFSubCombine(N, DCI);
case ISD::SETCC:
return performSetCCCombine(N, DCI);
case ISD::FMAXNUM:
case ISD::FMINNUM:
case ISD::FMAXNUM_IEEE:
case ISD::FMINNUM_IEEE:
case ISD::SMAX:
case ISD::SMIN:
case ISD::UMAX:
case ISD::UMIN:
case AMDGPUISD::FMIN_LEGACY:
case AMDGPUISD::FMAX_LEGACY:
return performMinMaxCombine(N, DCI);
case ISD::FMA:
return performFMACombine(N, DCI);
case ISD::AND:
return performAndCombine(N, DCI);
case ISD::OR:
return performOrCombine(N, DCI);
case ISD::XOR:
return performXorCombine(N, DCI);
case ISD::ZERO_EXTEND:
return performZeroExtendCombine(N, DCI);
case ISD::SIGN_EXTEND_INREG:
return performSignExtendInRegCombine(N, DCI);
case AMDGPUISD::FP_CLASS:
return performClassCombine(N, DCI);
case ISD::FCANONICALIZE:
return performFCanonicalizeCombine(N, DCI);
case AMDGPUISD::RCP:
return performRcpCombine(N, DCI);
case AMDGPUISD::FRACT:
case AMDGPUISD::RSQ:
case AMDGPUISD::RCP_LEGACY:
case AMDGPUISD::RCP_IFLAG:
case AMDGPUISD::RSQ_CLAMP:
case AMDGPUISD::LDEXP: {
SDValue Src = N->getOperand(0);
if (Src.isUndef())
return Src;
break;
}
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP:
return performUCharToFloatCombine(N, DCI);
case AMDGPUISD::CVT_F32_UBYTE0:
case AMDGPUISD::CVT_F32_UBYTE1:
case AMDGPUISD::CVT_F32_UBYTE2:
case AMDGPUISD::CVT_F32_UBYTE3:
return performCvtF32UByteNCombine(N, DCI);
case AMDGPUISD::FMED3:
return performFMed3Combine(N, DCI);
case AMDGPUISD::CVT_PKRTZ_F16_F32:
return performCvtPkRTZCombine(N, DCI);
case AMDGPUISD::CLAMP:
return performClampCombine(N, DCI);
case ISD::SCALAR_TO_VECTOR: {
SelectionDAG &DAG = DCI.DAG;
EVT VT = N->getValueType(0);
if (VT == MVT::v2i16 || VT == MVT::v2f16) {
SDLoc SL(N);
SDValue Src = N->getOperand(0);
EVT EltVT = Src.getValueType();
if (EltVT == MVT::f16)
Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src);
SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src);
return DAG.getNode(ISD::BITCAST, SL, VT, Ext);
}
break;
}
case ISD::EXTRACT_VECTOR_ELT:
return performExtractVectorEltCombine(N, DCI);
case ISD::INSERT_VECTOR_ELT:
return performInsertVectorEltCombine(N, DCI);
case ISD::LOAD: {
if (SDValue Widened = widenLoad(cast<LoadSDNode>(N), DCI))
return Widened;
LLVM_FALLTHROUGH;
}
default: {
if (!DCI.isBeforeLegalize()) {
if (MemSDNode *MemNode = dyn_cast<MemSDNode>(N))
return performMemSDNodeCombine(MemNode, DCI);
}
break;
}
}
return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
}
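// Map an EXTRACT_SUBREG sub-register index onto the corresponding MIMG
// result lane, or ~0u if the index does not name a single 32-bit lane.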
static unsigned SubIdx2Lane(unsigned Idx) {
switch (Idx) {
default: return ~0u;
case AMDGPU::sub0: return 0;
case AMDGPU::sub1: return 1;
case AMDGPU::sub2: return 2;
case AMDGPU::sub3: return 3;
case AMDGPU::sub4: return 4;
}
}
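// Shrink the dmask of a MIMG load so it only covers the lanes that are
// actually extracted by its users, switching to the equivalent narrower MIMG
// opcode and rewiring the EXTRACT_SUBREG users to the new result. Named
// operand indices are biased by -1 because the MachineSDNode operand list
// does not include the vdata result.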
SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node,
SelectionDAG &DAG) const {
unsigned Opcode = Node->getMachineOpcode();
int D16Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::d16) - 1;
if (D16Idx >= 0 && Node->getConstantOperandVal(D16Idx))
return Node;
SDNode *Users[5] = { nullptr };
unsigned Lane = 0;
unsigned DmaskIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) - 1;
unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
unsigned NewDmask = 0;
unsigned TFEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::tfe) - 1;
unsigned LWEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::lwe) - 1;
bool UsesTFC = (int(TFEIdx) >= 0 && Node->getConstantOperandVal(TFEIdx)) ||
Node->getConstantOperandVal(LWEIdx);
unsigned TFCLane = 0;
bool HasChain = Node->getNumValues() > 1;
if (OldDmask == 0) {
return Node;
}
unsigned OldBitsSet = countPopulation(OldDmask);
if (UsesTFC) {
TFCLane = OldBitsSet;
}
for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
I != E; ++I) {
if (I.getUse().getResNo() != 0)
continue;
if (!I->isMachineOpcode() ||
I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
return Node;
Lane = SubIdx2Lane(I->getConstantOperandVal(1));
if (Lane == ~0u)
return Node;
if (UsesTFC && Lane == TFCLane) {
Users[Lane] = *I;
} else {
unsigned Comp;
for (unsigned i = 0, Dmask = OldDmask; (i <= Lane) && (Dmask != 0); i++) {
Comp = countTrailingZeros(Dmask);
Dmask &= ~(1 << Comp);
}
if (Users[Lane])
return Node;
Users[Lane] = *I;
NewDmask |= 1 << Comp;
}
}
bool NoChannels = !NewDmask;
if (NoChannels) {
if (!UsesTFC) {
return Node;
}
if (OldBitsSet == 1)
return Node;
NewDmask = 1;
}
if (NewDmask == OldDmask)
return Node;
unsigned BitsSet = countPopulation(NewDmask);
unsigned NewChannels = BitsSet + UsesTFC;
int NewOpcode =
AMDGPU::getMaskedMIMGOp(Node->getMachineOpcode(), NewChannels);
assert(NewOpcode != -1 &&
NewOpcode != static_cast<int>(Node->getMachineOpcode()) &&
"failed to find equivalent MIMG op");
SmallVector<SDValue, 12> Ops;
Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());
MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT();
MVT ResultVT = NewChannels == 1 ?
SVT : MVT::getVectorVT(SVT, NewChannels == 3 ? 4 :
NewChannels == 5 ? 8 : NewChannels);
SDVTList NewVTList = HasChain ?
DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT);
MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node),
NewVTList, Ops);
if (HasChain) {
DAG.setNodeMemRefs(NewNode, Node->memoperands());
DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1));
}
if (NewChannels == 1) {
assert(Node->hasNUsesOfValue(1, 0));
SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY,
SDLoc(Node), Users[Lane]->getValueType(0),
SDValue(NewNode, 0));
DAG.ReplaceAllUsesWith(Users[Lane], Copy);
return nullptr;
}
for (unsigned i = 0, Idx = AMDGPU::sub0; i < 5; ++i) {
SDNode *User = Users[i];
if (!User) {
if (i || !NoChannels)
continue;
} else {
SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op);
}
switch (Idx) {
default: break;
case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
case AMDGPU::sub3: Idx = AMDGPU::sub4; break;
}
}
DAG.RemoveDeadNode(Node);
return nullptr;
}
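// Frame indices may be wrapped in an AssertZext; look through it.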
static bool isFrameIndexOp(SDValue Op) {
if (Op.getOpcode() == ISD::AssertZext)
Op = Op.getOperand(0);
return isa<FrameIndexSDNode>(Op);
}
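// Post-ISel legalization of target-independent nodes: route CopyToReg of an
// i1 value into a physical register through a VReg_1 virtual register, and
// materialize frame-index operands with S_MOV_B32.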
SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
SelectionDAG &DAG) const {
if (Node->getOpcode() == ISD::CopyToReg) {
RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
SDValue SrcVal = Node->getOperand(2);
if (SrcVal.getValueType() == MVT::i1 && DestReg->getReg().isPhysical()) {
SDLoc SL(Node);
MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
SDValue VReg = DAG.getRegister(
MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);
SDNode *Glued = Node->getGluedNode();
SDValue ToVReg
= DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
SDValue ToResultReg
= DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
VReg, ToVReg.getValue(1));
DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
DAG.RemoveDeadNode(Node);
return ToResultReg.getNode();
}
}
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
if (!isFrameIndexOp(Node->getOperand(i))) {
Ops.push_back(Node->getOperand(i));
continue;
}
SDLoc DL(Node);
Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
Node->getOperand(i).getValueType(),
Node->getOperand(i)), 0));
}
return DAG.UpdateNodeOperands(Node, Ops);
}
SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
SelectionDAG &DAG) const {
const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
unsigned Opcode = Node->getMachineOpcode();
if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
!TII->isGather4(Opcode) &&
AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) != -1) {
return adjustWritemask(Node, DAG);
}
if (Opcode == AMDGPU::INSERT_SUBREG ||
Opcode == AMDGPU::REG_SEQUENCE) {
legalizeTargetIndependentNode(Node, DAG);
return Node;
}
switch (Opcode) {
case AMDGPU::V_DIV_SCALE_F32_e64:
case AMDGPU::V_DIV_SCALE_F64_e64: {
SDValue Src0 = Node->getOperand(1);
SDValue Src1 = Node->getOperand(3);
SDValue Src2 = Node->getOperand(5);
if ((Src0.isMachineOpcode() &&
Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) &&
(Src0 == Src1 || Src0 == Src2))
break;
MVT VT = Src0.getValueType().getSimpleVT();
const TargetRegisterClass *RC =
getRegClassFor(VT, Src0.getNode()->isDivergent());
MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT);
SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node),
UndefReg, Src0, SDValue());
if (Src0.isMachineOpcode() &&
Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
if (Src1.isMachineOpcode() &&
Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
Src0 = Src1;
else if (Src2.isMachineOpcode() &&
Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
Src0 = Src2;
else {
assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF);
Src0 = UndefReg;
Src1 = UndefReg;
}
} else
break;
SmallVector<SDValue, 9> Ops(Node->op_begin(), Node->op_end());
Ops[1] = Src0;
Ops[3] = Src1;
Ops[5] = Src2;
Ops.push_back(ImpDef.getValue(1));
return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
}
default:
break;
}
return Node;
}
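// When TFE or LWE is set on a MIMG instruction, the hardware appends a
// status dword and may leave untouched lanes of the result, so the
// destination register is pre-initialized with zeroes (every written lane
// under strict PRT-null mode, otherwise just the final lane) ahead of the
// image instruction.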
void SITargetLowering::AddIMGInit(MachineInstr &MI) const {
const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
const SIRegisterInfo &TRI = TII->getRegisterInfo();
MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
MachineBasicBlock &MBB = *MI.getParent();
MachineOperand *TFE = TII->getNamedOperand(MI, AMDGPU::OpName::tfe);
MachineOperand *LWE = TII->getNamedOperand(MI, AMDGPU::OpName::lwe);
MachineOperand *D16 = TII->getNamedOperand(MI, AMDGPU::OpName::d16);
if (!TFE && !LWE) return;
unsigned TFEVal = TFE ? TFE->getImm() : 0;
// Only one of TFE/LWE is guaranteed to be present here, so guard the read.
unsigned LWEVal = LWE ? LWE->getImm() : 0;
unsigned D16Val = D16 ? D16->getImm() : 0;
if (!TFEVal && !LWEVal)
return;
const DebugLoc &DL = MI.getDebugLoc();
int DstIdx =
AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata);
MachineOperand *MO_Dmask = TII->getNamedOperand(MI, AMDGPU::OpName::dmask);
assert(MO_Dmask && "Expected dmask operand in instruction");
unsigned dmask = MO_Dmask->getImm();
unsigned ActiveLanes = TII->isGather4(MI) ? 4 : countPopulation(dmask);
bool Packed = !Subtarget->hasUnpackedD16VMem();
unsigned InitIdx =
D16Val && Packed ? ((ActiveLanes + 1) >> 1) + 1 : ActiveLanes + 1;
uint32_t DstSize = TRI.getRegSizeInBits(*TII->getOpRegClass(MI, DstIdx)) / 32;
if (DstSize < InitIdx)
return;
Register PrevDst = MRI.createVirtualRegister(TII->getOpRegClass(MI, DstIdx));
unsigned NewDst = 0;
unsigned SizeLeft = Subtarget->usePRTStrictNull() ? InitIdx : 1;
unsigned CurrIdx = Subtarget->usePRTStrictNull() ? 0 : (InitIdx - 1);
BuildMI(MBB, MI, DL, TII->get(AMDGPU::IMPLICIT_DEF), PrevDst);
for (; SizeLeft; SizeLeft--, CurrIdx++) {
NewDst = MRI.createVirtualRegister(TII->getOpRegClass(MI, DstIdx));
Register SubReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), SubReg)
.addImm(0);
BuildMI(MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewDst)
.addReg(PrevDst)
.addReg(SubReg)
.addImm(SIRegisterInfo::getSubRegFromChannel(CurrIdx));
PrevDst = NewDst;
}
MI.addOperand(MachineOperand::CreateReg(NewDst, false, true));
MI.tieOperands(DstIdx, MI.getNumOperands() - 1);
}
void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
SDNode *Node) const {
const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
if (TII->isVOP3(MI.getOpcode())) {
TII->legalizeOperandsVOP3(MRI, MI);
if (MI.getDesc().OpInfo) {
unsigned Opc = MI.getOpcode();
const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
for (auto I : { AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) }) {
if (I == -1)
break;
MachineOperand &Op = MI.getOperand(I);
if (!Op.isReg() || !Op.getReg().isVirtual())
continue;
auto *RC = TRI->getRegClassForReg(MRI, Op.getReg());
if (!TRI->hasAGPRs(RC))
continue;
auto *Src = MRI.getUniqueVRegDef(Op.getReg());
if (!Src || !Src->isCopy() ||
!TRI->isSGPRReg(MRI, Src->getOperand(1).getReg()))
continue;
auto *NewRC = TRI->getEquivalentVGPRClass(RC);
MRI.setRegClass(Op.getReg(), NewRC);
}
if (auto *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2)) {
if (Src2->isReg() && Src2->getReg().isVirtual()) {
auto *RC = TRI->getRegClassForReg(MRI, Src2->getReg());
if (TRI->isVectorSuperClass(RC)) {
auto *NewRC = TRI->getEquivalentAGPRClass(RC);
MRI.setRegClass(Src2->getReg(), NewRC);
if (Src2->isTied())
MRI.setRegClass(MI.getOperand(0).getReg(), NewRC);
}
}
}
}
return;
}
if (TII->isMIMG(MI)) {
if (!MI.mayStore())
AddIMGInit(MI);
TII->enforceOperandRCAlignment(MI, AMDGPU::OpName::vaddr);
}
}
static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
uint64_t Val) {
SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
}
MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
const SDLoc &DL,
SDValue Ptr) const {
const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
const SDValue Ops0[] = {
DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
buildSMovImm32(DAG, DL, 0),
DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
};
SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
MVT::v2i32, Ops0), 0);
const SDValue Ops1[] = {
DAG.getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32),
Ptr,
DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
SubRegHi,
DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
};
return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
}
MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
SDValue Ptr, uint32_t RsrcDword1,
uint64_t RsrcDword2And3) const {
SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
if (RsrcDword1) {
PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
DAG.getConstant(RsrcDword1, DL, MVT::i32)),
0);
}
SDValue DataLo = buildSMovImm32(DAG, DL,
RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);
const SDValue Ops[] = {
DAG.getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32),
PtrLo,
DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
PtrHi,
DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
DataLo,
DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
DataHi,
DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
};
return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
}
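// Resolve inline-asm register constraints: 's', 'v' and 'a' select SGPR,
// VGPR and AGPR classes sized to the operand type, and named-register
// constraints like "{v5}" or "{s[0:3]}" are mapped to the matching physical
// register and register class.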
std::pair<unsigned, const TargetRegisterClass *>
SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI_,
StringRef Constraint,
MVT VT) const {
const SIRegisterInfo *TRI = static_cast<const SIRegisterInfo *>(TRI_);
const TargetRegisterClass *RC = nullptr;
if (Constraint.size() == 1) {
const unsigned BitWidth = VT.getSizeInBits();
switch (Constraint[0]) {
default:
return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
case 's':
case 'r':
switch (BitWidth) {
case 16:
RC = &AMDGPU::SReg_32RegClass;
break;
case 64:
RC = &AMDGPU::SGPR_64RegClass;
break;
default:
RC = SIRegisterInfo::getSGPRClassForBitWidth(BitWidth);
if (!RC)
return std::make_pair(0U, nullptr);
break;
}
break;
case 'v':
switch (BitWidth) {
case 16:
RC = &AMDGPU::VGPR_32RegClass;
break;
default:
RC = TRI->getVGPRClassForBitWidth(BitWidth);
if (!RC)
return std::make_pair(0U, nullptr);
break;
}
break;
case 'a':
if (!Subtarget->hasMAIInsts())
break;
switch (BitWidth) {
case 16:
RC = &AMDGPU::AGPR_32RegClass;
break;
default:
RC = TRI->getAGPRClassForBitWidth(BitWidth);
if (!RC)
return std::make_pair(0U, nullptr);
break;
}
break;
}
if (RC && (isTypeLegal(VT) || VT.SimpleTy == MVT::i128 ||
VT.SimpleTy == MVT::i16 || VT.SimpleTy == MVT::f16))
return std::make_pair(0U, RC);
}
if (Constraint.startswith("{") && Constraint.endswith("}")) {
StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
if (RegName.consume_front("v")) {
RC = &AMDGPU::VGPR_32RegClass;
} else if (RegName.consume_front("s")) {
RC = &AMDGPU::SGPR_32RegClass;
} else if (RegName.consume_front("a")) {
RC = &AMDGPU::AGPR_32RegClass;
}
if (RC) {
uint32_t Idx;
if (RegName.consume_front("[")) {
uint32_t End;
bool Failed = RegName.consumeInteger(10, Idx);
Failed |= !RegName.consume_front(":");
Failed |= RegName.consumeInteger(10, End);
Failed |= !RegName.consume_back("]");
if (!Failed) {
uint32_t Width = (End - Idx + 1) * 32;
MCRegister Reg = RC->getRegister(Idx);
if (SIRegisterInfo::isVGPRClass(RC))
RC = TRI->getVGPRClassForBitWidth(Width);
else if (SIRegisterInfo::isSGPRClass(RC))
RC = TRI->getSGPRClassForBitWidth(Width);
else if (SIRegisterInfo::isAGPRClass(RC))
RC = TRI->getAGPRClassForBitWidth(Width);
if (RC) {
Reg = TRI->getMatchingSuperReg(Reg, AMDGPU::sub0, RC);
return std::make_pair(Reg, RC);
}
}
} else {
bool Failed = RegName.getAsInteger(10, Idx);
if (!Failed && Idx < RC->getNumRegs())
return std::make_pair(RC->getRegister(Idx), RC);
}
}
}
auto Ret = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
if (Ret.first)
Ret.second = TRI->getPhysRegClass(Ret.first);
return Ret;
}
static bool isImmConstraint(StringRef Constraint) {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
default: break;
case 'I':
case 'J':
case 'A':
case 'B':
case 'C':
return true;
}
} else if (Constraint == "DA" ||
Constraint == "DB") {
return true;
}
return false;
}
SITargetLowering::ConstraintType
SITargetLowering::getConstraintType(StringRef Constraint) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
default: break;
case 's':
case 'v':
case 'a':
return C_RegisterClass;
}
}
if (isImmConstraint(Constraint)) {
return C_Other;
}
return TargetLowering::getConstraintType(Constraint);
}
static uint64_t clearUnusedBits(uint64_t Val, unsigned Size) {
if (!AMDGPU::isInlinableIntLiteral(Val)) {
Val = Val & maskTrailingOnes<uint64_t>(Size);
}
return Val;
}
void SITargetLowering::LowerAsmOperandForConstraint(SDValue Op,
std::string &Constraint,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const {
if (isImmConstraint(Constraint)) {
uint64_t Val;
if (getAsmOperandConstVal(Op, Val) &&
checkAsmConstraintVal(Op, Constraint, Val)) {
Val = clearUnusedBits(Val, Op.getScalarValueSizeInBits());
Ops.push_back(DAG.getTargetConstant(Val, SDLoc(Op), MVT::i64));
}
} else {
TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
}
bool SITargetLowering::getAsmOperandConstVal(SDValue Op, uint64_t &Val) const {
unsigned Size = Op.getScalarValueSizeInBits();
if (Size > 64)
return false;
if (Size == 16 && !Subtarget->has16BitInsts())
return false;
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
Val = C->getSExtValue();
return true;
}
if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) {
Val = C->getValueAPF().bitcastToAPInt().getSExtValue();
return true;
}
if (BuildVectorSDNode *V = dyn_cast<BuildVectorSDNode>(Op)) {
if (Size != 16 || Op.getNumOperands() != 2)
return false;
if (Op.getOperand(0).isUndef() || Op.getOperand(1).isUndef())
return false;
if (ConstantSDNode *C = V->getConstantSplatNode()) {
Val = C->getSExtValue();
return true;
}
if (ConstantFPSDNode *C = V->getConstantFPSplatNode()) {
Val = C->getValueAPF().bitcastToAPInt().getSExtValue();
return true;
}
}
return false;
}
bool SITargetLowering::checkAsmConstraintVal(SDValue Op,
const std::string &Constraint,
uint64_t Val) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
case 'I':
return AMDGPU::isInlinableIntLiteral(Val);
case 'J':
return isInt<16>(Val);
case 'A':
return checkAsmConstraintValA(Op, Val);
case 'B':
return isInt<32>(Val);
case 'C':
return isUInt<32>(clearUnusedBits(Val, Op.getScalarValueSizeInBits())) ||
AMDGPU::isInlinableIntLiteral(Val);
default:
break;
}
} else if (Constraint.size() == 2) {
if (Constraint == "DA") {
int64_t HiBits = static_cast<int32_t>(Val >> 32);
int64_t LoBits = static_cast<int32_t>(Val);
return checkAsmConstraintValA(Op, HiBits, 32) &&
checkAsmConstraintValA(Op, LoBits, 32);
}
if (Constraint == "DB") {
return true;
}
}
llvm_unreachable("Invalid asm constraint");
}
bool SITargetLowering::checkAsmConstraintValA(SDValue Op,
uint64_t Val,
unsigned MaxSize) const {
unsigned Size = std::min<unsigned>(Op.getScalarValueSizeInBits(), MaxSize);
bool HasInv2Pi = Subtarget->hasInv2PiInlineImm();
if ((Size == 16 && AMDGPU::isInlinableLiteral16(Val, HasInv2Pi)) ||
(Size == 32 && AMDGPU::isInlinableLiteral32(Val, HasInv2Pi)) ||
(Size == 64 && AMDGPU::isInlinableLiteral64(Val, HasInv2Pi))) {
return true;
}
return false;
}
static int getAlignedAGPRClassID(unsigned UnalignedClassID) {
switch (UnalignedClassID) {
case AMDGPU::VReg_64RegClassID:
return AMDGPU::VReg_64_Align2RegClassID;
case AMDGPU::VReg_96RegClassID:
return AMDGPU::VReg_96_Align2RegClassID;
case AMDGPU::VReg_128RegClassID:
return AMDGPU::VReg_128_Align2RegClassID;
case AMDGPU::VReg_160RegClassID:
return AMDGPU::VReg_160_Align2RegClassID;
case AMDGPU::VReg_192RegClassID:
return AMDGPU::VReg_192_Align2RegClassID;
case AMDGPU::VReg_224RegClassID:
return AMDGPU::VReg_224_Align2RegClassID;
case AMDGPU::VReg_256RegClassID:
return AMDGPU::VReg_256_Align2RegClassID;
case AMDGPU::VReg_512RegClassID:
return AMDGPU::VReg_512_Align2RegClassID;
case AMDGPU::VReg_1024RegClassID:
return AMDGPU::VReg_1024_Align2RegClassID;
case AMDGPU::AReg_64RegClassID:
return AMDGPU::AReg_64_Align2RegClassID;
case AMDGPU::AReg_96RegClassID:
return AMDGPU::AReg_96_Align2RegClassID;
case AMDGPU::AReg_128RegClassID:
return AMDGPU::AReg_128_Align2RegClassID;
case AMDGPU::AReg_160RegClassID:
return AMDGPU::AReg_160_Align2RegClassID;
case AMDGPU::AReg_192RegClassID:
return AMDGPU::AReg_192_Align2RegClassID;
case AMDGPU::AReg_256RegClassID:
return AMDGPU::AReg_256_Align2RegClassID;
case AMDGPU::AReg_512RegClassID:
return AMDGPU::AReg_512_Align2RegClassID;
case AMDGPU::AReg_1024RegClassID:
return AMDGPU::AReg_1024_Align2RegClassID;
default:
return -1;
}
}
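// Final per-function lowering fixups: replace the SP/FP/private-resource
// placeholder registers with the registers chosen in SIMachineFunctionInfo,
// fix implicit operands for wave32, and move virtual registers into the
// even-aligned VGPR/AGPR classes on subtargets that require aligned
// register tuples.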
void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
MachineRegisterInfo &MRI = MF.getRegInfo();
SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
const SIInstrInfo *TII = ST.getInstrInfo();
if (Info->isEntryFunction()) {
reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info);
}
assert(!TRI->isSubRegister(Info->getScratchRSrcReg(),
Info->getStackPtrOffsetReg()));
if (Info->getStackPtrOffsetReg() != AMDGPU::SP_REG)
MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg());
if (Info->getScratchRSrcReg() != AMDGPU::PRIVATE_RSRC_REG)
MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg());
if (Info->getFrameOffsetReg() != AMDGPU::FP_REG)
MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg());
Info->limitOccupancy(MF);
if (ST.isWave32() && !MF.empty()) {
for (auto &MBB : MF) {
for (auto &MI : MBB) {
TII->fixImplicitOperands(MI);
}
}
}
if (ST.needsAlignedVGPRs()) {
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
const Register Reg = Register::index2VirtReg(I);
const TargetRegisterClass *RC = MRI.getRegClassOrNull(Reg);
if (!RC)
continue;
int NewClassID = getAlignedAGPRClassID(RC->getID());
if (NewClassID != -1)
MRI.setRegClass(Reg, TRI->getRegClass(NewClassID));
}
}
TargetLoweringBase::finalizeLowering(MF);
}
void SITargetLowering::computeKnownBitsForFrameIndex(
const int FI, KnownBits &Known, const MachineFunction &MF) const {
TargetLowering::computeKnownBitsForFrameIndex(FI, Known, MF);
Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex());
}
static void knownBitsForWorkitemID(const GCNSubtarget &ST, GISelKnownBits &KB,
KnownBits &Known, unsigned Dim) {
unsigned MaxValue =
ST.getMaxWorkitemID(KB.getMachineFunction().getFunction(), Dim);
Known.Zero.setHighBits(countLeadingZeros(MaxValue));
}
void SITargetLowering::computeKnownBitsForTargetInstr(
GISelKnownBits &KB, Register R, KnownBits &Known, const APInt &DemandedElts,
const MachineRegisterInfo &MRI, unsigned Depth) const {
const MachineInstr *MI = MRI.getVRegDef(R);
switch (MI->getOpcode()) {
case AMDGPU::G_INTRINSIC: {
switch (MI->getIntrinsicID()) {
case Intrinsic::amdgcn_workitem_id_x:
knownBitsForWorkitemID(*getSubtarget(), KB, Known, 0);
break;
case Intrinsic::amdgcn_workitem_id_y:
knownBitsForWorkitemID(*getSubtarget(), KB, Known, 1);
break;
case Intrinsic::amdgcn_workitem_id_z:
knownBitsForWorkitemID(*getSubtarget(), KB, Known, 2);
break;
case Intrinsic::amdgcn_mbcnt_lo:
case Intrinsic::amdgcn_mbcnt_hi: {
unsigned Size = MRI.getType(R).getSizeInBits();
Known.Zero.setHighBits(Size - getSubtarget()->getWavefrontSizeLog2());
break;
}
case Intrinsic::amdgcn_groupstaticsize: {
Known.Zero.setHighBits(countLeadingZeros(getSubtarget()->getLocalMemorySize()));
break;
}
}
break;
}
case AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE:
Known.Zero.setHighBits(24);
break;
case AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT:
Known.Zero.setHighBits(16);
break;
}
}
Align SITargetLowering::computeKnownAlignForTargetInstr(
GISelKnownBits &KB, Register R, const MachineRegisterInfo &MRI,
unsigned Depth) const {
const MachineInstr *MI = MRI.getVRegDef(R);
switch (MI->getOpcode()) {
case AMDGPU::G_INTRINSIC:
case AMDGPU::G_INTRINSIC_W_SIDE_EFFECTS: {
Intrinsic::ID IID = MI->getIntrinsicID();
LLVMContext &Ctx = KB.getMachineFunction().getFunction().getContext();
AttributeList Attrs = Intrinsic::getAttributes(Ctx, IID);
if (MaybeAlign RetAlign = Attrs.getRetAlignment())
return *RetAlign;
return Align(1);
}
default:
return Align(1);
}
}
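// On GFX10+ (without the instruction-prefetch bug) try to keep small loops
// within the 64-byte instruction cache line: loops of up to 64 bytes keep
// the default alignment, loops up to about 128 bytes are aligned to the
// cache line, and loops up to about 192 bytes additionally get
// S_INST_PREFETCH hints in the preheader and exit block.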
Align SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
const Align PrefAlign = TargetLowering::getPrefLoopAlignment(ML);
const Align CacheLineAlign = Align(64);
if (!ML || DisableLoopAlignment ||
(getSubtarget()->getGeneration() < AMDGPUSubtarget::GFX10) ||
getSubtarget()->hasInstFwdPrefetchBug())
return PrefAlign;
const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
const MachineBasicBlock *Header = ML->getHeader();
if (Header->getAlignment() != PrefAlign)
return Header->getAlignment();
unsigned LoopSize = 0;
for (const MachineBasicBlock *MBB : ML->blocks()) {
if (MBB != Header)
LoopSize += MBB->getAlignment().value() / 2;
for (const MachineInstr &MI : *MBB) {
LoopSize += TII->getInstSizeInBytes(MI);
if (LoopSize > 192)
return PrefAlign;
}
}
if (LoopSize <= 64)
return PrefAlign;
if (LoopSize <= 128)
return CacheLineAlign;
for (MachineLoop *P = ML->getParentLoop(); P; P = P->getParentLoop()) {
if (MachineBasicBlock *Exit = P->getExitBlock()) {
auto I = Exit->getFirstNonDebugInstr();
if (I != Exit->end() && I->getOpcode() == AMDGPU::S_INST_PREFETCH)
return CacheLineAlign;
}
}
MachineBasicBlock *Pre = ML->getLoopPreheader();
MachineBasicBlock *Exit = ML->getExitBlock();
if (Pre && Exit) {
auto PreTerm = Pre->getFirstTerminator();
if (PreTerm == Pre->begin() ||
std::prev(PreTerm)->getOpcode() != AMDGPU::S_INST_PREFETCH)
BuildMI(*Pre, PreTerm, DebugLoc(), TII->get(AMDGPU::S_INST_PREFETCH))
.addImm(1);
auto ExitHead = Exit->getFirstNonDebugInstr();
if (ExitHead == Exit->end() ||
ExitHead->getOpcode() != AMDGPU::S_INST_PREFETCH)
BuildMI(*Exit, ExitHead, DebugLoc(), TII->get(AMDGPU::S_INST_PREFETCH))
.addImm(2);
}
return CacheLineAlign;
}
LLVM_ATTRIBUTE_UNUSED
static bool isCopyFromRegOfInlineAsm(const SDNode *N) {
assert(N->getOpcode() == ISD::CopyFromReg);
do {
N = N->getOperand(0).getNode();
if (N->getOpcode() == ISD::INLINEASM ||
N->getOpcode() == ISD::INLINEASM_BR)
return true;
} while (N->getOpcode() == ISD::CopyFromReg);
return false;
}
bool SITargetLowering::isSDNodeSourceOfDivergence(
const SDNode *N, FunctionLoweringInfo *FLI,
LegacyDivergenceAnalysis *KDA) const {
switch (N->getOpcode()) {
case ISD::CopyFromReg: {
const RegisterSDNode *R = cast<RegisterSDNode>(N->getOperand(1));
const MachineRegisterInfo &MRI = FLI->MF->getRegInfo();
const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
Register Reg = R->getReg();
if (Reg.isPhysical() || MRI.isLiveIn(Reg))
return !TRI->isSGPRReg(MRI, Reg);
if (const Value *V = FLI->getValueFromVirtualReg(R->getReg()))
return KDA->isDivergent(V);
assert(Reg == FLI->DemoteRegister || isCopyFromRegOfInlineAsm(N));
return !TRI->isSGPRReg(MRI, Reg);
}
case ISD::LOAD: {
const LoadSDNode *L = cast<LoadSDNode>(N);
unsigned AS = L->getAddressSpace();
return AS == AMDGPUAS::PRIVATE_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS;
}
case ISD::CALLSEQ_END:
return true;
case ISD::INTRINSIC_WO_CHAIN:
return AMDGPU::isIntrinsicSourceOfDivergence(
cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
case ISD::INTRINSIC_W_CHAIN:
return AMDGPU::isIntrinsicSourceOfDivergence(
cast<ConstantSDNode>(N->getOperand(1))->getZExtValue());
case AMDGPUISD::ATOMIC_CMP_SWAP:
case AMDGPUISD::ATOMIC_INC:
case AMDGPUISD::ATOMIC_DEC:
case AMDGPUISD::ATOMIC_LOAD_FMIN:
case AMDGPUISD::ATOMIC_LOAD_FMAX:
case AMDGPUISD::BUFFER_ATOMIC_SWAP:
case AMDGPUISD::BUFFER_ATOMIC_ADD:
case AMDGPUISD::BUFFER_ATOMIC_SUB:
case AMDGPUISD::BUFFER_ATOMIC_SMIN:
case AMDGPUISD::BUFFER_ATOMIC_UMIN:
case AMDGPUISD::BUFFER_ATOMIC_SMAX:
case AMDGPUISD::BUFFER_ATOMIC_UMAX:
case AMDGPUISD::BUFFER_ATOMIC_AND:
case AMDGPUISD::BUFFER_ATOMIC_OR:
case AMDGPUISD::BUFFER_ATOMIC_XOR:
case AMDGPUISD::BUFFER_ATOMIC_INC:
case AMDGPUISD::BUFFER_ATOMIC_DEC:
case AMDGPUISD::BUFFER_ATOMIC_CMPSWAP:
case AMDGPUISD::BUFFER_ATOMIC_CSUB:
case AMDGPUISD::BUFFER_ATOMIC_FADD:
case AMDGPUISD::BUFFER_ATOMIC_FMIN:
case AMDGPUISD::BUFFER_ATOMIC_FMAX:
return true;
default:
if (auto *A = dyn_cast<AtomicSDNode>(N)) {
return A->readMem() && A->writeMem();
}
return false;
}
}
bool SITargetLowering::denormalsEnabledForType(const SelectionDAG &DAG,
EVT VT) const {
switch (VT.getScalarType().getSimpleVT().SimpleTy) {
case MVT::f32:
return hasFP32Denormals(DAG.getMachineFunction());
case MVT::f64:
case MVT::f16:
return hasFP64FP16Denormals(DAG.getMachineFunction());
default:
return false;
}
}
bool SITargetLowering::denormalsEnabledForType(LLT Ty,
MachineFunction &MF) const {
switch (Ty.getScalarSizeInBits()) {
case 32:
return hasFP32Denormals(MF);
case 64:
case 16:
return hasFP64FP16Denormals(MF);
default:
return false;
}
}
bool SITargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
const SelectionDAG &DAG,
bool SNaN,
unsigned Depth) const {
if (Op.getOpcode() == AMDGPUISD::CLAMP) {
const MachineFunction &MF = DAG.getMachineFunction();
const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
if (Info->getMode().DX10Clamp)
return true;
return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
}
return AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(Op, DAG,
SNaN, Depth);
}
static bool fpModeMatchesGlobalFPAtomicMode(const AtomicRMWInst *RMW) {
const fltSemantics &Flt = RMW->getType()->getScalarType()->getFltSemantics();
auto DenormMode = RMW->getParent()->getParent()->getDenormalMode(Flt);
if (&Flt == &APFloat::IEEEsingle())
return DenormMode == DenormalMode::getPreserveSign();
return DenormMode == DenormalMode::getIEEE();
}
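// Decide how atomicrmw is expanded: private-address atomics are lowered as
// ordinary (non-atomic) operations, FP adds may be selected to hardware
// atomics on subtargets that support them (emitting a remark when that
// relies on the amdgpu-unsafe-fp-atomics attribute), and everything else
// falls back to the generic handling.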
TargetLowering::AtomicExpansionKind
SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
unsigned AS = RMW->getPointerAddressSpace();
if (AS == AMDGPUAS::PRIVATE_ADDRESS)
return AtomicExpansionKind::NotAtomic;
auto ReportUnsafeHWInst = [&](TargetLowering::AtomicExpansionKind Kind) {
OptimizationRemarkEmitter ORE(RMW->getFunction());
LLVMContext &Ctx = RMW->getFunction()->getContext();
SmallVector<StringRef> SSNs;
Ctx.getSyncScopeNames(SSNs);
auto MemScope = SSNs[RMW->getSyncScopeID()].empty()
? "system"
: SSNs[RMW->getSyncScopeID()];
ORE.emit([&]() {
return OptimizationRemark(DEBUG_TYPE, "Passed", RMW)
<< "Hardware instruction generated for atomic "
<< RMW->getOperationName(RMW->getOperation())
<< " operation at memory scope " << MemScope
<< " due to an unsafe request.";
});
return Kind;
};
switch (RMW->getOperation()) {
case AtomicRMWInst::FAdd: {
Type *Ty = RMW->getType();
if (Ty->isHalfTy())
return AtomicExpansionKind::None;
if (!Ty->isFloatTy() && (!Subtarget->hasGFX90AInsts() || !Ty->isDoubleTy()))
return AtomicExpansionKind::CmpXChg;
if ((AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) &&
Subtarget->hasAtomicFaddNoRtnInsts()) {
if (Subtarget->hasGFX940Insts())
return AtomicExpansionKind::None;
if (RMW->getFunction()
->getFnAttribute("amdgpu-unsafe-fp-atomics")
.getValueAsString() != "true")
return AtomicExpansionKind::CmpXChg;
if (Subtarget->hasGFX90AInsts()) {
if (Ty->isFloatTy() && AS == AMDGPUAS::FLAT_ADDRESS)
return AtomicExpansionKind::CmpXChg;
auto SSID = RMW->getSyncScopeID();
if (SSID == SyncScope::System ||
SSID == RMW->getContext().getOrInsertSyncScopeID("one-as"))
return AtomicExpansionKind::CmpXChg;
return ReportUnsafeHWInst(AtomicExpansionKind::None);
}
if (AS == AMDGPUAS::FLAT_ADDRESS)
return AtomicExpansionKind::CmpXChg;
return RMW->use_empty() ? ReportUnsafeHWInst(AtomicExpansionKind::None)
: AtomicExpansionKind::CmpXChg;
}
if (AS == AMDGPUAS::LOCAL_ADDRESS && Subtarget->hasLDSFPAtomicAdd()) {
if (!Ty->isDoubleTy())
return AtomicExpansionKind::None;
if (fpModeMatchesGlobalFPAtomicMode(RMW))
return AtomicExpansionKind::None;
return RMW->getFunction()
->getFnAttribute("amdgpu-unsafe-fp-atomics")
.getValueAsString() == "true"
? ReportUnsafeHWInst(AtomicExpansionKind::None)
: AtomicExpansionKind::CmpXChg;
}
return AtomicExpansionKind::CmpXChg;
}
default:
break;
}
return AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(RMW);
}
TargetLowering::AtomicExpansionKind
SITargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
return LI->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS
? AtomicExpansionKind::NotAtomic
: AtomicExpansionKind::None;
}
TargetLowering::AtomicExpansionKind
SITargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
return SI->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS
? AtomicExpansionKind::NotAtomic
: AtomicExpansionKind::None;
}
TargetLowering::AtomicExpansionKind
SITargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CmpX) const {
return CmpX->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS
? AtomicExpansionKind::NotAtomic
: AtomicExpansionKind::None;
}
const TargetRegisterClass *
SITargetLowering::getRegClassFor(MVT VT, bool isDivergent) const {
const TargetRegisterClass *RC = TargetLoweringBase::getRegClassFor(VT, false);
const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
if (RC == &AMDGPU::VReg_1RegClass && !isDivergent)
return Subtarget->getWavefrontSize() == 64 ? &AMDGPU::SReg_64RegClass
: &AMDGPU::SReg_32RegClass;
if (!TRI->isSGPRClass(RC) && !isDivergent)
return TRI->getEquivalentSGPRClass(RC);
else if (TRI->isSGPRClass(RC) && isDivergent)
return TRI->getEquivalentVGPRClass(RC);
return RC;
}
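// Walk the users of a wave-size integer value looking for uses as the mask
// operand of the control-flow intrinsics (amdgcn.if, amdgcn.else,
// amdgcn.if.break, amdgcn.loop, amdgcn.end.cf); such values need to be kept
// in scalar registers.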
static bool hasCFUser(const Value *V, SmallPtrSet<const Value *, 16> &Visited,
unsigned WaveSize) {
IntegerType *IT = dyn_cast<IntegerType>(V->getType());
if (!IT || IT->getBitWidth() != WaveSize)
return false;
if (!isa<Instruction>(V))
return false;
if (!Visited.insert(V).second)
return false;
bool Result = false;
for (auto U : V->users()) {
if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(U)) {
if (V == U->getOperand(1)) {
switch (Intrinsic->getIntrinsicID()) {
default:
Result = false;
break;
case Intrinsic::amdgcn_if_break:
case Intrinsic::amdgcn_if:
case Intrinsic::amdgcn_else:
Result = true;
break;
}
}
if (V == U->getOperand(0)) {
switch (Intrinsic->getIntrinsicID()) {
default:
Result = false;
break;
case Intrinsic::amdgcn_end_cf:
case Intrinsic::amdgcn_loop:
Result = true;
break;
}
}
} else {
Result = hasCFUser(U, Visited, WaveSize);
}
if (Result)
break;
}
return Result;
}
bool SITargetLowering::requiresUniformRegister(MachineFunction &MF,
const Value *V) const {
if (const CallInst *CI = dyn_cast<CallInst>(V)) {
if (CI->isInlineAsm()) {
const SIRegisterInfo *SIRI = Subtarget->getRegisterInfo();
TargetLowering::AsmOperandInfoVector TargetConstraints = ParseConstraints(
MF.getDataLayout(), Subtarget->getRegisterInfo(), *CI);
for (auto &TC : TargetConstraints) {
if (TC.Type == InlineAsm::isOutput) {
ComputeConstraintToUse(TC, SDValue());
const TargetRegisterClass *RC = getRegForInlineAsmConstraint(
SIRI, TC.ConstraintCode, TC.ConstraintVT).second;
if (RC && SIRI->isSGPRClass(RC))
return true;
}
}
}
}
SmallPtrSet<const Value *, 16> Visited;
return hasCFUser(V, Visited, Subtarget->getWavefrontSize());
}
std::pair<InstructionCost, MVT>
SITargetLowering::getTypeLegalizationCost(const DataLayout &DL,
Type *Ty) const {
std::pair<InstructionCost, MVT> Cost =
TargetLoweringBase::getTypeLegalizationCost(DL, Ty);
auto Size = DL.getTypeSizeInBits(Ty);
if (Size <= 256)
return Cost;
Cost.first += (Size + 255) / 256;
return Cost;
}
bool SITargetLowering::hasMemSDNodeUser(SDNode *N) const {
SDNode::use_iterator I = N->use_begin(), E = N->use_end();
for (; I != E; ++I) {
if (MemSDNode *M = dyn_cast<MemSDNode>(*I)) {
if (getBasePtrIndex(M) == I.getOperandNo())
return true;
}
}
return false;
}
bool SITargetLowering::isReassocProfitable(SelectionDAG &DAG, SDValue N0,
SDValue N1) const {
if (!N0.hasOneUse())
return false;
if (N0->isDivergent() || !N1->isDivergent())
return true;
return (DAG.isBaseWithConstantOffset(N0) &&
hasMemSDNodeUser(*N0->use_begin()));
}
MachineMemOperand::Flags
SITargetLowering::getTargetMMOFlags(const Instruction &I) const {
if (I.getMetadata("amdgpu.noclobber"))
return MONoClobber;
return MachineMemOperand::MONone;
}