#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCCodeEmitter.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
using namespace llvm;
namespace {
class SIMCCodeEmitter : public AMDGPUMCCodeEmitter {
const MCRegisterInfo &MRI;
uint32_t getLitEncoding(const MCOperand &MO, const MCOperandInfo &OpInfo,
const MCSubtargetInfo &STI) const;
public:
SIMCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
: AMDGPUMCCodeEmitter(mcii), MRI(*ctx.getRegisterInfo()) {}
SIMCCodeEmitter(const SIMCCodeEmitter &) = delete;
SIMCCodeEmitter &operator=(const SIMCCodeEmitter &) = delete;
void encodeInstruction(const MCInst &MI, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const override;
void getMachineOpValue(const MCInst &MI, const MCOperand &MO, APInt &Op,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const override;
void getSOPPBrEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const override;
void getSMEMOffsetEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const override;
void getSDWASrcEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const override;
void getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const override;
void getAVOperandEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const override;
private:
uint64_t getImplicitOpSelHiEncoding(int Opcode) const;
void getMachineOpValueCommon(const MCInst &MI, const MCOperand &MO,
unsigned OpNo, APInt &Op,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
};
} // end anonymous namespace
MCCodeEmitter *llvm::createSIMCCodeEmitter(const MCInstrInfo &MCII,
MCContext &Ctx) {
return new SIMCCodeEmitter(MCII, Ctx);
}
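// Returns the encoding to use if the given integer is an inline immediate:
// 0..64 map to 128..192 and -16..-1 map to 193..208. Returns 0 if the value
// has no integer inline encoding.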
template <typename IntTy>
static uint32_t getIntInlineImmEncoding(IntTy Imm) {
if (Imm >= 0 && Imm <= 64)
return 128 + Imm;
if (Imm >= -16 && Imm <= -1)
return 192 + std::abs(Imm);
return 0;
}
static uint32_t getLit16IntEncoding(uint16_t Val, const MCSubtargetInfo &STI) {
uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
return IntImm == 0 ? 255 : IntImm;
}
static uint32_t getLit16Encoding(uint16_t Val, const MCSubtargetInfo &STI) {
uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
if (IntImm != 0)
return IntImm;
if (Val == 0x3800) return 240;
if (Val == 0xB800) return 241;
if (Val == 0x3C00) return 242;
if (Val == 0xBC00) return 243;
if (Val == 0x4000) return 244;
if (Val == 0xC000) return 245;
if (Val == 0x4400) return 246;
if (Val == 0xC400) return 247;
if (Val == 0x3118 && STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
return 248;
return 255;
}
static uint32_t getLit32Encoding(uint32_t Val, const MCSubtargetInfo &STI) {
uint32_t IntImm = getIntInlineImmEncoding(static_cast<int32_t>(Val));
if (IntImm != 0)
return IntImm;
if (Val == FloatToBits(0.5f))
return 240;
if (Val == FloatToBits(-0.5f))
return 241;
if (Val == FloatToBits(1.0f))
return 242;
if (Val == FloatToBits(-1.0f))
return 243;
if (Val == FloatToBits(2.0f))
return 244;
if (Val == FloatToBits(-2.0f))
return 245;
if (Val == FloatToBits(4.0f))
return 246;
if (Val == FloatToBits(-4.0f))
return 247;
if (Val == 0x3e22f983 && STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
return 248;
return 255;
}
static uint32_t getLit64Encoding(uint64_t Val, const MCSubtargetInfo &STI) {
uint32_t IntImm = getIntInlineImmEncoding(static_cast<int64_t>(Val));
if (IntImm != 0)
return IntImm;
if (Val == DoubleToBits(0.5))
return 240;
if (Val == DoubleToBits(-0.5))
return 241;
if (Val == DoubleToBits(1.0))
return 242;
if (Val == DoubleToBits(-1.0))
return 243;
if (Val == DoubleToBits(2.0))
return 244;
if (Val == DoubleToBits(-2.0))
return 245;
if (Val == DoubleToBits(4.0))
return 246;
if (Val == DoubleToBits(-4.0))
return 247;
if (Val == 0x3fc45f306dc9c882 && STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
return 248;
return 255;
}
uint32_t SIMCCodeEmitter::getLitEncoding(const MCOperand &MO,
const MCOperandInfo &OpInfo,
const MCSubtargetInfo &STI) const {
int64_t Imm;
if (MO.isExpr()) {
const auto *C = dyn_cast<MCConstantExpr>(MO.getExpr());
if (!C)
return 255;
Imm = C->getValue();
} else {
assert(!MO.isDFPImm());
if (!MO.isImm())
return ~0;
Imm = MO.getImm();
}
switch (OpInfo.OperandType) {
case AMDGPU::OPERAND_REG_IMM_INT32:
case AMDGPU::OPERAND_REG_IMM_FP32:
case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
case AMDGPU::OPERAND_REG_INLINE_C_INT32:
case AMDGPU::OPERAND_REG_INLINE_C_FP32:
case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
case AMDGPU::OPERAND_REG_IMM_V2INT32:
case AMDGPU::OPERAND_REG_IMM_V2FP32:
case AMDGPU::OPERAND_REG_INLINE_C_V2INT32:
case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
return getLit32Encoding(static_cast<uint32_t>(Imm), STI);
case AMDGPU::OPERAND_REG_IMM_INT64:
case AMDGPU::OPERAND_REG_IMM_FP64:
case AMDGPU::OPERAND_REG_INLINE_C_INT64:
case AMDGPU::OPERAND_REG_INLINE_C_FP64:
case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
return getLit64Encoding(static_cast<uint64_t>(Imm), STI);
case AMDGPU::OPERAND_REG_IMM_INT16:
case AMDGPU::OPERAND_REG_INLINE_C_INT16:
case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
return getLit16IntEncoding(static_cast<uint16_t>(Imm), STI);
case AMDGPU::OPERAND_REG_IMM_FP16:
case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
case AMDGPU::OPERAND_REG_INLINE_C_FP16:
case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
case AMDGPU::OPERAND_REG_IMM_V2INT16:
case AMDGPU::OPERAND_REG_IMM_V2FP16: {
if (!isUInt<16>(Imm) && STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal])
return getLit32Encoding(static_cast<uint32_t>(Imm), STI);
if (OpInfo.OperandType == AMDGPU::OPERAND_REG_IMM_V2FP16)
return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
LLVM_FALLTHROUGH;
}
case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
return getLit16IntEncoding(static_cast<uint16_t>(Imm), STI);
case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: {
uint16_t Lo16 = static_cast<uint16_t>(Imm);
uint32_t Encoding = getLit16Encoding(Lo16, STI);
return Encoding;
}
case AMDGPU::OPERAND_KIMM32:
case AMDGPU::OPERAND_KIMM16:
return MO.getImm();
default:
llvm_unreachable("invalid operand size");
}
}
uint64_t SIMCCodeEmitter::getImplicitOpSelHiEncoding(int Opcode) const {
using namespace AMDGPU::VOP3PEncoding;
using namespace AMDGPU::OpName;
if (AMDGPU::getNamedOperandIdx(Opcode, op_sel_hi) != -1) {
if (AMDGPU::getNamedOperandIdx(Opcode, src2) != -1)
return 0;
if (AMDGPU::getNamedOperandIdx(Opcode, src1) != -1)
return OP_SEL_HI_2;
if (AMDGPU::getNamedOperandIdx(Opcode, src0) != -1)
return OP_SEL_HI_1 | OP_SEL_HI_2;
}
return OP_SEL_HI_0 | OP_SEL_HI_1 | OP_SEL_HI_2;
}
static bool isVCMPX64(const MCInstrDesc &Desc) {
return (Desc.TSFlags & SIInstrFlags::VOP3) &&
Desc.hasImplicitDefOfPhysReg(AMDGPU::EXEC);
}
void SIMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const {
int Opcode = MI.getOpcode();
APInt Encoding, Scratch;
getBinaryCodeForInstr(MI, Fixups, Encoding, Scratch, STI);
const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
unsigned bytes = Desc.getSize();
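// Set the unused op_sel_hi bits to 1 for VOP3P instructions, and for
// accvgpr_read/write, which have src0 but no op_sel operands.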
if ((Desc.TSFlags & SIInstrFlags::VOP3P) ||
Opcode == AMDGPU::V_ACCVGPR_READ_B32_vi ||
Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_vi) {
Encoding |= getImplicitOpSelHiEncoding(Opcode);
}
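// GFX11 v_cmpx opcodes promoted to VOP3 have an implied EXEC destination, so
// the otherwise-zero vdst field is filled with the EXEC_LO encoding (0x7E).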
if (AMDGPU::isGFX11Plus(STI) && isVCMPX64(Desc)) {
assert((Encoding & 0xFF) == 0);
Encoding |= MRI.getEncodingValue(AMDGPU::EXEC_LO);
}
for (unsigned i = 0; i < bytes; i++) {
OS.write((uint8_t)Encoding.extractBitsAsZExtValue(8, 8 * i));
}
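// NSA (non-sequential address) MIMG encoding on GFX10+: the extra VGPR
// addresses after vaddr0 are emitted one byte each, padded out to a full
// dword.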
if (AMDGPU::isGFX10Plus(STI) && Desc.TSFlags & SIInstrFlags::MIMG) {
int vaddr0 = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
AMDGPU::OpName::vaddr0);
int srsrc = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
AMDGPU::OpName::srsrc);
assert(vaddr0 >= 0 && srsrc > vaddr0);
unsigned NumExtraAddrs = srsrc - vaddr0 - 1;
unsigned NumPadding = (-NumExtraAddrs) & 3;
for (unsigned i = 0; i < NumExtraAddrs; ++i) {
getMachineOpValue(MI, MI.getOperand(vaddr0 + 1 + i), Encoding, Fixups,
STI);
OS.write((uint8_t)Encoding.getLimitedValue());
}
for (unsigned i = 0; i < NumPadding; ++i)
OS.write(0);
}
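// Only 4-byte encodings can carry a trailing literal, unless the subtarget
// supports VOP3 literals, in which case 8-byte encodings can as well.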
if ((bytes > 8 && STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal]) ||
(bytes > 4 && !STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal]))
return;
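// Instructions with a mandatory literal operand (OpName::imm) already have it
// encoded by the generated encoder, so do not emit it again here.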
int ImmLitIdx =
AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::imm);
if (ImmLitIdx != -1)
return;
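// Otherwise scan the source operands for one that could not be encoded as an
// inline constant (getLitEncoding returned 255) and emit its 32-bit literal.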
for (unsigned i = 0, e = Desc.getNumOperands(); i < e; ++i) {
if (!AMDGPU::isSISrcOperand(Desc, i))
continue;
const MCOperand &Op = MI.getOperand(i);
if (getLitEncoding(Op, Desc.OpInfo[i], STI) != 255)
continue;
int64_t Imm = 0;
if (Op.isImm())
Imm = Op.getImm();
else if (Op.isExpr()) {
if (const auto *C = dyn_cast<MCConstantExpr>(Op.getExpr()))
Imm = C->getValue();
} else if (!Op.isExpr()) // Exprs will be replaced with a fixup value.
llvm_unreachable("Must be immediate or expr");
for (unsigned j = 0; j < 4; j++) {
OS.write((uint8_t) ((Imm >> (8 * j)) & 0xff));
}
break;
}
}
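// SOPP branch targets that are still expressions get a fixup_si_sopp_br fixup
// and a zero placeholder; resolved immediates are encoded directly.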
void SIMCCodeEmitter::getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
APInt &Op,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(OpNo);
if (MO.isExpr()) {
const MCExpr *Expr = MO.getExpr();
MCFixupKind Kind = (MCFixupKind)AMDGPU::fixup_si_sopp_br;
Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
Op = APInt::getNullValue(96);
} else {
getMachineOpValue(MI, MO, Op, Fixups, STI);
}
}
void SIMCCodeEmitter::getSMEMOffsetEncoding(const MCInst &MI, unsigned OpNo,
APInt &Op,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const {
auto Offset = MI.getOperand(OpNo).getImm();
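// VI only supports 20-bit unsigned offsets.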
assert(!AMDGPU::isVI(STI) || isUInt<20>(Offset));
Op = Offset;
}
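// SDWA sources: VGPRs keep their raw encoding, while SGPRs and inline
// constants additionally set SRC_SGPR_MASK; a plain 32-bit literal (255) is
// not representable in SDWA.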
void SIMCCodeEmitter::getSDWASrcEncoding(const MCInst &MI, unsigned OpNo,
APInt &Op,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const {
using namespace AMDGPU::SDWA;
uint64_t RegEnc = 0;
const MCOperand &MO = MI.getOperand(OpNo);
if (MO.isReg()) {
unsigned Reg = MO.getReg();
RegEnc |= MRI.getEncodingValue(Reg);
RegEnc &= SDWA9EncValues::SRC_VGPR_MASK;
if (AMDGPU::isSGPR(AMDGPU::mc2PseudoReg(Reg), &MRI)) {
RegEnc |= SDWA9EncValues::SRC_SGPR_MASK;
}
Op = RegEnc;
return;
} else {
const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
uint32_t Enc = getLitEncoding(MO, Desc.OpInfo[OpNo], STI);
if (Enc != ~0U && Enc != 255) {
Op = Enc | SDWA9EncValues::SRC_SGPR_MASK;
return;
}
}
llvm_unreachable("Unsupported operand kind");
}
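// SDWA VOPC destination: VCC is the implicit default and encodes as zero; any
// other SGPR destination sets VOPC_DST_VCC_MASK plus its register encoding.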
void SIMCCodeEmitter::getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo,
APInt &Op,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const {
using namespace AMDGPU::SDWA;
uint64_t RegEnc = 0;
const MCOperand &MO = MI.getOperand(OpNo);
unsigned Reg = MO.getReg();
if (Reg != AMDGPU::VCC && Reg != AMDGPU::VCC_LO) {
RegEnc |= MRI.getEncodingValue(Reg);
RegEnc &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
RegEnc |= SDWA9EncValues::VOPC_DST_VCC_MASK;
}
Op = RegEnc;
}
void SIMCCodeEmitter::getAVOperandEncoding(const MCInst &MI, unsigned OpNo,
APInt &Op,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const {
unsigned Reg = MI.getOperand(OpNo).getReg();
uint64_t Enc = MRI.getEncodingValue(Reg);
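// VGPRs and AGPRs share the same hardware encoding; MFMA srcA/srcB operands
// are distinguished by the acc bits, represented here as a virtual ninth
// register-encoding bit (512) set for the AGPR register classes.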
if (MRI.getRegClass(AMDGPU::AGPR_32RegClassID).contains(Reg) ||
MRI.getRegClass(AMDGPU::AReg_64RegClassID).contains(Reg) ||
MRI.getRegClass(AMDGPU::AReg_96RegClassID).contains(Reg) ||
MRI.getRegClass(AMDGPU::AReg_128RegClassID).contains(Reg) ||
MRI.getRegClass(AMDGPU::AReg_160RegClassID).contains(Reg) ||
MRI.getRegClass(AMDGPU::AReg_192RegClassID).contains(Reg) ||
MRI.getRegClass(AMDGPU::AReg_224RegClassID).contains(Reg) ||
MRI.getRegClass(AMDGPU::AReg_256RegClassID).contains(Reg) ||
MRI.getRegClass(AMDGPU::AReg_512RegClassID).contains(Reg) ||
MRI.getRegClass(AMDGPU::AGPR_LO16RegClassID).contains(Reg))
Enc |= 512;
Op = Enc;
}
static bool needsPCRel(const MCExpr *Expr) {
switch (Expr->getKind()) {
case MCExpr::SymbolRef: {
auto *SE = cast<MCSymbolRefExpr>(Expr);
MCSymbolRefExpr::VariantKind Kind = SE->getKind();
return Kind != MCSymbolRefExpr::VK_AMDGPU_ABS32_LO &&
Kind != MCSymbolRefExpr::VK_AMDGPU_ABS32_HI;
}
case MCExpr::Binary: {
auto *BE = cast<MCBinaryExpr>(Expr);
if (BE->getOpcode() == MCBinaryExpr::Sub)
return false;
return needsPCRel(BE->getLHS()) || needsPCRel(BE->getRHS());
}
case MCExpr::Unary:
return needsPCRel(cast<MCUnaryExpr>(Expr)->getSubExpr());
case MCExpr::Target:
case MCExpr::Constant:
return false;
}
llvm_unreachable("invalid kind");
}
void SIMCCodeEmitter::getMachineOpValue(const MCInst &MI,
const MCOperand &MO, APInt &Op,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const {
if (MO.isReg()){
Op = MRI.getEncodingValue(MO.getReg());
return;
}
unsigned OpNo = &MO - MI.begin();
getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
}
void SIMCCodeEmitter::getMachineOpValueCommon(
const MCInst &MI, const MCOperand &MO, unsigned OpNo, APInt &Op,
SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
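// Non-constant expressions become 4-byte data or PC-relative fixups placed at
// the literal offset, i.e. immediately after the 4- or 8-byte opcode.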
if (MO.isExpr() && MO.getExpr()->getKind() != MCExpr::Constant) {
MCFixupKind Kind;
if (needsPCRel(MO.getExpr()))
Kind = FK_PCRel_4;
else
Kind = FK_Data_4;
const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
uint32_t Offset = Desc.getSize();
assert(Offset == 4 || Offset == 8);
Fixups.push_back(MCFixup::create(Offset, MO.getExpr(), Kind, MI.getLoc()));
}
const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
if (AMDGPU::isSISrcOperand(Desc, OpNo)) {
uint32_t Enc = getLitEncoding(MO, Desc.OpInfo[OpNo], STI);
if (Enc != ~0U) {
Op = Enc;
return;
}
} else if (MO.isImm()) {
Op = MO.getImm();
return;
}
llvm_unreachable("Encoding of this operand type is not supported yet.");
}
#include "AMDGPUGenMCCodeEmitter.inc"