//===-- RISCVBaseInfo.h - Top level definitions for RISC-V MC --*- C++ -*-===//
//
// This file contains small standalone enum definitions and helper functions
// for the RISC-V target that are useful for the compiler back-end and the MC
// layer.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_RISCV_MCTARGETDESC_RISCVBASEINFO_H
#define LLVM_LIB_TARGET_RISCV_MCTARGETDESC_RISCVBASEINFO_H

#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/RISCVISAInfo.h"
namespace llvm {
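
// RISCVII - This namespace holds all of the target-specific flags that
// instruction info tracks; the definitions below must stay in sync with
// RISCVInstrFormats.td.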
namespace RISCVII {
enum {
InstFormatPseudo = 0,
InstFormatR = 1,
InstFormatR4 = 2,
InstFormatI = 3,
InstFormatS = 4,
InstFormatB = 5,
InstFormatU = 6,
InstFormatJ = 7,
InstFormatCR = 8,
InstFormatCI = 9,
InstFormatCSS = 10,
InstFormatCIW = 11,
InstFormatCL = 12,
InstFormatCS = 13,
InstFormatCA = 14,
InstFormatCB = 15,
InstFormatCJ = 16,
InstFormatOther = 17,

InstFormatMask = 31,
InstFormatShift = 0,

ConstraintShift = InstFormatShift + 5,
ConstraintMask = 0b111 << ConstraintShift,

VLMulShift = ConstraintShift + 3,
VLMulMask = 0b111 << VLMulShift,

// Whether a dummy mask operand must be added when converting this RVV
// pseudo to an MCInst.
HasDummyMaskOpShift = VLMulShift + 3,
HasDummyMaskOpMask = 1 << HasDummyMaskOpShift,

// Force a tail agnostic policy even if the instruction has a tied
// destination.
ForceTailAgnosticShift = HasDummyMaskOpShift + 1,
ForceTailAgnosticMask = 1 << ForceTailAgnosticShift,

// Whether this RVV pseudo has a merge operand that must be removed when
// converting to an MCInst; it is the first explicit use operand.
HasMergeOpShift = ForceTailAgnosticShift + 1,
HasMergeOpMask = 1 << HasMergeOpShift,

// Whether this RVV pseudo has an SEW operand; it is the last explicit
// operand unless there is a vector policy operand.
HasSEWOpShift = HasMergeOpShift + 1,
HasSEWOpMask = 1 << HasSEWOpShift,

// Whether this RVV pseudo has a VL operand; it is the second-to-last
// explicit operand unless there is a vector policy operand.
HasVLOpShift = HasSEWOpShift + 1,
HasVLOpMask = 1 << HasVLOpShift,

// Whether this RVV pseudo has a vector policy operand; it is the last
// explicit operand.
HasVecPolicyOpShift = HasVLOpShift + 1,
HasVecPolicyOpMask = 1 << HasVecPolicyOpShift,

// Whether this instruction is a vector widening reduction.
IsRVVWideningReductionShift = HasVecPolicyOpShift + 1,
IsRVVWideningReductionMask = 1 << IsRVVWideningReductionShift,

// Whether this instruction cares about the mask policy; if it does not
// (e.g. unmasked operations, stores, reductions), the compiler is free to
// pick either agnostic or undisturbed.
UsesMaskPolicyShift = IsRVVWideningReductionShift + 1,
UsesMaskPolicyMask = 1 << UsesMaskPolicyShift,
};
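
// Match with the definitions in RISCVInstrFormats.td.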
enum VConstraintType {
NoConstraint = 0,
VS2Constraint = 0b001,
VS1Constraint = 0b010,
VMConstraint = 0b100,
};
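
// The vlmul encoding as it appears in the vtype CSR: 0-3 select LMUL
// 1/2/4/8, 4 is reserved, and 5-7 select fractional LMUL 1/8, 1/4 and 1/2.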
enum VLMUL : uint8_t {
LMUL_1 = 0,
LMUL_2,
LMUL_4,
LMUL_8,
LMUL_RESERVED,
LMUL_F8,
LMUL_F4,
LMUL_F2
};
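
// Vector policy operand bits: bit 0 selects tail agnostic, bit 1 selects
// mask agnostic.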
enum {
TAIL_AGNOSTIC = 1,
MASK_AGNOSTIC = 2,
};
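
// Helper functions to read TSFlags.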
static inline unsigned getFormat(uint64_t TSFlags) {
return (TSFlags & InstFormatMask) >> InstFormatShift;
}
static inline VConstraintType getConstraint(uint64_t TSFlags) {
return static_cast<VConstraintType>((TSFlags & ConstraintMask) >>
ConstraintShift);
}
static inline VLMUL getLMul(uint64_t TSFlags) {
return static_cast<VLMUL>((TSFlags & VLMulMask) >> VLMulShift);
}
static inline bool hasDummyMaskOp(uint64_t TSFlags) {
return TSFlags & HasDummyMaskOpMask;
}
static inline bool doesForceTailAgnostic(uint64_t TSFlags) {
return TSFlags & ForceTailAgnosticMask;
}
static inline bool hasMergeOp(uint64_t TSFlags) {
return TSFlags & HasMergeOpMask;
}
static inline bool hasSEWOp(uint64_t TSFlags) {
return TSFlags & HasSEWOpMask;
}
static inline bool hasVLOp(uint64_t TSFlags) {
return TSFlags & HasVLOpMask;
}
static inline bool hasVecPolicyOp(uint64_t TSFlags) {
return TSFlags & HasVecPolicyOpMask;
}
static inline bool isRVVWideningReduction(uint64_t TSFlags) {
return TSFlags & IsRVVWideningReductionMask;
}
static inline bool usesMaskPolicy(uint64_t TSFlags) {
return TSFlags & UsesMaskPolicyMask;
}
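
/// \returns the index of the VL operand: the second-to-last explicit operand,
/// or the third-to-last if the instruction also has a vector policy operand.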
static inline unsigned getVLOpNum(const MCInstrDesc &Desc) {
const uint64_t TSFlags = Desc.TSFlags;
assert(hasSEWOp(TSFlags) && hasVLOp(TSFlags));
unsigned Offset = 2;
if (hasVecPolicyOp(TSFlags))
Offset = 3;
return Desc.getNumOperands() - Offset;
}
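
/// \returns the index of the SEW operand: the last explicit operand, or the
/// second-to-last if the instruction also has a vector policy operand.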
static inline unsigned getSEWOpNum(const MCInstrDesc &Desc) {
const uint64_t TSFlags = Desc.TSFlags;
assert(hasSEWOp(TSFlags));
unsigned Offset = 1;
if (hasVecPolicyOp(TSFlags))
Offset = 2;
return Desc.getNumOperands() - Offset;
}
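
// RISC-V specific machine operand target flags.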
enum {
MO_None = 0,
MO_CALL = 1,
MO_PLT = 2,
MO_LO = 3,
MO_HI = 4,
MO_PCREL_LO = 5,
MO_PCREL_HI = 6,
MO_GOT_HI = 7,
MO_TPREL_LO = 8,
MO_TPREL_HI = 9,
MO_TPREL_ADD = 10,
MO_TLS_GOT_HI = 11,
MO_TLS_GD_HI = 12,
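
// Used to differentiate between target-specific "direct" flags and "bitmask"
// flags. A machine operand can only have one "direct" flag, but can have
// multiple "bitmask" flags.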
MO_DIRECT_FLAG_MASK = 15
};
} // namespace RISCVII

// Target-specific operand type kinds (mostly immediate ranges) attached to
// instruction operands.
namespace RISCVOp {
enum OperandType : unsigned {
OPERAND_FIRST_RISCV_IMM = MCOI::OPERAND_FIRST_TARGET,
OPERAND_UIMM2 = OPERAND_FIRST_RISCV_IMM,
OPERAND_UIMM3,
OPERAND_UIMM4,
OPERAND_UIMM5,
OPERAND_UIMM7,
OPERAND_UIMM12,
OPERAND_SIMM12,
OPERAND_SIMM12_LSB00000,
OPERAND_UIMM20,
OPERAND_UIMMLOG2XLEN,
OPERAND_RVKRNUM,
OPERAND_LAST_RISCV_IMM = OPERAND_RVKRNUM,
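
// Operand is either a register or a uimm5; used by V-extension pseudo
// instructions to represent the AVL passed to vsetvli/vsetivli.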
OPERAND_AVL,
};
} // namespace RISCVOp

// Describes the predecessor/successor bits used in the FENCE instruction.
namespace RISCVFenceField {
enum FenceField {
I = 8,
O = 4,
R = 2,
W = 1
};
} // namespace RISCVFenceField

// Describes the supported floating point rounding mode argument.
namespace RISCVFPRndMode {
enum RoundingMode {
RNE = 0,
RTZ = 1,
RDN = 2,
RUP = 3,
RMM = 4,
DYN = 7,
Invalid
};
inline static StringRef roundingModeToString(RoundingMode RndMode) {
switch (RndMode) {
default:
llvm_unreachable("Unknown floating point rounding mode");
case RISCVFPRndMode::RNE:
return "rne";
case RISCVFPRndMode::RTZ:
return "rtz";
case RISCVFPRndMode::RDN:
return "rdn";
case RISCVFPRndMode::RUP:
return "rup";
case RISCVFPRndMode::RMM:
return "rmm";
case RISCVFPRndMode::DYN:
return "dyn";
}
}
inline static RoundingMode stringToRoundingMode(StringRef Str) {
return StringSwitch<RoundingMode>(Str)
.Case("rne", RISCVFPRndMode::RNE)
.Case("rtz", RISCVFPRndMode::RTZ)
.Case("rdn", RISCVFPRndMode::RDN)
.Case("rup", RISCVFPRndMode::RUP)
.Case("rmm", RISCVFPRndMode::RMM)
.Case("dyn", RISCVFPRndMode::DYN)
.Default(RISCVFPRndMode::Invalid);
}
inline static bool isValidRoundingMode(unsigned Mode) {
switch (Mode) {
default:
return false;
case RISCVFPRndMode::RNE:
case RISCVFPRndMode::RTZ:
case RISCVFPRndMode::RDN:
case RISCVFPRndMode::RUP:
case RISCVFPRndMode::RMM:
case RISCVFPRndMode::DYN:
return true;
}
}
} // namespace RISCVFPRndMode

namespace RISCVSysReg {
struct SysReg {
const char *Name;
const char *AltName;
const char *DeprecatedName;
unsigned Encoding;
FeatureBitset FeaturesRequired;
bool isRV32Only;
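
/// \returns true if this system register is legal for the active feature
/// set: RV32-only registers are rejected on RV64, and any required extension
/// features must all be enabled.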
bool haveRequiredFeatures(const FeatureBitset &ActiveFeatures) const {
if (isRV32Only && ActiveFeatures[RISCV::Feature64Bit])
return false;
if (FeaturesRequired.none())
return true;
return (FeaturesRequired & ActiveFeatures) == FeaturesRequired;
}
};
#define GET_SysRegsList_DECL
#include "RISCVGenSearchableTables.inc"
} // namespace RISCVSysReg

namespace RISCVInsnOpcode {
struct RISCVOpcode {
const char *Name;
unsigned Value;
};
#define GET_RISCVOpcodesList_DECL
#include "RISCVGenSearchableTables.inc"
} // namespace RISCVInsnOpcode

namespace RISCVABI {
enum ABI {
ABI_ILP32,
ABI_ILP32F,
ABI_ILP32D,
ABI_ILP32E,
ABI_LP64,
ABI_LP64F,
ABI_LP64D,
ABI_Unknown
};
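
// Returns the target ABI, or else a StringError if the requested ABIName is
// not supported for the given TT and FeatureBits combination.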
ABI computeTargetABI(const Triple &TT, FeatureBitset FeatureBits,
StringRef ABIName);
ABI getTargetABI(StringRef ABIName);
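
// Returns the register used to hold the stack pointer after realignment.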
MCRegister getBPReg();
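
// Returns the register holding the shadow call stack pointer.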
MCRegister getSCSPReg();
} // namespace RISCVABI

namespace RISCVFeatures {
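
// Validates whether the given combination of features is valid for the
// target triple; reports a fatal error if it is not.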
void validate(const Triple &TT, const FeatureBitset &FeatureBits);
llvm::Expected<std::unique_ptr<RISCVISAInfo>>
parseFeatureBits(bool IsRV64, const FeatureBitset &FeatureBits);
} // namespace RISCVFeatures

namespace RISCVVType {
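
// Is this an SEW value that can be encoded into the VTYPE format?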
inline static bool isValidSEW(unsigned SEW) {
return isPowerOf2_32(SEW) && SEW >= 8 && SEW <= 1024;
}
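
// Is this an LMUL value (given the Fractional flag) that can be encoded into
// the VTYPE format?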
inline static bool isValidLMUL(unsigned LMUL, bool Fractional) {
return isPowerOf2_32(LMUL) && LMUL <= 8 && (!Fractional || LMUL != 1);
}
unsigned encodeVTYPE(RISCVII::VLMUL VLMUL, unsigned SEW, bool TailAgnostic,
bool MaskAgnostic);
inline static RISCVII::VLMUL getVLMUL(unsigned VType) {
unsigned VLMUL = VType & 0x7;
return static_cast<RISCVII::VLMUL>(VLMUL);
}
std::pair<unsigned, bool> decodeVLMUL(RISCVII::VLMUL VLMUL);
inline static RISCVII::VLMUL encodeLMUL(unsigned LMUL, bool Fractional) {
assert(isValidLMUL(LMUL, Fractional) && "Unsupported LMUL");
unsigned LmulLog2 = Log2_32(LMUL);
return static_cast<RISCVII::VLMUL>(Fractional ? 8 - LmulLog2 : LmulLog2);
}
inline static unsigned decodeVSEW(unsigned VSEW) {
assert(VSEW < 8 && "Unexpected VSEW value");
return 1 << (VSEW + 3);
}
inline static unsigned encodeSEW(unsigned SEW) {
assert(isValidSEW(SEW) && "Unexpected SEW value");
return Log2_32(SEW) - 3;
}
inline static unsigned getSEW(unsigned VType) {
unsigned VSEW = (VType >> 3) & 0x7;
return decodeVSEW(VSEW);
}
inline static bool isTailAgnostic(unsigned VType) { return VType & 0x40; }
inline static bool isMaskAgnostic(unsigned VType) { return VType & 0x80; }
void printVType(unsigned VType, raw_ostream &OS);
} // namespace RISCVVType

} // namespace llvm

#endif // LLVM_LIB_TARGET_RISCV_MCTARGETDESC_RISCVBASEINFO_H