#include "llvm/MC/MCSchedule.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include <type_traits>

using namespace llvm;

static_assert(std::is_pod<MCSchedModel>::value,
              "We shouldn't have a static constructor here");
const MCSchedModel MCSchedModel::Default = {DefaultIssueWidth,
                                            DefaultMicroOpBufferSize,
                                            DefaultLoopMicroOpBufferSize,
                                            DefaultLoadLatency,
                                            DefaultHighLatency,
                                            DefaultMispredictPenalty,
                                            false,
                                            true,
                                            0,
                                            nullptr,
                                            nullptr,
                                            0,
                                            0,
                                            nullptr,
                                            nullptr};
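
// Maximum write latency across all write latency entries of the scheduling
// class, or a negative value if any entry reports an invalid latency.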
int MCSchedModel::computeInstrLatency(const MCSubtargetInfo &STI,
                                      const MCSchedClassDesc &SCDesc) {
  int Latency = 0;
  for (unsigned DefIdx = 0, DefEnd = SCDesc.NumWriteLatencyEntries;
       DefIdx != DefEnd; ++DefIdx) {
    // Lookup the definition's write latency in SubtargetInfo.
    const MCWriteLatencyEntry *WLEntry =
        STI.getWriteLatencyEntry(&SCDesc, DefIdx);
    // Early exit if we found an invalid latency.
    if (WLEntry->Cycles < 0)
      return WLEntry->Cycles;
    Latency = std::max(Latency, static_cast<int>(WLEntry->Cycles));
  }
  return Latency;
}
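
// Latency of a scheduling class looked up by index; variant classes cannot be
// resolved without an instruction and are treated as unsupported here.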
int MCSchedModel::computeInstrLatency(const MCSubtargetInfo &STI,
                                      unsigned SchedClass) const {
  const MCSchedClassDesc &SCDesc = *getSchedClassDesc(SchedClass);
  if (!SCDesc.isValid())
    return 0;
  if (!SCDesc.isVariant())
    return MCSchedModel::computeInstrLatency(STI, SCDesc);

  llvm_unreachable("unsupported variant scheduling class");
}
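
// Latency of a concrete instruction: resolve variant scheduling classes for
// this CPU first, then compute the latency of the resolved class.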
int MCSchedModel::computeInstrLatency(const MCSubtargetInfo &STI,
                                      const MCInstrInfo &MCII,
                                      const MCInst &Inst) const {
  unsigned SchedClass = MCII.get(Inst.getOpcode()).getSchedClass();
  const MCSchedClassDesc *SCDesc = getSchedClassDesc(SchedClass);
  if (!SCDesc->isValid())
    return 0;

  unsigned CPUID = getProcessorID();
  while (SCDesc->isVariant()) {
    SchedClass = STI.resolveVariantSchedClass(SchedClass, &Inst, &MCII, CPUID);
    SCDesc = getSchedClassDesc(SchedClass);
  }

  if (SchedClass)
    return MCSchedModel::computeInstrLatency(STI, *SCDesc);

  llvm_unreachable("unsupported variant scheduling class");
}
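
// Reciprocal throughput of a scheduling class, limited by the consumed
// processor resource with the lowest units-per-cycle ratio; falls back to
// NumMicroOps / IssueWidth when the class records no resource cycles.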
double
MCSchedModel::getReciprocalThroughput(const MCSubtargetInfo &STI,
                                      const MCSchedClassDesc &SCDesc) {
  Optional<double> Throughput;
  const MCSchedModel &SM = STI.getSchedModel();
  const MCWriteProcResEntry *I = STI.getWriteProcResBegin(&SCDesc);
  const MCWriteProcResEntry *E = STI.getWriteProcResEnd(&SCDesc);
  for (; I != E; ++I) {
    if (!I->Cycles)
      continue;
    unsigned NumUnits = SM.getProcResource(I->ProcResourceIdx)->NumUnits;
    double Temp = NumUnits * 1.0 / I->Cycles;
    Throughput = Throughput ? std::min(Throughput.value(), Temp) : Temp;
  }
  if (Throughput)
    return 1.0 / Throughput.value();

  // If no throughput value was calculated, assume that we can execute at the
  // maximum issue width scaled by number of micro-ops for the schedule class.
  return ((double)SCDesc.NumMicroOps) / SM.IssueWidth;
}
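
// Reciprocal throughput of a concrete instruction, resolving variant
// scheduling classes before delegating to the scheduling-class overload.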
double
MCSchedModel::getReciprocalThroughput(const MCSubtargetInfo &STI,
                                      const MCInstrInfo &MCII,
                                      const MCInst &Inst) const {
  unsigned SchedClass = MCII.get(Inst.getOpcode()).getSchedClass();
  const MCSchedClassDesc *SCDesc = getSchedClassDesc(SchedClass);

  // If there's no valid class, assume the instruction executes at the maximum
  // issue width.
  if (!SCDesc->isValid())
    return 1.0 / IssueWidth;

  unsigned CPUID = getProcessorID();
  while (SCDesc->isVariant()) {
    SchedClass = STI.resolveVariantSchedClass(SchedClass, &Inst, &MCII, CPUID);
    SCDesc = getSchedClassDesc(SchedClass);
  }

  if (SchedClass)
    return MCSchedModel::getReciprocalThroughput(STI, *SCDesc);

  llvm_unreachable("unsupported variant scheduling class");
}
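
// Reciprocal throughput computed from an instruction itinerary instead of a
// per-operand machine model.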
double
MCSchedModel::getReciprocalThroughput(unsigned SchedClass,
                                      const InstrItineraryData &IID) {
  Optional<double> Throughput;
  const InstrStage *I = IID.beginStage(SchedClass);
  const InstrStage *E = IID.endStage(SchedClass);
  for (; I != E; ++I) {
    if (!I->getCycles())
      continue;
    double Temp = countPopulation(I->getUnits()) * 1.0 / I->getCycles();
    Throughput = Throughput ? std::min(Throughput.value(), Temp) : Temp;
  }
  if (Throughput)
    return 1.0 / Throughput.value();

  // If there are no execution resources specified for this class, assume it
  // can execute at the maximum default issue width.
  return 1.0 / DefaultIssueWidth;
}
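
// Maximum forwarding delay, in cycles, for reads that depend on writes of the
// given write resource; negative ReadAdvance entries model the delay, so the
// most negative matching entry determines the result.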
unsigned
MCSchedModel::getForwardingDelayCycles(ArrayRef<MCReadAdvanceEntry> Entries,
                                       unsigned WriteResourceID) {
  if (Entries.empty())
    return 0;

  int DelayCycles = 0;
  for (const MCReadAdvanceEntry &E : Entries) {
    if (E.WriteResourceID != WriteResourceID)
      continue;
    DelayCycles = std::min(DelayCycles, E.Cycles);
  }

  return std::abs(DelayCycles);
}