#include "SchedClassResolution.h"
#include "BenchmarkResult.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/FormatVariadic.h"
#include <cmath>
#include <limits>
#include <unordered_set>
#include <vector>
namespace llvm {
namespace exegesis {
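// Returns the non-redundant list of WriteProcRes used by the given sched
// class. ProcResGroups whose cycles are already fully accounted for by their
// subunits are dropped; groups that contribute cycles of their own keep only
// that remainder, which is then spread evenly over their subunits. This
// assumes entries for individual units appear before the groups containing
// them, as iterated by getWriteProcResBegin().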
static SmallVector<MCWriteProcResEntry, 8>
getNonRedundantWriteProcRes(const MCSchedClassDesc &SCDesc,
const MCSubtargetInfo &STI) {
SmallVector<MCWriteProcResEntry, 8> Result;
const auto &SM = STI.getSchedModel();
const unsigned NumProcRes = SM.getNumProcResourceKinds();
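  // Cycles already accounted for on each individual ProcResUnit.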
SmallVector<float, 32> ProcResUnitUsage(NumProcRes);
for (const auto *WPR = STI.getWriteProcResBegin(&SCDesc),
*const WPREnd = STI.getWriteProcResEnd(&SCDesc);
WPR != WPREnd; ++WPR) {
const MCProcResourceDesc *const ProcResDesc =
SM.getProcResource(WPR->ProcResourceIdx);
if (ProcResDesc->SubUnitsIdxBegin == nullptr) {
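      // This is a standalone ProcResUnit: count its cycles directly.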
Result.push_back({WPR->ProcResourceIdx, WPR->Cycles});
ProcResUnitUsage[WPR->ProcResourceIdx] += WPR->Cycles;
} else {
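      // This is a ProcResGroup: subtract the cycles already accounted for by
      // its subunits to see whether the group contributes anything on its own.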
float RemainingCycles = WPR->Cycles;
for (const auto *SubResIdx = ProcResDesc->SubUnitsIdxBegin;
SubResIdx != ProcResDesc->SubUnitsIdxBegin + ProcResDesc->NumUnits;
++SubResIdx) {
RemainingCycles -= ProcResUnitUsage[*SubResIdx];
}
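      // The subunits already cover all of this group's cycles; the entry is
      // redundant.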
if (RemainingCycles < 0.01f) {
continue;
}
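      // The group contributes cycles of its own: keep the remainder and spread
      // it evenly over the subunits for the benefit of later, larger groups.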
Result.push_back({WPR->ProcResourceIdx,
static_cast<uint16_t>(std::round(RemainingCycles))});
for (const auto *SubResIdx = ProcResDesc->SubUnitsIdxBegin;
SubResIdx != ProcResDesc->SubUnitsIdxBegin + ProcResDesc->NumUnits;
++SubResIdx) {
ProcResUnitUsage[*SubResIdx] += RemainingCycles / ProcResDesc->NumUnits;
}
}
}
return Result;
}
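// Distributes a pressure of `RemainingPressure` over `Subunits`, always
// filling the least-pressured subunits first so that the maximum value in
// `DensePressure` grows as little as possible ("water filling").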
static void distributePressure(float RemainingPressure,
SmallVector<uint16_t, 32> Subunits,
SmallVector<float, 32> &DensePressure) {
sort(Subunits, [&DensePressure](const uint16_t A, const uint16_t B) {
return DensePressure[A] < DensePressure[B];
});
const auto getPressureForSubunit = [&DensePressure,
&Subunits](size_t I) -> float & {
return DensePressure[Subunits[I]];
};
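  // Number of subunits currently sharing the minimal pressure.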
size_t NumMinimalSU = 1;
while (NumMinimalSU < Subunits.size() &&
getPressureForSubunit(NumMinimalSU) == getPressureForSubunit(0)) {
++NumMinimalSU;
}
while (RemainingPressure > 0.0f) {
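    // If every subunit is already at the same pressure, spread what remains
    // evenly and stop.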
if (NumMinimalSU == Subunits.size()) {
for (size_t I = 0; I < NumMinimalSU; ++I) {
getPressureForSubunit(I) += RemainingPressure / NumMinimalSU;
}
return;
}
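    // Otherwise raise the minimal subunits towards the next pressure level,
    // stopping early if the remaining pressure runs out before reaching it.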
const float MinimalPressure = getPressureForSubunit(NumMinimalSU - 1);
const float SecondToMinimalPressure = getPressureForSubunit(NumMinimalSU);
assert(MinimalPressure < SecondToMinimalPressure);
const float Increment = SecondToMinimalPressure - MinimalPressure;
if (RemainingPressure <= NumMinimalSU * Increment) {
for (size_t I = 0; I < NumMinimalSU; ++I) {
getPressureForSubunit(I) += RemainingPressure / NumMinimalSU;
}
return;
}
for (size_t I = 0; I < NumMinimalSU; ++I) {
getPressureForSubunit(I) = SecondToMinimalPressure;
RemainingPressure -= SecondToMinimalPressure;
}
while (NumMinimalSU < Subunits.size() &&
getPressureForSubunit(NumMinimalSU) == SecondToMinimalPressure) {
++NumMinimalSU;
}
}
}
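// Computes the idealized pressure on each ProcResUnit for the given
// WriteProcRes entries: unit entries add their cycles directly, while group
// entries are distributed over their subunits so as to minimize the maximum
// per-unit pressure. Returns the non-zero (ProcResIdx, pressure) pairs.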
std::vector<std::pair<uint16_t, float>>
computeIdealizedProcResPressure(const MCSchedModel &SM,
SmallVector<MCWriteProcResEntry, 8> WPRS) {
SmallVector<float, 32> DensePressure(SM.getNumProcResourceKinds());
sort(WPRS, [](const MCWriteProcResEntry &A, const MCWriteProcResEntry &B) {
return A.ProcResourceIdx < B.ProcResourceIdx;
});
for (const MCWriteProcResEntry &WPR : WPRS) {
const MCProcResourceDesc *const ProcResDesc =
SM.getProcResource(WPR.ProcResourceIdx);
if (ProcResDesc->SubUnitsIdxBegin == nullptr) {
DensePressure[WPR.ProcResourceIdx] += WPR.Cycles;
} else {
SmallVector<uint16_t, 32> Subunits(ProcResDesc->SubUnitsIdxBegin,
ProcResDesc->SubUnitsIdxBegin +
ProcResDesc->NumUnits);
distributePressure(WPR.Cycles, Subunits, DensePressure);
}
}
std::vector<std::pair<uint16_t, float>> Pressure;
for (unsigned I = 0, E = SM.getNumProcResourceKinds(); I < E; ++I) {
if (DensePressure[I] > 0.0f)
Pressure.emplace_back(I, DensePressure[I]);
}
return Pressure;
}
ResolvedSchedClass::ResolvedSchedClass(const MCSubtargetInfo &STI,
unsigned ResolvedSchedClassId,
bool WasVariant)
: SchedClassId(ResolvedSchedClassId),
SCDesc(STI.getSchedModel().getSchedClassDesc(ResolvedSchedClassId)),
WasVariant(WasVariant),
NonRedundantWriteProcRes(getNonRedundantWriteProcRes(*SCDesc, STI)),
IdealizedProcResPressure(computeIdealizedProcResPressure(
STI.getSchedModel(), NonRedundantWriteProcRes)) {
assert((SCDesc == nullptr || !SCDesc->isVariant()) &&
"ResolvedSchedClass should never be variant");
}
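// Resolves a variant sched class to the concrete sched class that applies to
// the given instruction, iterating in case the resolved class is itself
// variant. An invalid SchedClassId of 0 is returned unchanged.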
static unsigned ResolveVariantSchedClassId(const MCSubtargetInfo &STI,
const MCInstrInfo &InstrInfo,
unsigned SchedClassId,
const MCInst &MCI) {
const auto &SM = STI.getSchedModel();
while (SchedClassId && SM.getSchedClassDesc(SchedClassId)->isVariant()) {
SchedClassId = STI.resolveVariantSchedClass(SchedClassId, &MCI, &InstrInfo,
SM.getProcessorID());
}
return SchedClassId;
}
std::pair<unsigned /*SchedClassId*/, bool /*WasVariant*/>
ResolvedSchedClass::resolveSchedClassId(const MCSubtargetInfo &SubtargetInfo,
const MCInstrInfo &InstrInfo,
const MCInst &MCI) {
unsigned SchedClassId = InstrInfo.get(MCI.getOpcode()).getSchedClass();
const bool WasVariant = SchedClassId && SubtargetInfo.getSchedModel()
.getSchedClassDesc(SchedClassId)
->isVariant();
SchedClassId =
ResolveVariantSchedClassId(SubtargetInfo, InstrInfo, SchedClassId, MCI);
return std::make_pair(SchedClassId, WasVariant);
}
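// Interprets `NameOrId` either as a numeric ProcResIdx or as a ProcRes name
// and returns the corresponding index, or 0 if no resource matches.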
static unsigned findProcResIdx(const MCSubtargetInfo &STI,
const StringRef NameOrId) {
unsigned ProcResIdx = 0;
if (to_integer(NameOrId, ProcResIdx, 10))
return ProcResIdx;
const auto &SchedModel = STI.getSchedModel();
for (int I = 0, E = SchedModel.getNumProcResourceKinds(); I < E; ++I) {
if (NameOrId == SchedModel.getProcResource(I)->Name)
return I;
}
return 0;
}
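// Builds the point in measurement space that the sched model predicts for
// this sched class, in the same units as the measured `Representative` stats,
// so that prediction and measurement can be compared.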
std::vector<BenchmarkMeasure> ResolvedSchedClass::getAsPoint(
InstructionBenchmark::ModeE Mode, const MCSubtargetInfo &STI,
ArrayRef<PerInstructionStats> Representative) const {
const size_t NumMeasurements = Representative.size();
std::vector<BenchmarkMeasure> SchedClassPoint(NumMeasurements);
if (Mode == InstructionBenchmark::Latency) {
assert(NumMeasurements == 1 && "Latency is a single measure.");
BenchmarkMeasure &LatencyMeasure = SchedClassPoint[0];
LatencyMeasure.PerInstructionValue = 0.0;
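    // The predicted latency is the maximum write latency over all of the
    // sched class's write latency entries.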
for (unsigned I = 0; I < SCDesc->NumWriteLatencyEntries; ++I) {
const MCWriteLatencyEntry *const WLE =
STI.getWriteLatencyEntry(SCDesc, I);
LatencyMeasure.PerInstructionValue =
std::max<double>(LatencyMeasure.PerInstructionValue, WLE->Cycles);
}
} else if (Mode == InstructionBenchmark::Uops) {
for (auto I : zip(SchedClassPoint, Representative)) {
BenchmarkMeasure &Measure = std::get<0>(I);
const PerInstructionStats &Stats = std::get<1>(I);
StringRef Key = Stats.key();
uint16_t ProcResIdx = findProcResIdx(STI, Key);
if (ProcResIdx > 0) {
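        // The key names a processor resource: the prediction is its idealized
        // pressure, or 0 if this sched class does not use that resource.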
const auto ProcResPressureIt =
llvm::find_if(IdealizedProcResPressure,
[ProcResIdx](const std::pair<uint16_t, float> &WPR) {
return WPR.first == ProcResIdx;
});
Measure.PerInstructionValue =
ProcResPressureIt == IdealizedProcResPressure.end()
? 0.0
: ProcResPressureIt->second;
} else if (Key == "NumMicroOps") {
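        // The key is the number of micro-ops of the sched class.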
Measure.PerInstructionValue = SCDesc->NumMicroOps;
} else {
errs() << "expected `key` to be either a ProcResIdx or a ProcRes "
"name, got "
<< Key << "\n";
return {};
}
}
} else if (Mode == InstructionBenchmark::InverseThroughput) {
assert(NumMeasurements == 1 && "Inverse Throughput is a single measure.");
BenchmarkMeasure &RThroughputMeasure = SchedClassPoint[0];
RThroughputMeasure.PerInstructionValue =
MCSchedModel::getReciprocalThroughput(STI, *SCDesc);
} else {
llvm_unreachable("unimplemented measurement matching mode");
}
return SchedClassPoint;
}
} // namespace exegesis
} // namespace llvm