Legend (link targets in the cross-reference browser):
reference, declaration → definition
definition → references, declarations, derived classes, virtual overrides
reference to multiple definitions → definitions
unreferenced
Declarations
lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.h 21 class GCNSubtarget;
lib/Target/AMDGPU/AMDGPUAsmPrinter.h 37 class GCNSubtarget;
lib/Target/AMDGPU/AMDGPUInstrInfo.h 24 class GCNSubtarget;
lib/Target/AMDGPU/AMDGPUInstructionSelector.h 36 class GCNSubtarget;
lib/Target/AMDGPU/AMDGPULegalizerInfo.h 25 class GCNSubtarget;
lib/Target/AMDGPU/AMDGPUMachineFunction.h 17 class GCNSubtarget;
lib/Target/AMDGPU/AMDGPURegisterBankInfo.h 28 class GCNSubtarget;
lib/Target/AMDGPU/AMDGPURegisterInfo.h 23 class GCNSubtarget;
lib/Target/AMDGPU/GCNHazardRecognizer.h 31 class GCNSubtarget;
lib/Target/AMDGPU/GCNSchedStrategy.h 23 class GCNSubtarget;
lib/Target/AMDGPU/SIFrameLowering.h 19 class GCNSubtarget;
lib/Target/AMDGPU/SIInstrInfo.h 42 class GCNSubtarget;
lib/Target/AMDGPU/SIRegisterInfo.h 23 class GCNSubtarget;
lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h 32 class GCNSubtarget;
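All of the headers above forward-declare GCNSubtarget and hold it only by reference or pointer; the full class definition (AMDGPUSubtarget.h at this LLVM revision) is included by the corresponding .cpp files. A minimal sketch of that pattern follows, with a hypothetical class name (MyGCNHelper) that does not appear in the listing:

    // MyGCNHelper.h (hypothetical) -- a forward declaration suffices for
    // reference/pointer members and parameters; no GCNSubtarget members are used here.
    class GCNSubtarget;

    class MyGCNHelper {
      const GCNSubtarget &ST;
    public:
      explicit MyGCNHelper(const GCNSubtarget &ST) : ST(ST) {}
    };

    // MyGCNHelper.cpp (hypothetical) would #include "AMDGPUSubtarget.h"
    // before calling any GCNSubtarget member functions.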
References
gen/lib/Target/AMDGPU/AMDGPUGenSubtargetInfo.inc 706 if (Bits[AMDGPU::FeatureGFX9] && Gen < GCNSubtarget::GFX9) Gen = GCNSubtarget::GFX9;
708 if (Bits[AMDGPU::FeatureGFX10] && Gen < GCNSubtarget::GFX10) Gen = GCNSubtarget::GFX10;
750 if (Bits[AMDGPU::FeatureSeaIslands] && Gen < GCNSubtarget::SEA_ISLANDS) Gen = GCNSubtarget::SEA_ISLANDS;
751 if (Bits[AMDGPU::FeatureSouthernIslands] && Gen < GCNSubtarget::SOUTHERN_ISLANDS) Gen = GCNSubtarget::SOUTHERN_ISLANDS;
763 if (Bits[AMDGPU::FeatureVolcanicIslands] && Gen < GCNSubtarget::VOLCANIC_ISLANDS) Gen = GCNSubtarget::VOLCANIC_ISLANDS;
include/llvm/CodeGen/MachineFunction.h 481 template<typename STC> const STC &getSubtarget() const {
include/llvm/Target/TargetMachine.h 148 template <typename STC> const STC &getSubtarget(const Function &F) const {
lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp 265 const GCNSubtarget &ST = TM->getSubtarget<GCNSubtarget>(F);
lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp 73 const GCNSubtarget& ST = F.getSubtarget<GCNSubtarget>();
197 const GCNSubtarget &STM = MF->getSubtarget<GCNSubtarget>();
259 const GCNSubtarget &STM = MF->getSubtarget<GCNSubtarget>();
393 if (MF.getSubtarget<GCNSubtarget>().isWave32()) {
431 const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();
486 Info.getTotalNumSGPRs(MF.getSubtarget<GCNSubtarget>()),
571 const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();
603 const GCNSubtarget &ST) const {
609 const GCNSubtarget &ST) const {
618 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
920 const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();
1195 const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();
1222 const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/AMDGPUAsmPrinter.h 54 int32_t getTotalNumSGPRs(const GCNSubtarget &ST) const;
55 int32_t getTotalNumVGPRs(const GCNSubtarget &ST) const;
lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp 46 const GCNSubtarget *ST;
90 ST = &TM.getSubtarget<GCNSubtarget>(F);
lib/Target/AMDGPU/AMDGPUCallLowering.cpp 312 auto const &ST = B.getMF().getSubtarget<GCNSubtarget>();
441 const GCNSubtarget *Subtarget = &MF.getSubtarget<GCNSubtarget>();
568 const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp 66 const GCNSubtarget *ST = nullptr;
1032 ST = &TM.getSubtarget<GCNSubtarget>(F);
lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp 213 const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();
881 const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp 129 const GCNSubtarget *Subtarget;
394 Subtarget = &MF.getSubtarget<GCNSubtarget>();
1111 const GCNSubtarget *ST = static_cast<const GCNSubtarget *>(Subtarget);
1753 GCNSubtarget::Generation Gen = Subtarget->getGeneration();
2046 const GCNSubtarget *ST = static_cast<const GCNSubtarget *>(Subtarget);
lib/Target/AMDGPU/AMDGPUISelLowering.cpp 4521 const GCNSubtarget &ST =
4522 DAG.getMachineFunction().getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/AMDGPUInstrInfo.cpp 28 AMDGPUInstrInfo::AMDGPUInstrInfo(const GCNSubtarget &ST) { }
lib/Target/AMDGPU/AMDGPUInstrInfo.h 31 explicit AMDGPUInstrInfo(const GCNSubtarget &st);
lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp 49 const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
lib/Target/AMDGPU/AMDGPUInstructionSelector.h 50 AMDGPUInstructionSelector(const GCNSubtarget &STI,
174 const GCNSubtarget &STI;
lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp 163 AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
1141 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1233 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/AMDGPULegalizerInfo.h 29 const GCNSubtarget &ST;
32 AMDGPULegalizerInfo(const GCNSubtarget &ST,
lib/Target/AMDGPU/AMDGPULibCalls.cpp 1393 const GCNSubtarget &ST = TM->getSubtarget<GCNSubtarget>(*F);
lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp 69 const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
lib/Target/AMDGPU/AMDGPUMCInstLower.cpp 222 const GCNSubtarget &STI = MF->getSubtarget<GCNSubtarget>();
261 const GCNSubtarget &STI = MF->getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp 2875 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp 88 AMDGPURegisterBankInfo::AMDGPURegisterBankInfo(const GCNSubtarget &ST)
lib/Target/AMDGPU/AMDGPURegisterBankInfo.h 43 const GCNSubtarget &Subtarget;
144 AMDGPURegisterBankInfo(const GCNSubtarget &STI);
lib/Target/AMDGPU/AMDGPURegisterInfo.cpp 130 MF.getSubtarget<GCNSubtarget>().getFrameLowering();
lib/Target/AMDGPU/AMDGPUSubtarget.cpp 70 GCNSubtarget &
844 const GCNSubtarget &ST = DAGInstrs->MF.getSubtarget<GCNSubtarget>();
896 return static_cast<const AMDGPUSubtarget&>(MF.getSubtarget<GCNSubtarget>());
903 return static_cast<const AMDGPUSubtarget&>(TM.getSubtarget<GCNSubtarget>(F));
lib/Target/AMDGPU/AMDGPUSubtarget.h 401 GCNSubtarget &initializeSubtargetDependencies(const Triple &TT,
lib/Target/AMDGPU/AMDGPUTargetMachine.cpp 510 const GCNSubtarget *GCNTargetMachine::getSubtargetImpl(const Function &F) const {
523 I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
817 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/AMDGPUTargetMachine.h 96 mutable StringMap<std::unique_ptr<GCNSubtarget>> SubtargetMap;
106 const GCNSubtarget *getSubtargetImpl(const Function &) const override;
lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h 72 const GCNSubtarget *ST;
104 const GCNSubtarget *getST() const { return ST; }
lib/Target/AMDGPU/GCNDPPCombine.cpp 555 auto &ST = MF.getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/GCNHazardRecognizer.cpp 44 ST(MF.getSubtarget<GCNSubtarget>()),
lib/Target/AMDGPU/GCNHazardRecognizer.h 47 const GCNSubtarget &ST;
lib/Target/AMDGPU/GCNIterativeScheduler.cpp 110 const auto &ST = MF.getSubtarget<GCNSubtarget>();
134 const auto &ST = MF.getSubtarget<GCNSubtarget>();
420 const auto &ST = MF.getSubtarget<GCNSubtarget>();
435 const auto &ST = MF.getSubtarget<GCNSubtarget>();
452 const auto &ST = MF.getSubtarget<GCNSubtarget>();
489 const auto &ST = MF.getSubtarget<GCNSubtarget>();
543 const auto &ST = MF.getSubtarget<GCNSubtarget>();
577 const auto &ST = MF.getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/GCNNSAReassign.cpp 69 const GCNSubtarget *ST;
224 ST = &MF.getSubtarget<GCNSubtarget>();
225 if (ST->getGeneration() < GCNSubtarget::GFX10)
lib/Target/AMDGPU/GCNRegBankReassign.cpp 137 const GCNSubtarget *ST;
728 ST = &MF.getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/GCNRegPressure.cpp 139 bool GCNRegPressure::less(const GCNSubtarget &ST,
185 void GCNRegPressure::print(raw_ostream &OS, const GCNSubtarget *ST) const {
lib/Target/AMDGPU/GCNRegPressure.h 54 unsigned getOccupancy(const GCNSubtarget &ST) const {
64 bool higherOccupancy(const GCNSubtarget &ST, const GCNRegPressure& O) const {
68 bool less(const GCNSubtarget &ST, const GCNRegPressure& O,
79 void print(raw_ostream &OS, const GCNSubtarget *ST = nullptr) const;
lib/Target/AMDGPU/GCNSchedStrategy.cpp 37 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
315 ST(MF.getSubtarget<GCNSubtarget>()),
lib/Target/AMDGPU/GCNSchedStrategy.h 67 const GCNSubtarget &ST;
lib/Target/AMDGPU/SIAddIMGInit.cpp 64 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/SIAnnotateControlFlow.cpp 79 void initialize(Module &M, const GCNSubtarget &ST);
137 void SIAnnotateControlFlow::initialize(Module &M, const GCNSubtarget &ST) {
328 initialize(*F.getParent(), TM.getSubtarget<GCNSubtarget>(F));
lib/Target/AMDGPU/SIFixSGPRCopies.cpp 589 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/SIFixVGPRCopies.cpp 49 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/SIFixupVectorISel.cpp 158 const GCNSubtarget &ST,
224 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/SIFoldOperands.cpp 92 const GCNSubtarget *ST;
188 const GCNSubtarget &ST) {
1447 ST = &MF.getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/SIFormMemoryClauses.cpp 72 const GCNSubtarget *ST;
308 ST = &MF.getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/SIFrameLowering.cpp 27 static ArrayRef<MCPhysReg> getAllSGPR128(const GCNSubtarget &ST,
33 static ArrayRef<MCPhysReg> getAllSGPRs(const GCNSubtarget &ST,
187 void SIFrameLowering::emitFlatScratchInit(const GCNSubtarget &ST,
270 const GCNSubtarget &ST,
320 const GCNSubtarget &ST, const SIInstrInfo *TII, const SIRegisterInfo *TRI,
404 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
533 void SIFrameLowering::emitEntryFunctionScratchSetup(const GCNSubtarget &ST,
592 const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
692 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
834 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
945 const SIRegisterInfo *RI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();
956 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
994 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1068 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1105 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1150 MF.getSubtarget<GCNSubtarget>().getRegisterInfo()->needsStackRealignment(MF) ||
lib/Target/AMDGPU/SIFrameLowering.h 58 void emitFlatScratchInit(const GCNSubtarget &ST,
63 const GCNSubtarget &ST,
70 const GCNSubtarget &ST, const SIInstrInfo *TII, const SIRegisterInfo *TRI,
74 void emitEntryFunctionScratchSetup(const GCNSubtarget &ST, MachineFunction &MF,
lib/Target/AMDGPU/SIISelLowering.cpp 114 const GCNSubtarget &STI)
752 const GCNSubtarget *SITargetLowering::getSubtarget() const {
919 *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
924 *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
982 *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
1027 MFI->getGWSPSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo());
1871 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
3179 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3279 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3391 const GCNSubtarget &ST) {
3482 const GCNSubtarget &ST) {
3601 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3750 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3840 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
4606 if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
4621 DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMTrap, SL, MVT::i16),
4633 if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
4646 DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMDebugTrap, SL, MVT::i16)
5320 const GCNSubtarget* ST = &MF.getSubtarget<GCNSubtarget>();
5858 return DAG.getConstant(MF.getSubtarget<GCNSubtarget>().getWavefrontSize(),
6823 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
7666 const SDLoc &SL, const GCNSubtarget *ST) {
10677 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
10833 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/SIISelLowering.h 25 const GCNSubtarget *Subtarget;
221 SITargetLowering(const TargetMachine &tm, const GCNSubtarget &STI);
223 const GCNSubtarget *getSubtarget() const;
lib/Target/AMDGPU/SIInsertSkips.cpp 247 const GCNSubtarget &ST = MBB.getParent()->getSubtarget<GCNSubtarget>();
275 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
346 const GCNSubtarget &ST = MBB.getParent()->getSubtarget<GCNSubtarget>();
430 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/SIInsertWaitcnts.cpp 196 WaitcntBrackets(const GCNSubtarget *SubTarget) : ST(SubTarget) {
350 const GCNSubtarget *ST = nullptr;
368 const GCNSubtarget *ST = nullptr;
1445 ST = &MF.getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/SIInstrInfo.cpp 86 SIInstrInfo::SIInstrInfo(const GCNSubtarget &ST)
820 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
1233 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
2938 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3466 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3634 if (isFLAT(MI) && !MF->getSubtarget<GCNSubtarget>().hasFlatInstOffsets()) {
3955 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
4323 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
4406 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
6245 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
6314 static SIEncodingFamily subtargetEncodingFamily(const GCNSubtarget &ST) {
lib/Target/AMDGPU/SIInstrInfo.h 48 const GCNSubtarget &ST;
169 explicit SIInstrInfo(const GCNSubtarget &ST);
lib/Target/AMDGPU/SILoadStoreOptimizer.cpp 205 const GCNSubtarget *STM = nullptr;
214 static bool widthsFit(const GCNSubtarget &STM, const CombineInfo &CI);
735 bool SILoadStoreOptimizer::widthsFit(const GCNSubtarget &STM,
1833 STM = &MF.getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/SILowerControlFlow.cpp 495 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/SILowerI1Copies.cpp 58 const GCNSubtarget *ST = nullptr;
428 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
436 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
459 ST = &MF->getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/SILowerSGPRSpills.cpp 187 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
231 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/SIMachineFunctionInfo.cpp 51 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
183 const GCNSubtarget& ST = MF.getSubtarget<GCNSubtarget>();
255 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
269 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
329 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/SIMemoryLegalizer.cpp 257 SICacheControl(const GCNSubtarget &ST);
262 static std::unique_ptr<SICacheControl> create(const GCNSubtarget &ST);
322 SIGfx6CacheControl(const GCNSubtarget &ST) : SICacheControl(ST) {};
346 SIGfx7CacheControl(const GCNSubtarget &ST) : SIGfx6CacheControl(ST) {};
367 SIGfx10CacheControl(const GCNSubtarget &ST, bool CuMode) :
650 SICacheControl::SICacheControl(const GCNSubtarget &ST) {
656 std::unique_ptr<SICacheControl> SICacheControl::create(const GCNSubtarget &ST) {
657 GCNSubtarget::Generation Generation = ST.getGeneration();
860 const GCNSubtarget &STM = MBB.getParent()->getSubtarget<GCNSubtarget>();
1288 CC = SICacheControl::create(MF.getSubtarget<GCNSubtarget>());
lib/Target/AMDGPU/SIModeRegister.cpp 377 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/SIOptimizeExecMasking.cpp 59 static unsigned isCopyFromExec(const MachineInstr &MI, const GCNSubtarget &ST) {
77 static unsigned isCopyToExec(const MachineInstr &MI, const GCNSubtarget &ST) {
239 const GCNSubtarget &ST,
271 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp 86 const GCNSubtarget &ST) {
96 static bool isFullExecCopy(const MachineInstr& MI, const GCNSubtarget& ST) {
109 const GCNSubtarget& ST) {
123 const GCNSubtarget& ST) {
189 const GCNSubtarget &ST,
297 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/SIPeepholeSDWA.cpp 92 bool isConvertibleToSDWA(MachineInstr &MI, const GCNSubtarget &ST) const;
94 const GCNSubtarget &ST) const;
96 void legalizeScalarOperands(MachineInstr &MI, const GCNSubtarget &ST) const;
881 const GCNSubtarget &ST) const {
938 const GCNSubtarget &ST) const {
1174 const GCNSubtarget &ST) const {
1205 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/SIPreAllocateWWMRegs.cpp 166 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/SIRegisterInfo.cpp 57 SIRegisterInfo::SIRegisterInfo(const GCNSubtarget &ST) :
543 static MachineInstrBuilder spillVGPRtoAGPR(const GCNSubtarget &ST,
574 static bool buildMUBUFOffsetLoadStore(const GCNSubtarget &ST,
lib/Target/AMDGPU/SIRegisterInfo.h 30 const GCNSubtarget &ST;
43 SIRegisterInfo(const GCNSubtarget &ST);
lib/Target/AMDGPU/SIShrinkInstructions.cpp 227 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
315 static bool shrinkScalarLogicOp(const GCNSubtarget &ST,
554 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/SIWholeQuadMode.cpp 151 const GCNSubtarget *ST;
883 ST = &MF.getSubtarget<GCNSubtarget>();
lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp 1266 const GCNSubtarget *Subtarget, uint32_t Align) {
lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h 644 const GCNSubtarget *Subtarget, uint32_t Align = 4);
usr/include/c++/7.4.0/bits/move.h 72 constexpr _Tp&&
83 constexpr _Tp&&
usr/include/c++/7.4.0/bits/unique_ptr.h 68 default_delete(const default_delete<_Up>&) noexcept { }
72 operator()(_Tp* __ptr) const
74 static_assert(!is_void<_Tp>::value,
76 static_assert(sizeof(_Tp)>0,
122 using type = _Up*;
137 using pointer = typename _Ptr<_Tp, _Dp>::type;
161 typename __uniq_ptr_impl<_Tp, _Up>::_DeleterConstraint::type;
163 __uniq_ptr_impl<_Tp, _Dp> _M_t;
166 using pointer = typename __uniq_ptr_impl<_Tp, _Dp>::pointer;
167 using element_type = _Tp;
252 unique_ptr(unique_ptr<_Up, _Ep>&& __u) noexcept
297 __safe_conversion_up<_Up, _Ep>,
301 operator=(unique_ptr<_Up, _Ep>&& __u) noexcept
811 { typedef unique_ptr<_Tp> __single_object; };
823 inline typename _MakeUniq<_Tp>::__single_object
824 make_unique(_Args&&... __args)
825 { return unique_ptr<_Tp>(new _Tp(std::forward<_Args>(__args)...)); }
usr/include/c++/7.4.0/type_traits 215 : public __is_void_helper<typename remove_cv<_Tp>::type>::type
581 : public __or_<is_lvalue_reference<_Tp>,
582 is_rvalue_reference<_Tp>>::type
601 : public __not_<__or_<is_function<_Tp>, is_reference<_Tp>,
602 is_void<_Tp>>>::type
638 : public __or_<is_object<_Tp>, is_reference<_Tp>>::type
1554 { typedef _Tp type; };
1563 { typedef _Tp type; };
1574 remove_const<typename remove_volatile<_Tp>::type>::type type;
1645 { typedef _Tp& type; };
1650 : public __add_lvalue_reference_helper<_Tp>
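Nearly every reference above reaches the subtarget through one of two accessors: MachineFunction::getSubtarget<GCNSubtarget>() in MachineFunction passes (MachineFunction.h 481) and TargetMachine::getSubtarget<GCNSubtarget>(F) in IR passes that only have a Function (TargetMachine.h 148); generation checks then compare getGeneration() against enumerators such as GCNSubtarget::GFX10. A minimal sketch of both patterns, with hypothetical helper names (isGFX10Plus, subtargetFor) that are not in the listing:

    // Hypothetical helpers; only the getSubtarget<> calls, getGeneration(), and
    // GCNSubtarget::GFX10 come from the listing above.
    #include "AMDGPUSubtarget.h"
    #include "llvm/CodeGen/MachineFunction.h"
    #include "llvm/IR/Function.h"
    #include "llvm/Target/TargetMachine.h"
    using namespace llvm;

    // MachineFunction passes: the per-function subtarget hangs off the MachineFunction.
    static bool isGFX10Plus(const MachineFunction &MF) {
      const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
      return ST.getGeneration() >= GCNSubtarget::GFX10;
    }

    // IR passes: ask the TargetMachine for the subtarget of a specific Function,
    // since functions may carry different target-cpu/target-features attributes.
    static const GCNSubtarget &subtargetFor(const TargetMachine &TM, const Function &F) {
      return TM.getSubtarget<GCNSubtarget>(F);
    }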