References to RI in lib/Target/AMDGPU/SIInstrInfo.cpp:

482   : RI.getPhysRegClass(Reg);
484   return (NumLoads * (RI.getRegSizeInBits(*DstRC) / 8)) <= LoadClusterThreshold;
528   const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg);
617   if (!Def->definesRegister(SrcReg, &RI))
630   if (I->modifiesRegister(DefOp.getReg(), &RI))
650   unsigned MaxVGPRs = RI.getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
665   if (!Tmp2 || RI.getHWRegIndex(Tmp2) >= MaxVGPRs)
683   if (RI.isSGPRClass(RC)) {
685   if (!(RI.getRegSizeInBits(*RC) % 64)) {
693   if (!RI.isSGPRClass(RI.getPhysRegClass(SrcReg))) {
697   } else if (RI.hasAGPRs(RC)) {
698   Opcode = RI.hasVGPRs(RI.getPhysRegClass(SrcReg)) ?
700   } else if (RI.hasVGPRs(RC) && RI.hasAGPRs(RI.getPhysRegClass(SrcReg))) {
704   ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize);
705   bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);
715   copyPhysReg(MBB, MI, DL, RI.getSubReg(DestReg, SubIdx),
716   RI.getSubReg(SrcReg, SubIdx), KillSrc);
721   get(Opcode), RI.getSubReg(DestReg, SubIdx));
723   Builder.addReg(RI.getSubReg(SrcReg, SubIdx));
787   if (RI.isSGPRClass(RegClass)) {
788   if (RI.getRegSizeInBits(*RegClass) > 32) {
797   ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RegClass, EltSize);
802   get(Opcode), RI.getSubReg(DestReg, Idx));
822   RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
897   Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
915   Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
945   Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
958   Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
968   if (RI.hasAGPRs(DstRC))
970   if (RI.getRegSizeInBits(*DstRC) == 32) {
971   return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
972   } else if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC)) {
974   } else if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC)) {
1063  if (RI.isSGPRClass(RC)) {
1086  if (RI.spillSGPRToVGPR())
1091  unsigned Opcode = RI.hasAGPRs(RC) ? getAGPRSpillSaveOpcode(SpillSize)
1096  if (RI.hasAGPRs(RC)) {
1191  if (RI.isSGPRClass(RC)) {
1202  if (RI.spillSGPRToVGPR())
1212  unsigned Opcode = RI.hasAGPRs(RC) ? getAGPRSpillRestoreOpcode(SpillSize)
1215  if (RI.hasAGPRs(RC)) {
1244  TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass,
1420  Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
1421  Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
1437  .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
1440  .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
1489  .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
1506  Register RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
1507  Register RegHi = RI.getSubReg(Reg, AMDGPU::sub1);
1579  MovDPP.addDef(RI.getSubReg(Dst, Sub));
1597  MovDPP.addReg(RI.getSubReg(Src, Sub));
2133  return RI.hasVGPRs(RC) && NumInsts <= 6;
2150  return RI.isSGPRClass(RC);
2169  unsigned DstSize = RI.getRegSizeInBits(*DstRC);
2333  bool isVGPRCopy = RI.isVGPR(*MRI, UseMI.getOperand(0).getReg());
2335  if (RI.isAGPR(*MRI, UseMI.getOperand(0).getReg())) {
2374  if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))
2377  if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg())))
2441  RI.isSGPRClass(RI.getPhysRegClass(Src0->getReg())))) ||
2444  RI.isSGPRClass(MRI->getRegClass(Src0->getReg())))))
2458  RI.isSGPRClass(RI.getPhysRegClass(Src1->getReg()))) ||
2460  RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
2524  if (getMemOperandWithOffset(MIa, BaseOp0, Offset0, &RI) &&
2525  getMemOperandWithOffset(MIb, BaseOp1, Offset1, &RI)) {
2657  !RI.isSGPRReg(MBB->getParent()->getRegInfo(), Src0->getReg()))) {
2732  MI.modifiesRegister(AMDGPU::EXEC, &RI) ||
2792  if (!RI.isSGPRReg(MRI, MI.getOperand(0).getReg()))
2796  return MI.readsRegister(AMDGPU::EXEC, &RI);
2807  return !isSALU(MI) || MI.readsRegister(AMDGPU::EXEC, &RI);
2945  return RI.opCanUseInlineConstant(OpInfo.OperandType);
2948  if (!RI.opCanUseLiteralConstant(OpInfo.OperandType))
3006  if (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()))
3015  if (!Src2->isReg() || !RI.isVGPR(MRI, Src2->getReg()) ||
3026  if (Src1 && (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()) ||
3115  return RI.isSGPRClass(MRI.getRegClass(MO.getReg()));
3215  const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI);
3292  const TargetRegisterClass *RC = RI.getRegClass(RegClass);
3318  if (!MO.isReg() || !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.getReg()))) {
3416  uint32_t DstSize = RI.getRegSizeInBits(*DstRC) / 32;
3455  return !RI.regsOverlap(SGPRUsed, SGPR);
3607  !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) {
3740  RI.isAGPR(MRI, MI.getOperand(0).getReg()) ?
3817  return RI.getPhysRegClass(Reg);
3821  return RI.getRegClass(RCID);
3832  const TargetRegisterClass *RC = RI.getRegClass(RCID);
3837  else if (RI.isSGPRClass(RC))
3840  const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
3841  if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC))
3922  : RI.getPhysRegClass(Reg);
3924  const TargetRegisterClass *DRC = RI.getRegClass(OpInfo.RegClass);
3927  const TargetRegisterClass *SuperRC = RI.getLargestLegalSuperClass(RC, *MF);
3931  DRC = RI.getMatchingSuperRegClass(SuperRC, DRC, MO.getSubReg());
3957  OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr;
4027  Src0.isReg() && (RI.isSGPRReg(MRI, Src0.getReg()) ||
4036  if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) {
4042  if (Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) {
4053  if (Src0.isReg() && RI.isAGPR(MRI, Src0.getReg()))
4056  if (Src1.isReg() && RI.isAGPR(MRI, Src1.getReg()))
4068  RI.isVGPR(MRI, Src1.getReg())) {
4140  if (Src1.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) {
4146  if (Src2.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src2.getReg()))) {
4186  if (RI.hasAGPRs(MRI.getRegClass(MO.getReg())) &&
4192  if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
4214  const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC);
4216  unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32;
4218  if (RI.hasAGPRs(VRC)) {
4219  VRC = RI.getEquivalentVGPRClass(VRC);
4239  .addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
4248  MIB.addImm(RI.getSubRegFromChannel(i));
4261  if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) {
4266  if (SOff && !RI.isSGPRClass(MRI.getRegClass(SOff->getReg()))) {
4281  const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg(
4282  RI.getRegClassForReg(MRI, OpReg), OpSubReg);
4310  if (!RI.isSGPRClass(DstRC) && !Copy->readsRegister(AMDGPU::EXEC, &RI) &&
4547  if (RI.hasVectorRegisters(OpRC)) {
4557  if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) {
4563  VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
4564  ? RI.getEquivalentAGPRClass(SRC)
4565  : RI.getEquivalentVGPRClass(SRC);
4567  VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
4568  ? RI.getEquivalentAGPRClass(VRC)
4569  : RI.getEquivalentVGPRClass(VRC);
4598  if (RI.hasVGPRs(DstRC)) {
4608  const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC);
4638  if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg())))
4652  if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) {
4658  if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) {
4672  if (RI.getCommonSubClass(MRI.getRegClass(Rsrc->getReg()),
4673  RI.getRegClass(RsrcRC))) {
4702  const auto *BoolXExecRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
5067  NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) {
5191  RI.isSGPRClass(MRI.getRegClass(Src0.getReg()));
5193  RI.isSGPRClass(MRI.getRegClass(Src1.getReg()));
5303  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
5309  const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
5310  const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
5347  const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
5364  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
5365  const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
5430  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
5435  const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
5447  const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
5448  const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
5496  if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) {
5537  const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);
5640  if (!RI.hasVectorRegisters(getOpRegClass(UseMI, OpNo))) {
5726  if (MI.findRegisterUseOperandIdx(AMDGPU::SCC, false, &RI) != -1)
5729  if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != -1)
5750  if (RI.hasAGPRs(SrcRC)) {
5751  if (RI.hasAGPRs(NewDstRC))
5758  NewDstRC = RI.getEquivalentAGPRClass(NewDstRC);
5761  NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
5767  if (RI.hasVGPRs(NewDstRC) || NewDstRC == &AMDGPU::VReg_1RegClass)
5770  NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
5814  const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass);
5815  bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
5822  if (RI.isSGPRClass(RegRC))
6075  Register DstReg = MRI.createVirtualRegister(RI.getBoolRC());
6102  Register DstReg = MRI.createVirtualRegister(RI.getBoolRC());
6103  Register BackEdgeReg = MRI.createVirtualRegister(RI.getBoolRC());
6113  Register ZeroReg = MRI.createVirtualRegister(RI.getBoolRC());
6185  MI.modifiesRegister(AMDGPU::EXEC, &RI);
6197  Register UnusedCarry = MRI.createVirtualRegister(RI.getBoolRC());
6198  MRI.setRegAllocationHint(UnusedCarry, 0, RI.getVCC());
6212  Register UnusedCarry = RS.scavengeRegister(RI.getBoolRC(), I, 0, false);
6266  return RI.getRegClass(RCID)->hasSubClassEq(&AMDGPU::SGPR_128RegClass);
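
The densest cluster of hits above, lines 683-723, is the physical-register copy path: a wide copy is split into per-subregister moves via RI.getRegSplitParts, RI.getHWRegIndex, and RI.getSubReg. Below is a minimal sketch of that pattern, assuming a fixed 32-bit element size and a caller-supplied move opcode; the helper name emitSplitCopy is hypothetical, not the in-tree function, and the real copyPhysReg additionally picks the opcode per register class and handles SGPR/AGPR cases.

// Sketch of the subregister-splitting copy pattern visible at lines
// 704-723 above. Simplified; `emitSplitCopy` is a hypothetical helper
// name, not an LLVM API.
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"

using namespace llvm;

static void emitSplitCopy(const SIRegisterInfo &RI, MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MI, const DebugLoc &DL,
                          const MCInstrDesc &MovDesc, MCRegister DestReg,
                          MCRegister SrcReg, bool KillSrc) {
  const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg);
  // Split the wide register class into 32-bit pieces (EltSize in bytes).
  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, /*EltSize=*/4);
  // Copy low-to-high or high-to-low so an overlapping source subregister
  // is never clobbered before it has been read.
  bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);
  for (unsigned Idx = 0, E = SubIndices.size(); Idx != E; ++Idx) {
    int16_t SubIdx = Forward ? SubIndices[Idx] : SubIndices[E - Idx - 1];
    // Kill the source only on the final piece of the copy.
    bool UseKill = KillSrc && Idx == E - 1;
    BuildMI(MBB, MI, DL, MovDesc, RI.getSubReg(DestReg, SubIdx))
        .addReg(RI.getSubReg(SrcReg, SubIdx), getKillRegState(UseKill));
  }
}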
References to RI in lib/Target/AMDGPU/SIInstrInfo.h:

172   return RI;
675   return !RI.isSGPRReg(MRI, Dest);
683   return MO.isReg() && RI.isVGPR(MRI, MO.getReg());});
812   return RI.getRegSizeInBits(*RI.getRegClass(OpInfo.RegClass)) / 8;
821   assert(RI.getRegSizeInBits(*RI.getSubClassWithSubReg(
825   return RI.getSubRegIndexLaneMask(SubReg).getNumLanes() * 4;
828   return RI.getRegSizeInBits(*getOpRegClass(MI, OpNo)) / 8;
1026  return RI.getRegClass(TID.OpInfo[OpNum].RegClass);
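
The header hits at lines 812-828 derive an operand's size in bytes two ways: from the register-class width for a full operand, or from the lane count of a subregister index, where each lane covers one 32-bit register. A small sketch of the same arithmetic follows; getOperandSizeInBytes is our illustrative wrapper, not an LLVM API, and it assumes OpInfo.RegClass is a valid register-class ID.

// Sketch of the operand-size arithmetic from the SIInstrInfo.h hits at
// lines 812-828. `getOperandSizeInBytes` is a hypothetical name.
#include "SIRegisterInfo.h"
#include "llvm/MC/MCInstrDesc.h"

using namespace llvm;

static unsigned getOperandSizeInBytes(const SIRegisterInfo &RI,
                                      const MCOperandInfo &OpInfo,
                                      unsigned SubReg) {
  if (SubReg) {
    // Subregister operand: each lane in the index's lane mask covers one
    // 32-bit register, so lanes * 4 gives the size in bytes.
    return RI.getSubRegIndexLaneMask(SubReg).getNumLanes() * 4;
  }
  // Full operand: register-class width in bits, scaled down to bytes.
  return RI.getRegSizeInBits(*RI.getRegClass(OpInfo.RegClass)) / 8;
}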