Uses of UseMI in SIFoldOperands::foldOperand (llvm/lib/Target/AMDGPU/SIFoldOperands.cpp):
547 const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);
549 if (!isUseSafeToFold(TII, *UseMI, UseOp))
561 if (UseMI->isRegSequence()) {
562 Register RegSeqDstReg = UseMI->getOperand(0).getReg();
563 unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();
573 if (tryToFoldACImm(TII, UseMI->getOperand(0), RSUseMI,
587 if (tryToFoldACImm(TII, OpToFold, UseMI, UseOpIdx, FoldList))
590 if (frameIndexMayFold(TII, *UseMI, UseOpIdx, OpToFold)) {
593 MachineOperand *SOff = TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset);
598 if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() !=
604 UseMI->getOperand(UseOpIdx).ChangeToFrameIndex(OpToFold.getIndex());
612 if (FoldingImmLike && UseMI->isCopy()) {
613 Register DestReg = UseMI->getOperand(0).getReg();
623 Register SrcReg = UseMI->getOperand(1).getReg();
634 Use.getOperandNo(), &UseMI->getOperand(1));
646 UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
647 UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
648 CopiesToReplace.push_back(UseMI);
659 UseMI->setDesc(TII->get(MovOp));
660 MachineInstr::mop_iterator ImpOpI = UseMI->implicit_operands().begin();
661 MachineInstr::mop_iterator ImpOpE = UseMI->implicit_operands().end();
665 UseMI->RemoveOperand(UseMI->getOperandNo(Tmp));
667 CopiesToReplace.push_back(UseMI);
669 if (UseMI->isCopy() && OpToFold.isReg() &&
670 UseMI->getOperand(0).getReg().isVirtual() &&
671 !UseMI->getOperand(1).getSubReg()) {
673 << "\n into " << *UseMI << '\n');
674 unsigned Size = TII->getOpSize(*UseMI, 1);
676 UseMI->getOperand(1).setReg(UseReg);
677 UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
678 UseMI->getOperand(1).setIsKill(false);
679 CopiesToReplace.push_back(UseMI);
687 if (Size > 4 && TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
690 const DebugLoc &DL = UseMI->getDebugLoc();
691 MachineBasicBlock &MBB = *UseMI->getParent();
693 UseMI->setDesc(TII->get(AMDGPU::REG_SEQUENCE));
694 for (unsigned I = UseMI->getNumOperands() - 1; I > 0; --I)
695 UseMI->RemoveOperand(I);
697 MachineInstrBuilder B(*MBB.getParent(), UseMI);
708 BuildMI(MBB, UseMI, DL,
735 BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Tmp).add(*Def);
746 BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Vgpr).add(*Def);
750 BuildMI(MBB, UseMI, DL,
757 LLVM_DEBUG(dbgs() << "Folded " << *UseMI << '\n');
763 if (TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
764 TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()))
765 UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
766 else if (TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
767 TRI->isAGPR(*MRI, UseMI->getOperand(1).getReg()))
768 UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_READ_B32));
772 unsigned UseOpc = UseMI->getOpcode();
783 UseMI->getOperand(UseOpIdx).getReg(),
785 *UseMI))
788 UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32));
791 UseMI->getOperand(1).setSubReg(0);
793 UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
795 UseMI->getOperand(1).ChangeToFrameIndex(OpToFold.getIndex());
796 UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
802 UseMI->getOperand(UseOpIdx).getReg(),
804 *UseMI))
811 UseMI->setDesc(TII->get(AMDGPU::COPY));
812 UseMI->getOperand(1).setReg(OpToFold.getReg());
813 UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
814 UseMI->getOperand(1).setIsKill(false);
815 UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
820 const MCInstrDesc &UseDesc = UseMI->getDesc();
831 tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
861 tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
867 tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
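
The recurring rewrite visible around lines 612-667 (and again at 788-796) is in-place mutation: when the folded operand behaves like an immediate and the use is a plain COPY, the pass swaps the instruction descriptor for a move opcode and turns the register source into the immediate. A minimal sketch of that shape, assuming only the generic MachineInstr/TargetInstrInfo APIs; the helper name rewriteCopyToMovImm and the MovOpc parameter are illustrative stand-ins (the pass itself chooses the concrete opcode, cf. the MovOp local at line 659):

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include <cassert>

using namespace llvm;

// Sketch only, not the pass itself: mutate a COPY of an immediate into a
// move-immediate in place. MovOpc stands in for whichever mov opcode the
// pass selects (e.g. AMDGPU::V_MOV_B32_e32 or AMDGPU::V_ACCVGPR_WRITE_B32).
static void rewriteCopyToMovImm(const TargetInstrInfo &TII,
                                MachineInstr &UseMI,
                                const MachineOperand &OpToFold,
                                unsigned MovOpc) {
  assert(UseMI.isCopy() && OpToFold.isImm() && "expects a COPY of an imm");
  UseMI.setDesc(TII.get(MovOpc));                           // COPY -> MOV
  UseMI.getOperand(1).ChangeToImmediate(OpToFold.getImm()); // reg src -> imm
}

The real pass also prunes now-stale implicit operands after the descriptor swap (lines 660-665) and defers erasing the original def via CopiesToReplace; the sketch omits that bookkeeping.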