References to APInt::lshr
include/llvm/ADT/APSInt.h 142 return IsUnsigned ? APSInt(lshr(Amt), true) : APSInt(ashr(Amt), false);
lib/Analysis/BlockFrequencyInfoImpl.cpp 580 BlockCount = (BlockCount + EntryFreq.lshr(1)).udiv(EntryFreq);
lib/Analysis/ConstantFolding.cpp 2362 return ConstantInt::get(Ty, C1->lshr(LshrAmt));
2365 return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
lib/Analysis/DemandedBits.cpp 166 AB = AOut.lshr(ShiftAmt);
187 AB = AOut.lshr(ShiftAmt);
lib/Analysis/ScalarEvolution.cpp 5564 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
6280 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD)));
8419 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
lib/Analysis/ValueTracking.cpp 1195 APInt KZResult = KnownZero.lshr(ShiftAmt);
1202 return KnownOne.lshr(ShiftAmt);
1532 Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1534 Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
5485 Lower = C->lshr(ShiftAmount);
5535 Upper = Lower.lshr(1) + 1;
lib/CodeGen/GlobalISel/GISelKnownBits.cpp 332 Known.Zero = Known.Zero.lshr(Shift);
333 Known.One = Known.One.lshr(Shift);
lib/CodeGen/GlobalISel/LegalizerHelper.cpp 639 Val.lshr(Offset).trunc(NarrowSize));
650 Val.lshr(NumParts * NarrowSize).trunc(LeftoverBits));
lib/CodeGen/InterleavedLoadCombinePass.cpp 485 A = A.lshr(shiftAmt);
lib/CodeGen/SelectionDAG/DAGCombiner.cpp 6985 Ones = N0Opcode == ISD::SHL ? Ones.shl(ShiftAmt) : Ones.lshr(ShiftAmt);
7667 SDValue ShiftC = DAG.getConstant(AddC->getAPIntValue().lshr(ShiftAmt).
10215 APInt ShiftedMask = Mask.lshr(ShAmt);
14472 APInt NarrowedUsedBits = UsedBits.lshr(UsedBits.countTrailingZeros());
14918 APInt NewImm = (Imm & Mask).lshr(ShAmt).trunc(NewBW);
lib/CodeGen/SelectionDAG/LegalizeDAG.cpp 463 SDValue Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), dl, MVT::i32);
lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp 2467 Hi = DAG.getConstant(Cst.lshr(NBitWidth).trunc(NBitWidth), dl, NVT, IsTarget,
lib/CodeGen/SelectionDAG/SelectionDAG.cpp 1236 EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
2542 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
2678 Known.One &= Known2.One.lshr(Offset).trunc(BitWidth);
2679 Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth);
3842 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
5315 APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift);
9406 APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
9408 APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
lib/CodeGen/SelectionDAG/TargetLowering.cpp 1311 if (SimplifyDemandedBits(Op0, DemandedBits.lshr(ShAmt), DemandedElts,
1513 APInt Demanded0 = DemandedBits.lshr(IsFSHL ? Amt : (BitWidth - Amt));
3263 bestMask = Mask.lshr(offset * (width/8) * 8);
3659 SDValue CmpRHS = DAG.getConstant(C1.lshr(ShiftBits), dl, CmpTy);
4776 magics = Divisor.lshr(PreShift).magicu(PreShift);
4970 APInt D0 = D.lshr(K);
5153 APInt D0 = D.lshr(K);
lib/ExecutionEngine/Interpreter/Execution.cpp 1230 Result.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift));
1237 Dest.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift));
2092 Dest.IntVal = Op0.IntVal.lshr(Op1.IntVal.getZExtValue());
lib/Support/APInt.cpp 566 return this->lshr(BitWidth - numBits);
1053 return shl(rotateAmt) | lshr(BitWidth - rotateAmt);
1064 return lshr(rotateAmt) | shl(BitWidth - rotateAmt);
1209 t = signedMin + (d.lshr(d.getBitWidth() - 1));
1251 APInt allOnes = APInt::getAllOnesValue(d.getBitWidth()).lshr(LeadingZeros);
1979 APInt Res = lshr(1) * RHS;
lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h 368 uint32_t Sign = Imm.lshr(15).getZExtValue() & 1;
369 int32_t Exp = (Imm.lshr(10).getSExtValue() & 0x1f) - 15; // -14 to 15
394 uint32_t Sign = Imm.lshr(31).getZExtValue() & 1;
395 int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127; // -126 to 127
422 uint64_t Sign = Imm.lshr(63).getZExtValue() & 1;
423 int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023; // -1022 to 1023
lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp 1735 Inst.addOperand(MCOperand::createImm(Literal.lshr(32).getZExtValue()));
lib/Target/ARM/ARMISelLowering.cpp 6531 SDValue Hi = DAG.getConstant(INTVal.lshr(32).trunc(32), DL, MVT::i32);
lib/Target/ARM/MCTargetDesc/ARMAddressingModes.h 654 uint32_t Sign = Imm.lshr(15).getZExtValue() & 1;
655 int32_t Exp = (Imm.lshr(10).getSExtValue() & 0x1f) - 15; // -14 to 15
680 uint32_t Sign = Imm.lshr(31).getZExtValue() & 1;
681 int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127; // -126 to 127
708 uint64_t Sign = Imm.lshr(63).getZExtValue() & 1;
709 int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023; // -1022 to 1023
lib/Target/Hexagon/HexagonConstPropagation.cpp 1777 Result = A1.shl(BW-Bits-Offset).lshr(BW-Bits);
lib/Target/Hexagon/HexagonGenExtract.cpp 172 APInt A = APInt(BW, ~0ULL).lshr(SR).shl(SL);
178 APInt M = CM->getValue().lshr(SL);
lib/Target/Mips/MipsSEISelDAGToDAG.cpp 1063 const unsigned Hi = SplatValue.lshr(16).getLoBits(16).getZExtValue();
1085 const unsigned Hi = SplatValue.lshr(16).getLoBits(16).getZExtValue();
1134 const unsigned Hi = SplatValue.lshr(16).getLoBits(16).getZExtValue();
1135 const unsigned Higher = SplatValue.lshr(32).getLoBits(16).getZExtValue();
1136 const unsigned Highest = SplatValue.lshr(48).getLoBits(16).getZExtValue();
lib/Target/Mips/MipsSEISelLowering.cpp 1476 SDValue BitImmHiOp = DAG.getConstant(BitImm.lshr(32).trunc(32), DL,
lib/Target/RISCV/RISCVISelLowering.cpp 945 SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
lib/Target/Sparc/SparcISelLowering.cpp 3069 SDValue Hi = DAG.getConstant(V.lshr(32).zextOrTrunc(32), DL, MVT::i32);
lib/Target/SystemZ/SystemZISelLowering.cpp 681 uint64_t Byte = IntBits.lshr(I * 8).trunc(8).getZExtValue();
752 APInt HighValue = SplatBits.lshr(HalfSize).trunc(HalfSize);
6253 APInt DemEls = DemandedElts.lshr(NumSrc0Els);
lib/Target/X86/X86ISelLowering.cpp 5416 Ops.push_back(DAG.getConstant(V.lshr(32).trunc(32), dl, EltVT));
9053 if (VT.is256BitVector() && DemandedElts.lshr(HalfNumElts) == 0) {
22489 Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), dl, ElementType));
34344 APInt DemandedSrc = DemandedElts.lshr(ShiftAmt);
34542 DemandedElts.lshr(NumElts / 2) == 0) {
34547 if (VT.is512BitVector() && DemandedElts.lshr(NumElts / 4) == 0)
34752 APInt DemandedMask = OriginalDemandedBits.lshr(ShAmt);
lib/Transforms/InstCombine/InstCombineCalls.cpp 2985 APInt Idx = V11.lshr(8).zextOrTrunc(6);
lib/Transforms/InstCombine/InstCombineCompares.cpp 1198 } else if (AP1 == AP2.lshr(Shift)) {
2126 Value *And = Builder.CreateAnd(X, (~C).lshr(ShiftAmt->getZExtValue()));
2135 Builder.CreateAnd(X, (~(C - 1)).lshr(ShiftAmt->getZExtValue()));
2216 if (ShiftedC.lshr(ShAmtVal) == C)
2222 if ((ShiftedC + 1).lshr(ShAmtVal) == (C + 1))
2236 (!IsAShr && C.shl(ShAmtVal).lshr(ShAmtVal) == C)) &&
3434 const APInt AddCst = ICmpCst.lshr(1);
lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp 476 APInt DemandedMaskIn(DemandedMask.lshr(ShiftAmt));
703 APInt DemandedMaskLHS(DemandedMask.lshr(ShiftAmt));
710 RHSKnown.Zero.lshr(BitWidth - ShiftAmt);
712 RHSKnown.One.lshr(BitWidth - ShiftAmt);
928 BitMask1 = isLshr ? (BitMask1.lshr(ShrAmt) << ShlAmt) :
934 BitMask2 = isLshr ? BitMask2.lshr(ShrAmt - ShlAmt):
1446 APInt SubUndef = UndefElts2.lshr(OutIdx * Ratio).zextOrTrunc(Ratio);
1653 APInt LaneElts = OpUndefElts.lshr(InnerVWidthPerLane * Lane);
tools/clang/lib/Basic/FixedPoint.cpp 118 Val = Val.lshr(1);
197 .lshr(Scale)
tools/lldb/source/Utility/Scalar.cpp 2758 m_integer = m_integer.lshr(bit_offset)
unittests/ADT/APIntTest.cpp 290 EXPECT_EQ(zero, one.lshr(1));
2378 EXPECT_EQ(0, neg_one.lshr(128));
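
A minimal, hypothetical sketch of two lshr patterns that recur in the entries above: splitting a wide constant into halves via lshr + trunc (as in the LegalizeDAG and target ISelLowering call sites), and building a rotate from lshr and shl (as in APInt.cpp's rotate helpers). The program below, including main, the sample constant, and the shift amount, is illustrative only and assumes just the public llvm::APInt API from llvm/ADT/APInt.h; it is not code from the listing.

    #include "llvm/ADT/APInt.h"
    #include <cstdio>

    using llvm::APInt;

    int main() {
      // Pattern 1: split a 64-bit constant into 32-bit halves,
      // mirroring the lshr(32).trunc(32) idiom seen above.
      APInt Val(64, 0x1122334455667788ULL);
      APInt Hi = Val.lshr(32).trunc(32); // high half: 0x11223344
      APInt Lo = Val.trunc(32);          // low half:  0x55667788

      // Pattern 2: rotate right by combining lshr and shl, mirroring
      // the APInt.cpp rotate implementation. Amt is assumed to be
      // nonzero and strictly less than the bit width.
      unsigned Amt = 8;
      APInt Rot = Val.lshr(Amt) | Val.shl(Val.getBitWidth() - Amt);

      std::printf("hi=%llx lo=%llx rot=%llx\n",
                  (unsigned long long)Hi.getZExtValue(),
                  (unsigned long long)Lo.getZExtValue(),
                  (unsigned long long)Rot.getZExtValue());
      return 0;
    }

Note that lshr is a logical (zero-filling) right shift regardless of the value's sign; the APSInt entry above dispatches to ashr instead when the value is signed.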