LLVM 14.0.0git
InstCombineShifts.cpp
1 //===- InstCombineShifts.cpp ----------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the visitShl, visitLShr, and visitAShr functions.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "InstCombineInternal.h"
16 #include "llvm/IR/IntrinsicInst.h"
17 #include "llvm/IR/PatternMatch.h"
18 #include "llvm/Transforms/InstCombine/InstCombiner.h"
19 using namespace llvm;
20 using namespace PatternMatch;
21 
22 #define DEBUG_TYPE "instcombine"
23 
24 bool canTryToConstantAddTwoShiftAmounts(Value *Sh0, Value *ShAmt0, Value *Sh1,
25  Value *ShAmt1) {
26  // We have two shift amounts from two different shifts. The types of those
27  // shift amounts may not match. If that's the case, let's bail out now.
28  if (ShAmt0->getType() != ShAmt1->getType())
29  return false;
30 
31  // As input, we have the following pattern:
32  // Sh0 (Sh1 X, Q), K
33  // We want to rewrite that as:
34  // Sh x, (Q+K) iff (Q+K) u< bitwidth(x)
35  // While we know that originally (Q+K) would not overflow
36  // (because 2 * (N-1) u<= iN -1), we have looked past extensions of
37  // shift amounts, so it may now overflow in a smaller bitwidth.
38  // To ensure that does not happen, we need to ensure that the total maximal
39  // shift amount is still representable in that smaller bit width.
40  unsigned MaximalPossibleTotalShiftAmount =
41  (Sh0->getType()->getScalarSizeInBits() - 1) +
42  (Sh1->getType()->getScalarSizeInBits() - 1);
43  APInt MaximalRepresentableShiftAmount =
44  APInt::getAllOnes(ShAmt0->getType()->getScalarSizeInBits());
45  return MaximalRepresentableShiftAmount.uge(MaximalPossibleTotalShiftAmount);
46 }
47 
48 // Given pattern:
49 // (x shiftopcode Q) shiftopcode K
50 // we should rewrite it as
51 // x shiftopcode (Q+K) iff (Q+K) u< bitwidth(x)
52 //
53 // This is valid for any shift, but they must be identical, and we must be
54 // careful in case we have (zext(Q)+zext(K)) and look past extensions,
55 // (Q+K) must not overflow or else (Q+K) u< bitwidth(x) is bogus.
56 //
57 // AnalyzeForSignBitExtraction indicates that we will only analyze whether this
58 // pattern has any 2 right-shifts that sum to 1 less than original bit width.
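// Editorial example (not part of the original source): for i32 values this
// reassociation turns, e.g.,
//   (x l>> 3) l>> 5   -->   x l>> 8
// and likewise for two 'shl's or two 'ashr's, because 3+5 u< 32.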
59 Value *InstCombinerImpl::reassociateShiftAmtsOfTwoSameDirectionShifts(
60  BinaryOperator *Sh0, const SimplifyQuery &SQ,
61  bool AnalyzeForSignBitExtraction) {
62  // Look for a shift of some instruction, ignore zext of shift amount if any.
63  Instruction *Sh0Op0;
64  Value *ShAmt0;
65  if (!match(Sh0,
66  m_Shift(m_Instruction(Sh0Op0), m_ZExtOrSelf(m_Value(ShAmt0)))))
67  return nullptr;
68 
69  // If there is a truncation between the two shifts, we must make note of it
70  // and look through it. The truncation imposes additional constraints on the
71  // transform.
72  Instruction *Sh1;
73  Value *Trunc = nullptr;
74  match(Sh0Op0,
75  m_CombineOr(m_CombineAnd(m_Trunc(m_Instruction(Sh1)), m_Value(Trunc)),
76  m_Instruction(Sh1)));
77 
78  // Inner shift: (x shiftopcode ShAmt1)
79  // As with the other shift, ignore a zext of the shift amount, if any.
80  Value *X, *ShAmt1;
81  if (!match(Sh1, m_Shift(m_Value(X), m_ZExtOrSelf(m_Value(ShAmt1)))))
82  return nullptr;
83 
84  // Verify that it would be safe to try to add those two shift amounts.
85  if (!canTryToConstantAddTwoShiftAmounts(Sh0, ShAmt0, Sh1, ShAmt1))
86  return nullptr;
87 
88  // We are only looking for signbit extraction if we have two right shifts.
89  bool HadTwoRightShifts = match(Sh0, m_Shr(m_Value(), m_Value())) &&
90  match(Sh1, m_Shr(m_Value(), m_Value()));
91  // ... and if it's not two right-shifts, we know the answer already.
92  if (AnalyzeForSignBitExtraction && !HadTwoRightShifts)
93  return nullptr;
94 
95  // The shift opcodes must be identical, unless we are just checking whether
96  // this pattern can be interpreted as a sign-bit-extraction.
97  Instruction::BinaryOps ShiftOpcode = Sh0->getOpcode();
98  bool IdenticalShOpcodes = Sh0->getOpcode() == Sh1->getOpcode();
99  if (!IdenticalShOpcodes && !AnalyzeForSignBitExtraction)
100  return nullptr;
101 
102  // If we saw a truncation, we'll need to produce an extra instruction,
103  // and for that one of the operands of the shift must be one-use,
104  // unless of course we don't actually plan to produce any instructions here.
105  if (Trunc && !AnalyzeForSignBitExtraction &&
106  !match(Sh0, m_c_BinOp(m_OneUse(m_Value()), m_Value())))
107  return nullptr;
108 
109  // Can we fold (ShAmt0+ShAmt1) ?
110  auto *NewShAmt = dyn_cast_or_null<Constant>(
111  SimplifyAddInst(ShAmt0, ShAmt1, /*isNSW=*/false, /*isNUW=*/false,
112  SQ.getWithInstruction(Sh0)));
113  if (!NewShAmt)
114  return nullptr; // Did not simplify.
115  unsigned NewShAmtBitWidth = NewShAmt->getType()->getScalarSizeInBits();
116  unsigned XBitWidth = X->getType()->getScalarSizeInBits();
117  // Is the new shift amount smaller than the bit width of inner/new shift?
118  if (!match(NewShAmt, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_ULT,
119  APInt(NewShAmtBitWidth, XBitWidth))))
120  return nullptr; // FIXME: could perform constant-folding.
121 
122  // If there was a truncation, and we have a right-shift, we can only fold if
123  // we are left with the original sign bit. Likewise, if we were just checking
124  // that this is a signbit extraction, this is the place to check it.
125  // FIXME: zero shift amount is also legal here, but we can't *easily* check
126  // more than one predicate so it's not really worth it.
127  if (HadTwoRightShifts && (Trunc || AnalyzeForSignBitExtraction)) {
128  // If it's not a sign bit extraction, then we're done.
129  if (!match(NewShAmt,
130  m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_EQ,
131  APInt(NewShAmtBitWidth, XBitWidth - 1))))
132  return nullptr;
133  // If it is, and that was the question, return the base value.
134  if (AnalyzeForSignBitExtraction)
135  return X;
136  }
137 
138  assert(IdenticalShOpcodes && "Should not get here with different shifts.");
139 
140  // All good, we can do this fold.
141  NewShAmt = ConstantExpr::getZExtOrBitCast(NewShAmt, X->getType());
142 
143  BinaryOperator *NewShift = BinaryOperator::Create(ShiftOpcode, X, NewShAmt);
144 
145  // The flags can only be propagated if there wasn't a trunc.
146  if (!Trunc) {
147  // If the pattern did not involve trunc, and both of the original shifts
148  // had the same flag set, preserve the flag.
149  if (ShiftOpcode == Instruction::BinaryOps::Shl) {
150  NewShift->setHasNoUnsignedWrap(Sh0->hasNoUnsignedWrap() &&
151  Sh1->hasNoUnsignedWrap());
152  NewShift->setHasNoSignedWrap(Sh0->hasNoSignedWrap() &&
153  Sh1->hasNoSignedWrap());
154  } else {
155  NewShift->setIsExact(Sh0->isExact() && Sh1->isExact());
156  }
157  }
158 
159  Instruction *Ret = NewShift;
160  if (Trunc) {
161  Builder.Insert(NewShift);
162  Ret = CastInst::Create(Instruction::Trunc, NewShift, Sh0->getType());
163  }
164 
165  return Ret;
166 }
167 
168 // If we have some pattern that leaves only some low bits set, and then performs
169 // left-shift of those bits, if none of the bits that are left after the final
170 // shift are modified by the mask, we can omit the mask.
171 //
172 // There are many variants to this pattern:
173 // a) (x & ((1 << MaskShAmt) - 1)) << ShiftShAmt
174 // b) (x & (~(-1 << MaskShAmt))) << ShiftShAmt
175 // c) (x & (-1 l>> MaskShAmt)) << ShiftShAmt
176 // d) (x & ((-1 << MaskShAmt) l>> MaskShAmt)) << ShiftShAmt
177 // e) ((x << MaskShAmt) l>> MaskShAmt) << ShiftShAmt
178 // f) ((x << MaskShAmt) a>> MaskShAmt) << ShiftShAmt
179 // All these patterns can be simplified to just:
180 // x << ShiftShAmt
181 // iff:
182 // a,b) (MaskShAmt+ShiftShAmt) u>= bitwidth(x)
183 // c,d,e,f) (ShiftShAmt-MaskShAmt) s>= 0 (i.e. ShiftShAmt u>= MaskShAmt)
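// Editorial sketch (not part of the original source), variant (a) on i32 with
// MaskShAmt = 8 and ShiftShAmt = 24:
//   %masked = and i32 %x, 255        ; (1 << 8) - 1
//   %r      = shl i32 %masked, 24
// Since 8 + 24 u>= 32, every masked-off bit is shifted out anyway, so this
// simplifies to:
//   %r = shl i32 %x, 24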
184 static Instruction *
185 dropRedundantMaskingOfLeftShiftInput(BinaryOperator *OuterShift,
186  const SimplifyQuery &Q,
187  InstCombiner::BuilderTy &Builder) {
188  assert(OuterShift->getOpcode() == Instruction::BinaryOps::Shl &&
189  "The input must be 'shl'!");
190 
191  Value *Masked, *ShiftShAmt;
192  match(OuterShift,
193  m_Shift(m_Value(Masked), m_ZExtOrSelf(m_Value(ShiftShAmt))));
194 
195  // *If* there is a truncation between the outer shift and the possible mask,
196  // then said truncation *must* be one-use, else we can't perform the fold.
197  Value *Trunc;
198  if (match(Masked, m_CombineAnd(m_Trunc(m_Value(Masked)), m_Value(Trunc))) &&
199  !Trunc->hasOneUse())
200  return nullptr;
201 
202  Type *NarrowestTy = OuterShift->getType();
203  Type *WidestTy = Masked->getType();
204  bool HadTrunc = WidestTy != NarrowestTy;
205 
206  // The mask must be computed in a type twice as wide to ensure
207  // that no bits are lost if the sum-of-shifts is wider than the base type.
208  Type *ExtendedTy = WidestTy->getExtendedType();
209 
210  Value *MaskShAmt;
211 
212  // ((1 << MaskShAmt) - 1)
213  auto MaskA = m_Add(m_Shl(m_One(), m_Value(MaskShAmt)), m_AllOnes());
214  // (~(-1 << maskNbits))
215  auto MaskB = m_Xor(m_Shl(m_AllOnes(), m_Value(MaskShAmt)), m_AllOnes());
216  // (-1 l>> MaskShAmt)
217  auto MaskC = m_LShr(m_AllOnes(), m_Value(MaskShAmt));
218  // ((-1 << MaskShAmt) l>> MaskShAmt)
219  auto MaskD =
220  m_LShr(m_Shl(m_AllOnes(), m_Value(MaskShAmt)), m_Deferred(MaskShAmt));
221 
222  Value *X;
223  Constant *NewMask;
224 
225  if (match(Masked, m_c_And(m_CombineOr(MaskA, MaskB), m_Value(X)))) {
226  // Peek through an optional zext of the shift amount.
227  match(MaskShAmt, m_ZExtOrSelf(m_Value(MaskShAmt)));
228 
229  // Verify that it would be safe to try to add those two shift amounts.
230  if (!canTryToConstantAddTwoShiftAmounts(OuterShift, ShiftShAmt, Masked,
231  MaskShAmt))
232  return nullptr;
233 
234  // Can we simplify (MaskShAmt+ShiftShAmt) ?
235  auto *SumOfShAmts = dyn_cast_or_null<Constant>(SimplifyAddInst(
236  MaskShAmt, ShiftShAmt, /*IsNSW=*/false, /*IsNUW=*/false, Q));
237  if (!SumOfShAmts)
238  return nullptr; // Did not simplify.
239  // In this pattern SumOfShAmts correlates with the number of low bits
240  // that shall remain in the root value (OuterShift).
241 
242  // An extend of an undef value becomes zero because the high bits are never
243  // completely unknown. Replace the `undef` shift amounts with final
244  // shift bitwidth to ensure that the value remains undef when creating the
245  // subsequent shift op.
246  SumOfShAmts = Constant::replaceUndefsWith(
247  SumOfShAmts, ConstantInt::get(SumOfShAmts->getType()->getScalarType(),
248  ExtendedTy->getScalarSizeInBits()));
249  auto *ExtendedSumOfShAmts = ConstantExpr::getZExt(SumOfShAmts, ExtendedTy);
250  // And compute the mask as usual: ~(-1 << (SumOfShAmts))
251  auto *ExtendedAllOnes = ConstantExpr::getAllOnesValue(ExtendedTy);
252  auto *ExtendedInvertedMask =
253  ConstantExpr::getShl(ExtendedAllOnes, ExtendedSumOfShAmts);
254  NewMask = ConstantExpr::getNot(ExtendedInvertedMask);
255  } else if (match(Masked, m_c_And(m_CombineOr(MaskC, MaskD), m_Value(X))) ||
256  match(Masked, m_Shr(m_Shl(m_Value(X), m_Value(MaskShAmt)),
257  m_Deferred(MaskShAmt)))) {
258  // Peek through an optional zext of the shift amount.
259  match(MaskShAmt, m_ZExtOrSelf(m_Value(MaskShAmt)));
260 
261  // Verify that it would be safe to try to add those two shift amounts.
262  if (!canTryToConstantAddTwoShiftAmounts(OuterShift, ShiftShAmt, Masked,
263  MaskShAmt))
264  return nullptr;
265 
266  // Can we simplify (ShiftShAmt-MaskShAmt) ?
267  auto *ShAmtsDiff = dyn_cast_or_null<Constant>(SimplifySubInst(
268  ShiftShAmt, MaskShAmt, /*IsNSW=*/false, /*IsNUW=*/false, Q));
269  if (!ShAmtsDiff)
270  return nullptr; // Did not simplify.
271  // In this pattern ShAmtsDiff correlates with the number of high bits that
272  // shall be unset in the root value (OuterShift).
273 
274  // An extend of an undef value becomes zero because the high bits are never
275  // completely unknown. Replace the `undef` shift amounts with negated
276  // bitwidth of innermost shift to ensure that the value remains undef when
277  // creating the subsequent shift op.
278  unsigned WidestTyBitWidth = WidestTy->getScalarSizeInBits();
279  ShAmtsDiff = Constant::replaceUndefsWith(
280  ShAmtsDiff, ConstantInt::get(ShAmtsDiff->getType()->getScalarType(),
281  -WidestTyBitWidth));
282  auto *ExtendedNumHighBitsToClear = ConstantExpr::getZExt(
283  ConstantExpr::getSub(ConstantInt::get(ShAmtsDiff->getType(),
284  WidestTyBitWidth,
285  /*isSigned=*/false),
286  ShAmtsDiff),
287  ExtendedTy);
288  // And compute the mask as usual: (-1 l>> (NumHighBitsToClear))
289  auto *ExtendedAllOnes = ConstantExpr::getAllOnesValue(ExtendedTy);
290  NewMask =
291  ConstantExpr::getLShr(ExtendedAllOnes, ExtendedNumHighBitsToClear);
292  } else
293  return nullptr; // Don't know anything about this pattern.
294 
295  NewMask = ConstantExpr::getTrunc(NewMask, NarrowestTy);
296 
297  // Does this mask have any unset bits? If not, then we can just not apply it.
298  bool NeedMask = !match(NewMask, m_AllOnes());
299 
300  // If we need to apply a mask, there are several more restrictions we have.
301  if (NeedMask) {
302  // The old masking instruction must go away.
303  if (!Masked->hasOneUse())
304  return nullptr;
305  // The original "masking" instruction must not have been `ashr`.
306  if (match(Masked, m_AShr(m_Value(), m_Value())))
307  return nullptr;
308  }
309 
310  // If we need to apply truncation, let's do it first, since we can.
311  // We have already ensured that the old truncation will go away.
312  if (HadTrunc)
313  X = Builder.CreateTrunc(X, NarrowestTy);
314 
315  // No 'NUW'/'NSW'! We no longer know that we won't shift-out non-0 bits.
316  // We didn't change the Type of this outermost shift, so we can just do it.
317  auto *NewShift = BinaryOperator::Create(OuterShift->getOpcode(), X,
318  OuterShift->getOperand(1));
319  if (!NeedMask)
320  return NewShift;
321 
322  Builder.Insert(NewShift);
323  return BinaryOperator::Create(Instruction::And, NewShift, NewMask);
324 }
325 
326 /// If we have a shift-by-constant of a bitwise logic op that itself has a
327 /// shift-by-constant operand with identical opcode, we may be able to convert
328 /// that into 2 independent shifts followed by the logic op. This eliminates
329 /// a use of an intermediate value (reduces dependency chain).
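// Editorial example (not part of the original source):
//   shl (xor (shl X, 2), Y), 3   -->   xor (shl X, 5), (shl Y, 3)
// i.e. shift (logic (shift X, C0), Y), C1
//        --> logic (shift X, C0 + C1), (shift Y, C1), provided C0 + C1 stays
// below the bit width.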
330 static Instruction *foldShiftOfShiftedLogic(BinaryOperator &I,
331  InstCombiner::BuilderTy &Builder) {
332  assert(I.isShift() && "Expected a shift as input");
333  auto *LogicInst = dyn_cast<BinaryOperator>(I.getOperand(0));
334  if (!LogicInst || !LogicInst->isBitwiseLogicOp() || !LogicInst->hasOneUse())
335  return nullptr;
336 
337  Constant *C0, *C1;
338  if (!match(I.getOperand(1), m_Constant(C1)))
339  return nullptr;
340 
341  Instruction::BinaryOps ShiftOpcode = I.getOpcode();
342  Type *Ty = I.getType();
343 
344  // Find a matching one-use shift by constant. The fold is not valid if the sum
345  // of the shift values equals or exceeds bitwidth.
346  // TODO: Remove the one-use check if the other logic operand (Y) is constant.
347  Value *X, *Y;
348  auto matchFirstShift = [&](Value *V) {
349  APInt Threshold(Ty->getScalarSizeInBits(), Ty->getScalarSizeInBits());
350  return match(V, m_BinOp(ShiftOpcode, m_Value(), m_Value())) &&
351  match(V, m_OneUse(m_Shift(m_Value(X), m_Constant(C0)))) &&
352  match(ConstantExpr::getAdd(C0, C1),
353  m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, Threshold));
354  };
355 
356  // Logic ops are commutative, so check each operand for a match.
357  if (matchFirstShift(LogicInst->getOperand(0)))
358  Y = LogicInst->getOperand(1);
359  else if (matchFirstShift(LogicInst->getOperand(1)))
360  Y = LogicInst->getOperand(0);
361  else
362  return nullptr;
363 
364  // shift (logic (shift X, C0), Y), C1 -> logic (shift X, C0+C1), (shift Y, C1)
365  Constant *ShiftSumC = ConstantExpr::getAdd(C0, C1);
366  Value *NewShift1 = Builder.CreateBinOp(ShiftOpcode, X, ShiftSumC);
367  Value *NewShift2 = Builder.CreateBinOp(ShiftOpcode, Y, I.getOperand(1));
368  return BinaryOperator::Create(LogicInst->getOpcode(), NewShift1, NewShift2);
369 }
370 
371 Instruction *InstCombinerImpl::commonShiftTransforms(BinaryOperator &I) {
372  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
373  assert(Op0->getType() == Op1->getType());
374 
375  // If the shift amount is a one-use `sext`, we can demote it to `zext`.
376  Value *Y;
377  if (match(Op1, m_OneUse(m_SExt(m_Value(Y))))) {
378  Value *NewExt = Builder.CreateZExt(Y, I.getType(), Op1->getName());
379  return BinaryOperator::Create(I.getOpcode(), Op0, NewExt);
380  }
381 
382  // See if we can fold away this shift.
383  if (SimplifyDemandedInstructionBits(I))
384  return &I;
385 
386  // Try to fold constant and into select arguments.
387  if (isa<Constant>(Op0))
388  if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
389  if (Instruction *R = FoldOpIntoSelect(I, SI))
390  return R;
391 
392  if (Constant *CUI = dyn_cast<Constant>(Op1))
393  if (Instruction *Res = FoldShiftByConstant(Op0, CUI, I))
394  return Res;
395 
396  if (auto *NewShift = cast_or_null<Instruction>(
397  reassociateShiftAmtsOfTwoSameDirectionShifts(&I, SQ)))
398  return NewShift;
399 
400  // (C1 shift (A add C2)) -> ((C1 shift C2) shift A)
401  // iff A and C2 are both positive.
402  Value *A;
403  Constant *C;
404  if (match(Op0, m_Constant()) && match(Op1, m_Add(m_Value(A), m_Constant(C))))
405  if (isKnownNonNegative(A, DL, 0, &AC, &I, &DT) &&
406  isKnownNonNegative(C, DL, 0, &AC, &I, &DT))
407  return BinaryOperator::Create(
408  I.getOpcode(), Builder.CreateBinOp(I.getOpcode(), Op0, C), A);
409 
410  // X shift (A srem C) -> X shift (A and (C - 1)) iff C is a power of 2.
411  // Because shifts by negative values (which could occur if A were negative)
412  // are undefined.
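// Editorial example (not part of the original source), with C = 8:
//   X shl (A srem 8)   -->   X shl (A and 7)
// which also sidesteps a potentially negative shift amount.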
413  if (Op1->hasOneUse() && match(Op1, m_SRem(m_Value(A), m_Constant(C))) &&
414  match(C, m_Power2())) {
415  // FIXME: Should this get moved into SimplifyDemandedBits by saying we don't
416  // demand the sign bit (and many others) here??
417  Constant *Mask = ConstantExpr::getSub(C, ConstantInt::get(I.getType(), 1));
418  Value *Rem = Builder.CreateAnd(A, Mask, Op1->getName());
419  return replaceOperand(I, 1, Rem);
420  }
421 
422  if (Instruction *Logic = foldShiftOfShiftedLogic(I, Builder))
423  return Logic;
424 
425  return nullptr;
426 }
427 
428 /// Return true if we can simplify two logical (either left or right) shifts
429 /// that have constant shift amounts: OuterShift (InnerShift X, C1), C2.
430 static bool canEvaluateShiftedShift(unsigned OuterShAmt, bool IsOuterShl,
431  Instruction *InnerShift,
432  InstCombinerImpl &IC, Instruction *CxtI) {
433  assert(InnerShift->isLogicalShift() && "Unexpected instruction type");
434 
435  // We need constant scalar or constant splat shifts.
436  const APInt *InnerShiftConst;
437  if (!match(InnerShift->getOperand(1), m_APInt(InnerShiftConst)))
438  return false;
439 
440  // Two logical shifts in the same direction:
441  // shl (shl X, C1), C2 --> shl X, C1 + C2
442  // lshr (lshr X, C1), C2 --> lshr X, C1 + C2
443  bool IsInnerShl = InnerShift->getOpcode() == Instruction::Shl;
444  if (IsInnerShl == IsOuterShl)
445  return true;
446 
447  // Equal shift amounts in opposite directions become bitwise 'and':
448  // lshr (shl X, C), C --> and X, C'
449  // shl (lshr X, C), C --> and X, C'
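// Editorial example (not part of the original source), i8:
//   lshr (shl X, 3), 3 --> and X, 31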
450  if (*InnerShiftConst == OuterShAmt)
451  return true;
452 
453  // If the 2nd shift is bigger than the 1st, we can fold:
454  // lshr (shl X, C1), C2 --> and (shl X, C1 - C2), C3
455  // shl (lshr X, C1), C2 --> and (lshr X, C1 - C2), C3
456  // but it isn't profitable unless we know the and'd out bits are already zero.
457  // Also, check that the inner shift is valid (less than the type width) or
458  // we'll crash trying to produce the bit mask for the 'and'.
459  unsigned TypeWidth = InnerShift->getType()->getScalarSizeInBits();
460  if (InnerShiftConst->ugt(OuterShAmt) && InnerShiftConst->ult(TypeWidth)) {
461  unsigned InnerShAmt = InnerShiftConst->getZExtValue();
462  unsigned MaskShift =
463  IsInnerShl ? TypeWidth - InnerShAmt : InnerShAmt - OuterShAmt;
464  APInt Mask = APInt::getLowBitsSet(TypeWidth, OuterShAmt) << MaskShift;
465  if (IC.MaskedValueIsZero(InnerShift->getOperand(0), Mask, 0, CxtI))
466  return true;
467  }
468 
469  return false;
470 }
471 
472 /// See if we can compute the specified value, but shifted logically to the left
473 /// or right by some number of bits. This should return true if the expression
474 /// can be computed for the same cost as the current expression tree. This is
475 /// used to eliminate extraneous shifting from things like:
476 /// %C = shl i128 %A, 64
477 /// %D = shl i128 %B, 96
478 /// %E = or i128 %C, %D
479 /// %F = lshr i128 %E, 64
480 /// where the client will ask if E can be computed shifted right by 64-bits. If
481 /// this succeeds, getShiftedValue() will be called to produce the value.
482 static bool canEvaluateShifted(Value *V, unsigned NumBits, bool IsLeftShift,
483  InstCombinerImpl &IC, Instruction *CxtI) {
484  // We can always evaluate constants shifted.
485  if (isa<Constant>(V))
486  return true;
487 
488  Instruction *I = dyn_cast<Instruction>(V);
489  if (!I) return false;
490 
491  // We can't mutate something that has multiple uses: doing so would
492  // require duplicating the instruction in general, which isn't profitable.
493  if (!I->hasOneUse()) return false;
494 
495  switch (I->getOpcode()) {
496  default: return false;
497  case Instruction::And:
498  case Instruction::Or:
499  case Instruction::Xor:
500  // Bitwise operators can all be arbitrarily evaluated shifted.
501  return canEvaluateShifted(I->getOperand(0), NumBits, IsLeftShift, IC, I) &&
502  canEvaluateShifted(I->getOperand(1), NumBits, IsLeftShift, IC, I);
503 
504  case Instruction::Shl:
505  case Instruction::LShr:
506  return canEvaluateShiftedShift(NumBits, IsLeftShift, I, IC, CxtI);
507 
508  case Instruction::Select: {
509  SelectInst *SI = cast<SelectInst>(I);
510  Value *TrueVal = SI->getTrueValue();
511  Value *FalseVal = SI->getFalseValue();
512  return canEvaluateShifted(TrueVal, NumBits, IsLeftShift, IC, SI) &&
513  canEvaluateShifted(FalseVal, NumBits, IsLeftShift, IC, SI);
514  }
515  case Instruction::PHI: {
516  // We can change a phi if we can change all operands. Note that we never
517  // get into trouble with cyclic PHIs here because we only consider
518  // instructions with a single use.
519  PHINode *PN = cast<PHINode>(I);
520  for (Value *IncValue : PN->incoming_values())
521  if (!canEvaluateShifted(IncValue, NumBits, IsLeftShift, IC, PN))
522  return false;
523  return true;
524  }
525  }
526 }
527 
528 /// Fold OuterShift (InnerShift X, C1), C2.
529 /// See canEvaluateShiftedShift() for the constraints on these instructions.
530 static Value *foldShiftedShift(BinaryOperator *InnerShift, unsigned OuterShAmt,
531  bool IsOuterShl,
532  InstCombiner::BuilderTy &Builder) {
533  bool IsInnerShl = InnerShift->getOpcode() == Instruction::Shl;
534  Type *ShType = InnerShift->getType();
535  unsigned TypeWidth = ShType->getScalarSizeInBits();
536 
537  // We only accept shifts-by-a-constant in canEvaluateShifted().
538  const APInt *C1;
539  match(InnerShift->getOperand(1), m_APInt(C1));
540  unsigned InnerShAmt = C1->getZExtValue();
541 
542  // Change the shift amount and clear the appropriate IR flags.
543  auto NewInnerShift = [&](unsigned ShAmt) {
544  InnerShift->setOperand(1, ConstantInt::get(ShType, ShAmt));
545  if (IsInnerShl) {
546  InnerShift->setHasNoUnsignedWrap(false);
547  InnerShift->setHasNoSignedWrap(false);
548  } else {
549  InnerShift->setIsExact(false);
550  }
551  return InnerShift;
552  };
553 
554  // Two logical shifts in the same direction:
555  // shl (shl X, C1), C2 --> shl X, C1 + C2
556  // lshr (lshr X, C1), C2 --> lshr X, C1 + C2
557  if (IsInnerShl == IsOuterShl) {
558  // If this is an oversized composite shift, then unsigned shifts get 0.
559  if (InnerShAmt + OuterShAmt >= TypeWidth)
560  return Constant::getNullValue(ShType);
561 
562  return NewInnerShift(InnerShAmt + OuterShAmt);
563  }
564 
565  // Equal shift amounts in opposite directions become bitwise 'and':
566  // lshr (shl X, C), C --> and X, C'
567  // shl (lshr X, C), C --> and X, C'
568  if (InnerShAmt == OuterShAmt) {
569  APInt Mask = IsInnerShl
570  ? APInt::getLowBitsSet(TypeWidth, TypeWidth - OuterShAmt)
571  : APInt::getHighBitsSet(TypeWidth, TypeWidth - OuterShAmt);
572  Value *And = Builder.CreateAnd(InnerShift->getOperand(0),
573  ConstantInt::get(ShType, Mask));
574  if (auto *AndI = dyn_cast<Instruction>(And)) {
575  AndI->moveBefore(InnerShift);
576  AndI->takeName(InnerShift);
577  }
578  return And;
579  }
580 
581  assert(InnerShAmt > OuterShAmt &&
582  "Unexpected opposite direction logical shift pair");
583 
584  // In general, we would need an 'and' for this transform, but
585  // canEvaluateShiftedShift() guarantees that the masked-off bits are not used.
586  // lshr (shl X, C1), C2 --> shl X, C1 - C2
587  // shl (lshr X, C1), C2 --> lshr X, C1 - C2
588  return NewInnerShift(InnerShAmt - OuterShAmt);
589 }
590 
591 /// When canEvaluateShifted() returns true for an expression, this function
592 /// inserts the new computation that produces the shifted value.
593 static Value *getShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
594  InstCombinerImpl &IC, const DataLayout &DL) {
595  // We can always evaluate constants shifted.
596  if (Constant *C = dyn_cast<Constant>(V)) {
597  if (isLeftShift)
598  return IC.Builder.CreateShl(C, NumBits);
599  else
600  return IC.Builder.CreateLShr(C, NumBits);
601  }
602 
603  Instruction *I = cast<Instruction>(V);
604  IC.addToWorklist(I);
605 
606  switch (I->getOpcode()) {
607  default: llvm_unreachable("Inconsistency with CanEvaluateShifted");
608  case Instruction::And:
609  case Instruction::Or:
610  case Instruction::Xor:
611  // Bitwise operators can all be arbitrarily evaluated shifted.
612  I->setOperand(
613  0, getShiftedValue(I->getOperand(0), NumBits, isLeftShift, IC, DL));
614  I->setOperand(
615  1, getShiftedValue(I->getOperand(1), NumBits, isLeftShift, IC, DL));
616  return I;
617 
618  case Instruction::Shl:
619  case Instruction::LShr:
620  return foldShiftedShift(cast<BinaryOperator>(I), NumBits, isLeftShift,
621  IC.Builder);
622 
623  case Instruction::Select:
624  I->setOperand(
625  1, getShiftedValue(I->getOperand(1), NumBits, isLeftShift, IC, DL));
626  I->setOperand(
627  2, getShiftedValue(I->getOperand(2), NumBits, isLeftShift, IC, DL));
628  return I;
629  case Instruction::PHI: {
630  // We can change a phi if we can change all operands. Note that we never
631  // get into trouble with cyclic PHIs here because we only consider
632  // instructions with a single use.
633  PHINode *PN = cast<PHINode>(I);
634  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
635  PN->setIncomingValue(i, getShiftedValue(PN->getIncomingValue(i), NumBits,
636  isLeftShift, IC, DL));
637  return PN;
638  }
639  }
640 }
641 
642 // If this is a bitwise operator or add with a constant RHS we might be able
643 // to pull it through a shift.
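// Editorial example (not part of the original source):
//   shl (add X, 5), 1   -->   add (shl X, 1), 10
// i.e. (X op C) shift K --> (X shift K) op (C shift K) whenever the check
// below says the 'op' can be pulled through the shift.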
644 static bool canShiftBinOpWithConstantRHS(BinaryOperator &Shift,
645  BinaryOperator *BO) {
646  switch (BO->getOpcode()) {
647  default:
648  return false; // Do not perform transform!
649  case Instruction::Add:
650  return Shift.getOpcode() == Instruction::Shl;
651  case Instruction::Or:
652  case Instruction::And:
653  return true;
654  case Instruction::Xor:
655  // Do not change a 'not' of logical shift because that would create a normal
656  // 'xor'. The 'not' is likely better for analysis, SCEV, and codegen.
657  return !(Shift.isLogicalShift() && match(BO, m_Not(m_Value())));
658  }
659 }
660 
661 Instruction *InstCombinerImpl::FoldShiftByConstant(Value *Op0, Constant *Op1,
662  BinaryOperator &I) {
663  const APInt *Op1C;
664  if (!match(Op1, m_APInt(Op1C)))
665  return nullptr;
666 
667  // See if we can propagate this shift into the input, this covers the trivial
668  // case of lshr(shl(x,c1),c2) as well as other more complex cases.
669  bool IsLeftShift = I.getOpcode() == Instruction::Shl;
670  if (I.getOpcode() != Instruction::AShr &&
671  canEvaluateShifted(Op0, Op1C->getZExtValue(), IsLeftShift, *this, &I)) {
672  LLVM_DEBUG(
673  dbgs() << "ICE: GetShiftedValue propagating shift through expression"
674  " to eliminate shift:\n IN: "
675  << *Op0 << "\n SH: " << I << "\n");
676 
677  return replaceInstUsesWith(
678  I, getShiftedValue(Op0, Op1C->getZExtValue(), IsLeftShift, *this, DL));
679  }
680 
681  // See if we can simplify any instructions used by the instruction whose sole
682  // purpose is to compute bits we don't care about.
683  Type *Ty = I.getType();
684  unsigned TypeBits = Ty->getScalarSizeInBits();
685  assert(!Op1C->uge(TypeBits) &&
686  "Shift over the type width should have been removed already");
687  (void)TypeBits;
688 
689  if (Instruction *FoldedShift = foldBinOpIntoSelectOrPhi(I))
690  return FoldedShift;
691 
692  if (!Op0->hasOneUse())
693  return nullptr;
694 
695  if (auto *Op0BO = dyn_cast<BinaryOperator>(Op0)) {
696  // If the operand is a bitwise operator with a constant RHS, and the
697  // shift is the only use, we can pull it out of the shift.
698  const APInt *Op0C;
699  if (match(Op0BO->getOperand(1), m_APInt(Op0C))) {
700  if (canShiftBinOpWithConstantRHS(I, Op0BO)) {
701  Constant *NewRHS = ConstantExpr::get(
702  I.getOpcode(), cast<Constant>(Op0BO->getOperand(1)), Op1);
703 
704  Value *NewShift =
705  Builder.CreateBinOp(I.getOpcode(), Op0BO->getOperand(0), Op1);
706  NewShift->takeName(Op0BO);
707 
708  return BinaryOperator::Create(Op0BO->getOpcode(), NewShift, NewRHS);
709  }
710  }
711  }
712 
713  // If we have a select that conditionally executes some binary operator,
714  // see if we can pull the select and operator through the shift.
715  //
716  // For example, turning:
717  // shl (select C, (add X, C1), X), C2
718  // Into:
719  // Y = shl X, C2
720  // select C, (add Y, C1 << C2), Y
721  Value *Cond;
722  BinaryOperator *TBO;
723  Value *FalseVal;
724  if (match(Op0, m_Select(m_Value(Cond), m_OneUse(m_BinOp(TBO)),
725  m_Value(FalseVal)))) {
726  const APInt *C;
727  if (!isa<Constant>(FalseVal) && TBO->getOperand(0) == FalseVal &&
728  match(TBO->getOperand(1), m_APInt(C)) &&
729  canShiftBinOpWithConstantRHS(I, TBO)) {
730  Constant *NewRHS = ConstantExpr::get(
731  I.getOpcode(), cast<Constant>(TBO->getOperand(1)), Op1);
732 
733  Value *NewShift = Builder.CreateBinOp(I.getOpcode(), FalseVal, Op1);
734  Value *NewOp = Builder.CreateBinOp(TBO->getOpcode(), NewShift, NewRHS);
735  return SelectInst::Create(Cond, NewOp, NewShift);
736  }
737  }
738 
739  BinaryOperator *FBO;
740  Value *TrueVal;
741  if (match(Op0, m_Select(m_Value(Cond), m_Value(TrueVal),
742  m_OneUse(m_BinOp(FBO))))) {
743  const APInt *C;
744  if (!isa<Constant>(TrueVal) && FBO->getOperand(0) == TrueVal &&
745  match(FBO->getOperand(1), m_APInt(C)) &&
746  canShiftBinOpWithConstantRHS(I, FBO)) {
747  Constant *NewRHS = ConstantExpr::get(
748  I.getOpcode(), cast<Constant>(FBO->getOperand(1)), Op1);
749 
750  Value *NewShift = Builder.CreateBinOp(I.getOpcode(), TrueVal, Op1);
751  Value *NewOp = Builder.CreateBinOp(FBO->getOpcode(), NewShift, NewRHS);
752  return SelectInst::Create(Cond, NewShift, NewOp);
753  }
754  }
755 
756  return nullptr;
757 }
758 
759 Instruction *InstCombinerImpl::visitShl(BinaryOperator &I) {
760  const SimplifyQuery Q = SQ.getWithInstruction(&I);
761 
762  if (Value *V = SimplifyShlInst(I.getOperand(0), I.getOperand(1),
763  I.hasNoSignedWrap(), I.hasNoUnsignedWrap(), Q))
764  return replaceInstUsesWith(I, V);
765 
766  if (Instruction *X = foldVectorBinop(I))
767  return X;
768 
769  if (Instruction *V = commonShiftTransforms(I))
770  return V;
771 
772  if (Instruction *V = dropRedundantMaskingOfLeftShiftInput(&I, Q, Builder))
773  return V;
774 
775  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
776  Type *Ty = I.getType();
777  unsigned BitWidth = Ty->getScalarSizeInBits();
778 
779  const APInt *C;
780  if (match(Op1, m_APInt(C))) {
781  unsigned ShAmtC = C->getZExtValue();
782 
783  // shl (zext X), C --> zext (shl X, C)
784  // This is only valid if X would have zeros shifted out.
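// Editorial example (not part of the original source): if X is i8 and its top
// two bits are known zero, then
//   shl (zext i8 X to i32), 2   -->   zext (shl i8 X, 2) to i32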
785  Value *X;
786  if (match(Op0, m_OneUse(m_ZExt(m_Value(X))))) {
787  unsigned SrcWidth = X->getType()->getScalarSizeInBits();
788  if (ShAmtC < SrcWidth &&
789  MaskedValueIsZero(X, APInt::getHighBitsSet(SrcWidth, ShAmtC), 0, &I))
790  return new ZExtInst(Builder.CreateShl(X, ShAmtC), Ty);
791  }
792 
793  // (X >> C) << C --> X & (-1 << C)
794  if (match(Op0, m_Shr(m_Value(X), m_Specific(Op1)))) {
795  APInt Mask(APInt::getHighBitsSet(BitWidth, BitWidth - ShAmtC));
796  return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, Mask));
797  }
798 
799  const APInt *C1;
800  if (match(Op0, m_Exact(m_Shr(m_Value(X), m_APInt(C1)))) &&
801  C1->ult(BitWidth)) {
802  unsigned ShrAmt = C1->getZExtValue();
803  if (ShrAmt < ShAmtC) {
804  // If C1 < C: (X >>?,exact C1) << C --> X << (C - C1)
805  Constant *ShiftDiff = ConstantInt::get(Ty, ShAmtC - ShrAmt);
806  auto *NewShl = BinaryOperator::CreateShl(X, ShiftDiff);
807  NewShl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
808  NewShl->setHasNoSignedWrap(I.hasNoSignedWrap());
809  return NewShl;
810  }
811  if (ShrAmt > ShAmtC) {
812  // If C1 > C: (X >>?exact C1) << C --> X >>?exact (C1 - C)
813  Constant *ShiftDiff = ConstantInt::get(Ty, ShrAmt - ShAmtC);
814  auto *NewShr = BinaryOperator::Create(
815  cast<BinaryOperator>(Op0)->getOpcode(), X, ShiftDiff);
816  NewShr->setIsExact(true);
817  return NewShr;
818  }
819  }
820 
821  if (match(Op0, m_OneUse(m_Shr(m_Value(X), m_APInt(C1)))) &&
822  C1->ult(BitWidth)) {
823  unsigned ShrAmt = C1->getZExtValue();
824  if (ShrAmt < ShAmtC) {
825  // If C1 < C: (X >>? C1) << C --> (X << (C - C1)) & (-1 << C)
826  Constant *ShiftDiff = ConstantInt::get(Ty, ShAmtC - ShrAmt);
827  auto *NewShl = BinaryOperator::CreateShl(X, ShiftDiff);
828  NewShl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
829  NewShl->setHasNoSignedWrap(I.hasNoSignedWrap());
830  Builder.Insert(NewShl);
831  APInt Mask(APInt::getHighBitsSet(BitWidth, BitWidth - ShAmtC));
832  return BinaryOperator::CreateAnd(NewShl, ConstantInt::get(Ty, Mask));
833  }
834  if (ShrAmt > ShAmtC) {
835  // If C1 > C: (X >>? C1) << C --> (X >>? (C1 - C)) & (-1 << C)
836  Constant *ShiftDiff = ConstantInt::get(Ty, ShrAmt - ShAmtC);
837  auto *OldShr = cast<BinaryOperator>(Op0);
838  auto *NewShr =
839  BinaryOperator::Create(OldShr->getOpcode(), X, ShiftDiff);
840  NewShr->setIsExact(OldShr->isExact());
841  Builder.Insert(NewShr);
842  APInt Mask(APInt::getHighBitsSet(BitWidth, BitWidth - ShAmtC));
843  return BinaryOperator::CreateAnd(NewShr, ConstantInt::get(Ty, Mask));
844  }
845  }
846 
847  // Similar to above, but look through an intermediate trunc instruction.
848  BinaryOperator *Shr;
849  if (match(Op0, m_OneUse(m_Trunc(m_OneUse(m_BinOp(Shr))))) &&
850  match(Shr, m_Shr(m_Value(X), m_APInt(C1)))) {
851  // The larger shift direction survives through the transform.
852  unsigned ShrAmtC = C1->getZExtValue();
853  unsigned ShDiff = ShrAmtC > ShAmtC ? ShrAmtC - ShAmtC : ShAmtC - ShrAmtC;
854  Constant *ShiftDiffC = ConstantInt::get(X->getType(), ShDiff);
855  auto ShiftOpc = ShrAmtC > ShAmtC ? Shr->getOpcode() : Instruction::Shl;
856 
857  // If C1 > C:
858  // (trunc (X >> C1)) << C --> (trunc (X >> (C1 - C))) & (-1 << C)
859  // If C > C1:
860  // (trunc (X >> C1)) << C --> (trunc (X << (C - C1))) & (-1 << C)
861  Value *NewShift = Builder.CreateBinOp(ShiftOpc, X, ShiftDiffC, "sh.diff");
862  Value *Trunc = Builder.CreateTrunc(NewShift, Ty, "tr.sh.diff");
863  APInt Mask(APInt::getHighBitsSet(BitWidth, BitWidth - ShAmtC));
864  return BinaryOperator::CreateAnd(Trunc, ConstantInt::get(Ty, Mask));
865  }
866 
867  if (match(Op0, m_Shl(m_Value(X), m_APInt(C1))) && C1->ult(BitWidth)) {
868  unsigned AmtSum = ShAmtC + C1->getZExtValue();
869  // Oversized shifts are simplified to zero in InstSimplify.
870  if (AmtSum < BitWidth)
871  // (X << C1) << C2 --> X << (C1 + C2)
872  return BinaryOperator::CreateShl(X, ConstantInt::get(Ty, AmtSum));
873  }
874 
875  // If we have an opposite shift by the same amount, we may be able to
876  // reorder binops and shifts to eliminate math/logic.
877  auto isSuitableBinOpcode = [](Instruction::BinaryOps BinOpcode) {
878  switch (BinOpcode) {
879  default:
880  return false;
881  case Instruction::Add:
882  case Instruction::And:
883  case Instruction::Or:
884  case Instruction::Xor:
885  case Instruction::Sub:
886  // NOTE: Sub is not commutable and the transforms below may not be valid
887  // when the shift-right is operand 1 (RHS) of the sub.
888  return true;
889  }
890  };
891  BinaryOperator *Op0BO;
892  if (match(Op0, m_OneUse(m_BinOp(Op0BO))) &&
893  isSuitableBinOpcode(Op0BO->getOpcode())) {
894  // Commute so shift-right is on LHS of the binop.
895  // (Y bop (X >> C)) << C -> ((X >> C) bop Y) << C
896  // (Y bop ((X >> C) & CC)) << C -> (((X >> C) & CC) bop Y) << C
897  Value *Shr = Op0BO->getOperand(0);
898  Value *Y = Op0BO->getOperand(1);
899  Value *X;
900  const APInt *CC;
901  if (Op0BO->isCommutative() && Y->hasOneUse() &&
902  (match(Y, m_Shr(m_Value(), m_Specific(Op1))) ||
903  match(Y, m_And(m_OneUse(m_Shr(m_Value(), m_Specific(Op1))),
904  m_APInt(CC)))))
905  std::swap(Shr, Y);
906 
907  // ((X >> C) bop Y) << C -> (X bop (Y << C)) & (~0 << C)
908  if (match(Shr, m_OneUse(m_Shr(m_Value(X), m_Specific(Op1))))) {
909  // Y << C
910  Value *YS = Builder.CreateShl(Y, Op1, Op0BO->getName());
911  // (X bop (Y << C))
912  Value *B =
913  Builder.CreateBinOp(Op0BO->getOpcode(), X, YS, Shr->getName());
914  unsigned Op1Val = C->getLimitedValue(BitWidth);
915  APInt Bits = APInt::getHighBitsSet(BitWidth, BitWidth - Op1Val);
916  Constant *Mask = ConstantInt::get(Ty, Bits);
917  return BinaryOperator::CreateAnd(B, Mask);
918  }
919 
920  // (((X >> C) & CC) bop Y) << C -> (X & (CC << C)) bop (Y << C)
921  if (match(Shr,
922  m_OneUse(m_And(m_OneUse(m_Shr(m_Value(X), m_Specific(Op1))),
923  m_APInt(CC))))) {
924  // Y << C
925  Value *YS = Builder.CreateShl(Y, Op1, Op0BO->getName());
926  // X & (CC << C)
927  Value *M = Builder.CreateAnd(X, ConstantInt::get(Ty, CC->shl(*C)),
928  X->getName() + ".mask");
929  return BinaryOperator::Create(Op0BO->getOpcode(), M, YS);
930  }
931  }
932 
933  // (C1 - X) << C --> (C1 << C) - (X << C)
934  if (match(Op0, m_OneUse(m_Sub(m_APInt(C1), m_Value(X))))) {
935  Constant *NewLHS = ConstantInt::get(Ty, C1->shl(*C));
936  Value *NewShift = Builder.CreateShl(X, Op1);
937  return BinaryOperator::CreateSub(NewLHS, NewShift);
938  }
939 
940  // If the shifted-out value is known-zero, then this is a NUW shift.
941  if (!I.hasNoUnsignedWrap() &&
942  MaskedValueIsZero(Op0, APInt::getHighBitsSet(BitWidth, ShAmtC), 0,
943  &I)) {
944  I.setHasNoUnsignedWrap();
945  return &I;
946  }
947 
948  // If the shifted-out value is all signbits, then this is a NSW shift.
949  if (!I.hasNoSignedWrap() && ComputeNumSignBits(Op0, 0, &I) > ShAmtC) {
950  I.setHasNoSignedWrap();
951  return &I;
952  }
953  }
954 
955  // Transform (x >> y) << y to x & (-1 << y)
956  // Valid for any type of right-shift.
957  Value *X;
958  if (match(Op0, m_OneUse(m_Shr(m_Value(X), m_Specific(Op1))))) {
959  Constant *AllOnes = ConstantInt::getAllOnesValue(Ty);
960  Value *Mask = Builder.CreateShl(AllOnes, Op1);
961  return BinaryOperator::CreateAnd(Mask, X);
962  }
963 
964  Constant *C1;
965  if (match(Op1, m_Constant(C1))) {
966  Constant *C2;
967  Value *X;
968  // (C2 << X) << C1 --> (C2 << C1) << X
969  if (match(Op0, m_OneUse(m_Shl(m_Constant(C2), m_Value(X)))))
970  return BinaryOperator::CreateShl(ConstantExpr::getShl(C2, C1), X);
971 
972  // (X * C2) << C1 --> X * (C2 << C1)
973  if (match(Op0, m_Mul(m_Value(X), m_Constant(C2))))
974  return BinaryOperator::CreateMul(X, ConstantExpr::getShl(C2, C1));
975 
976  // shl (zext i1 X), C1 --> select (X, 1 << C1, 0)
977  if (match(Op0, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
978  auto *NewC = ConstantExpr::getShl(ConstantInt::get(Ty, 1), C1);
979  return SelectInst::Create(X, NewC, ConstantInt::getNullValue(Ty));
980  }
981  }
982 
983  // (1 << (C - x)) -> ((1 << C) >> x) if C is bitwidth - 1
984  if (match(Op0, m_One()) &&
985  match(Op1, m_Sub(m_SpecificInt(BitWidth - 1), m_Value(X))))
986  return BinaryOperator::CreateLShr(
987  ConstantInt::get(Ty, APInt::getSignMask(BitWidth)), X);
988 
989  return nullptr;
990 }
991 
992 Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
993  if (Value *V = SimplifyLShrInst(I.getOperand(0), I.getOperand(1), I.isExact(),
994  SQ.getWithInstruction(&I)))
995  return replaceInstUsesWith(I, V);
996 
997  if (Instruction *X = foldVectorBinop(I))
998  return X;
999 
1000  if (Instruction *R = commonShiftTransforms(I))
1001  return R;
1002 
1003  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1004  Type *Ty = I.getType();
1005  const APInt *C;
1006  if (match(Op1, m_APInt(C))) {
1007  unsigned ShAmtC = C->getZExtValue();
1008  unsigned BitWidth = Ty->getScalarSizeInBits();
1009  auto *II = dyn_cast<IntrinsicInst>(Op0);
1010  if (II && isPowerOf2_32(BitWidth) && Log2_32(BitWidth) == ShAmtC &&
1011  (II->getIntrinsicID() == Intrinsic::ctlz ||
1012  II->getIntrinsicID() == Intrinsic::cttz ||
1013  II->getIntrinsicID() == Intrinsic::ctpop)) {
1014  // ctlz.i32(x)>>5 --> zext(x == 0)
1015  // cttz.i32(x)>>5 --> zext(x == 0)
1016  // ctpop.i32(x)>>5 --> zext(x == -1)
1017  bool IsPop = II->getIntrinsicID() == Intrinsic::ctpop;
1018  Constant *RHS = ConstantInt::getSigned(Ty, IsPop ? -1 : 0);
1019  Value *Cmp = Builder.CreateICmpEQ(II->getArgOperand(0), RHS);
1020  return new ZExtInst(Cmp, Ty);
1021  }
1022 
1023  Value *X;
1024  const APInt *C1;
1025  if (match(Op0, m_Shl(m_Value(X), m_APInt(C1))) && C1->ult(BitWidth)) {
1026  if (C1->ult(ShAmtC)) {
1027  unsigned ShlAmtC = C1->getZExtValue();
1028  Constant *ShiftDiff = ConstantInt::get(Ty, ShAmtC - ShlAmtC);
1029  if (cast<BinaryOperator>(Op0)->hasNoUnsignedWrap()) {
1030  // (X <<nuw C1) >>u C --> X >>u (C - C1)
1031  auto *NewLShr = BinaryOperator::CreateLShr(X, ShiftDiff);
1032  NewLShr->setIsExact(I.isExact());
1033  return NewLShr;
1034  }
1035  // (X << C1) >>u C --> (X >>u (C - C1)) & (-1 >> C)
1036  Value *NewLShr = Builder.CreateLShr(X, ShiftDiff, "", I.isExact());
1037  APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmtC));
1038  return BinaryOperator::CreateAnd(NewLShr, ConstantInt::get(Ty, Mask));
1039  }
1040  if (C1->ugt(ShAmtC)) {
1041  unsigned ShlAmtC = C1->getZExtValue();
1042  Constant *ShiftDiff = ConstantInt::get(Ty, ShlAmtC - ShAmtC);
1043  if (cast<BinaryOperator>(Op0)->hasNoUnsignedWrap()) {
1044  // (X <<nuw C1) >>u C --> X <<nuw (C1 - C)
1045  auto *NewShl = BinaryOperator::CreateShl(X, ShiftDiff);
1046  NewShl->setHasNoUnsignedWrap(true);
1047  return NewShl;
1048  }
1049  // (X << C1) >>u C --> X << (C1 - C) & (-1 >> C)
1050  Value *NewShl = Builder.CreateShl(X, ShiftDiff);
1051  APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmtC));
1052  return BinaryOperator::CreateAnd(NewShl, ConstantInt::get(Ty, Mask));
1053  }
1054  assert(*C1 == ShAmtC);
1055  // (X << C) >>u C --> X & (-1 >>u C)
1056  APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmtC));
1057  return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, Mask));
1058  }
1059 
1060  // ((X << C) + Y) >>u C --> (X + (Y >>u C)) & (-1 >>u C)
1061  // TODO: Consolidate with the more general transform that starts from shl
1062  // (the shifts are in the opposite order).
1063  Value *Y;
1064  if (match(Op0,
1065  m_OneUse(m_c_Add(m_OneUse(m_Shl(m_Value(X), m_Specific(Op1))),
1066  m_Value(Y))))) {
1067  Value *NewLshr = Builder.CreateLShr(Y, Op1);
1068  Value *NewAdd = Builder.CreateAdd(NewLshr, X);
1069  unsigned Op1Val = C->getLimitedValue(BitWidth);
1070  APInt Bits = APInt::getLowBitsSet(BitWidth, BitWidth - Op1Val);
1071  Constant *Mask = ConstantInt::get(Ty, Bits);
1072  return BinaryOperator::CreateAnd(NewAdd, Mask);
1073  }
1074 
1075  if (match(Op0, m_OneUse(m_ZExt(m_Value(X)))) &&
1076  (!Ty->isIntegerTy() || shouldChangeType(Ty, X->getType()))) {
1077  assert(ShAmtC < X->getType()->getScalarSizeInBits() &&
1078  "Big shift not simplified to zero?");
1079  // lshr (zext iM X to iN), C --> zext (lshr X, C) to iN
1080  Value *NewLShr = Builder.CreateLShr(X, ShAmtC);
1081  return new ZExtInst(NewLShr, Ty);
1082  }
1083 
1084  if (match(Op0, m_SExt(m_Value(X)))) {
1085  unsigned SrcTyBitWidth = X->getType()->getScalarSizeInBits();
1086  // lshr (sext i1 X to iN), C --> select (X, -1 >> C, 0)
1087  if (SrcTyBitWidth == 1) {
1088  auto *NewC = ConstantInt::get(
1089  Ty, APInt::getLowBitsSet(BitWidth, BitWidth - ShAmtC));
1090  return SelectInst::Create(X, NewC, ConstantInt::getNullValue(Ty));
1091  }
1092 
1093  if ((!Ty->isIntegerTy() || shouldChangeType(Ty, X->getType())) &&
1094  Op0->hasOneUse()) {
1095  // Are we moving the sign bit to the low bit and widening with high
1096  // zeros? lshr (sext iM X to iN), N-1 --> zext (lshr X, M-1) to iN
1097  if (ShAmtC == BitWidth - 1) {
1098  Value *NewLShr = Builder.CreateLShr(X, SrcTyBitWidth - 1);
1099  return new ZExtInst(NewLShr, Ty);
1100  }
1101 
1102  // lshr (sext iM X to iN), N-M --> zext (ashr X, min(N-M, M-1)) to iN
1103  if (ShAmtC == BitWidth - SrcTyBitWidth) {
1104  // The new shift amount can't be more than the narrow source type.
1105  unsigned NewShAmt = std::min(ShAmtC, SrcTyBitWidth - 1);
1106  Value *AShr = Builder.CreateAShr(X, NewShAmt);
1107  return new ZExtInst(AShr, Ty);
1108  }
1109  }
1110  }
1111 
1112  if (ShAmtC == BitWidth - 1) {
1113  // lshr i32 or(X,-X), 31 --> zext (X != 0)
1114  if (match(Op0, m_OneUse(m_c_Or(m_Neg(m_Value(X)), m_Deferred(X)))))
1115  return new ZExtInst(Builder.CreateIsNotNull(X), Ty);
1116 
1117  // lshr i32 (X -nsw Y), 31 --> zext (X < Y)
1118  if (match(Op0, m_OneUse(m_NSWSub(m_Value(X), m_Value(Y)))))
1119  return new ZExtInst(Builder.CreateICmpSLT(X, Y), Ty);
1120 
1121  // Check if a number is negative and odd:
1122  // lshr i32 (srem X, 2), 31 --> and (X >> 31), X
1123  if (match(Op0, m_OneUse(m_SRem(m_Value(X), m_SpecificInt(2))))) {
1124  Value *Signbit = Builder.CreateLShr(X, ShAmtC);
1125  return BinaryOperator::CreateAnd(Signbit, X);
1126  }
1127  }
1128 
1129  // (X >>u C1) >>u C --> X >>u (C1 + C)
1130  if (match(Op0, m_LShr(m_Value(X), m_APInt(C1)))) {
1131  // Oversized shifts are simplified to zero in InstSimplify.
1132  unsigned AmtSum = ShAmtC + C1->getZExtValue();
1133  if (AmtSum < BitWidth)
1134  return BinaryOperator::CreateLShr(X, ConstantInt::get(Ty, AmtSum));
1135  }
1136 
1137  Instruction *TruncSrc;
1138  if (match(Op0, m_OneUse(m_Trunc(m_Instruction(TruncSrc)))) &&
1139  match(TruncSrc, m_LShr(m_Value(X), m_APInt(C1)))) {
1140  unsigned SrcWidth = X->getType()->getScalarSizeInBits();
1141  unsigned AmtSum = ShAmtC + C1->getZExtValue();
1142 
1143  // If the combined shift fits in the source width:
1144  // (trunc (X >>u C1)) >>u C --> and (trunc (X >>u (C1 + C)), MaskC
1145  //
1146  // If the first shift covers the number of bits truncated, then the
1147  // mask instruction is eliminated (and so the use check is relaxed).
1148  if (AmtSum < SrcWidth &&
1149  (TruncSrc->hasOneUse() || C1->uge(SrcWidth - BitWidth))) {
1150  Value *SumShift = Builder.CreateLShr(X, AmtSum, "sum.shift");
1151  Value *Trunc = Builder.CreateTrunc(SumShift, Ty, I.getName());
1152 
1153  // If the first shift does not cover the number of bits truncated, then
1154  // we require a mask to get rid of high bits in the result.
1155  APInt MaskC = APInt::getAllOnes(BitWidth).lshr(ShAmtC);
1156  return BinaryOperator::CreateAnd(Trunc, ConstantInt::get(Ty, MaskC));
1157  }
1158  }
1159 
1160  // Look for a "splat" mul pattern - it replicates bits across each half of
1161  // a value, so a right shift is just a mask of the low bits:
1162  // lshr i32 (mul nuw X, Pow2+1), 16 --> and X, Pow2-1
1163  // TODO: Generalize to allow more than just half-width shifts?
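// Editorial example (not part of the original source), i32:
//   lshr (mul nuw X, 65537), 16   -->   and X, 65535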
1164  const APInt *MulC;
1165  if (match(Op0, m_NUWMul(m_Value(X), m_APInt(MulC))) &&
1166  ShAmtC * 2 == BitWidth && (*MulC - 1).isPowerOf2() &&
1167  MulC->logBase2() == ShAmtC)
1168  return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, *MulC - 2));
1169 
1170  // If the shifted-out value is known-zero, then this is an exact shift.
1171  if (!I.isExact() &&
1172  MaskedValueIsZero(Op0, APInt::getLowBitsSet(BitWidth, ShAmtC), 0, &I)) {
1173  I.setIsExact();
1174  return &I;
1175  }
1176  }
1177 
1178  // Transform (x << y) >> y to x & (-1 >> y)
1179  Value *X;
1180  if (match(Op0, m_OneUse(m_Shl(m_Value(X), m_Specific(Op1))))) {
1181  Constant *AllOnes = ConstantInt::getAllOnesValue(Ty);
1182  Value *Mask = Builder.CreateLShr(AllOnes, Op1);
1183  return BinaryOperator::CreateAnd(Mask, X);
1184  }
1185 
1186  return nullptr;
1187 }
1188 
1189 Instruction *
1190 InstCombinerImpl::foldVariableSignZeroExtensionOfVariableHighBitExtract(
1191  BinaryOperator &OldAShr) {
1192  assert(OldAShr.getOpcode() == Instruction::AShr &&
1193  "Must be called with arithmetic right-shift instruction only.");
1194 
1195  // Check that constant C is a splat of the element-wise bitwidth of V.
1196  auto BitWidthSplat = [](Constant *C, Value *V) {
1197  return match(
1198  C, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_EQ,
1199  APInt(C->getType()->getScalarSizeInBits(),
1200  V->getType()->getScalarSizeInBits())));
1201  };
1202 
1203  // It should look like variable-length sign-extension on the outside:
1204  // (Val << (bitwidth(Val)-Nbits)) a>> (bitwidth(Val)-Nbits)
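// Editorial sketch (not part of the original source), i32 with
// %skip = 32 - %nbits:
//   %hi = lshr i32 %x, %skip        ; extract the high %nbits bits
//   %t  = shl i32 %hi, %skip
//   %r  = ashr i32 %t, %skip        ; sign-extend those bits back down
// The whole sequence folds to a single 'ashr i32 %x, %skip'.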
1205  Value *NBits;
1206  Instruction *MaybeTrunc;
1207  Constant *C1, *C2;
1208  if (!match(&OldAShr,
1209  m_AShr(m_Shl(m_Instruction(MaybeTrunc),
1210  m_ZExtOrSelf(m_Sub(m_Constant(C1),
1211  m_ZExtOrSelf(m_Value(NBits))))),
1212  m_ZExtOrSelf(m_Sub(m_Constant(C2),
1213  m_ZExtOrSelf(m_Deferred(NBits)))))) ||
1214  !BitWidthSplat(C1, &OldAShr) || !BitWidthSplat(C2, &OldAShr))
1215  return nullptr;
1216 
1217  // There may or may not be a truncation after outer two shifts.
1218  Instruction *HighBitExtract;
1219  match(MaybeTrunc, m_TruncOrSelf(m_Instruction(HighBitExtract)));
1220  bool HadTrunc = MaybeTrunc != HighBitExtract;
1221 
1222  // And finally, the innermost part of the pattern must be a right-shift.
1223  Value *X, *NumLowBitsToSkip;
1224  if (!match(HighBitExtract, m_Shr(m_Value(X), m_Value(NumLowBitsToSkip))))
1225  return nullptr;
1226 
1227  // Said right-shift must extract the high NBits bits - C0 must be its bitwidth.
1228  Constant *C0;
1229  if (!match(NumLowBitsToSkip,
1230  m_ZExtOrSelf(
1231  m_Sub(m_Constant(C0), m_ZExtOrSelf(m_Specific(NBits))))) ||
1232  !BitWidthSplat(C0, HighBitExtract))
1233  return nullptr;
1234 
1235  // Since the NBits is identical for all shifts, if the outermost and
1236  // innermost shifts are identical, then outermost shifts are redundant.
1237  // If we had truncation, do keep it though.
1238  if (HighBitExtract->getOpcode() == OldAShr.getOpcode())
1239  return replaceInstUsesWith(OldAShr, MaybeTrunc);
1240 
1241  // Else, if there was a truncation, then we need to ensure that one
1242  // instruction will go away.
1243  if (HadTrunc && !match(&OldAShr, m_c_BinOp(m_OneUse(m_Value()), m_Value())))
1244  return nullptr;
1245 
1246  // Finally, bypass two innermost shifts, and perform the outermost shift on
1247  // the operands of the innermost shift.
1248  Instruction *NewAShr =
1249  BinaryOperator::Create(OldAShr.getOpcode(), X, NumLowBitsToSkip);
1250  NewAShr->copyIRFlags(HighBitExtract); // We can preserve 'exact'-ness.
1251  if (!HadTrunc)
1252  return NewAShr;
1253 
1254  Builder.Insert(NewAShr);
1255  return TruncInst::CreateTruncOrBitCast(NewAShr, OldAShr.getType());
1256 }
1257 
1258 Instruction *InstCombinerImpl::visitAShr(BinaryOperator &I) {
1259  if (Value *V = SimplifyAShrInst(I.getOperand(0), I.getOperand(1), I.isExact(),
1260  SQ.getWithInstruction(&I)))
1261  return replaceInstUsesWith(I, V);
1262 
1263  if (Instruction *X = foldVectorBinop(I))
1264  return X;
1265 
1266  if (Instruction *R = commonShiftTransforms(I))
1267  return R;
1268 
1269  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1270  Type *Ty = I.getType();
1271  unsigned BitWidth = Ty->getScalarSizeInBits();
1272  const APInt *ShAmtAPInt;
1273  if (match(Op1, m_APInt(ShAmtAPInt)) && ShAmtAPInt->ult(BitWidth)) {
1274  unsigned ShAmt = ShAmtAPInt->getZExtValue();
1275 
1276  // If the shift amount equals the difference in width of the destination
1277  // and source scalar types:
1278  // ashr (shl (zext X), C), C --> sext X
1279  Value *X;
1280  if (match(Op0, m_Shl(m_ZExt(m_Value(X)), m_Specific(Op1))) &&
1281  ShAmt == BitWidth - X->getType()->getScalarSizeInBits())
1282  return new SExtInst(X, Ty);
1283 
1284  // We can't handle (X << C1) >>s C2. It shifts arbitrary bits in. However,
1285  // we can handle (X <<nsw C1) >>s C2 since it only shifts in sign bits.
1286  const APInt *ShOp1;
1287  if (match(Op0, m_NSWShl(m_Value(X), m_APInt(ShOp1))) &&
1288  ShOp1->ult(BitWidth)) {
1289  unsigned ShlAmt = ShOp1->getZExtValue();
1290  if (ShlAmt < ShAmt) {
1291  // (X <<nsw C1) >>s C2 --> X >>s (C2 - C1)
1292  Constant *ShiftDiff = ConstantInt::get(Ty, ShAmt - ShlAmt);
1293  auto *NewAShr = BinaryOperator::CreateAShr(X, ShiftDiff);
1294  NewAShr->setIsExact(I.isExact());
1295  return NewAShr;
1296  }
1297  if (ShlAmt > ShAmt) {
1298  // (X <<nsw C1) >>s C2 --> X <<nsw (C1 - C2)
1299  Constant *ShiftDiff = ConstantInt::get(Ty, ShlAmt - ShAmt);
1300  auto *NewShl = BinaryOperator::Create(Instruction::Shl, X, ShiftDiff);
1301  NewShl->setHasNoSignedWrap(true);
1302  return NewShl;
1303  }
1304  }
1305 
1306  if (match(Op0, m_AShr(m_Value(X), m_APInt(ShOp1))) &&
1307  ShOp1->ult(BitWidth)) {
1308  unsigned AmtSum = ShAmt + ShOp1->getZExtValue();
1309  // Oversized arithmetic shifts replicate the sign bit.
1310  AmtSum = std::min(AmtSum, BitWidth - 1);
1311  // (X >>s C1) >>s C2 --> X >>s (C1 + C2)
1312  return BinaryOperator::CreateAShr(X, ConstantInt::get(Ty, AmtSum));
1313  }
1314 
1315  if (match(Op0, m_OneUse(m_SExt(m_Value(X)))) &&
1316  (Ty->isVectorTy() || shouldChangeType(Ty, X->getType()))) {
1317  // ashr (sext X), C --> sext (ashr X, C')
1318  Type *SrcTy = X->getType();
1319  ShAmt = std::min(ShAmt, SrcTy->getScalarSizeInBits() - 1);
1320  Value *NewSh = Builder.CreateAShr(X, ConstantInt::get(SrcTy, ShAmt));
1321  return new SExtInst(NewSh, Ty);
1322  }
1323 
1324  if (ShAmt == BitWidth - 1) {
1325  // ashr i32 or(X,-X), 31 --> sext (X != 0)
1326  if (match(Op0, m_OneUse(m_c_Or(m_Neg(m_Value(X)), m_Deferred(X)))))
1327  return new SExtInst(Builder.CreateIsNotNull(X), Ty);
1328 
1329  // ashr i32 (X -nsw Y), 31 --> sext (X < Y)
1330  Value *Y;
1331  if (match(Op0, m_OneUse(m_NSWSub(m_Value(X), m_Value(Y)))))
1332  return new SExtInst(Builder.CreateICmpSLT(X, Y), Ty);
1333  }
1334 
1335  // If the shifted-out value is known-zero, then this is an exact shift.
1336  if (!I.isExact() &&
1337  MaskedValueIsZero(Op0, APInt::getLowBitsSet(BitWidth, ShAmt), 0, &I)) {
1338  I.setIsExact();
1339  return &I;
1340  }
1341  }
1342 
1343  // Prefer `-(x & 1)` over `(x << (bitwidth(x)-1)) a>> (bitwidth(x)-1)`
1344  // as the pattern to splat the lowest bit.
1345  // FIXME: iff X is already masked, we don't need the one-use check.
1346  Value *X;
1347  if (match(Op1, m_SpecificIntAllowUndef(BitWidth - 1)) &&
1348  match(Op0, m_OneUse(m_Shl(m_Value(X),
1349  m_SpecificIntAllowUndef(BitWidth - 1))))) {
1350  Constant *Mask = ConstantInt::get(Ty, 1);
1351  // Retain the knowledge about the ignored lanes.
1352  Mask = Constant::mergeUndefsWith(
1353  Constant::mergeUndefsWith(Mask, cast<Constant>(Op1)),
1354  cast<Constant>(cast<Instruction>(Op0)->getOperand(1)));
1355  X = Builder.CreateAnd(X, Mask);
1356  return BinaryOperator::CreateNeg(X);
1357  }
1358 
1359  if (Instruction *R = foldVariableSignZeroExtensionOfVariableHighBitExtract(I))
1360  return R;
1361 
1362  // See if we can turn a signed shr into an unsigned shr.
1363  if (MaskedValueIsZero(Op0, APInt::getSignMask(BitWidth), 0, &I))
1364  return BinaryOperator::CreateLShr(Op0, Op1);
1365 
1366  // ashr (xor %x, -1), %y --> xor (ashr %x, %y), -1
1367  if (match(Op0, m_OneUse(m_Not(m_Value(X))))) {
1368  // Note that we must drop 'exact'-ness of the shift!
1369  // Note that we can't keep undefs in the -1 vector constant!
1370  auto *NewAShr = Builder.CreateAShr(X, Op1, Op0->getName() + ".not");
1371  return BinaryOperator::CreateNot(NewAShr);
1372  }
1373 
1374  return nullptr;
1375 }
i
i
Definition: README.txt:29
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AllocatorList.h:23
M
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
Definition: README.txt:252
llvm::PatternMatch::m_TruncOrSelf
match_combine_or< CastClass_match< OpTy, Instruction::Trunc >, OpTy > m_TruncOrSelf(const OpTy &Op)
Definition: PatternMatch.h:1627
llvm::isKnownNonNegative
bool isKnownNonNegative(const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Returns true if the give value is known to be non-negative.
Definition: ValueTracking.cpp:323
llvm::SimplifySubInst
Value * SimplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW, const SimplifyQuery &Q)
Given operands for a Sub, fold the result or return null.
Definition: InstructionSimplify.cpp:874
llvm::ConstantExpr::getNot
static Constant * getNot(Constant *C)
Definition: Constants.cpp:2733
llvm::MaskedValueIsZero
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if 'V & Mask' is known to be zero.
Definition: ValueTracking.cpp:367
llvm::PHINode::incoming_values
op_range incoming_values()
Definition: Instructions.h:2743
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:113
llvm::ConstantExpr::getZExtOrBitCast
static Constant * getZExtOrBitCast(Constant *C, Type *Ty)
Definition: Constants.cpp:2076
llvm::Value::hasOneUse
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
InstCombiner.h
IntrinsicInst.h
llvm::SimplifyQuery
Definition: InstructionSimplify.h:94
canTryToConstantAddTwoShiftAmounts
bool canTryToConstantAddTwoShiftAmounts(Value *Sh0, Value *ShAmt0, Value *Sh1, Value *ShAmt1)
Definition: InstCombineShifts.cpp:24
llvm::ConstantExpr::getZExt
static Constant * getZExt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2172
llvm::InstCombinerImpl::FoldShiftByConstant
Instruction * FoldShiftByConstant(Value *Op0, Constant *Op1, BinaryOperator &I)
Definition: InstCombineShifts.cpp:661
llvm::BinaryOperator::CreateNot
static BinaryOperator * CreateNot(Value *Op, const Twine &Name="", Instruction *InsertBefore=nullptr)
Definition: Instructions.cpp:2806
llvm::PatternMatch::m_LShr
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1127
canShiftBinOpWithConstantRHS
static bool canShiftBinOpWithConstantRHS(BinaryOperator &Shift, BinaryOperator *BO)
Definition: InstCombineShifts.cpp:644
llvm::InstCombiner::Builder
BuilderTy & Builder
Definition: InstCombiner.h:58
llvm::PatternMatch::m_Add
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
Definition: PatternMatch.h:988
llvm::PatternMatch::m_CombineOr
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
Definition: PatternMatch.h:210
llvm::IRBuilder< TargetFolder, IRBuilderCallbackInserter >
llvm::CastInst::Create
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Definition: Instructions.cpp:3152
llvm::Instruction::hasNoUnsignedWrap
bool hasNoUnsignedWrap() const
Determine whether the no unsigned wrap flag is set.
Definition: Instruction.cpp:136
Shift
bool Shift
Definition: README.txt:468
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
llvm::tgtok::Bits
@ Bits
Definition: TGLexer.h:50
llvm::InstCombinerImpl::reassociateShiftAmtsOfTwoSameDirectionShifts
Value * reassociateShiftAmtsOfTwoSameDirectionShifts(BinaryOperator *Sh0, const SimplifyQuery &SQ, bool AnalyzeForSignBitExtraction=false)
Definition: InstCombineShifts.cpp:59
canEvaluateShiftedShift
static bool canEvaluateShiftedShift(unsigned OuterShAmt, bool IsOuterShl, Instruction *InnerShift, InstCombinerImpl &IC, Instruction *CxtI)
Return true if we can simplify two logical (either left or right) shifts that have constant shift amo...
Definition: InstCombineShifts.cpp:430
llvm::APInt::ugt
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
Definition: APInt.h:1115
llvm::Instruction::setHasNoUnsignedWrap
void setHasNoUnsignedWrap(bool b=true)
Set or clear the nuw flag on this instruction, which must be an operator which supports this flag.
Definition: Instruction.cpp:124
llvm::PatternMatch::m_BinOp
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
Definition: PatternMatch.h:84
llvm::PatternMatch::m_AShr
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1133
llvm::ComputeNumSignBits
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return the number of times the sign bit of the register is replicated into the other bits.
Definition: ValueTracking.cpp:391
llvm::InstCombiner::addToWorklist
void addToWorklist(Instruction *I)
Definition: InstCombiner.h:366
llvm::tgtok::FalseVal
@ FalseVal
Definition: TGLexer.h:61
llvm::ARM_AM::ShiftOpc
ShiftOpc
Definition: ARMAddressingModes.h:27
llvm::MipsISD::Ret
@ Ret
Definition: MipsISelLowering.h:116
llvm::APInt::lshr
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition: APInt.h:815
RHS
Value * RHS
Definition: X86PartialReduction.cpp:74
llvm::isPowerOf2_32
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:491
llvm::PatternMatch::m_c_And
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
Definition: PatternMatch.h:2249
llvm::BinaryOperator::CreateNeg
static BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", Instruction *InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
Definition: Instructions.cpp:2766
llvm::PatternMatch::m_Not
BinaryOp_match< ValTy, cst_pred_ty< is_all_ones >, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
Definition: PatternMatch.h:2287
llvm::Instruction::setIsExact
void setIsExact(bool b=true)
Set or clear the exact flag on this instruction, which must be an operator which supports this flag.
Definition: Instruction.cpp:132
llvm::BitmaskEnumDetail::Mask
std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
llvm::PHINode::setIncomingValue
void setIncomingValue(unsigned i, Value *V)
Definition: Instructions.h:2756
ConstantFolding.h
llvm::PatternMatch::m_Deferred
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
Definition: PatternMatch.h:798
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:101
llvm::PatternMatch::m_OneUse
OneUse_match< T > m_OneUse(const T &SubPattern)
Definition: PatternMatch.h:67
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
llvm::ConstantExpr::getSub
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2750
llvm::PatternMatch::m_APInt
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
Definition: PatternMatch.h:270
llvm::PatternMatch::m_c_BinOp
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
Definition: PatternMatch.h:2213
llvm::APInt::uge
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition: APInt.h:1154
llvm::Intrinsic::getType
FunctionType * getType(LLVMContext &Context, ID id, ArrayRef< Type * > Tys=None)
Return the function type for an intrinsic.
Definition: Function.cpp:1355
llvm::Instruction::getOpcode
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:160
llvm::SelectInst::Create
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", Instruction *InsertBefore=nullptr, Instruction *MDFrom=nullptr)
Definition: Instructions.h:1772
llvm::InstCombinerImpl::foldVariableSignZeroExtensionOfVariableHighBitExtract
Instruction * foldVariableSignZeroExtensionOfVariableHighBitExtract(BinaryOperator &OldAShr)
Definition: InstCombineShifts.cpp:1190
llvm::SimplifyShlInst
Value * SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, const SimplifyQuery &Q)
Given operands for a Shl, fold the result or return null.
Definition: InstructionSimplify.cpp:1395
llvm::RecurKind::And
@ And
Bitwise or logical AND of integers.
hasNoUnsignedWrap
static bool hasNoUnsignedWrap(BinaryOperator &I)
Definition: InstructionCombining.cpp:295
InstCombineInternal.h
llvm::PatternMatch::m_Select
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
Definition: PatternMatch.h:1472
llvm::PatternMatch::match
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
llvm::PHINode::getIncomingValue
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
Definition: Instructions.h:2753
getScalarSizeInBits
static unsigned getScalarSizeInBits(Type *Ty)
Definition: SystemZTargetTransformInfo.cpp:367
llvm::Instruction::setHasNoSignedWrap
void setHasNoSignedWrap(bool b=true)
Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.
Definition: Instruction.cpp:128
llvm::PatternMatch::m_Exact
Exact_match< T > m_Exact(const T &SubPattern)
Definition: PatternMatch.h:1361
llvm::PatternMatch::m_SpecificIntAllowUndef
specific_intval< true > m_SpecificIntAllowUndef(APInt V)
Definition: PatternMatch.h:871
Y
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
getOpcode
static Optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition: VPlanSLP.cpp:199
llvm::APInt::getAllOnes
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:214
llvm::Type::isVectorTy
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:226
llvm::PatternMatch::m_Instruction
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
Definition: PatternMatch.h:726
llvm::SimplifyAShrInst
Value * SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact, const SimplifyQuery &Q)
Given operands for an AShr, fold the result or return null.
Definition: InstructionSimplify.cpp:1465
llvm::PatternMatch::m_ZExt
CastClass_match< OpTy, Instruction::ZExt > m_ZExt(const OpTy &Op)
Matches ZExt.
Definition: PatternMatch.h:1639
llvm::Log2_32
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:596
llvm::PatternMatch::m_c_Add
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
Definition: PatternMatch.h:2235
B
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
llvm::Constant::getAllOnesValue
static Constant * getAllOnesValue(Type *Ty)
Definition: Constants.cpp:405
llvm::BinaryOperator::getOpcode
BinaryOps getOpcode() const
Definition: InstrTypes.h:394
llvm::Instruction
Definition: Instruction.h:45
llvm::Type::getScalarSizeInBits
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition: Type.cpp:191
llvm::PatternMatch::m_c_Or
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
Definition: PatternMatch.h:2256
llvm::APInt::getZExtValue
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1467
llvm::APInt::getHighBitsSet
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition: APInt.h:279
llvm::InstCombinerImpl
Definition: InstCombineInternal.h:61
llvm::ConstantInt::get
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:932
llvm::PatternMatch::m_NSWSub
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1174
PatternMatch.h
llvm::CastInst::CreateTruncOrBitCast
static CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Create a Trunc or BitCast cast instruction.
Definition: Instructions.cpp:3228
llvm::PatternMatch::m_Shift
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
Definition: PatternMatch.h:1306
llvm::PHINode::getNumIncomingValues
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Definition: Instructions.h:2749
X
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
llvm::PatternMatch::m_One
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
Definition: PatternMatch.h:513
llvm::Instruction::isCommutative
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
Definition: Instruction.cpp:768
llvm::PatternMatch::m_Power2
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
Definition: PatternMatch.h:544
llvm::PatternMatch::m_Xor
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1115
llvm::Type::getExtendedType
Type * getExtendedType() const
Given scalar/vector integer type, returns a type with elements twice as wide as in the original type.
Definition: DerivedTypes.h:711
llvm::Type::isIntegerTy
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:190
llvm::PatternMatch::m_Shr
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches right-shift operations (lshr and ashr).
Definition: PatternMatch.h:1313
llvm::PatternMatch::m_ZExtOrSelf
match_combine_or< CastClass_match< OpTy, Instruction::ZExt >, OpTy > m_ZExtOrSelf(const OpTy &Op)
Definition: PatternMatch.h:1645
llvm::ConstantExpr::getTrunc
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2144
llvm::InstCombinerImpl::visitShl
Instruction * visitShl(BinaryOperator &I)
Definition: InstCombineShifts.cpp:759
llvm::Constant
This is an important base class in LLVM.
Definition: Constant.h:41
llvm::Instruction::isLogicalShift
bool isLogicalShift() const
Return true if this is a logical shift left or a logical shift right.
Definition: Instruction.h:205
llvm::Constant::replaceUndefsWith
static Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
Definition: Constants.cpp:800
llvm::APInt::logBase2
unsigned logBase2() const
Definition: APInt.h:1648
llvm::PatternMatch::m_AllOnes
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
Definition: PatternMatch.h:445
llvm::numbers::e
constexpr double e
Definition: MathExtras.h:57
llvm::ConstantExpr::get
static Constant * get(unsigned Opcode, Constant *C1, unsigned Flags=0, Type *OnlyIfReducedTy=nullptr)
get - Return a unary operator constant expression, folding if possible.
Definition: Constants.cpp:2316
I
#define I(x, y, z)
Definition: MD5.cpp:58
getShiftedValue
static Value * getShiftedValue(Value *V, unsigned NumBits, bool isLeftShift, InstCombinerImpl &IC, const DataLayout &DL)
When canEvaluateShifted() returns true for an expression, this function inserts the new computation t...
Definition: InstCombineShifts.cpp:593
llvm::ConstantExpr::getShl
static Constant * getShl(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2815
llvm::PatternMatch::m_And
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1103
llvm::PatternMatch::m_SRem
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1091
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::PatternMatch::m_Sub
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1000
std::swap
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:840
SI
StandardInstrumentations SI(Debug, VerifyEach)
llvm::SelectInst
This class represents the LLVM 'select' instruction.
Definition: Instructions.h:1741
llvm::SimplifyQuery::getWithInstruction
SimplifyQuery getWithInstruction(Instruction *I) const
Definition: InstructionSimplify.h:121
llvm::PatternMatch::m_Constant
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
Definition: PatternMatch.h:142
llvm::PatternMatch::m_Value
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:76
llvm::ZExtInst
This class represents zero extension of integer types.
Definition: Instructions.h:4812
llvm::APInt
Class for arbitrary precision integers.
Definition: APInt.h:75
llvm::User::setOperand
void setOperand(unsigned i, Value *Val)
Definition: User.h:174
llvm::PatternMatch::m_SExt
CastClass_match< OpTy, Instruction::SExt > m_SExt(const OpTy &Op)
Matches SExt.
Definition: PatternMatch.h:1633
llvm::InstCombinerImpl::visitLShr
Instruction * visitLShr(BinaryOperator &I)
Definition: InstCombineShifts.cpp:992
llvm::PatternMatch::m_SpecificInt
specific_intval< false > m_SpecificInt(APInt V)
Match a specific integer value or vector with all elements equal to the value.
Definition: PatternMatch.h:863
llvm::PatternMatch::m_CombineAnd
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
Definition: PatternMatch.h:216
llvm::PatternMatch::m_NUWMul
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1215
llvm::SimplifyLShrInst
Value * SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact, const SimplifyQuery &Q)
Given operands for a LShr, fold the result or return null.
Definition: InstructionSimplify.cpp:1432
llvm::BinaryOperator
Definition: InstrTypes.h:190
llvm::min
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:357
llvm::PatternMatch::m_SpecificInt_ICMP
cst_pred_ty< icmp_pred_with_threshold > m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold)
Match an integer or vector with every element comparing 'pred' (eq/ne/...) to Threshold.
Definition: PatternMatch.h:604
Cond
SmallVector< MachineOperand, 4 > Cond
Definition: BasicBlockSections.cpp:179
llvm::CmpInst::ICMP_ULT
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:746
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:134
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
llvm::Instruction::isExact
bool isExact() const
Determine whether the exact flag is set.
Definition: Instruction.cpp:196
foldShiftedShift
static Value * foldShiftedShift(BinaryOperator *InnerShift, unsigned OuterShAmt, bool IsOuterShl, InstCombiner::BuilderTy &Builder)
Fold OuterShift (InnerShift X, C1), C2.
Definition: InstCombineShifts.cpp:530
llvm::APInt::ult
bool ult(const APInt &RHS) const
Unsigned less than comparison.
Definition: APInt.h:1044
llvm::SExtInst
This class represents a sign extension of integer types.
Definition: Instructions.h:4851
llvm::Value::getName
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
llvm::InstCombinerImpl::visitAShr
Instruction * visitAShr(BinaryOperator &I)
Definition: InstCombineShifts.cpp:1258
llvm::MCID::Select
@ Select
Definition: MCInstrDesc.h:162
foldShiftOfShiftedLogic
static Instruction * foldShiftOfShiftedLogic(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
If we have a shift-by-constant of a bitwise logic op that itself has a shift-by-constant operand with...
Definition: InstCombineShifts.cpp:330
llvm::Constant::getNullValue
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:348
llvm::InstCombinerImpl::commonShiftTransforms
Instruction * commonShiftTransforms(BinaryOperator &I)
Definition: InstCombineShifts.cpp:371
llvm::ConstantExpr::getAdd
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2739
llvm::ConstantInt::getSigned
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
Definition: Constants.cpp:946
llvm::BitWidth
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:147
llvm::PatternMatch::m_NSWShl
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoSignedWrap > m_NSWShl(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1190
dropRedundantMaskingOfLeftShiftInput
static Instruction * dropRedundantMaskingOfLeftShiftInput(BinaryOperator *OuterShift, const SimplifyQuery &Q, InstCombiner::BuilderTy &Builder)
Definition: InstCombineShifts.cpp:185
llvm::ConstantExpr::getLShr
static Constant * getLShr(Constant *C1, Constant *C2, bool isExact=false)
Definition: Constants.cpp:2822
llvm::MCID::Add
@ Add
Definition: MCInstrDesc.h:183
llvm::Constant::mergeUndefsWith
static Constant * mergeUndefsWith(Constant *C, Constant *Other)
Merges undefs of a Constant with another Constant, along with the undefs already present.
Definition: Constants.cpp:824
llvm::Instruction::BinaryOps
BinaryOps
Definition: Instruction.h:789
llvm::APInt::getSignMask
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
Definition: APInt.h:209
llvm::PatternMatch::m_Specific
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
Definition: PatternMatch.h:780
llvm::IRBuilderBase::CreateLShr
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1321
llvm::Instruction::hasNoSignedWrap
bool hasNoSignedWrap() const
Determine whether the no signed wrap flag is set.
Definition: Instruction.cpp:140
CreateMul
static BinaryOperator * CreateMul(Value *S1, Value *S2, const Twine &Name, Instruction *InsertBefore, Value *FlagsOp)
Definition: Reassociate.cpp:246
llvm::IRBuilderBase::CreateShl
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1300
canEvaluateShifted
static bool canEvaluateShifted(Value *V, unsigned NumBits, bool IsLeftShift, InstCombinerImpl &IC, Instruction *CxtI)
See if we can compute the specified value, but shifted logically to the left or right by some number ...
Definition: InstCombineShifts.cpp:482
InstructionSimplify.h
llvm::PHINode
Definition: Instructions.h:2657
Threshold
static cl::opt< unsigned > Threshold("loop-unswitch-threshold", cl::desc("Max loop size to unswitch"), cl::init(100), cl::Hidden)
llvm::Instruction::copyIRFlags
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
Definition: Instruction.cpp:299
llvm::PatternMatch::m_Neg
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
Definition: PatternMatch.h:2271
llvm::APInt::getLowBitsSet
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition: APInt.h:289
llvm::Value::takeName
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:382
llvm::APInt::shl
APInt shl(unsigned shiftAmt) const
Left-shift function.
Definition: APInt.h:837
llvm::User::getOperand
Value * getOperand(unsigned i) const
Definition: User.h:169
llvm::PatternMatch::m_Trunc
CastClass_match< OpTy, Instruction::Trunc > m_Trunc(const OpTy &Op)
Matches Trunc.
Definition: PatternMatch.h:1621
llvm::BinaryOperator::Create
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), Instruction *InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
Definition: Instructions.cpp:2750
llvm::SimplifyAddInst
Value * SimplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
Definition: InstructionSimplify.cpp:687
llvm::tgtok::TrueVal
@ TrueVal
Definition: TGLexer.h:61
llvm::Value
LLVM Value Representation.
Definition: Value.h:74
llvm::PatternMatch::m_Shl
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1121
llvm::InstCombinerImpl::MaskedValueIsZero
bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth=0, const Instruction *CxtI=nullptr) const
Definition: InstCombineInternal.h:482
llvm::PatternMatch::m_Mul
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1055