// LLVM 15.0.0git — InstCombineShifts.cpp
// (Documentation snapshot; see the doxygen page for this file.)
1 //===- InstCombineShifts.cpp ----------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the visitShl, visitLShr, and visitAShr functions.
10 //
11 //===----------------------------------------------------------------------===//
12 
#include "InstCombineInternal.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
18 using namespace llvm;
19 using namespace PatternMatch;
20 
21 #define DEBUG_TYPE "instcombine"
22 
24  Value *ShAmt1) {
25  // We have two shift amounts from two different shifts. The types of those
26  // shift amounts may not match. If that's the case let's bailout now..
27  if (ShAmt0->getType() != ShAmt1->getType())
28  return false;
29 
30  // As input, we have the following pattern:
31  // Sh0 (Sh1 X, Q), K
32  // We want to rewrite that as:
33  // Sh x, (Q+K) iff (Q+K) u< bitwidth(x)
34  // While we know that originally (Q+K) would not overflow
35  // (because 2 * (N-1) u<= iN -1), we have looked past extensions of
36  // shift amounts. so it may now overflow in smaller bitwidth.
37  // To ensure that does not happen, we need to ensure that the total maximal
38  // shift amount is still representable in that smaller bit width.
39  unsigned MaximalPossibleTotalShiftAmount =
40  (Sh0->getType()->getScalarSizeInBits() - 1) +
41  (Sh1->getType()->getScalarSizeInBits() - 1);
42  APInt MaximalRepresentableShiftAmount =
44  return MaximalRepresentableShiftAmount.uge(MaximalPossibleTotalShiftAmount);
45 }
46 
47 // Given pattern:
48 // (x shiftopcode Q) shiftopcode K
49 // we should rewrite it as
50 // x shiftopcode (Q+K) iff (Q+K) u< bitwidth(x) and
51 //
52 // This is valid for any shift, but they must be identical, and we must be
53 // careful in case we have (zext(Q)+zext(K)) and look past extensions,
54 // (Q+K) must not overflow or else (Q+K) u< bitwidth(x) is bogus.
55 //
56 // AnalyzeForSignBitExtraction indicates that we will only analyze whether this
57 // pattern has any 2 right-shifts that sum to 1 less than original bit width.
59  BinaryOperator *Sh0, const SimplifyQuery &SQ,
60  bool AnalyzeForSignBitExtraction) {
61  // Look for a shift of some instruction, ignore zext of shift amount if any.
62  Instruction *Sh0Op0;
63  Value *ShAmt0;
64  if (!match(Sh0,
65  m_Shift(m_Instruction(Sh0Op0), m_ZExtOrSelf(m_Value(ShAmt0)))))
66  return nullptr;
67 
68  // If there is a truncation between the two shifts, we must make note of it
69  // and look through it. The truncation imposes additional constraints on the
70  // transform.
71  Instruction *Sh1;
72  Value *Trunc = nullptr;
73  match(Sh0Op0,
75  m_Instruction(Sh1)));
76 
77  // Inner shift: (x shiftopcode ShAmt1)
78  // Like with other shift, ignore zext of shift amount if any.
79  Value *X, *ShAmt1;
80  if (!match(Sh1, m_Shift(m_Value(X), m_ZExtOrSelf(m_Value(ShAmt1)))))
81  return nullptr;
82 
83  // Verify that it would be safe to try to add those two shift amounts.
84  if (!canTryToConstantAddTwoShiftAmounts(Sh0, ShAmt0, Sh1, ShAmt1))
85  return nullptr;
86 
87  // We are only looking for signbit extraction if we have two right shifts.
88  bool HadTwoRightShifts = match(Sh0, m_Shr(m_Value(), m_Value())) &&
89  match(Sh1, m_Shr(m_Value(), m_Value()));
90  // ... and if it's not two right-shifts, we know the answer already.
91  if (AnalyzeForSignBitExtraction && !HadTwoRightShifts)
92  return nullptr;
93 
94  // The shift opcodes must be identical, unless we are just checking whether
95  // this pattern can be interpreted as a sign-bit-extraction.
96  Instruction::BinaryOps ShiftOpcode = Sh0->getOpcode();
97  bool IdenticalShOpcodes = Sh0->getOpcode() == Sh1->getOpcode();
98  if (!IdenticalShOpcodes && !AnalyzeForSignBitExtraction)
99  return nullptr;
100 
101  // If we saw truncation, we'll need to produce extra instruction,
102  // and for that one of the operands of the shift must be one-use,
103  // unless of course we don't actually plan to produce any instructions here.
104  if (Trunc && !AnalyzeForSignBitExtraction &&
105  !match(Sh0, m_c_BinOp(m_OneUse(m_Value()), m_Value())))
106  return nullptr;
107 
108  // Can we fold (ShAmt0+ShAmt1) ?
109  auto *NewShAmt = dyn_cast_or_null<Constant>(
110  SimplifyAddInst(ShAmt0, ShAmt1, /*isNSW=*/false, /*isNUW=*/false,
111  SQ.getWithInstruction(Sh0)));
112  if (!NewShAmt)
113  return nullptr; // Did not simplify.
114  unsigned NewShAmtBitWidth = NewShAmt->getType()->getScalarSizeInBits();
115  unsigned XBitWidth = X->getType()->getScalarSizeInBits();
116  // Is the new shift amount smaller than the bit width of inner/new shift?
117  if (!match(NewShAmt, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_ULT,
118  APInt(NewShAmtBitWidth, XBitWidth))))
119  return nullptr; // FIXME: could perform constant-folding.
120 
121  // If there was a truncation, and we have a right-shift, we can only fold if
122  // we are left with the original sign bit. Likewise, if we were just checking
123  // that this is a sighbit extraction, this is the place to check it.
124  // FIXME: zero shift amount is also legal here, but we can't *easily* check
125  // more than one predicate so it's not really worth it.
126  if (HadTwoRightShifts && (Trunc || AnalyzeForSignBitExtraction)) {
127  // If it's not a sign bit extraction, then we're done.
128  if (!match(NewShAmt,
129  m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_EQ,
130  APInt(NewShAmtBitWidth, XBitWidth - 1))))
131  return nullptr;
132  // If it is, and that was the question, return the base value.
133  if (AnalyzeForSignBitExtraction)
134  return X;
135  }
136 
137  assert(IdenticalShOpcodes && "Should not get here with different shifts.");
138 
139  // All good, we can do this fold.
140  NewShAmt = ConstantExpr::getZExtOrBitCast(NewShAmt, X->getType());
141 
142  BinaryOperator *NewShift = BinaryOperator::Create(ShiftOpcode, X, NewShAmt);
143 
144  // The flags can only be propagated if there wasn't a trunc.
145  if (!Trunc) {
146  // If the pattern did not involve trunc, and both of the original shifts
147  // had the same flag set, preserve the flag.
148  if (ShiftOpcode == Instruction::BinaryOps::Shl) {
149  NewShift->setHasNoUnsignedWrap(Sh0->hasNoUnsignedWrap() &&
150  Sh1->hasNoUnsignedWrap());
151  NewShift->setHasNoSignedWrap(Sh0->hasNoSignedWrap() &&
152  Sh1->hasNoSignedWrap());
153  } else {
154  NewShift->setIsExact(Sh0->isExact() && Sh1->isExact());
155  }
156  }
157 
158  Instruction *Ret = NewShift;
159  if (Trunc) {
160  Builder.Insert(NewShift);
161  Ret = CastInst::Create(Instruction::Trunc, NewShift, Sh0->getType());
162  }
163 
164  return Ret;
165 }
166 
167 // If we have some pattern that leaves only some low bits set, and then performs
168 // left-shift of those bits, if none of the bits that are left after the final
169 // shift are modified by the mask, we can omit the mask.
170 //
171 // There are many variants to this pattern:
172 // a) (x & ((1 << MaskShAmt) - 1)) << ShiftShAmt
173 // b) (x & (~(-1 << MaskShAmt))) << ShiftShAmt
174 // c) (x & (-1 l>> MaskShAmt)) << ShiftShAmt
175 // d) (x & ((-1 << MaskShAmt) l>> MaskShAmt)) << ShiftShAmt
176 // e) ((x << MaskShAmt) l>> MaskShAmt) << ShiftShAmt
177 // f) ((x << MaskShAmt) a>> MaskShAmt) << ShiftShAmt
178 // All these patterns can be simplified to just:
179 // x << ShiftShAmt
180 // iff:
181 // a,b) (MaskShAmt+ShiftShAmt) u>= bitwidth(x)
182 // c,d,e,f) (ShiftShAmt-MaskShAmt) s>= 0 (i.e. ShiftShAmt u>= MaskShAmt)
183 static Instruction *
185  const SimplifyQuery &Q,
187  assert(OuterShift->getOpcode() == Instruction::BinaryOps::Shl &&
188  "The input must be 'shl'!");
189 
190  Value *Masked, *ShiftShAmt;
191  match(OuterShift,
192  m_Shift(m_Value(Masked), m_ZExtOrSelf(m_Value(ShiftShAmt))));
193 
194  // *If* there is a truncation between an outer shift and a possibly-mask,
195  // then said truncation *must* be one-use, else we can't perform the fold.
196  Value *Trunc;
197  if (match(Masked, m_CombineAnd(m_Trunc(m_Value(Masked)), m_Value(Trunc))) &&
198  !Trunc->hasOneUse())
199  return nullptr;
200 
201  Type *NarrowestTy = OuterShift->getType();
202  Type *WidestTy = Masked->getType();
203  bool HadTrunc = WidestTy != NarrowestTy;
204 
205  // The mask must be computed in a type twice as wide to ensure
206  // that no bits are lost if the sum-of-shifts is wider than the base type.
207  Type *ExtendedTy = WidestTy->getExtendedType();
208 
209  Value *MaskShAmt;
210 
211  // ((1 << MaskShAmt) - 1)
212  auto MaskA = m_Add(m_Shl(m_One(), m_Value(MaskShAmt)), m_AllOnes());
213  // (~(-1 << maskNbits))
214  auto MaskB = m_Xor(m_Shl(m_AllOnes(), m_Value(MaskShAmt)), m_AllOnes());
215  // (-1 l>> MaskShAmt)
216  auto MaskC = m_LShr(m_AllOnes(), m_Value(MaskShAmt));
217  // ((-1 << MaskShAmt) l>> MaskShAmt)
218  auto MaskD =
219  m_LShr(m_Shl(m_AllOnes(), m_Value(MaskShAmt)), m_Deferred(MaskShAmt));
220 
221  Value *X;
222  Constant *NewMask;
223 
224  if (match(Masked, m_c_And(m_CombineOr(MaskA, MaskB), m_Value(X)))) {
225  // Peek through an optional zext of the shift amount.
226  match(MaskShAmt, m_ZExtOrSelf(m_Value(MaskShAmt)));
227 
228  // Verify that it would be safe to try to add those two shift amounts.
229  if (!canTryToConstantAddTwoShiftAmounts(OuterShift, ShiftShAmt, Masked,
230  MaskShAmt))
231  return nullptr;
232 
233  // Can we simplify (MaskShAmt+ShiftShAmt) ?
234  auto *SumOfShAmts = dyn_cast_or_null<Constant>(SimplifyAddInst(
235  MaskShAmt, ShiftShAmt, /*IsNSW=*/false, /*IsNUW=*/false, Q));
236  if (!SumOfShAmts)
237  return nullptr; // Did not simplify.
238  // In this pattern SumOfShAmts correlates with the number of low bits
239  // that shall remain in the root value (OuterShift).
240 
241  // An extend of an undef value becomes zero because the high bits are never
242  // completely unknown. Replace the `undef` shift amounts with final
243  // shift bitwidth to ensure that the value remains undef when creating the
244  // subsequent shift op.
245  SumOfShAmts = Constant::replaceUndefsWith(
246  SumOfShAmts, ConstantInt::get(SumOfShAmts->getType()->getScalarType(),
247  ExtendedTy->getScalarSizeInBits()));
248  auto *ExtendedSumOfShAmts = ConstantExpr::getZExt(SumOfShAmts, ExtendedTy);
249  // And compute the mask as usual: ~(-1 << (SumOfShAmts))
250  auto *ExtendedAllOnes = ConstantExpr::getAllOnesValue(ExtendedTy);
251  auto *ExtendedInvertedMask =
252  ConstantExpr::getShl(ExtendedAllOnes, ExtendedSumOfShAmts);
253  NewMask = ConstantExpr::getNot(ExtendedInvertedMask);
254  } else if (match(Masked, m_c_And(m_CombineOr(MaskC, MaskD), m_Value(X))) ||
255  match(Masked, m_Shr(m_Shl(m_Value(X), m_Value(MaskShAmt)),
256  m_Deferred(MaskShAmt)))) {
257  // Peek through an optional zext of the shift amount.
258  match(MaskShAmt, m_ZExtOrSelf(m_Value(MaskShAmt)));
259 
260  // Verify that it would be safe to try to add those two shift amounts.
261  if (!canTryToConstantAddTwoShiftAmounts(OuterShift, ShiftShAmt, Masked,
262  MaskShAmt))
263  return nullptr;
264 
265  // Can we simplify (ShiftShAmt-MaskShAmt) ?
266  auto *ShAmtsDiff = dyn_cast_or_null<Constant>(SimplifySubInst(
267  ShiftShAmt, MaskShAmt, /*IsNSW=*/false, /*IsNUW=*/false, Q));
268  if (!ShAmtsDiff)
269  return nullptr; // Did not simplify.
270  // In this pattern ShAmtsDiff correlates with the number of high bits that
271  // shall be unset in the root value (OuterShift).
272 
273  // An extend of an undef value becomes zero because the high bits are never
274  // completely unknown. Replace the `undef` shift amounts with negated
275  // bitwidth of innermost shift to ensure that the value remains undef when
276  // creating the subsequent shift op.
277  unsigned WidestTyBitWidth = WidestTy->getScalarSizeInBits();
278  ShAmtsDiff = Constant::replaceUndefsWith(
279  ShAmtsDiff, ConstantInt::get(ShAmtsDiff->getType()->getScalarType(),
280  -WidestTyBitWidth));
281  auto *ExtendedNumHighBitsToClear = ConstantExpr::getZExt(
282  ConstantExpr::getSub(ConstantInt::get(ShAmtsDiff->getType(),
283  WidestTyBitWidth,
284  /*isSigned=*/false),
285  ShAmtsDiff),
286  ExtendedTy);
287  // And compute the mask as usual: (-1 l>> (NumHighBitsToClear))
288  auto *ExtendedAllOnes = ConstantExpr::getAllOnesValue(ExtendedTy);
289  NewMask =
290  ConstantExpr::getLShr(ExtendedAllOnes, ExtendedNumHighBitsToClear);
291  } else
292  return nullptr; // Don't know anything about this pattern.
293 
294  NewMask = ConstantExpr::getTrunc(NewMask, NarrowestTy);
295 
296  // Does this mask has any unset bits? If not then we can just not apply it.
297  bool NeedMask = !match(NewMask, m_AllOnes());
298 
299  // If we need to apply a mask, there are several more restrictions we have.
300  if (NeedMask) {
301  // The old masking instruction must go away.
302  if (!Masked->hasOneUse())
303  return nullptr;
304  // The original "masking" instruction must not have been`ashr`.
305  if (match(Masked, m_AShr(m_Value(), m_Value())))
306  return nullptr;
307  }
308 
309  // If we need to apply truncation, let's do it first, since we can.
310  // We have already ensured that the old truncation will go away.
311  if (HadTrunc)
312  X = Builder.CreateTrunc(X, NarrowestTy);
313 
314  // No 'NUW'/'NSW'! We no longer know that we won't shift-out non-0 bits.
315  // We didn't change the Type of this outermost shift, so we can just do it.
316  auto *NewShift = BinaryOperator::Create(OuterShift->getOpcode(), X,
317  OuterShift->getOperand(1));
318  if (!NeedMask)
319  return NewShift;
320 
321  Builder.Insert(NewShift);
322  return BinaryOperator::Create(Instruction::And, NewShift, NewMask);
323 }
324 
325 /// If we have a shift-by-constant of a bitwise logic op that itself has a
326 /// shift-by-constant operand with identical opcode, we may be able to convert
327 /// that into 2 independent shifts followed by the logic op. This eliminates a
328 /// a use of an intermediate value (reduces dependency chain).
331  assert(I.isShift() && "Expected a shift as input");
332  auto *LogicInst = dyn_cast<BinaryOperator>(I.getOperand(0));
333  if (!LogicInst || !LogicInst->isBitwiseLogicOp() || !LogicInst->hasOneUse())
334  return nullptr;
335 
336  Constant *C0, *C1;
337  if (!match(I.getOperand(1), m_Constant(C1)))
338  return nullptr;
339 
340  Instruction::BinaryOps ShiftOpcode = I.getOpcode();
341  Type *Ty = I.getType();
342 
343  // Find a matching one-use shift by constant. The fold is not valid if the sum
344  // of the shift values equals or exceeds bitwidth.
345  // TODO: Remove the one-use check if the other logic operand (Y) is constant.
346  Value *X, *Y;
347  auto matchFirstShift = [&](Value *V) {
348  APInt Threshold(Ty->getScalarSizeInBits(), Ty->getScalarSizeInBits());
349  return match(V, m_BinOp(ShiftOpcode, m_Value(), m_Value())) &&
350  match(V, m_OneUse(m_Shift(m_Value(X), m_Constant(C0)))) &&
353  };
354 
355  // Logic ops are commutative, so check each operand for a match.
356  if (matchFirstShift(LogicInst->getOperand(0)))
357  Y = LogicInst->getOperand(1);
358  else if (matchFirstShift(LogicInst->getOperand(1)))
359  Y = LogicInst->getOperand(0);
360  else
361  return nullptr;
362 
363  // shift (logic (shift X, C0), Y), C1 -> logic (shift X, C0+C1), (shift Y, C1)
364  Constant *ShiftSumC = ConstantExpr::getAdd(C0, C1);
365  Value *NewShift1 = Builder.CreateBinOp(ShiftOpcode, X, ShiftSumC);
366  Value *NewShift2 = Builder.CreateBinOp(ShiftOpcode, Y, I.getOperand(1));
367  return BinaryOperator::Create(LogicInst->getOpcode(), NewShift1, NewShift2);
368 }
369 
371  if (Instruction *Phi = foldBinopWithPhiOperands(I))
372  return Phi;
373 
374  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
375  assert(Op0->getType() == Op1->getType());
376  Type *Ty = I.getType();
377 
378  // If the shift amount is a one-use `sext`, we can demote it to `zext`.
379  Value *Y;
380  if (match(Op1, m_OneUse(m_SExt(m_Value(Y))))) {
381  Value *NewExt = Builder.CreateZExt(Y, Ty, Op1->getName());
382  return BinaryOperator::Create(I.getOpcode(), Op0, NewExt);
383  }
384 
385  // See if we can fold away this shift.
386  if (SimplifyDemandedInstructionBits(I))
387  return &I;
388 
389  // Try to fold constant and into select arguments.
390  if (isa<Constant>(Op0))
391  if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
392  if (Instruction *R = FoldOpIntoSelect(I, SI))
393  return R;
394 
395  if (Constant *CUI = dyn_cast<Constant>(Op1))
396  if (Instruction *Res = FoldShiftByConstant(Op0, CUI, I))
397  return Res;
398 
399  if (auto *NewShift = cast_or_null<Instruction>(
400  reassociateShiftAmtsOfTwoSameDirectionShifts(&I, SQ)))
401  return NewShift;
402 
403  // Pre-shift a constant shifted by a variable amount with constant offset:
404  // C shift (A add nuw C1) --> (C shift C1) shift A
405  Value *A;
406  Constant *C, *C1;
407  if (match(Op0, m_Constant(C)) &&
408  match(Op1, m_NUWAdd(m_Value(A), m_Constant(C1)))) {
409  Constant *NewC = ConstantExpr::get(I.getOpcode(), C, C1);
410  return BinaryOperator::Create(I.getOpcode(), NewC, A);
411  }
412 
413  unsigned BitWidth = Ty->getScalarSizeInBits();
414 
415  const APInt *AC, *AddC;
416  // Try to pre-shift a constant shifted by a variable amount added with a
417  // negative number:
418  // C << (X - AddC) --> (C >> AddC) << X
419  // and
420  // C >> (X - AddC) --> (C << AddC) >> X
421  if (match(Op0, m_APInt(AC)) && match(Op1, m_Add(m_Value(A), m_APInt(AddC))) &&
422  AddC->isNegative() && (-*AddC).ult(BitWidth)) {
423  assert(!AC->isZero() && "Expected simplify of shifted zero");
424  unsigned PosOffset = (-*AddC).getZExtValue();
425 
426  auto isSuitableForPreShift = [PosOffset, &I, AC]() {
427  switch (I.getOpcode()) {
428  default:
429  return false;
430  case Instruction::Shl:
431  return (I.hasNoSignedWrap() || I.hasNoUnsignedWrap()) &&
432  AC->eq(AC->lshr(PosOffset).shl(PosOffset));
433  case Instruction::LShr:
434  return I.isExact() && AC->eq(AC->shl(PosOffset).lshr(PosOffset));
435  case Instruction::AShr:
436  return I.isExact() && AC->eq(AC->shl(PosOffset).ashr(PosOffset));
437  }
438  };
439  if (isSuitableForPreShift()) {
440  Constant *NewC = ConstantInt::get(Ty, I.getOpcode() == Instruction::Shl
441  ? AC->lshr(PosOffset)
442  : AC->shl(PosOffset));
443  BinaryOperator *NewShiftOp =
444  BinaryOperator::Create(I.getOpcode(), NewC, A);
445  if (I.getOpcode() == Instruction::Shl) {
446  NewShiftOp->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
447  } else {
448  NewShiftOp->setIsExact();
449  }
450  return NewShiftOp;
451  }
452  }
453 
454  // X shift (A srem C) -> X shift (A and (C - 1)) iff C is a power of 2.
455  // Because shifts by negative values (which could occur if A were negative)
456  // are undefined.
457  if (Op1->hasOneUse() && match(Op1, m_SRem(m_Value(A), m_Constant(C))) &&
458  match(C, m_Power2())) {
459  // FIXME: Should this get moved into SimplifyDemandedBits by saying we don't
460  // demand the sign bit (and many others) here??
462  Value *Rem = Builder.CreateAnd(A, Mask, Op1->getName());
463  return replaceOperand(I, 1, Rem);
464  }
465 
467  return Logic;
468 
469  return nullptr;
470 }
471 
472 /// Return true if we can simplify two logical (either left or right) shifts
473 /// that have constant shift amounts: OuterShift (InnerShift X, C1), C2.
474 static bool canEvaluateShiftedShift(unsigned OuterShAmt, bool IsOuterShl,
475  Instruction *InnerShift,
476  InstCombinerImpl &IC, Instruction *CxtI) {
477  assert(InnerShift->isLogicalShift() && "Unexpected instruction type");
478 
479  // We need constant scalar or constant splat shifts.
480  const APInt *InnerShiftConst;
481  if (!match(InnerShift->getOperand(1), m_APInt(InnerShiftConst)))
482  return false;
483 
484  // Two logical shifts in the same direction:
485  // shl (shl X, C1), C2 --> shl X, C1 + C2
486  // lshr (lshr X, C1), C2 --> lshr X, C1 + C2
487  bool IsInnerShl = InnerShift->getOpcode() == Instruction::Shl;
488  if (IsInnerShl == IsOuterShl)
489  return true;
490 
491  // Equal shift amounts in opposite directions become bitwise 'and':
492  // lshr (shl X, C), C --> and X, C'
493  // shl (lshr X, C), C --> and X, C'
494  if (*InnerShiftConst == OuterShAmt)
495  return true;
496 
497  // If the 2nd shift is bigger than the 1st, we can fold:
498  // lshr (shl X, C1), C2 --> and (shl X, C1 - C2), C3
499  // shl (lshr X, C1), C2 --> and (lshr X, C1 - C2), C3
500  // but it isn't profitable unless we know the and'd out bits are already zero.
501  // Also, check that the inner shift is valid (less than the type width) or
502  // we'll crash trying to produce the bit mask for the 'and'.
503  unsigned TypeWidth = InnerShift->getType()->getScalarSizeInBits();
504  if (InnerShiftConst->ugt(OuterShAmt) && InnerShiftConst->ult(TypeWidth)) {
505  unsigned InnerShAmt = InnerShiftConst->getZExtValue();
506  unsigned MaskShift =
507  IsInnerShl ? TypeWidth - InnerShAmt : InnerShAmt - OuterShAmt;
508  APInt Mask = APInt::getLowBitsSet(TypeWidth, OuterShAmt) << MaskShift;
509  if (IC.MaskedValueIsZero(InnerShift->getOperand(0), Mask, 0, CxtI))
510  return true;
511  }
512 
513  return false;
514 }
515 
516 /// See if we can compute the specified value, but shifted logically to the left
517 /// or right by some number of bits. This should return true if the expression
518 /// can be computed for the same cost as the current expression tree. This is
519 /// used to eliminate extraneous shifting from things like:
520 /// %C = shl i128 %A, 64
521 /// %D = shl i128 %B, 96
522 /// %E = or i128 %C, %D
523 /// %F = lshr i128 %E, 64
524 /// where the client will ask if E can be computed shifted right by 64-bits. If
525 /// this succeeds, getShiftedValue() will be called to produce the value.
526 static bool canEvaluateShifted(Value *V, unsigned NumBits, bool IsLeftShift,
527  InstCombinerImpl &IC, Instruction *CxtI) {
528  // We can always evaluate constants shifted.
529  if (isa<Constant>(V))
530  return true;
531 
532  Instruction *I = dyn_cast<Instruction>(V);
533  if (!I) return false;
534 
535  // We can't mutate something that has multiple uses: doing so would
536  // require duplicating the instruction in general, which isn't profitable.
537  if (!I->hasOneUse()) return false;
538 
539  switch (I->getOpcode()) {
540  default: return false;
541  case Instruction::And:
542  case Instruction::Or:
543  case Instruction::Xor:
544  // Bitwise operators can all arbitrarily be arbitrarily evaluated shifted.
545  return canEvaluateShifted(I->getOperand(0), NumBits, IsLeftShift, IC, I) &&
546  canEvaluateShifted(I->getOperand(1), NumBits, IsLeftShift, IC, I);
547 
548  case Instruction::Shl:
549  case Instruction::LShr:
550  return canEvaluateShiftedShift(NumBits, IsLeftShift, I, IC, CxtI);
551 
552  case Instruction::Select: {
553  SelectInst *SI = cast<SelectInst>(I);
554  Value *TrueVal = SI->getTrueValue();
555  Value *FalseVal = SI->getFalseValue();
556  return canEvaluateShifted(TrueVal, NumBits, IsLeftShift, IC, SI) &&
557  canEvaluateShifted(FalseVal, NumBits, IsLeftShift, IC, SI);
558  }
559  case Instruction::PHI: {
560  // We can change a phi if we can change all operands. Note that we never
561  // get into trouble with cyclic PHIs here because we only consider
562  // instructions with a single use.
563  PHINode *PN = cast<PHINode>(I);
564  for (Value *IncValue : PN->incoming_values())
565  if (!canEvaluateShifted(IncValue, NumBits, IsLeftShift, IC, PN))
566  return false;
567  return true;
568  }
569  }
570 }
571 
572 /// Fold OuterShift (InnerShift X, C1), C2.
573 /// See canEvaluateShiftedShift() for the constraints on these instructions.
574 static Value *foldShiftedShift(BinaryOperator *InnerShift, unsigned OuterShAmt,
575  bool IsOuterShl,
577  bool IsInnerShl = InnerShift->getOpcode() == Instruction::Shl;
578  Type *ShType = InnerShift->getType();
579  unsigned TypeWidth = ShType->getScalarSizeInBits();
580 
581  // We only accept shifts-by-a-constant in canEvaluateShifted().
582  const APInt *C1;
583  match(InnerShift->getOperand(1), m_APInt(C1));
584  unsigned InnerShAmt = C1->getZExtValue();
585 
586  // Change the shift amount and clear the appropriate IR flags.
587  auto NewInnerShift = [&](unsigned ShAmt) {
588  InnerShift->setOperand(1, ConstantInt::get(ShType, ShAmt));
589  if (IsInnerShl) {
590  InnerShift->setHasNoUnsignedWrap(false);
591  InnerShift->setHasNoSignedWrap(false);
592  } else {
593  InnerShift->setIsExact(false);
594  }
595  return InnerShift;
596  };
597 
598  // Two logical shifts in the same direction:
599  // shl (shl X, C1), C2 --> shl X, C1 + C2
600  // lshr (lshr X, C1), C2 --> lshr X, C1 + C2
601  if (IsInnerShl == IsOuterShl) {
602  // If this is an oversized composite shift, then unsigned shifts get 0.
603  if (InnerShAmt + OuterShAmt >= TypeWidth)
604  return Constant::getNullValue(ShType);
605 
606  return NewInnerShift(InnerShAmt + OuterShAmt);
607  }
608 
609  // Equal shift amounts in opposite directions become bitwise 'and':
610  // lshr (shl X, C), C --> and X, C'
611  // shl (lshr X, C), C --> and X, C'
612  if (InnerShAmt == OuterShAmt) {
613  APInt Mask = IsInnerShl
614  ? APInt::getLowBitsSet(TypeWidth, TypeWidth - OuterShAmt)
615  : APInt::getHighBitsSet(TypeWidth, TypeWidth - OuterShAmt);
616  Value *And = Builder.CreateAnd(InnerShift->getOperand(0),
617  ConstantInt::get(ShType, Mask));
618  if (auto *AndI = dyn_cast<Instruction>(And)) {
619  AndI->moveBefore(InnerShift);
620  AndI->takeName(InnerShift);
621  }
622  return And;
623  }
624 
625  assert(InnerShAmt > OuterShAmt &&
626  "Unexpected opposite direction logical shift pair");
627 
628  // In general, we would need an 'and' for this transform, but
629  // canEvaluateShiftedShift() guarantees that the masked-off bits are not used.
630  // lshr (shl X, C1), C2 --> shl X, C1 - C2
631  // shl (lshr X, C1), C2 --> lshr X, C1 - C2
632  return NewInnerShift(InnerShAmt - OuterShAmt);
633 }
634 
635 /// When canEvaluateShifted() returns true for an expression, this function
636 /// inserts the new computation that produces the shifted value.
637 static Value *getShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
638  InstCombinerImpl &IC, const DataLayout &DL) {
639  // We can always evaluate constants shifted.
640  if (Constant *C = dyn_cast<Constant>(V)) {
641  if (isLeftShift)
642  return IC.Builder.CreateShl(C, NumBits);
643  else
644  return IC.Builder.CreateLShr(C, NumBits);
645  }
646 
647  Instruction *I = cast<Instruction>(V);
648  IC.addToWorklist(I);
649 
650  switch (I->getOpcode()) {
651  default: llvm_unreachable("Inconsistency with CanEvaluateShifted");
652  case Instruction::And:
653  case Instruction::Or:
654  case Instruction::Xor:
655  // Bitwise operators can all arbitrarily be arbitrarily evaluated shifted.
656  I->setOperand(
657  0, getShiftedValue(I->getOperand(0), NumBits, isLeftShift, IC, DL));
658  I->setOperand(
659  1, getShiftedValue(I->getOperand(1), NumBits, isLeftShift, IC, DL));
660  return I;
661 
662  case Instruction::Shl:
663  case Instruction::LShr:
664  return foldShiftedShift(cast<BinaryOperator>(I), NumBits, isLeftShift,
665  IC.Builder);
666 
667  case Instruction::Select:
668  I->setOperand(
669  1, getShiftedValue(I->getOperand(1), NumBits, isLeftShift, IC, DL));
670  I->setOperand(
671  2, getShiftedValue(I->getOperand(2), NumBits, isLeftShift, IC, DL));
672  return I;
673  case Instruction::PHI: {
674  // We can change a phi if we can change all operands. Note that we never
675  // get into trouble with cyclic PHIs here because we only consider
676  // instructions with a single use.
677  PHINode *PN = cast<PHINode>(I);
678  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
680  isLeftShift, IC, DL));
681  return PN;
682  }
683  }
684 }
685 
686 // If this is a bitwise operator or add with a constant RHS we might be able
687 // to pull it through a shift.
689  BinaryOperator *BO) {
690  switch (BO->getOpcode()) {
691  default:
692  return false; // Do not perform transform!
693  case Instruction::Add:
694  return Shift.getOpcode() == Instruction::Shl;
695  case Instruction::Or:
696  case Instruction::And:
697  return true;
698  case Instruction::Xor:
699  // Do not change a 'not' of logical shift because that would create a normal
700  // 'xor'. The 'not' is likely better for analysis, SCEV, and codegen.
701  return !(Shift.isLogicalShift() && match(BO, m_Not(m_Value())));
702  }
703 }
704 
706  BinaryOperator &I) {
707  const APInt *Op1C;
708  if (!match(Op1, m_APInt(Op1C)))
709  return nullptr;
710 
711  // See if we can propagate this shift into the input, this covers the trivial
712  // cast of lshr(shl(x,c1),c2) as well as other more complex cases.
713  bool IsLeftShift = I.getOpcode() == Instruction::Shl;
714  if (I.getOpcode() != Instruction::AShr &&
715  canEvaluateShifted(Op0, Op1C->getZExtValue(), IsLeftShift, *this, &I)) {
716  LLVM_DEBUG(
717  dbgs() << "ICE: GetShiftedValue propagating shift through expression"
718  " to eliminate shift:\n IN: "
719  << *Op0 << "\n SH: " << I << "\n");
720 
721  return replaceInstUsesWith(
722  I, getShiftedValue(Op0, Op1C->getZExtValue(), IsLeftShift, *this, DL));
723  }
724 
725  // See if we can simplify any instructions used by the instruction whose sole
726  // purpose is to compute bits we don't care about.
727  Type *Ty = I.getType();
728  unsigned TypeBits = Ty->getScalarSizeInBits();
729  assert(!Op1C->uge(TypeBits) &&
730  "Shift over the type width should have been removed already");
731  (void)TypeBits;
732 
733  if (Instruction *FoldedShift = foldBinOpIntoSelectOrPhi(I))
734  return FoldedShift;
735 
736  if (!Op0->hasOneUse())
737  return nullptr;
738 
739  if (auto *Op0BO = dyn_cast<BinaryOperator>(Op0)) {
740  // If the operand is a bitwise operator with a constant RHS, and the
741  // shift is the only use, we can pull it out of the shift.
742  const APInt *Op0C;
743  if (match(Op0BO->getOperand(1), m_APInt(Op0C))) {
744  if (canShiftBinOpWithConstantRHS(I, Op0BO)) {
745  Constant *NewRHS = ConstantExpr::get(
746  I.getOpcode(), cast<Constant>(Op0BO->getOperand(1)), Op1);
747 
748  Value *NewShift =
749  Builder.CreateBinOp(I.getOpcode(), Op0BO->getOperand(0), Op1);
750  NewShift->takeName(Op0BO);
751 
752  return BinaryOperator::Create(Op0BO->getOpcode(), NewShift, NewRHS);
753  }
754  }
755  }
756 
757  // If we have a select that conditionally executes some binary operator,
758  // see if we can pull it the select and operator through the shift.
759  //
760  // For example, turning:
761  // shl (select C, (add X, C1), X), C2
762  // Into:
763  // Y = shl X, C2
764  // select C, (add Y, C1 << C2), Y
765  Value *Cond;
766  BinaryOperator *TBO;
767  Value *FalseVal;
768  if (match(Op0, m_Select(m_Value(Cond), m_OneUse(m_BinOp(TBO)),
769  m_Value(FalseVal)))) {
770  const APInt *C;
771  if (!isa<Constant>(FalseVal) && TBO->getOperand(0) == FalseVal &&
772  match(TBO->getOperand(1), m_APInt(C)) &&
774  Constant *NewRHS = ConstantExpr::get(
775  I.getOpcode(), cast<Constant>(TBO->getOperand(1)), Op1);
776 
777  Value *NewShift = Builder.CreateBinOp(I.getOpcode(), FalseVal, Op1);
778  Value *NewOp = Builder.CreateBinOp(TBO->getOpcode(), NewShift, NewRHS);
779  return SelectInst::Create(Cond, NewOp, NewShift);
780  }
781  }
782 
783  BinaryOperator *FBO;
784  Value *TrueVal;
786  m_OneUse(m_BinOp(FBO))))) {
787  const APInt *C;
788  if (!isa<Constant>(TrueVal) && FBO->getOperand(0) == TrueVal &&
789  match(FBO->getOperand(1), m_APInt(C)) &&
791  Constant *NewRHS = ConstantExpr::get(
792  I.getOpcode(), cast<Constant>(FBO->getOperand(1)), Op1);
793 
794  Value *NewShift = Builder.CreateBinOp(I.getOpcode(), TrueVal, Op1);
795  Value *NewOp = Builder.CreateBinOp(FBO->getOpcode(), NewShift, NewRHS);
796  return SelectInst::Create(Cond, NewShift, NewOp);
797  }
798  }
799 
800  return nullptr;
801 }
802 
// NOTE(review): This listing is a doxygen source-view extraction; the embedded
// original line numbers are not contiguous (803, 816, 839, 875, 886, 907, 947,
// 959-960, 966, 986, 1018 and 1031 are absent), so the extraction dropped
// source lines here -- including this function's opening signature, which is
// presumably `Instruction *InstCombinerImpl::visitShl(BinaryOperator &I) {`
// given the SimplifyShlInst call below. Confirm every gap against upstream
// llvm-project InstCombineShifts.cpp before relying on this text.
//
// Visitor for 'shl' instructions: first tries InstSimplify, vector and common
// shift folds, then a series of shl-specific canonicalizations (mostly keyed
// on a constant shift amount), returning a replacement instruction or nullptr.
 804  const SimplifyQuery Q = SQ.getWithInstruction(&I);
 805 
 806  if (Value *V = SimplifyShlInst(I.getOperand(0), I.getOperand(1),
 807  I.hasNoSignedWrap(), I.hasNoUnsignedWrap(), Q))
 808  return replaceInstUsesWith(I, V);
 809 
 810  if (Instruction *X = foldVectorBinop(I))
 811  return X;
 812 
 813  if (Instruction *V = commonShiftTransforms(I))
 814  return V;
 815 
// NOTE(review): line 816 (the `if` whose body is this return) is missing from
// the extraction -- this `return V;` is dangling as shown.
 817  return V;
 818 
 819  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
 820  Type *Ty = I.getType();
 821  unsigned BitWidth = Ty->getScalarSizeInBits();
 822 
 823  const APInt *C;
 824  if (match(Op1, m_APInt(C))) {
 825  unsigned ShAmtC = C->getZExtValue();
 826 
 827  // shl (zext X), C --> zext (shl X, C)
 828  // This is only valid if X would have zeros shifted out.
 829  Value *X;
 830  if (match(Op0, m_OneUse(m_ZExt(m_Value(X))))) {
 831  unsigned SrcWidth = X->getType()->getScalarSizeInBits();
 832  if (ShAmtC < SrcWidth &&
 833  MaskedValueIsZero(X, APInt::getHighBitsSet(SrcWidth, ShAmtC), 0, &I))
 834  return new ZExtInst(Builder.CreateShl(X, ShAmtC), Ty);
 835  }
 836 
 837  // (X >> C) << C --> X & (-1 << C)
 838  if (match(Op0, m_Shr(m_Value(X), m_Specific(Op1)))) {
// NOTE(review): line 839 (the computation of `Mask` used below) is missing
// from the extraction.
 840  return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, Mask));
 841  }
 842 
 843  const APInt *C1;
 844  if (match(Op0, m_Exact(m_Shr(m_Value(X), m_APInt(C1)))) &&
 845  C1->ult(BitWidth)) {
 846  unsigned ShrAmt = C1->getZExtValue();
 847  if (ShrAmt < ShAmtC) {
 848  // If C1 < C: (X >>?,exact C1) << C --> X << (C - C1)
 849  Constant *ShiftDiff = ConstantInt::get(Ty, ShAmtC - ShrAmt);
 850  auto *NewShl = BinaryOperator::CreateShl(X, ShiftDiff);
 851  NewShl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
 852  NewShl->setHasNoSignedWrap(I.hasNoSignedWrap());
 853  return NewShl;
 854  }
 855  if (ShrAmt > ShAmtC) {
 856  // If C1 > C: (X >>?exact C1) << C --> X >>?exact (C1 - C)
 857  Constant *ShiftDiff = ConstantInt::get(Ty, ShrAmt - ShAmtC);
 858  auto *NewShr = BinaryOperator::Create(
 859  cast<BinaryOperator>(Op0)->getOpcode(), X, ShiftDiff);
 860  NewShr->setIsExact(true);
 861  return NewShr;
 862  }
 863  }
 864 
 865  if (match(Op0, m_OneUse(m_Shr(m_Value(X), m_APInt(C1)))) &&
 866  C1->ult(BitWidth)) {
 867  unsigned ShrAmt = C1->getZExtValue();
 868  if (ShrAmt < ShAmtC) {
 869  // If C1 < C: (X >>? C1) << C --> (X << (C - C1)) & (-1 << C)
 870  Constant *ShiftDiff = ConstantInt::get(Ty, ShAmtC - ShrAmt);
 871  auto *NewShl = BinaryOperator::CreateShl(X, ShiftDiff);
 872  NewShl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
 873  NewShl->setHasNoSignedWrap(I.hasNoSignedWrap());
 874  Builder.Insert(NewShl);
// NOTE(review): line 875 (the computation of `Mask` used below) is missing
// from the extraction.
 876  return BinaryOperator::CreateAnd(NewShl, ConstantInt::get(Ty, Mask));
 877  }
 878  if (ShrAmt > ShAmtC) {
 879  // If C1 > C: (X >>? C1) << C --> (X >>? (C1 - C)) & (-1 << C)
 880  Constant *ShiftDiff = ConstantInt::get(Ty, ShrAmt - ShAmtC);
 881  auto *OldShr = cast<BinaryOperator>(Op0);
 882  auto *NewShr =
 883  BinaryOperator::Create(OldShr->getOpcode(), X, ShiftDiff);
 884  NewShr->setIsExact(OldShr->isExact());
 885  Builder.Insert(NewShr);
// NOTE(review): line 886 (the computation of `Mask` used below) is missing
// from the extraction.
 887  return BinaryOperator::CreateAnd(NewShr, ConstantInt::get(Ty, Mask));
 888  }
 889  }
 890 
 891  // Similar to above, but look through an intermediate trunc instruction.
 892  BinaryOperator *Shr;
 893  if (match(Op0, m_OneUse(m_Trunc(m_OneUse(m_BinOp(Shr))))) &&
 894  match(Shr, m_Shr(m_Value(X), m_APInt(C1)))) {
 895  // The larger shift direction survives through the transform.
 896  unsigned ShrAmtC = C1->getZExtValue();
 897  unsigned ShDiff = ShrAmtC > ShAmtC ? ShrAmtC - ShAmtC : ShAmtC - ShrAmtC;
 898  Constant *ShiftDiffC = ConstantInt::get(X->getType(), ShDiff);
 899  auto ShiftOpc = ShrAmtC > ShAmtC ? Shr->getOpcode() : Instruction::Shl;
 900 
 901  // If C1 > C:
 902  // (trunc (X >> C1)) << C --> (trunc (X >> (C1 - C))) && (-1 << C)
 903  // If C > C1:
 904  // (trunc (X >> C1)) << C --> (trunc (X << (C - C1))) && (-1 << C)
 905  Value *NewShift = Builder.CreateBinOp(ShiftOpc, X, ShiftDiffC, "sh.diff");
 906  Value *Trunc = Builder.CreateTrunc(NewShift, Ty, "tr.sh.diff");
// NOTE(review): line 907 (the computation of `Mask` used below) is missing
// from the extraction.
 908  return BinaryOperator::CreateAnd(Trunc, ConstantInt::get(Ty, Mask));
 909  }
 910 
 911  if (match(Op0, m_Shl(m_Value(X), m_APInt(C1))) && C1->ult(BitWidth)) {
 912  unsigned AmtSum = ShAmtC + C1->getZExtValue();
 913  // Oversized shifts are simplified to zero in InstSimplify.
 914  if (AmtSum < BitWidth)
 915  // (X << C1) << C2 --> X << (C1 + C2)
 916  return BinaryOperator::CreateShl(X, ConstantInt::get(Ty, AmtSum));
 917  }
 918 
 919  // If we have an opposite shift by the same amount, we may be able to
 920  // reorder binops and shifts to eliminate math/logic.
 921  auto isSuitableBinOpcode = [](Instruction::BinaryOps BinOpcode) {
 922  switch (BinOpcode) {
 923  default:
 924  return false;
 925  case Instruction::Add:
 926  case Instruction::And:
 927  case Instruction::Or:
 928  case Instruction::Xor:
 929  case Instruction::Sub:
 930  // NOTE: Sub is not commutable and the transforms below may not be valid
 931  // when the shift-right is operand 1 (RHS) of the sub.
 932  return true;
 933  }
 934  };
 935  BinaryOperator *Op0BO;
 936  if (match(Op0, m_OneUse(m_BinOp(Op0BO))) &&
 937  isSuitableBinOpcode(Op0BO->getOpcode())) {
 938  // Commute so shift-right is on LHS of the binop.
 939  // (Y bop (X >> C)) << C -> ((X >> C) bop Y) << C
 940  // (Y bop ((X >> C) & CC)) << C -> (((X >> C) & CC) bop Y) << C
 941  Value *Shr = Op0BO->getOperand(0);
 942  Value *Y = Op0BO->getOperand(1);
 943  Value *X;
 944  const APInt *CC;
 945  if (Op0BO->isCommutative() && Y->hasOneUse() &&
 946  (match(Y, m_Shr(m_Value(), m_Specific(Op1))) ||
// NOTE(review): line 947 (the head of the second match alternative, a
// shr-and pattern judging by the `m_APInt(CC)` continuation) is missing.
 948  m_APInt(CC)))))
 949  std::swap(Shr, Y);
 950 
 951  // ((X >> C) bop Y) << C -> (X bop (Y << C)) & (~0 << C)
 952  if (match(Shr, m_OneUse(m_Shr(m_Value(X), m_Specific(Op1))))) {
 953  // Y << C
 954  Value *YS = Builder.CreateShl(Y, Op1, Op0BO->getName());
 955  // (X bop (Y << C))
 956  Value *B =
 957  Builder.CreateBinOp(Op0BO->getOpcode(), X, YS, Shr->getName());
 958  unsigned Op1Val = C->getLimitedValue(BitWidth);
// NOTE(review): lines 959-960 (the computation of `Mask` from Op1Val) are
// missing from the extraction.
 961  return BinaryOperator::CreateAnd(B, Mask);
 962  }
 963 
 964  // (((X >> C) & CC) bop Y) << C -> (X & (CC << C)) bop (Y << C)
 965  if (match(Shr,
// NOTE(review): line 966 (the shr-and pattern head of this match) is missing
// from the extraction.
 967  m_APInt(CC))))) {
 968  // Y << C
 969  Value *YS = Builder.CreateShl(Y, Op1, Op0BO->getName());
 970  // X & (CC << C)
 971  Value *M = Builder.CreateAnd(X, ConstantInt::get(Ty, CC->shl(*C)),
 972  X->getName() + ".mask");
 973  return BinaryOperator::Create(Op0BO->getOpcode(), M, YS);
 974  }
 975  }
 976 
 977  // (C1 - X) << C --> (C1 << C) - (X << C)
 978  if (match(Op0, m_OneUse(m_Sub(m_APInt(C1), m_Value(X))))) {
 979  Constant *NewLHS = ConstantInt::get(Ty, C1->shl(*C));
 980  Value *NewShift = Builder.CreateShl(X, Op1);
 981  return BinaryOperator::CreateSub(NewLHS, NewShift);
 982  }
 983 
 984  // If the shifted-out value is known-zero, then this is a NUW shift.
 985  if (!I.hasNoUnsignedWrap() &&
// NOTE(review): line 986 (presumably the MaskedValueIsZero call for the
// shifted-out high bits) is missing from the extraction.
 987  &I)) {
 988  I.setHasNoUnsignedWrap();
 989  return &I;
 990  }
 991 
 992  // If the shifted-out value is all signbits, then this is a NSW shift.
 993  if (!I.hasNoSignedWrap() && ComputeNumSignBits(Op0, 0, &I) > ShAmtC) {
 994  I.setHasNoSignedWrap();
 995  return &I;
 996  }
 997  }
 998 
 999  // Transform (x >> y) << y to x & (-1 << y)
 1000  // Valid for any type of right-shift.
 1001  Value *X;
 1002  if (match(Op0, m_OneUse(m_Shr(m_Value(X), m_Specific(Op1))))) {
 1003  Constant *AllOnes = ConstantInt::getAllOnesValue(Ty);
 1004  Value *Mask = Builder.CreateShl(AllOnes, Op1);
 1005  return BinaryOperator::CreateAnd(Mask, X);
 1006  }
 1007 
 1008  Constant *C1;
 1009  if (match(Op1, m_Constant(C1))) {
 1010  Constant *C2;
 1011  Value *X;
 1012  // (C2 << X) << C1 --> (C2 << C1) << X
 1013  if (match(Op0, m_OneUse(m_Shl(m_Constant(C2), m_Value(X)))))
 1014  return BinaryOperator::CreateShl(ConstantExpr::getShl(C2, C1), X);
 1015 
 1016  // (X * C2) << C1 --> X * (C2 << C1)
 1017  if (match(Op0, m_Mul(m_Value(X), m_Constant(C2))))
// NOTE(review): line 1018 (the `return` creating the multiply for the fold
// documented above) is missing from the extraction.
 1019 
 1020  // shl (zext i1 X), C1 --> select (X, 1 << C1, 0)
 1021  if (match(Op0, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
 1022  auto *NewC = ConstantExpr::getShl(ConstantInt::get(Ty, 1), C1);
 1023  return SelectInst::Create(X, NewC, ConstantInt::getNullValue(Ty));
 1024  }
 1025  }
 1026 
 1027  // (1 << (C - x)) -> ((1 << C) >> x) if C is bitwidth - 1
 1028  if (match(Op0, m_One()) &&
 1029  match(Op1, m_Sub(m_SpecificInt(BitWidth - 1), m_Value(X))))
 1030  return BinaryOperator::CreateLShr(
// NOTE(review): line 1031 (the operands of this CreateLShr call) is missing
// from the extraction.
 1032 
 1033  return nullptr;
 1034 }
1035 
// NOTE(review): Doxygen extraction gap -- the opening signature (original
// line 1036, presumably
// `Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {` given the
// SimplifyLShrInst call below) and lines 1082, 1097, 1103, 1113 and
// 1118-1119 are missing here. Confirm against upstream InstCombineShifts.cpp.
//
// Visitor for 'lshr' instructions: InstSimplify / vector / common shift
// folds first, then lshr-specific folds keyed on a constant shift amount
// (bit-count intrinsics, shl/lshr pairs, zext/sext narrowing, sign-bit
// extraction idioms, nuw-mul splat patterns, bswap narrowing, exact-flag
// inference), returning a replacement instruction or nullptr.
 1037  if (Value *V = SimplifyLShrInst(I.getOperand(0), I.getOperand(1), I.isExact(),
 1038  SQ.getWithInstruction(&I)))
 1039  return replaceInstUsesWith(I, V);
 1040 
 1041  if (Instruction *X = foldVectorBinop(I))
 1042  return X;
 1043 
 1044  if (Instruction *R = commonShiftTransforms(I))
 1045  return R;
 1046 
 1047  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
 1048  Type *Ty = I.getType();
 1049  const APInt *C;
 1050  if (match(Op1, m_APInt(C))) {
 1051  unsigned ShAmtC = C->getZExtValue();
 1052  unsigned BitWidth = Ty->getScalarSizeInBits();
 1053  auto *II = dyn_cast<IntrinsicInst>(Op0);
 1054  if (II && isPowerOf2_32(BitWidth) && Log2_32(BitWidth) == ShAmtC &&
 1055  (II->getIntrinsicID() == Intrinsic::ctlz ||
 1056  II->getIntrinsicID() == Intrinsic::cttz ||
 1057  II->getIntrinsicID() == Intrinsic::ctpop)) {
 1058  // ctlz.i32(x)>>5 --> zext(x == 0)
 1059  // cttz.i32(x)>>5 --> zext(x == 0)
 1060  // ctpop.i32(x)>>5 --> zext(x == -1)
 1061  bool IsPop = II->getIntrinsicID() == Intrinsic::ctpop;
 1062  Constant *RHS = ConstantInt::getSigned(Ty, IsPop ? -1 : 0);
 1063  Value *Cmp = Builder.CreateICmpEQ(II->getArgOperand(0), RHS);
 1064  return new ZExtInst(Cmp, Ty);
 1065  }
 1066 
 1067  Value *X;
 1068  const APInt *C1;
 1069  if (match(Op0, m_Shl(m_Value(X), m_APInt(C1))) && C1->ult(BitWidth)) {
 1070  if (C1->ult(ShAmtC)) {
 1071  unsigned ShlAmtC = C1->getZExtValue();
 1072  Constant *ShiftDiff = ConstantInt::get(Ty, ShAmtC - ShlAmtC);
 1073  if (cast<BinaryOperator>(Op0)->hasNoUnsignedWrap()) {
 1074  // (X <<nuw C1) >>u C --> X >>u (C - C1)
 1075  auto *NewLShr = BinaryOperator::CreateLShr(X, ShiftDiff);
 1076  NewLShr->setIsExact(I.isExact());
 1077  return NewLShr;
 1078  }
 1079  if (Op0->hasOneUse()) {
 1080  // (X << C1) >>u C --> (X >>u (C - C1)) & (-1 >> C)
 1081  Value *NewLShr = Builder.CreateLShr(X, ShiftDiff, "", I.isExact());
// NOTE(review): line 1082 (the computation of `Mask` used below) is missing
// from the extraction.
 1083  return BinaryOperator::CreateAnd(NewLShr, ConstantInt::get(Ty, Mask));
 1084  }
 1085  } else if (C1->ugt(ShAmtC)) {
 1086  unsigned ShlAmtC = C1->getZExtValue();
 1087  Constant *ShiftDiff = ConstantInt::get(Ty, ShlAmtC - ShAmtC);
 1088  if (cast<BinaryOperator>(Op0)->hasNoUnsignedWrap()) {
 1089  // (X <<nuw C1) >>u C --> X <<nuw (C1 - C)
 1090  auto *NewShl = BinaryOperator::CreateShl(X, ShiftDiff);
 1091  NewShl->setHasNoUnsignedWrap(true);
 1092  return NewShl;
 1093  }
 1094  if (Op0->hasOneUse()) {
 1095  // (X << C1) >>u C --> X << (C1 - C) & (-1 >> C)
 1096  Value *NewShl = Builder.CreateShl(X, ShiftDiff);
// NOTE(review): line 1097 (the computation of `Mask` used below) is missing
// from the extraction.
 1098  return BinaryOperator::CreateAnd(NewShl, ConstantInt::get(Ty, Mask));
 1099  }
 1100  } else {
 1101  assert(*C1 == ShAmtC);
 1102  // (X << C) >>u C --> X & (-1 >>u C)
// NOTE(review): line 1103 (the computation of `Mask` used below) is missing
// from the extraction.
 1104  return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, Mask));
 1105  }
 1106  }
 1107 
 1108  // ((X << C) + Y) >>u C --> (X + (Y >>u C)) & (-1 >>u C)
 1109  // TODO: Consolidate with the more general transform that starts from shl
 1110  // (the shifts are in the opposite order).
 1111  Value *Y;
 1112  if (match(Op0,
// NOTE(review): line 1113 (the add-of-shl pattern head binding X) is missing
// from the extraction.
 1114  m_Value(Y))))) {
 1115  Value *NewLshr = Builder.CreateLShr(Y, Op1);
 1116  Value *NewAdd = Builder.CreateAdd(NewLshr, X);
 1117  unsigned Op1Val = C->getLimitedValue(BitWidth);
// NOTE(review): lines 1118-1119 (the computation of `Mask` from Op1Val) are
// missing from the extraction.
 1120  return BinaryOperator::CreateAnd(NewAdd, Mask);
 1121  }
 1122 
 1123  if (match(Op0, m_OneUse(m_ZExt(m_Value(X)))) &&
 1124  (!Ty->isIntegerTy() || shouldChangeType(Ty, X->getType()))) {
 1125  assert(ShAmtC < X->getType()->getScalarSizeInBits() &&
 1126  "Big shift not simplified to zero?");
 1127  // lshr (zext iM X to iN), C --> zext (lshr X, C) to iN
 1128  Value *NewLShr = Builder.CreateLShr(X, ShAmtC);
 1129  return new ZExtInst(NewLShr, Ty);
 1130  }
 1131 
 1132  if (match(Op0, m_SExt(m_Value(X)))) {
 1133  unsigned SrcTyBitWidth = X->getType()->getScalarSizeInBits();
 1134  // lshr (sext i1 X to iN), C --> select (X, -1 >> C, 0)
 1135  if (SrcTyBitWidth == 1) {
 1136  auto *NewC = ConstantInt::get(
 1137  Ty, APInt::getLowBitsSet(BitWidth, BitWidth - ShAmtC));
 1138  return SelectInst::Create(X, NewC, ConstantInt::getNullValue(Ty));
 1139  }
 1140 
 1141  if ((!Ty->isIntegerTy() || shouldChangeType(Ty, X->getType())) &&
 1142  Op0->hasOneUse()) {
 1143  // Are we moving the sign bit to the low bit and widening with high
 1144  // zeros? lshr (sext iM X to iN), N-1 --> zext (lshr X, M-1) to iN
 1145  if (ShAmtC == BitWidth - 1) {
 1146  Value *NewLShr = Builder.CreateLShr(X, SrcTyBitWidth - 1);
 1147  return new ZExtInst(NewLShr, Ty);
 1148  }
 1149 
 1150  // lshr (sext iM X to iN), N-M --> zext (ashr X, min(N-M, M-1)) to iN
 1151  if (ShAmtC == BitWidth - SrcTyBitWidth) {
 1152  // The new shift amount can't be more than the narrow source type.
 1153  unsigned NewShAmt = std::min(ShAmtC, SrcTyBitWidth - 1);
 1154  Value *AShr = Builder.CreateAShr(X, NewShAmt);
 1155  return new ZExtInst(AShr, Ty);
 1156  }
 1157  }
 1158  }
 1159 
 1160  if (ShAmtC == BitWidth - 1) {
 1161  // lshr i32 or(X,-X), 31 --> zext (X != 0)
 1162  if (match(Op0, m_OneUse(m_c_Or(m_Neg(m_Value(X)), m_Deferred(X)))))
 1163  return new ZExtInst(Builder.CreateIsNotNull(X), Ty);
 1164 
 1165  // lshr i32 (X -nsw Y), 31 --> zext (X < Y)
 1166  if (match(Op0, m_OneUse(m_NSWSub(m_Value(X), m_Value(Y)))))
 1167  return new ZExtInst(Builder.CreateICmpSLT(X, Y), Ty);
 1168 
 1169  // Check if a number is negative and odd:
 1170  // lshr i32 (srem X, 2), 31 --> and (X >> 31), X
 1171  if (match(Op0, m_OneUse(m_SRem(m_Value(X), m_SpecificInt(2))))) {
 1172  Value *Signbit = Builder.CreateLShr(X, ShAmtC);
 1173  return BinaryOperator::CreateAnd(Signbit, X);
 1174  }
 1175  }
 1176 
 1177  // (X >>u C1) >>u C --> X >>u (C1 + C)
 1178  if (match(Op0, m_LShr(m_Value(X), m_APInt(C1)))) {
 1179  // Oversized shifts are simplified to zero in InstSimplify.
 1180  unsigned AmtSum = ShAmtC + C1->getZExtValue();
 1181  if (AmtSum < BitWidth)
 1182  return BinaryOperator::CreateLShr(X, ConstantInt::get(Ty, AmtSum));
 1183  }
 1184 
 1185  Instruction *TruncSrc;
 1186  if (match(Op0, m_OneUse(m_Trunc(m_Instruction(TruncSrc)))) &&
 1187  match(TruncSrc, m_LShr(m_Value(X), m_APInt(C1)))) {
 1188  unsigned SrcWidth = X->getType()->getScalarSizeInBits();
 1189  unsigned AmtSum = ShAmtC + C1->getZExtValue();
 1190 
 1191  // If the combined shift fits in the source width:
 1192  // (trunc (X >>u C1)) >>u C --> and (trunc (X >>u (C1 + C)), MaskC
 1193  //
 1194  // If the first shift covers the number of bits truncated, then the
 1195  // mask instruction is eliminated (and so the use check is relaxed).
 1196  if (AmtSum < SrcWidth &&
 1197  (TruncSrc->hasOneUse() || C1->uge(SrcWidth - BitWidth))) {
 1198  Value *SumShift = Builder.CreateLShr(X, AmtSum, "sum.shift");
 1199  Value *Trunc = Builder.CreateTrunc(SumShift, Ty, I.getName());
 1200 
 1201  // If the first shift does not cover the number of bits truncated, then
 1202  // we require a mask to get rid of high bits in the result.
 1203  APInt MaskC = APInt::getAllOnes(BitWidth).lshr(ShAmtC);
 1204  return BinaryOperator::CreateAnd(Trunc, ConstantInt::get(Ty, MaskC));
 1205  }
 1206  }
 1207 
 1208  const APInt *MulC;
 1209  if (match(Op0, m_NUWMul(m_Value(X), m_APInt(MulC)))) {
 1210  // Look for a "splat" mul pattern - it replicates bits across each half of
 1211  // a value, so a right shift is just a mask of the low bits:
 1212  // lshr i[2N] (mul nuw X, (2^N)+1), N --> and iN X, (2^N)-1
 1213  // TODO: Generalize to allow more than just half-width shifts?
 1214  if (BitWidth > 2 && ShAmtC * 2 == BitWidth && (*MulC - 1).isPowerOf2() &&
 1215  MulC->logBase2() == ShAmtC)
 1216  return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, *MulC - 2));
 1217 
 1218  // The one-use check is not strictly necessary, but codegen may not be
 1219  // able to invert the transform and perf may suffer with an extra mul
 1220  // instruction.
 1221  if (Op0->hasOneUse()) {
 1222  APInt NewMulC = MulC->lshr(ShAmtC);
 1223  // if c is divisible by (1 << ShAmtC):
 1224  // lshr (mul nuw x, MulC), ShAmtC -> mul nuw x, (MulC >> ShAmtC)
 1225  if (MulC->eq(NewMulC.shl(ShAmtC))) {
 1226  auto *NewMul =
 1227  BinaryOperator::CreateNUWMul(X, ConstantInt::get(Ty, NewMulC));
 1228  BinaryOperator *OrigMul = cast<BinaryOperator>(Op0);
 1229  NewMul->setHasNoSignedWrap(OrigMul->hasNoSignedWrap());
 1230  return NewMul;
 1231  }
 1232  }
 1233  }
 1234 
 1235  // Try to narrow bswap.
 1236  // In the case where the shift amount equals the bitwidth difference, the
 1237  // shift is eliminated.
 1238  if (match(Op0, m_OneUse(m_Intrinsic<Intrinsic::bswap>(
 1239  m_OneUse(m_ZExt(m_Value(X))))))) {
 1240  unsigned SrcWidth = X->getType()->getScalarSizeInBits();
 1241  unsigned WidthDiff = BitWidth - SrcWidth;
 1242  if (SrcWidth % 16 == 0) {
 1243  Value *NarrowSwap = Builder.CreateUnaryIntrinsic(Intrinsic::bswap, X);
 1244  if (ShAmtC >= WidthDiff) {
 1245  // (bswap (zext X)) >> C --> zext (bswap X >> C')
 1246  Value *NewShift = Builder.CreateLShr(NarrowSwap, ShAmtC - WidthDiff);
 1247  return new ZExtInst(NewShift, Ty);
 1248  } else {
 1249  // (bswap (zext X)) >> C --> (zext (bswap X)) << C'
 1250  Value *NewZExt = Builder.CreateZExt(NarrowSwap, Ty);
 1251  Constant *ShiftDiff = ConstantInt::get(Ty, WidthDiff - ShAmtC);
 1252  return BinaryOperator::CreateShl(NewZExt, ShiftDiff);
 1253  }
 1254  }
 1255  }
 1256 
 1257  // If the shifted-out value is known-zero, then this is an exact shift.
 1258  if (!I.isExact() &&
 1259  MaskedValueIsZero(Op0, APInt::getLowBitsSet(BitWidth, ShAmtC), 0, &I)) {
 1260  I.setIsExact();
 1261  return &I;
 1262  }
 1263  }
 1264 
 1265  // Transform (x << y) >> y to x & (-1 >> y)
 1266  Value *X;
 1267  if (match(Op0, m_OneUse(m_Shl(m_Value(X), m_Specific(Op1))))) {
 1268  Constant *AllOnes = ConstantInt::getAllOnesValue(Ty);
 1269  Value *Mask = Builder.CreateLShr(AllOnes, Op1);
 1270  return BinaryOperator::CreateAnd(Mask, X);
 1271  }
 1272 
 1273  return nullptr;
 1274 }
1275 
// NOTE(review): Doxygen extraction gap -- original line 1277 (the qualified
// function name, per the file's doxygen index presumably
// `InstCombinerImpl::foldVariableSignZeroExtensionOfVariableHighBitExtract(`)
// and lines 1297 and 1299 inside the big match expression are missing here.
// Confirm against upstream InstCombineShifts.cpp.
//
// Recognizes a variable-width sign/zero-extension of a variable high-bit
// extract -- (trunc? (X >> (bitwidth - NBits))) << (bitwidth - NBits) a>>
// (bitwidth - NBits) -- and bypasses the two outer shifts when the innermost
// right-shift already has the desired opcode, or re-creates the outer shift
// directly on the inner shift's operands. Returns nullptr if the pattern
// does not match or no instruction would be eliminated.
1276 Instruction *
 1278  BinaryOperator &OldAShr) {
 1279  assert(OldAShr.getOpcode() == Instruction::AShr &&
 1280  "Must be called with arithmetic right-shift instruction only.");
 1281 
 1282  // Check that constant C is a splat of the element-wise bitwidth of V.
 1283  auto BitWidthSplat = [](Constant *C, Value *V) {
 1284  return match(
 1285  C, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_EQ,
 1286  APInt(C->getType()->getScalarSizeInBits(),
 1287  V->getType()->getScalarSizeInBits())));
 1288  };
 1289 
 1290  // It should look like variable-length sign-extension on the outside:
 1291  // (Val << (bitwidth(Val)-Nbits)) a>> (bitwidth(Val)-Nbits)
 1292  Value *NBits;
 1293  Instruction *MaybeTrunc;
 1294  Constant *C1, *C2;
 1295  if (!match(&OldAShr,
 1296  m_AShr(m_Shl(m_Instruction(MaybeTrunc),
// NOTE(review): line 1297 (the `C1 - NBits` shift-amount pattern of the shl)
// is missing from the extraction.
 1298  m_ZExtOrSelf(m_Value(NBits))))),
// NOTE(review): line 1299 (the `C2 - NBits` shift-amount pattern of the
// outer ashr) is missing from the extraction.
 1300  m_ZExtOrSelf(m_Deferred(NBits)))))) ||
 1301  !BitWidthSplat(C1, &OldAShr) || !BitWidthSplat(C2, &OldAShr))
 1302  return nullptr;
 1303 
 1304  // There may or may not be a truncation after outer two shifts.
 1305  Instruction *HighBitExtract;
 1306  match(MaybeTrunc, m_TruncOrSelf(m_Instruction(HighBitExtract)));
 1307  bool HadTrunc = MaybeTrunc != HighBitExtract;
 1308 
 1309  // And finally, the innermost part of the pattern must be a right-shift.
 1310  Value *X, *NumLowBitsToSkip;
 1311  if (!match(HighBitExtract, m_Shr(m_Value(X), m_Value(NumLowBitsToSkip))))
 1312  return nullptr;
 1313 
 1314  // Said right-shift must extract high NBits bits - C0 must be its bitwidth.
 1315  Constant *C0;
 1316  if (!match(NumLowBitsToSkip,
 1317  m_ZExtOrSelf(
 1318  m_Sub(m_Constant(C0), m_ZExtOrSelf(m_Specific(NBits))))) ||
 1319  !BitWidthSplat(C0, HighBitExtract))
 1320  return nullptr;
 1321 
 1322  // Since the NBits is identical for all shifts, if the outermost and
 1323  // innermost shifts are identical, then outermost shifts are redundant.
 1324  // If we had truncation, do keep it though.
 1325  if (HighBitExtract->getOpcode() == OldAShr.getOpcode())
 1326  return replaceInstUsesWith(OldAShr, MaybeTrunc);
 1327 
 1328  // Else, if there was a truncation, then we need to ensure that one
 1329  // instruction will go away.
 1330  if (HadTrunc && !match(&OldAShr, m_c_BinOp(m_OneUse(m_Value()), m_Value())))
 1331  return nullptr;
 1332 
 1333  // Finally, bypass two innermost shifts, and perform the outermost shift on
 1334  // the operands of the innermost shift.
 1335  Instruction *NewAShr =
 1336  BinaryOperator::Create(OldAShr.getOpcode(), X, NumLowBitsToSkip);
 1337  NewAShr->copyIRFlags(HighBitExtract); // We can preserve 'exact'-ness.
 1338  if (!HadTrunc)
 1339  return NewAShr;
 1340 
 1341  Builder.Insert(NewAShr);
 1342  return TruncInst::CreateTruncOrBitCast(NewAShr, OldAShr.getType());
 1343 }
1344 
// NOTE(review): Doxygen extraction gap -- the opening signature (original
// line 1345, presumably
// `Instruction *InstCombinerImpl::visitAShr(BinaryOperator &I) {` given the
// SimplifyAShrInst call below) and lines 1436, 1439 and 1450 are missing
// here. Confirm against upstream InstCombineShifts.cpp.
//
// Visitor for 'ashr' instructions: InstSimplify / vector / common shift
// folds first, then ashr-specific folds keyed on a constant shift amount
// (shl-zext to sext, nsw-shl pairs, ashr-ashr merging, sext narrowing,
// sign-bit splat idioms, exact-flag inference), a low-bit-splat
// canonicalization, the variable high-bit-extract fold, an ashr-to-lshr
// conversion, and a not-hoisting fold. Returns a replacement or nullptr.
 1346  if (Value *V = SimplifyAShrInst(I.getOperand(0), I.getOperand(1), I.isExact(),
 1347  SQ.getWithInstruction(&I)))
 1348  return replaceInstUsesWith(I, V);
 1349 
 1350  if (Instruction *X = foldVectorBinop(I))
 1351  return X;
 1352 
 1353  if (Instruction *R = commonShiftTransforms(I))
 1354  return R;
 1355 
 1356  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
 1357  Type *Ty = I.getType();
 1358  unsigned BitWidth = Ty->getScalarSizeInBits();
 1359  const APInt *ShAmtAPInt;
 1360  if (match(Op1, m_APInt(ShAmtAPInt)) && ShAmtAPInt->ult(BitWidth)) {
 1361  unsigned ShAmt = ShAmtAPInt->getZExtValue();
 1362 
 1363  // If the shift amount equals the difference in width of the destination
 1364  // and source scalar types:
 1365  // ashr (shl (zext X), C), C --> sext X
 1366  Value *X;
 1367  if (match(Op0, m_Shl(m_ZExt(m_Value(X)), m_Specific(Op1))) &&
 1368  ShAmt == BitWidth - X->getType()->getScalarSizeInBits())
 1369  return new SExtInst(X, Ty);
 1370 
 1371  // We can't handle (X << C1) >>s C2. It shifts arbitrary bits in. However,
 1372  // we can handle (X <<nsw C1) >>s C2 since it only shifts in sign bits.
 1373  const APInt *ShOp1;
 1374  if (match(Op0, m_NSWShl(m_Value(X), m_APInt(ShOp1))) &&
 1375  ShOp1->ult(BitWidth)) {
 1376  unsigned ShlAmt = ShOp1->getZExtValue();
 1377  if (ShlAmt < ShAmt) {
 1378  // (X <<nsw C1) >>s C2 --> X >>s (C2 - C1)
 1379  Constant *ShiftDiff = ConstantInt::get(Ty, ShAmt - ShlAmt);
 1380  auto *NewAShr = BinaryOperator::CreateAShr(X, ShiftDiff);
 1381  NewAShr->setIsExact(I.isExact());
 1382  return NewAShr;
 1383  }
 1384  if (ShlAmt > ShAmt) {
 1385  // (X <<nsw C1) >>s C2 --> X <<nsw (C1 - C2)
 1386  Constant *ShiftDiff = ConstantInt::get(Ty, ShlAmt - ShAmt);
 1387  auto *NewShl = BinaryOperator::Create(Instruction::Shl, X, ShiftDiff);
 1388  NewShl->setHasNoSignedWrap(true);
 1389  return NewShl;
 1390  }
 1391  }
 1392 
 1393  if (match(Op0, m_AShr(m_Value(X), m_APInt(ShOp1))) &&
 1394  ShOp1->ult(BitWidth)) {
 1395  unsigned AmtSum = ShAmt + ShOp1->getZExtValue();
 1396  // Oversized arithmetic shifts replicate the sign bit.
 1397  AmtSum = std::min(AmtSum, BitWidth - 1);
 1398  // (X >>s C1) >>s C2 --> X >>s (C1 + C2)
 1399  return BinaryOperator::CreateAShr(X, ConstantInt::get(Ty, AmtSum));
 1400  }
 1401 
 1402  if (match(Op0, m_OneUse(m_SExt(m_Value(X)))) &&
 1403  (Ty->isVectorTy() || shouldChangeType(Ty, X->getType()))) {
 1404  // ashr (sext X), C --> sext (ashr X, C')
 1405  Type *SrcTy = X->getType();
 1406  ShAmt = std::min(ShAmt, SrcTy->getScalarSizeInBits() - 1);
 1407  Value *NewSh = Builder.CreateAShr(X, ConstantInt::get(SrcTy, ShAmt));
 1408  return new SExtInst(NewSh, Ty);
 1409  }
 1410 
 1411  if (ShAmt == BitWidth - 1) {
 1412  // ashr i32 or(X,-X), 31 --> sext (X != 0)
 1413  if (match(Op0, m_OneUse(m_c_Or(m_Neg(m_Value(X)), m_Deferred(X)))))
 1414  return new SExtInst(Builder.CreateIsNotNull(X), Ty);
 1415 
 1416  // ashr i32 (X -nsw Y), 31 --> sext (X < Y)
 1417  Value *Y;
 1418  if (match(Op0, m_OneUse(m_NSWSub(m_Value(X), m_Value(Y)))))
 1419  return new SExtInst(Builder.CreateICmpSLT(X, Y), Ty);
 1420  }
 1421 
 1422  // If the shifted-out value is known-zero, then this is an exact shift.
 1423  if (!I.isExact() &&
 1424  MaskedValueIsZero(Op0, APInt::getLowBitsSet(BitWidth, ShAmt), 0, &I)) {
 1425  I.setIsExact();
 1426  return &I;
 1427  }
 1428  }
 1429 
 1430  // Prefer `-(x & 1)` over `(x << (bitwidth(x)-1)) a>> (bitwidth(x)-1)`
 1431  // as the pattern to splat the lowest bit.
 1432  // FIXME: iff X is already masked, we don't need the one-use check.
 1433  Value *X;
 1434  if (match(Op1, m_SpecificIntAllowUndef(BitWidth - 1)) &&
 1435  match(Op0, m_OneUse(m_Shl(m_Value(X),
// NOTE(review): line 1436 (the shl's shift-amount pattern closing this match
// and opening the brace) is missing from the extraction.
 1437  Constant *Mask = ConstantInt::get(Ty, 1);
 1438  // Retain the knowledge about the ignored lanes.
// NOTE(review): line 1439 (the assignment head, presumably `Mask = ...`, of
// the merge expression continued below) is missing from the extraction.
 1440  Constant::mergeUndefsWith(Mask, cast<Constant>(Op1)),
 1441  cast<Constant>(cast<Instruction>(Op0)->getOperand(1)));
 1442  X = Builder.CreateAnd(X, Mask);
 1443  return BinaryOperator::CreateNeg(X);
 1444  }
 1445 
 1446  if (Instruction *R = foldVariableSignZeroExtensionOfVariableHighBitExtract(I))
 1447  return R;
 1448 
 1449  // See if we can turn a signed shr into an unsigned shr.
// NOTE(review): line 1450 (the `if` condition guarding this return,
// presumably a sign-bit-known-zero check) is missing from the extraction.
 1451  return BinaryOperator::CreateLShr(Op0, Op1);
 1452 
 1453  // ashr (xor %x, -1), %y --> xor (ashr %x, %y), -1
 1454  if (match(Op0, m_OneUse(m_Not(m_Value(X))))) {
 1455  // Note that we must drop 'exact'-ness of the shift!
 1456  // Note that we can't keep undef's in -1 vector constant!
 1457  auto *NewAShr = Builder.CreateAShr(X, Op1, Op0->getName() + ".not");
 1458  return BinaryOperator::CreateNot(NewAShr);
 1459  }
 1460 
 1461  return nullptr;
 1462 }
i
i
Definition: README.txt:29
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:17
M
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
Definition: README.txt:252
llvm::PatternMatch::m_TruncOrSelf
match_combine_or< CastClass_match< OpTy, Instruction::Trunc >, OpTy > m_TruncOrSelf(const OpTy &Op)
Definition: PatternMatch.h:1627
llvm::SimplifySubInst
Value * SimplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW, const SimplifyQuery &Q)
Given operands for a Sub, fold the result or return null.
Definition: InstructionSimplify.cpp:865
llvm::ConstantExpr::getNot
static Constant * getNot(Constant *C)
Definition: Constants.cpp:2709
llvm::MaskedValueIsZero
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if 'V & Mask' is known to be zero.
Definition: ValueTracking.cpp:391
llvm::PHINode::incoming_values
op_range incoming_values()
Definition: Instructions.h:2750
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:113
llvm::ConstantExpr::getZExtOrBitCast
static Constant * getZExtOrBitCast(Constant *C, Type *Ty)
Definition: Constants.cpp:2041
llvm::Value::hasOneUse
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
InstCombiner.h
IntrinsicInst.h
llvm::SimplifyQuery
Definition: InstructionSimplify.h:93
canTryToConstantAddTwoShiftAmounts
bool canTryToConstantAddTwoShiftAmounts(Value *Sh0, Value *ShAmt0, Value *Sh1, Value *ShAmt1)
Definition: InstCombineShifts.cpp:23
llvm::ConstantExpr::getZExt
static Constant * getZExt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2148
llvm::InstCombinerImpl::FoldShiftByConstant
Instruction * FoldShiftByConstant(Value *Op0, Constant *Op1, BinaryOperator &I)
Definition: InstCombineShifts.cpp:705
llvm::BinaryOperator::CreateNot
static BinaryOperator * CreateNot(Value *Op, const Twine &Name="", Instruction *InsertBefore=nullptr)
Definition: Instructions.cpp:2834
llvm::PatternMatch::m_LShr
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1127
C1
instcombine should handle this C2 when C1
Definition: README.txt:263
canShiftBinOpWithConstantRHS
static bool canShiftBinOpWithConstantRHS(BinaryOperator &Shift, BinaryOperator *BO)
Definition: InstCombineShifts.cpp:688
llvm::InstCombiner::Builder
BuilderTy & Builder
Definition: InstCombiner.h:58
llvm::PatternMatch::m_Add
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
Definition: PatternMatch.h:988
llvm::PatternMatch::m_CombineOr
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
Definition: PatternMatch.h:210
llvm::IRBuilder< TargetFolder, IRBuilderCallbackInserter >
llvm::CastInst::Create
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Definition: Instructions.cpp:3180
llvm::Instruction::hasNoUnsignedWrap
bool hasNoUnsignedWrap() const
Determine whether the no unsigned wrap flag is set.
Definition: Instruction.cpp:135
Shift
bool Shift
Definition: README.txt:468
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
llvm::tgtok::Bits
@ Bits
Definition: TGLexer.h:50
llvm::InstCombinerImpl::reassociateShiftAmtsOfTwoSameDirectionShifts
Value * reassociateShiftAmtsOfTwoSameDirectionShifts(BinaryOperator *Sh0, const SimplifyQuery &SQ, bool AnalyzeForSignBitExtraction=false)
Definition: InstCombineShifts.cpp:58
canEvaluateShiftedShift
static bool canEvaluateShiftedShift(unsigned OuterShAmt, bool IsOuterShl, Instruction *InnerShift, InstCombinerImpl &IC, Instruction *CxtI)
Return true if we can simplify two logical (either left or right) shifts that have constant shift amo...
Definition: InstCombineShifts.cpp:474
llvm::APInt::ugt
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
Definition: APInt.h:1132
llvm::Instruction::setHasNoUnsignedWrap
void setHasNoUnsignedWrap(bool b=true)
Set or clear the nuw flag on this instruction, which must be an operator which supports this flag.
Definition: Instruction.cpp:123
llvm::PatternMatch::m_BinOp
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
Definition: PatternMatch.h:84
llvm::PatternMatch::m_AShr
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1133
llvm::ComputeNumSignBits
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return the number of times the sign bit of the register is replicated into the other bits.
Definition: ValueTracking.cpp:415
llvm::InstCombiner::addToWorklist
void addToWorklist(Instruction *I)
Definition: InstCombiner.h:366
llvm::tgtok::FalseVal
@ FalseVal
Definition: TGLexer.h:61
llvm::ARM_AM::ShiftOpc
ShiftOpc
Definition: ARMAddressingModes.h:27
llvm::MipsISD::Ret
@ Ret
Definition: MipsISelLowering.h:119
llvm::APInt::lshr
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition: APInt.h:832
RHS
Value * RHS
Definition: X86PartialReduction.cpp:76
llvm::isPowerOf2_32
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:491
llvm::PatternMatch::m_c_And
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
Definition: PatternMatch.h:2257
llvm::BinaryOperator::CreateNeg
NUW NUW NUW NUW Exact static Exact BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", Instruction *InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
Definition: Instructions.cpp:2794
llvm::PatternMatch::m_Not
BinaryOp_match< ValTy, cst_pred_ty< is_all_ones >, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
Definition: PatternMatch.h:2295
llvm::Instruction::setIsExact
void setIsExact(bool b=true)
Set or clear the exact flag on this instruction, which must be an operator which supports this flag.
Definition: Instruction.cpp:131
llvm::PHINode::setIncomingValue
void setIncomingValue(unsigned i, Value *V)
Definition: Instructions.h:2763
llvm::PatternMatch::m_Deferred
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
Definition: PatternMatch.h:798
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:101
llvm::PatternMatch::m_OneUse
OneUse_match< T > m_OneUse(const T &SubPattern)
Definition: PatternMatch.h:67
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
llvm::ConstantExpr::getSub
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2726
llvm::BitmaskEnumDetail::Mask
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
llvm::PatternMatch::m_APInt
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
Definition: PatternMatch.h:270
llvm::PatternMatch::m_c_BinOp
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
Definition: PatternMatch.h:2221
llvm::APInt::uge
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition: APInt.h:1171
llvm::Intrinsic::getType
FunctionType * getType(LLVMContext &Context, ID id, ArrayRef< Type * > Tys=None)
Return the function type for an intrinsic.
Definition: Function.cpp:1366
llvm::Instruction::getOpcode
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:157
llvm::APInt::isNegative
bool isNegative() const
Determine sign of this APInt.
Definition: APInt.h:312
llvm::SelectInst::Create
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", Instruction *InsertBefore=nullptr, Instruction *MDFrom=nullptr)
Definition: Instructions.h:1768
llvm::InstCombinerImpl::foldVariableSignZeroExtensionOfVariableHighBitExtract
Instruction * foldVariableSignZeroExtensionOfVariableHighBitExtract(BinaryOperator &OldAShr)
Definition: InstCombineShifts.cpp:1277
llvm::SimplifyShlInst
Value * SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, const SimplifyQuery &Q)
Given operands for a Shl, fold the result or return null.
Definition: InstructionSimplify.cpp:1386
llvm::RecurKind::And
@ And
Bitwise or logical AND of integers.
hasNoUnsignedWrap
static bool hasNoUnsignedWrap(BinaryOperator &I)
Definition: InstructionCombining.cpp:298
llvm::APInt::eq
bool eq(const APInt &RHS) const
Equality comparison.
Definition: APInt.h:1029
InstCombineInternal.h
llvm::PatternMatch::m_Select
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
Definition: PatternMatch.h:1472
llvm::PatternMatch::match
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
llvm::PHINode::getIncomingValue
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
Definition: Instructions.h:2760
getScalarSizeInBits
static unsigned getScalarSizeInBits(Type *Ty)
Definition: SystemZTargetTransformInfo.cpp:403
llvm::APInt::isZero
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition: APInt.h:359
llvm::Instruction::setHasNoSignedWrap
void setHasNoSignedWrap(bool b=true)
Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.
Definition: Instruction.cpp:127
llvm::PatternMatch::m_Exact
Exact_match< T > m_Exact(const T &SubPattern)
Definition: PatternMatch.h:1361
llvm::PatternMatch::m_SpecificIntAllowUndef
specific_intval< true > m_SpecificIntAllowUndef(APInt V)
Definition: PatternMatch.h:871
C
(vector float) vec_cmpeq(*A, *B) C
Definition: README_ALTIVEC.txt:86
Y
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
getOpcode
static Optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition: VPlanSLP.cpp:190
llvm::APInt::getAllOnes
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:214
llvm::Type::isVectorTy
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:227
llvm::PatternMatch::m_Instruction
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
Definition: PatternMatch.h:726
llvm::SimplifyAShrInst
Value * SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact, const SimplifyQuery &Q)
Given operands for a AShr, fold the result or return nulll.
Definition: InstructionSimplify.cpp:1456
llvm::PatternMatch::m_ZExt
CastClass_match< OpTy, Instruction::ZExt > m_ZExt(const OpTy &Op)
Matches ZExt.
Definition: PatternMatch.h:1639
llvm::Log2_32
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:623
llvm::PatternMatch::m_c_Add
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
Definition: PatternMatch.h:2243
B
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
llvm::Constant::getAllOnesValue
static Constant * getAllOnesValue(Type *Ty)
Definition: Constants.cpp:395
llvm::BinaryOperator::getOpcode
BinaryOps getOpcode() const
Definition: InstrTypes.h:392
llvm::Instruction
Definition: Instruction.h:42
llvm::Type::getScalarSizeInBits
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition: Type.cpp:189
llvm::PatternMatch::m_c_Or
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
Definition: PatternMatch.h:2264
llvm::APInt::getZExtValue
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1478
llvm::APInt::getHighBitsSet
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition: APInt.h:279
llvm::InstCombinerImpl
Definition: InstCombineInternal.h:61
llvm::ConstantInt::get
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:919
llvm::PatternMatch::m_NSWSub
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1174
PatternMatch.h
llvm::CastInst::CreateTruncOrBitCast
static CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Create a Trunc or BitCast cast instruction.
Definition: Instructions.cpp:3256
llvm::PatternMatch::m_Shift
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
Definition: PatternMatch.h:1306
llvm::PHINode::getNumIncomingValues
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Definition: Instructions.h:2756
X
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
llvm::PatternMatch::m_One
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
Definition: PatternMatch.h:513
llvm::Instruction::isCommutative
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
Definition: Instruction.cpp:770
llvm::APInt::ashr
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition: APInt.h:808
llvm::PatternMatch::m_Power2
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
Definition: PatternMatch.h:544
llvm::PatternMatch::m_Xor
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1115
llvm::Type::getExtendedType
Type * getExtendedType() const
Given scalar/vector integer type, returns a type with elements twice as wide as in the original type.
Definition: DerivedTypes.h:706
llvm::Type::isIntegerTy
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:191
llvm::PatternMatch::m_Shr
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
Definition: PatternMatch.h:1313
llvm::PatternMatch::m_ZExtOrSelf
match_combine_or< CastClass_match< OpTy, Instruction::ZExt >, OpTy > m_ZExtOrSelf(const OpTy &Op)
Definition: PatternMatch.h:1645
llvm::ConstantExpr::getTrunc
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2120
llvm::PatternMatch::m_NUWAdd
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWAdd(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1199
llvm::InstCombinerImpl::visitShl
Instruction * visitShl(BinaryOperator &I)
Definition: InstCombineShifts.cpp:803
llvm::Constant
This is an important base class in LLVM.
Definition: Constant.h:41
llvm::Instruction::isLogicalShift
bool isLogicalShift() const
Return true if this is a logical shift left or a logical shift right.
Definition: Instruction.h:202
llvm::Constant::replaceUndefsWith
static Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
Definition: Constants.cpp:787
llvm::APInt::logBase2
unsigned logBase2() const
Definition: APInt.h:1664
llvm::PatternMatch::m_AllOnes
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
Definition: PatternMatch.h:445
llvm::numbers::e
constexpr double e
Definition: MathExtras.h:57
llvm::ConstantExpr::get
static Constant * get(unsigned Opcode, Constant *C1, unsigned Flags=0, Type *OnlyIfReducedTy=nullptr)
get - Return a unary operator constant expression, folding if possible.
Definition: Constants.cpp:2292
I
#define I(x, y, z)
Definition: MD5.cpp:58
getShiftedValue
static Value * getShiftedValue(Value *V, unsigned NumBits, bool isLeftShift, InstCombinerImpl &IC, const DataLayout &DL)
When canEvaluateShifted() returns true for an expression, this function inserts the new computation t...
Definition: InstCombineShifts.cpp:637
llvm::ConstantExpr::getShl
static Constant * getShl(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2791
llvm::PatternMatch::m_And
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1103
llvm::PatternMatch::m_SRem
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1091
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::PatternMatch::m_Sub
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1000
std::swap
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:853
SI
StandardInstrumentations SI(Debug, VerifyEach)
llvm::SelectInst
This class represents the LLVM 'select' instruction.
Definition: Instructions.h:1737
llvm::SimplifyQuery::getWithInstruction
SimplifyQuery getWithInstruction(Instruction *I) const
Definition: InstructionSimplify.h:120
llvm::PatternMatch::m_Constant
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
Definition: PatternMatch.h:142
Builder
assume Assume Builder
Definition: AssumeBundleBuilder.cpp:651
llvm::PatternMatch::m_Value
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:76
llvm::ZExtInst
This class represents zero extension of integer types.
Definition: Instructions.h:4819
llvm::APInt
Class for arbitrary precision integers.
Definition: APInt.h:75
llvm::User::setOperand
void setOperand(unsigned i, Value *Val)
Definition: User.h:174
llvm::PatternMatch::m_SExt
CastClass_match< OpTy, Instruction::SExt > m_SExt(const OpTy &Op)
Matches SExt.
Definition: PatternMatch.h:1633
llvm::InstCombinerImpl::visitLShr
Instruction * visitLShr(BinaryOperator &I)
Definition: InstCombineShifts.cpp:1036
llvm::PatternMatch::m_SpecificInt
specific_intval< false > m_SpecificInt(APInt V)
Match a specific integer value or vector with all elements equal to the value.
Definition: PatternMatch.h:863
llvm::PatternMatch::m_CombineAnd
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
Definition: PatternMatch.h:216
llvm::PatternMatch::m_NUWMul
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1215
llvm::SimplifyLShrInst
Value * SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact, const SimplifyQuery &Q)
Given operands for a LShr, fold the result or return null.
Definition: InstructionSimplify.cpp:1423
llvm::BinaryOperator
Definition: InstrTypes.h:188
llvm::min
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:357
llvm::PatternMatch::m_SpecificInt_ICMP
cst_pred_ty< icmp_pred_with_threshold > m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold)
Match an integer or vector with every element comparing 'pred' (eg/ne/...) to Threshold.
Definition: PatternMatch.h:604
Cond
SmallVector< MachineOperand, 4 > Cond
Definition: BasicBlockSections.cpp:178
llvm::CmpInst::ICMP_ULT
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:744
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:143
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::Instruction::isExact
bool isExact() const
Determine whether the exact flag is set.
Definition: Instruction.cpp:195
foldShiftedShift
static Value * foldShiftedShift(BinaryOperator *InnerShift, unsigned OuterShAmt, bool IsOuterShl, InstCombiner::BuilderTy &Builder)
Fold OuterShift (InnerShift X, C1), C2.
Definition: InstCombineShifts.cpp:574
llvm::APInt::ult
bool ult(const APInt &RHS) const
Unsigned less than comparison.
Definition: APInt.h:1061
llvm::SExtInst
This class represents a sign extension of integer types.
Definition: Instructions.h:4858
llvm::Value::getName
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:305
llvm::InstCombinerImpl::visitAShr
Instruction * visitAShr(BinaryOperator &I)
Definition: InstCombineShifts.cpp:1345
llvm::MCID::Select
@ Select
Definition: MCInstrDesc.h:164
foldShiftOfShiftedLogic
static Instruction * foldShiftOfShiftedLogic(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
If we have a shift-by-constant of a bitwise logic op that itself has a shift-by-constant operand with...
Definition: InstCombineShifts.cpp:329
llvm::Constant::getNullValue
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:350
llvm::InstCombinerImpl::commonShiftTransforms
Instruction * commonShiftTransforms(BinaryOperator &I)
Definition: InstCombineShifts.cpp:370
llvm::ConstantExpr::getAdd
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2715
llvm::ConstantInt::getSigned
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
Definition: Constants.cpp:933
llvm::BitWidth
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:147
llvm::PatternMatch::m_NSWShl
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoSignedWrap > m_NSWShl(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1190
dropRedundantMaskingOfLeftShiftInput
static Instruction * dropRedundantMaskingOfLeftShiftInput(BinaryOperator *OuterShift, const SimplifyQuery &Q, InstCombiner::BuilderTy &Builder)
Definition: InstCombineShifts.cpp:184
llvm::ConstantExpr::getLShr
static Constant * getLShr(Constant *C1, Constant *C2, bool isExact=false)
Definition: Constants.cpp:2798
llvm::MCID::Add
@ Add
Definition: MCInstrDesc.h:185
llvm::Constant::mergeUndefsWith
static Constant * mergeUndefsWith(Constant *C, Constant *Other)
Merges undefs of a Constant with another Constant, along with the undefs already present.
Definition: Constants.cpp:811
llvm::Instruction::BinaryOps
BinaryOps
Definition: Instruction.h:786
llvm::APInt::getSignMask
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
Definition: APInt.h:209
llvm::PatternMatch::m_Specific
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
Definition: PatternMatch.h:780
llvm::IRBuilderBase::CreateLShr
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1310
llvm::Instruction::hasNoSignedWrap
bool hasNoSignedWrap() const
Determine whether the no signed wrap flag is set.
Definition: Instruction.cpp:139
CreateMul
static BinaryOperator * CreateMul(Value *S1, Value *S2, const Twine &Name, Instruction *InsertBefore, Value *FlagsOp)
Definition: Reassociate.cpp:243
llvm::IRBuilderBase::CreateShl
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1289
canEvaluateShifted
static bool canEvaluateShifted(Value *V, unsigned NumBits, bool IsLeftShift, InstCombinerImpl &IC, Instruction *CxtI)
See if we can compute the specified value, but shifted logically to the left or right by some number ...
Definition: InstCombineShifts.cpp:526
InstructionSimplify.h
llvm::PHINode
Definition: Instructions.h:2664
llvm::Instruction::copyIRFlags
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
Definition: Instruction.cpp:298
llvm::PatternMatch::m_Neg
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
Definition: PatternMatch.h:2279
llvm::APInt::getLowBitsSet
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition: APInt.h:289
llvm::Value::takeName
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:378
llvm::APInt::shl
APInt shl(unsigned shiftAmt) const
Left-shift function.
Definition: APInt.h:854
llvm::User::getOperand
Value * getOperand(unsigned i) const
Definition: User.h:169
llvm::PatternMatch::m_Trunc
CastClass_match< OpTy, Instruction::Trunc > m_Trunc(const OpTy &Op)
Matches Trunc.
Definition: PatternMatch.h:1621
llvm::BinaryOperator::Create
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), Instruction *InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
Definition: Instructions.cpp:2778
llvm::SimplifyAddInst
Value * SimplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
Definition: InstructionSimplify.cpp:683
llvm::tgtok::TrueVal
@ TrueVal
Definition: TGLexer.h:61
llvm::Value
LLVM Value Representation.
Definition: Value.h:74
llvm::PatternMatch::m_Shl
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1121
llvm::InstCombinerImpl::MaskedValueIsZero
bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth=0, const Instruction *CxtI=nullptr) const
Definition: InstCombineInternal.h:485
llvm::PatternMatch::m_Mul
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1055