LLVM 7.0.0svn
InstCombineAndOrXor.cpp
1 //===- InstCombineAndOrXor.cpp --------------------------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the visitAnd, visitOr, and visitXor functions.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "InstCombineInternal.h"
15 #include "llvm/Analysis/CmpInstAnalysis.h"
16 #include "llvm/Analysis/InstructionSimplify.h"
17 #include "llvm/IR/ConstantRange.h"
18 #include "llvm/IR/Intrinsics.h"
19 #include "llvm/IR/PatternMatch.h"
20 #include "llvm/Transforms/Utils/Local.h"
21 using namespace llvm;
22 using namespace PatternMatch;
23 
24 #define DEBUG_TYPE "instcombine"
25 
26 /// Similar to getICmpCode but for FCmpInst. This encodes a fcmp predicate into
27 /// a four bit mask.
28 static unsigned getFCmpCode(FCmpInst::Predicate CC) {
30  "Unexpected FCmp predicate!");
31  // Take advantage of the bit pattern of FCmpInst::Predicate here.
32  // U L G E
33  static_assert(FCmpInst::FCMP_FALSE == 0, ""); // 0 0 0 0
34  static_assert(FCmpInst::FCMP_OEQ == 1, ""); // 0 0 0 1
35  static_assert(FCmpInst::FCMP_OGT == 2, ""); // 0 0 1 0
36  static_assert(FCmpInst::FCMP_OGE == 3, ""); // 0 0 1 1
37  static_assert(FCmpInst::FCMP_OLT == 4, ""); // 0 1 0 0
38  static_assert(FCmpInst::FCMP_OLE == 5, ""); // 0 1 0 1
39  static_assert(FCmpInst::FCMP_ONE == 6, ""); // 0 1 1 0
40  static_assert(FCmpInst::FCMP_ORD == 7, ""); // 0 1 1 1
41  static_assert(FCmpInst::FCMP_UNO == 8, ""); // 1 0 0 0
42  static_assert(FCmpInst::FCMP_UEQ == 9, ""); // 1 0 0 1
43  static_assert(FCmpInst::FCMP_UGT == 10, ""); // 1 0 1 0
44  static_assert(FCmpInst::FCMP_UGE == 11, ""); // 1 0 1 1
45  static_assert(FCmpInst::FCMP_ULT == 12, ""); // 1 1 0 0
46  static_assert(FCmpInst::FCMP_ULE == 13, ""); // 1 1 0 1
47  static_assert(FCmpInst::FCMP_UNE == 14, ""); // 1 1 1 0
48  static_assert(FCmpInst::FCMP_TRUE == 15, ""); // 1 1 1 1
49  return CC;
50 }
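Because the predicate values map directly onto the U/L/G/E bit layout, combining two fcmp codes with a bitwise AND or OR yields the code of the combined comparison, which is what the fcmp folding later in this file relies on. A minimal standalone sketch of that property (hypothetical names, not part of this file):

#include <cassert>

// Mirror the U L G E bit layout of FCmpInst::Predicate.
enum : unsigned { E = 1, G = 2, L = 4, U = 8 };

int main() {
  unsigned OEQ = E, OGT = G, OLT = L, OLE = L | E;
  assert((OLT | OEQ) == OLE);               // (x < y) || (x == y)  ==  (x <= y)
  assert((OLT & OGT) == 0);                 // (x < y) && (x > y)   ==  false
  assert((OLT | OGT | OEQ) == (L | G | E)); // union of ordered relations == ord
  return 0;
}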
51 
52 /// This is the complement of getICmpCode, which turns an opcode and two
53 /// operands into either a constant true or false, or a brand new ICmp
54 /// instruction. The sign is passed in to determine which kind of predicate to
55 /// use in the new icmp instruction.
56 static Value *getNewICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS,
57  InstCombiner::BuilderTy &Builder) {
58  ICmpInst::Predicate NewPred;
59  if (Value *NewConstant = getICmpValue(Sign, Code, LHS, RHS, NewPred))
60  return NewConstant;
61  return Builder.CreateICmp(NewPred, LHS, RHS);
62 }
63 
64 /// This is the complement of getFCmpCode, which turns an opcode and two
65 /// operands into either a FCmp instruction, or a true/false constant.
66 static Value *getFCmpValue(unsigned Code, Value *LHS, Value *RHS,
67  InstCombiner::BuilderTy &Builder) {
68  const auto Pred = static_cast<FCmpInst::Predicate>(Code);
69  assert(FCmpInst::FCMP_FALSE <= Pred && Pred <= FCmpInst::FCMP_TRUE &&
70  "Unexpected FCmp predicate!");
71  if (Pred == FCmpInst::FCMP_FALSE)
72  return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
73  if (Pred == FCmpInst::FCMP_TRUE)
74  return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 1);
75  return Builder.CreateFCmp(Pred, LHS, RHS);
76 }
77 
78 /// \brief Transform BITWISE_OP(BSWAP(A),BSWAP(B)) or
79 /// BITWISE_OP(BSWAP(A), Constant) to BSWAP(BITWISE_OP(A, B))
80 /// \param I Binary operator to transform.
81 /// \return Pointer to node that must replace the original binary operator, or
82 /// null pointer if no transformation was made.
83 static Instruction *SimplifyBSwap(BinaryOperator &I,
84  InstCombiner::BuilderTy &Builder) {
85  assert(I.isBitwiseLogicOp() && "Unexpected opcode for bswap simplifying");
86 
87  Value *OldLHS = I.getOperand(0);
88  Value *OldRHS = I.getOperand(1);
89 
90  Value *NewLHS;
91  if (!match(OldLHS, m_BSwap(m_Value(NewLHS))))
92  return nullptr;
93 
94  Value *NewRHS;
95  const APInt *C;
96 
97  if (match(OldRHS, m_BSwap(m_Value(NewRHS)))) {
98  // OP( BSWAP(x), BSWAP(y) ) -> BSWAP( OP(x, y) )
99  if (!OldLHS->hasOneUse() && !OldRHS->hasOneUse())
100  return nullptr;
101  // NewRHS initialized by the matcher.
102  } else if (match(OldRHS, m_APInt(C))) {
103  // OP( BSWAP(x), CONSTANT ) -> BSWAP( OP(x, BSWAP(CONSTANT) ) )
104  if (!OldLHS->hasOneUse())
105  return nullptr;
106  NewRHS = ConstantInt::get(I.getType(), C->byteSwap());
107  } else
108  return nullptr;
109 
110  Value *BinOp = Builder.CreateBinOp(I.getOpcode(), NewLHS, NewRHS);
111  Function *F = Intrinsic::getDeclaration(I.getModule(), Intrinsic::bswap,
112  I.getType());
113  return Builder.CreateCall(F, BinOp);
114 }
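The constant case above depends on bitwise operations commuting with a byte swap: OP(bswap(a), c) == bswap(OP(a, bswap(c))). A small self-contained check of that identity on 32-bit values, using a stand-in bswap32 helper rather than the intrinsic:

#include <cassert>
#include <cstdint>

// Portable 32-bit byte swap, standing in for llvm.bswap.i32.
static uint32_t bswap32(uint32_t X) {
  return (X >> 24) | ((X >> 8) & 0xFF00u) | ((X << 8) & 0xFF0000u) | (X << 24);
}

int main() {
  uint32_t A = 0x12345678u, C = 0x000000FFu;
  // OP(BSWAP(a), c) == BSWAP(OP(a, BSWAP(c))) for any bitwise OP.
  assert((bswap32(A) & C) == bswap32(A & bswap32(C)));
  assert((bswap32(A) | C) == bswap32(A | bswap32(C)));
  return 0;
}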
115 
116 /// This handles expressions of the form ((val OP C1) & C2). Where
117 /// the Op parameter is 'OP', OpRHS is 'C1', and AndRHS is 'C2'.
118 Instruction *InstCombiner::OptAndOp(BinaryOperator *Op,
119  ConstantInt *OpRHS,
120  ConstantInt *AndRHS,
121  BinaryOperator &TheAnd) {
122  Value *X = Op->getOperand(0);
123 
124  switch (Op->getOpcode()) {
125  default: break;
126  case Instruction::Add:
127  if (Op->hasOneUse()) {
128  // Adding a one to a single bit bit-field should be turned into an XOR
129  // of the bit. First thing to check is to see if this AND is with a
130  // single bit constant.
131  const APInt &AndRHSV = AndRHS->getValue();
132 
133  // If there is only one bit set.
134  if (AndRHSV.isPowerOf2()) {
135  // Ok, at this point, we know that we are masking the result of the
136  // ADD down to exactly one bit. If the constant we are adding has
137  // no bits set below this bit, then we can eliminate the ADD.
138  const APInt& AddRHS = OpRHS->getValue();
139 
140  // Check to see if any bits below the one bit set in AndRHSV are set.
141  if ((AddRHS & (AndRHSV - 1)).isNullValue()) {
142  // If not, the only thing that can affect the output of the AND is
143  // the bit specified by AndRHSV. If that bit is set, the effect of
144  // the XOR is to toggle the bit. If it is clear, then the ADD has
145  // no effect.
146  if ((AddRHS & AndRHSV).isNullValue()) { // Bit is not set, noop
147  TheAnd.setOperand(0, X);
148  return &TheAnd;
149  } else {
150  // Pull the XOR out of the AND.
151  Value *NewAnd = Builder.CreateAnd(X, AndRHS);
152  NewAnd->takeName(Op);
153  return BinaryOperator::CreateXor(NewAnd, AndRHS);
154  }
155  }
156  }
157  }
158  break;
159  }
160  return nullptr;
161 }
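The add-under-mask logic above reduces to two facts about a single-bit mask C2 and an addend C1 with no bits set below that bit: if C1 also has the masked bit set, the add behaves like an xor of that bit; if it does not, the add cannot influence the masked bit at all. A brute-force check over 8-bit values (constants chosen arbitrarily, illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t X = 0; X < 256; ++X) {
    // C2 = 0b0100 (single bit), C1 = 0b0100: bit set -> the add acts as an xor.
    assert(((X + 0x4u) & 0x4u) == ((X & 0x4u) ^ 0x4u));
    // C1 = 0b1000: bit clear and no lower bits set -> the add is a no-op here.
    assert(((X + 0x8u) & 0x4u) == (X & 0x4u));
  }
  return 0;
}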
162 
163 /// Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise
164 /// (V < Lo || V >= Hi). This method expects that Lo <= Hi. IsSigned indicates
165 /// whether to treat V, Lo, and Hi as signed or not.
166 Value *InstCombiner::insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi,
167  bool isSigned, bool Inside) {
168  assert((isSigned ? Lo.sle(Hi) : Lo.ule(Hi)) &&
169  "Lo is not <= Hi in range emission code!");
170 
171  Type *Ty = V->getType();
172  if (Lo == Hi)
173  return Inside ? ConstantInt::getFalse(Ty) : ConstantInt::getTrue(Ty);
174 
175  // V >= Min && V < Hi --> V < Hi
176  // V < Min || V >= Hi --> V >= Hi
177  ICmpInst::Predicate Pred = Inside ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE;
178  if (isSigned ? Lo.isMinSignedValue() : Lo.isMinValue()) {
179  Pred = isSigned ? ICmpInst::getSignedPredicate(Pred) : Pred;
180  return Builder.CreateICmp(Pred, V, ConstantInt::get(Ty, Hi));
181  }
182 
183  // V >= Lo && V < Hi --> V - Lo u< Hi - Lo
184  // V < Lo || V >= Hi --> V - Lo u>= Hi - Lo
185  Value *VMinusLo =
186  Builder.CreateSub(V, ConstantInt::get(Ty, Lo), V->getName() + ".off");
187  Constant *HiMinusLo = ConstantInt::get(Ty, Hi - Lo);
188  return Builder.CreateICmp(Pred, VMinusLo, HiMinusLo);
189 }
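The final branch implements the classic range-check trick: for Lo <= Hi, V >= Lo && V < Hi is equivalent to the single unsigned comparison (V - Lo) u< (Hi - Lo). An exhaustive check over i8 for the unsigned case (Lo and Hi chosen arbitrarily):

#include <cassert>
#include <cstdint>

int main() {
  const uint8_t Lo = 20, Hi = 120;
  for (unsigned V = 0; V < 256; ++V) {
    bool InRange = V >= Lo && V < Hi;
    bool Folded = uint8_t(V - Lo) < uint8_t(Hi - Lo); // (V - Lo) u< (Hi - Lo)
    assert(InRange == Folded);
  }
  return 0;
}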
190 
191 /// Classify (icmp eq (A & B), C) and (icmp ne (A & B), C) as matching patterns
192 /// that can be simplified.
193 /// One of A and B is considered the mask. The other is the value. This is
194 /// described as the "AMask" or "BMask" part of the enum. If the enum contains
195 /// only "Mask", then both A and B can be considered masks. If A is the mask,
196 /// then it was proven that (A & C) == C. This is trivial if C == A or C == 0.
197 /// If both A and C are constants, this proof is also easy.
198 /// For the following explanations, we assume that A is the mask.
199 ///
200 /// "AllOnes" declares that the comparison is true only if (A & B) == A or all
201 /// bits of A are set in B.
202 /// Example: (icmp eq (A & 3), 3) -> AMask_AllOnes
203 ///
204 /// "AllZeros" declares that the comparison is true only if (A & B) == 0 or all
205 /// bits of A are cleared in B.
206 /// Example: (icmp eq (A & 3), 0) -> Mask_AllZeros
207 ///
208 /// "Mixed" declares that (A & B) == C and C might or might not contain any
209 /// number of one bits and zero bits.
210 /// Example: (icmp eq (A & 3), 1) -> AMask_Mixed
211 ///
212 /// "Not" means that in above descriptions "==" should be replaced by "!=".
213 /// Example: (icmp ne (A & 3), 3) -> AMask_NotAllOnes
214 ///
215 /// If the mask A contains a single bit, then the following is equivalent:
216 /// (icmp eq (A & B), A) equals (icmp ne (A & B), 0)
217 /// (icmp ne (A & B), A) equals (icmp eq (A & B), 0)
218 enum MaskedICmpType {
219  AMask_AllOnes = 1,
220  AMask_NotAllOnes = 2,
221  BMask_AllOnes = 4,
222  BMask_NotAllOnes = 8,
223  Mask_AllZeros = 16,
224  Mask_NotAllZeros = 32,
225  AMask_Mixed = 64,
226  AMask_NotMixed = 128,
227  BMask_Mixed = 256,
228  BMask_NotMixed = 512
229 };
230 
231 /// Return the set of patterns (from MaskedICmpType) that (icmp SCC (A & B), C)
232 /// satisfies.
233 static unsigned getMaskedICmpType(Value *A, Value *B, Value *C,
234  ICmpInst::Predicate Pred) {
235  ConstantInt *ACst = dyn_cast<ConstantInt>(A);
236  ConstantInt *BCst = dyn_cast<ConstantInt>(B);
237  ConstantInt *CCst = dyn_cast<ConstantInt>(C);
238  bool IsEq = (Pred == ICmpInst::ICMP_EQ);
239  bool IsAPow2 = (ACst && !ACst->isZero() && ACst->getValue().isPowerOf2());
240  bool IsBPow2 = (BCst && !BCst->isZero() && BCst->getValue().isPowerOf2());
241  unsigned MaskVal = 0;
242  if (CCst && CCst->isZero()) {
243  // if C is zero, then both A and B qualify as mask
244  MaskVal |= (IsEq ? (Mask_AllZeros | AMask_Mixed | BMask_Mixed)
245  : (Mask_NotAllZeros | AMask_NotMixed | BMask_NotMixed));
246  if (IsAPow2)
247  MaskVal |= (IsEq ? (AMask_NotAllOnes | AMask_NotMixed)
248  : (AMask_AllOnes | AMask_Mixed));
249  if (IsBPow2)
250  MaskVal |= (IsEq ? (BMask_NotAllOnes | BMask_NotMixed)
251  : (BMask_AllOnes | BMask_Mixed));
252  return MaskVal;
253  }
254 
255  if (A == C) {
256  MaskVal |= (IsEq ? (AMask_AllOnes | AMask_Mixed)
257  : (AMask_NotAllOnes | AMask_NotMixed));
258  if (IsAPow2)
259  MaskVal |= (IsEq ? (Mask_NotAllZeros | AMask_NotMixed)
260  : (Mask_AllZeros | AMask_Mixed));
261  } else if (ACst && CCst && ConstantExpr::getAnd(ACst, CCst) == CCst) {
262  MaskVal |= (IsEq ? AMask_Mixed : AMask_NotMixed);
263  }
264 
265  if (B == C) {
266  MaskVal |= (IsEq ? (BMask_AllOnes | BMask_Mixed)
267  : (BMask_NotAllOnes | BMask_NotMixed));
268  if (IsBPow2)
269  MaskVal |= (IsEq ? (Mask_NotAllZeros | BMask_NotMixed)
270  : (Mask_AllZeros | BMask_Mixed));
271  } else if (BCst && CCst && ConstantExpr::getAnd(BCst, CCst) == CCst) {
272  MaskVal |= (IsEq ? BMask_Mixed : BMask_NotMixed);
273  }
274 
275  return MaskVal;
276 }
277 
278 /// Convert an analysis of a masked ICmp into its equivalent if all boolean
279 /// operations had the opposite sense. Since each "NotXXX" flag (recording !=)
280 /// is adjacent to the corresponding normal flag (recording ==), this just
281 /// involves swapping those bits over.
282 static unsigned conjugateICmpMask(unsigned Mask) {
283  unsigned NewMask;
284  NewMask = (Mask & (AMask_AllOnes | BMask_AllOnes | Mask_AllZeros |
285  AMask_Mixed | BMask_Mixed))
286  << 1;
287 
288  NewMask |= (Mask & (AMask_NotAllOnes | BMask_NotAllOnes | Mask_NotAllZeros |
289  AMask_NotMixed | BMask_NotMixed))
290  >> 1;
291 
292  return NewMask;
293 }
294 
295 // Adapts the external decomposeBitTestICmp for local use.
296 static bool decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate &Pred,
297  Value *&X, Value *&Y, Value *&Z) {
298  APInt Mask;
299  if (!llvm::decomposeBitTestICmp(LHS, RHS, Pred, X, Mask))
300  return false;
301 
302  Y = ConstantInt::get(X->getType(), Mask);
303  Z = ConstantInt::get(X->getType(), 0);
304  return true;
305 }
306 
307 /// Handle (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E).
308 /// Return the set of pattern classes (from MaskedICmpType) that both LHS and
309 /// RHS satisfy.
310 static unsigned getMaskedTypeForICmpPair(Value *&A, Value *&B, Value *&C,
311  Value *&D, Value *&E, ICmpInst *LHS,
312  ICmpInst *RHS,
313  ICmpInst::Predicate &PredL,
314  ICmpInst::Predicate &PredR) {
315  // Vectors are not (yet?) supported. Don't support pointers either.
316  if (!LHS->getOperand(0)->getType()->isIntegerTy() ||
317  !RHS->getOperand(0)->getType()->isIntegerTy())
318  return 0;
319 
320  // Here comes the tricky part:
321  // LHS might be of the form L11 & L12 == X, X == L21 & L22,
322  // and L11 & L12 == L21 & L22. The same goes for RHS.
323  // Now we must find those components L** and R**, that are equal, so
324  // that we can extract the parameters A, B, C, D, and E for the canonical
325  // pattern above.
326  Value *L1 = LHS->getOperand(0);
327  Value *L2 = LHS->getOperand(1);
328  Value *L11, *L12, *L21, *L22;
329  // Check whether the icmp can be decomposed into a bit test.
330  if (decomposeBitTestICmp(L1, L2, PredL, L11, L12, L2)) {
331  L21 = L22 = L1 = nullptr;
332  } else {
333  // Look for ANDs in the LHS icmp.
334  if (!match(L1, m_And(m_Value(L11), m_Value(L12)))) {
335  // Any icmp can be viewed as being trivially masked; if it allows us to
336  // remove one, it's worth it.
337  L11 = L1;
338  L12 = Constant::getAllOnesValue(L1->getType());
339  }
340 
341  if (!match(L2, m_And(m_Value(L21), m_Value(L22)))) {
342  L21 = L2;
343  L22 = Constant::getAllOnesValue(L2->getType());
344  }
345  }
346 
347  // Bail if LHS was an icmp that can't be decomposed into an equality.
348  if (!ICmpInst::isEquality(PredL))
349  return 0;
350 
351  Value *R1 = RHS->getOperand(0);
352  Value *R2 = RHS->getOperand(1);
353  Value *R11, *R12;
354  bool Ok = false;
355  if (decomposeBitTestICmp(R1, R2, PredR, R11, R12, R2)) {
356  if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
357  A = R11;
358  D = R12;
359  } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
360  A = R12;
361  D = R11;
362  } else {
363  return 0;
364  }
365  E = R2;
366  R1 = nullptr;
367  Ok = true;
368  } else {
369  if (!match(R1, m_And(m_Value(R11), m_Value(R12)))) {
370  // As before, model no mask as a trivial mask if it'll let us do an
371  // optimization.
372  R11 = R1;
373  R12 = Constant::getAllOnesValue(R1->getType());
374  }
375 
376  if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
377  A = R11;
378  D = R12;
379  E = R2;
380  Ok = true;
381  } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
382  A = R12;
383  D = R11;
384  E = R2;
385  Ok = true;
386  }
387  }
388 
389  // Bail if RHS was an icmp that can't be decomposed into an equality.
390  if (!ICmpInst::isEquality(PredR))
391  return 0;
392 
393  // Look for ANDs on the right side of the RHS icmp.
394  if (!Ok) {
395  if (!match(R2, m_And(m_Value(R11), m_Value(R12)))) {
396  R11 = R2;
397  R12 = Constant::getAllOnesValue(R2->getType());
398  }
399 
400  if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
401  A = R11;
402  D = R12;
403  E = R1;
404  Ok = true;
405  } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
406  A = R12;
407  D = R11;
408  E = R1;
409  Ok = true;
410  } else {
411  return 0;
412  }
413  }
414  if (!Ok)
415  return 0;
416 
417  if (L11 == A) {
418  B = L12;
419  C = L2;
420  } else if (L12 == A) {
421  B = L11;
422  C = L2;
423  } else if (L21 == A) {
424  B = L22;
425  C = L1;
426  } else if (L22 == A) {
427  B = L21;
428  C = L1;
429  }
430 
431  unsigned LeftType = getMaskedICmpType(A, B, C, PredL);
432  unsigned RightType = getMaskedICmpType(A, D, E, PredR);
433  return LeftType & RightType;
434 }
435 
436 /// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
437 /// into a single (icmp(A & X) ==/!= Y).
438 static Value *foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
439  InstCombiner::BuilderTy &Builder) {
440  Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr;
441  ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
442  unsigned Mask =
443  getMaskedTypeForICmpPair(A, B, C, D, E, LHS, RHS, PredL, PredR);
444  if (Mask == 0)
445  return nullptr;
446 
448  "Expected equality predicates for masked type of icmps.");
449 
450  // In full generality:
451  // (icmp (A & B) Op C) | (icmp (A & D) Op E)
452  // == ![ (icmp (A & B) !Op C) & (icmp (A & D) !Op E) ]
453  //
454  // If the latter can be converted into (icmp (A & X) Op Y) then the former is
455  // equivalent to (icmp (A & X) !Op Y).
456  //
457  // Therefore, we can pretend for the rest of this function that we're dealing
458  // with the conjunction, provided we flip the sense of any comparisons (both
459  // input and output).
460 
461  // In most cases we're going to produce an EQ for the "&&" case.
462  ICmpInst::Predicate NewCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
463  if (!IsAnd) {
464  // Convert the masking analysis into its equivalent with negated
465  // comparisons.
466  Mask = conjugateICmpMask(Mask);
467  }
468 
469  if (Mask & Mask_AllZeros) {
470  // (icmp eq (A & B), 0) & (icmp eq (A & D), 0)
471  // -> (icmp eq (A & (B|D)), 0)
472  Value *NewOr = Builder.CreateOr(B, D);
473  Value *NewAnd = Builder.CreateAnd(A, NewOr);
474  // We can't use C as zero because we might actually handle
475  // (icmp ne (A & B), B) & (icmp ne (A & D), D)
476  // with B and D each having a single bit set.
477  Value *Zero = Constant::getNullValue(A->getType());
478  return Builder.CreateICmp(NewCC, NewAnd, Zero);
479  }
480  if (Mask & BMask_AllOnes) {
481  // (icmp eq (A & B), B) & (icmp eq (A & D), D)
482  // -> (icmp eq (A & (B|D)), (B|D))
483  Value *NewOr = Builder.CreateOr(B, D);
484  Value *NewAnd = Builder.CreateAnd(A, NewOr);
485  return Builder.CreateICmp(NewCC, NewAnd, NewOr);
486  }
487  if (Mask & AMask_AllOnes) {
488  // (icmp eq (A & B), A) & (icmp eq (A & D), A)
489  // -> (icmp eq (A & (B&D)), A)
490  Value *NewAnd1 = Builder.CreateAnd(B, D);
491  Value *NewAnd2 = Builder.CreateAnd(A, NewAnd1);
492  return Builder.CreateICmp(NewCC, NewAnd2, A);
493  }
494 
495  // Remaining cases assume at least that B and D are constant, and depend on
496  // their actual values. This isn't strictly necessary, just a "handle the
497  // easy cases for now" decision.
498  ConstantInt *BCst = dyn_cast<ConstantInt>(B);
499  if (!BCst)
500  return nullptr;
501  ConstantInt *DCst = dyn_cast<ConstantInt>(D);
502  if (!DCst)
503  return nullptr;
504 
505  if (Mask & (Mask_NotAllZeros | BMask_NotAllOnes)) {
506  // (icmp ne (A & B), 0) & (icmp ne (A & D), 0) and
507  // (icmp ne (A & B), B) & (icmp ne (A & D), D)
508  // -> (icmp ne (A & B), 0) or (icmp ne (A & D), 0)
509  // Only valid if one of the masks is a superset of the other (check "B&D" is
510  // the same as either B or D).
511  APInt NewMask = BCst->getValue() & DCst->getValue();
512 
513  if (NewMask == BCst->getValue())
514  return LHS;
515  else if (NewMask == DCst->getValue())
516  return RHS;
517  }
518 
519  if (Mask & AMask_NotAllOnes) {
520  // (icmp ne (A & B), B) & (icmp ne (A & D), D)
521  // -> (icmp ne (A & B), A) or (icmp ne (A & D), A)
522  // Only valid if one of the masks is a superset of the other (check "B|D" is
523  // the same as either B or D).
524  APInt NewMask = BCst->getValue() | DCst->getValue();
525 
526  if (NewMask == BCst->getValue())
527  return LHS;
528  else if (NewMask == DCst->getValue())
529  return RHS;
530  }
531 
532  if (Mask & BMask_Mixed) {
533  // (icmp eq (A & B), C) & (icmp eq (A & D), E)
534  // We already know that B & C == C && D & E == E.
535  // If we can prove that (B & D) & (C ^ E) == 0, that is, the bits of
536  // C and E, which are shared by both the mask B and the mask D, don't
537  // contradict, then we can transform to
538  // -> (icmp eq (A & (B|D)), (C|E))
539  // Currently, we only handle the case of B, C, D, and E being constant.
540  // We can't simply use C and E because we might actually handle
541  // (icmp ne (A & B), B) & (icmp eq (A & D), D)
542  // with B and D each having a single bit set.
543  ConstantInt *CCst = dyn_cast<ConstantInt>(C);
544  if (!CCst)
545  return nullptr;
546  ConstantInt *ECst = dyn_cast<ConstantInt>(E);
547  if (!ECst)
548  return nullptr;
549  if (PredL != NewCC)
550  CCst = cast<ConstantInt>(ConstantExpr::getXor(BCst, CCst));
551  if (PredR != NewCC)
552  ECst = cast<ConstantInt>(ConstantExpr::getXor(DCst, ECst));
553 
554  // If there is a conflict, we should actually return a false for the
555  // whole construct.
556  if (((BCst->getValue() & DCst->getValue()) &
557  (CCst->getValue() ^ ECst->getValue())).getBoolValue())
558  return ConstantInt::get(LHS->getType(), !IsAnd);
559 
560  Value *NewOr1 = Builder.CreateOr(B, D);
561  Value *NewOr2 = ConstantExpr::getOr(CCst, ECst);
562  Value *NewAnd = Builder.CreateAnd(A, NewOr1);
563  return Builder.CreateICmp(NewCC, NewAnd, NewOr2);
564  }
565 
566  return nullptr;
567 }
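The Mask_AllZeros and BMask_AllOnes cases above both merge two masked equality tests into one by OR-ing the masks. A brute-force check of those two equivalences, exhaustive over 4-bit values (illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t A = 0; A < 16; ++A)
    for (uint32_t B = 0; B < 16; ++B)
      for (uint32_t D = 0; D < 16; ++D) {
        // (icmp eq (A & B), 0) & (icmp eq (A & D), 0) -> (icmp eq (A & (B|D)), 0)
        assert((((A & B) == 0) && ((A & D) == 0)) == ((A & (B | D)) == 0));
        // (icmp eq (A & B), B) & (icmp eq (A & D), D) -> (icmp eq (A & (B|D)), (B|D))
        assert((((A & B) == B) && ((A & D) == D)) == ((A & (B | D)) == (B | D)));
      }
  return 0;
}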
568 
569 /// Try to fold a signed range check with lower bound 0 to an unsigned icmp.
570 /// Example: (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
571 /// If \p Inverted is true then the check is for the inverted range, e.g.
572 /// (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
573 Value *InstCombiner::simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1,
574  bool Inverted) {
575  // Check the lower range comparison, e.g. x >= 0
576  // InstCombine already ensured that if there is a constant it's on the RHS.
577  ConstantInt *RangeStart = dyn_cast<ConstantInt>(Cmp0->getOperand(1));
578  if (!RangeStart)
579  return nullptr;
580 
581  ICmpInst::Predicate Pred0 = (Inverted ? Cmp0->getInversePredicate() :
582  Cmp0->getPredicate());
583 
584  // Accept x > -1 or x >= 0 (after potentially inverting the predicate).
585  if (!((Pred0 == ICmpInst::ICMP_SGT && RangeStart->isMinusOne()) ||
586  (Pred0 == ICmpInst::ICMP_SGE && RangeStart->isZero())))
587  return nullptr;
588 
589  ICmpInst::Predicate Pred1 = (Inverted ? Cmp1->getInversePredicate() :
590  Cmp1->getPredicate());
591 
592  Value *Input = Cmp0->getOperand(0);
593  Value *RangeEnd;
594  if (Cmp1->getOperand(0) == Input) {
595  // For the upper range compare we have: icmp x, n
596  RangeEnd = Cmp1->getOperand(1);
597  } else if (Cmp1->getOperand(1) == Input) {
598  // For the upper range compare we have: icmp n, x
599  RangeEnd = Cmp1->getOperand(0);
600  Pred1 = ICmpInst::getSwappedPredicate(Pred1);
601  } else {
602  return nullptr;
603  }
604 
605  // Check the upper range comparison, e.g. x < n
606  ICmpInst::Predicate NewPred;
607  switch (Pred1) {
608  case ICmpInst::ICMP_SLT: NewPred = ICmpInst::ICMP_ULT; break;
609  case ICmpInst::ICMP_SLE: NewPred = ICmpInst::ICMP_ULE; break;
610  default: return nullptr;
611  }
612 
613  // This simplification is only valid if the upper range is not negative.
614  KnownBits Known = computeKnownBits(RangeEnd, /*Depth=*/0, Cmp1);
615  if (!Known.isNonNegative())
616  return nullptr;
617 
618  if (Inverted)
619  NewPred = ICmpInst::getInversePredicate(NewPred);
620 
621  return Builder.CreateICmp(NewPred, Input, RangeEnd);
622 }
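The sign-to-unsigned rewrite above is only valid because the upper bound is known non-negative: when 0 <= n, values below zero become large unsigned values and fail the u< test exactly as they fail the signed pair. A quick exhaustive check over i8 (N picked arbitrarily):

#include <cassert>
#include <cstdint>

int main() {
  const int8_t N = 100; // upper bound, known non-negative
  for (int X = -128; X < 128; ++X) {
    int8_t V = int8_t(X);
    bool SignedPair = (V >= 0) && (V < N);      // (icmp sge x, 0) & (icmp slt x, n)
    bool UnsignedOne = uint8_t(V) < uint8_t(N); // icmp ult x, n
    assert(SignedPair == UnsignedOne);
  }
  return 0;
}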
623 
624 static Value *
625 foldAndOrOfEqualityCmpsWithConstants(ICmpInst *LHS, ICmpInst *RHS,
626  bool JoinedByAnd,
627  InstCombiner::BuilderTy &Builder) {
628  Value *X = LHS->getOperand(0);
629  if (X != RHS->getOperand(0))
630  return nullptr;
631 
632  const APInt *C1, *C2;
633  if (!match(LHS->getOperand(1), m_APInt(C1)) ||
634  !match(RHS->getOperand(1), m_APInt(C2)))
635  return nullptr;
636 
637  // We only handle (X != C1 && X != C2) and (X == C1 || X == C2).
638  ICmpInst::Predicate Pred = LHS->getPredicate();
639  if (Pred != RHS->getPredicate())
640  return nullptr;
641  if (JoinedByAnd && Pred != ICmpInst::ICMP_NE)
642  return nullptr;
643  if (!JoinedByAnd && Pred != ICmpInst::ICMP_EQ)
644  return nullptr;
645 
646  // The larger unsigned constant goes on the right.
647  if (C1->ugt(*C2))
648  std::swap(C1, C2);
649 
650  APInt Xor = *C1 ^ *C2;
651  if (Xor.isPowerOf2()) {
652  // If LHSC and RHSC differ by only one bit, then set that bit in X and
653  // compare against the larger constant:
654  // (X == C1 || X == C2) --> (X | (C1 ^ C2)) == C2
655  // (X != C1 && X != C2) --> (X | (C1 ^ C2)) != C2
656  // We choose an 'or' with a Pow2 constant rather than the inverse mask with
657  // 'and' because that may lead to smaller codegen from a smaller constant.
658  Value *Or = Builder.CreateOr(X, ConstantInt::get(X->getType(), Xor));
659  return Builder.CreateICmp(Pred, Or, ConstantInt::get(X->getType(), *C2));
660  }
661 
662  // Special case: get the ordering right when the values wrap around zero.
663  // I.e., we assumed the constants were unsigned when swapping earlier.
664  if (C1->isNullValue() && C2->isAllOnesValue())
665  std::swap(C1, C2);
666 
667  if (*C1 == *C2 - 1) {
668  // (X == 13 || X == 14) --> X - 13 <=u 1
669  // (X != 13 && X != 14) --> X - 13 >u 1
670  // An 'add' is the canonical IR form, so favor that over a 'sub'.
671  Value *Add = Builder.CreateAdd(X, ConstantInt::get(X->getType(), -(*C1)));
672  auto NewPred = JoinedByAnd ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_ULE;
673  return Builder.CreateICmp(NewPred, Add, ConstantInt::get(X->getType(), 1));
674  }
675 
676  return nullptr;
677 }
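Both folds above can be sanity-checked directly: when the two constants differ in a single bit, OR-ing that bit into X collapses the two equality tests, and when they are consecutive, subtracting the smaller one turns the pair into a u<= 1 test. An exhaustive i8 check with the arbitrary choices C1 = 8, C2 = 12 and C1 = 13, C2 = 14:

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t X = 0; X < 256; ++X) {
    // 8 ^ 12 == 4 is a power of 2: (X == 8 || X == 12) <=> ((X | 4) == 12).
    assert(((X == 8u) || (X == 12u)) == ((X | 4u) == 12u));
    // Consecutive constants: (X == 13 || X == 14) <=> ((X - 13) u<= 1).
    assert(((X == 13u) || (X == 14u)) == (uint8_t(X - 13u) <= 1u));
  }
  return 0;
}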
678 
679 // Fold (iszero(A & K1) | iszero(A & K2)) -> (A & (K1 | K2)) != (K1 | K2)
680 // Fold (!iszero(A & K1) & !iszero(A & K2)) -> (A & (K1 | K2)) == (K1 | K2)
681 Value *InstCombiner::foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS, ICmpInst *RHS,
682  bool JoinedByAnd,
683  Instruction &CxtI) {
684  ICmpInst::Predicate Pred = LHS->getPredicate();
685  if (Pred != RHS->getPredicate())
686  return nullptr;
687  if (JoinedByAnd && Pred != ICmpInst::ICMP_NE)
688  return nullptr;
689  if (!JoinedByAnd && Pred != ICmpInst::ICMP_EQ)
690  return nullptr;
691 
692  // TODO support vector splats
693  ConstantInt *LHSC = dyn_cast<ConstantInt>(LHS->getOperand(1));
694  ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS->getOperand(1));
695  if (!LHSC || !RHSC || !LHSC->isZero() || !RHSC->isZero())
696  return nullptr;
697 
698  Value *A, *B, *C, *D;
699  if (match(LHS->getOperand(0), m_And(m_Value(A), m_Value(B))) &&
700  match(RHS->getOperand(0), m_And(m_Value(C), m_Value(D)))) {
701  if (A == D || B == D)
702  std::swap(C, D);
703  if (B == C)
704  std::swap(A, B);
705 
706  if (A == C &&
707  isKnownToBeAPowerOfTwo(B, false, 0, &CxtI) &&
708  isKnownToBeAPowerOfTwo(D, false, 0, &CxtI)) {
709  Value *Mask = Builder.CreateOr(B, D);
710  Value *Masked = Builder.CreateAnd(A, Mask);
711  auto NewPred = JoinedByAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
712  return Builder.CreateICmp(NewPred, Masked, Mask);
713  }
714  }
715 
716  return nullptr;
717 }
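With K1 and K2 each a single bit, requiring both bits to be present in A is the same as requiring the combined mask to survive the AND intact, which is the rewrite performed above. A small brute-force check (K1 and K2 chosen arbitrarily):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t K1 = 0x2, K2 = 0x8; // one-bit masks
  for (uint32_t A = 0; A < 256; ++A) {
    bool BothNonZero = (A & K1) != 0 && (A & K2) != 0;
    bool MaskIntact = (A & (K1 | K2)) == (K1 | K2);
    assert(BothNonZero == MaskIntact);
  }
  return 0;
}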
718 
719 /// Fold (icmp)&(icmp) if possible.
720 Value *InstCombiner::foldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS,
721  Instruction &CxtI) {
722  // Fold (!iszero(A & K1) & !iszero(A & K2)) -> (A & (K1 | K2)) == (K1 | K2)
723  // if K1 and K2 are a one-bit mask.
724  if (Value *V = foldAndOrOfICmpsOfAndWithPow2(LHS, RHS, true, CxtI))
725  return V;
726 
727  ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
728 
729  // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
730  if (PredicatesFoldable(PredL, PredR)) {
731  if (LHS->getOperand(0) == RHS->getOperand(1) &&
732  LHS->getOperand(1) == RHS->getOperand(0))
733  LHS->swapOperands();
734  if (LHS->getOperand(0) == RHS->getOperand(0) &&
735  LHS->getOperand(1) == RHS->getOperand(1)) {
736  Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
737  unsigned Code = getICmpCode(LHS) & getICmpCode(RHS);
738  bool isSigned = LHS->isSigned() || RHS->isSigned();
739  return getNewICmpValue(isSigned, Code, Op0, Op1, Builder);
740  }
741  }
742 
743  // handle (roughly): (icmp eq (A & B), C) & (icmp eq (A & D), E)
744  if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, true, Builder))
745  return V;
746 
747  // E.g. (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
748  if (Value *V = simplifyRangeCheck(LHS, RHS, /*Inverted=*/false))
749  return V;
750 
751  // E.g. (icmp slt x, n) & (icmp sge x, 0) --> icmp ult x, n
752  if (Value *V = simplifyRangeCheck(RHS, LHS, /*Inverted=*/false))
753  return V;
754 
755  if (Value *V = foldAndOrOfEqualityCmpsWithConstants(LHS, RHS, true, Builder))
756  return V;
757 
758  // This only handles icmp of constants: (icmp1 A, C1) & (icmp2 B, C2).
759  Value *LHS0 = LHS->getOperand(0), *RHS0 = RHS->getOperand(0);
760  ConstantInt *LHSC = dyn_cast<ConstantInt>(LHS->getOperand(1));
761  ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS->getOperand(1));
762  if (!LHSC || !RHSC)
763  return nullptr;
764 
765  if (LHSC == RHSC && PredL == PredR) {
766  // (icmp ult A, C) & (icmp ult B, C) --> (icmp ult (A|B), C)
767  // where C is a power of 2 or
768  // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
769  if ((PredL == ICmpInst::ICMP_ULT && LHSC->getValue().isPowerOf2()) ||
770  (PredL == ICmpInst::ICMP_EQ && LHSC->isZero())) {
771  Value *NewOr = Builder.CreateOr(LHS0, RHS0);
772  return Builder.CreateICmp(PredL, NewOr, LHSC);
773  }
774  }
775 
776  // (trunc x) == C1 & (and x, CA) == C2 -> (and x, CA|CMAX) == C1|C2
777  // where CMAX is the all ones value for the truncated type,
778  // iff the lower bits of C2 and CA are zero.
779  if (PredL == ICmpInst::ICMP_EQ && PredL == PredR && LHS->hasOneUse() &&
780  RHS->hasOneUse()) {
781  Value *V;
782  ConstantInt *AndC, *SmallC = nullptr, *BigC = nullptr;
783 
784  // (trunc x) == C1 & (and x, CA) == C2
785  // (and x, CA) == C2 & (trunc x) == C1
786  if (match(RHS0, m_Trunc(m_Value(V))) &&
787  match(LHS0, m_And(m_Specific(V), m_ConstantInt(AndC)))) {
788  SmallC = RHSC;
789  BigC = LHSC;
790  } else if (match(LHS0, m_Trunc(m_Value(V))) &&
791  match(RHS0, m_And(m_Specific(V), m_ConstantInt(AndC)))) {
792  SmallC = LHSC;
793  BigC = RHSC;
794  }
795 
796  if (SmallC && BigC) {
797  unsigned BigBitSize = BigC->getType()->getBitWidth();
798  unsigned SmallBitSize = SmallC->getType()->getBitWidth();
799 
800  // Check that the low bits are zero.
801  APInt Low = APInt::getLowBitsSet(BigBitSize, SmallBitSize);
802  if ((Low & AndC->getValue()).isNullValue() &&
803  (Low & BigC->getValue()).isNullValue()) {
804  Value *NewAnd = Builder.CreateAnd(V, Low | AndC->getValue());
805  APInt N = SmallC->getValue().zext(BigBitSize) | BigC->getValue();
806  Value *NewVal = ConstantInt::get(AndC->getType()->getContext(), N);
807  return Builder.CreateICmp(PredL, NewAnd, NewVal);
808  }
809  }
810  }
811 
812  // From here on, we only handle:
813  // (icmp1 A, C1) & (icmp2 A, C2) --> something simpler.
814  if (LHS0 != RHS0)
815  return nullptr;
816 
817  // ICMP_[US][GL]E X, C is folded to ICMP_[US][GL]T elsewhere.
818  if (PredL == ICmpInst::ICMP_UGE || PredL == ICmpInst::ICMP_ULE ||
819  PredR == ICmpInst::ICMP_UGE || PredR == ICmpInst::ICMP_ULE ||
820  PredL == ICmpInst::ICMP_SGE || PredL == ICmpInst::ICMP_SLE ||
821  PredR == ICmpInst::ICMP_SGE || PredR == ICmpInst::ICMP_SLE)
822  return nullptr;
823 
824  // We can't fold (ugt x, C) & (sgt x, C2).
825  if (!PredicatesFoldable(PredL, PredR))
826  return nullptr;
827 
828  // Ensure that the larger constant is on the RHS.
829  bool ShouldSwap;
830  if (CmpInst::isSigned(PredL) ||
831  (ICmpInst::isEquality(PredL) && CmpInst::isSigned(PredR)))
832  ShouldSwap = LHSC->getValue().sgt(RHSC->getValue());
833  else
834  ShouldSwap = LHSC->getValue().ugt(RHSC->getValue());
835 
836  if (ShouldSwap) {
837  std::swap(LHS, RHS);
838  std::swap(LHSC, RHSC);
839  std::swap(PredL, PredR);
840  }
841 
842  // At this point, we know we have two icmp instructions
843  // comparing a value against two constants and and'ing the result
844  // together. Because of the above check, we know that we only have
845  // icmp eq, icmp ne, icmp [su]lt, and icmp [su]gt here. We also know
846  // (from the icmp folding check above), that the two constants
847  // are not equal and that the larger constant is on the RHS
848  assert(LHSC != RHSC && "Compares not folded above?");
849 
850  switch (PredL) {
851  default:
852  llvm_unreachable("Unknown integer condition code!");
853  case ICmpInst::ICMP_NE:
854  switch (PredR) {
855  default:
856  llvm_unreachable("Unknown integer condition code!");
857  case ICmpInst::ICMP_ULT:
858  if (LHSC == SubOne(RHSC)) // (X != 13 & X u< 14) -> X < 13
859  return Builder.CreateICmpULT(LHS0, LHSC);
860  if (LHSC->isZero()) // (X != 0 & X u< 14) -> X-1 u< 13
861  return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(),
862  false, true);
863  break; // (X != 13 & X u< 15) -> no change
864  case ICmpInst::ICMP_SLT:
865  if (LHSC == SubOne(RHSC)) // (X != 13 & X s< 14) -> X < 13
866  return Builder.CreateICmpSLT(LHS0, LHSC);
867  break; // (X != 13 & X s< 15) -> no change
868  case ICmpInst::ICMP_NE:
869  // Potential folds for this case should already be handled.
870  break;
871  }
872  break;
873  case ICmpInst::ICMP_UGT:
874  switch (PredR) {
875  default:
876  llvm_unreachable("Unknown integer condition code!");
877  case ICmpInst::ICMP_NE:
878  if (RHSC == AddOne(LHSC)) // (X u> 13 & X != 14) -> X u> 14
879  return Builder.CreateICmp(PredL, LHS0, RHSC);
880  break; // (X u> 13 & X != 15) -> no change
881  case ICmpInst::ICMP_ULT: // (X u> 13 & X u< 15) -> (X-14) <u 1
882  return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(),
883  false, true);
884  }
885  break;
886  case ICmpInst::ICMP_SGT:
887  switch (PredR) {
888  default:
889  llvm_unreachable("Unknown integer condition code!");
890  case ICmpInst::ICMP_NE:
891  if (RHSC == AddOne(LHSC)) // (X s> 13 & X != 14) -> X s> 14
892  return Builder.CreateICmp(PredL, LHS0, RHSC);
893  break; // (X s> 13 & X != 15) -> no change
894  case ICmpInst::ICMP_SLT: // (X s> 13 & X s< 15) -> (X-14) s< 1
895  return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(), true,
896  true);
897  }
898  break;
899  }
900 
901  return nullptr;
902 }
903 
904 Value *InstCombiner::foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS, bool IsAnd) {
905  Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
906  Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
907  FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
908 
909  if (LHS0 == RHS1 && RHS0 == LHS1) {
910  // Swap RHS operands to match LHS.
911  PredR = FCmpInst::getSwappedPredicate(PredR);
912  std::swap(RHS0, RHS1);
913  }
914 
915  // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
916  // Suppose the relation between x and y is R, where R is one of
917  // U(1000), L(0100), G(0010) or E(0001), and CC0 and CC1 are the bitmasks for
918  // testing the desired relations.
919  //
920  // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this:
921  // bool(R & CC0) && bool(R & CC1)
922  // = bool((R & CC0) & (R & CC1))
923  // = bool(R & (CC0 & CC1)) <= by re-association, commutation, and idempotency
924  //
925  // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this:
926  // bool(R & CC0) || bool(R & CC1)
927  // = bool((R & CC0) | (R & CC1))
928  // = bool(R & (CC0 | CC1)) <= by reversed distribution (contribution? ;)
929  if (LHS0 == RHS0 && LHS1 == RHS1) {
930  unsigned FCmpCodeL = getFCmpCode(PredL);
931  unsigned FCmpCodeR = getFCmpCode(PredR);
932  unsigned NewPred = IsAnd ? FCmpCodeL & FCmpCodeR : FCmpCodeL | FCmpCodeR;
933  return getFCmpValue(NewPred, LHS0, LHS1, Builder);
934  }
935 
936  if ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) ||
937  (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO && !IsAnd)) {
938  if (LHS0->getType() != RHS0->getType())
939  return nullptr;
940 
941  // FCmp canonicalization ensures that (fcmp ord/uno X, X) and
942  // (fcmp ord/uno X, C) will be transformed to (fcmp X, 0.0).
943  if (match(LHS1, m_Zero()) && LHS1 == RHS1)
944  // Ignore the constants because they are obviously not NANs:
945  // (fcmp ord x, 0.0) & (fcmp ord y, 0.0) -> (fcmp ord x, y)
946  // (fcmp uno x, 0.0) | (fcmp uno y, 0.0) -> (fcmp uno x, y)
947  return Builder.CreateFCmp(PredL, LHS0, RHS0);
948  }
949 
950  return nullptr;
951 }
952 
953 /// Match De Morgan's Laws:
954 /// (~A & ~B) == (~(A | B))
955 /// (~A | ~B) == (~(A & B))
956 static Instruction *matchDeMorgansLaws(BinaryOperator &I,
957  InstCombiner::BuilderTy &Builder) {
958  auto Opcode = I.getOpcode();
959  assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
960  "Trying to match De Morgan's Laws with something other than and/or");
961 
962  // Flip the logic operation.
963  Opcode = (Opcode == Instruction::And) ? Instruction::Or : Instruction::And;
964 
965  Value *A, *B;
966  if (match(I.getOperand(0), m_OneUse(m_Not(m_Value(A)))) &&
967  match(I.getOperand(1), m_OneUse(m_Not(m_Value(B)))) &&
968  !IsFreeToInvert(A, A->hasOneUse()) &&
969  !IsFreeToInvert(B, B->hasOneUse())) {
970  Value *AndOr = Builder.CreateBinOp(Opcode, A, B, I.getName() + ".demorgan");
971  return BinaryOperator::CreateNot(AndOr);
972  }
973 
974  return nullptr;
975 }
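The rewrite trades two 'not' instructions for one by applying De Morgan's laws, and it only fires when neither operand is free to invert. An exhaustive check of the two underlying identities on 8-bit values:

#include <cassert>
#include <cstdint>

int main() {
  for (unsigned A = 0; A < 256; ++A)
    for (unsigned B = 0; B < 256; ++B) {
      assert(uint8_t(~A & ~B) == uint8_t(~(A | B))); // (~A & ~B) == ~(A | B)
      assert(uint8_t(~A | ~B) == uint8_t(~(A & B))); // (~A | ~B) == ~(A & B)
    }
  return 0;
}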
976 
977 bool InstCombiner::shouldOptimizeCast(CastInst *CI) {
978  Value *CastSrc = CI->getOperand(0);
979 
980  // Noop casts and casts of constants should be eliminated trivially.
981  if (CI->getSrcTy() == CI->getDestTy() || isa<Constant>(CastSrc))
982  return false;
983 
984  // If this cast is paired with another cast that can be eliminated, we prefer
985  // to have it eliminated.
986  if (const auto *PrecedingCI = dyn_cast<CastInst>(CastSrc))
987  if (isEliminableCastPair(PrecedingCI, CI))
988  return false;
989 
990  return true;
991 }
992 
993 /// Fold {and,or,xor} (cast X), C.
994 static Instruction *foldLogicCastConstant(BinaryOperator &Logic, CastInst *Cast,
995  InstCombiner::BuilderTy &Builder) {
996  Constant *C = dyn_cast<Constant>(Logic.getOperand(1));
997  if (!C)
998  return nullptr;
999 
1000  auto LogicOpc = Logic.getOpcode();
1001  Type *DestTy = Logic.getType();
1002  Type *SrcTy = Cast->getSrcTy();
1003 
1004  // Move the logic operation ahead of a zext or sext if the constant is
1005  // unchanged in the smaller source type. Performing the logic in a smaller
1006  // type may provide more information to later folds, and the smaller logic
1007  // instruction may be cheaper (particularly in the case of vectors).
1008  Value *X;
1009  if (match(Cast, m_OneUse(m_ZExt(m_Value(X))))) {
1010  Constant *TruncC = ConstantExpr::getTrunc(C, SrcTy);
1011  Constant *ZextTruncC = ConstantExpr::getZExt(TruncC, DestTy);
1012  if (ZextTruncC == C) {
1013  // LogicOpc (zext X), C --> zext (LogicOpc X, C)
1014  Value *NewOp = Builder.CreateBinOp(LogicOpc, X, TruncC);
1015  return new ZExtInst(NewOp, DestTy);
1016  }
1017  }
1018 
1019  if (match(Cast, m_OneUse(m_SExt(m_Value(X))))) {
1020  Constant *TruncC = ConstantExpr::getTrunc(C, SrcTy);
1021  Constant *SextTruncC = ConstantExpr::getSExt(TruncC, DestTy);
1022  if (SextTruncC == C) {
1023  // LogicOpc (sext X), C --> sext (LogicOpc X, C)
1024  Value *NewOp = Builder.CreateBinOp(LogicOpc, X, TruncC);
1025  return new SExtInst(NewOp, DestTy);
1026  }
1027  }
1028 
1029  return nullptr;
1030 }
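Hoisting the logic op ahead of a zext is only done when the constant is unchanged by a round trip through the narrow type, i.e. zext(trunc(C)) == C; otherwise high bits of C would be lost. A tiny numeric sketch of the safe case (values chosen arbitrarily):

#include <cassert>
#include <cstdint>

int main() {
  uint8_t X = 0xAB;
  uint32_t C = 0x0000003Cu;                             // zext(trunc(C)) == C holds
  assert(uint32_t(uint8_t(C)) == C);
  uint32_t Wide = uint32_t(X) & C;                      // and (zext X), C
  uint32_t Narrow = uint32_t(uint8_t(X & uint8_t(C)));  // zext (and X, trunc C)
  assert(Wide == Narrow);
  return 0;
}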
1031 
1032 /// Fold {and,or,xor} (cast X), Y.
1033 Instruction *InstCombiner::foldCastedBitwiseLogic(BinaryOperator &I) {
1034  auto LogicOpc = I.getOpcode();
1035  assert(I.isBitwiseLogicOp() && "Unexpected opcode for bitwise logic folding");
1036 
1037  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1038  CastInst *Cast0 = dyn_cast<CastInst>(Op0);
1039  if (!Cast0)
1040  return nullptr;
1041 
1042  // This must be a cast from an integer or integer vector source type to allow
1043  // transformation of the logic operation to the source type.
1044  Type *DestTy = I.getType();
1045  Type *SrcTy = Cast0->getSrcTy();
1046  if (!SrcTy->isIntOrIntVectorTy())
1047  return nullptr;
1048 
1049  if (Instruction *Ret = foldLogicCastConstant(I, Cast0, Builder))
1050  return Ret;
1051 
1052  CastInst *Cast1 = dyn_cast<CastInst>(Op1);
1053  if (!Cast1)
1054  return nullptr;
1055 
1056  // Both operands of the logic operation are casts. The casts must be of the
1057  // same type for reduction.
1058  auto CastOpcode = Cast0->getOpcode();
1059  if (CastOpcode != Cast1->getOpcode() || SrcTy != Cast1->getSrcTy())
1060  return nullptr;
1061 
1062  Value *Cast0Src = Cast0->getOperand(0);
1063  Value *Cast1Src = Cast1->getOperand(0);
1064 
1065  // fold logic(cast(A), cast(B)) -> cast(logic(A, B))
1066  if (shouldOptimizeCast(Cast0) && shouldOptimizeCast(Cast1)) {
1067  Value *NewOp = Builder.CreateBinOp(LogicOpc, Cast0Src, Cast1Src,
1068  I.getName());
1069  return CastInst::Create(CastOpcode, NewOp, DestTy);
1070  }
1071 
1072  // For now, only 'and'/'or' have optimizations after this.
1073  if (LogicOpc == Instruction::Xor)
1074  return nullptr;
1075 
1076  // If this is logic(cast(icmp), cast(icmp)), try to fold this even if the
1077  // cast is otherwise not optimizable. This happens for vector sexts.
1078  ICmpInst *ICmp0 = dyn_cast<ICmpInst>(Cast0Src);
1079  ICmpInst *ICmp1 = dyn_cast<ICmpInst>(Cast1Src);
1080  if (ICmp0 && ICmp1) {
1081  Value *Res = LogicOpc == Instruction::And ? foldAndOfICmps(ICmp0, ICmp1, I)
1082  : foldOrOfICmps(ICmp0, ICmp1, I);
1083  if (Res)
1084  return CastInst::Create(CastOpcode, Res, DestTy);
1085  return nullptr;
1086  }
1087 
1088  // If this is logic(cast(fcmp), cast(fcmp)), try to fold this even if the
1089  // cast is otherwise not optimizable. This happens for vector sexts.
1090  FCmpInst *FCmp0 = dyn_cast<FCmpInst>(Cast0Src);
1091  FCmpInst *FCmp1 = dyn_cast<FCmpInst>(Cast1Src);
1092  if (FCmp0 && FCmp1)
1093  if (Value *R = foldLogicOfFCmps(FCmp0, FCmp1, LogicOpc == Instruction::And))
1094  return CastInst::Create(CastOpcode, R, DestTy);
1095 
1096  return nullptr;
1097 }
1098 
1099 static Instruction *foldAndToXor(BinaryOperator &I,
1100  InstCombiner::BuilderTy &Builder) {
1101  assert(I.getOpcode() == Instruction::And);
1102  Value *Op0 = I.getOperand(0);
1103  Value *Op1 = I.getOperand(1);
1104  Value *A, *B;
1105 
1106  // Operand complexity canonicalization guarantees that the 'or' is Op0.
1107  // (A | B) & ~(A & B) --> A ^ B
1108  // (A | B) & ~(B & A) --> A ^ B
1109  if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
1110  match(Op1, m_Not(m_c_And(m_Specific(A), m_Specific(B)))))
1111  return BinaryOperator::CreateXor(A, B);
1112 
1113  // (A | ~B) & (~A | B) --> ~(A ^ B)
1114  // (A | ~B) & (B | ~A) --> ~(A ^ B)
1115  // (~B | A) & (~A | B) --> ~(A ^ B)
1116  // (~B | A) & (B | ~A) --> ~(A ^ B)
1117  if (Op0->hasOneUse() || Op1->hasOneUse())
1118  if (match(Op0, m_c_Or(m_Value(A), m_Not(m_Value(B)))) &&
1119  match(Op1, m_c_Or(m_Not(m_Specific(A)), m_Specific(B))))
1120  return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
1121 
1122  return nullptr;
1123 }
1124 
1125 static Instruction *foldOrToXor(BinaryOperator &I,
1126  InstCombiner::BuilderTy &Builder) {
1127  assert(I.getOpcode() == Instruction::Or);
1128  Value *Op0 = I.getOperand(0);
1129  Value *Op1 = I.getOperand(1);
1130  Value *A, *B;
1131 
1132  // Operand complexity canonicalization guarantees that the 'and' is Op0.
1133  // (A & B) | ~(A | B) --> ~(A ^ B)
1134  // (A & B) | ~(B | A) --> ~(A ^ B)
1135  if (Op0->hasOneUse() || Op1->hasOneUse())
1136  if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
1137  match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
1138  return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
1139 
1140  // (A & ~B) | (~A & B) --> A ^ B
1141  // (A & ~B) | (B & ~A) --> A ^ B
1142  // (~B & A) | (~A & B) --> A ^ B
1143  // (~B & A) | (B & ~A) --> A ^ B
1144  if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
1145  match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B))))
1146  return BinaryOperator::CreateXor(A, B);
1147 
1148  return nullptr;
1149 }
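foldAndToXor and foldOrToXor above are the and/or halves of the same idea: the listed operand shapes are bitwise XOR and XNOR in disguise. A brute-force check of the three identities on 8-bit values:

#include <cassert>
#include <cstdint>

int main() {
  for (unsigned A = 0; A < 256; ++A)
    for (unsigned B = 0; B < 256; ++B) {
      assert(uint8_t((A | B) & ~(A & B)) == uint8_t(A ^ B));     // and-to-xor
      assert(uint8_t((A | ~B) & (~A | B)) == uint8_t(~(A ^ B))); // xnor form
      assert(uint8_t((A & ~B) | (~A & B)) == uint8_t(A ^ B));    // or-to-xor
    }
  return 0;
}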
1150 
1151 /// Return true if a constant shift amount is always less than the specified
1152 /// bit-width. If not, the shift could create poison in the narrower type.
1153 static bool canNarrowShiftAmt(Constant *C, unsigned BitWidth) {
1154  if (auto *ScalarC = dyn_cast<ConstantInt>(C))
1155  return ScalarC->getZExtValue() < BitWidth;
1156 
1157  if (C->getType()->isVectorTy()) {
1158  // Check each element of a constant vector.
1159  unsigned NumElts = C->getType()->getVectorNumElements();
1160  for (unsigned i = 0; i != NumElts; ++i) {
1161  Constant *Elt = C->getAggregateElement(i);
1162  if (!Elt)
1163  return false;
1164  if (isa<UndefValue>(Elt))
1165  continue;
1166  auto *CI = dyn_cast<ConstantInt>(Elt);
1167  if (!CI || CI->getZExtValue() >= BitWidth)
1168  return false;
1169  }
1170  return true;
1171  }
1172 
1173  // The constant is a constant expression or unknown.
1174  return false;
1175 }
1176 
1177 /// Try to use narrower ops (sink zext ops) for an 'and' with binop operand and
1178 /// a common zext operand: and (binop (zext X), C), (zext X).
1179 Instruction *InstCombiner::narrowMaskedBinOp(BinaryOperator &And) {
1180  // This transform could also apply to {or, and, xor}, but there are better
1181  // folds for those cases, so we don't expect those patterns here. AShr is not
1182  // handled because it should always be transformed to LShr in this sequence.
1183  // The subtract transform is different because it has a constant on the left.
1184  // Add/mul commute the constant to RHS; sub with constant RHS becomes add.
1185  Value *Op0 = And.getOperand(0), *Op1 = And.getOperand(1);
1186  Constant *C;
1187  if (!match(Op0, m_OneUse(m_Add(m_Specific(Op1), m_Constant(C)))) &&
1188  !match(Op0, m_OneUse(m_Mul(m_Specific(Op1), m_Constant(C)))) &&
1189  !match(Op0, m_OneUse(m_LShr(m_Specific(Op1), m_Constant(C)))) &&
1190  !match(Op0, m_OneUse(m_Shl(m_Specific(Op1), m_Constant(C)))) &&
1191  !match(Op0, m_OneUse(m_Sub(m_Constant(C), m_Specific(Op1)))))
1192  return nullptr;
1193 
1194  Value *X;
1195  if (!match(Op1, m_ZExt(m_Value(X))) || Op1->getNumUses() > 2)
1196  return nullptr;
1197 
1198  Type *Ty = And.getType();
1199  if (!isa<VectorType>(Ty) && !shouldChangeType(Ty, X->getType()))
1200  return nullptr;
1201 
1202  // If we're narrowing a shift, the shift amount must be safe (less than the
1203  // width) in the narrower type. If the shift amount is greater, instsimplify
1204  // usually handles that case, but we can't guarantee/assert it.
1205  Instruction::BinaryOps Opc = cast<BinaryOperator>(Op0)->getOpcode();
1206  if (Opc == Instruction::LShr || Opc == Instruction::Shl)
1207  if (!canNarrowShiftAmt(C, X->getType()->getScalarSizeInBits()))
1208  return nullptr;
1209 
1210  // and (sub C, (zext X)), (zext X) --> zext (and (sub C', X), X)
1211  // and (binop (zext X), C), (zext X) --> zext (and (binop X, C'), X)
1212  Value *NewC = ConstantExpr::getTrunc(C, X->getType());
1213  Value *NewBO = Opc == Instruction::Sub ? Builder.CreateBinOp(Opc, NewC, X)
1214  : Builder.CreateBinOp(Opc, X, NewC);
1215  return new ZExtInst(Builder.CreateAnd(NewBO, X), Ty);
1216 }
1217 
1218 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
1219 // here. We should standardize that construct where it is needed or choose some
1220 // other way to ensure that commutated variants of patterns are not missed.
1221 Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
1222  bool Changed = SimplifyAssociativeOrCommutative(I);
1223  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1224 
1225  if (Value *V = SimplifyVectorOp(I))
1226  return replaceInstUsesWith(I, V);
1227 
1228  if (Value *V = SimplifyAndInst(Op0, Op1, SQ.getWithInstruction(&I)))
1229  return replaceInstUsesWith(I, V);
1230 
1231  // See if we can simplify any instructions used by the instruction whose sole
1232  // purpose is to compute bits we don't care about.
1233  if (SimplifyDemandedInstructionBits(I))
1234  return &I;
1235 
1236  // Do this before using distributive laws to catch simple and/or/not patterns.
1237  if (Instruction *Xor = foldAndToXor(I, Builder))
1238  return Xor;
1239 
1240  // (A|B)&(A|C) -> A|(B&C) etc
1241  if (Value *V = SimplifyUsingDistributiveLaws(I))
1242  return replaceInstUsesWith(I, V);
1243 
1244  if (Value *V = SimplifyBSwap(I, Builder))
1245  return replaceInstUsesWith(I, V);
1246 
1247  const APInt *C;
1248  if (match(Op1, m_APInt(C))) {
1249  Value *X, *Y;
1250  if (match(Op0, m_OneUse(m_LogicalShift(m_One(), m_Value(X)))) &&
1251  C->isOneValue()) {
1252  // (1 << X) & 1 --> zext(X == 0)
1253  // (1 >> X) & 1 --> zext(X == 0)
1254  Value *IsZero = Builder.CreateICmpEQ(X, ConstantInt::get(I.getType(), 0));
1255  return new ZExtInst(IsZero, I.getType());
1256  }
1257 
1258  const APInt *XorC;
1259  if (match(Op0, m_OneUse(m_Xor(m_Value(X), m_APInt(XorC))))) {
1260  // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
1261  Constant *NewC = ConstantInt::get(I.getType(), *C & *XorC);
1262  Value *And = Builder.CreateAnd(X, Op1);
1263  And->takeName(Op0);
1264  return BinaryOperator::CreateXor(And, NewC);
1265  }
1266 
1267  const APInt *OrC;
1268  if (match(Op0, m_OneUse(m_Or(m_Value(X), m_APInt(OrC))))) {
1269  // (X | C1) & C2 --> (X & C2^(C1&C2)) | (C1&C2)
1270  // NOTE: This reduces the number of bits set in the & mask, which
1271  // can expose opportunities for store narrowing for scalars.
1272  // NOTE: SimplifyDemandedBits should have already removed bits from C1
1273  // that aren't set in C2. Meaning we can replace (C1&C2) with C1 in
1274  // above, but this feels safer.
1275  APInt Together = *C & *OrC;
1276  Value *And = Builder.CreateAnd(X, ConstantInt::get(I.getType(),
1277  Together ^ *C));
1278  And->takeName(Op0);
1279  return BinaryOperator::CreateOr(And, ConstantInt::get(I.getType(),
1280  Together));
1281  }
1282 
1283  // If the mask is only needed on one incoming arm, push the 'and' op up.
1284  if (match(Op0, m_OneUse(m_Xor(m_Value(X), m_Value(Y)))) ||
1285  match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
1286  APInt NotAndMask(~(*C));
1287  BinaryOperator::BinaryOps BinOp = cast<BinaryOperator>(Op0)->getOpcode();
1288  if (MaskedValueIsZero(X, NotAndMask, 0, &I)) {
1289  // Not masking anything out for the LHS, move mask to RHS.
1290  // and ({x}or X, Y), C --> {x}or X, (and Y, C)
1291  Value *NewRHS = Builder.CreateAnd(Y, Op1, Y->getName() + ".masked");
1292  return BinaryOperator::Create(BinOp, X, NewRHS);
1293  }
1294  if (!isa<Constant>(Y) && MaskedValueIsZero(Y, NotAndMask, 0, &I)) {
1295  // Not masking anything out for the RHS, move mask to LHS.
1296  // and ({x}or X, Y), C --> {x}or (and X, C), Y
1297  Value *NewLHS = Builder.CreateAnd(X, Op1, X->getName() + ".masked");
1298  return BinaryOperator::Create(BinOp, NewLHS, Y);
1299  }
1300  }
1301 
1302  }
1303 
1304  if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
1305  const APInt &AndRHSMask = AndRHS->getValue();
1306 
1307  // Optimize a variety of ((val OP C1) & C2) combinations...
1308  if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
1309  // ((C1 OP zext(X)) & C2) -> zext((C1-X) & C2) if C2 fits in the bitwidth
1310  // of X and OP behaves well when given trunc(C1) and X.
1311  switch (Op0I->getOpcode()) {
1312  default:
1313  break;
1314  case Instruction::Xor:
1315  case Instruction::Or:
1316  case Instruction::Mul:
1317  case Instruction::Add:
1318  case Instruction::Sub:
1319  Value *X;
1320  ConstantInt *C1;
1321  if (match(Op0I, m_c_BinOp(m_ZExt(m_Value(X)), m_ConstantInt(C1)))) {
1322  if (AndRHSMask.isIntN(X->getType()->getScalarSizeInBits())) {
1323  auto *TruncC1 = ConstantExpr::getTrunc(C1, X->getType());
1324  Value *BinOp;
1325  Value *Op0LHS = Op0I->getOperand(0);
1326  if (isa<ZExtInst>(Op0LHS))
1327  BinOp = Builder.CreateBinOp(Op0I->getOpcode(), X, TruncC1);
1328  else
1329  BinOp = Builder.CreateBinOp(Op0I->getOpcode(), TruncC1, X);
1330  auto *TruncC2 = ConstantExpr::getTrunc(AndRHS, X->getType());
1331  auto *And = Builder.CreateAnd(BinOp, TruncC2);
1332  return new ZExtInst(And, I.getType());
1333  }
1334  }
1335  }
1336 
1337  if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1)))
1338  if (Instruction *Res = OptAndOp(Op0I, Op0CI, AndRHS, I))
1339  return Res;
1340  }
1341 
1342  // If this is an integer truncation, and if the source is an 'and' with
1343  // immediate, transform it. This frequently occurs for bitfield accesses.
1344  {
1345  Value *X = nullptr; ConstantInt *YC = nullptr;
1346  if (match(Op0, m_Trunc(m_And(m_Value(X), m_ConstantInt(YC))))) {
1347  // Change: and (trunc (and X, YC) to T), C2
1348  // into : and (trunc X to T), trunc(YC) & C2
1349  // This will fold the two constants together, which may allow
1350  // other simplifications.
1351  Value *NewCast = Builder.CreateTrunc(X, I.getType(), "and.shrunk");
1352  Constant *C3 = ConstantExpr::getTrunc(YC, I.getType());
1353  C3 = ConstantExpr::getAnd(C3, AndRHS);
1354  return BinaryOperator::CreateAnd(NewCast, C3);
1355  }
1356  }
1357  }
1358 
1359  if (Instruction *Z = narrowMaskedBinOp(I))
1360  return Z;
1361 
1362  if (isa<Constant>(Op1))
1363  if (Instruction *FoldedLogic = foldOpWithConstantIntoOperand(I))
1364  return FoldedLogic;
1365 
1366  if (Instruction *DeMorgan = matchDeMorgansLaws(I, Builder))
1367  return DeMorgan;
1368 
1369  {
1370  Value *A = nullptr, *B = nullptr, *C = nullptr;
1371  // A&(A^B) => A & ~B
1372  {
1373  Value *tmpOp0 = Op0;
1374  Value *tmpOp1 = Op1;
1375  if (match(Op0, m_OneUse(m_Xor(m_Value(A), m_Value(B))))) {
1376  if (A == Op1 || B == Op1 ) {
1377  tmpOp1 = Op0;
1378  tmpOp0 = Op1;
1379  // Simplify below
1380  }
1381  }
1382 
1383  if (match(tmpOp1, m_OneUse(m_Xor(m_Value(A), m_Value(B))))) {
1384  if (B == tmpOp0) {
1385  std::swap(A, B);
1386  }
1387  // Notice that the pattern (A&(~B)) is actually (A&(-1^B)), so if
1388  // A is originally -1 (or a vector of -1 and undefs), then we enter
1389  // an endless loop. By checking that A is non-constant we ensure that
1390  // we will never get to the loop.
1391  if (A == tmpOp0 && !isa<Constant>(A)) // A&(A^B) -> A & ~B
1392  return BinaryOperator::CreateAnd(A, Builder.CreateNot(B));
1393  }
1394  }
1395 
1396  // (A ^ B) & ((B ^ C) ^ A) -> (A ^ B) & ~C
1397  if (match(Op0, m_Xor(m_Value(A), m_Value(B))))
1398  if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A))))
1399  if (Op1->hasOneUse() || IsFreeToInvert(C, C->hasOneUse()))
1400  return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(C));
1401 
1402  // ((A ^ C) ^ B) & (B ^ A) -> (B ^ A) & ~C
1403  if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B))))
1404  if (match(Op1, m_Xor(m_Specific(B), m_Specific(A))))
1405  if (Op0->hasOneUse() || IsFreeToInvert(C, C->hasOneUse()))
1406  return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(C));
1407 
1408  // (A | B) & ((~A) ^ B) -> (A & B)
1409  // (A | B) & (B ^ (~A)) -> (A & B)
1410  // (B | A) & ((~A) ^ B) -> (A & B)
1411  // (B | A) & (B ^ (~A)) -> (A & B)
1412  if (match(Op1, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
1413  match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
1414  return BinaryOperator::CreateAnd(A, B);
1415 
1416  // ((~A) ^ B) & (A | B) -> (A & B)
1417  // ((~A) ^ B) & (B | A) -> (A & B)
1418  // (B ^ (~A)) & (A | B) -> (A & B)
1419  // (B ^ (~A)) & (B | A) -> (A & B)
1420  if (match(Op0, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
1421  match(Op1, m_c_Or(m_Specific(A), m_Specific(B))))
1422  return BinaryOperator::CreateAnd(A, B);
1423  }
1424 
1425  {
1426  ICmpInst *LHS = dyn_cast<ICmpInst>(Op0);
1427  ICmpInst *RHS = dyn_cast<ICmpInst>(Op1);
1428  if (LHS && RHS)
1429  if (Value *Res = foldAndOfICmps(LHS, RHS, I))
1430  return replaceInstUsesWith(I, Res);
1431 
1432  // TODO: Make this recursive; it's a little tricky because an arbitrary
1433  // number of 'and' instructions might have to be created.
1434  Value *X, *Y;
1435  if (LHS && match(Op1, m_OneUse(m_And(m_Value(X), m_Value(Y))))) {
1436  if (auto *Cmp = dyn_cast<ICmpInst>(X))
1437  if (Value *Res = foldAndOfICmps(LHS, Cmp, I))
1438  return replaceInstUsesWith(I, Builder.CreateAnd(Res, Y));
1439  if (auto *Cmp = dyn_cast<ICmpInst>(Y))
1440  if (Value *Res = foldAndOfICmps(LHS, Cmp, I))
1441  return replaceInstUsesWith(I, Builder.CreateAnd(Res, X));
1442  }
1443  if (RHS && match(Op0, m_OneUse(m_And(m_Value(X), m_Value(Y))))) {
1444  if (auto *Cmp = dyn_cast<ICmpInst>(X))
1445  if (Value *Res = foldAndOfICmps(Cmp, RHS, I))
1446  return replaceInstUsesWith(I, Builder.CreateAnd(Res, Y));
1447  if (auto *Cmp = dyn_cast<ICmpInst>(Y))
1448  if (Value *Res = foldAndOfICmps(Cmp, RHS, I))
1449  return replaceInstUsesWith(I, Builder.CreateAnd(Res, X));
1450  }
1451  }
1452 
1453  if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
1454  if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
1455  if (Value *Res = foldLogicOfFCmps(LHS, RHS, true))
1456  return replaceInstUsesWith(I, Res);
1457 
1458  if (Instruction *CastedAnd = foldCastedBitwiseLogic(I))
1459  return CastedAnd;
1460 
1461  // and(sext(A), B) / and(B, sext(A)) --> A ? B : 0, where A is i1 or <N x i1>.
1462  Value *A;
1463  if (match(Op0, m_OneUse(m_SExt(m_Value(A)))) &&
1464  A->getType()->isIntOrIntVectorTy(1))
1465  return SelectInst::Create(A, Op1, Constant::getNullValue(I.getType()));
1466  if (match(Op1, m_OneUse(m_SExt(m_Value(A)))) &&
1467  A->getType()->isIntOrIntVectorTy(1))
1468  return SelectInst::Create(A, Op0, Constant::getNullValue(I.getType()));
1469 
1470  return Changed ? &I : nullptr;
1471 }
1472 
1473 /// Given an OR instruction, check to see if this is a bswap idiom. If so,
1474 /// insert the new intrinsic and return it.
1475 Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
1476  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1477 
1478  // Look through zero extends.
1479  if (Instruction *Ext = dyn_cast<ZExtInst>(Op0))
1480  Op0 = Ext->getOperand(0);
1481 
1482  if (Instruction *Ext = dyn_cast<ZExtInst>(Op1))
1483  Op1 = Ext->getOperand(0);
1484 
1485  // (A | B) | C and A | (B | C) -> bswap if possible.
1486  bool OrOfOrs = match(Op0, m_Or(m_Value(), m_Value())) ||
1487  match(Op1, m_Or(m_Value(), m_Value()));
1488 
1489  // (A >> B) | (C << D) and (A << B) | (C >> D) -> bswap if possible.
1490  bool OrOfShifts = match(Op0, m_LogicalShift(m_Value(), m_Value())) &&
1491  match(Op1, m_LogicalShift(m_Value(), m_Value()));
1492 
1493  // (A & B) | (C & D) -> bswap if possible.
1494  bool OrOfAnds = match(Op0, m_And(m_Value(), m_Value())) &&
1495  match(Op1, m_And(m_Value(), m_Value()));
1496 
1497  if (!OrOfOrs && !OrOfShifts && !OrOfAnds)
1498  return nullptr;
1499 
1500  SmallVector<Instruction *, 4> Insts;
1501  if (!recognizeBSwapOrBitReverseIdiom(&I, true, false, Insts))
1502  return nullptr;
1503  Instruction *LastInst = Insts.pop_back_val();
1504  LastInst->removeFromParent();
1505 
1506  for (auto *Inst : Insts)
1507  Worklist.Add(Inst);
1508  return LastInst;
1509 }
1510 
1511 /// If all elements of two constant vectors are 0/-1 and inverses, return true.
1512 static bool areInverseVectorBitmasks(Constant *C1, Constant *C2) {
1513  unsigned NumElts = C1->getType()->getVectorNumElements();
1514  for (unsigned i = 0; i != NumElts; ++i) {
1515  Constant *EltC1 = C1->getAggregateElement(i);
1516  Constant *EltC2 = C2->getAggregateElement(i);
1517  if (!EltC1 || !EltC2)
1518  return false;
1519 
1520  // One element must be all ones, and the other must be all zeros.
1521  // FIXME: Allow undef elements.
1522  if (!((match(EltC1, m_Zero()) && match(EltC2, m_AllOnes())) ||
1523  (match(EltC2, m_Zero()) && match(EltC1, m_AllOnes()))))
1524  return false;
1525  }
1526  return true;
1527 }
1528 
1529 /// We have an expression of the form (A & C) | (B & D). If A is a scalar or
1530 /// vector composed of all-zeros or all-ones values and is the bitwise 'not' of
1531 /// B, it can be used as the condition operand of a select instruction.
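/// For example (illustrative), with A == <-1, 0> and B == <0, -1>,
/// (A & C) | (B & D) takes lane 0 from C and lane 1 from D, which is a
/// select with condition <1, 0>, obtained by truncating A to a vector of i1.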
1532 static Value *getSelectCondition(Value *A, Value *B,
1533  InstCombiner::BuilderTy &Builder) {
1534  // If these are scalars or vectors of i1, A can be used directly.
1535  Type *Ty = A->getType();
1536  if (match(A, m_Not(m_Specific(B))) && Ty->isIntOrIntVectorTy(1))
1537  return A;
1538 
1539  // If A and B are sign-extended, look through the sexts to find the booleans.
1540  Value *Cond;
1541  Value *NotB;
1542  if (match(A, m_SExt(m_Value(Cond))) &&
1543  Cond->getType()->isIntOrIntVectorTy(1) &&
1544  match(B, m_OneUse(m_Not(m_Value(NotB))))) {
1545  NotB = peekThroughBitcast(NotB, true);
1546  if (match(NotB, m_SExt(m_Specific(Cond))))
1547  return Cond;
1548  }
1549 
1550  // All scalar (and most vector) possibilities should be handled now.
1551  // Try more matches that only apply to non-splat constant vectors.
1552  if (!Ty->isVectorTy())
1553  return nullptr;
1554 
1555  // If both operands are constants, see if the constants are inverse bitmasks.
1556  Constant *AC, *BC;
1557  if (match(A, m_Constant(AC)) && match(B, m_Constant(BC)) &&
1558  areInverseVectorBitmasks(AC, BC)) {
1559  return Builder.CreateZExtOrTrunc(AC, CmpInst::makeCmpResultType(Ty));
1560  }
1561 
1562  // If both operands are xor'd with constants using the same sexted boolean
1563  // operand, see if the constants are inverse bitmasks.
1564  if (match(A, (m_Xor(m_SExt(m_Value(Cond)), m_Constant(AC)))) &&
1565  match(B, (m_Xor(m_SExt(m_Specific(Cond)), m_Constant(BC)))) &&
1566  Cond->getType()->isIntOrIntVectorTy(1) &&
1567  areInverseVectorBitmasks(AC, BC)) {
1568  AC = ConstantExpr::getTrunc(AC, CmpInst::makeCmpResultType(Ty));
1569  return Builder.CreateXor(Cond, AC);
1570  }
1571  return nullptr;
1572 }
1573 
1574 /// We have an expression of the form (A & C) | (B & D). Try to simplify this
1575 /// to "A' ? C : D", where A' is a boolean or vector of booleans.
1576 static Value *matchSelectFromAndOr(Value *A, Value *C, Value *B, Value *D,
1577  InstCombiner::BuilderTy &Builder) {
1578  // The potential condition of the select may be bitcasted. In that case, look
1579  // through its bitcast and the corresponding bitcast of the 'not' condition.
1580  Type *OrigType = A->getType();
1581  A = peekThroughBitcast(A, true);
1582  B = peekThroughBitcast(B, true);
1583 
1584  if (Value *Cond = getSelectCondition(A, B, Builder)) {
1585  // ((bc Cond) & C) | ((bc ~Cond) & D) --> bc (select Cond, (bc C), (bc D))
1586  // The bitcasts will either all exist or all not exist. The builder will
1587  // not create unnecessary casts if the types already match.
1588  Value *BitcastC = Builder.CreateBitCast(C, A->getType());
1589  Value *BitcastD = Builder.CreateBitCast(D, A->getType());
1590  Value *Select = Builder.CreateSelect(Cond, BitcastC, BitcastD);
1591  return Builder.CreateBitCast(Select, OrigType);
1592  }
1593 
1594  return nullptr;
1595 }
1596 
1597 /// Fold (icmp)|(icmp) if possible.
1598 Value *InstCombiner::foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
1599  Instruction &CxtI) {
1600  // Fold (iszero(A & K1) | iszero(A & K2)) -> (A & (K1 | K2)) != (K1 | K2)
1601  // if K1 and K2 are one-bit masks.
1602  if (Value *V = foldAndOrOfICmpsOfAndWithPow2(LHS, RHS, false, CxtI))
1603  return V;
1604 
1605  ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1606 
1607  ConstantInt *LHSC = dyn_cast<ConstantInt>(LHS->getOperand(1));
1608  ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS->getOperand(1));
1609 
1610  // Fold (icmp ult/ule (A + C1), C3) | (icmp ult/ule (A + C2), C3)
1611  // --> (icmp ult/ule ((A & ~(C1 ^ C2)) + max(C1, C2)), C3)
1612  // The original condition actually refers to the following two ranges:
1613  // [MAX_UINT-C1+1, MAX_UINT-C1+1+C3] and [MAX_UINT-C2+1, MAX_UINT-C2+1+C3]
1614  // We can fold these two ranges if:
1615  // 1) C1 and C2 are unsigned-greater than C3.
1616  // 2) The two ranges are separated.
1617  // 3) C1 ^ C2 is a one-bit mask.
1618  // 4) LowRange1 ^ LowRange2 and HighRange1 ^ HighRange2 are the same one-bit mask.
1619  // This implies all values in the two ranges differ by exactly one bit.
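  // Illustrative i8 example: (icmp ult (A + 48), 8) | (icmp ult (A + 16), 8)
  // covers A in [208,215] and [240,247]. C1 ^ C2 == 32 is a one-bit mask and
  // the two ranges differ only in bit 5, so this becomes
  // (icmp ult ((A & ~32) + 48), 8).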
1620 
1621  if ((PredL == ICmpInst::ICMP_ULT || PredL == ICmpInst::ICMP_ULE) &&
1622  PredL == PredR && LHSC && RHSC && LHS->hasOneUse() && RHS->hasOneUse() &&
1623  LHSC->getType() == RHSC->getType() &&
1624  LHSC->getValue() == (RHSC->getValue())) {
1625 
1626  Value *LAdd = LHS->getOperand(0);
1627  Value *RAdd = RHS->getOperand(0);
1628 
1629  Value *LAddOpnd, *RAddOpnd;
1630  ConstantInt *LAddC, *RAddC;
1631  if (match(LAdd, m_Add(m_Value(LAddOpnd), m_ConstantInt(LAddC))) &&
1632  match(RAdd, m_Add(m_Value(RAddOpnd), m_ConstantInt(RAddC))) &&
1633  LAddC->getValue().ugt(LHSC->getValue()) &&
1634  RAddC->getValue().ugt(LHSC->getValue())) {
1635 
1636  APInt DiffC = LAddC->getValue() ^ RAddC->getValue();
1637  if (LAddOpnd == RAddOpnd && DiffC.isPowerOf2()) {
1638  ConstantInt *MaxAddC = nullptr;
1639  if (LAddC->getValue().ult(RAddC->getValue()))
1640  MaxAddC = RAddC;
1641  else
1642  MaxAddC = LAddC;
1643 
1644  APInt RRangeLow = -RAddC->getValue();
1645  APInt RRangeHigh = RRangeLow + LHSC->getValue();
1646  APInt LRangeLow = -LAddC->getValue();
1647  APInt LRangeHigh = LRangeLow + LHSC->getValue();
1648  APInt LowRangeDiff = RRangeLow ^ LRangeLow;
1649  APInt HighRangeDiff = RRangeHigh ^ LRangeHigh;
1650  APInt RangeDiff = LRangeLow.sgt(RRangeLow) ? LRangeLow - RRangeLow
1651  : RRangeLow - LRangeLow;
1652 
1653  if (LowRangeDiff.isPowerOf2() && LowRangeDiff == HighRangeDiff &&
1654  RangeDiff.ugt(LHSC->getValue())) {
1655  Value *MaskC = ConstantInt::get(LAddC->getType(), ~DiffC);
1656 
1657  Value *NewAnd = Builder.CreateAnd(LAddOpnd, MaskC);
1658  Value *NewAdd = Builder.CreateAdd(NewAnd, MaxAddC);
1659  return Builder.CreateICmp(LHS->getPredicate(), NewAdd, LHSC);
1660  }
1661  }
1662  }
1663  }
1664 
1665  // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B)
1666  if (PredicatesFoldable(PredL, PredR)) {
1667  if (LHS->getOperand(0) == RHS->getOperand(1) &&
1668  LHS->getOperand(1) == RHS->getOperand(0))
1669  LHS->swapOperands();
1670  if (LHS->getOperand(0) == RHS->getOperand(0) &&
1671  LHS->getOperand(1) == RHS->getOperand(1)) {
1672  Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
1673  unsigned Code = getICmpCode(LHS) | getICmpCode(RHS);
1674  bool isSigned = LHS->isSigned() || RHS->isSigned();
1675  return getNewICmpValue(isSigned, Code, Op0, Op1, Builder);
1676  }
1677  }
1678 
1679  // handle (roughly):
1680  // (icmp ne (A & B), C) | (icmp ne (A & D), E)
1681  if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, false, Builder))
1682  return V;
1683 
1684  Value *LHS0 = LHS->getOperand(0), *RHS0 = RHS->getOperand(0);
1685  if (LHS->hasOneUse() || RHS->hasOneUse()) {
1686  // (icmp eq B, 0) | (icmp ult A, B) -> (icmp ule A, B-1)
1687  // (icmp eq B, 0) | (icmp ugt B, A) -> (icmp ule A, B-1)
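  // This is correct because when B == 0 the subtraction wraps and B-1 becomes
  // the all-ones value, making "A u<= B-1" trivially true; otherwise
  // "A u<= B-1" is exactly "A u< B".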
1688  Value *A = nullptr, *B = nullptr;
1689  if (PredL == ICmpInst::ICMP_EQ && LHSC && LHSC->isZero()) {
1690  B = LHS0;
1691  if (PredR == ICmpInst::ICMP_ULT && LHS0 == RHS->getOperand(1))
1692  A = RHS0;
1693  else if (PredR == ICmpInst::ICMP_UGT && LHS0 == RHS0)
1694  A = RHS->getOperand(1);
1695  }
1696  // (icmp ult A, B) | (icmp eq B, 0) -> (icmp ule A, B-1)
1697  // (icmp ugt B, A) | (icmp eq B, 0) -> (icmp ule A, B-1)
1698  else if (PredR == ICmpInst::ICMP_EQ && RHSC && RHSC->isZero()) {
1699  B = RHS0;
1700  if (PredL == ICmpInst::ICMP_ULT && RHS0 == LHS->getOperand(1))
1701  A = LHS0;
1702  else if (PredL == ICmpInst::ICMP_UGT && LHS0 == RHS0)
1703  A = LHS->getOperand(1);
1704  }
1705  if (A && B)
1706  return Builder.CreateICmp(
1707  ICmpInst::ICMP_UGE,
1708  Builder.CreateAdd(B, ConstantInt::getSigned(B->getType(), -1)), A);
1709  }
1710 
1711  // E.g. (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
1712  if (Value *V = simplifyRangeCheck(LHS, RHS, /*Inverted=*/true))
1713  return V;
1714 
1715  // E.g. (icmp sgt x, n) | (icmp slt x, 0) --> icmp ugt x, n
1716  if (Value *V = simplifyRangeCheck(RHS, LHS, /*Inverted=*/true))
1717  return V;
1718 
1719  if (Value *V = foldAndOrOfEqualityCmpsWithConstants(LHS, RHS, false, Builder))
1720  return V;
1721 
1722  // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
1723  if (!LHSC || !RHSC)
1724  return nullptr;
1725 
1726  if (LHSC == RHSC && PredL == PredR) {
1727  // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
1728  if (PredL == ICmpInst::ICMP_NE && LHSC->isZero()) {
1729  Value *NewOr = Builder.CreateOr(LHS0, RHS0);
1730  return Builder.CreateICmp(PredL, NewOr, LHSC);
1731  }
1732  }
1733 
1734  // (icmp ult (X + CA), C1) | (icmp eq X, C2) -> (icmp ule (X + CA), C1)
1735  // iff C2 + CA == C1.
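  // For example (illustrative): with CA == 5, C1 == 15 and C2 == 10, the
  // equality "X == 10" is exactly the boundary case "X + 5 == 15", so widening
  // the strict compare to "ule 15" absorbs it.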
1736  if (PredL == ICmpInst::ICMP_ULT && PredR == ICmpInst::ICMP_EQ) {
1737  ConstantInt *AddC;
1738  if (match(LHS0, m_Add(m_Specific(RHS0), m_ConstantInt(AddC))))
1739  if (RHSC->getValue() + AddC->getValue() == LHSC->getValue())
1740  return Builder.CreateICmpULE(LHS0, LHSC);
1741  }
1742 
1743  // From here on, we only handle:
1744  // (icmp1 A, C1) | (icmp2 A, C2) --> something simpler.
1745  if (LHS0 != RHS0)
1746  return nullptr;
1747 
1748  // ICMP_[US][GL]E X, C is folded to ICMP_[US][GL]T elsewhere.
1749  if (PredL == ICmpInst::ICMP_UGE || PredL == ICmpInst::ICMP_ULE ||
1750  PredR == ICmpInst::ICMP_UGE || PredR == ICmpInst::ICMP_ULE ||
1751  PredL == ICmpInst::ICMP_SGE || PredL == ICmpInst::ICMP_SLE ||
1752  PredR == ICmpInst::ICMP_SGE || PredR == ICmpInst::ICMP_SLE)
1753  return nullptr;
1754 
1755  // We can't fold (ugt x, C) | (sgt x, C2).
1756  if (!PredicatesFoldable(PredL, PredR))
1757  return nullptr;
1758 
1759  // Ensure that the larger constant is on the RHS.
1760  bool ShouldSwap;
1761  if (CmpInst::isSigned(PredL) ||
1762  (ICmpInst::isEquality(PredL) && CmpInst::isSigned(PredR)))
1763  ShouldSwap = LHSC->getValue().sgt(RHSC->getValue());
1764  else
1765  ShouldSwap = LHSC->getValue().ugt(RHSC->getValue());
1766 
1767  if (ShouldSwap) {
1768  std::swap(LHS, RHS);
1769  std::swap(LHSC, RHSC);
1770  std::swap(PredL, PredR);
1771  }
1772 
1773  // At this point, we know we have two icmp instructions
1774  // comparing a value against two constants and or'ing the result
1775  // together. Because of the above check, we know that we only have
1776  // ICMP_EQ, ICMP_NE, ICMP_ULT/SLT, and ICMP_UGT/SGT here. We also know
1777  // (from the icmp folding check above) that the two constants are not
1778  // equal.
1779  assert(LHSC != RHSC && "Compares not folded above?");
1780 
1781  switch (PredL) {
1782  default:
1783  llvm_unreachable("Unknown integer condition code!");
1784  case ICmpInst::ICMP_EQ:
1785  switch (PredR) {
1786  default:
1787  llvm_unreachable("Unknown integer condition code!");
1788  case ICmpInst::ICMP_EQ:
1789  // Potential folds for this case should already be handled.
1790  break;
1791  case ICmpInst::ICMP_UGT: // (X == 13 | X u> 14) -> no change
1792  case ICmpInst::ICMP_SGT: // (X == 13 | X s> 14) -> no change
1793  break;
1794  }
1795  break;
1796  case ICmpInst::ICMP_ULT:
1797  switch (PredR) {
1798  default:
1799  llvm_unreachable("Unknown integer condition code!");
1800  case ICmpInst::ICMP_EQ: // (X u< 13 | X == 14) -> no change
1801  break;
1802  case ICmpInst::ICMP_UGT: // (X u< 13 | X u> 15) -> (X-13) u> 2
1803  assert(!RHSC->isMaxValue(false) && "Missed icmp simplification");
1804  return insertRangeTest(LHS0, LHSC->getValue(), RHSC->getValue() + 1,
1805  false, false);
1806  }
1807  break;
1808  case ICmpInst::ICMP_SLT:
1809  switch (PredR) {
1810  default:
1811  llvm_unreachable("Unknown integer condition code!");
1812  case ICmpInst::ICMP_EQ: // (X s< 13 | X == 14) -> no change
1813  break;
1814  case ICmpInst::ICMP_SGT: // (X s< 13 | X s> 15) -> (X-13) s> 2
1815  assert(!RHSC->isMaxValue(true) && "Missed icmp simplification");
1816  return insertRangeTest(LHS0, LHSC->getValue(), RHSC->getValue() + 1, true,
1817  false);
1818  }
1819  break;
1820  }
1821  return nullptr;
1822 }
1823 
1824 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
1825 // here. We should standardize that construct where it is needed or choose some
1826 // other way to ensure that commutated variants of patterns are not missed.
1827 Instruction *InstCombiner::visitOr(BinaryOperator &I) {
1828  bool Changed = SimplifyAssociativeOrCommutative(I);
1829  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1830 
1831  if (Value *V = SimplifyVectorOp(I))
1832  return replaceInstUsesWith(I, V);
1833 
1834  if (Value *V = SimplifyOrInst(Op0, Op1, SQ.getWithInstruction(&I)))
1835  return replaceInstUsesWith(I, V);
1836 
1837  // See if we can simplify any instructions used by the instruction whose sole
1838  // purpose is to compute bits we don't care about.
1839  if (SimplifyDemandedInstructionBits(I))
1840  return &I;
1841 
1842  // Do this before using distributive laws to catch simple and/or/not patterns.
1843  if (Instruction *Xor = foldOrToXor(I, Builder))
1844  return Xor;
1845 
1846  // (A&B)|(A&C) -> A&(B|C) etc
1847  if (Value *V = SimplifyUsingDistributiveLaws(I))
1848  return replaceInstUsesWith(I, V);
1849 
1850  if (Value *V = SimplifyBSwap(I, Builder))
1851  return replaceInstUsesWith(I, V);
1852 
1853  if (isa<Constant>(Op1))
1854  if (Instruction *FoldedLogic = foldOpWithConstantIntoOperand(I))
1855  return FoldedLogic;
1856 
1857  // Given an OR instruction, check to see if this is a bswap.
1858  if (Instruction *BSwap = MatchBSwap(I))
1859  return BSwap;
1860 
1861  {
1862  Value *A;
1863  const APInt *C;
1864  // (X^C)|Y -> (X|Y)^C iff Y&C == 0
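    // This holds because Y has no bits in common with C, so or-ing in Y cannot
    // touch the bits that the xor with C flips; the xor can therefore be
    // hoisted past the or.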
1865  if (match(Op0, m_OneUse(m_Xor(m_Value(A), m_APInt(C)))) &&
1866  MaskedValueIsZero(Op1, *C, 0, &I)) {
1867  Value *NOr = Builder.CreateOr(A, Op1);
1868  NOr->takeName(Op0);
1869  return BinaryOperator::CreateXor(NOr,
1870  ConstantInt::get(NOr->getType(), *C));
1871  }
1872 
1873  // Y|(X^C) -> (X|Y)^C iff Y&C == 0
1874  if (match(Op1, m_OneUse(m_Xor(m_Value(A), m_APInt(C)))) &&
1875  MaskedValueIsZero(Op0, *C, 0, &I)) {
1876  Value *NOr = Builder.CreateOr(A, Op0);
1877  NOr->takeName(Op0);
1878  return BinaryOperator::CreateXor(NOr,
1879  ConstantInt::get(NOr->getType(), *C));
1880  }
1881  }
1882 
1883  Value *A, *B;
1884 
1885  // (A & C)|(B & D)
1886  Value *C = nullptr, *D = nullptr;
1887  if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
1888  match(Op1, m_And(m_Value(B), m_Value(D)))) {
1889  ConstantInt *C1 = dyn_cast<ConstantInt>(C);
1890  ConstantInt *C2 = dyn_cast<ConstantInt>(D);
1891  if (C1 && C2) { // (A & C1)|(B & C2)
1892  Value *V1 = nullptr, *V2 = nullptr;
1893  if ((C1->getValue() & C2->getValue()).isNullValue()) {
1894  // ((V | N) & C1) | (V & C2) --> (V|N) & (C1|C2)
1895  // iff (C1&C2) == 0 and (N&~C1) == 0
1896  if (match(A, m_Or(m_Value(V1), m_Value(V2))) &&
1897  ((V1 == B &&
1898  MaskedValueIsZero(V2, ~C1->getValue(), 0, &I)) || // (V|N)
1899  (V2 == B &&
1900  MaskedValueIsZero(V1, ~C1->getValue(), 0, &I)))) // (N|V)
1901  return BinaryOperator::CreateAnd(A,
1902  Builder.getInt(C1->getValue()|C2->getValue()));
1903  // Or commutes, try both ways.
1904  if (match(B, m_Or(m_Value(V1), m_Value(V2))) &&
1905  ((V1 == A &&
1906  MaskedValueIsZero(V2, ~C2->getValue(), 0, &I)) || // (V|N)
1907  (V2 == A &&
1908  MaskedValueIsZero(V1, ~C2->getValue(), 0, &I)))) // (N|V)
1909  return BinaryOperator::CreateAnd(B,
1910  Builder.getInt(C1->getValue()|C2->getValue()));
1911 
1912  // ((V|C3)&C1) | ((V|C4)&C2) --> (V|C3|C4)&(C1|C2)
1913  // iff (C1&C2) == 0 and (C3&~C1) == 0 and (C4&~C2) == 0.
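  // Illustrative i8 example: ((V | 0x01) & 0x0F) | ((V | 0x10) & 0xF0)
  // --> (V | 0x11) & 0xFF, merging the two bitfield inserts.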
1914  ConstantInt *C3 = nullptr, *C4 = nullptr;
1915  if (match(A, m_Or(m_Value(V1), m_ConstantInt(C3))) &&
1916  (C3->getValue() & ~C1->getValue()).isNullValue() &&
1917  match(B, m_Or(m_Specific(V1), m_ConstantInt(C4))) &&
1918  (C4->getValue() & ~C2->getValue()).isNullValue()) {
1919  V2 = Builder.CreateOr(V1, ConstantExpr::getOr(C3, C4), "bitfield");
1920  return BinaryOperator::CreateAnd(V2,
1921  Builder.getInt(C1->getValue()|C2->getValue()));
1922  }
1923  }
1924 
1925  if (C1->getValue() == ~C2->getValue()) {
1926  Value *X;
1927 
1928  // ((X|B)&C1)|(B&C2) -> (X&C1) | B iff C1 == ~C2
1929  if (match(A, m_c_Or(m_Value(X), m_Specific(B))))
1930  return BinaryOperator::CreateOr(Builder.CreateAnd(X, C1), B);
1931  // (A&C1)|((X|A)&C2) -> (X&C2) | A iff C1 == ~C2
1932  if (match(B, m_c_Or(m_Specific(A), m_Value(X))))
1933  return BinaryOperator::CreateOr(Builder.CreateAnd(X, C2), A);
1934 
1935  // ((X^B)&C1)|(B&C2) -> (X&C1) ^ B iff C1 == ~C2
1936  if (match(A, m_c_Xor(m_Value(X), m_Specific(B))))
1937  return BinaryOperator::CreateXor(Builder.CreateAnd(X, C1), B);
1938  // (A&C1)|((X^A)&C2) -> (X&C2) ^ A iff C1 == ~C2
1939  if (match(B, m_c_Xor(m_Specific(A), m_Value(X))))
1940  return BinaryOperator::CreateXor(Builder.CreateAnd(X, C2), A);
1941  }
1942  }
1943 
1944  // Don't try to form a select if it's unlikely that we'll get rid of at
1945  // least one of the operands. A select is generally more expensive than the
1946  // 'or' that it is replacing.
1947  if (Op0->hasOneUse() || Op1->hasOneUse()) {
1948  // (Cond & C) | (~Cond & D) -> Cond ? C : D, and commuted variants.
1949  if (Value *V = matchSelectFromAndOr(A, C, B, D, Builder))
1950  return replaceInstUsesWith(I, V);
1951  if (Value *V = matchSelectFromAndOr(A, C, D, B, Builder))
1952  return replaceInstUsesWith(I, V);
1953  if (Value *V = matchSelectFromAndOr(C, A, B, D, Builder))
1954  return replaceInstUsesWith(I, V);
1955  if (Value *V = matchSelectFromAndOr(C, A, D, B, Builder))
1956  return replaceInstUsesWith(I, V);
1957  if (Value *V = matchSelectFromAndOr(B, D, A, C, Builder))
1958  return replaceInstUsesWith(I, V);
1959  if (Value *V = matchSelectFromAndOr(B, D, C, A, Builder))
1960  return replaceInstUsesWith(I, V);
1961  if (Value *V = matchSelectFromAndOr(D, B, A, C, Builder))
1962  return replaceInstUsesWith(I, V);
1963  if (Value *V = matchSelectFromAndOr(D, B, C, A, Builder))
1964  return replaceInstUsesWith(I, V);
1965  }
1966  }
1967 
1968  // (A ^ B) | ((B ^ C) ^ A) -> (A ^ B) | C
1969  if (match(Op0, m_Xor(m_Value(A), m_Value(B))))
1970  if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A))))
1971  return BinaryOperator::CreateOr(Op0, C);
1972 
1973  // ((A ^ C) ^ B) | (B ^ A) -> (B ^ A) | C
1974  if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B))))
1975  if (match(Op1, m_Xor(m_Specific(B), m_Specific(A))))
1976  return BinaryOperator::CreateOr(Op1, C);
1977 
1978  // ((B | C) & A) | B -> B | (A & C)
1979  if (match(Op0, m_And(m_Or(m_Specific(Op1), m_Value(C)), m_Value(A))))
1980  return BinaryOperator::CreateOr(Op1, Builder.CreateAnd(A, C));
1981 
1982  if (Instruction *DeMorgan = matchDeMorgansLaws(I, Builder))
1983  return DeMorgan;
1984 
1985  // Canonicalize xor to the RHS.
1986  bool SwappedForXor = false;
1987  if (match(Op0, m_Xor(m_Value(), m_Value()))) {
1988  std::swap(Op0, Op1);
1989  SwappedForXor = true;
1990  }
1991 
1992  // A | ( A ^ B) -> A | B
1993  // A | (~A ^ B) -> A | ~B
1994  // (A & B) | (A ^ B) -> A | B
1995  if (match(Op1, m_Xor(m_Value(A), m_Value(B)))) {
1996  if (Op0 == A || Op0 == B)
1997  return BinaryOperator::CreateOr(A, B);
1998 
1999  if (match(Op0, m_And(m_Specific(A), m_Specific(B))) ||
2000  match(Op0, m_And(m_Specific(B), m_Specific(A))))
2001  return BinaryOperator::CreateOr(A, B);
2002 
2003  if (Op1->hasOneUse() && match(A, m_Not(m_Specific(Op0)))) {
2004  Value *Not = Builder.CreateNot(B, B->getName() + ".not");
2005  return BinaryOperator::CreateOr(Not, Op0);
2006  }
2007  if (Op1->hasOneUse() && match(B, m_Not(m_Specific(Op0)))) {
2008  Value *Not = Builder.CreateNot(A, A->getName() + ".not");
2009  return BinaryOperator::CreateOr(Not, Op0);
2010  }
2011  }
2012 
2013  // A | ~(A | B) -> A | ~B
2014  // A | ~(A ^ B) -> A | ~B
2015  if (match(Op1, m_Not(m_Value(A))))
2016  if (BinaryOperator *B = dyn_cast<BinaryOperator>(A))
2017  if ((Op0 == B->getOperand(0) || Op0 == B->getOperand(1)) &&
2018  Op1->hasOneUse() && (B->getOpcode() == Instruction::Or ||
2019  B->getOpcode() == Instruction::Xor)) {
2020  Value *NotOp = Op0 == B->getOperand(0) ? B->getOperand(1) :
2021  B->getOperand(0);
2022  Value *Not = Builder.CreateNot(NotOp, NotOp->getName() + ".not");
2023  return BinaryOperator::CreateOr(Not, Op0);
2024  }
2025 
2026  if (SwappedForXor)
2027  std::swap(Op0, Op1);
2028 
2029  {
2030  ICmpInst *LHS = dyn_cast<ICmpInst>(Op0);
2031  ICmpInst *RHS = dyn_cast<ICmpInst>(Op1);
2032  if (LHS && RHS)
2033  if (Value *Res = foldOrOfICmps(LHS, RHS, I))
2034  return replaceInstUsesWith(I, Res);
2035 
2036  // TODO: Make this recursive; it's a little tricky because an arbitrary
2037  // number of 'or' instructions might have to be created.
2038  Value *X, *Y;
2039  if (LHS && match(Op1, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
2040  if (auto *Cmp = dyn_cast<ICmpInst>(X))
2041  if (Value *Res = foldOrOfICmps(LHS, Cmp, I))
2042  return replaceInstUsesWith(I, Builder.CreateOr(Res, Y));
2043  if (auto *Cmp = dyn_cast<ICmpInst>(Y))
2044  if (Value *Res = foldOrOfICmps(LHS, Cmp, I))
2045  return replaceInstUsesWith(I, Builder.CreateOr(Res, X));
2046  }
2047  if (RHS && match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
2048  if (auto *Cmp = dyn_cast<ICmpInst>(X))
2049  if (Value *Res = foldOrOfICmps(Cmp, RHS, I))
2050  return replaceInstUsesWith(I, Builder.CreateOr(Res, Y));
2051  if (auto *Cmp = dyn_cast<ICmpInst>(Y))
2052  if (Value *Res = foldOrOfICmps(Cmp, RHS, I))
2053  return replaceInstUsesWith(I, Builder.CreateOr(Res, X));
2054  }
2055  }
2056 
2057  if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
2058  if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
2059  if (Value *Res = foldLogicOfFCmps(LHS, RHS, false))
2060  return replaceInstUsesWith(I, Res);
2061 
2062  if (Instruction *CastedOr = foldCastedBitwiseLogic(I))
2063  return CastedOr;
2064 
2065  // or(sext(A), B) / or(B, sext(A)) --> A ? -1 : B, where A is i1 or <N x i1>.
2066  if (match(Op0, m_OneUse(m_SExt(m_Value(A)))) &&
2067  A->getType()->isIntOrIntVectorTy(1))
2068  return SelectInst::Create(A, ConstantInt::getSigned(I.getType(), -1), Op1);
2069  if (match(Op1, m_OneUse(m_SExt(m_Value(A)))) &&
2070  A->getType()->isIntOrIntVectorTy(1))
2071  return SelectInst::Create(A, ConstantInt::getSigned(I.getType(), -1), Op0);
2072 
2073  // Note: If we've gotten to the point of visiting the outer OR, then the
2074  // inner one couldn't be simplified. If it was a constant, then it won't
2075  // be simplified by a later pass either, so we try swapping the inner/outer
2076  // ORs in the hopes that we'll be able to simplify it this way.
2077  // (X|C) | V --> (X|V) | C
2078  ConstantInt *C1;
2079  if (Op0->hasOneUse() && !isa<ConstantInt>(Op1) &&
2080  match(Op0, m_Or(m_Value(A), m_ConstantInt(C1)))) {
2081  Value *Inner = Builder.CreateOr(A, Op1);
2082  Inner->takeName(Op0);
2083  return BinaryOperator::CreateOr(Inner, C1);
2084  }
2085 
2086  // Change (or (bool?A:B),(bool?C:D)) --> (bool?(or A,C):(or B,D))
2087  // Since this OR statement hasn't been optimized further yet, we hope
2088  // that this transformation will allow the new ORs to be optimized.
2089  {
2090  Value *X = nullptr, *Y = nullptr;
2091  if (Op0->hasOneUse() && Op1->hasOneUse() &&
2092  match(Op0, m_Select(m_Value(X), m_Value(A), m_Value(B))) &&
2093  match(Op1, m_Select(m_Value(Y), m_Value(C), m_Value(D))) && X == Y) {
2094  Value *orTrue = Builder.CreateOr(A, C);
2095  Value *orFalse = Builder.CreateOr(B, D);
2096  return SelectInst::Create(X, orTrue, orFalse);
2097  }
2098  }
2099 
2100  return Changed ? &I : nullptr;
2101 }
2102 
2103 /// A ^ B can be specified using other logic ops in a variety of patterns. We
2104 /// can fold these early and efficiently by morphing an existing instruction.
2105 static Instruction *foldXorToXor(BinaryOperator &I,
2106  InstCombiner::BuilderTy &Builder) {
2107  assert(I.getOpcode() == Instruction::Xor);
2108  Value *Op0 = I.getOperand(0);
2109  Value *Op1 = I.getOperand(1);
2110  Value *A, *B;
2111 
2112  // There are 4 commuted variants for each of the basic patterns.
2113 
2114  // (A & B) ^ (A | B) -> A ^ B
2115  // (A & B) ^ (B | A) -> A ^ B
2116  // (A | B) ^ (A & B) -> A ^ B
2117  // (A | B) ^ (B & A) -> A ^ B
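  // These hold because (A & B) and (A | B) agree exactly in the bit positions
  // where A == B, so xor-ing them leaves A ^ B.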
2118  if ((match(Op0, m_And(m_Value(A), m_Value(B))) &&
2119  match(Op1, m_c_Or(m_Specific(A), m_Specific(B)))) ||
2120  (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
2121  match(Op1, m_c_And(m_Specific(A), m_Specific(B))))) {
2122  I.setOperand(0, A);
2123  I.setOperand(1, B);
2124  return &I;
2125  }
2126 
2127  // (A | ~B) ^ (~A | B) -> A ^ B
2128  // (~B | A) ^ (~A | B) -> A ^ B
2129  // (~A | B) ^ (A | ~B) -> A ^ B
2130  // (B | ~A) ^ (A | ~B) -> A ^ B
2131  if ((match(Op0, m_Or(m_Value(A), m_Not(m_Value(B)))) &&
2132  match(Op1, m_c_Or(m_Not(m_Specific(A)), m_Specific(B)))) ||
2133  (match(Op0, m_Or(m_Not(m_Value(A)), m_Value(B))) &&
2134  match(Op1, m_c_Or(m_Specific(A), m_Not(m_Specific(B)))))) {
2135  I.setOperand(0, A);
2136  I.setOperand(1, B);
2137  return &I;
2138  }
2139 
2140  // (A & ~B) ^ (~A & B) -> A ^ B
2141  // (~B & A) ^ (~A & B) -> A ^ B
2142  // (~A & B) ^ (A & ~B) -> A ^ B
2143  // (B & ~A) ^ (A & ~B) -> A ^ B
2144  if ((match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) &&
2145  match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))) ||
2146  (match(Op0, m_And(m_Not(m_Value(A)), m_Value(B))) &&
2147  match(Op1, m_c_And(m_Specific(A), m_Not(m_Specific(B)))))) {
2148  I.setOperand(0, A);
2149  I.setOperand(1, B);
2150  return &I;
2151  }
2152 
2153  // For the remaining cases we need to get rid of one of the operands.
2154  if (!Op0->hasOneUse() && !Op1->hasOneUse())
2155  return nullptr;
2156 
2157  // (A | B) ^ ~(A & B) -> ~(A ^ B)
2158  // (A | B) ^ ~(B & A) -> ~(A ^ B)
2159  // (A & B) ^ ~(A | B) -> ~(A ^ B)
2160  // (A & B) ^ ~(B | A) -> ~(A ^ B)
2161  // Complexity sorting ensures the not will be on the right side.
2162  if ((match(Op0, m_Or(m_Value(A), m_Value(B))) &&
2163  match(Op1, m_Not(m_c_And(m_Specific(A), m_Specific(B))))) ||
2164  (match(Op0, m_And(m_Value(A), m_Value(B))) &&
2165  match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B))))))
2166  return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
2167 
2168  return nullptr;
2169 }
2170 
2171 Value *InstCombiner::foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
2172  if (PredicatesFoldable(LHS->getPredicate(), RHS->getPredicate())) {
2173  if (LHS->getOperand(0) == RHS->getOperand(1) &&
2174  LHS->getOperand(1) == RHS->getOperand(0))
2175  LHS->swapOperands();
2176  if (LHS->getOperand(0) == RHS->getOperand(0) &&
2177  LHS->getOperand(1) == RHS->getOperand(1)) {
2178  // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
2179  Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
2180  unsigned Code = getICmpCode(LHS) ^ getICmpCode(RHS);
2181  bool isSigned = LHS->isSigned() || RHS->isSigned();
2182  return getNewICmpValue(isSigned, Code, Op0, Op1, Builder);
2183  }
2184  }
2185 
2186  // Instead of trying to imitate the folds for and/or, decompose this 'xor'
2187  // into those logic ops. That is, try to turn this into an and-of-icmps
2188  // because we have many folds for that pattern.
2189  //
2190  // This is based on a truth table definition of xor:
2191  // X ^ Y --> (X | Y) & !(X & Y)
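  // Illustrative example: (icmp sgt X, 0) ^ (icmp sgt X, 5). The 'or' of the
  // two compares simplifies to (icmp sgt X, 0) and the 'and' simplifies to
  // (icmp sgt X, 5), so the xor becomes (icmp sgt X, 0) & (icmp sle X, 5).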
2192  if (Value *OrICmp = SimplifyBinOp(Instruction::Or, LHS, RHS, SQ)) {
2193  // TODO: If OrICmp is true, then the definition of xor simplifies to !(X&Y).
2194  // TODO: If OrICmp is false, the whole thing is false (InstSimplify?).
2195  if (Value *AndICmp = SimplifyBinOp(Instruction::And, LHS, RHS, SQ)) {
2196  // TODO: Independently handle cases where the 'and' side is a constant.
2197  if (OrICmp == LHS && AndICmp == RHS && RHS->hasOneUse()) {
2198  // (LHS | RHS) & !(LHS & RHS) --> LHS & !RHS
2199  RHS->setPredicate(RHS->getInversePredicate());
2200  return Builder.CreateAnd(LHS, RHS);
2201  }
2202  if (OrICmp == RHS && AndICmp == LHS && LHS->hasOneUse()) {
2203  // !(LHS & RHS) & (LHS | RHS) --> !LHS & RHS
2204  LHS->setPredicate(LHS->getInversePredicate());
2205  return Builder.CreateAnd(LHS, RHS);
2206  }
2207  }
2208  }
2209 
2210  return nullptr;
2211 }
2212 
2213 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
2214 // here. We should standardize that construct where it is needed or choose some
2215 // other way to ensure that commutated variants of patterns are not missed.
2216 Instruction *InstCombiner::visitXor(BinaryOperator &I) {
2217  bool Changed = SimplifyAssociativeOrCommutative(I);
2218  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2219 
2220  if (Value *V = SimplifyVectorOp(I))
2221  return replaceInstUsesWith(I, V);
2222 
2223  if (Value *V = SimplifyXorInst(Op0, Op1, SQ.getWithInstruction(&I)))
2224  return replaceInstUsesWith(I, V);
2225 
2226  if (Instruction *NewXor = foldXorToXor(I, Builder))
2227  return NewXor;
2228 
2229  // (A&B)^(A&C) -> A&(B^C) etc
2230  if (Value *V = SimplifyUsingDistributiveLaws(I))
2231  return replaceInstUsesWith(I, V);
2232 
2233  // See if we can simplify any instructions used by the instruction whose sole
2234  // purpose is to compute bits we don't care about.
2235  if (SimplifyDemandedInstructionBits(I))
2236  return &I;
2237 
2238  if (Value *V = SimplifyBSwap(I, Builder))
2239  return replaceInstUsesWith(I, V);
2240 
2241  // Apply DeMorgan's Law for 'nand' / 'nor' logic with an inverted operand.
2242  Value *X, *Y;
2243 
2244  // The inner and/or must have one use so that these transforms do not
2245  // increase the instruction count.
2246  // ~(~X & Y) --> (X | ~Y)
2247  // ~(Y & ~X) --> (X | ~Y)
2248  if (match(&I, m_Not(m_OneUse(m_c_And(m_Not(m_Value(X)), m_Value(Y)))))) {
2249  Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
2250  return BinaryOperator::CreateOr(X, NotY);
2251  }
2252  // ~(~X | Y) --> (X & ~Y)
2253  // ~(Y | ~X) --> (X & ~Y)
2254  if (match(&I, m_Not(m_OneUse(m_c_Or(m_Not(m_Value(X)), m_Value(Y)))))) {
2255  Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
2256  return BinaryOperator::CreateAnd(X, NotY);
2257  }
2258 
2259  // Is this a 'not' (~) fed by a binary operator?
2260  BinaryOperator *NotVal;
2261  if (match(&I, m_Not(m_BinOp(NotVal)))) {
2262  if (NotVal->getOpcode() == Instruction::And ||
2263  NotVal->getOpcode() == Instruction::Or) {
2264  // Apply DeMorgan's Law when inverts are free:
2265  // ~(X & Y) --> (~X | ~Y)
2266  // ~(X | Y) --> (~X & ~Y)
2267  if (IsFreeToInvert(NotVal->getOperand(0),
2268  NotVal->getOperand(0)->hasOneUse()) &&
2269  IsFreeToInvert(NotVal->getOperand(1),
2270  NotVal->getOperand(1)->hasOneUse())) {
2271  Value *NotX = Builder.CreateNot(NotVal->getOperand(0), "notlhs");
2272  Value *NotY = Builder.CreateNot(NotVal->getOperand(1), "notrhs");
2273  if (NotVal->getOpcode() == Instruction::And)
2274  return BinaryOperator::CreateOr(NotX, NotY);
2275  return BinaryOperator::CreateAnd(NotX, NotY);
2276  }
2277  }
2278 
2279  // ~(~X >>s Y) --> (X >>s Y)
2280  if (match(NotVal, m_AShr(m_Not(m_Value(X)), m_Value(Y))))
2281  return BinaryOperator::CreateAShr(X, Y);
2282 
2283  // If we are inverting a right-shifted constant, we may be able to eliminate
2284  // the 'not' by inverting the constant and using the opposite shift type.
2285  // Canonicalization rules ensure that only a negative constant uses 'ashr',
2286  // but we must check that in case that transform has not fired yet.
2287  Constant *C;
2288  if (match(NotVal, m_AShr(m_Constant(C), m_Value(Y))) &&
2289  match(C, m_Negative())) {
2290  // ~(C >>s Y) --> ~C >>u Y (when inverting the replicated sign bits)
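      // e.g. (illustrative, i8): C == 0xF0, Y == 2: ~(0xF0 >>s 2) == ~0xFC ==
      // 0x03, which equals (~0xF0) >>u 2 == 0x0F >>u 2 == 0x03.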
2291  Constant *NotC = ConstantExpr::getNot(C);
2292  return BinaryOperator::CreateLShr(NotC, Y);
2293  }
2294 
2295  if (match(NotVal, m_LShr(m_Constant(C), m_Value(Y))) &&
2296  match(C, m_NonNegative())) {
2297  // ~(C >>u Y) --> ~C >>s Y (when inverting the replicated sign bits)
2298  Constant *NotC = ConstantExpr::getNot(C);
2299  return BinaryOperator::CreateAShr(NotC, Y);
2300  }
2301  }
2302 
2303  // not (cmp A, B) = !cmp A, B
2304  CmpInst::Predicate Pred;
2305  if (match(&I, m_Not(m_OneUse(m_Cmp(Pred, m_Value(), m_Value()))))) {
2306  cast<CmpInst>(Op0)->setPredicate(CmpInst::getInversePredicate(Pred));
2307  return replaceInstUsesWith(I, Op0);
2308  }
2309 
2310  {
2311  const APInt *RHSC;
2312  if (match(Op1, m_APInt(RHSC))) {
2313  Value *X;
2314  const APInt *C;
2315  if (match(Op0, m_Sub(m_APInt(C), m_Value(X)))) {
2316  // ~(C-X) == X-C-1 == X+(-C-1)
2317  if (RHSC->isAllOnesValue()) {
2318  Constant *NewC = ConstantInt::get(I.getType(), -(*C) - 1);
2319  return BinaryOperator::CreateAdd(X, NewC);
2320  }
2321  if (RHSC->isSignMask()) {
2322  // (C - X) ^ signmask -> (C + signmask - X)
2323  Constant *NewC = ConstantInt::get(I.getType(), *C + *RHSC);
2324  return BinaryOperator::CreateSub(NewC, X);
2325  }
2326  } else if (match(Op0, m_Add(m_Value(X), m_APInt(C)))) {
2327  // ~(X+C) --> (-C-1)-X
2328  if (RHSC->isAllOnesValue()) {
2329  Constant *NewC = ConstantInt::get(I.getType(), -(*C) - 1);
2330  return BinaryOperator::CreateSub(NewC, X);
2331  }
2332  if (RHSC->isSignMask()) {
2333  // (X + C) ^ signmask -> (X + C + signmask)
2334  Constant *NewC = ConstantInt::get(I.getType(), *C + *RHSC);
2335  return BinaryOperator::CreateAdd(X, NewC);
2336  }
2337  }
2338 
2339  // (X|C1)^C2 -> X^(C1^C2) iff X&C1 == 0
2340  if (match(Op0, m_Or(m_Value(X), m_APInt(C))) &&
2341  MaskedValueIsZero(X, *C, 0, &I)) {
2342  Constant *NewC = ConstantInt::get(I.getType(), *C ^ *RHSC);
2343  Worklist.Add(cast<Instruction>(Op0));
2344  I.setOperand(0, X);
2345  I.setOperand(1, NewC);
2346  return &I;
2347  }
2348  }
2349  }
2350 
2351  if (ConstantInt *RHSC = dyn_cast<ConstantInt>(Op1)) {
2352  if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
2353  if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
2354  if (Op0I->getOpcode() == Instruction::LShr) {
2355  // ((X^C1) >> C2) ^ C3 -> (X>>C2) ^ ((C1>>C2)^C3)
2356  // E1 = "X ^ C1"
2357  BinaryOperator *E1;
2358  ConstantInt *C1;
2359  if (Op0I->hasOneUse() &&
2360  (E1 = dyn_cast<BinaryOperator>(Op0I->getOperand(0))) &&
2361  E1->getOpcode() == Instruction::Xor &&
2362  (C1 = dyn_cast<ConstantInt>(E1->getOperand(1)))) {
2363  // fold (C1 >> C2) ^ C3
2364  ConstantInt *C2 = Op0CI, *C3 = RHSC;
2365  APInt FoldConst = C1->getValue().lshr(C2->getValue());
2366  FoldConst ^= C3->getValue();
2367  // Prepare the two operands.
2368  Value *Opnd0 = Builder.CreateLShr(E1->getOperand(0), C2);
2369  Opnd0->takeName(Op0I);
2370  cast<Instruction>(Opnd0)->setDebugLoc(I.getDebugLoc());
2371  Value *FoldVal = ConstantInt::get(Opnd0->getType(), FoldConst);
2372 
2373  return BinaryOperator::CreateXor(Opnd0, FoldVal);
2374  }
2375  }
2376  }
2377  }
2378  }
2379 
2380  if (isa<Constant>(Op1))
2381  if (Instruction *FoldedLogic = foldOpWithConstantIntoOperand(I))
2382  return FoldedLogic;
2383 
2384  {
2385  Value *A, *B;
2386  if (match(Op1, m_OneUse(m_Or(m_Value(A), m_Value(B))))) {
2387  if (A == Op0) { // A^(A|B) == A^(B|A)
2388  cast<BinaryOperator>(Op1)->swapOperands();
2389  std::swap(A, B);
2390  }
2391  if (B == Op0) { // A^(B|A) == (B|A)^A
2392  I.swapOperands(); // Simplified below.
2393  std::swap(Op0, Op1);
2394  }
2395  } else if (match(Op1, m_OneUse(m_And(m_Value(A), m_Value(B))))) {
2396  if (A == Op0) { // A^(A&B) -> A^(B&A)
2397  cast<BinaryOperator>(Op1)->swapOperands();
2398  std::swap(A, B);
2399  }
2400  if (B == Op0) { // A^(B&A) -> (B&A)^A
2401  I.swapOperands(); // Simplified below.
2402  std::swap(Op0, Op1);
2403  }
2404  }
2405  }
2406 
2407  {
2408  Value *A, *B;
2409  if (match(Op0, m_OneUse(m_Or(m_Value(A), m_Value(B))))) {
2410  if (A == Op1) // (B|A)^B == (A|B)^B
2411  std::swap(A, B);
2412  if (B == Op1) // (A|B)^B == A & ~B
2413  return BinaryOperator::CreateAnd(A, Builder.CreateNot(Op1));
2414  } else if (match(Op0, m_OneUse(m_And(m_Value(A), m_Value(B))))) {
2415  if (A == Op1) // (A&B)^A -> (B&A)^A
2416  std::swap(A, B);
2417  const APInt *C;
2418  if (B == Op1 && // (B&A)^A == ~B & A
2419  !match(Op1, m_APInt(C))) { // Canonical form is (B&C)^C
2420  return BinaryOperator::CreateAnd(Builder.CreateNot(A), Op1);
2421  }
2422  }
2423  }
2424 
2425  {
2426  Value *A, *B, *C, *D;
2427  // (A ^ C)^(A | B) -> ((~A) & B) ^ C
2428  if (match(Op0, m_Xor(m_Value(D), m_Value(C))) &&
2429  match(Op1, m_Or(m_Value(A), m_Value(B)))) {
2430  if (D == A)
2431  return BinaryOperator::CreateXor(
2432  Builder.CreateAnd(Builder.CreateNot(A), B), C);
2433  if (D == B)
2434  return BinaryOperator::CreateXor(
2435  Builder.CreateAnd(Builder.CreateNot(B), A), C);
2436  }
2437  // (A | B)^(A ^ C) -> ((~A) & B) ^ C
2438  if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
2439  match(Op1, m_Xor(m_Value(D), m_Value(C)))) {
2440  if (D == A)
2441  return BinaryOperator::CreateXor(
2442  Builder.CreateAnd(Builder.CreateNot(A), B), C);
2443  if (D == B)
2444  return BinaryOperator::CreateXor(
2445  Builder.CreateAnd(Builder.CreateNot(B), A), C);
2446  }
2447  // (A & B) ^ (A ^ B) -> (A | B)
2448  if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
2449  match(Op1, m_c_Xor(m_Specific(A), m_Specific(B))))
2450  return BinaryOperator::CreateOr(A, B);
2451  // (A ^ B) ^ (A & B) -> (A | B)
2452  if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
2453  match(Op1, m_c_And(m_Specific(A), m_Specific(B))))
2454  return BinaryOperator::CreateOr(A, B);
2455  }
2456 
2457  // (A & ~B) ^ ~A -> ~(A & B)
2458  // (~B & A) ^ ~A -> ~(A & B)
2459  Value *A, *B;
2460  if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
2461  match(Op1, m_Not(m_Specific(A))))
2462  return BinaryOperator::CreateNot(Builder.CreateAnd(A, B));
2463 
2464  if (auto *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
2465  if (auto *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
2466  if (Value *V = foldXorOfICmps(LHS, RHS))
2467  return replaceInstUsesWith(I, V);
2468 
2469  if (Instruction *CastedXor = foldCastedBitwiseLogic(I))
2470  return CastedXor;
2471 
2472  // Canonicalize the shifty way to code absolute value to the common pattern.
2473  // There are 4 potential commuted variants. Move the 'ashr' candidate to Op1.
2474  // We're relying on the fact that we only do this transform when the shift has
2475  // exactly 2 uses and the add has exactly 1 use (otherwise, we might increase
2476  // instructions).
2477  if (Op0->getNumUses() == 2)
2478  std::swap(Op0, Op1);
2479 
2480  const APInt *ShAmt;
2481  Type *Ty = I.getType();
2482  if (match(Op1, m_AShr(m_Value(A), m_APInt(ShAmt))) &&
2483  Op1->getNumUses() == 2 && *ShAmt == Ty->getScalarSizeInBits() - 1 &&
2484  match(Op0, m_OneUse(m_c_Add(m_Specific(A), m_Specific(Op1))))) {
2485  // B = ashr i32 A, 31 ; smear the sign bit
2486  // xor (add A, B), B ; add -1 and flip bits if negative
2487  // --> (A < 0) ? -A : A
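    // Worked example (illustrative): for A == -7, B == -1, so
    // (A + B) ^ B == -8 ^ -1 == 7; for A == 7, B == 0 and the result is 7,
    // matching (A < 0) ? -A : A.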
2488  Value *Cmp = Builder.CreateICmpSLT(A, ConstantInt::getNullValue(Ty));
2489  return SelectInst::Create(Cmp, Builder.CreateNeg(A), A);
2490  }
2491 
2492  return Changed ? &I : nullptr;
2493 }
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
Definition: PatternMatch.h:615
uint64_t CallInst * C
IntegerType * getType() const
getType - Specialize the getType() method to always return an IntegerType, which reduces the amount o...
Definition: Constants.h:172
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of nonnegative values.
Definition: PatternMatch.h:355
static ConstantInt * getFalse(LLVMContext &Context)
Definition: Constants.cpp:548
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr)
Return true if the given value is known to have exactly one bit set when defined. ...
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:72
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1677
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
bool isSignMask() const
Check if the APInt&#39;s value is returned by getSignMask.
Definition: APInt.h:466
Type * getSrcTy() const
Return the source type, as a convenience.
Definition: InstrTypes.h:818
static bool IsFreeToInvert(Value *V, bool WillInvertAllUses)
Return true if the specified value is free to invert (apply ~ to).
class_match< CmpInst > m_Cmp()
Matches any compare instruction and ignore it.
Definition: PatternMatch.h:80
static Value * getFCmpValue(unsigned Code, Value *LHS, Value *RHS, InstCombiner::BuilderTy &Builder)
This is the complement of getFCmpCode, which turns an opcode and two operands into either a FCmp inst...
Instruction * visitXor(BinaryOperator &I)
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:1151
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
Definition: PatternMatch.h:555
Instruction * visitOr(BinaryOperator &I)
static BinaryOperator * CreateNot(Value *Op, const Twine &Name="", Instruction *InsertBefore=nullptr)
Value * CreateZExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a ZExt or Trunc from the integer value V to DestTy.
Definition: IRBuilder.h:1434
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition: InstrTypes.h:1095
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
BinaryOps getOpcode() const
Definition: InstrTypes.h:523
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1138
bool decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate &Pred, Value *&X, APInt &Mask, bool LookThroughTrunc=true)
Decompose an icmp into the form ((X & Mask) pred 0) if possible.
match_zero m_Zero()
Match an arbitrary zero/null constant.
Definition: PatternMatch.h:145
static unsigned getFCmpCode(FCmpInst::Predicate CC)
Similar to getICmpCode but for FCmpInst.
This class represents zero extension of integer types.
unsigned getICmpCode(const ICmpInst *ICI, bool InvertPred=false)
Encode a icmp predicate into a three bit mask.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
Definition: PatternMatch.h:567
static bool areInverseVectorBitmasks(Constant *C1, Constant *C2)
If all elements of two constant vectors are 0/-1 and inverses, return true.
APInt zext(unsigned width) const
Zero extend to a new width.
Definition: APInt.cpp:866
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Get a value with low bits set.
Definition: APInt.h:641
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
Definition: PatternMatch.h:91
m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)
unsigned less or equal
Definition: InstrTypes.h:879
unsigned less than
Definition: InstrTypes.h:878
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
Definition: PatternMatch.h:645
0 1 0 0 True if ordered and less than
Definition: InstrTypes.h:859
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", Instruction *InsertBefore=nullptr, Instruction *MDFrom=nullptr)
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:869
bool sgt(const APInt &RHS) const
Signed greather than comparison.
Definition: APInt.h:1253
MaskedICmpType
Classify (icmp eq (A & B), C) and (icmp ne (A & B), C) as matching patterns that can be simplified...
static Value * SimplifyBSwap(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
Transform BITWISE_OP(BSWAP(A),BSWAP(B)) or BITWISE_OP(BSWAP(A), Constant) to BSWAP(BITWISE_OP(A, B))
F(f)
This class represents a sign extension of integer types.
#define R2(n)
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:227
bool sle(const APInt &RHS) const
Signed less or equal comparison.
Definition: APInt.h:1218
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
Definition: Instruction.h:161
Value * SimplifyOrInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an Or, fold the result or return null.
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
Definition: PatternMatch.h:807
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:130
static Constant * getNullValue(Type *Ty)
Constructor to create a &#39;0&#39; constant of arbitrary type.
Definition: Constants.cpp:232
bool swapOperands()
Exchange the two operands to this instruction.
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:864
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:863
bool isSigned() const
Determine if this instruction is using a signed comparison.
Definition: InstrTypes.h:1022
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
Definition: PatternMatch.h:627
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
Definition: InstrTypes.h:951
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:560
CastClass_match< OpTy, Instruction::Trunc > m_Trunc(const OpTy &Op)
Matches Trunc.
Definition: PatternMatch.h:953
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:197
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:707
static Constant * AddOne(Constant *C)
Add one to a Constant.
0 1 0 1 True if ordered and less than or equal
Definition: InstrTypes.h:860
static Constant * getSExt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:1592
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:932
not_match< LHS > m_Not(const LHS &L)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
Definition: PatternMatch.h:543
static Constant * getZExt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:1606
static Value * peekThroughBitcast(Value *V, bool OneUseOnly=false)
Return the source operand of a potentially bitcasted value while optionally checking if it has one us...
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:1487
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
Definition: InstrTypes.h:813
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:245
CastClass_match< OpTy, Instruction::ZExt > m_ZExt(const OpTy &Op)
Matches ZExt.
Definition: PatternMatch.h:965
This instruction compares its operands according to the predicate given to the constructor.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
Definition: PatternMatch.h:83
static Value * foldAndOrOfEqualityCmpsWithConstants(ICmpInst *LHS, ICmpInst *RHS, bool JoinedByAnd, InstCombiner::BuilderTy &Builder)
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition: Constants.h:138
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition: Constants.h:209
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:203
SelectClass_match< Cond, LHS, RHS > m_Select(const Cond &C, const LHS &L, const RHS &R)
Definition: PatternMatch.h:910
unsigned getBitWidth() const
Get the number of bits in this IntegerType.
Definition: DerivedTypes.h:66
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:301
Function * getDeclaration(Module *M, ID id, ArrayRef< Type *> Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:980
static BinaryOperator * CreateAdd(Value *S1, Value *S2, const Twine &Name, Instruction *InsertBefore, Value *FlagsOp)
Value * getOperand(unsigned i) const
Definition: User.h:154
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1122
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
Definition: Constants.cpp:302
Value * CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:1684
OneUse_match< T > m_OneUse(const T &SubPattern)
Definition: PatternMatch.h:63
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
Definition: PatternMatch.h:639
static Instruction * foldOrToXor(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
bool isAllOnesValue() const
Determine if all bits are set.
Definition: APInt.h:389
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt...
Definition: PatternMatch.h:260
static Value * getSelectCondition(Value *A, Value *B, InstCombiner::BuilderTy &Builder)
We have an expression of the form (A & C) | (B & D).
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
static unsigned getMaskedICmpType(Value *A, Value *B, Value *C, ICmpInst::Predicate Pred)
Return the set of patterns (from MaskedICmpType) that (icmp SCC (A & B), C) satisfies.
static bool canNarrowShiftAmt(Constant *C, unsigned BitWidth)
Return true if a constant shift amount is always less than the specified bit-width.
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:46
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
Definition: PatternMatch.h:621
bool ult(const APInt &RHS) const
Unsigned less than comparison.
Definition: APInt.h:1164
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This is an important base class in LLVM.
Definition: Constant.h:42
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Definition: IRBuilder.h:1732
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr)
Return true if &#39;V & Mask&#39; is known to be zero.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
static Constant * getAnd(Constant *C1, Constant *C2)
Definition: Constants.cpp:2210
bool isOneValue() const
Determine if this is a value of 1.
Definition: APInt.h:404
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
Definition: PatternMatch.h:423
bool isMinSignedValue() const
Determine if this is the smallest signed value.
Definition: APInt.h:436
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
Definition: PatternMatch.h:633
This instruction compares its operands according to the predicate given to the constructor.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:853
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:862
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
Definition: PatternMatch.h:75
static unsigned conjugateICmpMask(unsigned Mask)
Convert an analysis of a masked ICmp into its equivalent if all boolean operations had the opposite s...
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
Definition: APInt.h:443
static Constant * getNot(Constant *C)
Definition: Constants.cpp:2145
static Constant * getAllOnesValue(Type *Ty)
Get the all ones value.
Definition: Constants.cpp:286
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:870
static Value * getNewICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS, InstCombiner::BuilderTy &Builder)
This is the complement of getICmpCode, which turns an opcode and two operands into either a constant ...
void swapOperands()
Exchange the two operands to this instruction in such a way that it does not modify the semantics of ...
static Instruction * matchDeMorgansLaws(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
Match De Morgan&#39;s Laws: (~A & ~B) == (~(A | B)) (~A | ~B) == (~(A & B))
1 1 0 1 True if unordered, less than, or equal
Definition: InstrTypes.h:868
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition: APInt.h:959
signed greater than
Definition: InstrTypes.h:880
Instruction * visitAnd(BinaryOperator &I)
CastClass_match< OpTy, Instruction::SExt > m_SExt(const OpTy &Op)
Matches SExt.
Definition: PatternMatch.h:959
0 0 1 0 True if ordered and greater than
Definition: InstrTypes.h:857
cst_pred_ty< is_negative > m_Negative()
Match an integer or vector of negative values.
Definition: PatternMatch.h:347
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
This is the shared class of boolean and integer constants.
Definition: Constants.h:84
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type...
Definition: Type.cpp:130
This is a &#39;vector&#39; (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:862
1 1 0 0 True if unordered or less than
Definition: InstrTypes.h:867
signed less than
Definition: InstrTypes.h:882
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:383
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:1578
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:585
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
Definition: Constants.cpp:599
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
Type * getDestTy() const
Return the destination type, as a convenience.
Definition: InstrTypes.h:820
static ConstantInt * getTrue(LLVMContext &Context)
Definition: Constants.cpp:541
bool PredicatesFoldable(CmpInst::Predicate p1, CmpInst::Predicate p2)
Return true if both predicates match sign or if at least one of them is an equality comparison (which...
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Definition: InstrTypes.h:932
void setOperand(unsigned i, Value *Val)
Definition: User.h:159
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:923
unsigned getVectorNumElements() const
Definition: DerivedTypes.h:462
signed less or equal
Definition: InstrTypes.h:883
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr if the function does not...
Definition: Instruction.cpp:55
Class for arbitrary precision integers.
Definition: APInt.h:69
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition: APInt.h:1202
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition: APInt.h:457
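A small standalone sketch of the APInt helpers listed above (values are arbitrary; llvm/ADT/APInt.h is assumed to be included):
  llvm::APInt A(32, 8);           // 32-bit value 8
  bool IsPow2 = A.isPowerOf2();   // true
  llvm::APInt B = A.lshr(2);      // logical shift right -> 2
  bool LE = B.ule(A);             // unsigned <= comparison -> true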
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), Instruction *InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
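A hedged usage sketch; X, Y, and InsertPt are assumed to exist in the caller, and the name string is arbitrary:
  // Creates "X & Y" and inserts it immediately before InsertPt.
  llvm::BinaryOperator *And = llvm::BinaryOperator::Create(
      llvm::Instruction::And, X, Y, "and.example", InsertPt);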
void removeFromParent()
This method unlinks &#39;this&#39; from the containing basic block, but does not delete it.
Definition: Instruction.cpp:63
static Instruction * foldAndToXor(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
Value * SimplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an And, fold the result or return null.
static Value * foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, llvm::InstCombiner::BuilderTy &Builder)
Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E) into a single (icmp(A & X) ==/!= Y)...
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
static unsigned getMaskedTypeForICmpPair(Value *&A, Value *&B, Value *&C, Value *&D, Value *&E, ICmpInst *LHS, ICmpInst *RHS, ICmpInst::Predicate &PredL, ICmpInst::Predicate &PredR)
Handle (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E).
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
Definition: APInt.h:1234
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:927
unsigned getNumUses() const
This method computes the number of uses of this Value.
Definition: Value.cpp:170
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:285
Value * getICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS, CmpInst::Predicate &NewICmpPred)
This is the complement of getICmpCode, which turns an opcode and two operands into either a constant ...
unsigned greater or equal
Definition: InstrTypes.h:877
match_one m_One()
Match an integer 1 or a vector with all elements equal to 1.
Definition: PatternMatch.h:194
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:224
bool isEquality() const
Return true if this predicate is either EQ or NE.
#define I(x, y, z)
Definition: MD5.cpp:58
#define N
static Constant * getOr(Constant *C1, Constant *C2)
Definition: Constants.cpp:2214
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition: Constants.h:193
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:861
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:323
Value * SimplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an Xor, fold the result or return null.
1 0 1 0 True if unordered or greater than
Definition: InstrTypes.h:865
APInt byteSwap() const
Definition: APInt.cpp:618
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1106
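Illustrative only; Builder is assumed to be an already-positioned IRBuilder, and X, Y are placeholder values:
  // Constant-folds when both operands are constants, otherwise emits an 'and' instruction.
  llvm::Value *Masked = Builder.CreateAnd(X, Y, "masked");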
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition: APInt.h:430
Value * SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef())
match_all_ones m_AllOnes()
Match an integer or vector with all bits set to true.
Definition: PatternMatch.h:205
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:856
LLVM Value Representation.
Definition: Value.h:73
This file provides internal interfaces used to implement the InstCombine.
1 0 1 1 True if unordered, greater than, or equal
Definition: InstrTypes.h:866
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:81
bool hasOneUse() const
Return true if there is exactly one user of this value.
Definition: Value.h:418
unsigned greater than
Definition: InstrTypes.h:876
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition: InstrTypes.h:967
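A brief sketch of the predicate swap: when the compared operands trade places, the predicate must be mirrored for the comparison to keep its meaning.
  // (A <s B) is the same comparison as (B >s A).
  llvm::CmpInst::Predicate P = llvm::CmpInst::ICMP_SLT;
  llvm::CmpInst::Predicate Q = llvm::CmpInst::getSwappedPredicate(P); // ICMP_SGT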
bool isNonNegative() const
Returns true if this value is known to be non-negative.
Definition: KnownBits.h:99
Value * simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1, bool Inverted)
Try to fold a signed range checked with lower bound 0 to an unsigned icmp.
bool recognizeBSwapOrBitReverseIdiom(Instruction *I, bool MatchBSwaps, bool MatchBitReversals, SmallVectorImpl< Instruction *> &InsertedInsts)
Try to match a bswap or bitreverse idiom.
Definition: Local.cpp:2351
0 0 1 1 True if ordered and greater than or equal
Definition: InstrTypes.h:858
static Value * matchSelectFromAndOr(Value *A, Value *C, Value *B, Value *D, InstCombiner::BuilderTy &Builder)
We have an expression of the form (A & C) | (B & D).
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, OptimizationRemarkEmitter *ORE=nullptr)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
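A hedged sketch of a typical call, assuming llvm/Analysis/ValueTracking.h and llvm/Support/KnownBits.h are available; V and DL are placeholders for a value and its module's DataLayout, and the optional analyses (AC, CxtI, DT, ORE) are simply omitted:
  llvm::KnownBits Known(V->getType()->getScalarSizeInBits());
  llvm::computeKnownBits(V, Known, DL);
  if (Known.isNonNegative()) {
    // The sign bit of V is known to be clear here.
  }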
static Instruction * foldLogicCastConstant(BinaryOperator &Logic, CastInst *Cast, InstCombiner::BuilderTy &Builder)
Fold {and,or,xor} (cast X), C.
static Constant * SubOne(Constant *C)
Subtract one from a Constant.
static Instruction * foldXorToXor(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
A ^ B can be specified using other logic ops in a variety of patterns.
bool isNullValue() const
Determine if all bits are clear.
Definition: APInt.h:399
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:855
signed greater or equal
Definition: InstrTypes.h:881
static Constant * getXor(Constant *C1, Constant *C2)
Definition: Constants.cpp:2218
CallInst * CreateCall(Value *Callee, ArrayRef< Value *> Args=None, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:1702