LLVM 23.0.0git
InstructionSimplify.cpp
Go to the documentation of this file.
1//===- InstructionSimplify.cpp - Fold instruction operands ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements routines for folding instructions into simpler forms
10// that do not require creating new instructions. This does constant folding
11// ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
12// returning a constant ("and i32 %x, 0" -> "0") or an already existing value
13// ("and i32 %x, %x" -> "%x"). All operands are assumed to have already been
14// simplified: This is usually true and assuming it simplifies the logic (if
15// they have not been simplified then results are correct but maybe suboptimal).
16//
17//===----------------------------------------------------------------------===//
18
20
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SetVector.h"
23#include "llvm/ADT/Statistic.h"
31#include "llvm/Analysis/Loads.h"
40#include "llvm/IR/DataLayout.h"
41#include "llvm/IR/Dominators.h"
42#include "llvm/IR/InstrTypes.h"
44#include "llvm/IR/IntrinsicsAArch64.h"
45#include "llvm/IR/Operator.h"
47#include "llvm/IR/Statepoint.h"
50#include <algorithm>
51#include <optional>
52using namespace llvm;
53using namespace llvm::PatternMatch;
54
55#define DEBUG_TYPE "instsimplify"
56
// Cap on the mutual-recursion depth of the simplify* helpers below; each
// recursive step decrements its MaxRecurse budget and bails at zero.
57enum { RecursionLimit = 3 };
58
// Pass statistics (see llvm/ADT/Statistic.h): counters bumped when an
// expansion or reassociation transform actually fires.
59STATISTIC(NumExpand, "Number of expansions");
60STATISTIC(NumReassoc, "Number of reassociations");
61
// Forward declarations of the mutually-recursive simplify* helpers defined
// later in this file.
// NOTE(review): several declaration opening lines (original lines 71, 73, 80,
// 82, 84) were dropped by the doc extractor, so some parameter lists below
// appear without the declaration they belong to — confirm against the
// repository source.
62static Value *simplifyAndInst(Value *, Value *, const SimplifyQuery &,
63 unsigned);
64static Value *simplifyUnOp(unsigned, Value *, const SimplifyQuery &, unsigned);
65static Value *simplifyFPUnOp(unsigned, Value *, const FastMathFlags &,
66 const SimplifyQuery &, unsigned);
67static Value *simplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &,
68 unsigned);
69static Value *simplifyBinOp(unsigned, Value *, Value *, const FastMathFlags &,
70 const SimplifyQuery &, unsigned);
72 const SimplifyQuery &, unsigned);
74 const SimplifyQuery &Q, unsigned MaxRecurse);
75static Value *simplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned);
76static Value *simplifyXorInst(Value *, Value *, const SimplifyQuery &,
77 unsigned);
78static Value *simplifyCastInst(unsigned, Value *, Type *, const SimplifyQuery &,
79 unsigned);
81 GEPNoWrapFlags, const SimplifyQuery &, unsigned);
83 const SimplifyQuery &, unsigned);
85 ArrayRef<Value *> NewOps,
86 const SimplifyQuery &SQ,
87 unsigned MaxRecurse);
88
89/// For a boolean type or a vector of boolean type, return false or a vector
90/// with every element false.
91static Constant *getFalse(Type *Ty) { return ConstantInt::getFalse(Ty); }
92
93/// For a boolean type or a vector of boolean type, return true or a vector
94/// with every element true.
95static Constant *getTrue(Type *Ty) { return ConstantInt::getTrue(Ty); }
96
97/// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
98static bool isSameCompare(Value *V, CmpPredicate Pred, Value *LHS, Value *RHS) {
99 CmpInst *Cmp = dyn_cast<CmpInst>(V);
100 if (!Cmp)
101 return false;
102 CmpInst::Predicate CPred = Cmp->getPredicate();
103 Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
104 if (CPred == Pred && CLHS == LHS && CRHS == RHS)
105 return true;
106 return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS &&
107 CRHS == LHS;
108}
109
110/// Simplify comparison with true or false branch of select:
111///  %sel = select i1 %cond, i32 %tv, i32 %fv
112///  %cmp = icmp sle i32 %sel, %rhs
113/// Compose new comparison by substituting %sel with either %tv or %fv
114/// and see if it simplifies.
// NOTE(review): the opening signature line (original line 115) was dropped by
// the doc extractor; confirm the exact parameter list against the repository
// (it introduces simplifyCmpSelCase with Pred/LHS/RHS preceding these).
116 Value *Cond, const SimplifyQuery &Q,
117 unsigned MaxRecurse, Constant *TrueOrFalse) {
118 Value *SimplifiedCmp = simplifyCmpInst(Pred, LHS, RHS, Q, MaxRecurse);
119 if (SimplifiedCmp == Cond) {
120 // %cmp simplified to the select condition (%cond).
121 return TrueOrFalse;
122 } else if (!SimplifiedCmp && isSameCompare(Cond, Pred, LHS, RHS)) {
123 // It didn't simplify. However, if composed comparison is equivalent
124 // to the select condition (%cond) then we can replace it.
125 return TrueOrFalse;
126 }
// Otherwise return the simplified compare, or null if nothing simplified.
127 return SimplifiedCmp;
128}
129
130/// Simplify comparison with true branch of select
// NOTE(review): the signature line (original line 131) was dropped by the doc
// extractor; confirm the full parameter list upstream.
132 Value *Cond, const SimplifyQuery &Q,
133 unsigned MaxRecurse) {
// Substituting the true arm: when the composed compare collapses to the
// select condition itself, the result for this arm is the constant true.
134 return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
135 getTrue(Cond->getType()));
136}
137
138/// Simplify comparison with false branch of select
// NOTE(review): the signature line (original line 139) was dropped by the doc
// extractor; confirm the full parameter list upstream.
140 Value *Cond, const SimplifyQuery &Q,
141 unsigned MaxRecurse) {
// Substituting the false arm: when the composed compare collapses to the
// select condition itself, the result for this arm is the constant false.
142 return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
143 getFalse(Cond->getType()));
144}
145
146/// We know comparison with both branches of select can be simplified, but they
147/// are not equal. This routine handles some logical simplifications.
// NOTE(review): the signature line (original line 148) was dropped by the doc
// extractor; TCmp/FCmp below are the simplified compares against the true and
// false select arms respectively — confirm parameter order upstream.
149 Value *Cond,
150 const SimplifyQuery &Q,
151 unsigned MaxRecurse) {
152 // If the false value simplified to false, then the result of the compare
153 // is equal to "Cond && TCmp". This also catches the case when the false
154 // value simplified to false and the true value to true, returning "Cond".
155 // Folding select to and/or isn't poison-safe in general; impliesPoison
156 // checks whether folding it does not convert a well-defined value into
157 // poison.
158 if (match(FCmp, m_Zero()) && impliesPoison(TCmp, Cond))
159 if (Value *V = simplifyAndInst(Cond, TCmp, Q, MaxRecurse))
160 return V;
161 // If the true value simplified to true, then the result of the compare
162 // is equal to "Cond || FCmp".
163 if (match(TCmp, m_One()) && impliesPoison(FCmp, Cond))
164 if (Value *V = simplifyOrInst(Cond, FCmp, Q, MaxRecurse))
165 return V;
166 // Finally, if the false value simplified to true and the true value to
167 // false, then the result of the compare is equal to "!Cond".
168 if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
169 if (Value *V = simplifyXorInst(
170 Cond, Constant::getAllOnesValue(Cond->getType()), Q, MaxRecurse))
171 return V;
172 return nullptr;
173}
174
175/// Does the given value dominate the specified phi node?
176static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
// NOTE(review): original line 177 was dropped by the doc extractor; 'I' used
// below presumably comes from a dyn_cast<Instruction>(V) on that line —
// confirm upstream.
178 if (!I)
179 // Arguments and constants dominate all instructions.
180 return true;
181
182 // If we have a DominatorTree then do a precise test.
183 if (DT)
184 return DT->dominates(I, P);
185
186 // Otherwise, if the instruction is in the entry block and is not an invoke,
187 // then it obviously dominates all phi nodes.
// NOTE(review): original line 189 (the remainder of this condition) was
// dropped by the doc extractor; confirm the full predicate upstream.
188 if (I->getParent()->isEntryBlock() && !isa<InvokeInst>(I) &&
190 return true;
191
// Conservative default: without a DominatorTree we cannot prove dominance.
192 return false;
193}
194
195/// Try to simplify a binary operator of form "V op OtherOp" where V is
196/// "(B0 opex B1)" by distributing 'op' across 'opex' as
197/// "(B0 op OtherOp) opex (B1 op OtherOp)".
// NOTE(review): the signature line (original line 198) was dropped by the doc
// extractor; Opcode and V referenced below are parameters declared on it —
// confirm upstream.
199 Value *OtherOp, Instruction::BinaryOps OpcodeToExpand,
200 const SimplifyQuery &Q, unsigned MaxRecurse) {
201 auto *B = dyn_cast<BinaryOperator>(V);
202 if (!B || B->getOpcode() != OpcodeToExpand)
203 return nullptr;
204 Value *B0 = B->getOperand(0), *B1 = B->getOperand(1);
// Both distributed halves must simplify, else the expansion is abandoned.
// getWithoutUndef() is presumably used so undef is not refined differently
// in the two halves — TODO confirm.
205 Value *L =
206 simplifyBinOp(Opcode, B0, OtherOp, Q.getWithoutUndef(), MaxRecurse);
207 if (!L)
208 return nullptr;
209 Value *R =
210 simplifyBinOp(Opcode, B1, OtherOp, Q.getWithoutUndef(), MaxRecurse);
211 if (!R)
212 return nullptr;
213
214 // Does the expanded pair of binops simplify to the existing binop?
215 if ((L == B0 && R == B1) ||
216 (Instruction::isCommutative(OpcodeToExpand) && L == B1 && R == B0)) {
217 ++NumExpand;
218 return B;
219 }
220
221 // Otherwise, return "L op' R" if it simplifies.
222 Value *S = simplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse);
223 if (!S)
224 return nullptr;
225
226 ++NumExpand;
227 return S;
228}
229
230/// Try to simplify binops of form "A op (B op' C)" or the commuted variant by
231/// distributing op over op'.
// NOTE(review): the signature line (original line 232) was dropped by the doc
// extractor; Opcode and L referenced below are parameters declared on it —
// confirm upstream.
233 Value *R,
234 Instruction::BinaryOps OpcodeToExpand,
235 const SimplifyQuery &Q,
236 unsigned MaxRecurse) {
237 // Recursion is always used, so bail out at once if we already hit the limit.
238 if (!MaxRecurse--)
239 return nullptr;
240
// Try each operand in turn as the "(B0 op' B1)" side to expand.
241 if (Value *V = expandBinOp(Opcode, L, R, OpcodeToExpand, Q, MaxRecurse))
242 return V;
243 if (Value *V = expandBinOp(Opcode, R, L, OpcodeToExpand, Q, MaxRecurse))
244 return V;
245 return nullptr;
246}
247
248/// Generic simplifications for associative binary operations.
249/// Returns the simpler value, or null if none was found.
// NOTE(review): the signature line (original line 250) was dropped by the doc
// extractor (it introduces simplifyAssociativeBinOp and the Opcode
// parameter); confirm upstream.
251 Value *LHS, Value *RHS,
252 const SimplifyQuery &Q,
253 unsigned MaxRecurse) {
254 assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");
255
256 // Recursion is always used, so bail out at once if we already hit the limit.
257 if (!MaxRecurse--)
258 return nullptr;
259
// NOTE(review): original lines 260-261 were dropped by the doc extractor;
// Op0/Op1 used below are presumably dyn_cast<BinaryOperator> of LHS/RHS —
// confirm upstream.
262
263 // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely.
264 if (Op0 && Op0->getOpcode() == Opcode) {
265 Value *A = Op0->getOperand(0);
266 Value *B = Op0->getOperand(1);
267 Value *C = RHS;
268
269 // Does "B op C" simplify?
270 if (Value *V = simplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
271 // It does! Return "A op V" if it simplifies or is already available.
272 // If V equals B then "A op V" is just the LHS.
273 if (V == B)
274 return LHS;
275 // Otherwise return "A op V" if it simplifies.
276 if (Value *W = simplifyBinOp(Opcode, A, V, Q, MaxRecurse)) {
277 ++NumReassoc;
278 return W;
279 }
280 }
281 }
282
283 // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely.
284 if (Op1 && Op1->getOpcode() == Opcode) {
285 Value *A = LHS;
286 Value *B = Op1->getOperand(0);
287 Value *C = Op1->getOperand(1);
288
289 // Does "A op B" simplify?
290 if (Value *V = simplifyBinOp(Opcode, A, B, Q, MaxRecurse)) {
291 // It does! Return "V op C" if it simplifies or is already available.
292 // If V equals B then "V op C" is just the RHS.
293 if (V == B)
294 return RHS;
295 // Otherwise return "V op C" if it simplifies.
296 if (Value *W = simplifyBinOp(Opcode, V, C, Q, MaxRecurse)) {
297 ++NumReassoc;
298 return W;
299 }
300 }
301 }
302
303 // The remaining transforms require commutativity as well as associativity.
304 if (!Instruction::isCommutative(Opcode))
305 return nullptr;
306
307 // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
308 if (Op0 && Op0->getOpcode() == Opcode) {
309 Value *A = Op0->getOperand(0);
310 Value *B = Op0->getOperand(1);
311 Value *C = RHS;
312
313 // Does "C op A" simplify?
314 if (Value *V = simplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
315 // It does! Return "V op B" if it simplifies or is already available.
316 // If V equals A then "V op B" is just the LHS.
317 if (V == A)
318 return LHS;
319 // Otherwise return "V op B" if it simplifies.
320 if (Value *W = simplifyBinOp(Opcode, V, B, Q, MaxRecurse)) {
321 ++NumReassoc;
322 return W;
323 }
324 }
325 }
326
327 // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely.
328 if (Op1 && Op1->getOpcode() == Opcode) {
329 Value *A = LHS;
330 Value *B = Op1->getOperand(0);
331 Value *C = Op1->getOperand(1);
332
333 // Does "C op A" simplify?
334 if (Value *V = simplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
335 // It does! Return "B op V" if it simplifies or is already available.
336 // If V equals C then "B op V" is just the RHS.
337 if (V == C)
338 return RHS;
339 // Otherwise return "B op V" if it simplifies.
340 if (Value *W = simplifyBinOp(Opcode, B, V, Q, MaxRecurse)) {
341 ++NumReassoc;
342 return W;
343 }
344 }
345 }
346
347 return nullptr;
348}
349
350/// In the case of a binary operation with a select instruction as an operand,
351/// try to simplify the binop by seeing whether evaluating it on both branches
352/// of the select results in the same value. Returns the common value if so,
353/// otherwise returns null.
// NOTE(review): the signature line (original line 354) was dropped by the doc
// extractor; Opcode and LHS used below are parameters declared on it —
// confirm upstream.
355 Value *RHS, const SimplifyQuery &Q,
356 unsigned MaxRecurse) {
357 // Recursion is always used, so bail out at once if we already hit the limit.
358 if (!MaxRecurse--)
359 return nullptr;
360
361 SelectInst *SI;
362 if (isa<SelectInst>(LHS)) {
// NOTE(review): original lines 363 and 366 (the assignments that set SI from
// LHS or RHS) were dropped by the doc extractor; confirm upstream.
364 } else {
365 assert(isa<SelectInst>(RHS) && "No select instruction operand!");
367 }
368
369 // Evaluate the BinOp on the true and false branches of the select.
370 Value *TV;
371 Value *FV;
372 if (SI == LHS) {
373 TV = simplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse);
374 FV = simplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse);
375 } else {
376 TV = simplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse);
377 FV = simplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse);
378 }
379
380 // If they simplified to the same value, then return the common value.
381 // If they both failed to simplify then return null.
382 if (TV == FV)
383 return TV;
384
385 // If one branch simplified to undef, return the other one.
386 if (TV && Q.isUndefValue(TV))
387 return FV;
388 if (FV && Q.isUndefValue(FV))
389 return TV;
390
391 // If applying the operation did not change the true and false select values,
392 // then the result of the binop is the select itself.
393 if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
394 return SI;
395
396 // If one branch simplified and the other did not, and the simplified
397 // value is equal to the unsimplified one, return the simplified value.
398 // For example, select (cond, X, X & Z) & Z -> X & Z.
399 if ((FV && !TV) || (TV && !FV)) {
400 // Check that the simplified value has the form "X op Y" where "op" is the
401 // same as the original operation.
402 Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
403 if (Simplified && Simplified->getOpcode() == unsigned(Opcode) &&
404 !Simplified->hasPoisonGeneratingFlags()) {
405 // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS".
406 // We already know that "op" is the same as for the simplified value. See
407 // if the operands match too. If so, return the simplified value.
408 Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
409 Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
410 Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
411 if (Simplified->getOperand(0) == UnsimplifiedLHS &&
412 Simplified->getOperand(1) == UnsimplifiedRHS)
413 return Simplified;
414 if (Simplified->isCommutative() &&
415 Simplified->getOperand(1) == UnsimplifiedLHS &&
416 Simplified->getOperand(0) == UnsimplifiedRHS)
417 return Simplified;
418 }
419 }
420
421 return nullptr;
422}
423
424/// In the case of a comparison with a select instruction, try to simplify the
425/// comparison by seeing whether both branches of the select result in the same
426/// value. Returns the common value if so, otherwise returns null.
427/// For example, if we have:
428///  %tmp = select i1 %cmp, i32 1, i32 2
429///  %cmp1 = icmp sle i32 %tmp, 3
430/// We can simplify %cmp1 to true, because both branches of select are
431/// less than 3. We compose new comparison by substituting %tmp with both
432/// branches of select and see if it can be simplified.
// NOTE(review): the signature line (original line 433) was dropped by the doc
// extractor (it declares Pred, LHS and RHS); confirm upstream.
434 const SimplifyQuery &Q, unsigned MaxRecurse) {
435 // Recursion is always used, so bail out at once if we already hit the limit.
436 if (!MaxRecurse--)
437 return nullptr;
438
439 // Make sure the select is on the LHS.
440 if (!isa<SelectInst>(LHS)) {
441 std::swap(LHS, RHS);
442 Pred = CmpInst::getSwappedPredicate(Pred);
443 }
444 assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
// NOTE(review): original line 445 was dropped by the doc extractor; SI used
// below is presumably cast<SelectInst>(LHS) — confirm upstream.
446 Value *Cond = SI->getCondition();
447 Value *TV = SI->getTrueValue();
448 Value *FV = SI->getFalseValue();
449
450 // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
451 // Does "cmp TV, RHS" simplify?
452 Value *TCmp = simplifyCmpSelTrueCase(Pred, TV, RHS, Cond, Q, MaxRecurse);
453 if (!TCmp)
454 return nullptr;
455
456 // Does "cmp FV, RHS" simplify?
457 Value *FCmp = simplifyCmpSelFalseCase(Pred, FV, RHS, Cond, Q, MaxRecurse);
458 if (!FCmp)
459 return nullptr;
460
461 // If both sides simplified to the same value, then use it as the result of
462 // the original comparison.
463 if (TCmp == FCmp)
464 return TCmp;
465
466 // The remaining cases only make sense if the select condition has the same
467 // type as the result of the comparison, so bail out if this is not so.
468 if (Cond->getType()->isVectorTy() == RHS->getType()->isVectorTy())
469 return handleOtherCmpSelSimplifications(TCmp, FCmp, Cond, Q, MaxRecurse);
470
471 return nullptr;
472}
473
474/// In the case of a binary operation with an operand that is a PHI instruction,
475/// try to simplify the binop by seeing whether evaluating it on the incoming
476/// phi values yields the same result for every value. If so returns the common
477/// value, otherwise returns null.
// NOTE(review): the signature line (original line 478) was dropped by the doc
// extractor; Opcode and LHS used below are parameters declared on it —
// confirm upstream.
479 Value *RHS, const SimplifyQuery &Q,
480 unsigned MaxRecurse) {
481 // Recursion is always used, so bail out at once if we already hit the limit.
482 if (!MaxRecurse--)
483 return nullptr;
484
485 PHINode *PI;
486 if (isa<PHINode>(LHS)) {
487 PI = cast<PHINode>(LHS);
488 // Bail out if RHS and the phi may be mutually interdependent due to a loop.
489 if (!valueDominatesPHI(RHS, PI, Q.DT))
490 return nullptr;
491 } else {
492 assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
493 PI = cast<PHINode>(RHS);
494 // Bail out if LHS and the phi may be mutually interdependent due to a loop.
495 if (!valueDominatesPHI(LHS, PI, Q.DT))
496 return nullptr;
497 }
498
499 // Evaluate the BinOp on the incoming phi values.
500 Value *CommonValue = nullptr;
501 for (Use &Incoming : PI->incoming_values()) {
502 // If the incoming value is the phi node itself, it can safely be skipped.
503 if (Incoming == PI)
504 continue;
// NOTE(review): original line 505 was dropped by the doc extractor; InTI used
// below is presumably the terminator of the incoming edge's block (the
// context where the incoming value is evaluated) — confirm upstream.
506 Value *V = PI == LHS
507 ? simplifyBinOp(Opcode, Incoming, RHS,
508 Q.getWithInstruction(InTI), MaxRecurse)
509 : simplifyBinOp(Opcode, LHS, Incoming,
510 Q.getWithInstruction(InTI), MaxRecurse);
511 // If the operation failed to simplify, or simplified to a different value
512 // to previously, then give up.
513 if (!V || (CommonValue && V != CommonValue))
514 return nullptr;
515 CommonValue = V;
516 }
517
518 return CommonValue;
519}
520
521/// In the case of a comparison with a PHI instruction, try to simplify the
522/// comparison by seeing whether comparing with all of the incoming phi values
523/// yields the same result every time. If so returns the common result,
524/// otherwise returns null.
// NOTE(review): the signature line (original line 525) was dropped by the doc
// extractor (it declares Pred, LHS and RHS); confirm upstream.
526 const SimplifyQuery &Q, unsigned MaxRecurse) {
527 // Recursion is always used, so bail out at once if we already hit the limit.
528 if (!MaxRecurse--)
529 return nullptr;
530
531 // Make sure the phi is on the LHS.
532 if (!isa<PHINode>(LHS)) {
533 std::swap(LHS, RHS);
534 Pred = CmpInst::getSwappedPredicate(Pred);
535 }
536 assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
// NOTE(review): original line 537 was dropped by the doc extractor; PI used
// below is presumably cast<PHINode>(LHS) — confirm upstream.
538
539 // Bail out if RHS and the phi may be mutually interdependent due to a loop.
540 if (!valueDominatesPHI(RHS, PI, Q.DT))
541 return nullptr;
542
543 // Evaluate the BinOp on the incoming phi values.
544 Value *CommonValue = nullptr;
545 for (unsigned u = 0, e = PI->getNumIncomingValues(); u < e; ++u) {
// NOTE(review): original lines 546-547 were dropped by the doc extractor;
// they presumably bind Incoming (the u-th incoming value) and InTI (the
// incoming block's terminator) — confirm upstream.
548 // If the incoming value is the phi node itself, it can safely be skipped.
549 if (Incoming == PI)
550 continue;
551 // Change the context instruction to the "edge" that flows into the phi.
552 // This is important because that is where incoming is actually "evaluated"
553 // even though it is used later somewhere else.
// NOTE(review): original line 554 (the simplifyCmpInst call that defines V)
// was dropped by the doc extractor — confirm upstream.
555 MaxRecurse);
556 // If the operation failed to simplify, or simplified to a different value
557 // to previously, then give up.
558 if (!V || (CommonValue && V != CommonValue))
559 return nullptr;
560 CommonValue = V;
561 }
562
563 return CommonValue;
564}
565
// Fold the binop when both operands are constants; otherwise canonicalize a
// lone constant LHS to the RHS for commutative opcodes. Note Op0/Op1 are
// taken by reference and may be swapped in place.
// NOTE(review): the signature line (original line 566) was dropped by the doc
// extractor; Opcode used below is a parameter declared on it — confirm
// upstream.
567 Value *&Op0, Value *&Op1,
568 const SimplifyQuery &Q) {
569 if (auto *CLHS = dyn_cast<Constant>(Op0)) {
570 if (auto *CRHS = dyn_cast<Constant>(Op1)) {
571 switch (Opcode) {
572 default:
573 break;
// FP opcodes fold with the context instruction (for FP environment /
// fast-math information) when one is available.
574 case Instruction::FAdd:
575 case Instruction::FSub:
576 case Instruction::FMul:
577 case Instruction::FDiv:
578 case Instruction::FRem:
579 if (Q.CxtI != nullptr)
580 return ConstantFoldFPInstOperands(Opcode, CLHS, CRHS, Q.DL, Q.CxtI);
581 }
582 return ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL);
583 }
584
585 // Canonicalize the constant to the RHS if this is a commutative operation.
586 if (Instruction::isCommutative(Opcode))
587 std::swap(Op0, Op1);
588 }
589 return nullptr;
590}
591
592/// Given operands for an Add, see if we can fold the result.
593/// If not, this returns null.
594static Value *simplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
595 const SimplifyQuery &Q, unsigned MaxRecurse) {
596 if (Constant *C = foldOrCommuteConstant(Instruction::Add, Op0, Op1, Q))
597 return C;
598
599 // X + poison -> poison
600 if (isa<PoisonValue>(Op1))
601 return Op1;
602
603 // X + undef -> undef
604 if (Q.isUndefValue(Op1))
605 return Op1;
606
607 // X + 0 -> X
608 if (match(Op1, m_Zero()))
609 return Op0;
610
611 // If two operands are negative, return 0.
612 if (isKnownNegation(Op0, Op1))
613 return Constant::getNullValue(Op0->getType());
614
615 // X + (Y - X) -> Y
616 // (Y - X) + X -> Y
617 // Eg: X + -X -> 0
618 Value *Y = nullptr;
619 if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
620 match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
621 return Y;
622
623 // X + ~X -> -1 since ~X = -X-1
624 Type *Ty = Op0->getType();
625 if (match(Op0, m_Not(m_Specific(Op1))) || match(Op1, m_Not(m_Specific(Op0))))
626 return Constant::getAllOnesValue(Ty);
627
628 // add nsw/nuw (xor Y, signmask), signmask --> Y
629 // The no-wrapping add guarantees that the top bit will be set by the add.
630 // Therefore, the xor must be clearing the already set sign bit of Y.
631 if ((IsNSW || IsNUW) && match(Op1, m_SignMask()) &&
632 match(Op0, m_Xor(m_Value(Y), m_SignMask())))
633 return Y;
634
635 // add nuw %x, -1 -> -1, because %x can only be 0.
636 if (IsNUW && match(Op1, m_AllOnes()))
637 return Op1; // Which is -1.
638
639 /// i1 add -> xor.
640 if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
641 if (Value *V = simplifyXorInst(Op0, Op1, Q, MaxRecurse - 1))
642 return V;
643
644 // Try some generic simplifications for associative operations.
645 if (Value *V =
646 simplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q, MaxRecurse))
647 return V;
648
649 // Threading Add over selects and phi nodes is pointless, so don't bother.
650 // Threading over the select in "A + select(cond, B, C)" means evaluating
651 // "A+B" and "A+C" and seeing if they are equal; but they are equal if and
652 // only if B and C are equal. If B and C are equal then (since we assume
653 // that operands have already been simplified) "select(cond, B, C)" should
654 // have been simplified to the common value of B and C already. Analysing
655 // "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly
656 // for threading over phi nodes.
657
658 return nullptr;
659}
660
661Value *llvm::simplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
662 const SimplifyQuery &Query) {
663 return ::simplifyAddInst(Op0, Op1, IsNSW, IsNUW, Query, RecursionLimit);
664}
665
666/// Compute the base pointer and cumulative constant offsets for V.
667///
668/// This strips all constant offsets off of V, leaving it the base pointer, and
669/// accumulates the total constant offset applied in the returned constant.
670/// It returns zero if there are no constant offsets applied.
671///
672/// This is very similar to stripAndAccumulateConstantOffsets(), except it
673/// normalizes the offset bitwidth to the stripped pointer type, not the
674/// original pointer type.
// NOTE(review): the signature line (original line 675) was dropped by the doc
// extractor; V below is updated in place to the stripped base (it is
// reassigned), so it is presumably passed by reference — confirm upstream.
676 assert(V->getType()->isPtrOrPtrVectorTy());
677
678 APInt Offset = APInt::getZero(DL.getIndexTypeSizeInBits(V->getType()));
679 V = V->stripAndAccumulateConstantOffsets(DL, Offset,
680 /*AllowNonInbounds=*/true);
681 // As that strip may trace through `addrspacecast`, need to sext or trunc
682 // the offset calculated.
683 return Offset.sextOrTrunc(DL.getIndexTypeSizeInBits(V->getType()));
684}
685
686/// Compute the constant difference between two pointer values.
687/// If the difference is not a constant, returns null.
// NOTE(review): the signature line (original line 688) and lines 690-691 were
// dropped by the doc extractor; the missing lines presumably compute
// LHSOffset/RHSOffset via stripAndComputeConstantOffsets, which also strips
// LHS and RHS down to their base pointers — confirm upstream.
689 Value *RHS) {
692
693 // If LHS and RHS are not related via constant offsets to the same base
694 // value, there is nothing we can do here.
695 if (LHS != RHS)
696 return nullptr;
697
698 // Otherwise, the difference of LHS - RHS can be computed as:
699 //  LHS - RHS
700 //  = (LHSOffset + Base) - (RHSOffset + Base)
701 //  = LHSOffset - RHSOffset
702 Constant *Res = ConstantInt::get(LHS->getContext(), LHSOffset - RHSOffset);
// Splat the scalar difference for vector-of-pointer inputs.
703 if (auto *VecTy = dyn_cast<VectorType>(LHS->getType()))
704 Res = ConstantVector::getSplat(VecTy->getElementCount(), Res);
705 return Res;
706}
707
708/// Test if there is a dominating equivalence condition for the
709/// two operands. If there is, try to reduce the binary operation
710/// between the two operands.
711/// Example: Op0 - Op1 --> 0 when Op0 == Op1
712static Value *simplifyByDomEq(unsigned Opcode, Value *Op0, Value *Op1,
713 const SimplifyQuery &Q, unsigned MaxRecurse) {
714 // Only try this on the top-level query; recursive runs gain no benefit.
715 if (MaxRecurse != RecursionLimit)
716 return nullptr;
717
718 std::optional<bool> Imp =
// NOTE(review): original line 719 (the call that initializes Imp, presumably
// isImpliedByDomCondition for Op0 == Op1) was dropped by the doc extractor —
// confirm upstream.
720 if (Imp && *Imp) {
721 Type *Ty = Op0->getType();
// A dominating condition proved Op0 == Op1; fold per opcode.
722 switch (Opcode) {
723 case Instruction::Sub:
724 case Instruction::Xor:
725 case Instruction::URem:
726 case Instruction::SRem:
727 return Constant::getNullValue(Ty);
728
729 case Instruction::SDiv:
730 case Instruction::UDiv:
731 return ConstantInt::get(Ty, 1);
732
733 case Instruction::And:
734 case Instruction::Or:
735 // Could be either one - choose Op1 since that's more likely a constant.
736 return Op1;
737 default:
738 break;
739 }
740 }
741 return nullptr;
742}
743
744/// Given operands for a Sub, see if we can fold the result.
745/// If not, this returns null.
746static Value *simplifySubInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
747 const SimplifyQuery &Q, unsigned MaxRecurse) {
748 if (Constant *C = foldOrCommuteConstant(Instruction::Sub, Op0, Op1, Q))
749 return C;
750
751 // X - poison -> poison
752 // poison - X -> poison
753 if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
754 return PoisonValue::get(Op0->getType());
755
756 // X - undef -> undef
757 // undef - X -> undef
758 if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
759 return UndefValue::get(Op0->getType());
760
761 // X - 0 -> X
762 if (match(Op1, m_Zero()))
763 return Op0;
764
765 // X - X -> 0
766 if (Op0 == Op1)
767 return Constant::getNullValue(Op0->getType());
768
769 // Is this a negation?
770 if (match(Op0, m_Zero())) {
771 // 0 - X -> 0 if the sub is NUW.
772 if (IsNUW)
773 return Constant::getNullValue(Op0->getType());
774
775 KnownBits Known = computeKnownBits(Op1, Q);
776 if (Known.Zero.isMaxSignedValue()) {
777 // Op1 is either 0 or the minimum signed value. If the sub is NSW, then
778 // Op1 must be 0 because negating the minimum signed value is undefined.
779 if (IsNSW)
780 return Constant::getNullValue(Op0->getType());
781
782 // 0 - X -> X if X is 0 or the minimum signed value.
783 return Op1;
784 }
785 }
786
787 // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
788 // For example, (X + Y) - Y -> X; (Y + X) - Y -> X
789 Value *X = nullptr, *Y = nullptr, *Z = Op1;
790 if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
791 // See if "V === Y - Z" simplifies.
792 if (Value *V = simplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse - 1))
793 // It does! Now see if "X + V" simplifies.
794 if (Value *W = simplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse - 1)) {
795 // It does, we successfully reassociated!
796 ++NumReassoc;
797 return W;
798 }
799 // See if "V === X - Z" simplifies.
800 if (Value *V = simplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse - 1))
801 // It does! Now see if "Y + V" simplifies.
802 if (Value *W = simplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse - 1)) {
803 // It does, we successfully reassociated!
804 ++NumReassoc;
805 return W;
806 }
807 }
808
809 // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies.
810 // For example, X - (X + 1) -> -1
811 X = Op0;
812 if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z)
813 // See if "V === X - Y" simplifies.
814 if (Value *V = simplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse - 1))
815 // It does! Now see if "V - Z" simplifies.
816 if (Value *W = simplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse - 1)) {
817 // It does, we successfully reassociated!
818 ++NumReassoc;
819 return W;
820 }
821 // See if "V === X - Z" simplifies.
822 if (Value *V = simplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse - 1))
823 // It does! Now see if "V - Y" simplifies.
824 if (Value *W = simplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse - 1)) {
825 // It does, we successfully reassociated!
826 ++NumReassoc;
827 return W;
828 }
829 }
830
831 // Z - (X - Y) -> (Z - X) + Y if everything simplifies.
832 // For example, X - (X - Y) -> Y.
833 Z = Op0;
834 if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y)
835 // See if "V === Z - X" simplifies.
836 if (Value *V = simplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse - 1))
837 // It does! Now see if "V + Y" simplifies.
838 if (Value *W = simplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse - 1)) {
839 // It does, we successfully reassociated!
840 ++NumReassoc;
841 return W;
842 }
843
844 // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies.
845 if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) &&
846 match(Op1, m_Trunc(m_Value(Y))))
847 if (X->getType() == Y->getType())
848 // See if "V === X - Y" simplifies.
849 if (Value *V = simplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse - 1))
850 // It does! Now see if "trunc V" simplifies.
851 if (Value *W = simplifyCastInst(Instruction::Trunc, V, Op0->getType(),
852 Q, MaxRecurse - 1))
853 // It does, return the simplified "trunc V".
854 return W;
855
856 // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
857 if (match(Op0, m_PtrToIntOrAddr(m_Value(X))) &&
// NOTE(review): original line 858 (the matching m_PtrToIntOrAddr on Op1,
// which binds Y) was dropped by the doc extractor — confirm upstream.
859 if (Constant *Result = computePointerDifference(Q.DL, X, Y))
860 return ConstantFoldIntegerCast(Result, Op0->getType(), /*IsSigned*/ true,
861 Q.DL);
862 }
863
864 // i1 sub -> xor.
865 if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
866 if (Value *V = simplifyXorInst(Op0, Op1, Q, MaxRecurse - 1))
867 return V;
868
869 // Threading Sub over selects and phi nodes is pointless, so don't bother.
870 // Threading over the select in "A - select(cond, B, C)" means evaluating
871 // "A-B" and "A-C" and seeing if they are equal; but they are equal if and
872 // only if B and C are equal. If B and C are equal then (since we assume
873 // that operands have already been simplified) "select(cond, B, C)" should
874 // have been simplified to the common value of B and C already. Analysing
875 // "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly
876 // for threading over phi nodes.
877
878 if (Value *V = simplifyByDomEq(Instruction::Sub, Op0, Op1, Q, MaxRecurse))
879 return V;
880
881 // (sub nuw C_Mask, (xor X, C_Mask)) -> X
882 if (IsNUW) {
883 Value *X;
884 if (match(Op1, m_Xor(m_Value(X), m_Specific(Op0))) &&
885 match(Op0, m_LowBitMask()))
886 return X;
887 }
888
889 return nullptr;
890}
891
892Value *llvm::simplifySubInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
893 const SimplifyQuery &Q) {
894 return ::simplifySubInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
895}
896
/// Given operands for a Mul, see if we can fold the result.
/// If not, this returns null.
///
/// \p IsNSW / \p IsNUW are the no-wrap flags of the multiply; note that only
/// IsNSW is consulted in this function (for the i1 special case below).
/// The checks are ordered deliberately (poison/undef folds before value
/// folds); do not reorder them.
static Value *simplifyMulInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  // Constant-fold, or canonicalize a lone constant to the RHS.
  if (Constant *C = foldOrCommuteConstant(Instruction::Mul, Op0, Op1, Q))
    return C;

  // X * poison -> poison
  if (isa<PoisonValue>(Op1))
    return Op1;

  // X * undef -> 0
  // X * 0 -> 0
  if (Q.isUndefValue(Op1) || match(Op1, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X * 1 -> X
  if (match(Op1, m_One()))
    return Op0;

  // (X / Y) * Y -> X if the division is exact.
  Value *X = nullptr;
  if (Q.IIQ.UseInstrInfo &&
      (match(Op0,
             m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) || // (X / Y) * Y
       match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0)))))) // Y * (X / Y)
    return X;

  if (Op0->getType()->isIntOrIntVectorTy(1)) {
    // mul i1 nsw is a special-case because -1 * -1 is poison (+1 is not
    // representable). All other cases reduce to 0, so just return 0.
    if (IsNSW)
      return ConstantInt::getNullValue(Op0->getType());

    // Treat "mul i1" as "and i1".
    if (MaxRecurse)
      if (Value *V = simplifyAndInst(Op0, Op1, Q, MaxRecurse - 1))
        return V;
  }

  // Try some generic simplifications for associative operations.
  if (Value *V =
          simplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
    return V;

  // Mul distributes over Add. Try some generic simplifications based on this.
  if (Value *V = expandCommutativeBinOp(Instruction::Mul, Op0, Op1,
                                        Instruction::Add, Q, MaxRecurse))
    return V;

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V =
            threadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V =
            threadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
      return V;

  // No simplification applies.
  return nullptr;
}
963
// Public entry point: simplify a mul using the default recursion budget.
Value *llvm::simplifyMulInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Q) {
  return ::simplifyMulInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
}
968
969/// Given a predicate and two operands, return true if the comparison is true.
970/// This is a helper for div/rem simplification where we return some other value
971/// when we can prove a relationship between the operands.
973 const SimplifyQuery &Q, unsigned MaxRecurse) {
974 Value *V = simplifyICmpInst(Pred, LHS, RHS, Q, MaxRecurse);
976 return (C && C->isAllOnesValue());
977}
978
979/// Return true if we can simplify X / Y to 0. Remainder can adapt that answer
980/// to simplify X % Y to X.
981static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q,
982 unsigned MaxRecurse, bool IsSigned) {
983 // Recursion is always used, so bail out at once if we already hit the limit.
984 if (!MaxRecurse--)
985 return false;
986
987 if (IsSigned) {
988 // (X srem Y) sdiv Y --> 0
989 if (match(X, m_SRem(m_Value(), m_Specific(Y))))
990 return true;
991
992 // |X| / |Y| --> 0
993 //
994 // We require that 1 operand is a simple constant. That could be extended to
995 // 2 variables if we computed the sign bit for each.
996 //
997 // Make sure that a constant is not the minimum signed value because taking
998 // the abs() of that is undefined.
999 Type *Ty = X->getType();
1000 const APInt *C;
1001 if (match(X, m_APInt(C)) && !C->isMinSignedValue()) {
1002 // Is the variable divisor magnitude always greater than the constant
1003 // dividend magnitude?
1004 // |Y| > |C| --> Y < -abs(C) or Y > abs(C)
1005 Constant *PosDividendC = ConstantInt::get(Ty, C->abs());
1006 Constant *NegDividendC = ConstantInt::get(Ty, -C->abs());
1007 if (isICmpTrue(CmpInst::ICMP_SLT, Y, NegDividendC, Q, MaxRecurse) ||
1008 isICmpTrue(CmpInst::ICMP_SGT, Y, PosDividendC, Q, MaxRecurse))
1009 return true;
1010 }
1011 if (match(Y, m_APInt(C))) {
1012 // Special-case: we can't take the abs() of a minimum signed value. If
1013 // that's the divisor, then all we have to do is prove that the dividend
1014 // is also not the minimum signed value.
1015 if (C->isMinSignedValue())
1016 return isICmpTrue(CmpInst::ICMP_NE, X, Y, Q, MaxRecurse);
1017
1018 // Is the variable dividend magnitude always less than the constant
1019 // divisor magnitude?
1020 // |X| < |C| --> X > -abs(C) and X < abs(C)
1021 Constant *PosDivisorC = ConstantInt::get(Ty, C->abs());
1022 Constant *NegDivisorC = ConstantInt::get(Ty, -C->abs());
1023 if (isICmpTrue(CmpInst::ICMP_SGT, X, NegDivisorC, Q, MaxRecurse) &&
1024 isICmpTrue(CmpInst::ICMP_SLT, X, PosDivisorC, Q, MaxRecurse))
1025 return true;
1026 }
1027 return false;
1028 }
1029
1030 // IsSigned == false.
1031
1032 // Is the unsigned dividend known to be less than a constant divisor?
1033 // TODO: Convert this (and above) to range analysis
1034 // ("computeConstantRangeIncludingKnownBits")?
1035 const APInt *C;
1036 if (match(Y, m_APInt(C)) && computeKnownBits(X, Q).getMaxValue().ult(*C))
1037 return true;
1038
1039 // Try again for any divisor:
1040 // Is the dividend unsigned less than the divisor?
1041 return isICmpTrue(ICmpInst::ICMP_ULT, X, Y, Q, MaxRecurse);
1042}
1043
1044/// Check for common or similar folds of integer division or integer remainder.
1045/// This applies to all 4 opcodes (sdiv/udiv/srem/urem).
1047 Value *Op1, const SimplifyQuery &Q,
1048 unsigned MaxRecurse) {
1049 bool IsDiv = (Opcode == Instruction::SDiv || Opcode == Instruction::UDiv);
1050 bool IsSigned = (Opcode == Instruction::SDiv || Opcode == Instruction::SRem);
1051
1052 Type *Ty = Op0->getType();
1053
1054 // X / undef -> poison
1055 // X % undef -> poison
1056 if (Q.isUndefValue(Op1) || isa<PoisonValue>(Op1))
1057 return PoisonValue::get(Ty);
1058
1059 // X / 0 -> poison
1060 // X % 0 -> poison
1061 // We don't need to preserve faults!
1062 if (match(Op1, m_Zero()))
1063 return PoisonValue::get(Ty);
1064
1065 // poison / X -> poison
1066 // poison % X -> poison
1067 if (isa<PoisonValue>(Op0))
1068 return Op0;
1069
1070 // undef / X -> 0
1071 // undef % X -> 0
1072 if (Q.isUndefValue(Op0))
1073 return Constant::getNullValue(Ty);
1074
1075 // 0 / X -> 0
1076 // 0 % X -> 0
1077 if (match(Op0, m_Zero()))
1078 return Constant::getNullValue(Op0->getType());
1079
1080 // X / X -> 1
1081 // X % X -> 0
1082 if (Op0 == Op1)
1083 return IsDiv ? ConstantInt::get(Ty, 1) : Constant::getNullValue(Ty);
1084
1085 KnownBits Known = computeKnownBits(Op1, Q);
1086 // X / 0 -> poison
1087 // X % 0 -> poison
1088 // If the divisor is known to be zero, just return poison. This can happen in
1089 // some cases where its provable indirectly the denominator is zero but it's
1090 // not trivially simplifiable (i.e known zero through a phi node).
1091 if (Known.isZero())
1092 return PoisonValue::get(Ty);
1093
1094 // X / 1 -> X
1095 // X % 1 -> 0
1096 // If the divisor can only be zero or one, we can't have division-by-zero
1097 // or remainder-by-zero, so assume the divisor is 1.
1098 // e.g. 1, zext (i8 X), sdiv X (Y and 1)
1099 if (Known.countMinLeadingZeros() == Known.getBitWidth() - 1)
1100 return IsDiv ? Op0 : Constant::getNullValue(Ty);
1101
1102 // If X * Y does not overflow, then:
1103 // X * Y / Y -> X
1104 // X * Y % Y -> 0
1105 Value *X;
1106 if (match(Op0, m_c_Mul(m_Value(X), m_Specific(Op1)))) {
1108 // The multiplication can't overflow if it is defined not to, or if
1109 // X == A / Y for some A.
1110 if ((IsSigned && Q.IIQ.hasNoSignedWrap(Mul)) ||
1111 (!IsSigned && Q.IIQ.hasNoUnsignedWrap(Mul)) ||
1112 (IsSigned && match(X, m_SDiv(m_Value(), m_Specific(Op1)))) ||
1113 (!IsSigned && match(X, m_UDiv(m_Value(), m_Specific(Op1))))) {
1114 return IsDiv ? X : Constant::getNullValue(Op0->getType());
1115 }
1116 }
1117
1118 if (isDivZero(Op0, Op1, Q, MaxRecurse, IsSigned))
1119 return IsDiv ? Constant::getNullValue(Op0->getType()) : Op0;
1120
1121 if (Value *V = simplifyByDomEq(Opcode, Op0, Op1, Q, MaxRecurse))
1122 return V;
1123
1124 // If the operation is with the result of a select instruction, check whether
1125 // operating on either branch of the select always yields the same value.
1126 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1127 if (Value *V = threadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1128 return V;
1129
1130 // If the operation is with the result of a phi instruction, check whether
1131 // operating on all incoming values of the phi always yields the same value.
1132 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1133 if (Value *V = threadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1134 return V;
1135
1136 return nullptr;
1137}
1138
1139/// These are simplifications common to SDiv and UDiv.
1141 bool IsExact, const SimplifyQuery &Q,
1142 unsigned MaxRecurse) {
1143 if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1144 return C;
1145
1146 if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q, MaxRecurse))
1147 return V;
1148
1149 const APInt *DivC;
1150 if (IsExact && match(Op1, m_APInt(DivC))) {
1151 // If this is an exact divide by a constant, then the dividend (Op0) must
1152 // have at least as many trailing zeros as the divisor to divide evenly. If
1153 // it has less trailing zeros, then the result must be poison.
1154 if (DivC->countr_zero()) {
1155 KnownBits KnownOp0 = computeKnownBits(Op0, Q);
1156 if (KnownOp0.countMaxTrailingZeros() < DivC->countr_zero())
1157 return PoisonValue::get(Op0->getType());
1158 }
1159
1160 // udiv exact (mul nsw X, C), C --> X
1161 // sdiv exact (mul nuw X, C), C --> X
1162 // where C is not a power of 2.
1163 Value *X;
1164 if (!DivC->isPowerOf2() &&
1165 (Opcode == Instruction::UDiv
1166 ? match(Op0, m_NSWMul(m_Value(X), m_Specific(Op1)))
1167 : match(Op0, m_NUWMul(m_Value(X), m_Specific(Op1)))))
1168 return X;
1169 }
1170
1171 return nullptr;
1172}
1173
1174/// These are simplifications common to SRem and URem.
1176 const SimplifyQuery &Q, unsigned MaxRecurse) {
1177 if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1178 return C;
1179
1180 if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q, MaxRecurse))
1181 return V;
1182
1183 // (X << Y) % X -> 0
1184 if (Q.IIQ.UseInstrInfo) {
1185 if ((Opcode == Instruction::SRem &&
1186 match(Op0, m_NSWShl(m_Specific(Op1), m_Value()))) ||
1187 (Opcode == Instruction::URem &&
1188 match(Op0, m_NUWShl(m_Specific(Op1), m_Value()))))
1189 return Constant::getNullValue(Op0->getType());
1190
1191 const APInt *C0;
1192 if (match(Op1, m_APInt(C0))) {
1193 // (srem (mul nsw X, C1), C0) -> 0 if C1 s% C0 == 0
1194 // (urem (mul nuw X, C1), C0) -> 0 if C1 u% C0 == 0
1195 if (Opcode == Instruction::SRem
1196 ? match(Op0,
1197 m_NSWMul(m_Value(), m_CheckedInt([C0](const APInt &C) {
1198 return C.srem(*C0).isZero();
1199 })))
1200 : match(Op0,
1201 m_NUWMul(m_Value(), m_CheckedInt([C0](const APInt &C) {
1202 return C.urem(*C0).isZero();
1203 }))))
1204 return Constant::getNullValue(Op0->getType());
1205 }
1206 }
1207 return nullptr;
1208}
1209
1210/// Given operands for an SDiv, see if we can fold the result.
1211/// If not, this returns null.
1212static Value *simplifySDivInst(Value *Op0, Value *Op1, bool IsExact,
1213 const SimplifyQuery &Q, unsigned MaxRecurse) {
1214 // If two operands are negated and no signed overflow, return -1.
1215 if (isKnownNegation(Op0, Op1, /*NeedNSW=*/true))
1216 return Constant::getAllOnesValue(Op0->getType());
1217
1218 return simplifyDiv(Instruction::SDiv, Op0, Op1, IsExact, Q, MaxRecurse);
1219}
1220
// Public entry point: simplify an sdiv using the default recursion budget.
Value *llvm::simplifySDivInst(Value *Op0, Value *Op1, bool IsExact,
                              const SimplifyQuery &Q) {
  return ::simplifySDivInst(Op0, Op1, IsExact, Q, RecursionLimit);
}
1225
1226/// Given operands for a UDiv, see if we can fold the result.
1227/// If not, this returns null.
1228static Value *simplifyUDivInst(Value *Op0, Value *Op1, bool IsExact,
1229 const SimplifyQuery &Q, unsigned MaxRecurse) {
1230 return simplifyDiv(Instruction::UDiv, Op0, Op1, IsExact, Q, MaxRecurse);
1231}
1232
// Public entry point: simplify a udiv using the default recursion budget.
Value *llvm::simplifyUDivInst(Value *Op0, Value *Op1, bool IsExact,
                              const SimplifyQuery &Q) {
  return ::simplifyUDivInst(Op0, Op1, IsExact, Q, RecursionLimit);
}
1237
1238/// Given operands for an SRem, see if we can fold the result.
1239/// If not, this returns null.
1240static Value *simplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1241 unsigned MaxRecurse) {
1242 // If the divisor is 0, the result is undefined, so assume the divisor is -1.
1243 // srem Op0, (sext i1 X) --> srem Op0, -1 --> 0
1244 Value *X;
1245 if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
1246 return ConstantInt::getNullValue(Op0->getType());
1247
1248 // If the two operands are negated, return 0.
1249 if (isKnownNegation(Op0, Op1))
1250 return ConstantInt::getNullValue(Op0->getType());
1251
1252 return simplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse);
1253}
1254
1256 return ::simplifySRemInst(Op0, Op1, Q, RecursionLimit);
1257}
1258
1259/// Given operands for a URem, see if we can fold the result.
1260/// If not, this returns null.
1261static Value *simplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1262 unsigned MaxRecurse) {
1263 return simplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse);
1264}
1265
1267 return ::simplifyURemInst(Op0, Op1, Q, RecursionLimit);
1268}
1269
1270/// Returns true if a shift by \c Amount always yields poison.
1271static bool isPoisonShift(Value *Amount, const SimplifyQuery &Q) {
1272 Constant *C = dyn_cast<Constant>(Amount);
1273 if (!C)
1274 return false;
1275
1276 // X shift by undef -> poison because it may shift by the bitwidth.
1277 if (Q.isUndefValue(C))
1278 return true;
1279
1280 // Shifting by the bitwidth or more is poison. This covers scalars and
1281 // fixed/scalable vectors with splat constants.
1282 const APInt *AmountC;
1283 if (match(C, m_APInt(AmountC)) && AmountC->uge(AmountC->getBitWidth()))
1284 return true;
1285
1286 // Try harder for fixed-length vectors:
1287 // If all lanes of a vector shift are poison, the whole shift is poison.
1289 for (unsigned I = 0,
1290 E = cast<FixedVectorType>(C->getType())->getNumElements();
1291 I != E; ++I)
1292 if (!isPoisonShift(C->getAggregateElement(I), Q))
1293 return false;
1294 return true;
1295 }
1296
1297 return false;
1298}
1299
1300/// Given operands for an Shl, LShr or AShr, see if we can fold the result.
1301/// If not, this returns null.
1303 Value *Op1, bool IsNSW, const SimplifyQuery &Q,
1304 unsigned MaxRecurse) {
1305 if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1306 return C;
1307
1308 // poison shift by X -> poison
1309 if (isa<PoisonValue>(Op0))
1310 return Op0;
1311
1312 // 0 shift by X -> 0
1313 if (match(Op0, m_Zero()))
1314 return Constant::getNullValue(Op0->getType());
1315
1316 // X shift by 0 -> X
1317 // Shift-by-sign-extended bool must be shift-by-0 because shift-by-all-ones
1318 // would be poison.
1319 Value *X;
1320 if (match(Op1, m_Zero()) ||
1321 (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
1322 return Op0;
1323
1324 // Fold undefined shifts.
1325 if (isPoisonShift(Op1, Q))
1326 return PoisonValue::get(Op0->getType());
1327
1328 // If the operation is with the result of a select instruction, check whether
1329 // operating on either branch of the select always yields the same value.
1330 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1331 if (Value *V = threadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1332 return V;
1333
1334 // If the operation is with the result of a phi instruction, check whether
1335 // operating on all incoming values of the phi always yields the same value.
1336 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1337 if (Value *V = threadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1338 return V;
1339
1340 // If any bits in the shift amount make that value greater than or equal to
1341 // the number of bits in the type, the shift is undefined.
1342 KnownBits KnownAmt = computeKnownBits(Op1, Q);
1343 if (KnownAmt.getMinValue().uge(KnownAmt.getBitWidth()))
1344 return PoisonValue::get(Op0->getType());
1345
1346 // If all valid bits in the shift amount are known zero, the first operand is
1347 // unchanged.
1348 unsigned NumValidShiftBits = Log2_32_Ceil(KnownAmt.getBitWidth());
1349 if (KnownAmt.countMinTrailingZeros() >= NumValidShiftBits)
1350 return Op0;
1351
1352 // Check for nsw shl leading to a poison value.
1353 if (IsNSW) {
1354 assert(Opcode == Instruction::Shl && "Expected shl for nsw instruction");
1355 KnownBits KnownVal = computeKnownBits(Op0, Q);
1356 KnownBits KnownShl = KnownBits::shl(KnownVal, KnownAmt);
1357
1358 if (KnownVal.Zero.isSignBitSet())
1359 KnownShl.Zero.setSignBit();
1360 if (KnownVal.One.isSignBitSet())
1361 KnownShl.One.setSignBit();
1362
1363 if (KnownShl.hasConflict())
1364 return PoisonValue::get(Op0->getType());
1365 }
1366
1367 return nullptr;
1368}
1369
1370/// Given operands for an LShr or AShr, see if we can fold the result. If not,
1371/// this returns null.
1373 Value *Op1, bool IsExact,
1374 const SimplifyQuery &Q, unsigned MaxRecurse) {
1375 if (Value *V =
1376 simplifyShift(Opcode, Op0, Op1, /*IsNSW*/ false, Q, MaxRecurse))
1377 return V;
1378
1379 // X >> X -> 0
1380 if (Op0 == Op1)
1381 return Constant::getNullValue(Op0->getType());
1382
1383 // undef >> X -> 0
1384 // undef >> X -> undef (if it's exact)
1385 if (Q.isUndefValue(Op0))
1386 return IsExact ? Op0 : Constant::getNullValue(Op0->getType());
1387
1388 // The low bit cannot be shifted out of an exact shift if it is set.
1389 // TODO: Generalize by counting trailing zeros (see fold for exact division).
1390 if (IsExact) {
1391 KnownBits Op0Known = computeKnownBits(Op0, Q);
1392 if (Op0Known.One[0])
1393 return Op0;
1394 }
1395
1396 return nullptr;
1397}
1398
1399/// Given operands for an Shl, see if we can fold the result.
1400/// If not, this returns null.
1401static Value *simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
1402 const SimplifyQuery &Q, unsigned MaxRecurse) {
1403 if (Value *V =
1404 simplifyShift(Instruction::Shl, Op0, Op1, IsNSW, Q, MaxRecurse))
1405 return V;
1406
1407 Type *Ty = Op0->getType();
1408 // undef << X -> 0
1409 // undef << X -> undef if (if it's NSW/NUW)
1410 if (Q.isUndefValue(Op0))
1411 return IsNSW || IsNUW ? Op0 : Constant::getNullValue(Ty);
1412
1413 // (X >> A) << A -> X
1414 Value *X;
1415 if (Q.IIQ.UseInstrInfo &&
1416 match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
1417 return X;
1418
1419 // shl nuw i8 C, %x -> C iff C has sign bit set.
1420 if (IsNUW && match(Op0, m_Negative()))
1421 return Op0;
1422 // NOTE: could use computeKnownBits() / LazyValueInfo,
1423 // but the cost-benefit analysis suggests it isn't worth it.
1424
1425 // "nuw" guarantees that only zeros are shifted out, and "nsw" guarantees
1426 // that the sign-bit does not change, so the only input that does not
1427 // produce poison is 0, and "0 << (bitwidth-1) --> 0".
1428 if (IsNSW && IsNUW &&
1429 match(Op1, m_SpecificInt(Ty->getScalarSizeInBits() - 1)))
1430 return Constant::getNullValue(Ty);
1431
1432 return nullptr;
1433}
1434
// Public entry point: simplify a shl using the default recursion budget.
Value *llvm::simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Q) {
  return ::simplifyShlInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
}
1439
1440/// Given operands for an LShr, see if we can fold the result.
1441/// If not, this returns null.
1442static Value *simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact,
1443 const SimplifyQuery &Q, unsigned MaxRecurse) {
1444 if (Value *V = simplifyRightShift(Instruction::LShr, Op0, Op1, IsExact, Q,
1445 MaxRecurse))
1446 return V;
1447
1448 // (X << A) >> A -> X
1449 Value *X;
1450 if (Q.IIQ.UseInstrInfo && match(Op0, m_NUWShl(m_Value(X), m_Specific(Op1))))
1451 return X;
1452
1453 // ((X << A) | Y) >> A -> X if effective width of Y is not larger than A.
1454 // We can return X as we do in the above case since OR alters no bits in X.
1455 // SimplifyDemandedBits in InstCombine can do more general optimization for
1456 // bit manipulation. This pattern aims to provide opportunities for other
1457 // optimizers by supporting a simple but common case in InstSimplify.
1458 Value *Y;
1459 const APInt *ShRAmt, *ShLAmt;
1460 if (Q.IIQ.UseInstrInfo && match(Op1, m_APInt(ShRAmt)) &&
1461 match(Op0, m_c_Or(m_NUWShl(m_Value(X), m_APInt(ShLAmt)), m_Value(Y))) &&
1462 *ShRAmt == *ShLAmt) {
1463 const KnownBits YKnown = computeKnownBits(Y, Q);
1464 const unsigned EffWidthY = YKnown.countMaxActiveBits();
1465 if (ShRAmt->uge(EffWidthY))
1466 return X;
1467 }
1468
1469 return nullptr;
1470}
1471
// Public entry point: simplify an lshr using the default recursion budget.
Value *llvm::simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact,
                              const SimplifyQuery &Q) {
  return ::simplifyLShrInst(Op0, Op1, IsExact, Q, RecursionLimit);
}
1476
1477/// Given operands for an AShr, see if we can fold the result.
1478/// If not, this returns null.
1479static Value *simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact,
1480 const SimplifyQuery &Q, unsigned MaxRecurse) {
1481 if (Value *V = simplifyRightShift(Instruction::AShr, Op0, Op1, IsExact, Q,
1482 MaxRecurse))
1483 return V;
1484
1485 // -1 >>a X --> -1
1486 // (-1 << X) a>> X --> -1
1487 // We could return the original -1 constant to preserve poison elements.
1488 if (match(Op0, m_AllOnes()) ||
1489 match(Op0, m_Shl(m_AllOnes(), m_Specific(Op1))))
1490 return Constant::getAllOnesValue(Op0->getType());
1491
1492 // (X << A) >> A -> X
1493 Value *X;
1494 if (Q.IIQ.UseInstrInfo && match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
1495 return X;
1496
1497 // Arithmetic shifting an all-sign-bit value is a no-op.
1498 unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, Q.AC, Q.CxtI, Q.DT);
1499 if (NumSignBits == Op0->getType()->getScalarSizeInBits())
1500 return Op0;
1501
1502 return nullptr;
1503}
1504
// Public entry point: simplify an ashr using the default recursion budget.
Value *llvm::simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact,
                              const SimplifyQuery &Q) {
  return ::simplifyAShrInst(Op0, Op1, IsExact, Q, RecursionLimit);
}
1509
1510/// Commuted variants are assumed to be handled by calling this function again
1511/// with the parameters swapped.
1513 ICmpInst *UnsignedICmp, bool IsAnd,
1514 const SimplifyQuery &Q) {
1515 Value *X, *Y;
1516
1517 CmpPredicate EqPred;
1518 if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(Y), m_Zero())) ||
1519 !ICmpInst::isEquality(EqPred))
1520 return nullptr;
1521
1522 CmpPredicate UnsignedPred;
1523
1524 Value *A, *B;
1525 // Y = (A - B);
1526 if (match(Y, m_Sub(m_Value(A), m_Value(B)))) {
1527 if (match(UnsignedICmp,
1528 m_c_ICmp(UnsignedPred, m_Specific(A), m_Specific(B))) &&
1529 ICmpInst::isUnsigned(UnsignedPred)) {
1530 // A >=/<= B || (A - B) != 0 <--> true
1531 if ((UnsignedPred == ICmpInst::ICMP_UGE ||
1532 UnsignedPred == ICmpInst::ICMP_ULE) &&
1533 EqPred == ICmpInst::ICMP_NE && !IsAnd)
1534 return ConstantInt::getTrue(UnsignedICmp->getType());
1535 // A </> B && (A - B) == 0 <--> false
1536 if ((UnsignedPred == ICmpInst::ICMP_ULT ||
1537 UnsignedPred == ICmpInst::ICMP_UGT) &&
1538 EqPred == ICmpInst::ICMP_EQ && IsAnd)
1539 return ConstantInt::getFalse(UnsignedICmp->getType());
1540
1541 // A </> B && (A - B) != 0 <--> A </> B
1542 // A </> B || (A - B) != 0 <--> (A - B) != 0
1543 if (EqPred == ICmpInst::ICMP_NE && (UnsignedPred == ICmpInst::ICMP_ULT ||
1544 UnsignedPred == ICmpInst::ICMP_UGT))
1545 return IsAnd ? UnsignedICmp : ZeroICmp;
1546
1547 // A <=/>= B && (A - B) == 0 <--> (A - B) == 0
1548 // A <=/>= B || (A - B) == 0 <--> A <=/>= B
1549 if (EqPred == ICmpInst::ICMP_EQ && (UnsignedPred == ICmpInst::ICMP_ULE ||
1550 UnsignedPred == ICmpInst::ICMP_UGE))
1551 return IsAnd ? ZeroICmp : UnsignedICmp;
1552 }
1553
1554 // Given Y = (A - B)
1555 // Y >= A && Y != 0 --> Y >= A iff B != 0
1556 // Y < A || Y == 0 --> Y < A iff B != 0
1557 if (match(UnsignedICmp,
1558 m_c_ICmp(UnsignedPred, m_Specific(Y), m_Specific(A)))) {
1559 if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd &&
1560 EqPred == ICmpInst::ICMP_NE && isKnownNonZero(B, Q))
1561 return UnsignedICmp;
1562 if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd &&
1563 EqPred == ICmpInst::ICMP_EQ && isKnownNonZero(B, Q))
1564 return UnsignedICmp;
1565 }
1566 }
1567
1568 if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) &&
1569 ICmpInst::isUnsigned(UnsignedPred))
1570 ;
1571 else if (match(UnsignedICmp,
1572 m_ICmp(UnsignedPred, m_Specific(Y), m_Value(X))) &&
1573 ICmpInst::isUnsigned(UnsignedPred))
1574 UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
1575 else
1576 return nullptr;
1577
1578 // X > Y && Y == 0 --> Y == 0 iff X != 0
1579 // X > Y || Y == 0 --> X > Y iff X != 0
1580 if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
1581 isKnownNonZero(X, Q))
1582 return IsAnd ? ZeroICmp : UnsignedICmp;
1583
1584 // X <= Y && Y != 0 --> X <= Y iff X != 0
1585 // X <= Y || Y != 0 --> Y != 0 iff X != 0
1586 if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
1587 isKnownNonZero(X, Q))
1588 return IsAnd ? UnsignedICmp : ZeroICmp;
1589
1590 // The transforms below here are expected to be handled more generally with
1591 // simplifyAndOrOfICmpsWithLimitConst() or in InstCombine's
1592 // foldAndOrOfICmpsWithConstEq(). If we are looking to trim optimizer overlap,
1593 // these are candidates for removal.
1594
1595 // X < Y && Y != 0 --> X < Y
1596 // X < Y || Y != 0 --> Y != 0
1597 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
1598 return IsAnd ? UnsignedICmp : ZeroICmp;
1599
1600 // X >= Y && Y == 0 --> Y == 0
1601 // X >= Y || Y == 0 --> X >= Y
1602 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ)
1603 return IsAnd ? ZeroICmp : UnsignedICmp;
1604
1605 // X < Y && Y == 0 --> false
1606 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
1607 IsAnd)
1608 return getFalse(UnsignedICmp->getType());
1609
1610 // X >= Y || Y != 0 --> true
1611 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_NE &&
1612 !IsAnd)
1613 return getTrue(UnsignedICmp->getType());
1614
1615 return nullptr;
1616}
1617
1618/// Test if a pair of compares with a shared operand and 2 constants has an
1619/// empty set intersection, full set union, or if one compare is a superset of
1620/// the other.
1622 bool IsAnd) {
1623 // Look for this pattern: {and/or} (icmp X, C0), (icmp X, C1)).
1624 if (Cmp0->getOperand(0) != Cmp1->getOperand(0))
1625 return nullptr;
1626
1627 const APInt *C0, *C1;
1628 if (!match(Cmp0->getOperand(1), m_APInt(C0)) ||
1629 !match(Cmp1->getOperand(1), m_APInt(C1)))
1630 return nullptr;
1631
1632 auto Range0 = ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0);
1633 auto Range1 = ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1);
1634
1635 // For and-of-compares, check if the intersection is empty:
1636 // (icmp X, C0) && (icmp X, C1) --> empty set --> false
1637 if (IsAnd && Range0.intersectWith(Range1).isEmptySet())
1638 return getFalse(Cmp0->getType());
1639
1640 // For or-of-compares, check if the union is full:
1641 // (icmp X, C0) || (icmp X, C1) --> full set --> true
1642 if (!IsAnd && Range0.unionWith(Range1).isFullSet())
1643 return getTrue(Cmp0->getType());
1644
1645 // Is one range a superset of the other?
1646 // If this is and-of-compares, take the smaller set:
1647 // (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42
1648 // If this is or-of-compares, take the larger set:
1649 // (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4
1650 if (Range0.contains(Range1))
1651 return IsAnd ? Cmp1 : Cmp0;
1652 if (Range1.contains(Range0))
1653 return IsAnd ? Cmp0 : Cmp1;
1654
1655 return nullptr;
1656}
1657
1659 const InstrInfoQuery &IIQ) {
1660 // (icmp (add V, C0), C1) & (icmp V, C0)
1661 CmpPredicate Pred0, Pred1;
1662 const APInt *C0, *C1;
1663 Value *V;
1664 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1665 return nullptr;
1666
1667 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1668 return nullptr;
1669
1670 auto *AddInst = cast<OverflowingBinaryOperator>(Op0->getOperand(0));
1671 if (AddInst->getOperand(1) != Op1->getOperand(1))
1672 return nullptr;
1673
1674 Type *ITy = Op0->getType();
1675 bool IsNSW = IIQ.hasNoSignedWrap(AddInst);
1676 bool IsNUW = IIQ.hasNoUnsignedWrap(AddInst);
1677
1678 const APInt Delta = *C1 - *C0;
1679 if (C0->isStrictlyPositive()) {
1680 if (Delta == 2) {
1681 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT)
1682 return getFalse(ITy);
1683 if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
1684 return getFalse(ITy);
1685 }
1686 if (Delta == 1) {
1687 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT)
1688 return getFalse(ITy);
1689 if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
1690 return getFalse(ITy);
1691 }
1692 }
1693 if (C0->getBoolValue() && IsNUW) {
1694 if (Delta == 2)
1695 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)
1696 return getFalse(ITy);
1697 if (Delta == 1)
1698 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT)
1699 return getFalse(ITy);
1700 }
1701
1702 return nullptr;
1703}
1704
1705/// Try to simplify and/or of icmp with ctpop intrinsic.
1707 bool IsAnd) {
1708 CmpPredicate Pred0, Pred1;
1709 Value *X;
1710 const APInt *C;
1712 m_APInt(C))) ||
1713 !match(Cmp1, m_ICmp(Pred1, m_Specific(X), m_ZeroInt())) || C->isZero())
1714 return nullptr;
1715
1716 // (ctpop(X) == C) || (X != 0) --> X != 0 where C > 0
1717 if (!IsAnd && Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_NE)
1718 return Cmp1;
1719 // (ctpop(X) != C) && (X == 0) --> X == 0 where C > 0
1720 if (IsAnd && Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_EQ)
1721 return Cmp1;
1722
1723 return nullptr;
1724}
1725
1727 const SimplifyQuery &Q) {
1728 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true, Q))
1729 return X;
1730 if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/true, Q))
1731 return X;
1732
1733 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true))
1734 return X;
1735
1736 if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op0, Op1, true))
1737 return X;
1738 if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op1, Op0, true))
1739 return X;
1740
1741 if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1742 return X;
1743 if (Value *X = simplifyAndOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1744 return X;
1745
1746 return nullptr;
1747}
1748
1750 const InstrInfoQuery &IIQ) {
1751 // (icmp (add V, C0), C1) | (icmp V, C0)
1752 CmpPredicate Pred0, Pred1;
1753 const APInt *C0, *C1;
1754 Value *V;
1755 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1756 return nullptr;
1757
1758 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1759 return nullptr;
1760
1761 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
1762 if (AddInst->getOperand(1) != Op1->getOperand(1))
1763 return nullptr;
1764
1765 Type *ITy = Op0->getType();
1766 bool IsNSW = IIQ.hasNoSignedWrap(AddInst);
1767 bool IsNUW = IIQ.hasNoUnsignedWrap(AddInst);
1768
1769 const APInt Delta = *C1 - *C0;
1770 if (C0->isStrictlyPositive()) {
1771 if (Delta == 2) {
1772 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
1773 return getTrue(ITy);
1774 if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
1775 return getTrue(ITy);
1776 }
1777 if (Delta == 1) {
1778 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
1779 return getTrue(ITy);
1780 if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
1781 return getTrue(ITy);
1782 }
1783 }
1784 if (C0->getBoolValue() && IsNUW) {
1785 if (Delta == 2)
1786 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
1787 return getTrue(ITy);
1788 if (Delta == 1)
1789 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
1790 return getTrue(ITy);
1791 }
1792
1793 return nullptr;
1794}
1795
1797 const SimplifyQuery &Q) {
1798 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false, Q))
1799 return X;
1800 if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/false, Q))
1801 return X;
1802
1803 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false))
1804 return X;
1805
1806 if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op0, Op1, false))
1807 return X;
1808 if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op1, Op0, false))
1809 return X;
1810
1811 if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1812 return X;
1813 if (Value *X = simplifyOrOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1814 return X;
1815
1816 return nullptr;
1817}
1818
1819/// Test if a pair of compares with a shared operand and 2 constants has an
1820/// empty set intersection, full set union, or if one compare is a superset of
1821/// the other.
1823 bool IsAnd) {
1824 // Look for this pattern: {and/or} (fcmp X, C0), (fcmp X, C1)).
1825 if (Cmp0->getOperand(0) != Cmp1->getOperand(0))
1826 return nullptr;
1827
1828 const APFloat *C0, *C1;
1829 if (!match(Cmp0->getOperand(1), m_APFloat(C0)) ||
1830 !match(Cmp1->getOperand(1), m_APFloat(C1)))
1831 return nullptr;
1832
1834 IsAnd ? Cmp0->getPredicate() : Cmp0->getInversePredicate(), *C0);
1836 IsAnd ? Cmp1->getPredicate() : Cmp1->getInversePredicate(), *C1);
1837
1838 if (!Range0 || !Range1)
1839 return nullptr;
1840
1841 // For and-of-compares, check if the intersection is empty:
1842 // (fcmp X, C0) && (fcmp X, C1) --> empty set --> false
1843 if (Range0->intersectWith(*Range1).isEmptySet())
1844 return ConstantInt::getBool(Cmp0->getType(), !IsAnd);
1845
1846 // Is one range a superset of the other?
1847 // If this is and-of-compares, take the smaller set:
1848 // (fcmp ogt X, 4) && (fcmp ogt X, 42) --> fcmp ogt X, 42
1849 // If this is or-of-compares, take the larger set:
1850 // (fcmp ogt X, 4) || (fcmp ogt X, 42) --> fcmp ogt X, 4
1851 if (Range0->contains(*Range1))
1852 return Cmp1;
1853 if (Range1->contains(*Range0))
1854 return Cmp0;
1855
1856 return nullptr;
1857}
1858
1860 FCmpInst *RHS, bool IsAnd) {
1861 Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
1862 Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
1863 if (LHS0->getType() != RHS0->getType())
1864 return nullptr;
1865
1866 FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1867 auto AbsOrSelfLHS0 = m_CombineOr(m_Specific(LHS0), m_FAbs(m_Specific(LHS0)));
1868 if ((PredL == FCmpInst::FCMP_ORD || PredL == FCmpInst::FCMP_UNO) &&
1869 ((FCmpInst::isOrdered(PredR) && IsAnd) ||
1870 (FCmpInst::isUnordered(PredR) && !IsAnd))) {
1871 // (fcmp ord X, 0) & (fcmp o** X/abs(X), Y) --> fcmp o** X/abs(X), Y
1872 // (fcmp uno X, 0) & (fcmp o** X/abs(X), Y) --> false
1873 // (fcmp uno X, 0) | (fcmp u** X/abs(X), Y) --> fcmp u** X/abs(X), Y
1874 // (fcmp ord X, 0) | (fcmp u** X/abs(X), Y) --> true
1875 if ((match(RHS0, AbsOrSelfLHS0) || match(RHS1, AbsOrSelfLHS0)) &&
1876 match(LHS1, m_PosZeroFP()))
1877 return FCmpInst::isOrdered(PredL) == FCmpInst::isOrdered(PredR)
1878 ? static_cast<Value *>(RHS)
1879 : ConstantInt::getBool(LHS->getType(), !IsAnd);
1880 }
1881
1882 auto AbsOrSelfRHS0 = m_CombineOr(m_Specific(RHS0), m_FAbs(m_Specific(RHS0)));
1883 if ((PredR == FCmpInst::FCMP_ORD || PredR == FCmpInst::FCMP_UNO) &&
1884 ((FCmpInst::isOrdered(PredL) && IsAnd) ||
1885 (FCmpInst::isUnordered(PredL) && !IsAnd))) {
1886 // (fcmp o** X/abs(X), Y) & (fcmp ord X, 0) --> fcmp o** X/abs(X), Y
1887 // (fcmp o** X/abs(X), Y) & (fcmp uno X, 0) --> false
1888 // (fcmp u** X/abs(X), Y) | (fcmp uno X, 0) --> fcmp u** X/abs(X), Y
1889 // (fcmp u** X/abs(X), Y) | (fcmp ord X, 0) --> true
1890 if ((match(LHS0, AbsOrSelfRHS0) || match(LHS1, AbsOrSelfRHS0)) &&
1891 match(RHS1, m_PosZeroFP()))
1892 return FCmpInst::isOrdered(PredL) == FCmpInst::isOrdered(PredR)
1893 ? static_cast<Value *>(LHS)
1894 : ConstantInt::getBool(LHS->getType(), !IsAnd);
1895 }
1896
1897 if (auto *V = simplifyAndOrOfFCmpsWithConstants(LHS, RHS, IsAnd))
1898 return V;
1899
1900 return nullptr;
1901}
1902
1904 Value *Op1, bool IsAnd) {
1905 // Look through casts of the 'and' operands to find compares.
1906 auto *Cast0 = dyn_cast<CastInst>(Op0);
1907 auto *Cast1 = dyn_cast<CastInst>(Op1);
1908 if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
1909 Cast0->getSrcTy() == Cast1->getSrcTy()) {
1910 Op0 = Cast0->getOperand(0);
1911 Op1 = Cast1->getOperand(0);
1912 }
1913
1914 Value *V = nullptr;
1915 auto *ICmp0 = dyn_cast<ICmpInst>(Op0);
1916 auto *ICmp1 = dyn_cast<ICmpInst>(Op1);
1917 if (ICmp0 && ICmp1)
1918 V = IsAnd ? simplifyAndOfICmps(ICmp0, ICmp1, Q)
1919 : simplifyOrOfICmps(ICmp0, ICmp1, Q);
1920
1921 auto *FCmp0 = dyn_cast<FCmpInst>(Op0);
1922 auto *FCmp1 = dyn_cast<FCmpInst>(Op1);
1923 if (FCmp0 && FCmp1)
1924 V = simplifyAndOrOfFCmps(Q, FCmp0, FCmp1, IsAnd);
1925
1926 if (!V)
1927 return nullptr;
1928 if (!Cast0)
1929 return V;
1930
1931 // If we looked through casts, we can only handle a constant simplification
1932 // because we are not allowed to create a cast instruction here.
1933 if (auto *C = dyn_cast<Constant>(V))
1934 return ConstantFoldCastOperand(Cast0->getOpcode(), C, Cast0->getType(),
1935 Q.DL);
1936
1937 return nullptr;
1938}
1939
1940static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
1941 const SimplifyQuery &Q,
1942 bool AllowRefinement,
1944 unsigned MaxRecurse);
1945
1946static Value *simplifyAndOrWithICmpEq(unsigned Opcode, Value *Op0, Value *Op1,
1947 const SimplifyQuery &Q,
1948 unsigned MaxRecurse) {
1949 assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1950 "Must be and/or");
1951 CmpPredicate Pred;
1952 Value *A, *B;
1953 if (Op0->getType()->isIntOrIntVectorTy(1) &&
1954 match(Op0, m_NUWTrunc(m_Value(A)))) {
1955 B = ConstantInt::getNullValue(A->getType());
1956 Pred = ICmpInst::ICMP_NE;
1957 } else if (!match(Op0, m_ICmp(Pred, m_Value(A), m_Value(B))) ||
1958 !ICmpInst::isEquality(Pred))
1959 return nullptr;
1960
1961 auto Simplify = [&](Value *Res) -> Value * {
1962 Constant *Absorber = ConstantExpr::getBinOpAbsorber(Opcode, Res->getType());
1963
1964 // and (icmp eq a, b), x implies (a==b) inside x.
1965 // or (icmp ne a, b), x implies (a==b) inside x.
1966 // If x simplifies to true/false, we can simplify the and/or.
1967 if (Pred ==
1968 (Opcode == Instruction::And ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE)) {
1969 if (Res == Absorber)
1970 return Absorber;
1971 if (Res == ConstantExpr::getBinOpIdentity(Opcode, Res->getType()))
1972 return Op0;
1973 return nullptr;
1974 }
1975
1976 // If we have and (icmp ne a, b), x and for a==b we can simplify x to false,
1977 // then we can drop the icmp, as x will already be false in the case where
1978 // the icmp is false. Similar for or and true.
1979 if (Res == Absorber)
1980 return Op1;
1981 return nullptr;
1982 };
1983
1984 // In the final case (Res == Absorber with inverted predicate), it is safe to
1985 // refine poison during simplification, but not undef. For simplicity always
1986 // disable undef-based folds here.
1987 if (Value *Res = simplifyWithOpReplaced(Op1, A, B, Q.getWithoutUndef(),
1988 /* AllowRefinement */ true,
1989 /* DropFlags */ nullptr, MaxRecurse))
1990 return Simplify(Res);
1991 if (Value *Res = simplifyWithOpReplaced(Op1, B, A, Q.getWithoutUndef(),
1992 /* AllowRefinement */ true,
1993 /* DropFlags */ nullptr, MaxRecurse))
1994 return Simplify(Res);
1995
1996 return nullptr;
1997}
1998
1999/// Given a bitwise logic op, check if the operands are add/sub with a common
2000/// source value and inverted constant (identity: C - X -> ~(X + ~C)).
2002 Instruction::BinaryOps Opcode) {
2003 assert(Op0->getType() == Op1->getType() && "Mismatched binop types");
2004 assert(BinaryOperator::isBitwiseLogicOp(Opcode) && "Expected logic op");
2005 Value *X;
2006 Constant *C1, *C2;
2007 if ((match(Op0, m_Add(m_Value(X), m_Constant(C1))) &&
2008 match(Op1, m_Sub(m_Constant(C2), m_Specific(X)))) ||
2009 (match(Op1, m_Add(m_Value(X), m_Constant(C1))) &&
2010 match(Op0, m_Sub(m_Constant(C2), m_Specific(X))))) {
2011 if (ConstantExpr::getNot(C1) == C2) {
2012 // (X + C) & (~C - X) --> (X + C) & ~(X + C) --> 0
2013 // (X + C) | (~C - X) --> (X + C) | ~(X + C) --> -1
2014 // (X + C) ^ (~C - X) --> (X + C) ^ ~(X + C) --> -1
2015 Type *Ty = Op0->getType();
2016 return Opcode == Instruction::And ? ConstantInt::getNullValue(Ty)
2018 }
2019 }
2020 return nullptr;
2021}
2022
2023// Commutative patterns for and that will be tried with both operand orders.
2025 const SimplifyQuery &Q,
2026 unsigned MaxRecurse) {
2027 // ~A & A = 0
2028 if (match(Op0, m_Not(m_Specific(Op1))))
2029 return Constant::getNullValue(Op0->getType());
2030
2031 // (A | ?) & A = A
2032 if (match(Op0, m_c_Or(m_Specific(Op1), m_Value())))
2033 return Op1;
2034
2035 // (X | ~Y) & (X | Y) --> X
2036 Value *X, *Y;
2037 if (match(Op0, m_c_Or(m_Value(X), m_Not(m_Value(Y)))) &&
2038 match(Op1, m_c_Or(m_Specific(X), m_Specific(Y))))
2039 return X;
2040
2041 // If we have a multiplication overflow check that is being 'and'ed with a
2042 // check that one of the multipliers is not zero, we can omit the 'and', and
2043 // only keep the overflow check.
2044 if (isCheckForZeroAndMulWithOverflow(Op0, Op1, true))
2045 return Op1;
2046
2047 // -A & A = A if A is a power of two or zero.
2048 if (match(Op0, m_Neg(m_Specific(Op1))) &&
2049 isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, Q.AC, Q.CxtI, Q.DT))
2050 return Op1;
2051
2052 // This is a similar pattern used for checking if a value is a power-of-2:
2053 // (A - 1) & A --> 0 (if A is a power-of-2 or 0)
2054 if (match(Op0, m_Add(m_Specific(Op1), m_AllOnes())) &&
2055 isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, Q.AC, Q.CxtI, Q.DT))
2056 return Constant::getNullValue(Op1->getType());
2057
2058 // (x << N) & ((x << M) - 1) --> 0, where x is known to be a power of 2 and
2059 // M <= N.
2060 const APInt *Shift1, *Shift2;
2061 if (match(Op0, m_Shl(m_Value(X), m_APInt(Shift1))) &&
2062 match(Op1, m_Add(m_Shl(m_Specific(X), m_APInt(Shift2)), m_AllOnes())) &&
2063 isKnownToBeAPowerOfTwo(X, Q.DL, /*OrZero*/ true, Q.AC, Q.CxtI) &&
2064 Shift1->uge(*Shift2))
2065 return Constant::getNullValue(Op0->getType());
2066
2067 if (Value *V =
2068 simplifyAndOrWithICmpEq(Instruction::And, Op0, Op1, Q, MaxRecurse))
2069 return V;
2070
2071 return nullptr;
2072}
2073
2074/// Given operands for an And, see if we can fold the result.
2075/// If not, this returns null.
2076static Value *simplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2077 unsigned MaxRecurse) {
2078 if (Constant *C = foldOrCommuteConstant(Instruction::And, Op0, Op1, Q))
2079 return C;
2080
2081 // X & poison -> poison
2082 if (isa<PoisonValue>(Op1))
2083 return Op1;
2084
2085 // X & undef -> 0
2086 if (Q.isUndefValue(Op1))
2087 return Constant::getNullValue(Op0->getType());
2088
2089 // X & X = X
2090 if (Op0 == Op1)
2091 return Op0;
2092
2093 // X & 0 = 0
2094 if (match(Op1, m_Zero()))
2095 return Constant::getNullValue(Op0->getType());
2096
2097 // X & -1 = X
2098 if (match(Op1, m_AllOnes()))
2099 return Op0;
2100
2101 if (Value *Res = simplifyAndCommutative(Op0, Op1, Q, MaxRecurse))
2102 return Res;
2103 if (Value *Res = simplifyAndCommutative(Op1, Op0, Q, MaxRecurse))
2104 return Res;
2105
2106 if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::And))
2107 return V;
2108
2109 // A mask that only clears known zeros of a shifted value is a no-op.
2110 const APInt *Mask;
2111 const APInt *ShAmt;
2112 Value *X, *Y;
2113 if (match(Op1, m_APInt(Mask))) {
2114 // If all bits in the inverted and shifted mask are clear:
2115 // and (shl X, ShAmt), Mask --> shl X, ShAmt
2116 if (match(Op0, m_Shl(m_Value(X), m_APInt(ShAmt))) &&
2117 (~(*Mask)).lshr(*ShAmt).isZero())
2118 return Op0;
2119
2120 // If all bits in the inverted and shifted mask are clear:
2121 // and (lshr X, ShAmt), Mask --> lshr X, ShAmt
2122 if (match(Op0, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
2123 (~(*Mask)).shl(*ShAmt).isZero())
2124 return Op0;
2125 }
2126
2127 // and 2^x-1, 2^C --> 0 where x <= C.
2128 const APInt *PowerC;
2129 Value *Shift;
2130 if (match(Op1, m_Power2(PowerC)) &&
2131 match(Op0, m_Add(m_Value(Shift), m_AllOnes())) &&
2132 isKnownToBeAPowerOfTwo(Shift, Q.DL, /*OrZero*/ false, Q.AC, Q.CxtI,
2133 Q.DT)) {
2134 KnownBits Known = computeKnownBits(Shift, Q);
2135 // Use getActiveBits() to make use of the additional power of two knowledge
2136 if (PowerC->getActiveBits() >= Known.getMaxValue().getActiveBits())
2137 return ConstantInt::getNullValue(Op1->getType());
2138 }
2139
2140 if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, true))
2141 return V;
2142
2143 // Try some generic simplifications for associative operations.
2144 if (Value *V =
2145 simplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q, MaxRecurse))
2146 return V;
2147
2148 // And distributes over Or. Try some generic simplifications based on this.
2149 if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1,
2150 Instruction::Or, Q, MaxRecurse))
2151 return V;
2152
2153 // And distributes over Xor. Try some generic simplifications based on this.
2154 if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1,
2155 Instruction::Xor, Q, MaxRecurse))
2156 return V;
2157
2158 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2159 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2160 // A & (A && B) -> A && B
2161 if (match(Op1, m_Select(m_Specific(Op0), m_Value(), m_Zero())))
2162 return Op1;
2163 else if (match(Op0, m_Select(m_Specific(Op1), m_Value(), m_Zero())))
2164 return Op0;
2165 }
2166 // If the operation is with the result of a select instruction, check
2167 // whether operating on either branch of the select always yields the same
2168 // value.
2169 if (Value *V =
2170 threadBinOpOverSelect(Instruction::And, Op0, Op1, Q, MaxRecurse))
2171 return V;
2172 }
2173
2174 // If the operation is with the result of a phi instruction, check whether
2175 // operating on all incoming values of the phi always yields the same value.
2176 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2177 if (Value *V =
2178 threadBinOpOverPHI(Instruction::And, Op0, Op1, Q, MaxRecurse))
2179 return V;
2180
2181 // Assuming the effective width of Y is not larger than A, i.e. all bits
2182 // from X and Y are disjoint in (X << A) | Y,
2183 // if the mask of this AND op covers all bits of X or Y, while it covers
2184 // no bits from the other, we can bypass this AND op. E.g.,
2185 // ((X << A) | Y) & Mask -> Y,
2186 // if Mask = ((1 << effective_width_of(Y)) - 1)
2187 // ((X << A) | Y) & Mask -> X << A,
2188 // if Mask = ((1 << effective_width_of(X)) - 1) << A
2189 // SimplifyDemandedBits in InstCombine can optimize the general case.
2190 // This pattern aims to help other passes for a common case.
2191 Value *XShifted;
2192 if (Q.IIQ.UseInstrInfo && match(Op1, m_APInt(Mask)) &&
2194 m_Value(XShifted)),
2195 m_Value(Y)))) {
2196 const unsigned Width = Op0->getType()->getScalarSizeInBits();
2197 const unsigned ShftCnt = ShAmt->getLimitedValue(Width);
2198 const KnownBits YKnown = computeKnownBits(Y, Q);
2199 const unsigned EffWidthY = YKnown.countMaxActiveBits();
2200 if (EffWidthY <= ShftCnt) {
2201 const KnownBits XKnown = computeKnownBits(X, Q);
2202 const unsigned EffWidthX = XKnown.countMaxActiveBits();
2203 const APInt EffBitsY = APInt::getLowBitsSet(Width, EffWidthY);
2204 const APInt EffBitsX = APInt::getLowBitsSet(Width, EffWidthX) << ShftCnt;
2205 // If the mask is extracting all bits from X or Y as is, we can skip
2206 // this AND op.
2207 if (EffBitsY.isSubsetOf(*Mask) && !EffBitsX.intersects(*Mask))
2208 return Y;
2209 if (EffBitsX.isSubsetOf(*Mask) && !EffBitsY.intersects(*Mask))
2210 return XShifted;
2211 }
2212 }
2213
2214 // ((X | Y) ^ X ) & ((X | Y) ^ Y) --> 0
2215 // ((X | Y) ^ Y ) & ((X | Y) ^ X) --> 0
2217 if (match(Op0, m_c_Xor(m_Value(X),
2219 m_c_Or(m_Deferred(X), m_Value(Y))))) &&
2221 return Constant::getNullValue(Op0->getType());
2222
2223 const APInt *C1;
2224 Value *A;
2225 // (A ^ C) & (A ^ ~C) -> 0
2226 if (match(Op0, m_Xor(m_Value(A), m_APInt(C1))) &&
2227 match(Op1, m_Xor(m_Specific(A), m_SpecificInt(~*C1))))
2228 return Constant::getNullValue(Op0->getType());
2229
2230 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2231 if (std::optional<bool> Implied = isImpliedCondition(Op0, Op1, Q.DL)) {
2232 // If Op0 is true implies Op1 is true, then Op0 is a subset of Op1.
2233 if (*Implied == true)
2234 return Op0;
2235 // If Op0 is true implies Op1 is false, then they are not true together.
2236 if (*Implied == false)
2237 return ConstantInt::getFalse(Op0->getType());
2238 }
2239 if (std::optional<bool> Implied = isImpliedCondition(Op1, Op0, Q.DL)) {
2240 // If Op1 is true implies Op0 is true, then Op1 is a subset of Op0.
2241 if (*Implied)
2242 return Op1;
2243 // If Op1 is true implies Op0 is false, then they are not true together.
2244 if (!*Implied)
2245 return ConstantInt::getFalse(Op1->getType());
2246 }
2247 }
2248
2249 if (Value *V = simplifyByDomEq(Instruction::And, Op0, Op1, Q, MaxRecurse))
2250 return V;
2251
2252 return nullptr;
2253}
2254
2256 return ::simplifyAndInst(Op0, Op1, Q, RecursionLimit);
2257}
2258
2259// TODO: Many of these folds could use LogicalAnd/LogicalOr.
2261 assert(X->getType() == Y->getType() && "Expected same type for 'or' ops");
2262 Type *Ty = X->getType();
2263
2264 // X | ~X --> -1
2265 if (match(Y, m_Not(m_Specific(X))))
2267
2268 // X | ~(X & ?) = -1
2269 if (match(Y, m_Not(m_c_And(m_Specific(X), m_Value()))))
2271
2272 // X | (X & ?) --> X
2273 if (match(Y, m_c_And(m_Specific(X), m_Value())))
2274 return X;
2275
2276 Value *A, *B;
2277
2278 // (A ^ B) | (A | B) --> A | B
2279 // (A ^ B) | (B | A) --> B | A
2280 if (match(X, m_Xor(m_Value(A), m_Value(B))) &&
2282 return Y;
2283
2284 // ~(A ^ B) | (A | B) --> -1
2285 // ~(A ^ B) | (B | A) --> -1
2286 if (match(X, m_Not(m_Xor(m_Value(A), m_Value(B)))) &&
2289
2290 // (A & ~B) | (A ^ B) --> A ^ B
2291 // (~B & A) | (A ^ B) --> A ^ B
2292 // (A & ~B) | (B ^ A) --> B ^ A
2293 // (~B & A) | (B ^ A) --> B ^ A
2294 if (match(X, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
2296 return Y;
2297
2298 // (~A ^ B) | (A & B) --> ~A ^ B
2299 // (B ^ ~A) | (A & B) --> B ^ ~A
2300 // (~A ^ B) | (B & A) --> ~A ^ B
2301 // (B ^ ~A) | (B & A) --> B ^ ~A
2302 if (match(X, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
2304 return X;
2305
2306 // (~A | B) | (A ^ B) --> -1
2307 // (~A | B) | (B ^ A) --> -1
2308 // (B | ~A) | (A ^ B) --> -1
2309 // (B | ~A) | (B ^ A) --> -1
2310 if (match(X, m_c_Or(m_Not(m_Value(A)), m_Value(B))) &&
2313
2314 // (~A & B) | ~(A | B) --> ~A
2315 // (~A & B) | ~(B | A) --> ~A
2316 // (B & ~A) | ~(A | B) --> ~A
2317 // (B & ~A) | ~(B | A) --> ~A
2318 Value *NotA;
2320 m_Value(B))) &&
2322 return NotA;
2323 // The same is true of Logical And
2324 // TODO: This could share the logic of the version above if there was a
2325 // version of LogicalAnd that allowed more than just i1 types.
2327 m_Value(B))) &&
2329 return NotA;
2330
2331 // ~(A ^ B) | (A & B) --> ~(A ^ B)
2332 // ~(A ^ B) | (B & A) --> ~(A ^ B)
2333 Value *NotAB;
2335 m_Value(NotAB))) &&
2337 return NotAB;
2338
2339 // ~(A & B) | (A ^ B) --> ~(A & B)
2340 // ~(A & B) | (B ^ A) --> ~(A & B)
2342 m_Value(NotAB))) &&
2344 return NotAB;
2345
2346 return nullptr;
2347}
2348
2349/// Given operands for an Or, see if we can fold the result.
2350/// If not, this returns null.
2351static Value *simplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2352 unsigned MaxRecurse) {
2353 if (Constant *C = foldOrCommuteConstant(Instruction::Or, Op0, Op1, Q))
2354 return C;
2355
2356 // X | poison -> poison
2357 if (isa<PoisonValue>(Op1))
2358 return Op1;
2359
2360 // X | undef -> -1
2361 // X | -1 = -1
2362 // Do not return Op1 because it may contain undef elements if it's a vector.
2363 if (Q.isUndefValue(Op1) || match(Op1, m_AllOnes()))
2364 return Constant::getAllOnesValue(Op0->getType());
2365
2366 // X | X = X
2367 // X | 0 = X
2368 if (Op0 == Op1 || match(Op1, m_Zero()))
2369 return Op0;
2370
2371 if (Value *R = simplifyOrLogic(Op0, Op1))
2372 return R;
2373 if (Value *R = simplifyOrLogic(Op1, Op0))
2374 return R;
2375
2376 if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Or))
2377 return V;
2378
2379 // Rotated -1 is still -1:
2380 // (-1 << X) | (-1 >> (C - X)) --> -1
2381 // (-1 >> X) | (-1 << (C - X)) --> -1
2382 // ...with C <= bitwidth (and commuted variants).
2383 Value *X, *Y;
2384 if ((match(Op0, m_Shl(m_AllOnes(), m_Value(X))) &&
2385 match(Op1, m_LShr(m_AllOnes(), m_Value(Y)))) ||
2386 (match(Op1, m_Shl(m_AllOnes(), m_Value(X))) &&
2387 match(Op0, m_LShr(m_AllOnes(), m_Value(Y))))) {
2388 const APInt *C;
2389 if ((match(X, m_Sub(m_APInt(C), m_Specific(Y))) ||
2390 match(Y, m_Sub(m_APInt(C), m_Specific(X)))) &&
2391 C->ule(X->getType()->getScalarSizeInBits())) {
2392 return ConstantInt::getAllOnesValue(X->getType());
2393 }
2394 }
2395
2396 // A funnel shift (rotate) can be decomposed into simpler shifts. See if we
2397 // are mixing in another shift that is redundant with the funnel shift.
2398
2399 // (fshl X, ?, Y) | (shl X, Y) --> fshl X, ?, Y
2400 // (shl X, Y) | (fshl X, ?, Y) --> fshl X, ?, Y
2401 if (match(Op0,
2403 match(Op1, m_Shl(m_Specific(X), m_Specific(Y))))
2404 return Op0;
2405 if (match(Op1,
2407 match(Op0, m_Shl(m_Specific(X), m_Specific(Y))))
2408 return Op1;
2409
2410 // (fshr ?, X, Y) | (lshr X, Y) --> fshr ?, X, Y
2411 // (lshr X, Y) | (fshr ?, X, Y) --> fshr ?, X, Y
2412 if (match(Op0,
2414 match(Op1, m_LShr(m_Specific(X), m_Specific(Y))))
2415 return Op0;
2416 if (match(Op1,
2418 match(Op0, m_LShr(m_Specific(X), m_Specific(Y))))
2419 return Op1;
2420
2421 if (Value *V =
2422 simplifyAndOrWithICmpEq(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2423 return V;
2424 if (Value *V =
2425 simplifyAndOrWithICmpEq(Instruction::Or, Op1, Op0, Q, MaxRecurse))
2426 return V;
2427
2428 if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, false))
2429 return V;
2430
2431 // If we have a multiplication overflow check that is being 'and'ed with a
2432 // check that one of the multipliers is not zero, we can omit the 'and', and
2433 // only keep the overflow check.
2434 if (isCheckForZeroAndMulWithOverflow(Op0, Op1, false))
2435 return Op1;
2436 if (isCheckForZeroAndMulWithOverflow(Op1, Op0, false))
2437 return Op0;
2438
2439 // Try some generic simplifications for associative operations.
2440 if (Value *V =
2441 simplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2442 return V;
2443
2444 // Or distributes over And. Try some generic simplifications based on this.
2445 if (Value *V = expandCommutativeBinOp(Instruction::Or, Op0, Op1,
2446 Instruction::And, Q, MaxRecurse))
2447 return V;
2448
2449 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2450 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2451 // A | (A || B) -> A || B
2452 if (match(Op1, m_Select(m_Specific(Op0), m_One(), m_Value())))
2453 return Op1;
2454 else if (match(Op0, m_Select(m_Specific(Op1), m_One(), m_Value())))
2455 return Op0;
2456 }
2457 // If the operation is with the result of a select instruction, check
2458 // whether operating on either branch of the select always yields the same
2459 // value.
2460 if (Value *V =
2461 threadBinOpOverSelect(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2462 return V;
2463 }
2464
2465 // (A & C1)|(B & C2)
2466 Value *A, *B;
2467 const APInt *C1, *C2;
2468 if (match(Op0, m_And(m_Value(A), m_APInt(C1))) &&
2469 match(Op1, m_And(m_Value(B), m_APInt(C2)))) {
2470 if (*C1 == ~*C2) {
2471 // (A & C1)|(B & C2)
2472 // If we have: ((V + N) & C1) | (V & C2)
2473 // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
2474 // replace with V+N.
2475 Value *N;
2476 if (C2->isMask() && // C2 == 0+1+
2478 // Add commutes, try both ways.
2479 if (MaskedValueIsZero(N, *C2, Q))
2480 return A;
2481 }
2482 // Or commutes, try both ways.
2483 if (C1->isMask() && match(B, m_c_Add(m_Specific(A), m_Value(N)))) {
2484 // Add commutes, try both ways.
2485 if (MaskedValueIsZero(N, *C1, Q))
2486 return B;
2487 }
2488 }
2489 }
2490
2491 // If the operation is with the result of a phi instruction, check whether
2492 // operating on all incoming values of the phi always yields the same value.
2493 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2494 if (Value *V = threadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2495 return V;
2496
2497 // (A ^ C) | (A ^ ~C) -> -1, i.e. all bits set to one.
2498 if (match(Op0, m_Xor(m_Value(A), m_APInt(C1))) &&
2499 match(Op1, m_Xor(m_Specific(A), m_SpecificInt(~*C1))))
2500 return Constant::getAllOnesValue(Op0->getType());
2501
2502 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2503 if (std::optional<bool> Implied =
2504 isImpliedCondition(Op0, Op1, Q.DL, false)) {
2505 // If Op0 is false implies Op1 is false, then Op1 is a subset of Op0.
2506 if (*Implied == false)
2507 return Op0;
2508 // If Op0 is false implies Op1 is true, then at least one is always true.
2509 if (*Implied == true)
2510 return ConstantInt::getTrue(Op0->getType());
2511 }
2512 if (std::optional<bool> Implied =
2513 isImpliedCondition(Op1, Op0, Q.DL, false)) {
2514 // If Op1 is false implies Op0 is false, then Op0 is a subset of Op1.
2515 if (*Implied == false)
2516 return Op1;
2517 // If Op1 is false implies Op0 is true, then at least one is always true.
2518 if (*Implied == true)
2519 return ConstantInt::getTrue(Op1->getType());
2520 }
2521 }
2522
2523 if (Value *V = simplifyByDomEq(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2524 return V;
2525
2526 return nullptr;
2527}
2528
2530 return ::simplifyOrInst(Op0, Op1, Q, RecursionLimit);
2531}
2532
2533/// Given operands for a Xor, see if we can fold the result.
2534/// If not, this returns null.
2535static Value *simplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2536 unsigned MaxRecurse) {
2537 if (Constant *C = foldOrCommuteConstant(Instruction::Xor, Op0, Op1, Q))
2538 return C;
2539
2540 // X ^ poison -> poison
2541 if (isa<PoisonValue>(Op1))
2542 return Op1;
2543
2544 // A ^ undef -> undef
2545 if (Q.isUndefValue(Op1))
2546 return Op1;
2547
2548 // A ^ 0 = A
2549 if (match(Op1, m_Zero()))
2550 return Op0;
2551
2552 // A ^ A = 0
2553 if (Op0 == Op1)
2554 return Constant::getNullValue(Op0->getType());
2555
2556 // A ^ ~A = ~A ^ A = -1
2557 if (match(Op0, m_Not(m_Specific(Op1))) || match(Op1, m_Not(m_Specific(Op0))))
2558 return Constant::getAllOnesValue(Op0->getType());
2559
2560 auto foldAndOrNot = [](Value *X, Value *Y) -> Value * {
2561 Value *A, *B;
2562 // (~A & B) ^ (A | B) --> A -- There are 8 commuted variants.
2563 if (match(X, m_c_And(m_Not(m_Value(A)), m_Value(B))) &&
2565 return A;
2566
2567 // (~A | B) ^ (A & B) --> ~A -- There are 8 commuted variants.
2568 // The 'not' op must contain a complete -1 operand (no undef elements for
2569 // vector) for the transform to be safe.
2570 Value *NotA;
2572 m_Value(B))) &&
2574 return NotA;
2575
2576 return nullptr;
2577 };
2578 if (Value *R = foldAndOrNot(Op0, Op1))
2579 return R;
2580 if (Value *R = foldAndOrNot(Op1, Op0))
2581 return R;
2582
2583 if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Xor))
2584 return V;
2585
2586 // Try some generic simplifications for associative operations.
2587 if (Value *V =
2588 simplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q, MaxRecurse))
2589 return V;
2590
2591 // Threading Xor over selects and phi nodes is pointless, so don't bother.
2592 // Threading over the select in "A ^ select(cond, B, C)" means evaluating
2593 // "A^B" and "A^C" and seeing if they are equal; but they are equal if and
2594 // only if B and C are equal. If B and C are equal then (since we assume
2595 // that operands have already been simplified) "select(cond, B, C)" should
2596 // have been simplified to the common value of B and C already. Analysing
2597 // "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly
2598 // for threading over phi nodes.
2599
2600 if (Value *V = simplifyByDomEq(Instruction::Xor, Op0, Op1, Q, MaxRecurse))
2601 return V;
2602
2603 // (xor (sub nuw C_Mask, X), C_Mask) -> X
2604 {
2605 Value *X;
2606 if (match(Op0, m_NUWSub(m_Specific(Op1), m_Value(X))) &&
2607 match(Op1, m_LowBitMask()))
2608 return X;
2609 }
2610
2611 return nullptr;
2612}
2613
2615 return ::simplifyXorInst(Op0, Op1, Q, RecursionLimit);
2616}
2617
2619 return CmpInst::makeCmpResultType(Op->getType());
2620}
2621
2622/// Rummage around inside V looking for something equivalent to the comparison
2623/// "LHS Pred RHS". Return such a value if found, otherwise return null.
2624/// Helper function for analyzing max/min idioms.
2626 Value *LHS, Value *RHS) {
2628 if (!SI)
2629 return nullptr;
2630 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
2631 if (!Cmp)
2632 return nullptr;
2633 Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
2634 if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
2635 return Cmp;
2636 if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) &&
2637 LHS == CmpRHS && RHS == CmpLHS)
2638 return Cmp;
2639 return nullptr;
2640}
2641
2642/// Return true if the underlying object (storage) must be disjoint from
2643/// storage returned by any noalias return call.
2644static bool isAllocDisjoint(const Value *V) {
2645 // For allocas, we consider only static ones (dynamic
2646 // allocas might be transformed into calls to malloc not simultaneously
2647 // live with the compared-to allocation). For globals, we exclude symbols
2648 // that might be resolve lazily to symbols in another dynamically-loaded
2649 // library (and, thus, could be malloc'ed by the implementation).
2650 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
2651 return AI->isStaticAlloca();
2652 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
2653 return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
2654 GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
2655 !GV->isThreadLocal();
2656 if (const Argument *A = dyn_cast<Argument>(V))
2657 return A->hasByValAttr();
2658 return false;
2659}
2660
2661/// Return true if V1 and V2 are each the base of some distict storage region
2662/// [V, object_size(V)] which do not overlap. Note that zero sized regions
2663/// *are* possible, and that zero sized regions do not overlap with any other.
2664static bool haveNonOverlappingStorage(const Value *V1, const Value *V2) {
2665 // Global variables always exist, so they always exist during the lifetime
2666 // of each other and all allocas. Global variables themselves usually have
2667 // non-overlapping storage, but since their addresses are constants, the
2668 // case involving two globals does not reach here and is instead handled in
2669 // constant folding.
2670 //
2671 // Two different allocas usually have different addresses...
2672 //
2673 // However, if there's an @llvm.stackrestore dynamically in between two
2674 // allocas, they may have the same address. It's tempting to reduce the
2675 // scope of the problem by only looking at *static* allocas here. That would
2676 // cover the majority of allocas while significantly reducing the likelihood
2677 // of having an @llvm.stackrestore pop up in the middle. However, it's not
2678 // actually impossible for an @llvm.stackrestore to pop up in the middle of
2679 // an entry block. Also, if we have a block that's not attached to a
2680 // function, we can't tell if it's "static" under the current definition.
2681 // Theoretically, this problem could be fixed by creating a new kind of
2682 // instruction kind specifically for static allocas. Such a new instruction
2683 // could be required to be at the top of the entry block, thus preventing it
2684 // from being subject to a @llvm.stackrestore. Instcombine could even
2685 // convert regular allocas into these special allocas. It'd be nifty.
2686 // However, until then, this problem remains open.
2687 //
2688 // So, we'll assume that two non-empty allocas have different addresses
2689 // for now.
2690 auto isByValArg = [](const Value *V) {
2691 const Argument *A = dyn_cast<Argument>(V);
2692 return A && A->hasByValAttr();
2693 };
2694
2695 // Byval args are backed by store which does not overlap with each other,
2696 // allocas, or globals.
2697 if (isByValArg(V1))
2698 return isa<AllocaInst>(V2) || isa<GlobalVariable>(V2) || isByValArg(V2);
2699 if (isByValArg(V2))
2700 return isa<AllocaInst>(V1) || isa<GlobalVariable>(V1) || isByValArg(V1);
2701
2702 return isa<AllocaInst>(V1) &&
2704}
2705
2706// A significant optimization not implemented here is assuming that alloca
2707// addresses are not equal to incoming argument values. They don't *alias*,
2708// as we say, but that doesn't mean they aren't equal, so we take a
2709// conservative approach.
2710//
2711// This is inspired in part by C++11 5.10p1:
2712// "Two pointers of the same type compare equal if and only if they are both
2713// null, both point to the same function, or both represent the same
2714// address."
2715//
2716// This is pretty permissive.
2717//
2718// It's also partly due to C11 6.5.9p6:
2719// "Two pointers compare equal if and only if both are null pointers, both are
2720// pointers to the same object (including a pointer to an object and a
2721// subobject at its beginning) or function, both are pointers to one past the
2722// last element of the same array object, or one is a pointer to one past the
2723// end of one array object and the other is a pointer to the start of a
2724// different array object that happens to immediately follow the first array
2725// object in the address space.)
2726//
2727// C11's version is more restrictive, however there's no reason why an argument
2728// couldn't be a one-past-the-end value for a stack object in the caller and be
2729// equal to the beginning of a stack object in the callee.
2730//
2731// If the C and C++ standards are ever made sufficiently restrictive in this
2732// area, it may be possible to update LLVM's semantics accordingly and reinstate
2733// this optimization.
2735 const SimplifyQuery &Q) {
2736 assert(LHS->getType() == RHS->getType() && "Must have same types");
2737 const DataLayout &DL = Q.DL;
2738 const TargetLibraryInfo *TLI = Q.TLI;
2739
2740 // We fold equality and unsigned predicates on pointer comparisons, but forbid
2741 // signed predicates since a GEP with inbounds could cross the sign boundary.
2742 if (CmpInst::isSigned(Pred))
2743 return nullptr;
2744
2745 // We have to switch to a signed predicate to handle negative indices from
2746 // the base pointer.
2747 Pred = ICmpInst::getSignedPredicate(Pred);
2748
2749 // Strip off any constant offsets so that we can reason about them.
2750 // It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets
2751 // here and compare base addresses like AliasAnalysis does, however there are
2752 // numerous hazards. AliasAnalysis and its utilities rely on special rules
2753 // governing loads and stores which don't apply to icmps. Also, AliasAnalysis
2754 // doesn't need to guarantee pointer inequality when it says NoAlias.
2755
2756 // Even if an non-inbounds GEP occurs along the path we can still optimize
2757 // equality comparisons concerning the result.
2758 bool AllowNonInbounds = ICmpInst::isEquality(Pred);
2759 unsigned IndexSize = DL.getIndexTypeSizeInBits(LHS->getType());
2760 APInt LHSOffset(IndexSize, 0), RHSOffset(IndexSize, 0);
2761 LHS = LHS->stripAndAccumulateConstantOffsets(DL, LHSOffset, AllowNonInbounds);
2762 RHS = RHS->stripAndAccumulateConstantOffsets(DL, RHSOffset, AllowNonInbounds);
2763
2764 // If LHS and RHS are related via constant offsets to the same base
2765 // value, we can replace it with an icmp which just compares the offsets.
2766 if (LHS == RHS)
2767 return ConstantInt::get(getCompareTy(LHS),
2768 ICmpInst::compare(LHSOffset, RHSOffset, Pred));
2769
2770 // Various optimizations for (in)equality comparisons.
2771 if (ICmpInst::isEquality(Pred)) {
2772 // Different non-empty allocations that exist at the same time have
2773 // different addresses (if the program can tell). If the offsets are
2774 // within the bounds of their allocations (and not one-past-the-end!
2775 // so we can't use inbounds!), and their allocations aren't the same,
2776 // the pointers are not equal.
2778 uint64_t LHSSize, RHSSize;
2779 ObjectSizeOpts Opts;
2781 auto *F = [](Value *V) -> Function * {
2782 if (auto *I = dyn_cast<Instruction>(V))
2783 return I->getFunction();
2784 if (auto *A = dyn_cast<Argument>(V))
2785 return A->getParent();
2786 return nullptr;
2787 }(LHS);
2788 Opts.NullIsUnknownSize = F ? NullPointerIsDefined(F) : true;
2789 if (getObjectSize(LHS, LHSSize, DL, TLI, Opts) && LHSSize != 0 &&
2790 getObjectSize(RHS, RHSSize, DL, TLI, Opts) && RHSSize != 0) {
2791 APInt Dist = LHSOffset - RHSOffset;
2792 if (Dist.isNonNegative() ? Dist.ult(LHSSize) : (-Dist).ult(RHSSize))
2793 return ConstantInt::get(getCompareTy(LHS),
2795 }
2796 }
2797
2798 // If one side of the equality comparison must come from a noalias call
2799 // (meaning a system memory allocation function), and the other side must
2800 // come from a pointer that cannot overlap with dynamically-allocated
2801 // memory within the lifetime of the current function (allocas, byval
2802 // arguments, globals), then determine the comparison result here.
2803 SmallVector<const Value *, 8> LHSUObjs, RHSUObjs;
2804 getUnderlyingObjects(LHS, LHSUObjs);
2805 getUnderlyingObjects(RHS, RHSUObjs);
2806
2807 // Is the set of underlying objects all noalias calls?
2808 auto IsNAC = [](ArrayRef<const Value *> Objects) {
2809 return all_of(Objects, isNoAliasCall);
2810 };
2811
2812 // Is the set of underlying objects all things which must be disjoint from
2813 // noalias calls. We assume that indexing from such disjoint storage
2814 // into the heap is undefined, and thus offsets can be safely ignored.
2815 auto IsAllocDisjoint = [](ArrayRef<const Value *> Objects) {
2816 return all_of(Objects, ::isAllocDisjoint);
2817 };
2818
2819 if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2820 (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2821 return ConstantInt::get(getCompareTy(LHS),
2823
2824 // Fold comparisons for non-escaping pointer even if the allocation call
2825 // cannot be elided. We cannot fold malloc comparison to null. Also, the
2826 // dynamic allocation call could be either of the operands. Note that
2827 // the other operand can not be based on the alloc - if it were, then
2828 // the cmp itself would be a capture.
2829 Value *MI = nullptr;
2830 if (isAllocLikeFn(LHS, TLI) && llvm::isKnownNonZero(RHS, Q))
2831 MI = LHS;
2832 else if (isAllocLikeFn(RHS, TLI) && llvm::isKnownNonZero(LHS, Q))
2833 MI = RHS;
2834 if (MI) {
2835 // FIXME: This is incorrect, see PR54002. While we can assume that the
2836 // allocation is at an address that makes the comparison false, this
2837 // requires that *all* comparisons to that address be false, which
2838 // InstSimplify cannot guarantee.
2839 struct CustomCaptureTracker : public CaptureTracker {
2840 bool Captured = false;
2841 void tooManyUses() override { Captured = true; }
2842 Action captured(const Use *U, UseCaptureInfo CI) override {
2843 // TODO(captures): Use UseCaptureInfo.
2844 if (auto *ICmp = dyn_cast<ICmpInst>(U->getUser())) {
2845 // Comparison against value stored in global variable. Given the
2846 // pointer does not escape, its value cannot be guessed and stored
2847 // separately in a global variable.
2848 unsigned OtherIdx = 1 - U->getOperandNo();
2849 auto *LI = dyn_cast<LoadInst>(ICmp->getOperand(OtherIdx));
2850 if (LI && isa<GlobalVariable>(LI->getPointerOperand()))
2851 return Continue;
2852 }
2853
2854 Captured = true;
2855 return Stop;
2856 }
2857 };
2858 CustomCaptureTracker Tracker;
2859 PointerMayBeCaptured(MI, &Tracker);
2860 if (!Tracker.Captured)
2861 return ConstantInt::get(getCompareTy(LHS),
2863 }
2864 }
2865
2866 // Otherwise, fail.
2867 return nullptr;
2868}
2869
2870/// Fold an icmp when its operands have i1 scalar type.
2872 const SimplifyQuery &Q) {
2873 Type *ITy = getCompareTy(LHS); // The return type.
2874 Type *OpTy = LHS->getType(); // The operand type.
2875 if (!OpTy->isIntOrIntVectorTy(1))
2876 return nullptr;
2877
2878 // A boolean compared to true/false can be reduced in 14 out of the 20
2879 // (10 predicates * 2 constants) possible combinations. The other
2880 // 6 cases require a 'not' of the LHS.
2881
2882 auto ExtractNotLHS = [](Value *V) -> Value * {
2883 Value *X;
2884 if (match(V, m_Not(m_Value(X))))
2885 return X;
2886 return nullptr;
2887 };
2888
2889 if (match(RHS, m_Zero())) {
2890 switch (Pred) {
2891 case CmpInst::ICMP_NE: // X != 0 -> X
2892 case CmpInst::ICMP_UGT: // X >u 0 -> X
2893 case CmpInst::ICMP_SLT: // X <s 0 -> X
2894 return LHS;
2895
2896 case CmpInst::ICMP_EQ: // not(X) == 0 -> X != 0 -> X
2897 case CmpInst::ICMP_ULE: // not(X) <=u 0 -> X >u 0 -> X
2898 case CmpInst::ICMP_SGE: // not(X) >=s 0 -> X <s 0 -> X
2899 if (Value *X = ExtractNotLHS(LHS))
2900 return X;
2901 break;
2902
2903 case CmpInst::ICMP_ULT: // X <u 0 -> false
2904 case CmpInst::ICMP_SGT: // X >s 0 -> false
2905 return getFalse(ITy);
2906
2907 case CmpInst::ICMP_UGE: // X >=u 0 -> true
2908 case CmpInst::ICMP_SLE: // X <=s 0 -> true
2909 return getTrue(ITy);
2910
2911 default:
2912 break;
2913 }
2914 } else if (match(RHS, m_One())) {
2915 switch (Pred) {
2916 case CmpInst::ICMP_EQ: // X == 1 -> X
2917 case CmpInst::ICMP_UGE: // X >=u 1 -> X
2918 case CmpInst::ICMP_SLE: // X <=s -1 -> X
2919 return LHS;
2920
2921 case CmpInst::ICMP_NE: // not(X) != 1 -> X == 1 -> X
2922 case CmpInst::ICMP_ULT: // not(X) <=u 1 -> X >=u 1 -> X
2923 case CmpInst::ICMP_SGT: // not(X) >s 1 -> X <=s -1 -> X
2924 if (Value *X = ExtractNotLHS(LHS))
2925 return X;
2926 break;
2927
2928 case CmpInst::ICMP_UGT: // X >u 1 -> false
2929 case CmpInst::ICMP_SLT: // X <s -1 -> false
2930 return getFalse(ITy);
2931
2932 case CmpInst::ICMP_ULE: // X <=u 1 -> true
2933 case CmpInst::ICMP_SGE: // X >=s -1 -> true
2934 return getTrue(ITy);
2935
2936 default:
2937 break;
2938 }
2939 }
2940
2941 switch (Pred) {
2942 default:
2943 break;
2944 case ICmpInst::ICMP_UGE:
2945 if (isImpliedCondition(RHS, LHS, Q.DL).value_or(false))
2946 return getTrue(ITy);
2947 break;
2948 case ICmpInst::ICMP_SGE:
2949 /// For signed comparison, the values for an i1 are 0 and -1
2950 /// respectively. This maps into a truth table of:
2951 /// LHS | RHS | LHS >=s RHS | LHS implies RHS
2952 /// 0 | 0 | 1 (0 >= 0) | 1
2953 /// 0 | 1 | 1 (0 >= -1) | 1
2954 /// 1 | 0 | 0 (-1 >= 0) | 0
2955 /// 1 | 1 | 1 (-1 >= -1) | 1
2956 if (isImpliedCondition(LHS, RHS, Q.DL).value_or(false))
2957 return getTrue(ITy);
2958 break;
2959 case ICmpInst::ICMP_ULE:
2960 if (isImpliedCondition(LHS, RHS, Q.DL).value_or(false))
2961 return getTrue(ITy);
2962 break;
2963 case ICmpInst::ICMP_SLE:
2964 /// SLE follows the same logic as SGE with the LHS and RHS swapped.
2965 if (isImpliedCondition(RHS, LHS, Q.DL).value_or(false))
2966 return getTrue(ITy);
2967 break;
2968 }
2969
2970 return nullptr;
2971}
2972
2973/// Check if RHS is zero or can be transformed to an equivalent zero comparison.
2974/// E.g., icmp sgt X, -1 --> icmp sge X, 0
2975static bool matchEquivZeroRHS(CmpPredicate &Pred, const Value *RHS) {
2976 // icmp [pred] X, 0 --> as-is
2977 if (match(RHS, m_Zero()))
2978 return true;
2979
2980 // Handle comparisons with -1 (all ones)
2981 if (match(RHS, m_AllOnes())) {
2982 switch (Pred) {
2983 case ICmpInst::ICMP_SGT:
2984 // icmp sgt X, -1 --> icmp sge X, 0
2985 Pred = ICmpInst::ICMP_SGE;
2986 return true;
2987 case ICmpInst::ICMP_SLE:
2988 // icmp sle X, -1 --> icmp slt X, 0
2989 Pred = ICmpInst::ICMP_SLT;
2990 return true;
2991 // Note: unsigned comparisons with -1 (UINT_MAX) are not handled here:
2992 // - icmp ugt X, -1 is always false (nothing > UINT_MAX)
2993 // - icmp ule X, -1 is always true (everything <= UINT_MAX)
2994 default:
2995 return false;
2996 }
2997 }
2998
2999 // Handle comparisons with 1
3000 if (match(RHS, m_One())) {
3001 switch (Pred) {
3002 case ICmpInst::ICMP_SGE:
3003 // icmp sge X, 1 --> icmp sgt X, 0
3004 Pred = ICmpInst::ICMP_SGT;
3005 return true;
3006 case ICmpInst::ICMP_UGE:
3007 // icmp uge X, 1 --> icmp ugt X, 0
3008 Pred = ICmpInst::ICMP_UGT;
3009 return true;
3010 case ICmpInst::ICMP_SLT:
3011 // icmp slt X, 1 --> icmp sle X, 0
3012 Pred = ICmpInst::ICMP_SLE;
3013 return true;
3014 case ICmpInst::ICMP_ULT:
3015 // icmp ult X, 1 --> icmp ule X, 0
3016 Pred = ICmpInst::ICMP_ULE;
3017 return true;
3018 default:
3019 return false;
3020 }
3021 }
3022
3023 return false;
3024}
3025
3026/// Try hard to fold icmp with zero RHS because this is a common case.
3027/// Note that, this function also handles the equivalent zero RHS, e.g.,
3028/// icmp sgt X, -1 --> icmp sge X, 0
3030 const SimplifyQuery &Q) {
3031 // Check if RHS is zero or can be transformed to an equivalent zero comparison
3032 if (!matchEquivZeroRHS(Pred, RHS))
3033 return nullptr;
3034
3035 Type *ITy = getCompareTy(LHS); // The return type.
3036 switch (Pred) {
3037 default:
3038 llvm_unreachable("Unknown ICmp predicate!");
3039 case ICmpInst::ICMP_ULT:
3040 return getFalse(ITy);
3041 case ICmpInst::ICMP_UGE:
3042 return getTrue(ITy);
3043 case ICmpInst::ICMP_EQ:
3044 case ICmpInst::ICMP_ULE:
3045 if (isKnownNonZero(LHS, Q))
3046 return getFalse(ITy);
3047 break;
3048 case ICmpInst::ICMP_NE:
3049 case ICmpInst::ICMP_UGT:
3050 if (isKnownNonZero(LHS, Q))
3051 return getTrue(ITy);
3052 break;
3053 case ICmpInst::ICMP_SLT: {
3054 KnownBits LHSKnown = computeKnownBits(LHS, Q);
3055 if (LHSKnown.isNegative())
3056 return getTrue(ITy);
3057 if (LHSKnown.isNonNegative())
3058 return getFalse(ITy);
3059 break;
3060 }
3061 case ICmpInst::ICMP_SLE: {
3062 KnownBits LHSKnown = computeKnownBits(LHS, Q);
3063 if (LHSKnown.isNegative())
3064 return getTrue(ITy);
3065 if (LHSKnown.isNonNegative() && isKnownNonZero(LHS, Q))
3066 return getFalse(ITy);
3067 break;
3068 }
3069 case ICmpInst::ICMP_SGE: {
3070 KnownBits LHSKnown = computeKnownBits(LHS, Q);
3071 if (LHSKnown.isNegative())
3072 return getFalse(ITy);
3073 if (LHSKnown.isNonNegative())
3074 return getTrue(ITy);
3075 break;
3076 }
3077 case ICmpInst::ICMP_SGT: {
3078 KnownBits LHSKnown = computeKnownBits(LHS, Q);
3079 if (LHSKnown.isNegative())
3080 return getFalse(ITy);
3081 if (LHSKnown.isNonNegative() && isKnownNonZero(LHS, Q))
3082 return getTrue(ITy);
3083 break;
3084 }
3085 }
3086
3087 return nullptr;
3088}
3089
3091 Value *RHS, const SimplifyQuery &Q) {
3092 Type *ITy = getCompareTy(RHS); // The return type.
3093
3094 Value *X;
3095 const APInt *C;
3096 if (!match(RHS, m_APIntAllowPoison(C)))
3097 return nullptr;
3098
3099 // Sign-bit checks can be optimized to true/false after unsigned
3100 // floating-point casts:
3101 // icmp slt (bitcast (uitofp X)), 0 --> false
3102 // icmp sgt (bitcast (uitofp X)), -1 --> true
3104 bool TrueIfSigned;
3105 if (isSignBitCheck(Pred, *C, TrueIfSigned))
3106 return ConstantInt::getBool(ITy, !TrueIfSigned);
3107 }
3108
3109 // Rule out tautological comparisons (eg., ult 0 or uge 0).
3111 if (RHS_CR.isEmptySet())
3112 return ConstantInt::getFalse(ITy);
3113 if (RHS_CR.isFullSet())
3114 return ConstantInt::getTrue(ITy);
3115
3116 ConstantRange LHS_CR =
3118 if (!LHS_CR.isFullSet()) {
3119 if (RHS_CR.contains(LHS_CR))
3120 return ConstantInt::getTrue(ITy);
3121 if (RHS_CR.inverse().contains(LHS_CR))
3122 return ConstantInt::getFalse(ITy);
3123 }
3124
3125 // (mul nuw/nsw X, MulC) != C --> true (if C is not a multiple of MulC)
3126 // (mul nuw/nsw X, MulC) == C --> false (if C is not a multiple of MulC)
3127 const APInt *MulC;
3128 if (Q.IIQ.UseInstrInfo && ICmpInst::isEquality(Pred) &&
3130 *MulC != 0 && C->urem(*MulC) != 0) ||
3132 *MulC != 0 && C->srem(*MulC) != 0)))
3133 return ConstantInt::get(ITy, Pred == ICmpInst::ICMP_NE);
3134
3135 if (Pred == ICmpInst::ICMP_UGE && C->isOne() && isKnownNonZero(LHS, Q))
3136 return ConstantInt::getTrue(ITy);
3137
3138 return nullptr;
3139}
3140
3142
3143/// Get values V_i such that V uge V_i (GreaterEq) or V ule V_i (LowerEq).
3146 const SimplifyQuery &Q,
3147 unsigned Depth = 0) {
3148 if (!Res.insert(V).second)
3149 return;
3150
3151 // Can be increased if useful.
3152 if (++Depth > 1)
3153 return;
3154
3155 auto *I = dyn_cast<Instruction>(V);
3156 if (!I)
3157 return;
3158
3159 Value *X, *Y;
3161 if (match(I, m_Or(m_Value(X), m_Value(Y))) ||
3165 }
3166 // X * Y >= X --> true
3167 if (match(I, m_NUWMul(m_Value(X), m_Value(Y)))) {
3168 if (isKnownNonZero(X, Q))
3170 if (isKnownNonZero(Y, Q))
3172 }
3173 } else {
3175 switch (I->getOpcode()) {
3176 case Instruction::And:
3177 getUnsignedMonotonicValues(Res, I->getOperand(0), Type, Q, Depth);
3178 getUnsignedMonotonicValues(Res, I->getOperand(1), Type, Q, Depth);
3179 break;
3180 case Instruction::URem:
3181 case Instruction::UDiv:
3182 case Instruction::LShr:
3183 getUnsignedMonotonicValues(Res, I->getOperand(0), Type, Q, Depth);
3184 break;
3185 case Instruction::Call:
3188 break;
3189 default:
3190 break;
3191 }
3192 }
3193}
3194
3196 Value *RHS,
3197 const SimplifyQuery &Q) {
3198 if (Pred != ICmpInst::ICMP_UGE && Pred != ICmpInst::ICMP_ULT)
3199 return nullptr;
3200
3201 // We have LHS uge GreaterValues and LowerValues uge RHS. If any of the
3202 // GreaterValues and LowerValues are the same, it follows that LHS uge RHS.
3203 SmallPtrSet<Value *, 4> GreaterValues;
3204 SmallPtrSet<Value *, 4> LowerValues;
3207 for (Value *GV : GreaterValues)
3208 if (LowerValues.contains(GV))
3210 Pred == ICmpInst::ICMP_UGE);
3211 return nullptr;
3212}
3213
3215 Value *RHS, const SimplifyQuery &Q,
3216 unsigned MaxRecurse) {
3217 Type *ITy = getCompareTy(RHS); // The return type.
3218
3219 Value *Y = nullptr;
3220 // icmp pred (or X, Y), X
3221 if (match(LBO, m_c_Or(m_Value(Y), m_Specific(RHS)))) {
3222 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
3223 KnownBits RHSKnown = computeKnownBits(RHS, Q);
3224 KnownBits YKnown = computeKnownBits(Y, Q);
3225 if (RHSKnown.isNonNegative() && YKnown.isNegative())
3226 return Pred == ICmpInst::ICMP_SLT ? getTrue(ITy) : getFalse(ITy);
3227 if (RHSKnown.isNegative() || YKnown.isNonNegative())
3228 return Pred == ICmpInst::ICMP_SLT ? getFalse(ITy) : getTrue(ITy);
3229 }
3230 }
3231
3232 // icmp pred (urem X, Y), Y
3233 if (match(LBO, m_URem(m_Value(), m_Specific(RHS)))) {
3234 switch (Pred) {
3235 default:
3236 break;
3237 case ICmpInst::ICMP_SGT:
3238 case ICmpInst::ICMP_SGE: {
3239 KnownBits Known = computeKnownBits(RHS, Q);
3240 if (!Known.isNonNegative())
3241 break;
3242 [[fallthrough]];
3243 }
3244 case ICmpInst::ICMP_EQ:
3245 case ICmpInst::ICMP_UGT:
3246 case ICmpInst::ICMP_UGE:
3247 return getFalse(ITy);
3248 case ICmpInst::ICMP_SLT:
3249 case ICmpInst::ICMP_SLE: {
3250 KnownBits Known = computeKnownBits(RHS, Q);
3251 if (!Known.isNonNegative())
3252 break;
3253 [[fallthrough]];
3254 }
3255 case ICmpInst::ICMP_NE:
3256 case ICmpInst::ICMP_ULT:
3257 case ICmpInst::ICMP_ULE:
3258 return getTrue(ITy);
3259 }
3260 }
3261
3262 // If x is nonzero:
3263 // x >>u C <u x --> true for C != 0.
3264 // x >>u C != x --> true for C != 0.
3265 // x >>u C >=u x --> false for C != 0.
3266 // x >>u C == x --> false for C != 0.
3267 // x udiv C <u x --> true for C != 1.
3268 // x udiv C != x --> true for C != 1.
3269 // x udiv C >=u x --> false for C != 1.
3270 // x udiv C == x --> false for C != 1.
3271 // TODO: allow non-constant shift amount/divisor
3272 const APInt *C;
3273 if ((match(LBO, m_LShr(m_Specific(RHS), m_APInt(C))) && *C != 0) ||
3274 (match(LBO, m_UDiv(m_Specific(RHS), m_APInt(C))) && *C != 1)) {
3275 if (isKnownNonZero(RHS, Q)) {
3276 switch (Pred) {
3277 default:
3278 break;
3279 case ICmpInst::ICMP_EQ:
3280 case ICmpInst::ICMP_UGE:
3281 case ICmpInst::ICMP_UGT:
3282 return getFalse(ITy);
3283 case ICmpInst::ICMP_NE:
3284 case ICmpInst::ICMP_ULT:
3285 case ICmpInst::ICMP_ULE:
3286 return getTrue(ITy);
3287 }
3288 }
3289 }
3290
3291 // (x*C1)/C2 <= x for C1 <= C2.
3292 // This holds even if the multiplication overflows: Assume that x != 0 and
3293 // arithmetic is modulo M. For overflow to occur we must have C1 >= M/x and
3294 // thus C2 >= M/x. It follows that (x*C1)/C2 <= (M-1)/C2 <= ((M-1)*x)/M < x.
3295 //
3296 // Additionally, either the multiplication and division might be represented
3297 // as shifts:
3298 // (x*C1)>>C2 <= x for C1 < 2**C2.
3299 // (x<<C1)/C2 <= x for 2**C1 < C2.
3300 const APInt *C1, *C2;
3301 if ((match(LBO, m_UDiv(m_Mul(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
3302 C1->ule(*C2)) ||
3303 (match(LBO, m_LShr(m_Mul(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
3304 C1->ule(APInt(C2->getBitWidth(), 1) << *C2)) ||
3305 (match(LBO, m_UDiv(m_Shl(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
3306 (APInt(C1->getBitWidth(), 1) << *C1).ule(*C2))) {
3307 if (Pred == ICmpInst::ICMP_UGT)
3308 return getFalse(ITy);
3309 if (Pred == ICmpInst::ICMP_ULE)
3310 return getTrue(ITy);
3311 }
3312
3313 // (sub C, X) == X, C is odd --> false
3314 // (sub C, X) != X, C is odd --> true
3315 if (match(LBO, m_Sub(m_APIntAllowPoison(C), m_Specific(RHS))) &&
3316 (*C & 1) == 1 && ICmpInst::isEquality(Pred))
3317 return (Pred == ICmpInst::ICMP_EQ) ? getFalse(ITy) : getTrue(ITy);
3318
3319 return nullptr;
3320}
3321
3322// If only one of the icmp's operands has NSW flags, try to prove that:
3323//
3324// icmp slt/sgt/sle/sge (x + C1), (x +nsw C2)
3325//
3326// is equivalent to:
3327//
3328// icmp slt/sgt/sle/sge C1, C2
3329//
3330// which is true if x + C2 has the NSW flags set and:
3331// *) C1 <= C2 && C1 >= 0, or
3332// *) C2 <= C1 && C1 <= 0.
3333//
3335 const InstrInfoQuery &IIQ) {
3336 // TODO: support other predicates.
3337 if (!ICmpInst::isSigned(Pred) || !IIQ.UseInstrInfo)
3338 return false;
3339
3340 // Canonicalize nsw add as RHS.
3341 if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
3342 std::swap(LHS, RHS);
3343 if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
3344 return false;
3345
3346 Value *X;
3347 const APInt *C1, *C2;
3348 if (!match(LHS, m_Add(m_Value(X), m_APInt(C1))) ||
3349 !match(RHS, m_Add(m_Specific(X), m_APInt(C2))))
3350 return false;
3351
3352 return (C1->sle(*C2) && C1->isNonNegative()) ||
3353 (C2->sle(*C1) && C1->isNonPositive());
3354}
3355
3356/// TODO: A large part of this logic is duplicated in InstCombine's
3357/// foldICmpBinOp(). We should be able to share that and avoid the code
3358/// duplication.
3360 const SimplifyQuery &Q,
3361 unsigned MaxRecurse) {
3364 if (MaxRecurse && (LBO || RBO)) {
3365 // Analyze the case when either LHS or RHS is an add instruction.
3366 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
3367 // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null).
3368 bool NoLHSWrapProblem = false, NoRHSWrapProblem = false;
3369 if (LBO && LBO->getOpcode() == Instruction::Add) {
3370 A = LBO->getOperand(0);
3371 B = LBO->getOperand(1);
3372 NoLHSWrapProblem =
3373 ICmpInst::isEquality(Pred) ||
3374 (CmpInst::isUnsigned(Pred) &&
3376 (CmpInst::isSigned(Pred) &&
3378 }
3379 if (RBO && RBO->getOpcode() == Instruction::Add) {
3380 C = RBO->getOperand(0);
3381 D = RBO->getOperand(1);
3382 NoRHSWrapProblem =
3383 ICmpInst::isEquality(Pred) ||
3384 (CmpInst::isUnsigned(Pred) &&
3386 (CmpInst::isSigned(Pred) &&
3388 }
3389
3390 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
3391 if ((A == RHS || B == RHS) && NoLHSWrapProblem)
3392 if (Value *V = simplifyICmpInst(Pred, A == RHS ? B : A,
3393 Constant::getNullValue(RHS->getType()), Q,
3394 MaxRecurse - 1))
3395 return V;
3396
3397 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
3398 if ((C == LHS || D == LHS) && NoRHSWrapProblem)
3399 if (Value *V =
3401 C == LHS ? D : C, Q, MaxRecurse - 1))
3402 return V;
3403
3404 // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow.
3405 bool CanSimplify = (NoLHSWrapProblem && NoRHSWrapProblem) ||
3407 if (A && C && (A == C || A == D || B == C || B == D) && CanSimplify) {
3408 // Determine Y and Z in the form icmp (X+Y), (X+Z).
3409 Value *Y, *Z;
3410 if (A == C) {
3411 // C + B == C + D -> B == D
3412 Y = B;
3413 Z = D;
3414 } else if (A == D) {
3415 // D + B == C + D -> B == C
3416 Y = B;
3417 Z = C;
3418 } else if (B == C) {
3419 // A + C == C + D -> A == D
3420 Y = A;
3421 Z = D;
3422 } else {
3423 assert(B == D);
3424 // A + D == C + D -> A == C
3425 Y = A;
3426 Z = C;
3427 }
3428 if (Value *V = simplifyICmpInst(Pred, Y, Z, Q, MaxRecurse - 1))
3429 return V;
3430 }
3431 }
3432
3433 if (LBO)
3434 if (Value *V = simplifyICmpWithBinOpOnLHS(Pred, LBO, RHS, Q, MaxRecurse))
3435 return V;
3436
3437 if (RBO)
3439 ICmpInst::getSwappedPredicate(Pred), RBO, LHS, Q, MaxRecurse))
3440 return V;
3441
3442 // 0 - (zext X) pred C
3443 if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) {
3444 const APInt *C;
3445 if (match(RHS, m_APInt(C))) {
3446 if (C->isStrictlyPositive()) {
3447 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_NE)
3449 if (Pred == ICmpInst::ICMP_SGE || Pred == ICmpInst::ICMP_EQ)
3451 }
3452 if (C->isNonNegative()) {
3453 if (Pred == ICmpInst::ICMP_SLE)
3455 if (Pred == ICmpInst::ICMP_SGT)
3457 }
3458 }
3459 }
3460
3461 // If C2 is a power-of-2 and C is not:
3462 // (C2 << X) == C --> false
3463 // (C2 << X) != C --> true
3464 const APInt *C;
3465 if (match(LHS, m_Shl(m_Power2(), m_Value())) &&
3466 match(RHS, m_APIntAllowPoison(C)) && !C->isPowerOf2()) {
3467 // C2 << X can equal zero in some circumstances.
3468 // This simplification might be unsafe if C is zero.
3469 //
3470 // We know it is safe if:
3471 // - The shift is nsw. We can't shift out the one bit.
3472 // - The shift is nuw. We can't shift out the one bit.
3473 // - C2 is one.
3474 // - C isn't zero.
3477 match(LHS, m_Shl(m_One(), m_Value())) || !C->isZero()) {
3478 if (Pred == ICmpInst::ICMP_EQ)
3480 if (Pred == ICmpInst::ICMP_NE)
3482 }
3483 }
3484
3485 // If C is a power-of-2:
3486 // (C << X) >u 0x8000 --> false
3487 // (C << X) <=u 0x8000 --> true
3488 if (match(LHS, m_Shl(m_Power2(), m_Value())) && match(RHS, m_SignMask())) {
3489 if (Pred == ICmpInst::ICMP_UGT)
3491 if (Pred == ICmpInst::ICMP_ULE)
3493 }
3494
3495 if (!MaxRecurse || !LBO || !RBO || LBO->getOpcode() != RBO->getOpcode())
3496 return nullptr;
3497
3498 if (LBO->getOperand(0) == RBO->getOperand(0)) {
3499 switch (LBO->getOpcode()) {
3500 default:
3501 break;
3502 case Instruction::Shl: {
3503 bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
3504 bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
3505 if (!NUW || (ICmpInst::isSigned(Pred) && !NSW) ||
3506 !isKnownNonZero(LBO->getOperand(0), Q))
3507 break;
3508 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(1),
3509 RBO->getOperand(1), Q, MaxRecurse - 1))
3510 return V;
3511 break;
3512 }
3513 // If C1 & C2 == C1, A = X and/or C1, B = X and/or C2:
3514 // icmp ule A, B -> true
3515 // icmp ugt A, B -> false
3516 // icmp sle A, B -> true (C1 and C2 are the same sign)
3517 // icmp sgt A, B -> false (C1 and C2 are the same sign)
3518 case Instruction::And:
3519 case Instruction::Or: {
3520 const APInt *C1, *C2;
3521 if (ICmpInst::isRelational(Pred) &&
3522 match(LBO->getOperand(1), m_APInt(C1)) &&
3523 match(RBO->getOperand(1), m_APInt(C2))) {
3524 if (!C1->isSubsetOf(*C2)) {
3525 std::swap(C1, C2);
3526 Pred = ICmpInst::getSwappedPredicate(Pred);
3527 }
3528 if (C1->isSubsetOf(*C2)) {
3529 if (Pred == ICmpInst::ICMP_ULE)
3531 if (Pred == ICmpInst::ICMP_UGT)
3533 if (C1->isNonNegative() == C2->isNonNegative()) {
3534 if (Pred == ICmpInst::ICMP_SLE)
3536 if (Pred == ICmpInst::ICMP_SGT)
3538 }
3539 }
3540 }
3541 break;
3542 }
3543 }
3544 }
3545
3546 if (LBO->getOperand(1) == RBO->getOperand(1)) {
3547 switch (LBO->getOpcode()) {
3548 default:
3549 break;
3550 case Instruction::UDiv:
3551 case Instruction::LShr:
3552 if (ICmpInst::isSigned(Pred) || !Q.IIQ.isExact(LBO) ||
3553 !Q.IIQ.isExact(RBO))
3554 break;
3555 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3556 RBO->getOperand(0), Q, MaxRecurse - 1))
3557 return V;
3558 break;
3559 case Instruction::SDiv:
3560 if (!ICmpInst::isEquality(Pred) || !Q.IIQ.isExact(LBO) ||
3561 !Q.IIQ.isExact(RBO))
3562 break;
3563 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3564 RBO->getOperand(0), Q, MaxRecurse - 1))
3565 return V;
3566 break;
3567 case Instruction::AShr:
3568 if (!Q.IIQ.isExact(LBO) || !Q.IIQ.isExact(RBO))
3569 break;
3570 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3571 RBO->getOperand(0), Q, MaxRecurse - 1))
3572 return V;
3573 break;
3574 case Instruction::Shl: {
3575 bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
3576 bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
3577 if (!NUW && !NSW)
3578 break;
3579 if (!NSW && ICmpInst::isSigned(Pred))
3580 break;
3581 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3582 RBO->getOperand(0), Q, MaxRecurse - 1))
3583 return V;
3584 break;
3585 }
3586 }
3587 }
3588 return nullptr;
3589}
3590
3591/// simplify integer comparisons where at least one operand of the compare
3592/// matches an integer min/max idiom.
3594 const SimplifyQuery &Q,
3595 unsigned MaxRecurse) {
3596 Type *ITy = getCompareTy(LHS); // The return type.
3597 Value *A, *B;
3599 CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B".
3600
3601 // Signed variants on "max(a,b)>=a -> true".
3602 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
3603 if (A != RHS)
3604 std::swap(A, B); // smax(A, B) pred A.
3605 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3606 // We analyze this as smax(A, B) pred A.
3607 P = Pred;
3608 } else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) &&
3609 (A == LHS || B == LHS)) {
3610 if (A != LHS)
3611 std::swap(A, B); // A pred smax(A, B).
3612 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3613 // We analyze this as smax(A, B) swapped-pred A.
3615 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
3616 (A == RHS || B == RHS)) {
3617 if (A != RHS)
3618 std::swap(A, B); // smin(A, B) pred A.
3619 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3620 // We analyze this as smax(-A, -B) swapped-pred -A.
3621 // Note that we do not need to actually form -A or -B thanks to EqP.
3623 } else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) &&
3624 (A == LHS || B == LHS)) {
3625 if (A != LHS)
3626 std::swap(A, B); // A pred smin(A, B).
3627 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3628 // We analyze this as smax(-A, -B) pred -A.
3629 // Note that we do not need to actually form -A or -B thanks to EqP.
3630 P = Pred;
3631 }
3633 // Cases correspond to "max(A, B) p A".
3634 switch (P) {
3635 default:
3636 break;
3637 case CmpInst::ICMP_EQ:
3638 case CmpInst::ICMP_SLE:
3639 // Equivalent to "A EqP B". This may be the same as the condition tested
3640 // in the max/min; if so, we can just return that.
3641 if (Value *V = extractEquivalentCondition(LHS, EqP, A, B))
3642 return V;
3643 if (Value *V = extractEquivalentCondition(RHS, EqP, A, B))
3644 return V;
3645 // Otherwise, see if "A EqP B" simplifies.
3646 if (MaxRecurse)
3647 if (Value *V = simplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3648 return V;
3649 break;
3650 case CmpInst::ICMP_NE:
3651 case CmpInst::ICMP_SGT: {
3653 // Equivalent to "A InvEqP B". This may be the same as the condition
3654 // tested in the max/min; if so, we can just return that.
3655 if (Value *V = extractEquivalentCondition(LHS, InvEqP, A, B))
3656 return V;
3657 if (Value *V = extractEquivalentCondition(RHS, InvEqP, A, B))
3658 return V;
3659 // Otherwise, see if "A InvEqP B" simplifies.
3660 if (MaxRecurse)
3661 if (Value *V = simplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3662 return V;
3663 break;
3664 }
3665 case CmpInst::ICMP_SGE:
3666 // Always true.
3667 return getTrue(ITy);
3668 case CmpInst::ICMP_SLT:
3669 // Always false.
3670 return getFalse(ITy);
3671 }
3672 }
3673
3674 // Unsigned variants on "max(a,b)>=a -> true".
3676 if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
3677 if (A != RHS)
3678 std::swap(A, B); // umax(A, B) pred A.
3679 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3680 // We analyze this as umax(A, B) pred A.
3681 P = Pred;
3682 } else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) &&
3683 (A == LHS || B == LHS)) {
3684 if (A != LHS)
3685 std::swap(A, B); // A pred umax(A, B).
3686 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3687 // We analyze this as umax(A, B) swapped-pred A.
3689 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
3690 (A == RHS || B == RHS)) {
3691 if (A != RHS)
3692 std::swap(A, B); // umin(A, B) pred A.
3693 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3694 // We analyze this as umax(-A, -B) swapped-pred -A.
3695 // Note that we do not need to actually form -A or -B thanks to EqP.
3697 } else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) &&
3698 (A == LHS || B == LHS)) {
3699 if (A != LHS)
3700 std::swap(A, B); // A pred umin(A, B).
3701 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3702 // We analyze this as umax(-A, -B) pred -A.
3703 // Note that we do not need to actually form -A or -B thanks to EqP.
3704 P = Pred;
3705 }
3707 // Cases correspond to "max(A, B) p A".
3708 switch (P) {
3709 default:
3710 break;
3711 case CmpInst::ICMP_EQ:
3712 case CmpInst::ICMP_ULE:
3713 // Equivalent to "A EqP B". This may be the same as the condition tested
3714 // in the max/min; if so, we can just return that.
3715 if (Value *V = extractEquivalentCondition(LHS, EqP, A, B))
3716 return V;
3717 if (Value *V = extractEquivalentCondition(RHS, EqP, A, B))
3718 return V;
3719 // Otherwise, see if "A EqP B" simplifies.
3720 if (MaxRecurse)
3721 if (Value *V = simplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3722 return V;
3723 break;
3724 case CmpInst::ICMP_NE:
3725 case CmpInst::ICMP_UGT: {
3727 // Equivalent to "A InvEqP B". This may be the same as the condition
3728 // tested in the max/min; if so, we can just return that.
3729 if (Value *V = extractEquivalentCondition(LHS, InvEqP, A, B))
3730 return V;
3731 if (Value *V = extractEquivalentCondition(RHS, InvEqP, A, B))
3732 return V;
3733 // Otherwise, see if "A InvEqP B" simplifies.
3734 if (MaxRecurse)
3735 if (Value *V = simplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3736 return V;
3737 break;
3738 }
3739 case CmpInst::ICMP_UGE:
3740 return getTrue(ITy);
3741 case CmpInst::ICMP_ULT:
3742 return getFalse(ITy);
3743 }
3744 }
3745
3746 // Comparing 1 each of min/max with a common operand?
3747 // Canonicalize min operand to RHS.
3748 if (match(LHS, m_UMin(m_Value(), m_Value())) ||
3749 match(LHS, m_SMin(m_Value(), m_Value()))) {
3750 std::swap(LHS, RHS);
3751 Pred = ICmpInst::getSwappedPredicate(Pred);
3752 }
3753
3754 Value *C, *D;
3755 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) &&
3756 match(RHS, m_SMin(m_Value(C), m_Value(D))) &&
3757 (A == C || A == D || B == C || B == D)) {
3758 // smax(A, B) >=s smin(A, D) --> true
3759 if (Pred == CmpInst::ICMP_SGE)
3760 return getTrue(ITy);
3761 // smax(A, B) <s smin(A, D) --> false
3762 if (Pred == CmpInst::ICMP_SLT)
3763 return getFalse(ITy);
3764 } else if (match(LHS, m_UMax(m_Value(A), m_Value(B))) &&
3765 match(RHS, m_UMin(m_Value(C), m_Value(D))) &&
3766 (A == C || A == D || B == C || B == D)) {
3767 // umax(A, B) >=u umin(A, D) --> true
3768 if (Pred == CmpInst::ICMP_UGE)
3769 return getTrue(ITy);
3770 // umax(A, B) <u umin(A, D) --> false
3771 if (Pred == CmpInst::ICMP_ULT)
3772 return getFalse(ITy);
3773 }
3774
3775 return nullptr;
3776}
3777
3779 Value *LHS, Value *RHS,
3780 const SimplifyQuery &Q) {
3781 // Gracefully handle instructions that have not been inserted yet.
3782 if (!Q.AC || !Q.CxtI)
3783 return nullptr;
3784
3785 for (Value *AssumeBaseOp : {LHS, RHS}) {
3786 for (auto &AssumeVH : Q.AC->assumptionsFor(AssumeBaseOp)) {
3787 if (!AssumeVH)
3788 continue;
3789
3790 CallInst *Assume = cast<CallInst>(AssumeVH);
3791 if (std::optional<bool> Imp = isImpliedCondition(
3792 Assume->getArgOperand(0), Predicate, LHS, RHS, Q.DL))
3793 if (isValidAssumeForContext(Assume, Q.CxtI, Q.DT))
3794 return ConstantInt::get(getCompareTy(LHS), *Imp);
3795 }
3796 }
3797
3798 return nullptr;
3799}
3800
3802 Value *RHS) {
3804 if (!II)
3805 return nullptr;
3806
3807 switch (II->getIntrinsicID()) {
3808 case Intrinsic::uadd_sat:
3809 // uadd.sat(X, Y) uge X + Y
3810 if (match(RHS, m_c_Add(m_Specific(II->getArgOperand(0)),
3811 m_Specific(II->getArgOperand(1))))) {
3812 if (Pred == ICmpInst::ICMP_UGE)
3814 if (Pred == ICmpInst::ICMP_ULT)
3816 }
3817 return nullptr;
3818 case Intrinsic::usub_sat:
3819 // usub.sat(X, Y) ule X - Y
3820 if (match(RHS, m_Sub(m_Specific(II->getArgOperand(0)),
3821 m_Specific(II->getArgOperand(1))))) {
3822 if (Pred == ICmpInst::ICMP_ULE)
3824 if (Pred == ICmpInst::ICMP_UGT)
3826 }
3827 return nullptr;
3828 default:
3829 return nullptr;
3830 }
3831}
3832
3833/// Helper method to get range from metadata or attribute.
3834static std::optional<ConstantRange> getRange(Value *V,
3835 const InstrInfoQuery &IIQ) {
3837 if (MDNode *MD = IIQ.getMetadata(I, LLVMContext::MD_range))
3838 return getConstantRangeFromMetadata(*MD);
3839
3840 if (const Argument *A = dyn_cast<Argument>(V))
3841 return A->getRange();
3842 else if (const CallBase *CB = dyn_cast<CallBase>(V))
3843 return CB->getRange();
3844
3845 return std::nullopt;
3846}
3847
3848/// Given operands for an ICmpInst, see if we can fold the result.
3849/// If not, this returns null.
3851 const SimplifyQuery &Q, unsigned MaxRecurse) {
3852 assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!");
3853
3854 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3855 if (Constant *CRHS = dyn_cast<Constant>(RHS))
3856 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3857
3858 // If we have a constant, make sure it is on the RHS.
3859 std::swap(LHS, RHS);
3860 Pred = CmpInst::getSwappedPredicate(Pred);
3861 }
3862 assert(!isa<UndefValue>(LHS) && "Unexpected icmp undef,%X");
3863
3864 Type *ITy = getCompareTy(LHS); // The return type.
3865
3866 // icmp poison, X -> poison
3867 if (isa<PoisonValue>(RHS))
3868 return PoisonValue::get(ITy);
3869
3870 // For EQ and NE, we can always pick a value for the undef to make the
3871 // predicate pass or fail, so we can return undef.
3872 // Matches behavior in llvm::ConstantFoldCompareInstruction.
3873 if (Q.isUndefValue(RHS) && ICmpInst::isEquality(Pred))
3874 return UndefValue::get(ITy);
3875
3876 // icmp X, X -> true/false
3877 // icmp X, undef -> true/false because undef could be X.
3878 if (LHS == RHS || Q.isUndefValue(RHS))
3879 return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred));
3880
3881 if (Value *V = simplifyICmpOfBools(Pred, LHS, RHS, Q))
3882 return V;
3883
3884 // TODO: Sink/common this with other potentially expensive calls that use
3885 // ValueTracking? See comment below for isKnownNonEqual().
3886 if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q))
3887 return V;
3888
3889 if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS, Q))
3890 return V;
3891
3892 // If both operands have range metadata, use the metadata
3893 // to simplify the comparison.
3894 if (std::optional<ConstantRange> RhsCr = getRange(RHS, Q.IIQ))
3895 if (std::optional<ConstantRange> LhsCr = getRange(LHS, Q.IIQ)) {
3896 if (LhsCr->icmp(Pred, *RhsCr))
3897 return ConstantInt::getTrue(ITy);
3898
3899 if (LhsCr->icmp(CmpInst::getInversePredicate(Pred), *RhsCr))
3900 return ConstantInt::getFalse(ITy);
3901 }
3902
3903 // Compare of cast, for example (zext X) != 0 -> X != 0
3906 Value *SrcOp = LI->getOperand(0);
3907 Type *SrcTy = SrcOp->getType();
3908 Type *DstTy = LI->getType();
3909
3910 // Turn icmp (ptrtoint/ptrtoaddr x), (ptrtoint/ptrtoaddr/constant) into a
3911 // compare of the input if the integer type is the same size as the
3912 // pointer address type (icmp only compares the address of the pointer).
3913 if (MaxRecurse && (isa<PtrToIntInst, PtrToAddrInst>(LI)) &&
3914 Q.DL.getAddressType(SrcTy) == DstTy) {
3915 if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
3916 // Transfer the cast to the constant.
3917 if (Value *V = simplifyICmpInst(Pred, SrcOp,
3918 ConstantExpr::getIntToPtr(RHSC, SrcTy),
3919 Q, MaxRecurse - 1))
3920 return V;
3922 auto *RI = cast<CastInst>(RHS);
3923 if (RI->getOperand(0)->getType() == SrcTy)
3924 // Compare without the cast.
3925 if (Value *V = simplifyICmpInst(Pred, SrcOp, RI->getOperand(0), Q,
3926 MaxRecurse - 1))
3927 return V;
3928 }
3929 }
3930
3931 if (isa<ZExtInst>(LHS)) {
3932 // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the
3933 // same type.
3934 if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3935 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3936 // Compare X and Y. Note that signed predicates become unsigned.
3937 if (Value *V =
3939 RI->getOperand(0), Q, MaxRecurse - 1))
3940 return V;
3941 }
3942 // Fold (zext X) ule (sext X), (zext X) sge (sext X) to true.
3943 else if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3944 if (SrcOp == RI->getOperand(0)) {
3945 if (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_SGE)
3946 return ConstantInt::getTrue(ITy);
3947 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_SLT)
3948 return ConstantInt::getFalse(ITy);
3949 }
3950 }
3951 // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended
3952 // too. If not, then try to deduce the result of the comparison.
3953 else if (match(RHS, m_ImmConstant())) {
3955 assert(C != nullptr);
3956
3957 // Compute the constant that would happen if we truncated to SrcTy then
3958 // reextended to DstTy.
3959 Constant *Trunc =
3960 ConstantFoldCastOperand(Instruction::Trunc, C, SrcTy, Q.DL);
3961 assert(Trunc && "Constant-fold of ImmConstant should not fail");
3962 Constant *RExt =
3963 ConstantFoldCastOperand(CastInst::ZExt, Trunc, DstTy, Q.DL);
3964 assert(RExt && "Constant-fold of ImmConstant should not fail");
3965 Constant *AnyEq =
3967 assert(AnyEq && "Constant-fold of ImmConstant should not fail");
3968
3969 // If the re-extended constant didn't change any of the elements then
3970 // this is effectively also a case of comparing two zero-extended
3971 // values.
3972 if (AnyEq->isAllOnesValue() && MaxRecurse)
3974 SrcOp, Trunc, Q, MaxRecurse - 1))
3975 return V;
3976
3977 // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit
3978 // there. Use this to work out the result of the comparison.
3979 if (AnyEq->isNullValue()) {
3980 switch (Pred) {
3981 default:
3982 llvm_unreachable("Unknown ICmp predicate!");
3983 // LHS <u RHS.
3984 case ICmpInst::ICMP_EQ:
3985 case ICmpInst::ICMP_UGT:
3986 case ICmpInst::ICMP_UGE:
3987 return Constant::getNullValue(ITy);
3988
3989 case ICmpInst::ICMP_NE:
3990 case ICmpInst::ICMP_ULT:
3991 case ICmpInst::ICMP_ULE:
3992 return Constant::getAllOnesValue(ITy);
3993
3994 // LHS is non-negative. If RHS is negative then LHS >s LHS. If RHS
3995 // is non-negative then LHS <s RHS.
3996 case ICmpInst::ICMP_SGT:
3997 case ICmpInst::ICMP_SGE:
4000 Q.DL);
4001 case ICmpInst::ICMP_SLT:
4002 case ICmpInst::ICMP_SLE:
4005 Q.DL);
4006 }
4007 }
4008 }
4009 }
4010
4011 if (isa<SExtInst>(LHS)) {
4012 // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the
4013 // same type.
4014 if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
4015 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
4016 // Compare X and Y. Note that the predicate does not change.
4017 if (Value *V = simplifyICmpInst(Pred, SrcOp, RI->getOperand(0), Q,
4018 MaxRecurse - 1))
4019 return V;
4020 }
4021 // Fold (sext X) uge (zext X), (sext X) sle (zext X) to true.
4022 else if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
4023 if (SrcOp == RI->getOperand(0)) {
4024 if (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_SLE)
4025 return ConstantInt::getTrue(ITy);
4026 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SGT)
4027 return ConstantInt::getFalse(ITy);
4028 }
4029 }
4030 // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
4031 // too. If not, then try to deduce the result of the comparison.
4032 else if (match(RHS, m_ImmConstant())) {
4034
4035 // Compute the constant that would happen if we truncated to SrcTy then
4036 // reextended to DstTy.
4037 Constant *Trunc =
4038 ConstantFoldCastOperand(Instruction::Trunc, C, SrcTy, Q.DL);
4039 assert(Trunc && "Constant-fold of ImmConstant should not fail");
4040 Constant *RExt =
4041 ConstantFoldCastOperand(CastInst::SExt, Trunc, DstTy, Q.DL);
4042 assert(RExt && "Constant-fold of ImmConstant should not fail");
4043 Constant *AnyEq =
4045 assert(AnyEq && "Constant-fold of ImmConstant should not fail");
4046
4047 // If the re-extended constant didn't change then this is effectively
4048 // also a case of comparing two sign-extended values.
4049 if (AnyEq->isAllOnesValue() && MaxRecurse)
4050 if (Value *V =
4051 simplifyICmpInst(Pred, SrcOp, Trunc, Q, MaxRecurse - 1))
4052 return V;
4053
4054 // Otherwise the upper bits of LHS are all equal, while RHS has varying
4055 // bits there. Use this to work out the result of the comparison.
4056 if (AnyEq->isNullValue()) {
4057 switch (Pred) {
4058 default:
4059 llvm_unreachable("Unknown ICmp predicate!");
4060 case ICmpInst::ICMP_EQ:
4061 return Constant::getNullValue(ITy);
4062 case ICmpInst::ICMP_NE:
4063 return Constant::getAllOnesValue(ITy);
4064
4065 // If RHS is non-negative then LHS <s RHS. If RHS is negative then
4066 // LHS >s RHS.
4067 case ICmpInst::ICMP_SGT:
4068 case ICmpInst::ICMP_SGE:
4071 Q.DL);
4072 case ICmpInst::ICMP_SLT:
4073 case ICmpInst::ICMP_SLE:
4076 Q.DL);
4077
4078 // If LHS is non-negative then LHS <u RHS. If LHS is negative then
4079 // LHS >u RHS.
4080 case ICmpInst::ICMP_UGT:
4081 case ICmpInst::ICMP_UGE:
4082 // Comparison is true iff the LHS <s 0.
4083 if (MaxRecurse)
4085 Constant::getNullValue(SrcTy), Q,
4086 MaxRecurse - 1))
4087 return V;
4088 break;
4089 case ICmpInst::ICMP_ULT:
4090 case ICmpInst::ICMP_ULE:
4091 // Comparison is true iff the LHS >=s 0.
4092 if (MaxRecurse)
4094 Constant::getNullValue(SrcTy), Q,
4095 MaxRecurse - 1))
4096 return V;
4097 break;
4098 }
4099 }
4100 }
4101 }
4102 }
4103
4104 // icmp eq|ne X, Y -> false|true if X != Y
4105 // This is potentially expensive, and we have already computedKnownBits for
4106 // compares with 0 above here, so only try this for a non-zero compare.
4107 if (ICmpInst::isEquality(Pred) && !match(RHS, m_Zero()) &&
4108 isKnownNonEqual(LHS, RHS, Q)) {
4109 return Pred == ICmpInst::ICMP_NE ? getTrue(ITy) : getFalse(ITy);
4110 }
4111
4112 if (Value *V = simplifyICmpWithBinOp(Pred, LHS, RHS, Q, MaxRecurse))
4113 return V;
4114
4115 if (Value *V = simplifyICmpWithMinMax(Pred, LHS, RHS, Q, MaxRecurse))
4116 return V;
4117
4119 return V;
4122 return V;
4123
4124 if (Value *V = simplifyICmpUsingMonotonicValues(Pred, LHS, RHS, Q))
4125 return V;
4128 return V;
4129
4130 if (Value *V = simplifyICmpWithDominatingAssume(Pred, LHS, RHS, Q))
4131 return V;
4132
4133 if (std::optional<bool> Res =
4134 isImpliedByDomCondition(Pred, LHS, RHS, Q.CxtI, Q.DL))
4135 return ConstantInt::getBool(ITy, *Res);
4136
4137 // Simplify comparisons of related pointers using a powerful, recursive
4138 // GEP-walk when we have target data available..
4139 if (LHS->getType()->isPointerTy())
4140 if (auto *C = computePointerICmp(Pred, LHS, RHS, Q))
4141 return C;
4142
4143 // If the comparison is with the result of a select instruction, check whether
4144 // comparing with either branch of the select always yields the same value.
4146 if (Value *V = threadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
4147 return V;
4148
4149 // If the comparison is with the result of a phi instruction, check whether
4150 // doing the compare with each incoming phi value yields a common result.
4152 if (Value *V = threadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
4153 return V;
4154
4155 return nullptr;
4156}
4157
4159 const SimplifyQuery &Q) {
4160 return ::simplifyICmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
4161}
4162
4163/// Given operands for an FCmpInst, see if we can fold the result.
4164/// If not, this returns null.
4166 FastMathFlags FMF, const SimplifyQuery &Q,
4167 unsigned MaxRecurse) {
4168 assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!");
4169
4170 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
4171 if (Constant *CRHS = dyn_cast<Constant>(RHS)) {
4172 // if the folding isn't successfull, fall back to the rest of the logic
4173 if (auto *Result = ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL,
4174 Q.TLI, Q.CxtI))
4175 return Result;
4176 } else {
4177 // If we have a constant, make sure it is on the RHS.
4178 std::swap(LHS, RHS);
4179 Pred = CmpInst::getSwappedPredicate(Pred);
4180 }
4181 }
4182
4183 // Fold trivial predicates.
4184 Type *RetTy = getCompareTy(LHS);
4185 if (Pred == FCmpInst::FCMP_FALSE)
4186 return getFalse(RetTy);
4187 if (Pred == FCmpInst::FCMP_TRUE)
4188 return getTrue(RetTy);
4189
4190 // fcmp pred x, poison and fcmp pred poison, x
4191 // fold to poison
4193 return PoisonValue::get(RetTy);
4194
4195 // fcmp pred x, undef and fcmp pred undef, x
4196 // fold to true if unordered, false if ordered
4197 if (Q.isUndefValue(LHS) || Q.isUndefValue(RHS)) {
4198 // Choosing NaN for the undef will always make unordered comparison succeed
4199 // and ordered comparison fail.
4200 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
4201 }
4202
4203 // fcmp x,x -> true/false. Not all compares are foldable.
4204 if (LHS == RHS) {
4205 if (CmpInst::isTrueWhenEqual(Pred))
4206 return getTrue(RetTy);
4207 if (CmpInst::isFalseWhenEqual(Pred))
4208 return getFalse(RetTy);
4209 }
4210
4211 // Fold (un)ordered comparison if we can determine there are no NaNs.
4212 //
4213 // This catches the 2 variable input case, constants are handled below as a
4214 // class-like compare.
4215 if (Pred == FCmpInst::FCMP_ORD || Pred == FCmpInst::FCMP_UNO) {
4218
4219 if (FMF.noNaNs() ||
4220 (RHSClass.isKnownNeverNaN() && LHSClass.isKnownNeverNaN()))
4221 return ConstantInt::get(RetTy, Pred == FCmpInst::FCMP_ORD);
4222
4223 if (RHSClass.isKnownAlwaysNaN() || LHSClass.isKnownAlwaysNaN())
4224 return ConstantInt::get(RetTy, Pred == CmpInst::FCMP_UNO);
4225 }
4226
4227 if (std::optional<bool> Res =
4228 isImpliedByDomCondition(Pred, LHS, RHS, Q.CxtI, Q.DL))
4229 return ConstantInt::getBool(RetTy, *Res);
4230
4231 const APFloat *C = nullptr;
4233 std::optional<KnownFPClass> FullKnownClassLHS;
4234
4235 // Lazily compute the possible classes for LHS. Avoid computing it twice if
4236 // RHS is a 0.
4237 auto computeLHSClass = [=, &FullKnownClassLHS](FPClassTest InterestedFlags =
4238 fcAllFlags) {
4239 if (FullKnownClassLHS)
4240 return *FullKnownClassLHS;
4241 return computeKnownFPClass(LHS, FMF, InterestedFlags, Q);
4242 };
4243
4244 if (C && Q.CxtI) {
4245 // Fold out compares that express a class test.
4246 //
4247 // FIXME: Should be able to perform folds without context
4248 // instruction. Always pass in the context function?
4249
4250 const Function *ParentF = Q.CxtI->getFunction();
4251 auto [ClassVal, ClassTest] = fcmpToClassTest(Pred, *ParentF, LHS, C);
4252 if (ClassVal) {
4253 FullKnownClassLHS = computeLHSClass();
4254 if ((FullKnownClassLHS->KnownFPClasses & ClassTest) == fcNone)
4255 return getFalse(RetTy);
4256 if ((FullKnownClassLHS->KnownFPClasses & ~ClassTest) == fcNone)
4257 return getTrue(RetTy);
4258 }
4259 }
4260
4261 // Handle fcmp with constant RHS.
4262 if (C) {
4263 // TODO: If we always required a context function, we wouldn't need to
4264 // special case nans.
4265 if (C->isNaN())
4266 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
4267
4268 // TODO: Need version fcmpToClassTest which returns implied class when the
4269 // compare isn't a complete class test. e.g. > 1.0 implies fcPositive, but
4270 // isn't implementable as a class call.
4271 if (C->isNegative() && !C->isNegZero()) {
4273
4274 // TODO: We can catch more cases by using a range check rather than
4275 // relying on CannotBeOrderedLessThanZero.
4276 switch (Pred) {
4277 case FCmpInst::FCMP_UGE:
4278 case FCmpInst::FCMP_UGT:
4279 case FCmpInst::FCMP_UNE: {
4280 KnownFPClass KnownClass = computeLHSClass(Interested);
4281
4282 // (X >= 0) implies (X > C) when (C < 0)
4283 if (KnownClass.cannotBeOrderedLessThanZero())
4284 return getTrue(RetTy);
4285 break;
4286 }
4287 case FCmpInst::FCMP_OEQ:
4288 case FCmpInst::FCMP_OLE:
4289 case FCmpInst::FCMP_OLT: {
4290 KnownFPClass KnownClass = computeLHSClass(Interested);
4291
4292 // (X >= 0) implies !(X < C) when (C < 0)
4293 if (KnownClass.cannotBeOrderedLessThanZero())
4294 return getFalse(RetTy);
4295 break;
4296 }
4297 default:
4298 break;
4299 }
4300 }
4301 // Check FCmp of [min/maxnum or min/maximumnum with const] with other const.
4302 const APFloat *C2;
4303 bool IsMax = match(LHS, m_FMaxNum_or_FMaximumNum(m_Value(), m_APFloat(C2)));
4304 bool IsMin = match(LHS, m_FMinNum_or_FMinimumNum(m_Value(), m_APFloat(C2)));
4305 if ((IsMax && *C2 > *C) || (IsMin && *C2 < *C)) {
4306 // The ordered relationship and min/maxnum or min/maximumnum guarantee
4307 // that we do not have NaN constants, so ordered/unordered preds are
4308 // handled the same.
4309 switch (Pred) {
4310 case FCmpInst::FCMP_OEQ:
4311 case FCmpInst::FCMP_UEQ:
4312 // minnum(X, LesserC) == C --> false
4313 // maxnum(X, GreaterC) == C --> false
4314 return getFalse(RetTy);
4315 case FCmpInst::FCMP_ONE:
4316 case FCmpInst::FCMP_UNE:
4317 // minnum(X, LesserC) != C --> true
4318 // maxnum(X, GreaterC) != C --> true
4319 return getTrue(RetTy);
4320 case FCmpInst::FCMP_OGE:
4321 case FCmpInst::FCMP_UGE:
4322 case FCmpInst::FCMP_OGT:
4323 case FCmpInst::FCMP_UGT:
4324 // minnum(X, LesserC) >= C --> false
4325 // minnum(X, LesserC) > C --> false
4326 // maxnum(X, GreaterC) >= C --> true
4327 // maxnum(X, GreaterC) > C --> true
4328 return ConstantInt::get(RetTy, IsMax);
4329 case FCmpInst::FCMP_OLE:
4330 case FCmpInst::FCMP_ULE:
4331 case FCmpInst::FCMP_OLT:
4332 case FCmpInst::FCMP_ULT:
4333 // minnum(X, LesserC) <= C --> true
4334 // minnum(X, LesserC) < C --> true
4335 // maxnum(X, GreaterC) <= C --> false
4336 // maxnum(X, GreaterC) < C --> false
4337 return ConstantInt::get(RetTy, !IsMax);
4338 default:
4339 // TRUE/FALSE/ORD/UNO should be handled before this.
4340 llvm_unreachable("Unexpected fcmp predicate");
4341 }
4342 }
4343 }
4344
4345 // TODO: Could fold this with above if there were a matcher which returned all
4346 // classes in a non-splat vector.
4347 if (match(RHS, m_AnyZeroFP())) {
4348 switch (Pred) {
4349 case FCmpInst::FCMP_OGE:
4350 case FCmpInst::FCMP_ULT: {
4352 if (!FMF.noNaNs())
4353 Interested |= fcNan;
4354
4355 KnownFPClass Known = computeLHSClass(Interested);
4356
4357 // Positive or zero X >= 0.0 --> true
4358 // Positive or zero X < 0.0 --> false
4359 if ((FMF.noNaNs() || Known.isKnownNeverNaN()) &&
4361 return Pred == FCmpInst::FCMP_OGE ? getTrue(RetTy) : getFalse(RetTy);
4362 break;
4363 }
4364 case FCmpInst::FCMP_UGE:
4365 case FCmpInst::FCMP_OLT: {
4367 KnownFPClass Known = computeLHSClass(Interested);
4368
4369 // Positive or zero or nan X >= 0.0 --> true
4370 // Positive or zero or nan X < 0.0 --> false
4371 if (Known.cannotBeOrderedLessThanZero())
4372 return Pred == FCmpInst::FCMP_UGE ? getTrue(RetTy) : getFalse(RetTy);
4373 break;
4374 }
4375 default:
4376 break;
4377 }
4378 }
4379
4380 // If the comparison is with the result of a select instruction, check whether
4381 // comparing with either branch of the select always yields the same value.
4383 if (Value *V = threadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
4384 return V;
4385
4386 // If the comparison is with the result of a phi instruction, check whether
4387 // doing the compare with each incoming phi value yields a common result.
4389 if (Value *V = threadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
4390 return V;
4391
4392 return nullptr;
4393}
4394
4396 FastMathFlags FMF, const SimplifyQuery &Q) {
4397 return ::simplifyFCmpInst(Predicate, LHS, RHS, FMF, Q, RecursionLimit);
4398}
4399
4401 ArrayRef<std::pair<Value *, Value *>> Ops,
4402 const SimplifyQuery &Q,
4403 bool AllowRefinement,
4405 unsigned MaxRecurse) {
4406 assert((AllowRefinement || !Q.CanUseUndef) &&
4407 "If AllowRefinement=false then CanUseUndef=false");
4408 for (const auto &OpAndRepOp : Ops) {
4409 // We cannot replace a constant, and shouldn't even try.
4410 if (isa<Constant>(OpAndRepOp.first))
4411 return nullptr;
4412
4413 // Trivial replacement.
4414 if (V == OpAndRepOp.first)
4415 return OpAndRepOp.second;
4416 }
4417
4418 if (!MaxRecurse--)
4419 return nullptr;
4420
4421 auto *I = dyn_cast<Instruction>(V);
4422 if (!I)
4423 return nullptr;
4424
4425 // The arguments of a phi node might refer to a value from a previous
4426 // cycle iteration.
4427 if (isa<PHINode>(I))
4428 return nullptr;
4429
4430 // Don't fold away llvm.is.constant checks based on assumptions.
4432 return nullptr;
4433
4434 // Don't simplify freeze.
4435 if (isa<FreezeInst>(I))
4436 return nullptr;
4437
4438 for (const auto &OpAndRepOp : Ops) {
4439 // For vector types, the simplification must hold per-lane, so forbid
4440 // potentially cross-lane operations like shufflevector.
4441 if (OpAndRepOp.first->getType()->isVectorTy() &&
4443 return nullptr;
4444 }
4445
4446 // Replace Op with RepOp in instruction operands.
4448 bool AnyReplaced = false;
4449 for (Value *InstOp : I->operands()) {
4450 if (Value *NewInstOp = simplifyWithOpsReplaced(
4451 InstOp, Ops, Q, AllowRefinement, DropFlags, MaxRecurse)) {
4452 NewOps.push_back(NewInstOp);
4453 AnyReplaced = InstOp != NewInstOp;
4454 } else {
4455 NewOps.push_back(InstOp);
4456 }
4457
4458 // Bail out if any operand is undef and SimplifyQuery disables undef
4459 // simplification. Constant folding currently doesn't respect this option.
4460 if (isa<UndefValue>(NewOps.back()) && !Q.CanUseUndef)
4461 return nullptr;
4462 }
4463
4464 if (!AnyReplaced)
4465 return nullptr;
4466
4467 if (!AllowRefinement) {
4468 // General InstSimplify functions may refine the result, e.g. by returning
4469 // a constant for a potentially poison value. To avoid this, implement only
4470 // a few non-refining but profitable transforms here.
4471
4472 if (auto *BO = dyn_cast<BinaryOperator>(I)) {
4473 unsigned Opcode = BO->getOpcode();
4474 // id op x -> x, x op id -> x
4475 // Exclude floats, because x op id may produce a different NaN value.
4476 if (!BO->getType()->isFPOrFPVectorTy()) {
4477 if (NewOps[0] == ConstantExpr::getBinOpIdentity(Opcode, I->getType()))
4478 return NewOps[1];
4479 if (NewOps[1] == ConstantExpr::getBinOpIdentity(Opcode, I->getType(),
4480 /* RHS */ true))
4481 return NewOps[0];
4482 }
4483
4484 // x & x -> x, x | x -> x
4485 if ((Opcode == Instruction::And || Opcode == Instruction::Or) &&
4486 NewOps[0] == NewOps[1]) {
4487 // or disjoint x, x results in poison.
4488 if (auto *PDI = dyn_cast<PossiblyDisjointInst>(BO)) {
4489 if (PDI->isDisjoint()) {
4490 if (!DropFlags)
4491 return nullptr;
4492 DropFlags->push_back(BO);
4493 }
4494 }
4495 return NewOps[0];
4496 }
4497
4498 // x - x -> 0, x ^ x -> 0. This is non-refining, because x is non-poison
4499 // by assumption and this case never wraps, so nowrap flags can be
4500 // ignored.
4501 if ((Opcode == Instruction::Sub || Opcode == Instruction::Xor) &&
4502 NewOps[0] == NewOps[1] &&
4503 any_of(Ops, [=](const auto &Rep) { return NewOps[0] == Rep.second; }))
4504 return Constant::getNullValue(I->getType());
4505
4506 // If we are substituting an absorber constant into a binop and extra
4507 // poison can't leak if we remove the select -- because both operands of
4508 // the binop are based on the same value -- then it may be safe to replace
4509 // the value with the absorber constant. Examples:
4510 // (Op == 0) ? 0 : (Op & -Op) --> Op & -Op
4511 // (Op == 0) ? 0 : (Op * (binop Op, C)) --> Op * (binop Op, C)
4512 // (Op == -1) ? -1 : (Op | (binop C, Op) --> Op | (binop C, Op)
4513 Constant *Absorber = ConstantExpr::getBinOpAbsorber(Opcode, I->getType());
4514 if ((NewOps[0] == Absorber || NewOps[1] == Absorber) &&
4515 any_of(Ops,
4516 [=](const auto &Rep) { return impliesPoison(BO, Rep.first); }))
4517 return Absorber;
4518 }
4519
4520 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
4521 // `x == y ? 0 : ucmp(x, y)` where under the replacement y -> x,
4522 // `ucmp(x, x)` becomes `0`.
4523 if ((II->getIntrinsicID() == Intrinsic::scmp ||
4524 II->getIntrinsicID() == Intrinsic::ucmp) &&
4525 NewOps[0] == NewOps[1]) {
4526 if (II->hasPoisonGeneratingAnnotations()) {
4527 if (!DropFlags)
4528 return nullptr;
4529
4530 DropFlags->push_back(II);
4531 }
4532
4533 return ConstantInt::get(I->getType(), 0);
4534 }
4535 }
4536
4538 // getelementptr x, 0 -> x.
4539 // This never returns poison, even if inbounds is set.
4540 if (NewOps.size() == 2 && match(NewOps[1], m_Zero()))
4541 return NewOps[0];
4542 }
4543 } else {
4544 // The simplification queries below may return the original value. Consider:
4545 // %div = udiv i32 %arg, %arg2
4546 // %mul = mul nsw i32 %div, %arg2
4547 // %cmp = icmp eq i32 %mul, %arg
4548 // %sel = select i1 %cmp, i32 %div, i32 undef
4549 // Replacing %arg by %mul, %div becomes "udiv i32 %mul, %arg2", which
4550 // simplifies back to %arg. This can only happen because %mul does not
4551 // dominate %div. To ensure a consistent return value contract, we make sure
4552 // that this case returns nullptr as well.
4553 auto PreventSelfSimplify = [V](Value *Simplified) {
4554 return Simplified != V ? Simplified : nullptr;
4555 };
4556
4557 return PreventSelfSimplify(
4558 ::simplifyInstructionWithOperands(I, NewOps, Q, MaxRecurse));
4559 }
4560
4561 // If all operands are constant after substituting Op for RepOp then we can
4562 // constant fold the instruction.
4564 for (Value *NewOp : NewOps) {
4565 if (Constant *ConstOp = dyn_cast<Constant>(NewOp))
4566 ConstOps.push_back(ConstOp);
4567 else
4568 return nullptr;
4569 }
4570
4571 // Consider:
4572 // %cmp = icmp eq i32 %x, 2147483647
4573 // %add = add nsw i32 %x, 1
4574 // %sel = select i1 %cmp, i32 -2147483648, i32 %add
4575 //
4576 // We can't replace %sel with %add unless we strip away the flags (which
4577 // will be done in InstCombine).
4578 // TODO: This may be unsound, because it only catches some forms of
4579 // refinement.
4580 if (!AllowRefinement) {
4581 auto *II = dyn_cast<IntrinsicInst>(I);
4582 if (canCreatePoison(cast<Operator>(I), !DropFlags)) {
4583 // abs cannot create poison if the value is known to never be int_min.
4584 if (II && II->getIntrinsicID() == Intrinsic::abs) {
4585 if (!ConstOps[0]->isNotMinSignedValue())
4586 return nullptr;
4587 } else
4588 return nullptr;
4589 }
4590
4591 if (DropFlags && II) {
4592 // If we're going to change the poison flag of abs/ctz to false, also
4593 // perform constant folding that way, so we get an integer instead of a
4594 // poison value here.
4595 switch (II->getIntrinsicID()) {
4596 case Intrinsic::abs:
4597 case Intrinsic::ctlz:
4598 case Intrinsic::cttz:
4599 ConstOps[1] = ConstantInt::getFalse(I->getContext());
4600 break;
4601 default:
4602 break;
4603 }
4604 }
4605
4606 Constant *Res = ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI,
4607 /*AllowNonDeterministic=*/false);
4608 if (DropFlags && Res && I->hasPoisonGeneratingAnnotations())
4609 DropFlags->push_back(I);
4610 return Res;
4611 }
4612
4613 return ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI,
4614 /*AllowNonDeterministic=*/false);
4615}
4616
4618 const SimplifyQuery &Q,
4619 bool AllowRefinement,
4621 unsigned MaxRecurse) {
4622 return simplifyWithOpsReplaced(V, {{Op, RepOp}}, Q, AllowRefinement,
4623 DropFlags, MaxRecurse);
4624}
4625
// Public entry point: seeds the recursion with RecursionLimit and, when
// refinement is forbidden, strips undef-based folds from the query up front.
// NOTE(review): the rendering dropped original line 4626 (presumably
// `Value *llvm::simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,`)
// and line 4636 (presumably the trailing `RecursionLimit);` argument of the
// second call). Confirm against the upstream file.
4627 const SimplifyQuery &Q,
4628 bool AllowRefinement,
4629 SmallVectorImpl<Instruction *> *DropFlags) {
4630 // If refinement is disabled, also disable undef simplifications (which are
4631 // always refinements) in SimplifyQuery.
4632 if (!AllowRefinement)
4633 return ::simplifyWithOpReplaced(V, Op, RepOp, Q.getWithoutUndef(),
4634 AllowRefinement, DropFlags, RecursionLimit);
4635 return ::simplifyWithOpReplaced(V, Op, RepOp, Q, AllowRefinement, DropFlags,
4637}
4638
4639/// Try to simplify a select instruction when its condition operand is an
4640/// integer comparison where one operand of the compare is a constant.
/// \p X is the value whose bits are tested, \p Y is the tested mask constant,
/// and \p TrueWhenUnset is true for the `(X & Y) == 0` form of the compare
/// (false for the `!= 0` form). Returns the arm the select simplifies to,
/// or nullptr if no fold applies.
4641static Value *simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X,
4642 const APInt *Y, bool TrueWhenUnset) {
4643 const APInt *C;
4644
4645 // (X & Y) == 0 ? X & ~Y : X --> X
4646 // (X & Y) != 0 ? X & ~Y : X --> X & ~Y
4647 if (FalseVal == X && match(TrueVal, m_And(m_Specific(X), m_APInt(C))) &&
4648 *Y == ~*C)
4649 return TrueWhenUnset ? FalseVal : TrueVal;
4650
4651 // (X & Y) == 0 ? X : X & ~Y --> X & ~Y
4652 // (X & Y) != 0 ? X : X & ~Y --> X
4653 if (TrueVal == X && match(FalseVal, m_And(m_Specific(X), m_APInt(C))) &&
4654 *Y == ~*C)
4655 return TrueWhenUnset ? FalseVal : TrueVal;
4656
// The or-based folds below only work when Y has a single bit set, so that
// (X & Y) == 0 tells us exactly which bit X | Y adds.
4657 if (Y->isPowerOf2()) {
4658 // (X & Y) == 0 ? X | Y : X --> X | Y
4659 // (X & Y) != 0 ? X | Y : X --> X
4660 if (FalseVal == X && match(TrueVal, m_Or(m_Specific(X), m_APInt(C))) &&
4661 *Y == *C) {
4662 // We can't return the or if it has the disjoint flag.
4663 if (TrueWhenUnset && cast<PossiblyDisjointInst>(TrueVal)->isDisjoint())
4664 return nullptr;
4665 return TrueWhenUnset ? TrueVal : FalseVal;
4666 }
4667
4668 // (X & Y) == 0 ? X : X | Y --> X
4669 // (X & Y) != 0 ? X : X | Y --> X | Y
4670 if (TrueVal == X && match(FalseVal, m_Or(m_Specific(X), m_APInt(C))) &&
4671 *Y == *C) {
4672 // We can't return the or if it has the disjoint flag.
4673 if (!TrueWhenUnset && cast<PossiblyDisjointInst>(FalseVal)->isDisjoint())
4674 return nullptr;
4675 return TrueWhenUnset ? TrueVal : FalseVal;
4676 }
4677 }
4678
4679 return nullptr;
4680}
4681
/// Fold `select (icmp Pred X, Y), X, max/min(X, Y)`-style patterns where the
/// false arm is a min/max intrinsic of the compared operands (possibly seen
/// through a vector select shuffle). Returns the simplified value or nullptr.
4682static Value *simplifyCmpSelOfMaxMin(Value *CmpLHS, Value *CmpRHS,
4683 CmpPredicate Pred, Value *TVal,
4684 Value *FVal) {
4685 // Canonicalize common cmp+sel operand as CmpLHS.
4686 if (CmpRHS == TVal || CmpRHS == FVal) {
4687 std::swap(CmpLHS, CmpRHS);
4688 Pred = ICmpInst::getSwappedPredicate(Pred);
4689 }
4690
4691 // Canonicalize common cmp+sel operand as TVal.
4692 if (CmpLHS == FVal) {
4693 std::swap(TVal, FVal);
4694 Pred = ICmpInst::getInversePredicate(Pred);
4695 }
4696
4697 // A vector select may be shuffling together elements that are equivalent
4698 // based on the max/min/select relationship.
4699 Value *X = CmpLHS, *Y = CmpRHS;
4700 bool PeekedThroughSelectShuffle = false;
4701 auto *Shuf = dyn_cast<ShuffleVectorInst>(FVal);
4702 if (Shuf && Shuf->isSelect()) {
4703 if (Shuf->getOperand(0) == Y)
4704 FVal = Shuf->getOperand(1);
4705 else if (Shuf->getOperand(1) == Y)
4706 FVal = Shuf->getOperand(0);
4707 else
4708 return nullptr;
4709 PeekedThroughSelectShuffle = true;
4710 }
4711
4712 // (X pred Y) ? X : max/min(X, Y)
4713 auto *MMI = dyn_cast<MinMaxIntrinsic>(FVal);
4714 if (!MMI || TVal != X ||
// NOTE(review): the rendering dropped original line 4715 (the remainder of
// this condition — presumably a check that MMI's operands match X and Y).
// Confirm against the upstream file.
4716 return nullptr;
4717
4718 // (X > Y) ? X : max(X, Y) --> max(X, Y)
4719 // (X >= Y) ? X : max(X, Y) --> max(X, Y)
4720 // (X < Y) ? X : min(X, Y) --> min(X, Y)
4721 // (X <= Y) ? X : min(X, Y) --> min(X, Y)
4722 //
4723 // The equivalence allows a vector select (shuffle) of max/min and Y. Ex:
4724 // (X > Y) ? X : (Z ? max(X, Y) : Y)
4725 // If Z is true, this reduces as above, and if Z is false:
4726 // (X > Y) ? X : Y --> max(X, Y)
4727 ICmpInst::Predicate MMPred = MMI->getPredicate();
4728 if (MMPred == CmpInst::getStrictPredicate(Pred))
4729 return MMI;
4730
4731 // Other transforms are not valid with a shuffle.
4732 if (PeekedThroughSelectShuffle)
4733 return nullptr;
4734
4735 // (X == Y) ? X : max/min(X, Y) --> max/min(X, Y)
4736 if (Pred == CmpInst::ICMP_EQ)
4737 return MMI;
4738
4739 // (X != Y) ? X : max/min(X, Y) --> X
4740 if (Pred == CmpInst::ICMP_NE)
4741 return X;
4742
4743 // (X < Y) ? X : max(X, Y) --> X
4744 // (X <= Y) ? X : max(X, Y) --> X
4745 // (X > Y) ? X : min(X, Y) --> X
4746 // (X >= Y) ? X : min(X, Y) --> X
// NOTE(review): the rendering dropped original line 4747 — presumably the
// declaration of InvPred (the inverse of Pred), which the next line reads.
// Confirm against the upstream file.
4748 if (MMPred == CmpInst::getStrictPredicate(InvPred))
4749 return X;
4750
4751 return nullptr;
4752}
4753
4754/// An alternative way to test if a bit is set or not.
4755/// Uses e.g. sgt/slt or trunc instead of eq/ne.
/// Decomposes \p CondVal into an equivalent `(X & Mask) ==/!= 0` test and
/// defers to simplifySelectBitTest. Returns the fold or nullptr.
4756static Value *simplifySelectWithBitTest(Value *CondVal, Value *TrueVal,
4757 Value *FalseVal) {
4758 if (auto Res = decomposeBitTest(CondVal))
4759 return simplifySelectBitTest(TrueVal, FalseVal, Res->X, &Res->Mask,
4760 Res->Pred == ICmpInst::ICMP_EQ);
4761
4762 return nullptr;
4763}
4764
4765/// Try to simplify a select instruction when its condition operand is an
4766/// integer equality or floating-point equivalence comparison.
/// If substituting the equivalences from \p Replacements into one arm makes
/// both arms equal, the select folds to the false arm.
// NOTE(review): the rendering dropped original line 4767 — presumably the
// function signature line (`static Value *simplifySelectWithEquivalence(`).
// Confirm against the upstream file.
4768 ArrayRef<std::pair<Value *, Value *>> Replacements, Value *TrueVal,
4769 Value *FalseVal, const SimplifyQuery &Q, unsigned MaxRecurse) {
// The false arm must not be refined (and undef folds are refinements, hence
// getWithoutUndef), because it is the value we ultimately return below.
4770 Value *SimplifiedFalseVal =
4771 simplifyWithOpsReplaced(FalseVal, Replacements, Q.getWithoutUndef(),
4772 /* AllowRefinement */ false,
4773 /* DropFlags */ nullptr, MaxRecurse);
4774 if (!SimplifiedFalseVal)
4775 SimplifiedFalseVal = FalseVal;
4776
// The true arm may be refined: it is only compared against, never returned.
4777 Value *SimplifiedTrueVal =
4778 simplifyWithOpsReplaced(TrueVal, Replacements, Q,
4779 /* AllowRefinement */ true,
4780 /* DropFlags */ nullptr, MaxRecurse);
4781 if (!SimplifiedTrueVal)
4782 SimplifiedTrueVal = TrueVal;
4783
4784 if (SimplifiedFalseVal == SimplifiedTrueVal)
4785 return FalseVal;
4786
4787 return nullptr;
4788}
4789
4790/// Try to simplify a select instruction when its condition operand is an
4791/// integer comparison.
/// Returns the simplified value, or nullptr if no fold applies.
4792static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal,
4793 Value *FalseVal,
4794 const SimplifyQuery &Q,
4795 unsigned MaxRecurse) {
4796 CmpPredicate Pred;
4797 Value *CmpLHS, *CmpRHS;
4798 if (!match(CondVal, m_ICmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS))))
4799 return nullptr;
4800
4801 if (Value *V = simplifyCmpSelOfMaxMin(CmpLHS, CmpRHS, Pred, TrueVal, FalseVal))
4802 return V;
4803
4804 // Canonicalize ne to eq predicate.
4805 if (Pred == ICmpInst::ICMP_NE) {
4806 Pred = ICmpInst::ICMP_EQ;
4807 std::swap(TrueVal, FalseVal);
4808 }
4809
4810 // Check for integer min/max with a limit constant:
4811 // X > MIN_INT ? X : MIN_INT --> X
4812 // X < MAX_INT ? X : MAX_INT --> X
4813 if (TrueVal->getType()->isIntOrIntVectorTy()) {
4814 Value *X, *Y;
// NOTE(review): the rendering dropped original line 4815 — presumably the
// declaration initialized by this expression (SPF is read two lines below).
// Confirm against the upstream file.
4816 matchDecomposedSelectPattern(cast<ICmpInst>(CondVal), TrueVal, FalseVal,
4817 X, Y)
4818 .Flavor;
4819 if (SelectPatternResult::isMinOrMax(SPF) && Pred == getMinMaxPred(SPF)) {
// NOTE(review): original line 4820 was dropped — presumably the declaration
// of LimitC, continued on the next line. Confirm against the upstream file.
4821 X->getType()->getScalarSizeInBits());
4822 if (match(Y, m_SpecificInt(LimitC)))
4823 return X;
4824 }
4825 }
4826
4827 if (Pred == ICmpInst::ICMP_EQ && match(CmpRHS, m_Zero())) {
4828 Value *X;
4829 const APInt *Y;
4830 if (match(CmpLHS, m_And(m_Value(X), m_APInt(Y))))
4831 if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, Y,
4832 /*TrueWhenUnset=*/true))
4833 return V;
4834
4835 // Test for a bogus zero-shift-guard-op around funnel-shift or rotate.
4836 Value *ShAmt;
4837 auto isFsh = m_CombineOr(m_FShl(m_Value(X), m_Value(), m_Value(ShAmt)),
4838 m_FShr(m_Value(), m_Value(X), m_Value(ShAmt)));
4839 // (ShAmt == 0) ? fshl(X, *, ShAmt) : X --> X
4840 // (ShAmt == 0) ? fshr(*, X, ShAmt) : X --> X
4841 if (match(TrueVal, isFsh) && FalseVal == X && CmpLHS == ShAmt)
4842 return X;
4843
4844 // Test for a zero-shift-guard-op around rotates. These are used to
4845 // avoid UB from oversized shifts in raw IR rotate patterns, but the
4846 // intrinsics do not have that problem.
4847 // We do not allow this transform for the general funnel shift case because
4848 // that would not preserve the poison safety of the original code.
4849 auto isRotate =
// NOTE(review): original line 4850 was dropped — presumably the first
// m_CombineOr alternative, an m_FShl with a repeated (m_Deferred) operand
// mirroring the m_FShr on the next line. Confirm against the upstream file.
4851 m_FShr(m_Value(X), m_Deferred(X), m_Value(ShAmt)));
4852 // (ShAmt == 0) ? X : fshl(X, X, ShAmt) --> fshl(X, X, ShAmt)
4853 // (ShAmt == 0) ? X : fshr(X, X, ShAmt) --> fshr(X, X, ShAmt)
4854 if (match(FalseVal, isRotate) && TrueVal == X && CmpLHS == ShAmt &&
4855 Pred == ICmpInst::ICMP_EQ)
4856 return FalseVal;
4857
4858 // X == 0 ? abs(X) : -abs(X) --> -abs(X)
4859 // X == 0 ? -abs(X) : abs(X) --> abs(X)
4860 if (match(TrueVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))) &&
// NOTE(review): original line 4861 was dropped — presumably the matching
// negated-abs pattern on FalseVal. Confirm against the upstream file.
4862 return FalseVal;
4863 if (match(TrueVal,
// NOTE(review): original line 4864 was dropped — presumably the
// negated-abs pattern continuing this match(TrueVal, ...) call. Confirm
// against the upstream file.
4865 match(FalseVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))))
4866 return FalseVal;
4867 }
4868
4869 // If we have a scalar equality comparison, then we know the value in one of
4870 // the arms of the select. See if substituting this value into the arm and
4871 // simplifying the result yields the same value as the other arm.
4872 if (Pred == ICmpInst::ICMP_EQ) {
4873 if (CmpLHS->getType()->isIntOrIntVectorTy() ||
4874 canReplacePointersIfEqual(CmpLHS, CmpRHS, Q.DL))
4875 if (Value *V = simplifySelectWithEquivalence({{CmpLHS, CmpRHS}}, TrueVal,
4876 FalseVal, Q, MaxRecurse))
4877 return V;
4878 if (CmpLHS->getType()->isIntOrIntVectorTy() ||
4879 canReplacePointersIfEqual(CmpRHS, CmpLHS, Q.DL))
4880 if (Value *V = simplifySelectWithEquivalence({{CmpRHS, CmpLHS}}, TrueVal,
4881 FalseVal, Q, MaxRecurse))
4882 return V;
4883
4884 Value *X;
4885 Value *Y;
4886 // select((X | Y) == 0 ? X : 0) --> 0 (commuted 2 ways)
4887 if (match(CmpLHS, m_Or(m_Value(X), m_Value(Y))) &&
4888 match(CmpRHS, m_Zero())) {
4889 // (X | Y) == 0 implies X == 0 and Y == 0.
// NOTE(review): original line 4890 was dropped — presumably
// `if (Value *V = simplifySelectWithEquivalence(` continued below.
4891 {{X, CmpRHS}, {Y, CmpRHS}}, TrueVal, FalseVal, Q, MaxRecurse))
4892 return V;
4893 }
4894
4895 // select((X & Y) == -1 ? X : -1) --> -1 (commuted 2 ways)
4896 if (match(CmpLHS, m_And(m_Value(X), m_Value(Y))) &&
4897 match(CmpRHS, m_AllOnes())) {
4898 // (X & Y) == -1 implies X == -1 and Y == -1.
// NOTE(review): original line 4899 was dropped — presumably
// `if (Value *V = simplifySelectWithEquivalence(` continued below.
4900 {{X, CmpRHS}, {Y, CmpRHS}}, TrueVal, FalseVal, Q, MaxRecurse))
4901 return V;
4902 }
4903 }
4904
4905 return nullptr;
4906}
4907
4908/// Try to simplify a select instruction when its condition operand is a
4909/// floating-point comparison.
/// Returns the simplified value, or nullptr if no fold applies.
// NOTE(review): the rendering dropped original line 4910 — presumably the
// function signature (`static Value *simplifySelectWithFCmp(Value *Cond, ...`).
// Confirm against the upstream file.
4911 const SimplifyQuery &Q,
4912 unsigned MaxRecurse) {
4913 CmpPredicate Pred;
4914 Value *CmpLHS, *CmpRHS;
4915 if (!match(Cond, m_FCmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS))))
4916 return nullptr;
// NOTE(review): original line 4917 was dropped — presumably the declarations
// of I (the FCmpInst, read below) and/or the local T/F arm copies. Confirm
// against the upstream file.
4918
4919 bool IsEquiv = I->isEquivalence();
4920 if (I->isEquivalence(/*Invert=*/true)) {
4921 std::swap(T, F);
4922 Pred = FCmpInst::getInversePredicate(Pred);
4923 IsEquiv = true;
4924 }
4925
4926 // This transform is safe if at least one operand is known to not be zero.
4927 // Otherwise, the select can change the sign of a zero operand.
4928 if (IsEquiv) {
4929 if (Value *V = simplifySelectWithEquivalence({{CmpLHS, CmpRHS}}, T, F, Q,
4930 MaxRecurse))
4931 return V;
4932 if (Value *V = simplifySelectWithEquivalence({{CmpRHS, CmpLHS}}, T, F, Q,
4933 MaxRecurse))
4934 return V;
4935 }
4936
4937 // Canonicalize CmpLHS to be T, and CmpRHS to be F, if they're swapped.
4938 if (CmpLHS == F && CmpRHS == T)
4939 std::swap(CmpLHS, CmpRHS);
4940
4941 if (CmpLHS != T || CmpRHS != F)
4942 return nullptr;
4943
4944 // This transform is also safe if we do not have (do not care about) -0.0.
4945 if (Q.CxtI && isa<FPMathOperator>(Q.CxtI) && Q.CxtI->hasNoSignedZeros()) {
4946 // (T == F) ? T : F --> F
4947 if (Pred == FCmpInst::FCMP_OEQ)
4948 return F;
4949
4950 // (T != F) ? T : F --> T
4951 if (Pred == FCmpInst::FCMP_UNE)
4952 return T;
4953 }
4954
4955 return nullptr;
4956}
4957
4958/// Look for the following pattern and simplify %to_fold to %identicalPhi.
4959/// Here %phi, %to_fold and %phi.next perform the same functionality as
4960/// %identicalPhi and hence the select instruction %to_fold can be folded
4961/// into %identicalPhi.
4962///
4963/// BB1:
4964/// %identicalPhi = phi [ X, %BB0 ], [ %identicalPhi.next, %BB1 ]
4965/// %phi = phi [ X, %BB0 ], [ %phi.next, %BB1 ]
4966/// ...
4967/// %identicalPhi.next = select %cmp, %val, %identicalPhi
4968/// (or select %cmp, %identicalPhi, %val)
4969/// %to_fold = select %cmp2, %identicalPhi, %phi
4970/// %phi.next = select %cmp, %val, %to_fold
4971/// (or select %cmp, %to_fold, %val)
4972///
4973/// Prove that %phi and %identicalPhi are the same by induction:
4974///
4975/// Base case: Both %phi and %identicalPhi are equal on entry to the loop.
4976/// Inductive case:
4977/// Suppose %phi and %identicalPhi are equal at iteration i.
4978/// We look at their values at iteration i+1 which are %phi.next and
4979/// %identicalPhi.next. They would have become different only when %cmp is
4980/// false and the corresponding values %to_fold and %identicalPhi differ
4981/// (similar reason for the other "or" case in the bracket).
4982///
4983/// The only condition when %to_fold and %identicalPhi could differ is when
4984/// %cmp2 is false and %to_fold is %phi, which contradicts our inductive
4985/// hypothesis that %phi and %identicalPhi are equal. Thus %phi and
4986/// %identicalPhi are always equal at iteration i+1.
// NOTE(review): the rendering dropped original line 4987 — presumably the
// signature (`static bool isSelectWithIdenticalPHI(PHINode &PN, PHINode
// &IdenticalPN) {`), given the parameter uses below and the bool returns.
// Confirm against the upstream file.
4988 if (PN.getParent() != IdenticalPN.getParent())
4989 return false;
4990 if (PN.getNumIncomingValues() != 2)
4991 return false;
4992
4993 // Check that only the backedge incoming value is different.
4994 unsigned DiffVals = 0;
4995 BasicBlock *DiffValBB = nullptr;
4996 for (unsigned i = 0; i < 2; i++) {
4997 BasicBlock *PredBB = PN.getIncomingBlock(i);
4998 if (PN.getIncomingValue(i) !=
4999 IdenticalPN.getIncomingValueForBlock(PredBB)) {
5000 DiffVals++;
5001 DiffValBB = PredBB;
5002 }
5003 }
5004 if (DiffVals != 1)
5005 return false;
5006 // Now check that the backedge incoming values are two select
5007 // instructions with the same condition. Either their true
5008 // values are the same, or their false values are the same.
5009 auto *SI = dyn_cast<SelectInst>(PN.getIncomingValueForBlock(DiffValBB));
5010 auto *IdenticalSI =
5011 dyn_cast<SelectInst>(IdenticalPN.getIncomingValueForBlock(DiffValBB));
5012 if (!SI || !IdenticalSI)
5013 return false;
5014 if (SI->getCondition() != IdenticalSI->getCondition())
5015 return false;
5016
// SIOtherVal / IdenticalSIOtherVal are the arms that differ between the two
// selects (%to_fold and %identicalPhi in the pattern above).
5017 SelectInst *SIOtherVal = nullptr;
5018 Value *IdenticalSIOtherVal = nullptr;
5019 if (SI->getTrueValue() == IdenticalSI->getTrueValue()) {
5020 SIOtherVal = dyn_cast<SelectInst>(SI->getFalseValue());
5021 IdenticalSIOtherVal = IdenticalSI->getFalseValue();
5022 } else if (SI->getFalseValue() == IdenticalSI->getFalseValue()) {
5023 SIOtherVal = dyn_cast<SelectInst>(SI->getTrueValue());
5024 IdenticalSIOtherVal = IdenticalSI->getTrueValue();
5025 } else {
5026 return false;
5027 }
5028
5029 // Now check that the other values in select, i.e., %to_fold and
5030 // %identicalPhi, are essentially the same value.
5031 if (!SIOtherVal || IdenticalSIOtherVal != &IdenticalPN)
5032 return false;
5033 if (!(SIOtherVal->getTrueValue() == &IdenticalPN &&
5034 SIOtherVal->getFalseValue() == &PN) &&
5035 !(SIOtherVal->getTrueValue() == &PN &&
5036 SIOtherVal->getFalseValue() == &IdenticalPN))
5037 return false;
5038 return true;
5039}
5040
5041/// Given operands for a SelectInst, see if we can fold the result.
5042/// If not, this returns null.
5043static Value *simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
5044 const SimplifyQuery &Q, unsigned MaxRecurse) {
// Constant condition: fold directly, handling poison/undef specially.
5045 if (auto *CondC = dyn_cast<Constant>(Cond)) {
5046 if (auto *TrueC = dyn_cast<Constant>(TrueVal))
5047 if (auto *FalseC = dyn_cast<Constant>(FalseVal))
5048 if (Constant *C = ConstantFoldSelectInstruction(CondC, TrueC, FalseC))
5049 return C;
5050
5051 // select poison, X, Y -> poison
5052 if (isa<PoisonValue>(CondC))
5053 return PoisonValue::get(TrueVal->getType());
5054
5055 // select undef, X, Y -> X or Y
5056 if (Q.isUndefValue(CondC))
5057 return isa<Constant>(FalseVal) ? FalseVal : TrueVal;
5058
5059 // select true, X, Y --> X
5060 // select false, X, Y --> Y
5061 // For vectors, allow undef/poison elements in the condition to match the
5062 // defined elements, so we can eliminate the select.
5063 if (match(CondC, m_One()))
5064 return TrueVal;
5065 if (match(CondC, m_Zero()))
5066 return FalseVal;
5067 }
5068
5069 assert(Cond->getType()->isIntOrIntVectorTy(1) &&
5070 "Select must have bool or bool vector condition");
5071 assert(TrueVal->getType() == FalseVal->getType() &&
5072 "Select must have same types for true/false ops");
5073
// Boolean selects: these encode logical and/or, so apply logic identities.
5074 if (Cond->getType() == TrueVal->getType()) {
5075 // select i1 Cond, i1 true, i1 false --> i1 Cond
5076 if (match(TrueVal, m_One()) && match(FalseVal, m_ZeroInt()))
5077 return Cond;
5078
5079 // (X && Y) ? X : Y --> Y (commuted 2 ways)
5080 if (match(Cond, m_c_LogicalAnd(m_Specific(TrueVal), m_Specific(FalseVal))))
5081 return FalseVal;
5082
5083 // (X || Y) ? X : Y --> X (commuted 2 ways)
5084 if (match(Cond, m_c_LogicalOr(m_Specific(TrueVal), m_Specific(FalseVal))))
5085 return TrueVal;
5086
5087 // (X || Y) ? false : X --> false (commuted 2 ways)
5088 if (match(Cond, m_c_LogicalOr(m_Specific(FalseVal), m_Value())) &&
5089 match(TrueVal, m_ZeroInt()))
5090 return ConstantInt::getFalse(Cond->getType());
5091
5092 // Match patterns that end in logical-and.
5093 if (match(FalseVal, m_ZeroInt())) {
5094 // !(X || Y) && X --> false (commuted 2 ways)
5095 if (match(Cond, m_Not(m_c_LogicalOr(m_Specific(TrueVal), m_Value()))))
5096 return ConstantInt::getFalse(Cond->getType());
5097 // X && !(X || Y) --> false (commuted 2 ways)
5098 if (match(TrueVal, m_Not(m_c_LogicalOr(m_Specific(Cond), m_Value()))))
5099 return ConstantInt::getFalse(Cond->getType());
5100
5101 // (X || Y) && Y --> Y (commuted 2 ways)
5102 if (match(Cond, m_c_LogicalOr(m_Specific(TrueVal), m_Value())))
5103 return TrueVal;
5104 // Y && (X || Y) --> Y (commuted 2 ways)
5105 if (match(TrueVal, m_c_LogicalOr(m_Specific(Cond), m_Value())))
5106 return Cond;
5107
5108 // (X || Y) && (X || !Y) --> X (commuted 8 ways)
5109 Value *X, *Y;
// NOTE(review): the rendering dropped original lines 5110-5111 — presumably
// the match conditions on Cond/TrueVal for this first `(X || Y) && (X || !Y)`
// variant. Confirm against the upstream file.
5112 return X;
5113 if (match(TrueVal, m_c_LogicalOr(m_Value(X), m_Not(m_Value(Y)))) &&
// NOTE(review): original line 5114 was dropped — presumably the second half
// of this condition matching Cond against the complementary or. Confirm
// against the upstream file.
5115 return X;
5116 }
5117
5118 // Match patterns that end in logical-or.
5119 if (match(TrueVal, m_One())) {
5120 // !(X && Y) || X --> true (commuted 2 ways)
5121 if (match(Cond, m_Not(m_c_LogicalAnd(m_Specific(FalseVal), m_Value()))))
5122 return ConstantInt::getTrue(Cond->getType());
5123 // X || !(X && Y) --> true (commuted 2 ways)
5124 if (match(FalseVal, m_Not(m_c_LogicalAnd(m_Specific(Cond), m_Value()))))
5125 return ConstantInt::getTrue(Cond->getType());
5126
5127 // (X && Y) || Y --> Y (commuted 2 ways)
5128 if (match(Cond, m_c_LogicalAnd(m_Specific(FalseVal), m_Value())))
5129 return FalseVal;
5130 // Y || (X && Y) --> Y (commuted 2 ways)
5131 if (match(FalseVal, m_c_LogicalAnd(m_Specific(Cond), m_Value())))
5132 return Cond;
5133 }
5134 }
5135
5136 // select ?, X, X -> X
5137 if (TrueVal == FalseVal)
5138 return TrueVal;
5139
5140 if (Cond == TrueVal) {
5141 // select i1 X, i1 X, i1 false --> X (logical-and)
5142 if (match(FalseVal, m_ZeroInt()))
5143 return Cond;
5144 // select i1 X, i1 X, i1 true --> true
5145 if (match(FalseVal, m_One()))
5146 return ConstantInt::getTrue(Cond->getType());
5147 }
5148 if (Cond == FalseVal) {
5149 // select i1 X, i1 true, i1 X --> X (logical-or)
5150 if (match(TrueVal, m_One()))
5151 return Cond;
5152 // select i1 X, i1 false, i1 X --> false
5153 if (match(TrueVal, m_ZeroInt()))
5154 return ConstantInt::getFalse(Cond->getType());
5155 }
5156
5157 // If the true or false value is poison, we can fold to the other value.
5158 // If the true or false value is undef, we can fold to the other value as
5159 // long as the other value isn't poison.
5160 // select ?, poison, X -> X
5161 // select ?, undef, X -> X
5162 if (isa<PoisonValue>(TrueVal) ||
5163 (Q.isUndefValue(TrueVal) && impliesPoison(FalseVal, Cond)))
5164 return FalseVal;
5165 // select ?, X, poison -> X
5166 // select ?, X, undef -> X
5167 if (isa<PoisonValue>(FalseVal) ||
5168 (Q.isUndefValue(FalseVal) && impliesPoison(TrueVal, Cond)))
5169 return TrueVal;
5170
5171 // Deal with partial undef vector constants: select ?, VecC, VecC' --> VecC''
5172 Constant *TrueC, *FalseC;
5173 if (isa<FixedVectorType>(TrueVal->getType()) &&
5174 match(TrueVal, m_Constant(TrueC)) &&
5175 match(FalseVal, m_Constant(FalseC))) {
5176 unsigned NumElts =
5177 cast<FixedVectorType>(TrueC->getType())->getNumElements();
// NOTE(review): original line 5178 was dropped — presumably the declaration
// of NewC (a SmallVector of Constant*, populated below). Confirm against
// the upstream file.
5179 for (unsigned i = 0; i != NumElts; ++i) {
5180 // Bail out on incomplete vector constants.
5181 Constant *TEltC = TrueC->getAggregateElement(i);
5182 Constant *FEltC = FalseC->getAggregateElement(i);
5183 if (!TEltC || !FEltC)
5184 break;
5185
5186 // If the elements match (undef or not), that value is the result. If only
5187 // one element is undef, choose the defined element as the safe result.
5188 if (TEltC == FEltC)
5189 NewC.push_back(TEltC);
5190 else if (isa<PoisonValue>(TEltC) ||
5191 (Q.isUndefValue(TEltC) && isGuaranteedNotToBePoison(FEltC)))
5192 NewC.push_back(FEltC);
5193 else if (isa<PoisonValue>(FEltC) ||
5194 (Q.isUndefValue(FEltC) && isGuaranteedNotToBePoison(TEltC)))
5195 NewC.push_back(TEltC);
5196 else
5197 break;
5198 }
5199 if (NewC.size() == NumElts)
5200 return ConstantVector::get(NewC);
5201 }
5202
5203 if (Value *V =
5204 simplifySelectWithICmpCond(Cond, TrueVal, FalseVal, Q, MaxRecurse))
5205 return V;
5206
5207 if (Value *V = simplifySelectWithBitTest(Cond, TrueVal, FalseVal))
5208 return V;
5209
5210 if (Value *V = simplifySelectWithFCmp(Cond, TrueVal, FalseVal, Q, MaxRecurse))
5211 return V;
5212
5213 std::optional<bool> Imp = isImpliedByDomCondition(Cond, Q.CxtI, Q.DL);
5214 if (Imp)
5215 return *Imp ? TrueVal : FalseVal;
5216 // Look for same PHIs in the true and false values.
5217 if (auto *TruePHI = dyn_cast<PHINode>(TrueVal))
5218 if (auto *FalsePHI = dyn_cast<PHINode>(FalseVal)) {
5219 if (isSelectWithIdenticalPHI(*TruePHI, *FalsePHI))
5220 return FalseVal;
5221 if (isSelectWithIdenticalPHI(*FalsePHI, *TruePHI))
5222 return TrueVal;
5223 }
5224 return nullptr;
5225}
5226
// Public wrapper: forwards to the recursive implementation with the
// module-wide RecursionLimit.
// NOTE(review): the rendering dropped original line 5227 — presumably
// `Value *llvm::simplifySelectInst(Value *Cond, Value *TrueVal, Value
// *FalseVal,`. Confirm against the upstream file.
5228 const SimplifyQuery &Q) {
5229 return ::simplifySelectInst(Cond, TrueVal, FalseVal, Q, RecursionLimit);
5230}
5231
5232/// Given operands for a GetElementPtrInst, see if we can fold the result.
5233/// If not, this returns null.
5234static Value *simplifyGEPInst(Type *SrcTy, Value *Ptr,
// NOTE(review): the rendering dropped original line 5235 — presumably the
// `ArrayRef<Value *> Indices, GEPNoWrapFlags NW,` parameter line, given the
// uses of Indices and NW below. Confirm against the upstream file.
5236 const SimplifyQuery &Q, unsigned) {
5237 // The type of the GEP pointer operand.
5238 unsigned AS =
5239 cast<PointerType>(Ptr->getType()->getScalarType())->getAddressSpace();
5240
5241 // getelementptr P -> P.
5242 if (Indices.empty())
5243 return Ptr;
5244
5245 // Compute the (pointer) type returned by the GEP instruction.
5246 Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Indices);
5247 Type *GEPTy = Ptr->getType();
5248 if (!GEPTy->isVectorTy()) {
5249 for (Value *Op : Indices) {
5250 // If one of the operands is a vector, the result type is a vector of
5251 // pointers. All vector operands must have the same number of elements.
5252 if (VectorType *VT = dyn_cast<VectorType>(Op->getType())) {
5253 GEPTy = VectorType::get(GEPTy, VT->getElementCount());
5254 break;
5255 }
5256 }
5257 }
5258
5259 // All-zero GEP is a no-op, unless it performs a vector splat.
5260 if (Ptr->getType() == GEPTy && all_of(Indices, match_fn(m_Zero())))
5261 return Ptr;
5262
5263 // getelementptr poison, idx -> poison
5264 // getelementptr baseptr, poison -> poison
5265 if (isa<PoisonValue>(Ptr) || any_of(Indices, IsaPred<PoisonValue>))
5266 return PoisonValue::get(GEPTy);
5267
5268 // getelementptr undef, idx -> undef
5269 if (Q.isUndefValue(Ptr))
5270 return UndefValue::get(GEPTy);
5271
5272 bool IsScalableVec =
5273 SrcTy->isScalableTy() || any_of(Indices, [](const Value *V) {
5274 return isa<ScalableVectorType>(V->getType());
5275 });
5276
// Single-index GEPs: look for pointer-difference idioms that recover a
// known pointer P.
5277 if (Indices.size() == 1) {
5278 Type *Ty = SrcTy;
5279 if (!IsScalableVec && Ty->isSized()) {
5280 Value *P;
5281 uint64_t C;
5282 uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
5283 // getelementptr P, N -> P if P points to a type of zero size.
5284 if (TyAllocSize == 0 && Ptr->getType() == GEPTy)
5285 return Ptr;
5286
5287 // The following transforms are only safe if the ptrtoint cast
5288 // doesn't truncate the address of the pointers. The non-address bits
5289 // must be the same, as the underlying objects are the same.
5290 if (Indices[0]->getType()->getScalarSizeInBits() >=
5291 Q.DL.getAddressSizeInBits(AS)) {
5292 auto CanSimplify = [GEPTy, &P, Ptr]() -> bool {
5293 return P->getType() == GEPTy &&
// NOTE(review): original line 5294 was dropped — presumably the second
// half of this lambda's condition. Confirm against the upstream file.
5295 };
5296 // getelementptr V, (sub P, V) -> P if P points to a type of size 1.
5297 if (TyAllocSize == 1 &&
5298 match(Indices[0], m_Sub(m_PtrToIntOrAddr(m_Value(P)),
5299 m_PtrToIntOrAddr(m_Specific(Ptr)))) &&
5300 CanSimplify())
5301 return P;
5302
5303 // getelementptr V, (ashr (sub P, V), C) -> P if P points to a type of
5304 // size 1 << C.
5305 if (match(Indices[0], m_AShr(m_Sub(m_PtrToIntOrAddr(m_Value(P)),
// NOTE(review): original line 5306 was dropped — presumably
// `m_PtrToIntOrAddr(m_Specific(Ptr))),` mirroring the sub pattern above.
5307 m_ConstantInt(C))) &&
5308 TyAllocSize == 1ULL << C && CanSimplify())
5309 return P;
5310
5311 // getelementptr V, (sdiv (sub P, V), C) -> P if P points to a type of
5312 // size C.
5313 if (match(Indices[0], m_SDiv(m_Sub(m_PtrToIntOrAddr(m_Value(P)),
// NOTE(review): original line 5314 was dropped — presumably
// `m_PtrToIntOrAddr(m_Specific(Ptr))),` mirroring the sub pattern above.
5315 m_SpecificInt(TyAllocSize))) &&
5316 CanSimplify())
5317 return P;
5318 }
5319 }
5320 }
5321
5322 if (!IsScalableVec && Q.DL.getTypeAllocSize(LastType) == 1 &&
5323 all_of(Indices.drop_back(1), match_fn(m_Zero()))) {
5324 unsigned IdxWidth =
// NOTE(review): original line 5325 was dropped — presumably the initializer
// of IdxWidth (an index-width query on Q.DL for Ptr's type). Confirm
// against the upstream file.
5326 if (Q.DL.getTypeSizeInBits(Indices.back()->getType()) == IdxWidth) {
5327 APInt BasePtrOffset(IdxWidth, 0);
5328 Value *StrippedBasePtr =
5329 Ptr->stripAndAccumulateInBoundsConstantOffsets(Q.DL, BasePtrOffset);
5330
5331 // Avoid creating inttoptr of zero here: While LLVM's treatment of
5332 // inttoptr is generally conservative, this particular case is folded to
5333 // a null pointer, which will have incorrect provenance.
5334
5335 // gep (gep V, C), (sub 0, V) -> C
5336 if (match(Indices.back(),
5337 m_Neg(m_PtrToInt(m_Specific(StrippedBasePtr)))) &&
5338 !BasePtrOffset.isZero()) {
5339 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset);
5340 return ConstantExpr::getIntToPtr(CI, GEPTy);
5341 }
5342 // gep (gep V, C), (xor V, -1) -> C-1
5343 if (match(Indices.back(),
5344 m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)), m_AllOnes())) &&
5345 !BasePtrOffset.isOne()) {
5346 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1);
5347 return ConstantExpr::getIntToPtr(CI, GEPTy);
5348 }
5349 }
5350 }
5351
5352 // Check to see if this is constant foldable.
5353 if (!isa<Constant>(Ptr) || !all_of(Indices, IsaPred<Constant>))
5354 return nullptr;
5355
// NOTE(review): original line 5356 was dropped — presumably the condition
// guarding this early constant-fold path (the un-guarded `return` here would
// otherwise make the code below unreachable). Confirm against the upstream
// file.
5357 return ConstantFoldGetElementPtr(SrcTy, cast<Constant>(Ptr), std::nullopt,
5358 Indices);
5359
5360 auto *CE =
5361 ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ptr), Indices, NW);
5362 return ConstantFoldConstant(CE, Q.DL);
5363}
5364
// Public wrapper: forwards to the implementation with RecursionLimit.
// NOTE(review): the rendering dropped original line 5365 — presumably
// `Value *llvm::simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef<Value *>
// Indices,`. Confirm against the upstream file.
5366 GEPNoWrapFlags NW, const SimplifyQuery &Q) {
5367 return ::simplifyGEPInst(SrcTy, Ptr, Indices, NW, Q, RecursionLimit);
5368}
5369
5370/// Given operands for an InsertValueInst, see if we can fold the result.
5371/// If not, this returns null.
// NOTE(review): the rendering dropped original line 5372 — presumably
// `static Value *simplifyInsertValueInst(Value *Agg, Value *Val,`. Confirm
// against the upstream file.
5373 ArrayRef<unsigned> Idxs,
5374 const SimplifyQuery &Q, unsigned) {
5375 if (Constant *CAgg = dyn_cast<Constant>(Agg))
5376 if (Constant *CVal = dyn_cast<Constant>(Val))
5377 return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs);
5378
5379 // insertvalue x, poison, n -> x
5380 // insertvalue x, undef, n -> x if x cannot be poison
5381 if (isa<PoisonValue>(Val) ||
5382 (Q.isUndefValue(Val) && isGuaranteedNotToBePoison(Agg)))
5383 return Agg;
5384
5385 // insertvalue x, (extractvalue y, n), n
// NOTE(review): original line 5386 was dropped — presumably
// `if (auto *EV = dyn_cast<ExtractValueInst>(Val))`, which the body below
// relies on. Confirm against the upstream file.
5387 if (EV->getAggregateOperand()->getType() == Agg->getType() &&
5388 EV->getIndices() == Idxs) {
5389 // insertvalue poison, (extractvalue y, n), n -> y
5390 // insertvalue undef, (extractvalue y, n), n -> y if y cannot be poison
5391 if (isa<PoisonValue>(Agg) ||
5392 (Q.isUndefValue(Agg) &&
5393 isGuaranteedNotToBePoison(EV->getAggregateOperand())))
5394 return EV->getAggregateOperand();
5395
5396 // insertvalue y, (extractvalue y, n), n -> y
5397 if (Agg == EV->getAggregateOperand())
5398 return Agg;
5399 }
5400
5401 return nullptr;
5402}
5403
// Public wrapper: forwards to the implementation with RecursionLimit.
// NOTE(review): the line preceding this (original ~5403/5404 split) appears
// truncated by the rendering — presumably `Value *llvm::simplifyInsertValueInst(
// Value *Agg, Value *Val,`. Confirm against the upstream file.
5404 ArrayRef<unsigned> Idxs,
5405 const SimplifyQuery &Q) {
5406 return ::simplifyInsertValueInst(Agg, Val, Idxs, Q, RecursionLimit);
5407}
5409
// Given operands for an InsertElementInst, see if we can fold the result.
// If not, this returns null.
// NOTE(review): the rendering dropped original line 5410 — presumably
// `Value *llvm::simplifyInsertElementInst(Value *Vec, Value *Val, Value *Idx,`
// given the parameter uses below. Confirm against the upstream file.
5411 const SimplifyQuery &Q) {
5412 // Try to constant fold.
5413 auto *VecC = dyn_cast<Constant>(Vec);
5414 auto *ValC = dyn_cast<Constant>(Val);
5415 auto *IdxC = dyn_cast<Constant>(Idx);
5416 if (VecC && ValC && IdxC)
5417 return ConstantExpr::getInsertElement(VecC, ValC, IdxC);
5418
5419 // For fixed-length vector, fold into poison if index is out of bounds.
5420 if (auto *CI = dyn_cast<ConstantInt>(Idx)) {
5421 if (isa<FixedVectorType>(Vec->getType()) &&
5422 CI->uge(cast<FixedVectorType>(Vec->getType())->getNumElements()))
5423 return PoisonValue::get(Vec->getType());
5424 }
5425
5426 // If index is undef, it might be out of bounds (see above case)
5427 if (Q.isUndefValue(Idx))
5428 return PoisonValue::get(Vec->getType());
5429
5430 // If the scalar is poison, or it is undef and there is no risk of
5431 // propagating poison from the vector value, simplify to the vector value.
5432 if (isa<PoisonValue>(Val) ||
5433 (Q.isUndefValue(Val) && isGuaranteedNotToBePoison(Vec)))
5434 return Vec;
5435
5436 // Inserting the splatted value into a constant splat does nothing.
5437 if (VecC && ValC && VecC->getSplatValue() == ValC)
5438 return Vec;
5439
5440 // If we are extracting a value from a vector, then inserting it into the same
5441 // place, that's the input vector:
5442 // insertelt Vec, (extractelt Vec, Idx), Idx --> Vec
5443 if (match(Val, m_ExtractElt(m_Specific(Vec), m_Specific(Idx))))
5444 return Vec;
5445
5446 return nullptr;
5447}
5448
5449/// Given operands for an ExtractValueInst, see if we can fold the result.
5450/// If not, this returns null.
// NOTE(review): the rendering dropped original line 5451 — presumably
// `static Value *simplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,`.
// Confirm against the upstream file.
5452 const SimplifyQuery &, unsigned) {
5453 if (auto *CAgg = dyn_cast<Constant>(Agg))
5454 return ConstantFoldExtractValueInstruction(CAgg, Idxs);
5455
5456 // extractvalue x, (insertvalue y, elt, n), n -> elt
5457 unsigned NumIdxs = Idxs.size();
// NOTE(review): original line 5458 was dropped — presumably the declaration
// of VisitedSet (a small pointer set used for cycle protection below).
// Confirm against the upstream file.
5459 for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr;
5460 IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) {
5461 // Protect against insertvalue cycles in unreachable code.
5462 if (!VisitedSet.insert(IVI).second)
5463 break;
5464
5465 ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices();
5466 unsigned NumInsertValueIdxs = InsertValueIdxs.size();
5467 unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs);
5468 if (InsertValueIdxs.slice(0, NumCommonIdxs) ==
5469 Idxs.slice(0, NumCommonIdxs)) {
5470 if (NumIdxs == NumInsertValueIdxs)
5471 return IVI->getInsertedValueOperand();
5472 break;
5473 }
5474 }
5475
5476 // Simplify umul_with_overflow where one operand is 1.
5477 Value *V;
5478 if (Idxs.size() == 1 &&
5479 (match(Agg,
// NOTE(review): original lines 5480-5481 were dropped — presumably the
// umul_with_overflow intrinsic patterns (with a one operand, in both
// operand orders) continued by the line below. Confirm against the
// upstream file.
5482 m_Value(V))))) {
5483 if (Idxs[0] == 0)
5484 return V;
5485 assert(Idxs[0] == 1 && "invalid index");
5486 return getFalse(CmpInst::makeCmpResultType(V->getType()));
5487 }
5488
5489 return nullptr;
5490}
5491
// Public wrapper: forwards to the implementation with RecursionLimit.
// NOTE(review): the rendering dropped original line 5492 — presumably
// `Value *llvm::simplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,`.
// Confirm against the upstream file.
5493 const SimplifyQuery &Q) {
5494 return ::simplifyExtractValueInst(Agg, Idxs, Q, RecursionLimit);
5495}
5496
5497/// Given operands for an ExtractElementInst, see if we can fold the result.
5498/// If not, this returns null.
// NOTE(review): the rendering dropped original line 5499 — presumably
// `static Value *simplifyExtractElementInst(Value *Vec, Value *Idx,`. Confirm
// against the upstream file.
5500 const SimplifyQuery &Q, unsigned) {
5501 auto *VecVTy = cast<VectorType>(Vec->getType());
5502 if (auto *CVec = dyn_cast<Constant>(Vec)) {
5503 if (auto *CIdx = dyn_cast<Constant>(Idx))
5504 return ConstantExpr::getExtractElement(CVec, CIdx);
5505
5506 if (Q.isUndefValue(Vec))
5507 return UndefValue::get(VecVTy->getElementType());
5508 }
5509
5510 // An undef extract index can be arbitrarily chosen to be an out-of-range
5511 // index value, which would result in the instruction being poison.
5512 if (Q.isUndefValue(Idx))
5513 return PoisonValue::get(VecVTy->getElementType());
5514
5515 // If extracting a specified index from the vector, see if we can recursively
5516 // find a previously computed scalar that was inserted into the vector.
5517 if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) {
5518 // For fixed-length vector, fold into undef if index is out of bounds.
5519 unsigned MinNumElts = VecVTy->getElementCount().getKnownMinValue();
5520 if (isa<FixedVectorType>(VecVTy) && IdxC->getValue().uge(MinNumElts))
5521 return PoisonValue::get(VecVTy->getElementType());
5522 // Handle case where an element is extracted from a splat.
5523 if (IdxC->getValue().ult(MinNumElts))
5524 if (auto *Splat = getSplatValue(Vec))
5525 return Splat;
5526 if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
5527 return Elt;
5528 } else {
5529 // extractelt x, (insertelt y, elt, n), n -> elt
5530 // If the possibly-variable indices are trivially known to be equal
5531 // (because they are the same operand) then use the value that was
5532 // inserted directly.
5533 auto *IE = dyn_cast<InsertElementInst>(Vec);
5534 if (IE && IE->getOperand(2) == Idx)
5535 return IE->getOperand(1);
5536
5537 // The index is not relevant if our vector is a splat.
5538 if (Value *Splat = getSplatValue(Vec))
5539 return Splat;
5540 }
5541 return nullptr;
5542}
5543
5545 const SimplifyQuery &Q) {
 // Public wrapper: delegate with the default recursion budget.
5546 return ::simplifyExtractElementInst(Vec, Idx, Q, RecursionLimit);
5547}
5548
5549/// See if we can fold the given phi. If not, returns null.
5551 const SimplifyQuery &Q) {
5552 // WARNING: no matter how worthwhile it may seem, we can not perform PHI CSE
5553 // here, because the PHI we may succeed simplifying to was not
5554 // def-reachable from the original PHI!
5555
5556 // If all of the PHI's incoming values are the same then replace the PHI node
5557 // with the common value.
5558 Value *CommonValue = nullptr;
5559 bool HasPoisonInput = false;
5560 bool HasUndefInput = false;
5561 for (Value *Incoming : IncomingValues) {
5562 // If the incoming value is the phi node itself, it can safely be skipped.
5563 if (Incoming == PN)
5564 continue;
 // NOTE(review): the guard for poison incoming values (isa<PoisonValue>)
 // lives on a line outside this view; this assignment records it.
5566 HasPoisonInput = true;
5567 continue;
5568 }
5569 if (Q.isUndefValue(Incoming)) {
5570 // Remember that we saw an undef value, but otherwise ignore them.
5571 HasUndefInput = true;
5572 continue;
5573 }
5574 if (CommonValue && Incoming != CommonValue)
5575 return nullptr; // Not the same, bail out.
5576 CommonValue = Incoming;
5577 }
5578
5579 // If CommonValue is null then all of the incoming values were either undef,
5580 // poison or equal to the phi node itself.
5581 if (!CommonValue)
5582 return HasUndefInput ? UndefValue::get(PN->getType())
5583 : PoisonValue::get(PN->getType());
5584
5585 if (HasPoisonInput || HasUndefInput) {
5586 // If we have a PHI node like phi(X, undef, X), where X is defined by some
5587 // instruction, we cannot return X as the result of the PHI node unless it
5588 // dominates the PHI block.
5589 if (!valueDominatesPHI(CommonValue, PN, Q.DT))
5590 return nullptr;
5591
5592 // Make sure we do not replace an undef value with poison.
5593 if (HasUndefInput &&
5594 !isGuaranteedNotToBePoison(CommonValue, Q.AC, Q.CxtI, Q.DT))
5595 return nullptr;
5596 return CommonValue;
5597 }
5598
5599 return CommonValue;
5600}
5601
/// Given a cast opcode, operand, and destination type, see if we can fold the
/// cast. Returns null if nothing simplified.
5602static Value *simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
5603 const SimplifyQuery &Q, unsigned MaxRecurse) {
5604 if (auto *C = dyn_cast<Constant>(Op))
5605 return ConstantFoldCastOperand(CastOpc, C, Ty, Q.DL);
5606
 // A pair of casts that round-trips to the source type folds to the source.
5607 if (auto *CI = dyn_cast<CastInst>(Op)) {
5608 auto *Src = CI->getOperand(0);
5609 Type *SrcTy = Src->getType();
5610 Type *MidTy = CI->getType();
5611 Type *DstTy = Ty;
5612 if (Src->getType() == Ty) {
5613 auto FirstOp = CI->getOpcode();
5614 auto SecondOp = static_cast<Instruction::CastOps>(CastOpc);
5615 if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy,
5616 &Q.DL) == Instruction::BitCast)
5617 return Src;
5618 }
5619 }
5620
5621 // bitcast x -> x
5622 if (CastOpc == Instruction::BitCast)
5623 if (Op->getType() == Ty)
5624 return Op;
5625
5626 // ptrtoint (ptradd (Ptr, X - ptrtoint(Ptr))) -> X
 // NOTE(review): part of this match expression (the offset sub-pattern on
 // the dropped line 5631) is outside this view.
5627 Value *Ptr, *X;
5628 if ((CastOpc == Instruction::PtrToInt || CastOpc == Instruction::PtrToAddr) &&
5629 match(Op,
5630 m_PtrAdd(m_Value(Ptr),
5632 X->getType() == Ty && Ty == Q.DL.getIndexType(Ptr->getType()))
5633 return X;
5634
5635 return nullptr;
5636}
5637
5638Value *llvm::simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
5639 const SimplifyQuery &Q) {
5640 return ::simplifyCastInst(CastOpc, Op, Ty, Q, RecursionLimit);
5641}
5642
5643/// For the given destination element of a shuffle, peek through shuffles to
5644/// match a root vector source operand that contains that element in the same
5645/// vector lane (ie, the same mask index), so we can eliminate the shuffle(s).
5646static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1,
5647 int MaskVal, Value *RootVec,
5648 unsigned MaxRecurse) {
5649 if (!MaxRecurse--)
5650 return nullptr;
5651
5652 // Bail out if any mask value is undefined. That kind of shuffle may be
5653 // simplified further based on demanded bits or other folds.
5654 if (MaskVal == -1)
5655 return nullptr;
5656
5657 // The mask value chooses which source operand we need to look at next.
5658 int InVecNumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
5659 int RootElt = MaskVal;
5660 Value *SourceOp = Op0;
5661 if (MaskVal >= InVecNumElts) {
5662 RootElt = MaskVal - InVecNumElts;
5663 SourceOp = Op1;
5664 }
5665
5666 // If the source operand is a shuffle itself, look through it to find the
5667 // matching root vector.
5668 if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) {
5669 return foldIdentityShuffles(
5670 DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1),
5671 SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse);
5672 }
5673
5674 // The source operand is not a shuffle. Initialize the root vector value for
5675 // this shuffle if that has not been done yet.
5676 if (!RootVec)
5677 RootVec = SourceOp;
5678
5679 // Give up as soon as a source operand does not match the existing root value.
5680 if (RootVec != SourceOp)
5681 return nullptr;
5682
5683 // The element must be coming from the same lane in the source vector
5684 // (although it may have crossed lanes in intermediate shuffles).
5685 if (RootElt != DestElt)
5686 return nullptr;
5687
5688 return RootVec;
5689}
5690
5692 ArrayRef<int> Mask, Type *RetTy,
5693 const SimplifyQuery &Q,
5694 unsigned MaxRecurse) {
 // All-poison mask: the whole result is poison.
5695 if (all_of(Mask, equal_to(PoisonMaskElem)))
5696 return PoisonValue::get(RetTy);
5697
5698 auto *InVecTy = cast<VectorType>(Op0->getType());
5699 unsigned MaskNumElts = Mask.size();
5700 ElementCount InVecEltCount = InVecTy->getElementCount();
5701
5702 bool Scalable = InVecEltCount.isScalable();
5703
5704 SmallVector<int, 32> Indices;
5705 Indices.assign(Mask.begin(), Mask.end());
5706
5707 // Canonicalization: If mask does not select elements from an input vector,
5708 // replace that input vector with poison.
5709 if (!Scalable) {
5710 bool MaskSelects0 = false, MaskSelects1 = false;
5711 unsigned InVecNumElts = InVecEltCount.getKnownMinValue();
5712 for (unsigned i = 0; i != MaskNumElts; ++i) {
5713 if (Indices[i] == -1)
5714 continue;
5715 if ((unsigned)Indices[i] < InVecNumElts)
5716 MaskSelects0 = true;
5717 else
5718 MaskSelects1 = true;
5719 }
5720 if (!MaskSelects0)
5721 Op0 = PoisonValue::get(InVecTy);
5722 if (!MaskSelects1)
5723 Op1 = PoisonValue::get(InVecTy);
5724 }
5725
5726 auto *Op0Const = dyn_cast<Constant>(Op0);
5727 auto *Op1Const = dyn_cast<Constant>(Op1);
5728
5729 // If all operands are constant, constant fold the shuffle. This
5730 // transformation depends on the value of the mask which is not known at
5731 // compile time for scalable vectors
5732 if (Op0Const && Op1Const)
5733 return ConstantExpr::getShuffleVector(Op0Const, Op1Const, Mask);
5734
5735 // Canonicalization: if only one input vector is constant, it shall be the
5736 // second one. This transformation depends on the value of the mask which
5737 // is not known at compile time for scalable vectors
5738 if (!Scalable && Op0Const && !Op1Const) {
5739 std::swap(Op0, Op1);
 // NOTE(review): the call that commutes Indices to match the operand swap
 // sits on a line (5740) outside this view.
5741 InVecEltCount.getKnownMinValue());
5742 }
5743
5744 // A splat of an inserted scalar constant becomes a vector constant:
5745 // shuf (inselt ?, C, IndexC), undef, <IndexC, IndexC...> --> <C, C...>
5746 // NOTE: We may have commuted above, so analyze the updated Indices, not the
5747 // original mask constant.
5748 // NOTE: This transformation depends on the value of the mask which is not
5749 // known at compile time for scalable vectors
5750 Constant *C;
5751 ConstantInt *IndexC;
5752 if (!Scalable && match(Op0, m_InsertElt(m_Value(), m_Constant(C),
5753 m_ConstantInt(IndexC)))) {
5754 // Match a splat shuffle mask of the insert index allowing undef elements.
5755 int InsertIndex = IndexC->getZExtValue();
5756 if (all_of(Indices, [InsertIndex](int MaskElt) {
5757 return MaskElt == InsertIndex || MaskElt == -1;
5758 })) {
5759 assert(isa<UndefValue>(Op1) && "Expected undef operand 1 for splat");
5760
5761 // Shuffle mask poisons become poison constant result elements.
5762 SmallVector<Constant *, 16> VecC(MaskNumElts, C);
5763 for (unsigned i = 0; i != MaskNumElts; ++i)
5764 if (Indices[i] == -1)
5765 VecC[i] = PoisonValue::get(C->getType());
5766 return ConstantVector::get(VecC);
5767 }
5768 }
5769
5770 // A shuffle of a splat is always the splat itself. Legal if the shuffle's
5771 // value type is same as the input vectors' type.
5772 if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
5773 if (Q.isUndefValue(Op1) && RetTy == InVecTy &&
5774 all_equal(OpShuf->getShuffleMask()))
5775 return Op0;
5776
5777 // All remaining transformation depend on the value of the mask, which is
5778 // not known at compile time for scalable vectors.
5779 if (Scalable)
5780 return nullptr;
5781
5782 // Don't fold a shuffle with undef mask elements. This may get folded in a
5783 // better way using demanded bits or other analysis.
5784 // TODO: Should we allow this?
5785 if (is_contained(Indices, -1))
5786 return nullptr;
5787
5788 // Check if every element of this shuffle can be mapped back to the
5789 // corresponding element of a single root vector. If so, we don't need this
5790 // shuffle. This handles simple identity shuffles as well as chains of
5791 // shuffles that may widen/narrow and/or move elements across lanes and back.
5792 Value *RootVec = nullptr;
5793 for (unsigned i = 0; i != MaskNumElts; ++i) {
5794 // Note that recursion is limited for each vector element, so if any element
5795 // exceeds the limit, this will fail to simplify.
5796 RootVec =
5797 foldIdentityShuffles(i, Op0, Op1, Indices[i], RootVec, MaxRecurse);
5798
5799 // We can't replace a widening/narrowing shuffle with one of its operands.
5800 if (!RootVec || RootVec->getType() != RetTy)
5801 return nullptr;
5802 }
5803 return RootVec;
5804}
5805
5806/// Given operands for a ShuffleVectorInst, fold the result or return null.
5808 ArrayRef<int> Mask, Type *RetTy,
5809 const SimplifyQuery &Q) {
 // Public wrapper: delegate with the default recursion budget.
5810 return ::simplifyShuffleVectorInst(Op0, Op1, Mask, RetTy, Q, RecursionLimit);
5811}
5812
5814 const SimplifyQuery &Q) {
 // Constant-fold a unary FP operation when the operand is a constant;
 // otherwise return null.
5815 if (auto *C = dyn_cast<Constant>(Op))
5816 return ConstantFoldUnaryOpOperand(Opcode, C, Q.DL);
5817 return nullptr;
5818}
5819
5820/// Given the operand for an FNeg, see if we can fold the result. If not, this
5821/// returns null.
5823 const SimplifyQuery &Q, unsigned MaxRecurse) {
5824 if (Constant *C = foldConstant(Instruction::FNeg, Op, Q))
5825 return C;
5826
5827 Value *X;
5828 // fneg (fneg X) ==> X
5829 if (match(Op, m_FNeg(m_Value(X))))
5830 return X;
5831
5832 return nullptr;
5833}
5834
5836 const SimplifyQuery &Q) {
 // Public wrapper: delegate with the default recursion budget.
5837 return ::simplifyFNegInst(Op, FMF, Q, RecursionLimit);
5838}
5839
5840/// Try to propagate existing NaN values when possible. If not, replace the
5841/// constant or elements in the constant with a canonical NaN.
5843 Type *Ty = In->getType();
5844 if (auto *VecTy = dyn_cast<FixedVectorType>(Ty)) {
5845 unsigned NumElts = VecTy->getNumElements();
5846 SmallVector<Constant *, 32> NewC(NumElts);
5847 for (unsigned i = 0; i != NumElts; ++i) {
5848 Constant *EltC = In->getAggregateElement(i);
5849 // Poison elements propagate. NaN propagates except signaling is quieted.
5850 // Replace unknown or undef elements with canonical NaN.
5851 if (EltC && isa<PoisonValue>(EltC))
5852 NewC[i] = EltC;
5853 else if (EltC && EltC->isNaN())
5854 NewC[i] = ConstantFP::get(
5855 EltC->getType(), cast<ConstantFP>(EltC)->getValue().makeQuiet());
5856 else
5857 NewC[i] = ConstantFP::getNaN(VecTy->getElementType());
5858 }
5859 return ConstantVector::get(NewC);
5860 }
5861
5862 // If it is not a fixed vector, but not a simple NaN either, return a
5863 // canonical NaN.
5864 if (!In->isNaN())
5865 return ConstantFP::getNaN(Ty);
5866
5867 // If we know this is a NaN and it's a scalable vector, we must have a splat
5868 // on our hands. Grab that before splatting a QNaN constant.
5869 if (isa<ScalableVectorType>(Ty)) {
5870 auto *Splat = In->getSplatValue();
5871 assert(Splat && Splat->isNaN() &&
5872 "Found a scalable-vector NaN but not a splat");
5873 In = Splat;
5874 }
5875
5876 // Propagate an existing QNaN constant. If it is an SNaN, make it quiet, but
5877 // preserve the sign/payload.
5878 return ConstantFP::get(Ty, cast<ConstantFP>(In)->getValue().makeQuiet());
5879}
5880
5881/// Perform folds that are common to any floating-point operation. This implies
5882/// transforms based on poison/undef/NaN because the operation itself makes no
5883/// difference to the result.
5885 const SimplifyQuery &Q,
5886 fp::ExceptionBehavior ExBehavior,
5887 RoundingMode Rounding) {
5888 // Poison is independent of anything else. It always propagates from an
5889 // operand to a math result.
 // NOTE(review): the any_of(Ops, ...) poison test guarding this return is on
 // a line (5890) outside this view.
5891 return PoisonValue::get(Ops[0]->getType());
5892
5893 for (Value *V : Ops) {
5894 bool IsNan = match(V, m_NaN());
5895 bool IsInf = match(V, m_Inf());
5896 bool IsUndef = Q.isUndefValue(V);
5897
5898 // If this operation has 'nnan' or 'ninf' and at least 1 disallowed operand
5899 // (an undef operand can be chosen to be Nan/Inf), then the result of
5900 // this operation is poison.
5901 if (FMF.noNaNs() && (IsNan || IsUndef))
5902 return PoisonValue::get(V->getType());
5903 if (FMF.noInfs() && (IsInf || IsUndef))
5904 return PoisonValue::get(V->getType());
5905
5906 if (isDefaultFPEnvironment(ExBehavior, Rounding)) {
5907 // Undef does not propagate because undef means that all bits can take on
5908 // any value. If this is undef * NaN for example, then the result values
5909 // (at least the exponent bits) are limited. Assume the undef is a
5910 // canonical NaN and propagate that.
5911 if (IsUndef)
5912 return ConstantFP::getNaN(V->getType());
5913 if (IsNan)
5914 return propagateNaN(cast<Constant>(V));
5915 } else if (ExBehavior != fp::ebStrict) {
5916 if (IsNan)
5917 return propagateNaN(cast<Constant>(V));
5918 }
5919 }
5920 return nullptr;
5921}
5922
5923/// Given operands for an FAdd, see if we can fold the result. If not, this
5924/// returns null.
5925static Value *
5927 const SimplifyQuery &Q, unsigned MaxRecurse,
5930 if (isDefaultFPEnvironment(ExBehavior, Rounding))
5931 if (Constant *C = foldOrCommuteConstant(Instruction::FAdd, Op0, Op1, Q))
5932 return C;
5933
5934 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5935 return C;
5936
5937 // fadd X, -0 ==> X
5938 // With strict/constrained FP, we have these possible edge cases that do
5939 // not simplify to Op0:
5940 // fadd SNaN, -0.0 --> QNaN
5941 // fadd +0.0, -0.0 --> -0.0 (but only with round toward negative)
 // NOTE(review): part of this condition (the rounding-mode check on dropped
 // line 5943) is outside this view.
5942 if (canIgnoreSNaN(ExBehavior, FMF) &&
5944 FMF.noSignedZeros()))
5945 if (match(Op1, m_NegZeroFP()))
5946 return Op0;
5947
5948 // fadd X, 0 ==> X, when we know X is not -0
5949 if (canIgnoreSNaN(ExBehavior, FMF))
5950 if (match(Op1, m_PosZeroFP()) &&
5951 (FMF.noSignedZeros() || cannotBeNegativeZero(Op0, Q)))
5952 return Op0;
5953
5954 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5955 return nullptr;
5956
5957 if (FMF.noNaNs()) {
5958 // With nnan: X + {+/-}Inf --> {+/-}Inf
5959 if (match(Op1, m_Inf()))
5960 return Op1;
5961
5962 // With nnan: -X + X --> 0.0 (and commuted variant)
5963 // We don't have to explicitly exclude infinities (ninf): INF + -INF == NaN.
5964 // Negative zeros are allowed because we always end up with positive zero:
5965 // X = -0.0: (-0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
5966 // X = -0.0: ( 0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
5967 // X = 0.0: (-0.0 - ( 0.0)) + ( 0.0) == (-0.0) + ( 0.0) == 0.0
5968 // X = 0.0: ( 0.0 - ( 0.0)) + ( 0.0) == ( 0.0) + ( 0.0) == 0.0
5969 if (match(Op0, m_FSub(m_AnyZeroFP(), m_Specific(Op1))) ||
5970 match(Op1, m_FSub(m_AnyZeroFP(), m_Specific(Op0))))
5971 return ConstantFP::getZero(Op0->getType());
5972
5973 if (match(Op0, m_FNeg(m_Specific(Op1))) ||
5974 match(Op1, m_FNeg(m_Specific(Op0))))
5975 return ConstantFP::getZero(Op0->getType());
5976 }
5977
5978 // (X - Y) + Y --> X
5979 // Y + (X - Y) --> X
5980 Value *X;
5981 if (FMF.noSignedZeros() && FMF.allowReassoc() &&
5982 (match(Op0, m_FSub(m_Value(X), m_Specific(Op1))) ||
5983 match(Op1, m_FSub(m_Value(X), m_Specific(Op0)))))
5984 return X;
5985
5986 return nullptr;
5987}
5988
5989/// Given operands for an FSub, see if we can fold the result. If not, this
5990/// returns null.
5991static Value *
5993 const SimplifyQuery &Q, unsigned MaxRecurse,
5996 if (isDefaultFPEnvironment(ExBehavior, Rounding))
5997 if (Constant *C = foldOrCommuteConstant(Instruction::FSub, Op0, Op1, Q))
5998 return C;
5999
6000 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
6001 return C;
6002
6003 // fsub X, +0 ==> X
 // NOTE(review): part of this condition (the rounding-mode check on dropped
 // line 6005) is outside this view.
6004 if (canIgnoreSNaN(ExBehavior, FMF) &&
6006 FMF.noSignedZeros()))
6007 if (match(Op1, m_PosZeroFP()))
6008 return Op0;
6009
6010 // fsub X, -0 ==> X, when we know X is not -0
6011 if (canIgnoreSNaN(ExBehavior, FMF))
6012 if (match(Op1, m_NegZeroFP()) &&
6013 (FMF.noSignedZeros() || cannotBeNegativeZero(Op0, Q)))
6014 return Op0;
6015
6016 // fsub -0.0, (fsub -0.0, X) ==> X
6017 // fsub -0.0, (fneg X) ==> X
6018 Value *X;
6019 if (canIgnoreSNaN(ExBehavior, FMF))
6020 if (match(Op0, m_NegZeroFP()) && match(Op1, m_FNeg(m_Value(X))))
6021 return X;
6022
6023 // fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored.
6024 // fsub 0.0, (fneg X) ==> X if signed zeros are ignored.
6025 if (canIgnoreSNaN(ExBehavior, FMF))
6026 if (FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()) &&
6027 (match(Op1, m_FSub(m_AnyZeroFP(), m_Value(X))) ||
6028 match(Op1, m_FNeg(m_Value(X)))))
6029 return X;
6030
6031 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
6032 return nullptr;
6033
6034 if (FMF.noNaNs()) {
6035 // fsub nnan x, x ==> 0.0
6036 if (Op0 == Op1)
6037 return Constant::getNullValue(Op0->getType());
6038
6039 // With nnan: {+/-}Inf - X --> {+/-}Inf
6040 if (match(Op0, m_Inf()))
6041 return Op0;
6042
6043 // With nnan: X - {+/-}Inf --> {-/+}Inf
6044 if (match(Op1, m_Inf()))
6045 return foldConstant(Instruction::FNeg, Op1, Q);
6046 }
6047
6048 // Y - (Y - X) --> X
6049 // (X + Y) - Y --> X
6050 if (FMF.noSignedZeros() && FMF.allowReassoc() &&
6051 (match(Op1, m_FSub(m_Specific(Op0), m_Value(X))) ||
6052 match(Op0, m_c_FAdd(m_Specific(Op1), m_Value(X)))))
6053 return X;
6054
6055 return nullptr;
6056}
6057
6059 const SimplifyQuery &Q, unsigned MaxRecurse,
6060 fp::ExceptionBehavior ExBehavior,
6061 RoundingMode Rounding) {
 // Rounding-independent folds shared by FMul and the multiply inside FMA.
6062 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
6063 return C;
6064
6065 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
6066 return nullptr;
6067
6068 // Canonicalize special constants as operand 1.
6069 if (match(Op0, m_FPOne()) || match(Op0, m_AnyZeroFP()))
6070 std::swap(Op0, Op1);
6071
6072 // X * 1.0 --> X
6073 if (match(Op1, m_FPOne()))
6074 return Op0;
6075
6076 if (match(Op1, m_AnyZeroFP())) {
6077 // X * 0.0 --> 0.0 (with nnan and nsz)
6078 if (FMF.noNaNs() && FMF.noSignedZeros())
6079 return ConstantFP::getZero(Op0->getType());
6080
6081 KnownFPClass Known = computeKnownFPClass(Op0, FMF, fcInf | fcNan, Q);
6082 if (Known.isKnownNever(fcInf | fcNan)) {
6083 // if nsz is set, return 0.0
6084 if (FMF.noSignedZeros())
6085 return ConstantFP::getZero(Op0->getType());
6086 // +normal number * (-)0.0 --> (-)0.0
6087 if (Known.SignBit == false)
6088 return Op1;
6089 // -normal number * (-)0.0 --> -(-)0.0
6090 if (Known.SignBit == true)
6091 return foldConstant(Instruction::FNeg, Op1, Q);
6092 }
6093 }
6094
6095 // sqrt(X) * sqrt(X) --> X, if we can:
6096 // 1. Remove the intermediate rounding (reassociate).
6097 // 2. Ignore non-zero negative numbers because sqrt would produce NAN.
6098 // 3. Ignore -0.0 because sqrt(-0.0) == -0.0, but -0.0 * -0.0 == 0.0.
6099 Value *X;
6100 if (Op0 == Op1 && match(Op0, m_Sqrt(m_Value(X))) && FMF.allowReassoc() &&
6101 FMF.noNaNs() && FMF.noSignedZeros())
6102 return X;
6103
6104 return nullptr;
6105}
6106
6107/// Given the operands for an FMul, see if we can fold the result
6108static Value *
6110 const SimplifyQuery &Q, unsigned MaxRecurse,
6113 if (isDefaultFPEnvironment(ExBehavior, Rounding))
6114 if (Constant *C = foldOrCommuteConstant(Instruction::FMul, Op0, Op1, Q))
6115 return C;
6116
6117 // Now apply simplifications that do not require rounding.
6118 return simplifyFMAFMul(Op0, Op1, FMF, Q, MaxRecurse, ExBehavior, Rounding);
6119}
6120
6122 const SimplifyQuery &Q,
6123 fp::ExceptionBehavior ExBehavior,
6124 RoundingMode Rounding) {
 // Public wrapper: delegate with the default recursion budget.
6125 return ::simplifyFAddInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
6126 Rounding);
6127}
6128
6130 const SimplifyQuery &Q,
6131 fp::ExceptionBehavior ExBehavior,
6132 RoundingMode Rounding) {
 // Public wrapper: delegate with the default recursion budget.
6133 return ::simplifyFSubInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
6134 Rounding);
6135}
6136
6138 const SimplifyQuery &Q,
6139 fp::ExceptionBehavior ExBehavior,
6140 RoundingMode Rounding) {
 // Public wrapper: delegate with the default recursion budget.
6141 return ::simplifyFMulInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
6142 Rounding);
6143}
6144
6146 const SimplifyQuery &Q,
6147 fp::ExceptionBehavior ExBehavior,
6148 RoundingMode Rounding) {
 // Public wrapper: delegate with the default recursion budget.
6149 return ::simplifyFMAFMul(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
6150 Rounding);
6151}
6152
6153static Value *
6155 const SimplifyQuery &Q, unsigned,
6158 if (isDefaultFPEnvironment(ExBehavior, Rounding))
6159 if (Constant *C = foldOrCommuteConstant(Instruction::FDiv, Op0, Op1, Q))
6160 return C;
6161
6162 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
6163 return C;
6164
6165 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
6166 return nullptr;
6167
6168 // X / 1.0 -> X
6169 if (match(Op1, m_FPOne()))
6170 return Op0;
6171
6172 // 0 / X -> 0
6173 // Requires that NaNs are off (X could be zero) and signed zeroes are
6174 // ignored (X could be positive or negative, so the output sign is unknown).
6175 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()))
6176 return ConstantFP::getZero(Op0->getType());
6177
6178 if (FMF.noNaNs()) {
6179 // X / X -> 1.0 is legal when NaNs are ignored.
6180 // We can ignore infinities because INF/INF is NaN.
6181 if (Op0 == Op1)
6182 return ConstantFP::get(Op0->getType(), 1.0);
6183
6184 // (X * Y) / Y --> X if we can reassociate to the above form.
6185 Value *X;
6186 if (FMF.allowReassoc() && match(Op0, m_c_FMul(m_Value(X), m_Specific(Op1))))
6187 return X;
6188
6189 // -X / X -> -1.0 and
6190 // X / -X -> -1.0 are legal when NaNs are ignored.
6191 // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored.
6192 if (match(Op0, m_FNegNSZ(m_Specific(Op1))) ||
6193 match(Op1, m_FNegNSZ(m_Specific(Op0))))
6194 return ConstantFP::get(Op0->getType(), -1.0);
6195
6196 // nnan ninf X / [-]0.0 -> poison
6197 if (FMF.noInfs() && match(Op1, m_AnyZeroFP()))
6198 return PoisonValue::get(Op1->getType());
6199 }
6200
6201 return nullptr;
6202}
6203
6205 const SimplifyQuery &Q,
6206 fp::ExceptionBehavior ExBehavior,
6207 RoundingMode Rounding) {
 // Public wrapper: delegate with the default recursion budget.
6208 return ::simplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
6209 Rounding);
6210}
6211
6212static Value *
6214 const SimplifyQuery &Q, unsigned,
6217 if (isDefaultFPEnvironment(ExBehavior, Rounding))
6218 if (Constant *C = foldOrCommuteConstant(Instruction::FRem, Op0, Op1, Q))
6219 return C;
6220
6221 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
6222 return C;
6223
6224 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
6225 return nullptr;
6226
6227 // Unlike fdiv, the result of frem always matches the sign of the dividend.
6228 // The constant match may include undef elements in a vector, so return a full
6229 // zero constant as the result.
6230 if (FMF.noNaNs()) {
6231 // +0 % X -> 0
6232 if (match(Op0, m_PosZeroFP()))
6233 return ConstantFP::getZero(Op0->getType());
6234 // -0 % X -> -0
6235 if (match(Op0, m_NegZeroFP()))
6236 return ConstantFP::getNegativeZero(Op0->getType());
6237 }
6238
6239 return nullptr;
6240}
6241
6243 const SimplifyQuery &Q,
6244 fp::ExceptionBehavior ExBehavior,
6245 RoundingMode Rounding) {
 // Public wrapper: delegate with the default recursion budget.
6246 return ::simplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
6247 Rounding);
6248}
6249
6250//=== Helper functions for higher up the class hierarchy.
6251
6252/// Given the operand for a UnaryOperator, see if we can fold the result.
6253/// If not, this returns null.
6254static Value *simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q,
6255 unsigned MaxRecurse) {
6256 switch (Opcode) {
6257 case Instruction::FNeg:
6258 return simplifyFNegInst(Op, FastMathFlags(), Q, MaxRecurse);
6259 default:
6260 llvm_unreachable("Unexpected opcode");
6261 }
6262}
6263
6264/// Given the operand for a UnaryOperator, see if we can fold the result.
6265/// If not, this returns null.
6266/// Try to use FastMathFlags when folding the result.
6267static Value *simplifyFPUnOp(unsigned Opcode, Value *Op,
6268 const FastMathFlags &FMF, const SimplifyQuery &Q,
6269 unsigned MaxRecurse) {
6270 switch (Opcode) {
6271 case Instruction::FNeg:
6272 return simplifyFNegInst(Op, FMF, Q, MaxRecurse);
6273 default:
6274 return simplifyUnOp(Opcode, Op, Q, MaxRecurse);
6275 }
6276}
6277
6278Value *llvm::simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q) {
6279 return ::simplifyUnOp(Opcode, Op, Q, RecursionLimit);
6280}
6281
6283 const SimplifyQuery &Q) {
 // Public wrapper: delegate with the default recursion budget.
6284 return ::simplifyFPUnOp(Opcode, Op, FMF, Q, RecursionLimit);
6285}
6286
6287/// Given operands for a BinaryOperator, see if we can fold the result.
6288/// If not, this returns null.
6289static Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
6290 const SimplifyQuery &Q, unsigned MaxRecurse) {
6291 switch (Opcode) {
6292 case Instruction::Add:
6293 return simplifyAddInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
6294 MaxRecurse);
6295 case Instruction::Sub:
6296 return simplifySubInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
6297 MaxRecurse);
6298 case Instruction::Mul:
6299 return simplifyMulInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
6300 MaxRecurse);
6301 case Instruction::SDiv:
6302 return simplifySDivInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
6303 case Instruction::UDiv:
6304 return simplifyUDivInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
6305 case Instruction::SRem:
6306 return simplifySRemInst(LHS, RHS, Q, MaxRecurse);
6307 case Instruction::URem:
6308 return simplifyURemInst(LHS, RHS, Q, MaxRecurse);
6309 case Instruction::Shl:
6310 return simplifyShlInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
6311 MaxRecurse);
6312 case Instruction::LShr:
6313 return simplifyLShrInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
6314 case Instruction::AShr:
6315 return simplifyAShrInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
6316 case Instruction::And:
6317 return simplifyAndInst(LHS, RHS, Q, MaxRecurse);
6318 case Instruction::Or:
6319 return simplifyOrInst(LHS, RHS, Q, MaxRecurse);
6320 case Instruction::Xor:
6321 return simplifyXorInst(LHS, RHS, Q, MaxRecurse);
6322 case Instruction::FAdd:
6323 return simplifyFAddInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6324 case Instruction::FSub:
6325 return simplifyFSubInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6326 case Instruction::FMul:
6327 return simplifyFMulInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6328 case Instruction::FDiv:
6329 return simplifyFDivInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6330 case Instruction::FRem:
6331 return simplifyFRemInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6332 default:
6333 llvm_unreachable("Unexpected opcode");
6334 }
6335}
6336
6337/// Given operands for a BinaryOperator, see if we can fold the result.
6338/// If not, this returns null.
6339/// Try to use FastMathFlags when folding the result.
6340static Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
6341 const FastMathFlags &FMF, const SimplifyQuery &Q,
6342 unsigned MaxRecurse) {
6343 switch (Opcode) {
6344 case Instruction::FAdd:
6345 return simplifyFAddInst(LHS, RHS, FMF, Q, MaxRecurse);
6346 case Instruction::FSub:
6347 return simplifyFSubInst(LHS, RHS, FMF, Q, MaxRecurse);
6348 case Instruction::FMul:
6349 return simplifyFMulInst(LHS, RHS, FMF, Q, MaxRecurse);
6350 case Instruction::FDiv:
6351 return simplifyFDivInst(LHS, RHS, FMF, Q, MaxRecurse);
6352 default:
6353 return simplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse);
6354 }
6355}
6356
6357Value *llvm::simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
6358 const SimplifyQuery &Q) {
6359 return ::simplifyBinOp(Opcode, LHS, RHS, Q, RecursionLimit);
6360}
6361
6362Value *llvm::simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
6363 FastMathFlags FMF, const SimplifyQuery &Q) {
6364 return ::simplifyBinOp(Opcode, LHS, RHS, FMF, Q, RecursionLimit);
6365}
6366
6367/// Given operands for a CmpInst, see if we can fold the result.
6369 const SimplifyQuery &Q, unsigned MaxRecurse) {
 // NOTE(review): the integer-predicate test guarding the first return sits on
 // a line (6370) outside this view; FP predicates fall through to FCmp.
6371 return simplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse);
6372 return simplifyFCmpInst(Predicate, LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6373}
6374
6376 const SimplifyQuery &Q) {
 // Public wrapper: delegate with the default recursion budget.
6377 return ::simplifyCmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
6378}
6379
 // Returns true for intrinsics where f(f(x)) == f(x).
6381 switch (ID) {
6382 default:
6383 return false;
6384
6385 // Unary idempotent: f(f(x)) = f(x)
6386 case Intrinsic::fabs:
6387 case Intrinsic::floor:
6388 case Intrinsic::ceil:
6389 case Intrinsic::trunc:
6390 case Intrinsic::rint:
6391 case Intrinsic::nearbyint:
6392 case Intrinsic::round:
6393 case Intrinsic::roundeven:
6394 case Intrinsic::canonicalize:
6395 case Intrinsic::arithmetic_fence:
6396 return true;
6397 }
6398}
6399
6400/// Return true if the intrinsic rounds a floating-point value to an integral
6401/// floating-point value (not an integer type).
6403 switch (ID) {
6404 default:
6405 return false;
6406
6407 case Intrinsic::floor:
6408 case Intrinsic::ceil:
6409 case Intrinsic::trunc:
6410 case Intrinsic::rint:
6411 case Intrinsic::nearbyint:
6412 case Intrinsic::round:
6413 case Intrinsic::roundeven:
6414 return true;
6415 }
6416}
6417
6419 const DataLayout &DL) {
 // Fold a load of a 32-bit relative offset (Ptr + Offset) back to the
 // pointer it encodes, when the loaded value is a (ptrtoint X) - (Ptr's
 // symbol + offset) expression.
6420 GlobalValue *PtrSym;
6421 APInt PtrOffset;
6422 if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset, DL))
6423 return nullptr;
6424
 // NOTE(review): the declaration of Int32Ty (line 6425) is outside this view.
6426
6427 auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset);
6428 if (!OffsetConstInt || OffsetConstInt->getBitWidth() > 64)
6429 return nullptr;
6430
6431 APInt OffsetInt = OffsetConstInt->getValue().sextOrTrunc(
6432 DL.getIndexTypeSizeInBits(Ptr->getType()));
 // Relative-pointer tables hold i32 entries, so the offset must be 4-aligned.
6433 if (OffsetInt.srem(4) != 0)
6434 return nullptr;
6435
6436 Constant *Loaded =
6437 ConstantFoldLoadFromConstPtr(Ptr, Int32Ty, std::move(OffsetInt), DL);
6438 if (!Loaded)
6439 return nullptr;
6440
6441 auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded);
6442 if (!LoadedCE)
6443 return nullptr;
6444
 // Look through a possible truncation of the stored difference.
6445 if (LoadedCE->getOpcode() == Instruction::Trunc) {
6446 LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
6447 if (!LoadedCE)
6448 return nullptr;
6449 }
6450
6451 if (LoadedCE->getOpcode() != Instruction::Sub)
6452 return nullptr;
6453
6454 auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
6455 if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt)
6456 return nullptr;
6457 auto *LoadedLHSPtr = LoadedLHS->getOperand(0);
6458
 // The subtrahend must be the very location we loaded from; only then does
 // the difference decode to LoadedLHSPtr.
6459 Constant *LoadedRHS = LoadedCE->getOperand(1);
6460 GlobalValue *LoadedRHSSym;
6461 APInt LoadedRHSOffset;
6462 if (!IsConstantOffsetFromGlobal(LoadedRHS, LoadedRHSSym, LoadedRHSOffset,
6463 DL) ||
6464 PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset)
6465 return nullptr;
6466
6467 return LoadedLHSPtr;
6468}
6469
6470// TODO: Need to pass in FastMathFlags
6471static Value *simplifyLdexp(Value *Op0, Value *Op1, const SimplifyQuery &Q,
6472 bool IsStrict) {
6473 // ldexp(poison, x) -> poison
6474 // ldexp(x, poison) -> poison
6475 if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
6476 return Op0;
6477
6478 // ldexp(undef, x) -> nan
6479 if (Q.isUndefValue(Op0))
6480 return ConstantFP::getNaN(Op0->getType());
6481
6482 if (!IsStrict) {
6483 // TODO: Could insert a canonicalize for strict
6484
6485 // ldexp(x, undef) -> x
6486 if (Q.isUndefValue(Op1))
6487 return Op0;
6488 }
6489
6490 const APFloat *C = nullptr;
6492
6493 // These cases should be safe, even with strictfp.
6494 // ldexp(0.0, x) -> 0.0
6495 // ldexp(-0.0, x) -> -0.0
6496 // ldexp(inf, x) -> inf
6497 // ldexp(-inf, x) -> -inf
6498 if (C && (C->isZero() || C->isInfinity()))
6499 return Op0;
6500
6501 // These are canonicalization dropping, could do it if we knew how we could
6502 // ignore denormal flushes and target handling of nan payload bits.
6503 if (IsStrict)
6504 return nullptr;
6505
6506 // TODO: Could quiet this with strictfp if the exception mode isn't strict.
6507 if (C && C->isNaN())
6508 return ConstantFP::get(Op0->getType(), C->makeQuiet());
6509
6510 // ldexp(x, 0) -> x
6511
6512 // TODO: Could fold this if we know the exception mode isn't
6513 // strict, we know the denormal mode and other target modes.
6514 if (match(Op1, PatternMatch::m_ZeroInt()))
6515 return Op0;
6516
6517 return nullptr;
6518}
6519
6521 const SimplifyQuery &Q,
6522 const CallBase *Call) {
6523 // Idempotent functions return the same result when called repeatedly.
6524 Intrinsic::ID IID = F->getIntrinsicID();
6525 if (isIdempotent(IID))
6526 if (auto *II = dyn_cast<IntrinsicInst>(Op0))
6527 if (II->getIntrinsicID() == IID)
6528 return II;
6529
6530 if (removesFPFraction(IID)) {
6531 // Converting from int or calling a rounding function always results in a
6532 // finite integral number or infinity. For those inputs, rounding functions
6533 // always return the same value, so the (2nd) rounding is eliminated. Ex:
6534 // floor (sitofp x) -> sitofp x
6535 // round (ceil x) -> ceil x
6536 auto *II = dyn_cast<IntrinsicInst>(Op0);
6537 if ((II && removesFPFraction(II->getIntrinsicID())) ||
6538 match(Op0, m_IToFP(m_Value())))
6539 return Op0;
6540 }
6541
6542 Value *X;
6543 switch (IID) {
6544 case Intrinsic::fabs: {
6545 KnownFPClass KnownClass = computeKnownFPClass(Op0, fcAllFlags, Q);
6546 if (KnownClass.SignBit == false)
6547 return Op0;
6548
6549 if (KnownClass.cannotBeOrderedLessThanZero() &&
6550 KnownClass.isKnownNeverNaN() && Call->hasNoSignedZeros())
6551 return Op0;
6552
6553 break;
6554 }
6555 case Intrinsic::bswap:
6556 // bswap(bswap(x)) -> x
6557 if (match(Op0, m_BSwap(m_Value(X))))
6558 return X;
6559 break;
6560 case Intrinsic::bitreverse:
6561 // bitreverse(bitreverse(x)) -> x
6562 if (match(Op0, m_BitReverse(m_Value(X))))
6563 return X;
6564 break;
6565 case Intrinsic::ctpop: {
6566 // ctpop(X) -> 1 iff X is non-zero power of 2.
6567 if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ false, Q.AC, Q.CxtI, Q.DT))
6568 return ConstantInt::get(Op0->getType(), 1);
6569 // If everything but the lowest bit is zero, that bit is the pop-count. Ex:
6570 // ctpop(and X, 1) --> and X, 1
6571 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
6573 Q))
6574 return Op0;
6575 break;
6576 }
6577 case Intrinsic::exp:
6578 // exp(log(x)) -> x
6579 if (Call->hasAllowReassoc() &&
6581 return X;
6582 break;
6583 case Intrinsic::exp2:
6584 // exp2(log2(x)) -> x
6585 if (Call->hasAllowReassoc() &&
6587 return X;
6588 break;
6589 case Intrinsic::exp10:
6590 // exp10(log10(x)) -> x
6591 if (Call->hasAllowReassoc() &&
6593 return X;
6594 break;
6595 case Intrinsic::log:
6596 // log(exp(x)) -> x
6597 if (Call->hasAllowReassoc() &&
6599 return X;
6600 break;
6601 case Intrinsic::log2:
6602 // log2(exp2(x)) -> x
6603 if (Call->hasAllowReassoc() &&
6605 match(Op0,
6607 return X;
6608 break;
6609 case Intrinsic::log10:
6610 // log10(pow(10.0, x)) -> x
6611 // log10(exp10(x)) -> x
6612 if (Call->hasAllowReassoc() &&
6614 match(Op0,
6616 return X;
6617 break;
6618 case Intrinsic::vector_reverse:
6619 // vector.reverse(vector.reverse(x)) -> x
6620 if (match(Op0, m_VecReverse(m_Value(X))))
6621 return X;
6622 // vector.reverse(splat(X)) -> splat(X)
6623 if (isSplatValue(Op0))
6624 return Op0;
6625 break;
6626 case Intrinsic::structured_gep:
6627 return cast<StructuredGEPInst>(Call)->getPointerOperand();
6628 default:
6629 break;
6630 }
6631
6632 return nullptr;
6633}
6634
6635/// Given a min/max intrinsic, see if it can be removed based on having an
6636/// operand that is another min/max intrinsic with shared operand(s). The caller
6637/// is expected to swap the operand arguments to handle commutation.
6639 Value *X, *Y;
6640 if (!match(Op0, m_MaxOrMin(m_Value(X), m_Value(Y))))
6641 return nullptr;
6642
6643 auto *MM0 = dyn_cast<IntrinsicInst>(Op0);
6644 if (!MM0)
6645 return nullptr;
6646 Intrinsic::ID IID0 = MM0->getIntrinsicID();
6647
6648 if (Op1 == X || Op1 == Y ||
6650 // max (max X, Y), X --> max X, Y
6651 if (IID0 == IID)
6652 return MM0;
6653 // max (min X, Y), X --> X
6654 if (IID0 == getInverseMinMaxIntrinsic(IID))
6655 return Op1;
6656 }
6657 return nullptr;
6658}
6659
6660/// Given a min/max intrinsic, see if it can be removed based on having an
6661/// operand that is another min/max intrinsic with shared operand(s). The caller
6662/// is expected to swap the operand arguments to handle commutation.
6664 Value *Op1) {
6665 auto IsMinimumMaximumIntrinsic = [](Intrinsic::ID ID) {
6666 switch (ID) {
6667 case Intrinsic::maxnum:
6668 case Intrinsic::minnum:
6669 case Intrinsic::maximum:
6670 case Intrinsic::minimum:
6671 case Intrinsic::maximumnum:
6672 case Intrinsic::minimumnum:
6673 return true;
6674 default:
6675 return false;
6676 }
6677 };
6678
6679 assert(IsMinimumMaximumIntrinsic(IID) && "Unsupported intrinsic");
6680
6681 auto *M0 = dyn_cast<IntrinsicInst>(Op0);
6682 // If Op0 is not the same intrinsic as IID, do not process.
6683 // This is a difference with integer min/max handling. We do not process the
6684 // case like max(min(X,Y),min(X,Y)) => min(X,Y). But it can be handled by GVN.
6685 if (!M0 || M0->getIntrinsicID() != IID)
6686 return nullptr;
6687 Value *X0 = M0->getOperand(0);
6688 Value *Y0 = M0->getOperand(1);
6689 // Simple case, m(m(X,Y), X) => m(X, Y)
6690 // m(m(X,Y), Y) => m(X, Y)
6691 // For minimum/maximum, X is NaN => m(NaN, Y) == NaN and m(NaN, NaN) == NaN.
6692 // For minimum/maximum, Y is NaN => m(X, NaN) == NaN and m(NaN, NaN) == NaN.
6693 // For minnum/maxnum, X is NaN => m(NaN, Y) == Y and m(Y, Y) == Y.
6694 // For minnum/maxnum, Y is NaN => m(X, NaN) == X and m(X, NaN) == X.
6695 if (X0 == Op1 || Y0 == Op1)
6696 return M0;
6697
6698 auto *M1 = dyn_cast<IntrinsicInst>(Op1);
6699 if (!M1 || !IsMinimumMaximumIntrinsic(M1->getIntrinsicID()))
6700 return nullptr;
6701 Value *X1 = M1->getOperand(0);
6702 Value *Y1 = M1->getOperand(1);
6703 Intrinsic::ID IID1 = M1->getIntrinsicID();
6704 // we have a case m(m(X,Y),m'(X,Y)) taking into account m' is commutative.
6705 // if m' is m or inversion of m => m(m(X,Y),m'(X,Y)) == m(X,Y).
6706 // For minimum/maximum, X is NaN => m(NaN,Y) == m'(NaN, Y) == NaN.
6707 // For minimum/maximum, Y is NaN => m(X,NaN) == m'(X, NaN) == NaN.
6708 // For minnum/maxnum, X is NaN => m(NaN,Y) == m'(NaN, Y) == Y.
6709 // For minnum/maxnum, Y is NaN => m(X,NaN) == m'(X, NaN) == X.
6710 if ((X0 == X1 && Y0 == Y1) || (X0 == Y1 && Y0 == X1))
6711 if (IID1 == IID || getInverseMinMaxIntrinsic(IID1) == IID)
6712 return M0;
6713
6714 return nullptr;
6715}
6716
// Result of attempting to fold a min/max with one constant operand.
// Enumerator names are grounded by their uses in OptimizeConstMinMax and
// simplifyBinaryIntrinsic below.
enum class MinMaxOptResult {
  // No fold is possible.
  CannotOptimize,
  // Replace the call with the constant written to *OutNewConstVal.
  UseNewConstVal,
  // Replace the call with the non-constant (LHS) operand.
  UseOtherVal,
  // For undef/poison, we can choose to either propagate undef/poison or
  // use the LHS value depending on what will allow more optimization.
  UseEither
};
6725// Get the optimized value for a min/max instruction with a single constant
6726// input (either undef or scalar constantFP). The result may indicate to
6727// use the non-const LHS value, use a new constant value instead (with NaNs
6728// quieted), or to choose either option in the case of undef/poison.
6730 const Intrinsic::ID IID,
6731 const CallBase *Call,
6732 Constant **OutNewConstVal) {
6733 assert(OutNewConstVal != nullptr);
6734
6735 bool PropagateNaN = IID == Intrinsic::minimum || IID == Intrinsic::maximum;
6736 bool PropagateSNaN = IID == Intrinsic::minnum || IID == Intrinsic::maxnum;
6737 bool IsMin = IID == Intrinsic::minimum || IID == Intrinsic::minnum ||
6738 IID == Intrinsic::minimumnum;
6739
6740 // min/max(x, poison) -> either x or poison
6741 if (isa<UndefValue>(RHSConst)) {
6742 *OutNewConstVal = const_cast<Constant *>(RHSConst);
6744 }
6745
6746 const ConstantFP *CFP = dyn_cast<ConstantFP>(RHSConst);
6747 if (!CFP)
6749 APFloat CAPF = CFP->getValueAPF();
6750
6751 // minnum(x, qnan) -> x
6752 // maxnum(x, qnan) -> x
6753 // minnum(x, snan) -> qnan
6754 // maxnum(x, snan) -> qnan
6755 // minimum(X, nan) -> qnan
6756 // maximum(X, nan) -> qnan
6757 // minimumnum(X, nan) -> x
6758 // maximumnum(X, nan) -> x
6759 if (CAPF.isNaN()) {
6760 if (PropagateNaN || (PropagateSNaN && CAPF.isSignaling())) {
6761 *OutNewConstVal = ConstantFP::get(CFP->getType(), CAPF.makeQuiet());
6763 }
6765 }
6766
6767 if (CAPF.isInfinity() || (Call && Call->hasNoInfs() && CAPF.isLargest())) {
6768 // minnum(X, -inf) -> -inf (ignoring sNaN -> qNaN propagation)
6769 // maxnum(X, +inf) -> +inf (ignoring sNaN -> qNaN propagation)
6770 // minimum(X, -inf) -> -inf if nnan
6771 // maximum(X, +inf) -> +inf if nnan
6772 // minimumnum(X, -inf) -> -inf
6773 // maximumnum(X, +inf) -> +inf
6774 if (CAPF.isNegative() == IsMin &&
6775 (!PropagateNaN || (Call && Call->hasNoNaNs()))) {
6776 *OutNewConstVal = const_cast<Constant *>(RHSConst);
6778 }
6779
6780 // minnum(X, +inf) -> X if nnan
6781 // maxnum(X, -inf) -> X if nnan
6782 // minimum(X, +inf) -> X (ignoring quieting of sNaNs)
6783 // maximum(X, -inf) -> X (ignoring quieting of sNaNs)
6784 // minimumnum(X, +inf) -> X if nnan
6785 // maximumnum(X, -inf) -> X if nnan
6786 if (CAPF.isNegative() != IsMin &&
6787 (PropagateNaN || (Call && Call->hasNoNaNs())))
6789 }
6791}
6792
6794 Value *Op0, Value *Op1) {
6795 Constant *C0 = dyn_cast<Constant>(Op0);
6796 Constant *C1 = dyn_cast<Constant>(Op1);
6797 unsigned Width = ReturnType->getPrimitiveSizeInBits();
6798
6799 // All false predicate or reduction of neutral values ==> neutral result.
6800 switch (IID) {
6801 case Intrinsic::aarch64_sve_eorv:
6802 case Intrinsic::aarch64_sve_orv:
6803 case Intrinsic::aarch64_sve_saddv:
6804 case Intrinsic::aarch64_sve_uaddv:
6805 case Intrinsic::aarch64_sve_umaxv:
6806 if ((C0 && C0->isNullValue()) || (C1 && C1->isNullValue()))
6807 return ConstantInt::get(ReturnType, 0);
6808 break;
6809 case Intrinsic::aarch64_sve_andv:
6810 case Intrinsic::aarch64_sve_uminv:
6811 if ((C0 && C0->isNullValue()) || (C1 && C1->isAllOnesValue()))
6812 return ConstantInt::get(ReturnType, APInt::getMaxValue(Width));
6813 break;
6814 case Intrinsic::aarch64_sve_smaxv:
6815 if ((C0 && C0->isNullValue()) || (C1 && C1->isMinSignedValue()))
6816 return ConstantInt::get(ReturnType, APInt::getSignedMinValue(Width));
6817 break;
6818 case Intrinsic::aarch64_sve_sminv:
6819 if ((C0 && C0->isNullValue()) || (C1 && C1->isMaxSignedValue()))
6820 return ConstantInt::get(ReturnType, APInt::getSignedMaxValue(Width));
6821 break;
6822 }
6823
6824 switch (IID) {
6825 case Intrinsic::aarch64_sve_andv:
6826 case Intrinsic::aarch64_sve_orv:
6827 case Intrinsic::aarch64_sve_smaxv:
6828 case Intrinsic::aarch64_sve_sminv:
6829 case Intrinsic::aarch64_sve_umaxv:
6830 case Intrinsic::aarch64_sve_uminv:
6831 // sve_reduce_##(all, splat(X)) ==> X
6832 if (C0 && C0->isAllOnesValue()) {
6833 if (Value *SplatVal = getSplatValue(Op1)) {
6834 assert(SplatVal->getType() == ReturnType && "Unexpected result type!");
6835 return SplatVal;
6836 }
6837 }
6838 break;
6839 case Intrinsic::aarch64_sve_eorv:
6840 // sve_reduce_xor(all, splat(X)) ==> 0
6841 if (C0 && C0->isAllOnesValue())
6842 return ConstantInt::get(ReturnType, 0);
6843 break;
6844 }
6845
6846 return nullptr;
6847}
6848
6850 Value *Op0, Value *Op1,
6851 const SimplifyQuery &Q,
6852 const CallBase *Call) {
6853 unsigned BitWidth = ReturnType->getScalarSizeInBits();
6854 switch (IID) {
6855 case Intrinsic::get_active_lane_mask: {
6856 if (match(Op1, m_Zero()))
6857 return ConstantInt::getFalse(ReturnType);
6858
6859 const Function *F = Call->getFunction();
6860 auto *ScalableTy = dyn_cast<ScalableVectorType>(ReturnType);
6861 Attribute Attr = F->getFnAttribute(Attribute::VScaleRange);
6862 if (ScalableTy && Attr.isValid()) {
6863 std::optional<unsigned> VScaleMax = Attr.getVScaleRangeMax();
6864 if (!VScaleMax)
6865 break;
6866 uint64_t MaxPossibleMaskElements =
6867 (uint64_t)ScalableTy->getMinNumElements() * (*VScaleMax);
6868
6869 const APInt *Op1Val;
6870 if (match(Op0, m_Zero()) && match(Op1, m_APInt(Op1Val)) &&
6871 Op1Val->uge(MaxPossibleMaskElements))
6872 return ConstantInt::getAllOnesValue(ReturnType);
6873 }
6874 break;
6875 }
6876 case Intrinsic::abs:
6877 // abs(abs(x)) -> abs(x). We don't need to worry about the nsw arg here.
6878 // It is always ok to pick the earlier abs. We'll just lose nsw if its only
6879 // on the outer abs.
6881 return Op0;
6882 break;
6883
6884 case Intrinsic::cttz: {
6885 Value *X;
6886 if (match(Op0, m_Shl(m_One(), m_Value(X))))
6887 return X;
6888 break;
6889 }
6890 case Intrinsic::ctlz: {
6891 Value *X;
6892 if (match(Op0, m_LShr(m_Negative(), m_Value(X))))
6893 return X;
6894 if (match(Op0, m_AShr(m_Negative(), m_Value())))
6895 return Constant::getNullValue(ReturnType);
6896 break;
6897 }
6898 case Intrinsic::ptrmask: {
6899 // NOTE: We can't apply this simplifications based on the value of Op1
6900 // because we need to preserve provenance.
6901 if (Q.isUndefValue(Op0) || match(Op0, m_Zero()))
6902 return Constant::getNullValue(Op0->getType());
6903
6905 Q.DL.getIndexTypeSizeInBits(Op0->getType()) &&
6906 "Invalid mask width");
6907 // If index-width (mask size) is less than pointer-size then mask is
6908 // 1-extended.
6909 if (match(Op1, m_PtrToIntOrAddr(m_Specific(Op0))))
6910 return Op0;
6911
6912 // NOTE: We may have attributes associated with the return value of the
6913 // llvm.ptrmask intrinsic that will be lost when we just return the
6914 // operand. We should try to preserve them.
6915 if (match(Op1, m_AllOnes()) || Q.isUndefValue(Op1))
6916 return Op0;
6917
6918 Constant *C;
6919 if (match(Op1, m_ImmConstant(C))) {
6920 KnownBits PtrKnown = computeKnownBits(Op0, Q);
6921 // See if we only masking off bits we know are already zero due to
6922 // alignment.
6923 APInt IrrelevantPtrBits =
6924 PtrKnown.Zero.zextOrTrunc(C->getType()->getScalarSizeInBits());
6926 Instruction::Or, C, ConstantInt::get(C->getType(), IrrelevantPtrBits),
6927 Q.DL);
6928 if (C != nullptr && C->isAllOnesValue())
6929 return Op0;
6930 }
6931 break;
6932 }
6933 case Intrinsic::smax:
6934 case Intrinsic::smin:
6935 case Intrinsic::umax:
6936 case Intrinsic::umin: {
6937 // If the arguments are the same, this is a no-op.
6938 if (Op0 == Op1)
6939 return Op0;
6940
6941 // Canonicalize immediate constant operand as Op1.
6942 if (match(Op0, m_ImmConstant()))
6943 std::swap(Op0, Op1);
6944
6945 // Assume undef is the limit value.
6946 if (Q.isUndefValue(Op1))
6947 return ConstantInt::get(
6949
6950 const APInt *C;
6951 if (match(Op1, m_APIntAllowPoison(C))) {
6952 // Clamp to limit value. For example:
6953 // umax(i8 %x, i8 255) --> 255
6955 return ConstantInt::get(ReturnType, *C);
6956
6957 // If the constant op is the opposite of the limit value, the other must
6958 // be larger/smaller or equal. For example:
6959 // umin(i8 %x, i8 255) --> %x
6962 return Op0;
6963
6964 // Remove nested call if constant operands allow it. Example:
6965 // max (max X, 7), 5 -> max X, 7
6966 auto *MinMax0 = dyn_cast<IntrinsicInst>(Op0);
6967 if (MinMax0 && MinMax0->getIntrinsicID() == IID) {
6968 // TODO: loosen undef/splat restrictions for vector constants.
6969 Value *M00 = MinMax0->getOperand(0), *M01 = MinMax0->getOperand(1);
6970 const APInt *InnerC;
6971 if ((match(M00, m_APInt(InnerC)) || match(M01, m_APInt(InnerC))) &&
6972 ICmpInst::compare(*InnerC, *C,
6975 return Op0;
6976 }
6977 }
6978
6979 if (Value *V = foldMinMaxSharedOp(IID, Op0, Op1))
6980 return V;
6981 if (Value *V = foldMinMaxSharedOp(IID, Op1, Op0))
6982 return V;
6983
6984 ICmpInst::Predicate Pred =
6986 if (isICmpTrue(Pred, Op0, Op1, Q.getWithoutUndef(), RecursionLimit))
6987 return Op0;
6988 if (isICmpTrue(Pred, Op1, Op0, Q.getWithoutUndef(), RecursionLimit))
6989 return Op1;
6990
6991 break;
6992 }
6993 case Intrinsic::scmp:
6994 case Intrinsic::ucmp: {
6995 // Fold to a constant if the relationship between operands can be
6996 // established with certainty
6997 if (isICmpTrue(CmpInst::ICMP_EQ, Op0, Op1, Q, RecursionLimit))
6998 return Constant::getNullValue(ReturnType);
6999
7000 ICmpInst::Predicate PredGT =
7001 IID == Intrinsic::scmp ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
7002 if (isICmpTrue(PredGT, Op0, Op1, Q, RecursionLimit))
7003 return ConstantInt::get(ReturnType, 1);
7004
7005 ICmpInst::Predicate PredLT =
7006 IID == Intrinsic::scmp ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
7007 if (isICmpTrue(PredLT, Op0, Op1, Q, RecursionLimit))
7008 return ConstantInt::getSigned(ReturnType, -1);
7009
7010 break;
7011 }
7012 case Intrinsic::usub_with_overflow:
7013 case Intrinsic::ssub_with_overflow:
7014 // X - X -> { 0, false }
7015 // X - undef -> { 0, false }
7016 // undef - X -> { 0, false }
7017 if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
7018 return Constant::getNullValue(ReturnType);
7019 break;
7020 case Intrinsic::uadd_with_overflow:
7021 case Intrinsic::sadd_with_overflow:
7022 // X + undef -> { -1, false }
7023 // undef + x -> { -1, false }
7024 if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1)) {
7025 return ConstantStruct::get(
7026 cast<StructType>(ReturnType),
7027 {Constant::getAllOnesValue(ReturnType->getStructElementType(0)),
7028 Constant::getNullValue(ReturnType->getStructElementType(1))});
7029 }
7030 break;
7031 case Intrinsic::umul_with_overflow:
7032 case Intrinsic::smul_with_overflow:
7033 // 0 * X -> { 0, false }
7034 // X * 0 -> { 0, false }
7035 if (match(Op0, m_Zero()) || match(Op1, m_Zero()))
7036 return Constant::getNullValue(ReturnType);
7037 // undef * X -> { 0, false }
7038 // X * undef -> { 0, false }
7039 if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
7040 return Constant::getNullValue(ReturnType);
7041 break;
7042 case Intrinsic::uadd_sat:
7043 // sat(MAX + X) -> MAX
7044 // sat(X + MAX) -> MAX
7045 if (match(Op0, m_AllOnes()) || match(Op1, m_AllOnes()))
7046 return Constant::getAllOnesValue(ReturnType);
7047 [[fallthrough]];
7048 case Intrinsic::sadd_sat:
7049 // sat(X + undef) -> -1
7050 // sat(undef + X) -> -1
7051 // For unsigned: Assume undef is MAX, thus we saturate to MAX (-1).
7052 // For signed: Assume undef is ~X, in which case X + ~X = -1.
7053 if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
7054 return Constant::getAllOnesValue(ReturnType);
7055
7056 // X + 0 -> X
7057 if (match(Op1, m_Zero()))
7058 return Op0;
7059 // 0 + X -> X
7060 if (match(Op0, m_Zero()))
7061 return Op1;
7062 break;
7063 case Intrinsic::usub_sat:
7064 // sat(0 - X) -> 0, sat(X - MAX) -> 0
7065 if (match(Op0, m_Zero()) || match(Op1, m_AllOnes()))
7066 return Constant::getNullValue(ReturnType);
7067 [[fallthrough]];
7068 case Intrinsic::ssub_sat:
7069 // X - X -> 0, X - undef -> 0, undef - X -> 0
7070 if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
7071 return Constant::getNullValue(ReturnType);
7072 // X - 0 -> X
7073 if (match(Op1, m_Zero()))
7074 return Op0;
7075 break;
7076 case Intrinsic::load_relative:
7077 if (auto *C0 = dyn_cast<Constant>(Op0))
7078 if (auto *C1 = dyn_cast<Constant>(Op1))
7079 return simplifyRelativeLoad(C0, C1, Q.DL);
7080 break;
7081 case Intrinsic::powi:
7082 if (auto *Power = dyn_cast<ConstantInt>(Op1)) {
7083 // powi(x, 0) -> 1.0
7084 if (Power->isZero())
7085 return ConstantFP::get(Op0->getType(), 1.0);
7086 // powi(x, 1) -> x
7087 if (Power->isOne())
7088 return Op0;
7089 }
7090 break;
7091 case Intrinsic::ldexp:
7092 return simplifyLdexp(Op0, Op1, Q, false);
7093 case Intrinsic::copysign:
7094 // copysign X, X --> X
7095 if (Op0 == Op1)
7096 return Op0;
7097 // copysign -X, X --> X
7098 // copysign X, -X --> -X
7099 if (match(Op0, m_FNeg(m_Specific(Op1))) ||
7100 match(Op1, m_FNeg(m_Specific(Op0))))
7101 return Op1;
7102 break;
7103 case Intrinsic::is_fpclass: {
7104 uint64_t Mask = cast<ConstantInt>(Op1)->getZExtValue();
7105 // If all tests are made, it doesn't matter what the value is.
7106 if ((Mask & fcAllFlags) == fcAllFlags)
7107 return ConstantInt::get(ReturnType, true);
7108 if ((Mask & fcAllFlags) == 0)
7109 return ConstantInt::get(ReturnType, false);
7110 if (Q.isUndefValue(Op0))
7111 return UndefValue::get(ReturnType);
7112 break;
7113 }
7114 case Intrinsic::maxnum:
7115 case Intrinsic::minnum:
7116 case Intrinsic::maximum:
7117 case Intrinsic::minimum:
7118 case Intrinsic::maximumnum:
7119 case Intrinsic::minimumnum: {
7120 // In several cases here, we deviate from exact IEEE 754 semantics
7121 // to enable optimizations (as allowed by the LLVM IR spec).
7122 //
7123 // For instance, we may return one of the arguments unmodified instead of
7124 // inserting an llvm.canonicalize to transform input sNaNs into qNaNs,
7125 // or may assume all NaN inputs are qNaNs.
7126
7127 // If the arguments are the same, this is a no-op (ignoring NaN quieting)
7128 if (Op0 == Op1)
7129 return Op0;
7130
7131 // Canonicalize constant operand as Op1.
7132 if (isa<Constant>(Op0))
7133 std::swap(Op0, Op1);
7134
7135 if (Constant *C = dyn_cast<Constant>(Op1)) {
7137 Constant *NewConst = nullptr;
7138
7139 if (VectorType *VTy = dyn_cast<VectorType>(C->getType())) {
7140 ElementCount ElemCount = VTy->getElementCount();
7141
7142 if (Constant *SplatVal = C->getSplatValue()) {
7143 // Handle splat vectors (including scalable vectors)
7144 OptResult = OptimizeConstMinMax(SplatVal, IID, Call, &NewConst);
7145 if (OptResult == MinMaxOptResult::UseNewConstVal)
7146 NewConst = ConstantVector::getSplat(ElemCount, NewConst);
7147
7148 } else if (ElemCount.isFixed()) {
7149 // Storage to build up new const return value (with NaNs quieted)
7151
7152 // Check elementwise whether we can optimize to either a constant
7153 // value or return the LHS value. We cannot mix and match LHS +
7154 // constant elements, as this would require inserting a new
7155 // VectorShuffle instruction, which is not allowed in simplifyBinOp.
7156 OptResult = MinMaxOptResult::UseEither;
7157 for (unsigned i = 0; i != ElemCount.getFixedValue(); ++i) {
7158 auto *Elt = C->getAggregateElement(i);
7159 if (!Elt) {
7161 break;
7162 }
7163 auto ElemResult = OptimizeConstMinMax(Elt, IID, Call, &NewConst);
7164 if (ElemResult == MinMaxOptResult::CannotOptimize ||
7165 (ElemResult != OptResult &&
7166 OptResult != MinMaxOptResult::UseEither &&
7167 ElemResult != MinMaxOptResult::UseEither)) {
7169 break;
7170 }
7171 NewC[i] = NewConst;
7172 if (ElemResult != MinMaxOptResult::UseEither)
7173 OptResult = ElemResult;
7174 }
7175 if (OptResult == MinMaxOptResult::UseNewConstVal)
7176 NewConst = ConstantVector::get(NewC);
7177 }
7178 } else {
7179 // Handle scalar inputs
7180 OptResult = OptimizeConstMinMax(C, IID, Call, &NewConst);
7181 }
7182
7183 if (OptResult == MinMaxOptResult::UseOtherVal ||
7184 OptResult == MinMaxOptResult::UseEither)
7185 return Op0; // Return the other arg (ignoring NaN quieting)
7186 else if (OptResult == MinMaxOptResult::UseNewConstVal)
7187 return NewConst;
7188 }
7189
7190 // Min/max of the same operation with common operand:
7191 // m(m(X, Y)), X --> m(X, Y) (4 commuted variants)
7192 if (Value *V = foldMinimumMaximumSharedOp(IID, Op0, Op1))
7193 return V;
7194 if (Value *V = foldMinimumMaximumSharedOp(IID, Op1, Op0))
7195 return V;
7196
7197 break;
7198 }
7199 case Intrinsic::vector_extract: {
7200 // (extract_vector (insert_vector _, X, 0), 0) -> X
7201 unsigned IdxN = cast<ConstantInt>(Op1)->getZExtValue();
7202 Value *X = nullptr;
7204 m_Zero())) &&
7205 IdxN == 0 && X->getType() == ReturnType)
7206 return X;
7207
7208 break;
7209 }
7210
7211 case Intrinsic::aarch64_sve_andv:
7212 case Intrinsic::aarch64_sve_eorv:
7213 case Intrinsic::aarch64_sve_orv:
7214 case Intrinsic::aarch64_sve_saddv:
7215 case Intrinsic::aarch64_sve_smaxv:
7216 case Intrinsic::aarch64_sve_sminv:
7217 case Intrinsic::aarch64_sve_uaddv:
7218 case Intrinsic::aarch64_sve_umaxv:
7219 case Intrinsic::aarch64_sve_uminv:
7220 return simplifySVEIntReduction(IID, ReturnType, Op0, Op1);
7221 default:
7222 break;
7223 }
7224
7225 return nullptr;
7226}
7227
7229 ArrayRef<Value *> Args,
7230 const SimplifyQuery &Q) {
7231 // Operand bundles should not be in Args.
7232 assert(Call->arg_size() == Args.size());
7233 unsigned NumOperands = Args.size();
7234 Function *F = cast<Function>(Callee);
7235 Intrinsic::ID IID = F->getIntrinsicID();
7236
7239 return PoisonValue::get(F->getReturnType());
7240 // Most of the intrinsics with no operands have some kind of side effect.
7241 // Don't simplify.
7242 if (!NumOperands) {
7243 switch (IID) {
7244 case Intrinsic::vscale: {
7245 Type *RetTy = F->getReturnType();
7246 ConstantRange CR = getVScaleRange(Call->getFunction(), 64);
7247 if (const APInt *C = CR.getSingleElement())
7248 return ConstantInt::get(RetTy, C->getZExtValue());
7249 return nullptr;
7250 }
7251 default:
7252 return nullptr;
7253 }
7254 }
7255
7256 if (NumOperands == 1)
7257 return simplifyUnaryIntrinsic(F, Args[0], Q, Call);
7258
7259 if (NumOperands == 2)
7260 return simplifyBinaryIntrinsic(IID, F->getReturnType(), Args[0], Args[1], Q,
7261 Call);
7262
7263 // Handle intrinsics with 3 or more arguments.
7264 switch (IID) {
7265 case Intrinsic::masked_load:
7266 case Intrinsic::masked_gather: {
7267 Value *MaskArg = Args[1];
7268 Value *PassthruArg = Args[2];
7269 // If the mask is all zeros or undef, the "passthru" argument is the result.
7270 if (maskIsAllZeroOrUndef(MaskArg))
7271 return PassthruArg;
7272 return nullptr;
7273 }
7274 case Intrinsic::fshl:
7275 case Intrinsic::fshr: {
7276 Value *Op0 = Args[0], *Op1 = Args[1], *ShAmtArg = Args[2];
7277
7278 // If both operands are undef, the result is undef.
7279 if (Q.isUndefValue(Op0) && Q.isUndefValue(Op1))
7280 return UndefValue::get(F->getReturnType());
7281
7282 // If shift amount is undef, assume it is zero.
7283 if (Q.isUndefValue(ShAmtArg))
7284 return Args[IID == Intrinsic::fshl ? 0 : 1];
7285
7286 const APInt *ShAmtC;
7287 if (match(ShAmtArg, m_APInt(ShAmtC))) {
7288 // If there's effectively no shift, return the 1st arg or 2nd arg.
7289 APInt BitWidth = APInt(ShAmtC->getBitWidth(), ShAmtC->getBitWidth());
7290 if (ShAmtC->urem(BitWidth).isZero())
7291 return Args[IID == Intrinsic::fshl ? 0 : 1];
7292 }
7293
7294 // Rotating zero by anything is zero.
7295 if (match(Op0, m_Zero()) && match(Op1, m_Zero()))
7296 return ConstantInt::getNullValue(F->getReturnType());
7297
7298 // Rotating -1 by anything is -1.
7299 if (match(Op0, m_AllOnes()) && match(Op1, m_AllOnes()))
7300 return ConstantInt::getAllOnesValue(F->getReturnType());
7301
7302 return nullptr;
7303 }
7304 case Intrinsic::experimental_constrained_fma: {
7306 if (Value *V = simplifyFPOp(Args, {}, Q, *FPI->getExceptionBehavior(),
7307 *FPI->getRoundingMode()))
7308 return V;
7309 return nullptr;
7310 }
7311 case Intrinsic::fma:
7312 case Intrinsic::fmuladd: {
7313 if (Value *V = simplifyFPOp(Args, {}, Q, fp::ebIgnore,
7315 return V;
7316 return nullptr;
7317 }
7318 case Intrinsic::smul_fix:
7319 case Intrinsic::smul_fix_sat: {
7320 Value *Op0 = Args[0];
7321 Value *Op1 = Args[1];
7322 Value *Op2 = Args[2];
7323 Type *ReturnType = F->getReturnType();
7324
7325 // Canonicalize constant operand as Op1 (ConstantFolding handles the case
7326 // when both Op0 and Op1 are constant so we do not care about that special
7327 // case here).
7328 if (isa<Constant>(Op0))
7329 std::swap(Op0, Op1);
7330
7331 // X * 0 -> 0
7332 if (match(Op1, m_Zero()))
7333 return Constant::getNullValue(ReturnType);
7334
7335 // X * undef -> 0
7336 if (Q.isUndefValue(Op1))
7337 return Constant::getNullValue(ReturnType);
7338
7339 // X * (1 << Scale) -> X
7340 APInt ScaledOne =
7341 APInt::getOneBitSet(ReturnType->getScalarSizeInBits(),
7342 cast<ConstantInt>(Op2)->getZExtValue());
7343 if (ScaledOne.isNonNegative() && match(Op1, m_SpecificInt(ScaledOne)))
7344 return Op0;
7345
7346 return nullptr;
7347 }
7348 case Intrinsic::vector_insert: {
7349 Value *Vec = Args[0];
7350 Value *SubVec = Args[1];
7351 Value *Idx = Args[2];
7352 Type *ReturnType = F->getReturnType();
7353
7354 // (insert_vector Y, (extract_vector X, 0), 0) -> X
7355 // where: Y is X, or Y is undef
7356 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
7357 Value *X = nullptr;
7358 if (match(SubVec,
7360 (Q.isUndefValue(Vec) || Vec == X) && IdxN == 0 &&
7361 X->getType() == ReturnType)
7362 return X;
7363
7364 return nullptr;
7365 }
7366 case Intrinsic::vector_splice_left:
7367 case Intrinsic::vector_splice_right: {
7368 Value *Offset = Args[2];
7369 auto *Ty = cast<VectorType>(F->getReturnType());
7370 if (Q.isUndefValue(Offset))
7371 return PoisonValue::get(Ty);
7372
7373 unsigned BitWidth = Offset->getType()->getScalarSizeInBits();
7374 ConstantRange NumElts(
7375 APInt(BitWidth, Ty->getElementCount().getKnownMinValue()));
7376 if (Ty->isScalableTy())
7377 NumElts = NumElts.multiply(getVScaleRange(Call->getFunction(), BitWidth));
7378
7379 // If we know Offset > NumElts, simplify to poison.
7381 if (CR.getUnsignedMin().ugt(NumElts.getUnsignedMax()))
7382 return PoisonValue::get(Ty);
7383
7384 // splice.left(a, b, 0) --> a, splice.right(a, b, 0) --> b
7385 if (CR.isSingleElement() && CR.getSingleElement()->isZero())
7386 return IID == Intrinsic::vector_splice_left ? Args[0] : Args[1];
7387
7388 return nullptr;
7389 }
7390 case Intrinsic::experimental_constrained_fadd: {
7392 return simplifyFAddInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
7393 *FPI->getExceptionBehavior(),
7394 *FPI->getRoundingMode());
7395 }
7396 case Intrinsic::experimental_constrained_fsub: {
7398 return simplifyFSubInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
7399 *FPI->getExceptionBehavior(),
7400 *FPI->getRoundingMode());
7401 }
7402 case Intrinsic::experimental_constrained_fmul: {
7404 return simplifyFMulInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
7405 *FPI->getExceptionBehavior(),
7406 *FPI->getRoundingMode());
7407 }
7408 case Intrinsic::experimental_constrained_fdiv: {
7410 return simplifyFDivInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
7411 *FPI->getExceptionBehavior(),
7412 *FPI->getRoundingMode());
7413 }
7414 case Intrinsic::experimental_constrained_frem: {
7416 return simplifyFRemInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
7417 *FPI->getExceptionBehavior(),
7418 *FPI->getRoundingMode());
7419 }
7420 case Intrinsic::experimental_constrained_ldexp:
7421 return simplifyLdexp(Args[0], Args[1], Q, true);
7422 case Intrinsic::experimental_gc_relocate: {
7424 Value *DerivedPtr = GCR.getDerivedPtr();
7425 Value *BasePtr = GCR.getBasePtr();
7426
7427 // Undef is undef, even after relocation.
7428 if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {
7429 return UndefValue::get(GCR.getType());
7430 }
7431
7432 if (auto *PT = dyn_cast<PointerType>(GCR.getType())) {
7433 // For now, the assumption is that the relocation of null will be null
7434 // for most any collector. If this ever changes, a corresponding hook
7435 // should be added to GCStrategy and this code should check it first.
7436 if (isa<ConstantPointerNull>(DerivedPtr)) {
7437 // Use null-pointer of gc_relocate's type to replace it.
7438 return ConstantPointerNull::get(PT);
7439 }
7440 }
7441 return nullptr;
7442 }
7443 case Intrinsic::experimental_vp_reverse: {
7444 Value *Vec = Call->getArgOperand(0);
7445 Value *EVL = Call->getArgOperand(2);
7446
7447 Value *X;
7448 // vp.reverse(vp.reverse(X)) == X (mask doesn't matter)
7450 m_Value(X), m_Value(), m_Specific(EVL))))
7451 return X;
7452
7453 // vp.reverse(splat(X)) -> splat(X) (regardless of mask and EVL)
7454 if (isSplatValue(Vec))
7455 return Vec;
7456 return nullptr;
7457 }
7458 default:
7459 return nullptr;
7460 }
7461}
7462
7464 ArrayRef<Value *> Args,
7465 const SimplifyQuery &Q) {
7466 auto *F = dyn_cast<Function>(Callee);
7467 if (!F || !canConstantFoldCallTo(Call, F))
7468 return nullptr;
7469
7470 SmallVector<Constant *, 4> ConstantArgs;
7471 ConstantArgs.reserve(Args.size());
7472 for (Value *Arg : Args) {
7474 if (!C) {
7475 if (isa<MetadataAsValue>(Arg))
7476 continue;
7477 return nullptr;
7478 }
7479 ConstantArgs.push_back(C);
7480 }
7481
7482 return ConstantFoldCall(Call, F, ConstantArgs, Q.TLI);
7483}
7484
7486 const SimplifyQuery &Q) {
7487 // Args should not contain operand bundle operands.
7488 assert(Call->arg_size() == Args.size());
7489
7490 // musttail calls can only be simplified if they are also DCEd.
7491 // As we can't guarantee this here, don't simplify them.
7492 if (Call->isMustTailCall())
7493 return nullptr;
7494
7495 // call undef -> poison
7496 // call null -> poison
7497 if (isa<UndefValue>(Callee) || isa<ConstantPointerNull>(Callee))
7498 return PoisonValue::get(Call->getType());
7499
7500 if (Value *V = tryConstantFoldCall(Call, Callee, Args, Q))
7501 return V;
7502
7503 auto *F = dyn_cast<Function>(Callee);
7504 if (F && F->isIntrinsic())
7505 if (Value *Ret = simplifyIntrinsic(Call, Callee, Args, Q))
7506 return Ret;
7507
7508 return nullptr;
7509}
7510
7513 SmallVector<Value *, 4> Args(Call->args());
7514 if (Value *V = tryConstantFoldCall(Call, Call->getCalledOperand(), Args, Q))
7515 return V;
7516 if (Value *Ret = simplifyIntrinsic(Call, Call->getCalledOperand(), Args, Q))
7517 return Ret;
7518 return nullptr;
7519}
7520
7521/// Given operands for a Freeze, see if we can fold the result.
7523 // Use a utility function defined in ValueTracking.
7525 return Op0;
7526 // We have room for improvement.
7527 return nullptr;
7528}
7529
7531 return ::simplifyFreezeInst(Op0, Q);
7532}
7533
7535 const SimplifyQuery &Q) {
7536 if (LI->isVolatile())
7537 return nullptr;
7538
7539 if (auto *PtrOpC = dyn_cast<Constant>(PtrOp))
7540 return ConstantFoldLoadFromConstPtr(PtrOpC, LI->getType(), Q.DL);
7541
7542 // We can only fold the load if it is from a constant global with definitive
7543 // initializer. Skip expensive logic if this is not the case.
7545 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
7546 return nullptr;
7547
7548 // If GlobalVariable's initializer is uniform, then return the constant
7549 // regardless of its offset.
7550 if (Constant *C = ConstantFoldLoadFromUniformValue(GV->getInitializer(),
7551 LI->getType(), Q.DL))
7552 return C;
7553
7554 // Try to convert operand into a constant by stripping offsets while looking
7555 // through invariant.group intrinsics.
7557 PtrOp = PtrOp->stripAndAccumulateConstantOffsets(
7558 Q.DL, Offset, /* AllowNonInbounts */ true,
7559 /* AllowInvariantGroup */ true);
7560 if (PtrOp == GV) {
7561 // Index size may have changed due to address space casts.
7562 Offset = Offset.sextOrTrunc(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()));
7563 return ConstantFoldLoadFromConstPtr(GV, LI->getType(), std::move(Offset),
7564 Q.DL);
7565 }
7566
7567 return nullptr;
7568}
7569
7570/// See if we can compute a simplified version of this instruction.
7571/// If not, this returns null.
7572
7574 ArrayRef<Value *> NewOps,
7575 const SimplifyQuery &SQ,
7576 unsigned MaxRecurse) {
7577 assert(I->getFunction() && "instruction should be inserted in a function");
7578 assert((!SQ.CxtI || SQ.CxtI->getFunction() == I->getFunction()) &&
7579 "context instruction should be in the same function");
7580
7581 const SimplifyQuery Q = SQ.CxtI ? SQ : SQ.getWithInstruction(I);
7582
7583 switch (I->getOpcode()) {
7584 default:
7585 if (all_of(NewOps, IsaPred<Constant>)) {
7586 SmallVector<Constant *, 8> NewConstOps(NewOps.size());
7587 transform(NewOps, NewConstOps.begin(),
7588 [](Value *V) { return cast<Constant>(V); });
7589 return ConstantFoldInstOperands(I, NewConstOps, Q.DL, Q.TLI);
7590 }
7591 return nullptr;
7592 case Instruction::FNeg:
7593 return simplifyFNegInst(NewOps[0], I->getFastMathFlags(), Q, MaxRecurse);
7594 case Instruction::FAdd:
7595 return simplifyFAddInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
7596 MaxRecurse);
7597 case Instruction::Add:
7598 return simplifyAddInst(
7599 NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
7600 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
7601 case Instruction::FSub:
7602 return simplifyFSubInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
7603 MaxRecurse);
7604 case Instruction::Sub:
7605 return simplifySubInst(
7606 NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
7607 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
7608 case Instruction::FMul:
7609 return simplifyFMulInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
7610 MaxRecurse);
7611 case Instruction::Mul:
7612 return simplifyMulInst(
7613 NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
7614 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
7615 case Instruction::SDiv:
7616 return simplifySDivInst(NewOps[0], NewOps[1],
7618 MaxRecurse);
7619 case Instruction::UDiv:
7620 return simplifyUDivInst(NewOps[0], NewOps[1],
7622 MaxRecurse);
7623 case Instruction::FDiv:
7624 return simplifyFDivInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
7625 MaxRecurse);
7626 case Instruction::SRem:
7627 return simplifySRemInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7628 case Instruction::URem:
7629 return simplifyURemInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7630 case Instruction::FRem:
7631 return simplifyFRemInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
7632 MaxRecurse);
7633 case Instruction::Shl:
7634 return simplifyShlInst(
7635 NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
7636 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
7637 case Instruction::LShr:
7638 return simplifyLShrInst(NewOps[0], NewOps[1],
7640 MaxRecurse);
7641 case Instruction::AShr:
7642 return simplifyAShrInst(NewOps[0], NewOps[1],
7644 MaxRecurse);
7645 case Instruction::And:
7646 return simplifyAndInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7647 case Instruction::Or:
7648 return simplifyOrInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7649 case Instruction::Xor:
7650 return simplifyXorInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7651 case Instruction::ICmp:
7652 return simplifyICmpInst(cast<ICmpInst>(I)->getCmpPredicate(), NewOps[0],
7653 NewOps[1], Q, MaxRecurse);
7654 case Instruction::FCmp:
7655 return simplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), NewOps[0],
7656 NewOps[1], I->getFastMathFlags(), Q, MaxRecurse);
7657 case Instruction::Select:
7658 return simplifySelectInst(NewOps[0], NewOps[1], NewOps[2], Q, MaxRecurse);
7659 case Instruction::GetElementPtr: {
7660 auto *GEPI = cast<GetElementPtrInst>(I);
7661 return simplifyGEPInst(GEPI->getSourceElementType(), NewOps[0],
7662 ArrayRef(NewOps).slice(1), GEPI->getNoWrapFlags(), Q,
7663 MaxRecurse);
7664 }
7665 case Instruction::InsertValue: {
7667 return simplifyInsertValueInst(NewOps[0], NewOps[1], IV->getIndices(), Q,
7668 MaxRecurse);
7669 }
7670 case Instruction::InsertElement:
7671 return simplifyInsertElementInst(NewOps[0], NewOps[1], NewOps[2], Q);
7672 case Instruction::ExtractValue: {
7673 auto *EVI = cast<ExtractValueInst>(I);
7674 return simplifyExtractValueInst(NewOps[0], EVI->getIndices(), Q,
7675 MaxRecurse);
7676 }
7677 case Instruction::ExtractElement:
7678 return simplifyExtractElementInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7679 case Instruction::ShuffleVector: {
7680 auto *SVI = cast<ShuffleVectorInst>(I);
7681 return simplifyShuffleVectorInst(NewOps[0], NewOps[1],
7682 SVI->getShuffleMask(), SVI->getType(), Q,
7683 MaxRecurse);
7684 }
7685 case Instruction::PHI:
7686 return simplifyPHINode(cast<PHINode>(I), NewOps, Q);
7687 case Instruction::Call:
7688 return simplifyCall(
7689 cast<CallInst>(I), NewOps.back(),
7690 NewOps.drop_back(1 + cast<CallInst>(I)->getNumTotalBundleOperands()), Q);
7691 case Instruction::Freeze:
7692 return llvm::simplifyFreezeInst(NewOps[0], Q);
7693#define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
7694#include "llvm/IR/Instruction.def"
7695#undef HANDLE_CAST_INST
7696 return simplifyCastInst(I->getOpcode(), NewOps[0], I->getType(), Q,
7697 MaxRecurse);
7698 case Instruction::Alloca:
7699 // No simplifications for Alloca and it can't be constant folded.
7700 return nullptr;
7701 case Instruction::Load:
7702 return simplifyLoadInst(cast<LoadInst>(I), NewOps[0], Q);
7703 }
7704}
7705
7707 ArrayRef<Value *> NewOps,
7708 const SimplifyQuery &SQ) {
7709 assert(NewOps.size() == I->getNumOperands() &&
7710 "Number of operands should match the instruction!");
7711 return ::simplifyInstructionWithOperands(I, NewOps, SQ, RecursionLimit);
7712}
7713
7715 SmallVector<Value *, 8> Ops(I->operands());
7717
7718 /// If called on unreachable code, the instruction may simplify to itself.
7719 /// Make life easier for users by detecting that case here, and returning a
7720 /// safe value instead.
7721 return Result == I ? PoisonValue::get(I->getType()) : Result;
7722}
7723
7724/// Implementation of recursive simplification through an instruction's
7725/// uses.
7726///
7727/// This is the common implementation of the recursive simplification routines.
7728/// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
7729/// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
7730/// instructions to process and attempt to simplify it using
7731/// InstructionSimplify. Recursively visited users which could not be
7732/// simplified themselves are to the optional UnsimplifiedUsers set for
7733/// further processing by the caller.
7734///
7735/// This routine returns 'true' only when *it* simplifies something. The passed
7736/// in simplified value does not count toward this.
7738 Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
7739 const DominatorTree *DT, AssumptionCache *AC,
7740 SmallSetVector<Instruction *, 8> *UnsimplifiedUsers = nullptr) {
7741 bool Simplified = false;
7743 const DataLayout &DL = I->getDataLayout();
7744
7745 // If we have an explicit value to collapse to, do that round of the
7746 // simplification loop by hand initially.
7747 if (SimpleV) {
7748 for (User *U : I->users())
7749 if (U != I)
7750 Worklist.insert(cast<Instruction>(U));
7751
7752 // Replace the instruction with its simplified value.
7753 I->replaceAllUsesWith(SimpleV);
7754
7755 if (!I->isEHPad() && !I->isTerminator() && !I->mayHaveSideEffects())
7756 I->eraseFromParent();
7757 } else {
7758 Worklist.insert(I);
7759 }
7760
7761 // Note that we must test the size on each iteration, the worklist can grow.
7762 for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
7763 I = Worklist[Idx];
7764
7765 // See if this instruction simplifies.
7766 SimpleV = simplifyInstruction(I, {DL, TLI, DT, AC});
7767 if (!SimpleV) {
7768 if (UnsimplifiedUsers)
7769 UnsimplifiedUsers->insert(I);
7770 continue;
7771 }
7772
7773 Simplified = true;
7774
7775 // Stash away all the uses of the old instruction so we can check them for
7776 // recursive simplifications after a RAUW. This is cheaper than checking all
7777 // uses of To on the recursive step in most cases.
7778 for (User *U : I->users())
7779 Worklist.insert(cast<Instruction>(U));
7780
7781 // Replace the instruction with its simplified value.
7782 I->replaceAllUsesWith(SimpleV);
7783
7784 if (!I->isEHPad() && !I->isTerminator() && !I->mayHaveSideEffects())
7785 I->eraseFromParent();
7786 }
7787 return Simplified;
7788}
7789
7791 Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
7792 const DominatorTree *DT, AssumptionCache *AC,
7793 SmallSetVector<Instruction *, 8> *UnsimplifiedUsers) {
7794 assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
7795 assert(SimpleV && "Must provide a simplified value.");
7796 return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC,
7797 UnsimplifiedUsers);
7798}
7799
7800namespace llvm {
7802 auto *DTWP = P.getAnalysisIfAvailable<DominatorTreeWrapperPass>();
7803 auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
7804 auto *TLIWP = P.getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
7805 auto *TLI = TLIWP ? &TLIWP->getTLI(F) : nullptr;
7806 auto *ACWP = P.getAnalysisIfAvailable<AssumptionCacheTracker>();
7807 auto *AC = ACWP ? &ACWP->getAssumptionCache(F) : nullptr;
7808 return {F.getDataLayout(), TLI, DT, AC};
7809}
7810
7812 const DataLayout &DL) {
7813 return {DL, &AR.TLI, &AR.DT, &AR.AC};
7814}
7815
7816template <class T, class... TArgs>
7818 Function &F) {
7819 auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F);
7820 auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F);
7821 auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F);
7822 return {F.getDataLayout(), TLI, DT, AC};
7823}
7825 Function &);
7826
7828 if (!CanUseUndef)
7829 return false;
7830
7831 return match(V, m_Undef());
7832}
7833
7834} // namespace llvm
7835
7836void InstSimplifyFolder::anchor() {}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
#define X(NUM, ENUM, NAME)
Definition ELF.h:851
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
IRTranslator LLVM IR MI
static Value * simplifyCmpSelFalseCase(CmpPredicate Pred, Value *LHS, Value *RHS, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse)
Simplify comparison with false branch of select.
static Value * simplifyCmpSelCase(CmpPredicate Pred, Value *LHS, Value *RHS, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse, Constant *TrueOrFalse)
Simplify comparison with true or false branch of select: sel = select i1 cond, i32 tv,...
static Value * foldMinMaxSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1)
Given a min/max intrinsic, see if it can be removed based on having an operand that is another min/ma...
static Value * expandCommutativeBinOp(Instruction::BinaryOps Opcode, Value *L, Value *R, Instruction::BinaryOps OpcodeToExpand, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify binops of form "A op (B op' C)" or the commuted variant by distributing op over op'.
static Constant * foldOrCommuteConstant(Instruction::BinaryOps Opcode, Value *&Op0, Value *&Op1, const SimplifyQuery &Q)
static bool haveNonOverlappingStorage(const Value *V1, const Value *V2)
Return true if V1 and V2 are each the base of some distinct storage region [V, object_size(V)] which d...
static Constant * foldConstant(Instruction::UnaryOps Opcode, Value *&Op, const SimplifyQuery &Q)
static Value * handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse)
We know comparison with both branches of select can be simplified, but they are not equal.
static Value * threadCmpOverPHI(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a comparison with a PHI instruction, try to simplify the comparison by seeing whether ...
static Constant * propagateNaN(Constant *In)
Try to propagate existing NaN values when possible.
static Value * simplifyICmpOfBools(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Fold an icmp when its operands have i1 scalar type.
static Value * simplifyICmpWithBinOpOnLHS(CmpPredicate Pred, BinaryOperator *LBO, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
static void getUnsignedMonotonicValues(SmallPtrSetImpl< Value * > &Res, Value *V, MonotonicType Type, const SimplifyQuery &Q, unsigned Depth=0)
Get values V_i such that V uge V_i (GreaterEq) or V ule V_i (LowerEq).
static Value * simplifyRelativeLoad(Constant *Ptr, Constant *Offset, const DataLayout &DL)
static Value * simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
These are simplifications common to SDiv and UDiv.
static Value * simplifyPHINode(PHINode *PN, ArrayRef< Value * > IncomingValues, const SimplifyQuery &Q)
See if we can fold the given phi. If not, returns null.
@ RecursionLimit
static bool isSameCompare(Value *V, CmpPredicate Pred, Value *LHS, Value *RHS)
isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
static Value * simplifyAndCommutative(Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
static bool isIdempotent(Intrinsic::ID ID)
static std::optional< ConstantRange > getRange(Value *V, const InstrInfoQuery &IIQ)
Helper method to get range from metadata or attribute.
static Value * simplifyAndOrOfICmpsWithCtpop(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd)
Try to simplify and/or of icmp with ctpop intrinsic.
static Value * simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp, ICmpInst *UnsignedICmp, bool IsAnd, const SimplifyQuery &Q)
Commuted variants are assumed to be handled by calling this function again with the parameters swappe...
static Value * tryConstantFoldCall(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
static Value * simplifyWithOpsReplaced(Value *V, ArrayRef< std::pair< Value *, Value * > > Ops, const SimplifyQuery &Q, bool AllowRefinement, SmallVectorImpl< Instruction * > *DropFlags, unsigned MaxRecurse)
static Value * simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, const InstrInfoQuery &IIQ)
static Value * simplifyAndOrOfFCmpsWithConstants(FCmpInst *Cmp0, FCmpInst *Cmp1, bool IsAnd)
Test if a pair of compares with a shared operand and 2 constants has an empty set intersection,...
static Value * simplifyICmpWithMinMax(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
simplify integer comparisons where at least one operand of the compare matches an integer min/max idi...
static Value * simplifyCmpSelTrueCase(CmpPredicate Pred, Value *LHS, Value *RHS, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse)
Simplify comparison with true branch of select.
static Value * simplifyIntrinsic(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
static Value * simplifyICmpUsingMonotonicValues(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
static bool isPoisonShift(Value *Amount, const SimplifyQuery &Q)
Returns true if a shift by Amount always yields poison.
static Value * simplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an LShr or AShr, see if we can fold the result.
static Value * simplifyICmpWithIntrinsicOnLHS(CmpPredicate Pred, Value *LHS, Value *RHS)
static Value * simplifyByDomEq(unsigned Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Test if there is a dominating equivalence condition for the two operands.
static Value * simplifyFPUnOp(unsigned, Value *, const FastMathFlags &, const SimplifyQuery &, unsigned)
Given the operand for a UnaryOperator, see if we can fold the result.
static Value * simplifyICmpWithBinOp(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
TODO: A large part of this logic is duplicated in InstCombine's foldICmpBinOp().
static Value * simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1, const SimplifyQuery &Q)
static Value * expandBinOp(Instruction::BinaryOps Opcode, Value *V, Value *OtherOp, Instruction::BinaryOps OpcodeToExpand, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a binary operator of form "V op OtherOp" where V is "(B0 opex B1)" by distributing 'o...
static bool matchEquivZeroRHS(CmpPredicate &Pred, const Value *RHS)
Check if RHS is zero or can be transformed to an equivalent zero comparison.
static Value * simplifyICmpWithZero(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Try hard to fold icmp with zero RHS because this is a common case.
static Value * simplifySelectWithFCmp(Value *Cond, Value *T, Value *F, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a select instruction when its condition operand is a floating-point comparison.
static Constant * getFalse(Type *Ty)
For a boolean type or a vector of boolean type, return false or a vector with every element false.
static Value * simplifyDivRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Check for common or similar folds of integer division or integer remainder.
static bool removesFPFraction(Intrinsic::ID ID)
Return true if the intrinsic rounds a floating-point value to an integral floating-point value (not a...
static Value * simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, const InstrInfoQuery &IIQ)
static Value * simplifySelectWithEquivalence(ArrayRef< std::pair< Value *, Value * > > Replacements, Value *TrueVal, Value *FalseVal, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a select instruction when its condition operand is an integer equality or floating-po...
static bool trySimplifyICmpWithAdds(CmpPredicate Pred, Value *LHS, Value *RHS, const InstrInfoQuery &IIQ)
static Value * simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X, const APInt *Y, bool TrueWhenUnset)
Try to simplify a select instruction when its condition operand is an integer comparison where one op...
static Value * simplifyAssociativeBinOp(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
Generic simplifications for associative binary operations.
static Value * threadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a binary operation with an operand that is a PHI instruction, try to simplify the bino...
static Value * simplifyCmpSelOfMaxMin(Value *CmpLHS, Value *CmpRHS, CmpPredicate Pred, Value *TVal, Value *FVal)
static Constant * simplifyFPOp(ArrayRef< Value * > Ops, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior, RoundingMode Rounding)
Perform folds that are common to any floating-point operation.
static Value * threadCmpOverSelect(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a comparison with a select instruction, try to simplify the comparison by seeing wheth...
static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI, const DominatorTree *DT, AssumptionCache *AC, SmallSetVector< Instruction *, 8 > *UnsimplifiedUsers=nullptr)
Implementation of recursive simplification through an instruction's uses.
static bool isAllocDisjoint(const Value *V)
Return true if the underlying object (storage) must be disjoint from storage returned by any noalias ...
static Constant * getTrue(Type *Ty)
For a boolean type or a vector of boolean type, return true or a vector with every element true.
static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q, unsigned MaxRecurse, bool IsSigned)
Return true if we can simplify X / Y to 0.
static Value * simplifyLdexp(Value *Op0, Value *Op1, const SimplifyQuery &Q, bool IsStrict)
static Value * simplifyLogicOfAddSub(Value *Op0, Value *Op1, Instruction::BinaryOps Opcode)
Given a bitwise logic op, check if the operands are add/sub with a common source value and inverted c...
static Value * simplifySelectWithBitTest(Value *CondVal, Value *TrueVal, Value *FalseVal)
An alternative way to test if a bit is set or not.
static Value * simplifyOrLogic(Value *X, Value *Y)
static Type * getCompareTy(Value *Op)
static Value * simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1, const SimplifyQuery &Q)
static bool isICmpTrue(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
Given a predicate and two operands, return true if the comparison is true.
bool isSelectWithIdenticalPHI(PHINode &PN, PHINode &IdenticalPN)
Look for the following pattern and simplify to_fold to identicalPhi.
static APInt stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V)
Compute the base pointer and cumulative constant offsets for V.
static Value * foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1, int MaskVal, Value *RootVec, unsigned MaxRecurse)
For the given destination element of a shuffle, peek through shuffles to match a root vector source o...
static Value * simplifyAndOrOfFCmps(const SimplifyQuery &Q, FCmpInst *LHS, FCmpInst *RHS, bool IsAnd)
static MinMaxOptResult OptimizeConstMinMax(const Constant *RHSConst, const Intrinsic::ID IID, const CallBase *Call, Constant **OutNewConstVal)
static Value * simplifyICmpWithConstant(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
static Value * extractEquivalentCondition(Value *V, CmpPredicate Pred, Value *LHS, Value *RHS)
Rummage around inside V looking for something equivalent to the comparison "LHS Pred RHS".
static Value * simplifyAndOrOfCmps(const SimplifyQuery &Q, Value *Op0, Value *Op1, bool IsAnd)
static Value * threadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a binary operation with a select instruction as an operand, try to simplify the binop ...
static Constant * computePointerDifference(const DataLayout &DL, Value *LHS, Value *RHS)
Compute the constant difference between two pointer values.
static Value * simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd)
Test if a pair of compares with a shared operand and 2 constants has an empty set intersection,...
static Value * simplifyAndOrWithICmpEq(unsigned Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
static Value * simplifyICmpWithDominatingAssume(CmpPredicate Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q)
static Value * simplifyShift(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool IsNSW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an Shl, LShr or AShr, see if we can fold the result.
static Value * simplifySVEIntReduction(Intrinsic::ID IID, Type *ReturnType, Value *Op0, Value *Op1)
static Constant * computePointerICmp(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
static Value * simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
These are simplifications common to SRem and URem.
static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT)
Does the given value dominate the specified phi node?
static Value * simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal, Value *FalseVal, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a select instruction when its condition operand is an integer comparison.
static Value * foldMinimumMaximumSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1)
Given a min/max intrinsic, see if it can be removed based on having an operand that is another min/ma...
static Value * simplifyUnaryIntrinsic(Function *F, Value *Op0, const SimplifyQuery &Q, const CallBase *Call)
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
This header provides classes for managing per-loop analyses.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define T
uint64_t IntrinsicInst * II
#define P(N)
const SmallVectorImpl< MachineOperand > & Cond
This file contains some templates that are useful if you are working with the STL at all.
This file implements a set that has insertion order iteration characteristics.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
static unsigned getScalarSizeInBits(Type *Ty)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
Value * RHS
Value * LHS
BinaryOperator * Mul
static const uint32_t IV[8]
Definition blake3_impl.h:83
bool isNegative() const
Definition APFloat.h:1516
APFloat makeQuiet() const
Assuming this is an IEEE-754 NaN value, quiet its signaling bit.
Definition APFloat.h:1371
bool isNaN() const
Definition APFloat.h:1514
bool isSignaling() const
Definition APFloat.h:1518
bool isLargest() const
Definition APFloat.h:1532
bool isInfinity() const
Definition APFloat.h:1513
Class for arbitrary precision integers.
Definition APInt.h:78
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition APInt.cpp:1064
unsigned getActiveBits() const
Compute the number of active bits in the value.
Definition APInt.h:1527
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
Definition APInt.h:207
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
Definition APInt.h:1189
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:381
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
Definition APInt.cpp:1697
void setSignBit()
Set the sign bit to 1.
Definition APInt.h:1355
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1503
bool ult(const APInt &RHS) const
Unsigned less than comparison.
Definition APInt.h:1118
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
Definition APInt.h:210
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
Definition APInt.h:1256
bool sle(const APInt &RHS) const
Signed less or equal comparison.
Definition APInt.h:1173
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition APInt.h:1654
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
Definition APInt.h:220
bool isNonPositive() const
Determine if this APInt Value is non-positive (<= 0).
Definition APInt.h:362
LLVM_ABI APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition APInt.cpp:1072
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
Definition APInt.h:357
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
Definition APInt.h:476
bool getBoolValue() const
Convert APInt to a boolean value.
Definition APInt.h:472
LLVM_ABI APInt srem(const APInt &RHS) const
Function for signed remainder operation.
Definition APInt.cpp:1776
bool isMask(unsigned numBits) const
Definition APInt.h:489
bool isMaxSignedValue() const
Determine if this is the largest signed value.
Definition APInt.h:406
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition APInt.h:335
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1157
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
Definition APInt.h:1264
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:307
bool isSignBitSet() const
Determine if sign bit of this APInt is set.
Definition APInt.h:342
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition APInt.h:297
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
bool isOne() const
Determine if this is a value of 1.
Definition APInt.h:390
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
Definition APInt.h:240
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition APInt.h:1228
an instruction to allocate memory on the stack
A container for analyses that lazily runs them and caches their results.
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
const T & back() const
back - Get the last element.
Definition ArrayRef.h:151
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
Definition ArrayRef.h:201
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition ArrayRef.h:186
An immutable pass that tracks lazily created AssumptionCache objects.
AssumptionCache & getAssumptionCache(Function &F)
Get the cached assumptions for a function.
A cache of @llvm.assume calls within a function.
MutableArrayRef< ResultElem > assumptionsFor(const Value *V)
Access the list of assumptions which affect this value.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
LLVM_ABI std::optional< unsigned > getVScaleRangeMax() const
Returns the maximum value for the vscale_range attribute or std::nullopt when unknown.
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:261
LLVM Basic Block Representation.
Definition BasicBlock.h:62
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
BinaryOps getOpcode() const
Definition InstrTypes.h:374
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
This class represents a function call, abstracting a target machine's calling convention.
static LLVM_ABI unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, const DataLayout *DL)
Determine how a pair of casts can be eliminated, if they can be at all.
This class is the base class for the comparison instructions.
Definition InstrTypes.h:664
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition InstrTypes.h:986
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
Definition InstrTypes.h:858
bool isFalseWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:948
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:679
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition InstrTypes.h:693
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition InstrTypes.h:682
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition InstrTypes.h:691
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition InstrTypes.h:680
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition InstrTypes.h:681
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition InstrTypes.h:690
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition InstrTypes.h:684
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition InstrTypes.h:687
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition InstrTypes.h:688
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition InstrTypes.h:683
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition InstrTypes.h:685
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition InstrTypes.h:692
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition InstrTypes.h:689
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition InstrTypes.h:678
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition InstrTypes.h:686
bool isSigned() const
Definition InstrTypes.h:930
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
bool isTrueWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:942
static bool isFPPredicate(Predicate P)
Definition InstrTypes.h:770
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Definition InstrTypes.h:871
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
Predicate getPredicate() const
Return the predicate for this instruction.
Definition InstrTypes.h:765
static LLVM_ABI bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
static LLVM_ABI bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
bool isUnsigned() const
Definition InstrTypes.h:936
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getExtractElement(Constant *Vec, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static LLVM_ABI Constant * getBinOpAbsorber(unsigned Opcode, Type *Ty, bool AllowLHSConstant=false)
Return the absorbing element for the given binary operation, i.e.
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getInsertElement(Constant *Vec, Constant *Elt, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static LLVM_ABI Constant * getShuffleVector(Constant *V1, Constant *V2, ArrayRef< int > Mask, Type *OnlyIfReducedTy=nullptr)
static bool isSupportedGetElementPtr(const Type *SrcElemTy)
Whether creating a constant expression for this getelementptr type is supported.
Definition Constants.h:1573
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
Definition Constants.h:1445
static LLVM_ABI Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
static LLVM_ABI std::optional< ConstantFPRange > makeExactFCmpRegion(FCmpInst::Predicate Pred, const APFloat &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:420
const APFloat & getValueAPF() const
Definition Constants.h:463
static LLVM_ABI Constant * getZero(Type *Ty, bool Negative=false)
static Constant * getNegativeZero(Type *Ty)
Definition Constants.h:458
static LLVM_ABI Constant * getNaN(Type *Ty, bool Negative=false, uint64_t Payload=0)
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getSigned(IntegerType *Ty, int64_t V, bool ImplicitTrunc=false)
Return a ConstantInt with the specified value for the specified type.
Definition Constants.h:135
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
static LLVM_ABI ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
This class represents a range of values.
LLVM_ABI ConstantRange multiply(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a multiplication of a value in thi...
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
LLVM_ABI APInt getUnsignedMin() const
Return the smallest unsigned value contained in the ConstantRange.
LLVM_ABI bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
LLVM_ABI bool isEmptySet() const
Return true if this set contains no members.
bool isSingleElement() const
Return true if this set contains exactly one member.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI ConstantRange inverse() const
Return a new range that is the logical not of the current set.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
static LLVM_ABI Constant * get(StructType *T, ArrayRef< Constant * > V)
static LLVM_ABI Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
LLVM_ABI bool isAllOnesValue() const
Return true if this is the value that would be returned by getAllOnesValue.
Definition Constants.cpp:95
LLVM_ABI bool isMaxSignedValue() const
Return true if the value is the largest signed value.
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI bool isNaN() const
Return true if this is a floating-point NaN constant or a vector floating-point constant with all NaN...
LLVM_ABI bool isMinSignedValue() const
Return true if the value is the smallest signed value.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:74
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
unsigned getAddressSizeInBits(unsigned AS) const
The size in bits of an address in for the given AS.
Definition DataLayout.h:511
IntegerType * getAddressType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of an address in AddressSpace.
Definition DataLayout.h:683
LLVM_ABI unsigned getIndexTypeSizeInBits(Type *Ty) const
The size in bits of the index used in GEP calculation for this type.
LLVM_ABI IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
LLVM_ABI TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
unsigned getIndexSizeInBits(unsigned AS) const
The size in bits of indices used for address calculation in getelementptr and for addresses in the gi...
Definition DataLayout.h:502
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition DataLayout.h:784
Legacy analysis pass which computes a DominatorTree.
Definition Dominators.h:316
DominatorTree & getDomTree()
Definition Dominators.h:324
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
This instruction extracts a struct member or array element value from an aggregate value.
This instruction compares its operands according to the predicate given to the constructor.
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:23
bool noSignedZeros() const
Definition FMF.h:70
bool noInfs() const
Definition FMF.h:69
bool allowReassoc() const
Flag queries.
Definition FMF.h:67
bool noNaNs() const
Definition FMF.h:68
Represents calls to the gc.relocate intrinsic.
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
Represents flags for the getelementptr instruction/expression.
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
This instruction compares its operands according to the predicate given to the constructor.
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
This instruction inserts a struct field or array element value into an aggregate value.
LLVM_ABI bool hasNoSignedZeros() const LLVM_READONLY
Determine whether the no-signed-zeros flag is set.
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
LLVM_ABI bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
An instruction for reading from memory.
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Metadata node.
Definition Metadata.h:1080
static APInt getSaturationPoint(Intrinsic::ID ID, unsigned numBits)
Min/max intrinsics are monotonic, they operate on a fixed-bitwidth values, so there is a certain thre...
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
op_range incoming_values()
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Pass interface - Implemented by all 'passes'.
Definition Pass.h:99
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
This class represents a sign extension of integer types.
This class represents the LLVM 'select' instruction.
const Value * getFalseValue() const
const Value * getTrueValue() const
size_type size() const
Determine the number of elements in the SetVector.
Definition SetVector.h:103
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
static void commuteShuffleMask(MutableArrayRef< int > Mask, unsigned InVecNumElts)
Change values in a shuffle permute mask assuming the two vector operands of length InVecNumElts have ...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
TargetLibraryInfo & getTLI(const Function &F)
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:290
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:313
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:263
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:370
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:130
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
Value * getOperand(unsigned i) const
Definition User.h:207
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
Definition Value.h:737
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:258
LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const
Accumulate the constant offset this value has compared to a base pointer.
Base class of all SIMD vector types.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
This class represents zero extension of integer types.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr bool isFixed() const
Returns true if the quantity is not scaled by vscale.
Definition TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
const ParentTy * getParent() const
Definition ilist_node.h:34
CallInst * Call
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
match_combine_or< Ty... > m_CombineOr(const Ty &...Ps)
Combine pattern matchers matching any of Ps patterns.
match_combine_and< Ty... > m_CombineAnd(const Ty &...Ps)
Combine pattern matchers matching all of Ps patterns.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
cst_pred_ty< is_negative > m_Negative()
Match an integer or vector of negative values.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::FMul, true > m_c_FMul(const LHS &L, const RHS &R)
Matches FMul with LHS and RHS in either order.
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
auto m_PtrToIntOrAddr(const OpTy &Op)
Matches PtrToInt or PtrToAddr.
match_combine_or< typename m_Intrinsic_Ty< Opnd0, Opnd1 >::Ty, typename m_Intrinsic_Ty< Opnd0, Opnd1 >::Ty > m_FMinNum_or_FMinimumNum(const Opnd0 &Op0, const Opnd1 &Op1)
cstfp_pred_ty< is_inf > m_Inf()
Match a positive or negative infinity FP constant.
m_Intrinsic_Ty< Opnd0 >::Ty m_BitReverse(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::FSub > m_FSub(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< cstfp_pred_ty< is_any_zero_fp >, RHS, Instruction::FSub > m_FNegNSZ(const RHS &X)
Match 'fneg X' as 'fsub +-0.0, X'.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
ap_match< APFloat > m_APFloatAllowPoison(const APFloat *&Res)
Match APFloat while allowing poison in splat vector constants.
CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
auto match_fn(const Pattern &P)
A match functor that can be used as a UnaryPredicate in functional algorithms like all_of.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
cstfp_pred_ty< is_neg_zero_fp > m_NegZeroFP()
Match a floating-point negative zero.
auto m_BinOp()
Match an arbitrary binary operation and ignore it.
specific_fpval m_SpecificFP(double V)
Match a specific floating point value or vector with all elements equal to the value.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, UIToFPInst >, CastInst_match< OpTy, SIToFPInst > > m_IToFP(const OpTy &Op)
auto m_Value()
Match an arbitrary value and ignore it.
m_Intrinsic_Ty< Opnd0 >::Ty m_Sqrt(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
auto m_Constant()
Match an arbitrary Constant and ignore it.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoSignedWrap > m_NSWShl(const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
NoWrapTrunc_match< OpTy, TruncInst::NoUnsignedWrap > m_NUWTrunc(const OpTy &Op)
Matches trunc nuw.
cst_pred_ty< custom_checkfn< APInt > > m_CheckedInt(function_ref< bool(const APInt &)> CheckFn)
Match an integer or vector where CheckFn(ele) for each element is true.
auto m_MaxOrMin(const LHS &L, const RHS &R)
specific_fpval m_FPOne()
Match a float 1.0 or vector with all elements equal to 1.0.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShl(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
auto m_c_MaxOrMin(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Exact_match< T > m_Exact(const T &SubPattern)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
BinaryOp_match< LHS, RHS, Instruction::FAdd, true > m_c_FAdd(const LHS &L, const RHS &R)
Matches FAdd with LHS and RHS in either order.
LogicalOp_match< LHS, RHS, Instruction::And, true > m_c_LogicalAnd(const LHS &L, const RHS &R)
Matches L && R with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShr(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
cstfp_pred_ty< is_nan > m_NaN()
Match an arbitrary NaN constant.
match_combine_or< typename m_Intrinsic_Ty< Opnd0, Opnd1 >::Ty, typename m_Intrinsic_Ty< Opnd0, Opnd1 >::Ty > m_FMaxNum_or_FMaximumNum(const Opnd0 &Op0, const Opnd1 &Op1)
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
LogicalOp_match< LHS, RHS, Instruction::Or, true > m_c_LogicalOr(const LHS &L, const RHS &R)
Matches L || R with LHS and RHS in either order.
ThreeOps_match< Val_t, Elt_t, Idx_t, Instruction::InsertElement > m_InsertElt(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx)
Matches InsertElementInst.
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoSignedWrap > m_NSWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
auto m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
ExceptionBehavior
Exception behavior used for floating point operations.
Definition FPEnv.h:39
@ ebStrict
This corresponds to "fpexcept.strict".
Definition FPEnv.h:42
@ ebIgnore
This corresponds to "fpexcept.ignore".
Definition FPEnv.h:40
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
LLVM_ABI Value * simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q)
Given operands for an AShr, fold the result or return null.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
Definition MathExtras.h:344
@ Offset
Definition DWP.cpp:532
LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, const SimplifyQuery &SQ, unsigned Depth=0)
Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
LLVM_ABI Value * simplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FMul, fold the result or return null.
LLVM_ABI Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
LLVM_ABI bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)
Return true if it is valid to use the assumptions provided by an assume intrinsic,...
LLVM_ABI bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
LLVM_ABI Constant * ConstantFoldSelectInstruction(Constant *Cond, Constant *V1, Constant *V2)
Attempt to constant fold a select instruction with the specified operands.
LLVM_ABI Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
LLVM_ABI Constant * ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL, const Instruction *I, bool AllowNonDeterministic=true)
Attempt to constant fold a floating point binary operation with the specified operands,...
LLVM_ABI bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
LLVM_ABI bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if its even possible to fold a call to the specified function.
LLVM_ABI APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth)
Return the minimum or maximum constant value for the specified integer min/max flavor and type.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI Value * simplifySDivInst(Value *LHS, Value *RHS, bool IsExact, const SimplifyQuery &Q)
Given operands for an SDiv, fold the result or return null.
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:328
LLVM_ABI Value * simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q)
Given operand for a UnaryOperator, fold the result or return null.
bool isDefaultFPEnvironment(fp::ExceptionBehavior EB, RoundingMode RM)
Returns true if the exception handling behavior and rounding mode match what is used in the default f...
Definition FPEnv.h:68
LLVM_ABI Value * simplifyMulInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Mul, fold the result or return null.
LLVM_ABI bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, APInt &Offset, const DataLayout &DL, DSOLocalEquivalent **DSOEquiv=nullptr)
If this constant is a constant offset from a global, return the global and the constant.
LLVM_ABI Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
LLVM_ABI Value * simplifyCall(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
Given a callsite, callee, and arguments, fold the result or return null.
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
bool canRoundingModeBe(RoundingMode RM, RoundingMode QRM)
Returns true if the rounding mode RM may be QRM at compile time or at run time.
Definition FPEnv.h:80
LLVM_ABI bool isNoAliasCall(const Value *V)
Return true if this pointer is returned by a noalias function.
LLVM_ABI Value * simplifyFCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q)
Given operands for an FCmpInst, fold the result or return null.
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
LLVM_ABI Constant * ConstantFoldGetElementPtr(Type *Ty, Constant *C, std::optional< ConstantRange > InRange, ArrayRef< Value * > Idxs)
LLVM_ABI CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered=false)
Return the canonical comparison predicate for the specified minimum/maximum flavor.
constexpr auto equal_to(T &&Arg)
Functor variant of std::equal_to that can be used as a UnaryPredicate in functional algorithms like a...
Definition STLExtras.h:2173
LLVM_ABI Value * simplifyShuffleVectorInst(Value *Op0, Value *Op1, ArrayRef< int > Mask, Type *RetTy, const SimplifyQuery &Q)
Given operands for a ShuffleVectorInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldCall(const CallBase *Call, Function *F, ArrayRef< Constant * > Operands, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldCall - Attempt to constant fold a call to the specified function with the specified argum...
LLVM_ABI Value * simplifyOrInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an Or, fold the result or return null.
LLVM_ABI Value * simplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an Xor, fold the result or return null.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
LLVM_ABI ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
LLVM_ABI Constant * ConstantFoldExtractValueInstruction(Constant *Agg, ArrayRef< unsigned > Idxs)
Attempt to constant fold an extractvalue instruction with the specified operands and indices.
LLVM_ABI bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...
LLVM_ABI bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if 'V & Mask' is known to be zero.
LLVM_ABI Value * simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty, const SimplifyQuery &Q)
Given operands for a CastInst, fold the result or return null.
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
unsigned M1(unsigned Val)
Definition VE.h:377
LLVM_ABI Value * simplifySubInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Sub, fold the result or return null.
LLVM_ABI Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
LLVM_ABI Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
Definition STLExtras.h:2026
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1746
LLVM_ABI bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL, const TargetLibraryInfo *TLI, ObjectSizeOpts Opts={})
Compute the size of the object pointed by Ptr.
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
LLVM_ABI Constant * ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty, const DataLayout &DL)
If C is a uniform value where all bits are the same (either all zero, all ones, all undef or all pois...
LLVM_ABI SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF)
Return the inverse minimum/maximum flavor of the specified flavor.
LLVM_ABI bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, SmallSetVector< Instruction *, 8 > *UnsimplifiedUsers=nullptr)
Replace all uses of 'I' with 'SimpleV' and simplify the uses recursively.
LLVM_ABI Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
SelectPatternFlavor
Specific patterns of select instructions we can match.
LLVM_ABI Value * simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Shl, fold the result or return null.
LLVM_ABI Value * simplifyFNegInst(Value *Op, FastMathFlags FMF, const SimplifyQuery &Q)
Given operand for an FNeg, fold the result or return null.
LLVM_ABI Value * simplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FSub, fold the result or return null.
LLVM_ABI bool canReplacePointersIfEqual(const Value *From, const Value *To, const DataLayout &DL)
Returns true if a pointer value From can be replaced with another pointer value To if they are deeme...
Definition Loads.cpp:879
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
LLVM_ABI Value * simplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FRem, fold the result or return null.
LLVM_ABI Value * simplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FAdd, fold the result or return null.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI Value * simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q)
Given operands for a LShr, fold the result or return null.
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI bool cannotBeNegativeZero(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if we can prove that the specified FP value is never equal to -0.0.
LLVM_ABI Value * simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an ICmpInst, fold the result or return null.
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI Value * simplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an And, fold the result or return null.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI bool intrinsicPropagatesPoison(Intrinsic::ID IID)
Return whether this intrinsic propagates poison for all operands.
LLVM_ABI Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
LLVM_ABI bool isNotCrossLaneOperation(const Instruction *I)
Return true if the instruction doesn't potentially cross vector lanes.
LLVM_ABI Value * simplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an InsertValueInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI Value * simplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FDiv, fold the result or return null.
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
LLVM_ABI Value * simplifyLoadInst(LoadInst *LI, Value *PtrOp, const SimplifyQuery &Q)
Given a load instruction and its pointer operand, fold the result or return null.
LLVM_ABI Value * simplifyFMAFMul(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for the multiplication of a FMA, fold the result or return null.
LLVM_ABI SelectPatternResult matchDecomposedSelectPattern(CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, FastMathFlags FMF=FastMathFlags(), Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Determine the pattern that a select with the given compare as its predicate and given values as its t...
LLVM_ABI Value * simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q)
Given a constrained FP intrinsic call, tries to compute its simplified version.
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
std::optional< DecomposedBitTest > decomposeBitTest(Value *Cond, bool LookThroughTrunc=true, bool AllowNonZeroC=false, bool DecomposeAnd=false)
Decompose an icmp into the form ((X & Mask) pred C) if possible.
LLVM_ABI Value * findScalarElement(Value *V, unsigned EltNo)
Given a vector and an element number, see if the scalar value is already around as a register,...
LLVM_ABI ConstantRange computeConstantRangeIncludingKnownBits(const WithCache< const Value * > &V, bool ForSigned, const SimplifyQuery &SQ)
Combine constant ranges from computeConstantRange() and computeKnownBits().
LLVM_ABI bool isKnownNonEqual(const Value *V1, const Value *V2, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the given values are known to be non-equal when defined.
LLVM_ABI Value * simplifyUDivInst(Value *LHS, Value *RHS, bool IsExact, const SimplifyQuery &Q)
Given operands for a UDiv, fold the result or return null.
DWARFExpression::Operation Op
LLVM_ABI bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, unsigned MaxUsesToExplore=0)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (w...
LLVM_ABI Value * simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType, Value *Op0, Value *Op1, const SimplifyQuery &Q, const CallBase *Call)
Given operands for a BinaryIntrinsic, fold the result or return null.
RoundingMode
Rounding mode.
@ NearestTiesToEven
roundTiesToEven.
@ TowardNegative
roundTowardNegative.
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
unsigned M0(unsigned Val)
Definition VE.h:376
LLVM_ABI unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return the number of times the sign bit of the register is replicated into the other bits.
LLVM_ABI Value * simplifyInsertElementInst(Value *Vec, Value *Elt, Value *Idx, const SimplifyQuery &Q)
Given operands for an InsertElement, fold the result or return null.
constexpr unsigned BitWidth
LLVM_ABI Value * simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp, const SimplifyQuery &Q, bool AllowRefinement, SmallVectorImpl< Instruction * > *DropFlags=nullptr)
See if V simplifies when its operand Op is replaced with RepOp.
LLVM_ABI bool maskIsAllZeroOrUndef(Value *Mask)
Given a mask vector of i1, Return true if all of the elements of this predicate mask are known to be ...
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI Value * simplifySRemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an SRem, fold the result or return null.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer list are equal or the list is empty.
Definition STLExtras.h:2166
LLVM_ABI Constant * ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val, ArrayRef< unsigned > Idxs)
Attempt to constant fold an insertvalue instruction with the specified operands and indices.
LLVM_ABI Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return true if the given value is known to have exactly one bit set when defined.
@ Continue
Definition DWP.h:22
LLVM_ABI std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on do...
LLVM_ABI Value * simplifyCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a CmpInst, fold the result or return null.
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_ABI Constant * ConstantFoldInstOperands(const Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
LLVM_ABI bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW=false, bool AllowPoison=true)
Return true if the two given values are negation.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
LLVM_ABI Constant * ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned, const DataLayout &DL)
Constant fold a zext, sext or trunc, depending on IsSigned and whether the DestTy is wider or narrowe...
LLVM_ABI const SimplifyQuery getBestSimplifyQuery(Pass &, Function &)
std::pair< Value *, FPClassTest > fcmpToClassTest(FCmpInst::Predicate Pred, const Function &F, Value *LHS, Value *RHS, bool LookThroughSrc=true)
Returns a pair of values, which if passed to llvm.is.fpclass, returns the same result as an fcmp with...
LLVM_ABI void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=MaxLookupSearchDepth)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
bool isCheckForZeroAndMulWithOverflow(Value *Op0, Value *Op1, bool IsAnd, Use *&Y)
Match one of the patterns up to the select/logic op: Op0 = icmp ne i4 X, 0 Agg = call { i4,...
bool canIgnoreSNaN(fp::ExceptionBehavior EB, FastMathFlags FMF)
Returns true if the possibility of a signaling NaN can be safely ignored.
Definition FPEnv.h:86
LLVM_ABI Value * simplifyURemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a URem, fold the result or return null.
LLVM_ABI Value * simplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &Q)
Given operands for an ExtractElementInst, fold the result or return null.
LLVM_ABI Value * simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal, const SimplifyQuery &Q)
Given operands for a SelectInst, fold the result or return null.
constexpr detail::IsaCheckPredicate< Types... > IsaPred
Function object wrapper for the llvm::isa type check.
Definition Casting.h:866
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define N
This callback is used in conjunction with PointerMayBeCaptured.
virtual Action captured(const Use *U, UseCaptureInfo CI)=0
Use U directly captures CI.UseCC and additionally CI.ResultCC through the return value of the user of...
virtual void tooManyUses()=0
tooManyUses - The depth of traversal has breached a limit.
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
InstrInfoQuery provides an interface to query additional information for instructions like metadata o...
bool isExact(const BinaryOperator *Op) const
MDNode * getMetadata(const Instruction *I, unsigned KindID) const
bool hasNoSignedWrap(const InstT *Op) const
bool hasNoUnsignedWrap(const InstT *Op) const
bool isNonNegative() const
Returns true if this value is known to be non-negative.
Definition KnownBits.h:108
bool isZero() const
Returns true if value is all zero.
Definition KnownBits.h:80
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
Definition KnownBits.h:258
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
Definition KnownBits.h:290
bool hasConflict() const
Returns true if there is conflicting information.
Definition KnownBits.h:51
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
unsigned countMaxActiveBits() const
Returns the maximum number of bits needed to represent all possible unsigned values with these known ...
Definition KnownBits.h:312
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition KnownBits.h:264
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
Definition KnownBits.h:148
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
Definition KnownBits.h:132
bool isNegative() const
Returns true if this value is known to be negative.
Definition KnownBits.h:105
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
bool isKnownAlwaysNaN() const
Return true if it's known this must always be a nan.
static constexpr FPClassTest OrderedLessThanZeroMask
std::optional< bool > SignBit
std::nullopt if the sign bit is unknown, true if the sign bit is definitely set or false if the sign ...
bool isKnownNeverNaN() const
Return true if it's known this can never be a nan.
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
bool cannotBeOrderedLessThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never less than -...
The adaptor from a function pass to a loop pass computes these analyses and makes them available to t...
Various options to control the behavior of getObjectSize.
bool NullIsUnknownSize
If this is true, null pointers in address space 0 will be treated as though they can't be evaluated.
Mode EvalMode
How we want to evaluate this object's size.
@ Min
Evaluate all branches of an unknown condition.
SelectPatternFlavor Flavor
static bool isMinOrMax(SelectPatternFlavor SPF)
When implementing this min/max pattern as fcmp; select, does the fcmp have to be ordered?
const DataLayout & DL
const Instruction * CxtI
bool CanUseUndef
Controls whether simplifications are allowed to constrain the range of possible values for uses of un...
const DominatorTree * DT
SimplifyQuery getWithInstruction(const Instruction *I) const
LLVM_ABI bool isUndefValue(Value *V) const
If CanUseUndef is true, returns whether V is undef.
AssumptionCache * AC
const TargetLibraryInfo * TLI
SimplifyQuery getWithoutUndef() const
const InstrInfoQuery IIQ
Capture information for a specific Use.