//===- InstructionSimplify.cpp - Fold instruction operands ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements routines for folding instructions into simpler forms
// that do not require creating new instructions. This does constant folding
// ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
// returning a constant ("and i32 %x, 0" -> "0") or an already existing value
// ("and i32 %x, %x" -> "%x"). All operands are assumed to have already been
// simplified: This is usually true and assuming it simplifies the logic (if
// they have not been simplified then results are correct but maybe suboptimal).
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/InstructionSimplify.h"

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Support/KnownBits.h"
#include <algorithm>
#include <optional>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instsimplify"

enum { RecursionLimit = 3 };

STATISTIC(NumExpand, "Number of expansions");
STATISTIC(NumReassoc, "Number of reassociations");

static Value *simplifyAndInst(Value *, Value *, const SimplifyQuery &,
                              unsigned);
static Value *simplifyUnOp(unsigned, Value *, const SimplifyQuery &, unsigned);
static Value *simplifyFPUnOp(unsigned, Value *, const FastMathFlags &,
                             const SimplifyQuery &, unsigned);
static Value *simplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &,
                            unsigned);
static Value *simplifyBinOp(unsigned, Value *, Value *, const FastMathFlags &,
                            const SimplifyQuery &, unsigned);
static Value *simplifyCmpInst(CmpPredicate, Value *, Value *,
                              const SimplifyQuery &, unsigned);
static Value *simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS,
                               const SimplifyQuery &Q, unsigned MaxRecurse);
static Value *simplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *simplifyXorInst(Value *, Value *, const SimplifyQuery &,
                              unsigned);
static Value *simplifyCastInst(unsigned, Value *, Type *, const SimplifyQuery &,
                               unsigned);
static Value *simplifyGEPInst(Type *, Value *, ArrayRef<Value *>,
                              GEPNoWrapFlags, const SimplifyQuery &, unsigned);
static Value *simplifySelectInst(Value *, Value *, Value *,
                                 const SimplifyQuery &, unsigned);
static Value *simplifyInstructionWithOperands(Instruction *I,
                                              ArrayRef<Value *> NewOps,
                                              const SimplifyQuery &SQ,
                                              unsigned MaxRecurse);

/// For a boolean type or a vector of boolean type, return false or a vector
/// with every element false.
static Constant *getFalse(Type *Ty) { return ConstantInt::getFalse(Ty); }

/// For a boolean type or a vector of boolean type, return true or a vector
/// with every element true.
static Constant *getTrue(Type *Ty) { return ConstantInt::getTrue(Ty); }

/// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
static bool isSameCompare(Value *V, CmpPredicate Pred, Value *LHS, Value *RHS) {
  CmpInst *Cmp = dyn_cast<CmpInst>(V);
  if (!Cmp)
    return false;
  CmpInst::Predicate CPred = Cmp->getPredicate();
  Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
  if (CPred == Pred && CLHS == LHS && CRHS == RHS)
    return true;
  return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS &&
         CRHS == LHS;
}

/// Simplify comparison with true or false branch of select:
///  %sel = select i1 %cond, i32 %tv, i32 %fv
///  %cmp = icmp sle i32 %sel, %rhs
/// Compose new comparison by substituting %sel with either %tv or %fv
/// and see if it simplifies.
static Value *simplifyCmpSelCase(CmpPredicate Pred, Value *LHS, Value *RHS,
                                 Value *Cond, const SimplifyQuery &Q,
                                 unsigned MaxRecurse, Constant *TrueOrFalse) {
  Value *SimplifiedCmp = simplifyCmpInst(Pred, LHS, RHS, Q, MaxRecurse);
  if (SimplifiedCmp == Cond) {
    // %cmp simplified to the select condition (%cond).
    return TrueOrFalse;
  } else if (!SimplifiedCmp && isSameCompare(Cond, Pred, LHS, RHS)) {
    // It didn't simplify. However, if composed comparison is equivalent
    // to the select condition (%cond) then we can replace it.
    return TrueOrFalse;
  }
  return SimplifiedCmp;
}

/// Simplify comparison with true branch of select
static Value *simplifyCmpSelTrueCase(CmpPredicate Pred, Value *LHS, Value *RHS,
                                     Value *Cond, const SimplifyQuery &Q,
                                     unsigned MaxRecurse) {
  return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
                            getTrue(Cond->getType()));
}

/// Simplify comparison with false branch of select
static Value *simplifyCmpSelFalseCase(CmpPredicate Pred, Value *LHS, Value *RHS,
                                      Value *Cond, const SimplifyQuery &Q,
                                      unsigned MaxRecurse) {
  return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
                            getFalse(Cond->getType()));
}

/// We know comparison with both branches of select can be simplified, but they
/// are not equal. This routine handles some logical simplifications.
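/// Illustrative example (not taken from the code below): comparing
///   %sel = select i1 %c, i32 %x, i32 0
/// against zero with "icmp ne %sel, 0" gives TCmp = (icmp ne %x, 0) and
/// FCmp = false, so the first case below folds the compare to "%c && TCmp".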
static Value *handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp,
                                               Value *Cond,
                                               const SimplifyQuery &Q,
                                               unsigned MaxRecurse) {
  // If the false value simplified to false, then the result of the compare
  // is equal to "Cond && TCmp". This also catches the case when the false
  // value simplified to false and the true value to true, returning "Cond".
  // Folding select to and/or isn't poison-safe in general; impliesPoison
  // checks whether folding it does not convert a well-defined value into
  // poison.
  if (match(FCmp, m_Zero()) && impliesPoison(TCmp, Cond))
    if (Value *V = simplifyAndInst(Cond, TCmp, Q, MaxRecurse))
      return V;
  // If the true value simplified to true, then the result of the compare
  // is equal to "Cond || FCmp".
  if (match(TCmp, m_One()) && impliesPoison(FCmp, Cond))
    if (Value *V = simplifyOrInst(Cond, FCmp, Q, MaxRecurse))
      return V;
  // Finally, if the false value simplified to true and the true value to
  // false, then the result of the compare is equal to "!Cond".
  if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
    if (Value *V = simplifyXorInst(
            Cond, Constant::getAllOnesValue(Cond->getType()), Q, MaxRecurse))
      return V;
  return nullptr;
}

/// Does the given value dominate the specified phi node?
static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    // Arguments and constants dominate all instructions.
    return true;

  // If we have a DominatorTree then do a precise test.
  if (DT)
    return DT->dominates(I, P);

  // Otherwise, if the instruction is in the entry block and is not an invoke,
  // then it obviously dominates all phi nodes.
  if (I->getParent()->isEntryBlock() && !isa<InvokeInst>(I) &&
      !isa<CallBrInst>(I))
    return true;

  return false;
}

/// Try to simplify a binary operator of form "V op OtherOp" where V is
/// "(B0 opex B1)" by distributing 'op' across 'opex' as
/// "(B0 op OtherOp) opex (B1 op OtherOp)".
static Value *expandBinOp(Instruction::BinaryOps Opcode, Value *V,
                          Value *OtherOp, Instruction::BinaryOps OpcodeToExpand,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  auto *B = dyn_cast<BinaryOperator>(V);
  if (!B || B->getOpcode() != OpcodeToExpand)
    return nullptr;
  Value *B0 = B->getOperand(0), *B1 = B->getOperand(1);
  Value *L =
      simplifyBinOp(Opcode, B0, OtherOp, Q.getWithoutUndef(), MaxRecurse);
  if (!L)
    return nullptr;
  Value *R =
      simplifyBinOp(Opcode, B1, OtherOp, Q.getWithoutUndef(), MaxRecurse);
  if (!R)
    return nullptr;

  // Does the expanded pair of binops simplify to the existing binop?
  if ((L == B0 && R == B1) ||
      (Instruction::isCommutative(OpcodeToExpand) && L == B1 && R == B0)) {
    ++NumExpand;
    return B;
  }

  // Otherwise, return "L op' R" if it simplifies.
  Value *S = simplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse);
  if (!S)
    return nullptr;

  ++NumExpand;
  return S;
}

/// Try to simplify binops of form "A op (B op' C)" or the commuted variant by
/// distributing op over op'.
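/// For instance, "X * (Y + Z)" (Opcode = Mul, OpcodeToExpand = Add) is
/// expanded to "(X * Y) + (X * Z)" and used only if both partial products
/// and their sum simplify; both operand orders are tried via expandBinOp.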
static Value *expandCommutativeBinOp(Instruction::BinaryOps Opcode, Value *L,
                                     Value *R,
                                     Instruction::BinaryOps OpcodeToExpand,
                                     const SimplifyQuery &Q,
                                     unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  if (Value *V = expandBinOp(Opcode, L, R, OpcodeToExpand, Q, MaxRecurse))
    return V;
  if (Value *V = expandBinOp(Opcode, R, L, OpcodeToExpand, Q, MaxRecurse))
    return V;
  return nullptr;
}

/// Generic simplifications for associative binary operations.
/// Returns the simpler value, or null if none was found.
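/// Illustrative example for the first transform below: for "(A & B) & B",
/// reassociating gives "A & (B & B)"; "B & B" simplifies to B, so the whole
/// expression folds to the existing value "A & B".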
static Value *simplifyAssociativeBinOp(Instruction::BinaryOps Opcode,
                                       Value *LHS, Value *RHS,
                                       const SimplifyQuery &Q,
                                       unsigned MaxRecurse) {
  assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");

  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);

  // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely.
  if (Op0 && Op0->getOpcode() == Opcode) {
    Value *A = Op0->getOperand(0);
    Value *B = Op0->getOperand(1);
    Value *C = RHS;

    // Does "B op C" simplify?
    if (Value *V = simplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
      // It does! Return "A op V" if it simplifies or is already available.
      // If V equals B then "A op V" is just the LHS.
      if (V == B)
        return LHS;
      // Otherwise return "A op V" if it simplifies.
      if (Value *W = simplifyBinOp(Opcode, A, V, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely.
  if (Op1 && Op1->getOpcode() == Opcode) {
    Value *A = LHS;
    Value *B = Op1->getOperand(0);
    Value *C = Op1->getOperand(1);

    // Does "A op B" simplify?
    if (Value *V = simplifyBinOp(Opcode, A, B, Q, MaxRecurse)) {
      // It does! Return "V op C" if it simplifies or is already available.
      // If V equals B then "V op C" is just the RHS.
      if (V == B)
        return RHS;
      // Otherwise return "V op C" if it simplifies.
      if (Value *W = simplifyBinOp(Opcode, V, C, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // The remaining transforms require commutativity as well as associativity.
  if (!Instruction::isCommutative(Opcode))
    return nullptr;

  // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
  if (Op0 && Op0->getOpcode() == Opcode) {
    Value *A = Op0->getOperand(0);
    Value *B = Op0->getOperand(1);
    Value *C = RHS;

    // Does "C op A" simplify?
    if (Value *V = simplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
      // It does! Return "V op B" if it simplifies or is already available.
      // If V equals A then "V op B" is just the LHS.
      if (V == A)
        return LHS;
      // Otherwise return "V op B" if it simplifies.
      if (Value *W = simplifyBinOp(Opcode, V, B, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely.
  if (Op1 && Op1->getOpcode() == Opcode) {
    Value *A = LHS;
    Value *B = Op1->getOperand(0);
    Value *C = Op1->getOperand(1);

    // Does "C op A" simplify?
    if (Value *V = simplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
      // It does! Return "B op V" if it simplifies or is already available.
      // If V equals C then "B op V" is just the RHS.
      if (V == C)
        return RHS;
      // Otherwise return "B op V" if it simplifies.
      if (Value *W = simplifyBinOp(Opcode, B, V, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  return nullptr;
}

/// In the case of a binary operation with a select instruction as an operand,
/// try to simplify the binop by seeing whether evaluating it on both branches
/// of the select results in the same value. Returns the common value if so,
/// otherwise returns null.
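/// Illustrative example: "(select i1 %c, i8 0, i8 4) & 1" evaluates to 0 on
/// both arms of the select, so the binop simplifies to 0 regardless of %c.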
static Value *threadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS,
                                    Value *RHS, const SimplifyQuery &Q,
                                    unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  SelectInst *SI;
  if (isa<SelectInst>(LHS)) {
    SI = cast<SelectInst>(LHS);
  } else {
    assert(isa<SelectInst>(RHS) && "No select instruction operand!");
    SI = cast<SelectInst>(RHS);
  }

  // Evaluate the BinOp on the true and false branches of the select.
  Value *TV;
  Value *FV;
  if (SI == LHS) {
    TV = simplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse);
    FV = simplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse);
  } else {
    TV = simplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse);
    FV = simplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse);
  }

  // If they simplified to the same value, then return the common value.
  // If they both failed to simplify then return null.
  if (TV == FV)
    return TV;

  // If one branch simplified to undef, return the other one.
  if (TV && Q.isUndefValue(TV))
    return FV;
  if (FV && Q.isUndefValue(FV))
    return TV;

  // If applying the operation did not change the true and false select values,
  // then the result of the binop is the select itself.
  if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
    return SI;

  // If one branch simplified and the other did not, and the simplified
  // value is equal to the unsimplified one, return the simplified value.
  // For example, select (cond, X, X & Z) & Z -> X & Z.
  if ((FV && !TV) || (TV && !FV)) {
    // Check that the simplified value has the form "X op Y" where "op" is the
    // same as the original operation.
    Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
    if (Simplified && Simplified->getOpcode() == unsigned(Opcode) &&
        !Simplified->hasPoisonGeneratingFlags()) {
      // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS".
      // We already know that "op" is the same as for the simplified value. See
      // if the operands match too. If so, return the simplified value.
      Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
      Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
      Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
      if (Simplified->getOperand(0) == UnsimplifiedLHS &&
          Simplified->getOperand(1) == UnsimplifiedRHS)
        return Simplified;
      if (Simplified->isCommutative() &&
          Simplified->getOperand(1) == UnsimplifiedLHS &&
          Simplified->getOperand(0) == UnsimplifiedRHS)
        return Simplified;
    }
  }

  return nullptr;
}

/// In the case of a comparison with a select instruction, try to simplify the
/// comparison by seeing whether both branches of the select result in the same
/// value. Returns the common value if so, otherwise returns null.
/// For example, if we have:
///  %tmp = select i1 %cmp, i32 1, i32 2
///  %cmp1 = icmp sle i32 %tmp, 3
/// We can simplify %cmp1 to true, because both branches of select are
/// less than 3. We compose new comparison by substituting %tmp with both
/// branches of select and see if it can be simplified.
static Value *threadCmpOverSelect(CmpPredicate Pred, Value *LHS, Value *RHS,
                                  const SimplifyQuery &Q, unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Make sure the select is on the LHS.
  if (!isa<SelectInst>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
  SelectInst *SI = cast<SelectInst>(LHS);
  Value *Cond = SI->getCondition();
  Value *TV = SI->getTrueValue();
  Value *FV = SI->getFalseValue();

  // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
  // Does "cmp TV, RHS" simplify?
  Value *TCmp = simplifyCmpSelTrueCase(Pred, TV, RHS, Cond, Q, MaxRecurse);
  if (!TCmp)
    return nullptr;

  // Does "cmp FV, RHS" simplify?
  Value *FCmp = simplifyCmpSelFalseCase(Pred, FV, RHS, Cond, Q, MaxRecurse);
  if (!FCmp)
    return nullptr;

  // If both sides simplified to the same value, then use it as the result of
  // the original comparison.
  if (TCmp == FCmp)
    return TCmp;

  // The remaining cases only make sense if the select condition has the same
  // type as the result of the comparison, so bail out if this is not so.
  if (Cond->getType()->isVectorTy() == RHS->getType()->isVectorTy())
    return handleOtherCmpSelSimplifications(TCmp, FCmp, Cond, Q, MaxRecurse);

  return nullptr;
}

/// In the case of a binary operation with an operand that is a PHI instruction,
/// try to simplify the binop by seeing whether evaluating it on the incoming
/// phi values yields the same result for every value. If so returns the common
/// value, otherwise returns null.
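/// Illustrative example: with %p = phi i8 [ 1, %a ], [ 2, %b ], the binop
/// "and i8 %p, 8" evaluates to 0 for every incoming value, so it simplifies
/// to 0.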
static Value *threadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS,
                                 Value *RHS, const SimplifyQuery &Q,
                                 unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  PHINode *PI;
  if (isa<PHINode>(LHS)) {
    PI = cast<PHINode>(LHS);
    // Bail out if RHS and the phi may be mutually interdependent due to a loop.
    if (!valueDominatesPHI(RHS, PI, Q.DT))
      return nullptr;
  } else {
    assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
    PI = cast<PHINode>(RHS);
    // Bail out if LHS and the phi may be mutually interdependent due to a loop.
    if (!valueDominatesPHI(LHS, PI, Q.DT))
      return nullptr;
  }

  // Evaluate the BinOp on the incoming phi values.
  Value *CommonValue = nullptr;
  for (Use &Incoming : PI->incoming_values()) {
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI)
      continue;
    Instruction *InTI = PI->getIncomingBlock(Incoming)->getTerminator();
    Value *V = PI == LHS
                   ? simplifyBinOp(Opcode, Incoming, RHS,
                                   Q.getWithInstruction(InTI), MaxRecurse)
                   : simplifyBinOp(Opcode, LHS, Incoming,
                                   Q.getWithInstruction(InTI), MaxRecurse);
    // If the operation failed to simplify, or simplified to a different value
    // than previously, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}

/// In the case of a comparison with a PHI instruction, try to simplify the
/// comparison by seeing whether comparing with all of the incoming phi values
/// yields the same result every time. If so returns the common result,
/// otherwise returns null.
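/// Illustrative example: with %p = phi i8 [ 1, %a ], [ 2, %b ], the compare
/// "icmp ult i8 %p, 4" is true for every incoming value, so it simplifies to
/// true.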
static Value *threadCmpOverPHI(CmpPredicate Pred, Value *LHS, Value *RHS,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Make sure the phi is on the LHS.
  if (!isa<PHINode>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
  PHINode *PI = cast<PHINode>(LHS);

  // Bail out if RHS and the phi may be mutually interdependent due to a loop.
  if (!valueDominatesPHI(RHS, PI, Q.DT))
    return nullptr;

  // Evaluate the comparison on the incoming phi values.
  Value *CommonValue = nullptr;
  for (unsigned u = 0, e = PI->getNumIncomingValues(); u < e; ++u) {
    Value *Incoming = PI->getIncomingValue(u);
    Instruction *InTI = PI->getIncomingBlock(u)->getTerminator();
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI)
      continue;
    // Change the context instruction to the "edge" that flows into the phi.
    // This is important because that is where incoming is actually "evaluated"
    // even though it is used later somewhere else.
    Value *V = simplifyCmpInst(Pred, Incoming, RHS, Q.getWithInstruction(InTI),
                               MaxRecurse);
    // If the operation failed to simplify, or simplified to a different value
    // than previously, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}

static Constant *foldOrCommuteConstant(Instruction::BinaryOps Opcode,
                                       Value *&Op0, Value *&Op1,
                                       const SimplifyQuery &Q) {
  if (auto *CLHS = dyn_cast<Constant>(Op0)) {
    if (auto *CRHS = dyn_cast<Constant>(Op1)) {
      switch (Opcode) {
      default:
        break;
      case Instruction::FAdd:
      case Instruction::FSub:
      case Instruction::FMul:
      case Instruction::FDiv:
      case Instruction::FRem:
        if (Q.CxtI != nullptr)
          return ConstantFoldFPInstOperands(Opcode, CLHS, CRHS, Q.DL, Q.CxtI);
      }
      return ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL);
    }

    // Canonicalize the constant to the RHS if this is a commutative operation.
    if (Instruction::isCommutative(Opcode))
      std::swap(Op0, Op1);
  }
  return nullptr;
}

/// Given operands for an Add, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Add, Op0, Op1, Q))
    return C;

  // X + poison -> poison
  if (isa<PoisonValue>(Op1))
    return Op1;

  // X + undef -> undef
  if (Q.isUndefValue(Op1))
    return Op1;

  // X + 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // If the two operands are negations of each other, return 0.
  if (isKnownNegation(Op0, Op1))
    return Constant::getNullValue(Op0->getType());

  // X + (Y - X) -> Y
  // (Y - X) + X -> Y
  // Eg: X + -X -> 0
  Value *Y = nullptr;
  if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
      match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
    return Y;

  // X + ~X -> -1   since   ~X = -X-1
  Type *Ty = Op0->getType();
  if (match(Op0, m_Not(m_Specific(Op1))) || match(Op1, m_Not(m_Specific(Op0))))
    return Constant::getAllOnesValue(Ty);

  // add nsw/nuw (xor Y, signmask), signmask --> Y
  // The no-wrapping add guarantees that the top bit will be set by the add.
  // Therefore, the xor must be clearing the already set sign bit of Y.
  if ((IsNSW || IsNUW) && match(Op1, m_SignMask()) &&
      match(Op0, m_Xor(m_Value(Y), m_SignMask())))
    return Y;

  // add nuw %x, -1 -> -1, because %x can only be 0.
  if (IsNUW && match(Op1, m_AllOnes()))
    return Op1; // Which is -1.

  /// i1 add -> xor.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = simplifyXorInst(Op0, Op1, Q, MaxRecurse - 1))
      return V;

  // Try some generic simplifications for associative operations.
  if (Value *V =
          simplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q, MaxRecurse))
    return V;

  // Threading Add over selects and phi nodes is pointless, so don't bother.
  // Threading over the select in "A + select(cond, B, C)" means evaluating
  // "A+B" and "A+C" and seeing if they are equal; but they are equal if and
  // only if B and C are equal. If B and C are equal then (since we assume
  // that operands have already been simplified) "select(cond, B, C)" should
  // have been simplified to the common value of B and C already. Analysing
  // "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly
  // for threading over phi nodes.

  return nullptr;
}

Value *llvm::simplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Query) {
  return ::simplifyAddInst(Op0, Op1, IsNSW, IsNUW, Query, RecursionLimit);
}

/// Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant.
/// It returns zero if there are no constant offsets applied.
///
/// This is very similar to stripAndAccumulateConstantOffsets(), except it
/// normalizes the offset bitwidth to the stripped pointer type, not the
/// original pointer type.
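///
/// Illustrative example: for V = "getelementptr i8, ptr %base, i64 12" this
/// returns an offset of 12 and leaves V pointing at %base.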
static APInt stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V) {
  assert(V->getType()->isPtrOrPtrVectorTy());

  APInt Offset = APInt::getZero(DL.getIndexTypeSizeInBits(V->getType()));
  V = V->stripAndAccumulateConstantOffsets(DL, Offset,
                                           /*AllowNonInbounds=*/true);
  // As that strip may trace through `addrspacecast`, need to sext or trunc
  // the offset calculated.
  return Offset.sextOrTrunc(DL.getIndexTypeSizeInBits(V->getType()));
}

/// Compute the constant difference between two pointer values.
/// If the difference is not a constant, returns zero.
static Constant *computePointerDifference(const DataLayout &DL, Value *LHS,
                                          Value *RHS) {
  APInt LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
  APInt RHSOffset = stripAndComputeConstantOffsets(DL, RHS);

  // If LHS and RHS are not related via constant offsets to the same base
  // value, there is nothing we can do here.
  if (LHS != RHS)
    return nullptr;

  // Otherwise, the difference of LHS - RHS can be computed as:
  //    LHS - RHS
  //  = (LHSOffset + Base) - (RHSOffset + Base)
  //  = LHSOffset - RHSOffset
  Constant *Res = ConstantInt::get(LHS->getContext(), LHSOffset - RHSOffset);
  if (auto *VecTy = dyn_cast<VectorType>(LHS->getType()))
    Res = ConstantVector::getSplat(VecTy->getElementCount(), Res);
  return Res;
}

/// Test if there is a dominating equivalence condition for the
/// two operands. If there is, try to reduce the binary operation
/// between the two operands.
/// Example: Op0 - Op1 --> 0 when Op0 == Op1
static Value *simplifyByDomEq(unsigned Opcode, Value *Op0, Value *Op1,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  // A recursive run cannot gain any benefit from this, so only try it at the
  // top level.
  if (MaxRecurse != RecursionLimit)
    return nullptr;

  std::optional<bool> Imp =
      isImpliedByDomCondition(CmpInst::ICMP_EQ, Op0, Op1, Q.CxtI, Q.DL);
  if (Imp && *Imp) {
    Type *Ty = Op0->getType();
    switch (Opcode) {
    case Instruction::Sub:
    case Instruction::Xor:
    case Instruction::URem:
    case Instruction::SRem:
      return Constant::getNullValue(Ty);

    case Instruction::SDiv:
    case Instruction::UDiv:
      return ConstantInt::get(Ty, 1);

    case Instruction::And:
    case Instruction::Or:
      // Could be either one - choose Op1 since that's more likely a constant.
      return Op1;
    default:
      break;
    }
  }
  return nullptr;
}

/// Given operands for a Sub, see if we can fold the result.
/// If not, this returns null.
static Value *simplifySubInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Sub, Op0, Op1, Q))
    return C;

  // X - poison -> poison
  // poison - X -> poison
  if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
    return PoisonValue::get(Op0->getType());

  // X - undef -> undef
  // undef - X -> undef
  if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
    return UndefValue::get(Op0->getType());

  // X - 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // X - X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // Is this a negation?
  if (match(Op0, m_Zero())) {
    // 0 - X -> 0 if the sub is NUW.
    if (IsNUW)
      return Constant::getNullValue(Op0->getType());

    KnownBits Known = computeKnownBits(Op1, Q);
    if (Known.Zero.isMaxSignedValue()) {
      // Op1 is either 0 or the minimum signed value. If the sub is NSW, then
      // Op1 must be 0 because negating the minimum signed value is undefined.
      if (IsNSW)
        return Constant::getNullValue(Op0->getType());

      // 0 - X -> X if X is 0 or the minimum signed value.
      return Op1;
    }
  }

  // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
  // For example, (X + Y) - Y -> X; (Y + X) - Y -> X
  Value *X = nullptr, *Y = nullptr, *Z = Op1;
  if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
    // See if "V === Y - Z" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse - 1))
      // It does! Now see if "X + V" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
    // See if "V === X - Z" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse - 1))
      // It does! Now see if "Y + V" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
  }

  // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies.
  // For example, X - (X + 1) -> -1
  X = Op0;
  if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z)
    // See if "V === X - Y" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse - 1))
      // It does! Now see if "V - Z" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
    // See if "V === X - Z" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse - 1))
      // It does! Now see if "V - Y" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
  }

  // Z - (X - Y) -> (Z - X) + Y if everything simplifies.
  // For example, X - (X - Y) -> Y.
  Z = Op0;
  if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y)
    // See if "V === Z - X" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse - 1))
      // It does! Now see if "V + Y" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }

  // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies.
  if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) &&
      match(Op1, m_Trunc(m_Value(Y))))
    if (X->getType() == Y->getType())
      // See if "V === X - Y" simplifies.
      if (Value *V = simplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse - 1))
        // It does! Now see if "trunc V" simplifies.
        if (Value *W = simplifyCastInst(Instruction::Trunc, V, Op0->getType(),
                                        Q, MaxRecurse - 1))
          // It does, return the simplified "trunc V".
          return W;

  // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
  if (match(Op0, m_PtrToIntOrAddr(m_Value(X))) &&
      match(Op1, m_PtrToIntOrAddr(m_Value(Y)))) {
    if (Constant *Result = computePointerDifference(Q.DL, X, Y))
      return ConstantFoldIntegerCast(Result, Op0->getType(), /*IsSigned*/ true,
                                     Q.DL);
  }

  // i1 sub -> xor.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = simplifyXorInst(Op0, Op1, Q, MaxRecurse - 1))
      return V;

  // Threading Sub over selects and phi nodes is pointless, so don't bother.
  // Threading over the select in "A - select(cond, B, C)" means evaluating
  // "A-B" and "A-C" and seeing if they are equal; but they are equal if and
  // only if B and C are equal. If B and C are equal then (since we assume
  // that operands have already been simplified) "select(cond, B, C)" should
  // have been simplified to the common value of B and C already. Analysing
  // "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly
  // for threading over phi nodes.

  if (Value *V = simplifyByDomEq(Instruction::Sub, Op0, Op1, Q, MaxRecurse))
    return V;

  // (sub nuw C_Mask, (xor X, C_Mask)) -> X
  if (IsNUW) {
    Value *X;
    if (match(Op1, m_Xor(m_Value(X), m_Specific(Op0))) &&
        match(Op0, m_LowBitMask()))
      return X;
  }

  return nullptr;
}

Value *llvm::simplifySubInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Q) {
  return ::simplifySubInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
}

/// Given operands for a Mul, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyMulInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Mul, Op0, Op1, Q))
    return C;

  // X * poison -> poison
  if (isa<PoisonValue>(Op1))
    return Op1;

  // X * undef -> 0
  // X * 0 -> 0
  if (Q.isUndefValue(Op1) || match(Op1, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X * 1 -> X
  if (match(Op1, m_One()))
    return Op0;

  // (X / Y) * Y -> X if the division is exact.
  Value *X = nullptr;
  if (Q.IIQ.UseInstrInfo &&
      (match(Op0,
             m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) ||    // (X / Y) * Y
       match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0)))))) // Y * (X / Y)
    return X;

  if (Op0->getType()->isIntOrIntVectorTy(1)) {
    // mul i1 nsw is a special-case because -1 * -1 is poison (+1 is not
    // representable). All other cases reduce to 0, so just return 0.
    if (IsNSW)
      return ConstantInt::getNullValue(Op0->getType());

    // Treat "mul i1" as "and i1".
    if (MaxRecurse)
      if (Value *V = simplifyAndInst(Op0, Op1, Q, MaxRecurse - 1))
        return V;
  }

  // Try some generic simplifications for associative operations.
  if (Value *V =
          simplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
    return V;

  // Mul distributes over Add. Try some generic simplifications based on this.
  if (Value *V = expandCommutativeBinOp(Instruction::Mul, Op0, Op1,
                                        Instruction::Add, Q, MaxRecurse))
    return V;

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V =
            threadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V =
            threadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
      return V;

  return nullptr;
}

Value *llvm::simplifyMulInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Q) {
  return ::simplifyMulInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
}

/// Given a predicate and two operands, return true if the comparison is true.
/// This is a helper for div/rem simplification where we return some other value
/// when we can prove a relationship between the operands.
static bool isICmpTrue(CmpPredicate Pred, Value *LHS, Value *RHS,
                       const SimplifyQuery &Q, unsigned MaxRecurse) {
  Value *V = simplifyICmpInst(Pred, LHS, RHS, Q, MaxRecurse);
  Constant *C = dyn_cast_or_null<Constant>(V);
  return (C && C->isAllOnesValue());
}

/// Return true if we can simplify X / Y to 0. Remainder can adapt that answer
/// to simplify X % Y to X.
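/// Illustrative example: "udiv (and i8 %x, 7), 8" is 0, because the dividend
/// is known to be at most 7, which is unsigned-less than the divisor.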
static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q,
                      unsigned MaxRecurse, bool IsSigned) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return false;

  if (IsSigned) {
    // (X srem Y) sdiv Y --> 0
    if (match(X, m_SRem(m_Value(), m_Specific(Y))))
      return true;

    // |X| / |Y| --> 0
    //
    // We require that 1 operand is a simple constant. That could be extended
    // to 2 variables if we computed the sign bit for each.
    //
    // Make sure that a constant is not the minimum signed value because taking
    // the abs() of that is undefined.
    Type *Ty = X->getType();
    const APInt *C;
    if (match(X, m_APInt(C)) && !C->isMinSignedValue()) {
      // Is the variable divisor magnitude always greater than the constant
      // dividend magnitude?
      // |Y| > |C| --> Y < -abs(C) or Y > abs(C)
      Constant *PosDividendC = ConstantInt::get(Ty, C->abs());
      Constant *NegDividendC = ConstantInt::get(Ty, -C->abs());
      if (isICmpTrue(CmpInst::ICMP_SLT, Y, NegDividendC, Q, MaxRecurse) ||
          isICmpTrue(CmpInst::ICMP_SGT, Y, PosDividendC, Q, MaxRecurse))
        return true;
    }
    if (match(Y, m_APInt(C))) {
      // Special-case: we can't take the abs() of a minimum signed value. If
      // that's the divisor, then all we have to do is prove that the dividend
      // is also not the minimum signed value.
      if (C->isMinSignedValue())
        return isICmpTrue(CmpInst::ICMP_NE, X, Y, Q, MaxRecurse);

      // Is the variable dividend magnitude always less than the constant
      // divisor magnitude?
      // |X| < |C| --> X > -abs(C) and X < abs(C)
      Constant *PosDivisorC = ConstantInt::get(Ty, C->abs());
      Constant *NegDivisorC = ConstantInt::get(Ty, -C->abs());
      if (isICmpTrue(CmpInst::ICMP_SGT, X, NegDivisorC, Q, MaxRecurse) &&
          isICmpTrue(CmpInst::ICMP_SLT, X, PosDivisorC, Q, MaxRecurse))
        return true;
    }
    return false;
  }

  // IsSigned == false.

  // Is the unsigned dividend known to be less than a constant divisor?
  // TODO: Convert this (and above) to range analysis
  //       ("computeConstantRangeIncludingKnownBits")?
  const APInt *C;
  if (match(Y, m_APInt(C)) && computeKnownBits(X, Q).getMaxValue().ult(*C))
    return true;

  // Try again for any divisor:
  // Is the dividend unsigned less than the divisor?
  return isICmpTrue(ICmpInst::ICMP_ULT, X, Y, Q, MaxRecurse);
}

/// Check for common or similar folds of integer division or integer remainder.
/// This applies to all 4 opcodes (sdiv/udiv/srem/urem).
static Value *simplifyDivRem(Instruction::BinaryOps Opcode, Value *Op0,
                             Value *Op1, const SimplifyQuery &Q,
                             unsigned MaxRecurse) {
  bool IsDiv = (Opcode == Instruction::SDiv || Opcode == Instruction::UDiv);
  bool IsSigned = (Opcode == Instruction::SDiv || Opcode == Instruction::SRem);

  Type *Ty = Op0->getType();

  // X / undef -> poison
  // X % undef -> poison
  if (Q.isUndefValue(Op1) || isa<PoisonValue>(Op1))
    return PoisonValue::get(Ty);

  // X / 0 -> poison
  // X % 0 -> poison
  // We don't need to preserve faults!
  if (match(Op1, m_Zero()))
    return PoisonValue::get(Ty);

  // poison / X -> poison
  // poison % X -> poison
  if (isa<PoisonValue>(Op0))
    return Op0;

  // undef / X -> 0
  // undef % X -> 0
  if (Q.isUndefValue(Op0))
    return Constant::getNullValue(Ty);

  // 0 / X -> 0
  // 0 % X -> 0
  if (match(Op0, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X / X -> 1
  // X % X -> 0
  if (Op0 == Op1)
    return IsDiv ? ConstantInt::get(Ty, 1) : Constant::getNullValue(Ty);

  KnownBits Known = computeKnownBits(Op1, Q);
  // X / 0 -> poison
  // X % 0 -> poison
  // If the divisor is known to be zero, just return poison. This can happen in
  // some cases where it's provable indirectly that the denominator is zero but
  // it's not trivially simplifiable (i.e. known zero through a phi node).
  if (Known.isZero())
    return PoisonValue::get(Ty);

  // X / 1 -> X
  // X % 1 -> 0
  // If the divisor can only be zero or one, we can't have division-by-zero
  // or remainder-by-zero, so assume the divisor is 1.
  // e.g. 1, zext (i8 X), sdiv X (Y and 1)
  if (Known.countMinLeadingZeros() == Known.getBitWidth() - 1)
    return IsDiv ? Op0 : Constant::getNullValue(Ty);

  // If X * Y does not overflow, then:
  // X * Y / Y -> X
  // X * Y % Y -> 0
  Value *X;
  if (match(Op0, m_c_Mul(m_Value(X), m_Specific(Op1)))) {
    auto *Mul = cast<OverflowingBinaryOperator>(Op0);
    // The multiplication can't overflow if it is defined not to, or if
    // X == A / Y for some A.
    if ((IsSigned && Q.IIQ.hasNoSignedWrap(Mul)) ||
        (!IsSigned && Q.IIQ.hasNoUnsignedWrap(Mul)) ||
        (IsSigned && match(X, m_SDiv(m_Value(), m_Specific(Op1)))) ||
        (!IsSigned && match(X, m_UDiv(m_Value(), m_Specific(Op1))))) {
      return IsDiv ? X : Constant::getNullValue(Op0->getType());
    }
  }

  if (isDivZero(Op0, Op1, Q, MaxRecurse, IsSigned))
    return IsDiv ? Constant::getNullValue(Op0->getType()) : Op0;

  if (Value *V = simplifyByDomEq(Opcode, Op0, Op1, Q, MaxRecurse))
    return V;

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = threadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = threadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  return nullptr;
}

/// These are simplifications common to SDiv and UDiv.
static Value *simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                          bool IsExact, const SimplifyQuery &Q,
                          unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q, MaxRecurse))
    return V;

  const APInt *DivC;
  if (IsExact && match(Op1, m_APInt(DivC))) {
    // If this is an exact divide by a constant, then the dividend (Op0) must
    // have at least as many trailing zeros as the divisor to divide evenly. If
    // it has fewer trailing zeros, then the result must be poison.
    if (DivC->countr_zero()) {
      KnownBits KnownOp0 = computeKnownBits(Op0, Q);
      if (KnownOp0.countMaxTrailingZeros() < DivC->countr_zero())
        return PoisonValue::get(Op0->getType());
    }

    // udiv exact (mul nsw X, C), C --> X
    // sdiv exact (mul nuw X, C), C --> X
    // where C is not a power of 2.
    Value *X;
    if (!DivC->isPowerOf2() &&
        (Opcode == Instruction::UDiv
             ? match(Op0, m_NSWMul(m_Value(X), m_Specific(Op1)))
             : match(Op0, m_NUWMul(m_Value(X), m_Specific(Op1)))))
      return X;
  }

  return nullptr;
}

/// These are simplifications common to SRem and URem.
static Value *simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q, MaxRecurse))
    return V;

  // (X << Y) % X -> 0
  if (Q.IIQ.UseInstrInfo) {
    if ((Opcode == Instruction::SRem &&
         match(Op0, m_NSWShl(m_Specific(Op1), m_Value()))) ||
        (Opcode == Instruction::URem &&
         match(Op0, m_NUWShl(m_Specific(Op1), m_Value()))))
      return Constant::getNullValue(Op0->getType());

    const APInt *C0;
    if (match(Op1, m_APInt(C0))) {
      // (srem (mul nsw X, C1), C0) -> 0 if C1 s% C0 == 0
      // (urem (mul nuw X, C1), C0) -> 0 if C1 u% C0 == 0
      if (Opcode == Instruction::SRem
              ? match(Op0,
                      m_NSWMul(m_Value(), m_CheckedInt([C0](const APInt &C) {
                                 return C.srem(*C0).isZero();
                               })))
              : match(Op0,
                      m_NUWMul(m_Value(), m_CheckedInt([C0](const APInt &C) {
                                 return C.urem(*C0).isZero();
                               }))))
        return Constant::getNullValue(Op0->getType());
    }
  }
  return nullptr;
}

/// Given operands for an SDiv, see if we can fold the result.
/// If not, this returns null.
static Value *simplifySDivInst(Value *Op0, Value *Op1, bool IsExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  // If the two operands are negations of each other and there is no signed
  // overflow, return -1.
  if (isKnownNegation(Op0, Op1, /*NeedNSW=*/true))
    return Constant::getAllOnesValue(Op0->getType());

  return simplifyDiv(Instruction::SDiv, Op0, Op1, IsExact, Q, MaxRecurse);
}

Value *llvm::simplifySDivInst(Value *Op0, Value *Op1, bool IsExact,
                              const SimplifyQuery &Q) {
  return ::simplifySDivInst(Op0, Op1, IsExact, Q, RecursionLimit);
}

/// Given operands for a UDiv, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyUDivInst(Value *Op0, Value *Op1, bool IsExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  return simplifyDiv(Instruction::UDiv, Op0, Op1, IsExact, Q, MaxRecurse);
}

Value *llvm::simplifyUDivInst(Value *Op0, Value *Op1, bool IsExact,
                              const SimplifyQuery &Q) {
  return ::simplifyUDivInst(Op0, Op1, IsExact, Q, RecursionLimit);
}

/// Given operands for an SRem, see if we can fold the result.
/// If not, this returns null.
static Value *simplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  // If the divisor is 0, the result is undefined, so assume the divisor is -1.
  // srem Op0, (sext i1 X) --> srem Op0, -1 --> 0
  Value *X;
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return ConstantInt::getNullValue(Op0->getType());

  // If the two operands are negations of each other, return 0.
  if (isKnownNegation(Op0, Op1))
    return ConstantInt::getNullValue(Op0->getType());

  return simplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::simplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::simplifySRemInst(Op0, Op1, Q, RecursionLimit);
}

/// Given operands for a URem, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  return simplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::simplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::simplifyURemInst(Op0, Op1, Q, RecursionLimit);
}

/// Returns true if a shift by \c Amount always yields poison.
static bool isPoisonShift(Value *Amount, const SimplifyQuery &Q) {
  Constant *C = dyn_cast<Constant>(Amount);
  if (!C)
    return false;

  // X shift by undef -> poison because it may shift by the bitwidth.
  if (Q.isUndefValue(C))
    return true;

  // Shifting by the bitwidth or more is poison. This covers scalars and
  // fixed/scalable vectors with splat constants.
  const APInt *AmountC;
  if (match(C, m_APInt(AmountC)) && AmountC->uge(AmountC->getBitWidth()))
    return true;

  // Try harder for fixed-length vectors:
  // If all lanes of a vector shift are poison, the whole shift is poison.
  if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
    for (unsigned I = 0,
                  E = cast<FixedVectorType>(C->getType())->getNumElements();
         I != E; ++I)
      if (!isPoisonShift(C->getAggregateElement(I), Q))
        return false;
    return true;
  }

  return false;
}

/// Given operands for an Shl, LShr or AShr, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyShift(Instruction::BinaryOps Opcode, Value *Op0,
                            Value *Op1, bool IsNSW, const SimplifyQuery &Q,
                            unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  // poison shift by X -> poison
  if (isa<PoisonValue>(Op0))
    return Op0;

  // 0 shift by X -> 0
  if (match(Op0, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X shift by 0 -> X
  // Shift-by-sign-extended bool must be shift-by-0 because shift-by-all-ones
  // would be poison.
  Value *X;
  if (match(Op1, m_Zero()) ||
      (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
    return Op0;

  // Fold undefined shifts.
  if (isPoisonShift(Op1, Q))
    return PoisonValue::get(Op0->getType());

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = threadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = threadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If any bits in the shift amount make that value greater than or equal to
  // the number of bits in the type, the shift is undefined.
  KnownBits KnownAmt = computeKnownBits(Op1, Q);
  if (KnownAmt.getMinValue().uge(KnownAmt.getBitWidth()))
    return PoisonValue::get(Op0->getType());

  // If all valid bits in the shift amount are known zero, the first operand is
  // unchanged.
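  // (Illustrative: for an i32 shift, only the low 5 bits of the amount can
  // encode a valid amount of 0..31. If those bits are all zero, the amount is
  // either 0, a no-op, or >= 32, which is poison, so returning Op0 is correct
  // in both cases.)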
  unsigned NumValidShiftBits = Log2_32_Ceil(KnownAmt.getBitWidth());
  if (KnownAmt.countMinTrailingZeros() >= NumValidShiftBits)
    return Op0;

  // Check for nsw shl leading to a poison value.
  if (IsNSW) {
    assert(Opcode == Instruction::Shl && "Expected shl for nsw instruction");
    KnownBits KnownVal = computeKnownBits(Op0, Q);
    KnownBits KnownShl = KnownBits::shl(KnownVal, KnownAmt);

    if (KnownVal.Zero.isSignBitSet())
      KnownShl.Zero.setSignBit();
    if (KnownVal.One.isSignBitSet())
      KnownShl.One.setSignBit();

    if (KnownShl.hasConflict())
      return PoisonValue::get(Op0->getType());
  }

  return nullptr;
}

/// Given operands for an LShr or AShr, see if we can fold the result. If not,
/// this returns null.
static Value *simplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0,
                                 Value *Op1, bool IsExact,
                                 const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V =
          simplifyShift(Opcode, Op0, Op1, /*IsNSW*/ false, Q, MaxRecurse))
    return V;

  // X >> X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // undef >> X -> 0
  // undef >> X -> undef (if it's exact)
  if (Q.isUndefValue(Op0))
    return IsExact ? Op0 : Constant::getNullValue(Op0->getType());

  // The low bit cannot be shifted out of an exact shift if it is set.
  // TODO: Generalize by counting trailing zeros (see fold for exact division).
  if (IsExact) {
    KnownBits Op0Known = computeKnownBits(Op0, Q);
    if (Op0Known.One[0])
      return Op0;
  }

  return nullptr;
}

/// Given operands for an Shl, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V =
          simplifyShift(Instruction::Shl, Op0, Op1, IsNSW, Q, MaxRecurse))
    return V;

  Type *Ty = Op0->getType();
  // undef << X -> 0
  // undef << X -> undef (if it's NSW/NUW)
  if (Q.isUndefValue(Op0))
    return IsNSW || IsNUW ? Op0 : Constant::getNullValue(Ty);

  // (X >> A) << A -> X
  Value *X;
  if (Q.IIQ.UseInstrInfo &&
      match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
    return X;

  // shl nuw i8 C, %x -> C iff C has sign bit set.
  if (IsNUW && match(Op0, m_Negative()))
    return Op0;
  // NOTE: could use computeKnownBits() / LazyValueInfo,
  // but the cost-benefit analysis suggests it isn't worth it.

  // "nuw" guarantees that only zeros are shifted out, and "nsw" guarantees
  // that the sign-bit does not change, so the only input that does not
  // produce poison is 0, and "0 << (bitwidth-1) --> 0".
  if (IsNSW && IsNUW &&
      match(Op1, m_SpecificInt(Ty->getScalarSizeInBits() - 1)))
    return Constant::getNullValue(Ty);

  return nullptr;
}

Value *llvm::simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Q) {
  return ::simplifyShlInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
}

/// Given operands for an LShr, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V = simplifyRightShift(Instruction::LShr, Op0, Op1, IsExact, Q,
                                    MaxRecurse))
    return V;

  // (X << A) >> A -> X
  Value *X;
  if (Q.IIQ.UseInstrInfo && match(Op0, m_NUWShl(m_Value(X), m_Specific(Op1))))
    return X;

  // ((X << A) | Y) >> A -> X if effective width of Y is not larger than A.
  // We can return X as we do in the above case since OR alters no bits in X.
  // SimplifyDemandedBits in InstCombine can do more general optimization for
  // bit manipulation. This pattern aims to provide opportunities for other
  // optimizers by supporting a simple but common case in InstSimplify.
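  // (Illustrative: for i16 values, "((shl nuw X, 8) | zext i8 %y to i16) >> 8"
  // returns X: the zero-extended Y occupies only the low 8 bits, all of which
  // are shifted out, and nuw guarantees no bits of X were lost.)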
  Value *Y;
  const APInt *ShRAmt, *ShLAmt;
  if (Q.IIQ.UseInstrInfo && match(Op1, m_APInt(ShRAmt)) &&
      match(Op0, m_c_Or(m_NUWShl(m_Value(X), m_APInt(ShLAmt)), m_Value(Y))) &&
      *ShRAmt == *ShLAmt) {
    const KnownBits YKnown = computeKnownBits(Y, Q);
    const unsigned EffWidthY = YKnown.countMaxActiveBits();
    if (ShRAmt->uge(EffWidthY))
      return X;
  }

  return nullptr;
}

Value *llvm::simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact,
                              const SimplifyQuery &Q) {
  return ::simplifyLShrInst(Op0, Op1, IsExact, Q, RecursionLimit);
}

/// Given operands for an AShr, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V = simplifyRightShift(Instruction::AShr, Op0, Op1, IsExact, Q,
                                    MaxRecurse))
    return V;

  // -1 >>a X --> -1
  // (-1 << X) a>> X --> -1
  // We could return the original -1 constant to preserve poison elements.
  if (match(Op0, m_AllOnes()) ||
      match(Op0, m_Shl(m_AllOnes(), m_Specific(Op1))))
    return Constant::getAllOnesValue(Op0->getType());

  // (X << A) >> A -> X
  Value *X;
  if (Q.IIQ.UseInstrInfo && match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
    return X;

  // Arithmetic shifting an all-sign-bit value is a no-op.
  unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, Q.AC, Q.CxtI, Q.DT);
  if (NumSignBits == Op0->getType()->getScalarSizeInBits())
    return Op0;

  return nullptr;
}

Value *llvm::simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact,
                              const SimplifyQuery &Q) {
  return ::simplifyAShrInst(Op0, Op1, IsExact, Q, RecursionLimit);
}

1510/// Commuted variants are assumed to be handled by calling this function again
1511/// with the parameters swapped.
1513 ICmpInst *UnsignedICmp, bool IsAnd,
1514 const SimplifyQuery &Q) {
1515 Value *X, *Y;
1516
1517 CmpPredicate EqPred;
1518 if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(Y), m_Zero())) ||
1519 !ICmpInst::isEquality(EqPred))
1520 return nullptr;
1521
1522 CmpPredicate UnsignedPred;
1523
1524 Value *A, *B;
1525 // Y = (A - B);
1526 if (match(Y, m_Sub(m_Value(A), m_Value(B)))) {
1527 if (match(UnsignedICmp,
1528 m_c_ICmp(UnsignedPred, m_Specific(A), m_Specific(B))) &&
1529 ICmpInst::isUnsigned(UnsignedPred)) {
1530 // A >=/<= B || (A - B) != 0 <--> true
1531 if ((UnsignedPred == ICmpInst::ICMP_UGE ||
1532 UnsignedPred == ICmpInst::ICMP_ULE) &&
1533 EqPred == ICmpInst::ICMP_NE && !IsAnd)
1534 return ConstantInt::getTrue(UnsignedICmp->getType());
1535 // A </> B && (A - B) == 0 <--> false
1536 if ((UnsignedPred == ICmpInst::ICMP_ULT ||
1537 UnsignedPred == ICmpInst::ICMP_UGT) &&
1538 EqPred == ICmpInst::ICMP_EQ && IsAnd)
1539 return ConstantInt::getFalse(UnsignedICmp->getType());
1540
1541 // A </> B && (A - B) != 0 <--> A </> B
1542 // A </> B || (A - B) != 0 <--> (A - B) != 0
1543 if (EqPred == ICmpInst::ICMP_NE && (UnsignedPred == ICmpInst::ICMP_ULT ||
1544 UnsignedPred == ICmpInst::ICMP_UGT))
1545 return IsAnd ? UnsignedICmp : ZeroICmp;
1546
1547 // A <=/>= B && (A - B) == 0 <--> (A - B) == 0
1548 // A <=/>= B || (A - B) == 0 <--> A <=/>= B
1549 if (EqPred == ICmpInst::ICMP_EQ && (UnsignedPred == ICmpInst::ICMP_ULE ||
1550 UnsignedPred == ICmpInst::ICMP_UGE))
1551 return IsAnd ? ZeroICmp : UnsignedICmp;
1552 }
1553
1554 // Given Y = (A - B)
1555 // Y >= A && Y != 0 --> Y >= A iff B != 0
1556 // Y < A || Y == 0 --> Y < A iff B != 0
1557 if (match(UnsignedICmp,
1558 m_c_ICmp(UnsignedPred, m_Specific(Y), m_Specific(A)))) {
1559 if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd &&
1560 EqPred == ICmpInst::ICMP_NE && isKnownNonZero(B, Q))
1561 return UnsignedICmp;
1562 if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd &&
1563 EqPred == ICmpInst::ICMP_EQ && isKnownNonZero(B, Q))
1564 return UnsignedICmp;
1565 }
1566 }
1567
1568 if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) &&
1569 ICmpInst::isUnsigned(UnsignedPred))
1570 ;
1571 else if (match(UnsignedICmp,
1572 m_ICmp(UnsignedPred, m_Specific(Y), m_Value(X))) &&
1573 ICmpInst::isUnsigned(UnsignedPred))
1574 UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
1575 else
1576 return nullptr;
1577
1578 // X > Y && Y == 0 --> Y == 0 iff X != 0
1579 // X > Y || Y == 0 --> X > Y iff X != 0
1580 if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
1581 isKnownNonZero(X, Q))
1582 return IsAnd ? ZeroICmp : UnsignedICmp;
1583
1584 // X <= Y && Y != 0 --> X <= Y iff X != 0
1585 // X <= Y || Y != 0 --> Y != 0 iff X != 0
1586 if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
1587 isKnownNonZero(X, Q))
1588 return IsAnd ? UnsignedICmp : ZeroICmp;
1589
1590 // The transforms below here are expected to be handled more generally with
1591 // simplifyAndOrOfICmpsWithLimitConst() or in InstCombine's
1592 // foldAndOrOfICmpsWithConstEq(). If we are looking to trim optimizer overlap,
1593 // these are candidates for removal.
1594
1595 // X < Y && Y != 0 --> X < Y
1596 // X < Y || Y != 0 --> Y != 0
1597 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
1598 return IsAnd ? UnsignedICmp : ZeroICmp;
1599
1600 // X >= Y && Y == 0 --> Y == 0
1601 // X >= Y || Y == 0 --> X >= Y
1602 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ)
1603 return IsAnd ? ZeroICmp : UnsignedICmp;
1604
1605 // X < Y && Y == 0 --> false
1606 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
1607 IsAnd)
1608 return getFalse(UnsignedICmp->getType());
1609
1610 // X >= Y || Y != 0 --> true
1611 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_NE &&
1612 !IsAnd)
1613 return getTrue(UnsignedICmp->getType());
1614
1615 return nullptr;
1616}
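Aside (not part of the source): the range-check folds above are easy to sanity-check outside the compiler. A minimal standalone C++ sketch, exhaustively verifying over 8-bit values that A >= B || (A - B) != 0 is always true and A < B && (A - B) == 0 is always false, with wrapping subtraction as in LLVM IR:

#include <cassert>
#include <cstdint>

int main() {
  for (unsigned A = 0; A < 256; ++A) {
    for (unsigned B = 0; B < 256; ++B) {
      uint8_t Diff = uint8_t(A - B); // wrapping subtraction, as in LLVM IR
      assert((A >= B) || (Diff != 0));   // the '||' form folds to true
      assert(!((A < B) && (Diff == 0))); // the '&&' form folds to false
    }
  }
  return 0;
}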
1617
1618/// Test if a pair of compares with a shared operand and 2 constants has an
1619/// empty set intersection, full set union, or if one compare is a superset of
1620/// the other.
1621static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1,
1622 bool IsAnd) {
1623 // Look for this pattern: {and/or} (icmp X, C0), (icmp X, C1).
1624 if (Cmp0->getOperand(0) != Cmp1->getOperand(0))
1625 return nullptr;
1626
1627 const APInt *C0, *C1;
1628 if (!match(Cmp0->getOperand(1), m_APInt(C0)) ||
1629 !match(Cmp1->getOperand(1), m_APInt(C1)))
1630 return nullptr;
1631
1632 auto Range0 = ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0);
1633 auto Range1 = ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1);
1634
1635 // For and-of-compares, check if the intersection is empty:
1636 // (icmp X, C0) && (icmp X, C1) --> empty set --> false
1637 if (IsAnd && Range0.intersectWith(Range1).isEmptySet())
1638 return getFalse(Cmp0->getType());
1639
1640 // For or-of-compares, check if the union is full:
1641 // (icmp X, C0) || (icmp X, C1) --> full set --> true
1642 if (!IsAnd && Range0.unionWith(Range1).isFullSet())
1643 return getTrue(Cmp0->getType());
1644
1645 // Is one range a superset of the other?
1646 // If this is and-of-compares, take the smaller set:
1647 // (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42
1648 // If this is or-of-compares, take the larger set:
1649 // (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4
1650 if (Range0.contains(Range1))
1651 return IsAnd ? Cmp1 : Cmp0;
1652 if (Range1.contains(Range0))
1653 return IsAnd ? Cmp0 : Cmp1;
1654
1655 return nullptr;
1656}
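Aside (not part of the source): a minimal sketch of the same ConstantRange reasoning, assuming only an LLVM development tree to compile against; the constants and bit width are arbitrary:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"
#include <cassert>

using namespace llvm;

int main() {
  // (icmp ult X, 4) && (icmp ugt X, 10) --> empty intersection --> false.
  ConstantRange Ult4 =
      ConstantRange::makeExactICmpRegion(CmpInst::ICMP_ULT, APInt(8, 4));
  ConstantRange Ugt10 =
      ConstantRange::makeExactICmpRegion(CmpInst::ICMP_UGT, APInt(8, 10));
  assert(Ult4.intersectWith(Ugt10).isEmptySet());

  // (icmp sgt X, 4) contains (icmp sgt X, 42): 'and' keeps the smaller set,
  // 'or' keeps the larger one, mirroring the superset checks above.
  ConstantRange Sgt4 =
      ConstantRange::makeExactICmpRegion(CmpInst::ICMP_SGT, APInt(8, 4));
  ConstantRange Sgt42 =
      ConstantRange::makeExactICmpRegion(CmpInst::ICMP_SGT, APInt(8, 42));
  assert(Sgt4.contains(Sgt42));
  return 0;
}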
1657
1658static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
1659 const InstrInfoQuery &IIQ) {
1660 // (icmp (add V, C0), C1) & (icmp V, C0)
1661 CmpPredicate Pred0, Pred1;
1662 const APInt *C0, *C1;
1663 Value *V;
1664 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1665 return nullptr;
1666
1667 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1668 return nullptr;
1669
1670 auto *AddInst = cast<OverflowingBinaryOperator>(Op0->getOperand(0));
1671 if (AddInst->getOperand(1) != Op1->getOperand(1))
1672 return nullptr;
1673
1674 Type *ITy = Op0->getType();
1675 bool IsNSW = IIQ.hasNoSignedWrap(AddInst);
1676 bool IsNUW = IIQ.hasNoUnsignedWrap(AddInst);
1677
1678 const APInt Delta = *C1 - *C0;
1679 if (C0->isStrictlyPositive()) {
1680 if (Delta == 2) {
1681 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT)
1682 return getFalse(ITy);
1683 if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
1684 return getFalse(ITy);
1685 }
1686 if (Delta == 1) {
1687 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT)
1688 return getFalse(ITy);
1689 if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
1690 return getFalse(ITy);
1691 }
1692 }
1693 if (C0->getBoolValue() && IsNUW) {
1694 if (Delta == 2)
1695 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)
1696 return getFalse(ITy);
1697 if (Delta == 1)
1698 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT)
1699 return getFalse(ITy);
1700 }
1701
1702 return nullptr;
1703}
1704
1705/// Try to simplify and/or of icmp with ctpop intrinsic.
1706static Value *simplifyAndOrOfICmpsWithCtpop(ICmpInst *Cmp0, ICmpInst *Cmp1,
1707 bool IsAnd) {
1708 CmpPredicate Pred0, Pred1;
1709 Value *X;
1710 const APInt *C;
1711 if (!match(Cmp0, m_ICmp(Pred0, m_Intrinsic<Intrinsic::ctpop>(m_Value(X)),
1712 m_APInt(C))) ||
1713 !match(Cmp1, m_ICmp(Pred1, m_Specific(X), m_ZeroInt())) || C->isZero())
1714 return nullptr;
1715
1716 // (ctpop(X) == C) || (X != 0) --> X != 0 where C > 0
1717 if (!IsAnd && Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_NE)
1718 return Cmp1;
1719 // (ctpop(X) != C) && (X == 0) --> X == 0 where C > 0
1720 if (IsAnd && Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_EQ)
1721 return Cmp1;
1722
1723 return nullptr;
1724}
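Aside (not part of the source): a small standalone check of the two ctpop folds, using C++20 std::popcount as a stand-in for llvm.ctpop; illustrative only:

#include <bit>
#include <cassert>
#include <cstdint>

int main() {
  for (unsigned X = 0; X < 256; ++X) {
    for (int C = 1; C <= 8; ++C) { // C > 0, as the fold requires
      bool PopEqC = std::popcount(uint8_t(X)) == C;
      bool XNonZero = X != 0;
      // (ctpop(X) == C) || (X != 0) collapses to X != 0.
      assert((PopEqC || XNonZero) == XNonZero);
      // (ctpop(X) != C) && (X == 0) collapses to X == 0.
      assert((!PopEqC && !XNonZero) == !XNonZero);
    }
  }
  return 0;
}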
1725
1726static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1,
1727 const SimplifyQuery &Q) {
1728 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true, Q))
1729 return X;
1730 if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/true, Q))
1731 return X;
1732
1733 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true))
1734 return X;
1735
1736 if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op0, Op1, true))
1737 return X;
1738 if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op1, Op0, true))
1739 return X;
1740
1741 if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1742 return X;
1743 if (Value *X = simplifyAndOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1744 return X;
1745
1746 return nullptr;
1747}
1748
1749static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
1750 const InstrInfoQuery &IIQ) {
1751 // (icmp (add V, C0), C1) | (icmp V, C0)
1752 CmpPredicate Pred0, Pred1;
1753 const APInt *C0, *C1;
1754 Value *V;
1755 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1756 return nullptr;
1757
1758 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1759 return nullptr;
1760
1761 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
1762 if (AddInst->getOperand(1) != Op1->getOperand(1))
1763 return nullptr;
1764
1765 Type *ITy = Op0->getType();
1766 bool IsNSW = IIQ.hasNoSignedWrap(AddInst);
1767 bool IsNUW = IIQ.hasNoUnsignedWrap(AddInst);
1768
1769 const APInt Delta = *C1 - *C0;
1770 if (C0->isStrictlyPositive()) {
1771 if (Delta == 2) {
1772 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
1773 return getTrue(ITy);
1774 if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
1775 return getTrue(ITy);
1776 }
1777 if (Delta == 1) {
1778 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
1779 return getTrue(ITy);
1780 if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
1781 return getTrue(ITy);
1782 }
1783 }
1784 if (C0->getBoolValue() && IsNUW) {
1785 if (Delta == 2)
1786 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
1787 return getTrue(ITy);
1788 if (Delta == 1)
1789 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
1790 return getTrue(ITy);
1791 }
1792
1793 return nullptr;
1794}
1795
1796static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1,
1797 const SimplifyQuery &Q) {
1798 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false, Q))
1799 return X;
1800 if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/false, Q))
1801 return X;
1802
1803 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false))
1804 return X;
1805
1806 if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op0, Op1, false))
1807 return X;
1808 if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op1, Op0, false))
1809 return X;
1810
1811 if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1812 return X;
1813 if (Value *X = simplifyOrOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1814 return X;
1815
1816 return nullptr;
1817}
1818
1819/// Test if a pair of compares with a shared operand and 2 constants has an
1820/// empty set intersection, full set union, or if one compare is a superset of
1821/// the other.
1822static Value *simplifyAndOrOfFCmpsWithConstants(FCmpInst *Cmp0, FCmpInst *Cmp1,
1823 bool IsAnd) {
1824 // Look for this pattern: {and/or} (fcmp X, C0), (fcmp X, C1).
1825 if (Cmp0->getOperand(0) != Cmp1->getOperand(0))
1826 return nullptr;
1827
1828 const APFloat *C0, *C1;
1829 if (!match(Cmp0->getOperand(1), m_APFloat(C0)) ||
1830 !match(Cmp1->getOperand(1), m_APFloat(C1)))
1831 return nullptr;
1832
1833 std::optional<ConstantFPRange> Range0 = ConstantFPRange::makeExactFCmpRegion(
1834 IsAnd ? Cmp0->getPredicate() : Cmp0->getInversePredicate(), *C0);
1835 std::optional<ConstantFPRange> Range1 = ConstantFPRange::makeExactFCmpRegion(
1836 IsAnd ? Cmp1->getPredicate() : Cmp1->getInversePredicate(), *C1);
1837
1838 if (!Range0 || !Range1)
1839 return nullptr;
1840
1841 // For and-of-compares, check if the intersection is empty:
1842 // (fcmp X, C0) && (fcmp X, C1) --> empty set --> false
1843 if (Range0->intersectWith(*Range1).isEmptySet())
1844 return ConstantInt::getBool(Cmp0->getType(), !IsAnd);
1845
1846 // Is one range a superset of the other?
1847 // If this is and-of-compares, take the smaller set:
1848 // (fcmp ogt X, 4) && (fcmp ogt X, 42) --> fcmp ogt X, 42
1849 // If this is or-of-compares, take the larger set:
1850 // (fcmp ogt X, 4) || (fcmp ogt X, 42) --> fcmp ogt X, 4
1851 if (Range0->contains(*Range1))
1852 return Cmp1;
1853 if (Range1->contains(*Range0))
1854 return Cmp0;
1855
1856 return nullptr;
1857}
1858
1859static Value *simplifyAndOrOfFCmps(const SimplifyQuery &Q, FCmpInst *LHS,
1860 FCmpInst *RHS, bool IsAnd) {
1861 Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
1862 Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
1863 if (LHS0->getType() != RHS0->getType())
1864 return nullptr;
1865
1866 FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1867 auto AbsOrSelfLHS0 = m_CombineOr(m_Specific(LHS0), m_FAbs(m_Specific(LHS0)));
1868 if ((PredL == FCmpInst::FCMP_ORD || PredL == FCmpInst::FCMP_UNO) &&
1869 ((FCmpInst::isOrdered(PredR) && IsAnd) ||
1870 (FCmpInst::isUnordered(PredR) && !IsAnd))) {
1871 // (fcmp ord X, 0) & (fcmp o** X/abs(X), Y) --> fcmp o** X/abs(X), Y
1872 // (fcmp uno X, 0) & (fcmp o** X/abs(X), Y) --> false
1873 // (fcmp uno X, 0) | (fcmp u** X/abs(X), Y) --> fcmp u** X/abs(X), Y
1874 // (fcmp ord X, 0) | (fcmp u** X/abs(X), Y) --> true
1875 if ((match(RHS0, AbsOrSelfLHS0) || match(RHS1, AbsOrSelfLHS0)) &&
1876 match(LHS1, m_PosZeroFP()))
1877 return FCmpInst::isOrdered(PredL) == FCmpInst::isOrdered(PredR)
1878 ? static_cast<Value *>(RHS)
1879 : ConstantInt::getBool(LHS->getType(), !IsAnd);
1880 }
1881
1882 auto AbsOrSelfRHS0 = m_CombineOr(m_Specific(RHS0), m_FAbs(m_Specific(RHS0)));
1883 if ((PredR == FCmpInst::FCMP_ORD || PredR == FCmpInst::FCMP_UNO) &&
1884 ((FCmpInst::isOrdered(PredL) && IsAnd) ||
1885 (FCmpInst::isUnordered(PredL) && !IsAnd))) {
1886 // (fcmp o** X/abs(X), Y) & (fcmp ord X, 0) --> fcmp o** X/abs(X), Y
1887 // (fcmp o** X/abs(X), Y) & (fcmp uno X, 0) --> false
1888 // (fcmp u** X/abs(X), Y) | (fcmp uno X, 0) --> fcmp u** X/abs(X), Y
1889 // (fcmp u** X/abs(X), Y) | (fcmp ord X, 0) --> true
1890 if ((match(LHS0, AbsOrSelfRHS0) || match(LHS1, AbsOrSelfRHS0)) &&
1891 match(RHS1, m_PosZeroFP()))
1892 return FCmpInst::isOrdered(PredL) == FCmpInst::isOrdered(PredR)
1893 ? static_cast<Value *>(LHS)
1894 : ConstantInt::getBool(LHS->getType(), !IsAnd);
1895 }
1896
1897 if (auto *V = simplifyAndOrOfFCmpsWithConstants(LHS, RHS, IsAnd))
1898 return V;
1899
1900 return nullptr;
1901}
1902
1903static Value *simplifyAndOrOfCmps(const SimplifyQuery &Q, Value *Op0,
1904 Value *Op1, bool IsAnd) {
1905 // Look through casts of the 'and' operands to find compares.
1906 auto *Cast0 = dyn_cast<CastInst>(Op0);
1907 auto *Cast1 = dyn_cast<CastInst>(Op1);
1908 if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
1909 Cast0->getSrcTy() == Cast1->getSrcTy()) {
1910 Op0 = Cast0->getOperand(0);
1911 Op1 = Cast1->getOperand(0);
1912 }
1913
1914 Value *V = nullptr;
1915 auto *ICmp0 = dyn_cast<ICmpInst>(Op0);
1916 auto *ICmp1 = dyn_cast<ICmpInst>(Op1);
1917 if (ICmp0 && ICmp1)
1918 V = IsAnd ? simplifyAndOfICmps(ICmp0, ICmp1, Q)
1919 : simplifyOrOfICmps(ICmp0, ICmp1, Q);
1920
1921 auto *FCmp0 = dyn_cast<FCmpInst>(Op0);
1922 auto *FCmp1 = dyn_cast<FCmpInst>(Op1);
1923 if (FCmp0 && FCmp1)
1924 V = simplifyAndOrOfFCmps(Q, FCmp0, FCmp1, IsAnd);
1925
1926 if (!V)
1927 return nullptr;
1928 if (!Cast0)
1929 return V;
1930
1931 // If we looked through casts, we can only handle a constant simplification
1932 // because we are not allowed to create a cast instruction here.
1933 if (auto *C = dyn_cast<Constant>(V))
1934 return ConstantFoldCastOperand(Cast0->getOpcode(), C, Cast0->getType(),
1935 Q.DL);
1936
1937 return nullptr;
1938}
1939
1940static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
1941 const SimplifyQuery &Q,
1942 bool AllowRefinement,
1943 SmallVectorImpl<Instruction *> *DropFlags,
1944 unsigned MaxRecurse);
1945
1946static Value *simplifyAndOrWithICmpEq(unsigned Opcode, Value *Op0, Value *Op1,
1947 const SimplifyQuery &Q,
1948 unsigned MaxRecurse) {
1949 assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1950 "Must be and/or");
1951 CmpPredicate Pred;
1952 Value *A, *B;
1953 if (!match(Op0, m_ICmp(Pred, m_Value(A), m_Value(B))) ||
1954 !ICmpInst::isEquality(Pred))
1955 return nullptr;
1956
1957 auto Simplify = [&](Value *Res) -> Value * {
1958 Constant *Absorber = ConstantExpr::getBinOpAbsorber(Opcode, Res->getType());
1959
1960 // and (icmp eq a, b), x implies (a==b) inside x.
1961 // or (icmp ne a, b), x implies (a==b) inside x.
1962 // If x simplifies to true/false, we can simplify the and/or.
1963 if (Pred ==
1964 (Opcode == Instruction::And ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE)) {
1965 if (Res == Absorber)
1966 return Absorber;
1967 if (Res == ConstantExpr::getBinOpIdentity(Opcode, Res->getType()))
1968 return Op0;
1969 return nullptr;
1970 }
1971
1972 // If we have and (icmp ne a, b), x and for a==b we can simplify x to false,
1973 // then we can drop the icmp, as x will already be false in the case where
1974 // the icmp is false. Similar for or and true.
1975 if (Res == Absorber)
1976 return Op1;
1977 return nullptr;
1978 };
1979
1980 // In the final case (Res == Absorber with inverted predicate), it is safe to
1981 // refine poison during simplification, but not undef. For simplicity always
1982 // disable undef-based folds here.
1983 if (Value *Res = simplifyWithOpReplaced(Op1, A, B, Q.getWithoutUndef(),
1984 /* AllowRefinement */ true,
1985 /* DropFlags */ nullptr, MaxRecurse))
1986 return Simplify(Res);
1987 if (Value *Res = simplifyWithOpReplaced(Op1, B, A, Q.getWithoutUndef(),
1988 /* AllowRefinement */ true,
1989 /* DropFlags */ nullptr, MaxRecurse))
1990 return Simplify(Res);
1991
1992 return nullptr;
1993}
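Aside (not part of the source): the substitution argument above rests on a simple fact: under the guard a == b, any predicate may be evaluated with a replaced by b. A tiny exhaustive C++ check; pred is an arbitrary placeholder, not anything from this file:

#include <cassert>
#include <cstdint>

// Any predicate at all; the guard makes the substitution sound.
static bool pred(uint8_t A, uint8_t B) { return uint8_t(A * 3 + B) < 100; }

int main() {
  for (unsigned A = 0; A < 256; ++A)
    for (unsigned B = 0; B < 256; ++B)
      assert(((A == B) && pred(A, B)) == ((A == B) && pred(B, B)));
  return 0;
}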
1994
1995/// Given a bitwise logic op, check if the operands are add/sub with a common
1996/// source value and inverted constant (identity: C - X -> ~(X + ~C)).
1997static Value *simplifyLogicOfAddSub(Value *Op0, Value *Op1,
1998 Instruction::BinaryOps Opcode) {
1999 assert(Op0->getType() == Op1->getType() && "Mismatched binop types");
2000 assert(BinaryOperator::isBitwiseLogicOp(Opcode) && "Expected logic op");
2001 Value *X;
2002 Constant *C1, *C2;
2003 if ((match(Op0, m_Add(m_Value(X), m_Constant(C1))) &&
2004 match(Op1, m_Sub(m_Constant(C2), m_Specific(X)))) ||
2005 (match(Op1, m_Add(m_Value(X), m_Constant(C1))) &&
2006 match(Op0, m_Sub(m_Constant(C2), m_Specific(X))))) {
2007 if (ConstantExpr::getNot(C1) == C2) {
2008 // (X + C) & (~C - X) --> (X + C) & ~(X + C) --> 0
2009 // (X + C) | (~C - X) --> (X + C) | ~(X + C) --> -1
2010 // (X + C) ^ (~C - X) --> (X + C) ^ ~(X + C) --> -1
2011 Type *Ty = Op0->getType();
2012 return Opcode == Instruction::And ? ConstantInt::getNullValue(Ty)
2013 : ConstantInt::getAllOnesValue(Ty);
2014 }
2015 }
2016 return nullptr;
2017}
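Aside (not part of the source): the identity ~C - X == ~(X + C) (mod 2^n) is what makes all three folds work. A short standalone C++ check with an arbitrary constant:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t C = 0x5A5A5A5Au; // arbitrary
  for (uint32_t X : {0u, 1u, 0xDEADBEEFu, 0xFFFFFFFFu}) {
    uint32_t Add = X + C;  // X + C
    uint32_t Sub = ~C - X; // ~C - X, equal to ~(X + C) mod 2^32
    assert(Sub == ~Add);
    assert((Add & Sub) == 0u);          // and --> 0
    assert((Add | Sub) == 0xFFFFFFFFu); // or  --> -1
    assert((Add ^ Sub) == 0xFFFFFFFFu); // xor --> -1
  }
  return 0;
}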
2018
2019// Commutative patterns for and that will be tried with both operand orders.
2020static Value *simplifyAndCommutative(Value *Op0, Value *Op1,
2021 const SimplifyQuery &Q,
2022 unsigned MaxRecurse) {
2023 // ~A & A = 0
2024 if (match(Op0, m_Not(m_Specific(Op1))))
2025 return Constant::getNullValue(Op0->getType());
2026
2027 // (A | ?) & A = A
2028 if (match(Op0, m_c_Or(m_Specific(Op1), m_Value())))
2029 return Op1;
2030
2031 // (X | ~Y) & (X | Y) --> X
2032 Value *X, *Y;
2033 if (match(Op0, m_c_Or(m_Value(X), m_Not(m_Value(Y)))) &&
2034 match(Op1, m_c_Or(m_Specific(X), m_Specific(Y))))
2035 return X;
2036
2037 // If we have a multiplication overflow check that is being 'and'ed with a
2038 // check that one of the multipliers is not zero, we can omit the 'and', and
2039 // only keep the overflow check.
2040 if (isCheckForZeroAndMulWithOverflow(Op0, Op1, true))
2041 return Op1;
2042
2043 // -A & A = A if A is a power of two or zero.
2044 if (match(Op0, m_Neg(m_Specific(Op1))) &&
2045 isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, Q.AC, Q.CxtI, Q.DT))
2046 return Op1;
2047
2048 // This is a similar pattern used for checking if a value is a power-of-2:
2049 // (A - 1) & A --> 0 (if A is a power-of-2 or 0)
2050 if (match(Op0, m_Add(m_Specific(Op1), m_AllOnes())) &&
2051 isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, Q.AC, Q.CxtI, Q.DT))
2052 return Constant::getNullValue(Op1->getType());
2053
2054 // (x << N) & ((x << M) - 1) --> 0, where x is known to be a power of 2 and
2055 // M <= N.
2056 const APInt *Shift1, *Shift2;
2057 if (match(Op0, m_Shl(m_Value(X), m_APInt(Shift1))) &&
2058 match(Op1, m_Add(m_Shl(m_Specific(X), m_APInt(Shift2)), m_AllOnes())) &&
2059 isKnownToBeAPowerOfTwo(X, Q.DL, /*OrZero*/ true, Q.AC, Q.CxtI) &&
2060 Shift1->uge(*Shift2))
2061 return Constant::getNullValue(Op0->getType());
2062
2063 if (Value *V =
2064 simplifyAndOrWithICmpEq(Instruction::And, Op0, Op1, Q, MaxRecurse))
2065 return V;
2066
2067 return nullptr;
2068}
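Aside (not part of the source): two of the power-of-two folds above, checked over a few sample values in standalone C++; A must be a power of two or zero, per the folds' precondition:

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t A : {0u, 1u, 2u, 8u, 1024u, 1u << 31}) {
    assert(((A - 1u) & A) == 0u); // (A - 1) & A --> 0
    assert(((0u - A) & A) == A);  // -A & A --> A
  }
  return 0;
}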
2069
2070/// Given operands for an And, see if we can fold the result.
2071/// If not, this returns null.
2072static Value *simplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2073 unsigned MaxRecurse) {
2074 if (Constant *C = foldOrCommuteConstant(Instruction::And, Op0, Op1, Q))
2075 return C;
2076
2077 // X & poison -> poison
2078 if (isa<PoisonValue>(Op1))
2079 return Op1;
2080
2081 // X & undef -> 0
2082 if (Q.isUndefValue(Op1))
2083 return Constant::getNullValue(Op0->getType());
2084
2085 // X & X = X
2086 if (Op0 == Op1)
2087 return Op0;
2088
2089 // X & 0 = 0
2090 if (match(Op1, m_Zero()))
2091 return Constant::getNullValue(Op0->getType());
2092
2093 // X & -1 = X
2094 if (match(Op1, m_AllOnes()))
2095 return Op0;
2096
2097 if (Value *Res = simplifyAndCommutative(Op0, Op1, Q, MaxRecurse))
2098 return Res;
2099 if (Value *Res = simplifyAndCommutative(Op1, Op0, Q, MaxRecurse))
2100 return Res;
2101
2102 if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::And))
2103 return V;
2104
2105 // A mask that only clears known zeros of a shifted value is a no-op.
2106 const APInt *Mask;
2107 const APInt *ShAmt;
2108 Value *X, *Y;
2109 if (match(Op1, m_APInt(Mask))) {
2110 // If all bits in the inverted and shifted mask are clear:
2111 // and (shl X, ShAmt), Mask --> shl X, ShAmt
2112 if (match(Op0, m_Shl(m_Value(X), m_APInt(ShAmt))) &&
2113 (~(*Mask)).lshr(*ShAmt).isZero())
2114 return Op0;
2115
2116 // If all bits in the inverted and shifted mask are clear:
2117 // and (lshr X, ShAmt), Mask --> lshr X, ShAmt
2118 if (match(Op0, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
2119 (~(*Mask)).shl(*ShAmt).isZero())
2120 return Op0;
2121 }
2122
2123 // and 2^x-1, 2^C --> 0 where x <= C.
2124 const APInt *PowerC;
2125 Value *Shift;
2126 if (match(Op1, m_Power2(PowerC)) &&
2127 match(Op0, m_Add(m_Value(Shift), m_AllOnes())) &&
2128 isKnownToBeAPowerOfTwo(Shift, Q.DL, /*OrZero*/ false, Q.AC, Q.CxtI,
2129 Q.DT)) {
2130 KnownBits Known = computeKnownBits(Shift, Q);
2131 // Use getActiveBits() to make use of the additional power of two knowledge
2132 if (PowerC->getActiveBits() >= Known.getMaxValue().getActiveBits())
2133 return ConstantInt::getNullValue(Op1->getType());
2134 }
2135
2136 if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, true))
2137 return V;
2138
2139 // Try some generic simplifications for associative operations.
2140 if (Value *V =
2141 simplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q, MaxRecurse))
2142 return V;
2143
2144 // And distributes over Or. Try some generic simplifications based on this.
2145 if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1,
2146 Instruction::Or, Q, MaxRecurse))
2147 return V;
2148
2149 // And distributes over Xor. Try some generic simplifications based on this.
2150 if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1,
2151 Instruction::Xor, Q, MaxRecurse))
2152 return V;
2153
2154 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2155 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2156 // A & (A && B) -> A && B
2157 if (match(Op1, m_Select(m_Specific(Op0), m_Value(), m_Zero())))
2158 return Op1;
2159 else if (match(Op0, m_Select(m_Specific(Op1), m_Value(), m_Zero())))
2160 return Op0;
2161 }
2162 // If the operation is with the result of a select instruction, check
2163 // whether operating on either branch of the select always yields the same
2164 // value.
2165 if (Value *V =
2166 threadBinOpOverSelect(Instruction::And, Op0, Op1, Q, MaxRecurse))
2167 return V;
2168 }
2169
2170 // If the operation is with the result of a phi instruction, check whether
2171 // operating on all incoming values of the phi always yields the same value.
2172 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2173 if (Value *V =
2174 threadBinOpOverPHI(Instruction::And, Op0, Op1, Q, MaxRecurse))
2175 return V;
2176
2177 // Assuming the effective width of Y is not larger than A, i.e. all bits
2178 // from X and Y are disjoint in (X << A) | Y,
2179 // if the mask of this AND op covers all bits of X or Y, while it covers
2180 // no bits from the other, we can bypass this AND op. E.g.,
2181 // ((X << A) | Y) & Mask -> Y,
2182 // if Mask = ((1 << effective_width_of(Y)) - 1)
2183 // ((X << A) | Y) & Mask -> X << A,
2184 // if Mask = ((1 << effective_width_of(X)) - 1) << A
2185 // SimplifyDemandedBits in InstCombine can optimize the general case.
2186 // This pattern aims to help other passes for a common case.
2187 Value *XShifted;
2188 if (Q.IIQ.UseInstrInfo && match(Op1, m_APInt(Mask)) &&
2189 match(Op0, m_c_Or(m_CombineAnd(m_Shl(m_Value(X), m_APInt(ShAmt)),
2190 m_Value(XShifted)),
2191 m_Value(Y)))) {
2192 const unsigned Width = Op0->getType()->getScalarSizeInBits();
2193 const unsigned ShftCnt = ShAmt->getLimitedValue(Width);
2194 const KnownBits YKnown = computeKnownBits(Y, Q);
2195 const unsigned EffWidthY = YKnown.countMaxActiveBits();
2196 if (EffWidthY <= ShftCnt) {
2197 const KnownBits XKnown = computeKnownBits(X, Q);
2198 const unsigned EffWidthX = XKnown.countMaxActiveBits();
2199 const APInt EffBitsY = APInt::getLowBitsSet(Width, EffWidthY);
2200 const APInt EffBitsX = APInt::getLowBitsSet(Width, EffWidthX) << ShftCnt;
2201 // If the mask is extracting all bits from X or Y as is, we can skip
2202 // this AND op.
2203 if (EffBitsY.isSubsetOf(*Mask) && !EffBitsX.intersects(*Mask))
2204 return Y;
2205 if (EffBitsX.isSubsetOf(*Mask) && !EffBitsY.intersects(*Mask))
2206 return XShifted;
2207 }
2208 }
2209
2210 // ((X | Y) ^ X ) & ((X | Y) ^ Y) --> 0
2211 // ((X | Y) ^ Y ) & ((X | Y) ^ X) --> 0
2212 BinaryOperator *Or;
2213 if (match(Op0, m_c_Xor(m_Value(X),
2214 m_CombineAnd(m_BinOp(Or),
2215 m_c_Or(m_Deferred(X), m_Value(Y))))) &&
2216 match(Op1, m_c_Xor(m_Specific(Or), m_Specific(Y))))
2217 return Constant::getNullValue(Op0->getType());
2218
2219 const APInt *C1;
2220 Value *A;
2221 // (A ^ C) & (A ^ ~C) -> 0
2222 if (match(Op0, m_Xor(m_Value(A), m_APInt(C1))) &&
2223 match(Op1, m_Xor(m_Specific(A), m_SpecificInt(~*C1))))
2224 return Constant::getNullValue(Op0->getType());
2225
2226 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2227 if (std::optional<bool> Implied = isImpliedCondition(Op0, Op1, Q.DL)) {
2228 // If Op0 is true implies Op1 is true, then Op0 is a subset of Op1.
2229 if (*Implied == true)
2230 return Op0;
2231 // If Op0 is true implies Op1 is false, then they are not true together.
2232 if (*Implied == false)
2233 return ConstantInt::getFalse(Op0->getType());
2234 }
2235 if (std::optional<bool> Implied = isImpliedCondition(Op1, Op0, Q.DL)) {
2236 // If Op1 is true implies Op0 is true, then Op1 is a subset of Op0.
2237 if (*Implied)
2238 return Op1;
2239 // If Op1 is true implies Op0 is false, then they are not true together.
2240 if (!*Implied)
2241 return ConstantInt::getFalse(Op1->getType());
2242 }
2243 }
2244
2245 if (Value *V = simplifyByDomEq(Instruction::And, Op0, Op1, Q, MaxRecurse))
2246 return V;
2247
2248 return nullptr;
2249}
2250
2251Value *llvm::simplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2252 return ::simplifyAndInst(Op0, Op1, Q, RecursionLimit);
2253}
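Aside (not part of the source): a hypothetical standalone driver, assuming an LLVM development tree; the module and function names are arbitrary. It shows the public entry point in action: the query folds (a == b) & (a != b) to false without materializing any new instruction:

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  auto *I32 = Type::getInt32Ty(Ctx);
  auto *FTy = FunctionType::get(Type::getInt1Ty(Ctx), {I32, I32}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
  BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
  IRBuilder<> B(BB);
  Value *Eq = B.CreateICmpEQ(F->getArg(0), F->getArg(1));
  Value *Ne = B.CreateICmpNE(F->getArg(0), F->getArg(1));
  SimplifyQuery Q(M.getDataLayout());
  // (a == b) & (a != b) simplifies without creating an instruction.
  if (Value *V = simplifyAndInst(Eq, Ne, Q))
    V->print(outs()); // prints: i1 false
  return 0;
}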
2254
2255// TODO: Many of these folds could use LogicalAnd/LogicalOr.
2256static Value *simplifyOrLogic(Value *X, Value *Y) {
2257 assert(X->getType() == Y->getType() && "Expected same type for 'or' ops");
2258 Type *Ty = X->getType();
2259
2260 // X | ~X --> -1
2261 if (match(Y, m_Not(m_Specific(X))))
2262 return ConstantInt::getAllOnesValue(Ty);
2263
2264 // X | ~(X & ?) = -1
2265 if (match(Y, m_Not(m_c_And(m_Specific(X), m_Value()))))
2266 return ConstantInt::getAllOnesValue(Ty);
2267
2268 // X | (X & ?) --> X
2269 if (match(Y, m_c_And(m_Specific(X), m_Value())))
2270 return X;
2271
2272 Value *A, *B;
2273
2274 // (A ^ B) | (A | B) --> A | B
2275 // (A ^ B) | (B | A) --> B | A
2276 if (match(X, m_Xor(m_Value(A), m_Value(B))) &&
2277 match(Y, m_c_Or(m_Specific(A), m_Specific(B))))
2278 return Y;
2279
2280 // ~(A ^ B) | (A | B) --> -1
2281 // ~(A ^ B) | (B | A) --> -1
2282 if (match(X, m_Not(m_Xor(m_Value(A), m_Value(B)))) &&
2283 match(Y, m_c_Or(m_Specific(A), m_Specific(B))))
2284 return ConstantInt::getAllOnesValue(Ty);
2285
2286 // (A & ~B) | (A ^ B) --> A ^ B
2287 // (~B & A) | (A ^ B) --> A ^ B
2288 // (A & ~B) | (B ^ A) --> B ^ A
2289 // (~B & A) | (B ^ A) --> B ^ A
2290 if (match(X, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
2291 match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
2292 return Y;
2293
2294 // (~A ^ B) | (A & B) --> ~A ^ B
2295 // (B ^ ~A) | (A & B) --> B ^ ~A
2296 // (~A ^ B) | (B & A) --> ~A ^ B
2297 // (B ^ ~A) | (B & A) --> B ^ ~A
2298 if (match(X, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
2299 match(Y, m_c_And(m_Specific(A), m_Specific(B))))
2300 return X;
2301
2302 // (~A | B) | (A ^ B) --> -1
2303 // (~A | B) | (B ^ A) --> -1
2304 // (B | ~A) | (A ^ B) --> -1
2305 // (B | ~A) | (B ^ A) --> -1
2306 if (match(X, m_c_Or(m_Not(m_Value(A)), m_Value(B))) &&
2307 match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
2308 return ConstantInt::getAllOnesValue(Ty);
2309
2310 // (~A & B) | ~(A | B) --> ~A
2311 // (~A & B) | ~(B | A) --> ~A
2312 // (B & ~A) | ~(A | B) --> ~A
2313 // (B & ~A) | ~(B | A) --> ~A
2314 Value *NotA;
2315 if (match(X, m_c_And(m_CombineAnd(m_Value(NotA), m_Not(m_Value(A))),
2316 m_Value(B))) &&
2317 match(Y, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
2318 return NotA;
2319 // The same is true of Logical And
2320 // TODO: This could share the logic of the version above if there was a
2321 // version of LogicalAnd that allowed more than just i1 types.
2322 if (match(X, m_c_LogicalAnd(m_CombineAnd(m_Value(NotA), m_Not(m_Value(A))),
2323 m_Value(B))) &&
2324 match(Y, m_Not(m_c_LogicalOr(m_Specific(A), m_Specific(B)))))
2325 return NotA;
2326
2327 // ~(A ^ B) | (A & B) --> ~(A ^ B)
2328 // ~(A ^ B) | (B & A) --> ~(A ^ B)
2329 Value *NotAB;
2330 if (match(X, m_CombineAnd(m_Not(m_Xor(m_Value(A), m_Value(B))),
2331 m_Value(NotAB))) &&
2332 match(Y, m_c_And(m_Specific(A), m_Specific(B))))
2333 return NotAB;
2334
2335 // ~(A & B) | (A ^ B) --> ~(A & B)
2336 // ~(A & B) | (B ^ A) --> ~(A & B)
2337 if (match(X, m_CombineAnd(m_Not(m_c_And(m_Value(A), m_Value(B))),
2338 m_Value(NotAB))) &&
2339 match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
2340 return NotAB;
2341
2342 return nullptr;
2343}
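Aside (not part of the source): three of the identities above, spot-checked exhaustively over 8-bit values in standalone C++; illustrative only:

#include <cassert>
#include <cstdint>

int main() {
  for (unsigned I = 0; I < 256; ++I) {
    for (unsigned J = 0; J < 256; ++J) {
      uint8_t A = I, B = J, NotA = ~A;
      assert(uint8_t((A ^ B) | (A | B)) == uint8_t(A | B)); // (A^B)|(A|B) --> A|B
      assert(uint8_t(~(A ^ B) | (A | B)) == 0xFF);          // ~(A^B)|(A|B) --> -1
      assert(uint8_t((NotA & B) | uint8_t(~(A | B))) == NotA); // (~A&B)|~(A|B) --> ~A
    }
  }
  return 0;
}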
2344
2345/// Given operands for an Or, see if we can fold the result.
2346/// If not, this returns null.
2347static Value *simplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2348 unsigned MaxRecurse) {
2349 if (Constant *C = foldOrCommuteConstant(Instruction::Or, Op0, Op1, Q))
2350 return C;
2351
2352 // X | poison -> poison
2353 if (isa<PoisonValue>(Op1))
2354 return Op1;
2355
2356 // X | undef -> -1
2357 // X | -1 = -1
2358 // Do not return Op1 because it may contain undef elements if it's a vector.
2359 if (Q.isUndefValue(Op1) || match(Op1, m_AllOnes()))
2360 return Constant::getAllOnesValue(Op0->getType());
2361
2362 // X | X = X
2363 // X | 0 = X
2364 if (Op0 == Op1 || match(Op1, m_Zero()))
2365 return Op0;
2366
2367 if (Value *R = simplifyOrLogic(Op0, Op1))
2368 return R;
2369 if (Value *R = simplifyOrLogic(Op1, Op0))
2370 return R;
2371
2372 if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Or))
2373 return V;
2374
2375 // Rotated -1 is still -1:
2376 // (-1 << X) | (-1 >> (C - X)) --> -1
2377 // (-1 >> X) | (-1 << (C - X)) --> -1
2378 // ...with C <= bitwidth (and commuted variants).
2379 Value *X, *Y;
2380 if ((match(Op0, m_Shl(m_AllOnes(), m_Value(X))) &&
2381 match(Op1, m_LShr(m_AllOnes(), m_Value(Y)))) ||
2382 (match(Op1, m_Shl(m_AllOnes(), m_Value(X))) &&
2383 match(Op0, m_LShr(m_AllOnes(), m_Value(Y))))) {
2384 const APInt *C;
2385 if ((match(X, m_Sub(m_APInt(C), m_Specific(Y))) ||
2386 match(Y, m_Sub(m_APInt(C), m_Specific(X)))) &&
2387 C->ule(X->getType()->getScalarSizeInBits())) {
2388 return ConstantInt::getAllOnesValue(X->getType());
2389 }
2390 }
2391
2392 // A funnel shift (rotate) can be decomposed into simpler shifts. See if we
2393 // are mixing in another shift that is redundant with the funnel shift.
2394
2395 // (fshl X, ?, Y) | (shl X, Y) --> fshl X, ?, Y
2396 // (shl X, Y) | (fshl X, ?, Y) --> fshl X, ?, Y
2397 if (match(Op0,
2398 m_Intrinsic<Intrinsic::fshl>(m_Value(X), m_Value(), m_Value(Y))) &&
2399 match(Op1, m_Shl(m_Specific(X), m_Specific(Y))))
2400 return Op0;
2401 if (match(Op1,
2402 m_Intrinsic<Intrinsic::fshl>(m_Value(X), m_Value(), m_Value(Y))) &&
2403 match(Op0, m_Shl(m_Specific(X), m_Specific(Y))))
2404 return Op1;
2405
2406 // (fshr ?, X, Y) | (lshr X, Y) --> fshr ?, X, Y
2407 // (lshr X, Y) | (fshr ?, X, Y) --> fshr ?, X, Y
2408 if (match(Op0,
2409 m_Intrinsic<Intrinsic::fshr>(m_Value(), m_Value(X), m_Value(Y))) &&
2410 match(Op1, m_LShr(m_Specific(X), m_Specific(Y))))
2411 return Op0;
2412 if (match(Op1,
2413 m_Intrinsic<Intrinsic::fshr>(m_Value(), m_Value(X), m_Value(Y))) &&
2414 match(Op0, m_LShr(m_Specific(X), m_Specific(Y))))
2415 return Op1;
2416
2417 if (Value *V =
2418 simplifyAndOrWithICmpEq(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2419 return V;
2420 if (Value *V =
2421 simplifyAndOrWithICmpEq(Instruction::Or, Op1, Op0, Q, MaxRecurse))
2422 return V;
2423
2424 if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, false))
2425 return V;
2426
2427 // If we have a multiplication overflow check that is being 'and'ed with a
2428 // check that one of the multipliers is not zero, we can omit the 'and', and
2429 // only keep the overflow check.
2430 if (isCheckForZeroAndMulWithOverflow(Op0, Op1, false))
2431 return Op1;
2432 if (isCheckForZeroAndMulWithOverflow(Op1, Op0, false))
2433 return Op0;
2434
2435 // Try some generic simplifications for associative operations.
2436 if (Value *V =
2437 simplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2438 return V;
2439
2440 // Or distributes over And. Try some generic simplifications based on this.
2441 if (Value *V = expandCommutativeBinOp(Instruction::Or, Op0, Op1,
2442 Instruction::And, Q, MaxRecurse))
2443 return V;
2444
2445 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2446 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2447 // A | (A || B) -> A || B
2448 if (match(Op1, m_Select(m_Specific(Op0), m_One(), m_Value())))
2449 return Op1;
2450 else if (match(Op0, m_Select(m_Specific(Op1), m_One(), m_Value())))
2451 return Op0;
2452 }
2453 // If the operation is with the result of a select instruction, check
2454 // whether operating on either branch of the select always yields the same
2455 // value.
2456 if (Value *V =
2457 threadBinOpOverSelect(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2458 return V;
2459 }
2460
2461 // (A & C1)|(B & C2)
2462 Value *A, *B;
2463 const APInt *C1, *C2;
2464 if (match(Op0, m_And(m_Value(A), m_APInt(C1))) &&
2465 match(Op1, m_And(m_Value(B), m_APInt(C2)))) {
2466 if (*C1 == ~*C2) {
2467 // (A & C1)|(B & C2)
2468 // If we have: ((V + N) & C1) | (V & C2)
2469 // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
2470 // replace with V+N.
2471 Value *N;
2472 if (C2->isMask() && // C2 == 0+1+
2473 match(A, m_c_Add(m_Specific(B), m_Value(N)))) {
2474 // Add commutes, try both ways.
2475 if (MaskedValueIsZero(N, *C2, Q))
2476 return A;
2477 }
2478 // Or commutes, try both ways.
2479 if (C1->isMask() && match(B, m_c_Add(m_Specific(A), m_Value(N)))) {
2480 // Add commutes, try both ways.
2481 if (MaskedValueIsZero(N, *C1, Q))
2482 return B;
2483 }
2484 }
2485 }
2486
2487 // If the operation is with the result of a phi instruction, check whether
2488 // operating on all incoming values of the phi always yields the same value.
2489 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2490 if (Value *V = threadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2491 return V;
2492
2493 // (A ^ C) | (A ^ ~C) -> -1, i.e. all bits set to one.
2494 if (match(Op0, m_Xor(m_Value(A), m_APInt(C1))) &&
2495 match(Op1, m_Xor(m_Specific(A), m_SpecificInt(~*C1))))
2496 return Constant::getAllOnesValue(Op0->getType());
2497
2498 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2499 if (std::optional<bool> Implied =
2500 isImpliedCondition(Op0, Op1, Q.DL, false)) {
2501 // If Op0 is false implies Op1 is false, then Op1 is a subset of Op0.
2502 if (*Implied == false)
2503 return Op0;
2504 // If Op0 is false implies Op1 is true, then at least one is always true.
2505 if (*Implied == true)
2506 return ConstantInt::getTrue(Op0->getType());
2507 }
2508 if (std::optional<bool> Implied =
2509 isImpliedCondition(Op1, Op0, Q.DL, false)) {
2510 // If Op1 is false implies Op0 is false, then Op0 is a subset of Op1.
2511 if (*Implied == false)
2512 return Op1;
2513 // If Op1 is false implies Op0 is true, then at least one is always true.
2514 if (*Implied == true)
2515 return ConstantInt::getTrue(Op1->getType());
2516 }
2517 }
2518
2519 if (Value *V = simplifyByDomEq(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2520 return V;
2521
2522 return nullptr;
2523}
2524
2525Value *llvm::simplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2526 return ::simplifyOrInst(Op0, Op1, Q, RecursionLimit);
2527}
2528
2529/// Given operands for a Xor, see if we can fold the result.
2530/// If not, this returns null.
2531static Value *simplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2532 unsigned MaxRecurse) {
2533 if (Constant *C = foldOrCommuteConstant(Instruction::Xor, Op0, Op1, Q))
2534 return C;
2535
2536 // X ^ poison -> poison
2537 if (isa<PoisonValue>(Op1))
2538 return Op1;
2539
2540 // A ^ undef -> undef
2541 if (Q.isUndefValue(Op1))
2542 return Op1;
2543
2544 // A ^ 0 = A
2545 if (match(Op1, m_Zero()))
2546 return Op0;
2547
2548 // A ^ A = 0
2549 if (Op0 == Op1)
2550 return Constant::getNullValue(Op0->getType());
2551
2552 // A ^ ~A = ~A ^ A = -1
2553 if (match(Op0, m_Not(m_Specific(Op1))) || match(Op1, m_Not(m_Specific(Op0))))
2554 return Constant::getAllOnesValue(Op0->getType());
2555
2556 auto foldAndOrNot = [](Value *X, Value *Y) -> Value * {
2557 Value *A, *B;
2558 // (~A & B) ^ (A | B) --> A -- There are 8 commuted variants.
2559 if (match(X, m_c_And(m_Not(m_Value(A)), m_Value(B))) &&
2560 match(Y, m_c_Or(m_Specific(A), m_Specific(B))))
2561 return A;
2562
2563 // (~A | B) ^ (A & B) --> ~A -- There are 8 commuted variants.
2564 // The 'not' op must contain a complete -1 operand (no undef elements for
2565 // vector) for the transform to be safe.
2566 Value *NotA;
2567 if (match(X, m_c_Or(m_CombineAnd(m_Not(m_Value(A)), m_Value(NotA)),
2568 m_Value(B))) &&
2569 match(Y, m_c_And(m_Specific(A), m_Specific(B))))
2570 return NotA;
2571
2572 return nullptr;
2573 };
2574 if (Value *R = foldAndOrNot(Op0, Op1))
2575 return R;
2576 if (Value *R = foldAndOrNot(Op1, Op0))
2577 return R;
2578
2579 if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Xor))
2580 return V;
2581
2582 // Try some generic simplifications for associative operations.
2583 if (Value *V =
2584 simplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q, MaxRecurse))
2585 return V;
2586
2587 // Threading Xor over selects and phi nodes is pointless, so don't bother.
2588 // Threading over the select in "A ^ select(cond, B, C)" means evaluating
2589 // "A^B" and "A^C" and seeing if they are equal; but they are equal if and
2590 // only if B and C are equal. If B and C are equal then (since we assume
2591 // that operands have already been simplified) "select(cond, B, C)" should
2592 // have been simplified to the common value of B and C already. Analysing
2593 // "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly
2594 // for threading over phi nodes.
2595
2596 if (Value *V = simplifyByDomEq(Instruction::Xor, Op0, Op1, Q, MaxRecurse))
2597 return V;
2598
2599 // (xor (sub nuw C_Mask, X), C_Mask) -> X
2600 {
2601 Value *X;
2602 if (match(Op0, m_NUWSub(m_Specific(Op1), m_Value(X))) &&
2603 match(Op1, m_LowBitMask()))
2604 return X;
2605 }
2606
2607 return nullptr;
2608}
2609
2610Value *llvm::simplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2611 return ::simplifyXorInst(Op0, Op1, Q, RecursionLimit);
2612}
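Aside (not part of the source): the final xor fold relies on Mask - X == Mask ^ X whenever Mask is a low-bit mask and X fits inside it, so the subtraction borrows nothing (matching the nuw requirement). A standalone check:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t Mask = 0xFFu; // a low-bit mask
  for (uint32_t X = 0; X <= Mask; ++X) // X <= Mask, so the sub cannot wrap
    assert(((Mask - X) ^ Mask) == X);  // (sub nuw Mask, X) ^ Mask --> X
  return 0;
}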
2613
2614static Type *getCompareTy(Value *Op) {
2615 return CmpInst::makeCmpResultType(Op->getType());
2616}
2617
2618/// Rummage around inside V looking for something equivalent to the comparison
2619/// "LHS Pred RHS". Return such a value if found, otherwise return null.
2620/// Helper function for analyzing max/min idioms.
2621static Value *extractEquivalentCondition(Value *V, CmpPredicate Pred,
2622 Value *LHS, Value *RHS) {
2623 SelectInst *SI = dyn_cast<SelectInst>(V);
2624 if (!SI)
2625 return nullptr;
2626 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
2627 if (!Cmp)
2628 return nullptr;
2629 Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
2630 if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
2631 return Cmp;
2632 if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) &&
2633 LHS == CmpRHS && RHS == CmpLHS)
2634 return Cmp;
2635 return nullptr;
2636}
2637
2638/// Return true if the underlying object (storage) must be disjoint from
2639/// storage returned by any noalias return call.
2640static bool isAllocDisjoint(const Value *V) {
2641 // For allocas, we consider only static ones (dynamic
2642 // allocas might be transformed into calls to malloc not simultaneously
2643 // live with the compared-to allocation). For globals, we exclude symbols
2644 // that might be resolved lazily to symbols in another dynamically-loaded
2645 // library (and, thus, could be malloc'ed by the implementation).
2646 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
2647 return AI->isStaticAlloca();
2648 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
2649 return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
2650 GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
2651 !GV->isThreadLocal();
2652 if (const Argument *A = dyn_cast<Argument>(V))
2653 return A->hasByValAttr();
2654 return false;
2655}
2656
2657/// Return true if V1 and V2 are each the base of some distinct storage region
2658/// [V, object_size(V)] which do not overlap. Note that zero sized regions
2659/// *are* possible, and that zero sized regions do not overlap with any other.
2660static bool haveNonOverlappingStorage(const Value *V1, const Value *V2) {
2661 // Global variables always exist, so they always exist during the lifetime
2662 // of each other and all allocas. Global variables themselves usually have
2663 // non-overlapping storage, but since their addresses are constants, the
2664 // case involving two globals does not reach here and is instead handled in
2665 // constant folding.
2666 //
2667 // Two different allocas usually have different addresses...
2668 //
2669 // However, if there's an @llvm.stackrestore dynamically in between two
2670 // allocas, they may have the same address. It's tempting to reduce the
2671 // scope of the problem by only looking at *static* allocas here. That would
2672 // cover the majority of allocas while significantly reducing the likelihood
2673 // of having an @llvm.stackrestore pop up in the middle. However, it's not
2674 // actually impossible for an @llvm.stackrestore to pop up in the middle of
2675 // an entry block. Also, if we have a block that's not attached to a
2676 // function, we can't tell if it's "static" under the current definition.
2677 // Theoretically, this problem could be fixed by creating a new kind of
2678 // instruction kind specifically for static allocas. Such a new instruction
2679 // could be required to be at the top of the entry block, thus preventing it
2680 // from being subject to a @llvm.stackrestore. Instcombine could even
2681 // convert regular allocas into these special allocas. It'd be nifty.
2682 // However, until then, this problem remains open.
2683 //
2684 // So, we'll assume that two non-empty allocas have different addresses
2685 // for now.
2686 auto isByValArg = [](const Value *V) {
2687 const Argument *A = dyn_cast<Argument>(V);
2688 return A && A->hasByValAttr();
2689 };
2690
2691 // Byval args are backed by store which does not overlap with each other,
2692 // allocas, or globals.
2693 if (isByValArg(V1))
2694 return isa<AllocaInst>(V2) || isa<GlobalVariable>(V2) || isByValArg(V2);
2695 if (isByValArg(V2))
2696 return isa<AllocaInst>(V1) || isa<GlobalVariable>(V1) || isByValArg(V1);
2697
2698 return isa<AllocaInst>(V1) &&
2699 (isa<AllocaInst>(V2) || isa<GlobalVariable>(V2));
2700}
2701
2702// A significant optimization not implemented here is assuming that alloca
2703// addresses are not equal to incoming argument values. They don't *alias*,
2704// as we say, but that doesn't mean they aren't equal, so we take a
2705// conservative approach.
2706//
2707// This is inspired in part by C++11 5.10p1:
2708// "Two pointers of the same type compare equal if and only if they are both
2709// null, both point to the same function, or both represent the same
2710// address."
2711//
2712// This is pretty permissive.
2713//
2714// It's also partly due to C11 6.5.9p6:
2715// "Two pointers compare equal if and only if both are null pointers, both are
2716// pointers to the same object (including a pointer to an object and a
2717// subobject at its beginning) or function, both are pointers to one past the
2718// last element of the same array object, or one is a pointer to one past the
2719// end of one array object and the other is a pointer to the start of a
2720// different array object that happens to immediately follow the first array
2721 // object in the address space."
2722//
2723// C11's version is more restrictive, however there's no reason why an argument
2724// couldn't be a one-past-the-end value for a stack object in the caller and be
2725// equal to the beginning of a stack object in the callee.
2726//
2727// If the C and C++ standards are ever made sufficiently restrictive in this
2728// area, it may be possible to update LLVM's semantics accordingly and reinstate
2729// this optimization.
2730static Constant *computePointerICmp(CmpPredicate Pred, Value *LHS, Value *RHS,
2731 const SimplifyQuery &Q) {
2732 assert(LHS->getType() == RHS->getType() && "Must have same types");
2733 const DataLayout &DL = Q.DL;
2734 const TargetLibraryInfo *TLI = Q.TLI;
2735
2736 // We fold equality and unsigned predicates on pointer comparisons, but forbid
2737 // signed predicates since a GEP with inbounds could cross the sign boundary.
2738 if (CmpInst::isSigned(Pred))
2739 return nullptr;
2740
2741 // We have to switch to a signed predicate to handle negative indices from
2742 // the base pointer.
2743 Pred = ICmpInst::getSignedPredicate(Pred);
2744
2745 // Strip off any constant offsets so that we can reason about them.
2746 // It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets
2747 // here and compare base addresses like AliasAnalysis does, however there are
2748 // numerous hazards. AliasAnalysis and its utilities rely on special rules
2749 // governing loads and stores which don't apply to icmps. Also, AliasAnalysis
2750 // doesn't need to guarantee pointer inequality when it says NoAlias.
2751
2752 // Even if a non-inbounds GEP occurs along the path we can still optimize
2753 // equality comparisons concerning the result.
2754 bool AllowNonInbounds = ICmpInst::isEquality(Pred);
2755 unsigned IndexSize = DL.getIndexTypeSizeInBits(LHS->getType());
2756 APInt LHSOffset(IndexSize, 0), RHSOffset(IndexSize, 0);
2757 LHS = LHS->stripAndAccumulateConstantOffsets(DL, LHSOffset, AllowNonInbounds);
2758 RHS = RHS->stripAndAccumulateConstantOffsets(DL, RHSOffset, AllowNonInbounds);
2759
2760 // If LHS and RHS are related via constant offsets to the same base
2761 // value, we can replace it with an icmp which just compares the offsets.
2762 if (LHS == RHS)
2763 return ConstantInt::get(getCompareTy(LHS),
2764 ICmpInst::compare(LHSOffset, RHSOffset, Pred));
2765
2766 // Various optimizations for (in)equality comparisons.
2767 if (ICmpInst::isEquality(Pred)) {
2768 // Different non-empty allocations that exist at the same time have
2769 // different addresses (if the program can tell). If the offsets are
2770 // within the bounds of their allocations (and not one-past-the-end!
2771 // so we can't use inbounds!), and their allocations aren't the same,
2772 // the pointers are not equal.
2773 if (haveNonOverlappingStorage(LHS, RHS)) {
2774 uint64_t LHSSize, RHSSize;
2775 ObjectSizeOpts Opts;
2776 Opts.EvalMode = ObjectSizeOpts::Mode::Min;
2777 auto *F = [](Value *V) -> Function * {
2778 if (auto *I = dyn_cast<Instruction>(V))
2779 return I->getFunction();
2780 if (auto *A = dyn_cast<Argument>(V))
2781 return A->getParent();
2782 return nullptr;
2783 }(LHS);
2784 Opts.NullIsUnknownSize = F ? NullPointerIsDefined(F) : true;
2785 if (getObjectSize(LHS, LHSSize, DL, TLI, Opts) && LHSSize != 0 &&
2786 getObjectSize(RHS, RHSSize, DL, TLI, Opts) && RHSSize != 0) {
2787 APInt Dist = LHSOffset - RHSOffset;
2788 if (Dist.isNonNegative() ? Dist.ult(LHSSize) : (-Dist).ult(RHSSize))
2789 return ConstantInt::get(getCompareTy(LHS),
2790 !CmpInst::isTrueWhenEqual(Pred));
2791 }
2792 }
2793
2794 // If one side of the equality comparison must come from a noalias call
2795 // (meaning a system memory allocation function), and the other side must
2796 // come from a pointer that cannot overlap with dynamically-allocated
2797 // memory within the lifetime of the current function (allocas, byval
2798 // arguments, globals), then determine the comparison result here.
2799 SmallVector<const Value *, 8> LHSUObjs, RHSUObjs;
2800 getUnderlyingObjects(LHS, LHSUObjs);
2801 getUnderlyingObjects(RHS, RHSUObjs);
2802
2803 // Is the set of underlying objects all noalias calls?
2804 auto IsNAC = [](ArrayRef<const Value *> Objects) {
2805 return all_of(Objects, isNoAliasCall);
2806 };
2807
2808 // Is the set of underlying objects all things which must be disjoint from
2809 // noalias calls. We assume that indexing from such disjoint storage
2810 // into the heap is undefined, and thus offsets can be safely ignored.
2811 auto IsAllocDisjoint = [](ArrayRef<const Value *> Objects) {
2812 return all_of(Objects, ::isAllocDisjoint);
2813 };
2814
2815 if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2816 (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2817 return ConstantInt::get(getCompareTy(LHS),
2818 !CmpInst::isTrueWhenEqual(Pred));
2819
2820 // Fold comparisons for non-escaping pointer even if the allocation call
2821 // cannot be elided. We cannot fold malloc comparison to null. Also, the
2822 // dynamic allocation call could be either of the operands. Note that
2823 // the other operand can not be based on the alloc - if it were, then
2824 // the cmp itself would be a capture.
2825 Value *MI = nullptr;
2826 if (isAllocLikeFn(LHS, TLI) && llvm::isKnownNonZero(RHS, Q))
2827 MI = LHS;
2828 else if (isAllocLikeFn(RHS, TLI) && llvm::isKnownNonZero(LHS, Q))
2829 MI = RHS;
2830 if (MI) {
2831 // FIXME: This is incorrect, see PR54002. While we can assume that the
2832 // allocation is at an address that makes the comparison false, this
2833 // requires that *all* comparisons to that address be false, which
2834 // InstSimplify cannot guarantee.
2835 struct CustomCaptureTracker : public CaptureTracker {
2836 bool Captured = false;
2837 void tooManyUses() override { Captured = true; }
2838 Action captured(const Use *U, UseCaptureInfo CI) override {
2839 // TODO(captures): Use UseCaptureInfo.
2840 if (auto *ICmp = dyn_cast<ICmpInst>(U->getUser())) {
2841 // Comparison against value stored in global variable. Given the
2842 // pointer does not escape, its value cannot be guessed and stored
2843 // separately in a global variable.
2844 unsigned OtherIdx = 1 - U->getOperandNo();
2845 auto *LI = dyn_cast<LoadInst>(ICmp->getOperand(OtherIdx));
2846 if (LI && isa<GlobalVariable>(LI->getPointerOperand()))
2847 return Continue;
2848 }
2849
2850 Captured = true;
2851 return Stop;
2852 }
2853 };
2854 CustomCaptureTracker Tracker;
2855 PointerMayBeCaptured(MI, &Tracker);
2856 if (!Tracker.Captured)
2857 return ConstantInt::get(getCompareTy(LHS),
2858 CmpInst::isFalseWhenEqual(Pred));
2859 }
2860 }
2861
2862 // Otherwise, fail.
2863 return nullptr;
2864}
2865
2866/// Fold an icmp when its operands have i1 scalar type.
2867static Value *simplifyICmpOfBools(CmpPredicate Pred, Value *LHS, Value *RHS,
2868 const SimplifyQuery &Q) {
2869 Type *ITy = getCompareTy(LHS); // The return type.
2870 Type *OpTy = LHS->getType(); // The operand type.
2871 if (!OpTy->isIntOrIntVectorTy(1))
2872 return nullptr;
2873
2874 // A boolean compared to true/false can be reduced in 14 out of the 20
2875 // (10 predicates * 2 constants) possible combinations. The other
2876 // 6 cases require a 'not' of the LHS.
2877
2878 auto ExtractNotLHS = [](Value *V) -> Value * {
2879 Value *X;
2880 if (match(V, m_Not(m_Value(X))))
2881 return X;
2882 return nullptr;
2883 };
2884
2885 if (match(RHS, m_Zero())) {
2886 switch (Pred) {
2887 case CmpInst::ICMP_NE: // X != 0 -> X
2888 case CmpInst::ICMP_UGT: // X >u 0 -> X
2889 case CmpInst::ICMP_SLT: // X <s 0 -> X
2890 return LHS;
2891
2892 case CmpInst::ICMP_EQ: // not(X) == 0 -> X != 0 -> X
2893 case CmpInst::ICMP_ULE: // not(X) <=u 0 -> X >u 0 -> X
2894 case CmpInst::ICMP_SGE: // not(X) >=s 0 -> X <s 0 -> X
2895 if (Value *X = ExtractNotLHS(LHS))
2896 return X;
2897 break;
2898
2899 case CmpInst::ICMP_ULT: // X <u 0 -> false
2900 case CmpInst::ICMP_SGT: // X >s 0 -> false
2901 return getFalse(ITy);
2902
2903 case CmpInst::ICMP_UGE: // X >=u 0 -> true
2904 case CmpInst::ICMP_SLE: // X <=s 0 -> true
2905 return getTrue(ITy);
2906
2907 default:
2908 break;
2909 }
2910 } else if (match(RHS, m_One())) {
2911 switch (Pred) {
2912 case CmpInst::ICMP_EQ: // X == 1 -> X
2913 case CmpInst::ICMP_UGE: // X >=u 1 -> X
2914 case CmpInst::ICMP_SLE: // X <=s -1 -> X
2915 return LHS;
2916
2917 case CmpInst::ICMP_NE: // not(X) != 1 -> X == 1 -> X
2918 case CmpInst::ICMP_ULT: // not(X) <u 1 -> X >=u 1 -> X
2919 case CmpInst::ICMP_SGT: // not(X) >s 1 -> X <=s -1 -> X
2920 if (Value *X = ExtractNotLHS(LHS))
2921 return X;
2922 break;
2923
2924 case CmpInst::ICMP_UGT: // X >u 1 -> false
2925 case CmpInst::ICMP_SLT: // X <s -1 -> false
2926 return getFalse(ITy);
2927
2928 case CmpInst::ICMP_ULE: // X <=u 1 -> true
2929 case CmpInst::ICMP_SGE: // X >=s -1 -> true
2930 return getTrue(ITy);
2931
2932 default:
2933 break;
2934 }
2935 }
2936
2937 switch (Pred) {
2938 default:
2939 break;
2940 case ICmpInst::ICMP_UGE:
2941 if (isImpliedCondition(RHS, LHS, Q.DL).value_or(false))
2942 return getTrue(ITy);
2943 break;
2944 case ICmpInst::ICMP_SGE:
2945 /// For signed comparison, the values for an i1 are 0 and -1
2946 /// respectively. This maps into a truth table of:
2947 /// LHS | RHS | LHS >=s RHS | LHS implies RHS
2948 /// 0 | 0 | 1 (0 >= 0) | 1
2949 /// 0 | 1 | 1 (0 >= -1) | 1
2950 /// 1 | 0 | 0 (-1 >= 0) | 0
2951 /// 1 | 1 | 1 (-1 >= -1) | 1
2952 if (isImpliedCondition(LHS, RHS, Q.DL).value_or(false))
2953 return getTrue(ITy);
2954 break;
2955 case ICmpInst::ICMP_ULE:
2956 if (isImpliedCondition(LHS, RHS, Q.DL).value_or(false))
2957 return getTrue(ITy);
2958 break;
2959 case ICmpInst::ICMP_SLE:
2960 /// SLE follows the same logic as SGE with the LHS and RHS swapped.
2961 if (isImpliedCondition(RHS, LHS, Q.DL).value_or(false))
2962 return getTrue(ITy);
2963 break;
2964 }
2965
2966 return nullptr;
2967}
2968
2969/// Try hard to fold icmp with zero RHS because this is a common case.
2970static Value *simplifyICmpWithZero(CmpPredicate Pred, Value *LHS, Value *RHS,
2971 const SimplifyQuery &Q) {
2972 if (!match(RHS, m_Zero()))
2973 return nullptr;
2974
2975 Type *ITy = getCompareTy(LHS); // The return type.
2976 switch (Pred) {
2977 default:
2978 llvm_unreachable("Unknown ICmp predicate!");
2979 case ICmpInst::ICMP_ULT:
2980 return getFalse(ITy);
2981 case ICmpInst::ICMP_UGE:
2982 return getTrue(ITy);
2983 case ICmpInst::ICMP_EQ:
2984 case ICmpInst::ICMP_ULE:
2985 if (isKnownNonZero(LHS, Q))
2986 return getFalse(ITy);
2987 break;
2988 case ICmpInst::ICMP_NE:
2989 case ICmpInst::ICMP_UGT:
2990 if (isKnownNonZero(LHS, Q))
2991 return getTrue(ITy);
2992 break;
2993 case ICmpInst::ICMP_SLT: {
2994 KnownBits LHSKnown = computeKnownBits(LHS, Q);
2995 if (LHSKnown.isNegative())
2996 return getTrue(ITy);
2997 if (LHSKnown.isNonNegative())
2998 return getFalse(ITy);
2999 break;
3000 }
3001 case ICmpInst::ICMP_SLE: {
3002 KnownBits LHSKnown = computeKnownBits(LHS, Q);
3003 if (LHSKnown.isNegative())
3004 return getTrue(ITy);
3005 if (LHSKnown.isNonNegative() && isKnownNonZero(LHS, Q))
3006 return getFalse(ITy);
3007 break;
3008 }
3009 case ICmpInst::ICMP_SGE: {
3010 KnownBits LHSKnown = computeKnownBits(LHS, Q);
3011 if (LHSKnown.isNegative())
3012 return getFalse(ITy);
3013 if (LHSKnown.isNonNegative())
3014 return getTrue(ITy);
3015 break;
3016 }
3017 case ICmpInst::ICMP_SGT: {
3018 KnownBits LHSKnown = computeKnownBits(LHS, Q);
3019 if (LHSKnown.isNegative())
3020 return getFalse(ITy);
3021 if (LHSKnown.isNonNegative() && isKnownNonZero(LHS, Q))
3022 return getTrue(ITy);
3023 break;
3024 }
3025 }
3026
3027 return nullptr;
3028}
3029
3030static Value *simplifyICmpWithConstant(CmpPredicate Pred, Value *LHS,
3031 Value *RHS, const SimplifyQuery &Q) {
3032 Type *ITy = getCompareTy(RHS); // The return type.
3033
3034 Value *X;
3035 const APInt *C;
3036 if (!match(RHS, m_APIntAllowPoison(C)))
3037 return nullptr;
3038
3039 // Sign-bit checks can be optimized to true/false after unsigned
3040 // floating-point casts:
3041 // icmp slt (bitcast (uitofp X)), 0 --> false
3042 // icmp sgt (bitcast (uitofp X)), -1 --> true
3043 if (match(LHS, m_ElementWiseBitCast(m_UIToFP(m_Value(X))))) {
3044 bool TrueIfSigned;
3045 if (isSignBitCheck(Pred, *C, TrueIfSigned))
3046 return ConstantInt::getBool(ITy, !TrueIfSigned);
3047 }
3048
3049 // Rule out tautological comparisons (e.g., ult 0 or uge 0).
3050 ConstantRange RHS_CR = ConstantRange::makeExactICmpRegion(Pred, *C);
3051 if (RHS_CR.isEmptySet())
3052 return ConstantInt::getFalse(ITy);
3053 if (RHS_CR.isFullSet())
3054 return ConstantInt::getTrue(ITy);
3055
3056 ConstantRange LHS_CR =
3057 computeConstantRange(LHS, CmpInst::isSigned(Pred), Q.IIQ.UseInstrInfo);
3058 if (!LHS_CR.isFullSet()) {
3059 if (RHS_CR.contains(LHS_CR))
3060 return ConstantInt::getTrue(ITy);
3061 if (RHS_CR.inverse().contains(LHS_CR))
3062 return ConstantInt::getFalse(ITy);
3063 }
3064
3065 // (mul nuw/nsw X, MulC) != C --> true (if C is not a multiple of MulC)
3066 // (mul nuw/nsw X, MulC) == C --> false (if C is not a multiple of MulC)
3067 const APInt *MulC;
3068 if (Q.IIQ.UseInstrInfo && ICmpInst::isEquality(Pred) &&
3069 ((match(LHS, m_NUWMul(m_Value(), m_APIntAllowPoison(MulC))) &&
3070 *MulC != 0 && C->urem(*MulC) != 0) ||
3071 (match(LHS, m_NSWMul(m_Value(), m_APIntAllowPoison(MulC))) &&
3072 *MulC != 0 && C->srem(*MulC) != 0)))
3073 return ConstantInt::get(ITy, Pred == ICmpInst::ICMP_NE);
3074
3075 if (Pred == ICmpInst::ICMP_UGE && C->isOne() && isKnownNonZero(LHS, Q))
3076 return ConstantInt::getTrue(ITy);
3077
3078 return nullptr;
3079}
3080
3081enum class MonotonicType { GreaterEq, LowerEq };
3082
3083/// Get values V_i such that V uge V_i (GreaterEq) or V ule V_i (LowerEq).
3084static void getUnsignedMonotonicValues(SmallPtrSetImpl<Value *> &Res, Value *V,
3085 MonotonicType Type,
3086 const SimplifyQuery &Q,
3087 unsigned Depth = 0) {
3088 if (!Res.insert(V).second)
3089 return;
3090
3091 // Can be increased if useful.
3092 if (++Depth > 1)
3093 return;
3094
3095 auto *I = dyn_cast<Instruction>(V);
3096 if (!I)
3097 return;
3098
3099 Value *X, *Y;
3100 if (Type == MonotonicType::GreaterEq) {
3101 if (match(I, m_Or(m_Value(X), m_Value(Y))) ||
3102 match(I, m_Intrinsic<Intrinsic::uadd_sat>(m_Value(X), m_Value(Y)))) {
3103 getUnsignedMonotonicValues(Res, X, Type, Q, Depth);
3104 getUnsignedMonotonicValues(Res, Y, Type, Q, Depth);
3105 }
3106 // X * Y >= X --> true
3107 if (match(I, m_NUWMul(m_Value(X), m_Value(Y)))) {
3108 if (isKnownNonZero(X, Q))
3109 getUnsignedMonotonicValues(Res, Y, Type, Q, Depth);
3110 if (isKnownNonZero(Y, Q))
3111 getUnsignedMonotonicValues(Res, X, Type, Q, Depth);
3112 }
3113 } else {
3114 assert(Type == MonotonicType::LowerEq);
3115 switch (I->getOpcode()) {
3116 case Instruction::And:
3117 getUnsignedMonotonicValues(Res, I->getOperand(0), Type, Q, Depth);
3118 getUnsignedMonotonicValues(Res, I->getOperand(1), Type, Q, Depth);
3119 break;
3120 case Instruction::URem:
3121 case Instruction::UDiv:
3122 case Instruction::LShr:
3123 getUnsignedMonotonicValues(Res, I->getOperand(0), Type, Q, Depth);
3124 break;
3125 case Instruction::Call:
3126 if (match(I, m_Intrinsic<Intrinsic::usub_sat>(m_Value(X))))
3127 getUnsignedMonotonicValues(Res, X, Type, Q, Depth);
3128 break;
3129 default:
3130 break;
3131 }
3132 }
3133}
3134
3135static Value *simplifyICmpUsingMonotonicValues(CmpPredicate Pred, Value *LHS,
3136 Value *RHS,
3137 const SimplifyQuery &Q) {
3138 if (Pred != ICmpInst::ICMP_UGE && Pred != ICmpInst::ICMP_ULT)
3139 return nullptr;
3140
3141 // We have LHS uge GreaterValues and LowerValues uge RHS. If any of the
3142 // GreaterValues and LowerValues are the same, it follows that LHS uge RHS.
3143 SmallPtrSet<Value *, 4> GreaterValues;
3144 SmallPtrSet<Value *, 4> LowerValues;
3145 getUnsignedMonotonicValues(GreaterValues, LHS, MonotonicType::GreaterEq, Q);
3146 getUnsignedMonotonicValues(LowerValues, RHS, MonotonicType::LowerEq, Q);
3147 for (Value *GV : GreaterValues)
3148 if (LowerValues.contains(GV))
3149 return ConstantInt::getBool(getCompareTy(LHS),
3150 Pred == ICmpInst::ICMP_UGE);
3151 return nullptr;
3152}
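Aside (not part of the source): a few of the monotonicity facts this helper relies on, spot-checked in standalone C++ (or only sets bits; and and lshr only reduce an unsigned value):

#include <cassert>

int main() {
  for (unsigned X = 0; X < 256; ++X) {
    for (unsigned Y = 0; Y < 256; ++Y) {
      assert((X | Y) >= X);  // X | Y uge X
      assert((X & Y) <= X);  // X & Y ule X
      assert((X >> 3) <= X); // lshr never increases an unsigned value
    }
  }
  return 0;
}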
3153
3154static Value *simplifyICmpWithBinOpOnLHS(CmpPredicate Pred, BinaryOperator *LBO,
3155 Value *RHS, const SimplifyQuery &Q,
3156 unsigned MaxRecurse) {
3157 Type *ITy = getCompareTy(RHS); // The return type.
3158
3159 Value *Y = nullptr;
3160 // icmp pred (or X, Y), X
3161 if (match(LBO, m_c_Or(m_Value(Y), m_Specific(RHS)))) {
3162 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
3163 KnownBits RHSKnown = computeKnownBits(RHS, Q);
3164 KnownBits YKnown = computeKnownBits(Y, Q);
3165 if (RHSKnown.isNonNegative() && YKnown.isNegative())
3166 return Pred == ICmpInst::ICMP_SLT ? getTrue(ITy) : getFalse(ITy);
3167 if (RHSKnown.isNegative() || YKnown.isNonNegative())
3168 return Pred == ICmpInst::ICMP_SLT ? getFalse(ITy) : getTrue(ITy);
3169 }
3170 }
3171
3172 // icmp pred (urem X, Y), Y
3173 if (match(LBO, m_URem(m_Value(), m_Specific(RHS)))) {
3174 switch (Pred) {
3175 default:
3176 break;
3177 case ICmpInst::ICMP_SGT:
3178 case ICmpInst::ICMP_SGE: {
3179 KnownBits Known = computeKnownBits(RHS, Q);
3180 if (!Known.isNonNegative())
3181 break;
3182 [[fallthrough]];
3183 }
3184 case ICmpInst::ICMP_EQ:
3185 case ICmpInst::ICMP_UGT:
3186 case ICmpInst::ICMP_UGE:
3187 return getFalse(ITy);
3188 case ICmpInst::ICMP_SLT:
3189 case ICmpInst::ICMP_SLE: {
3190 KnownBits Known = computeKnownBits(RHS, Q);
3191 if (!Known.isNonNegative())
3192 break;
3193 [[fallthrough]];
3194 }
3195 case ICmpInst::ICMP_NE:
3196 case ICmpInst::ICMP_ULT:
3197 case ICmpInst::ICMP_ULE:
3198 return getTrue(ITy);
3199 }
3200 }
3201
3202 // If x is nonzero:
3203 // x >>u C <u x --> true for C != 0.
3204 // x >>u C != x --> true for C != 0.
3205 // x >>u C >=u x --> false for C != 0.
3206 // x >>u C == x --> false for C != 0.
3207 // x udiv C <u x --> true for C != 1.
3208 // x udiv C != x --> true for C != 1.
3209 // x udiv C >=u x --> false for C != 1.
3210 // x udiv C == x --> false for C != 1.
3211 // TODO: allow non-constant shift amount/divisor
3212 const APInt *C;
3213 if ((match(LBO, m_LShr(m_Specific(RHS), m_APInt(C))) && *C != 0) ||
3214 (match(LBO, m_UDiv(m_Specific(RHS), m_APInt(C))) && *C != 1)) {
3215 if (isKnownNonZero(RHS, Q)) {
3216 switch (Pred) {
3217 default:
3218 break;
3219 case ICmpInst::ICMP_EQ:
3220 case ICmpInst::ICMP_UGE:
3221 case ICmpInst::ICMP_UGT:
3222 return getFalse(ITy);
3223 case ICmpInst::ICMP_NE:
3224 case ICmpInst::ICMP_ULT:
3225 case ICmpInst::ICMP_ULE:
3226 return getTrue(ITy);
3227 }
3228 }
3229 }
3230
3231 // (x*C1)/C2 <= x for C1 <= C2.
3232 // This holds even if the multiplication overflows: Assume that x != 0 and
3233 // arithmetic is modulo M. For overflow to occur we must have C1 >= M/x and
3234 // thus C2 >= M/x. It follows that (x*C1)/C2 <= (M-1)/C2 <= ((M-1)*x)/M < x.
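// (The middle step uses C2 >= M/x, i.e. 1/C2 <= x/M.)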
3235 //
3236 // Additionally, the multiplication or the division might be represented
3237 // as a shift:
3238 // (x*C1)>>C2 <= x for C1 < 2**C2.
3239 // (x<<C1)/C2 <= x for 2**C1 < C2.
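// For example, (x*3) >> 2 <=u x, since 3 < 2**2.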
3240 const APInt *C1, *C2;
3241 if ((match(LBO, m_UDiv(m_Mul(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
3242 C1->ule(*C2)) ||
3243 (match(LBO, m_LShr(m_Mul(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
3244 C1->ule(APInt(C2->getBitWidth(), 1) << *C2)) ||
3245 (match(LBO, m_UDiv(m_Shl(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
3246 (APInt(C1->getBitWidth(), 1) << *C1).ule(*C2))) {
3247 if (Pred == ICmpInst::ICMP_UGT)
3248 return getFalse(ITy);
3249 if (Pred == ICmpInst::ICMP_ULE)
3250 return getTrue(ITy);
3251 }
3252
3253 // (sub C, X) == X, C is odd --> false
3254 // (sub C, X) != X, C is odd --> true
3255 if (match(LBO, m_Sub(m_APIntAllowPoison(C), m_Specific(RHS))) &&
3256 (*C & 1) == 1 && ICmpInst::isEquality(Pred))
3257 return (Pred == ICmpInst::ICMP_EQ) ? getFalse(ITy) : getTrue(ITy);
3258
3259 return nullptr;
3260}
3261
3262// If only one of the icmp's operands has NSW flags, try to prove that:
3263//
3264// icmp slt/sgt/sle/sge (x + C1), (x +nsw C2)
3265//
3266// is equivalent to:
3267//
3268// icmp slt/sgt/sle/sge C1, C2
3269//
3270// which is true if x + C2 has the NSW flags set and:
3271// *) C1 <= C2 && C1 >= 0, or
3272// *) C2 <= C1 && C1 <= 0.
3273//
3274 static bool trySimplifyICmpWithAdds(CmpPredicate Pred, Value *LHS, Value *RHS,
3275 const InstrInfoQuery &IIQ) {
3276 // TODO: support other predicates.
3277 if (!ICmpInst::isSigned(Pred) || !IIQ.UseInstrInfo)
3278 return false;
3279
3280 // Canonicalize nsw add as RHS.
3281 if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
3282 std::swap(LHS, RHS);
3283 if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
3284 return false;
3285
3286 Value *X;
3287 const APInt *C1, *C2;
3288 if (!match(LHS, m_Add(m_Value(X), m_APInt(C1))) ||
3289 !match(RHS, m_Add(m_Specific(X), m_APInt(C2))))
3290 return false;
3291
3292 return (C1->sle(*C2) && C1->isNonNegative()) ||
3293 (C2->sle(*C1) && C1->isNonPositive());
3294}
3295
3296/// TODO: A large part of this logic is duplicated in InstCombine's
3297/// foldICmpBinOp(). We should be able to share that and avoid the code
3298/// duplication.
3299 static Value *simplifyICmpWithBinOp(CmpPredicate Pred, Value *LHS, Value *RHS,
3300 const SimplifyQuery &Q,
3301 unsigned MaxRecurse) {
3302 BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS);
3303 BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS);
3304 if (MaxRecurse && (LBO || RBO)) {
3305 // Analyze the case when either LHS or RHS is an add instruction.
3306 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
3307 // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null).
3308 bool NoLHSWrapProblem = false, NoRHSWrapProblem = false;
3309 if (LBO && LBO->getOpcode() == Instruction::Add) {
3310 A = LBO->getOperand(0);
3311 B = LBO->getOperand(1);
3312 NoLHSWrapProblem =
3313 ICmpInst::isEquality(Pred) ||
3314 (CmpInst::isUnsigned(Pred) &&
3315 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO))) ||
3316 (CmpInst::isSigned(Pred) &&
3317 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)));
3318 }
3319 if (RBO && RBO->getOpcode() == Instruction::Add) {
3320 C = RBO->getOperand(0);
3321 D = RBO->getOperand(1);
3322 NoRHSWrapProblem =
3323 ICmpInst::isEquality(Pred) ||
3324 (CmpInst::isUnsigned(Pred) &&
3325 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(RBO))) ||
3326 (CmpInst::isSigned(Pred) &&
3327 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RBO)));
3328 }
3329
3330 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
3331 if ((A == RHS || B == RHS) && NoLHSWrapProblem)
3332 if (Value *V = simplifyICmpInst(Pred, A == RHS ? B : A,
3333 Constant::getNullValue(RHS->getType()), Q,
3334 MaxRecurse - 1))
3335 return V;
3336
3337 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
3338 if ((C == LHS || D == LHS) && NoRHSWrapProblem)
3339 if (Value *V =
3340 simplifyICmpInst(Pred, Constant::getNullValue(LHS->getType()),
3341 C == LHS ? D : C, Q, MaxRecurse - 1))
3342 return V;
3343
3344 // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow.
3345 bool CanSimplify = (NoLHSWrapProblem && NoRHSWrapProblem) ||
3346 trySimplifyICmpWithAdds(Pred, LHS, RHS, Q.IIQ);
3347 if (A && C && (A == C || A == D || B == C || B == D) && CanSimplify) {
3348 // Determine Y and Z in the form icmp (X+Y), (X+Z).
3349 Value *Y, *Z;
3350 if (A == C) {
3351 // C + B == C + D -> B == D
3352 Y = B;
3353 Z = D;
3354 } else if (A == D) {
3355 // D + B == C + D -> B == C
3356 Y = B;
3357 Z = C;
3358 } else if (B == C) {
3359 // A + C == C + D -> A == D
3360 Y = A;
3361 Z = D;
3362 } else {
3363 assert(B == D);
3364 // A + D == C + D -> A == C
3365 Y = A;
3366 Z = C;
3367 }
3368 if (Value *V = simplifyICmpInst(Pred, Y, Z, Q, MaxRecurse - 1))
3369 return V;
3370 }
3371 }
3372
3373 if (LBO)
3374 if (Value *V = simplifyICmpWithBinOpOnLHS(Pred, LBO, RHS, Q, MaxRecurse))
3375 return V;
3376
3377 if (RBO)
3378 if (Value *V = simplifyICmpWithBinOpOnLHS(
3379 ICmpInst::getSwappedPredicate(Pred), RBO, LHS, Q, MaxRecurse))
3380 return V;
3381
3382 // 0 - (zext X) pred C
3383 if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) {
3384 const APInt *C;
3385 if (match(RHS, m_APInt(C))) {
3386 if (C->isStrictlyPositive()) {
3387 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_NE)
3388 return getTrue(ITy);
3389 if (Pred == ICmpInst::ICMP_SGE || Pred == ICmpInst::ICMP_EQ)
3390 return getFalse(ITy);
3391 }
3392 if (C->isNonNegative()) {
3393 if (Pred == ICmpInst::ICMP_SLE)
3394 return getTrue(ITy);
3395 if (Pred == ICmpInst::ICMP_SGT)
3396 return getFalse(ITy);
3397 }
3398 }
3399 }
3400
3401 // If C2 is a power-of-2 and C is not:
3402 // (C2 << X) == C --> false
3403 // (C2 << X) != C --> true
3404 const APInt *C;
3405 if (match(LHS, m_Shl(m_Power2(), m_Value())) &&
3406 match(RHS, m_APIntAllowPoison(C)) && !C->isPowerOf2()) {
3407 // C2 << X can equal zero in some circumstances.
3408 // This simplification might be unsafe if C is zero.
3409 //
3410 // We know it is safe if:
3411 // - The shift is nsw. We can't shift out the one bit.
3412 // - The shift is nuw. We can't shift out the one bit.
3413 // - C2 is one.
3414 // - C isn't zero.
3415 if (Q.IIQ.hasNoSignedWrap(cast<ShlOperator>(LHS)) ||
3416 Q.IIQ.hasNoUnsignedWrap(cast<ShlOperator>(LHS)) ||
3417 match(LHS, m_Shl(m_One(), m_Value())) || !C->isZero()) {
3418 if (Pred == ICmpInst::ICMP_EQ)
3419 return getFalse(ITy);
3420 if (Pred == ICmpInst::ICMP_NE)
3421 return getTrue(ITy);
3422 }
3423 }
3424
3425 // If C is a power-of-2:
3426 // (C << X) >u 0x8000 --> false
3427 // (C << X) <=u 0x8000 --> true
3428 if (match(LHS, m_Shl(m_Power2(), m_Value())) && match(RHS, m_SignMask())) {
3429 if (Pred == ICmpInst::ICMP_UGT)
3430 return getFalse(ITy);
3431 if (Pred == ICmpInst::ICMP_ULE)
3432 return getTrue(ITy);
3433 }
3434
3435 if (!MaxRecurse || !LBO || !RBO || LBO->getOpcode() != RBO->getOpcode())
3436 return nullptr;
3437
3438 if (LBO->getOperand(0) == RBO->getOperand(0)) {
3439 switch (LBO->getOpcode()) {
3440 default:
3441 break;
3442 case Instruction::Shl: {
3443 bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
3444 bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
3445 if (!NUW || (ICmpInst::isSigned(Pred) && !NSW) ||
3446 !isKnownNonZero(LBO->getOperand(0), Q))
3447 break;
3448 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(1),
3449 RBO->getOperand(1), Q, MaxRecurse - 1))
3450 return V;
3451 break;
3452 }
3453 // If C1 & C2 == C1, A = X and/or C1, B = X and/or C2:
3454 // icmp ule A, B -> true
3455 // icmp ugt A, B -> false
3456 // icmp sle A, B -> true (C1 and C2 are the same sign)
3457 // icmp sgt A, B -> false (C1 and C2 are the same sign)
3458 case Instruction::And:
3459 case Instruction::Or: {
3460 const APInt *C1, *C2;
3461 if (ICmpInst::isRelational(Pred) &&
3462 match(LBO->getOperand(1), m_APInt(C1)) &&
3463 match(RBO->getOperand(1), m_APInt(C2))) {
3464 if (!C1->isSubsetOf(*C2)) {
3465 std::swap(C1, C2);
3466 Pred = ICmpInst::getSwappedPredicate(Pred);
3467 }
3468 if (C1->isSubsetOf(*C2)) {
3469 if (Pred == ICmpInst::ICMP_ULE)
3470 return getTrue(ITy);
3471 if (Pred == ICmpInst::ICMP_UGT)
3472 return getFalse(ITy);
3473 if (C1->isNonNegative() == C2->isNonNegative()) {
3474 if (Pred == ICmpInst::ICMP_SLE)
3475 return getTrue(ITy);
3476 if (Pred == ICmpInst::ICMP_SGT)
3477 return getFalse(ITy);
3478 }
3479 }
3480 }
3481 break;
3482 }
3483 }
3484 }
3485
3486 if (LBO->getOperand(1) == RBO->getOperand(1)) {
3487 switch (LBO->getOpcode()) {
3488 default:
3489 break;
3490 case Instruction::UDiv:
3491 case Instruction::LShr:
3492 if (ICmpInst::isSigned(Pred) || !Q.IIQ.isExact(LBO) ||
3493 !Q.IIQ.isExact(RBO))
3494 break;
3495 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3496 RBO->getOperand(0), Q, MaxRecurse - 1))
3497 return V;
3498 break;
3499 case Instruction::SDiv:
3500 if (!ICmpInst::isEquality(Pred) || !Q.IIQ.isExact(LBO) ||
3501 !Q.IIQ.isExact(RBO))
3502 break;
3503 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3504 RBO->getOperand(0), Q, MaxRecurse - 1))
3505 return V;
3506 break;
3507 case Instruction::AShr:
3508 if (!Q.IIQ.isExact(LBO) || !Q.IIQ.isExact(RBO))
3509 break;
3510 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3511 RBO->getOperand(0), Q, MaxRecurse - 1))
3512 return V;
3513 break;
3514 case Instruction::Shl: {
3515 bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
3516 bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
3517 if (!NUW && !NSW)
3518 break;
3519 if (!NSW && ICmpInst::isSigned(Pred))
3520 break;
3521 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3522 RBO->getOperand(0), Q, MaxRecurse - 1))
3523 return V;
3524 break;
3525 }
3526 }
3527 }
3528 return nullptr;
3529}
3530
3531/// simplify integer comparisons where at least one operand of the compare
3532/// matches an integer min/max idiom.
3533 static Value *simplifyICmpWithMinMax(CmpPredicate Pred, Value *LHS, Value *RHS,
3534 const SimplifyQuery &Q,
3535 unsigned MaxRecurse) {
3536 Type *ITy = getCompareTy(LHS); // The return type.
3537 Value *A, *B;
3538 CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE;
3539 CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B".
3540
3541 // Signed variants on "max(a,b)>=a -> true".
3542 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
3543 if (A != RHS)
3544 std::swap(A, B); // smax(A, B) pred A.
3545 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3546 // We analyze this as smax(A, B) pred A.
3547 P = Pred;
3548 } else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) &&
3549 (A == LHS || B == LHS)) {
3550 if (A != LHS)
3551 std::swap(A, B); // A pred smax(A, B).
3552 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3553 // We analyze this as smax(A, B) swapped-pred A.
3554 P = CmpInst::getSwappedPredicate(Pred);
3555 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
3556 (A == RHS || B == RHS)) {
3557 if (A != RHS)
3558 std::swap(A, B); // smin(A, B) pred A.
3559 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3560 // We analyze this as smax(-A, -B) swapped-pred -A.
3561 // Note that we do not need to actually form -A or -B thanks to EqP.
3562 P = CmpInst::getSwappedPredicate(Pred);
3563 } else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) &&
3564 (A == LHS || B == LHS)) {
3565 if (A != LHS)
3566 std::swap(A, B); // A pred smin(A, B).
3567 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3568 // We analyze this as smax(-A, -B) pred -A.
3569 // Note that we do not need to actually form -A or -B thanks to EqP.
3570 P = Pred;
3571 }
3572 if (P != CmpInst::BAD_ICMP_PREDICATE) {
3573 // Cases correspond to "max(A, B) p A".
3574 switch (P) {
3575 default:
3576 break;
3577 case CmpInst::ICMP_EQ:
3578 case CmpInst::ICMP_SLE:
3579 // Equivalent to "A EqP B". This may be the same as the condition tested
3580 // in the max/min; if so, we can just return that.
3581 if (Value *V = extractEquivalentCondition(LHS, EqP, A, B))
3582 return V;
3583 if (Value *V = extractEquivalentCondition(RHS, EqP, A, B))
3584 return V;
3585 // Otherwise, see if "A EqP B" simplifies.
3586 if (MaxRecurse)
3587 if (Value *V = simplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3588 return V;
3589 break;
3590 case CmpInst::ICMP_NE:
3591 case CmpInst::ICMP_SGT: {
3592 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
3593 // Equivalent to "A InvEqP B". This may be the same as the condition
3594 // tested in the max/min; if so, we can just return that.
3595 if (Value *V = extractEquivalentCondition(LHS, InvEqP, A, B))
3596 return V;
3597 if (Value *V = extractEquivalentCondition(RHS, InvEqP, A, B))
3598 return V;
3599 // Otherwise, see if "A InvEqP B" simplifies.
3600 if (MaxRecurse)
3601 if (Value *V = simplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3602 return V;
3603 break;
3604 }
3605 case CmpInst::ICMP_SGE:
3606 // Always true.
3607 return getTrue(ITy);
3608 case CmpInst::ICMP_SLT:
3609 // Always false.
3610 return getFalse(ITy);
3611 }
3612 }
3613
3614 // Unsigned variants on "max(a,b)>=a -> true".
3615 P = CmpInst::BAD_ICMP_PREDICATE;
3616 if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
3617 if (A != RHS)
3618 std::swap(A, B); // umax(A, B) pred A.
3619 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3620 // We analyze this as umax(A, B) pred A.
3621 P = Pred;
3622 } else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) &&
3623 (A == LHS || B == LHS)) {
3624 if (A != LHS)
3625 std::swap(A, B); // A pred umax(A, B).
3626 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3627 // We analyze this as umax(A, B) swapped-pred A.
3628 P = CmpInst::getSwappedPredicate(Pred);
3629 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
3630 (A == RHS || B == RHS)) {
3631 if (A != RHS)
3632 std::swap(A, B); // umin(A, B) pred A.
3633 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3634 // We analyze this as umax(-A, -B) swapped-pred -A.
3635 // Note that we do not need to actually form -A or -B thanks to EqP.
3636 P = CmpInst::getSwappedPredicate(Pred);
3637 } else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) &&
3638 (A == LHS || B == LHS)) {
3639 if (A != LHS)
3640 std::swap(A, B); // A pred umin(A, B).
3641 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3642 // We analyze this as umax(-A, -B) pred -A.
3643 // Note that we do not need to actually form -A or -B thanks to EqP.
3644 P = Pred;
3645 }
3646 if (P != CmpInst::BAD_ICMP_PREDICATE) {
3647 // Cases correspond to "max(A, B) p A".
3648 switch (P) {
3649 default:
3650 break;
3651 case CmpInst::ICMP_EQ:
3652 case CmpInst::ICMP_ULE:
3653 // Equivalent to "A EqP B". This may be the same as the condition tested
3654 // in the max/min; if so, we can just return that.
3655 if (Value *V = extractEquivalentCondition(LHS, EqP, A, B))
3656 return V;
3657 if (Value *V = extractEquivalentCondition(RHS, EqP, A, B))
3658 return V;
3659 // Otherwise, see if "A EqP B" simplifies.
3660 if (MaxRecurse)
3661 if (Value *V = simplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3662 return V;
3663 break;
3664 case CmpInst::ICMP_NE:
3665 case CmpInst::ICMP_UGT: {
3666 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
3667 // Equivalent to "A InvEqP B". This may be the same as the condition
3668 // tested in the max/min; if so, we can just return that.
3669 if (Value *V = extractEquivalentCondition(LHS, InvEqP, A, B))
3670 return V;
3671 if (Value *V = extractEquivalentCondition(RHS, InvEqP, A, B))
3672 return V;
3673 // Otherwise, see if "A InvEqP B" simplifies.
3674 if (MaxRecurse)
3675 if (Value *V = simplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3676 return V;
3677 break;
3678 }
3679 case CmpInst::ICMP_UGE:
3680 return getTrue(ITy);
3681 case CmpInst::ICMP_ULT:
3682 return getFalse(ITy);
3683 }
3684 }
3685
3686 // Comparing 1 each of min/max with a common operand?
3687 // Canonicalize min operand to RHS.
3688 if (match(LHS, m_UMin(m_Value(), m_Value())) ||
3689 match(LHS, m_SMin(m_Value(), m_Value()))) {
3690 std::swap(LHS, RHS);
3691 Pred = ICmpInst::getSwappedPredicate(Pred);
3692 }
3693
3694 Value *C, *D;
3695 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) &&
3696 match(RHS, m_SMin(m_Value(C), m_Value(D))) &&
3697 (A == C || A == D || B == C || B == D)) {
3698 // smax(A, B) >=s smin(A, D) --> true
3699 if (Pred == CmpInst::ICMP_SGE)
3700 return getTrue(ITy);
3701 // smax(A, B) <s smin(A, D) --> false
3702 if (Pred == CmpInst::ICMP_SLT)
3703 return getFalse(ITy);
3704 } else if (match(LHS, m_UMax(m_Value(A), m_Value(B))) &&
3705 match(RHS, m_UMin(m_Value(C), m_Value(D))) &&
3706 (A == C || A == D || B == C || B == D)) {
3707 // umax(A, B) >=u umin(A, D) --> true
3708 if (Pred == CmpInst::ICMP_UGE)
3709 return getTrue(ITy);
3710 // umax(A, B) <u umin(A, D) --> false
3711 if (Pred == CmpInst::ICMP_ULT)
3712 return getFalse(ITy);
3713 }
3714
3715 return nullptr;
3716}
3717
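/// Try to fold the comparison using dominating llvm.assume calls that
/// constrain one of its operands; e.g. a dominating "assume(X u< Y)" folds
/// "icmp ult X, Y" to true.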
3718 static Value *simplifyICmpWithDominatingAssume(CmpPredicate Predicate,
3719 Value *LHS, Value *RHS,
3720 const SimplifyQuery &Q) {
3721 // Gracefully handle instructions that have not been inserted yet.
3722 if (!Q.AC || !Q.CxtI)
3723 return nullptr;
3724
3725 for (Value *AssumeBaseOp : {LHS, RHS}) {
3726 for (auto &AssumeVH : Q.AC->assumptionsFor(AssumeBaseOp)) {
3727 if (!AssumeVH)
3728 continue;
3729
3730 CallInst *Assume = cast<CallInst>(AssumeVH);
3731 if (std::optional<bool> Imp = isImpliedCondition(
3732 Assume->getArgOperand(0), Predicate, LHS, RHS, Q.DL))
3733 if (isValidAssumeForContext(Assume, Q.CxtI, Q.DT))
3734 return ConstantInt::get(getCompareTy(LHS), *Imp);
3735 }
3736 }
3737
3738 return nullptr;
3739}
3740
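/// Fold comparisons of a saturating intrinsic against its wrapping form,
/// e.g. "icmp uge (uadd.sat X, Y), (add X, Y)" is always true because the
/// saturating sum is an unsigned upper bound on the wrapping sum.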
3741 static Value *simplifyICmpWithIntrinsicOnLHS(CmpPredicate Pred, Value *LHS,
3742 Value *RHS) {
3743 auto *II = dyn_cast<IntrinsicInst>(LHS);
3744 if (!II)
3745 return nullptr;
3746
3747 switch (II->getIntrinsicID()) {
3748 case Intrinsic::uadd_sat:
3749 // uadd.sat(X, Y) uge X + Y
3750 if (match(RHS, m_c_Add(m_Specific(II->getArgOperand(0)),
3751 m_Specific(II->getArgOperand(1))))) {
3752 if (Pred == ICmpInst::ICMP_UGE)
3753 return ConstantInt::getTrue(getCompareTy(II));
3754 if (Pred == ICmpInst::ICMP_ULT)
3755 return ConstantInt::getFalse(getCompareTy(II));
3756 }
3757 return nullptr;
3758 case Intrinsic::usub_sat:
3759 // usub.sat(X, Y) ule X - Y
3760 if (match(RHS, m_Sub(m_Specific(II->getArgOperand(0)),
3761 m_Specific(II->getArgOperand(1))))) {
3762 if (Pred == ICmpInst::ICMP_ULE)
3763 return ConstantInt::getTrue(getCompareTy(II));
3764 if (Pred == ICmpInst::ICMP_UGT)
3765 return ConstantInt::getFalse(getCompareTy(II));
3766 }
3767 return nullptr;
3768 default:
3769 return nullptr;
3770 }
3771}
3772
3773/// Helper method to get range from metadata or attribute.
3774static std::optional<ConstantRange> getRange(Value *V,
3775 const InstrInfoQuery &IIQ) {
3776 if (Instruction *I = dyn_cast<Instruction>(V))
3777 if (MDNode *MD = IIQ.getMetadata(I, LLVMContext::MD_range))
3778 return getConstantRangeFromMetadata(*MD);
3779
3780 if (const Argument *A = dyn_cast<Argument>(V))
3781 return A->getRange();
3782 else if (const CallBase *CB = dyn_cast<CallBase>(V))
3783 return CB->getRange();
3784
3785 return std::nullopt;
3786}
3787
3788/// Given operands for an ICmpInst, see if we can fold the result.
3789/// If not, this returns null.
3790 static Value *simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS,
3791 const SimplifyQuery &Q, unsigned MaxRecurse) {
3792 assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!");
3793
3794 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3795 if (Constant *CRHS = dyn_cast<Constant>(RHS))
3796 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3797
3798 // If we have a constant, make sure it is on the RHS.
3799 std::swap(LHS, RHS);
3800 Pred = CmpInst::getSwappedPredicate(Pred);
3801 }
3802 assert(!isa<UndefValue>(LHS) && "Unexpected icmp undef,%X");
3803
3804 Type *ITy = getCompareTy(LHS); // The return type.
3805
3806 // icmp poison, X -> poison
3807 if (isa<PoisonValue>(RHS))
3808 return PoisonValue::get(ITy);
3809
3810 // For EQ and NE, we can always pick a value for the undef to make the
3811 // predicate pass or fail, so we can return undef.
3812 // Matches behavior in llvm::ConstantFoldCompareInstruction.
3813 if (Q.isUndefValue(RHS) && ICmpInst::isEquality(Pred))
3814 return UndefValue::get(ITy);
3815
3816 // icmp X, X -> true/false
3817 // icmp X, undef -> true/false because undef could be X.
3818 if (LHS == RHS || Q.isUndefValue(RHS))
3819 return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred));
3820
3821 if (Value *V = simplifyICmpOfBools(Pred, LHS, RHS, Q))
3822 return V;
3823
3824 // TODO: Sink/common this with other potentially expensive calls that use
3825 // ValueTracking? See comment below for isKnownNonEqual().
3826 if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q))
3827 return V;
3828
3829 if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS, Q))
3830 return V;
3831
3832 // If both operands have range metadata, use the metadata
3833 // to simplify the comparison.
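// For example, if LHS is known to be in [0, 8) and RHS in [8, 16), then
// "icmp ult LHS, RHS" folds to true and "icmp uge LHS, RHS" folds to false.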
3834 if (std::optional<ConstantRange> RhsCr = getRange(RHS, Q.IIQ))
3835 if (std::optional<ConstantRange> LhsCr = getRange(LHS, Q.IIQ)) {
3836 if (LhsCr->icmp(Pred, *RhsCr))
3837 return ConstantInt::getTrue(ITy);
3838
3839 if (LhsCr->icmp(CmpInst::getInversePredicate(Pred), *RhsCr))
3840 return ConstantInt::getFalse(ITy);
3841 }
3842
3843 // Compare of cast, for example (zext X) != 0 -> X != 0
3844 if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) {
3845 Instruction *LI = cast<CastInst>(LHS);
3846 Value *SrcOp = LI->getOperand(0);
3847 Type *SrcTy = SrcOp->getType();
3848 Type *DstTy = LI->getType();
3849
3850 // Turn icmp (ptrtoint/ptrtoaddr x), (ptrtoint/ptrtoaddr/constant) into a
3851 // compare of the input if the integer type is the same size as the
3852 // pointer address type (icmp only compares the address of the pointer).
3853 if (MaxRecurse && (isa<PtrToIntInst, PtrToAddrInst>(LI)) &&
3854 Q.DL.getAddressType(SrcTy) == DstTy) {
3855 if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
3856 // Transfer the cast to the constant.
3857 if (Value *V = simplifyICmpInst(Pred, SrcOp,
3858 ConstantExpr::getIntToPtr(RHSC, SrcTy),
3859 Q, MaxRecurse - 1))
3860 return V;
3861 } else if (isa<PtrToIntInst, PtrToAddrInst>(RHS)) {
3862 auto *RI = cast<CastInst>(RHS);
3863 if (RI->getOperand(0)->getType() == SrcTy)
3864 // Compare without the cast.
3865 if (Value *V = simplifyICmpInst(Pred, SrcOp, RI->getOperand(0), Q,
3866 MaxRecurse - 1))
3867 return V;
3868 }
3869 }
3870
3871 if (isa<ZExtInst>(LHS)) {
3872 // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the
3873 // same type.
3874 if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3875 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3876 // Compare X and Y. Note that signed predicates become unsigned.
3877 if (Value *V =
3878 simplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred), SrcOp,
3879 RI->getOperand(0), Q, MaxRecurse - 1))
3880 return V;
3881 }
3882 // Fold (zext X) ule (sext X), (zext X) sge (sext X) to true.
3883 else if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3884 if (SrcOp == RI->getOperand(0)) {
3885 if (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_SGE)
3886 return ConstantInt::getTrue(ITy);
3887 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_SLT)
3888 return ConstantInt::getFalse(ITy);
3889 }
3890 }
3891 // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended
3892 // too. If not, then try to deduce the result of the comparison.
3893 else if (match(RHS, m_ImmConstant())) {
3894 Constant *C = dyn_cast<Constant>(RHS);
3895 assert(C != nullptr);
3896
3897 // Compute the constant that would happen if we truncated to SrcTy then
3898 // reextended to DstTy.
3899 Constant *Trunc =
3900 ConstantFoldCastOperand(Instruction::Trunc, C, SrcTy, Q.DL);
3901 assert(Trunc && "Constant-fold of ImmConstant should not fail");
3902 Constant *RExt =
3903 ConstantFoldCastOperand(CastInst::ZExt, Trunc, DstTy, Q.DL);
3904 assert(RExt && "Constant-fold of ImmConstant should not fail");
3905 Constant *AnyEq =
3906 ConstantFoldCompareInstOperands(ICmpInst::ICMP_EQ, RExt, C, Q.DL);
3907 assert(AnyEq && "Constant-fold of ImmConstant should not fail");
3908
3909 // If the re-extended constant didn't change any of the elements then
3910 // this is effectively also a case of comparing two zero-extended
3911 // values.
3912 if (AnyEq->isAllOnesValue() && MaxRecurse)
3913 if (Value *V = simplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3914 SrcOp, Trunc, Q, MaxRecurse - 1))
3915 return V;
3916
3917 // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit
3918 // there. Use this to work out the result of the comparison.
3919 if (AnyEq->isNullValue()) {
3920 switch (Pred) {
3921 default:
3922 llvm_unreachable("Unknown ICmp predicate!");
3923 // LHS <u RHS.
3924 case ICmpInst::ICMP_EQ:
3925 case ICmpInst::ICMP_UGT:
3926 case ICmpInst::ICMP_UGE:
3927 return Constant::getNullValue(ITy);
3928
3929 case ICmpInst::ICMP_NE:
3930 case ICmpInst::ICMP_ULT:
3931 case ICmpInst::ICMP_ULE:
3932 return Constant::getAllOnesValue(ITy);
3933
3934 // LHS is non-negative. If RHS is negative then LHS >s LHS. If RHS
3935 // is non-negative then LHS <s RHS.
3936 case ICmpInst::ICMP_SGT:
3937 case ICmpInst::ICMP_SGE:
3938 return ConstantFoldCompareInstOperands(
3939 ICmpInst::ICMP_SLT, C, Constant::getNullValue(C->getType()),
3940 Q.DL);
3941 case ICmpInst::ICMP_SLT:
3942 case ICmpInst::ICMP_SLE:
3943 return ConstantFoldCompareInstOperands(
3944 ICmpInst::ICMP_SGE, C, Constant::getNullValue(C->getType()),
3945 Q.DL);
3946 }
3947 }
3948 }
3949 }
3950
3951 if (isa<SExtInst>(LHS)) {
3952 // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the
3953 // same type.
3954 if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3955 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3956 // Compare X and Y. Note that the predicate does not change.
3957 if (Value *V = simplifyICmpInst(Pred, SrcOp, RI->getOperand(0), Q,
3958 MaxRecurse - 1))
3959 return V;
3960 }
3961 // Fold (sext X) uge (zext X), (sext X) sle (zext X) to true.
3962 else if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3963 if (SrcOp == RI->getOperand(0)) {
3964 if (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_SLE)
3965 return ConstantInt::getTrue(ITy);
3966 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SGT)
3967 return ConstantInt::getFalse(ITy);
3968 }
3969 }
3970 // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
3971 // too. If not, then try to deduce the result of the comparison.
3972 else if (match(RHS, m_ImmConstant())) {
3973 Constant *C = cast<Constant>(RHS);
3974
3975 // Compute the constant that would happen if we truncated to SrcTy then
3976 // reextended to DstTy.
3977 Constant *Trunc =
3978 ConstantFoldCastOperand(Instruction::Trunc, C, SrcTy, Q.DL);
3979 assert(Trunc && "Constant-fold of ImmConstant should not fail");
3980 Constant *RExt =
3981 ConstantFoldCastOperand(CastInst::SExt, Trunc, DstTy, Q.DL);
3982 assert(RExt && "Constant-fold of ImmConstant should not fail");
3983 Constant *AnyEq =
3984 ConstantFoldCompareInstOperands(ICmpInst::ICMP_EQ, RExt, C, Q.DL);
3985 assert(AnyEq && "Constant-fold of ImmConstant should not fail");
3986
3987 // If the re-extended constant didn't change then this is effectively
3988 // also a case of comparing two sign-extended values.
3989 if (AnyEq->isAllOnesValue() && MaxRecurse)
3990 if (Value *V =
3991 simplifyICmpInst(Pred, SrcOp, Trunc, Q, MaxRecurse - 1))
3992 return V;
3993
3994 // Otherwise the upper bits of LHS are all equal, while RHS has varying
3995 // bits there. Use this to work out the result of the comparison.
3996 if (AnyEq->isNullValue()) {
3997 switch (Pred) {
3998 default:
3999 llvm_unreachable("Unknown ICmp predicate!");
4000 case ICmpInst::ICMP_EQ:
4001 return Constant::getNullValue(ITy);
4002 case ICmpInst::ICMP_NE:
4003 return Constant::getAllOnesValue(ITy);
4004
4005 // If RHS is non-negative then LHS <s RHS. If RHS is negative then
4006 // LHS >s RHS.
4007 case ICmpInst::ICMP_SGT:
4008 case ICmpInst::ICMP_SGE:
4009 return ConstantFoldCompareInstOperands(
4010 ICmpInst::ICMP_SLT, C, Constant::getNullValue(C->getType()),
4011 Q.DL);
4012 case ICmpInst::ICMP_SLT:
4013 case ICmpInst::ICMP_SLE:
4014 return ConstantFoldCompareInstOperands(
4015 ICmpInst::ICMP_SGE, C, Constant::getNullValue(C->getType()),
4016 Q.DL);
4017
4018 // If LHS is non-negative then LHS <u RHS. If LHS is negative then
4019 // LHS >u RHS.
4020 case ICmpInst::ICMP_UGT:
4021 case ICmpInst::ICMP_UGE:
4022 // Comparison is true iff the LHS <s 0.
4023 if (MaxRecurse)
4024 if (Value *V = simplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp,
4025 Constant::getNullValue(SrcTy), Q,
4026 MaxRecurse - 1))
4027 return V;
4028 break;
4029 case ICmpInst::ICMP_ULT:
4030 case ICmpInst::ICMP_ULE:
4031 // Comparison is true iff the LHS >=s 0.
4032 if (MaxRecurse)
4033 if (Value *V = simplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp,
4034 Constant::getNullValue(SrcTy), Q,
4035 MaxRecurse - 1))
4036 return V;
4037 break;
4038 }
4039 }
4040 }
4041 }
4042 }
4043
4044 // icmp eq|ne X, Y -> false|true if X != Y
4045 // This is potentially expensive, and we have already computed known bits
4046 // for compares with 0 above, so only try this for a non-zero compare.
4047 if (ICmpInst::isEquality(Pred) && !match(RHS, m_Zero()) &&
4048 isKnownNonEqual(LHS, RHS, Q)) {
4049 return Pred == ICmpInst::ICMP_NE ? getTrue(ITy) : getFalse(ITy);
4050 }
4051
4052 if (Value *V = simplifyICmpWithBinOp(Pred, LHS, RHS, Q, MaxRecurse))
4053 return V;
4054
4055 if (Value *V = simplifyICmpWithMinMax(Pred, LHS, RHS, Q, MaxRecurse))
4056 return V;
4057
4058 if (Value *V = simplifyICmpWithIntrinsicOnLHS(Pred, LHS, RHS))
4059 return V;
4060 if (Value *V = simplifyICmpWithIntrinsicOnLHS(
4061 ICmpInst::getSwappedPredicate(Pred), RHS, LHS))
4062 return V;
4063
4064 if (Value *V = simplifyICmpUsingMonotonicValues(Pred, LHS, RHS, Q))
4065 return V;
4066 if (Value *V = simplifyICmpUsingMonotonicValues(
4067 ICmpInst::getSwappedPredicate(Pred), RHS, LHS, Q))
4068 return V;
4069
4070 if (Value *V = simplifyICmpWithDominatingAssume(Pred, LHS, RHS, Q))
4071 return V;
4072
4073 if (std::optional<bool> Res =
4074 isImpliedByDomCondition(Pred, LHS, RHS, Q.CxtI, Q.DL))
4075 return ConstantInt::getBool(ITy, *Res);
4076
4077 // Simplify comparisons of related pointers using a powerful, recursive
4078 // GEP-walk when we have target data available.
4079 if (LHS->getType()->isPointerTy())
4080 if (auto *C = computePointerICmp(Pred, LHS, RHS, Q))
4081 return C;
4082
4083 // If the comparison is with the result of a select instruction, check whether
4084 // comparing with either branch of the select always yields the same value.
4085 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
4086 if (Value *V = threadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
4087 return V;
4088
4089 // If the comparison is with the result of a phi instruction, check whether
4090 // doing the compare with each incoming phi value yields a common result.
4091 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
4092 if (Value *V = threadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
4093 return V;
4094
4095 return nullptr;
4096}
4097
4098 Value *llvm::simplifyICmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS,
4099 const SimplifyQuery &Q) {
4100 return ::simplifyICmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
4101}
4102
4103/// Given operands for an FCmpInst, see if we can fold the result.
4104/// If not, this returns null.
4105 static Value *simplifyFCmpInst(CmpPredicate Pred, Value *LHS, Value *RHS,
4106 FastMathFlags FMF, const SimplifyQuery &Q,
4107 unsigned MaxRecurse) {
4108 assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!");
4109
4110 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
4111 if (Constant *CRHS = dyn_cast<Constant>(RHS))
4112 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI,
4113 Q.CxtI);
4114
4115 // If we have a constant, make sure it is on the RHS.
4116 std::swap(LHS, RHS);
4117 Pred = CmpInst::getSwappedPredicate(Pred);
4118 }
4119
4120 // Fold trivial predicates.
4121 Type *RetTy = getCompareTy(LHS);
4122 if (Pred == FCmpInst::FCMP_FALSE)
4123 return getFalse(RetTy);
4124 if (Pred == FCmpInst::FCMP_TRUE)
4125 return getTrue(RetTy);
4126
4127 // fcmp pred x, poison and fcmp pred poison, x
4128 // fold to poison
4129 if (isa<PoisonValue>(LHS) || isa<PoisonValue>(RHS))
4130 return PoisonValue::get(RetTy);
4131
4132 // fcmp pred x, undef and fcmp pred undef, x
4133 // fold to true if unordered, false if ordered
4134 if (Q.isUndefValue(LHS) || Q.isUndefValue(RHS)) {
4135 // Choosing NaN for the undef will always make unordered comparison succeed
4136 // and ordered comparison fail.
4137 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
4138 }
4139
4140 // fcmp x,x -> true/false. Not all compares are foldable.
4141 if (LHS == RHS) {
4142 if (CmpInst::isTrueWhenEqual(Pred))
4143 return getTrue(RetTy);
4144 if (CmpInst::isFalseWhenEqual(Pred))
4145 return getFalse(RetTy);
4146 }
4147
4148 // Fold (un)ordered comparison if we can determine there are no NaNs.
4149 //
4150 // This catches the 2 variable input case, constants are handled below as a
4151 // class-like compare.
4152 if (Pred == FCmpInst::FCMP_ORD || Pred == FCmpInst::FCMP_UNO) {
4153 KnownFPClass RHSClass = computeKnownFPClass(RHS, fcAllFlags, Q);
4154 KnownFPClass LHSClass = computeKnownFPClass(LHS, fcAllFlags, Q);
4155
4156 if (FMF.noNaNs() ||
4157 (RHSClass.isKnownNeverNaN() && LHSClass.isKnownNeverNaN()))
4158 return ConstantInt::get(RetTy, Pred == FCmpInst::FCMP_ORD);
4159
4160 if (RHSClass.isKnownAlwaysNaN() || LHSClass.isKnownAlwaysNaN())
4161 return ConstantInt::get(RetTy, Pred == CmpInst::FCMP_UNO);
4162 }
4163
4164 if (std::optional<bool> Res =
4165 isImpliedByDomCondition(Pred, LHS, RHS, Q.CxtI, Q.DL))
4166 return ConstantInt::getBool(RetTy, *Res);
4167
4168 const APFloat *C = nullptr;
4169 match(RHS, m_APFloatAllowPoison(C));
4170 std::optional<KnownFPClass> FullKnownClassLHS;
4171
4172 // Lazily compute the possible classes for LHS. Avoid computing it twice if
4173 // RHS is a 0.
4174 auto computeLHSClass = [=, &FullKnownClassLHS](FPClassTest InterestedFlags =
4175 fcAllFlags) {
4176 if (FullKnownClassLHS)
4177 return *FullKnownClassLHS;
4178 return computeKnownFPClass(LHS, FMF, InterestedFlags, Q);
4179 };
4180
4181 if (C && Q.CxtI) {
4182 // Fold out compares that express a class test.
4183 //
4184 // FIXME: Should be able to perform folds without context
4185 // instruction. Always pass in the context function?
4186
4187 const Function *ParentF = Q.CxtI->getFunction();
4188 auto [ClassVal, ClassTest] = fcmpToClassTest(Pred, *ParentF, LHS, C);
4189 if (ClassVal) {
4190 FullKnownClassLHS = computeLHSClass();
4191 if ((FullKnownClassLHS->KnownFPClasses & ClassTest) == fcNone)
4192 return getFalse(RetTy);
4193 if ((FullKnownClassLHS->KnownFPClasses & ~ClassTest) == fcNone)
4194 return getTrue(RetTy);
4195 }
4196 }
4197
4198 // Handle fcmp with constant RHS.
4199 if (C) {
4200 // TODO: If we always required a context function, we wouldn't need to
4201 // special case nans.
4202 if (C->isNaN())
4203 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
4204
4205 // TODO: Need version fcmpToClassTest which returns implied class when the
4206 // compare isn't a complete class test. e.g. > 1.0 implies fcPositive, but
4207 // isn't implementable as a class call.
4208 if (C->isNegative() && !C->isNegZero()) {
4209 FPClassTest Interested = KnownFPClass::OrderedLessThanZeroMask;
4210
4211 // TODO: We can catch more cases by using a range check rather than
4212 // relying on CannotBeOrderedLessThanZero.
4213 switch (Pred) {
4214 case FCmpInst::FCMP_UGE:
4215 case FCmpInst::FCMP_UGT:
4216 case FCmpInst::FCMP_UNE: {
4217 KnownFPClass KnownClass = computeLHSClass(Interested);
4218
4219 // (X >= 0) implies (X > C) when (C < 0)
4220 if (KnownClass.cannotBeOrderedLessThanZero())
4221 return getTrue(RetTy);
4222 break;
4223 }
4224 case FCmpInst::FCMP_OEQ:
4225 case FCmpInst::FCMP_OLE:
4226 case FCmpInst::FCMP_OLT: {
4227 KnownFPClass KnownClass = computeLHSClass(Interested);
4228
4229 // (X >= 0) implies !(X < C) when (C < 0)
4230 if (KnownClass.cannotBeOrderedLessThanZero())
4231 return getFalse(RetTy);
4232 break;
4233 }
4234 default:
4235 break;
4236 }
4237 }
4238 // Check comparison of [minnum/maxnum with constant] with other constant.
4239 const APFloat *C2;
4240 if ((match(LHS, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_APFloat(C2))) &&
4241 *C2 < *C) ||
4242 (match(LHS, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_APFloat(C2))) &&
4243 *C2 > *C)) {
4244 bool IsMaxNum =
4245 cast<IntrinsicInst>(LHS)->getIntrinsicID() == Intrinsic::maxnum;
4246 // The ordered relationship and minnum/maxnum guarantee that we do not
4247 // have NaN constants, so ordered/unordered preds are handled the same.
4248 switch (Pred) {
4249 case FCmpInst::FCMP_OEQ:
4250 case FCmpInst::FCMP_UEQ:
4251 // minnum(X, LesserC) == C --> false
4252 // maxnum(X, GreaterC) == C --> false
4253 return getFalse(RetTy);
4254 case FCmpInst::FCMP_ONE:
4255 case FCmpInst::FCMP_UNE:
4256 // minnum(X, LesserC) != C --> true
4257 // maxnum(X, GreaterC) != C --> true
4258 return getTrue(RetTy);
4259 case FCmpInst::FCMP_OGE:
4260 case FCmpInst::FCMP_UGE:
4261 case FCmpInst::FCMP_OGT:
4262 case FCmpInst::FCMP_UGT:
4263 // minnum(X, LesserC) >= C --> false
4264 // minnum(X, LesserC) > C --> false
4265 // maxnum(X, GreaterC) >= C --> true
4266 // maxnum(X, GreaterC) > C --> true
4267 return ConstantInt::get(RetTy, IsMaxNum);
4268 case FCmpInst::FCMP_OLE:
4269 case FCmpInst::FCMP_ULE:
4270 case FCmpInst::FCMP_OLT:
4271 case FCmpInst::FCMP_ULT:
4272 // minnum(X, LesserC) <= C --> true
4273 // minnum(X, LesserC) < C --> true
4274 // maxnum(X, GreaterC) <= C --> false
4275 // maxnum(X, GreaterC) < C --> false
4276 return ConstantInt::get(RetTy, !IsMaxNum);
4277 default:
4278 // TRUE/FALSE/ORD/UNO should be handled before this.
4279 llvm_unreachable("Unexpected fcmp predicate");
4280 }
4281 }
4282 }
4283
4284 // TODO: Could fold this with above if there were a matcher which returned all
4285 // classes in a non-splat vector.
4286 if (match(RHS, m_AnyZeroFP())) {
4287 switch (Pred) {
4288 case FCmpInst::FCMP_OGE:
4289 case FCmpInst::FCMP_ULT: {
4290 FPClassTest Interested = KnownFPClass::OrderedLessThanZeroMask;
4291 if (!FMF.noNaNs())
4292 Interested |= fcNan;
4293
4294 KnownFPClass Known = computeLHSClass(Interested);
4295
4296 // Positive or zero X >= 0.0 --> true
4297 // Positive or zero X < 0.0 --> false
4298 if ((FMF.noNaNs() || Known.isKnownNeverNaN()) &&
4299 Known.cannotBeOrderedLessThanZero())
4300 return Pred == FCmpInst::FCMP_OGE ? getTrue(RetTy) : getFalse(RetTy);
4301 break;
4302 }
4303 case FCmpInst::FCMP_UGE:
4304 case FCmpInst::FCMP_OLT: {
4305 FPClassTest Interested = KnownFPClass::OrderedLessThanZeroMask;
4306 KnownFPClass Known = computeLHSClass(Interested);
4307
4308 // Positive or zero or nan X >= 0.0 --> true
4309 // Positive or zero or nan X < 0.0 --> false
4310 if (Known.cannotBeOrderedLessThanZero())
4311 return Pred == FCmpInst::FCMP_UGE ? getTrue(RetTy) : getFalse(RetTy);
4312 break;
4313 }
4314 default:
4315 break;
4316 }
4317 }
4318
4319 // If the comparison is with the result of a select instruction, check whether
4320 // comparing with either branch of the select always yields the same value.
4321 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
4322 if (Value *V = threadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
4323 return V;
4324
4325 // If the comparison is with the result of a phi instruction, check whether
4326 // doing the compare with each incoming phi value yields a common result.
4327 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
4328 if (Value *V = threadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
4329 return V;
4330
4331 return nullptr;
4332}
4333
4334 Value *llvm::simplifyFCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS,
4335 FastMathFlags FMF, const SimplifyQuery &Q) {
4336 return ::simplifyFCmpInst(Predicate, LHS, RHS, FMF, Q, RecursionLimit);
4337}
4338
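/// See if V simplifies when each Ops[i].first appearing in it is replaced by
/// Ops[i].second. If AllowRefinement is false, the result is additionally
/// required to be no more poisonous than the original value.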
4339 static Value *simplifyWithOpsReplaced(Value *V,
4340 ArrayRef<std::pair<Value *, Value *>> Ops,
4341 const SimplifyQuery &Q,
4342 bool AllowRefinement,
4343 SmallVectorImpl<Instruction *> *DropFlags,
4344 unsigned MaxRecurse) {
4345 assert((AllowRefinement || !Q.CanUseUndef) &&
4346 "If AllowRefinement=false then CanUseUndef=false");
4347 for (const auto &OpAndRepOp : Ops) {
4348 // We cannot replace a constant, and shouldn't even try.
4349 if (isa<Constant>(OpAndRepOp.first))
4350 return nullptr;
4351
4352 // Trivial replacement.
4353 if (V == OpAndRepOp.first)
4354 return OpAndRepOp.second;
4355 }
4356
4357 if (!MaxRecurse--)
4358 return nullptr;
4359
4360 auto *I = dyn_cast<Instruction>(V);
4361 if (!I)
4362 return nullptr;
4363
4364 // The arguments of a phi node might refer to a value from a previous
4365 // cycle iteration.
4366 if (isa<PHINode>(I))
4367 return nullptr;
4368
4369 // Don't fold away llvm.is.constant checks based on assumptions.
4370 if (match(I, m_Intrinsic<Intrinsic::is_constant>()))
4371 return nullptr;
4372
4373 // Don't simplify freeze.
4374 if (isa<FreezeInst>(I))
4375 return nullptr;
4376
4377 for (const auto &OpAndRepOp : Ops) {
4378 // For vector types, the simplification must hold per-lane, so forbid
4379 // potentially cross-lane operations like shufflevector.
4380 if (OpAndRepOp.first->getType()->isVectorTy() &&
4381 !isNotCrossLaneOperation(I))
4382 return nullptr;
4383 }
4384
4385 // Replace Op with RepOp in instruction operands.
4386 SmallVector<Value *, 8> NewOps;
4387 bool AnyReplaced = false;
4388 for (Value *InstOp : I->operands()) {
4389 if (Value *NewInstOp = simplifyWithOpsReplaced(
4390 InstOp, Ops, Q, AllowRefinement, DropFlags, MaxRecurse)) {
4391 NewOps.push_back(NewInstOp);
4392 AnyReplaced = InstOp != NewInstOp;
4393 } else {
4394 NewOps.push_back(InstOp);
4395 }
4396
4397 // Bail out if any operand is undef and SimplifyQuery disables undef
4398 // simplification. Constant folding currently doesn't respect this option.
4399 if (isa<UndefValue>(NewOps.back()) && !Q.CanUseUndef)
4400 return nullptr;
4401 }
4402
4403 if (!AnyReplaced)
4404 return nullptr;
4405
4406 if (!AllowRefinement) {
4407 // General InstSimplify functions may refine the result, e.g. by returning
4408 // a constant for a potentially poison value. To avoid this, implement only
4409 // a few non-refining but profitable transforms here.
4410
4411 if (auto *BO = dyn_cast<BinaryOperator>(I)) {
4412 unsigned Opcode = BO->getOpcode();
4413 // id op x -> x, x op id -> x
4414 // Exclude floats, because x op id may produce a different NaN value.
4415 if (!BO->getType()->isFPOrFPVectorTy()) {
4416 if (NewOps[0] == ConstantExpr::getBinOpIdentity(Opcode, I->getType()))
4417 return NewOps[1];
4418 if (NewOps[1] == ConstantExpr::getBinOpIdentity(Opcode, I->getType(),
4419 /* RHS */ true))
4420 return NewOps[0];
4421 }
4422
4423 // x & x -> x, x | x -> x
4424 if ((Opcode == Instruction::And || Opcode == Instruction::Or) &&
4425 NewOps[0] == NewOps[1]) {
4426 // or disjoint x, x results in poison.
4427 if (auto *PDI = dyn_cast<PossiblyDisjointInst>(BO)) {
4428 if (PDI->isDisjoint()) {
4429 if (!DropFlags)
4430 return nullptr;
4431 DropFlags->push_back(BO);
4432 }
4433 }
4434 return NewOps[0];
4435 }
4436
4437 // x - x -> 0, x ^ x -> 0. This is non-refining, because x is non-poison
4438 // by assumption and this case never wraps, so nowrap flags can be
4439 // ignored.
4440 if ((Opcode == Instruction::Sub || Opcode == Instruction::Xor) &&
4441 NewOps[0] == NewOps[1] &&
4442 any_of(Ops, [=](const auto &Rep) { return NewOps[0] == Rep.second; }))
4443 return Constant::getNullValue(I->getType());
4444
4445 // If we are substituting an absorber constant into a binop and extra
4446 // poison can't leak if we remove the select -- because both operands of
4447 // the binop are based on the same value -- then it may be safe to replace
4448 // the value with the absorber constant. Examples:
4449 // (Op == 0) ? 0 : (Op & -Op) --> Op & -Op
4450 // (Op == 0) ? 0 : (Op * (binop Op, C)) --> Op * (binop Op, C)
4451 // (Op == -1) ? -1 : (Op | (binop C, Op) --> Op | (binop C, Op)
4452 Constant *Absorber = ConstantExpr::getBinOpAbsorber(Opcode, I->getType());
4453 if ((NewOps[0] == Absorber || NewOps[1] == Absorber) &&
4454 any_of(Ops,
4455 [=](const auto &Rep) { return impliesPoison(BO, Rep.first); }))
4456 return Absorber;
4457 }
4458
4459 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
4460 // `x == y ? 0 : ucmp(x, y)` where under the replacement y -> x,
4461 // `ucmp(x, x)` becomes `0`.
4462 if ((II->getIntrinsicID() == Intrinsic::scmp ||
4463 II->getIntrinsicID() == Intrinsic::ucmp) &&
4464 NewOps[0] == NewOps[1]) {
4465 if (II->hasPoisonGeneratingAnnotations()) {
4466 if (!DropFlags)
4467 return nullptr;
4468
4469 DropFlags->push_back(II);
4470 }
4471
4472 return ConstantInt::get(I->getType(), 0);
4473 }
4474 }
4475
4476 if (isa<GetElementPtrInst>(I)) {
4477 // getelementptr x, 0 -> x.
4478 // This never returns poison, even if inbounds is set.
4479 if (NewOps.size() == 2 && match(NewOps[1], m_Zero()))
4480 return NewOps[0];
4481 }
4482 } else {
4483 // The simplification queries below may return the original value. Consider:
4484 // %div = udiv i32 %arg, %arg2
4485 // %mul = mul nsw i32 %div, %arg2
4486 // %cmp = icmp eq i32 %mul, %arg
4487 // %sel = select i1 %cmp, i32 %div, i32 undef
4488 // Replacing %arg by %mul, %div becomes "udiv i32 %mul, %arg2", which
4489 // simplifies back to %arg. This can only happen because %mul does not
4490 // dominate %div. To ensure a consistent return value contract, we make sure
4491 // that this case returns nullptr as well.
4492 auto PreventSelfSimplify = [V](Value *Simplified) {
4493 return Simplified != V ? Simplified : nullptr;
4494 };
4495
4496 return PreventSelfSimplify(
4497 ::simplifyInstructionWithOperands(I, NewOps, Q, MaxRecurse));
4498 }
4499
4500 // If all operands are constant after substituting Op for RepOp then we can
4501 // constant fold the instruction.
4502 SmallVector<Constant *, 8> ConstOps;
4503 for (Value *NewOp : NewOps) {
4504 if (Constant *ConstOp = dyn_cast<Constant>(NewOp))
4505 ConstOps.push_back(ConstOp);
4506 else
4507 return nullptr;
4508 }
4509
4510 // Consider:
4511 // %cmp = icmp eq i32 %x, 2147483647
4512 // %add = add nsw i32 %x, 1
4513 // %sel = select i1 %cmp, i32 -2147483648, i32 %add
4514 //
4515 // We can't replace %sel with %add unless we strip away the flags (which
4516 // will be done in InstCombine).
4517 // TODO: This may be unsound, because it only catches some forms of
4518 // refinement.
4519 if (!AllowRefinement) {
4520 if (canCreatePoison(cast<Operator>(I), !DropFlags)) {
4521 // abs cannot create poison if the value is known to never be int_min.
4522 if (auto *II = dyn_cast<IntrinsicInst>(I);
4523 II && II->getIntrinsicID() == Intrinsic::abs) {
4524 if (!ConstOps[0]->isNotMinSignedValue())
4525 return nullptr;
4526 } else
4527 return nullptr;
4528 }
4529 Constant *Res = ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI,
4530 /*AllowNonDeterministic=*/false);
4531 if (DropFlags && Res && I->hasPoisonGeneratingAnnotations())
4532 DropFlags->push_back(I);
4533 return Res;
4534 }
4535
4536 return ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI,
4537 /*AllowNonDeterministic=*/false);
4538}
4539
4540 static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
4541 const SimplifyQuery &Q,
4542 bool AllowRefinement,
4543 SmallVectorImpl<Instruction *> *DropFlags,
4544 unsigned MaxRecurse) {
4545 return simplifyWithOpsReplaced(V, {{Op, RepOp}}, Q, AllowRefinement,
4546 DropFlags, MaxRecurse);
4547}
4548
4549 Value *llvm::simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
4550 const SimplifyQuery &Q,
4551 bool AllowRefinement,
4552 SmallVectorImpl<Instruction *> *DropFlags) {
4553 // If refinement is disabled, also disable undef simplifications (which are
4554 // always refinements) in SimplifyQuery.
4555 if (!AllowRefinement)
4556 return ::simplifyWithOpReplaced(V, Op, RepOp, Q.getWithoutUndef(),
4557 AllowRefinement, DropFlags, RecursionLimit);
4558 return ::simplifyWithOpReplaced(V, Op, RepOp, Q, AllowRefinement, DropFlags,
4560}
4561
4562/// Try to simplify a select instruction when its condition operand is an
4563/// integer comparison where one operand of the compare is a constant.
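/// For example, "(X & 4) == 0 ? X | 4 : X" always has the bit set afterwards
/// and simplifies to "X | 4".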
4564static Value *simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X,
4565 const APInt *Y, bool TrueWhenUnset) {
4566 const APInt *C;
4567
4568 // (X & Y) == 0 ? X & ~Y : X --> X
4569 // (X & Y) != 0 ? X & ~Y : X --> X & ~Y
4570 if (FalseVal == X && match(TrueVal, m_And(m_Specific(X), m_APInt(C))) &&
4571 *Y == ~*C)
4572 return TrueWhenUnset ? FalseVal : TrueVal;
4573
4574 // (X & Y) == 0 ? X : X & ~Y --> X & ~Y
4575 // (X & Y) != 0 ? X : X & ~Y --> X
4576 if (TrueVal == X && match(FalseVal, m_And(m_Specific(X), m_APInt(C))) &&
4577 *Y == ~*C)
4578 return TrueWhenUnset ? FalseVal : TrueVal;
4579
4580 if (Y->isPowerOf2()) {
4581 // (X & Y) == 0 ? X | Y : X --> X | Y
4582 // (X & Y) != 0 ? X | Y : X --> X
4583 if (FalseVal == X && match(TrueVal, m_Or(m_Specific(X), m_APInt(C))) &&
4584 *Y == *C) {
4585 // We can't return the or if it has the disjoint flag.
4586 if (TrueWhenUnset && cast<PossiblyDisjointInst>(TrueVal)->isDisjoint())
4587 return nullptr;
4588 return TrueWhenUnset ? TrueVal : FalseVal;
4589 }
4590
4591 // (X & Y) == 0 ? X : X | Y --> X
4592 // (X & Y) != 0 ? X : X | Y --> X | Y
4593 if (TrueVal == X && match(FalseVal, m_Or(m_Specific(X), m_APInt(C))) &&
4594 *Y == *C) {
4595 // We can't return the or if it has the disjoint flag.
4596 if (!TrueWhenUnset && cast<PossiblyDisjointInst>(FalseVal)->isDisjoint())
4597 return nullptr;
4598 return TrueWhenUnset ? TrueVal : FalseVal;
4599 }
4600 }
4601
4602 return nullptr;
4603}
4604
4605static Value *simplifyCmpSelOfMaxMin(Value *CmpLHS, Value *CmpRHS,
4606 CmpPredicate Pred, Value *TVal,
4607 Value *FVal) {
4608 // Canonicalize common cmp+sel operand as CmpLHS.
4609 if (CmpRHS == TVal || CmpRHS == FVal) {
4610 std::swap(CmpLHS, CmpRHS);
4611 Pred = ICmpInst::getSwappedPredicate(Pred);
4612 }
4613
4614 // Canonicalize common cmp+sel operand as TVal.
4615 if (CmpLHS == FVal) {
4616 std::swap(TVal, FVal);
4617 Pred = ICmpInst::getInversePredicate(Pred);
4618 }
4619
4620 // A vector select may be shuffling together elements that are equivalent
4621 // based on the max/min/select relationship.
4622 Value *X = CmpLHS, *Y = CmpRHS;
4623 bool PeekedThroughSelectShuffle = false;
4624 auto *Shuf = dyn_cast<ShuffleVectorInst>(FVal);
4625 if (Shuf && Shuf->isSelect()) {
4626 if (Shuf->getOperand(0) == Y)
4627 FVal = Shuf->getOperand(1);
4628 else if (Shuf->getOperand(1) == Y)
4629 FVal = Shuf->getOperand(0);
4630 else
4631 return nullptr;
4632 PeekedThroughSelectShuffle = true;
4633 }
4634
4635 // (X pred Y) ? X : max/min(X, Y)
4636 auto *MMI = dyn_cast<MinMaxIntrinsic>(FVal);
4637 if (!MMI || TVal != X ||
4638 !match(FVal, m_c_MaxOrMin(m_Specific(X), m_Specific(Y))))
4639 return nullptr;
4640
4641 // (X > Y) ? X : max(X, Y) --> max(X, Y)
4642 // (X >= Y) ? X : max(X, Y) --> max(X, Y)
4643 // (X < Y) ? X : min(X, Y) --> min(X, Y)
4644 // (X <= Y) ? X : min(X, Y) --> min(X, Y)
4645 //
4646 // The equivalence allows a vector select (shuffle) of max/min and Y. Ex:
4647 // (X > Y) ? X : (Z ? max(X, Y) : Y)
4648 // If Z is true, this reduces as above, and if Z is false:
4649 // (X > Y) ? X : Y --> max(X, Y)
4650 ICmpInst::Predicate MMPred = MMI->getPredicate();
4651 if (MMPred == CmpInst::getStrictPredicate(Pred))
4652 return MMI;
4653
4654 // Other transforms are not valid with a shuffle.
4655 if (PeekedThroughSelectShuffle)
4656 return nullptr;
4657
4658 // (X == Y) ? X : max/min(X, Y) --> max/min(X, Y)
4659 if (Pred == CmpInst::ICMP_EQ)
4660 return MMI;
4661
4662 // (X != Y) ? X : max/min(X, Y) --> X
4663 if (Pred == CmpInst::ICMP_NE)
4664 return X;
4665
4666 // (X < Y) ? X : max(X, Y) --> X
4667 // (X <= Y) ? X : max(X, Y) --> X
4668 // (X > Y) ? X : min(X, Y) --> X
4669 // (X >= Y) ? X : min(X, Y) --> X
4670 CmpInst::Predicate InvPred = ICmpInst::getInversePredicate(Pred);
4671 if (MMPred == CmpInst::getStrictPredicate(InvPred))
4672 return X;
4673
4674 return nullptr;
4675}
4676
4677 /// An alternative way to test whether a bit is set or not,
4678 /// using e.g. sgt/slt or trunc instead of eq/ne.
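/// For example, "X s< 0" tests the sign bit and "trunc X to i1" tests the
/// low bit, just like an "(X & Mask) ==/!= 0" pattern.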
4679static Value *simplifySelectWithBitTest(Value *CondVal, Value *TrueVal,
4680 Value *FalseVal) {
4681 if (auto Res = decomposeBitTest(CondVal))
4682 return simplifySelectBitTest(TrueVal, FalseVal, Res->X, &Res->Mask,
4683 Res->Pred == ICmpInst::ICMP_EQ);
4684
4685 return nullptr;
4686}
4687
4688/// Try to simplify a select instruction when its condition operand is an
4689/// integer equality or floating-point equivalence comparison.
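/// For example, in "(X == Y) ? sub X, Y : 0", substituting Y for X in the
/// true arm yields 0, which matches the false arm, so the select folds to 0.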
4690 static Value *simplifySelectWithEquivalence(
4691 ArrayRef<std::pair<Value *, Value *>> Replacements, Value *TrueVal,
4692 Value *FalseVal, const SimplifyQuery &Q, unsigned MaxRecurse) {
4693 Value *SimplifiedFalseVal =
4694 simplifyWithOpsReplaced(FalseVal, Replacements, Q.getWithoutUndef(),
4695 /* AllowRefinement */ false,
4696 /* DropFlags */ nullptr, MaxRecurse);
4697 if (!SimplifiedFalseVal)
4698 SimplifiedFalseVal = FalseVal;
4699
4700 Value *SimplifiedTrueVal =
4701 simplifyWithOpsReplaced(TrueVal, Replacements, Q,
4702 /* AllowRefinement */ true,
4703 /* DropFlags */ nullptr, MaxRecurse);
4704 if (!SimplifiedTrueVal)
4705 SimplifiedTrueVal = TrueVal;
4706
4707 if (SimplifiedFalseVal == SimplifiedTrueVal)
4708 return FalseVal;
4709
4710 return nullptr;
4711}
4712
4713/// Try to simplify a select instruction when its condition operand is an
4714/// integer comparison.
4715static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal,
4716 Value *FalseVal,
4717 const SimplifyQuery &Q,
4718 unsigned MaxRecurse) {
4719 CmpPredicate Pred;
4720 Value *CmpLHS, *CmpRHS;
4721 if (!match(CondVal, m_ICmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS))))
4722 return nullptr;
4723
4724 if (Value *V = simplifyCmpSelOfMaxMin(CmpLHS, CmpRHS, Pred, TrueVal, FalseVal))
4725 return V;
4726
4727 // Canonicalize ne to eq predicate.
4728 if (Pred == ICmpInst::ICMP_NE) {
4729 Pred = ICmpInst::ICMP_EQ;
4730 std::swap(TrueVal, FalseVal);
4731 }
4732
4733 // Check for integer min/max with a limit constant:
4734 // X > MIN_INT ? X : MIN_INT --> X
4735 // X < MAX_INT ? X : MAX_INT --> X
4736 if (TrueVal->getType()->isIntOrIntVectorTy()) {
4737 Value *X, *Y;
4738 SelectPatternFlavor SPF =
4739 matchDecomposedSelectPattern(cast<ICmpInst>(CondVal), TrueVal, FalseVal,
4740 X, Y)
4741 .Flavor;
4742 if (SelectPatternResult::isMinOrMax(SPF) && Pred == getMinMaxPred(SPF)) {
4743 APInt LimitC = getMinMaxLimit(getInverseMinMaxFlavor(SPF),
4744 X->getType()->getScalarSizeInBits());
4745 if (match(Y, m_SpecificInt(LimitC)))
4746 return X;
4747 }
4748 }
4749
4750 if (Pred == ICmpInst::ICMP_EQ && match(CmpRHS, m_Zero())) {
4751 Value *X;
4752 const APInt *Y;
4753 if (match(CmpLHS, m_And(m_Value(X), m_APInt(Y))))
4754 if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, Y,
4755 /*TrueWhenUnset=*/true))
4756 return V;
4757
4758 // Test for a bogus zero-shift-guard-op around funnel-shift or rotate.
4759 Value *ShAmt;
4760 auto isFsh = m_CombineOr(m_FShl(m_Value(X), m_Value(), m_Value(ShAmt)),
4761 m_FShr(m_Value(), m_Value(X), m_Value(ShAmt)));
4762 // (ShAmt == 0) ? fshl(X, *, ShAmt) : X --> X
4763 // (ShAmt == 0) ? fshr(*, X, ShAmt) : X --> X
4764 if (match(TrueVal, isFsh) && FalseVal == X && CmpLHS == ShAmt)
4765 return X;
4766
4767 // Test for a zero-shift-guard-op around rotates. These are used to
4768 // avoid UB from oversized shifts in raw IR rotate patterns, but the
4769 // intrinsics do not have that problem.
4770 // We do not allow this transform for the general funnel shift case because
4771 // that would not preserve the poison safety of the original code.
4772 auto isRotate =
4773 m_CombineOr(m_FShl(m_Value(X), m_Deferred(X), m_Value(ShAmt)),
4774 m_FShr(m_Value(X), m_Deferred(X), m_Value(ShAmt)));
4775 // (ShAmt == 0) ? X : fshl(X, X, ShAmt) --> fshl(X, X, ShAmt)
4776 // (ShAmt == 0) ? X : fshr(X, X, ShAmt) --> fshr(X, X, ShAmt)
4777 if (match(FalseVal, isRotate) && TrueVal == X && CmpLHS == ShAmt &&
4778 Pred == ICmpInst::ICMP_EQ)
4779 return FalseVal;
4780
4781 // X == 0 ? abs(X) : -abs(X) --> -abs(X)
4782 // X == 0 ? -abs(X) : abs(X) --> abs(X)
4783 if (match(TrueVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))) &&
4784 match(FalseVal, m_Neg(m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS)))))
4785 return FalseVal;
4786 if (match(TrueVal,
4787 m_Neg(m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS)))) &&
4788 match(FalseVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))))
4789 return FalseVal;
4790 }
4791
4792 // If we have a scalar equality comparison, then we know the value in one of
4793 // the arms of the select. See if substituting this value into the arm and
4794 // simplifying the result yields the same value as the other arm.
4795 if (Pred == ICmpInst::ICMP_EQ) {
4796 if (CmpLHS->getType()->isIntOrIntVectorTy() ||
4797 canReplacePointersIfEqual(CmpLHS, CmpRHS, Q.DL))
4798 if (Value *V = simplifySelectWithEquivalence({{CmpLHS, CmpRHS}}, TrueVal,
4799 FalseVal, Q, MaxRecurse))
4800 return V;
4801 if (CmpLHS->getType()->isIntOrIntVectorTy() ||
4802 canReplacePointersIfEqual(CmpRHS, CmpLHS, Q.DL))
4803 if (Value *V = simplifySelectWithEquivalence({{CmpRHS, CmpLHS}}, TrueVal,
4804 FalseVal, Q, MaxRecurse))
4805 return V;
4806
4807 Value *X;
4808 Value *Y;
4809 // select((X | Y) == 0 ? X : 0) --> 0 (commuted 2 ways)
4810 if (match(CmpLHS, m_Or(m_Value(X), m_Value(Y))) &&
4811 match(CmpRHS, m_Zero())) {
4812 // (X | Y) == 0 implies X == 0 and Y == 0.
4813 if (Value *V = simplifySelectWithEquivalence(
4814 {{X, CmpRHS}, {Y, CmpRHS}}, TrueVal, FalseVal, Q, MaxRecurse))
4815 return V;
4816 }
4817
4818 // select((X & Y) == -1 ? X : -1) --> -1 (commuted 2 ways)
4819 if (match(CmpLHS, m_And(m_Value(X), m_Value(Y))) &&
4820 match(CmpRHS, m_AllOnes())) {
4821 // (X & Y) == -1 implies X == -1 and Y == -1.
4822 if (Value *V = simplifySelectWithEquivalence(
4823 {{X, CmpRHS}, {Y, CmpRHS}}, TrueVal, FalseVal, Q, MaxRecurse))
4824 return V;
4825 }
4826 }
4827
4828 return nullptr;
4829}
4830
4831/// Try to simplify a select instruction when its condition operand is a
4832/// floating-point comparison.
4833 static Value *simplifySelectWithFCmp(Value *Cond, Value *T, Value *F,
4834 const SimplifyQuery &Q,
4835 unsigned MaxRecurse) {
4836 CmpPredicate Pred;
4837 Value *CmpLHS, *CmpRHS;
4838 if (!match(Cond, m_FCmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS))))
4839 return nullptr;
4840 FCmpInst *I = cast<FCmpInst>(Cond);
4841
4842 bool IsEquiv = I->isEquivalence();
4843 if (I->isEquivalence(/*Invert=*/true)) {
4844 std::swap(T, F);
4845 Pred = FCmpInst::getInversePredicate(Pred);
4846 IsEquiv = true;
4847 }
4848
4849 // This transform is safe if at least one operand is known to not be zero.
4850 // Otherwise, the select can change the sign of a zero operand.
4851 if (IsEquiv) {
4852 if (Value *V = simplifySelectWithEquivalence({{CmpLHS, CmpRHS}}, T, F, Q,
4853 MaxRecurse))
4854 return V;
4855 if (Value *V = simplifySelectWithEquivalence({{CmpRHS, CmpLHS}}, T, F, Q,
4856 MaxRecurse))
4857 return V;
4858 }
4859
4860 // Canonicalize CmpLHS to be T, and CmpRHS to be F, if they're swapped.
4861 if (CmpLHS == F && CmpRHS == T)
4862 std::swap(CmpLHS, CmpRHS);
4863
4864 if (CmpLHS != T || CmpRHS != F)
4865 return nullptr;
4866
4867 // This transform is also safe if we do not have (do not care about) -0.0.
4868 if (Q.CxtI && isa<FPMathOperator>(Q.CxtI) && Q.CxtI->hasNoSignedZeros()) {
4869 // (T == F) ? T : F --> F
4870 if (Pred == FCmpInst::FCMP_OEQ)
4871 return F;
4872
4873 // (T != F) ? T : F --> T
4874 if (Pred == FCmpInst::FCMP_UNE)
4875 return T;
4876 }
4877
4878 return nullptr;
4879}
4880
4881/// Look for the following pattern and simplify %to_fold to %identicalPhi.
4882/// Here %phi, %to_fold and %phi.next perform the same functionality as
4883/// %identicalPhi and hence the select instruction %to_fold can be folded
4884/// into %identicalPhi.
4885///
4886/// BB1:
4887/// %identicalPhi = phi [ X, %BB0 ], [ %identicalPhi.next, %BB1 ]
4888/// %phi = phi [ X, %BB0 ], [ %phi.next, %BB1 ]
4889/// ...
4890/// %identicalPhi.next = select %cmp, %val, %identicalPhi
4891/// (or select %cmp, %identicalPhi, %val)
4892/// %to_fold = select %cmp2, %identicalPhi, %phi
4893/// %phi.next = select %cmp, %val, %to_fold
4894/// (or select %cmp, %to_fold, %val)
4895///
4896/// Prove that %phi and %identicalPhi are the same by induction:
4897///
4898/// Base case: Both %phi and %identicalPhi are equal on entry to the loop.
4899/// Inductive case:
4900/// Suppose %phi and %identicalPhi are equal at iteration i.
4901/// We look at their values at iteration i+1 which are %phi.next and
4902/// %identicalPhi.next. They would have become different only when %cmp is
4903/// false and the corresponding values %to_fold and %identicalPhi differ
4904/// (similar reason for the other "or" case in the bracket).
4905///
4906 /// The only condition when %to_fold and %identicalPhi could differ is when %cmp2
4907/// is false and %to_fold is %phi, which contradicts our inductive hypothesis
4908/// that %phi and %identicalPhi are equal. Thus %phi and %identicalPhi are
4909/// always equal at iteration i+1.
4911 if (PN.getParent() != IdenticalPN.getParent())
4912 return false;
4913 if (PN.getNumIncomingValues() != 2)
4914 return false;
4915
4916 // Check that only the backedge incoming value is different.
4917 unsigned DiffVals = 0;
4918 BasicBlock *DiffValBB = nullptr;
4919 for (unsigned i = 0; i < 2; i++) {
4920 BasicBlock *PredBB = PN.getIncomingBlock(i);
4921 if (PN.getIncomingValue(i) !=
4922 IdenticalPN.getIncomingValueForBlock(PredBB)) {
4923 DiffVals++;
4924 DiffValBB = PredBB;
4925 }
4926 }
4927 if (DiffVals != 1)
4928 return false;
4929 // Now check that the backedge incoming values are two select
4930 // instructions with the same condition. Either their true
4931 // values are the same, or their false values are the same.
4932 auto *SI = dyn_cast<SelectInst>(PN.getIncomingValueForBlock(DiffValBB));
4933 auto *IdenticalSI =
4934 dyn_cast<SelectInst>(IdenticalPN.getIncomingValueForBlock(DiffValBB));
4935 if (!SI || !IdenticalSI)
4936 return false;
4937 if (SI->getCondition() != IdenticalSI->getCondition())
4938 return false;
4939
4940 SelectInst *SIOtherVal = nullptr;
4941 Value *IdenticalSIOtherVal = nullptr;
4942 if (SI->getTrueValue() == IdenticalSI->getTrueValue()) {
4943 SIOtherVal = dyn_cast<SelectInst>(SI->getFalseValue());
4944 IdenticalSIOtherVal = IdenticalSI->getFalseValue();
4945 } else if (SI->getFalseValue() == IdenticalSI->getFalseValue()) {
4946 SIOtherVal = dyn_cast<SelectInst>(SI->getTrueValue());
4947 IdenticalSIOtherVal = IdenticalSI->getTrueValue();
4948 } else {
4949 return false;
4950 }
4951
4952 // Now check that the other values in select, i.e., %to_fold and
4953 // %identicalPhi, are essentially the same value.
4954 if (!SIOtherVal || IdenticalSIOtherVal != &IdenticalPN)
4955 return false;
4956 if (!(SIOtherVal->getTrueValue() == &IdenticalPN &&
4957 SIOtherVal->getFalseValue() == &PN) &&
4958 !(SIOtherVal->getTrueValue() == &PN &&
4959 SIOtherVal->getFalseValue() == &IdenticalPN))
4960 return false;
4961 return true;
4962}
4963
4964/// Given operands for a SelectInst, see if we can fold the result.
4965/// If not, this returns null.
4966static Value *simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
4967 const SimplifyQuery &Q, unsigned MaxRecurse) {
4968 if (auto *CondC = dyn_cast<Constant>(Cond)) {
4969 if (auto *TrueC = dyn_cast<Constant>(TrueVal))
4970 if (auto *FalseC = dyn_cast<Constant>(FalseVal))
4971 if (Constant *C = ConstantFoldSelectInstruction(CondC, TrueC, FalseC))
4972 return C;
4973
4974 // select poison, X, Y -> poison
4975 if (isa<PoisonValue>(CondC))
4976 return PoisonValue::get(TrueVal->getType());
4977
4978 // select undef, X, Y -> X or Y
4979 if (Q.isUndefValue(CondC))
4980 return isa<Constant>(FalseVal) ? FalseVal : TrueVal;
4981
4982 // select true, X, Y --> X
4983 // select false, X, Y --> Y
4984 // For vectors, allow undef/poison elements in the condition to match the
4985 // defined elements, so we can eliminate the select.
4986 if (match(CondC, m_One()))
4987 return TrueVal;
4988 if (match(CondC, m_Zero()))
4989 return FalseVal;
4990 }
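// Editorial note, illustrative IR (placeholder values): per the comment
// above, a vector condition with poison lanes still matches m_One, so
//   select <2 x i1> <i1 true, i1 poison>, <2 x i32> %x, <2 x i32> %y
// simplifies to %x.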
4991
4992 assert(Cond->getType()->isIntOrIntVectorTy(1) &&
4993 "Select must have bool or bool vector condition");
4994 assert(TrueVal->getType() == FalseVal->getType() &&
4995 "Select must have same types for true/false ops");
4996
4997 if (Cond->getType() == TrueVal->getType()) {
4998 // select i1 Cond, i1 true, i1 false --> i1 Cond
4999 if (match(TrueVal, m_One()) && match(FalseVal, m_ZeroInt()))
5000 return Cond;
5001
5002 // (X && Y) ? X : Y --> Y (commuted 2 ways)
5003 if (match(Cond, m_c_LogicalAnd(m_Specific(TrueVal), m_Specific(FalseVal))))
5004 return FalseVal;
5005
5006 // (X || Y) ? X : Y --> X (commuted 2 ways)
5007 if (match(Cond, m_c_LogicalOr(m_Specific(TrueVal), m_Specific(FalseVal))))
5008 return TrueVal;
5009
5010 // (X || Y) ? false : X --> false (commuted 2 ways)
5011 if (match(Cond, m_c_LogicalOr(m_Specific(FalseVal), m_Value())) &&
5012 match(TrueVal, m_ZeroInt()))
5013 return ConstantInt::getFalse(Cond->getType());
5014
5015 // Match patterns that end in logical-and.
5016 if (match(FalseVal, m_ZeroInt())) {
5017 // !(X || Y) && X --> false (commuted 2 ways)
5018 if (match(Cond, m_Not(m_c_LogicalOr(m_Specific(TrueVal), m_Value()))))
5019 return ConstantInt::getFalse(Cond->getType());
5020 // X && !(X || Y) --> false (commuted 2 ways)
5021 if (match(TrueVal, m_Not(m_c_LogicalOr(m_Specific(Cond), m_Value()))))
5022 return ConstantInt::getFalse(Cond->getType());
5023
5024 // (X || Y) && Y --> Y (commuted 2 ways)
5025 if (match(Cond, m_c_LogicalOr(m_Specific(TrueVal), m_Value())))
5026 return TrueVal;
5027 // Y && (X || Y) --> Y (commuted 2 ways)
5028 if (match(TrueVal, m_c_LogicalOr(m_Specific(Cond), m_Value())))
5029 return Cond;
5030
5031 // (X || Y) && (X || !Y) --> X (commuted 8 ways)
5032 Value *X, *Y;
5033 if (match(Cond, m_c_LogicalOr(m_Value(X), m_Not(m_Value(Y)))) &&
5034 match(TrueVal, m_c_LogicalOr(m_Specific(X), m_Specific(Y))))
5035 return X;
5036 if (match(TrueVal, m_c_LogicalOr(m_Value(X), m_Not(m_Value(Y)))) &&
5037 match(Cond, m_c_LogicalOr(m_Specific(X), m_Specific(Y))))
5038 return X;
5039 }
5040
5041 // Match patterns that end in logical-or.
5042 if (match(TrueVal, m_One())) {
5043 // !(X && Y) || X --> true (commuted 2 ways)
5044 if (match(Cond, m_Not(m_c_LogicalAnd(m_Specific(FalseVal), m_Value()))))
5045 return ConstantInt::getTrue(Cond->getType());
5046 // X || !(X && Y) --> true (commuted 2 ways)
5047 if (match(FalseVal, m_Not(m_c_LogicalAnd(m_Specific(Cond), m_Value()))))
5048 return ConstantInt::getTrue(Cond->getType());
5049
5050 // (X && Y) || Y --> Y (commuted 2 ways)
5051 if (match(Cond, m_c_LogicalAnd(m_Specific(FalseVal), m_Value())))
5052 return FalseVal;
5053 // Y || (X && Y) --> Y (commuted 2 ways)
5054 if (match(FalseVal, m_c_LogicalAnd(m_Specific(Cond), m_Value())))
5055 return Cond;
5056 }
5057 }
5058
5059 // select ?, X, X -> X
5060 if (TrueVal == FalseVal)
5061 return TrueVal;
5062
5063 if (Cond == TrueVal) {
5064 // select i1 X, i1 X, i1 false --> X (logical-and)
5065 if (match(FalseVal, m_ZeroInt()))
5066 return Cond;
5067 // select i1 X, i1 X, i1 true --> true
5068 if (match(FalseVal, m_One()))
5069 return ConstantInt::getTrue(Cond->getType());
5070 }
5071 if (Cond == FalseVal) {
5072 // select i1 X, i1 true, i1 X --> X (logical-or)
5073 if (match(TrueVal, m_One()))
5074 return Cond;
5075 // select i1 X, i1 false, i1 X --> false
5076 if (match(TrueVal, m_ZeroInt()))
5077 return ConstantInt::getFalse(Cond->getType());
5078 }
5079
5080 // If the true or false value is poison, we can fold to the other value.
5081 // If the true or false value is undef, we can fold to the other value as
5082 // long as the other value isn't poison.
5083 // select ?, poison, X -> X
5084 // select ?, undef, X -> X
5085 if (isa<PoisonValue>(TrueVal) ||
5086 (Q.isUndefValue(TrueVal) && impliesPoison(FalseVal, Cond)))
5087 return FalseVal;
5088 // select ?, X, poison -> X
5089 // select ?, X, undef -> X
5090 if (isa<PoisonValue>(FalseVal) ||
5091 (Q.isUndefValue(FalseVal) && impliesPoison(TrueVal, Cond)))
5092 return TrueVal;
5093
5094 // Deal with partial undef vector constants: select ?, VecC, VecC' --> VecC''
5095 Constant *TrueC, *FalseC;
5096 if (isa<FixedVectorType>(TrueVal->getType()) &&
5097 match(TrueVal, m_Constant(TrueC)) &&
5098 match(FalseVal, m_Constant(FalseC))) {
5099 unsigned NumElts =
5100 cast<FixedVectorType>(TrueC->getType())->getNumElements();
5101 SmallVector<Constant *, 16> NewC;
5102 for (unsigned i = 0; i != NumElts; ++i) {
5103 // Bail out on incomplete vector constants.
5104 Constant *TEltC = TrueC->getAggregateElement(i);
5105 Constant *FEltC = FalseC->getAggregateElement(i);
5106 if (!TEltC || !FEltC)
5107 break;
5108
5109 // If the elements match (undef or not), that value is the result. If only
5110 // one element is undef, choose the defined element as the safe result.
5111 if (TEltC == FEltC)
5112 NewC.push_back(TEltC);
5113 else if (isa<PoisonValue>(TEltC) ||
5114 (Q.isUndefValue(TEltC) && isGuaranteedNotToBePoison(FEltC)))
5115 NewC.push_back(FEltC);
5116 else if (isa<PoisonValue>(FEltC) ||
5117 (Q.isUndefValue(FEltC) && isGuaranteedNotToBePoison(TEltC)))
5118 NewC.push_back(TEltC);
5119 else
5120 break;
5121 }
5122 if (NewC.size() == NumElts)
5123 return ConstantVector::get(NewC);
5124 }
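// Editorial note, illustrative example (placeholder constants): with
// TrueC = <i32 1, i32 poison> and FalseC = <i32 1, i32 2>, lane 0 has equal
// elements and lane 1 takes the defined element, so the select folds to the
// constant <i32 1, i32 2>.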
5125
5126 if (Value *V =
5127 simplifySelectWithICmpCond(Cond, TrueVal, FalseVal, Q, MaxRecurse))
5128 return V;
5129
5130 if (Value *V = simplifySelectWithBitTest(Cond, TrueVal, FalseVal))
5131 return V;
5132
5133 if (Value *V = simplifySelectWithFCmp(Cond, TrueVal, FalseVal, Q, MaxRecurse))
5134 return V;
5135
5136 std::optional<bool> Imp = isImpliedByDomCondition(Cond, Q.CxtI, Q.DL);
5137 if (Imp)
5138 return *Imp ? TrueVal : FalseVal;
5139 // Look for same PHIs in the true and false values.
5140 if (auto *TruePHI = dyn_cast<PHINode>(TrueVal))
5141 if (auto *FalsePHI = dyn_cast<PHINode>(FalseVal)) {
5142 if (isSelectWithIdenticalPHI(*TruePHI, *FalsePHI))
5143 return FalseVal;
5144 if (isSelectWithIdenticalPHI(*FalsePHI, *TruePHI))
5145 return TrueVal;
5146 }
5147 return nullptr;
5148}
5149
5150 Value *llvm::simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
5151 const SimplifyQuery &Q) {
5152 return ::simplifySelectInst(Cond, TrueVal, FalseVal, Q, RecursionLimit);
5153}
5154
5155/// Given operands for an GetElementPtrInst, see if we can fold the result.
5156/// If not, this returns null.
5157static Value *simplifyGEPInst(Type *SrcTy, Value *Ptr,
5158 ArrayRef<Value *> Indices, GEPNoWrapFlags NW,
5159 const SimplifyQuery &Q, unsigned) {
5160 // The type of the GEP pointer operand.
5161 unsigned AS =
5162 cast<PointerType>(Ptr->getType()->getScalarType())->getAddressSpace();
5163
5164 // getelementptr P -> P.
5165 if (Indices.empty())
5166 return Ptr;
5167
5168 // Compute the (pointer) type returned by the GEP instruction.
5169 Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Indices);
5170 Type *GEPTy = Ptr->getType();
5171 if (!GEPTy->isVectorTy()) {
5172 for (Value *Op : Indices) {
5173 // If one of the operands is a vector, the result type is a vector of
5174 // pointers. All vector operands must have the same number of elements.
5175 if (VectorType *VT = dyn_cast<VectorType>(Op->getType())) {
5176 GEPTy = VectorType::get(GEPTy, VT->getElementCount());
5177 break;
5178 }
5179 }
5180 }
5181
5182 // All-zero GEP is a no-op, unless it performs a vector splat.
5183 if (Ptr->getType() == GEPTy && all_of(Indices, match_fn(m_Zero())))
5184 return Ptr;
5185
5186 // getelementptr poison, idx -> poison
5187 // getelementptr baseptr, poison -> poison
5188 if (isa<PoisonValue>(Ptr) || any_of(Indices, IsaPred<PoisonValue>))
5189 return PoisonValue::get(GEPTy);
5190
5191 // getelementptr undef, idx -> undef
5192 if (Q.isUndefValue(Ptr))
5193 return UndefValue::get(GEPTy);
5194
5195 bool IsScalableVec =
5196 SrcTy->isScalableTy() || any_of(Indices, [](const Value *V) {
5197 return isa<ScalableVectorType>(V->getType());
5198 });
5199
5200 if (Indices.size() == 1) {
5201 Type *Ty = SrcTy;
5202 if (!IsScalableVec && Ty->isSized()) {
5203 Value *P;
5204 uint64_t C;
5205 uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
5206 // getelementptr P, N -> P if P points to a type of zero size.
5207 if (TyAllocSize == 0 && Ptr->getType() == GEPTy)
5208 return Ptr;
5209
5210 // The following transforms are only safe if the ptrtoint cast
5211 // doesn't truncate the address of the pointers. The non-address bits
5212 // must be the same, as the underlying objects are the same.
5213 if (Indices[0]->getType()->getScalarSizeInBits() >=
5214 Q.DL.getAddressSizeInBits(AS)) {
5215 auto CanSimplify = [GEPTy, &P, Ptr]() -> bool {
5216 return P->getType() == GEPTy &&
5217 getUnderlyingObject(P) == getUnderlyingObject(Ptr);
5218 };
5219 // getelementptr V, (sub P, V) -> P if P points to a type of size 1.
5220 if (TyAllocSize == 1 &&
5221 match(Indices[0], m_Sub(m_PtrToIntOrAddr(m_Value(P)),
5222 m_PtrToIntOrAddr(m_Specific(Ptr)))) &&
5223 CanSimplify())
5224 return P;
5225
5226 // getelementptr V, (ashr (sub P, V), C) -> P if P points to a type of
5227 // size 1 << C.
5228 if (match(Indices[0], m_AShr(m_Sub(m_PtrToIntOrAddr(m_Value(P)),
5229 m_PtrToIntOrAddr(m_Specific(Ptr))),
5230 m_ConstantInt(C))) &&
5231 TyAllocSize == 1ULL << C && CanSimplify())
5232 return P;
5233
5234 // getelementptr V, (sdiv (sub P, V), C) -> P if P points to a type of
5235 // size C.
5236 if (match(Indices[0], m_SDiv(m_Sub(m_PtrToIntOrAddr(m_Value(P)),
5237 m_PtrToIntOrAddr(m_Specific(Ptr))),
5238 m_SpecificInt(TyAllocSize))) &&
5239 CanSimplify())
5240 return P;
5241 }
5242 }
5243 }
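// Editorial note, illustrative IR (placeholder values; assumes an
// address-width index and that %p and %v share an underlying object):
//   %pi = ptrtoint ptr %p to i64
//   %vi = ptrtoint ptr %v to i64
//   %d  = sub i64 %pi, %vi
//   %g  = getelementptr i8, ptr %v, i64 %d   ; --> %p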
5244
5245 if (!IsScalableVec && Q.DL.getTypeAllocSize(LastType) == 1 &&
5246 all_of(Indices.drop_back(1), match_fn(m_Zero()))) {
5247 unsigned IdxWidth =
5248 Q.DL.getIndexSizeInBits(Ptr->getType()->getPointerAddressSpace());
5249 if (Q.DL.getTypeSizeInBits(Indices.back()->getType()) == IdxWidth) {
5250 APInt BasePtrOffset(IdxWidth, 0);
5251 Value *StrippedBasePtr =
5252 Ptr->stripAndAccumulateInBoundsConstantOffsets(Q.DL, BasePtrOffset);
5253
5254 // Avoid creating inttoptr of zero here: while LLVM's treatment of
5255 // inttoptr is generally conservative, this particular case is folded to
5256 // a null pointer, which will have incorrect provenance.
5257
5258 // gep (gep V, C), (sub 0, V) -> C
5259 if (match(Indices.back(),
5260 m_Neg(m_PtrToInt(m_Specific(StrippedBasePtr)))) &&
5261 !BasePtrOffset.isZero()) {
5262 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset);
5263 return ConstantExpr::getIntToPtr(CI, GEPTy);
5264 }
5265 // gep (gep V, C), (xor V, -1) -> C-1
5266 if (match(Indices.back(),
5267 m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)), m_AllOnes())) &&
5268 !BasePtrOffset.isOne()) {
5269 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1);
5270 return ConstantExpr::getIntToPtr(CI, GEPTy);
5271 }
5272 }
5273 }
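// Editorial note, illustrative IR (placeholder values): for the first fold
// above, with %q = getelementptr inbounds i8, ptr %v, i64 42,
//   %vi = ptrtoint ptr %v to i64
//   %n  = sub i64 0, %vi
//   %g  = getelementptr i8, ptr %q, i64 %n   ; --> inttoptr (i64 42) to ptr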
5274
5275 // Check to see if this is constant foldable.
5276 if (!isa<Constant>(Ptr) || !all_of(Indices, IsaPred<Constant>))
5277 return nullptr;
5278
5279 if (!ConstantExpr::isSupportedGetElementPtr(SrcTy))
5280 return ConstantFoldGetElementPtr(SrcTy, cast<Constant>(Ptr), std::nullopt,
5281 Indices);
5282
5283 auto *CE =
5284 ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ptr), Indices, NW);
5285 return ConstantFoldConstant(CE, Q.DL);
5286}
5287
5288 Value *llvm::simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef<Value *> Indices,
5289 GEPNoWrapFlags NW, const SimplifyQuery &Q) {
5290 return ::simplifyGEPInst(SrcTy, Ptr, Indices, NW, Q, RecursionLimit);
5291}
5292
5293/// Given operands for an InsertValueInst, see if we can fold the result.
5294/// If not, this returns null.
5295 static Value *simplifyInsertValueInst(Value *Agg, Value *Val,
5296 ArrayRef<unsigned> Idxs,
5297 const SimplifyQuery &Q, unsigned) {
5298 if (Constant *CAgg = dyn_cast<Constant>(Agg))
5299 if (Constant *CVal = dyn_cast<Constant>(Val))
5300 return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs);
5301
5302 // insertvalue x, poison, n -> x
5303 // insertvalue x, undef, n -> x if x cannot be poison
5304 if (isa<PoisonValue>(Val) ||
5305 (Q.isUndefValue(Val) && isGuaranteedNotToBePoison(Agg)))
5306 return Agg;
5307
5308 // insertvalue x, (extractvalue y, n), n
5309 if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val))
5310 if (EV->getAggregateOperand()->getType() == Agg->getType() &&
5311 EV->getIndices() == Idxs) {
5312 // insertvalue poison, (extractvalue y, n), n -> y
5313 // insertvalue undef, (extractvalue y, n), n -> y if y cannot be poison
5314 if (isa<PoisonValue>(Agg) ||
5315 (Q.isUndefValue(Agg) &&
5316 isGuaranteedNotToBePoison(EV->getAggregateOperand())))
5317 return EV->getAggregateOperand();
5318
5319 // insertvalue y, (extractvalue y, n), n -> y
5320 if (Agg == EV->getAggregateOperand())
5321 return Agg;
5322 }
5323
5324 return nullptr;
5325}
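// Editorial note, illustrative IR (placeholder values): reinserting an
// extracted member is a no-op,
//   %e = extractvalue { i32, i8 } %agg, 0
//   %r = insertvalue { i32, i8 } %agg, i32 %e, 0   ; --> %agg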
5326
5327 Value *llvm::simplifyInsertValueInst(Value *Agg, Value *Val,
5328 ArrayRef<unsigned> Idxs,
5329 const SimplifyQuery &Q) {
5330 return ::simplifyInsertValueInst(Agg, Val, Idxs, Q, RecursionLimit);
5331}
5332
5333 Value *llvm::simplifyInsertElementInst(Value *Vec, Value *Val, Value *Idx,
5334 const SimplifyQuery &Q) {
5335 // Try to constant fold.
5336 auto *VecC = dyn_cast<Constant>(Vec);
5337 auto *ValC = dyn_cast<Constant>(Val);
5338 auto *IdxC = dyn_cast<Constant>(Idx);
5339 if (VecC && ValC && IdxC)
5340 return ConstantExpr::getInsertElement(VecC, ValC, IdxC);
5341
5342 // For fixed-length vector, fold into poison if index is out of bounds.
5343 if (auto *CI = dyn_cast<ConstantInt>(Idx)) {
5344 if (isa<FixedVectorType>(Vec->getType()) &&
5345 CI->uge(cast<FixedVectorType>(Vec->getType())->getNumElements()))
5346 return PoisonValue::get(Vec->getType());
5347 }
5348
5349 // If index is undef, it might be out of bounds (see above case)
5350 if (Q.isUndefValue(Idx))
5351 return PoisonValue::get(Vec->getType());
5352
5353 // If the scalar is poison, or it is undef and there is no risk of
5354 // propagating poison from the vector value, simplify to the vector value.
5355 if (isa<PoisonValue>(Val) ||
5356 (Q.isUndefValue(Val) && isGuaranteedNotToBePoison(Vec)))
5357 return Vec;
5358
5359 // Inserting the splatted value into a constant splat does nothing.
5360 if (VecC && ValC && VecC->getSplatValue() == ValC)
5361 return Vec;
5362
5363 // If we are extracting a value from a vector, then inserting it into the same
5364 // place, that's the input vector:
5365 // insertelt Vec, (extractelt Vec, Idx), Idx --> Vec
5366 if (match(Val, m_ExtractElt(m_Specific(Vec), m_Specific(Idx))))
5367 return Vec;
5368
5369 return nullptr;
5370}
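// Editorial note, illustrative IR (placeholder values): for the last fold
// above,
//   %e = extractelement <4 x i32> %vec, i64 %i
//   %r = insertelement <4 x i32> %vec, i32 %e, i64 %i   ; --> %vec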
5371
5372/// Given operands for an ExtractValueInst, see if we can fold the result.
5373/// If not, this returns null.
5374 static Value *simplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
5375 const SimplifyQuery &, unsigned) {
5376 if (auto *CAgg = dyn_cast<Constant>(Agg))
5377 return ConstantFoldExtractValueInstruction(CAgg, Idxs);
5378
5379 // extractvalue x, (insertvalue y, elt, n), n -> elt
5380 unsigned NumIdxs = Idxs.size();
5381 for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr;
5382 IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) {
5383 ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices();
5384 unsigned NumInsertValueIdxs = InsertValueIdxs.size();
5385 unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs);
5386 if (InsertValueIdxs.slice(0, NumCommonIdxs) ==
5387 Idxs.slice(0, NumCommonIdxs)) {
5388 if (NumIdxs == NumInsertValueIdxs)
5389 return IVI->getInsertedValueOperand();
5390 break;
5391 }
5392 }
5393
5394 // Simplify umul_with_overflow where one operand is 1.
5395 Value *V;
5396 if (Idxs.size() == 1 &&
5397 (match(Agg,
5398 m_Intrinsic<Intrinsic::umul_with_overflow>(m_Value(V), m_One())) ||
5399 match(Agg, m_Intrinsic<Intrinsic::umul_with_overflow>(m_One(),
5400 m_Value(V))))) {
5401 if (Idxs[0] == 0)
5402 return V;
5403 assert(Idxs[0] == 1 && "invalid index");
5404 return getFalse(CmpInst::makeCmpResultType(V->getType()));
5405 }
5406
5407 return nullptr;
5408}
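// Editorial note, illustrative IR (placeholder values): for the
// umul_with_overflow fold above,
//   %m = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 1)
//   %v = extractvalue { i32, i1 } %m, 0   ; --> %x
//   %o = extractvalue { i32, i1 } %m, 1   ; --> false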
5409
5410 Value *llvm::simplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
5411 const SimplifyQuery &Q) {
5412 return ::simplifyExtractValueInst(Agg, Idxs, Q, RecursionLimit);
5413}
5414
5415/// Given operands for an ExtractElementInst, see if we can fold the result.
5416/// If not, this returns null.
5417 static Value *simplifyExtractElementInst(Value *Vec, Value *Idx,
5418 const SimplifyQuery &Q, unsigned) {
5419 auto *VecVTy = cast<VectorType>(Vec->getType());
5420 if (auto *CVec = dyn_cast<Constant>(Vec)) {
5421 if (auto *CIdx = dyn_cast<Constant>(Idx))
5422 return ConstantExpr::getExtractElement(CVec, CIdx);
5423
5424 if (Q.isUndefValue(Vec))
5425 return UndefValue::get(VecVTy->getElementType());
5426 }
5427
5428 // An undef extract index can be arbitrarily chosen to be an out-of-range
5429 // index value, which would result in the instruction being poison.
5430 if (Q.isUndefValue(Idx))
5431 return PoisonValue::get(VecVTy->getElementType());
5432
5433 // If extracting a specified index from the vector, see if we can recursively
5434 // find a previously computed scalar that was inserted into the vector.
5435 if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) {
5436 // For fixed-length vector, fold into undef if index is out of bounds.
5437 unsigned MinNumElts = VecVTy->getElementCount().getKnownMinValue();
5438 if (isa<FixedVectorType>(VecVTy) && IdxC->getValue().uge(MinNumElts))
5439 return PoisonValue::get(VecVTy->getElementType());
5440 // Handle case where an element is extracted from a splat.
5441 if (IdxC->getValue().ult(MinNumElts))
5442 if (auto *Splat = getSplatValue(Vec))
5443 return Splat;
5444 if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
5445 return Elt;
5446 } else {
5447 // extractelt x, (insertelt y, elt, n), n -> elt
5448 // If the possibly-variable indices are trivially known to be equal
5449 // (because they are the same operand) then use the value that was
5450 // inserted directly.
5451 auto *IE = dyn_cast<InsertElementInst>(Vec);
5452 if (IE && IE->getOperand(2) == Idx)
5453 return IE->getOperand(1);
5454
5455 // The index is not relevant if our vector is a splat.
5456 if (Value *Splat = getSplatValue(Vec))
5457 return Splat;
5458 }
5459 return nullptr;
5460}
5461
5462 Value *llvm::simplifyExtractElementInst(Value *Vec, Value *Idx,
5463 const SimplifyQuery &Q) {
5464 return ::simplifyExtractElementInst(Vec, Idx, Q, RecursionLimit);
5465}
5466
5467/// See if we can fold the given phi. If not, returns null.
5468 static Value *simplifyPHINode(PHINode *PN, ArrayRef<Value *> IncomingValues,
5469 const SimplifyQuery &Q) {
5470 // WARNING: no matter how worthwhile it may seem, we cannot perform PHI CSE
5471 // here, because the PHI we may succeed simplifying to was not
5472 // def-reachable from the original PHI!
5473
5474 // If all of the PHI's incoming values are the same then replace the PHI node
5475 // with the common value.
5476 Value *CommonValue = nullptr;
5477 bool HasPoisonInput = false;
5478 bool HasUndefInput = false;
5479 for (Value *Incoming : IncomingValues) {
5480 // If the incoming value is the phi node itself, it can safely be skipped.
5481 if (Incoming == PN)
5482 continue;
5483 if (isa<PoisonValue>(Incoming)) {
5484 HasPoisonInput = true;
5485 continue;
5486 }
5487 if (Q.isUndefValue(Incoming)) {
5488 // Remember that we saw an undef value, but otherwise ignore them.
5489 HasUndefInput = true;
5490 continue;
5491 }
5492 if (CommonValue && Incoming != CommonValue)
5493 return nullptr; // Not the same, bail out.
5494 CommonValue = Incoming;
5495 }
5496
5497 // If CommonValue is null then all of the incoming values were either undef,
5498 // poison or equal to the phi node itself.
5499 if (!CommonValue)
5500 return HasUndefInput ? UndefValue::get(PN->getType())
5501 : PoisonValue::get(PN->getType());
5502
5503 if (HasPoisonInput || HasUndefInput) {
5504 // If we have a PHI node like phi(X, undef, X), where X is defined by some
5505 // instruction, we cannot return X as the result of the PHI node unless it
5506 // dominates the PHI block.
5507 if (!valueDominatesPHI(CommonValue, PN, Q.DT))
5508 return nullptr;
5509
5510 // Make sure we do not replace an undef value with poison.
5511 if (HasUndefInput &&
5512 !isGuaranteedNotToBePoison(CommonValue, Q.AC, Q.CxtI, Q.DT))
5513 return nullptr;
5514 return CommonValue;
5515 }
5516
5517 return CommonValue;
5518}
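// Editorial note, illustrative IR (placeholder values):
//   %p = phi i32 [ %x, %bb0 ], [ undef, %bb1 ]
// simplifies to %x only if %x dominates the phi's block and is known not to
// be poison; otherwise we could be replacing undef with poison.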
5519
5520static Value *simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
5521 const SimplifyQuery &Q, unsigned MaxRecurse) {
5522 if (auto *C = dyn_cast<Constant>(Op))
5523 return ConstantFoldCastOperand(CastOpc, C, Ty, Q.DL);
5524
5525 if (auto *CI = dyn_cast<CastInst>(Op)) {
5526 auto *Src = CI->getOperand(0);
5527 Type *SrcTy = Src->getType();
5528 Type *MidTy = CI->getType();
5529 Type *DstTy = Ty;
5530 if (Src->getType() == Ty) {
5531 auto FirstOp = CI->getOpcode();
5532 auto SecondOp = static_cast<Instruction::CastOps>(CastOpc);
5533 if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy,
5534 &Q.DL) == Instruction::BitCast)
5535 return Src;
5536 }
5537 }
5538
5539 // bitcast x -> x
5540 if (CastOpc == Instruction::BitCast)
5541 if (Op->getType() == Ty)
5542 return Op;
5543
5544 // ptrtoint (ptradd (Ptr, X - ptrtoint(Ptr))) -> X
5545 Value *Ptr, *X;
5546 if ((CastOpc == Instruction::PtrToInt || CastOpc == Instruction::PtrToAddr) &&
5547 match(Op,
5548 m_PtrAdd(m_Value(Ptr),
5549 m_Sub(m_Value(X), m_PtrToIntOrAddr(m_Deferred(Ptr))))) &&
5550 X->getType() == Ty && Ty == Q.DL.getIndexType(Ptr->getType()))
5551 return X;
5552
5553 return nullptr;
5554}
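// Editorial note, illustrative IR (placeholder values): an eliminable cast
// pair whose destination type equals the original source type folds away,
//   %w = zext i16 %x to i32
//   %t = trunc i32 %w to i16   ; --> %x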
5555
5556Value *llvm::simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
5557 const SimplifyQuery &Q) {
5558 return ::simplifyCastInst(CastOpc, Op, Ty, Q, RecursionLimit);
5559}
5560
5561/// For the given destination element of a shuffle, peek through shuffles to
5562/// match a root vector source operand that contains that element in the same
5563/// vector lane (ie, the same mask index), so we can eliminate the shuffle(s).
5564static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1,
5565 int MaskVal, Value *RootVec,
5566 unsigned MaxRecurse) {
5567 if (!MaxRecurse--)
5568 return nullptr;
5569
5570 // Bail out if any mask value is undefined. That kind of shuffle may be
5571 // simplified further based on demanded bits or other folds.
5572 if (MaskVal == -1)
5573 return nullptr;
5574
5575 // The mask value chooses which source operand we need to look at next.
5576 int InVecNumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
5577 int RootElt = MaskVal;
5578 Value *SourceOp = Op0;
5579 if (MaskVal >= InVecNumElts) {
5580 RootElt = MaskVal - InVecNumElts;
5581 SourceOp = Op1;
5582 }
5583
5584 // If the source operand is a shuffle itself, look through it to find the
5585 // matching root vector.
5586 if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) {
5587 return foldIdentityShuffles(
5588 DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1),
5589 SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse);
5590 }
5591
5592 // The source operand is not a shuffle. Initialize the root vector value for
5593 // this shuffle if that has not been done yet.
5594 if (!RootVec)
5595 RootVec = SourceOp;
5596
5597 // Give up as soon as a source operand does not match the existing root value.
5598 if (RootVec != SourceOp)
5599 return nullptr;
5600
5601 // The element must be coming from the same lane in the source vector
5602 // (although it may have crossed lanes in intermediate shuffles).
5603 if (RootElt != DestElt)
5604 return nullptr;
5605
5606 return RootVec;
5607}
5608
5609 static Value *simplifyShuffleVectorInst(Value *Op0, Value *Op1,
5610 ArrayRef<int> Mask, Type *RetTy,
5611 const SimplifyQuery &Q,
5612 unsigned MaxRecurse) {
5613 if (all_of(Mask, [](int Elem) { return Elem == PoisonMaskElem; }))
5614 return PoisonValue::get(RetTy);
5615
5616 auto *InVecTy = cast<VectorType>(Op0->getType());
5617 unsigned MaskNumElts = Mask.size();
5618 ElementCount InVecEltCount = InVecTy->getElementCount();
5619
5620 bool Scalable = InVecEltCount.isScalable();
5621
5622 SmallVector<int, 32> Indices;
5623 Indices.assign(Mask.begin(), Mask.end());
5624
5625 // Canonicalization: If mask does not select elements from an input vector,
5626 // replace that input vector with poison.
5627 if (!Scalable) {
5628 bool MaskSelects0 = false, MaskSelects1 = false;
5629 unsigned InVecNumElts = InVecEltCount.getKnownMinValue();
5630 for (unsigned i = 0; i != MaskNumElts; ++i) {
5631 if (Indices[i] == -1)
5632 continue;
5633 if ((unsigned)Indices[i] < InVecNumElts)
5634 MaskSelects0 = true;
5635 else
5636 MaskSelects1 = true;
5637 }
5638 if (!MaskSelects0)
5639 Op0 = PoisonValue::get(InVecTy);
5640 if (!MaskSelects1)
5641 Op1 = PoisonValue::get(InVecTy);
5642 }
5643
5644 auto *Op0Const = dyn_cast<Constant>(Op0);
5645 auto *Op1Const = dyn_cast<Constant>(Op1);
5646
5647 // If all operands are constant, constant fold the shuffle. This
5648 // transformation depends on the value of the mask which is not known at
5649 // compile time for scalable vectors
5650 if (Op0Const && Op1Const)
5651 return ConstantExpr::getShuffleVector(Op0Const, Op1Const, Mask);
5652
5653 // Canonicalization: if only one input vector is constant, it shall be the
5654 // second one. This transformation depends on the value of the mask which
5655 // is not known at compile time for scalable vectors
5656 if (!Scalable && Op0Const && !Op1Const) {
5657 std::swap(Op0, Op1);
5658 ShuffleVectorInst::commuteShuffleMask(Indices,
5659 InVecEltCount.getKnownMinValue());
5660 }
5661
5662 // A splat of an inserted scalar constant becomes a vector constant:
5663 // shuf (inselt ?, C, IndexC), undef, <IndexC, IndexC...> --> <C, C...>
5664 // NOTE: We may have commuted above, so analyze the updated Indices, not the
5665 // original mask constant.
5666 // NOTE: This transformation depends on the value of the mask which is not
5667 // known at compile time for scalable vectors
5668 Constant *C;
5669 ConstantInt *IndexC;
5670 if (!Scalable && match(Op0, m_InsertElt(m_Value(), m_Constant(C),
5671 m_ConstantInt(IndexC)))) {
5672 // Match a splat shuffle mask of the insert index allowing undef elements.
5673 int InsertIndex = IndexC->getZExtValue();
5674 if (all_of(Indices, [InsertIndex](int MaskElt) {
5675 return MaskElt == InsertIndex || MaskElt == -1;
5676 })) {
5677 assert(isa<UndefValue>(Op1) && "Expected undef operand 1 for splat");
5678
5679 // Shuffle mask poisons become poison constant result elements.
5680 SmallVector<Constant *, 16> VecC(MaskNumElts, C);
5681 for (unsigned i = 0; i != MaskNumElts; ++i)
5682 if (Indices[i] == -1)
5683 VecC[i] = PoisonValue::get(C->getType());
5684 return ConstantVector::get(VecC);
5685 }
5686 }
5687
5688 // A shuffle of a splat is always the splat itself. Legal if the shuffle's
5689 // value type is the same as the input vectors' type.
5690 if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
5691 if (Q.isUndefValue(Op1) && RetTy == InVecTy &&
5692 all_equal(OpShuf->getShuffleMask()))
5693 return Op0;
5694
5695 // All remaining transformations depend on the value of the mask, which is
5696 // not known at compile time for scalable vectors.
5697 if (Scalable)
5698 return nullptr;
5699
5700 // Don't fold a shuffle with undef mask elements. This may get folded in a
5701 // better way using demanded bits or other analysis.
5702 // TODO: Should we allow this?
5703 if (is_contained(Indices, -1))
5704 return nullptr;
5705
5706 // Check if every element of this shuffle can be mapped back to the
5707 // corresponding element of a single root vector. If so, we don't need this
5708 // shuffle. This handles simple identity shuffles as well as chains of
5709 // shuffles that may widen/narrow and/or move elements across lanes and back.
5710 Value *RootVec = nullptr;
5711 for (unsigned i = 0; i != MaskNumElts; ++i) {
5712 // Note that recursion is limited for each vector element, so if any element
5713 // exceeds the limit, this will fail to simplify.
5714 RootVec =
5715 foldIdentityShuffles(i, Op0, Op1, Indices[i], RootVec, MaxRecurse);
5716
5717 // We can't replace a widening/narrowing shuffle with one of its operands.
5718 if (!RootVec || RootVec->getType() != RetTy)
5719 return nullptr;
5720 }
5721 return RootVec;
5722}
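// Editorial note, illustrative IR (placeholder values): two lane-reversing
// shuffles map each lane back to its source lane,
//   %r1 = shufflevector <2 x i32> %v, <2 x i32> poison, <2 x i32> <i32 1, i32 0>
//   %r2 = shufflevector <2 x i32> %r1, <2 x i32> poison, <2 x i32> <i32 1, i32 0>
// so %r2 simplifies to %v.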
5723
5724/// Given operands for a ShuffleVectorInst, fold the result or return null.
5725 Value *llvm::simplifyShuffleVectorInst(Value *Op0, Value *Op1,
5726 ArrayRef<int> Mask, Type *RetTy,
5727 const SimplifyQuery &Q) {
5728 return ::simplifyShuffleVectorInst(Op0, Op1, Mask, RetTy, Q, RecursionLimit);
5729}
5730
5731 static Constant *foldConstant(Instruction::UnaryOps Opcode, Value *&Op,
5732 const SimplifyQuery &Q) {
5733 if (auto *C = dyn_cast<Constant>(Op))
5734 return ConstantFoldUnaryOpOperand(Opcode, C, Q.DL);
5735 return nullptr;
5736}
5737
5738/// Given the operand for an FNeg, see if we can fold the result. If not, this
5739/// returns null.
5740 static Value *simplifyFNegInst(Value *Op, FastMathFlags FMF,
5741 const SimplifyQuery &Q, unsigned MaxRecurse) {
5742 if (Constant *C = foldConstant(Instruction::FNeg, Op, Q))
5743 return C;
5744
5745 Value *X;
5746 // fneg (fneg X) ==> X
5747 if (match(Op, m_FNeg(m_Value(X))))
5748 return X;
5749
5750 return nullptr;
5751}
5752
5753 Value *llvm::simplifyFNegInst(Value *Op, FastMathFlags FMF,
5754 const SimplifyQuery &Q) {
5755 return ::simplifyFNegInst(Op, FMF, Q, RecursionLimit);
5756}
5757
5758/// Try to propagate existing NaN values when possible. If not, replace the
5759/// constant or elements in the constant with a canonical NaN.
5760 static Constant *propagateNaN(Constant *In) {
5761 Type *Ty = In->getType();
5762 if (auto *VecTy = dyn_cast<FixedVectorType>(Ty)) {
5763 unsigned NumElts = VecTy->getNumElements();
5764 SmallVector<Constant *, 32> NewC(NumElts);
5765 for (unsigned i = 0; i != NumElts; ++i) {
5766 Constant *EltC = In->getAggregateElement(i);
5767 // Poison elements propagate. NaN propagates, except a signaling NaN is quieted.
5768 // Replace unknown or undef elements with canonical NaN.
5769 if (EltC && isa<PoisonValue>(EltC))
5770 NewC[i] = EltC;
5771 else if (EltC && EltC->isNaN())
5772 NewC[i] = ConstantFP::get(
5773 EltC->getType(), cast<ConstantFP>(EltC)->getValue().makeQuiet());
5774 else
5775 NewC[i] = ConstantFP::getNaN(VecTy->getElementType());
5776 }
5777 return ConstantVector::get(NewC);
5778 }
5779
5780 // If it is not a fixed vector, but not a simple NaN either, return a
5781 // canonical NaN.
5782 if (!In->isNaN())
5783 return ConstantFP::getNaN(Ty);
5784
5785 // If we know this is a NaN and it's a scalable vector, we must have a splat
5786 // on our hands. Grab that before splatting a QNaN constant.
5787 if (isa<ScalableVectorType>(Ty)) {
5788 auto *Splat = In->getSplatValue();
5789 assert(Splat && Splat->isNaN() &&
5790 "Found a scalable-vector NaN but not a splat");
5791 In = Splat;
5792 }
5793
5794 // Propagate an existing QNaN constant. If it is an SNaN, make it quiet, but
5795 // preserve the sign/payload.
5796 return ConstantFP::get(Ty, cast<ConstantFP>(In)->getValue().makeQuiet());
5797}
5798
5799/// Perform folds that are common to any floating-point operation. This implies
5800/// transforms based on poison/undef/NaN because the operation itself makes no
5801/// difference to the result.
5802 static Constant *simplifyFPOp(ArrayRef<Value *> Ops, FastMathFlags FMF,
5803 const SimplifyQuery &Q,
5804 fp::ExceptionBehavior ExBehavior,
5805 RoundingMode Rounding) {
5806 // Poison is independent of anything else. It always propagates from an
5807 // operand to a math result.
5808 if (any_of(Ops, [](Value *V) { return match(V, m_Poison()); }))
5809 return PoisonValue::get(Ops[0]->getType());
5810
5811 for (Value *V : Ops) {
5812 bool IsNan = match(V, m_NaN());
5813 bool IsInf = match(V, m_Inf());
5814 bool IsUndef = Q.isUndefValue(V);
5815
5816 // If this operation has 'nnan' or 'ninf' and at least 1 disallowed operand
5817 // (an undef operand can be chosen to be Nan/Inf), then the result of
5818 // this operation is poison.
5819 if (FMF.noNaNs() && (IsNan || IsUndef))
5820 return PoisonValue::get(V->getType());
5821 if (FMF.noInfs() && (IsInf || IsUndef))
5822 return PoisonValue::get(V->getType());
5823
5824 if (isDefaultFPEnvironment(ExBehavior, Rounding)) {
5825 // Undef does not propagate because undef means that all bits can take on
5826 // any value. If this is undef * NaN for example, then the result values
5827 // (at least the exponent bits) are limited. Assume the undef is a
5828 // canonical NaN and propagate that.
5829 if (IsUndef)
5830 return ConstantFP::getNaN(V->getType());
5831 if (IsNan)
5832 return propagateNaN(cast<Constant>(V));
5833 } else if (ExBehavior != fp::ebStrict) {
5834 if (IsNan)
5835 return propagateNaN(cast<Constant>(V));
5836 }
5837 }
5838 return nullptr;
5839}
5840
5841/// Given operands for an FAdd, see if we can fold the result. If not, this
5842/// returns null.
5843static Value *
5844 simplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5845 const SimplifyQuery &Q, unsigned MaxRecurse,
5846 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5847 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5848 if (isDefaultFPEnvironment(ExBehavior, Rounding))
5849 if (Constant *C = foldOrCommuteConstant(Instruction::FAdd, Op0, Op1, Q))
5850 return C;
5851
5852 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5853 return C;
5854
5855 // fadd X, -0 ==> X
5856 // With strict/constrained FP, we have these possible edge cases that do
5857 // not simplify to Op0:
5858 // fadd SNaN, -0.0 --> QNaN
5859 // fadd +0.0, -0.0 --> -0.0 (but only with round toward negative)
5860 if (canIgnoreSNaN(ExBehavior, FMF) &&
5861 (!canRoundingModeBe(Rounding, RoundingMode::TowardNegative) ||
5862 FMF.noSignedZeros()))
5863 if (match(Op1, m_NegZeroFP()))
5864 return Op0;
5865
5866 // fadd X, 0 ==> X, when we know X is not -0
5867 if (canIgnoreSNaN(ExBehavior, FMF))
5868 if (match(Op1, m_PosZeroFP()) &&
5869 (FMF.noSignedZeros() || cannotBeNegativeZero(Op0, Q)))
5870 return Op0;
5871
5872 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5873 return nullptr;
5874
5875 if (FMF.noNaNs()) {
5876 // With nnan: X + {+/-}Inf --> {+/-}Inf
5877 if (match(Op1, m_Inf()))
5878 return Op1;
5879
5880 // With nnan: -X + X --> 0.0 (and commuted variant)
5881 // We don't have to explicitly exclude infinities (ninf): INF + -INF == NaN.
5882 // Negative zeros are allowed because we always end up with positive zero:
5883 // X = -0.0: (-0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
5884 // X = -0.0: ( 0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
5885 // X = 0.0: (-0.0 - ( 0.0)) + ( 0.0) == (-0.0) + ( 0.0) == 0.0
5886 // X = 0.0: ( 0.0 - ( 0.0)) + ( 0.0) == ( 0.0) + ( 0.0) == 0.0
5887 if (match(Op0, m_FSub(m_AnyZeroFP(), m_Specific(Op1))) ||
5888 match(Op1, m_FSub(m_AnyZeroFP(), m_Specific(Op0))))
5889 return ConstantFP::getZero(Op0->getType());
5890
5891 if (match(Op0, m_FNeg(m_Specific(Op1))) ||
5892 match(Op1, m_FNeg(m_Specific(Op0))))
5893 return ConstantFP::getZero(Op0->getType());
5894 }
5895
5896 // (X - Y) + Y --> X
5897 // Y + (X - Y) --> X
5898 Value *X;
5899 if (FMF.noSignedZeros() && FMF.allowReassoc() &&
5900 (match(Op0, m_FSub(m_Value(X), m_Specific(Op1))) ||
5901 match(Op1, m_FSub(m_Value(X), m_Specific(Op0)))))
5902 return X;
5903
5904 return nullptr;
5905}
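// Editorial note, illustrative IR (placeholder values): with nnan,
//   %n = fneg float %x
//   %a = fadd nnan float %n, %x   ; --> 0.0
// Infinities need no separate check: inf + -inf is NaN, which nnan excludes.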
5906
5907/// Given operands for an FSub, see if we can fold the result. If not, this
5908/// returns null.
5909static Value *
5910 simplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5911 const SimplifyQuery &Q, unsigned MaxRecurse,
5912 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5913 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5914 if (isDefaultFPEnvironment(ExBehavior, Rounding))
5915 if (Constant *C = foldOrCommuteConstant(Instruction::FSub, Op0, Op1, Q))
5916 return C;
5917
5918 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5919 return C;
5920
5921 // fsub X, +0 ==> X
5922 if (canIgnoreSNaN(ExBehavior, FMF) &&
5923 (!canRoundingModeBe(Rounding, RoundingMode::TowardNegative) ||
5924 FMF.noSignedZeros()))
5925 if (match(Op1, m_PosZeroFP()))
5926 return Op0;
5927
5928 // fsub X, -0 ==> X, when we know X is not -0
5929 if (canIgnoreSNaN(ExBehavior, FMF))
5930 if (match(Op1, m_NegZeroFP()) &&
5931 (FMF.noSignedZeros() || cannotBeNegativeZero(Op0, Q)))
5932 return Op0;
5933
5934 // fsub -0.0, (fsub -0.0, X) ==> X
5935 // fsub -0.0, (fneg X) ==> X
5936 Value *X;
5937 if (canIgnoreSNaN(ExBehavior, FMF))
5938 if (match(Op0, m_NegZeroFP()) && match(Op1, m_FNeg(m_Value(X))))
5939 return X;
5940
5941 // fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored.
5942 // fsub 0.0, (fneg X) ==> X if signed zeros are ignored.
5943 if (canIgnoreSNaN(ExBehavior, FMF))
5944 if (FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()) &&
5945 (match(Op1, m_FSub(m_AnyZeroFP(), m_Value(X))) ||
5946 match(Op1, m_FNeg(m_Value(X)))))
5947 return X;
5948
5949 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5950 return nullptr;
5951
5952 if (FMF.noNaNs()) {
5953 // fsub nnan x, x ==> 0.0
5954 if (Op0 == Op1)
5955 return Constant::getNullValue(Op0->getType());
5956
5957 // With nnan: {+/-}Inf - X --> {+/-}Inf
5958 if (match(Op0, m_Inf()))
5959 return Op0;
5960
5961 // With nnan: X - {+/-}Inf --> {-/+}Inf
5962 if (match(Op1, m_Inf()))
5963 return foldConstant(Instruction::FNeg, Op1, Q);
5964 }
5965
5966 // Y - (Y - X) --> X
5967 // (X + Y) - Y --> X
5968 if (FMF.noSignedZeros() && FMF.allowReassoc() &&
5969 (match(Op1, m_FSub(m_Specific(Op0), m_Value(X))) ||
5970 match(Op0, m_c_FAdd(m_Specific(Op1), m_Value(X)))))
5971 return X;
5972
5973 return nullptr;
5974}
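// Editorial note, illustrative IR (placeholder values): with nnan,
//   %d = fsub nnan float %x, %x   ; --> 0.0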
5975
5976 static Value *simplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
5977 const SimplifyQuery &Q, unsigned MaxRecurse,
5978 fp::ExceptionBehavior ExBehavior,
5979 RoundingMode Rounding) {
5980 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5981 return C;
5982
5983 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5984 return nullptr;
5985
5986 // Canonicalize special constants as operand 1.
5987 if (match(Op0, m_FPOne()) || match(Op0, m_AnyZeroFP()))
5988 std::swap(Op0, Op1);
5989
5990 // X * 1.0 --> X
5991 if (match(Op1, m_FPOne()))
5992 return Op0;
5993
5994 if (match(Op1, m_AnyZeroFP())) {
5995 // X * 0.0 --> 0.0 (with nnan and nsz)
5996 if (FMF.noNaNs() && FMF.noSignedZeros())
5997 return ConstantFP::getZero(Op0->getType());
5998
5999 KnownFPClass Known = computeKnownFPClass(Op0, FMF, fcInf | fcNan, Q);
6000 if (Known.isKnownNever(fcInf | fcNan)) {
6001 // if nsz is set, return 0.0
6002 if (FMF.noSignedZeros())
6003 return ConstantFP::getZero(Op0->getType());
6004 // +normal number * (-)0.0 --> (-)0.0
6005 if (Known.SignBit == false)
6006 return Op1;
6007 // -normal number * (-)0.0 --> -(-)0.0
6008 if (Known.SignBit == true)
6009 return foldConstant(Instruction::FNeg, Op1, Q);
6010 }
6011 }
6012
6013 // sqrt(X) * sqrt(X) --> X, if we can:
6014 // 1. Remove the intermediate rounding (reassociate).
6015 // 2. Ignore non-zero negative numbers because sqrt would produce NAN.
6016 // 3. Ignore -0.0 because sqrt(-0.0) == -0.0, but -0.0 * -0.0 == 0.0.
6017 Value *X;
6018 if (Op0 == Op1 && match(Op0, m_Sqrt(m_Value(X))) && FMF.allowReassoc() &&
6019 FMF.noNaNs() && FMF.noSignedZeros())
6020 return X;
6021
6022 return nullptr;
6023}
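// Editorial note, illustrative IR (placeholder values): the sqrt fold above
// needs all three flags,
//   %s = call reassoc nnan nsz float @llvm.sqrt.f32(float %x)
//   %m = fmul reassoc nnan nsz float %s, %s   ; --> %x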
6024
6025/// Given the operands for an FMul, see if we can fold the result
6026static Value *
6027 simplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
6028 const SimplifyQuery &Q, unsigned MaxRecurse,
6029 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
6030 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
6031 if (isDefaultFPEnvironment(ExBehavior, Rounding))
6032 if (Constant *C = foldOrCommuteConstant(Instruction::FMul, Op0, Op1, Q))
6033 return C;
6034
6035 // Now apply simplifications that do not require rounding.
6036 return simplifyFMAFMul(Op0, Op1, FMF, Q, MaxRecurse, ExBehavior, Rounding);
6037}
6038
6039 Value *llvm::simplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
6040 const SimplifyQuery &Q,
6041 fp::ExceptionBehavior ExBehavior,
6042 RoundingMode Rounding) {
6043 return ::simplifyFAddInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
6044 Rounding);
6045}
6046
6047 Value *llvm::simplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
6048 const SimplifyQuery &Q,
6049 fp::ExceptionBehavior ExBehavior,
6050 RoundingMode Rounding) {
6051 return ::simplifyFSubInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
6052 Rounding);
6053}
6054
6055 Value *llvm::simplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
6056 const SimplifyQuery &Q,
6057 fp::ExceptionBehavior ExBehavior,
6058 RoundingMode Rounding) {
6059 return ::simplifyFMulInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
6060 Rounding);
6061}
6062
6063 Value *llvm::simplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
6064 const SimplifyQuery &Q,
6065 fp::ExceptionBehavior ExBehavior,
6066 RoundingMode Rounding) {
6067 return ::simplifyFMAFMul(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
6068 Rounding);
6069}
6070
6071static Value *
6072 simplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
6073 const SimplifyQuery &Q, unsigned,
6074 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
6075 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
6076 if (isDefaultFPEnvironment(ExBehavior, Rounding))
6077 if (Constant *C = foldOrCommuteConstant(Instruction::FDiv, Op0, Op1, Q))
6078 return C;
6079
6080 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
6081 return C;
6082
6083 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
6084 return nullptr;
6085
6086 // X / 1.0 -> X
6087 if (match(Op1, m_FPOne()))
6088 return Op0;
6089
6090 // 0 / X -> 0
6091 // Requires that NaNs are off (X could be zero) and signed zeroes are
6092 // ignored (X could be positive or negative, so the output sign is unknown).
6093 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()))
6094 return ConstantFP::getZero(Op0->getType());
6095
6096 if (FMF.noNaNs()) {
6097 // X / X -> 1.0 is legal when NaNs are ignored.
6098 // We can ignore infinities because INF/INF is NaN.
6099 if (Op0 == Op1)
6100 return ConstantFP::get(Op0->getType(), 1.0);
6101
6102 // (X * Y) / Y --> X if we can reassociate to the above form.
6103 Value *X;
6104 if (FMF.allowReassoc() && match(Op0, m_c_FMul(m_Value(X), m_Specific(Op1))))
6105 return X;
6106
6107 // -X / X -> -1.0 and
6108 // X / -X -> -1.0 are legal when NaNs are ignored.
6109 // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored.
6110 if (match(Op0, m_FNegNSZ(m_Specific(Op1))) ||
6111 match(Op1, m_FNegNSZ(m_Specific(Op0))))
6112 return ConstantFP::get(Op0->getType(), -1.0);
6113
6114 // nnan ninf X / [-]0.0 -> poison
6115 if (FMF.noInfs() && match(Op1, m_AnyZeroFP()))
6116 return PoisonValue::get(Op1->getType());
6117 }
6118
6119 return nullptr;
6120}
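// Editorial note, illustrative IR (placeholder values): with nnan,
//   %q = fdiv nnan float %x, %x   ; --> 1.0
// Both inf/inf and 0.0/0.0 would produce NaN, which nnan excludes.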
6121
6122 Value *llvm::simplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
6123 const SimplifyQuery &Q,
6124 fp::ExceptionBehavior ExBehavior,
6125 RoundingMode Rounding) {
6126 return ::simplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
6127 Rounding);
6128}
6129
6130static Value *
6131 simplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
6132 const SimplifyQuery &Q, unsigned,
6133 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
6134 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
6135 if (isDefaultFPEnvironment(ExBehavior, Rounding))
6136 if (Constant *C = foldOrCommuteConstant(Instruction::FRem, Op0, Op1, Q))
6137 return C;
6138
6139 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
6140 return C;
6141
6142 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
6143 return nullptr;
6144
6145 // Unlike fdiv, the result of frem always matches the sign of the dividend.
6146 // The constant match may include undef elements in a vector, so return a full
6147 // zero constant as the result.
6148 if (FMF.noNaNs()) {
6149 // +0 % X -> 0
6150 if (match(Op0, m_PosZeroFP()))
6151 return ConstantFP::getZero(Op0->getType());
6152 // -0 % X -> -0
6153 if (match(Op0, m_NegZeroFP()))
6154 return ConstantFP::getNegativeZero(Op0->getType());
6155 }
6156
6157 return nullptr;
6158}
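// Editorial note, illustrative IR (placeholder value %x): with nnan,
//   %r = frem nnan float 0.0, %x   ; --> 0.0
// nnan rules out %x being zero, which would make the result NaN.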
6159
6160 Value *llvm::simplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
6161 const SimplifyQuery &Q,
6162 fp::ExceptionBehavior ExBehavior,
6163 RoundingMode Rounding) {
6164 return ::simplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
6165 Rounding);
6166}
6167
6168//=== Helper functions for higher up the class hierarchy.
6169
6170/// Given the operand for a UnaryOperator, see if we can fold the result.
6171/// If not, this returns null.
6172static Value *simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q,
6173 unsigned MaxRecurse) {
6174 switch (Opcode) {
6175 case Instruction::FNeg:
6176 return simplifyFNegInst(Op, FastMathFlags(), Q, MaxRecurse);
6177 default:
6178 llvm_unreachable("Unexpected opcode");
6179 }
6180}
6181
6182/// Given the operand for a UnaryOperator, see if we can fold the result.
6183/// If not, this returns null.
6184/// Try to use FastMathFlags when folding the result.
6185static Value *simplifyFPUnOp(unsigned Opcode, Value *Op,
6186 const FastMathFlags &FMF, const SimplifyQuery &Q,
6187 unsigned MaxRecurse) {
6188 switch (Opcode) {
6189 case Instruction::FNeg:
6190 return simplifyFNegInst(Op, FMF, Q, MaxRecurse);
6191 default:
6192 return simplifyUnOp(Opcode, Op, Q, MaxRecurse);
6193 }
6194}
6195
6196Value *llvm::simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q) {
6197 return ::simplifyUnOp(Opcode, Op, Q, RecursionLimit);
6198}
6199
6200 Value *llvm::simplifyFPUnOp(unsigned Opcode, Value *Op, FastMathFlags FMF,
6201 const SimplifyQuery &Q) {
6202 return ::simplifyFPUnOp(Opcode, Op, FMF, Q, RecursionLimit);
6203}
6204
6205/// Given operands for a BinaryOperator, see if we can fold the result.
6206/// If not, this returns null.
6207static Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
6208 const SimplifyQuery &Q, unsigned MaxRecurse) {
6209 switch (Opcode) {
6210 case Instruction::Add:
6211 return simplifyAddInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
6212 MaxRecurse);
6213 case Instruction::Sub:
6214 return simplifySubInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
6215 MaxRecurse);
6216 case Instruction::Mul:
6217 return simplifyMulInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
6218 MaxRecurse);
6219 case Instruction::SDiv:
6220 return simplifySDivInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
6221 case Instruction::UDiv:
6222 return simplifyUDivInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
6223 case Instruction::SRem:
6224 return simplifySRemInst(LHS, RHS, Q, MaxRecurse);
6225 case Instruction::URem:
6226 return simplifyURemInst(LHS, RHS, Q, MaxRecurse);
6227 case Instruction::Shl:
6228 return simplifyShlInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
6229 MaxRecurse);
6230 case Instruction::LShr:
6231 return simplifyLShrInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
6232 case Instruction::AShr:
6233 return simplifyAShrInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
6234 case Instruction::And:
6235 return simplifyAndInst(LHS, RHS, Q, MaxRecurse);
6236 case Instruction::Or:
6237 return simplifyOrInst(LHS, RHS, Q, MaxRecurse);
6238 case Instruction::Xor:
6239 return simplifyXorInst(LHS, RHS, Q, MaxRecurse);
6240 case Instruction::FAdd:
6241 return simplifyFAddInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6242 case Instruction::FSub:
6243 return simplifyFSubInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6244 case Instruction::FMul:
6245 return simplifyFMulInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6246 case Instruction::FDiv:
6247 return simplifyFDivInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6248 case Instruction::FRem:
6249 return simplifyFRemInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6250 default:
6251 llvm_unreachable("Unexpected opcode");
6252 }
6253}
6254
6255/// Given operands for a BinaryOperator, see if we can fold the result.
6256/// If not, this returns null.
6257/// Try to use FastMathFlags when folding the result.
6258static Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
6259 const FastMathFlags &FMF, const SimplifyQuery &Q,
6260 unsigned MaxRecurse) {
6261 switch (Opcode) {
6262 case Instruction::FAdd:
6263 return simplifyFAddInst(LHS, RHS, FMF, Q, MaxRecurse);
6264 case Instruction::FSub:
6265 return simplifyFSubInst(LHS, RHS, FMF, Q, MaxRecurse);
6266 case Instruction::FMul:
6267 return simplifyFMulInst(LHS, RHS, FMF, Q, MaxRecurse);
6268 case Instruction::FDiv:
6269 return simplifyFDivInst(LHS, RHS, FMF, Q, MaxRecurse);
6270 default:
6271 return simplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse);
6272 }
6273}
6274
6275Value *llvm::simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
6276 const SimplifyQuery &Q) {
6277 return ::simplifyBinOp(Opcode, LHS, RHS, Q, RecursionLimit);
6278}
6279
6280Value *llvm::simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
6281 FastMathFlags FMF, const SimplifyQuery &Q) {
6282 return ::simplifyBinOp(Opcode, LHS, RHS, FMF, Q, RecursionLimit);
6283}
6284
6285/// Given operands for a CmpInst, see if we can fold the result.
6286 static Value *simplifyCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS,
6287 const SimplifyQuery &Q, unsigned MaxRecurse) {
6288 if (CmpInst::isIntPredicate(Predicate))
6289 return simplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse);
6290 return simplifyFCmpInst(Predicate, LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6291}
6292
6293 Value *llvm::simplifyCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS,
6294 const SimplifyQuery &Q) {
6295 return ::simplifyCmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
6296}
6297
6298 static bool isIdempotent(Intrinsic::ID ID) {
6299 switch (ID) {
6300 default:
6301 return false;
6302
6303 // Unary idempotent: f(f(x)) = f(x)
6304 case Intrinsic::fabs:
6305 case Intrinsic::floor:
6306 case Intrinsic::ceil:
6307 case Intrinsic::trunc:
6308 case Intrinsic::rint:
6309 case Intrinsic::nearbyint:
6310 case Intrinsic::round:
6311 case Intrinsic::roundeven:
6312 case Intrinsic::canonicalize:
6313 case Intrinsic::arithmetic_fence:
6314 return true;
6315 }
6316}
6317
6318/// Return true if the intrinsic rounds a floating-point value to an integral
6319/// floating-point value (not an integer type).
6320 static bool removesFPFraction(Intrinsic::ID ID) {
6321 switch (ID) {
6322 default:
6323 return false;
6324
6325 case Intrinsic::floor:
6326 case Intrinsic::ceil:
6327 case Intrinsic::trunc:
6328 case Intrinsic::rint:
6329 case Intrinsic::nearbyint:
6330 case Intrinsic::round:
6331 case Intrinsic::roundeven:
6332 return true;
6333 }
6334}
6335
6336 static Value *simplifyRelativeLoad(Constant *Ptr, Constant *Offset,
6337 const DataLayout &DL) {
6338 GlobalValue *PtrSym;
6339 APInt PtrOffset;
6340 if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset, DL))
6341 return nullptr;
6342
6343 Type *Int32Ty = Type::getInt32Ty(Ptr->getContext());
6344
6345 auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset);
6346 if (!OffsetConstInt || OffsetConstInt->getBitWidth() > 64)
6347 return nullptr;
6348
6349 APInt OffsetInt = OffsetConstInt->getValue().sextOrTrunc(
6350 DL.getIndexTypeSizeInBits(Ptr->getType()));
6351 if (OffsetInt.srem(4) != 0)
6352 return nullptr;
6353
6354 Constant *Loaded =
6355 ConstantFoldLoadFromConstPtr(Ptr, Int32Ty, std::move(OffsetInt), DL);
6356 if (!Loaded)
6357 return nullptr;
6358
6359 auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded);
6360 if (!LoadedCE)
6361 return nullptr;
6362
6363 if (LoadedCE->getOpcode() == Instruction::Trunc) {
6364 LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
6365 if (!LoadedCE)
6366 return nullptr;
6367 }
6368
6369 if (LoadedCE->getOpcode() != Instruction::Sub)
6370 return nullptr;
6371
6372 auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
6373 if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt)
6374 return nullptr;
6375 auto *LoadedLHSPtr = LoadedLHS->getOperand(0);
6376
6377 Constant *LoadedRHS = LoadedCE->getOperand(1);
6378 GlobalValue *LoadedRHSSym;
6379 APInt LoadedRHSOffset;
6380 if (!IsConstantOffsetFromGlobal(LoadedRHS, LoadedRHSSym, LoadedRHSOffset,
6381 DL) ||
6382 PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset)
6383 return nullptr;
6384
6385 return LoadedLHSPtr;
6386}
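// Editorial note, sketch of the pattern handled above (placeholder names):
// a relative table entry such as
//   @t = constant i32 trunc (i64 sub (i64 ptrtoint (ptr @fn to i64),
//                                     i64 ptrtoint (ptr @t to i64)) to i32)
// loaded from @t at the matching offset folds back to the pointer @fn.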
6387
6388// TODO: Need to pass in FastMathFlags
6389static Value *simplifyLdexp(Value *Op0, Value *Op1, const SimplifyQuery &Q,
6390 bool IsStrict) {
6391 // ldexp(poison, x) -> poison
6392 // ldexp(x, poison) -> poison
6393 if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
6394 return Op0;
6395
6396 // ldexp(undef, x) -> nan
6397 if (Q.isUndefValue(Op0))
6398 return ConstantFP::getNaN(Op0->getType());
6399
6400 if (!IsStrict) {
6401 // TODO: Could insert a canonicalize for strict
6402
6403 // ldexp(x, undef) -> x
6404 if (Q.isUndefValue(Op1))
6405 return Op0;
6406 }
6407
6408 const APFloat *C = nullptr;
6409 match(Op0, m_APFloat(C));
6410
6411 // These cases should be safe, even with strictfp.
6412 // ldexp(0.0, x) -> 0.0
6413 // ldexp(-0.0, x) -> -0.0
6414 // ldexp(inf, x) -> inf
6415 // ldexp(-inf, x) -> -inf
6416 if (C && (C->isZero() || C->isInfinity()))
6417 return Op0;
6418
6419 // These folds drop canonicalization; we could do them if we knew how to
6420 // ignore denormal flushes and target handling of NaN payload bits.
6421 if (IsStrict)
6422 return nullptr;
6423
6424 // TODO: Could quiet this with strictfp if the exception mode isn't strict.
6425 if (C && C->isNaN())
6426 return ConstantFP::get(Op0->getType(), C->makeQuiet());
6427
6428 // ldexp(x, 0) -> x
6429
6430 // TODO: Could fold this if we know the exception mode isn't
6431 // strict, we know the denormal mode and other target modes.
6432 if (match(Op1, PatternMatch::m_ZeroInt()))
6433 return Op0;
6434
6435 return nullptr;
6436}
6437
6438 static Value *simplifyUnaryIntrinsic(Function *F, Value *Op0,
6439 const SimplifyQuery &Q,
6440 const CallBase *Call) {
6441 // Idempotent functions return the same result when called repeatedly.
6442 Intrinsic::ID IID = F->getIntrinsicID();
6443 if (isIdempotent(IID))
6444 if (auto *II = dyn_cast<IntrinsicInst>(Op0))
6445 if (II->getIntrinsicID() == IID)
6446 return II;
6447
6448 if (removesFPFraction(IID)) {
6449 // Converting from int or calling a rounding function always results in a
6450 // finite integral number or infinity. For those inputs, rounding functions
6451 // always return the same value, so the (2nd) rounding is eliminated. Ex:
6452 // floor (sitofp x) -> sitofp x
6453 // round (ceil x) -> ceil x
6454 auto *II = dyn_cast<IntrinsicInst>(Op0);
6455 if ((II && removesFPFraction(II->getIntrinsicID())) ||
6456 match(Op0, m_SIToFP(m_Value())) || match(Op0, m_UIToFP(m_Value())))
6457 return Op0;
6458 }
6459
6460 Value *X;
6461 switch (IID) {
6462 case Intrinsic::fabs:
6463 if (computeKnownFPSignBit(Op0, Q) == false)
6464 return Op0;
6465 break;
6466 case Intrinsic::bswap:
6467 // bswap(bswap(x)) -> x
6468 if (match(Op0, m_BSwap(m_Value(X))))
6469 return X;
6470 break;
6471 case Intrinsic::bitreverse:
6472 // bitreverse(bitreverse(x)) -> x
6473 if (match(Op0, m_BitReverse(m_Value(X))))
6474 return X;
6475 break;
6476 case Intrinsic::ctpop: {
6477 // ctpop(X) -> 1 iff X is a non-zero power of 2.
6478 if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ false, Q.AC, Q.CxtI, Q.DT))
6479 return ConstantInt::get(Op0->getType(), 1);
6480 // If everything but the lowest bit is zero, that bit is the pop-count. Ex:
6481 // ctpop(and X, 1) --> and X, 1
6482 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
6483 if (MaskedValueIsZero(Op0, APInt::getHighBitsSet(BitWidth, BitWidth - 1),
6484 Q))
6485 return Op0;
6486 break;
6487 }
6488 case Intrinsic::exp:
6489 // exp(log(x)) -> x
6490 if (Call->hasAllowReassoc() &&
6491 match(Op0, m_Intrinsic<Intrinsic::log>(m_Value(X))))
6492 return X;
6493 break;
6494 case Intrinsic::exp2:
6495 // exp2(log2(x)) -> x
6496 if (Call->hasAllowReassoc() &&
6497 match(Op0, m_Intrinsic<Intrinsic::log2>(m_Value(X))))
6498 return X;
6499 break;
6500 case Intrinsic::exp10:
6501 // exp10(log10(x)) -> x
6502 if (Call->hasAllowReassoc() &&
6503 match(Op0, m_Intrinsic<Intrinsic::log10>(m_Value(X))))
6504 return X;
6505 break;
6506 case Intrinsic::log:
6507 // log(exp(x)) -> x
6508 if (Call->hasAllowReassoc() &&
6509 match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X))))
6510 return X;
6511 break;
6512 case Intrinsic::log2:
6513 // log2(exp2(x)) -> x
6514 if (Call->hasAllowReassoc() &&
6515 (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) ||
6516 match(Op0,
6517 m_Intrinsic<Intrinsic::pow>(m_SpecificFP(2.0), m_Value(X)))))
6518 return X;
6519 break;
6520 case Intrinsic::log10:
6521 // log10(pow(10.0, x)) -> x
6522 // log10(exp10(x)) -> x
6523 if (Call->hasAllowReassoc() &&
6524 (match(Op0, m_Intrinsic<Intrinsic::exp10>(m_Value(X))) ||
6525 match(Op0,
6526 m_Intrinsic<Intrinsic::pow>(m_SpecificFP(10.0), m_Value(X)))))
6527 return X;
6528 break;
6529 case Intrinsic::vector_reverse:
6530 // vector.reverse(vector.reverse(x)) -> x
6531 if (match(Op0, m_VecReverse(m_Value(X))))
6532 return X;
6533 // vector.reverse(splat(X)) -> splat(X)
6534 if (isSplatValue(Op0))
6535 return Op0;
6536 break;
6537 default:
6538 break;
6539 }
6540
6541 return nullptr;
6542}
6543
6544/// Given a min/max intrinsic, see if it can be removed based on having an
6545/// operand that is another min/max intrinsic with shared operand(s). The caller
6546/// is expected to swap the operand arguments to handle commutation.
6547 static Value *foldMinMaxSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1) {
6548 Value *X, *Y;
6549 if (!match(Op0, m_MaxOrMin(m_Value(X), m_Value(Y))))
6550 return nullptr;
6551
6552 auto *MM0 = dyn_cast<IntrinsicInst>(Op0);
6553 if (!MM0)
6554 return nullptr;
6555 Intrinsic::ID IID0 = MM0->getIntrinsicID();
6556
6557 if (Op1 == X || Op1 == Y ||
6558 match(Op1, m_c_MaxOrMin(m_Specific(X), m_Specific(Y)))) {
6559 // max (max X, Y), X --> max X, Y
6560 if (IID0 == IID)
6561 return MM0;
6562 // max (min X, Y), X --> X
6563 if (IID0 == getInverseMinMaxIntrinsic(IID))
6564 return Op1;
6565 }
6566 return nullptr;
6567}
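// Editorial note, illustrative IR (placeholder values):
//   %m = call i32 @llvm.smax.i32(i32 %x, i32 %y)
//   %a = call i32 @llvm.smax.i32(i32 %m, i32 %x)   ; --> %m
//   %b = call i32 @llvm.smin.i32(i32 %m, i32 %x)   ; --> %x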
6568
6569/// Given a min/max intrinsic, see if it can be removed based on having an
6570/// operand that is another min/max intrinsic with shared operand(s). The caller
6571/// is expected to swap the operand arguments to handle commutation.
6572 static Value *foldMinimumMaximumSharedOp(Intrinsic::ID IID, Value *Op0,
6573 Value *Op1) {
6574 assert((IID == Intrinsic::maxnum || IID == Intrinsic::minnum ||
6575 IID == Intrinsic::maximum || IID == Intrinsic::minimum ||
6576 IID == Intrinsic::maximumnum || IID == Intrinsic::minimumnum) &&
6577 "Unsupported intrinsic");
6578
6579 auto *M0 = dyn_cast<IntrinsicInst>(Op0);
6580 // If Op0 is not the same intrinsic as IID, do not process.
6581 // This differs from the integer min/max handling: we do not process cases
6582 // like max(min(X,Y),min(X,Y)) => min(X,Y), but they can be handled by GVN.
6583 if (!M0 || M0->getIntrinsicID() != IID)
6584 return nullptr;
6585 Value *X0 = M0->getOperand(0);
6586 Value *Y0 = M0->getOperand(1);
6587 // Simple case, m(m(X,Y), X) => m(X, Y)
6588 // m(m(X,Y), Y) => m(X, Y)
6589 // For minimum/maximum, X is NaN => m(NaN, Y) == NaN and m(NaN, NaN) == NaN.
6590 // For minimum/maximum, Y is NaN => m(X, NaN) == NaN and m(NaN, NaN) == NaN.
6591 // For minnum/maxnum, X is NaN => m(NaN, Y) == Y and m(Y, Y) == Y.
6592 // For minnum/maxnum, Y is NaN => m(X, NaN) == X and m(X, NaN) == X.
6593 if (X0 == Op1 || Y0 == Op1)
6594 return M0;
6595
6596 auto *M1 = dyn_cast<IntrinsicInst>(Op1);
6597 if (!M1)
6598 return nullptr;
6599 Value *X1 = M1->getOperand(0);
6600 Value *Y1 = M1->getOperand(1);
6601 Intrinsic::ID IID1 = M1->getIntrinsicID();
6602 // We have a case m(m(X,Y), m'(X,Y)), taking into account that m' is commutative.
6603 // if m' is m or inversion of m => m(m(X,Y),m'(X,Y)) == m(X,Y).
6604 // For minimum/maximum, X is NaN => m(NaN,Y) == m'(NaN, Y) == NaN.
6605 // For minimum/maximum, Y is NaN => m(X,NaN) == m'(X, NaN) == NaN.
6606 // For minnum/maxnum, X is NaN => m(NaN,Y) == m'(NaN, Y) == Y.
6607 // For minnum/maxnum, Y is NaN => m(X,NaN) == m'(X, NaN) == X.
6608 if ((X0 == X1 && Y0 == Y1) || (X0 == Y1 && Y0 == X1))
6609 if (IID1 == IID || getInverseMinMaxIntrinsic(IID1) == IID)
6610 return M0;
6611
6612 return nullptr;
6613}
6614
6615 enum class MinMaxOptResult {
6616 CannotOptimize,
6617 UseNewConstVal,
6618 UseOtherVal,
6619 // For undef/poison, we can choose to either propagate undef/poison or
6620 // use the LHS value depending on what will allow more optimization.
6621 UseEither
6622 };
6623// Get the optimized value for a min/max instruction with a single constant
6624 // input (either undef or a scalar ConstantFP). The result may indicate to
6625// use the non-const LHS value, use a new constant value instead (with NaNs
6626// quieted), or to choose either option in the case of undef/poison.
6627 static MinMaxOptResult OptimizeConstMinMax(const Constant *RHSConst,
6628 const Intrinsic::ID IID,
6629 const CallBase *Call,
6630 Constant **OutNewConstVal) {
6631 assert(OutNewConstVal != nullptr);
6632
6633 bool PropagateNaN = IID == Intrinsic::minimum || IID == Intrinsic::maximum;
6634 bool ReturnsOtherForAllNaNs =
6635 IID == Intrinsic::minimumnum || IID == Intrinsic::maximumnum;
6636 bool IsMin = IID == Intrinsic::minimum || IID == Intrinsic::minnum ||
6637 IID == Intrinsic::minimumnum;
6638
6639 // min/max(x, poison) -> either x or poison
6640 if (isa<UndefValue>(RHSConst)) {
6641 *OutNewConstVal = const_cast<Constant *>(RHSConst);
6642 return MinMaxOptResult::UseEither;
6643 }
6644
6645 const ConstantFP *CFP = dyn_cast<ConstantFP>(RHSConst);
6646 if (!CFP)
6647 return MinMaxOptResult::CannotOptimize;
6648 APFloat CAPF = CFP->getValueAPF();
6649
6650 // minnum(x, qnan) -> x
6651 // maxnum(x, qnan) -> x
6652 // minimum(X, nan) -> qnan
6653 // maximum(X, nan) -> qnan
6654 // minimumnum(X, nan) -> x
6655 // maximumnum(X, nan) -> x
6656 if (CAPF.isNaN()) {
6657 if (PropagateNaN) {
6658 *OutNewConstVal = ConstantFP::get(CFP->getType(), CAPF.makeQuiet());
6659 return MinMaxOptResult::UseNewConstVal;
6660 } else if (ReturnsOtherForAllNaNs || !CAPF.isSignaling()) {
6661 return MinMaxOptResult::UseOtherVal;
6662 }
6663 return MinMaxOptResult::CannotOptimize;
6664 }
6665
6666 if (CAPF.isInfinity() || (Call && Call->hasNoInfs() && CAPF.isLargest())) {
6667 // minimum(X, -inf) -> -inf if nnan
6668 // maximum(X, +inf) -> +inf if nnan
6669 // minimumnum(X, -inf) -> -inf
6670 // maximumnum(X, +inf) -> +inf
6671 if (CAPF.isNegative() == IsMin &&
6672 (ReturnsOtherForAllNaNs || (Call && Call->hasNoNaNs()))) {
6673 *OutNewConstVal = const_cast<Constant *>(RHSConst);
6674 return MinMaxOptResult::UseNewConstVal;
6675 }
6676
6677 // minnum(X, +inf) -> X if nnan
6678 // maxnum(X, -inf) -> X if nnan
6679 // minimum(X, +inf) -> X (ignoring quieting of sNaNs)
6680 // maximum(X, -inf) -> X (ignoring quieting of sNaNs)
6681 // minimumnum(X, +inf) -> X if nnan
6682 // maximumnum(X, -inf) -> X if nnan
6683 if (CAPF.isNegative() != IsMin &&
6684 (PropagateNaN || (Call && Call->hasNoNaNs())))
6685 return MinMaxOptResult::UseOtherVal;
6686 }
6687 return MinMaxOptResult::CannotOptimize;
6688 }
6689
6690 static Value *simplifySVEIntReduction(Intrinsic::ID IID, Type *ReturnType,
6691 Value *Op0, Value *Op1) {
6692 Constant *C0 = dyn_cast<Constant>(Op0);
6693 Constant *C1 = dyn_cast<Constant>(Op1);
6694 unsigned Width = ReturnType->getPrimitiveSizeInBits();
6695
6696 // All false predicate or reduction of neutral values ==> neutral result.
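// For example, an OR reduction over an all-false predicate (no active
// lanes) or over all-zero data can only produce 0.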
6697 switch (IID) {
6698 case Intrinsic::aarch64_sve_eorv:
6699 case Intrinsic::aarch64_sve_orv:
6700 case Intrinsic::aarch64_sve_saddv:
6701 case Intrinsic::aarch64_sve_uaddv:
6702 case Intrinsic::aarch64_sve_umaxv:
6703 if ((C0 && C0->isNullValue()) || (C1 && C1->isNullValue()))
6704 return ConstantInt::get(ReturnType, 0);
6705 break;
6706 case Intrinsic::aarch64_sve_andv:
6707 case Intrinsic::aarch64_sve_uminv:
6708 if ((C0 && C0->isNullValue()) || (C1 && C1->isAllOnesValue()))
6709 return ConstantInt::get(ReturnType, APInt::getMaxValue(Width));
6710 break;
6711 case Intrinsic::aarch64_sve_smaxv:
6712 if ((C0 && C0->isNullValue()) || (C1 && C1->isMinSignedValue()))
6713 return ConstantInt::get(ReturnType, APInt::getSignedMinValue(Width));
6714 break;
6715 case Intrinsic::aarch64_sve_sminv:
6716 if ((C0 && C0->isNullValue()) || (C1 && C1->isMaxSignedValue()))
6717 return ConstantInt::get(ReturnType, APInt::getSignedMaxValue(Width));
6718 break;
6719 }
6720
6721 switch (IID) {
6722 case Intrinsic::aarch64_sve_andv:
6723 case Intrinsic::aarch64_sve_orv:
6724 case Intrinsic::aarch64_sve_smaxv:
6725 case Intrinsic::aarch64_sve_sminv:
6726 case Intrinsic::aarch64_sve_umaxv:
6727 case Intrinsic::aarch64_sve_uminv:
6728 // sve_reduce_##(all, splat(X)) ==> X
6729 if (C0 && C0->isAllOnesValue()) {
6730 if (Value *SplatVal = getSplatValue(Op1)) {
6731 assert(SplatVal->getType() == ReturnType && "Unexpected result type!");
6732 return SplatVal;
6733 }
6734 }
6735 break;
6736 case Intrinsic::aarch64_sve_eorv:
6737 // sve_reduce_xor(all, splat(X)) ==> 0
6738 if (C0 && C0->isAllOnesValue())
6739 return ConstantInt::get(ReturnType, 0);
6740 break;
6741 }
6742
6743 return nullptr;
6744}
6745
6746 static Value *simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType,
6747 Value *Op0, Value *Op1,
6748 const SimplifyQuery &Q,
6749 const CallBase *Call) {
6750 unsigned BitWidth = ReturnType->getScalarSizeInBits();
6751 switch (IID) {
6752 case Intrinsic::get_active_lane_mask: {
6753 if (match(Op1, m_Zero()))
6754 return ConstantInt::getFalse(ReturnType);
6755
6756 const Function *F = Call->getFunction();
6757 auto *ScalableTy = dyn_cast<ScalableVectorType>(ReturnType);
6758 Attribute Attr = F->getFnAttribute(Attribute::VScaleRange);
6759 if (ScalableTy && Attr.isValid()) {
6760 std::optional<unsigned> VScaleMax = Attr.getVScaleRangeMax();
6761 if (!VScaleMax)
6762 break;
6763 uint64_t MaxPossibleMaskElements =
6764 (uint64_t)ScalableTy->getMinNumElements() * (*VScaleMax);
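// For example, a <vscale x 4 x i1> mask in a function with vscale_range(1,2)
// covers at most 8 lanes, so get.active.lane.mask(i64 0, i64 8) is all-true.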
6765
6766 const APInt *Op1Val;
6767 if (match(Op0, m_Zero()) && match(Op1, m_APInt(Op1Val)) &&
6768 Op1Val->uge(MaxPossibleMaskElements))
6769 return ConstantInt::getAllOnesValue(ReturnType);
6770 }
6771 break;
6772 }
6773 case Intrinsic::abs:
6774 // abs(abs(x)) -> abs(x). We don't need to worry about the nsw arg here.
6775 // It is always ok to pick the earlier abs. We'll just lose nsw if it's only
6776 // on the outer abs.
6777 if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(), m_Value())))
6778 return Op0;
6779 break;
6780
6781 case Intrinsic::cttz: {
6782 Value *X;
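// cttz(1 << X) --> X: shifting 1 left by X leaves exactly X trailing zeros.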
6783 if (match(Op0, m_Shl(m_One(), m_Value(X))))
6784 return X;
6785 break;
6786 }
6787 case Intrinsic::ctlz: {
6788 Value *X;
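// ctlz((negative) >>u X) --> X: the set sign bit ends up X positions down,
// leaving exactly X leading zeros. An arithmetic shift of a negative value
// keeps the sign bit set, so its ctlz is always 0.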
6789 if (match(Op0, m_LShr(m_Negative(), m_Value(X))))
6790 return X;
6791 if (match(Op0, m_AShr(m_Negative(), m_Value())))
6792 return Constant::getNullValue(ReturnType);
6793 break;
6794 }
6795 case Intrinsic::ptrmask: {
6796 // NOTE: We can't apply these simplifications based on the value of Op1
6797 // because we need to preserve provenance.
6798 if (Q.isUndefValue(Op0) || match(Op0, m_Zero()))
6799 return Constant::getNullValue(Op0->getType());
6800
6801 assert(Op1->getType()->getScalarSizeInBits() ==
6802 Q.DL.getIndexTypeSizeInBits(Op0->getType()) &&
6803 "Invalid mask width");
6804 // If the index width (mask size) is less than the pointer size, then the
6805 // mask is 1-extended.
6806 if (match(Op1, m_PtrToIntOrAddr(m_Specific(Op0))))
6807 return Op0;
6808
6809 // NOTE: We may have attributes associated with the return value of the
6810 // llvm.ptrmask intrinsic that will be lost when we just return the
6811 // operand. We should try to preserve them.
6812 if (match(Op1, m_AllOnes()) || Q.isUndefValue(Op1))
6813 return Op0;
6814
6815 Constant *C;
6816 if (match(Op1, m_ImmConstant(C))) {
6817 KnownBits PtrKnown = computeKnownBits(Op0, Q);
6818 // See if we are only masking off bits we know are already zero due to
6819 // alignment.
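// For example, masking a pointer known to be 16-byte aligned with -16:
// the low four bits are already known zero, so OR-ing them into the mask
// gives all-ones and the ptrmask is a no-op.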
6820 APInt IrrelevantPtrBits =
6821 PtrKnown.Zero.zextOrTrunc(C->getType()->getScalarSizeInBits());
6822 C = ConstantFoldBinaryOpOperands(
6823 Instruction::Or, C, ConstantInt::get(C->getType(), IrrelevantPtrBits),
6824 Q.DL);
6825 if (C != nullptr && C->isAllOnesValue())
6826 return Op0;
6827 }
6828 break;
6829 }
6830 case Intrinsic::smax:
6831 case Intrinsic::smin:
6832 case Intrinsic::umax:
6833 case Intrinsic::umin: {
6834 // If the arguments are the same, this is a no-op.
6835 if (Op0 == Op1)
6836 return Op0;
6837
6838 // Canonicalize immediate constant operand as Op1.
6839 if (match(Op0, m_ImmConstant()))
6840 std::swap(Op0, Op1);
6841
6842 // Assume undef is the limit value.
6843 if (Q.isUndefValue(Op1))
6844 return ConstantInt::get(
6845 ReturnType, MinMaxIntrinsic::getSaturationPoint(IID, BitWidth));
6846
6847 const APInt *C;
6848 if (match(Op1, m_APIntAllowPoison(C))) {
6849 // Clamp to limit value. For example:
6850 // umax(i8 %x, i8 255) --> 255
6851 if (*C == MinMaxIntrinsic::getSaturationPoint(IID, BitWidth))
6852 return ConstantInt::get(ReturnType, *C);
6853
6854 // If the constant op is the opposite of the limit value, the other must
6855 // be larger/smaller or equal. For example:
6856 // umin(i8 %x, i8 255) --> %x
6857 if (*C == MinMaxIntrinsic::getSaturationPoint(
6858 getInverseMinMaxIntrinsic(IID), BitWidth))
6859 return Op0;
6860
6861 // Remove nested call if constant operands allow it. Example:
6862 // max (max X, 7), 5 -> max X, 7
6863 auto *MinMax0 = dyn_cast<IntrinsicInst>(Op0);
6864 if (MinMax0 && MinMax0->getIntrinsicID() == IID) {
6865 // TODO: loosen undef/splat restrictions for vector constants.
6866 Value *M00 = MinMax0->getOperand(0), *M01 = MinMax0->getOperand(1);
6867 const APInt *InnerC;
6868 if ((match(M00, m_APInt(InnerC)) || match(M01, m_APInt(InnerC))) &&
6869 ICmpInst::compare(*InnerC, *C,
6870 ICmpInst::getNonStrictPredicate(
6871 MinMaxIntrinsic::getPredicate(IID))))
6872 return Op0;
6873 }
6874 }
6875
6876 if (Value *V = foldMinMaxSharedOp(IID, Op0, Op1))
6877 return V;
6878 if (Value *V = foldMinMaxSharedOp(IID, Op1, Op0))
6879 return V;
6880
6881 ICmpInst::Predicate Pred =
6882 ICmpInst::getNonStrictPredicate(MinMaxIntrinsic::getPredicate(IID));
6883 if (isICmpTrue(Pred, Op0, Op1, Q.getWithoutUndef(), RecursionLimit))
6884 return Op0;
6885 if (isICmpTrue(Pred, Op1, Op0, Q.getWithoutUndef(), RecursionLimit))
6886 return Op1;
6887
6888 break;
6889 }
6890 case Intrinsic::scmp:
6891 case Intrinsic::ucmp: {
6892 // Fold to a constant if the relationship between operands can be
6893 // established with certainty
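// scmp/ucmp return 0 when the operands are equal, 1 when the first operand
// is greater, and -1 when it is smaller.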
6894 if (isICmpTrue(CmpInst::ICMP_EQ, Op0, Op1, Q, RecursionLimit))
6895 return Constant::getNullValue(ReturnType);
6896
6897 ICmpInst::Predicate PredGT =
6898 IID == Intrinsic::scmp ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
6899 if (isICmpTrue(PredGT, Op0, Op1, Q, RecursionLimit))
6900 return ConstantInt::get(ReturnType, 1);
6901
6902 ICmpInst::Predicate PredLT =
6903 IID == Intrinsic::scmp ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
6904 if (isICmpTrue(PredLT, Op0, Op1, Q, RecursionLimit))
6905 return ConstantInt::getSigned(ReturnType, -1);
6906
6907 break;
6908 }
6909 case Intrinsic::usub_with_overflow:
6910 case Intrinsic::ssub_with_overflow:
6911 // X - X -> { 0, false }
6912 // X - undef -> { 0, false }
6913 // undef - X -> { 0, false }
6914 if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6915 return Constant::getNullValue(ReturnType);
6916 break;
6917 case Intrinsic::uadd_with_overflow:
6918 case Intrinsic::sadd_with_overflow:
6919 // X + undef -> { -1, false }
6920 // undef + x -> { -1, false }
6921 if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1)) {
6922 return ConstantStruct::get(
6923 cast<StructType>(ReturnType),
6924 {Constant::getAllOnesValue(ReturnType->getStructElementType(0)),
6925 Constant::getNullValue(ReturnType->getStructElementType(1))});
6926 }
6927 break;
6928 case Intrinsic::umul_with_overflow:
6929 case Intrinsic::smul_with_overflow:
6930 // 0 * X -> { 0, false }
6931 // X * 0 -> { 0, false }
6932 if (match(Op0, m_Zero()) || match(Op1, m_Zero()))
6933 return Constant::getNullValue(ReturnType);
6934 // undef * X -> { 0, false }
6935 // X * undef -> { 0, false }
6936 if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6937 return Constant::getNullValue(ReturnType);
6938 break;
6939 case Intrinsic::uadd_sat:
6940 // sat(MAX + X) -> MAX
6941 // sat(X + MAX) -> MAX
6942 if (match(Op0, m_AllOnes()) || match(Op1, m_AllOnes()))
6943 return Constant::getAllOnesValue(ReturnType);
6944 [[fallthrough]];
6945 case Intrinsic::sadd_sat:
6946 // sat(X + undef) -> -1
6947 // sat(undef + X) -> -1
6948 // For unsigned: Assume undef is MAX, thus we saturate to MAX (-1).
6949 // For signed: Assume undef is ~X, in which case X + ~X = -1.
6950 if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6951 return Constant::getAllOnesValue(ReturnType);
6952
6953 // X + 0 -> X
6954 if (match(Op1, m_Zero()))
6955 return Op0;
6956 // 0 + X -> X
6957 if (match(Op0, m_Zero()))
6958 return Op1;
6959 break;
6960 case Intrinsic::usub_sat:
6961 // sat(0 - X) -> 0, sat(X - MAX) -> 0
6962 if (match(Op0, m_Zero()) || match(Op1, m_AllOnes()))
6963 return Constant::getNullValue(ReturnType);
6964 [[fallthrough]];
6965 case Intrinsic::ssub_sat:
6966 // X - X -> 0, X - undef -> 0, undef - X -> 0
6967 if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6968 return Constant::getNullValue(ReturnType);
6969 // X - 0 -> X
6970 if (match(Op1, m_Zero()))
6971 return Op0;
6972 break;
6973 case Intrinsic::load_relative:
6974 if (auto *C0 = dyn_cast<Constant>(Op0))
6975 if (auto *C1 = dyn_cast<Constant>(Op1))
6976 return simplifyRelativeLoad(C0, C1, Q.DL);
6977 break;
6978 case Intrinsic::powi:
6979 if (auto *Power = dyn_cast<ConstantInt>(Op1)) {
6980 // powi(x, 0) -> 1.0
6981 if (Power->isZero())
6982 return ConstantFP::get(Op0->getType(), 1.0);
6983 // powi(x, 1) -> x
6984 if (Power->isOne())
6985 return Op0;
6986 }
6987 break;
6988 case Intrinsic::ldexp:
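// Shared with llvm.experimental.constrained.ldexp below; the last argument
// selects strict-FP handling.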
6989 return simplifyLdexp(Op0, Op1, Q, false);
6990 case Intrinsic::copysign:
6991 // copysign X, X --> X
6992 if (Op0 == Op1)
6993 return Op0;
6994 // copysign -X, X --> X
6995 // copysign X, -X --> -X
6996 if (match(Op0, m_FNeg(m_Specific(Op1))) ||
6997 match(Op1, m_FNeg(m_Specific(Op0))))
6998 return Op1;
6999 break;
7000 case Intrinsic::is_fpclass: {
7001 uint64_t Mask = cast<ConstantInt>(Op1)->getZExtValue();
7002 // If all tests are made, it doesn't matter what the value is.
7003 if ((Mask & fcAllFlags) == fcAllFlags)
7004 return ConstantInt::get(ReturnType, true);
7005 if ((Mask & fcAllFlags) == 0)
7006 return ConstantInt::get(ReturnType, false);
7007 if (Q.isUndefValue(Op0))
7008 return UndefValue::get(ReturnType);
7009 break;
7010 }
7011 case Intrinsic::maxnum:
7012 case Intrinsic::minnum:
7013 case Intrinsic::maximum:
7014 case Intrinsic::minimum:
7015 case Intrinsic::maximumnum:
7016 case Intrinsic::minimumnum: {
7017 // In some cases here, we deviate from exact IEEE-754 semantics to enable
7018 // optimizations (as allowed by the LLVM IR spec) by returning one of the
7019 // arguments unmodified instead of inserting an llvm.canonicalize to
7020 // transform input sNaNs into qNaNs.
7021
7022 // If the arguments are the same, this is a no-op (ignoring NaN quieting)
7023 if (Op0 == Op1)
7024 return Op0;
7025
7026 // Canonicalize constant operand as Op1.
7027 if (isa<Constant>(Op0))
7028 std::swap(Op0, Op1);
7029
7030 if (Constant *C = dyn_cast<Constant>(Op1)) {
7031 MinMaxOptResult OptResult = MinMaxOptResult::CannotOptimize;
7032 Constant *NewConst = nullptr;
7033
7034 if (VectorType *VTy = dyn_cast<VectorType>(C->getType())) {
7035 ElementCount ElemCount = VTy->getElementCount();
7036
7037 if (Constant *SplatVal = C->getSplatValue()) {
7038 // Handle splat vectors (including scalable vectors)
7039 OptResult = OptimizeConstMinMax(SplatVal, IID, Call, &NewConst);
7040 if (OptResult == MinMaxOptResult::UseNewConstVal)
7041 NewConst = ConstantVector::getSplat(ElemCount, NewConst);
7042
7043 } else if (ElemCount.isFixed()) {
7044 // Storage to build up new const return value (with NaNs quieted)
7045 SmallVector<Constant *> NewC(ElemCount.getFixedValue());
7046
7047 // Check elementwise whether we can optimize to either a constant
7048 // value or return the LHS value. We cannot mix and match LHS +
7049 // constant elements, as this would require inserting a new
7050 // VectorShuffle instruction, which is not allowed in simplifyBinOp.
7051 OptResult = MinMaxOptResult::UseEither;
7052 for (unsigned i = 0; i != ElemCount.getFixedValue(); ++i) {
7053 auto *Elt = C->getAggregateElement(i);
7054 if (!Elt) {
7055 OptResult = MinMaxOptResult::CannotOptimize;
7056 break;
7057 }
7058 auto ElemResult = OptimizeConstMinMax(Elt, IID, Call, &NewConst);
7059 if (ElemResult == MinMaxOptResult::CannotOptimize ||
7060 (ElemResult != OptResult &&
7061 OptResult != MinMaxOptResult::UseEither &&
7062 ElemResult != MinMaxOptResult::UseEither)) {
7063 OptResult = MinMaxOptResult::CannotOptimize;
7064 break;
7065 }
7066 NewC[i] = NewConst;
7067 if (ElemResult != MinMaxOptResult::UseEither)
7068 OptResult = ElemResult;
7069 }
7070 if (OptResult == MinMaxOptResult::UseNewConstVal)
7071 NewConst = ConstantVector::get(NewC);
7072 }
7073 } else {
7074 // Handle scalar inputs
7075 OptResult = OptimizeConstMinMax(C, IID, Call, &NewConst);
7076 }
7077
7078 if (OptResult == MinMaxOptResult::UseOtherVal ||
7079 OptResult == MinMaxOptResult::UseEither)
7080 return Op0; // Return the other arg (ignoring NaN quieting)
7081 else if (OptResult == MinMaxOptResult::UseNewConstVal)
7082 return NewConst;
7083 }
7084
7085 // Min/max of the same operation with common operand:
7086 // m(m(X, Y)), X --> m(X, Y) (4 commuted variants)
7087 if (Value *V = foldMinimumMaximumSharedOp(IID, Op0, Op1))
7088 return V;
7089 if (Value *V = foldMinimumMaximumSharedOp(IID, Op1, Op0))
7090 return V;
7091
7092 break;
7093 }
7094 case Intrinsic::vector_extract: {
7095 // (extract_vector (insert_vector _, X, 0), 0) -> X
7096 unsigned IdxN = cast<ConstantInt>(Op1)->getZExtValue();
7097 Value *X = nullptr;
7098 if (match(Op0, m_Intrinsic<Intrinsic::vector_insert>(m_Value(), m_Value(X),
7099 m_Zero())) &&
7100 IdxN == 0 && X->getType() == ReturnType)
7101 return X;
7102
7103 break;
7104 }
7105
7106 case Intrinsic::aarch64_sve_andv:
7107 case Intrinsic::aarch64_sve_eorv:
7108 case Intrinsic::aarch64_sve_orv:
7109 case Intrinsic::aarch64_sve_saddv:
7110 case Intrinsic::aarch64_sve_smaxv:
7111 case Intrinsic::aarch64_sve_sminv:
7112 case Intrinsic::aarch64_sve_uaddv:
7113 case Intrinsic::aarch64_sve_umaxv:
7114 case Intrinsic::aarch64_sve_uminv:
7115 return simplifySVEIntReduction(IID, ReturnType, Op0, Op1);
7116 default:
7117 break;
7118 }
7119
7120 return nullptr;
7121}
7122
7123 static Value *simplifyIntrinsic(CallBase *Call, Value *Callee,
7124 ArrayRef<Value *> Args,
7125 const SimplifyQuery &Q) {
7126 // Operand bundles should not be in Args.
7127 assert(Call->arg_size() == Args.size());
7128 unsigned NumOperands = Args.size();
7129 Function *F = cast<Function>(Callee);
7130 Intrinsic::ID IID = F->getIntrinsicID();
7131
7134 return PoisonValue::get(F->getReturnType());
7135 // Most of the intrinsics with no operands have some kind of side effect.
7136 // Don't simplify.
7137 if (!NumOperands) {
7138 switch (IID) {
7139 case Intrinsic::vscale: {
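// vscale folds to a constant when the function's vscale_range attribute
// pins it to a single value (min == max).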
7140 Type *RetTy = F->getReturnType();
7141 ConstantRange CR = getVScaleRange(Call->getFunction(), 64);
7142 if (const APInt *C = CR.getSingleElement())
7143 return ConstantInt::get(RetTy, C->getZExtValue());
7144 return nullptr;
7145 }
7146 default:
7147 return nullptr;
7148 }
7149 }
7150
7151 if (NumOperands == 1)
7152 return simplifyUnaryIntrinsic(F, Args[0], Q, Call);
7153
7154 if (NumOperands == 2)
7155 return simplifyBinaryIntrinsic(IID, F->getReturnType(), Args[0], Args[1], Q,
7156 Call);
7157
7158 // Handle intrinsics with 3 or more arguments.
7159 switch (IID) {
7160 case Intrinsic::masked_load:
7161 case Intrinsic::masked_gather: {
7162 Value *MaskArg = Args[1];
7163 Value *PassthruArg = Args[2];
7164 // If the mask is all zeros or undef, the "passthru" argument is the result.
7165 if (maskIsAllZeroOrUndef(MaskArg))
7166 return PassthruArg;
7167 return nullptr;
7168 }
7169 case Intrinsic::fshl:
7170 case Intrinsic::fshr: {
7171 Value *Op0 = Args[0], *Op1 = Args[1], *ShAmtArg = Args[2];
7172
7173 // If both operands are undef, the result is undef.
7174 if (Q.isUndefValue(Op0) && Q.isUndefValue(Op1))
7175 return UndefValue::get(F->getReturnType());
7176
7177 // If shift amount is undef, assume it is zero.
7178 if (Q.isUndefValue(ShAmtArg))
7179 return Args[IID == Intrinsic::fshl ? 0 : 1];
7180
7181 const APInt *ShAmtC;
7182 if (match(ShAmtArg, m_APInt(ShAmtC))) {
7183 // If there's effectively no shift, return the 1st arg or 2nd arg.
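// e.g. fshl(i8 %x, i8 %y, i8 8) --> %x, since funnel-shift amounts are
// taken modulo the bit width and 8 % 8 == 0.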
7184 APInt BitWidth = APInt(ShAmtC->getBitWidth(), ShAmtC->getBitWidth());
7185 if (ShAmtC->urem(BitWidth).isZero())
7186 return Args[IID == Intrinsic::fshl ? 0 : 1];
7187 }
7188
7189 // Rotating zero by anything is zero.
7190 if (match(Op0, m_Zero()) && match(Op1, m_Zero()))
7191 return ConstantInt::getNullValue(F->getReturnType());
7192
7193 // Rotating -1 by anything is -1.
7194 if (match(Op0, m_AllOnes()) && match(Op1, m_AllOnes()))
7195 return ConstantInt::getAllOnesValue(F->getReturnType());
7196
7197 return nullptr;
7198 }
7199 case Intrinsic::experimental_constrained_fma: {
7200 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
7201 if (Value *V = simplifyFPOp(Args, {}, Q, *FPI->getExceptionBehavior(),
7202 *FPI->getRoundingMode()))
7203 return V;
7204 return nullptr;
7205 }
7206 case Intrinsic::fma:
7207 case Intrinsic::fmuladd: {
7208 if (Value *V = simplifyFPOp(Args, {}, Q, fp::ebIgnore,
7209 RoundingMode::NearestTiesToEven))
7210 return V;
7211 return nullptr;
7212 }
7213 case Intrinsic::smul_fix:
7214 case Intrinsic::smul_fix_sat: {
7215 Value *Op0 = Args[0];
7216 Value *Op1 = Args[1];
7217 Value *Op2 = Args[2];
7218 Type *ReturnType = F->getReturnType();
7219
7220 // Canonicalize constant operand as Op1 (ConstantFolding handles the case
7221 // when both Op0 and Op1 are constant so we do not care about that special
7222 // case here).
7223 if (isa<Constant>(Op0))
7224 std::swap(Op0, Op1);
7225
7226 // X * 0 -> 0
7227 if (match(Op1, m_Zero()))
7228 return Constant::getNullValue(ReturnType);
7229
7230 // X * undef -> 0
7231 if (Q.isUndefValue(Op1))
7232 return Constant::getNullValue(ReturnType);
7233
7234 // X * (1 << Scale) -> X
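// (1 << Scale) is the fixed-point encoding of 1.0, so the scaled product
// is X itself.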
7235 APInt ScaledOne =
7236 APInt::getOneBitSet(ReturnType->getScalarSizeInBits(),
7237 cast<ConstantInt>(Op2)->getZExtValue());
7238 if (ScaledOne.isNonNegative() && match(Op1, m_SpecificInt(ScaledOne)))
7239 return Op0;
7240
7241 return nullptr;
7242 }
7243 case Intrinsic::vector_insert: {
7244 Value *Vec = Args[0];
7245 Value *SubVec = Args[1];
7246 Value *Idx = Args[2];
7247 Type *ReturnType = F->getReturnType();
7248
7249 // (insert_vector Y, (extract_vector X, 0), 0) -> X
7250 // where: Y is X, or Y is undef
7251 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
7252 Value *X = nullptr;
7253 if (match(SubVec,
7254 m_Intrinsic<Intrinsic::vector_extract>(m_Value(X), m_Zero())) &&
7255 (Q.isUndefValue(Vec) || Vec == X) && IdxN == 0 &&
7256 X->getType() == ReturnType)
7257 return X;
7258
7259 return nullptr;
7260 }
7261 case Intrinsic::experimental_constrained_fadd: {
7262 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
7263 return simplifyFAddInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
7264 *FPI->getExceptionBehavior(),
7265 *FPI->getRoundingMode());
7266 }
7267 case Intrinsic::experimental_constrained_fsub: {
7268 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
7269 return simplifyFSubInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
7270 *FPI->getExceptionBehavior(),
7271 *FPI->getRoundingMode());
7272 }
7273 case Intrinsic::experimental_constrained_fmul: {
7274 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
7275 return simplifyFMulInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
7276 *FPI->getExceptionBehavior(),
7277 *FPI->getRoundingMode());
7278 }
7279 case Intrinsic::experimental_constrained_fdiv: {
7280 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
7281 return simplifyFDivInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
7282 *FPI->getExceptionBehavior(),
7283 *FPI->getRoundingMode());
7284 }
7285 case Intrinsic::experimental_constrained_frem: {
7286 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
7287 return simplifyFRemInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
7288 *FPI->getExceptionBehavior(),
7289 *FPI->getRoundingMode());
7290 }
7291 case Intrinsic::experimental_constrained_ldexp:
7292 return simplifyLdexp(Args[0], Args[1], Q, true);
7293 case Intrinsic::experimental_gc_relocate: {
7294 GCRelocateInst &GCR = *cast<GCRelocateInst>(Call);
7295 Value *DerivedPtr = GCR.getDerivedPtr();
7296 Value *BasePtr = GCR.getBasePtr();
7297
7298 // Undef is undef, even after relocation.
7299 if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {
7300 return UndefValue::get(GCR.getType());
7301 }
7302
7303 if (auto *PT = dyn_cast<PointerType>(GCR.getType())) {
7304 // For now, the assumption is that the relocation of null will be null
7305 // for most any collector. If this ever changes, a corresponding hook
7306 // should be added to GCStrategy and this code should check it first.
7307 if (isa<ConstantPointerNull>(DerivedPtr)) {
7308 // Use null-pointer of gc_relocate's type to replace it.
7309 return ConstantPointerNull::get(PT);
7310 }
7311 }
7312 return nullptr;
7313 }
7314 case Intrinsic::experimental_vp_reverse: {
7315 Value *Vec = Call->getArgOperand(0);
7316 Value *EVL = Call->getArgOperand(2);
7317
7318 Value *X;
7319 // vp.reverse(vp.reverse(X)) == X (mask doesn't matter)
7320 if (match(Vec, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
7321 m_Value(X), m_Value(), m_Specific(EVL))))
7322 return X;
7323
7324 // vp.reverse(splat(X)) -> splat(X) (regardless of mask and EVL)
7325 if (isSplatValue(Vec))
7326 return Vec;
7327 return nullptr;
7328 }
7329 default:
7330 return nullptr;
7331 }
7332}
7333
7334 static Value *tryConstantFoldCall(CallBase *Call, Value *Callee,
7335 ArrayRef<Value *> Args,
7336 const SimplifyQuery &Q) {
7337 auto *F = dyn_cast<Function>(Callee);
7338 if (!F || !canConstantFoldCallTo(Call, F))
7339 return nullptr;
7340
7341 SmallVector<Constant *, 4> ConstantArgs;
7342 ConstantArgs.reserve(Args.size());
7343 for (Value *Arg : Args) {
7344 Constant *C = dyn_cast<Constant>(Arg);
7345 if (!C) {
7346 if (isa<MetadataAsValue>(Arg))
7347 continue;
7348 return nullptr;
7349 }
7350 ConstantArgs.push_back(C);
7351 }
7352
7353 return ConstantFoldCall(Call, F, ConstantArgs, Q.TLI);
7354}
7355
7356 Value *llvm::simplifyCall(CallBase *Call, Value *Callee, ArrayRef<Value *> Args,
7357 const SimplifyQuery &Q) {
7358 // Args should not contain operand bundle operands.
7359 assert(Call->arg_size() == Args.size());
7360
7361 // musttail calls can only be simplified if they are also DCEd.
7362 // As we can't guarantee this here, don't simplify them.
7363 if (Call->isMustTailCall())
7364 return nullptr;
7365
7366 // call undef -> poison
7367 // call null -> poison
7368 if (isa<UndefValue>(Callee) || isa<ConstantPointerNull>(Callee))
7369 return PoisonValue::get(Call->getType());
7370
7371 if (Value *V = tryConstantFoldCall(Call, Callee, Args, Q))
7372 return V;
7373
7374 auto *F = dyn_cast<Function>(Callee);
7375 if (F && F->isIntrinsic())
7376 if (Value *Ret = simplifyIntrinsic(Call, Callee, Args, Q))
7377 return Ret;
7378
7379 return nullptr;
7380}
7381
7382 Value *llvm::simplifyConstrainedFPCall(CallBase *Call,
7383 const SimplifyQuery &Q) {
7384 SmallVector<Value *, 4> Args(Call->args());
7385 if (Value *V = tryConstantFoldCall(Call, Call->getCalledOperand(), Args, Q))
7386 return V;
7387 if (Value *Ret = simplifyIntrinsic(Call, Call->getCalledOperand(), Args, Q))
7388 return Ret;
7389 return nullptr;
7390}
7391
7392/// Given operands for a Freeze, see if we can fold the result.
7393 static Value *simplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
7394 // Use a utility function defined in ValueTracking.
7395 if (llvm::isGuaranteedNotToBeUndefOrPoison(Op0, Q.AC, Q.CxtI, Q.DT))
7396 return Op0;
7397 // We have room for improvement.
7398 return nullptr;
7399}
7400
7401 Value *llvm::simplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
7402 return ::simplifyFreezeInst(Op0, Q);
7403}
7404
7405 static Value *simplifyLoadInst(LoadInst *LI, Value *PtrOp,
7406 const SimplifyQuery &Q) {
7407 if (LI->isVolatile())
7408 return nullptr;
7409
7410 if (auto *PtrOpC = dyn_cast<Constant>(PtrOp))
7411 return ConstantFoldLoadFromConstPtr(PtrOpC, LI->getType(), Q.DL);
7412
7413 // We can only fold the load if it is from a constant global with definitive
7414 // initializer. Skip expensive logic if this is not the case.
7415 auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(PtrOp));
7416 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
7417 return nullptr;
7418
7419 // If GlobalVariable's initializer is uniform, then return the constant
7420 // regardless of its offset.
7421 if (Constant *C = ConstantFoldLoadFromUniformValue(GV->getInitializer(),
7422 LI->getType(), Q.DL))
7423 return C;
7424
7425 // Try to convert operand into a constant by stripping offsets while looking
7426 // through invariant.group intrinsics.
7427 APInt Offset(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()), 0);
7428 PtrOp = PtrOp->stripAndAccumulateConstantOffsets(
7429 Q.DL, Offset, /* AllowNonInbounds */ true,
7430 /* AllowInvariantGroup */ true);
7431 if (PtrOp == GV) {
7432 // Index size may have changed due to address space casts.
7433 Offset = Offset.sextOrTrunc(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()));
7434 return ConstantFoldLoadFromConstPtr(GV, LI->getType(), std::move(Offset),
7435 Q.DL);
7436 }
7437
7438 return nullptr;
7439}
7440
7441/// See if we can compute a simplified version of this instruction.
7442/// If not, this returns null.
7443
7444 static Value *simplifyInstructionWithOperands(Instruction *I,
7445 ArrayRef<Value *> NewOps,
7446 const SimplifyQuery &SQ,
7447 unsigned MaxRecurse) {
7448 assert(I->getFunction() && "instruction should be inserted in a function");
7449 assert((!SQ.CxtI || SQ.CxtI->getFunction() == I->getFunction()) &&
7450 "context instruction should be in the same function");
7451
7452 const SimplifyQuery Q = SQ.CxtI ? SQ : SQ.getWithInstruction(I);
7453
7454 switch (I->getOpcode()) {
7455 default:
7456 if (all_of(NewOps, IsaPred<Constant>)) {
7457 SmallVector<Constant *, 8> NewConstOps(NewOps.size());
7458 transform(NewOps, NewConstOps.begin(),
7459 [](Value *V) { return cast<Constant>(V); });
7460 return ConstantFoldInstOperands(I, NewConstOps, Q.DL, Q.TLI);
7461 }
7462 return nullptr;
7463 case Instruction::FNeg:
7464 return simplifyFNegInst(NewOps[0], I->getFastMathFlags(), Q, MaxRecurse);
7465 case Instruction::FAdd:
7466 return simplifyFAddInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
7467 MaxRecurse);
7468 case Instruction::Add:
7469 return simplifyAddInst(
7470 NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
7471 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
7472 case Instruction::FSub:
7473 return simplifyFSubInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
7474 MaxRecurse);
7475 case Instruction::Sub:
7476 return simplifySubInst(
7477 NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
7478 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
7479 case Instruction::FMul:
7480 return simplifyFMulInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
7481 MaxRecurse);
7482 case Instruction::Mul:
7483 return simplifyMulInst(
7484 NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
7485 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
7486 case Instruction::SDiv:
7487 return simplifySDivInst(NewOps[0], NewOps[1],
7488 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
7489 MaxRecurse);
7490 case Instruction::UDiv:
7491 return simplifyUDivInst(NewOps[0], NewOps[1],
7492 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
7493 MaxRecurse);
7494 case Instruction::FDiv:
7495 return simplifyFDivInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
7496 MaxRecurse);
7497 case Instruction::SRem:
7498 return simplifySRemInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7499 case Instruction::URem:
7500 return simplifyURemInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7501 case Instruction::FRem:
7502 return simplifyFRemInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
7503 MaxRecurse);
7504 case Instruction::Shl:
7505 return simplifyShlInst(
7506 NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
7507 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
7508 case Instruction::LShr:
7509 return simplifyLShrInst(NewOps[0], NewOps[1],
7510 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
7511 MaxRecurse);
7512 case Instruction::AShr:
7513 return simplifyAShrInst(NewOps[0], NewOps[1],
7514 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
7515 MaxRecurse);
7516 case Instruction::And:
7517 return simplifyAndInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7518 case Instruction::Or:
7519 return simplifyOrInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7520 case Instruction::Xor:
7521 return simplifyXorInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7522 case Instruction::ICmp:
7523 return simplifyICmpInst(cast<ICmpInst>(I)->getCmpPredicate(), NewOps[0],
7524 NewOps[1], Q, MaxRecurse);
7525 case Instruction::FCmp:
7526 return simplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), NewOps[0],
7527 NewOps[1], I->getFastMathFlags(), Q, MaxRecurse);
7528 case Instruction::Select:
7529 return simplifySelectInst(NewOps[0], NewOps[1], NewOps[2], Q, MaxRecurse);
7530 case Instruction::GetElementPtr: {
7531 auto *GEPI = cast<GetElementPtrInst>(I);
7532 return simplifyGEPInst(GEPI->getSourceElementType(), NewOps[0],
7533 ArrayRef(NewOps).slice(1), GEPI->getNoWrapFlags(), Q,
7534 MaxRecurse);
7535 }
7536 case Instruction::InsertValue: {
7537 auto *IV = cast<InsertValueInst>(I);
7538 return simplifyInsertValueInst(NewOps[0], NewOps[1], IV->getIndices(), Q,
7539 MaxRecurse);
7540 }
7541 case Instruction::InsertElement:
7542 return simplifyInsertElementInst(NewOps[0], NewOps[1], NewOps[2], Q);
7543 case Instruction::ExtractValue: {
7544 auto *EVI = cast<ExtractValueInst>(I);
7545 return simplifyExtractValueInst(NewOps[0], EVI->getIndices(), Q,
7546 MaxRecurse);
7547 }
7548 case Instruction::ExtractElement:
7549 return simplifyExtractElementInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7550 case Instruction::ShuffleVector: {
7551 auto *SVI = cast<ShuffleVectorInst>(I);
7552 return simplifyShuffleVectorInst(NewOps[0], NewOps[1],
7553 SVI->getShuffleMask(), SVI->getType(), Q,
7554 MaxRecurse);
7555 }
7556 case Instruction::PHI:
7557 return simplifyPHINode(cast<PHINode>(I), NewOps, Q);
7558 case Instruction::Call:
7559 return simplifyCall(
7560 cast<CallInst>(I), NewOps.back(),
7561 NewOps.drop_back(1 + cast<CallInst>(I)->getNumTotalBundleOperands()), Q);
7562 case Instruction::Freeze:
7563 return llvm::simplifyFreezeInst(NewOps[0], Q);
7564#define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
7565#include "llvm/IR/Instruction.def"
7566#undef HANDLE_CAST_INST
7567 return simplifyCastInst(I->getOpcode(), NewOps[0], I->getType(), Q,
7568 MaxRecurse);
7569 case Instruction::Alloca:
7570 // No simplifications for Alloca and it can't be constant folded.
7571 return nullptr;
7572 case Instruction::Load:
7573 return simplifyLoadInst(cast<LoadInst>(I), NewOps[0], Q);
7574 }
7575}
7576
7577 Value *llvm::simplifyInstructionWithOperands(Instruction *I,
7578 ArrayRef<Value *> NewOps,
7579 const SimplifyQuery &SQ) {
7580 assert(NewOps.size() == I->getNumOperands() &&
7581 "Number of operands should match the instruction!");
7582 return ::simplifyInstructionWithOperands(I, NewOps, SQ, RecursionLimit);
7583}
7584
7585 Value *llvm::simplifyInstruction(Instruction *I, const SimplifyQuery &SQ) {
7586 SmallVector<Value *, 8> Ops(I->operands());
7587 Value *Result = ::simplifyInstructionWithOperands(I, Ops, SQ, RecursionLimit);
7588
7589 /// If called on unreachable code, the instruction may simplify to itself.
7590 /// Make life easier for users by detecting that case here, and returning a
7591 /// safe value instead.
7592 return Result == I ? PoisonValue::get(I->getType()) : Result;
7593}
7594
7595/// Implementation of recursive simplification through an instruction's
7596/// uses.
7597///
7598/// This is the common implementation of the recursive simplification routines.
7599/// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
7600/// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
7601/// instructions to process and attempt to simplify it using
7602/// InstructionSimplify. Recursively visited users which could not be
7603 /// simplified themselves are added to the optional UnsimplifiedUsers set for
7604/// further processing by the caller.
7605///
7606/// This routine returns 'true' only when *it* simplifies something. The passed
7607/// in simplified value does not count toward this.
7608 static bool replaceAndRecursivelySimplifyImpl(
7609 Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
7610 const DominatorTree *DT, AssumptionCache *AC,
7611 SmallSetVector<Instruction *, 8> *UnsimplifiedUsers = nullptr) {
7612 bool Simplified = false;
7613 SmallSetVector<Instruction *, 8> Worklist;
7614 const DataLayout &DL = I->getDataLayout();
7615
7616 // If we have an explicit value to collapse to, do that round of the
7617 // simplification loop by hand initially.
7618 if (SimpleV) {
7619 for (User *U : I->users())
7620 if (U != I)
7621 Worklist.insert(cast<Instruction>(U));
7622
7623 // Replace the instruction with its simplified value.
7624 I->replaceAllUsesWith(SimpleV);
7625
7626 if (!I->isEHPad() && !I->isTerminator() && !I->mayHaveSideEffects())
7627 I->eraseFromParent();
7628 } else {
7629 Worklist.insert(I);
7630 }
7631
7632 // Note that we must test the size on each iteration, the worklist can grow.
7633 for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
7634 I = Worklist[Idx];
7635
7636 // See if this instruction simplifies.
7637 SimpleV = simplifyInstruction(I, {DL, TLI, DT, AC});
7638 if (!SimpleV) {
7639 if (UnsimplifiedUsers)
7640 UnsimplifiedUsers->insert(I);
7641 continue;
7642 }
7643
7644 Simplified = true;
7645
7646 // Stash away all the uses of the old instruction so we can check them for
7647 // recursive simplifications after a RAUW. This is cheaper than checking all
7648 // uses of To on the recursive step in most cases.
7649 for (User *U : I->users())
7650 Worklist.insert(cast<Instruction>(U));
7651
7652 // Replace the instruction with its simplified value.
7653 I->replaceAllUsesWith(SimpleV);
7654
7655 if (!I->isEHPad() && !I->isTerminator() && !I->mayHaveSideEffects())
7656 I->eraseFromParent();
7657 }
7658 return Simplified;
7659}
7660
7661 bool llvm::replaceAndRecursivelySimplify(
7662 Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
7663 const DominatorTree *DT, AssumptionCache *AC,
7664 SmallSetVector<Instruction *, 8> *UnsimplifiedUsers) {
7665 assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
7666 assert(SimpleV && "Must provide a simplified value.");
7667 return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC,
7668 UnsimplifiedUsers);
7669}
7670
7671namespace llvm {
7672 const SimplifyQuery getBestSimplifyQuery(Pass &P, Function &F) {
7673 auto *DTWP = P.getAnalysisIfAvailable<DominatorTreeWrapperPass>();
7674 auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
7675 auto *TLIWP = P.getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
7676 auto *TLI = TLIWP ? &TLIWP->getTLI(F) : nullptr;
7677 auto *ACWP = P.getAnalysisIfAvailable<AssumptionCacheTracker>();
7678 auto *AC = ACWP ? &ACWP->getAssumptionCache(F) : nullptr;
7679 return {F.getDataLayout(), TLI, DT, AC};
7680}
7681
7682 const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &AR,
7683 const DataLayout &DL) {
7684 return {DL, &AR.TLI, &AR.DT, &AR.AC};
7685}
7686
7687template <class T, class... TArgs>
7688 const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &AM,
7689 Function &F) {
7690 auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F);
7691 auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F);
7692 auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F);
7693 return {F.getDataLayout(), TLI, DT, AC};
7694}
7695 template LLVM_ABI const SimplifyQuery getBestSimplifyQuery(AnalysisManager<Function> &,
7696 Function &);
7697
7698 bool SimplifyQuery::isUndefValue(Value *V) const {
7699 if (!CanUseUndef)
7700 return false;
7701
7702 return match(V, m_Undef());
7703}
7704
7705} // namespace llvm
7706
7707void InstSimplifyFolder::anchor() {}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
IRTranslator LLVM IR MI
static Value * simplifyCmpSelFalseCase(CmpPredicate Pred, Value *LHS, Value *RHS, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse)
Simplify comparison with false branch of select.
static Value * simplifyCmpSelCase(CmpPredicate Pred, Value *LHS, Value *RHS, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse, Constant *TrueOrFalse)
Simplify comparison with true or false branch of select: sel = select i1 cond, i32 tv,...
static Value * foldMinMaxSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1)
Given a min/max intrinsic, see if it can be removed based on having an operand that is another min/ma...
static Value * expandCommutativeBinOp(Instruction::BinaryOps Opcode, Value *L, Value *R, Instruction::BinaryOps OpcodeToExpand, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify binops of form "A op (B op' C)" or the commuted variant by distributing op over op'.
static Constant * foldOrCommuteConstant(Instruction::BinaryOps Opcode, Value *&Op0, Value *&Op1, const SimplifyQuery &Q)
static bool haveNonOverlappingStorage(const Value *V1, const Value *V2)
Return true if V1 and V2 are each the base of some distict storage region [V, object_size(V)] which d...
static Constant * foldConstant(Instruction::UnaryOps Opcode, Value *&Op, const SimplifyQuery &Q)
static Value * handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse)
We know comparison with both branches of select can be simplified, but they are not equal.
static Value * threadCmpOverPHI(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a comparison with a PHI instruction, try to simplify the comparison by seeing whether ...
static Constant * propagateNaN(Constant *In)
Try to propagate existing NaN values when possible.
static Value * simplifyICmpOfBools(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Fold an icmp when its operands have i1 scalar type.
static Value * simplifyICmpWithBinOpOnLHS(CmpPredicate Pred, BinaryOperator *LBO, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
static void getUnsignedMonotonicValues(SmallPtrSetImpl< Value * > &Res, Value *V, MonotonicType Type, const SimplifyQuery &Q, unsigned Depth=0)
Get values V_i such that V uge V_i (GreaterEq) or V ule V_i (LowerEq).
static Value * simplifyRelativeLoad(Constant *Ptr, Constant *Offset, const DataLayout &DL)
static Value * simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
These are simplifications common to SDiv and UDiv.
static Value * simplifyPHINode(PHINode *PN, ArrayRef< Value * > IncomingValues, const SimplifyQuery &Q)
See if we can fold the given phi. If not, returns null.
@ RecursionLimit
static bool isSameCompare(Value *V, CmpPredicate Pred, Value *LHS, Value *RHS)
isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
static Value * simplifyAndCommutative(Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
static bool isIdempotent(Intrinsic::ID ID)
static std::optional< ConstantRange > getRange(Value *V, const InstrInfoQuery &IIQ)
Helper method to get range from metadata or attribute.
static Value * simplifyAndOrOfICmpsWithCtpop(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd)
Try to simplify and/or of icmp with ctpop intrinsic.
static Value * simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp, ICmpInst *UnsignedICmp, bool IsAnd, const SimplifyQuery &Q)
Commuted variants are assumed to be handled by calling this function again with the parameters swappe...
static Value * tryConstantFoldCall(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
static Value * simplifyWithOpsReplaced(Value *V, ArrayRef< std::pair< Value *, Value * > > Ops, const SimplifyQuery &Q, bool AllowRefinement, SmallVectorImpl< Instruction * > *DropFlags, unsigned MaxRecurse)
static Value * simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, const InstrInfoQuery &IIQ)
static Value * simplifyAndOrOfFCmpsWithConstants(FCmpInst *Cmp0, FCmpInst *Cmp1, bool IsAnd)
Test if a pair of compares with a shared operand and 2 constants has an empty set intersection,...
static Value * simplifyICmpWithMinMax(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
simplify integer comparisons where at least one operand of the compare matches an integer min/max idi...
static Value * simplifyCmpSelTrueCase(CmpPredicate Pred, Value *LHS, Value *RHS, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse)
Simplify comparison with true branch of select.
static Value * simplifyIntrinsic(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
static Value * simplifyICmpUsingMonotonicValues(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
static bool isPoisonShift(Value *Amount, const SimplifyQuery &Q)
Returns true if a shift by Amount always yields poison.
static Value * simplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an LShr or AShr, see if we can fold the result.
static Value * simplifyICmpWithIntrinsicOnLHS(CmpPredicate Pred, Value *LHS, Value *RHS)
static Value * simplifyByDomEq(unsigned Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Test if there is a dominating equivalence condition for the two operands.
static Value * simplifyFPUnOp(unsigned, Value *, const FastMathFlags &, const SimplifyQuery &, unsigned)
Given the operand for a UnaryOperator, see if we can fold the result.
static Value * simplifyICmpWithBinOp(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
TODO: A large part of this logic is duplicated in InstCombine's foldICmpBinOp().
static Value * simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1, const SimplifyQuery &Q)
static Value * expandBinOp(Instruction::BinaryOps Opcode, Value *V, Value *OtherOp, Instruction::BinaryOps OpcodeToExpand, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a binary operator of form "V op OtherOp" where V is "(B0 opex B1)" by distributing 'o...
static Value * simplifyICmpWithZero(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Try hard to fold icmp with zero RHS because this is a common case.
static Value * simplifySelectWithFCmp(Value *Cond, Value *T, Value *F, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a select instruction when its condition operand is a floating-point comparison.
static Constant * getFalse(Type *Ty)
For a boolean type or a vector of boolean type, return false or a vector with every element false.
static Value * simplifyDivRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Check for common or similar folds of integer division or integer remainder.
static bool removesFPFraction(Intrinsic::ID ID)
Return true if the intrinsic rounds a floating-point value to an integral floating-point value (not a...
static Value * simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, const InstrInfoQuery &IIQ)
static Value * simplifySelectWithEquivalence(ArrayRef< std::pair< Value *, Value * > > Replacements, Value *TrueVal, Value *FalseVal, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a select instruction when its condition operand is an integer equality or floating-po...
static bool trySimplifyICmpWithAdds(CmpPredicate Pred, Value *LHS, Value *RHS, const InstrInfoQuery &IIQ)
static Value * simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X, const APInt *Y, bool TrueWhenUnset)
Try to simplify a select instruction when its condition operand is an integer comparison where one op...
static Value * simplifyAssociativeBinOp(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
Generic simplifications for associative binary operations.
static Value * threadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a binary operation with an operand that is a PHI instruction, try to simplify the bino...
static Value * simplifyCmpSelOfMaxMin(Value *CmpLHS, Value *CmpRHS, CmpPredicate Pred, Value *TVal, Value *FVal)
static Constant * simplifyFPOp(ArrayRef< Value * > Ops, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior, RoundingMode Rounding)
Perform folds that are common to any floating-point operation.
static Value * threadCmpOverSelect(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a comparison with a select instruction, try to simplify the comparison by seeing wheth...
static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI, const DominatorTree *DT, AssumptionCache *AC, SmallSetVector< Instruction *, 8 > *UnsimplifiedUsers=nullptr)
Implementation of recursive simplification through an instruction's uses.
static bool isAllocDisjoint(const Value *V)
Return true if the underlying object (storage) must be disjoint from storage returned by any noalias ...
static Constant * getTrue(Type *Ty)
For a boolean type or a vector of boolean type, return true or a vector with every element true.
static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q, unsigned MaxRecurse, bool IsSigned)
Return true if we can simplify X / Y to 0.
static Value * simplifyLdexp(Value *Op0, Value *Op1, const SimplifyQuery &Q, bool IsStrict)
static Value * simplifyLogicOfAddSub(Value *Op0, Value *Op1, Instruction::BinaryOps Opcode)
Given a bitwise logic op, check if the operands are add/sub with a common source value and inverted c...
static Value * simplifySelectWithBitTest(Value *CondVal, Value *TrueVal, Value *FalseVal)
An alternative way to test if a bit is set or not.
static Value * simplifyOrLogic(Value *X, Value *Y)
static Type * getCompareTy(Value *Op)
static Value * simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1, const SimplifyQuery &Q)
static bool isICmpTrue(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
Given a predicate and two operands, return true if the comparison is true.
bool isSelectWithIdenticalPHI(PHINode &PN, PHINode &IdenticalPN)
Look for the following pattern and simplify to_fold to identicalPhi.
static APInt stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V)
Compute the base pointer and cumulative constant offsets for V.
static Value * foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1, int MaskVal, Value *RootVec, unsigned MaxRecurse)
For the given destination element of a shuffle, peek through shuffles to match a root vector source o...
static Value * simplifyAndOrOfFCmps(const SimplifyQuery &Q, FCmpInst *LHS, FCmpInst *RHS, bool IsAnd)
static MinMaxOptResult OptimizeConstMinMax(const Constant *RHSConst, const Intrinsic::ID IID, const CallBase *Call, Constant **OutNewConstVal)
static Value * simplifyICmpWithConstant(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
static Value * extractEquivalentCondition(Value *V, CmpPredicate Pred, Value *LHS, Value *RHS)
Rummage around inside V looking for something equivalent to the comparison "LHS Pred RHS".
static Value * simplifyAndOrOfCmps(const SimplifyQuery &Q, Value *Op0, Value *Op1, bool IsAnd)
static Value * threadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a binary operation with a select instruction as an operand, try to simplify the binop ...
static Constant * computePointerDifference(const DataLayout &DL, Value *LHS, Value *RHS)
Compute the constant difference between two pointer values.
static Value * simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd)
Test if a pair of compares with a shared operand and 2 constants has an empty set intersection,...
static Value * simplifyAndOrWithICmpEq(unsigned Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
static Value * simplifyICmpWithDominatingAssume(CmpPredicate Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q)
static Value * simplifyShift(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool IsNSW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an Shl, LShr or AShr, see if we can fold the result.
static Value * simplifySVEIntReduction(Intrinsic::ID IID, Type *ReturnType, Value *Op0, Value *Op1)
static Constant * computePointerICmp(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
static Value * simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
These are simplifications common to SRem and URem.
static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT)
Does the given value dominate the specified phi node?
static Value * simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal, Value *FalseVal, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a select instruction when its condition operand is an integer comparison.
static Value * foldMinimumMaximumSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1)
Given a min/max intrinsic, see if it can be removed based on having an operand that is another min/ma...
static Value * simplifyUnaryIntrinsic(Function *F, Value *Op0, const SimplifyQuery &Q, const CallBase *Call)
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
This header provides classes for managing per-loop analyses.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define T
uint64_t IntrinsicInst * II
#define P(N)
const SmallVectorImpl< MachineOperand > & Cond
This file contains some templates that are useful if you are working with the STL at all.
This file implements a set that has insertion order iteration characteristics.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
static unsigned getScalarSizeInBits(Type *Ty)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
Value * RHS
Value * LHS
BinaryOperator * Mul
static const uint32_t IV[8]
Definition blake3_impl.h:83
bool isNegative() const
Definition APFloat.h:1431
APFloat makeQuiet() const
Assuming this is an IEEE-754 NaN value, quiet its signaling bit.
Definition APFloat.h:1298
bool isNaN() const
Definition APFloat.h:1429
bool isSignaling() const
Definition APFloat.h:1433
bool isLargest() const
Definition APFloat.h:1447
bool isInfinity() const
Definition APFloat.h:1428
Class for arbitrary precision integers.
Definition APInt.h:78
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition APInt.cpp:1033
unsigned getActiveBits() const
Compute the number of active bits in the value.
Definition APInt.h:1513
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
Definition APInt.h:207
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:381
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
Definition APInt.cpp:1666
void setSignBit()
Set the sign bit to 1.
Definition APInt.h:1341
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1489
bool ult(const APInt &RHS) const
Unsigned less than comparison.
Definition APInt.h:1112
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
Definition APInt.h:210
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
Definition APInt.h:1250
bool sle(const APInt &RHS) const
Signed less or equal comparison.
Definition APInt.h:1167
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition APInt.h:1640
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
Definition APInt.h:220
bool isNonPositive() const
Determine if this APInt Value is non-positive (<= 0).
Definition APInt.h:362
LLVM_ABI APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition APInt.cpp:1041
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
Definition APInt.h:357
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
Definition APInt.h:476
bool getBoolValue() const
Convert APInt to a boolean value.
Definition APInt.h:472
LLVM_ABI APInt srem(const APInt &RHS) const
Function for signed remainder operation.
Definition APInt.cpp:1736
bool isMask(unsigned numBits) const
Definition APInt.h:489
bool isMaxSignedValue() const
Determine if this is the largest signed value.
Definition APInt.h:406
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition APInt.h:335
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1151
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
Definition APInt.h:1258
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:307
bool isSignBitSet() const
Determine if sign bit of this APInt is set.
Definition APInt.h:342
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition APInt.h:297
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
bool isOne() const
Determine if this is a value of 1.
Definition APInt.h:390
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
Definition APInt.h:240
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition APInt.h:1222
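As an illustration of how the APInt queries above compose, here is a minimal standalone sketch (not part of this file; the function name is illustrative):
#include "llvm/ADT/APInt.h"
using llvm::APInt;
void apintDemo() {
  APInt X(32, 42);                // 32-bit value 42 (0b101010)
  (void)X.isPowerOf2();           // false: two bits are set
  (void)X.getActiveBits();        // 6: highest set bit is bit 5
  APInt R = X.urem(APInt(32, 5)); // unsigned remainder: 2
  APInt W = X.zextOrTrunc(64);    // zero-extend to 64 bits
  (void)R; (void)W;
}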
An instruction to allocate memory on the stack.
A container for analyses that lazily runs them and caches their results.
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
const T & back() const
back - Get the last element.
Definition ArrayRef.h:151
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
Definition ArrayRef.h:201
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition ArrayRef.h:186
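A short sketch of the non-owning ArrayRef view operations listed above (standalone; sumTail is a hypothetical helper):
#include "llvm/ADT/ArrayRef.h"
int sumTail(llvm::ArrayRef<int> A) {
  if (A.empty())
    return 0;
  // back() reads the last element; drop_back() views all but the last.
  // Neither copies the underlying storage.
  return A.back() + sumTail(A.drop_back());
}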
An immutable pass that tracks lazily created AssumptionCache objects.
AssumptionCache & getAssumptionCache(Function &F)
Get the cached assumptions for a function.
A cache of @llvm.assume calls within a function.
MutableArrayRef< ResultElem > assumptionsFor(const Value *V)
Access the list of assumptions which affect this value.
Functions, function parameters, and return types can have attributes to indicate how they should be treated by optimizations and code generation.
Definition Attributes.h:69
LLVM_ABI std::optional< unsigned > getVScaleRangeMax() const
Returns the maximum value for the vscale_range attribute or std::nullopt when unknown.
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
LLVM Basic Block Representation.
Definition BasicBlock.h:62
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well formed.
Definition BasicBlock.h:233
BinaryOps getOpcode() const
Definition InstrTypes.h:374
Base class for all callable instructions (InvokeInst and CallInst); holds everything related to calling a function.
This class represents a function call, abstracting a target machine's calling convention.
static LLVM_ABI unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, const DataLayout *DL)
Determine how a pair of casts can be eliminated, if they can be at all.
This class is the base class for the comparison instructions.
Definition InstrTypes.h:664
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition InstrTypes.h:982
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
Definition InstrTypes.h:858
bool isFalseWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:948
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:679
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition InstrTypes.h:693
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition InstrTypes.h:682
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition InstrTypes.h:691
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition InstrTypes.h:680
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition InstrTypes.h:681
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition InstrTypes.h:690
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition InstrTypes.h:684
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition InstrTypes.h:687
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition InstrTypes.h:688
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition InstrTypes.h:683
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition InstrTypes.h:685
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition InstrTypes.h:692
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition InstrTypes.h:689
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition InstrTypes.h:678
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition InstrTypes.h:686
bool isSigned() const
Definition InstrTypes.h:930
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
bool isTrueWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:942
static bool isFPPredicate(Predicate P)
Definition InstrTypes.h:770
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Definition InstrTypes.h:871
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
Predicate getPredicate() const
Return the predicate for this instruction.
Definition InstrTypes.h:765
static LLVM_ABI bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
static LLVM_ABI bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
bool isUnsigned() const
Definition InstrTypes.h:936
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign information.
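The predicate helpers above can be used without an instruction via the static overloads; a minimal sketch (standalone, names illustrative):
#include "llvm/IR/InstrTypes.h"
using llvm::CmpInst;
void predicateDemo() {
  CmpInst::Predicate P = CmpInst::ICMP_SLT;
  // Swapping exchanges the operand roles; inverting negates the result.
  CmpInst::Predicate Swapped = CmpInst::getSwappedPredicate(P); // ICMP_SGT
  CmpInst::Predicate Inverse = CmpInst::getInversePredicate(P); // ICMP_SGE
  (void)Swapped; (void)Inverse;
}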
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getExtractElement(Constant *Vec, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static LLVM_ABI Constant * getBinOpAbsorber(unsigned Opcode, Type *Ty, bool AllowLHSConstant=false)
Return the absorbing element for the given binary operation, i.e.
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getInsertElement(Constant *Vec, Constant *Elt, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static LLVM_ABI Constant * getShuffleVector(Constant *V1, Constant *V2, ArrayRef< int > Mask, Type *OnlyIfReducedTy=nullptr)
static bool isSupportedGetElementPtr(const Type *SrcElemTy)
Whether creating a constant expression for this getelementptr type is supported.
Definition Constants.h:1397
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
Definition Constants.h:1284
static LLVM_ABI Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
static LLVM_ABI std::optional< ConstantFPRange > makeExactFCmpRegion(FCmpInst::Predicate Pred, const APFloat &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with any value contained within Other.
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:282
const APFloat & getValueAPF() const
Definition Constants.h:325
static LLVM_ABI Constant * getZero(Type *Ty, bool Negative=false)
static Constant * getNegativeZero(Type *Ty)
Definition Constants.h:320
static LLVM_ABI Constant * getNaN(Type *Ty, bool Negative=false, uint64_t Payload=0)
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getSigned(IntegerType *Ty, int64_t V, bool ImplicitTrunc=false)
Return a ConstantInt with the specified value for the specified type.
Definition Constants.h:135
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate for the type of this constant.
Definition Constants.h:168
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
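A small standalone sketch of the ConstantInt factories above (constantIntDemo is a hypothetical name):
#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
void constantIntDemo() {
  llvm::LLVMContext Ctx;
  llvm::ConstantInt *True = llvm::ConstantInt::getTrue(Ctx); // i1 1
  llvm::IntegerType *I32 = llvm::Type::getInt32Ty(Ctx);
  llvm::ConstantInt *C = llvm::ConstantInt::getSigned(I32, -7); // i32 -7
  (void)True;
  (void)C->getZExtValue(); // bit pattern 0xFFFFFFF9, zero-extended to 64 bits
}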
static LLVM_ABI ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
This class represents a range of values.
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
LLVM_ABI bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
LLVM_ABI bool isEmptySet() const
Return true if this set contains no members.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with any value contained within Other.
LLVM_ABI ConstantRange inverse() const
Return a new range that is the logical not of the current set.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
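To make the ConstantRange operations above concrete, a minimal sketch (standalone, rangeDemo is illustrative):
#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"
void rangeDemo() {
  // Exact region for "x ult 10" over i8: the half-open range [0, 10).
  llvm::ConstantRange CR = llvm::ConstantRange::makeExactICmpRegion(
      llvm::CmpInst::ICMP_ULT, llvm::APInt(8, 10));
  (void)CR.contains(llvm::APInt(8, 5)); // true
  llvm::ConstantRange Inv = CR.inverse(); // [10, 0), i.e. "x uge 10"
  (void)Inv.isEmptySet(); // false
}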
static LLVM_ABI Constant * get(StructType *T, ArrayRef< Constant * > V)
static LLVM_ABI Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
LLVM_ABI bool isAllOnesValue() const
Return true if this is the value that would be returned by getAllOnesValue.
LLVM_ABI bool isMaxSignedValue() const
Return true if the value is the largest signed value.
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI bool isNaN() const
Return true if this is a floating-point NaN constant or a vector floating-point constant with all NaN elements.
LLVM_ABI bool isMinSignedValue() const
Return true if the value is the smallest signed value.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if possible, or null if not.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
unsigned getAddressSizeInBits(unsigned AS) const
The size in bits of an address in the given AS.
Definition DataLayout.h:507
IntegerType * getAddressType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of an address in AddressSpace.
Definition DataLayout.h:670
LLVM_ABI unsigned getIndexTypeSizeInBits(Type *Ty) const
The size in bits of the index used in GEP calculation for this type.
LLVM_ABI IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
LLVM_ABI TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment padding.
unsigned getIndexSizeInBits(unsigned AS) const
The size in bits of indices used for address calculation in getelementptr and for addresses in the given AS.
Definition DataLayout.h:498
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition DataLayout.h:771
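A standalone sketch of the DataLayout size queries above; the layout string "e-p:64:64" is an assumed example (little-endian, 64-bit pointers):
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
void layoutDemo() {
  llvm::LLVMContext Ctx;
  llvm::DataLayout DL("e-p:64:64"); // hypothetical target description
  llvm::Type *I64 = llvm::Type::getInt64Ty(Ctx);
  (void)DL.getTypeSizeInBits(I64); // 64
  (void)DL.getTypeAllocSize(I64);  // 8 bytes, including alignment padding
  llvm::PointerType *P = llvm::PointerType::get(Ctx, /*AddressSpace=*/0);
  (void)DL.getIndexTypeSizeInBits(P); // width of a GEP index for this pointer
}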
Legacy analysis pass which computes a DominatorTree.
Definition Dominators.h:321
DominatorTree & getDomTree()
Definition Dominators.h:329
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
This instruction extracts a struct member or array element value from an aggregate value.
This instruction compares its operands according to the predicate given to the constructor.
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:22
bool noSignedZeros() const
Definition FMF.h:67
bool noInfs() const
Definition FMF.h:66
bool allowReassoc() const
Flag queries.
Definition FMF.h:64
bool noNaNs() const
Definition FMF.h:65
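The FastMathFlags queries above pair with corresponding setters; a minimal sketch (fmfDemo is an illustrative name):
#include "llvm/IR/FMF.h"
void fmfDemo() {
  llvm::FastMathFlags FMF;
  FMF.setNoNaNs();        // operands and result assumed not NaN
  FMF.setNoSignedZeros(); // -0.0 and +0.0 treated as interchangeable
  (void)FMF.noNaNs();     // true
  (void)FMF.noInfs();     // false: never set
}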
Represents calls to the gc.relocate intrinsic.
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
Represents flags for the getelementptr instruction/expression.
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
This instruction compares its operands according to the predicate given to the constructor.
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
This instruction inserts a struct field or array element value into an aggregate value.
LLVM_ABI bool hasNoSignedZeros() const LLVM_READONLY
Determine whether the no-signed-zeros flag is set.
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
LLVM_ABI bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
An instruction for reading from memory.
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Metadata node.
Definition Metadata.h:1078
static APInt getSaturationPoint(Intrinsic::ID ID, unsigned numBits)
Min/max intrinsics are monotonic; they operate on fixed-bitwidth values, so there is a certain threshold...
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
op_range incoming_values()
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Pass interface - Implemented by all 'passes'.
Definition Pass.h:99
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
This class represents a sign extension of integer types.
This class represents the LLVM 'select' instruction.
const Value * getFalseValue() const
const Value * getTrueValue() const
size_type size() const
Determine the number of elements in the SetVector.
Definition SetVector.h:103
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
static void commuteShuffleMask(MutableArrayRef< int > Mask, unsigned InVecNumElts)
Change values in a shuffle permute mask assuming the two vector operands of length InVecNumElts have swapped position.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void assign(size_type NumElts, ValueParamT Elt)
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
TargetLibraryInfo & getTLI(const Function &F)
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:296
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:128
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
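A short standalone sketch of the Type queries above, using a fixed 4 x i32 vector as the assumed example:
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
void typeDemo() {
  llvm::LLVMContext Ctx;
  llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
  auto *V4I32 = llvm::VectorType::get(I32, llvm::ElementCount::getFixed(4));
  (void)V4I32->isVectorTy();          // true
  (void)V4I32->getScalarType();       // i32, the element type
  (void)V4I32->getScalarSizeInBits(); // 32, the element width
}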
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
Value * getOperand(unsigned i) const
Definition User.h:233
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to false.
Definition Value.h:759
LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const
Accumulate the constant offset this value has compared to a base pointer.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1106
Base class of all SIMD vector types.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
This class represents zero extension of integer types.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr bool isFixed() const
Returns true if the quantity is not scaled by vscale.
Definition TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
const ParentTy * getParent() const
Definition ilist_node.h:34
CallInst * Call
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
cst_pred_ty< is_negative > m_Negative()
Match an integer or vector of negative values.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::FMul, true > m_c_FMul(const LHS &L, const RHS &R)
Matches FMul with LHS and RHS in either order.
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
auto m_PtrToIntOrAddr(const OpTy &Op)
Matches PtrToInt or PtrToAddr.
cstfp_pred_ty< is_inf > m_Inf()
Match a positive or negative infinity FP constant.
m_Intrinsic_Ty< Opnd0 >::Ty m_BitReverse(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::FSub > m_FSub(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< cstfp_pred_ty< is_any_zero_fp >, RHS, Instruction::FSub > m_FNegNSZ(const RHS &X)
Match 'fneg X' as 'fsub +-0.0, X'.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches right-shift operations (lshr and ashr).
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
ap_match< APFloat > m_APFloatAllowPoison(const APFloat *&Res)
Match APFloat while allowing poison in splat vector constants.
CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
cstfp_pred_ty< is_neg_zero_fp > m_NegZeroFP()
Match a floating-point negative zero.
specific_fpval m_SpecificFP(double V)
Match a specific floating point value or vector with all elements equal to the value.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_Sqrt(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match() expression.
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoSignedWrap > m_NSWShl(const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
cst_pred_ty< custom_checkfn< APInt > > m_CheckedInt(function_ref< bool(const APInt &)> CheckFn)
Match an integer or vector where CheckFn(ele) for each element is true.
specific_fpval m_FPOne()
Match a float 1.0 or vector with all elements equal to 1.0.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches an Add with LHS and RHS in either order.
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShl(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > > > m_c_MaxOrMin(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Exact_match< T > m_Exact(const T &SubPattern)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
BinaryOp_match< LHS, RHS, Instruction::FAdd, true > m_c_FAdd(const LHS &L, const RHS &R)
Matches FAdd with LHS and RHS in either order.
LogicalOp_match< LHS, RHS, Instruction::And, true > m_c_LogicalAnd(const LHS &L, const RHS &R)
Matches L && R with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShr(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
cstfp_pred_ty< is_nan > m_NaN()
Match an arbitrary NaN constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
LogicalOp_match< LHS, RHS, Instruction::Or, true > m_c_LogicalOr(const LHS &L, const RHS &R)
Matches L || R with LHS and RHS in either order.
ThreeOps_match< Val_t, Elt_t, Idx_t, Instruction::InsertElement > m_InsertElt(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx)
Matches InsertElementInst.
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
MatchFunctor< Val, Pattern > match_fn(const Pattern &P)
A match functor that can be used as a UnaryPredicate in functional algorithms like all_of.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoSignedWrap > m_NSWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
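The matchers above nest to describe instruction shapes declaratively; a minimal standalone sketch (isAddOfConstant is a hypothetical helper):
#include "llvm/ADT/APInt.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
// Returns true when V is "add X, C" for a constant (or splat) integer C.
bool isAddOfConstant(llvm::Value *V) {
  using namespace llvm::PatternMatch;
  llvm::Value *X;
  const llvm::APInt *C;
  // m_Value binds the non-constant operand; m_APInt binds the constant.
  return match(V, m_Add(m_Value(X), m_APInt(C)));
}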
ExceptionBehavior
Exception behavior used for floating point operations.
Definition FPEnv.h:39
@ ebStrict
This corresponds to "fpexcept.strict".
Definition FPEnv.h:42
@ ebIgnore
This corresponds to "fpexcept.ignore".
Definition FPEnv.h:40
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
LLVM_ABI Value * simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q)
Given operands for an AShr, fold the result or return null.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
Definition MathExtras.h:344
@ Offset
Definition DWP.cpp:532
LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, const SimplifyQuery &SQ, unsigned Depth=0)
Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
LLVM_ABI Value * simplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FMul, fold the result or return null.
LLVM_ABI Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
LLVM_ABI bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)
Return true if it is valid to use the assumptions provided by an assume intrinsic,...
LLVM_ABI bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
LLVM_ABI Constant * ConstantFoldSelectInstruction(Constant *Cond, Constant *V1, Constant *V2)
Attempt to constant fold a select instruction with the specified operands.
LLVM_ABI Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
LLVM_ABI Constant * ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL, const Instruction *I, bool AllowNonDeterministic=true)
Attempt to constant fold a floating point binary operation with the specified operands,...
LLVM_ABI bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
LLVM_ABI bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if it's even possible to fold a call to the specified function.
LLVM_ABI APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth)
Return the minimum or maximum constant value for the specified integer min/max flavor and type.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI Value * simplifySDivInst(Value *LHS, Value *RHS, bool IsExact, const SimplifyQuery &Q)
Given operands for an SDiv, fold the result or return null.
LLVM_ABI Value * simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q)
Given operand for a UnaryOperator, fold the result or return null.
bool isDefaultFPEnvironment(fp::ExceptionBehavior EB, RoundingMode RM)
Returns true if the exception handling behavior and rounding mode match what is used in the default floating-point environment.
Definition FPEnv.h:68
LLVM_ABI Value * simplifyMulInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Mul, fold the result or return null.
LLVM_ABI bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, APInt &Offset, const DataLayout &DL, DSOLocalEquivalent **DSOEquiv=nullptr)
If this constant is a constant offset from a global, return the global and the constant.
LLVM_ABI Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
LLVM_ABI Value * simplifyCall(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
Given a callsite, callee, and arguments, fold the result or return null.
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
bool canRoundingModeBe(RoundingMode RM, RoundingMode QRM)
Returns true if the rounding mode RM may be QRM at compile time or at run time.
Definition FPEnv.h:80
LLVM_ABI bool isNoAliasCall(const Value *V)
Return true if this pointer is returned by a noalias function.
LLVM_ABI Value * simplifyFCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q)
Given operands for an FCmpInst, fold the result or return null.
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
LLVM_ABI Constant * ConstantFoldGetElementPtr(Type *Ty, Constant *C, std::optional< ConstantRange > InRange, ArrayRef< Value * > Idxs)
LLVM_ABI CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered=false)
Return the canonical comparison predicate for the specified minimum/maximum flavor.
LLVM_ABI Value * simplifyShuffleVectorInst(Value *Op0, Value *Op1, ArrayRef< int > Mask, Type *RetTy, const SimplifyQuery &Q)
Given operands for a ShuffleVectorInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldCall(const CallBase *Call, Function *F, ArrayRef< Constant * > Operands, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldCall - Attempt to constant fold a call to the specified function with the specified arguments.
LLVM_ABI Value * simplifyOrInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an Or, fold the result or return null.
LLVM_ABI Value * simplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an Xor, fold the result or return null.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
LLVM_ABI ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
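A sketch of how computeConstantRange might be consumed; fitsInByte is a hypothetical helper built on the signature above:
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Value.h"
// Returns true when every possible unsigned value of V fits in 8 bits.
bool fitsInByte(const llvm::Value *V) {
  llvm::ConstantRange CR = llvm::computeConstantRange(V, /*ForSigned=*/false);
  return CR.getUnsignedMax().getActiveBits() <= 8;
}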
LLVM_ABI Constant * ConstantFoldExtractValueInstruction(Constant *Agg, ArrayRef< unsigned > Idxs)
Attempt to constant fold an extractvalue instruction with the specified operands and indices.
LLVM_ABI bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...
LLVM_ABI bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if 'V & Mask' is known to be zero.
LLVM_ABI Value * simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty, const SimplifyQuery &Q)
Given operands for a CastInst, fold the result or return null.
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
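A minimal caller-side sketch, assuming only the DataLayout is at hand (foldIfPossible is an illustrative name):
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instruction.h"
// If I folds to an existing value, reroute its uses; no new IR is created.
void foldIfPossible(llvm::Instruction *I, const llvm::DataLayout &DL) {
  llvm::SimplifyQuery Q(DL, I); // context instruction refines the query
  if (llvm::Value *V = llvm::simplifyInstruction(I, Q)) {
    I->replaceAllUsesWith(V);
    I->eraseFromParent();
  }
}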
unsigned M1(unsigned Val)
Definition VE.h:377
LLVM_ABI Value * simplifySubInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Sub, fold the result or return null.
LLVM_ABI Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
LLVM_ABI Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
Definition STLExtras.h:2016
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1744
LLVM_ABI bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL, const TargetLibraryInfo *TLI, ObjectSizeOpts Opts={})
Compute the size of the object pointed by Ptr.
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned element.
LLVM_ABI Constant * ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty, const DataLayout &DL)
If C is a uniform value where all bits are the same (either all zero, all ones, all undef or all pois...
LLVM_ABI SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF)
Return the inverse minimum/maximum flavor of the specified flavor.
LLVM_ABI bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, SmallSetVector< Instruction *, 8 > *UnsimplifiedUsers=nullptr)
Replace all uses of 'I' with 'SimpleV' and simplify the uses recursively.
LLVM_ABI Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
SelectPatternFlavor
Specific patterns of select instructions we can match.
LLVM_ABI Value * simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Shl, fold the result or return null.
LLVM_ABI Value * simplifyFNegInst(Value *Op, FastMathFlags FMF, const SimplifyQuery &Q)
Given operand for an FNeg, fold the result or return null.
LLVM_ABI Value * simplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FSub, fold the result or return null.
LLVM_ABI bool canReplacePointersIfEqual(const Value *From, const Value *To, const DataLayout &DL)
Returns true if a pointer value From can be replaced with another pointer value To if they are deemed equal.
Definition Loads.cpp:865
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
LLVM_ABI Value * simplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FRem, fold the result or return null.
LLVM_ABI Value * simplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FAdd, fold the result or return null.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI Value * simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q)
Given operands for a LShr, fold the result or return null.
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI bool cannotBeNegativeZero(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if we can prove that the specified FP value is never equal to -0.0.
LLVM_ABI Value * simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an ICmpInst, fold the result or return null.
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range function attribute.
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI Value * simplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an And, fold the result or return null.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
LLVM_ABI bool intrinsicPropagatesPoison(Intrinsic::ID IID)
Return whether this intrinsic propagates poison for all operands.
LLVM_ABI Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
LLVM_ABI bool isNotCrossLaneOperation(const Instruction *I)
Return true if the instruction doesn't potentially cross vector lanes.
LLVM_ABI Value * simplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an InsertValueInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
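A thin wrapper sketch around the entry point above (foldAdd is a hypothetical name):
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instruction.h"
// May return null: not every constant pair folds to a simple constant.
llvm::Constant *foldAdd(llvm::Constant *L, llvm::Constant *R,
                        const llvm::DataLayout &DL) {
  return llvm::ConstantFoldBinaryOpOperands(llvm::Instruction::Add, L, R, DL);
}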
LLVM_ABI Value * simplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FDiv, fold the result or return null.
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
LLVM_ABI Value * simplifyLoadInst(LoadInst *LI, Value *PtrOp, const SimplifyQuery &Q)
Given a load instruction and its pointer operand, fold the result or return null.
LLVM_ABI Value * simplifyFMAFMul(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for the multiplication of a FMA, fold the result or return null.
LLVM_ABI SelectPatternResult matchDecomposedSelectPattern(CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, FastMathFlags FMF=FastMathFlags(), Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Determine the pattern that a select with the given compare as its predicate and given values as its true/false operands would match.
LLVM_ABI Value * simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q)
Given a constrained FP intrinsic call, tries to compute its simplified version.
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
std::optional< DecomposedBitTest > decomposeBitTest(Value *Cond, bool LookThroughTrunc=true, bool AllowNonZeroC=false, bool DecomposeAnd=false)
Decompose an icmp into the form ((X & Mask) pred C) if possible.
LLVM_ABI Value * findScalarElement(Value *V, unsigned EltNo)
Given a vector and an element number, see if the scalar value is already around as a register,...
LLVM_ABI bool isKnownNonEqual(const Value *V1, const Value *V2, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the given values are known to be non-equal when defined.
LLVM_ABI Value * simplifyUDivInst(Value *LHS, Value *RHS, bool IsExact, const SimplifyQuery &Q)
Given operands for a UDiv, fold the result or return null.
DWARFExpression::Operation Op
LLVM_ABI bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, unsigned MaxUsesToExplore=0)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (which is required to exist).
LLVM_ABI Value * simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType, Value *Op0, Value *Op1, const SimplifyQuery &Q, const CallBase *Call)
Given operands for a BinaryIntrinsic, fold the result or return null.
RoundingMode
Rounding mode.
@ NearestTiesToEven
roundTiesToEven.
@ TowardNegative
roundTowardNegative.
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
unsigned M0(unsigned Val)
Definition VE.h:376
LLVM_ABI unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return the number of times the sign bit of the register is replicated into the other bits.
LLVM_ABI Value * simplifyInsertElementInst(Value *Vec, Value *Elt, Value *Idx, const SimplifyQuery &Q)
Given operands for an InsertElement, fold the result or return null.
constexpr unsigned BitWidth
LLVM_ABI Value * simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp, const SimplifyQuery &Q, bool AllowRefinement, SmallVectorImpl< Instruction * > *DropFlags=nullptr)
See if V simplifies when its operand Op is replaced with RepOp.
LLVM_ABI bool maskIsAllZeroOrUndef(Value *Mask)
Given a mask vector of i1, return true if all of the elements of this predicate mask are known to be false or undef.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI Value * simplifySRemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an SRem, fold the result or return null.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1945
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Definition STLExtras.h:2156
LLVM_ABI Constant * ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val, ArrayRef< unsigned > Idxs)
Attempt to constant fold an insertvalue instruction with the specified operands and indices.
LLVM_ABI Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinable.
LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return true if the given value is known to have exactly one bit set when defined.
@ Continue
Definition DWP.h:22
LLVM_ABI std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on dominating conditions.
LLVM_ABI Value * simplifyCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a CmpInst, fold the result or return null.
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_ABI Constant * ConstantFoldInstOperands(const Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
LLVM_ABI bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW=false, bool AllowPoison=true)
Return true if the two given values are negations of each other.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
LLVM_ABI Constant * ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned, const DataLayout &DL)
Constant fold a zext, sext or trunc, depending on IsSigned and whether the DestTy is wider or narrower.
LLVM_ABI const SimplifyQuery getBestSimplifyQuery(Pass &, Function &)
std::pair< Value *, FPClassTest > fcmpToClassTest(FCmpInst::Predicate Pred, const Function &F, Value *LHS, Value *RHS, bool LookThroughSrc=true)
Returns a pair of values, which if passed to llvm.is.fpclass, returns the same result as an fcmp with the given operands.
LLVM_ABI void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=MaxLookupSearchDepth)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
bool isCheckForZeroAndMulWithOverflow(Value *Op0, Value *Op1, bool IsAnd, Use *&Y)
Match one of the patterns up to the select/logic op: Op0 = icmp ne i4 X, 0 Agg = call { i4,...
bool canIgnoreSNaN(fp::ExceptionBehavior EB, FastMathFlags FMF)
Returns true if the possibility of a signaling NaN can be safely ignored.
Definition FPEnv.h:86
LLVM_ABI Value * simplifyURemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a URem, fold the result or return null.
LLVM_ABI Value * simplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &Q)
Given operands for an ExtractElementInst, fold the result or return null.
LLVM_ABI Value * simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal, const SimplifyQuery &Q)
Given operands for a SelectInst, fold the result or return null.
constexpr detail::IsaCheckPredicate< Types... > IsaPred
Function object wrapper for the llvm::isa type check.
Definition Casting.h:866
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
LLVM_ABI std::optional< bool > computeKnownFPSignBit(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return false if we can prove that the specified FP value's sign bit is 0.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define N
This callback is used in conjunction with PointerMayBeCaptured.
virtual Action captured(const Use *U, UseCaptureInfo CI)=0
Use U directly captures CI.UseCC and additionally CI.ResultCC through the return value of the user of U.
virtual void tooManyUses()=0
tooManyUses - The depth of traversal has breached a limit.
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
InstrInfoQuery provides an interface to query additional information for instructions like metadata o...
bool isExact(const BinaryOperator *Op) const
MDNode * getMetadata(const Instruction *I, unsigned KindID) const
bool hasNoSignedWrap(const InstT *Op) const
bool hasNoUnsignedWrap(const InstT *Op) const
bool isNonNegative() const
Returns true if this value is known to be non-negative.
Definition KnownBits.h:108
bool isZero() const
Returns true if value is all zero.
Definition KnownBits.h:80
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
Definition KnownBits.h:242
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
Definition KnownBits.h:274
bool hasConflict() const
Returns true if there is conflicting information.
Definition KnownBits.h:51
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
unsigned countMaxActiveBits() const
Returns the maximum number of bits needed to represent all possible unsigned values with these known bits.
Definition KnownBits.h:296
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition KnownBits.h:248
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
Definition KnownBits.h:145
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
Definition KnownBits.h:129
bool isNegative() const
Returns true if this value is known to be negative.
Definition KnownBits.h:105
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
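A sketch of the typical KnownBits consumer pattern, assuming the returning overload of computeKnownBits (isMultipleOf4 is a hypothetical helper):
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/KnownBits.h"
// Returns true when V is provably a multiple of 4.
bool isMultipleOf4(const llvm::Value *V, const llvm::DataLayout &DL) {
  llvm::KnownBits Known = llvm::computeKnownBits(V, DL);
  // At least two trailing zero bits means divisibility by 4.
  return Known.countMinTrailingZeros() >= 2;
}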
bool isKnownAlwaysNaN() const
Return true if it's known this must always be a nan.
static constexpr FPClassTest OrderedLessThanZeroMask
std::optional< bool > SignBit
std::nullopt if the sign bit is unknown, true if the sign bit is definitely set or false if the sign ...
bool isKnownNeverNaN() const
Return true if it's known this can never be a nan.
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
bool cannotBeOrderedLessThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never less than -0.0.
The adaptor from a function pass to a loop pass computes these analyses and makes them available to the loop passes.
Various options to control the behavior of getObjectSize.
bool NullIsUnknownSize
If this is true, null pointers in address space 0 will be treated as though they can't be evaluated.
Mode EvalMode
How we want to evaluate this object's size.
@ Min
Evaluate all branches of an unknown condition.
SelectPatternFlavor Flavor
static bool isMinOrMax(SelectPatternFlavor SPF)
When implementing this min/max pattern as fcmp; select, does the fcmp have to be ordered?
const DataLayout & DL
const Instruction * CxtI
bool CanUseUndef
Controls whether simplifications are allowed to constrain the range of possible values for uses of undef.
const DominatorTree * DT
SimplifyQuery getWithInstruction(const Instruction *I) const
LLVM_ABI bool isUndefValue(Value *V) const
If CanUseUndef is true, returns whether V is undef.
AssumptionCache * AC
const TargetLibraryInfo * TLI
SimplifyQuery getWithoutUndef() const
const InstrInfoQuery IIQ
Capture information for a specific Use.
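Finally, a sketch of assembling a fully populated SimplifyQuery from the fields listed above; the analysis pointers are assumed to come from the caller's pass:
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Dominators.h"
// The more analyses the query carries, the more folds become provable.
llvm::Value *simplifyAt(llvm::Instruction *I, const llvm::DataLayout &DL,
                        const llvm::TargetLibraryInfo *TLI,
                        const llvm::DominatorTree *DT,
                        llvm::AssumptionCache *AC) {
  llvm::SimplifyQuery Q(DL, TLI, DT, AC);
  // getWithInstruction pins the context point for dominance-based reasoning.
  return llvm::simplifyInstruction(I, Q.getWithInstruction(I));
}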