//===- InstructionSimplify.cpp - Fold instruction operands ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements routines for folding instructions into simpler forms
// that do not require creating new instructions. This does constant folding
// ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
// returning a constant ("and i32 %x, 0" -> "0") or an already existing value
// ("and i32 %x, %x" -> "%x"). All operands are assumed to have already been
// simplified: this is usually true and assuming it simplifies the logic (if
// they have not been simplified then the results are correct but possibly
// suboptimal).
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/InstructionSimplify.h"

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Support/KnownBits.h"
#include <algorithm>
#include <optional>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instsimplify"

enum { RecursionLimit = 3 };

STATISTIC(NumExpand, "Number of expansions");
STATISTIC(NumReassoc, "Number of reassociations");

static Value *simplifyAndInst(Value *, Value *, const SimplifyQuery &,
                              unsigned);
static Value *simplifyUnOp(unsigned, Value *, const SimplifyQuery &, unsigned);
static Value *simplifyFPUnOp(unsigned, Value *, const FastMathFlags &,
                             const SimplifyQuery &, unsigned);
static Value *simplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &,
                            unsigned);
static Value *simplifyBinOp(unsigned, Value *, Value *, const FastMathFlags &,
                            const SimplifyQuery &, unsigned);
static Value *simplifyCmpInst(CmpPredicate, Value *, Value *,
                              const SimplifyQuery &, unsigned);
static Value *simplifyICmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS,
                               const SimplifyQuery &Q, unsigned MaxRecurse);
static Value *simplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *simplifyXorInst(Value *, Value *, const SimplifyQuery &,
                              unsigned);
static Value *simplifyCastInst(unsigned, Value *, Type *, const SimplifyQuery &,
                               unsigned);
static Value *simplifyGEPInst(Type *, Value *, ArrayRef<Value *>,
                              GEPNoWrapFlags, const SimplifyQuery &, unsigned);
static Value *simplifySelectInst(Value *, Value *, Value *,
                                 const SimplifyQuery &, unsigned);
static Value *simplifyInstructionWithOperands(Instruction *I,
                                              ArrayRef<Value *> NewOps,
                                              const SimplifyQuery &SQ,
                                              unsigned MaxRecurse);

/// For a boolean type or a vector of boolean type, return false or a vector
/// with every element false.
static Constant *getFalse(Type *Ty) { return ConstantInt::getFalse(Ty); }

/// For a boolean type or a vector of boolean type, return true or a vector
/// with every element true.
static Constant *getTrue(Type *Ty) { return ConstantInt::getTrue(Ty); }

/// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
static bool isSameCompare(Value *V, CmpPredicate Pred, Value *LHS, Value *RHS) {
  CmpInst *Cmp = dyn_cast<CmpInst>(V);
  if (!Cmp)
    return false;
  CmpInst::Predicate CPred = Cmp->getPredicate();
  Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
  if (CPred == Pred && CLHS == LHS && CRHS == RHS)
    return true;
  return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS &&
         CRHS == LHS;
}
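
// Illustrative example (added for exposition, not in the original source):
// isSameCompare treats a compare and its operand-swapped form as equivalent,
// so for
//   %c = icmp slt i32 %a, %b
// both isSameCompare(%c, slt, %a, %b) and isSameCompare(%c, sgt, %b, %a)
// return true.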

/// Simplify comparison with true or false branch of select:
///  %sel = select i1 %cond, i32 %tv, i32 %fv
///  %cmp = icmp sle i32 %sel, %rhs
/// Compose new comparison by substituting %sel with either %tv or %fv
/// and see if it simplifies.
static Value *simplifyCmpSelCase(CmpPredicate Pred, Value *LHS, Value *RHS,
                                 Value *Cond, const SimplifyQuery &Q,
                                 unsigned MaxRecurse, Constant *TrueOrFalse) {
  Value *SimplifiedCmp = simplifyCmpInst(Pred, LHS, RHS, Q, MaxRecurse);
  if (SimplifiedCmp == Cond) {
    // %cmp simplified to the select condition (%cond).
    return TrueOrFalse;
  } else if (!SimplifiedCmp && isSameCompare(Cond, Pred, LHS, RHS)) {
    // It didn't simplify. However, if composed comparison is equivalent
    // to the select condition (%cond) then we can replace it.
    return TrueOrFalse;
  }
  return SimplifiedCmp;
}

/// Simplify comparison with true branch of select
static Value *simplifyCmpSelTrueCase(CmpPredicate Pred, Value *LHS, Value *RHS,
                                     Value *Cond, const SimplifyQuery &Q,
                                     unsigned MaxRecurse) {
  return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
                            getTrue(Cond->getType()));
}

/// Simplify comparison with false branch of select
static Value *simplifyCmpSelFalseCase(CmpPredicate Pred, Value *LHS, Value *RHS,
                                      Value *Cond, const SimplifyQuery &Q,
                                      unsigned MaxRecurse) {
  return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
                            getFalse(Cond->getType()));
}

/// We know comparison with both branches of select can be simplified, but they
/// are not equal. This routine handles some logical simplifications.
static Value *handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp,
                                               Value *Cond,
                                               const SimplifyQuery &Q,
                                               unsigned MaxRecurse) {
  // If the false value simplified to false, then the result of the compare
  // is equal to "Cond && TCmp". This also catches the case when the false
  // value simplified to false and the true value to true, returning "Cond".
  // Folding select to and/or isn't poison-safe in general; impliesPoison
  // checks whether folding it does not convert a well-defined value into
  // poison.
  if (match(FCmp, m_Zero()) && impliesPoison(TCmp, Cond))
    if (Value *V = simplifyAndInst(Cond, TCmp, Q, MaxRecurse))
      return V;
  // If the true value simplified to true, then the result of the compare
  // is equal to "Cond || FCmp".
  if (match(TCmp, m_One()) && impliesPoison(FCmp, Cond))
    if (Value *V = simplifyOrInst(Cond, FCmp, Q, MaxRecurse))
      return V;
  // Finally, if the false value simplified to true and the true value to
  // false, then the result of the compare is equal to "!Cond".
  if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
    if (Value *V = simplifyXorInst(
            Cond, Constant::getAllOnesValue(Cond->getType()), Q, MaxRecurse))
      return V;
  return nullptr;
}
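
// Worked example (illustrative): for
//   %sel = select i1 %c, i32 %x, i32 0
//   %cmp = icmp ne i32 %sel, 0
// the false arm of the select gives FCmp == false, so %cmp is equivalent to
// "%c && (%x != 0)", which is the first case above, provided the fold is
// poison-safe per impliesPoison and the 'and' itself simplifies.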

/// Does the given value dominate the specified phi node?
static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    // Arguments and constants dominate all instructions.
    return true;

  // If we have a DominatorTree then do a precise test.
  if (DT)
    return DT->dominates(I, P);

  // Otherwise, if the instruction is in the entry block and is not an invoke
  // nor a callbr, then it obviously dominates all phi nodes.
  if (I->getParent()->isEntryBlock() && !isa<InvokeInst>(I) &&
      !isa<CallBrInst>(I))
    return true;

  return false;
}

/// Try to simplify a binary operator of form "V op OtherOp" where V is
/// "(B0 opex B1)" by distributing 'op' across 'opex' as
/// "(B0 op OtherOp) opex (B1 op OtherOp)".
static Value *expandBinOp(Instruction::BinaryOps Opcode, Value *V,
                          Value *OtherOp, Instruction::BinaryOps OpcodeToExpand,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  auto *B = dyn_cast<BinaryOperator>(V);
  if (!B || B->getOpcode() != OpcodeToExpand)
    return nullptr;
  Value *B0 = B->getOperand(0), *B1 = B->getOperand(1);
  Value *L =
      simplifyBinOp(Opcode, B0, OtherOp, Q.getWithoutUndef(), MaxRecurse);
  if (!L)
    return nullptr;
  Value *R =
      simplifyBinOp(Opcode, B1, OtherOp, Q.getWithoutUndef(), MaxRecurse);
  if (!R)
    return nullptr;

  // Does the expanded pair of binops simplify to the existing binop?
  if ((L == B0 && R == B1) ||
      (Instruction::isCommutative(OpcodeToExpand) && L == B1 && R == B0)) {
    ++NumExpand;
    return B;
  }

  // Otherwise, return "L op' R" if it simplifies.
  Value *S = simplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse);
  if (!S)
    return nullptr;

  ++NumExpand;
  return S;
}

/// Try to simplify binops of form "A op (B op' C)" or the commuted variant by
/// distributing op over op'.
static Value *expandCommutativeBinOp(Instruction::BinaryOps Opcode, Value *L,
                                     Value *R,
                                     Instruction::BinaryOps OpcodeToExpand,
                                     const SimplifyQuery &Q,
                                     unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  if (Value *V = expandBinOp(Opcode, L, R, OpcodeToExpand, Q, MaxRecurse))
    return V;
  if (Value *V = expandBinOp(Opcode, R, L, OpcodeToExpand, Q, MaxRecurse))
    return V;
  return nullptr;
}
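
// Schematic example (illustrative): with Opcode = And and OpcodeToExpand = Or,
// "(B0 | B1) & C" is tried as "(B0 & C) | (B1 & C)". The expansion is kept
// only when both halves and their recombination already simplify, so no new
// instructions are ever created on this path.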

/// Generic simplifications for associative binary operations.
/// Returns the simpler value, or null if none was found.
static Value *simplifyAssociativeBinOp(Instruction::BinaryOps Opcode,
                                       Value *LHS, Value *RHS,
                                       const SimplifyQuery &Q,
                                       unsigned MaxRecurse) {
  assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");

  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);

  // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely.
  if (Op0 && Op0->getOpcode() == Opcode) {
    Value *A = Op0->getOperand(0);
    Value *B = Op0->getOperand(1);
    Value *C = RHS;

    // Does "B op C" simplify?
    if (Value *V = simplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
      // It does! Return "A op V" if it simplifies or is already available.
      // If V equals B then "A op V" is just the LHS.
      if (V == B)
        return LHS;
      // Otherwise return "A op V" if it simplifies.
      if (Value *W = simplifyBinOp(Opcode, A, V, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely.
  if (Op1 && Op1->getOpcode() == Opcode) {
    Value *A = LHS;
    Value *B = Op1->getOperand(0);
    Value *C = Op1->getOperand(1);

    // Does "A op B" simplify?
    if (Value *V = simplifyBinOp(Opcode, A, B, Q, MaxRecurse)) {
      // It does! Return "V op C" if it simplifies or is already available.
      // If V equals B then "V op C" is just the RHS.
      if (V == B)
        return RHS;
      // Otherwise return "V op C" if it simplifies.
      if (Value *W = simplifyBinOp(Opcode, V, C, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // The remaining transforms require commutativity as well as associativity.
  if (!Instruction::isCommutative(Opcode))
    return nullptr;

  // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
  if (Op0 && Op0->getOpcode() == Opcode) {
    Value *A = Op0->getOperand(0);
    Value *B = Op0->getOperand(1);
    Value *C = RHS;

    // Does "C op A" simplify?
    if (Value *V = simplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
      // It does! Return "V op B" if it simplifies or is already available.
      // If V equals A then "V op B" is just the LHS.
      if (V == A)
        return LHS;
      // Otherwise return "V op B" if it simplifies.
      if (Value *W = simplifyBinOp(Opcode, V, B, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely.
  if (Op1 && Op1->getOpcode() == Opcode) {
    Value *A = LHS;
    Value *B = Op1->getOperand(0);
    Value *C = Op1->getOperand(1);

    // Does "C op A" simplify?
    if (Value *V = simplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
      // It does! Return "B op V" if it simplifies or is already available.
      // If V equals C then "B op V" is just the RHS.
      if (V == C)
        return RHS;
      // Otherwise return "B op V" if it simplifies.
      if (Value *W = simplifyBinOp(Opcode, B, V, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  return nullptr;
}
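
// Worked example (illustrative): reassociation folds
//   %nb = sub i32 0, %b
//   %t  = add i32 %a, %b
//   %r  = add i32 %t, %nb
// to %a: "B op C" is "%b + (0 - %b)", which simplifies to 0, and "%a + 0"
// then simplifies to %a.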

/// In the case of a binary operation with a select instruction as an operand,
/// try to simplify the binop by seeing whether evaluating it on both branches
/// of the select results in the same value. Returns the common value if so,
/// otherwise returns null.
static Value *threadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS,
                                    Value *RHS, const SimplifyQuery &Q,
                                    unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  SelectInst *SI;
  if (isa<SelectInst>(LHS)) {
    SI = cast<SelectInst>(LHS);
  } else {
    assert(isa<SelectInst>(RHS) && "No select instruction operand!");
    SI = cast<SelectInst>(RHS);
  }

  // Evaluate the BinOp on the true and false branches of the select.
  Value *TV;
  Value *FV;
  if (SI == LHS) {
    TV = simplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse);
    FV = simplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse);
  } else {
    TV = simplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse);
    FV = simplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse);
  }

  // If they simplified to the same value, then return the common value.
  // If they both failed to simplify then return null.
  if (TV == FV)
    return TV;

  // If one branch simplified to undef, return the other one.
  if (TV && Q.isUndefValue(TV))
    return FV;
  if (FV && Q.isUndefValue(FV))
    return TV;

  // If applying the operation did not change the true and false select values,
  // then the result of the binop is the select itself.
  if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
    return SI;

  // If one branch simplified and the other did not, and the simplified
  // value is equal to the unsimplified one, return the simplified value.
  // For example, select (cond, X, X & Z) & Z -> X & Z.
  if ((FV && !TV) || (TV && !FV)) {
    // Check that the simplified value has the form "X op Y" where "op" is the
    // same as the original operation.
    Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
    if (Simplified && Simplified->getOpcode() == unsigned(Opcode) &&
        !Simplified->hasPoisonGeneratingFlags()) {
      // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS".
      // We already know that "op" is the same as for the simplified value. See
      // if the operands match too. If so, return the simplified value.
      Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
      Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
      Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
      if (Simplified->getOperand(0) == UnsimplifiedLHS &&
          Simplified->getOperand(1) == UnsimplifiedRHS)
        return Simplified;
      if (Simplified->isCommutative() &&
          Simplified->getOperand(1) == UnsimplifiedLHS &&
          Simplified->getOperand(0) == UnsimplifiedRHS)
        return Simplified;
    }
  }

  return nullptr;
}
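
// Minimal example (illustrative): both arms of the select evaluate to the
// same value, so the binop folds away:
//   %s = select i1 %c, i32 0, i32 %y
//   %r = or i32 %s, %y
// The true arm gives "0 | %y" -> %y and the false arm gives "%y | %y" -> %y,
// so %r simplifies to %y.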

/// In the case of a comparison with a select instruction, try to simplify the
/// comparison by seeing whether both branches of the select result in the same
/// value. Returns the common value if so, otherwise returns null.
/// For example, if we have:
///  %tmp = select i1 %cmp, i32 1, i32 2
///  %cmp1 = icmp sle i32 %tmp, 3
/// We can simplify %cmp1 to true, because both branches of select are
/// less than 3. We compose new comparison by substituting %tmp with both
/// branches of select and see if it can be simplified.
static Value *threadCmpOverSelect(CmpPredicate Pred, Value *LHS, Value *RHS,
                                  const SimplifyQuery &Q, unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Make sure the select is on the LHS.
  if (!isa<SelectInst>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
  SelectInst *SI = cast<SelectInst>(LHS);
  Value *Cond = SI->getCondition();
  Value *TV = SI->getTrueValue();
  Value *FV = SI->getFalseValue();

  // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
  // Does "cmp TV, RHS" simplify?
  Value *TCmp = simplifyCmpSelTrueCase(Pred, TV, RHS, Cond, Q, MaxRecurse);
  if (!TCmp)
    return nullptr;

  // Does "cmp FV, RHS" simplify?
  Value *FCmp = simplifyCmpSelFalseCase(Pred, FV, RHS, Cond, Q, MaxRecurse);
  if (!FCmp)
    return nullptr;

  // If both sides simplified to the same value, then use it as the result of
  // the original comparison.
  if (TCmp == FCmp)
    return TCmp;

  // The remaining cases only make sense if the select condition has the same
  // type as the result of the comparison, so bail out if this is not so.
  if (Cond->getType()->isVectorTy() == RHS->getType()->isVectorTy())
    return handleOtherCmpSelSimplifications(TCmp, FCmp, Cond, Q, MaxRecurse);

  return nullptr;
}

/// In the case of a binary operation with an operand that is a PHI instruction,
/// try to simplify the binop by seeing whether evaluating it on the incoming
/// phi values yields the same result for every value. If so returns the common
/// value, otherwise returns null.
static Value *threadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS,
                                 Value *RHS, const SimplifyQuery &Q,
                                 unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  PHINode *PI;
  if (isa<PHINode>(LHS)) {
    PI = cast<PHINode>(LHS);
    // Bail out if RHS and the phi may be mutually interdependent due to a loop.
    if (!valueDominatesPHI(RHS, PI, Q.DT))
      return nullptr;
  } else {
    assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
    PI = cast<PHINode>(RHS);
    // Bail out if LHS and the phi may be mutually interdependent due to a loop.
    if (!valueDominatesPHI(LHS, PI, Q.DT))
      return nullptr;
  }

  // Evaluate the BinOp on the incoming phi values.
  Value *CommonValue = nullptr;
  for (Use &Incoming : PI->incoming_values()) {
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI)
      continue;
    Instruction *InTI = PI->getIncomingBlock(Incoming)->getTerminator();
    Value *V = PI == LHS
                   ? simplifyBinOp(Opcode, Incoming, RHS,
                                   Q.getWithInstruction(InTI), MaxRecurse)
                   : simplifyBinOp(Opcode, LHS, Incoming,
                                   Q.getWithInstruction(InTI), MaxRecurse);
    // If the operation failed to simplify, or simplified to a different value
    // to previously, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}
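
// Minimal example (illustrative), assuming %x is a function argument (so it
// trivially dominates the phi):
//   %p = phi i32 [ %x, %bb0 ], [ 0, %bb1 ]
//   %r = or i32 %p, %x
// Both incoming values yield %x ("%x | %x" and "0 | %x"), so %r folds to %x.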

/// In the case of a comparison with a PHI instruction, try to simplify the
/// comparison by seeing whether comparing with all of the incoming phi values
/// yields the same result every time. If so returns the common result,
/// otherwise returns null.
static Value *threadCmpOverPHI(CmpPredicate Pred, Value *LHS, Value *RHS,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Make sure the phi is on the LHS.
  if (!isa<PHINode>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
  PHINode *PI = cast<PHINode>(LHS);

  // Bail out if RHS and the phi may be mutually interdependent due to a loop.
  if (!valueDominatesPHI(RHS, PI, Q.DT))
    return nullptr;

  // Evaluate the BinOp on the incoming phi values.
  Value *CommonValue = nullptr;
  for (unsigned u = 0, e = PI->getNumIncomingValues(); u < e; ++u) {
    Value *Incoming = PI->getIncomingValue(u);
    Instruction *InTI = PI->getIncomingBlock(u)->getTerminator();
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI)
      continue;
    // Change the context instruction to the "edge" that flows into the phi.
    // This is important because that is where incoming is actually "evaluated"
    // even though it is used later somewhere else.
    Value *V = simplifyCmpInst(Pred, Incoming, RHS, Q.getWithInstruction(InTI),
                               MaxRecurse);
    // If the operation failed to simplify, or simplified to a different value
    // to previously, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}

static Constant *foldOrCommuteConstant(Instruction::BinaryOps Opcode,
                                       Value *&Op0, Value *&Op1,
                                       const SimplifyQuery &Q) {
  if (auto *CLHS = dyn_cast<Constant>(Op0)) {
    if (auto *CRHS = dyn_cast<Constant>(Op1)) {
      switch (Opcode) {
      default:
        break;
      case Instruction::FAdd:
      case Instruction::FSub:
      case Instruction::FMul:
      case Instruction::FDiv:
      case Instruction::FRem:
        if (Q.CxtI != nullptr)
          return ConstantFoldFPInstOperands(Opcode, CLHS, CRHS, Q.DL, Q.CxtI);
      }
      return ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL);
    }

    // Canonicalize the constant to the RHS if this is a commutative operation.
    if (Instruction::isCommutative(Opcode))
      std::swap(Op0, Op1);
  }
  return nullptr;
}
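
// Examples (illustrative): "add i32 1, 2" constant-folds to 3 here, while
// "add i32 1, %x" is only canonicalized to "add i32 %x, 1" by the operand
// swap, so later matchers can assume a constant RHS.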

/// Given operands for an Add, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Add, Op0, Op1, Q))
    return C;

  // X + poison -> poison
  if (isa<PoisonValue>(Op1))
    return Op1;

  // X + undef -> undef
  if (Q.isUndefValue(Op1))
    return Op1;

  // X + 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // If the two operands are negations of each other, return 0.
  if (isKnownNegation(Op0, Op1))
    return Constant::getNullValue(Op0->getType());

  // X + (Y - X) -> Y
  // (Y - X) + X -> Y
  // E.g., X + -X -> 0
  Value *Y = nullptr;
  if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
      match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
    return Y;

  // X + ~X -> -1 since ~X = -X-1
  Type *Ty = Op0->getType();
  if (match(Op0, m_Not(m_Specific(Op1))) || match(Op1, m_Not(m_Specific(Op0))))
    return Constant::getAllOnesValue(Ty);

  // add nsw/nuw (xor Y, signmask), signmask --> Y
  // The no-wrapping add guarantees that the top bit will be set by the add.
  // Therefore, the xor must be clearing the already set sign bit of Y.
  if ((IsNSW || IsNUW) && match(Op1, m_SignMask()) &&
      match(Op0, m_Xor(m_Value(Y), m_SignMask())))
    return Y;

  // add nuw %x, -1 -> -1, because %x can only be 0.
  if (IsNUW && match(Op1, m_AllOnes()))
    return Op1; // Which is -1.

  // i1 add -> xor.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = simplifyXorInst(Op0, Op1, Q, MaxRecurse - 1))
      return V;

  // Try some generic simplifications for associative operations.
  if (Value *V =
          simplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q, MaxRecurse))
    return V;

  // Threading Add over selects and phi nodes is pointless, so don't bother.
  // Threading over the select in "A + select(cond, B, C)" means evaluating
  // "A+B" and "A+C" and seeing if they are equal; but they are equal if and
  // only if B and C are equal. If B and C are equal then (since we assume
  // that operands have already been simplified) "select(cond, B, C)" should
  // have been simplified to the common value of B and C already. Analysing
  // "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly
  // for threading over phi nodes.

  return nullptr;
}

Value *llvm::simplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Query) {
  return ::simplifyAddInst(Op0, Op1, IsNSW, IsNUW, Query, RecursionLimit);
}
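
// Usage sketch (illustrative, not part of this file): given a BinaryOperator
// *I and a SimplifyQuery SQ, a caller would typically try the fold and
// replace all uses on success:
//   if (Value *V = simplifyAddInst(I->getOperand(0), I->getOperand(1),
//                                  I->hasNoSignedWrap(),
//                                  I->hasNoUnsignedWrap(),
//                                  SQ.getWithInstruction(I)))
//     I->replaceAllUsesWith(V);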

/// Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant.
/// It returns zero if there are no constant offsets applied.
///
/// This is very similar to stripAndAccumulateConstantOffsets(), except it
/// normalizes the offset bitwidth to the stripped pointer type, not the
/// original pointer type.
static APInt stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
                                            bool AllowNonInbounds = false) {
  assert(V->getType()->isPtrOrPtrVectorTy());

  APInt Offset = APInt::getZero(DL.getIndexTypeSizeInBits(V->getType()));
  V = V->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds);
  // As that strip may trace through `addrspacecast`, need to sext or trunc
  // the offset calculated.
  return Offset.sextOrTrunc(DL.getIndexTypeSizeInBits(V->getType()));
}
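
// Example (illustrative): for V = "getelementptr inbounds i8, ptr %base,
// i64 4", this strips V down to %base and returns an offset of 4, expressed
// in the index width of the stripped pointer type.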

/// Compute the constant difference between two pointer values.
/// If the difference is not a constant, returns zero.
static Constant *computePointerDifference(const DataLayout &DL, Value *LHS,
                                          Value *RHS) {
  APInt LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
  APInt RHSOffset = stripAndComputeConstantOffsets(DL, RHS);

  // If LHS and RHS are not related via constant offsets to the same base
  // value, there is nothing we can do here.
  if (LHS != RHS)
    return nullptr;

  // Otherwise, the difference of LHS - RHS can be computed as:
  //    LHS - RHS
  //  = (LHSOffset + Base) - (RHSOffset + Base)
  //  = LHSOffset - RHSOffset
  Constant *Res = ConstantInt::get(LHS->getContext(), LHSOffset - RHSOffset);
  if (auto *VecTy = dyn_cast<VectorType>(LHS->getType()))
    Res = ConstantVector::getSplat(VecTy->getElementCount(), Res);
  return Res;
}
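
// Example (illustrative): with
//   %p1 = getelementptr inbounds i8, ptr %p, i64 8
// both %p1 and %p strip to the common base %p, so "%p1 - %p" is the constant
// 8; simplifySubInst uses this below to fold
// "sub (ptrtoint %p1), (ptrtoint %p)".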

/// Test if there is a dominating equivalence condition for the
/// two operands. If there is, try to reduce the binary operation
/// between the two operands.
/// Example: Op0 - Op1 --> 0 when Op0 == Op1
static Value *simplifyByDomEq(unsigned Opcode, Value *Op0, Value *Op1,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  // A recursive run cannot yield any additional benefit.
  if (MaxRecurse != RecursionLimit)
    return nullptr;

  std::optional<bool> Imp =
      isImpliedByDomCondition(CmpInst::ICMP_EQ, Op0, Op1, Q.CxtI, Q.DL);
  if (Imp && *Imp) {
    Type *Ty = Op0->getType();
    switch (Opcode) {
    case Instruction::Sub:
    case Instruction::Xor:
    case Instruction::URem:
    case Instruction::SRem:
      return Constant::getNullValue(Ty);

    case Instruction::SDiv:
    case Instruction::UDiv:
      return ConstantInt::get(Ty, 1);

    case Instruction::And:
    case Instruction::Or:
      // Could be either one - choose Op1 since that's more likely a constant.
      return Op1;
    default:
      break;
    }
  }
  return nullptr;
}
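
// Example (illustrative): in IR shaped like
//   br i1 (icmp eq i32 %a, %b), label %then, label %else
// a "sub i32 %a, %b" inside %then is dominated by the equality and folds
// to 0 without examining the values themselves.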

/// Given operands for a Sub, see if we can fold the result.
/// If not, this returns null.
static Value *simplifySubInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Sub, Op0, Op1, Q))
    return C;

  // X - poison -> poison
  // poison - X -> poison
  if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
    return PoisonValue::get(Op0->getType());

  // X - undef -> undef
  // undef - X -> undef
  if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
    return UndefValue::get(Op0->getType());

  // X - 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // X - X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // Is this a negation?
  if (match(Op0, m_Zero())) {
    // 0 - X -> 0 if the sub is NUW.
    if (IsNUW)
      return Constant::getNullValue(Op0->getType());

    KnownBits Known = computeKnownBits(Op1, /* Depth */ 0, Q);
    if (Known.Zero.isMaxSignedValue()) {
      // Op1 is either 0 or the minimum signed value. If the sub is NSW, then
      // Op1 must be 0 because negating the minimum signed value is undefined.
      if (IsNSW)
        return Constant::getNullValue(Op0->getType());

      // 0 - X -> X if X is 0 or the minimum signed value.
      return Op1;
    }
  }

  // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
  // For example, (X + Y) - Y -> X; (Y + X) - Y -> X
  Value *X = nullptr, *Y = nullptr, *Z = Op1;
  if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
    // See if "V === Y - Z" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse - 1))
      // It does! Now see if "X + V" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
    // See if "V === X - Z" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse - 1))
      // It does! Now see if "Y + V" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
  }

  // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies.
  // For example, X - (X + 1) -> -1
  X = Op0;
  if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z)
    // See if "V === X - Y" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse - 1))
      // It does! Now see if "V - Z" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
    // See if "V === X - Z" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse - 1))
      // It does! Now see if "V - Y" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
  }

  // Z - (X - Y) -> (Z - X) + Y if everything simplifies.
  // For example, X - (X - Y) -> Y.
  Z = Op0;
  if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y)
    // See if "V === Z - X" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse - 1))
      // It does! Now see if "V + Y" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }

  // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies.
  if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) &&
      match(Op1, m_Trunc(m_Value(Y))))
    if (X->getType() == Y->getType())
      // See if "V === X - Y" simplifies.
      if (Value *V = simplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse - 1))
        // It does! Now see if "trunc V" simplifies.
        if (Value *W = simplifyCastInst(Instruction::Trunc, V, Op0->getType(),
                                        Q, MaxRecurse - 1))
          // It does, return the simplified "trunc V".
          return W;

  // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
  if (match(Op0, m_PtrToInt(m_Value(X))) && match(Op1, m_PtrToInt(m_Value(Y))))
    if (Constant *Result = computePointerDifference(Q.DL, X, Y))
      return ConstantFoldIntegerCast(Result, Op0->getType(), /*IsSigned*/ true,
                                     Q.DL);

  // i1 sub -> xor.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = simplifyXorInst(Op0, Op1, Q, MaxRecurse - 1))
      return V;

  // Threading Sub over selects and phi nodes is pointless, so don't bother.
  // Threading over the select in "A - select(cond, B, C)" means evaluating
  // "A-B" and "A-C" and seeing if they are equal; but they are equal if and
  // only if B and C are equal. If B and C are equal then (since we assume
  // that operands have already been simplified) "select(cond, B, C)" should
  // have been simplified to the common value of B and C already. Analysing
  // "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly
  // for threading over phi nodes.

  if (Value *V = simplifyByDomEq(Instruction::Sub, Op0, Op1, Q, MaxRecurse))
    return V;

  return nullptr;
}

Value *llvm::simplifySubInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Q) {
  return ::simplifySubInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
}

/// Given operands for a Mul, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyMulInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Mul, Op0, Op1, Q))
    return C;

  // X * poison -> poison
  if (isa<PoisonValue>(Op1))
    return Op1;

  // X * undef -> 0
  // X * 0 -> 0
  if (Q.isUndefValue(Op1) || match(Op1, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X * 1 -> X
  if (match(Op1, m_One()))
    return Op0;

  // (X / Y) * Y -> X if the division is exact.
  Value *X = nullptr;
  if (Q.IIQ.UseInstrInfo &&
      (match(Op0,
             m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) ||  // (X / Y) * Y
       match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0)))))) // Y * (X / Y)
    return X;

  if (Op0->getType()->isIntOrIntVectorTy(1)) {
    // mul i1 nsw is a special-case because -1 * -1 is poison (+1 is not
    // representable). All other cases reduce to 0, so just return 0.
    if (IsNSW)
      return ConstantInt::getNullValue(Op0->getType());

    // Treat "mul i1" as "and i1".
    if (MaxRecurse)
      if (Value *V = simplifyAndInst(Op0, Op1, Q, MaxRecurse - 1))
        return V;
  }

  // Try some generic simplifications for associative operations.
  if (Value *V =
          simplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
    return V;

  // Mul distributes over Add. Try some generic simplifications based on this.
  if (Value *V = expandCommutativeBinOp(Instruction::Mul, Op0, Op1,
                                        Instruction::Add, Q, MaxRecurse))
    return V;

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V =
            threadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V =
            threadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
      return V;

  return nullptr;
}

Value *llvm::simplifyMulInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Q) {
  return ::simplifyMulInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
}

/// Given a predicate and two operands, return true if the comparison is true.
/// This is a helper for div/rem simplification where we return some other value
/// when we can prove a relationship between the operands.
static bool isICmpTrue(CmpPredicate Pred, Value *LHS, Value *RHS,
                       const SimplifyQuery &Q, unsigned MaxRecurse) {
  Value *V = simplifyICmpInst(Pred, LHS, RHS, Q, MaxRecurse);
  Constant *C = dyn_cast_or_null<Constant>(V);
  return (C && C->isAllOnesValue());
}

/// Return true if we can simplify X / Y to 0. Remainder can adapt that answer
/// to simplify X % Y to X.
static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q,
                      unsigned MaxRecurse, bool IsSigned) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return false;

  if (IsSigned) {
    // (X srem Y) sdiv Y --> 0
    if (match(X, m_SRem(m_Value(), m_Specific(Y))))
      return true;

    // |X| / |Y| --> 0
    //
    // We require that 1 operand is a simple constant. That could be extended to
    // 2 variables if we computed the sign bit for each.
    //
    // Make sure that a constant is not the minimum signed value because taking
    // the abs() of that is undefined.
    Type *Ty = X->getType();
    const APInt *C;
    if (match(X, m_APInt(C)) && !C->isMinSignedValue()) {
      // Is the variable divisor magnitude always greater than the constant
      // dividend magnitude?
      // |Y| > |C| --> Y < -abs(C) or Y > abs(C)
      Constant *PosDividendC = ConstantInt::get(Ty, C->abs());
      Constant *NegDividendC = ConstantInt::get(Ty, -C->abs());
      if (isICmpTrue(CmpInst::ICMP_SLT, Y, NegDividendC, Q, MaxRecurse) ||
          isICmpTrue(CmpInst::ICMP_SGT, Y, PosDividendC, Q, MaxRecurse))
        return true;
    }
    if (match(Y, m_APInt(C))) {
      // Special-case: we can't take the abs() of a minimum signed value. If
      // that's the divisor, then all we have to do is prove that the dividend
      // is also not the minimum signed value.
      if (C->isMinSignedValue())
        return isICmpTrue(CmpInst::ICMP_NE, X, Y, Q, MaxRecurse);

      // Is the variable dividend magnitude always less than the constant
      // divisor magnitude?
      // |X| < |C| --> X > -abs(C) and X < abs(C)
      Constant *PosDivisorC = ConstantInt::get(Ty, C->abs());
      Constant *NegDivisorC = ConstantInt::get(Ty, -C->abs());
      if (isICmpTrue(CmpInst::ICMP_SGT, X, NegDivisorC, Q, MaxRecurse) &&
          isICmpTrue(CmpInst::ICMP_SLT, X, PosDivisorC, Q, MaxRecurse))
        return true;
    }
    return false;
  }

  // IsSigned == false.

  // Is the unsigned dividend known to be less than a constant divisor?
  // TODO: Convert this (and above) to range analysis
  //       ("computeConstantRangeIncludingKnownBits")?
  const APInt *C;
  if (match(Y, m_APInt(C)) &&
      computeKnownBits(X, /* Depth */ 0, Q).getMaxValue().ult(*C))
    return true;

  // Try again for any divisor:
  // Is the dividend unsigned less than the divisor?
  return isICmpTrue(ICmpInst::ICMP_ULT, X, Y, Q, MaxRecurse);
}
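
// Example (illustrative): for
//   %x = and i8 %v, 7
//   %d = udiv i8 %x, 8
// known bits bound the dividend by 7, which is unsigned-less than the
// divisor 8, so the udiv folds to 0 (and "urem i8 %x, 8" would fold to %x).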

/// Check for common or similar folds of integer division or integer remainder.
/// This applies to all 4 opcodes (sdiv/udiv/srem/urem).
static Value *simplifyDivRem(Instruction::BinaryOps Opcode, Value *Op0,
                             Value *Op1, const SimplifyQuery &Q,
                             unsigned MaxRecurse) {
  bool IsDiv = (Opcode == Instruction::SDiv || Opcode == Instruction::UDiv);
  bool IsSigned = (Opcode == Instruction::SDiv || Opcode == Instruction::SRem);

  Type *Ty = Op0->getType();

  // X / undef -> poison
  // X % undef -> poison
  if (Q.isUndefValue(Op1) || isa<PoisonValue>(Op1))
    return PoisonValue::get(Ty);

  // X / 0 -> poison
  // X % 0 -> poison
  // We don't need to preserve faults!
  if (match(Op1, m_Zero()))
    return PoisonValue::get(Ty);

  // poison / X -> poison
  // poison % X -> poison
  if (isa<PoisonValue>(Op0))
    return Op0;

  // undef / X -> 0
  // undef % X -> 0
  if (Q.isUndefValue(Op0))
    return Constant::getNullValue(Ty);

  // 0 / X -> 0
  // 0 % X -> 0
  if (match(Op0, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X / X -> 1
  // X % X -> 0
  if (Op0 == Op1)
    return IsDiv ? ConstantInt::get(Ty, 1) : Constant::getNullValue(Ty);

  KnownBits Known = computeKnownBits(Op1, /* Depth */ 0, Q);
  // X / 0 -> poison
  // X % 0 -> poison
  // If the divisor is known to be zero, just return poison. This can happen in
  // some cases where it's provable indirectly that the denominator is zero but
  // it's not trivially simplifiable (i.e. known zero through a phi node).
  if (Known.isZero())
    return PoisonValue::get(Ty);

  // X / 1 -> X
  // X % 1 -> 0
  // If the divisor can only be zero or one, we can't have division-by-zero
  // or remainder-by-zero, so assume the divisor is 1.
  // e.g. 1, zext (i1 X), sdiv X (Y and 1)
  if (Known.countMinLeadingZeros() == Known.getBitWidth() - 1)
    return IsDiv ? Op0 : Constant::getNullValue(Ty);

  // If X * Y does not overflow, then:
  // X * Y / Y -> X
  // X * Y % Y -> 0
  Value *X;
  if (match(Op0, m_c_Mul(m_Value(X), m_Specific(Op1)))) {
    auto *Mul = cast<OverflowingBinaryOperator>(Op0);
    // The multiplication can't overflow if it is defined not to, or if
    // X == A / Y for some A.
    if ((IsSigned && Q.IIQ.hasNoSignedWrap(Mul)) ||
        (!IsSigned && Q.IIQ.hasNoUnsignedWrap(Mul)) ||
        (IsSigned && match(X, m_SDiv(m_Value(), m_Specific(Op1)))) ||
        (!IsSigned && match(X, m_UDiv(m_Value(), m_Specific(Op1))))) {
      return IsDiv ? X : Constant::getNullValue(Op0->getType());
    }
  }

  if (isDivZero(Op0, Op1, Q, MaxRecurse, IsSigned))
    return IsDiv ? Constant::getNullValue(Op0->getType()) : Op0;

  if (Value *V = simplifyByDomEq(Opcode, Op0, Op1, Q, MaxRecurse))
    return V;

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = threadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = threadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  return nullptr;
}

/// These are simplifications common to SDiv and UDiv.
static Value *simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                          bool IsExact, const SimplifyQuery &Q,
                          unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q, MaxRecurse))
    return V;

  const APInt *DivC;
  if (IsExact && match(Op1, m_APInt(DivC))) {
    // If this is an exact divide by a constant, then the dividend (Op0) must
    // have at least as many trailing zeros as the divisor to divide evenly. If
    // it has fewer trailing zeros, then the result must be poison.
    if (DivC->countr_zero()) {
      KnownBits KnownOp0 = computeKnownBits(Op0, /* Depth */ 0, Q);
      if (KnownOp0.countMaxTrailingZeros() < DivC->countr_zero())
        return PoisonValue::get(Op0->getType());
    }

    // udiv exact (mul nsw X, C), C --> X
    // sdiv exact (mul nuw X, C), C --> X
    // where C is not a power of 2.
    Value *X;
    if (!DivC->isPowerOf2() &&
        (Opcode == Instruction::UDiv
             ? match(Op0, m_NSWMul(m_Value(X), m_Specific(Op1)))
             : match(Op0, m_NUWMul(m_Value(X), m_Specific(Op1)))))
      return X;
  }

  return nullptr;
}

/// These are simplifications common to SRem and URem.
static Value *simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q, MaxRecurse))
    return V;

  // (X << Y) % X -> 0
  if (Q.IIQ.UseInstrInfo) {
    if ((Opcode == Instruction::SRem &&
         match(Op0, m_NSWShl(m_Specific(Op1), m_Value()))) ||
        (Opcode == Instruction::URem &&
         match(Op0, m_NUWShl(m_Specific(Op1), m_Value()))))
      return Constant::getNullValue(Op0->getType());

    const APInt *C0;
    if (match(Op1, m_APInt(C0))) {
      // (srem (mul nsw X, C1), C0) -> 0 if C1 s% C0 == 0
      // (urem (mul nuw X, C1), C0) -> 0 if C1 u% C0 == 0
      if (Opcode == Instruction::SRem
              ? match(Op0,
                      m_NSWMul(m_Value(), m_CheckedInt([C0](const APInt &C) {
                                 return C.srem(*C0).isZero();
                               })))
              : match(Op0,
                      m_NUWMul(m_Value(), m_CheckedInt([C0](const APInt &C) {
                                 return C.urem(*C0).isZero();
                               }))))
        return Constant::getNullValue(Op0->getType());
    }
  }
  return nullptr;
}

/// Given operands for an SDiv, see if we can fold the result.
/// If not, this returns null.
static Value *simplifySDivInst(Value *Op0, Value *Op1, bool IsExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  // If two operands are negated and no signed overflow, return -1.
  if (isKnownNegation(Op0, Op1, /*NeedNSW=*/true))
    return Constant::getAllOnesValue(Op0->getType());

  return simplifyDiv(Instruction::SDiv, Op0, Op1, IsExact, Q, MaxRecurse);
}

Value *llvm::simplifySDivInst(Value *Op0, Value *Op1, bool IsExact,
                              const SimplifyQuery &Q) {
  return ::simplifySDivInst(Op0, Op1, IsExact, Q, RecursionLimit);
}

/// Given operands for a UDiv, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyUDivInst(Value *Op0, Value *Op1, bool IsExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  return simplifyDiv(Instruction::UDiv, Op0, Op1, IsExact, Q, MaxRecurse);
}

Value *llvm::simplifyUDivInst(Value *Op0, Value *Op1, bool IsExact,
                              const SimplifyQuery &Q) {
  return ::simplifyUDivInst(Op0, Op1, IsExact, Q, RecursionLimit);
}

/// Given operands for an SRem, see if we can fold the result.
/// If not, this returns null.
static Value *simplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  // If the divisor is 0, the result is undefined, so assume the divisor is -1.
  // srem Op0, (sext i1 X) --> srem Op0, -1 --> 0
  Value *X;
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return ConstantInt::getNullValue(Op0->getType());

  // If the two operands are negated, return 0.
  if (isKnownNegation(Op0, Op1))
    return ConstantInt::getNullValue(Op0->getType());

  return simplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::simplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::simplifySRemInst(Op0, Op1, Q, RecursionLimit);
}

/// Given operands for a URem, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  return simplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::simplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::simplifyURemInst(Op0, Op1, Q, RecursionLimit);
}

/// Returns true if a shift by \c Amount always yields poison.
static bool isPoisonShift(Value *Amount, const SimplifyQuery &Q) {
  Constant *C = dyn_cast<Constant>(Amount);
  if (!C)
    return false;

  // X shift by undef -> poison because it may shift by the bitwidth.
  if (Q.isUndefValue(C))
    return true;

  // Shifting by the bitwidth or more is poison. This covers scalars and
  // fixed/scalable vectors with splat constants.
  const APInt *AmountC;
  if (match(C, m_APInt(AmountC)) && AmountC->uge(AmountC->getBitWidth()))
    return true;

  // Try harder for fixed-length vectors:
  // If all lanes of a vector shift are poison, the whole shift is poison.
  if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
    for (unsigned I = 0,
                  E = cast<FixedVectorType>(C->getType())->getNumElements();
         I != E; ++I)
      if (!isPoisonShift(C->getAggregateElement(I), Q))
        return false;
    return true;
  }

  return false;
}
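
// Examples (illustrative): "shl i32 %x, 33" shifts by at least the bit width
// and is poison, as is a shift by undef (it may equal the bit width); for a
// fixed vector, an amount like <i32 32, i32 40> is poison in every lane.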

/// Given operands for an Shl, LShr or AShr, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyShift(Instruction::BinaryOps Opcode, Value *Op0,
                            Value *Op1, bool IsNSW, const SimplifyQuery &Q,
                            unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  // poison shift by X -> poison
  if (isa<PoisonValue>(Op0))
    return Op0;

  // 0 shift by X -> 0
  if (match(Op0, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X shift by 0 -> X
  // Shift-by-sign-extended bool must be shift-by-0 because shift-by-all-ones
  // would be poison.
  Value *X;
  if (match(Op1, m_Zero()) ||
      (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
    return Op0;

  // Fold undefined shifts.
  if (isPoisonShift(Op1, Q))
    return PoisonValue::get(Op0->getType());

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = threadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = threadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If any bits in the shift amount make that value greater than or equal to
  // the number of bits in the type, the shift is undefined.
  KnownBits KnownAmt = computeKnownBits(Op1, /* Depth */ 0, Q);
  if (KnownAmt.getMinValue().uge(KnownAmt.getBitWidth()))
    return PoisonValue::get(Op0->getType());

  // If all valid bits in the shift amount are known zero, the first operand is
  // unchanged.
  unsigned NumValidShiftBits = Log2_32_Ceil(KnownAmt.getBitWidth());
  if (KnownAmt.countMinTrailingZeros() >= NumValidShiftBits)
    return Op0;

  // Check for nsw shl leading to a poison value.
  if (IsNSW) {
    assert(Opcode == Instruction::Shl && "Expected shl for nsw instruction");
    KnownBits KnownVal = computeKnownBits(Op0, /* Depth */ 0, Q);
    KnownBits KnownShl = KnownBits::shl(KnownVal, KnownAmt);

    if (KnownVal.Zero.isSignBitSet())
      KnownShl.Zero.setSignBit();
    if (KnownVal.One.isSignBitSet())
      KnownShl.One.setSignBit();

    if (KnownShl.hasConflict())
      return PoisonValue::get(Op0->getType());
  }

  return nullptr;
}

/// Given operands for an LShr or AShr, see if we can fold the result. If not,
/// this returns null.
static Value *simplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0,
                                 Value *Op1, bool IsExact,
                                 const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V =
          simplifyShift(Opcode, Op0, Op1, /*IsNSW*/ false, Q, MaxRecurse))
    return V;

  // X >> X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // undef >> X -> 0
  // undef >> X -> undef (if it's exact)
  if (Q.isUndefValue(Op0))
    return IsExact ? Op0 : Constant::getNullValue(Op0->getType());

  // The low bit cannot be shifted out of an exact shift if it is set.
  // TODO: Generalize by counting trailing zeros (see fold for exact division).
  if (IsExact) {
    KnownBits Op0Known = computeKnownBits(Op0, /* Depth */ 0, Q);
    if (Op0Known.One[0])
      return Op0;
  }

  return nullptr;
}

/// Given operands for an Shl, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V =
          simplifyShift(Instruction::Shl, Op0, Op1, IsNSW, Q, MaxRecurse))
    return V;

  Type *Ty = Op0->getType();
  // undef << X -> 0
  // undef << X -> undef (if it's NSW/NUW)
  if (Q.isUndefValue(Op0))
    return IsNSW || IsNUW ? Op0 : Constant::getNullValue(Ty);

  // (X >> A) << A -> X
  Value *X;
  if (Q.IIQ.UseInstrInfo &&
      match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
    return X;

  // shl nuw i8 C, %x -> C iff C has sign bit set.
  if (IsNUW && match(Op0, m_Negative()))
    return Op0;
  // NOTE: could use computeKnownBits() / LazyValueInfo,
  // but the cost-benefit analysis suggests it isn't worth it.

  // "nuw" guarantees that only zeros are shifted out, and "nsw" guarantees
  // that the sign-bit does not change, so the only input that does not
  // produce poison is 0, and "0 << (bitwidth-1) --> 0".
  if (IsNSW && IsNUW &&
      match(Op1, m_SpecificInt(Ty->getScalarSizeInBits() - 1)))
    return Constant::getNullValue(Ty);

  return nullptr;
}

Value *llvm::simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Q) {
  return ::simplifyShlInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
}

/// Given operands for an LShr, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V = simplifyRightShift(Instruction::LShr, Op0, Op1, IsExact, Q,
                                    MaxRecurse))
    return V;

  // (X << A) >> A -> X
  Value *X;
  if (Q.IIQ.UseInstrInfo && match(Op0, m_NUWShl(m_Value(X), m_Specific(Op1))))
    return X;

  // ((X << A) | Y) >> A -> X if effective width of Y is not larger than A.
  // We can return X as we do in the above case since OR alters no bits in X.
  // SimplifyDemandedBits in InstCombine can do more general optimization for
  // bit manipulation. This pattern aims to provide opportunities for other
  // optimizers by supporting a simple but common case in InstSimplify.
  Value *Y;
  const APInt *ShRAmt, *ShLAmt;
  if (Q.IIQ.UseInstrInfo && match(Op1, m_APInt(ShRAmt)) &&
      match(Op0, m_c_Or(m_NUWShl(m_Value(X), m_APInt(ShLAmt)), m_Value(Y))) &&
      *ShRAmt == *ShLAmt) {
    const KnownBits YKnown = computeKnownBits(Y, /* Depth */ 0, Q);
    const unsigned EffWidthY = YKnown.countMaxActiveBits();
    if (ShRAmt->uge(EffWidthY))
      return X;
  }

  return nullptr;
}

Value *llvm::simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact,
                              const SimplifyQuery &Q) {
  return ::simplifyLShrInst(Op0, Op1, IsExact, Q, RecursionLimit);
}

/// Given operands for an AShr, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V = simplifyRightShift(Instruction::AShr, Op0, Op1, IsExact, Q,
                                    MaxRecurse))
    return V;

  // -1 >>a X --> -1
  // (-1 << X) a>> X --> -1
  // We could return the original -1 constant to preserve poison elements.
  if (match(Op0, m_AllOnes()) ||
      match(Op0, m_Shl(m_AllOnes(), m_Specific(Op1))))
    return Constant::getAllOnesValue(Op0->getType());

  // (X << A) >> A -> X
  Value *X;
  if (Q.IIQ.UseInstrInfo && match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
    return X;

  // Arithmetic shifting an all-sign-bit value is a no-op.
  unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
  if (NumSignBits == Op0->getType()->getScalarSizeInBits())
    return Op0;

  return nullptr;
}

Value *llvm::simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact,
                              const SimplifyQuery &Q) {
  return ::simplifyAShrInst(Op0, Op1, IsExact, Q, RecursionLimit);
}
1495
1496/// Commuted variants are assumed to be handled by calling this function again
1497/// with the parameters swapped.
1499 ICmpInst *UnsignedICmp, bool IsAnd,
1500 const SimplifyQuery &Q) {
1501 Value *X, *Y;
1502
1503 CmpPredicate EqPred;
1504 if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(Y), m_Zero())) ||
1505 !ICmpInst::isEquality(EqPred))
1506 return nullptr;
1507
1508 CmpPredicate UnsignedPred;
1509
1510 Value *A, *B;
1511 // Y = (A - B);
1512 if (match(Y, m_Sub(m_Value(A), m_Value(B)))) {
1513 if (match(UnsignedICmp,
1514 m_c_ICmp(UnsignedPred, m_Specific(A), m_Specific(B))) &&
1515 ICmpInst::isUnsigned(UnsignedPred)) {
1516 // A >=/<= B || (A - B) != 0 <--> true
1517 if ((UnsignedPred == ICmpInst::ICMP_UGE ||
1518 UnsignedPred == ICmpInst::ICMP_ULE) &&
1519 EqPred == ICmpInst::ICMP_NE && !IsAnd)
1520 return ConstantInt::getTrue(UnsignedICmp->getType());
1521 // A </> B && (A - B) == 0 <--> false
1522 if ((UnsignedPred == ICmpInst::ICMP_ULT ||
1523 UnsignedPred == ICmpInst::ICMP_UGT) &&
1524 EqPred == ICmpInst::ICMP_EQ && IsAnd)
1525 return ConstantInt::getFalse(UnsignedICmp->getType());
1526
1527 // A </> B && (A - B) != 0 <--> A </> B
1528 // A </> B || (A - B) != 0 <--> (A - B) != 0
1529 if (EqPred == ICmpInst::ICMP_NE && (UnsignedPred == ICmpInst::ICMP_ULT ||
1530 UnsignedPred == ICmpInst::ICMP_UGT))
1531 return IsAnd ? UnsignedICmp : ZeroICmp;
1532
1533 // A <=/>= B && (A - B) == 0 <--> (A - B) == 0
1534 // A <=/>= B || (A - B) == 0 <--> A <=/>= B
1535 if (EqPred == ICmpInst::ICMP_EQ && (UnsignedPred == ICmpInst::ICMP_ULE ||
1536 UnsignedPred == ICmpInst::ICMP_UGE))
1537 return IsAnd ? ZeroICmp : UnsignedICmp;
1538 }
1539
1540 // Given Y = (A - B)
1541 // Y >= A && Y != 0 --> Y >= A iff B != 0
1542 // Y < A || Y == 0 --> Y < A iff B != 0
1543 if (match(UnsignedICmp,
1544 m_c_ICmp(UnsignedPred, m_Specific(Y), m_Specific(A)))) {
1545 if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd &&
1546 EqPred == ICmpInst::ICMP_NE && isKnownNonZero(B, Q))
1547 return UnsignedICmp;
1548 if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd &&
1549 EqPred == ICmpInst::ICMP_EQ && isKnownNonZero(B, Q))
1550 return UnsignedICmp;
1551 }
1552 }
1553
1554 if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) &&
1555 ICmpInst::isUnsigned(UnsignedPred))
1556 ;
1557 else if (match(UnsignedICmp,
1558 m_ICmp(UnsignedPred, m_Specific(Y), m_Value(X))) &&
1559 ICmpInst::isUnsigned(UnsignedPred))
1560 UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
1561 else
1562 return nullptr;
1563
1564 // X > Y && Y == 0 --> Y == 0 iff X != 0
1565 // X > Y || Y == 0 --> X > Y iff X != 0
1566 if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
1567 isKnownNonZero(X, Q))
1568 return IsAnd ? ZeroICmp : UnsignedICmp;
1569
1570 // X <= Y && Y != 0 --> X <= Y iff X != 0
1571 // X <= Y || Y != 0 --> Y != 0 iff X != 0
1572 if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
1573 isKnownNonZero(X, Q))
1574 return IsAnd ? UnsignedICmp : ZeroICmp;
1575
1576 // The transforms below here are expected to be handled more generally with
1577 // simplifyAndOrOfICmpsWithLimitConst() or in InstCombine's
1578 // foldAndOrOfICmpsWithConstEq(). If we are looking to trim optimizer overlap,
1579 // these are candidates for removal.
1580
1581 // X < Y && Y != 0 --> X < Y
1582 // X < Y || Y != 0 --> Y != 0
1583 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
1584 return IsAnd ? UnsignedICmp : ZeroICmp;
1585
1586 // X >= Y && Y == 0 --> Y == 0
1587 // X >= Y || Y == 0 --> X >= Y
1588 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ)
1589 return IsAnd ? ZeroICmp : UnsignedICmp;
1590
1591 // X < Y && Y == 0 --> false
1592 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
1593 IsAnd)
1594 return getFalse(UnsignedICmp->getType());
1595
1596 // X >= Y || Y != 0 --> true
1597 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_NE &&
1598 !IsAnd)
1599 return getTrue(UnsignedICmp->getType());
1600
1601 return nullptr;
1602}
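// For illustration (hypothetical IR): an unsigned range check made redundant
// by the matching subtract, as handled above:
//   %d  = sub i32 %a, %b
//   %c0 = icmp uge i32 %a, %b
//   %c1 = icmp ne i32 %d, 0
//   %r  = or i1 %c0, %c1        ; --> true (either a uge b, or a != b)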
1603
1604/// Test if a pair of compares with a shared operand and 2 constants has an
1605/// empty set intersection, full set union, or if one compare is a superset of
1606/// the other.
 1607static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1,
1608 bool IsAnd) {
1609 // Look for this pattern: {and/or} (icmp X, C0), (icmp X, C1)).
1610 if (Cmp0->getOperand(0) != Cmp1->getOperand(0))
1611 return nullptr;
1612
1613 const APInt *C0, *C1;
1614 if (!match(Cmp0->getOperand(1), m_APInt(C0)) ||
1615 !match(Cmp1->getOperand(1), m_APInt(C1)))
1616 return nullptr;
1617
1618 auto Range0 = ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0);
1619 auto Range1 = ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1);
1620
1621 // For and-of-compares, check if the intersection is empty:
1622 // (icmp X, C0) && (icmp X, C1) --> empty set --> false
1623 if (IsAnd && Range0.intersectWith(Range1).isEmptySet())
1624 return getFalse(Cmp0->getType());
1625
1626 // For or-of-compares, check if the union is full:
1627 // (icmp X, C0) || (icmp X, C1) --> full set --> true
1628 if (!IsAnd && Range0.unionWith(Range1).isFullSet())
1629 return getTrue(Cmp0->getType());
1630
1631 // Is one range a superset of the other?
1632 // If this is and-of-compares, take the smaller set:
1633 // (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42
1634 // If this is or-of-compares, take the larger set:
1635 // (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4
1636 if (Range0.contains(Range1))
1637 return IsAnd ? Cmp1 : Cmp0;
1638 if (Range1.contains(Range0))
1639 return IsAnd ? Cmp0 : Cmp1;
1640
1641 return nullptr;
1642}
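// For illustration (hypothetical IR): the empty-intersection and full-union
// cases above fold, e.g.:
//   %c0 = icmp ult i32 %x, 4
//   %c1 = icmp ugt i32 %x, 10
//   %a  = and i1 %c0, %c1       ; --> false (empty intersection)
//   %d0 = icmp ult i32 %x, 11
//   %d1 = icmp ugt i32 %x, 3
//   %o  = or i1 %d0, %d1        ; --> true (union covers all values)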
1643
 1644static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
1645 const InstrInfoQuery &IIQ) {
1646 // (icmp (add V, C0), C1) & (icmp V, C0)
1647 CmpPredicate Pred0, Pred1;
1648 const APInt *C0, *C1;
1649 Value *V;
1650 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1651 return nullptr;
1652
1653 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1654 return nullptr;
1655
1656 auto *AddInst = cast<OverflowingBinaryOperator>(Op0->getOperand(0));
1657 if (AddInst->getOperand(1) != Op1->getOperand(1))
1658 return nullptr;
1659
1660 Type *ITy = Op0->getType();
1661 bool IsNSW = IIQ.hasNoSignedWrap(AddInst);
1662 bool IsNUW = IIQ.hasNoUnsignedWrap(AddInst);
1663
1664 const APInt Delta = *C1 - *C0;
1665 if (C0->isStrictlyPositive()) {
1666 if (Delta == 2) {
1667 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT)
1668 return getFalse(ITy);
1669 if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
1670 return getFalse(ITy);
1671 }
1672 if (Delta == 1) {
1673 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT)
1674 return getFalse(ITy);
1675 if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
1676 return getFalse(ITy);
1677 }
1678 }
1679 if (C0->getBoolValue() && IsNUW) {
1680 if (Delta == 2)
1681 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)
1682 return getFalse(ITy);
1683 if (Delta == 1)
1684 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT)
1685 return getFalse(ITy);
1686 }
1687
1688 return nullptr;
1689}
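// For illustration (hypothetical IR; C0 = 1, C1 = 3, so Delta == 2, nuw
// case): the two conditions below contradict, so the 'and' folds to false:
//   %a  = add nuw i32 %v, 1
//   %c0 = icmp ult i32 %a, 3    ; implies v <= 1
//   %c1 = icmp ugt i32 %v, 1    ; requires v > 1
//   %r  = and i1 %c0, %c1       ; --> false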
1690
1691/// Try to simplify and/or of icmp with ctpop intrinsic.
 1692static Value *simplifyAndOrOfICmpsWithCtpop(ICmpInst *Cmp0, ICmpInst *Cmp1,
1693 bool IsAnd) {
1694 CmpPredicate Pred0, Pred1;
1695 Value *X;
1696 const APInt *C;
1697 if (!match(Cmp0, m_ICmp(Pred0, m_Intrinsic<Intrinsic::ctpop>(m_Value(X)),
1698 m_APInt(C))) ||
1699 !match(Cmp1, m_ICmp(Pred1, m_Specific(X), m_ZeroInt())) || C->isZero())
1700 return nullptr;
1701
1702 // (ctpop(X) == C) || (X != 0) --> X != 0 where C > 0
1703 if (!IsAnd && Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_NE)
1704 return Cmp1;
1705 // (ctpop(X) != C) && (X == 0) --> X == 0 where C > 0
1706 if (IsAnd && Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_EQ)
1707 return Cmp1;
1708
1709 return nullptr;
1710}
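// For illustration (hypothetical IR): a nonzero popcount implies a nonzero
// input, so the popcount check is subsumed:
//   %p  = call i32 @llvm.ctpop.i32(i32 %x)
//   %c0 = icmp eq i32 %p, 2
//   %c1 = icmp ne i32 %x, 0
//   %r  = or i1 %c0, %c1        ; --> %c1 (%c0 already implies %c1)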
1711
 1712static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1,
1713 const SimplifyQuery &Q) {
1714 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true, Q))
1715 return X;
1716 if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/true, Q))
1717 return X;
1718
1719 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true))
1720 return X;
1721
1722 if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op0, Op1, true))
1723 return X;
1724 if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op1, Op0, true))
1725 return X;
1726
1727 if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1728 return X;
1729 if (Value *X = simplifyAndOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1730 return X;
1731
1732 return nullptr;
1733}
1734
 1735static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
1736 const InstrInfoQuery &IIQ) {
1737 // (icmp (add V, C0), C1) | (icmp V, C0)
1738 CmpPredicate Pred0, Pred1;
1739 const APInt *C0, *C1;
1740 Value *V;
1741 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1742 return nullptr;
1743
1744 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1745 return nullptr;
1746
1747 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
1748 if (AddInst->getOperand(1) != Op1->getOperand(1))
1749 return nullptr;
1750
1751 Type *ITy = Op0->getType();
1752 bool IsNSW = IIQ.hasNoSignedWrap(AddInst);
1753 bool IsNUW = IIQ.hasNoUnsignedWrap(AddInst);
1754
1755 const APInt Delta = *C1 - *C0;
1756 if (C0->isStrictlyPositive()) {
1757 if (Delta == 2) {
1758 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
1759 return getTrue(ITy);
1760 if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
1761 return getTrue(ITy);
1762 }
1763 if (Delta == 1) {
1764 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
1765 return getTrue(ITy);
1766 if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
1767 return getTrue(ITy);
1768 }
1769 }
1770 if (C0->getBoolValue() && IsNUW) {
1771 if (Delta == 2)
1772 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
1773 return getTrue(ITy);
1774 if (Delta == 1)
1775 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
1776 return getTrue(ITy);
1777 }
1778
1779 return nullptr;
1780}
1781
 1782static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1,
1783 const SimplifyQuery &Q) {
1784 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false, Q))
1785 return X;
1786 if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/false, Q))
1787 return X;
1788
1789 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false))
1790 return X;
1791
1792 if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op0, Op1, false))
1793 return X;
1794 if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op1, Op0, false))
1795 return X;
1796
1797 if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1798 return X;
1799 if (Value *X = simplifyOrOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1800 return X;
1801
1802 return nullptr;
1803}
1804
 1805static Value *simplifyAndOrOfFCmps(const SimplifyQuery &Q, FCmpInst *LHS,
1806 FCmpInst *RHS, bool IsAnd) {
1807 Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
1808 Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
1809 if (LHS0->getType() != RHS0->getType())
1810 return nullptr;
1811
1812 FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1813 auto AbsOrSelfLHS0 = m_CombineOr(m_Specific(LHS0), m_FAbs(m_Specific(LHS0)));
1814 if ((PredL == FCmpInst::FCMP_ORD || PredL == FCmpInst::FCMP_UNO) &&
1815 ((FCmpInst::isOrdered(PredR) && IsAnd) ||
1816 (FCmpInst::isUnordered(PredR) && !IsAnd))) {
1817 // (fcmp ord X, 0) & (fcmp o** X/abs(X), Y) --> fcmp o** X/abs(X), Y
1818 // (fcmp uno X, 0) & (fcmp o** X/abs(X), Y) --> false
1819 // (fcmp uno X, 0) | (fcmp u** X/abs(X), Y) --> fcmp u** X/abs(X), Y
1820 // (fcmp ord X, 0) | (fcmp u** X/abs(X), Y) --> true
1821 if ((match(RHS0, AbsOrSelfLHS0) || match(RHS1, AbsOrSelfLHS0)) &&
1822 match(LHS1, m_PosZeroFP()))
1823 return FCmpInst::isOrdered(PredL) == FCmpInst::isOrdered(PredR)
1824 ? static_cast<Value *>(RHS)
1825 : ConstantInt::getBool(LHS->getType(), !IsAnd);
1826 }
1827
1828 auto AbsOrSelfRHS0 = m_CombineOr(m_Specific(RHS0), m_FAbs(m_Specific(RHS0)));
1829 if ((PredR == FCmpInst::FCMP_ORD || PredR == FCmpInst::FCMP_UNO) &&
1830 ((FCmpInst::isOrdered(PredL) && IsAnd) ||
1831 (FCmpInst::isUnordered(PredL) && !IsAnd))) {
1832 // (fcmp o** X/abs(X), Y) & (fcmp ord X, 0) --> fcmp o** X/abs(X), Y
1833 // (fcmp o** X/abs(X), Y) & (fcmp uno X, 0) --> false
1834 // (fcmp u** X/abs(X), Y) | (fcmp uno X, 0) --> fcmp u** X/abs(X), Y
1835 // (fcmp u** X/abs(X), Y) | (fcmp ord X, 0) --> true
1836 if ((match(LHS0, AbsOrSelfRHS0) || match(LHS1, AbsOrSelfRHS0)) &&
1837 match(RHS1, m_PosZeroFP()))
1838 return FCmpInst::isOrdered(PredL) == FCmpInst::isOrdered(PredR)
1839 ? static_cast<Value *>(LHS)
1840 : ConstantInt::getBool(LHS->getType(), !IsAnd);
1841 }
1842
1843 return nullptr;
1844}
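// For illustration (hypothetical IR): an ordered comparison already implies
// its operands are not NaN, so an explicit 'ord' check is redundant:
//   %ord = fcmp ord double %x, 0.0
//   %olt = fcmp olt double %x, %y
//   %r0  = and i1 %ord, %olt    ; --> %olt
//   %uno = fcmp uno double %x, 0.0
//   %r1  = and i1 %uno, %olt    ; --> false (%olt requires ordered operands)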
1845
 1846static Value *simplifyAndOrOfCmps(const SimplifyQuery &Q, Value *Op0,
1847 Value *Op1, bool IsAnd) {
1848 // Look through casts of the 'and' operands to find compares.
1849 auto *Cast0 = dyn_cast<CastInst>(Op0);
1850 auto *Cast1 = dyn_cast<CastInst>(Op1);
1851 if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
1852 Cast0->getSrcTy() == Cast1->getSrcTy()) {
1853 Op0 = Cast0->getOperand(0);
1854 Op1 = Cast1->getOperand(0);
1855 }
1856
1857 Value *V = nullptr;
1858 auto *ICmp0 = dyn_cast<ICmpInst>(Op0);
1859 auto *ICmp1 = dyn_cast<ICmpInst>(Op1);
1860 if (ICmp0 && ICmp1)
1861 V = IsAnd ? simplifyAndOfICmps(ICmp0, ICmp1, Q)
1862 : simplifyOrOfICmps(ICmp0, ICmp1, Q);
1863
1864 auto *FCmp0 = dyn_cast<FCmpInst>(Op0);
1865 auto *FCmp1 = dyn_cast<FCmpInst>(Op1);
1866 if (FCmp0 && FCmp1)
1867 V = simplifyAndOrOfFCmps(Q, FCmp0, FCmp1, IsAnd);
1868
1869 if (!V)
1870 return nullptr;
1871 if (!Cast0)
1872 return V;
1873
1874 // If we looked through casts, we can only handle a constant simplification
1875 // because we are not allowed to create a cast instruction here.
1876 if (auto *C = dyn_cast<Constant>(V))
1877 return ConstantFoldCastOperand(Cast0->getOpcode(), C, Cast0->getType(),
1878 Q.DL);
1879
1880 return nullptr;
1881}
1882
1883static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
1884 const SimplifyQuery &Q,
1885 bool AllowRefinement,
 1886 SmallVectorImpl<Instruction *> *DropFlags,
1887 unsigned MaxRecurse);
1888
1889static Value *simplifyAndOrWithICmpEq(unsigned Opcode, Value *Op0, Value *Op1,
1890 const SimplifyQuery &Q,
1891 unsigned MaxRecurse) {
1892 assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1893 "Must be and/or");
1894 CmpPredicate Pred;
1895 Value *A, *B;
1896 if (!match(Op0, m_ICmp(Pred, m_Value(A), m_Value(B))) ||
1897 !ICmpInst::isEquality(Pred))
1898 return nullptr;
1899
1900 auto Simplify = [&](Value *Res) -> Value * {
1901 Constant *Absorber = ConstantExpr::getBinOpAbsorber(Opcode, Res->getType());
1902
1903 // and (icmp eq a, b), x implies (a==b) inside x.
1904 // or (icmp ne a, b), x implies (a==b) inside x.
1905 // If x simplifies to true/false, we can simplify the and/or.
1906 if (Pred ==
1907 (Opcode == Instruction::And ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE)) {
1908 if (Res == Absorber)
1909 return Absorber;
1910 if (Res == ConstantExpr::getBinOpIdentity(Opcode, Res->getType()))
1911 return Op0;
1912 return nullptr;
1913 }
1914
1915 // If we have and (icmp ne a, b), x and for a==b we can simplify x to false,
1916 // then we can drop the icmp, as x will already be false in the case where
1917 // the icmp is false. Similar for or and true.
1918 if (Res == Absorber)
1919 return Op1;
1920 return nullptr;
1921 };
1922
1923 // In the final case (Res == Absorber with inverted predicate), it is safe to
1924 // refine poison during simplification, but not undef. For simplicity always
1925 // disable undef-based folds here.
1926 if (Value *Res = simplifyWithOpReplaced(Op1, A, B, Q.getWithoutUndef(),
1927 /* AllowRefinement */ true,
1928 /* DropFlags */ nullptr, MaxRecurse))
1929 return Simplify(Res);
1930 if (Value *Res = simplifyWithOpReplaced(Op1, B, A, Q.getWithoutUndef(),
1931 /* AllowRefinement */ true,
1932 /* DropFlags */ nullptr, MaxRecurse))
1933 return Simplify(Res);
1934
1935 return nullptr;
1936}
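// For illustration (hypothetical IR): substituting %x == 0 from the equality
// into the other operand simplifies it to true, the identity for 'and':
//   %eq = icmp eq i32 %x, 0
//   %lt = icmp ult i32 %x, 4
//   %r  = and i1 %eq, %lt       ; --> %eq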
1937
1938/// Given a bitwise logic op, check if the operands are add/sub with a common
1939/// source value and inverted constant (identity: C - X -> ~(X + ~C)).
 1940static Value *simplifyLogicOfAddSub(Value *Op0, Value *Op1,
1941 Instruction::BinaryOps Opcode) {
1942 assert(Op0->getType() == Op1->getType() && "Mismatched binop types");
1943 assert(BinaryOperator::isBitwiseLogicOp(Opcode) && "Expected logic op");
1944 Value *X;
1945 Constant *C1, *C2;
1946 if ((match(Op0, m_Add(m_Value(X), m_Constant(C1))) &&
1947 match(Op1, m_Sub(m_Constant(C2), m_Specific(X)))) ||
1948 (match(Op1, m_Add(m_Value(X), m_Constant(C1))) &&
1949 match(Op0, m_Sub(m_Constant(C2), m_Specific(X))))) {
1950 if (ConstantExpr::getNot(C1) == C2) {
1951 // (X + C) & (~C - X) --> (X + C) & ~(X + C) --> 0
1952 // (X + C) | (~C - X) --> (X + C) | ~(X + C) --> -1
1953 // (X + C) ^ (~C - X) --> (X + C) ^ ~(X + C) --> -1
1954 Type *Ty = Op0->getType();
1955 return Opcode == Instruction::And ? ConstantInt::getNullValue(Ty)
1956 : ConstantInt::getAllOnesValue(Ty);
1957 }
1958 }
1959 return nullptr;
1960}
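// For illustration (hypothetical IR; C1 = 5 and C2 = -6 == ~5): the second
// operand is the bitwise 'not' of the first, so:
//   %a  = add i32 %x, 5
//   %s  = sub i32 -6, %x        ; %s == ~%a
//   %r0 = and i32 %a, %s        ; --> 0
//   %r1 = or  i32 %a, %s        ; --> -1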
1961
1962// Commutative patterns for and that will be tried with both operand orders.
 1963static Value *simplifyAndCommutative(Value *Op0, Value *Op1,
1964 const SimplifyQuery &Q,
1965 unsigned MaxRecurse) {
1966 // ~A & A = 0
1967 if (match(Op0, m_Not(m_Specific(Op1))))
1968 return Constant::getNullValue(Op0->getType());
1969
1970 // (A | ?) & A = A
1971 if (match(Op0, m_c_Or(m_Specific(Op1), m_Value())))
1972 return Op1;
1973
1974 // (X | ~Y) & (X | Y) --> X
1975 Value *X, *Y;
1976 if (match(Op0, m_c_Or(m_Value(X), m_Not(m_Value(Y)))) &&
1977 match(Op1, m_c_Or(m_Specific(X), m_Specific(Y))))
1978 return X;
1979
1980 // If we have a multiplication overflow check that is being 'and'ed with a
1981 // check that one of the multipliers is not zero, we can omit the 'and', and
1982 // only keep the overflow check.
1983 if (isCheckForZeroAndMulWithOverflow(Op0, Op1, true))
1984 return Op1;
1985
1986 // -A & A = A if A is a power of two or zero.
1987 if (match(Op0, m_Neg(m_Specific(Op1))) &&
1988 isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT))
1989 return Op1;
1990
1991 // This is a similar pattern used for checking if a value is a power-of-2:
1992 // (A - 1) & A --> 0 (if A is a power-of-2 or 0)
1993 if (match(Op0, m_Add(m_Specific(Op1), m_AllOnes())) &&
1994 isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT))
1995 return Constant::getNullValue(Op1->getType());
1996
1997 // (x << N) & ((x << M) - 1) --> 0, where x is known to be a power of 2 and
1998 // M <= N.
1999 const APInt *Shift1, *Shift2;
2000 if (match(Op0, m_Shl(m_Value(X), m_APInt(Shift1))) &&
2001 match(Op1, m_Add(m_Shl(m_Specific(X), m_APInt(Shift2)), m_AllOnes())) &&
2002 isKnownToBeAPowerOfTwo(X, Q.DL, /*OrZero*/ true, /*Depth*/ 0, Q.AC,
2003 Q.CxtI) &&
2004 Shift1->uge(*Shift2))
2005 return Constant::getNullValue(Op0->getType());
2006
2007 if (Value *V =
2008 simplifyAndOrWithICmpEq(Instruction::And, Op0, Op1, Q, MaxRecurse))
2009 return V;
2010
2011 return nullptr;
2012}
2013
2014/// Given operands for an And, see if we can fold the result.
2015/// If not, this returns null.
2016static Value *simplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2017 unsigned MaxRecurse) {
2018 if (Constant *C = foldOrCommuteConstant(Instruction::And, Op0, Op1, Q))
2019 return C;
2020
2021 // X & poison -> poison
2022 if (isa<PoisonValue>(Op1))
2023 return Op1;
2024
2025 // X & undef -> 0
2026 if (Q.isUndefValue(Op1))
2027 return Constant::getNullValue(Op0->getType());
2028
2029 // X & X = X
2030 if (Op0 == Op1)
2031 return Op0;
2032
2033 // X & 0 = 0
2034 if (match(Op1, m_Zero()))
2035 return Constant::getNullValue(Op0->getType());
2036
2037 // X & -1 = X
2038 if (match(Op1, m_AllOnes()))
2039 return Op0;
2040
2041 if (Value *Res = simplifyAndCommutative(Op0, Op1, Q, MaxRecurse))
2042 return Res;
2043 if (Value *Res = simplifyAndCommutative(Op1, Op0, Q, MaxRecurse))
2044 return Res;
2045
2046 if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::And))
2047 return V;
2048
2049 // A mask that only clears known zeros of a shifted value is a no-op.
2050 const APInt *Mask;
2051 const APInt *ShAmt;
2052 Value *X, *Y;
2053 if (match(Op1, m_APInt(Mask))) {
2054 // If all bits in the inverted and shifted mask are clear:
2055 // and (shl X, ShAmt), Mask --> shl X, ShAmt
2056 if (match(Op0, m_Shl(m_Value(X), m_APInt(ShAmt))) &&
2057 (~(*Mask)).lshr(*ShAmt).isZero())
2058 return Op0;
2059
2060 // If all bits in the inverted and shifted mask are clear:
2061 // and (lshr X, ShAmt), Mask --> lshr X, ShAmt
2062 if (match(Op0, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
2063 (~(*Mask)).shl(*ShAmt).isZero())
2064 return Op0;
2065 }
2066
2067 // and 2^x-1, 2^C --> 0 where x <= C.
2068 const APInt *PowerC;
2069 Value *Shift;
2070 if (match(Op1, m_Power2(PowerC)) &&
2071 match(Op0, m_Add(m_Value(Shift), m_AllOnes())) &&
2072 isKnownToBeAPowerOfTwo(Shift, Q.DL, /*OrZero*/ false, 0, Q.AC, Q.CxtI,
2073 Q.DT)) {
2074 KnownBits Known = computeKnownBits(Shift, /* Depth */ 0, Q);
2075 // Use getActiveBits() to make use of the additional power of two knowledge
2076 if (PowerC->getActiveBits() >= Known.getMaxValue().getActiveBits())
2077 return ConstantInt::getNullValue(Op1->getType());
2078 }
2079
2080 if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, true))
2081 return V;
2082
2083 // Try some generic simplifications for associative operations.
2084 if (Value *V =
2085 simplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q, MaxRecurse))
2086 return V;
2087
2088 // And distributes over Or. Try some generic simplifications based on this.
2089 if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1,
2090 Instruction::Or, Q, MaxRecurse))
2091 return V;
2092
2093 // And distributes over Xor. Try some generic simplifications based on this.
2094 if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1,
2095 Instruction::Xor, Q, MaxRecurse))
2096 return V;
2097
2098 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2099 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2100 // A & (A && B) -> A && B
2101 if (match(Op1, m_Select(m_Specific(Op0), m_Value(), m_Zero())))
2102 return Op1;
2103 else if (match(Op0, m_Select(m_Specific(Op1), m_Value(), m_Zero())))
2104 return Op0;
2105 }
2106 // If the operation is with the result of a select instruction, check
2107 // whether operating on either branch of the select always yields the same
2108 // value.
2109 if (Value *V =
2110 threadBinOpOverSelect(Instruction::And, Op0, Op1, Q, MaxRecurse))
2111 return V;
2112 }
2113
2114 // If the operation is with the result of a phi instruction, check whether
2115 // operating on all incoming values of the phi always yields the same value.
2116 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2117 if (Value *V =
2118 threadBinOpOverPHI(Instruction::And, Op0, Op1, Q, MaxRecurse))
2119 return V;
2120
2121 // Assuming the effective width of Y is not larger than A, i.e. all bits
2122 // from X and Y are disjoint in (X << A) | Y,
2123 // if the mask of this AND op covers all bits of X or Y, while it covers
2124 // no bits from the other, we can bypass this AND op. E.g.,
2125 // ((X << A) | Y) & Mask -> Y,
2126 // if Mask = ((1 << effective_width_of(Y)) - 1)
2127 // ((X << A) | Y) & Mask -> X << A,
2128 // if Mask = ((1 << effective_width_of(X)) - 1) << A
2129 // SimplifyDemandedBits in InstCombine can optimize the general case.
2130 // This pattern aims to help other passes for a common case.
2131 Value *XShifted;
2132 if (Q.IIQ.UseInstrInfo && match(Op1, m_APInt(Mask)) &&
 2133 match(Op0, m_c_Or(m_CombineAnd(m_NUWShl(m_Value(X), m_APInt(ShAmt)),
2134 m_Value(XShifted)),
2135 m_Value(Y)))) {
2136 const unsigned Width = Op0->getType()->getScalarSizeInBits();
2137 const unsigned ShftCnt = ShAmt->getLimitedValue(Width);
2138 const KnownBits YKnown = computeKnownBits(Y, /* Depth */ 0, Q);
2139 const unsigned EffWidthY = YKnown.countMaxActiveBits();
2140 if (EffWidthY <= ShftCnt) {
2141 const KnownBits XKnown = computeKnownBits(X, /* Depth */ 0, Q);
2142 const unsigned EffWidthX = XKnown.countMaxActiveBits();
2143 const APInt EffBitsY = APInt::getLowBitsSet(Width, EffWidthY);
2144 const APInt EffBitsX = APInt::getLowBitsSet(Width, EffWidthX) << ShftCnt;
2145 // If the mask is extracting all bits from X or Y as is, we can skip
2146 // this AND op.
2147 if (EffBitsY.isSubsetOf(*Mask) && !EffBitsX.intersects(*Mask))
2148 return Y;
2149 if (EffBitsX.isSubsetOf(*Mask) && !EffBitsY.intersects(*Mask))
2150 return XShifted;
2151 }
2152 }
2153
2154 // ((X | Y) ^ X ) & ((X | Y) ^ Y) --> 0
2155 // ((X | Y) ^ Y ) & ((X | Y) ^ X) --> 0
 2156 BinaryOperator *Or;
 2157 if (match(Op0, m_c_Xor(m_Value(X),
 2158 m_CombineAnd(m_BinOp(Or),
 2159 m_c_Or(m_Deferred(X), m_Value(Y))))) &&
 2160 match(Op1, m_c_Xor(m_Specific(Or), m_Specific(Y))))
 2161 return Constant::getNullValue(Op0->getType());
2162
2163 const APInt *C1;
2164 Value *A;
2165 // (A ^ C) & (A ^ ~C) -> 0
2166 if (match(Op0, m_Xor(m_Value(A), m_APInt(C1))) &&
2167 match(Op1, m_Xor(m_Specific(A), m_SpecificInt(~*C1))))
2168 return Constant::getNullValue(Op0->getType());
2169
2170 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2171 if (std::optional<bool> Implied = isImpliedCondition(Op0, Op1, Q.DL)) {
2172 // If Op0 is true implies Op1 is true, then Op0 is a subset of Op1.
2173 if (*Implied == true)
2174 return Op0;
2175 // If Op0 is true implies Op1 is false, then they are not true together.
2176 if (*Implied == false)
2177 return ConstantInt::getFalse(Op0->getType());
2178 }
2179 if (std::optional<bool> Implied = isImpliedCondition(Op1, Op0, Q.DL)) {
2180 // If Op1 is true implies Op0 is true, then Op1 is a subset of Op0.
2181 if (*Implied)
2182 return Op1;
2183 // If Op1 is true implies Op0 is false, then they are not true together.
2184 if (!*Implied)
2185 return ConstantInt::getFalse(Op1->getType());
2186 }
2187 }
2188
2189 if (Value *V = simplifyByDomEq(Instruction::And, Op0, Op1, Q, MaxRecurse))
2190 return V;
2191
2192 return nullptr;
2193}
2194
 2195Value *llvm::simplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2196 return ::simplifyAndInst(Op0, Op1, Q, RecursionLimit);
2197}
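// A minimal usage sketch (hypothetical caller; DL, TLI, DT, AC, and I are
// assumed to exist in that caller, not in this file):
//   const SimplifyQuery Q(DL, &TLI, &DT, &AC, &I);
//   if (Value *V = simplifyAndInst(I.getOperand(0), I.getOperand(1), Q))
//     I.replaceAllUsesWith(V);  // folds without creating new instructions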
2198
2199// TODO: Many of these folds could use LogicalAnd/LogicalOr.
 2200static Value *simplifyOrLogic(Value *X, Value *Y) {
2201 assert(X->getType() == Y->getType() && "Expected same type for 'or' ops");
2202 Type *Ty = X->getType();
2203
2204 // X | ~X --> -1
2205 if (match(Y, m_Not(m_Specific(X))))
2206 return ConstantInt::getAllOnesValue(Ty);
2207
2208 // X | ~(X & ?) = -1
2209 if (match(Y, m_Not(m_c_And(m_Specific(X), m_Value()))))
2210 return ConstantInt::getAllOnesValue(Ty);
2211
2212 // X | (X & ?) --> X
2213 if (match(Y, m_c_And(m_Specific(X), m_Value())))
2214 return X;
2215
2216 Value *A, *B;
2217
2218 // (A ^ B) | (A | B) --> A | B
2219 // (A ^ B) | (B | A) --> B | A
2220 if (match(X, m_Xor(m_Value(A), m_Value(B))) &&
 2221 match(Y, m_c_Or(m_Specific(A), m_Specific(B))))
2222 return Y;
2223
2224 // ~(A ^ B) | (A | B) --> -1
2225 // ~(A ^ B) | (B | A) --> -1
2226 if (match(X, m_Not(m_Xor(m_Value(A), m_Value(B)))) &&
 2227 match(Y, m_c_Or(m_Specific(A), m_Specific(B))))
2228 return ConstantInt::getAllOnesValue(Ty);
2229
2230 // (A & ~B) | (A ^ B) --> A ^ B
2231 // (~B & A) | (A ^ B) --> A ^ B
2232 // (A & ~B) | (B ^ A) --> B ^ A
2233 // (~B & A) | (B ^ A) --> B ^ A
2234 if (match(X, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
 2235 match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
2236 return Y;
2237
2238 // (~A ^ B) | (A & B) --> ~A ^ B
2239 // (B ^ ~A) | (A & B) --> B ^ ~A
2240 // (~A ^ B) | (B & A) --> ~A ^ B
2241 // (B ^ ~A) | (B & A) --> B ^ ~A
2242 if (match(X, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
 2243 match(Y, m_c_And(m_Specific(A), m_Specific(B))))
2244 return X;
2245
2246 // (~A | B) | (A ^ B) --> -1
2247 // (~A | B) | (B ^ A) --> -1
2248 // (B | ~A) | (A ^ B) --> -1
2249 // (B | ~A) | (B ^ A) --> -1
2250 if (match(X, m_c_Or(m_Not(m_Value(A)), m_Value(B))) &&
 2251 match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
2252 return ConstantInt::getAllOnesValue(Ty);
2253
2254 // (~A & B) | ~(A | B) --> ~A
2255 // (~A & B) | ~(B | A) --> ~A
2256 // (B & ~A) | ~(A | B) --> ~A
2257 // (B & ~A) | ~(B | A) --> ~A
2258 Value *NotA;
 2259 if (match(X, m_c_And(m_CombineAnd(m_Value(NotA), m_Not(m_Value(A))),
 2260 m_Value(B))) &&
 2261 match(Y, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
 2262 return NotA;
2263 // The same is true of Logical And
2264 // TODO: This could share the logic of the version above if there was a
2265 // version of LogicalAnd that allowed more than just i1 types.
 2266 if (match(X, m_c_LogicalAnd(m_CombineAnd(m_Value(NotA), m_Not(m_Value(A))),
 2267 m_Value(B))) &&
 2268 match(Y, m_Not(m_c_LogicalOr(m_Specific(A), m_Specific(B)))))
 2269 return NotA;
2270
2271 // ~(A ^ B) | (A & B) --> ~(A ^ B)
2272 // ~(A ^ B) | (B & A) --> ~(A ^ B)
2273 Value *NotAB;
 2274 if (match(X, m_CombineAnd(m_Not(m_c_Xor(m_Value(A), m_Value(B))),
 2275 m_Value(NotAB))) &&
 2276 match(Y, m_c_And(m_Specific(A), m_Specific(B))))
 2277 return NotAB;
2278
2279 // ~(A & B) | (A ^ B) --> ~(A & B)
2280 // ~(A & B) | (B ^ A) --> ~(A & B)
 2281 if (match(X, m_CombineAnd(m_Not(m_c_And(m_Value(A), m_Value(B))),
 2282 m_Value(NotAB))) &&
 2283 match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
 2284 return NotAB;
2285
2286 return nullptr;
2287}
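// For illustration (hypothetical IR): the xor of two values can never set a
// bit that their or does not, so:
//   %x = xor i32 %a, %b
//   %o = or  i32 %a, %b
//   %r = or  i32 %x, %o         ; --> %o ((A ^ B) | (A | B) --> A | B)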
2288
2289/// Given operands for an Or, see if we can fold the result.
2290/// If not, this returns null.
2291static Value *simplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2292 unsigned MaxRecurse) {
2293 if (Constant *C = foldOrCommuteConstant(Instruction::Or, Op0, Op1, Q))
2294 return C;
2295
2296 // X | poison -> poison
2297 if (isa<PoisonValue>(Op1))
2298 return Op1;
2299
2300 // X | undef -> -1
2301 // X | -1 = -1
2302 // Do not return Op1 because it may contain undef elements if it's a vector.
2303 if (Q.isUndefValue(Op1) || match(Op1, m_AllOnes()))
2304 return Constant::getAllOnesValue(Op0->getType());
2305
2306 // X | X = X
2307 // X | 0 = X
2308 if (Op0 == Op1 || match(Op1, m_Zero()))
2309 return Op0;
2310
2311 if (Value *R = simplifyOrLogic(Op0, Op1))
2312 return R;
2313 if (Value *R = simplifyOrLogic(Op1, Op0))
2314 return R;
2315
2316 if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Or))
2317 return V;
2318
2319 // Rotated -1 is still -1:
2320 // (-1 << X) | (-1 >> (C - X)) --> -1
2321 // (-1 >> X) | (-1 << (C - X)) --> -1
2322 // ...with C <= bitwidth (and commuted variants).
2323 Value *X, *Y;
2324 if ((match(Op0, m_Shl(m_AllOnes(), m_Value(X))) &&
2325 match(Op1, m_LShr(m_AllOnes(), m_Value(Y)))) ||
2326 (match(Op1, m_Shl(m_AllOnes(), m_Value(X))) &&
2327 match(Op0, m_LShr(m_AllOnes(), m_Value(Y))))) {
2328 const APInt *C;
2329 if ((match(X, m_Sub(m_APInt(C), m_Specific(Y))) ||
2330 match(Y, m_Sub(m_APInt(C), m_Specific(X)))) &&
2331 C->ule(X->getType()->getScalarSizeInBits())) {
2332 return ConstantInt::getAllOnesValue(X->getType());
2333 }
2334 }
2335
2336 // A funnel shift (rotate) can be decomposed into simpler shifts. See if we
2337 // are mixing in another shift that is redundant with the funnel shift.
2338
2339 // (fshl X, ?, Y) | (shl X, Y) --> fshl X, ?, Y
2340 // (shl X, Y) | (fshl X, ?, Y) --> fshl X, ?, Y
2341 if (match(Op0,
2342 m_Intrinsic<Intrinsic::fshl>(m_Value(X), m_Value(), m_Value(Y))) &&
2343 match(Op1, m_Shl(m_Specific(X), m_Specific(Y))))
2344 return Op0;
2345 if (match(Op1,
2346 m_Intrinsic<Intrinsic::fshl>(m_Value(X), m_Value(), m_Value(Y))) &&
2347 match(Op0, m_Shl(m_Specific(X), m_Specific(Y))))
2348 return Op1;
2349
2350 // (fshr ?, X, Y) | (lshr X, Y) --> fshr ?, X, Y
2351 // (lshr X, Y) | (fshr ?, X, Y) --> fshr ?, X, Y
2352 if (match(Op0,
2353 m_Intrinsic<Intrinsic::fshr>(m_Value(), m_Value(X), m_Value(Y))) &&
2354 match(Op1, m_LShr(m_Specific(X), m_Specific(Y))))
2355 return Op0;
2356 if (match(Op1,
2357 m_Intrinsic<Intrinsic::fshr>(m_Value(), m_Value(X), m_Value(Y))) &&
2358 match(Op0, m_LShr(m_Specific(X), m_Specific(Y))))
2359 return Op1;
2360
2361 if (Value *V =
2362 simplifyAndOrWithICmpEq(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2363 return V;
2364 if (Value *V =
2365 simplifyAndOrWithICmpEq(Instruction::Or, Op1, Op0, Q, MaxRecurse))
2366 return V;
2367
2368 if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, false))
2369 return V;
2370
 2371 // If we have a multiplication overflow check that is being 'or'ed with a
 2372 // check that one of the multipliers is not zero, we can omit the 'or', and
2373 // only keep the overflow check.
2374 if (isCheckForZeroAndMulWithOverflow(Op0, Op1, false))
2375 return Op1;
2376 if (isCheckForZeroAndMulWithOverflow(Op1, Op0, false))
2377 return Op0;
2378
2379 // Try some generic simplifications for associative operations.
2380 if (Value *V =
2381 simplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2382 return V;
2383
2384 // Or distributes over And. Try some generic simplifications based on this.
2385 if (Value *V = expandCommutativeBinOp(Instruction::Or, Op0, Op1,
2386 Instruction::And, Q, MaxRecurse))
2387 return V;
2388
2389 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2390 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2391 // A | (A || B) -> A || B
2392 if (match(Op1, m_Select(m_Specific(Op0), m_One(), m_Value())))
2393 return Op1;
2394 else if (match(Op0, m_Select(m_Specific(Op1), m_One(), m_Value())))
2395 return Op0;
2396 }
2397 // If the operation is with the result of a select instruction, check
2398 // whether operating on either branch of the select always yields the same
2399 // value.
2400 if (Value *V =
2401 threadBinOpOverSelect(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2402 return V;
2403 }
2404
2405 // (A & C1)|(B & C2)
2406 Value *A, *B;
2407 const APInt *C1, *C2;
2408 if (match(Op0, m_And(m_Value(A), m_APInt(C1))) &&
2409 match(Op1, m_And(m_Value(B), m_APInt(C2)))) {
2410 if (*C1 == ~*C2) {
2411 // (A & C1)|(B & C2)
2412 // If we have: ((V + N) & C1) | (V & C2)
2413 // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
2414 // replace with V+N.
2415 Value *N;
2416 if (C2->isMask() && // C2 == 0+1+
 2417 match(A, m_c_Add(m_Specific(B), m_Value(N)))) {
2418 // Add commutes, try both ways.
2419 if (MaskedValueIsZero(N, *C2, Q))
2420 return A;
2421 }
2422 // Or commutes, try both ways.
2423 if (C1->isMask() && match(B, m_c_Add(m_Specific(A), m_Value(N)))) {
2424 // Add commutes, try both ways.
2425 if (MaskedValueIsZero(N, *C1, Q))
2426 return B;
2427 }
2428 }
2429 }
2430
2431 // If the operation is with the result of a phi instruction, check whether
2432 // operating on all incoming values of the phi always yields the same value.
2433 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2434 if (Value *V = threadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2435 return V;
2436
2437 // (A ^ C) | (A ^ ~C) -> -1, i.e. all bits set to one.
2438 if (match(Op0, m_Xor(m_Value(A), m_APInt(C1))) &&
2439 match(Op1, m_Xor(m_Specific(A), m_SpecificInt(~*C1))))
2440 return Constant::getAllOnesValue(Op0->getType());
2441
2442 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2443 if (std::optional<bool> Implied =
2444 isImpliedCondition(Op0, Op1, Q.DL, false)) {
2445 // If Op0 is false implies Op1 is false, then Op1 is a subset of Op0.
2446 if (*Implied == false)
2447 return Op0;
2448 // If Op0 is false implies Op1 is true, then at least one is always true.
2449 if (*Implied == true)
2450 return ConstantInt::getTrue(Op0->getType());
2451 }
2452 if (std::optional<bool> Implied =
2453 isImpliedCondition(Op1, Op0, Q.DL, false)) {
2454 // If Op1 is false implies Op0 is false, then Op0 is a subset of Op1.
2455 if (*Implied == false)
2456 return Op1;
2457 // If Op1 is false implies Op0 is true, then at least one is always true.
2458 if (*Implied == true)
2459 return ConstantInt::getTrue(Op1->getType());
2460 }
2461 }
2462
2463 if (Value *V = simplifyByDomEq(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2464 return V;
2465
2466 return nullptr;
2467}
2468
 2469Value *llvm::simplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2470 return ::simplifyOrInst(Op0, Op1, Q, RecursionLimit);
2471}
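// For illustration (hypothetical IR): a decomposed rotate of -1, folded by
// the "rotated -1" case above (C == 32 <= bitwidth):
//   %x  = sub i32 32, %y
//   %hi = shl i32 -1, %x
//   %lo = lshr i32 -1, %y
//   %r  = or i32 %hi, %lo       ; --> -1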
2472
2473/// Given operands for a Xor, see if we can fold the result.
2474/// If not, this returns null.
2475static Value *simplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2476 unsigned MaxRecurse) {
2477 if (Constant *C = foldOrCommuteConstant(Instruction::Xor, Op0, Op1, Q))
2478 return C;
2479
2480 // X ^ poison -> poison
2481 if (isa<PoisonValue>(Op1))
2482 return Op1;
2483
2484 // A ^ undef -> undef
2485 if (Q.isUndefValue(Op1))
2486 return Op1;
2487
2488 // A ^ 0 = A
2489 if (match(Op1, m_Zero()))
2490 return Op0;
2491
2492 // A ^ A = 0
2493 if (Op0 == Op1)
2494 return Constant::getNullValue(Op0->getType());
2495
2496 // A ^ ~A = ~A ^ A = -1
2497 if (match(Op0, m_Not(m_Specific(Op1))) || match(Op1, m_Not(m_Specific(Op0))))
2498 return Constant::getAllOnesValue(Op0->getType());
2499
2500 auto foldAndOrNot = [](Value *X, Value *Y) -> Value * {
2501 Value *A, *B;
2502 // (~A & B) ^ (A | B) --> A -- There are 8 commuted variants.
2503 if (match(X, m_c_And(m_Not(m_Value(A)), m_Value(B))) &&
 2504 match(Y, m_c_Or(m_Specific(A), m_Specific(B))))
2505 return A;
2506
2507 // (~A | B) ^ (A & B) --> ~A -- There are 8 commuted variants.
2508 // The 'not' op must contain a complete -1 operand (no undef elements for
2509 // vector) for the transform to be safe.
2510 Value *NotA;
 2511 if (match(X, m_c_Or(m_CombineAnd(m_Value(NotA), m_NotForbidPoison(m_Value(A))),
 2512 m_Value(B))) &&
 2513 match(Y, m_c_And(m_Specific(A), m_Specific(B))))
 2514 return NotA;
2515
2516 return nullptr;
2517 };
2518 if (Value *R = foldAndOrNot(Op0, Op1))
2519 return R;
2520 if (Value *R = foldAndOrNot(Op1, Op0))
2521 return R;
2522
2523 if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Xor))
2524 return V;
2525
2526 // Try some generic simplifications for associative operations.
2527 if (Value *V =
2528 simplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q, MaxRecurse))
2529 return V;
2530
2531 // Threading Xor over selects and phi nodes is pointless, so don't bother.
2532 // Threading over the select in "A ^ select(cond, B, C)" means evaluating
2533 // "A^B" and "A^C" and seeing if they are equal; but they are equal if and
2534 // only if B and C are equal. If B and C are equal then (since we assume
2535 // that operands have already been simplified) "select(cond, B, C)" should
2536 // have been simplified to the common value of B and C already. Analysing
2537 // "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly
2538 // for threading over phi nodes.
2539
2540 if (Value *V = simplifyByDomEq(Instruction::Xor, Op0, Op1, Q, MaxRecurse))
2541 return V;
2542
2543 return nullptr;
2544}
2545
 2546Value *llvm::simplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2547 return ::simplifyXorInst(Op0, Op1, Q, RecursionLimit);
2548}
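// For illustration (hypothetical IR): basic xor identities handled above:
//   %n  = xor i32 %a, -1        ; ~a
//   %r0 = xor i32 %a, %n        ; --> -1 (A ^ ~A)
//   %r1 = xor i32 %a, %a        ; --> 0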
2549
 2550static Type *getCompareTy(Value *Op) {
2551 return CmpInst::makeCmpResultType(Op->getType());
2552}
2553
2554/// Rummage around inside V looking for something equivalent to the comparison
2555/// "LHS Pred RHS". Return such a value if found, otherwise return null.
2556/// Helper function for analyzing max/min idioms.
 2557static Value *extractEquivalentCondition(Value *V, CmpPredicate Pred,
2558 Value *LHS, Value *RHS) {
2559 SelectInst *SI = dyn_cast<SelectInst>(V);
2560 if (!SI)
2561 return nullptr;
2562 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
2563 if (!Cmp)
2564 return nullptr;
2565 Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
2566 if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
2567 return Cmp;
2568 if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) &&
2569 LHS == CmpRHS && RHS == CmpLHS)
2570 return Cmp;
2571 return nullptr;
2572}
2573
2574/// Return true if the underlying object (storage) must be disjoint from
2575/// storage returned by any noalias return call.
2576static bool isAllocDisjoint(const Value *V) {
2577 // For allocas, we consider only static ones (dynamic
2578 // allocas might be transformed into calls to malloc not simultaneously
2579 // live with the compared-to allocation). For globals, we exclude symbols
 2580 // that might be resolved lazily to symbols in another dynamically-loaded
2581 // library (and, thus, could be malloc'ed by the implementation).
2582 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
2583 return AI->isStaticAlloca();
2584 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
2585 return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
2586 GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
2587 !GV->isThreadLocal();
2588 if (const Argument *A = dyn_cast<Argument>(V))
2589 return A->hasByValAttr();
2590 return false;
2591}
2592
 2593/// Return true if V1 and V2 are each the base of some distinct storage region
2594/// [V, object_size(V)] which do not overlap. Note that zero sized regions
2595/// *are* possible, and that zero sized regions do not overlap with any other.
2596static bool haveNonOverlappingStorage(const Value *V1, const Value *V2) {
2597 // Global variables always exist, so they always exist during the lifetime
2598 // of each other and all allocas. Global variables themselves usually have
2599 // non-overlapping storage, but since their addresses are constants, the
2600 // case involving two globals does not reach here and is instead handled in
2601 // constant folding.
2602 //
2603 // Two different allocas usually have different addresses...
2604 //
2605 // However, if there's an @llvm.stackrestore dynamically in between two
2606 // allocas, they may have the same address. It's tempting to reduce the
2607 // scope of the problem by only looking at *static* allocas here. That would
2608 // cover the majority of allocas while significantly reducing the likelihood
2609 // of having an @llvm.stackrestore pop up in the middle. However, it's not
2610 // actually impossible for an @llvm.stackrestore to pop up in the middle of
2611 // an entry block. Also, if we have a block that's not attached to a
2612 // function, we can't tell if it's "static" under the current definition.
2613 // Theoretically, this problem could be fixed by creating a new kind of
2614 // instruction kind specifically for static allocas. Such a new instruction
2615 // could be required to be at the top of the entry block, thus preventing it
2616 // from being subject to a @llvm.stackrestore. Instcombine could even
2617 // convert regular allocas into these special allocas. It'd be nifty.
2618 // However, until then, this problem remains open.
2619 //
2620 // So, we'll assume that two non-empty allocas have different addresses
2621 // for now.
2622 auto isByValArg = [](const Value *V) {
2623 const Argument *A = dyn_cast<Argument>(V);
2624 return A && A->hasByValAttr();
2625 };
2626
 2627 // Byval args are backed by storage which does not overlap with each other,
2628 // allocas, or globals.
2629 if (isByValArg(V1))
2630 return isa<AllocaInst>(V2) || isa<GlobalVariable>(V2) || isByValArg(V2);
2631 if (isByValArg(V2))
2632 return isa<AllocaInst>(V1) || isa<GlobalVariable>(V1) || isByValArg(V1);
2633
2634 return isa<AllocaInst>(V1) &&
2635 (isa<AllocaInst>(V2) || isa<GlobalVariable>(V2));
2636}
2637
2638// A significant optimization not implemented here is assuming that alloca
2639// addresses are not equal to incoming argument values. They don't *alias*,
2640// as we say, but that doesn't mean they aren't equal, so we take a
2641// conservative approach.
2642//
2643// This is inspired in part by C++11 5.10p1:
2644// "Two pointers of the same type compare equal if and only if they are both
2645// null, both point to the same function, or both represent the same
2646// address."
2647//
2648// This is pretty permissive.
2649//
2650// It's also partly due to C11 6.5.9p6:
2651// "Two pointers compare equal if and only if both are null pointers, both are
2652// pointers to the same object (including a pointer to an object and a
2653// subobject at its beginning) or function, both are pointers to one past the
2654// last element of the same array object, or one is a pointer to one past the
2655// end of one array object and the other is a pointer to the start of a
2656// different array object that happens to immediately follow the first array
2657// object in the address space.)
2658//
2659// C11's version is more restrictive, however there's no reason why an argument
2660// couldn't be a one-past-the-end value for a stack object in the caller and be
2661// equal to the beginning of a stack object in the callee.
2662//
2663// If the C and C++ standards are ever made sufficiently restrictive in this
2664// area, it may be possible to update LLVM's semantics accordingly and reinstate
2665// this optimization.
 2666static Constant *computePointerICmp(CmpPredicate Pred, Value *LHS, Value *RHS,
2667 const SimplifyQuery &Q) {
2668 assert(LHS->getType() == RHS->getType() && "Must have same types");
2669 const DataLayout &DL = Q.DL;
2670 const TargetLibraryInfo *TLI = Q.TLI;
2671
2672 // We can only fold certain predicates on pointer comparisons.
2673 switch (Pred) {
2674 default:
2675 return nullptr;
2676
2677 // Equality comparisons are easy to fold.
2678 case CmpInst::ICMP_EQ:
2679 case CmpInst::ICMP_NE:
2680 break;
2681
2682 // We can only handle unsigned relational comparisons because 'inbounds' on
2683 // a GEP only protects against unsigned wrapping.
2684 case CmpInst::ICMP_UGT:
2685 case CmpInst::ICMP_UGE:
2686 case CmpInst::ICMP_ULT:
2687 case CmpInst::ICMP_ULE:
2688 // However, we have to switch them to their signed variants to handle
2689 // negative indices from the base pointer.
2690 Pred = ICmpInst::getSignedPredicate(Pred);
2691 break;
2692 }
2693
2694 // Strip off any constant offsets so that we can reason about them.
2695 // It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets
2696 // here and compare base addresses like AliasAnalysis does, however there are
2697 // numerous hazards. AliasAnalysis and its utilities rely on special rules
2698 // governing loads and stores which don't apply to icmps. Also, AliasAnalysis
2699 // doesn't need to guarantee pointer inequality when it says NoAlias.
2700
 2701 // Even if a non-inbounds GEP occurs along the path we can still optimize
2702 // equality comparisons concerning the result.
2703 bool AllowNonInbounds = ICmpInst::isEquality(Pred);
2704 unsigned IndexSize = DL.getIndexTypeSizeInBits(LHS->getType());
2705 APInt LHSOffset(IndexSize, 0), RHSOffset(IndexSize, 0);
2706 LHS = LHS->stripAndAccumulateConstantOffsets(DL, LHSOffset, AllowNonInbounds);
2707 RHS = RHS->stripAndAccumulateConstantOffsets(DL, RHSOffset, AllowNonInbounds);
2708
2709 // If LHS and RHS are related via constant offsets to the same base
2710 // value, we can replace it with an icmp which just compares the offsets.
2711 if (LHS == RHS)
2712 return ConstantInt::get(getCompareTy(LHS),
2713 ICmpInst::compare(LHSOffset, RHSOffset, Pred));
2714
2715 // Various optimizations for (in)equality comparisons.
2716 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) {
2717 // Different non-empty allocations that exist at the same time have
2718 // different addresses (if the program can tell). If the offsets are
2719 // within the bounds of their allocations (and not one-past-the-end!
2720 // so we can't use inbounds!), and their allocations aren't the same,
2721 // the pointers are not equal.
 2722 if (haveNonOverlappingStorage(LHS, RHS)) {
2723 uint64_t LHSSize, RHSSize;
2724 ObjectSizeOpts Opts;
2725 Opts.EvalMode = ObjectSizeOpts::Mode::Min;
2726 auto *F = [](Value *V) -> Function * {
2727 if (auto *I = dyn_cast<Instruction>(V))
2728 return I->getFunction();
2729 if (auto *A = dyn_cast<Argument>(V))
2730 return A->getParent();
2731 return nullptr;
2732 }(LHS);
2733 Opts.NullIsUnknownSize = F ? NullPointerIsDefined(F) : true;
2734 if (getObjectSize(LHS, LHSSize, DL, TLI, Opts) && LHSSize != 0 &&
2735 getObjectSize(RHS, RHSSize, DL, TLI, Opts) && RHSSize != 0) {
2736 APInt Dist = LHSOffset - RHSOffset;
2737 if (Dist.isNonNegative() ? Dist.ult(LHSSize) : (-Dist).ult(RHSSize))
2738 return ConstantInt::get(getCompareTy(LHS),
 2739 !CmpInst::isTrueWhenEqual(Pred));
2740 }
2741 }
2742
2743 // If one side of the equality comparison must come from a noalias call
2744 // (meaning a system memory allocation function), and the other side must
2745 // come from a pointer that cannot overlap with dynamically-allocated
2746 // memory within the lifetime of the current function (allocas, byval
2747 // arguments, globals), then determine the comparison result here.
2748 SmallVector<const Value *, 8> LHSUObjs, RHSUObjs;
2749 getUnderlyingObjects(LHS, LHSUObjs);
2750 getUnderlyingObjects(RHS, RHSUObjs);
2751
2752 // Is the set of underlying objects all noalias calls?
2753 auto IsNAC = [](ArrayRef<const Value *> Objects) {
2754 return all_of(Objects, isNoAliasCall);
2755 };
2756
2757 // Is the set of underlying objects all things which must be disjoint from
2758 // noalias calls. We assume that indexing from such disjoint storage
2759 // into the heap is undefined, and thus offsets can be safely ignored.
2760 auto IsAllocDisjoint = [](ArrayRef<const Value *> Objects) {
2761 return all_of(Objects, ::isAllocDisjoint);
2762 };
2763
2764 if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2765 (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2766 return ConstantInt::get(getCompareTy(LHS),
 2767 !CmpInst::isTrueWhenEqual(Pred));
2768
2769 // Fold comparisons for non-escaping pointer even if the allocation call
2770 // cannot be elided. We cannot fold malloc comparison to null. Also, the
2771 // dynamic allocation call could be either of the operands. Note that
2772 // the other operand can not be based on the alloc - if it were, then
2773 // the cmp itself would be a capture.
2774 Value *MI = nullptr;
2775 if (isAllocLikeFn(LHS, TLI) && llvm::isKnownNonZero(RHS, Q))
2776 MI = LHS;
2777 else if (isAllocLikeFn(RHS, TLI) && llvm::isKnownNonZero(LHS, Q))
2778 MI = RHS;
2779 if (MI) {
2780 // FIXME: This is incorrect, see PR54002. While we can assume that the
2781 // allocation is at an address that makes the comparison false, this
2782 // requires that *all* comparisons to that address be false, which
2783 // InstSimplify cannot guarantee.
2784 struct CustomCaptureTracker : public CaptureTracker {
2785 bool Captured = false;
2786 void tooManyUses() override { Captured = true; }
2787 bool captured(const Use *U) override {
2788 if (auto *ICmp = dyn_cast<ICmpInst>(U->getUser())) {
2789 // Comparison against value stored in global variable. Given the
2790 // pointer does not escape, its value cannot be guessed and stored
2791 // separately in a global variable.
2792 unsigned OtherIdx = 1 - U->getOperandNo();
2793 auto *LI = dyn_cast<LoadInst>(ICmp->getOperand(OtherIdx));
2794 if (LI && isa<GlobalVariable>(LI->getPointerOperand()))
2795 return false;
2796 }
2797
2798 Captured = true;
2799 return true;
2800 }
2801 };
2802 CustomCaptureTracker Tracker;
2803 PointerMayBeCaptured(MI, &Tracker);
2804 if (!Tracker.Captured)
2805 return ConstantInt::get(getCompareTy(LHS),
 2806 CmpInst::isFalseWhenEqual(Pred));
2807 }
2808 }
2809
2810 // Otherwise, fail.
2811 return nullptr;
2812}
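// For illustration (hypothetical IR): two distinct static allocas that are
// live at the same time, compared at in-bounds (zero) offsets, cannot be
// equal:
//   %p = alloca i32
//   %q = alloca i32
//   %c = icmp eq ptr %p, %q     ; --> false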
2813
2814/// Fold an icmp when its operands have i1 scalar type.
 2815static Value *simplifyICmpOfBools(CmpPredicate Pred, Value *LHS, Value *RHS,
2816 const SimplifyQuery &Q) {
2817 Type *ITy = getCompareTy(LHS); // The return type.
2818 Type *OpTy = LHS->getType(); // The operand type.
2819 if (!OpTy->isIntOrIntVectorTy(1))
2820 return nullptr;
2821
2822 // A boolean compared to true/false can be reduced in 14 out of the 20
2823 // (10 predicates * 2 constants) possible combinations. The other
2824 // 6 cases require a 'not' of the LHS.
2825
2826 auto ExtractNotLHS = [](Value *V) -> Value * {
2827 Value *X;
2828 if (match(V, m_Not(m_Value(X))))
2829 return X;
2830 return nullptr;
2831 };
2832
2833 if (match(RHS, m_Zero())) {
2834 switch (Pred) {
2835 case CmpInst::ICMP_NE: // X != 0 -> X
2836 case CmpInst::ICMP_UGT: // X >u 0 -> X
2837 case CmpInst::ICMP_SLT: // X <s 0 -> X
2838 return LHS;
2839
2840 case CmpInst::ICMP_EQ: // not(X) == 0 -> X != 0 -> X
2841 case CmpInst::ICMP_ULE: // not(X) <=u 0 -> X >u 0 -> X
2842 case CmpInst::ICMP_SGE: // not(X) >=s 0 -> X <s 0 -> X
2843 if (Value *X = ExtractNotLHS(LHS))
2844 return X;
2845 break;
2846
2847 case CmpInst::ICMP_ULT: // X <u 0 -> false
2848 case CmpInst::ICMP_SGT: // X >s 0 -> false
2849 return getFalse(ITy);
2850
2851 case CmpInst::ICMP_UGE: // X >=u 0 -> true
2852 case CmpInst::ICMP_SLE: // X <=s 0 -> true
2853 return getTrue(ITy);
2854
2855 default:
2856 break;
2857 }
2858 } else if (match(RHS, m_One())) {
2859 switch (Pred) {
2860 case CmpInst::ICMP_EQ: // X == 1 -> X
2861 case CmpInst::ICMP_UGE: // X >=u 1 -> X
2862 case CmpInst::ICMP_SLE: // X <=s -1 -> X
2863 return LHS;
2864
2865 case CmpInst::ICMP_NE: // not(X) != 1 -> X == 1 -> X
 2866 case CmpInst::ICMP_ULT: // not(X) <u 1 -> X >=u 1 -> X
 2867 case CmpInst::ICMP_SGT: // not(X) >s -1 -> X <=s -1 -> X
2868 if (Value *X = ExtractNotLHS(LHS))
2869 return X;
2870 break;
2871
2872 case CmpInst::ICMP_UGT: // X >u 1 -> false
2873 case CmpInst::ICMP_SLT: // X <s -1 -> false
2874 return getFalse(ITy);
2875
2876 case CmpInst::ICMP_ULE: // X <=u 1 -> true
2877 case CmpInst::ICMP_SGE: // X >=s -1 -> true
2878 return getTrue(ITy);
2879
2880 default:
2881 break;
2882 }
2883 }
2884
2885 switch (Pred) {
2886 default:
2887 break;
2888 case ICmpInst::ICMP_UGE:
2889 if (isImpliedCondition(RHS, LHS, Q.DL).value_or(false))
2890 return getTrue(ITy);
2891 break;
2892 case ICmpInst::ICMP_SGE:
2893 /// For signed comparison, the values for an i1 are 0 and -1
2894 /// respectively. This maps into a truth table of:
2895 /// LHS | RHS | LHS >=s RHS | LHS implies RHS
2896 /// 0 | 0 | 1 (0 >= 0) | 1
2897 /// 0 | 1 | 1 (0 >= -1) | 1
2898 /// 1 | 0 | 0 (-1 >= 0) | 0
2899 /// 1 | 1 | 1 (-1 >= -1) | 1
2900 if (isImpliedCondition(LHS, RHS, Q.DL).value_or(false))
2901 return getTrue(ITy);
2902 break;
2903 case ICmpInst::ICMP_ULE:
2904 if (isImpliedCondition(LHS, RHS, Q.DL).value_or(false))
2905 return getTrue(ITy);
2906 break;
2907 case ICmpInst::ICMP_SLE:
2908 /// SLE follows the same logic as SGE with the LHS and RHS swapped.
2909 if (isImpliedCondition(RHS, LHS, Q.DL).value_or(false))
2910 return getTrue(ITy);
2911 break;
2912 }
2913
2914 return nullptr;
2915}
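// For illustration (hypothetical IR): i1 comparisons reduce to the operand
// or its 'not', as tabulated above:
//   %r0 = icmp ne i1 %x, false  ; --> %x
//   %n  = xor i1 %x, true       ; not(%x)
//   %r1 = icmp eq i1 %n, false  ; --> %x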
2916
2917/// Try hard to fold icmp with zero RHS because this is a common case.
 2918static Value *simplifyICmpWithZero(CmpPredicate Pred, Value *LHS, Value *RHS,
2919 const SimplifyQuery &Q) {
2920 if (!match(RHS, m_Zero()))
2921 return nullptr;
2922
2923 Type *ITy = getCompareTy(LHS); // The return type.
2924 switch (Pred) {
2925 default:
2926 llvm_unreachable("Unknown ICmp predicate!");
2927 case ICmpInst::ICMP_ULT:
2928 return getFalse(ITy);
2929 case ICmpInst::ICMP_UGE:
2930 return getTrue(ITy);
2931 case ICmpInst::ICMP_EQ:
2932 case ICmpInst::ICMP_ULE:
2933 if (isKnownNonZero(LHS, Q))
2934 return getFalse(ITy);
2935 break;
2936 case ICmpInst::ICMP_NE:
2937 case ICmpInst::ICMP_UGT:
2938 if (isKnownNonZero(LHS, Q))
2939 return getTrue(ITy);
2940 break;
2941 case ICmpInst::ICMP_SLT: {
2942 KnownBits LHSKnown = computeKnownBits(LHS, /* Depth */ 0, Q);
2943 if (LHSKnown.isNegative())
2944 return getTrue(ITy);
2945 if (LHSKnown.isNonNegative())
2946 return getFalse(ITy);
2947 break;
2948 }
2949 case ICmpInst::ICMP_SLE: {
2950 KnownBits LHSKnown = computeKnownBits(LHS, /* Depth */ 0, Q);
2951 if (LHSKnown.isNegative())
2952 return getTrue(ITy);
2953 if (LHSKnown.isNonNegative() && isKnownNonZero(LHS, Q))
2954 return getFalse(ITy);
2955 break;
2956 }
2957 case ICmpInst::ICMP_SGE: {
2958 KnownBits LHSKnown = computeKnownBits(LHS, /* Depth */ 0, Q);
2959 if (LHSKnown.isNegative())
2960 return getFalse(ITy);
2961 if (LHSKnown.isNonNegative())
2962 return getTrue(ITy);
2963 break;
2964 }
2965 case ICmpInst::ICMP_SGT: {
2966 KnownBits LHSKnown = computeKnownBits(LHS, /* Depth */ 0, Q);
2967 if (LHSKnown.isNegative())
2968 return getFalse(ITy);
2969 if (LHSKnown.isNonNegative() && isKnownNonZero(LHS, Q))
2970 return getTrue(ITy);
2971 break;
2972 }
2973 }
2974
2975 return nullptr;
2976}
2977
 2978static Value *simplifyICmpWithConstant(CmpPredicate Pred, Value *LHS,
2979 Value *RHS, const InstrInfoQuery &IIQ) {
2980 Type *ITy = getCompareTy(RHS); // The return type.
2981
2982 Value *X;
2983 const APInt *C;
2984 if (!match(RHS, m_APIntAllowPoison(C)))
2985 return nullptr;
2986
2987 // Sign-bit checks can be optimized to true/false after unsigned
2988 // floating-point casts:
2989 // icmp slt (bitcast (uitofp X)), 0 --> false
2990 // icmp sgt (bitcast (uitofp X)), -1 --> true
 2991 if (match(LHS, m_ElementWiseBitCast(m_UIToFP(m_Value(X))))) {
2992 bool TrueIfSigned;
2993 if (isSignBitCheck(Pred, *C, TrueIfSigned))
2994 return ConstantInt::getBool(ITy, !TrueIfSigned);
2995 }
2996
 2997 // Rule out tautological comparisons (e.g., ult 0 or uge 0).
 2998 ConstantRange RHS_CR = ConstantRange::makeExactICmpRegion(Pred, *C);
2999 if (RHS_CR.isEmptySet())
3000 return ConstantInt::getFalse(ITy);
3001 if (RHS_CR.isFullSet())
3002 return ConstantInt::getTrue(ITy);
3003
3004 ConstantRange LHS_CR =
 3005 computeConstantRange(LHS, CmpInst::isSigned(Pred), IIQ.UseInstrInfo);
3006 if (!LHS_CR.isFullSet()) {
3007 if (RHS_CR.contains(LHS_CR))
3008 return ConstantInt::getTrue(ITy);
3009 if (RHS_CR.inverse().contains(LHS_CR))
3010 return ConstantInt::getFalse(ITy);
3011 }
3012
3013 // (mul nuw/nsw X, MulC) != C --> true (if C is not a multiple of MulC)
3014 // (mul nuw/nsw X, MulC) == C --> false (if C is not a multiple of MulC)
3015 const APInt *MulC;
3016 if (IIQ.UseInstrInfo && ICmpInst::isEquality(Pred) &&
 3017 ((match(LHS, m_NUWMul(m_Value(), m_APIntAllowPoison(MulC))) &&
 3018 *MulC != 0 && C->urem(*MulC) != 0) ||
 3019 (match(LHS, m_NSWMul(m_Value(), m_APIntAllowPoison(MulC))) &&
 3020 *MulC != 0 && C->srem(*MulC) != 0)))
3021 return ConstantInt::get(ITy, Pred == ICmpInst::ICMP_NE);
3022
3023 return nullptr;
3024}
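// For illustration (hypothetical IR): 3 is not a multiple of 4, and a nuw
// multiply by 4 only produces multiples of 4; empty ranges also fold:
//   %m  = mul nuw i32 %x, 4
//   %c0 = icmp eq i32 %m, 3     ; --> false
//   %c1 = icmp ult i32 %x, 0    ; --> false (no value is <u 0)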
3025
 3026enum class MonotonicType { GreaterEq, LowerEq };
3027
3028/// Get values V_i such that V uge V_i (GreaterEq) or V ule V_i (LowerEq).
 3029static void getUnsignedMonotonicValues(SmallPtrSetImpl<Value *> &Res, Value *V,
3030 MonotonicType Type, unsigned Depth = 0) {
3031 if (!Res.insert(V).second)
3032 return;
3033
3034 // Can be increased if useful.
3035 if (++Depth > 1)
3036 return;
3037
3038 auto *I = dyn_cast<Instruction>(V);
3039 if (!I)
3040 return;
3041
3042 Value *X, *Y;
 3043 if (Type == MonotonicType::GreaterEq) {
3044 if (match(I, m_Or(m_Value(X), m_Value(Y))) ||
3045 match(I, m_Intrinsic<Intrinsic::uadd_sat>(m_Value(X), m_Value(Y)))) {
 3046 getUnsignedMonotonicValues(Res, X, Type, Depth);
 3047 getUnsignedMonotonicValues(Res, Y, Type, Depth);
3048 }
3049 } else {
 3050 assert(Type == MonotonicType::LowerEq);
3051 switch (I->getOpcode()) {
3052 case Instruction::And:
3053 getUnsignedMonotonicValues(Res, I->getOperand(0), Type, Depth);
3054 getUnsignedMonotonicValues(Res, I->getOperand(1), Type, Depth);
3055 break;
3056 case Instruction::URem:
3057 case Instruction::UDiv:
3058 case Instruction::LShr:
3059 getUnsignedMonotonicValues(Res, I->getOperand(0), Type, Depth);
3060 break;
3061 case Instruction::Call:
3062 if (match(I, m_Intrinsic<Intrinsic::usub_sat>(m_Value(X))))
 3063 getUnsignedMonotonicValues(Res, X, Type, Depth);
3064 break;
3065 default:
3066 break;
3067 }
3068 }
3069}
3070
 3071static Value *simplifyICmpUsingMonotonicValues(CmpPredicate Pred, Value *LHS,
3072 Value *RHS) {
3073 if (Pred != ICmpInst::ICMP_UGE && Pred != ICmpInst::ICMP_ULT)
3074 return nullptr;
3075
3076 // We have LHS uge GreaterValues and LowerValues uge RHS. If any of the
3077 // GreaterValues and LowerValues are the same, it follows that LHS uge RHS.
3078 SmallPtrSet<Value *, 4> GreaterValues;
3079 SmallPtrSet<Value *, 4> LowerValues;
 3080 getUnsignedMonotonicValues(GreaterValues, LHS, MonotonicType::GreaterEq);
 3081 getUnsignedMonotonicValues(LowerValues, RHS, MonotonicType::LowerEq);
3082 for (Value *GV : GreaterValues)
3083 if (LowerValues.contains(GV))
 3084 return ConstantInt::getBool(getCompareTy(LHS),
3085 Pred == ICmpInst::ICMP_UGE);
3086 return nullptr;
3087}
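// For illustration (hypothetical IR): (or %x, %y) uge %x and (and %x, %z)
// ule %x, so %x is a common monotonic value:
//   %l = or  i32 %x, %y
//   %r = and i32 %x, %z
//   %c = icmp uge i32 %l, %r    ; --> true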
3088
 3089static Value *simplifyICmpWithBinOpOnLHS(CmpPredicate Pred, BinaryOperator *LBO,
3090 Value *RHS, const SimplifyQuery &Q,
3091 unsigned MaxRecurse) {
3092 Type *ITy = getCompareTy(RHS); // The return type.
3093
3094 Value *Y = nullptr;
3095 // icmp pred (or X, Y), X
3096 if (match(LBO, m_c_Or(m_Value(Y), m_Specific(RHS)))) {
3097 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
3098 KnownBits RHSKnown = computeKnownBits(RHS, /* Depth */ 0, Q);
3099 KnownBits YKnown = computeKnownBits(Y, /* Depth */ 0, Q);
3100 if (RHSKnown.isNonNegative() && YKnown.isNegative())
3101 return Pred == ICmpInst::ICMP_SLT ? getTrue(ITy) : getFalse(ITy);
3102 if (RHSKnown.isNegative() || YKnown.isNonNegative())
3103 return Pred == ICmpInst::ICMP_SLT ? getFalse(ITy) : getTrue(ITy);
3104 }
3105 }
3106
3107 // icmp pred (urem X, Y), Y
3108 if (match(LBO, m_URem(m_Value(), m_Specific(RHS)))) {
3109 switch (Pred) {
3110 default:
3111 break;
3112 case ICmpInst::ICMP_SGT:
3113 case ICmpInst::ICMP_SGE: {
3114 KnownBits Known = computeKnownBits(RHS, /* Depth */ 0, Q);
3115 if (!Known.isNonNegative())
3116 break;
3117 [[fallthrough]];
3118 }
3119 case ICmpInst::ICMP_EQ:
3120 case ICmpInst::ICMP_UGT:
3121 case ICmpInst::ICMP_UGE:
3122 return getFalse(ITy);
3123 case ICmpInst::ICMP_SLT:
3124 case ICmpInst::ICMP_SLE: {
3125 KnownBits Known = computeKnownBits(RHS, /* Depth */ 0, Q);
3126 if (!Known.isNonNegative())
3127 break;
3128 [[fallthrough]];
3129 }
3130 case ICmpInst::ICMP_NE:
3131 case ICmpInst::ICMP_ULT:
3132 case ICmpInst::ICMP_ULE:
3133 return getTrue(ITy);
3134 }
3135 }
3136
3137 // If x is nonzero:
3138 // x >>u C <u x --> true for C != 0.
3139 // x >>u C != x --> true for C != 0.
3140 // x >>u C >=u x --> false for C != 0.
3141 // x >>u C == x --> false for C != 0.
3142 // x udiv C <u x --> true for C != 1.
3143 // x udiv C != x --> true for C != 1.
3144 // x udiv C >=u x --> false for C != 1.
3145 // x udiv C == x --> false for C != 1.
3146 // TODO: allow non-constant shift amount/divisor
3147 const APInt *C;
3148 if ((match(LBO, m_LShr(m_Specific(RHS), m_APInt(C))) && *C != 0) ||
3149 (match(LBO, m_UDiv(m_Specific(RHS), m_APInt(C))) && *C != 1)) {
3150 if (isKnownNonZero(RHS, Q)) {
3151 switch (Pred) {
3152 default:
3153 break;
3154 case ICmpInst::ICMP_EQ:
3155 case ICmpInst::ICMP_UGE:
3156 case ICmpInst::ICMP_UGT:
3157 return getFalse(ITy);
3158 case ICmpInst::ICMP_NE:
3159 case ICmpInst::ICMP_ULT:
3160 case ICmpInst::ICMP_ULE:
3161 return getTrue(ITy);
3162 }
3163 }
3164 }
3165
3166 // (x*C1)/C2 <= x for C1 <= C2.
3167 // This holds even if the multiplication overflows: Assume that x != 0 and
3168 // arithmetic is modulo M. For overflow to occur we must have C1 >= M/x and
3169 // thus C2 >= M/x. It follows that (x*C1)/C2 <= (M-1)/C2 <= ((M-1)*x)/M < x.
3170 //
3171   // Additionally, either the multiplication or the division might be
3172   // represented as a shift:
3173 // (x*C1)>>C2 <= x for C1 < 2**C2.
3174 // (x<<C1)/C2 <= x for 2**C1 < C2.
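  // A worked i8 illustration of the overflow case (values chosen for the
  // example): with M = 256, x = 32, C1 = 9, C2 = 9, the product 32*9 = 288
  // wraps to 32, and 32/9 = 3 <= 32, matching the bound above.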
3175 const APInt *C1, *C2;
3176 if ((match(LBO, m_UDiv(m_Mul(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
3177 C1->ule(*C2)) ||
3178 (match(LBO, m_LShr(m_Mul(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
3179 C1->ule(APInt(C2->getBitWidth(), 1) << *C2)) ||
3180 (match(LBO, m_UDiv(m_Shl(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
3181 (APInt(C1->getBitWidth(), 1) << *C1).ule(*C2))) {
3182 if (Pred == ICmpInst::ICMP_UGT)
3183 return getFalse(ITy);
3184 if (Pred == ICmpInst::ICMP_ULE)
3185 return getTrue(ITy);
3186 }
3187
3188 // (sub C, X) == X, C is odd --> false
3189 // (sub C, X) != X, C is odd --> true
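  // e.g. (illustrative): "icmp eq (sub i32 7, %x), %x" would require
  // 2 * %x == 7, impossible since 7 is odd, so it folds to false.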
3190 if (match(LBO, m_Sub(m_APIntAllowPoison(C), m_Specific(RHS))) &&
3191 (*C & 1) == 1 && ICmpInst::isEquality(Pred))
3192 return (Pred == ICmpInst::ICMP_EQ) ? getFalse(ITy) : getTrue(ITy);
3193
3194 return nullptr;
3195}
3196
3197// If only one of the icmp's operands has NSW flags, try to prove that:
3198//
3199// icmp slt (x + C1), (x +nsw C2)
3200//
3201// is equivalent to:
3202//
3203// icmp slt C1, C2
3204//
3205// which is true if x + C2 has the NSW flags set and:
3206// *) C1 < C2 && C1 >= 0, or
3207// *) C2 < C1 && C1 <= 0.
3208//
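// For instance (hypothetical values): "icmp slt (add i32 %x, 1),
// (add nsw i32 %x, 2)" has C1 = 1 < C2 = 2 with C1 >= 0, so it reduces to
// "icmp slt 1, 2", i.e. true.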
3209 static bool trySimplifyICmpWithAdds(CmpPredicate Pred, Value *LHS, Value *RHS,
3210                                     const InstrInfoQuery &IIQ) {
3211 // TODO: only support icmp slt for now.
3212 if (Pred != CmpInst::ICMP_SLT || !IIQ.UseInstrInfo)
3213 return false;
3214
3215 // Canonicalize nsw add as RHS.
3216 if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
3217 std::swap(LHS, RHS);
3218 if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
3219 return false;
3220
3221 Value *X;
3222 const APInt *C1, *C2;
3223 if (!match(LHS, m_Add(m_Value(X), m_APInt(C1))) ||
3224 !match(RHS, m_Add(m_Specific(X), m_APInt(C2))))
3225 return false;
3226
3227 return (C1->slt(*C2) && C1->isNonNegative()) ||
3228 (C2->slt(*C1) && C1->isNonPositive());
3229}
3230
3231/// TODO: A large part of this logic is duplicated in InstCombine's
3232/// foldICmpBinOp(). We should be able to share that and avoid the code
3233/// duplication.
3234 static Value *simplifyICmpWithBinOp(CmpPredicate Pred, Value *LHS, Value *RHS,
3235                                     const SimplifyQuery &Q,
3236 unsigned MaxRecurse) {
3237 BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS);
3238 BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS);
3239 if (MaxRecurse && (LBO || RBO)) {
3240 // Analyze the case when either LHS or RHS is an add instruction.
3241 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
3242 // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null).
3243 bool NoLHSWrapProblem = false, NoRHSWrapProblem = false;
3244 if (LBO && LBO->getOpcode() == Instruction::Add) {
3245 A = LBO->getOperand(0);
3246 B = LBO->getOperand(1);
3247 NoLHSWrapProblem =
3248 ICmpInst::isEquality(Pred) ||
3249 (CmpInst::isUnsigned(Pred) &&
3250 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO))) ||
3251 (CmpInst::isSigned(Pred) &&
3252 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)));
3253 }
3254 if (RBO && RBO->getOpcode() == Instruction::Add) {
3255 C = RBO->getOperand(0);
3256 D = RBO->getOperand(1);
3257 NoRHSWrapProblem =
3258 ICmpInst::isEquality(Pred) ||
3259 (CmpInst::isUnsigned(Pred) &&
3260 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(RBO))) ||
3261 (CmpInst::isSigned(Pred) &&
3262 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RBO)));
3263 }
3264
3265 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
3266 if ((A == RHS || B == RHS) && NoLHSWrapProblem)
3267       if (Value *V = simplifyICmpInst(Pred, A == RHS ? B : A,
3268                                       Constant::getNullValue(RHS->getType()), Q,
3269                                       MaxRecurse - 1))
3270 return V;
3271
3272 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
3273 if ((C == LHS || D == LHS) && NoRHSWrapProblem)
3274       if (Value *V =
3275               simplifyICmpInst(Pred, Constant::getNullValue(LHS->getType()),
3276                                C == LHS ? D : C, Q, MaxRecurse - 1))
3277 return V;
3278
3279 // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow.
3280     bool CanSimplify = (NoLHSWrapProblem && NoRHSWrapProblem) ||
3281                        trySimplifyICmpWithAdds(Pred, LHS, RHS, Q.IIQ);
3282     if (A && C && (A == C || A == D || B == C || B == D) && CanSimplify) {
3283 // Determine Y and Z in the form icmp (X+Y), (X+Z).
3284 Value *Y, *Z;
3285 if (A == C) {
3286 // C + B == C + D -> B == D
3287 Y = B;
3288 Z = D;
3289 } else if (A == D) {
3290 // D + B == C + D -> B == C
3291 Y = B;
3292 Z = C;
3293 } else if (B == C) {
3294 // A + C == C + D -> A == D
3295 Y = A;
3296 Z = D;
3297 } else {
3298 assert(B == D);
3299 // A + D == C + D -> A == C
3300 Y = A;
3301 Z = C;
3302 }
3303 if (Value *V = simplifyICmpInst(Pred, Y, Z, Q, MaxRecurse - 1))
3304 return V;
3305 }
3306 }
3307
3308 if (LBO)
3309 if (Value *V = simplifyICmpWithBinOpOnLHS(Pred, LBO, RHS, Q, MaxRecurse))
3310 return V;
3311
3312   if (RBO)
3313     if (Value *V = simplifyICmpWithBinOpOnLHS(
3314             ICmpInst::getSwappedPredicate(Pred), RBO, LHS, Q, MaxRecurse))
3315 return V;
3316
3317 // 0 - (zext X) pred C
3318 if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) {
3319 const APInt *C;
3320 if (match(RHS, m_APInt(C))) {
3321 if (C->isStrictlyPositive()) {
3322         if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_NE)
3323           return ConstantInt::getTrue(ITy);
3324         if (Pred == ICmpInst::ICMP_SGE || Pred == ICmpInst::ICMP_EQ)
3325           return ConstantInt::getFalse(ITy);
3326       }
3327       if (C->isNonNegative()) {
3328         if (Pred == ICmpInst::ICMP_SLE)
3329           return ConstantInt::getTrue(ITy);
3330         if (Pred == ICmpInst::ICMP_SGT)
3331           return ConstantInt::getFalse(ITy);
3332       }
3333 }
3334 }
3335
3336 // If C2 is a power-of-2 and C is not:
3337 // (C2 << X) == C --> false
3338 // (C2 << X) != C --> true
3339 const APInt *C;
3340 if (match(LHS, m_Shl(m_Power2(), m_Value())) &&
3341 match(RHS, m_APIntAllowPoison(C)) && !C->isPowerOf2()) {
3342 // C2 << X can equal zero in some circumstances.
3343 // This simplification might be unsafe if C is zero.
3344 //
3345 // We know it is safe if:
3346 // - The shift is nsw. We can't shift out the one bit.
3347 // - The shift is nuw. We can't shift out the one bit.
3348 // - C2 is one.
3349 // - C isn't zero.
3350 if (Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
3351 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
3352 match(LHS, m_Shl(m_One(), m_Value())) || !C->isZero()) {
3353       if (Pred == ICmpInst::ICMP_EQ)
3354         return ConstantInt::getFalse(ITy);
3355       if (Pred == ICmpInst::ICMP_NE)
3356         return ConstantInt::getTrue(ITy);
3357     }
3358 }
3359
3360 // If C is a power-of-2:
3361 // (C << X) >u 0x8000 --> false
3362 // (C << X) <=u 0x8000 --> true
3363 if (match(LHS, m_Shl(m_Power2(), m_Value())) && match(RHS, m_SignMask())) {
3364     if (Pred == ICmpInst::ICMP_UGT)
3365       return ConstantInt::getFalse(ITy);
3366     if (Pred == ICmpInst::ICMP_ULE)
3367       return ConstantInt::getTrue(ITy);
3368   }
3369
3370 if (!MaxRecurse || !LBO || !RBO || LBO->getOpcode() != RBO->getOpcode())
3371 return nullptr;
3372
3373 if (LBO->getOperand(0) == RBO->getOperand(0)) {
3374 switch (LBO->getOpcode()) {
3375 default:
3376 break;
3377 case Instruction::Shl: {
3378 bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
3379 bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
3380 if (!NUW || (ICmpInst::isSigned(Pred) && !NSW) ||
3381 !isKnownNonZero(LBO->getOperand(0), Q))
3382 break;
3383 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(1),
3384 RBO->getOperand(1), Q, MaxRecurse - 1))
3385 return V;
3386 break;
3387 }
3388 // If C1 & C2 == C1, A = X and/or C1, B = X and/or C2:
3389 // icmp ule A, B -> true
3390 // icmp ugt A, B -> false
3391 // icmp sle A, B -> true (C1 and C2 are the same sign)
3392 // icmp sgt A, B -> false (C1 and C2 are the same sign)
3393 case Instruction::And:
3394 case Instruction::Or: {
3395 const APInt *C1, *C2;
3396 if (ICmpInst::isRelational(Pred) &&
3397 match(LBO->getOperand(1), m_APInt(C1)) &&
3398 match(RBO->getOperand(1), m_APInt(C2))) {
3399 if (!C1->isSubsetOf(*C2)) {
3400 std::swap(C1, C2);
3401 Pred = ICmpInst::getSwappedPredicate(Pred);
3402 }
3403 if (C1->isSubsetOf(*C2)) {
3404           if (Pred == ICmpInst::ICMP_ULE)
3405             return ConstantInt::getTrue(ITy);
3406           if (Pred == ICmpInst::ICMP_UGT)
3407             return ConstantInt::getFalse(ITy);
3408           if (C1->isNonNegative() == C2->isNonNegative()) {
3409             if (Pred == ICmpInst::ICMP_SLE)
3410               return ConstantInt::getTrue(ITy);
3411             if (Pred == ICmpInst::ICMP_SGT)
3412               return ConstantInt::getFalse(ITy);
3413           }
3414 }
3415 }
3416 break;
3417 }
3418 }
3419 }
3420
3421 if (LBO->getOperand(1) == RBO->getOperand(1)) {
3422 switch (LBO->getOpcode()) {
3423 default:
3424 break;
3425 case Instruction::UDiv:
3426 case Instruction::LShr:
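      // e.g. (illustrative) with both sides exact over the same divisor:
      // "icmp ult (udiv exact %a, %z), (udiv exact %b, %z)" reduces to
      // comparing %a and %b, folding when that compare itself simplifies.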
3427 if (ICmpInst::isSigned(Pred) || !Q.IIQ.isExact(LBO) ||
3428 !Q.IIQ.isExact(RBO))
3429 break;
3430 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3431 RBO->getOperand(0), Q, MaxRecurse - 1))
3432 return V;
3433 break;
3434 case Instruction::SDiv:
3435 if (!ICmpInst::isEquality(Pred) || !Q.IIQ.isExact(LBO) ||
3436 !Q.IIQ.isExact(RBO))
3437 break;
3438 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3439 RBO->getOperand(0), Q, MaxRecurse - 1))
3440 return V;
3441 break;
3442 case Instruction::AShr:
3443 if (!Q.IIQ.isExact(LBO) || !Q.IIQ.isExact(RBO))
3444 break;
3445 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3446 RBO->getOperand(0), Q, MaxRecurse - 1))
3447 return V;
3448 break;
3449 case Instruction::Shl: {
3450 bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
3451 bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
3452 if (!NUW && !NSW)
3453 break;
3454 if (!NSW && ICmpInst::isSigned(Pred))
3455 break;
3456 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3457 RBO->getOperand(0), Q, MaxRecurse - 1))
3458 return V;
3459 break;
3460 }
3461 }
3462 }
3463 return nullptr;
3464}
3465
3466 /// Simplify integer comparisons where at least one operand of the compare
3467/// matches an integer min/max idiom.
3468 static Value *simplifyICmpWithMinMax(CmpPredicate Pred, Value *LHS, Value *RHS,
3469                                      const SimplifyQuery &Q,
3470 unsigned MaxRecurse) {
3471 Type *ITy = getCompareTy(LHS); // The return type.
3472 Value *A, *B;
3473   CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE;
3474   CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B".
3475
3476 // Signed variants on "max(a,b)>=a -> true".
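  // For example (illustrative): "icmp sge (smax i32 %a, %b), %a" folds to
  // true via the SGE case below, and "icmp slt (smax %a, %b), %a" to false.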
3477 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
3478 if (A != RHS)
3479 std::swap(A, B); // smax(A, B) pred A.
3480 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3481 // We analyze this as smax(A, B) pred A.
3482 P = Pred;
3483 } else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) &&
3484 (A == LHS || B == LHS)) {
3485 if (A != LHS)
3486 std::swap(A, B); // A pred smax(A, B).
3487 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3488     // We analyze this as smax(A, B) swapped-pred A.
3489     P = CmpInst::getSwappedPredicate(Pred);
3490 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
3491 (A == RHS || B == RHS)) {
3492 if (A != RHS)
3493 std::swap(A, B); // smin(A, B) pred A.
3494 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3495 // We analyze this as smax(-A, -B) swapped-pred -A.
3496     // Note that we do not need to actually form -A or -B thanks to EqP.
3497     P = CmpInst::getSwappedPredicate(Pred);
3498 } else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) &&
3499 (A == LHS || B == LHS)) {
3500 if (A != LHS)
3501 std::swap(A, B); // A pred smin(A, B).
3502 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3503 // We analyze this as smax(-A, -B) pred -A.
3504 // Note that we do not need to actually form -A or -B thanks to EqP.
3505 P = Pred;
3506 }
3507   if (P != CmpInst::BAD_ICMP_PREDICATE) {
3508     // Cases correspond to "max(A, B) p A".
3509 switch (P) {
3510 default:
3511 break;
3512 case CmpInst::ICMP_EQ:
3513 case CmpInst::ICMP_SLE:
3514 // Equivalent to "A EqP B". This may be the same as the condition tested
3515 // in the max/min; if so, we can just return that.
3516 if (Value *V = extractEquivalentCondition(LHS, EqP, A, B))
3517 return V;
3518 if (Value *V = extractEquivalentCondition(RHS, EqP, A, B))
3519 return V;
3520 // Otherwise, see if "A EqP B" simplifies.
3521 if (MaxRecurse)
3522 if (Value *V = simplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3523 return V;
3524 break;
3525 case CmpInst::ICMP_NE:
3526 case CmpInst::ICMP_SGT: {
3527       CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
3528       // Equivalent to "A InvEqP B". This may be the same as the condition
3529 // tested in the max/min; if so, we can just return that.
3530 if (Value *V = extractEquivalentCondition(LHS, InvEqP, A, B))
3531 return V;
3532 if (Value *V = extractEquivalentCondition(RHS, InvEqP, A, B))
3533 return V;
3534 // Otherwise, see if "A InvEqP B" simplifies.
3535 if (MaxRecurse)
3536 if (Value *V = simplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3537 return V;
3538 break;
3539 }
3540 case CmpInst::ICMP_SGE:
3541 // Always true.
3542 return getTrue(ITy);
3543 case CmpInst::ICMP_SLT:
3544 // Always false.
3545 return getFalse(ITy);
3546 }
3547 }
3548
3549 // Unsigned variants on "max(a,b)>=a -> true".
3550   P = CmpInst::BAD_ICMP_PREDICATE;
3551   if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
3552 if (A != RHS)
3553 std::swap(A, B); // umax(A, B) pred A.
3554 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3555 // We analyze this as umax(A, B) pred A.
3556 P = Pred;
3557 } else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) &&
3558 (A == LHS || B == LHS)) {
3559 if (A != LHS)
3560 std::swap(A, B); // A pred umax(A, B).
3561 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3562     // We analyze this as umax(A, B) swapped-pred A.
3563     P = CmpInst::getSwappedPredicate(Pred);
3564 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
3565 (A == RHS || B == RHS)) {
3566 if (A != RHS)
3567 std::swap(A, B); // umin(A, B) pred A.
3568 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3569 // We analyze this as umax(-A, -B) swapped-pred -A.
3570     // Note that we do not need to actually form -A or -B thanks to EqP.
3571     P = CmpInst::getSwappedPredicate(Pred);
3572 } else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) &&
3573 (A == LHS || B == LHS)) {
3574 if (A != LHS)
3575 std::swap(A, B); // A pred umin(A, B).
3576 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3577 // We analyze this as umax(-A, -B) pred -A.
3578 // Note that we do not need to actually form -A or -B thanks to EqP.
3579 P = Pred;
3580 }
3581   if (P != CmpInst::BAD_ICMP_PREDICATE) {
3582     // Cases correspond to "max(A, B) p A".
3583 switch (P) {
3584 default:
3585 break;
3586 case CmpInst::ICMP_EQ:
3587 case CmpInst::ICMP_ULE:
3588 // Equivalent to "A EqP B". This may be the same as the condition tested
3589 // in the max/min; if so, we can just return that.
3590 if (Value *V = extractEquivalentCondition(LHS, EqP, A, B))
3591 return V;
3592 if (Value *V = extractEquivalentCondition(RHS, EqP, A, B))
3593 return V;
3594 // Otherwise, see if "A EqP B" simplifies.
3595 if (MaxRecurse)
3596 if (Value *V = simplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3597 return V;
3598 break;
3599 case CmpInst::ICMP_NE:
3600 case CmpInst::ICMP_UGT: {
3601       CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
3602       // Equivalent to "A InvEqP B". This may be the same as the condition
3603 // tested in the max/min; if so, we can just return that.
3604 if (Value *V = extractEquivalentCondition(LHS, InvEqP, A, B))
3605 return V;
3606 if (Value *V = extractEquivalentCondition(RHS, InvEqP, A, B))
3607 return V;
3608 // Otherwise, see if "A InvEqP B" simplifies.
3609 if (MaxRecurse)
3610 if (Value *V = simplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3611 return V;
3612 break;
3613 }
3614 case CmpInst::ICMP_UGE:
3615 return getTrue(ITy);
3616 case CmpInst::ICMP_ULT:
3617 return getFalse(ITy);
3618 }
3619 }
3620
3621 // Comparing 1 each of min/max with a common operand?
3622 // Canonicalize min operand to RHS.
3623 if (match(LHS, m_UMin(m_Value(), m_Value())) ||
3624 match(LHS, m_SMin(m_Value(), m_Value()))) {
3625 std::swap(LHS, RHS);
3626 Pred = ICmpInst::getSwappedPredicate(Pred);
3627 }
3628
3629 Value *C, *D;
3630 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) &&
3631 match(RHS, m_SMin(m_Value(C), m_Value(D))) &&
3632 (A == C || A == D || B == C || B == D)) {
3633 // smax(A, B) >=s smin(A, D) --> true
3634 if (Pred == CmpInst::ICMP_SGE)
3635 return getTrue(ITy);
3636 // smax(A, B) <s smin(A, D) --> false
3637 if (Pred == CmpInst::ICMP_SLT)
3638 return getFalse(ITy);
3639 } else if (match(LHS, m_UMax(m_Value(A), m_Value(B))) &&
3640 match(RHS, m_UMin(m_Value(C), m_Value(D))) &&
3641 (A == C || A == D || B == C || B == D)) {
3642 // umax(A, B) >=u umin(A, D) --> true
3643 if (Pred == CmpInst::ICMP_UGE)
3644 return getTrue(ITy);
3645 // umax(A, B) <u umin(A, D) --> false
3646 if (Pred == CmpInst::ICMP_ULT)
3647 return getFalse(ITy);
3648 }
3649
3650 return nullptr;
3651}
3652
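// A sketch of the pattern handled below (hypothetical IR):
//   %c0 = icmp ugt i32 %x, 10
//   call void @llvm.assume(i1 %c0)
//   %c1 = icmp ugt i32 %x, 5   ; implied by the assume, folds to true
// provided the assume is valid at the context instruction.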
3653 static Value *simplifyICmpWithDominatingAssume(CmpPredicate Predicate,
3654                                                Value *LHS, Value *RHS,
3655 const SimplifyQuery &Q) {
3656 // Gracefully handle instructions that have not been inserted yet.
3657 if (!Q.AC || !Q.CxtI)
3658 return nullptr;
3659
3660 for (Value *AssumeBaseOp : {LHS, RHS}) {
3661 for (auto &AssumeVH : Q.AC->assumptionsFor(AssumeBaseOp)) {
3662 if (!AssumeVH)
3663 continue;
3664
3665 CallInst *Assume = cast<CallInst>(AssumeVH);
3666 if (std::optional<bool> Imp = isImpliedCondition(
3667 Assume->getArgOperand(0), Predicate, LHS, RHS, Q.DL))
3668 if (isValidAssumeForContext(Assume, Q.CxtI, Q.DT))
3669 return ConstantInt::get(getCompareTy(LHS), *Imp);
3670 }
3671 }
3672
3673 return nullptr;
3674}
3675
3676 static Value *simplifyICmpWithIntrinsicOnLHS(CmpPredicate Pred, Value *LHS,
3677                                              Value *RHS) {
3678 auto *II = dyn_cast<IntrinsicInst>(LHS);
3679 if (!II)
3680 return nullptr;
3681
3682 switch (II->getIntrinsicID()) {
3683 case Intrinsic::uadd_sat:
3684 // uadd.sat(X, Y) uge X + Y
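    // e.g. (illustrative): for %s = uadd.sat(%x, %y) and %a = add %x, %y,
    // "icmp uge %s, %a" folds to true: the saturating result is never
    // below the wrapping sum.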
3685 if (match(RHS, m_c_Add(m_Specific(II->getArgOperand(0)),
3686 m_Specific(II->getArgOperand(1))))) {
3687       if (Pred == ICmpInst::ICMP_UGE)
3688         return getTrue(getCompareTy(II));
3689       if (Pred == ICmpInst::ICMP_ULT)
3690         return getFalse(getCompareTy(II));
3691     }
3692 return nullptr;
3693 case Intrinsic::usub_sat:
3694 // usub.sat(X, Y) ule X - Y
3695 if (match(RHS, m_Sub(m_Specific(II->getArgOperand(0)),
3696 m_Specific(II->getArgOperand(1))))) {
3697       if (Pred == ICmpInst::ICMP_ULE)
3698         return getTrue(getCompareTy(II));
3699       if (Pred == ICmpInst::ICMP_UGT)
3700         return getFalse(getCompareTy(II));
3701     }
3702 return nullptr;
3703 default:
3704 return nullptr;
3705 }
3706}
3707
3708/// Helper method to get range from metadata or attribute.
3709static std::optional<ConstantRange> getRange(Value *V,
3710 const InstrInfoQuery &IIQ) {
3711 if (Instruction *I = dyn_cast<Instruction>(V))
3712 if (MDNode *MD = IIQ.getMetadata(I, LLVMContext::MD_range))
3713 return getConstantRangeFromMetadata(*MD);
3714
3715 if (const Argument *A = dyn_cast<Argument>(V))
3716 return A->getRange();
3717 else if (const CallBase *CB = dyn_cast<CallBase>(V))
3718 return CB->getRange();
3719
3720 return std::nullopt;
3721}
3722
3723/// Given operands for an ICmpInst, see if we can fold the result.
3724/// If not, this returns null.
3725 static Value *simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS,
3726                                const SimplifyQuery &Q, unsigned MaxRecurse) {
3727 assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!");
3728
3729 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3730 if (Constant *CRHS = dyn_cast<Constant>(RHS))
3731 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3732
3733 // If we have a constant, make sure it is on the RHS.
3734 std::swap(LHS, RHS);
3735 Pred = CmpInst::getSwappedPredicate(Pred);
3736 }
3737 assert(!isa<UndefValue>(LHS) && "Unexpected icmp undef,%X");
3738
3739 Type *ITy = getCompareTy(LHS); // The return type.
3740
3741 // icmp poison, X -> poison
3742 if (isa<PoisonValue>(RHS))
3743 return PoisonValue::get(ITy);
3744
3745 // For EQ and NE, we can always pick a value for the undef to make the
3746 // predicate pass or fail, so we can return undef.
3747 // Matches behavior in llvm::ConstantFoldCompareInstruction.
3748 if (Q.isUndefValue(RHS) && ICmpInst::isEquality(Pred))
3749 return UndefValue::get(ITy);
3750
3751 // icmp X, X -> true/false
3752 // icmp X, undef -> true/false because undef could be X.
3753 if (LHS == RHS || Q.isUndefValue(RHS))
3754 return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred));
3755
3756 if (Value *V = simplifyICmpOfBools(Pred, LHS, RHS, Q))
3757 return V;
3758
3759 // TODO: Sink/common this with other potentially expensive calls that use
3760 // ValueTracking? See comment below for isKnownNonEqual().
3761 if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q))
3762 return V;
3763
3764 if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS, Q.IIQ))
3765 return V;
3766
3767   // If both operands have a known range (from metadata or attributes), use it
3768   // to simplify the comparison.
3769 if (std::optional<ConstantRange> RhsCr = getRange(RHS, Q.IIQ))
3770 if (std::optional<ConstantRange> LhsCr = getRange(LHS, Q.IIQ)) {
3771 if (LhsCr->icmp(Pred, *RhsCr))
3772 return ConstantInt::getTrue(ITy);
3773
3774 if (LhsCr->icmp(CmpInst::getInversePredicate(Pred), *RhsCr))
3775 return ConstantInt::getFalse(ITy);
3776 }
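  // For instance (illustrative): two loads annotated with
  // !range !{i8 0, i8 10} and !range !{i8 20, i8 30} compare "ult" as
  // always-true above, because the two ranges are disjoint.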
3777
3778 // Compare of cast, for example (zext X) != 0 -> X != 0
3779 if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) {
3780 Instruction *LI = cast<CastInst>(LHS);
3781 Value *SrcOp = LI->getOperand(0);
3782 Type *SrcTy = SrcOp->getType();
3783 Type *DstTy = LI->getType();
3784
3785 // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
3786 // if the integer type is the same size as the pointer type.
3787 if (MaxRecurse && isa<PtrToIntInst>(LI) &&
3788 Q.DL.getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
3789 if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
3790 // Transfer the cast to the constant.
3791 if (Value *V = simplifyICmpInst(Pred, SrcOp,
3792 ConstantExpr::getIntToPtr(RHSC, SrcTy),
3793 Q, MaxRecurse - 1))
3794 return V;
3795 } else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) {
3796 if (RI->getOperand(0)->getType() == SrcTy)
3797 // Compare without the cast.
3798 if (Value *V = simplifyICmpInst(Pred, SrcOp, RI->getOperand(0), Q,
3799 MaxRecurse - 1))
3800 return V;
3801 }
3802 }
3803
3804 if (isa<ZExtInst>(LHS)) {
3805 // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the
3806 // same type.
3807 if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3808 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3809 // Compare X and Y. Note that signed predicates become unsigned.
3810           if (Value *V =
3811                   simplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred), SrcOp,
3812                                    RI->getOperand(0), Q, MaxRecurse - 1))
3813 return V;
3814 }
3815 // Fold (zext X) ule (sext X), (zext X) sge (sext X) to true.
3816 else if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3817 if (SrcOp == RI->getOperand(0)) {
3818 if (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_SGE)
3819 return ConstantInt::getTrue(ITy);
3820 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_SLT)
3821 return ConstantInt::getFalse(ITy);
3822 }
3823 }
3824 // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended
3825 // too. If not, then try to deduce the result of the comparison.
3826 else if (match(RHS, m_ImmConstant())) {
3827 Constant *C = dyn_cast<Constant>(RHS);
3828 assert(C != nullptr);
3829
3830 // Compute the constant that would happen if we truncated to SrcTy then
3831 // reextended to DstTy.
3832 Constant *Trunc =
3833 ConstantFoldCastOperand(Instruction::Trunc, C, SrcTy, Q.DL);
3834 assert(Trunc && "Constant-fold of ImmConstant should not fail");
3835 Constant *RExt =
3836 ConstantFoldCastOperand(CastInst::ZExt, Trunc, DstTy, Q.DL);
3837 assert(RExt && "Constant-fold of ImmConstant should not fail");
3838 Constant *AnyEq =
3839 ConstantFoldCompareInstOperands(ICmpInst::ICMP_EQ, RExt, C, Q.DL);
3840 assert(AnyEq && "Constant-fold of ImmConstant should not fail");
3841
3842 // If the re-extended constant didn't change any of the elements then
3843 // this is effectively also a case of comparing two zero-extended
3844 // values.
3845         if (AnyEq->isAllOnesValue() && MaxRecurse)
3846           if (Value *V = simplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3847                                           SrcOp, Trunc, Q, MaxRecurse - 1))
3848 return V;
3849
3850 // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit
3851 // there. Use this to work out the result of the comparison.
3852 if (AnyEq->isNullValue()) {
3853 switch (Pred) {
3854 default:
3855 llvm_unreachable("Unknown ICmp predicate!");
3856 // LHS <u RHS.
3857 case ICmpInst::ICMP_EQ:
3858 case ICmpInst::ICMP_UGT:
3859 case ICmpInst::ICMP_UGE:
3860 return Constant::getNullValue(ITy);
3861
3862 case ICmpInst::ICMP_NE:
3863 case ICmpInst::ICMP_ULT:
3864 case ICmpInst::ICMP_ULE:
3865 return Constant::getAllOnesValue(ITy);
3866
3867 // LHS is non-negative. If RHS is negative then LHS >s LHS. If RHS
3868 // is non-negative then LHS <s RHS.
3869 case ICmpInst::ICMP_SGT:
3870         case ICmpInst::ICMP_SGE:
3871           return ConstantFoldCompareInstOperands(
3872               ICmpInst::ICMP_SLT, C, Constant::getNullValue(C->getType()),
3873               Q.DL);
3874         case ICmpInst::ICMP_SLT:
3875         case ICmpInst::ICMP_SLE:
3876           return ConstantFoldCompareInstOperands(
3877               ICmpInst::ICMP_SGE, C, Constant::getNullValue(C->getType()),
3878               Q.DL);
3879 }
3880 }
3881 }
3882 }
3883
3884 if (isa<SExtInst>(LHS)) {
3885 // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the
3886 // same type.
3887 if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3888 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3889 // Compare X and Y. Note that the predicate does not change.
3890 if (Value *V = simplifyICmpInst(Pred, SrcOp, RI->getOperand(0), Q,
3891 MaxRecurse - 1))
3892 return V;
3893 }
3894 // Fold (sext X) uge (zext X), (sext X) sle (zext X) to true.
3895 else if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3896 if (SrcOp == RI->getOperand(0)) {
3897 if (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_SLE)
3898 return ConstantInt::getTrue(ITy);
3899 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SGT)
3900 return ConstantInt::getFalse(ITy);
3901 }
3902 }
3903 // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
3904 // too. If not, then try to deduce the result of the comparison.
3905 else if (match(RHS, m_ImmConstant())) {
3906 Constant *C = cast<Constant>(RHS);
3907
3908 // Compute the constant that would happen if we truncated to SrcTy then
3909 // reextended to DstTy.
3910 Constant *Trunc =
3911 ConstantFoldCastOperand(Instruction::Trunc, C, SrcTy, Q.DL);
3912 assert(Trunc && "Constant-fold of ImmConstant should not fail");
3913 Constant *RExt =
3914 ConstantFoldCastOperand(CastInst::SExt, Trunc, DstTy, Q.DL);
3915 assert(RExt && "Constant-fold of ImmConstant should not fail");
3916 Constant *AnyEq =
3917 ConstantFoldCompareInstOperands(ICmpInst::ICMP_EQ, RExt, C, Q.DL);
3918 assert(AnyEq && "Constant-fold of ImmConstant should not fail");
3919
3920 // If the re-extended constant didn't change then this is effectively
3921 // also a case of comparing two sign-extended values.
3922 if (AnyEq->isAllOnesValue() && MaxRecurse)
3923 if (Value *V =
3924 simplifyICmpInst(Pred, SrcOp, Trunc, Q, MaxRecurse - 1))
3925 return V;
3926
3927 // Otherwise the upper bits of LHS are all equal, while RHS has varying
3928 // bits there. Use this to work out the result of the comparison.
3929 if (AnyEq->isNullValue()) {
3930 switch (Pred) {
3931 default:
3932 llvm_unreachable("Unknown ICmp predicate!");
3933 case ICmpInst::ICMP_EQ:
3934 return Constant::getNullValue(ITy);
3935 case ICmpInst::ICMP_NE:
3936 return Constant::getAllOnesValue(ITy);
3937
3938 // If RHS is non-negative then LHS <s RHS. If RHS is negative then
3939 // LHS >s RHS.
3940 case ICmpInst::ICMP_SGT:
3941         case ICmpInst::ICMP_SGE:
3942           return ConstantFoldCompareInstOperands(
3943               ICmpInst::ICMP_SLT, C, Constant::getNullValue(C->getType()),
3944               Q.DL);
3945         case ICmpInst::ICMP_SLT:
3946         case ICmpInst::ICMP_SLE:
3947           return ConstantFoldCompareInstOperands(
3948               ICmpInst::ICMP_SGE, C, Constant::getNullValue(C->getType()),
3949               Q.DL);
3950
3951 // If LHS is non-negative then LHS <u RHS. If LHS is negative then
3952 // LHS >u RHS.
3953 case ICmpInst::ICMP_UGT:
3954 case ICmpInst::ICMP_UGE:
3955 // Comparison is true iff the LHS <s 0.
3956 if (MaxRecurse)
3957 if (Value *V = simplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp,
3958 Constant::getNullValue(SrcTy), Q,
3959 MaxRecurse - 1))
3960 return V;
3961 break;
3962 case ICmpInst::ICMP_ULT:
3963 case ICmpInst::ICMP_ULE:
3964 // Comparison is true iff the LHS >=s 0.
3965 if (MaxRecurse)
3966 if (Value *V = simplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp,
3967 Constant::getNullValue(SrcTy), Q,
3968 MaxRecurse - 1))
3969 return V;
3970 break;
3971 }
3972 }
3973 }
3974 }
3975 }
3976
3977 // icmp eq|ne X, Y -> false|true if X != Y
3978   // This is potentially expensive, and we have already computed known bits for
3979   // compares with 0 above, so only try this for a non-zero compare.
3980 if (ICmpInst::isEquality(Pred) && !match(RHS, m_Zero()) &&
3981 isKnownNonEqual(LHS, RHS, Q.DL, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo)) {
3982 return Pred == ICmpInst::ICMP_NE ? getTrue(ITy) : getFalse(ITy);
3983 }
3984
3985 if (Value *V = simplifyICmpWithBinOp(Pred, LHS, RHS, Q, MaxRecurse))
3986 return V;
3987
3988 if (Value *V = simplifyICmpWithMinMax(Pred, LHS, RHS, Q, MaxRecurse))
3989 return V;
3990
3991   if (Value *V = simplifyICmpUsingMonotonicValues(Pred, LHS, RHS))
3992     return V;
3993   if (Value *V = simplifyICmpUsingMonotonicValues(
3994           ICmpInst::getSwappedPredicate(Pred), RHS, LHS))
3995     return V;
3996
3997   if (Value *V = simplifyICmpWithIntrinsicOnLHS(Pred, LHS, RHS))
3998     return V;
3999   if (Value *V = simplifyICmpWithIntrinsicOnLHS(
4000           ICmpInst::getSwappedPredicate(Pred), RHS, LHS))
4001     return V;
4002
4003 if (Value *V = simplifyICmpWithDominatingAssume(Pred, LHS, RHS, Q))
4004 return V;
4005
4006 if (std::optional<bool> Res =
4007 isImpliedByDomCondition(Pred, LHS, RHS, Q.CxtI, Q.DL))
4008 return ConstantInt::getBool(ITy, *Res);
4009
4010 // Simplify comparisons of related pointers using a powerful, recursive
4011   // GEP-walk when we have target data available.
4012 if (LHS->getType()->isPointerTy())
4013 if (auto *C = computePointerICmp(Pred, LHS, RHS, Q))
4014 return C;
4015 if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
4016 if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
4017 if (CLHS->getPointerOperandType() == CRHS->getPointerOperandType() &&
4018 Q.DL.getTypeSizeInBits(CLHS->getPointerOperandType()) ==
4019 Q.DL.getTypeSizeInBits(CLHS->getType()))
4020 if (auto *C = computePointerICmp(Pred, CLHS->getPointerOperand(),
4021 CRHS->getPointerOperand(), Q))
4022 return C;
4023
4024 // If the comparison is with the result of a select instruction, check whether
4025 // comparing with either branch of the select always yields the same value.
4026 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
4027 if (Value *V = threadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
4028 return V;
4029
4030 // If the comparison is with the result of a phi instruction, check whether
4031 // doing the compare with each incoming phi value yields a common result.
4032 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
4033 if (Value *V = threadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
4034 return V;
4035
4036 return nullptr;
4037}
4038
4039 Value *llvm::simplifyICmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS,
4040                               const SimplifyQuery &Q) {
4041 return ::simplifyICmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
4042}
4043
4044/// Given operands for an FCmpInst, see if we can fold the result.
4045/// If not, this returns null.
4046 static Value *simplifyFCmpInst(CmpPredicate Pred, Value *LHS, Value *RHS,
4047                                FastMathFlags FMF, const SimplifyQuery &Q,
4048 unsigned MaxRecurse) {
4049 assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!");
4050
4051 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
4052 if (Constant *CRHS = dyn_cast<Constant>(RHS))
4053 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI,
4054 Q.CxtI);
4055
4056 // If we have a constant, make sure it is on the RHS.
4057 std::swap(LHS, RHS);
4058 Pred = CmpInst::getSwappedPredicate(Pred);
4059 }
4060
4061 // Fold trivial predicates.
4062   Type *RetTy = getCompareTy(LHS);
4063   if (Pred == FCmpInst::FCMP_FALSE)
4064 return getFalse(RetTy);
4065 if (Pred == FCmpInst::FCMP_TRUE)
4066 return getTrue(RetTy);
4067
4068 // fcmp pred x, poison and fcmp pred poison, x
4069 // fold to poison
4070 if (isa<PoisonValue>(LHS) || isa<PoisonValue>(RHS))
4071 return PoisonValue::get(RetTy);
4072
4073 // fcmp pred x, undef and fcmp pred undef, x
4074 // fold to true if unordered, false if ordered
4075 if (Q.isUndefValue(LHS) || Q.isUndefValue(RHS)) {
4076 // Choosing NaN for the undef will always make unordered comparison succeed
4077 // and ordered comparison fail.
4078 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
4079 }
4080
4081 // fcmp x,x -> true/false. Not all compares are foldable.
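  // e.g. (illustrative): "fcmp ueq float %x, %x" folds to true and
  // "fcmp one float %x, %x" to false, but oeq/une are left alone since
  // %x may be NaN.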
4082 if (LHS == RHS) {
4083 if (CmpInst::isTrueWhenEqual(Pred))
4084 return getTrue(RetTy);
4085 if (CmpInst::isFalseWhenEqual(Pred))
4086 return getFalse(RetTy);
4087 }
4088
4089 // Fold (un)ordered comparison if we can determine there are no NaNs.
4090 //
4091 // This catches the 2 variable input case, constants are handled below as a
4092 // class-like compare.
4093 if (Pred == FCmpInst::FCMP_ORD || Pred == FCmpInst::FCMP_UNO) {
4094 KnownFPClass RHSClass =
4095 computeKnownFPClass(RHS, fcAllFlags, /*Depth=*/0, Q);
4096 KnownFPClass LHSClass =
4097 computeKnownFPClass(LHS, fcAllFlags, /*Depth=*/0, Q);
4098
4099 if (FMF.noNaNs() ||
4100 (RHSClass.isKnownNeverNaN() && LHSClass.isKnownNeverNaN()))
4101 return ConstantInt::get(RetTy, Pred == FCmpInst::FCMP_ORD);
4102
4103 if (RHSClass.isKnownAlwaysNaN() || LHSClass.isKnownAlwaysNaN())
4104 return ConstantInt::get(RetTy, Pred == CmpInst::FCMP_UNO);
4105 }
4106
4107 const APFloat *C = nullptr;
4108   match(RHS, m_APFloat(C));
4109   std::optional<KnownFPClass> FullKnownClassLHS;
4110
4111 // Lazily compute the possible classes for LHS. Avoid computing it twice if
4112 // RHS is a 0.
4113 auto computeLHSClass = [=, &FullKnownClassLHS](FPClassTest InterestedFlags =
4114 fcAllFlags) {
4115 if (FullKnownClassLHS)
4116 return *FullKnownClassLHS;
4117 return computeKnownFPClass(LHS, FMF, InterestedFlags, 0, Q);
4118 };
4119
4120 if (C && Q.CxtI) {
4121 // Fold out compares that express a class test.
4122 //
4123 // FIXME: Should be able to perform folds without context
4124 // instruction. Always pass in the context function?
4125
4126 const Function *ParentF = Q.CxtI->getFunction();
4127 auto [ClassVal, ClassTest] = fcmpToClassTest(Pred, *ParentF, LHS, C);
4128 if (ClassVal) {
4129 FullKnownClassLHS = computeLHSClass();
4130 if ((FullKnownClassLHS->KnownFPClasses & ClassTest) == fcNone)
4131 return getFalse(RetTy);
4132 if ((FullKnownClassLHS->KnownFPClasses & ~ClassTest) == fcNone)
4133 return getTrue(RetTy);
4134 }
4135 }
4136
4137 // Handle fcmp with constant RHS.
4138 if (C) {
4139 // TODO: If we always required a context function, we wouldn't need to
4140 // special case nans.
4141 if (C->isNaN())
4142 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
4143
4144 // TODO: Need version fcmpToClassTest which returns implied class when the
4145 // compare isn't a complete class test. e.g. > 1.0 implies fcPositive, but
4146 // isn't implementable as a class call.
4147 if (C->isNegative() && !C->isNegZero()) {
4148       FPClassTest Interested = KnownFPClass::OrderedLessThanZeroMask;
4149
4150 // TODO: We can catch more cases by using a range check rather than
4151 // relying on CannotBeOrderedLessThanZero.
4152 switch (Pred) {
4153 case FCmpInst::FCMP_UGE:
4154 case FCmpInst::FCMP_UGT:
4155 case FCmpInst::FCMP_UNE: {
4156 KnownFPClass KnownClass = computeLHSClass(Interested);
4157
4158 // (X >= 0) implies (X > C) when (C < 0)
4159 if (KnownClass.cannotBeOrderedLessThanZero())
4160 return getTrue(RetTy);
4161 break;
4162 }
4163 case FCmpInst::FCMP_OEQ:
4164 case FCmpInst::FCMP_OLE:
4165 case FCmpInst::FCMP_OLT: {
4166 KnownFPClass KnownClass = computeLHSClass(Interested);
4167
4168 // (X >= 0) implies !(X < C) when (C < 0)
4169 if (KnownClass.cannotBeOrderedLessThanZero())
4170 return getFalse(RetTy);
4171 break;
4172 }
4173 default:
4174 break;
4175 }
4176 }
4177 // Check comparison of [minnum/maxnum with constant] with other constant.
4178 const APFloat *C2;
4179 if ((match(LHS, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_APFloat(C2))) &&
4180 *C2 < *C) ||
4181 (match(LHS, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_APFloat(C2))) &&
4182 *C2 > *C)) {
4183 bool IsMaxNum =
4184 cast<IntrinsicInst>(LHS)->getIntrinsicID() == Intrinsic::maxnum;
4185 // The ordered relationship and minnum/maxnum guarantee that we do not
4186 // have NaN constants, so ordered/unordered preds are handled the same.
4187 switch (Pred) {
4188 case FCmpInst::FCMP_OEQ:
4189 case FCmpInst::FCMP_UEQ:
4190 // minnum(X, LesserC) == C --> false
4191 // maxnum(X, GreaterC) == C --> false
4192 return getFalse(RetTy);
4193 case FCmpInst::FCMP_ONE:
4194 case FCmpInst::FCMP_UNE:
4195 // minnum(X, LesserC) != C --> true
4196 // maxnum(X, GreaterC) != C --> true
4197 return getTrue(RetTy);
4198 case FCmpInst::FCMP_OGE:
4199 case FCmpInst::FCMP_UGE:
4200 case FCmpInst::FCMP_OGT:
4201 case FCmpInst::FCMP_UGT:
4202 // minnum(X, LesserC) >= C --> false
4203 // minnum(X, LesserC) > C --> false
4204 // maxnum(X, GreaterC) >= C --> true
4205 // maxnum(X, GreaterC) > C --> true
4206 return ConstantInt::get(RetTy, IsMaxNum);
4207 case FCmpInst::FCMP_OLE:
4208 case FCmpInst::FCMP_ULE:
4209 case FCmpInst::FCMP_OLT:
4210 case FCmpInst::FCMP_ULT:
4211 // minnum(X, LesserC) <= C --> true
4212 // minnum(X, LesserC) < C --> true
4213 // maxnum(X, GreaterC) <= C --> false
4214 // maxnum(X, GreaterC) < C --> false
4215 return ConstantInt::get(RetTy, !IsMaxNum);
4216 default:
4217 // TRUE/FALSE/ORD/UNO should be handled before this.
4218 llvm_unreachable("Unexpected fcmp predicate");
4219 }
4220 }
4221 }
4222
4223 // TODO: Could fold this with above if there were a matcher which returned all
4224 // classes in a non-splat vector.
4225 if (match(RHS, m_AnyZeroFP())) {
4226 switch (Pred) {
4227 case FCmpInst::FCMP_OGE:
4228 case FCmpInst::FCMP_ULT: {
4229       FPClassTest Interested = KnownFPClass::OrderedLessThanZeroMask;
4230       if (!FMF.noNaNs())
4231 Interested |= fcNan;
4232
4233 KnownFPClass Known = computeLHSClass(Interested);
4234
4235 // Positive or zero X >= 0.0 --> true
4236 // Positive or zero X < 0.0 --> false
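      // e.g. (illustrative): for %a = fabs(%x) with the call marked nnan,
      // "fcmp oge float %a, 0.0" folds to true below.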
4237       if ((FMF.noNaNs() || Known.isKnownNeverNaN()) &&
4238           Known.cannotBeOrderedLessThanZero())
4239         return Pred == FCmpInst::FCMP_OGE ? getTrue(RetTy) : getFalse(RetTy);
4240 break;
4241 }
4242 case FCmpInst::FCMP_UGE:
4243 case FCmpInst::FCMP_OLT: {
4244       FPClassTest Interested = KnownFPClass::OrderedLessThanZeroMask;
4245       KnownFPClass Known = computeLHSClass(Interested);
4246
4247 // Positive or zero or nan X >= 0.0 --> true
4248 // Positive or zero or nan X < 0.0 --> false
4249 if (Known.cannotBeOrderedLessThanZero())
4250 return Pred == FCmpInst::FCMP_UGE ? getTrue(RetTy) : getFalse(RetTy);
4251 break;
4252 }
4253 default:
4254 break;
4255 }
4256 }
4257
4258 // If the comparison is with the result of a select instruction, check whether
4259 // comparing with either branch of the select always yields the same value.
4260 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
4261 if (Value *V = threadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
4262 return V;
4263
4264 // If the comparison is with the result of a phi instruction, check whether
4265 // doing the compare with each incoming phi value yields a common result.
4266 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
4267 if (Value *V = threadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
4268 return V;
4269
4270 return nullptr;
4271}
4272
4273 Value *llvm::simplifyFCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS,
4274                               FastMathFlags FMF, const SimplifyQuery &Q) {
4275 return ::simplifyFCmpInst(Predicate, LHS, RHS, FMF, Q, RecursionLimit);
4276}
4277
4278 static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
4279                                      const SimplifyQuery &Q,
4280                                      bool AllowRefinement,
4281                                      SmallVectorImpl<Instruction *> *DropFlags,
4282                                      unsigned MaxRecurse) {
4283 assert((AllowRefinement || !Q.CanUseUndef) &&
4284 "If AllowRefinement=false then CanUseUndef=false");
4285
4286 // Trivial replacement.
4287 if (V == Op)
4288 return RepOp;
4289
4290 if (!MaxRecurse--)
4291 return nullptr;
4292
4293 // We cannot replace a constant, and shouldn't even try.
4294 if (isa<Constant>(Op))
4295 return nullptr;
4296
4297 auto *I = dyn_cast<Instruction>(V);
4298 if (!I)
4299 return nullptr;
4300
4301 // The arguments of a phi node might refer to a value from a previous
4302 // cycle iteration.
4303 if (isa<PHINode>(I))
4304 return nullptr;
4305
4306 // For vector types, the simplification must hold per-lane, so forbid
4307 // potentially cross-lane operations like shufflevector.
4308 if (Op->getType()->isVectorTy() && !isNotCrossLaneOperation(I))
4309 return nullptr;
4310
4311 // Don't fold away llvm.is.constant checks based on assumptions.
4312 if (match(I, m_Intrinsic<Intrinsic::is_constant>()))
4313 return nullptr;
4314
4315 // Don't simplify freeze.
4316 if (isa<FreezeInst>(I))
4317 return nullptr;
4318
4319 // Replace Op with RepOp in instruction operands.
4320   SmallVector<Value *, 8> NewOps;
4321   bool AnyReplaced = false;
4322 for (Value *InstOp : I->operands()) {
4323 if (Value *NewInstOp = simplifyWithOpReplaced(
4324 InstOp, Op, RepOp, Q, AllowRefinement, DropFlags, MaxRecurse)) {
4325 NewOps.push_back(NewInstOp);
4326 AnyReplaced = InstOp != NewInstOp;
4327 } else {
4328 NewOps.push_back(InstOp);
4329 }
4330
4331 // Bail out if any operand is undef and SimplifyQuery disables undef
4332 // simplification. Constant folding currently doesn't respect this option.
4333 if (isa<UndefValue>(NewOps.back()) && !Q.CanUseUndef)
4334 return nullptr;
4335 }
4336
4337 if (!AnyReplaced)
4338 return nullptr;
4339
4340 if (!AllowRefinement) {
4341 // General InstSimplify functions may refine the result, e.g. by returning
4342 // a constant for a potentially poison value. To avoid this, implement only
4343 // a few non-refining but profitable transforms here.
4344
4345 if (auto *BO = dyn_cast<BinaryOperator>(I)) {
4346 unsigned Opcode = BO->getOpcode();
4347 // id op x -> x, x op id -> x
4348 // Exclude floats, because x op id may produce a different NaN value.
4349 if (!BO->getType()->isFPOrFPVectorTy()) {
4350 if (NewOps[0] == ConstantExpr::getBinOpIdentity(Opcode, I->getType()))
4351 return NewOps[1];
4352 if (NewOps[1] == ConstantExpr::getBinOpIdentity(Opcode, I->getType(),
4353 /* RHS */ true))
4354 return NewOps[0];
4355 }
4356
4357 // x & x -> x, x | x -> x
4358 if ((Opcode == Instruction::And || Opcode == Instruction::Or) &&
4359 NewOps[0] == NewOps[1]) {
4360 // or disjoint x, x results in poison.
4361 if (auto *PDI = dyn_cast<PossiblyDisjointInst>(BO)) {
4362 if (PDI->isDisjoint()) {
4363 if (!DropFlags)
4364 return nullptr;
4365 DropFlags->push_back(BO);
4366 }
4367 }
4368 return NewOps[0];
4369 }
4370
4371 // x - x -> 0, x ^ x -> 0. This is non-refining, because x is non-poison
4372 // by assumption and this case never wraps, so nowrap flags can be
4373 // ignored.
4374 if ((Opcode == Instruction::Sub || Opcode == Instruction::Xor) &&
4375 NewOps[0] == RepOp && NewOps[1] == RepOp)
4376 return Constant::getNullValue(I->getType());
4377
4378 // If we are substituting an absorber constant into a binop and extra
4379 // poison can't leak if we remove the select -- because both operands of
4380 // the binop are based on the same value -- then it may be safe to replace
4381 // the value with the absorber constant. Examples:
4382 // (Op == 0) ? 0 : (Op & -Op) --> Op & -Op
4383 // (Op == 0) ? 0 : (Op * (binop Op, C)) --> Op * (binop Op, C)
4384 // (Op == -1) ? -1 : (Op | (binop C, Op) --> Op | (binop C, Op)
4385 Constant *Absorber =
4386 ConstantExpr::getBinOpAbsorber(Opcode, I->getType());
4387 if ((NewOps[0] == Absorber || NewOps[1] == Absorber) &&
4388 impliesPoison(BO, Op))
4389 return Absorber;
4390 }
4391
4392 if (isa<GetElementPtrInst>(I)) {
4393 // getelementptr x, 0 -> x.
4394 // This never returns poison, even if inbounds is set.
4395 if (NewOps.size() == 2 && match(NewOps[1], m_Zero()))
4396 return NewOps[0];
4397 }
4398 } else {
4399 // The simplification queries below may return the original value. Consider:
4400 // %div = udiv i32 %arg, %arg2
4401 // %mul = mul nsw i32 %div, %arg2
4402 // %cmp = icmp eq i32 %mul, %arg
4403 // %sel = select i1 %cmp, i32 %div, i32 undef
4404 // Replacing %arg by %mul, %div becomes "udiv i32 %mul, %arg2", which
4405 // simplifies back to %arg. This can only happen because %mul does not
4406 // dominate %div. To ensure a consistent return value contract, we make sure
4407 // that this case returns nullptr as well.
4408 auto PreventSelfSimplify = [V](Value *Simplified) {
4409 return Simplified != V ? Simplified : nullptr;
4410 };
4411
4412 return PreventSelfSimplify(
4413 ::simplifyInstructionWithOperands(I, NewOps, Q, MaxRecurse));
4414 }
4415
4416 // If all operands are constant after substituting Op for RepOp then we can
4417 // constant fold the instruction.
4418   SmallVector<Constant *, 8> ConstOps;
4419   for (Value *NewOp : NewOps) {
4420 if (Constant *ConstOp = dyn_cast<Constant>(NewOp))
4421 ConstOps.push_back(ConstOp);
4422 else
4423 return nullptr;
4424 }
4425
4426 // Consider:
4427 // %cmp = icmp eq i32 %x, 2147483647
4428 // %add = add nsw i32 %x, 1
4429 // %sel = select i1 %cmp, i32 -2147483648, i32 %add
4430 //
4431 // We can't replace %sel with %add unless we strip away the flags (which
4432 // will be done in InstCombine).
4433 // TODO: This may be unsound, because it only catches some forms of
4434 // refinement.
4435 if (!AllowRefinement) {
4436 if (canCreatePoison(cast<Operator>(I), !DropFlags)) {
4437 // abs cannot create poison if the value is known to never be int_min.
4438 if (auto *II = dyn_cast<IntrinsicInst>(I);
4439 II && II->getIntrinsicID() == Intrinsic::abs) {
4440 if (!ConstOps[0]->isNotMinSignedValue())
4441 return nullptr;
4442 } else
4443 return nullptr;
4444 }
4445 Constant *Res = ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI,
4446 /*AllowNonDeterministic=*/false);
4447 if (DropFlags && Res && I->hasPoisonGeneratingAnnotations())
4448 DropFlags->push_back(I);
4449 return Res;
4450 }
4451
4452 return ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI,
4453 /*AllowNonDeterministic=*/false);
4454}
4455
4456 Value *llvm::simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
4457                                     const SimplifyQuery &Q,
4458 bool AllowRefinement,
4459 SmallVectorImpl<Instruction *> *DropFlags) {
4460 // If refinement is disabled, also disable undef simplifications (which are
4461 // always refinements) in SimplifyQuery.
4462 if (!AllowRefinement)
4463 return ::simplifyWithOpReplaced(V, Op, RepOp, Q.getWithoutUndef(),
4464 AllowRefinement, DropFlags, RecursionLimit);
4465   return ::simplifyWithOpReplaced(V, Op, RepOp, Q, AllowRefinement, DropFlags,
4466                                   RecursionLimit);
4467 }
4468
4469/// Try to simplify a select instruction when its condition operand is an
4470/// integer comparison where one operand of the compare is a constant.
4471static Value *simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X,
4472 const APInt *Y, bool TrueWhenUnset) {
4473 const APInt *C;
4474
4475 // (X & Y) == 0 ? X & ~Y : X --> X
4476 // (X & Y) != 0 ? X & ~Y : X --> X & ~Y
4477 if (FalseVal == X && match(TrueVal, m_And(m_Specific(X), m_APInt(C))) &&
4478 *Y == ~*C)
4479 return TrueWhenUnset ? FalseVal : TrueVal;
4480
4481 // (X & Y) == 0 ? X : X & ~Y --> X & ~Y
4482 // (X & Y) != 0 ? X : X & ~Y --> X
4483 if (TrueVal == X && match(FalseVal, m_And(m_Specific(X), m_APInt(C))) &&
4484 *Y == ~*C)
4485 return TrueWhenUnset ? FalseVal : TrueVal;
4486
4487 if (Y->isPowerOf2()) {
4488 // (X & Y) == 0 ? X | Y : X --> X | Y
4489 // (X & Y) != 0 ? X | Y : X --> X
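    // e.g. (illustrative): with %a = and i32 %x, 8 and %c = icmp eq %a, 0,
    // "select i1 %c, (or i32 %x, 8), i32 %x" can fold to the or.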
4490 if (FalseVal == X && match(TrueVal, m_Or(m_Specific(X), m_APInt(C))) &&
4491 *Y == *C) {
4492 // We can't return the or if it has the disjoint flag.
4493 if (TrueWhenUnset && cast<PossiblyDisjointInst>(TrueVal)->isDisjoint())
4494 return nullptr;
4495 return TrueWhenUnset ? TrueVal : FalseVal;
4496 }
4497
4498 // (X & Y) == 0 ? X : X | Y --> X
4499 // (X & Y) != 0 ? X : X | Y --> X | Y
4500 if (TrueVal == X && match(FalseVal, m_Or(m_Specific(X), m_APInt(C))) &&
4501 *Y == *C) {
4502 // We can't return the or if it has the disjoint flag.
4503 if (!TrueWhenUnset && cast<PossiblyDisjointInst>(FalseVal)->isDisjoint())
4504 return nullptr;
4505 return TrueWhenUnset ? TrueVal : FalseVal;
4506 }
4507 }
4508
4509 return nullptr;
4510}
4511
4512static Value *simplifyCmpSelOfMaxMin(Value *CmpLHS, Value *CmpRHS,
4513 CmpPredicate Pred, Value *TVal,
4514 Value *FVal) {
4515 // Canonicalize common cmp+sel operand as CmpLHS.
4516 if (CmpRHS == TVal || CmpRHS == FVal) {
4517 std::swap(CmpLHS, CmpRHS);
4518 Pred = ICmpInst::getSwappedPredicate(Pred);
4519 }
4520
4521 // Canonicalize common cmp+sel operand as TVal.
4522 if (CmpLHS == FVal) {
4523 std::swap(TVal, FVal);
4524 Pred = ICmpInst::getInversePredicate(Pred);
4525 }
4526
4527 // A vector select may be shuffling together elements that are equivalent
4528 // based on the max/min/select relationship.
4529 Value *X = CmpLHS, *Y = CmpRHS;
4530 bool PeekedThroughSelectShuffle = false;
4531 auto *Shuf = dyn_cast<ShuffleVectorInst>(FVal);
4532 if (Shuf && Shuf->isSelect()) {
4533 if (Shuf->getOperand(0) == Y)
4534 FVal = Shuf->getOperand(1);
4535 else if (Shuf->getOperand(1) == Y)
4536 FVal = Shuf->getOperand(0);
4537 else
4538 return nullptr;
4539 PeekedThroughSelectShuffle = true;
4540 }
4541
4542 // (X pred Y) ? X : max/min(X, Y)
4543 auto *MMI = dyn_cast<MinMaxIntrinsic>(FVal);
4544   if (!MMI || TVal != X ||
4545       !match(FVal, m_c_MaxOrMin(m_Specific(X), m_Specific(Y))))
4546     return nullptr;
4547
4548 // (X > Y) ? X : max(X, Y) --> max(X, Y)
4549 // (X >= Y) ? X : max(X, Y) --> max(X, Y)
4550 // (X < Y) ? X : min(X, Y) --> min(X, Y)
4551 // (X <= Y) ? X : min(X, Y) --> min(X, Y)
4552 //
4553 // The equivalence allows a vector select (shuffle) of max/min and Y. Ex:
4554 // (X > Y) ? X : (Z ? max(X, Y) : Y)
4555 // If Z is true, this reduces as above, and if Z is false:
4556 // (X > Y) ? X : Y --> max(X, Y)
4557 ICmpInst::Predicate MMPred = MMI->getPredicate();
4558 if (MMPred == CmpInst::getStrictPredicate(Pred))
4559 return MMI;
4560
4561 // Other transforms are not valid with a shuffle.
4562 if (PeekedThroughSelectShuffle)
4563 return nullptr;
4564
4565 // (X == Y) ? X : max/min(X, Y) --> max/min(X, Y)
4566 if (Pred == CmpInst::ICMP_EQ)
4567 return MMI;
4568
4569 // (X != Y) ? X : max/min(X, Y) --> X
4570 if (Pred == CmpInst::ICMP_NE)
4571 return X;
4572
4573 // (X < Y) ? X : max(X, Y) --> X
4574 // (X <= Y) ? X : max(X, Y) --> X
4575 // (X > Y) ? X : min(X, Y) --> X
4576 // (X >= Y) ? X : min(X, Y) --> X
4577   ICmpInst::Predicate InvPred = ICmpInst::getInversePredicate(Pred);
4578   if (MMPred == CmpInst::getStrictPredicate(InvPred))
4579 return X;
4580
4581 return nullptr;
4582}
4583
4584/// An alternative way to test if a bit is set or not uses sgt/slt instead of
4585/// eq/ne.
4586 static Value *simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *CmpRHS,
4587                                            CmpPredicate Pred, Value *TrueVal,
4588 Value *FalseVal) {
4589 if (auto Res = decomposeBitTestICmp(CmpLHS, CmpRHS, Pred))
4590 return simplifySelectBitTest(TrueVal, FalseVal, Res->X, &Res->Mask,
4591 Res->Pred == ICmpInst::ICMP_EQ);
4592
4593 return nullptr;
4594}
4595
4596/// Try to simplify a select instruction when its condition operand is an
4597/// integer equality or floating-point equivalence comparison.
4598 static Value *simplifySelectWithEquivalence(Value *CmpLHS, Value *CmpRHS,
4599                                             Value *TrueVal, Value *FalseVal,
4600 const SimplifyQuery &Q,
4601 unsigned MaxRecurse) {
4602 if (simplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q.getWithoutUndef(),
4603 /* AllowRefinement */ false,
4604 /* DropFlags */ nullptr, MaxRecurse) == TrueVal)
4605 return FalseVal;
4606 if (simplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q,
4607 /* AllowRefinement */ true,
4608 /* DropFlags */ nullptr, MaxRecurse) == FalseVal)
4609 return FalseVal;
4610
4611 return nullptr;
4612}
4613
4614/// Try to simplify a select instruction when its condition operand is an
4615/// integer comparison.
4616static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal,
4617 Value *FalseVal,
4618 const SimplifyQuery &Q,
4619 unsigned MaxRecurse) {
4620 CmpPredicate Pred;
4621 Value *CmpLHS, *CmpRHS;
4622 if (!match(CondVal, m_ICmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS))))
4623 return nullptr;
4624
4625 if (Value *V = simplifyCmpSelOfMaxMin(CmpLHS, CmpRHS, Pred, TrueVal, FalseVal))
4626 return V;
4627
4628 // Canonicalize ne to eq predicate.
4629 if (Pred == ICmpInst::ICMP_NE) {
4630 Pred = ICmpInst::ICMP_EQ;
4631 std::swap(TrueVal, FalseVal);
4632 }
4633
4634 // Check for integer min/max with a limit constant:
4635 // X > MIN_INT ? X : MIN_INT --> X
4636 // X < MAX_INT ? X : MAX_INT --> X
4637 if (TrueVal->getType()->isIntOrIntVectorTy()) {
4638     Value *X, *Y;
4639     SelectPatternFlavor SPF =
4640         matchDecomposedSelectPattern(cast<ICmpInst>(CondVal), TrueVal, FalseVal,
4641                                      X, Y)
4642             .Flavor;
4643     if (SelectPatternResult::isMinOrMax(SPF) && Pred == getMinMaxPred(SPF)) {
4644       APInt LimitC = getMinMaxLimit(getInverseMinMaxFlavor(SPF),
4645                                     X->getType()->getScalarSizeInBits());
4646 if (match(Y, m_SpecificInt(LimitC)))
4647 return X;
4648 }
4649 }
4650
4651 if (Pred == ICmpInst::ICMP_EQ && match(CmpRHS, m_Zero())) {
4652 Value *X;
4653 const APInt *Y;
4654 if (match(CmpLHS, m_And(m_Value(X), m_APInt(Y))))
4655 if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, Y,
4656 /*TrueWhenUnset=*/true))
4657 return V;
4658
4659 // Test for a bogus zero-shift-guard-op around funnel-shift or rotate.
4660 Value *ShAmt;
4661 auto isFsh = m_CombineOr(m_FShl(m_Value(X), m_Value(), m_Value(ShAmt)),
4662 m_FShr(m_Value(), m_Value(X), m_Value(ShAmt)));
4663 // (ShAmt == 0) ? fshl(X, *, ShAmt) : X --> X
4664 // (ShAmt == 0) ? fshr(*, X, ShAmt) : X --> X
4665 if (match(TrueVal, isFsh) && FalseVal == X && CmpLHS == ShAmt)
4666 return X;
4667
4668 // Test for a zero-shift-guard-op around rotates. These are used to
4669 // avoid UB from oversized shifts in raw IR rotate patterns, but the
4670 // intrinsics do not have that problem.
4671 // We do not allow this transform for the general funnel shift case because
4672 // that would not preserve the poison safety of the original code.
4673     auto isRotate =
4674         m_CombineOr(m_FShl(m_Value(X), m_Deferred(X), m_Value(ShAmt)),
4675                     m_FShr(m_Value(X), m_Deferred(X), m_Value(ShAmt)));
4676 // (ShAmt == 0) ? X : fshl(X, X, ShAmt) --> fshl(X, X, ShAmt)
4677 // (ShAmt == 0) ? X : fshr(X, X, ShAmt) --> fshr(X, X, ShAmt)
4678 if (match(FalseVal, isRotate) && TrueVal == X && CmpLHS == ShAmt &&
4679 Pred == ICmpInst::ICMP_EQ)
4680 return FalseVal;
4681
4682 // X == 0 ? abs(X) : -abs(X) --> -abs(X)
4683 // X == 0 ? -abs(X) : abs(X) --> abs(X)
4684 if (match(TrueVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))) &&
4685 match(FalseVal, m_Neg(m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS)))))
4686 return FalseVal;
4687 if (match(TrueVal,
4688 m_Neg(m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS)))) &&
4689 match(FalseVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))))
4690 return FalseVal;
4691 }
4692
4693 // Check for other compares that behave like bit test.
4694 if (Value *V =
4695 simplifySelectWithFakeICmpEq(CmpLHS, CmpRHS, Pred, TrueVal, FalseVal))
4696 return V;
4697
4698 // If we have a scalar equality comparison, then we know the value in one of
4699 // the arms of the select. See if substituting this value into the arm and
4700 // simplifying the result yields the same value as the other arm.
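  // e.g. (illustrative): for %c = icmp eq i32 %x, 0,
  // "select i1 %c, i32 %y, (or i32 %x, %y)" folds to the or: substituting
  // 0 for %x in the false arm yields %y, the true arm.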
4701 if (Pred == ICmpInst::ICMP_EQ) {
4702 if (Value *V = simplifySelectWithEquivalence(CmpLHS, CmpRHS, TrueVal,
4703 FalseVal, Q, MaxRecurse))
4704 return V;
4705 if (Value *V = simplifySelectWithEquivalence(CmpRHS, CmpLHS, TrueVal,
4706 FalseVal, Q, MaxRecurse))
4707 return V;
4708
4709 Value *X;
4710 Value *Y;
4711 // select((X | Y) == 0 ? X : 0) --> 0 (commuted 2 ways)
4712 if (match(CmpLHS, m_Or(m_Value(X), m_Value(Y))) &&
4713 match(CmpRHS, m_Zero())) {
4714 // (X | Y) == 0 implies X == 0 and Y == 0.
4715 if (Value *V = simplifySelectWithEquivalence(X, CmpRHS, TrueVal, FalseVal,
4716 Q, MaxRecurse))
4717 return V;
4718 if (Value *V = simplifySelectWithEquivalence(Y, CmpRHS, TrueVal, FalseVal,
4719 Q, MaxRecurse))
4720 return V;
4721 }
4722
4723 // select((X & Y) == -1 ? X : -1) --> -1 (commuted 2 ways)
4724 if (match(CmpLHS, m_And(m_Value(X), m_Value(Y))) &&
4725 match(CmpRHS, m_AllOnes())) {
4726 // (X & Y) == -1 implies X == -1 and Y == -1.
4727 if (Value *V = simplifySelectWithEquivalence(X, CmpRHS, TrueVal, FalseVal,
4728 Q, MaxRecurse))
4729 return V;
4730 if (Value *V = simplifySelectWithEquivalence(Y, CmpRHS, TrueVal, FalseVal,
4731 Q, MaxRecurse))
4732 return V;
4733 }
4734 }
4735
4736 return nullptr;
4737}
4738
4739/// Try to simplify a select instruction when its condition operand is a
4740/// floating-point comparison.
4741 static Value *simplifySelectWithFCmp(Value *Cond, Value *T, Value *F,
4742                                      const SimplifyQuery &Q,
4743 unsigned MaxRecurse) {
4744 CmpPredicate Pred;
4745 Value *CmpLHS, *CmpRHS;
4746 if (!match(Cond, m_FCmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS))))
4747 return nullptr;
4748 FCmpInst *I = cast<FCmpInst>(Cond);
4749
4750 bool IsEquiv = I->isEquivalence();
4751 if (I->isEquivalence(/*Invert=*/true)) {
4752 std::swap(T, F);
4753 Pred = FCmpInst::getInversePredicate(Pred);
4754 IsEquiv = true;
4755 }
4756
4757   // This transform is safe if at least one operand is known not to be zero.
4758 // Otherwise, the select can change the sign of a zero operand.
4759 if (IsEquiv) {
4760 if (Value *V =
4761 simplifySelectWithEquivalence(CmpLHS, CmpRHS, T, F, Q, MaxRecurse))
4762 return V;
4763 if (Value *V =
4764 simplifySelectWithEquivalence(CmpRHS, CmpLHS, T, F, Q, MaxRecurse))
4765 return V;
4766 }
4767
4768 // Canonicalize CmpLHS to be T, and CmpRHS to be F, if they're swapped.
4769 if (CmpLHS == F && CmpRHS == T)
4770 std::swap(CmpLHS, CmpRHS);
4771
4772 if (CmpLHS != T || CmpRHS != F)
4773 return nullptr;
4774
4775 // This transform is also safe if we do not have (do not care about) -0.0.
4776 if (Q.CxtI && isa<FPMathOperator>(Q.CxtI) && Q.CxtI->hasNoSignedZeros()) {
4777 // (T == F) ? T : F --> F
4778 if (Pred == FCmpInst::FCMP_OEQ)
4779 return F;
4780
4781 // (T != F) ? T : F --> T
4782 if (Pred == FCmpInst::FCMP_UNE)
4783 return T;
4784 }
4785
4786 return nullptr;
4787}
4788
4789/// Given operands for a SelectInst, see if we can fold the result.
4790/// If not, this returns null.
4791static Value *simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
4792 const SimplifyQuery &Q, unsigned MaxRecurse) {
4793 if (auto *CondC = dyn_cast<Constant>(Cond)) {
4794 if (auto *TrueC = dyn_cast<Constant>(TrueVal))
4795 if (auto *FalseC = dyn_cast<Constant>(FalseVal))
4796 if (Constant *C = ConstantFoldSelectInstruction(CondC, TrueC, FalseC))
4797 return C;
4798
4799 // select poison, X, Y -> poison
4800 if (isa<PoisonValue>(CondC))
4801 return PoisonValue::get(TrueVal->getType());
4802
4803 // select undef, X, Y -> X or Y
4804 if (Q.isUndefValue(CondC))
4805 return isa<Constant>(FalseVal) ? FalseVal : TrueVal;
4806
4807 // select true, X, Y --> X
4808 // select false, X, Y --> Y
4809 // For vectors, allow undef/poison elements in the condition to match the
4810 // defined elements, so we can eliminate the select.
4811 if (match(CondC, m_One()))
4812 return TrueVal;
4813 if (match(CondC, m_Zero()))
4814 return FalseVal;
4815 }
4816
4817 assert(Cond->getType()->isIntOrIntVectorTy(1) &&
4818 "Select must have bool or bool vector condition");
4819 assert(TrueVal->getType() == FalseVal->getType() &&
4820 "Select must have same types for true/false ops");
4821
4822 if (Cond->getType() == TrueVal->getType()) {
4823 // select i1 Cond, i1 true, i1 false --> i1 Cond
4824 if (match(TrueVal, m_One()) && match(FalseVal, m_ZeroInt()))
4825 return Cond;
4826
4827 // (X && Y) ? X : Y --> Y (commuted 2 ways)
4828 if (match(Cond, m_c_LogicalAnd(m_Specific(TrueVal), m_Specific(FalseVal))))
4829 return FalseVal;
4830
4831 // (X || Y) ? X : Y --> X (commuted 2 ways)
4832 if (match(Cond, m_c_LogicalOr(m_Specific(TrueVal), m_Specific(FalseVal))))
4833 return TrueVal;
4834
4835 // (X || Y) ? false : X --> false (commuted 2 ways)
4836 if (match(Cond, m_c_LogicalOr(m_Specific(FalseVal), m_Value())) &&
4837 match(TrueVal, m_ZeroInt()))
4838 return ConstantInt::getFalse(Cond->getType());
4839
4840 // Match patterns that end in logical-and.
4841 if (match(FalseVal, m_ZeroInt())) {
4842 // !(X || Y) && X --> false (commuted 2 ways)
4843 if (match(Cond, m_Not(m_c_LogicalOr(m_Specific(TrueVal), m_Value()))))
4844 return ConstantInt::getFalse(Cond->getType());
4845 // X && !(X || Y) --> false (commuted 2 ways)
4846 if (match(TrueVal, m_Not(m_c_LogicalOr(m_Specific(Cond), m_Value()))))
4847 return ConstantInt::getFalse(Cond->getType());
4848
4849 // (X || Y) && Y --> Y (commuted 2 ways)
4850 if (match(Cond, m_c_LogicalOr(m_Specific(TrueVal), m_Value())))
4851 return TrueVal;
4852 // Y && (X || Y) --> Y (commuted 2 ways)
4853 if (match(TrueVal, m_c_LogicalOr(m_Specific(Cond), m_Value())))
4854 return Cond;
4855
4856 // (X || Y) && (X || !Y) --> X (commuted 8 ways)
4857 Value *X, *Y;
4858 if (match(Cond, m_c_LogicalOr(m_Value(X), m_Not(m_Value(Y)))) &&
4859 match(TrueVal, m_c_LogicalOr(m_Specific(X), m_Specific(Y))))
4860 return X;
4861 if (match(TrueVal, m_c_LogicalOr(m_Value(X), m_Not(m_Value(Y)))) &&
4862 match(Cond, m_c_LogicalOr(m_Specific(X), m_Specific(Y))))
4863 return X;
4864 }
4865
4866 // Match patterns that end in logical-or.
4867 if (match(TrueVal, m_One())) {
4868 // !(X && Y) || X --> true (commuted 2 ways)
4869 if (match(Cond, m_Not(m_c_LogicalAnd(m_Specific(FalseVal), m_Value()))))
4870 return ConstantInt::getTrue(Cond->getType());
4871 // X || !(X && Y) --> true (commuted 2 ways)
4872 if (match(FalseVal, m_Not(m_c_LogicalAnd(m_Specific(Cond), m_Value()))))
4873 return ConstantInt::getTrue(Cond->getType());
4874
4875 // (X && Y) || Y --> Y (commuted 2 ways)
4876 if (match(Cond, m_c_LogicalAnd(m_Specific(FalseVal), m_Value())))
4877 return FalseVal;
4878 // Y || (X && Y) --> Y (commuted 2 ways)
4879 if (match(FalseVal, m_c_LogicalAnd(m_Specific(Cond), m_Value())))
4880 return Cond;
4881 }
4882 }
4883
4884 // select ?, X, X -> X
4885 if (TrueVal == FalseVal)
4886 return TrueVal;
4887
4888 if (Cond == TrueVal) {
4889 // select i1 X, i1 X, i1 false --> X (logical-and)
4890 if (match(FalseVal, m_ZeroInt()))
4891 return Cond;
4892 // select i1 X, i1 X, i1 true --> true
4893 if (match(FalseVal, m_One()))
4894 return ConstantInt::getTrue(Cond->getType());
4895 }
4896 if (Cond == FalseVal) {
4897 // select i1 X, i1 true, i1 X --> X (logical-or)
4898 if (match(TrueVal, m_One()))
4899 return Cond;
4900 // select i1 X, i1 false, i1 X --> false
4901 if (match(TrueVal, m_ZeroInt()))
4902 return ConstantInt::getFalse(Cond->getType());
4903 }
4904
4905 // If the true or false value is poison, we can fold to the other value.
4906 // If the true or false value is undef, we can fold to the other value as
4907 // long as the other value isn't poison.
4908 // select ?, poison, X -> X
4909 // select ?, undef, X -> X
4910 if (isa<PoisonValue>(TrueVal) ||
4911 (Q.isUndefValue(TrueVal) && impliesPoison(FalseVal, Cond)))
4912 return FalseVal;
4913 // select ?, X, poison -> X
4914 // select ?, X, undef -> X
4915 if (isa<PoisonValue>(FalseVal) ||
4916 (Q.isUndefValue(FalseVal) && impliesPoison(TrueVal, Cond)))
4917 return TrueVal;
4918
4919 // Deal with partial undef vector constants: select ?, VecC, VecC' --> VecC''
4920 Constant *TrueC, *FalseC;
4921 if (isa<FixedVectorType>(TrueVal->getType()) &&
4922 match(TrueVal, m_Constant(TrueC)) &&
4923 match(FalseVal, m_Constant(FalseC))) {
4924 unsigned NumElts =
4925 cast<FixedVectorType>(TrueC->getType())->getNumElements();
4926 SmallVector<Constant *, 16> NewC;
4927 for (unsigned i = 0; i != NumElts; ++i) {
4928 // Bail out on incomplete vector constants.
4929 Constant *TEltC = TrueC->getAggregateElement(i);
4930 Constant *FEltC = FalseC->getAggregateElement(i);
4931 if (!TEltC || !FEltC)
4932 break;
4933
4934 // If the elements match (undef or not), that value is the result. If only
4935 // one element is undef, choose the defined element as the safe result.
4936 if (TEltC == FEltC)
4937 NewC.push_back(TEltC);
4938 else if (isa<PoisonValue>(TEltC) ||
4939 (Q.isUndefValue(TEltC) && isGuaranteedNotToBePoison(FEltC)))
4940 NewC.push_back(FEltC);
4941 else if (isa<PoisonValue>(FEltC) ||
4942 (Q.isUndefValue(FEltC) && isGuaranteedNotToBePoison(TEltC)))
4943 NewC.push_back(TEltC);
4944 else
4945 break;
4946 }
4947 if (NewC.size() == NumElts)
4948 return ConstantVector::get(NewC);
4949 }
4950
4951 if (Value *V =
4952 simplifySelectWithICmpCond(Cond, TrueVal, FalseVal, Q, MaxRecurse))
4953 return V;
4954
4955 if (Value *V = simplifySelectWithFCmp(Cond, TrueVal, FalseVal, Q, MaxRecurse))
4956 return V;
4957
4958 std::optional<bool> Imp = isImpliedByDomCondition(Cond, Q.CxtI, Q.DL);
4959 if (Imp)
4960 return *Imp ? TrueVal : FalseVal;
4961
4962 return nullptr;
4963}
4964
4965 Value *llvm::simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
4966 const SimplifyQuery &Q) {
4967 return ::simplifySelectInst(Cond, TrueVal, FalseVal, Q, RecursionLimit);
4968}
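// Two concrete instances of the folds above (illustrative IR):
//   select i1 %x, i1 %x, i1 false   --> %x   (select acts as logical-and)
//   select i1 %c, i8 %v, i8 %v      --> %v   (both arms are the same value)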
4969
4970/// Given operands for a GetElementPtrInst, see if we can fold the result.
4971/// If not, this returns null.
4972 static Value *simplifyGEPInst(Type *SrcTy, Value *Ptr,
4973 ArrayRef<Value *> Indices, GEPNoWrapFlags NW,
4974 const SimplifyQuery &Q, unsigned) {
4975 // The address space of the GEP pointer operand.
4976 unsigned AS =
4977 cast<PointerType>(Ptr->getType()->getScalarType())->getAddressSpace();
4978
4979 // getelementptr P -> P.
4980 if (Indices.empty())
4981 return Ptr;
4982
4983 // Compute the (pointer) type returned by the GEP instruction.
4984 Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Indices);
4985 Type *GEPTy = Ptr->getType();
4986 if (!GEPTy->isVectorTy()) {
4987 for (Value *Op : Indices) {
4988 // If one of the operands is a vector, the result type is a vector of
4989 // pointers. All vector operands must have the same number of elements.
4990 if (VectorType *VT = dyn_cast<VectorType>(Op->getType())) {
4991 GEPTy = VectorType::get(GEPTy, VT->getElementCount());
4992 break;
4993 }
4994 }
4995 }
4996
4997 // All-zero GEP is a no-op, unless it performs a vector splat.
4998 if (Ptr->getType() == GEPTy &&
4999 all_of(Indices, [](const auto *V) { return match(V, m_Zero()); }))
5000 return Ptr;
5001
5002 // getelementptr poison, idx -> poison
5003 // getelementptr baseptr, poison -> poison
5004 if (isa<PoisonValue>(Ptr) ||
5005 any_of(Indices, [](const auto *V) { return isa<PoisonValue>(V); }))
5006 return PoisonValue::get(GEPTy);
5007
5008 // getelementptr undef, idx -> undef
5009 if (Q.isUndefValue(Ptr))
5010 return UndefValue::get(GEPTy);
5011
5012 bool IsScalableVec =
5013 SrcTy->isScalableTy() || any_of(Indices, [](const Value *V) {
5014 return isa<ScalableVectorType>(V->getType());
5015 });
5016
5017 if (Indices.size() == 1) {
5018 Type *Ty = SrcTy;
5019 if (!IsScalableVec && Ty->isSized()) {
5020 Value *P;
5021 uint64_t C;
5022 uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
5023 // getelementptr P, N -> P if P points to a type of zero size.
5024 if (TyAllocSize == 0 && Ptr->getType() == GEPTy)
5025 return Ptr;
5026
5027 // The following transforms are only safe if the ptrtoint cast
5028 // doesn't truncate the pointers.
5029 if (Indices[0]->getType()->getScalarSizeInBits() ==
5030 Q.DL.getPointerSizeInBits(AS)) {
5031 auto CanSimplify = [GEPTy, &P, Ptr]() -> bool {
5032 return P->getType() == GEPTy &&
5033 getUnderlyingObject(P) == getUnderlyingObject(Ptr);
5034 };
5035 // getelementptr V, (sub P, V) -> P if P points to a type of size 1.
5036 if (TyAllocSize == 1 &&
5037 match(Indices[0],
5038 m_Sub(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Specific(Ptr)))) &&
5039 CanSimplify())
5040 return P;
5041
5042 // getelementptr V, (ashr (sub P, V), C) -> P if P points to a type of
5043 // size 1 << C.
5044 if (match(Indices[0], m_AShr(m_Sub(m_PtrToInt(m_Value(P)),
5045 m_PtrToInt(m_Specific(Ptr))),
5046 m_ConstantInt(C))) &&
5047 TyAllocSize == 1ULL << C && CanSimplify())
5048 return P;
5049
5050 // getelementptr V, (sdiv (sub P, V), C) -> P if P points to a type of
5051 // size C.
5052 if (match(Indices[0], m_SDiv(m_Sub(m_PtrToInt(m_Value(P)),
5053 m_PtrToInt(m_Specific(Ptr))),
5054 m_SpecificInt(TyAllocSize))) &&
5055 CanSimplify())
5056 return P;
5057 }
5058 }
5059 }
5060
5061 if (!IsScalableVec && Q.DL.getTypeAllocSize(LastType) == 1 &&
5062 all_of(Indices.drop_back(1),
5063 [](Value *Idx) { return match(Idx, m_Zero()); })) {
5064 unsigned IdxWidth =
5065 Q.DL.getIndexSizeInBits(Ptr->getType()->getPointerAddressSpace());
5066 if (Q.DL.getTypeSizeInBits(Indices.back()->getType()) == IdxWidth) {
5067 APInt BasePtrOffset(IdxWidth, 0);
5068 Value *StrippedBasePtr =
5069 Ptr->stripAndAccumulateInBoundsConstantOffsets(Q.DL, BasePtrOffset);
5070
5071 // Avoid creating inttoptr of zero here: while LLVM's treatment of
5072 // inttoptr is generally conservative, this particular case is folded to
5073 // a null pointer, which will have incorrect provenance.
5074
5075 // gep (gep V, C), (sub 0, V) -> C
5076 if (match(Indices.back(),
5077 m_Neg(m_PtrToInt(m_Specific(StrippedBasePtr)))) &&
5078 !BasePtrOffset.isZero()) {
5079 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset);
5080 return ConstantExpr::getIntToPtr(CI, GEPTy);
5081 }
5082 // gep (gep V, C), (xor V, -1) -> C-1
5083 if (match(Indices.back(),
5084 m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)), m_AllOnes())) &&
5085 !BasePtrOffset.isOne()) {
5086 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1);
5087 return ConstantExpr::getIntToPtr(CI, GEPTy);
5088 }
5089 }
5090 }
5091
5092 // Check to see if this is constant foldable.
5093 if (!isa<Constant>(Ptr) ||
5094 !all_of(Indices, [](Value *V) { return isa<Constant>(V); }))
5095 return nullptr;
5096
5097 if (!ConstantExpr::isSupportedGetElementPtr(SrcTy))
5098 return ConstantFoldGetElementPtr(SrcTy, cast<Constant>(Ptr), std::nullopt,
5099 Indices);
5100
5101 auto *CE =
5102 ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ptr), Indices, NW);
5103 return ConstantFoldConstant(CE, Q.DL);
5104}
5105
5106 Value *llvm::simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef<Value *> Indices,
5107 GEPNoWrapFlags NW, const SimplifyQuery &Q) {
5108 return ::simplifyGEPInst(SrcTy, Ptr, Indices, NW, Q, RecursionLimit);
5109}
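// Representative GEP folds from above (illustrative IR):
//   getelementptr i32, ptr %p, i64 0          --> %p   (all-zero indices)
//   getelementptr [0 x i32], ptr %p, i64 %n   --> %p   (zero-size element type)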
5110
5111/// Given operands for an InsertValueInst, see if we can fold the result.
5112/// If not, this returns null.
5113 static Value *simplifyInsertValueInst(Value *Agg, Value *Val,
5114 ArrayRef<unsigned> Idxs,
5115 const SimplifyQuery &Q, unsigned) {
5116 if (Constant *CAgg = dyn_cast<Constant>(Agg))
5117 if (Constant *CVal = dyn_cast<Constant>(Val))
5118 return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs);
5119
5120 // insertvalue x, poison, n -> x
5121 // insertvalue x, undef, n -> x if x cannot be poison
5122 if (isa<PoisonValue>(Val) ||
5123 (Q.isUndefValue(Val) && isGuaranteedNotToBePoison(Agg)))
5124 return Agg;
5125
5126 // insertvalue x, (extractvalue y, n), n
5127 if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val))
5128 if (EV->getAggregateOperand()->getType() == Agg->getType() &&
5129 EV->getIndices() == Idxs) {
5130 // insertvalue poison, (extractvalue y, n), n -> y
5131 // insertvalue undef, (extractvalue y, n), n -> y if y cannot be poison
5132 if (isa<PoisonValue>(Agg) ||
5133 (Q.isUndefValue(Agg) &&
5134 isGuaranteedNotToBePoison(EV->getAggregateOperand())))
5135 return EV->getAggregateOperand();
5136
5137 // insertvalue y, (extractvalue y, n), n -> y
5138 if (Agg == EV->getAggregateOperand())
5139 return Agg;
5140 }
5141
5142 return nullptr;
5143}
5144
5145 Value *llvm::simplifyInsertValueInst(Value *Agg, Value *Val,
5146 ArrayRef<unsigned> Idxs,
5147 const SimplifyQuery &Q) {
5148 return ::simplifyInsertValueInst(Agg, Val, Idxs, Q, RecursionLimit);
5149}
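// For instance (illustrative IR), reinserting a just-extracted member is a
// no-op:
//   %e = extractvalue {i32, i32} %agg, 0
//   %r = insertvalue {i32, i32} %agg, i32 %e, 0   ; --> %agg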
5150
5151 Value *llvm::simplifyInsertElementInst(Value *Vec, Value *Val, Value *Idx,
5152 const SimplifyQuery &Q) {
5153 // Try to constant fold.
5154 auto *VecC = dyn_cast<Constant>(Vec);
5155 auto *ValC = dyn_cast<Constant>(Val);
5156 auto *IdxC = dyn_cast<Constant>(Idx);
5157 if (VecC && ValC && IdxC)
5158 return ConstantExpr::getInsertElement(VecC, ValC, IdxC);
5159
5160 // For fixed-length vector, fold into poison if index is out of bounds.
5161 if (auto *CI = dyn_cast<ConstantInt>(Idx)) {
5162 if (isa<FixedVectorType>(Vec->getType()) &&
5163 CI->uge(cast<FixedVectorType>(Vec->getType())->getNumElements()))
5164 return PoisonValue::get(Vec->getType());
5165 }
5166
5167 // If index is undef, it might be out of bounds (see above case)
5168 if (Q.isUndefValue(Idx))
5169 return PoisonValue::get(Vec->getType());
5170
5171 // If the scalar is poison, or it is undef and there is no risk of
5172 // propagating poison from the vector value, simplify to the vector value.
5173 if (isa<PoisonValue>(Val) ||
5174 (Q.isUndefValue(Val) && isGuaranteedNotToBePoison(Vec)))
5175 return Vec;
5176
5177 // Inserting the splatted value into a constant splat does nothing.
5178 if (VecC && ValC && VecC->getSplatValue() == ValC)
5179 return Vec;
5180
5181 // If we are extracting a value from a vector, then inserting it into the same
5182 // place, that's the input vector:
5183 // insertelt Vec, (extractelt Vec, Idx), Idx --> Vec
5184 if (match(Val, m_ExtractElt(m_Specific(Vec), m_Specific(Idx))))
5185 return Vec;
5186
5187 return nullptr;
5188}
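// For instance (illustrative IR), the last fold above:
//   %e = extractelement <4 x i32> %v, i64 %i
//   %r = insertelement <4 x i32> %v, i32 %e, i64 %i   ; --> %v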
5189
5190/// Given operands for an ExtractValueInst, see if we can fold the result.
5191/// If not, this returns null.
5192 static Value *simplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
5193 const SimplifyQuery &, unsigned) {
5194 if (auto *CAgg = dyn_cast<Constant>(Agg))
5195 return ConstantFoldExtractValueInstruction(CAgg, Idxs);
5196
5197 // extractvalue x, (insertvalue y, elt, n), n -> elt
5198 unsigned NumIdxs = Idxs.size();
5199 for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr;
5200 IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) {
5201 ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices();
5202 unsigned NumInsertValueIdxs = InsertValueIdxs.size();
5203 unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs);
5204 if (InsertValueIdxs.slice(0, NumCommonIdxs) ==
5205 Idxs.slice(0, NumCommonIdxs)) {
5206 if (NumIdxs == NumInsertValueIdxs)
5207 return IVI->getInsertedValueOperand();
5208 break;
5209 }
5210 }
5211
5212 return nullptr;
5213}
5214
5215 Value *llvm::simplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
5216 const SimplifyQuery &Q) {
5217 return ::simplifyExtractValueInst(Agg, Idxs, Q, RecursionLimit);
5218}
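// For instance (illustrative IR), extracting through a matching insert:
//   %agg = insertvalue {i32, i32} %y, i32 %elt, 1
//   %r   = extractvalue {i32, i32} %agg, 1   ; --> %elt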
5219
5220/// Given operands for an ExtractElementInst, see if we can fold the result.
5221/// If not, this returns null.
5222 static Value *simplifyExtractElementInst(Value *Vec, Value *Idx,
5223 const SimplifyQuery &Q, unsigned) {
5224 auto *VecVTy = cast<VectorType>(Vec->getType());
5225 if (auto *CVec = dyn_cast<Constant>(Vec)) {
5226 if (auto *CIdx = dyn_cast<Constant>(Idx))
5227 return ConstantExpr::getExtractElement(CVec, CIdx);
5228
5229 if (Q.isUndefValue(Vec))
5230 return UndefValue::get(VecVTy->getElementType());
5231 }
5232
5233 // An undef extract index can be arbitrarily chosen to be an out-of-range
5234 // index value, which would result in the instruction being poison.
5235 if (Q.isUndefValue(Idx))
5236 return PoisonValue::get(VecVTy->getElementType());
5237
5238 // If extracting a specified index from the vector, see if we can recursively
5239 // find a previously computed scalar that was inserted into the vector.
5240 if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) {
5241 // For fixed-length vector, fold into poison if index is out of bounds.
5242 unsigned MinNumElts = VecVTy->getElementCount().getKnownMinValue();
5243 if (isa<FixedVectorType>(VecVTy) && IdxC->getValue().uge(MinNumElts))
5244 return PoisonValue::get(VecVTy->getElementType());
5245 // Handle case where an element is extracted from a splat.
5246 if (IdxC->getValue().ult(MinNumElts))
5247 if (auto *Splat = getSplatValue(Vec))
5248 return Splat;
5249 if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
5250 return Elt;
5251 } else {
5252 // extractelt x, (insertelt y, elt, n), n -> elt
5253 // If the possibly-variable indices are trivially known to be equal
5254 // (because they are the same operand) then use the value that was
5255 // inserted directly.
5256 auto *IE = dyn_cast<InsertElementInst>(Vec);
5257 if (IE && IE->getOperand(2) == Idx)
5258 return IE->getOperand(1);
5259
5260 // The index is not relevant if our vector is a splat.
5261 if (Value *Splat = getSplatValue(Vec))
5262 return Splat;
5263 }
5264 return nullptr;
5265}
5266
5267 Value *llvm::simplifyExtractElementInst(Value *Vec, Value *Idx,
5268 const SimplifyQuery &Q) {
5269 return ::simplifyExtractElementInst(Vec, Idx, Q, RecursionLimit);
5270}
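// For instance (illustrative IR): an out-of-range constant index on a
// fixed-length vector is poison, and any lane of a splat is the splat value:
//   extractelement <4 x i32> %v, i64 4                              --> poison
//   extractelement <4 x i32> <i32 7, i32 7, i32 7, i32 7>, i64 %i   --> i32 7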
5271
5272/// See if we can fold the given phi. If not, returns null.
5273 static Value *simplifyPHINode(PHINode *PN, ArrayRef<Value *> IncomingValues,
5274 const SimplifyQuery &Q) {
5275 // WARNING: no matter how worthwhile it may seem, we cannot perform PHI CSE
5276 // here, because the PHI we may succeed in simplifying to was not
5277 // def-reachable from the original PHI!
5278
5279 // If all of the PHI's incoming values are the same then replace the PHI node
5280 // with the common value.
5281 Value *CommonValue = nullptr;
5282 bool HasPoisonInput = false;
5283 bool HasUndefInput = false;
5284 for (Value *Incoming : IncomingValues) {
5285 // If the incoming value is the phi node itself, it can safely be skipped.
5286 if (Incoming == PN)
5287 continue;
5288 if (isa<PoisonValue>(Incoming)) {
5289 HasPoisonInput = true;
5290 continue;
5291 }
5292 if (Q.isUndefValue(Incoming)) {
5293 // Remember that we saw an undef value, but otherwise ignore them.
5294 HasUndefInput = true;
5295 continue;
5296 }
5297 if (CommonValue && Incoming != CommonValue)
5298 return nullptr; // Not the same, bail out.
5299 CommonValue = Incoming;
5300 }
5301
5302 // If CommonValue is null then all of the incoming values were either undef,
5303 // poison or equal to the phi node itself.
5304 if (!CommonValue)
5305 return HasUndefInput ? UndefValue::get(PN->getType())
5306 : PoisonValue::get(PN->getType());
5307
5308 if (HasPoisonInput || HasUndefInput) {
5309 // If we have a PHI node like phi(X, undef, X), where X is defined by some
5310 // instruction, we cannot return X as the result of the PHI node unless it
5311 // dominates the PHI block.
5312 if (!valueDominatesPHI(CommonValue, PN, Q.DT))
5313 return nullptr;
5314
5315 // Make sure we do not replace an undef value with poison.
5316 if (HasUndefInput &&
5317 !isGuaranteedNotToBePoison(CommonValue, Q.AC, Q.CxtI, Q.DT))
5318 return nullptr;
5319 return CommonValue;
5320 }
5321
5322 return CommonValue;
5323}
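// For instance (illustrative IR), phi [ %x, %a ], [ undef, %b ], [ %x, %c ]
// simplifies to %x, provided %x dominates the phi's block and is known not to
// be poison (replacing an undef input with a poison value would be unsound).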
5324
5325static Value *simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
5326 const SimplifyQuery &Q, unsigned MaxRecurse) {
5327 if (auto *C = dyn_cast<Constant>(Op))
5328 return ConstantFoldCastOperand(CastOpc, C, Ty, Q.DL);
5329
5330 if (auto *CI = dyn_cast<CastInst>(Op)) {
5331 auto *Src = CI->getOperand(0);
5332 Type *SrcTy = Src->getType();
5333 Type *MidTy = CI->getType();
5334 Type *DstTy = Ty;
5335 if (Src->getType() == Ty) {
5336 auto FirstOp = static_cast<Instruction::CastOps>(CI->getOpcode());
5337 auto SecondOp = static_cast<Instruction::CastOps>(CastOpc);
5338 Type *SrcIntPtrTy =
5339 SrcTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(SrcTy) : nullptr;
5340 Type *MidIntPtrTy =
5341 MidTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(MidTy) : nullptr;
5342 Type *DstIntPtrTy =
5343 DstTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(DstTy) : nullptr;
5344 if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy,
5345 SrcIntPtrTy, MidIntPtrTy,
5346 DstIntPtrTy) == Instruction::BitCast)
5347 return Src;
5348 }
5349 }
5350
5351 // bitcast x -> x
5352 if (CastOpc == Instruction::BitCast)
5353 if (Op->getType() == Ty)
5354 return Op;
5355
5356 // ptrtoint (ptradd (Ptr, X - ptrtoint(Ptr))) -> X
5357 Value *Ptr, *X;
5358 if (CastOpc == Instruction::PtrToInt &&
5359 match(Op, m_PtrAdd(m_Value(Ptr),
5360 m_Sub(m_Value(X), m_PtrToInt(m_Deferred(Ptr))))) &&
5361 X->getType() == Ty && Ty == Q.DL.getIndexType(Ptr->getType()))
5362 return X;
5363
5364 return nullptr;
5365}
5366
5367Value *llvm::simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
5368 const SimplifyQuery &Q) {
5369 return ::simplifyCastInst(CastOpc, Op, Ty, Q, RecursionLimit);
5370}
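// For instance (illustrative IR, assuming i64 is the pointer width for %p's
// address space), an eliminable cast pair collapses to its source:
//   %i = ptrtoint ptr %p to i64
//   %q = inttoptr i64 %i to ptr   ; --> %p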
5371
5372/// For the given destination element of a shuffle, peek through shuffles to
5373/// match a root vector source operand that contains that element in the same
5374/// vector lane (ie, the same mask index), so we can eliminate the shuffle(s).
5375static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1,
5376 int MaskVal, Value *RootVec,
5377 unsigned MaxRecurse) {
5378 if (!MaxRecurse--)
5379 return nullptr;
5380
5381 // Bail out if any mask value is undefined. That kind of shuffle may be
5382 // simplified further based on demanded bits or other folds.
5383 if (MaskVal == -1)
5384 return nullptr;
5385
5386 // The mask value chooses which source operand we need to look at next.
5387 int InVecNumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
5388 int RootElt = MaskVal;
5389 Value *SourceOp = Op0;
5390 if (MaskVal >= InVecNumElts) {
5391 RootElt = MaskVal - InVecNumElts;
5392 SourceOp = Op1;
5393 }
5394
5395 // If the source operand is a shuffle itself, look through it to find the
5396 // matching root vector.
5397 if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) {
5398 return foldIdentityShuffles(
5399 DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1),
5400 SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse);
5401 }
5402
5403 // The source operand is not a shuffle. Initialize the root vector value for
5404 // this shuffle if that has not been done yet.
5405 if (!RootVec)
5406 RootVec = SourceOp;
5407
5408 // Give up as soon as a source operand does not match the existing root value.
5409 if (RootVec != SourceOp)
5410 return nullptr;
5411
5412 // The element must be coming from the same lane in the source vector
5413 // (although it may have crossed lanes in intermediate shuffles).
5414 if (RootElt != DestElt)
5415 return nullptr;
5416
5417 return RootVec;
5418}
5419
5420 static Value *simplifyShuffleVectorInst(Value *Op0, Value *Op1,
5421 ArrayRef<int> Mask, Type *RetTy,
5422 const SimplifyQuery &Q,
5423 unsigned MaxRecurse) {
5424 if (all_of(Mask, [](int Elem) { return Elem == PoisonMaskElem; }))
5425 return PoisonValue::get(RetTy);
5426
5427 auto *InVecTy = cast<VectorType>(Op0->getType());
5428 unsigned MaskNumElts = Mask.size();
5429 ElementCount InVecEltCount = InVecTy->getElementCount();
5430
5431 bool Scalable = InVecEltCount.isScalable();
5432
5433 SmallVector<int, 32> Indices;
5434 Indices.assign(Mask.begin(), Mask.end());
5435
5436 // Canonicalization: If mask does not select elements from an input vector,
5437 // replace that input vector with poison.
5438 if (!Scalable) {
5439 bool MaskSelects0 = false, MaskSelects1 = false;
5440 unsigned InVecNumElts = InVecEltCount.getKnownMinValue();
5441 for (unsigned i = 0; i != MaskNumElts; ++i) {
5442 if (Indices[i] == -1)
5443 continue;
5444 if ((unsigned)Indices[i] < InVecNumElts)
5445 MaskSelects0 = true;
5446 else
5447 MaskSelects1 = true;
5448 }
5449 if (!MaskSelects0)
5450 Op0 = PoisonValue::get(InVecTy);
5451 if (!MaskSelects1)
5452 Op1 = PoisonValue::get(InVecTy);
5453 }
5454
5455 auto *Op0Const = dyn_cast<Constant>(Op0);
5456 auto *Op1Const = dyn_cast<Constant>(Op1);
5457
5458 // If all operands are constant, constant fold the shuffle. This
5459 // transformation depends on the value of the mask, which is not known at
5460 // compile time for scalable vectors.
5461 if (Op0Const && Op1Const)
5462 return ConstantExpr::getShuffleVector(Op0Const, Op1Const, Mask);
5463
5464 // Canonicalization: if only one input vector is constant, it shall be the
5465 // second one. This transformation depends on the value of the mask, which
5466 // is not known at compile time for scalable vectors.
5467 if (!Scalable && Op0Const && !Op1Const) {
5468 std::swap(Op0, Op1);
5469 ShuffleVectorInst::commuteShuffleMask(Indices,
5470 InVecEltCount.getKnownMinValue());
5471 }
5472
5473 // A splat of an inserted scalar constant becomes a vector constant:
5474 // shuf (inselt ?, C, IndexC), undef, <IndexC, IndexC...> --> <C, C...>
5475 // NOTE: We may have commuted above, so analyze the updated Indices, not the
5476 // original mask constant.
5477 // NOTE: This transformation depends on the value of the mask which is not
5478 // known at compile time for scalable vectors
5479 Constant *C;
5480 ConstantInt *IndexC;
5481 if (!Scalable && match(Op0, m_InsertElt(m_Value(), m_Constant(C),
5482 m_ConstantInt(IndexC)))) {
5483 // Match a splat shuffle mask of the insert index allowing undef elements.
5484 int InsertIndex = IndexC->getZExtValue();
5485 if (all_of(Indices, [InsertIndex](int MaskElt) {
5486 return MaskElt == InsertIndex || MaskElt == -1;
5487 })) {
5488 assert(isa<UndefValue>(Op1) && "Expected undef operand 1 for splat");
5489
5490 // Shuffle mask poisons become poison constant result elements.
5491 SmallVector<Constant *, 16> VecC(MaskNumElts, C);
5492 for (unsigned i = 0; i != MaskNumElts; ++i)
5493 if (Indices[i] == -1)
5494 VecC[i] = PoisonValue::get(C->getType());
5495 return ConstantVector::get(VecC);
5496 }
5497 }
5498
5499 // A shuffle of a splat is always the splat itself. Legal if the shuffle's
5500 // value type is the same as the input vectors' type.
5501 if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
5502 if (Q.isUndefValue(Op1) && RetTy == InVecTy &&
5503 all_equal(OpShuf->getShuffleMask()))
5504 return Op0;
5505
5506 // All remaining transformations depend on the value of the mask, which is
5507 // not known at compile time for scalable vectors.
5508 if (Scalable)
5509 return nullptr;
5510
5511 // Don't fold a shuffle with undef mask elements. This may get folded in a
5512 // better way using demanded bits or other analysis.
5513 // TODO: Should we allow this?
5514 if (is_contained(Indices, -1))
5515 return nullptr;
5516
5517 // Check if every element of this shuffle can be mapped back to the
5518 // corresponding element of a single root vector. If so, we don't need this
5519 // shuffle. This handles simple identity shuffles as well as chains of
5520 // shuffles that may widen/narrow and/or move elements across lanes and back.
5521 Value *RootVec = nullptr;
5522 for (unsigned i = 0; i != MaskNumElts; ++i) {
5523 // Note that recursion is limited for each vector element, so if any element
5524 // exceeds the limit, this will fail to simplify.
5525 RootVec =
5526 foldIdentityShuffles(i, Op0, Op1, Indices[i], RootVec, MaxRecurse);
5527
5528 // We can't replace a widening/narrowing shuffle with one of its operands.
5529 if (!RootVec || RootVec->getType() != RetTy)
5530 return nullptr;
5531 }
5532 return RootVec;
5533}
5534
5535/// Given operands for a ShuffleVectorInst, fold the result or return null.
5536 Value *llvm::simplifyShuffleVectorInst(Value *Op0, Value *Op1,
5537 ArrayRef<int> Mask, Type *RetTy,
5538 const SimplifyQuery &Q) {
5539 return ::simplifyShuffleVectorInst(Op0, Op1, Mask, RetTy, Q, RecursionLimit);
5540}
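// For instance (illustrative IR), a chain of identity shuffles maps every
// output lane back to the same lane of %v, so foldIdentityShuffles strips it:
//   %s = shufflevector <4 x i32> %v, <4 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
//   %r = shufflevector <4 x i32> %s, <4 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// Both %s and %r simplify to %v.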
5541
5542 static Constant *foldConstant(Instruction::UnaryOps Opcode, Value *&Op,
5543 const SimplifyQuery &Q) {
5544 if (auto *C = dyn_cast<Constant>(Op))
5545 return ConstantFoldUnaryOpOperand(Opcode, C, Q.DL);
5546 return nullptr;
5547}
5548
5549/// Given the operand for an FNeg, see if we can fold the result. If not, this
5550/// returns null.
5551 static Value *simplifyFNegInst(Value *Op, FastMathFlags FMF,
5552 const SimplifyQuery &Q, unsigned MaxRecurse) {
5553 if (Constant *C = foldConstant(Instruction::FNeg, Op, Q))
5554 return C;
5555
5556 Value *X;
5557 // fneg (fneg X) ==> X
5558 if (match(Op, m_FNeg(m_Value(X))))
5559 return X;
5560
5561 return nullptr;
5562}
5563
5564 Value *llvm::simplifyFNegInst(Value *Op, FastMathFlags FMF,
5565 const SimplifyQuery &Q) {
5566 return ::simplifyFNegInst(Op, FMF, Q, RecursionLimit);
5567}
5568
5569/// Try to propagate existing NaN values when possible. If not, replace the
5570/// constant or elements in the constant with a canonical NaN.
5571 static Constant *propagateNaN(Constant *In) {
5572 Type *Ty = In->getType();
5573 if (auto *VecTy = dyn_cast<FixedVectorType>(Ty)) {
5574 unsigned NumElts = VecTy->getNumElements();
5575 SmallVector<Constant *, 32> NewC(NumElts);
5576 for (unsigned i = 0; i != NumElts; ++i) {
5577 Constant *EltC = In->getAggregateElement(i);
5578 // Poison elements propagate. NaNs propagate, except signaling NaNs are quieted.
5579 // Replace unknown or undef elements with canonical NaN.
5580 if (EltC && isa<PoisonValue>(EltC))
5581 NewC[i] = EltC;
5582 else if (EltC && EltC->isNaN())
5583 NewC[i] = ConstantFP::get(
5584 EltC->getType(), cast<ConstantFP>(EltC)->getValue().makeQuiet());
5585 else
5586 NewC[i] = ConstantFP::getNaN(VecTy->getElementType());
5587 }
5588 return ConstantVector::get(NewC);
5589 }
5590
5591 // If it is not a fixed vector, but not a simple NaN either, return a
5592 // canonical NaN.
5593 if (!In->isNaN())
5594 return ConstantFP::getNaN(Ty);
5595
5596 // If we know this is a NaN and it's a scalable vector, we must have a splat
5597 // on our hands. Grab that before splatting a QNaN constant.
5598 if (isa<ScalableVectorType>(Ty)) {
5599 auto *Splat = In->getSplatValue();
5600 assert(Splat && Splat->isNaN() &&
5601 "Found a scalable-vector NaN but not a splat");
5602 In = Splat;
5603 }
5604
5605 // Propagate an existing QNaN constant. If it is an SNaN, make it quiet, but
5606 // preserve the sign/payload.
5607 return ConstantFP::get(Ty, cast<ConstantFP>(In)->getValue().makeQuiet());
5608}
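// For instance (illustrative), given a <2 x float> constant
// <float undef, float sNaN>: the undef lane becomes the canonical quiet NaN
// and the sNaN lane is quieted in place, keeping its sign and payload.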
5609
5610/// Perform folds that are common to any floating-point operation. This implies
5611/// transforms based on poison/undef/NaN because the operation itself makes no
5612/// difference to the result.
5613 static Constant *simplifyFPOp(ArrayRef<Value *> Ops, FastMathFlags FMF,
5614 const SimplifyQuery &Q,
5615 fp::ExceptionBehavior ExBehavior,
5616 RoundingMode Rounding) {
5617 // Poison is independent of anything else. It always propagates from an
5618 // operand to a math result.
5619 if (any_of(Ops, [](Value *V) { return match(V, m_Poison()); }))
5620 return PoisonValue::get(Ops[0]->getType());
5621
5622 for (Value *V : Ops) {
5623 bool IsNan = match(V, m_NaN());
5624 bool IsInf = match(V, m_Inf());
5625 bool IsUndef = Q.isUndefValue(V);
5626
5627 // If this operation has 'nnan' or 'ninf' and at least 1 disallowed operand
5628 // (an undef operand can be chosen to be NaN/Inf), then the result of
5629 // this operation is poison.
5630 if (FMF.noNaNs() && (IsNan || IsUndef))
5631 return PoisonValue::get(V->getType());
5632 if (FMF.noInfs() && (IsInf || IsUndef))
5633 return PoisonValue::get(V->getType());
5634
5635 if (isDefaultFPEnvironment(ExBehavior, Rounding)) {
5636 // Undef does not propagate because undef means that all bits can take on
5637 // any value. If this is undef * NaN for example, then the result values
5638 // (at least the exponent bits) are limited. Assume the undef is a
5639 // canonical NaN and propagate that.
5640 if (IsUndef)
5641 return ConstantFP::getNaN(V->getType());
5642 if (IsNan)
5643 return propagateNaN(cast<Constant>(V));
5644 } else if (ExBehavior != fp::ebStrict) {
5645 if (IsNan)
5646 return propagateNaN(cast<Constant>(V));
5647 }
5648 }
5649 return nullptr;
5650}
5651
5652/// Given operands for an FAdd, see if we can fold the result. If not, this
5653/// returns null.
5654static Value *
5655 simplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5656 const SimplifyQuery &Q, unsigned MaxRecurse,
5657 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5658 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5659 if (isDefaultFPEnvironment(ExBehavior, Rounding))
5660 if (Constant *C = foldOrCommuteConstant(Instruction::FAdd, Op0, Op1, Q))
5661 return C;
5662
5663 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5664 return C;
5665
5666 // fadd X, -0 ==> X
5667 // With strict/constrained FP, we have these possible edge cases that do
5668 // not simplify to Op0:
5669 // fadd SNaN, -0.0 --> QNaN
5670 // fadd +0.0, -0.0 --> -0.0 (but only with round toward negative)
5671 if (canIgnoreSNaN(ExBehavior, FMF) &&
5672 (!canRoundingModeBe(Rounding, RoundingMode::TowardNegative) ||
5673 FMF.noSignedZeros()))
5674 if (match(Op1, m_NegZeroFP()))
5675 return Op0;
5676
5677 // fadd X, 0 ==> X, when we know X is not -0
5678 if (canIgnoreSNaN(ExBehavior, FMF))
5679 if (match(Op1, m_PosZeroFP()) &&
5680 (FMF.noSignedZeros() || cannotBeNegativeZero(Op0, /*Depth=*/0, Q)))
5681 return Op0;
5682
5683 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5684 return nullptr;
5685
5686 if (FMF.noNaNs()) {
5687 // With nnan: X + {+/-}Inf --> {+/-}Inf
5688 if (match(Op1, m_Inf()))
5689 return Op1;
5690
5691 // With nnan: -X + X --> 0.0 (and commuted variant)
5692 // We don't have to explicitly exclude infinities (ninf): INF + -INF == NaN.
5693 // Negative zeros are allowed because we always end up with positive zero:
5694 // X = -0.0: (-0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
5695 // X = -0.0: ( 0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
5696 // X = 0.0: (-0.0 - ( 0.0)) + ( 0.0) == (-0.0) + ( 0.0) == 0.0
5697 // X = 0.0: ( 0.0 - ( 0.0)) + ( 0.0) == ( 0.0) + ( 0.0) == 0.0
5698 if (match(Op0, m_FSub(m_AnyZeroFP(), m_Specific(Op1))) ||
5699 match(Op1, m_FSub(m_AnyZeroFP(), m_Specific(Op0))))
5700 return ConstantFP::getZero(Op0->getType());
5701
5702 if (match(Op0, m_FNeg(m_Specific(Op1))) ||
5703 match(Op1, m_FNeg(m_Specific(Op0))))
5704 return ConstantFP::getZero(Op0->getType());
5705 }
5706
5707 // (X - Y) + Y --> X
5708 // Y + (X - Y) --> X
5709 Value *X;
5710 if (FMF.noSignedZeros() && FMF.allowReassoc() &&
5711 (match(Op0, m_FSub(m_Value(X), m_Specific(Op1))) ||
5712 match(Op1, m_FSub(m_Value(X), m_Specific(Op0)))))
5713 return X;
5714
5715 return nullptr;
5716}
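// For instance (illustrative IR, using the file's shorthand):
//   fadd float %x, -0.0                         --> %x   (default FP env)
//   fadd nnan float %x, (fsub float -0.0, %x)   --> 0.0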
5717
5718/// Given operands for an FSub, see if we can fold the result. If not, this
5719/// returns null.
5720static Value *
5721 simplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5722 const SimplifyQuery &Q, unsigned MaxRecurse,
5723 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5724 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5725 if (isDefaultFPEnvironment(ExBehavior, Rounding))
5726 if (Constant *C = foldOrCommuteConstant(Instruction::FSub, Op0, Op1, Q))
5727 return C;
5728
5729 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5730 return C;
5731
5732 // fsub X, +0 ==> X
5733 if (canIgnoreSNaN(ExBehavior, FMF) &&
5734 (!canRoundingModeBe(Rounding, RoundingMode::TowardNegative) ||
5735 FMF.noSignedZeros()))
5736 if (match(Op1, m_PosZeroFP()))
5737 return Op0;
5738
5739 // fsub X, -0 ==> X, when we know X is not -0
5740 if (canIgnoreSNaN(ExBehavior, FMF))
5741 if (match(Op1, m_NegZeroFP()) &&
5742 (FMF.noSignedZeros() || cannotBeNegativeZero(Op0, /*Depth=*/0, Q)))
5743 return Op0;
5744
5745 // fsub -0.0, (fsub -0.0, X) ==> X
5746 // fsub -0.0, (fneg X) ==> X
5747 Value *X;
5748 if (canIgnoreSNaN(ExBehavior, FMF))
5749 if (match(Op0, m_NegZeroFP()) && match(Op1, m_FNeg(m_Value(X))))
5750 return X;
5751
5752 // fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored.
5753 // fsub 0.0, (fneg X) ==> X if signed zeros are ignored.
5754 if (canIgnoreSNaN(ExBehavior, FMF))
5755 if (FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()) &&
5756 (match(Op1, m_FSub(m_AnyZeroFP(), m_Value(X))) ||
5757 match(Op1, m_FNeg(m_Value(X)))))
5758 return X;
5759
5760 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5761 return nullptr;
5762
5763 if (FMF.noNaNs()) {
5764 // fsub nnan x, x ==> 0.0
5765 if (Op0 == Op1)
5766 return Constant::getNullValue(Op0->getType());
5767
5768 // With nnan: {+/-}Inf - X --> {+/-}Inf
5769 if (match(Op0, m_Inf()))
5770 return Op0;
5771
5772 // With nnan: X - {+/-}Inf --> {-/+}Inf
5773 if (match(Op1, m_Inf()))
5774 return foldConstant(Instruction::FNeg, Op1, Q);
5775 }
5776
5777 // Y - (Y - X) --> X
5778 // (X + Y) - Y --> X
5779 if (FMF.noSignedZeros() && FMF.allowReassoc() &&
5780 (match(Op1, m_FSub(m_Specific(Op0), m_Value(X))) ||
5781 match(Op0, m_c_FAdd(m_Specific(Op1), m_Value(X)))))
5782 return X;
5783
5784 return nullptr;
5785}
5786
5787 static Value *simplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
5788 const SimplifyQuery &Q, unsigned MaxRecurse,
5789 fp::ExceptionBehavior ExBehavior,
5790 RoundingMode Rounding) {
5791 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5792 return C;
5793
5794 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5795 return nullptr;
5796
5797 // Canonicalize special constants as operand 1.
5798 if (match(Op0, m_FPOne()) || match(Op0, m_AnyZeroFP()))
5799 std::swap(Op0, Op1);
5800
5801 // X * 1.0 --> X
5802 if (match(Op1, m_FPOne()))
5803 return Op0;
5804
5805 if (match(Op1, m_AnyZeroFP())) {
5806 // X * 0.0 --> 0.0 (with nnan and nsz)
5807 if (FMF.noNaNs() && FMF.noSignedZeros())
5808 return ConstantFP::getZero(Op0->getType());
5809
5810 KnownFPClass Known =
5811 computeKnownFPClass(Op0, FMF, fcInf | fcNan, /*Depth=*/0, Q);
5812 if (Known.isKnownNever(fcInf | fcNan)) {
5813 // +normal number * (-)0.0 --> (-)0.0
5814 if (Known.SignBit == false)
5815 return Op1;
5816 // -normal number * (-)0.0 --> -(-)0.0
5817 if (Known.SignBit == true)
5818 return foldConstant(Instruction::FNeg, Op1, Q);
5819 }
5820 }
5821
5822 // sqrt(X) * sqrt(X) --> X, if we can:
5823 // 1. Remove the intermediate rounding (reassociate).
5824 // 2. Ignore non-zero negative numbers because sqrt would produce NAN.
5825 // 3. Ignore -0.0 because sqrt(-0.0) == -0.0, but -0.0 * -0.0 == 0.0.
5826 Value *X;
5827 if (Op0 == Op1 && match(Op0, m_Sqrt(m_Value(X))) && FMF.allowReassoc() &&
5828 FMF.noNaNs() && FMF.noSignedZeros())
5829 return X;
5830
5831 return nullptr;
5832}
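// For instance (illustrative IR):
//   fmul float %x, 1.0            --> %x
//   fmul nnan nsz float %x, 0.0   --> 0.0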
5833
5834/// Given the operands for an FMul, see if we can fold the result
5835static Value *
5836 simplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5837 const SimplifyQuery &Q, unsigned MaxRecurse,
5838 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5839 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5840 if (isDefaultFPEnvironment(ExBehavior, Rounding))
5841 if (Constant *C = foldOrCommuteConstant(Instruction::FMul, Op0, Op1, Q))
5842 return C;
5843
5844 // Now apply simplifications that do not require rounding.
5845 return simplifyFMAFMul(Op0, Op1, FMF, Q, MaxRecurse, ExBehavior, Rounding);
5846}
5847
5848 Value *llvm::simplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5849 const SimplifyQuery &Q,
5850 fp::ExceptionBehavior ExBehavior,
5851 RoundingMode Rounding) {
5852 return ::simplifyFAddInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5853 Rounding);
5854}
5855
5856 Value *llvm::simplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5857 const SimplifyQuery &Q,
5858 fp::ExceptionBehavior ExBehavior,
5859 RoundingMode Rounding) {
5860 return ::simplifyFSubInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5861 Rounding);
5862}
5863
5864 Value *llvm::simplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5865 const SimplifyQuery &Q,
5866 fp::ExceptionBehavior ExBehavior,
5867 RoundingMode Rounding) {
5868 return ::simplifyFMulInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5869 Rounding);
5870}
5871
5872 Value *llvm::simplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
5873 const SimplifyQuery &Q,
5874 fp::ExceptionBehavior ExBehavior,
5875 RoundingMode Rounding) {
5876 return ::simplifyFMAFMul(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5877 Rounding);
5878}
5879
5880static Value *
5881 simplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5882 const SimplifyQuery &Q, unsigned,
5883 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5884 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5885 if (isDefaultFPEnvironment(ExBehavior, Rounding))
5886 if (Constant *C = foldOrCommuteConstant(Instruction::FDiv, Op0, Op1, Q))
5887 return C;
5888
5889 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5890 return C;
5891
5892 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5893 return nullptr;
5894
5895 // X / 1.0 -> X
5896 if (match(Op1, m_FPOne()))
5897 return Op0;
5898
5899 // 0 / X -> 0
5900 // Requires that NaNs are off (X could be zero) and signed zeroes are
5901 // ignored (X could be positive or negative, so the output sign is unknown).
5902 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()))
5903 return ConstantFP::getZero(Op0->getType());
5904
5905 if (FMF.noNaNs()) {
5906 // X / X -> 1.0 is legal when NaNs are ignored.
5907 // We can ignore infinities because INF/INF is NaN.
5908 if (Op0 == Op1)
5909 return ConstantFP::get(Op0->getType(), 1.0);
5910
5911 // (X * Y) / Y --> X if we can reassociate to the above form.
5912 Value *X;
5913 if (FMF.allowReassoc() && match(Op0, m_c_FMul(m_Value(X), m_Specific(Op1))))
5914 return X;
5915
5916 // -X / X -> -1.0 and
5917 // X / -X -> -1.0 are legal when NaNs are ignored.
5918 // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored.
5919 if (match(Op0, m_FNegNSZ(m_Specific(Op1))) ||
5920 match(Op1, m_FNegNSZ(m_Specific(Op0))))
5921 return ConstantFP::get(Op0->getType(), -1.0);
5922
5923 // nnan ninf X / [-]0.0 -> poison
5924 if (FMF.noInfs() && match(Op1, m_AnyZeroFP()))
5925 return PoisonValue::get(Op1->getType());
5926 }
5927
5928 return nullptr;
5929}
5930
5931 Value *llvm::simplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5932 const SimplifyQuery &Q,
5933 fp::ExceptionBehavior ExBehavior,
5934 RoundingMode Rounding) {
5935 return ::simplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5936 Rounding);
5937}
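// For instance (illustrative IR):
//   fdiv float %x, 1.0       --> %x
//   fdiv nnan float %x, %x   --> 1.0   (inf/inf and 0/0 would be NaN, excluded)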
5938
5939static Value *
5940 simplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5941 const SimplifyQuery &Q, unsigned,
5942 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5943 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5944 if (isDefaultFPEnvironment(ExBehavior, Rounding))
5945 if (Constant *C = foldOrCommuteConstant(Instruction::FRem, Op0, Op1, Q))
5946 return C;
5947
5948 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5949 return C;
5950
5951 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5952 return nullptr;
5953
5954 // Unlike fdiv, the result of frem always matches the sign of the dividend.
5955 // The constant match may include undef elements in a vector, so return a full
5956 // zero constant as the result.
5957 if (FMF.noNaNs()) {
5958 // +0 % X -> 0
5959 if (match(Op0, m_PosZeroFP()))
5960 return ConstantFP::getZero(Op0->getType());
5961 // -0 % X -> -0
5962 if (match(Op0, m_NegZeroFP()))
5963 return ConstantFP::getNegativeZero(Op0->getType());
5964 }
5965
5966 return nullptr;
5967}
5968
5969 Value *llvm::simplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5970 const SimplifyQuery &Q,
5971 fp::ExceptionBehavior ExBehavior,
5972 RoundingMode Rounding) {
5973 return ::simplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5974 Rounding);
5975}
5976
5977//=== Helper functions for higher up the class hierarchy.
5978
5979/// Given the operand for a UnaryOperator, see if we can fold the result.
5980/// If not, this returns null.
5981static Value *simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q,
5982 unsigned MaxRecurse) {
5983 switch (Opcode) {
5984 case Instruction::FNeg:
5985 return simplifyFNegInst(Op, FastMathFlags(), Q, MaxRecurse);
5986 default:
5987 llvm_unreachable("Unexpected opcode");
5988 }
5989}
5990
5991/// Given the operand for a UnaryOperator, see if we can fold the result.
5992/// If not, this returns null.
5993/// Try to use FastMathFlags when folding the result.
5994static Value *simplifyFPUnOp(unsigned Opcode, Value *Op,
5995 const FastMathFlags &FMF, const SimplifyQuery &Q,
5996 unsigned MaxRecurse) {
5997 switch (Opcode) {
5998 case Instruction::FNeg:
5999 return simplifyFNegInst(Op, FMF, Q, MaxRecurse);
6000 default:
6001 return simplifyUnOp(Opcode, Op, Q, MaxRecurse);
6002 }
6003}
6004
6005Value *llvm::simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q) {
6006 return ::simplifyUnOp(Opcode, Op, Q, RecursionLimit);
6007}
6008
6009 Value *llvm::simplifyFPUnOp(unsigned Opcode, Value *Op, FastMathFlags FMF,
6010 const SimplifyQuery &Q) {
6011 return ::simplifyFPUnOp(Opcode, Op, FMF, Q, RecursionLimit);
6012}
6013
6014/// Given operands for a BinaryOperator, see if we can fold the result.
6015/// If not, this returns null.
6016static Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
6017 const SimplifyQuery &Q, unsigned MaxRecurse) {
6018 switch (Opcode) {
6019 case Instruction::Add:
6020 return simplifyAddInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
6021 MaxRecurse);
6022 case Instruction::Sub:
6023 return simplifySubInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
6024 MaxRecurse);
6025 case Instruction::Mul:
6026 return simplifyMulInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
6027 MaxRecurse);
6028 case Instruction::SDiv:
6029 return simplifySDivInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
6030 case Instruction::UDiv:
6031 return simplifyUDivInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
6032 case Instruction::SRem:
6033 return simplifySRemInst(LHS, RHS, Q, MaxRecurse);
6034 case Instruction::URem:
6035 return simplifyURemInst(LHS, RHS, Q, MaxRecurse);
6036 case Instruction::Shl:
6037 return simplifyShlInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
6038 MaxRecurse);
6039 case Instruction::LShr:
6040 return simplifyLShrInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
6041 case Instruction::AShr:
6042 return simplifyAShrInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
6043 case Instruction::And:
6044 return simplifyAndInst(LHS, RHS, Q, MaxRecurse);
6045 case Instruction::Or:
6046 return simplifyOrInst(LHS, RHS, Q, MaxRecurse);
6047 case Instruction::Xor:
6048 return simplifyXorInst(LHS, RHS, Q, MaxRecurse);
6049 case Instruction::FAdd:
6050 return simplifyFAddInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6051 case Instruction::FSub:
6052 return simplifyFSubInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6053 case Instruction::FMul:
6054 return simplifyFMulInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6055 case Instruction::FDiv:
6056 return simplifyFDivInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6057 case Instruction::FRem:
6058 return simplifyFRemInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6059 default:
6060 llvm_unreachable("Unexpected opcode");
6061 }
6062}
6063
6064/// Given operands for a BinaryOperator, see if we can fold the result.
6065/// If not, this returns null.
6066/// Try to use FastMathFlags when folding the result.
6067static Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
6068 const FastMathFlags &FMF, const SimplifyQuery &Q,
6069 unsigned MaxRecurse) {
6070 switch (Opcode) {
6071 case Instruction::FAdd:
6072 return simplifyFAddInst(LHS, RHS, FMF, Q, MaxRecurse);
6073 case Instruction::FSub:
6074 return simplifyFSubInst(LHS, RHS, FMF, Q, MaxRecurse);
6075 case Instruction::FMul:
6076 return simplifyFMulInst(LHS, RHS, FMF, Q, MaxRecurse);
6077 case Instruction::FDiv:
6078 return simplifyFDivInst(LHS, RHS, FMF, Q, MaxRecurse);
6079 default:
6080 return simplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse);
6081 }
6082}
6083
6084Value *llvm::simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
6085 const SimplifyQuery &Q) {
6086 return ::simplifyBinOp(Opcode, LHS, RHS, Q, RecursionLimit);
6087}
6088
6089Value *llvm::simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
6090 FastMathFlags FMF, const SimplifyQuery &Q) {
6091 return ::simplifyBinOp(Opcode, LHS, RHS, FMF, Q, RecursionLimit);
6092}
6093
6094/// Given operands for a CmpInst, see if we can fold the result.
6095static Value *simplifyCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS,
6096 const SimplifyQuery &Q, unsigned MaxRecurse) {
6097 if (CmpInst::isIntPredicate(Predicate))
6098 return simplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse);
6099 return simplifyFCmpInst(Predicate, LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6100}
6101
6102 Value *llvm::simplifyCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS,
6103 const SimplifyQuery &Q) {
6104 return ::simplifyCmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
6105}
6106
6107 static bool isIdempotent(Intrinsic::ID ID) {
6108 switch (ID) {
6109 default:
6110 return false;
6111
6112 // Unary idempotent: f(f(x)) = f(x)
6113 case Intrinsic::fabs:
6114 case Intrinsic::floor:
6115 case Intrinsic::ceil:
6116 case Intrinsic::trunc:
6117 case Intrinsic::rint:
6118 case Intrinsic::nearbyint:
6119 case Intrinsic::round:
6120 case Intrinsic::roundeven:
6121 case Intrinsic::canonicalize:
6122 case Intrinsic::arithmetic_fence:
6123 return true;
6124 }
6125}
6126
6127/// Return true if the intrinsic rounds a floating-point value to an integral
6128/// floating-point value (not an integer type).
6129 static bool removesFPFraction(Intrinsic::ID ID) {
6130 switch (ID) {
6131 default:
6132 return false;
6133
6134 case Intrinsic::floor:
6135 case Intrinsic::ceil:
6136 case Intrinsic::trunc:
6137 case Intrinsic::rint:
6138 case Intrinsic::nearbyint:
6139 case Intrinsic::round:
6140 case Intrinsic::roundeven:
6141 return true;
6142 }
6143}
6144
6145 static Value *simplifyRelativeLoad(Constant *Ptr, Constant *Offset,
6146 const DataLayout &DL) {
6147 GlobalValue *PtrSym;
6148 APInt PtrOffset;
6149 if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset, DL))
6150 return nullptr;
6151
6152 Type *Int32Ty = Type::getInt32Ty(Ptr->getContext());
6153
6154 auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset);
6155 if (!OffsetConstInt || OffsetConstInt->getBitWidth() > 64)
6156 return nullptr;
6157
6158 APInt OffsetInt = OffsetConstInt->getValue().sextOrTrunc(
6159 DL.getIndexTypeSizeInBits(Ptr->getType()));
6160 if (OffsetInt.srem(4) != 0)
6161 return nullptr;
6162
6163 Constant *Loaded =
6164 ConstantFoldLoadFromConstPtr(Ptr, Int32Ty, std::move(OffsetInt), DL);
6165 if (!Loaded)
6166 return nullptr;
6167
6168 auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded);
6169 if (!LoadedCE)
6170 return nullptr;
6171
6172 if (LoadedCE->getOpcode() == Instruction::Trunc) {
6173 LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
6174 if (!LoadedCE)
6175 return nullptr;
6176 }
6177
6178 if (LoadedCE->getOpcode() != Instruction::Sub)
6179 return nullptr;
6180
6181 auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
6182 if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt)
6183 return nullptr;
6184 auto *LoadedLHSPtr = LoadedLHS->getOperand(0);
6185
6186 Constant *LoadedRHS = LoadedCE->getOperand(1);
6187 GlobalValue *LoadedRHSSym;
6188 APInt LoadedRHSOffset;
6189 if (!IsConstantOffsetFromGlobal(LoadedRHS, LoadedRHSSym, LoadedRHSOffset,
6190 DL) ||
6191 PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset)
6192 return nullptr;
6193
6194 return LoadedLHSPtr;
6195}
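// This helper backs the llvm.load.relative fold. It matches the
// relative-pointer idiom (used, e.g., by relative vtables): the constant at
// @base + Offset has the shape (possibly truncated)
//   sub (ptrtoint @target), (ptrtoint <that same address>)
// so loading it and adding it back recovers @target directly.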
6196
6197// TODO: Need to pass in FastMathFlags
6198static Value *simplifyLdexp(Value *Op0, Value *Op1, const SimplifyQuery &Q,
6199 bool IsStrict) {
6200 // ldexp(poison, x) -> poison
6201 // ldexp(x, poison) -> poison
6202 if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
6203 return Op0;
6204
6205 // ldexp(undef, x) -> nan
6206 if (Q.isUndefValue(Op0))
6207 return ConstantFP::getNaN(Op0->getType());
6208
6209 if (!IsStrict) {
6210 // TODO: Could insert a canonicalize for strict
6211
6212 // ldexp(x, undef) -> x
6213 if (Q.isUndefValue(Op1))
6214 return Op0;
6215 }
6216
6217 const APFloat *C = nullptr;
6218 match(Op0, m_APFloatAllowPoison(C));
6219
6220 // These cases should be safe, even with strictfp.
6221 // ldexp(0.0, x) -> 0.0
6222 // ldexp(-0.0, x) -> -0.0
6223 // ldexp(inf, x) -> inf
6224 // ldexp(-inf, x) -> -inf
6225 if (C && (C->isZero() || C->isInfinity()))
6226 return Op0;
6227
6228 // These folds would drop a canonicalization; we could do them if we knew
6229 // how to ignore denormal flushes and target handling of NaN payload bits.
6230 if (IsStrict)
6231 return nullptr;
6232
6233 // TODO: Could quiet this with strictfp if the exception mode isn't strict.
6234 if (C && C->isNaN())
6235 return ConstantFP::get(Op0->getType(), C->makeQuiet());
6236
6237 // ldexp(x, 0) -> x
6238
6239 // TODO: Could fold this if we know the exception mode isn't
6240 // strict, we know the denormal mode and other target modes.
6241 if (match(Op1, PatternMatch::m_ZeroInt()))
6242 return Op0;
6243
6244 return nullptr;
6245}
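// For instance (illustrative IR):
//   call float @llvm.ldexp.f32.i32(float 0.0, i32 %n)   --> 0.0
//   call float @llvm.ldexp.f32.i32(float %x, i32 0)     --> %x (non-strict)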
6246
6247 static Value *simplifyUnaryIntrinsic(Function *F, Value *Op0,
6248 const SimplifyQuery &Q,
6249 const CallBase *Call) {
6250 // Idempotent functions return the same result when called repeatedly.
6251 Intrinsic::ID IID = F->getIntrinsicID();
6252 if (isIdempotent(IID))
6253 if (auto *II = dyn_cast<IntrinsicInst>(Op0))
6254 if (II->getIntrinsicID() == IID)
6255 return II;
6256
6257 if (removesFPFraction(IID)) {
6258 // Converting from int or calling a rounding function always results in a
6259 // finite integral number or infinity. For those inputs, rounding functions
6260 // always return the same value, so the (2nd) rounding is eliminated. Ex:
6261 // floor (sitofp x) -> sitofp x
6262 // round (ceil x) -> ceil x
6263 auto *II = dyn_cast<IntrinsicInst>(Op0);
6264 if ((II && removesFPFraction(II->getIntrinsicID())) ||
6265 match(Op0, m_SIToFP(m_Value())) || match(Op0, m_UIToFP(m_Value())))
6266 return Op0;
6267 }
6268
6269 Value *X;
6270 switch (IID) {
6271 case Intrinsic::fabs:
6272 if (computeKnownFPSignBit(Op0, /*Depth=*/0, Q) == false)
6273 return Op0;
6274 break;
6275 case Intrinsic::bswap:
6276 // bswap(bswap(x)) -> x
6277 if (match(Op0, m_BSwap(m_Value(X))))
6278 return X;
6279 break;
6280 case Intrinsic::bitreverse:
6281 // bitreverse(bitreverse(x)) -> x
6282 if (match(Op0, m_BitReverse(m_Value(X))))
6283 return X;
6284 break;
6285 case Intrinsic::ctpop: {
6286 // ctpop(X) -> 1 iff X is non-zero power of 2.
6287 if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ false, 0, Q.AC, Q.CxtI,
6288 Q.DT))
6289 return ConstantInt::get(Op0->getType(), 1);
6290 // If everything but the lowest bit is zero, that bit is the pop-count. Ex:
6291 // ctpop(and X, 1) --> and X, 1
6292 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
6293 if (MaskedValueIsZero(Op0, APInt::getHighBitsSet(BitWidth, BitWidth - 1),
6294 Q))
6295 return Op0;
6296 break;
6297 }
6298 case Intrinsic::exp:
6299 // exp(log(x)) -> x
6300 if (Call->hasAllowReassoc() &&
6301 match(Op0, m_Intrinsic<Intrinsic::log>(m_Value(X))))
6302 return X;
6303 break;
6304 case Intrinsic::exp2:
6305 // exp2(log2(x)) -> x
6306 if (Call->hasAllowReassoc() &&
6307 match(Op0, m_Intrinsic<Intrinsic::log2>(m_Value(X))))
6308 return X;
6309 break;
6310 case Intrinsic::exp10:
6311 // exp10(log10(x)) -> x
6312 if (Call->hasAllowReassoc() &&
6313 match(Op0, m_Intrinsic<Intrinsic::log10>(m_Value(X))))
6314 return X;
6315 break;
6316 case Intrinsic::log:
6317 // log(exp(x)) -> x
6318 if (Call->hasAllowReassoc() &&
6319 match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X))))
6320 return X;
6321 break;
6322 case Intrinsic::log2:
6323 // log2(exp2(x)) -> x
6324 if (Call->hasAllowReassoc() &&
6325 (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) ||
6326 match(Op0,
6327 m_Intrinsic<Intrinsic::pow>(m_SpecificFP(2.0), m_Value(X)))))
6328 return X;
6329 break;
6330 case Intrinsic::log10:
6331 // log10(pow(10.0, x)) -> x
6332 // log10(exp10(x)) -> x
6333 if (Call->hasAllowReassoc() &&
6334 (match(Op0, m_Intrinsic<Intrinsic::exp10>(m_Value(X))) ||
6335 match(Op0,
6336 m_Intrinsic<Intrinsic::pow>(m_SpecificFP(10.0), m_Value(X)))))
6337 return X;
6338 break;
6339 case Intrinsic::vector_reverse:
6340 // vector.reverse(vector.reverse(x)) -> x
6341 if (match(Op0, m_VecReverse(m_Value(X))))
6342 return X;
6343 // vector.reverse(splat(X)) -> splat(X)
6344 if (isSplatValue(Op0))
6345 return Op0;
6346 break;
6347 case Intrinsic::frexp: {
6348 // Frexp is idempotent with the added complication of the struct return.
6349 if (match(Op0, m_ExtractValue<0>(m_Value(X)))) {
6350 if (match(X, m_Intrinsic<Intrinsic::frexp>(m_Value())))
6351 return X;
6352 }
6353
6354 break;
6355 }
6356 default:
6357 break;
6358 }
6359
6360 return nullptr;
6361}
6362
6363/// Given a min/max intrinsic, see if it can be removed based on having an
6364/// operand that is another min/max intrinsic with shared operand(s). The caller
6365/// is expected to swap the operand arguments to handle commutation.
6366 static Value *foldMinMaxSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1) {
6367 Value *X, *Y;
6368 if (!match(Op0, m_MaxOrMin(m_Value(X), m_Value(Y))))
6369 return nullptr;
6370
6371 auto *MM0 = dyn_cast<IntrinsicInst>(Op0);
6372 if (!MM0)
6373 return nullptr;
6374 Intrinsic::ID IID0 = MM0->getIntrinsicID();
6375
6376 if (Op1 == X || Op1 == Y ||
6377 match(Op1, m_MaxOrMin(m_Specific(X), m_Specific(Y)))) {
6378 // max (max X, Y), X --> max X, Y
6379 if (IID0 == IID)
6380 return MM0;
6381 // max (min X, Y), X --> X
6382 if (IID0 == getInverseMinMaxIntrinsic(IID))
6383 return Op1;
6384 }
6385 return nullptr;
6386}
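// For instance (illustrative, using the shorthand from the comments above):
//   umax (umax %x, %y), %x   --> umax %x, %y
//   umax (umin %x, %y), %x   --> %x   (the min is never greater than %x)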
6387
6388/// Given a min/max intrinsic, see if it can be removed based on having an
6389/// operand that is another min/max intrinsic with shared operand(s). The caller
6390/// is expected to swap the operand arguments to handle commutation.
6391 static Value *foldMinimumMaximumSharedOp(Intrinsic::ID IID, Value *Op0,
6392 Value *Op1) {
6393 assert((IID == Intrinsic::maxnum || IID == Intrinsic::minnum ||
6394 IID == Intrinsic::maximum || IID == Intrinsic::minimum) &&
6395 "Unsupported intrinsic");
6396
6397 auto *M0 = dyn_cast<IntrinsicInst>(Op0);
6398 // If Op0 is not the same intrinsic as IID, do not process.
6399 // This is a difference with integer min/max handling. We do not process the
6400 // case like max(min(X,Y),min(X,Y)) => min(X,Y). But it can be handled by GVN.
6401 if (!M0 || M0->getIntrinsicID() != IID)
6402 return nullptr;
6403 Value *X0 = M0->getOperand(0);
6404 Value *Y0 = M0->getOperand(1);
6405 // Simple case, m(m(X,Y), X) => m(X, Y)
6406 // m(m(X,Y), Y) => m(X, Y)
6407 // For minimum/maximum, X is NaN => m(NaN, Y) == NaN and m(NaN, NaN) == NaN.
6408 // For minimum/maximum, Y is NaN => m(X, NaN) == NaN and m(NaN, NaN) == NaN.
6409 // For minnum/maxnum, X is NaN => m(NaN, Y) == Y and m(Y, Y) == Y.
6410 // For minnum/maxnum, Y is NaN => m(X, NaN) == X and m(X, NaN) == X.
6411 if (X0 == Op1 || Y0 == Op1)
6412 return M0;
6413
6414 auto *M1 = dyn_cast<IntrinsicInst>(Op1);
6415 if (!M1)
6416 return nullptr;
6417 Value *X1 = M1->getOperand(0);
6418 Value *Y1 = M1->getOperand(1);
6419 Intrinsic::ID IID1 = M1->getIntrinsicID();
6420 // We have a case m(m(X,Y),m'(X,Y)), taking into account that m' is commutative.
6421 // if m' is m or inversion of m => m(m(X,Y),m'(X,Y)) == m(X,Y).
6422 // For minimum/maximum, X is NaN => m(NaN,Y) == m'(NaN, Y) == NaN.
6423 // For minimum/maximum, Y is NaN => m(X,NaN) == m'(X, NaN) == NaN.
6424 // For minnum/maxnum, X is NaN => m(NaN,Y) == m'(NaN, Y) == Y.
6425 // For minnum/maxnum, Y is NaN => m(X,NaN) == m'(X, NaN) == X.
6426 if ((X0 == X1 && Y0 == Y1) || (X0 == Y1 && Y0 == X1))
6427 if (IID1 == IID || getInverseMinMaxIntrinsic(IID1) == IID)
6428 return M0;
6429
6430 return nullptr;
6431}
6432
6433 static Value *simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType,
6434 Value *Op0, Value *Op1,
6435 const SimplifyQuery &Q,
6436 const CallBase *Call) {
6437 unsigned BitWidth = ReturnType->getScalarSizeInBits();
6438 switch (IID) {
6439 case Intrinsic::abs:
6440 // abs(abs(x)) -> abs(x). We don't need to worry about the nsw arg here.
6441 // It is always OK to pick the earlier abs. We'll just lose nsw if it's only
6442 // on the outer abs.
6443 if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(), m_Value())))
6444 return Op0;
6445 break;
6446
6447 case Intrinsic::cttz: {
6448 Value *X;
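 // Illustrative (not from the source): (1 << %x) has its only set bit at
 // position %x, so counting trailing zeros recovers %x:
 //   %s = shl i32 1, %x
 //   %r = call i32 @llvm.cttz.i32(i32 %s, i1 false)  ; simplifies to %x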
6449 if (match(Op0, m_Shl(m_One(), m_Value(X))))
6450 return X;
6451 break;
6452 }
6453 case Intrinsic::ctlz: {
6454 Value *X;
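 // Illustrative (not from the source): lshr of a negative constant (sign bit
 // set) by %x leaves the highest set bit at position BitWidth-1-%x, i.e.
 // exactly %x leading zeros; ashr keeps the sign bit set, so ctlz is 0:
 //   %s = lshr i32 -8, %x
 //   %r = call i32 @llvm.ctlz.i32(i32 %s, i1 false)  ; simplifies to %x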
6455 if (match(Op0, m_LShr(m_Negative(), m_Value(X))))
6456 return X;
6457 if (match(Op0, m_AShr(m_Negative(), m_Value())))
6458 return Constant::getNullValue(ReturnType);
6459 break;
6460 }
6461 case Intrinsic::ptrmask: {
6462 if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
6463 return PoisonValue::get(Op0->getType());
6464
6465 // NOTE: We can't apply this simplification based on the value of Op1
6466 // because we need to preserve provenance.
6467 if (Q.isUndefValue(Op0) || match(Op0, m_Zero()))
6468 return Constant::getNullValue(Op0->getType());
6469
6470 assert(Op1->getType()->getScalarSizeInBits() ==
6469
6471 Q.DL.getIndexTypeSizeInBits(Op0->getType()) &&
6472 "Invalid mask width");
6473 // If the index width (mask size) is less than the pointer size, then the
6474 // mask is 1-extended.
6475 if (match(Op1, m_PtrToInt(m_Specific(Op0))))
6476 return Op0;
6477
6478 // NOTE: We may have attributes associated with the return value of the
6479 // llvm.ptrmask intrinsic that will be lost when we just return the
6480 // operand. We should try to preserve them.
6481 if (match(Op1, m_AllOnes()) || Q.isUndefValue(Op1))
6482 return Op0;
6483
6484 Constant *C;
6485 if (match(Op1, m_ImmConstant(C))) {
6486 KnownBits PtrKnown = computeKnownBits(Op0, /*Depth=*/0, Q);
6487 // See if we are only masking off bits we know are already zero due to
6488 // alignment.
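 // Illustrative (assumed alignment): if %p is known 8-byte aligned, its low
 // three bits are already zero, so ptrmask(%p, -8) clears nothing:
 // (-8 | 0b111) folds to all-ones and the call simplifies to %p.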
6489 APInt IrrelevantPtrBits =
6490 PtrKnown.Zero.zextOrTrunc(C->getType()->getScalarSizeInBits());
6491 C = ConstantFoldBinaryOpOperands(
6492 Instruction::Or, C, ConstantInt::get(C->getType(), IrrelevantPtrBits),
6493 Q.DL);
6494 if (C != nullptr && C->isAllOnesValue())
6495 return Op0;
6496 }
6497 break;
6498 }
6499 case Intrinsic::smax:
6500 case Intrinsic::smin:
6501 case Intrinsic::umax:
6502 case Intrinsic::umin: {
6503 // If the arguments are the same, this is a no-op.
6504 if (Op0 == Op1)
6505 return Op0;
6506
6507 // Canonicalize immediate constant operand as Op1.
6508 if (match(Op0, m_ImmConstant()))
6509 std::swap(Op0, Op1);
6510
6511 // Assume undef is the limit value.
6512 if (Q.isUndefValue(Op1))
6513 return ConstantInt::get(
6514 ReturnType, MinMaxIntrinsic::getSaturationPoint(IID, BitWidth));
6515
6516 const APInt *C;
6517 if (match(Op1, m_APIntAllowPoison(C))) {
6518 // Clamp to limit value. For example:
6519 // umax(i8 %x, i8 255) --> 255
6520 if (*C == MinMaxIntrinsic::getSaturationPoint(IID, BitWidth))
6521 return ConstantInt::get(ReturnType, *C);
6522
6523 // If the constant op is the opposite of the limit value, the other must
6524 // be larger/smaller or equal. For example:
6525 // umin(i8 %x, i8 255) --> %x
6526 if (*C == MinMaxIntrinsic::getSaturationPoint(
6527 getInverseMinMaxIntrinsic(IID), BitWidth))
6528 return Op0;
6529
6530 // Remove nested call if constant operands allow it. Example:
6531 // max (max X, 7), 5 -> max X, 7
6532 auto *MinMax0 = dyn_cast<IntrinsicInst>(Op0);
6533 if (MinMax0 && MinMax0->getIntrinsicID() == IID) {
6534 // TODO: loosen undef/splat restrictions for vector constants.
6535 Value *M00 = MinMax0->getOperand(0), *M01 = MinMax0->getOperand(1);
6536 const APInt *InnerC;
6537 if ((match(M00, m_APInt(InnerC)) || match(M01, m_APInt(InnerC))) &&
6538 ICmpInst::compare(*InnerC, *C,
6539 ICmpInst::getNonStrictPredicate(
6540 MinMaxIntrinsic::getPredicate(IID))))
6541 return Op0;
6542 }
6543 }
6544
6545 if (Value *V = foldMinMaxSharedOp(IID, Op0, Op1))
6546 return V;
6547 if (Value *V = foldMinMaxSharedOp(IID, Op1, Op0))
6548 return V;
6549
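 // Illustrative (not from the source): umax(zext i8 %x to i32, i32 256)
 // simplifies to 256, because 256 uge zext(i8 %x) holds for every %x
 // (the zext is at most 255).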
6550 ICmpInst::Predicate Pred =
6551 ICmpInst::getNonStrictPredicate(MinMaxIntrinsic::getPredicate(IID));
6552 if (isICmpTrue(Pred, Op0, Op1, Q.getWithoutUndef(), RecursionLimit))
6553 return Op0;
6554 if (isICmpTrue(Pred, Op1, Op0, Q.getWithoutUndef(), RecursionLimit))
6555 return Op1;
6556
6557 break;
6558 }
6559 case Intrinsic::scmp:
6560 case Intrinsic::ucmp: {
6561 // Fold to a constant if the relationship between the operands can be
6562 // established with certainty.
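 // Illustrative (not from the source): if %x is provably nonzero, e.g.
 //   %x = or i8 %y, 16
 //   %r = call i8 @llvm.ucmp.i8.i8(i8 %x, i8 0)  ; simplifies to 1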
6563 if (isICmpTrue(CmpInst::ICMP_EQ, Op0, Op1, Q, RecursionLimit))
6564 return Constant::getNullValue(ReturnType);
6565
6566 ICmpInst::Predicate PredGT =
6567 IID == Intrinsic::scmp ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
6568 if (isICmpTrue(PredGT, Op0, Op1, Q, RecursionLimit))
6569 return ConstantInt::get(ReturnType, 1);
6570
6571 ICmpInst::Predicate PredLT =
6572 IID == Intrinsic::scmp ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
6573 if (isICmpTrue(PredLT, Op0, Op1, Q, RecursionLimit))
6574 return ConstantInt::getSigned(ReturnType, -1);
6575
6576 break;
6577 }
6578 case Intrinsic::usub_with_overflow:
6579 case Intrinsic::ssub_with_overflow:
6580 // X - X -> { 0, false }
6581 // X - undef -> { 0, false }
6582 // undef - X -> { 0, false }
6583 if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6584 return Constant::getNullValue(ReturnType);
6585 break;
6586 case Intrinsic::uadd_with_overflow:
6587 case Intrinsic::sadd_with_overflow:
6588 // X + undef -> { -1, false }
6589 // undef + X -> { -1, false }
6590 if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1)) {
6591 return ConstantStruct::get(
6592 cast<StructType>(ReturnType),
6593 {Constant::getAllOnesValue(ReturnType->getStructElementType(0)),
6594 Constant::getNullValue(ReturnType->getStructElementType(1))});
6595 }
6596 break;
6597 case Intrinsic::umul_with_overflow:
6598 case Intrinsic::smul_with_overflow:
6599 // 0 * X -> { 0, false }
6600 // X * 0 -> { 0, false }
6601 if (match(Op0, m_Zero()) || match(Op1, m_Zero()))
6602 return Constant::getNullValue(ReturnType);
6603 // undef * X -> { 0, false }
6604 // X * undef -> { 0, false }
6605 if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6606 return Constant::getNullValue(ReturnType);
6607 break;
6608 case Intrinsic::uadd_sat:
6609 // sat(MAX + X) -> MAX
6610 // sat(X + MAX) -> MAX
6611 if (match(Op0, m_AllOnes()) || match(Op1, m_AllOnes()))
6612 return Constant::getAllOnesValue(ReturnType);
6613 [[fallthrough]];
6614 case Intrinsic::sadd_sat:
6615 // sat(X + undef) -> -1
6616 // sat(undef + X) -> -1
6617 // For unsigned: Assume undef is MAX, thus we saturate to MAX (-1).
6618 // For signed: Assume undef is ~X, in which case X + ~X = -1.
6619 if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6620 return Constant::getAllOnesValue(ReturnType);
6621
6622 // X + 0 -> X
6623 if (match(Op1, m_Zero()))
6624 return Op0;
6625 // 0 + X -> X
6626 if (match(Op0, m_Zero()))
6627 return Op1;
6628 break;
6629 case Intrinsic::usub_sat:
6630 // sat(0 - X) -> 0, sat(X - MAX) -> 0
6631 if (match(Op0, m_Zero()) || match(Op1, m_AllOnes()))
6632 return Constant::getNullValue(ReturnType);
6633 [[fallthrough]];
6634 case Intrinsic::ssub_sat:
6635 // X - X -> 0, X - undef -> 0, undef - X -> 0
6636 if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6637 return Constant::getNullValue(ReturnType);
6638 // X - 0 -> X
6639 if (match(Op1, m_Zero()))
6640 return Op0;
6641 break;
6642 case Intrinsic::load_relative:
6643 if (auto *C0 = dyn_cast<Constant>(Op0))
6644 if (auto *C1 = dyn_cast<Constant>(Op1))
6645 return simplifyRelativeLoad(C0, C1, Q.DL);
6646 break;
6647 case Intrinsic::powi:
6648 if (auto *Power = dyn_cast<ConstantInt>(Op1)) {
6649 // powi(x, 0) -> 1.0
6650 if (Power->isZero())
6651 return ConstantFP::get(Op0->getType(), 1.0);
6652 // powi(x, 1) -> x
6653 if (Power->isOne())
6654 return Op0;
6655 }
6656 break;
6657 case Intrinsic::ldexp:
6658 return simplifyLdexp(Op0, Op1, Q, false);
6659 case Intrinsic::copysign:
6660 // copysign X, X --> X
6661 if (Op0 == Op1)
6662 return Op0;
6663 // copysign -X, X --> X
6664 // copysign X, -X --> -X
6665 if (match(Op0, m_FNeg(m_Specific(Op1))) ||
6666 match(Op1, m_FNeg(m_Specific(Op0))))
6667 return Op1;
6668 break;
6669 case Intrinsic::is_fpclass: {
6670 if (isa<PoisonValue>(Op0))
6671 return PoisonValue::get(ReturnType);
6672
6673 uint64_t Mask = cast<ConstantInt>(Op1)->getZExtValue();
6674 // If all tests are made (or none are), the result doesn't depend on the value.
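 // Illustrative (not from the source): a mask of 1023 (fcAllFlags, 0x3ff)
 // tests every FP class and is always true; a mask of 0 is always false.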
6675 if ((Mask & fcAllFlags) == fcAllFlags)
6676 return ConstantInt::get(ReturnType, true);
6677 if ((Mask & fcAllFlags) == 0)
6678 return ConstantInt::get(ReturnType, false);
6679 if (Q.isUndefValue(Op0))
6680 return UndefValue::get(ReturnType);
6681 break;
6682 }
6683 case Intrinsic::maxnum:
6684 case Intrinsic::minnum:
6685 case Intrinsic::maximum:
6686 case Intrinsic::minimum: {
6687 // If the arguments are the same, this is a no-op.
6688 if (Op0 == Op1)
6689 return Op0;
6690
6691 // Canonicalize constant operand as Op1.
6692 if (isa<Constant>(Op0))
6693 std::swap(Op0, Op1);
6694
6695 // If an argument is undef, return the other argument.
6696 if (Q.isUndefValue(Op1))
6697 return Op0;
6698
6699 bool PropagateNaN = IID == Intrinsic::minimum || IID == Intrinsic::maximum;
6700 bool IsMin = IID == Intrinsic::minimum || IID == Intrinsic::minnum;
6701
6702 // minnum(X, nan) -> X
6703 // maxnum(X, nan) -> X
6704 // minimum(X, nan) -> nan
6705 // maximum(X, nan) -> nan
6706 if (match(Op1, m_NaN()))
6707 return PropagateNaN ? propagateNaN(cast<Constant>(Op1)) : Op0;
6708
6709 // In the following folds, inf can be replaced with the largest finite
6710 // float, if the ninf flag is set.
6711 const APFloat *C;
6712 if (match(Op1, m_APFloat(C)) &&
6713 (C->isInfinity() || (Call && Call->hasNoInfs() && C->isLargest()))) {
6714 // minnum(X, -inf) -> -inf
6715 // maxnum(X, +inf) -> +inf
6716 // minimum(X, -inf) -> -inf if nnan
6717 // maximum(X, +inf) -> +inf if nnan
6718 if (C->isNegative() == IsMin &&
6719 (!PropagateNaN || (Call && Call->hasNoNaNs())))
6720 return ConstantFP::get(ReturnType, *C);
6721
6722 // minnum(X, +inf) -> X if nnan
6723 // maxnum(X, -inf) -> X if nnan
6724 // minimum(X, +inf) -> X
6725 // maximum(X, -inf) -> X
6726 if (C->isNegative() != IsMin &&
6727 (PropagateNaN || (Call && Call->hasNoNaNs())))
6728 return Op0;
6729 }
6730
6731 // Min/max of the same operation with common operand:
6732 // m(m(X, Y), X) --> m(X, Y) (4 commuted variants)
6733 if (Value *V = foldMinimumMaximumSharedOp(IID, Op0, Op1))
6734 return V;
6735 if (Value *V = foldMinimumMaximumSharedOp(IID, Op1, Op0))
6736 return V;
6737
6738 break;
6739 }
6740 case Intrinsic::vector_extract: {
6741 // (extract_vector (insert_vector _, X, 0), 0) -> X
6742 unsigned IdxN = cast<ConstantInt>(Op1)->getZExtValue();
6743 Value *X = nullptr;
6744 if (match(Op0, m_Intrinsic<Intrinsic::vector_insert>(m_Value(), m_Value(X),
6745 m_Zero())) &&
6746 IdxN == 0 && X->getType() == ReturnType)
6747 return X;
6748
6749 break;
6750 }
6751 default:
6752 break;
6753 }
6754
6755 return nullptr;
6756}
6757
6758static Value *simplifyIntrinsic(CallBase *Call, Value *Callee,
6759 ArrayRef<Value *> Args,
6760 const SimplifyQuery &Q) {
6761 // Operand bundles should not be in Args.
6762 assert(Call->arg_size() == Args.size());
6763 unsigned NumOperands = Args.size();
6764 Function *F = cast<Function>(Callee);
6765 Intrinsic::ID IID = F->getIntrinsicID();
6766
6767 // Most of the intrinsics with no operands have some kind of side effect.
6768 // Don't simplify.
6769 if (!NumOperands) {
6770 switch (IID) {
6771 case Intrinsic::vscale: {
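 // Illustrative (assumed attribute): if the caller carries vscale_range(2,2),
 // the range is the single value 2 and the call folds to a constant 2.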
6772 Type *RetTy = F->getReturnType();
6773 ConstantRange CR = getVScaleRange(Call->getFunction(), 64);
6774 if (const APInt *C = CR.getSingleElement())
6775 return ConstantInt::get(RetTy, C->getZExtValue());
6776 return nullptr;
6777 }
6778 default:
6779 return nullptr;
6780 }
6781 }
6782
6783 if (NumOperands == 1)
6784 return simplifyUnaryIntrinsic(F, Args[0], Q, Call);
6785
6786 if (NumOperands == 2)
6787 return simplifyBinaryIntrinsic(IID, F->getReturnType(), Args[0], Args[1], Q,
6788 Call);
6789
6790 // Handle intrinsics with 3 or more arguments.
6791 switch (IID) {
6792 case Intrinsic::masked_load:
6793 case Intrinsic::masked_gather: {
6794 Value *MaskArg = Args[2];
6795 Value *PassthruArg = Args[3];
6796 // If the mask is all zeros or undef, the "passthru" argument is the result.
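 // Illustrative (not from the source): a fully-disabled masked load simply
 // returns its passthru operand:
 //   %r = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %p, i32 4,
 //            <4 x i1> zeroinitializer, <4 x i32> %pt)  ; simplifies to %pt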
6797 if (maskIsAllZeroOrUndef(MaskArg))
6798 return PassthruArg;
6799 return nullptr;
6800 }
6801 case Intrinsic::fshl:
6802 case Intrinsic::fshr: {
6803 Value *Op0 = Args[0], *Op1 = Args[1], *ShAmtArg = Args[2];
6804
6805 // If both operands are undef, the result is undef.
6806 if (Q.isUndefValue(Op0) && Q.isUndefValue(Op1))
6807 return UndefValue::get(F->getReturnType());
6808
6809 // If shift amount is undef, assume it is zero.
6810 if (Q.isUndefValue(ShAmtArg))
6811 return Args[IID == Intrinsic::fshl ? 0 : 1];
6812
6813 const APInt *ShAmtC;
6814 if (match(ShAmtArg, m_APInt(ShAmtC))) {
6815 // If there's effectively no shift, return the 1st arg or 2nd arg.
6816 APInt BitWidth = APInt(ShAmtC->getBitWidth(), ShAmtC->getBitWidth());
6817 if (ShAmtC->urem(BitWidth).isZero())
6818 return Args[IID == Intrinsic::fshl ? 0 : 1];
6819 }
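 // Illustrative (not from the source): on i8, a shift amount of 8 is 0 modulo
 // the bit width, so the funnel shift keeps its first operand:
 //   %r = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 8)  ; simplifies to %x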
6820
6821 // Rotating zero by anything is zero.
6822 if (match(Op0, m_Zero()) && match(Op1, m_Zero()))
6823 return ConstantInt::getNullValue(F->getReturnType());
6824
6825 // Rotating -1 by anything is -1.
6826 if (match(Op0, m_AllOnes()) && match(Op1, m_AllOnes()))
6827 return ConstantInt::getAllOnesValue(F->getReturnType());
6828
6829 return nullptr;
6830 }
6831 case Intrinsic::experimental_constrained_fma: {
6832 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6833 if (Value *V = simplifyFPOp(Args, {}, Q, *FPI->getExceptionBehavior(),
6834 *FPI->getRoundingMode()))
6835 return V;
6836 return nullptr;
6837 }
6838 case Intrinsic::fma:
6839 case Intrinsic::fmuladd: {
6840 if (Value *V = simplifyFPOp(Args, {}, Q, fp::ebIgnore,
6841 RoundingMode::NearestTiesToEven))
6842 return V;
6843 return nullptr;
6844 }
6845 case Intrinsic::smul_fix:
6846 case Intrinsic::smul_fix_sat: {
6847 Value *Op0 = Args[0];
6848 Value *Op1 = Args[1];
6849 Value *Op2 = Args[2];
6850 Type *ReturnType = F->getReturnType();
6851
6852 // Canonicalize constant operand as Op1 (ConstantFolding handles the case
6853 // when both Op0 and Op1 are constant so we do not care about that special
6854 // case here).
6855 if (isa<Constant>(Op0))
6856 std::swap(Op0, Op1);
6857
6858 // X * 0 -> 0
6859 if (match(Op1, m_Zero()))
6860 return Constant::getNullValue(ReturnType);
6861
6862 // X * undef -> 0
6863 if (Q.isUndefValue(Op1))
6864 return Constant::getNullValue(ReturnType);
6865
6866 // X * (1 << Scale) -> X
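 // Illustrative (not from the source): with Scale == 8, the fixed-point
 // constant 256 == (1 << 8) represents 1.0, so smul_fix(%x, 256, 8)
 // simplifies to %x.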
6867 APInt ScaledOne =
6868 APInt::getOneBitSet(ReturnType->getScalarSizeInBits(),
6869 cast<ConstantInt>(Op2)->getZExtValue());
6870 if (ScaledOne.isNonNegative() && match(Op1, m_SpecificInt(ScaledOne)))
6871 return Op0;
6872
6873 return nullptr;
6874 }
6875 case Intrinsic::vector_insert: {
6876 Value *Vec = Args[0];
6877 Value *SubVec = Args[1];
6878 Value *Idx = Args[2];
6879 Type *ReturnType = F->getReturnType();
6880
6881 // (insert_vector Y, (extract_vector X, 0), 0) -> X
6882 // where: Y is X, or Y is undef
6883 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6884 Value *X = nullptr;
6885 if (match(SubVec,
6886 m_Intrinsic<Intrinsic::vector_extract>(m_Value(X), m_Zero())) &&
6887 (Q.isUndefValue(Vec) || Vec == X) && IdxN == 0 &&
6888 X->getType() == ReturnType)
6889 return X;
6890
6891 return nullptr;
6892 }
6893 case Intrinsic::experimental_constrained_fadd: {
6894 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6895 return simplifyFAddInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
6896 *FPI->getExceptionBehavior(),
6897 *FPI->getRoundingMode());
6898 }
6899 case Intrinsic::experimental_constrained_fsub: {
6900 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6901 return simplifyFSubInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
6902 *FPI->getExceptionBehavior(),
6903 *FPI->getRoundingMode());
6904 }
6905 case Intrinsic::experimental_constrained_fmul: {
6906 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6907 return simplifyFMulInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
6908 *FPI->getExceptionBehavior(),
6909 *FPI->getRoundingMode());
6910 }
6911 case Intrinsic::experimental_constrained_fdiv: {
6912 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6913 return simplifyFDivInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
6914 *FPI->getExceptionBehavior(),
6915 *FPI->getRoundingMode());
6916 }
6917 case Intrinsic::experimental_constrained_frem: {
6918 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6919 return simplifyFRemInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
6920 *FPI->getExceptionBehavior(),
6921 *FPI->getRoundingMode());
6922 }
6923 case Intrinsic::experimental_constrained_ldexp:
6924 return simplifyLdexp(Args[0], Args[1], Q, true);
6925 case Intrinsic::experimental_gc_relocate: {
6926 GCRelocateInst &GCR = *cast<GCRelocateInst>(Call);
6927 Value *DerivedPtr = GCR.getDerivedPtr();
6928 Value *BasePtr = GCR.getBasePtr();
6929
6930 // Undef is undef, even after relocation.
6931 if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {
6932 return UndefValue::get(GCR.getType());
6933 }
6934
6935 if (auto *PT = dyn_cast<PointerType>(GCR.getType())) {
6936 // For now, the assumption is that the relocation of null will be null
6937 // for almost any collector. If this ever changes, a corresponding hook
6938 // should be added to GCStrategy and this code should check it first.
6939 if (isa<ConstantPointerNull>(DerivedPtr)) {
6940 // Use null-pointer of gc_relocate's type to replace it.
6941 return ConstantPointerNull::get(PT);
6942 }
6943 }
6944 return nullptr;
6945 }
6946 default:
6947 return nullptr;
6948 }
6949}
6950
6951 static Value *tryConstantFoldCall(CallBase *Call, Value *Callee,
6952 ArrayRef<Value *> Args,
6953 const SimplifyQuery &Q) {
6954 auto *F = dyn_cast<Function>(Callee);
6955 if (!F || !canConstantFoldCallTo(Call, F))
6956 return nullptr;
6957
6958 SmallVector<Constant *, 4> ConstantArgs;
6959 ConstantArgs.reserve(Args.size());
6960 for (Value *Arg : Args) {
6961 Constant *C = dyn_cast<Constant>(Arg);
6962 if (!C) {
6963 if (isa<MetadataAsValue>(Arg))
6964 continue;
6965 return nullptr;
6966 }
6967 ConstantArgs.push_back(C);
6968 }
6969
6970 return ConstantFoldCall(Call, F, ConstantArgs, Q.TLI);
6971}
6972
6973 Value *llvm::simplifyCall(CallBase *Call, Value *Callee, ArrayRef<Value *> Args,
6974 const SimplifyQuery &Q) {
6975 // Args should not contain operand bundle operands.
6976 assert(Call->arg_size() == Args.size());
6977
6978 // musttail calls can only be simplified if they are also DCEd.
6979 // As we can't guarantee this here, don't simplify them.
6980 if (Call->isMustTailCall())
6981 return nullptr;
6982
6983 // call undef -> poison
6984 // call null -> poison
6985 if (isa<UndefValue>(Callee) || isa<ConstantPointerNull>(Callee))
6986 return PoisonValue::get(Call->getType());
6987
6988 if (Value *V = tryConstantFoldCall(Call, Callee, Args, Q))
6989 return V;
6990
6991 auto *F = dyn_cast<Function>(Callee);
6992 if (F && F->isIntrinsic())
6993 if (Value *Ret = simplifyIntrinsic(Call, Callee, Args, Q))
6994 return Ret;
6995
6996 return nullptr;
6997}
6998
6999 Value *llvm::simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q) {
7000 assert(isa<ConstrainedFPIntrinsic>(Call));
7001 SmallVector<Value *, 4> Args(Call->args());
7002 if (Value *V = tryConstantFoldCall(Call, Call->getCalledOperand(), Args, Q))
7003 return V;
7004 if (Value *Ret = simplifyIntrinsic(Call, Call->getCalledOperand(), Args, Q))
7005 return Ret;
7006 return nullptr;
7007}
7008
7009/// Given operands for a Freeze, see if we can fold the result.
7010 static Value *simplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
7011 // Use a utility function defined in ValueTracking.
7012 if (llvm::isGuaranteedNotToBeUndefOrPoison(Op0, Q.AC, Q.CxtI, Q.DT))
7013 return Op0;
7014 // We have room for improvement.
7015 return nullptr;
7016}
7017
7018 Value *llvm::simplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
7019 return ::simplifyFreezeInst(Op0, Q);
7020}
7021
7022 static Value *simplifyLoadInst(LoadInst *LI, Value *PtrOp,
7023 const SimplifyQuery &Q) {
7024 if (LI->isVolatile())
7025 return nullptr;
7026
7027 if (auto *PtrOpC = dyn_cast<Constant>(PtrOp))
7028 return ConstantFoldLoadFromConstPtr(PtrOpC, LI->getType(), Q.DL);
7029
7030 // We can only fold the load if it is from a constant global with definitive
7031 // initializer. Skip expensive logic if this is not the case.
7032 auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(PtrOp));
7033 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
7034 return nullptr;
7035
7036 // If GlobalVariable's initializer is uniform, then return the constant
7037 // regardless of its offset.
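 // Illustrative (not from the source): any load from
 //   @g = constant [8 x i32] zeroinitializer
 // folds to i32 0 regardless of the offset, even a non-constant one.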
7038 if (Constant *C = ConstantFoldLoadFromUniformValue(GV->getInitializer(),
7039 LI->getType(), Q.DL))
7040 return C;
7041
7042 // Try to convert operand into a constant by stripping offsets while looking
7043 // through invariant.group intrinsics.
7044 APInt Offset(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()), 0);
7045 PtrOp = PtrOp->stripAndAccumulateConstantOffsets(
7046 Q.DL, Offset, /* AllowNonInbounds */ true,
7047 /* AllowInvariantGroup */ true);
7048 if (PtrOp == GV) {
7049 // Index size may have changed due to address space casts.
7050 Offset = Offset.sextOrTrunc(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()));
7051 return ConstantFoldLoadFromConstPtr(GV, LI->getType(), std::move(Offset),
7052 Q.DL);
7053 }
7054
7055 return nullptr;
7056}
7057
7058/// See if we can compute a simplified version of this instruction.
7059/// If not, this returns null.
7060
7061 static Value *simplifyInstructionWithOperands(Instruction *I,
7062 ArrayRef<Value *> NewOps,
7063 const SimplifyQuery &SQ,
7064 unsigned MaxRecurse) {
7065 assert(I->getFunction() && "instruction should be inserted in a function");
7066 assert((!SQ.CxtI || SQ.CxtI->getFunction() == I->getFunction()) &&
7067 "context instruction should be in the same function");
7068
7069 const SimplifyQuery Q = SQ.CxtI ? SQ : SQ.getWithInstruction(I);
7070
7071 switch (I->getOpcode()) {
7072 default:
7073 if (llvm::all_of(NewOps, [](Value *V) { return isa<Constant>(V); })) {
7074 SmallVector<Constant *, 8> NewConstOps(NewOps.size());
7075 transform(NewOps, NewConstOps.begin(),
7076 [](Value *V) { return cast<Constant>(V); });
7077 return ConstantFoldInstOperands(I, NewConstOps, Q.DL, Q.TLI);
7078 }
7079 return nullptr;
7080 case Instruction::FNeg:
7081 return simplifyFNegInst(NewOps[0], I->getFastMathFlags(), Q, MaxRecurse);
7082 case Instruction::FAdd:
7083 return simplifyFAddInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
7084 MaxRecurse);
7085 case Instruction::Add:
7086 return simplifyAddInst(
7087 NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
7088 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
7089 case Instruction::FSub:
7090 return simplifyFSubInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
7091 MaxRecurse);
7092 case Instruction::Sub:
7093 return simplifySubInst(
7094 NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
7095 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
7096 case Instruction::FMul:
7097 return simplifyFMulInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
7098 MaxRecurse);
7099 case Instruction::Mul:
7100 return simplifyMulInst(
7101 NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
7102 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
7103 case Instruction::SDiv:
7104 return simplifySDivInst(NewOps[0], NewOps[1],
7105 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
7106 MaxRecurse);
7107 case Instruction::UDiv:
7108 return simplifyUDivInst(NewOps[0], NewOps[1],
7109 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
7110 MaxRecurse);
7111 case Instruction::FDiv:
7112 return simplifyFDivInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
7113 MaxRecurse);
7114 case Instruction::SRem:
7115 return simplifySRemInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7116 case Instruction::URem:
7117 return simplifyURemInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7118 case Instruction::FRem:
7119 return simplifyFRemInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
7120 MaxRecurse);
7121 case Instruction::Shl:
7122 return simplifyShlInst(
7123 NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
7124 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
7125 case Instruction::LShr:
7126 return simplifyLShrInst(NewOps[0], NewOps[1],
7127 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
7128 MaxRecurse);
7129 case Instruction::AShr:
7130 return simplifyAShrInst(NewOps[0], NewOps[1],
7131 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
7132 MaxRecurse);
7133 case Instruction::And:
7134 return simplifyAndInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7135 case Instruction::Or:
7136 return simplifyOrInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7137 case Instruction::Xor:
7138 return simplifyXorInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7139 case Instruction::ICmp:
7140 return simplifyICmpInst(cast<ICmpInst>(I)->getCmpPredicate(), NewOps[0],
7141 NewOps[1], Q, MaxRecurse);
7142 case Instruction::FCmp:
7143 return simplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), NewOps[0],
7144 NewOps[1], I->getFastMathFlags(), Q, MaxRecurse);
7145 case Instruction::Select:
7146 return simplifySelectInst(NewOps[0], NewOps[1], NewOps[2], Q, MaxRecurse);
7147 case Instruction::GetElementPtr: {
7148 auto *GEPI = cast<GetElementPtrInst>(I);
7149 return simplifyGEPInst(GEPI->getSourceElementType(), NewOps[0],
7150 ArrayRef(NewOps).slice(1), GEPI->getNoWrapFlags(), Q,
7151 MaxRecurse);
7152 }
7153 case Instruction::InsertValue: {
7154 InsertValueInst *IV = cast<InsertValueInst>(I);
7155 return simplifyInsertValueInst(NewOps[0], NewOps[1], IV->getIndices(), Q,
7156 MaxRecurse);
7157 }
7158 case Instruction::InsertElement:
7159 return simplifyInsertElementInst(NewOps[0], NewOps[1], NewOps[2], Q);
7160 case Instruction::ExtractValue: {
7161 auto *EVI = cast<ExtractValueInst>(I);
7162 return simplifyExtractValueInst(NewOps[0], EVI->getIndices(), Q,
7163 MaxRecurse);
7164 }
7165 case Instruction::ExtractElement:
7166 return simplifyExtractElementInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7167 case Instruction::ShuffleVector: {
7168 auto *SVI = cast<ShuffleVectorInst>(I);
7169 return simplifyShuffleVectorInst(NewOps[0], NewOps[1],
7170 SVI->getShuffleMask(), SVI->getType(), Q,
7171 MaxRecurse);
7172 }
7173 case Instruction::PHI:
7174 return simplifyPHINode(cast<PHINode>(I), NewOps, Q);
7175 case Instruction::Call:
7176 return simplifyCall(
7177 cast<CallInst>(I), NewOps.back(),
7178 NewOps.drop_back(1 + cast<CallInst>(I)->getNumTotalBundleOperands()), Q);
7179 case Instruction::Freeze:
7180 return llvm::simplifyFreezeInst(NewOps[0], Q);
7181#define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
7182#include "llvm/IR/Instruction.def"
7183#undef HANDLE_CAST_INST
7184 return simplifyCastInst(I->getOpcode(), NewOps[0], I->getType(), Q,
7185 MaxRecurse);
7186 case Instruction::Alloca:
7187 // No simplifications for Alloca, and it can't be constant folded.
7188 return nullptr;
7189 case Instruction::Load:
7190 return simplifyLoadInst(cast<LoadInst>(I), NewOps[0], Q);
7191 }
7192}
7193
7194 Value *llvm::simplifyInstructionWithOperands(Instruction *I,
7195 ArrayRef<Value *> NewOps,
7196 const SimplifyQuery &SQ) {
7197 assert(NewOps.size() == I->getNumOperands() &&
7198 "Number of operands should match the instruction!");
7199 return ::simplifyInstructionWithOperands(I, NewOps, SQ, RecursionLimit);
7200}
7201
7202 Value *llvm::simplifyInstruction(Instruction *I, const SimplifyQuery &SQ) {
7203 SmallVector<Value *, 8> Ops(I->operands());
7204 Value *Result = ::simplifyInstructionWithOperands(I, Ops, SQ, RecursionLimit);
7205
7206 /// If called on unreachable code, the instruction may simplify to itself.
7207 /// Make life easier for users by detecting that case here, and returning a
7208 /// safe value instead.
7209 return Result == I ? PoisonValue::get(I->getType()) : Result;
7210}
7211
7212/// Implementation of recursive simplification through an instruction's
7213/// uses.
7214///
7215/// This is the common implementation of the recursive simplification routines.
7216/// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
7217/// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
7218/// instructions to process and attempt to simplify it using
7219/// InstructionSimplify. Recursively visited users which could not be
7220 /// simplified themselves are added to the optional UnsimplifiedUsers set for
7221/// further processing by the caller.
7222///
7223/// This routine returns 'true' only when *it* simplifies something. The passed
7224/// in simplified value does not count toward this.
7225 static bool replaceAndRecursivelySimplifyImpl(
7226 Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
7227 const DominatorTree *DT, AssumptionCache *AC,
7228 SmallSetVector<Instruction *, 8> *UnsimplifiedUsers = nullptr) {
7229 bool Simplified = false;
7230 SmallSetVector<Instruction *, 8> Worklist;
7231 const DataLayout &DL = I->getDataLayout();
7232
7233 // If we have an explicit value to collapse to, do that round of the
7234 // simplification loop by hand initially.
7235 if (SimpleV) {
7236 for (User *U : I->users())
7237 if (U != I)
7238 Worklist.insert(cast<Instruction>(U));
7239
7240 // Replace the instruction with its simplified value.
7241 I->replaceAllUsesWith(SimpleV);
7242
7243 if (!I->isEHPad() && !I->isTerminator() && !I->mayHaveSideEffects())
7244 I->eraseFromParent();
7245 } else {
7246 Worklist.insert(I);
7247 }
7248
7249 // Note that we must test the size on each iteration, as the worklist can grow.
7250 for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
7251 I = Worklist[Idx];
7252
7253 // See if this instruction simplifies.
7254 SimpleV = simplifyInstruction(I, {DL, TLI, DT, AC});
7255 if (!SimpleV) {
7256 if (UnsimplifiedUsers)
7257 UnsimplifiedUsers->insert(I);
7258 continue;
7259 }
7260
7261 Simplified = true;
7262
7263 // Stash away all the uses of the old instruction so we can check them for
7264 // recursive simplifications after a RAUW. This is cheaper than checking all
7265 // uses of SimpleV on the recursive step in most cases.
7266 for (User *U : I->users())
7267 Worklist.insert(cast<Instruction>(U));
7268
7269 // Replace the instruction with its simplified value.
7270 I->replaceAllUsesWith(SimpleV);
7271
7272 if (!I->isEHPad() && !I->isTerminator() && !I->mayHaveSideEffects())
7273 I->eraseFromParent();
7274 }
7275 return Simplified;
7276}
7277
7278 bool llvm::replaceAndRecursivelySimplify(
7279 Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
7280 const DominatorTree *DT, AssumptionCache *AC,
7281 SmallSetVector<Instruction *, 8> *UnsimplifiedUsers) {
7282 assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
7283 assert(SimpleV && "Must provide a simplified value.");
7284 return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC,
7285 UnsimplifiedUsers);
7286}
7287
7288namespace llvm {
7289 const SimplifyQuery getBestSimplifyQuery(Pass &P, Function &F) {
7290 auto *DTWP = P.getAnalysisIfAvailable<DominatorTreeWrapperPass>();
7291 auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
7292 auto *TLIWP = P.getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
7293 auto *TLI = TLIWP ? &TLIWP->getTLI(F) : nullptr;
7294 auto *ACWP = P.getAnalysisIfAvailable<AssumptionCacheTracker>();
7295 auto *AC = ACWP ? &ACWP->getAssumptionCache(F) : nullptr;
7296 return {F.getDataLayout(), TLI, DT, AC};
7297}
7298
7299 const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &AR,
7300 const DataLayout &DL) {
7301 return {DL, &AR.TLI, &AR.DT, &AR.AC};
7302}
7303
7304template <class T, class... TArgs>
7305 const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &AM,
7306 Function &F) {
7307 auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F);
7308 auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F);
7309 auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F);
7310 return {F.getDataLayout(), TLI, DT, AC};
7311}
7312 template const SimplifyQuery getBestSimplifyQuery(AnalysisManager<Function> &,
7313 Function &);
7314
7315 bool SimplifyQuery::isUndefValue(Value *V) const {
7316 if (!CanUseUndef)
7317 return false;
7318
7319 return match(V, m_Undef());
7320}
7321
7322} // namespace llvm
7323
7324void InstSimplifyFolder::anchor() {}
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
IRTranslator LLVM IR MI
static Value * simplifyFreezeInst(Value *Op0, const SimplifyQuery &Q)
Given operands for a Freeze, see if we can fold the result.
static Value * simplifyCmpSelFalseCase(CmpPredicate Pred, Value *LHS, Value *RHS, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse)
Simplify comparison with false branch of select.
static Value * simplifyCmpSelCase(CmpPredicate Pred, Value *LHS, Value *RHS, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse, Constant *TrueOrFalse)
Simplify comparison with true or false branch of select: sel = select i1 cond, i32 tv,...
static Value * simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *CmpRHS, CmpPredicate Pred, Value *TrueVal, Value *FalseVal)
An alternative way to test if a bit is set or not uses sgt/slt instead of eq/ne.
static Value * simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an LShr, see if we can fold the result.
static Value * simplifyUDivInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for a UDiv, see if we can fold the result.
static Value * simplifyShuffleVectorInst(Value *Op0, Value *Op1, ArrayRef< int > Mask, Type *RetTy, const SimplifyQuery &Q, unsigned MaxRecurse)
static Value * foldMinMaxSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1)
Given a min/max intrinsic, see if it can be removed based on having an operand that is another min/ma...
static Value * simplifySubInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for a Sub, see if we can fold the result.
static Value * simplifyFCmpInst(CmpPredicate Pred, Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an FCmpInst, see if we can fold the result.
static Value * expandCommutativeBinOp(Instruction::BinaryOps Opcode, Value *L, Value *R, Instruction::BinaryOps OpcodeToExpand, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify binops of form "A op (B op' C)" or the commuted variant by distributing op over op'.
static Constant * foldOrCommuteConstant(Instruction::BinaryOps Opcode, Value *&Op0, Value *&Op1, const SimplifyQuery &Q)
static bool haveNonOverlappingStorage(const Value *V1, const Value *V2)
Return true if V1 and V2 are each the base of some distict storage region [V, object_size(V)] which d...
static Constant * foldConstant(Instruction::UnaryOps Opcode, Value *&Op, const SimplifyQuery &Q)
static Value * handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse)
We know comparison with both branches of select can be simplified, but they are not equal.
static Value * threadCmpOverPHI(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a comparison with a PHI instruction, try to simplify the comparison by seeing whether ...
static Constant * propagateNaN(Constant *In)
Try to propagate existing NaN values when possible.
static Value * simplifyICmpOfBools(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Fold an icmp when its operands have i1 scalar type.
static Value * simplifyICmpWithBinOpOnLHS(CmpPredicate Pred, BinaryOperator *LBO, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
static Value * simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an AShr, see if we can fold the result.
static Value * simplifyRelativeLoad(Constant *Ptr, Constant *Offset, const DataLayout &DL)
static Value * simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
These are simplifications common to SDiv and UDiv.
static Value * simplifyPHINode(PHINode *PN, ArrayRef< Value * > IncomingValues, const SimplifyQuery &Q)
See if we can fold the given phi. If not, returns null.
static Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &, unsigned)
Given operands for an ExtractValueInst, see if we can fold the result.
static Value * simplifySelectInst(Value *, Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for a SelectInst, see if we can fold the result.
static Value * simplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an Add, see if we can fold the result.
static Value * simplifyUnOp(unsigned, Value *, const SimplifyQuery &, unsigned)
Given the operand for a UnaryOperator, see if we can fold the result.
static bool isSameCompare(Value *V, CmpPredicate Pred, Value *LHS, Value *RHS)
isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
static Value * simplifyAndCommutative(Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
static Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &SQ, unsigned MaxRecurse)
See if we can compute a simplified version of this instruction.
static bool isIdempotent(Intrinsic::ID ID)
static std::optional< ConstantRange > getRange(Value *V, const InstrInfoQuery &IIQ)
Helper method to get range from metadata or attribute.
static Value * simplifyAndOrOfICmpsWithCtpop(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd)
Try to simplify and/or of icmp with ctpop intrinsic.
static Value * simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp, ICmpInst *UnsignedICmp, bool IsAnd, const SimplifyQuery &Q)
Commuted variants are assumed to be handled by calling this function again with the parameters swappe...
static Value * tryConstantFoldCall(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
static Value * simplifyICmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an ICmpInst, see if we can fold the result.
static Value * simplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &Q, unsigned)
Given operands for an ExtractElementInst, see if we can fold the result.
static Value * simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, const InstrInfoQuery &IIQ)
static Value * simplifyICmpWithMinMax(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
simplify integer comparisons where at least one operand of the compare matches an integer min/max idi...
static Value * simplifyCmpSelTrueCase(CmpPredicate Pred, Value *LHS, Value *RHS, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse)
Simplify comparison with true branch of select.
static Value * simplifyIntrinsic(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
static void getUnsignedMonotonicValues(SmallPtrSetImpl< Value * > &Res, Value *V, MonotonicType Type, unsigned Depth=0)
Get values V_i such that V uge V_i (GreaterEq) or V ule V_i (LowerEq).
static bool isPoisonShift(Value *Amount, const SimplifyQuery &Q)
Returns true if a shift by Amount always yields poison.
static APInt stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V, bool AllowNonInbounds=false)
Compute the base pointer and cumulative constant offsets for V.
static Value * simplifyCmpInst(CmpPredicate, Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for a CmpInst, see if we can fold the result.
static Value * simplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse, fp::ExceptionBehavior ExBehavior, RoundingMode Rounding)
static Value * simplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an LShr or AShr, see if we can fold the result.
static Value * simplifyICmpWithIntrinsicOnLHS(CmpPredicate Pred, Value *LHS, Value *RHS)
static Value * simplifySDivInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an SDiv, see if we can fold the result.
static Value * simplifySelectWithEquivalence(Value *CmpLHS, Value *CmpRHS, Value *TrueVal, Value *FalseVal, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a select instruction when its condition operand is an integer equality or floating-po...
static Value * simplifyByDomEq(unsigned Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Test if there is a dominating equivalence condition for the two operands.
static Value * simplifyFPUnOp(unsigned, Value *, const FastMathFlags &, const SimplifyQuery &, unsigned)
Given the operand for a UnaryOperator, see if we can fold the result.
static Value * simplifyICmpWithBinOp(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
TODO: A large part of this logic is duplicated in InstCombine's foldICmpBinOp().
static Value * simplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FAdd, see if we can fold the result.
static Value * simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1, const SimplifyQuery &Q)
static Value * expandBinOp(Instruction::BinaryOps Opcode, Value *V, Value *OtherOp, Instruction::BinaryOps OpcodeToExpand, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a binary operator of form "V op OtherOp" where V is "(B0 opex B1)" by distributing 'o...
static Value * simplifyICmpWithZero(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Try hard to fold icmp with zero RHS because this is a common case.
static Value * simplifyICmpWithConstant(CmpPredicate Pred, Value *LHS, Value *RHS, const InstrInfoQuery &IIQ)
static Value * simplifySelectWithFCmp(Value *Cond, Value *T, Value *F, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a select instruction when its condition operand is a floating-point comparison.
static Constant * getFalse(Type *Ty)
For a boolean type or a vector of boolean type, return false or a vector with every element false.
static Value * simplifyDivRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Check for common or similar folds of integer division or integer remainder.
static bool removesFPFraction(Intrinsic::ID ID)
Return true if the intrinsic rounds a floating-point value to an integral floating-point value (not a...
static Value * simplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
static Value * simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, const InstrInfoQuery &IIQ)
static Value * simplifyMulInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for a Mul, see if we can fold the result.
static Value * simplifyFNegInst(Value *Op, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse)
Given the operand for an FNeg, see if we can fold the result.
static Value * simplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for an Or, see if we can fold the result.
static bool trySimplifyICmpWithAdds(CmpPredicate Pred, Value *LHS, Value *RHS, const InstrInfoQuery &IIQ)
static Value * simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X, const APInt *Y, bool TrueWhenUnset)
Try to simplify a select instruction when its condition operand is an integer comparison where one op...
static Value * simplifyAssociativeBinOp(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
Generic simplifications for associative binary operations.
static Value * simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an Shl, see if we can fold the result.
static Value * threadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a binary operation with an operand that is a PHI instruction, try to simplify the bino...
static Value * simplifyCmpSelOfMaxMin(Value *CmpLHS, Value *CmpRHS, CmpPredicate Pred, Value *TVal, Value *FVal)
static Value * simplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
static Value * simplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FSub, see if we can fold the result.
static Value * simplifyXorInst(Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for a Xor, see if we can fold the result.
static Value * simplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for a URem, see if we can fold the result.
static Constant * simplifyFPOp(ArrayRef< Value * > Ops, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior, RoundingMode Rounding)
Perform folds that are common to any floating-point operation.
static Value * threadCmpOverSelect(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a comparison with a select instruction, try to simplify the comparison by seeing wheth...
static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI, const DominatorTree *DT, AssumptionCache *AC, SmallSetVector< Instruction *, 8 > *UnsimplifiedUsers=nullptr)
Implementation of recursive simplification through an instruction's uses.
static bool isAllocDisjoint(const Value *V)
Return true if the underlying object (storage) must be disjoint from storage returned by any noalias ...
static Constant * getTrue(Type *Ty)
For a boolean type or a vector of boolean type, return true or a vector with every element true.
static Value * simplifyGEPInst(Type *, Value *, ArrayRef< Value * >, GEPNoWrapFlags, const SimplifyQuery &, unsigned)
Given operands for an GetElementPtrInst, see if we can fold the result.
static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q, unsigned MaxRecurse, bool IsSigned)
Return true if we can simplify X / Y to 0.
static Value * simplifyLdexp(Value *Op0, Value *Op1, const SimplifyQuery &Q, bool IsStrict)
static Value * simplifyLogicOfAddSub(Value *Op0, Value *Op1, Instruction::BinaryOps Opcode)
Given a bitwise logic op, check if the operands are add/sub with a common source value and inverted c...
static Value * simplifyOrLogic(Value *X, Value *Y)
static Type * getCompareTy(Value *Op)
static Value * simplifyCastInst(unsigned, Value *, Type *, const SimplifyQuery &, unsigned)
static Value * simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1, const SimplifyQuery &Q)
static Value * simplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for a BinaryOperator, see if we can fold the result.
static bool isICmpTrue(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
Given a predicate and two operands, return true if the comparison is true.
static Value * simplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q, unsigned)
Given operands for an InsertValueInst, see if we can fold the result.
static Value * simplifyAndInst(Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for an And, see if we can fold the result.
static Value * foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1, int MaskVal, Value *RootVec, unsigned MaxRecurse)
For the given destination element of a shuffle, peek through shuffles to match a root vector source o...
static Value * simplifyAndOrOfFCmps(const SimplifyQuery &Q, FCmpInst *LHS, FCmpInst *RHS, bool IsAnd)
static Value * extractEquivalentCondition(Value *V, CmpPredicate Pred, Value *LHS, Value *RHS)
Rummage around inside V looking for something equivalent to the comparison "LHS Pred RHS".
static Value * simplifyAndOrOfCmps(const SimplifyQuery &Q, Value *Op0, Value *Op1, bool IsAnd)
static Value * simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp, const SimplifyQuery &Q, bool AllowRefinement, SmallVectorImpl< Instruction * > *DropFlags, unsigned MaxRecurse)
static Value * threadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a binary operation with a select instruction as an operand, try to simplify the binop ...
static Value * simplifyICmpUsingMonotonicValues(CmpPredicate Pred, Value *LHS, Value *RHS)
static Constant * computePointerDifference(const DataLayout &DL, Value *LHS, Value *RHS)
Compute the constant difference between two pointer values.
static Value * simplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an SRem, see if we can fold the result.
static Value * simplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given the operands for an FMul, see if we can fold the result.
@ RecursionLimit
static Value * simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd)
Test if a pair of compares with a shared operand and 2 constants has an empty set intersection,...
static Value * simplifyAndOrWithICmpEq(unsigned Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
static Value * simplifyICmpWithDominatingAssume(CmpPredicate Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q)
static Value * simplifyShift(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool IsNSW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an Shl, LShr or AShr, see if we can fold the result.
static Constant * computePointerICmp(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
static Value * simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
These are simplifications common to SRem and URem.
static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT)
Does the given value dominate the specified phi node?
static Value * simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal, Value *FalseVal, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a select instruction when its condition operand is an integer comparison.
static Value * foldMinimumMaximumSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1)
Given a min/max intrinsic, see if it can be removed based on having an operand that is another min/ma...
static Value * simplifyUnaryIntrinsic(Function *F, Value *Op0, const SimplifyQuery &Q, const CallBase *Call)
This header provides classes for managing per-loop analyses.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
#define P(N)
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
This file implements a set that has insertion order iteration characteristics.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:166
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:39
Value * RHS
Value * LHS
BinaryOperator * Mul
static const uint32_t IV[8]
Definition: blake3_impl.h:78
Class for arbitrary precision integers.
Definition: APInt.h:78
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition: APInt.cpp:1007
unsigned getActiveBits() const
Compute the number of active bits in the value.
Definition: APInt.h:1492
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition: APInt.h:380
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
Definition: APInt.cpp:1640
void setSignBit()
Set the sign bit to 1.
Definition: APInt.h:1340
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1468
bool ult(const APInt &RHS) const
Unsigned less than comparison.
Definition: APInt.h:1111
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
Definition: APInt.h:1249
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition: APInt.h:1618
bool isNonPositive() const
Determine if this APInt Value is non-positive (<= 0).
Definition: APInt.h:361
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition: APInt.cpp:1015
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
Definition: APInt.h:356
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
Definition: APInt.h:475
bool getBoolValue() const
Convert APInt to a boolean value.
Definition: APInt.h:471
APInt srem(const APInt &RHS) const
Function for signed remainder operation.
Definition: APInt.cpp:1710
bool isMask(unsigned numBits) const
Definition: APInt.h:488
bool isMaxSignedValue() const
Determine if this is the largest signed value.
Definition: APInt.h:405
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition: APInt.h:334
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition: APInt.h:1150
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
Definition: APInt.h:1257
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition: APInt.h:440
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition: APInt.h:306
bool isSignBitSet() const
Determine if sign bit of this APInt is set.
Definition: APInt.h:341
bool slt(const APInt &RHS) const
Signed less than comparison.
Definition: APInt.h:1130
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition: APInt.h:296
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition: APInt.h:200
bool isOne() const
Determine if this is a value of 1.
Definition: APInt.h:389
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
Definition: APInt.h:239
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition: APInt.h:1221
an instruction to allocate memory on the stack
Definition: Instructions.h:63
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
const T & back() const
back - Get the last element.
Definition: ArrayRef.h:177
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:168
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
Definition: ArrayRef.h:213
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:163
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition: ArrayRef.h:198
An immutable pass that tracks lazily created AssumptionCache objects.
AssumptionCache & getAssumptionCache(Function &F)
Get the cached assumptions for a function.
A cache of @llvm.assume calls within a function.
MutableArrayRef< ResultElem > assumptionsFor(const Value *V)
Access the list of assumptions which affect this value.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:239
BinaryOps getOpcode() const
Definition: InstrTypes.h:370
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1120
This class represents a function call, abstracting a target machine's calling convention.
static unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy, Type *DstIntPtrTy)
Determine how a pair of casts can be eliminated, if they can be at all.
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:661
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition: InstrTypes.h:988
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
Definition: InstrTypes.h:856
bool isFalseWhenEqual() const
This is just a convenience.
Definition: InstrTypes.h:946
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:673
@ ICMP_SLT
signed less than
Definition: InstrTypes.h:702
@ ICMP_SLE
signed less or equal
Definition: InstrTypes.h:703
@ ICMP_UGE
unsigned greater or equal
Definition: InstrTypes.h:697
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:696
@ ICMP_SGT
signed greater than
Definition: InstrTypes.h:700
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:698
@ ICMP_EQ
equal
Definition: InstrTypes.h:694
@ ICMP_NE
not equal
Definition: InstrTypes.h:695
@ ICMP_SGE
signed greater or equal
Definition: InstrTypes.h:701
@ ICMP_ULE
unsigned less or equal
Definition: InstrTypes.h:699
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:683
bool isSigned() const
Definition: InstrTypes.h:928
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition: InstrTypes.h:825
bool isTrueWhenEqual() const
This is just a convenience.
Definition: InstrTypes.h:940
bool isFPPredicate() const
Definition: InstrTypes.h:780
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition: InstrTypes.h:787
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:763
static bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
bool isIntPredicate() const
Definition: InstrTypes.h:781
bool isUnsigned() const
Definition: InstrTypes.h:934
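The predicate transforms above follow a simple algebra: swapping the operands of a comparison swaps the predicate, while logically negating the comparison inverts it. A sketch with one illustrative predicate:

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// "a SLE b" with operands swapped is "b SGE a"; the logical
// negation of "a SLE b" is "a SGT b".
static bool predicateSketch() {
  CmpInst::Predicate P = CmpInst::ICMP_SLE;
  return CmpInst::getSwappedPredicate(P) == CmpInst::ICMP_SGE &&
         CmpInst::getInversePredicate(P) == CmpInst::ICMP_SGT;
}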
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
Definition: CmpPredicate.h:22
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2307
static Constant * getExtractElement(Constant *Vec, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
Definition: Constants.cpp:2554
static Constant * getBinOpAbsorber(unsigned Opcode, Type *Ty, bool AllowLHSConstant=false)
Return the absorbing element for the given binary operation, i.e.
Definition: Constants.cpp:2762
static Constant * getNot(Constant *C)
Definition: Constants.cpp:2631
static Constant * getInsertElement(Constant *Vec, Constant *Elt, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
Definition: Constants.cpp:2576
static Constant * getShuffleVector(Constant *V1, Constant *V2, ArrayRef< int > Mask, Type *OnlyIfReducedTy=nullptr)
Definition: Constants.cpp:2599
static bool isSupportedGetElementPtr(const Type *SrcElemTy)
Whether creating a constant expression for this getelementptr type is supported.
Definition: Constants.h:1379
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
Definition: Constants.h:1267
static Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
Definition: Constants.cpp:2691
static Constant * getZero(Type *Ty, bool Negative=false)
Definition: Constants.cpp:1057
static Constant * getNegativeZero(Type *Ty)
Definition: Constants.h:309
static Constant * getNaN(Type *Ty, bool Negative=false, uint64_t Payload=0)
Definition: Constants.cpp:1024
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
static ConstantInt * getTrue(LLVMContext &Context)
Definition: Constants.cpp:866
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
Definition: Constants.h:126
static ConstantInt * getFalse(LLVMContext &Context)
Definition: Constants.cpp:873
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:157
static ConstantInt * getBool(LLVMContext &Context, bool V)
Definition: Constants.cpp:880
static ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
Definition: Constants.cpp:1826
This class represents a range of values.
Definition: ConstantRange.h:47
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
bool isEmptySet() const
Return true if this set contains no members.
static ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
ConstantRange inverse() const
Return a new range that is the logical not of the current set.
bool contains(const APInt &Val) const
Return true if the specified value is in the set.
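makeExactICmpRegion is how several icmp folds below turn a predicate plus constant into a value set. A minimal sketch, assuming 8-bit operands:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// The exact region of X satisfying "icmp ult X, 8" is [0, 8);
// its inverse covers [8, 256), so 5 is inside and 9 is outside.
static bool rangeSketch() {
  ConstantRange R =
      ConstantRange::makeExactICmpRegion(CmpInst::ICMP_ULT, APInt(8, 8));
  return R.contains(APInt(8, 5)) && R.inverse().contains(APInt(8, 9));
}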
static Constant * get(StructType *T, ArrayRef< Constant * > V)
Definition: Constants.cpp:1378
static Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
Definition: Constants.cpp:1472
static Constant * get(ArrayRef< Constant * > V)
Definition: Constants.cpp:1421
This is an important base class in LLVM.
Definition: Constant.h:42
static Constant * getAllOnesValue(Type *Ty)
Definition: Constants.cpp:420
bool isAllOnesValue() const
Return true if this is the value that would be returned by getAllOnesValue.
Definition: Constants.cpp:107
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:373
bool isNaN() const
Return true if this is a floating-point NaN constant or a vector floating-point constant with all NaN...
Definition: Constants.cpp:277
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
Definition: Constants.cpp:435
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition: Constants.cpp:90
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits. FIXME: The defaults need to be removed once all of the backends/clients ...
Definition: DataLayout.h:364
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
Definition: DataLayout.cpp:851
unsigned getIndexTypeSizeInBits(Type *Ty) const
Layout size of the index used in GEP calculation.
Definition: DataLayout.cpp:754
IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
Definition: DataLayout.cpp:878
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:457
unsigned getIndexSizeInBits(unsigned AS) const
Size in bits of index used for address calculation in getelementptr.
Definition: DataLayout.h:369
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:617
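Pointer and index widths from the DataLayout drive the GEP offset arithmetic in this file. A small sketch, assuming address space 0 (helper name illustrative):

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

// The index type is the integer type GEP offsets are computed in;
// its width need not match the pointer width on every target.
static unsigned dataLayoutSketch(const DataLayout &DL, LLVMContext &Ctx) {
  IntegerType *IdxTy = DL.getIndexType(Ctx, /*AddressSpace=*/0);
  return IdxTy->getBitWidth() + DL.getPointerSizeInBits(0);
}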
Legacy analysis pass which computes a DominatorTree.
Definition: Dominators.h:317
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
This instruction extracts a struct member or array element value from an aggregate value.
This instruction compares its operands according to the predicate given to the constructor.
Convenience struct for specifying and reasoning about fast-math flags.
Definition: FMF.h:20
bool noSignedZeros() const
Definition: FMF.h:68
bool noInfs() const
Definition: FMF.h:67
bool allowReassoc() const
Flag queries.
Definition: FMF.h:65
bool noNaNs() const
Definition: FMF.h:66
Represents calls to the gc.relocate intrinsic.
Value * getBasePtr() const
Value * getDerivedPtr() const
Represents flags for the getelementptr instruction/expression.
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
This instruction compares its operands according to the predicate given to the constructor.
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
This instruction inserts a struct field or array element value into an aggregate value.
bool hasNoSignedZeros() const LLVM_READONLY
Determine whether the no-signed-zeros flag is set.
bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:70
An instruction for reading from memory.
Definition: Instructions.h:176
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:205
Metadata node.
Definition: Metadata.h:1069
static APInt getSaturationPoint(Intrinsic::ID ID, unsigned numBits)
Min/max intrinsics are monotonic; they operate on fixed-bitwidth values, so there is a certain thre...
ICmpInst::Predicate getPredicate() const
Returns the comparison predicate underlying the intrinsic.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1878
This class represents a cast from a pointer to an integer.
This class represents a sign extension of integer types.
This class represents the LLVM 'select' instruction.
size_type size() const
Determine the number of elements in the SetVector.
Definition: SetVector.h:98
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:162
static void commuteShuffleMask(MutableArrayRef< int > Mask, unsigned InVecNumElts)
Change values in a shuffle permute mask assuming the two vector operands of length InVecNumElts have ...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:363
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:384
bool contains(ConstPtrType Ptr) const
Definition: SmallPtrSet.h:458
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:519
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:370
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
void assign(size_type NumElts, ValueParamT Elt)
Definition: SmallVector.h:704
void reserve(size_type N)
Definition: SmallVector.h:663
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
TargetLibraryInfo & getTLI(const Function &F)
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:270
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:243
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:264
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:310
bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:128
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition: Type.h:267
static IntegerType * getInt32Ty(LLVMContext &C)
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1859
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
Value * getOperand(unsigned i) const
Definition: User.h:228
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr) const
Accumulate the constant offset this value has compared to a base pointer.
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1075
This class represents zero extension of integer types.
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
Definition: PatternMatch.h:524
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
Definition: PatternMatch.h:160
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
cst_pred_ty< is_negative > m_Negative()
Match an integer or vector of negative values.
Definition: PatternMatch.h:550
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
Definition: PatternMatch.h:100
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::FMul, true > m_c_FMul(const LHS &L, const RHS &R)
Matches FMul with LHS and RHS in either order.
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
Definition: PatternMatch.h:664
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
cstfp_pred_ty< is_inf > m_Inf()
Match a positive or negative infinity FP constant.
Definition: PatternMatch.h:726
m_Intrinsic_Ty< Opnd0 >::Ty m_BitReverse(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::FSub > m_FSub(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
Definition: PatternMatch.h:619
BinaryOp_match< cstfp_pred_ty< is_any_zero_fp >, RHS, Instruction::FSub > m_FNegNSZ(const RHS &X)
Match 'fneg X' as 'fsub +-0.0, X'.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
Definition: PatternMatch.h:165
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
Definition: PatternMatch.h:982
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
Definition: PatternMatch.h:764
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
Definition: PatternMatch.h:885
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
Definition: PatternMatch.h:168
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
Definition: PatternMatch.h:592
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
cstfp_pred_ty< is_neg_zero_fp > m_NegZeroFP()
Match a floating-point negative zero.
Definition: PatternMatch.h:782
specific_fpval m_SpecificFP(double V)
Match a specific floating point value or vector with all elements equal to the value.
Definition: PatternMatch.h:928
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
Definition: PatternMatch.h:245
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_Sqrt(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
Definition: PatternMatch.h:903
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
Definition: PatternMatch.h:599
apint_match m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
Definition: PatternMatch.h:305
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
Definition: PatternMatch.h:864
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoSignedWrap > m_NSWShl(const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
cst_pred_ty< custom_checkfn< APInt > > m_CheckedInt(function_ref< bool(const APInt &)> CheckFn)
Match an integer or vector where CheckFn(ele) for each element is true.
Definition: PatternMatch.h:481
specific_fpval m_FPOne()
Match a float 1.0 or vector with all elements equal to 1.0.
Definition: PatternMatch.h:931
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
apfloat_match m_APFloatAllowPoison(const APFloat *&Res)
Match APFloat while allowing poison in splat vector constants.
Definition: PatternMatch.h:322
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShl(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > > > m_c_MaxOrMin(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
Definition: PatternMatch.h:299
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Exact_match< T > m_Exact(const T &SubPattern)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
Definition: PatternMatch.h:773
BinaryOp_match< LHS, RHS, Instruction::FAdd, true > m_c_FAdd(const LHS &L, const RHS &R)
Matches FAdd with LHS and RHS in either order.
LogicalOp_match< LHS, RHS, Instruction::And, true > m_c_LogicalAnd(const LHS &L, const RHS &R)
Matches L && R with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
apfloat_match m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
Definition: PatternMatch.h:316
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShr(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
Definition: PatternMatch.h:152
cstfp_pred_ty< is_nan > m_NaN()
Match an arbitrary NaN constant.
Definition: PatternMatch.h:710
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition: PatternMatch.h:612
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
LogicalOp_match< LHS, RHS, Instruction::Or, true > m_c_LogicalOr(const LHS &L, const RHS &R)
Matches L || R with LHS and RHS in either order.
ThreeOps_match< Val_t, Elt_t, Idx_t, Instruction::InsertElement > m_InsertElt(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx)
Matches InsertElementInst.
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoSignedWrap > m_NSWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
Definition: PatternMatch.h:239
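Nearly every fold in this file is written with these m_* combinators: match() walks the value tree, and binders like m_Value/m_APInt capture sub-operands on success. A minimal sketch of the idiom (the helper name is illustrative):

#include "llvm/ADT/APInt.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// Returns X if V has the form "and X, <constant>" in either operand
// order (m_c_And handles commutation); nullptr otherwise.
static Value *matchAndOfConstant(Value *V) {
  Value *X;
  const APInt *C;
  if (match(V, m_c_And(m_Value(X), m_APInt(C))))
    return X;
  return nullptr;
}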
ExceptionBehavior
Exception behavior used for floating point operations.
Definition: FPEnv.h:38
@ ebStrict
This corresponds to "fpexcept.strict".
Definition: FPEnv.h:41
@ ebIgnore
This corresponds to "fpexcept.ignore".
Definition: FPEnv.h:39
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
Value * simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q)
Given operands for an AShr, fold the result or return null.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
Definition: MathExtras.h:353
@ Offset
Definition: DWP.cpp:480
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1739
Value * simplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FMul, fold the result or return null.
Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)
Return true if it is valid to use the assumptions provided by an assume intrinsic,...
bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
Constant * ConstantFoldSelectInstruction(Constant *Cond, Constant *V1, Constant *V2)
Attempt to constant fold a select instruction with the specified operands.
Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
Constant * ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL, const Instruction *I, bool AllowNonDeterministic=true)
Attempt to constant fold a floating point binary operation with the specified operands,...
bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if it's even possible to fold a call to the specified function.
APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth)
Return the minimum or maximum constant value for the specified integer min/max flavor and type.
Value * simplifySDivInst(Value *LHS, Value *RHS, bool IsExact, const SimplifyQuery &Q)
Given operands for an SDiv, fold the result or return null.
Value * simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q)
Given operand for a UnaryOperator, fold the result or return null.
bool isDefaultFPEnvironment(fp::ExceptionBehavior EB, RoundingMode RM)
Returns true if the exception handling behavior and rounding mode match what is used in the default f...
Definition: FPEnv.h:65
Value * simplifyMulInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Mul, fold the result or return null.
bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, APInt &Offset, const DataLayout &DL, DSOLocalEquivalent **DSOEquiv=nullptr)
If this constant is a constant offset from a global, return the global and the constant.
Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
Value * simplifyCall(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
Given a callsite, callee, and arguments, fold the result or return null.
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to have exactly one bit set when defined.
bool canRoundingModeBe(RoundingMode RM, RoundingMode QRM)
Returns true if the rounding mode RM may be QRM at compile time or at run time.
Definition: FPEnv.h:77
bool isNoAliasCall(const Value *V)
Return true if this pointer is returned by a noalias function.
Value * simplifyFCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q)
Given operands for an FCmpInst, fold the result or return null.
Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
Constant * ConstantFoldGetElementPtr(Type *Ty, Constant *C, std::optional< ConstantRange > InRange, ArrayRef< Value * > Idxs)
CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered=false)
Return the canonical comparison predicate for the specified minimum/maximum flavor.
Value * simplifyShuffleVectorInst(Value *Op0, Value *Op1, ArrayRef< int > Mask, Type *RetTy, const SimplifyQuery &Q)
Given operands for a ShuffleVectorInst, fold the result or return null.
Constant * ConstantFoldCall(const CallBase *Call, Function *F, ArrayRef< Constant * > Operands, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldCall - Attempt to constant fold a call to the specified function with the specified argum...
Value * simplifyOrInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an Or, fold the result or return null.
Value * simplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an Xor, fold the result or return null.
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
Constant * ConstantFoldExtractValueInstruction(Constant *Agg, ArrayRef< unsigned > Idxs)
Attempt to constant fold an extractvalue instruction with the specified operands and indices.
bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if 'V & Mask' is known to be zero.
Value * simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty, const SimplifyQuery &Q)
Given operands for a CastInst, fold the result or return null.
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
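The usual calling pattern for the simplify* entry points above: a non-null return is an existing value (or constant) that can replace the instruction outright. A minimal sketch, assuming the caller manages iteration (helper name illustrative):

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// Try to fold I; on success, forward all uses to the simplified
// value and delete I. Q must at least carry the module's DataLayout.
static bool tryFold(Instruction *I, const SimplifyQuery &Q) {
  Value *V = simplifyInstruction(I, Q);
  if (!V)
    return false;
  I->replaceAllUsesWith(V);
  I->eraseFromParent();
  return true;
}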
unsigned M1(unsigned Val)
Definition: VE.h:376
Value * simplifySubInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Sub, fold the result or return null.
Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
Definition: STLExtras.h:1952
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1746
bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL, const TargetLibraryInfo *TLI, ObjectSizeOpts Opts={})
Compute the size of the object pointed by Ptr.
bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
Constant * ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty, const DataLayout &DL)
If C is a uniform value where all bits are the same (either all zero, all ones, all undef or all pois...
SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF)
Return the inverse minimum/maximum flavor of the specified flavor.
bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, SmallSetVector< Instruction *, 8 > *UnsimplifiedUsers=nullptr)
Replace all uses of 'I' with 'SimpleV' and simplify the uses recursively.
Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
SelectPatternFlavor
Specific patterns of select instructions we can match.
Value * simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Shl, fold the result or return null.
Value * simplifyFNegInst(Value *Op, FastMathFlags FMF, const SimplifyQuery &Q)
Given operand for an FNeg, fold the result or return null.
Value * simplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FSub, fold the result or return null.
bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
Value * simplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FRem, fold the result or return null.
Value * simplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FAdd, fold the result or return null.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, bool StoreCaptures, unsigned MaxUsesToExplore=0)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (w...
Value * simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q)
Given operands for a LShr, fold the result or return null.
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition: Function.cpp:1187
Value * simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an ICmpInst, fold the result or return null.
ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
Constant * ConstantFoldInstOperands(Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
Value * simplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an And, fold the result or return null.
Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
bool isNotCrossLaneOperation(const Instruction *I)
Return true if the instruction doesn't potentially cross vector lanes.
Value * simplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an InsertValueInst, fold the result or return null.
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
Value * simplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FDiv, fold the result or return null.
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
Value * simplifyLoadInst(LoadInst *LI, Value *PtrOp, const SimplifyQuery &Q)
Given a load instruction and its pointer operand, fold the result or return null.
Value * simplifyFMAFMul(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for the multiplication of an FMA, fold the result or return null.
Value * simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q)
Given a constrained FP intrinsic call, tries to compute its simplified version.
Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given values are known to be non-equal when defined.
@ Or
Bitwise or logical OR of integers.
std::optional< DecomposedBitTest > decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate Pred, bool LookThroughTrunc=true, bool AllowNonZeroC=false)
Decompose an icmp into the form ((X & Mask) pred C) if possible.
Value * findScalarElement(Value *V, unsigned EltNo)
Given a vector and an element number, see if the scalar value is already around as a register,...
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
Value * simplifyUDivInst(Value *LHS, Value *RHS, bool IsExact, const SimplifyQuery &Q)
Given operands for a UDiv, fold the result or return null.
Value * simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType, Value *Op0, Value *Op1, const SimplifyQuery &Q, const CallBase *Call)
Given operands for a BinaryIntrinsic, fold the result or return null.
RoundingMode
Rounding mode.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
unsigned M0(unsigned Val)
Definition: VE.h:375
Value * simplifyInsertElementInst(Value *Vec, Value *Elt, Value *Idx, const SimplifyQuery &Q)
Given operands for an InsertElement, fold the result or return null.
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:217
SelectPatternResult matchDecomposedSelectPattern(CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Determine the pattern that a select with the given compare as its predicate and given values as its t...
Value * simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp, const SimplifyQuery &Q, bool AllowRefinement, SmallVectorImpl< Instruction * > *DropFlags=nullptr)
See if V simplifies when its operand Op is replaced with RepOp.
bool maskIsAllZeroOrUndef(Value *Mask)
Given a mask vector of i1, return true if all of the elements of this predicate mask are known to be ...
std::pair< Value *, FPClassTest > fcmpToClassTest(CmpInst::Predicate Pred, const Function &F, Value *LHS, Value *RHS, bool LookThroughSrc=true)
Returns a pair of values, which if passed to llvm.is.fpclass, returns the same result as an fcmp with...
Value * simplifySRemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an SRem, fold the result or return null.
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1903
std::optional< bool > computeKnownFPSignBit(const Value *V, unsigned Depth, const SimplifyQuery &SQ)
Return false if we can prove that the specified FP value's sign bit is 0.
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return the number of times the sign bit of the register is replicated into the other bits.
bool cannotBeNegativeZero(const Value *V, unsigned Depth, const SimplifyQuery &SQ)
Return true if we can prove that the specified FP value is never equal to -0.0.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Definition: STLExtras.h:2087
Constant * ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val, ArrayRef< unsigned > Idxs)
ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue instruction with the spe...
Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on do...
Value * simplifyCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a CmpInst, fold the result or return null.
bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, unsigned Depth, const SimplifyQuery &SQ)
Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.
bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW=false, bool AllowPoison=true)
Return true if the two given values are negations of each other.
Constant * ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned, const DataLayout &DL)
Constant fold a zext, sext or trunc, depending on IsSigned and whether the DestTy is wider or narrowe...
const SimplifyQuery getBestSimplifyQuery(Pass &, Function &)
bool isCheckForZeroAndMulWithOverflow(Value *Op0, Value *Op1, bool IsAnd, Use *&Y)
Match one of the patterns up to the select/logic op: Op0 = icmp ne i4 X, 0 Agg = call { i4,...
bool canIgnoreSNaN(fp::ExceptionBehavior EB, FastMathFlags FMF)
Returns true if the possibility of a signaling NaN can be safely ignored.
Definition: FPEnv.h:83
Value * simplifyURemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a URem, fold the result or return null.
Value * simplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &Q)
Given operands for an ExtractElementInst, fold the result or return null.
Value * simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal, const SimplifyQuery &Q)
Given operands for a SelectInst, fold the result or return null.
std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
This callback is used in conjunction with PointerMayBeCaptured.
virtual void tooManyUses()=0
tooManyUses - The depth of traversal has breached a limit.
virtual bool captured(const Use *U)=0
captured - Information about the pointer was captured by the user of use U.
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
InstrInfoQuery provides an interface to query additional information for instructions like metadata o...
Definition: SimplifyQuery.h:25
bool isExact(const BinaryOperator *Op) const
Definition: SimplifyQuery.h:48
MDNode * getMetadata(const Instruction *I, unsigned KindID) const
Definition: SimplifyQuery.h:30
bool hasNoSignedWrap(const InstT *Op) const
Definition: SimplifyQuery.h:42
bool hasNoUnsignedWrap(const InstT *Op) const
Definition: SimplifyQuery.h:36
bool isNonNegative() const
Returns true if this value is known to be non-negative.
Definition: KnownBits.h:100
bool isZero() const
Returns true if value is all zero.
Definition: KnownBits.h:79
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
Definition: KnownBits.h:234
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
Definition: KnownBits.h:266
bool hasConflict() const
Returns true if there is conflicting information.
Definition: KnownBits.h:50
unsigned getBitWidth() const
Get the bit width of this value.
Definition: KnownBits.h:43
unsigned countMaxActiveBits() const
Returns the maximum number of bits needed to represent all possible unsigned values with these known ...
Definition: KnownBits.h:288
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition: KnownBits.h:240
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
Definition: KnownBits.h:137
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
Definition: KnownBits.h:121
bool isNegative() const
Returns true if this value is known to be negative.
Definition: KnownBits.h:97
static KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
Definition: KnownBits.cpp:285
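computeKnownBits (listed earlier) fills in a KnownBits record whose queries appear above; the simplifier uses them for facts such as "this operand is even" or "these bit ranges cannot overlap". A short sketch, assuming V has integer type (helper name illustrative):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;

// If the low bit of V is known zero, V is provably even.
static bool isKnownEven(const Value *V, const DataLayout &DL) {
  KnownBits Known(V->getType()->getScalarSizeInBits());
  computeKnownBits(V, Known, DL);
  return Known.countMinTrailingZeros() >= 1;
}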
bool isKnownAlwaysNaN() const
Return true if it's known this must always be a nan.
static constexpr FPClassTest OrderedLessThanZeroMask
std::optional< bool > SignBit
std::nullopt if the sign bit is unknown, true if the sign bit is definitely set or false if the sign ...
bool isKnownNeverNaN() const
Return true if it's known this can never be a nan.
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
bool cannotBeOrderedLessThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never less than -...
The adaptor from a function pass to a loop pass computes these analyses and makes them available to t...
Various options to control the behavior of getObjectSize.
bool NullIsUnknownSize
If this is true, null pointers in address space 0 will be treated as though they can't be evaluated.
Mode EvalMode
How we want to evaluate this object's size.
SelectPatternFlavor Flavor
bool Ordered
When implementing this min/max pattern as fcmp; select, does the fcmp have to be ordered?
static bool isMinOrMax(SelectPatternFlavor SPF)
Return true if SPF is a min or a max pattern.
const DataLayout & DL
Definition: SimplifyQuery.h:71
const Instruction * CxtI
Definition: SimplifyQuery.h:75
bool CanUseUndef
Controls whether simplifications are allowed to constrain the range of possible values for uses of un...
Definition: SimplifyQuery.h:87
const DominatorTree * DT
Definition: SimplifyQuery.h:73
SimplifyQuery getWithInstruction(const Instruction *I) const
bool isUndefValue(Value *V) const
If CanUseUndef is true, returns whether V is undef.
AssumptionCache * AC
Definition: SimplifyQuery.h:74
const TargetLibraryInfo * TLI
Definition: SimplifyQuery.h:72
SimplifyQuery getWithoutUndef() const
const InstrInfoQuery IIQ
Definition: SimplifyQuery.h:82
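A SimplifyQuery bundles the DataLayout with whatever optional analyses (TLI, DT, AC) the caller has; getWithInstruction pins the context instruction used for dominance and assumption checks. A sketch of typical construction (helper name illustrative):

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/SimplifyQuery.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// Build a minimal query (DataLayout only) and fold I in its own
// context; richer callers also pass TLI/DT/AC to the constructor.
static Value *foldInContext(Instruction *I, const DataLayout &DL) {
  SimplifyQuery Q(DL);
  return simplifyInstruction(I, Q.getWithInstruction(I));
}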